From 433c75be35d8e9aeabafa9c35cbd9d38855e0497 Mon Sep 17 00:00:00 2001
From: liubo <[email protected]>
Date: Mon, 14 Aug 2023 16:03:38 +0800
Subject: [PATCH] etmem: fix project stop cmd timeout problem
In patch "fix fd leak when user stop task timer", set the
thread to PTHREAD_CANCEL_DISABLE to solve the FD leakage
problem during task obtaining.
However, when the project stop command is executed,
the rpc cmd exits only after all task threads are executed.
But the RPC cmd timeout period is only 10 seconds.
When the task execution time exceeds the timeout period, the RPC
command times out and an error message is returned.
According to code analysis, the most time-consuming part of all
tasks is in the sleep phase of the scanning logic.
The preceding problem is solved by skipping the corresponding
sleep environment and allowing the thread to exit directly.
Signed-off-by: liubo <[email protected]>
Signed-off-by: volcanodragon <[email protected]>
---
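Note for reviewers: the sketch below is a minimal, standalone illustration of the
cancellation-window pattern this patch applies; the names (worker, free_resource)
and the dummy allocation are hypothetical and are not etmem code. Cancellation
stays disabled while the thread holds resources, a cleanup handler is registered
with pthread_cleanup_push(), and cancellation is enabled only around the sleep,
so a pthread_cancel() issued by the stop path takes effect during the sleep
instead of after the whole scan round.

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>

    /* Cleanup handler: runs on normal pop(1) and on cancellation. */
    static void free_resource(void *arg)
    {
        void **res = (void **)arg;
        free(*res);
        *res = NULL;
        printf("resource released\n");
    }

    static void *worker(void *arg)
    {
        void *res = NULL;
        int round;

        (void)arg;
        /* keep cancellation off while resources are held */
        (void)pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, NULL);
        pthread_cleanup_push(free_resource, &res);

        res = malloc(64);               /* stands in for scan state such as vmas */

        for (round = 0; round < 5; round++) {
            /* ... scan work would run here with cancellation disabled ... */

            /* open a window: a pending cancel lands in sleep() */
            (void)pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
            sleep(1);                   /* cancellation point */
            (void)pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, NULL);
        }

        pthread_cleanup_pop(1);         /* frees res on the normal path too */
        return NULL;
    }

    int main(void)
    {
        pthread_t tid;

        pthread_create(&tid, NULL, worker, NULL);
        sleep(2);                       /* let the worker reach its sleep phase */
        (void)pthread_cancel(tid);      /* returns promptly instead of waiting out the scan */
        (void)pthread_join(tid, NULL);
        return 0;
    }

With the default deferred cancel type, sleep() is a cancellation point, so the
cancel request above is honoured within at most one second, and free_resource()
still runs, so nothing is leaked.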
etmem/inc/etmemd_inc/etmemd_threadpool.h | 2 +-
etmem/src/etmemd_src/etmemd_pool_adapter.c | 5 ++++-
etmem/src/etmemd_src/etmemd_scan.c | 17 ++++++++++++++++-
etmem/src/etmemd_src/etmemd_slide.c | 22 +++++++++++-----------
etmem/src/etmemd_src/etmemd_threadpool.c | 4 ++--
etmem/src/etmemd_src/etmemd_threadtimer.c | 18 ++++++------------
6 files changed, 40 insertions(+), 28 deletions(-)
diff --git a/etmem/inc/etmemd_inc/etmemd_threadpool.h b/etmem/inc/etmemd_inc/etmemd_threadpool.h
index 57af8be..4e62c81 100644
--- a/etmem/inc/etmemd_inc/etmemd_threadpool.h
+++ b/etmem/inc/etmemd_inc/etmemd_threadpool.h
@@ -74,6 +74,6 @@ void threadpool_reset_status(thread_pool** inst);
/*
* Stop and destroy the thread pool instance
* */
-void threadpool_stop_and_destroy(thread_pool** inst);
+void threadpool_stop_and_destroy(thread_pool **inst);
#endif //ETMEMD_THREADPOOL_H
diff --git a/etmem/src/etmemd_src/etmemd_pool_adapter.c b/etmem/src/etmemd_src/etmemd_pool_adapter.c
index dfda3f4..39f9451 100644
--- a/etmem/src/etmemd_src/etmemd_pool_adapter.c
+++ b/etmem/src/etmemd_src/etmemd_pool_adapter.c
@@ -50,6 +50,8 @@ static void *launch_threadtimer_executor(void *arg)
int scheduing_count;
if (tk->eng->proj->start) {
+ (void)pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, NULL);
+
if (etmemd_get_task_pids(tk, true) != 0) {
return NULL;
}
@@ -57,6 +59,7 @@ static void *launch_threadtimer_executor(void *arg)
push_ctrl_workflow(&tk->pids, executor->func);
threadpool_notify(tk->threadpool_inst);
+ (void)pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
pool_inst = tk->threadpool_inst;
scheduing_count = __atomic_load_n(&pool_inst->scheduing_size, __ATOMIC_SEQ_CST);
@@ -120,12 +123,12 @@ void stop_and_delete_threadpool_work(struct task *tk)
tk->value, tk->eng->proj->name);
return;
}
-
/* stop the threadtimer first */
thread_timer_stop(tk->timer_inst);
/* destroy them then */
thread_timer_destroy(&tk->timer_inst);
+
threadpool_stop_and_destroy(&tk->threadpool_inst);
}
diff --git a/etmem/src/etmemd_src/etmemd_scan.c b/etmem/src/etmemd_src/etmemd_scan.c
index 699b1cd..5620951 100644
--- a/etmem/src/etmemd_src/etmemd_scan.c
+++ b/etmem/src/etmemd_src/etmemd_scan.c
@@ -120,6 +120,18 @@ void free_vmas(struct vmas *vmas)
free(vmas);
}
+static void clean_vmas_resource_unexpected(void *arg)
+{
+ struct vmas **vmas = (struct vmas **)arg;
+
+ if (*vmas == NULL) {
+ return;
+ }
+
+ free_vmas(*vmas);
+ *vmas = NULL;
+}
+
static bool parse_vma_seg0(struct vma *vma, const char *seg0)
{
int ret;
@@ -777,6 +789,7 @@ struct page_refs *etmemd_do_scan(const struct task_pid *tpid, const struct task
return NULL;
}
+ pthread_cleanup_push(clean_vmas_resource_unexpected, &vmas);
/* get vmas of target pid first. */
vmas = get_vmas(pid);
if (vmas == NULL) {
@@ -799,10 +812,12 @@ struct page_refs *etmemd_do_scan(const struct task_pid *tpid, const struct task
page_refs = NULL;
break;
}
+ (void)pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
sleep((unsigned)page_scan->sleep);
+ (void)pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, NULL);
}
- free_vmas(vmas);
+ pthread_cleanup_pop(1);
return page_refs;
}
diff --git a/etmem/src/etmemd_src/etmemd_slide.c b/etmem/src/etmemd_src/etmemd_slide.c
index 1a11f45..25fa45d 100644
--- a/etmem/src/etmemd_src/etmemd_slide.c
+++ b/etmem/src/etmemd_src/etmemd_slide.c
@@ -212,13 +212,16 @@ static void *slide_executor(void *arg)
struct memory_grade *memory_grade = NULL;
struct page_sort *page_sort = NULL;
- if (check_should_swap(tk_pid) == DONT_SWAP) {
- return NULL;
- }
+ /* The pthread_setcancelstate interface returns an error only when the
+ * input state argument is invalid, so there is no need to check the return value.
+ */
+ (void)pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, NULL);
- if (pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, NULL) != 0) {
- etmemd_log(ETMEMD_LOG_ERR, "failed to set pthread cancel state.\n");
- return NULL;
+ /* register cleanup function in case an unexpected cancellation is detected */
+ pthread_cleanup_push(clean_page_refs_unexpected, &page_refs);
+
+ if (check_should_swap(tk_pid) == DONT_SWAP) {
+ goto scan_out;
}
page_refs = etmemd_do_scan(tk_pid, tk_pid->tk);
@@ -238,8 +241,7 @@ static void *slide_executor(void *arg)
scan_out:
clean_page_sort_unexpected(&page_sort);
- /* no need to use page_refs any longer. */
- clean_page_refs_unexpected(&page_refs);
+ pthread_cleanup_pop(1);
if (memory_grade == NULL) {
etmemd_log(ETMEMD_LOG_DEBUG, "pid %u memory grade is empty\n", tk_pid->pid);
@@ -261,9 +263,7 @@ exit:
etmemd_log(ETMEMD_LOG_INFO, "malloc_trim to release memory for pid %u fail\n", tk_pid->pid);
}
- if (pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL) != 0) {
- etmemd_log(ETMEMD_LOG_DEBUG, "pthread_setcancelstate PTHREAD_CANCEL_ENABLE failed.\n");
- }
+ (void)pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
pthread_testcancel();
return NULL;
diff --git a/etmem/src/etmemd_src/etmemd_threadpool.c b/etmem/src/etmemd_src/etmemd_threadpool.c
index 4375ca1..6d35fe1 100644
--- a/etmem/src/etmemd_src/etmemd_threadpool.c
+++ b/etmem/src/etmemd_src/etmemd_threadpool.c
@@ -203,7 +203,7 @@ static void threadpool_cancel_tasks_working(const thread_pool *inst)
int i;
for (i = 0; i < inst->max_thread_cap; i++) {
- pthread_cancel(inst->tid[i]);
+ (void)pthread_cancel(inst->tid[i]);
}
}
@@ -236,7 +236,7 @@ void threadpool_stop_and_destroy(thread_pool **inst)
threadpool_cancel_tasks_working(thread_instance);
for (i = 0; i < thread_instance->max_thread_cap; i++) {
- pthread_join(thread_instance->tid[i], NULL);
+ (void)pthread_join(thread_instance->tid[i], NULL);
}
free(thread_instance->tid);
diff --git a/etmem/src/etmemd_src/etmemd_threadtimer.c b/etmem/src/etmemd_src/etmemd_threadtimer.c
index 4014c72..582a84d 100644
--- a/etmem/src/etmemd_src/etmemd_threadtimer.c
+++ b/etmem/src/etmemd_src/etmemd_threadtimer.c
@@ -37,11 +37,7 @@ static void *thread_timer_routine(void *arg)
expired_time = timer->expired_time;
- if (pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, NULL) != 0) {
- etmemd_log(ETMEMD_LOG_ERR, "failed to set pthread cancel state.\n");
- return NULL;
- }
-
+ pthread_cleanup_push(threadtimer_cancel_unlock, &timer->cond_mutex);
pthread_mutex_lock(&timer->cond_mutex);
while (!timer->down) {
if (clock_gettime(CLOCK_MONOTONIC, &timespec) != 0) {
@@ -64,12 +60,9 @@ static void *thread_timer_routine(void *arg)
break;
}
}
- threadtimer_cancel_unlock(&timer->cond_mutex);
+ /* unlock the timer->cond_mutex */
+ pthread_cleanup_pop(1);
- if (pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL) != 0) {
- etmemd_log(ETMEMD_LOG_DEBUG, "pthread_setcancelstate PTHREAD_CANCEL_ENABLE failed.\n");
- }
- pthread_testcancel();
pthread_exit(NULL);
}
@@ -149,9 +142,10 @@ void thread_timer_stop(timer_thread* inst)
return;
}
inst->down = true;
+ pthread_cond_broadcast(&(inst->cond));
- pthread_cancel(inst->pthread);
- pthread_join(inst->pthread, NULL);
+ (void)pthread_cancel(inst->pthread);
+ (void)pthread_join(inst->pthread, NULL);
etmemd_log(ETMEMD_LOG_DEBUG, "Timer instance stops ! \n");
}
--
2.33.0