Improve nodes (un)locking #261

Draft · wants to merge 2 commits into main

native/mod_manager/mod_manager.c (5 changes: 3 additions & 2 deletions)

```diff
@@ -1367,7 +1367,9 @@ static char *process_config(request_rec *r, char **ptr, int *errtype)
             return err_msg;
         }
         /* Node part */
+        ap_assert(loc_lock_nodes() == APR_SUCCESS);
         err_msg = process_config_node(ptr[i], ptr[i + 1], &nodeinfo, errtype);
+        loc_unlock_nodes();
         if (err_msg != NULL) {
             return err_msg;
         }
@@ -1412,8 +1414,7 @@ static char *process_config(request_rec *r, char **ptr, int *errtype)
         nodeinfo.mess.ResponseFieldSize = mconf->response_field_size;
     }
     /* Insert or update balancer description */
-    rv = loc_lock_nodes();
-    ap_assert(rv == APR_SUCCESS);
+    ap_assert(loc_lock_nodes() == APR_SUCCESS);
     if (insert_update_balancer(balancerstatsmem, &balancerinfo) != APR_SUCCESS) {
         loc_unlock_nodes();
         *errtype = TYPEMEM;
```
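
For readers skimming the mod_manager.c hunks: both converge on the same idiom, asserting on the lock call directly instead of staging its status in rv first, and bracketing the shared-memory node update with a lock/unlock pair. Below is a minimal sketch of that idiom, not the PR's code; do_node_update() is a hypothetical stand-in for process_config_node(), while loc_lock_nodes() and loc_unlock_nodes() keep the signatures implied by the diff.

```c
#include <httpd.h>      /* ap_assert() */
#include <apr_errno.h>  /* apr_status_t, APR_SUCCESS */

/* Signatures as used in the diff above. */
apr_status_t loc_lock_nodes(void);
void loc_unlock_nodes(void);

/* Hypothetical stand-in for process_config_node(); returns an error
 * message or NULL, like the real call. */
char *do_node_update(void);

static char *guarded_node_update(void)
{
    char *err_msg;

    /* One statement replaces "rv = loc_lock_nodes(); ap_assert(rv == APR_SUCCESS);".
     * A failed lock aborts the child rather than letting it touch shared
     * node memory unlocked. */
    ap_assert(loc_lock_nodes() == APR_SUCCESS);
    err_msg = do_node_update();  /* shared-memory update, done under the lock */
    loc_unlock_nodes();          /* release before acting on the result */
    return err_msg;
}
```
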
native/mod_proxy_cluster/mod_proxy_cluster.c (14 changes: 7 additions & 7 deletions)

```diff
@@ -1740,10 +1740,8 @@ static proxy_worker *internal_find_best_byrequests(const proxy_balancer *balance
 
     /* create workers for new nodes */
     if (!cache_share_for) {
-        ap_assert(node_storage->lock_nodes() == APR_SUCCESS);
         update_workers_node(conf, r->pool, r->server, 1, node_table);
         check_workers(conf, r->server);
-        node_storage->unlock_nodes();
     }
 
     /* do this once now to avoid repeating find_node_context_host through loop iterations */
@@ -2992,8 +2990,10 @@ static proxy_worker *find_best_worker(const proxy_balancer *balancer, const prox
         return NULL;
     }
 
+    ap_assert(node_storage->lock_nodes() == APR_SUCCESS);
     candidate = internal_find_best_byrequests(balancer, conf, r, domain, failoverdomain, vhost_table, context_table,
                                               node_table);
+    node_storage->unlock_nodes();
 
     if ((rv = PROXY_THREAD_UNLOCK(balancer)) != APR_SUCCESS) {
         ap_log_error(APLOG_MARK, APLOG_ERR, rv, r->server,
@@ -3240,7 +3240,6 @@ static int proxy_cluster_pre_request(proxy_worker **worker, proxy_balancer **bal
         check_workers(conf, r->server);
         node_storage->unlock_nodes();
         if (!(*balancer = ap_proxy_get_balancer(r->pool, conf, *url, 0))) {
-            /* node_storage->unlock_nodes(); */
             ap_log_error(APLOG_MARK, APLOG_ERR, 0, r->server, "proxy_cluster_pre_request: CLUSTER no balancer for %s",
                          *url);
             return DECLINED;
@@ -3250,8 +3249,9 @@
     }
 
     /* Step 2: find the session route */
-
+    ap_assert(node_storage->lock_nodes() == APR_SUCCESS);
     runtime = find_session_route(*balancer, r, &route, &sticky, url, &domain, vhost_table, context_table, node_table);
+    node_storage->unlock_nodes();
 
     /* Lock the LoadBalancer
      * XXX: perhaps we need the process lock here
@@ -3332,9 +3332,6 @@ static int proxy_cluster_pre_request(proxy_worker **worker, proxy_balancer **bal
         *worker = runtime;
     }
 
-    (*worker)->s->busy++;
-    apr_pool_cleanup_register(r->pool, *worker, decrement_busy_count, apr_pool_cleanup_null);
-
     /* Also mark the context here note that find_best_worker set BALANCER_CONTEXT_ID */
     context_id = apr_table_get(r->subprocess_env, "BALANCER_CONTEXT_ID");
     ap_assert(node_storage->lock_nodes() == APR_SUCCESS);
@@ -3346,7 +3343,9 @@
     /* XXX: Do we need the lock here??? */
    helper = (proxy_cluster_helper *)(*worker)->context;
     helper->count_active++;
+    (*worker)->s->busy++;
     node_storage->unlock_nodes();
+    apr_pool_cleanup_register(r->pool, *worker, decrement_busy_count, apr_pool_cleanup_null);
 
     /*
      * get_route_balancer already fills all of the notes and some subprocess_env
@@ -3469,6 +3468,7 @@ static int proxy_cluster_post_request(proxy_worker *worker, proxy_balancer *bala
                       worker->s->name,
 #endif
                       val);
+
         worker->s->status |= PROXY_WORKER_IN_ERROR;
         worker->s->error_time = apr_time_now();
         break;
```
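
Two themes run through the mod_proxy_cluster.c hunks: the node lock moves out of internal_find_best_byrequests() into its caller find_best_worker(), and in proxy_cluster_pre_request() the busy accounting now happens inside the locked region, with the pool cleanup registered only after the lock is released. The sketch below is a simplified illustration of that reordering, not the module's code; the stub types are assumptions, and only the member names, the node_storage lock calls, and decrement_busy_count are taken from the diff.

```c
#include <httpd.h>       /* ap_assert() */
#include <apr_pools.h>   /* apr_pool_t, apr_pool_cleanup_register() */
#include <apr_errno.h>   /* apr_status_t, APR_SUCCESS */

/* Stand-in shapes; the real types come from httpd's mod_proxy.h and from
 * mod_proxy_cluster.c. Only the member names used below appear in the diff. */
typedef struct {
    int busy;                         /* worker->s->busy in the diff */
} worker_shared_stub;

typedef struct {
    worker_shared_stub *s;
    void *context;                    /* holds the proxy_cluster_helper */
} proxy_worker_stub;

typedef struct {
    int count_active;                 /* helper->count_active in the diff */
} proxy_cluster_helper;

typedef struct {
    apr_status_t (*lock_nodes)(void);
    void (*unlock_nodes)(void);
} node_storage_stub;

extern node_storage_stub *node_storage;

/* Pool cleanup named in the diff; undoes the busy increment at request end. */
apr_status_t decrement_busy_count(void *data);

static void mark_worker_busy(apr_pool_t *pool, proxy_worker_stub *worker)
{
    proxy_cluster_helper *helper = (proxy_cluster_helper *)worker->context;

    ap_assert(node_storage->lock_nodes() == APR_SUCCESS);
    helper->count_active++;  /* both counters now change under the node lock */
    worker->s->busy++;       /* previously incremented before the lock was taken */
    node_storage->unlock_nodes();

    /* Register the matching decrement only after unlocking, mirroring the
     * diff's ordering and keeping the critical section short. */
    apr_pool_cleanup_register(pool, worker, decrement_busy_count,
                              apr_pool_cleanup_null);
}
```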