| |
@@ -1,6 +1,7 @@
|
| |
/** BEGIN COPYRIGHT BLOCK
|
| |
* Copyright (C) 2001 Sun Microsystems, Inc. Used by permission.
|
| |
* Copyright (C) 2005 Red Hat, Inc.
|
| |
+ * Copyright (C) 2018 William Brown <william@blackhats.net.au>
|
| |
* All rights reserved.
|
| |
*
|
| |
* License: GPL (version 3 or any later version).
|
| |
@@ -38,17 +39,8 @@
|
| |
ber_len_t ber_len,
|
| |
ber_len_t maxbersize);
|
| |
|
| |
- static PRStack *op_stack; /* stack of Slapi_Operation * objects so we don't have to malloc/free every time */
|
| |
- static PRInt32 op_stack_size; /* size of op_stack */
|
| |
-
|
| |
- struct Slapi_op_stack
|
| |
- {
|
| |
- PRStackElem stackelem; /* must be first in struct for PRStack to work */
|
| |
- Slapi_Operation *op;
|
| |
- };
|
| |
-
|
| |
- static void add_work_q(work_q_item *, struct Slapi_op_stack *);
|
| |
- static work_q_item *get_work_q(struct Slapi_op_stack **);
|
| |
+ static void add_work_q(work_q_item *, Operation *);
|
| |
+ static work_q_item *get_work_q(Operation **);
|
| |
|
| |
/*
|
| |
* We maintain a global work queue of items that have not yet
|
| |
@@ -58,7 +50,7 @@
|
| |
{
|
| |
PRStackElem stackelem; /* must be first in struct for PRStack to work */
|
| |
work_q_item *work_item;
|
| |
- struct Slapi_op_stack *op_stack_obj;
|
| |
+ Operation *op;
|
| |
struct Slapi_work_q *next_work_item;
|
| |
};
|
| |
|
| |
@@ -92,7 +84,7 @@
|
| |
destroy_work_q(struct Slapi_work_q **work_q)
|
| |
{
|
| |
if (work_q && *work_q) {
|
| |
- (*work_q)->op_stack_obj = NULL;
|
| |
+ (*work_q)->op = NULL;
|
| |
(*work_q)->work_item = NULL;
|
| |
PR_StackPush(work_q_stack, (PRStackElem *)*work_q);
|
| |
PR_AtomicIncrement(&work_q_stack_size);
|
| |
@@ -102,33 +94,17 @@
|
| |
}
|
| |
}
|
| |
|
| |
- static struct Slapi_op_stack *
|
| |
+ static Operation *
|
| |
connection_get_operation(void)
|
| |
{
|
| |
- struct Slapi_op_stack *stack_obj = (struct Slapi_op_stack *)PR_StackPop(op_stack);
|
| |
- if (!stack_obj) {
|
| |
- stack_obj = (struct Slapi_op_stack *)slapi_ch_calloc(1, sizeof(struct Slapi_op_stack));
|
| |
- stack_obj->op = operation_new(plugin_build_operation_action_bitmap(0,
|
| |
- plugin_get_server_plg()));
|
| |
- } else {
|
| |
- PR_AtomicDecrement(&op_stack_size);
|
| |
- if (!stack_obj->op) {
|
| |
- stack_obj->op = operation_new(plugin_build_operation_action_bitmap(0,
|
| |
- plugin_get_server_plg()));
|
| |
- } else {
|
| |
- operation_init(stack_obj->op,
|
| |
- plugin_build_operation_action_bitmap(0, plugin_get_server_plg()));
|
| |
- }
|
| |
- }
|
| |
- return stack_obj;
|
| |
+ return operation_new(plugin_build_operation_action_bitmap(0, plugin_get_server_plg()));
|
| |
+
|
| |
}
|
| |
|
| |
static void
|
| |
- connection_done_operation(Connection *conn, struct Slapi_op_stack *stack_obj)
|
| |
+ connection_done_operation(Connection *conn, Operation *op)
|
| |
{
|
| |
- operation_done(&(stack_obj->op), conn);
|
| |
- PR_StackPush(op_stack, (PRStackElem *)stack_obj);
|
| |
- PR_AtomicIncrement(&op_stack_size);
|
| |
+ operation_free(&op, conn);
|
| |
}
|
| |
|
| |
/*
|
| |
@@ -169,6 +145,9 @@
|
| |
* soon anyway, so please be patient while I undertake this!
|
| |
*
|
| |
* - wibrown December 2016.
|
| |
+ *
|
| |
+ * It's happening!!!!
|
| |
+ * William Sep 2018
|
| |
*/
|
| |
}
|
| |
|
| |
@@ -280,7 +259,7 @@
|
| |
char *str_unknown = "unknown";
|
| |
int in_referral_mode = config_check_referral_mode();
|
| |
|
| |
- slapi_log_err(SLAPI_LOG_CONNS, "connection_reset", "new %sconnection on %d\n", pTmp, conn->c_sd);
|
| |
+ slapi_log_err(SLAPI_LOG_CONNS, "connection_reset", "new %sconnection on fd=%d\n", pTmp, conn->c_sd);
|
| |
|
| |
/* bump our count of connections and update SNMP stats */
|
| |
conn->c_connid = slapi_counter_increment(num_conns);
|
| |
@@ -431,7 +410,6 @@
|
| |
void
|
| |
init_op_threads()
|
| |
{
|
| |
- int i;
|
| |
PRErrorCode errorCode;
|
| |
int max_threads = config_get_threadnumber();
|
| |
/* Initialize the locks and cv */
|
| |
@@ -453,10 +431,8 @@
|
| |
|
| |
work_q_stack = PR_CreateStack("connection_work_q");
|
| |
|
| |
- op_stack = PR_CreateStack("connection_operation");
|
| |
-
|
| |
/* start the operation threads */
|
| |
- for (i = 0; i < max_threads; i++) {
|
| |
+ for (size_t i = 0; i < max_threads; i++) {
|
| |
PR_SetConcurrency(4);
|
| |
if (PR_CreateThread(PR_USER_THREAD,
|
| |
(VFP)(void *)connection_threadmain, NULL,
|
| |
@@ -709,7 +685,8 @@
|
| |
if (!release_only && (conn->c_refcnt == 1) && (conn->c_flags & CONN_FLAG_CLOSING)) {
|
| |
/* if refcnt == 1 usually means only the active connection list has a ref */
|
| |
/* refcnt == 0 means conntable just dropped the last ref */
|
| |
- ns_connection_post_io_or_closing(conn);
|
| |
+ /* ns_connection_post_io_or_closing(conn); */
|
| |
+ ns_handle_closure_conn_nomutex(conn);
|
| |
}
|
| |
|
| |
return 0;
|
| |
@@ -724,12 +701,12 @@
|
| |
|
| |
/* this function should be called under c_mutex */
|
| |
int
|
| |
- connection_acquire_nolock_ext(Connection *conn, int allow_when_closing)
|
| |
+ connection_acquire_nolock(Connection *conn)
|
| |
{
|
| |
/* connection in the closing state can't be acquired */
|
| |
- if (!allow_when_closing && (conn->c_flags & CONN_FLAG_CLOSING)) {
|
| |
+ if (conn->c_flags & CONN_FLAG_CLOSING) {
|
| |
/* This may happen while other threads are still working on this connection */
|
| |
- slapi_log_err(SLAPI_LOG_ERR, "connection_acquire_nolock_ext",
|
| |
+ slapi_log_err(SLAPI_LOG_ERR, "connection_acquire_nolock",
|
| |
"conn=%" PRIu64 " fd=%d Attempt to acquire connection in the closing state\n",
|
| |
conn->c_connid, conn->c_sd);
|
| |
return -1;
|
| |
@@ -739,12 +716,6 @@
|
| |
}
|
| |
}
|
| |
|
| |
- int
|
| |
- connection_acquire_nolock(Connection *conn)
|
| |
- {
|
| |
- return connection_acquire_nolock_ext(conn, 0);
|
| |
- }
|
| |
-
|
| |
/* returns non-0 if connection can be reused and 0 otherwise */
|
| |
int
|
| |
connection_is_free(Connection *conn, int use_lock)
|
| |
@@ -945,17 +916,16 @@
|
| |
void
|
| |
connection_make_new_pb(Slapi_PBlock *pb, Connection *conn)
|
| |
{
|
| |
- struct Slapi_op_stack *stack_obj = NULL;
|
| |
+ Operation *op = NULL;
|
| |
/* we used to malloc/free the pb for each operation - now, just use a local stack pb
|
| |
* in connection_threadmain, and just clear it out
|
| |
*/
|
| |
/* *ppb = (Slapi_PBlock *) slapi_ch_calloc( 1, sizeof(Slapi_PBlock) ); */
|
| |
/* *ppb = slapi_pblock_new(); */
|
| |
slapi_pblock_set(pb, SLAPI_CONNECTION, conn);
|
| |
- stack_obj = connection_get_operation();
|
| |
- slapi_pblock_set(pb, SLAPI_OPERATION, stack_obj->op);
|
| |
- slapi_pblock_set_op_stack_elem(pb, stack_obj);
|
| |
- connection_add_operation(conn, stack_obj->op);
|
| |
+ op = connection_get_operation();
|
| |
+ slapi_pblock_set(pb, SLAPI_OPERATION, op);
|
| |
+ connection_add_operation(conn, op);
|
| |
}
|
| |
|
| |
int
|
| |
@@ -963,7 +933,7 @@
|
| |
{
|
| |
int ret = CONN_FOUND_WORK_TO_DO;
|
| |
work_q_item *wqitem = NULL;
|
| |
- struct Slapi_op_stack *op_stack_obj = NULL;
|
| |
+ Operation *op = NULL;
|
| |
|
| |
PR_Lock(work_q_lock);
|
| |
|
| |
@@ -974,15 +944,14 @@
|
| |
if (op_shutdown) {
|
| |
slapi_log_err(SLAPI_LOG_TRACE, "connection_wait_for_new_work", "shutdown\n");
|
| |
ret = CONN_SHUTDOWN;
|
| |
- } else if (NULL == (wqitem = get_work_q(&op_stack_obj))) {
|
| |
+ } else if (NULL == (wqitem = get_work_q(&op))) {
|
| |
/* not sure how this can happen */
|
| |
slapi_log_err(SLAPI_LOG_TRACE, "connection_wait_for_new_work", "no work to do\n");
|
| |
ret = CONN_NOWORK;
|
| |
} else {
|
| |
/* make new pb */
|
| |
slapi_pblock_set(pb, SLAPI_CONNECTION, wqitem);
|
| |
- slapi_pblock_set_op_stack_elem(pb, op_stack_obj);
|
| |
- slapi_pblock_set(pb, SLAPI_OPERATION, op_stack_obj->op);
|
| |
+ slapi_pblock_set(pb, SLAPI_OPERATION, op);
|
| |
}
|
| |
|
| |
PR_Unlock(work_q_lock);
|
| |
@@ -1118,7 +1087,7 @@
|
| |
|
| |
*/
|
| |
int
|
| |
- connection_read_operation(Connection *conn, Operation *op, ber_tag_t *tag, int *remaining_data)
|
| |
+ connection_read_operation_nolock(Connection *conn, Operation *op, ber_tag_t *tag, int *remaining_data)
|
| |
{
|
| |
ber_len_t len = 0;
|
| |
int ret = 0;
|
| |
@@ -1131,7 +1100,6 @@
|
| |
size_t buffer_data_avail;
|
| |
int conn_closed = 0;
|
| |
|
| |
- pthread_mutex_lock(&(conn->c_mutex));
|
| |
/*
|
| |
* if the socket is still valid, get the ber element
|
| |
* waiting for us on this connection. timeout is handled
|
| |
@@ -1139,8 +1107,7 @@
|
| |
*/
|
| |
if ((conn->c_sd == SLAPD_INVALID_SOCKET) ||
|
| |
(conn->c_flags & CONN_FLAG_CLOSING)) {
|
| |
- ret = CONN_DONE;
|
| |
- goto done;
|
| |
+ return CONN_DONE;
|
| |
}
|
| |
|
| |
*tag = LBER_DEFAULT;
|
| |
@@ -1150,8 +1117,7 @@
|
| |
if (0 != get_next_from_buffer(buffer + conn->c_private->c_buffer_offset,
|
| |
buffer_data_avail,
|
| |
&len, tag, op->o_ber, conn)) {
|
| |
- ret = CONN_DONE;
|
| |
- goto done;
|
| |
+ return CONN_DONE;
|
| |
}
|
| |
new_operation = 0;
|
| |
}
|
| |
@@ -1166,8 +1132,7 @@
|
| |
} else {
|
| |
ret = get_next_from_buffer(NULL, 0, &len, tag, op->o_ber, conn);
|
| |
if (ret == -1) {
|
| |
- ret = CONN_DONE;
|
| |
- goto done; /* get_next_from_buffer does the disconnect stuff */
|
| |
+ return CONN_DONE;
|
| |
} else if (ret == 0) {
|
| |
ret = len;
|
| |
}
|
| |
@@ -1179,14 +1144,12 @@
|
| |
disconnect_server_nomutex(conn, conn->c_connid, -1, SLAPD_DISCONNECT_BAD_BER_TAG, 0);
|
| |
conn->c_gettingber = 0;
|
| |
signal_listner();
|
| |
- ret = CONN_DONE;
|
| |
- goto done;
|
| |
+ return CONN_DONE;
|
| |
}
|
| |
/* err = PR_GetError(); */
|
| |
/* If we would block, we need to poll for a while */
|
| |
syserr = PR_GetOSError();
|
| |
- if (SLAPD_PR_WOULD_BLOCK_ERROR(err) ||
|
| |
- SLAPD_SYSTEM_WOULD_BLOCK_ERROR(syserr)) {
|
| |
+ if (SLAPD_PR_WOULD_BLOCK_ERROR(err) || SLAPD_SYSTEM_WOULD_BLOCK_ERROR(syserr)) {
|
| |
struct PRPollDesc pr_pd;
|
| |
PRIntervalTime timeout = PR_MillisecondsToInterval(CONN_TURBO_TIMEOUT_INTERVAL);
|
| |
pr_pd.fd = (PRFileDesc *)conn->c_prfd;
|
| |
@@ -1200,14 +1163,12 @@
|
| |
if (0 == ret) {
|
| |
/* We timed out, should the server shutdown ? */
|
| |
if (op_shutdown) {
|
| |
- ret = CONN_SHUTDOWN;
|
| |
- goto done;
|
| |
+ return CONN_SHUTDOWN;
|
| |
}
|
| |
/* We timed out, is this the first read in a PDU ? */
|
| |
if (new_operation) {
|
| |
/* If so, we return */
|
| |
- ret = CONN_TIMEDOUT;
|
| |
- goto done;
|
| |
+ return CONN_TIMEDOUT;
|
| |
} else {
|
| |
/* Otherwise we loop, unless we exceeded the ioblock timeout */
|
| |
if (waits_done > ioblocktimeout_waits) {
|
| |
@@ -1215,8 +1176,7 @@
|
| |
"ioblocktimeout expired on connection %" PRIu64 "\n", conn->c_connid);
|
| |
disconnect_server_nomutex(conn, conn->c_connid, -1,
|
| |
SLAPD_DISCONNECT_IO_TIMEOUT, 0);
|
| |
- ret = CONN_DONE;
|
| |
- goto done;
|
| |
+ return CONN_DONE;
|
| |
} else {
|
| |
|
| |
/* The turbo mode may cause threads starvation.
|
| |
@@ -1236,8 +1196,7 @@
|
| |
"PR_Poll for connection %" PRIu64 " returns %d (%s)\n", conn->c_connid, err, slapd_pr_strerror(err));
|
| |
/* If this happens we should close the connection */
|
| |
disconnect_server_nomutex(conn, conn->c_connid, -1, err, syserr);
|
| |
- ret = CONN_DONE;
|
| |
- goto done;
|
| |
+ return CONN_DONE;
|
| |
}
|
| |
slapi_log_err(SLAPI_LOG_CONNS,
|
| |
"connection_read_operation", "connection %" PRIu64 " waited %d times for read to be ready\n", conn->c_connid, waits_done);
|
| |
@@ -1248,8 +1207,7 @@
|
| |
"PR_Recv for connection %" PRIu64 " returns %d (%s)\n", conn->c_connid, err, slapd_pr_strerror(err));
|
| |
/* If this happens we should close the connection */
|
| |
disconnect_server_nomutex(conn, conn->c_connid, -1, err, syserr);
|
| |
- ret = CONN_DONE;
|
| |
- goto done;
|
| |
+ return CONN_DONE;
|
| |
}
|
| |
} else {
|
| |
/* We read some data off the network, do something with it */
|
| |
@@ -1260,8 +1218,7 @@
|
| |
if (get_next_from_buffer(buffer,
|
| |
conn->c_private->c_buffer_bytes - conn->c_private->c_buffer_offset,
|
| |
&len, tag, op->o_ber, conn) != 0) {
|
| |
- ret = CONN_DONE;
|
| |
- goto done;
|
| |
+ return CONN_DONE;
|
| |
}
|
| |
}
|
| |
slapi_log_err(SLAPI_LOG_CONNS,
|
| |
@@ -1277,8 +1234,7 @@
|
| |
*remaining_data = 1;
|
| |
} else if (conn_closed) {
|
| |
/* connection closed */
|
| |
- ret = CONN_DONE;
|
| |
- goto done;
|
| |
+ return CONN_DONE;
|
| |
}
|
| |
|
| |
if (*tag != LDAP_TAG_MESSAGE) {
|
| |
@@ -1290,8 +1246,7 @@
|
| |
conn->c_connid, *tag, LDAP_TAG_MESSAGE);
|
| |
disconnect_server_nomutex(conn, conn->c_connid, -1,
|
| |
SLAPD_DISCONNECT_BAD_BER_TAG, EPROTO);
|
| |
- ret = CONN_DONE;
|
| |
- goto done;
|
| |
+ return CONN_DONE;
|
| |
}
|
| |
|
| |
if ((*tag = ber_get_int(op->o_ber, &msgid)) != LDAP_TAG_MSGID) {
|
| |
@@ -1299,13 +1254,11 @@
|
| |
slapi_log_err(SLAPI_LOG_ERR,
|
| |
"connection_read_operation", "conn=%" PRIu64 " unable to read tag for incoming request\n", conn->c_connid);
|
| |
disconnect_server_nomutex(conn, conn->c_connid, -1, SLAPD_DISCONNECT_BAD_BER_TAG, EPROTO);
|
| |
- ret = CONN_DONE;
|
| |
- goto done;
|
| |
+ return CONN_DONE;
|
| |
}
|
| |
if (is_ber_too_big(conn, len)) {
|
| |
disconnect_server_nomutex(conn, conn->c_connid, -1, SLAPD_DISCONNECT_BER_TOO_BIG, 0);
|
| |
- ret = CONN_DONE;
|
| |
- goto done;
|
| |
+ return CONN_DONE;
|
| |
}
|
| |
op->o_msgid = msgid;
|
| |
|
| |
@@ -1317,36 +1270,32 @@
|
| |
slapi_log_err(SLAPI_LOG_ERR,
|
| |
"connection_read_operation", "conn=%" PRIu64 " ber_peek_tag returns 0x%lx\n", conn->c_connid, *tag);
|
| |
disconnect_server_nomutex(conn, conn->c_connid, -1, SLAPD_DISCONNECT_BER_PEEK, EPROTO);
|
| |
- ret = CONN_DONE;
|
| |
- goto done;
|
| |
+ return CONN_DONE;
|
| |
default:
|
| |
break;
|
| |
}
|
| |
op->o_tag = *tag;
|
| |
done:
|
| |
- pthread_mutex_unlock(&(conn->c_mutex));
|
| |
return ret;
|
| |
}
|
| |
|
| |
- void
|
| |
- connection_make_readable(Connection *conn)
|
| |
- {
|
| |
+ int
|
| |
+ connection_read_operation(Connection *conn, Operation *op, ber_tag_t *tag, int *remaining_data) {
|
| |
+ int ret = 0;
|
| |
pthread_mutex_lock(&(conn->c_mutex));
|
| |
- conn->c_gettingber = 0;
|
| |
+ ret = connection_read_operation_nolock(conn, op, tag, remaining_data);
|
| |
pthread_mutex_unlock(&(conn->c_mutex));
|
| |
- signal_listner();
|
| |
+ return ret;
|
| |
}
|
| |
|
| |
- void
|
| |
+ static void
|
| |
connection_make_readable_nolock(Connection *conn)
|
| |
{
|
| |
conn->c_gettingber = 0;
|
| |
- slapi_log_err(SLAPI_LOG_CONNS, "connection_make_readable_nolock", "making readable conn %" PRIu64 " fd=%d\n",
|
| |
- conn->c_connid, conn->c_sd);
|
| |
- if (!(conn->c_flags & CONN_FLAG_CLOSING)) {
|
| |
- /* if the connection is closing, try the close in connection_release_nolock */
|
| |
- ns_connection_post_io_or_closing(conn);
|
| |
- }
|
| |
+ slapi_log_err(SLAPI_LOG_CONNS, "connection_make_readable_nolock",
|
| |
+ "making readable conn %" PRIu64 " fd=%d\n",
|
| |
+ conn->c_connid, conn->c_sd);
|
| |
+ signal_listner();
|
| |
}
|
| |
|
| |
/*
|
| |
@@ -1810,9 +1759,9 @@
|
| |
pthread_mutex_unlock(&(conn->c_mutex));
|
| |
}
|
| |
/* ps_add makes a shallow copy of the pb - so we
|
| |
- * can't free it or init it here - just set operation to NULL.
|
| |
- * ps_send_results will call connection_remove_operation_ext to free it
|
| |
- */
|
| |
+ * can't free it or init it here - just set operation to NULL.
|
| |
+ * ps_send_results will call connection_remove_operation_ext to free it
|
| |
+ */
|
| |
slapi_pblock_set(pb, SLAPI_OPERATION, NULL);
|
| |
slapi_pblock_init(pb);
|
| |
} else {
|
| |
@@ -1835,15 +1784,17 @@
|
| |
more_data = conn_buffered_data_avail_nolock(conn, &conn_closed) ? 1 : 0;
|
| |
}
|
| |
if (!more_data) {
|
| |
+ /*
|
| |
+ * If is_timedout is 1, then thread_turbo_flag is set to 0, so we
|
| |
+ * move the check for timeout to the outer and then continue. This resolves
|
| |
+ * a coverity issue for dead code detection.
|
| |
+ */
|
| |
+ if (replication_connection || (is_timedout == 1)) {
|
| |
+ connection_make_readable_nolock(conn);
|
| |
+ need_wakeup = 1;
|
| |
+ }
|
| |
+
|
| |
if (!thread_turbo_flag) {
|
| |
- /*
|
| |
- * Don't release the connection now.
|
| |
- * But note down what to do.
|
| |
- */
|
| |
- if (replication_connection || (1 == is_timedout)) {
|
| |
- connection_make_readable_nolock(conn);
|
| |
- need_wakeup = 1;
|
| |
- }
|
| |
if (!need_wakeup) {
|
| |
if (conn->c_threadnumber == maxthreads) {
|
| |
need_wakeup = 1;
|
| |
@@ -1864,10 +1815,6 @@
|
| |
if (need_wakeup) {
|
| |
signal_listner();
|
| |
}
|
| |
- } else if (1 == is_timedout) {
|
| |
- /* covscan reports this code is unreachable (2019/6/4) */
|
| |
- connection_make_readable_nolock(conn);
|
| |
- signal_listner();
|
| |
}
|
| |
}
|
| |
pthread_mutex_unlock(&(conn->c_mutex));
|
| |
@@ -1875,11 +1822,188 @@
|
| |
} /* while (1) */
|
| |
}
|
| |
|
| |
+ void
|
| |
+ ns_connection_cleanup_nolock(Connection *c, Operation *op, Slapi_PBlock *pb) {
|
| |
+ /* Destroy the pblock (with conditions if psearch) */
|
| |
+ if (op->o_flags & OP_FLAG_PS) {
|
| |
+ /*
|
| |
+ * If this operation begins a psearch, then we detach the operation
|
| |
+ * from the pblock. When we call pb_destroy, this prevents the op being
|
| |
+ * destroyed. If we don't we trigger use-after-free!
|
| |
+ *
|
| |
+ * ps_send_results will call connection_remove_operation_ext to free it
|
| |
+ * later for us.
|
| |
+ */
|
| |
+ slapi_pblock_set(pb, SLAPI_OPERATION, NULL);
|
| |
+ } else {
|
| |
+ connection_remove_operation_ext(pb, c, op);
|
| |
+ }
|
| |
+
|
| |
+ /* Remove the refcnt from ns_connection_activity */
|
| |
+ connection_release_nolock_ext(c, 1);
|
| |
+ /* Finally kill the pb */
|
| |
+ slapi_pblock_destroy(pb);
|
| |
+ }
|
| |
+
|
| |
+ void
|
| |
+ ns_connection_do_work(Connection *conn, Operation *op, Slapi_PBlock *pb) {
|
| |
+
|
| |
+ /* are we in referral-only mode? */
|
| |
+ if (config_check_referral_mode() && op->o_tag != LDAP_REQ_UNBIND) {
|
| |
+ slapi_log_err(SLAPI_LOG_CONNS, "ns_connection_do_work",
|
| |
+ "conn %" PRIu64 " for fd=%d - sending referral\n",
|
| |
+ conn->c_connid, conn->c_sd);
|
| |
+ referral_mode_reply(pb);
|
| |
+ } else if (connection_need_new_password(conn, op, pb)) {
|
| |
+ slapi_log_err(SLAPI_LOG_CONNS, "ns_connection_do_work",
|
| |
+ "conn %" PRIu64 " for fd=%d - new password required\n",
|
| |
+ conn->c_connid, conn->c_sd);
|
| |
+
|
| |
+ /* check if new password is required */
|
| |
+ return;
|
| |
+ } else if (conn->c_flags & CONN_FLAG_IMPORT) {
|
| |
+ /* if this is a bulk import, only "add" and "import done"
|
| |
+ * are allowed */
|
| |
+ if ((op->o_tag != LDAP_REQ_ADD) && (op->o_tag != LDAP_REQ_EXTENDED)) {
|
| |
+ /* no cookie for you. */
|
| |
+ slapi_log_err(SLAPI_LOG_ERR, "ns_connection_do_work",
|
| |
+ "conn %" PRIu64 " for fd=%d - attempted operation %lu from within bulk import\n",
|
| |
+ conn->c_connid, conn->c_sd, op->o_tag);
|
| |
+ slapi_send_ldap_result(pb, LDAP_PROTOCOL_ERROR, NULL, NULL, 0, NULL);
|
| |
+ }
|
| |
+ } else {
|
| |
+ /*
|
| |
+ * Call the do_<operation> function to process this request.
|
| |
+ */
|
| |
+ slapi_log_err(SLAPI_LOG_CONNS, "ns_connection_do_work",
|
| |
+ "conn %" PRIu64 " for fd=%d - connection_dispatch_operation\n",
|
| |
+ conn->c_connid, conn->c_sd);
|
| |
+ connection_dispatch_operation(conn, op, pb);
|
| |
+ }
|
| |
+ }
|
| |
+
|
| |
+ void
|
| |
+ ns_handle_work_done(struct ns_job_t *job) {
|
| |
+ Slapi_PBlock *pb = (Slapi_PBlock *)ns_job_get_data(job);
|
| |
+ PR_ASSERT(pb);
|
| |
+ Connection *c = NULL;
|
| |
+ Operation *op = NULL;
|
| |
+ slapi_pblock_get(pb, SLAPI_CONNECTION, &c);
|
| |
+ slapi_pblock_get(pb, SLAPI_OPERATION, &op);
|
| |
+ PR_ASSERT(c);
|
| |
+ PR_ASSERT(op);
|
| |
+
|
| |
+ pthread_mutex_lock(&(c->c_mutex));
|
| |
+ slapi_log_err(SLAPI_LOG_CONNS, "ns_handle_work_done",
|
| |
+ "conn %" PRIu64 " for fd=%d - removing async operation references\n",
|
| |
+ c->c_connid, c->c_sd);
|
| |
+ ns_connection_cleanup_nolock(c, op, pb);
|
| |
+ pthread_mutex_unlock(&(c->c_mutex));
|
| |
+ }
|
| |
+
|
| |
+ void
|
| |
+ ns_handle_work(struct ns_job_t *job) {
|
| |
+ /* Actually do something with the operation! */
|
| |
+ /* All the IO should be done, just do the op, then write the result. */
|
| |
+ Slapi_PBlock *pb = (Slapi_PBlock *)ns_job_get_data(job);
|
| |
+ PR_ASSERT(pb);
|
| |
+ Connection *c = NULL;
|
| |
+ Operation *op = NULL;
|
| |
+ slapi_pblock_get(pb, SLAPI_CONNECTION, &c);
|
| |
+ slapi_pblock_get(pb, SLAPI_OPERATION, &op);
|
| |
+ PR_ASSERT(c);
|
| |
+ PR_ASSERT(op);
|
| |
+ /*
|
| |
+ * Do we have the resources for the operation?
|
| |
+ */
|
| |
+
|
| |
+ pthread_mutex_lock(&(c->c_mutex));
|
| |
+ int32_t maxthreads = config_get_maxthreadsperconn();
|
| |
+ if (c->c_threadnumber >= maxthreads) {
|
| |
+ c->c_maxthreadsblocked++;
|
| |
+ /* Requeue for a future attempt */
|
| |
+ ns_job_rearm(job);
|
| |
+ slapi_log_err(SLAPI_LOG_WARNING, "ns_handle_work",
|
| |
+ "conn %" PRIu64 " for fd=%d - maxthreads %"PRId32" of %"PRId32" per conn limit reached, requeuing\n",
|
| |
+ c->c_connid, c->c_sd, c->c_threadnumber, maxthreads);
|
| |
+ pthread_mutex_unlock(&(c->c_mutex));
|
| |
+ return;
|
| |
+ } else {
|
| |
+ c->c_threadnumber += 1;
|
| |
+ slapi_log_err(SLAPI_LOG_CONNS, "ns_handle_work",
|
| |
+ "conn %" PRIu64 " for fd=%d - maxthreads %"PRId32" of %"PRId32" per conn, starting operation ...\n",
|
| |
+ c->c_connid, c->c_sd, c->c_threadnumber, maxthreads);
|
| |
+
|
| |
+ /* Some stolen code for stats of thread counting */
|
| |
+ if (c->c_threadnumber == maxthreads) {
|
| |
+ c->c_maxthreadscount++;
|
| |
+ slapi_counter_increment(max_threads_count);
|
| |
+ slapi_counter_increment(conns_in_maxthreads);
|
| |
+ slapi_counter_increment(g_get_global_snmp_vars()->ops_tbl.dsConnectionsInMaxThreads);
|
| |
+ }
|
| |
+
|
| |
+ pthread_mutex_unlock(&(c->c_mutex));
|
| |
+ }
|
| |
+
|
| |
+ /*
|
| |
+ ********************
|
| |
+ * DO THE OPERATION *
|
| |
+ ********************
|
| |
+ */
|
| |
+ slapi_log_err(SLAPI_LOG_CONNS, "ns_handle_work",
|
| |
+ "conn %" PRIu64 " for fd=%d - started operation\n",
|
| |
+ c->c_connid, c->c_sd);
|
| |
+ ns_connection_do_work(c, op, pb);
|
| |
+ slapi_log_err(SLAPI_LOG_CONNS, "ns_handle_work",
|
| |
+ "conn %" PRIu64 " for fd=%d - ended operation\n",
|
| |
+ c->c_connid, c->c_sd);
|
| |
+
|
| |
+ /*
|
| |
+ * WARNING!!! It is UNSAFE to call the direct ns close functions
|
| |
+ * for the connection here. Use disconnect_server_nomutex instead.
|
| |
+ */
|
| |
+
|
| |
+ /* The thread is complete */
|
| |
+ pthread_mutex_lock(&(c->c_mutex));
|
| |
+ c->c_threadnumber -= 1;
|
| |
+ slapi_log_err(SLAPI_LOG_CONNS, "ns_handle_work",
|
| |
+ "conn %" PRIu64 " for fd=%d - maxthreads %"PRId32" of %"PRId32" per conn, at end of operation\n",
|
| |
+ c->c_connid, c->c_sd, c->c_threadnumber, maxthreads);
|
| |
+ pthread_mutex_unlock(&(c->c_mutex));
|
| |
+
|
| |
+ /* Destroy this worker at last - this frees everything! */
|
| |
+ ns_job_done(job);
|
| |
+ }
|
| |
+
|
| |
+ /*
|
| |
+ * The caller here still has c mutex held.
|
| |
+ */
|
| |
+ void
|
| |
+ ns_handle_add_work(Connection *c, Slapi_PBlock *pb) {
|
| |
+ /* Add a job to async handle the operation itself. */
|
| |
+ ns_result_t job_result = ns_add_job(c->c_tp, NS_JOB_THREAD, ns_handle_work, ns_handle_work_done, pb, NULL);
|
| |
+
|
| |
+ if (job_result != NS_SUCCESS) {
|
| |
+ if (job_result == NS_SHUTDOWN) {
|
| |
+ slapi_log_err(SLAPI_LOG_ERR, "ns_handle_add_work",
|
| |
+ "conn %" PRIu64 " for fd=%d - async operation failed to be added to queue as server is shuttdng down\n",
|
| |
+ c->c_connid, c->c_sd);
|
| |
+ } else {
|
| |
+ slapi_log_err(SLAPI_LOG_ERR, "ns_handle_add_work",
|
| |
+ "conn %" PRIu64 " for fd=%d - async operation failed to be added to queue error %d\n",
|
| |
+ c->c_connid, c->c_sd, job_result);
|
| |
+ }
|
| |
+ } else {
|
| |
+ slapi_log_err(SLAPI_LOG_CONNS, "ns_handle_add_work",
|
| |
+ "conn %" PRIu64 " for fd=%d - async operation added to work queue\n",
|
| |
+ c->c_connid, c->c_sd);
|
| |
+ }
|
| |
+ }
|
| |
+
|
| |
/* thread need to hold conn->c_mutex before calling this function */
|
| |
- int
|
| |
- connection_activity(Connection *conn, int maxthreads)
|
| |
- {
|
| |
- struct Slapi_op_stack *op_stack_obj;
|
| |
+ static Operation *
|
| |
+ connection_activity_ext(Connection *conn, int maxthreads) {
|
| |
+ Operation *op = NULL;
|
| |
|
| |
if (connection_acquire_nolock(conn) == -1) {
|
| |
slapi_log_err(SLAPI_LOG_CONNS,
|
| |
@@ -1887,12 +2011,19 @@
|
| |
conn->c_connid, conn->c_sd);
|
| |
/* XXX how to handle this error? */
|
| |
/* MAB: 25 Jan 01: let's return on error and pray this won't leak */
|
| |
- return (-1);
|
| |
+ return NULL;
|
| |
}
|
| |
|
| |
/* set these here so setup_pr_read_pds will not add this conn back to the poll array */
|
| |
conn->c_gettingber = 1;
|
| |
- conn->c_threadnumber++;
|
| |
+ /*
|
| |
+ * With nunc stans we count threads differently. As a result
|
| |
+ * If we see maxthreads == 0 here, we skip the ++. Remove this hack job
|
| |
+ * when we gut the old conn code.
|
| |
+ */
|
| |
+ if (maxthreads > 0) {
|
| |
+ conn->c_threadnumber++;
|
| |
+ }
|
| |
if (conn->c_threadnumber == maxthreads) {
|
| |
conn->c_flags |= CONN_FLAG_MAX_THREADS;
|
| |
conn->c_maxthreadscount++;
|
| |
@@ -1901,24 +2032,128 @@
|
| |
slapi_counter_increment(g_get_global_snmp_vars()->ops_tbl.dsConnectionsInMaxThreads);
|
| |
slapi_counter_increment(g_get_global_snmp_vars()->ops_tbl.dsMaxThreadsHits);
|
| |
}
|
| |
- op_stack_obj = connection_get_operation();
|
| |
- connection_add_operation(conn, op_stack_obj->op);
|
| |
- /* Add conn to the end of the work queue. */
|
| |
- /* have to do this last - add_work_q will signal waiters in connection_wait_for_new_work */
|
| |
- add_work_q((work_q_item *)conn, op_stack_obj);
|
| |
+
|
| |
+ op = connection_get_operation();
|
| |
+ connection_add_operation(conn, op);
|
| |
|
| |
if (!config_check_referral_mode()) {
|
| |
slapi_counter_increment(ops_initiated);
|
| |
slapi_counter_increment(g_get_global_snmp_vars()->ops_tbl.dsInOps);
|
| |
}
|
| |
- return 0;
|
| |
+ return op;
|
| |
+ }
|
| |
+
|
| |
+ /*
|
| |
+ * Called as from the ns_handle_pr_read_ready callback. That function
|
| |
+ * holds the NS job lock then the conn lock, so we can just use nomutex
|
| |
+ * functions.
|
| |
+ *
|
| |
+ * This guarantees we are the only IO on the conn at the time.
|
| |
+ *
|
| |
+ * It also helpfully seperates an IO event thread from a worker doing
|
| |
+ * a search. This just sets up the worker ready to go. This means most
|
| |
+ * connections can have 1:N IO:Workers in operation, without rely on
|
| |
+ * weird locking tricks.
|
| |
+ */
|
| |
+ int32_t
|
| |
+ ns_connection_activity(Connection *conn, int32_t maxthreads) {
|
| |
+ /* THIS ACQUIRES THE CONNECTION */
|
| |
+ /* We added one to the refcnt!!! Remember to remove it! */
|
| |
+ Operation *op = connection_activity_ext(conn, maxthreads);
|
| |
+ if (op == NULL) {
|
| |
+ return -1;
|
| |
+ }
|
| |
+
|
| |
+ ber_tag_t tag = 0;
|
| |
+ int32_t more_data = 0;
|
| |
+ int32_t result = 0;
|
| |
+ /* Actually read a PDU here and get it ready .... */
|
| |
+ int cret = connection_read_operation_nolock(conn, op, &tag, &more_data);
|
| |
+ slapi_log_err(SLAPI_LOG_CONNS, "ns_connection_activity",
|
| |
+ "conn %" PRIu64 " - cret %d\n", conn->c_connid, cret);
|
| |
+ switch (cret) {
|
| |
+ case CONN_FOUND_WORK_TO_DO:
|
| |
+ slapi_log_err(SLAPI_LOG_CONNS, "ns_connection_activity",
|
| |
+ "conn %" PRIu64 " read operation successfully - more_data %d "
|
| |
+ "ops_initiated %d refcnt %d flags %d\n",
|
| |
+ conn->c_connid, more_data,
|
| |
+ conn->c_opsinitiated, conn->c_refcnt, conn->c_flags);
|
| |
+ /* Create the pb and set items into it. */
|
| |
+ Slapi_PBlock *pb = slapi_pblock_new();
|
| |
+ slapi_pblock_set(pb, SLAPI_CONNECTION, conn);
|
| |
+ slapi_pblock_set(pb, SLAPI_OPERATION, op);
|
| |
+ /* IF REPLICATION || UNBIND RUN IT NOW IN THIS THREAD */
|
| |
+ if (tag == LDAP_REQ_UNBIND || conn->c_isreplication_session) {
|
| |
+ /*
|
| |
+ * This won't deadlock anything because this won't spawn
|
| |
+ * new threads, and we hold job lock AND the conn lock.
|
| |
+ * As both are monitors, we are pretty safe here.
|
| |
+ */
|
| |
+ ns_connection_do_work(conn, op, pb);
|
| |
+ /* Now clean up after ourselves. */
|
| |
+ ns_connection_cleanup_nolock(conn, op, pb);
|
| |
+ /* Tell our caller not to rearm after an unbind */
|
| |
+ if (tag == LDAP_REQ_UNBIND) {
|
| |
+ result = -4;
|
| |
+ }
|
| |
+
|
| |
+ } else {
|
| |
+ /*
|
| |
+ * schedule for async dispatch instead.
|
| |
+ * We don't need to worry about max threads here, because the
|
| |
+ * worker will just reset the job if it's too busy.
|
| |
+ */
|
| |
+ ns_handle_add_work(conn, pb);
|
| |
+ }
|
| |
+ break;
|
| |
+ case CONN_DONE:
|
| |
+ case CONN_TIMEDOUT:
|
| |
+ slapi_log_err(SLAPI_LOG_CONNS, "ns_connection_activity",
|
| |
+ "conn %" PRIu64 " read not ready due to %d - more_data %d "
|
| |
+ "ops_initiated %d refcnt %d flags %d\n",
|
| |
+ conn->c_connid, cret, more_data,
|
| |
+ conn->c_opsinitiated, conn->c_refcnt, conn->c_flags);
|
| |
+ connection_release_nolock_ext(conn, 1);
|
| |
+ /* Free the allocated operation. */
|
| |
+ connection_remove_operation(conn, op);
|
| |
+ connection_done_operation(conn, op);
|
| |
+ result = -2;
|
| |
+ break;
|
| |
+ default:
|
| |
+ slapi_log_err(SLAPI_LOG_ERR, "ns_connection_activity",
|
| |
+ "conn %" PRIu64 " read operation failed UNKNOWN %d - more_data %d "
|
| |
+ "ops_initiated %d refcnt %d flags %d\n",
|
| |
+ conn->c_connid, cret, more_data,
|
| |
+ conn->c_opsinitiated, conn->c_refcnt, conn->c_flags);
|
| |
+ connection_release_nolock_ext(conn, 1);
|
| |
+ /* Free the allocated operation. */
|
| |
+ connection_remove_operation(conn, op);
|
| |
+ connection_done_operation(conn, op);
|
| |
+ result = -3;
|
| |
+ break;
|
| |
+ }
|
| |
+
|
| |
+ return result;
|
| |
+ }
|
| |
+
|
| |
+ int32_t
|
| |
+ connection_activity(Connection *conn, int maxthreads)
|
| |
+ {
|
| |
+ Operation *op = connection_activity_ext(conn, maxthreads);
|
| |
+ /* Add conn to the end of the work queue. */
|
| |
+ /* have to do this last - add_work_q will signal waiters in connection_wait_for_new_work */
|
| |
+ if (op != NULL) {
|
| |
+ add_work_q((work_q_item *)conn, op);
|
| |
+ return 0;
|
| |
+ }
|
| |
+ return -1;
|
| |
}
|
| |
|
| |
/* add_work_q(): will add a work_q_item to the end of the global work queue. The work queue
|
| |
is implemented as a single link list. */
|
| |
|
| |
static void
|
| |
- add_work_q(work_q_item *wqitem, struct Slapi_op_stack *op_stack_obj)
|
| |
+ add_work_q(work_q_item *wqitem, Operation *op)
|
| |
{
|
| |
struct Slapi_work_q *new_work_q = NULL;
|
| |
|
| |
@@ -1926,7 +2161,7 @@
|
| |
|
| |
new_work_q = create_work_q();
|
| |
new_work_q->work_item = wqitem;
|
| |
- new_work_q->op_stack_obj = op_stack_obj;
|
| |
+ new_work_q->op = op;
|
| |
new_work_q->next_work_item = NULL;
|
| |
|
| |
PR_Lock(work_q_lock);
|
| |
@@ -1950,7 +2185,7 @@
|
| |
with the work_q_lock held */
|
| |
|
| |
static work_q_item *
|
| |
- get_work_q(struct Slapi_op_stack **op_stack_obj)
|
| |
+ get_work_q(Operation **op)
|
| |
{
|
| |
struct Slapi_work_q *tmp = NULL;
|
| |
work_q_item *wqitem;
|
| |
@@ -1968,7 +2203,7 @@
|
| |
head_work_q = tmp->next_work_item;
|
| |
|
| |
wqitem = tmp->work_item;
|
| |
- *op_stack_obj = tmp->op_stack_obj;
|
| |
+ *op = tmp->op;
|
| |
PR_AtomicDecrement(&work_q_size); /* decrement q size */
|
| |
/* Free the memory used by the item found. */
|
| |
destroy_work_q(&tmp);
|
| |
@@ -1986,8 +2221,8 @@
|
| |
op_thread_cleanup()
|
| |
{
|
| |
slapi_log_err(SLAPI_LOG_INFO, "op_thread_cleanup",
|
| |
- "slapd shutting down - signaling operation threads - op stack size %d max work q size %d max work q stack size %d\n",
|
| |
- op_stack_size, work_q_size_max, work_q_stack_size_max);
|
| |
+ "slapd shutting down - signaling operation threads - max work q size %d max work q stack size %d\n",
|
| |
+ work_q_size_max, work_q_stack_size_max);
|
| |
|
| |
PR_AtomicIncrement(&op_shutdown);
|
| |
PR_Lock(work_q_lock);
|
| |
@@ -1999,32 +2234,23 @@
|
| |
void
|
| |
connection_post_shutdown_cleanup()
|
| |
{
|
| |
- struct Slapi_op_stack *stack_obj;
|
| |
+ Operation *op = NULL;
|
| |
int stack_cnt = 0;
|
| |
struct Slapi_work_q *work_q;
|
| |
int work_cnt = 0;
|
| |
|
| |
while ((work_q = (struct Slapi_work_q *)PR_StackPop(work_q_stack))) {
|
| |
Connection *conn = (Connection *)work_q->work_item;
|
| |
- stack_obj = work_q->op_stack_obj;
|
| |
- if (stack_obj) {
|
| |
- if (conn) {
|
| |
- connection_remove_operation(conn, stack_obj->op);
|
| |
- }
|
| |
- connection_done_operation(conn, stack_obj);
|
| |
+ op = work_q->op;
|
| |
+ if (op && conn) {
|
| |
+ connection_remove_operation(conn, op);
|
| |
+ connection_done_operation(conn, op);
|
| |
}
|
| |
slapi_ch_free((void **)&work_q);
|
| |
work_cnt++;
|
| |
}
|
| |
PR_DestroyStack(work_q_stack);
|
| |
work_q_stack = NULL;
|
| |
- while ((stack_obj = (struct Slapi_op_stack *)PR_StackPop(op_stack))) {
|
| |
- operation_free(&stack_obj->op, NULL);
|
| |
- slapi_ch_free((void **)&stack_obj);
|
| |
- stack_cnt++;
|
| |
- }
|
| |
- PR_DestroyStack(op_stack);
|
| |
- op_stack = NULL;
|
| |
slapi_log_err(SLAPI_LOG_INFO, "connection_post_shutdown_cleanup",
|
| |
"slapd shutting down - freed %d work q stack objects - freed %d op stack objects\n",
|
| |
work_cnt, stack_cnt);
|
| |
@@ -2075,8 +2301,7 @@
|
| |
connection_remove_operation_ext(Slapi_PBlock *pb, Connection *conn, Operation *op)
|
| |
{
|
| |
connection_remove_operation(conn, op);
|
| |
- void *op_stack_elem = slapi_pblock_get_op_stack_elem(pb);
|
| |
- connection_done_operation(conn, op_stack_elem);
|
| |
+ connection_done_operation(conn, op);
|
| |
slapi_pblock_set(pb, SLAPI_OPERATION, NULL);
|
| |
slapi_pblock_init(pb);
|
| |
}
|
| |
@@ -2239,7 +2464,7 @@
|
| |
*/
|
| |
|
| |
void
|
| |
- disconnect_server_nomutex_ext(Connection *conn, PRUint64 opconnid, int opid, PRErrorCode reason, PRInt32 error, int schedule_closure_job)
|
| |
+ disconnect_server_nomutex(Connection *conn, PRUint64 opconnid, int opid, PRErrorCode reason, PRInt32 error)
|
| |
{
|
| |
if ((conn->c_sd != SLAPD_INVALID_SOCKET &&
|
| |
conn->c_connid == opconnid) &&
|
| |
@@ -2247,15 +2472,6 @@
|
| |
slapi_log_err(SLAPI_LOG_CONNS, "disconnect_server_nomutex_ext", "Setting conn %" PRIu64 " fd=%d "
|
| |
"to be disconnected: reason %d\n",
|
| |
conn->c_connid, conn->c_sd, reason);
|
| |
- /*
|
| |
- * PR_Close must be called before anything else is done because
|
| |
- * of NSPR problem on NT which requires that the socket on which
|
| |
- * I/O timed out is closed before any other I/O operation is
|
| |
- * attempted by the thread.
|
| |
- * WARNING : As of today the current code does not fulfill the
|
| |
- * requirements above.
|
| |
- */
|
| |
-
|
| |
/* Mark that the socket should be closed on this connection.
|
| |
* We don't want to actually close the socket here, because
|
| |
* the listener thread could be PR_Polling over it right now.
|
| |
@@ -2316,9 +2532,9 @@
|
| |
}
|
| |
}
|
| |
}
|
| |
- if (schedule_closure_job) {
|
| |
- ns_connection_post_io_or_closing(conn); /* make sure event loop wakes up and closes this conn */
|
| |
- }
|
| |
+
|
| |
+ /* This checks NS enable for us :) */
|
| |
+ ns_handle_closure_conn_nomutex(conn);
|
| |
|
| |
} else {
|
| |
slapi_log_err(SLAPI_LOG_CONNS, "disconnect_server_nomutex_ext", "Not setting conn %d to be disconnected: %s\n",
|
| |
@@ -2328,12 +2544,6 @@
|
| |
}
|
| |
|
| |
void
|
| |
- disconnect_server_nomutex(Connection *conn, PRUint64 opconnid, int opid, PRErrorCode reason, PRInt32 error)
|
| |
- {
|
| |
- disconnect_server_nomutex_ext(conn, opconnid, opid, reason, error, 1);
|
| |
- }
|
| |
-
|
| |
- void
|
| |
connection_abandon_operations(Connection *c)
|
| |
{
|
| |
Operation *op;
|
| |
Bug Description: The integration of NS and DS doesn't account
for thread safety. Adding thread safety to NS, however, didn't
fix DS's broken model. DS makes assumptions about IO ownership
that NS doesn't support nicely. As a result, some deadlocks could
occur.
Fix Description: Add a method that allows an external caller to
lock NS jobs. This lets DS acquire locks in the correct order,
avoiding the deadlocks described above.
https://pagure.io/389-ds-base/issue/49569
Author: wibrown
Review by: ???