The Samba-Bugzilla – Attachment 17794 Details for
Bug 15310
New samba-dcerpc architecture does not scale gracefully
Home
|
New
|
Browse
|
Search
|
[?]
|
Reports
|
Requests
|
Help
|
New Account
|
Log In
[x]
|
Forgot Password
Login:
[x]
[patch]
git-am fix for 4.18.next
bug-15310 (text/plain), 23.12 KB, created by
Jeremy Allison
on 2023-03-07 00:31:04 UTC
(
hide
)
Description:
git-am fix for 4.18.next
Filename:
MIME Type:
Creator:
Jeremy Allison
Created:
2023-03-07 00:31:04 UTC
Size:
23.12 KB
patch
obsolete
>From 883564335c734ec425b49c68cbdd1fe694b57acf Mon Sep 17 00:00:00 2001 >From: Volker Lendecke <vl@samba.org> >Date: Mon, 6 Mar 2023 13:55:43 +0100 >Subject: [PATCH 1/6] librpc: Make rpc_pipe_open_np() public and async > >Signed-off-by: Volker Lendecke <vl@samba.org> >Reviewed-by: Jeremy Allison <jra@samba.org> >(cherry picked from commit 07ebf97a74fb5c0d0504e76c50f3aca8257dab1f) >--- > source3/rpc_client/cli_pipe.c | 132 +++++++++++++++++++++++++--------- > source3/rpc_client/cli_pipe.h | 13 ++++ > 2 files changed, 113 insertions(+), 32 deletions(-) > >diff --git a/source3/rpc_client/cli_pipe.c b/source3/rpc_client/cli_pipe.c >index 5e26dc1806d..2af68b169af 100644 >--- a/source3/rpc_client/cli_pipe.c >+++ b/source3/rpc_client/cli_pipe.c >@@ -3172,74 +3172,142 @@ static int rpc_pipe_client_np_ref_destructor(struct rpc_pipe_client_np_ref *np_r > * > ****************************************************************************/ > >-static NTSTATUS rpc_pipe_open_np(struct cli_state *cli, >- const struct ndr_interface_table *table, >- struct rpc_pipe_client **presult) >-{ >+struct rpc_pipe_open_np_state { >+ struct cli_state *cli; >+ const struct ndr_interface_table *table; > struct rpc_pipe_client *result; >- NTSTATUS status; >- struct rpc_pipe_client_np_ref *np_ref; >+}; >+ >+static void rpc_pipe_open_np_done(struct tevent_req *subreq); > >- /* sanity check to protect against crashes */ >+struct tevent_req *rpc_pipe_open_np_send( >+ TALLOC_CTX *mem_ctx, >+ struct tevent_context *ev, >+ struct cli_state *cli, >+ const struct ndr_interface_table *table) >+{ >+ struct tevent_req *req = NULL, *subreq = NULL; >+ struct rpc_pipe_open_np_state *state = NULL; >+ struct rpc_pipe_client *result = NULL; > >- if ( !cli ) { >- return NT_STATUS_INVALID_HANDLE; >+ req = tevent_req_create( >+ mem_ctx, &state, struct rpc_pipe_open_np_state); >+ if (req == NULL) { >+ return NULL; > } >+ state->cli = cli; >+ state->table = table; > >- result = talloc_zero(NULL, struct rpc_pipe_client); 
>- if (result == NULL) { >- return NT_STATUS_NO_MEMORY; >+ state->result = talloc_zero(state, struct rpc_pipe_client); >+ if (tevent_req_nomem(state->result, req)) { >+ return tevent_req_post(req, ev); > } >+ result = state->result; > > result->abstract_syntax = table->syntax_id; > result->transfer_syntax = ndr_transfer_syntax_ndr; > > result->desthost = talloc_strdup( > result, smbXcli_conn_remote_name(cli->conn)); >- if (result->desthost == NULL) { >- TALLOC_FREE(result); >- return NT_STATUS_NO_MEMORY; >+ if (tevent_req_nomem(result->desthost, req)) { >+ return tevent_req_post(req, ev); > } > > result->srv_name_slash = talloc_asprintf_strupper_m( > result, "\\\\%s", result->desthost); >- if (result->srv_name_slash == NULL) { >- TALLOC_FREE(result); >- return NT_STATUS_NO_MEMORY; >+ if (tevent_req_nomem(result->srv_name_slash, req)) { >+ return tevent_req_post(req, ev); > } > > result->max_xmit_frag = RPC_MAX_PDU_FRAG_LEN; > >- status = rpc_transport_np_init(result, cli, table, >- &result->transport); >- if (!NT_STATUS_IS_OK(status)) { >- TALLOC_FREE(result); >- return status; >+ subreq = rpc_transport_np_init_send(state, ev, cli, table); >+ if (tevent_req_nomem(subreq, req)) { >+ return tevent_req_post(req, ev); >+ } >+ tevent_req_set_callback(subreq, rpc_pipe_open_np_done, req); >+ return req; >+} >+ >+static void rpc_pipe_open_np_done(struct tevent_req *subreq) >+{ >+ struct tevent_req *req = tevent_req_callback_data( >+ subreq, struct tevent_req); >+ struct rpc_pipe_open_np_state *state = tevent_req_data( >+ req, struct rpc_pipe_open_np_state); >+ struct rpc_pipe_client *result = state->result; >+ struct rpc_pipe_client_np_ref *np_ref = NULL; >+ NTSTATUS status; >+ >+ status = rpc_transport_np_init_recv( >+ subreq, result, &result->transport); >+ TALLOC_FREE(subreq); >+ if (tevent_req_nterror(req, status)) { >+ return; > } > > result->transport->transport = NCACN_NP; > > np_ref = talloc(result->transport, struct rpc_pipe_client_np_ref); >- if (np_ref == NULL) 
{ >- TALLOC_FREE(result); >- return NT_STATUS_NO_MEMORY; >+ if (tevent_req_nomem(np_ref, req)) { >+ return; > } >- np_ref->cli = cli; >+ np_ref->cli = state->cli; > np_ref->pipe = result; > > DLIST_ADD(np_ref->cli->pipe_list, np_ref->pipe); > talloc_set_destructor(np_ref, rpc_pipe_client_np_ref_destructor); > >- result->binding_handle = rpccli_bh_create(result, NULL, table); >- if (result->binding_handle == NULL) { >- TALLOC_FREE(result); >- return NT_STATUS_NO_MEMORY; >+ result->binding_handle = rpccli_bh_create(result, NULL, state->table); >+ if (tevent_req_nomem(result->binding_handle, req)) { >+ return; > } > >- *presult = result; >+ tevent_req_done(req); >+} >+ >+NTSTATUS rpc_pipe_open_np_recv( >+ struct tevent_req *req, >+ TALLOC_CTX *mem_ctx, >+ struct rpc_pipe_client **_result) >+{ >+ struct rpc_pipe_open_np_state *state = tevent_req_data( >+ req, struct rpc_pipe_open_np_state); >+ NTSTATUS status; >+ >+ if (tevent_req_is_nterror(req, &status)) { >+ return status; >+ } >+ *_result = talloc_move(mem_ctx, &state->result); > return NT_STATUS_OK; > } > >+NTSTATUS rpc_pipe_open_np(struct cli_state *cli, >+ const struct ndr_interface_table *table, >+ struct rpc_pipe_client **presult) >+{ >+ struct tevent_context *ev = NULL; >+ struct tevent_req *req = NULL; >+ NTSTATUS status = NT_STATUS_NO_MEMORY; >+ >+ ev = samba_tevent_context_init(cli); >+ if (ev == NULL) { >+ goto fail; >+ } >+ req = rpc_pipe_open_np_send(ev, ev, cli, table); >+ if (req == NULL) { >+ goto fail; >+ } >+ if (!tevent_req_poll_ntstatus(req, ev, &status)) { >+ goto fail; >+ } >+ status = rpc_pipe_open_np_recv(req, NULL, presult); >+fail: >+ TALLOC_FREE(req); >+ TALLOC_FREE(ev); >+ return status; >+} >+ > /**************************************************************************** > Open a pipe to a remote server. 
> ****************************************************************************/ >diff --git a/source3/rpc_client/cli_pipe.h b/source3/rpc_client/cli_pipe.h >index d7b175456ed..d9826ca8e5c 100644 >--- a/source3/rpc_client/cli_pipe.h >+++ b/source3/rpc_client/cli_pipe.h >@@ -38,6 +38,19 @@ NTSTATUS rpc_pipe_bind_recv(struct tevent_req *req); > NTSTATUS rpc_pipe_bind(struct rpc_pipe_client *cli, > struct pipe_auth_data *auth); > >+struct tevent_req *rpc_pipe_open_np_send( >+ TALLOC_CTX *mem_ctx, >+ struct tevent_context *ev, >+ struct cli_state *cli, >+ const struct ndr_interface_table *table); >+NTSTATUS rpc_pipe_open_np_recv( >+ struct tevent_req *req, >+ TALLOC_CTX *mem_ctx, >+ struct rpc_pipe_client **_result); >+NTSTATUS rpc_pipe_open_np(struct cli_state *cli, >+ const struct ndr_interface_table *table, >+ struct rpc_pipe_client **presult); >+ > unsigned int rpccli_set_timeout(struct rpc_pipe_client *cli, > unsigned int timeout); > >-- >2.34.1 > > >From 307863007d75cfab35b925c3d30f7bce76119efd Mon Sep 17 00:00:00 2001 >From: Volker Lendecke <vl@samba.org> >Date: Mon, 6 Mar 2023 13:57:20 +0100 >Subject: [PATCH 2/6] librpc: Remove unused sync rpc_transport_np_init() > >Signed-off-by: Volker Lendecke <vl@samba.org> >Reviewed-by: Jeremy Allison <jra@samba.org> >(cherry picked from commit f3ce9970002459a069344ca5519fb91feacb4a6e) >--- > source3/rpc_client/rpc_transport.h | 3 --- > source3/rpc_client/rpc_transport_np.c | 31 --------------------------- > 2 files changed, 34 deletions(-) > >diff --git a/source3/rpc_client/rpc_transport.h b/source3/rpc_client/rpc_transport.h >index 1c774583bdc..dccfa36cdc5 100644 >--- a/source3/rpc_client/rpc_transport.h >+++ b/source3/rpc_client/rpc_transport.h >@@ -88,9 +88,6 @@ struct tevent_req *rpc_transport_np_init_send(TALLOC_CTX *mem_ctx, > NTSTATUS rpc_transport_np_init_recv(struct tevent_req *req, > TALLOC_CTX *mem_ctx, > struct rpc_cli_transport **presult); >-NTSTATUS rpc_transport_np_init(TALLOC_CTX *mem_ctx, struct cli_state 
*cli, >- const struct ndr_interface_table *table, >- struct rpc_cli_transport **presult); > > /* The following definitions come from rpc_client/rpc_transport_sock.c */ > >diff --git a/source3/rpc_client/rpc_transport_np.c b/source3/rpc_client/rpc_transport_np.c >index 27e38235ca0..fbe9a35aa3d 100644 >--- a/source3/rpc_client/rpc_transport_np.c >+++ b/source3/rpc_client/rpc_transport_np.c >@@ -177,34 +177,3 @@ NTSTATUS rpc_transport_np_init_recv(struct tevent_req *req, > *presult = talloc_move(mem_ctx, &state->transport); > return NT_STATUS_OK; > } >- >-NTSTATUS rpc_transport_np_init(TALLOC_CTX *mem_ctx, struct cli_state *cli, >- const struct ndr_interface_table *table, >- struct rpc_cli_transport **presult) >-{ >- TALLOC_CTX *frame = talloc_stackframe(); >- struct tevent_context *ev; >- struct tevent_req *req; >- NTSTATUS status = NT_STATUS_OK; >- >- ev = samba_tevent_context_init(frame); >- if (ev == NULL) { >- status = NT_STATUS_NO_MEMORY; >- goto fail; >- } >- >- req = rpc_transport_np_init_send(frame, ev, cli, table); >- if (req == NULL) { >- status = NT_STATUS_NO_MEMORY; >- goto fail; >- } >- >- if (!tevent_req_poll_ntstatus(req, ev, &status)) { >- goto fail; >- } >- >- status = rpc_transport_np_init_recv(req, mem_ctx, presult); >- fail: >- TALLOC_FREE(frame); >- return status; >-} >-- >2.34.1 > > >From 9331ce89a64aeaa538a9dde671d00f7f2d6912f8 Mon Sep 17 00:00:00 2001 >From: Volker Lendecke <vl@samba.org> >Date: Mon, 6 Mar 2023 10:05:41 +0100 >Subject: [PATCH 3/6] torture3: test rpc scalability > >With > >smbtorture3 //127.0.0.1/ipc\$ rpc-scale -N 50 -o 1000 > >I am able to immediately trigger bug 15130. > >Not running by default, this is a pure load test. 
> >BUG: https://bugzilla.samba.org/show_bug.cgi?id=15310 > >Signed-off-by: Volker Lendecke <vl@samba.org> >Reviewed-by: Jeremy Allison <jra@samba.org> >(cherry picked from commit 86e95b57d6848482dc73c624c2e8d2bdb79c1d21) >--- > source3/torture/proto.h | 1 + > source3/torture/test_rpc_scale.c | 301 +++++++++++++++++++++++++++++++ > source3/torture/torture.c | 4 + > source3/torture/wscript_build | 1 + > 4 files changed, 307 insertions(+) > create mode 100644 source3/torture/test_rpc_scale.c > >diff --git a/source3/torture/proto.h b/source3/torture/proto.h >index df98a7445d7..5e6d914c3da 100644 >--- a/source3/torture/proto.h >+++ b/source3/torture/proto.h >@@ -175,5 +175,6 @@ bool run_hidenewfiles(int dummy); > bool run_hidenewfiles_showdirs(int dummy); > bool run_readdir_timestamp(int dummy); > bool run_ctdbd_conn1(int dummy); >+bool run_rpc_scale(int dummy); > > #endif /* __TORTURE_H__ */ >diff --git a/source3/torture/test_rpc_scale.c b/source3/torture/test_rpc_scale.c >new file mode 100644 >index 00000000000..6ef26f37a99 >--- /dev/null >+++ b/source3/torture/test_rpc_scale.c >@@ -0,0 +1,301 @@ >+/* >+ * Unix SMB/CIFS implementation. >+ * >+ * This program is free software; you can redistribute it and/or modify >+ * it under the terms of the GNU General Public License as published by >+ * the Free Software Foundation; either version 3 of the License, or >+ * (at your option) any later version. >+ * >+ * This program is distributed in the hope that it will be useful, >+ * but WITHOUT ANY WARRANTY; without even the implied warranty of >+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the >+ * GNU General Public License for more details. >+ * >+ * You should have received a copy of the GNU General Public License >+ * along with this program. If not, see <http://www.gnu.org/licenses/>. 
>+ */ >+ >+#include "source3/include/includes.h" >+#include "source3/torture/proto.h" >+#include "source3/libsmb/libsmb.h" >+#include "librpc/gen_ndr/ndr_spoolss_c.h" >+#include "lib/util/tevent_ntstatus.h" >+#include "source3/rpc_client/rpc_client.h" >+#include "source3/rpc_client/cli_pipe.h" >+#include "libcli/smb/smbXcli_base.h" >+ >+extern int torture_nprocs; >+extern int torture_numops; >+ >+struct rpc_scale_one_state { >+ struct tevent_context *ev; >+ struct cli_state *cli; >+ size_t num_iterations; >+ struct rpc_pipe_client *rpccli; >+ DATA_BLOB buffer; >+ uint32_t needed; >+ uint32_t num_printers; >+ union spoolss_PrinterInfo *printers; >+}; >+ >+static void rpc_scale_one_opened(struct tevent_req *subreq); >+static void rpc_scale_one_bound(struct tevent_req *subreq); >+static void rpc_scale_one_listed(struct tevent_req *subreq); >+ >+static struct tevent_req *rpc_scale_one_send( >+ TALLOC_CTX *mem_ctx, >+ struct tevent_context *ev, >+ struct cli_state *cli, >+ size_t num_iterations) >+{ >+ struct tevent_req *req = NULL, *subreq = NULL; >+ struct rpc_scale_one_state *state = NULL; >+ >+ req = tevent_req_create(mem_ctx, &state, struct rpc_scale_one_state); >+ if (req == NULL) { >+ return NULL; >+ } >+ state->ev = ev; >+ state->cli = cli; >+ state->num_iterations = num_iterations; >+ >+ subreq = rpc_pipe_open_np_send( >+ state, ev, cli, &ndr_table_spoolss); >+ if (tevent_req_nomem(subreq, req)) { >+ return tevent_req_post(req, ev); >+ } >+ tevent_req_set_callback(subreq, rpc_scale_one_opened, req); >+ return req; >+} >+ >+static void rpc_scale_one_opened(struct tevent_req *subreq) >+{ >+ struct tevent_req *req = tevent_req_callback_data( >+ subreq, struct tevent_req); >+ struct rpc_scale_one_state *state = tevent_req_data( >+ req, struct rpc_scale_one_state); >+ struct pipe_auth_data *auth = NULL; >+ NTSTATUS status; >+ >+ status = rpc_pipe_open_np_recv(subreq, state, &state->rpccli); >+ TALLOC_FREE(subreq); >+ if (tevent_req_nterror(req, status)) { >+ return; 
>+ } >+ >+ status = rpccli_anon_bind_data(state, &auth); >+ if (tevent_req_nterror(req, status)) { >+ return; >+ } >+ >+ subreq = rpc_pipe_bind_send(state, state->ev, state->rpccli, auth); >+ if (tevent_req_nomem(subreq, req)) { >+ return; >+ } >+ tevent_req_set_callback(subreq, rpc_scale_one_bound, req); >+} >+ >+static void rpc_scale_one_bound(struct tevent_req *subreq) >+{ >+ struct tevent_req *req = tevent_req_callback_data( >+ subreq, struct tevent_req); >+ struct rpc_scale_one_state *state = tevent_req_data( >+ req, struct rpc_scale_one_state); >+ char *server = NULL; >+ NTSTATUS status; >+ >+ status = rpc_pipe_bind_recv(subreq); >+ if (tevent_req_nterror(req, status)) { >+ return; >+ } >+ >+ server = talloc_asprintf( >+ state, >+ "\\%s\n", >+ smbXcli_conn_remote_name(state->cli->conn)); >+ if (tevent_req_nomem(server, req)) { >+ return; >+ } >+ state->buffer = data_blob_talloc(state, NULL, 4096); >+ if (tevent_req_nomem(state->buffer.data, req)) { >+ return; >+ } >+ >+ subreq = dcerpc_spoolss_EnumPrinters_send( >+ state, >+ state->ev, >+ state->rpccli->binding_handle, >+ PRINTER_ENUM_LOCAL, >+ server, >+ 1, /* level */ >+ &state->buffer, >+ state->buffer.length, >+ &state->num_printers, >+ &state->printers, >+ &state->needed); >+ if (tevent_req_nomem(subreq, req)) { >+ return; >+ } >+ tevent_req_set_callback(subreq, rpc_scale_one_listed, req); >+} >+ >+static void rpc_scale_one_listed(struct tevent_req *subreq) >+{ >+ struct tevent_req *req = tevent_req_callback_data( >+ subreq, struct tevent_req); >+ struct rpc_scale_one_state *state = tevent_req_data( >+ req, struct rpc_scale_one_state); >+ NTSTATUS status; >+ WERROR result; >+ >+ status = dcerpc_spoolss_EnumPrinters_recv(subreq, state, &result); >+ if (tevent_req_nterror(req, status)) { >+ return; >+ } >+ >+ if (!W_ERROR_IS_OK(result)) { >+ status = werror_to_ntstatus(result); >+ tevent_req_nterror(req, status); >+ return; >+ } >+ >+ /* >+ * This will trigger a sync close. 
Making that async will be a >+ * lot of effort, and even with this being sync this test is >+ * nasty enough. >+ */ >+ TALLOC_FREE(state->rpccli); >+ >+ state->num_iterations -= 1; >+ >+ if (state->num_iterations == 0) { >+ tevent_req_done(req); >+ return; >+ } >+ >+ subreq = rpc_pipe_open_np_send( >+ state, state->ev, state->cli, &ndr_table_spoolss); >+ if (tevent_req_nomem(subreq, req)) { >+ return; >+ } >+ tevent_req_set_callback(subreq, rpc_scale_one_opened, req); >+} >+ >+static NTSTATUS rpc_scale_one_recv(struct tevent_req *req) >+{ >+ return tevent_req_simple_recv_ntstatus(req); >+} >+ >+struct rpc_scale_state { >+ size_t num_reqs; >+ size_t done; >+}; >+ >+static void rpc_scale_done(struct tevent_req *subreq); >+ >+static struct tevent_req *rpc_scale_send( >+ TALLOC_CTX *mem_ctx, >+ struct tevent_context *ev, >+ struct cli_state **clis) >+{ >+ struct tevent_req *req = NULL; >+ struct rpc_scale_state *state = NULL; >+ size_t i, num_clis = talloc_array_length(clis); >+ >+ req = tevent_req_create(mem_ctx, &state, struct rpc_scale_state); >+ if (req == NULL) { >+ return NULL; >+ } >+ state->num_reqs = num_clis; >+ >+ for (i=0; i<num_clis; i++) { >+ struct tevent_req *subreq = rpc_scale_one_send( >+ state, ev, clis[i], torture_numops); >+ if (tevent_req_nomem(subreq, req)) { >+ return tevent_req_post(req, ev); >+ } >+ tevent_req_set_callback(subreq, rpc_scale_done, req); >+ } >+ return req; >+} >+ >+static void rpc_scale_done(struct tevent_req *subreq) >+{ >+ struct tevent_req *req = tevent_req_callback_data( >+ subreq, struct tevent_req); >+ struct rpc_scale_state *state = tevent_req_data( >+ req, struct rpc_scale_state); >+ NTSTATUS status; >+ >+ status = rpc_scale_one_recv(subreq); >+ TALLOC_FREE(subreq); >+ if (tevent_req_nterror(req, status)) { >+ return; >+ } >+ >+ state->done += 1; >+ >+ if (state->done == state->num_reqs) { >+ tevent_req_done(req); >+ } >+} >+ >+static NTSTATUS rpc_scale_recv(struct tevent_req *req) >+{ >+ return 
tevent_req_simple_recv_ntstatus(req); >+} >+ >+bool run_rpc_scale(int dummy) >+{ >+ TALLOC_CTX *frame = talloc_stackframe(); >+ struct cli_state **clis = NULL; >+ struct tevent_req *req = NULL; >+ struct tevent_context *ev = NULL; >+ bool ok, result = false; >+ NTSTATUS status; >+ int i; >+ >+ clis = talloc_zero_array( >+ talloc_tos(), struct cli_state *, torture_nprocs); >+ if (clis == NULL) { >+ fprintf(stderr, "talloc failed\n"); >+ goto fail; >+ } >+ >+ for (i=0; i<torture_nprocs; i++) { >+ ok = torture_open_connection_flags(&clis[i], i, 0); >+ if (!ok) { >+ fprintf(stderr, "could not open connection %d\n", i); >+ goto fail; >+ } >+ } >+ >+ ev = samba_tevent_context_init(talloc_tos()); >+ if (ev == NULL) { >+ goto fail; >+ } >+ >+ req = rpc_scale_send(talloc_tos(), ev, clis); >+ if (req == NULL) { >+ goto fail; >+ } >+ >+ ok = tevent_req_poll_ntstatus(req, ev, &status); >+ if (!ok) { >+ fprintf(stderr, >+ "rpc_scale_send failed: %s\n", >+ nt_errstr(status)); >+ goto fail; >+ } >+ >+ status = rpc_scale_recv(req); >+ if (!NT_STATUS_IS_OK(status)) { >+ fprintf(stderr, "rpc_scale failed: %s\n", nt_errstr(status)); >+ goto fail; >+ } >+ >+ result = true; >+fail: >+ TALLOC_FREE(frame); >+ return result; >+} >diff --git a/source3/torture/torture.c b/source3/torture/torture.c >index acf245f3cb5..c63db3f9385 100644 >--- a/source3/torture/torture.c >+++ b/source3/torture/torture.c >@@ -15631,6 +15631,10 @@ static struct { > .name = "readdir-timestamp", > .fn = run_readdir_timestamp, > }, >+ { >+ .name = "rpc-scale", >+ .fn = run_rpc_scale, >+ }, > { > .name = NULL, > }, >diff --git a/source3/torture/wscript_build b/source3/torture/wscript_build >index 18106aaf59a..1df5d88cd31 100644 >--- a/source3/torture/wscript_build >+++ b/source3/torture/wscript_build >@@ -58,6 +58,7 @@ bld.SAMBA3_BINARY('smbtorture' + bld.env.suffix3, > test_idmap_cache.c > test_hidenewfiles.c > test_readdir_timestamp.c >+ test_rpc_scale.c > ''' + TORTURE3_ADDITIONAL_SOURCE, > deps=''' > talloc >-- 
>2.34.1 > > >From 559c223942155331b5ff67368ec4ffcb45dbd7f7 Mon Sep 17 00:00:00 2001 >From: Volker Lendecke <vl@samba.org> >Date: Wed, 1 Mar 2023 14:40:37 +0100 >Subject: [PATCH 4/6] rpcd: Increase listening queue > >Allow more waiters under load. > >BUG: https://bugzilla.samba.org/show_bug.cgi?id=15310 > >Signed-off-by: Volker Lendecke <vl@samba.org> >Reviewed-by: Jeremy Allison <jra@samba.org> >(cherry picked from commit f23eb1b3b728d7a13add8c6614d9992aad2c3653) >--- > source3/rpc_server/rpc_host.c | 2 +- > 1 file changed, 1 insertion(+), 1 deletion(-) > >diff --git a/source3/rpc_server/rpc_host.c b/source3/rpc_server/rpc_host.c >index f58c825e7cc..a5b3f1de819 100644 >--- a/source3/rpc_server/rpc_host.c >+++ b/source3/rpc_server/rpc_host.c >@@ -1689,7 +1689,7 @@ static void rpc_server_setup_got_endpoints(struct tevent_req *subreq) > } > > for (j=0; j<e->num_fds; j++) { >- ret = listen(e->fds[j], 5); >+ ret = listen(e->fds[j], 256); > if (ret == -1) { > tevent_req_nterror( > req, map_nt_error_from_unix(errno)); >-- >2.34.1 > > >From 29c795d0a78a290170c3028633083ea229a8f3cd Mon Sep 17 00:00:00 2001 >From: Volker Lendecke <vl@samba.org> >Date: Wed, 1 Mar 2023 14:42:00 +0100 >Subject: [PATCH 5/6] rpcd: Do blocking connects to local pipes > >We don't have real async callers yet, and this is the simplest way to >fix our missing light-weight deterministic async fallback mechanism. 
> >BUG: https://bugzilla.samba.org/show_bug.cgi?id=15310 > >Signed-off-by: Volker Lendecke <vl@samba.org> >Reviewed-by: Jeremy Allison <jra@samba.org> >(cherry picked from commit 0ab7b84ccbd383bb2f696ce258438d4af57fe6f0) >--- > source3/rpc_client/local_np.c | 14 +++++++++++++- > 1 file changed, 13 insertions(+), 1 deletion(-) > >diff --git a/source3/rpc_client/local_np.c b/source3/rpc_client/local_np.c >index 5b1a818c88d..f1d61a09ee3 100644 >--- a/source3/rpc_client/local_np.c >+++ b/source3/rpc_client/local_np.c >@@ -101,7 +101,7 @@ static struct tevent_req *np_sock_connect_send( > return tevent_req_post(req, ev); > } > >- ret = set_blocking(state->sock, false); >+ ret = set_blocking(state->sock, true); > if (ret == -1) { > tevent_req_error(req, errno); > return tevent_req_post(req, ev); >@@ -174,6 +174,18 @@ static void np_sock_connect_connected(struct tevent_req *subreq) > return; > } > >+ /* >+ * As a quick workaround for bug 15310 we have done the >+ * connect in blocking mode (see np_sock_connect_send()). The >+ * rest of our code expects a nonblocking socket, activate >+ * this after the connect succeeded. >+ */ >+ ret = set_blocking(state->sock, false); >+ if (ret == -1) { >+ tevent_req_error(req, errno); >+ return; >+ } >+ > ret = tstream_bsd_existing_socket( > state, state->sock, &state->transport); > if (ret == -1) { >-- >2.34.1 > > >From 8b8282a0ad43faeb0acd3257bcbd22be53a3f2d6 Mon Sep 17 00:00:00 2001 >From: Volker Lendecke <vl@samba.org> >Date: Mon, 20 Feb 2023 18:46:50 +0100 >Subject: [PATCH 6/6] rpcd: With npa->need_idle_server we can have more than > 256 servers > >Before this patch the worker-status cut the worker index such that >samba-dcerpcd could not properly update status of the surplus rpc >daemons. This could lead to those daemons to stay around forever, >samba-dcerpcd will never notice they are idle and can exit. 
> >BUG: https://bugzilla.samba.org/show_bug.cgi?id=15310 > >Signed-off-by: Volker Lendecke <vl@samba.org> >Reviewed-by: Jeremy Allison <jra@samba.org> > >Autobuild-User(master): Jeremy Allison <jra@samba.org> >Autobuild-Date(master): Mon Mar 6 22:35:00 UTC 2023 on atb-devel-224 > >(cherry picked from commit a1780ed8d1b46e4760319b27a4978e7ce7a1df80) >--- > source3/librpc/idl/rpc_host.idl | 2 +- > source3/rpc_server/rpc_worker.c | 2 +- > 2 files changed, 2 insertions(+), 2 deletions(-) > >diff --git a/source3/librpc/idl/rpc_host.idl b/source3/librpc/idl/rpc_host.idl >index 0fc3f9514aa..c8abb6c4379 100644 >--- a/source3/librpc/idl/rpc_host.idl >+++ b/source3/librpc/idl/rpc_host.idl >@@ -66,7 +66,7 @@ interface rpc_host_msg > /** > * @brief Which of the processes of a helper prog is this from > */ >- uint8 worker_index; >+ uint32 worker_index; > > /** > * @brief How many clients this process serves right now >diff --git a/source3/rpc_server/rpc_worker.c b/source3/rpc_server/rpc_worker.c >index 1bc84531e55..11f6a721a63 100644 >--- a/source3/rpc_server/rpc_worker.c >+++ b/source3/rpc_server/rpc_worker.c >@@ -93,7 +93,7 @@ static void rpc_worker_print_interface( > > static NTSTATUS rpc_worker_report_status(struct rpc_worker *worker) > { >- uint8_t buf[6]; >+ uint8_t buf[9]; > DATA_BLOB blob = { .data = buf, .length = sizeof(buf), }; > enum ndr_err_code ndr_err; > NTSTATUS status; >-- >2.34.1 >
You cannot view the attachment while viewing its details because your browser does not support IFRAMEs.
View the attachment on a separate page.
View Attachment As Raw
Flags:
vl
:
review+
Actions:
View
Attachments on
bug 15310
:
17785
| 17794 |
17795