From 5c018211013a89d803a7ee51e369c1e9c18a2b12 Mon Sep 17 00:00:00 2001 From: Volker Lendecke Date: Mon, 6 Mar 2023 13:55:43 +0100 Subject: [PATCH 1/6] librpc: Make rpc_pipe_open_np() public and async Signed-off-by: Volker Lendecke Reviewed-by: Jeremy Allison (cherry picked from commit 07ebf97a74fb5c0d0504e76c50f3aca8257dab1f) --- source3/rpc_client/cli_pipe.c | 132 +++++++++++++++++++++++++--------- source3/rpc_client/cli_pipe.h | 13 ++++ 2 files changed, 113 insertions(+), 32 deletions(-) diff --git a/source3/rpc_client/cli_pipe.c b/source3/rpc_client/cli_pipe.c index 5e26dc1806d..2af68b169af 100644 --- a/source3/rpc_client/cli_pipe.c +++ b/source3/rpc_client/cli_pipe.c @@ -3172,74 +3172,142 @@ static int rpc_pipe_client_np_ref_destructor(struct rpc_pipe_client_np_ref *np_r * ****************************************************************************/ -static NTSTATUS rpc_pipe_open_np(struct cli_state *cli, - const struct ndr_interface_table *table, - struct rpc_pipe_client **presult) -{ +struct rpc_pipe_open_np_state { + struct cli_state *cli; + const struct ndr_interface_table *table; struct rpc_pipe_client *result; - NTSTATUS status; - struct rpc_pipe_client_np_ref *np_ref; +}; + +static void rpc_pipe_open_np_done(struct tevent_req *subreq); - /* sanity check to protect against crashes */ +struct tevent_req *rpc_pipe_open_np_send( + TALLOC_CTX *mem_ctx, + struct tevent_context *ev, + struct cli_state *cli, + const struct ndr_interface_table *table) +{ + struct tevent_req *req = NULL, *subreq = NULL; + struct rpc_pipe_open_np_state *state = NULL; + struct rpc_pipe_client *result = NULL; - if ( !cli ) { - return NT_STATUS_INVALID_HANDLE; + req = tevent_req_create( + mem_ctx, &state, struct rpc_pipe_open_np_state); + if (req == NULL) { + return NULL; } + state->cli = cli; + state->table = table; - result = talloc_zero(NULL, struct rpc_pipe_client); - if (result == NULL) { - return NT_STATUS_NO_MEMORY; + state->result = talloc_zero(state, struct rpc_pipe_client); 
+ if (tevent_req_nomem(state->result, req)) { + return tevent_req_post(req, ev); } + result = state->result; result->abstract_syntax = table->syntax_id; result->transfer_syntax = ndr_transfer_syntax_ndr; result->desthost = talloc_strdup( result, smbXcli_conn_remote_name(cli->conn)); - if (result->desthost == NULL) { - TALLOC_FREE(result); - return NT_STATUS_NO_MEMORY; + if (tevent_req_nomem(result->desthost, req)) { + return tevent_req_post(req, ev); } result->srv_name_slash = talloc_asprintf_strupper_m( result, "\\\\%s", result->desthost); - if (result->srv_name_slash == NULL) { - TALLOC_FREE(result); - return NT_STATUS_NO_MEMORY; + if (tevent_req_nomem(result->srv_name_slash, req)) { + return tevent_req_post(req, ev); } result->max_xmit_frag = RPC_MAX_PDU_FRAG_LEN; - status = rpc_transport_np_init(result, cli, table, - &result->transport); - if (!NT_STATUS_IS_OK(status)) { - TALLOC_FREE(result); - return status; + subreq = rpc_transport_np_init_send(state, ev, cli, table); + if (tevent_req_nomem(subreq, req)) { + return tevent_req_post(req, ev); + } + tevent_req_set_callback(subreq, rpc_pipe_open_np_done, req); + return req; +} + +static void rpc_pipe_open_np_done(struct tevent_req *subreq) +{ + struct tevent_req *req = tevent_req_callback_data( + subreq, struct tevent_req); + struct rpc_pipe_open_np_state *state = tevent_req_data( + req, struct rpc_pipe_open_np_state); + struct rpc_pipe_client *result = state->result; + struct rpc_pipe_client_np_ref *np_ref = NULL; + NTSTATUS status; + + status = rpc_transport_np_init_recv( + subreq, result, &result->transport); + TALLOC_FREE(subreq); + if (tevent_req_nterror(req, status)) { + return; } result->transport->transport = NCACN_NP; np_ref = talloc(result->transport, struct rpc_pipe_client_np_ref); - if (np_ref == NULL) { - TALLOC_FREE(result); - return NT_STATUS_NO_MEMORY; + if (tevent_req_nomem(np_ref, req)) { + return; } - np_ref->cli = cli; + np_ref->cli = state->cli; np_ref->pipe = result; 
DLIST_ADD(np_ref->cli->pipe_list, np_ref->pipe); talloc_set_destructor(np_ref, rpc_pipe_client_np_ref_destructor); - result->binding_handle = rpccli_bh_create(result, NULL, table); - if (result->binding_handle == NULL) { - TALLOC_FREE(result); - return NT_STATUS_NO_MEMORY; + result->binding_handle = rpccli_bh_create(result, NULL, state->table); + if (tevent_req_nomem(result->binding_handle, req)) { + return; } - *presult = result; + tevent_req_done(req); +} + +NTSTATUS rpc_pipe_open_np_recv( + struct tevent_req *req, + TALLOC_CTX *mem_ctx, + struct rpc_pipe_client **_result) +{ + struct rpc_pipe_open_np_state *state = tevent_req_data( + req, struct rpc_pipe_open_np_state); + NTSTATUS status; + + if (tevent_req_is_nterror(req, &status)) { + return status; + } + *_result = talloc_move(mem_ctx, &state->result); return NT_STATUS_OK; } +NTSTATUS rpc_pipe_open_np(struct cli_state *cli, + const struct ndr_interface_table *table, + struct rpc_pipe_client **presult) +{ + struct tevent_context *ev = NULL; + struct tevent_req *req = NULL; + NTSTATUS status = NT_STATUS_NO_MEMORY; + + ev = samba_tevent_context_init(cli); + if (ev == NULL) { + goto fail; + } + req = rpc_pipe_open_np_send(ev, ev, cli, table); + if (req == NULL) { + goto fail; + } + if (!tevent_req_poll_ntstatus(req, ev, &status)) { + goto fail; + } + status = rpc_pipe_open_np_recv(req, NULL, presult); +fail: + TALLOC_FREE(req); + TALLOC_FREE(ev); + return status; +} + /**************************************************************************** Open a pipe to a remote server. 
****************************************************************************/ diff --git a/source3/rpc_client/cli_pipe.h b/source3/rpc_client/cli_pipe.h index d7b175456ed..d9826ca8e5c 100644 --- a/source3/rpc_client/cli_pipe.h +++ b/source3/rpc_client/cli_pipe.h @@ -38,6 +38,19 @@ NTSTATUS rpc_pipe_bind_recv(struct tevent_req *req); NTSTATUS rpc_pipe_bind(struct rpc_pipe_client *cli, struct pipe_auth_data *auth); +struct tevent_req *rpc_pipe_open_np_send( + TALLOC_CTX *mem_ctx, + struct tevent_context *ev, + struct cli_state *cli, + const struct ndr_interface_table *table); +NTSTATUS rpc_pipe_open_np_recv( + struct tevent_req *req, + TALLOC_CTX *mem_ctx, + struct rpc_pipe_client **_result); +NTSTATUS rpc_pipe_open_np(struct cli_state *cli, + const struct ndr_interface_table *table, + struct rpc_pipe_client **presult); + unsigned int rpccli_set_timeout(struct rpc_pipe_client *cli, unsigned int timeout); -- 2.34.1 From 87d9c6771f1c863417df75b724dc3ef8cca0e6fd Mon Sep 17 00:00:00 2001 From: Volker Lendecke Date: Mon, 6 Mar 2023 13:57:20 +0100 Subject: [PATCH 2/6] librpc: Remove unused sync rpc_transport_np_init() Signed-off-by: Volker Lendecke Reviewed-by: Jeremy Allison (cherry picked from commit f3ce9970002459a069344ca5519fb91feacb4a6e) --- source3/rpc_client/rpc_transport.h | 3 --- source3/rpc_client/rpc_transport_np.c | 31 --------------------------- 2 files changed, 34 deletions(-) diff --git a/source3/rpc_client/rpc_transport.h b/source3/rpc_client/rpc_transport.h index 1c774583bdc..dccfa36cdc5 100644 --- a/source3/rpc_client/rpc_transport.h +++ b/source3/rpc_client/rpc_transport.h @@ -88,9 +88,6 @@ struct tevent_req *rpc_transport_np_init_send(TALLOC_CTX *mem_ctx, NTSTATUS rpc_transport_np_init_recv(struct tevent_req *req, TALLOC_CTX *mem_ctx, struct rpc_cli_transport **presult); -NTSTATUS rpc_transport_np_init(TALLOC_CTX *mem_ctx, struct cli_state *cli, - const struct ndr_interface_table *table, - struct rpc_cli_transport **presult); /* The following 
definitions come from rpc_client/rpc_transport_sock.c */ diff --git a/source3/rpc_client/rpc_transport_np.c b/source3/rpc_client/rpc_transport_np.c index 27e38235ca0..fbe9a35aa3d 100644 --- a/source3/rpc_client/rpc_transport_np.c +++ b/source3/rpc_client/rpc_transport_np.c @@ -177,34 +177,3 @@ NTSTATUS rpc_transport_np_init_recv(struct tevent_req *req, *presult = talloc_move(mem_ctx, &state->transport); return NT_STATUS_OK; } - -NTSTATUS rpc_transport_np_init(TALLOC_CTX *mem_ctx, struct cli_state *cli, - const struct ndr_interface_table *table, - struct rpc_cli_transport **presult) -{ - TALLOC_CTX *frame = talloc_stackframe(); - struct tevent_context *ev; - struct tevent_req *req; - NTSTATUS status = NT_STATUS_OK; - - ev = samba_tevent_context_init(frame); - if (ev == NULL) { - status = NT_STATUS_NO_MEMORY; - goto fail; - } - - req = rpc_transport_np_init_send(frame, ev, cli, table); - if (req == NULL) { - status = NT_STATUS_NO_MEMORY; - goto fail; - } - - if (!tevent_req_poll_ntstatus(req, ev, &status)) { - goto fail; - } - - status = rpc_transport_np_init_recv(req, mem_ctx, presult); - fail: - TALLOC_FREE(frame); - return status; -} -- 2.34.1 From f8335c48bf78858ae21c3c188445aa4edc722d15 Mon Sep 17 00:00:00 2001 From: Volker Lendecke Date: Mon, 6 Mar 2023 10:05:41 +0100 Subject: [PATCH 3/6] torture3: test rpc scalability With smbtorture3 //127.0.0.1/ipc\$ rpc-scale -N 50 -o 1000 I am able to immediately trigger bug 15310. Not running by default, this is a pure load test.
BUG: https://bugzilla.samba.org/show_bug.cgi?id=15310 Signed-off-by: Volker Lendecke Reviewed-by: Jeremy Allison (back-ported from commit 86e95b57d6848482dc73c624c2e8d2bdb79c1d21) --- source3/torture/proto.h | 1 + source3/torture/test_rpc_scale.c | 301 +++++++++++++++++++++++++++++++ source3/torture/torture.c | 4 + source3/torture/wscript_build | 1 + 4 files changed, 307 insertions(+) create mode 100644 source3/torture/test_rpc_scale.c diff --git a/source3/torture/proto.h b/source3/torture/proto.h index 551c4ea80ac..4fa2fbd12a1 100644 --- a/source3/torture/proto.h +++ b/source3/torture/proto.h @@ -167,5 +167,6 @@ bool run_local_idmap_cache1(int dummy); bool run_hidenewfiles(int dummy); bool run_readdir_timestamp(int dummy); bool run_ctdbd_conn1(int dummy); +bool run_rpc_scale(int dummy); #endif /* __TORTURE_H__ */ diff --git a/source3/torture/test_rpc_scale.c b/source3/torture/test_rpc_scale.c new file mode 100644 index 00000000000..6ef26f37a99 --- /dev/null +++ b/source3/torture/test_rpc_scale.c @@ -0,0 +1,301 @@ +/* + * Unix SMB/CIFS implementation. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +#include "source3/include/includes.h" +#include "source3/torture/proto.h" +#include "source3/libsmb/libsmb.h" +#include "librpc/gen_ndr/ndr_spoolss_c.h" +#include "lib/util/tevent_ntstatus.h" +#include "source3/rpc_client/rpc_client.h" +#include "source3/rpc_client/cli_pipe.h" +#include "libcli/smb/smbXcli_base.h" + +extern int torture_nprocs; +extern int torture_numops; + +struct rpc_scale_one_state { + struct tevent_context *ev; + struct cli_state *cli; + size_t num_iterations; + struct rpc_pipe_client *rpccli; + DATA_BLOB buffer; + uint32_t needed; + uint32_t num_printers; + union spoolss_PrinterInfo *printers; +}; + +static void rpc_scale_one_opened(struct tevent_req *subreq); +static void rpc_scale_one_bound(struct tevent_req *subreq); +static void rpc_scale_one_listed(struct tevent_req *subreq); + +static struct tevent_req *rpc_scale_one_send( + TALLOC_CTX *mem_ctx, + struct tevent_context *ev, + struct cli_state *cli, + size_t num_iterations) +{ + struct tevent_req *req = NULL, *subreq = NULL; + struct rpc_scale_one_state *state = NULL; + + req = tevent_req_create(mem_ctx, &state, struct rpc_scale_one_state); + if (req == NULL) { + return NULL; + } + state->ev = ev; + state->cli = cli; + state->num_iterations = num_iterations; + + subreq = rpc_pipe_open_np_send( + state, ev, cli, &ndr_table_spoolss); + if (tevent_req_nomem(subreq, req)) { + return tevent_req_post(req, ev); + } + tevent_req_set_callback(subreq, rpc_scale_one_opened, req); + return req; +} + +static void rpc_scale_one_opened(struct tevent_req *subreq) +{ + struct tevent_req *req = tevent_req_callback_data( + subreq, struct tevent_req); + struct rpc_scale_one_state *state = tevent_req_data( + req, struct rpc_scale_one_state); + struct pipe_auth_data *auth = NULL; + NTSTATUS status; + + status = rpc_pipe_open_np_recv(subreq, state, &state->rpccli); + TALLOC_FREE(subreq); + if (tevent_req_nterror(req, status)) { + return; + } + + status = rpccli_anon_bind_data(state, &auth); + if 
(tevent_req_nterror(req, status)) { + return; + } + + subreq = rpc_pipe_bind_send(state, state->ev, state->rpccli, auth); + if (tevent_req_nomem(subreq, req)) { + return; + } + tevent_req_set_callback(subreq, rpc_scale_one_bound, req); +} + +static void rpc_scale_one_bound(struct tevent_req *subreq) +{ + struct tevent_req *req = tevent_req_callback_data( + subreq, struct tevent_req); + struct rpc_scale_one_state *state = tevent_req_data( + req, struct rpc_scale_one_state); + char *server = NULL; + NTSTATUS status; + + status = rpc_pipe_bind_recv(subreq); + if (tevent_req_nterror(req, status)) { + return; + } + + server = talloc_asprintf( + state, + "\\%s\n", + smbXcli_conn_remote_name(state->cli->conn)); + if (tevent_req_nomem(server, req)) { + return; + } + state->buffer = data_blob_talloc(state, NULL, 4096); + if (tevent_req_nomem(state->buffer.data, req)) { + return; + } + + subreq = dcerpc_spoolss_EnumPrinters_send( + state, + state->ev, + state->rpccli->binding_handle, + PRINTER_ENUM_LOCAL, + server, + 1, /* level */ + &state->buffer, + state->buffer.length, + &state->num_printers, + &state->printers, + &state->needed); + if (tevent_req_nomem(subreq, req)) { + return; + } + tevent_req_set_callback(subreq, rpc_scale_one_listed, req); +} + +static void rpc_scale_one_listed(struct tevent_req *subreq) +{ + struct tevent_req *req = tevent_req_callback_data( + subreq, struct tevent_req); + struct rpc_scale_one_state *state = tevent_req_data( + req, struct rpc_scale_one_state); + NTSTATUS status; + WERROR result; + + status = dcerpc_spoolss_EnumPrinters_recv(subreq, state, &result); + if (tevent_req_nterror(req, status)) { + return; + } + + if (!W_ERROR_IS_OK(result)) { + status = werror_to_ntstatus(result); + tevent_req_nterror(req, status); + return; + } + + /* + * This will trigger a sync close. Making that async will be a + * lot of effort, and even with this being sync this test is + * nasty enough. 
+ */ + TALLOC_FREE(state->rpccli); + + state->num_iterations -= 1; + + if (state->num_iterations == 0) { + tevent_req_done(req); + return; + } + + subreq = rpc_pipe_open_np_send( + state, state->ev, state->cli, &ndr_table_spoolss); + if (tevent_req_nomem(subreq, req)) { + return; + } + tevent_req_set_callback(subreq, rpc_scale_one_opened, req); +} + +static NTSTATUS rpc_scale_one_recv(struct tevent_req *req) +{ + return tevent_req_simple_recv_ntstatus(req); +} + +struct rpc_scale_state { + size_t num_reqs; + size_t done; +}; + +static void rpc_scale_done(struct tevent_req *subreq); + +static struct tevent_req *rpc_scale_send( + TALLOC_CTX *mem_ctx, + struct tevent_context *ev, + struct cli_state **clis) +{ + struct tevent_req *req = NULL; + struct rpc_scale_state *state = NULL; + size_t i, num_clis = talloc_array_length(clis); + + req = tevent_req_create(mem_ctx, &state, struct rpc_scale_state); + if (req == NULL) { + return NULL; + } + state->num_reqs = num_clis; + + for (i=0; idone += 1; + + if (state->done == state->num_reqs) { + tevent_req_done(req); + } +} + +static NTSTATUS rpc_scale_recv(struct tevent_req *req) +{ + return tevent_req_simple_recv_ntstatus(req); +} + +bool run_rpc_scale(int dummy) +{ + TALLOC_CTX *frame = talloc_stackframe(); + struct cli_state **clis = NULL; + struct tevent_req *req = NULL; + struct tevent_context *ev = NULL; + bool ok, result = false; + NTSTATUS status; + int i; + + clis = talloc_zero_array( + talloc_tos(), struct cli_state *, torture_nprocs); + if (clis == NULL) { + fprintf(stderr, "talloc failed\n"); + goto fail; + } + + for (i=0; i Date: Wed, 1 Mar 2023 14:40:37 +0100 Subject: [PATCH 4/6] rpcd: Increase listening queue Allow more waiters under load. 
BUG: https://bugzilla.samba.org/show_bug.cgi?id=15310 Signed-off-by: Volker Lendecke Reviewed-by: Jeremy Allison (cherry picked from commit f23eb1b3b728d7a13add8c6614d9992aad2c3653) --- source3/rpc_server/rpc_host.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source3/rpc_server/rpc_host.c b/source3/rpc_server/rpc_host.c index 7ca24240a92..5bc927763bc 100644 --- a/source3/rpc_server/rpc_host.c +++ b/source3/rpc_server/rpc_host.c @@ -1689,7 +1689,7 @@ static void rpc_server_setup_got_endpoints(struct tevent_req *subreq) } for (j=0; jnum_fds; j++) { - ret = listen(e->fds[j], 5); + ret = listen(e->fds[j], 256); if (ret == -1) { tevent_req_nterror( req, map_nt_error_from_unix(errno)); -- 2.34.1 From 576c5040d4ba6b9c26ca3aace4f71f8f2e026d5b Mon Sep 17 00:00:00 2001 From: Volker Lendecke Date: Wed, 1 Mar 2023 14:42:00 +0100 Subject: [PATCH 5/6] rpcd: Do blocking connects to local pipes We don't have real async callers yet, and this is the simplest way to fix our missing light-weight deterministic async fallback mechanism. 
BUG: https://bugzilla.samba.org/show_bug.cgi?id=15310 Signed-off-by: Volker Lendecke Reviewed-by: Jeremy Allison (cherry picked from commit 0ab7b84ccbd383bb2f696ce258438d4af57fe6f0) --- source3/rpc_client/local_np.c | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/source3/rpc_client/local_np.c b/source3/rpc_client/local_np.c index 5b1a818c88d..f1d61a09ee3 100644 --- a/source3/rpc_client/local_np.c +++ b/source3/rpc_client/local_np.c @@ -101,7 +101,7 @@ static struct tevent_req *np_sock_connect_send( return tevent_req_post(req, ev); } - ret = set_blocking(state->sock, false); + ret = set_blocking(state->sock, true); if (ret == -1) { tevent_req_error(req, errno); return tevent_req_post(req, ev); @@ -174,6 +174,18 @@ static void np_sock_connect_connected(struct tevent_req *subreq) return; } + /* + * As a quick workaround for bug 15310 we have done the + * connect in blocking mode (see np_sock_connect_send()). The + * rest of our code expects a nonblocking socket, activate + * this after the connect succeeded. + */ + ret = set_blocking(state->sock, false); + if (ret == -1) { + tevent_req_error(req, errno); + return; + } + ret = tstream_bsd_existing_socket( state, state->sock, &state->transport); if (ret == -1) { -- 2.34.1 From 1d93e60191400c3f8e97574680557fc10bec4e2d Mon Sep 17 00:00:00 2001 From: Volker Lendecke Date: Mon, 20 Feb 2023 18:46:50 +0100 Subject: [PATCH 6/6] rpcd: With npa->need_idle_server we can have more than 256 servers Before this patch the worker-status cut the worker index such that samba-dcerpcd could not properly update status of the surplus rpc daemons. This could lead to those daemons to stay around forever, samba-dcerpcd will never notice they are idle and can exit. 
BUG: https://bugzilla.samba.org/show_bug.cgi?id=15310 Signed-off-by: Volker Lendecke Reviewed-by: Jeremy Allison Autobuild-User(master): Jeremy Allison Autobuild-Date(master): Mon Mar 6 22:35:00 UTC 2023 on atb-devel-224 (cherry picked from commit a1780ed8d1b46e4760319b27a4978e7ce7a1df80) --- source3/librpc/idl/rpc_host.idl | 2 +- source3/rpc_server/rpc_worker.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/source3/librpc/idl/rpc_host.idl b/source3/librpc/idl/rpc_host.idl index 0fc3f9514aa..c8abb6c4379 100644 --- a/source3/librpc/idl/rpc_host.idl +++ b/source3/librpc/idl/rpc_host.idl @@ -66,7 +66,7 @@ interface rpc_host_msg /** * @brief Which of the processes of a helper prog is this from */ - uint8 worker_index; + uint32 worker_index; /** * @brief How many clients this process serves right now diff --git a/source3/rpc_server/rpc_worker.c b/source3/rpc_server/rpc_worker.c index 2d2bb35af0f..9b7474c2c86 100644 --- a/source3/rpc_server/rpc_worker.c +++ b/source3/rpc_server/rpc_worker.c @@ -93,7 +93,7 @@ static void rpc_worker_print_interface( static NTSTATUS rpc_worker_report_status(struct rpc_worker *worker) { - uint8_t buf[6]; + uint8_t buf[9]; DATA_BLOB blob = { .data = buf, .length = sizeof(buf), }; enum ndr_err_code ndr_err; NTSTATUS status; -- 2.34.1