The Samba-Bugzilla – Attachment 14191 Details for Bug 13425: VFS modules that implement pread/pwrite must also implement pread_send/pwrite_send.
[patch]
git-am fix for 4.8.next
0001-vfs_ceph-add-fake-async-pwrite-pread-send-recv-hooks.patch (text/plain), 5.04 KB, created by Jeremy Allison on 2018-05-09 21:04:21 UTC
Description: git-am fix for 4.8.next
Filename: 0001-vfs_ceph-add-fake-async-pwrite-pread-send-recv-hooks.patch
MIME Type: text/plain
Creator: Jeremy Allison
Created: 2018-05-09 21:04:21 UTC
Size: 5.04 KB
From 8496ac644b541c728e2e2b21b2f297e4e7166fae Mon Sep 17 00:00:00 2001
From: David Disseldorp <ddiss@samba.org>
Date: Wed, 9 May 2018 16:51:34 +0200
Subject: [PATCH] vfs_ceph: add fake async pwrite/pread send/recv hooks

As found by Jeremy, VFS modules that don't provide pread_send() or
pwrite_send() hooks result in vfs_default fallback, which is
catastrophic for VFS modules with non-mounted filesystems such as
vfs_ceph.

Bug: https://bugzilla.samba.org/show_bug.cgi?id=13425

Reported-by: Jeremy Allison <jra@samba.org>
Signed-off-by: David Disseldorp <ddiss@samba.org>
Reviewed-by: Jeremy Allison <jra@samba.org>
(cherry picked from commit f0e6453b0420fe9d062936d4ddc05f44b40cf2ba)
---
 source3/modules/vfs_ceph.c | 109 ++++++++++++++++++++++++++++++++++++-
 1 file changed, 108 insertions(+), 1 deletion(-)

diff --git a/source3/modules/vfs_ceph.c b/source3/modules/vfs_ceph.c
index 61df5dedf82..857310c2ac3 100644
--- a/source3/modules/vfs_ceph.c
+++ b/source3/modules/vfs_ceph.c
@@ -482,6 +482,57 @@ static ssize_t cephwrap_pread(struct vfs_handle_struct *handle, files_struct *fs
 	WRAP_RETURN(result);
 }
 
+struct cephwrap_pread_state {
+	ssize_t bytes_read;
+	struct vfs_aio_state vfs_aio_state;
+};
+
+/*
+ * Fake up an async ceph read by calling the synchronous API.
+ */
+static struct tevent_req *cephwrap_pread_send(struct vfs_handle_struct *handle,
+					      TALLOC_CTX *mem_ctx,
+					      struct tevent_context *ev,
+					      struct files_struct *fsp,
+					      void *data,
+					      size_t n, off_t offset)
+{
+	struct tevent_req *req = NULL;
+	struct cephwrap_pread_state *state = NULL;
+	int ret = -1;
+
+	DBG_DEBUG("[CEPH] %s\n", __func__);
+	req = tevent_req_create(mem_ctx, &state, struct cephwrap_pread_state);
+	if (req == NULL) {
+		return NULL;
+	}
+
+	ret = ceph_read(handle->data, fsp->fh->fd, data, n, offset);
+	if (ret < 0) {
+		/* ceph returns -errno on error. */
+		tevent_req_error(req, -ret);
+		return tevent_req_post(req, ev);
+	}
+
+	state->bytes_read = ret;
+	tevent_req_done(req);
+	/* Return and schedule the completion of the call. */
+	return tevent_req_post(req, ev);
+}
+
+static ssize_t cephwrap_pread_recv(struct tevent_req *req,
+				   struct vfs_aio_state *vfs_aio_state)
+{
+	struct cephwrap_pread_state *state =
+		tevent_req_data(req, struct cephwrap_pread_state);
+
+	DBG_DEBUG("[CEPH] %s\n", __func__);
+	if (tevent_req_is_unix_error(req, &vfs_aio_state->error)) {
+		return -1;
+	}
+	*vfs_aio_state = state->vfs_aio_state;
+	return state->bytes_read;
+}
 
 static ssize_t cephwrap_write(struct vfs_handle_struct *handle, files_struct *fsp, const void *data, size_t n)
 {
@@ -510,6 +561,58 @@ static ssize_t cephwrap_pwrite(struct vfs_handle_struct *handle, files_struct *f
 	WRAP_RETURN(result);
 }
 
+struct cephwrap_pwrite_state {
+	ssize_t bytes_written;
+	struct vfs_aio_state vfs_aio_state;
+};
+
+/*
+ * Fake up an async ceph write by calling the synchronous API.
+ */
+static struct tevent_req *cephwrap_pwrite_send(struct vfs_handle_struct *handle,
+					       TALLOC_CTX *mem_ctx,
+					       struct tevent_context *ev,
+					       struct files_struct *fsp,
+					       const void *data,
+					       size_t n, off_t offset)
+{
+	struct tevent_req *req = NULL;
+	struct cephwrap_pwrite_state *state = NULL;
+	int ret = -1;
+
+	DBG_DEBUG("[CEPH] %s\n", __func__);
+	req = tevent_req_create(mem_ctx, &state, struct cephwrap_pwrite_state);
+	if (req == NULL) {
+		return NULL;
+	}
+
+	ret = ceph_write(handle->data, fsp->fh->fd, data, n, offset);
+	if (ret < 0) {
+		/* ceph returns -errno on error. */
+		tevent_req_error(req, -ret);
+		return tevent_req_post(req, ev);
+	}
+
+	state->bytes_written = ret;
+	tevent_req_done(req);
+	/* Return and schedule the completion of the call. */
+	return tevent_req_post(req, ev);
+}
+
+static ssize_t cephwrap_pwrite_recv(struct tevent_req *req,
+				    struct vfs_aio_state *vfs_aio_state)
+{
+	struct cephwrap_pwrite_state *state =
+		tevent_req_data(req, struct cephwrap_pwrite_state);
+
+	DBG_DEBUG("[CEPH] %s\n", __func__);
+	if (tevent_req_is_unix_error(req, &vfs_aio_state->error)) {
+		return -1;
+	}
+	*vfs_aio_state = state->vfs_aio_state;
+	return state->bytes_written;
+}
+
 static off_t cephwrap_lseek(struct vfs_handle_struct *handle, files_struct *fsp, off_t offset, int whence)
 {
 	off_t result = 0;
@@ -571,7 +674,7 @@ static int cephwrap_fsync(struct vfs_handle_struct *handle, files_struct *fsp)
 }
 
 /*
- * Fake up an async ceph fsync by calling the sychronous API.
+ * Fake up an async ceph fsync by calling the synchronous API.
  */
 
 static struct tevent_req *cephwrap_fsync_send(struct vfs_handle_struct *handle,
@@ -1485,8 +1588,12 @@ static struct vfs_fn_pointers ceph_fns = {
 	.close_fn = cephwrap_close,
 	.read_fn = cephwrap_read,
 	.pread_fn = cephwrap_pread,
+	.pread_send_fn = cephwrap_pread_send,
+	.pread_recv_fn = cephwrap_pread_recv,
 	.write_fn = cephwrap_write,
 	.pwrite_fn = cephwrap_pwrite,
+	.pwrite_send_fn = cephwrap_pwrite_send,
+	.pwrite_recv_fn = cephwrap_pwrite_recv,
 	.lseek_fn = cephwrap_lseek,
 	.sendfile_fn = cephwrap_sendfile,
 	.recvfile_fn = cephwrap_recvfile,
-- 
2.17.0
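Editor's note: the following standalone program is not part of the patch. It is a minimal sketch of the same "fake async" tevent pattern the hooks above use: the work is done synchronously inside the _send() function, and tevent_req_post() schedules the completion so the caller's callback still fires from the event loop. The fake_pread_* names, the file argument, and the build line are assumptions made for illustration; only the libtevent/libtalloc calls themselves (tevent_req_create, tevent_req_post, tevent_req_poll, etc.) are real API.

/*
 * fake_async.c - hypothetical demo of the fake-async tevent pattern.
 * Assumed build line: cc fake_async.c $(pkg-config --cflags --libs tevent talloc)
 */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <talloc.h>
#include <tevent.h>

struct fake_pread_state {
	ssize_t bytes_read;
};

/* "Async" pread: a blocking pread() now, completion posted to the event loop. */
static struct tevent_req *fake_pread_send(TALLOC_CTX *mem_ctx,
					  struct tevent_context *ev,
					  int fd, void *buf,
					  size_t n, off_t offset)
{
	struct tevent_req *req = NULL;
	struct fake_pread_state *state = NULL;
	ssize_t ret;

	req = tevent_req_create(mem_ctx, &state, struct fake_pread_state);
	if (req == NULL) {
		return NULL;
	}

	ret = pread(fd, buf, n, offset);	/* the synchronous call */
	if (ret < 0) {
		tevent_req_error(req, errno);
		return tevent_req_post(req, ev);
	}

	state->bytes_read = ret;
	tevent_req_done(req);
	/* Return and schedule the completion of the call. */
	return tevent_req_post(req, ev);
}

static ssize_t fake_pread_recv(struct tevent_req *req, int *perrno)
{
	struct fake_pread_state *state =
		tevent_req_data(req, struct fake_pread_state);

	if (tevent_req_is_unix_error(req, perrno)) {
		return -1;
	}
	return state->bytes_read;
}

int main(int argc, char **argv)
{
	TALLOC_CTX *mem_ctx = talloc_new(NULL);
	struct tevent_context *ev = tevent_context_init(mem_ctx);
	struct tevent_req *req = NULL;
	char buf[64] = { 0 };
	int err = 0;
	int fd;
	ssize_t nread;

	if (argc < 2 || ev == NULL) {
		fprintf(stderr, "usage: %s <file>\n", argv[0]);
		talloc_free(mem_ctx);
		return 1;
	}

	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		talloc_free(mem_ctx);
		return 1;
	}

	req = fake_pread_send(mem_ctx, ev, fd, buf, sizeof(buf) - 1, 0);
	if (req == NULL || !tevent_req_poll(req, ev)) {
		fprintf(stderr, "request setup failed\n");
		close(fd);
		talloc_free(mem_ctx);
		return 1;
	}

	nread = fake_pread_recv(req, &err);
	if (nread < 0) {
		fprintf(stderr, "pread failed: %s\n", strerror(err));
	} else {
		printf("read %zd bytes: %s\n", nread, buf);
	}

	close(fd);
	talloc_free(mem_ctx);
	return 0;
}

The point of posting the completion rather than returning a result directly is that callers of a _send()/_recv() pair expect their callback to be invoked from the event loop; tevent_req_post() guarantees that even though the I/O itself still blocks. That is what the vfs_ceph hooks above provide, so SMB2 reads and writes no longer fall through to the vfs_default pread_send/pwrite_send implementations, which would issue kernel pread()/pwrite() calls on a libcephfs file handle that is not backed by a mounted filesystem.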
Flags: ddiss: review+