From 8496ac644b541c728e2e2b21b2f297e4e7166fae Mon Sep 17 00:00:00 2001
From: David Disseldorp
Date: Wed, 9 May 2018 16:51:34 +0200
Subject: [PATCH] vfs_ceph: add fake async pwrite/pread send/recv hooks

As found by Jeremy, VFS modules that don't provide pread_send() or
pwrite_send() hooks result in vfs_default fallback, which is
catastrophic for VFS modules with non-mounted filesystems such as
vfs_ceph.

Bug: https://bugzilla.samba.org/show_bug.cgi?id=13425

Reported-by: Jeremy Allison
Signed-off-by: David Disseldorp
Reviewed-by: Jeremy Allison
(cherry picked from commit f0e6453b0420fe9d062936d4ddc05f44b40cf2ba)
---
 source3/modules/vfs_ceph.c | 109 ++++++++++++++++++++++++++++++++++++-
 1 file changed, 108 insertions(+), 1 deletion(-)

diff --git a/source3/modules/vfs_ceph.c b/source3/modules/vfs_ceph.c
index 61df5dedf82..857310c2ac3 100644
--- a/source3/modules/vfs_ceph.c
+++ b/source3/modules/vfs_ceph.c
@@ -482,6 +482,57 @@ static ssize_t cephwrap_pread(struct vfs_handle_struct *handle, files_struct *fs
 	WRAP_RETURN(result);
 }
 
+struct cephwrap_pread_state {
+	ssize_t bytes_read;
+	struct vfs_aio_state vfs_aio_state;
+};
+
+/*
+ * Fake up an async ceph read by calling the synchronous API.
+ */
+static struct tevent_req *cephwrap_pread_send(struct vfs_handle_struct *handle,
+					      TALLOC_CTX *mem_ctx,
+					      struct tevent_context *ev,
+					      struct files_struct *fsp,
+					      void *data,
+					      size_t n, off_t offset)
+{
+	struct tevent_req *req = NULL;
+	struct cephwrap_pread_state *state = NULL;
+	int ret = -1;
+
+	DBG_DEBUG("[CEPH] %s\n", __func__);
+	req = tevent_req_create(mem_ctx, &state, struct cephwrap_pread_state);
+	if (req == NULL) {
+		return NULL;
+	}
+
+	ret = ceph_read(handle->data, fsp->fh->fd, data, n, offset);
+	if (ret < 0) {
+		/* ceph returns -errno on error. */
+		tevent_req_error(req, -ret);
+		return tevent_req_post(req, ev);
+	}
+
+	state->bytes_read = ret;
+	tevent_req_done(req);
+	/* Return and schedule the completion of the call. */
+	return tevent_req_post(req, ev);
+}
+
+static ssize_t cephwrap_pread_recv(struct tevent_req *req,
+				   struct vfs_aio_state *vfs_aio_state)
+{
+	struct cephwrap_pread_state *state =
+		tevent_req_data(req, struct cephwrap_pread_state);
+
+	DBG_DEBUG("[CEPH] %s\n", __func__);
+	if (tevent_req_is_unix_error(req, &vfs_aio_state->error)) {
+		return -1;
+	}
+	*vfs_aio_state = state->vfs_aio_state;
+	return state->bytes_read;
+}
 static ssize_t cephwrap_write(struct vfs_handle_struct *handle, files_struct *fsp, const void *data, size_t n)
 {
 	ssize_t result;
@@ -510,6 +561,58 @@ static ssize_t cephwrap_pwrite(struct vfs_handle_struct *handle, files_struct *f
 	WRAP_RETURN(result);
 }
 
+struct cephwrap_pwrite_state {
+	ssize_t bytes_written;
+	struct vfs_aio_state vfs_aio_state;
+};
+
+/*
+ * Fake up an async ceph write by calling the synchronous API.
+ */
+static struct tevent_req *cephwrap_pwrite_send(struct vfs_handle_struct *handle,
+					       TALLOC_CTX *mem_ctx,
+					       struct tevent_context *ev,
+					       struct files_struct *fsp,
+					       const void *data,
+					       size_t n, off_t offset)
+{
+	struct tevent_req *req = NULL;
+	struct cephwrap_pwrite_state *state = NULL;
+	int ret = -1;
+
+	DBG_DEBUG("[CEPH] %s\n", __func__);
+	req = tevent_req_create(mem_ctx, &state, struct cephwrap_pwrite_state);
+	if (req == NULL) {
+		return NULL;
+	}
+
+	ret = ceph_write(handle->data, fsp->fh->fd, data, n, offset);
+	if (ret < 0) {
+		/* ceph returns -errno on error. */
+		tevent_req_error(req, -ret);
+		return tevent_req_post(req, ev);
+	}
+
+	state->bytes_written = ret;
+	tevent_req_done(req);
+	/* Return and schedule the completion of the call. */
+	return tevent_req_post(req, ev);
+}
+
+static ssize_t cephwrap_pwrite_recv(struct tevent_req *req,
+				    struct vfs_aio_state *vfs_aio_state)
+{
+	struct cephwrap_pwrite_state *state =
+		tevent_req_data(req, struct cephwrap_pwrite_state);
+
+	DBG_DEBUG("[CEPH] %s\n", __func__);
+	if (tevent_req_is_unix_error(req, &vfs_aio_state->error)) {
+		return -1;
+	}
+	*vfs_aio_state = state->vfs_aio_state;
+	return state->bytes_written;
+}
+
 static off_t cephwrap_lseek(struct vfs_handle_struct *handle, files_struct *fsp, off_t offset, int whence)
 {
 	off_t result = 0;
@@ -571,7 +674,7 @@ static int cephwrap_fsync(struct vfs_handle_struct *handle, files_struct *fsp)
 }
 
 /*
- * Fake up an async ceph fsync by calling the sychronous API.
+ * Fake up an async ceph fsync by calling the synchronous API.
  */
 
 static struct tevent_req *cephwrap_fsync_send(struct vfs_handle_struct *handle,
@@ -1485,8 +1588,12 @@ static struct vfs_fn_pointers ceph_fns = {
 	.close_fn = cephwrap_close,
 	.read_fn = cephwrap_read,
 	.pread_fn = cephwrap_pread,
+	.pread_send_fn = cephwrap_pread_send,
+	.pread_recv_fn = cephwrap_pread_recv,
 	.write_fn = cephwrap_write,
 	.pwrite_fn = cephwrap_pwrite,
+	.pwrite_send_fn = cephwrap_pwrite_send,
+	.pwrite_recv_fn = cephwrap_pwrite_recv,
 	.lseek_fn = cephwrap_lseek,
 	.sendfile_fn = cephwrap_sendfile,
 	.recvfile_fn = cephwrap_recvfile,
-- 
2.17.0
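The hooks added above follow tevent's usual "fake async" pattern: the I/O is done
synchronously inside the *_send() function, the result is stashed in the request
state, and tevent_req_post() queues the already-completed request so the caller's
callback still fires from the event loop. As a rough, illustrative sketch of how a
caller drives such a send/recv pair, the standalone program below applies the same
pattern to a plain POSIX pread(); the demo_pread_* names, the file path, and the
int-errno recv signature are assumptions for the example, not part of the patch or
of Samba's VFS interface.

/*
 * Illustrative sketch only: fake-async pread over a local fd, plus a caller
 * that drives the request to completion.
 * Build (assumed): cc demo.c $(pkg-config --cflags --libs tevent talloc)
 */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <talloc.h>
#include <tevent.h>

struct demo_pread_state {
	ssize_t bytes_read;
};

static struct tevent_req *demo_pread_send(TALLOC_CTX *mem_ctx,
					  struct tevent_context *ev,
					  int fd, void *buf,
					  size_t n, off_t offset)
{
	struct tevent_req *req = NULL;
	struct demo_pread_state *state = NULL;
	ssize_t ret;

	req = tevent_req_create(mem_ctx, &state, struct demo_pread_state);
	if (req == NULL) {
		return NULL;
	}

	/* Do the I/O synchronously, as the vfs_ceph hooks do with ceph_read(). */
	ret = pread(fd, buf, n, offset);
	if (ret < 0) {
		tevent_req_error(req, errno);
		return tevent_req_post(req, ev);
	}

	state->bytes_read = ret;
	tevent_req_done(req);
	/* Post the already-completed request onto the event loop. */
	return tevent_req_post(req, ev);
}

static ssize_t demo_pread_recv(struct tevent_req *req, int *perrno)
{
	struct demo_pread_state *state =
		tevent_req_data(req, struct demo_pread_state);

	if (tevent_req_is_unix_error(req, perrno)) {
		return -1;
	}
	return state->bytes_read;
}

int main(void)
{
	TALLOC_CTX *mem_ctx = talloc_new(NULL);
	struct tevent_context *ev = tevent_context_init(mem_ctx);
	char buf[128] = { 0 };
	int err = 0;
	int fd = open("/etc/hostname", O_RDONLY);
	struct tevent_req *req;
	ssize_t nread;

	req = demo_pread_send(mem_ctx, ev, fd, buf, sizeof(buf) - 1, 0);
	/* tevent_req_poll() runs the event loop until the request completes. */
	if (req == NULL || !tevent_req_poll(req, ev)) {
		talloc_free(mem_ctx);
		return 1;
	}
	nread = demo_pread_recv(req, &err);
	printf("read %zd bytes (err=%d): %s\n", nread, err, buf);

	if (fd >= 0) {
		close(fd);
	}
	talloc_free(mem_ctx);
	return 0;
}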