[Xen-devel] [PATCH v8 01/15] tools/libxc: Implement writev_exact() in the same style as write_exact()
This implementation of writev_exact() will cope with an iovcnt greater
than IOV_MAX, because glibc will actually let this work anyway, and it
is very useful not to have to worry about this in the caller of
writev_exact().  The caller is still required to ensure that the sum of
iov_len's doesn't overflow a ssize_t.

Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
CC: Ian Campbell <Ian.Campbell@xxxxxxxxxx>
CC: Ian Jackson <Ian.Jackson@xxxxxxxxxxxxx>
CC: Wei Liu <wei.liu2@xxxxxxxxxx>

---
v8: Extra comments indicating why we loop when we do
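
For reviewers, a minimal caller-side sketch of the intended usage (not
part of the patch; write_record() and its record layout are made up for
illustration, and it assumes xc_private.h is included for the
writev_exact() declaration):

#include <stdint.h>
#include <sys/uio.h>

/* Hypothetical helper: emit a { type, length } header followed by its
 * payload in a single call.  writev_exact() retries on EINTR, resubmits
 * the remainder after a partial write, and slices iov[] into chunks of
 * at most IOV_MAX entries, so the caller only has to ensure that the
 * total length fits in a ssize_t. */
static int write_record(int fd, uint32_t type,
                        const void *data, uint32_t length)
{
    struct { uint32_t type, length; } hdr = { type, length };
    struct iovec iov[] = {
        { .iov_base = &hdr,         .iov_len = sizeof(hdr) },
        { .iov_base = (void *)data, .iov_len = length },
    };

    return writev_exact(fd, iov, 2); /* 0 on success; -1, errno set */
}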
---
 tools/libxc/xc_private.c |   86 ++++++++++++++++++++++++++++++++++++++++++++++
 tools/libxc/xc_private.h |   14 ++++++++
 2 files changed, 100 insertions(+)

diff --git a/tools/libxc/xc_private.c b/tools/libxc/xc_private.c
index 2eb44b6..83ead5e 100644
--- a/tools/libxc/xc_private.c
+++ b/tools/libxc/xc_private.c
@@ -867,6 +867,92 @@ int write_exact(int fd, const void *data, size_t size)
     return 0;
 }
 
+#if defined(__MINIOS__)
+/*
+ * MiniOS's libc doesn't know about writev(). Implement it as multiple write()s.
+ */
+int writev_exact(int fd, const struct iovec *iov, int iovcnt)
+{
+    int rc, i;
+
+    for ( i = 0; i < iovcnt; ++i )
+    {
+        rc = write_exact(fd, iov[i].iov_base, iov[i].iov_len);
+        if ( rc )
+            return rc;
+    }
+
+    return 0;
+}
+#else
+int writev_exact(int fd, const struct iovec *iov, int iovcnt)
+{
+    struct iovec *local_iov = NULL;
+    int rc = 0, iov_idx = 0, saved_errno = 0;
+    ssize_t len;
+
+    while ( iov_idx < iovcnt )
+    {
+        /*
+         * Skip over iov[] entries with 0 length.
+         *
+         * This is needed to cover the case where we took a partial write and
+         * all remaining vectors are of 0 length. In such a case, the results
+         * from writev() are indistinguishable from EOF.
+         */
+        while ( iov[iov_idx].iov_len == 0 )
+            if ( ++iov_idx == iovcnt )
+                goto out;
+
+        len = writev(fd, &iov[iov_idx], min(iovcnt - iov_idx, IOV_MAX));
+        saved_errno = errno;
+
+        if ( (len == -1) && (errno == EINTR) )
+            continue;
+        if ( len <= 0 )
+        {
+            rc = -1;
+            goto out;
+        }
+
+        /* Check iov[] to see whether we had a partial or complete write. */
+        while ( (len > 0) && (iov_idx < iovcnt) )
+        {
+            if ( len >= iov[iov_idx].iov_len )
+                len -= iov[iov_idx++].iov_len;
+            else
+            {
+                /* Partial write of iov[iov_idx]. Copy iov so we can adjust
+                 * element iov_idx and resubmit the rest. */
+                if ( !local_iov )
+                {
+                    local_iov = malloc(iovcnt * sizeof(*iov));
+                    if ( !local_iov )
+                    {
+                        saved_errno = ENOMEM;
+                        rc = -1;
+                        goto out;
+                    }
+
+                    iov = memcpy(local_iov, iov, iovcnt * sizeof(*iov));
+                }
+
+                local_iov[iov_idx].iov_base += len;
+                local_iov[iov_idx].iov_len -= len;
+                break;
+            }
+        }
+    }
+
+    saved_errno = 0;
+
+ out:
+    free(local_iov);
+    errno = saved_errno;
+    return rc;
+}
+#endif
+
 int xc_ffs8(uint8_t x)
 {
     int i;
diff --git a/tools/libxc/xc_private.h b/tools/libxc/xc_private.h
index 45b8644..f74f7d7 100644
--- a/tools/libxc/xc_private.h
+++ b/tools/libxc/xc_private.h
@@ -42,6 +42,19 @@
 #define VALGRIND_MAKE_MEM_UNDEFINED(addr, len) /* addr, len */
 #endif
 
+#if defined(__MINIOS__)
+/*
+ * MiniOS's libc doesn't know about sys/uio.h or writev().
+ * Declare enough of sys/uio.h to compile.
+ */
+struct iovec {
+    void *iov_base;
+    size_t iov_len;
+};
+#else
+#include <sys/uio.h>
+#endif
+
 #define DECLARE_HYPERCALL privcmd_hypercall_t hypercall
 #define DECLARE_DOMCTL struct xen_domctl domctl
 #define DECLARE_SYSCTL struct xen_sysctl sysctl
@@ -395,6 +408,7 @@ int xc_add_mmu_update(xc_interface *xch, struct xc_mmu *mmu,
 /* Return 0 on success; -1 on error setting errno. */
 int read_exact(int fd, void *data, size_t size); /* EOF => -1, errno=0 */
 int write_exact(int fd, const void *data, size_t size);
+int writev_exact(int fd, const struct iovec *iov, int iovcnt);
 
 int xc_ffs8(uint8_t x);
 int xc_ffs16(uint16_t x);
-- 
1.7.10.4