[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH V2] xl: Apply CLOEXEC to the restore_fd.
At restore time, the file descriptor opened on the migration state file is still open in the device model. Let's apply FD_CLOEXEC to it. This patch provides libxl_fd_set_cloexec to users of libxl, instead of keeping this function internal. Signed-off-by: Anthony PERARD <anthony.perard@xxxxxxxxxx> --- Change: - this time, apply cloexec only to the opened migration state file and not to the fd provided by the caller. tools/libxl/libxl.c | 13 +++++++++++++ tools/libxl/libxl.h | 3 +++ tools/libxl/libxl_internal.c | 13 ------------- tools/libxl/libxl_internal.h | 1 - tools/libxl/libxl_qmp.c | 2 +- tools/libxl/xl_cmdimpl.c | 8 ++++++-- 6 files changed, 23 insertions(+), 17 deletions(-) diff --git a/tools/libxl/libxl.c b/tools/libxl/libxl.c index a171142..de35adf 100644 --- a/tools/libxl/libxl.c +++ b/tools/libxl/libxl.c @@ -3330,6 +3330,19 @@ int libxl_cpupool_movedomain(libxl_ctx *ctx, uint32_t poolid, uint32_t domid) return 0; } +int libxl_fd_set_cloexec(int fd) +{ + int flags = 0; + + if ((flags = fcntl(fd, F_GETFD)) == -1) { + flags = 0; + } + if ((flags & FD_CLOEXEC)) { + return 0; + } + return fcntl(fd, F_SETFD, flags | FD_CLOEXEC); +} + /* * Local variables: * mode: C diff --git a/tools/libxl/libxl.h b/tools/libxl/libxl.h index 289dc85..8e42822 100644 --- a/tools/libxl/libxl.h +++ b/tools/libxl/libxl.h @@ -635,6 +635,9 @@ const char *libxl_lock_dir_path(void); const char *libxl_run_dir_path(void); const char *libxl_xenpaging_dir_path(void); +/* misc */ +int libxl_fd_set_cloexec(int fd); + #endif /* LIBXL_H */ /* diff --git a/tools/libxl/libxl_internal.c b/tools/libxl/libxl_internal.c index 34edaf3..028f90f 100644 --- a/tools/libxl/libxl_internal.c +++ b/tools/libxl/libxl_internal.c @@ -306,19 +306,6 @@ _hidden int libxl__compare_macs(libxl_mac *a, libxl_mac *b) return 0; } -int libxl__fd_set_cloexec(int fd) -{ - int flags = 0; - - if ((flags = fcntl(fd, F_GETFD)) == -1) { - flags = 0; - } - if ((flags & FD_CLOEXEC)) { - return 0; - } - return fcntl(fd, F_SETFD, flags | FD_CLOEXEC); -} - libxl_device_model_version libxl__device_model_version_running(libxl__gc *gc, uint32_t domid) { diff --git a/tools/libxl/libxl_internal.h b/tools/libxl/libxl_internal.h index 84da6b1..6ce34fd 100644 --- a/tools/libxl/libxl_internal.h +++ b/tools/libxl/libxl_internal.h @@ -503,7 +503,6 @@ _hidden int libxl__error_set(libxl__gc *gc, int code); _hidden int libxl__file_reference_map(libxl_file_reference *f); _hidden int libxl__file_reference_unmap(libxl_file_reference *f); -_hidden int libxl__fd_set_cloexec(int fd); _hidden int libxl__e820_alloc(libxl__gc *gc, uint32_t domid, libxl_domain_config *d_config); diff --git a/tools/libxl/libxl_qmp.c b/tools/libxl/libxl_qmp.c index f749e01..66a0134 100644 --- a/tools/libxl/libxl_qmp.c +++ b/tools/libxl/libxl_qmp.c @@ -324,7 +324,7 @@ static int qmp_open(libxl__qmp_handler *qmp, const char *qmp_socket_path, if (fcntl(qmp->qmp_fd, F_SETFL, flags | O_NONBLOCK) == -1) { return -1; } - libxl__fd_set_cloexec(qmp->qmp_fd); + libxl_fd_set_cloexec(qmp->qmp_fd); memset(&qmp->addr, 0, sizeof (&qmp->addr)); qmp->addr.sun_family = AF_UNIX; diff --git a/tools/libxl/xl_cmdimpl.c b/tools/libxl/xl_cmdimpl.c index f1e729c..97141d1 100644 --- a/tools/libxl/xl_cmdimpl.c +++ b/tools/libxl/xl_cmdimpl.c @@ -1459,8 +1459,12 @@ static int create_domain(struct domain_create *dom_info) union { uint32_t u32; char b[4]; } u32buf; uint32_t badflags; - restore_fd = migrate_fd >= 0 ? migrate_fd : - open(restore_file, O_RDONLY); + if (migrate_fd >= 0) { + restore_fd = migrate_fd; + } else { + restore_fd = open(restore_file, O_RDONLY); + libxl_fd_set_cloexec(restore_fd); + } CHK_ERRNO( libxl_read_exactly(ctx, restore_fd, &hdr, sizeof(hdr), restore_file, "header") ); -- tg: (4ed28e3..) fix/close-migration-state (depends on: master) _______________________________________________ Xen-devel mailing list Xen-devel@xxxxxxxxxxxxxxxxxxx http://lists.xensource.com/xen-devel
|
Lists.xenproject.org is hosted with RackSpace, monitoring our |