[Xen-devel] The hypercall fails and returns EFAULT when the page becomes COW after forking a process in Linux
Hi everybody,

I have run into a problem. In dom0, I invoke the xc_domain_set_pod_target hypercall in one thread while another thread forks a process, and the hypercall returns EFAULT because copy_to_user() fails in the hypervisor. When the process forks, the page holding the hypercall argument becomes copy-on-write (COW), so copy_to_user() takes a write-protection page fault and returns EFAULT.

Does anyone have ideas on how to deal with this?
I wrote a small program to reproduce it:
#include <unistd.h>
#include <sys/ioctl.h>
#include <stdlib.h>
#include <malloc.h>
#include <assert.h>
#include <string.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdint.h>
#include <sys/types.h>
#include <xen/sys/privcmd.h>
#include <errno.h>

typedef unsigned long xen_pfn_t;
typedef unsigned short domid_t;

#define BUFFER_SIZE 4096

struct xen_pod_target {
    /* IN */
    uint64_t target_pages;
    /* OUT */
    uint64_t tot_pages;
    uint64_t pod_cache_pages;
    uint64_t pod_entries;
    /* IN */
    domid_t domid;
};

#define PRIVCMD_INTF           "/proc/xen/privcmd"
#define XENMEM_get_pod_target  17
#define __HYPERVISOR_memory_op 12

int main(int argc, char *argv[])
{
    int err;
    struct xen_pod_target *pod = NULL;
    int fd;
    privcmd_hypercall_t hypercall;

    fd = open(PRIVCMD_INTF, O_RDWR);
    assert(0 < fd);

    /* Page-aligned buffer used as the hypercall argument. */
    pod = (struct xen_pod_target *) memalign(4096, BUFFER_SIZE);
    assert(pod != NULL);
    memset(pod, 0, BUFFER_SIZE);
    pod->domid = 0;
    printf("pid = %d, pod = %p\n", getpid(), (void *) pod);

    hypercall.op = __HYPERVISOR_memory_op;
    hypercall.arg[0] = (unsigned long) XENMEM_get_pod_target;
    hypercall.arg[1] = (unsigned long) pod;

    /* fork() makes the buffer's page copy-on-write in the parent; the
     * hypercall below then fails in copy_to_user() with EFAULT. */
    if (0 == fork())
        exit(0);

    err = ioctl(fd, IOCTL_PRIVCMD_HYPERCALL, &hypercall);
    if (0 > err)
        printf("Failed get_pod_target dom0, errno = %d\n", errno);

    return 0;
}
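
For what it's worth, one idea I am looking at is to keep the hypercall buffer out of the child at fork() time with madvise(MADV_DONTFORK), so the parent's mapping is never marked copy-on-write in the first place. Below is a minimal, untested sketch; the helper name alloc_hypercall_buffer is just for illustration:

#define _DEFAULT_SOURCE /* for MADV_DONTFORK under strict -std= modes */
#include <stddef.h>
#include <sys/mman.h>

/* Hypothetical helper: map a page-aligned buffer for hypercall arguments and
 * mark it MADV_DONTFORK, so a concurrent fork() in another thread does not
 * turn the parent's mapping into a COW page. */
static void *alloc_hypercall_buffer(size_t size)
{
    void *buf = mmap(NULL, size, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (buf == MAP_FAILED)
        return NULL;

    /* The range is simply not inherited by the child, so the parent's PTEs
     * stay writable across fork() and the hypervisor's copy-back succeeds. */
    if (madvise(buf, size, MADV_DONTFORK) != 0) {
        munmap(buf, size);
        return NULL;
    }
    return buf;
}

In the repro above, pod would then come from this helper instead of a bare memalign(). As far as I can tell, libxc's hypercall buffer code does something similar, but I would appreciate confirmation that this is the intended way to handle it.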