[Xen-devel] [PATCH 1/2] cpu_ioreq_pio, cpu_ioreq_move: introduce read_phys_req_item, write_phys_req_item
Replace a lot of formulaic multiplications (containing casts, no less)
with calls to a pair of functions.  This encapsulates in a single place
the operations which require care relating to integer overflow.

Cc: Dongxiao Xu <dongxiao.xu@xxxxxxxxx>
Cc: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>
Signed-off-by: Ian Jackson <ian.jackson@xxxxxxxxxxxxx>
---
 xen-all.c |   73 ++++++++++++++++++++++++++++++++++++-------------------------
 1 files changed, 43 insertions(+), 30 deletions(-)

diff --git a/xen-all.c b/xen-all.c
index 046cc2a..97c8ef4 100644
--- a/xen-all.c
+++ b/xen-all.c
@@ -682,11 +682,42 @@ static void do_outp(pio_addr_t addr,
     }
 }
 
-static void cpu_ioreq_pio(ioreq_t *req)
+/*
+ * Helper functions which read/write an object from/to physical guest
+ * memory, as part of the implementation of an ioreq.
+ *
+ * Equivalent to
+ *   cpu_physical_memory_rw(addr + (req->df ? -1 : +1) * req->size * i,
+ *                          val, req->size, 0/1)
+ * except without the integer overflow problems.
+ */
+static void rw_phys_req_item(hwaddr addr,
+                             ioreq_t *req, uint32_t i, void *val, int rw)
+{
+    /* Do everything unsigned so overflow just results in a truncated result
+     * and accesses to undesired parts of guest memory, which is up
+     * to the guest */
+    hwaddr offset = (hwaddr)req->size * i;
+    if (req->df) addr -= offset;
+    else addr += offset;
+    cpu_physical_memory_rw(addr, val, req->size, rw);
+}
+
+static inline void read_phys_req_item(hwaddr addr,
+                                      ioreq_t *req, uint32_t i, void *val)
+{
+    rw_phys_req_item(addr, req, i, val, 0);
+}
+static inline void write_phys_req_item(hwaddr addr,
+                                       ioreq_t *req, uint32_t i, void *val)
 {
-    int i, sign;
+    rw_phys_req_item(addr, req, i, val, 1);
+}
 
-    sign = req->df ? -1 : 1;
+
+static void cpu_ioreq_pio(ioreq_t *req)
+{
+    int i;
 
     if (req->dir == IOREQ_READ) {
         if (!req->data_is_ptr) {
@@ -696,9 +727,7 @@ static void cpu_ioreq_pio(ioreq_t *req)
 
             for (i = 0; i < req->count; i++) {
                 tmp = do_inp(req->addr, req->size);
-                cpu_physical_memory_write(
-                        req->data + (sign * i * (int64_t)req->size),
-                        (uint8_t *) &tmp, req->size);
+                write_phys_req_item(req->data, req, i, &tmp);
             }
         }
     } else if (req->dir == IOREQ_WRITE) {
@@ -708,9 +737,7 @@ static void cpu_ioreq_pio(ioreq_t *req)
             for (i = 0; i < req->count; i++) {
                 uint32_t tmp = 0;
 
-                cpu_physical_memory_read(
-                        req->data + (sign * i * (int64_t)req->size),
-                        (uint8_t*) &tmp, req->size);
+                read_phys_req_item(req->data, req, i, &tmp);
                 do_outp(req->addr, req->size, tmp);
             }
         }
@@ -719,22 +746,16 @@ static void cpu_ioreq_pio(ioreq_t *req)
 
 static void cpu_ioreq_move(ioreq_t *req)
 {
-    int i, sign;
-
-    sign = req->df ? -1 : 1;
+    int i;
 
     if (!req->data_is_ptr) {
         if (req->dir == IOREQ_READ) {
             for (i = 0; i < req->count; i++) {
-                cpu_physical_memory_read(
-                        req->addr + (sign * i * (int64_t)req->size),
-                        (uint8_t *) &req->data, req->size);
+                read_phys_req_item(req->addr, req, i, &req->data);
             }
         } else if (req->dir == IOREQ_WRITE) {
            for (i = 0; i < req->count; i++) {
-                cpu_physical_memory_write(
-                        req->addr + (sign * i * (int64_t)req->size),
-                        (uint8_t *) &req->data, req->size);
+                write_phys_req_item(req->addr, req, i, &req->data);
            }
        }
    } else {
@@ -742,21 +763,13 @@ static void cpu_ioreq_move(ioreq_t *req)
 
         if (req->dir == IOREQ_READ) {
             for (i = 0; i < req->count; i++) {
-                cpu_physical_memory_read(
-                        req->addr + (sign * i * (int64_t)req->size),
-                        (uint8_t*) &tmp, req->size);
-                cpu_physical_memory_write(
-                        req->data + (sign * i * (int64_t)req->size),
-                        (uint8_t*) &tmp, req->size);
+                read_phys_req_item(req->addr, req, i, &tmp);
+                write_phys_req_item(req->data, req, i, &tmp);
             }
         } else if (req->dir == IOREQ_WRITE) {
             for (i = 0; i < req->count; i++) {
-                cpu_physical_memory_read(
-                        req->data + (sign * i * (int64_t)req->size),
-                        (uint8_t*) &tmp, req->size);
-                cpu_physical_memory_write(
-                        req->addr + (sign * i * (int64_t)req->size),
-                        (uint8_t*) &tmp, req->size);
+                read_phys_req_item(req->data, req, i, &tmp);
+                write_phys_req_item(req->addr, req, i, &tmp);
             }
         }
     }
-- 
1.7.2.5
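
For illustration only (this sketch is not part of the patch): the
open-coded form relies on every call site remembering the (int64_t)
cast; drop it once and the multiplication is performed in 32 bits and
wraps before it is widened.  rw_phys_req_item does the widening in
exactly one place.  A minimal standalone example, assuming hwaddr is a
64-bit unsigned type as in QEMU; the values are made up:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t hwaddr;            /* stand-in for QEMU's hwaddr */

    int main(void)
    {
        uint32_t size = 0x1000;         /* plays the role of req->size */
        uint32_t i = 0x200000;          /* plays the role of the loop index */

        /* Without widening, the product is computed in 32 bits and wraps:
         * 0x1000 * 0x200000 = 0x200000000, which truncates to 0. */
        uint32_t wrapped = size * i;

        /* Widening one operand first, as rw_phys_req_item does, keeps
         * the full 64-bit offset. */
        hwaddr offset = (hwaddr)size * i;

        printf("32-bit product: 0x%08" PRIx32 "\n", wrapped);
        printf("64-bit product: 0x%" PRIx64 "\n", offset);
        return 0;
    }

Built with any C99 compiler, this prints 0x00000000 for the 32-bit
product and 0x200000000 for the widened one, which is exactly the
class of truncation the helpers centralise.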