[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Xen-devel] [PATCH 1/5] x86/HVM: properly handle backward string instruction emulation



On 30/09/13 13:57, Jan Beulich wrote:
Multiplying a signed 32-bit quantity with an unsigned 32-bit quantity
produces an unsigned 32-bit result, yet for emulation of backward
string instructions we need the result sign extended before getting
added to the base address.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>

Reviewed-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>


--- a/xen/arch/x86/hvm/intercept.c
+++ b/xen/arch/x86/hvm/intercept.c
@@ -48,7 +48,7 @@ static int hvm_mmio_access(struct vcpu *
                            hvm_mmio_write_t write_handler)
 {
     unsigned long data;
-    int rc = X86EMUL_OKAY, i, sign = p->df ? -1 : 1;
+    int rc = X86EMUL_OKAY, i, step = p->df ? -p->size : p->size;
 
     if ( !p->data_is_ptr )
     {
@@ -68,13 +68,10 @@ static int hvm_mmio_access(struct vcpu *
         {
             int ret;
 
-            rc = read_handler(v, p->addr + (sign * i * p->size), p->size,
-                              &data);
+            rc = read_handler(v, p->addr + step * i, p->size, &data);
             if ( rc != X86EMUL_OKAY )
                 break;
-            ret = hvm_copy_to_guest_phys(p->data + (sign * i * p->size),
-                                         &data,
-                                         p->size);
+            ret = hvm_copy_to_guest_phys(p->data + step * i, &data, p->size);
             if ( (ret == HVMCOPY_gfn_paged_out) || 
                  (ret == HVMCOPY_gfn_shared) )
             {
@@ -87,8 +84,7 @@ static int hvm_mmio_access(struct vcpu *
     {
         for ( i = 0; i < p->count; i++ )
         {
-            switch ( hvm_copy_from_guest_phys(&data,
-                                              p->data + sign * i * p->size,
+            switch ( hvm_copy_from_guest_phys(&data, p->data + step * i,
                                               p->size) )
             {
             case HVMCOPY_okay:
@@ -109,8 +105,7 @@ static int hvm_mmio_access(struct vcpu *
             }
             if ( rc != X86EMUL_OKAY )
                 break;
-            rc = write_handler(v, p->addr + (sign * i * p->size), p->size,
-                               data);
+            rc = write_handler(v, p->addr + step * i, p->size, data);
             if ( rc != X86EMUL_OKAY )
                 break;
         }
@@ -142,7 +137,7 @@ int hvm_mmio_intercept(ioreq_t *p)
 
 static int process_portio_intercept(portio_action_t action, ioreq_t *p)
 {
-    int rc = X86EMUL_OKAY, i, sign = p->df ? -1 : 1;
+    int rc = X86EMUL_OKAY, i, step = p->df ? -p->size : p->size;
     uint32_t data;
 
     if ( !p->data_is_ptr )
@@ -167,8 +162,7 @@ static int process_portio_intercept(port
             rc = action(IOREQ_READ, p->addr, p->size, &data);
             if ( rc != X86EMUL_OKAY )
                 break;
-            (void)hvm_copy_to_guest_phys(p->data + sign*i*p->size,
-                                         &data, p->size);
+            (void)hvm_copy_to_guest_phys(p->data + step * i, &data, p->size);
         }
     }
     else /* p->dir == IOREQ_WRITE */
@@ -176,8 +170,7 @@ static int process_portio_intercept(port
         for ( i = 0; i < p->count; i++ )
         {
             data = 0;
-            switch ( hvm_copy_from_guest_phys(&data,
-                                              p->data + sign * i * p->size,
+            switch ( hvm_copy_from_guest_phys(&data, p->data + step * i,
                                               p->size) )
             {
             case HVMCOPY_okay:
--- a/xen/arch/x86/hvm/io.c
+++ b/xen/arch/x86/hvm/io.c
@@ -294,7 +294,7 @@ void hvm_io_assist(void)
 
 static int dpci_ioport_read(uint32_t mport, ioreq_t *p)
 {
-    int i, sign = p->df ? -1 : 1;
+    int i, step = p->df ? -p->size : p->size;
     uint32_t data = 0;
 
     for ( i = 0; i < p->count; i++ )
@@ -317,8 +317,7 @@ static int dpci_ioport_read(uint32_t mpo
         if ( p->data_is_ptr )
         {
             int ret;
-            ret = hvm_copy_to_guest_phys(p->data + (sign * i * p->size), &data,
-                                         p->size);
+            ret = hvm_copy_to_guest_phys(p->data + step * i, &data, p->size);
             if ( (ret == HVMCOPY_gfn_paged_out) ||
                  (ret == HVMCOPY_gfn_shared) )
                 return X86EMUL_RETRY;
@@ -332,7 +331,7 @@ static int dpci_ioport_read(uint32_t mpo
 
 static int dpci_ioport_write(uint32_t mport, ioreq_t *p)
 {
-    int i, sign = p->df ? -1 : 1;
+    int i, step = p->df ? -p->size : p->size;
     uint32_t data;
 
     for ( i = 0; i < p->count; i++ )
@@ -340,8 +339,7 @@ static int dpci_ioport_write(uint32_t mp
         data = 0;
         if ( p->data_is_ptr )
         {
-            switch ( hvm_copy_from_guest_phys(&data,
-                                              p->data + sign * i * p->size,
+            switch ( hvm_copy_from_guest_phys(&data, p->data + step * i,
                                               p->size) )
             {
             case HVMCOPY_okay:
--- a/xen/arch/x86/hvm/stdvga.c
+++ b/xen/arch/x86/hvm/stdvga.c
@@ -467,15 +467,17 @@ static uint32_t read_data;
 static int mmio_move(struct hvm_hw_stdvga *s, ioreq_t *p)
 {
     int i;
-    int sign = p->df ? -1 : 1;
+    uint64_t addr = p->addr;
     p2m_type_t p2mt;
     struct domain *d = current->domain;
 
     if ( p->data_is_ptr )
     {
+        uint64_t data = p->data, tmp;
+        int step = p->df ? -p->size : p->size;
+
         if ( p->dir == IOREQ_READ )
         {
-            uint64_t addr = p->addr, data = p->data, tmp;
             for ( i = 0; i < p->count; i++ ) 
             {
                 tmp = stdvga_mem_read(addr, p->size);
@@ -498,13 +500,12 @@ static int mmio_move(struct hvm_hw_stdvg
                     ASSERT(!dp);
                     stdvga_mem_write(data, tmp, p->size);
                 }
-                data += sign * p->size;
-                addr += sign * p->size;
+                data += step;
+                addr += step;
             }
         }
         else
         {
-            uint32_t addr = p->addr, data = p->data, tmp;
             for ( i = 0; i < p->count; i++ )
             {
                 if ( hvm_copy_from_guest_phys(&tmp, data, p->size) !=
@@ -523,31 +524,18 @@ static int mmio_move(struct hvm_hw_stdvg
                     tmp = stdvga_mem_read(data, p->size);
                 }
                 stdvga_mem_write(addr, tmp, p->size);
-                data += sign * p->size;
-                addr += sign * p->size;
+                data += step;
+                addr += step;
             }
         }
     }
     else
     {
+        ASSERT(p->count == 1);
         if ( p->dir == IOREQ_READ )
-        {
-            uint32_t addr = p->addr;
-            for ( i = 0; i < p->count; i++ )
-            {
-                p->data = stdvga_mem_read(addr, p->size);
-                addr += sign * p->size;
-            }
-        }
+            p->data = stdvga_mem_read(addr, p->size);
         else
-        {
-            uint32_t addr = p->addr;
-            for ( i = 0; i < p->count; i++ )
-            {
-                stdvga_mem_write(addr, p->data, p->size);
-                addr += sign * p->size;
-            }
-        }
+            stdvga_mem_write(addr, p->data, p->size);
     }
 
     read_data = p->data;




_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.