
[XenPPC] [xenppc-unstable] [ppc] clean up VMX S&R code



# HG changeset patch
# User Jimi Xenidis <jimix@xxxxxxxxxxxxxx>
# Node ID 1cef59998284f834c96b66331a359f35b0baf99a
# Parent  7b25f1309eb19dc548fc5c1359b584b7eda23887
[ppc] clean up VMX S&R code

This patch clarifies a poorly written comment and uses a "0" where
"r0" had been written even though we were not referring to a register.
Thanks to <segher@xxxxxxxxxxxxxxxxxxx> for the suggestions.

Signed-off-by: Jimi Xenidis <jimix@xxxxxxxxxxxxxx>
---
 xen/arch/ppc/float.S |  145 ++++++++++++++++++++++++---------------------------
 1 files changed, 70 insertions(+), 75 deletions(-)
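
A note on the "0" vs "r0" change described above (editorial annotation,
not part of the patch): the X-form VMX loads and stores compute the
effective address as (RA|0) + (RB), so a zero in the RA field means the
literal value 0 rather than the contents of r0.  Spelling the operand
"0" therefore leaves the encoding unchanged and only makes the intent
explicit.  A minimal sketch of the two equivalent spellings, using the
same r3/VCPU_vr() layout as the code below:

        addi    r0,r3,VCPU_vr(0)        /* r0 = address of slot 0      */
        stvxl   vr0,r0,r0               /* RA field is 0 => EA = (r0)  */
        stvxl   vr0,0,r0                /* same instruction, explicit  */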

diff -r 7b25f1309eb1 -r 1cef59998284 xen/arch/ppc/float.S
--- a/xen/arch/ppc/float.S      Tue Jun 13 15:14:57 2006 -0400
+++ b/xen/arch/ppc/float.S      Wed Jun 14 12:31:28 2006 -0400
@@ -105,27 +105,23 @@ load_fp:
 
 #define VCPU_vr(n) (VCPU_vrs + ((n) * 16))
 
-/* So you might think that we could use the VRSAVE register to
- * streamline this but "If this [VRSAVE] approach is taken it must be
- * applied rigorously".
- *
- * Since we cannot rely on partitions to use this correctly
- * or at all we must save/restore them all, including vrsave
+/*
+ * We cannot rely on the domain to correctly use VRSAVE
+ * so it is required that all VMX registers are saved and restored.
  */
 save_vmx:
        mfspr   r0,SPRN_VRSAVE
        stw     r0,VCPU_vrsave(r3)
 
-       /* r0 as the second operand is considered 0 */
-       addi r0,r3,VCPU_vr(0);  stvxl   vr0,r0,r0
-       addi r0,r3,VCPU_vr(1);  stvxl   vr1,r0,r0
-       addi r0,r3,VCPU_vr(2);  stvxl   vr2,r0,r0
-       addi r0,r3,VCPU_vr(3);  stvxl   vr3,r0,r0
-       addi r0,r3,VCPU_vr(4);  stvxl   vr4,r0,r0
-       addi r0,r3,VCPU_vr(5);  stvxl   vr5,r0,r0
-       addi r0,r3,VCPU_vr(6);  stvxl   vr6,r0,r0
-       addi r0,r3,VCPU_vr(7);  stvxl   vr7,r0,r0
-       addi r0,r3,VCPU_vr(8);  stvxl   vr8,r0,r0
+       addi r0,r3,VCPU_vr(0);  stvxl   vr0,0,r0
+       addi r0,r3,VCPU_vr(1);  stvxl   vr1,0,r0
+       addi r0,r3,VCPU_vr(2);  stvxl   vr2,0,r0
+       addi r0,r3,VCPU_vr(3);  stvxl   vr3,0,r0
+       addi r0,r3,VCPU_vr(4);  stvxl   vr4,0,r0
+       addi r0,r3,VCPU_vr(5);  stvxl   vr5,0,r0
+       addi r0,r3,VCPU_vr(6);  stvxl   vr6,0,r0
+       addi r0,r3,VCPU_vr(7);  stvxl   vr7,0,r0
+       addi r0,r3,VCPU_vr(8);  stvxl   vr8,0,r0
 
        /*
         * By now vr0 should be pushed out so now is a good time to
@@ -133,31 +129,31 @@ save_vmx:
         * on the following operations.
         */
        mfvscr  vr0
-       addi r0,r3,VCPU_vscr ;  stvxl   vr0,r0,r0
-
-       addi r0,r3,VCPU_vr(9);  stvxl   vr9,r0,r0
-       addi r0,r3,VCPU_vr(10); stvxl   vr10,r0,r0
-       addi r0,r3,VCPU_vr(11); stvxl   vr11,r0,r0
-       addi r0,r3,VCPU_vr(12); stvxl   vr12,r0,r0
-       addi r0,r3,VCPU_vr(13); stvxl   vr13,r0,r0
-       addi r0,r3,VCPU_vr(14); stvxl   vr14,r0,r0
-       addi r0,r3,VCPU_vr(15); stvxl   vr15,r0,r0
-       addi r0,r3,VCPU_vr(16); stvxl   vr16,r0,r0
-       addi r0,r3,VCPU_vr(17); stvxl   vr17,r0,r0
-       addi r0,r3,VCPU_vr(18); stvxl   vr18,r0,r0
-       addi r0,r3,VCPU_vr(19); stvxl   vr19,r0,r0
-       addi r0,r3,VCPU_vr(20); stvxl   vr20,r0,r0
-       addi r0,r3,VCPU_vr(21); stvxl   vr21,r0,r0
-       addi r0,r3,VCPU_vr(22); stvxl   vr22,r0,r0
-       addi r0,r3,VCPU_vr(23); stvxl   vr23,r0,r0
-       addi r0,r3,VCPU_vr(24); stvxl   vr24,r0,r0
-       addi r0,r3,VCPU_vr(25); stvxl   vr25,r0,r0
-       addi r0,r3,VCPU_vr(26); stvxl   vr26,r0,r0
-       addi r0,r3,VCPU_vr(27); stvxl   vr27,r0,r0
-       addi r0,r3,VCPU_vr(28); stvxl   vr28,r0,r0
-       addi r0,r3,VCPU_vr(29); stvxl   vr29,r0,r0
-       addi r0,r3,VCPU_vr(30); stvxl   vr30,r0,r0
-       addi r0,r3,VCPU_vr(31); stvxl   vr31,r0,r0
+       addi r0,r3,VCPU_vscr ;  stvxl   vr0,0,r0
+
+       addi r0,r3,VCPU_vr(9);  stvxl   vr9,0,r0
+       addi r0,r3,VCPU_vr(10); stvxl   vr10,0,r0
+       addi r0,r3,VCPU_vr(11); stvxl   vr11,0,r0
+       addi r0,r3,VCPU_vr(12); stvxl   vr12,0,r0
+       addi r0,r3,VCPU_vr(13); stvxl   vr13,0,r0
+       addi r0,r3,VCPU_vr(14); stvxl   vr14,0,r0
+       addi r0,r3,VCPU_vr(15); stvxl   vr15,0,r0
+       addi r0,r3,VCPU_vr(16); stvxl   vr16,0,r0
+       addi r0,r3,VCPU_vr(17); stvxl   vr17,0,r0
+       addi r0,r3,VCPU_vr(18); stvxl   vr18,0,r0
+       addi r0,r3,VCPU_vr(19); stvxl   vr19,0,r0
+       addi r0,r3,VCPU_vr(20); stvxl   vr20,0,r0
+       addi r0,r3,VCPU_vr(21); stvxl   vr21,0,r0
+       addi r0,r3,VCPU_vr(22); stvxl   vr22,0,r0
+       addi r0,r3,VCPU_vr(23); stvxl   vr23,0,r0
+       addi r0,r3,VCPU_vr(24); stvxl   vr24,0,r0
+       addi r0,r3,VCPU_vr(25); stvxl   vr25,0,r0
+       addi r0,r3,VCPU_vr(26); stvxl   vr26,0,r0
+       addi r0,r3,VCPU_vr(27); stvxl   vr27,0,r0
+       addi r0,r3,VCPU_vr(28); stvxl   vr28,0,r0
+       addi r0,r3,VCPU_vr(29); stvxl   vr29,0,r0
+       addi r0,r3,VCPU_vr(30); stvxl   vr30,0,r0
+       addi r0,r3,VCPU_vr(31); stvxl   vr31,0,r0
        blr
 
 load_vmx:
@@ -168,42 +164,41 @@ load_vmx:
         * This operation can take a long time so we use vr31 to
         * eliminate the depency on r0 for the next load
         */
-       addi r0,r3,VCPU_vscr ;  lvxl    vr31,r0,r0
+       addi r0,r3,VCPU_vscr ;  lvxl    vr31,0,r0
        mtvscr  vr31
 
-       /* r0 as the second operand is considered 0 */
-       addi r0,r3,VCPU_vr(0);  lvxl    vr0,r0,r0
-       addi r0,r3,VCPU_vr(1);  lvxl    vr1,r0,r0
-       addi r0,r3,VCPU_vr(2);  lvxl    vr2,r0,r0
-       addi r0,r3,VCPU_vr(3);  lvxl    vr3,r0,r0
-       addi r0,r3,VCPU_vr(4);  lvxl    vr4,r0,r0
-       addi r0,r3,VCPU_vr(5);  lvxl    vr5,r0,r0
-       addi r0,r3,VCPU_vr(6);  lvxl    vr6,r0,r0
-       addi r0,r3,VCPU_vr(7);  lvxl    vr7,r0,r0
-       addi r0,r3,VCPU_vr(8);  lvxl    vr8,r0,r0
-       addi r0,r3,VCPU_vr(9);  lvxl    vr9,r0,r0
-       addi r0,r3,VCPU_vr(10); lvxl    vr10,r0,r0
-       addi r0,r3,VCPU_vr(11); lvxl    vr11,r0,r0
-       addi r0,r3,VCPU_vr(12); lvxl    vr12,r0,r0
-       addi r0,r3,VCPU_vr(13); lvxl    vr13,r0,r0
-       addi r0,r3,VCPU_vr(14); lvxl    vr14,r0,r0
-       addi r0,r3,VCPU_vr(15); lvxl    vr15,r0,r0
-       addi r0,r3,VCPU_vr(16); lvxl    vr16,r0,r0
-       addi r0,r3,VCPU_vr(17); lvxl    vr17,r0,r0
-       addi r0,r3,VCPU_vr(18); lvxl    vr18,r0,r0
-       addi r0,r3,VCPU_vr(19); lvxl    vr19,r0,r0
-       addi r0,r3,VCPU_vr(20); lvxl    vr20,r0,r0
-       addi r0,r3,VCPU_vr(21); lvxl    vr21,r0,r0
-       addi r0,r3,VCPU_vr(22); lvxl    vr22,r0,r0
-       addi r0,r3,VCPU_vr(23); lvxl    vr23,r0,r0
-       addi r0,r3,VCPU_vr(24); lvxl    vr24,r0,r0
-       addi r0,r3,VCPU_vr(25); lvxl    vr25,r0,r0
-       addi r0,r3,VCPU_vr(26); lvxl    vr26,r0,r0
-       addi r0,r3,VCPU_vr(27); lvxl    vr27,r0,r0
-       addi r0,r3,VCPU_vr(28); lvxl    vr28,r0,r0
-       addi r0,r3,VCPU_vr(29); lvxl    vr29,r0,r0
-       addi r0,r3,VCPU_vr(30); lvxl    vr30,r0,r0
-       addi r0,r3,VCPU_vr(31); lvxl    vr31,r0,r0
+       addi r0,r3,VCPU_vr(0);  lvxl    vr0,0,r0
+       addi r0,r3,VCPU_vr(1);  lvxl    vr1,0,r0
+       addi r0,r3,VCPU_vr(2);  lvxl    vr2,0,r0
+       addi r0,r3,VCPU_vr(3);  lvxl    vr3,0,r0
+       addi r0,r3,VCPU_vr(4);  lvxl    vr4,0,r0
+       addi r0,r3,VCPU_vr(5);  lvxl    vr5,0,r0
+       addi r0,r3,VCPU_vr(6);  lvxl    vr6,0,r0
+       addi r0,r3,VCPU_vr(7);  lvxl    vr7,0,r0
+       addi r0,r3,VCPU_vr(8);  lvxl    vr8,0,r0
+       addi r0,r3,VCPU_vr(9);  lvxl    vr9,0,r0
+       addi r0,r3,VCPU_vr(10); lvxl    vr10,0,r0
+       addi r0,r3,VCPU_vr(11); lvxl    vr11,0,r0
+       addi r0,r3,VCPU_vr(12); lvxl    vr12,0,r0
+       addi r0,r3,VCPU_vr(13); lvxl    vr13,0,r0
+       addi r0,r3,VCPU_vr(14); lvxl    vr14,0,r0
+       addi r0,r3,VCPU_vr(15); lvxl    vr15,0,r0
+       addi r0,r3,VCPU_vr(16); lvxl    vr16,0,r0
+       addi r0,r3,VCPU_vr(17); lvxl    vr17,0,r0
+       addi r0,r3,VCPU_vr(18); lvxl    vr18,0,r0
+       addi r0,r3,VCPU_vr(19); lvxl    vr19,0,r0
+       addi r0,r3,VCPU_vr(20); lvxl    vr20,0,r0
+       addi r0,r3,VCPU_vr(21); lvxl    vr21,0,r0
+       addi r0,r3,VCPU_vr(22); lvxl    vr22,0,r0
+       addi r0,r3,VCPU_vr(23); lvxl    vr23,0,r0
+       addi r0,r3,VCPU_vr(24); lvxl    vr24,0,r0
+       addi r0,r3,VCPU_vr(25); lvxl    vr25,0,r0
+       addi r0,r3,VCPU_vr(26); lvxl    vr26,0,r0
+       addi r0,r3,VCPU_vr(27); lvxl    vr27,0,r0
+       addi r0,r3,VCPU_vr(28); lvxl    vr28,0,r0
+       addi r0,r3,VCPU_vr(29); lvxl    vr29,0,r0
+       addi r0,r3,VCPU_vr(30); lvxl    vr30,0,r0
+       addi r0,r3,VCPU_vr(31); lvxl    vr31,0,r0
        blr
 #endif /* HAS_VMX */
 
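
A related observation on the VSCR handling above (again an annotation,
not part of the patch): the vector status and control register has no
direct GPR path; it can only be moved through a vector register with
the mfvscr and mtvscr instructions.  That is why save_vmx stores vr0
first and then reuses it to spill the VSCR, while load_vmx loads the
saved VSCR image into vr31, restores it with mtvscr, and reloads vr31
last.  The pattern, in a minimal sketch reusing the VCPU_vscr offset
from the code above:

        mfvscr  vr0                     /* VSCR -> vr0 (no GPR form)   */
        addi    r0,r3,VCPU_vscr
        stvxl   vr0,0,r0                /* spill the VSCR image        */
        ...
        addi    r0,r3,VCPU_vscr
        lvxl    vr31,0,r0               /* reload the VSCR image       */
        mtvscr  vr31                    /* restore VSCR from vr31      */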
