[Xen-devel] [PATCH] RFC x86/emul: Drop segment_attributes_t

The amount of namespace resolution is unnecessarily large, as all code deals in
terms of struct segment_register.  This removes the .fields part of all
references, and renames .bytes to .raw.
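
In terms of spelling, accesses change as follows (illustrative):

    seg.attr.fields.dpl  becomes  seg.attr.dpl
    seg.attr.bytes       becomes  seg.attr.raw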

No functional change.  For some reason I can't identify, the changes to
rm_{cs,ds}_attr and vm86_{ds,tr}_attr cause GCC to encode the values as
immediate operands rather than variables in .rodata.  As a result,
vmx_{get,set}_segment_register() are shorter.

Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
CC: Jan Beulich <JBeulich@xxxxxxxx>

RFC, because I'd also like to float the idea of making this adjustment as well:

diff --git a/xen/arch/x86/x86_emulate/x86_emulate.h b/xen/arch/x86/x86_emulate/x86_emulate.h
index 3355c01..53a5480 100644
--- a/xen/arch/x86/x86_emulate/x86_emulate.h
+++ b/xen/arch/x86/x86_emulate/x86_emulate.h
@@ -90,7 +90,7 @@ struct x86_event {
 struct segment_register {
      uint16_t   sel;
           union {
-        uint16_t raw;
+        uint16_t attr;
         struct {
             uint16_t type:4;    /* 0;  Bit 40-43 */
             uint16_t s:   1;    /* 4;  Bit 44 */
@@ -102,7 +102,7 @@ struct segment_register {
             uint16_t g:   1;    /* 11; Bit 55 */
             uint16_t pad: 4;
         };
-    } attr;
+    };
     uint32_t   limit;
     uint64_t   base;
 };

which would turn .attr.raw into just .attr, and drop the .attr prefix from
accesses to the bitfield members.
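
As a sketch of the resulting usage (illustrative only; 0xc93 being the usual
flat read/write data segment attribute used elsewhere in this patch):

    struct segment_register seg;

    seg.attr = 0xc93;          /* was: seg.attr.raw = 0xc93; */

    if ( seg.type & 0x8 )      /* was: if ( seg.attr.type & 0x8 ) */
        /* code segment */;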

Furthermore, in a following patch, I intend to make an adjustment similar to
http://xenbits.xen.org/gitweb/?p=xtf.git;a=commitdiff;h=f099211f2ebdadf61ae6416559220d69b788cd2b
to expose the internal code/data field names.  This will simplify a lot of
code which currently checks open-coded numbers against the type field.
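
Something along these lines (names purely hypothetical, mirroring the style of
the XTF commit rather than anything currently in Xen):

    /* Hypothetical names for the user segment type bits. */
    #define SEG_TYPE_A    0x1 /* Accessed */
    #define SEG_TYPE_RW   0x2 /* Data: writeable; Code: readable */
    #define SEG_TYPE_DC   0x4 /* Data: expand-down; Code: conforming */
    #define SEG_TYPE_CODE 0x8 /* Code, rather than data, segment */

    /* e.g. the CS check in check_segment() would become: */
    if ( seg == x86_seg_cs && !(reg->attr.type & SEG_TYPE_CODE) )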

Thoughts?
---
 tools/fuzz/x86_instruction_emulator/fuzz-emul.c |  10 +-
 tools/tests/x86_emulator/test_x86_emulator.c    |   2 +-
 xen/arch/x86/cpu/vpmu.c                         |   2 +-
 xen/arch/x86/hvm/domain.c                       |  42 +++----
 xen/arch/x86/hvm/emulate.c                      |  20 +--
 xen/arch/x86/hvm/hvm.c                          | 154 ++++++++++++------------
 xen/arch/x86/hvm/svm/svm.c                      |  10 +-
 xen/arch/x86/hvm/svm/svmdebug.c                 |   6 +-
 xen/arch/x86/hvm/svm/vmcb.c                     |  16 +--
 xen/arch/x86/hvm/vmx/realmode.c                 |  10 +-
 xen/arch/x86/hvm/vmx/vmx.c                      |  32 ++---
 xen/arch/x86/mm/shadow/common.c                 |   6 +-
 xen/arch/x86/traps.c                            |  40 +++---
 xen/arch/x86/vm_event.c                         |   2 +-
 xen/arch/x86/x86_emulate/x86_emulate.c          |  54 ++++-----
 xen/arch/x86/x86_emulate/x86_emulate.h          |  35 +++---
 16 files changed, 217 insertions(+), 224 deletions(-)

diff --git a/tools/fuzz/x86_instruction_emulator/fuzz-emul.c b/tools/fuzz/x86_instruction_emulator/fuzz-emul.c
index aadbb40..c80501f 100644
--- a/tools/fuzz/x86_instruction_emulator/fuzz-emul.c
+++ b/tools/fuzz/x86_instruction_emulator/fuzz-emul.c
@@ -583,7 +583,7 @@ static bool in_longmode(struct x86_emulate_ctxt *ctxt)
     const struct fuzz_state *s = ctxt->data;
     const struct fuzz_corpus *c = s->corpus;
 
-    return long_mode_active(ctxt) && c->segments[x86_seg_cs].attr.fields.l;
+    return long_mode_active(ctxt) && c->segments[x86_seg_cs].attr.l;
 }
 
 static void set_sizes(struct x86_emulate_ctxt *ctxt)
@@ -597,8 +597,8 @@ static void set_sizes(struct x86_emulate_ctxt *ctxt)
         ctxt->addr_size = ctxt->sp_size = 64;
     else
     {
-        ctxt->addr_size = c->segments[x86_seg_cs].attr.fields.db ? 32 : 16;
-        ctxt->sp_size   = c->segments[x86_seg_ss].attr.fields.db ? 32 : 16;
+        ctxt->addr_size = c->segments[x86_seg_cs].attr.db ? 32 : 16;
+        ctxt->sp_size   = c->segments[x86_seg_ss].attr.db ? 32 : 16;
     }
 }
 
@@ -741,8 +741,8 @@ static void sanitize_input(struct x86_emulate_ctxt *ctxt)
     /* EFLAGS.VM implies 16-bit mode */
     if ( regs->rflags & X86_EFLAGS_VM )
     {
-        c->segments[x86_seg_cs].attr.fields.db = 0;
-        c->segments[x86_seg_ss].attr.fields.db = 0;
+        c->segments[x86_seg_cs].attr.db = 0;
+        c->segments[x86_seg_ss].attr.db = 0;
     }
 }
 
diff --git a/tools/tests/x86_emulator/test_x86_emulator.c b/tools/tests/x86_emulator/test_x86_emulator.c
index 1992c3f..f42b128 100644
--- a/tools/tests/x86_emulator/test_x86_emulator.c
+++ b/tools/tests/x86_emulator/test_x86_emulator.c
@@ -264,7 +264,7 @@ static int read_segment(
     if ( !is_x86_user_segment(seg) )
         return X86EMUL_UNHANDLEABLE;
     memset(reg, 0, sizeof(*reg));
-    reg->attr.fields.p = 1;
+    reg->attr.p = 1;
     return X86EMUL_OKAY;
 }
 
diff --git a/xen/arch/x86/cpu/vpmu.c b/xen/arch/x86/cpu/vpmu.c
index 21383d3..f15bf11 100644
--- a/xen/arch/x86/cpu/vpmu.c
+++ b/xen/arch/x86/cpu/vpmu.c
@@ -304,7 +304,7 @@ void vpmu_do_interrupt(struct cpu_user_regs *regs)
                 r->cs = seg.sel;
                 hvm_get_segment_register(sampled, x86_seg_ss, &seg);
                 r->ss = seg.sel;
-                r->cpl = seg.attr.fields.dpl;
+                r->cpl = seg.attr.dpl;
                 if ( !(sampled->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE) )
                     *flags |= PMU_SAMPLE_REAL;
             }
diff --git a/xen/arch/x86/hvm/domain.c b/xen/arch/x86/hvm/domain.c
index dca7a00..44103f4 100644
--- a/xen/arch/x86/hvm/domain.c
+++ b/xen/arch/x86/hvm/domain.c
@@ -27,13 +27,13 @@
 static int check_segment(struct segment_register *reg, enum x86_segment seg)
 {
 
-    if ( reg->attr.fields.pad != 0 )
+    if ( reg->attr.pad != 0 )
     {
         gprintk(XENLOG_ERR, "Segment attribute bits 12-15 are not zero\n");
         return -EINVAL;
     }
 
-    if ( reg->attr.bytes == 0 )
+    if ( reg->attr.raw == 0 )
     {
         if ( seg != x86_seg_ds && seg != x86_seg_es )
         {
@@ -45,46 +45,46 @@ static int check_segment(struct segment_register *reg, enum x86_segment seg)
 
     if ( seg == x86_seg_tr )
     {
-        if ( reg->attr.fields.s )
+        if ( reg->attr.s )
         {
             gprintk(XENLOG_ERR, "Code or data segment provided for TR\n");
             return -EINVAL;
         }
 
-        if ( reg->attr.fields.type != SYS_DESC_tss_busy )
+        if ( reg->attr.type != SYS_DESC_tss_busy )
         {
             gprintk(XENLOG_ERR, "Non-32-bit-TSS segment provided for TR\n");
             return -EINVAL;
         }
     }
-    else if ( !reg->attr.fields.s )
+    else if ( !reg->attr.s )
     {
         gprintk(XENLOG_ERR,
                 "System segment provided for a code or data segment\n");
         return -EINVAL;
     }
 
-    if ( !reg->attr.fields.p )
+    if ( !reg->attr.p )
     {
         gprintk(XENLOG_ERR, "Non-present segment provided\n");
         return -EINVAL;
     }
 
-    if ( seg == x86_seg_cs && !(reg->attr.fields.type & 0x8) )
+    if ( seg == x86_seg_cs && !(reg->attr.type & 0x8) )
     {
         gprintk(XENLOG_ERR, "Non-code segment provided for CS\n");
         return -EINVAL;
     }
 
     if ( seg == x86_seg_ss &&
-         ((reg->attr.fields.type & 0x8) || !(reg->attr.fields.type & 0x2)) )
+         ((reg->attr.type & 0x8) || !(reg->attr.type & 0x2)) )
     {
         gprintk(XENLOG_ERR, "Non-writeable segment provided for SS\n");
         return -EINVAL;
     }
 
-    if ( reg->attr.fields.s && seg != x86_seg_ss && seg != x86_seg_cs &&
-         (reg->attr.fields.type & 0x8) && !(reg->attr.fields.type & 0x2) )
+    if ( reg->attr.s && seg != x86_seg_ss && seg != x86_seg_cs &&
+         (reg->attr.type & 0x8) && !(reg->attr.type & 0x2) )
     {
         gprintk(XENLOG_ERR, "Non-readable segment provided for DS or ES\n");
         return -EINVAL;
@@ -123,10 +123,10 @@ int arch_set_info_hvm_guest(struct vcpu *v, const vcpu_hvm_context_t *ctx)
 #define SEG(s, r) ({                                                        \
     s = (struct segment_register){ .base = (r)->s ## _base,                 \
                                    .limit = (r)->s ## _limit,               \
-                                   .attr.bytes = (r)->s ## _ar };           \
+                                   .attr.raw = (r)->s ## _ar };             \
     /* Set accessed / busy bit for present segments. */                     \
-    if ( s.attr.fields.p )                                                  \
-        s.attr.fields.type |= (x86_seg_##s != x86_seg_tr ? 1 : 2);          \
+    if ( s.attr.p )                                                         \
+        s.attr.type |= (x86_seg_##s != x86_seg_tr ? 1 : 2);                 \
     check_segment(&s, x86_seg_ ## s); })
 
         rc = SEG(cs, regs);
@@ -141,7 +141,7 @@ int arch_set_info_hvm_guest(struct vcpu *v, const vcpu_hvm_context_t *ctx)
 
         /* Basic sanity checks. */
         limit = cs.limit;
-        if ( cs.attr.fields.g )
+        if ( cs.attr.g )
             limit = (limit << 12) | 0xfff;
         if ( regs->eip > limit )
         {
@@ -150,24 +150,24 @@ int arch_set_info_hvm_guest(struct vcpu *v, const vcpu_hvm_context_t *ctx)
             return -EINVAL;
         }
 
-        if ( ss.attr.fields.dpl != cs.attr.fields.dpl )
+        if ( ss.attr.dpl != cs.attr.dpl )
         {
             gprintk(XENLOG_ERR, "SS.DPL (%u) is different than CS.DPL (%u)\n",
-                    ss.attr.fields.dpl, cs.attr.fields.dpl);
+                    ss.attr.dpl, cs.attr.dpl);
             return -EINVAL;
         }
 
-        if ( ds.attr.fields.p && ds.attr.fields.dpl > cs.attr.fields.dpl )
+        if ( ds.attr.p && ds.attr.dpl > cs.attr.dpl )
         {
             gprintk(XENLOG_ERR, "DS.DPL (%u) is greater than CS.DPL (%u)\n",
-                    ds.attr.fields.dpl, cs.attr.fields.dpl);
+                    ds.attr.dpl, cs.attr.dpl);
             return -EINVAL;
         }
 
-        if ( es.attr.fields.p && es.attr.fields.dpl > cs.attr.fields.dpl )
+        if ( es.attr.p && es.attr.dpl > cs.attr.dpl )
         {
             gprintk(XENLOG_ERR, "ES.DPL (%u) is greater than CS.DPL (%u)\n",
-                    es.attr.fields.dpl, cs.attr.fields.dpl);
+                    es.attr.dpl, cs.attr.dpl);
             return -EINVAL;
         }
 
@@ -245,7 +245,7 @@ int arch_set_info_hvm_guest(struct vcpu *v, const vcpu_hvm_context_t *ctx)
         v->arch.hvm_vcpu.guest_cr[4] = regs->cr4;
         v->arch.hvm_vcpu.guest_efer  = regs->efer;
 
-#define SEG(l, a) (struct segment_register){ .limit = (l), .attr.bytes = (a) }
+#define SEG(l, a) (struct segment_register){ .limit = (l), .attr.raw = (a) }
         cs = SEG(~0u, 0xa9b); /* 64bit code segment. */
         ds = ss = es = SEG(~0u, 0xc93);
         tr = SEG(0x67, 0x8b); /* 64bit TSS (busy). */
diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index 11e4aba..1ef87fe 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -873,7 +873,7 @@ static int __hvmemul_read(
 
     if ( is_x86_system_segment(seg) )
         pfec |= PFEC_implicit;
-    else if ( hvmemul_ctxt->seg_reg[x86_seg_ss].attr.fields.dpl == 3 )
+    else if ( hvmemul_ctxt->seg_reg[x86_seg_ss].attr.dpl == 3 )
         pfec |= PFEC_user_mode;
 
     rc = hvmemul_virtual_to_linear(
@@ -984,7 +984,7 @@ static int hvmemul_write(
 
     if ( is_x86_system_segment(seg) )
         pfec |= PFEC_implicit;
-    else if ( hvmemul_ctxt->seg_reg[x86_seg_ss].attr.fields.dpl == 3 )
+    else if ( hvmemul_ctxt->seg_reg[x86_seg_ss].attr.dpl == 3 )
         pfec |= PFEC_user_mode;
 
     rc = hvmemul_virtual_to_linear(
@@ -1161,7 +1161,7 @@ static int hvmemul_rep_ins(
     if ( rc != X86EMUL_OKAY )
         return rc;
 
-    if ( hvmemul_ctxt->seg_reg[x86_seg_ss].attr.fields.dpl == 3 )
+    if ( hvmemul_ctxt->seg_reg[x86_seg_ss].attr.dpl == 3 )
         pfec |= PFEC_user_mode;
 
     rc = hvmemul_linear_to_phys(
@@ -1230,7 +1230,7 @@ static int hvmemul_rep_outs(
     if ( rc != X86EMUL_OKAY )
         return rc;
 
-    if ( hvmemul_ctxt->seg_reg[x86_seg_ss].attr.fields.dpl == 3 )
+    if ( hvmemul_ctxt->seg_reg[x86_seg_ss].attr.dpl == 3 )
         pfec |= PFEC_user_mode;
 
     rc = hvmemul_linear_to_phys(
@@ -1277,7 +1277,7 @@ static int hvmemul_rep_movs(
     if ( rc != X86EMUL_OKAY )
         return rc;
 
-    if ( hvmemul_ctxt->seg_reg[x86_seg_ss].attr.fields.dpl == 3 )
+    if ( hvmemul_ctxt->seg_reg[x86_seg_ss].attr.dpl == 3 )
         pfec |= PFEC_user_mode;
 
     if ( vio->mmio_access.read_access &&
@@ -1435,7 +1435,7 @@ static int hvmemul_rep_stos(
     {
         uint32_t pfec = PFEC_page_present | PFEC_write_access;
 
-        if ( hvmemul_ctxt->seg_reg[x86_seg_ss].attr.fields.dpl == 3 )
+        if ( hvmemul_ctxt->seg_reg[x86_seg_ss].attr.dpl == 3 )
             pfec |= PFEC_user_mode;
 
         rc = hvmemul_linear_to_phys(addr, &gpa, bytes_per_rep, reps, pfec,
@@ -2133,17 +2133,17 @@ void hvm_emulate_init_per_insn(
     hvmemul_ctxt->ctxt.lma = hvm_long_mode_active(curr);
 
     if ( hvmemul_ctxt->ctxt.lma &&
-         hvmemul_ctxt->seg_reg[x86_seg_cs].attr.fields.l )
+         hvmemul_ctxt->seg_reg[x86_seg_cs].attr.l )
         hvmemul_ctxt->ctxt.addr_size = hvmemul_ctxt->ctxt.sp_size = 64;
     else
     {
         hvmemul_ctxt->ctxt.addr_size =
-            hvmemul_ctxt->seg_reg[x86_seg_cs].attr.fields.db ? 32 : 16;
+            hvmemul_ctxt->seg_reg[x86_seg_cs].attr.db ? 32 : 16;
         hvmemul_ctxt->ctxt.sp_size =
-            hvmemul_ctxt->seg_reg[x86_seg_ss].attr.fields.db ? 32 : 16;
+            hvmemul_ctxt->seg_reg[x86_seg_ss].attr.db ? 32 : 16;
     }
 
-    if ( hvmemul_ctxt->seg_reg[x86_seg_ss].attr.fields.dpl == 3 )
+    if ( hvmemul_ctxt->seg_reg[x86_seg_ss].attr.dpl == 3 )
         pfec |= PFEC_user_mode;
 
     hvmemul_ctxt->insn_buf_eip = hvmemul_ctxt->ctxt.regs->rip;
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 70ddc81..889aaaa 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -774,49 +774,49 @@ static int hvm_save_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
         ctxt.cs_sel = seg.sel;
         ctxt.cs_limit = seg.limit;
         ctxt.cs_base = seg.base;
-        ctxt.cs_arbytes = seg.attr.bytes;
+        ctxt.cs_arbytes = seg.attr.raw;
 
         hvm_get_segment_register(v, x86_seg_ds, &seg);
         ctxt.ds_sel = seg.sel;
         ctxt.ds_limit = seg.limit;
         ctxt.ds_base = seg.base;
-        ctxt.ds_arbytes = seg.attr.bytes;
+        ctxt.ds_arbytes = seg.attr.raw;
 
         hvm_get_segment_register(v, x86_seg_es, &seg);
         ctxt.es_sel = seg.sel;
         ctxt.es_limit = seg.limit;
         ctxt.es_base = seg.base;
-        ctxt.es_arbytes = seg.attr.bytes;
+        ctxt.es_arbytes = seg.attr.raw;
 
         hvm_get_segment_register(v, x86_seg_ss, &seg);
         ctxt.ss_sel = seg.sel;
         ctxt.ss_limit = seg.limit;
         ctxt.ss_base = seg.base;
-        ctxt.ss_arbytes = seg.attr.bytes;
+        ctxt.ss_arbytes = seg.attr.raw;
 
         hvm_get_segment_register(v, x86_seg_fs, &seg);
         ctxt.fs_sel = seg.sel;
         ctxt.fs_limit = seg.limit;
         ctxt.fs_base = seg.base;
-        ctxt.fs_arbytes = seg.attr.bytes;
+        ctxt.fs_arbytes = seg.attr.raw;
 
         hvm_get_segment_register(v, x86_seg_gs, &seg);
         ctxt.gs_sel = seg.sel;
         ctxt.gs_limit = seg.limit;
         ctxt.gs_base = seg.base;
-        ctxt.gs_arbytes = seg.attr.bytes;
+        ctxt.gs_arbytes = seg.attr.raw;
 
         hvm_get_segment_register(v, x86_seg_tr, &seg);
         ctxt.tr_sel = seg.sel;
         ctxt.tr_limit = seg.limit;
         ctxt.tr_base = seg.base;
-        ctxt.tr_arbytes = seg.attr.bytes;
+        ctxt.tr_arbytes = seg.attr.raw;
 
         hvm_get_segment_register(v, x86_seg_ldtr, &seg);
         ctxt.ldtr_sel = seg.sel;
         ctxt.ldtr_limit = seg.limit;
         ctxt.ldtr_base = seg.base;
-        ctxt.ldtr_arbytes = seg.attr.bytes;
+        ctxt.ldtr_arbytes = seg.attr.raw;
 
         if ( v->fpu_initialised )
         {
@@ -1028,49 +1028,49 @@ static int hvm_load_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
     seg.sel = ctxt.cs_sel;
     seg.limit = ctxt.cs_limit;
     seg.base = ctxt.cs_base;
-    seg.attr.bytes = ctxt.cs_arbytes;
+    seg.attr.raw = ctxt.cs_arbytes;
     hvm_set_segment_register(v, x86_seg_cs, &seg);
 
     seg.sel = ctxt.ds_sel;
     seg.limit = ctxt.ds_limit;
     seg.base = ctxt.ds_base;
-    seg.attr.bytes = ctxt.ds_arbytes;
+    seg.attr.raw = ctxt.ds_arbytes;
     hvm_set_segment_register(v, x86_seg_ds, &seg);
 
     seg.sel = ctxt.es_sel;
     seg.limit = ctxt.es_limit;
     seg.base = ctxt.es_base;
-    seg.attr.bytes = ctxt.es_arbytes;
+    seg.attr.raw = ctxt.es_arbytes;
     hvm_set_segment_register(v, x86_seg_es, &seg);
 
     seg.sel = ctxt.ss_sel;
     seg.limit = ctxt.ss_limit;
     seg.base = ctxt.ss_base;
-    seg.attr.bytes = ctxt.ss_arbytes;
+    seg.attr.raw = ctxt.ss_arbytes;
     hvm_set_segment_register(v, x86_seg_ss, &seg);
 
     seg.sel = ctxt.fs_sel;
     seg.limit = ctxt.fs_limit;
     seg.base = ctxt.fs_base;
-    seg.attr.bytes = ctxt.fs_arbytes;
+    seg.attr.raw = ctxt.fs_arbytes;
     hvm_set_segment_register(v, x86_seg_fs, &seg);
 
     seg.sel = ctxt.gs_sel;
     seg.limit = ctxt.gs_limit;
     seg.base = ctxt.gs_base;
-    seg.attr.bytes = ctxt.gs_arbytes;
+    seg.attr.raw = ctxt.gs_arbytes;
     hvm_set_segment_register(v, x86_seg_gs, &seg);
 
     seg.sel = ctxt.tr_sel;
     seg.limit = ctxt.tr_limit;
     seg.base = ctxt.tr_base;
-    seg.attr.bytes = ctxt.tr_arbytes;
+    seg.attr.raw = ctxt.tr_arbytes;
     hvm_set_segment_register(v, x86_seg_tr, &seg);
 
     seg.sel = ctxt.ldtr_sel;
     seg.limit = ctxt.ldtr_limit;
     seg.base = ctxt.ldtr_base;
-    seg.attr.bytes = ctxt.ldtr_arbytes;
+    seg.attr.raw = ctxt.ldtr_arbytes;
     hvm_set_segment_register(v, x86_seg_ldtr, &seg);
 
     /* Cover xsave-absent save file restoration on xsave-capable host. */
@@ -1936,9 +1936,9 @@ int hvm_set_efer(uint64_t value)
          * When LME becomes set, clobber %cs.L to keep the guest firmly in
          * compatibility mode until it reloads %cs itself.
          */
-        if ( cs.attr.fields.l )
+        if ( cs.attr.l )
         {
-            cs.attr.fields.l = 0;
+            cs.attr.l = 0;
             hvm_set_segment_register(v, x86_seg_cs, &cs);
         }
     }
@@ -2379,14 +2379,14 @@ bool_t hvm_virtual_to_linear_addr(
             goto out;
     }
     else if ( hvm_long_mode_active(curr) &&
-              (is_x86_system_segment(seg) || active_cs->attr.fields.l) )
+              (is_x86_system_segment(seg) || active_cs->attr.l) )
     {
         /*
          * User segments are always treated as present.  System segment may
          * not be, and also incur limit checks.
          */
         if ( is_x86_system_segment(seg) &&
-             (!reg->attr.fields.p || (offset + bytes - !!bytes) > reg->limit) )
+             (!reg->attr.p || (offset + bytes - !!bytes) > reg->limit) )
             goto out;
 
         /*
@@ -2414,20 +2414,20 @@ bool_t hvm_virtual_to_linear_addr(
         addr = (uint32_t)(addr + reg->base);
 
         /* Segment not valid for use (cooked meaning of .p)? */
-        if ( !reg->attr.fields.p )
+        if ( !reg->attr.p )
             goto out;
 
         /* Read/write restrictions only exist for user segments. */
-        if ( reg->attr.fields.s )
+        if ( reg->attr.s )
         {
             switch ( access_type )
             {
             case hvm_access_read:
-                if ( (reg->attr.fields.type & 0xa) == 0x8 )
+                if ( (reg->attr.type & 0xa) == 0x8 )
                     goto out; /* execute-only code segment */
                 break;
             case hvm_access_write:
-                if ( (reg->attr.fields.type & 0xa) != 0x2 )
+                if ( (reg->attr.type & 0xa) != 0x2 )
                     goto out; /* not a writable data segment */
                 break;
             default:
@@ -2438,10 +2438,10 @@ bool_t hvm_virtual_to_linear_addr(
         last_byte = (uint32_t)offset + bytes - !!bytes;
 
         /* Is this a grows-down data segment? Special limit check if so. */
-        if ( reg->attr.fields.s && (reg->attr.fields.type & 0xc) == 0x4 )
+        if ( reg->attr.s && (reg->attr.type & 0xc) == 0x4 )
         {
             /* Is upper limit 0xFFFF or 0xFFFFFFFF? */
-            if ( !reg->attr.fields.db )
+            if ( !reg->attr.db )
                 last_byte = (uint16_t)last_byte;
 
             /* Check first byte and last byte against respective bounds. */
@@ -2637,7 +2637,7 @@ static int hvm_load_segment_selector(
         segr.sel = sel;
         segr.base = (uint32_t)sel << 4;
         segr.limit = 0xffffu;
-        segr.attr.bytes = 0xf3;
+        segr.attr.raw = 0xf3;
         hvm_set_segment_register(v, seg, &segr);
         return 0;
     }
@@ -2661,7 +2661,7 @@ static int hvm_load_segment_selector(
         v, (sel & 4) ? x86_seg_ldtr : x86_seg_gdtr, &desctab);
 
     /* Segment not valid for use (cooked meaning of .p)? */
-    if ( !desctab.attr.fields.p )
+    if ( !desctab.attr.p )
         goto fail;
 
     /* Check against descriptor table limit. */
@@ -2739,10 +2739,10 @@ static int hvm_load_segment_selector(
     segr.base = (((desc.b <<  0) & 0xff000000u) |
                  ((desc.b << 16) & 0x00ff0000u) |
                  ((desc.a >> 16) & 0x0000ffffu));
-    segr.attr.bytes = (((desc.b >>  8) & 0x00ffu) |
-                       ((desc.b >> 12) & 0x0f00u));
+    segr.attr.raw = (((desc.b >>  8) & 0x00ffu) |
+                     ((desc.b >> 12) & 0x0f00u));
     segr.limit = (desc.b & 0x000f0000u) | (desc.a & 0x0000ffffu);
-    if ( segr.attr.fields.g )
+    if ( segr.attr.g )
         segr.limit = (segr.limit << 12) | 0xfffu;
     segr.sel = sel;
     hvm_set_segment_register(v, seg, &segr);
@@ -2840,13 +2840,13 @@ void hvm_task_switch(
     tr.base = (((tss_desc.b <<  0) & 0xff000000u) |
                ((tss_desc.b << 16) & 0x00ff0000u) |
                ((tss_desc.a >> 16) & 0x0000ffffu));
-    tr.attr.bytes = (((tss_desc.b >>  8) & 0x00ffu) |
-                     ((tss_desc.b >> 12) & 0x0f00u));
+    tr.attr.raw = (((tss_desc.b >>  8) & 0x00ffu) |
+                   ((tss_desc.b >> 12) & 0x0f00u));
     tr.limit = (tss_desc.b & 0x000f0000u) | (tss_desc.a & 0x0000ffffu);
-    if ( tr.attr.fields.g )
+    if ( tr.attr.g )
         tr.limit = (tr.limit << 12) | 0xfffu;
 
-    if ( tr.attr.fields.type != ((taskswitch_reason == TSW_iret) ? 0xb : 0x9) )
+    if ( tr.attr.type != ((taskswitch_reason == TSW_iret) ? 0xb : 0x9) )
     {
         hvm_inject_hw_exception(
             (taskswitch_reason == TSW_iret) ? TRAP_invalid_tss : TRAP_gp_fault,
@@ -2854,7 +2854,7 @@ void hvm_task_switch(
         goto out;
     }
 
-    if ( !tr.attr.fields.p )
+    if ( !tr.attr.p )
     {
         hvm_inject_hw_exception(TRAP_no_segment, tss_sel & 0xfff8);
         goto out;
@@ -2972,7 +2972,7 @@ void hvm_task_switch(
             goto out;
     }
 
-    tr.attr.fields.type = 0xb; /* busy 32-bit tss */
+    tr.attr.type = SYS_DESC_tss_busy;
     hvm_set_segment_register(v, x86_seg_tr, &tr);
 
     v->arch.hvm_vcpu.guest_cr[0] |= X86_CR0_TS;
@@ -2992,9 +2992,9 @@ void hvm_task_switch(
         unsigned int opsz, sp;
 
         hvm_get_segment_register(v, x86_seg_cs, &cs);
-        opsz = cs.attr.fields.db ? 4 : 2;
+        opsz = cs.attr.db ? 4 : 2;
         hvm_get_segment_register(v, x86_seg_ss, &segr);
-        if ( segr.attr.fields.db )
+        if ( segr.attr.db )
             sp = regs->esp -= opsz;
         else
             sp = regs->sp -= opsz;
@@ -3614,7 +3614,7 @@ void hvm_ud_intercept(struct cpu_user_regs *regs)
     if ( opt_hvm_fep )
     {
         const struct segment_register *cs = &ctxt.seg_reg[x86_seg_cs];
-        uint32_t walk = (ctxt.seg_reg[x86_seg_ss].attr.fields.dpl == 3)
+        uint32_t walk = (ctxt.seg_reg[x86_seg_ss].attr.dpl == 3)
             ? PFEC_user_mode : 0;
         unsigned long addr;
         char sig[5]; /* ud2; .ascii "xen" */
@@ -3630,7 +3630,7 @@ void hvm_ud_intercept(struct cpu_user_regs *regs)
             regs->eflags &= ~X86_EFLAGS_RF;
 
             /* Zero the upper 32 bits of %rip if not in 64bit mode. */
-            if ( !(hvm_long_mode_active(cur) && cs->attr.fields.l) )
+            if ( !(hvm_long_mode_active(cur) && cs->attr.l) )
                 regs->rip = regs->eip;
 
             add_taint(TAINT_HVM_FEP);
@@ -3782,25 +3782,25 @@ void hvm_vcpu_reset_state(struct vcpu *v, uint16_t cs, uint16_t ip)
     reg.sel = cs;
     reg.base = (uint32_t)reg.sel << 4;
     reg.limit = 0xffff;
-    reg.attr.bytes = 0x09b;
+    reg.attr.raw = 0x09b;
     hvm_set_segment_register(v, x86_seg_cs, &reg);
 
     reg.sel = reg.base = 0;
     reg.limit = 0xffff;
-    reg.attr.bytes = 0x093;
+    reg.attr.raw = 0x093;
     hvm_set_segment_register(v, x86_seg_ds, &reg);
     hvm_set_segment_register(v, x86_seg_es, &reg);
     hvm_set_segment_register(v, x86_seg_fs, &reg);
     hvm_set_segment_register(v, x86_seg_gs, &reg);
     hvm_set_segment_register(v, x86_seg_ss, &reg);
 
-    reg.attr.bytes = 0x82; /* LDT */
+    reg.attr.raw = 0x82; /* LDT */
     hvm_set_segment_register(v, x86_seg_ldtr, &reg);
 
-    reg.attr.bytes = 0x8b; /* 32-bit TSS (busy) */
+    reg.attr.raw = 0x8b; /* 32-bit TSS (busy) */
     hvm_set_segment_register(v, x86_seg_tr, &reg);
 
-    reg.attr.bytes = 0;
+    reg.attr.raw = 0;
     hvm_set_segment_register(v, x86_seg_gdtr, &reg);
     hvm_set_segment_register(v, x86_seg_idtr, &reg);
 
@@ -4736,8 +4736,8 @@ void hvm_get_segment_register(struct vcpu *v, enum x86_segment seg,
     {
     case x86_seg_ss:
         /* SVM may retain %ss.DB when %ss is loaded with a NULL selector. */
-        if ( !reg->attr.fields.p )
-            reg->attr.fields.db = 0;
+        if ( !reg->attr.p )
+            reg->attr.db = 0;
         break;
 
     case x86_seg_tr:
@@ -4745,14 +4745,14 @@ void hvm_get_segment_register(struct vcpu *v, enum x86_segment seg,
          * SVM doesn't track %tr.B. Architecturally, a loaded TSS segment will
          * always be busy.
          */
-        reg->attr.fields.type |= 0x2;
+        reg->attr.type |= 0x2;
 
         /*
          * %cs and %tr are unconditionally present.  SVM ignores these present
          * bits and will happily run without them set.
          */
     case x86_seg_cs:
-        reg->attr.fields.p = 1;
+        reg->attr.p = 1;
         break;
 
     case x86_seg_gdtr:
@@ -4761,21 +4761,21 @@ void hvm_get_segment_register(struct vcpu *v, enum x86_segment seg,
          * Treat GDTR/IDTR as being present system segments.  This avoids them
          * needing special casing for segmentation checks.
          */
-        reg->attr.bytes = 0x80;
+        reg->attr.raw = 0x80;
         break;
 
     default: /* Avoid triggering -Werror=switch */
         break;
     }
 
-    if ( reg->attr.fields.p )
+    if ( reg->attr.p )
     {
         /*
          * For segments which are present/usable, cook the system flag.  SVM
          * ignores the S bit on all segments and will happily run with them in
          * any state.
          */
-        reg->attr.fields.s = is_x86_user_segment(seg);
+        reg->attr.s = is_x86_user_segment(seg);
 
         /*
          * SVM discards %cs.G on #VMEXIT.  Other user segments do have .G
@@ -4785,14 +4785,14 @@ void hvm_get_segment_register(struct vcpu *v, enum x86_segment seg,
          *
          * Unconditionally recalculate G.
          */
-        reg->attr.fields.g = !!(reg->limit >> 20);
+        reg->attr.g = !!(reg->limit >> 20);
 
         /*
          * SVM doesn't track the Accessed flag.  It will always be set for
          * usable user segments loaded into the descriptor cache.
          */
         if ( is_x86_user_segment(seg) )
-            reg->attr.fields.type |= 0x1;
+            reg->attr.type |= 0x1;
     }
 }
 
@@ -4800,25 +4800,25 @@ void hvm_set_segment_register(struct vcpu *v, enum x86_segment seg,
                               struct segment_register *reg)
 {
     /* Set G to match the limit field.  VT-x cares, while SVM doesn't. */
-    if ( reg->attr.fields.p )
-        reg->attr.fields.g = !!(reg->limit >> 20);
+    if ( reg->attr.p )
+        reg->attr.g = !!(reg->limit >> 20);
 
     switch ( seg )
     {
     case x86_seg_cs:
-        ASSERT(reg->attr.fields.p);                  /* Usable. */
-        ASSERT(reg->attr.fields.s);                  /* User segment. */
-        ASSERT(reg->attr.fields.type & 0x1);         /* Accessed. */
+        ASSERT(reg->attr.p);                         /* Usable. */
+        ASSERT(reg->attr.s);                         /* User segment. */
+        ASSERT(reg->attr.type & 0x1);                /* Accessed. */
         ASSERT((reg->base >> 32) == 0);              /* Upper bits clear. */
         break;
 
     case x86_seg_ss:
-        if ( reg->attr.fields.p )
+        if ( reg->attr.p )
         {
-            ASSERT(reg->attr.fields.s);              /* User segment. */
-            ASSERT(!(reg->attr.fields.type & 0x8));  /* Data segment. */
-            ASSERT(reg->attr.fields.type & 0x2);     /* Writeable. */
-            ASSERT(reg->attr.fields.type & 0x1);     /* Accessed. */
+            ASSERT(reg->attr.s);                     /* User segment. */
+            ASSERT(!(reg->attr.type & 0x8));         /* Data segment. */
+            ASSERT(reg->attr.type & 0x2);            /* Writeable. */
+            ASSERT(reg->attr.type & 0x1);            /* Accessed. */
             ASSERT((reg->base >> 32) == 0);          /* Upper bits clear. */
         }
         break;
@@ -4827,14 +4827,14 @@ void hvm_set_segment_register(struct vcpu *v, enum x86_segment seg,
     case x86_seg_es:
     case x86_seg_fs:
     case x86_seg_gs:
-        if ( reg->attr.fields.p )
+        if ( reg->attr.p )
         {
-            ASSERT(reg->attr.fields.s);              /* User segment. */
+            ASSERT(reg->attr.s);                     /* User segment. */
 
-            if ( reg->attr.fields.type & 0x8 )
-                ASSERT(reg->attr.fields.type & 0x2); /* Readable. */
+            if ( reg->attr.type & 0x8 )
+                ASSERT(reg->attr.type & 0x2);        /* Readable. */
 
-            ASSERT(reg->attr.fields.type & 0x1);     /* Accessed. */
+            ASSERT(reg->attr.type & 0x1);            /* Accessed. */
 
             if ( seg == x86_seg_fs || seg == x86_seg_gs )
                 ASSERT(is_canonical_address(reg->base));
@@ -4844,23 +4844,23 @@ void hvm_set_segment_register(struct vcpu *v, enum x86_segment seg,
         break;
 
     case x86_seg_tr:
-        ASSERT(reg->attr.fields.p);                  /* Usable. */
-        ASSERT(!reg->attr.fields.s);                 /* System segment. */
+        ASSERT(reg->attr.p);                         /* Usable. */
+        ASSERT(!reg->attr.s);                        /* System segment. */
         ASSERT(!(reg->sel & 0x4));                   /* !TI. */
-        if ( reg->attr.fields.type == SYS_DESC_tss_busy )
+        if ( reg->attr.type == SYS_DESC_tss_busy )
             ASSERT(is_canonical_address(reg->base));
-        else if ( reg->attr.fields.type == SYS_DESC_tss16_busy )
+        else if ( reg->attr.type == SYS_DESC_tss16_busy )
             ASSERT((reg->base >> 32) == 0);
         else
             ASSERT(!"%tr typecheck failure");
         break;
 
     case x86_seg_ldtr:
-        if ( reg->attr.fields.p )
+        if ( reg->attr.p )
         {
-            ASSERT(!reg->attr.fields.s);             /* System segment. */
+            ASSERT(!reg->attr.s);                    /* System segment. */
             ASSERT(!(reg->sel & 0x4));               /* !TI. */
-            ASSERT(reg->attr.fields.type == SYS_DESC_ldt);
+            ASSERT(reg->attr.type == SYS_DESC_ldt);
             ASSERT(is_canonical_address(reg->base));
         }
         break;
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index 178adc5..c312827 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -526,9 +526,9 @@ static int svm_guest_x86_mode(struct vcpu *v)
         return 0;
     if ( unlikely(guest_cpu_user_regs()->eflags & X86_EFLAGS_VM) )
         return 1;
-    if ( hvm_long_mode_active(v) && likely(vmcb->cs.attr.fields.l) )
+    if ( hvm_long_mode_active(v) && likely(vmcb->cs.attr.l) )
         return 8;
-    return (likely(vmcb->cs.attr.fields.db) ? 4 : 2);
+    return likely(vmcb->cs.attr.db) ? 4 : 2;
 }
 
 void svm_update_guest_cr(struct vcpu *v, unsigned int cr)
@@ -653,7 +653,7 @@ static void svm_get_segment_register(struct vcpu *v, enum x86_segment seg,
         break;
     case x86_seg_ss:
         *reg = vmcb->ss;
-        reg->attr.fields.dpl = vmcb->_cpl;
+        reg->attr.dpl = vmcb->_cpl;
         break;
     case x86_seg_tr:
         svm_sync_vmcb(v);
@@ -726,7 +726,7 @@ static void svm_set_segment_register(struct vcpu *v, enum x86_segment seg,
         break;
     case x86_seg_ss:
         vmcb->ss = *reg;
-        vmcb->_cpl = vmcb->ss.attr.fields.dpl;
+        vmcb->_cpl = vmcb->ss.attr.dpl;
         break;
     case x86_seg_tr:
         vmcb->tr = *reg;
@@ -1442,7 +1442,7 @@ static void svm_inject_event(const struct x86_event *event)
      * If injecting an event outside of 64bit mode, zero the upper bits of the
      * %eip and nextrip after the adjustments above.
      */
-    if ( !((vmcb->_efer & EFER_LMA) && vmcb->cs.attr.fields.l) )
+    if ( !((vmcb->_efer & EFER_LMA) && vmcb->cs.attr.l) )
     {
         regs->rip = regs->eip;
         vmcb->nextrip = (uint32_t)vmcb->nextrip;
diff --git a/xen/arch/x86/hvm/svm/svmdebug.c b/xen/arch/x86/hvm/svm/svmdebug.c
index 0a64e92..049b038 100644
--- a/xen/arch/x86/hvm/svm/svmdebug.c
+++ b/xen/arch/x86/hvm/svm/svmdebug.c
@@ -23,7 +23,7 @@
 static void svm_dump_sel(const char *name, const svm_segment_register_t *s)
 {
     printk("%s: %04x %04x %08x %016"PRIx64"\n",
-           name, s->sel, s->attr.bytes, s->limit, s->base);
+           name, s->sel, s->attr.raw, s->limit, s->base);
 }
 
 /* This function can directly access fields which are covered by clean bits. */
@@ -163,8 +163,8 @@ svm_vmcb_isvalid(const char *from, struct vmcb_struct *vmcb,
     if ((vmcb->_efer & EFER_LME) != 0
         && (vmcb->_cr0 & X86_CR0_PG) != 0
         && (vmcb->_cr4 & X86_CR4_PAE) != 0
-        && (vmcb->cs.attr.fields.l != 0)
-        && (vmcb->cs.attr.fields.db != 0))
+        && (vmcb->cs.attr.l != 0)
+        && (vmcb->cs.attr.db != 0))
     {
         PRINTF("EFER_LME, CR0.PG, CR4.PAE, CS.L and CS.D are all non-zero.\n");
     }
diff --git a/xen/arch/x86/hvm/svm/vmcb.c b/xen/arch/x86/hvm/svm/vmcb.c
index 96abf8d..e925ec7 100644
--- a/xen/arch/x86/hvm/svm/vmcb.c
+++ b/xen/arch/x86/hvm/svm/vmcb.c
@@ -158,12 +158,12 @@ static int construct_vmcb(struct vcpu *v)
     vmcb->gs.base = 0;
 
     /* Guest segment AR bytes. */
-    vmcb->es.attr.bytes = 0xc93; /* read/write, accessed */
-    vmcb->ss.attr.bytes = 0xc93;
-    vmcb->ds.attr.bytes = 0xc93;
-    vmcb->fs.attr.bytes = 0xc93;
-    vmcb->gs.attr.bytes = 0xc93;
-    vmcb->cs.attr.bytes = 0xc9b; /* exec/read, accessed */
+    vmcb->es.attr.raw = 0xc93; /* read/write, accessed */
+    vmcb->ss.attr.raw = 0xc93;
+    vmcb->ds.attr.raw = 0xc93;
+    vmcb->fs.attr.raw = 0xc93;
+    vmcb->gs.attr.raw = 0xc93;
+    vmcb->cs.attr.raw = 0xc9b; /* exec/read, accessed */
 
     /* Guest IDT. */
     vmcb->idtr.base = 0;
@@ -177,10 +177,10 @@ static int construct_vmcb(struct vcpu *v)
     vmcb->ldtr.sel = 0;
     vmcb->ldtr.base = 0;
     vmcb->ldtr.limit = 0;
-    vmcb->ldtr.attr.bytes = 0;
+    vmcb->ldtr.attr.raw = 0;
 
     /* Guest TSS. */
-    vmcb->tr.attr.bytes = 0x08b; /* 32-bit TSS (busy) */
+    vmcb->tr.attr.raw = 0x08b; /* 32-bit TSS (busy) */
     vmcb->tr.base = 0;
     vmcb->tr.limit = 0xff;
 
diff --git a/xen/arch/x86/hvm/vmx/realmode.c b/xen/arch/x86/hvm/vmx/realmode.c
index 1996b1f..17540e9 100644
--- a/xen/arch/x86/hvm/vmx/realmode.c
+++ b/xen/arch/x86/hvm/vmx/realmode.c
@@ -70,7 +70,7 @@ static void realmode_deliver_exception(
     frame[2] = regs->flags & ~X86_EFLAGS_RF;
 
     /* We can't test hvmemul_ctxt->ctxt.sp_size: it may not be initialised. */
-    if ( hvmemul_ctxt->seg_reg[x86_seg_ss].attr.fields.db )
+    if ( hvmemul_ctxt->seg_reg[x86_seg_ss].attr.db )
         pstk = regs->esp -= 6;
     else
         pstk = regs->sp -= 6;
@@ -207,13 +207,13 @@ void vmx_realmode(struct cpu_user_regs *regs)
          * DS, ES, FS and GS the most uninvasive trick is to set DPL == RPL.
          */
         sreg = hvmemul_get_seg_reg(x86_seg_ds, &hvmemul_ctxt);
-        sreg->attr.fields.dpl = sreg->sel & 3;
+        sreg->attr.dpl = sreg->sel & 3;
         sreg = hvmemul_get_seg_reg(x86_seg_es, &hvmemul_ctxt);
-        sreg->attr.fields.dpl = sreg->sel & 3;
+        sreg->attr.dpl = sreg->sel & 3;
         sreg = hvmemul_get_seg_reg(x86_seg_fs, &hvmemul_ctxt);
-        sreg->attr.fields.dpl = sreg->sel & 3;
+        sreg->attr.dpl = sreg->sel & 3;
         sreg = hvmemul_get_seg_reg(x86_seg_gs, &hvmemul_ctxt);
-        sreg->attr.fields.dpl = sreg->sel & 3;
+        sreg->attr.dpl = sreg->sel & 3;
         hvmemul_ctxt.seg_reg_dirty |=
             (1ul << x86_seg_ds) | (1ul << x86_seg_es) |
             (1ul << x86_seg_fs) | (1ul << x86_seg_gs);
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index c53b249..3d5f48a 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -1074,18 +1074,18 @@ static unsigned int _vmx_get_cpl(struct vcpu *v)
  * things.  We store the ring-3 version in the VMCS to avoid lots of
  * shuffling on vmenter and vmexit, and translate in these accessors. */
 
-#define rm_cs_attr (((union segment_attributes) {                       \
-        .fields = { .type = 0xb, .s = 1, .dpl = 0, .p = 1, .avl = 0,    \
-                    .l = 0, .db = 0, .g = 0, .pad = 0 } }).bytes)
-#define rm_ds_attr (((union segment_attributes) {                       \
-        .fields = { .type = 0x3, .s = 1, .dpl = 0, .p = 1, .avl = 0,    \
-                    .l = 0, .db = 0, .g = 0, .pad = 0 } }).bytes)
-#define vm86_ds_attr (((union segment_attributes) {                     \
-        .fields = { .type = 0x3, .s = 1, .dpl = 3, .p = 1, .avl = 0,    \
-                    .l = 0, .db = 0, .g = 0, .pad = 0 } }).bytes)
-#define vm86_tr_attr (((union segment_attributes) {                     \
-        .fields = { .type = 0xb, .s = 0, .dpl = 0, .p = 1, .avl = 0,    \
-                    .l = 0, .db = 0, .g = 0, .pad = 0 } }).bytes)
+#define rm_cs_attr (((struct segment_register) {                        \
+        .attr = { .type = 0xb, .s = 1, .dpl = 0, .p = 1, .avl = 0,      \
+                    .l = 0, .db = 0, .g = 0, .pad = 0 } }).attr.raw)
+#define rm_ds_attr (((struct segment_register) {                        \
+        .attr = { .type = 0x3, .s = 1, .dpl = 0, .p = 1, .avl = 0,      \
+                  .l = 0, .db = 0, .g = 0, .pad = 0 } }).attr.raw)
+#define vm86_ds_attr (((struct segment_register) {                      \
+        .attr = { .type = 0x3, .s = 1, .dpl = 3, .p = 1, .avl = 0,      \
+                  .l = 0, .db = 0, .g = 0, .pad = 0 } }).attr.raw)
+#define vm86_tr_attr (((struct segment_register) {                      \
+        .attr = { .type = 0xb, .s = 0, .dpl = 0, .p = 1, .avl = 0,      \
+                  .l = 0, .db = 0, .g = 0, .pad = 0 } }).attr.raw)
 
 static void vmx_get_segment_register(struct vcpu *v, enum x86_segment seg,
                                      struct segment_register *reg)
@@ -1156,7 +1156,7 @@ static void vmx_get_segment_register(struct vcpu *v, enum x86_segment seg,
      * Fold VT-x representation into Xen's representation.  The Present bit is
      * unconditionally set to the inverse of unusable.
      */
-    reg->attr.bytes =
+    reg->attr.raw =
         (!(attr & (1u << 16)) << 7) | (attr & 0x7f) | ((attr >> 4) & 0xf00);
 
     /* Adjust for virtual 8086 mode */
@@ -1175,7 +1175,7 @@ static void vmx_get_segment_register(struct vcpu *v, enum x86_segment seg,
              * but for SS we assume it has: the Ubuntu graphical bootloader
              * does this and gets badly confused if we leave the old SS in 
              * place. */
-            reg->attr.bytes = (seg == x86_seg_cs ? rm_cs_attr : rm_ds_attr);
+            reg->attr.raw = (seg == x86_seg_cs ? rm_cs_attr : rm_ds_attr);
             *sreg = *reg;
         }
         else 
@@ -1195,7 +1195,7 @@ static void vmx_set_segment_register(struct vcpu *v, enum x86_segment seg,
     uint64_t base;
 
     sel = reg->sel;
-    attr = reg->attr.bytes;
+    attr = reg->attr.raw;
     limit = reg->limit;
     base = reg->base;
 
@@ -1234,7 +1234,7 @@ static void vmx_set_segment_register(struct vcpu *v, enum x86_segment seg,
              * but otherwise we have to emulate if *any* segment hasn't
              * been reloaded. */
             if ( base < 0x100000 && !(base & 0xf) && limit >= 0xffff
-                 && reg->attr.fields.p )
+                 && reg->attr.p )
             {
                 sel = base >> 4;
                 attr = vm86_ds_attr;
diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index d432198..a166999 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -332,13 +332,13 @@ const struct x86_emulate_ops *shadow_init_emulation(
     creg = hvm_get_seg_reg(x86_seg_cs, sh_ctxt);
 
     /* Work out the emulation mode. */
-    if ( sh_ctxt->ctxt.lma && creg->attr.fields.l )
+    if ( sh_ctxt->ctxt.lma && creg->attr.l )
         sh_ctxt->ctxt.addr_size = sh_ctxt->ctxt.sp_size = 64;
     else
     {
         sreg = hvm_get_seg_reg(x86_seg_ss, sh_ctxt);
-        sh_ctxt->ctxt.addr_size = creg->attr.fields.db ? 32 : 16;
-        sh_ctxt->ctxt.sp_size   = sreg->attr.fields.db ? 32 : 16;
+        sh_ctxt->ctxt.addr_size = creg->attr.db ? 32 : 16;
+        sh_ctxt->ctxt.sp_size   = sreg->attr.db ? 32 : 16;
     }
 
     /* Attempt to prefetch whole instruction. */
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index 52e740f..e4c7c6a 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -1845,7 +1845,7 @@ static int priv_op_read_segment(enum x86_segment seg,
             return X86EMUL_UNHANDLEABLE;
 
         reg->limit = limit;
-        reg->attr.bytes = ar >> 8;
+        reg->attr.raw = ar >> 8;
     }
     else
     {
@@ -1866,19 +1866,19 @@ static int priv_op_read_segment(enum x86_segment seg,
 
         reg->limit = ~0U;
 
-        reg->attr.bytes = 0;
-        reg->attr.fields.type = _SEGMENT_WR >> 8;
+        reg->attr.raw = 0;
+        reg->attr.type = _SEGMENT_WR >> 8;
         if ( seg == x86_seg_cs )
         {
-            reg->attr.fields.type |= _SEGMENT_CODE >> 8;
-            reg->attr.fields.l = 1;
+            reg->attr.type |= _SEGMENT_CODE >> 8;
+            reg->attr.l = 1;
         }
         else
-            reg->attr.fields.db = 1;
-        reg->attr.fields.s   = 1;
-        reg->attr.fields.dpl = 3;
-        reg->attr.fields.p   = 1;
-        reg->attr.fields.g   = 1;
+            reg->attr.db = 1;
+        reg->attr.s   = 1;
+        reg->attr.dpl = 3;
+        reg->attr.p   = 1;
+        reg->attr.g   = 1;
     }
 
     /*
@@ -1887,9 +1887,9 @@ static int priv_op_read_segment(enum x86_segment seg,
      */
     if ( (seg == x86_seg_ss ||
           (seg == x86_seg_cs &&
-           !(reg->attr.fields.type & (_SEGMENT_EC >> 8)))) &&
+           !(reg->attr.type & (_SEGMENT_EC >> 8)))) &&
          guest_kernel_mode(current, ctxt->regs) )
-        reg->attr.fields.dpl = 0;
+        reg->attr.dpl = 0;
 
     return X86EMUL_OKAY;
 }
@@ -2260,11 +2260,11 @@ static int priv_op_rep_ins(uint16_t port,
     if ( rc != X86EMUL_OKAY )
         return rc;
 
-    if ( !sreg.attr.fields.p )
+    if ( !sreg.attr.p )
         return X86EMUL_UNHANDLEABLE;
-    if ( !sreg.attr.fields.s ||
-         (sreg.attr.fields.type & (_SEGMENT_CODE >> 8)) ||
-         !(sreg.attr.fields.type & (_SEGMENT_WR >> 8)) )
+    if ( !sreg.attr.s ||
+         (sreg.attr.type & (_SEGMENT_CODE >> 8)) ||
+         !(sreg.attr.type & (_SEGMENT_WR >> 8)) )
     {
         x86_emul_hw_exception(TRAP_gp_fault, 0, ctxt);
         return X86EMUL_EXCEPTION;
@@ -2325,11 +2325,11 @@ static int priv_op_rep_outs(enum x86_segment seg, unsigned long offset,
     if ( rc != X86EMUL_OKAY )
         return rc;
 
-    if ( !sreg.attr.fields.p )
+    if ( !sreg.attr.p )
         return X86EMUL_UNHANDLEABLE;
-    if ( !sreg.attr.fields.s ||
-         ((sreg.attr.fields.type & (_SEGMENT_CODE >> 8)) &&
-          !(sreg.attr.fields.type & (_SEGMENT_WR >> 8))) )
+    if ( !sreg.attr.s ||
+         ((sreg.attr.type & (_SEGMENT_CODE >> 8)) &&
+          !(sreg.attr.type & (_SEGMENT_WR >> 8))) )
     {
         x86_emul_hw_exception(seg != x86_seg_ss ? TRAP_gp_fault
                                                 : TRAP_stack_error,
diff --git a/xen/arch/x86/vm_event.c b/xen/arch/x86/vm_event.c
index a6ea42c..a66fc9c 100644
--- a/xen/arch/x86/vm_event.c
+++ b/xen/arch/x86/vm_event.c
@@ -176,7 +176,7 @@ void vm_event_fill_regs(vm_event_request_t *req)
     req->data.regs.x86.gs_base = seg.base;
 
     hvm_get_segment_register(curr, x86_seg_cs, &seg);
-    req->data.regs.x86.cs_arbytes = seg.attr.bytes;
+    req->data.regs.x86.cs_arbytes = seg.attr.raw;
 }
 
 void vm_event_emulate_check(struct vcpu *v, vm_event_response_t *rsp)
diff --git a/xen/arch/x86/x86_emulate/x86_emulate.c b/xen/arch/x86/x86_emulate/x86_emulate.c
index 8a30715..3c08ac7 100644
--- a/xen/arch/x86/x86_emulate/x86_emulate.c
+++ b/xen/arch/x86/x86_emulate/x86_emulate.c
@@ -973,7 +973,7 @@ do {                                                                  \
         ASSERT(!ctxt->lma);                                             \
         generate_exception_if((ip) > (cs)->limit, EXC_GP, 0);           \
     } else                                                              \
-        generate_exception_if(ctxt->lma && (cs)->attr.fields.l          \
+        generate_exception_if(ctxt->lma && (cs)->attr.l                 \
                               ? !is_canonical_address(ip)               \
                               : (ip) > (cs)->limit, EXC_GP, 0);         \
 })
@@ -1414,7 +1414,7 @@ get_cpl(
          ops->read_segment(x86_seg_ss, &reg, ctxt) )
         return -1;
 
-    return reg.attr.fields.dpl;
+    return reg.attr.dpl;
 }
 
 static int
@@ -1470,7 +1470,7 @@ static int ioport_access_check(
         return rc == X86EMUL_DONE ? X86EMUL_OKAY : rc;
 
     /* Ensure the TSS has an io-bitmap-offset field. */
-    generate_exception_if(tr.attr.fields.type != 0xb, EXC_GP, 0);
+    generate_exception_if(tr.attr.type != 0xb, EXC_GP, 0);
 
     switch ( rc = read_ulong(x86_seg_tr, 0x66, &iobmp, 2, ctxt, ops) )
     {
@@ -1693,12 +1693,12 @@ protmode_load_seg(
              ops->read_segment(seg, sreg, ctxt) != X86EMUL_OKAY )
             memset(sreg, 0, sizeof(*sreg));
         else
-            sreg->attr.bytes = 0;
+            sreg->attr.raw = 0;
         sreg->sel = sel;
 
         /* Since CPL == SS.DPL, we need to put back DPL. */
         if ( seg == x86_seg_ss )
-            sreg->attr.fields.dpl = sel;
+            sreg->attr.dpl = sel;
 
         return X86EMUL_OKAY;
     }
@@ -1873,10 +1873,10 @@ protmode_load_seg(
                   ((desc.b <<  0) & 0xff000000u) |
                   ((desc.b << 16) & 0x00ff0000u) |
                   ((desc.a >> 16) & 0x0000ffffu));
-    sreg->attr.bytes = (((desc.b >>  8) & 0x00ffu) |
-                        ((desc.b >> 12) & 0x0f00u));
+    sreg->attr.raw = (((desc.b >>  8) & 0x00ffu) |
+                      ((desc.b >> 12) & 0x0f00u));
     sreg->limit = (desc.b & 0x000f0000u) | (desc.a & 0x0000ffffu);
-    if ( sreg->attr.fields.g )
+    if ( sreg->attr.g )
         sreg->limit = (sreg->limit << 12) | 0xfffu;
     sreg->sel = sel;
     return X86EMUL_OKAY;
@@ -4963,9 +4963,9 @@ x86_emulate(
                                             &sreg, ctxt, ops) )
             {
             case X86EMUL_OKAY:
-                if ( sreg.attr.fields.s &&
-                     ((modrm_reg & 1) ? ((sreg.attr.fields.type & 0xa) == 0x2)
-                                      : ((sreg.attr.fields.type & 0xa) != 0x8)) )
+                if ( sreg.attr.s &&
+                     ((modrm_reg & 1) ? ((sreg.attr.type & 0xa) == 0x2)
+                                      : ((sreg.attr.type & 0xa) != 0x8)) )
                     _regs.eflags |= X86_EFLAGS_ZF;
                 break;
             case X86EMUL_EXCEPTION:
@@ -5188,9 +5188,9 @@ x86_emulate(
                                         ctxt, ops) )
         {
         case X86EMUL_OKAY:
-            if ( !sreg.attr.fields.s )
+            if ( !sreg.attr.s )
             {
-                switch ( sreg.attr.fields.type )
+                switch ( sreg.attr.type )
                 {
                 case 0x01: /* available 16-bit TSS */
                 case 0x03: /* busy 16-bit TSS */
@@ -5222,10 +5222,10 @@ x86_emulate(
             break;
         }
         if ( _regs.eflags & X86_EFLAGS_ZF )
-            dst.val = ((sreg.attr.bytes & 0xff) << 8) |
-                      ((sreg.limit >> (sreg.attr.fields.g ? 12 : 0)) &
+            dst.val = ((sreg.attr.raw & 0xff) << 8) |
+                      ((sreg.limit >> (sreg.attr.g ? 12 : 0)) &
                        0xf0000) |
-                      ((sreg.attr.bytes & 0xf00) << 12);
+                      ((sreg.attr.raw & 0xf00) << 12);
         else
             dst.type = OP_NONE;
         break;
@@ -5237,9 +5237,9 @@ x86_emulate(
                                         ctxt, ops) )
         {
         case X86EMUL_OKAY:
-            if ( !sreg.attr.fields.s )
+            if ( !sreg.attr.s )
             {
-                switch ( sreg.attr.fields.type )
+                switch ( sreg.attr.type )
                 {
                 case 0x01: /* available 16-bit TSS */
                 case 0x03: /* busy 16-bit TSS */
@@ -5290,12 +5290,12 @@ x86_emulate(
 
         cs.base = sreg.base = 0; /* flat segment */
         cs.limit = sreg.limit = ~0u;  /* 4GB limit */
-        sreg.attr.bytes = 0xc93; /* G+DB+P+S+Data */
+        sreg.attr.raw = 0xc93; /* G+DB+P+S+Data */
 
 #ifdef __x86_64__
         if ( ctxt->lma )
         {
-            cs.attr.bytes = 0xa9b; /* L+DB+P+S+Code */
+            cs.attr.raw = 0xa9b; /* L+DB+P+S+Code */
 
             _regs.rcx = _regs.rip;
             _regs.r11 = _regs.eflags & ~X86_EFLAGS_RF;
@@ -5313,7 +5313,7 @@ x86_emulate(
         else
 #endif
         {
-            cs.attr.bytes = 0xc9b; /* G+DB+P+S+Code */
+            cs.attr.raw = 0xc9b; /* G+DB+P+S+Code */
 
             _regs.r(cx) = _regs.eip;
             _regs.eip = msr_val;
@@ -5747,13 +5747,13 @@ x86_emulate(
         cs.sel = msr_val & ~3; /* SELECTOR_RPL_MASK */
         cs.base = 0;   /* flat segment */
         cs.limit = ~0u;  /* 4GB limit */
-        cs.attr.bytes = ctxt->lma ? 0xa9b  /* G+L+P+S+Code */
-                                  : 0xc9b; /* G+DB+P+S+Code */
+        cs.attr.raw = ctxt->lma ? 0xa9b  /* G+L+P+S+Code */
+                                : 0xc9b; /* G+DB+P+S+Code */
 
         sreg.sel = cs.sel + 8;
         sreg.base = 0;   /* flat segment */
         sreg.limit = ~0u;  /* 4GB limit */
-        sreg.attr.bytes = 0xc93; /* G+DB+P+S+Data */
+        sreg.attr.raw = 0xc93; /* G+DB+P+S+Data */
 
         fail_if(ops->write_segment == NULL);
         if ( (rc = ops->write_segment(x86_seg_cs, &cs, ctxt)) != 0 ||
@@ -5793,13 +5793,13 @@ x86_emulate(
                  (op_bytes == 8 ? 32 : 16);
         cs.base = 0;   /* flat segment */
         cs.limit = ~0u;  /* 4GB limit */
-        cs.attr.bytes = op_bytes == 8 ? 0xafb  /* L+DB+P+DPL3+S+Code */
-                                      : 0xcfb; /* G+DB+P+DPL3+S+Code */
+        cs.attr.raw = op_bytes == 8 ? 0xafb  /* L+DB+P+DPL3+S+Code */
+                                    : 0xcfb; /* G+DB+P+DPL3+S+Code */
 
         sreg.sel = cs.sel + 8;
         sreg.base = 0;   /* flat segment */
         sreg.limit = ~0u;  /* 4GB limit */
-        sreg.attr.bytes = 0xcf3; /* G+DB+P+DPL3+S+Data */
+        sreg.attr.raw = 0xcf3; /* G+DB+P+DPL3+S+Data */
 
         fail_if(ops->write_segment == NULL);
         if ( (rc = ops->write_segment(x86_seg_cs, &cs, ctxt)) != 0 ||
diff --git a/xen/arch/x86/x86_emulate/x86_emulate.h b/xen/arch/x86/x86_emulate/x86_emulate.h
index e5ec8a6..3355c01 100644
--- a/xen/arch/x86/x86_emulate/x86_emulate.h
+++ b/xen/arch/x86/x86_emulate/x86_emulate.h
@@ -83,33 +83,26 @@ struct x86_event {
     unsigned long cr2;          /* Only for TRAP_page_fault h/w exception */
 };
 
-/* 
- * Attribute for segment selector. This is a copy of bit 40:47 & 52:55 of the
- * segment descriptor. It happens to match the format of an AMD SVM VMCB.
- */
-typedef union segment_attributes {
-    uint16_t bytes;
-    struct
-    {
-        uint16_t type:4;    /* 0;  Bit 40-43 */
-        uint16_t s:   1;    /* 4;  Bit 44 */
-        uint16_t dpl: 2;    /* 5;  Bit 45-46 */
-        uint16_t p:   1;    /* 7;  Bit 47 */
-        uint16_t avl: 1;    /* 8;  Bit 52 */
-        uint16_t l:   1;    /* 9;  Bit 53 */
-        uint16_t db:  1;    /* 10; Bit 54 */
-        uint16_t g:   1;    /* 11; Bit 55 */
-        uint16_t pad: 4;
-    } fields;
-} segment_attributes_t;
-
 /*
  * Full state of a segment register (visible and hidden portions).
  * Again, this happens to match the format of an AMD SVM VMCB.
  */
 struct segment_register {
     uint16_t   sel;
-    segment_attributes_t attr;
+    union {
+        uint16_t raw;
+        struct {
+            uint16_t type:4;    /* 0;  Bit 40-43 */
+            uint16_t s:   1;    /* 4;  Bit 44 */
+            uint16_t dpl: 2;    /* 5;  Bit 45-46 */
+            uint16_t p:   1;    /* 7;  Bit 47 */
+            uint16_t avl: 1;    /* 8;  Bit 52 */
+            uint16_t l:   1;    /* 9;  Bit 53 */
+            uint16_t db:  1;    /* 10; Bit 54 */
+            uint16_t g:   1;    /* 11; Bit 55 */
+            uint16_t pad: 4;
+        };
+    } attr;
     uint32_t   limit;
     uint64_t   base;
 };
-- 
2.1.4

