[Xen-devel] [PATCH] add/fix access rights and limit checks to INS/OUTS emulation in HVM
Since these instructions are documented to take their intercepts before these checks are performed in hardware, the checks must be carried out in software. This patch will not apply without the previously sent one forcing proper use of segment base addresses. Functionality tested on 3.0.3; compile tested on -unstable.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>
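As a point of reference, the following standalone sketch shows the class of check the patch introduces; it is not part of the patch, and the type and function names (string_seg_t, string_io_seg_ok) are invented for illustration. OUTS reads its memory operand through DS (or a segment-override prefix), so that segment must not be an execute-only code segment; INS stores through ES, which must be a writable data segment; and every byte touched must satisfy the limit check, which is inverted for expand-down data segments.

/* Illustrative only -- not from the patch. Types and names are invented. */
#include <stdint.h>
#include <stdbool.h>

typedef struct {
    uint32_t limit;       /* byte-granular limit, for simplicity */
    uint8_t  type;        /* descriptor type bits (bits 8-11) */
    bool     present;
    bool     expand_down; /* data segment with the E bit set */
} string_seg_t;

/* Return true if the access is architecturally allowed, false if the
   emulator would have to inject #GP instead. */
static bool string_io_seg_ok(const string_seg_t *seg, uint32_t offset,
                             unsigned int size, bool mem_write)
{
    if ( !seg->present )
        return false;

    /* INS writes ES:(E)DI, so ES must be writable data ((type & 0xa) == 0x2);
       OUTS reads its source segment, which must not be execute-only code
       ((type & 0xa) == 0x8). */
    if ( mem_write ? (seg->type & 0xa) != 0x2
                   : (seg->type & 0xa) == 0x8 )
        return false;

    /* Expand-up segments allow offsets in [0, limit]; expand-down data
       segments allow only offsets above the limit. Also reject wrap-around. */
    if ( offset + size - 1 < offset )
        return false;
    return seg->expand_down ? offset > seg->limit
                            : offset + size - 1 <= seg->limit;
}

The hunks below apply the same classes of checks to the VMCB (SVM) and VMCS (VMX) segment state, injecting #GP into the guest when one fails.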
Index: 2006-11-27/xen/arch/x86/hvm/svm/svm.c
===================================================================
--- 2006-11-27.orig/xen/arch/x86/hvm/svm/svm.c	2006-11-28 09:43:47.000000000 +0100
+++ 2006-11-27/xen/arch/x86/hvm/svm/svm.c	2006-11-28 11:01:57.000000000 +0100
@@ -1233,8 +1233,7 @@ static inline int svm_get_io_address(
     unsigned long *count, unsigned long *addr)
 {
     unsigned long reg;
-    unsigned int asize = 0;
-    unsigned int isize;
+    unsigned int asize, isize;
     int long_mode = 0;
     segment_selector_t *seg = NULL;
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
@@ -1267,17 +1266,25 @@ static inline int svm_get_io_address(
         reg = regs->esi;
         if (!seg)               /* If no prefix, used DS. */
             seg = &vmcb->ds;
+        if (!long_mode && (seg->attributes.fields.type & 0xa) == 0x8) {
+            svm_inject_exception(v, TRAP_gp_fault, 1, 0);
+            return 0;
+        }
     }
     else
     {
         reg = regs->edi;
         seg = &vmcb->es;        /* Note: This is ALWAYS ES. */
+        if (!long_mode && (seg->attributes.fields.type & 0xa) != 0x2) {
+            svm_inject_exception(v, TRAP_gp_fault, 1, 0);
+            return 0;
+        }
     }
 
     /* If the segment isn't present, give GP fault! */
     if (!long_mode && !seg->attributes.fields.p)
     {
-        svm_inject_exception(v, TRAP_gp_fault, 1, seg->sel);
+        svm_inject_exception(v, TRAP_gp_fault, 1, 0);
         return 0;
     }
 
@@ -1294,16 +1301,59 @@ static inline int svm_get_io_address(
     if (!info.fields.rep)
         *count = 1;
 
-    if (!long_mode) {
-        if (*addr > seg->limit)
+    if (!long_mode)
+    {
+        ASSERT(*addr == (u32)*addr);
+        if ((u32)(*addr + size - 1) < (u32)*addr ||
+            (seg->attributes.fields.type & 0xc) != 0x4 ?
+            *addr + size - 1 > seg->limit :
+            *addr <= seg->limit)
         {
-            svm_inject_exception(v, TRAP_gp_fault, 1, seg->sel);
+            svm_inject_exception(v, TRAP_gp_fault, 1, 0);
             return 0;
-        }
-        else
+        }
+
+        /* Check the limit for repeated instructions, as above we checked only
+           the first instance. Truncate the count if a limit violation would
+           occur. Note that the checking is not necessary for page granular
+           segments as transfers crossing page boundaries will be broken up
+           anyway. */
+        if (!seg->attributes.fields.g && *count > 1)
         {
-            *addr += seg->base;
+            if ((seg->attributes.fields.type & 0xc) != 0x4)
+            {
+                /* expand-up */
+                if (!(regs->eflags & EF_DF))
+                {
+                    if (*addr + *count * size - 1 < *addr ||
+                        *addr + *count * size - 1 > seg->limit)
+                        *count = (seg->limit + 1UL - *addr) / size;
+                }
+                else
+                {
+                    if (*count - 1 > *addr / size)
+                        *count = *addr / size + 1;
+                }
+            }
+            else
+            {
+                /* expand-down */
+                if (!(regs->eflags & EF_DF))
+                {
+                    if (*count - 1 > -(s32)*addr / size)
+                        *count = -(s32)*addr / size + 1UL;
+                }
+                else
+                {
+                    if (*addr < (*count - 1) * size ||
+                        *addr - (*count - 1) * size <= seg->limit)
+                        *count = (*addr - seg->limit - 1) / size + 1;
+                }
+            }
+            ASSERT(*count);
         }
+
+        *addr += seg->base;
     }
     else if (seg == &vmcb->fs || seg == &vmcb->gs)
         *addr += seg->base;
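The count truncation done above for repeated instructions can be illustrated in isolation. This sketch covers only the simplest case (expand-up segment, direction flag clear) and mirrors the *count = (seg->limit + 1UL - *addr) / size computation; the function name and the numbers in the example are made up.

#include <stdint.h>
#include <stdio.h>

/* Clip a REP count so that no element extends past an expand-up,
   byte-granular segment limit (direction flag clear). */
static unsigned long clip_rep_count_up(uint32_t offset, uint32_t limit,
                                       unsigned long count, unsigned int size)
{
    if ( offset + count * size - 1 < offset ||   /* computation would wrap */
         offset + count * size - 1 > limit )     /* would exceed the limit */
        count = (limit + 1UL - offset) / size;   /* whole elements that fit */
    return count;
}

int main(void)
{
    /* REP OUTSW (2-byte elements) at offset 0xFFC with limit 0xFFF:
       only two elements fit, so a request for 10 iterations is clipped to 2. */
    printf("%lu\n", clip_rep_count_up(0xFFC, 0xFFF, 10, 2));
    return 0;
}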
Index: 2006-11-27/xen/arch/x86/hvm/vmx/vmx.c
===================================================================
--- 2006-11-27.orig/xen/arch/x86/hvm/vmx/vmx.c	2006-11-28 10:34:07.000000000 +0100
+++ 2006-11-27/xen/arch/x86/hvm/vmx/vmx.c	2006-11-28 11:14:04.000000000 +0100
@@ -958,12 +958,13 @@ static void vmx_do_invlpg(unsigned long
 static int vmx_check_descriptor(int long_mode, unsigned long eip, int inst_len,
-                                enum segment seg, unsigned long *base)
+                                enum segment seg, unsigned long *base,
+                                u32 *limit, u32 *ar_bytes)
 {
-    enum vmcs_field ar_field, base_field;
-    u32 ar_bytes;
+    enum vmcs_field ar_field, base_field, limit_field;
 
     *base = 0;
+    *limit = 0;
     if ( seg != seg_es )
     {
         unsigned char inst[MAX_INST_LEN];
@@ -1020,26 +1021,32 @@ static int vmx_check_descriptor(int long
     case seg_cs:
         ar_field = GUEST_CS_AR_BYTES;
         base_field = GUEST_CS_BASE;
+        limit_field = GUEST_CS_LIMIT;
         break;
     case seg_ds:
         ar_field = GUEST_DS_AR_BYTES;
         base_field = GUEST_DS_BASE;
+        limit_field = GUEST_DS_LIMIT;
         break;
     case seg_es:
         ar_field = GUEST_ES_AR_BYTES;
         base_field = GUEST_ES_BASE;
+        limit_field = GUEST_ES_LIMIT;
         break;
     case seg_fs:
         ar_field = GUEST_FS_AR_BYTES;
         base_field = GUEST_FS_BASE;
+        limit_field = GUEST_FS_LIMIT;
         break;
     case seg_gs:
         ar_field = GUEST_FS_AR_BYTES;
         base_field = GUEST_FS_BASE;
+        limit_field = GUEST_FS_LIMIT;
         break;
     case seg_ss:
         ar_field = GUEST_GS_AR_BYTES;
         base_field = GUEST_GS_BASE;
+        limit_field = GUEST_GS_LIMIT;
        break;
     default: BUG();
     }
@@ -1047,10 +1054,13 @@ static int vmx_check_descriptor(int long
 
     if ( !long_mode || seg == seg_fs || seg == seg_gs )
+    {
         *base = __vmread(base_field);
-    ar_bytes = __vmread(ar_field);
+        *limit = __vmread(limit_field);
+    }
+    *ar_bytes = __vmread(ar_field);
 
-    return !(ar_bytes & 0x10000);
+    return !(*ar_bytes & 0x10000);
 }
 
 static void vmx_io_instruction(unsigned long exit_qualification,
@@ -1090,7 +1100,7 @@ static void vmx_io_instruction(unsigned
     if ( test_bit(4, &exit_qualification) ) { /* string instruction */
         unsigned long addr, count = 1, base;
-        u32 ar_bytes;
+        u32 ar_bytes, limit;
         int sign = regs->eflags & X86_EFLAGS_DF ? -1 : 1;
         int long_mode = 0;
@@ -1101,20 +1111,86 @@ static void vmx_io_instruction(unsigned
 #endif
         addr = __vmread(GUEST_LINEAR_ADDRESS);
 
+        if ( test_bit(5, &exit_qualification) ) { /* "rep" prefix */
+            pio_opp->flags |= REPZ;
+            count = regs->ecx;
+            if ( !long_mode && (vm86 || !(ar_bytes & (1u<<14))) )
+                count &= 0xFFFF;
+        }
+
         /*
          * In protected mode, guest linear address is invalid if the
         * selector is null.
         */
        if ( !vmx_check_descriptor(long_mode, regs->eip, inst_len,
                                   dir == IOREQ_WRITE ? seg_ds : seg_es,
-                                   &base) )
+                                   &base, &limit, &ar_bytes) ) {
+            if ( !long_mode ) {
+                vmx_inject_hw_exception(current, TRAP_gp_fault, 0);
+                return;
+            }
             addr = dir == IOREQ_WRITE ? base + regs->esi : regs->edi;
+        }
 
-        if ( test_bit(5, &exit_qualification) ) { /* "rep" prefix */
-            pio_opp->flags |= REPZ;
-            count = regs->ecx;
-            if ( !long_mode && (vm86 || !(ar_bytes & (1u<<14))) )
-                count &= 0xFFFF;
+        if ( !long_mode ) {
+            unsigned long ea = addr - base;
+
+            /* Segment must be readable for outs and writeable for ins. */
+            if ( dir == IOREQ_WRITE ? (ar_bytes & 0xa) == 0x8
+                                    : (ar_bytes & 0xa) != 0x2 ) {
+                vmx_inject_hw_exception(current, TRAP_gp_fault, 0);
+                return;
+            }
+
+            /* Offset must be within limits. */
+            ASSERT(ea == (u32)ea);
+            if ( (u32)(ea + size - 1) < (u32)ea ||
+                 (ar_bytes & 0xc) != 0x4 ? ea + size - 1 > limit
+                                         : ea <= limit )
+            {
+                vmx_inject_hw_exception(current, TRAP_gp_fault, 0);
+                return;
+            }
+
+            /* Check the limit for repeated instructions, as above we checked
+               only the first instance. Truncate the count if a limit violation
+               would occur. Note that the checking is not necessary for page
+               granular segments as transfers crossing page boundaries will be
+               broken up anyway. */
+            if ( !(ar_bytes & (1u<<15)) && count > 1 )
+            {
+                if ( (ar_bytes & 0xc) != 0x4 )
+                {
+                    /* expand-up */
+                    if ( !df )
+                    {
+                        if ( ea + count * size - 1 < ea ||
+                             ea + count * size - 1 > limit )
+                            count = (limit + 1UL - ea) / size;
+                    }
+                    else
+                    {
+                        if ( count - 1 > ea / size )
+                            count = ea / size + 1;
+                    }
+                }
+                else
+                {
+                    /* expand-down */
+                    if ( !df )
+                    {
+                        if ( count - 1 > -(s32)ea / size )
+                            count = -(s32)ea / size + 1UL;
+                    }
+                    else
+                    {
+                        if ( ea < (count - 1) * size ||
+                             ea - (count - 1) * size <= limit )
+                            count = (ea - limit - 1) / size + 1;
+                    }
+                }
+                ASSERT(count);
+            }
         }
 
         /*
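The least obvious branch in both files is the expand-down one: for an expand-down data segment the valid offsets are those above the limit, so a backward (DF=1) repeat must be cut off before the offset drops to the limit or below. A small standalone illustration of that clipping, again with invented names and numbers:

#include <stdint.h>
#include <stdio.h>

/* Clip a backward (DF=1) REP count on an expand-down, byte-granular
   segment, where only offsets strictly above 'limit' are valid. */
static unsigned long clip_rep_count_down(uint32_t offset, uint32_t limit,
                                         unsigned long count, unsigned int size)
{
    if ( offset < (count - 1) * size ||          /* offset would underflow */
         offset - (count - 1) * size <= limit )  /* last element too low */
        count = (offset - limit - 1) / size + 1; /* elements still valid */
    return count;
}

int main(void)
{
    /* limit 0x0FFF: offsets 0x1000 and up are valid.  Starting at 0x1008
       and stepping down by 4 each iteration, only 0x1008, 0x1004 and
       0x1000 are legal, so a request for 10 iterations is clipped to 3. */
    printf("%lu\n", clip_rep_count_down(0x1008, 0x0FFF, 10, 4));
    return 0;
}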