[Xen-devel] [PATCH v2] x86: synchronize PCI config space access decoding
Both PV and HVM logic have similar but not similar enough code here.
Synchronize the two so that
- in the HVM case we don't unconditionally try to access extended
  config space,
- in the PV case we pass a correct range to the XSM hook,
- in the PV case we don't needlessly deny access when the operation
  isn't really on PCI config space.
All this along with sharing the macros HVM already had here.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
---
v2: hvm_select_ioreq_server() no longer (directly) depends on host
    properties.

--- a/xen/arch/x86/cpu/amd.c
+++ b/xen/arch/x86/cpu/amd.c
@@ -620,7 +620,7 @@ static void __devinit init_amd(struct cp
 	check_syscfg_dram_mod_en();
 }
 
-static struct cpu_dev amd_cpu_dev __cpuinitdata = {
+static const struct cpu_dev amd_cpu_dev = {
 	.c_vendor	= "AMD",
 	.c_ident	= { "AuthenticAMD" },
 	.c_init		= init_amd,
--- a/xen/arch/x86/cpu/centaur.c
+++ b/xen/arch/x86/cpu/centaur.c
@@ -60,7 +60,7 @@ static void __init init_centaur(struct c
 		init_c3(c);
 }
 
-static struct cpu_dev centaur_cpu_dev __cpuinitdata = {
+static const struct cpu_dev centaur_cpu_dev = {
 	.c_vendor	= "Centaur",
 	.c_ident	= { "CentaurHauls" },
 	.c_init		= init_centaur,
--- a/xen/arch/x86/cpu/common.c
+++ b/xen/arch/x86/cpu/common.c
@@ -35,7 +35,7 @@ integer_param("cpuid_mask_ext_ecx", opt_
 unsigned int __devinitdata opt_cpuid_mask_ext_edx = ~0u;
 integer_param("cpuid_mask_ext_edx", opt_cpuid_mask_ext_edx);
 
-struct cpu_dev * cpu_devs[X86_VENDOR_NUM] = {};
+const struct cpu_dev *__read_mostly cpu_devs[X86_VENDOR_NUM] = {};
 
 unsigned int paddr_bits __read_mostly = 36;
 
@@ -61,11 +61,11 @@ static void default_init(struct cpuinfo_
 		__clear_bit(X86_FEATURE_SEP, c->x86_capability);
 }
 
-static struct cpu_dev default_cpu = {
+static const struct cpu_dev default_cpu = {
 	.c_init	= default_init,
 	.c_vendor = "Unknown",
 };
-static struct cpu_dev * this_cpu = &default_cpu;
+static const struct cpu_dev *this_cpu = &default_cpu;
 
 bool_t opt_cpu_info;
 boolean_param("cpuinfo", opt_cpu_info);
@@ -126,9 +126,8 @@ void __cpuinit display_cacheinfo(struct
 		      l2size, ecx & 0xFF);
 }
 
-static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c, int early)
+int get_cpu_vendor(const char v[], enum get_cpu_vendor mode)
 {
-	char *v = c->x86_vendor_id;
 	int i;
 	static int printed;
 
@@ -137,20 +136,22 @@ static void __cpuinit get_cpu_vendor(str
 			if (!strcmp(v,cpu_devs[i]->c_ident[0]) ||
 			    (cpu_devs[i]->c_ident[1] &&
 			     !strcmp(v,cpu_devs[i]->c_ident[1]))) {
-				c->x86_vendor = i;
-				if (!early)
+				if (mode == gcv_host_late)
 					this_cpu = cpu_devs[i];
-				return;
+				return i;
 			}
 		}
 	}
+	if (mode == gcv_guest)
+		return X86_VENDOR_UNKNOWN;
 	if (!printed) {
 		printed++;
 		printk(KERN_ERR "CPU: Vendor unknown, using generic init.\n");
 		printk(KERN_ERR "CPU: Your system may be unstable.\n");
 	}
-	c->x86_vendor = X86_VENDOR_UNKNOWN;
 	this_cpu = &default_cpu;
+
+	return X86_VENDOR_UNKNOWN;
 }
 
 static inline u32 _phys_pkg_id(u32 cpuid_apic, int index_msb)
@@ -189,7 +190,7 @@ static void __init early_cpu_detect(void
 	      (int *)&c->x86_vendor_id[8],
 	      (int *)&c->x86_vendor_id[4]);
 
-	get_cpu_vendor(c, 1);
+	c->x86_vendor = get_cpu_vendor(c->x86_vendor_id, gcv_host_early);
 
 	cpuid(0x00000001, &tfms, &misc, &cap4, &cap0);
 	c->x86 = (tfms >> 8) & 15;
@@ -218,7 +219,7 @@ static void __cpuinit generic_identify(s
 	      (int *)&c->x86_vendor_id[8],
 	      (int *)&c->x86_vendor_id[4]);
 
-	get_cpu_vendor(c, 0);
+	c->x86_vendor = get_cpu_vendor(c->x86_vendor_id, gcv_host_late);
 
 	/* Initialize the standard set of capabilities */
 	/* Note that the vendor-specific code below might override */
--- a/xen/arch/x86/cpu/cpu.h
+++ b/xen/arch/x86/cpu/cpu.h
@@ -8,7 +8,7 @@ struct cpu_dev {
 	void		(*c_init)(struct cpuinfo_x86 * c);
 };
 
-extern struct cpu_dev * cpu_devs [X86_VENDOR_NUM];
+extern const struct cpu_dev *cpu_devs[X86_VENDOR_NUM];
 
 extern bool_t opt_arat;
 extern unsigned int opt_cpuid_mask_ecx, opt_cpuid_mask_edx;
--- a/xen/arch/x86/cpu/intel.c
+++ b/xen/arch/x86/cpu/intel.c
@@ -286,7 +286,7 @@ static void __devinit init_intel(struct
 		set_bit(X86_FEATURE_ARAT, c->x86_capability);
 }
 
-static struct cpu_dev intel_cpu_dev __cpuinitdata = {
+static const struct cpu_dev intel_cpu_dev = {
 	.c_vendor	= "Intel",
 	.c_ident	= { "GenuineIntel" },
 	.c_init		= init_intel,
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -579,6 +579,10 @@ int arch_domain_create(struct domain *d,
             d->arch.cpuids[i].input[1] = XEN_CPUID_INPUT_UNUSED;
         }
 
+        d->arch.x86_vendor = boot_cpu_data.x86_vendor;
+        d->arch.x86 = boot_cpu_data.x86;
+        d->arch.x86_model = boot_cpu_data.x86_model;
+
         d->arch.ioport_caps =
             rangeset_new(d, "I/O Ports", RANGESETF_prettyprint_hex);
         rc = -ENOMEM;
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -695,6 +695,38 @@ long arch_do_domctl(
             *unused = *ctl;
         else
             ret = -ENOENT;
+
+        if ( !ret )
+        {
+            switch ( ctl->input[0] )
+            {
+            case 0: {
+                union {
+                    typeof(boot_cpu_data.x86_vendor_id) str;
+                    struct {
+                        uint32_t ebx, edx, ecx;
+                    } reg;
+                } vendor_id = {
+                    .reg = {
+                        .ebx = ctl->ebx,
+                        .edx = ctl->edx,
+                        .ecx = ctl->ecx
+                    }
+                };
+
+                d->arch.x86_vendor = get_cpu_vendor(vendor_id.str, gcv_guest);
+                break;
+            }
+            case 1:
+                d->arch.x86 = (ctl->eax >> 8) & 0xf;
+                if ( d->arch.x86 == 0xf )
+                    d->arch.x86 += (ctl->eax >> 20) & 0xff;
+                d->arch.x86_model = (ctl->eax >> 4) & 0xf;
+                if ( d->arch.x86 >= 0x6 )
+                    d->arch.x86_model |= (ctl->eax >> 12) & 0xf0;
+                break;
+            }
+        }
         break;
     }
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -2406,11 +2406,6 @@ void hvm_vcpu_down(struct vcpu *v)
 struct hvm_ioreq_server *hvm_select_ioreq_server(struct domain *d,
                                                  ioreq_t *p)
 {
-#define CF8_BDF(cf8)     (((cf8) & 0x00ffff00) >> 8)
-#define CF8_ADDR_LO(cf8) ((cf8) & 0x000000fc)
-#define CF8_ADDR_HI(cf8) (((cf8) & 0x0f000000) >> 16)
-#define CF8_ENABLED(cf8) (!!((cf8) & 0x80000000))
-
     struct hvm_ioreq_server *s;
     uint32_t cf8;
     uint8_t type;
@@ -2439,9 +2434,19 @@ struct hvm_ioreq_server *hvm_select_iore
         type = IOREQ_TYPE_PCI_CONFIG;
         addr = ((uint64_t)sbdf << 32) |
-               CF8_ADDR_HI(cf8) |
                CF8_ADDR_LO(cf8) |
                (p->addr & 3);
+        /* AMD extended configuration space access? */
+        if ( CF8_ADDR_HI(cf8) &&
+             d->arch.x86_vendor == X86_VENDOR_AMD &&
+             d->arch.x86 >= 0x10 && d->arch.x86 <= 0x17 )
+        {
+            uint64_t msr_val;
+
+            if ( !rdmsr_safe(MSR_AMD64_NB_CFG, msr_val) &&
+                 (msr_val & (1ULL << AMD64_NB_CFG_CF8_EXT_ENABLE_BIT)) )
+                addr |= CF8_ADDR_HI(cf8);
+        }
     }
     else
     {
@@ -2495,11 +2500,6 @@ struct hvm_ioreq_server *hvm_select_iore
     }
 
     return d->arch.hvm_domain.default_ioreq_server;
-
-#undef CF8_ADDR_ENABLED
-#undef CF8_ADDR_HI
-#undef CF8_ADDR_LO
-#undef CF8_BDF
 }
 
 int hvm_buffered_io_send(ioreq_t *p)
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -1771,15 +1771,18 @@ static bool_t admin_io_okay(unsigned int
     return ioports_access_permitted(d, port, port + bytes - 1);
 }
 
-static bool_t pci_cfg_ok(struct domain *currd, bool_t write, unsigned int size)
+static bool_t pci_cfg_ok(struct domain *currd, bool_t write,
+                         unsigned int start, unsigned int size)
 {
     uint32_t machine_bdf;
-    unsigned int start;
 
     if ( !is_hardware_domain(currd) )
         return 0;
 
-    machine_bdf = (currd->arch.pci_cf8 >> 8) & 0xFFFF;
+    if ( !CF8_ENABLED(currd->arch.pci_cf8) )
+        return 1;
+
+    machine_bdf = CF8_BDF(currd->arch.pci_cf8);
     if ( write )
    {
         const unsigned long *ro_map = pci_get_ro_map(0);
@@ -1787,9 +1790,9 @@ static bool_t pci_cfg_ok(struct domain *
         if ( ro_map && test_bit(machine_bdf, ro_map) )
             return 0;
     }
-    start = currd->arch.pci_cf8 & 0xFF;
+    start |= CF8_ADDR_LO(currd->arch.pci_cf8);
     /* AMD extended configuration space access? */
-    if ( (currd->arch.pci_cf8 & 0x0F000000) &&
+    if ( CF8_ADDR_HI(currd->arch.pci_cf8) &&
          boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
          boot_cpu_data.x86 >= 0x10 && boot_cpu_data.x86 <= 0x17 )
     {
@@ -1798,7 +1801,7 @@ static bool_t pci_cfg_ok(struct domain *
         if ( rdmsr_safe(MSR_AMD64_NB_CFG, msr_val) )
             return 0;
         if ( msr_val & (1ULL << AMD64_NB_CFG_CF8_EXT_ENABLE_BIT) )
-            start |= (currd->arch.pci_cf8 >> 16) & 0xF00;
+            start |= CF8_ADDR_HI(currd->arch.pci_cf8);
     }
 
     return !xsm_pci_config_permission(XSM_HOOK, currd, machine_bdf,
@@ -1854,7 +1857,7 @@ uint32_t guest_io_read(unsigned int port
             size = min(bytes, 4 - (port & 3));
             if ( size == 3 ) size = 2;
-            if ( pci_cfg_ok(currd, 0, size) )
+            if ( pci_cfg_ok(currd, 0, port & 3, size) )
                 sub_data = pci_conf_read(currd->arch.pci_cf8, port & 3, size);
         }
@@ -1925,7 +1928,7 @@ void guest_io_write(unsigned int port, u
             size = min(bytes, 4 - (port & 3));
             if ( size == 3 ) size = 2;
-            if ( pci_cfg_ok(currd, 1, size) )
+            if ( pci_cfg_ok(currd, 1, port & 3, size) )
                 pci_conf_write(currd->arch.pci_cf8, port & 3, size, data);
         }
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -308,6 +308,11 @@ struct arch_domain
     /* Is PHYSDEVOP_eoi to automatically unmask the event channel? */
     bool_t auto_unmask;
 
+    /* Values snooped from updates to cpuids[] (below). */
+    u8 x86;                  /* CPU family */
+    u8 x86_vendor;           /* CPU vendor */
+    u8 x86_model;            /* CPU model */
+
     cpuid_input_t *cpuids;
 
     struct PITState vpit;
--- a/xen/include/asm-x86/pci.h
+++ b/xen/include/asm-x86/pci.h
@@ -1,6 +1,11 @@
 #ifndef __X86_PCI_H__
 #define __X86_PCI_H__
 
+#define CF8_BDF(cf8)     (((cf8) & 0x00ffff00) >> 8)
+#define CF8_ADDR_LO(cf8) ((cf8) & 0x000000fc)
+#define CF8_ADDR_HI(cf8) (((cf8) & 0x0f000000) >> 16)
+#define CF8_ENABLED(cf8) (!!((cf8) & 0x80000000))
+
 #define IS_SNB_GFX(id) (id == 0x01068086 || id == 0x01168086 \
     || id == 0x01268086 || id == 0x01028086 \
     || id == 0x01128086 || id == 0x01228086 \
--- a/xen/include/asm-x86/processor.h
+++ b/xen/include/asm-x86/processor.h
@@ -561,6 +561,13 @@ void microcode_set_module(unsigned int);
 int microcode_update(XEN_GUEST_HANDLE_PARAM(const_void), unsigned long len);
 int microcode_resume_cpu(int cpu);
 
+enum get_cpu_vendor {
+    gcv_host_early,
+    gcv_host_late,
+    gcv_guest
+};
+
+int get_cpu_vendor(const char vendor_id[], enum get_cpu_vendor);
 void pv_cpuid(struct cpu_user_regs *regs);
 
 #endif /* !__ASSEMBLY__ */
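Background note (not part of the patch): under configuration mechanism #1, the value last written to I/O port 0xcf8 selects which device and register subsequent accesses to ports 0xcfc-0xcff reach, and the CF8_* macros moved to asm-x86/pci.h above perform that decoding for both the PV and the HVM path. A minimal, standalone sketch of the decoding (the macros are copied verbatim from the hunk above; the example cf8 value and the program around it are made up purely for illustration):

/* Illustration only: decode a made-up 0xcf8 value the way the patch does. */
#include <stdint.h>
#include <stdio.h>

#define CF8_BDF(cf8)     (((cf8) & 0x00ffff00) >> 8)   /* bus:dev.fn of the target */
#define CF8_ADDR_LO(cf8) ((cf8) & 0x000000fc)          /* register offset, bits 2-7 */
#define CF8_ADDR_HI(cf8) (((cf8) & 0x0f000000) >> 16)  /* extended cfg space, bits 8-11 */
#define CF8_ENABLED(cf8) (!!((cf8) & 0x80000000))      /* decode-enable bit */

int main(void)
{
    /* Made-up example: bus 3, device 0x12, function 1, register 0x44. */
    uint32_t cf8 = 0x80000000u | (3u << 16) | (0x12u << 11) | (1u << 8) | 0x44u;
    unsigned int port_off = 2;  /* low two bits come from which of 0xcfc-0xcff is used */

    if ( CF8_ENABLED(cf8) )
        printf("bdf %04x reg %#x\n",
               (unsigned int)CF8_BDF(cf8),
               (unsigned int)(CF8_ADDR_HI(cf8) | CF8_ADDR_LO(cf8) | port_off));
    return 0;
}

With the patch applied, both paths fold CF8_ADDR_HI() into the register offset only after checking for an AMD family 10h-17h CPU with the extended-CF8 enable bit set in MSR_AMD64_NB_CFG, which is the decoding the two code paths are being synchronized on.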
Attachment: x86-CF8-decode.patch
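Similarly, the domctl hunk snoops the EAX value the guest will see for CPUID leaf 1 in order to record a per-domain family and model. A small standalone sketch of that derivation (illustration only, with a made-up EAX value; the arithmetic mirrors the patch's case 1 handling):

/* Illustration only: derive family/model from a made-up CPUID leaf 1 EAX. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Made-up value: extended family 6, extended model 1, base family 0xf, base model 2. */
    uint32_t eax = 0x00610f20u;
    unsigned int family = (eax >> 8) & 0xf;
    unsigned int model  = (eax >> 4) & 0xf;

    if ( family == 0xf )            /* extended family is only added to base family 0xf */
        family += (eax >> 20) & 0xff;
    if ( family >= 0x6 )            /* extended model applies from family 6 onwards */
        model |= (eax >> 12) & 0xf0;

    printf("family %#x model %#x\n", family, model);  /* prints family 0x15 model 0x12 */
    return 0;
}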