|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [xen staging] x86/svm: Drop the _enabled suffix from vmcb bits
commit a6081d0176d558a96678430d9f0602e5918cd0de
Author: Vaishali Thakkar <vaishali.thakkar@xxxxxxxxxx>
AuthorDate: Thu Mar 14 13:54:43 2024 +0100
Commit: Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Thu Mar 14 13:54:43 2024 +0100
x86/svm: Drop the _enabled suffix from vmcb bits
The suffix is redundant for np/sev/sev-es bits. Drop it
to avoid adding extra code volume.
Also, while we're here, drop the double negations in one
of the instances of _np bit, replace 0/1 with false/true
in the use cases of _np and use VMCB accessors instead
of open coding.
Suggested-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Signed-off-by: Vaishali Thakkar <vaishali.thakkar@xxxxxxxxxx>
Reviewed-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
xen/arch/x86/hvm/svm/nestedsvm.c | 14 +++++++-------
xen/arch/x86/hvm/svm/svm.c | 2 +-
xen/arch/x86/hvm/svm/vmcb.c | 2 +-
xen/arch/x86/include/asm/hvm/svm/vmcb.h | 20 ++++++++++----------
4 files changed, 19 insertions(+), 19 deletions(-)
diff --git a/xen/arch/x86/hvm/svm/nestedsvm.c b/xen/arch/x86/hvm/svm/nestedsvm.c
index e4e01add8c..07630d74d3 100644
--- a/xen/arch/x86/hvm/svm/nestedsvm.c
+++ b/xen/arch/x86/hvm/svm/nestedsvm.c
@@ -571,7 +571,7 @@ static int nsvm_vmcb_prepare4vmrun(struct vcpu *v, struct cpu_user_regs *regs)
if ( nestedhvm_paging_mode_hap(v) )
{
/* host nested paging + guest nested paging. */
- n2vmcb->_np_enable = 1;
+ vmcb_set_np(n2vmcb, true);
nestedsvm_vmcb_set_nestedp2m(v, ns_vmcb, n2vmcb);
@@ -585,7 +585,7 @@ static int nsvm_vmcb_prepare4vmrun(struct vcpu *v, struct cpu_user_regs *regs)
else if ( paging_mode_hap(v->domain) )
{
/* host nested paging + guest shadow paging. */
- n2vmcb->_np_enable = 1;
+ vmcb_set_np(n2vmcb, true);
/* Keep h_cr3 as it is. */
n2vmcb->_h_cr3 = n1vmcb->_h_cr3;
/* When l1 guest does shadow paging
@@ -601,7 +601,7 @@ static int nsvm_vmcb_prepare4vmrun(struct vcpu *v, struct cpu_user_regs *regs)
else
{
/* host shadow paging + guest shadow paging. */
- n2vmcb->_np_enable = 0;
+ vmcb_set_np(n2vmcb, false);
n2vmcb->_h_cr3 = 0x0;
/* TODO: Once shadow-shadow paging is in place come back to here
@@ -706,7 +706,7 @@ nsvm_vcpu_vmentry(struct vcpu *v, struct cpu_user_regs *regs,
}
/* nested paging for the guest */
- svm->ns_hap_enabled = !!ns_vmcb->_np_enable;
+ svm->ns_hap_enabled = vmcb_get_np(ns_vmcb);
/* Remember the V_INTR_MASK in hostflags */
svm->ns_hostflags.fields.vintrmask = !!ns_vmcb->_vintr.fields.intr_masking;
@@ -1084,7 +1084,7 @@ nsvm_vmcb_prepare4vmexit(struct vcpu *v, struct cpu_user_regs *regs)
if ( nestedhvm_paging_mode_hap(v) )
{
/* host nested paging + guest nested paging. */
- ns_vmcb->_np_enable = n2vmcb->_np_enable;
+ vmcb_set_np(ns_vmcb, vmcb_get_np(n2vmcb));
ns_vmcb->_cr3 = n2vmcb->_cr3;
/* The vmcb->h_cr3 is the shadowed h_cr3. The original
* unshadowed guest h_cr3 is kept in ns_vmcb->h_cr3,
@@ -1093,7 +1093,7 @@ nsvm_vmcb_prepare4vmexit(struct vcpu *v, struct cpu_user_regs *regs)
else if ( paging_mode_hap(v->domain) )
{
/* host nested paging + guest shadow paging. */
- ns_vmcb->_np_enable = 0;
+ vmcb_set_np(ns_vmcb, false);
/* Throw h_cr3 away. Guest is not allowed to set it or
* it can break out, otherwise (security hole!) */
ns_vmcb->_h_cr3 = 0x0;
@@ -1104,7 +1104,7 @@ nsvm_vmcb_prepare4vmexit(struct vcpu *v, struct cpu_user_regs *regs)
else
{
/* host shadow paging + guest shadow paging. */
- ns_vmcb->_np_enable = 0;
+ vmcb_set_np(ns_vmcb, false);
ns_vmcb->_h_cr3 = 0x0;
/* The vmcb->_cr3 is the shadowed cr3. The original
* unshadowed guest cr3 is kept in ns_vmcb->_cr3,
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index b551eac807..b1ab0b568b 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -473,7 +473,7 @@ static int svm_vmcb_restore(struct vcpu *v, struct hvm_hw_cpu *c)
if ( paging_mode_hap(v->domain) )
{
- vmcb_set_np_enable(vmcb, 1);
+ vmcb_set_np(vmcb, true);
vmcb_set_g_pat(vmcb, MSR_IA32_CR_PAT_RESET /* guest PAT */);
vmcb_set_h_cr3(vmcb, pagetable_get_paddr(p2m_get_pagetable(p2m)));
}
diff --git a/xen/arch/x86/hvm/svm/vmcb.c b/xen/arch/x86/hvm/svm/vmcb.c
index 282fe7cdbe..4e1f61dbe0 100644
--- a/xen/arch/x86/hvm/svm/vmcb.c
+++ b/xen/arch/x86/hvm/svm/vmcb.c
@@ -133,7 +133,7 @@ static int construct_vmcb(struct vcpu *v)
if ( paging_mode_hap(v->domain) )
{
- vmcb->_np_enable = 1; /* enable nested paging */
+ vmcb_set_np(vmcb, true); /* enable nested paging */
vmcb->_g_pat = MSR_IA32_CR_PAT_RESET; /* guest PAT */
vmcb->_h_cr3 = pagetable_get_paddr(
p2m_get_pagetable(p2m_get_hostp2m(v->domain)));
diff --git a/xen/arch/x86/include/asm/hvm/svm/vmcb.h b/xen/arch/x86/include/asm/hvm/svm/vmcb.h
index 91221ff4e2..bf2b8d9a94 100644
--- a/xen/arch/x86/include/asm/hvm/svm/vmcb.h
+++ b/xen/arch/x86/include/asm/hvm/svm/vmcb.h
@@ -385,7 +385,7 @@ typedef union
bool iopm:1; /* 1: iopm_base_pa, msrpm_base_pa */
bool asid:1; /* 2: guest_asid */
bool tpr:1; /* 3: vintr */
- bool np:1; /* 4: np_enable, h_cr3, g_pat */
+ bool np:1; /* 4: np, h_cr3, g_pat */
bool cr:1; /* 5: cr0, cr3, cr4, efer */
bool dr:1; /* 6: dr6, dr7 */
bool dt:1; /* 7: gdtr, idtr */
@@ -473,12 +473,12 @@ struct vmcb_struct {
intinfo_t exit_int_info; /* offset 0x88 */
union { /* offset 0x90 - cleanbit 4 */
struct {
- bool _np_enable :1;
- bool _sev_enable :1;
- bool _sev_es_enable :1;
- bool _gmet :1;
- bool _np_sss :1;
- bool _vte :1;
+ bool _np :1;
+ bool _sev :1;
+ bool _sev_es :1;
+ bool _gmet :1;
+ bool _np_sss :1;
+ bool _vte :1;
};
uint64_t _np_ctrl;
};
@@ -645,9 +645,9 @@ VMCB_ACCESSORS(msrpm_base_pa, iopm)
VMCB_ACCESSORS(guest_asid, asid)
VMCB_ACCESSORS(vintr, tpr)
VMCB_ACCESSORS(np_ctrl, np)
-VMCB_ACCESSORS_(np_enable, bool, np)
-VMCB_ACCESSORS_(sev_enable, bool, np)
-VMCB_ACCESSORS_(sev_es_enable, bool, np)
+VMCB_ACCESSORS_(np, bool, np)
+VMCB_ACCESSORS_(sev, bool, np)
+VMCB_ACCESSORS_(sev_es, bool, np)
VMCB_ACCESSORS_(gmet, bool, np)
VMCB_ACCESSORS_(vte, bool, np)
VMCB_ACCESSORS(h_cr3, np)
--
generated by git-patchbot for /home/xen/git/xen.git#staging
|
Lists.xenproject.org is hosted with RackSpace, monitoring our servers 24x7x365 and backed by RackSpace's Fanatical Support.