
[PATCH v3 1/6] x86/HVM: introduce hvm_get_entry()


  • To: "xen-devel@xxxxxxxxxxxxxxxxxxxx" <xen-devel@xxxxxxxxxxxxxxxxxxxx>
  • From: Jan Beulich <jbeulich@xxxxxxxx>
  • Date: Tue, 28 Nov 2023 11:33:35 +0100
  • Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>, Wei Liu <wl@xxxxxxx>, Roger Pau Monné <roger.pau@xxxxxxxxxx>
  • Delivery-date: Tue, 28 Nov 2023 10:33:44 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>

... to accompany hvm_read_entry() when actual copying isn't desirable.
This allows the removal of open-coded stream accesses from hpet_load(),
and it also allows the helper to be put to use in hvm_load() itself.
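
For illustration only (a sketch, not part of the patch), the two
unmarshalling styles side by side, using the save header record as in
the hvm_load() change below:

    struct hvm_save_header hdr;

    /* Copying unmarshalling: the record is copied into a local structure. */
    if ( hvm_load_entry(HEADER, h, &hdr) != 0 )
        return -ENODATA;

versus

    const struct hvm_save_header *hdr;

    /* Pointer-based access: a read-only view into the stream, no copying. */
    hdr = hvm_get_entry(HEADER, h);
    if ( !hdr )
        return -ENODATA;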

Since arch_hvm_load()'s declaration would need changing, and since the
function is not used from elsewhere, purge the declaration. With that it
makes little sense to keep arch_hvm_save()'s around either; convert both
functions to static at the same time.

In hpet_load(), simplify the error return path that is in context here
anyway: there's no need to hold the lock while only a local variable is
being updated.
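
Illustrative sketch only, mirroring the hpet_load() hunk below: the
entry is fetched and validated before the lock is taken, so the failure
path no longer needs an unlock:

    /* Only the local "rec" is written here - no lock needed yet. */
    rec = hvm_get_entry(HPET, h);
    if ( !rec )
        return -EINVAL;

    /* The lock is needed only around the actual register updates. */
    write_lock(&hp->lock);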

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
Acked-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
v3: Rename to hvm_get_entry().
v2: New.

--- a/xen/arch/x86/hvm/hpet.c
+++ b/xen/arch/x86/hvm/hpet.c
@@ -637,7 +637,7 @@ static int cf_check hpet_save(struct vcp
 static int cf_check hpet_load(struct domain *d, hvm_domain_context_t *h)
 {
     HPETState *hp = domain_vhpet(d);
-    struct hvm_hw_hpet *rec;
+    const struct hvm_hw_hpet *rec;
     uint64_t cmp;
     uint64_t guest_time;
     int i;
@@ -645,17 +645,12 @@ static int cf_check hpet_load(struct dom
     if ( !has_vhpet(d) )
         return -ENODEV;
 
-    write_lock(&hp->lock);
-
     /* Reload the HPET registers */
-    if ( _hvm_check_entry(h, HVM_SAVE_CODE(HPET), HVM_SAVE_LENGTH(HPET), 1) )
-    {
-        write_unlock(&hp->lock);
+    rec = hvm_get_entry(HPET, h);
+    if ( !rec )
         return -EINVAL;
-    }
 
-    rec = (struct hvm_hw_hpet *)&h->data[h->cur];
-    h->cur += HVM_SAVE_LENGTH(HPET);
+    write_lock(&hp->lock);
 
 #define C(x) hp->hpet.x = rec->x
     C(capability);
--- a/xen/arch/x86/hvm/save.c
+++ b/xen/arch/x86/hvm/save.c
@@ -15,7 +15,7 @@
 
 #include <public/hvm/save.h>
 
-void arch_hvm_save(struct domain *d, struct hvm_save_header *hdr)
+static void arch_hvm_save(struct domain *d, struct hvm_save_header *hdr)
 {
     uint32_t eax, ebx, ecx, edx;
 
@@ -30,7 +30,7 @@ void arch_hvm_save(struct domain *d, str
     d->arch.hvm.sync_tsc = rdtsc();
 }
 
-int arch_hvm_load(struct domain *d, struct hvm_save_header *hdr)
+static int arch_hvm_load(struct domain *d, const struct hvm_save_header *hdr)
 {
     uint32_t eax, ebx, ecx, edx;
 
@@ -277,7 +277,7 @@ int hvm_save(struct domain *d, hvm_domai
 
 int hvm_load(struct domain *d, hvm_domain_context_t *h)
 {
-    struct hvm_save_header hdr;
+    const struct hvm_save_header *hdr;
     struct hvm_save_descriptor *desc;
     hvm_load_handler handler;
     struct vcpu *v;
@@ -286,11 +286,12 @@ int hvm_load(struct domain *d, hvm_domai
     if ( d->is_dying )
         return -EINVAL;
 
-    /* Read the save header, which must be first */
-    if ( hvm_load_entry(HEADER, h, &hdr) != 0 )
+    /* Get at the save header, which must be first */
+    hdr = hvm_get_entry(HEADER, h);
+    if ( !hdr )
         return -ENODATA;
 
-    rc = arch_hvm_load(d, &hdr);
+    rc = arch_hvm_load(d, hdr);
     if ( rc )
         return rc;
 
--- a/xen/arch/x86/include/asm/hvm/save.h
+++ b/xen/arch/x86/include/asm/hvm/save.h
@@ -39,6 +39,21 @@ void _hvm_write_entry(struct hvm_domain_
 int _hvm_check_entry(struct hvm_domain_context *h,
                      uint16_t type, uint32_t len, bool strict_length);
 
+/*
+ * Unmarshalling: check, then return pointer. Evaluates to non-NULL on success.
+ * This macro requires the save entry to be the same size as the dest structure.
+ */
+#define hvm_get_entry(x, h) ({                                  \
+    const void *ptr = NULL;                                     \
+    BUILD_BUG_ON(HVM_SAVE_HAS_COMPAT(x));                       \
+    if ( _hvm_check_entry(h, HVM_SAVE_CODE(x),                  \
+                          HVM_SAVE_LENGTH(x), true) == 0 )      \
+    {                                                           \
+        ptr = &(h)->data[(h)->cur];                             \
+        h->cur += HVM_SAVE_LENGTH(x);                           \
+    }                                                           \
+    ptr; })
+
 /* Unmarshalling: copy the contents in a type-safe way */
 void _hvm_read_entry(struct hvm_domain_context *h,
                      void *dest, uint32_t dest_len);
@@ -127,9 +142,4 @@ int hvm_save_one(struct domain *d, unsig
                  XEN_GUEST_HANDLE_64(uint8) handle, uint64_t *bufsz);
 int hvm_load(struct domain *d, hvm_domain_context_t *h);
 
-/* Arch-specific definitions. */
-struct hvm_save_header;
-void arch_hvm_save(struct domain *d, struct hvm_save_header *hdr);
-int arch_hvm_load(struct domain *d, struct hvm_save_header *hdr);
-
 #endif /* __XEN_HVM_SAVE_H__ */