
Re: [PATCH 1/2] xen: make include/xen/unaligned.h usable on all architectures


  • To: Jan Beulich <jbeulich@xxxxxxxx>
  • From: Juergen Gross <jgross@xxxxxxxx>
  • Date: Tue, 5 Dec 2023 15:11:16 +0100
  • Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>, George Dunlap <george.dunlap@xxxxxxxxxx>, Julien Grall <julien@xxxxxxx>, Stefano Stabellini <sstabellini@xxxxxxxxxx>, Wei Liu <wl@xxxxxxx>, Arnd Bergmann <arnd@xxxxxxxx>, xen-devel@xxxxxxxxxxxxxxxxxxxx
  • Delivery-date: Tue, 05 Dec 2023 14:11:22 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>

On 05.12.23 14:55, Jan Beulich wrote:
On 05.12.2023 11:07, Juergen Gross wrote:
@@ -15,67 +7,126 @@
  #include <asm/byteorder.h>
  #endif
-#define get_unaligned(p) (*(p))
-#define put_unaligned(val, p) (*(p) = (val))
+/*
+ * This is the most generic implementation of unaligned accesses
+ * and should work almost anywhere.
+ */
+
+#define __get_unaligned_t(type, ptr) ({                                       \
+       const struct { type x; } __packed *__pptr = (typeof(__pptr))(ptr);     \
+       __pptr->x;                                                             \
+})
+
+#define __put_unaligned_t(type, val, ptr) do {                                \
+       struct { type x; } __packed *__pptr = (typeof(__pptr))(ptr);           \
+       __pptr->x = (val);                                                     \
+} while (0)

Please can we avoid gaining new (even double) leading underscore symbols,
especially when they're in helper (i.e. not intended to be used directly)
constructs? No reason to introduce further issues wrt Misra adoption.
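
One underscore-free spelling that would address this, as a sketch only: the
unaligned_get_t/unaligned_put_t names below are made up for illustration and
are not part of the patch. Trailing rather than leading underscores keep the
local variable out of the reserved-identifier space:

    #define unaligned_get_t(type, ptr) ({                                     \
        const struct { type x; } __packed *pptr_ = (typeof(pptr_))(ptr);      \
        pptr_->x;                                                             \
    })

    #define unaligned_put_t(type, val, ptr) do {                              \
        struct { type x; } __packed *pptr_ = (typeof(pptr_))(ptr);            \
        pptr_->x = (val);                                                     \
    } while (0)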

+#define get_unaligned(ptr)     __get_unaligned_t(typeof(*(ptr)), (ptr))
+#define put_unaligned(val, ptr) __put_unaligned_t(typeof(*(ptr)), (val), (ptr))

May I ask to omit excess parentheses where possible?
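
The invocations can indeed drop the extra layer, since the helper macros
already parenthesize their arguments internally; only the typeof operand
still needs its own parentheses. A possible spelling (sketch):

    #define get_unaligned(ptr)       __get_unaligned_t(typeof(*(ptr)), ptr)
    #define put_unaligned(val, ptr)  __put_unaligned_t(typeof(*(ptr)), val, ptr)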

+static inline u16 get_unaligned_le16(const void *p)
+{
+       return le16_to_cpu(__get_unaligned_t(__le16, p));
+}
+
+static inline u32 get_unaligned_le32(const void *p)
+{
+       return le32_to_cpu(__get_unaligned_t(__le32, p));
+}
+
+static inline u64 get_unaligned_le64(const void *p)
+{
+       return le64_to_cpu(__get_unaligned_t(__le64, p));
+}
+
+static inline void put_unaligned_le16(u16 val, void *p)
+{
+       __put_unaligned_t(__le16, cpu_to_le16(val), p);
+}
+
+static inline void put_unaligned_le32(u32 val, void *p)
+{
+       __put_unaligned_t(__le32, cpu_to_le32(val), p);
+}
+
+static inline void put_unaligned_le64(u64 val, void *p)
+{
+       __put_unaligned_t(__le64, cpu_to_le64(val), p);
+}
+
+static inline u16 get_unaligned_be16(const void *p)
+{
+       return be16_to_cpu(__get_unaligned_t(__be16, p));
+}
+
+static inline u32 get_unaligned_be32(const void *p)
+{
+       return be32_to_cpu(__get_unaligned_t(__be32, p));
+}
-static inline uint16_t get_unaligned_be16(const void *p)
+static inline u64 get_unaligned_be64(const void *p)
  {
-       return be16_to_cpup(p);
+       return be64_to_cpu(__get_unaligned_t(__be64, p));
  }
-static inline void put_unaligned_be16(uint16_t val, void *p)
+static inline void put_unaligned_be16(u16 val, void *p)
  {
-       *(__force __be16*)p = cpu_to_be16(val);
+       __put_unaligned_t(__be16, cpu_to_be16(val), p);
  }
-static inline uint32_t get_unaligned_be32(const void *p)
+static inline void put_unaligned_be32(u32 val, void *p)
  {
-       return be32_to_cpup(p);
+       __put_unaligned_t(__be32, cpu_to_be32(val), p);
  }
-static inline void put_unaligned_be32(uint32_t val, void *p)
+static inline void put_unaligned_be64(u64 val, void *p)
  {
-       *(__force __be32*)p = cpu_to_be32(val);
+       __put_unaligned_t(__be64, cpu_to_be64(val), p);
  }
-static inline uint64_t get_unaligned_be64(const void *p)
+static inline u32 __get_unaligned_be24(const u8 *p)

Here and below - do you foresee use of these 24-bit helpers? They weren't
there before, and the description also doesn't justify their introduction.
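
For context, the 24-bit helpers in the kernel file this is taken from are
plain byte assembly, roughly as below (reproduced from memory of the
kernel's asm-generic/unaligned.h, so treat as a sketch rather than a quote
of the patch):

    static inline u32 __get_unaligned_be24(const u8 *p)
    {
            /* Assemble a big-endian 3-byte value from individual bytes. */
            return p[0] << 16 | p[1] << 8 | p[2];
    }

    static inline void __put_unaligned_be24(const u32 val, u8 *p)
    {
            /* Store the low 24 bits of val, most significant byte first. */
            *p++ = val >> 16;
            *p++ = val >> 8;
            *p = val;
    }

In the kernel these serve subsystems that handle 3-byte on-the-wire fields
(e.g. parts of SCSI); whether Xen has such a user is exactly the question
raised here.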

I have just applied the patch from the kernel (see Origin: tag).

I can change the patch according to your comments, of course.
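
A typical caller of the resulting interface, for illustration (hypothetical
code, not from the patch):

    #include <xen/unaligned.h>

    /* Read a big-endian 32-bit length field at an odd offset in a buffer. */
    static uint32_t frame_len(const uint8_t *buf)
    {
            return get_unaligned_be32(buf + 1);
    }

    /* Store a little-endian 16-bit value without alignment assumptions. */
    static void set_tag(uint8_t *buf, uint16_t tag)
    {
            put_unaligned_le16(tag, buf + 3);
    }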


Juergen

Attachment: OpenPGP_0xB0DE9DD628BF132F.asc
Description: OpenPGP public key

Attachment: OpenPGP_signature.asc
Description: OpenPGP digital signature


 

