[Xen-devel] [PATCH 06/11] evtchn: alter internal object handling scheme
From: Wei Liu <wei.liu2@xxxxxxxxxx>
Originally, evtchn objects were stored in buckets directly referenced
from struct domain. This patch adds another layer, the group: struct
domain now holds an array of pointers to evtchn groups, and each group
holds pointers to buckets.

With this change, each domain can accommodate more struct evtchn
objects in a space-efficient way.
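
For illustration, written out as a plain function rather than the
macros the patch adds to xen/include/xen/event.h (the helper name
below is made up), the lookup path becomes:

    /* Sketch only: the patch implements this via group_from_port(),
     * bucket_from_port() and evtchn_from_port(). */
    static struct evtchn *evtchn_lookup_sketch(struct domain *d,
                                               unsigned int port)
    {
        struct evtchn **group = d->evtchn[port / EVTCHNS_PER_GROUP];
        struct evtchn *bucket = group[(port % EVTCHNS_PER_GROUP) /
                                      EVTCHNS_PER_BUCKET];

        return &bucket[port & (EVTCHNS_PER_BUCKET - 1)];
    }

The final mask works because EVTCHNS_PER_BUCKET is a power of 2 by
construction.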
Signed-off-by: Wei Liu <wei.liu2@xxxxxxxxxx>
Allocate the array of group pointers, shrinking the size of struct
domain.
Calculate the grouping and bucket parameters at compile time to
achieve optimum packing into PAGE_SIZE memory allocations.
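
As a worked example (assuming PAGE_SIZE == 4096, 8-byte pointers, and
sizeof(struct evtchn) rounding up to 64 bytes; the real size depends
on the build configuration):

    BUCKETS_PER_GROUP  = 4096 / 8  = 512
    EVTCHNS_PER_BUCKET = 4096 / 64 = 64
    EVTCHNS_PER_GROUP  = 512 * 64  = 32768

Every allocation is then exactly one page, and a single group covers
32768 event channels.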
Signed-off-by: Malcolm Crossley <malcolm.crossley@xxxxxxxxxx>
Signed-off-by: David Vrabel <david.vrabel@xxxxxxxxxx>
---
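A note on the rounding helpers added to xen/include/xen/sched.h:
next_power_of_2(x) smears the highest set bit of (x - 1) into all
lower bit positions, then adds 1. For example, with x == 48:

    x - 1              =  101111b   /* 47 */
    __RDU32(x - 1)     =  111111b   /* 63: all lower bits set */
    __RDU32(x - 1) + 1 = 1000000b   /* 64, the next power of 2 */
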
 xen/common/event_channel.c |   29 ++++++++++++++++++++++++-----
 xen/include/xen/event.h    |   24 ++++++++++++++++--------
 xen/include/xen/sched.h    |   20 +++++++++++++++++---
 3 files changed, 57 insertions(+), 16 deletions(-)
diff --git a/xen/common/event_channel.c b/xen/common/event_channel.c
index 273d449..28c641b 100644
--- a/xen/common/event_channel.c
+++ b/xen/common/event_channel.c
@@ -124,6 +124,7 @@ static int virq_is_global(uint32_t virq)
 static int get_free_port(struct domain *d)
 {
     struct evtchn *chn;
+    struct evtchn **grp;
     int port;
     int i, j;
@@ -137,6 +138,15 @@ static int get_free_port(struct domain *d)
     if ( port == d->max_evtchns )
         return -ENOSPC;
 
+    if ( unlikely(group_from_port(d, port) == NULL) )
+    {
+        grp = xzalloc_array(struct evtchn *, BUCKETS_PER_GROUP);
+        if ( unlikely(grp == NULL) )
+            return -ENOMEM;
+        else
+            group_from_port(d, port) = grp;
+    }
+
     chn = xzalloc_array(struct evtchn, EVTCHNS_PER_BUCKET);
     if ( unlikely(chn == NULL) )
         return -ENOMEM;
@@ -1153,8 +1163,7 @@ int evtchn_init(struct domain *d)
     /* Default to N-level ABI. */
     evtchn_2l_init(d);
 
-    BUILD_BUG_ON(sizeof(struct evtchn *) * NR_EVTCHN_BUCKETS > PAGE_SIZE);
-    d->evtchn = xzalloc_array(struct evtchn *, NR_EVTCHN_BUCKETS);
+    d->evtchn = xzalloc_array(struct evtchn **, NR_EVTCHN_GROUPS);
     if ( d->evtchn == NULL )
         return -ENOMEM;
@@ -1182,7 +1191,7 @@ int evtchn_init(struct domain *d)
 void evtchn_destroy(struct domain *d)
 {
-    int i;
+    unsigned i, j;
 
     /* After this barrier no new event-channel allocations can occur. */
     BUG_ON(!d->is_dying);
@@ -1197,9 +1206,19 @@ void evtchn_destroy(struct domain *d)
     /* Free all event-channel buckets. */
     spin_lock(&d->event_lock);
-    for ( i = 0; i < NR_EVTCHN_BUCKETS; i++ )
+    for ( i = 0; i < NR_EVTCHN_GROUPS; i++ )
     {
-        xsm_free_security_evtchn(d->evtchn[i]);
+        if ( d->evtchn[i] == NULL )
+            continue;
+
+        for ( j = 0; j < BUCKETS_PER_GROUP; j++ )
+        {
+            if ( d->evtchn[i][j] == NULL )
+                continue;
+            xsm_free_security_evtchn(d->evtchn[i][j]);
+            xfree(d->evtchn[i][j]);
+            d->evtchn[i][j] = NULL;
+        }
         xfree(d->evtchn[i]);
         d->evtchn[i] = NULL;
     }
diff --git a/xen/include/xen/event.h b/xen/include/xen/event.h
index 302a904..091b53c 100644
--- a/xen/include/xen/event.h
+++ b/xen/include/xen/event.h
@@ -69,14 +69,22 @@ int guest_enabled_event(struct vcpu *v, uint32_t virq);
 /* Notify remote end of a Xen-attached event channel.*/
 void notify_via_xen_event_channel(struct domain *ld, int lport);
 
-/* Internal event channel object accessors */
-#define bucket_from_port(d,p) \
-    ((d)->evtchn[(p)/EVTCHNS_PER_BUCKET])
-#define port_is_valid(d,p)    \
-    (((p) >= 0) && ((p) < (d)->max_evtchns) && \
-     (bucket_from_port(d,p) != NULL))
-#define evtchn_from_port(d,p) \
-    (&(bucket_from_port(d,p))[(p)&(EVTCHNS_PER_BUCKET-1)])
+/*
+ * Internal event channel object storage:
+ * objects are organized in a two-level scheme, groups and buckets.
+ * A group holds pointers to buckets; a bucket is an array of struct evtchn.
+ */
+#define group_from_port(d, p) \
+    ((d)->evtchn[(p) / EVTCHNS_PER_GROUP])
+/* Callers must ensure the group is not NULL. */
+#define bucket_from_port(d, p) \
+    ((group_from_port(d, p))[((p) % EVTCHNS_PER_GROUP) / EVTCHNS_PER_BUCKET])
+#define port_is_valid(d, p) \
+    (((p) >= 0) && ((p) < (d)->max_evtchns) && \
+     (group_from_port(d, p) != NULL) && \
+     (bucket_from_port(d, p) != NULL))
+#define evtchn_from_port(d, p) \
+    (&(bucket_from_port(d, p))[(p) & (EVTCHNS_PER_BUCKET - 1)])
 
 /* Wait on a Xen-attached event channel. */
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index 9227685..b348232 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -50,8 +50,22 @@ extern struct domain *dom0;
 #else
 #define BITS_PER_EVTCHN_WORD(d) (has_32bit_shinfo(d) ? 32 : BITS_PER_XEN_ULONG)
 #endif
-#define EVTCHNS_PER_BUCKET 128
-#define NR_EVTCHN_BUCKETS (NR_EVENT_CHANNELS / EVTCHNS_PER_BUCKET)
+
+#define BUCKETS_PER_GROUP  (PAGE_SIZE / sizeof(struct evtchn *))
+/* Round the size of struct evtchn up to a power of 2. */
+#define __RDU2(x)   ( (x) | ( (x) >> 1))
+#define __RDU4(x)   ( __RDU2(x) | ( __RDU2(x) >> 2))
+#define __RDU8(x)   ( __RDU4(x) | ( __RDU4(x) >> 4))
+#define __RDU16(x)  ( __RDU8(x) | ( __RDU8(x) >> 8))
+#define __RDU32(x)  (__RDU16(x) | (__RDU16(x) >> 16))
+#define next_power_of_2(x) (__RDU32((x) - 1) + 1)
+
+/* Maximum number of event channels for any ABI. */
+#define MAX_NR_EVTCHNS NR_EVENT_CHANNELS
+
+#define EVTCHNS_PER_BUCKET (PAGE_SIZE / next_power_of_2(sizeof(struct evtchn)))
+#define EVTCHNS_PER_GROUP  (BUCKETS_PER_GROUP * EVTCHNS_PER_BUCKET)
+#define NR_EVTCHN_GROUPS   DIV_ROUND_UP(MAX_NR_EVTCHNS, EVTCHNS_PER_GROUP)
 
 struct evtchn
 {
@@ -271,7 +285,7 @@ struct domain
     spinlock_t rangesets_lock;
 
     /* Event channel information. */
-    struct evtchn **evtchn;
+    struct evtchn ***evtchn;
     unsigned max_evtchns;
     spinlock_t event_lock;
     const struct evtchn_port_ops *evtchn_port_ops;
--
1.7.2.5
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel