[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [PATCH][MCA] avoid allocating memory in interrupt context (was Re: [Xen-ia64-devel] [PATCH 2/3] xencomm consolidation: linux side)
Hi, The attached patch fixes to avoid allocating memory in interrupt context. I tried to allocate xencomm_handle in IA64_LOG_ALLOCATE instead of allocating each time. Thanks, KAZ Signed-off-by: Kazuhiro Suzuki <kaz@xxxxxxxxxxxxxx> From: Isaku Yamahata <yamahata@xxxxxxxxxxxxx> Subject: Re: [Xen-ia64-devel] [PATCH 2/3] xencomm consolidation: linux side Date: Mon, 20 Aug 2007 12:52:02 +0900 > Hi Kaz. > > You may want to test the following hank. > Although I didn't change the logic, but it certainly needs fix. > > NOTE: > [PATCH 0/12]MCA handler support for Xen/ia64 TAKE 2 > http://lists.xensource.com/archives/html/xen-ia64-devel/2006-11/msg00358.html > http://lists.xensource.com/archives/html/xen-ia64-devel/2006-11/msg00362.html > > > On Thu, Aug 16, 2007 at 12:21:04PM +0900, Isaku Yamahata wrote: > > diff -r 3470a432a082 -r ea78d46a2ea0 include/asm-ia64/sal.h > > --- a/include/asm-ia64/sal.h Tue Aug 07 16:54:04 2007 +0900 > > +++ b/include/asm-ia64/sal.h Thu Aug 16 11:55:08 2007 +0900 > > @@ -701,9 +701,9 @@ ia64_sal_get_state_info (u64 sal_info_ty > > if (is_running_on_xen()) { > > struct xencomm_handle *desc; > > > > - if (xencomm_create(sal_info, > > - ia64_sal_get_state_info_size(sal_info_type), > > - &desc, GFP_KERNEL)) > > + desc = xencomm_map(sal_info, > > + ia64_sal_get_state_info_size(sal_info_type)); > > + if (desc == NULL) > > return 0; > > > > SAL_CALL_REENTRANT(isrv, SAL_GET_STATE_INFO, sal_info_type, 0, > > -- > yamahata diff -r 057b47cada5c arch/ia64/kernel/mca.c --- a/arch/ia64/kernel/mca.c Thu Aug 23 15:18:40 2007 -0600 +++ b/arch/ia64/kernel/mca.c Tue Aug 28 19:43:33 2007 +0900 @@ -160,11 +160,33 @@ typedef struct ia64_state_log_s static ia64_state_log_t ia64_state_log[IA64_MAX_LOG_TYPES]; +#ifdef CONFIG_XEN +DEFINE_SPINLOCK(ia64_mca_xencomm_lock); +LIST_HEAD(ia64_mca_xencomm_list); + +#define IA64_MCA_XENCOMM_ALLOCATE(rec, desc) \ + if (is_running_on_xen()) { \ + ia64_mca_xencomm_t *entry; \ + entry = alloc_bootmem(sizeof(ia64_mca_xencomm_t)); \ + 
entry->record = rec; \ + entry->handle = desc; \ + list_add(&entry->list, &ia64_mca_xencomm_list); \ + } +#define IA64_LOG_ALLOCATE(it, size) \ + {ia64_err_rec_t *rec; \ + ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)] = rec = \ + (ia64_err_rec_t *)alloc_bootmem(size); \ + IA64_MCA_XENCOMM_ALLOCATE(rec, xencomm_map(rec, size)); \ + ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)] = rec = \ + (ia64_err_rec_t *)alloc_bootmem(size); \ + IA64_MCA_XENCOMM_ALLOCATE(rec, xencomm_map(rec, size));} +#else #define IA64_LOG_ALLOCATE(it, size) \ {ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)] = \ (ia64_err_rec_t *)alloc_bootmem(size); \ ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)] = \ (ia64_err_rec_t *)alloc_bootmem(size);} +#endif #define IA64_LOG_LOCK_INIT(it) spin_lock_init(&ia64_state_log[it].isl_lock) #define IA64_LOG_LOCK(it) spin_lock_irqsave(&ia64_state_log[it].isl_lock, s) #define IA64_LOG_UNLOCK(it) spin_unlock_irqrestore(&ia64_state_log[it].isl_lock,s) diff -r 057b47cada5c arch/ia64/kernel/salinfo.c --- a/arch/ia64/kernel/salinfo.c Thu Aug 23 15:18:40 2007 -0600 +++ b/arch/ia64/kernel/salinfo.c Tue Aug 28 19:45:39 2007 +0900 @@ -375,6 +375,20 @@ salinfo_log_open(struct inode *inode, st data->open = 0; return -ENOMEM; } +#ifdef CONFIG_XEN + if (is_running_on_xen()) { + ia64_mca_xencomm_t *entry; + unsigned long flags; + + entry = vmalloc(sizeof(ia64_mca_xencomm_t)); + entry->record = data->log_buffer; + entry->handle = xencomm_map(data->log_buffer, + ia64_sal_get_state_info_size(data->type)); + spin_lock_irqsave(&ia64_mca_xencomm_lock, flags); + list_add(&entry->list, &ia64_mca_xencomm_list); + spin_unlock_irqrestore(&ia64_mca_xencomm_lock, flags); + } +#endif return 0; } @@ -386,6 +400,24 @@ salinfo_log_release(struct inode *inode, struct salinfo_data *data = entry->data; if (data->state == STATE_NO_DATA) { +#ifdef CONFIG_XEN + if (is_running_on_xen()) { + struct list_head *pos, *n; + unsigned long flags; + + 
spin_lock_irqsave(&ia64_mca_xencomm_lock, flags); + list_for_each_safe(pos, n, &ia64_mca_xencomm_list) { + ia64_mca_xencomm_t *entry; + + entry = list_entry(pos, ia64_mca_xencomm_t, list); + if (entry->record == data->log_buffer) { + list_del(&entry->list); + vfree(entry); + } + } + spin_unlock_irqrestore(&ia64_mca_xencomm_lock, flags); + } +#endif vfree(data->log_buffer); vfree(data->oemdata); data->log_buffer = NULL; diff -r 057b47cada5c include/asm-ia64/sal.h --- a/include/asm-ia64/sal.h Thu Aug 23 15:18:40 2007 -0600 +++ b/include/asm-ia64/sal.h Tue Aug 28 10:28:50 2007 +0900 @@ -691,6 +691,13 @@ ia64_sal_clear_state_info (u64 sal_info_ */ #ifdef CONFIG_XEN static inline u64 ia64_sal_get_state_info_size (u64 sal_info_type); +typedef struct ia64_mca_xencomm_t { + void *record; + struct xencomm_handle *handle; + struct list_head list; +} ia64_mca_xencomm_t; +extern struct list_head ia64_mca_xencomm_list; +extern spinlock_t ia64_mca_xencomm_lock; #endif static inline u64 @@ -699,16 +706,24 @@ ia64_sal_get_state_info (u64 sal_info_ty struct ia64_sal_retval isrv; #ifdef CONFIG_XEN if (is_running_on_xen()) { - struct xencomm_handle *desc; - - desc = xencomm_map(sal_info, - ia64_sal_get_state_info_size(sal_info_type)); + ia64_mca_xencomm_t *entry; + struct xencomm_handle *desc = NULL; + unsigned long flags; + + spin_lock_irqsave(&ia64_mca_xencomm_lock, flags); + list_for_each_entry(entry, &ia64_mca_xencomm_list, list) { + if (entry->record == sal_info) { + desc = entry->handle; + break; + } + } + spin_unlock_irqrestore(&ia64_mca_xencomm_lock, flags); + if (desc == NULL) return 0; SAL_CALL_REENTRANT(isrv, SAL_GET_STATE_INFO, sal_info_type, 0, desc, 0, 0, 0, 0); - xencomm_free(desc); } else #endif SAL_CALL_REENTRANT(isrv, SAL_GET_STATE_INFO, sal_info_type, 0, _______________________________________________ Xen-ia64-devel mailing list Xen-ia64-devel@xxxxxxxxxxxxxxxxxxx http://lists.xensource.com/xen-ia64-devel
Lists.xenproject.org is hosted with RackSpace, monitoring our servers 24x7x365 and backed by RackSpace's Fanatical Support®.