[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-changelog] Add arch/x86_64/kernel/{asm-offsets, init_task}.c to sparse tree.



# HG changeset patch
# User cl349@xxxxxxxxxxxxxxxxxxxx
# Node ID f0a8a0a9a6f52dc78b5bd7a0a7e959ff15b78a8c
# Parent  16a91d8dd8ed1193f6d58a61d85929ea81fc946c
Add arch/x86_64/kernel/{asm-offsets,init_task}.c to sparse tree.

Signed-off-by: Christian Limpach <Christian.Limpach@xxxxxxxxxxxx>

diff -r 16a91d8dd8ed -r f0a8a0a9a6f5 linux-2.6-xen-sparse/arch/x86_64/kernel/asm-offsets.c
--- /dev/null   Mon Feb 20 14:46:14 2006
+++ b/linux-2.6-xen-sparse/arch/x86_64/kernel/asm-offsets.c     Mon Feb 20 14:51:50 2006
@@ -0,0 +1,72 @@
+/*
+ * Generate definitions needed by assembly language modules.
+ * This code generates raw asm output which is post-processed to extract
+ * and format the required data.
+ */
+
+#include <linux/sched.h> 
+#include <linux/stddef.h>
+#include <linux/errno.h> 
+#include <linux/hardirq.h>
+#include <linux/suspend.h>
+#include <asm/pda.h>
+#include <asm/processor.h>
+#include <asm/segment.h>
+#include <asm/thread_info.h>
+#include <asm/ia32.h>
+
+#define DEFINE(sym, val) \
+        asm volatile("\n->" #sym " %0 " #val : : "i" (val))
+
+#define BLANK() asm volatile("\n->" : : )
+
+int main(void)
+{
+#define ENTRY(entry) DEFINE(tsk_ ## entry, offsetof(struct task_struct, entry))
+       ENTRY(state);
+       ENTRY(flags); 
+       ENTRY(thread); 
+       ENTRY(pid);
+       BLANK();
+#undef ENTRY
+#define ENTRY(entry) DEFINE(threadinfo_ ## entry, offsetof(struct thread_info, entry))
+       ENTRY(flags);
+       ENTRY(addr_limit);
+       ENTRY(preempt_count);
+       ENTRY(status);
+       BLANK();
+#undef ENTRY
+#define ENTRY(entry) DEFINE(pda_ ## entry, offsetof(struct x8664_pda, entry))
+       ENTRY(kernelstack); 
+       ENTRY(oldrsp); 
+       ENTRY(pcurrent); 
+       ENTRY(irqcount);
+       ENTRY(cpunumber);
+       ENTRY(irqstackptr);
+       ENTRY(data_offset);
+       BLANK();
+#undef ENTRY
+#ifdef CONFIG_IA32_EMULATION
+#define ENTRY(entry) DEFINE(IA32_SIGCONTEXT_ ## entry, offsetof(struct sigcontext_ia32, entry))
+       ENTRY(eax);
+       ENTRY(ebx);
+       ENTRY(ecx);
+       ENTRY(edx);
+       ENTRY(esi);
+       ENTRY(edi);
+       ENTRY(ebp);
+       ENTRY(esp);
+       ENTRY(eip);
+       BLANK();
+#undef ENTRY
+       DEFINE(IA32_RT_SIGFRAME_sigcontext,
+              offsetof (struct rt_sigframe32, uc.uc_mcontext));
+       BLANK();
+#endif
+       DEFINE(pbe_address, offsetof(struct pbe, address));
+       DEFINE(pbe_orig_address, offsetof(struct pbe, orig_address));
+       DEFINE(pbe_next, offsetof(struct pbe, next));
+       BLANK();
+       DEFINE(TSS_ist, offsetof(struct tss_struct, ist));
+       return 0;
+}
diff -r 16a91d8dd8ed -r f0a8a0a9a6f5 linux-2.6-xen-sparse/arch/x86_64/kernel/init_task.c
--- /dev/null   Mon Feb 20 14:46:14 2006
+++ b/linux-2.6-xen-sparse/arch/x86_64/kernel/init_task.c       Mon Feb 20 14:51:50 2006
@@ -0,0 +1,49 @@
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/init_task.h>
+#include <linux/fs.h>
+#include <linux/mqueue.h>
+
+#include <asm/uaccess.h>
+#include <asm/pgtable.h>
+#include <asm/desc.h>
+
+static struct fs_struct init_fs = INIT_FS;
+static struct files_struct init_files = INIT_FILES;
+static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
+static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
+struct mm_struct init_mm = INIT_MM(init_mm);
+
+EXPORT_SYMBOL(init_mm);
+
+/*
+ * Initial task structure.
+ *
+ * We need to make sure that this is 8192-byte aligned due to the
+ * way process stacks are handled. This is done by having a special
+ * "init_task" linker map entry..
+ */
+union thread_union init_thread_union 
+       __attribute__((__section__(".data.init_task"))) =
+               { INIT_THREAD_INFO(init_task) };
+
+/*
+ * Initial task structure.
+ *
+ * All other task structs will be allocated on slabs in fork.c
+ */
+struct task_struct init_task = INIT_TASK(init_task);
+
+EXPORT_SYMBOL(init_task);
+/*
+ * per-CPU TSS segments. Threads are completely 'soft' on Linux,
+ * no more per-task TSS's. The TSS size is kept cacheline-aligned
+ * so they are allowed to end up in the .data.cacheline_aligned
+ * section. Since TSS's are completely CPU-local, we want them
+ * on exact cacheline boundaries, to eliminate cacheline ping-pong.
+ */ 
+DEFINE_PER_CPU(struct tss_struct, init_tss) ____cacheline_internodealigned_in_smp = INIT_TSS;
+
+#define ALIGN_TO_4K __attribute__((section(".data.init_task")))

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.