From dc6de33674608f978ec29f5c2f7e3af458c06f78 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Thu, 20 Apr 2006 00:10:50 -0700 Subject: [NET]: Add skb->truesize assertion checking. Add some sanity checking. truesize should be at least sizeof(struct sk_buff) plus the current packet length. If not, then truesize is seriously mangled and deserves a kernel log message. Currently we'll do the check for release of stream socket buffers. But we can add checks to more spots over time. Incorporating ideas from Herbert Xu. Signed-off-by: David S. Miller --- include/linux/skbuff.h | 7 +++++++ include/net/sock.h | 1 + 2 files changed, 8 insertions(+) (limited to 'include') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index c4619a428d9b..f8f234708b98 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -344,6 +344,13 @@ extern void skb_over_panic(struct sk_buff *skb, int len, void *here); extern void skb_under_panic(struct sk_buff *skb, int len, void *here); +extern void skb_truesize_bug(struct sk_buff *skb); + +static inline void skb_truesize_check(struct sk_buff *skb) +{ + if (unlikely((int)skb->truesize < sizeof(struct sk_buff) + skb->len)) + skb_truesize_bug(skb); +} extern int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb, int getfrag(void *from, char *to, int offset, diff --git a/include/net/sock.h b/include/net/sock.h index af2b0544586e..ff8b0dad7b0f 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -454,6 +454,7 @@ static inline void sk_stream_set_owner_r(struct sk_buff *skb, struct sock *sk) static inline void sk_stream_free_skb(struct sock *sk, struct sk_buff *skb) { + skb_truesize_check(skb); sock_set_flag(sk, SOCK_QUEUE_SHRUNK); sk->sk_wmem_queued -= skb->truesize; sk->sk_forward_alloc += skb->truesize; -- cgit v1.2.3 From 7daa411b810d7eadfaabe3765ec5f827893dbb30 Mon Sep 17 00:00:00 2001 From: Olof Johansson Date: Wed, 12 Apr 2006 21:05:59 -0500 Subject: [PATCH] powerpc: IOMMU support for honoring dma_mask Some devices don't support full 32-bit DMA address space, which we currently assume. Add the required mask-passing to the IOMMU allocators. Signed-off-by: Olof Johansson Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/iommu.c | 36 ++++++++++++++++++++++++++---------- arch/powerpc/kernel/pci_iommu.c | 40 ++++++++++++++++++++++++++++++++++++---- arch/powerpc/kernel/vio.c | 6 +++--- include/asm-powerpc/iommu.h | 7 ++++--- 4 files changed, 69 insertions(+), 20 deletions(-) (limited to 'include') diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c index d9a7fdef59b9..4eba60a32890 100644 --- a/arch/powerpc/kernel/iommu.c +++ b/arch/powerpc/kernel/iommu.c @@ -61,6 +61,7 @@ __setup("iommu=", setup_iommu); static unsigned long iommu_range_alloc(struct iommu_table *tbl, unsigned long npages, unsigned long *handle, + unsigned long mask, unsigned int align_order) { unsigned long n, end, i, start; @@ -97,9 +98,21 @@ static unsigned long iommu_range_alloc(struct iommu_table *tbl, */ if (start >= limit) start = largealloc ? tbl->it_largehint : tbl->it_hint; - + again: + if (limit + tbl->it_offset > mask) { + limit = mask - tbl->it_offset + 1; + /* If we're constrained on address range, first try + * at the masked hint to avoid O(n) search complexity, + * but on second pass, start at 0. 
+ */ + if ((start & mask) >= limit || pass > 0) + start = 0; + else + start &= mask; + } + n = find_next_zero_bit(tbl->it_map, limit, start); /* Align allocation */ @@ -150,14 +163,14 @@ static unsigned long iommu_range_alloc(struct iommu_table *tbl, static dma_addr_t iommu_alloc(struct iommu_table *tbl, void *page, unsigned int npages, enum dma_data_direction direction, - unsigned int align_order) + unsigned long mask, unsigned int align_order) { unsigned long entry, flags; dma_addr_t ret = DMA_ERROR_CODE; - + spin_lock_irqsave(&(tbl->it_lock), flags); - entry = iommu_range_alloc(tbl, npages, NULL, align_order); + entry = iommu_range_alloc(tbl, npages, NULL, mask, align_order); if (unlikely(entry == DMA_ERROR_CODE)) { spin_unlock_irqrestore(&(tbl->it_lock), flags); @@ -236,7 +249,7 @@ static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr, int iommu_map_sg(struct device *dev, struct iommu_table *tbl, struct scatterlist *sglist, int nelems, - enum dma_data_direction direction) + unsigned long mask, enum dma_data_direction direction) { dma_addr_t dma_next = 0, dma_addr; unsigned long flags; @@ -274,7 +287,7 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl, vaddr = (unsigned long)page_address(s->page) + s->offset; npages = PAGE_ALIGN(vaddr + slen) - (vaddr & PAGE_MASK); npages >>= PAGE_SHIFT; - entry = iommu_range_alloc(tbl, npages, &handle, 0); + entry = iommu_range_alloc(tbl, npages, &handle, mask >> PAGE_SHIFT, 0); DBG(" - vaddr: %lx, size: %lx\n", vaddr, slen); @@ -479,7 +492,8 @@ void iommu_free_table(struct device_node *dn) * byte within the page as vaddr. */ dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr, - size_t size, enum dma_data_direction direction) + size_t size, unsigned long mask, + enum dma_data_direction direction) { dma_addr_t dma_handle = DMA_ERROR_CODE; unsigned long uaddr; @@ -492,7 +506,8 @@ dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr, npages >>= PAGE_SHIFT; if (tbl) { - dma_handle = iommu_alloc(tbl, vaddr, npages, direction, 0); + dma_handle = iommu_alloc(tbl, vaddr, npages, direction, + mask >> PAGE_SHIFT, 0); if (dma_handle == DMA_ERROR_CODE) { if (printk_ratelimit()) { printk(KERN_INFO "iommu_alloc failed, " @@ -521,7 +536,7 @@ void iommu_unmap_single(struct iommu_table *tbl, dma_addr_t dma_handle, * to the dma address (mapping) of the first page. 
*/ void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size, - dma_addr_t *dma_handle, gfp_t flag) + dma_addr_t *dma_handle, unsigned long mask, gfp_t flag) { void *ret = NULL; dma_addr_t mapping; @@ -551,7 +566,8 @@ void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size, memset(ret, 0, size); /* Set up tces to cover the allocated range */ - mapping = iommu_alloc(tbl, ret, npages, DMA_BIDIRECTIONAL, order); + mapping = iommu_alloc(tbl, ret, npages, DMA_BIDIRECTIONAL, + mask >> PAGE_SHIFT, order); if (mapping == DMA_ERROR_CODE) { free_pages((unsigned long)ret, order); ret = NULL; diff --git a/arch/powerpc/kernel/pci_iommu.c b/arch/powerpc/kernel/pci_iommu.c index c336f3e31cff..c1d95e14bbed 100644 --- a/arch/powerpc/kernel/pci_iommu.c +++ b/arch/powerpc/kernel/pci_iommu.c @@ -59,6 +59,25 @@ static inline struct iommu_table *devnode_table(struct device *dev) } +static inline unsigned long device_to_mask(struct device *hwdev) +{ + struct pci_dev *pdev; + + if (!hwdev) { + pdev = ppc64_isabridge_dev; + if (!pdev) /* This is the best guess we can do */ + return 0xfffffffful; + } else + pdev = to_pci_dev(hwdev); + + if (pdev->dma_mask) + return pdev->dma_mask; + + /* Assume devices without mask can take 32 bit addresses */ + return 0xfffffffful; +} + + /* Allocates a contiguous real buffer and creates mappings over it. * Returns the virtual address of the buffer and sets dma_handle * to the dma address (mapping) of the first page. @@ -67,7 +86,7 @@ static void *pci_iommu_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_handle, gfp_t flag) { return iommu_alloc_coherent(devnode_table(hwdev), size, dma_handle, - flag); + device_to_mask(hwdev), flag); } static void pci_iommu_free_coherent(struct device *hwdev, size_t size, @@ -85,7 +104,8 @@ static void pci_iommu_free_coherent(struct device *hwdev, size_t size, static dma_addr_t pci_iommu_map_single(struct device *hwdev, void *vaddr, size_t size, enum dma_data_direction direction) { - return iommu_map_single(devnode_table(hwdev), vaddr, size, direction); + return iommu_map_single(devnode_table(hwdev), vaddr, size, + device_to_mask(hwdev), direction); } @@ -100,7 +120,7 @@ static int pci_iommu_map_sg(struct device *pdev, struct scatterlist *sglist, int nelems, enum dma_data_direction direction) { return iommu_map_sg(pdev, devnode_table(pdev), sglist, - nelems, direction); + nelems, device_to_mask(pdev), direction); } static void pci_iommu_unmap_sg(struct device *pdev, struct scatterlist *sglist, @@ -112,7 +132,19 @@ static void pci_iommu_unmap_sg(struct device *pdev, struct scatterlist *sglist, /* We support DMA to/from any memory page via the iommu */ static int pci_iommu_dma_supported(struct device *dev, u64 mask) { - return 1; + struct iommu_table *tbl = devnode_table(dev); + + if (!tbl || tbl->it_offset > mask) { + printk(KERN_INFO "Warning: IOMMU table offset too big for device mask\n"); + if (tbl) + printk(KERN_INFO "mask: 0x%08lx, table offset: 0x%08lx\n", + mask, tbl->it_offset); + else + printk(KERN_INFO "mask: 0x%08lx, table unavailable\n", + mask); + return 0; + } else + return 1; } void pci_iommu_init(void) diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c index 13c655ba2841..971020cf3f7d 100644 --- a/arch/powerpc/kernel/vio.c +++ b/arch/powerpc/kernel/vio.c @@ -202,7 +202,7 @@ static dma_addr_t vio_map_single(struct device *dev, void *vaddr, size_t size, enum dma_data_direction direction) { return iommu_map_single(to_vio_dev(dev)->iommu_table, vaddr, size, - direction); + ~0ul, direction); 
} static void vio_unmap_single(struct device *dev, dma_addr_t dma_handle, @@ -216,7 +216,7 @@ static int vio_map_sg(struct device *dev, struct scatterlist *sglist, int nelems, enum dma_data_direction direction) { return iommu_map_sg(dev, to_vio_dev(dev)->iommu_table, sglist, - nelems, direction); + nelems, ~0ul, direction); } static void vio_unmap_sg(struct device *dev, struct scatterlist *sglist, @@ -229,7 +229,7 @@ static void *vio_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag) { return iommu_alloc_coherent(to_vio_dev(dev)->iommu_table, size, - dma_handle, flag); + dma_handle, ~0ul, flag); } static void vio_free_coherent(struct device *dev, size_t size, diff --git a/include/asm-powerpc/iommu.h b/include/asm-powerpc/iommu.h index d5677cbec200..18ca29e9105a 100644 --- a/include/asm-powerpc/iommu.h +++ b/include/asm-powerpc/iommu.h @@ -70,17 +70,18 @@ extern void iommu_free_table(struct device_node *dn); extern struct iommu_table *iommu_init_table(struct iommu_table * tbl); extern int iommu_map_sg(struct device *dev, struct iommu_table *tbl, - struct scatterlist *sglist, int nelems, + struct scatterlist *sglist, int nelems, unsigned long mask, enum dma_data_direction direction); extern void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist, int nelems, enum dma_data_direction direction); extern void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size, - dma_addr_t *dma_handle, gfp_t flag); + dma_addr_t *dma_handle, unsigned long mask, gfp_t flag); extern void iommu_free_coherent(struct iommu_table *tbl, size_t size, void *vaddr, dma_addr_t dma_handle); extern dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr, - size_t size, enum dma_data_direction direction); + size_t size, unsigned long mask, + enum dma_data_direction direction); extern void iommu_unmap_single(struct iommu_table *tbl, dma_addr_t dma_handle, size_t size, enum dma_data_direction direction); -- cgit v1.2.3 From 2fd83038160531245099c3c5b3511fa4b80765eb Mon Sep 17 00:00:00 2001 From: Helge Deller Date: Thu, 20 Apr 2006 20:40:23 +0000 Subject: [PARISC] Further work for multiple page sizes More work towards supporing multiple page sizes on 64-bit. Convert some assumptions that 64bit uses 3 level page tables into testing PT_NLEVELS. Also some BUG() to BUG_ON() conversions and some cleanups to assembler. Signed-off-by: Helge Deller Signed-off-by: Kyle McMartin --- arch/parisc/Kconfig | 31 ++++++++++++++++++++ arch/parisc/kernel/asm-offsets.c | 3 ++ arch/parisc/kernel/entry.S | 36 ++++++++++++++--------- arch/parisc/kernel/head.S | 15 +++++----- arch/parisc/kernel/init_task.c | 10 +++---- arch/parisc/kernel/pacache.S | 25 ++++++++-------- arch/parisc/kernel/syscall.S | 10 +++---- arch/parisc/kernel/vmlinux.lds.S | 54 ++++++++++++++++++++-------------- arch/parisc/mm/init.c | 28 +++++++++--------- include/asm-parisc/page.h | 25 +++++++++++++--- include/asm-parisc/pgtable.h | 63 ++++++++++++++++++++++++++++------------ 11 files changed, 198 insertions(+), 102 deletions(-) (limited to 'include') diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index 19f911c5dd58..910fb3afc0b5 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig @@ -138,6 +138,37 @@ config 64BIT enable this option otherwise. The 64bit kernel is significantly bigger and slower than the 32bit one. 
+choice + prompt "Kernel page size" + default PARISC_PAGE_SIZE_4KB if !64BIT + default PARISC_PAGE_SIZE_4KB if 64BIT +# default PARISC_PAGE_SIZE_16KB if 64BIT + +config PARISC_PAGE_SIZE_4KB + bool "4KB" + help + This lets you select the page size of the kernel. For best + performance, a page size of 16KB is recommended. For best + compatibility with 32bit applications, a page size of 4KB should be + selected (the vast majority of 32bit binaries work perfectly fine + with a larger page size). + + 4KB For best 32bit compatibility + 16KB For best performance + 64KB For best performance, might give more overhead. + + If you don't know what to do, choose 4KB. + +config PARISC_PAGE_SIZE_16KB + bool "16KB (EXPERIMENTAL)" + depends on PA8X00 && EXPERIMENTAL + +config PARISC_PAGE_SIZE_64KB + bool "64KB (EXPERIMENTAL)" + depends on PA8X00 && EXPERIMENTAL + +endchoice + config SMP bool "Symmetric multi-processing support" ---help--- diff --git a/arch/parisc/kernel/asm-offsets.c b/arch/parisc/kernel/asm-offsets.c index e23c4e1e3a25..c11a5bc7c067 100644 --- a/arch/parisc/kernel/asm-offsets.c +++ b/arch/parisc/kernel/asm-offsets.c @@ -288,8 +288,11 @@ int main(void) DEFINE(ASM_PGD_ENTRY_SIZE, PGD_ENTRY_SIZE); DEFINE(ASM_PMD_ENTRY_SIZE, PMD_ENTRY_SIZE); DEFINE(ASM_PTE_ENTRY_SIZE, PTE_ENTRY_SIZE); + DEFINE(ASM_PFN_PTE_SHIFT, PFN_PTE_SHIFT); DEFINE(ASM_PT_INITIAL, PT_INITIAL); DEFINE(ASM_PAGE_SIZE, PAGE_SIZE); + DEFINE(ASM_PAGE_SIZE_DIV64, PAGE_SIZE/64); + DEFINE(ASM_PAGE_SIZE_DIV128, PAGE_SIZE/128); BLANK(); DEFINE(EXCDATA_IP, offsetof(struct exception_data, fault_ip)); DEFINE(EXCDATA_SPACE, offsetof(struct exception_data, fault_space)); diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S index 7c95d7663c29..d9e53cf0372b 100644 --- a/arch/parisc/kernel/entry.S +++ b/arch/parisc/kernel/entry.S @@ -502,18 +502,20 @@ * all ILP32 processes and all the kernel for machines with * under 4GB of memory) */ .macro L3_ptep pgd,pte,index,va,fault +#if PT_NLEVELS == 3 /* we might have a 2-Level scheme, e.g. with 16kb page size */ extrd,u \va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index copy %r0,\pte - extrd,u,*= \va,31,32,%r0 + extrd,u,*= \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0 ldw,s \index(\pgd),\pgd - extrd,u,*= \va,31,32,%r0 + extrd,u,*= \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0 bb,>=,n \pgd,_PxD_PRESENT_BIT,\fault - extrd,u,*= \va,31,32,%r0 + extrd,u,*= \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0 shld \pgd,PxD_VALUE_SHIFT,\index - extrd,u,*= \va,31,32,%r0 + extrd,u,*= \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0 copy \index,\pgd - extrd,u,*<> \va,31,32,%r0 + extrd,u,*<> \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0 ldo ASM_PGD_PMD_OFFSET(\pgd),\pgd +#endif L2_ptep \pgd,\pte,\index,\va,\fault .endm @@ -563,10 +565,18 @@ extrd,u,*= \pte,_PAGE_GATEWAY_BIT+32,1,%r0 depd %r0,11,2,\prot /* If Gateway, Set PL2 to 0 */ - /* Get rid of prot bits and convert to page addr for iitlbt and idtlbt */ + /* Enforce uncacheable pages. + * This should ONLY be use for MMIO on PA 2.0 machines. + * Memory/DMA is cache coherent on all PA2.0 machines we support + * (that means T-class is NOT supported) and the memory controllers + * on most of those machines only handles cache transactions. 
+ */ + extrd,u,*= \pte,_PAGE_NO_CACHE_BIT+32,1,%r0 + depi 1,12,1,\prot - depd %r0,63,PAGE_SHIFT,\pte - extrd,s \pte,(63-PAGE_SHIFT)+(63-58),64-PAGE_SHIFT,\pte + /* Drop prot bits and convert to page addr for iitlbt and idtlbt */ + extrd,u \pte,(63-ASM_PFN_PTE_SHIFT)+(63-58),64-PAGE_SHIFT,\pte + depdi _PAGE_SIZE_ENCODING_DEFAULT,63,63-58,\pte .endm /* Identical macro to make_insert_tlb above, except it @@ -584,9 +594,8 @@ /* Get rid of prot bits and convert to page addr for iitlba */ - depi 0,31,PAGE_SHIFT,\pte + depi _PAGE_SIZE_ENCODING_DEFAULT,31,ASM_PFN_PTE_SHIFT,\pte extru \pte,24,25,\pte - .endm /* This is for ILP32 PA2.0 only. The TLB insertion needs @@ -1201,10 +1210,9 @@ intr_save: */ /* adjust isr/ior. */ - - extrd,u %r16,63,7,%r1 /* get high bits from isr for ior */ - depd %r1,31,7,%r17 /* deposit them into ior */ - depdi 0,63,7,%r16 /* clear them from isr */ + extrd,u %r16,63,SPACEID_SHIFT,%r1 /* get high bits from isr for ior */ + depd %r1,31,SPACEID_SHIFT,%r17 /* deposit them into ior */ + depdi 0,63,SPACEID_SHIFT,%r16 /* clear them from isr */ #endif STREG %r16, PT_ISR(%r29) STREG %r17, PT_IOR(%r29) diff --git a/arch/parisc/kernel/head.S b/arch/parisc/kernel/head.S index 0b47afc20690..3e79e62f7b0b 100644 --- a/arch/parisc/kernel/head.S +++ b/arch/parisc/kernel/head.S @@ -76,16 +76,16 @@ $bss_loop: mtctl %r4,%cr24 /* Initialize kernel root pointer */ mtctl %r4,%cr25 /* Initialize user root pointer */ -#ifdef CONFIG_64BIT +#if PT_NLEVELS == 3 /* Set pmd in pgd */ load32 PA(pmd0),%r5 shrd %r5,PxD_VALUE_SHIFT,%r3 - ldo (PxD_FLAG_PRESENT+PxD_FLAG_VALID)(%r3),%r3 + ldo (PxD_FLAG_PRESENT+PxD_FLAG_VALID)(%r3),%r3 stw %r3,ASM_PGD_ENTRY*ASM_PGD_ENTRY_SIZE(%r4) ldo ASM_PMD_ENTRY*ASM_PMD_ENTRY_SIZE(%r5),%r4 #else /* 2-level page table, so pmd == pgd */ - ldo ASM_PGD_ENTRY*ASM_PGD_ENTRY_SIZE(%r4),%r4 + ldo ASM_PGD_ENTRY*ASM_PGD_ENTRY_SIZE(%r4),%r4 #endif /* Fill in pmd with enough pte directories */ @@ -99,7 +99,7 @@ $bss_loop: stw %r3,0(%r4) ldo (ASM_PAGE_SIZE >> PxD_VALUE_SHIFT)(%r3),%r3 addib,> -1,%r1,1b -#ifdef CONFIG_64BIT +#if PT_NLEVELS == 3 ldo ASM_PMD_ENTRY_SIZE(%r4),%r4 #else ldo ASM_PGD_ENTRY_SIZE(%r4),%r4 @@ -107,13 +107,14 @@ $bss_loop: /* Now initialize the PTEs themselves */ - ldo _PAGE_KERNEL(%r0),%r3 /* Hardwired 0 phys addr start */ + ldo 0+_PAGE_KERNEL(%r0),%r3 /* Hardwired 0 phys addr start */ + ldi (1<<(KERNEL_INITIAL_ORDER-PAGE_SHIFT)),%r11 /* PFN count */ load32 PA(pg0),%r1 $pgt_fill_loop: STREGM %r3,ASM_PTE_ENTRY_SIZE(%r1) - ldo ASM_PAGE_SIZE(%r3),%r3 - bb,>= %r3,31-KERNEL_INITIAL_ORDER,$pgt_fill_loop + ldo (1<<PFN_PTE_SHIFT)(%r3),%r3 /* add one PFN */ + addib,> -1,%r11,$pgt_fill_loop nop /* Load the return address...er...crash 'n burn */ diff --git a/arch/parisc/kernel/init_task.c b/arch/parisc/kernel/init_task.c index 7e898fd64415..8384bf9cecd2 100644 --- a/arch/parisc/kernel/init_task.c +++ b/arch/parisc/kernel/init_task.c @@ -53,17 +53,17 @@ union thread_union init_thread_union __attribute__((aligned(128))) __attribute__((__section__(".data.init_task"))) = { INIT_THREAD_INFO(init_task) }; -#ifdef __LP64__ +#if PT_NLEVELS == 3 /* NOTE: This layout exactly conforms to the hybrid L2/L3 page table layout * with the first pmd adjacent to the pgd and below it. gcc doesn't actually * guarantee that global objects will be laid out in memory in the same order * as the order of declaration, so put these in different sections and use * the linker script to order them.
*/ -pmd_t pmd0[PTRS_PER_PMD] __attribute__ ((aligned(PAGE_SIZE))) __attribute__ ((__section__ (".data.vm0.pmd"))) = { {0}, }; - +pmd_t pmd0[PTRS_PER_PMD] __attribute__ ((__section__ (".data.vm0.pmd"), aligned(PAGE_SIZE))); #endif -pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__ ((aligned(PAGE_SIZE))) __attribute__ ((__section__ (".data.vm0.pgd"))) = { {0}, }; -pte_t pg0[PT_INITIAL * PTRS_PER_PTE] __attribute__ ((aligned(PAGE_SIZE))) __attribute__ ((__section__ (".data.vm0.pte"))) = { {0}, }; + +pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__ ((__section__ (".data.vm0.pgd"), aligned(PAGE_SIZE))); +pte_t pg0[PT_INITIAL * PTRS_PER_PTE] __attribute__ ((__section__ (".data.vm0.pte"), aligned(PAGE_SIZE))); /* * Initial task structure. diff --git a/arch/parisc/kernel/pacache.S b/arch/parisc/kernel/pacache.S index 7a4f07e8d3c3..f600556414d1 100644 --- a/arch/parisc/kernel/pacache.S +++ b/arch/parisc/kernel/pacache.S @@ -65,7 +65,7 @@ flush_tlb_all_local: */ /* pcxt_ssm_bug - relied upon translation! PA 2.0 Arch. F-4 and F-5 */ - rsm PSW_SM_I, %r19 /* save I-bit state */ + rsm PSW_SM_I, %r19 /* save I-bit state */ load32 PA(1f), %r1 nop nop @@ -84,8 +84,7 @@ flush_tlb_all_local: rfi nop -1: ldil L%PA(cache_info), %r1 - ldo R%PA(cache_info)(%r1), %r1 +1: load32 PA(cache_info), %r1 /* Flush Instruction Tlb */ @@ -212,8 +211,7 @@ flush_instruction_cache_local: .entry mtsp %r0, %sr1 - ldil L%cache_info, %r1 - ldo R%cache_info(%r1), %r1 + load32 cache_info, %r1 /* Flush Instruction Cache */ @@ -254,8 +252,7 @@ flush_data_cache_local: .entry mtsp %r0, %sr1 - ldil L%cache_info, %r1 - ldo R%cache_info(%r1), %r1 + load32 cache_info, %r1 /* Flush Data Cache */ @@ -303,7 +300,8 @@ copy_user_page_asm: */ ldd 0(%r25), %r19 - ldi 32, %r1 /* PAGE_SIZE/128 == 32 */ + ldi ASM_PAGE_SIZE_DIV128, %r1 + ldw 64(%r25), %r0 /* prefetch 1 cacheline ahead */ ldw 128(%r25), %r0 /* prefetch 2 */ @@ -368,7 +366,7 @@ copy_user_page_asm: * use ldd/std on a 32 bit kernel. */ ldw 0(%r25), %r19 - ldi 64, %r1 /* PAGE_SIZE/64 == 64 */ + ldi ASM_PAGE_SIZE_DIV64, %r1 1: ldw 4(%r25), %r20 @@ -461,6 +459,7 @@ copy_user_page_asm: sub %r25, %r1, %r23 /* move physical addr into non shadowed reg */ ldil L%(TMPALIAS_MAP_START), %r28 + /* FIXME for different page sizes != 4k */ #ifdef CONFIG_64BIT extrd,u %r26,56,32, %r26 /* convert phys addr to tlb insert format */ extrd,u %r23,56,32, %r23 /* convert phys addr to tlb insert format */ @@ -551,6 +550,7 @@ __clear_user_page_asm: #ifdef CONFIG_64BIT #if (TMPALIAS_MAP_START >= 0x80000000) depdi 0, 31,32, %r28 /* clear any sign extension */ + /* FIXME: page size dependend */ #endif extrd,u %r26, 56,32, %r26 /* convert phys addr to tlb insert format */ depd %r25, 63,22, %r28 /* Form aliased virtual address 'to' */ @@ -566,10 +566,10 @@ __clear_user_page_asm: pdtlb 0(%r28) #ifdef CONFIG_64BIT - ldi 32, %r1 /* PAGE_SIZE/128 == 32 */ + ldi ASM_PAGE_SIZE_DIV128, %r1 /* PREFETCH (Write) has not (yet) been proven to help here */ -/* #define PREFETCHW_OP ldd 256(%0), %r0 */ + /* #define PREFETCHW_OP ldd 256(%0), %r0 */ 1: std %r0, 0(%r28) std %r0, 8(%r28) @@ -591,8 +591,7 @@ __clear_user_page_asm: ldo 128(%r28), %r28 #else /* ! CONFIG_64BIT */ - - ldi 64, %r1 /* PAGE_SIZE/64 == 64 */ + ldi ASM_PAGE_SIZE_DIV64, %r1 1: stw %r0, 0(%r28) diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S index af88afef41bd..479d9a017cd1 100644 --- a/arch/parisc/kernel/syscall.S +++ b/arch/parisc/kernel/syscall.S @@ -55,7 +55,7 @@ * pointers. 
*/ - .align 4096 + .align ASM_PAGE_SIZE linux_gateway_page: /* ADDRESS 0x00 to 0xb0 = 176 bytes / 4 bytes per insn = 44 insns */ @@ -632,7 +632,7 @@ cas_action: end_compare_and_swap: /* Make sure nothing else is placed on this page */ - .align 4096 + .align ASM_PAGE_SIZE .export end_linux_gateway_page end_linux_gateway_page: @@ -652,7 +652,7 @@ end_linux_gateway_page: .section .rodata,"a" - .align 4096 + .align ASM_PAGE_SIZE /* Light-weight-syscall table */ /* Start of lws table. */ .export lws_table @@ -662,14 +662,14 @@ lws_table: LWS_ENTRY(compare_and_swap64) /* 1 - ELF64 Atomic compare and swap */ /* End of lws table */ - .align 4096 + .align ASM_PAGE_SIZE .export sys_call_table .Lsys_call_table: sys_call_table: #include "syscall_table.S" #ifdef CONFIG_64BIT - .align 4096 + .align ASM_PAGE_SIZE .export sys_call_table64 .Lsys_call_table64: sys_call_table64: diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S index 6d6436a6b624..94dcc03a28ed 100644 --- a/arch/parisc/kernel/vmlinux.lds.S +++ b/arch/parisc/kernel/vmlinux.lds.S @@ -6,6 +6,7 @@ * Copyright (C) 2000 Michael Ang * Copyright (C) 2002 Randolph Chung * Copyright (C) 2003 James Bottomley + * Copyright (C) 2006 Helge Deller * * * This program is free software; you can redistribute it and/or modify @@ -27,6 +28,7 @@ /* needed for the processor specific cache alignment size */ #include <asm/cache.h> #include <asm/page.h> +#include <asm/asm-offsets.h> /* ld script to make hppa Linux kernel */ #ifndef CONFIG_64BIT @@ -68,7 +70,7 @@ SECTIONS RODATA /* writeable */ - . = ALIGN(4096); /* Make sure this is page aligned so + . = ALIGN(ASM_PAGE_SIZE); /* Make sure this is page aligned so that we can properly leave these as writable */ data_start = .; @@ -81,23 +83,17 @@ SECTIONS __start___unwind = .; /* unwind info */ .PARISC.unwind : { *(.PARISC.unwind) } __stop___unwind = .; - + + /* rarely changed data like cpu maps */ + . = ALIGN(16); + .data.read_mostly : { *(.data.read_mostly) } + + . = ALIGN(L1_CACHE_BYTES); .data : { /* Data */ *(.data) - *(.data.vm0.pmd) - *(.data.vm0.pgd) - *(.data.vm0.pte) CONSTRUCTORS } - . = ALIGN(4096); - /* nosave data is really only used for software suspend...it's here - * just in case we ever implement it */ - __nosave_begin = .; - .data_nosave : { *(.data.nosave) } - . = ALIGN(4096); - __nosave_end = .; - . = ALIGN(L1_CACHE_BYTES); .data.cacheline_aligned : { *(.data.cacheline_aligned) } @@ -105,12 +101,29 @@ SECTIONS . = ALIGN(16); .data.lock_aligned : { *(.data.lock_aligned) } - /* rarely changed data like cpu maps */ - . = ALIGN(16); - .data.read_mostly : { *(.data.read_mostly) } + . = ALIGN(ASM_PAGE_SIZE); + /* nosave data is really only used for software suspend...it's here + * just in case we ever implement it */ + __nosave_begin = .; + .data_nosave : { *(.data.nosave) } + . = ALIGN(ASM_PAGE_SIZE); + __nosave_end = .; _edata = .; /* End of data section */ + __bss_start = .; /* BSS */ + /* page table entries need to be PAGE_SIZE aligned */ + . = ALIGN(ASM_PAGE_SIZE); + .data.vmpages : { + *(.data.vm0.pmd) + *(.data.vm0.pgd) + *(.data.vm0.pte) + } + .bss : { *(.bss) *(COMMON) } + __bss_stop = .; + + + /* assembler code expects init_task to be 16k aligned */ . = ALIGN(16384); /* init_task */ .data.init_task : { *(.data.init_task) } @@ -126,6 +139,7 @@ .dlt : { *(.dlt) } #endif + /* reserve space for interrupt stack by aligning __init* to 16k */ .
= ALIGN(16384); __init_begin = .; .init.text : { @@ -166,7 +180,7 @@ SECTIONS from .altinstructions and .eh_frame */ .exit.text : { *(.exit.text) } .exit.data : { *(.exit.data) } - . = ALIGN(4096); + . = ALIGN(ASM_PAGE_SIZE); __initramfs_start = .; .init.ramfs : { *(.init.ramfs) } __initramfs_end = .; @@ -174,14 +188,10 @@ SECTIONS __per_cpu_start = .; .data.percpu : { *(.data.percpu) } __per_cpu_end = .; - . = ALIGN(4096); + . = ALIGN(ASM_PAGE_SIZE); __init_end = .; /* freed after init ends here */ - __bss_start = .; /* BSS */ - .bss : { *(.bss) *(COMMON) } - __bss_stop = .; - _end = . ; /* Sections to be discarded */ diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c index 3796be67cd53..631712562656 100644 --- a/arch/parisc/mm/init.c +++ b/arch/parisc/mm/init.c @@ -6,6 +6,7 @@ * changed by Philipp Rumpf * Copyright 1999 Philipp Rumpf (prumpf@tux.org) * Copyright 2004 Randolph Chung (tausq@debian.org) + * Copyright 2006 Helge Deller (deller@gmx.de) * */ @@ -371,8 +372,8 @@ static void __init setup_bootmem(void) void free_initmem(void) { - unsigned long addr; - + unsigned long addr, init_begin, init_end; + printk(KERN_INFO "Freeing unused kernel memory: "); #ifdef CONFIG_DEBUG_KERNEL @@ -395,8 +396,11 @@ void free_initmem(void) local_irq_enable(); #endif - addr = (unsigned long)(&__init_begin); - for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) { + /* align __init_begin and __init_end to page size, + ignoring linker script where we might have tried to save RAM */ + init_begin = PAGE_ALIGN((unsigned long)(&__init_begin)); + init_end = PAGE_ALIGN((unsigned long)(&__init_end)); + for (addr = init_begin; addr < init_end; addr += PAGE_SIZE) { ClearPageReserved(virt_to_page(addr)); init_page_count(virt_to_page(addr)); free_page(addr); @@ -407,7 +411,7 @@ void free_initmem(void) /* set up a new led state on systems shipped LED State panel */ pdc_chassis_send_status(PDC_CHASSIS_DIRECT_BCOMPLETE); - printk("%luk freed\n", (unsigned long)(&__init_end - &__init_begin) >> 10); + printk("%luk freed\n", (init_end - init_begin) >> 10); } @@ -639,11 +643,13 @@ static void __init map_pages(unsigned long start_vaddr, unsigned long start_padd * Map the fault vector writable so we can * write the HPMC checksum. 
*/ +#if defined(CONFIG_PARISC_PAGE_SIZE_4KB) if (address >= ro_start && address < ro_end && address != fv_addr && address != gw_addr) pte = __mk_pte(address, PAGE_KERNEL_RO); else +#endif pte = __mk_pte(address, pgprot); if (address >= end_paddr) @@ -874,8 +880,7 @@ unsigned long alloc_sid(void) flush_tlb_all(); /* flush_tlb_all() calls recycle_sids() */ spin_lock(&sid_lock); } - if (free_space_ids == 0) - BUG(); + BUG_ON(free_space_ids == 0); } free_space_ids--; @@ -899,8 +904,7 @@ void free_sid(unsigned long spaceid) spin_lock(&sid_lock); - if (*dirty_space_offset & (1L << index)) - BUG(); /* attempt to free space id twice */ + BUG_ON(*dirty_space_offset & (1L << index)); /* attempt to free space id twice */ *dirty_space_offset |= (1L << index); dirty_space_ids++; @@ -975,7 +979,7 @@ static void recycle_sids(void) static unsigned long recycle_ndirty; static unsigned long recycle_dirty_array[SID_ARRAY_SIZE]; -static unsigned int recycle_inuse = 0; +static unsigned int recycle_inuse; void flush_tlb_all(void) { @@ -984,9 +988,7 @@ void flush_tlb_all(void) do_recycle = 0; spin_lock(&sid_lock); if (dirty_space_ids > RECYCLE_THRESHOLD) { - if (recycle_inuse) { - BUG(); /* FIXME: Use a semaphore/wait queue here */ - } + BUG_ON(recycle_inuse); /* FIXME: Use a semaphore/wait queue here */ get_dirty_sids(&recycle_ndirty,recycle_dirty_array); recycle_inuse++; do_recycle++; diff --git a/include/asm-parisc/page.h b/include/asm-parisc/page.h index 45e02aa5bf4b..c0dd461fb8f1 100644 --- a/include/asm-parisc/page.h +++ b/include/asm-parisc/page.h @@ -1,13 +1,30 @@ #ifndef _PARISC_PAGE_H #define _PARISC_PAGE_H -/* PAGE_SHIFT determines the page size */ -#define PAGE_SHIFT 12 -#define PAGE_SIZE (1UL << PAGE_SHIFT) -#define PAGE_MASK (~(PAGE_SIZE-1)) +#if !defined(__KERNEL__) +/* this is for userspace applications (4k page size) */ +# define PAGE_SHIFT 12 /* 4k */ +# define PAGE_SIZE (1UL << PAGE_SHIFT) +# define PAGE_MASK (~(PAGE_SIZE-1)) +#endif + #ifdef __KERNEL__ #include <linux/config.h> + +#if defined(CONFIG_PARISC_PAGE_SIZE_4KB) +# define PAGE_SHIFT 12 /* 4k */ +#elif defined(CONFIG_PARISC_PAGE_SIZE_16KB) +# define PAGE_SHIFT 14 /* 16k */ +#elif defined(CONFIG_PARISC_PAGE_SIZE_64KB) +# define PAGE_SHIFT 16 /* 64k */ +#else +# error "unknown default kernel page size" +#endif +#define PAGE_SIZE (1UL << PAGE_SHIFT) +#define PAGE_MASK (~(PAGE_SIZE-1)) + + #ifndef __ASSEMBLY__ #include <asm/types.h> diff --git a/include/asm-parisc/pgtable.h b/include/asm-parisc/pgtable.h index 4e34c6b44059..aec089eb8b85 100644 --- a/include/asm-parisc/pgtable.h +++ b/include/asm-parisc/pgtable.h @@ -59,16 +59,15 @@ #define ISTACK_SIZE 32768 /* Interrupt Stack Size */ #define ISTACK_ORDER 3 -/* This is the size of the initially mapped kernel memory (i.e.
currently - * 0 to 1<<23 == 8MB */ +/* This is the size of the initially mapped kernel memory */ #ifdef CONFIG_64BIT -#define KERNEL_INITIAL_ORDER 24 +#define KERNEL_INITIAL_ORDER 24 /* 0 to 1<<24 = 16MB */ #else -#define KERNEL_INITIAL_ORDER 23 +#define KERNEL_INITIAL_ORDER 23 /* 0 to 1<<23 = 8MB */ #endif #define KERNEL_INITIAL_SIZE (1 << KERNEL_INITIAL_ORDER) -#ifdef CONFIG_64BIT +#if defined(CONFIG_64BIT) && defined(CONFIG_PARISC_PAGE_SIZE_4KB) #define PT_NLEVELS 3 #define PGD_ORDER 1 /* Number of pages per pgd */ #define PMD_ORDER 1 /* Number of pages per pmd */ @@ -111,11 +110,15 @@ #define MAX_ADDRBITS (PGDIR_SHIFT + BITS_PER_PGD) #define MAX_ADDRESS (1UL << MAX_ADDRBITS) -#define SPACEID_SHIFT (MAX_ADDRBITS - 32) +#define SPACEID_SHIFT (MAX_ADDRBITS - 32) /* This calculates the number of initial pages we need for the initial * page tables */ -#define PT_INITIAL (1 << (KERNEL_INITIAL_ORDER - PMD_SHIFT)) +#if (KERNEL_INITIAL_ORDER) >= (PMD_SHIFT) +# define PT_INITIAL (1 << (KERNEL_INITIAL_ORDER - PMD_SHIFT)) +#else +# define PT_INITIAL (1) /* all initial PTEs fit into one page */ +#endif /* * pgd entries used up by user/kernel: @@ -160,6 +163,10 @@ extern void *vmalloc_start; * to zero */ #define PTE_SHIFT xlate_pabit(_PAGE_USER_BIT) +/* PFN_PTE_SHIFT defines the shift of a PTE value to access the PFN field */ +#define PFN_PTE_SHIFT 12 + + /* this is how many bits may be used by the file functions */ #define PTE_FILE_MAX_BITS (BITS_PER_LONG - PTE_SHIFT) @@ -188,7 +195,8 @@ extern void *vmalloc_start; /* The pgd/pmd contains a ptr (in phys addr space); since all pgds/pmds * are page-aligned, we don't care about the PAGE_OFFSET bits, except * for a few meta-information bits, so we shift the address to be - * able to effectively address 40-bits of physical address space. 
*/ + * able to effectively address 40/42/44-bits of physical address space + * depending on 4k/16k/64k PAGE_SIZE */ #define _PxD_PRESENT_BIT 31 #define _PxD_ATTACHED_BIT 30 #define _PxD_VALID_BIT 29 @@ -198,7 +206,7 @@ #define PxD_FLAG_VALID (1 << xlate_pabit(_PxD_VALID_BIT)) #define PxD_FLAG_MASK (0xf) #define PxD_FLAG_SHIFT (4) -#define PxD_VALUE_SHIFT (8) +#define PxD_VALUE_SHIFT (8) /* (PAGE_SHIFT-PxD_FLAG_SHIFT) */ #ifndef __ASSEMBLY__ @@ -246,6 +254,7 @@ #define __S110 PAGE_RWX #define __S111 PAGE_RWX + extern pgd_t swapper_pg_dir[]; /* declared in init_task.c */ /* initial page tables for 0-8MB for kernel */ @@ -272,7 +281,7 @@ extern unsigned long *empty_zero_page; #define pgd_flag(x) (pgd_val(x) & PxD_FLAG_MASK) #define pgd_address(x) ((unsigned long)(pgd_val(x) &~ PxD_FLAG_MASK) << PxD_VALUE_SHIFT) -#ifdef CONFIG_64BIT +#if PT_NLEVELS == 3 /* The first entry of the permanent pmd is not there if it contains * the gateway marker */ #define pmd_none(x) (!pmd_val(x) || pmd_flag(x) == PxD_FLAG_ATTACHED) @@ -282,7 +291,7 @@ #define pmd_bad(x) (!(pmd_flag(x) & PxD_FLAG_VALID)) #define pmd_present(x) (pmd_flag(x) & PxD_FLAG_PRESENT) static inline void pmd_clear(pmd_t *pmd) { -#ifdef CONFIG_64BIT +#if PT_NLEVELS == 3 if (pmd_flag(*pmd) & PxD_FLAG_ATTACHED) /* This is the entry pointing to the permanent pmd * attached to the pgd; cannot clear it */ @@ -303,7 +312,7 @@ #define pgd_bad(x) (!(pgd_flag(x) & PxD_FLAG_VALID)) #define pgd_present(x) (pgd_flag(x) & PxD_FLAG_PRESENT) static inline void pgd_clear(pgd_t *pgd) { -#ifdef CONFIG_64BIT +#if PT_NLEVELS == 3 if(pgd_flag(*pgd) & PxD_FLAG_ATTACHED) /* This is the permanent pmd attached to the pgd; cannot * free it */ @@ -351,7 +360,7 @@ extern inline pte_t pte_mkwrite(pte_t pte) { pte_val(pte) |= _PAGE_WRITE; return ({ \ pte_t __pte; \ \ - pte_val(__pte) = ((addr)+pgprot_val(pgprot)); \ + pte_val(__pte) = ((((addr)>>PAGE_SHIFT)<<PFN_PTE_SHIFT) + pgprot_val(pgprot)); \ __pte; \ }) -#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT) +#define pte_pfn(x) (pte_val(x) >> PFN_PTE_SHIFT) #define pte_page(pte) (pfn_to_page(pte_pfn(pte))) @@ -499,6 +504,26 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, #endif /* !__ASSEMBLY__ */ + +/* TLB page size encoding - see table 3-1 in parisc20.pdf */ +#define _PAGE_SIZE_ENCODING_4K 0 +#define _PAGE_SIZE_ENCODING_16K 1 +#define _PAGE_SIZE_ENCODING_64K 2 +#define _PAGE_SIZE_ENCODING_256K 3 +#define _PAGE_SIZE_ENCODING_1M 4 +#define _PAGE_SIZE_ENCODING_4M 5 +#define _PAGE_SIZE_ENCODING_16M 6 +#define _PAGE_SIZE_ENCODING_64M 7 + +#if defined(CONFIG_PARISC_PAGE_SIZE_4KB) +# define _PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_4K +#elif defined(CONFIG_PARISC_PAGE_SIZE_16KB) +# define _PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_16K +#elif defined(CONFIG_PARISC_PAGE_SIZE_64KB) +# define _PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_64K +#endif + + #define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ remap_pfn_range(vma, vaddr, pfn, size, prot) -- cgit v1.2.3 From 6ca773cf8b9dc19989c9b44635292b1ba80f9112 Mon Sep 17 00:00:00 2001 From: Kyle McMartin Date: Thu, 20 Apr 2006 04:44:07 +0000 Subject: [PARISC] Add new entries to the syscall table Most are easy, but sync_file_range needed special handling when entering through the 32-bit syscall table.
Signed-off-by: Kyle McMartin --- arch/parisc/kernel/sys_parisc.c | 8 ++++++++ arch/parisc/kernel/syscall_table.S | 8 +++++++- include/asm-parisc/unistd.h | 8 +++++++- 3 files changed, 22 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c index d15a1d53e101..8b5df98e2b31 100644 --- a/arch/parisc/kernel/sys_parisc.c +++ b/arch/parisc/kernel/sys_parisc.c @@ -231,6 +231,14 @@ asmlinkage long parisc_fadvise64_64(int fd, (loff_t)high_len << 32 | low_len, advice); } +asmlinkage long parisc_sync_file_range(int fd, + u32 hi_off, u32 lo_off, u32 hi_nbytes, u32 lo_nbytes, + unsigned int flags) +{ + return sys_sync_file_range(fd, (loff_t)hi_off << 32 | lo_off, + (loff_t)hi_nbytes << 32 | lo_nbytes, flags); +} + asmlinkage unsigned long sys_alloc_hugepages(int key, unsigned long addr, unsigned long len, int prot, int flag) { return -ENOMEM; diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S index bbeeb614cfab..e27b432f90a8 100644 --- a/arch/parisc/kernel/syscall_table.S +++ b/arch/parisc/kernel/syscall_table.S @@ -13,7 +13,7 @@ * Copyright (C) 2001 Helge Deller * Copyright (C) 2000-2001 Thomas Bogendoerfer * Copyright (C) 2002 Randolph Chung - * + * Copyright (C) 2005-2006 Kyle McMartin * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -393,5 +393,11 @@ ENTRY_SAME(readlinkat) /* 285 */ ENTRY_SAME(fchmodat) ENTRY_SAME(faccessat) + ENTRY_SAME(unshare) + ENTRY_COMP(set_robust_list) + ENTRY_COMP(get_robust_list) /* 290 */ + ENTRY_SAME(splice) + ENTRY_OURS(sync_file_range) + ENTRY_SAME(tee) /* Nothing yet */ diff --git a/include/asm-parisc/unistd.h b/include/asm-parisc/unistd.h index c56fccbf34ad..0e1a30be2e30 100644 --- a/include/asm-parisc/unistd.h +++ b/include/asm-parisc/unistd.h @@ -780,8 +780,14 @@ #define __NR_readlinkat (__NR_Linux + 285) #define __NR_fchmodat (__NR_Linux + 286) #define __NR_faccessat (__NR_Linux + 287) +#define __NR_unshare (__NR_Linux + 288) +#define __NR_set_robust_list (__NR_Linux + 289) +#define __NR_get_robust_list (__NR_Linux + 290) +#define __NR_splice (__NR_Linux + 291) +#define __NR_sync_file_range (__NR_Linux + 292) +#define __NR_tee (__NR_Linux + 293) -#define __NR_Linux_syscalls 288 +#define __NR_Linux_syscalls 294 #define HPUX_GATEWAY_ADDR 0xC0000004 #define LINUX_GATEWAY_ADDR 0x100 -- cgit v1.2.3 From 1b52d7c2210b9a64c5cba6aded478c8217a8853c Mon Sep 17 00:00:00 2001 From: Kyle McMartin Date: Thu, 20 Apr 2006 21:16:32 +0000 Subject: [PARISC] Make ioremap default to _nocache Since it is way more work to change most drivers to comply with parisc, take the easy way out and make ioremap _NO_CACHE by default. This is in line with what powerpc does. Signed-off-by: Kyle McMartin --- include/asm-parisc/io.h | 17 +++++------------ 1 file changed, 5 insertions(+), 12 deletions(-) (limited to 'include') diff --git a/include/asm-parisc/io.h b/include/asm-parisc/io.h index 29da31194b91..244f6b8883f4 100644 --- a/include/asm-parisc/io.h +++ b/include/asm-parisc/io.h @@ -126,24 +126,17 @@ static inline void gsc_writeq(unsigned long long val, unsigned long addr) extern void __iomem * __ioremap(unsigned long offset, unsigned long size, unsigned long flags); -extern inline void __iomem * ioremap(unsigned long offset, unsigned long size) -{ - return __ioremap(offset, size, 0); -} - -/* - * This one maps high address device memory and turns off caching for that area. 
- * it's useful if some control registers are in such an area and write combining - * or read caching is not desirable: +/* Most machines react poorly to I/O-space being cacheable... Instead let's + * define ioremap() in terms of ioremap_nocache(). */ -extern inline void * ioremap_nocache(unsigned long offset, unsigned long size) +extern inline void __iomem * ioremap(unsigned long offset, unsigned long size) { - return __ioremap(offset, size, _PAGE_NO_CACHE /* _PAGE_PCD */); + return __ioremap(offset, size, _PAGE_NO_CACHE); } +#define ioremap_nocache(off, sz) ioremap((off), (sz)) extern void iounmap(void __iomem *addr); - static inline unsigned char __raw_readb(const volatile void __iomem *addr) { return (*(volatile unsigned char __force *) (addr)); -- cgit v1.2.3 From d0e15bed84db7a9b0ea85d2ad9707b5e6d2e38da Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Sun, 23 Apr 2006 10:42:04 +1000 Subject: powerpc: Fix define_machine so machine_is() works from modules machine_is() was always returning 0 when used in a module, because we weren't exporting the machine definitions. This was why sound wasn't working on powermacs when CONFIG_SND_POWERMAC=m. Original fix from Ben Herrenschmidt, further fixed by me. Signed-off-by: Paul Mackerras --- include/asm-powerpc/machdep.h | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/asm-powerpc/machdep.h b/include/asm-powerpc/machdep.h index 5ed847680754..0f9254c18914 100644 --- a/include/asm-powerpc/machdep.h +++ b/include/asm-powerpc/machdep.h @@ -253,7 +253,11 @@ extern struct machdep_calls *machine_id; #define __machine_desc __attribute__ ((__section__ (".machine.desc"))) -#define define_machine(name) struct machdep_calls mach_##name __machine_desc = +#define define_machine(name) \ + extern struct machdep_calls mach_##name; \ + EXPORT_SYMBOL(mach_##name); \ + struct machdep_calls mach_##name __machine_desc = + #define machine_is(name) \ ({ \ extern struct machdep_calls mach_##name \ -- cgit v1.2.3 From d8fe3f19203b1f5070358aaa292d33295258b448 Mon Sep 17 00:00:00 2001 From: OGAWA Hirofumi Date: Mon, 24 Apr 2006 13:48:51 -0700 Subject: [SPARC]: __NR_sys removal __NR_sys_sync_file_range part was lost somewhere... [glibc is already checking __NR_sync_file_range] Signed-off-by: OGAWA Hirofumi Signed-off-by: David S. 
Miller --- include/asm-sparc/unistd.h | 2 +- include/asm-sparc64/unistd.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/asm-sparc/unistd.h b/include/asm-sparc/unistd.h index 45feff893b8e..32a48f623e2b 100644 --- a/include/asm-sparc/unistd.h +++ b/include/asm-sparc/unistd.h @@ -271,7 +271,7 @@ #define __NR_getsid 252 #define __NR_fdatasync 253 #define __NR_nfsservctl 254 -#define __NR_sys_sync_file_range 255 +#define __NR_sync_file_range 255 #define __NR_clock_settime 256 #define __NR_clock_gettime 257 #define __NR_clock_getres 258 diff --git a/include/asm-sparc64/unistd.h b/include/asm-sparc64/unistd.h index 597f6923a46e..ca80e8aca128 100644 --- a/include/asm-sparc64/unistd.h +++ b/include/asm-sparc64/unistd.h @@ -273,7 +273,7 @@ #define __NR_getsid 252 #define __NR_fdatasync 253 #define __NR_nfsservctl 254 -#define __NR_sys_sync_file_range 255 +#define __NR_sync_file_range 255 #define __NR_clock_settime 256 #define __NR_clock_gettime 257 #define __NR_clock_getres 258 -- cgit v1.2.3 From 55fe5866366ae42f259f27ae5962eb267d9ce172 Mon Sep 17 00:00:00 2001 From: Patrick McHardy Date: Mon, 24 Apr 2006 17:16:28 -0700 Subject: [NETFILTER]: Fix compat_xt_counters alignment for non-x86 Some (?) non-x86 architectures require 8byte alignment for u_int64_t even when compiled for 32bit, using u_int32_t in compat_xt_counters breaks on these architectures, use u_int64_t for everything but x86. Reported by Andreas Schwab . Signed-off-by: Patrick McHardy Signed-off-by: David S. Miller --- include/linux/netfilter/x_tables.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'include') diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h index f6bdef82a322..38701454e197 100644 --- a/include/linux/netfilter/x_tables.h +++ b/include/linux/netfilter/x_tables.h @@ -361,7 +361,11 @@ struct compat_xt_entry_target struct compat_xt_counters { +#if defined(CONFIG_X86_64) || defined(CONFIG_IA64) u_int32_t cnt[4]; +#else + u_int64_t cnt[2]; +#endif }; struct compat_xt_counters_info -- cgit v1.2.3
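An illustration of the compat_xt_counters fix above: the sketch below is not part of the patch (the struct and field names are invented for the demo) and only demonstrates the underlying ABI fact the patch relies on — the i386 ABI aligns u_int64_t to 4 bytes inside structs, while most other 32-bit ABIs align it to 8, so a 64-bit kernel must mirror whichever layout its 32-bit userland actually produced.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Loosely modelled on a 64-bit counter embedded in an xtables entry. */
struct demo_entry {
        uint32_t flags;         /* 32-bit field before the counter */
        uint64_t pcnt;          /* 64-bit counter, like xt_counters */
};

int main(void)
{
        /* Compiled with "gcc -m32" on x86 this prints offset 4, size 12
         * (4-byte u64 alignment); built by a 32-bit powerpc or sparc
         * toolchain it prints offset 8, size 16. That difference is why
         * compat_xt_counters is u_int32_t cnt[4] on x86-64/ia64 but
         * u_int64_t cnt[2] everywhere else. */
        printf("pcnt offset = %zu, struct size = %zu\n",
               offsetof(struct demo_entry, pcnt), sizeof(struct demo_entry));
        return 0;
}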