From 7e05484f02e1ea05a3aae0724d4df1e8a5a1920f Mon Sep 17 00:00:00 2001 From: Bryan Venteicher Date: Tue, 16 Oct 2012 23:55:54 +1030 Subject: virtio-scsi: Add real 2-clause BSD license to header This is analogous to commit a1b383870a made by Rusty Russell to all the VirtIO headers at the time. This eases the use of the header as is by other OSes. Signed-off-by: Bryan Venteicher Acked-by: Paolo Bonzini Signed-off-by: Rusty Russell --- include/linux/virtio_scsi.h | 28 ++++++++++++++++++++++++++-- 1 file changed, 26 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/virtio_scsi.h b/include/linux/virtio_scsi.h index d6b4440387b7..4195b97a3def 100644 --- a/include/linux/virtio_scsi.h +++ b/include/linux/virtio_scsi.h @@ -1,7 +1,31 @@ +/* + * This header is BSD licensed so anyone can use the definitions to implement + * compatible drivers/servers. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + #ifndef _LINUX_VIRTIO_SCSI_H #define _LINUX_VIRTIO_SCSI_H -/* This header is BSD licensed so anyone can use the definitions to implement - * compatible drivers/servers. */ #define VIRTIO_SCSI_CDB_SIZE 32 #define VIRTIO_SCSI_SENSE_SIZE 96 -- cgit v1.2.3 From 1b4f59e356cc94929305bd107b7f38eec62715ad Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Mon, 22 Oct 2012 18:05:36 +0400 Subject: slub: Commonize slab_cache field in struct page Right now, slab and slub have fields in struct page to derive which cache a page belongs to, but they do it slightly differently. slab uses a field called slab_cache, that lives in the third double word. slub, uses a field called "slab", living outside of the doublewords area. Ideally, we could use the same field for this. Since slub heavily makes use of the doubleword region, there isn't really much room to move slub's slab_cache field around. Since slab does not have such strict placement restrictions, we can move it outside the doubleword area. The naming used by slab, "slab_cache", is less confusing, and it is preferred over slub's generic "slab". 
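For illustration only (a minimal sketch, not part of this patch; the helper name is hypothetical), resolving the cache that owns an object becomes identical for both allocators once the common field is in place:

	static inline struct kmem_cache *cache_from_obj(const void *obj)
	{
		struct page *page = virt_to_head_page(obj);

		return page->slab_cache;	/* common SL[AU]B back-pointer */
	}
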
Signed-off-by: Glauber Costa Acked-by: Christoph Lameter CC: David Rientjes Signed-off-by: Pekka Enberg --- include/linux/mm_types.h | 7 ++----- mm/slub.c | 24 ++++++++++++------------ 2 files changed, 14 insertions(+), 17 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 31f8a3af7d94..2fef4e720e79 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -128,10 +128,7 @@ struct page { }; struct list_head list; /* slobs list of pages */ - struct { /* slab fields */ - struct kmem_cache *slab_cache; - struct slab *slab_page; - }; + struct slab *slab_page; /* slab fields */ }; /* Remainder is not double word aligned */ @@ -146,7 +143,7 @@ struct page { #if USE_SPLIT_PTLOCKS spinlock_t ptl; #endif - struct kmem_cache *slab; /* SLUB: Pointer to slab */ + struct kmem_cache *slab_cache; /* SL[AU]B: Pointer to slab */ struct page *first_page; /* Compound tail pages */ }; diff --git a/mm/slub.c b/mm/slub.c index 16274b273c61..35483e0ab6bc 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -1092,11 +1092,11 @@ static noinline struct kmem_cache_node *free_debug_processing( if (!check_object(s, page, object, SLUB_RED_ACTIVE)) goto out; - if (unlikely(s != page->slab)) { + if (unlikely(s != page->slab_cache)) { if (!PageSlab(page)) { slab_err(s, page, "Attempt to free object(0x%p) " "outside of slab", object); - } else if (!page->slab) { + } else if (!page->slab_cache) { printk(KERN_ERR "SLUB : no slab for object 0x%p.\n", object); @@ -1357,7 +1357,7 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node) goto out; inc_slabs_node(s, page_to_nid(page), page->objects); - page->slab = s; + page->slab_cache = s; __SetPageSlab(page); if (page->pfmemalloc) SetPageSlabPfmemalloc(page); @@ -1424,7 +1424,7 @@ static void rcu_free_slab(struct rcu_head *h) else page = container_of((struct list_head *)h, struct page, lru); - __free_slab(page->slab, page); + __free_slab(page->slab_cache, page); } static void free_slab(struct kmem_cache *s, struct page *page) @@ -2617,9 +2617,9 @@ void kmem_cache_free(struct kmem_cache *s, void *x) page = virt_to_head_page(x); - if (kmem_cache_debug(s) && page->slab != s) { + if (kmem_cache_debug(s) && page->slab_cache != s) { pr_err("kmem_cache_free: Wrong slab cache. 
%s but object" - " is from %s\n", page->slab->name, s->name); + " is from %s\n", page->slab_cache->name, s->name); WARN_ON_ONCE(1); return; } @@ -3418,7 +3418,7 @@ size_t ksize(const void *object) return PAGE_SIZE << compound_order(page); } - return slab_ksize(page->slab); + return slab_ksize(page->slab_cache); } EXPORT_SYMBOL(ksize); @@ -3443,8 +3443,8 @@ bool verify_mem_not_deleted(const void *x) } slab_lock(page); - if (on_freelist(page->slab, page, object)) { - object_err(page->slab, page, object, "Object is on free-list"); + if (on_freelist(page->slab_cache, page, object)) { + object_err(page->slab_cache, page, object, "Object is on free-list"); rv = false; } else { rv = true; @@ -3475,7 +3475,7 @@ void kfree(const void *x) __free_pages(page, compound_order(page)); return; } - slab_free(page->slab, page, object, _RET_IP_); + slab_free(page->slab_cache, page, object, _RET_IP_); } EXPORT_SYMBOL(kfree); @@ -3686,11 +3686,11 @@ static void __init kmem_cache_bootstrap_fixup(struct kmem_cache *s) if (n) { list_for_each_entry(p, &n->partial, lru) - p->slab = s; + p->slab_cache = s; #ifdef CONFIG_SLUB_DEBUG list_for_each_entry(p, &n->full, lru) - p->slab = s; + p->slab_cache = s; #endif } } -- cgit v1.2.3 From 6c9c6d6301287e369a754d628230fa6e50cdb74b Mon Sep 17 00:00:00 2001 From: Shuah Khan Date: Mon, 8 Oct 2012 11:08:06 -0600 Subject: dma-debug: New interfaces to debug dma mapping errors Add dma-debug interface debug_dma_mapping_error() to debug drivers that fail to check dma mapping errors on addresses returned by dma_map_single() and dma_map_page() interfaces. This interface clears a flag set by debug_dma_map_page() to indicate that dma_mapping_error() has been called by the driver. When driver does unmap, debug_dma_unmap() checks the flag and if this flag is still set, prints warning message that includes call trace that leads up to the unmap. This interface can be called from dma_mapping_error() routines to enable dma mapping error check debugging. Tested: Intel iommu and swiotlb (iommu=soft) on x86-64 with CONFIG_DMA_API_DEBUG enabled and disabled. Signed-off-by: Shuah Khan Reviewed-by: Konrad Rzeszutek Wilk Signed-off-by: Joerg Roedel --- Documentation/DMA-API.txt | 12 +++++++ arch/x86/include/asm/dma-mapping.h | 1 + include/linux/dma-debug.h | 7 ++++ lib/dma-debug.c | 71 +++++++++++++++++++++++++++++++++++--- 4 files changed, 87 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/Documentation/DMA-API.txt b/Documentation/DMA-API.txt index 66bd97a95f10..78a6c569d204 100644 --- a/Documentation/DMA-API.txt +++ b/Documentation/DMA-API.txt @@ -678,3 +678,15 @@ out of dma_debug_entries. These entries are preallocated at boot. The number of preallocated entries is defined per architecture. If it is too low for you boot with 'dma_debug_entries=' to overwrite the architectural default. + +void debug_dmap_mapping_error(struct device *dev, dma_addr_t dma_addr); + +dma-debug interface debug_dma_mapping_error() to debug drivers that fail +to check dma mapping errors on addresses returned by dma_map_single() and +dma_map_page() interfaces. This interface clears a flag set by +debug_dma_map_page() to indicate that dma_mapping_error() has been called by +the driver. When driver does unmap, debug_dma_unmap() checks the flag and if +this flag is still set, prints warning message that includes call trace that +leads up to the unmap. This interface can be called from dma_mapping_error() +routines to enable dma mapping error check debugging. 
+ diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h index f7b4c7903e7e..808dae63eeea 100644 --- a/arch/x86/include/asm/dma-mapping.h +++ b/arch/x86/include/asm/dma-mapping.h @@ -47,6 +47,7 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev) static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) { struct dma_map_ops *ops = get_dma_ops(dev); + debug_dma_mapping_error(dev, dma_addr); if (ops->mapping_error) return ops->mapping_error(dev, dma_addr); diff --git a/include/linux/dma-debug.h b/include/linux/dma-debug.h index 171ad8aedc83..fc0e34ce038f 100644 --- a/include/linux/dma-debug.h +++ b/include/linux/dma-debug.h @@ -39,6 +39,8 @@ extern void debug_dma_map_page(struct device *dev, struct page *page, int direction, dma_addr_t dma_addr, bool map_single); +extern void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr); + extern void debug_dma_unmap_page(struct device *dev, dma_addr_t addr, size_t size, int direction, bool map_single); @@ -105,6 +107,11 @@ static inline void debug_dma_map_page(struct device *dev, struct page *page, { } +static inline void debug_dma_mapping_error(struct device *dev, + dma_addr_t dma_addr) +{ +} + static inline void debug_dma_unmap_page(struct device *dev, dma_addr_t addr, size_t size, int direction, bool map_single) diff --git a/lib/dma-debug.c b/lib/dma-debug.c index d84beb994f36..59f4a1a8187d 100644 --- a/lib/dma-debug.c +++ b/lib/dma-debug.c @@ -45,6 +45,12 @@ enum { dma_debug_coherent, }; +enum map_err_types { + MAP_ERR_CHECK_NOT_APPLICABLE, + MAP_ERR_NOT_CHECKED, + MAP_ERR_CHECKED, +}; + #define DMA_DEBUG_STACKTRACE_ENTRIES 5 struct dma_debug_entry { @@ -57,6 +63,7 @@ struct dma_debug_entry { int direction; int sg_call_ents; int sg_mapped_ents; + enum map_err_types map_err_type; #ifdef CONFIG_STACKTRACE struct stack_trace stacktrace; unsigned long st_entries[DMA_DEBUG_STACKTRACE_ENTRIES]; @@ -114,6 +121,12 @@ static struct device_driver *current_driver __read_mostly; static DEFINE_RWLOCK(driver_name_lock); +static const char *const maperr2str[] = { + [MAP_ERR_CHECK_NOT_APPLICABLE] = "dma map error check not applicable", + [MAP_ERR_NOT_CHECKED] = "dma map error not checked", + [MAP_ERR_CHECKED] = "dma map error checked", +}; + static const char *type2name[4] = { "single", "page", "scather-gather", "coherent" }; @@ -376,11 +389,12 @@ void debug_dma_dump_mappings(struct device *dev) list_for_each_entry(entry, &bucket->list, list) { if (!dev || dev == entry->dev) { dev_info(entry->dev, - "%s idx %d P=%Lx D=%Lx L=%Lx %s\n", + "%s idx %d P=%Lx D=%Lx L=%Lx %s %s\n", type2name[entry->type], idx, (unsigned long long)entry->paddr, entry->dev_addr, entry->size, - dir2name[entry->direction]); + dir2name[entry->direction], + maperr2str[entry->map_err_type]); } } @@ -838,13 +852,28 @@ static __init int dma_debug_entries_cmdline(char *str) __setup("dma_debug=", dma_debug_cmdline); __setup("dma_debug_entries=", dma_debug_entries_cmdline); +/* Calling dma_mapping_error() from dma-debug api will result in calling + debug_dma_mapping_error() - need internal mapping error routine to + avoid debug checks */ +#ifndef DMA_ERROR_CODE +#define DMA_ERROR_CODE 0 +#endif +static inline int has_mapping_error(struct device *dev, dma_addr_t dma_addr) +{ + const struct dma_map_ops *ops = get_dma_ops(dev); + if (ops->mapping_error) + return ops->mapping_error(dev, dma_addr); + + return (dma_addr == DMA_ERROR_CODE); +} + static void check_unmap(struct dma_debug_entry *ref) { struct dma_debug_entry 
*entry; struct hash_bucket *bucket; unsigned long flags; - if (dma_mapping_error(ref->dev, ref->dev_addr)) { + if (unlikely(has_mapping_error(ref->dev, ref->dev_addr))) { err_printk(ref->dev, NULL, "DMA-API: device driver tries " "to free an invalid DMA memory address\n"); return; @@ -910,6 +939,15 @@ static void check_unmap(struct dma_debug_entry *ref) dir2name[ref->direction]); } + if (entry->map_err_type == MAP_ERR_NOT_CHECKED) { + err_printk(ref->dev, entry, + "DMA-API: device driver failed to check map error" + "[device address=0x%016llx] [size=%llu bytes] " + "[mapped as %s]", + ref->dev_addr, ref->size, + type2name[entry->type]); + } + hash_bucket_del(entry); dma_entry_free(entry); @@ -1017,7 +1055,7 @@ void debug_dma_map_page(struct device *dev, struct page *page, size_t offset, if (unlikely(global_disable)) return; - if (unlikely(dma_mapping_error(dev, dma_addr))) + if (unlikely(has_mapping_error(dev, dma_addr))) return; entry = dma_entry_alloc(); @@ -1030,6 +1068,7 @@ void debug_dma_map_page(struct device *dev, struct page *page, size_t offset, entry->dev_addr = dma_addr; entry->size = size; entry->direction = direction; + entry->map_err_type = MAP_ERR_NOT_CHECKED; if (map_single) entry->type = dma_debug_single; @@ -1045,6 +1084,30 @@ void debug_dma_map_page(struct device *dev, struct page *page, size_t offset, } EXPORT_SYMBOL(debug_dma_map_page); +void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) +{ + struct dma_debug_entry ref; + struct dma_debug_entry *entry; + struct hash_bucket *bucket; + unsigned long flags; + + if (unlikely(global_disable)) + return; + + ref.dev = dev; + ref.dev_addr = dma_addr; + bucket = get_hash_bucket(&ref, &flags); + entry = bucket_find_exact(bucket, &ref); + + if (!entry) + goto out; + + entry->map_err_type = MAP_ERR_CHECKED; +out: + put_hash_bucket(bucket, &flags); +} +EXPORT_SYMBOL(debug_dma_mapping_error); + void debug_dma_unmap_page(struct device *dev, dma_addr_t addr, size_t size, int direction, bool map_single) { -- cgit v1.2.3 From 242860a47a75b933a79a30f6a40bf4858f4a3ecc Mon Sep 17 00:00:00 2001 From: Ezequiel Garcia Date: Fri, 19 Oct 2012 09:33:12 -0300 Subject: mm/sl[aou]b: Move common kmem_cache_size() to slab.h This function is identically defined in all three allocators and it's trivial to move it to slab.h Since now it's static, inline, header-defined function this patch also drops the EXPORT_SYMBOL tag. Cc: Pekka Enberg Cc: Matt Mackall Acked-by: Christoph Lameter Signed-off-by: Ezequiel Garcia Signed-off-by: Pekka Enberg --- include/linux/slab.h | 9 ++++++++- mm/slab.c | 6 ------ mm/slob.c | 6 ------ mm/slub.c | 9 --------- 4 files changed, 8 insertions(+), 22 deletions(-) (limited to 'include/linux') diff --git a/include/linux/slab.h b/include/linux/slab.h index 83d1a1454b7e..743a10415122 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -128,7 +128,6 @@ struct kmem_cache *kmem_cache_create(const char *, size_t, size_t, void kmem_cache_destroy(struct kmem_cache *); int kmem_cache_shrink(struct kmem_cache *); void kmem_cache_free(struct kmem_cache *, void *); -unsigned int kmem_cache_size(struct kmem_cache *); /* * Please use this macro to create slab caches. 
Simply specify the @@ -388,6 +387,14 @@ static inline void *kzalloc_node(size_t size, gfp_t flags, int node) return kmalloc_node(size, flags | __GFP_ZERO, node); } +/* + * Determine the size of a slab object + */ +static inline unsigned int kmem_cache_size(struct kmem_cache *s) +{ + return s->object_size; +} + void __init kmem_cache_init_late(void); #endif /* _LINUX_SLAB_H */ diff --git a/mm/slab.c b/mm/slab.c index 6d5c83c6ddd5..1f7fd5f51f87 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -3969,12 +3969,6 @@ void kfree(const void *objp) } EXPORT_SYMBOL(kfree); -unsigned int kmem_cache_size(struct kmem_cache *cachep) -{ - return cachep->object_size; -} -EXPORT_SYMBOL(kmem_cache_size); - /* * This initializes kmem_list3 or resizes various caches for all nodes. */ diff --git a/mm/slob.c b/mm/slob.c index 287a88aa4a61..fffbc820774d 100644 --- a/mm/slob.c +++ b/mm/slob.c @@ -604,12 +604,6 @@ void kmem_cache_free(struct kmem_cache *c, void *b) } EXPORT_SYMBOL(kmem_cache_free); -unsigned int kmem_cache_size(struct kmem_cache *c) -{ - return c->object_size; -} -EXPORT_SYMBOL(kmem_cache_size); - int __kmem_cache_shutdown(struct kmem_cache *c) { /* No way to check for remaining objects */ diff --git a/mm/slub.c b/mm/slub.c index 35483e0ab6bc..deee7c754a7d 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -3121,15 +3121,6 @@ error: return -EINVAL; } -/* - * Determine the size of a slab object - */ -unsigned int kmem_cache_size(struct kmem_cache *s) -{ - return s->object_size; -} -EXPORT_SYMBOL(kmem_cache_size); - static void list_slab_objects(struct kmem_cache *s, struct page *page, const char *text) { -- cgit v1.2.3 From 86b00e0da6be7bbc16412f126c5b548ac5d91d50 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Thu, 25 Oct 2012 23:34:42 -0500 Subject: rbd: get parent spec for version 2 images Add support for getting the the information identifying the parent image for rbd images that have them. The child image holds a reference to its parent image specification structure. Create a new entry "parent" in /sys/bus/rbd/image/N/ to report the identifying information for the parent image, if any. Signed-off-by: Alex Elder Reviewed-by: Josh Durgin --- Documentation/ABI/testing/sysfs-bus-rbd | 4 + drivers/block/rbd.c | 131 ++++++++++++++++++++++++++++++++ include/linux/ceph/rados.h | 2 + 3 files changed, 137 insertions(+) (limited to 'include/linux') diff --git a/Documentation/ABI/testing/sysfs-bus-rbd b/Documentation/ABI/testing/sysfs-bus-rbd index 1cf2adf46b11..cd9213ccf3dc 100644 --- a/Documentation/ABI/testing/sysfs-bus-rbd +++ b/Documentation/ABI/testing/sysfs-bus-rbd @@ -70,6 +70,10 @@ snap_* A directory per each snapshot +parent + + Information identifying the pool, image, and snapshot id for + the parent image in a layered rbd image (format 2 only). Entries under /sys/bus/rbd/devices//snap_ ------------------------------------------------------------- diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 28052ff679ca..bce1fcfb5185 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -217,6 +217,9 @@ struct rbd_device { struct ceph_osd_event *watch_event; struct ceph_osd_request *watch_request; + struct rbd_spec *parent_spec; + u64 parent_overlap; + /* protects updating the header */ struct rw_semaphore header_rwsem; @@ -2009,6 +2012,49 @@ static ssize_t rbd_snap_show(struct device *dev, return sprintf(buf, "%s\n", rbd_dev->spec->snap_name); } +/* + * For an rbd v2 image, shows the pool id, image id, and snapshot id + * for the parent image. 
If there is no parent, simply shows + * "(no parent image)". + */ +static ssize_t rbd_parent_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); + struct rbd_spec *spec = rbd_dev->parent_spec; + int count; + char *bufp = buf; + + if (!spec) + return sprintf(buf, "(no parent image)\n"); + + count = sprintf(bufp, "pool_id %llu\npool_name %s\n", + (unsigned long long) spec->pool_id, spec->pool_name); + if (count < 0) + return count; + bufp += count; + + count = sprintf(bufp, "image_id %s\nimage_name %s\n", spec->image_id, + spec->image_name ? spec->image_name : "(unknown)"); + if (count < 0) + return count; + bufp += count; + + count = sprintf(bufp, "snap_id %llu\nsnap_name %s\n", + (unsigned long long) spec->snap_id, spec->snap_name); + if (count < 0) + return count; + bufp += count; + + count = sprintf(bufp, "overlap %llu\n", rbd_dev->parent_overlap); + if (count < 0) + return count; + bufp += count; + + return (ssize_t) (bufp - buf); +} + static ssize_t rbd_image_refresh(struct device *dev, struct device_attribute *attr, const char *buf, @@ -2032,6 +2078,7 @@ static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL); static DEVICE_ATTR(image_id, S_IRUGO, rbd_image_id_show, NULL); static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh); static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL); +static DEVICE_ATTR(parent, S_IRUGO, rbd_parent_show, NULL); static struct attribute *rbd_attrs[] = { &dev_attr_size.attr, @@ -2043,6 +2090,7 @@ static struct attribute *rbd_attrs[] = { &dev_attr_name.attr, &dev_attr_image_id.attr, &dev_attr_current_snap.attr, + &dev_attr_parent.attr, &dev_attr_refresh.attr, NULL }; @@ -2192,6 +2240,7 @@ struct rbd_device *rbd_dev_create(struct rbd_client *rbdc, static void rbd_dev_destroy(struct rbd_device *rbd_dev) { + rbd_spec_put(rbd_dev->parent_spec); kfree(rbd_dev->header_name); rbd_put_client(rbd_dev->rbd_client); rbd_spec_put(rbd_dev->spec); @@ -2400,6 +2449,71 @@ static int rbd_dev_v2_features(struct rbd_device *rbd_dev) &rbd_dev->header.features); } +static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev) +{ + struct rbd_spec *parent_spec; + size_t size; + void *reply_buf = NULL; + __le64 snapid; + void *p; + void *end; + char *image_id; + u64 overlap; + size_t len = 0; + int ret; + + parent_spec = rbd_spec_alloc(); + if (!parent_spec) + return -ENOMEM; + + size = sizeof (__le64) + /* pool_id */ + sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX + /* image_id */ + sizeof (__le64) + /* snap_id */ + sizeof (__le64); /* overlap */ + reply_buf = kmalloc(size, GFP_KERNEL); + if (!reply_buf) { + ret = -ENOMEM; + goto out_err; + } + + snapid = cpu_to_le64(CEPH_NOSNAP); + ret = rbd_req_sync_exec(rbd_dev, rbd_dev->header_name, + "rbd", "get_parent", + (char *) &snapid, sizeof (snapid), + (char *) reply_buf, size, + CEPH_OSD_FLAG_READ, NULL); + dout("%s: rbd_req_sync_exec returned %d\n", __func__, ret); + if (ret < 0) + goto out_err; + + ret = -ERANGE; + p = reply_buf; + end = (char *) reply_buf + size; + ceph_decode_64_safe(&p, end, parent_spec->pool_id, out_err); + if (parent_spec->pool_id == CEPH_NOPOOL) + goto out; /* No parent? No problem. 
*/ + + image_id = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL); + if (IS_ERR(image_id)) { + ret = PTR_ERR(image_id); + goto out_err; + } + parent_spec->image_id = image_id; + ceph_decode_64_safe(&p, end, parent_spec->snap_id, out_err); + ceph_decode_64_safe(&p, end, overlap, out_err); + + rbd_dev->parent_overlap = overlap; + rbd_dev->parent_spec = parent_spec; + parent_spec = NULL; /* rbd_dev now owns this */ +out: + ret = 0; +out_err: + kfree(reply_buf); + rbd_spec_put(parent_spec); + + return ret; +} + static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev, u64 *ver) { size_t size; @@ -3154,6 +3268,12 @@ static int rbd_dev_v1_probe(struct rbd_device *rbd_dev) ret = rbd_read_header(rbd_dev, &rbd_dev->header); if (ret < 0) goto out_err; + + /* Version 1 images have no parent (no layering) */ + + rbd_dev->parent_spec = NULL; + rbd_dev->parent_overlap = 0; + rbd_dev->image_format = 1; dout("discovered version 1 image, header name is %s\n", @@ -3205,6 +3325,14 @@ static int rbd_dev_v2_probe(struct rbd_device *rbd_dev) if (ret < 0) goto out_err; + /* If the image supports layering, get the parent info */ + + if (rbd_dev->header.features & RBD_FEATURE_LAYERING) { + ret = rbd_dev_v2_parent_info(rbd_dev); + if (ret < 0) + goto out_err; + } + /* crypto and compression type aren't (yet) supported for v2 images */ rbd_dev->header.crypt_type = 0; @@ -3224,6 +3352,9 @@ static int rbd_dev_v2_probe(struct rbd_device *rbd_dev) return 0; out_err: + rbd_dev->parent_overlap = 0; + rbd_spec_put(rbd_dev->parent_spec); + rbd_dev->parent_spec = NULL; kfree(rbd_dev->header_name); rbd_dev->header_name = NULL; kfree(rbd_dev->header.object_prefix); diff --git a/include/linux/ceph/rados.h b/include/linux/ceph/rados.h index 0a99099801a4..15077db662ed 100644 --- a/include/linux/ceph/rados.h +++ b/include/linux/ceph/rados.h @@ -87,6 +87,8 @@ struct ceph_pg { * * lpgp_num -- as above. */ +#define CEPH_NOPOOL ((__u64) (-1)) /* pool id not defined */ + #define CEPH_PG_TYPE_REP 1 #define CEPH_PG_TYPE_RAID4 2 #define CEPH_PG_POOL_VERSION 2 -- cgit v1.2.3 From 72afc71ffca0f444ee0e1ef8c7e34ab209bb48b3 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Tue, 30 Oct 2012 19:40:33 -0500 Subject: libceph: define ceph_pg_pool_name_by_id() Define and export function ceph_pg_pool_name_by_id() to supply the name of a pg pool whose id is given. This will be used by the next patch. 
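A minimal usage sketch (hypothetical caller; assumes a struct ceph_osdmap pointer named map is already held): the new helper returns NULL for CEPH_NOPOOL, for ids above INT_MAX, and for ids with no matching pool, so a caller must handle a missing name:

	const char *pool_name = ceph_pg_pool_name_by_id(map, pool_id);

	if (!pool_name)
		pr_warn("pool %llu has no name in the current osdmap\n",
			(unsigned long long) pool_id);
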
Signed-off-by: Alex Elder Reviewed-by: Josh Durgin --- include/linux/ceph/osdmap.h | 1 + net/ceph/osdmap.c | 16 ++++++++++++++++ 2 files changed, 17 insertions(+) (limited to 'include/linux') diff --git a/include/linux/ceph/osdmap.h b/include/linux/ceph/osdmap.h index e88a620b9f8a..5ea57ba69320 100644 --- a/include/linux/ceph/osdmap.h +++ b/include/linux/ceph/osdmap.h @@ -123,6 +123,7 @@ extern int ceph_calc_pg_acting(struct ceph_osdmap *osdmap, struct ceph_pg pgid, extern int ceph_calc_pg_primary(struct ceph_osdmap *osdmap, struct ceph_pg pgid); +extern const char *ceph_pg_pool_name_by_id(struct ceph_osdmap *map, u64 id); extern int ceph_pg_poolid_by_name(struct ceph_osdmap *map, const char *name); #endif diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c index f552aa48fd9e..de73214b5d26 100644 --- a/net/ceph/osdmap.c +++ b/net/ceph/osdmap.c @@ -469,6 +469,22 @@ static struct ceph_pg_pool_info *__lookup_pg_pool(struct rb_root *root, int id) return NULL; } +const char *ceph_pg_pool_name_by_id(struct ceph_osdmap *map, u64 id) +{ + struct ceph_pg_pool_info *pi; + + if (id == CEPH_NOPOOL) + return NULL; + + if (WARN_ON_ONCE(id > (u64) INT_MAX)) + return NULL; + + pi = __lookup_pg_pool(&map->pg_pools, (int) id); + + return pi ? pi->name : NULL; +} +EXPORT_SYMBOL(ceph_pg_pool_name_by_id); + int ceph_pg_poolid_by_name(struct ceph_osdmap *map, const char *name) { struct rb_node *rbp; -- cgit v1.2.3 From 216b6cbdcbd86b1db0754d58886b466ae31f5a63 Mon Sep 17 00:00:00 2001 From: Namjae Jeon Date: Wed, 29 Aug 2012 10:10:10 -0400 Subject: exportfs: add FILEID_INVALID to indicate invalid fid_type This commit adds FILEID_INVALID = 0xff in fid_type to indicate invalid fid_type It avoids using magic number 255 Signed-off-by: Namjae Jeon Signed-off-by: Vivek Trivedi Signed-off-by: J. Bruce Fields --- fs/exportfs/expfs.c | 4 ++-- fs/fhandle.c | 2 +- fs/nfsd/nfsfh.c | 4 ++-- include/linux/exportfs.h | 5 +++++ 4 files changed, 10 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/fs/exportfs/expfs.c b/fs/exportfs/expfs.c index 29ab099e3e08..f1f1c59c2966 100644 --- a/fs/exportfs/expfs.c +++ b/fs/exportfs/expfs.c @@ -322,10 +322,10 @@ static int export_encode_fh(struct inode *inode, struct fid *fid, if (parent && (len < 4)) { *max_len = 4; - return 255; + return FILEID_INVALID; } else if (len < 2) { *max_len = 2; - return 255; + return FILEID_INVALID; } len = 2; diff --git a/fs/fhandle.c b/fs/fhandle.c index f775bfdd6e4a..26f12b95702a 100644 --- a/fs/fhandle.c +++ b/fs/fhandle.c @@ -52,7 +52,7 @@ static long do_sys_name_to_handle(struct path *path, handle_bytes = handle_dwords * sizeof(u32); handle->handle_bytes = handle_bytes; if ((handle->handle_bytes > f_handle.handle_bytes) || - (retval == 255) || (retval == -ENOSPC)) { + (retval == FILEID_INVALID) || (retval == -ENOSPC)) { /* As per old exportfs_encode_fh documentation * we could return ENOSPC to indicate overflow * But file system returned 255 always. 
So handle diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c index 032af381b3aa..814afaa4458a 100644 --- a/fs/nfsd/nfsfh.c +++ b/fs/nfsd/nfsfh.c @@ -572,7 +572,7 @@ fh_compose(struct svc_fh *fhp, struct svc_export *exp, struct dentry *dentry, if (inode) _fh_update(fhp, exp, dentry); - if (fhp->fh_handle.fh_fileid_type == 255) { + if (fhp->fh_handle.fh_fileid_type == FILEID_INVALID) { fh_put(fhp); return nfserr_opnotsupp; } @@ -603,7 +603,7 @@ fh_update(struct svc_fh *fhp) goto out; _fh_update(fhp, fhp->fh_export, dentry); - if (fhp->fh_handle.fh_fileid_type == 255) + if (fhp->fh_handle.fh_fileid_type == FILEID_INVALID) return nfserr_opnotsupp; } out: diff --git a/include/linux/exportfs.h b/include/linux/exportfs.h index 12291a7ee275..0e1452546300 100644 --- a/include/linux/exportfs.h +++ b/include/linux/exportfs.h @@ -83,6 +83,11 @@ enum fid_type { * 64 bit parent inode number. */ FILEID_NILFS_WITH_PARENT = 0x62, + + /* + * Filesystems must not use 0xff file ID. + */ + FILEID_INVALID = 0xff, }; struct fid { -- cgit v1.2.3 From 2be975c6d920de989ff5e4bc09ffe87e59d94662 Mon Sep 17 00:00:00 2001 From: Dmitry Torokhov Date: Sat, 3 Nov 2012 12:16:12 -0700 Subject: Input: introduce managed input devices (add devres support) There is a demand from driver's writers to use managed devices framework for their drivers. Unfortunately up to this moment input devices did not provide support for managed devices and that lead to mixing two styles of resource management which usually introduced more bugs, such as manually unregistering input device but relying in devres to free interrupt handler which (unless device is properly shut off) can cause ISR to reference already freed memory. This change introduces devm_input_allocate_device() that will allocate managed instance of input device so that driver writers who prefer using devm_* framework do not have to mix 2 styles. Reviewed-by: Henrik Rydberg Reviewed-by: Tejun Heo Signed-off-by: Dmitry Torokhov --- drivers/input/input.c | 176 ++++++++++++++++++++++++++++++++++++++++++-------- include/linux/input.h | 7 +- 2 files changed, 155 insertions(+), 28 deletions(-) (limited to 'include/linux') diff --git a/drivers/input/input.c b/drivers/input/input.c index f1be1a77edf3..ce01332f7b3a 100644 --- a/drivers/input/input.c +++ b/drivers/input/input.c @@ -1726,7 +1726,7 @@ EXPORT_SYMBOL_GPL(input_class); /** * input_allocate_device - allocate memory for new input device * - * Returns prepared struct input_dev or NULL. + * Returns prepared struct input_dev or %NULL. * * NOTE: Use input_free_device() to free devices that have not been * registered; input_unregister_device() should be used for already @@ -1753,6 +1753,70 @@ struct input_dev *input_allocate_device(void) } EXPORT_SYMBOL(input_allocate_device); +struct input_devres { + struct input_dev *input; +}; + +static int devm_input_device_match(struct device *dev, void *res, void *data) +{ + struct input_devres *devres = res; + + return devres->input == data; +} + +static void devm_input_device_release(struct device *dev, void *res) +{ + struct input_devres *devres = res; + struct input_dev *input = devres->input; + + dev_dbg(dev, "%s: dropping reference to %s\n", + __func__, dev_name(&input->dev)); + input_put_device(input); +} + +/** + * devm_input_allocate_device - allocate managed input device + * @dev: device owning the input device being created + * + * Returns prepared struct input_dev or %NULL. 
+ * + * Managed input devices do not need to be explicitly unregistered or + * freed as it will be done automatically when owner device unbinds from + * its driver (or binding fails). Once managed input device is allocated, + * it is ready to be set up and registered in the same fashion as regular + * input device. There are no special devm_input_device_[un]register() + * variants, regular ones work with both managed and unmanaged devices. + * + * NOTE: the owner device is set up as parent of input device and users + * should not override it. + */ + +struct input_dev *devm_input_allocate_device(struct device *dev) +{ + struct input_dev *input; + struct input_devres *devres; + + devres = devres_alloc(devm_input_device_release, + sizeof(struct input_devres), GFP_KERNEL); + if (!devres) + return NULL; + + input = input_allocate_device(); + if (!input) { + devres_free(devres); + return NULL; + } + + input->dev.parent = dev; + input->devres_managed = true; + + devres->input = input; + devres_add(dev, devres); + + return input; +} +EXPORT_SYMBOL(devm_input_allocate_device); + /** * input_free_device - free memory occupied by input_dev structure * @dev: input device to free @@ -1769,8 +1833,14 @@ EXPORT_SYMBOL(input_allocate_device); */ void input_free_device(struct input_dev *dev) { - if (dev) + if (dev) { + if (dev->devres_managed) + WARN_ON(devres_destroy(dev->dev.parent, + devm_input_device_release, + devm_input_device_match, + dev)); input_put_device(dev); + } } EXPORT_SYMBOL(input_free_device); @@ -1891,6 +1961,38 @@ static void input_cleanse_bitmasks(struct input_dev *dev) INPUT_CLEANSE_BITMASK(dev, SW, sw); } +static void __input_unregister_device(struct input_dev *dev) +{ + struct input_handle *handle, *next; + + input_disconnect_device(dev); + + mutex_lock(&input_mutex); + + list_for_each_entry_safe(handle, next, &dev->h_list, d_node) + handle->handler->disconnect(handle); + WARN_ON(!list_empty(&dev->h_list)); + + del_timer_sync(&dev->timer); + list_del_init(&dev->node); + + input_wakeup_procfs_readers(); + + mutex_unlock(&input_mutex); + + device_del(&dev->dev); +} + +static void devm_input_device_unregister(struct device *dev, void *res) +{ + struct input_devres *devres = res; + struct input_dev *input = devres->input; + + dev_dbg(dev, "%s: unregistering device %s\n", + __func__, dev_name(&input->dev)); + __input_unregister_device(input); +} + /** * input_register_device - register device with input core * @dev: device to be registered @@ -1906,11 +2008,21 @@ static void input_cleanse_bitmasks(struct input_dev *dev) int input_register_device(struct input_dev *dev) { static atomic_t input_no = ATOMIC_INIT(0); + struct input_devres *devres = NULL; struct input_handler *handler; unsigned int packet_size; const char *path; int error; + if (dev->devres_managed) { + devres = devres_alloc(devm_input_device_unregister, + sizeof(struct input_devres), GFP_KERNEL); + if (!devres) + return -ENOMEM; + + devres->input = dev; + } + /* Every input device generates EV_SYN/SYN_REPORT events. 
*/ __set_bit(EV_SYN, dev->evbit); @@ -1926,8 +2038,10 @@ int input_register_device(struct input_dev *dev) dev->max_vals = max(dev->hint_events_per_packet, packet_size) + 2; dev->vals = kcalloc(dev->max_vals, sizeof(*dev->vals), GFP_KERNEL); - if (!dev->vals) - return -ENOMEM; + if (!dev->vals) { + error = -ENOMEM; + goto err_devres_free; + } /* * If delay and period are pre-set by the driver, then autorepeating @@ -1952,7 +2066,7 @@ int input_register_device(struct input_dev *dev) error = device_add(&dev->dev); if (error) - return error; + goto err_free_vals; path = kobject_get_path(&dev->dev.kobj, GFP_KERNEL); pr_info("%s as %s\n", @@ -1961,10 +2075,8 @@ int input_register_device(struct input_dev *dev) kfree(path); error = mutex_lock_interruptible(&input_mutex); - if (error) { - device_del(&dev->dev); - return error; - } + if (error) + goto err_device_del; list_add_tail(&dev->node, &input_dev_list); @@ -1975,7 +2087,21 @@ int input_register_device(struct input_dev *dev) mutex_unlock(&input_mutex); + if (dev->devres_managed) { + dev_dbg(dev->dev.parent, "%s: registering %s with devres.\n", + __func__, dev_name(&dev->dev)); + devres_add(dev->dev.parent, devres); + } return 0; + +err_device_del: + device_del(&dev->dev); +err_free_vals: + kfree(dev->vals); + dev->vals = NULL; +err_devres_free: + devres_free(devres); + return error; } EXPORT_SYMBOL(input_register_device); @@ -1988,24 +2114,20 @@ EXPORT_SYMBOL(input_register_device); */ void input_unregister_device(struct input_dev *dev) { - struct input_handle *handle, *next; - - input_disconnect_device(dev); - - mutex_lock(&input_mutex); - - list_for_each_entry_safe(handle, next, &dev->h_list, d_node) - handle->handler->disconnect(handle); - WARN_ON(!list_empty(&dev->h_list)); - - del_timer_sync(&dev->timer); - list_del_init(&dev->node); - - input_wakeup_procfs_readers(); - - mutex_unlock(&input_mutex); - - device_unregister(&dev->dev); + if (dev->devres_managed) { + WARN_ON(devres_destroy(dev->dev.parent, + devm_input_device_unregister, + devm_input_device_match, + dev)); + __input_unregister_device(dev); + /* + * We do not do input_put_device() here because it will be done + * when 2nd devres fires up. + */ + } else { + __input_unregister_device(dev); + input_put_device(dev); + } } EXPORT_SYMBOL(input_unregister_device); diff --git a/include/linux/input.h b/include/linux/input.h index cab994ba6d91..5538cc09a4f5 100644 --- a/include/linux/input.h +++ b/include/linux/input.h @@ -112,6 +112,8 @@ struct input_value { * @h_list: list of input handles associated with the device. When * accessing the list dev->mutex must be held * @node: used to place the device onto input_dev_list + * @devres_managed: indicates that devices is managed with devres framework + * and needs not be explicitly unregistered or freed. 
*/ struct input_dev { const char *name; @@ -180,6 +182,8 @@ struct input_dev { unsigned int num_vals; unsigned int max_vals; struct input_value *vals; + + bool devres_managed; }; #define to_input_dev(d) container_of(d, struct input_dev, dev) @@ -323,7 +327,8 @@ struct input_handle { struct list_head h_node; }; -struct input_dev *input_allocate_device(void); +struct input_dev __must_check *input_allocate_device(void); +struct input_dev __must_check *devm_input_allocate_device(struct device *); void input_free_device(struct input_dev *dev); static inline struct input_dev *input_get_device(struct input_dev *dev) -- cgit v1.2.3 From 800963fd598e2acbcd3a21a17e3ab3c185ad0d6a Mon Sep 17 00:00:00 2001 From: Henrik Rydberg Date: Sat, 10 Nov 2012 00:32:36 -0800 Subject: Input: document new members of struct input_dev Fixes kernel-doc warnings for the members added in 3.7-rc1. Signed-off-by: Henrik Rydberg Signed-off-by: Dmitry Torokhov --- include/linux/input.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'include/linux') diff --git a/include/linux/input.h b/include/linux/input.h index 5538cc09a4f5..82ce323b9986 100644 --- a/include/linux/input.h +++ b/include/linux/input.h @@ -112,6 +112,9 @@ struct input_value { * @h_list: list of input handles associated with the device. When * accessing the list dev->mutex must be held * @node: used to place the device onto input_dev_list + * @num_vals: number of values queued in the current frame + * @max_vals: maximum number of values queued in a frame + * @vals: array of values queued in the current frame * @devres_managed: indicates that devices is managed with devres framework * and needs not be explicitly unregistered or freed. */ -- cgit v1.2.3 From 49839dc93970789cea46f5171cd7f6ec11af64c7 Mon Sep 17 00:00:00 2001 From: Paul Walmsley Date: Tue, 6 Nov 2012 16:31:32 +0000 Subject: Revert "ARM: OMAP: convert I2C driver to PM QoS for MPU latency constraints" This reverts commit 3db11feffc1ad2ab9dea27789e6b5b3032827adc (ARM: OMAP: convert I2C driver to PM QoS for MPU latency constraints). This commit causes I2C timeouts to appear on several OMAP3430/3530-based boards: http://marc.info/?l=linux-arm-kernel&m=135071372426971&w=2 http://marc.info/?l=linux-arm-kernel&m=135067558415214&w=2 http://marc.info/?l=linux-arm-kernel&m=135216013608196&w=2 and appears to have been sent for merging before one of its prerequisites was merged: http://marc.info/?l=linux-arm-kernel&m=135219411617621&w=2 Signed-off-by: Paul Walmsley Acked-by: Jean Pihet Signed-off-by: Wolfram Sang --- arch/arm/plat-omap/i2c.c | 21 +++++++++++++++++++++ drivers/i2c/busses/i2c-omap.c | 32 ++++++++++++++------------------ include/linux/i2c-omap.h | 1 + 3 files changed, 36 insertions(+), 18 deletions(-) (limited to 'include/linux') diff --git a/arch/arm/plat-omap/i2c.c b/arch/arm/plat-omap/i2c.c index a5683a84c6ee..6013831a043e 100644 --- a/arch/arm/plat-omap/i2c.c +++ b/arch/arm/plat-omap/i2c.c @@ -26,12 +26,14 @@ #include #include #include +#include #include #include #include #include #include +#include #include #define OMAP_I2C_SIZE 0x3f @@ -127,6 +129,16 @@ static inline int omap1_i2c_add_bus(int bus_id) #ifdef CONFIG_ARCH_OMAP2PLUS +/* + * XXX This function is a temporary compatibility wrapper - only + * needed until the I2C driver can be converted to call + * omap_pm_set_max_dev_wakeup_lat() and handle a return code. 
+ */ +static void omap_pm_set_max_mpu_wakeup_lat_compat(struct device *dev, long t) +{ + omap_pm_set_max_mpu_wakeup_lat(dev, t); +} + static inline int omap2_i2c_add_bus(int bus_id) { int l; @@ -158,6 +170,15 @@ static inline int omap2_i2c_add_bus(int bus_id) dev_attr = (struct omap_i2c_dev_attr *)oh->dev_attr; pdata->flags = dev_attr->flags; + /* + * When waiting for completion of a i2c transfer, we need to + * set a wake up latency constraint for the MPU. This is to + * ensure quick enough wakeup from idle, when transfer + * completes. + * Only omap3 has support for constraints + */ + if (cpu_is_omap34xx()) + pdata->set_mpu_wkup_lat = omap_pm_set_max_mpu_wakeup_lat_compat; pdev = omap_device_build(name, bus_id, oh, pdata, sizeof(struct omap_i2c_bus_platform_data), NULL, 0, 0); diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c index db31eaed6ea5..0b0254312d21 100644 --- a/drivers/i2c/busses/i2c-omap.c +++ b/drivers/i2c/busses/i2c-omap.c @@ -43,7 +43,6 @@ #include #include #include -#include /* I2C controller revisions */ #define OMAP_I2C_OMAP1_REV_2 0x20 @@ -187,8 +186,9 @@ struct omap_i2c_dev { int reg_shift; /* bit shift for I2C register addresses */ struct completion cmd_complete; struct resource *ioarea; - u32 latency; /* maximum MPU wkup latency */ - struct pm_qos_request pm_qos_request; + u32 latency; /* maximum mpu wkup latency */ + void (*set_mpu_wkup_lat)(struct device *dev, + long latency); u32 speed; /* Speed of bus in kHz */ u32 dtrev; /* extra revision from DT */ u32 flags; @@ -494,7 +494,9 @@ static void omap_i2c_resize_fifo(struct omap_i2c_dev *dev, u8 size, bool is_rx) dev->b_hw = 1; /* Enable hardware fixes */ /* calculate wakeup latency constraint for MPU */ - dev->latency = (1000000 * dev->threshold) / (1000 * dev->speed / 8); + if (dev->set_mpu_wkup_lat != NULL) + dev->latency = (1000000 * dev->threshold) / + (1000 * dev->speed / 8); } /* @@ -629,16 +631,8 @@ omap_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) if (r < 0) goto out; - /* - * When waiting for completion of a i2c transfer, we need to - * set a wake up latency constraint for the MPU. This is to - * ensure quick enough wakeup from idle, when transfer - * completes. 
- */ - if (dev->latency) - pm_qos_add_request(&dev->pm_qos_request, - PM_QOS_CPU_DMA_LATENCY, - dev->latency); + if (dev->set_mpu_wkup_lat != NULL) + dev->set_mpu_wkup_lat(dev->dev, dev->latency); for (i = 0; i < num; i++) { r = omap_i2c_xfer_msg(adap, &msgs[i], (i == (num - 1))); @@ -646,8 +640,8 @@ omap_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) break; } - if (dev->latency) - pm_qos_remove_request(&dev->pm_qos_request); + if (dev->set_mpu_wkup_lat != NULL) + dev->set_mpu_wkup_lat(dev->dev, -1); if (r == 0) r = num; @@ -1104,6 +1098,7 @@ omap_i2c_probe(struct platform_device *pdev) } else if (pdata != NULL) { dev->speed = pdata->clkrate; dev->flags = pdata->flags; + dev->set_mpu_wkup_lat = pdata->set_mpu_wkup_lat; dev->dtrev = pdata->rev; } @@ -1159,8 +1154,9 @@ omap_i2c_probe(struct platform_device *pdev) dev->b_hw = 1; /* Enable hardware fixes */ /* calculate wakeup latency constraint for MPU */ - dev->latency = (1000000 * dev->fifo_size) / - (1000 * dev->speed / 8); + if (dev->set_mpu_wkup_lat != NULL) + dev->latency = (1000000 * dev->fifo_size) / + (1000 * dev->speed / 8); } /* reset ASAP, clearing any IRQs */ diff --git a/include/linux/i2c-omap.h b/include/linux/i2c-omap.h index df804ba73e0b..92a0dc75bc74 100644 --- a/include/linux/i2c-omap.h +++ b/include/linux/i2c-omap.h @@ -34,6 +34,7 @@ struct omap_i2c_bus_platform_data { u32 clkrate; u32 rev; u32 flags; + void (*set_mpu_wkup_lat)(struct device *dev, long set); }; #endif -- cgit v1.2.3 From 2c88ab8c5af7d637d2a9d14b607fa6100fa64236 Mon Sep 17 00:00:00 2001 From: Shubhrajyoti D Date: Mon, 5 Nov 2012 17:53:39 +0530 Subject: ARM: i2c: omap: Remove the i207 errata flag The commit [i2c: omap: use revision check for OMAP_I2C_FLAG_APPLY_ERRATA_I207] uses the revision id instead of the flag. So the flag can be safely removed. 
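For context, a rough sketch of the revision-based pattern the referenced commit switched to (identifiers recalled from the i2c-omap driver and may not be exact): the i207 workaround is derived from the detected IP revision instead of a platform-data flag:

	if (dev->rev <= OMAP_I2C_REV_ON_3430_3530)
		dev->errata |= I2C_OMAP_ERRATA_I207;
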
Reviewed-by: Felipe Balbi Signed-off-by: Shubhrajyoti D Signed-off-by: Wolfram Sang --- arch/arm/mach-omap2/omap_hwmod_2430_data.c | 3 +-- arch/arm/mach-omap2/omap_hwmod_3xxx_data.c | 9 +++------ drivers/i2c/busses/i2c-omap.c | 3 +-- include/linux/i2c-omap.h | 1 - 4 files changed, 5 insertions(+), 11 deletions(-) (limited to 'include/linux') diff --git a/arch/arm/mach-omap2/omap_hwmod_2430_data.c b/arch/arm/mach-omap2/omap_hwmod_2430_data.c index c455e41b0237..b79ccf6efbe8 100644 --- a/arch/arm/mach-omap2/omap_hwmod_2430_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_2430_data.c @@ -76,8 +76,7 @@ static struct omap_hwmod_class i2c_class = { static struct omap_i2c_dev_attr i2c_dev_attr = { .fifo_depth = 8, /* bytes */ - .flags = OMAP_I2C_FLAG_APPLY_ERRATA_I207 | - OMAP_I2C_FLAG_BUS_SHIFT_2 | + .flags = OMAP_I2C_FLAG_BUS_SHIFT_2 | OMAP_I2C_FLAG_FORCE_19200_INT_CLK, }; diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c index f67b7ee07dd4..943222c40489 100644 --- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c @@ -791,8 +791,7 @@ static struct omap_hwmod omap3xxx_dss_venc_hwmod = { /* I2C1 */ static struct omap_i2c_dev_attr i2c1_dev_attr = { .fifo_depth = 8, /* bytes */ - .flags = OMAP_I2C_FLAG_APPLY_ERRATA_I207 | - OMAP_I2C_FLAG_RESET_REGS_POSTIDLE | + .flags = OMAP_I2C_FLAG_RESET_REGS_POSTIDLE | OMAP_I2C_FLAG_BUS_SHIFT_2, }; @@ -818,8 +817,7 @@ static struct omap_hwmod omap3xxx_i2c1_hwmod = { /* I2C2 */ static struct omap_i2c_dev_attr i2c2_dev_attr = { .fifo_depth = 8, /* bytes */ - .flags = OMAP_I2C_FLAG_APPLY_ERRATA_I207 | - OMAP_I2C_FLAG_RESET_REGS_POSTIDLE | + .flags = OMAP_I2C_FLAG_RESET_REGS_POSTIDLE | OMAP_I2C_FLAG_BUS_SHIFT_2, }; @@ -845,8 +843,7 @@ static struct omap_hwmod omap3xxx_i2c2_hwmod = { /* I2C3 */ static struct omap_i2c_dev_attr i2c3_dev_attr = { .fifo_depth = 64, /* bytes */ - .flags = OMAP_I2C_FLAG_APPLY_ERRATA_I207 | - OMAP_I2C_FLAG_RESET_REGS_POSTIDLE | + .flags = OMAP_I2C_FLAG_RESET_REGS_POSTIDLE | OMAP_I2C_FLAG_BUS_SHIFT_2, }; diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c index 0ca50e71731b..11e645ab1e79 100644 --- a/drivers/i2c/busses/i2c-omap.c +++ b/drivers/i2c/busses/i2c-omap.c @@ -1028,8 +1028,7 @@ static const struct i2c_algorithm omap_i2c_algo = { #ifdef CONFIG_OF static struct omap_i2c_bus_platform_data omap3_pdata = { .rev = OMAP_I2C_IP_VERSION_1, - .flags = OMAP_I2C_FLAG_APPLY_ERRATA_I207 | - OMAP_I2C_FLAG_RESET_REGS_POSTIDLE | + .flags = OMAP_I2C_FLAG_RESET_REGS_POSTIDLE | OMAP_I2C_FLAG_BUS_SHIFT_2, }; diff --git a/include/linux/i2c-omap.h b/include/linux/i2c-omap.h index 92a0dc75bc74..1b25c04f82d9 100644 --- a/include/linux/i2c-omap.h +++ b/include/linux/i2c-omap.h @@ -21,7 +21,6 @@ #define OMAP_I2C_FLAG_SIMPLE_CLOCK BIT(1) #define OMAP_I2C_FLAG_16BIT_DATA_REG BIT(2) #define OMAP_I2C_FLAG_RESET_REGS_POSTIDLE BIT(3) -#define OMAP_I2C_FLAG_APPLY_ERRATA_I207 BIT(4) #define OMAP_I2C_FLAG_ALWAYS_ARMXOR_CLK BIT(5) #define OMAP_I2C_FLAG_FORCE_19200_INT_CLK BIT(6) /* how the CPU address bus must be translated for I2C unit access */ -- cgit v1.2.3 From 1cf3d8b3d24cd383ddfd5442c83ec5c355ffc2f7 Mon Sep 17 00:00:00 2001 From: Nathan Fontenot Date: Tue, 2 Oct 2012 16:57:57 +0000 Subject: powerpc+of: Add of node/property notification chain for adds and removes This patch moves the notification chain for updates to the device tree from the powerpc/pseries code to the base OF code. This makes this functionality available to all architectures. 
Additionally the notification chain is updated to allow notifications for property add/remove/update. To make this work a pointer to a new struct (of_prop_reconfig) is passed to the routines in the notification chain. The of_prop_reconfig property contains a pointer to the node containing the property and a pointer to the property itself. In the case of property updates, the property pointer refers to the new property. Signed-off-by: Nathan Fontenot Acked-by: Rob Herring Acked-by: Grant Likely Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/include/asm/pSeries_reconfig.h | 32 ----------- arch/powerpc/kernel/prom.c | 6 +- arch/powerpc/platforms/pseries/dlpar.c | 14 +++-- arch/powerpc/platforms/pseries/hotplug-cpu.c | 8 +-- arch/powerpc/platforms/pseries/hotplug-memory.c | 60 ++++++++++++++------ arch/powerpc/platforms/pseries/iommu.c | 6 +- arch/powerpc/platforms/pseries/reconfig.c | 68 ++--------------------- arch/powerpc/platforms/pseries/setup.c | 6 +- drivers/crypto/nx/nx-842.c | 20 +++---- drivers/crypto/nx/nx.c | 1 - drivers/of/base.c | 74 +++++++++++++++++++++++-- include/linux/of.h | 22 ++++++-- 12 files changed, 163 insertions(+), 154 deletions(-) (limited to 'include/linux') diff --git a/arch/powerpc/include/asm/pSeries_reconfig.h b/arch/powerpc/include/asm/pSeries_reconfig.h index c07edfe98b98..adc00d2e75b0 100644 --- a/arch/powerpc/include/asm/pSeries_reconfig.h +++ b/arch/powerpc/include/asm/pSeries_reconfig.h @@ -2,43 +2,11 @@ #define _PPC64_PSERIES_RECONFIG_H #ifdef __KERNEL__ -#include - -/* - * Use this API if your code needs to know about OF device nodes being - * added or removed on pSeries systems. - */ - -#define PSERIES_RECONFIG_ADD 0x0001 -#define PSERIES_RECONFIG_REMOVE 0x0002 -#define PSERIES_DRCONF_MEM_ADD 0x0003 -#define PSERIES_DRCONF_MEM_REMOVE 0x0004 -#define PSERIES_UPDATE_PROPERTY 0x0005 - -/** - * pSeries_reconfig_notify - Notifier value structure for OFDT property updates - * - * @node: Device tree node which owns the property being updated - * @property: Updated property - */ -struct pSeries_reconfig_prop_update { - struct device_node *node; - struct property *property; -}; - #ifdef CONFIG_PPC_PSERIES -extern int pSeries_reconfig_notifier_register(struct notifier_block *); -extern void pSeries_reconfig_notifier_unregister(struct notifier_block *); -extern int pSeries_reconfig_notify(unsigned long action, void *p); /* Not the best place to put this, will be fixed when we move some * of the rtas suspend-me stuff to pseries */ extern void pSeries_coalesce_init(void); #else /* !CONFIG_PPC_PSERIES */ -static inline int pSeries_reconfig_notifier_register(struct notifier_block *nb) -{ - return 0; -} -static inline void pSeries_reconfig_notifier_unregister(struct notifier_block *nb) { } static inline void pSeries_coalesce_init(void) { } #endif /* CONFIG_PPC_PSERIES */ diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index 37725e86651e..6feb60c3c6e3 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c @@ -32,6 +32,7 @@ #include #include #include +#include #include #include @@ -49,7 +50,6 @@ #include #include #include -#include #include #include #include @@ -802,7 +802,7 @@ static int prom_reconfig_notifier(struct notifier_block *nb, int err; switch (action) { - case PSERIES_RECONFIG_ADD: + case OF_RECONFIG_ATTACH_NODE: err = of_finish_dynamic_node(node); if (err < 0) printk(KERN_ERR "finish_node returned %d\n", err); @@ -821,7 +821,7 @@ static struct notifier_block prom_reconfig_nb = { static int __init 
prom_reconfig_setup(void) { - return pSeries_reconfig_notifier_register(&prom_reconfig_nb); + return of_reconfig_notifier_register(&prom_reconfig_nb); } __initcall(prom_reconfig_setup); #endif diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c index e36789bd4e6c..a1a7b9a67ffd 100644 --- a/arch/powerpc/platforms/pseries/dlpar.c +++ b/arch/powerpc/platforms/pseries/dlpar.c @@ -16,13 +16,13 @@ #include #include #include +#include #include "offline_states.h" #include #include #include #include -#include struct cc_workarea { u32 drc_index; @@ -262,24 +262,26 @@ int dlpar_attach_node(struct device_node *dn) if (!dn->parent) return -ENOMEM; - rc = pSeries_reconfig_notify(PSERIES_RECONFIG_ADD, dn); + rc = of_attach_node(dn); if (rc) { printk(KERN_ERR "Failed to add device node %s\n", dn->full_name); return rc; } - of_attach_node(dn); of_node_put(dn->parent); return 0; } int dlpar_detach_node(struct device_node *dn) { - pSeries_reconfig_notify(PSERIES_RECONFIG_REMOVE, dn); - of_detach_node(dn); - of_node_put(dn); /* Must decrement the refcount */ + int rc; + + rc = of_detach_node(dn); + if (rc) + return rc; + of_node_put(dn); /* Must decrement the refcount */ return 0; } diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c index 64c97d8ac0c5..a38956269fbf 100644 --- a/arch/powerpc/platforms/pseries/hotplug-cpu.c +++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c @@ -23,12 +23,12 @@ #include #include /* for idle_task_exit */ #include +#include #include #include #include #include #include -#include #include #include "plpar_wrappers.h" #include "offline_states.h" @@ -333,10 +333,10 @@ static int pseries_smp_notifier(struct notifier_block *nb, int err = 0; switch (action) { - case PSERIES_RECONFIG_ADD: + case OF_RECONFIG_ATTACH_NODE: err = pseries_add_processor(node); break; - case PSERIES_RECONFIG_REMOVE: + case OF_RECONFIG_DETACH_NODE: pseries_remove_processor(node); break; } @@ -399,7 +399,7 @@ static int __init pseries_cpu_hotplug_init(void) /* Processors can be added/removed only on LPAR */ if (firmware_has_feature(FW_FEATURE_LPAR)) { - pSeries_reconfig_notifier_register(&pseries_smp_nb); + of_reconfig_notifier_register(&pseries_smp_nb); cpu_maps_update_begin(); if (cede_offline_enabled && parse_cede_parameters() == 0) { default_offline_state = CPU_STATE_INACTIVE; diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c index ecdb0a6b3171..2372c609fa2b 100644 --- a/arch/powerpc/platforms/pseries/hotplug-memory.c +++ b/arch/powerpc/platforms/pseries/hotplug-memory.c @@ -16,7 +16,6 @@ #include #include -#include #include static unsigned long get_memblock_size(void) @@ -187,42 +186,69 @@ static int pseries_add_memory(struct device_node *np) return (ret < 0) ? -EINVAL : 0; } -static int pseries_drconf_memory(unsigned long *base, unsigned int action) +static int pseries_update_drconf_memory(struct of_prop_reconfig *pr) { + struct of_drconf_cell *new_drmem, *old_drmem; unsigned long memblock_size; - int rc; + u32 entries; + u32 *p; + int i, rc = -EINVAL; memblock_size = get_memblock_size(); if (!memblock_size) return -EINVAL; - if (action == PSERIES_DRCONF_MEM_ADD) { - rc = memblock_add(*base, memblock_size); - rc = (rc < 0) ? 
-EINVAL : 0; - } else if (action == PSERIES_DRCONF_MEM_REMOVE) { - rc = pseries_remove_memblock(*base, memblock_size); - } else { - rc = -EINVAL; + p = (u32 *)of_get_property(pr->dn, "ibm,dynamic-memory", NULL); + if (!p) + return -EINVAL; + + /* The first int of the property is the number of lmb's described + * by the property. This is followed by an array of of_drconf_cell + * entries. Get the niumber of entries and skip to the array of + * of_drconf_cell's. + */ + entries = *p++; + old_drmem = (struct of_drconf_cell *)p; + + p = (u32 *)pr->prop->value; + p++; + new_drmem = (struct of_drconf_cell *)p; + + for (i = 0; i < entries; i++) { + if ((old_drmem[i].flags & DRCONF_MEM_ASSIGNED) && + (!(new_drmem[i].flags & DRCONF_MEM_ASSIGNED))) { + rc = pseries_remove_memblock(old_drmem[i].base_addr, + memblock_size); + break; + } else if ((!(old_drmem[i].flags & DRCONF_MEM_ASSIGNED)) && + (new_drmem[i].flags & DRCONF_MEM_ASSIGNED)) { + rc = memblock_add(old_drmem[i].base_addr, + memblock_size); + rc = (rc < 0) ? -EINVAL : 0; + break; + } } return rc; } static int pseries_memory_notifier(struct notifier_block *nb, - unsigned long action, void *node) + unsigned long action, void *node) { + struct of_prop_reconfig *pr; int err = 0; switch (action) { - case PSERIES_RECONFIG_ADD: + case OF_RECONFIG_ATTACH_NODE: err = pseries_add_memory(node); break; - case PSERIES_RECONFIG_REMOVE: + case OF_RECONFIG_DETACH_NODE: err = pseries_remove_memory(node); break; - case PSERIES_DRCONF_MEM_ADD: - case PSERIES_DRCONF_MEM_REMOVE: - err = pseries_drconf_memory(node, action); + case OF_RECONFIG_UPDATE_PROPERTY: + pr = (struct of_prop_reconfig *)node; + if (!strcmp(pr->prop->name, "ibm,dynamic-memory")) + err = pseries_update_drconf_memory(pr); break; } return notifier_from_errno(err); @@ -235,7 +261,7 @@ static struct notifier_block pseries_mem_nb = { static int __init pseries_memory_hotplug_init(void) { if (firmware_has_feature(FW_FEATURE_LPAR)) - pSeries_reconfig_notifier_register(&pseries_mem_nb); + of_reconfig_notifier_register(&pseries_mem_nb); return 0; } diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c index 6153eea27ce7..da5594c441e4 100644 --- a/arch/powerpc/platforms/pseries/iommu.c +++ b/arch/powerpc/platforms/pseries/iommu.c @@ -36,13 +36,13 @@ #include #include #include +#include #include #include #include #include #include #include -#include #include #include #include @@ -1294,7 +1294,7 @@ static int iommu_reconfig_notifier(struct notifier_block *nb, unsigned long acti struct direct_window *window; switch (action) { - case PSERIES_RECONFIG_REMOVE: + case OF_RECONFIG_DETACH_NODE: if (pci && pci->iommu_table) iommu_free_table(pci->iommu_table, np->full_name); @@ -1357,7 +1357,7 @@ void iommu_init_early_pSeries(void) } - pSeries_reconfig_notifier_register(&iommu_reconfig_nb); + of_reconfig_notifier_register(&iommu_reconfig_nb); register_memory_notifier(&iommu_mem_nb); set_pci_dma_ops(&dma_iommu_ops); diff --git a/arch/powerpc/platforms/pseries/reconfig.c b/arch/powerpc/platforms/pseries/reconfig.c index f99f1ca8035b..720a0cc2e69f 100644 --- a/arch/powerpc/platforms/pseries/reconfig.c +++ b/arch/powerpc/platforms/pseries/reconfig.c @@ -16,11 +16,11 @@ #include #include #include +#include #include #include #include -#include #include /** @@ -55,28 +55,6 @@ static struct device_node *derive_parent(const char *path) return parent; } -static BLOCKING_NOTIFIER_HEAD(pSeries_reconfig_chain); - -int pSeries_reconfig_notifier_register(struct notifier_block *nb) -{ - 
return blocking_notifier_chain_register(&pSeries_reconfig_chain, nb); -} -EXPORT_SYMBOL_GPL(pSeries_reconfig_notifier_register); - -void pSeries_reconfig_notifier_unregister(struct notifier_block *nb) -{ - blocking_notifier_chain_unregister(&pSeries_reconfig_chain, nb); -} -EXPORT_SYMBOL_GPL(pSeries_reconfig_notifier_unregister); - -int pSeries_reconfig_notify(unsigned long action, void *p) -{ - int err = blocking_notifier_call_chain(&pSeries_reconfig_chain, - action, p); - - return notifier_to_errno(err); -} - static int pSeries_reconfig_add_node(const char *path, struct property *proplist) { struct device_node *np; @@ -100,13 +78,12 @@ static int pSeries_reconfig_add_node(const char *path, struct property *proplist goto out_err; } - err = pSeries_reconfig_notify(PSERIES_RECONFIG_ADD, np); + err = of_attach_node(np); if (err) { printk(KERN_ERR "Failed to add device node %s\n", path); goto out_err; } - of_attach_node(np); of_node_put(np->parent); return 0; @@ -134,9 +111,7 @@ static int pSeries_reconfig_remove_node(struct device_node *np) return -EBUSY; } - pSeries_reconfig_notify(PSERIES_RECONFIG_REMOVE, np); of_detach_node(np); - of_node_put(parent); of_node_put(np); /* Must decrement the refcount */ return 0; @@ -381,10 +356,9 @@ static int do_remove_property(char *buf, size_t bufsize) static int do_update_property(char *buf, size_t bufsize) { struct device_node *np; - struct pSeries_reconfig_prop_update upd_value; unsigned char *value; char *name, *end, *next_prop; - int rc, length; + int length; struct property *newprop; buf = parse_node(buf, bufsize, &np); end = buf + bufsize; @@ -406,41 +380,7 @@ static int do_update_property(char *buf, size_t bufsize) if (!strcmp(name, "slb-size") || !strcmp(name, "ibm,slb-size")) slb_set_size(*(int *)value); - upd_value.node = np; - upd_value.property = newprop; - pSeries_reconfig_notify(PSERIES_UPDATE_PROPERTY, &upd_value); - - rc = prom_update_property(np, newprop); - if (rc) - return rc; - - /* For memory under the ibm,dynamic-reconfiguration-memory node - * of the device tree, adding and removing memory is just an update - * to the ibm,dynamic-memory property instead of adding/removing a - * memory node in the device tree. For these cases we still need to - * involve the notifier chain. 
- */ - if (!strcmp(name, "ibm,dynamic-memory")) { - int action; - - next_prop = parse_next_property(next_prop, end, &name, - &length, &value); - if (!next_prop) - return -EINVAL; - - if (!strcmp(name, "add")) - action = PSERIES_DRCONF_MEM_ADD; - else - action = PSERIES_DRCONF_MEM_REMOVE; - - rc = pSeries_reconfig_notify(action, value); - if (rc) { - prom_update_property(np, newprop); - return rc; - } - } - - return 0; + return prom_update_property(np, newprop); } /** diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c index e3cb7ae61658..e1a5b8a32d25 100644 --- a/arch/powerpc/platforms/pseries/setup.c +++ b/arch/powerpc/platforms/pseries/setup.c @@ -40,6 +40,7 @@ #include #include #include +#include #include #include @@ -63,7 +64,6 @@ #include #include #include -#include #include "plpar_wrappers.h" #include "pseries.h" @@ -258,7 +258,7 @@ static int pci_dn_reconfig_notifier(struct notifier_block *nb, unsigned long act int err = NOTIFY_OK; switch (action) { - case PSERIES_RECONFIG_ADD: + case OF_RECONFIG_ATTACH_NODE: pci = np->parent->data; if (pci) { update_dn_pci_info(np, pci->phb); @@ -389,7 +389,7 @@ static void __init pSeries_setup_arch(void) /* Find and initialize PCI host bridges */ init_pci_config_tokens(); find_and_init_phbs(); - pSeries_reconfig_notifier_register(&pci_dn_reconfig_nb); + of_reconfig_notifier_register(&pci_dn_reconfig_nb); pSeries_nvram_init(); diff --git a/drivers/crypto/nx/nx-842.c b/drivers/crypto/nx/nx-842.c index 0ce625738677..6c4c000671c5 100644 --- a/drivers/crypto/nx/nx-842.c +++ b/drivers/crypto/nx/nx-842.c @@ -28,7 +28,6 @@ #include #include -#include #include #include "nx_csbcpb.h" /* struct nx_csbcpb */ @@ -1014,26 +1013,23 @@ error_out: * NOTIFY_BAD encoded with error number on failure, use * notifier_to_errno() to decode this value */ -static int nx842_OF_notifier(struct notifier_block *np, - unsigned long action, - void *update) +static int nx842_OF_notifier(struct notifier_block *np, unsigned long action, + void *update) { - struct pSeries_reconfig_prop_update *upd; + struct of_prop_reconfig *upd = update; struct nx842_devdata *local_devdata; struct device_node *node = NULL; - upd = (struct pSeries_reconfig_prop_update *)update; - rcu_read_lock(); local_devdata = rcu_dereference(devdata); if (local_devdata) node = local_devdata->dev->of_node; if (local_devdata && - action == PSERIES_UPDATE_PROPERTY && - !strcmp(upd->node->name, node->name)) { + action == OF_RECONFIG_UPDATE_PROPERTY && + !strcmp(upd->dn->name, node->name)) { rcu_read_unlock(); - nx842_OF_upd(upd->property); + nx842_OF_upd(upd->prop); } else rcu_read_unlock(); @@ -1182,7 +1178,7 @@ static int __init nx842_probe(struct vio_dev *viodev, synchronize_rcu(); kfree(old_devdata); - pSeries_reconfig_notifier_register(&nx842_of_nb); + of_reconfig_notifier_register(&nx842_of_nb); ret = nx842_OF_upd(NULL); if (ret && ret != -ENODEV) { @@ -1228,7 +1224,7 @@ static int __exit nx842_remove(struct vio_dev *viodev) spin_lock_irqsave(&devdata_mutex, flags); old_devdata = rcu_dereference_check(devdata, lockdep_is_held(&devdata_mutex)); - pSeries_reconfig_notifier_unregister(&nx842_of_nb); + of_reconfig_notifier_unregister(&nx842_of_nb); rcu_assign_pointer(devdata, NULL); spin_unlock_irqrestore(&devdata_mutex, flags); synchronize_rcu(); diff --git a/drivers/crypto/nx/nx.c b/drivers/crypto/nx/nx.c index 638110efae9b..f7a8a16aa7d3 100644 --- a/drivers/crypto/nx/nx.c +++ b/drivers/crypto/nx/nx.c @@ -33,7 +33,6 @@ #include #include #include -#include #include #include 
diff --git a/drivers/of/base.c b/drivers/of/base.c index bbd073f53c9f..87b63850e8dc 100644 --- a/drivers/of/base.c +++ b/drivers/of/base.c @@ -1028,6 +1028,24 @@ int of_parse_phandle_with_args(struct device_node *np, const char *list_name, } EXPORT_SYMBOL(of_parse_phandle_with_args); +#if defined(CONFIG_OF_DYNAMIC) +static int of_property_notify(int action, struct device_node *np, + struct property *prop) +{ + struct of_prop_reconfig pr; + + pr.dn = np; + pr.prop = prop; + return of_reconfig_notify(action, &pr); +} +#else +static int of_property_notify(int action, struct device_node *np, + struct property *prop) +{ + return 0; +} +#endif + /** * prom_add_property - Add a property to a node */ @@ -1035,6 +1053,11 @@ int prom_add_property(struct device_node *np, struct property *prop) { struct property **next; unsigned long flags; + int rc; + + rc = of_property_notify(OF_RECONFIG_ADD_PROPERTY, np, prop); + if (rc) + return rc; prop->next = NULL; write_lock_irqsave(&devtree_lock, flags); @@ -1072,6 +1095,11 @@ int prom_remove_property(struct device_node *np, struct property *prop) struct property **next; unsigned long flags; int found = 0; + int rc; + + rc = of_property_notify(OF_RECONFIG_REMOVE_PROPERTY, np, prop); + if (rc) + return rc; write_lock_irqsave(&devtree_lock, flags); next = &np->properties; @@ -1114,7 +1142,11 @@ int prom_update_property(struct device_node *np, { struct property **next, *oldprop; unsigned long flags; - int found = 0; + int rc, found = 0; + + rc = of_property_notify(OF_RECONFIG_UPDATE_PROPERTY, np, newprop); + if (rc) + return rc; if (!newprop->name) return -EINVAL; @@ -1160,6 +1192,26 @@ int prom_update_property(struct device_node *np, * device tree nodes. */ +static BLOCKING_NOTIFIER_HEAD(of_reconfig_chain); + +int of_reconfig_notifier_register(struct notifier_block *nb) +{ + return blocking_notifier_chain_register(&of_reconfig_chain, nb); +} + +int of_reconfig_notifier_unregister(struct notifier_block *nb) +{ + return blocking_notifier_chain_unregister(&of_reconfig_chain, nb); +} + +int of_reconfig_notify(unsigned long action, void *p) +{ + int rc; + + rc = blocking_notifier_call_chain(&of_reconfig_chain, action, p); + return notifier_to_errno(rc); +} + #ifdef CONFIG_PROC_DEVICETREE static void of_add_proc_dt_entry(struct device_node *dn) { @@ -1179,9 +1231,14 @@ static void of_add_proc_dt_entry(struct device_node *dn) /** * of_attach_node - Plug a device node into the tree and global list. */ -void of_attach_node(struct device_node *np) +int of_attach_node(struct device_node *np) { unsigned long flags; + int rc; + + rc = of_reconfig_notify(OF_RECONFIG_ATTACH_NODE, np); + if (rc) + return rc; write_lock_irqsave(&devtree_lock, flags); np->sibling = np->parent->child; @@ -1191,6 +1248,7 @@ void of_attach_node(struct device_node *np) write_unlock_irqrestore(&devtree_lock, flags); of_add_proc_dt_entry(np); + return 0; } #ifdef CONFIG_PROC_DEVICETREE @@ -1220,23 +1278,28 @@ static void of_remove_proc_dt_entry(struct device_node *dn) * The caller must hold a reference to the node. The memory associated with * the node is not freed until its refcount goes to zero. 
*/ -void of_detach_node(struct device_node *np) +int of_detach_node(struct device_node *np) { struct device_node *parent; unsigned long flags; + int rc = 0; + + rc = of_reconfig_notify(OF_RECONFIG_DETACH_NODE, np); + if (rc) + return rc; write_lock_irqsave(&devtree_lock, flags); if (of_node_check_flag(np, OF_DETACHED)) { /* someone already detached it */ write_unlock_irqrestore(&devtree_lock, flags); - return; + return rc; } parent = np->parent; if (!parent) { write_unlock_irqrestore(&devtree_lock, flags); - return; + return rc; } if (allnodes == np) @@ -1265,6 +1328,7 @@ void of_detach_node(struct device_node *np) write_unlock_irqrestore(&devtree_lock, flags); of_remove_proc_dt_entry(np); + return rc; } #endif /* defined(CONFIG_OF_DYNAMIC) */ diff --git a/include/linux/of.h b/include/linux/of.h index 72843b72a2b2..fb5d87b66e3e 100644 --- a/include/linux/of.h +++ b/include/linux/of.h @@ -22,6 +22,7 @@ #include #include #include +#include #include #include @@ -272,11 +273,24 @@ extern int prom_remove_property(struct device_node *np, struct property *prop); extern int prom_update_property(struct device_node *np, struct property *newprop); -#if defined(CONFIG_OF_DYNAMIC) /* For updating the device tree at runtime */ -extern void of_attach_node(struct device_node *); -extern void of_detach_node(struct device_node *); -#endif +#define OF_RECONFIG_ATTACH_NODE 0x0001 +#define OF_RECONFIG_DETACH_NODE 0x0002 +#define OF_RECONFIG_ADD_PROPERTY 0x0003 +#define OF_RECONFIG_REMOVE_PROPERTY 0x0004 +#define OF_RECONFIG_UPDATE_PROPERTY 0x0005 + +struct of_prop_reconfig { + struct device_node *dn; + struct property *prop; +}; + +extern int of_reconfig_notifier_register(struct notifier_block *); +extern int of_reconfig_notifier_unregister(struct notifier_block *); +extern int of_reconfig_notify(unsigned long, void *); + +extern int of_attach_node(struct device_node *); +extern int of_detach_node(struct device_node *); #define of_match_ptr(_ptr) (_ptr) -- cgit v1.2.3 From 79d1c712958f94372482ad74578b00f44e744c12 Mon Sep 17 00:00:00 2001 From: Nathan Fontenot Date: Tue, 2 Oct 2012 16:58:46 +0000 Subject: powerpc+of: Rename the drivers/of prom_* functions to of_* Rename the prom_*_property routines of the generic OF code to of_*_property. This brings them in line with the naming used by the rest of the OF code. 
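For context, a minimal sketch of how a caller could combine the renamed property helpers with the of_reconfig notifier chain introduced in the previous patch. The node path, the property contents and the example_* identifiers are invented for illustration; only the of_*_property() names, the of_reconfig_*() calls and struct of_prop_reconfig are taken from these patches.

#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/of.h>

/* Invented example property; "status"/"disabled" are placeholder contents. */
static struct property example_status = {
	.name	= "status",
	.value	= "disabled",
	.length	= sizeof("disabled"),
};

static int example_reconfig_cb(struct notifier_block *nb,
			       unsigned long action, void *arg)
{
	if (action == OF_RECONFIG_UPDATE_PROPERTY) {
		struct of_prop_reconfig *pr = arg;

		pr_info("property %s updated on %s\n",
			pr->prop->name, pr->dn->full_name);
	}
	return NOTIFY_OK;
}

static struct notifier_block example_nb = {
	.notifier_call = example_reconfig_cb,
};

static int example_disable_node(void)
{
	struct device_node *np;
	int rc;

	np = of_find_node_by_path("/soc/ethernet@f00");	/* made-up path */
	if (!np)
		return -ENODEV;

	of_reconfig_notifier_register(&example_nb);

	/* Replaces the property, or adds it if it does not exist yet; with
	 * CONFIG_OF_DYNAMIC this also fires the notifier registered above. */
	rc = of_update_property(np, &example_status);

	of_reconfig_notifier_unregister(&example_nb);
	of_node_put(np);
	return rc;
}

A single of_update_property() call thus updates the live tree and notifies any registered listeners, which is the pattern the pseries memory-hotplug and nx-842 notifiers above rely on.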
Signed-off-by: Nathan Fontenot Acked-by: Geoff Levand Acked-by: Rob Herring Acked-by: Grant Likely Signed-off-by: Benjamin Herrenschmidt --- arch/arm/mach-mxs/mach-mxs.c | 2 +- arch/powerpc/kernel/machine_kexec.c | 14 +++++++------- arch/powerpc/kernel/machine_kexec_64.c | 8 ++++---- arch/powerpc/kernel/pci_32.c | 2 +- arch/powerpc/platforms/85xx/p1022_ds.c | 6 +++--- arch/powerpc/platforms/ps3/os-area.c | 6 +++--- arch/powerpc/platforms/pseries/iommu.c | 4 ++-- arch/powerpc/platforms/pseries/mobility.c | 4 ++-- arch/powerpc/platforms/pseries/reconfig.c | 6 +++--- drivers/macintosh/smu.c | 2 +- drivers/of/base.c | 15 +++++++-------- include/linux/of.h | 7 +++---- 12 files changed, 37 insertions(+), 39 deletions(-) (limited to 'include/linux') diff --git a/arch/arm/mach-mxs/mach-mxs.c b/arch/arm/mach-mxs/mach-mxs.c index 4748ec551a68..d61b915ce52c 100644 --- a/arch/arm/mach-mxs/mach-mxs.c +++ b/arch/arm/mach-mxs/mach-mxs.c @@ -211,7 +211,7 @@ static void __init update_fec_mac_prop(enum mac_oui oui) macaddr[4] = (val >> 8) & 0xff; macaddr[5] = (val >> 0) & 0xff; - prom_update_property(np, newmac); + of_update_property(np, newmac); } } diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c index fa9f6c72f557..e1ec57e87b3b 100644 --- a/arch/powerpc/kernel/machine_kexec.c +++ b/arch/powerpc/kernel/machine_kexec.c @@ -218,23 +218,23 @@ static void __init export_crashk_values(struct device_node *node) * be sure what's in them, so remove them. */ prop = of_find_property(node, "linux,crashkernel-base", NULL); if (prop) - prom_remove_property(node, prop); + of_remove_property(node, prop); prop = of_find_property(node, "linux,crashkernel-size", NULL); if (prop) - prom_remove_property(node, prop); + of_remove_property(node, prop); if (crashk_res.start != 0) { - prom_add_property(node, &crashk_base_prop); + of_add_property(node, &crashk_base_prop); crashk_size = resource_size(&crashk_res); - prom_add_property(node, &crashk_size_prop); + of_add_property(node, &crashk_size_prop); } /* * memory_limit is required by the kexec-tools to limit the * crash regions to the actual memory used. 
*/ - prom_update_property(node, &memory_limit_prop); + of_update_property(node, &memory_limit_prop); } static int __init kexec_setup(void) @@ -249,11 +249,11 @@ static int __init kexec_setup(void) /* remove any stale properties so ours can be found */ prop = of_find_property(node, kernel_end_prop.name, NULL); if (prop) - prom_remove_property(node, prop); + of_remove_property(node, prop); /* information needed by userspace when using default_machine_kexec */ kernel_end = __pa(_end); - prom_add_property(node, &kernel_end_prop); + of_add_property(node, &kernel_end_prop); export_crashk_values(node); diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c index d7f609086a99..7206701b1ff1 100644 --- a/arch/powerpc/kernel/machine_kexec_64.c +++ b/arch/powerpc/kernel/machine_kexec_64.c @@ -389,14 +389,14 @@ static int __init export_htab_values(void) /* remove any stale propertys so ours can be found */ prop = of_find_property(node, htab_base_prop.name, NULL); if (prop) - prom_remove_property(node, prop); + of_remove_property(node, prop); prop = of_find_property(node, htab_size_prop.name, NULL); if (prop) - prom_remove_property(node, prop); + of_remove_property(node, prop); htab_base = __pa(htab_address); - prom_add_property(node, &htab_base_prop); - prom_add_property(node, &htab_size_prop); + of_add_property(node, &htab_base_prop); + of_add_property(node, &htab_size_prop); of_node_put(node); return 0; diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c index 4b06ec5a502e..64f526a321f5 100644 --- a/arch/powerpc/kernel/pci_32.c +++ b/arch/powerpc/kernel/pci_32.c @@ -208,7 +208,7 @@ pci_create_OF_bus_map(void) of_prop->name = "pci-OF-bus-map"; of_prop->length = 256; of_prop->value = &of_prop[1]; - prom_add_property(dn, of_prop); + of_add_property(dn, of_prop); of_node_put(dn); } } diff --git a/arch/powerpc/platforms/85xx/p1022_ds.c b/arch/powerpc/platforms/85xx/p1022_ds.c index 848a3e98e1c1..8fb12570b2f5 100644 --- a/arch/powerpc/platforms/85xx/p1022_ds.c +++ b/arch/powerpc/platforms/85xx/p1022_ds.c @@ -539,7 +539,7 @@ static void __init p1022_ds_setup_arch(void) }; /* - * prom_update_property() is called before + * of_update_property() is called before * kmalloc() is available, so the 'new' object * should be allocated in the global area. 
* The easiest way is to do that is to @@ -548,7 +548,7 @@ static void __init p1022_ds_setup_arch(void) */ pr_info("p1022ds: disabling %s node", np2->full_name); - prom_update_property(np2, &nor_status); + of_update_property(np2, &nor_status); of_node_put(np2); } @@ -564,7 +564,7 @@ static void __init p1022_ds_setup_arch(void) pr_info("p1022ds: disabling %s node", np2->full_name); - prom_update_property(np2, &nand_status); + of_update_property(np2, &nand_status); of_node_put(np2); } diff --git a/arch/powerpc/platforms/ps3/os-area.c b/arch/powerpc/platforms/ps3/os-area.c index 56d26bc4fd41..09787139834d 100644 --- a/arch/powerpc/platforms/ps3/os-area.c +++ b/arch/powerpc/platforms/ps3/os-area.c @@ -280,13 +280,13 @@ static void os_area_set_property(struct device_node *node, if (tmp) { pr_debug("%s:%d found %s\n", __func__, __LINE__, prop->name); - prom_remove_property(node, tmp); + of_remove_property(node, tmp); } - result = prom_add_property(node, prop); + result = of_add_property(node, prop); if (result) - pr_debug("%s:%d prom_set_property failed\n", __func__, + pr_debug("%s:%d of_set_property failed\n", __func__, __LINE__); } diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c index da5594c441e4..e2685badb5db 100644 --- a/arch/powerpc/platforms/pseries/iommu.c +++ b/arch/powerpc/platforms/pseries/iommu.c @@ -760,7 +760,7 @@ static void remove_ddw(struct device_node *np) __remove_ddw(np, ddw_avail, liobn); delprop: - ret = prom_remove_property(np, win64); + ret = of_remove_property(np, win64); if (ret) pr_warning("%s: failed to remove direct window property: %d\n", np->full_name, ret); @@ -1070,7 +1070,7 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn) goto out_free_window; } - ret = prom_add_property(pdn, win64); + ret = of_add_property(pdn, win64); if (ret) { dev_err(&dev->dev, "unable to add dma window property for %s: %d", pdn->full_name, ret); diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c index dd30b12edfe4..6573808cc5f3 100644 --- a/arch/powerpc/platforms/pseries/mobility.c +++ b/arch/powerpc/platforms/pseries/mobility.c @@ -116,7 +116,7 @@ static int update_dt_property(struct device_node *dn, struct property **prop, } if (!more) { - prom_update_property(dn, new_prop); + of_update_property(dn, new_prop); new_prop = NULL; } @@ -172,7 +172,7 @@ static int update_dt_node(u32 phandle) case 0x80000000: prop = of_find_property(dn, prop_name, NULL); - prom_remove_property(dn, prop); + of_remove_property(dn, prop); prop = NULL; break; diff --git a/arch/powerpc/platforms/pseries/reconfig.c b/arch/powerpc/platforms/pseries/reconfig.c index 720a0cc2e69f..30b358dc2beb 100644 --- a/arch/powerpc/platforms/pseries/reconfig.c +++ b/arch/powerpc/platforms/pseries/reconfig.c @@ -326,7 +326,7 @@ static int do_add_property(char *buf, size_t bufsize) if (!prop) return -ENOMEM; - prom_add_property(np, prop); + of_add_property(np, prop); return 0; } @@ -350,7 +350,7 @@ static int do_remove_property(char *buf, size_t bufsize) prop = of_find_property(np, buf, NULL); - return prom_remove_property(np, prop); + return of_remove_property(np, prop); } static int do_update_property(char *buf, size_t bufsize) @@ -380,7 +380,7 @@ static int do_update_property(char *buf, size_t bufsize) if (!strcmp(name, "slb-size") || !strcmp(name, "ibm,slb-size")) slb_set_size(*(int *)value); - return prom_update_property(np, newprop); + return of_update_property(np, newprop); } /** diff --git 
a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c index 7d5a6b40b31c..5b939509db3b 100644 --- a/drivers/macintosh/smu.c +++ b/drivers/macintosh/smu.c @@ -997,7 +997,7 @@ static struct smu_sdbp_header *smu_create_sdb_partition(int id) "%02x !\n", id, hdr->id); goto failure; } - if (prom_add_property(smu->of_node, prop)) { + if (of_add_property(smu->of_node, prop)) { printk(KERN_DEBUG "SMU: Failed creating sdb-partition-%02x " "property !\n", id); goto failure; diff --git a/drivers/of/base.c b/drivers/of/base.c index 87b63850e8dc..02d94c4ea83c 100644 --- a/drivers/of/base.c +++ b/drivers/of/base.c @@ -1047,9 +1047,9 @@ static int of_property_notify(int action, struct device_node *np, #endif /** - * prom_add_property - Add a property to a node + * of_add_property - Add a property to a node */ -int prom_add_property(struct device_node *np, struct property *prop) +int of_add_property(struct device_node *np, struct property *prop) { struct property **next; unsigned long flags; @@ -1083,14 +1083,14 @@ int prom_add_property(struct device_node *np, struct property *prop) } /** - * prom_remove_property - Remove a property from a node. + * of_remove_property - Remove a property from a node. * * Note that we don't actually remove it, since we have given out * who-knows-how-many pointers to the data using get-property. * Instead we just move the property to the "dead properties" * list, so it won't be found any more. */ -int prom_remove_property(struct device_node *np, struct property *prop) +int of_remove_property(struct device_node *np, struct property *prop) { struct property **next; unsigned long flags; @@ -1129,7 +1129,7 @@ int prom_remove_property(struct device_node *np, struct property *prop) } /* - * prom_update_property - Update a property in a node, if the property does + * of_update_property - Update a property in a node, if the property does * not exist, add it. 
* * Note that we don't actually remove it, since we have given out @@ -1137,8 +1137,7 @@ int prom_remove_property(struct device_node *np, struct property *prop) * Instead we just move the property to the "dead properties" list, * and add the new property to the property list */ -int prom_update_property(struct device_node *np, - struct property *newprop) +int of_update_property(struct device_node *np, struct property *newprop) { struct property **next, *oldprop; unsigned long flags; @@ -1153,7 +1152,7 @@ int prom_update_property(struct device_node *np, oldprop = of_find_property(np, newprop->name, NULL); if (!oldprop) - return prom_add_property(np, newprop); + return of_add_property(np, newprop); write_lock_irqsave(&devtree_lock, flags); next = &np->properties; diff --git a/include/linux/of.h b/include/linux/of.h index fb5d87b66e3e..a093b2fe5dfb 100644 --- a/include/linux/of.h +++ b/include/linux/of.h @@ -268,10 +268,9 @@ extern int of_alias_get_id(struct device_node *np, const char *stem); extern int of_machine_is_compatible(const char *compat); -extern int prom_add_property(struct device_node* np, struct property* prop); -extern int prom_remove_property(struct device_node *np, struct property *prop); -extern int prom_update_property(struct device_node *np, - struct property *newprop); +extern int of_add_property(struct device_node *np, struct property *prop); +extern int of_remove_property(struct device_node *np, struct property *prop); +extern int of_update_property(struct device_node *np, struct property *newprop); /* For updating the device tree at runtime */ #define OF_RECONFIG_ATTACH_NODE 0x0001 -- cgit v1.2.3 From 621eb19ce1ec216e03ad354cb0c4061736b2a436 Mon Sep 17 00:00:00 2001 From: "J. Bruce Fields" Date: Wed, 14 Nov 2012 10:48:05 -0500 Subject: svcrpc: Revert "sunrpc/cache.h: replace simple_strtoul" Commit bbf43dc888833ac0539e437dbaeb28bfd4fbab9f "sunrpc/cache.h: replace simple_strtoul" introduced new range-checking which could cause get_int to fail on unsigned integers too large to be represented as an int. We could parse them as unsigned instead--but it turns out svcgssd is actually passing down "-1" in some cases. Which is perhaps stupid, but there's nothing we can do about it now. So just revert back to the previous "sloppy" behavior that accepts either representation. Cc: stable@vger.kernel.org Reported-by: Sven Geggus Signed-off-by: J. Bruce Fields --- include/linux/sunrpc/cache.h | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h index f792794f6634..5dc9ee4d616e 100644 --- a/include/linux/sunrpc/cache.h +++ b/include/linux/sunrpc/cache.h @@ -217,6 +217,8 @@ extern int qword_get(char **bpp, char *dest, int bufsize); static inline int get_int(char **bpp, int *anint) { char buf[50]; + char *ep; + int rv; int len = qword_get(bpp, buf, sizeof(buf)); if (len < 0) @@ -224,9 +226,11 @@ static inline int get_int(char **bpp, int *anint) if (len == 0) return -ENOENT; - if (kstrtoint(buf, 0, anint)) + rv = simple_strtol(buf, &ep, 0); + if (*ep) return -EINVAL; + *anint = rv; return 0; } -- cgit v1.2.3 From fc05d5a30dc19dd4c6d161e551719a8c597c7890 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Thu, 4 Oct 2012 09:28:49 +0200 Subject: mtd: delete nomadik_nand driver The nomadik_nand driver is really just a subset of the FSMC NAND driver, and there are no users anymore so let's delete it. 
Signed-off-by: Linus Walleij Acked-by: Jean-Christophe PLAGNIOL-VILLARD Signed-off-by: Artem Bityutskiy --- drivers/mtd/nand/Kconfig | 6 - drivers/mtd/nand/Makefile | 1 - drivers/mtd/nand/nomadik_nand.c | 235 ------------------------- include/linux/platform_data/mtd-nomadik-nand.h | 16 -- 4 files changed, 258 deletions(-) delete mode 100644 drivers/mtd/nand/nomadik_nand.c delete mode 100644 include/linux/platform_data/mtd-nomadik-nand.h (limited to 'include/linux') diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig index ee803d611e4e..a803d9ba55bd 100644 --- a/drivers/mtd/nand/Kconfig +++ b/drivers/mtd/nand/Kconfig @@ -526,12 +526,6 @@ config MTD_NAND_MXC This enables the driver for the NAND flash controller on the MXC processors. -config MTD_NAND_NOMADIK - tristate "ST Nomadik 8815 NAND support" - depends on ARCH_NOMADIK - help - Driver for the NAND flash controller on the Nomadik, with ECC. - config MTD_NAND_SH_FLCTL tristate "Support for NAND on Renesas SuperH FLCTL" depends on SUPERH || ARCH_SHMOBILE diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile index 38358c90771e..44fca0553365 100644 --- a/drivers/mtd/nand/Makefile +++ b/drivers/mtd/nand/Makefile @@ -49,7 +49,6 @@ obj-$(CONFIG_MTD_NAND_MXC) += mxc_nand.o obj-$(CONFIG_MTD_NAND_SOCRATES) += socrates_nand.o obj-$(CONFIG_MTD_NAND_TXX9NDFMC) += txx9ndfmc.o obj-$(CONFIG_MTD_NAND_NUC900) += nuc900_nand.o -obj-$(CONFIG_MTD_NAND_NOMADIK) += nomadik_nand.o obj-$(CONFIG_MTD_NAND_MPC5121_NFC) += mpc5121_nfc.o obj-$(CONFIG_MTD_NAND_RICOH) += r852.o obj-$(CONFIG_MTD_NAND_JZ4740) += jz4740_nand.o diff --git a/drivers/mtd/nand/nomadik_nand.c b/drivers/mtd/nand/nomadik_nand.c deleted file mode 100644 index 9ee0c4edfacf..000000000000 --- a/drivers/mtd/nand/nomadik_nand.c +++ /dev/null @@ -1,235 +0,0 @@ -/* - * drivers/mtd/nand/nomadik_nand.c - * - * Overview: - * Driver for on-board NAND flash on Nomadik Platforms - * - * Copyright © 2007 STMicroelectronics Pvt. Ltd. - * Author: Sachin Verma - * - * Copyright © 2009 Alessandro Rubini - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -struct nomadik_nand_host { - struct mtd_info mtd; - struct nand_chip nand; - void __iomem *data_va; - void __iomem *cmd_va; - void __iomem *addr_va; - struct nand_bbt_descr *bbt_desc; -}; - -static struct nand_ecclayout nomadik_ecc_layout = { - .eccbytes = 3 * 4, - .eccpos = { /* each subpage has 16 bytes: pos 2,3,4 hosts ECC */ - 0x02, 0x03, 0x04, - 0x12, 0x13, 0x14, - 0x22, 0x23, 0x24, - 0x32, 0x33, 0x34}, - /* let's keep bytes 5,6,7 for us, just in case we change ECC algo */ - .oobfree = { {0x08, 0x08}, {0x18, 0x08}, {0x28, 0x08}, {0x38, 0x08} }, -}; - -static void nomadik_ecc_control(struct mtd_info *mtd, int mode) -{ - /* No need to enable hw ecc, it's on by default */ -} - -static void nomadik_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl) -{ - struct nand_chip *nand = mtd->priv; - struct nomadik_nand_host *host = nand->priv; - - if (cmd == NAND_CMD_NONE) - return; - - if (ctrl & NAND_CLE) - writeb(cmd, host->cmd_va); - else - writeb(cmd, host->addr_va); -} - -static int nomadik_nand_probe(struct platform_device *pdev) -{ - struct nomadik_nand_platform_data *pdata = pdev->dev.platform_data; - struct nomadik_nand_host *host; - struct mtd_info *mtd; - struct nand_chip *nand; - struct resource *res; - int ret = 0; - - /* Allocate memory for the device structure (and zero it) */ - host = kzalloc(sizeof(struct nomadik_nand_host), GFP_KERNEL); - if (!host) { - dev_err(&pdev->dev, "Failed to allocate device structure.\n"); - return -ENOMEM; - } - - /* Call the client's init function, if any */ - if (pdata->init) - ret = pdata->init(); - if (ret < 0) { - dev_err(&pdev->dev, "Init function failed\n"); - goto err; - } - - /* ioremap three regions */ - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_addr"); - if (!res) { - ret = -EIO; - goto err_unmap; - } - host->addr_va = ioremap(res->start, resource_size(res)); - - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_data"); - if (!res) { - ret = -EIO; - goto err_unmap; - } - host->data_va = ioremap(res->start, resource_size(res)); - - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_cmd"); - if (!res) { - ret = -EIO; - goto err_unmap; - } - host->cmd_va = ioremap(res->start, resource_size(res)); - - if (!host->addr_va || !host->data_va || !host->cmd_va) { - ret = -ENOMEM; - goto err_unmap; - } - - /* Link all private pointers */ - mtd = &host->mtd; - nand = &host->nand; - mtd->priv = nand; - nand->priv = host; - - host->mtd.owner = THIS_MODULE; - nand->IO_ADDR_R = host->data_va; - nand->IO_ADDR_W = host->data_va; - nand->cmd_ctrl = nomadik_cmd_ctrl; - - /* - * This stanza declares ECC_HW but uses soft routines. It's because - * HW claims to make the calculation but not the correction. However, - * I haven't managed to get the desired data out of it until now. 
- */ - nand->ecc.mode = NAND_ECC_SOFT; - nand->ecc.layout = &nomadik_ecc_layout; - nand->ecc.hwctl = nomadik_ecc_control; - nand->ecc.size = 512; - nand->ecc.bytes = 3; - - nand->options = pdata->options; - - /* - * Scan to find existence of the device - */ - if (nand_scan(&host->mtd, 1)) { - ret = -ENXIO; - goto err_unmap; - } - - mtd_device_register(&host->mtd, pdata->parts, pdata->nparts); - - platform_set_drvdata(pdev, host); - return 0; - - err_unmap: - if (host->cmd_va) - iounmap(host->cmd_va); - if (host->data_va) - iounmap(host->data_va); - if (host->addr_va) - iounmap(host->addr_va); - err: - kfree(host); - return ret; -} - -/* - * Clean up routine - */ -static int nomadik_nand_remove(struct platform_device *pdev) -{ - struct nomadik_nand_host *host = platform_get_drvdata(pdev); - struct nomadik_nand_platform_data *pdata = pdev->dev.platform_data; - - if (pdata->exit) - pdata->exit(); - - if (host) { - nand_release(&host->mtd); - iounmap(host->cmd_va); - iounmap(host->data_va); - iounmap(host->addr_va); - kfree(host); - } - return 0; -} - -static int nomadik_nand_suspend(struct device *dev) -{ - struct nomadik_nand_host *host = dev_get_drvdata(dev); - int ret = 0; - if (host) - ret = mtd_suspend(&host->mtd); - return ret; -} - -static int nomadik_nand_resume(struct device *dev) -{ - struct nomadik_nand_host *host = dev_get_drvdata(dev); - if (host) - mtd_resume(&host->mtd); - return 0; -} - -static const struct dev_pm_ops nomadik_nand_pm_ops = { - .suspend = nomadik_nand_suspend, - .resume = nomadik_nand_resume, -}; - -static struct platform_driver nomadik_nand_driver = { - .probe = nomadik_nand_probe, - .remove = nomadik_nand_remove, - .driver = { - .owner = THIS_MODULE, - .name = "nomadik_nand", - .pm = &nomadik_nand_pm_ops, - }, -}; - -module_platform_driver(nomadik_nand_driver); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("ST Microelectronics (sachin.verma@st.com)"); -MODULE_DESCRIPTION("NAND driver for Nomadik Platform"); diff --git a/include/linux/platform_data/mtd-nomadik-nand.h b/include/linux/platform_data/mtd-nomadik-nand.h deleted file mode 100644 index c3c8254c22a5..000000000000 --- a/include/linux/platform_data/mtd-nomadik-nand.h +++ /dev/null @@ -1,16 +0,0 @@ -#ifndef __ASM_ARCH_NAND_H -#define __ASM_ARCH_NAND_H - -struct nomadik_nand_platform_data { - struct mtd_partition *parts; - int nparts; - int options; - int (*init) (void); - int (*exit) (void); -}; - -#define NAND_IO_DATA 0x40000000 -#define NAND_IO_CMD 0x40800000 -#define NAND_IO_ADDR 0x41000000 - -#endif /* __ASM_ARCH_NAND_H */ -- cgit v1.2.3 From 6d7b42a447f92eb3e7e410bbf62042693eb040f7 Mon Sep 17 00:00:00 2001 From: Jean-Christophe PLAGNIOL-VILLARD Date: Thu, 4 Oct 2012 15:14:16 +0200 Subject: mtd: fsmc_nand: pass the ale and cmd resource via resource Do not use the platform_data to pass resource and be smart in the drivers. 
Just pass it via resource Switch to devm_request_and_ioremap at the sametime Signed-off-by: Jean-Christophe PLAGNIOL-VILLARD Acked-by: Linus Walleij Reviewed-By: Vipin Kumar Signed-off-by: Artem Bityutskiy --- .../devicetree/bindings/mtd/fsmc-nand.txt | 12 +++--- arch/arm/boot/dts/spear13xx.dtsi | 10 ++--- arch/arm/boot/dts/spear300.dtsi | 8 ++-- arch/arm/boot/dts/spear310.dtsi | 8 ++-- arch/arm/boot/dts/spear320.dtsi | 8 ++-- arch/arm/boot/dts/spear600.dtsi | 8 ++-- arch/arm/mach-u300/core.c | 14 ++++++- drivers/mtd/nand/fsmc_nand.c | 44 ++++++---------------- include/linux/mtd/fsmc.h | 3 -- 9 files changed, 49 insertions(+), 66 deletions(-) (limited to 'include/linux') diff --git a/Documentation/devicetree/bindings/mtd/fsmc-nand.txt b/Documentation/devicetree/bindings/mtd/fsmc-nand.txt index e2c663b354d2..e3ea32e7de3e 100644 --- a/Documentation/devicetree/bindings/mtd/fsmc-nand.txt +++ b/Documentation/devicetree/bindings/mtd/fsmc-nand.txt @@ -3,9 +3,7 @@ Required properties: - compatible : "st,spear600-fsmc-nand" - reg : Address range of the mtd chip -- reg-names: Should contain the reg names "fsmc_regs" and "nand_data" -- st,ale-off : Chip specific offset to ALE -- st,cle-off : Chip specific offset to CLE +- reg-names: Should contain the reg names "fsmc_regs", "nand_data", "nand_addr" and "nand_cmd" Optional properties: - bank-width : Width (in bytes) of the device. If not present, the width @@ -19,10 +17,10 @@ Example: #address-cells = <1>; #size-cells = <1>; reg = <0xd1800000 0x1000 /* FSMC Register */ - 0xd2000000 0x4000>; /* NAND Base */ - reg-names = "fsmc_regs", "nand_data"; - st,ale-off = <0x20000>; - st,cle-off = <0x10000>; + 0xd2000000 0x0010 /* NAND Base DATA */ + 0xd2020000 0x0010 /* NAND Base ADDR */ + 0xd2010000 0x0010>; /* NAND Base CMD */ + reg-names = "fsmc_regs", "nand_data", "nand_addr", "nand_cmd"; bank-width = <1>; nand-skip-bbtscan; diff --git a/arch/arm/boot/dts/spear13xx.dtsi b/arch/arm/boot/dts/spear13xx.dtsi index f7b84aced654..14a6d15c2a81 100644 --- a/arch/arm/boot/dts/spear13xx.dtsi +++ b/arch/arm/boot/dts/spear13xx.dtsi @@ -104,15 +104,15 @@ compatible = "st,spear600-fsmc-nand"; #address-cells = <1>; #size-cells = <1>; - reg = <0xb0000000 0x1000 /* FSMC Register */ - 0xb0800000 0x0010>; /* NAND Base */ - reg-names = "fsmc_regs", "nand_data"; + reg = <0xb0000000 0x1000 /* FSMC Register*/ + 0xb0800000 0x0010 /* NAND Base DATA */ + 0xb0820000 0x0010 /* NAND Base ADDR */ + 0xb0810000 0x0010>; /* NAND Base CMD */ + reg-names = "fsmc_regs", "nand_data", "nand_addr", "nand_cmd"; interrupts = <0 20 0x4 0 21 0x4 0 22 0x4 0 23 0x4>; - st,ale-off = <0x20000>; - st,cle-off = <0x10000>; status = "disabled"; }; diff --git a/arch/arm/boot/dts/spear300.dtsi b/arch/arm/boot/dts/spear300.dtsi index ed3627c116cc..bc436387d7f9 100644 --- a/arch/arm/boot/dts/spear300.dtsi +++ b/arch/arm/boot/dts/spear300.dtsi @@ -38,10 +38,10 @@ #address-cells = <1>; #size-cells = <1>; reg = <0x94000000 0x1000 /* FSMC Register */ - 0x80000000 0x0010>; /* NAND Base */ - reg-names = "fsmc_regs", "nand_data"; - st,ale-off = <0x20000>; - st,cle-off = <0x10000>; + 0x80000000 0x0010 /* NAND Base DATA */ + 0x80020000 0x0010 /* NAND Base ADDR */ + 0x80010000 0x0010>; /* NAND Base CMD */ + reg-names = "fsmc_regs", "nand_data", "nand_addr", "nand_cmd"; status = "disabled"; }; diff --git a/arch/arm/boot/dts/spear310.dtsi b/arch/arm/boot/dts/spear310.dtsi index 62fc4fb3e5f9..7840e529aba2 100644 --- a/arch/arm/boot/dts/spear310.dtsi +++ b/arch/arm/boot/dts/spear310.dtsi @@ -32,10 +32,10 @@ #address-cells = 
<1>; #size-cells = <1>; reg = <0x44000000 0x1000 /* FSMC Register */ - 0x40000000 0x0010>; /* NAND Base */ - reg-names = "fsmc_regs", "nand_data"; - st,ale-off = <0x10000>; - st,cle-off = <0x20000>; + 0x40000000 0x0010 /* NAND Base DATA */ + 0x40020000 0x0010 /* NAND Base ADDR */ + 0x40010000 0x0010>; /* NAND Base CMD */ + reg-names = "fsmc_regs", "nand_data", "nand_addr", "nand_cmd"; status = "disabled"; }; diff --git a/arch/arm/boot/dts/spear320.dtsi b/arch/arm/boot/dts/spear320.dtsi index 1f49d69595a0..5ad820641ac0 100644 --- a/arch/arm/boot/dts/spear320.dtsi +++ b/arch/arm/boot/dts/spear320.dtsi @@ -38,10 +38,10 @@ #address-cells = <1>; #size-cells = <1>; reg = <0x4c000000 0x1000 /* FSMC Register */ - 0x50000000 0x0010>; /* NAND Base */ - reg-names = "fsmc_regs", "nand_data"; - st,ale-off = <0x20000>; - st,cle-off = <0x10000>; + 0x50000000 0x0010 /* NAND Base DATA */ + 0x50020000 0x0010 /* NAND Base ADDR */ + 0x50010000 0x0010>; /* NAND Base CMD */ + reg-names = "fsmc_regs", "nand_data", "nand_addr", "nand_cmd"; status = "disabled"; }; diff --git a/arch/arm/boot/dts/spear600.dtsi b/arch/arm/boot/dts/spear600.dtsi index a3c36e47d7ef..4ecc66f5ac88 100644 --- a/arch/arm/boot/dts/spear600.dtsi +++ b/arch/arm/boot/dts/spear600.dtsi @@ -67,10 +67,10 @@ #address-cells = <1>; #size-cells = <1>; reg = <0xd1800000 0x1000 /* FSMC Register */ - 0xd2000000 0x4000>; /* NAND Base */ - reg-names = "fsmc_regs", "nand_data"; - st,ale-off = <0x20000>; - st,cle-off = <0x10000>; + 0xd2000000 0x0010 /* NAND Base DATA */ + 0xd2020000 0x0010 /* NAND Base ADDR */ + 0xd2010000 0x0010>; /* NAND Base CMD */ + reg-names = "fsmc_regs", "nand_data", "nand_addr", "nand_cmd"; status = "disabled"; }; diff --git a/arch/arm/mach-u300/core.c b/arch/arm/mach-u300/core.c index b8efac4daed8..f642a1552346 100644 --- a/arch/arm/mach-u300/core.c +++ b/arch/arm/mach-u300/core.c @@ -251,6 +251,18 @@ static struct resource rtc_resources[] = { * but these are not yet used by the driver. 
*/ static struct resource fsmc_resources[] = { + { + .name = "nand_addr", + .start = U300_NAND_CS0_PHYS_BASE + PLAT_NAND_ALE, + .end = U300_NAND_CS0_PHYS_BASE + PLAT_NAND_ALE + SZ_16K - 1, + .flags = IORESOURCE_MEM, + }, + { + .name = "nand_cmd", + .start = U300_NAND_CS0_PHYS_BASE + PLAT_NAND_CLE, + .end = U300_NAND_CS0_PHYS_BASE + PLAT_NAND_CLE + SZ_16K - 1, + .flags = IORESOURCE_MEM, + }, { .name = "nand_data", .start = U300_NAND_CS0_PHYS_BASE, @@ -1496,8 +1508,6 @@ static struct fsmc_nand_platform_data nand_platform_data = { .nr_partitions = ARRAY_SIZE(u300_partitions), .options = NAND_SKIP_BBTSCAN, .width = FSMC_NAND_BW8, - .ale_off = PLAT_NAND_ALE, - .cle_off = PLAT_NAND_CLE, }; static struct platform_device nand_device = { diff --git a/drivers/mtd/nand/fsmc_nand.c b/drivers/mtd/nand/fsmc_nand.c index 38d26240d8b1..cb8645087151 100644 --- a/drivers/mtd/nand/fsmc_nand.c +++ b/drivers/mtd/nand/fsmc_nand.c @@ -876,8 +876,6 @@ static int __devinit fsmc_nand_probe_config_dt(struct platform_device *pdev, return -EINVAL; } } - of_property_read_u32(np, "st,ale-off", &pdata->ale_off); - of_property_read_u32(np, "st,cle-off", &pdata->cle_off); if (of_get_property(np, "nand-skip-bbtscan", NULL)) pdata->options = NAND_SKIP_BBTSCAN; @@ -935,41 +933,28 @@ static int __init fsmc_nand_probe(struct platform_device *pdev) if (!res) return -EINVAL; - if (!devm_request_mem_region(&pdev->dev, res->start, resource_size(res), - pdev->name)) { - dev_err(&pdev->dev, "Failed to get memory data resourse\n"); - return -ENOENT; - } - - host->data_pa = (dma_addr_t)res->start; - host->data_va = devm_ioremap(&pdev->dev, res->start, - resource_size(res)); + host->data_va = devm_request_and_ioremap(&pdev->dev, res); if (!host->data_va) { dev_err(&pdev->dev, "data ioremap failed\n"); return -ENOMEM; } + host->data_pa = (dma_addr_t)res->start; - if (!devm_request_mem_region(&pdev->dev, res->start + pdata->ale_off, - resource_size(res), pdev->name)) { - dev_err(&pdev->dev, "Failed to get memory ale resourse\n"); - return -ENOENT; - } + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_addr"); + if (!res) + return -EINVAL; - host->addr_va = devm_ioremap(&pdev->dev, res->start + pdata->ale_off, - resource_size(res)); + host->addr_va = devm_request_and_ioremap(&pdev->dev, res); if (!host->addr_va) { dev_err(&pdev->dev, "ale ioremap failed\n"); return -ENOMEM; } - if (!devm_request_mem_region(&pdev->dev, res->start + pdata->cle_off, - resource_size(res), pdev->name)) { - dev_err(&pdev->dev, "Failed to get memory cle resourse\n"); - return -ENOENT; - } + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_cmd"); + if (!res) + return -EINVAL; - host->cmd_va = devm_ioremap(&pdev->dev, res->start + pdata->cle_off, - resource_size(res)); + host->cmd_va = devm_request_and_ioremap(&pdev->dev, res); if (!host->cmd_va) { dev_err(&pdev->dev, "ale ioremap failed\n"); return -ENOMEM; @@ -979,14 +964,7 @@ static int __init fsmc_nand_probe(struct platform_device *pdev) if (!res) return -EINVAL; - if (!devm_request_mem_region(&pdev->dev, res->start, resource_size(res), - pdev->name)) { - dev_err(&pdev->dev, "Failed to get memory regs resourse\n"); - return -ENOENT; - } - - host->regs_va = devm_ioremap(&pdev->dev, res->start, - resource_size(res)); + host->regs_va = devm_request_and_ioremap(&pdev->dev, res); if (!host->regs_va) { dev_err(&pdev->dev, "regs ioremap failed\n"); return -ENOMEM; diff --git a/include/linux/mtd/fsmc.h b/include/linux/mtd/fsmc.h index b20029221fb1..d6ed61ef451d 100644 --- 
a/include/linux/mtd/fsmc.h +++ b/include/linux/mtd/fsmc.h @@ -155,9 +155,6 @@ struct fsmc_nand_platform_data { unsigned int width; unsigned int bank; - /* CLE, ALE offsets */ - unsigned int cle_off; - unsigned int ale_off; enum access_mode mode; void (*select_bank)(uint32_t bank, uint32_t busw); -- cgit v1.2.3 From 2f25ae97fe4b424d88d765797c46456c7c0f1bae Mon Sep 17 00:00:00 2001 From: Vipin Kumar Date: Tue, 9 Oct 2012 16:14:53 +0530 Subject: mtd: nand: Increase the ecc placement locations to 640 Few devices like H27UBG8T2CTR have a writesize/oobsize of 8KB/640B. This means that the maximum oobsize has gone up to 640 bytes and consequently the maximum ecc placement locations have also gone up to 640. Signed-off-by: Vipin Kumar Signed-off-by: Artem Bityutskiy --- include/linux/mtd/mtd.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h index 81d61e704599..f9ac2897b86b 100644 --- a/include/linux/mtd/mtd.h +++ b/include/linux/mtd/mtd.h @@ -98,7 +98,7 @@ struct mtd_oob_ops { }; #define MTD_MAX_OOBFREE_ENTRIES_LARGE 32 -#define MTD_MAX_ECCPOS_ENTRIES_LARGE 448 +#define MTD_MAX_ECCPOS_ENTRIES_LARGE 640 /* * Internal ECC layout control structure. For historical reasons, there is a * similar, smaller struct nand_ecclayout_user (in mtd-abi.h) that is retained -- cgit v1.2.3 From 5de0b52ea8f8f5149502867acff2efb5efaf1fc2 Mon Sep 17 00:00:00 2001 From: Huang Shijie Date: Sat, 13 Oct 2012 13:03:29 -0400 Subject: mtd: gpmi: remove unneccessary header The whole gpmi-nand driver has turned to pure devicetree supported. So the linux/mtd/gpmi-nand.h is not neccessary now. Just remove it, and move some macros to the gpmi-nand driver itself. Signed-off-by: Huang Shijie Signed-off-by: Artem Bityutskiy --- drivers/mtd/nand/gpmi-nand/gpmi-lib.c | 1 - drivers/mtd/nand/gpmi-nand/gpmi-nand.c | 7 +++- drivers/mtd/nand/gpmi-nand/gpmi-nand.h | 1 - include/linux/mtd/gpmi-nand.h | 68 ---------------------------------- 4 files changed, 6 insertions(+), 71 deletions(-) delete mode 100644 include/linux/mtd/gpmi-nand.h (limited to 'include/linux') diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c index 3502accd4bc3..1585c5b1c8bf 100644 --- a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c +++ b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c @@ -18,7 +18,6 @@ * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ -#include #include #include diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c index e2c56fc4574b..d37619882fa6 100644 --- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c +++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c @@ -25,7 +25,6 @@ #include #include #include -#include #include #include #include @@ -33,6 +32,12 @@ #include #include "gpmi-nand.h" +/* Resource names for the GPMI NAND driver. 
*/ +#define GPMI_NAND_GPMI_REGS_ADDR_RES_NAME "gpmi-nand" +#define GPMI_NAND_BCH_REGS_ADDR_RES_NAME "bch" +#define GPMI_NAND_BCH_INTERRUPT_RES_NAME "bch" +#define GPMI_NAND_DMA_INTERRUPT_RES_NAME "gpmi-dma" + /* add our owner bbt descriptor */ static uint8_t scan_ff_pattern[] = { 0xff }; static struct nand_bbt_descr gpmi_bbt_descr = { diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.h b/drivers/mtd/nand/gpmi-nand/gpmi-nand.h index 7ac25c1e58f9..3d93a5e39090 100644 --- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.h +++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.h @@ -130,7 +130,6 @@ struct gpmi_nand_data { /* System Interface */ struct device *dev; struct platform_device *pdev; - struct gpmi_nand_platform_data *pdata; /* Resources */ struct resources resources; diff --git a/include/linux/mtd/gpmi-nand.h b/include/linux/mtd/gpmi-nand.h deleted file mode 100644 index ed3c4e09f3d1..000000000000 --- a/include/linux/mtd/gpmi-nand.h +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Copyright (C) 2011 Freescale Semiconductor, Inc. All Rights Reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - */ - -#ifndef __MACH_MXS_GPMI_NAND_H__ -#define __MACH_MXS_GPMI_NAND_H__ - -/* The size of the resources is fixed. */ -#define GPMI_NAND_RES_SIZE 6 - -/* Resource names for the GPMI NAND driver. */ -#define GPMI_NAND_GPMI_REGS_ADDR_RES_NAME "gpmi-nand" -#define GPMI_NAND_GPMI_INTERRUPT_RES_NAME "GPMI NAND GPMI Interrupt" -#define GPMI_NAND_BCH_REGS_ADDR_RES_NAME "bch" -#define GPMI_NAND_BCH_INTERRUPT_RES_NAME "bch" -#define GPMI_NAND_DMA_CHANNELS_RES_NAME "GPMI NAND DMA Channels" -#define GPMI_NAND_DMA_INTERRUPT_RES_NAME "gpmi-dma" - -/** - * struct gpmi_nand_platform_data - GPMI NAND driver platform data. - * - * This structure communicates platform-specific information to the GPMI NAND - * driver that can't be expressed as resources. - * - * @platform_init: A pointer to a function the driver will call to - * initialize the platform (e.g., set up the pin mux). - * @min_prop_delay_in_ns: Minimum propagation delay of GPMI signals to and - * from the NAND Flash device, in nanoseconds. - * @max_prop_delay_in_ns: Maximum propagation delay of GPMI signals to and - * from the NAND Flash device, in nanoseconds. - * @max_chip_count: The maximum number of chips for which the driver - * should configure the hardware. This value most - * likely reflects the number of pins that are - * connected to a NAND Flash device. If this is - * greater than the SoC hardware can support, the - * driver will print a message and fail to initialize. - * @partitions: An optional pointer to an array of partition - * descriptions. - * @partition_count: The number of elements in the partitions array. - */ -struct gpmi_nand_platform_data { - /* SoC hardware information. */ - int (*platform_init)(void); - - /* NAND Flash information. 
*/ - unsigned int min_prop_delay_in_ns; - unsigned int max_prop_delay_in_ns; - unsigned int max_chip_count; - - /* Medium information. */ - struct mtd_partition *partitions; - unsigned partition_count; -}; -#endif -- cgit v1.2.3 From e8a9d8f31c592eea89f1b0d3fd425e7a96944e88 Mon Sep 17 00:00:00 2001 From: Bastian Hecht Date: Fri, 19 Oct 2012 12:15:34 +0200 Subject: mtd: sh_flctl: Minor cleanups Some small fixes to avoid sparse and smatch complain. Other cosmetic fixes as well. - Change of the type of the member index in struct sh_flctl from signed to unsigned. We use index by addressing array members, so unsigned is more concise here. Adapt functions relying on sh_flctl::index. - Remove a blurring cast in write_fiforeg(). - Apply consistent naming scheme when refering to the data buffer. - Shorten some unnecessarily verbose functions. - Remove spaces at start of lines. Signed-off-by: Bastian Hecht Signed-off-by: Artem Bityutskiy --- drivers/mtd/nand/sh_flctl.c | 37 ++++++++++++++++--------------------- include/linux/mtd/sh_flctl.h | 2 +- 2 files changed, 17 insertions(+), 22 deletions(-) (limited to 'include/linux') diff --git a/drivers/mtd/nand/sh_flctl.c b/drivers/mtd/nand/sh_flctl.c index 4fbfe96e37a1..78d18c0f132f 100644 --- a/drivers/mtd/nand/sh_flctl.c +++ b/drivers/mtd/nand/sh_flctl.c @@ -225,7 +225,7 @@ static enum flctl_ecc_res_t wait_recfifo_ready for (i = 0; i < 3; i++) { uint8_t org; - int index; + unsigned int index; data = readl(ecc_reg[i]); @@ -305,28 +305,29 @@ static enum flctl_ecc_res_t read_ecfiforeg return res; } -static void write_fiforeg(struct sh_flctl *flctl, int rlen, int offset) +static void write_fiforeg(struct sh_flctl *flctl, int rlen, + unsigned int offset) { int i, len_4align; - unsigned long *data = (unsigned long *)&flctl->done_buff[offset]; - void *fifo_addr = (void *)FLDTFIFO(flctl); + unsigned long *buf = (unsigned long *)&flctl->done_buff[offset]; len_4align = (rlen + 3) / 4; for (i = 0; i < len_4align; i++) { wait_wfifo_ready(flctl); - writel(cpu_to_be32(data[i]), fifo_addr); + writel(cpu_to_be32(buf[i]), FLDTFIFO(flctl)); } } -static void write_ec_fiforeg(struct sh_flctl *flctl, int rlen, int offset) +static void write_ec_fiforeg(struct sh_flctl *flctl, int rlen, + unsigned int offset) { int i, len_4align; - unsigned long *data = (unsigned long *)&flctl->done_buff[offset]; + unsigned long *buf = (unsigned long *)&flctl->done_buff[offset]; len_4align = (rlen + 3) / 4; for (i = 0; i < len_4align; i++) { wait_wecfifo_ready(flctl); - writel(cpu_to_be32(data[i]), FLECFIFO(flctl)); + writel(cpu_to_be32(buf[i]), FLECFIFO(flctl)); } } @@ -748,41 +749,35 @@ static void flctl_select_chip(struct mtd_info *mtd, int chipnr) static void flctl_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len) { struct sh_flctl *flctl = mtd_to_flctl(mtd); - int index = flctl->index; - memcpy(&flctl->done_buff[index], buf, len); + memcpy(&flctl->done_buff[flctl->index], buf, len); flctl->index += len; } static uint8_t flctl_read_byte(struct mtd_info *mtd) { struct sh_flctl *flctl = mtd_to_flctl(mtd); - int index = flctl->index; uint8_t data; - data = flctl->done_buff[index]; + data = flctl->done_buff[flctl->index]; flctl->index++; return data; } static uint16_t flctl_read_word(struct mtd_info *mtd) { - struct sh_flctl *flctl = mtd_to_flctl(mtd); - int index = flctl->index; - uint16_t data; - uint16_t *buf = (uint16_t *)&flctl->done_buff[index]; + struct sh_flctl *flctl = mtd_to_flctl(mtd); + uint16_t *buf = (uint16_t *)&flctl->done_buff[flctl->index]; - data = *buf; - 
flctl->index += 2; - return data; + flctl->index += 2; + return *buf; } static void flctl_read_buf(struct mtd_info *mtd, uint8_t *buf, int len) { struct sh_flctl *flctl = mtd_to_flctl(mtd); - int index = flctl->index; - memcpy(buf, &flctl->done_buff[index], len); + memcpy(buf, &flctl->done_buff[flctl->index], len); flctl->index += len; } diff --git a/include/linux/mtd/sh_flctl.h b/include/linux/mtd/sh_flctl.h index 01e4b15b280e..481557688d05 100644 --- a/include/linux/mtd/sh_flctl.h +++ b/include/linux/mtd/sh_flctl.h @@ -147,7 +147,7 @@ struct sh_flctl { uint8_t done_buff[2048 + 64]; /* max size 2048 + 64 */ int read_bytes; - int index; + unsigned int index; int seqin_column; /* column in SEQIN cmd */ int seqin_page_addr; /* page_addr in SEQIN cmd */ uint32_t seqin_read_cmd; /* read cmd in SEQIN cmd */ -- cgit v1.2.3 From 83738d87e3a0a4096e1419a65b8228130d183df6 Mon Sep 17 00:00:00 2001 From: Bastian Hecht Date: Fri, 19 Oct 2012 12:15:35 +0200 Subject: mtd: sh_flctl: Add DMA capabilty The code probes if DMA channels can get allocated and tears them down at removal/failure if needed. If available it uses them to transfer the data part (not ECC). On failure we fall back to PIO mode. Based on Guennadi Liakhovetski's code from the sh_mmcif driver. Signed-off-by: Bastian Hecht Reviewed-by: Guennadi Liakhovetski Signed-off-by: Artem Bityutskiy --- drivers/mtd/nand/sh_flctl.c | 173 ++++++++++++++++++++++++++++++++++++++++++- include/linux/mtd/sh_flctl.h | 12 +++ 2 files changed, 183 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/drivers/mtd/nand/sh_flctl.c b/drivers/mtd/nand/sh_flctl.c index 78d18c0f132f..6dc0369aa44b 100644 --- a/drivers/mtd/nand/sh_flctl.c +++ b/drivers/mtd/nand/sh_flctl.c @@ -23,11 +23,15 @@ #include #include +#include #include +#include +#include #include #include #include #include +#include #include #include @@ -106,6 +110,84 @@ static void wait_completion(struct sh_flctl *flctl) writeb(0x0, FLTRCR(flctl)); } +static void flctl_dma_complete(void *param) +{ + struct sh_flctl *flctl = param; + + complete(&flctl->dma_complete); +} + +static void flctl_release_dma(struct sh_flctl *flctl) +{ + if (flctl->chan_fifo0_rx) { + dma_release_channel(flctl->chan_fifo0_rx); + flctl->chan_fifo0_rx = NULL; + } + if (flctl->chan_fifo0_tx) { + dma_release_channel(flctl->chan_fifo0_tx); + flctl->chan_fifo0_tx = NULL; + } +} + +static void flctl_setup_dma(struct sh_flctl *flctl) +{ + dma_cap_mask_t mask; + struct dma_slave_config cfg; + struct platform_device *pdev = flctl->pdev; + struct sh_flctl_platform_data *pdata = pdev->dev.platform_data; + int ret; + + if (!pdata) + return; + + if (pdata->slave_id_fifo0_tx <= 0 || pdata->slave_id_fifo0_rx <= 0) + return; + + /* We can only either use DMA for both Tx and Rx or not use it at all */ + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); + + flctl->chan_fifo0_tx = dma_request_channel(mask, shdma_chan_filter, + (void *)pdata->slave_id_fifo0_tx); + dev_dbg(&pdev->dev, "%s: TX: got channel %p\n", __func__, + flctl->chan_fifo0_tx); + + if (!flctl->chan_fifo0_tx) + return; + + memset(&cfg, 0, sizeof(cfg)); + cfg.slave_id = pdata->slave_id_fifo0_tx; + cfg.direction = DMA_MEM_TO_DEV; + cfg.dst_addr = (dma_addr_t)FLDTFIFO(flctl); + cfg.src_addr = 0; + ret = dmaengine_slave_config(flctl->chan_fifo0_tx, &cfg); + if (ret < 0) + goto err; + + flctl->chan_fifo0_rx = dma_request_channel(mask, shdma_chan_filter, + (void *)pdata->slave_id_fifo0_rx); + dev_dbg(&pdev->dev, "%s: RX: got channel %p\n", __func__, + flctl->chan_fifo0_rx); 
+ + if (!flctl->chan_fifo0_rx) + goto err; + + cfg.slave_id = pdata->slave_id_fifo0_rx; + cfg.direction = DMA_DEV_TO_MEM; + cfg.dst_addr = 0; + cfg.src_addr = (dma_addr_t)FLDTFIFO(flctl); + ret = dmaengine_slave_config(flctl->chan_fifo0_rx, &cfg); + if (ret < 0) + goto err; + + init_completion(&flctl->dma_complete); + + return; + +err: + flctl_release_dma(flctl); +} + static void set_addr(struct mtd_info *mtd, int column, int page_addr) { struct sh_flctl *flctl = mtd_to_flctl(mtd); @@ -261,6 +343,70 @@ static void wait_wecfifo_ready(struct sh_flctl *flctl) timeout_error(flctl, __func__); } +static int flctl_dma_fifo0_transfer(struct sh_flctl *flctl, unsigned long *buf, + int len, enum dma_data_direction dir) +{ + struct dma_async_tx_descriptor *desc = NULL; + struct dma_chan *chan; + enum dma_transfer_direction tr_dir; + dma_addr_t dma_addr; + dma_cookie_t cookie = -EINVAL; + uint32_t reg; + int ret; + + if (dir == DMA_FROM_DEVICE) { + chan = flctl->chan_fifo0_rx; + tr_dir = DMA_DEV_TO_MEM; + } else { + chan = flctl->chan_fifo0_tx; + tr_dir = DMA_MEM_TO_DEV; + } + + dma_addr = dma_map_single(chan->device->dev, buf, len, dir); + + if (dma_addr) + desc = dmaengine_prep_slave_single(chan, dma_addr, len, + tr_dir, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + + if (desc) { + reg = readl(FLINTDMACR(flctl)); + reg |= DREQ0EN; + writel(reg, FLINTDMACR(flctl)); + + desc->callback = flctl_dma_complete; + desc->callback_param = flctl; + cookie = dmaengine_submit(desc); + + dma_async_issue_pending(chan); + } else { + /* DMA failed, fall back to PIO */ + flctl_release_dma(flctl); + dev_warn(&flctl->pdev->dev, + "DMA failed, falling back to PIO\n"); + ret = -EIO; + goto out; + } + + ret = + wait_for_completion_timeout(&flctl->dma_complete, + msecs_to_jiffies(3000)); + + if (ret <= 0) { + chan->device->device_control(chan, DMA_TERMINATE_ALL, 0); + dev_err(&flctl->pdev->dev, "wait_for_completion_timeout\n"); + } + +out: + reg = readl(FLINTDMACR(flctl)); + reg &= ~DREQ0EN; + writel(reg, FLINTDMACR(flctl)); + + dma_unmap_single(chan->device->dev, dma_addr, len, dir); + + /* ret > 0 is success */ + return ret; +} + static void read_datareg(struct sh_flctl *flctl, int offset) { unsigned long data; @@ -279,11 +425,20 @@ static void read_fiforeg(struct sh_flctl *flctl, int rlen, int offset) len_4align = (rlen + 3) / 4; + /* initiate DMA transfer */ + if (flctl->chan_fifo0_rx && rlen >= 32 && + flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_DEV_TO_MEM) > 0) + goto convert; /* DMA success */ + + /* do polling transfer */ for (i = 0; i < len_4align; i++) { wait_rfifo_ready(flctl); buf[i] = readl(FLDTFIFO(flctl)); - buf[i] = be32_to_cpu(buf[i]); } + +convert: + for (i = 0; i < len_4align; i++) + buf[i] = be32_to_cpu(buf[i]); } static enum flctl_ecc_res_t read_ecfiforeg @@ -325,9 +480,19 @@ static void write_ec_fiforeg(struct sh_flctl *flctl, int rlen, unsigned long *buf = (unsigned long *)&flctl->done_buff[offset]; len_4align = (rlen + 3) / 4; + + for (i = 0; i < len_4align; i++) + buf[i] = cpu_to_be32(buf[i]); + + /* initiate DMA transfer */ + if (flctl->chan_fifo0_tx && rlen >= 32 && + flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_MEM_TO_DEV) > 0) + return; /* DMA success */ + + /* do polling transfer */ for (i = 0; i < len_4align; i++) { wait_wecfifo_ready(flctl); - writel(cpu_to_be32(buf[i]), FLECFIFO(flctl)); + writel(buf[i], FLECFIFO(flctl)); } } @@ -925,6 +1090,8 @@ static int __devinit flctl_probe(struct platform_device *pdev) pm_runtime_enable(&pdev->dev); pm_runtime_resume(&pdev->dev); + flctl_setup_dma(flctl); 
+ ret = nand_scan_ident(flctl_mtd, 1, NULL); if (ret) goto err_chip; @@ -942,6 +1109,7 @@ static int __devinit flctl_probe(struct platform_device *pdev) return 0; err_chip: + flctl_release_dma(flctl); pm_runtime_disable(&pdev->dev); free_irq(irq, flctl); err_flste: @@ -955,6 +1123,7 @@ static int __devexit flctl_remove(struct platform_device *pdev) { struct sh_flctl *flctl = platform_get_drvdata(pdev); + flctl_release_dma(flctl); nand_release(&flctl->mtd); pm_runtime_disable(&pdev->dev); free_irq(platform_get_irq(pdev, 0), flctl); diff --git a/include/linux/mtd/sh_flctl.h b/include/linux/mtd/sh_flctl.h index 481557688d05..1c28f8879b1c 100644 --- a/include/linux/mtd/sh_flctl.h +++ b/include/linux/mtd/sh_flctl.h @@ -20,6 +20,7 @@ #ifndef __SH_FLCTL_H__ #define __SH_FLCTL_H__ +#include #include #include #include @@ -107,6 +108,7 @@ #define ESTERINTE (0x1 << 24) /* ECC error interrupt enable */ #define AC1CLR (0x1 << 19) /* ECC FIFO clear */ #define AC0CLR (0x1 << 18) /* Data FIFO clear */ +#define DREQ0EN (0x1 << 16) /* FLDTFIFODMA Request Enable */ #define ECERB (0x1 << 9) /* ECC error */ #define STERB (0x1 << 8) /* Status error */ #define STERINTE (0x1 << 4) /* Status error enable */ @@ -138,6 +140,8 @@ enum flctl_ecc_res_t { FL_TIMEOUT }; +struct dma_chan; + struct sh_flctl { struct mtd_info mtd; struct nand_chip chip; @@ -161,6 +165,11 @@ struct sh_flctl { unsigned hwecc:1; /* Hardware ECC (0 = disabled, 1 = enabled) */ unsigned holden:1; /* Hardware has FLHOLDCR and HOLDEN is set */ unsigned qos_request:1; /* QoS request to prevent deep power shutdown */ + + /* DMA related objects */ + struct dma_chan *chan_fifo0_rx; + struct dma_chan *chan_fifo0_tx; + struct completion dma_complete; }; struct sh_flctl_platform_data { @@ -170,6 +179,9 @@ struct sh_flctl_platform_data { unsigned has_hwecc:1; unsigned use_holden:1; + + unsigned int slave_id_fifo0_tx; + unsigned int slave_id_fifo0_rx; }; static inline struct sh_flctl *mtd_to_flctl(struct mtd_info *mtdinfo) -- cgit v1.2.3 From 9ef525a9141b14d23613faad303cf48a20814f1b Mon Sep 17 00:00:00 2001 From: "Robert P. J. Day" Date: Thu, 25 Oct 2012 09:43:10 -0400 Subject: mtd: Fix kernel-doc content to avoid warning. Add missing colons to fix kernel-doc generation warnings. Signed-off-by: Robert P. J. Day Signed-off-by: Artem Bityutskiy --- include/linux/mtd/nand.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h index 24e915957e4f..9d8a6048aacd 100644 --- a/include/linux/mtd/nand.h +++ b/include/linux/mtd/nand.h @@ -471,8 +471,8 @@ struct nand_buffers { * non 0 if ONFI supported. * @onfi_params: [INTERN] holds the ONFI page parameter when ONFI is * supported, 0 otherwise. - * @onfi_set_features [REPLACEABLE] set the features for ONFI nand - * @onfi_get_features [REPLACEABLE] get the features for ONFI nand + * @onfi_set_features: [REPLACEABLE] set the features for ONFI nand + * @onfi_get_features: [REPLACEABLE] get the features for ONFI nand * @ecclayout: [REPLACEABLE] the default ECC placement scheme * @bbt: [INTERN] bad block table pointer * @bbt_td: [REPLACEABLE] bad block table descriptor for flash -- cgit v1.2.3 From 3e9ce49e0ef95e22790a74720f0068696b2477c9 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Mon, 29 Oct 2012 22:47:26 +0530 Subject: mtd: map: Fix compilation warning This patch is an attempt to fix following compilation warning. 
In file included from drivers/mtd/chips/cfi_cmdset_0001.c:35:0: drivers/mtd/chips/cfi_cmdset_0001.c: In function 'cfi_intelext_write_words': include/linux/mtd/map.h:331:11: warning: 'r.x[0]' may be used uninitialized in this function [-Wmaybe-uninitialized] I could have used uninitialized_var() too, but didn't used it as the final else part of map_word_load() is missing. So there is a chance that it might be passed uninitialized. Better initialize to zero. Signed-off-by: Viresh Kumar Signed-off-by: Artem Bityutskiy --- include/linux/mtd/map.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/mtd/map.h b/include/linux/mtd/map.h index 3595a0236b0f..56c7936e0c65 100644 --- a/include/linux/mtd/map.h +++ b/include/linux/mtd/map.h @@ -328,7 +328,7 @@ static inline int map_word_bitsset(struct map_info *map, map_word val1, map_word static inline map_word map_word_load(struct map_info *map, const void *ptr) { - map_word r; + map_word r = {{0} }; if (map_bankwidth_is_1(map)) r.x[0] = *(unsigned char *)ptr; -- cgit v1.2.3 From ebd5ac165f2aaefb767c53112c2010b0ff3df688 Mon Sep 17 00:00:00 2001 From: Shinya Kuribayashi Date: Wed, 24 Oct 2012 19:58:10 +0900 Subject: i2c: i2c-sh_mobile: support I2C hardware block with a faster operating clock On newer SH-/R-Mobile SoCs, a clock supply to the I2C hardware block, which is used to generate the SCL clock output, is getting faster than before, while on the other hand, the SCL clock control registers, ICCH and ICCL, stay unchanged in 9-bit-wide (8+1). On such silicons, the internal SCL clock counter gets incremented every 2 clocks of the operating clock. This patch makes it configurable through platform data. Signed-off-by: Shinya Kuribayashi Signed-off-by: Wolfram Sang --- drivers/i2c/busses/i2c-sh_mobile.c | 5 +++++ include/linux/i2c/i2c-sh_mobile.h | 1 + 2 files changed, 6 insertions(+) (limited to 'include/linux') diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c index 4dc0cc3611c2..4c283583bea0 100644 --- a/drivers/i2c/busses/i2c-sh_mobile.c +++ b/drivers/i2c/busses/i2c-sh_mobile.c @@ -120,6 +120,7 @@ struct sh_mobile_i2c_data { void __iomem *reg; struct i2c_adapter adap; unsigned long bus_speed; + unsigned int clks_per_count; struct clk *clk; u_int8_t icic; u_int8_t flags; @@ -231,6 +232,7 @@ static void sh_mobile_i2c_init(struct sh_mobile_i2c_data *pd) /* Get clock rate after clock is enabled */ clk_enable(pd->clk); i2c_clk_khz = clk_get_rate(pd->clk) / 1000; + i2c_clk_khz /= pd->clks_per_count; if (pd->bus_speed == STANDARD_MODE) { tLOW = 47; /* tLOW = 4.7 us */ @@ -658,6 +660,9 @@ static int sh_mobile_i2c_probe(struct platform_device *dev) pd->bus_speed = STANDARD_MODE; if (pdata && pdata->bus_speed) pd->bus_speed = pdata->bus_speed; + pd->clks_per_count = 1; + if (pdata && pdata->clks_per_count) + pd->clks_per_count = pdata->clks_per_count; /* The IIC blocks on SH-Mobile ARM processors * come with two new bits in ICIC. 
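As an aside, board code opts in to the new scaling purely through platform data. A minimal sketch, assuming the usual platform-device registration (the device id, bus speed and the choice of clks_per_count = 2 below are placeholders for illustration, not taken from any real board file):

#include <linux/platform_device.h>
#include <linux/i2c/i2c-sh_mobile.h>

static struct i2c_sh_mobile_platform_data i2c0_pdata = {
	.bus_speed	= 400000,	/* fast mode */
	.clks_per_count	= 2,	/* SCL counter advances once per two input clocks */
};

static struct platform_device i2c0_device = {
	.name	= "i2c-sh_mobile",
	.id	= 0,
	.dev	= {
		.platform_data	= &i2c0_pdata,
	},
};

With clks_per_count = 2 the driver halves the clock rate it feeds into the ICCH/ICCL calculation, matching the faster input clock described above; boards with the older blocks simply leave the field unset and keep the default of 1.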
diff --git a/include/linux/i2c/i2c-sh_mobile.h b/include/linux/i2c/i2c-sh_mobile.h index beda7081aead..06e3089795fb 100644 --- a/include/linux/i2c/i2c-sh_mobile.h +++ b/include/linux/i2c/i2c-sh_mobile.h @@ -5,6 +5,7 @@ struct i2c_sh_mobile_platform_data { unsigned long bus_speed; + unsigned int clks_per_count; }; #endif /* __I2C_SH_MOBILE_H__ */ -- cgit v1.2.3 From d611d41b46c96195b9a168a21992782458826e07 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 6 Nov 2012 22:55:27 +0100 Subject: mtd: diskonchip: use inline functions for DocRead/DocWrite The diskonchip drivers traditionally use home-grown macros for doing MMIO accesses, which cause a lot of warnings, at least on ARM machines: drivers/mtd/devices/doc2000.c: In function 'doc_write': drivers/mtd/devices/doc2000.c:854:5: warning: value computed is not used [-Wunused-value] drivers/mtd/devices/doc2000.c: In function 'doc_erase': drivers/mtd/devices/doc2000.c:1123:5: warning: value computed is not used [-Wunused-value drivers/mtd/nand/diskonchip.c: In function 'doc2000_read_byte': drivers/mtd/nand/diskonchip.c:318:3: warning: value computed is not used [-Wunused-value] A nicer solution is to use the architecture-defined I/O accessors. Here, we use the __raw_readl/__raw_writel style, instead of the proper readl/writel ones, in order to preserve the odd semantics of the existing macros that have their own barrier implementation and no byte swap. It would be nice to fix this properly and use the correct accessors as well as make the word size independent from the architecture, but I guess the hardware is obsolete enough that we should better not mess the driver an more than necessary. Signed-off-by: Arnd Bergmann Signed-off-by: Artem Bityutskiy --- include/linux/mtd/doc2000.h | 22 ++++++++++++++++++---- 1 file changed, 18 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mtd/doc2000.h b/include/linux/mtd/doc2000.h index 0f6fea73a1f6..407d1e556c39 100644 --- a/include/linux/mtd/doc2000.h +++ b/include/linux/mtd/doc2000.h @@ -92,12 +92,26 @@ * Others use readb/writeb */ #if defined(__arm__) -#define ReadDOC_(adr, reg) ((unsigned char)(*(volatile __u32 *)(((unsigned long)adr)+((reg)<<2)))) -#define WriteDOC_(d, adr, reg) do{ *(volatile __u32 *)(((unsigned long)adr)+((reg)<<2)) = (__u32)d; wmb();} while(0) +static inline u8 ReadDOC_(u32 __iomem *addr, unsigned long reg) +{ + return __raw_readl(addr + reg); +} +static inline void WriteDOC_(u8 data, u32 __iomem *addr, unsigned long reg) +{ + __raw_writel(data, addr + reg); + wmb(); +} #define DOC_IOREMAP_LEN 0x8000 #elif defined(__ppc__) -#define ReadDOC_(adr, reg) ((unsigned char)(*(volatile __u16 *)(((unsigned long)adr)+((reg)<<1)))) -#define WriteDOC_(d, adr, reg) do{ *(volatile __u16 *)(((unsigned long)adr)+((reg)<<1)) = (__u16)d; wmb();} while(0) +static inline u8 ReadDOC_(u16 __iomem *addr, unsigned long reg) +{ + return __raw_readw(addr + reg); +} +static inline void WriteDOC_(u8 data, u16 __iomem *addr, unsigned long reg) +{ + __raw_writew(data, addr + reg); + wmb(); +} #define DOC_IOREMAP_LEN 0x4000 #else #define ReadDOC_(adr, reg) readb((void __iomem *)(adr) + (reg)) -- cgit v1.2.3 From 5d27aa5af04f58f3020de1c224dcf8a62151fd58 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 6 Nov 2012 22:55:28 +0100 Subject: mtd: uninitialized variable warning in map.h The map_word_load() function initializes exactly as many words in the buffer as required, but gcc cannot figure this out and gives a misleading warning. 
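For readers unfamiliar with this class of false positive, a stripped-down sketch of the pattern gcc trips over (illustrative only, not the actual map.h code; the bankwidth argument stands in for map_bankwidth_is_1() and friends):

struct word { unsigned long x[2]; };

static struct word load(int bankwidth, const void *ptr)
{
	struct word r;	/* gcc: 'r.x[0]' may be used uninitialized */

	if (bankwidth == 1)
		r.x[0] = *(const unsigned char *)ptr;
	else if (bankwidth == 2)
		r.x[0] = *(const unsigned short *)ptr;
	/* no final else, so gcc assumes r.x[0] can escape unwritten */

	return r;
}

For any valid map exactly one of the branches applies, so the word is always written before use; gcc just cannot prove it.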
Marking the local variable as uninitialized_var shuts up that warning. Without this patch, building acs5k_defconfig results in: drivers/mtd/chips/cfi_cmdset_0002.c: In function 'cfi_amdstd_panic_write': include/linux/mtd/map.h:331:11: warning: 'r.x[0]' may be used uninitialized in this function [-Wuninitialized] drivers/mtd/chips/cfi_cmdset_0002.c: In function 'cfi_amdstd_write_words': include/linux/mtd/map.h:331:11: warning: 'r.x[0]' may be used uninitialized in this function [-Wuninitialized] drivers/mtd/chips/cfi_cmdset_0001.c: In function 'cfi_intelext_write_words': include/linux/mtd/map.h:331:11: warning: 'r.x[0]' may be used uninitialized in this function [-Wuninitialized] Signed-off-by: Arnd Bergmann Signed-off-by: Artem Bityutskiy --- include/linux/mtd/map.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/mtd/map.h b/include/linux/mtd/map.h index 56c7936e0c65..f6eb4332ac92 100644 --- a/include/linux/mtd/map.h +++ b/include/linux/mtd/map.h @@ -391,7 +391,7 @@ static inline map_word map_word_ff(struct map_info *map) static inline map_word inline_map_read(struct map_info *map, unsigned long ofs) { - map_word r; + map_word uninitialized_var(r); if (map_bankwidth_is_1(map)) r.x[0] = __raw_readb(map->virt + ofs); -- cgit v1.2.3 From 0857ba3c24c308f42a242fe8a1894772750230ce Mon Sep 17 00:00:00 2001 From: Aaro Koskinen Date: Sun, 18 Nov 2012 18:36:19 +0200 Subject: i2c: i2c-cbus-gpio: introduce driver Add i2c driver to enable access to devices behind CBUS on Nokia Internet Tablets. The patch also adds CBUS I2C configuration for N8x0 which is one of the users of this driver. Acked-by: Felipe Balbi Acked-by: Tony Lindgren Signed-off-by: Aaro Koskinen Signed-off-by: Wolfram Sang --- .../devicetree/bindings/i2c/i2c-cbus-gpio.txt | 27 ++ arch/arm/mach-omap2/board-n8x0.c | 42 +++ drivers/i2c/busses/Kconfig | 10 + drivers/i2c/busses/Makefile | 1 + drivers/i2c/busses/i2c-cbus-gpio.c | 300 +++++++++++++++++++++ include/linux/platform_data/i2c-cbus-gpio.h | 27 ++ 6 files changed, 407 insertions(+) create mode 100644 Documentation/devicetree/bindings/i2c/i2c-cbus-gpio.txt create mode 100644 drivers/i2c/busses/i2c-cbus-gpio.c create mode 100644 include/linux/platform_data/i2c-cbus-gpio.h (limited to 'include/linux') diff --git a/Documentation/devicetree/bindings/i2c/i2c-cbus-gpio.txt b/Documentation/devicetree/bindings/i2c/i2c-cbus-gpio.txt new file mode 100644 index 000000000000..8ce9cd2855b5 --- /dev/null +++ b/Documentation/devicetree/bindings/i2c/i2c-cbus-gpio.txt @@ -0,0 +1,27 @@ +Device tree bindings for i2c-cbus-gpio driver + +Required properties: + - compatible = "i2c-cbus-gpio"; + - gpios: clk, dat, sel + - #address-cells = <1>; + - #size-cells = <0>; + +Optional properties: + - child nodes conforming to i2c bus binding + +Example: + +i2c@0 { + compatible = "i2c-cbus-gpio"; + gpios = <&gpio 66 0 /* clk */ + &gpio 65 0 /* dat */ + &gpio 64 0 /* sel */ + >; + #address-cells = <1>; + #size-cells = <0>; + + retu-mfd: retu@1 { + compatible = "retu-mfd"; + reg = <0x1>; + }; +}; diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c index d95f727ca39a..bbfd74263c42 100644 --- a/arch/arm/mach-omap2/board-n8x0.c +++ b/arch/arm/mach-omap2/board-n8x0.c @@ -16,10 +16,12 @@ #include #include #include +#include #include #include #include #include +#include #include #include #include @@ -39,6 +41,45 @@ #define TUSB6010_GPIO_ENABLE 0 #define TUSB6010_DMACHAN 0x3f +#if defined(CONFIG_I2C_CBUS_GPIO) || 
defined(CONFIG_I2C_CBUS_GPIO_MODULE) +static struct i2c_cbus_platform_data n8x0_cbus_data = { + .clk_gpio = 66, + .dat_gpio = 65, + .sel_gpio = 64, +}; + +static struct platform_device n8x0_cbus_device = { + .name = "i2c-cbus-gpio", + .id = 3, + .dev = { + .platform_data = &n8x0_cbus_data, + }, +}; + +static struct i2c_board_info n8x0_i2c_board_info_3[] __initdata = { + { + I2C_BOARD_INFO("retu-mfd", 0x01), + }, +}; + +static void __init n8x0_cbus_init(void) +{ + const int retu_irq_gpio = 108; + + if (gpio_request_one(retu_irq_gpio, GPIOF_IN, "Retu IRQ")) + return; + irq_set_irq_type(gpio_to_irq(retu_irq_gpio), IRQ_TYPE_EDGE_RISING); + n8x0_i2c_board_info_3[0].irq = gpio_to_irq(retu_irq_gpio); + i2c_register_board_info(3, n8x0_i2c_board_info_3, + ARRAY_SIZE(n8x0_i2c_board_info_3)); + platform_device_register(&n8x0_cbus_device); +} +#else /* CONFIG_I2C_CBUS_GPIO */ +static void __init n8x0_cbus_init(void) +{ +} +#endif /* CONFIG_I2C_CBUS_GPIO */ + #if defined(CONFIG_USB_MUSB_TUSB6010) || defined(CONFIG_USB_MUSB_TUSB6010_MODULE) /* * Enable or disable power to TUSB6010. When enabling, turn on 3.3 V and @@ -677,6 +718,7 @@ static void __init n8x0_init_machine(void) gpmc_onenand_init(board_onenand_data); n8x0_mmc_init(); n8x0_usb_init(); + n8x0_cbus_init(); } MACHINE_START(NOKIA_N800, "Nokia N800") diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index e9df4612b7eb..e949edf644d4 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -337,6 +337,16 @@ config I2C_BLACKFIN_TWI_CLK_KHZ help The unit of the TWI clock is kHz. +config I2C_CBUS_GPIO + tristate "CBUS I2C driver" + depends on GENERIC_GPIO + help + Support for CBUS access using I2C API. Mostly relevant for Nokia + Internet Tablets (770, N800 and N810). + + This driver can also be built as a module. If so, the module + will be called i2c-cbus-gpio. + config I2C_CPM tristate "Freescale CPM1 or CPM2 (MPC8xx/826x)" depends on (CPM1 || CPM2) && OF_I2C diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile index 395b516ffa08..f9e3e0b5c827 100644 --- a/drivers/i2c/busses/Makefile +++ b/drivers/i2c/busses/Makefile @@ -31,6 +31,7 @@ obj-$(CONFIG_I2C_POWERMAC) += i2c-powermac.o obj-$(CONFIG_I2C_AT91) += i2c-at91.o obj-$(CONFIG_I2C_AU1550) += i2c-au1550.o obj-$(CONFIG_I2C_BLACKFIN_TWI) += i2c-bfin-twi.o +obj-$(CONFIG_I2C_CBUS_GPIO) += i2c-cbus-gpio.o obj-$(CONFIG_I2C_CPM) += i2c-cpm.o obj-$(CONFIG_I2C_DAVINCI) += i2c-davinci.o obj-$(CONFIG_I2C_DESIGNWARE_CORE) += i2c-designware-core.o diff --git a/drivers/i2c/busses/i2c-cbus-gpio.c b/drivers/i2c/busses/i2c-cbus-gpio.c new file mode 100644 index 000000000000..98386d659318 --- /dev/null +++ b/drivers/i2c/busses/i2c-cbus-gpio.c @@ -0,0 +1,300 @@ +/* + * CBUS I2C driver for Nokia Internet Tablets. + * + * Copyright (C) 2004-2010 Nokia Corporation + * + * Based on code written by Juha Yrjölä, David Weinehall, Mikko Ylinen and + * Felipe Balbi. Converted to I2C driver by Aaro Koskinen. + * + * This file is subject to the terms and conditions of the GNU General + * Public License. See the file "COPYING" in the main directory of this + * archive for more details. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * Bit counts are derived from Nokia implementation. These should be checked + * if other CBUS implementations appear. + */ +#define CBUS_ADDR_BITS 3 +#define CBUS_REG_BITS 5 + +struct cbus_host { + spinlock_t lock; /* host lock */ + struct device *dev; + int clk_gpio; + int dat_gpio; + int sel_gpio; +}; + +/** + * cbus_send_bit - sends one bit over the bus + * @host: the host we're using + * @bit: one bit of information to send + */ +static void cbus_send_bit(struct cbus_host *host, unsigned bit) +{ + gpio_set_value(host->dat_gpio, bit ? 1 : 0); + gpio_set_value(host->clk_gpio, 1); + gpio_set_value(host->clk_gpio, 0); +} + +/** + * cbus_send_data - sends @len amount of data over the bus + * @host: the host we're using + * @data: the data to send + * @len: size of the transfer + */ +static void cbus_send_data(struct cbus_host *host, unsigned data, unsigned len) +{ + int i; + + for (i = len; i > 0; i--) + cbus_send_bit(host, data & (1 << (i - 1))); +} + +/** + * cbus_receive_bit - receives one bit from the bus + * @host: the host we're using + */ +static int cbus_receive_bit(struct cbus_host *host) +{ + int ret; + + gpio_set_value(host->clk_gpio, 1); + ret = gpio_get_value(host->dat_gpio); + gpio_set_value(host->clk_gpio, 0); + return ret; +} + +/** + * cbus_receive_word - receives 16-bit word from the bus + * @host: the host we're using + */ +static int cbus_receive_word(struct cbus_host *host) +{ + int ret = 0; + int i; + + for (i = 16; i > 0; i--) { + int bit = cbus_receive_bit(host); + + if (bit < 0) + return bit; + + if (bit) + ret |= 1 << (i - 1); + } + return ret; +} + +/** + * cbus_transfer - transfers data over the bus + * @host: the host we're using + * @rw: read/write flag + * @dev: device address + * @reg: register address + * @data: if @rw == I2C_SBUS_WRITE data to send otherwise 0 + */ +static int cbus_transfer(struct cbus_host *host, char rw, unsigned dev, + unsigned reg, unsigned data) +{ + unsigned long flags; + int ret; + + /* We don't want interrupts disturbing our transfer */ + spin_lock_irqsave(&host->lock, flags); + + /* Reset state and start of transfer, SEL stays down during transfer */ + gpio_set_value(host->sel_gpio, 0); + + /* Set the DAT pin to output */ + gpio_direction_output(host->dat_gpio, 1); + + /* Send the device address */ + cbus_send_data(host, dev, CBUS_ADDR_BITS); + + /* Send the rw flag */ + cbus_send_bit(host, rw == I2C_SMBUS_READ); + + /* Send the register address */ + cbus_send_data(host, reg, CBUS_REG_BITS); + + if (rw == I2C_SMBUS_WRITE) { + cbus_send_data(host, data, 16); + ret = 0; + } else { + ret = gpio_direction_input(host->dat_gpio); + if (ret) { + dev_dbg(host->dev, "failed setting direction\n"); + goto out; + } + gpio_set_value(host->clk_gpio, 1); + + ret = cbus_receive_word(host); + if (ret < 0) { + dev_dbg(host->dev, "failed receiving data\n"); + goto out; + } + } + + /* Indicate end of transfer, SEL goes up until next transfer */ + gpio_set_value(host->sel_gpio, 1); + gpio_set_value(host->clk_gpio, 1); + gpio_set_value(host->clk_gpio, 0); + +out: + spin_unlock_irqrestore(&host->lock, flags); + + return ret; +} + +static int cbus_i2c_smbus_xfer(struct i2c_adapter *adapter, + u16 addr, + unsigned short flags, + char read_write, + u8 command, + int size, + union i2c_smbus_data *data) +{ + struct cbus_host *chost = i2c_get_adapdata(adapter); + int ret; + + if (size != I2C_SMBUS_WORD_DATA) + return 
-EINVAL; + + ret = cbus_transfer(chost, read_write == I2C_SMBUS_READ, addr, + command, data->word); + if (ret < 0) + return ret; + + if (read_write == I2C_SMBUS_READ) + data->word = ret; + + return 0; +} + +static u32 cbus_i2c_func(struct i2c_adapter *adapter) +{ + return I2C_FUNC_SMBUS_READ_WORD_DATA | I2C_FUNC_SMBUS_WRITE_WORD_DATA; +} + +static const struct i2c_algorithm cbus_i2c_algo = { + .smbus_xfer = cbus_i2c_smbus_xfer, + .functionality = cbus_i2c_func, +}; + +static int cbus_i2c_remove(struct platform_device *pdev) +{ + struct i2c_adapter *adapter = platform_get_drvdata(pdev); + + return i2c_del_adapter(adapter); +} + +static int cbus_i2c_probe(struct platform_device *pdev) +{ + struct i2c_adapter *adapter; + struct cbus_host *chost; + int ret; + + adapter = devm_kzalloc(&pdev->dev, sizeof(struct i2c_adapter), + GFP_KERNEL); + if (!adapter) + return -ENOMEM; + + chost = devm_kzalloc(&pdev->dev, sizeof(*chost), GFP_KERNEL); + if (!chost) + return -ENOMEM; + + if (pdev->dev.of_node) { + struct device_node *dnode = pdev->dev.of_node; + if (of_gpio_count(dnode) != 3) + return -ENODEV; + chost->clk_gpio = of_get_gpio(dnode, 0); + chost->dat_gpio = of_get_gpio(dnode, 1); + chost->sel_gpio = of_get_gpio(dnode, 2); + } else if (pdev->dev.platform_data) { + struct i2c_cbus_platform_data *pdata = pdev->dev.platform_data; + chost->clk_gpio = pdata->clk_gpio; + chost->dat_gpio = pdata->dat_gpio; + chost->sel_gpio = pdata->sel_gpio; + } else { + return -ENODEV; + } + + adapter->owner = THIS_MODULE; + adapter->class = I2C_CLASS_HWMON; + adapter->dev.parent = &pdev->dev; + adapter->nr = pdev->id; + adapter->timeout = HZ; + adapter->algo = &cbus_i2c_algo; + strlcpy(adapter->name, "CBUS I2C adapter", sizeof(adapter->name)); + + spin_lock_init(&chost->lock); + chost->dev = &pdev->dev; + + ret = devm_gpio_request_one(&pdev->dev, chost->clk_gpio, + GPIOF_OUT_INIT_LOW, "CBUS clk"); + if (ret) + return ret; + + ret = devm_gpio_request_one(&pdev->dev, chost->dat_gpio, GPIOF_IN, + "CBUS data"); + if (ret) + return ret; + + ret = devm_gpio_request_one(&pdev->dev, chost->sel_gpio, + GPIOF_OUT_INIT_HIGH, "CBUS sel"); + if (ret) + return ret; + + i2c_set_adapdata(adapter, chost); + platform_set_drvdata(pdev, adapter); + + return i2c_add_numbered_adapter(adapter); +} + +#if defined(CONFIG_OF) +static const struct of_device_id i2c_cbus_dt_ids[] = { + { .compatible = "i2c-cbus-gpio", }, + { } +}; +MODULE_DEVICE_TABLE(of, i2c_cbus_dt_ids); +#endif + +static struct platform_driver cbus_i2c_driver = { + .probe = cbus_i2c_probe, + .remove = cbus_i2c_remove, + .driver = { + .owner = THIS_MODULE, + .name = "i2c-cbus-gpio", + }, +}; +module_platform_driver(cbus_i2c_driver); + +MODULE_ALIAS("platform:i2c-cbus-gpio"); +MODULE_DESCRIPTION("CBUS I2C driver"); +MODULE_AUTHOR("Juha Yrjölä"); +MODULE_AUTHOR("David Weinehall"); +MODULE_AUTHOR("Mikko Ylinen"); +MODULE_AUTHOR("Felipe Balbi"); +MODULE_AUTHOR("Aaro Koskinen "); +MODULE_LICENSE("GPL"); diff --git a/include/linux/platform_data/i2c-cbus-gpio.h b/include/linux/platform_data/i2c-cbus-gpio.h new file mode 100644 index 000000000000..6faa992a9502 --- /dev/null +++ b/include/linux/platform_data/i2c-cbus-gpio.h @@ -0,0 +1,27 @@ +/* + * i2c-cbus-gpio.h - CBUS I2C platform_data definition + * + * Copyright (C) 2004-2009 Nokia Corporation + * + * Written by Felipe Balbi and Aaro Koskinen. + * + * This file is subject to the terms and conditions of the GNU General + * Public License. See the file "COPYING" in the main directory of this + * archive for more details. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __INCLUDE_LINUX_I2C_CBUS_GPIO_H +#define __INCLUDE_LINUX_I2C_CBUS_GPIO_H + +struct i2c_cbus_platform_data { + int dat_gpio; + int clk_gpio; + int sel_gpio; +}; + +#endif /* __INCLUDE_LINUX_I2C_CBUS_GPIO_H */ -- cgit v1.2.3 From ae72ae676045274c82f3c25159a9dd7cfcf5ffae Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Tue, 20 Nov 2012 11:02:55 -0500 Subject: NFSv4.1: Don't confuse CREATE_SESSION arguments and results Don't store the target request and response sizes in the same variables used to store the server's replies to those targets. Signed-off-by: Trond Myklebust --- fs/nfs/nfs4proc.c | 30 +++++++++++++++++------------- include/linux/nfs_fs_sb.h | 3 +++ 2 files changed, 20 insertions(+), 13 deletions(-) (limited to 'include/linux') diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index a32d953b08de..3e572dc316e4 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -5807,8 +5807,8 @@ void nfs4_destroy_session(struct nfs4_session *session) static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args) { struct nfs4_session *session = args->client->cl_session; - unsigned int mxrqst_sz = session->fc_attrs.max_rqst_sz, - mxresp_sz = session->fc_attrs.max_resp_sz; + unsigned int mxrqst_sz = session->fc_target_max_rqst_sz, + mxresp_sz = session->fc_target_max_resp_sz; if (mxrqst_sz == 0) mxrqst_sz = NFS_MAX_FILE_IO_SIZE; @@ -6015,24 +6015,28 @@ int nfs4_init_session(struct nfs_server *server) { struct nfs_client *clp = server->nfs_client; struct nfs4_session *session; - unsigned int rsize, wsize; + unsigned int target_max_rqst_sz = NFS_MAX_FILE_IO_SIZE; + unsigned int target_max_resp_sz = NFS_MAX_FILE_IO_SIZE; if (!nfs4_has_session(clp)) return 0; + if (server->rsize != 0) + target_max_resp_sz = server->rsize; + target_max_resp_sz += nfs41_maxread_overhead; + + if (server->wsize != 0) + target_max_rqst_sz = server->wsize; + target_max_rqst_sz += nfs41_maxwrite_overhead; + session = clp->cl_session; spin_lock(&clp->cl_lock); if (test_and_clear_bit(NFS4_SESSION_INITING, &session->session_state)) { - - rsize = server->rsize; - if (rsize == 0) - rsize = NFS_MAX_FILE_IO_SIZE; - wsize = server->wsize; - if (wsize == 0) - wsize = NFS_MAX_FILE_IO_SIZE; - - session->fc_attrs.max_rqst_sz = wsize + nfs41_maxwrite_overhead; - session->fc_attrs.max_resp_sz = rsize + nfs41_maxread_overhead; + /* Initialise targets and channel attributes */ + session->fc_target_max_rqst_sz = target_max_rqst_sz; + session->fc_attrs.max_rqst_sz = target_max_rqst_sz; + session->fc_target_max_resp_sz = target_max_resp_sz; + session->fc_attrs.max_resp_sz = target_max_resp_sz; } spin_unlock(&clp->cl_lock); diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h index a9e76ee1adca..97c8f9191880 100644 --- a/include/linux/nfs_fs_sb.h +++ b/include/linux/nfs_fs_sb.h @@ -242,6 +242,9 @@ struct nfs4_session { struct nfs4_channel_attrs bc_attrs; struct nfs4_slot_table bc_slot_table; struct nfs_client *clp; + /* Create session arguments */ + unsigned int fc_target_max_rqst_sz; + unsigned int fc_target_max_resp_sz; }; #endif /* CONFIG_NFS_V4 */ -- cgit v1.2.3 From 933602e368c4452260c9bff4fbb3baba35cf987a Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Fri, 16 Nov 2012 12:12:38 -0500 Subject: NFSv4.1: Shrink struct nfs4_sequence_res 
by moving sr_renewal_time Store the renewal time inside the session slot instead. Signed-off-by: Trond Myklebust --- fs/nfs/nfs4proc.c | 15 +++++++++------ include/linux/nfs_xdr.h | 2 +- 2 files changed, 10 insertions(+), 7 deletions(-) (limited to 'include/linux') diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 5e5cc5a5065f..14b39742b6e4 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -486,6 +486,7 @@ static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res) static int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res) { + struct nfs4_slot *slot; unsigned long timestamp; struct nfs_client *clp; @@ -502,12 +503,14 @@ static int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res * if (!RPC_WAS_SENT(task)) goto out; + slot = res->sr_slot; + /* Check the SEQUENCE operation status */ switch (res->sr_status) { case 0: /* Update the slot's sequence and clientid lease timer */ - ++res->sr_slot->seq_nr; - timestamp = res->sr_renewal_time; + ++slot->seq_nr; + timestamp = slot->renewal_time; clp = res->sr_session->clp; do_renew_lease(clp, timestamp); /* Check sequence flags */ @@ -521,12 +524,12 @@ static int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res * */ dprintk("%s: slot=%td seq=%d: Operation in progress\n", __func__, - res->sr_slot - res->sr_session->fc_slot_table.slots, - res->sr_slot->seq_nr); + slot - res->sr_session->fc_slot_table.slots, + slot->seq_nr); goto out_retry; default: /* Just update the slot sequence no. */ - ++res->sr_slot->seq_nr; + ++slot->seq_nr; } out: /* The session may be reset by one of the error handlers. */ @@ -637,6 +640,7 @@ int nfs41_setup_sequence(struct nfs4_session *session, rpc_task_set_priority(task, RPC_PRIORITY_NORMAL); slot = tbl->slots + slotid; + slot->renewal_time = jiffies; args->sa_session = session; args->sa_slotid = slotid; @@ -644,7 +648,6 @@ int nfs41_setup_sequence(struct nfs4_session *session, res->sr_session = session; res->sr_slot = slot; - res->sr_renewal_time = jiffies; res->sr_status_flags = 0; /* * sr_status is only set in decode_sequence, and so will remain diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index a73ea89789d1..9cb1c63a70c2 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -187,6 +187,7 @@ struct nfs4_channel_attrs { /* nfs41 sessions slot seqid */ struct nfs4_slot { + unsigned long renewal_time; u32 seq_nr; }; @@ -200,7 +201,6 @@ struct nfs4_sequence_res { struct nfs4_session *sr_session; struct nfs4_slot *sr_slot; /* slot used to send request */ int sr_status; /* sequence operation status */ - unsigned long sr_renewal_time; u32 sr_status_flags; }; -- cgit v1.2.3 From 22a8578fca5a47e643bb4f70c232d0ec84db9e4e Mon Sep 17 00:00:00 2001 From: Ezequiel Garcia Date: Sat, 10 Nov 2012 13:08:20 -0300 Subject: mtd: mtd_blkdevs: Replace request handler kthread with a workqueue By replacing a kthread with a workqueue, the code is now a bit clearer. There's also a slight reduction of code size (numbers apply for x86): Before: text data bss dec hex filename 3248 36 0 3284 cd4 drivers/mtd/mtd_blkdevs.o After: text data bss dec hex filename 3150 36 0 3186 c72 drivers/mtd/mtd_blkdevs.o Due to lack of real hardware, tests have been performed on an emulated environment with mtdswap and mtdblock over nandsim devices. Some real testing should be done, before merging this patch. 
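For context, the generic shape of the conversion is sketched below; every identifier is invented for illustration, and the real changes are in the diff that follows.

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/workqueue.h>

struct my_dev {
	struct workqueue_struct *wq;
	struct work_struct work;
};

static void my_dev_process_pending(struct my_dev *dev)
{
	/* stand-in for walking and completing dev's pending requests */
}

static void my_work_fn(struct work_struct *work)
{
	struct my_dev *dev = container_of(work, struct my_dev, work);

	my_dev_process_pending(dev);
}

static int my_dev_init(struct my_dev *dev, int index)
{
	/* one dedicated queue per device, as the patch does per mtd device */
	dev->wq = alloc_workqueue("mydev%d", 0, 0, index);
	if (!dev->wq)
		return -ENOMEM;
	INIT_WORK(&dev->work, my_work_fn);
	return 0;
}

static void my_dev_kick(struct my_dev *dev)
{
	queue_work(dev->wq, &dev->work);	/* replaces wake_up_process(thread) */
}

static void my_dev_exit(struct my_dev *dev)
{
	destroy_workqueue(dev->wq);	/* flushes pending work; replaces kthread_stop() */
}

The request function only has to queue the work item, and tearing the device down becomes a single destroy_workqueue() call that waits for any work still running.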
Signed-off-by: Ezequiel Garcia Signed-off-by: Artem Bityutskiy --- drivers/mtd/mtd_blkdevs.c | 47 ++++++++++++++------------------------------ include/linux/mtd/blktrans.h | 4 +++- 2 files changed, 18 insertions(+), 33 deletions(-) (limited to 'include/linux') diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c index 26ef2a72bdae..5ad39bb5ab4c 100644 --- a/drivers/mtd/mtd_blkdevs.c +++ b/drivers/mtd/mtd_blkdevs.c @@ -32,7 +32,6 @@ #include #include #include -#include #include #include "mtdcore.h" @@ -121,16 +120,14 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr, int mtd_blktrans_cease_background(struct mtd_blktrans_dev *dev) { - if (kthread_should_stop()) - return 1; - return dev->bg_stop; } EXPORT_SYMBOL_GPL(mtd_blktrans_cease_background); -static int mtd_blktrans_thread(void *arg) +static void mtd_blktrans_work(struct work_struct *work) { - struct mtd_blktrans_dev *dev = arg; + struct mtd_blktrans_dev *dev = + container_of(work, struct mtd_blktrans_dev, work); struct mtd_blktrans_ops *tr = dev->tr; struct request_queue *rq = dev->rq; struct request *req = NULL; @@ -138,7 +135,7 @@ static int mtd_blktrans_thread(void *arg) spin_lock_irq(rq->queue_lock); - while (!kthread_should_stop()) { + while (1) { int res; dev->bg_stop = false; @@ -156,15 +153,7 @@ static int mtd_blktrans_thread(void *arg) background_done = !dev->bg_stop; continue; } - set_current_state(TASK_INTERRUPTIBLE); - - if (kthread_should_stop()) - set_current_state(TASK_RUNNING); - - spin_unlock_irq(rq->queue_lock); - schedule(); - spin_lock_irq(rq->queue_lock); - continue; + break; } spin_unlock_irq(rq->queue_lock); @@ -185,8 +174,6 @@ static int mtd_blktrans_thread(void *arg) __blk_end_request_all(req, -EIO); spin_unlock_irq(rq->queue_lock); - - return 0; } static void mtd_blktrans_request(struct request_queue *rq) @@ -199,10 +186,8 @@ static void mtd_blktrans_request(struct request_queue *rq) if (!dev) while ((req = blk_fetch_request(rq)) != NULL) __blk_end_request_all(req, -ENODEV); - else { - dev->bg_stop = true; - wake_up_process(dev->thread); - } + else + queue_work(dev->wq, &dev->work); } static int blktrans_open(struct block_device *bdev, fmode_t mode) @@ -437,14 +422,13 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new) gd->queue = new->rq; - /* Create processing thread */ - /* TODO: workqueue ? */ - new->thread = kthread_run(mtd_blktrans_thread, new, - "%s%d", tr->name, new->mtd->index); - if (IS_ERR(new->thread)) { - ret = PTR_ERR(new->thread); + /* Create processing workqueue */ + new->wq = alloc_workqueue("%s%d", 0, 0, + tr->name, new->mtd->index); + if (!new->wq) goto error4; - } + INIT_WORK(&new->work, mtd_blktrans_work); + gd->driverfs_dev = &new->mtd->dev; if (new->readonly) @@ -484,9 +468,8 @@ int del_mtd_blktrans_dev(struct mtd_blktrans_dev *old) /* Stop new requests to arrive */ del_gendisk(old->disk); - - /* Stop the thread */ - kthread_stop(old->thread); + /* Stop workqueue. This will perform any pending request. 
*/ + destroy_workqueue(old->wq); /* Kill current requests */ spin_lock_irqsave(&old->queue_lock, flags); diff --git a/include/linux/mtd/blktrans.h b/include/linux/mtd/blktrans.h index ed270bd2e4df..4eb0a50d0c55 100644 --- a/include/linux/mtd/blktrans.h +++ b/include/linux/mtd/blktrans.h @@ -23,6 +23,7 @@ #include #include #include +#include struct hd_geometry; struct mtd_info; @@ -43,7 +44,8 @@ struct mtd_blktrans_dev { struct kref ref; struct gendisk *disk; struct attribute_group *disk_attributes; - struct task_struct *thread; + struct workqueue_struct *wq; + struct work_struct work; struct request_queue *rq; spinlock_t queue_lock; void *priv; -- cgit v1.2.3 From 8d4b9e3182634d8b5afb5a144a8c6c24b187bcc1 Mon Sep 17 00:00:00 2001 From: RafaÅ‚ MiÅ‚ecki Date: Mon, 12 Nov 2012 13:03:20 +0100 Subject: bcma: export PLL reading function MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This is required by NAND flash driver for initializing wait counters. Signed-off-by: RafaÅ‚ MiÅ‚ecki Signed-off-by: Artem Bityutskiy --- drivers/bcma/driver_chipcommon_pmu.c | 3 ++- include/linux/bcma/bcma.h | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/drivers/bcma/driver_chipcommon_pmu.c b/drivers/bcma/driver_chipcommon_pmu.c index 201faf106b3f..657f23517481 100644 --- a/drivers/bcma/driver_chipcommon_pmu.c +++ b/drivers/bcma/driver_chipcommon_pmu.c @@ -13,12 +13,13 @@ #include #include -static u32 bcma_chipco_pll_read(struct bcma_drv_cc *cc, u32 offset) +u32 bcma_chipco_pll_read(struct bcma_drv_cc *cc, u32 offset) { bcma_cc_write32(cc, BCMA_CC_PLLCTL_ADDR, offset); bcma_cc_read32(cc, BCMA_CC_PLLCTL_ADDR); return bcma_cc_read32(cc, BCMA_CC_PLLCTL_DATA); } +EXPORT_SYMBOL_GPL(bcma_chipco_pll_read); void bcma_chipco_pll_write(struct bcma_drv_cc *cc, u32 offset, u32 value) { diff --git a/include/linux/bcma/bcma.h b/include/linux/bcma/bcma.h index 4180eb78d575..4fb6bd7941d7 100644 --- a/include/linux/bcma/bcma.h +++ b/include/linux/bcma/bcma.h @@ -345,6 +345,7 @@ extern void bcma_core_set_clockmode(struct bcma_device *core, enum bcma_clkmode clkmode); extern void bcma_core_pll_ctl(struct bcma_device *core, u32 req, u32 status, bool on); +extern u32 bcma_chipco_pll_read(struct bcma_drv_cc *cc, u32 offset); #define BCMA_DMA_TRANSLATION_MASK 0xC0000000 #define BCMA_DMA_TRANSLATION_NONE 0x00000000 #define BCMA_DMA_TRANSLATION_DMA32_CMT 0x40000000 /* Client Mode Translation for 32-bit DMA */ -- cgit v1.2.3 From 83af24027b3df1af5c5a9aa9adcdcfeb3429d3be Mon Sep 17 00:00:00 2001 From: "Philip, Avinash" Date: Wed, 21 Nov 2012 13:10:44 +0530 Subject: pwm: Device tree support for PWM polarity Add support for encoding PWM properties in bit encoded form with of_pwm_xlate_with_flags() function support. Platforms require platform specific PWM properties has to populate in 3rd cell of the pwm-specifier and PWM driver should also set .of_xlate support with this function. Currently PWM property polarity encoded in bit position 0 of the third cell in pwm-specifier. 
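A usage note, sketched under the assumption that the driver is built with the OF-specific of_xlate/of_pwm_n_cells hooks of struct pwm_chip available (my_pwm_register is a placeholder, not a real driver): a controller opts in to the flag-aware specifier by wiring up the new translate callback before registering the chip.

#include <linux/pwm.h>

static int my_pwm_register(struct pwm_chip *chip)
{
	chip->of_xlate = of_pwm_xlate_with_flags;
	chip->of_pwm_n_cells = 3;	/* <pwm index, period in ns, flags> */

	return pwmchip_add(chip);
}

Consumers can then request an inverted signal from the device tree alone, as in the pwms = <&pwm 0 5000000 1> example added to the binding document by this patch.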
Signed-off-by: Philip, Avinash Acked-by: Grant Likely Signed-off-by: Thierry Reding --- Documentation/devicetree/bindings/pwm/pwm.txt | 17 +++++++++++++--- drivers/pwm/core.c | 28 +++++++++++++++++++++++++++ include/linux/pwm.h | 3 +++ 3 files changed, 45 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/Documentation/devicetree/bindings/pwm/pwm.txt b/Documentation/devicetree/bindings/pwm/pwm.txt index 73ec962bfe8c..06e67247859a 100644 --- a/Documentation/devicetree/bindings/pwm/pwm.txt +++ b/Documentation/devicetree/bindings/pwm/pwm.txt @@ -37,10 +37,21 @@ device: pwm-names = "backlight"; }; +Note that in the example above, specifying the "pwm-names" is redundant +because the name "backlight" would be used as fallback anyway. + pwm-specifier typically encodes the chip-relative PWM number and the PWM -period in nanoseconds. Note that in the example above, specifying the -"pwm-names" is redundant because the name "backlight" would be used as -fallback anyway. +period in nanoseconds. + +Optionally, the pwm-specifier can encode a number of flags in a third cell: +- bit 0: PWM signal polarity (0: normal polarity, 1: inverse polarity) + +Example with optional PWM specifier for inverse polarity + + bl: backlight { + pwms = <&pwm 0 5000000 1>; + pwm-names = "backlight"; + }; 2) PWM controller nodes ----------------------- diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c index f5acdaa52707..780cb6b8a8f0 100644 --- a/drivers/pwm/core.c +++ b/drivers/pwm/core.c @@ -32,6 +32,9 @@ #define MAX_PWMS 1024 +/* flags in the third cell of the DT PWM specifier */ +#define PWM_SPEC_POLARITY (1 << 0) + static DEFINE_MUTEX(pwm_lookup_lock); static LIST_HEAD(pwm_lookup_list); static DEFINE_MUTEX(pwm_lock); @@ -129,6 +132,31 @@ static int pwm_device_request(struct pwm_device *pwm, const char *label) return 0; } +struct pwm_device * +of_pwm_xlate_with_flags(struct pwm_chip *pc, const struct of_phandle_args *args) +{ + struct pwm_device *pwm; + + if (pc->of_pwm_n_cells < 3) + return ERR_PTR(-EINVAL); + + if (args->args[0] >= pc->npwm) + return ERR_PTR(-EINVAL); + + pwm = pwm_request_from_chip(pc, args->args[0], NULL); + if (IS_ERR(pwm)) + return pwm; + + pwm_set_period(pwm, args->args[1]); + + if (args->args[2] & PWM_SPEC_POLARITY) + pwm_set_polarity(pwm, PWM_POLARITY_INVERSED); + else + pwm_set_polarity(pwm, PWM_POLARITY_NORMAL); + + return pwm; +} + static struct pwm_device * of_pwm_simple_xlate(struct pwm_chip *pc, const struct of_phandle_args *args) { diff --git a/include/linux/pwm.h b/include/linux/pwm.h index 112b31436848..6d661f32e0e4 100644 --- a/include/linux/pwm.h +++ b/include/linux/pwm.h @@ -171,6 +171,9 @@ struct pwm_device *pwm_request_from_chip(struct pwm_chip *chip, unsigned int index, const char *label); +struct pwm_device *of_pwm_xlate_with_flags(struct pwm_chip *pc, + const struct of_phandle_args *args); + struct pwm_device *pwm_get(struct device *dev, const char *consumer); void pwm_put(struct pwm_device *pwm); -- cgit v1.2.3 From e3725ec015dfbbeb896295cf2b3a995f28b0630e Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Fri, 16 Nov 2012 12:25:01 -0500 Subject: NFSv4.1: Shrink struct nfs4_sequence_res by moving the session pointer Move the session pointer into the slot table, then have struct nfs4_slot point to that slot table. 
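The resulting ownership chain is easiest to see in a trimmed-down sketch of the structures as they stand after this patch (field names are taken from the diff, everything else is elided; slot_to_session() is only an illustration, the patch open-codes the same chain):

struct nfs4_session;
struct nfs4_slot_table;

struct nfs4_slot {
	struct nfs4_slot_table *table;	/* back-pointer introduced here */
	/* ... */
};

struct nfs4_slot_table {
	struct nfs4_session *session;	/* parent session, introduced here */
	struct nfs4_slot *slots;
	/* ... */
};

static inline struct nfs4_session *slot_to_session(const struct nfs4_slot *slot)
{
	return slot->table->session;	/* replaces the old res->sr_session */
}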
Signed-off-by: Trond Myklebust --- fs/nfs/nfs4_fs.h | 3 ++- fs/nfs/nfs4proc.c | 33 +++++++++++++++++++++++---------- fs/nfs/nfs4state.c | 2 +- fs/nfs/nfs4xdr.c | 8 +++++--- include/linux/nfs_fs_sb.h | 1 + include/linux/nfs_xdr.h | 2 +- 6 files changed, 33 insertions(+), 16 deletions(-) (limited to 'include/linux') diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h index 36880b9aa91e..42c58691fb41 100644 --- a/fs/nfs/nfs4_fs.h +++ b/fs/nfs/nfs4_fs.h @@ -258,7 +258,8 @@ extern int nfs4_proc_get_lease_time(struct nfs_client *clp, extern int nfs4_proc_layoutcommit(struct nfs4_layoutcommit_data *data, bool sync); -extern struct nfs4_slot *nfs4_alloc_slots(u32 max_slots, gfp_t gfp_flags); +extern struct nfs4_slot *nfs4_alloc_slots(struct nfs4_slot_table *table, + u32 max_slots, gfp_t gfp_flags); static inline bool is_ds_only_client(struct nfs_client *clp) diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 14b39742b6e4..5b61c4a83191 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -467,25 +467,28 @@ void nfs4_check_drain_bc_complete(struct nfs4_session *ses) static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res) { + struct nfs4_session *session; struct nfs4_slot_table *tbl; - tbl = &res->sr_session->fc_slot_table; if (!res->sr_slot) { /* just wake up the next guy waiting since * we may have not consumed a slot after all */ dprintk("%s: No slot\n", __func__); return; } + tbl = res->sr_slot->table; + session = tbl->session; spin_lock(&tbl->slot_tbl_lock); nfs4_free_slot(tbl, res->sr_slot - tbl->slots); - nfs4_check_drain_fc_complete(res->sr_session); + nfs4_check_drain_fc_complete(session); spin_unlock(&tbl->slot_tbl_lock); res->sr_slot = NULL; } static int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res) { + struct nfs4_session *session; struct nfs4_slot *slot; unsigned long timestamp; struct nfs_client *clp; @@ -504,6 +507,7 @@ static int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res * goto out; slot = res->sr_slot; + session = slot->table->session; /* Check the SEQUENCE operation status */ switch (res->sr_status) { @@ -511,7 +515,7 @@ static int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res * /* Update the slot's sequence and clientid lease timer */ ++slot->seq_nr; timestamp = slot->renewal_time; - clp = res->sr_session->clp; + clp = session->clp; do_renew_lease(clp, timestamp); /* Check sequence flags */ if (res->sr_status_flags != 0) @@ -524,7 +528,7 @@ static int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res * */ dprintk("%s: slot=%td seq=%d: Operation in progress\n", __func__, - slot - res->sr_session->fc_slot_table.slots, + slot - session->fc_slot_table.slots, slot->seq_nr); goto out_retry; default: @@ -546,7 +550,7 @@ out_retry: static int nfs4_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res) { - if (res->sr_session == NULL) + if (res->sr_slot == NULL) return 1; return nfs41_sequence_done(task, res); } @@ -591,7 +595,6 @@ static void nfs41_init_sequence(struct nfs4_sequence_args *args, args->sa_cache_this = 0; if (cache_reply) args->sa_cache_this = 1; - res->sr_session = NULL; res->sr_slot = NULL; } @@ -646,7 +649,6 @@ int nfs41_setup_sequence(struct nfs4_session *session, dprintk("<-- %s slotid=%d seqid=%d\n", __func__, slotid, slot->seq_nr); - res->sr_session = session; res->sr_slot = slot; res->sr_status_flags = 0; /* @@ -5659,9 +5661,18 @@ int nfs4_proc_get_lease_time(struct nfs_client *clp, struct nfs_fsinfo *fsinfo) return status; } -struct nfs4_slot 
*nfs4_alloc_slots(u32 max_slots, gfp_t gfp_flags) +struct nfs4_slot *nfs4_alloc_slots(struct nfs4_slot_table *table, + u32 max_slots, gfp_t gfp_flags) { - return kmalloc_array(max_slots, sizeof(struct nfs4_slot), gfp_flags); + struct nfs4_slot *tbl; + u32 i; + + tbl = kmalloc_array(max_slots, sizeof(*tbl), gfp_flags); + if (tbl != NULL) { + for (i = 0; i < max_slots; i++) + tbl[i].table = table; + } + return tbl; } static void nfs4_add_and_init_slots(struct nfs4_slot_table *tbl, @@ -5699,7 +5710,7 @@ static int nfs4_realloc_slot_table(struct nfs4_slot_table *tbl, u32 max_reqs, /* Does the newly negotiated max_reqs match the existing slot table? */ if (max_reqs != tbl->max_slots) { - new = nfs4_alloc_slots(max_reqs, GFP_NOFS); + new = nfs4_alloc_slots(tbl, max_reqs, GFP_NOFS); if (!new) goto out; } @@ -5738,11 +5749,13 @@ static int nfs4_setup_session_slot_tables(struct nfs4_session *ses) dprintk("--> %s\n", __func__); /* Fore channel */ tbl = &ses->fc_slot_table; + tbl->session = ses; status = nfs4_realloc_slot_table(tbl, ses->fc_attrs.max_reqs, 1); if (status) /* -ENOMEM */ return status; /* Back channel */ tbl = &ses->bc_slot_table; + tbl->session = ses; status = nfs4_realloc_slot_table(tbl, ses->bc_attrs.max_reqs, 0); if (status && tbl->slots == NULL) /* Fore and back channel share a connection so get diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index 96fcbb97fd6a..9495789c425b 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -2033,7 +2033,7 @@ static int nfs4_recall_slot(struct nfs_client *clp) return 0; nfs4_begin_drain_session(clp); fc_tbl = &clp->cl_session->fc_slot_table; - new = nfs4_alloc_slots(fc_tbl->target_max_slots, GFP_NOFS); + new = nfs4_alloc_slots(fc_tbl, fc_tbl->target_max_slots, GFP_NOFS); if (!new) return -ENOMEM; diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index 672d9b0ef2c5..4126f054610a 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -5507,12 +5507,13 @@ static int decode_sequence(struct xdr_stream *xdr, struct rpc_rqst *rqstp) { #if defined(CONFIG_NFS_V4_1) + struct nfs4_session *session; struct nfs4_sessionid id; u32 dummy; int status; __be32 *p; - if (!res->sr_session) + if (res->sr_slot == NULL) return 0; status = decode_op_hdr(xdr, OP_SEQUENCE); @@ -5526,8 +5527,9 @@ static int decode_sequence(struct xdr_stream *xdr, * sequence number, the server is looney tunes. 
*/ status = -EREMOTEIO; + session = res->sr_slot->table->session; - if (memcmp(id.data, res->sr_session->sess_id.data, + if (memcmp(id.data, session->sess_id.data, NFS4_MAX_SESSIONID_LEN)) { dprintk("%s Invalid session id\n", __func__); goto out_err; @@ -5545,7 +5547,7 @@ static int decode_sequence(struct xdr_stream *xdr, } /* slot id */ dummy = be32_to_cpup(p++); - if (dummy != res->sr_slot - res->sr_session->fc_slot_table.slots) { + if (dummy != res->sr_slot - session->fc_slot_table.slots) { dprintk("%s Invalid slot id\n", __func__); goto out_err; } diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h index 97c8f9191880..b0412873d29c 100644 --- a/include/linux/nfs_fs_sb.h +++ b/include/linux/nfs_fs_sb.h @@ -209,6 +209,7 @@ struct nfs_server { /* Sessions */ #define SLOT_TABLE_SZ DIV_ROUND_UP(NFS4_MAX_SLOT_TABLE, 8*sizeof(long)) struct nfs4_slot_table { + struct nfs4_session *session; /* Parent session */ struct nfs4_slot *slots; /* seqid per slot */ unsigned long used_slots[SLOT_TABLE_SZ]; /* used/unused bitmap */ spinlock_t slot_tbl_lock; diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 9cb1c63a70c2..0fd88ab0e814 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -187,6 +187,7 @@ struct nfs4_channel_attrs { /* nfs41 sessions slot seqid */ struct nfs4_slot { + struct nfs4_slot_table *table; unsigned long renewal_time; u32 seq_nr; }; @@ -198,7 +199,6 @@ struct nfs4_sequence_args { }; struct nfs4_sequence_res { - struct nfs4_session *sr_session; struct nfs4_slot *sr_slot; /* slot used to send request */ int sr_status; /* sequence operation status */ u32 sr_status_flags; -- cgit v1.2.3 From df2fabffbace8988f3265585ec793ff9deccdea7 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Fri, 16 Nov 2012 12:45:06 -0500 Subject: NFSv4.1: Label each entry in the session slot tables with its slot number Instead of doing slot table pointer gymnastics every time we want to know which slot we're using. Signed-off-by: Trond Myklebust --- fs/nfs/nfs4proc.c | 12 +++++++----- fs/nfs/nfs4xdr.c | 2 +- include/linux/nfs_xdr.h | 1 + 3 files changed, 9 insertions(+), 6 deletions(-) (limited to 'include/linux') diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 5b61c4a83191..4311dba49c58 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -526,9 +526,9 @@ static int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res * * returned NFS4ERR_DELAY as per Section 2.10.6.2 * of RFC5661. */ - dprintk("%s: slot=%td seq=%d: Operation in progress\n", + dprintk("%s: slot=%u seq=%u: Operation in progress\n", __func__, - slot - session->fc_slot_table.slots, + slot->slot_nr, slot->seq_nr); goto out_retry; default: @@ -671,9 +671,9 @@ int nfs4_setup_sequence(const struct nfs_server *server, if (session == NULL) goto out; - dprintk("--> %s clp %p session %p sr_slot %td\n", + dprintk("--> %s clp %p session %p sr_slot %d\n", __func__, session->clp, session, res->sr_slot ? 
- res->sr_slot - session->fc_slot_table.slots : -1); + res->sr_slot->slot_nr : -1); ret = nfs41_setup_sequence(session, args, res, task); out: @@ -5669,8 +5669,10 @@ struct nfs4_slot *nfs4_alloc_slots(struct nfs4_slot_table *table, tbl = kmalloc_array(max_slots, sizeof(*tbl), gfp_flags); if (tbl != NULL) { - for (i = 0; i < max_slots; i++) + for (i = 0; i < max_slots; i++) { tbl[i].table = table; + tbl[i].slot_nr = i; + } } return tbl; } diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index 4126f054610a..50bac7066160 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -5547,7 +5547,7 @@ static int decode_sequence(struct xdr_stream *xdr, } /* slot id */ dummy = be32_to_cpup(p++); - if (dummy != res->sr_slot - session->fc_slot_table.slots) { + if (dummy != res->sr_slot->slot_nr) { dprintk("%s Invalid slot id\n", __func__); goto out_err; } diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 0fd88ab0e814..9c9b76c94b46 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -189,6 +189,7 @@ struct nfs4_channel_attrs { struct nfs4_slot { struct nfs4_slot_table *table; unsigned long renewal_time; + u32 slot_nr; u32 seq_nr; }; -- cgit v1.2.3 From 2b2fa71723f955d5b4a0f4edd99cf3cd69ceafd1 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Fri, 16 Nov 2012 12:58:36 -0500 Subject: NFSv4.1: Simplify struct nfs4_sequence_args too Replace the session pointer + slotid with a pointer to the allocated slot. Signed-off-by: Trond Myklebust --- fs/nfs/nfs4proc.c | 6 +++--- fs/nfs/nfs4xdr.c | 21 ++++++++++----------- include/linux/nfs_xdr.h | 3 +-- 3 files changed, 14 insertions(+), 16 deletions(-) (limited to 'include/linux') diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 4311dba49c58..6c41a34e34b4 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -591,7 +591,7 @@ out: static void nfs41_init_sequence(struct nfs4_sequence_args *args, struct nfs4_sequence_res *res, int cache_reply) { - args->sa_session = NULL; + args->sa_slot = NULL; args->sa_cache_this = 0; if (cache_reply) args->sa_cache_this = 1; @@ -644,8 +644,8 @@ int nfs41_setup_sequence(struct nfs4_session *session, rpc_task_set_priority(task, RPC_PRIORITY_NORMAL); slot = tbl->slots + slotid; slot->renewal_time = jiffies; - args->sa_session = session; - args->sa_slotid = slotid; + + args->sa_slot = slot; dprintk("<-- %s slotid=%d seqid=%d\n", __func__, slotid, slot->seq_nr); diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index 50bac7066160..27b0fec1a6b0 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -1833,18 +1833,16 @@ static void encode_sequence(struct xdr_stream *xdr, struct compound_hdr *hdr) { #if defined(CONFIG_NFS_V4_1) - struct nfs4_session *session = args->sa_session; + struct nfs4_session *session; struct nfs4_slot_table *tp; - struct nfs4_slot *slot; + struct nfs4_slot *slot = args->sa_slot; __be32 *p; - if (!session) + if (slot == NULL) return; - tp = &session->fc_slot_table; - - WARN_ON(args->sa_slotid == NFS4_MAX_SLOT_TABLE); - slot = tp->slots + args->sa_slotid; + tp = slot->table; + session = tp->session; encode_op_hdr(xdr, OP_SEQUENCE, decode_sequence_maxsz, hdr); @@ -1858,12 +1856,12 @@ static void encode_sequence(struct xdr_stream *xdr, ((u32 *)session->sess_id.data)[1], ((u32 *)session->sess_id.data)[2], ((u32 *)session->sess_id.data)[3], - slot->seq_nr, args->sa_slotid, + slot->seq_nr, slot->slot_nr, tp->highest_used_slotid, args->sa_cache_this); p = reserve_space(xdr, NFS4_MAX_SESSIONID_LEN + 16); p = xdr_encode_opaque_fixed(p, session->sess_id.data, 
NFS4_MAX_SESSIONID_LEN); *p++ = cpu_to_be32(slot->seq_nr); - *p++ = cpu_to_be32(args->sa_slotid); + *p++ = cpu_to_be32(slot->slot_nr); *p++ = cpu_to_be32(tp->highest_used_slotid); *p = cpu_to_be32(args->sa_cache_this); #endif /* CONFIG_NFS_V4_1 */ @@ -2025,8 +2023,9 @@ static void encode_free_stateid(struct xdr_stream *xdr, static u32 nfs4_xdr_minorversion(const struct nfs4_sequence_args *args) { #if defined(CONFIG_NFS_V4_1) - if (args->sa_session) - return args->sa_session->clp->cl_mvops->minor_version; + + if (args->sa_slot) + return args->sa_slot->table->session->clp->cl_mvops->minor_version; #endif /* CONFIG_NFS_V4_1 */ return 0; } diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 9c9b76c94b46..deb31bbbb857 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -194,8 +194,7 @@ struct nfs4_slot { }; struct nfs4_sequence_args { - struct nfs4_session *sa_session; - u32 sa_slotid; + struct nfs4_slot *sa_slot; u8 sa_cache_this; }; -- cgit v1.2.3 From 31fbcda71489d8cbe2b82819eaab4818524e3a49 Mon Sep 17 00:00:00 2001 From: Lee Jones Date: Fri, 28 Sep 2012 10:29:07 +0100 Subject: Input: bu21013_ts - move GPIO init and exit functions into the driver These GPIO init and exit functions have no place in platform data, they should be part of the driver instead, Acked-by: Arnd Bergmann Acked-by: Linus Walleij Signed-off-by: Lee Jones Signed-off-by: Dmitry Torokhov --- arch/arm/mach-ux500/board-mop500-stuib.c | 71 +------------------------------- drivers/input/touchscreen/bu21013_ts.c | 69 ++++++++++++++++++++++--------- include/linux/input/bu21013.h | 10 +---- 3 files changed, 53 insertions(+), 97 deletions(-) (limited to 'include/linux') diff --git a/arch/arm/mach-ux500/board-mop500-stuib.c b/arch/arm/mach-ux500/board-mop500-stuib.c index 564f57d5d8a7..7e1f294f0434 100644 --- a/arch/arm/mach-ux500/board-mop500-stuib.c +++ b/arch/arm/mach-ux500/board-mop500-stuib.c @@ -77,9 +77,6 @@ static struct i2c_board_info __initdata mop500_i2c0_devices_stuib[] = { * BU21013 ROHM touchscreen interface on the STUIBs */ -/* tracks number of bu21013 devices being enabled */ -static int bu21013_devices; - #define TOUCH_GPIO_PIN 84 #define TOUCH_XMAX 384 @@ -88,73 +85,8 @@ static int bu21013_devices; #define PRCMU_CLOCK_OCR 0x1CC #define TSC_EXT_CLOCK_9_6MHZ 0x840000 -/** - * bu21013_gpio_board_init : configures the touch panel. - * @reset_pin: reset pin number - * This function can be used to configures - * the voltage and reset the touch panel controller. - */ -static int bu21013_gpio_board_init(int reset_pin) -{ - int retval = 0; - - bu21013_devices++; - if (bu21013_devices == 1) { - retval = gpio_request(reset_pin, "touchp_reset"); - if (retval) { - printk(KERN_ERR "Unable to request gpio reset_pin"); - return retval; - } - retval = gpio_direction_output(reset_pin, 1); - if (retval < 0) { - printk(KERN_ERR "%s: gpio direction failed\n", - __func__); - return retval; - } - } - - return retval; -} - -/** - * bu21013_gpio_board_exit : deconfigures the touch panel controller - * @reset_pin: reset pin number - * This function can be used to deconfigures the chip selection - * for touch panel controller. 
- */ -static int bu21013_gpio_board_exit(int reset_pin) -{ - int retval = 0; - - if (bu21013_devices == 1) { - retval = gpio_direction_output(reset_pin, 0); - if (retval < 0) { - printk(KERN_ERR "%s: gpio direction failed\n", - __func__); - return retval; - } - gpio_set_value(reset_pin, 0); - } - bu21013_devices--; - - return retval; -} - -/** - * bu21013_read_pin_val : get the interrupt pin value - * This function can be used to get the interrupt pin value for touch panel - * controller. - */ -static int bu21013_read_pin_val(void) -{ - return gpio_get_value(TOUCH_GPIO_PIN); -} - static struct bu21013_platform_device tsc_plat_device = { - .cs_en = bu21013_gpio_board_init, - .cs_dis = bu21013_gpio_board_exit, - .irq_read_val = bu21013_read_pin_val, - .irq = NOMADIK_GPIO_TO_IRQ(TOUCH_GPIO_PIN), + .touch_pin = TOUCH_GPIO_PIN, .touch_x_max = TOUCH_XMAX, .touch_y_max = TOUCH_YMAX, .ext_clk = false, @@ -171,7 +103,6 @@ static struct i2c_board_info __initdata u8500_i2c3_devices_stuib[] = { I2C_BOARD_INFO("bu21013_tp", 0x5D), .platform_data = &tsc_plat_device, }, - }; void __init mop500_stuib_init(void) diff --git a/drivers/input/touchscreen/bu21013_ts.c b/drivers/input/touchscreen/bu21013_ts.c index 1e8cddd06c60..c6f6a04ec673 100644 --- a/drivers/input/touchscreen/bu21013_ts.c +++ b/drivers/input/touchscreen/bu21013_ts.c @@ -14,6 +14,7 @@ #include #include #include +#include #define PEN_DOWN_INTR 0 #define MAX_FINGERS 2 @@ -148,11 +149,12 @@ struct bu21013_ts_data { struct i2c_client *client; wait_queue_head_t wait; - bool touch_stopped; const struct bu21013_platform_device *chip; struct input_dev *in_dev; - unsigned int intr_pin; struct regulator *regulator; + unsigned int irq; + unsigned int intr_pin; + bool touch_stopped; }; /** @@ -262,7 +264,7 @@ static irqreturn_t bu21013_gpio_irq(int irq, void *device_data) return IRQ_NONE; } - data->intr_pin = data->chip->irq_read_val(); + data->intr_pin = gpio_get_value(data->chip->touch_pin); if (data->intr_pin == PEN_DOWN_INTR) wait_event_timeout(data->wait, data->touch_stopped, msecs_to_jiffies(2)); @@ -418,9 +420,32 @@ static void bu21013_free_irq(struct bu21013_ts_data *bu21013_data) { bu21013_data->touch_stopped = true; wake_up(&bu21013_data->wait); - free_irq(bu21013_data->chip->irq, bu21013_data); + free_irq(bu21013_data->irq, bu21013_data); } +/** + * bu21013_cs_disable() - deconfigures the touch panel controller + * @bu21013_data: device structure pointer + * + * This function is used to deconfigure the chip selection + * for touch panel controller. + */ +static void bu21013_cs_disable(struct bu21013_ts_data *bu21013_data) +{ + int error; + + error = gpio_direction_output(bu21013_data->chip->cs_pin, 0); + if (error < 0) + dev_warn(&bu21013_data->client->dev, + "%s: gpio direction failed, error: %d\n", + __func__, error); + else + gpio_set_value(bu21013_data->chip->cs_pin, 0); + + gpio_free(bu21013_data->chip->cs_pin); +} + + /** * bu21013_probe() - initializes the i2c-client touchscreen driver * @client: i2c client structure pointer @@ -430,7 +455,7 @@ static void bu21013_free_irq(struct bu21013_ts_data *bu21013_data) * driver and returns integer. 
*/ static int bu21013_probe(struct i2c_client *client, - const struct i2c_device_id *id) + const struct i2c_device_id *id) { struct bu21013_ts_data *bu21013_data; struct input_dev *in_dev; @@ -449,6 +474,11 @@ static int bu21013_probe(struct i2c_client *client, return -EINVAL; } + if (!gpio_is_valid(pdata->touch_pin)) { + dev_err(&client->dev, "invalid touch_pin supplied\n"); + return -EINVAL; + } + bu21013_data = kzalloc(sizeof(struct bu21013_ts_data), GFP_KERNEL); in_dev = input_allocate_device(); if (!bu21013_data || !in_dev) { @@ -460,6 +490,7 @@ static int bu21013_probe(struct i2c_client *client, bu21013_data->in_dev = in_dev; bu21013_data->chip = pdata; bu21013_data->client = client; + bu21013_data->irq = gpio_to_irq(pdata->touch_pin); bu21013_data->regulator = regulator_get(&client->dev, "avdd"); if (IS_ERR(bu21013_data->regulator)) { @@ -478,12 +509,11 @@ static int bu21013_probe(struct i2c_client *client, init_waitqueue_head(&bu21013_data->wait); /* configure the gpio pins */ - if (pdata->cs_en) { - error = pdata->cs_en(pdata->cs_pin); - if (error < 0) { - dev_err(&client->dev, "chip init failed\n"); - goto err_disable_regulator; - } + error = gpio_request_one(pdata->cs_pin, GPIOF_OUT_INIT_HIGH, + "touchp_reset"); + if (error < 0) { + dev_err(&client->dev, "Unable to request gpio reset_pin\n"); + goto err_disable_regulator; } /* configure the touch panel controller */ @@ -508,12 +538,13 @@ static int bu21013_probe(struct i2c_client *client, pdata->touch_y_max, 0, 0); input_set_drvdata(in_dev, bu21013_data); - error = request_threaded_irq(pdata->irq, NULL, bu21013_gpio_irq, + error = request_threaded_irq(bu21013_data->irq, NULL, bu21013_gpio_irq, IRQF_TRIGGER_FALLING | IRQF_SHARED | IRQF_ONESHOT, DRIVER_TP, bu21013_data); if (error) { - dev_err(&client->dev, "request irq %d failed\n", pdata->irq); + dev_err(&client->dev, "request irq %d failed\n", + bu21013_data->irq); goto err_cs_disable; } @@ -531,7 +562,7 @@ static int bu21013_probe(struct i2c_client *client, err_free_irq: bu21013_free_irq(bu21013_data); err_cs_disable: - pdata->cs_dis(pdata->cs_pin); + bu21013_cs_disable(bu21013_data); err_disable_regulator: regulator_disable(bu21013_data->regulator); err_put_regulator: @@ -555,7 +586,7 @@ static int bu21013_remove(struct i2c_client *client) bu21013_free_irq(bu21013_data); - bu21013_data->chip->cs_dis(bu21013_data->chip->cs_pin); + bu21013_cs_disable(bu21013_data); input_unregister_device(bu21013_data->in_dev); @@ -584,9 +615,9 @@ static int bu21013_suspend(struct device *dev) bu21013_data->touch_stopped = true; if (device_may_wakeup(&client->dev)) - enable_irq_wake(bu21013_data->chip->irq); + enable_irq_wake(bu21013_data->irq); else - disable_irq(bu21013_data->chip->irq); + disable_irq(bu21013_data->irq); regulator_disable(bu21013_data->regulator); @@ -621,9 +652,9 @@ static int bu21013_resume(struct device *dev) bu21013_data->touch_stopped = false; if (device_may_wakeup(&client->dev)) - disable_irq_wake(bu21013_data->chip->irq); + disable_irq_wake(bu21013_data->irq); else - enable_irq(bu21013_data->chip->irq); + enable_irq(bu21013_data->irq); return 0; } diff --git a/include/linux/input/bu21013.h b/include/linux/input/bu21013.h index 05e03284b92a..6230d76bde5d 100644 --- a/include/linux/input/bu21013.h +++ b/include/linux/input/bu21013.h @@ -9,13 +9,10 @@ /** * struct bu21013_platform_device - Handle the platform data - * @cs_en: pointer to the cs enable function - * @cs_dis: pointer to the cs disable function - * @irq_read_val: pointer to read the pen irq value function * 
@touch_x_max: touch x max * @touch_y_max: touch y max * @cs_pin: chip select pin - * @irq: irq pin + * @touch_pin: touch gpio pin * @ext_clk: external clock flag * @x_flip: x flip flag * @y_flip: y flip flag @@ -24,13 +21,10 @@ * This is used to handle the platform data */ struct bu21013_platform_device { - int (*cs_en)(int reset_pin); - int (*cs_dis)(int reset_pin); - int (*irq_read_val)(void); int touch_x_max; int touch_y_max; unsigned int cs_pin; - unsigned int irq; + unsigned int touch_pin; bool ext_clk; bool x_flip; bool y_flip; -- cgit v1.2.3 From 972deb4f49b5b6703d9c6117ba0aeda2180d4447 Mon Sep 17 00:00:00 2001 From: Shubhrajyoti D Date: Mon, 26 Nov 2012 15:25:11 +0530 Subject: i2c: omap: Remove the OMAP_I2C_FLAG_RESET_REGS_POSTIDLE flag The OMAP_I2C_FLAG_RESET_REGS_POSTIDLE is not used anymore in the i2c driver. Remove the flag. Signed-off-by: Shubhrajyoti D Reviewed-by: Felipe Balbi Signed-off-by: Wolfram Sang --- arch/arm/mach-omap2/omap_hwmod_33xx_data.c | 3 +-- arch/arm/mach-omap2/omap_hwmod_3xxx_data.c | 9 +++------ arch/arm/mach-omap2/omap_hwmod_44xx_data.c | 3 +-- drivers/i2c/busses/i2c-omap.c | 3 +-- include/linux/i2c-omap.h | 1 - 5 files changed, 6 insertions(+), 13 deletions(-) (limited to 'include/linux') diff --git a/arch/arm/mach-omap2/omap_hwmod_33xx_data.c b/arch/arm/mach-omap2/omap_hwmod_33xx_data.c index 59d5c1cd316d..c9a186bc6d40 100644 --- a/arch/arm/mach-omap2/omap_hwmod_33xx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_33xx_data.c @@ -1103,8 +1103,7 @@ static struct omap_hwmod_class i2c_class = { }; static struct omap_i2c_dev_attr i2c_dev_attr = { - .flags = OMAP_I2C_FLAG_BUS_SHIFT_NONE | - OMAP_I2C_FLAG_RESET_REGS_POSTIDLE, + .flags = OMAP_I2C_FLAG_BUS_SHIFT_NONE, }; /* i2c1 */ diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c index 943222c40489..36270bb637e4 100644 --- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c @@ -791,8 +791,7 @@ static struct omap_hwmod omap3xxx_dss_venc_hwmod = { /* I2C1 */ static struct omap_i2c_dev_attr i2c1_dev_attr = { .fifo_depth = 8, /* bytes */ - .flags = OMAP_I2C_FLAG_RESET_REGS_POSTIDLE | - OMAP_I2C_FLAG_BUS_SHIFT_2, + .flags = OMAP_I2C_FLAG_BUS_SHIFT_2, }; static struct omap_hwmod omap3xxx_i2c1_hwmod = { @@ -817,8 +816,7 @@ static struct omap_hwmod omap3xxx_i2c1_hwmod = { /* I2C2 */ static struct omap_i2c_dev_attr i2c2_dev_attr = { .fifo_depth = 8, /* bytes */ - .flags = OMAP_I2C_FLAG_RESET_REGS_POSTIDLE | - OMAP_I2C_FLAG_BUS_SHIFT_2, + .flags = OMAP_I2C_FLAG_BUS_SHIFT_2, }; static struct omap_hwmod omap3xxx_i2c2_hwmod = { @@ -843,8 +841,7 @@ static struct omap_hwmod omap3xxx_i2c2_hwmod = { /* I2C3 */ static struct omap_i2c_dev_attr i2c3_dev_attr = { .fifo_depth = 64, /* bytes */ - .flags = OMAP_I2C_FLAG_RESET_REGS_POSTIDLE | - OMAP_I2C_FLAG_BUS_SHIFT_2, + .flags = OMAP_I2C_FLAG_BUS_SHIFT_2, }; static struct omap_hwmod_irq_info i2c3_mpu_irqs[] = { diff --git a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c index 652d0285bd6d..eb40dbc6688e 100644 --- a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c @@ -1526,8 +1526,7 @@ static struct omap_hwmod_class omap44xx_i2c_hwmod_class = { }; static struct omap_i2c_dev_attr i2c_dev_attr = { - .flags = OMAP_I2C_FLAG_BUS_SHIFT_NONE | - OMAP_I2C_FLAG_RESET_REGS_POSTIDLE, + .flags = OMAP_I2C_FLAG_BUS_SHIFT_NONE, }; /* i2c1 */ diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c index 
248280136668..7a62acb7d262 100644 --- a/drivers/i2c/busses/i2c-omap.c +++ b/drivers/i2c/busses/i2c-omap.c @@ -1038,8 +1038,7 @@ static const struct i2c_algorithm omap_i2c_algo = { #ifdef CONFIG_OF static struct omap_i2c_bus_platform_data omap3_pdata = { .rev = OMAP_I2C_IP_VERSION_1, - .flags = OMAP_I2C_FLAG_RESET_REGS_POSTIDLE | - OMAP_I2C_FLAG_BUS_SHIFT_2, + .flags = OMAP_I2C_FLAG_BUS_SHIFT_2, }; static struct omap_i2c_bus_platform_data omap4_pdata = { diff --git a/include/linux/i2c-omap.h b/include/linux/i2c-omap.h index 1b25c04f82d9..babe0cf6d56b 100644 --- a/include/linux/i2c-omap.h +++ b/include/linux/i2c-omap.h @@ -20,7 +20,6 @@ #define OMAP_I2C_FLAG_NO_FIFO BIT(0) #define OMAP_I2C_FLAG_SIMPLE_CLOCK BIT(1) #define OMAP_I2C_FLAG_16BIT_DATA_REG BIT(2) -#define OMAP_I2C_FLAG_RESET_REGS_POSTIDLE BIT(3) #define OMAP_I2C_FLAG_ALWAYS_ARMXOR_CLK BIT(5) #define OMAP_I2C_FLAG_FORCE_19200_INT_CLK BIT(6) /* how the CPU address bus must be translated for I2C unit access */ -- cgit v1.2.3 From 64b37b2a63eb2f80b65c7185f0013f8ffc637ae3 Mon Sep 17 00:00:00 2001 From: Matthieu CASTET Date: Tue, 6 Nov 2012 11:51:44 +0100 Subject: mtd: nand: add NAND_BUSWIDTH_AUTO to autodetect bus width The driver calls nand_scan_ident in 8-bit mode; readid or ONFI detection is then performed (and detects the bus width). The driver should update its bus width before calling nand_scan_tail. This works because the readid and ONFI parameter reads are performed in 8-bit mode. Note that nand_scan_ident sends commands (NAND_CMD_RESET, NAND_CMD_READID, NAND_CMD_PARAM), addresses, and reads data. The ONFI specification is not very clear on whether the high byte of the address should be driven to 0 for x16 devices, but according to [1] it should be OK not to drive it during autodetection. [1] 3.3.2. Target Initialization [...] The Read ID and Read Parameter Page commands only use the lower 8-bits of the data bus. The host shall not issue commands that use a word data width on x16 devices until the host determines the device supports a 16-bit data bus width in the parameter page. Signed-off-by: Matthieu CASTET Signed-off-by: Artem Bityutskiy --- drivers/mtd/nand/nand_base.c | 14 +++++++++----- include/linux/mtd/nand.h | 7 +++++++ 2 files changed, 16 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index 6f58e1633e2f..5851c51ac2df 100644 --- a/drivers/mtd/nand/nand_base.c +++ b/drivers/mtd/nand/nand_base.c @@ -3250,11 +3250,15 @@ ident_done: break; } - /* - * Check, if buswidth is correct. Hardware drivers should set - * chip correct! - */ - if (busw != (chip->options & NAND_BUSWIDTH_16)) { + if (chip->options & NAND_BUSWIDTH_AUTO) { + WARN_ON(chip->options & NAND_BUSWIDTH_16); + chip->options |= busw; + nand_set_defaults(chip, busw); + } else if (busw != (chip->options & NAND_BUSWIDTH_16)) { + /* + * Check, if buswidth is correct. Hardware drivers should set + * chip correct! + */ pr_info("NAND device: Manufacturer ID:" " 0x%02x, Chip ID: 0x%02x (%s %s)\n", *maf_id, *dev_id, nand_manuf_ids[maf_idx].name, mtd->name); diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h index 9d8a6048aacd..7ccb3c59ed60 100644 --- a/include/linux/mtd/nand.h +++ b/include/linux/mtd/nand.h @@ -219,6 +219,13 @@ typedef enum { #define NAND_OWN_BUFFERS 0x00020000 /* Chip may not exist, so silence any errors in scan */ #define NAND_SCAN_SILENT_NODEV 0x00040000 +/* + * Autodetect nand buswidth with readid/onfi.
+ * This suppose the driver will configure the hardware in 8 bits mode + * when calling nand_scan_ident, and update its configuration + * before calling nand_scan_tail. + */ +#define NAND_BUSWIDTH_AUTO 0x00080000 /* Options set by nand scan */ /* Nand scan has allocated controller struct */ -- cgit v1.2.3 From 72b15b6ae97796c5fac687addde5dbfab872cf94 Mon Sep 17 00:00:00 2001 From: Omar Ramirez Luna Date: Mon, 19 Nov 2012 19:05:50 -0600 Subject: iommu/omap: Migrate to hwmod framework Use hwmod data and device attributes to build and register an omap device for iommu driver. - Update the naming convention in isp module. - Remove unneeded check for number of resources, as this is now handled by omap_device and prevents driver from loading. - Now unused, remove platform device and resource data, handling of sysconfig register for softreset purposes, use default latency structure. - Use hwmod API for reset handling. Signed-off-by: Omar Ramirez Luna Tested-by: Ohad Ben-Cohen Signed-off-by: Joerg Roedel --- arch/arm/mach-omap2/devices.c | 2 +- arch/arm/mach-omap2/omap-iommu.c | 168 +++++++------------------------ drivers/iommu/omap-iommu.c | 23 ++++- drivers/iommu/omap-iommu2.c | 19 ---- include/linux/platform_data/iommu-omap.h | 8 +- 5 files changed, 64 insertions(+), 156 deletions(-) (limited to 'include/linux') diff --git a/arch/arm/mach-omap2/devices.c b/arch/arm/mach-omap2/devices.c index c15f5a97b51c..787a996ec4eb 100644 --- a/arch/arm/mach-omap2/devices.c +++ b/arch/arm/mach-omap2/devices.c @@ -214,7 +214,7 @@ static struct platform_device omap3isp_device = { }; static struct omap_iommu_arch_data omap3_isp_iommu = { - .name = "isp", + .name = "mmu_isp", }; int omap3_init_camera(struct isp_platform_data *pdata) diff --git a/arch/arm/mach-omap2/omap-iommu.c b/arch/arm/mach-omap2/omap-iommu.c index a6a4ff8744b7..02726a647b1d 100644 --- a/arch/arm/mach-omap2/omap-iommu.c +++ b/arch/arm/mach-omap2/omap-iommu.c @@ -12,153 +12,61 @@ #include #include +#include +#include #include +#include +#include -#include "soc.h" -#include "common.h" - -struct iommu_device { - resource_size_t base; - int irq; - struct iommu_platform_data pdata; - struct resource res[2]; -}; -static struct iommu_device *devices; -static int num_iommu_devices; - -#ifdef CONFIG_ARCH_OMAP3 -static struct iommu_device omap3_devices[] = { - { - .base = 0x480bd400, - .irq = 24 + OMAP_INTC_START, - .pdata = { - .name = "isp", - .nr_tlb_entries = 8, - .clk_name = "cam_ick", - .da_start = 0x0, - .da_end = 0xFFFFF000, - }, - }, -#if defined(CONFIG_OMAP_IOMMU_IVA2) - { - .base = 0x5d000000, - .irq = 28 + OMAP_INTC_START, - .pdata = { - .name = "iva2", - .nr_tlb_entries = 32, - .clk_name = "iva2_ck", - .da_start = 0x11000000, - .da_end = 0xFFFFF000, - }, - }, -#endif -}; -#define NR_OMAP3_IOMMU_DEVICES ARRAY_SIZE(omap3_devices) -static struct platform_device *omap3_iommu_pdev[NR_OMAP3_IOMMU_DEVICES]; -#else -#define omap3_devices NULL -#define NR_OMAP3_IOMMU_DEVICES 0 -#define omap3_iommu_pdev NULL -#endif - -#ifdef CONFIG_ARCH_OMAP4 -static struct iommu_device omap4_devices[] = { - { - .base = OMAP4_MMU1_BASE, - .irq = 100 + OMAP44XX_IRQ_GIC_START, - .pdata = { - .name = "ducati", - .nr_tlb_entries = 32, - .clk_name = "ipu_fck", - .da_start = 0x0, - .da_end = 0xFFFFF000, - }, - }, - { - .base = OMAP4_MMU2_BASE, - .irq = 28 + OMAP44XX_IRQ_GIC_START, - .pdata = { - .name = "tesla", - .nr_tlb_entries = 32, - .clk_name = "dsp_fck", - .da_start = 0x0, - .da_end = 0xFFFFF000, - }, - }, -}; -#define NR_OMAP4_IOMMU_DEVICES 
ARRAY_SIZE(omap4_devices) -static struct platform_device *omap4_iommu_pdev[NR_OMAP4_IOMMU_DEVICES]; -#else -#define omap4_devices NULL -#define NR_OMAP4_IOMMU_DEVICES 0 -#define omap4_iommu_pdev NULL -#endif - -static struct platform_device **omap_iommu_pdev; - -static int __init omap_iommu_init(void) +static int __init omap_iommu_dev_init(struct omap_hwmod *oh, void *unused) { - int i, err; - struct resource res[] = { - { .flags = IORESOURCE_MEM }, - { .flags = IORESOURCE_IRQ }, - }; + struct platform_device *pdev; + struct iommu_platform_data *pdata; + struct omap_mmu_dev_attr *a = (struct omap_mmu_dev_attr *)oh->dev_attr; + static int i; + + pdata = kzalloc(sizeof(*pdata), GFP_KERNEL); + if (!pdata) + return -ENOMEM; + + pdata->name = oh->name; + pdata->clk_name = oh->main_clk; + pdata->nr_tlb_entries = a->nr_tlb_entries; + pdata->da_start = a->da_start; + pdata->da_end = a->da_end; + + if (oh->rst_lines_cnt == 1) { + pdata->reset_name = oh->rst_lines->name; + pdata->assert_reset = omap_device_assert_hardreset; + pdata->deassert_reset = omap_device_deassert_hardreset; + } - if (cpu_is_omap34xx()) { - devices = omap3_devices; - omap_iommu_pdev = omap3_iommu_pdev; - num_iommu_devices = NR_OMAP3_IOMMU_DEVICES; - } else if (cpu_is_omap44xx()) { - devices = omap4_devices; - omap_iommu_pdev = omap4_iommu_pdev; - num_iommu_devices = NR_OMAP4_IOMMU_DEVICES; - } else - return -ENODEV; + pdev = omap_device_build("omap-iommu", i, oh, pdata, sizeof(*pdata), + NULL, 0, 0); - for (i = 0; i < num_iommu_devices; i++) { - struct platform_device *pdev; - const struct iommu_device *d = &devices[i]; + kfree(pdata); - pdev = platform_device_alloc("omap-iommu", i); - if (!pdev) { - err = -ENOMEM; - goto err_out; - } + if (IS_ERR(pdev)) { + pr_err("%s: device build err: %ld\n", __func__, PTR_ERR(pdev)); + return PTR_ERR(pdev); + } - res[0].start = d->base; - res[0].end = d->base + MMU_REG_SIZE - 1; - res[1].start = res[1].end = d->irq; + i++; - err = platform_device_add_resources(pdev, res, - ARRAY_SIZE(res)); - if (err) - goto err_out; - err = platform_device_add_data(pdev, &d->pdata, - sizeof(d->pdata)); - if (err) - goto err_out; - err = platform_device_add(pdev); - if (err) - goto err_out; - omap_iommu_pdev[i] = pdev; - } return 0; +} -err_out: - while (i--) - platform_device_put(omap_iommu_pdev[i]); - return err; +static int __init omap_iommu_init(void) +{ + return omap_hwmod_for_each_by_class("mmu", omap_iommu_dev_init, NULL); } /* must be ready before omap3isp is probed */ subsys_initcall(omap_iommu_init); static void __exit omap_iommu_exit(void) { - int i; - - for (i = 0; i < num_iommu_devices; i++) - platform_device_unregister(omap_iommu_pdev[i]); + /* Do nothing */ } module_exit(omap_iommu_exit); diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c index f8082da6179b..af9b4f31f594 100644 --- a/drivers/iommu/omap-iommu.c +++ b/drivers/iommu/omap-iommu.c @@ -143,13 +143,23 @@ EXPORT_SYMBOL_GPL(omap_iommu_arch_version); static int iommu_enable(struct omap_iommu *obj) { int err; + struct platform_device *pdev = to_platform_device(obj->dev); + struct iommu_platform_data *pdata = pdev->dev.platform_data; - if (!obj) + if (!obj || !pdata) return -EINVAL; if (!arch_iommu) return -ENODEV; + if (pdata->deassert_reset) { + err = pdata->deassert_reset(pdev, pdata->reset_name); + if (err) { + dev_err(obj->dev, "deassert_reset failed: %d\n", err); + return err; + } + } + clk_enable(obj->clk); err = arch_iommu->enable(obj); @@ -159,12 +169,18 @@ static int iommu_enable(struct omap_iommu *obj) 
static void iommu_disable(struct omap_iommu *obj) { - if (!obj) + struct platform_device *pdev = to_platform_device(obj->dev); + struct iommu_platform_data *pdata = pdev->dev.platform_data; + + if (!obj || !pdata) return; arch_iommu->disable(obj); clk_disable(obj->clk); + + if (pdata->assert_reset) + pdata->assert_reset(pdev, pdata->reset_name); } /* @@ -926,9 +942,6 @@ static int __devinit omap_iommu_probe(struct platform_device *pdev) struct resource *res; struct iommu_platform_data *pdata = pdev->dev.platform_data; - if (pdev->num_resources != 2) - return -EINVAL; - obj = kzalloc(sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL); if (!obj) return -ENOMEM; diff --git a/drivers/iommu/omap-iommu2.c b/drivers/iommu/omap-iommu2.c index c02020292377..4a3a1c7a38c1 100644 --- a/drivers/iommu/omap-iommu2.c +++ b/drivers/iommu/omap-iommu2.c @@ -35,12 +35,8 @@ #define MMU_SYS_IDLE_SMART (2 << MMU_SYS_IDLE_SHIFT) #define MMU_SYS_IDLE_MASK (3 << MMU_SYS_IDLE_SHIFT) -#define MMU_SYS_SOFTRESET (1 << 1) #define MMU_SYS_AUTOIDLE 1 -/* SYSSTATUS */ -#define MMU_SYS_RESETDONE 1 - /* IRQSTATUS & IRQENABLE */ #define MMU_IRQ_MULTIHITFAULT (1 << 4) #define MMU_IRQ_TABLEWALKFAULT (1 << 3) @@ -97,7 +93,6 @@ static void __iommu_set_twl(struct omap_iommu *obj, bool on) static int omap2_iommu_enable(struct omap_iommu *obj) { u32 l, pa; - unsigned long timeout; if (!obj->iopgd || !IS_ALIGNED((u32)obj->iopgd, SZ_16K)) return -EINVAL; @@ -106,20 +101,6 @@ static int omap2_iommu_enable(struct omap_iommu *obj) if (!IS_ALIGNED(pa, SZ_16K)) return -EINVAL; - iommu_write_reg(obj, MMU_SYS_SOFTRESET, MMU_SYSCONFIG); - - timeout = jiffies + msecs_to_jiffies(20); - do { - l = iommu_read_reg(obj, MMU_SYSSTATUS); - if (l & MMU_SYS_RESETDONE) - break; - } while (!time_after(jiffies, timeout)); - - if (!(l & MMU_SYS_RESETDONE)) { - dev_err(obj->dev, "can't take mmu out of reset\n"); - return -ENODEV; - } - l = iommu_read_reg(obj, MMU_REVISION); dev_info(obj->dev, "%s: version %d.%d\n", obj->name, (l >> 4) & 0xf, l & 0xf); diff --git a/include/linux/platform_data/iommu-omap.h b/include/linux/platform_data/iommu-omap.h index c677b9f2fefa..ef2060d7eeb8 100644 --- a/include/linux/platform_data/iommu-omap.h +++ b/include/linux/platform_data/iommu-omap.h @@ -10,6 +10,8 @@ * published by the Free Software Foundation. */ +#include + #define MMU_REG_SIZE 256 /** @@ -43,7 +45,11 @@ struct omap_mmu_dev_attr { struct iommu_platform_data { const char *name; const char *clk_name; - const int nr_tlb_entries; + const char *reset_name; + int nr_tlb_entries; u32 da_start; u32 da_end; + + int (*assert_reset)(struct platform_device *pdev, const char *name); + int (*deassert_reset)(struct platform_device *pdev, const char *name); }; -- cgit v1.2.3 From ebf7cda0f92effd8169b831fae81e9437dce1fef Mon Sep 17 00:00:00 2001 From: Omar Ramirez Luna Date: Mon, 19 Nov 2012 19:05:51 -0600 Subject: iommu/omap: Adapt to runtime pm Use runtime PM functionality interfaced with hwmod enable/idle functions, to replace direct clock operations and sysconfig handling. Due to reset sequence, pm_runtime_[get|put]_sync must be used, to avoid possible operations with the module under reset. Because of this and given that the driver uses spin_locks to protect their critical sections, we must use pm_runtime_irq_safe in order for the runtime ops to be happy, otherwise might_sleep_if checks in runtime framework will complain. 
The remaining pm_runtime calls outside iommu_enable and iommu_disable correspond to paths that can be accessed through debugfs; some of them don't work if the module is not enabled first, but in the future, if the mmu is idled without being freed, they will be needed for debugging. Signed-off-by: Omar Ramirez Luna Tested-by: Ohad Ben-Cohen Acked-by: Tony Lindgren Signed-off-by: Joerg Roedel --- arch/arm/mach-omap2/omap-iommu.c | 1 - drivers/iommu/omap-iommu.c | 40 +++++++++++++++----------------- drivers/iommu/omap-iommu.h | 3 --- drivers/iommu/omap-iommu2.c | 17 -------------- include/linux/platform_data/iommu-omap.h | 1 - 5 files changed, 19 insertions(+), 43 deletions(-) (limited to 'include/linux') diff --git a/arch/arm/mach-omap2/omap-iommu.c b/arch/arm/mach-omap2/omap-iommu.c index 02726a647b1d..7642fc4672c1 100644 --- a/arch/arm/mach-omap2/omap-iommu.c +++ b/arch/arm/mach-omap2/omap-iommu.c @@ -31,7 +31,6 @@ static int __init omap_iommu_dev_init(struct omap_hwmod *oh, void *unused) return -ENOMEM; pdata->name = oh->name; - pdata->clk_name = oh->main_clk; pdata->nr_tlb_entries = a->nr_tlb_entries; pdata->da_start = a->da_start; pdata->da_end = a->da_end; diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c index af9b4f31f594..18108c1405e2 100644 --- a/drivers/iommu/omap-iommu.c +++ b/drivers/iommu/omap-iommu.c @@ -16,13 +16,13 @@ #include #include #include -#include #include #include #include #include #include #include +#include #include @@ -160,7 +160,7 @@ static int iommu_enable(struct omap_iommu *obj) } } - clk_enable(obj->clk); + pm_runtime_get_sync(obj->dev); err = arch_iommu->enable(obj); @@ -177,7 +177,7 @@ static void iommu_disable(struct omap_iommu *obj) arch_iommu->disable(obj); - clk_disable(obj->clk); + pm_runtime_put_sync(obj->dev); if (pdata->assert_reset) pdata->assert_reset(pdev, pdata->reset_name); @@ -303,7 +303,7 @@ static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e) if (!obj || !obj->nr_tlb_entries || !e) return -EINVAL; - clk_enable(obj->clk); + pm_runtime_get_sync(obj->dev); iotlb_lock_get(obj, &l); if (l.base == obj->nr_tlb_entries) { @@ -333,7 +333,7 @@ static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e) cr = iotlb_alloc_cr(obj, e); if (IS_ERR(cr)) { - clk_disable(obj->clk); + pm_runtime_put_sync(obj->dev); return PTR_ERR(cr); } @@ -347,7 +347,7 @@ static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e) l.vict = l.base; iotlb_lock_set(obj, &l); out: - clk_disable(obj->clk); + pm_runtime_put_sync(obj->dev); return err; } @@ -377,7 +377,7 @@ static void flush_iotlb_page(struct omap_iommu *obj, u32 da) int i; struct cr_regs cr; - clk_enable(obj->clk); + pm_runtime_get_sync(obj->dev); for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, cr) { u32 start; @@ -396,7 +396,7 @@ static void flush_iotlb_page(struct omap_iommu *obj, u32 da) iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY); } } - clk_disable(obj->clk); + pm_runtime_put_sync(obj->dev); if (i == obj->nr_tlb_entries) dev_dbg(obj->dev, "%s: no page for %08x\n", __func__, da); @@ -410,7 +410,7 @@ static void flush_iotlb_all(struct omap_iommu *obj) { struct iotlb_lock l; - clk_enable(obj->clk); + pm_runtime_get_sync(obj->dev); l.base = 0; l.vict = 0; @@ -418,7 +418,7 @@ static void flush_iotlb_all(struct omap_iommu *obj) iommu_write_reg(obj, 1, MMU_GFLUSH); - clk_disable(obj->clk); + pm_runtime_put_sync(obj->dev); } #if defined(CONFIG_OMAP_IOMMU_DEBUG) || defined(CONFIG_OMAP_IOMMU_DEBUG_MODULE) @@ -428,11 +428,11 @@ ssize_t omap_iommu_dump_ctx(struct omap_iommu
*obj, char *buf, ssize_t bytes) if (!obj || !buf) return -EINVAL; - clk_enable(obj->clk); + pm_runtime_get_sync(obj->dev); bytes = arch_iommu->dump_ctx(obj, buf, bytes); - clk_disable(obj->clk); + pm_runtime_put_sync(obj->dev); return bytes; } @@ -446,7 +446,7 @@ __dump_tlb_entries(struct omap_iommu *obj, struct cr_regs *crs, int num) struct cr_regs tmp; struct cr_regs *p = crs; - clk_enable(obj->clk); + pm_runtime_get_sync(obj->dev); iotlb_lock_get(obj, &saved); for_each_iotlb_cr(obj, num, i, tmp) { @@ -456,7 +456,7 @@ __dump_tlb_entries(struct omap_iommu *obj, struct cr_regs *crs, int num) } iotlb_lock_set(obj, &saved); - clk_disable(obj->clk); + pm_runtime_put_sync(obj->dev); return p - crs; } @@ -946,10 +946,6 @@ static int __devinit omap_iommu_probe(struct platform_device *pdev) if (!obj) return -ENOMEM; - obj->clk = clk_get(&pdev->dev, pdata->clk_name); - if (IS_ERR(obj->clk)) - goto err_clk; - obj->nr_tlb_entries = pdata->nr_tlb_entries; obj->name = pdata->name; obj->dev = &pdev->dev; @@ -992,6 +988,9 @@ static int __devinit omap_iommu_probe(struct platform_device *pdev) goto err_irq; platform_set_drvdata(pdev, obj); + pm_runtime_irq_safe(obj->dev); + pm_runtime_enable(obj->dev); + dev_info(&pdev->dev, "%s registered\n", obj->name); return 0; @@ -1000,8 +999,6 @@ err_irq: err_ioremap: release_mem_region(res->start, resource_size(res)); err_mem: - clk_put(obj->clk); -err_clk: kfree(obj); return err; } @@ -1022,7 +1019,8 @@ static int __devexit omap_iommu_remove(struct platform_device *pdev) release_mem_region(res->start, resource_size(res)); iounmap(obj->regbase); - clk_put(obj->clk); + pm_runtime_disable(obj->dev); + dev_info(&pdev->dev, "%s removed\n", obj->name); kfree(obj); return 0; diff --git a/drivers/iommu/omap-iommu.h b/drivers/iommu/omap-iommu.h index 2b5f3c04d167..120084206602 100644 --- a/drivers/iommu/omap-iommu.h +++ b/drivers/iommu/omap-iommu.h @@ -29,7 +29,6 @@ struct iotlb_entry { struct omap_iommu { const char *name; struct module *owner; - struct clk *clk; void __iomem *regbase; struct device *dev; void *isr_priv; @@ -116,8 +115,6 @@ static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev) * MMU Register offsets */ #define MMU_REVISION 0x00 -#define MMU_SYSCONFIG 0x10 -#define MMU_SYSSTATUS 0x14 #define MMU_IRQSTATUS 0x18 #define MMU_IRQENABLE 0x1c #define MMU_WALKING_ST 0x40 diff --git a/drivers/iommu/omap-iommu2.c b/drivers/iommu/omap-iommu2.c index 4a3a1c7a38c1..d745094a69dd 100644 --- a/drivers/iommu/omap-iommu2.c +++ b/drivers/iommu/omap-iommu2.c @@ -28,15 +28,6 @@ */ #define IOMMU_ARCH_VERSION 0x00000011 -/* SYSCONF */ -#define MMU_SYS_IDLE_SHIFT 3 -#define MMU_SYS_IDLE_FORCE (0 << MMU_SYS_IDLE_SHIFT) -#define MMU_SYS_IDLE_NONE (1 << MMU_SYS_IDLE_SHIFT) -#define MMU_SYS_IDLE_SMART (2 << MMU_SYS_IDLE_SHIFT) -#define MMU_SYS_IDLE_MASK (3 << MMU_SYS_IDLE_SHIFT) - -#define MMU_SYS_AUTOIDLE 1 - /* IRQSTATUS & IRQENABLE */ #define MMU_IRQ_MULTIHITFAULT (1 << 4) #define MMU_IRQ_TABLEWALKFAULT (1 << 3) @@ -105,11 +96,6 @@ static int omap2_iommu_enable(struct omap_iommu *obj) dev_info(obj->dev, "%s: version %d.%d\n", obj->name, (l >> 4) & 0xf, l & 0xf); - l = iommu_read_reg(obj, MMU_SYSCONFIG); - l &= ~MMU_SYS_IDLE_MASK; - l |= (MMU_SYS_IDLE_SMART | MMU_SYS_AUTOIDLE); - iommu_write_reg(obj, l, MMU_SYSCONFIG); - iommu_write_reg(obj, pa, MMU_TTB); __iommu_set_twl(obj, true); @@ -123,7 +109,6 @@ static void omap2_iommu_disable(struct omap_iommu *obj) l &= ~MMU_CNTL_MASK; iommu_write_reg(obj, l, MMU_CNTL); - iommu_write_reg(obj, MMU_SYS_IDLE_FORCE, 
MMU_SYSCONFIG); dev_dbg(obj->dev, "%s is shutting down\n", obj->name); } @@ -252,8 +237,6 @@ omap2_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t len) char *p = buf; pr_reg(REVISION); - pr_reg(SYSCONFIG); - pr_reg(SYSSTATUS); pr_reg(IRQSTATUS); pr_reg(IRQENABLE); pr_reg(WALKING_ST); diff --git a/include/linux/platform_data/iommu-omap.h b/include/linux/platform_data/iommu-omap.h index ef2060d7eeb8..5b429c43a297 100644 --- a/include/linux/platform_data/iommu-omap.h +++ b/include/linux/platform_data/iommu-omap.h @@ -44,7 +44,6 @@ struct omap_mmu_dev_attr { struct iommu_platform_data { const char *name; - const char *clk_name; const char *reset_name; int nr_tlb_entries; u32 da_start; -- cgit v1.2.3 From cc248d4b1ddf05fefc1373d9d7a4dd1df71b6190 Mon Sep 17 00:00:00 2001 From: "J. Bruce Fields" Date: Mon, 3 Dec 2012 16:11:13 -0500 Subject: svcrpc: don't byte-swap sk_reclen in place Byte-swapping in place is always a little dubious. Let's instead define this field to always be big-endian, and do the swapping on demand where we need it. Signed-off-by: J. Bruce Fields --- include/linux/sunrpc/svcsock.h | 12 +++++++++++- net/sunrpc/svcsock.c | 26 +++++++++++--------------- 2 files changed, 22 insertions(+), 16 deletions(-) (limited to 'include/linux') diff --git a/include/linux/sunrpc/svcsock.h b/include/linux/sunrpc/svcsock.h index 92ad02f0dcc0..613cf42227aa 100644 --- a/include/linux/sunrpc/svcsock.h +++ b/include/linux/sunrpc/svcsock.h @@ -26,11 +26,21 @@ struct svc_sock { void (*sk_owspace)(struct sock *); /* private TCP part */ - u32 sk_reclen; /* length of record */ + __be32 sk_reclen; /* length of record */ u32 sk_tcplen; /* current read length */ struct page * sk_pages[RPCSVC_MAXPAGES]; /* received data */ }; +static inline u32 svc_sock_reclen(struct svc_sock *svsk) +{ + return ntohl(svsk->sk_reclen) & RPC_FRAGMENT_SIZE_MASK; +} + +static inline u32 svc_sock_final_rec(struct svc_sock *svsk) +{ + return ntohl(svsk->sk_reclen) & RPC_LAST_STREAM_FRAGMENT; +} + /* * Function prototypes. */ diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index 03827cef1fa7..d50de2b95036 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c @@ -950,8 +950,7 @@ static int svc_tcp_recv_record(struct svc_sock *svsk, struct svc_rqst *rqstp) return -EAGAIN; } - svsk->sk_reclen = ntohl(svsk->sk_reclen); - if (!(svsk->sk_reclen & RPC_LAST_STREAM_FRAGMENT)) { + if (!(svc_sock_final_rec(svsk))) { /* FIXME: technically, a record can be fragmented, * and non-terminal fragments will not have the top * bit set in the fragment length header. @@ -961,21 +960,18 @@ static int svc_tcp_recv_record(struct svc_sock *svsk, struct svc_rqst *rqstp) goto err_delete; } - svsk->sk_reclen &= RPC_FRAGMENT_SIZE_MASK; - dprintk("svc: TCP record, %d bytes\n", svsk->sk_reclen); - if (svsk->sk_reclen > serv->sv_max_mesg) { + dprintk("svc: TCP record, %d bytes\n", svc_sock_reclen(svsk)); + if (svc_sock_reclen(svsk) > serv->sv_max_mesg) { net_notice_ratelimited("RPC: fragment too large: 0x%08lx\n", - (unsigned long)svsk->sk_reclen); + (unsigned long)svc_sock_reclen(svsk)); goto err_delete; } } - if (svsk->sk_reclen < 8) + if (svc_sock_reclen(svsk) < 8) goto err_delete; /* client is nuts. */ - len = svsk->sk_reclen; - - return len; + return svc_sock_reclen(svsk); error: dprintk("RPC: TCP recv_record got %d\n", len); return len; @@ -1019,7 +1015,7 @@ static int receive_cb_reply(struct svc_sock *svsk, struct svc_rqst *rqstp) if (dst->iov_len < src->iov_len) return -EAGAIN; /* whatever; just giving up. 
*/ memcpy(dst->iov_base, src->iov_base, src->iov_len); - xprt_complete_rqst(req->rq_task, svsk->sk_reclen); + xprt_complete_rqst(req->rq_task, rqstp->rq_arg.len); rqstp->rq_arg.len = 0; return 0; } @@ -1064,12 +1060,12 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp) goto error; base = svc_tcp_restore_pages(svsk, rqstp); - want = svsk->sk_reclen - base; + want = svc_sock_reclen(svsk) - base; vec = rqstp->rq_vec; pnum = copy_pages_to_kvecs(&vec[0], &rqstp->rq_pages[0], - svsk->sk_reclen); + svc_sock_reclen(svsk)); rqstp->rq_respages = &rqstp->rq_pages[pnum]; @@ -1082,11 +1078,11 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp) if (len < 0 && len != -EAGAIN) goto err_other; dprintk("svc: incomplete TCP record (%d of %d)\n", - svsk->sk_tcplen, svsk->sk_reclen); + svsk->sk_tcplen, svc_sock_reclen(svsk)); goto err_noclose; } - rqstp->rq_arg.len = svsk->sk_reclen; + rqstp->rq_arg.len = svc_sock_reclen(svsk); rqstp->rq_arg.page_base = 0; if (rqstp->rq_arg.len <= rqstp->rq_arg.head[0].iov_len) { rqstp->rq_arg.head[0].iov_len = rqstp->rq_arg.len; -- cgit v1.2.3 From 8af345f58ac9b350bb23c1457c613381d9f00472 Mon Sep 17 00:00:00 2001 From: "J. Bruce Fields" Date: Mon, 3 Dec 2012 16:45:35 -0500 Subject: svcrpc: track rpc data length separately from sk_tcplen Keep a separate field, sk_datalen, that tracks only the data contained in a fragment, not including the fragment header. For now, this is always just max(0, sk_tcplen - 4), but after we allow multiple fragments sk_datalen will accumulate the total rpc data size while sk_tcplen only tracks progress receiving the current fragment. Signed-off-by: J. Bruce Fields --- include/linux/sunrpc/svcsock.h | 11 +++++++++-- net/sunrpc/svcsock.c | 19 ++++++++++++------- 2 files changed, 21 insertions(+), 9 deletions(-) (limited to 'include/linux') diff --git a/include/linux/sunrpc/svcsock.h b/include/linux/sunrpc/svcsock.h index 613cf42227aa..62fd1b756e99 100644 --- a/include/linux/sunrpc/svcsock.h +++ b/include/linux/sunrpc/svcsock.h @@ -26,8 +26,15 @@ struct svc_sock { void (*sk_owspace)(struct sock *); /* private TCP part */ - __be32 sk_reclen; /* length of record */ - u32 sk_tcplen; /* current read length */ + /* On-the-wire fragment header: */ + __be32 sk_reclen; + /* As we receive a record, this includes the length received so + * far (including the fragment header): */ + u32 sk_tcplen; + /* Total length of the data (not including fragment headers) + * received so far in the fragments making up this rpc: */ + u32 sk_datalen; + struct page * sk_pages[RPCSVC_MAXPAGES]; /* received data */ }; diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index 1db42b1ffe28..2b09e2306bfa 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c @@ -874,9 +874,9 @@ static unsigned int svc_tcp_restore_pages(struct svc_sock *svsk, struct svc_rqst { unsigned int i, len, npages; - if (svsk->sk_tcplen <= sizeof(rpc_fraghdr)) + if (svsk->sk_datalen == 0) return 0; - len = svsk->sk_tcplen - sizeof(rpc_fraghdr); + len = svsk->sk_datalen; npages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT; for (i = 0; i < npages; i++) { if (rqstp->rq_pages[i] != NULL) @@ -893,9 +893,9 @@ static void svc_tcp_save_pages(struct svc_sock *svsk, struct svc_rqst *rqstp) { unsigned int i, len, npages; - if (svsk->sk_tcplen <= sizeof(rpc_fraghdr)) + if (svsk->sk_datalen == 0) return; - len = svsk->sk_tcplen - sizeof(rpc_fraghdr); + len = svsk->sk_datalen; npages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT; for (i = 0; i < npages; i++) { svsk->sk_pages[i] = rqstp->rq_pages[i]; @@ -907,9 +907,9 @@ 
static void svc_tcp_clear_pages(struct svc_sock *svsk) { unsigned int i, len, npages; - if (svsk->sk_tcplen <= sizeof(rpc_fraghdr)) + if (svsk->sk_datalen == 0) goto out; - len = svsk->sk_tcplen - sizeof(rpc_fraghdr); + len = svsk->sk_datalen; npages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT; for (i = 0; i < npages; i++) { BUG_ON(svsk->sk_pages[i] == NULL); @@ -918,6 +918,7 @@ static void svc_tcp_clear_pages(struct svc_sock *svsk) } out: svsk->sk_tcplen = 0; + svsk->sk_datalen = 0; } /* @@ -1066,8 +1067,10 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp) /* Now receive data */ len = svc_partial_recvfrom(rqstp, vec, pnum, want, base); - if (len >= 0) + if (len >= 0) { svsk->sk_tcplen += len; + svsk->sk_datalen += len; + } if (len != want) { svc_tcp_save_pages(svsk, rqstp); if (len < 0 && len != -EAGAIN) @@ -1100,6 +1103,7 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp) /* Reset TCP read info */ svsk->sk_reclen = 0; svsk->sk_tcplen = 0; + svsk->sk_datalen = 0; /* If we have more data, signal svc_xprt_enqueue() to try again */ if (svc_recv_available(svsk) > sizeof(rpc_fraghdr)) set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); @@ -1296,6 +1300,7 @@ static void svc_tcp_init(struct svc_sock *svsk, struct svc_serv *serv) svsk->sk_reclen = 0; svsk->sk_tcplen = 0; + svsk->sk_datalen = 0; memset(&svsk->sk_pages[0], 0, sizeof(svsk->sk_pages)); tcp_sk(sk)->nonagle |= TCP_NAGLE_OFF; -- cgit v1.2.3 From 464ee9f966404786ba4c6be35dc8362ee8e6ba4e Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Tue, 20 Nov 2012 12:49:27 -0500 Subject: NFSv4.1: Ensure that the client tracks the server target_highest_slotid Dynamic slot allocation in NFSv4.1 depends on the client being able to track the server's target value for the highest slotid in the slot table. See the reference in Section 2.10.6.1 of RFC5661. To avoid ordering problems in the case where 2 SEQUENCE replies contain conflicting updates to this target value, we also introduce a generation counter, to track whether or not an RPC containing a SEQUENCE operation was launched before or after the last update. Also rename the nfs4_slot_table target_max_slots field to 'target_highest_slotid' to avoid confusion with a slot table size or number of slots. 
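The generation counter described above can be pictured with a small sketch (illustrative only, not the code added by the patch below; it is simplified from nfs4_alloc_slot and nfs41_update_target_slotid): a slot copies the table's generation when the request is sent, and a reply is only allowed to move the target if no newer update has landed in the meantime.

	/* Illustrative sketch, not the patch code itself. */
	static void update_target(struct nfs4_slot_table *tbl,
				  struct nfs4_slot *slot, u32 new_target)
	{
		spin_lock(&tbl->slot_tbl_lock);
		if (slot->generation == tbl->generation &&
		    tbl->target_highest_slotid != new_target) {
			tbl->target_highest_slotid = new_target;
			/* replies from requests sent earlier are now stale */
			tbl->generation++;
		}
		spin_unlock(&tbl->slot_tbl_lock);
	}

At send time the slot records slot->generation = tbl->generation, so two SEQUENCE replies carrying conflicting targets cannot apply their updates out of order.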
Signed-off-by: Trond Myklebust --- fs/nfs/callback_proc.c | 2 +- fs/nfs/nfs4proc.c | 25 +++++++++++++++++++++++++ fs/nfs/nfs4state.c | 7 +++---- fs/nfs/nfs4xdr.c | 4 ++-- include/linux/nfs_fs_sb.h | 5 +++-- include/linux/nfs_xdr.h | 2 ++ 6 files changed, 36 insertions(+), 9 deletions(-) (limited to 'include/linux') diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c index 0be08b964f38..0ef047b7d28d 100644 --- a/fs/nfs/callback_proc.c +++ b/fs/nfs/callback_proc.c @@ -576,7 +576,7 @@ __be32 nfs4_callback_recallslot(struct cb_recallslotargs *args, void *dummy, if (args->crsa_target_max_slots == fc_tbl->max_slots) goto out; - fc_tbl->target_max_slots = args->crsa_target_max_slots; + fc_tbl->target_highest_slotid = args->crsa_target_max_slots; nfs41_handle_recall_slot(cps->clp); out: dprintk("%s: exit with status = %d\n", __func__, ntohl(status)); diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 197ef3e4e1f7..d91abaa522e8 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -488,6 +488,28 @@ static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res) res->sr_slot = NULL; } +/* Update the client's idea of target_highest_slotid */ +static void nfs41_set_target_slotid_locked(struct nfs4_slot_table *tbl, + u32 target_highest_slotid) +{ + if (tbl->target_highest_slotid == target_highest_slotid) + return; + tbl->target_highest_slotid = target_highest_slotid; + tbl->generation++; +} + +static void nfs41_update_target_slotid(struct nfs4_slot_table *tbl, + struct nfs4_slot *slot, + struct nfs4_sequence_res *res) +{ + spin_lock(&tbl->slot_tbl_lock); + if (tbl->generation != slot->generation) + goto out; + nfs41_set_target_slotid_locked(tbl, res->sr_target_highest_slotid); +out: + spin_unlock(&tbl->slot_tbl_lock); +} + static int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res) { struct nfs4_session *session; @@ -522,6 +544,7 @@ static int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res * /* Check sequence flags */ if (res->sr_status_flags != 0) nfs4_schedule_lease_recovery(clp); + nfs41_update_target_slotid(slot->table, slot, res); break; case -NFS4ERR_DELAY: /* The server detected a resend of the RPC call and @@ -583,6 +606,7 @@ static struct nfs4_slot *nfs4_alloc_slot(struct nfs4_slot_table *tbl) tbl->highest_used_slotid = slotid; ret = &tbl->slots[slotid]; ret->renewal_time = jiffies; + ret->generation = tbl->generation; out: dprintk("<-- %s used_slots=%04lx highest_used=%d slotid=%d \n", @@ -5693,6 +5717,7 @@ static void nfs4_add_and_init_slots(struct nfs4_slot_table *tbl, tbl->max_slots = max_slots; } tbl->highest_used_slotid = NFS4_NO_SLOT; + tbl->target_highest_slotid = max_slots - 1; for (i = 0; i < tbl->max_slots; i++) tbl->slots[i].seq_nr = ivalue; spin_unlock(&tbl->slot_tbl_lock); diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index 9495789c425b..842cb8c2f65d 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -2033,17 +2033,16 @@ static int nfs4_recall_slot(struct nfs_client *clp) return 0; nfs4_begin_drain_session(clp); fc_tbl = &clp->cl_session->fc_slot_table; - new = nfs4_alloc_slots(fc_tbl, fc_tbl->target_max_slots, GFP_NOFS); + new = nfs4_alloc_slots(fc_tbl, fc_tbl->target_highest_slotid + 1, GFP_NOFS); if (!new) return -ENOMEM; spin_lock(&fc_tbl->slot_tbl_lock); - for (i = 0; i < fc_tbl->target_max_slots; i++) + for (i = 0; i <= fc_tbl->target_highest_slotid; i++) new[i].seq_nr = fc_tbl->slots[i].seq_nr; old = fc_tbl->slots; fc_tbl->slots = new; - fc_tbl->max_slots = fc_tbl->target_max_slots; - 
fc_tbl->target_max_slots = 0; + fc_tbl->max_slots = fc_tbl->target_highest_slotid + 1; clp->cl_session->fc_attrs.max_reqs = fc_tbl->max_slots; spin_unlock(&fc_tbl->slot_tbl_lock); diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index 27b0fec1a6b0..05d34f1fcc19 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -5552,8 +5552,8 @@ static int decode_sequence(struct xdr_stream *xdr, } /* highest slot id - currently not processed */ dummy = be32_to_cpup(p++); - /* target highest slot id - currently not processed */ - dummy = be32_to_cpup(p++); + /* target highest slot id */ + res->sr_target_highest_slotid = be32_to_cpup(p++); /* result flags */ res->sr_status_flags = be32_to_cpup(p); status = 0; diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h index b0412873d29c..57d406997def 100644 --- a/include/linux/nfs_fs_sb.h +++ b/include/linux/nfs_fs_sb.h @@ -217,8 +217,9 @@ struct nfs4_slot_table { u32 max_slots; /* # slots in table */ u32 highest_used_slotid; /* sent to server on each SEQ. * op for dynamic resizing */ - u32 target_max_slots; /* Set by CB_RECALL_SLOT as - * the new max_slots */ + u32 target_highest_slotid; /* Server max_slot target */ + unsigned long generation; /* Generation counter for + target_highest_slotid */ struct completion complete; }; diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index deb31bbbb857..08c47db7417f 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -188,6 +188,7 @@ struct nfs4_channel_attrs { /* nfs41 sessions slot seqid */ struct nfs4_slot { struct nfs4_slot_table *table; + unsigned long generation; unsigned long renewal_time; u32 slot_nr; u32 seq_nr; @@ -202,6 +203,7 @@ struct nfs4_sequence_res { struct nfs4_slot *sr_slot; /* slot used to send request */ int sr_status; /* sequence operation status */ u32 sr_status_flags; + u32 sr_target_highest_slotid; }; struct nfs4_get_lease_time_args { -- cgit v1.2.3 From da0507b7c95ccd4d9c86394eef42fe076032af30 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Tue, 20 Nov 2012 18:10:30 -0500 Subject: NFSv4.1: Reset the sequence number for slots that have been deallocated When the server tells us that it is dynamically resizing the session replay cache, we should reset the sequence number for those slots that have been deallocated. 
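To make the effect concrete (a simplified sketch with invented helper names, not the code added by the patch below): if the server had advertised a highest slotid of 7 and later advertises 15 again, slots 8..15 were deallocated in between and must come back with their sequence numbers reset to 1.

	/* Illustrative sketch only: restart seq_nr for slots the server had deallocated. */
	static void reset_returned_slots(struct nfs4_slot *slots,
					 u32 old_server_highest,
					 u32 new_server_highest,
					 u32 max_slotid)
	{
		u32 i, limit = min(max_slotid, new_server_highest);

		for (i = old_server_highest + 1; i <= limit; i++)
			slots[i].seq_nr = 1;
	}

The real change does this in nfs41_set_server_slotid_locked, under the slot table lock, as the diff below shows.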
Signed-off-by: Trond Myklebust --- fs/nfs/nfs4proc.c | 18 ++++++++++++++++++ fs/nfs/nfs4xdr.c | 4 ++-- include/linux/nfs_fs_sb.h | 1 + include/linux/nfs_xdr.h | 1 + 4 files changed, 22 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index d91abaa522e8..52435ec44193 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -498,6 +498,22 @@ static void nfs41_set_target_slotid_locked(struct nfs4_slot_table *tbl, tbl->generation++; } +static void nfs41_set_server_slotid_locked(struct nfs4_slot_table *tbl, + u32 highest_slotid) +{ + unsigned int max_slotid, i; + + if (tbl->server_highest_slotid == highest_slotid) + return; + if (tbl->highest_used_slotid > highest_slotid) + return; + max_slotid = min(tbl->max_slots - 1, highest_slotid); + /* Reset the seq_nr for deallocated slots */ + for (i = tbl->server_highest_slotid + 1; i <= max_slotid; i++) + tbl->slots[i].seq_nr = 1; + tbl->server_highest_slotid = highest_slotid; +} + static void nfs41_update_target_slotid(struct nfs4_slot_table *tbl, struct nfs4_slot *slot, struct nfs4_sequence_res *res) @@ -505,6 +521,7 @@ static void nfs41_update_target_slotid(struct nfs4_slot_table *tbl, spin_lock(&tbl->slot_tbl_lock); if (tbl->generation != slot->generation) goto out; + nfs41_set_server_slotid_locked(tbl, res->sr_highest_slotid); nfs41_set_target_slotid_locked(tbl, res->sr_target_highest_slotid); out: spin_unlock(&tbl->slot_tbl_lock); @@ -5718,6 +5735,7 @@ static void nfs4_add_and_init_slots(struct nfs4_slot_table *tbl, } tbl->highest_used_slotid = NFS4_NO_SLOT; tbl->target_highest_slotid = max_slots - 1; + tbl->server_highest_slotid = max_slots - 1; for (i = 0; i < tbl->max_slots; i++) tbl->slots[i].seq_nr = ivalue; spin_unlock(&tbl->slot_tbl_lock); diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index 05d34f1fcc19..a67040f51597 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -5550,8 +5550,8 @@ static int decode_sequence(struct xdr_stream *xdr, dprintk("%s Invalid slot id\n", __func__); goto out_err; } - /* highest slot id - currently not processed */ - dummy = be32_to_cpup(p++); + /* highest slot id */ + res->sr_highest_slotid = be32_to_cpup(p++); /* target highest slot id */ res->sr_target_highest_slotid = be32_to_cpup(p++); /* result flags */ diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h index 57d406997def..646e64bbff4c 100644 --- a/include/linux/nfs_fs_sb.h +++ b/include/linux/nfs_fs_sb.h @@ -218,6 +218,7 @@ struct nfs4_slot_table { u32 highest_used_slotid; /* sent to server on each SEQ. 
* op for dynamic resizing */ u32 target_highest_slotid; /* Server max_slot target */ + u32 server_highest_slotid; /* Server highest slotid */ unsigned long generation; /* Generation counter for target_highest_slotid */ struct completion complete; diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 08c47db7417f..3ddb08fba935 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -203,6 +203,7 @@ struct nfs4_sequence_res { struct nfs4_slot *sr_slot; /* slot used to send request */ int sr_status; /* sequence operation status */ u32 sr_status_flags; + u32 sr_highest_slotid; u32 sr_target_highest_slotid; }; -- cgit v1.2.3 From 97e548a93de213b149eea025a97d88e28143b445 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Tue, 20 Nov 2012 14:45:48 -0500 Subject: NFSv4.1: Support dynamic resizing of the session slot table Allow the server to control the size of the session slot table by adjusting the value of sr_target_max_slots in the reply to the SEQUENCE operation. Signed-off-by: Trond Myklebust --- fs/nfs/nfs4proc.c | 12 ++++++++++-- fs/nfs/nfs4state.c | 6 +++--- include/linux/nfs_fs_sb.h | 1 + 3 files changed, 14 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 62212231ce62..1792ece8b53c 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -492,10 +492,17 @@ static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res) static void nfs41_set_target_slotid_locked(struct nfs4_slot_table *tbl, u32 target_highest_slotid) { + unsigned int max_slotid, i; + if (tbl->target_highest_slotid == target_highest_slotid) return; tbl->target_highest_slotid = target_highest_slotid; tbl->generation++; + + max_slotid = min(tbl->max_slots - 1, tbl->target_highest_slotid); + for (i = tbl->max_slotid + 1; i <= max_slotid; i++) + rpc_wake_up_next(&tbl->slot_tbl_waitq); + tbl->max_slotid = max_slotid; } void nfs41_set_target_slotid(struct nfs4_slot_table *tbl, @@ -622,8 +629,8 @@ static struct nfs4_slot *nfs4_alloc_slot(struct nfs4_slot_table *tbl) dprintk("--> %s used_slots=%04lx highest_used=%u max_slots=%u\n", __func__, tbl->used_slots[0], tbl->highest_used_slotid, tbl->max_slots); - slotid = find_first_zero_bit(tbl->used_slots, tbl->max_slots); - if (slotid >= tbl->max_slots) + slotid = find_first_zero_bit(tbl->used_slots, tbl->max_slotid + 1); + if (slotid > tbl->max_slotid) goto out; __set_bit(slotid, tbl->used_slots); if (slotid > tbl->highest_used_slotid || @@ -5744,6 +5751,7 @@ static void nfs4_add_and_init_slots(struct nfs4_slot_table *tbl, tbl->highest_used_slotid = NFS4_NO_SLOT; tbl->target_highest_slotid = max_slots - 1; tbl->server_highest_slotid = max_slots - 1; + tbl->max_slotid = max_slots - 1; for (i = 0; i < tbl->max_slots; i++) tbl->slots[i].seq_nr = ivalue; spin_unlock(&tbl->slot_tbl_lock); diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index 842cb8c2f65d..1b7fa73c9436 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -254,15 +254,14 @@ static void nfs4_end_drain_session(struct nfs_client *clp) { struct nfs4_session *ses = clp->cl_session; struct nfs4_slot_table *tbl; - int max_slots; + unsigned int i; if (ses == NULL) return; tbl = &ses->fc_slot_table; if (test_and_clear_bit(NFS4_SESSION_DRAINING, &ses->session_state)) { spin_lock(&tbl->slot_tbl_lock); - max_slots = tbl->max_slots; - while (max_slots--) { + for (i = 0; i <= tbl->max_slotid; i++) { if (rpc_wake_up_first(&tbl->slot_tbl_waitq, nfs4_set_task_privileged, NULL) == NULL) @@ -2043,6 +2042,7 @@ static int 
nfs4_recall_slot(struct nfs_client *clp) old = fc_tbl->slots; fc_tbl->slots = new; fc_tbl->max_slots = fc_tbl->target_highest_slotid + 1; + fc_tbl->max_slotid = fc_tbl->target_highest_slotid; clp->cl_session->fc_attrs.max_reqs = fc_tbl->max_slots; spin_unlock(&fc_tbl->slot_tbl_lock); diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h index 646e64bbff4c..30715508fade 100644 --- a/include/linux/nfs_fs_sb.h +++ b/include/linux/nfs_fs_sb.h @@ -215,6 +215,7 @@ struct nfs4_slot_table { spinlock_t slot_tbl_lock; struct rpc_wait_queue slot_tbl_waitq; /* allocators may wait here */ u32 max_slots; /* # slots in table */ + u32 max_slotid; /* Max allowed slotid value */ u32 highest_used_slotid; /* sent to server on each SEQ. * op for dynamic resizing */ u32 target_highest_slotid; /* Server max_slot target */ -- cgit v1.2.3 From 87dda67e7386ba7d2164391ea58b34e028d8157b Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Tue, 20 Nov 2012 19:49:20 -0500 Subject: NFSv4.1: Allow SEQUENCE to resize the slot table on the fly Instead of an array of slots, use a singly linked list of slots that can be dynamically appended to or shrunk. Signed-off-by: Trond Myklebust --- fs/nfs/nfs4_fs.h | 4 +- fs/nfs/nfs4proc.c | 174 +++++++++++++++++++++++++++++++----------------- fs/nfs/nfs4state.c | 22 ++---- include/linux/nfs_xdr.h | 1 + 4 files changed, 121 insertions(+), 80 deletions(-) (limited to 'include/linux') diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h index 5d4e82b10c3c..856bc496a210 100644 --- a/fs/nfs/nfs4_fs.h +++ b/fs/nfs/nfs4_fs.h @@ -258,10 +258,10 @@ extern int nfs4_proc_get_lease_time(struct nfs_client *clp, extern int nfs4_proc_layoutcommit(struct nfs4_layoutcommit_data *data, bool sync); -extern struct nfs4_slot *nfs4_alloc_slots(struct nfs4_slot_table *table, - u32 max_slots, gfp_t gfp_flags); extern void nfs41_set_target_slotid(struct nfs4_slot_table *tbl, u32 target_highest_slotid); +extern int nfs4_resize_slot_table(struct nfs4_slot_table *tbl, + u32 max_reqs, u32 ivalue); static inline bool is_ds_only_client(struct nfs_client *clp) diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 1792ece8b53c..fc65300172e1 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -396,6 +396,27 @@ static void renew_lease(const struct nfs_server *server, unsigned long timestamp #if defined(CONFIG_NFS_V4_1) +/* + * nfs4_shrink_slot_table - free retired slots from the slot table + */ +static void nfs4_shrink_slot_table(struct nfs4_slot_table *tbl, u32 newsize) +{ + struct nfs4_slot **p; + if (newsize >= tbl->max_slots) + return; + + p = &tbl->slots; + while (newsize--) + p = &(*p)->next; + while (*p) { + struct nfs4_slot *slot = *p; + + *p = slot->next; + kfree(slot); + tbl->max_slots--; + } +} + /* * nfs4_free_slot - free a slot and efficiently update slot table. 
* @@ -499,7 +520,7 @@ static void nfs41_set_target_slotid_locked(struct nfs4_slot_table *tbl, tbl->target_highest_slotid = target_highest_slotid; tbl->generation++; - max_slotid = min(tbl->max_slots - 1, tbl->target_highest_slotid); + max_slotid = min(NFS4_MAX_SLOT_TABLE - 1, tbl->target_highest_slotid); for (i = tbl->max_slotid + 1; i <= max_slotid; i++) rpc_wake_up_next(&tbl->slot_tbl_waitq); tbl->max_slotid = max_slotid; @@ -516,16 +537,12 @@ void nfs41_set_target_slotid(struct nfs4_slot_table *tbl, static void nfs41_set_server_slotid_locked(struct nfs4_slot_table *tbl, u32 highest_slotid) { - unsigned int max_slotid, i; - if (tbl->server_highest_slotid == highest_slotid) return; if (tbl->highest_used_slotid > highest_slotid) return; - max_slotid = min(tbl->max_slots - 1, highest_slotid); - /* Reset the seq_nr for deallocated slots */ - for (i = tbl->server_highest_slotid + 1; i <= max_slotid; i++) - tbl->slots[i].seq_nr = 1; + /* Deallocate slots */ + nfs4_shrink_slot_table(tbl, highest_slotid + 1); tbl->server_highest_slotid = highest_slotid; } @@ -612,6 +629,42 @@ static int nfs4_sequence_done(struct rpc_task *task, return nfs41_sequence_done(task, res); } +static struct nfs4_slot *nfs4_new_slot(struct nfs4_slot_table *tbl, + u32 slotid, u32 seq_init, gfp_t gfp_mask) +{ + struct nfs4_slot *slot; + + slot = kzalloc(sizeof(*slot), gfp_mask); + if (slot) { + slot->table = tbl; + slot->slot_nr = slotid; + slot->seq_nr = seq_init; + } + return slot; +} + +static struct nfs4_slot *nfs4_find_or_create_slot(struct nfs4_slot_table *tbl, + u32 slotid, u32 seq_init, gfp_t gfp_mask) +{ + struct nfs4_slot **p, *slot; + + p = &tbl->slots; + for (;;) { + if (*p == NULL) { + *p = nfs4_new_slot(tbl, tbl->max_slots, + seq_init, gfp_mask); + if (*p == NULL) + break; + tbl->max_slots++; + } + slot = *p; + if (slot->slot_nr == slotid) + return slot; + p = &slot->next; + } + return NULL; +} + /* * nfs4_alloc_slot - efficiently look for a free slot * @@ -628,15 +681,17 @@ static struct nfs4_slot *nfs4_alloc_slot(struct nfs4_slot_table *tbl) dprintk("--> %s used_slots=%04lx highest_used=%u max_slots=%u\n", __func__, tbl->used_slots[0], tbl->highest_used_slotid, - tbl->max_slots); + tbl->max_slotid + 1); slotid = find_first_zero_bit(tbl->used_slots, tbl->max_slotid + 1); if (slotid > tbl->max_slotid) goto out; + ret = nfs4_find_or_create_slot(tbl, slotid, 1, GFP_NOWAIT); + if (ret == NULL) + goto out; __set_bit(slotid, tbl->used_slots); if (slotid > tbl->highest_used_slotid || tbl->highest_used_slotid == NFS4_NO_SLOT) tbl->highest_used_slotid = slotid; - ret = &tbl->slots[slotid]; ret->renewal_time = jiffies; ret->generation = tbl->generation; @@ -5718,67 +5773,56 @@ int nfs4_proc_get_lease_time(struct nfs_client *clp, struct nfs_fsinfo *fsinfo) return status; } -struct nfs4_slot *nfs4_alloc_slots(struct nfs4_slot_table *table, - u32 max_slots, gfp_t gfp_flags) +static int nfs4_grow_slot_table(struct nfs4_slot_table *tbl, + u32 max_reqs, u32 ivalue) { - struct nfs4_slot *tbl; - u32 i; - - tbl = kmalloc_array(max_slots, sizeof(*tbl), gfp_flags); - if (tbl != NULL) { - for (i = 0; i < max_slots; i++) { - tbl[i].table = table; - tbl[i].slot_nr = i; - } - } - return tbl; + if (max_reqs <= tbl->max_slots) + return 0; + if (nfs4_find_or_create_slot(tbl, max_reqs - 1, ivalue, GFP_NOFS)) + return 0; + return -ENOMEM; } -static void nfs4_add_and_init_slots(struct nfs4_slot_table *tbl, - struct nfs4_slot *new, - u32 max_slots, +static void nfs4_reset_slot_table(struct nfs4_slot_table *tbl, + u32 server_highest_slotid, 
u32 ivalue) { - struct nfs4_slot *old = NULL; - u32 i; + struct nfs4_slot **p; - spin_lock(&tbl->slot_tbl_lock); - if (new) { - old = tbl->slots; - tbl->slots = new; - tbl->max_slots = max_slots; + nfs4_shrink_slot_table(tbl, server_highest_slotid + 1); + p = &tbl->slots; + while (*p) { + (*p)->seq_nr = ivalue; + p = &(*p)->next; } tbl->highest_used_slotid = NFS4_NO_SLOT; - tbl->target_highest_slotid = max_slots - 1; - tbl->server_highest_slotid = max_slots - 1; - tbl->max_slotid = max_slots - 1; - for (i = 0; i < tbl->max_slots; i++) - tbl->slots[i].seq_nr = ivalue; - spin_unlock(&tbl->slot_tbl_lock); - kfree(old); + tbl->target_highest_slotid = server_highest_slotid; + tbl->server_highest_slotid = server_highest_slotid; + tbl->max_slotid = server_highest_slotid; } /* * (re)Initialise a slot table */ -static int nfs4_realloc_slot_table(struct nfs4_slot_table *tbl, u32 max_reqs, - u32 ivalue) +static int nfs4_realloc_slot_table(struct nfs4_slot_table *tbl, + u32 max_reqs, u32 ivalue) { - struct nfs4_slot *new = NULL; - int ret = -ENOMEM; + int ret; dprintk("--> %s: max_reqs=%u, tbl->max_slots %d\n", __func__, max_reqs, tbl->max_slots); - /* Does the newly negotiated max_reqs match the existing slot table? */ - if (max_reqs != tbl->max_slots) { - new = nfs4_alloc_slots(tbl, max_reqs, GFP_NOFS); - if (!new) - goto out; - } - ret = 0; + if (max_reqs > NFS4_MAX_SLOT_TABLE) + max_reqs = NFS4_MAX_SLOT_TABLE; + + ret = nfs4_grow_slot_table(tbl, max_reqs, ivalue); + if (ret) + goto out; + + spin_lock(&tbl->slot_tbl_lock); + nfs4_reset_slot_table(tbl, max_reqs - 1, ivalue); + spin_unlock(&tbl->slot_tbl_lock); - nfs4_add_and_init_slots(tbl, new, max_reqs, ivalue); dprintk("%s: tbl=%p slots=%p max_slots=%d\n", __func__, tbl, tbl->slots, tbl->max_slots); out: @@ -5786,18 +5830,28 @@ out: return ret; } +int nfs4_resize_slot_table(struct nfs4_slot_table *tbl, + u32 max_reqs, u32 ivalue) +{ + int ret; + + if (max_reqs > NFS4_MAX_SLOT_TABLE) + max_reqs = NFS4_MAX_SLOT_TABLE; + ret = nfs4_grow_slot_table(tbl, max_reqs, ivalue); + if (ret) + return ret; + spin_lock(&tbl->slot_tbl_lock); + nfs4_shrink_slot_table(tbl, max_reqs); + tbl->max_slotid = max_reqs - 1; + spin_unlock(&tbl->slot_tbl_lock); + return 0; +} + /* Destroy the slot table */ static void nfs4_destroy_slot_tables(struct nfs4_session *session) { - if (session->fc_slot_table.slots != NULL) { - kfree(session->fc_slot_table.slots); - session->fc_slot_table.slots = NULL; - } - if (session->bc_slot_table.slots != NULL) { - kfree(session->bc_slot_table.slots); - session->bc_slot_table.slots = NULL; - } - return; + nfs4_shrink_slot_table(&session->fc_slot_table, 0); + nfs4_shrink_slot_table(&session->bc_slot_table, 0); } /* diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index 1b7fa73c9436..c14b2c7ac8a7 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -2025,29 +2025,15 @@ out: static int nfs4_recall_slot(struct nfs_client *clp) { struct nfs4_slot_table *fc_tbl; - struct nfs4_slot *new, *old; - int i; + u32 new_size; if (!nfs4_has_session(clp)) return 0; nfs4_begin_drain_session(clp); - fc_tbl = &clp->cl_session->fc_slot_table; - new = nfs4_alloc_slots(fc_tbl, fc_tbl->target_highest_slotid + 1, GFP_NOFS); - if (!new) - return -ENOMEM; - spin_lock(&fc_tbl->slot_tbl_lock); - for (i = 0; i <= fc_tbl->target_highest_slotid; i++) - new[i].seq_nr = fc_tbl->slots[i].seq_nr; - old = fc_tbl->slots; - fc_tbl->slots = new; - fc_tbl->max_slots = fc_tbl->target_highest_slotid + 1; - fc_tbl->max_slotid = fc_tbl->target_highest_slotid; - 
clp->cl_session->fc_attrs.max_reqs = fc_tbl->max_slots; - spin_unlock(&fc_tbl->slot_tbl_lock); - - kfree(old); - return 0; + fc_tbl = &clp->cl_session->fc_slot_table; + new_size = fc_tbl->server_highest_slotid + 1; + return nfs4_resize_slot_table(fc_tbl, new_size, 1); } static int nfs4_bind_conn_to_session(struct nfs_client *clp) diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 3ddb08fba935..44d256f6021c 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -188,6 +188,7 @@ struct nfs4_channel_attrs { /* nfs41 sessions slot seqid */ struct nfs4_slot { struct nfs4_slot_table *table; + struct nfs4_slot *next; unsigned long generation; unsigned long renewal_time; u32 slot_nr; -- cgit v1.2.3 From c34309a45ea491e5f0c0d0af49ccfa018ff35fc1 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Mon, 26 Nov 2012 14:33:03 -0500 Subject: NFS: Remove unused function slot_idx Signed-off-by: Trond Myklebust --- include/linux/nfs_fs_sb.h | 5 ----- 1 file changed, 5 deletions(-) (limited to 'include/linux') diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h index 30715508fade..e707c1b69796 100644 --- a/include/linux/nfs_fs_sb.h +++ b/include/linux/nfs_fs_sb.h @@ -225,11 +225,6 @@ struct nfs4_slot_table { struct completion complete; }; -static inline int slot_idx(struct nfs4_slot_table *tbl, struct nfs4_slot *sp) -{ - return sp - tbl->slots; -} - /* * Session related parameters */ -- cgit v1.2.3 From 76e697ba7e8d187f50e385d21a2b2f1709a62c14 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Mon, 26 Nov 2012 14:20:49 -0500 Subject: NFSv4.1: Move slot table and session struct definitions to nfs4session.h Clean up. Gather NFSv4.1 slot definitions in fs/nfs/nfs4session.h. Signed-off-by: Trond Myklebust --- fs/nfs/blocklayout/blocklayout.c | 1 + fs/nfs/callback_xdr.c | 1 + fs/nfs/internal.h | 21 -------- fs/nfs/nfs4_fs.h | 12 ----- fs/nfs/nfs4filelayout.c | 1 + fs/nfs/nfs4session.h | 101 +++++++++++++++++++++++++++++++++++++++ fs/nfs/nfs4state.c | 1 + fs/nfs/nfs4xdr.c | 1 + fs/nfs/super.c | 1 + include/linux/nfs_fs_sb.h | 49 ------------------- include/linux/nfs_xdr.h | 11 +---- 11 files changed, 108 insertions(+), 92 deletions(-) (limited to 'include/linux') diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c index f1027b06a1a9..4fa788c93f46 100644 --- a/fs/nfs/blocklayout/blocklayout.c +++ b/fs/nfs/blocklayout/blocklayout.c @@ -40,6 +40,7 @@ #include #include "../pnfs.h" +#include "../nfs4session.h" #include "../internal.h" #include "blocklayout.h" diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c index ea6a7b190e6b..59461c957d9d 100644 --- a/fs/nfs/callback_xdr.c +++ b/fs/nfs/callback_xdr.c @@ -16,6 +16,7 @@ #include "nfs4_fs.h" #include "callback.h" #include "internal.h" +#include "nfs4session.h" #define CB_OP_TAGLEN_MAXSZ (512) #define CB_OP_HDR_RES_MAXSZ (2 + CB_OP_TAGLEN_MAXSZ) diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h index 8965a998b306..9bdbfc3884a9 100644 --- a/fs/nfs/internal.h +++ b/fs/nfs/internal.h @@ -18,27 +18,6 @@ struct nfs_string; */ #define NFS_MAX_READAHEAD (RPC_DEF_SLOT_TABLE - 1) -/* - * Determine if sessions are in use. 
- */ -static inline int nfs4_has_session(const struct nfs_client *clp) -{ -#ifdef CONFIG_NFS_V4_1 - if (clp->cl_session) - return 1; -#endif /* CONFIG_NFS_V4_1 */ - return 0; -} - -static inline int nfs4_has_persistent_session(const struct nfs_client *clp) -{ -#ifdef CONFIG_NFS_V4_1 - if (nfs4_has_session(clp)) - return (clp->cl_session->flags & SESSION4_PERSIST); -#endif /* CONFIG_NFS_V4_1 */ - return 0; -} - static inline void nfs_attr_check_mountpoint(struct super_block *parent, struct nfs_fattr *fattr) { if (!nfs_fsid_equal(&NFS_SB(parent)->fsid, &fattr->fsid)) diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h index cd3e3096b60a..322bd0168ebf 100644 --- a/fs/nfs/nfs4_fs.h +++ b/fs/nfs/nfs4_fs.h @@ -29,11 +29,6 @@ enum nfs4_client_state { NFS4CLNT_BIND_CONN_TO_SESSION, }; -enum nfs4_session_state { - NFS4_SESSION_INITING, - NFS4_SESSION_DRAINING, -}; - #define NFS4_RENEW_TIMEOUT 0x01 #define NFS4_RENEW_DELEGATION_CB 0x02 @@ -327,13 +322,6 @@ int nfs41_discover_server_trunking(struct nfs_client *clp, extern void nfs4_schedule_session_recovery(struct nfs4_session *, int); extern void nfs41_server_notify_target_slotid_update(struct nfs_client *clp); -extern void nfs4_session_drain_complete(struct nfs4_session *session, - struct nfs4_slot_table *tbl); - -static inline bool nfs4_session_draining(struct nfs4_session *session) -{ - return !!test_bit(NFS4_SESSION_DRAINING, &session->session_state); -} #else static inline void nfs4_schedule_session_recovery(struct nfs4_session *session, int err) { diff --git a/fs/nfs/nfs4filelayout.c b/fs/nfs/nfs4filelayout.c index bfb28fa38e74..591a1a7f8f94 100644 --- a/fs/nfs/nfs4filelayout.c +++ b/fs/nfs/nfs4filelayout.c @@ -35,6 +35,7 @@ #include +#include "nfs4session.h" #include "internal.h" #include "delegation.h" #include "nfs4filelayout.h" diff --git a/fs/nfs/nfs4session.h b/fs/nfs/nfs4session.h index cb47b1eb0886..e96323ff1d95 100644 --- a/fs/nfs/nfs4session.h +++ b/fs/nfs/nfs4session.h @@ -7,6 +7,68 @@ #ifndef __LINUX_FS_NFS_NFS4SESSION_H #define __LINUX_FS_NFS_NFS4SESSION_H +/* maximum number of slots to use */ +#define NFS4_DEF_SLOT_TABLE_SIZE (16U) +#define NFS4_MAX_SLOT_TABLE (256U) +#define NFS4_NO_SLOT ((u32)-1) + +#if IS_ENABLED(CONFIG_NFS_V4) + +/* Sessions slot seqid */ +struct nfs4_slot { + struct nfs4_slot_table *table; + struct nfs4_slot *next; + unsigned long generation; + unsigned long renewal_time; + u32 slot_nr; + u32 seq_nr; +}; + +/* Sessions */ +#define SLOT_TABLE_SZ DIV_ROUND_UP(NFS4_MAX_SLOT_TABLE, 8*sizeof(long)) +struct nfs4_slot_table { + struct nfs4_session *session; /* Parent session */ + struct nfs4_slot *slots; /* seqid per slot */ + unsigned long used_slots[SLOT_TABLE_SZ]; /* used/unused bitmap */ + spinlock_t slot_tbl_lock; + struct rpc_wait_queue slot_tbl_waitq; /* allocators may wait here */ + u32 max_slots; /* # slots in table */ + u32 max_slotid; /* Max allowed slotid value */ + u32 highest_used_slotid; /* sent to server on each SEQ. 
+ * op for dynamic resizing */ + u32 target_highest_slotid; /* Server max_slot target */ + u32 server_highest_slotid; /* Server highest slotid */ + unsigned long generation; /* Generation counter for + target_highest_slotid */ + struct completion complete; +}; + +/* + * Session related parameters + */ +struct nfs4_session { + struct nfs4_sessionid sess_id; + u32 flags; + unsigned long session_state; + u32 hash_alg; + u32 ssv_len; + + /* The fore and back channel */ + struct nfs4_channel_attrs fc_attrs; + struct nfs4_slot_table fc_slot_table; + struct nfs4_channel_attrs bc_attrs; + struct nfs4_slot_table bc_slot_table; + struct nfs_client *clp; + /* Create session arguments */ + unsigned int fc_target_max_rqst_sz; + unsigned int fc_target_max_resp_sz; +}; + +enum nfs4_session_state { + NFS4_SESSION_INITING, + NFS4_SESSION_DRAINING, +}; + #if defined(CONFIG_NFS_V4_1) extern struct nfs4_slot *nfs4_alloc_slot(struct nfs4_slot_table *tbl); extern void nfs4_free_slot(struct nfs4_slot_table *tbl, struct nfs4_slot *slot); @@ -24,6 +86,31 @@ extern void nfs4_destroy_session(struct nfs4_session *session); extern int nfs4_init_session(struct nfs_server *server); extern int nfs4_init_ds_session(struct nfs_client *, unsigned long); +extern void nfs4_session_drain_complete(struct nfs4_session *session, + struct nfs4_slot_table *tbl); + +static inline bool nfs4_session_draining(struct nfs4_session *session) +{ + return !!test_bit(NFS4_SESSION_DRAINING, &session->session_state); +} + +/* + * Determine if sessions are in use. + */ +static inline int nfs4_has_session(const struct nfs_client *clp) +{ + if (clp->cl_session) + return 1; + return 0; +} + +static inline int nfs4_has_persistent_session(const struct nfs_client *clp) +{ + if (nfs4_has_session(clp)) + return (clp->cl_session->flags & SESSION4_PERSIST); + return 0; +} + #else /* defined(CONFIG_NFS_V4_1) */ static inline int nfs4_init_session(struct nfs_server *server) @@ -31,5 +118,19 @@ static inline int nfs4_init_session(struct nfs_server *server) return 0; } +/* + * Determine if sessions are in use. 
+ */ +static inline int nfs4_has_session(const struct nfs_client *clp) +{ + return 0; +} + +static inline int nfs4_has_persistent_session(const struct nfs_client *clp) +{ + return 0; +} + #endif /* defined(CONFIG_NFS_V4_1) */ +#endif /* IS_ENABLED(CONFIG_NFS_V4) */ #endif /* __LINUX_FS_NFS_NFS4SESSION_H */ diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index 1077b9698381..1402283d152d 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -57,6 +57,7 @@ #include "callback.h" #include "delegation.h" #include "internal.h" +#include "nfs4session.h" #include "pnfs.h" #include "netns.h" diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index a67040f51597..e786dc7582b1 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -56,6 +56,7 @@ #include "nfs4_fs.h" #include "internal.h" +#include "nfs4session.h" #include "pnfs.h" #include "netns.h" diff --git a/fs/nfs/super.c b/fs/nfs/super.c index 652d3f7176a9..e12cea4b36a5 100644 --- a/fs/nfs/super.c +++ b/fs/nfs/super.c @@ -64,6 +64,7 @@ #include "iostat.h" #include "internal.h" #include "fscache.h" +#include "nfs4session.h" #include "pnfs.h" #include "nfs.h" diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h index e707c1b69796..6c6ed153a9b4 100644 --- a/include/linux/nfs_fs_sb.h +++ b/include/linux/nfs_fs_sb.h @@ -198,53 +198,4 @@ struct nfs_server { #define NFS_CAP_POSIX_LOCK (1U << 14) #define NFS_CAP_UIDGID_NOMAP (1U << 15) - -/* maximum number of slots to use */ -#define NFS4_DEF_SLOT_TABLE_SIZE (16U) -#define NFS4_MAX_SLOT_TABLE (256U) -#define NFS4_NO_SLOT ((u32)-1) - -#if IS_ENABLED(CONFIG_NFS_V4) - -/* Sessions */ -#define SLOT_TABLE_SZ DIV_ROUND_UP(NFS4_MAX_SLOT_TABLE, 8*sizeof(long)) -struct nfs4_slot_table { - struct nfs4_session *session; /* Parent session */ - struct nfs4_slot *slots; /* seqid per slot */ - unsigned long used_slots[SLOT_TABLE_SZ]; /* used/unused bitmap */ - spinlock_t slot_tbl_lock; - struct rpc_wait_queue slot_tbl_waitq; /* allocators may wait here */ - u32 max_slots; /* # slots in table */ - u32 max_slotid; /* Max allowed slotid value */ - u32 highest_used_slotid; /* sent to server on each SEQ. 
- * op for dynamic resizing */ - u32 target_highest_slotid; /* Server max_slot target */ - u32 server_highest_slotid; /* Server highest slotid */ - unsigned long generation; /* Generation counter for - target_highest_slotid */ - struct completion complete; -}; - -/* - * Session related parameters - */ -struct nfs4_session { - struct nfs4_sessionid sess_id; - u32 flags; - unsigned long session_state; - u32 hash_alg; - u32 ssv_len; - - /* The fore and back channel */ - struct nfs4_channel_attrs fc_attrs; - struct nfs4_slot_table fc_slot_table; - struct nfs4_channel_attrs bc_attrs; - struct nfs4_slot_table bc_slot_table; - struct nfs_client *clp; - /* Create session arguments */ - unsigned int fc_target_max_rqst_sz; - unsigned int fc_target_max_resp_sz; -}; - -#endif /* CONFIG_NFS_V4 */ #endif diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 44d256f6021c..2076149db1a4 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -185,16 +185,7 @@ struct nfs4_channel_attrs { u32 max_reqs; }; -/* nfs41 sessions slot seqid */ -struct nfs4_slot { - struct nfs4_slot_table *table; - struct nfs4_slot *next; - unsigned long generation; - unsigned long renewal_time; - u32 slot_nr; - u32 seq_nr; -}; - +struct nfs4_slot; struct nfs4_sequence_args { struct nfs4_slot *sa_slot; u8 sa_cache_this; -- cgit v1.2.3 From 8fe72bac8de784c4059b41a7dd6bb0151a3ae898 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Mon, 29 Oct 2012 19:02:20 -0400 Subject: NFSv4: Clean up handling of privileged operations Privileged rpc calls are those that are run by the state recovery thread, in cases where we're trying to recover the system after a server reboot or a network partition. In those cases, we want to fence off all other rpc calls (see nfs4_begin_drain_session()) so that they don't end up using stateids or clientids that are in the process of being recovered. Prior to this patch, we had to set up special callback functions in order to declare an rpc call as being privileged. By adding a new field to the sequence arguments, this patch simplifies things considerably, and allows us to declare the rpc call as privileged before it is run. 
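As a rough illustration of the pattern this change adopts (a stand-alone user-space C sketch with made-up names, not the kernel code itself): the caller marks the per-request sequence arguments as privileged before the RPC is dispatched, and the one common setup path consults that flag instead of relying on a specially installed "privileged" prepare callback.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the sequence arguments and session state. */
struct seq_args {
	bool privileged;		/* set before the request is run */
};

struct session {
	bool draining;			/* recovery is fencing off new requests */
};

static void set_sequence_privileged(struct seq_args *args)
{
	args->privileged = true;
}

/* One common setup path; no special "privileged" callback variant needed. */
static int setup_sequence(const struct session *s, const struct seq_args *args)
{
	if (s->draining && !args->privileged) {
		printf("queued: session is draining\n");
		return -1;		/* would sleep on the slot table waitqueue */
	}
	printf("dispatched%s\n", args->privileged ? " (privileged)" : "");
	return 0;
}

int main(void)
{
	struct session s = { .draining = true };
	struct seq_args ordinary = { .privileged = false };
	struct seq_args recovery = { .privileged = false };

	set_sequence_privileged(&recovery);
	setup_sequence(&s, &ordinary);	/* held back by the drain */
	setup_sequence(&s, &recovery);	/* allowed through */
	return 0;
}

In the patch below this takes the form of the sa_privileged bit added to the sequence arguments, set via nfs4_set_sequence_privileged() and tested in nfs41_setup_sequence().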
Signed-off-by: Trond Myklebust --- fs/nfs/nfs4proc.c | 114 ++++++++++++++++++------------------------------ include/linux/nfs_xdr.h | 3 +- 2 files changed, 44 insertions(+), 73 deletions(-) (limited to 'include/linux') diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 4b1635ce658d..38a709d78594 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -490,11 +490,17 @@ static void nfs41_init_sequence(struct nfs4_sequence_args *args, { args->sa_slot = NULL; args->sa_cache_this = 0; + args->sa_privileged = 0; if (cache_reply) args->sa_cache_this = 1; res->sr_slot = NULL; } +static void nfs4_set_sequence_privileged(struct nfs4_sequence_args *args) +{ + args->sa_privileged = 1; +} + int nfs41_setup_sequence(struct nfs4_session *session, struct nfs4_sequence_args *args, struct nfs4_sequence_res *res, @@ -514,7 +520,7 @@ int nfs41_setup_sequence(struct nfs4_session *session, spin_lock(&tbl->slot_tbl_lock); if (test_bit(NFS4_SESSION_DRAINING, &session->session_state) && - !rpc_task_has_priority(task, RPC_PRIORITY_PRIVILEGED)) { + !args->sa_privileged) { /* The state manager will wait until the slot table is empty */ dprintk("%s session is draining\n", __func__); goto out_sleep; @@ -548,6 +554,9 @@ out_success: rpc_call_start(task); return 0; out_sleep: + /* Privileged tasks are queued with top priority */ + if (args->sa_privileged) + rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED); rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL); spin_unlock(&tbl->slot_tbl_lock); return -EAGAIN; @@ -593,12 +602,6 @@ static void nfs41_call_sync_prepare(struct rpc_task *task, void *calldata) nfs41_setup_sequence(session, data->seq_args, data->seq_res, task); } -static void nfs41_call_priv_sync_prepare(struct rpc_task *task, void *calldata) -{ - rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED); - nfs41_call_sync_prepare(task, calldata); -} - static void nfs41_call_sync_done(struct rpc_task *task, void *calldata) { struct nfs41_call_sync_data *data = calldata; @@ -611,17 +614,11 @@ static const struct rpc_call_ops nfs41_call_sync_ops = { .rpc_call_done = nfs41_call_sync_done, }; -static const struct rpc_call_ops nfs41_call_priv_sync_ops = { - .rpc_call_prepare = nfs41_call_priv_sync_prepare, - .rpc_call_done = nfs41_call_sync_done, -}; - static int nfs4_call_sync_sequence(struct rpc_clnt *clnt, struct nfs_server *server, struct rpc_message *msg, struct nfs4_sequence_args *args, - struct nfs4_sequence_res *res, - int privileged) + struct nfs4_sequence_res *res) { int ret; struct rpc_task *task; @@ -637,8 +634,6 @@ static int nfs4_call_sync_sequence(struct rpc_clnt *clnt, .callback_data = &data }; - if (privileged) - task_setup.callback_ops = &nfs41_call_priv_sync_ops; task = rpc_run_task(&task_setup); if (IS_ERR(task)) ret = PTR_ERR(task); @@ -656,16 +651,21 @@ int _nfs4_call_sync_session(struct rpc_clnt *clnt, struct nfs4_sequence_args *args, struct nfs4_sequence_res *res) { - return nfs4_call_sync_sequence(clnt, server, msg, args, res, 0); + return nfs4_call_sync_sequence(clnt, server, msg, args, res); } #else -static inline +static void nfs41_init_sequence(struct nfs4_sequence_args *args, struct nfs4_sequence_res *res, int cache_reply) { } +static void nfs4_set_sequence_privileged(struct nfs4_sequence_args *args) +{ +} + + static int nfs4_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res) { @@ -1475,13 +1475,6 @@ unlock_no_action: rcu_read_unlock(); out_no_action: task->tk_action = NULL; - -} - -static void nfs4_recover_open_prepare(struct rpc_task *task, void *calldata) -{ - 
rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED); - nfs4_open_prepare(task, calldata); } static void nfs4_open_done(struct rpc_task *task, void *calldata) @@ -1542,12 +1535,6 @@ static const struct rpc_call_ops nfs4_open_ops = { .rpc_release = nfs4_open_release, }; -static const struct rpc_call_ops nfs4_recover_open_ops = { - .rpc_call_prepare = nfs4_recover_open_prepare, - .rpc_call_done = nfs4_open_done, - .rpc_release = nfs4_open_release, -}; - static int nfs4_run_open_task(struct nfs4_opendata *data, int isrecover) { struct inode *dir = data->dir->d_inode; @@ -1577,7 +1564,7 @@ static int nfs4_run_open_task(struct nfs4_opendata *data, int isrecover) data->rpc_status = 0; data->cancelled = 0; if (isrecover) - task_setup_data.callback_ops = &nfs4_recover_open_ops; + nfs4_set_sequence_privileged(&o_arg->seq_args); task = rpc_run_task(&task_setup_data); if (IS_ERR(task)) return PTR_ERR(task); @@ -4558,8 +4545,9 @@ static void nfs4_lock_prepare(struct rpc_task *task, void *calldata) return; /* Do we need to do an open_to_lock_owner? */ if (!(data->arg.lock_seqid->sequence->flags & NFS_SEQID_CONFIRMED)) { - if (nfs_wait_on_sequence(data->arg.open_seqid, task) != 0) + if (nfs_wait_on_sequence(data->arg.open_seqid, task) != 0) { goto out_release_lock_seqid; + } data->arg.open_stateid = &state->stateid; data->arg.new_lock_owner = 1; data->res.open_seqid = data->arg.open_seqid; @@ -4574,13 +4562,7 @@ static void nfs4_lock_prepare(struct rpc_task *task, void *calldata) nfs_release_seqid(data->arg.open_seqid); out_release_lock_seqid: nfs_release_seqid(data->arg.lock_seqid); - dprintk("%s: done!, ret = %d\n", __func__, task->tk_status); -} - -static void nfs4_recover_lock_prepare(struct rpc_task *task, void *calldata) -{ - rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED); - nfs4_lock_prepare(task, calldata); + dprintk("%s: done!, ret = %d\n", __func__, data->rpc_status); } static void nfs4_lock_done(struct rpc_task *task, void *calldata) @@ -4635,12 +4617,6 @@ static const struct rpc_call_ops nfs4_lock_ops = { .rpc_release = nfs4_lock_release, }; -static const struct rpc_call_ops nfs4_recover_lock_ops = { - .rpc_call_prepare = nfs4_recover_lock_prepare, - .rpc_call_done = nfs4_lock_done, - .rpc_release = nfs4_lock_release, -}; - static void nfs4_handle_setlk_error(struct nfs_server *server, struct nfs4_lock_state *lsp, int new_lock_owner, int error) { switch (error) { @@ -4683,15 +4659,15 @@ static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *f return -ENOMEM; if (IS_SETLKW(cmd)) data->arg.block = 1; - if (recovery_type > NFS_LOCK_NEW) { - if (recovery_type == NFS_LOCK_RECLAIM) - data->arg.reclaim = NFS_LOCK_RECLAIM; - task_setup_data.callback_ops = &nfs4_recover_lock_ops; - } nfs41_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1); msg.rpc_argp = &data->arg; msg.rpc_resp = &data->res; task_setup_data.callback_data = data; + if (recovery_type > NFS_LOCK_NEW) { + if (recovery_type == NFS_LOCK_RECLAIM) + data->arg.reclaim = NFS_LOCK_RECLAIM; + nfs4_set_sequence_privileged(&data->arg.seq_args); + } task = rpc_run_task(&task_setup_data); if (IS_ERR(task)) return PTR_ERR(task); @@ -5432,7 +5408,6 @@ static void nfs4_get_lease_time_prepare(struct rpc_task *task, (struct nfs4_get_lease_time_data *)calldata; dprintk("--> %s\n", __func__); - rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED); /* just setup sequence, do not trigger session recovery since we're invoked within one */ nfs41_setup_sequence(data->clp->cl_session, @@ -5500,6 +5475,7 @@ int 
nfs4_proc_get_lease_time(struct nfs_client *clp, struct nfs_fsinfo *fsinfo) int status; nfs41_init_sequence(&args.la_seq_args, &res.lr_seq_res, 0); + nfs4_set_sequence_privileged(&args.la_seq_args); dprintk("--> %s\n", __func__); task = rpc_run_task(&task_setup); @@ -5775,26 +5751,15 @@ static void nfs41_sequence_prepare(struct rpc_task *task, void *data) nfs41_setup_sequence(clp->cl_session, args, res, task); } -static void nfs41_sequence_prepare_privileged(struct rpc_task *task, void *data) -{ - rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED); - nfs41_sequence_prepare(task, data); -} - static const struct rpc_call_ops nfs41_sequence_ops = { .rpc_call_done = nfs41_sequence_call_done, .rpc_call_prepare = nfs41_sequence_prepare, .rpc_release = nfs41_sequence_release, }; -static const struct rpc_call_ops nfs41_sequence_privileged_ops = { - .rpc_call_done = nfs41_sequence_call_done, - .rpc_call_prepare = nfs41_sequence_prepare_privileged, - .rpc_release = nfs41_sequence_release, -}; - -static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred, - const struct rpc_call_ops *seq_ops) +static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp, + struct rpc_cred *cred, + bool is_privileged) { struct nfs4_sequence_data *calldata; struct rpc_message msg = { @@ -5804,7 +5769,7 @@ static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp, struct rpc_ struct rpc_task_setup task_setup_data = { .rpc_client = clp->cl_rpcclient, .rpc_message = &msg, - .callback_ops = seq_ops, + .callback_ops = &nfs41_sequence_ops, .flags = RPC_TASK_ASYNC | RPC_TASK_SOFT, }; @@ -5816,6 +5781,8 @@ static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp, struct rpc_ return ERR_PTR(-ENOMEM); } nfs41_init_sequence(&calldata->args, &calldata->res, 0); + if (is_privileged) + nfs4_set_sequence_privileged(&calldata->args); msg.rpc_argp = &calldata->args; msg.rpc_resp = &calldata->res; calldata->clp = clp; @@ -5831,7 +5798,7 @@ static int nfs41_proc_async_sequence(struct nfs_client *clp, struct rpc_cred *cr if ((renew_flags & NFS4_RENEW_TIMEOUT) == 0) return 0; - task = _nfs41_proc_sequence(clp, cred, &nfs41_sequence_ops); + task = _nfs41_proc_sequence(clp, cred, false); if (IS_ERR(task)) ret = PTR_ERR(task); else @@ -5845,7 +5812,7 @@ static int nfs4_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred) struct rpc_task *task; int ret; - task = _nfs41_proc_sequence(clp, cred, &nfs41_sequence_privileged_ops); + task = _nfs41_proc_sequence(clp, cred, true); if (IS_ERR(task)) { ret = PTR_ERR(task); goto out; @@ -5874,7 +5841,6 @@ static void nfs4_reclaim_complete_prepare(struct rpc_task *task, void *data) { struct nfs4_reclaim_complete_data *calldata = data; - rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED); nfs41_setup_sequence(calldata->clp->cl_session, &calldata->arg.seq_args, &calldata->res.seq_res, @@ -5955,6 +5921,7 @@ static int nfs41_proc_reclaim_complete(struct nfs_client *clp) calldata->arg.one_fs = 0; nfs41_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 0); + nfs4_set_sequence_privileged(&calldata->arg.seq_args); msg.rpc_argp = &calldata->arg; msg.rpc_resp = &calldata->res; task_setup_data.callback_data = calldata; @@ -6521,7 +6488,9 @@ static int _nfs41_test_stateid(struct nfs_server *server, nfs4_stateid *stateid) dprintk("NFS call test_stateid %p\n", stateid); nfs41_init_sequence(&args.seq_args, &res.seq_res, 0); - status = nfs4_call_sync_sequence(server->client, server, &msg, &args.seq_args, &res.seq_res, 1); + 
nfs4_set_sequence_privileged(&args.seq_args); + status = nfs4_call_sync_sequence(server->client, server, &msg, + &args.seq_args, &res.seq_res); if (status != NFS_OK) { dprintk("NFS reply test_stateid: failed, %d\n", status); return status; @@ -6568,8 +6537,9 @@ static int _nfs4_free_stateid(struct nfs_server *server, nfs4_stateid *stateid) dprintk("NFS call free_stateid %p\n", stateid); nfs41_init_sequence(&args.seq_args, &res.seq_res, 0); + nfs4_set_sequence_privileged(&args.seq_args); status = nfs4_call_sync_sequence(server->client, server, &msg, - &args.seq_args, &res.seq_res, 1); + &args.seq_args, &res.seq_res); dprintk("NFS reply free_stateid: %d\n", status); return status; } diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 2076149db1a4..baa673edb597 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -188,7 +188,8 @@ struct nfs4_channel_attrs { struct nfs4_slot; struct nfs4_sequence_args { struct nfs4_slot *sa_slot; - u8 sa_cache_this; + u8 sa_cache_this : 1, + sa_privileged : 1; }; struct nfs4_sequence_res { -- cgit v1.2.3 From 62ae082d883d167cdaa7895cf2972d85e178228a Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Thu, 29 Nov 2012 17:10:01 -0500 Subject: NFSv4: Reorder the XDR structures to put sequence at the top, not bottom Pre-condition for optimising the slot allocation and reintroducing FIFO behaviour. Signed-off-by: Trond Myklebust --- include/linux/nfs_xdr.h | 138 ++++++++++++++++++++++++------------------------ 1 file changed, 69 insertions(+), 69 deletions(-) (limited to 'include/linux') diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index baa673edb597..a55abd499c21 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -205,8 +205,8 @@ struct nfs4_get_lease_time_args { }; struct nfs4_get_lease_time_res { - struct nfs_fsinfo *lr_fsinfo; struct nfs4_sequence_res lr_seq_res; + struct nfs_fsinfo *lr_fsinfo; }; #define PNFS_LAYOUT_MAXSIZE 4096 @@ -224,23 +224,23 @@ struct pnfs_layout_range { }; struct nfs4_layoutget_args { + struct nfs4_sequence_args seq_args; __u32 type; struct pnfs_layout_range range; __u64 minlength; __u32 maxcount; struct inode *inode; struct nfs_open_context *ctx; - struct nfs4_sequence_args seq_args; nfs4_stateid stateid; struct nfs4_layoutdriver_data layout; }; struct nfs4_layoutget_res { + struct nfs4_sequence_res seq_res; __u32 return_on_close; struct pnfs_layout_range range; __u32 type; nfs4_stateid stateid; - struct nfs4_sequence_res seq_res; struct nfs4_layoutdriver_data *layoutp; }; @@ -251,38 +251,38 @@ struct nfs4_layoutget { }; struct nfs4_getdevicelist_args { + struct nfs4_sequence_args seq_args; const struct nfs_fh *fh; u32 layoutclass; - struct nfs4_sequence_args seq_args; }; struct nfs4_getdevicelist_res { - struct pnfs_devicelist *devlist; struct nfs4_sequence_res seq_res; + struct pnfs_devicelist *devlist; }; struct nfs4_getdeviceinfo_args { - struct pnfs_device *pdev; struct nfs4_sequence_args seq_args; + struct pnfs_device *pdev; }; struct nfs4_getdeviceinfo_res { - struct pnfs_device *pdev; struct nfs4_sequence_res seq_res; + struct pnfs_device *pdev; }; struct nfs4_layoutcommit_args { + struct nfs4_sequence_args seq_args; nfs4_stateid stateid; __u64 lastbytewritten; struct inode *inode; const u32 *bitmask; - struct nfs4_sequence_args seq_args; }; struct nfs4_layoutcommit_res { + struct nfs4_sequence_res seq_res; struct nfs_fattr *fattr; const struct nfs_server *server; - struct nfs4_sequence_res seq_res; int status; }; @@ -296,11 +296,11 @@ struct 
nfs4_layoutcommit_data { }; struct nfs4_layoutreturn_args { + struct nfs4_sequence_args seq_args; struct pnfs_layout_hdr *layout; struct inode *inode; nfs4_stateid stateid; __u32 layout_type; - struct nfs4_sequence_args seq_args; }; struct nfs4_layoutreturn_res { @@ -326,6 +326,7 @@ struct stateowner_id { * Arguments to the open call. */ struct nfs_openargs { + struct nfs4_sequence_args seq_args; const struct nfs_fh * fh; struct nfs_seqid * seqid; int open_flags; @@ -346,10 +347,10 @@ struct nfs_openargs { const u32 * bitmask; const u32 * open_bitmap; __u32 claim; - struct nfs4_sequence_args seq_args; }; struct nfs_openres { + struct nfs4_sequence_res seq_res; nfs4_stateid stateid; struct nfs_fh fh; struct nfs4_change_info cinfo; @@ -364,7 +365,6 @@ struct nfs_openres { __u32 attrset[NFS4_BITMAP_SIZE]; struct nfs4_string *owner; struct nfs4_string *group_owner; - struct nfs4_sequence_res seq_res; __u32 access_request; __u32 access_supported; __u32 access_result; @@ -388,20 +388,20 @@ struct nfs_open_confirmres { * Arguments to the close call. */ struct nfs_closeargs { + struct nfs4_sequence_args seq_args; struct nfs_fh * fh; nfs4_stateid * stateid; struct nfs_seqid * seqid; fmode_t fmode; const u32 * bitmask; - struct nfs4_sequence_args seq_args; }; struct nfs_closeres { + struct nfs4_sequence_res seq_res; nfs4_stateid stateid; struct nfs_fattr * fattr; struct nfs_seqid * seqid; const struct nfs_server *server; - struct nfs4_sequence_res seq_res; }; /* * * Arguments to the lock,lockt, and locku call. @@ -413,6 +413,7 @@ struct nfs_lowner { }; struct nfs_lock_args { + struct nfs4_sequence_args seq_args; struct nfs_fh * fh; struct file_lock * fl; struct nfs_seqid * lock_seqid; @@ -423,40 +424,39 @@ struct nfs_lock_args { unsigned char block : 1; unsigned char reclaim : 1; unsigned char new_lock_owner : 1; - struct nfs4_sequence_args seq_args; }; struct nfs_lock_res { + struct nfs4_sequence_res seq_res; nfs4_stateid stateid; struct nfs_seqid * lock_seqid; struct nfs_seqid * open_seqid; - struct nfs4_sequence_res seq_res; }; struct nfs_locku_args { + struct nfs4_sequence_args seq_args; struct nfs_fh * fh; struct file_lock * fl; struct nfs_seqid * seqid; nfs4_stateid * stateid; - struct nfs4_sequence_args seq_args; }; struct nfs_locku_res { + struct nfs4_sequence_res seq_res; nfs4_stateid stateid; struct nfs_seqid * seqid; - struct nfs4_sequence_res seq_res; }; struct nfs_lockt_args { + struct nfs4_sequence_args seq_args; struct nfs_fh * fh; struct file_lock * fl; struct nfs_lowner lock_owner; - struct nfs4_sequence_args seq_args; }; struct nfs_lockt_res { - struct file_lock * denied; /* LOCK, LOCKT failed */ struct nfs4_sequence_res seq_res; + struct file_lock * denied; /* LOCK, LOCKT failed */ }; struct nfs_release_lockowner_args { @@ -464,22 +464,23 @@ struct nfs_release_lockowner_args { }; struct nfs4_delegreturnargs { + struct nfs4_sequence_args seq_args; const struct nfs_fh *fhandle; const nfs4_stateid *stateid; const u32 * bitmask; - struct nfs4_sequence_args seq_args; }; struct nfs4_delegreturnres { + struct nfs4_sequence_res seq_res; struct nfs_fattr * fattr; const struct nfs_server *server; - struct nfs4_sequence_res seq_res; }; /* * Arguments to the read call. 
*/ struct nfs_readargs { + struct nfs4_sequence_args seq_args; struct nfs_fh * fh; struct nfs_open_context *context; struct nfs_lock_context *lock_context; @@ -487,20 +488,20 @@ struct nfs_readargs { __u32 count; unsigned int pgbase; struct page ** pages; - struct nfs4_sequence_args seq_args; }; struct nfs_readres { + struct nfs4_sequence_res seq_res; struct nfs_fattr * fattr; __u32 count; int eof; - struct nfs4_sequence_res seq_res; }; /* * Arguments to the write call. */ struct nfs_writeargs { + struct nfs4_sequence_args seq_args; struct nfs_fh * fh; struct nfs_open_context *context; struct nfs_lock_context *lock_context; @@ -510,7 +511,6 @@ struct nfs_writeargs { unsigned int pgbase; struct page ** pages; const u32 * bitmask; - struct nfs4_sequence_args seq_args; }; struct nfs_write_verifier { @@ -523,65 +523,65 @@ struct nfs_writeverf { }; struct nfs_writeres { + struct nfs4_sequence_res seq_res; struct nfs_fattr * fattr; struct nfs_writeverf * verf; __u32 count; const struct nfs_server *server; - struct nfs4_sequence_res seq_res; }; /* * Arguments to the commit call. */ struct nfs_commitargs { + struct nfs4_sequence_args seq_args; struct nfs_fh *fh; __u64 offset; __u32 count; const u32 *bitmask; - struct nfs4_sequence_args seq_args; }; struct nfs_commitres { + struct nfs4_sequence_res seq_res; struct nfs_fattr *fattr; struct nfs_writeverf *verf; const struct nfs_server *server; - struct nfs4_sequence_res seq_res; }; /* * Common arguments to the unlink call */ struct nfs_removeargs { + struct nfs4_sequence_args seq_args; const struct nfs_fh *fh; struct qstr name; - struct nfs4_sequence_args seq_args; }; struct nfs_removeres { + struct nfs4_sequence_res seq_res; const struct nfs_server *server; struct nfs_fattr *dir_attr; struct nfs4_change_info cinfo; - struct nfs4_sequence_res seq_res; }; /* * Common arguments to the rename call */ struct nfs_renameargs { + struct nfs4_sequence_args seq_args; const struct nfs_fh *old_dir; const struct nfs_fh *new_dir; const struct qstr *old_name; const struct qstr *new_name; - struct nfs4_sequence_args seq_args; }; struct nfs_renameres { + struct nfs4_sequence_res seq_res; const struct nfs_server *server; struct nfs4_change_info old_cinfo; struct nfs_fattr *old_fattr; struct nfs4_change_info new_cinfo; struct nfs_fattr *new_fattr; - struct nfs4_sequence_res seq_res; }; /* @@ -622,20 +622,20 @@ struct nfs_createargs { }; struct nfs_setattrargs { + struct nfs4_sequence_args seq_args; struct nfs_fh * fh; nfs4_stateid stateid; struct iattr * iap; const struct nfs_server * server; /* Needed for name mapping */ const u32 * bitmask; - struct nfs4_sequence_args seq_args; }; struct nfs_setaclargs { + struct nfs4_sequence_args seq_args; struct nfs_fh * fh; size_t acl_len; unsigned int acl_pgbase; struct page ** acl_pages; - struct nfs4_sequence_args seq_args; }; struct nfs_setaclres { @@ -643,27 +643,27 @@ struct nfs_setaclres { }; struct nfs_getaclargs { + struct nfs4_sequence_args seq_args; struct nfs_fh * fh; size_t acl_len; unsigned int acl_pgbase; struct page ** acl_pages; - struct nfs4_sequence_args seq_args; }; /* getxattr ACL interface flags */ #define NFS4_ACL_TRUNC 0x0001 /* ACL was truncated */ struct nfs_getaclres { + struct nfs4_sequence_res seq_res; size_t acl_len; size_t acl_data_offset; int acl_flags; struct page * acl_scratch; - struct nfs4_sequence_res seq_res; }; struct nfs_setattrres { + struct nfs4_sequence_res seq_res; struct nfs_fattr * fattr; const struct nfs_server * server; - struct nfs4_sequence_res seq_res; }; struct nfs_linkargs { 
@@ -828,21 +828,22 @@ struct nfs3_getaclres { typedef u64 clientid4; struct nfs4_accessargs { + struct nfs4_sequence_args seq_args; const struct nfs_fh * fh; const u32 * bitmask; u32 access; - struct nfs4_sequence_args seq_args; }; struct nfs4_accessres { + struct nfs4_sequence_res seq_res; const struct nfs_server * server; struct nfs_fattr * fattr; u32 supported; u32 access; - struct nfs4_sequence_res seq_res; }; struct nfs4_create_arg { + struct nfs4_sequence_args seq_args; u32 ftype; union { struct { @@ -859,88 +860,88 @@ struct nfs4_create_arg { const struct iattr * attrs; const struct nfs_fh * dir_fh; const u32 * bitmask; - struct nfs4_sequence_args seq_args; }; struct nfs4_create_res { + struct nfs4_sequence_res seq_res; const struct nfs_server * server; struct nfs_fh * fh; struct nfs_fattr * fattr; struct nfs4_change_info dir_cinfo; - struct nfs4_sequence_res seq_res; }; struct nfs4_fsinfo_arg { + struct nfs4_sequence_args seq_args; const struct nfs_fh * fh; const u32 * bitmask; - struct nfs4_sequence_args seq_args; }; struct nfs4_fsinfo_res { - struct nfs_fsinfo *fsinfo; struct nfs4_sequence_res seq_res; + struct nfs_fsinfo *fsinfo; }; struct nfs4_getattr_arg { + struct nfs4_sequence_args seq_args; const struct nfs_fh * fh; const u32 * bitmask; - struct nfs4_sequence_args seq_args; }; struct nfs4_getattr_res { + struct nfs4_sequence_res seq_res; const struct nfs_server * server; struct nfs_fattr * fattr; - struct nfs4_sequence_res seq_res; }; struct nfs4_link_arg { + struct nfs4_sequence_args seq_args; const struct nfs_fh * fh; const struct nfs_fh * dir_fh; const struct qstr * name; const u32 * bitmask; - struct nfs4_sequence_args seq_args; }; struct nfs4_link_res { + struct nfs4_sequence_res seq_res; const struct nfs_server * server; struct nfs_fattr * fattr; struct nfs4_change_info cinfo; struct nfs_fattr * dir_attr; - struct nfs4_sequence_res seq_res; }; struct nfs4_lookup_arg { + struct nfs4_sequence_args seq_args; const struct nfs_fh * dir_fh; const struct qstr * name; const u32 * bitmask; - struct nfs4_sequence_args seq_args; }; struct nfs4_lookup_res { + struct nfs4_sequence_res seq_res; const struct nfs_server * server; struct nfs_fattr * fattr; struct nfs_fh * fh; - struct nfs4_sequence_res seq_res; }; struct nfs4_lookup_root_arg { - const u32 * bitmask; struct nfs4_sequence_args seq_args; + const u32 * bitmask; }; struct nfs4_pathconf_arg { + struct nfs4_sequence_args seq_args; const struct nfs_fh * fh; const u32 * bitmask; - struct nfs4_sequence_args seq_args; }; struct nfs4_pathconf_res { - struct nfs_pathconf *pathconf; struct nfs4_sequence_res seq_res; + struct nfs_pathconf *pathconf; }; struct nfs4_readdir_arg { + struct nfs4_sequence_args seq_args; const struct nfs_fh * fh; u64 cookie; nfs4_verifier verifier; @@ -949,21 +950,20 @@ struct nfs4_readdir_arg { unsigned int pgbase; /* zero-copy data */ const u32 * bitmask; int plus; - struct nfs4_sequence_args seq_args; }; struct nfs4_readdir_res { + struct nfs4_sequence_res seq_res; nfs4_verifier verifier; unsigned int pgbase; - struct nfs4_sequence_res seq_res; }; struct nfs4_readlink { + struct nfs4_sequence_args seq_args; const struct nfs_fh * fh; unsigned int pgbase; unsigned int pglen; /* zero-copy data */ struct page ** pages; /* zero-copy data */ - struct nfs4_sequence_args seq_args; }; struct nfs4_readlink_res { @@ -989,28 +989,28 @@ struct nfs4_setclientid_res { }; struct nfs4_statfs_arg { + struct nfs4_sequence_args seq_args; const struct nfs_fh * fh; const u32 * bitmask; - struct nfs4_sequence_args seq_args; 
}; struct nfs4_statfs_res { - struct nfs_fsstat *fsstat; struct nfs4_sequence_res seq_res; + struct nfs_fsstat *fsstat; }; struct nfs4_server_caps_arg { - struct nfs_fh *fhandle; struct nfs4_sequence_args seq_args; + struct nfs_fh *fhandle; }; struct nfs4_server_caps_res { + struct nfs4_sequence_res seq_res; u32 attr_bitmask[3]; u32 acl_bitmask; u32 has_links; u32 has_symlinks; u32 fh_expire_type; - struct nfs4_sequence_res seq_res; }; #define NFS4_PATHNAME_MAXCOMPONENTS 512 @@ -1036,16 +1036,16 @@ struct nfs4_fs_locations { }; struct nfs4_fs_locations_arg { + struct nfs4_sequence_args seq_args; const struct nfs_fh *dir_fh; const struct qstr *name; struct page *page; const u32 *bitmask; - struct nfs4_sequence_args seq_args; }; struct nfs4_fs_locations_res { - struct nfs4_fs_locations *fs_locations; struct nfs4_sequence_res seq_res; + struct nfs4_fs_locations *fs_locations; }; struct nfs4_secinfo_oid { @@ -1070,14 +1070,14 @@ struct nfs4_secinfo_flavors { }; struct nfs4_secinfo_arg { + struct nfs4_sequence_args seq_args; const struct nfs_fh *dir_fh; const struct qstr *name; - struct nfs4_sequence_args seq_args; }; struct nfs4_secinfo_res { - struct nfs4_secinfo_flavors *flavors; struct nfs4_sequence_res seq_res; + struct nfs4_secinfo_flavors *flavors; }; #endif /* CONFIG_NFS_V4 */ @@ -1157,9 +1157,9 @@ struct nfs41_create_session_res { }; struct nfs41_reclaim_complete_args { + struct nfs4_sequence_args seq_args; /* In the future extend to include curr_fh for use with migration */ unsigned char one_fs:1; - struct nfs4_sequence_args seq_args; }; struct nfs41_reclaim_complete_res { @@ -1169,28 +1169,28 @@ struct nfs41_reclaim_complete_res { #define SECINFO_STYLE_CURRENT_FH 0 #define SECINFO_STYLE_PARENT 1 struct nfs41_secinfo_no_name_args { - int style; struct nfs4_sequence_args seq_args; + int style; }; struct nfs41_test_stateid_args { - nfs4_stateid *stateid; struct nfs4_sequence_args seq_args; + nfs4_stateid *stateid; }; struct nfs41_test_stateid_res { - unsigned int status; struct nfs4_sequence_res seq_res; + unsigned int status; }; struct nfs41_free_stateid_args { - nfs4_stateid *stateid; struct nfs4_sequence_args seq_args; + nfs4_stateid *stateid; }; struct nfs41_free_stateid_res { - unsigned int status; struct nfs4_sequence_res seq_res; + unsigned int status; }; #else -- cgit v1.2.3 From c05eecf636101dd4347b2d8fa457626bf0088e0a Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Fri, 30 Nov 2012 23:59:29 -0500 Subject: SUNRPC: Don't allow low priority tasks to pre-empt higher priority ones Currently, the priority queues attempt to be 'fair' to lower priority tasks by scheduling them after a certain number of higher priority tasks have run. The problem is that both the transport send queue and the NFSv4.1 session slot queue have strong ordering requirements. This patch therefore removes the fairness code in favour of strong ordering of task priorities. 
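As a rough illustration of the resulting scheduling policy (a stand-alone C sketch with made-up names, not the SUNRPC code itself): the next task is always taken from the highest-priority non-empty queue, so a lower-priority task runs only once every higher-priority queue is empty.

#include <stdio.h>

#define NR_PRIO 3	/* e.g. low, normal, privileged */

/* Hypothetical queue: number of waiting tasks per priority level. */
static int queued[NR_PRIO];

/*
 * Without the fairness counter, the next task always comes from the
 * highest non-empty priority level; lower levels run only when every
 * higher level is empty.
 */
static int pick_next_priority(void)
{
	int prio;

	for (prio = NR_PRIO - 1; prio >= 0; prio--)
		if (queued[prio] > 0)
			return prio;
	return -1;	/* nothing queued */
}

int main(void)
{
	int prio;

	queued[0] = 2;	/* two low-priority tasks */
	queued[2] = 1;	/* one privileged task */

	while ((prio = pick_next_priority()) >= 0) {
		queued[prio]--;
		printf("run task at priority %d\n", prio);
	}
	return 0;
}

This corresponds to dropping the queue->count batching from __rpc_find_next_queued_priority() in the diff below.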
Signed-off-by: Trond Myklebust --- include/linux/sunrpc/sched.h | 1 - net/sunrpc/sched.c | 44 ++++++++++++++++++++++---------------------- 2 files changed, 22 insertions(+), 23 deletions(-) (limited to 'include/linux') diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h index dc0c3cc3ada3..b64f8eb0b973 100644 --- a/include/linux/sunrpc/sched.h +++ b/include/linux/sunrpc/sched.h @@ -192,7 +192,6 @@ struct rpc_wait_queue { pid_t owner; /* process id of last task serviced */ unsigned char maxpriority; /* maximum priority (0 if queue is not a priority queue) */ unsigned char priority; /* current priority */ - unsigned char count; /* # task groups remaining serviced so far */ unsigned char nr; /* # tasks remaining for cookie */ unsigned short qlen; /* total # tasks waiting in queue */ struct rpc_timer timer_list; diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c index 1aefc9fef866..d17a704aaf5f 100644 --- a/net/sunrpc/sched.c +++ b/net/sunrpc/sched.c @@ -98,6 +98,23 @@ __rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task) list_add(&task->u.tk_wait.timer_list, &queue->timer_list.list); } +static void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority) +{ + queue->priority = priority; +} + +static void rpc_set_waitqueue_owner(struct rpc_wait_queue *queue, pid_t pid) +{ + queue->owner = pid; + queue->nr = RPC_BATCH_COUNT; +} + +static void rpc_reset_waitqueue_priority(struct rpc_wait_queue *queue) +{ + rpc_set_waitqueue_priority(queue, queue->maxpriority); + rpc_set_waitqueue_owner(queue, 0); +} + /* * Add new request to a priority queue. */ @@ -109,9 +126,11 @@ static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue, struct rpc_task *t; INIT_LIST_HEAD(&task->u.tk_wait.links); - q = &queue->tasks[queue_priority]; if (unlikely(queue_priority > queue->maxpriority)) - q = &queue->tasks[queue->maxpriority]; + queue_priority = queue->maxpriority; + if (queue_priority > queue->priority) + rpc_set_waitqueue_priority(queue, queue_priority); + q = &queue->tasks[queue_priority]; list_for_each_entry(t, q, u.tk_wait.list) { if (t->tk_owner == task->tk_owner) { list_add_tail(&task->u.tk_wait.list, &t->u.tk_wait.links); @@ -180,24 +199,6 @@ static void __rpc_remove_wait_queue(struct rpc_wait_queue *queue, struct rpc_tas task->tk_pid, queue, rpc_qname(queue)); } -static inline void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority) -{ - queue->priority = priority; - queue->count = 1 << (priority * 2); -} - -static inline void rpc_set_waitqueue_owner(struct rpc_wait_queue *queue, pid_t pid) -{ - queue->owner = pid; - queue->nr = RPC_BATCH_COUNT; -} - -static inline void rpc_reset_waitqueue_priority(struct rpc_wait_queue *queue) -{ - rpc_set_waitqueue_priority(queue, queue->maxpriority); - rpc_set_waitqueue_owner(queue, 0); -} - static void __rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname, unsigned char nr_queues) { int i; @@ -464,8 +465,7 @@ static struct rpc_task *__rpc_find_next_queued_priority(struct rpc_wait_queue *q /* * Check if we need to switch queues. */ - if (--queue->count) - goto new_owner; + goto new_owner; } /* -- cgit v1.2.3 From cf66bb93e0f75e0a4ba1ec070692618fa028e994 Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Mon, 3 Dec 2012 16:25:40 +0000 Subject: byteorder: allow arch to opt to use GCC intrinsics for byteswapping Since GCC 4.4, there have been __builtin_bswap32() and __builtin_bswap64() intrinsics.
A __builtin_bswap16() came a little later (4.6 for PowerPC, 4.8 for other platforms). By using these instead of the inline assembler that most architectures have in their __arch_swabXX() macros, we let the compiler see what's actually happening. The resulting code should be at least as good, and much *better* in the cases where it can be combined with a nearby load or store, using a load-and-byteswap or store-and-byteswap instruction (e.g. lwbrx/stwbrx on PowerPC, movbe on Atom). When GCC is sufficiently recent *and* the architecture opts in to using the intrinsics by setting CONFIG_ARCH_USE_BUILTIN_BSWAP, they will be used in preference to the __arch_swabXX() macros. An architecture which does not set ARCH_USE_BUILTIN_BSWAP will continue to use its own hand-crafted macros. Signed-off-by: David Woodhouse Acked-by: H. Peter Anvin --- arch/Kconfig | 19 +++++++++++++++++++ include/linux/compiler-gcc4.h | 10 ++++++++++ include/linux/compiler-intel.h | 7 +++++++ include/uapi/linux/swab.h | 12 +++++++++--- 4 files changed, 45 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/arch/Kconfig b/arch/Kconfig index 366ec06a5185..c31416b10586 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -112,6 +112,25 @@ config HAVE_EFFICIENT_UNALIGNED_ACCESS See Documentation/unaligned-memory-access.txt for more information on the topic of unaligned memory accesses. +config ARCH_USE_BUILTIN_BSWAP + bool + help + Modern versions of GCC (since 4.4) have builtin functions + for handling byte-swapping. Using these, instead of the old + inline assembler that the architecture code provides in the + __arch_bswapXX() macros, allows the compiler to see what's + happening and offers more opportunity for optimisation. In + particular, the compiler will be able to combine the byteswap + with a nearby load or store and use load-and-swap or + store-and-swap instructions if the architecture has them. It + should almost *never* result in code which is worse than the + hand-coded assembler in <asm/swab.h>. But just in case it + does, the use of the builtins is optional. + + Any architecture with load-and-swap or store-and-swap + instructions should set this. And it shouldn't hurt to set it + on architectures that don't have such instructions. 
+ config HAVE_SYSCALL_WRAPPERS bool diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h index 412bc6c2b023..dc16a858e77c 100644 --- a/include/linux/compiler-gcc4.h +++ b/include/linux/compiler-gcc4.h @@ -63,3 +63,13 @@ #define __compiletime_warning(message) __attribute__((warning(message))) #define __compiletime_error(message) __attribute__((error(message))) #endif + +#ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP +#if __GNUC_MINOR__ >= 4 +#define __HAVE_BUILTIN_BSWAP32__ +#define __HAVE_BUILTIN_BSWAP64__ +#endif +#if __GNUC_MINOR__ >= 8 || (defined(__powerpc__) && __GNUC_MINOR__ >= 6) +#define __HAVE_BUILTIN_BSWAP16__ +#endif +#endif diff --git a/include/linux/compiler-intel.h b/include/linux/compiler-intel.h index d8e636e5607d..973ce10c40b6 100644 --- a/include/linux/compiler-intel.h +++ b/include/linux/compiler-intel.h @@ -29,3 +29,10 @@ #endif #define uninitialized_var(x) x + +#ifndef __HAVE_BUILTIN_BSWAP16__ +/* icc has this, but it's called _bswap16 */ +#define __HAVE_BUILTIN_BSWAP16__ +#define __builtin_bswap16 _bswap16 +#endif + diff --git a/include/uapi/linux/swab.h b/include/uapi/linux/swab.h index e811474724c2..0e011eb91b5d 100644 --- a/include/uapi/linux/swab.h +++ b/include/uapi/linux/swab.h @@ -45,7 +45,9 @@ static inline __attribute_const__ __u16 __fswab16(__u16 val) { -#ifdef __arch_swab16 +#ifdef __HAVE_BUILTIN_BSWAP16__ + return __builtin_bswap16(val); +#elif defined (__arch_swab16) return __arch_swab16(val); #else return ___constant_swab16(val); @@ -54,7 +56,9 @@ static inline __attribute_const__ __u16 __fswab16(__u16 val) static inline __attribute_const__ __u32 __fswab32(__u32 val) { -#ifdef __arch_swab32 +#ifdef __HAVE_BUILTIN_BSWAP32__ + return __builtin_bswap32(val); +#elif defined(__arch_swab32) return __arch_swab32(val); #else return ___constant_swab32(val); @@ -63,7 +67,9 @@ static inline __attribute_const__ __u32 __fswab32(__u32 val) static inline __attribute_const__ __u64 __fswab64(__u64 val) { -#ifdef __arch_swab64 +#ifdef __HAVE_BUILTIN_BSWAP64__ + return __builtin_bswap64(val); +#elif defined (__arch_swab64) return __arch_swab64(val); #elif defined(__SWAB_64_THRU_32__) __u32 h = val >> 32; -- cgit v1.2.3 From dd31866b0d55c9b70722ebad6ccd643223d9269e Mon Sep 17 00:00:00 2001 From: Jaegeuk Kim Date: Fri, 2 Nov 2012 17:06:26 +0900 Subject: f2fs: add on-disk layout This adds a header file describing the on-disk layout of f2fs. Signed-off-by: Changman Lee Signed-off-by: Chul Lee Signed-off-by: Jaegeuk Kim --- include/linux/f2fs_fs.h | 410 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 410 insertions(+) create mode 100644 include/linux/f2fs_fs.h (limited to 'include/linux') diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h new file mode 100644 index 000000000000..1429ece7caab --- /dev/null +++ b/include/linux/f2fs_fs.h @@ -0,0 +1,410 @@ +/** + * include/linux/f2fs_fs.h + * + * Copyright (c) 2012 Samsung Electronics Co., Ltd. + * http://www.samsung.com/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +#ifndef _LINUX_F2FS_FS_H +#define _LINUX_F2FS_FS_H + +#include +#include + +#define F2FS_SUPER_OFFSET 1024 /* byte-size offset */ +#define F2FS_LOG_SECTOR_SIZE 9 /* 9 bits for 512 byte */ +#define F2FS_LOG_SECTORS_PER_BLOCK 3 /* 4KB: F2FS_BLKSIZE */ +#define F2FS_BLKSIZE 4096 /* support only 4KB block */ +#define F2FS_MAX_EXTENSION 64 /* # of extension entries */ + +#define NULL_ADDR 0x0U +#define NEW_ADDR -1U + +#define F2FS_ROOT_INO(sbi) (sbi->root_ino_num) +#define F2FS_NODE_INO(sbi) (sbi->node_ino_num) +#define F2FS_META_INO(sbi) (sbi->meta_ino_num) + +/* This flag is used by node and meta inodes, and by recovery */ +#define GFP_F2FS_ZERO (GFP_NOFS | __GFP_ZERO) + +/* + * For further optimization on multi-head logs, on-disk layout supports maximum + * 16 logs by default. The number, 16, is expected to cover all the cases + * enoughly. The implementaion currently uses no more than 6 logs. + * Half the logs are used for nodes, and the other half are used for data. + */ +#define MAX_ACTIVE_LOGS 16 +#define MAX_ACTIVE_NODE_LOGS 8 +#define MAX_ACTIVE_DATA_LOGS 8 + +/* + * For superblock + */ +struct f2fs_super_block { + __le32 magic; /* Magic Number */ + __le16 major_ver; /* Major Version */ + __le16 minor_ver; /* Minor Version */ + __le32 log_sectorsize; /* log2 sector size in bytes */ + __le32 log_sectors_per_block; /* log2 # of sectors per block */ + __le32 log_blocksize; /* log2 block size in bytes */ + __le32 log_blocks_per_seg; /* log2 # of blocks per segment */ + __le32 segs_per_sec; /* # of segments per section */ + __le32 secs_per_zone; /* # of sections per zone */ + __le32 checksum_offset; /* checksum offset inside super block */ + __le64 block_count; /* total # of user blocks */ + __le32 section_count; /* total # of sections */ + __le32 segment_count; /* total # of segments */ + __le32 segment_count_ckpt; /* # of segments for checkpoint */ + __le32 segment_count_sit; /* # of segments for SIT */ + __le32 segment_count_nat; /* # of segments for NAT */ + __le32 segment_count_ssa; /* # of segments for SSA */ + __le32 segment_count_main; /* # of segments for main area */ + __le32 segment0_blkaddr; /* start block address of segment 0 */ + __le32 cp_blkaddr; /* start block address of checkpoint */ + __le32 sit_blkaddr; /* start block address of SIT */ + __le32 nat_blkaddr; /* start block address of NAT */ + __le32 ssa_blkaddr; /* start block address of SSA */ + __le32 main_blkaddr; /* start block address of main area */ + __le32 root_ino; /* root inode number */ + __le32 node_ino; /* node inode number */ + __le32 meta_ino; /* meta inode number */ + __u8 uuid[16]; /* 128-bit uuid for volume */ + __le16 volume_name[512]; /* volume name */ + __le32 extension_count; /* # of extensions below */ + __u8 extension_list[F2FS_MAX_EXTENSION][8]; /* extension array */ +} __packed; + +/* + * For checkpoint + */ +#define CP_ERROR_FLAG 0x00000008 +#define CP_COMPACT_SUM_FLAG 0x00000004 +#define CP_ORPHAN_PRESENT_FLAG 0x00000002 +#define CP_UMOUNT_FLAG 0x00000001 + +struct f2fs_checkpoint { + __le64 checkpoint_ver; /* checkpoint block version number */ + __le64 user_block_count; /* # of user blocks */ + __le64 valid_block_count; /* # of valid blocks in main area */ + __le32 rsvd_segment_count; /* # of reserved segments for gc */ + __le32 overprov_segment_count; /* # of overprovision segments */ + __le32 free_segment_count; /* # of free segments in main area */ + + /* information of current node segments */ + __le32 cur_node_segno[MAX_ACTIVE_NODE_LOGS]; + __le16 
cur_node_blkoff[MAX_ACTIVE_NODE_LOGS]; + /* information of current data segments */ + __le32 cur_data_segno[MAX_ACTIVE_DATA_LOGS]; + __le16 cur_data_blkoff[MAX_ACTIVE_DATA_LOGS]; + __le32 ckpt_flags; /* Flags : umount and journal_present */ + __le32 cp_pack_total_block_count; /* total # of one cp pack */ + __le32 cp_pack_start_sum; /* start block number of data summary */ + __le32 valid_node_count; /* Total number of valid nodes */ + __le32 valid_inode_count; /* Total number of valid inodes */ + __le32 next_free_nid; /* Next free node number */ + __le32 sit_ver_bitmap_bytesize; /* Default value 64 */ + __le32 nat_ver_bitmap_bytesize; /* Default value 256 */ + __le32 checksum_offset; /* checksum offset inside cp block */ + __le64 elapsed_time; /* mounted time */ + /* allocation type of current segment */ + unsigned char alloc_type[MAX_ACTIVE_LOGS]; + + /* SIT and NAT version bitmap */ + unsigned char sit_nat_version_bitmap[1]; +} __packed; + +/* + * For orphan inode management + */ +#define F2FS_ORPHANS_PER_BLOCK 1020 + +struct f2fs_orphan_block { + __le32 ino[F2FS_ORPHANS_PER_BLOCK]; /* inode numbers */ + __le32 reserved; /* reserved */ + __le16 blk_addr; /* block index in current CP */ + __le16 blk_count; /* Number of orphan inode blocks in CP */ + __le32 entry_count; /* Total number of orphan nodes in current CP */ + __le32 check_sum; /* CRC32 for orphan inode block */ +} __packed; + +/* + * For NODE structure + */ +struct f2fs_extent { + __le32 fofs; /* start file offset of the extent */ + __le32 blk_addr; /* start block address of the extent */ + __le32 len; /* lengh of the extent */ +} __packed; + +#define F2FS_MAX_NAME_LEN 256 +#define ADDRS_PER_INODE 923 /* Address Pointers in an Inode */ +#define ADDRS_PER_BLOCK 1018 /* Address Pointers in a Direct Block */ +#define NIDS_PER_BLOCK 1018 /* Node IDs in an Indirect Block */ + +struct f2fs_inode { + __le16 i_mode; /* file mode */ + __u8 i_advise; /* file hints */ + __u8 i_reserved; /* reserved */ + __le32 i_uid; /* user ID */ + __le32 i_gid; /* group ID */ + __le32 i_links; /* links count */ + __le64 i_size; /* file size in bytes */ + __le64 i_blocks; /* file size in blocks */ + __le64 i_atime; /* access time */ + __le64 i_ctime; /* change time */ + __le64 i_mtime; /* modification time */ + __le32 i_atime_nsec; /* access time in nano scale */ + __le32 i_ctime_nsec; /* change time in nano scale */ + __le32 i_mtime_nsec; /* modification time in nano scale */ + __le32 i_generation; /* file version (for NFS) */ + __le32 i_current_depth; /* only for directory depth */ + __le32 i_xattr_nid; /* nid to save xattr */ + __le32 i_flags; /* file attributes */ + __le32 i_pino; /* parent inode number */ + __le32 i_namelen; /* file name length */ + __u8 i_name[F2FS_MAX_NAME_LEN]; /* file name for SPOR */ + + struct f2fs_extent i_ext; /* caching a largest extent */ + + __le32 i_addr[ADDRS_PER_INODE]; /* Pointers to data blocks */ + + __le32 i_nid[5]; /* direct(2), indirect(2), + double_indirect(1) node id */ +} __packed; + +struct direct_node { + __le32 addr[ADDRS_PER_BLOCK]; /* array of data block address */ +} __packed; + +struct indirect_node { + __le32 nid[NIDS_PER_BLOCK]; /* array of data block address */ +} __packed; + +enum { + COLD_BIT_SHIFT = 0, + FSYNC_BIT_SHIFT, + DENT_BIT_SHIFT, + OFFSET_BIT_SHIFT +}; + +struct node_footer { + __le32 nid; /* node id */ + __le32 ino; /* inode nunmber */ + __le32 flag; /* include cold/fsync/dentry marks and offset */ + __le64 cp_ver; /* checkpoint version */ + __le32 next_blkaddr; /* next node page block 
address */ +} __packed; + +struct f2fs_node { + /* can be one of three types: inode, direct, and indirect types */ + union { + struct f2fs_inode i; + struct direct_node dn; + struct indirect_node in; + }; + struct node_footer footer; +} __packed; + +/* + * For NAT entries + */ +#define NAT_ENTRY_PER_BLOCK (PAGE_CACHE_SIZE / sizeof(struct f2fs_nat_entry)) + +struct f2fs_nat_entry { + __u8 version; /* latest version of cached nat entry */ + __le32 ino; /* inode number */ + __le32 block_addr; /* block address */ +} __packed; + +struct f2fs_nat_block { + struct f2fs_nat_entry entries[NAT_ENTRY_PER_BLOCK]; +} __packed; + +/* + * For SIT entries + * + * Each segment is 2MB in size by default so that a bitmap for validity of + * there-in blocks should occupy 64 bytes, 512 bits. + * Not allow to change this. + */ +#define SIT_VBLOCK_MAP_SIZE 64 +#define SIT_ENTRY_PER_BLOCK (PAGE_CACHE_SIZE / sizeof(struct f2fs_sit_entry)) + +/* + * Note that f2fs_sit_entry->vblocks has the following bit-field information. + * [15:10] : allocation type such as CURSEG_XXXX_TYPE + * [9:0] : valid block count + */ +#define SIT_VBLOCKS_SHIFT 10 +#define SIT_VBLOCKS_MASK ((1 << SIT_VBLOCKS_SHIFT) - 1) +#define GET_SIT_VBLOCKS(raw_sit) \ + (le16_to_cpu((raw_sit)->vblocks) & SIT_VBLOCKS_MASK) +#define GET_SIT_TYPE(raw_sit) \ + ((le16_to_cpu((raw_sit)->vblocks) & ~SIT_VBLOCKS_MASK) \ + >> SIT_VBLOCKS_SHIFT) + +struct f2fs_sit_entry { + __le16 vblocks; /* reference above */ + __u8 valid_map[SIT_VBLOCK_MAP_SIZE]; /* bitmap for valid blocks */ + __le64 mtime; /* segment age for cleaning */ +} __packed; + +struct f2fs_sit_block { + struct f2fs_sit_entry entries[SIT_ENTRY_PER_BLOCK]; +} __packed; + +/* + * For segment summary + * + * One summary block contains exactly 512 summary entries, which represents + * exactly 2MB segment by default. Not allow to change the basic units. + * + * NOTE: For initializing fields, you must use set_summary + * + * - If data page, nid represents dnode's nid + * - If node page, nid represents the node page's nid. + * + * The ofs_in_node is used by only data page. It represents offset + * from node's page's beginning to get a data block address. 
+ * ex) data_blkaddr = (block_t)(nodepage_start_address + ofs_in_node) + */ +#define ENTRIES_IN_SUM 512 +#define SUMMARY_SIZE (sizeof(struct f2fs_summary)) +#define SUM_FOOTER_SIZE (sizeof(struct summary_footer)) +#define SUM_ENTRY_SIZE (SUMMARY_SIZE * ENTRIES_IN_SUM) + +/* a summary entry for a 4KB-sized block in a segment */ +struct f2fs_summary { + __le32 nid; /* parent node id */ + union { + __u8 reserved[3]; + struct { + __u8 version; /* node version number */ + __le16 ofs_in_node; /* block index in parent node */ + } __packed; + }; +} __packed; + +/* summary block type, node or data, is stored to the summary_footer */ +#define SUM_TYPE_NODE (1) +#define SUM_TYPE_DATA (0) + +struct summary_footer { + unsigned char entry_type; /* SUM_TYPE_XXX */ + __u32 check_sum; /* summary checksum */ +} __packed; + +#define SUM_JOURNAL_SIZE (PAGE_CACHE_SIZE - SUM_FOOTER_SIZE -\ + SUM_ENTRY_SIZE) +#define NAT_JOURNAL_ENTRIES ((SUM_JOURNAL_SIZE - 2) /\ + sizeof(struct nat_journal_entry)) +#define NAT_JOURNAL_RESERVED ((SUM_JOURNAL_SIZE - 2) %\ + sizeof(struct nat_journal_entry)) +#define SIT_JOURNAL_ENTRIES ((SUM_JOURNAL_SIZE - 2) /\ + sizeof(struct sit_journal_entry)) +#define SIT_JOURNAL_RESERVED ((SUM_JOURNAL_SIZE - 2) %\ + sizeof(struct sit_journal_entry)) +/* + * frequently updated NAT/SIT entries can be stored in the spare area in + * summary blocks + */ +enum { + NAT_JOURNAL = 0, + SIT_JOURNAL +}; + +struct nat_journal_entry { + __le32 nid; + struct f2fs_nat_entry ne; +} __packed; + +struct nat_journal { + struct nat_journal_entry entries[NAT_JOURNAL_ENTRIES]; + __u8 reserved[NAT_JOURNAL_RESERVED]; +} __packed; + +struct sit_journal_entry { + __le32 segno; + struct f2fs_sit_entry se; +} __packed; + +struct sit_journal { + struct sit_journal_entry entries[SIT_JOURNAL_ENTRIES]; + __u8 reserved[SIT_JOURNAL_RESERVED]; +} __packed; + +/* 4KB-sized summary block structure */ +struct f2fs_summary_block { + struct f2fs_summary entries[ENTRIES_IN_SUM]; + union { + __le16 n_nats; + __le16 n_sits; + }; + /* spare area is used by NAT or SIT journals */ + union { + struct nat_journal nat_j; + struct sit_journal sit_j; + }; + struct summary_footer footer; +} __packed; + +/* + * For directory operations + */ +#define F2FS_DOT_HASH 0 +#define F2FS_DDOT_HASH F2FS_DOT_HASH +#define F2FS_MAX_HASH (~((0x3ULL) << 62)) +#define F2FS_HASH_COL_BIT ((0x1ULL) << 63) + +typedef __le32 f2fs_hash_t; + +/* One directory entry slot covers 8bytes-long file name */ +#define F2FS_NAME_LEN 8 + +/* the number of dentry in a block */ +#define NR_DENTRY_IN_BLOCK 214 + +/* MAX level for dir lookup */ +#define MAX_DIR_HASH_DEPTH 63 + +#define SIZE_OF_DIR_ENTRY 11 /* by byte */ +#define SIZE_OF_DENTRY_BITMAP ((NR_DENTRY_IN_BLOCK + BITS_PER_BYTE - 1) / \ + BITS_PER_BYTE) +#define SIZE_OF_RESERVED (PAGE_SIZE - ((SIZE_OF_DIR_ENTRY + \ + F2FS_NAME_LEN) * \ + NR_DENTRY_IN_BLOCK + SIZE_OF_DENTRY_BITMAP)) + +/* One directory entry slot representing F2FS_NAME_LEN-sized file name */ +struct f2fs_dir_entry { + __le32 hash_code; /* hash code of file name */ + __le32 ino; /* inode number */ + __le16 name_len; /* lengh of file name */ + __u8 file_type; /* file type */ +} __packed; + +/* 4KB-sized directory entry block */ +struct f2fs_dentry_block { + /* validity bitmap for directory entries in each block */ + __u8 dentry_bitmap[SIZE_OF_DENTRY_BITMAP]; + __u8 reserved[SIZE_OF_RESERVED]; + struct f2fs_dir_entry dentry[NR_DENTRY_IN_BLOCK]; + __u8 filename[NR_DENTRY_IN_BLOCK][F2FS_NAME_LEN]; +} __packed; + +/* file types used in inode_info->flags */ 
+enum { + F2FS_FT_UNKNOWN, + F2FS_FT_REG_FILE, + F2FS_FT_DIR, + F2FS_FT_CHRDEV, + F2FS_FT_BLKDEV, + F2FS_FT_FIFO, + F2FS_FT_SOCK, + F2FS_FT_SYMLINK, + F2FS_FT_MAX +}; + +#endif /* _LINUX_F2FS_FS_H */ -- cgit v1.2.3 From 25ca923b2a766b9c93b63777ead351137533a623 Mon Sep 17 00:00:00 2001 From: Jaegeuk Kim Date: Wed, 28 Nov 2012 16:12:41 +0900 Subject: f2fs: fix endian conversion bugs reported by sparse This patch should resolve the bugs reported by the sparse tool. Initial reports were written by "kbuild test robot" managed by fengguang.wu. In my local machines, I've tested also by running: > make C=2 CF="-D__CHECK_ENDIAN__" Accordingly, I've found lots of warnings and bugs related to the endian conversion. And I've fixed all at this moment. Signed-off-by: Jaegeuk Kim --- fs/f2fs/checkpoint.c | 32 +++++++++++++++++--------------- fs/f2fs/data.c | 2 +- fs/f2fs/debug.c | 2 +- fs/f2fs/dir.c | 8 ++++---- fs/f2fs/f2fs.h | 25 +++++++++++++++++++++++-- fs/f2fs/hash.c | 3 +-- fs/f2fs/node.c | 4 ++-- fs/f2fs/node.h | 2 +- fs/f2fs/recovery.c | 2 +- fs/f2fs/segment.c | 14 +++++++------- fs/f2fs/super.c | 8 ++++---- include/linux/f2fs_fs.h | 6 +++--- 12 files changed, 65 insertions(+), 43 deletions(-) (limited to 'include/linux') diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c index ab743f92ee06..7c18f8efaadc 100644 --- a/fs/f2fs/checkpoint.c +++ b/fs/f2fs/checkpoint.c @@ -268,7 +268,7 @@ int recover_orphan_inodes(struct f2fs_sb_info *sbi) { block_t start_blk, orphan_blkaddr, i, j; - if (!(F2FS_CKPT(sbi)->ckpt_flags & CP_ORPHAN_PRESENT_FLAG)) + if (!is_set_ckpt_flags(F2FS_CKPT(sbi), CP_ORPHAN_PRESENT_FLAG)) return 0; sbi->por_doing = 1; @@ -287,7 +287,7 @@ int recover_orphan_inodes(struct f2fs_sb_info *sbi) f2fs_put_page(page, 1); } /* clear Orphan Flag */ - F2FS_CKPT(sbi)->ckpt_flags &= (~CP_ORPHAN_PRESENT_FLAG); + clear_ckpt_flags(F2FS_CKPT(sbi), CP_ORPHAN_PRESENT_FLAG); sbi->por_doing = 0; return 0; } @@ -376,7 +376,7 @@ static struct page *validate_checkpoint(struct f2fs_sb_info *sbi, pre_version = le64_to_cpu(cp_block->checkpoint_ver); /* Read the 2nd cp block in this CP pack */ - cp_addr += le64_to_cpu(cp_block->cp_pack_total_block_count) - 1; + cp_addr += le32_to_cpu(cp_block->cp_pack_total_block_count) - 1; cp_page_2 = get_meta_page(sbi, cp_addr); cp_block = (struct f2fs_checkpoint *)page_address(cp_page_2); @@ -605,8 +605,8 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, bool is_umount) block_t start_blk; struct page *cp_page; unsigned int data_sum_blocks, orphan_blocks; + unsigned int crc32 = 0; void *kaddr; - __u32 crc32 = 0; int i; /* Flush all the NAT/SIT pages */ @@ -646,33 +646,35 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, bool is_umount) /* 2 cp + n data seg summary + orphan inode blocks */ data_sum_blocks = npages_for_summary_flush(sbi); if (data_sum_blocks < 3) - ckpt->ckpt_flags |= CP_COMPACT_SUM_FLAG; + set_ckpt_flags(ckpt, CP_COMPACT_SUM_FLAG); else - ckpt->ckpt_flags &= (~CP_COMPACT_SUM_FLAG); + clear_ckpt_flags(ckpt, CP_COMPACT_SUM_FLAG); orphan_blocks = (sbi->n_orphans + F2FS_ORPHANS_PER_BLOCK - 1) / F2FS_ORPHANS_PER_BLOCK; - ckpt->cp_pack_start_sum = 1 + orphan_blocks; - ckpt->cp_pack_total_block_count = 2 + data_sum_blocks + orphan_blocks; + ckpt->cp_pack_start_sum = cpu_to_le32(1 + orphan_blocks); if (is_umount) { - ckpt->ckpt_flags |= CP_UMOUNT_FLAG; - ckpt->cp_pack_total_block_count += NR_CURSEG_NODE_TYPE; + set_ckpt_flags(ckpt, CP_UMOUNT_FLAG); + ckpt->cp_pack_total_block_count = cpu_to_le32(2 + + data_sum_blocks + orphan_blocks + 
NR_CURSEG_NODE_TYPE); } else { - ckpt->ckpt_flags &= (~CP_UMOUNT_FLAG); + clear_ckpt_flags(ckpt, CP_UMOUNT_FLAG); + ckpt->cp_pack_total_block_count = cpu_to_le32(2 + + data_sum_blocks + orphan_blocks); } if (sbi->n_orphans) - ckpt->ckpt_flags |= CP_ORPHAN_PRESENT_FLAG; + set_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG); else - ckpt->ckpt_flags &= (~CP_ORPHAN_PRESENT_FLAG); + clear_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG); /* update SIT/NAT bitmap */ get_sit_bitmap(sbi, __bitmap_ptr(sbi, SIT_BITMAP)); get_nat_bitmap(sbi, __bitmap_ptr(sbi, NAT_BITMAP)); crc32 = f2fs_crc32(ckpt, le32_to_cpu(ckpt->checksum_offset)); - *(__u32 *)((unsigned char *)ckpt + + *(__le32 *)((unsigned char *)ckpt + le32_to_cpu(ckpt->checksum_offset)) = cpu_to_le32(crc32); @@ -716,7 +718,7 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, bool is_umount) sbi->alloc_valid_block_count = 0; /* Here, we only have one bio having CP pack */ - if (sbi->ckpt->ckpt_flags & CP_ERROR_FLAG) + if (is_set_ckpt_flags(ckpt, CP_ERROR_FLAG)) sbi->sb->s_flags |= MS_RDONLY; else sync_meta_pages(sbi, META_FLUSH, LONG_MAX); diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index c2fd0a80db16..5635cc5a9d4d 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -545,7 +545,7 @@ redirty_out: #define MAX_DESIRED_PAGES_WP 4096 -int f2fs_write_data_pages(struct address_space *mapping, +static int f2fs_write_data_pages(struct address_space *mapping, struct writeback_control *wbc) { struct inode *inode = mapping->host; diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c index a56181c1b28b..fb62960a1dc1 100644 --- a/fs/f2fs/debug.c +++ b/fs/f2fs/debug.c @@ -27,7 +27,7 @@ static LIST_HEAD(f2fs_stat_list); static struct dentry *debugfs_root; -void update_general_status(struct f2fs_sb_info *sbi) +static void update_general_status(struct f2fs_sb_info *sbi) { struct f2fs_stat_info *si = sbi->stat_info; int i; diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c index 5975568d03df..5ec7a06120e1 100644 --- a/fs/f2fs/dir.c +++ b/fs/f2fs/dir.c @@ -80,7 +80,7 @@ static bool early_match_name(const char *name, int namelen, if (le16_to_cpu(de->name_len) != namelen) return false; - if (le32_to_cpu(de->hash_code) != namehash) + if (de->hash_code != namehash) return false; return true; @@ -143,7 +143,7 @@ static struct f2fs_dir_entry *find_in_level(struct inode *dir, nbucket = dir_buckets(level); nblock = bucket_blocks(level); - bidx = dir_block_index(level, namehash % nbucket); + bidx = dir_block_index(level, le32_to_cpu(namehash) % nbucket); end_block = bidx + nblock; for (; bidx < end_block; bidx++) { @@ -406,7 +406,7 @@ start: nbucket = dir_buckets(level); nblock = bucket_blocks(level); - bidx = dir_block_index(level, (dentry_hash % nbucket)); + bidx = dir_block_index(level, (le32_to_cpu(dentry_hash) % nbucket)); for (block = bidx; block <= (bidx + nblock - 1); block++) { mutex_lock_op(sbi, DENTRY_OPS); @@ -437,7 +437,7 @@ add_dentry: wait_on_page_writeback(dentry_page); de = &dentry_blk->dentry[bit_pos]; - de->hash_code = cpu_to_le32(dentry_hash); + de->hash_code = dentry_hash; de->name_len = cpu_to_le16(namelen); memcpy(dentry_blk->filename[bit_pos], name, namelen); de->ino = cpu_to_le32(inode->i_ino); diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index d3f5a70e2a49..8d7fde1bda1e 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -463,6 +463,26 @@ static inline void F2FS_RESET_SB_DIRT(struct f2fs_sb_info *sbi) sbi->s_dirty = 0; } +static inline bool is_set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f) +{ + unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags); + return 
ckpt_flags & f; +} + +static inline void set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f) +{ + unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags); + ckpt_flags |= f; + cp->ckpt_flags = cpu_to_le32(ckpt_flags); +} + +static inline void clear_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f) +{ + unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags); + ckpt_flags &= (~f); + cp->ckpt_flags = cpu_to_le32(ckpt_flags); +} + static inline void mutex_lock_op(struct f2fs_sb_info *sbi, enum lock_type t) { mutex_lock_nested(&sbi->fs_lock[t], t); @@ -577,7 +597,8 @@ static inline unsigned long __bitmap_size(struct f2fs_sb_info *sbi, int flag) static inline void *__bitmap_ptr(struct f2fs_sb_info *sbi, int flag) { struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); - int offset = (flag == NAT_BITMAP) ? ckpt->sit_ver_bitmap_bytesize : 0; + int offset = (flag == NAT_BITMAP) ? + le32_to_cpu(ckpt->sit_ver_bitmap_bytesize) : 0; return &ckpt->sit_nat_version_bitmap + offset; } @@ -587,7 +608,7 @@ static inline block_t __start_cp_addr(struct f2fs_sb_info *sbi) struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); unsigned long long ckpt_version = le64_to_cpu(ckpt->checkpoint_ver); - start_addr = le64_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr); + start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr); /* * odd numbered checkpoint should at cp segment 0 diff --git a/fs/f2fs/hash.c b/fs/f2fs/hash.c index 098a1963d7c7..beb155e8d06d 100644 --- a/fs/f2fs/hash.c +++ b/fs/f2fs/hash.c @@ -92,7 +92,6 @@ f2fs_hash_t f2fs_dentry_hash(const char *name, int len) hash = buf[0]; minor_hash = buf[1]; - f2fs_hash = hash; - f2fs_hash &= ~F2FS_HASH_COL_BIT; + f2fs_hash = cpu_to_le32(hash & ~F2FS_HASH_COL_BIT); return f2fs_hash; } diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c index 216f04dc1177..5d421fe22575 100644 --- a/fs/f2fs/node.c +++ b/fs/f2fs/node.c @@ -1445,8 +1445,8 @@ int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page) memcpy(dst, src, (unsigned long)&src->i.i_ext - (unsigned long)&src->i); dst->i.i_size = 0; - dst->i.i_blocks = 1; - dst->i.i_links = 1; + dst->i.i_blocks = cpu_to_le64(1); + dst->i.i_links = cpu_to_le32(1); dst->i.i_xattr_nid = 0; new_ni = old_ni; diff --git a/fs/f2fs/node.h b/fs/f2fs/node.h index 5d525ed312ba..0ab92d643052 100644 --- a/fs/f2fs/node.h +++ b/fs/f2fs/node.h @@ -177,7 +177,7 @@ static inline void fill_node_footer_blkaddr(struct page *page, block_t blkaddr) void *kaddr = page_address(page); struct f2fs_node *rn = (struct f2fs_node *)kaddr; rn->footer.cp_ver = ckpt->checkpoint_ver; - rn->footer.next_blkaddr = blkaddr; + rn->footer.next_blkaddr = cpu_to_le32(blkaddr); } static inline nid_t ino_of_node(struct page *node_page) diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c index 7a43df0b72c1..222a7bb92214 100644 --- a/fs/f2fs/recovery.c +++ b/fs/f2fs/recovery.c @@ -81,7 +81,7 @@ static int recover_inode(struct inode *inode, struct page *node_page) struct f2fs_node *raw_node = (struct f2fs_node *)kaddr; struct f2fs_inode *raw_inode = &(raw_node->i); - inode->i_mode = le32_to_cpu(raw_inode->i_mode); + inode->i_mode = le16_to_cpu(raw_inode->i_mode); i_size_write(inode, le64_to_cpu(raw_inode->i_size)); inode->i_atime.tv_sec = le64_to_cpu(raw_inode->i_mtime); inode->i_ctime.tv_sec = le64_to_cpu(raw_inode->i_ctime); diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c index ed7c079cfc7f..d973c56e8bd6 100644 --- a/fs/f2fs/segment.c +++ b/fs/f2fs/segment.c @@ -630,7 +630,7 @@ static void f2fs_end_io_write(struct bio *bio, int err) SetPageError(page); if (page->mapping) 
set_bit(AS_EIO, &page->mapping->flags); - p->sbi->ckpt->ckpt_flags |= CP_ERROR_FLAG; + set_ckpt_flags(p->sbi->ckpt, CP_ERROR_FLAG); set_page_dirty(page); } end_page_writeback(page); @@ -1067,7 +1067,7 @@ static int read_normal_summaries(struct f2fs_sb_info *sbi, int type) segno = le32_to_cpu(ckpt->cur_data_segno[type]); blk_off = le16_to_cpu(ckpt->cur_data_blkoff[type - CURSEG_HOT_DATA]); - if (ckpt->ckpt_flags & CP_UMOUNT_FLAG) + if (is_set_ckpt_flags(ckpt, CP_UMOUNT_FLAG)) blk_addr = sum_blk_addr(sbi, NR_CURSEG_TYPE, type); else blk_addr = sum_blk_addr(sbi, NR_CURSEG_DATA_TYPE, type); @@ -1076,7 +1076,7 @@ static int read_normal_summaries(struct f2fs_sb_info *sbi, int type) CURSEG_HOT_NODE]); blk_off = le16_to_cpu(ckpt->cur_node_blkoff[type - CURSEG_HOT_NODE]); - if (ckpt->ckpt_flags & CP_UMOUNT_FLAG) + if (is_set_ckpt_flags(ckpt, CP_UMOUNT_FLAG)) blk_addr = sum_blk_addr(sbi, NR_CURSEG_NODE_TYPE, type - CURSEG_HOT_NODE); else @@ -1087,7 +1087,7 @@ static int read_normal_summaries(struct f2fs_sb_info *sbi, int type) sum = (struct f2fs_summary_block *)page_address(new); if (IS_NODESEG(type)) { - if (ckpt->ckpt_flags & CP_UMOUNT_FLAG) { + if (is_set_ckpt_flags(ckpt, CP_UMOUNT_FLAG)) { struct f2fs_summary *ns = &sum->entries[0]; int i; for (i = 0; i < sbi->blocks_per_seg; i++, ns++) { @@ -1119,7 +1119,7 @@ static int restore_curseg_summaries(struct f2fs_sb_info *sbi) { int type = CURSEG_HOT_DATA; - if (sbi->ckpt->ckpt_flags & CP_COMPACT_SUM_FLAG) { + if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_COMPACT_SUM_FLAG)) { /* restore for compacted data summary */ if (read_compacted_summaries(sbi)) return -EINVAL; @@ -1208,7 +1208,7 @@ static void write_normal_summaries(struct f2fs_sb_info *sbi, void write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk) { - if (sbi->ckpt->ckpt_flags & CP_COMPACT_SUM_FLAG) + if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_COMPACT_SUM_FLAG)) write_compacted_summaries(sbi, start_blk); else write_normal_summaries(sbi, start_blk, CURSEG_HOT_DATA); @@ -1216,7 +1216,7 @@ void write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk) void write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk) { - if (sbi->ckpt->ckpt_flags & CP_UMOUNT_FLAG) + if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_UMOUNT_FLAG)) write_normal_summaries(sbi, start_blk, CURSEG_HOT_NODE); return; } diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c index 8661c93538af..878bf382f848 100644 --- a/fs/f2fs/super.c +++ b/fs/f2fs/super.c @@ -89,7 +89,7 @@ static void f2fs_i_callback(struct rcu_head *head) kmem_cache_free(f2fs_inode_cachep, F2FS_I(inode)); } -void f2fs_destroy_inode(struct inode *inode) +static void f2fs_destroy_inode(struct inode *inode) { call_rcu(&inode->i_rcu, f2fs_i_callback); } @@ -445,7 +445,7 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent) if (sanity_check_raw_super(raw_super)) goto free_sb_buf; - sb->s_maxbytes = max_file_size(raw_super->log_blocksize); + sb->s_maxbytes = max_file_size(le32_to_cpu(raw_super->log_blocksize)); sb->s_max_links = F2FS_LINK_MAX; get_random_bytes(&sbi->s_next_generation, sizeof(u32)); @@ -527,7 +527,7 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent) /* if there are nt orphan nodes free them */ err = -EINVAL; - if (!(sbi->ckpt->ckpt_flags & CP_UMOUNT_FLAG) && + if (!is_set_ckpt_flags(F2FS_CKPT(sbi), CP_UMOUNT_FLAG) && recover_orphan_inodes(sbi)) goto free_node_inode; @@ -547,7 +547,7 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent) } /* recover fsynced data */ - if 
(!(sbi->ckpt->ckpt_flags & CP_UMOUNT_FLAG) && + if (!is_set_ckpt_flags(F2FS_CKPT(sbi), CP_UMOUNT_FLAG) && !test_opt(sbi, DISABLE_ROLL_FORWARD)) recover_fsync_data(sbi); diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h index 1429ece7caab..c2fbbc35c1e6 100644 --- a/include/linux/f2fs_fs.h +++ b/include/linux/f2fs_fs.h @@ -272,8 +272,8 @@ struct f2fs_sit_block { * ex) data_blkaddr = (block_t)(nodepage_start_address + ofs_in_node) */ #define ENTRIES_IN_SUM 512 -#define SUMMARY_SIZE (sizeof(struct f2fs_summary)) -#define SUM_FOOTER_SIZE (sizeof(struct summary_footer)) +#define SUMMARY_SIZE (7) /* sizeof(struct summary) */ +#define SUM_FOOTER_SIZE (5) /* sizeof(struct summary_footer) */ #define SUM_ENTRY_SIZE (SUMMARY_SIZE * ENTRIES_IN_SUM) /* a summary entry for a 4KB-sized block in a segment */ @@ -297,7 +297,7 @@ struct summary_footer { __u32 check_sum; /* summary checksum */ } __packed; -#define SUM_JOURNAL_SIZE (PAGE_CACHE_SIZE - SUM_FOOTER_SIZE -\ +#define SUM_JOURNAL_SIZE (F2FS_BLKSIZE - SUM_FOOTER_SIZE -\ SUM_ENTRY_SIZE) #define NAT_JOURNAL_ENTRIES ((SUM_JOURNAL_SIZE - 2) /\ sizeof(struct nat_journal_entry)) -- cgit v1.2.3 From 457d08ee4fd91c8df17917ff2d32565e6adacbfc Mon Sep 17 00:00:00 2001 From: Namjae Jeon Date: Sat, 8 Dec 2012 14:54:50 +0900 Subject: f2fs: introduce accessor to retrieve number of dentry slots Simplify code by providing the accessor macro to retrieve the number of dentry slots for a given filename length. Signed-off-by: Namjae Jeon Signed-off-by: Amit Sahrawat --- fs/f2fs/dir.c | 13 +++++-------- include/linux/f2fs_fs.h | 3 +++ 2 files changed, 8 insertions(+), 8 deletions(-) (limited to 'include/linux') diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c index fc02d8b43aea..d900c088c7c6 100644 --- a/fs/f2fs/dir.c +++ b/fs/f2fs/dir.c @@ -99,8 +99,7 @@ static struct f2fs_dir_entry *find_in_block(struct page *dentry_page, NR_DENTRY_IN_BLOCK, 0); while (bit_pos < NR_DENTRY_IN_BLOCK) { de = &dentry_blk->dentry[bit_pos]; - slots = (le16_to_cpu(de->name_len) + F2FS_NAME_LEN - 1) / - F2FS_NAME_LEN; + slots = GET_DENTRY_SLOTS(le16_to_cpu(de->name_len)); if (early_match_name(name, namelen, namehash, de)) { if (!memcmp(dentry_blk->filename[bit_pos], @@ -130,7 +129,7 @@ static struct f2fs_dir_entry *find_in_level(struct inode *dir, unsigned int level, const char *name, int namelen, f2fs_hash_t namehash, struct page **res_page) { - int s = (namelen + F2FS_NAME_LEN - 1) / F2FS_NAME_LEN; + int s = GET_DENTRY_SLOTS(namelen); unsigned int nbucket, nblock; unsigned int bidx, end_block; struct page *dentry_page; @@ -383,7 +382,7 @@ int f2fs_add_link(struct dentry *dentry, struct inode *inode) int namelen = dentry->d_name.len; struct page *dentry_page = NULL; struct f2fs_dentry_block *dentry_blk = NULL; - int slots = (namelen + F2FS_NAME_LEN - 1) / F2FS_NAME_LEN; + int slots = GET_DENTRY_SLOTS(namelen); int err = 0; int i; @@ -465,8 +464,7 @@ void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page, struct address_space *mapping = page->mapping; struct inode *dir = mapping->host; struct f2fs_sb_info *sbi = F2FS_SB(dir->i_sb); - int slots = (le16_to_cpu(dentry->name_len) + F2FS_NAME_LEN - 1) / - F2FS_NAME_LEN; + int slots = GET_DENTRY_SLOTS(le16_to_cpu(dentry->name_len)); void *kaddr = page_address(page); int i; @@ -641,8 +639,7 @@ static int f2fs_readdir(struct file *file, void *dirent, filldir_t filldir) file->f_pos += bit_pos - start_bit_pos; goto success; } - slots = (le16_to_cpu(de->name_len) + F2FS_NAME_LEN - 1) - / F2FS_NAME_LEN; + slots = 
GET_DENTRY_SLOTS(le16_to_cpu(de->name_len)); bit_pos += slots; } bit_pos = 0; diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h index c2fbbc35c1e6..f9a12f6243a5 100644 --- a/include/linux/f2fs_fs.h +++ b/include/linux/f2fs_fs.h @@ -363,6 +363,9 @@ typedef __le32 f2fs_hash_t; /* One directory entry slot covers 8bytes-long file name */ #define F2FS_NAME_LEN 8 +#define F2FS_NAME_LEN_BITS 3 + +#define GET_DENTRY_SLOTS(x) ((x + F2FS_NAME_LEN - 1) >> F2FS_NAME_LEN_BITS) /* the number of dentry in a block */ #define NR_DENTRY_IN_BLOCK 214 -- cgit v1.2.3 From 3c58346525d82625e68e24f071804c2dc057b6f4 Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Wed, 28 Nov 2012 16:23:01 +0000 Subject: slab: Simplify bootstrap The nodelists field in kmem_cache is pointing to the first unused object in the array field when bootstrap is complete. A problem with the current approach is that the statically sized kmem_cache structure use on boot can only contain NR_CPUS entries. If the number of nodes plus the number of cpus is greater then we would overwrite memory following the kmem_cache_boot definition. Increase the size of the array field to ensure that also the node pointers fit into the array field. Once we do that we no longer need the kmem_cache_nodelists array and we can then also use that structure elsewhere. Acked-by: Glauber Costa Signed-off-by: Christoph Lameter Signed-off-by: Pekka Enberg --- include/linux/slab_def.h | 6 +++++- mm/slab.c | 21 +++++++++++++-------- 2 files changed, 18 insertions(+), 9 deletions(-) (limited to 'include/linux') diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h index cc290f0bdb34..45c0356fdc8c 100644 --- a/include/linux/slab_def.h +++ b/include/linux/slab_def.h @@ -89,9 +89,13 @@ struct kmem_cache { * (see kmem_cache_init()) * We still use [NR_CPUS] and not [1] or [0] because cache_cache * is statically defined, so we reserve the max number of cpus. + * + * We also need to guarantee that the list is able to accomodate a + * pointer for each node since "nodelists" uses the remainder of + * available pointers. */ struct kmem_list3 **nodelists; - struct array_cache *array[NR_CPUS]; + struct array_cache *array[NR_CPUS + MAX_NUMNODES]; /* * Do not add fields after array[] */ diff --git a/mm/slab.c b/mm/slab.c index e26bff5ed1a6..c7ea5234c4e9 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -553,9 +553,7 @@ static struct arraycache_init initarray_generic = { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} }; /* internal cache of cache description objs */ -static struct kmem_list3 *kmem_cache_nodelists[MAX_NUMNODES]; static struct kmem_cache kmem_cache_boot = { - .nodelists = kmem_cache_nodelists, .batchcount = 1, .limit = BOOT_CPUCACHE_ENTRIES, .shared = 1, @@ -1559,6 +1557,15 @@ static void __init set_up_list3s(struct kmem_cache *cachep, int index) } } +/* + * The memory after the last cpu cache pointer is used for the + * the nodelists pointer. + */ +static void setup_nodelists_pointer(struct kmem_cache *cachep) +{ + cachep->nodelists = (struct kmem_list3 **)&cachep->array[nr_cpu_ids]; +} + /* * Initialisation. Called after the page allocator have been initialised and * before smp_init(). 
@@ -1573,15 +1580,14 @@ void __init kmem_cache_init(void) int node; kmem_cache = &kmem_cache_boot; + setup_nodelists_pointer(kmem_cache); if (num_possible_nodes() == 1) use_alien_caches = 0; - for (i = 0; i < NUM_INIT_LISTS; i++) { + for (i = 0; i < NUM_INIT_LISTS; i++) kmem_list3_init(&initkmem_list3[i]); - if (i < MAX_NUMNODES) - kmem_cache->nodelists[i] = NULL; - } + set_up_list3s(kmem_cache, CACHE_CACHE); /* @@ -1619,7 +1625,6 @@ void __init kmem_cache_init(void) list_add(&kmem_cache->list, &slab_caches); kmem_cache->colour_off = cache_line_size(); kmem_cache->array[smp_processor_id()] = &initarray_cache.cache; - kmem_cache->nodelists[node] = &initkmem_list3[CACHE_CACHE + node]; /* * struct kmem_cache size depends on nr_node_ids & nr_cpu_ids @@ -2422,7 +2427,7 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags) else gfp = GFP_NOWAIT; - cachep->nodelists = (struct kmem_list3 **)&cachep->array[nr_cpu_ids]; + setup_nodelists_pointer(cachep); #if DEBUG /* -- cgit v1.2.3 From d8153d4d8b7b6141770e1416c4a338161205ed1b Mon Sep 17 00:00:00 2001 From: Lino Sanfilippo Date: Tue, 14 Jun 2011 17:29:45 +0200 Subject: inotify, fanotify: replace fsnotify_put_group() with fsnotify_destroy_group() Currently in fsnotify_put_group() the ref count of a group is decremented and if it becomes 0 fsnotify_destroy_group() is called. Since a groups ref count is only at group creation set to 1 and never increased after that a call to fsnotify_put_group() always results in a call to fsnotify_destroy_group(). With this patch fsnotify_destroy_group() is called directly. Signed-off-by: Lino Sanfilippo Signed-off-by: Eric Paris --- fs/notify/fanotify/fanotify_user.c | 14 +++++++------- fs/notify/group.c | 2 +- fs/notify/inotify/inotify_user.c | 8 +++----- include/linux/fsnotify_backend.h | 3 ++- 4 files changed, 13 insertions(+), 14 deletions(-) (limited to 'include/linux') diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c index d43803669739..82ae6d783c14 100644 --- a/fs/notify/fanotify/fanotify_user.c +++ b/fs/notify/fanotify/fanotify_user.c @@ -415,7 +415,7 @@ static int fanotify_release(struct inode *ignored, struct file *file) wake_up(&group->fanotify_data.access_waitq); #endif /* matches the fanotify_init->fsnotify_alloc_group */ - fsnotify_put_group(group); + fsnotify_destroy_group(group); return 0; } @@ -728,13 +728,13 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags) break; default: fd = -EINVAL; - goto out_put_group; + goto out_destroy_group; } if (flags & FAN_UNLIMITED_QUEUE) { fd = -EPERM; if (!capable(CAP_SYS_ADMIN)) - goto out_put_group; + goto out_destroy_group; group->max_events = UINT_MAX; } else { group->max_events = FANOTIFY_DEFAULT_MAX_EVENTS; @@ -743,7 +743,7 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags) if (flags & FAN_UNLIMITED_MARKS) { fd = -EPERM; if (!capable(CAP_SYS_ADMIN)) - goto out_put_group; + goto out_destroy_group; group->fanotify_data.max_marks = UINT_MAX; } else { group->fanotify_data.max_marks = FANOTIFY_DEFAULT_MAX_MARKS; @@ -751,12 +751,12 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags) fd = anon_inode_getfd("[fanotify]", &fanotify_fops, group, f_flags); if (fd < 0) - goto out_put_group; + goto out_destroy_group; return fd; -out_put_group: - fsnotify_put_group(group); +out_destroy_group: + fsnotify_destroy_group(group); return fd; } diff --git a/fs/notify/group.c b/fs/notify/group.c index 63fc294a4692..cfda328c3d11 100644 
--- a/fs/notify/group.c +++ b/fs/notify/group.c @@ -50,7 +50,7 @@ void fsnotify_final_destroy_group(struct fsnotify_group *group) * situtation, the fsnotify_final_destroy_group will get called when that final * mark is freed. */ -static void fsnotify_destroy_group(struct fsnotify_group *group) +void fsnotify_destroy_group(struct fsnotify_group *group) { /* clear all inode marks for this group */ fsnotify_clear_marks_by_group(group); diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c index 8445fbc8985c..dbafbfc8ceca 100644 --- a/fs/notify/inotify/inotify_user.c +++ b/fs/notify/inotify/inotify_user.c @@ -293,10 +293,8 @@ static int inotify_release(struct inode *ignored, struct file *file) pr_debug("%s: group=%p\n", __func__, group); - fsnotify_clear_marks_by_group(group); - /* free this group, matching get was inotify_init->fsnotify_obtain_group */ - fsnotify_put_group(group); + fsnotify_destroy_group(group); return 0; } @@ -712,7 +710,7 @@ static struct fsnotify_group *inotify_new_group(unsigned int max_events) if (atomic_inc_return(&group->inotify_data.user->inotify_devs) > inotify_max_user_instances) { - fsnotify_put_group(group); + fsnotify_destroy_group(group); return ERR_PTR(-EMFILE); } @@ -741,7 +739,7 @@ SYSCALL_DEFINE1(inotify_init1, int, flags) ret = anon_inode_getfd("inotify", &inotify_fops, group, O_RDONLY | flags); if (ret < 0) - fsnotify_put_group(group); + fsnotify_destroy_group(group); return ret; } diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h index 63d966d5c2ea..d2ad345bdeec 100644 --- a/include/linux/fsnotify_backend.h +++ b/include/linux/fsnotify_backend.h @@ -364,7 +364,8 @@ static inline void __fsnotify_d_instantiate(struct dentry *dentry, struct inode extern struct fsnotify_group *fsnotify_alloc_group(const struct fsnotify_ops *ops); /* drop reference on a group from fsnotify_alloc_group */ extern void fsnotify_put_group(struct fsnotify_group *group); - +/* destroy group */ +extern void fsnotify_destroy_group(struct fsnotify_group *group); /* take a reference to an event */ extern void fsnotify_get_event(struct fsnotify_event *event); extern void fsnotify_put_event(struct fsnotify_event *event); -- cgit v1.2.3 From 986129520479d689962a42c31acdeaf854ac91f5 Mon Sep 17 00:00:00 2001 From: Lino Sanfilippo Date: Tue, 14 Jun 2011 17:29:46 +0200 Subject: fsnotify: introduce fsnotify_get_group() Introduce fsnotify_get_group() which increments the reference counter of a group. Signed-off-by: Lino Sanfilippo Signed-off-by: Eric Paris --- fs/notify/group.c | 8 ++++++++ include/linux/fsnotify_backend.h | 4 +++- 2 files changed, 11 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/fs/notify/group.c b/fs/notify/group.c index cfda328c3d11..1d57c35f1043 100644 --- a/fs/notify/group.c +++ b/fs/notify/group.c @@ -62,6 +62,14 @@ void fsnotify_destroy_group(struct fsnotify_group *group) fsnotify_final_destroy_group(group); } +/* + * Get reference to a group. + */ +void fsnotify_get_group(struct fsnotify_group *group) +{ + atomic_inc(&group->refcnt); +} + /* * Drop a reference to a group. Free it if it's through. 
*/ diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h index d2ad345bdeec..e76cef75295d 100644 --- a/include/linux/fsnotify_backend.h +++ b/include/linux/fsnotify_backend.h @@ -360,8 +360,10 @@ static inline void __fsnotify_d_instantiate(struct dentry *dentry, struct inode /* called from fsnotify listeners, such as fanotify or dnotify */ -/* get a reference to an existing or create a new group */ +/* create a new group */ extern struct fsnotify_group *fsnotify_alloc_group(const struct fsnotify_ops *ops); +/* get reference to a group */ +extern void fsnotify_get_group(struct fsnotify_group *group); /* drop reference on a group from fsnotify_alloc_group */ extern void fsnotify_put_group(struct fsnotify_group *group); /* destroy group */ -- cgit v1.2.3 From 986ab09807ca9454c3f54aae4db7e1bb00daeed3 Mon Sep 17 00:00:00 2001 From: Lino Sanfilippo Date: Tue, 14 Jun 2011 17:29:50 +0200 Subject: fsnotify: use a mutex instead of a spinlock to protect a groups mark list Replaces the groups mark_lock spinlock with a mutex. Using a mutex instead of a spinlock results in more flexibility (i.e it allows to sleep while the lock is held). Signed-off-by: Lino Sanfilippo Signed-off-by: Eric Paris --- fs/notify/group.c | 2 +- fs/notify/inode_mark.c | 4 ++-- fs/notify/mark.c | 18 +++++++++--------- fs/notify/vfsmount_mark.c | 4 ++-- include/linux/fsnotify_backend.h | 2 +- 5 files changed, 15 insertions(+), 15 deletions(-) (limited to 'include/linux') diff --git a/fs/notify/group.c b/fs/notify/group.c index 354044c47e23..1f7305711fc9 100644 --- a/fs/notify/group.c +++ b/fs/notify/group.c @@ -95,7 +95,7 @@ struct fsnotify_group *fsnotify_alloc_group(const struct fsnotify_ops *ops) init_waitqueue_head(&group->notification_waitq); group->max_events = UINT_MAX; - spin_lock_init(&group->mark_lock); + mutex_init(&group->mark_mutex); INIT_LIST_HEAD(&group->marks_list); group->ops = ops; diff --git a/fs/notify/inode_mark.c b/fs/notify/inode_mark.c index b13c00ac48eb..4e9071e37d5d 100644 --- a/fs/notify/inode_mark.c +++ b/fs/notify/inode_mark.c @@ -63,8 +63,8 @@ void fsnotify_destroy_inode_mark(struct fsnotify_mark *mark) { struct inode *inode = mark->i.inode; + BUG_ON(!mutex_is_locked(&mark->group->mark_mutex)); assert_spin_locked(&mark->lock); - assert_spin_locked(&mark->group->mark_lock); spin_lock(&inode->i_lock); @@ -191,8 +191,8 @@ int fsnotify_add_inode_mark(struct fsnotify_mark *mark, mark->flags |= FSNOTIFY_MARK_FLAG_INODE; + BUG_ON(!mutex_is_locked(&group->mark_mutex)); assert_spin_locked(&mark->lock); - assert_spin_locked(&group->mark_lock); spin_lock(&inode->i_lock); diff --git a/fs/notify/mark.c b/fs/notify/mark.c index 32447dc06c07..ab25b810b146 100644 --- a/fs/notify/mark.c +++ b/fs/notify/mark.c @@ -136,13 +136,13 @@ void fsnotify_destroy_mark(struct fsnotify_mark *mark) group = mark->group; spin_unlock(&mark->lock); - spin_lock(&group->mark_lock); + mutex_lock(&group->mark_mutex); spin_lock(&mark->lock); /* something else already called this function on this mark */ if (!(mark->flags & FSNOTIFY_MARK_FLAG_ALIVE)) { spin_unlock(&mark->lock); - spin_unlock(&group->mark_lock); + mutex_unlock(&group->mark_mutex); goto put_group; } @@ -159,7 +159,7 @@ void fsnotify_destroy_mark(struct fsnotify_mark *mark) list_del_init(&mark->g_list); spin_unlock(&mark->lock); - spin_unlock(&group->mark_lock); + mutex_unlock(&group->mark_mutex); spin_lock(&destroy_lock); list_add(&mark->destroy_list, &destroy_list); @@ -232,11 +232,11 @@ int fsnotify_add_mark(struct fsnotify_mark *mark, /* * 
LOCKING ORDER!!!! - * group->mark_lock + * group->mark_mutex * mark->lock * inode->i_lock */ - spin_lock(&group->mark_lock); + mutex_lock(&group->mark_mutex); spin_lock(&mark->lock); mark->flags |= FSNOTIFY_MARK_FLAG_ALIVE; @@ -263,7 +263,7 @@ int fsnotify_add_mark(struct fsnotify_mark *mark, fsnotify_set_mark_mask_locked(mark, mark->mask); spin_unlock(&mark->lock); - spin_unlock(&group->mark_lock); + mutex_unlock(&group->mark_mutex); if (inode) __fsnotify_update_child_dentry_flags(inode); @@ -277,7 +277,7 @@ err: atomic_dec(&group->num_marks); spin_unlock(&mark->lock); - spin_unlock(&group->mark_lock); + mutex_unlock(&group->mark_mutex); spin_lock(&destroy_lock); list_add(&mark->destroy_list, &destroy_list); @@ -296,7 +296,7 @@ void fsnotify_clear_marks_by_group_flags(struct fsnotify_group *group, struct fsnotify_mark *lmark, *mark; LIST_HEAD(free_list); - spin_lock(&group->mark_lock); + mutex_lock(&group->mark_mutex); list_for_each_entry_safe(mark, lmark, &group->marks_list, g_list) { if (mark->flags & flags) { list_add(&mark->free_g_list, &free_list); @@ -304,7 +304,7 @@ void fsnotify_clear_marks_by_group_flags(struct fsnotify_group *group, fsnotify_get_mark(mark); } } - spin_unlock(&group->mark_lock); + mutex_unlock(&group->mark_mutex); list_for_each_entry_safe(mark, lmark, &free_list, free_g_list) { fsnotify_destroy_mark(mark); diff --git a/fs/notify/vfsmount_mark.c b/fs/notify/vfsmount_mark.c index b7b4b0e8554f..f26a348827f8 100644 --- a/fs/notify/vfsmount_mark.c +++ b/fs/notify/vfsmount_mark.c @@ -88,8 +88,8 @@ void fsnotify_destroy_vfsmount_mark(struct fsnotify_mark *mark) { struct vfsmount *mnt = mark->m.mnt; + BUG_ON(!mutex_is_locked(&mark->group->mark_mutex)); assert_spin_locked(&mark->lock); - assert_spin_locked(&mark->group->mark_lock); spin_lock(&mnt->mnt_root->d_lock); @@ -151,8 +151,8 @@ int fsnotify_add_vfsmount_mark(struct fsnotify_mark *mark, mark->flags |= FSNOTIFY_MARK_FLAG_VFSMOUNT; + BUG_ON(!mutex_is_locked(&group->mark_mutex)); assert_spin_locked(&mark->lock); - assert_spin_locked(&group->mark_lock); spin_lock(&mnt->mnt_root->d_lock); diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h index e76cef75295d..c5848346840d 100644 --- a/include/linux/fsnotify_backend.h +++ b/include/linux/fsnotify_backend.h @@ -141,7 +141,7 @@ struct fsnotify_group { unsigned int priority; /* stores all fastpath marks assoc with this group so they can be cleaned on unregister */ - spinlock_t mark_lock; /* protect marks_list */ + struct mutex mark_mutex; /* protect marks_list */ atomic_t num_marks; /* 1 for each mark and 1 for not being * past the point of no return when freeing * a group */ -- cgit v1.2.3 From e2a29943e9a2ee2aa737a77f550f46ba72269db4 Mon Sep 17 00:00:00 2001 From: Lino Sanfilippo Date: Tue, 14 Jun 2011 17:29:51 +0200 Subject: fsnotify: pass group to fsnotify_destroy_mark() In fsnotify_destroy_mark() dont get the group from the passed mark anymore, but pass the group itself as an additional parameter to the function. 
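
A minimal sketch of the resulting calling convention; the helper name below is hypothetical, and only the fsnotify_destroy_mark() signature comes from this patch. Callers that hold a mark reference now name the group explicitly instead of letting the mark derive it:

	static void example_remove_watch(struct fsnotify_mark *mark,
					 struct fsnotify_group *group)
	{
		/* group is supplied by the caller rather than read from mark->group */
		fsnotify_destroy_mark(mark, group);
		fsnotify_put_mark(mark);	/* drop the caller's reference */
	}
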
Signed-off-by: Lino Sanfilippo Signed-off-by: Eric Paris --- fs/notify/dnotify/dnotify.c | 4 ++-- fs/notify/fanotify/fanotify_user.c | 4 ++-- fs/notify/inode_mark.c | 10 +++++++++- fs/notify/inotify/inotify_fsnotify.c | 2 +- fs/notify/inotify/inotify_user.c | 2 +- fs/notify/mark.c | 21 ++++----------------- fs/notify/vfsmount_mark.c | 10 +++++++++- include/linux/fsnotify_backend.h | 5 +++-- kernel/audit_tree.c | 10 +++++----- kernel/audit_watch.c | 4 ++-- 10 files changed, 38 insertions(+), 34 deletions(-) (limited to 'include/linux') diff --git a/fs/notify/dnotify/dnotify.c b/fs/notify/dnotify/dnotify.c index 3344bdd5506e..08b886f119ce 100644 --- a/fs/notify/dnotify/dnotify.c +++ b/fs/notify/dnotify/dnotify.c @@ -201,7 +201,7 @@ void dnotify_flush(struct file *filp, fl_owner_t id) /* nothing else could have found us thanks to the dnotify_mark_mutex */ if (dn_mark->dn == NULL) - fsnotify_destroy_mark(fsn_mark); + fsnotify_destroy_mark(fsn_mark, dnotify_group); mutex_unlock(&dnotify_mark_mutex); @@ -385,7 +385,7 @@ out: spin_unlock(&fsn_mark->lock); if (destroy) - fsnotify_destroy_mark(fsn_mark); + fsnotify_destroy_mark(fsn_mark, dnotify_group); mutex_unlock(&dnotify_mark_mutex); fsnotify_put_mark(fsn_mark); diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c index 599a01952c74..1218d10424d0 100644 --- a/fs/notify/fanotify/fanotify_user.c +++ b/fs/notify/fanotify/fanotify_user.c @@ -546,7 +546,7 @@ static int fanotify_remove_vfsmount_mark(struct fsnotify_group *group, removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags, &destroy_mark); if (destroy_mark) - fsnotify_destroy_mark(fsn_mark); + fsnotify_destroy_mark(fsn_mark, group); fsnotify_put_mark(fsn_mark); if (removed & real_mount(mnt)->mnt_fsnotify_mask) @@ -570,7 +570,7 @@ static int fanotify_remove_inode_mark(struct fsnotify_group *group, removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags, &destroy_mark); if (destroy_mark) - fsnotify_destroy_mark(fsn_mark); + fsnotify_destroy_mark(fsn_mark, group); /* matches the fsnotify_find_inode_mark() */ fsnotify_put_mark(fsn_mark); if (removed & inode->i_fsnotify_mask) diff --git a/fs/notify/inode_mark.c b/fs/notify/inode_mark.c index 4e9071e37d5d..21230209c957 100644 --- a/fs/notify/inode_mark.c +++ b/fs/notify/inode_mark.c @@ -99,8 +99,16 @@ void fsnotify_clear_marks_by_inode(struct inode *inode) spin_unlock(&inode->i_lock); list_for_each_entry_safe(mark, lmark, &free_list, i.free_i_list) { - fsnotify_destroy_mark(mark); + struct fsnotify_group *group; + + spin_lock(&mark->lock); + fsnotify_get_group(mark->group); + group = mark->group; + spin_unlock(&mark->lock); + + fsnotify_destroy_mark(mark, group); fsnotify_put_mark(mark); + fsnotify_put_group(group); } } diff --git a/fs/notify/inotify/inotify_fsnotify.c b/fs/notify/inotify/inotify_fsnotify.c index 74977fbf5aae..871569c7d609 100644 --- a/fs/notify/inotify/inotify_fsnotify.c +++ b/fs/notify/inotify/inotify_fsnotify.c @@ -132,7 +132,7 @@ static int inotify_handle_event(struct fsnotify_group *group, } if (inode_mark->mask & IN_ONESHOT) - fsnotify_destroy_mark(inode_mark); + fsnotify_destroy_mark(inode_mark, group); return ret; } diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c index 246250f1db7a..00ff82ff7c9f 100644 --- a/fs/notify/inotify/inotify_user.c +++ b/fs/notify/inotify/inotify_user.c @@ -816,7 +816,7 @@ SYSCALL_DEFINE2(inotify_rm_watch, int, fd, __s32, wd) ret = 0; - fsnotify_destroy_mark(&i_mark->fsn_mark); + 
fsnotify_destroy_mark(&i_mark->fsn_mark, group); /* match ref taken by inotify_idr_find */ fsnotify_put_mark(&i_mark->fsn_mark); diff --git a/fs/notify/mark.c b/fs/notify/mark.c index ab25b810b146..b77c833c8d0a 100644 --- a/fs/notify/mark.c +++ b/fs/notify/mark.c @@ -121,21 +121,11 @@ void fsnotify_put_mark(struct fsnotify_mark *mark) * The caller had better be holding a reference to this mark so we don't actually * do the final put under the mark->lock */ -void fsnotify_destroy_mark(struct fsnotify_mark *mark) +void fsnotify_destroy_mark(struct fsnotify_mark *mark, + struct fsnotify_group *group) { - struct fsnotify_group *group; struct inode *inode = NULL; - spin_lock(&mark->lock); - /* dont get the group from a mark that is not alive yet */ - if (!(mark->flags & FSNOTIFY_MARK_FLAG_ALIVE)) { - spin_unlock(&mark->lock); - return; - } - fsnotify_get_group(mark->group); - group = mark->group; - spin_unlock(&mark->lock); - mutex_lock(&group->mark_mutex); spin_lock(&mark->lock); @@ -143,7 +133,7 @@ void fsnotify_destroy_mark(struct fsnotify_mark *mark) if (!(mark->flags & FSNOTIFY_MARK_FLAG_ALIVE)) { spin_unlock(&mark->lock); mutex_unlock(&group->mark_mutex); - goto put_group; + return; } mark->flags &= ~FSNOTIFY_MARK_FLAG_ALIVE; @@ -194,9 +184,6 @@ void fsnotify_destroy_mark(struct fsnotify_mark *mark) */ atomic_dec(&group->num_marks); - -put_group: - fsnotify_put_group(group); } void fsnotify_set_mark_mask_locked(struct fsnotify_mark *mark, __u32 mask) @@ -307,7 +294,7 @@ void fsnotify_clear_marks_by_group_flags(struct fsnotify_group *group, mutex_unlock(&group->mark_mutex); list_for_each_entry_safe(mark, lmark, &free_list, free_g_list) { - fsnotify_destroy_mark(mark); + fsnotify_destroy_mark(mark, group); fsnotify_put_mark(mark); } } diff --git a/fs/notify/vfsmount_mark.c b/fs/notify/vfsmount_mark.c index f26a348827f8..4df58b8ea64a 100644 --- a/fs/notify/vfsmount_mark.c +++ b/fs/notify/vfsmount_mark.c @@ -46,8 +46,16 @@ void fsnotify_clear_marks_by_mount(struct vfsmount *mnt) spin_unlock(&mnt->mnt_root->d_lock); list_for_each_entry_safe(mark, lmark, &free_list, m.free_m_list) { - fsnotify_destroy_mark(mark); + struct fsnotify_group *group; + + spin_lock(&mark->lock); + fsnotify_get_group(mark->group); + group = mark->group; + spin_unlock(&mark->lock); + + fsnotify_destroy_mark(mark, group); fsnotify_put_mark(mark); + fsnotify_put_group(group); } } diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h index c5848346840d..140b4b8a100b 100644 --- a/include/linux/fsnotify_backend.h +++ b/include/linux/fsnotify_backend.h @@ -408,8 +408,9 @@ extern void fsnotify_set_mark_mask_locked(struct fsnotify_mark *mark, __u32 mask /* attach the mark to both the group and the inode */ extern int fsnotify_add_mark(struct fsnotify_mark *mark, struct fsnotify_group *group, struct inode *inode, struct vfsmount *mnt, int allow_dups); -/* given a mark, flag it to be freed when all references are dropped */ -extern void fsnotify_destroy_mark(struct fsnotify_mark *mark); +/* given a group and a mark, flag mark to be freed when all references are dropped */ +extern void fsnotify_destroy_mark(struct fsnotify_mark *mark, + struct fsnotify_group *group); /* run all the marks in a group, and clear all of the vfsmount marks */ extern void fsnotify_clear_vfsmount_marks_by_group(struct fsnotify_group *group); /* run all the marks in a group, and clear all of the inode marks */ diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c index ed206fd88cca..e81175ef25f8 100644 --- a/kernel/audit_tree.c 
+++ b/kernel/audit_tree.c @@ -249,7 +249,7 @@ static void untag_chunk(struct node *p) list_del_rcu(&chunk->hash); spin_unlock(&hash_lock); spin_unlock(&entry->lock); - fsnotify_destroy_mark(entry); + fsnotify_destroy_mark(entry, audit_tree_group); goto out; } @@ -291,7 +291,7 @@ static void untag_chunk(struct node *p) owner->root = new; spin_unlock(&hash_lock); spin_unlock(&entry->lock); - fsnotify_destroy_mark(entry); + fsnotify_destroy_mark(entry, audit_tree_group); fsnotify_put_mark(&new->mark); /* drop initial reference */ goto out; @@ -331,7 +331,7 @@ static int create_chunk(struct inode *inode, struct audit_tree *tree) spin_unlock(&hash_lock); chunk->dead = 1; spin_unlock(&entry->lock); - fsnotify_destroy_mark(entry); + fsnotify_destroy_mark(entry, audit_tree_group); fsnotify_put_mark(entry); return 0; } @@ -412,7 +412,7 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree) spin_unlock(&chunk_entry->lock); spin_unlock(&old_entry->lock); - fsnotify_destroy_mark(chunk_entry); + fsnotify_destroy_mark(chunk_entry, audit_tree_group); fsnotify_put_mark(chunk_entry); fsnotify_put_mark(old_entry); @@ -443,7 +443,7 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree) spin_unlock(&hash_lock); spin_unlock(&chunk_entry->lock); spin_unlock(&old_entry->lock); - fsnotify_destroy_mark(old_entry); + fsnotify_destroy_mark(old_entry, audit_tree_group); fsnotify_put_mark(chunk_entry); /* drop initial reference */ fsnotify_put_mark(old_entry); /* pair to fsnotify_find mark_entry */ return 0; diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c index 3823281401b5..a66affc1c12c 100644 --- a/kernel/audit_watch.c +++ b/kernel/audit_watch.c @@ -349,7 +349,7 @@ static void audit_remove_parent_watches(struct audit_parent *parent) } mutex_unlock(&audit_filter_mutex); - fsnotify_destroy_mark(&parent->mark); + fsnotify_destroy_mark(&parent->mark, audit_watch_group); } /* Get path information necessary for adding watches. */ @@ -456,7 +456,7 @@ void audit_remove_watch_rule(struct audit_krule *krule) if (list_empty(&parent->watches)) { audit_get_parent(parent); - fsnotify_destroy_mark(&parent->mark); + fsnotify_destroy_mark(&parent->mark, audit_watch_group); audit_put_parent(parent); } } -- cgit v1.2.3 From d5a335b845792d2a69ed1e244c0b233117b7db3c Mon Sep 17 00:00:00 2001 From: Lino Sanfilippo Date: Tue, 14 Jun 2011 17:29:52 +0200 Subject: fsnotify: introduce locked versions of fsnotify_add_mark() and fsnotify_remove_mark() This patch introduces fsnotify_add_mark_locked() and fsnotify_remove_mark_locked() which are essentially the same as fsnotify_add_mark() and fsnotify_remove_mark() but assume that the caller has already taken the groups mark mutex. 
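
A minimal sketch of how a caller might use the locked variant; the helper below is hypothetical, and only the fsnotify_add_mark_locked() signature comes from this patch. Taking mark_mutex once lets several marks be attached under a single lock acquisition:

	static int example_add_two_marks(struct fsnotify_group *group,
					 struct fsnotify_mark *a,
					 struct fsnotify_mark *b,
					 struct inode *inode)
	{
		int ret;

		mutex_lock(&group->mark_mutex);
		ret = fsnotify_add_mark_locked(a, group, inode, NULL, 0);
		if (!ret)
			ret = fsnotify_add_mark_locked(b, group, inode, NULL, 0);
		mutex_unlock(&group->mark_mutex);
		return ret;
	}
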
Signed-off-by: Lino Sanfilippo Signed-off-by: Eric Paris --- fs/notify/mark.c | 42 ++++++++++++++++++++++++++++------------ include/linux/fsnotify_backend.h | 4 ++++ 2 files changed, 34 insertions(+), 12 deletions(-) (limited to 'include/linux') diff --git a/fs/notify/mark.c b/fs/notify/mark.c index b77c833c8d0a..f9dda0304a10 100644 --- a/fs/notify/mark.c +++ b/fs/notify/mark.c @@ -121,18 +121,18 @@ void fsnotify_put_mark(struct fsnotify_mark *mark) * The caller had better be holding a reference to this mark so we don't actually * do the final put under the mark->lock */ -void fsnotify_destroy_mark(struct fsnotify_mark *mark, - struct fsnotify_group *group) +void fsnotify_destroy_mark_locked(struct fsnotify_mark *mark, + struct fsnotify_group *group) { struct inode *inode = NULL; - mutex_lock(&group->mark_mutex); + BUG_ON(!mutex_is_locked(&group->mark_mutex)); + spin_lock(&mark->lock); /* something else already called this function on this mark */ if (!(mark->flags & FSNOTIFY_MARK_FLAG_ALIVE)) { spin_unlock(&mark->lock); - mutex_unlock(&group->mark_mutex); return; } @@ -149,6 +149,8 @@ void fsnotify_destroy_mark(struct fsnotify_mark *mark, list_del_init(&mark->g_list); spin_unlock(&mark->lock); + + /* release lock temporarily */ mutex_unlock(&group->mark_mutex); spin_lock(&destroy_lock); @@ -184,6 +186,16 @@ void fsnotify_destroy_mark(struct fsnotify_mark *mark, */ atomic_dec(&group->num_marks); + + mutex_lock(&group->mark_mutex); +} + +void fsnotify_destroy_mark(struct fsnotify_mark *mark, + struct fsnotify_group *group) +{ + mutex_lock(&group->mark_mutex); + fsnotify_destroy_mark_locked(mark, group); + mutex_unlock(&group->mark_mutex); } void fsnotify_set_mark_mask_locked(struct fsnotify_mark *mark, __u32 mask) @@ -208,14 +220,15 @@ void fsnotify_set_mark_ignored_mask_locked(struct fsnotify_mark *mark, __u32 mas * These marks may be used for the fsnotify backend to determine which * event types should be delivered to which group. */ -int fsnotify_add_mark(struct fsnotify_mark *mark, - struct fsnotify_group *group, struct inode *inode, - struct vfsmount *mnt, int allow_dups) +int fsnotify_add_mark_locked(struct fsnotify_mark *mark, + struct fsnotify_group *group, struct inode *inode, + struct vfsmount *mnt, int allow_dups) { int ret = 0; BUG_ON(inode && mnt); BUG_ON(!inode && !mnt); + BUG_ON(!mutex_is_locked(&group->mark_mutex)); /* * LOCKING ORDER!!!! 
@@ -223,8 +236,6 @@ int fsnotify_add_mark(struct fsnotify_mark *mark, * mark->lock * inode->i_lock */ - mutex_lock(&group->mark_mutex); - spin_lock(&mark->lock); mark->flags |= FSNOTIFY_MARK_FLAG_ALIVE; @@ -250,8 +261,6 @@ int fsnotify_add_mark(struct fsnotify_mark *mark, fsnotify_set_mark_mask_locked(mark, mark->mask); spin_unlock(&mark->lock); - mutex_unlock(&group->mark_mutex); - if (inode) __fsnotify_update_child_dentry_flags(inode); @@ -264,7 +273,6 @@ err: atomic_dec(&group->num_marks); spin_unlock(&mark->lock); - mutex_unlock(&group->mark_mutex); spin_lock(&destroy_lock); list_add(&mark->destroy_list, &destroy_list); @@ -274,6 +282,16 @@ err: return ret; } +int fsnotify_add_mark(struct fsnotify_mark *mark, struct fsnotify_group *group, + struct inode *inode, struct vfsmount *mnt, int allow_dups) +{ + int ret; + mutex_lock(&group->mark_mutex); + ret = fsnotify_add_mark_locked(mark, group, inode, mnt, allow_dups); + mutex_unlock(&group->mark_mutex); + return ret; +} + /* * clear any marks in a group in which mark->flags & flags is true */ diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h index 140b4b8a100b..26c06afa264e 100644 --- a/include/linux/fsnotify_backend.h +++ b/include/linux/fsnotify_backend.h @@ -408,9 +408,13 @@ extern void fsnotify_set_mark_mask_locked(struct fsnotify_mark *mark, __u32 mask /* attach the mark to both the group and the inode */ extern int fsnotify_add_mark(struct fsnotify_mark *mark, struct fsnotify_group *group, struct inode *inode, struct vfsmount *mnt, int allow_dups); +extern int fsnotify_add_mark_locked(struct fsnotify_mark *mark, struct fsnotify_group *group, + struct inode *inode, struct vfsmount *mnt, int allow_dups); /* given a group and a mark, flag mark to be freed when all references are dropped */ extern void fsnotify_destroy_mark(struct fsnotify_mark *mark, struct fsnotify_group *group); +extern void fsnotify_destroy_mark_locked(struct fsnotify_mark *mark, + struct fsnotify_group *group); /* run all the marks in a group, and clear all of the vfsmount marks */ extern void fsnotify_clear_vfsmount_marks_by_group(struct fsnotify_group *group); /* run all the marks in a group, and clear all of the inode marks */ -- cgit v1.2.3 From 64c20d2a20fce295c260ea6cb3b468edfa2fb07b Mon Sep 17 00:00:00 2001 From: Lino Sanfilippo Date: Tue, 14 Jun 2011 17:29:53 +0200 Subject: fsnotify: dont put marks on temporary list when clearing marks by group In clear_marks_by_group_flags() the mark list of a group is iterated and the marks are put on a temporary list. Since we introduced fsnotify_destroy_mark_locked() we dont need the temp list any more and are able to remove the marks while the mark list is iterated and the mark list mutex is held. 
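
A minimal sketch of the resulting loop; the wrapper name is hypothetical, while the body mirrors the hunk below. Marks are torn down in place as the list is walked under mark_mutex, so no temporary free list is needed:

	static void example_clear_marks(struct fsnotify_group *group,
					unsigned int flags)
	{
		struct fsnotify_mark *mark, *lmark;

		mutex_lock(&group->mark_mutex);
		list_for_each_entry_safe(mark, lmark, &group->marks_list, g_list) {
			if (mark->flags & flags) {
				fsnotify_get_mark(mark);	/* pin the mark across destroy */
				fsnotify_destroy_mark_locked(mark, group);
				fsnotify_put_mark(mark);
			}
		}
		mutex_unlock(&group->mark_mutex);
	}
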
Signed-off-by: Lino Sanfilippo Signed-off-by: Eric Paris --- fs/notify/mark.c | 10 ++-------- include/linux/fsnotify_backend.h | 1 - 2 files changed, 2 insertions(+), 9 deletions(-) (limited to 'include/linux') diff --git a/fs/notify/mark.c b/fs/notify/mark.c index f9dda0304a10..0e93d90bb753 100644 --- a/fs/notify/mark.c +++ b/fs/notify/mark.c @@ -299,22 +299,16 @@ void fsnotify_clear_marks_by_group_flags(struct fsnotify_group *group, unsigned int flags) { struct fsnotify_mark *lmark, *mark; - LIST_HEAD(free_list); mutex_lock(&group->mark_mutex); list_for_each_entry_safe(mark, lmark, &group->marks_list, g_list) { if (mark->flags & flags) { - list_add(&mark->free_g_list, &free_list); - list_del_init(&mark->g_list); fsnotify_get_mark(mark); + fsnotify_destroy_mark_locked(mark, group); + fsnotify_put_mark(mark); } } mutex_unlock(&group->mark_mutex); - - list_for_each_entry_safe(mark, lmark, &free_list, free_g_list) { - fsnotify_destroy_mark(mark, group); - fsnotify_put_mark(mark); - } } /* diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h index 26c06afa264e..5a8899350456 100644 --- a/include/linux/fsnotify_backend.h +++ b/include/linux/fsnotify_backend.h @@ -287,7 +287,6 @@ struct fsnotify_mark { struct fsnotify_inode_mark i; struct fsnotify_vfsmount_mark m; }; - struct list_head free_g_list; /* tmp list used when freeing this mark */ __u32 ignored_mask; /* events types to ignore */ #define FSNOTIFY_MARK_FLAG_INODE 0x01 #define FSNOTIFY_MARK_FLAG_VFSMOUNT 0x02 -- cgit v1.2.3 From 6960b0d909cde5bdff49e4e5c1250edd10be7ebd Mon Sep 17 00:00:00 2001 From: Lino Sanfilippo Date: Fri, 12 Aug 2011 01:13:31 +0200 Subject: fsnotify: change locking order On Mon, Aug 01, 2011 at 04:38:22PM -0400, Eric Paris wrote: > > I finally built and tested a v3.0 kernel with these patches (I know I'm > SOOOOOO far behind). Not what I hoped for: > > > [ 150.937798] VFS: Busy inodes after unmount of tmpfs. Self-destruct in 5 seconds. Have a nice day... 
> > [ 150.945290] BUG: unable to handle kernel NULL pointer dereference at 0000000000000070 > > [ 150.946012] IP: [] shmem_free_inode+0x18/0x50 > > [ 150.946012] PGD 2bf9e067 PUD 2bf9f067 PMD 0 > > [ 150.946012] Oops: 0000 [#1] PREEMPT SMP DEBUG_PAGEALLOC > > [ 150.946012] CPU 0 > > [ 150.946012] Modules linked in: nfs lockd fscache auth_rpcgss nfs_acl sunrpc ip6t_REJECT nf_conntrack_ipv6 nf_defrag_ipv6 ip6table_filter ip6_tables ext4 jbd2 crc16 joydev ata_piix i2c_piix4 pcspkr uinput ipv6 autofs4 usbhid [last unloaded: scsi_wait_scan] > > [ 150.946012] > > [ 150.946012] Pid: 2764, comm: syscall_thrash Not tainted 3.0.0+ #1 Red Hat KVM > > [ 150.946012] RIP: 0010:[] [] shmem_free_inode+0x18/0x50 > > [ 150.946012] RSP: 0018:ffff88002c2e5df8 EFLAGS: 00010282 > > [ 150.946012] RAX: 000000004e370d9f RBX: 0000000000000000 RCX: ffff88003a029438 > > [ 150.946012] RDX: 0000000033630a5f RSI: 0000000000000000 RDI: ffff88003491c240 > > [ 150.946012] RBP: ffff88002c2e5e08 R08: 0000000000000000 R09: 0000000000000000 > > [ 150.946012] R10: 0000000000000000 R11: 0000000000000000 R12: ffff88003a029428 > > [ 150.946012] R13: ffff88003a029428 R14: ffff88003a029428 R15: ffff88003499a610 > > [ 150.946012] FS: 00007f5a05420700(0000) GS:ffff88003f600000(0000) knlGS:0000000000000000 > > [ 150.946012] CS: 0010 DS: 0000 ES: 0000 CR0: 000000008005003b > > [ 150.946012] CR2: 0000000000000070 CR3: 000000002a662000 CR4: 00000000000006f0 > > [ 150.946012] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 > > [ 150.946012] DR3: 0000000000000000 DR6: 00000000ffff0ff0 DR7: 0000000000000400 > > [ 150.946012] Process syscall_thrash (pid: 2764, threadinfo ffff88002c2e4000, task ffff88002bfbc760) > > [ 150.946012] Stack: > > [ 150.946012] ffff88003a029438 ffff88003a029428 ffff88002c2e5e38 ffffffff81102f76 > > [ 150.946012] ffff88003a029438 ffff88003a029598 ffffffff8160f9c0 ffff88002c221250 > > [ 150.946012] ffff88002c2e5e68 ffffffff8115e9be ffff88002c2e5e68 ffff88003a029438 > > [ 150.946012] Call Trace: > > [ 150.946012] [] shmem_evict_inode+0x76/0x130 > > [ 150.946012] [] evict+0x7e/0x170 > > [ 150.946012] [] iput_final+0xd0/0x190 > > [ 150.946012] [] iput+0x33/0x40 > > [ 150.946012] [] fsnotify_destroy_mark_locked+0x145/0x160 > > [ 150.946012] [] fsnotify_destroy_mark+0x36/0x50 > > [ 150.946012] [] sys_inotify_rm_watch+0x77/0xd0 > > [ 150.946012] [] system_call_fastpath+0x16/0x1b > > [ 150.946012] Code: 67 4a 00 b8 e4 ff ff ff eb aa 66 0f 1f 84 00 00 00 00 00 55 48 89 e5 48 83 ec 10 48 89 1c 24 4c 89 64 24 08 48 8b 9f 40 05 00 00 > > [ 150.946012] 83 7b 70 00 74 1c 4c 8d a3 80 00 00 00 4c 89 e7 e8 d2 5d 4a > > [ 150.946012] RIP [] shmem_free_inode+0x18/0x50 > > [ 150.946012] RSP > > [ 150.946012] CR2: 0000000000000070 > > Looks at aweful lot like the problem from: > http://www.spinics.net/lists/linux-fsdevel/msg46101.html > I tried to reproduce this bug with your test program, but without success. However, if I understand correctly, this occurs since we dont hold any locks when we call iput() in mark_destroy(), right? With the patches you tested, iput() is also not called within any lock, since the groups mark_mutex is released temporarily before iput() is called. This is, since the original codes behaviour is similar. However since we now have a mutex as the biggest lock, we can do what you suggested (http://www.spinics.net/lists/linux-fsdevel/msg46107.html) and call iput() with the mutex held to avoid the race. The patch below implements this. 
It uses nested locking to avoid deadlock in case we do the final iput() on an inode which still holds marks and thus would take the mutex again when calling fsnotify_inode_delete() in destroy_inode(). Signed-off-by: Lino Sanfilippo Signed-off-by: Eric Paris --- fs/notify/mark.c | 20 ++++++++++---------- include/linux/fsnotify_backend.h | 7 ++++--- 2 files changed, 14 insertions(+), 13 deletions(-) (limited to 'include/linux') diff --git a/fs/notify/mark.c b/fs/notify/mark.c index 0e93d90bb753..fc6b49bf7360 100644 --- a/fs/notify/mark.c +++ b/fs/notify/mark.c @@ -150,6 +150,8 @@ void fsnotify_destroy_mark_locked(struct fsnotify_mark *mark, spin_unlock(&mark->lock); + if (inode && (mark->flags & FSNOTIFY_MARK_FLAG_OBJECT_PINNED)) + iput(inode); /* release lock temporarily */ mutex_unlock(&group->mark_mutex); @@ -157,6 +159,11 @@ void fsnotify_destroy_mark_locked(struct fsnotify_mark *mark, list_add(&mark->destroy_list, &destroy_list); spin_unlock(&destroy_lock); wake_up(&destroy_waitq); + /* + * We don't necessarily have a ref on mark from caller so the above destroy + * may have actually freed it, unless this group provides a 'freeing_mark' + * function which must be holding a reference. + */ /* * Some groups like to know that marks are being freed. This is a @@ -178,22 +185,15 @@ void fsnotify_destroy_mark_locked(struct fsnotify_mark *mark, * is just a lazy update (and could be a perf win...) */ - if (inode && (mark->flags & FSNOTIFY_MARK_FLAG_OBJECT_PINNED)) - iput(inode); - /* - * We don't necessarily have a ref on mark from caller so the above iput - * may have already destroyed it. Don't touch from now on. - */ - atomic_dec(&group->num_marks); - mutex_lock(&group->mark_mutex); + mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING); } void fsnotify_destroy_mark(struct fsnotify_mark *mark, struct fsnotify_group *group) { - mutex_lock(&group->mark_mutex); + mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING); fsnotify_destroy_mark_locked(mark, group); mutex_unlock(&group->mark_mutex); } @@ -300,7 +300,7 @@ void fsnotify_clear_marks_by_group_flags(struct fsnotify_group *group, { struct fsnotify_mark *lmark, *mark; - mutex_lock(&group->mark_mutex); + mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING); list_for_each_entry_safe(mark, lmark, &group->marks_list, g_list) { if (mark->flags & flags) { fsnotify_get_mark(mark); diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h index 5a8899350456..1af2f6a722c0 100644 --- a/include/linux/fsnotify_backend.h +++ b/include/linux/fsnotify_backend.h @@ -88,9 +88,10 @@ struct fsnotify_event_private_data; * if the group is interested in this event. * handle_event - main call for a group to handle an fs event * free_group_priv - called when a group refcnt hits 0 to clean up the private union - * freeing-mark - this means that a mark has been flagged to die when everything - * finishes using it. The function is supplied with what must be a - * valid group and inode to use to clean up. + * freeing_mark - called when a mark is being destroyed for some reason. The group + * MUST be holding a reference on each mark and that reference must be + * dropped in this function. inotify uses this function to send + * userspace messages that marks have been removed. 
*/ struct fsnotify_ops { bool (*should_send_event)(struct fsnotify_group *group, struct inode *inode, -- cgit v1.2.3 From 0a6b6bd5919a65030b557ec8fe81f6fb3e93744a Mon Sep 17 00:00:00 2001 From: Eric Paris Date: Fri, 14 Oct 2011 17:43:39 -0400 Subject: fsnotify: make fasync generic for both inotify and fanotify inotify is supposed to support async signal notification when information is available on the inotify fd. This patch moves that support to generic fsnotify functions so it can be used by all notification mechanisms. Signed-off-by: Eric Paris --- fs/notify/fanotify/fanotify_user.c | 4 ++++ fs/notify/group.c | 7 +++++++ fs/notify/inotify/inotify_user.c | 13 ++++--------- fs/notify/notification.c | 1 + include/linux/fsnotify_backend.h | 5 ++++- 5 files changed, 20 insertions(+), 10 deletions(-) (limited to 'include/linux') diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c index 1218d10424d0..f0e7a57bc899 100644 --- a/fs/notify/fanotify/fanotify_user.c +++ b/fs/notify/fanotify/fanotify_user.c @@ -414,6 +414,10 @@ static int fanotify_release(struct inode *ignored, struct file *file) wake_up(&group->fanotify_data.access_waitq); #endif + + if (file->f_flags & FASYNC) + fsnotify_fasync(-1, file, 0); + /* matches the fanotify_init->fsnotify_alloc_group */ fsnotify_destroy_group(group); diff --git a/fs/notify/group.c b/fs/notify/group.c index 1f7305711fc9..bd2625bd88b4 100644 --- a/fs/notify/group.c +++ b/fs/notify/group.c @@ -102,3 +102,10 @@ struct fsnotify_group *fsnotify_alloc_group(const struct fsnotify_ops *ops) return group; } + +int fsnotify_fasync(int fd, struct file *file, int on) +{ + struct fsnotify_group *group = file->private_data; + + return fasync_helper(fd, file, on, &group->fsn_fa) >= 0 ? 0 : -EIO; +} diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c index 00ff82ff7c9f..68f7bec1e664 100644 --- a/fs/notify/inotify/inotify_user.c +++ b/fs/notify/inotify/inotify_user.c @@ -280,19 +280,15 @@ static ssize_t inotify_read(struct file *file, char __user *buf, return ret; } -static int inotify_fasync(int fd, struct file *file, int on) -{ - struct fsnotify_group *group = file->private_data; - - return fasync_helper(fd, file, on, &group->inotify_data.fa) >= 0 ? 
0 : -EIO; -} - static int inotify_release(struct inode *ignored, struct file *file) { struct fsnotify_group *group = file->private_data; pr_debug("%s: group=%p\n", __func__, group); + if (file->f_flags & FASYNC) + fsnotify_fasync(-1, file, 0); + /* free this group, matching get was inotify_init->fsnotify_obtain_group */ fsnotify_destroy_group(group); @@ -335,7 +331,7 @@ static long inotify_ioctl(struct file *file, unsigned int cmd, static const struct file_operations inotify_fops = { .poll = inotify_poll, .read = inotify_read, - .fasync = inotify_fasync, + .fasync = fsnotify_fasync, .release = inotify_release, .unlocked_ioctl = inotify_ioctl, .compat_ioctl = inotify_ioctl, @@ -706,7 +702,6 @@ static struct fsnotify_group *inotify_new_group(unsigned int max_events) spin_lock_init(&group->inotify_data.idr_lock); idr_init(&group->inotify_data.idr); group->inotify_data.last_wd = 0; - group->inotify_data.fa = NULL; group->inotify_data.user = get_current_user(); if (atomic_inc_return(&group->inotify_data.user->inotify_devs) > diff --git a/fs/notify/notification.c b/fs/notify/notification.c index c887b1378f7e..b3963d8c9988 100644 --- a/fs/notify/notification.c +++ b/fs/notify/notification.c @@ -225,6 +225,7 @@ alloc_holder: mutex_unlock(&group->notification_mutex); wake_up(&group->notification_waitq); + kill_fasync(&group->fsn_fa, SIGIO, POLL_IN); return return_event; } diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h index 1af2f6a722c0..d5b0910d4961 100644 --- a/include/linux/fsnotify_backend.h +++ b/include/linux/fsnotify_backend.h @@ -148,6 +148,8 @@ struct fsnotify_group { * a group */ struct list_head marks_list; /* all inode marks for this group */ + struct fasync_struct *fsn_fa; /* async notification */ + /* groups can define private fields here or use the void *private */ union { void *private; @@ -156,7 +158,6 @@ struct fsnotify_group { spinlock_t idr_lock; struct idr idr; u32 last_wd; - struct fasync_struct *fa; /* async notification */ struct user_struct *user; } inotify_data; #endif @@ -368,6 +369,8 @@ extern void fsnotify_get_group(struct fsnotify_group *group); extern void fsnotify_put_group(struct fsnotify_group *group); /* destroy group */ extern void fsnotify_destroy_group(struct fsnotify_group *group); +/* fasync handler function */ +extern int fsnotify_fasync(int fd, struct file *file, int on); /* take a reference to an event */ extern void fsnotify_get_event(struct fsnotify_event *event); extern void fsnotify_put_event(struct fsnotify_event *event); -- cgit v1.2.3 From 7056741fd9fc14a65608549a4657cf5178f05f63 Mon Sep 17 00:00:00 2001 From: Jim Kukunas Date: Thu, 8 Nov 2012 13:47:44 -0800 Subject: lib/raid6: Add AVX2 optimized recovery functions Optimize RAID6 recovery functions to take advantage of the 256-bit YMM integer instructions introduced in AVX2. The patch was tested and benchmarked before submission. However hardware is not yet released so benchmark numbers cannot be reported. Acked-by: "H. 
Peter Anvin" Signed-off-by: Jim Kukunas Signed-off-by: NeilBrown --- arch/x86/Makefile | 5 +- include/linux/raid/pq.h | 1 + lib/raid6/Makefile | 2 +- lib/raid6/algos.c | 3 + lib/raid6/recov_avx2.c | 327 ++++++++++++++++++++++++++++++++++++++++++++++++ lib/raid6/test/Makefile | 2 +- lib/raid6/x86.h | 14 ++- 7 files changed, 345 insertions(+), 9 deletions(-) create mode 100644 lib/raid6/recov_avx2.c (limited to 'include/linux') diff --git a/arch/x86/Makefile b/arch/x86/Makefile index 58790bd85c1d..95477aae9ff7 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile @@ -123,9 +123,10 @@ cfi-sections := $(call as-instr,.cfi_sections .debug_frame,-DCONFIG_AS_CFI_SECTI # does binutils support specific instructions? asinstr := $(call as-instr,fxsaveq (%rax),-DCONFIG_AS_FXSAVEQ=1) avx_instr := $(call as-instr,vxorps %ymm0$(comma)%ymm1$(comma)%ymm2,-DCONFIG_AS_AVX=1) +avx2_instr :=$(call as-instr,vpbroadcastb %xmm0$(comma)%ymm1,-DCONFIG_AS_AVX2=1) -KBUILD_AFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr) -KBUILD_CFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr) +KBUILD_AFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr) $(avx2_instr) +KBUILD_CFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr) $(avx2_instr) LDFLAGS := -m elf_$(UTS_MACHINE) diff --git a/include/linux/raid/pq.h b/include/linux/raid/pq.h index 640c69ceec96..3156347452b9 100644 --- a/include/linux/raid/pq.h +++ b/include/linux/raid/pq.h @@ -109,6 +109,7 @@ struct raid6_recov_calls { extern const struct raid6_recov_calls raid6_recov_intx1; extern const struct raid6_recov_calls raid6_recov_ssse3; +extern const struct raid6_recov_calls raid6_recov_avx2; /* Algorithm list */ extern const struct raid6_calls * const raid6_algos[]; diff --git a/lib/raid6/Makefile b/lib/raid6/Makefile index de06dfe165b8..8c2e22bef661 100644 --- a/lib/raid6/Makefile +++ b/lib/raid6/Makefile @@ -1,6 +1,6 @@ obj-$(CONFIG_RAID6_PQ) += raid6_pq.o -raid6_pq-y += algos.o recov.o recov_ssse3.o tables.o int1.o int2.o int4.o \ +raid6_pq-y += algos.o recov.o recov_ssse3.o recov_avx2.o tables.o int1.o int2.o int4.o \ int8.o int16.o int32.o altivec1.o altivec2.o altivec4.o \ altivec8.o mmx.o sse1.o sse2.o hostprogs-y += mktables diff --git a/lib/raid6/algos.c b/lib/raid6/algos.c index 589f5f50ad2e..8b7f55cadb45 100644 --- a/lib/raid6/algos.c +++ b/lib/raid6/algos.c @@ -72,6 +72,9 @@ EXPORT_SYMBOL_GPL(raid6_datap_recov); const struct raid6_recov_calls *const raid6_recov_algos[] = { #if (defined(__i386__) || defined(__x86_64__)) && !defined(__arch_um__) +#ifdef CONFIG_AS_AVX2 + &raid6_recov_avx2, +#endif &raid6_recov_ssse3, #endif &raid6_recov_intx1, diff --git a/lib/raid6/recov_avx2.c b/lib/raid6/recov_avx2.c new file mode 100644 index 000000000000..43a9bab91879 --- /dev/null +++ b/lib/raid6/recov_avx2.c @@ -0,0 +1,327 @@ +/* + * Copyright (C) 2012 Intel Corporation + * Author: Jim Kukunas + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; version 2 + * of the License. 
+ */ + +#if (defined(__i386__) || defined(__x86_64__)) && !defined(__arch_um__) + +#if CONFIG_AS_AVX2 + +#include +#include "x86.h" + +static int raid6_has_avx2(void) +{ + return boot_cpu_has(X86_FEATURE_AVX2) && + boot_cpu_has(X86_FEATURE_AVX); +} + +static void raid6_2data_recov_avx2(int disks, size_t bytes, int faila, + int failb, void **ptrs) +{ + u8 *p, *q, *dp, *dq; + const u8 *pbmul; /* P multiplier table for B data */ + const u8 *qmul; /* Q multiplier table (for both) */ + const u8 x0f = 0x0f; + + p = (u8 *)ptrs[disks-2]; + q = (u8 *)ptrs[disks-1]; + + /* Compute syndrome with zero for the missing data pages + Use the dead data pages as temporary storage for + delta p and delta q */ + dp = (u8 *)ptrs[faila]; + ptrs[faila] = (void *)raid6_empty_zero_page; + ptrs[disks-2] = dp; + dq = (u8 *)ptrs[failb]; + ptrs[failb] = (void *)raid6_empty_zero_page; + ptrs[disks-1] = dq; + + raid6_call.gen_syndrome(disks, bytes, ptrs); + + /* Restore pointer table */ + ptrs[faila] = dp; + ptrs[failb] = dq; + ptrs[disks-2] = p; + ptrs[disks-1] = q; + + /* Now, pick the proper data tables */ + pbmul = raid6_vgfmul[raid6_gfexi[failb-faila]]; + qmul = raid6_vgfmul[raid6_gfinv[raid6_gfexp[faila] ^ + raid6_gfexp[failb]]]; + + kernel_fpu_begin(); + + /* ymm0 = x0f[16] */ + asm volatile("vpbroadcastb %0, %%ymm7" : : "m" (x0f)); + + while (bytes) { +#ifdef CONFIG_X86_64 + asm volatile("vmovdqa %0, %%ymm1" : : "m" (q[0])); + asm volatile("vmovdqa %0, %%ymm9" : : "m" (q[32])); + asm volatile("vmovdqa %0, %%ymm0" : : "m" (p[0])); + asm volatile("vmovdqa %0, %%ymm8" : : "m" (p[32])); + asm volatile("vpxor %0, %%ymm1, %%ymm1" : : "m" (dq[0])); + asm volatile("vpxor %0, %%ymm9, %%ymm9" : : "m" (dq[32])); + asm volatile("vpxor %0, %%ymm0, %%ymm0" : : "m" (dp[0])); + asm volatile("vpxor %0, %%ymm8, %%ymm8" : : "m" (dp[32])); + + /* + * 1 = dq[0] ^ q[0] + * 9 = dq[32] ^ q[32] + * 0 = dp[0] ^ p[0] + * 8 = dp[32] ^ p[32] + */ + + asm volatile("vbroadcasti128 %0, %%ymm4" : : "m" (qmul[0])); + asm volatile("vbroadcasti128 %0, %%ymm5" : : "m" (qmul[16])); + + asm volatile("vpsraw $4, %ymm1, %ymm3"); + asm volatile("vpsraw $4, %ymm9, %ymm12"); + asm volatile("vpand %ymm7, %ymm1, %ymm1"); + asm volatile("vpand %ymm7, %ymm9, %ymm9"); + asm volatile("vpand %ymm7, %ymm3, %ymm3"); + asm volatile("vpand %ymm7, %ymm12, %ymm12"); + asm volatile("vpshufb %ymm9, %ymm4, %ymm14"); + asm volatile("vpshufb %ymm1, %ymm4, %ymm4"); + asm volatile("vpshufb %ymm12, %ymm5, %ymm15"); + asm volatile("vpshufb %ymm3, %ymm5, %ymm5"); + asm volatile("vpxor %ymm14, %ymm15, %ymm15"); + asm volatile("vpxor %ymm4, %ymm5, %ymm5"); + + /* + * 5 = qx[0] + * 15 = qx[32] + */ + + asm volatile("vbroadcasti128 %0, %%ymm4" : : "m" (pbmul[0])); + asm volatile("vbroadcasti128 %0, %%ymm1" : : "m" (pbmul[16])); + asm volatile("vpsraw $4, %ymm0, %ymm2"); + asm volatile("vpsraw $4, %ymm8, %ymm6"); + asm volatile("vpand %ymm7, %ymm0, %ymm3"); + asm volatile("vpand %ymm7, %ymm8, %ymm14"); + asm volatile("vpand %ymm7, %ymm2, %ymm2"); + asm volatile("vpand %ymm7, %ymm6, %ymm6"); + asm volatile("vpshufb %ymm14, %ymm4, %ymm12"); + asm volatile("vpshufb %ymm3, %ymm4, %ymm4"); + asm volatile("vpshufb %ymm6, %ymm1, %ymm13"); + asm volatile("vpshufb %ymm2, %ymm1, %ymm1"); + asm volatile("vpxor %ymm4, %ymm1, %ymm1"); + asm volatile("vpxor %ymm12, %ymm13, %ymm13"); + + /* + * 1 = pbmul[px[0]] + * 13 = pbmul[px[32]] + */ + asm volatile("vpxor %ymm5, %ymm1, %ymm1"); + asm volatile("vpxor %ymm15, %ymm13, %ymm13"); + + /* + * 1 = db = DQ + * 13 = db[32] = DQ[32] + */ + asm 
volatile("vmovdqa %%ymm1, %0" : "=m" (dq[0])); + asm volatile("vmovdqa %%ymm13,%0" : "=m" (dq[32])); + asm volatile("vpxor %ymm1, %ymm0, %ymm0"); + asm volatile("vpxor %ymm13, %ymm8, %ymm8"); + + asm volatile("vmovdqa %%ymm0, %0" : "=m" (dp[0])); + asm volatile("vmovdqa %%ymm8, %0" : "=m" (dp[32])); + + bytes -= 64; + p += 64; + q += 64; + dp += 64; + dq += 64; +#else + asm volatile("vmovdqa %0, %%ymm1" : : "m" (*q)); + asm volatile("vmovdqa %0, %%ymm0" : : "m" (*p)); + asm volatile("vpxor %0, %%ymm1, %%ymm1" : : "m" (*dq)); + asm volatile("vpxor %0, %%ymm0, %%ymm0" : : "m" (*dp)); + + /* 1 = dq ^ q; 0 = dp ^ p */ + + asm volatile("vbroadcasti128 %0, %%ymm4" : : "m" (qmul[0])); + asm volatile("vbroadcasti128 %0, %%ymm5" : : "m" (qmul[16])); + + /* + * 1 = dq ^ q + * 3 = dq ^ p >> 4 + */ + asm volatile("vpsraw $4, %ymm1, %ymm3"); + asm volatile("vpand %ymm7, %ymm1, %ymm1"); + asm volatile("vpand %ymm7, %ymm3, %ymm3"); + asm volatile("vpshufb %ymm1, %ymm4, %ymm4"); + asm volatile("vpshufb %ymm3, %ymm5, %ymm5"); + asm volatile("vpxor %ymm4, %ymm5, %ymm5"); + + /* 5 = qx */ + + asm volatile("vbroadcasti128 %0, %%ymm4" : : "m" (pbmul[0])); + asm volatile("vbroadcasti128 %0, %%ymm1" : : "m" (pbmul[16])); + + asm volatile("vpsraw $4, %ymm0, %ymm2"); + asm volatile("vpand %ymm7, %ymm0, %ymm3"); + asm volatile("vpand %ymm7, %ymm2, %ymm2"); + asm volatile("vpshufb %ymm3, %ymm4, %ymm4"); + asm volatile("vpshufb %ymm2, %ymm1, %ymm1"); + asm volatile("vpxor %ymm4, %ymm1, %ymm1"); + + /* 1 = pbmul[px] */ + asm volatile("vpxor %ymm5, %ymm1, %ymm1"); + /* 1 = db = DQ */ + asm volatile("vmovdqa %%ymm1, %0" : "=m" (dq[0])); + + asm volatile("vpxor %ymm1, %ymm0, %ymm0"); + asm volatile("vmovdqa %%ymm0, %0" : "=m" (dp[0])); + + bytes -= 32; + p += 32; + q += 32; + dp += 32; + dq += 32; +#endif + } + + kernel_fpu_end(); +} + +static void raid6_datap_recov_avx2(int disks, size_t bytes, int faila, + void **ptrs) +{ + u8 *p, *q, *dq; + const u8 *qmul; /* Q multiplier table */ + const u8 x0f = 0x0f; + + p = (u8 *)ptrs[disks-2]; + q = (u8 *)ptrs[disks-1]; + + /* Compute syndrome with zero for the missing data page + Use the dead data page as temporary storage for delta q */ + dq = (u8 *)ptrs[faila]; + ptrs[faila] = (void *)raid6_empty_zero_page; + ptrs[disks-1] = dq; + + raid6_call.gen_syndrome(disks, bytes, ptrs); + + /* Restore pointer table */ + ptrs[faila] = dq; + ptrs[disks-1] = q; + + /* Now, pick the proper data tables */ + qmul = raid6_vgfmul[raid6_gfinv[raid6_gfexp[faila]]]; + + kernel_fpu_begin(); + + asm volatile("vpbroadcastb %0, %%ymm7" : : "m" (x0f)); + + while (bytes) { +#ifdef CONFIG_X86_64 + asm volatile("vmovdqa %0, %%ymm3" : : "m" (dq[0])); + asm volatile("vmovdqa %0, %%ymm8" : : "m" (dq[32])); + asm volatile("vpxor %0, %%ymm3, %%ymm3" : : "m" (q[0])); + asm volatile("vpxor %0, %%ymm8, %%ymm8" : : "m" (q[32])); + + /* + * 3 = q[0] ^ dq[0] + * 8 = q[32] ^ dq[32] + */ + asm volatile("vbroadcasti128 %0, %%ymm0" : : "m" (qmul[0])); + asm volatile("vmovapd %ymm0, %ymm13"); + asm volatile("vbroadcasti128 %0, %%ymm1" : : "m" (qmul[16])); + asm volatile("vmovapd %ymm1, %ymm14"); + + asm volatile("vpsraw $4, %ymm3, %ymm6"); + asm volatile("vpsraw $4, %ymm8, %ymm12"); + asm volatile("vpand %ymm7, %ymm3, %ymm3"); + asm volatile("vpand %ymm7, %ymm8, %ymm8"); + asm volatile("vpand %ymm7, %ymm6, %ymm6"); + asm volatile("vpand %ymm7, %ymm12, %ymm12"); + asm volatile("vpshufb %ymm3, %ymm0, %ymm0"); + asm volatile("vpshufb %ymm8, %ymm13, %ymm13"); + asm volatile("vpshufb %ymm6, %ymm1, %ymm1"); + asm 
volatile("vpshufb %ymm12, %ymm14, %ymm14"); + asm volatile("vpxor %ymm0, %ymm1, %ymm1"); + asm volatile("vpxor %ymm13, %ymm14, %ymm14"); + + /* + * 1 = qmul[q[0] ^ dq[0]] + * 14 = qmul[q[32] ^ dq[32]] + */ + asm volatile("vmovdqa %0, %%ymm2" : : "m" (p[0])); + asm volatile("vmovdqa %0, %%ymm12" : : "m" (p[32])); + asm volatile("vpxor %ymm1, %ymm2, %ymm2"); + asm volatile("vpxor %ymm14, %ymm12, %ymm12"); + + /* + * 2 = p[0] ^ qmul[q[0] ^ dq[0]] + * 12 = p[32] ^ qmul[q[32] ^ dq[32]] + */ + + asm volatile("vmovdqa %%ymm1, %0" : "=m" (dq[0])); + asm volatile("vmovdqa %%ymm14, %0" : "=m" (dq[32])); + asm volatile("vmovdqa %%ymm2, %0" : "=m" (p[0])); + asm volatile("vmovdqa %%ymm12,%0" : "=m" (p[32])); + + bytes -= 64; + p += 64; + q += 64; + dq += 64; +#else + asm volatile("vmovdqa %0, %%ymm3" : : "m" (dq[0])); + asm volatile("vpxor %0, %%ymm3, %%ymm3" : : "m" (q[0])); + + /* 3 = q ^ dq */ + + asm volatile("vbroadcasti128 %0, %%ymm0" : : "m" (qmul[0])); + asm volatile("vbroadcasti128 %0, %%ymm1" : : "m" (qmul[16])); + + asm volatile("vpsraw $4, %ymm3, %ymm6"); + asm volatile("vpand %ymm7, %ymm3, %ymm3"); + asm volatile("vpand %ymm7, %ymm6, %ymm6"); + asm volatile("vpshufb %ymm3, %ymm0, %ymm0"); + asm volatile("vpshufb %ymm6, %ymm1, %ymm1"); + asm volatile("vpxor %ymm0, %ymm1, %ymm1"); + + /* 1 = qmul[q ^ dq] */ + + asm volatile("vmovdqa %0, %%ymm2" : : "m" (p[0])); + asm volatile("vpxor %ymm1, %ymm2, %ymm2"); + + /* 2 = p ^ qmul[q ^ dq] */ + + asm volatile("vmovdqa %%ymm1, %0" : "=m" (dq[0])); + asm volatile("vmovdqa %%ymm2, %0" : "=m" (p[0])); + + bytes -= 32; + p += 32; + q += 32; + dq += 32; +#endif + } + + kernel_fpu_end(); +} + +const struct raid6_recov_calls raid6_recov_avx2 = { + .data2 = raid6_2data_recov_avx2, + .datap = raid6_datap_recov_avx2, + .valid = raid6_has_avx2, +#ifdef CONFIG_X86_64 + .name = "avx2x2", +#else + .name = "avx2x1", +#endif + .priority = 2, +}; + +#else +#warning "your version of binutils lacks AVX2 support" +#endif + +#endif diff --git a/lib/raid6/test/Makefile b/lib/raid6/test/Makefile index c76151d94764..d919c98ce266 100644 --- a/lib/raid6/test/Makefile +++ b/lib/raid6/test/Makefile @@ -23,7 +23,7 @@ RANLIB = ranlib all: raid6.a raid6test raid6.a: int1.o int2.o int4.o int8.o int16.o int32.o mmx.o sse1.o sse2.o \ - altivec1.o altivec2.o altivec4.o altivec8.o recov.o recov_ssse3.o algos.o \ + altivec1.o altivec2.o altivec4.o altivec8.o recov.o recov_ssse3.o recov_avx2.o algos.o \ tables.o rm -f $@ $(AR) cq $@ $^ diff --git a/lib/raid6/x86.h b/lib/raid6/x86.h index d55d63232c55..b7595484a815 100644 --- a/lib/raid6/x86.h +++ b/lib/raid6/x86.h @@ -45,19 +45,23 @@ static inline void kernel_fpu_end(void) #define X86_FEATURE_XMM3 (4*32+ 0) /* "pni" SSE-3 */ #define X86_FEATURE_SSSE3 (4*32+ 9) /* Supplemental SSE-3 */ #define X86_FEATURE_AVX (4*32+28) /* Advanced Vector Extensions */ +#define X86_FEATURE_AVX2 (9*32+ 5) /* AVX2 instructions */ #define X86_FEATURE_MMXEXT (1*32+22) /* AMD MMX extensions */ /* Should work well enough on modern CPUs for testing */ static inline int boot_cpu_has(int flag) { - u32 eax = (flag & 0x20) ? 0x80000001 : 1; - u32 ecx, edx; + u32 eax, ebx, ecx, edx; + + eax = (flag & 0x100) ? 7 : + (flag & 0x20) ? 0x80000001 : 1; + ecx = 0; asm volatile("cpuid" - : "+a" (eax), "=d" (edx), "=c" (ecx) - : : "ebx"); + : "+a" (eax), "=b" (ebx), "=d" (edx), "+c" (ecx)); - return ((flag & 0x80 ? ecx : edx) >> (flag & 31)) & 1; + return ((flag & 0x100 ? ebx : + (flag & 0x80) ? 
ecx : edx) >> (flag & 31)) & 1; } #endif /* ndef __KERNEL__ */ -- cgit v1.2.3 From 2c935842bdb46f5f557426feb4d2bdfdad1aa5f9 Mon Sep 17 00:00:00 2001 From: Yuanhan Liu Date: Fri, 30 Nov 2012 13:10:39 -0800 Subject: lib/raid6: Add AVX2 optimized gen_syndrome functions Add AVX2 optimized gen_syndrom functions, which is simply based on sse2.c written by hpa. Signed-off-by: Yuanhan Liu Reviewed-by: H. Peter Anvin Signed-off-by: Jim Kukunas Signed-off-by: NeilBrown --- include/linux/raid/pq.h | 3 + lib/raid6/Makefile | 2 +- lib/raid6/algos.c | 9 ++ lib/raid6/avx2.c | 251 ++++++++++++++++++++++++++++++++++++++++++++++++ lib/raid6/test/Makefile | 12 ++- 5 files changed, 275 insertions(+), 2 deletions(-) create mode 100644 lib/raid6/avx2.c (limited to 'include/linux') diff --git a/include/linux/raid/pq.h b/include/linux/raid/pq.h index 3156347452b9..8dfaa2ce2e95 100644 --- a/include/linux/raid/pq.h +++ b/include/linux/raid/pq.h @@ -98,6 +98,9 @@ extern const struct raid6_calls raid6_altivec1; extern const struct raid6_calls raid6_altivec2; extern const struct raid6_calls raid6_altivec4; extern const struct raid6_calls raid6_altivec8; +extern const struct raid6_calls raid6_avx2x1; +extern const struct raid6_calls raid6_avx2x2; +extern const struct raid6_calls raid6_avx2x4; struct raid6_recov_calls { void (*data2)(int, size_t, int, int, void **); diff --git a/lib/raid6/Makefile b/lib/raid6/Makefile index 8c2e22bef661..3430711b9bdf 100644 --- a/lib/raid6/Makefile +++ b/lib/raid6/Makefile @@ -2,7 +2,7 @@ obj-$(CONFIG_RAID6_PQ) += raid6_pq.o raid6_pq-y += algos.o recov.o recov_ssse3.o recov_avx2.o tables.o int1.o int2.o int4.o \ int8.o int16.o int32.o altivec1.o altivec2.o altivec4.o \ - altivec8.o mmx.o sse1.o sse2.o + altivec8.o mmx.o sse1.o sse2.o avx2.o hostprogs-y += mktables quiet_cmd_unroll = UNROLL $@ diff --git a/lib/raid6/algos.c b/lib/raid6/algos.c index 8b7f55cadb45..6d7316fe9f30 100644 --- a/lib/raid6/algos.c +++ b/lib/raid6/algos.c @@ -45,11 +45,20 @@ const struct raid6_calls * const raid6_algos[] = { &raid6_sse1x2, &raid6_sse2x1, &raid6_sse2x2, +#ifdef CONFIG_AS_AVX2 + &raid6_avx2x1, + &raid6_avx2x2, +#endif #endif #if defined(__x86_64__) && !defined(__arch_um__) &raid6_sse2x1, &raid6_sse2x2, &raid6_sse2x4, +#ifdef CONFIG_AS_AVX2 + &raid6_avx2x1, + &raid6_avx2x2, + &raid6_avx2x4, +#endif #endif #ifdef CONFIG_ALTIVEC &raid6_altivec1, diff --git a/lib/raid6/avx2.c b/lib/raid6/avx2.c new file mode 100644 index 000000000000..bc3b1dd436eb --- /dev/null +++ b/lib/raid6/avx2.c @@ -0,0 +1,251 @@ +/* -*- linux-c -*- ------------------------------------------------------- * + * + * Copyright (C) 2012 Intel Corporation + * Author: Yuanhan Liu + * + * Based on sse2.c: Copyright 2002 H. Peter Anvin - All Rights Reserved + * + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, Inc., 53 Temple Place Ste 330, + * Boston MA 02111-1307, USA; either version 2 of the License, or + * (at your option) any later version; incorporated herein by reference. 
+ * + * ----------------------------------------------------------------------- */ + +/* + * AVX2 implementation of RAID-6 syndrome functions + * + */ + +#ifdef CONFIG_AS_AVX2 + +#include +#include "x86.h" + +static const struct raid6_avx2_constants { + u64 x1d[4]; +} raid6_avx2_constants __aligned(32) = { + { 0x1d1d1d1d1d1d1d1dULL, 0x1d1d1d1d1d1d1d1dULL, + 0x1d1d1d1d1d1d1d1dULL, 0x1d1d1d1d1d1d1d1dULL,}, +}; + +static int raid6_have_avx2(void) +{ + return boot_cpu_has(X86_FEATURE_AVX2) && boot_cpu_has(X86_FEATURE_AVX); +} + +/* + * Plain AVX2 implementation + */ +static void raid6_avx21_gen_syndrome(int disks, size_t bytes, void **ptrs) +{ + u8 **dptr = (u8 **)ptrs; + u8 *p, *q; + int d, z, z0; + + z0 = disks - 3; /* Highest data disk */ + p = dptr[z0+1]; /* XOR parity */ + q = dptr[z0+2]; /* RS syndrome */ + + kernel_fpu_begin(); + + asm volatile("vmovdqa %0,%%ymm0" : : "m" (raid6_avx2_constants.x1d[0])); + asm volatile("vpxor %ymm3,%ymm3,%ymm3"); /* Zero temp */ + + for (d = 0; d < bytes; d += 32) { + asm volatile("prefetchnta %0" : : "m" (dptr[z0][d])); + asm volatile("vmovdqa %0,%%ymm2" : : "m" (dptr[z0][d]));/* P[0] */ + asm volatile("prefetchnta %0" : : "m" (dptr[z0-1][d])); + asm volatile("vmovdqa %ymm2,%ymm4");/* Q[0] */ + asm volatile("vmovdqa %0,%%ymm6" : : "m" (dptr[z0-1][d])); + for (z = z0-2; z >= 0; z--) { + asm volatile("prefetchnta %0" : : "m" (dptr[z][d])); + asm volatile("vpcmpgtb %ymm4,%ymm3,%ymm5"); + asm volatile("vpaddb %ymm4,%ymm4,%ymm4"); + asm volatile("vpand %ymm0,%ymm5,%ymm5"); + asm volatile("vpxor %ymm5,%ymm4,%ymm4"); + asm volatile("vpxor %ymm6,%ymm2,%ymm2"); + asm volatile("vpxor %ymm6,%ymm4,%ymm4"); + asm volatile("vmovdqa %0,%%ymm6" : : "m" (dptr[z][d])); + } + asm volatile("vpcmpgtb %ymm4,%ymm3,%ymm5"); + asm volatile("vpaddb %ymm4,%ymm4,%ymm4"); + asm volatile("vpand %ymm0,%ymm5,%ymm5"); + asm volatile("vpxor %ymm5,%ymm4,%ymm4"); + asm volatile("vpxor %ymm6,%ymm2,%ymm2"); + asm volatile("vpxor %ymm6,%ymm4,%ymm4"); + + asm volatile("vmovntdq %%ymm2,%0" : "=m" (p[d])); + asm volatile("vpxor %ymm2,%ymm2,%ymm2"); + asm volatile("vmovntdq %%ymm4,%0" : "=m" (q[d])); + asm volatile("vpxor %ymm4,%ymm4,%ymm4"); + } + + asm volatile("sfence" : : : "memory"); + kernel_fpu_end(); +} + +const struct raid6_calls raid6_avx2x1 = { + raid6_avx21_gen_syndrome, + raid6_have_avx2, + "avx2x1", + 1 /* Has cache hints */ +}; + +/* + * Unrolled-by-2 AVX2 implementation + */ +static void raid6_avx22_gen_syndrome(int disks, size_t bytes, void **ptrs) +{ + u8 **dptr = (u8 **)ptrs; + u8 *p, *q; + int d, z, z0; + + z0 = disks - 3; /* Highest data disk */ + p = dptr[z0+1]; /* XOR parity */ + q = dptr[z0+2]; /* RS syndrome */ + + kernel_fpu_begin(); + + asm volatile("vmovdqa %0,%%ymm0" : : "m" (raid6_avx2_constants.x1d[0])); + asm volatile("vpxor %ymm1,%ymm1,%ymm1"); /* Zero temp */ + + /* We uniformly assume a single prefetch covers at least 32 bytes */ + for (d = 0; d < bytes; d += 64) { + asm volatile("prefetchnta %0" : : "m" (dptr[z0][d])); + asm volatile("prefetchnta %0" : : "m" (dptr[z0][d+32])); + asm volatile("vmovdqa %0,%%ymm2" : : "m" (dptr[z0][d]));/* P[0] */ + asm volatile("vmovdqa %0,%%ymm3" : : "m" (dptr[z0][d+32]));/* P[1] */ + asm volatile("vmovdqa %ymm2,%ymm4"); /* Q[0] */ + asm volatile("vmovdqa %ymm3,%ymm6"); /* Q[1] */ + for (z = z0-1; z >= 0; z--) { + asm volatile("prefetchnta %0" : : "m" (dptr[z][d])); + asm volatile("prefetchnta %0" : : "m" (dptr[z][d+32])); + asm volatile("vpcmpgtb %ymm4,%ymm1,%ymm5"); + asm volatile("vpcmpgtb %ymm6,%ymm1,%ymm7"); + asm 
volatile("vpaddb %ymm4,%ymm4,%ymm4"); + asm volatile("vpaddb %ymm6,%ymm6,%ymm6"); + asm volatile("vpand %ymm0,%ymm5,%ymm5"); + asm volatile("vpand %ymm0,%ymm7,%ymm7"); + asm volatile("vpxor %ymm5,%ymm4,%ymm4"); + asm volatile("vpxor %ymm7,%ymm6,%ymm6"); + asm volatile("vmovdqa %0,%%ymm5" : : "m" (dptr[z][d])); + asm volatile("vmovdqa %0,%%ymm7" : : "m" (dptr[z][d+32])); + asm volatile("vpxor %ymm5,%ymm2,%ymm2"); + asm volatile("vpxor %ymm7,%ymm3,%ymm3"); + asm volatile("vpxor %ymm5,%ymm4,%ymm4"); + asm volatile("vpxor %ymm7,%ymm6,%ymm6"); + } + asm volatile("vmovntdq %%ymm2,%0" : "=m" (p[d])); + asm volatile("vmovntdq %%ymm3,%0" : "=m" (p[d+32])); + asm volatile("vmovntdq %%ymm4,%0" : "=m" (q[d])); + asm volatile("vmovntdq %%ymm6,%0" : "=m" (q[d+32])); + } + + asm volatile("sfence" : : : "memory"); + kernel_fpu_end(); +} + +const struct raid6_calls raid6_avx2x2 = { + raid6_avx22_gen_syndrome, + raid6_have_avx2, + "avx2x2", + 1 /* Has cache hints */ +}; + +#ifdef CONFIG_X86_64 + +/* + * Unrolled-by-4 AVX2 implementation + */ +static void raid6_avx24_gen_syndrome(int disks, size_t bytes, void **ptrs) +{ + u8 **dptr = (u8 **)ptrs; + u8 *p, *q; + int d, z, z0; + + z0 = disks - 3; /* Highest data disk */ + p = dptr[z0+1]; /* XOR parity */ + q = dptr[z0+2]; /* RS syndrome */ + + kernel_fpu_begin(); + + asm volatile("vmovdqa %0,%%ymm0" : : "m" (raid6_avx2_constants.x1d[0])); + asm volatile("vpxor %ymm1,%ymm1,%ymm1"); /* Zero temp */ + asm volatile("vpxor %ymm2,%ymm2,%ymm2"); /* P[0] */ + asm volatile("vpxor %ymm3,%ymm3,%ymm3"); /* P[1] */ + asm volatile("vpxor %ymm4,%ymm4,%ymm4"); /* Q[0] */ + asm volatile("vpxor %ymm6,%ymm6,%ymm6"); /* Q[1] */ + asm volatile("vpxor %ymm10,%ymm10,%ymm10"); /* P[2] */ + asm volatile("vpxor %ymm11,%ymm11,%ymm11"); /* P[3] */ + asm volatile("vpxor %ymm12,%ymm12,%ymm12"); /* Q[2] */ + asm volatile("vpxor %ymm14,%ymm14,%ymm14"); /* Q[3] */ + + for (d = 0; d < bytes; d += 128) { + for (z = z0; z >= 0; z--) { + asm volatile("prefetchnta %0" : : "m" (dptr[z][d])); + asm volatile("prefetchnta %0" : : "m" (dptr[z][d+32])); + asm volatile("prefetchnta %0" : : "m" (dptr[z][d+64])); + asm volatile("prefetchnta %0" : : "m" (dptr[z][d+96])); + asm volatile("vpcmpgtb %ymm4,%ymm1,%ymm5"); + asm volatile("vpcmpgtb %ymm6,%ymm1,%ymm7"); + asm volatile("vpcmpgtb %ymm12,%ymm1,%ymm13"); + asm volatile("vpcmpgtb %ymm14,%ymm1,%ymm15"); + asm volatile("vpaddb %ymm4,%ymm4,%ymm4"); + asm volatile("vpaddb %ymm6,%ymm6,%ymm6"); + asm volatile("vpaddb %ymm12,%ymm12,%ymm12"); + asm volatile("vpaddb %ymm14,%ymm14,%ymm14"); + asm volatile("vpand %ymm0,%ymm5,%ymm5"); + asm volatile("vpand %ymm0,%ymm7,%ymm7"); + asm volatile("vpand %ymm0,%ymm13,%ymm13"); + asm volatile("vpand %ymm0,%ymm15,%ymm15"); + asm volatile("vpxor %ymm5,%ymm4,%ymm4"); + asm volatile("vpxor %ymm7,%ymm6,%ymm6"); + asm volatile("vpxor %ymm13,%ymm12,%ymm12"); + asm volatile("vpxor %ymm15,%ymm14,%ymm14"); + asm volatile("vmovdqa %0,%%ymm5" : : "m" (dptr[z][d])); + asm volatile("vmovdqa %0,%%ymm7" : : "m" (dptr[z][d+32])); + asm volatile("vmovdqa %0,%%ymm13" : : "m" (dptr[z][d+64])); + asm volatile("vmovdqa %0,%%ymm15" : : "m" (dptr[z][d+96])); + asm volatile("vpxor %ymm5,%ymm2,%ymm2"); + asm volatile("vpxor %ymm7,%ymm3,%ymm3"); + asm volatile("vpxor %ymm13,%ymm10,%ymm10"); + asm volatile("vpxor %ymm15,%ymm11,%ymm11"); + asm volatile("vpxor %ymm5,%ymm4,%ymm4"); + asm volatile("vpxor %ymm7,%ymm6,%ymm6"); + asm volatile("vpxor %ymm13,%ymm12,%ymm12"); + asm volatile("vpxor %ymm15,%ymm14,%ymm14"); + } + asm volatile("vmovntdq %%ymm2,%0" 
: "=m" (p[d])); + asm volatile("vpxor %ymm2,%ymm2,%ymm2"); + asm volatile("vmovntdq %%ymm3,%0" : "=m" (p[d+32])); + asm volatile("vpxor %ymm3,%ymm3,%ymm3"); + asm volatile("vmovntdq %%ymm10,%0" : "=m" (p[d+64])); + asm volatile("vpxor %ymm10,%ymm10,%ymm10"); + asm volatile("vmovntdq %%ymm11,%0" : "=m" (p[d+96])); + asm volatile("vpxor %ymm11,%ymm11,%ymm11"); + asm volatile("vmovntdq %%ymm4,%0" : "=m" (q[d])); + asm volatile("vpxor %ymm4,%ymm4,%ymm4"); + asm volatile("vmovntdq %%ymm6,%0" : "=m" (q[d+32])); + asm volatile("vpxor %ymm6,%ymm6,%ymm6"); + asm volatile("vmovntdq %%ymm12,%0" : "=m" (q[d+64])); + asm volatile("vpxor %ymm12,%ymm12,%ymm12"); + asm volatile("vmovntdq %%ymm14,%0" : "=m" (q[d+96])); + asm volatile("vpxor %ymm14,%ymm14,%ymm14"); + } + + asm volatile("sfence" : : : "memory"); + kernel_fpu_end(); +} + +const struct raid6_calls raid6_avx2x4 = { + raid6_avx24_gen_syndrome, + raid6_have_avx2, + "avx2x4", + 1 /* Has cache hints */ +}; +#endif + +#endif /* CONFIG_AS_AVX2 */ diff --git a/lib/raid6/test/Makefile b/lib/raid6/test/Makefile index d919c98ce266..754cbac0f9f8 100644 --- a/lib/raid6/test/Makefile +++ b/lib/raid6/test/Makefile @@ -11,6 +11,16 @@ AWK = awk -f AR = ar RANLIB = ranlib +ARCH := $(shell uname -m 2>/dev/null | sed -e /s/i.86/i386/) +ifeq ($(ARCH),i386) + CFLAGS += -DCONFIG_X86_32 +endif +ifeq ($(ARCH),x86_64) + CFLAGS += -DCONFIG_X86_64 +endif +CFLAGS += $(shell echo "vpbroadcastb %xmm0, %ymm1"| gcc -c -x assembler - &&\ + rm ./-.o && echo -DCONFIG_AS_AVX2=1) + .c.o: $(CC) $(CFLAGS) -c -o $@ $< @@ -22,7 +32,7 @@ RANLIB = ranlib all: raid6.a raid6test -raid6.a: int1.o int2.o int4.o int8.o int16.o int32.o mmx.o sse1.o sse2.o \ +raid6.a: int1.o int2.o int4.o int8.o int16.o int32.o mmx.o sse1.o sse2.o avx2.o \ altivec1.o altivec2.o altivec4.o altivec8.o recov.o recov_ssse3.o recov_avx2.o algos.o \ tables.o rm -f $@ -- cgit v1.2.3 From 83aff95eb9d60aff5497e9f44a2ae906b86d8e88 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Wed, 28 Nov 2012 12:28:24 -0800 Subject: libceph: remove 'osdtimeout' option This would reset a connection with any OSD that had an outstanding request that was taking more than N seconds. The idea was that if the OSD was buggy, the client could compensate by resending the request. In reality, this only served to hide server bugs, and we haven't actually seen such a bug in quite a while. Moreover, the userspace client code never did this. More importantly, often the request is taking a long time because the OSD is trying to recover, or overloaded, and killing the connection and retrying would only make the situation worse by giving the OSD more work to do. 
Signed-off-by: Sage Weil Reviewed-by: Alex Elder --- fs/ceph/super.c | 2 -- include/linux/ceph/libceph.h | 2 -- net/ceph/ceph_common.c | 3 +-- net/ceph/osd_client.c | 47 ++++---------------------------------------- 4 files changed, 5 insertions(+), 49 deletions(-) (limited to 'include/linux') diff --git a/fs/ceph/super.c b/fs/ceph/super.c index 2f586b0e5e0f..fcda1c73a1e5 100644 --- a/fs/ceph/super.c +++ b/fs/ceph/super.c @@ -403,8 +403,6 @@ static int ceph_show_options(struct seq_file *m, struct dentry *root) seq_printf(m, ",mount_timeout=%d", opt->mount_timeout); if (opt->osd_idle_ttl != CEPH_OSD_IDLE_TTL_DEFAULT) seq_printf(m, ",osd_idle_ttl=%d", opt->osd_idle_ttl); - if (opt->osd_timeout != CEPH_OSD_TIMEOUT_DEFAULT) - seq_printf(m, ",osdtimeout=%d", opt->osd_timeout); if (opt->osd_keepalive_timeout != CEPH_OSD_KEEPALIVE_DEFAULT) seq_printf(m, ",osdkeepalivetimeout=%d", opt->osd_keepalive_timeout); diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h index 42624789b06f..317aff8feb0a 100644 --- a/include/linux/ceph/libceph.h +++ b/include/linux/ceph/libceph.h @@ -43,7 +43,6 @@ struct ceph_options { struct ceph_entity_addr my_addr; int mount_timeout; int osd_idle_ttl; - int osd_timeout; int osd_keepalive_timeout; /* @@ -63,7 +62,6 @@ struct ceph_options { * defaults */ #define CEPH_MOUNT_TIMEOUT_DEFAULT 60 -#define CEPH_OSD_TIMEOUT_DEFAULT 60 /* seconds */ #define CEPH_OSD_KEEPALIVE_DEFAULT 5 #define CEPH_OSD_IDLE_TTL_DEFAULT 60 diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c index a8020293f342..ee71ea26777a 100644 --- a/net/ceph/ceph_common.c +++ b/net/ceph/ceph_common.c @@ -305,7 +305,6 @@ ceph_parse_options(char *options, const char *dev_name, /* start with defaults */ opt->flags = CEPH_OPT_DEFAULT; - opt->osd_timeout = CEPH_OSD_TIMEOUT_DEFAULT; opt->osd_keepalive_timeout = CEPH_OSD_KEEPALIVE_DEFAULT; opt->mount_timeout = CEPH_MOUNT_TIMEOUT_DEFAULT; /* seconds */ opt->osd_idle_ttl = CEPH_OSD_IDLE_TTL_DEFAULT; /* seconds */ @@ -391,7 +390,7 @@ ceph_parse_options(char *options, const char *dev_name, /* misc */ case Opt_osdtimeout: - opt->osd_timeout = intval; + pr_warning("ignoring deprecated osdtimeout option\n"); break; case Opt_osdkeepalivetimeout: opt->osd_keepalive_timeout = intval; diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index ccbdfbba9e53..7ebfe13267e6 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -608,14 +608,6 @@ static void __kick_osd_requests(struct ceph_osd_client *osdc, } } -static void kick_osd_requests(struct ceph_osd_client *osdc, - struct ceph_osd *kickosd) -{ - mutex_lock(&osdc->request_mutex); - __kick_osd_requests(osdc, kickosd); - mutex_unlock(&osdc->request_mutex); -} - /* * If the osd connection drops, we need to resubmit all requests. 
*/ @@ -629,7 +621,9 @@ static void osd_reset(struct ceph_connection *con) dout("osd_reset osd%d\n", osd->o_osd); osdc = osd->o_osdc; down_read(&osdc->map_sem); - kick_osd_requests(osdc, osd); + mutex_lock(&osdc->request_mutex); + __kick_osd_requests(osdc, osd); + mutex_unlock(&osdc->request_mutex); send_queued(osdc); up_read(&osdc->map_sem); } @@ -1091,12 +1085,10 @@ static void handle_timeout(struct work_struct *work) { struct ceph_osd_client *osdc = container_of(work, struct ceph_osd_client, timeout_work.work); - struct ceph_osd_request *req, *last_req = NULL; + struct ceph_osd_request *req; struct ceph_osd *osd; - unsigned long timeout = osdc->client->options->osd_timeout * HZ; unsigned long keepalive = osdc->client->options->osd_keepalive_timeout * HZ; - unsigned long last_stamp = 0; struct list_head slow_osds; dout("timeout\n"); down_read(&osdc->map_sem); @@ -1105,37 +1097,6 @@ static void handle_timeout(struct work_struct *work) mutex_lock(&osdc->request_mutex); - /* - * reset osds that appear to be _really_ unresponsive. this - * is a failsafe measure.. we really shouldn't be getting to - * this point if the system is working properly. the monitors - * should mark the osd as failed and we should find out about - * it from an updated osd map. - */ - while (timeout && !list_empty(&osdc->req_lru)) { - req = list_entry(osdc->req_lru.next, struct ceph_osd_request, - r_req_lru_item); - - /* hasn't been long enough since we sent it? */ - if (time_before(jiffies, req->r_stamp + timeout)) - break; - - /* hasn't been long enough since it was acked? */ - if (req->r_request->ack_stamp == 0 || - time_before(jiffies, req->r_request->ack_stamp + timeout)) - break; - - BUG_ON(req == last_req && req->r_stamp == last_stamp); - last_req = req; - last_stamp = req->r_stamp; - - osd = req->r_osd; - BUG_ON(!osd); - pr_warning(" tid %llu timed out on osd%d, will reset osd\n", - req->r_tid, osd->o_osd); - __kick_osd_requests(osdc, osd); - } - /* * ping osds that are a bit slow. this ensures that if there * is a break in the TCP connection we will notice, and reopen -- cgit v1.2.3 From d2cc4dde9206aa2c7fb237aa689d3277cc070547 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Thu, 29 Nov 2012 08:37:03 -0600 Subject: bdi_register: add __printf verification, fix arg mismatch __printf is useful to verify format and arguments. 
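For illustration only (this is not part of the patch): the kernel's __printf(a, b) macro expands to __attribute__((format(printf, a, b))), so once bdi_register() carries the annotation, gcc's -Wformat catches exactly the kind of mismatch fixed in fs/ceph/super.c below, where an atomic_long_inc_return() value was being printed with %d instead of %ld. A minimal sketch with a hypothetical my_register() stand-in:

/* Sketch only; my_register() is a hypothetical stand-in for bdi_register(). */
#define __printf(a, b) __attribute__((format(printf, a, b)))

__printf(2, 3)
int my_register(void *parent, const char *fmt, ...);

void example(void *parent, long seq)
{
	my_register(parent, "ceph-%d", seq);	/* -Wformat warning: %d expects int, seq is long */
	my_register(parent, "ceph-%ld", seq);	/* clean */
}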
Signed-off-by: Joe Perches Reviewed-by: Alex Elder --- fs/ceph/super.c | 2 +- include/linux/backing-dev.h | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/fs/ceph/super.c b/fs/ceph/super.c index fcda1c73a1e5..1a144001b2e1 100644 --- a/fs/ceph/super.c +++ b/fs/ceph/super.c @@ -842,7 +842,7 @@ static int ceph_register_bdi(struct super_block *sb, fsc->backing_dev_info.ra_pages = default_backing_dev_info.ra_pages; - err = bdi_register(&fsc->backing_dev_info, NULL, "ceph-%d", + err = bdi_register(&fsc->backing_dev_info, NULL, "ceph-%ld", atomic_long_inc_return(&bdi_seq)); if (!err) sb->s_bdi = &fsc->backing_dev_info; diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h index 2a9a9abc9126..12731a19ef06 100644 --- a/include/linux/backing-dev.h +++ b/include/linux/backing-dev.h @@ -114,6 +114,7 @@ struct backing_dev_info { int bdi_init(struct backing_dev_info *bdi); void bdi_destroy(struct backing_dev_info *bdi); +__printf(3, 4) int bdi_register(struct backing_dev_info *bdi, struct device *parent, const char *fmt, ...); int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev); -- cgit v1.2.3 From 34e1169d996ab148490c01b65b4ee371cf8ffba2 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Tue, 16 Oct 2012 07:31:07 +1030 Subject: module: add syscall to load module from fd As part of the effort to create a stronger boundary between root and kernel, Chrome OS wants to be able to enforce that kernel modules are being loaded only from our read-only crypto-hash verified (dm_verity) root filesystem. Since the init_module syscall hands the kernel a module as a memory blob, no reasoning about the origin of the blob can be made. Earlier proposals for appending signatures to kernel modules would not be useful in Chrome OS, since it would involve adding an additional set of keys to our kernel and builds for no good reason: we already trust the contents of our root filesystem. We don't need to verify those kernel modules a second time. Having to do signature checking on module loading would slow us down and be redundant. All we need to know is where a module is coming from so we can say yes/no to loading it. If a file descriptor is used as the source of a kernel module, many more things can be reasoned about. In Chrome OS's case, we could enforce that the module lives on the filesystem we expect it to live on. In the case of IMA (or other LSMs), it would be possible, for example, to examine extended attributes that may contain signatures over the contents of the module. This introduces a new syscall (on x86), similar to init_module, that has only two arguments. The first argument is used as a file descriptor to the module and the second argument is a pointer to the NULL terminated string of module arguments. 
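For illustration only (not part of the patch): a userspace caller could exercise the new syscall roughly as below, using the numbers added to the x86 syscall tables in this patch (350 on i386, 313 on x86_64). Note that at this point the syscall takes only the two arguments described above; a later patch in this series adds a third flags argument.

/* Hypothetical usage sketch; error handling kept minimal. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef __NR_finit_module
#define __NR_finit_module 313	/* x86_64 value from the syscall table below */
#endif

int main(int argc, char **argv)
{
	int fd;

	if (argc < 2) {
		fprintf(stderr, "usage: %s <module.ko> [args]\n", argv[0]);
		return 1;
	}
	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* The kernel reads the module image from the descriptor itself;
	 * the second argument is the NULL-terminated module args string. */
	if (syscall(__NR_finit_module, fd, argc > 2 ? argv[2] : "") != 0) {
		perror("finit_module");
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}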
Signed-off-by: Kees Cook Cc: Andrew Morton Signed-off-by: Rusty Russell (merge fixes) --- arch/x86/syscalls/syscall_32.tbl | 1 + arch/x86/syscalls/syscall_64.tbl | 1 + include/linux/syscalls.h | 1 + kernel/module.c | 367 +++++++++++++++++++++++---------------- kernel/sys_ni.c | 1 + 5 files changed, 223 insertions(+), 148 deletions(-) (limited to 'include/linux') diff --git a/arch/x86/syscalls/syscall_32.tbl b/arch/x86/syscalls/syscall_32.tbl index a47103fbc692..83b3838417ed 100644 --- a/arch/x86/syscalls/syscall_32.tbl +++ b/arch/x86/syscalls/syscall_32.tbl @@ -356,3 +356,4 @@ 347 i386 process_vm_readv sys_process_vm_readv compat_sys_process_vm_readv 348 i386 process_vm_writev sys_process_vm_writev compat_sys_process_vm_writev 349 i386 kcmp sys_kcmp +350 i386 finit_module sys_finit_module diff --git a/arch/x86/syscalls/syscall_64.tbl b/arch/x86/syscalls/syscall_64.tbl index a582bfed95bb..7c58c84b7bc8 100644 --- a/arch/x86/syscalls/syscall_64.tbl +++ b/arch/x86/syscalls/syscall_64.tbl @@ -319,6 +319,7 @@ 310 64 process_vm_readv sys_process_vm_readv 311 64 process_vm_writev sys_process_vm_writev 312 common kcmp sys_kcmp +313 common finit_module sys_finit_module # # x32-specific system call numbers start at 512 to avoid cache impact diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 727f0cd73921..32bc035bcd68 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -868,4 +868,5 @@ asmlinkage long sys_process_vm_writev(pid_t pid, asmlinkage long sys_kcmp(pid_t pid1, pid_t pid2, int type, unsigned long idx1, unsigned long idx2); +asmlinkage long sys_finit_module(int fd, const char __user *uargs); #endif diff --git a/kernel/module.c b/kernel/module.c index 6e48c3a43599..6d2c4e4ca1f5 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include #include @@ -2425,18 +2426,17 @@ static inline void kmemleak_load_module(const struct module *mod, #endif #ifdef CONFIG_MODULE_SIG -static int module_sig_check(struct load_info *info, - const void *mod, unsigned long *_len) +static int module_sig_check(struct load_info *info) { int err = -ENOKEY; - unsigned long markerlen = sizeof(MODULE_SIG_STRING) - 1; - unsigned long len = *_len; + const unsigned long markerlen = sizeof(MODULE_SIG_STRING) - 1; + const void *mod = info->hdr; - if (len > markerlen && - memcmp(mod + len - markerlen, MODULE_SIG_STRING, markerlen) == 0) { + if (info->len > markerlen && + memcmp(mod + info->len - markerlen, MODULE_SIG_STRING, markerlen) == 0) { /* We truncate the module to discard the signature */ - *_len -= markerlen; - err = mod_verify_sig(mod, _len); + info->len -= markerlen; + err = mod_verify_sig(mod, &info->len); } if (!err) { @@ -2454,59 +2454,97 @@ static int module_sig_check(struct load_info *info, return err; } #else /* !CONFIG_MODULE_SIG */ -static int module_sig_check(struct load_info *info, - void *mod, unsigned long *len) +static int module_sig_check(struct load_info *info) { return 0; } #endif /* !CONFIG_MODULE_SIG */ -/* Sets info->hdr, info->len and info->sig_ok. */ -static int copy_and_check(struct load_info *info, - const void __user *umod, unsigned long len, - const char __user *uargs) +/* Sanity checks against invalid binaries, wrong arch, weird elf version. 
*/ +static int elf_header_check(struct load_info *info) { - int err; - Elf_Ehdr *hdr; + if (info->len < sizeof(*(info->hdr))) + return -ENOEXEC; + + if (memcmp(info->hdr->e_ident, ELFMAG, SELFMAG) != 0 + || info->hdr->e_type != ET_REL + || !elf_check_arch(info->hdr) + || info->hdr->e_shentsize != sizeof(Elf_Shdr)) + return -ENOEXEC; + + if (info->hdr->e_shoff >= info->len + || (info->hdr->e_shnum * sizeof(Elf_Shdr) > + info->len - info->hdr->e_shoff)) + return -ENOEXEC; - if (len < sizeof(*hdr)) + return 0; +} + +/* Sets info->hdr and info->len. */ +static int copy_module_from_user(const void __user *umod, unsigned long len, + struct load_info *info) +{ + info->len = len; + if (info->len < sizeof(*(info->hdr))) return -ENOEXEC; /* Suck in entire file: we'll want most of it. */ - if ((hdr = vmalloc(len)) == NULL) + info->hdr = vmalloc(info->len); + if (!info->hdr) return -ENOMEM; - if (copy_from_user(hdr, umod, len) != 0) { - err = -EFAULT; - goto free_hdr; + if (copy_from_user(info->hdr, umod, info->len) != 0) { + vfree(info->hdr); + return -EFAULT; } - err = module_sig_check(info, hdr, &len); + return 0; +} + +/* Sets info->hdr and info->len. */ +static int copy_module_from_fd(int fd, struct load_info *info) +{ + struct file *file; + int err; + struct kstat stat; + loff_t pos; + ssize_t bytes = 0; + + file = fget(fd); + if (!file) + return -ENOEXEC; + + err = vfs_getattr(file->f_vfsmnt, file->f_dentry, &stat); if (err) - goto free_hdr; + goto out; - /* Sanity checks against insmoding binaries or wrong arch, - weird elf version */ - if (memcmp(hdr->e_ident, ELFMAG, SELFMAG) != 0 - || hdr->e_type != ET_REL - || !elf_check_arch(hdr) - || hdr->e_shentsize != sizeof(Elf_Shdr)) { - err = -ENOEXEC; - goto free_hdr; + if (stat.size > INT_MAX) { + err = -EFBIG; + goto out; } - - if (hdr->e_shoff >= len || - hdr->e_shnum * sizeof(Elf_Shdr) > len - hdr->e_shoff) { - err = -ENOEXEC; - goto free_hdr; + info->hdr = vmalloc(stat.size); + if (!info->hdr) { + err = -ENOMEM; + goto out; } - info->hdr = hdr; - info->len = len; - return 0; + pos = 0; + while (pos < stat.size) { + bytes = kernel_read(file, pos, (char *)(info->hdr) + pos, + stat.size - pos); + if (bytes < 0) { + vfree(info->hdr); + err = bytes; + goto out; + } + if (bytes == 0) + break; + pos += bytes; + } + info->len = pos; -free_hdr: - vfree(hdr); +out: + fput(file); return err; } @@ -2945,33 +2983,123 @@ static bool finished_loading(const char *name) return ret; } +/* Call module constructors. */ +static void do_mod_ctors(struct module *mod) +{ +#ifdef CONFIG_CONSTRUCTORS + unsigned long i; + + for (i = 0; i < mod->num_ctors; i++) + mod->ctors[i](); +#endif +} + +/* This is where the real work happens */ +static int do_init_module(struct module *mod) +{ + int ret = 0; + + blocking_notifier_call_chain(&module_notify_list, + MODULE_STATE_COMING, mod); + + /* Set RO and NX regions for core */ + set_section_ro_nx(mod->module_core, + mod->core_text_size, + mod->core_ro_size, + mod->core_size); + + /* Set RO and NX regions for init */ + set_section_ro_nx(mod->module_init, + mod->init_text_size, + mod->init_ro_size, + mod->init_size); + + do_mod_ctors(mod); + /* Start the module */ + if (mod->init != NULL) + ret = do_one_initcall(mod->init); + if (ret < 0) { + /* Init routine failed: abort. Try to protect us from + buggy refcounters. 
*/ + mod->state = MODULE_STATE_GOING; + synchronize_sched(); + module_put(mod); + blocking_notifier_call_chain(&module_notify_list, + MODULE_STATE_GOING, mod); + free_module(mod); + wake_up_all(&module_wq); + return ret; + } + if (ret > 0) { + printk(KERN_WARNING +"%s: '%s'->init suspiciously returned %d, it should follow 0/-E convention\n" +"%s: loading module anyway...\n", + __func__, mod->name, ret, + __func__); + dump_stack(); + } + + /* Now it's a first class citizen! */ + mod->state = MODULE_STATE_LIVE; + blocking_notifier_call_chain(&module_notify_list, + MODULE_STATE_LIVE, mod); + + /* We need to finish all async code before the module init sequence is done */ + async_synchronize_full(); + + mutex_lock(&module_mutex); + /* Drop initial reference. */ + module_put(mod); + trim_init_extable(mod); +#ifdef CONFIG_KALLSYMS + mod->num_symtab = mod->core_num_syms; + mod->symtab = mod->core_symtab; + mod->strtab = mod->core_strtab; +#endif + unset_module_init_ro_nx(mod); + module_free(mod, mod->module_init); + mod->module_init = NULL; + mod->init_size = 0; + mod->init_ro_size = 0; + mod->init_text_size = 0; + mutex_unlock(&module_mutex); + wake_up_all(&module_wq); + + return 0; +} + +static int may_init_module(void) +{ + if (!capable(CAP_SYS_MODULE) || modules_disabled) + return -EPERM; + + return 0; +} + /* Allocate and load the module: note that size of section 0 is always zero, and we rely on this for optional sections. */ -static struct module *load_module(void __user *umod, - unsigned long len, - const char __user *uargs) +static int load_module(struct load_info *info, const char __user *uargs) { - struct load_info info = { NULL, }; struct module *mod, *old; long err; - pr_debug("load_module: umod=%p, len=%lu, uargs=%p\n", - umod, len, uargs); + err = module_sig_check(info); + if (err) + goto free_copy; - /* Copy in the blobs from userspace, check they are vaguely sane. */ - err = copy_and_check(&info, umod, len, uargs); + err = elf_header_check(info); if (err) - return ERR_PTR(err); + goto free_copy; /* Figure out module layout, and allocate all the memory. */ - mod = layout_and_allocate(&info); + mod = layout_and_allocate(info); if (IS_ERR(mod)) { err = PTR_ERR(mod); goto free_copy; } #ifdef CONFIG_MODULE_SIG - mod->sig_ok = info.sig_ok; + mod->sig_ok = info->sig_ok; if (!mod->sig_ok) add_taint_module(mod, TAINT_FORCED_MODULE); #endif @@ -2983,25 +3111,25 @@ static struct module *load_module(void __user *umod, /* Now we've got everything in the final locations, we can * find optional sections. */ - find_module_sections(mod, &info); + find_module_sections(mod, info); err = check_module_license_and_versions(mod); if (err) goto free_unload; /* Set up MODINFO_ATTR fields */ - setup_modinfo(mod, &info); + setup_modinfo(mod, info); /* Fix up syms, so that st_value is a pointer to location. */ - err = simplify_symbols(mod, &info); + err = simplify_symbols(mod, info); if (err < 0) goto free_modinfo; - err = apply_relocations(mod, &info); + err = apply_relocations(mod, info); if (err < 0) goto free_modinfo; - err = post_relocation(mod, &info); + err = post_relocation(mod, info); if (err < 0) goto free_modinfo; @@ -3041,14 +3169,14 @@ again: } /* This has to be done once we're sure module name is unique. 
*/ - dynamic_debug_setup(info.debug, info.num_debug); + dynamic_debug_setup(info->debug, info->num_debug); /* Find duplicate symbols */ err = verify_export_symbols(mod); if (err < 0) goto ddebug; - module_bug_finalize(info.hdr, info.sechdrs, mod); + module_bug_finalize(info->hdr, info->sechdrs, mod); list_add_rcu(&mod->list, &modules); mutex_unlock(&module_mutex); @@ -3059,16 +3187,17 @@ again: goto unlink; /* Link in to syfs. */ - err = mod_sysfs_setup(mod, &info, mod->kp, mod->num_kp); + err = mod_sysfs_setup(mod, info, mod->kp, mod->num_kp); if (err < 0) goto unlink; /* Get rid of temporary copy. */ - free_copy(&info); + free_copy(info); /* Done! */ trace_module_load(mod); - return mod; + + return do_init_module(mod); unlink: mutex_lock(&module_mutex); @@ -3077,7 +3206,7 @@ again: module_bug_cleanup(mod); wake_up_all(&module_wq); ddebug: - dynamic_debug_remove(info.debug); + dynamic_debug_remove(info->debug); unlock: mutex_unlock(&module_mutex); synchronize_sched(); @@ -3089,106 +3218,48 @@ again: free_unload: module_unload_free(mod); free_module: - module_deallocate(mod, &info); + module_deallocate(mod, info); free_copy: - free_copy(&info); - return ERR_PTR(err); -} - -/* Call module constructors. */ -static void do_mod_ctors(struct module *mod) -{ -#ifdef CONFIG_CONSTRUCTORS - unsigned long i; - - for (i = 0; i < mod->num_ctors; i++) - mod->ctors[i](); -#endif + free_copy(info); + return err; } -/* This is where the real work happens */ SYSCALL_DEFINE3(init_module, void __user *, umod, unsigned long, len, const char __user *, uargs) { - struct module *mod; - int ret = 0; - - /* Must have permission */ - if (!capable(CAP_SYS_MODULE) || modules_disabled) - return -EPERM; + int err; + struct load_info info = { }; - /* Do all the hard work */ - mod = load_module(umod, len, uargs); - if (IS_ERR(mod)) - return PTR_ERR(mod); + err = may_init_module(); + if (err) + return err; - blocking_notifier_call_chain(&module_notify_list, - MODULE_STATE_COMING, mod); + pr_debug("init_module: umod=%p, len=%lu, uargs=%p\n", + umod, len, uargs); - /* Set RO and NX regions for core */ - set_section_ro_nx(mod->module_core, - mod->core_text_size, - mod->core_ro_size, - mod->core_size); + err = copy_module_from_user(umod, len, &info); + if (err) + return err; - /* Set RO and NX regions for init */ - set_section_ro_nx(mod->module_init, - mod->init_text_size, - mod->init_ro_size, - mod->init_size); + return load_module(&info, uargs); +} - do_mod_ctors(mod); - /* Start the module */ - if (mod->init != NULL) - ret = do_one_initcall(mod->init); - if (ret < 0) { - /* Init routine failed: abort. Try to protect us from - buggy refcounters. */ - mod->state = MODULE_STATE_GOING; - synchronize_sched(); - module_put(mod); - blocking_notifier_call_chain(&module_notify_list, - MODULE_STATE_GOING, mod); - free_module(mod); - wake_up_all(&module_wq); - return ret; - } - if (ret > 0) { - printk(KERN_WARNING -"%s: '%s'->init suspiciously returned %d, it should follow 0/-E convention\n" -"%s: loading module anyway...\n", - __func__, mod->name, ret, - __func__); - dump_stack(); - } +SYSCALL_DEFINE2(finit_module, int, fd, const char __user *, uargs) +{ + int err; + struct load_info info = { }; - /* Now it's a first class citizen! 
*/ - mod->state = MODULE_STATE_LIVE; - blocking_notifier_call_chain(&module_notify_list, - MODULE_STATE_LIVE, mod); + err = may_init_module(); + if (err) + return err; - /* We need to finish all async code before the module init sequence is done */ - async_synchronize_full(); + pr_debug("finit_module: fd=%d, uargs=%p\n", fd, uargs); - mutex_lock(&module_mutex); - /* Drop initial reference. */ - module_put(mod); - trim_init_extable(mod); -#ifdef CONFIG_KALLSYMS - mod->num_symtab = mod->core_num_syms; - mod->symtab = mod->core_symtab; - mod->strtab = mod->core_strtab; -#endif - unset_module_init_ro_nx(mod); - module_free(mod, mod->module_init); - mod->module_init = NULL; - mod->init_size = 0; - mod->init_ro_size = 0; - mod->init_text_size = 0; - mutex_unlock(&module_mutex); - wake_up_all(&module_wq); + err = copy_module_from_fd(fd, &info); + if (err) + return err; - return 0; + return load_module(&info, uargs); } static inline int within(unsigned long addr, void *start, unsigned long size) diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c index dbff751e4086..395084d4ce16 100644 --- a/kernel/sys_ni.c +++ b/kernel/sys_ni.c @@ -25,6 +25,7 @@ cond_syscall(sys_swapoff); cond_syscall(sys_kexec_load); cond_syscall(compat_sys_kexec_load); cond_syscall(sys_init_module); +cond_syscall(sys_finit_module); cond_syscall(sys_delete_module); cond_syscall(sys_socketpair); cond_syscall(sys_bind); -- cgit v1.2.3 From 2f3238aebedb243804f58d62d57244edec4149b2 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Mon, 22 Oct 2012 18:09:41 +1030 Subject: module: add flags arg to sys_finit_module() Thanks to Michael Kerrisk for keeping us honest. These flags are actually useful for eliminating the only case where kmod has to mangle a module's internals: for overriding module versioning. 
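For illustration only (not part of the patch): building on the userspace sketch shown after the earlier finit_module patch, a caller wanting the behaviour kmod needs for forced loads would pass the new flags, whose values come from the include/uapi/linux/module.h hunk below. This assumes __NR_finit_module is available as in that earlier sketch.

/* Hypothetical helper; flag values taken from this patch. */
#include <unistd.h>
#include <sys/syscall.h>

#ifndef MODULE_INIT_IGNORE_MODVERSIONS
#define MODULE_INIT_IGNORE_MODVERSIONS	1
#define MODULE_INIT_IGNORE_VERMAGIC	2
#endif

/* Roughly what a forced load needs: skip both the modversion
 * and vermagic checks instead of mangling the module image. */
static long force_load_module(int fd, const char *args)
{
	return syscall(__NR_finit_module, fd, args,
		       MODULE_INIT_IGNORE_MODVERSIONS |
		       MODULE_INIT_IGNORE_VERMAGIC);
}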
Signed-off-by: Rusty Russell Acked-by: Lucas De Marchi Acked-by: Kees Cook --- include/linux/syscalls.h | 2 +- include/uapi/linux/module.h | 8 ++++++++ kernel/module.c | 40 ++++++++++++++++++++++++++-------------- 3 files changed, 35 insertions(+), 15 deletions(-) create mode 100644 include/uapi/linux/module.h (limited to 'include/linux') diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 32bc035bcd68..8cf7b508cb50 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -868,5 +868,5 @@ asmlinkage long sys_process_vm_writev(pid_t pid, asmlinkage long sys_kcmp(pid_t pid1, pid_t pid2, int type, unsigned long idx1, unsigned long idx2); -asmlinkage long sys_finit_module(int fd, const char __user *uargs); +asmlinkage long sys_finit_module(int fd, const char __user *uargs, int flags); #endif diff --git a/include/uapi/linux/module.h b/include/uapi/linux/module.h new file mode 100644 index 000000000000..38da4258b12f --- /dev/null +++ b/include/uapi/linux/module.h @@ -0,0 +1,8 @@ +#ifndef _UAPI_LINUX_MODULE_H +#define _UAPI_LINUX_MODULE_H + +/* Flags for sys_finit_module: */ +#define MODULE_INIT_IGNORE_MODVERSIONS 1 +#define MODULE_INIT_IGNORE_VERMAGIC 2 + +#endif /* _UAPI_LINUX_MODULE_H */ diff --git a/kernel/module.c b/kernel/module.c index 6d2c4e4ca1f5..1395ca382fb5 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -60,6 +60,7 @@ #include #include #include +#include #include "module-internal.h" #define CREATE_TRACE_POINTS @@ -2553,7 +2554,7 @@ static void free_copy(struct load_info *info) vfree(info->hdr); } -static int rewrite_section_headers(struct load_info *info) +static int rewrite_section_headers(struct load_info *info, int flags) { unsigned int i; @@ -2581,7 +2582,10 @@ static int rewrite_section_headers(struct load_info *info) } /* Track but don't keep modinfo and version sections. */ - info->index.vers = find_sec(info, "__versions"); + if (flags & MODULE_INIT_IGNORE_MODVERSIONS) + info->index.vers = 0; /* Pretend no __versions section! */ + else + info->index.vers = find_sec(info, "__versions"); info->index.info = find_sec(info, ".modinfo"); info->sechdrs[info->index.info].sh_flags &= ~(unsigned long)SHF_ALLOC; info->sechdrs[info->index.vers].sh_flags &= ~(unsigned long)SHF_ALLOC; @@ -2596,7 +2600,7 @@ static int rewrite_section_headers(struct load_info *info) * Return the temporary module pointer (we'll replace it with the final * one when we move the module sections around). */ -static struct module *setup_load_info(struct load_info *info) +static struct module *setup_load_info(struct load_info *info, int flags) { unsigned int i; int err; @@ -2607,7 +2611,7 @@ static struct module *setup_load_info(struct load_info *info) info->secstrings = (void *)info->hdr + info->sechdrs[info->hdr->e_shstrndx].sh_offset; - err = rewrite_section_headers(info); + err = rewrite_section_headers(info, flags); if (err) return ERR_PTR(err); @@ -2645,11 +2649,14 @@ static struct module *setup_load_info(struct load_info *info) return mod; } -static int check_modinfo(struct module *mod, struct load_info *info) +static int check_modinfo(struct module *mod, struct load_info *info, int flags) { const char *modmagic = get_modinfo(info, "vermagic"); int err; + if (flags & MODULE_INIT_IGNORE_VERMAGIC) + modmagic = NULL; + /* This is allowed: modprobe --force will invalidate it. 
*/ if (!modmagic) { err = try_to_force_load(mod, "bad vermagic"); @@ -2885,18 +2892,18 @@ int __weak module_frob_arch_sections(Elf_Ehdr *hdr, return 0; } -static struct module *layout_and_allocate(struct load_info *info) +static struct module *layout_and_allocate(struct load_info *info, int flags) { /* Module within temporary copy. */ struct module *mod; Elf_Shdr *pcpusec; int err; - mod = setup_load_info(info); + mod = setup_load_info(info, flags); if (IS_ERR(mod)) return mod; - err = check_modinfo(mod, info); + err = check_modinfo(mod, info, flags); if (err) return ERR_PTR(err); @@ -3078,7 +3085,8 @@ static int may_init_module(void) /* Allocate and load the module: note that size of section 0 is always zero, and we rely on this for optional sections. */ -static int load_module(struct load_info *info, const char __user *uargs) +static int load_module(struct load_info *info, const char __user *uargs, + int flags) { struct module *mod, *old; long err; @@ -3092,7 +3100,7 @@ static int load_module(struct load_info *info, const char __user *uargs) goto free_copy; /* Figure out module layout, and allocate all the memory. */ - mod = layout_and_allocate(info); + mod = layout_and_allocate(info, flags); if (IS_ERR(mod)) { err = PTR_ERR(mod); goto free_copy; @@ -3241,10 +3249,10 @@ SYSCALL_DEFINE3(init_module, void __user *, umod, if (err) return err; - return load_module(&info, uargs); + return load_module(&info, uargs, 0); } -SYSCALL_DEFINE2(finit_module, int, fd, const char __user *, uargs) +SYSCALL_DEFINE3(finit_module, int, fd, const char __user *, uargs, int, flags) { int err; struct load_info info = { }; @@ -3253,13 +3261,17 @@ SYSCALL_DEFINE2(finit_module, int, fd, const char __user *, uargs) if (err) return err; - pr_debug("finit_module: fd=%d, uargs=%p\n", fd, uargs); + pr_debug("finit_module: fd=%d, uargs=%p, flags=%i\n", fd, uargs, flags); + + if (flags & ~(MODULE_INIT_IGNORE_MODVERSIONS + |MODULE_INIT_IGNORE_VERMAGIC)) + return -EINVAL; err = copy_module_from_fd(fd, &info); if (err) return err; - return load_module(&info, uargs); + return load_module(&info, uargs, flags); } static inline int within(unsigned long addr, void *start, unsigned long size) -- cgit v1.2.3 From 2e72d51b4ac32989496870cd8171b3682fea1839 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Tue, 16 Oct 2012 07:32:07 +1030 Subject: security: introduce kernel_module_from_file hook Now that kernel module origins can be reasoned about, provide a hook to the LSMs to make policy decisions about the module file. This will let Chrome OS enforce that loadable kernel modules can only come from its read-only hash-verified root filesystem. Other LSMs can, for example, read extended attributes for signatures, etc. Signed-off-by: Kees Cook Acked-by: Serge E. Hallyn Acked-by: Eric Paris Acked-by: Mimi Zohar Acked-by: James Morris Signed-off-by: Rusty Russell --- include/linux/security.h | 13 +++++++++++++ kernel/module.c | 11 +++++++++++ security/capability.c | 6 ++++++ security/security.c | 5 +++++ 4 files changed, 35 insertions(+) (limited to 'include/linux') diff --git a/include/linux/security.h b/include/linux/security.h index 05e88bdcf7d9..0f6afc657f77 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -694,6 +694,12 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) * userspace to load a kernel module with the given name. * @kmod_name name of the module requested by the kernel * Return 0 if successful. + * @kernel_module_from_file: + * Load a kernel module from userspace. 
+ * @file contains the file structure pointing to the file containing + * the kernel module to load. If the module is being loaded from a blob, + * this argument will be NULL. + * Return 0 if permission is granted. * @task_fix_setuid: * Update the module's state after setting one or more of the user * identity attributes of the current process. The @flags parameter @@ -1508,6 +1514,7 @@ struct security_operations { int (*kernel_act_as)(struct cred *new, u32 secid); int (*kernel_create_files_as)(struct cred *new, struct inode *inode); int (*kernel_module_request)(char *kmod_name); + int (*kernel_module_from_file)(struct file *file); int (*task_fix_setuid) (struct cred *new, const struct cred *old, int flags); int (*task_setpgid) (struct task_struct *p, pid_t pgid); @@ -1765,6 +1772,7 @@ void security_transfer_creds(struct cred *new, const struct cred *old); int security_kernel_act_as(struct cred *new, u32 secid); int security_kernel_create_files_as(struct cred *new, struct inode *inode); int security_kernel_module_request(char *kmod_name); +int security_kernel_module_from_file(struct file *file); int security_task_fix_setuid(struct cred *new, const struct cred *old, int flags); int security_task_setpgid(struct task_struct *p, pid_t pgid); @@ -2278,6 +2286,11 @@ static inline int security_kernel_module_request(char *kmod_name) return 0; } +static inline int security_kernel_module_from_file(struct file *file) +{ + return 0; +} + static inline int security_task_fix_setuid(struct cred *new, const struct cred *old, int flags) diff --git a/kernel/module.c b/kernel/module.c index 1395ca382fb5..a1d2ed8bab93 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -29,6 +29,7 @@ #include #include #include +#include #include #include #include @@ -2485,10 +2486,16 @@ static int elf_header_check(struct load_info *info) static int copy_module_from_user(const void __user *umod, unsigned long len, struct load_info *info) { + int err; + info->len = len; if (info->len < sizeof(*(info->hdr))) return -ENOEXEC; + err = security_kernel_module_from_file(NULL); + if (err) + return err; + /* Suck in entire file: we'll want most of it. 
*/ info->hdr = vmalloc(info->len); if (!info->hdr) @@ -2515,6 +2522,10 @@ static int copy_module_from_fd(int fd, struct load_info *info) if (!file) return -ENOEXEC; + err = security_kernel_module_from_file(file); + if (err) + goto out; + err = vfs_getattr(file->f_vfsmnt, file->f_dentry, &stat); if (err) goto out; diff --git a/security/capability.c b/security/capability.c index b14a30c234b8..0fe5a026aef8 100644 --- a/security/capability.c +++ b/security/capability.c @@ -395,6 +395,11 @@ static int cap_kernel_module_request(char *kmod_name) return 0; } +static int cap_kernel_module_from_file(struct file *file) +{ + return 0; +} + static int cap_task_setpgid(struct task_struct *p, pid_t pgid) { return 0; @@ -967,6 +972,7 @@ void __init security_fixup_ops(struct security_operations *ops) set_to_cap_if_null(ops, kernel_act_as); set_to_cap_if_null(ops, kernel_create_files_as); set_to_cap_if_null(ops, kernel_module_request); + set_to_cap_if_null(ops, kernel_module_from_file); set_to_cap_if_null(ops, task_fix_setuid); set_to_cap_if_null(ops, task_setpgid); set_to_cap_if_null(ops, task_getpgid); diff --git a/security/security.c b/security/security.c index 8dcd4ae10a5f..ce88630de15d 100644 --- a/security/security.c +++ b/security/security.c @@ -820,6 +820,11 @@ int security_kernel_module_request(char *kmod_name) return security_ops->kernel_module_request(kmod_name); } +int security_kernel_module_from_file(struct file *file) +{ + return security_ops->kernel_module_from_file(file); +} + int security_task_fix_setuid(struct cred *new, const struct cred *old, int flags) { -- cgit v1.2.3 From fdf90729e57812cb12d7938e2dee7c71e875fb08 Mon Sep 17 00:00:00 2001 From: Mimi Zohar Date: Tue, 16 Oct 2012 12:40:08 +1030 Subject: ima: support new kernel module syscall With the addition of the new kernel module syscall, which defines two arguments - a file descriptor to the kernel module and a pointer to a NULL terminated string of module arguments - it is now possible to measure and appraise kernel modules like any other file on the file system. This patch adds support to measure and appraise kernel modules in an extensible and consistent manner. To support filesystems without extended attribute support, additional patches could pass the signature as the first parameter. 
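For illustration only, not part of this patch: once MODULE_CHECK exists as a policy hook, an administrator-supplied policy written through the usual IMA policy interface could carry rules along these lines. The measure rule mirrors the default rule added below; the appraise rule is an assumed example.

    measure func=MODULE_CHECK uid=0
    appraise func=MODULE_CHECK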
Signed-off-by: Mimi Zohar Signed-off-by: Rusty Russell --- Documentation/ABI/testing/ima_policy | 3 ++- include/linux/ima.h | 6 ++++++ security/integrity/ima/ima.h | 2 +- security/integrity/ima/ima_api.c | 4 ++-- security/integrity/ima/ima_main.c | 21 +++++++++++++++++++++ security/integrity/ima/ima_policy.c | 3 +++ security/security.c | 7 ++++++- 7 files changed, 41 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/Documentation/ABI/testing/ima_policy b/Documentation/ABI/testing/ima_policy index 986946613542..ec0a38ef3145 100644 --- a/Documentation/ABI/testing/ima_policy +++ b/Documentation/ABI/testing/ima_policy @@ -23,7 +23,7 @@ Description: lsm: [[subj_user=] [subj_role=] [subj_type=] [obj_user=] [obj_role=] [obj_type=]] - base: func:= [BPRM_CHECK][FILE_MMAP][FILE_CHECK] + base: func:= [BPRM_CHECK][FILE_MMAP][FILE_CHECK][MODULE_CHECK] mask:= [MAY_READ] [MAY_WRITE] [MAY_APPEND] [MAY_EXEC] fsmagic:= hex value uid:= decimal value @@ -53,6 +53,7 @@ Description: measure func=BPRM_CHECK measure func=FILE_MMAP mask=MAY_EXEC measure func=FILE_CHECK mask=MAY_READ uid=0 + measure func=MODULE_CHECK uid=0 appraise fowner=0 The default policy measures all executables in bprm_check, diff --git a/include/linux/ima.h b/include/linux/ima.h index 2c7223d7e73b..86c361e947b9 100644 --- a/include/linux/ima.h +++ b/include/linux/ima.h @@ -18,6 +18,7 @@ extern int ima_bprm_check(struct linux_binprm *bprm); extern int ima_file_check(struct file *file, int mask); extern void ima_file_free(struct file *file); extern int ima_file_mmap(struct file *file, unsigned long prot); +extern int ima_module_check(struct file *file); #else static inline int ima_bprm_check(struct linux_binprm *bprm) @@ -40,6 +41,11 @@ static inline int ima_file_mmap(struct file *file, unsigned long prot) return 0; } +static inline int ima_module_check(struct file *file) +{ + return 0; +} + #endif /* CONFIG_IMA_H */ #ifdef CONFIG_IMA_APPRAISE diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h index 6ee8826662cc..3b2adb794f15 100644 --- a/security/integrity/ima/ima.h +++ b/security/integrity/ima/ima.h @@ -127,7 +127,7 @@ struct integrity_iint_cache *integrity_iint_insert(struct inode *inode); struct integrity_iint_cache *integrity_iint_find(struct inode *inode); /* IMA policy related functions */ -enum ima_hooks { FILE_CHECK = 1, FILE_MMAP, BPRM_CHECK, POST_SETATTR }; +enum ima_hooks { FILE_CHECK = 1, FILE_MMAP, BPRM_CHECK, MODULE_CHECK, POST_SETATTR }; int ima_match_policy(struct inode *inode, enum ima_hooks func, int mask, int flags); diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c index b356884fb3ef..0cea3db21657 100644 --- a/security/integrity/ima/ima_api.c +++ b/security/integrity/ima/ima_api.c @@ -100,12 +100,12 @@ err_out: * ima_get_action - appraise & measure decision based on policy. * @inode: pointer to inode to measure * @mask: contains the permission mask (MAY_READ, MAY_WRITE, MAY_EXECUTE) - * @function: calling function (FILE_CHECK, BPRM_CHECK, FILE_MMAP) + * @function: calling function (FILE_CHECK, BPRM_CHECK, FILE_MMAP, MODULE_CHECK) * * The policy is defined in terms of keypairs: * subj=, obj=, type=, func=, mask=, fsmagic= * subj,obj, and type: are LSM specific. 
- * func: FILE_CHECK | BPRM_CHECK | FILE_MMAP + * func: FILE_CHECK | BPRM_CHECK | FILE_MMAP | MODULE_CHECK * mask: contains the permission mask * fsmagic: hex value * diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c index 73c9a268253e..45de18e9a6f2 100644 --- a/security/integrity/ima/ima_main.c +++ b/security/integrity/ima/ima_main.c @@ -280,6 +280,27 @@ int ima_file_check(struct file *file, int mask) } EXPORT_SYMBOL_GPL(ima_file_check); +/** + * ima_module_check - based on policy, collect/store/appraise measurement. + * @file: pointer to the file to be measured/appraised + * + * Measure/appraise kernel modules based on policy. + * + * Always return 0 and audit dentry_open failures. + * Return code is based upon measurement appraisal. + */ +int ima_module_check(struct file *file) +{ + int rc; + + if (!file) + rc = INTEGRITY_UNKNOWN; + else + rc = process_measurement(file, file->f_dentry->d_name.name, + MAY_EXEC, MODULE_CHECK); + return (ima_appraise & IMA_APPRAISE_ENFORCE) ? rc : 0; +} + static int __init init_ima(void) { int error; diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c index c7dacd2eab7a..af7d182d5a46 100644 --- a/security/integrity/ima/ima_policy.c +++ b/security/integrity/ima/ima_policy.c @@ -80,6 +80,7 @@ static struct ima_rule_entry default_rules[] = { .flags = IMA_FUNC | IMA_MASK}, {.action = MEASURE,.func = FILE_CHECK,.mask = MAY_READ,.uid = GLOBAL_ROOT_UID, .flags = IMA_FUNC | IMA_MASK | IMA_UID}, + {.action = MEASURE,.func = MODULE_CHECK, .flags = IMA_FUNC}, }; static struct ima_rule_entry default_appraise_rules[] = { @@ -401,6 +402,8 @@ static int ima_parse_rule(char *rule, struct ima_rule_entry *entry) /* PATH_CHECK is for backwards compat */ else if (strcmp(args[0].from, "PATH_CHECK") == 0) entry->func = FILE_CHECK; + else if (strcmp(args[0].from, "MODULE_CHECK") == 0) + entry->func = MODULE_CHECK; else if (strcmp(args[0].from, "FILE_MMAP") == 0) entry->func = FILE_MMAP; else if (strcmp(args[0].from, "BPRM_CHECK") == 0) diff --git a/security/security.c b/security/security.c index ce88630de15d..daa97f4ac9d1 100644 --- a/security/security.c +++ b/security/security.c @@ -822,7 +822,12 @@ int security_kernel_module_request(char *kmod_name) int security_kernel_module_from_file(struct file *file) { - return security_ops->kernel_module_from_file(file); + int ret; + + ret = security_ops->kernel_module_from_file(file); + if (ret) + return ret; + return ima_module_check(file); } int security_task_fix_setuid(struct cred *new, const struct cred *old, -- cgit v1.2.3 From 6f33d58794ef4cef4b2c706029810f9688bd3026 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Thu, 22 Nov 2012 12:30:25 +1030 Subject: __UNIQUE_ID() Jan Beulich points out __COUNTER__ (gcc 4.3 and above), so let's use that to create unique ids. This is better than __LINE__ which we use today, so provide a wrapper. Stanislaw Gruszka reported that some module parameters start with a digit, so we need to prepend when we for the unique id. 
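To make the behaviour concrete, an illustrative use that is not part of the patch: with the gcc-4 definition, each expansion pastes the fixed __UNIQUE_ID_ prefix and the current __COUNTER__ value around the caller's name, so repeated uses in one translation unit get distinct identifiers, and the prefix keeps the result a valid C identifier even when the supplied name begins with a digit. The names and counter values below are hypothetical.

    /* Sketch: two declarations that would otherwise want the same name. */
    static const char __UNIQUE_ID(parmtype)[] = "foo:int";    /* -> __UNIQUE_ID_parmtype0 */
    static const char __UNIQUE_ID(3c59x_parm)[] = "bar:int";  /* -> __UNIQUE_ID_3c59x_parm1 */

On older compilers the fallback in compiler.h still pastes __LINE__, which is unique enough across lines but not for multiple expansions on the same line.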
Signed-off-by: Rusty Russell Acked-by: Jan Beulich --- include/linux/compiler-gcc4.h | 2 ++ include/linux/compiler.h | 9 +++++++++ 2 files changed, 11 insertions(+) (limited to 'include/linux') diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h index 412bc6c2b023..56c802cba7f6 100644 --- a/include/linux/compiler-gcc4.h +++ b/include/linux/compiler-gcc4.h @@ -31,6 +31,8 @@ #define __linktime_error(message) __attribute__((__error__(message))) +#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__) + #if __GNUC_MINOR__ >= 5 /* * Mark a position in code as unreachable. This can be used to diff --git a/include/linux/compiler.h b/include/linux/compiler.h index f430e4162f41..5f45335e1ac7 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -42,6 +42,10 @@ extern void __chk_io_ptr(const volatile void __iomem *); # define __rcu #endif +/* Indirect macros required for expanded argument pasting, eg. __LINE__. */ +#define ___PASTE(a,b) a##b +#define __PASTE(a,b) ___PASTE(a,b) + #ifdef __KERNEL__ #ifdef __GNUC__ @@ -164,6 +168,11 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect); (typeof(ptr)) (__ptr + (off)); }) #endif +/* Not-quite-unique ID. */ +#ifndef __UNIQUE_ID +# define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__) +#endif + #endif /* __KERNEL__ */ #endif /* __ASSEMBLY__ */ -- cgit v1.2.3 From 34182eea36fc1d70d748b0947c873314980ba806 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Thu, 22 Nov 2012 12:30:25 +1030 Subject: moduleparam: use __UNIQUE_ID() Signed-off-by: Rusty Russell --- include/linux/moduleparam.h | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h index d6a58065c09c..137b4198fc03 100644 --- a/include/linux/moduleparam.h +++ b/include/linux/moduleparam.h @@ -16,17 +16,15 @@ /* Chosen so that structs with an unsigned long line up. */ #define MAX_PARAM_PREFIX_LEN (64 - sizeof(unsigned long)) -#define ___module_cat(a,b) __mod_ ## a ## b -#define __module_cat(a,b) ___module_cat(a,b) #ifdef MODULE #define __MODULE_INFO(tag, name, info) \ -static const char __module_cat(name,__LINE__)[] \ +static const char __UNIQUE_ID(name)[] \ __used __attribute__((section(".modinfo"), unused, aligned(1))) \ = __stringify(tag) "=" info #else /* !MODULE */ /* This struct is here for syntactic coherency, it is not used */ #define __MODULE_INFO(tag, name, info) \ - struct __module_cat(name,__LINE__) {} + struct __UNIQUE_ID(name) {} #endif #define __MODULE_PARM_TYPE(name, _type) \ __MODULE_INFO(parmtype, name##type, #name ":" _type) -- cgit v1.2.3 From facc0a6bd494ce21e31b34fc355ecf702518272b Mon Sep 17 00:00:00 2001 From: David Howells Date: Wed, 5 Dec 2012 11:55:28 +1030 Subject: ASN.1: Define indefinite length marker constant Define a constant to hold the marker value seen in an indefinite-length element. 
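By way of illustration, a sketch that is not part of the patch: in BER, a first length octet of 0x80 marks the indefinite form, meaning no length is encoded and the content runs until an end-of-contents marker, so a decoder can compare the length octet directly against the new constant.

    #include <linux/types.h>
    #include <linux/asn1.h>

    /* Sketch: does this BER length octet announce an indefinite-length element? */
    static inline bool ber_length_is_indefinite(unsigned char len_octet)
    {
        return len_octet == ASN1_INDEFINITE_LENGTH;
    }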
Signed-off-by: David Howells Signed-off-by: Rusty Russell --- include/linux/asn1.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include/linux') diff --git a/include/linux/asn1.h b/include/linux/asn1.h index 5c3f4e4b9a23..eed6982860ba 100644 --- a/include/linux/asn1.h +++ b/include/linux/asn1.h @@ -64,4 +64,6 @@ enum asn1_tag { ASN1_LONG_TAG = 31 /* Long form tag */ }; +#define ASN1_INDEFINITE_LENGTH 0x80 + #endif /* _LINUX_ASN1_H */ -- cgit v1.2.3 From 63b68901dfd590cc13d4fe5c08dec2ca75b3c4aa Mon Sep 17 00:00:00 2001 From: Roger Quadros Date: Fri, 14 Dec 2012 09:09:11 -0800 Subject: mfd: omap-usb-host: get rid of cpu_is_omap..() macros Instead of using cpu_is_omap..() macros in the device driver we rely on information provided in the platform data. The only information we need is whether the USB Host module has a single ULPI bypass control bit for all ports or individual bypass control bits for each port. OMAP3 REV2.1 and earlier have the former. Signed-off-by: Roger Quadros Acked-by: Samuel Ortiz [tony@atomide.com: updated to remove plat/cpu.h] Signed-off-by: Tony Lindgren --- arch/arm/mach-omap2/usb-host.c | 4 ++++ drivers/mfd/omap-usb-host.c | 3 +-- include/linux/platform_data/usb-omap.h | 3 +++ 3 files changed, 8 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/arch/arm/mach-omap2/usb-host.c b/arch/arm/mach-omap2/usb-host.c index d1dbe125b34f..2e44e8a22884 100644 --- a/arch/arm/mach-omap2/usb-host.c +++ b/arch/arm/mach-omap2/usb-host.c @@ -508,6 +508,10 @@ void __init usbhs_init(const struct usbhs_omap_board_data *pdata) if (cpu_is_omap34xx()) { setup_ehci_io_mux(pdata->port_mode); setup_ohci_io_mux(pdata->port_mode); + + if (omap_rev() <= OMAP3430_REV_ES2_1) + usbhs_data.single_ulpi_bypass = true; + } else if (cpu_is_omap44xx()) { setup_4430ehci_io_mux(pdata->port_mode); setup_4430ohci_io_mux(pdata->port_mode); diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c index 770a0d01e0b9..05164d7f054b 100644 --- a/drivers/mfd/omap-usb-host.c +++ b/drivers/mfd/omap-usb-host.c @@ -25,7 +25,6 @@ #include #include #include -#include #include #include #include @@ -384,7 +383,7 @@ static void omap_usbhs_init(struct device *dev) reg &= ~OMAP_UHH_HOSTCONFIG_P3_CONNECT_STATUS; /* Bypass the TLL module for PHY mode operation */ - if (cpu_is_omap3430() && (omap_rev() <= OMAP3430_REV_ES2_1)) { + if (pdata->single_ulpi_bypass) { dev_dbg(dev, "OMAP3 ES version <= ES2.1\n"); if (is_ehci_phy_mode(pdata->port_mode[0]) || is_ehci_phy_mode(pdata->port_mode[1]) || diff --git a/include/linux/platform_data/usb-omap.h b/include/linux/platform_data/usb-omap.h index 8570bcfe6311..ef65b67c56c3 100644 --- a/include/linux/platform_data/usb-omap.h +++ b/include/linux/platform_data/usb-omap.h @@ -59,6 +59,9 @@ struct usbhs_omap_platform_data { struct ehci_hcd_omap_platform_data *ehci_data; struct ohci_hcd_omap_platform_data *ohci_data; + + /* OMAP3 <= ES2.1 have a single ulpi bypass control bit */ + unsigned single_ulpi_bypass:1; }; /*-------------------------------------------------------------------------*/ -- cgit v1.2.3 From d9ba573718666df2e7e30d671f81bba39d07f91c Mon Sep 17 00:00:00 2001 From: Tony Lindgren Date: Fri, 14 Dec 2012 09:09:11 -0800 Subject: ARM: OMAP: Move plat/omap-serial.h to include/linux/platform_data/serial-omap.h We need to move this file to allow ARM multiplatform configurations to build for omap2+. This can now be done as this file now only contains platform_data. 
cc: Russell King cc: Alan Cox cc: Greg Kroah-Hartman cc: Govindraj.R cc: Kevin Hilman cc: linux-serial@vger.kernel.org Reviewed-by: Felipe Balbi Signed-off-by: Tony Lindgren --- arch/arm/mach-omap2/serial.c | 3 +- arch/arm/plat-omap/include/plat/omap-serial.h | 51 --------------------------- drivers/tty/serial/omap-serial.c | 3 +- include/linux/platform_data/serial-omap.h | 51 +++++++++++++++++++++++++++ 4 files changed, 53 insertions(+), 55 deletions(-) delete mode 100644 arch/arm/plat-omap/include/plat/omap-serial.h create mode 100644 include/linux/platform_data/serial-omap.h (limited to 'include/linux') diff --git a/arch/arm/mach-omap2/serial.c b/arch/arm/mach-omap2/serial.c index 93d102535c85..04fdbc4c499b 100644 --- a/arch/arm/mach-omap2/serial.c +++ b/arch/arm/mach-omap2/serial.c @@ -27,8 +27,7 @@ #include #include #include - -#include +#include #include "common.h" #include "omap_hwmod.h" diff --git a/arch/arm/plat-omap/include/plat/omap-serial.h b/arch/arm/plat-omap/include/plat/omap-serial.h deleted file mode 100644 index ff9b0aab5281..000000000000 --- a/arch/arm/plat-omap/include/plat/omap-serial.h +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Driver for OMAP-UART controller. - * Based on drivers/serial/8250.c - * - * Copyright (C) 2010 Texas Instruments. - * - * Authors: - * Govindraj R - * Thara Gopinath - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ - -#ifndef __OMAP_SERIAL_H__ -#define __OMAP_SERIAL_H__ - -#include -#include -#include - -#define DRIVER_NAME "omap_uart" - -/* - * Use tty device name as ttyO, [O -> OMAP] - * in bootargs we specify as console=ttyO0 if uart1 - * is used as console uart. - */ -#define OMAP_SERIAL_NAME "ttyO" - -struct omap_uart_port_info { - bool dma_enabled; /* To specify DMA Mode */ - unsigned int uartclk; /* UART clock rate */ - upf_t flags; /* UPF_* flags */ - unsigned int dma_rx_buf_size; - unsigned int dma_rx_timeout; - unsigned int autosuspend_timeout; - unsigned int dma_rx_poll_rate; - int DTR_gpio; - int DTR_inverted; - int DTR_present; - - int (*get_context_loss_count)(struct device *); - void (*set_forceidle)(struct device *); - void (*set_noidle)(struct device *); - void (*enable_wakeup)(struct device *, bool); -}; - -#endif /* __OMAP_SERIAL_H__ */ diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c index 23f797eb7a28..57d6b29c039c 100644 --- a/drivers/tty/serial/omap-serial.c +++ b/drivers/tty/serial/omap-serial.c @@ -41,8 +41,7 @@ #include #include #include - -#include +#include #define OMAP_MAX_HSUART_PORTS 6 diff --git a/include/linux/platform_data/serial-omap.h b/include/linux/platform_data/serial-omap.h new file mode 100644 index 000000000000..ff9b0aab5281 --- /dev/null +++ b/include/linux/platform_data/serial-omap.h @@ -0,0 +1,51 @@ +/* + * Driver for OMAP-UART controller. + * Based on drivers/serial/8250.c + * + * Copyright (C) 2010 Texas Instruments. + * + * Authors: + * Govindraj R + * Thara Gopinath + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#ifndef __OMAP_SERIAL_H__ +#define __OMAP_SERIAL_H__ + +#include +#include +#include + +#define DRIVER_NAME "omap_uart" + +/* + * Use tty device name as ttyO, [O -> OMAP] + * in bootargs we specify as console=ttyO0 if uart1 + * is used as console uart. + */ +#define OMAP_SERIAL_NAME "ttyO" + +struct omap_uart_port_info { + bool dma_enabled; /* To specify DMA Mode */ + unsigned int uartclk; /* UART clock rate */ + upf_t flags; /* UPF_* flags */ + unsigned int dma_rx_buf_size; + unsigned int dma_rx_timeout; + unsigned int autosuspend_timeout; + unsigned int dma_rx_poll_rate; + int DTR_gpio; + int DTR_inverted; + int DTR_present; + + int (*get_context_loss_count)(struct device *); + void (*set_forceidle)(struct device *); + void (*set_noidle)(struct device *); + void (*enable_wakeup)(struct device *, bool); +}; + +#endif /* __OMAP_SERIAL_H__ */ -- cgit v1.2.3 From 8e63b6a8adabb0551124c3b78f7f5f36912c3728 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Sat, 15 Dec 2012 15:21:52 -0500 Subject: NFSv4.1: Move the RPC timestamp out of the slot. Shave a few bytes off the slot table size by moving the RPC timestamp into the sequence results. Signed-off-by: Trond Myklebust --- fs/nfs/nfs4proc.c | 14 +++++++------- fs/nfs/nfs4session.c | 3 +-- fs/nfs/nfs4session.h | 1 - include/linux/nfs_xdr.h | 1 + 4 files changed, 9 insertions(+), 10 deletions(-) (limited to 'include/linux') diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 9003b8f6b77f..afb428e63b52 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -419,7 +419,6 @@ static int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res * { struct nfs4_session *session; struct nfs4_slot *slot; - unsigned long timestamp; struct nfs_client *clp; int ret = 1; @@ -444,9 +443,8 @@ static int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res * case 0: /* Update the slot's sequence and clientid lease timer */ ++slot->seq_nr; - timestamp = slot->renewal_time; clp = session->clp; - do_renew_lease(clp, timestamp); + do_renew_lease(clp, res->sr_timestamp); /* Check sequence flags */ if (res->sr_status_flags != 0) nfs4_schedule_lease_recovery(clp); @@ -473,10 +471,11 @@ static int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res * * Could this slot have been previously retired? * If so, then the server may be expecting seq_nr = 1! 
*/ - if (slot->seq_nr == 1) - break; - slot->seq_nr = 1; - goto retry_nowait; + if (slot->seq_nr != 1) { + slot->seq_nr = 1; + goto retry_nowait; + } + break; case -NFS4ERR_SEQ_FALSE_RETRY: ++slot->seq_nr; goto retry_nowait; @@ -567,6 +566,7 @@ int nfs41_setup_sequence(struct nfs4_session *session, slot->slot_nr, slot->seq_nr); res->sr_slot = slot; + res->sr_timestamp = jiffies; res->sr_status_flags = 0; /* * sr_status is only set in decode_sequence, and so will remain diff --git a/fs/nfs/nfs4session.c b/fs/nfs/nfs4session.c index 1e6c87c443a7..0e1cc1f4e51a 100644 --- a/fs/nfs/nfs4session.c +++ b/fs/nfs/nfs4session.c @@ -143,7 +143,6 @@ struct nfs4_slot *nfs4_alloc_slot(struct nfs4_slot_table *tbl) if (slotid > tbl->highest_used_slotid || tbl->highest_used_slotid == NFS4_NO_SLOT) tbl->highest_used_slotid = slotid; - ret->renewal_time = jiffies; ret->generation = tbl->generation; out: @@ -228,9 +227,9 @@ static bool nfs41_assign_slot(struct rpc_task *task, void *pslot) if (nfs4_session_draining(tbl->session) && !args->sa_privileged) return false; - slot->renewal_time = jiffies; slot->generation = tbl->generation; args->sa_slot = slot; + res->sr_timestamp = jiffies; res->sr_slot = slot; res->sr_status_flags = 0; res->sr_status = 1; diff --git a/fs/nfs/nfs4session.h b/fs/nfs/nfs4session.h index 04f834cab16c..d17b08091d4b 100644 --- a/fs/nfs/nfs4session.h +++ b/fs/nfs/nfs4session.h @@ -19,7 +19,6 @@ struct nfs4_slot { struct nfs4_slot_table *table; struct nfs4_slot *next; unsigned long generation; - unsigned long renewal_time; u32 slot_nr; u32 seq_nr; }; diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index a55abd499c21..29adb12c7ecf 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -194,6 +194,7 @@ struct nfs4_sequence_args { struct nfs4_sequence_res { struct nfs4_slot *sr_slot; /* slot used to send request */ + unsigned long sr_timestamp; int sr_status; /* sequence operation status */ u32 sr_status_flags; u32 sr_highest_slotid; -- cgit v1.2.3 From afc59400d6c65bad66d4ad0b2daf879cbff8e23e Mon Sep 17 00:00:00 2001 From: "J. Bruce Fields" Date: Mon, 10 Dec 2012 18:01:37 -0500 Subject: nfsd4: cleanup: replace rq_resused count by rq_next_page pointer It may be a matter of personal taste, but I find this makes the code clearer. Signed-off-by: J. Bruce Fields --- fs/nfsd/nfs2acl.c | 2 +- fs/nfsd/nfs3acl.c | 2 +- fs/nfsd/nfs3proc.c | 6 +++--- fs/nfsd/nfs3xdr.c | 33 ++++++++++++++++----------------- fs/nfsd/nfs4xdr.c | 24 ++++++++++++------------ fs/nfsd/nfsxdr.c | 11 ++++++----- fs/nfsd/vfs.c | 18 ++++++++---------- include/linux/sunrpc/svc.h | 6 +++--- net/sunrpc/svc.c | 2 +- net/sunrpc/svcsock.c | 2 ++ net/sunrpc/xprtrdma/svc_rdma_recvfrom.c | 10 +++++----- net/sunrpc/xprtrdma/svc_rdma_sendto.c | 4 +++- 12 files changed, 61 insertions(+), 59 deletions(-) (limited to 'include/linux') diff --git a/fs/nfsd/nfs2acl.c b/fs/nfsd/nfs2acl.c index b314888825d5..9170861c804a 100644 --- a/fs/nfsd/nfs2acl.c +++ b/fs/nfsd/nfs2acl.c @@ -253,7 +253,7 @@ static int nfsaclsvc_encode_getaclres(struct svc_rqst *rqstp, __be32 *p, (resp->mask & NFS_ACL) ? resp->acl_access : NULL, (resp->mask & NFS_DFACL) ? 
resp->acl_default : NULL); while (w > 0) { - if (!rqstp->rq_respages[rqstp->rq_resused++]) + if (!*(rqstp->rq_next_page++)) return 0; w -= PAGE_SIZE; } diff --git a/fs/nfsd/nfs3acl.c b/fs/nfsd/nfs3acl.c index a596e9d987e4..9cbc1a841f87 100644 --- a/fs/nfsd/nfs3acl.c +++ b/fs/nfsd/nfs3acl.c @@ -184,7 +184,7 @@ static int nfs3svc_encode_getaclres(struct svc_rqst *rqstp, __be32 *p, (resp->mask & NFS_ACL) ? resp->acl_access : NULL, (resp->mask & NFS_DFACL) ? resp->acl_default : NULL); while (w > 0) { - if (!rqstp->rq_respages[rqstp->rq_resused++]) + if (!*(rqstp->rq_next_page++)) return 0; w -= PAGE_SIZE; } diff --git a/fs/nfsd/nfs3proc.c b/fs/nfsd/nfs3proc.c index 97d90d1c8608..1fc02dfdc5c4 100644 --- a/fs/nfsd/nfs3proc.c +++ b/fs/nfsd/nfs3proc.c @@ -460,7 +460,7 @@ nfsd3_proc_readdirplus(struct svc_rqst *rqstp, struct nfsd3_readdirargs *argp, __be32 nfserr; int count = 0; loff_t offset; - int i; + struct page **p; caddr_t page_addr = NULL; dprintk("nfsd: READDIR+(3) %s %d bytes at %d\n", @@ -484,8 +484,8 @@ nfsd3_proc_readdirplus(struct svc_rqst *rqstp, struct nfsd3_readdirargs *argp, &resp->common, nfs3svc_encode_entry_plus); memcpy(resp->verf, argp->verf, 8); - for (i=1; irq_resused ; i++) { - page_addr = page_address(rqstp->rq_respages[i]); + for (p = rqstp->rq_respages + 1; p < rqstp->rq_next_page; p++) { + page_addr = page_address(*p); if (((caddr_t)resp->buffer >= page_addr) && ((caddr_t)resp->buffer < page_addr + PAGE_SIZE)) { diff --git a/fs/nfsd/nfs3xdr.c b/fs/nfsd/nfs3xdr.c index 2b8618de6c27..324c0baf7cda 100644 --- a/fs/nfsd/nfs3xdr.c +++ b/fs/nfsd/nfs3xdr.c @@ -325,7 +325,7 @@ nfs3svc_decode_readargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_readargs *args) { unsigned int len; - int v,pn; + int v; u32 max_blocksize = svc_max_payload(rqstp); if (!(p = decode_fh(p, &args->fh))) @@ -340,8 +340,9 @@ nfs3svc_decode_readargs(struct svc_rqst *rqstp, __be32 *p, /* set up the kvec */ v=0; while (len > 0) { - pn = rqstp->rq_resused++; - rqstp->rq_vec[v].iov_base = page_address(rqstp->rq_respages[pn]); + struct page *p = *(rqstp->rq_next_page++); + + rqstp->rq_vec[v].iov_base = page_address(p); rqstp->rq_vec[v].iov_len = len < PAGE_SIZE? 
len : PAGE_SIZE; len -= rqstp->rq_vec[v].iov_len; v++; @@ -463,8 +464,7 @@ nfs3svc_decode_symlinkargs(struct svc_rqst *rqstp, __be32 *p, len = ntohl(*p++); if (len == 0 || len > NFS3_MAXPATHLEN || len >= PAGE_SIZE) return 0; - args->tname = new = - page_address(rqstp->rq_respages[rqstp->rq_resused++]); + args->tname = new = page_address(*(rqstp->rq_next_page++)); args->tlen = len; /* first copy and check from the first page */ old = (char*)p; @@ -535,8 +535,7 @@ nfs3svc_decode_readlinkargs(struct svc_rqst *rqstp, __be32 *p, { if (!(p = decode_fh(p, &args->fh))) return 0; - args->buffer = - page_address(rqstp->rq_respages[rqstp->rq_resused++]); + args->buffer = page_address(*(rqstp->rq_next_page++)); return xdr_argsize_check(rqstp, p); } @@ -567,8 +566,7 @@ nfs3svc_decode_readdirargs(struct svc_rqst *rqstp, __be32 *p, if (args->count > PAGE_SIZE) args->count = PAGE_SIZE; - args->buffer = - page_address(rqstp->rq_respages[rqstp->rq_resused++]); + args->buffer = page_address(*(rqstp->rq_next_page++)); return xdr_argsize_check(rqstp, p); } @@ -577,7 +575,7 @@ int nfs3svc_decode_readdirplusargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_readdirargs *args) { - int len, pn; + int len; u32 max_blocksize = svc_max_payload(rqstp); if (!(p = decode_fh(p, &args->fh))) @@ -592,9 +590,9 @@ nfs3svc_decode_readdirplusargs(struct svc_rqst *rqstp, __be32 *p, args->count = len; while (len > 0) { - pn = rqstp->rq_resused++; + struct page *p = *(rqstp->rq_next_page++); if (!args->buffer) - args->buffer = page_address(rqstp->rq_respages[pn]); + args->buffer = page_address(p); len -= PAGE_SIZE; } @@ -880,7 +878,7 @@ encode_entry(struct readdir_cd *ccd, const char *name, int namlen, common); __be32 *p = cd->buffer; caddr_t curr_page_addr = NULL; - int pn; /* current page number */ + struct page ** page; int slen; /* string (name) length */ int elen; /* estimated entry length in words */ int num_entry_words = 0; /* actual number of words */ @@ -917,8 +915,9 @@ encode_entry(struct readdir_cd *ccd, const char *name, int namlen, } /* determine which page in rq_respages[] we are currently filling */ - for (pn=1; pn < cd->rqstp->rq_resused; pn++) { - curr_page_addr = page_address(cd->rqstp->rq_respages[pn]); + for (page = cd->rqstp->rq_respages + 1; + page < cd->rqstp->rq_next_page; page++) { + curr_page_addr = page_address(*page); if (((caddr_t)cd->buffer >= curr_page_addr) && ((caddr_t)cd->buffer < curr_page_addr + PAGE_SIZE)) @@ -933,14 +932,14 @@ encode_entry(struct readdir_cd *ccd, const char *name, int namlen, if (plus) p = encode_entryplus_baggage(cd, p, name, namlen); num_entry_words = p - cd->buffer; - } else if (cd->rqstp->rq_respages[pn+1] != NULL) { + } else if (*(page+1) != NULL) { /* temporarily encode entry into next page, then move back to * current and next page in rq_respages[] */ __be32 *p1, *tmp; int len1, len2; /* grab next page for temporary storage of entry */ - p1 = tmp = page_address(cd->rqstp->rq_respages[pn+1]); + p1 = tmp = page_address(*(page+1)); p1 = encode_entry_baggage(cd, p1, name, namlen, ino); diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c index d7a3be5ab777..0dc11586682f 100644 --- a/fs/nfsd/nfs4xdr.c +++ b/fs/nfsd/nfs4xdr.c @@ -2906,7 +2906,8 @@ nfsd4_encode_read(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_read *read) { u32 eof; - int v, pn; + int v; + struct page *page; unsigned long maxcount; long len; __be32 *p; @@ -2925,16 +2926,15 @@ nfsd4_encode_read(struct nfsd4_compoundres *resp, __be32 nfserr, len = maxcount; v = 0; while (len > 0) { - pn = 
resp->rqstp->rq_resused; - if (!resp->rqstp->rq_respages[pn]) { /* ran out of pages */ + page = *(resp->rqstp->rq_next_page); + if (!page) { /* ran out of pages */ maxcount -= len; break; } - resp->rqstp->rq_vec[v].iov_base = - page_address(resp->rqstp->rq_respages[pn]); + resp->rqstp->rq_vec[v].iov_base = page_address(page); resp->rqstp->rq_vec[v].iov_len = len < PAGE_SIZE ? len : PAGE_SIZE; - resp->rqstp->rq_resused++; + resp->rqstp->rq_next_page++; v++; len -= PAGE_SIZE; } @@ -2980,10 +2980,10 @@ nfsd4_encode_readlink(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd return nfserr; if (resp->xbuf->page_len) return nfserr_resource; - if (!resp->rqstp->rq_respages[resp->rqstp->rq_resused]) + if (!*resp->rqstp->rq_next_page) return nfserr_resource; - page = page_address(resp->rqstp->rq_respages[resp->rqstp->rq_resused++]); + page = page_address(*(resp->rqstp->rq_next_page++)); maxcount = PAGE_SIZE; RESERVE_SPACE(4); @@ -3031,7 +3031,7 @@ nfsd4_encode_readdir(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4 return nfserr; if (resp->xbuf->page_len) return nfserr_resource; - if (!resp->rqstp->rq_respages[resp->rqstp->rq_resused]) + if (!*resp->rqstp->rq_next_page) return nfserr_resource; RESERVE_SPACE(NFS4_VERIFIER_SIZE); @@ -3059,7 +3059,7 @@ nfsd4_encode_readdir(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4 goto err_no_verf; } - page = page_address(resp->rqstp->rq_respages[resp->rqstp->rq_resused++]); + page = page_address(*(resp->rqstp->rq_next_page++)); readdir->common.err = 0; readdir->buflen = maxcount; readdir->buffer = page; @@ -3082,8 +3082,8 @@ nfsd4_encode_readdir(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4 p = readdir->buffer; *p++ = 0; /* no more entries */ *p++ = htonl(readdir->common.err == nfserr_eof); - resp->xbuf->page_len = ((char*)p) - (char*)page_address( - resp->rqstp->rq_respages[resp->rqstp->rq_resused-1]); + resp->xbuf->page_len = ((char*)p) - + (char*)page_address(*(resp->rqstp->rq_next_page-1)); /* Use rest of head for padding and remaining ops: */ resp->xbuf->tail[0].iov_base = tailbase; diff --git a/fs/nfsd/nfsxdr.c b/fs/nfsd/nfsxdr.c index 65ec595e2226..979b42106979 100644 --- a/fs/nfsd/nfsxdr.c +++ b/fs/nfsd/nfsxdr.c @@ -246,7 +246,7 @@ nfssvc_decode_readargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd_readargs *args) { unsigned int len; - int v,pn; + int v; if (!(p = decode_fh(p, &args->fh))) return 0; @@ -262,8 +262,9 @@ nfssvc_decode_readargs(struct svc_rqst *rqstp, __be32 *p, */ v=0; while (len > 0) { - pn = rqstp->rq_resused++; - rqstp->rq_vec[v].iov_base = page_address(rqstp->rq_respages[pn]); + struct page *p = *(rqstp->rq_next_page++); + + rqstp->rq_vec[v].iov_base = page_address(p); rqstp->rq_vec[v].iov_len = len < PAGE_SIZE?len:PAGE_SIZE; len -= rqstp->rq_vec[v].iov_len; v++; @@ -355,7 +356,7 @@ nfssvc_decode_readlinkargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd_readli { if (!(p = decode_fh(p, &args->fh))) return 0; - args->buffer = page_address(rqstp->rq_respages[rqstp->rq_resused++]); + args->buffer = page_address(*(rqstp->rq_next_page++)); return xdr_argsize_check(rqstp, p); } @@ -396,7 +397,7 @@ nfssvc_decode_readdirargs(struct svc_rqst *rqstp, __be32 *p, if (args->count > PAGE_SIZE) args->count = PAGE_SIZE; - args->buffer = page_address(rqstp->rq_respages[rqstp->rq_resused++]); + args->buffer = page_address(*(rqstp->rq_next_page++)); return xdr_argsize_check(rqstp, p); } diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c index b31e46eeb026..f0a6d88d7fff 100644 --- a/fs/nfsd/vfs.c +++ 
b/fs/nfsd/vfs.c @@ -886,7 +886,7 @@ nfsd_splice_actor(struct pipe_inode_info *pipe, struct pipe_buffer *buf, struct splice_desc *sd) { struct svc_rqst *rqstp = sd->u.data; - struct page **pp = rqstp->rq_respages + rqstp->rq_resused; + struct page **pp = rqstp->rq_next_page; struct page *page = buf->page; size_t size; @@ -894,17 +894,15 @@ nfsd_splice_actor(struct pipe_inode_info *pipe, struct pipe_buffer *buf, if (rqstp->rq_res.page_len == 0) { get_page(page); - put_page(*pp); - *pp = page; - rqstp->rq_resused++; + put_page(*rqstp->rq_next_page); + *(rqstp->rq_next_page++) = page; rqstp->rq_res.page_base = buf->offset; rqstp->rq_res.page_len = size; } else if (page != pp[-1]) { get_page(page); - if (*pp) - put_page(*pp); - *pp = page; - rqstp->rq_resused++; + if (*rqstp->rq_next_page) + put_page(*rqstp->rq_next_page); + *(rqstp->rq_next_page++) = page; rqstp->rq_res.page_len += size; } else rqstp->rq_res.page_len += size; @@ -936,8 +934,8 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file, .u.data = rqstp, }; - WARN_ON_ONCE(rqstp->rq_resused != 1); - rqstp->rq_resused = 1; + WARN_ON_ONCE(rqstp->rq_next_page != rqstp->rq_respages + 1); + rqstp->rq_next_page = rqstp->rq_respages + 1; host_err = splice_direct_to_actor(file, &sd, nfsd_direct_splice_actor); } else { oldfs = get_fs(); diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h index d83db800fe02..676ddf53b3ee 100644 --- a/include/linux/sunrpc/svc.h +++ b/include/linux/sunrpc/svc.h @@ -243,6 +243,7 @@ struct svc_rqst { struct page * rq_pages[RPCSVC_MAXPAGES]; struct page * *rq_respages; /* points into rq_pages */ int rq_resused; /* number of pages used for result */ + struct page * *rq_next_page; /* next reply page to use */ struct kvec rq_vec[RPCSVC_MAXPAGES]; /* generally useful.. */ @@ -338,9 +339,8 @@ xdr_ressize_check(struct svc_rqst *rqstp, __be32 *p) static inline void svc_free_res_pages(struct svc_rqst *rqstp) { - while (rqstp->rq_resused) { - struct page **pp = (rqstp->rq_respages + - --rqstp->rq_resused); + while (rqstp->rq_next_page != rqstp->rq_respages) { + struct page **pp = --rqstp->rq_next_page; if (*pp) { put_page(*pp); *pp = NULL; diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c index 529400d59755..c6abf1a6ba95 100644 --- a/net/sunrpc/svc.c +++ b/net/sunrpc/svc.c @@ -1297,7 +1297,7 @@ svc_process(struct svc_rqst *rqstp) * Setup response xdr_buf. 
* Initially it has just one page */ - rqstp->rq_resused = 1; + rqstp->rq_next_page = &rqstp->rq_respages[1]; resv->iov_base = page_address(rqstp->rq_respages[0]); resv->iov_len = 0; rqstp->rq_res.pages = rqstp->rq_respages + 1; diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index d8e5adfeac30..dcd5669c5154 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c @@ -601,6 +601,7 @@ static int svc_udp_recvfrom(struct svc_rqst *rqstp) rqstp->rq_respages = rqstp->rq_pages + 1 + DIV_ROUND_UP(rqstp->rq_arg.page_len, PAGE_SIZE); } + rqstp->rq_next_page = rqstp->rq_respages+1; if (serv->sv_stats) serv->sv_stats->netudpcnt++; @@ -1066,6 +1067,7 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp) svsk->sk_datalen + want); rqstp->rq_respages = &rqstp->rq_pages[pnum]; + rqstp->rq_next_page = rqstp->rq_respages + 1; /* Now receive data */ len = svc_partial_recvfrom(rqstp, vec, pnum, want, base); diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c index 41cb63b623df..0ce75524ed21 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c +++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c @@ -521,11 +521,11 @@ next_sge: rqstp->rq_pages[ch_no] = NULL; /* - * Detach res pages. svc_release must see a resused count of - * zero or it will attempt to put them. + * Detach res pages. If svc_release sees any it will attempt to + * put them. */ - while (rqstp->rq_resused) - rqstp->rq_respages[--rqstp->rq_resused] = NULL; + while (rqstp->rq_next_page != rqstp->rq_respages) + *(--rqstp->rq_next_page) = NULL; return err; } @@ -550,7 +550,7 @@ static int rdma_read_complete(struct svc_rqst *rqstp, /* rq_respages starts after the last arg page */ rqstp->rq_respages = &rqstp->rq_arg.pages[page_no]; - rqstp->rq_resused = 0; + rqstp->rq_next_page = &rqstp->rq_arg.pages[page_no]; /* Rebuild rq_arg head and tail. */ rqstp->rq_arg.head[0] = head->arg.head[0]; diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c index 42eb7ba0b903..c1d124dc772b 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c @@ -548,6 +548,7 @@ static int send_reply(struct svcxprt_rdma *rdma, int sge_no; int sge_bytes; int page_no; + int pages; int ret; /* Post a recv buffer to handle another request. */ @@ -611,7 +612,8 @@ static int send_reply(struct svcxprt_rdma *rdma, * respages array. They are our pages until the I/O * completes. */ - for (page_no = 0; page_no < rqstp->rq_resused; page_no++) { + pages = rqstp->rq_next_page - rqstp->rq_respages; + for (page_no = 0; page_no < pages; page_no++) { ctxt->pages[page_no+1] = rqstp->rq_respages[page_no]; ctxt->count++; rqstp->rq_respages[page_no] = NULL; -- cgit v1.2.3 From 06ca287dbac9cc19d04ac2901b8c4882c03795ff Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Tue, 16 Oct 2012 23:56:14 +1030 Subject: virtio: move queue_index and num_free fields into core struct virtqueue. They're generic concepts, so hoist them. This also avoids accessor functions (though kept around for merge with DaveM's net tree). This goes even further than Jason Wang's 17bb6d4088 patch ("virtio-ring: move queue_index to vring_virtqueue") which moved the queue_index from the specific transport. Acked-by: Michael S. 
Tsirkin Signed-off-by: Rusty Russell --- drivers/virtio/virtio_mmio.c | 4 ++-- drivers/virtio/virtio_pci.c | 6 ++---- drivers/virtio/virtio_ring.c | 34 +++++++++++----------------------- include/linux/virtio.h | 14 +++++++++++++- 4 files changed, 28 insertions(+), 30 deletions(-) (limited to 'include/linux') diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c index 6b1b7e184939..5a0e1d32ce13 100644 --- a/drivers/virtio/virtio_mmio.c +++ b/drivers/virtio/virtio_mmio.c @@ -225,7 +225,7 @@ static void vm_notify(struct virtqueue *vq) /* We write the queue's selector into the notification register to * signal the other end */ - writel(virtqueue_get_queue_index(vq), vm_dev->base + VIRTIO_MMIO_QUEUE_NOTIFY); + writel(vq->index, vm_dev->base + VIRTIO_MMIO_QUEUE_NOTIFY); } /* Notify all virtqueues on an interrupt. */ @@ -266,7 +266,7 @@ static void vm_del_vq(struct virtqueue *vq) struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vq->vdev); struct virtio_mmio_vq_info *info = vq->priv; unsigned long flags, size; - unsigned int index = virtqueue_get_queue_index(vq); + unsigned int index = vq->index; spin_lock_irqsave(&vm_dev->lock, flags); list_del(&info->node); diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c index b59237c1d540..e3ecc94591ad 100644 --- a/drivers/virtio/virtio_pci.c +++ b/drivers/virtio/virtio_pci.c @@ -203,8 +203,7 @@ static void vp_notify(struct virtqueue *vq) /* we write the queue's selector into the notification register to * signal the other end */ - iowrite16(virtqueue_get_queue_index(vq), - vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NOTIFY); + iowrite16(vq->index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NOTIFY); } /* Handle a configuration change: Tell driver if it wants to know. */ @@ -479,8 +478,7 @@ static void vp_del_vq(struct virtqueue *vq) list_del(&info->node); spin_unlock_irqrestore(&vp_dev->lock, flags); - iowrite16(virtqueue_get_queue_index(vq), - vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL); + iowrite16(vq->index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL); if (vp_dev->msix_enabled) { iowrite16(VIRTIO_MSI_NO_VECTOR, diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c index 286c30cb393d..33a4ce009bcc 100644 --- a/drivers/virtio/virtio_ring.c +++ b/drivers/virtio/virtio_ring.c @@ -93,8 +93,6 @@ struct vring_virtqueue /* Host publishes avail event idx */ bool event; - /* Number of free buffers */ - unsigned int num_free; /* Head of free buffer list. */ unsigned int free_head; /* Number we've added since last sync. */ @@ -106,9 +104,6 @@ struct vring_virtqueue /* How to notify other side. FIXME: commonalize hcalls! */ void (*notify)(struct virtqueue *vq); - /* Index of the queue */ - int queue_index; - #ifdef DEBUG /* They're supposed to lock for us. */ unsigned int in_use; @@ -167,7 +162,7 @@ static int vring_add_indirect(struct vring_virtqueue *vq, desc[i-1].next = 0; /* We're about to use a buffer */ - vq->num_free--; + vq->vq.num_free--; /* Use a single buffer which doesn't continue */ head = vq->free_head; @@ -181,13 +176,6 @@ static int vring_add_indirect(struct vring_virtqueue *vq, return head; } -int virtqueue_get_queue_index(struct virtqueue *_vq) -{ - struct vring_virtqueue *vq = to_vvq(_vq); - return vq->queue_index; -} -EXPORT_SYMBOL_GPL(virtqueue_get_queue_index); - /** * virtqueue_add_buf - expose buffer to other end * @vq: the struct virtqueue we're talking about. 
@@ -235,7 +223,7 @@ int virtqueue_add_buf(struct virtqueue *_vq, /* If the host supports indirect descriptor tables, and we have multiple * buffers, then go indirect. FIXME: tune this threshold */ - if (vq->indirect && (out + in) > 1 && vq->num_free) { + if (vq->indirect && (out + in) > 1 && vq->vq.num_free) { head = vring_add_indirect(vq, sg, out, in, gfp); if (likely(head >= 0)) goto add_head; @@ -244,9 +232,9 @@ int virtqueue_add_buf(struct virtqueue *_vq, BUG_ON(out + in > vq->vring.num); BUG_ON(out + in == 0); - if (vq->num_free < out + in) { + if (vq->vq.num_free < out + in) { pr_debug("Can't add buf len %i - avail = %i\n", - out + in, vq->num_free); + out + in, vq->vq.num_free); /* FIXME: for historical reasons, we force a notify here if * there are outgoing parts to the buffer. Presumably the * host should service the ring ASAP. */ @@ -257,7 +245,7 @@ int virtqueue_add_buf(struct virtqueue *_vq, } /* We're about to use some buffers from the free list. */ - vq->num_free -= out + in; + vq->vq.num_free -= out + in; head = vq->free_head; for (i = vq->free_head; out; i = vq->vring.desc[i].next, out--) { @@ -303,7 +291,7 @@ add_head: pr_debug("Added buffer head %i to %p\n", head, vq); END_USE(vq); - return vq->num_free; + return vq->vq.num_free; } EXPORT_SYMBOL_GPL(virtqueue_add_buf); @@ -400,13 +388,13 @@ static void detach_buf(struct vring_virtqueue *vq, unsigned int head) while (vq->vring.desc[i].flags & VRING_DESC_F_NEXT) { i = vq->vring.desc[i].next; - vq->num_free++; + vq->vq.num_free++; } vq->vring.desc[i].next = vq->free_head; vq->free_head = head; /* Plus final descriptor */ - vq->num_free++; + vq->vq.num_free++; } static inline bool more_used(const struct vring_virtqueue *vq) @@ -606,7 +594,7 @@ void *virtqueue_detach_unused_buf(struct virtqueue *_vq) return buf; } /* That should have freed everything. */ - BUG_ON(vq->num_free != vq->vring.num); + BUG_ON(vq->vq.num_free != vq->vring.num); END_USE(vq); return NULL; @@ -660,12 +648,13 @@ struct virtqueue *vring_new_virtqueue(unsigned int index, vq->vq.callback = callback; vq->vq.vdev = vdev; vq->vq.name = name; + vq->vq.num_free = num; + vq->vq.index = index; vq->notify = notify; vq->weak_barriers = weak_barriers; vq->broken = false; vq->last_used_idx = 0; vq->num_added = 0; - vq->queue_index = index; list_add_tail(&vq->vq.list, &vdev->vqs); #ifdef DEBUG vq->in_use = false; @@ -680,7 +669,6 @@ struct virtqueue *vring_new_virtqueue(unsigned int index, vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT; /* Put everything in free lists. */ - vq->num_free = num; vq->free_head = 0; for (i = 0; i < num-1; i++) { vq->vring.desc[i].next = i+1; diff --git a/include/linux/virtio.h b/include/linux/virtio.h index 533b1157f22e..4b8f17d90458 100644 --- a/include/linux/virtio.h +++ b/include/linux/virtio.h @@ -16,12 +16,20 @@ * @name: the name of this virtqueue (mainly for debugging) * @vdev: the virtio device this queue was created for. * @priv: a pointer for the virtqueue implementation to use. + * @index: the zero-based ordinal number for this queue. + * @num_free: number of elements we expect to be able to fit. + * + * A note on @num_free: with indirect buffers, each buffer needs one + * element in the queue, otherwise a buffer will need one element per + * sg element. 
*/ struct virtqueue { struct list_head list; void (*callback)(struct virtqueue *vq); const char *name; struct virtio_device *vdev; + unsigned int index; + unsigned int num_free; void *priv; }; @@ -50,7 +58,11 @@ void *virtqueue_detach_unused_buf(struct virtqueue *vq); unsigned int virtqueue_get_vring_size(struct virtqueue *vq); -int virtqueue_get_queue_index(struct virtqueue *vq); +/* FIXME: Obsolete accessor, but required for virtio_net merge. */ +static inline unsigned int virtqueue_get_queue_index(struct virtqueue *vq) +{ + return vq->index; +} /** * virtio_device - representation of a device using virtio -- cgit v1.2.3 From 9bffdca8c64a72ac54c47a552734ab457bc720d4 Mon Sep 17 00:00:00 2001 From: Wanlong Gao Date: Tue, 11 Dec 2012 11:04:50 +1030 Subject: virtio: use dev_to_virtio wrapper in virtio Use dev_to_virtio wrapper in virtio to make code clearly. Cc: Rusty Russell Cc: "Michael S. Tsirkin" Signed-off-by: Wanlong Gao Signed-off-by: Rusty Russell --- drivers/virtio/virtio.c | 19 +++++++++---------- include/linux/virtio.h | 6 +++++- 2 files changed, 14 insertions(+), 11 deletions(-) (limited to 'include/linux') diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c index 1e8659ca27ef..1346ae8e14f3 100644 --- a/drivers/virtio/virtio.c +++ b/drivers/virtio/virtio.c @@ -10,33 +10,32 @@ static DEFINE_IDA(virtio_index_ida); static ssize_t device_show(struct device *_d, struct device_attribute *attr, char *buf) { - struct virtio_device *dev = container_of(_d,struct virtio_device,dev); + struct virtio_device *dev = dev_to_virtio(_d); return sprintf(buf, "0x%04x\n", dev->id.device); } static ssize_t vendor_show(struct device *_d, struct device_attribute *attr, char *buf) { - struct virtio_device *dev = container_of(_d,struct virtio_device,dev); + struct virtio_device *dev = dev_to_virtio(_d); return sprintf(buf, "0x%04x\n", dev->id.vendor); } static ssize_t status_show(struct device *_d, struct device_attribute *attr, char *buf) { - struct virtio_device *dev = container_of(_d,struct virtio_device,dev); + struct virtio_device *dev = dev_to_virtio(_d); return sprintf(buf, "0x%08x\n", dev->config->get_status(dev)); } static ssize_t modalias_show(struct device *_d, struct device_attribute *attr, char *buf) { - struct virtio_device *dev = container_of(_d,struct virtio_device,dev); - + struct virtio_device *dev = dev_to_virtio(_d); return sprintf(buf, "virtio:d%08Xv%08X\n", dev->id.device, dev->id.vendor); } static ssize_t features_show(struct device *_d, struct device_attribute *attr, char *buf) { - struct virtio_device *dev = container_of(_d, struct virtio_device, dev); + struct virtio_device *dev = dev_to_virtio(_d); unsigned int i; ssize_t len = 0; @@ -71,7 +70,7 @@ static inline int virtio_id_match(const struct virtio_device *dev, static int virtio_dev_match(struct device *_dv, struct device_driver *_dr) { unsigned int i; - struct virtio_device *dev = container_of(_dv,struct virtio_device,dev); + struct virtio_device *dev = dev_to_virtio(_dv); const struct virtio_device_id *ids; ids = container_of(_dr, struct virtio_driver, driver)->id_table; @@ -83,7 +82,7 @@ static int virtio_dev_match(struct device *_dv, struct device_driver *_dr) static int virtio_uevent(struct device *_dv, struct kobj_uevent_env *env) { - struct virtio_device *dev = container_of(_dv,struct virtio_device,dev); + struct virtio_device *dev = dev_to_virtio(_dv); return add_uevent_var(env, "MODALIAS=virtio:d%08Xv%08X", dev->id.device, dev->id.vendor); @@ -111,7 +110,7 @@ 
EXPORT_SYMBOL_GPL(virtio_check_driver_offered_feature); static int virtio_dev_probe(struct device *_d) { int err, i; - struct virtio_device *dev = container_of(_d,struct virtio_device,dev); + struct virtio_device *dev = dev_to_virtio(_d); struct virtio_driver *drv = container_of(dev->dev.driver, struct virtio_driver, driver); u32 device_features; @@ -152,7 +151,7 @@ static int virtio_dev_probe(struct device *_d) static int virtio_dev_remove(struct device *_d) { - struct virtio_device *dev = container_of(_d,struct virtio_device,dev); + struct virtio_device *dev = dev_to_virtio(_d); struct virtio_driver *drv = container_of(dev->dev.driver, struct virtio_driver, driver); diff --git a/include/linux/virtio.h b/include/linux/virtio.h index 4b8f17d90458..5f1f80c76468 100644 --- a/include/linux/virtio.h +++ b/include/linux/virtio.h @@ -85,7 +85,11 @@ struct virtio_device { void *priv; }; -#define dev_to_virtio(dev) container_of(dev, struct virtio_device, dev) +static inline struct virtio_device *dev_to_virtio(struct device *_dev) +{ + return container_of(_dev, struct virtio_device, dev); +} + int register_virtio_device(struct virtio_device *dev); void unregister_virtio_device(struct virtio_device *dev); -- cgit v1.2.3 From 9a2bdcc85d28506d4e5d4a9618fb133a3f40945d Mon Sep 17 00:00:00 2001 From: Wanlong Gao Date: Mon, 10 Dec 2012 16:38:33 +0800 Subject: virtio: add drv_to_virtio to make code clearly Add drv_to_virtio wrapper to get virtio_driver from device_driver. Cc: Rusty Russell Cc: "Michael S. Tsirkin" Signed-off-by: Wanlong Gao Signed-off-by: Rusty Russell --- drivers/virtio/virtio.c | 11 ++++------- include/linux/virtio.h | 5 +++++ 2 files changed, 9 insertions(+), 7 deletions(-) (limited to 'include/linux') diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c index 1346ae8e14f3..1c01ac3fad08 100644 --- a/drivers/virtio/virtio.c +++ b/drivers/virtio/virtio.c @@ -73,7 +73,7 @@ static int virtio_dev_match(struct device *_dv, struct device_driver *_dr) struct virtio_device *dev = dev_to_virtio(_dv); const struct virtio_device_id *ids; - ids = container_of(_dr, struct virtio_driver, driver)->id_table; + ids = drv_to_virtio(_dr)->id_table; for (i = 0; ids[i].device; i++) if (virtio_id_match(dev, &ids[i])) return 1; @@ -97,8 +97,7 @@ void virtio_check_driver_offered_feature(const struct virtio_device *vdev, unsigned int fbit) { unsigned int i; - struct virtio_driver *drv = container_of(vdev->dev.driver, - struct virtio_driver, driver); + struct virtio_driver *drv = drv_to_virtio(vdev->dev.driver); for (i = 0; i < drv->feature_table_size; i++) if (drv->feature_table[i] == fbit) @@ -111,8 +110,7 @@ static int virtio_dev_probe(struct device *_d) { int err, i; struct virtio_device *dev = dev_to_virtio(_d); - struct virtio_driver *drv = container_of(dev->dev.driver, - struct virtio_driver, driver); + struct virtio_driver *drv = drv_to_virtio(dev->dev.driver); u32 device_features; /* We have a driver! 
*/ @@ -152,8 +150,7 @@ static int virtio_dev_probe(struct device *_d) static int virtio_dev_remove(struct device *_d) { struct virtio_device *dev = dev_to_virtio(_d); - struct virtio_driver *drv = container_of(dev->dev.driver, - struct virtio_driver, driver); + struct virtio_driver *drv = drv_to_virtio(dev->dev.driver); drv->remove(dev); diff --git a/include/linux/virtio.h b/include/linux/virtio.h index 5f1f80c76468..cf8adb1f5b2c 100644 --- a/include/linux/virtio.h +++ b/include/linux/virtio.h @@ -119,6 +119,11 @@ struct virtio_driver { #endif }; +static inline struct virtio_driver *drv_to_virtio(struct device_driver *drv) +{ + return container_of(drv, struct virtio_driver, driver); +} + int register_virtio_driver(struct virtio_driver *drv); void unregister_virtio_driver(struct virtio_driver *drv); #endif /* _LINUX_VIRTIO_H */ -- cgit v1.2.3 From 7a64bf05b2a6fe3703062d13d389e3eb904741c6 Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Tue, 18 Dec 2012 14:21:51 -0800 Subject: mm: add a __GFP_KMEMCG flag This flag is used to indicate to the callees that this allocation is a kernel allocation in process context, and should be accounted to current's memcg. Signed-off-by: Glauber Costa Acked-by: Johannes Weiner Acked-by: Rik van Riel Acked-by: Mel Gorman Acked-by: Kamezawa Hiroyuki Acked-by: Michal Hocko Acked-by: Christoph Lameter Cc: Pekka Enberg Cc: Suleiman Souhlal Cc: Tejun Heo Cc: David Rientjes Cc: Frederic Weisbecker Cc: Greg Thelen Cc: JoonSoo Kim Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/gfp.h | 2 ++ include/trace/events/gfpflags.h | 1 + 2 files changed, 3 insertions(+) (limited to 'include/linux') diff --git a/include/linux/gfp.h b/include/linux/gfp.h index f74856e17e48..643c9a6f7f34 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -30,6 +30,7 @@ struct vm_area_struct; #define ___GFP_HARDWALL 0x20000u #define ___GFP_THISNODE 0x40000u #define ___GFP_RECLAIMABLE 0x80000u +#define ___GFP_KMEMCG 0x100000u #define ___GFP_NOTRACK 0x200000u #define ___GFP_NO_KSWAPD 0x400000u #define ___GFP_OTHER_NODE 0x800000u @@ -89,6 +90,7 @@ struct vm_area_struct; #define __GFP_NO_KSWAPD ((__force gfp_t)___GFP_NO_KSWAPD) #define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */ +#define __GFP_KMEMCG ((__force gfp_t)___GFP_KMEMCG) /* Allocation comes from a memcg-accounted resource */ #define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) /* Allocator intends to dirty page */ /* diff --git a/include/trace/events/gfpflags.h b/include/trace/events/gfpflags.h index d6fd8e5b14b7..1eddbf1557f2 100644 --- a/include/trace/events/gfpflags.h +++ b/include/trace/events/gfpflags.h @@ -34,6 +34,7 @@ {(unsigned long)__GFP_HARDWALL, "GFP_HARDWALL"}, \ {(unsigned long)__GFP_THISNODE, "GFP_THISNODE"}, \ {(unsigned long)__GFP_RECLAIMABLE, "GFP_RECLAIMABLE"}, \ + {(unsigned long)__GFP_KMEMCG, "GFP_KMEMCG"}, \ {(unsigned long)__GFP_MOVABLE, "GFP_MOVABLE"}, \ {(unsigned long)__GFP_NOTRACK, "GFP_NOTRACK"}, \ {(unsigned long)__GFP_NO_KSWAPD, "GFP_NO_KSWAPD"}, \ -- cgit v1.2.3 From 7ae1e1d0f8ac2927ed7e3ca6d15e42d485903459 Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Tue, 18 Dec 2012 14:21:56 -0800 Subject: memcg: kmem controller infrastructure Introduce infrastructure for tracking kernel memory pages to a given memcg. This will happen whenever the caller includes the flag __GFP_KMEMCG flag, and the task belong to a memcg other than the root. In memcontrol.h those functions are wrapped in inline acessors. 
The idea is to later on, patch those with static branches, so we don't incur any overhead when no mem cgroups with limited kmem are being used. Users of this functionality shall interact with the memcg core code through the following functions: memcg_kmem_newpage_charge: will return true if the group can handle the allocation. At this point, struct page is not yet allocated. memcg_kmem_commit_charge: will either revert the charge, if struct page allocation failed, or embed memcg information into page_cgroup. memcg_kmem_uncharge_page: called at free time, will revert the charge. Signed-off-by: Glauber Costa Acked-by: Michal Hocko Acked-by: Kamezawa Hiroyuki Cc: Christoph Lameter Cc: Pekka Enberg Cc: Johannes Weiner Cc: Tejun Heo Cc: David Rientjes Cc: Frederic Weisbecker Cc: Greg Thelen Cc: JoonSoo Kim Cc: Mel Gorman Cc: Rik van Riel Cc: Suleiman Souhlal Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/memcontrol.h | 110 +++++++++++++++++++++++++++++ mm/memcontrol.c | 170 +++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 280 insertions(+) (limited to 'include/linux') diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index e98a74c0c9c0..afa2ad40457e 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -21,6 +21,7 @@ #define _LINUX_MEMCONTROL_H #include #include +#include struct mem_cgroup; struct page_cgroup; @@ -414,5 +415,114 @@ static inline void sock_release_memcg(struct sock *sk) { } #endif /* CONFIG_INET && CONFIG_MEMCG_KMEM */ + +#ifdef CONFIG_MEMCG_KMEM +static inline bool memcg_kmem_enabled(void) +{ + return true; +} + +/* + * In general, we'll do everything in our power to not incur in any overhead + * for non-memcg users for the kmem functions. Not even a function call, if we + * can avoid it. + * + * Therefore, we'll inline all those functions so that in the best case, we'll + * see that kmemcg is off for everybody and proceed quickly. If it is on, + * we'll still do most of the flag checking inline. We check a lot of + * conditions, but because they are pretty simple, they are expected to be + * fast. + */ +bool __memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, + int order); +void __memcg_kmem_commit_charge(struct page *page, + struct mem_cgroup *memcg, int order); +void __memcg_kmem_uncharge_pages(struct page *page, int order); + +/** + * memcg_kmem_newpage_charge: verify if a new kmem allocation is allowed. + * @gfp: the gfp allocation flags. + * @memcg: a pointer to the memcg this was charged against. + * @order: allocation order. + * + * returns true if the memcg where the current task belongs can hold this + * allocation. + * + * We return true automatically if this allocation is not to be accounted to + * any memcg. + */ +static inline bool +memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order) +{ + if (!memcg_kmem_enabled()) + return true; + + /* + * __GFP_NOFAIL allocations will move on even if charging is not + * possible. Therefore we don't even try, and have this allocation + * unaccounted. We could in theory charge it with + * res_counter_charge_nofail, but we hope those allocations are rare, + * and won't be worth the trouble. + */ + if (!(gfp & __GFP_KMEMCG) || (gfp & __GFP_NOFAIL)) + return true; + if (in_interrupt() || (!current->mm) || (current->flags & PF_KTHREAD)) + return true; + + /* If the test is dying, just let it go. 
*/ + if (unlikely(fatal_signal_pending(current))) + return true; + + return __memcg_kmem_newpage_charge(gfp, memcg, order); +} + +/** + * memcg_kmem_uncharge_pages: uncharge pages from memcg + * @page: pointer to struct page being freed + * @order: allocation order. + * + * there is no need to specify memcg here, since it is embedded in page_cgroup + */ +static inline void +memcg_kmem_uncharge_pages(struct page *page, int order) +{ + if (memcg_kmem_enabled()) + __memcg_kmem_uncharge_pages(page, order); +} + +/** + * memcg_kmem_commit_charge: embeds correct memcg in a page + * @page: pointer to struct page recently allocated + * @memcg: the memcg structure we charged against + * @order: allocation order. + * + * Needs to be called after memcg_kmem_newpage_charge, regardless of success or + * failure of the allocation. if @page is NULL, this function will revert the + * charges. Otherwise, it will commit the memcg given by @memcg to the + * corresponding page_cgroup. + */ +static inline void +memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order) +{ + if (memcg_kmem_enabled() && memcg) + __memcg_kmem_commit_charge(page, memcg, order); +} + +#else +static inline bool +memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order) +{ + return true; +} + +static inline void memcg_kmem_uncharge_pages(struct page *page, int order) +{ +} + +static inline void +memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order) +{ +} +#endif /* CONFIG_MEMCG_KMEM */ #endif /* _LINUX_MEMCONTROL_H */ diff --git a/mm/memcontrol.c b/mm/memcontrol.c index bba1cb4bbb82..b9afa060b8d6 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -10,6 +10,10 @@ * Copyright (C) 2009 Nokia Corporation * Author: Kirill A. Shutemov * + * Kernel Memory Controller + * Copyright (C) 2012 Parallels Inc. and Google Inc. + * Authors: Glauber Costa and Suleiman Souhlal + * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or @@ -2661,6 +2665,172 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg, memcg_check_events(memcg, page); } +#ifdef CONFIG_MEMCG_KMEM +static inline bool memcg_can_account_kmem(struct mem_cgroup *memcg) +{ + return !mem_cgroup_disabled() && !mem_cgroup_is_root(memcg) && + (memcg->kmem_account_flags & KMEM_ACCOUNTED_MASK); +} + +static int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp, u64 size) +{ + struct res_counter *fail_res; + struct mem_cgroup *_memcg; + int ret = 0; + bool may_oom; + + ret = res_counter_charge(&memcg->kmem, size, &fail_res); + if (ret) + return ret; + + /* + * Conditions under which we can wait for the oom_killer. Those are + * the same conditions tested by the core page allocator + */ + may_oom = (gfp & __GFP_FS) && !(gfp & __GFP_NORETRY); + + _memcg = memcg; + ret = __mem_cgroup_try_charge(NULL, gfp, size >> PAGE_SHIFT, + &_memcg, may_oom); + + if (ret == -EINTR) { + /* + * __mem_cgroup_try_charge() chosed to bypass to root due to + * OOM kill or fatal signal. Since our only options are to + * either fail the allocation or charge it to this cgroup, do + * it as a temporary condition. But we can't fail. From a + * kmem/slab perspective, the cache has already been selected, + * by mem_cgroup_kmem_get_cache(), so it is too late to change + * our minds. 
+ * + * This condition will only trigger if the task entered + * memcg_charge_kmem in a sane state, but was OOM-killed during + * __mem_cgroup_try_charge() above. Tasks that were already + * dying when the allocation triggers should have been already + * directed to the root cgroup in memcontrol.h + */ + res_counter_charge_nofail(&memcg->res, size, &fail_res); + if (do_swap_account) + res_counter_charge_nofail(&memcg->memsw, size, + &fail_res); + ret = 0; + } else if (ret) + res_counter_uncharge(&memcg->kmem, size); + + return ret; +} + +static void memcg_uncharge_kmem(struct mem_cgroup *memcg, u64 size) +{ + res_counter_uncharge(&memcg->kmem, size); + res_counter_uncharge(&memcg->res, size); + if (do_swap_account) + res_counter_uncharge(&memcg->memsw, size); +} + +/* + * We need to verify if the allocation against current->mm->owner's memcg is + * possible for the given order. But the page is not allocated yet, so we'll + * need a further commit step to do the final arrangements. + * + * It is possible for the task to switch cgroups in this mean time, so at + * commit time, we can't rely on task conversion any longer. We'll then use + * the handle argument to return to the caller which cgroup we should commit + * against. We could also return the memcg directly and avoid the pointer + * passing, but a boolean return value gives better semantics considering + * the compiled-out case as well. + * + * Returning true means the allocation is possible. + */ +bool +__memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **_memcg, int order) +{ + struct mem_cgroup *memcg; + int ret; + + *_memcg = NULL; + memcg = try_get_mem_cgroup_from_mm(current->mm); + + /* + * very rare case described in mem_cgroup_from_task. Unfortunately there + * isn't much we can do without complicating this too much, and it would + * be gfp-dependent anyway. Just let it go + */ + if (unlikely(!memcg)) + return true; + + if (!memcg_can_account_kmem(memcg)) { + css_put(&memcg->css); + return true; + } + + mem_cgroup_get(memcg); + + ret = memcg_charge_kmem(memcg, gfp, PAGE_SIZE << order); + if (!ret) + *_memcg = memcg; + else + mem_cgroup_put(memcg); + + css_put(&memcg->css); + return (ret == 0); +} + +void __memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, + int order) +{ + struct page_cgroup *pc; + + VM_BUG_ON(mem_cgroup_is_root(memcg)); + + /* The page allocation failed. Revert */ + if (!page) { + memcg_uncharge_kmem(memcg, PAGE_SIZE << order); + mem_cgroup_put(memcg); + return; + } + + pc = lookup_page_cgroup(page); + lock_page_cgroup(pc); + pc->mem_cgroup = memcg; + SetPageCgroupUsed(pc); + unlock_page_cgroup(pc); +} + +void __memcg_kmem_uncharge_pages(struct page *page, int order) +{ + struct mem_cgroup *memcg = NULL; + struct page_cgroup *pc; + + + pc = lookup_page_cgroup(page); + /* + * Fast unlocked return. Theoretically might have changed, have to + * check again after locking. 
+ */ + if (!PageCgroupUsed(pc)) + return; + + lock_page_cgroup(pc); + if (PageCgroupUsed(pc)) { + memcg = pc->mem_cgroup; + ClearPageCgroupUsed(pc); + } + unlock_page_cgroup(pc); + + /* + * We trust that only if there is a memcg associated with the page, it + * is a valid allocation + */ + if (!memcg) + return; + + VM_BUG_ON(mem_cgroup_is_root(memcg)); + memcg_uncharge_kmem(memcg, PAGE_SIZE << order); + mem_cgroup_put(memcg); +} +#endif /* CONFIG_MEMCG_KMEM */ + #ifdef CONFIG_TRANSPARENT_HUGEPAGE #define PCGF_NOCOPY_AT_SPLIT (1 << PCG_LOCK | 1 << PCG_MIGRATION) -- cgit v1.2.3 From 6a1a0d3b625a4091e7a0eb249aefc6a644385149 Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Tue, 18 Dec 2012 14:22:00 -0800 Subject: mm: allocate kernel pages to the right memcg When a process tries to allocate a page with the __GFP_KMEMCG flag, the page allocator will call the corresponding memcg functions to validate the allocation. Tasks in the root memcg can always proceed. To avoid adding markers to the page - and a kmem flag that would necessarily follow, as much as doing page_cgroup lookups for no reason, whoever is marking its allocations with __GFP_KMEMCG flag is responsible for telling the page allocator that this is such an allocation at free_pages() time. This is done by the invocation of __free_accounted_pages() and free_accounted_pages(). Signed-off-by: Glauber Costa Acked-by: Michal Hocko Acked-by: Mel Gorman Acked-by: Kamezawa Hiroyuki Acked-by: David Rientjes Cc: Christoph Lameter Cc: Pekka Enberg Cc: Johannes Weiner Cc: Suleiman Souhlal Cc: Tejun Heo Cc: Frederic Weisbecker Cc: Greg Thelen Cc: JoonSoo Kim Cc: Rik van Riel Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/gfp.h | 3 +++ mm/page_alloc.c | 35 +++++++++++++++++++++++++++++++++++ 2 files changed, 38 insertions(+) (limited to 'include/linux') diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 643c9a6f7f34..0f615eb23d05 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -367,6 +367,9 @@ extern void free_pages(unsigned long addr, unsigned int order); extern void free_hot_cold_page(struct page *page, int cold); extern void free_hot_cold_page_list(struct list_head *list, int cold); +extern void __free_memcg_kmem_pages(struct page *page, unsigned int order); +extern void free_memcg_kmem_pages(unsigned long addr, unsigned int order); + #define __free_page(page) __free_pages((page), 0) #define free_page(addr) free_pages((addr), 0) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 62496edbd8dd..2ad2ad168efe 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -2612,6 +2612,7 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int migratetype = allocflags_to_migratetype(gfp_mask); unsigned int cpuset_mems_cookie; int alloc_flags = ALLOC_WMARK_LOW|ALLOC_CPUSET; + struct mem_cgroup *memcg = NULL; gfp_mask &= gfp_allowed_mask; @@ -2630,6 +2631,13 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, if (unlikely(!zonelist->_zonerefs->zone)) return NULL; + /* + * Will only have any effect when __GFP_KMEMCG is set. 
This is + * verified in the (always inline) callee + */ + if (!memcg_kmem_newpage_charge(gfp_mask, &memcg, order)) + return NULL; + retry_cpuset: cpuset_mems_cookie = get_mems_allowed(); @@ -2665,6 +2673,8 @@ out: if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page)) goto retry_cpuset; + memcg_kmem_commit_charge(page, memcg, order); + return page; } EXPORT_SYMBOL(__alloc_pages_nodemask); @@ -2717,6 +2727,31 @@ void free_pages(unsigned long addr, unsigned int order) EXPORT_SYMBOL(free_pages); +/* + * __free_memcg_kmem_pages and free_memcg_kmem_pages will free + * pages allocated with __GFP_KMEMCG. + * + * Those pages are accounted to a particular memcg, embedded in the + * corresponding page_cgroup. To avoid adding a hit in the allocator to search + * for that information only to find out that it is NULL for users who have no + * interest in that whatsoever, we provide these functions. + * + * The caller knows better which flags it relies on. + */ +void __free_memcg_kmem_pages(struct page *page, unsigned int order) +{ + memcg_kmem_uncharge_pages(page, order); + __free_pages(page, order); +} + +void free_memcg_kmem_pages(unsigned long addr, unsigned int order) +{ + if (addr != 0) { + VM_BUG_ON(!virt_addr_valid((void *)addr)); + __free_memcg_kmem_pages(virt_to_page((void *)addr), order); + } +} + static void *make_alloc_exact(unsigned long addr, unsigned order, size_t size) { if (addr) { -- cgit v1.2.3 From 50bdd430c20566b13d8bc59946184b08f5875de6 Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Tue, 18 Dec 2012 14:22:04 -0800 Subject: res_counter: return amount of charges after res_counter_uncharge() It is useful to know how many charges are still left after a call to res_counter_uncharge. While it is possible to issue a res_counter_read after uncharge, this can be racy. If we need, for instance, to take some action when the counters drop down to 0, only one of the callers should see it. This is the same semantics as the atomic variables in the kernel. Since the current return value is void, we don't need to worry about anything breaking due to this change: nobody relied on that, and only users appearing from now on will be checking this value. Signed-off-by: Glauber Costa Reviewed-by: Michal Hocko Acked-by: Kamezawa Hiroyuki Acked-by: David Rientjes Cc: Johannes Weiner Cc: Suleiman Souhlal Cc: Tejun Heo Cc: Christoph Lameter Cc: Frederic Weisbecker Cc: Greg Thelen Cc: JoonSoo Kim Cc: Mel Gorman Cc: Pekka Enberg Cc: Rik van Riel Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/cgroups/resource_counter.txt | 7 ++++--- include/linux/res_counter.h | 12 +++++++----- kernel/res_counter.c | 20 +++++++++++++------- 3 files changed, 24 insertions(+), 15 deletions(-) (limited to 'include/linux') diff --git a/Documentation/cgroups/resource_counter.txt b/Documentation/cgroups/resource_counter.txt index 0c4a344e78fa..c4d99ed0b418 100644 --- a/Documentation/cgroups/resource_counter.txt +++ b/Documentation/cgroups/resource_counter.txt @@ -83,16 +83,17 @@ to work with it. res_counter->lock internally (it must be called with res_counter->lock held). The force parameter indicates whether we can bypass the limit. - e. void res_counter_uncharge[_locked] + e. u64 res_counter_uncharge[_locked] (struct res_counter *rc, unsigned long val) When a resource is released (freed) it should be de-accounted from the resource counter it was accounted to. This is called - "uncharging". + "uncharging". 
The return value of this function indicate the amount + of charges still present in the counter. The _locked routines imply that the res_counter->lock is taken. - f. void res_counter_uncharge_until + f. u64 res_counter_uncharge_until (struct res_counter *rc, struct res_counter *top, unsinged long val) diff --git a/include/linux/res_counter.h b/include/linux/res_counter.h index 6f54e40fa218..5ae8456d9670 100644 --- a/include/linux/res_counter.h +++ b/include/linux/res_counter.h @@ -125,14 +125,16 @@ int res_counter_charge_nofail(struct res_counter *counter, * * these calls check for usage underflow and show a warning on the console * _locked call expects the counter->lock to be taken + * + * returns the total charges still present in @counter. */ -void res_counter_uncharge_locked(struct res_counter *counter, unsigned long val); -void res_counter_uncharge(struct res_counter *counter, unsigned long val); +u64 res_counter_uncharge_locked(struct res_counter *counter, unsigned long val); +u64 res_counter_uncharge(struct res_counter *counter, unsigned long val); -void res_counter_uncharge_until(struct res_counter *counter, - struct res_counter *top, - unsigned long val); +u64 res_counter_uncharge_until(struct res_counter *counter, + struct res_counter *top, + unsigned long val); /** * res_counter_margin - calculate chargeable space of a counter * @cnt: the counter diff --git a/kernel/res_counter.c b/kernel/res_counter.c index 3920d593e63c..ff55247e7049 100644 --- a/kernel/res_counter.c +++ b/kernel/res_counter.c @@ -86,33 +86,39 @@ int res_counter_charge_nofail(struct res_counter *counter, unsigned long val, return __res_counter_charge(counter, val, limit_fail_at, true); } -void res_counter_uncharge_locked(struct res_counter *counter, unsigned long val) +u64 res_counter_uncharge_locked(struct res_counter *counter, unsigned long val) { if (WARN_ON(counter->usage < val)) val = counter->usage; counter->usage -= val; + return counter->usage; } -void res_counter_uncharge_until(struct res_counter *counter, - struct res_counter *top, - unsigned long val) +u64 res_counter_uncharge_until(struct res_counter *counter, + struct res_counter *top, + unsigned long val) { unsigned long flags; struct res_counter *c; + u64 ret = 0; local_irq_save(flags); for (c = counter; c != top; c = c->parent) { + u64 r; spin_lock(&c->lock); - res_counter_uncharge_locked(c, val); + r = res_counter_uncharge_locked(c, val); + if (c == counter) + ret = r; spin_unlock(&c->lock); } local_irq_restore(flags); + return ret; } -void res_counter_uncharge(struct res_counter *counter, unsigned long val) +u64 res_counter_uncharge(struct res_counter *counter, unsigned long val) { - res_counter_uncharge_until(counter, NULL, val); + return res_counter_uncharge_until(counter, NULL, val); } static inline unsigned long long * -- cgit v1.2.3 From a8964b9b84f99c0b1b5d7c09520f89f0700e742e Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Tue, 18 Dec 2012 14:22:09 -0800 Subject: memcg: use static branches when code not in use We can use static branches to patch the code in or out when not used. Because the _ACTIVE bit on kmem_accounted is only set after the increment is done, we guarantee that the root memcg will always be selected for kmem charges until all call sites are patched (see memcg_kmem_enabled). This guarantees that no mischarges are applied. Static branch decrement happens when the last reference count from the kmem accounting in memcg dies. This will only happen when the charges drop down to 0. 
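Stripped of the memcg specifics, the mechanism is the ordinary jump-label pattern (a hedged sketch with made-up names, not code from this patch): the guarded test compiles to a static no-op branch until the key is incremented, so configurations with no kmem-limited group pay nothing on the hot paths.

#include <linux/jump_label.h>

static struct static_key example_kmem_key;      /* starts out disabled */

static inline bool example_kmem_enabled(void)
{
        /* Patched to a plain fall-through while the key count is zero. */
        return static_key_false(&example_kmem_key);
}

static void example_first_limited_group(void)
{
        static_key_slow_inc(&example_kmem_key); /* enable the branch */
}

static void example_last_charge_gone(void)
{
        static_key_slow_dec(&example_kmem_key); /* disable it again */
}

In the memcg case the increment side runs when a group becomes kmem-limited, and the decrement side runs once a dead group's charges have all been dropped.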
When that happens, we need to disable the static branch only on those memcgs that enabled it. To achieve this, we would be forced to complicate the code by keeping track of which memcgs were the ones that actually enabled limits, and which ones got it from its parents. It is a lot simpler just to do static_key_slow_inc() on every child that is accounted. Signed-off-by: Glauber Costa Acked-by: Michal Hocko Acked-by: Kamezawa Hiroyuki Cc: Christoph Lameter Cc: Pekka Enberg Cc: Johannes Weiner Cc: Suleiman Souhlal Cc: Tejun Heo Cc: David Rientjes Cc: Frederic Weisbecker Cc: Greg Thelen Cc: JoonSoo Kim Cc: Mel Gorman Cc: Rik van Riel Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/memcontrol.h | 4 ++- mm/memcontrol.c | 79 +++++++++++++++++++++++++++++++++++++++++++--- 2 files changed, 78 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index afa2ad40457e..87d61e840ddd 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -22,6 +22,7 @@ #include #include #include +#include struct mem_cgroup; struct page_cgroup; @@ -417,9 +418,10 @@ static inline void sock_release_memcg(struct sock *sk) #endif /* CONFIG_INET && CONFIG_MEMCG_KMEM */ #ifdef CONFIG_MEMCG_KMEM +extern struct static_key memcg_kmem_enabled_key; static inline bool memcg_kmem_enabled(void) { - return true; + return static_key_false(&memcg_kmem_enabled_key); } /* diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 9a62ac3ea881..bc70254558fa 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -346,10 +346,13 @@ struct mem_cgroup { /* internal only representation about the status of kmem accounting. */ enum { KMEM_ACCOUNTED_ACTIVE = 0, /* accounted by this cgroup itself */ + KMEM_ACCOUNTED_ACTIVATED, /* static key enabled. */ KMEM_ACCOUNTED_DEAD, /* dead memcg with pending kmem charges */ }; -#define KMEM_ACCOUNTED_MASK (1 << KMEM_ACCOUNTED_ACTIVE) +/* We account when limit is on, but only after call sites are patched */ +#define KMEM_ACCOUNTED_MASK \ + ((1 << KMEM_ACCOUNTED_ACTIVE) | (1 << KMEM_ACCOUNTED_ACTIVATED)) #ifdef CONFIG_MEMCG_KMEM static inline void memcg_kmem_set_active(struct mem_cgroup *memcg) @@ -362,6 +365,11 @@ static bool memcg_kmem_is_active(struct mem_cgroup *memcg) return test_bit(KMEM_ACCOUNTED_ACTIVE, &memcg->kmem_account_flags); } +static void memcg_kmem_set_activated(struct mem_cgroup *memcg) +{ + set_bit(KMEM_ACCOUNTED_ACTIVATED, &memcg->kmem_account_flags); +} + static void memcg_kmem_mark_dead(struct mem_cgroup *memcg) { if (test_bit(KMEM_ACCOUNTED_ACTIVE, &memcg->kmem_account_flags)) @@ -532,6 +540,26 @@ static void disarm_sock_keys(struct mem_cgroup *memcg) } #endif +#ifdef CONFIG_MEMCG_KMEM +struct static_key memcg_kmem_enabled_key; + +static void disarm_kmem_keys(struct mem_cgroup *memcg) +{ + if (memcg_kmem_is_active(memcg)) + static_key_slow_dec(&memcg_kmem_enabled_key); +} +#else +static void disarm_kmem_keys(struct mem_cgroup *memcg) +{ +} +#endif /* CONFIG_MEMCG_KMEM */ + +static void disarm_static_keys(struct mem_cgroup *memcg) +{ + disarm_sock_keys(memcg); + disarm_kmem_keys(memcg); +} + static void drain_all_stock_async(struct mem_cgroup *memcg); static struct mem_cgroup_per_zone * @@ -4204,6 +4232,8 @@ static int memcg_update_kmem_limit(struct cgroup *cont, u64 val) { int ret = -EINVAL; #ifdef CONFIG_MEMCG_KMEM + bool must_inc_static_branch = false; + struct mem_cgroup *memcg = mem_cgroup_from_cont(cont); /* * For simplicity, we won't allow this to be disabled. 
It also can't @@ -4234,7 +4264,15 @@ static int memcg_update_kmem_limit(struct cgroup *cont, u64 val) ret = res_counter_set_limit(&memcg->kmem, val); VM_BUG_ON(ret); - memcg_kmem_set_active(memcg); + /* + * After this point, kmem_accounted (that we test atomically in + * the beginning of this conditional), is no longer 0. This + * guarantees only one process will set the following boolean + * to true. We don't need test_and_set because we're protected + * by the set_limit_mutex anyway. + */ + memcg_kmem_set_activated(memcg); + must_inc_static_branch = true; /* * kmem charges can outlive the cgroup. In the case of slab * pages, for instance, a page contain objects from various @@ -4247,6 +4285,27 @@ static int memcg_update_kmem_limit(struct cgroup *cont, u64 val) out: mutex_unlock(&set_limit_mutex); cgroup_unlock(); + + /* + * We are by now familiar with the fact that we can't inc the static + * branch inside cgroup_lock. See disarm functions for details. A + * worker here is overkill, but also wrong: After the limit is set, we + * must start accounting right away. Since this operation can't fail, + * we can safely defer it to here - no rollback will be needed. + * + * The boolean used to control this is also safe, because + * KMEM_ACCOUNTED_ACTIVATED guarantees that only one process will be + * able to set it to true; + */ + if (must_inc_static_branch) { + static_key_slow_inc(&memcg_kmem_enabled_key); + /* + * setting the active bit after the inc will guarantee no one + * starts accounting before all call sites are patched + */ + memcg_kmem_set_active(memcg); + } + #endif return ret; } @@ -4258,8 +4317,20 @@ static void memcg_propagate_kmem(struct mem_cgroup *memcg) return; memcg->kmem_account_flags = parent->kmem_account_flags; #ifdef CONFIG_MEMCG_KMEM - if (memcg_kmem_is_active(memcg)) + /* + * When that happen, we need to disable the static branch only on those + * memcgs that enabled it. To achieve this, we would be forced to + * complicate the code by keeping track of which memcgs were the ones + * that actually enabled limits, and which ones got it from its + * parents. + * + * It is a lot simpler just to do static_key_slow_inc() on every child + * that is accounted. + */ + if (memcg_kmem_is_active(memcg)) { mem_cgroup_get(memcg); + static_key_slow_inc(&memcg_kmem_enabled_key); + } #endif } @@ -5184,7 +5255,7 @@ static void free_work(struct work_struct *work) * to move this code around, and make sure it is outside * the cgroup_lock. */ - disarm_sock_keys(memcg); + disarm_static_keys(memcg); if (size < PAGE_SIZE) kfree(memcg); else -- cgit v1.2.3 From 2ad306b17c0ac5a1b1f250d5f772aeb87fdf1eba Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Tue, 18 Dec 2012 14:22:18 -0800 Subject: fork: protect architectures where THREAD_SIZE >= PAGE_SIZE against fork bombs Because those architectures will draw their stacks directly from the page allocator, rather than the slab cache, we can directly pass __GFP_KMEMCG flag, and issue the corresponding free_pages. This code path is taken when the architecture doesn't define CONFIG_ARCH_THREAD_INFO_ALLOCATOR (only ia64 seems to), and has THREAD_SIZE >= PAGE_SIZE. Luckily, most - if not all - of the remaining architectures fall in this category. This will guarantee that every stack page is accounted to the memcg the process currently lives on, and will have the allocations to fail if they go over limit. For the time being, I am defining a new variant of THREADINFO_GFP, not to mess with the other path. 
Once the slab is also tracked by memcg, we can get rid of that flag. Tested to successfully protect against :(){ :|:& };: Signed-off-by: Glauber Costa Acked-by: Frederic Weisbecker Acked-by: Kamezawa Hiroyuki Reviewed-by: Michal Hocko Cc: Christoph Lameter Cc: David Rientjes Cc: Greg Thelen Cc: Johannes Weiner Cc: JoonSoo Kim Cc: Mel Gorman Cc: Pekka Enberg Cc: Rik van Riel Cc: Suleiman Souhlal Cc: Tejun Heo Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/thread_info.h | 2 ++ kernel/fork.c | 4 ++-- 2 files changed, 4 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h index ccc1899bd62e..e7e04736802f 100644 --- a/include/linux/thread_info.h +++ b/include/linux/thread_info.h @@ -61,6 +61,8 @@ extern long do_no_restart_syscall(struct restart_block *parm); # define THREADINFO_GFP (GFP_KERNEL | __GFP_NOTRACK) #endif +#define THREADINFO_GFP_ACCOUNTED (THREADINFO_GFP | __GFP_KMEMCG) + /* * flag set/clear/test wrappers * - pass TIF_xxxx constants to these functions diff --git a/kernel/fork.c b/kernel/fork.c index c36c4e301efe..85f6d536608d 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -146,7 +146,7 @@ void __weak arch_release_thread_info(struct thread_info *ti) static struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node) { - struct page *page = alloc_pages_node(node, THREADINFO_GFP, + struct page *page = alloc_pages_node(node, THREADINFO_GFP_ACCOUNTED, THREAD_SIZE_ORDER); return page ? page_address(page) : NULL; @@ -154,7 +154,7 @@ static struct thread_info *alloc_thread_info_node(struct task_struct *tsk, static inline void free_thread_info(struct thread_info *ti) { - free_pages((unsigned long)ti, THREAD_SIZE_ORDER); + free_memcg_kmem_pages((unsigned long)ti, THREAD_SIZE_ORDER); } # else static struct kmem_cache *thread_info_cache; -- cgit v1.2.3 From ba6c496ed834a37a26fc6fc87fc9aecb0fa0014d Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Tue, 18 Dec 2012 14:22:27 -0800 Subject: slab/slub: struct memcg_params For the kmem slab controller, we need to record some extra information in the kmem_cache structure. Signed-off-by: Glauber Costa Signed-off-by: Suleiman Souhlal Cc: Christoph Lameter Cc: David Rientjes Cc: Frederic Weisbecker Cc: Greg Thelen Cc: Johannes Weiner Cc: JoonSoo Kim Cc: KAMEZAWA Hiroyuki Cc: Mel Gorman Cc: Michal Hocko Cc: Pekka Enberg Cc: Rik van Riel Cc: Tejun Heo Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/slab.h | 24 ++++++++++++++++++++++++ include/linux/slab_def.h | 3 +++ include/linux/slub_def.h | 3 +++ mm/slab.h | 13 +++++++++++++ 4 files changed, 43 insertions(+) (limited to 'include/linux') diff --git a/include/linux/slab.h b/include/linux/slab.h index 743a10415122..00efba149222 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -176,6 +176,30 @@ void kmem_cache_free(struct kmem_cache *, void *); #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long) #endif +/* + * This is the main placeholder for memcg-related information in kmem caches. + * struct kmem_cache will hold a pointer to it, so the memory cost while + * disabled is 1 pointer. The runtime cost while enabled, gets bigger than it + * would otherwise be if that would be bundled in kmem_cache: we'll need an + * extra pointer chase. But the trade off clearly lays in favor of not + * penalizing non-users. + * + * Both the root cache and the child caches will have it. 
For the root cache, + * this will hold a dynamically allocated array large enough to hold + * information about the currently limited memcgs in the system. + * + * Child caches will hold extra metadata needed for its operation. Fields are: + * + * @memcg: pointer to the memcg this cache belongs to + */ +struct memcg_cache_params { + bool is_root_cache; + union { + struct kmem_cache *memcg_caches[0]; + struct mem_cgroup *memcg; + }; +}; + /* * Common kmalloc functions provided by all allocators */ diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h index 45c0356fdc8c..8bb6e0eaf3c6 100644 --- a/include/linux/slab_def.h +++ b/include/linux/slab_def.h @@ -81,6 +81,9 @@ struct kmem_cache { */ int obj_offset; #endif /* CONFIG_DEBUG_SLAB */ +#ifdef CONFIG_MEMCG_KMEM + struct memcg_cache_params *memcg_params; +#endif /* 6) per-cpu/per-node data, touched during every alloc/free */ /* diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h index df448adb7283..961e72eab907 100644 --- a/include/linux/slub_def.h +++ b/include/linux/slub_def.h @@ -101,6 +101,9 @@ struct kmem_cache { #ifdef CONFIG_SYSFS struct kobject kobj; /* For sysfs */ #endif +#ifdef CONFIG_MEMCG_KMEM + struct memcg_cache_params *memcg_params; +#endif #ifdef CONFIG_NUMA /* diff --git a/mm/slab.h b/mm/slab.h index 1cb9c9ee0e6f..49e7a8b1d27e 100644 --- a/mm/slab.h +++ b/mm/slab.h @@ -100,4 +100,17 @@ void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo); void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s); ssize_t slabinfo_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos); + +#ifdef CONFIG_MEMCG_KMEM +static inline bool is_root_cache(struct kmem_cache *s) +{ + return !s->memcg_params || s->memcg_params->is_root_cache; +} +#else +static inline bool is_root_cache(struct kmem_cache *s) +{ + return true; +} + +#endif #endif -- cgit v1.2.3 From 2633d7a028239a738b793be5ca8fa6ac312f5793 Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Tue, 18 Dec 2012 14:22:34 -0800 Subject: slab/slub: consider a memcg parameter in kmem_create_cache Allow a memcg parameter to be passed during cache creation. When the slub allocator is being used, it will only merge caches that belong to the same memcg. We'll do this by scanning the global list, and then translating the cache to a memcg-specific cache Default function is created as a wrapper, passing NULL to the memcg version. We only merge caches that belong to the same memcg. A helper is provided, memcg_css_id: because slub needs a unique cache name for sysfs. Since this is visible, but not the canonical location for slab data, the cache name is not used, the css_id should suffice. 
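A hedged usage sketch of the new entry point (the cache name, object size, and wrapper below are invented; kmem_cache_create_memcg and its argument order come from this patch): the existing API keeps creating root caches exactly as before, while the memcg core can request a per-group clone of the same cache.

static struct kmem_cache *example_cache_for(struct mem_cgroup *memcg)
{
        /*
         * A NULL memcg behaves like a plain kmem_cache_create() call and
         * yields a root cache. A non-NULL memcg yields a child cache that
         * SLUB will only merge with caches belonging to the same group,
         * and whose sysfs name is made unique with a per-group id suffix.
         */
        return kmem_cache_create_memcg(memcg, "example_cache", 256, 0,
                                       SLAB_HWCACHE_ALIGN, NULL);
}

kmem_cache_create() itself is reduced to a wrapper that passes a NULL memcg, so existing callers need no change.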
Signed-off-by: Glauber Costa Cc: Christoph Lameter Cc: David Rientjes Cc: Frederic Weisbecker Cc: Greg Thelen Cc: Johannes Weiner Cc: JoonSoo Kim Cc: KAMEZAWA Hiroyuki Cc: Mel Gorman Cc: Michal Hocko Cc: Pekka Enberg Cc: Rik van Riel Cc: Suleiman Souhlal Cc: Tejun Heo Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/memcontrol.h | 26 +++++++++++++++++++++++ include/linux/slab.h | 14 ++++++++++++- mm/memcontrol.c | 51 ++++++++++++++++++++++++++++++++++++++++++++++ mm/slab.h | 23 +++++++++++++++++---- mm/slab_common.c | 42 ++++++++++++++++++++++++++++++-------- mm/slub.c | 19 +++++++++++++---- 6 files changed, 157 insertions(+), 18 deletions(-) (limited to 'include/linux') diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 87d61e840ddd..0b69a0470007 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -28,6 +28,7 @@ struct mem_cgroup; struct page_cgroup; struct page; struct mm_struct; +struct kmem_cache; /* Stats that can be updated by kernel. */ enum mem_cgroup_page_stat_item { @@ -441,6 +442,11 @@ void __memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order); void __memcg_kmem_uncharge_pages(struct page *page, int order); +int memcg_cache_id(struct mem_cgroup *memcg); +int memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *s); +void memcg_release_cache(struct kmem_cache *cachep); +void memcg_cache_list_add(struct mem_cgroup *memcg, struct kmem_cache *cachep); + /** * memcg_kmem_newpage_charge: verify if a new kmem allocation is allowed. * @gfp: the gfp allocation flags. @@ -525,6 +531,26 @@ static inline void memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order) { } + +static inline int memcg_cache_id(struct mem_cgroup *memcg) +{ + return -1; +} + +static inline int memcg_register_cache(struct mem_cgroup *memcg, + struct kmem_cache *s) +{ + return 0; +} + +static inline void memcg_release_cache(struct kmem_cache *cachep) +{ +} + +static inline void memcg_cache_list_add(struct mem_cgroup *memcg, + struct kmem_cache *s) +{ +} #endif /* CONFIG_MEMCG_KMEM */ #endif /* _LINUX_MEMCONTROL_H */ diff --git a/include/linux/slab.h b/include/linux/slab.h index 00efba149222..c0fcf28c15b2 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -116,6 +116,7 @@ struct kmem_cache { }; #endif +struct mem_cgroup; /* * struct kmem_cache related prototypes */ @@ -125,6 +126,9 @@ int slab_is_available(void); struct kmem_cache *kmem_cache_create(const char *, size_t, size_t, unsigned long, void (*)(void *)); +struct kmem_cache * +kmem_cache_create_memcg(struct mem_cgroup *, const char *, size_t, size_t, + unsigned long, void (*)(void *)); void kmem_cache_destroy(struct kmem_cache *); int kmem_cache_shrink(struct kmem_cache *); void kmem_cache_free(struct kmem_cache *, void *); @@ -191,15 +195,23 @@ void kmem_cache_free(struct kmem_cache *, void *); * Child caches will hold extra metadata needed for its operation. 
Fields are: * * @memcg: pointer to the memcg this cache belongs to + * @list: list_head for the list of all caches in this memcg + * @root_cache: pointer to the global, root cache, this cache was derived from */ struct memcg_cache_params { bool is_root_cache; union { struct kmem_cache *memcg_caches[0]; - struct mem_cgroup *memcg; + struct { + struct mem_cgroup *memcg; + struct list_head list; + struct kmem_cache *root_cache; + }; }; }; +int memcg_update_all_caches(int num_memcgs); + /* * Common kmalloc functions provided by all allocators */ diff --git a/mm/memcontrol.c b/mm/memcontrol.c index e16694d5e118..3eafe6cf6ca4 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -341,6 +341,14 @@ struct mem_cgroup { #if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_INET) struct tcp_memcontrol tcp_mem; #endif +#if defined(CONFIG_MEMCG_KMEM) + /* analogous to slab_common's slab_caches list. per-memcg */ + struct list_head memcg_slab_caches; + /* Not a spinlock, we can take a lot of time walking the list */ + struct mutex slab_caches_mutex; + /* Index in the kmem_cache->memcg_params->memcg_caches array */ + int kmemcg_id; +#endif }; /* internal only representation about the status of kmem accounting. */ @@ -2785,6 +2793,47 @@ static void memcg_uncharge_kmem(struct mem_cgroup *memcg, u64 size) mem_cgroup_put(memcg); } +void memcg_cache_list_add(struct mem_cgroup *memcg, struct kmem_cache *cachep) +{ + if (!memcg) + return; + + mutex_lock(&memcg->slab_caches_mutex); + list_add(&cachep->memcg_params->list, &memcg->memcg_slab_caches); + mutex_unlock(&memcg->slab_caches_mutex); +} + +/* + * helper for acessing a memcg's index. It will be used as an index in the + * child cache array in kmem_cache, and also to derive its name. This function + * will return -1 when this is not a kmem-limited memcg. + */ +int memcg_cache_id(struct mem_cgroup *memcg) +{ + return memcg ? memcg->kmemcg_id : -1; +} + +int memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *s) +{ + size_t size = sizeof(struct memcg_cache_params); + + if (!memcg_kmem_enabled()) + return 0; + + s->memcg_params = kzalloc(size, GFP_KERNEL); + if (!s->memcg_params) + return -ENOMEM; + + if (memcg) + s->memcg_params->memcg = memcg; + return 0; +} + +void memcg_release_cache(struct kmem_cache *s) +{ + kfree(s->memcg_params); +} + /* * We need to verify if the allocation against current->mm->owner's memcg is * possible for the given order. 
But the page is not allocated yet, so we'll @@ -5026,7 +5075,9 @@ static int mem_cgroup_oom_control_write(struct cgroup *cgrp, #ifdef CONFIG_MEMCG_KMEM static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss) { + memcg->kmemcg_id = -1; memcg_propagate_kmem(memcg); + return mem_cgroup_sockets_init(memcg, ss); }; diff --git a/mm/slab.h b/mm/slab.h index 49e7a8b1d27e..abe582d20c79 100644 --- a/mm/slab.h +++ b/mm/slab.h @@ -43,12 +43,15 @@ extern struct kmem_cache *create_kmalloc_cache(const char *name, size_t size, extern void create_boot_cache(struct kmem_cache *, const char *name, size_t size, unsigned long flags); +struct mem_cgroup; #ifdef CONFIG_SLUB -struct kmem_cache *__kmem_cache_alias(const char *name, size_t size, - size_t align, unsigned long flags, void (*ctor)(void *)); +struct kmem_cache * +__kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size, + size_t align, unsigned long flags, void (*ctor)(void *)); #else -static inline struct kmem_cache *__kmem_cache_alias(const char *name, size_t size, - size_t align, unsigned long flags, void (*ctor)(void *)) +static inline struct kmem_cache * +__kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size, + size_t align, unsigned long flags, void (*ctor)(void *)) { return NULL; } #endif @@ -106,11 +109,23 @@ static inline bool is_root_cache(struct kmem_cache *s) { return !s->memcg_params || s->memcg_params->is_root_cache; } + +static inline bool cache_match_memcg(struct kmem_cache *cachep, + struct mem_cgroup *memcg) +{ + return (is_root_cache(cachep) && !memcg) || + (cachep->memcg_params->memcg == memcg); +} #else static inline bool is_root_cache(struct kmem_cache *s) { return true; } +static inline bool cache_match_memcg(struct kmem_cache *cachep, + struct mem_cgroup *memcg) +{ + return true; +} #endif #endif diff --git a/mm/slab_common.c b/mm/slab_common.c index a8e76d79ee65..3031badcc577 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -18,6 +18,7 @@ #include #include #include +#include #include "slab.h" @@ -27,7 +28,8 @@ DEFINE_MUTEX(slab_mutex); struct kmem_cache *kmem_cache; #ifdef CONFIG_DEBUG_VM -static int kmem_cache_sanity_check(const char *name, size_t size) +static int kmem_cache_sanity_check(struct mem_cgroup *memcg, const char *name, + size_t size) { struct kmem_cache *s = NULL; @@ -53,7 +55,13 @@ static int kmem_cache_sanity_check(const char *name, size_t size) continue; } - if (!strcmp(s->name, name)) { + /* + * For simplicity, we won't check this in the list of memcg + * caches. We have control over memcg naming, and if there + * aren't duplicates in the global list, there won't be any + * duplicates in the memcg lists as well. + */ + if (!memcg && !strcmp(s->name, name)) { pr_err("%s (%s): Cache name already exists.\n", __func__, name); dump_stack(); @@ -66,7 +74,8 @@ static int kmem_cache_sanity_check(const char *name, size_t size) return 0; } #else -static inline int kmem_cache_sanity_check(const char *name, size_t size) +static inline int kmem_cache_sanity_check(struct mem_cgroup *memcg, + const char *name, size_t size) { return 0; } @@ -125,8 +134,9 @@ unsigned long calculate_alignment(unsigned long flags, * as davem. 
*/ -struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align, - unsigned long flags, void (*ctor)(void *)) +struct kmem_cache * +kmem_cache_create_memcg(struct mem_cgroup *memcg, const char *name, size_t size, + size_t align, unsigned long flags, void (*ctor)(void *)) { struct kmem_cache *s = NULL; int err = 0; @@ -134,7 +144,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align get_online_cpus(); mutex_lock(&slab_mutex); - if (!kmem_cache_sanity_check(name, size) == 0) + if (!kmem_cache_sanity_check(memcg, name, size) == 0) goto out_locked; /* @@ -145,7 +155,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align */ flags &= CACHE_CREATE_MASK; - s = __kmem_cache_alias(name, size, align, flags, ctor); + s = __kmem_cache_alias(memcg, name, size, align, flags, ctor); if (s) goto out_locked; @@ -154,6 +164,13 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align s->object_size = s->size = size; s->align = calculate_alignment(flags, align, size); s->ctor = ctor; + + if (memcg_register_cache(memcg, s)) { + kmem_cache_free(kmem_cache, s); + err = -ENOMEM; + goto out_locked; + } + s->name = kstrdup(name, GFP_KERNEL); if (!s->name) { kmem_cache_free(kmem_cache, s); @@ -163,10 +180,9 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align err = __kmem_cache_create(s, flags); if (!err) { - s->refcount = 1; list_add(&s->list, &slab_caches); - + memcg_cache_list_add(memcg, s); } else { kfree(s->name); kmem_cache_free(kmem_cache, s); @@ -194,6 +210,13 @@ out_locked: return s; } + +struct kmem_cache * +kmem_cache_create(const char *name, size_t size, size_t align, + unsigned long flags, void (*ctor)(void *)) +{ + return kmem_cache_create_memcg(NULL, name, size, align, flags, ctor); +} EXPORT_SYMBOL(kmem_cache_create); void kmem_cache_destroy(struct kmem_cache *s) @@ -209,6 +232,7 @@ void kmem_cache_destroy(struct kmem_cache *s) if (s->flags & SLAB_DESTROY_BY_RCU) rcu_barrier(); + memcg_release_cache(s); kfree(s->name); kmem_cache_free(kmem_cache, s); } else { diff --git a/mm/slub.c b/mm/slub.c index 87f9f32bf0cd..985332b38852 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -31,6 +31,7 @@ #include #include #include +#include #include @@ -3786,7 +3787,7 @@ static int slab_unmergeable(struct kmem_cache *s) return 0; } -static struct kmem_cache *find_mergeable(size_t size, +static struct kmem_cache *find_mergeable(struct mem_cgroup *memcg, size_t size, size_t align, unsigned long flags, const char *name, void (*ctor)(void *)) { @@ -3822,17 +3823,21 @@ static struct kmem_cache *find_mergeable(size_t size, if (s->size - size >= sizeof(void *)) continue; + if (!cache_match_memcg(s, memcg)) + continue; + return s; } return NULL; } -struct kmem_cache *__kmem_cache_alias(const char *name, size_t size, - size_t align, unsigned long flags, void (*ctor)(void *)) +struct kmem_cache * +__kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size, + size_t align, unsigned long flags, void (*ctor)(void *)) { struct kmem_cache *s; - s = find_mergeable(size, align, flags, name, ctor); + s = find_mergeable(memcg, size, align, flags, name, ctor); if (s) { s->refcount++; /* @@ -5156,6 +5161,12 @@ static char *create_unique_id(struct kmem_cache *s) if (p != name + 1) *p++ = '-'; p += sprintf(p, "%07d", s->size); + +#ifdef CONFIG_MEMCG_KMEM + if (!is_root_cache(s)) + p += sprintf(p, "-%08d", memcg_cache_id(s->memcg_params->memcg)); +#endif + BUG_ON(p > name + ID_STR_LENGTH - 
1); return name; } -- cgit v1.2.3 From 55007d849759252ddd573aeb36143b947202d509 Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Tue, 18 Dec 2012 14:22:38 -0800 Subject: memcg: allocate memory for memcg caches whenever a new memcg appears Every cache that is considered a root cache (basically the "original" caches, tied to the root memcg/no-memcg) will have an array that should be large enough to store a cache pointer per each memcg in the system. Theoreticaly, this is as high as 1 << sizeof(css_id), which is currently in the 64k pointers range. Most of the time, we won't be using that much. What goes in this patch, is a simple scheme to dynamically allocate such an array, in order to minimize memory usage for memcg caches. Because we would also like to avoid allocations all the time, at least for now, the array will only grow. It will tend to be big enough to hold the maximum number of kmem-limited memcgs ever achieved. We'll allocate it to be a minimum of 64 kmem-limited memcgs. When we have more than that, we'll start doubling the size of this array every time the limit is reached. Because we are only considering kmem limited memcgs, a natural point for this to happen is when we write to the limit. At that point, we already have set_limit_mutex held, so that will become our natural synchronization mechanism. Signed-off-by: Glauber Costa Cc: Christoph Lameter Cc: David Rientjes Cc: Frederic Weisbecker Cc: Greg Thelen Cc: Johannes Weiner Cc: JoonSoo Kim Cc: KAMEZAWA Hiroyuki Cc: Mel Gorman Cc: Michal Hocko Cc: Pekka Enberg Cc: Rik van Riel Cc: Suleiman Souhlal Cc: Tejun Heo Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/memcontrol.h | 2 + mm/memcontrol.c | 207 +++++++++++++++++++++++++++++++++++++++++---- mm/slab_common.c | 28 ++++++ 3 files changed, 221 insertions(+), 16 deletions(-) (limited to 'include/linux') diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 0b69a0470007..45085e14e023 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -447,6 +447,8 @@ int memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *s); void memcg_release_cache(struct kmem_cache *cachep); void memcg_cache_list_add(struct mem_cgroup *memcg, struct kmem_cache *cachep); +int memcg_update_cache_size(struct kmem_cache *s, int num_groups); +void memcg_update_array_size(int num_groups); /** * memcg_kmem_newpage_charge: verify if a new kmem allocation is allowed. * @gfp: the gfp allocation flags. diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 3eafe6cf6ca4..db38b60e5f87 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -378,6 +378,11 @@ static void memcg_kmem_set_activated(struct mem_cgroup *memcg) set_bit(KMEM_ACCOUNTED_ACTIVATED, &memcg->kmem_account_flags); } +static void memcg_kmem_clear_activated(struct mem_cgroup *memcg) +{ + clear_bit(KMEM_ACCOUNTED_ACTIVATED, &memcg->kmem_account_flags); +} + static void memcg_kmem_mark_dead(struct mem_cgroup *memcg) { if (test_bit(KMEM_ACCOUNTED_ACTIVE, &memcg->kmem_account_flags)) @@ -549,12 +554,48 @@ static void disarm_sock_keys(struct mem_cgroup *memcg) #endif #ifdef CONFIG_MEMCG_KMEM +/* + * This will be the memcg's index in each cache's ->memcg_params->memcg_caches. + * There are two main reasons for not using the css_id for this: + * 1) this works better in sparse environments, where we have a lot of memcgs, + * but only a few kmem-limited. 
Or also, if we have, for instance, 200 + * memcgs, and none but the 200th is kmem-limited, we'd have to have a + * 200 entry array for that. + * + * 2) In order not to violate the cgroup API, we would like to do all memory + * allocation in ->create(). At that point, we haven't yet allocated the + * css_id. Having a separate index prevents us from messing with the cgroup + * core for this + * + * The current size of the caches array is stored in + * memcg_limited_groups_array_size. It will double each time we have to + * increase it. + */ +static DEFINE_IDA(kmem_limited_groups); +static int memcg_limited_groups_array_size; +/* + * MIN_SIZE is different than 1, because we would like to avoid going through + * the alloc/free process all the time. In a small machine, 4 kmem-limited + * cgroups is a reasonable guess. In the future, it could be a parameter or + * tunable, but that is strictly not necessary. + * + * MAX_SIZE should be as large as the number of css_ids. Ideally, we could get + * this constant directly from cgroup, but it is understandable that this is + * better kept as an internal representation in cgroup.c. In any case, the + * css_id space is not getting any smaller, and we don't have to necessarily + * increase ours as well if it increases. + */ +#define MEMCG_CACHES_MIN_SIZE 4 +#define MEMCG_CACHES_MAX_SIZE 65535 + struct static_key memcg_kmem_enabled_key; static void disarm_kmem_keys(struct mem_cgroup *memcg) { - if (memcg_kmem_is_active(memcg)) + if (memcg_kmem_is_active(memcg)) { static_key_slow_dec(&memcg_kmem_enabled_key); + ida_simple_remove(&kmem_limited_groups, memcg->kmemcg_id); + } /* * This check can't live in kmem destruction function, * since the charges will outlive the cgroup @@ -2813,6 +2854,120 @@ int memcg_cache_id(struct mem_cgroup *memcg) return memcg ? memcg->kmemcg_id : -1; } +/* + * This ends up being protected by the set_limit mutex, during normal + * operation, because that is its main call site. + * + * But when we create a new cache, we can call this as well if its parent + * is kmem-limited. That will have to hold set_limit_mutex as well. + */ +int memcg_update_cache_sizes(struct mem_cgroup *memcg) +{ + int num, ret; + + num = ida_simple_get(&kmem_limited_groups, + 0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL); + if (num < 0) + return num; + /* + * After this point, kmem_accounted (that we test atomically in + * the beginning of this conditional), is no longer 0. This + * guarantees only one process will set the following boolean + * to true. We don't need test_and_set because we're protected + * by the set_limit_mutex anyway. + */ + memcg_kmem_set_activated(memcg); + + ret = memcg_update_all_caches(num+1); + if (ret) { + ida_simple_remove(&kmem_limited_groups, num); + memcg_kmem_clear_activated(memcg); + return ret; + } + + memcg->kmemcg_id = num; + INIT_LIST_HEAD(&memcg->memcg_slab_caches); + mutex_init(&memcg->slab_caches_mutex); + return 0; +} + +static size_t memcg_caches_array_size(int num_groups) +{ + ssize_t size; + if (num_groups <= 0) + return 0; + + size = 2 * num_groups; + if (size < MEMCG_CACHES_MIN_SIZE) + size = MEMCG_CACHES_MIN_SIZE; + else if (size > MEMCG_CACHES_MAX_SIZE) + size = MEMCG_CACHES_MAX_SIZE; + + return size; +} + +/* + * We should update the current array size iff all caches updates succeed. This + * can only be done from the slab side. The slab mutex needs to be held when + * calling this. 
+ */ +void memcg_update_array_size(int num) +{ + if (num > memcg_limited_groups_array_size) + memcg_limited_groups_array_size = memcg_caches_array_size(num); +} + +int memcg_update_cache_size(struct kmem_cache *s, int num_groups) +{ + struct memcg_cache_params *cur_params = s->memcg_params; + + VM_BUG_ON(s->memcg_params && !s->memcg_params->is_root_cache); + + if (num_groups > memcg_limited_groups_array_size) { + int i; + ssize_t size = memcg_caches_array_size(num_groups); + + size *= sizeof(void *); + size += sizeof(struct memcg_cache_params); + + s->memcg_params = kzalloc(size, GFP_KERNEL); + if (!s->memcg_params) { + s->memcg_params = cur_params; + return -ENOMEM; + } + + s->memcg_params->is_root_cache = true; + + /* + * There is the chance it will be bigger than + * memcg_limited_groups_array_size, if we failed an allocation + * in a cache, in which case all caches updated before it, will + * have a bigger array. + * + * But if that is the case, the data after + * memcg_limited_groups_array_size is certainly unused + */ + for (i = 0; i < memcg_limited_groups_array_size; i++) { + if (!cur_params->memcg_caches[i]) + continue; + s->memcg_params->memcg_caches[i] = + cur_params->memcg_caches[i]; + } + + /* + * Ideally, we would wait until all caches succeed, and only + * then free the old one. But this is not worth the extra + * pointer per-cache we'd have to have for this. + * + * It is not a big deal if some caches are left with a size + * bigger than the others. And all updates will reset this + * anyway. + */ + kfree(cur_params); + } + return 0; +} + int memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *s) { size_t size = sizeof(struct memcg_cache_params); @@ -2820,6 +2975,9 @@ int memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *s) if (!memcg_kmem_enabled()) return 0; + if (!memcg) + size += memcg_limited_groups_array_size * sizeof(void *); + s->memcg_params = kzalloc(size, GFP_KERNEL); if (!s->memcg_params) return -ENOMEM; @@ -4326,14 +4484,11 @@ static int memcg_update_kmem_limit(struct cgroup *cont, u64 val) ret = res_counter_set_limit(&memcg->kmem, val); VM_BUG_ON(ret); - /* - * After this point, kmem_accounted (that we test atomically in - * the beginning of this conditional), is no longer 0. This - * guarantees only one process will set the following boolean - * to true. We don't need test_and_set because we're protected - * by the set_limit_mutex anyway. - */ - memcg_kmem_set_activated(memcg); + ret = memcg_update_cache_sizes(memcg); + if (ret) { + res_counter_set_limit(&memcg->kmem, RESOURCE_MAX); + goto out; + } must_inc_static_branch = true; /* * kmem charges can outlive the cgroup. In the case of slab @@ -4372,11 +4527,13 @@ out: return ret; } -static void memcg_propagate_kmem(struct mem_cgroup *memcg) +static int memcg_propagate_kmem(struct mem_cgroup *memcg) { + int ret = 0; struct mem_cgroup *parent = parent_mem_cgroup(memcg); if (!parent) - return; + goto out; + memcg->kmem_account_flags = parent->kmem_account_flags; #ifdef CONFIG_MEMCG_KMEM /* @@ -4389,11 +4546,24 @@ static void memcg_propagate_kmem(struct mem_cgroup *memcg) * It is a lot simpler just to do static_key_slow_inc() on every child * that is accounted. */ - if (memcg_kmem_is_active(memcg)) { - mem_cgroup_get(memcg); - static_key_slow_inc(&memcg_kmem_enabled_key); - } + if (!memcg_kmem_is_active(memcg)) + goto out; + + /* + * destroy(), called if we fail, will issue static_key_slow_inc() and + * mem_cgroup_put() if kmem is enabled. 
We have to either call them + * unconditionally, or clear the KMEM_ACTIVE flag. I personally find + * this more consistent, since it always leads to the same destroy path + */ + mem_cgroup_get(memcg); + static_key_slow_inc(&memcg_kmem_enabled_key); + + mutex_lock(&set_limit_mutex); + ret = memcg_update_cache_sizes(memcg); + mutex_unlock(&set_limit_mutex); #endif +out: + return ret; } /* @@ -5075,8 +5245,12 @@ static int mem_cgroup_oom_control_write(struct cgroup *cgrp, #ifdef CONFIG_MEMCG_KMEM static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss) { + int ret; + memcg->kmemcg_id = -1; - memcg_propagate_kmem(memcg); + ret = memcg_propagate_kmem(memcg); + if (ret) + return ret; return mem_cgroup_sockets_init(memcg, ss); }; @@ -5479,6 +5653,7 @@ mem_cgroup_css_alloc(struct cgroup *cont) res_counter_init(&memcg->res, &parent->res); res_counter_init(&memcg->memsw, &parent->memsw); res_counter_init(&memcg->kmem, &parent->kmem); + /* * We increment refcnt of the parent to ensure that we can * safely access it on res_counter_charge/uncharge. diff --git a/mm/slab_common.c b/mm/slab_common.c index 3031badcc577..1c424b6511bf 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -81,6 +81,34 @@ static inline int kmem_cache_sanity_check(struct mem_cgroup *memcg, } #endif +#ifdef CONFIG_MEMCG_KMEM +int memcg_update_all_caches(int num_memcgs) +{ + struct kmem_cache *s; + int ret = 0; + mutex_lock(&slab_mutex); + + list_for_each_entry(s, &slab_caches, list) { + if (!is_root_cache(s)) + continue; + + ret = memcg_update_cache_size(s, num_memcgs); + /* + * See comment in memcontrol.c, memcg_update_cache_size: + * Instead of freeing the memory, we'll just leave the caches + * up to this point in an updated state. + */ + if (ret) + goto out; + } + + memcg_update_array_size(num_memcgs); +out: + mutex_unlock(&slab_mutex); + return ret; +} +#endif + /* * Figure out what the alignment of the objects will be given a set of * flags, a user specified alignment and the size of the objects. -- cgit v1.2.3 From d7f25f8a2f81252d1ac134470ba1d0a287cf8fcd Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Tue, 18 Dec 2012 14:22:40 -0800 Subject: memcg: infrastructure to match an allocation to the right cache The page allocator is able to bind a page to a memcg when it is allocated. But for the caches, we'd like to have as many objects as possible in a page belonging to the same cache. This is done in this patch by calling memcg_kmem_get_cache in the beginning of every allocation function. This function is patched out by static branches when kernel memory controller is not being used. It assumes that the task allocating, which determines the memcg in the page allocator, belongs to the same cgroup throughout the whole process. Misaccounting can happen if the task calls memcg_kmem_get_cache() while belonging to a cgroup, and later on changes. This is considered acceptable, and should only happen upon task migration. Before the cache is created by the memcg core, there is also a possible imbalance: the task belongs to a memcg, but the cache being allocated from is the global cache, since the child cache is not yet guaranteed to be ready. This case is also fine, since in this case the GFP_KMEMCG will not be passed and the page allocator will not attempt any cgroup accounting. 
Signed-off-by: Glauber Costa Cc: Christoph Lameter Cc: David Rientjes Cc: Frederic Weisbecker Cc: Greg Thelen Cc: Johannes Weiner Cc: JoonSoo Kim Cc: KAMEZAWA Hiroyuki Cc: Mel Gorman Cc: Michal Hocko Cc: Pekka Enberg Cc: Rik van Riel Cc: Suleiman Souhlal Cc: Tejun Heo Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/memcontrol.h | 41 +++++++++ init/Kconfig | 1 - mm/memcontrol.c | 217 +++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 258 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 45085e14e023..bd9b5d73bc2b 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -449,6 +449,10 @@ void memcg_cache_list_add(struct mem_cgroup *memcg, struct kmem_cache *cachep); int memcg_update_cache_size(struct kmem_cache *s, int num_groups); void memcg_update_array_size(int num_groups); + +struct kmem_cache * +__memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp); + /** * memcg_kmem_newpage_charge: verify if a new kmem allocation is allowed. * @gfp: the gfp allocation flags. @@ -518,6 +522,37 @@ memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order) __memcg_kmem_commit_charge(page, memcg, order); } +/** + * memcg_kmem_get_cache: selects the correct per-memcg cache for allocation + * @cachep: the original global kmem cache + * @gfp: allocation flags. + * + * This function assumes that the task allocating, which determines the memcg + * in the page allocator, belongs to the same cgroup throughout the whole + * process. Misacounting can happen if the task calls memcg_kmem_get_cache() + * while belonging to a cgroup, and later on changes. This is considered + * acceptable, and should only happen upon task migration. + * + * Before the cache is created by the memcg core, there is also a possible + * imbalance: the task belongs to a memcg, but the cache being allocated from + * is the global cache, since the child cache is not yet guaranteed to be + * ready. This case is also fine, since in this case the GFP_KMEMCG will not be + * passed and the page allocator will not attempt any cgroup accounting. + */ +static __always_inline struct kmem_cache * +memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp) +{ + if (!memcg_kmem_enabled()) + return cachep; + if (gfp & __GFP_NOFAIL) + return cachep; + if (in_interrupt() || (!current->mm) || (current->flags & PF_KTHREAD)) + return cachep; + if (unlikely(fatal_signal_pending(current))) + return cachep; + + return __memcg_kmem_get_cache(cachep, gfp); +} #else static inline bool memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order) @@ -553,6 +588,12 @@ static inline void memcg_cache_list_add(struct mem_cgroup *memcg, struct kmem_cache *s) { } + +static inline struct kmem_cache * +memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp) +{ + return cachep; +} #endif /* CONFIG_MEMCG_KMEM */ #endif /* _LINUX_MEMCONTROL_H */ diff --git a/init/Kconfig b/init/Kconfig index 19ccb33c99d9..7d30240e5bfe 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -883,7 +883,6 @@ config MEMCG_KMEM bool "Memory Resource Controller Kernel Memory accounting (EXPERIMENTAL)" depends on MEMCG && EXPERIMENTAL depends on SLUB || SLAB - default n help The Kernel Memory extension for Memory Resource Controller can limit the amount of memory used by kernel objects in the system. 
Those are diff --git a/mm/memcontrol.c b/mm/memcontrol.c index db38b60e5f87..efd26620a60b 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -588,7 +588,14 @@ static int memcg_limited_groups_array_size; #define MEMCG_CACHES_MIN_SIZE 4 #define MEMCG_CACHES_MAX_SIZE 65535 +/* + * A lot of the calls to the cache allocation functions are expected to be + * inlined by the compiler. Since the calls to memcg_kmem_get_cache are + * conditional to this static branch, we'll have to allow modules that does + * kmem_cache_alloc and the such to see this symbol as well + */ struct static_key memcg_kmem_enabled_key; +EXPORT_SYMBOL(memcg_kmem_enabled_key); static void disarm_kmem_keys(struct mem_cgroup *memcg) { @@ -2989,9 +2996,219 @@ int memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *s) void memcg_release_cache(struct kmem_cache *s) { + struct kmem_cache *root; + struct mem_cgroup *memcg; + int id; + + /* + * This happens, for instance, when a root cache goes away before we + * add any memcg. + */ + if (!s->memcg_params) + return; + + if (s->memcg_params->is_root_cache) + goto out; + + memcg = s->memcg_params->memcg; + id = memcg_cache_id(memcg); + + root = s->memcg_params->root_cache; + root->memcg_params->memcg_caches[id] = NULL; + mem_cgroup_put(memcg); + + mutex_lock(&memcg->slab_caches_mutex); + list_del(&s->memcg_params->list); + mutex_unlock(&memcg->slab_caches_mutex); + +out: kfree(s->memcg_params); } +static char *memcg_cache_name(struct mem_cgroup *memcg, struct kmem_cache *s) +{ + char *name; + struct dentry *dentry; + + rcu_read_lock(); + dentry = rcu_dereference(memcg->css.cgroup->dentry); + rcu_read_unlock(); + + BUG_ON(dentry == NULL); + + name = kasprintf(GFP_KERNEL, "%s(%d:%s)", s->name, + memcg_cache_id(memcg), dentry->d_name.name); + + return name; +} + +static struct kmem_cache *kmem_cache_dup(struct mem_cgroup *memcg, + struct kmem_cache *s) +{ + char *name; + struct kmem_cache *new; + + name = memcg_cache_name(memcg, s); + if (!name) + return NULL; + + new = kmem_cache_create_memcg(memcg, name, s->object_size, s->align, + (s->flags & ~SLAB_PANIC), s->ctor); + + kfree(name); + return new; +} + +/* + * This lock protects updaters, not readers. We want readers to be as fast as + * they can, and they will either see NULL or a valid cache value. Our model + * allow them to see NULL, in which case the root memcg will be selected. + * + * We need this lock because multiple allocations to the same cache from a non + * will span more than one worker. Only one of them can create the cache. 
+ */ +static DEFINE_MUTEX(memcg_cache_mutex); +static struct kmem_cache *memcg_create_kmem_cache(struct mem_cgroup *memcg, + struct kmem_cache *cachep) +{ + struct kmem_cache *new_cachep; + int idx; + + BUG_ON(!memcg_can_account_kmem(memcg)); + + idx = memcg_cache_id(memcg); + + mutex_lock(&memcg_cache_mutex); + new_cachep = cachep->memcg_params->memcg_caches[idx]; + if (new_cachep) + goto out; + + new_cachep = kmem_cache_dup(memcg, cachep); + + if (new_cachep == NULL) { + new_cachep = cachep; + goto out; + } + + mem_cgroup_get(memcg); + new_cachep->memcg_params->root_cache = cachep; + + cachep->memcg_params->memcg_caches[idx] = new_cachep; + /* + * the readers won't lock, make sure everybody sees the updated value, + * so they won't put stuff in the queue again for no reason + */ + wmb(); +out: + mutex_unlock(&memcg_cache_mutex); + return new_cachep; +} + +struct create_work { + struct mem_cgroup *memcg; + struct kmem_cache *cachep; + struct work_struct work; +}; + +static void memcg_create_cache_work_func(struct work_struct *w) +{ + struct create_work *cw; + + cw = container_of(w, struct create_work, work); + memcg_create_kmem_cache(cw->memcg, cw->cachep); + /* Drop the reference gotten when we enqueued. */ + css_put(&cw->memcg->css); + kfree(cw); +} + +/* + * Enqueue the creation of a per-memcg kmem_cache. + * Called with rcu_read_lock. + */ +static void memcg_create_cache_enqueue(struct mem_cgroup *memcg, + struct kmem_cache *cachep) +{ + struct create_work *cw; + + cw = kmalloc(sizeof(struct create_work), GFP_NOWAIT); + if (cw == NULL) + return; + + /* The corresponding put will be done in the workqueue. */ + if (!css_tryget(&memcg->css)) { + kfree(cw); + return; + } + + cw->memcg = memcg; + cw->cachep = cachep; + + INIT_WORK(&cw->work, memcg_create_cache_work_func); + schedule_work(&cw->work); +} + +/* + * Return the kmem_cache we're supposed to use for a slab allocation. + * We try to use the current memcg's version of the cache. + * + * If the cache does not exist yet, if we are the first user of it, + * we either create it immediately, if possible, or create it asynchronously + * in a workqueue. + * In the latter case, we will let the current allocation go through with + * the original cache. + * + * Can't be called in interrupt context or from kernel threads. + * This function needs to be called with rcu_read_lock() held. + */ +struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep, + gfp_t gfp) +{ + struct mem_cgroup *memcg; + int idx; + + VM_BUG_ON(!cachep->memcg_params); + VM_BUG_ON(!cachep->memcg_params->is_root_cache); + + rcu_read_lock(); + memcg = mem_cgroup_from_task(rcu_dereference(current->mm->owner)); + rcu_read_unlock(); + + if (!memcg_can_account_kmem(memcg)) + return cachep; + + idx = memcg_cache_id(memcg); + + /* + * barrier to mare sure we're always seeing the up to date value. The + * code updating memcg_caches will issue a write barrier to match this. + */ + read_barrier_depends(); + if (unlikely(cachep->memcg_params->memcg_caches[idx] == NULL)) { + /* + * If we are in a safe context (can wait, and not in interrupt + * context), we could be be predictable and return right away. + * This would guarantee that the allocation being performed + * already belongs in the new cache. + * + * However, there are some clashes that can arrive from locking. + * For instance, because we acquire the slab_mutex while doing + * kmem_cache_dup, this means no further allocation could happen + * with the slab_mutex held. 
+ * + * Also, because cache creation issue get_online_cpus(), this + * creates a lock chain: memcg_slab_mutex -> cpu_hotplug_mutex, + * that ends up reversed during cpu hotplug. (cpuset allocates + * a bunch of GFP_KERNEL memory during cpuup). Due to all that, + * better to defer everything. + */ + memcg_create_cache_enqueue(memcg, cachep); + return cachep; + } + + return cachep->memcg_params->memcg_caches[idx]; +} +EXPORT_SYMBOL(__memcg_kmem_get_cache); + /* * We need to verify if the allocation against current->mm->owner's memcg is * possible for the given order. But the page is not allocated yet, so we'll -- cgit v1.2.3 From 0e9d92f2d02d8c8320f0502307c688d07bdac2b3 Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Tue, 18 Dec 2012 14:22:42 -0800 Subject: memcg: skip memcg kmem allocations in specified code regions Create a mechanism that skip memcg allocations during certain pieces of our core code. It basically works in the same way as preempt_disable()/preempt_enable(): By marking a region under which all allocations will be accounted to the root memcg. We need this to prevent races in early cache creation, when we allocate data using caches that are not necessarily created already. Signed-off-by: Glauber Costa yCc: Christoph Lameter Cc: David Rientjes Cc: Frederic Weisbecker Cc: Greg Thelen Cc: Johannes Weiner Cc: JoonSoo Kim Cc: KAMEZAWA Hiroyuki Cc: Mel Gorman Cc: Michal Hocko Cc: Pekka Enberg Cc: Rik van Riel Cc: Suleiman Souhlal Cc: Tejun Heo Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/sched.h | 1 + mm/memcontrol.c | 57 ++++++++++++++++++++++++++++++++++++++++++++++++--- 2 files changed, 55 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/include/linux/sched.h b/include/linux/sched.h index 9914c662ed7b..f712465b05c5 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1597,6 +1597,7 @@ struct task_struct { unsigned long nr_pages; /* uncharged usage */ unsigned long memsw_nr_pages; /* uncharged mem+swap usage */ } memcg_batch; + unsigned int memcg_kmem_skip_account; #endif #ifdef CONFIG_HAVE_HW_BREAKPOINT atomic_t ptrace_bp_refcnt; diff --git a/mm/memcontrol.c b/mm/memcontrol.c index efd26620a60b..65302a083d2f 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -3025,6 +3025,37 @@ out: kfree(s->memcg_params); } +/* + * During the creation a new cache, we need to disable our accounting mechanism + * altogether. This is true even if we are not creating, but rather just + * enqueing new caches to be created. + * + * This is because that process will trigger allocations; some visible, like + * explicit kmallocs to auxiliary data structures, name strings and internal + * cache structures; some well concealed, like INIT_WORK() that can allocate + * objects during debug. + * + * If any allocation happens during memcg_kmem_get_cache, we will recurse back + * to it. This may not be a bounded recursion: since the first cache creation + * failed to complete (waiting on the allocation), we'll just try to create the + * cache again, failing at the same point. + * + * memcg_kmem_get_cache is prepared to abort after seeing a positive count of + * memcg_kmem_skip_account. So we enclose anything that might allocate memory + * inside the following two functions. 
+ */ +static inline void memcg_stop_kmem_account(void) +{ + VM_BUG_ON(!current->mm); + current->memcg_kmem_skip_account++; +} + +static inline void memcg_resume_kmem_account(void) +{ + VM_BUG_ON(!current->mm); + current->memcg_kmem_skip_account--; +} + static char *memcg_cache_name(struct mem_cgroup *memcg, struct kmem_cache *s) { char *name; @@ -3084,7 +3115,6 @@ static struct kmem_cache *memcg_create_kmem_cache(struct mem_cgroup *memcg, goto out; new_cachep = kmem_cache_dup(memcg, cachep); - if (new_cachep == NULL) { new_cachep = cachep; goto out; @@ -3125,8 +3155,8 @@ static void memcg_create_cache_work_func(struct work_struct *w) * Enqueue the creation of a per-memcg kmem_cache. * Called with rcu_read_lock. */ -static void memcg_create_cache_enqueue(struct mem_cgroup *memcg, - struct kmem_cache *cachep) +static void __memcg_create_cache_enqueue(struct mem_cgroup *memcg, + struct kmem_cache *cachep) { struct create_work *cw; @@ -3147,6 +3177,24 @@ static void memcg_create_cache_enqueue(struct mem_cgroup *memcg, schedule_work(&cw->work); } +static void memcg_create_cache_enqueue(struct mem_cgroup *memcg, + struct kmem_cache *cachep) +{ + /* + * We need to stop accounting when we kmalloc, because if the + * corresponding kmalloc cache is not yet created, the first allocation + * in __memcg_create_cache_enqueue will recurse. + * + * However, it is better to enclose the whole function. Depending on + * the debugging options enabled, INIT_WORK(), for instance, can + * trigger an allocation. This too, will make us recurse. Because at + * this point we can't allow ourselves back into memcg_kmem_get_cache, + * the safest choice is to do it like this, wrapping the whole function. + */ + memcg_stop_kmem_account(); + __memcg_create_cache_enqueue(memcg, cachep); + memcg_resume_kmem_account(); +} /* * Return the kmem_cache we're supposed to use for a slab allocation. * We try to use the current memcg's version of the cache. @@ -3169,6 +3217,9 @@ struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep, VM_BUG_ON(!cachep->memcg_params); VM_BUG_ON(!cachep->memcg_params->is_root_cache); + if (!current->mm || current->memcg_kmem_skip_account) + return cachep; + rcu_read_lock(); memcg = mem_cgroup_from_task(rcu_dereference(current->mm->owner)); rcu_read_unlock(); -- cgit v1.2.3 From b9ce5ef49f00daf2254c6953c8d31f79aabccd34 Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Tue, 18 Dec 2012 14:22:46 -0800 Subject: sl[au]b: always get the cache from its page in kmem_cache_free() struct page already has this information. If we start chaining caches, this information will always be more trustworthy than whatever is passed into the function. 
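Condensed into a sketch, the change amounts to deriving the cache from the object's page before freeing, rather than trusting the pointer passed by the caller. The real helper added below is cache_from_obj() in mm/slab.h; example_free() here is only an illustration of the idea.

#include <linux/mm.h>
#include <linux/slab.h>

static void example_free(struct kmem_cache *s, void *x)
{
	struct page *page = virt_to_head_page(x);

	/*
	 * The page records which cache its objects were allocated from.
	 * With per-memcg child caches this may legitimately differ from
	 * the (root) cache the caller passed in, and it is the value to
	 * trust.
	 */
	if (page->slab_cache != s)
		s = page->slab_cache;

	/* ... return the object to cache s ... */
}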
Signed-off-by: Glauber Costa Cc: Christoph Lameter Cc: David Rientjes Cc: Frederic Weisbecker Cc: Greg Thelen Cc: Johannes Weiner Cc: JoonSoo Kim Cc: KAMEZAWA Hiroyuki Cc: Mel Gorman Cc: Michal Hocko Cc: Pekka Enberg Cc: Rik van Riel Cc: Suleiman Souhlal Cc: Tejun Heo Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/memcontrol.h | 5 +++++ mm/slab.c | 6 +++++- mm/slab.h | 39 +++++++++++++++++++++++++++++++++++++++ mm/slob.c | 2 +- mm/slub.c | 15 +++------------ 5 files changed, 53 insertions(+), 14 deletions(-) (limited to 'include/linux') diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index bd9b5d73bc2b..2298122e71ad 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -554,6 +554,11 @@ memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp) return __memcg_kmem_get_cache(cachep, gfp); } #else +static inline bool memcg_kmem_enabled(void) +{ + return false; +} + static inline bool memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order) { diff --git a/mm/slab.c b/mm/slab.c index c26ab9fbe1f5..bab6fec765a7 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -87,7 +87,6 @@ */ #include -#include "slab.h" #include #include #include @@ -128,6 +127,8 @@ #include "internal.h" +#include "slab.h" + /* * DEBUG - 1 for kmem_cache_create() to honour; SLAB_RED_ZONE & SLAB_POISON. * 0 for faster, smaller code (especially in the critical paths). @@ -3883,6 +3884,9 @@ EXPORT_SYMBOL(__kmalloc); void kmem_cache_free(struct kmem_cache *cachep, void *objp) { unsigned long flags; + cachep = cache_from_obj(cachep, objp); + if (!cachep) + return; local_irq_save(flags); debug_check_no_locks_freed(objp, cachep->object_size); diff --git a/mm/slab.h b/mm/slab.h index abe582d20c79..c95e922b166d 100644 --- a/mm/slab.h +++ b/mm/slab.h @@ -116,6 +116,13 @@ static inline bool cache_match_memcg(struct kmem_cache *cachep, return (is_root_cache(cachep) && !memcg) || (cachep->memcg_params->memcg == memcg); } + +static inline bool slab_equal_or_root(struct kmem_cache *s, + struct kmem_cache *p) +{ + return (p == s) || + (s->memcg_params && (p == s->memcg_params->root_cache)); +} #else static inline bool is_root_cache(struct kmem_cache *s) { @@ -127,5 +134,37 @@ static inline bool cache_match_memcg(struct kmem_cache *cachep, { return true; } + +static inline bool slab_equal_or_root(struct kmem_cache *s, + struct kmem_cache *p) +{ + return true; +} #endif + +static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x) +{ + struct kmem_cache *cachep; + struct page *page; + + /* + * When kmemcg is not being used, both assignments should return the + * same value. but we don't want to pay the assignment price in that + * case. If it is not compiled in, the compiler should be smart enough + * to not do even the assignment. In that case, slab_equal_or_root + * will also be a constant. + */ + if (!memcg_kmem_enabled() && !unlikely(s->flags & SLAB_DEBUG_FREE)) + return s; + + page = virt_to_head_page(x); + cachep = page->slab_cache; + if (slab_equal_or_root(cachep, s)) + return cachep; + + pr_err("%s: Wrong slab cache. 
%s but object is from %s\n", + __FUNCTION__, cachep->name, s->name); + WARN_ON_ONCE(1); + return s; +} #endif diff --git a/mm/slob.c b/mm/slob.c index 795bab7d391d..a99fdf7a0907 100644 --- a/mm/slob.c +++ b/mm/slob.c @@ -58,7 +58,6 @@ #include #include -#include "slab.h" #include #include /* struct reclaim_state */ @@ -73,6 +72,7 @@ #include +#include "slab.h" /* * slob_block has a field 'units', which indicates size of block if +ve, * or offset of next block if -ve (in SLOB_UNITs). diff --git a/mm/slub.c b/mm/slub.c index 985332b38852..6d5f2305d7a4 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -2611,19 +2611,10 @@ redo: void kmem_cache_free(struct kmem_cache *s, void *x) { - struct page *page; - - page = virt_to_head_page(x); - - if (kmem_cache_debug(s) && page->slab_cache != s) { - pr_err("kmem_cache_free: Wrong slab cache. %s but object" - " is from %s\n", page->slab_cache->name, s->name); - WARN_ON_ONCE(1); + s = cache_from_obj(s, x); + if (!s) return; - } - - slab_free(s, page, x, _RET_IP_); - + slab_free(s, virt_to_head_page(x), x, _RET_IP_); trace_kmem_cache_free(_RET_IP_, x); } EXPORT_SYMBOL(kmem_cache_free); -- cgit v1.2.3 From d79923fad95b0cdf7770e024677180c734cb7148 Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Tue, 18 Dec 2012 14:22:48 -0800 Subject: sl[au]b: allocate objects from memcg cache We are able to match a cache allocation to a particular memcg. If the task doesn't change groups during the allocation itself - a rare event, this will give us a good picture about who is the first group to touch a cache page. This patch uses the now available infrastructure by calling memcg_kmem_get_cache() before all the cache allocations. Signed-off-by: Glauber Costa Cc: Christoph Lameter Cc: David Rientjes Cc: Frederic Weisbecker Cc: Greg Thelen Cc: Johannes Weiner Cc: JoonSoo Kim Cc: KAMEZAWA Hiroyuki Cc: Mel Gorman Cc: Michal Hocko Cc: Pekka Enberg Cc: Rik van Riel Cc: Suleiman Souhlal Cc: Tejun Heo Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/slub_def.h | 5 ++++- mm/memcontrol.c | 3 +++ mm/slab.c | 6 +++++- mm/slub.c | 7 ++++--- 4 files changed, 16 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h index 961e72eab907..364ba6c9fe21 100644 --- a/include/linux/slub_def.h +++ b/include/linux/slub_def.h @@ -225,7 +225,10 @@ void *__kmalloc(size_t size, gfp_t flags); static __always_inline void * kmalloc_order(size_t size, gfp_t flags, unsigned int order) { - void *ret = (void *) __get_free_pages(flags | __GFP_COMP, order); + void *ret; + + flags |= (__GFP_COMP | __GFP_KMEMCG); + ret = (void *) __get_free_pages(flags, order); kmemleak_alloc(ret, size, 1, flags); return ret; } diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 65302a083d2f..cc13797d0fbc 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -3086,6 +3086,9 @@ static struct kmem_cache *kmem_cache_dup(struct mem_cgroup *memcg, new = kmem_cache_create_memcg(memcg, name, s->object_size, s->align, (s->flags & ~SLAB_PANIC), s->ctor); + if (new) + new->allocflags |= __GFP_KMEMCG; + kfree(name); return new; } diff --git a/mm/slab.c b/mm/slab.c index bab6fec765a7..e265865e8700 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -1933,7 +1933,7 @@ static void kmem_freepages(struct kmem_cache *cachep, void *addr) } if (current->reclaim_state) current->reclaim_state->reclaimed_slab += nr_freed; - free_pages((unsigned long)addr, cachep->gfporder); + free_memcg_kmem_pages((unsigned long)addr, cachep->gfporder); } static void 
kmem_rcu_free(struct rcu_head *head) @@ -3486,6 +3486,8 @@ slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid, if (slab_should_failslab(cachep, flags)) return NULL; + cachep = memcg_kmem_get_cache(cachep, flags); + cache_alloc_debugcheck_before(cachep, flags); local_irq_save(save_flags); @@ -3571,6 +3573,8 @@ slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller) if (slab_should_failslab(cachep, flags)) return NULL; + cachep = memcg_kmem_get_cache(cachep, flags); + cache_alloc_debugcheck_before(cachep, flags); local_irq_save(save_flags); objp = __do_cache_alloc(cachep, flags); diff --git a/mm/slub.c b/mm/slub.c index 6d5f2305d7a4..ef39e872b8eb 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -1405,7 +1405,7 @@ static void __free_slab(struct kmem_cache *s, struct page *page) reset_page_mapcount(page); if (current->reclaim_state) current->reclaim_state->reclaimed_slab += pages; - __free_pages(page, order); + __free_memcg_kmem_pages(page, order); } #define need_reserve_slab_rcu \ @@ -2323,6 +2323,7 @@ static __always_inline void *slab_alloc_node(struct kmem_cache *s, if (slab_pre_alloc_hook(s, gfpflags)) return NULL; + s = memcg_kmem_get_cache(s, gfpflags); redo: /* @@ -3284,7 +3285,7 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node) struct page *page; void *ptr = NULL; - flags |= __GFP_COMP | __GFP_NOTRACK; + flags |= __GFP_COMP | __GFP_NOTRACK | __GFP_KMEMCG; page = alloc_pages_node(node, flags, get_order(size)); if (page) ptr = page_address(page); @@ -3390,7 +3391,7 @@ void kfree(const void *x) if (unlikely(!PageSlab(page))) { BUG_ON(!PageCompound(page)); kmemleak_free(x); - __free_pages(page, compound_order(page)); + __free_memcg_kmem_pages(page, compound_order(page)); return; } slab_free(page->slab_cache, page, object, _RET_IP_); -- cgit v1.2.3 From 1f458cbf122288b23620ee822e19bcbb76c8d6ec Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Tue, 18 Dec 2012 14:22:50 -0800 Subject: memcg: destroy memcg caches Implement destruction of memcg caches. Right now, only caches where our reference counter is the last remaining are deleted. If there are any other reference counters around, we just leave the caches lying around until they go away. When that happens, a destruction function is called from the cache code. Caches are only destroyed in process context, so we queue them up for later processing in the general case. Signed-off-by: Glauber Costa Cc: Christoph Lameter Cc: David Rientjes Cc: Frederic Weisbecker Cc: Greg Thelen Cc: Johannes Weiner Cc: JoonSoo Kim Cc: KAMEZAWA Hiroyuki Cc: Mel Gorman Cc: Michal Hocko Cc: Pekka Enberg Cc: Rik van Riel Cc: Suleiman Souhlal Cc: Tejun Heo Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/memcontrol.h | 2 ++ include/linux/slab.h | 10 +++++++- mm/memcontrol.c | 63 ++++++++++++++++++++++++++++++++++++++++++++++ mm/slab.c | 3 +++ mm/slab.h | 23 +++++++++++++++++ mm/slub.c | 7 +++++- 6 files changed, 106 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 2298122e71ad..79fcf0cd7186 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -453,6 +453,8 @@ void memcg_update_array_size(int num_groups); struct kmem_cache * __memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp); +void mem_cgroup_destroy_cache(struct kmem_cache *cachep); + /** * memcg_kmem_newpage_charge: verify if a new kmem allocation is allowed. * @gfp: the gfp allocation flags. 
diff --git a/include/linux/slab.h b/include/linux/slab.h index c0fcf28c15b2..869efb8d2377 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -11,6 +11,8 @@ #include #include +#include + /* * Flags to pass to kmem_cache_create(). @@ -179,7 +181,6 @@ void kmem_cache_free(struct kmem_cache *, void *); #ifndef ARCH_SLAB_MINALIGN #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long) #endif - /* * This is the main placeholder for memcg-related information in kmem caches. * struct kmem_cache will hold a pointer to it, so the memory cost while @@ -197,6 +198,10 @@ void kmem_cache_free(struct kmem_cache *, void *); * @memcg: pointer to the memcg this cache belongs to * @list: list_head for the list of all caches in this memcg * @root_cache: pointer to the global, root cache, this cache was derived from + * @dead: set to true after the memcg dies; the cache may still be around. + * @nr_pages: number of pages that belongs to this cache. + * @destroy: worker to be called whenever we are ready, or believe we may be + * ready, to destroy this cache. */ struct memcg_cache_params { bool is_root_cache; @@ -206,6 +211,9 @@ struct memcg_cache_params { struct mem_cgroup *memcg; struct list_head list; struct kmem_cache *root_cache; + bool dead; + atomic_t nr_pages; + struct work_struct destroy; }; }; }; diff --git a/mm/memcontrol.c b/mm/memcontrol.c index cc13797d0fbc..270a36789859 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -2779,6 +2779,19 @@ static inline bool memcg_can_account_kmem(struct mem_cgroup *memcg) (memcg->kmem_account_flags & KMEM_ACCOUNTED_MASK); } +/* + * This is a bit cumbersome, but it is rarely used and avoids a backpointer + * in the memcg_cache_params struct. + */ +static struct kmem_cache *memcg_params_to_cache(struct memcg_cache_params *p) +{ + struct kmem_cache *cachep; + + VM_BUG_ON(p->is_root_cache); + cachep = p->root_cache; + return cachep->memcg_params->memcg_caches[memcg_cache_id(p->memcg)]; +} + static int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp, u64 size) { struct res_counter *fail_res; @@ -3056,6 +3069,31 @@ static inline void memcg_resume_kmem_account(void) current->memcg_kmem_skip_account--; } +static void kmem_cache_destroy_work_func(struct work_struct *w) +{ + struct kmem_cache *cachep; + struct memcg_cache_params *p; + + p = container_of(w, struct memcg_cache_params, destroy); + + cachep = memcg_params_to_cache(p); + + if (!atomic_read(&cachep->memcg_params->nr_pages)) + kmem_cache_destroy(cachep); +} + +void mem_cgroup_destroy_cache(struct kmem_cache *cachep) +{ + if (!cachep->memcg_params->dead) + return; + + /* + * We have to defer the actual destroying to a workqueue, because + * we might currently be in a context that cannot sleep. 
+ */ + schedule_work(&cachep->memcg_params->destroy); +} + static char *memcg_cache_name(struct mem_cgroup *memcg, struct kmem_cache *s) { char *name; @@ -3125,6 +3163,7 @@ static struct kmem_cache *memcg_create_kmem_cache(struct mem_cgroup *memcg, mem_cgroup_get(memcg); new_cachep->memcg_params->root_cache = cachep; + atomic_set(&new_cachep->memcg_params->nr_pages , 0); cachep->memcg_params->memcg_caches[idx] = new_cachep; /* @@ -3143,6 +3182,25 @@ struct create_work { struct work_struct work; }; +static void mem_cgroup_destroy_all_caches(struct mem_cgroup *memcg) +{ + struct kmem_cache *cachep; + struct memcg_cache_params *params; + + if (!memcg_kmem_is_active(memcg)) + return; + + mutex_lock(&memcg->slab_caches_mutex); + list_for_each_entry(params, &memcg->memcg_slab_caches, list) { + cachep = memcg_params_to_cache(params); + cachep->memcg_params->dead = true; + INIT_WORK(&cachep->memcg_params->destroy, + kmem_cache_destroy_work_func); + schedule_work(&cachep->memcg_params->destroy); + } + mutex_unlock(&memcg->slab_caches_mutex); +} + static void memcg_create_cache_work_func(struct work_struct *w) { struct create_work *cw; @@ -3358,6 +3416,10 @@ void __memcg_kmem_uncharge_pages(struct page *page, int order) VM_BUG_ON(mem_cgroup_is_root(memcg)); memcg_uncharge_kmem(memcg, PAGE_SIZE << order); } +#else +static inline void mem_cgroup_destroy_all_caches(struct mem_cgroup *memcg) +{ +} #endif /* CONFIG_MEMCG_KMEM */ #ifdef CONFIG_TRANSPARENT_HUGEPAGE @@ -5975,6 +6037,7 @@ static void mem_cgroup_css_offline(struct cgroup *cont) struct mem_cgroup *memcg = mem_cgroup_from_cont(cont); mem_cgroup_reparent_charges(memcg); + mem_cgroup_destroy_all_caches(memcg); } static void mem_cgroup_css_free(struct cgroup *cont) diff --git a/mm/slab.c b/mm/slab.c index e265865e8700..7467343f9fe7 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -1895,6 +1895,7 @@ static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid) if (page->pfmemalloc) SetPageSlabPfmemalloc(page + i); } + memcg_bind_pages(cachep, cachep->gfporder); if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) { kmemcheck_alloc_shadow(page, cachep->gfporder, flags, nodeid); @@ -1931,6 +1932,8 @@ static void kmem_freepages(struct kmem_cache *cachep, void *addr) __ClearPageSlab(page); page++; } + + memcg_release_pages(cachep, cachep->gfporder); if (current->reclaim_state) current->reclaim_state->reclaimed_slab += nr_freed; free_memcg_kmem_pages((unsigned long)addr, cachep->gfporder); diff --git a/mm/slab.h b/mm/slab.h index c95e922b166d..43d8a38b534f 100644 --- a/mm/slab.h +++ b/mm/slab.h @@ -117,6 +117,21 @@ static inline bool cache_match_memcg(struct kmem_cache *cachep, (cachep->memcg_params->memcg == memcg); } +static inline void memcg_bind_pages(struct kmem_cache *s, int order) +{ + if (!is_root_cache(s)) + atomic_add(1 << order, &s->memcg_params->nr_pages); +} + +static inline void memcg_release_pages(struct kmem_cache *s, int order) +{ + if (is_root_cache(s)) + return; + + if (atomic_sub_and_test((1 << order), &s->memcg_params->nr_pages)) + mem_cgroup_destroy_cache(s); +} + static inline bool slab_equal_or_root(struct kmem_cache *s, struct kmem_cache *p) { @@ -135,6 +150,14 @@ static inline bool cache_match_memcg(struct kmem_cache *cachep, return true; } +static inline void memcg_bind_pages(struct kmem_cache *s, int order) +{ +} + +static inline void memcg_release_pages(struct kmem_cache *s, int order) +{ +} + static inline bool slab_equal_or_root(struct kmem_cache *s, struct kmem_cache *p) { diff --git a/mm/slub.c b/mm/slub.c 
index ef39e872b8eb..692177bebdf0 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -1344,6 +1344,7 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node) void *start; void *last; void *p; + int order; BUG_ON(flags & GFP_SLAB_BUG_MASK); @@ -1352,7 +1353,9 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node) if (!page) goto out; + order = compound_order(page); inc_slabs_node(s, page_to_nid(page), page->objects); + memcg_bind_pages(s, order); page->slab_cache = s; __SetPageSlab(page); if (page->pfmemalloc) @@ -1361,7 +1364,7 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node) start = page_address(page); if (unlikely(s->flags & SLAB_POISON)) - memset(start, POISON_INUSE, PAGE_SIZE << compound_order(page)); + memset(start, POISON_INUSE, PAGE_SIZE << order); last = start; for_each_object(p, s, start, page->objects) { @@ -1402,6 +1405,8 @@ static void __free_slab(struct kmem_cache *s, struct page *page) __ClearPageSlabPfmemalloc(page); __ClearPageSlab(page); + + memcg_release_pages(s, order); reset_page_mapcount(page); if (current->reclaim_state) current->reclaim_state->reclaimed_slab += pages; -- cgit v1.2.3 From 7cf2798240a2a2230cb16a391beef98d8a7ad362 Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Tue, 18 Dec 2012 14:22:55 -0800 Subject: memcg/sl[au]b: track all the memcg children of a kmem_cache This enables us to remove all the children of a kmem_cache being destroyed, if for example the kernel module it's being used in gets unloaded. Otherwise, the children will still point to the destroyed parent. Signed-off-by: Suleiman Souhlal Signed-off-by: Glauber Costa Cc: Christoph Lameter Cc: David Rientjes Cc: Frederic Weisbecker Cc: Greg Thelen Cc: Johannes Weiner Cc: JoonSoo Kim Cc: KAMEZAWA Hiroyuki Cc: Mel Gorman Cc: Michal Hocko Cc: Pekka Enberg Cc: Rik van Riel Cc: Tejun Heo Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/memcontrol.h | 5 +++++ mm/memcontrol.c | 49 ++++++++++++++++++++++++++++++++++++++++++++-- mm/slab_common.c | 3 +++ 3 files changed, 55 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 79fcf0cd7186..e119f3ef793c 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -454,6 +454,7 @@ struct kmem_cache * __memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp); void mem_cgroup_destroy_cache(struct kmem_cache *cachep); +void kmem_cache_destroy_memcg_children(struct kmem_cache *s); /** * memcg_kmem_newpage_charge: verify if a new kmem allocation is allowed. 
@@ -601,6 +602,10 @@ memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp) { return cachep; } + +static inline void kmem_cache_destroy_memcg_children(struct kmem_cache *s) +{ +} #endif /* CONFIG_MEMCG_KMEM */ #endif /* _LINUX_MEMCONTROL_H */ diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 270a36789859..4b68ec2c8df6 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -2772,6 +2772,8 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg, memcg_check_events(memcg, page); } +static DEFINE_MUTEX(set_limit_mutex); + #ifdef CONFIG_MEMCG_KMEM static inline bool memcg_can_account_kmem(struct mem_cgroup *memcg) { @@ -3176,6 +3178,51 @@ out: return new_cachep; } +void kmem_cache_destroy_memcg_children(struct kmem_cache *s) +{ + struct kmem_cache *c; + int i; + + if (!s->memcg_params) + return; + if (!s->memcg_params->is_root_cache) + return; + + /* + * If the cache is being destroyed, we trust that there is no one else + * requesting objects from it. Even if there are, the sanity checks in + * kmem_cache_destroy should caught this ill-case. + * + * Still, we don't want anyone else freeing memcg_caches under our + * noses, which can happen if a new memcg comes to life. As usual, + * we'll take the set_limit_mutex to protect ourselves against this. + */ + mutex_lock(&set_limit_mutex); + for (i = 0; i < memcg_limited_groups_array_size; i++) { + c = s->memcg_params->memcg_caches[i]; + if (!c) + continue; + + /* + * We will now manually delete the caches, so to avoid races + * we need to cancel all pending destruction workers and + * proceed with destruction ourselves. + * + * kmem_cache_destroy() will call kmem_cache_shrink internally, + * and that could spawn the workers again: it is likely that + * the cache still have active pages until this very moment. + * This would lead us back to mem_cgroup_destroy_cache. + * + * But that will not execute at all if the "dead" flag is not + * set, so flip it down to guarantee we are in control. + */ + c->memcg_params->dead = false; + cancel_delayed_work_sync(&c->memcg_params->destroy); + kmem_cache_destroy(c); + } + mutex_unlock(&set_limit_mutex); +} + struct create_work { struct mem_cgroup *memcg; struct kmem_cache *cachep; @@ -4284,8 +4331,6 @@ void mem_cgroup_print_bad_page(struct page *page) } #endif -static DEFINE_MUTEX(set_limit_mutex); - static int mem_cgroup_resize_limit(struct mem_cgroup *memcg, unsigned long long val) { diff --git a/mm/slab_common.c b/mm/slab_common.c index 1c424b6511bf..080a43804bf1 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -249,6 +249,9 @@ EXPORT_SYMBOL(kmem_cache_create); void kmem_cache_destroy(struct kmem_cache *s) { + /* Destroy all the children caches if we aren't a memcg cache */ + kmem_cache_destroy_memcg_children(s); + get_online_cpus(); mutex_lock(&slab_mutex); s->refcount--; -- cgit v1.2.3 From 749c54151a6e5b229e4ae067dbc651e54b161fbc Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Tue, 18 Dec 2012 14:23:01 -0800 Subject: memcg: aggregate memcg cache values in slabinfo When we create caches in memcgs, we need to display their usage information somewhere. We'll adopt a scheme similar to /proc/meminfo, with aggregate totals shown in the global file, and per-group information stored in the group itself. For the time being, only reads are allowed in the per-group cache. 
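The aggregation described above reduces to walking the root cache's memcg_caches array and folding each child's counters into the root's slabinfo line. A condensed sketch of that loop follows; the actual implementation is memcg_accumulate_slabinfo() in mm/slab_common.c, added below, and this sketch assumes the mm/slab.h helpers (cache_from_memcg, get_slabinfo) introduced by this series.

static void example_accumulate(struct kmem_cache *root, struct slabinfo *info)
{
	int i;

	for_each_memcg_cache_index(i) {
		struct slabinfo child;
		struct kmem_cache *c = cache_from_memcg(root, i);

		if (!c)
			continue;

		memset(&child, 0, sizeof(child));
		get_slabinfo(c, &child);

		/* Children show up as part of their root cache's totals. */
		info->active_objs += child.active_objs;
		info->num_objs += child.num_objs;
		info->active_slabs += child.active_slabs;
		info->num_slabs += child.num_slabs;
	}
}

Per-group numbers, by contrast, are exposed read-only through the new memory.kmem.slabinfo file.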
Signed-off-by: Glauber Costa Cc: Christoph Lameter Cc: David Rientjes Cc: Frederic Weisbecker Cc: Greg Thelen Cc: Johannes Weiner Cc: JoonSoo Kim Cc: KAMEZAWA Hiroyuki Cc: Mel Gorman Cc: Michal Hocko Cc: Pekka Enberg Cc: Rik van Riel Cc: Suleiman Souhlal Cc: Tejun Heo Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/memcontrol.h | 8 ++++++++ include/linux/slab.h | 4 ++++ mm/memcontrol.c | 30 +++++++++++++++++++++++++++++- mm/slab.h | 27 +++++++++++++++++++++++++++ mm/slab_common.c | 44 ++++++++++++++++++++++++++++++++++++++++---- 5 files changed, 108 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index e119f3ef793c..8dc7c746b44f 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -420,6 +420,11 @@ static inline void sock_release_memcg(struct sock *sk) #ifdef CONFIG_MEMCG_KMEM extern struct static_key memcg_kmem_enabled_key; + +extern int memcg_limited_groups_array_size; +#define for_each_memcg_cache_index(_idx) \ + for ((_idx) = 0; i < memcg_limited_groups_array_size; (_idx)++) + static inline bool memcg_kmem_enabled(void) { return static_key_false(&memcg_kmem_enabled_key); @@ -557,6 +562,9 @@ memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp) return __memcg_kmem_get_cache(cachep, gfp); } #else +#define for_each_memcg_cache_index(_idx) \ + for (; NULL; ) + static inline bool memcg_kmem_enabled(void) { return false; diff --git a/include/linux/slab.h b/include/linux/slab.h index 869efb8d2377..b9278663f22a 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -220,6 +220,10 @@ struct memcg_cache_params { int memcg_update_all_caches(int num_memcgs); +struct seq_file; +int cache_show(struct kmem_cache *s, struct seq_file *m); +void print_slabinfo_header(struct seq_file *m); + /* * Common kmalloc functions provided by all allocators */ diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 7633e0d429e0..a32d83c2e353 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -572,7 +572,8 @@ static void disarm_sock_keys(struct mem_cgroup *memcg) * increase it. */ static DEFINE_IDA(kmem_limited_groups); -static int memcg_limited_groups_array_size; +int memcg_limited_groups_array_size; + /* * MIN_SIZE is different than 1, because we would like to avoid going through * the alloc/free process all the time. 
In a small machine, 4 kmem-limited @@ -2794,6 +2795,27 @@ static struct kmem_cache *memcg_params_to_cache(struct memcg_cache_params *p) return cachep->memcg_params->memcg_caches[memcg_cache_id(p->memcg)]; } +#ifdef CONFIG_SLABINFO +static int mem_cgroup_slabinfo_read(struct cgroup *cont, struct cftype *cft, + struct seq_file *m) +{ + struct mem_cgroup *memcg = mem_cgroup_from_cont(cont); + struct memcg_cache_params *params; + + if (!memcg_can_account_kmem(memcg)) + return -EIO; + + print_slabinfo_header(m); + + mutex_lock(&memcg->slab_caches_mutex); + list_for_each_entry(params, &memcg->memcg_slab_caches, list) + cache_show(memcg_params_to_cache(params), m); + mutex_unlock(&memcg->slab_caches_mutex); + + return 0; +} +#endif + static int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp, u64 size) { struct res_counter *fail_res; @@ -5822,6 +5844,12 @@ static struct cftype mem_cgroup_files[] = { .trigger = mem_cgroup_reset, .read = mem_cgroup_read, }, +#ifdef CONFIG_SLABINFO + { + .name = "kmem.slabinfo", + .read_seq_string = mem_cgroup_slabinfo_read, + }, +#endif #endif { }, /* terminate */ }; diff --git a/mm/slab.h b/mm/slab.h index 43d8a38b534f..ec5dae1c8e75 100644 --- a/mm/slab.h +++ b/mm/slab.h @@ -138,6 +138,23 @@ static inline bool slab_equal_or_root(struct kmem_cache *s, return (p == s) || (s->memcg_params && (p == s->memcg_params->root_cache)); } + +/* + * We use suffixes to the name in memcg because we can't have caches + * created in the system with the same name. But when we print them + * locally, better refer to them with the base name + */ +static inline const char *cache_name(struct kmem_cache *s) +{ + if (!is_root_cache(s)) + return s->memcg_params->root_cache->name; + return s->name; +} + +static inline struct kmem_cache *cache_from_memcg(struct kmem_cache *s, int idx) +{ + return s->memcg_params->memcg_caches[idx]; +} #else static inline bool is_root_cache(struct kmem_cache *s) { @@ -163,6 +180,16 @@ static inline bool slab_equal_or_root(struct kmem_cache *s, { return true; } + +static inline const char *cache_name(struct kmem_cache *s) +{ + return s->name; +} + +static inline struct kmem_cache *cache_from_memcg(struct kmem_cache *s, int idx) +{ + return NULL; +} #endif static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x) diff --git a/mm/slab_common.c b/mm/slab_common.c index 080a43804bf1..081f1b8d9a7b 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -322,7 +322,7 @@ struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size, #ifdef CONFIG_SLABINFO -static void print_slabinfo_header(struct seq_file *m) +void print_slabinfo_header(struct seq_file *m) { /* * Output format version, so at least we can change it @@ -366,16 +366,43 @@ static void s_stop(struct seq_file *m, void *p) mutex_unlock(&slab_mutex); } -static int s_show(struct seq_file *m, void *p) +static void +memcg_accumulate_slabinfo(struct kmem_cache *s, struct slabinfo *info) +{ + struct kmem_cache *c; + struct slabinfo sinfo; + int i; + + if (!is_root_cache(s)) + return; + + for_each_memcg_cache_index(i) { + c = cache_from_memcg(s, i); + if (!c) + continue; + + memset(&sinfo, 0, sizeof(sinfo)); + get_slabinfo(c, &sinfo); + + info->active_slabs += sinfo.active_slabs; + info->num_slabs += sinfo.num_slabs; + info->shared_avail += sinfo.shared_avail; + info->active_objs += sinfo.active_objs; + info->num_objs += sinfo.num_objs; + } +} + +int cache_show(struct kmem_cache *s, struct seq_file *m) { - struct kmem_cache *s = list_entry(p, struct kmem_cache, list); 
struct slabinfo sinfo; memset(&sinfo, 0, sizeof(sinfo)); get_slabinfo(s, &sinfo); + memcg_accumulate_slabinfo(s, &sinfo); + seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d", - s->name, sinfo.active_objs, sinfo.num_objs, s->size, + cache_name(s), sinfo.active_objs, sinfo.num_objs, s->size, sinfo.objects_per_slab, (1 << sinfo.cache_order)); seq_printf(m, " : tunables %4u %4u %4u", @@ -387,6 +414,15 @@ static int s_show(struct seq_file *m, void *p) return 0; } +static int s_show(struct seq_file *m, void *p) +{ + struct kmem_cache *s = list_entry(p, struct kmem_cache, list); + + if (!is_root_cache(s)) + return 0; + return cache_show(s, m); +} + /* * slabinfo_op - iterator that generates /proc/slabinfo * -- cgit v1.2.3 From 943a451a87d229ca564a27274b58eaeae35fde5d Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Tue, 18 Dec 2012 14:23:03 -0800 Subject: slab: propagate tunable values SLAB allows us to tune a particular cache behavior with tunables. When creating a new memcg cache copy, we'd like to preserve any tunables the parent cache already had. This could be done by an explicit call to do_tune_cpucache() after the cache is created. But this is not very convenient now that the caches are created from common code, since this function is SLAB-specific. Another method of doing that is taking advantage of the fact that do_tune_cpucache() is always called from enable_cpucache(), which is called at cache initialization. We can just preset the values, and then things work as expected. It can also happen that a root cache has its tunables updated during normal system operation. In this case, we will propagate the change to all caches that are already active. This change will require us to move the assignment of root_cache in memcg_params a bit earlier. We need this to be already set - which memcg_kmem_register_cache will do - when we reach __kmem_cache_create() Signed-off-by: Glauber Costa Cc: Christoph Lameter Cc: David Rientjes Cc: Frederic Weisbecker Cc: Greg Thelen Cc: Johannes Weiner Cc: JoonSoo Kim Cc: KAMEZAWA Hiroyuki Cc: Mel Gorman Cc: Michal Hocko Cc: Pekka Enberg Cc: Rik van Riel Cc: Suleiman Souhlal Cc: Tejun Heo Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/memcontrol.h | 8 +++++--- include/linux/slab.h | 2 +- mm/memcontrol.c | 10 ++++++---- mm/slab.c | 44 +++++++++++++++++++++++++++++++++++++++++--- mm/slab.h | 12 ++++++++++++ mm/slab_common.c | 7 ++++--- 6 files changed, 69 insertions(+), 14 deletions(-) (limited to 'include/linux') diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 8dc7c746b44f..ea02ff970836 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -448,7 +448,8 @@ void __memcg_kmem_commit_charge(struct page *page, void __memcg_kmem_uncharge_pages(struct page *page, int order); int memcg_cache_id(struct mem_cgroup *memcg); -int memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *s); +int memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *s, + struct kmem_cache *root_cache); void memcg_release_cache(struct kmem_cache *cachep); void memcg_cache_list_add(struct mem_cgroup *memcg, struct kmem_cache *cachep); @@ -590,8 +591,9 @@ static inline int memcg_cache_id(struct mem_cgroup *memcg) return -1; } -static inline int memcg_register_cache(struct mem_cgroup *memcg, - struct kmem_cache *s) +static inline int +memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *s, + struct kmem_cache *root_cache) { return 0; } diff --git a/include/linux/slab.h 
b/include/linux/slab.h index b9278663f22a..5d168d7e0a28 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -130,7 +130,7 @@ struct kmem_cache *kmem_cache_create(const char *, size_t, size_t, void (*)(void *)); struct kmem_cache * kmem_cache_create_memcg(struct mem_cgroup *, const char *, size_t, size_t, - unsigned long, void (*)(void *)); + unsigned long, void (*)(void *), struct kmem_cache *); void kmem_cache_destroy(struct kmem_cache *); int kmem_cache_shrink(struct kmem_cache *); void kmem_cache_free(struct kmem_cache *, void *); diff --git a/mm/memcontrol.c b/mm/memcontrol.c index a32d83c2e353..f3009b4bae51 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -3012,7 +3012,8 @@ int memcg_update_cache_size(struct kmem_cache *s, int num_groups) return 0; } -int memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *s) +int memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *s, + struct kmem_cache *root_cache) { size_t size = sizeof(struct memcg_cache_params); @@ -3026,8 +3027,10 @@ int memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *s) if (!s->memcg_params) return -ENOMEM; - if (memcg) + if (memcg) { s->memcg_params->memcg = memcg; + s->memcg_params->root_cache = root_cache; + } return 0; } @@ -3186,7 +3189,7 @@ static struct kmem_cache *kmem_cache_dup(struct mem_cgroup *memcg, return NULL; new = kmem_cache_create_memcg(memcg, name, s->object_size, s->align, - (s->flags & ~SLAB_PANIC), s->ctor); + (s->flags & ~SLAB_PANIC), s->ctor, s); if (new) new->allocflags |= __GFP_KMEMCG; @@ -3226,7 +3229,6 @@ static struct kmem_cache *memcg_create_kmem_cache(struct mem_cgroup *memcg, } mem_cgroup_get(memcg); - new_cachep->memcg_params->root_cache = cachep; atomic_set(&new_cachep->memcg_params->nr_pages , 0); cachep->memcg_params->memcg_caches[idx] = new_cachep; diff --git a/mm/slab.c b/mm/slab.c index 7467343f9fe7..4dcbf96a77b4 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -4041,7 +4041,7 @@ static void do_ccupdate_local(void *info) } /* Always called with the slab_mutex held */ -static int do_tune_cpucache(struct kmem_cache *cachep, int limit, +static int __do_tune_cpucache(struct kmem_cache *cachep, int limit, int batchcount, int shared, gfp_t gfp) { struct ccupdate_struct *new; @@ -4084,12 +4084,48 @@ static int do_tune_cpucache(struct kmem_cache *cachep, int limit, return alloc_kmemlist(cachep, gfp); } +static int do_tune_cpucache(struct kmem_cache *cachep, int limit, + int batchcount, int shared, gfp_t gfp) +{ + int ret; + struct kmem_cache *c = NULL; + int i = 0; + + ret = __do_tune_cpucache(cachep, limit, batchcount, shared, gfp); + + if (slab_state < FULL) + return ret; + + if ((ret < 0) || !is_root_cache(cachep)) + return ret; + + for_each_memcg_cache_index(i) { + c = cache_from_memcg(cachep, i); + if (c) + /* return value determined by the parent cache only */ + __do_tune_cpucache(c, limit, batchcount, shared, gfp); + } + + return ret; +} + /* Called with slab_mutex held always */ static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp) { int err; - int limit, shared; + int limit = 0; + int shared = 0; + int batchcount = 0; + + if (!is_root_cache(cachep)) { + struct kmem_cache *root = memcg_root_cache(cachep); + limit = root->limit; + shared = root->shared; + batchcount = root->batchcount; + } + if (limit && shared && batchcount) + goto skip_setup; /* * The head array serves three purposes: * - create a LIFO ordering, i.e. 
return objects that are cache-warm @@ -4131,7 +4167,9 @@ static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp) if (limit > 32) limit = 32; #endif - err = do_tune_cpucache(cachep, limit, (limit + 1) / 2, shared, gfp); + batchcount = (limit + 1) / 2; +skip_setup: + err = do_tune_cpucache(cachep, limit, batchcount, shared, gfp); if (err) printk(KERN_ERR "enable_cpucache failed for %s, error %d.\n", cachep->name, -err); diff --git a/mm/slab.h b/mm/slab.h index ec5dae1c8e75..34a98d642196 100644 --- a/mm/slab.h +++ b/mm/slab.h @@ -155,6 +155,13 @@ static inline struct kmem_cache *cache_from_memcg(struct kmem_cache *s, int idx) { return s->memcg_params->memcg_caches[idx]; } + +static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s) +{ + if (is_root_cache(s)) + return s; + return s->memcg_params->root_cache; +} #else static inline bool is_root_cache(struct kmem_cache *s) { @@ -190,6 +197,11 @@ static inline struct kmem_cache *cache_from_memcg(struct kmem_cache *s, int idx) { return NULL; } + +static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s) +{ + return s; +} #endif static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x) diff --git a/mm/slab_common.c b/mm/slab_common.c index 081f1b8d9a7b..3f3cd97d3fdf 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -164,7 +164,8 @@ unsigned long calculate_alignment(unsigned long flags, struct kmem_cache * kmem_cache_create_memcg(struct mem_cgroup *memcg, const char *name, size_t size, - size_t align, unsigned long flags, void (*ctor)(void *)) + size_t align, unsigned long flags, void (*ctor)(void *), + struct kmem_cache *parent_cache) { struct kmem_cache *s = NULL; int err = 0; @@ -193,7 +194,7 @@ kmem_cache_create_memcg(struct mem_cgroup *memcg, const char *name, size_t size, s->align = calculate_alignment(flags, align, size); s->ctor = ctor; - if (memcg_register_cache(memcg, s)) { + if (memcg_register_cache(memcg, s, parent_cache)) { kmem_cache_free(kmem_cache, s); err = -ENOMEM; goto out_locked; @@ -243,7 +244,7 @@ struct kmem_cache * kmem_cache_create(const char *name, size_t size, size_t align, unsigned long flags, void (*ctor)(void *)) { - return kmem_cache_create_memcg(NULL, name, size, align, flags, ctor); + return kmem_cache_create_memcg(NULL, name, size, align, flags, ctor, NULL); } EXPORT_SYMBOL(kmem_cache_create); -- cgit v1.2.3 From 107dab5c92d5f9c3afe962036e47c207363255c7 Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Tue, 18 Dec 2012 14:23:05 -0800 Subject: slub: slub-specific propagation changes SLUB allows us to tune a particular cache behavior with sysfs-based tunables. When creating a new memcg cache copy, we'd like to preserve any tunables the parent cache already had. This can be done by tapping into the store attribute function provided by the allocator. We of course don't need to mess with read-only fields. Since the attributes can have multiple types and are stored internally by sysfs, the best strategy is to issue a ->show() in the root cache, and then ->store() in the memcg cache. The drawback of that, is that sysfs can allocate up to a page in buffering for show(), that we are likely not to need, but also can't guarantee. To avoid always allocating a page for that, we can update the caches at store time with the maximum attribute size ever stored to the root cache. We will then get a buffer big enough to hold it. The corolary to this, is that if no stores happened, nothing will be propagated. 
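Roughly, the creation-time propagation (memcg_propagate_slab_attrs() in the diff below) boils down to replaying every writable sysfs attribute of the root cache into the new copy; a condensed sketch, with "root" and "child" standing in for the root cache and its memcg copy:

	for (i = 0; i < ARRAY_SIZE(slab_attrs); i++) {
		struct slab_attribute *attr = to_slab_attr(slab_attrs[i]);

		if (!attr || !attr->show || !attr->store)
			continue;			/* skip read-only attributes */
		attr->show(root, buf);			/* current value of the root cache */
		attr->store(child, buf, strlen(buf));	/* apply it to the memcg copy */
	}

The buffer-sizing trick described above only matters here: because max_attr_size records the largest value ever written to the root cache, a small on-stack buffer usually suffices, and the page-sized allocation is only the fallback.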
It can also happen that a root cache has its tunables updated during normal system operation. In this case, we will propagate the change to all caches that are already active. [akpm@linux-foundation.org: tweak code to avoid __maybe_unused] Signed-off-by: Glauber Costa Cc: Christoph Lameter Cc: David Rientjes Cc: Frederic Weisbecker Cc: Greg Thelen Cc: Johannes Weiner Cc: JoonSoo Kim Cc: KAMEZAWA Hiroyuki Cc: Mel Gorman Cc: Michal Hocko Cc: Pekka Enberg Cc: Rik van Riel Cc: Suleiman Souhlal Cc: Tejun Heo Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/slub_def.h | 1 + mm/slub.c | 76 +++++++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 76 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h index 364ba6c9fe21..9db4825cd393 100644 --- a/include/linux/slub_def.h +++ b/include/linux/slub_def.h @@ -103,6 +103,7 @@ struct kmem_cache { #endif #ifdef CONFIG_MEMCG_KMEM struct memcg_cache_params *memcg_params; + int max_attr_size; /* for propagation, maximum size of a stored attr */ #endif #ifdef CONFIG_NUMA diff --git a/mm/slub.c b/mm/slub.c index 692177bebdf0..21c94d9695ec 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -201,13 +201,14 @@ enum track_item { TRACK_ALLOC, TRACK_FREE }; static int sysfs_slab_add(struct kmem_cache *); static int sysfs_slab_alias(struct kmem_cache *, const char *); static void sysfs_slab_remove(struct kmem_cache *); - +static void memcg_propagate_slab_attrs(struct kmem_cache *s); #else static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; } static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p) { return 0; } static inline void sysfs_slab_remove(struct kmem_cache *s) { } +static inline void memcg_propagate_slab_attrs(struct kmem_cache *s) { } #endif static inline void stat(const struct kmem_cache *s, enum stat_item si) @@ -3865,6 +3866,7 @@ int __kmem_cache_create(struct kmem_cache *s, unsigned long flags) if (slab_state <= UP) return 0; + memcg_propagate_slab_attrs(s); mutex_unlock(&slab_mutex); err = sysfs_slab_add(s); mutex_lock(&slab_mutex); @@ -5098,10 +5100,82 @@ static ssize_t slab_attr_store(struct kobject *kobj, return -EIO; err = attribute->store(s, buf, len); +#ifdef CONFIG_MEMCG_KMEM + if (slab_state >= FULL && err >= 0 && is_root_cache(s)) { + int i; + + mutex_lock(&slab_mutex); + if (s->max_attr_size < len) + s->max_attr_size = len; + for_each_memcg_cache_index(i) { + struct kmem_cache *c = cache_from_memcg(s, i); + /* + * This function's return value is determined by the + * parent cache only + */ + if (c) + attribute->store(c, buf, len); + } + mutex_unlock(&slab_mutex); + } +#endif return err; } +static void memcg_propagate_slab_attrs(struct kmem_cache *s) +{ +#ifdef CONFIG_MEMCG_KMEM + int i; + char *buffer = NULL; + + if (!is_root_cache(s)) + return; + + /* + * This mean this cache had no attribute written. Therefore, no point + * in copying default values around + */ + if (!s->max_attr_size) + return; + + for (i = 0; i < ARRAY_SIZE(slab_attrs); i++) { + char mbuf[64]; + char *buf; + struct slab_attribute *attr = to_slab_attr(slab_attrs[i]); + + if (!attr || !attr->store || !attr->show) + continue; + + /* + * It is really bad that we have to allocate here, so we will + * do it only as a fallback. If we actually allocate, though, + * we can just use the allocated buffer until the end. 
+ * + * Most of the slub attributes will tend to be very small in + * size, but sysfs allows buffers up to a page, so they can + * theoretically happen. + */ + if (buffer) + buf = buffer; + else if (s->max_attr_size < ARRAY_SIZE(mbuf)) + buf = mbuf; + else { + buffer = (char *) get_zeroed_page(GFP_KERNEL); + if (WARN_ON(!buffer)) + continue; + buf = buffer; + } + + attr->show(s->memcg_params->root_cache, buf); + attr->store(s, buf, strlen(buf)); + } + + if (buffer) + free_page((unsigned long)buffer); +#endif +} + static const struct sysfs_ops slab_sysfs_ops = { .show = slab_attr_show, .store = slab_attr_store, -- cgit v1.2.3 From ebe945c27628fca03723582eba138acc2e2f3d15 Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Tue, 18 Dec 2012 14:23:10 -0800 Subject: memcg: add comments clarifying aspects of cache attribute propagation This patch clarifies two aspects of cache attribute propagation. First, the expected context for the for_each_memcg_cache macro in memcontrol.h. The usages already in the codebase are safe. In mm/slub.c, it is trivially safe because the lock is acquired right before the loop. In mm/slab.c, it is less so: the lock is acquired by an outer function a few steps back in the stack, so a VM_BUG_ON() is added to make sure it is indeed safe. A comment is also added to detail why we are returning the value of the parent cache and ignoring the children's when we propagate the attributes. Signed-off-by: Glauber Costa Cc: Michal Hocko Cc: Kamezawa Hiroyuki Cc: Johannes Weiner Acked-by: David Rientjes Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/memcontrol.h | 6 ++++++ mm/slab.c | 1 + mm/slub.c | 21 +++++++++++++++++---- 3 files changed, 24 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index ea02ff970836..0108a56f814e 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -422,6 +422,12 @@ static inline void sock_release_memcg(struct sock *sk) extern struct static_key memcg_kmem_enabled_key; extern int memcg_limited_groups_array_size; + +/* + * Helper macro to loop through all memcg-specific caches. Callers must still + * check if the cache is valid (it is either valid or NULL). + * the slab_mutex must be held when looping through those caches + */ #define for_each_memcg_cache_index(_idx) \ for ((_idx) = 0; i < memcg_limited_groups_array_size; (_idx)++) diff --git a/mm/slab.c b/mm/slab.c index 4dcbf96a77b4..e7667a3584bc 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -4099,6 +4099,7 @@ static int do_tune_cpucache(struct kmem_cache *cachep, int limit, if ((ret < 0) || !is_root_cache(cachep)) return ret; + VM_BUG_ON(!mutex_is_locked(&slab_mutex)); for_each_memcg_cache_index(i) { c = cache_from_memcg(cachep, i); if (c) diff --git a/mm/slub.c b/mm/slub.c index 21c94d9695ec..efe2cffc29b0 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -5108,12 +5108,25 @@ static ssize_t slab_attr_store(struct kobject *kobj, if (s->max_attr_size < len) s->max_attr_size = len; + /* + * This is a best effort propagation, so this function's return + * value will be determined by the parent cache only. This is + * basically because not all attributes will have a well + * defined semantics for rollbacks - most of the actions will + * have permanent effects. + * + * Returning the error value of any of the children that fail + * is not 100 % defined, in the sense that users seeing the + * error code won't be able to know anything about the state of + * the cache. 
+ * + * Only returning the error code for the parent cache at least + * has well defined semantics. The cache being written to + * directly either failed or succeeded, in which case we loop + * through the descendants with best-effort propagation. + */ for_each_memcg_cache_index(i) { struct kmem_cache *c = cache_from_memcg(s, i); - /* - * This function's return value is determined by the - * parent cache only - */ if (c) attribute->store(c, buf, len); } -- cgit v1.2.3 From 7179e7bf4592ac5a7b30257a7df6259ee81e51da Mon Sep 17 00:00:00 2001 From: Jianguo Wu Date: Tue, 18 Dec 2012 14:23:19 -0800 Subject: mm/hugetlb: create hugetlb cgroup file in hugetlb_init Build kernel with CONFIG_HUGETLBFS=y,CONFIG_HUGETLB_PAGE=y and CONFIG_CGROUP_HUGETLB=y, then specify hugepagesz=xx boot option, system will fail to boot. This failure is caused by following code path: setup_hugepagesz hugetlb_add_hstate hugetlb_cgroup_file_init cgroup_add_cftypes kzalloc <--slab is *not available* yet For this path, slab is not available yet, so memory allocated will be failed, and cause WARN_ON() in hugetlb_cgroup_file_init(). So I move hugetlb_cgroup_file_init() into hugetlb_init(). [akpm@linux-foundation.org: tweak coding-style, remove pointless __init on inlined function] [akpm@linux-foundation.org: fix warning] Signed-off-by: Jianguo Wu Signed-off-by: Jiang Liu Reviewed-by: Aneesh Kumar K.V Acked-by: Michal Hocko Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/hugetlb_cgroup.h | 5 ++--- mm/hugetlb.c | 11 +---------- mm/hugetlb_cgroup.c | 19 +++++++++++++++++-- 3 files changed, 20 insertions(+), 15 deletions(-) (limited to 'include/linux') diff --git a/include/linux/hugetlb_cgroup.h b/include/linux/hugetlb_cgroup.h index d73878c694b3..ce8217f7b5c2 100644 --- a/include/linux/hugetlb_cgroup.h +++ b/include/linux/hugetlb_cgroup.h @@ -62,7 +62,7 @@ extern void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages, struct page *page); extern void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages, struct hugetlb_cgroup *h_cg); -extern int hugetlb_cgroup_file_init(int idx) __init; +extern void hugetlb_cgroup_file_init(void) __init; extern void hugetlb_cgroup_migrate(struct page *oldhpage, struct page *newhpage); @@ -111,9 +111,8 @@ hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages, return; } -static inline int __init hugetlb_cgroup_file_init(int idx) +static inline void hugetlb_cgroup_file_init(void) { - return 0; } static inline void hugetlb_cgroup_migrate(struct page *oldhpage, diff --git a/mm/hugetlb.c b/mm/hugetlb.c index e5318c7793ae..4f3ea0b1e57c 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -1906,14 +1906,12 @@ static int __init hugetlb_init(void) default_hstate.max_huge_pages = default_hstate_max_huge_pages; hugetlb_init_hstates(); - gather_bootmem_prealloc(); - report_hugepages(); hugetlb_sysfs_init(); - hugetlb_register_all_nodes(); + hugetlb_cgroup_file_init(); return 0; } @@ -1943,13 +1941,6 @@ void __init hugetlb_add_hstate(unsigned order) h->next_nid_to_free = first_node(node_states[N_MEMORY]); snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB", huge_page_size(h)/1024); - /* - * Add cgroup control files only if the huge page consists - * of more than two normal pages. This is because we use - * page[2].lru.next for storing cgoup details. 
- */ - if (order >= HUGETLB_CGROUP_MIN_ORDER) - hugetlb_cgroup_file_init(hugetlb_max_hstate - 1); parsed_hstate = h; } diff --git a/mm/hugetlb_cgroup.c b/mm/hugetlb_cgroup.c index b5bde7a5c017..9cea7de22ffb 100644 --- a/mm/hugetlb_cgroup.c +++ b/mm/hugetlb_cgroup.c @@ -333,7 +333,7 @@ static char *mem_fmt(char *buf, int size, unsigned long hsize) return buf; } -int __init hugetlb_cgroup_file_init(int idx) +static void __init __hugetlb_cgroup_file_init(int idx) { char buf[32]; struct cftype *cft; @@ -375,7 +375,22 @@ int __init hugetlb_cgroup_file_init(int idx) WARN_ON(cgroup_add_cftypes(&hugetlb_subsys, h->cgroup_files)); - return 0; + return; +} + +void __init hugetlb_cgroup_file_init(void) +{ + struct hstate *h; + + for_each_hstate(h) { + /* + * Add cgroup control files only if the huge page consists + * of more than two normal pages. This is because we use + * page[2].lru.next for storing cgroup details. + */ + if (huge_page_order(h) >= HUGETLB_CGROUP_MIN_ORDER) + __hugetlb_cgroup_file_init(hstate_index(h)); + } } /* -- cgit v1.2.3 From 59771079c18c44e39106f0f30054025acafadb41 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Wed, 19 Dec 2012 07:18:35 -0800 Subject: blk: avoid divide-by-zero with zero discard granularity Commit 8dd2cb7e880d ("block: discard granularity might not be power of 2") changed a couple of 'binary and' operations into modulus operations. Which turned the harmless case of a zero discard_granularity into a possible divide-by-zero. The code also had a much more subtle bug: it was doing the modulus of a value in bytes using 'sector_t'. That was always conceptually wrong, but didn't actually matter back when the code assumed a power-of-two granularity: we only looked at the low bits anyway. But with potentially arbitrary sector numbers, using a 'sector_t' to express bytes is very very wrong: depending on configuration it limits the starting offset of the device to just 32 bits, and any overflow would result in a wrong value if the modulus wasn't a power-of-two. So re-write the code to not only protect against the divide-by-zero, but to do the starting sector arithmetic in sectors, and using the proper types. [ For any mathematicians out there: it also looks monumentally stupid to do the 'modulo granularity' operation *twice*, never mind having a "+ granularity" in the second modulus op. But that's the easiest way to avoid negative values or overflow, and it is how the original code was done. ] Reported-by: Ingo Molnar Reported-by: Doug Anderson Cc: Neil Brown Cc: Shaohua Li Acked-by: Jens Axboe Signed-off-by: Linus Torvalds --- include/linux/blkdev.h | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index acb4f7bbbd32..f94bc83011ed 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -1188,14 +1188,25 @@ static inline int queue_discard_alignment(struct request_queue *q) static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector_t sector) { - sector_t alignment = sector << 9; - alignment = sector_div(alignment, lim->discard_granularity); + unsigned int alignment, granularity, offset; if (!lim->max_discard_sectors) return 0; - alignment = lim->discard_granularity + lim->discard_alignment - alignment; - return sector_div(alignment, lim->discard_granularity); + /* Why are these in bytes, not sectors? 
*/ + alignment = lim->discard_alignment >> 9; + granularity = lim->discard_granularity >> 9; + if (!granularity) + return 0; + + /* Offset of the partition start in 'granularity' sectors */ + offset = sector_div(sector, granularity); + + /* And why do we do this modulus *again* in blkdev_issue_discard()? */ + offset = (granularity + alignment - offset) % granularity; + + /* Turn it back into bytes, gaah */ + return offset << 9; } static inline int bdev_discard_alignment(struct block_device *bdev) -- cgit v1.2.3 From ab28698d33af05abab0bcf8021eafe38f7434f24 Mon Sep 17 00:00:00 2001 From: Jonas Gorski Date: Wed, 19 Dec 2012 09:10:09 -0600 Subject: of: define struct device in of_platform.h if !OF_DEVICE and !OF_ADDRESS Fixes the following warning: include/linux/of_platform.h:106:13: warning: 'struct device' declared inside parameter list [enabled by default] include/linux/of_platform.h:106:13: warning: its scope is only this definition or declaration, which is probably not what you want [enabled by default] Signed-off-by: Jonas Gorski Signed-off-by: Rob Herring Signed-off-by: Grant Likely --- include/linux/of_platform.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/linux') diff --git a/include/linux/of_platform.h b/include/linux/of_platform.h index b47d2040c9f2..3863a4dbdf18 100644 --- a/include/linux/of_platform.h +++ b/include/linux/of_platform.h @@ -100,6 +100,7 @@ extern int of_platform_populate(struct device_node *root, #if !defined(CONFIG_OF_ADDRESS) struct of_dev_auxdata; +struct device; static inline int of_platform_populate(struct device_node *root, const struct of_device_id *matches, const struct of_dev_auxdata *lookup, -- cgit v1.2.3 From 3c439b5586e9200f7e6287ee77c175c4d5b0eeed Mon Sep 17 00:00:00 2001 From: Jack Morgenstein Date: Thu, 6 Dec 2012 17:12:00 +0000 Subject: mlx4_core: Allow choosing flow steering mode Device managed flow steering will be enabled only under administrator directive provided through setting the existing module parameter log_num_mgm_entry_size to -1 (if the device actually supports flow steering). If flow steering isn't requested or not available, the driver will use the value of log_num_mgm_entry_size and B0 steering. Signed-off-by: Jack Morgenstein Signed-off-by: Or Gerlitz Signed-off-by: Roland Dreier --- drivers/net/ethernet/mellanox/mlx4/main.c | 52 ++++++++++++++++++++++++++----- drivers/net/ethernet/mellanox/mlx4/mcg.c | 7 +---- drivers/net/ethernet/mellanox/mlx4/mlx4.h | 6 ++-- include/linux/mlx4/device.h | 1 + 4 files changed, 50 insertions(+), 16 deletions(-) (limited to 'include/linux') diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 680d81026cbd..e1bafffbc3b1 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -85,15 +85,15 @@ static int probe_vf; module_param(probe_vf, int, 0644); MODULE_PARM_DESC(probe_vf, "number of vfs to probe by pf driver (num_vfs > 0)"); -int mlx4_log_num_mgm_entry_size = 10; +int mlx4_log_num_mgm_entry_size = MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE; module_param_named(log_num_mgm_entry_size, mlx4_log_num_mgm_entry_size, int, 0444); MODULE_PARM_DESC(log_num_mgm_entry_size, "log mgm size, that defines the num" " of qp per mcg, for example:" - " 10 gives 248.range: 9<=" + " 10 gives 248.range: 7 <=" " log_num_mgm_entry_size <= 12." 
- " Not in use with device managed" - " flow steering"); + " To activate device managed" + " flow steering when available, set to -1"); static bool enable_64b_cqe_eqe; module_param(enable_64b_cqe_eqe, bool, 0444); @@ -1318,12 +1318,30 @@ static void mlx4_parav_master_pf_caps(struct mlx4_dev *dev) } } +static int choose_log_fs_mgm_entry_size(int qp_per_entry) +{ + int i = MLX4_MIN_MGM_LOG_ENTRY_SIZE; + + for (i = MLX4_MIN_MGM_LOG_ENTRY_SIZE; i <= MLX4_MAX_MGM_LOG_ENTRY_SIZE; + i++) { + if (qp_per_entry <= 4 * ((1 << i) / 16 - 2)) + break; + } + + return (i <= MLX4_MAX_MGM_LOG_ENTRY_SIZE) ? i : -1; +} + static void choose_steering_mode(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) { - if (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_FS_EN && + if (mlx4_log_num_mgm_entry_size == -1 && + dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_FS_EN && (!mlx4_is_mfunc(dev) || - (dev_cap->fs_max_num_qp_per_entry >= (num_vfs + 1)))) { + (dev_cap->fs_max_num_qp_per_entry >= (num_vfs + 1))) && + choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry) >= + MLX4_MIN_MGM_LOG_ENTRY_SIZE) { + dev->oper_log_mgm_entry_size = + choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry); dev->caps.steering_mode = MLX4_STEERING_MODE_DEVICE_MANAGED; dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry; dev->caps.fs_log_max_ucast_qp_range_size = @@ -1340,10 +1358,17 @@ static void choose_steering_mode(struct mlx4_dev *dev, mlx4_warn(dev, "Must have both UC_STEER and MC_STEER flags " "set to use B0 steering. Falling back to A0 steering mode.\n"); } + dev->oper_log_mgm_entry_size = + mlx4_log_num_mgm_entry_size > 0 ? + mlx4_log_num_mgm_entry_size : + MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE; dev->caps.num_qp_per_mgm = mlx4_get_qp_per_mgm(dev); } - mlx4_dbg(dev, "Steering mode is: %s\n", - mlx4_steering_mode_str(dev->caps.steering_mode)); + mlx4_dbg(dev, "Steering mode is: %s, oper_log_mgm_entry_size = %d, " + "modparam log_num_mgm_entry_size = %d\n", + mlx4_steering_mode_str(dev->caps.steering_mode), + dev->oper_log_mgm_entry_size, + mlx4_log_num_mgm_entry_size); } static int mlx4_init_hca(struct mlx4_dev *dev) @@ -2479,6 +2504,17 @@ static int __init mlx4_verify_params(void) port_type_array[0] = true; } + if (mlx4_log_num_mgm_entry_size != -1 && + (mlx4_log_num_mgm_entry_size < MLX4_MIN_MGM_LOG_ENTRY_SIZE || + mlx4_log_num_mgm_entry_size > MLX4_MAX_MGM_LOG_ENTRY_SIZE)) { + pr_warning("mlx4_core: mlx4_log_num_mgm_entry_size (%d) not " + "in legal range (-1 or %d..%d)\n", + mlx4_log_num_mgm_entry_size, + MLX4_MIN_MGM_LOG_ENTRY_SIZE, + MLX4_MAX_MGM_LOG_ENTRY_SIZE); + return -1; + } + return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c index e151c21baf2b..1ee4db3c6400 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mcg.c +++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c @@ -54,12 +54,7 @@ struct mlx4_mgm { int mlx4_get_mgm_entry_size(struct mlx4_dev *dev) { - if (dev->caps.steering_mode == - MLX4_STEERING_MODE_DEVICE_MANAGED) - return 1 << MLX4_FS_MGM_LOG_ENTRY_SIZE; - else - return min((1 << mlx4_log_num_mgm_entry_size), - MLX4_MAX_MGM_ENTRY_SIZE); + return 1 << dev->oper_log_mgm_entry_size; } int mlx4_get_qp_per_mgm(struct mlx4_dev *dev) diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h index 1cf42036d7bb..116c5c29d2d1 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h @@ -94,8 +94,10 @@ enum { }; enum { - MLX4_MAX_MGM_ENTRY_SIZE = 0x1000, - MLX4_MAX_QP_PER_MGM = 4 * 
(MLX4_MAX_MGM_ENTRY_SIZE / 16 - 2), + MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE = 10, + MLX4_MIN_MGM_LOG_ENTRY_SIZE = 7, + MLX4_MAX_MGM_LOG_ENTRY_SIZE = 12, + MLX4_MAX_QP_PER_MGM = 4 * ((1 << MLX4_MAX_MGM_LOG_ENTRY_SIZE) / 16 - 2), MLX4_MTT_ENTRY_PER_SEG = 8, }; diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index 21821da2abfd..20ea939c22a6 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -625,6 +625,7 @@ struct mlx4_dev { u8 rev_id; char board_id[MLX4_BOARD_ID_LEN]; int num_vfs; + int oper_log_mgm_entry_size; u64 regid_promisc_array[MLX4_MAX_PORTS + 1]; u64 regid_allmulti_array[MLX4_MAX_PORTS + 1]; }; -- cgit v1.2.3 From a1c088e01b71d90852b0df5a77cdae46bd0e0c05 Mon Sep 17 00:00:00 2001 From: Oliver Neukum Date: Tue, 18 Dec 2012 04:45:29 +0000 Subject: usbnet: handle PM failure gracefully If a device fails to do remote wakeup, this is no reason to abort an open totally. This patch just continues without runtime PM. Signed-off-by: Oliver Neukum Signed-off-by: David S. Miller --- drivers/net/usb/usbnet.c | 15 ++++++++------- include/linux/usb/usbnet.h | 1 + 2 files changed, 9 insertions(+), 7 deletions(-) (limited to 'include/linux') diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index c04110ba677f..50ed7ab09c9f 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -719,7 +719,8 @@ int usbnet_stop (struct net_device *net) dev->flags = 0; del_timer_sync (&dev->delay); tasklet_kill (&dev->bh); - if (info->manage_power) + if (info->manage_power && + !test_and_clear_bit(EVENT_NO_RUNTIME_PM, &dev->flags)) info->manage_power(dev, 0); else usb_autopm_put_interface(dev->intf); @@ -794,14 +795,14 @@ int usbnet_open (struct net_device *net) tasklet_schedule (&dev->bh); if (info->manage_power) { retval = info->manage_power(dev, 1); - if (retval < 0) - goto done_manage_power_error; - usb_autopm_put_interface(dev->intf); + if (retval < 0) { + retval = 0; + set_bit(EVENT_NO_RUNTIME_PM, &dev->flags); + } else { + usb_autopm_put_interface(dev->intf); + } } return retval; - -done_manage_power_error: - clear_bit(EVENT_DEV_OPEN, &dev->flags); done: usb_autopm_put_interface(dev->intf); done_nopm: diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h index 9bbeabf66c54..288b32aadab2 100644 --- a/include/linux/usb/usbnet.h +++ b/include/linux/usb/usbnet.h @@ -69,6 +69,7 @@ struct usbnet { # define EVENT_DEV_ASLEEP 6 # define EVENT_DEV_OPEN 7 # define EVENT_DEVICE_REPORT_IDLE 8 +# define EVENT_NO_RUNTIME_PM 9 }; static inline struct usb_driver *driver_of(struct usb_interface *intf) -- cgit v1.2.3 From 2dd7c8cf29769f6b66f26b501db2364640c2c9d0 Mon Sep 17 00:00:00 2001 From: Oliver Neukum Date: Tue, 18 Dec 2012 04:45:52 +0000 Subject: usbnet: generic manage_power() Centralise common code for manage_power() in usbnet by making a generic simple implementation Signed-off-by: Oliver Neukum Signed-off-by: David S. 
Miller --- drivers/net/usb/usbnet.c | 10 ++++++++++ include/linux/usb/usbnet.h | 2 ++ 2 files changed, 12 insertions(+) (limited to 'include/linux') diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index 50ed7ab09c9f..3d4bf01641b4 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -1616,6 +1616,16 @@ void usbnet_device_suggests_idle(struct usbnet *dev) } EXPORT_SYMBOL(usbnet_device_suggests_idle); +/* + * For devices that can do without special commands + */ +int usbnet_manage_power(struct usbnet *dev, int on) +{ + dev->intf->needs_remote_wakeup = on; + return 0; +} +EXPORT_SYMBOL(usbnet_manage_power); + /*-------------------------------------------------------------------------*/ static int __usbnet_read_cmd(struct usbnet *dev, u8 cmd, u8 reqtype, u16 value, u16 index, void *data, u16 size) diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h index 288b32aadab2..bd45eb7bedc8 100644 --- a/include/linux/usb/usbnet.h +++ b/include/linux/usb/usbnet.h @@ -241,4 +241,6 @@ extern void usbnet_set_msglevel(struct net_device *, u32); extern void usbnet_get_drvinfo(struct net_device *, struct ethtool_drvinfo *); extern int usbnet_nway_reset(struct net_device *net); +extern int usbnet_manage_power(struct usbnet *, int); + #endif /* __LINUX_USB_USBNET_H */ -- cgit v1.2.3 From cf13a84d174947df4bb809edfb4887393642303e Mon Sep 17 00:00:00 2001 From: Fabio Porcedda Date: Fri, 5 Oct 2012 12:16:09 +0200 Subject: watchdog: WatchDog Timer Driver Core: fix comment Signed-off-by: Fabio Porcedda Signed-off-by: Wim Van Sebroeck --- include/linux/watchdog.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/watchdog.h b/include/linux/watchdog.h index 87490ac4bd87..3a9df2f43be6 100644 --- a/include/linux/watchdog.h +++ b/include/linux/watchdog.h @@ -129,7 +129,7 @@ static inline void *watchdog_get_drvdata(struct watchdog_device *wdd) return wdd->driver_data; } -/* drivers/watchdog/core/watchdog_core.c */ +/* drivers/watchdog/watchdog_core.c */ extern int watchdog_register_device(struct watchdog_device *); extern void watchdog_unregister_device(struct watchdog_device *); -- cgit v1.2.3 From 468366138850f20543f1d4878028900672b23dae Mon Sep 17 00:00:00 2001 From: Al Viro Date: Fri, 23 Nov 2012 09:12:59 -0500 Subject: COMPAT_SYSCALL_DEFINE: infrastructure Signed-off-by: Al Viro --- arch/s390/include/asm/compat.h | 3 +++ include/linux/compat.h | 42 ++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 45 insertions(+) (limited to 'include/linux') diff --git a/arch/s390/include/asm/compat.h b/arch/s390/include/asm/compat.h index 18cd6b592650..f8c6df6cd1f0 100644 --- a/arch/s390/include/asm/compat.h +++ b/arch/s390/include/asm/compat.h @@ -7,6 +7,9 @@ #include #include +#define __TYPE_IS_PTR(t) (!__builtin_types_compatible_p(typeof(0?(t)0:0ULL), u64)) +#define __SC_DELOUSE(t,v) (t)(__TYPE_IS_PTR(t) ? ((v) & 0x7fffffff) : (v)) + #define PSW32_MASK_PER 0x40000000UL #define PSW32_MASK_DAT 0x04000000UL #define PSW32_MASK_IO 0x02000000UL diff --git a/include/linux/compat.h b/include/linux/compat.h index 784ebfe63c48..a7877fa809fd 100644 --- a/include/linux/compat.h +++ b/include/linux/compat.h @@ -23,6 +23,48 @@ #define COMPAT_USE_64BIT_TIME 0 #endif +#ifndef __SC_DELOUSE +#define __SC_DELOUSE(t,v) ((t)(unsigned long)(v)) +#endif + +#define __SC_CCAST1(t1, a1) __SC_DELOUSE(t1,a1) +#define __SC_CCAST2(t2, a2, ...) __SC_DELOUSE(t2,a2), __SC_CCAST1(__VA_ARGS__) +#define __SC_CCAST3(t3, a3, ...) 
__SC_DELOUSE(t3,a3), __SC_CCAST2(__VA_ARGS__) +#define __SC_CCAST4(t4, a4, ...) __SC_DELOUSE(t4,a4), __SC_CCAST3(__VA_ARGS__) +#define __SC_CCAST5(t5, a5, ...) __SC_DELOUSE(t5,a5), __SC_CCAST4(__VA_ARGS__) +#define __SC_CCAST6(t6, a6, ...) __SC_DELOUSE(t6,a6), __SC_CCAST5(__VA_ARGS__) +#define COMPAT_SYSCALL_DEFINE1(name, ...) \ + COMPAT_SYSCALL_DEFINEx(1, _##name, __VA_ARGS__) +#define COMPAT_SYSCALL_DEFINE2(name, ...) \ + COMPAT_SYSCALL_DEFINEx(2, _##name, __VA_ARGS__) +#define COMPAT_SYSCALL_DEFINE3(name, ...) \ + COMPAT_SYSCALL_DEFINEx(3, _##name, __VA_ARGS__) +#define COMPAT_SYSCALL_DEFINE4(name, ...) \ + COMPAT_SYSCALL_DEFINEx(4, _##name, __VA_ARGS__) +#define COMPAT_SYSCALL_DEFINE5(name, ...) \ + COMPAT_SYSCALL_DEFINEx(5, _##name, __VA_ARGS__) +#define COMPAT_SYSCALL_DEFINE6(name, ...) \ + COMPAT_SYSCALL_DEFINEx(6, _##name, __VA_ARGS__) + +#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS + +#define COMPAT_SYSCALL_DEFINEx(x, name, ...) \ + asmlinkage long compat_sys##name(__SC_DECL##x(__VA_ARGS__)); \ + static inline long C_SYSC##name(__SC_DECL##x(__VA_ARGS__)); \ + asmlinkage long compat_SyS##name(__SC_LONG##x(__VA_ARGS__)) \ + { \ + return (long) C_SYSC##name(__SC_CCAST##x(__VA_ARGS__)); \ + } \ + SYSCALL_ALIAS(compat_sys##name, compat_SyS##name); \ + static inline long C_SYSC##name(__SC_DECL##x(__VA_ARGS__)) + +#else /* CONFIG_HAVE_SYSCALL_WRAPPERS */ + +#define COMPAT_SYSCALL_DEFINEx(x, name, ...) \ + asmlinkage long compat_sys##name(__SC_DECL##x(__VA_ARGS__)) + +#endif /* CONFIG_HAVE_SYSCALL_WRAPPERS */ + #define compat_jiffies_to_clock_t(x) \ (((unsigned long)(x) * COMPAT_USER_HZ) / HZ) -- cgit v1.2.3 From ae903caae267154de7cf8576b130ff474630596b Mon Sep 17 00:00:00 2001 From: Al Viro Date: Fri, 14 Dec 2012 12:44:11 -0500 Subject: Bury the conditionals from kernel_thread/kernel_execve series All architectures have CONFIG_GENERIC_KERNEL_THREAD CONFIG_GENERIC_KERNEL_EXECVE __ARCH_WANT_SYS_EXECVE None of them have __ARCH_WANT_KERNEL_EXECVE and there are only two callers of kernel_execve() (which is a trivial wrapper for do_execve() now) left. Kill the conditionals and make both callers use do_execve(). 
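The conversion at those two call sites is mechanical; for instance, the init/main.c hunk further below replaces the kernel_execve() call with a direct do_execve() call plus the expected __user casts:

	return do_execve(init_filename,
			 (const char __user *const __user *)argv_init,
			 (const char __user *const __user *)envp_init);

kernel/kmod.c gets the same treatment, after which the kernel_execve() wrapper and the CONFIG_GENERIC_KERNEL_THREAD/EXECVE options have no remaining users.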
Signed-off-by: Al Viro --- arch/Kconfig | 6 ------ arch/alpha/Kconfig | 2 -- arch/alpha/include/asm/unistd.h | 1 - arch/arm/Kconfig | 2 -- arch/arm/include/asm/unistd.h | 1 - arch/arm64/Kconfig | 2 -- arch/arm64/include/asm/unistd.h | 1 - arch/avr32/Kconfig | 2 -- arch/avr32/include/asm/unistd.h | 1 - arch/blackfin/Kconfig | 2 -- arch/blackfin/include/asm/unistd.h | 1 - arch/c6x/Kconfig | 2 -- arch/c6x/include/uapi/asm/unistd.h | 1 - arch/cris/Kconfig | 2 -- arch/cris/include/asm/unistd.h | 1 - arch/frv/Kconfig | 2 -- arch/frv/include/asm/unistd.h | 1 - arch/h8300/Kconfig | 2 -- arch/h8300/include/asm/unistd.h | 1 - arch/hexagon/Kconfig | 2 -- arch/hexagon/include/uapi/asm/unistd.h | 1 - arch/ia64/Kconfig | 2 -- arch/ia64/include/asm/unistd.h | 1 - arch/m32r/Kconfig | 2 -- arch/m32r/include/asm/unistd.h | 1 - arch/m68k/Kconfig | 2 -- arch/m68k/include/asm/unistd.h | 1 - arch/microblaze/Kconfig | 2 -- arch/microblaze/include/asm/unistd.h | 1 - arch/mips/Kconfig | 2 -- arch/mips/include/asm/unistd.h | 1 - arch/mn10300/Kconfig | 2 -- arch/mn10300/include/asm/unistd.h | 1 - arch/openrisc/Kconfig | 2 -- arch/openrisc/include/uapi/asm/unistd.h | 1 - arch/parisc/Kconfig | 2 -- arch/parisc/include/asm/unistd.h | 1 - arch/powerpc/Kconfig | 2 -- arch/powerpc/include/asm/unistd.h | 1 - arch/s390/Kconfig | 2 -- arch/s390/include/asm/unistd.h | 1 - arch/score/Kconfig | 2 -- arch/score/include/asm/unistd.h | 1 - arch/sh/Kconfig | 2 -- arch/sh/include/asm/unistd.h | 1 - arch/sparc/Kconfig | 2 -- arch/sparc/include/asm/unistd.h | 1 - arch/tile/Kconfig | 2 -- arch/tile/include/asm/unistd.h | 1 - arch/unicore32/Kconfig | 2 -- arch/unicore32/include/uapi/asm/unistd.h | 1 - arch/x86/Kconfig | 2 -- arch/x86/include/asm/unistd.h | 1 - arch/x86/um/Kconfig | 2 -- arch/xtensa/Kconfig | 2 -- arch/xtensa/include/asm/unistd.h | 1 - fs/exec.c | 21 --------------------- include/linux/binfmts.h | 4 ---- include/linux/sched.h | 2 -- include/linux/syscalls.h | 9 --------- init/main.c | 4 +++- kernel/fork.c | 2 -- kernel/kmod.c | 6 +++--- 63 files changed, 6 insertions(+), 131 deletions(-) (limited to 'include/linux') diff --git a/arch/Kconfig b/arch/Kconfig index 8d698fb5ccc9..0a8dd0585d0d 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -271,12 +271,6 @@ config ARCH_WANT_OLD_COMPAT_IPC select ARCH_WANT_COMPAT_IPC_PARSE_VERSION bool -config GENERIC_KERNEL_THREAD - bool - -config GENERIC_KERNEL_EXECVE - bool - config HAVE_ARCH_SECCOMP_FILTER bool help diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig index 5dd7f5db24d4..7e3710c0cce5 100644 --- a/arch/alpha/Kconfig +++ b/arch/alpha/Kconfig @@ -20,8 +20,6 @@ config ALPHA select GENERIC_CMOS_UPDATE select GENERIC_STRNCPY_FROM_USER select GENERIC_STRNLEN_USER - select GENERIC_KERNEL_THREAD - select GENERIC_KERNEL_EXECVE select HAVE_MOD_ARCH_SPECIFIC select MODULES_USE_ELF_RELA help diff --git a/arch/alpha/include/asm/unistd.h b/arch/alpha/include/asm/unistd.h index eb3a4664ced2..68c75f2fadb9 100644 --- a/arch/alpha/include/asm/unistd.h +++ b/arch/alpha/include/asm/unistd.h @@ -481,7 +481,6 @@ #define __ARCH_WANT_SYS_OLDUMOUNT #define __ARCH_WANT_SYS_SIGPENDING #define __ARCH_WANT_SYS_RT_SIGSUSPEND -#define __ARCH_WANT_SYS_EXECVE #define __ARCH_WANT_SYS_FORK #define __ARCH_WANT_SYS_VFORK #define __ARCH_WANT_SYS_CLONE diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 8918a2dd89b4..b789654e7e2f 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -11,8 +11,6 @@ config ARM select GENERIC_CLOCKEVENTS_BROADCAST if SMP select GENERIC_IRQ_PROBE select GENERIC_IRQ_SHOW - 
select GENERIC_KERNEL_THREAD - select GENERIC_KERNEL_EXECVE select GENERIC_PCI_IOMAP select GENERIC_SMP_IDLE_THREAD select GENERIC_STRNCPY_FROM_USER diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h index 7cd13cc62624..21a2700d2957 100644 --- a/arch/arm/include/asm/unistd.h +++ b/arch/arm/include/asm/unistd.h @@ -41,7 +41,6 @@ #define __ARCH_WANT_OLD_READDIR #define __ARCH_WANT_SYS_SOCKETCALL #endif -#define __ARCH_WANT_SYS_EXECVE #define __ARCH_WANT_SYS_FORK #define __ARCH_WANT_SYS_VFORK #define __ARCH_WANT_SYS_CLONE diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 4b03c56ec329..a846029bebcc 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -7,8 +7,6 @@ config ARM64 select GENERIC_IOMAP select GENERIC_IRQ_PROBE select GENERIC_IRQ_SHOW - select GENERIC_KERNEL_EXECVE - select GENERIC_KERNEL_THREAD select GENERIC_SMP_IDLE_THREAD select GENERIC_TIME_VSYSCALL select HARDIRQS_SW_RESEND diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h index d69aeea6da1e..738322945d1a 100644 --- a/arch/arm64/include/asm/unistd.h +++ b/arch/arm64/include/asm/unistd.h @@ -27,6 +27,5 @@ #define __ARCH_WANT_SYS_FORK #define __ARCH_WANT_SYS_VFORK #endif -#define __ARCH_WANT_SYS_EXECVE #define __ARCH_WANT_SYS_CLONE #include diff --git a/arch/avr32/Kconfig b/arch/avr32/Kconfig index 649aeb9acecb..06e73bf665e9 100644 --- a/arch/avr32/Kconfig +++ b/arch/avr32/Kconfig @@ -17,8 +17,6 @@ config AVR32 select GENERIC_CLOCKEVENTS select HAVE_MOD_ARCH_SPECIFIC select MODULES_USE_ELF_RELA - select GENERIC_KERNEL_THREAD - select GENERIC_KERNEL_EXECVE help AVR32 is a high-performance 32-bit RISC microprocessor core, designed for cost-sensitive embedded applications, with particular diff --git a/arch/avr32/include/asm/unistd.h b/arch/avr32/include/asm/unistd.h index f05a9804e8e2..0bdf6371574e 100644 --- a/arch/avr32/include/asm/unistd.h +++ b/arch/avr32/include/asm/unistd.h @@ -39,7 +39,6 @@ #define __ARCH_WANT_SYS_GETPGRP #define __ARCH_WANT_SYS_RT_SIGACTION #define __ARCH_WANT_SYS_RT_SIGSUSPEND -#define __ARCH_WANT_SYS_EXECVE #define __ARCH_WANT_SYS_FORK #define __ARCH_WANT_SYS_VFORK #define __ARCH_WANT_SYS_CLONE diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig index ab9ff4075f4d..b6f3ad5441c5 100644 --- a/arch/blackfin/Kconfig +++ b/arch/blackfin/Kconfig @@ -45,8 +45,6 @@ config BLACKFIN select ARCH_USES_GETTIMEOFFSET if !GENERIC_CLOCKEVENTS select HAVE_MOD_ARCH_SPECIFIC select MODULES_USE_ELF_RELA - select GENERIC_KERNEL_THREAD - select GENERIC_KERNEL_EXECVE config GENERIC_CSUM def_bool y diff --git a/arch/blackfin/include/asm/unistd.h b/arch/blackfin/include/asm/unistd.h index 460514a1a4e1..38711e28baac 100644 --- a/arch/blackfin/include/asm/unistd.h +++ b/arch/blackfin/include/asm/unistd.h @@ -446,7 +446,6 @@ #define __ARCH_WANT_SYS_NICE #define __ARCH_WANT_SYS_RT_SIGACTION #define __ARCH_WANT_SYS_RT_SIGSUSPEND -#define __ARCH_WANT_SYS_EXECVE #define __ARCH_WANT_SYS_VFORK /* diff --git a/arch/c6x/Kconfig b/arch/c6x/Kconfig index 66eab3703c75..f6a3648f5ec3 100644 --- a/arch/c6x/Kconfig +++ b/arch/c6x/Kconfig @@ -17,8 +17,6 @@ config C6X select OF select OF_EARLY_FLATTREE select GENERIC_CLOCKEVENTS - select GENERIC_KERNEL_THREAD - select GENERIC_KERNEL_EXECVE select MODULES_USE_ELF_RELA config MMU diff --git a/arch/c6x/include/uapi/asm/unistd.h b/arch/c6x/include/uapi/asm/unistd.h index f3987a8703d9..e7d09a614d10 100644 --- a/arch/c6x/include/uapi/asm/unistd.h +++ b/arch/c6x/include/uapi/asm/unistd.h @@ -14,7 +14,6 @@ * more details. 
*/ -#define __ARCH_WANT_SYS_EXECVE #define __ARCH_WANT_SYS_CLONE /* Use the standard ABI for syscalls. */ diff --git a/arch/cris/Kconfig b/arch/cris/Kconfig index 0cac6a49f230..c59a01dd9c0c 100644 --- a/arch/cris/Kconfig +++ b/arch/cris/Kconfig @@ -49,8 +49,6 @@ config CRIS select GENERIC_SMP_IDLE_THREAD if ETRAX_ARCH_V32 select GENERIC_CMOS_UPDATE select MODULES_USE_ELF_RELA - select GENERIC_KERNEL_THREAD - select GENERIC_KERNEL_EXECVE select CLONE_BACKWARDS2 config HZ diff --git a/arch/cris/include/asm/unistd.h b/arch/cris/include/asm/unistd.h index f27b542e0ebc..5cda75a9cc1e 100644 --- a/arch/cris/include/asm/unistd.h +++ b/arch/cris/include/asm/unistd.h @@ -371,7 +371,6 @@ #define __ARCH_WANT_SYS_SIGPROCMASK #define __ARCH_WANT_SYS_RT_SIGACTION #define __ARCH_WANT_SYS_RT_SIGSUSPEND -#define __ARCH_WANT_SYS_EXECVE #define __ARCH_WANT_SYS_FORK #define __ARCH_WANT_SYS_VFORK #define __ARCH_WANT_SYS_CLONE diff --git a/arch/frv/Kconfig b/arch/frv/Kconfig index df2eb4bd9fa2..9d262645f667 100644 --- a/arch/frv/Kconfig +++ b/arch/frv/Kconfig @@ -12,8 +12,6 @@ config FRV select ARCH_HAVE_NMI_SAFE_CMPXCHG select GENERIC_CPU_DEVICES select ARCH_WANT_IPC_PARSE_VERSION - select GENERIC_KERNEL_THREAD - select GENERIC_KERNEL_EXECVE config ZONE_DMA bool diff --git a/arch/frv/include/asm/unistd.h b/arch/frv/include/asm/unistd.h index 1807d8ea8cb5..d685da17f5fb 100644 --- a/arch/frv/include/asm/unistd.h +++ b/arch/frv/include/asm/unistd.h @@ -29,7 +29,6 @@ #define __ARCH_WANT_SYS_SIGPROCMASK #define __ARCH_WANT_SYS_RT_SIGACTION #define __ARCH_WANT_SYS_RT_SIGSUSPEND -#define __ARCH_WANT_SYS_EXECVE #define __ARCH_WANT_SYS_FORK #define __ARCH_WANT_SYS_VFORK #define __ARCH_WANT_SYS_CLONE diff --git a/arch/h8300/Kconfig b/arch/h8300/Kconfig index 04bef4d25b4a..98fabd10e95f 100644 --- a/arch/h8300/Kconfig +++ b/arch/h8300/Kconfig @@ -8,8 +8,6 @@ config H8300 select GENERIC_IRQ_SHOW select GENERIC_CPU_DEVICES select MODULES_USE_ELF_RELA - select GENERIC_KERNEL_THREAD - select GENERIC_KERNEL_EXECVE config SYMBOL_PREFIX string diff --git a/arch/h8300/include/asm/unistd.h b/arch/h8300/include/asm/unistd.h index c2c2f5c7d6bf..566f94860c45 100644 --- a/arch/h8300/include/asm/unistd.h +++ b/arch/h8300/include/asm/unistd.h @@ -356,7 +356,6 @@ #define __ARCH_WANT_SYS_SIGPROCMASK #define __ARCH_WANT_SYS_RT_SIGACTION #define __ARCH_WANT_SYS_RT_SIGSUSPEND -#define __ARCH_WANT_SYS_EXECVE #define __ARCH_WANT_SYS_FORK #define __ARCH_WANT_SYS_VFORK #define __ARCH_WANT_SYS_CLONE diff --git a/arch/hexagon/Kconfig b/arch/hexagon/Kconfig index e418803b6c8e..0744f7d7b1fd 100644 --- a/arch/hexagon/Kconfig +++ b/arch/hexagon/Kconfig @@ -31,8 +31,6 @@ config HEXAGON select GENERIC_CLOCKEVENTS select GENERIC_CLOCKEVENTS_BROADCAST select MODULES_USE_ELF_RELA - select GENERIC_KERNEL_THREAD - select GENERIC_KERNEL_EXECVE ---help--- Qualcomm Hexagon is a processor architecture designed for high performance and low power across a wide variety of applications. 
diff --git a/arch/hexagon/include/uapi/asm/unistd.h b/arch/hexagon/include/uapi/asm/unistd.h index 2af81533bd0f..4a87cc47075c 100644 --- a/arch/hexagon/include/uapi/asm/unistd.h +++ b/arch/hexagon/include/uapi/asm/unistd.h @@ -27,7 +27,6 @@ */ #define sys_mmap2 sys_mmap_pgoff -#define __ARCH_WANT_SYS_EXECVE #define __ARCH_WANT_SYS_CLONE #include diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index 670600468128..3279646120e3 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig @@ -42,8 +42,6 @@ config IA64 select GENERIC_TIME_VSYSCALL_OLD select HAVE_MOD_ARCH_SPECIFIC select MODULES_USE_ELF_RELA - select GENERIC_KERNEL_THREAD - select GENERIC_KERNEL_EXECVE default y help The Itanium Processor Family is Intel's 64-bit successor to diff --git a/arch/ia64/include/asm/unistd.h b/arch/ia64/include/asm/unistd.h index 1574bca86138..8b3ff2f5b861 100644 --- a/arch/ia64/include/asm/unistd.h +++ b/arch/ia64/include/asm/unistd.h @@ -29,7 +29,6 @@ #define __ARCH_WANT_SYS_RT_SIGACTION #define __ARCH_WANT_SYS_RT_SIGSUSPEND -#define __ARCH_WANT_SYS_EXECVE #if !defined(__ASSEMBLY__) && !defined(ASSEMBLER) diff --git a/arch/m32r/Kconfig b/arch/m32r/Kconfig index 5183f43a2cf7..f807721e19a5 100644 --- a/arch/m32r/Kconfig +++ b/arch/m32r/Kconfig @@ -15,8 +15,6 @@ config M32R select GENERIC_ATOMIC64 select ARCH_USES_GETTIMEOFFSET select MODULES_USE_ELF_RELA - select GENERIC_KERNEL_THREAD - select GENERIC_KERNEL_EXECVE config SBUS bool diff --git a/arch/m32r/include/asm/unistd.h b/arch/m32r/include/asm/unistd.h index d9e7351af2a4..cbfa39158fef 100644 --- a/arch/m32r/include/asm/unistd.h +++ b/arch/m32r/include/asm/unistd.h @@ -352,7 +352,6 @@ #define __ARCH_WANT_SYS_OLDUMOUNT #define __ARCH_WANT_SYS_RT_SIGACTION #define __ARCH_WANT_SYS_RT_SIGSUSPEND -#define __ARCH_WANT_SYS_EXECVE #define __ARCH_WANT_SYS_CLONE #define __ARCH_WANT_SYS_FORK #define __ARCH_WANT_SYS_VFORK diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig index 953a7ba5d050..6710084e072a 100644 --- a/arch/m68k/Kconfig +++ b/arch/m68k/Kconfig @@ -15,8 +15,6 @@ config M68K select FPU if MMU select ARCH_WANT_IPC_PARSE_VERSION select ARCH_USES_GETTIMEOFFSET if MMU && !COLDFIRE - select GENERIC_KERNEL_THREAD - select GENERIC_KERNEL_EXECVE select HAVE_MOD_ARCH_SPECIFIC select MODULES_USE_ELF_REL select MODULES_USE_ELF_RELA diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h index a021d67cdd72..847994ce6804 100644 --- a/arch/m68k/include/asm/unistd.h +++ b/arch/m68k/include/asm/unistd.h @@ -31,7 +31,6 @@ #define __ARCH_WANT_SYS_SIGPROCMASK #define __ARCH_WANT_SYS_RT_SIGACTION #define __ARCH_WANT_SYS_RT_SIGSUSPEND -#define __ARCH_WANT_SYS_EXECVE #define __ARCH_WANT_SYS_FORK #define __ARCH_WANT_SYS_VFORK diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig index 4bcf89148f3c..ba3b7c8c04b8 100644 --- a/arch/microblaze/Kconfig +++ b/arch/microblaze/Kconfig @@ -26,8 +26,6 @@ config MICROBLAZE select GENERIC_ATOMIC64 select GENERIC_CLOCKEVENTS select MODULES_USE_ELF_RELA - select GENERIC_KERNEL_THREAD - select GENERIC_KERNEL_EXECVE select CLONE_BACKWARDS config SWAP diff --git a/arch/microblaze/include/asm/unistd.h b/arch/microblaze/include/asm/unistd.h index 94d978986b75..38cabf4db548 100644 --- a/arch/microblaze/include/asm/unistd.h +++ b/arch/microblaze/include/asm/unistd.h @@ -422,7 +422,6 @@ #define __ARCH_WANT_SYS_SIGPROCMASK #define __ARCH_WANT_SYS_RT_SIGACTION #define __ARCH_WANT_SYS_RT_SIGSUSPEND -#define __ARCH_WANT_SYS_EXECVE #define __ARCH_WANT_SYS_CLONE #define __ARCH_WANT_SYS_VFORK #ifdef CONFIG_MMU diff 
--git a/arch/mips/Kconfig b/arch/mips/Kconfig index 4183e62f178c..dba9390d37cf 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -40,8 +40,6 @@ config MIPS select HAVE_MOD_ARCH_SPECIFIC select MODULES_USE_ELF_REL select MODULES_USE_ELF_RELA if 64BIT - select GENERIC_KERNEL_THREAD - select GENERIC_KERNEL_EXECVE menu "Machine selection" diff --git a/arch/mips/include/asm/unistd.h b/arch/mips/include/asm/unistd.h index b306e2081cad..9e47cc11aa26 100644 --- a/arch/mips/include/asm/unistd.h +++ b/arch/mips/include/asm/unistd.h @@ -20,7 +20,6 @@ #define __ARCH_OMIT_COMPAT_SYS_GETDENTS64 #define __ARCH_WANT_OLD_READDIR #define __ARCH_WANT_SYS_ALARM -#define __ARCH_WANT_SYS_EXECVE #define __ARCH_WANT_SYS_GETHOSTNAME #define __ARCH_WANT_SYS_IPC #define __ARCH_WANT_SYS_PAUSE diff --git a/arch/mn10300/Kconfig b/arch/mn10300/Kconfig index 72471744a912..aa03f2e13385 100644 --- a/arch/mn10300/Kconfig +++ b/arch/mn10300/Kconfig @@ -8,8 +8,6 @@ config MN10300 select HAVE_ARCH_KGDB select HAVE_NMI_WATCHDOG if MN10300_WD_TIMER select GENERIC_CLOCKEVENTS - select GENERIC_KERNEL_THREAD - select GENERIC_KERNEL_EXECVE select MODULES_USE_ELF_RELA config AM33_2 diff --git a/arch/mn10300/include/asm/unistd.h b/arch/mn10300/include/asm/unistd.h index cabf8ba73b27..e6d2ed4ba68f 100644 --- a/arch/mn10300/include/asm/unistd.h +++ b/arch/mn10300/include/asm/unistd.h @@ -43,7 +43,6 @@ #define __ARCH_WANT_SYS_SIGPROCMASK #define __ARCH_WANT_SYS_RT_SIGACTION #define __ARCH_WANT_SYS_RT_SIGSUSPEND -#define __ARCH_WANT_SYS_EXECVE #define __ARCH_WANT_SYS_FORK #define __ARCH_WANT_SYS_VFORK #define __ARCH_WANT_SYS_CLONE diff --git a/arch/openrisc/Kconfig b/arch/openrisc/Kconfig index e7f1a2993f78..05f2ba41ff1a 100644 --- a/arch/openrisc/Kconfig +++ b/arch/openrisc/Kconfig @@ -22,8 +22,6 @@ config OPENRISC select GENERIC_STRNCPY_FROM_USER select GENERIC_STRNLEN_USER select MODULES_USE_ELF_RELA - select GENERIC_KERNEL_THREAD - select GENERIC_KERNEL_EXECVE config MMU def_bool y diff --git a/arch/openrisc/include/uapi/asm/unistd.h b/arch/openrisc/include/uapi/asm/unistd.h index 5082b8066325..ce40b71df006 100644 --- a/arch/openrisc/include/uapi/asm/unistd.h +++ b/arch/openrisc/include/uapi/asm/unistd.h @@ -20,7 +20,6 @@ #define sys_mmap2 sys_mmap_pgoff -#define __ARCH_WANT_SYS_EXECVE #define __ARCH_WANT_SYS_FORK #define __ARCH_WANT_SYS_CLONE diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index e688a2be30f6..b77feffbadea 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig @@ -22,8 +22,6 @@ config PARISC select GENERIC_STRNCPY_FROM_USER select HAVE_MOD_ARCH_SPECIFIC select MODULES_USE_ELF_RELA - select GENERIC_KERNEL_THREAD - select GENERIC_KERNEL_EXECVE select CLONE_BACKWARDS help diff --git a/arch/parisc/include/asm/unistd.h b/arch/parisc/include/asm/unistd.h index 1efef41659c9..3043194547cd 100644 --- a/arch/parisc/include/asm/unistd.h +++ b/arch/parisc/include/asm/unistd.h @@ -163,7 +163,6 @@ type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5) \ #define __ARCH_WANT_SYS_RT_SIGACTION #define __ARCH_WANT_SYS_RT_SIGSUSPEND #define __ARCH_WANT_COMPAT_SYS_RT_SIGSUSPEND -#define __ARCH_WANT_SYS_EXECVE #define __ARCH_WANT_SYS_FORK #define __ARCH_WANT_SYS_VFORK #define __ARCH_WANT_SYS_CLONE diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 951a517a1a0f..17903f1f356b 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -141,10 +141,8 @@ config PPC select GENERIC_CLOCKEVENTS select GENERIC_STRNCPY_FROM_USER select GENERIC_STRNLEN_USER - select GENERIC_KERNEL_THREAD select 
HAVE_MOD_ARCH_SPECIFIC select MODULES_USE_ELF_RELA - select GENERIC_KERNEL_EXECVE select CLONE_BACKWARDS config EARLY_PRINTK diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h index 76fe846ec40e..784872f93711 100644 --- a/arch/powerpc/include/asm/unistd.h +++ b/arch/powerpc/include/asm/unistd.h @@ -55,7 +55,6 @@ #define __ARCH_WANT_SYS_NEWFSTATAT #define __ARCH_WANT_COMPAT_SYS_SENDFILE #endif -#define __ARCH_WANT_SYS_EXECVE #define __ARCH_WANT_SYS_FORK #define __ARCH_WANT_SYS_VFORK #define __ARCH_WANT_SYS_CLONE diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 3cbb8757704e..5029ebf7110e 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -137,8 +137,6 @@ config S390 select GENERIC_CLOCKEVENTS select KTIME_SCALAR if 32BIT select HAVE_ARCH_SECCOMP_FILTER - select GENERIC_KERNEL_THREAD - select GENERIC_KERNEL_EXECVE select HAVE_MOD_ARCH_SPECIFIC select MODULES_USE_ELF_RELA select CLONE_BACKWARDS2 diff --git a/arch/s390/include/asm/unistd.h b/arch/s390/include/asm/unistd.h index 086bb8eaf6ab..636530872516 100644 --- a/arch/s390/include/asm/unistd.h +++ b/arch/s390/include/asm/unistd.h @@ -53,7 +53,6 @@ # define __ARCH_WANT_COMPAT_SYS_TIME # define __ARCH_WANT_COMPAT_SYS_RT_SIGSUSPEND # endif -#define __ARCH_WANT_SYS_EXECVE #define __ARCH_WANT_SYS_FORK #define __ARCH_WANT_SYS_VFORK #define __ARCH_WANT_SYS_CLONE diff --git a/arch/score/Kconfig b/arch/score/Kconfig index 45893390c7dd..3b1482e7afac 100644 --- a/arch/score/Kconfig +++ b/arch/score/Kconfig @@ -13,8 +13,6 @@ config SCORE select GENERIC_CLOCKEVENTS select HAVE_MOD_ARCH_SPECIFIC select MODULES_USE_ELF_REL - select GENERIC_KERNEL_THREAD - select GENERIC_KERNEL_EXECVE select CLONE_BACKWARDS choice diff --git a/arch/score/include/asm/unistd.h b/arch/score/include/asm/unistd.h index 56001c93095a..9cb4260a5f3e 100644 --- a/arch/score/include/asm/unistd.h +++ b/arch/score/include/asm/unistd.h @@ -4,7 +4,6 @@ #define __ARCH_WANT_SYSCALL_NO_FLAGS #define __ARCH_WANT_SYSCALL_OFF_T #define __ARCH_WANT_SYSCALL_DEPRECATED -#define __ARCH_WANT_SYS_EXECVE #define __ARCH_WANT_SYS_CLONE #define __ARCH_WANT_SYS_FORK #define __ARCH_WANT_SYS_VFORK diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index 8451317eed58..babc2b826c5c 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -40,8 +40,6 @@ config SUPERH select GENERIC_STRNLEN_USER select HAVE_MOD_ARCH_SPECIFIC if DWARF_UNWINDER select MODULES_USE_ELF_RELA - select GENERIC_KERNEL_THREAD - select GENERIC_KERNEL_EXECVE help The SuperH is a RISC processor targeted for use in embedded systems and consumer electronics; it was also used in the Sega Dreamcast diff --git a/arch/sh/include/asm/unistd.h b/arch/sh/include/asm/unistd.h index 43d3f26b2eab..012004ed3330 100644 --- a/arch/sh/include/asm/unistd.h +++ b/arch/sh/include/asm/unistd.h @@ -28,7 +28,6 @@ # define __ARCH_WANT_SYS_SIGPENDING # define __ARCH_WANT_SYS_SIGPROCMASK # define __ARCH_WANT_SYS_RT_SIGACTION -# define __ARCH_WANT_SYS_EXECVE # define __ARCH_WANT_SYS_FORK # define __ARCH_WANT_SYS_VFORK # define __ARCH_WANT_SYS_CLONE diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index 0c7d365fa402..9f2edb5c5551 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig @@ -41,8 +41,6 @@ config SPARC select GENERIC_STRNCPY_FROM_USER select GENERIC_STRNLEN_USER select MODULES_USE_ELF_RELA - select GENERIC_KERNEL_THREAD - select GENERIC_KERNEL_EXECVE config SPARC32 def_bool !64BIT diff --git a/arch/sparc/include/asm/unistd.h b/arch/sparc/include/asm/unistd.h index c3e5d8b64171..0ecea6ed943e 100644 --- 
a/arch/sparc/include/asm/unistd.h +++ b/arch/sparc/include/asm/unistd.h @@ -46,7 +46,6 @@ #define __ARCH_WANT_COMPAT_SYS_RT_SIGSUSPEND #define __ARCH_WANT_COMPAT_SYS_SENDFILE #endif -#define __ARCH_WANT_SYS_EXECVE /* * "Conditional" syscalls diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig index ea7f61e8bc9e..875d008828b8 100644 --- a/arch/tile/Kconfig +++ b/arch/tile/Kconfig @@ -21,8 +21,6 @@ config TILE select ARCH_HAVE_NMI_SAFE_CMPXCHG select GENERIC_CLOCKEVENTS select MODULES_USE_ELF_RELA - select GENERIC_KERNEL_THREAD - select GENERIC_KERNEL_EXECVE # FIXME: investigate whether we need/want these options. # select HAVE_IOREMAP_PROT diff --git a/arch/tile/include/asm/unistd.h b/arch/tile/include/asm/unistd.h index b51c6ee3cd6c..940831fe9e94 100644 --- a/arch/tile/include/asm/unistd.h +++ b/arch/tile/include/asm/unistd.h @@ -16,6 +16,5 @@ #define __ARCH_WANT_SYS_LLSEEK #endif #define __ARCH_WANT_SYS_NEWFSTATAT -#define __ARCH_WANT_SYS_EXECVE #define __ARCH_WANT_SYS_CLONE #include diff --git a/arch/unicore32/Kconfig b/arch/unicore32/Kconfig index c4fbb21e802b..60651df5f952 100644 --- a/arch/unicore32/Kconfig +++ b/arch/unicore32/Kconfig @@ -16,8 +16,6 @@ config UNICORE32 select ARCH_WANT_FRAME_POINTERS select GENERIC_IOMAP select MODULES_USE_ELF_REL - select GENERIC_KERNEL_THREAD - select GENERIC_KERNEL_EXECVE help UniCore-32 is 32-bit Instruction Set Architecture, including a series of low-power-consumption RISC chip diff --git a/arch/unicore32/include/uapi/asm/unistd.h b/arch/unicore32/include/uapi/asm/unistd.h index 00cf5e286fca..d4cc4559d848 100644 --- a/arch/unicore32/include/uapi/asm/unistd.h +++ b/arch/unicore32/include/uapi/asm/unistd.h @@ -12,5 +12,4 @@ /* Use the standard ABI for syscalls. */ #include -#define __ARCH_WANT_SYS_EXECVE #define __ARCH_WANT_SYS_CLONE diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 0df6e7d84539..01ca0ebaff0e 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -108,8 +108,6 @@ config X86 select GENERIC_STRNLEN_USER select HAVE_RCU_USER_QS if X86_64 select HAVE_IRQ_TIME_ACCOUNTING - select GENERIC_KERNEL_THREAD - select GENERIC_KERNEL_EXECVE select MODULES_USE_ELF_REL if X86_32 select MODULES_USE_ELF_RELA if X86_64 select CLONE_BACKWARDS if X86_32 diff --git a/arch/x86/include/asm/unistd.h b/arch/x86/include/asm/unistd.h index 0e7dea7d3669..9dcfcc1d6f92 100644 --- a/arch/x86/include/asm/unistd.h +++ b/arch/x86/include/asm/unistd.h @@ -50,7 +50,6 @@ # define __ARCH_WANT_SYS_TIME # define __ARCH_WANT_SYS_UTIME # define __ARCH_WANT_SYS_WAITPID -# define __ARCH_WANT_SYS_EXECVE # define __ARCH_WANT_SYS_FORK # define __ARCH_WANT_SYS_VFORK # define __ARCH_WANT_SYS_CLONE diff --git a/arch/x86/um/Kconfig b/arch/x86/um/Kconfig index 8f51c39750d1..0fd20f241e40 100644 --- a/arch/x86/um/Kconfig +++ b/arch/x86/um/Kconfig @@ -13,8 +13,6 @@ endmenu config UML_X86 def_bool y select GENERIC_FIND_FIRST_BIT - select GENERIC_KERNEL_THREAD - select GENERIC_KERNEL_EXECVE config 64BIT bool "64-bit kernel" if SUBARCH = "x86" diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig index 2481f267be29..03a8c107e07e 100644 --- a/arch/xtensa/Kconfig +++ b/arch/xtensa/Kconfig @@ -13,8 +13,6 @@ config XTENSA select GENERIC_CPU_DEVICES select MODULES_USE_ELF_RELA select GENERIC_PCI_IOMAP - select GENERIC_KERNEL_THREAD - select GENERIC_KERNEL_EXECVE select ARCH_WANT_OPTIONAL_GPIOLIB select CLONE_BACKWARDS help diff --git a/arch/xtensa/include/asm/unistd.h b/arch/xtensa/include/asm/unistd.h index e002dbcc88b6..eb63ea87815c 100644 --- a/arch/xtensa/include/asm/unistd.h +++ 
b/arch/xtensa/include/asm/unistd.h @@ -1,7 +1,6 @@ #ifndef _XTENSA_UNISTD_H #define _XTENSA_UNISTD_H -#define __ARCH_WANT_SYS_EXECVE #define __ARCH_WANT_SYS_CLONE #include diff --git a/fs/exec.c b/fs/exec.c index 721a29929511..090ac91da2e9 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -1657,7 +1657,6 @@ int get_dumpable(struct mm_struct *mm) return __get_dumpable(mm->flags); } -#ifdef __ARCH_WANT_SYS_EXECVE SYSCALL_DEFINE3(execve, const char __user *, filename, const char __user *const __user *, argv, @@ -1685,23 +1684,3 @@ asmlinkage long compat_sys_execve(const char __user * filename, return error; } #endif -#endif - -#ifdef __ARCH_WANT_KERNEL_EXECVE -int kernel_execve(const char *filename, - const char *const argv[], - const char *const envp[]) -{ - int ret = do_execve(filename, - (const char __user *const __user *)argv, - (const char __user *const __user *)envp); - if (ret < 0) - return ret; - - /* - * We were successful. We won't be returning to our caller, but - * instead to user space by manipulating the kernel stack. - */ - ret_from_kernel_execve(current_pt_regs()); -} -#endif diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h index 2630c9b41a86..8c1388c6ae27 100644 --- a/include/linux/binfmts.h +++ b/include/linux/binfmts.h @@ -121,8 +121,4 @@ extern void install_exec_creds(struct linux_binprm *bprm); extern void set_binfmt(struct linux_binfmt *new); extern void free_bprm(struct linux_binprm *); -#ifdef __ARCH_WANT_KERNEL_EXECVE -extern void ret_from_kernel_execve(struct pt_regs *normal) __noreturn; -#endif - #endif /* _LINUX_BINFMTS_H */ diff --git a/include/linux/sched.h b/include/linux/sched.h index 1162258bcaf0..9e5a54e3d84f 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -2291,9 +2291,7 @@ extern int do_execve(const char *, const char __user * const __user *); extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *); struct task_struct *fork_idle(int); -#ifdef CONFIG_GENERIC_KERNEL_THREAD extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags); -#endif extern void set_task_comm(struct task_struct *tsk, char *from); extern char *get_task_comm(char *to, struct task_struct *tsk); diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 91835e7f364d..9fe5f946526e 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -827,15 +827,6 @@ asmlinkage long sys_fanotify_mark(int fanotify_fd, unsigned int flags, const char __user *pathname); asmlinkage long sys_syncfs(int fd); -#ifndef CONFIG_GENERIC_KERNEL_EXECVE -int kernel_execve(const char *filename, const char *const argv[], const char *const envp[]); -#else -#define kernel_execve(filename, argv, envp) \ - do_execve(filename, \ - (const char __user *const __user *)argv, \ - (const char __user *const __user *)envp) -#endif - asmlinkage long sys_fork(void); asmlinkage long sys_vfork(void); #ifdef CONFIG_CLONE_BACKWARDS diff --git a/init/main.c b/init/main.c index e33e09df3cbc..155ac208d581 100644 --- a/init/main.c +++ b/init/main.c @@ -797,7 +797,9 @@ static void __init do_pre_smp_initcalls(void) static int run_init_process(const char *init_filename) { argv_init[0] = init_filename; - return kernel_execve(init_filename, argv_init, envp_init); + return do_execve(init_filename, + (const char __user *const __user *)argv_init, + (const char __user *const __user *)envp_init); } static void __init kernel_init_freeable(void); diff --git a/kernel/fork.c b/kernel/fork.c index 540730783433..389712ffc0ad 100644 --- a/kernel/fork.c 
+++ b/kernel/fork.c @@ -1623,7 +1623,6 @@ long do_fork(unsigned long clone_flags, return nr; } -#ifdef CONFIG_GENERIC_KERNEL_THREAD /* * Create a kernel thread. */ @@ -1632,7 +1631,6 @@ pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags) return do_fork(flags|CLONE_VM|CLONE_UNTRACED, (unsigned long)fn, (unsigned long)arg, NULL, NULL); } -#endif #ifdef __ARCH_WANT_SYS_FORK SYSCALL_DEFINE0(fork) diff --git a/kernel/kmod.c b/kernel/kmod.c index 1c317e386831..0023a87e8de6 100644 --- a/kernel/kmod.c +++ b/kernel/kmod.c @@ -219,9 +219,9 @@ static int ____call_usermodehelper(void *data) commit_creds(new); - retval = kernel_execve(sub_info->path, - (const char *const *)sub_info->argv, - (const char *const *)sub_info->envp); + retval = do_execve(sub_info->path, + (const char __user *const __user *)sub_info->argv, + (const char __user *const __user *)sub_info->envp); if (!retval) return 0; -- cgit v1.2.3 From 1ca97bb541a1f5a735e697a8bba763cde3aab452 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 18 Nov 2012 12:50:10 -0500 Subject: new helper: current_user_stack_pointer() Cross-architecture equivalent of rdusp(); default is user_stack_pointer(current_pt_regs()) - that works for almost all platforms that have usp saved in pt_regs. The only exception from that is ia64 - we want memory stack, not the backing store for register one. Signed-off-by: Al Viro --- arch/alpha/include/asm/ptrace.h | 1 + arch/blackfin/include/asm/ptrace.h | 1 + arch/cris/include/asm/ptrace.h | 1 + arch/h8300/include/asm/ptrace.h | 1 + arch/ia64/include/asm/ptrace.h | 5 +++++ arch/m68k/include/asm/ptrace.h | 1 + include/linux/ptrace.h | 4 ++++ 7 files changed, 14 insertions(+) (limited to 'include/linux') diff --git a/arch/alpha/include/asm/ptrace.h b/arch/alpha/include/asm/ptrace.h index b4c5b2fbb647..fd53c74ac943 100644 --- a/arch/alpha/include/asm/ptrace.h +++ b/arch/alpha/include/asm/ptrace.h @@ -72,6 +72,7 @@ struct switch_stack { #define user_mode(regs) (((regs)->ps & 8) != 0) #define instruction_pointer(regs) ((regs)->pc) #define profile_pc(regs) instruction_pointer(regs) +#define current_user_stack_pointer() rdusp() #define task_pt_regs(task) \ ((struct pt_regs *) (task_stack_page(task) + 2*PAGE_SIZE) - 1) diff --git a/arch/blackfin/include/asm/ptrace.h b/arch/blackfin/include/asm/ptrace.h index 10d8641180f2..c42002506a26 100644 --- a/arch/blackfin/include/asm/ptrace.h +++ b/arch/blackfin/include/asm/ptrace.h @@ -106,6 +106,7 @@ struct pt_regs { #define arch_has_single_step() (1) /* common code demands this function */ #define ptrace_disable(child) user_disable_single_step(child) +#define current_user_stack_pointer() rdusp() extern int is_user_addr_valid(struct task_struct *child, unsigned long start, unsigned long len); diff --git a/arch/cris/include/asm/ptrace.h b/arch/cris/include/asm/ptrace.h index 6618893bfe8e..551c081ab62b 100644 --- a/arch/cris/include/asm/ptrace.h +++ b/arch/cris/include/asm/ptrace.h @@ -10,6 +10,7 @@ #define PTRACE_SETREGS 13 #define profile_pc(regs) instruction_pointer(regs) +#define current_user_stack_pointer() rdusp() #endif /* __KERNEL__ */ diff --git a/arch/h8300/include/asm/ptrace.h b/arch/h8300/include/asm/ptrace.h index 7468589a128b..6183371d0c93 100644 --- a/arch/h8300/include/asm/ptrace.h +++ b/arch/h8300/include/asm/ptrace.h @@ -63,6 +63,7 @@ struct pt_regs { #define current_pt_regs() ((struct pt_regs *) \ (THREAD_SIZE + (unsigned long)current_thread_info()) - 1) #define signal_pt_regs() ((struct pt_regs *)current->thread.esp0) +#define current_user_stack_pointer() 
rdusp() #endif /* __KERNEL__ */ #endif /* __ASSEMBLY__ */ #endif /* _H8300_PTRACE_H */ diff --git a/arch/ia64/include/asm/ptrace.h b/arch/ia64/include/asm/ptrace.h index b0e973649cb9..845143990a1d 100644 --- a/arch/ia64/include/asm/ptrace.h +++ b/arch/ia64/include/asm/ptrace.h @@ -78,6 +78,11 @@ static inline long regs_return_value(struct pt_regs *regs) unsigned long __ip = instruction_pointer(regs); \ (__ip & ~3UL) + ((__ip & 3UL) << 2); \ }) +/* + * Why not default? Because user_stack_pointer() on ia64 gives register + * stack backing store instead... + */ +#define current_user_stack_pointer() (current_pt_regs()->r12) /* given a pointer to a task_struct, return the user's pt_regs */ # define task_pt_regs(t) (((struct pt_regs *) ((char *) (t) + IA64_STK_OFFSET)) - 1) diff --git a/arch/m68k/include/asm/ptrace.h b/arch/m68k/include/asm/ptrace.h index 0f717045bdde..a45cb6894ad3 100644 --- a/arch/m68k/include/asm/ptrace.h +++ b/arch/m68k/include/asm/ptrace.h @@ -15,6 +15,7 @@ #define profile_pc(regs) instruction_pointer(regs) #define current_pt_regs() \ (struct pt_regs *)((char *)current_thread_info() + THREAD_SIZE) - 1 +#define current_user_stack_pointer() rdusp() #define arch_has_single_step() (1) diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h index a89ff04bddd9..a3a9d085f932 100644 --- a/include/linux/ptrace.h +++ b/include/linux/ptrace.h @@ -342,6 +342,10 @@ static inline void user_single_step_siginfo(struct task_struct *tsk, #define signal_pt_regs() task_pt_regs(current) #endif +#ifndef current_user_stack_pointer +#define current_user_stack_pointer() user_stack_pointer(current_pt_regs()) +#endif + extern int task_current_syscall(struct task_struct *target, long *callno, unsigned long args[6], unsigned int maxargs, unsigned long *sp, unsigned long *pc); -- cgit v1.2.3 From 5c49574ffd7ac07eae8c3b065d19e6ebc7e4760f Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 18 Nov 2012 15:29:16 -0500 Subject: new helper: restore_altstack() to be used by rt_sigreturn instances Signed-off-by: Al Viro --- include/linux/signal.h | 2 ++ kernel/signal.c | 7 +++++++ 2 files changed, 9 insertions(+) (limited to 'include/linux') diff --git a/include/linux/signal.h b/include/linux/signal.h index e19a011b43b7..5969522136fe 100644 --- a/include/linux/signal.h +++ b/include/linux/signal.h @@ -385,4 +385,6 @@ int unhandled_signal(struct task_struct *tsk, int sig); void signals_init(void); +int restore_altstack(const stack_t __user *); + #endif /* _LINUX_SIGNAL_H */ diff --git a/kernel/signal.c b/kernel/signal.c index e75e4bd2839b..887f2fefe207 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -3103,6 +3103,13 @@ out: return error; } +int restore_altstack(const stack_t __user *uss) +{ + int err = do_sigaltstack(uss, NULL, current_user_stack_pointer()); + /* squash all but EFAULT for now */ + return err == -EFAULT ? err : 0; +} + #ifdef __ARCH_WANT_SYS_SIGPENDING /** -- cgit v1.2.3 From 9b064fc3f95a8e44e929fdf4d6037334ea03d15b Mon Sep 17 00:00:00 2001 From: Al Viro Date: Fri, 14 Dec 2012 13:49:35 -0500 Subject: new helper: compat_user_stack_pointer() Compat counterpart of current_user_stack_pointer(); for most of the biarch architectures those two are identical, but e.g. arm64 and arm use different registers for stack pointer... Note that amd64 variants of current_user_stack_pointer/compat_user_stack_pointer do *not* rely on pt_regs having been through FIXUP_TOP_OF_STACK. 
Signed-off-by: Al Viro --- arch/arm64/include/asm/compat.h | 5 +++-- arch/x86/include/asm/ptrace.h | 7 +++++++ include/linux/compat.h | 3 +++ 3 files changed, 13 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/arch/arm64/include/asm/compat.h b/arch/arm64/include/asm/compat.h index 37e610dc084e..d9ec40217a27 100644 --- a/arch/arm64/include/asm/compat.h +++ b/arch/arm64/include/asm/compat.h @@ -209,10 +209,11 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr) return (u32)(unsigned long)uptr; } +#define compat_user_stack_pointer() (current_pt_regs()->compat_sp) + static inline void __user *arch_compat_alloc_user_space(long len) { - struct pt_regs *regs = task_pt_regs(current); - return (void __user *)regs->compat_sp - len; + return (void __user *)compat_user_stack_pointer() - len; } struct compat_ipc64_perm { diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h index 19f16ebaf4fa..7e560b6daf5d 100644 --- a/arch/x86/include/asm/ptrace.h +++ b/arch/x86/include/asm/ptrace.h @@ -203,6 +203,13 @@ static inline bool user_64bit_mode(struct pt_regs *regs) return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs; #endif } + +#define current_user_stack_pointer() this_cpu_read(old_rsp) +/* ia32 vs. x32 difference */ +#define compat_user_stack_pointer() \ + (test_thread_flag(TIF_IA32) \ + ? current_pt_regs()->sp \ + : this_cpu_read(old_rsp)) #endif #ifdef CONFIG_X86_32 diff --git a/include/linux/compat.h b/include/linux/compat.h index a7877fa809fd..62bb76f91baf 100644 --- a/include/linux/compat.h +++ b/include/linux/compat.h @@ -65,6 +65,9 @@ #endif /* CONFIG_HAVE_SYSCALL_WRAPPERS */ +#ifndef compat_user_stack_pointer +#define compat_user_stack_pointer() current_user_stack_pointer() +#endif #define compat_jiffies_to_clock_t(x) \ (((unsigned long)(x) * COMPAT_USER_HZ) / HZ) -- cgit v1.2.3 From 6bf9adfc90370b695cb111116e15fdc0e1906270 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Fri, 14 Dec 2012 14:09:47 -0500 Subject: introduce generic sys_sigaltstack(), switch x86 and um to it Conditional on CONFIG_GENERIC_SIGALTSTACK; architectures that do not select it are completely unaffected Signed-off-by: Al Viro --- arch/Kconfig | 3 +++ arch/um/kernel/signal.c | 5 ----- arch/x86/Kconfig | 1 + arch/x86/include/asm/syscalls.h | 3 --- arch/x86/kernel/entry_32.S | 1 - arch/x86/kernel/entry_64.S | 1 - arch/x86/kernel/signal.c | 7 ------- arch/x86/syscalls/syscall_32.tbl | 2 +- arch/x86/syscalls/syscall_64.tbl | 2 +- arch/x86/um/Kconfig | 1 + arch/x86/um/sys_call_table_32.c | 1 - arch/x86/um/sys_call_table_64.c | 1 - include/linux/syscalls.h | 6 ++++++ kernel/signal.c | 6 ++++++ 14 files changed, 19 insertions(+), 21 deletions(-) (limited to 'include/linux') diff --git a/arch/Kconfig b/arch/Kconfig index 0a8dd0585d0d..330176824594 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -335,6 +335,9 @@ config MODULES_USE_ELF_REL Modules only use ELF REL relocations. Modules with ELF RELA relocations will give an error. 
+config GENERIC_SIGALTSTACK + bool + # # ABI hall of shame # diff --git a/arch/um/kernel/signal.c b/arch/um/kernel/signal.c index db18eb6124e1..48ccf718e290 100644 --- a/arch/um/kernel/signal.c +++ b/arch/um/kernel/signal.c @@ -132,8 +132,3 @@ long sys_sigsuspend(int history0, int history1, old_sigset_t mask) siginitset(&blocked, mask); return sigsuspend(&blocked); } - -long sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss) -{ - return do_sigaltstack(uss, uoss, PT_REGS_SP(&current->thread.regs)); -} diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 01ca0ebaff0e..f380614d7d89 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -111,6 +111,7 @@ config X86 select MODULES_USE_ELF_REL if X86_32 select MODULES_USE_ELF_RELA if X86_64 select CLONE_BACKWARDS if X86_32 + select GENERIC_SIGALTSTACK config INSTRUCTION_DECODER def_bool y diff --git a/arch/x86/include/asm/syscalls.h b/arch/x86/include/asm/syscalls.h index 2f8374718aa3..58b7e3eac0ae 100644 --- a/arch/x86/include/asm/syscalls.h +++ b/arch/x86/include/asm/syscalls.h @@ -25,9 +25,6 @@ asmlinkage int sys_modify_ldt(int, void __user *, unsigned long); /* kernel/signal.c */ long sys_rt_sigreturn(struct pt_regs *); -long sys_sigaltstack(const stack_t __user *, stack_t __user *, - struct pt_regs *); - /* kernel/tls.c */ asmlinkage int sys_set_thread_area(struct user_desc __user *); diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S index c763116c5359..ff84d5469d77 100644 --- a/arch/x86/kernel/entry_32.S +++ b/arch/x86/kernel/entry_32.S @@ -739,7 +739,6 @@ ENTRY(ptregs_##name) ; \ ENDPROC(ptregs_##name) PTREGSCALL1(iopl) -PTREGSCALL2(sigaltstack) PTREGSCALL0(sigreturn) PTREGSCALL0(rt_sigreturn) PTREGSCALL2(vm86) diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index 2363e820ed68..6e462019f195 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S @@ -864,7 +864,6 @@ END(stub_\func) FORK_LIKE clone FORK_LIKE fork FORK_LIKE vfork - PTREGSCALL stub_sigaltstack, sys_sigaltstack, %rdx PTREGSCALL stub_iopl, sys_iopl, %rsi ENTRY(ptregscall_common) diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c index 70b27ee6118e..16d065c23baf 100644 --- a/arch/x86/kernel/signal.c +++ b/arch/x86/kernel/signal.c @@ -602,13 +602,6 @@ sys_sigaction(int sig, const struct old_sigaction __user *act, } #endif /* CONFIG_X86_32 */ -long -sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, - struct pt_regs *regs) -{ - return do_sigaltstack(uss, uoss, regs->sp); -} - /* * Do a signal return; undo the signal stack.
*/ diff --git a/arch/x86/syscalls/syscall_32.tbl b/arch/x86/syscalls/syscall_32.tbl index ee3c220ee500..62c7b222e45c 100644 --- a/arch/x86/syscalls/syscall_32.tbl +++ b/arch/x86/syscalls/syscall_32.tbl @@ -192,7 +192,7 @@ 183 i386 getcwd sys_getcwd 184 i386 capget sys_capget 185 i386 capset sys_capset -186 i386 sigaltstack ptregs_sigaltstack stub32_sigaltstack +186 i386 sigaltstack sys_sigaltstack stub32_sigaltstack 187 i386 sendfile sys_sendfile sys32_sendfile 188 i386 getpmsg 189 i386 putpmsg diff --git a/arch/x86/syscalls/syscall_64.tbl b/arch/x86/syscalls/syscall_64.tbl index a582bfed95bb..6ffa7f9d005e 100644 --- a/arch/x86/syscalls/syscall_64.tbl +++ b/arch/x86/syscalls/syscall_64.tbl @@ -137,7 +137,7 @@ 128 64 rt_sigtimedwait sys_rt_sigtimedwait 129 64 rt_sigqueueinfo sys_rt_sigqueueinfo 130 common rt_sigsuspend sys_rt_sigsuspend -131 64 sigaltstack stub_sigaltstack +131 64 sigaltstack sys_sigaltstack 132 common utime sys_utime 133 common mknod sys_mknod 134 64 uselib diff --git a/arch/x86/um/Kconfig b/arch/x86/um/Kconfig index 0fd20f241e40..96b89d874ead 100644 --- a/arch/x86/um/Kconfig +++ b/arch/x86/um/Kconfig @@ -13,6 +13,7 @@ endmenu config UML_X86 def_bool y select GENERIC_FIND_FIRST_BIT + select GENERIC_SIGALTSTACK config 64BIT bool "64-bit kernel" if SUBARCH = "x86" diff --git a/arch/x86/um/sys_call_table_32.c b/arch/x86/um/sys_call_table_32.c index 812e98c098e4..a0c3b0d1a122 100644 --- a/arch/x86/um/sys_call_table_32.c +++ b/arch/x86/um/sys_call_table_32.c @@ -27,7 +27,6 @@ #define ptregs_iopl sys_iopl #define ptregs_vm86old sys_vm86old #define ptregs_vm86 sys_vm86 -#define ptregs_sigaltstack sys_sigaltstack #define __SYSCALL_I386(nr, sym, compat) extern asmlinkage void sym(void) ; #include diff --git a/arch/x86/um/sys_call_table_64.c b/arch/x86/um/sys_call_table_64.c index 170bd926a69c..f2f0723070ca 100644 --- a/arch/x86/um/sys_call_table_64.c +++ b/arch/x86/um/sys_call_table_64.c @@ -31,7 +31,6 @@ #define stub_fork sys_fork #define stub_vfork sys_vfork #define stub_execve sys_execve -#define stub_sigaltstack sys_sigaltstack #define stub_rt_sigreturn sys_rt_sigreturn #define __SYSCALL_COMMON(nr, sym, compat) __SYSCALL_64(nr, sym, compat) diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 9fe5f946526e..6ca1e08210c6 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -63,6 +63,7 @@ struct getcpu_cache; struct old_linux_dirent; struct perf_event_attr; struct file_handle; +struct sigaltstack; #include #include @@ -299,6 +300,11 @@ asmlinkage long sys_personality(unsigned int personality); asmlinkage long sys_sigpending(old_sigset_t __user *set); asmlinkage long sys_sigprocmask(int how, old_sigset_t __user *set, old_sigset_t __user *oset); +#ifdef CONFIG_GENERIC_SIGALTSTACK +asmlinkage long sys_sigaltstack(const struct sigaltstack __user *uss, + struct sigaltstack __user *uoss); +#endif + asmlinkage long sys_getitimer(int which, struct itimerval __user *value); asmlinkage long sys_setitimer(int which, struct itimerval __user *value, diff --git a/kernel/signal.c b/kernel/signal.c index 887f2fefe207..f05f4c4150d9 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -3102,6 +3102,12 @@ do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long s out: return error; } +#ifdef CONFIG_GENERIC_SIGALTSTACK +SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss) +{ + return do_sigaltstack(uss, uoss, current_user_stack_pointer()); +} +#endif int restore_altstack(const stack_t __user *uss) { -- cgit v1.2.3 
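The generic sys_sigaltstack() above takes the user stack pointer from current_user_stack_pointer() instead of an explicit pt_regs argument. A minimal userspace check of that syscall path, assuming nothing beyond the standard libc sigaltstack(2) wrapper (this test program is illustrative and not part of the series), could look like this:

#include <signal.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	stack_t ss, old;

	/* Set up an alternate signal stack to hand to the kernel. */
	ss.ss_sp = malloc(SIGSTKSZ);
	if (ss.ss_sp == NULL) {
		perror("malloc");
		return 1;
	}
	ss.ss_size = SIGSTKSZ;
	ss.ss_flags = 0;

	/* Install it and read back the previous setting. */
	if (sigaltstack(&ss, &old) == -1) {
		perror("sigaltstack");
		return 1;
	}

	printf("previous ss_flags = %#x, new stack installed at %p\n",
	       old.ss_flags, ss.ss_sp);
	return 0;
}

On a converted architecture the call lands in the SYSCALL_DEFINE2(sigaltstack, ...) shown in the kernel/signal.c hunk above; on unconverted architectures it still goes through their own pt_regs-taking wrappers.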
From 9026843952adac5b123c7b8dc961e5c15828d9e1 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Fri, 14 Dec 2012 14:47:53 -0500 Subject: generic compat_sys_sigaltstack() Again, conditional on CONFIG_GENERIC_SIGALTSTACK Signed-off-by: Al Viro --- arch/x86/ia32/ia32_signal.c | 50 +--------------------------------------- arch/x86/ia32/ia32entry.S | 1 - arch/x86/include/asm/ia32.h | 10 ++------ arch/x86/include/asm/sys_ia32.h | 2 -- arch/x86/kernel/entry_64.S | 2 -- arch/x86/kernel/signal.c | 4 +--- arch/x86/syscalls/syscall_32.tbl | 2 +- arch/x86/syscalls/syscall_64.tbl | 2 +- include/linux/compat.h | 16 +++++++++++++ kernel/signal.c | 45 ++++++++++++++++++++++++++++++++++++ 10 files changed, 67 insertions(+), 67 deletions(-) (limited to 'include/linux') diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c index efc6a958b71d..a866411a2fcc 100644 --- a/arch/x86/ia32/ia32_signal.c +++ b/arch/x86/ia32/ia32_signal.c @@ -136,52 +136,6 @@ asmlinkage long sys32_sigsuspend(int history0, int history1, old_sigset_t mask) return sigsuspend(&blocked); } -asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr, - stack_ia32_t __user *uoss_ptr, - struct pt_regs *regs) -{ - stack_t uss, uoss; - int ret, err = 0; - mm_segment_t seg; - - if (uss_ptr) { - u32 ptr; - - memset(&uss, 0, sizeof(stack_t)); - if (!access_ok(VERIFY_READ, uss_ptr, sizeof(stack_ia32_t))) - return -EFAULT; - - get_user_try { - get_user_ex(ptr, &uss_ptr->ss_sp); - get_user_ex(uss.ss_flags, &uss_ptr->ss_flags); - get_user_ex(uss.ss_size, &uss_ptr->ss_size); - } get_user_catch(err); - - if (err) - return -EFAULT; - uss.ss_sp = compat_ptr(ptr); - } - seg = get_fs(); - set_fs(KERNEL_DS); - ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL), - (stack_t __force __user *) &uoss, regs->sp); - set_fs(seg); - if (ret >= 0 && uoss_ptr) { - if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t))) - return -EFAULT; - - put_user_try { - put_user_ex(ptr_to_compat(uoss.ss_sp), &uoss_ptr->ss_sp); - put_user_ex(uoss.ss_flags, &uoss_ptr->ss_flags); - put_user_ex(uoss.ss_size, &uoss_ptr->ss_size); - } put_user_catch(err); - - if (err) - ret = -EFAULT; - } - return ret; -} - /* * Do a signal return; undo the signal stack. 
*/ @@ -292,7 +246,6 @@ asmlinkage long sys32_rt_sigreturn(struct pt_regs *regs) struct rt_sigframe_ia32 __user *frame; sigset_t set; unsigned int ax; - struct pt_regs tregs; frame = (struct rt_sigframe_ia32 __user *)(regs->sp - 4); @@ -306,8 +259,7 @@ asmlinkage long sys32_rt_sigreturn(struct pt_regs *regs) if (ia32_restore_sigcontext(regs, &frame->uc.uc_mcontext, &ax)) goto badframe; - tregs = *regs; - if (sys32_sigaltstack(&frame->uc.uc_stack, NULL, &tregs) == -EFAULT) + if (compat_restore_altstack(&frame->uc.uc_stack)) goto badframe; return ax; diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S index 32e6f05ddaaa..102ff7cb3e41 100644 --- a/arch/x86/ia32/ia32entry.S +++ b/arch/x86/ia32/ia32entry.S @@ -464,7 +464,6 @@ GLOBAL(\label) PTREGSCALL stub32_rt_sigreturn, sys32_rt_sigreturn, %rdi PTREGSCALL stub32_sigreturn, sys32_sigreturn, %rdi - PTREGSCALL stub32_sigaltstack, sys32_sigaltstack, %rdx PTREGSCALL stub32_execve, compat_sys_execve, %rcx PTREGSCALL stub32_fork, sys_fork, %rdi PTREGSCALL stub32_vfork, sys_vfork, %rdi diff --git a/arch/x86/include/asm/ia32.h b/arch/x86/include/asm/ia32.h index e6232773ce49..4c6da2e4bb1d 100644 --- a/arch/x86/include/asm/ia32.h +++ b/arch/x86/include/asm/ia32.h @@ -29,16 +29,10 @@ struct old_sigaction32 { unsigned int sa_restorer; /* Another 32 bit pointer */ }; -typedef struct sigaltstack_ia32 { - unsigned int ss_sp; - int ss_flags; - unsigned int ss_size; -} stack_ia32_t; - struct ucontext_ia32 { unsigned int uc_flags; unsigned int uc_link; - stack_ia32_t uc_stack; + compat_stack_t uc_stack; struct sigcontext_ia32 uc_mcontext; compat_sigset_t uc_sigmask; /* mask last for extensibility */ }; @@ -46,7 +40,7 @@ struct ucontext_ia32 { struct ucontext_x32 { unsigned int uc_flags; unsigned int uc_link; - stack_ia32_t uc_stack; + compat_stack_t uc_stack; unsigned int uc__pad0; /* needed for alignment */ struct sigcontext uc_mcontext; /* the 64-bit sigcontext type */ compat_sigset_t uc_sigmask; /* mask last for extensibility */ diff --git a/arch/x86/include/asm/sys_ia32.h b/arch/x86/include/asm/sys_ia32.h index c76fae4d90be..31f61f96e0fb 100644 --- a/arch/x86/include/asm/sys_ia32.h +++ b/arch/x86/include/asm/sys_ia32.h @@ -69,8 +69,6 @@ asmlinkage long sys32_fallocate(int, int, unsigned, /* ia32/ia32_signal.c */ asmlinkage long sys32_sigsuspend(int, int, old_sigset_t); -asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *, - stack_ia32_t __user *, struct pt_regs *); asmlinkage long sys32_sigreturn(struct pt_regs *); asmlinkage long sys32_rt_sigreturn(struct pt_regs *); diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index 6e462019f195..86d81199bbde 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S @@ -912,8 +912,6 @@ ENTRY(stub_rt_sigreturn) END(stub_rt_sigreturn) #ifdef CONFIG_X86_X32_ABI - PTREGSCALL stub_x32_sigaltstack, sys32_sigaltstack, %rdx - ENTRY(stub_x32_rt_sigreturn) CFI_STARTPROC addq $8, %rsp diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c index 16d065c23baf..b17ed37c61a2 100644 --- a/arch/x86/kernel/signal.c +++ b/arch/x86/kernel/signal.c @@ -857,7 +857,6 @@ asmlinkage long sys32_x32_rt_sigreturn(struct pt_regs *regs) struct rt_sigframe_x32 __user *frame; sigset_t set; unsigned long ax; - struct pt_regs tregs; frame = (struct rt_sigframe_x32 __user *)(regs->sp - 8); @@ -871,8 +870,7 @@ asmlinkage long sys32_x32_rt_sigreturn(struct pt_regs *regs) if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ax)) goto badframe; - tregs = *regs; - if 
(sys32_sigaltstack(&frame->uc.uc_stack, NULL, &tregs) == -EFAULT) + if (compat_restore_altstack(&frame->uc.uc_stack)) goto badframe; return ax; diff --git a/arch/x86/syscalls/syscall_32.tbl b/arch/x86/syscalls/syscall_32.tbl index 62c7b222e45c..235226efaa7f 100644 --- a/arch/x86/syscalls/syscall_32.tbl +++ b/arch/x86/syscalls/syscall_32.tbl @@ -192,7 +192,7 @@ 183 i386 getcwd sys_getcwd 184 i386 capget sys_capget 185 i386 capset sys_capset -186 i386 sigaltstack sys_sigaltstack stub32_sigaltstack +186 i386 sigaltstack sys_sigaltstack compat_sys_sigaltstack 187 i386 sendfile sys_sendfile sys32_sendfile 188 i386 getpmsg 189 i386 putpmsg diff --git a/arch/x86/syscalls/syscall_64.tbl b/arch/x86/syscalls/syscall_64.tbl index 6ffa7f9d005e..c68cbe7174e7 100644 --- a/arch/x86/syscalls/syscall_64.tbl +++ b/arch/x86/syscalls/syscall_64.tbl @@ -337,7 +337,7 @@ 522 x32 rt_sigpending sys32_rt_sigpending 523 x32 rt_sigtimedwait compat_sys_rt_sigtimedwait 524 x32 rt_sigqueueinfo sys32_rt_sigqueueinfo -525 x32 sigaltstack stub_x32_sigaltstack +525 x32 sigaltstack compat_sys_sigaltstack 526 x32 timer_create compat_sys_timer_create 527 x32 mq_notify compat_sys_mq_notify 528 x32 kexec_load compat_sys_kexec_load diff --git a/include/linux/compat.h b/include/linux/compat.h index 62bb76f91baf..cb5637e2ee2c 100644 --- a/include/linux/compat.h +++ b/include/linux/compat.h @@ -68,6 +68,16 @@ #ifndef compat_user_stack_pointer #define compat_user_stack_pointer() current_user_stack_pointer() #endif +#ifdef CONFIG_GENERIC_SIGALTSTACK +#ifndef compat_sigaltstack /* we'll need that for MIPS */ +typedef struct compat_sigaltstack { + compat_uptr_t ss_sp; + int ss_flags; + compat_size_t ss_size; +} compat_stack_t; +#endif +#endif + #define compat_jiffies_to_clock_t(x) \ (((unsigned long)(x) * COMPAT_USER_HZ) / HZ) @@ -632,6 +642,12 @@ asmlinkage ssize_t compat_sys_process_vm_writev(compat_pid_t pid, asmlinkage long compat_sys_sendfile(int out_fd, int in_fd, compat_off_t __user *offset, compat_size_t count); +#ifdef CONFIG_GENERIC_SIGALTSTACK +asmlinkage long compat_sys_sigaltstack(const compat_stack_t __user *uss_ptr, + compat_stack_t __user *uoss_ptr); + +int compat_restore_altstack(const compat_stack_t __user *uss); +#endif #else diff --git a/kernel/signal.c b/kernel/signal.c index f05f4c4150d9..aee85bd76b8a 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -31,6 +31,7 @@ #include #include #include +#include #define CREATE_TRACE_POINTS #include @@ -3116,6 +3117,50 @@ int restore_altstack(const stack_t __user *uss) return err == -EFAULT ? err : 0; } +#ifdef CONFIG_COMPAT +#ifdef CONFIG_GENERIC_SIGALTSTACK +asmlinkage long compat_sys_sigaltstack(const compat_stack_t __user *uss_ptr, + compat_stack_t __user *uoss_ptr) +{ + stack_t uss, uoss; + int ret; + mm_segment_t seg; + + if (uss_ptr) { + compat_stack_t uss32; + + memset(&uss, 0, sizeof(stack_t)); + if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t))) + return -EFAULT; + uss.ss_sp = compat_ptr(uss32.ss_sp); + uss.ss_flags = uss32.ss_flags; + uss.ss_size = uss32.ss_size; + } + seg = get_fs(); + set_fs(KERNEL_DS); + ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? 
&uss : NULL), + (stack_t __force __user *) &uoss, + compat_user_stack_pointer()); + set_fs(seg); + if (ret >= 0 && uoss_ptr) { + if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(compat_stack_t)) || + __put_user(ptr_to_compat(uoss.ss_sp), &uoss_ptr->ss_sp) || + __put_user(uoss.ss_flags, &uoss_ptr->ss_flags) || + __put_user(uoss.ss_size, &uoss_ptr->ss_size)) + ret = -EFAULT; + } + return ret; +} + +int compat_restore_altstack(const compat_stack_t __user *uss) +{ + int err = compat_sys_sigaltstack(uss, NULL); + /* squash all but -EFAULT for now */ + return err == -EFAULT ? err : 0; +} +#endif +#endif + #ifdef __ARCH_WANT_SYS_SIGPENDING /** -- cgit v1.2.3 From c40702c49faef05ae324f121d8b3e215244ee152 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Tue, 20 Nov 2012 14:24:26 -0500 Subject: new helpers: __save_altstack/__compat_save_altstack, switch x86 and um to those note that they are relying on access_ok() already checked by caller. Signed-off-by: Al Viro --- arch/x86/ia32/ia32_signal.c | 5 +---- arch/x86/kernel/signal.c | 18 ++++-------------- arch/x86/um/signal.c | 9 ++------- include/linux/compat.h | 1 + include/linux/signal.h | 1 + kernel/signal.c | 16 ++++++++++++++++ 6 files changed, 25 insertions(+), 25 deletions(-) (limited to 'include/linux') diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c index a866411a2fcc..a1daf4a65009 100644 --- a/arch/x86/ia32/ia32_signal.c +++ b/arch/x86/ia32/ia32_signal.c @@ -467,10 +467,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, else put_user_ex(0, &frame->uc.uc_flags); put_user_ex(0, &frame->uc.uc_link); - put_user_ex(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp); - put_user_ex(sas_ss_flags(regs->sp), - &frame->uc.uc_stack.ss_flags); - put_user_ex(current->sas_ss_size, &frame->uc.uc_stack.ss_size); + err |= __compat_save_altstack(&frame->uc.uc_stack, regs->sp); if (ka->sa.sa_flags & SA_RESTORER) restorer = ka->sa.sa_restorer; diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c index b17ed37c61a2..a6c8a347b8c6 100644 --- a/arch/x86/kernel/signal.c +++ b/arch/x86/kernel/signal.c @@ -363,10 +363,7 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, else put_user_ex(0, &frame->uc.uc_flags); put_user_ex(0, &frame->uc.uc_link); - put_user_ex(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp); - put_user_ex(sas_ss_flags(regs->sp), - &frame->uc.uc_stack.ss_flags); - put_user_ex(current->sas_ss_size, &frame->uc.uc_stack.ss_size); + err |= __save_altstack(&frame->uc.uc_stack, regs->sp); /* Set up to return from userspace. */ restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn); @@ -413,7 +410,6 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, struct rt_sigframe __user *frame; void __user *fp = NULL; int err = 0; - struct task_struct *me = current; frame = get_sigframe(ka, regs, sizeof(struct rt_sigframe), &fp); @@ -432,10 +428,7 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, else put_user_ex(0, &frame->uc.uc_flags); put_user_ex(0, &frame->uc.uc_link); - put_user_ex(me->sas_ss_sp, &frame->uc.uc_stack.ss_sp); - put_user_ex(sas_ss_flags(regs->sp), - &frame->uc.uc_stack.ss_flags); - put_user_ex(me->sas_ss_size, &frame->uc.uc_stack.ss_size); + err |= __save_altstack(&frame->uc.uc_stack, regs->sp); /* Set up to return from userspace. If provided, use a stub already in userspace. 
*/ @@ -502,10 +495,7 @@ static int x32_setup_rt_frame(int sig, struct k_sigaction *ka, else put_user_ex(0, &frame->uc.uc_flags); put_user_ex(0, &frame->uc.uc_link); - put_user_ex(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp); - put_user_ex(sas_ss_flags(regs->sp), - &frame->uc.uc_stack.ss_flags); - put_user_ex(current->sas_ss_size, &frame->uc.uc_stack.ss_size); + err |= __compat_save_altstack(&frame->uc.uc_stack, regs->sp); put_user_ex(0, &frame->uc.uc__pad0); if (ka->sa.sa_flags & SA_RESTORER) { @@ -651,7 +641,7 @@ long sys_rt_sigreturn(struct pt_regs *regs) if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ax)) goto badframe; - if (do_sigaltstack(&frame->uc.uc_stack, NULL, regs->sp) == -EFAULT) + if (restore_altstack(&frame->uc.uc_stack)) goto badframe; return ax; diff --git a/arch/x86/um/signal.c b/arch/x86/um/signal.c index bdaa08cfbcf4..71cef48ea5cd 100644 --- a/arch/x86/um/signal.c +++ b/arch/x86/um/signal.c @@ -342,9 +342,7 @@ static int copy_ucontext_to_user(struct ucontext __user *uc, { int err = 0; - err |= put_user(current->sas_ss_sp, &uc->uc_stack.ss_sp); - err |= put_user(sas_ss_flags(sp), &uc->uc_stack.ss_flags); - err |= put_user(current->sas_ss_size, &uc->uc_stack.ss_size); + err |= __save_altstack(&uc->uc_stack, sp); err |= copy_sc_to_user(&uc->uc_mcontext, fp, &current->thread.regs, 0); err |= copy_to_user(&uc->uc_sigmask, set, sizeof(*set)); return err; @@ -529,10 +527,7 @@ int setup_signal_stack_si(unsigned long stack_top, int sig, /* Create the ucontext. */ err |= __put_user(0, &frame->uc.uc_flags); err |= __put_user(0, &frame->uc.uc_link); - err |= __put_user(me->sas_ss_sp, &frame->uc.uc_stack.ss_sp); - err |= __put_user(sas_ss_flags(PT_REGS_SP(regs)), - &frame->uc.uc_stack.ss_flags); - err |= __put_user(me->sas_ss_size, &frame->uc.uc_stack.ss_size); + err |= __save_altstack(&frame->uc.uc_stack, PT_REGS_SP(regs)); err |= copy_sc_to_user(&frame->uc.uc_mcontext, &frame->fpstate, regs, set->sig[0]); err |= __put_user(&frame->fpstate, &frame->uc.uc_mcontext.fpstate); diff --git a/include/linux/compat.h b/include/linux/compat.h index cb5637e2ee2c..334813307ec1 100644 --- a/include/linux/compat.h +++ b/include/linux/compat.h @@ -647,6 +647,7 @@ asmlinkage long compat_sys_sigaltstack(const compat_stack_t __user *uss_ptr, compat_stack_t __user *uoss_ptr); int compat_restore_altstack(const compat_stack_t __user *uss); +int __compat_save_altstack(compat_stack_t __user *, unsigned long); #endif #else diff --git a/include/linux/signal.h b/include/linux/signal.h index 5969522136fe..0a89ffc48466 100644 --- a/include/linux/signal.h +++ b/include/linux/signal.h @@ -386,5 +386,6 @@ int unhandled_signal(struct task_struct *tsk, int sig); void signals_init(void); int restore_altstack(const stack_t __user *); +int __save_altstack(stack_t __user *, unsigned long); #endif /* _LINUX_SIGNAL_H */ diff --git a/kernel/signal.c b/kernel/signal.c index aee85bd76b8a..f072513302c3 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -3117,6 +3117,14 @@ int restore_altstack(const stack_t __user *uss) return err == -EFAULT ?
err : 0; } +int __save_altstack(stack_t __user *uss, unsigned long sp) +{ + struct task_struct *t = current; + return __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) | + __put_user(sas_ss_flags(sp), &uss->ss_flags) | + __put_user(t->sas_ss_size, &uss->ss_size); +} + #ifdef CONFIG_COMPAT #ifdef CONFIG_GENERIC_SIGALTSTACK asmlinkage long compat_sys_sigaltstack(const compat_stack_t __user *uss_ptr, @@ -3158,6 +3166,14 @@ int compat_restore_altstack(const compat_stack_t __user *uss) /* squash all but -EFAULT for now */ return err == -EFAULT ? err : 0; } + +int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp) +{ + struct task_struct *t = current; + return __put_user(ptr_to_compat((void __user *)t->sas_ss_sp), &uss->ss_sp) | + __put_user(sas_ss_flags(sp), &uss->ss_flags) | + __put_user(t->sas_ss_size, &uss->ss_size); +} #endif #endif -- cgit v1.2.3 From ada65c74059f8c104f1b467c126205471634c435 Mon Sep 17 00:00:00 2001 From: Maarten Lankhorst Date: Wed, 12 Dec 2012 10:23:03 +0100 Subject: dma-buf: remove fallback for !CONFIG_DMA_SHARED_BUFFER Documentation says that code requiring dma-buf should add it to select, so inline fallbacks are not going to be used. A link error will make it obvious what went wrong, instead of silently doing nothing at runtime. Signed-off-by: Maarten Lankhorst Reviewed-by: Daniel Vetter Reviewed-by: Rob Clark Signed-off-by: Sumit Semwal --- include/linux/dma-buf.h | 99 ------------------------------------------------- 1 file changed, 99 deletions(-) (limited to 'include/linux') diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h index eb48f3816df9..bd2e52ccc4f2 100644 --- a/include/linux/dma-buf.h +++ b/include/linux/dma-buf.h @@ -156,7 +156,6 @@ static inline void get_dma_buf(struct dma_buf *dmabuf) get_file(dmabuf->file); } -#ifdef CONFIG_DMA_SHARED_BUFFER struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf, struct device *dev); void dma_buf_detach(struct dma_buf *dmabuf, @@ -184,103 +183,5 @@ int dma_buf_mmap(struct dma_buf *, struct vm_area_struct *, unsigned long); void *dma_buf_vmap(struct dma_buf *); void dma_buf_vunmap(struct dma_buf *, void *vaddr); -#else - -static inline struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf, - struct device *dev) -{ - return ERR_PTR(-ENODEV); -} - -static inline void dma_buf_detach(struct dma_buf *dmabuf, - struct dma_buf_attachment *dmabuf_attach) -{ - return; -} - -static inline struct dma_buf *dma_buf_export(void *priv, - const struct dma_buf_ops *ops, - size_t size, int flags) -{ - return ERR_PTR(-ENODEV); -} - -static inline int dma_buf_fd(struct dma_buf *dmabuf, int flags) -{ - return -ENODEV; -} - -static inline struct dma_buf *dma_buf_get(int fd) -{ - return ERR_PTR(-ENODEV); -} - -static inline void dma_buf_put(struct dma_buf *dmabuf) -{ - return; -} - -static inline struct sg_table *dma_buf_map_attachment( - struct dma_buf_attachment *attach, enum dma_data_direction write) -{ - return ERR_PTR(-ENODEV); -} - -static inline void dma_buf_unmap_attachment(struct dma_buf_attachment *attach, - struct sg_table *sg, enum dma_data_direction dir) -{ - return; -} - -static inline int dma_buf_begin_cpu_access(struct dma_buf *dmabuf, - size_t start, size_t len, - enum dma_data_direction dir) -{ - return -ENODEV; -} - -static inline void dma_buf_end_cpu_access(struct dma_buf *dmabuf, - size_t start, size_t len, - enum dma_data_direction dir) -{ -} - -static inline void *dma_buf_kmap_atomic(struct dma_buf *dmabuf, - unsigned long pnum) -{ - return NULL; -} - -static inline 
void dma_buf_kunmap_atomic(struct dma_buf *dmabuf, - unsigned long pnum, void *vaddr) -{ -} - -static inline void *dma_buf_kmap(struct dma_buf *dmabuf, unsigned long pnum) -{ - return NULL; -} - -static inline void dma_buf_kunmap(struct dma_buf *dmabuf, - unsigned long pnum, void *vaddr) -{ -} - -static inline int dma_buf_mmap(struct dma_buf *dmabuf, - struct vm_area_struct *vma, - unsigned long pgoff) -{ - return -ENODEV; -} - -static inline void *dma_buf_vmap(struct dma_buf *dmabuf) -{ - return NULL; -} - -static inline void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr) -{ -} -#endif /* CONFIG_DMA_SHARED_BUFFER */ #endif /* __DMA_BUF_H__ */ -- cgit v1.2.3 From 39e3c9553f34381a1b664c27b0c696a266a5735e Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Wed, 28 Nov 2012 11:30:53 -0500 Subject: vfs: remove DCACHE_NEED_LOOKUP The code that relied on that flag was ripped out of btrfs quite some time ago, and never added back. Josef indicated that he was going to take a different approach to the problem in btrfs, and that we could just eliminate this flag. Cc: Josef Bacik Signed-off-by: Jeff Layton Signed-off-by: Al Viro --- fs/btrfs/inode.c | 16 +--------------- fs/dcache.c | 33 +-------------------------------- fs/namei.c | 11 +---------- include/linux/dcache.h | 8 -------- 4 files changed, 3 insertions(+), 65 deletions(-) (limited to 'include/linux') diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 67ed24ae86bb..16d9e8e191e6 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -4262,16 +4262,7 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry) if (dentry->d_name.len > BTRFS_NAME_LEN) return ERR_PTR(-ENAMETOOLONG); - if (unlikely(d_need_lookup(dentry))) { - memcpy(&location, dentry->d_fsdata, sizeof(struct btrfs_key)); - kfree(dentry->d_fsdata); - dentry->d_fsdata = NULL; - /* This thing is hashed, drop it for now */ - d_drop(dentry); - } else { - ret = btrfs_inode_by_name(dir, dentry, &location); - } - + ret = btrfs_inode_by_name(dir, dentry, &location); if (ret < 0) return ERR_PTR(ret); @@ -4341,11 +4332,6 @@ static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry, struct dentry *ret; ret = d_splice_alias(btrfs_lookup_dentry(dir, dentry), dentry); - if (unlikely(d_need_lookup(dentry))) { - spin_lock(&dentry->d_lock); - dentry->d_flags &= ~DCACHE_NEED_LOOKUP; - spin_unlock(&dentry->d_lock); - } return ret; } diff --git a/fs/dcache.c b/fs/dcache.c index 3a463d0c4fe8..1782be3fc3ef 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -454,24 +454,6 @@ void d_drop(struct dentry *dentry) } EXPORT_SYMBOL(d_drop); -/* - * d_clear_need_lookup - drop a dentry from cache and clear the need lookup flag - * @dentry: dentry to drop - * - * This is called when we do a lookup on a placeholder dentry that needed to be - * looked up. The dentry should have been hashed in order for it to be found by - * the lookup code, but now needs to be unhashed while we do the actual lookup - * and clear the DCACHE_NEED_LOOKUP flag. - */ -void d_clear_need_lookup(struct dentry *dentry) -{ - spin_lock(&dentry->d_lock); - __d_drop(dentry); - dentry->d_flags &= ~DCACHE_NEED_LOOKUP; - spin_unlock(&dentry->d_lock); -} -EXPORT_SYMBOL(d_clear_need_lookup); - /* * Finish off a dentry we've decided to kill. * dentry->d_lock must be held, returns with it unlocked. 
@@ -565,13 +547,7 @@ repeat: if (d_unhashed(dentry)) goto kill_it; - /* - * If this dentry needs lookup, don't set the referenced flag so that it - * is more likely to be cleaned up by the dcache shrinker in case of - * memory pressure. - */ - if (!d_need_lookup(dentry)) - dentry->d_flags |= DCACHE_REFERENCED; + dentry->d_flags |= DCACHE_REFERENCED; dentry_lru_add(dentry); dentry->d_count--; @@ -1736,13 +1712,6 @@ struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode, return found; } - /* - * We are going to instantiate this dentry, unhash it and clear the - * lookup flag so we can do that. - */ - if (unlikely(d_need_lookup(found))) - d_clear_need_lookup(found); - /* * Negative dentry: instantiate it unless the inode is a directory and * already has a dentry. diff --git a/fs/namei.c b/fs/namei.c index 35195ff9d194..25a41e02984b 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -1275,9 +1275,7 @@ static struct dentry *lookup_dcache(struct qstr *name, struct dentry *dir, *need_lookup = false; dentry = d_lookup(dir, name); if (dentry) { - if (d_need_lookup(dentry)) { - *need_lookup = true; - } else if (dentry->d_flags & DCACHE_OP_REVALIDATE) { + if (dentry->d_flags & DCACHE_OP_REVALIDATE) { error = d_revalidate(dentry, flags); if (unlikely(error <= 0)) { if (error < 0) { @@ -1383,8 +1381,6 @@ static int lookup_fast(struct nameidata *nd, struct qstr *name, return -ECHILD; nd->seq = seq; - if (unlikely(d_need_lookup(dentry))) - goto unlazy; if (unlikely(dentry->d_flags & DCACHE_OP_REVALIDATE)) { status = d_revalidate(dentry, nd->flags); if (unlikely(status <= 0)) { @@ -1410,11 +1406,6 @@ unlazy: if (unlikely(!dentry)) goto need_lookup; - if (unlikely(d_need_lookup(dentry))) { - dput(dentry); - goto need_lookup; - } - if (unlikely(dentry->d_flags & DCACHE_OP_REVALIDATE) && need_reval) status = d_revalidate(dentry, nd->flags); if (unlikely(status <= 0)) { diff --git a/include/linux/dcache.h b/include/linux/dcache.h index 59200795482e..c1754b59ddd3 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h @@ -202,7 +202,6 @@ struct dentry_operations { #define DCACHE_MOUNTED 0x10000 /* is a mountpoint */ #define DCACHE_NEED_AUTOMOUNT 0x20000 /* handle automount on this dir */ #define DCACHE_MANAGE_TRANSIT 0x40000 /* manage transit from this dirent */ -#define DCACHE_NEED_LOOKUP 0x80000 /* dentry requires i_op->lookup */ #define DCACHE_MANAGED_DENTRY \ (DCACHE_MOUNTED|DCACHE_NEED_AUTOMOUNT|DCACHE_MANAGE_TRANSIT) @@ -408,13 +407,6 @@ static inline bool d_mountpoint(struct dentry *dentry) return dentry->d_flags & DCACHE_MOUNTED; } -static inline bool d_need_lookup(struct dentry *dentry) -{ - return dentry->d_flags & DCACHE_NEED_LOOKUP; -} - -extern void d_clear_need_lookup(struct dentry *dentry); - extern int sysctl_vfs_cache_pressure; #endif /* __LINUX_DCACHE_H */ -- cgit v1.2.3 From c4d6d8dbf335c7fa47341654a37c53a512b519bb Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 20 Dec 2012 21:52:32 +0000 Subject: CacheFiles: Fix the marking of cached pages Under some circumstances CacheFiles defers the marking of pages with PG_fscache so that it can take advantage of pagevecs to reduce the number of calls to fscache_mark_pages_cached() and the netfs's hook to keep track of this. There are, however, two problems with this: (1) It can lead to the PG_fscache mark being applied _after_ the page is set PG_uptodate and unlocked (by the call to fscache_end_io()). 
(2) CacheFiles's ref on the page is dropped immediately following fscache_end_io() - and so may not still be held when the mark is applied. This can lead to the page being passed back to the allocator before the mark is applied. Fix this by, where appropriate, marking the page before calling fscache_end_io() and releasing the page. This means that we can't take advantage of pagevecs and have to make a separate call for each page to the marking routines. The symptoms of this are Bad Page state errors cropping up under memory pressure, for example: BUG: Bad page state in process tar pfn:002da page:ffffea0000009fb0 count:0 mapcount:0 mapping: (null) index:0x1447 page flags: 0x1000(private_2) Pid: 4574, comm: tar Tainted: G W 3.1.0-rc4-fsdevel+ #1064 Call Trace: [] ? dump_page+0xb9/0xbe [] bad_page+0xd5/0xea [] get_page_from_freelist+0x35b/0x46a [] __alloc_pages_nodemask+0x362/0x662 [] __do_page_cache_readahead+0x13a/0x267 [] ? __do_page_cache_readahead+0xa2/0x267 [] ra_submit+0x1c/0x20 [] ondemand_readahead+0x28b/0x29a [] ? ondemand_readahead+0x163/0x29a [] page_cache_sync_readahead+0x38/0x3a [] generic_file_aio_read+0x2ab/0x67e [] nfs_file_read+0xa4/0xc9 [nfs] [] do_sync_read+0xba/0xfa [] ? security_file_permission+0x7b/0x84 [] ? rw_verify_area+0xab/0xc8 [] vfs_read+0xaa/0x13a [] sys_read+0x45/0x6c [] system_call_fastpath+0x16/0x1b As can be seen, PG_private_2 (== PG_fscache) is set in the page flags. Instrumenting fscache_mark_pages_cached() to verify whether page->mapping was set appropriately showed that sometimes it wasn't. This led to the discovery that sometimes the page has apparently been reclaimed by the time the marker got to see it. Reported-by: M. Stevens Signed-off-by: David Howells Reviewed-by: Jeff Layton --- fs/cachefiles/rdwr.c | 34 ++++++++----------------- fs/fscache/page.c | 59 ++++++++++++++++++++++++++----------------- include/linux/fscache-cache.h | 3 +++ include/linux/fscache.h | 12 ++++----- 4 files changed, 56 insertions(+), 52 deletions(-) (limited to 'include/linux') diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c index c994691d9445..3367abdcdac4 100644 --- a/fs/cachefiles/rdwr.c +++ b/fs/cachefiles/rdwr.c @@ -176,9 +176,8 @@ static void cachefiles_read_copier(struct fscache_operation *_op) recheck: if (PageUptodate(monitor->back_page)) { copy_highpage(monitor->netfs_page, monitor->back_page); - - pagevec_add(&pagevec, monitor->netfs_page); - fscache_mark_pages_cached(monitor->op, &pagevec); + fscache_mark_page_cached(monitor->op, + monitor->netfs_page); error = 0; } else if (!PageError(monitor->back_page)) { /* the page has probably been truncated */ @@ -335,8 +334,7 @@ backing_page_already_present: backing_page_already_uptodate: _debug("- uptodate"); - pagevec_add(pagevec, netpage); - fscache_mark_pages_cached(op, pagevec); + fscache_mark_page_cached(op, netpage); copy_highpage(netpage, backpage); fscache_end_io(op, netpage, 0); @@ -448,8 +446,7 @@ int cachefiles_read_or_alloc_page(struct fscache_retrieval *op, &pagevec); } else if (cachefiles_has_space(cache, 0, 1) == 0) { /* there's space in the cache we can use */ - pagevec_add(&pagevec, page); - fscache_mark_pages_cached(op, &pagevec); + fscache_mark_page_cached(op, page); ret = -ENODATA; } else { ret = -ENOBUFS; @@ -465,8 +462,7 @@ int cachefiles_read_or_alloc_page(struct fscache_retrieval *op, */ static int cachefiles_read_backing_file(struct cachefiles_object *object, struct fscache_retrieval *op, - struct list_head *list, - struct pagevec *mark_pvec) + struct list_head *list) { struct 
cachefiles_one_read *monitor = NULL; struct address_space *bmapping = object->backer->d_inode->i_mapping; @@ -626,13 +622,13 @@ static int cachefiles_read_backing_file(struct cachefiles_object *object, page_cache_release(backpage); backpage = NULL; - if (!pagevec_add(mark_pvec, netpage)) - fscache_mark_pages_cached(op, mark_pvec); + fscache_mark_page_cached(op, netpage); page_cache_get(netpage); if (!pagevec_add(&lru_pvec, netpage)) __pagevec_lru_add_file(&lru_pvec); + /* the netpage is unlocked and marked up to date here */ fscache_end_io(op, netpage, 0); page_cache_release(netpage); netpage = NULL; @@ -775,15 +771,11 @@ int cachefiles_read_or_alloc_pages(struct fscache_retrieval *op, /* submit the apparently valid pages to the backing fs to be read from * disk */ if (nrbackpages > 0) { - ret2 = cachefiles_read_backing_file(object, op, &backpages, - &pagevec); + ret2 = cachefiles_read_backing_file(object, op, &backpages); if (ret2 == -ENOMEM || ret2 == -EINTR) ret = ret2; } - if (pagevec_count(&pagevec) > 0) - fscache_mark_pages_cached(op, &pagevec); - _leave(" = %d [nr=%u%s]", ret, *nr_pages, list_empty(pages) ? " empty" : ""); return ret; @@ -806,7 +798,6 @@ int cachefiles_allocate_page(struct fscache_retrieval *op, { struct cachefiles_object *object; struct cachefiles_cache *cache; - struct pagevec pagevec; int ret; object = container_of(op->op.object, @@ -817,13 +808,10 @@ int cachefiles_allocate_page(struct fscache_retrieval *op, _enter("%p,{%lx},", object, page->index); ret = cachefiles_has_space(cache, 0, 1); - if (ret == 0) { - pagevec_init(&pagevec, 0); - pagevec_add(&pagevec, page); - fscache_mark_pages_cached(op, &pagevec); - } else { + if (ret == 0) + fscache_mark_page_cached(op, page); + else ret = -ENOBUFS; - } _leave(" = %d", ret); return ret; diff --git a/fs/fscache/page.c b/fs/fscache/page.c index 3f7a59bfa7ad..d7c663cfc923 100644 --- a/fs/fscache/page.c +++ b/fs/fscache/page.c @@ -914,6 +914,40 @@ done: } EXPORT_SYMBOL(__fscache_uncache_page); +/** + * fscache_mark_page_cached - Mark a page as being cached + * @op: The retrieval op pages are being marked for + * @page: The page to be marked + * + * Mark a netfs page as being cached. After this is called, the netfs + * must call fscache_uncache_page() to remove the mark. 
+ */ +void fscache_mark_page_cached(struct fscache_retrieval *op, struct page *page) +{ + struct fscache_cookie *cookie = op->op.object->cookie; + +#ifdef CONFIG_FSCACHE_STATS + atomic_inc(&fscache_n_marks); +#endif + + _debug("- mark %p{%lx}", page, page->index); + if (TestSetPageFsCache(page)) { + static bool once_only; + if (!once_only) { + once_only = true; + printk(KERN_WARNING "FS-Cache:" + " Cookie type %s marked page %lx" + " multiple times\n", + cookie->def->name, page->index); + } + } + + if (cookie->def->mark_page_cached) + cookie->def->mark_page_cached(cookie->netfs_data, + op->mapping, page); +} +EXPORT_SYMBOL(fscache_mark_page_cached); + /** * fscache_mark_pages_cached - Mark pages as being cached * @op: The retrieval op pages are being marked for @@ -925,32 +959,11 @@ EXPORT_SYMBOL(__fscache_uncache_page); void fscache_mark_pages_cached(struct fscache_retrieval *op, struct pagevec *pagevec) { - struct fscache_cookie *cookie = op->op.object->cookie; unsigned long loop; -#ifdef CONFIG_FSCACHE_STATS - atomic_add(pagevec->nr, &fscache_n_marks); -#endif - - for (loop = 0; loop < pagevec->nr; loop++) { - struct page *page = pagevec->pages[loop]; - - _debug("- mark %p{%lx}", page, page->index); - if (TestSetPageFsCache(page)) { - static bool once_only; - if (!once_only) { - once_only = true; - printk(KERN_WARNING "FS-Cache:" - " Cookie type %s marked page %lx" - " multiple times\n", - cookie->def->name, page->index); - } - } - } + for (loop = 0; loop < pagevec->nr; loop++) + fscache_mark_page_cached(op, pagevec->pages[loop]); - if (cookie->def->mark_pages_cached) - cookie->def->mark_pages_cached(cookie->netfs_data, - op->mapping, pagevec); pagevec_reinit(pagevec); } EXPORT_SYMBOL(fscache_mark_pages_cached); diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h index ce31408b1e47..9879183b55d8 100644 --- a/include/linux/fscache-cache.h +++ b/include/linux/fscache-cache.h @@ -504,6 +504,9 @@ extern void fscache_withdraw_cache(struct fscache_cache *cache); extern void fscache_io_error(struct fscache_cache *cache); +extern void fscache_mark_page_cached(struct fscache_retrieval *op, + struct page *page); + extern void fscache_mark_pages_cached(struct fscache_retrieval *op, struct pagevec *pagevec); diff --git a/include/linux/fscache.h b/include/linux/fscache.h index 9ec20dec3353..f4b6353543bf 100644 --- a/include/linux/fscache.h +++ b/include/linux/fscache.h @@ -135,14 +135,14 @@ struct fscache_cookie_def { */ void (*put_context)(void *cookie_netfs_data, void *context); - /* indicate pages that now have cache metadata retained - * - this function should mark the specified pages as now being cached - * - the pages will have been marked with PG_fscache before this is + /* indicate page that now have cache metadata retained + * - this function should mark the specified page as now being cached + * - the page will have been marked with PG_fscache before this is * called, so this is optional */ - void (*mark_pages_cached)(void *cookie_netfs_data, - struct address_space *mapping, - struct pagevec *cached_pvec); + void (*mark_page_cached)(void *cookie_netfs_data, + struct address_space *mapping, + struct page *page); /* indicate the cookie is no longer cached * - this function is called when the backing store currently caching -- cgit v1.2.3 From ef46ed888efb1e8da33be5d33c9b54476289a43b Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 20 Dec 2012 21:52:35 +0000 Subject: FS-Cache: Make cookie relinquishment wait for outstanding reads Make fscache_relinquish_cookie() 
log a warning and wait if there are any outstanding reads left on the cookie it was given. Signed-off-by: David Howells --- fs/fscache/cookie.c | 18 ++++++++++++++---- fs/fscache/operation.c | 10 ++++++++-- include/linux/fscache-cache.h | 1 + 3 files changed, 23 insertions(+), 6 deletions(-) (limited to 'include/linux') diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c index 0666996adf80..66be9eccede0 100644 --- a/fs/fscache/cookie.c +++ b/fs/fscache/cookie.c @@ -442,22 +442,32 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire) event = retire ? FSCACHE_OBJECT_EV_RETIRE : FSCACHE_OBJECT_EV_RELEASE; +try_again: spin_lock(&cookie->lock); /* break links with all the active objects */ while (!hlist_empty(&cookie->backing_objects)) { + int n_reads; object = hlist_entry(cookie->backing_objects.first, struct fscache_object, cookie_link); _debug("RELEASE OBJ%x", object->debug_id); - if (atomic_read(&object->n_reads)) { + set_bit(FSCACHE_COOKIE_WAITING_ON_READS, &cookie->flags); + n_reads = atomic_read(&object->n_reads); + if (n_reads) { + int n_ops = object->n_ops; + int n_in_progress = object->n_in_progress; spin_unlock(&cookie->lock); printk(KERN_ERR "FS-Cache:" - " Cookie '%s' still has %d outstanding reads\n", - cookie->def->name, atomic_read(&object->n_reads)); - BUG(); + " Cookie '%s' still has %d outstanding reads (%d,%d)\n", + cookie->def->name, + n_reads, n_ops, n_in_progress); + wait_on_bit(&cookie->flags, FSCACHE_COOKIE_WAITING_ON_READS, + fscache_wait_bit, TASK_UNINTERRUPTIBLE); + printk("Wait finished\n"); + goto try_again; } /* detach each cache object from the object cookie */ diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c index 30afdfa7aec7..c857ab824d6e 100644 --- a/fs/fscache/operation.c +++ b/fs/fscache/operation.c @@ -340,8 +340,14 @@ void fscache_put_operation(struct fscache_operation *op) object = op->object; - if (test_bit(FSCACHE_OP_DEC_READ_CNT, &op->flags)) - atomic_dec(&object->n_reads); + if (test_bit(FSCACHE_OP_DEC_READ_CNT, &op->flags)) { + if (atomic_dec_and_test(&object->n_reads)) { + clear_bit(FSCACHE_COOKIE_WAITING_ON_READS, + &object->cookie->flags); + wake_up_bit(&object->cookie->flags, + FSCACHE_COOKIE_WAITING_ON_READS); + } + } /* now... we may get called with the object spinlock held, so we * complete the cleanup here only if we can immediately acquire the diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h index 9879183b55d8..e3d6d939d959 100644 --- a/include/linux/fscache-cache.h +++ b/include/linux/fscache-cache.h @@ -301,6 +301,7 @@ struct fscache_cookie { #define FSCACHE_COOKIE_PENDING_FILL 3 /* T if pending initial fill on object */ #define FSCACHE_COOKIE_FILLING 4 /* T if filling object incrementally */ #define FSCACHE_COOKIE_UNAVAILABLE 5 /* T if cookie is unavailable (error, etc) */ +#define FSCACHE_COOKIE_WAITING_ON_READS 6 /* T if cookie is waiting on reads */ }; extern struct fscache_cookie fscache_fsdef_index; -- cgit v1.2.3 From 9f10523f891928330b7529da54c1a3cc65180b1a Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 20 Dec 2012 21:52:35 +0000 Subject: FS-Cache: Fix operation state management and accounting Fix the state management of internal fscache operations and the accounting of what operations are in what states. 
This is done by:

 (1) Give struct fscache_operation an enum variable that directly represents the state it's currently in, rather than spreading this knowledge over a bunch of flags, who's processing the operation at the moment and whether it is queued or not. This makes it easier to write assertions to check the state at various points and to prevent invalid state transitions.

 (2) Add an 'operation complete' state and supply a function to indicate the completion of an operation (fscache_op_complete()) and make things call it. The final call to fscache_put_operation() can then check that an op is in the appropriate state (complete or cancelled).

 (3) Adjust the use of object->n_ops, ->n_in_progress, ->n_exclusive to better govern the state of an object:

     (a) The ->n_ops is now the number of extant operations on the object and is now decremented by fscache_put_operation() only.

     (b) The ->n_in_progress is simply the number of objects that have been taken off of the object's pending queue for the purposes of being run. This is decremented by fscache_op_complete() only.

     (c) The ->n_exclusive is the number of exclusive ops that have been submitted and queued or are in progress. It is decremented by fscache_op_complete() and by fscache_cancel_op().

     fscache_put_operation() and fscache_operation_gc() now no longer try to clean up ->n_exclusive and ->n_in_progress. That was leading to double decrements against fscache_cancel_op().

     fscache_cancel_op() now no longer decrements ->n_ops. That was leading to double decrements against fscache_put_operation().

     fscache_submit_exclusive_op() now decides whether it has to queue an op based on ->n_in_progress being > 0 rather than ->n_ops > 0 as the latter will persist in being true even after all preceding operations have been cancelled or completed. Furthermore, if an object is active and there are runnable ops against it, there must be at least one op running.

 (4) Add a remaining-pages counter (n_pages) to struct fscache_retrieval and provide a function to record completion of the pages as they complete. When n_pages reaches 0, the operation is deemed to be complete and fscache_op_complete() is called.

     Add calls to fscache_retrieval_complete() anywhere we've finished with a page we've been given to read or allocate for. This includes places where we just return pages to the netfs for reading from the server and where accessing the cache fails and we discard the proposed netfs page.

The bugs in the unfixed state management manifest themselves as oopses like the following where the operation completion gets out of sync with return of the cookie by the netfs. This is possible because the cache unlocks and returns all the netfs pages before recording its completion - which means that there's nothing to stop the netfs discarding them and returning the cookie.

FS-Cache: Cookie 'NFS.fh' still has outstanding reads
------------[ cut here ]------------
kernel BUG at fs/fscache/cookie.c:519!
invalid opcode: 0000 [#1] SMP CPU 1 Modules linked in: cachefiles nfs fscache auth_rpcgss nfs_acl lockd sunrpc Pid: 400, comm: kswapd0 Not tainted 3.1.0-rc7-fsdevel+ #1090 /DG965RY RIP: 0010:[] [] __fscache_relinquish_cookie+0x170/0x343 [fscache] RSP: 0018:ffff8800368cfb00 EFLAGS: 00010282 RAX: 000000000000003c RBX: ffff880023cc8790 RCX: 0000000000000000 RDX: 0000000000002f2e RSI: 0000000000000001 RDI: ffffffff813ab86c RBP: ffff8800368cfb50 R08: 0000000000000002 R09: 0000000000000000 R10: ffff88003a1b7890 R11: ffff88001df6e488 R12: ffff880023d8ed98 R13: ffff880023cc8798 R14: 0000000000000004 R15: ffff88003b8bf370 FS: 0000000000000000(0000) GS:ffff88003bd00000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 000000008005003b CR2: 00000000008ba008 CR3: 0000000023d93000 CR4: 00000000000006e0 DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 DR3: 0000000000000000 DR6: 00000000ffff0ff0 DR7: 0000000000000400 Process kswapd0 (pid: 400, threadinfo ffff8800368ce000, task ffff88003b8bf040) Stack: ffff88003b8bf040 ffff88001df6e528 ffff88001df6e528 ffffffffa00b46b0 ffff88003b8bf040 ffff88001df6e488 ffff88001df6e620 ffffffffa00b46b0 ffff88001ebd04c8 0000000000000004 ffff8800368cfb70 ffffffffa00b2c91 Call Trace: [] nfs_fscache_release_inode_cookie+0x3b/0x47 [nfs] [] nfs_clear_inode+0x3c/0x41 [nfs] [] nfs4_evict_inode+0x2f/0x33 [nfs] [] evict+0xa1/0x15c [] dispose_list+0x2c/0x38 [] prune_icache_sb+0x28c/0x29b [] prune_super+0xd5/0x140 [] shrink_slab+0x102/0x1ab [] balance_pgdat+0x2f2/0x595 [] ? process_timeout+0xb/0xb [] kswapd+0x270/0x289 [] ? __init_waitqueue_head+0x46/0x46 [] ? balance_pgdat+0x595/0x595 [] kthread+0x7f/0x87 [] kernel_thread_helper+0x4/0x10 [] ? finish_task_switch+0x45/0xc0 [] ? retint_restore_args+0xe/0xe [] ? __init_kthread_worker+0x53/0x53 [] ? gs_change+0xb/0xb Signed-off-by: David Howells --- Documentation/filesystems/caching/backend-api.txt | 26 ++++++- Documentation/filesystems/caching/operations.txt | 2 +- fs/cachefiles/rdwr.c | 31 ++++++-- fs/fscache/object.c | 2 - fs/fscache/operation.c | 91 +++++++++++++++-------- fs/fscache/page.c | 25 ++++++- include/linux/fscache-cache.h | 37 +++++++-- 7 files changed, 164 insertions(+), 50 deletions(-) (limited to 'include/linux') diff --git a/Documentation/filesystems/caching/backend-api.txt b/Documentation/filesystems/caching/backend-api.txt index 382d52cdaf2d..f4769b9399df 100644 --- a/Documentation/filesystems/caching/backend-api.txt +++ b/Documentation/filesystems/caching/backend-api.txt @@ -419,7 +419,10 @@ performed on the denizens of the cache. These are held in a structure of type: If an I/O error occurs, fscache_io_error() should be called and -ENOBUFS returned if possible or fscache_end_io() called with a suitable error - code.. + code. + + fscache_put_retrieval() should be called after a page or pages are dealt + with. This will complete the operation when all pages are dealt with. (*) Request pages be read from cache [mandatory]: @@ -526,6 +529,27 @@ FS-Cache provides some utilities that a cache backend may make use of: error value should be 0 if successful and an error otherwise. + (*) Record that one or more pages being retrieved or allocated have been dealt + with: + + void fscache_retrieval_complete(struct fscache_retrieval *op, + int n_pages); + + This is called to record the fact that one or more pages have been dealt + with and are no longer the concern of this operation. When the number of + pages remaining in the operation reaches 0, the operation will be + completed. 
+ + + (*) Record operation completion: + + void fscache_op_complete(struct fscache_operation *op); + + This is called to record the completion of an operation. This deducts + this operation from the parent object's run state, potentially permitting + one or more pending operations to start running. + + (*) Set highest store limit: void fscache_set_store_limit(struct fscache_object *object, diff --git a/Documentation/filesystems/caching/operations.txt b/Documentation/filesystems/caching/operations.txt index b6b070c57cbf..bee2a5f93d60 100644 --- a/Documentation/filesystems/caching/operations.txt +++ b/Documentation/filesystems/caching/operations.txt @@ -174,7 +174,7 @@ Operations are used through the following procedure: necessary (the object might have died whilst the thread was waiting). When it has finished doing its processing, it should call - fscache_put_operation() on it. + fscache_op_complete() and fscache_put_operation() on it. (4) The operation holds an effective lock upon the object, preventing other exclusive ops conflicting until it is released. The operation can be diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c index bf123d9c3206..93a0815e0498 100644 --- a/fs/cachefiles/rdwr.c +++ b/fs/cachefiles/rdwr.c @@ -197,6 +197,7 @@ static void cachefiles_read_copier(struct fscache_operation *_op) fscache_end_io(op, monitor->netfs_page, error); page_cache_release(monitor->netfs_page); + fscache_retrieval_complete(op, 1); fscache_put_retrieval(op); kfree(monitor); @@ -339,6 +340,7 @@ backing_page_already_uptodate: copy_highpage(netpage, backpage); fscache_end_io(op, netpage, 0); + fscache_retrieval_complete(op, 1); success: _debug("success"); @@ -360,6 +362,7 @@ read_error: goto out; io_error: cachefiles_io_error_obj(object, "Page read error on backing file"); + fscache_retrieval_complete(op, 1); ret = -ENOBUFS; goto out; @@ -369,6 +372,7 @@ nomem_monitor: fscache_put_retrieval(monitor->op); kfree(monitor); nomem: + fscache_retrieval_complete(op, 1); _leave(" = -ENOMEM"); return -ENOMEM; } @@ -407,7 +411,7 @@ int cachefiles_read_or_alloc_page(struct fscache_retrieval *op, _enter("{%p},{%lx},,,", object, page->index); if (!object->backer) - return -ENOBUFS; + goto enobufs; inode = object->backer->d_inode; ASSERT(S_ISREG(inode->i_mode)); @@ -416,7 +420,7 @@ int cachefiles_read_or_alloc_page(struct fscache_retrieval *op, /* calculate the shift required to use bmap */ if (inode->i_sb->s_blocksize > PAGE_SIZE) - return -ENOBUFS; + goto enobufs; shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits; @@ -448,13 +452,19 @@ int cachefiles_read_or_alloc_page(struct fscache_retrieval *op, } else if (cachefiles_has_space(cache, 0, 1) == 0) { /* there's space in the cache we can use */ fscache_mark_page_cached(op, page); + fscache_retrieval_complete(op, 1); ret = -ENODATA; } else { - ret = -ENOBUFS; + goto enobufs; } _leave(" = %d", ret); return ret; + +enobufs: + fscache_retrieval_complete(op, 1); + _leave(" = -ENOBUFS"); + return -ENOBUFS; } /* @@ -632,6 +642,7 @@ static int cachefiles_read_backing_file(struct cachefiles_object *object, /* the netpage is unlocked and marked up to date here */ fscache_end_io(op, netpage, 0); + fscache_retrieval_complete(op, 1); page_cache_release(netpage); netpage = NULL; continue; @@ -659,6 +670,7 @@ out: list_for_each_entry_safe(netpage, _n, list, lru) { list_del(&netpage->lru); page_cache_release(netpage); + fscache_retrieval_complete(op, 1); } _leave(" = %d", ret); @@ -707,7 +719,7 @@ int cachefiles_read_or_alloc_pages(struct fscache_retrieval *op, 
*nr_pages); if (!object->backer) - return -ENOBUFS; + goto all_enobufs; space = 1; if (cachefiles_has_space(cache, 0, *nr_pages) < 0) @@ -720,7 +732,7 @@ int cachefiles_read_or_alloc_pages(struct fscache_retrieval *op, /* calculate the shift required to use bmap */ if (inode->i_sb->s_blocksize > PAGE_SIZE) - return -ENOBUFS; + goto all_enobufs; shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits; @@ -760,7 +772,10 @@ int cachefiles_read_or_alloc_pages(struct fscache_retrieval *op, nrbackpages++; } else if (space && pagevec_add(&pagevec, page) == 0) { fscache_mark_pages_cached(op, &pagevec); + fscache_retrieval_complete(op, 1); ret = -ENODATA; + } else { + fscache_retrieval_complete(op, 1); } } @@ -781,6 +796,10 @@ int cachefiles_read_or_alloc_pages(struct fscache_retrieval *op, _leave(" = %d [nr=%u%s]", ret, *nr_pages, list_empty(pages) ? " empty" : ""); return ret; + +all_enobufs: + fscache_retrieval_complete(op, *nr_pages); + return -ENOBUFS; } /* @@ -815,6 +834,7 @@ int cachefiles_allocate_page(struct fscache_retrieval *op, else ret = -ENOBUFS; + fscache_retrieval_complete(op, 1); _leave(" = %d", ret); return ret; } @@ -864,6 +884,7 @@ int cachefiles_allocate_pages(struct fscache_retrieval *op, ret = -ENOBUFS; } + fscache_retrieval_complete(op, *nr_pages); _leave(" = %d", ret); return ret; } diff --git a/fs/fscache/object.c b/fs/fscache/object.c index b6b897c550ac..773bc798a416 100644 --- a/fs/fscache/object.c +++ b/fs/fscache/object.c @@ -587,8 +587,6 @@ static void fscache_object_available(struct fscache_object *object) if (object->n_in_progress == 0) { if (object->n_ops > 0) { ASSERTCMP(object->n_ops, >=, object->n_obj_ops); - ASSERTIF(object->n_ops > object->n_obj_ops, - !list_empty(&object->pending_ops)); fscache_start_operations(object); } else { ASSERT(list_empty(&object->pending_ops)); diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c index c857ab824d6e..748f9553c2cb 100644 --- a/fs/fscache/operation.c +++ b/fs/fscache/operation.c @@ -37,6 +37,7 @@ void fscache_enqueue_operation(struct fscache_operation *op) ASSERT(op->processor != NULL); ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE); ASSERTCMP(atomic_read(&op->usage), >, 0); + ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS); fscache_stat(&fscache_n_op_enqueue); switch (op->flags & FSCACHE_OP_TYPE) { @@ -64,6 +65,9 @@ EXPORT_SYMBOL(fscache_enqueue_operation); static void fscache_run_op(struct fscache_object *object, struct fscache_operation *op) { + ASSERTCMP(op->state, ==, FSCACHE_OP_ST_PENDING); + + op->state = FSCACHE_OP_ST_IN_PROGRESS; object->n_in_progress++; if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags)) wake_up_bit(&op->flags, FSCACHE_OP_WAITING); @@ -80,22 +84,23 @@ static void fscache_run_op(struct fscache_object *object, int fscache_submit_exclusive_op(struct fscache_object *object, struct fscache_operation *op) { - int ret; - _enter("{OBJ%x OP%x},", object->debug_id, op->debug_id); + ASSERTCMP(op->state, ==, FSCACHE_OP_ST_INITIALISED); + ASSERTCMP(atomic_read(&op->usage), >, 0); + spin_lock(&object->lock); ASSERTCMP(object->n_ops, >=, object->n_in_progress); ASSERTCMP(object->n_ops, >=, object->n_exclusive); ASSERT(list_empty(&op->pend_link)); - ret = -ENOBUFS; + op->state = FSCACHE_OP_ST_PENDING; if (fscache_object_is_active(object)) { op->object = object; object->n_ops++; object->n_exclusive++; /* reads and writes must wait */ - if (object->n_ops > 1) { + if (object->n_in_progress > 0) { atomic_inc(&op->usage); list_add_tail(&op->pend_link, &object->pending_ops); 
fscache_stat(&fscache_n_op_pend); @@ -111,7 +116,6 @@ int fscache_submit_exclusive_op(struct fscache_object *object, /* need to issue a new write op after this */ clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags); - ret = 0; } else if (object->state == FSCACHE_OBJECT_CREATING) { op->object = object; object->n_ops++; @@ -119,14 +123,13 @@ int fscache_submit_exclusive_op(struct fscache_object *object, atomic_inc(&op->usage); list_add_tail(&op->pend_link, &object->pending_ops); fscache_stat(&fscache_n_op_pend); - ret = 0; } else { /* not allowed to submit ops in any other state */ BUG(); } spin_unlock(&object->lock); - return ret; + return 0; } /* @@ -186,6 +189,7 @@ int fscache_submit_op(struct fscache_object *object, _enter("{OBJ%x OP%x},{%u}", object->debug_id, op->debug_id, atomic_read(&op->usage)); + ASSERTCMP(op->state, ==, FSCACHE_OP_ST_INITIALISED); ASSERTCMP(atomic_read(&op->usage), >, 0); spin_lock(&object->lock); @@ -196,6 +200,7 @@ int fscache_submit_op(struct fscache_object *object, ostate = object->state; smp_rmb(); + op->state = FSCACHE_OP_ST_PENDING; if (fscache_object_is_active(object)) { op->object = object; object->n_ops++; @@ -225,12 +230,15 @@ int fscache_submit_op(struct fscache_object *object, object->state == FSCACHE_OBJECT_LC_DYING || object->state == FSCACHE_OBJECT_WITHDRAWING) { fscache_stat(&fscache_n_op_rejected); + op->state = FSCACHE_OP_ST_CANCELLED; ret = -ENOBUFS; } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) { fscache_report_unexpected_submission(object, op, ostate); ASSERT(!fscache_object_is_active(object)); + op->state = FSCACHE_OP_ST_CANCELLED; ret = -ENOBUFS; } else { + op->state = FSCACHE_OP_ST_CANCELLED; ret = -ENOBUFS; } @@ -290,13 +298,18 @@ int fscache_cancel_op(struct fscache_operation *op) _enter("OBJ%x OP%x}", op->object->debug_id, op->debug_id); + ASSERTCMP(op->state, >=, FSCACHE_OP_ST_PENDING); + ASSERTCMP(op->state, !=, FSCACHE_OP_ST_CANCELLED); + ASSERTCMP(atomic_read(&op->usage), >, 0); + spin_lock(&object->lock); ret = -EBUSY; - if (!list_empty(&op->pend_link)) { + if (op->state == FSCACHE_OP_ST_PENDING) { + ASSERT(!list_empty(&op->pend_link)); fscache_stat(&fscache_n_op_cancelled); list_del_init(&op->pend_link); - object->n_ops--; + op->state = FSCACHE_OP_ST_CANCELLED; if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags)) object->n_exclusive--; if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags)) @@ -310,6 +323,37 @@ int fscache_cancel_op(struct fscache_operation *op) return ret; } +/* + * Record the completion of an in-progress operation. 
+ */ +void fscache_op_complete(struct fscache_operation *op) +{ + struct fscache_object *object = op->object; + + _enter("OBJ%x", object->debug_id); + + ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS); + ASSERTCMP(object->n_in_progress, >, 0); + ASSERTIFCMP(test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags), + object->n_exclusive, >, 0); + ASSERTIFCMP(test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags), + object->n_in_progress, ==, 1); + + spin_lock(&object->lock); + + op->state = FSCACHE_OP_ST_COMPLETE; + + if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags)) + object->n_exclusive--; + object->n_in_progress--; + if (object->n_in_progress == 0) + fscache_start_operations(object); + + spin_unlock(&object->lock); + _leave(""); +} +EXPORT_SYMBOL(fscache_op_complete); + /* * release an operation * - queues pending ops if this is the last in-progress op @@ -328,8 +372,9 @@ void fscache_put_operation(struct fscache_operation *op) return; _debug("PUT OP"); - if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags)) - BUG(); + ASSERTIFCMP(op->state != FSCACHE_OP_ST_COMPLETE, + op->state, ==, FSCACHE_OP_ST_CANCELLED); + op->state = FSCACHE_OP_ST_DEAD; fscache_stat(&fscache_n_op_release); @@ -365,16 +410,6 @@ void fscache_put_operation(struct fscache_operation *op) return; } - if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags)) { - ASSERTCMP(object->n_exclusive, >, 0); - object->n_exclusive--; - } - - ASSERTCMP(object->n_in_progress, >, 0); - object->n_in_progress--; - if (object->n_in_progress == 0) - fscache_start_operations(object); - ASSERTCMP(object->n_ops, >, 0); object->n_ops--; if (object->n_ops == 0) @@ -413,23 +448,14 @@ void fscache_operation_gc(struct work_struct *work) spin_unlock(&cache->op_gc_list_lock); object = op->object; + spin_lock(&object->lock); _debug("GC DEFERRED REL OBJ%x OP%x", object->debug_id, op->debug_id); fscache_stat(&fscache_n_op_gc); ASSERTCMP(atomic_read(&op->usage), ==, 0); - - spin_lock(&object->lock); - if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags)) { - ASSERTCMP(object->n_exclusive, >, 0); - object->n_exclusive--; - } - - ASSERTCMP(object->n_in_progress, >, 0); - object->n_in_progress--; - if (object->n_in_progress == 0) - fscache_start_operations(object); + ASSERTCMP(op->state, ==, FSCACHE_OP_ST_DEAD); ASSERTCMP(object->n_ops, >, 0); object->n_ops--; @@ -437,6 +463,7 @@ void fscache_operation_gc(struct work_struct *work) fscache_raise_event(object, FSCACHE_OBJECT_EV_CLEARED); spin_unlock(&object->lock); + kfree(op); } while (count++ < 20); diff --git a/fs/fscache/page.c b/fs/fscache/page.c index 248a12e22532..b38b13d2a555 100644 --- a/fs/fscache/page.c +++ b/fs/fscache/page.c @@ -162,6 +162,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op) fscache_abort_object(object); } + fscache_op_complete(op); _leave(""); } @@ -223,6 +224,8 @@ static void fscache_release_retrieval_op(struct fscache_operation *_op) _enter("{OP%x}", op->op.debug_id); + ASSERTCMP(op->n_pages, ==, 0); + fscache_hist(fscache_retrieval_histogram, op->start_time); if (op->context) fscache_put_context(op->op.object->cookie, op->context); @@ -320,6 +323,11 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object, _debug("<<< GO"); check_if_dead: + if (op->op.state == FSCACHE_OP_ST_CANCELLED) { + fscache_stat(stat_object_dead); + _leave(" = -ENOBUFS [cancelled]"); + return -ENOBUFS; + } if (unlikely(fscache_object_is_dead(object))) { fscache_stat(stat_object_dead); return -ENOBUFS; @@ -364,6 +372,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie, _leave(" = 
-ENOMEM"); return -ENOMEM; } + op->n_pages = 1; spin_lock(&cookie->lock); @@ -375,10 +384,10 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie, ASSERTCMP(object->state, >, FSCACHE_OBJECT_LOOKING_UP); atomic_inc(&object->n_reads); - set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags); + __set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags); if (fscache_submit_op(object, &op->op) < 0) - goto nobufs_unlock; + goto nobufs_unlock_dec; spin_unlock(&cookie->lock); fscache_stat(&fscache_n_retrieval_ops); @@ -425,6 +434,8 @@ error: _leave(" = %d", ret); return ret; +nobufs_unlock_dec: + atomic_dec(&object->n_reads); nobufs_unlock: spin_unlock(&cookie->lock); kfree(op); @@ -482,6 +493,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie, op = fscache_alloc_retrieval(mapping, end_io_func, context); if (!op) return -ENOMEM; + op->n_pages = *nr_pages; spin_lock(&cookie->lock); @@ -491,10 +503,10 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie, struct fscache_object, cookie_link); atomic_inc(&object->n_reads); - set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags); + __set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags); if (fscache_submit_op(object, &op->op) < 0) - goto nobufs_unlock; + goto nobufs_unlock_dec; spin_unlock(&cookie->lock); fscache_stat(&fscache_n_retrieval_ops); @@ -541,6 +553,8 @@ error: _leave(" = %d", ret); return ret; +nobufs_unlock_dec: + atomic_dec(&object->n_reads); nobufs_unlock: spin_unlock(&cookie->lock); kfree(op); @@ -583,6 +597,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie, op = fscache_alloc_retrieval(page->mapping, NULL, NULL); if (!op) return -ENOMEM; + op->n_pages = 1; spin_lock(&cookie->lock); @@ -696,6 +711,7 @@ static void fscache_write_op(struct fscache_operation *_op) fscache_end_page_write(object, page); if (ret < 0) { fscache_abort_object(object); + fscache_op_complete(&op->op); } else { fscache_enqueue_operation(&op->op); } @@ -710,6 +726,7 @@ superseded: spin_unlock(&cookie->stores_lock); clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags); spin_unlock(&object->lock); + fscache_op_complete(&op->op); _leave(""); } diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h index e3d6d939d959..f5facd1d333f 100644 --- a/include/linux/fscache-cache.h +++ b/include/linux/fscache-cache.h @@ -75,6 +75,16 @@ extern wait_queue_head_t fscache_cache_cleared_wq; typedef void (*fscache_operation_release_t)(struct fscache_operation *op); typedef void (*fscache_operation_processor_t)(struct fscache_operation *op); +enum fscache_operation_state { + FSCACHE_OP_ST_BLANK, /* Op is not yet submitted */ + FSCACHE_OP_ST_INITIALISED, /* Op is initialised */ + FSCACHE_OP_ST_PENDING, /* Op is blocked from running */ + FSCACHE_OP_ST_IN_PROGRESS, /* Op is in progress */ + FSCACHE_OP_ST_COMPLETE, /* Op is complete */ + FSCACHE_OP_ST_CANCELLED, /* Op has been cancelled */ + FSCACHE_OP_ST_DEAD /* Op is now dead */ +}; + struct fscache_operation { struct work_struct work; /* record for async ops */ struct list_head pend_link; /* link in object->pending_ops */ @@ -86,10 +96,10 @@ struct fscache_operation { #define FSCACHE_OP_MYTHREAD 0x0002 /* - processing is done be issuing thread, not pool */ #define FSCACHE_OP_WAITING 4 /* cleared when op is woken */ #define FSCACHE_OP_EXCLUSIVE 5 /* exclusive op, other ops must wait */ -#define FSCACHE_OP_DEAD 6 /* op is now dead */ -#define FSCACHE_OP_DEC_READ_CNT 7 /* decrement object->n_reads on destruction */ -#define FSCACHE_OP_KEEP_FLAGS 0xc0 /* flags to keep when repurposing an op */ 
+#define FSCACHE_OP_DEC_READ_CNT 6 /* decrement object->n_reads on destruction */ +#define FSCACHE_OP_KEEP_FLAGS 0x0070 /* flags to keep when repurposing an op */ + enum fscache_operation_state state; atomic_t usage; unsigned debug_id; /* debugging ID */ @@ -106,6 +116,7 @@ extern atomic_t fscache_op_debug_id; extern void fscache_op_work_func(struct work_struct *work); extern void fscache_enqueue_operation(struct fscache_operation *); +extern void fscache_op_complete(struct fscache_operation *); extern void fscache_put_operation(struct fscache_operation *); /** @@ -122,6 +133,7 @@ static inline void fscache_operation_init(struct fscache_operation *op, { INIT_WORK(&op->work, fscache_op_work_func); atomic_set(&op->usage, 1); + op->state = FSCACHE_OP_ST_INITIALISED; op->debug_id = atomic_inc_return(&fscache_op_debug_id); op->processor = processor; op->release = release; @@ -138,6 +150,7 @@ struct fscache_retrieval { void *context; /* netfs read context (pinned) */ struct list_head to_do; /* list of things to be done by the backend */ unsigned long start_time; /* time at which retrieval started */ + unsigned n_pages; /* number of pages to be retrieved */ }; typedef int (*fscache_page_retrieval_func_t)(struct fscache_retrieval *op, @@ -173,9 +186,23 @@ static inline void fscache_enqueue_retrieval(struct fscache_retrieval *op) fscache_enqueue_operation(&op->op); } +/** + * fscache_retrieval_complete - Record (partial) completion of a retrieval + * @op: The retrieval operation affected + * @n_pages: The number of pages to account for + */ +static inline void fscache_retrieval_complete(struct fscache_retrieval *op, + int n_pages) +{ + op->n_pages -= n_pages; + if (op->n_pages <= 0) + fscache_op_complete(&op->op); +} + /** * fscache_put_retrieval - Drop a reference to a retrieval operation * @op: The retrieval operation affected + * @n_pages: The number of pages to account for * * Drop a reference to a retrieval operation. */ @@ -333,10 +360,10 @@ struct fscache_object { int debug_id; /* debugging ID */ int n_children; /* number of child objects */ - int n_ops; /* number of ops outstanding on object */ + int n_ops; /* number of extant ops on object */ int n_obj_ops; /* number of object ops outstanding on object */ int n_in_progress; /* number of ops in progress */ - int n_exclusive; /* number of exclusive ops queued */ + int n_exclusive; /* number of exclusive ops queued or in progress */ atomic_t n_reads; /* number of read ops in progress */ spinlock_t lock; /* state and operations lock */ -- cgit v1.2.3 From ef778e7ae67cd426c30cad43378b908f5eb0bad5 Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 20 Dec 2012 21:52:36 +0000 Subject: FS-Cache: Provide proper invalidation Provide a proper invalidation method rather than relying on the netfs retiring the cookie it has and getting a new one. The problem with this is that isn't easy for the netfs to make sure that it has completed/cancelled all its outstanding storage and retrieval operations on the cookie it is retiring. Instead, have the cache provide an invalidation method that will cancel or wait for all currently outstanding operations before invalidating the cache, and will cause new operations to queue up behind that. Whilst invalidation is in progress, some requests will be rejected until the cache can stack a barrier on the operation queue to cause new operations to be deferred behind it. 
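As an illustration only (this snippet is not part of the patch; the wrapper function name is hypothetical), a netfs that learns of a foreign change on the server might use the two interfaces added here roughly as follows:

	#include <linux/fscache.h>

	/* Sketch, assuming the netfs already holds a data-file cookie for
	 * the inode concerned. */
	static void example_netfs_note_foreign_change(struct fscache_cookie *cookie)
	{
		/* Defer the invalidation to a thread pool; this call is safe
		 * with spinlocks held. */
		fscache_invalidate(cookie);

		/* Before resuming ordinary reads/writes on the cookie, wait
		 * (in sleepable context) until the invalidation has reached
		 * the point where new operations queue behind it. */
		fscache_wait_on_invalidate(cookie);
	}
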
Signed-off-by: David Howells --- Documentation/filesystems/caching/backend-api.txt | 12 ++++ Documentation/filesystems/caching/netfs-api.txt | 46 ++++++++++++--- Documentation/filesystems/caching/object.txt | 23 +++++--- fs/fscache/cookie.c | 60 +++++++++++++++++++ fs/fscache/internal.h | 10 ++++ fs/fscache/object.c | 72 +++++++++++++++++++++++ fs/fscache/operation.c | 32 ++++++++++ fs/fscache/page.c | 51 ++++++++++++++++ fs/fscache/stats.c | 11 +++- include/linux/fscache-cache.h | 8 ++- include/linux/fscache.h | 38 ++++++++++++ 11 files changed, 345 insertions(+), 18 deletions(-) (limited to 'include/linux') diff --git a/Documentation/filesystems/caching/backend-api.txt b/Documentation/filesystems/caching/backend-api.txt index f4769b9399df..d78bab9622c6 100644 --- a/Documentation/filesystems/caching/backend-api.txt +++ b/Documentation/filesystems/caching/backend-api.txt @@ -308,6 +308,18 @@ performed on the denizens of the cache. These are held in a structure of type: obtained by calling object->cookie->def->get_aux()/get_attr(). + (*) Invalidate data object [mandatory]: + + int (*invalidate_object)(struct fscache_operation *op) + + This is called to invalidate a data object (as pointed to by op->object). + All the data stored for this object should be discarded and an + attr_changed operation should be performed. The caller will follow up + with an object update operation. + + fscache_op_complete() must be called on op before returning. + + (*) Discard object [mandatory]: void (*drop_object)(struct fscache_object *object) diff --git a/Documentation/filesystems/caching/netfs-api.txt b/Documentation/filesystems/caching/netfs-api.txt index 7cc6bf2871eb..97e6c0ecc5ef 100644 --- a/Documentation/filesystems/caching/netfs-api.txt +++ b/Documentation/filesystems/caching/netfs-api.txt @@ -35,8 +35,9 @@ This document contains the following sections: (12) Index and data file update (13) Miscellaneous cookie operations (14) Cookie unregistration - (15) Index and data file invalidation - (16) FS-Cache specific page flags. + (15) Index invalidation + (16) Data file invalidation + (17) FS-Cache specific page flags. ============================= @@ -767,13 +768,42 @@ the cookies for "child" indices, objects and pages have been relinquished first. -================================ -INDEX AND DATA FILE INVALIDATION -================================ +================== +INDEX INVALIDATION +================== + +There is no direct way to invalidate an index subtree. To do this, the caller +should relinquish and retire the cookie they have, and then acquire a new one. + + +====================== +DATA FILE INVALIDATION +====================== + +Sometimes it will be necessary to invalidate an object that contains data. +Typically this will be necessary when the server tells the netfs of a foreign +change - at which point the netfs has to throw away all the state it had for an +inode and reload from the server. + +To indicate that a cache object should be invalidated, the following function +can be called: + + void fscache_invalidate(struct fscache_cookie *cookie); + +This can be called with spinlocks held as it defers the work to a thread pool. +All extant storage, retrieval and attribute change ops at this point are +cancelled and discarded. Some future operations will be rejected until the +cache has had a chance to insert a barrier in the operations queue. After +that, operations will be queued again behind the invalidation operation. 
+ +The invalidation operation will perform an attribute change operation and an +auxiliary data update operation as it is very likely these will have changed. + +Using the following function, the netfs can wait for the invalidation operation +to have reached a point at which it can start submitting ordinary operations +once again: -There is no direct way to invalidate an index subtree or a data file. To do -this, the caller should relinquish and retire the cookie they have, and then -acquire a new one. + void fscache_wait_on_invalidate(struct fscache_cookie *cookie); =========================== diff --git a/Documentation/filesystems/caching/object.txt b/Documentation/filesystems/caching/object.txt index 58313348da87..100ff41127e4 100644 --- a/Documentation/filesystems/caching/object.txt +++ b/Documentation/filesystems/caching/object.txt @@ -216,7 +216,14 @@ servicing netfs requests: The normal running state. In this state, requests the netfs makes will be passed on to the cache. - (6) State FSCACHE_OBJECT_UPDATING. + (6) State FSCACHE_OBJECT_INVALIDATING. + + The object is undergoing invalidation. When the state comes here, it + discards all pending read, write and attribute change operations as it is + going to clear out the cache entirely and reinitialise it. It will then + continue to the FSCACHE_OBJECT_UPDATING state. + + (7) State FSCACHE_OBJECT_UPDATING. The state machine comes here to update the object in the cache from the netfs's records. This involves updating the auxiliary data that is used @@ -225,13 +232,13 @@ servicing netfs requests: And there are terminal states in which an object cleans itself up, deallocates memory and potentially deletes stuff from disk: - (7) State FSCACHE_OBJECT_LC_DYING. + (8) State FSCACHE_OBJECT_LC_DYING. The object comes here if it is dying because of a lookup or creation error. This would be due to a disk error or system error of some sort. Temporary data is cleaned up, and the parent is released. - (8) State FSCACHE_OBJECT_DYING. + (9) State FSCACHE_OBJECT_DYING. The object comes here if it is dying due to an error, because its parent cookie has been relinquished by the netfs or because the cache is being @@ -241,27 +248,27 @@ memory and potentially deletes stuff from disk: can destroy themselves. This object waits for all its children to go away before advancing to the next state. - (9) State FSCACHE_OBJECT_ABORT_INIT. +(10) State FSCACHE_OBJECT_ABORT_INIT. The object comes to this state if it was waiting on its parent in FSCACHE_OBJECT_INIT, but its parent died. The object will destroy itself so that the parent may proceed from the FSCACHE_OBJECT_DYING state. -(10) State FSCACHE_OBJECT_RELEASING. -(11) State FSCACHE_OBJECT_RECYCLING. +(11) State FSCACHE_OBJECT_RELEASING. +(12) State FSCACHE_OBJECT_RECYCLING. The object comes to one of these two states when dying once it is rid of all its children, if it is dying because the netfs relinquished its cookie. In the first state, the cached data is expected to persist, and in the second it will be deleted. -(12) State FSCACHE_OBJECT_WITHDRAWING. +(13) State FSCACHE_OBJECT_WITHDRAWING. The object transits to this state if the cache decides it wants to withdraw the object from service, perhaps to make space, but also due to error or just because the whole cache is being withdrawn. -(13) State FSCACHE_OBJECT_DEAD. +(14) State FSCACHE_OBJECT_DEAD. The object transits to this state when the in-memory object record is ready to be deleted. 
The object processor shouldn't ever see an object in diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c index 66be9eccede0..8dcb114758e3 100644 --- a/fs/fscache/cookie.c +++ b/fs/fscache/cookie.c @@ -369,6 +369,66 @@ cant_attach_object: return ret; } +/* + * Invalidate an object. Callable with spinlocks held. + */ +void __fscache_invalidate(struct fscache_cookie *cookie) +{ + struct fscache_object *object; + + _enter("{%s}", cookie->def->name); + + fscache_stat(&fscache_n_invalidates); + + /* Only permit invalidation of data files. Invalidating an index will + * require the caller to release all its attachments to the tree rooted + * there, and if it's doing that, it may as well just retire the + * cookie. + */ + ASSERTCMP(cookie->def->type, ==, FSCACHE_COOKIE_TYPE_DATAFILE); + + /* We will be updating the cookie too. */ + BUG_ON(!cookie->def->get_aux); + + /* If there's an object, we tell the object state machine to handle the + * invalidation on our behalf, otherwise there's nothing to do. + */ + if (!hlist_empty(&cookie->backing_objects)) { + spin_lock(&cookie->lock); + + if (!hlist_empty(&cookie->backing_objects) && + !test_and_set_bit(FSCACHE_COOKIE_INVALIDATING, + &cookie->flags)) { + object = hlist_entry(cookie->backing_objects.first, + struct fscache_object, + cookie_link); + if (object->state < FSCACHE_OBJECT_DYING) + fscache_raise_event( + object, FSCACHE_OBJECT_EV_INVALIDATE); + } + + spin_unlock(&cookie->lock); + } + + _leave(""); +} +EXPORT_SYMBOL(__fscache_invalidate); + +/* + * Wait for object invalidation to complete. + */ +void __fscache_wait_on_invalidate(struct fscache_cookie *cookie) +{ + _enter("%p", cookie); + + wait_on_bit(&cookie->flags, FSCACHE_COOKIE_INVALIDATING, + fscache_wait_bit_interruptible, + TASK_UNINTERRUPTIBLE); + + _leave(""); +} +EXPORT_SYMBOL(__fscache_wait_on_invalidate); + /* * update the index entries backing a cookie */ diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h index f6aad48d38a8..c81179303930 100644 --- a/fs/fscache/internal.h +++ b/fs/fscache/internal.h @@ -122,10 +122,16 @@ extern int fscache_submit_exclusive_op(struct fscache_object *, extern int fscache_submit_op(struct fscache_object *, struct fscache_operation *); extern int fscache_cancel_op(struct fscache_operation *); +extern void fscache_cancel_all_ops(struct fscache_object *); extern void fscache_abort_object(struct fscache_object *); extern void fscache_start_operations(struct fscache_object *); extern void fscache_operation_gc(struct work_struct *); +/* + * page.c + */ +extern void fscache_invalidate_writes(struct fscache_cookie *); + /* * proc.c */ @@ -205,6 +211,9 @@ extern atomic_t fscache_n_acquires_ok; extern atomic_t fscache_n_acquires_nobufs; extern atomic_t fscache_n_acquires_oom; +extern atomic_t fscache_n_invalidates; +extern atomic_t fscache_n_invalidates_run; + extern atomic_t fscache_n_updates; extern atomic_t fscache_n_updates_null; extern atomic_t fscache_n_updates_run; @@ -237,6 +246,7 @@ extern atomic_t fscache_n_cop_alloc_object; extern atomic_t fscache_n_cop_lookup_object; extern atomic_t fscache_n_cop_lookup_complete; extern atomic_t fscache_n_cop_grab_object; +extern atomic_t fscache_n_cop_invalidate_object; extern atomic_t fscache_n_cop_update_object; extern atomic_t fscache_n_cop_drop_object; extern atomic_t fscache_n_cop_put_object; diff --git a/fs/fscache/object.c b/fs/fscache/object.c index 773bc798a416..80b549141ea6 100644 --- a/fs/fscache/object.c +++ b/fs/fscache/object.c @@ -14,6 +14,7 @@ #define FSCACHE_DEBUG_LEVEL COOKIE #include 
+#include #include "internal.h" const char *fscache_object_states[FSCACHE_OBJECT__NSTATES] = { @@ -22,6 +23,7 @@ const char *fscache_object_states[FSCACHE_OBJECT__NSTATES] = { [FSCACHE_OBJECT_CREATING] = "OBJECT_CREATING", [FSCACHE_OBJECT_AVAILABLE] = "OBJECT_AVAILABLE", [FSCACHE_OBJECT_ACTIVE] = "OBJECT_ACTIVE", + [FSCACHE_OBJECT_INVALIDATING] = "OBJECT_INVALIDATING", [FSCACHE_OBJECT_UPDATING] = "OBJECT_UPDATING", [FSCACHE_OBJECT_DYING] = "OBJECT_DYING", [FSCACHE_OBJECT_LC_DYING] = "OBJECT_LC_DYING", @@ -39,6 +41,7 @@ const char fscache_object_states_short[FSCACHE_OBJECT__NSTATES][5] = { [FSCACHE_OBJECT_CREATING] = "CRTN", [FSCACHE_OBJECT_AVAILABLE] = "AVBL", [FSCACHE_OBJECT_ACTIVE] = "ACTV", + [FSCACHE_OBJECT_INVALIDATING] = "INVL", [FSCACHE_OBJECT_UPDATING] = "UPDT", [FSCACHE_OBJECT_DYING] = "DYNG", [FSCACHE_OBJECT_LC_DYING] = "LCDY", @@ -54,6 +57,7 @@ static void fscache_put_object(struct fscache_object *); static void fscache_initialise_object(struct fscache_object *); static void fscache_lookup_object(struct fscache_object *); static void fscache_object_available(struct fscache_object *); +static void fscache_invalidate_object(struct fscache_object *); static void fscache_release_object(struct fscache_object *); static void fscache_withdraw_object(struct fscache_object *); static void fscache_enqueue_dependents(struct fscache_object *); @@ -78,6 +82,15 @@ static inline void fscache_done_parent_op(struct fscache_object *object) spin_unlock(&parent->lock); } +/* + * Notify netfs of invalidation completion. + */ +static inline void fscache_invalidation_complete(struct fscache_cookie *cookie) +{ + if (test_and_clear_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) + wake_up_bit(&cookie->flags, FSCACHE_COOKIE_INVALIDATING); +} + /* * process events that have been sent to an object's state machine * - initiates parent lookup @@ -125,6 +138,16 @@ static void fscache_object_state_machine(struct fscache_object *object) case FSCACHE_OBJECT_ACTIVE: goto active_transit; + /* Invalidate an object on disk */ + case FSCACHE_OBJECT_INVALIDATING: + clear_bit(FSCACHE_OBJECT_EV_INVALIDATE, &object->events); + fscache_stat(&fscache_n_invalidates_run); + fscache_stat(&fscache_n_cop_invalidate_object); + fscache_invalidate_object(object); + fscache_stat_d(&fscache_n_cop_invalidate_object); + fscache_raise_event(object, FSCACHE_OBJECT_EV_UPDATE); + goto active_transit; + /* update the object metadata on disk */ case FSCACHE_OBJECT_UPDATING: clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events); @@ -275,6 +298,9 @@ active_transit: case FSCACHE_OBJECT_EV_ERROR: new_state = FSCACHE_OBJECT_DYING; goto change_state; + case FSCACHE_OBJECT_EV_INVALIDATE: + new_state = FSCACHE_OBJECT_INVALIDATING; + goto change_state; case FSCACHE_OBJECT_EV_UPDATE: new_state = FSCACHE_OBJECT_UPDATING; goto change_state; @@ -679,6 +705,7 @@ static void fscache_withdraw_object(struct fscache_object *object) if (object->cookie == cookie) { hlist_del_init(&object->cookie_link); object->cookie = NULL; + fscache_invalidation_complete(cookie); detached = true; } spin_unlock(&cookie->lock); @@ -888,3 +915,48 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object, return result; } EXPORT_SYMBOL(fscache_check_aux); + +/* + * Asynchronously invalidate an object. 
+ */ +static void fscache_invalidate_object(struct fscache_object *object) +{ + struct fscache_operation *op; + struct fscache_cookie *cookie = object->cookie; + + _enter("{OBJ%x}", object->debug_id); + + /* Reject any new read/write ops and abort any that are pending. */ + fscache_invalidate_writes(cookie); + clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags); + fscache_cancel_all_ops(object); + + /* Now we have to wait for in-progress reads and writes */ + op = kzalloc(sizeof(*op), GFP_KERNEL); + if (!op) { + fscache_raise_event(object, FSCACHE_OBJECT_EV_ERROR); + _leave(" [ENOMEM]"); + return; + } + + fscache_operation_init(op, object->cache->ops->invalidate_object, NULL); + op->flags = FSCACHE_OP_ASYNC | (1 << FSCACHE_OP_EXCLUSIVE); + + spin_lock(&cookie->lock); + if (fscache_submit_exclusive_op(object, op) < 0) + BUG(); + spin_unlock(&cookie->lock); + fscache_put_operation(op); + + /* Once we've completed the invalidation, we know there will be no data + * stored in the cache and thus we can reinstate the data-check-skip + * optimisation. + */ + set_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags); + + /* We can allow read and write requests to come in once again. They'll + * queue up behind our exclusive invalidation operation. + */ + fscache_invalidation_complete(cookie); + _leave(""); +} diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c index 748f9553c2cb..c58dbe613266 100644 --- a/fs/fscache/operation.c +++ b/fs/fscache/operation.c @@ -323,6 +323,38 @@ int fscache_cancel_op(struct fscache_operation *op) return ret; } +/* + * Cancel all pending operations on an object + */ +void fscache_cancel_all_ops(struct fscache_object *object) +{ + struct fscache_operation *op; + + _enter("OBJ%x", object->debug_id); + + spin_lock(&object->lock); + + while (!list_empty(&object->pending_ops)) { + op = list_entry(object->pending_ops.next, + struct fscache_operation, pend_link); + fscache_stat(&fscache_n_op_cancelled); + list_del_init(&op->pend_link); + + ASSERTCMP(op->state, ==, FSCACHE_OP_ST_PENDING); + op->state = FSCACHE_OP_ST_CANCELLED; + + if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags)) + object->n_exclusive--; + if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags)) + wake_up_bit(&op->flags, FSCACHE_OP_WAITING); + fscache_put_operation(op); + cond_resched_lock(&object->lock); + } + + spin_unlock(&object->lock); + _leave(""); +} + /* * Record the completion of an in-progress operation. 
*/ diff --git a/fs/fscache/page.c b/fs/fscache/page.c index b38b13d2a555..7bf9d2557052 100644 --- a/fs/fscache/page.c +++ b/fs/fscache/page.c @@ -361,6 +361,11 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie, if (hlist_empty(&cookie->backing_objects)) goto nobufs; + if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) { + _leave(" = -ENOBUFS [invalidating]"); + return -ENOBUFS; + } + ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX); ASSERTCMP(page, !=, NULL); @@ -483,6 +488,11 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie, if (hlist_empty(&cookie->backing_objects)) goto nobufs; + if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) { + _leave(" = -ENOBUFS [invalidating]"); + return -ENOBUFS; + } + ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX); ASSERTCMP(*nr_pages, >, 0); ASSERT(!list_empty(pages)); @@ -591,6 +601,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie, ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX); ASSERTCMP(page, !=, NULL); + if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) { + _leave(" = -ENOBUFS [invalidating]"); + return -ENOBUFS; + } + if (fscache_wait_for_deferred_lookup(cookie) < 0) return -ERESTARTSYS; @@ -730,6 +745,37 @@ superseded: _leave(""); } +/* + * Clear the pages pending writing for invalidation + */ +void fscache_invalidate_writes(struct fscache_cookie *cookie) +{ + struct page *page; + void *results[16]; + int n, i; + + _enter(""); + + while (spin_lock(&cookie->stores_lock), + n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0, + ARRAY_SIZE(results), + FSCACHE_COOKIE_PENDING_TAG), + n > 0) { + for (i = n - 1; i >= 0; i--) { + page = results[i]; + radix_tree_delete(&cookie->stores, page->index); + } + + spin_unlock(&cookie->stores_lock); + + for (i = n - 1; i >= 0; i--) + page_cache_release(results[i]); + } + + spin_unlock(&cookie->stores_lock); + _leave(""); +} + /* * request a page be stored in the cache * - returns: @@ -776,6 +822,11 @@ int __fscache_write_page(struct fscache_cookie *cookie, fscache_stat(&fscache_n_stores); + if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) { + _leave(" = -ENOBUFS [invalidating]"); + return -ENOBUFS; + } + op = kzalloc(sizeof(*op), GFP_NOIO | __GFP_NOMEMALLOC | __GFP_NORETRY); if (!op) goto nomem; diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c index 4765190d537f..51cdaee14109 100644 --- a/fs/fscache/stats.c +++ b/fs/fscache/stats.c @@ -80,6 +80,9 @@ atomic_t fscache_n_acquires_ok; atomic_t fscache_n_acquires_nobufs; atomic_t fscache_n_acquires_oom; +atomic_t fscache_n_invalidates; +atomic_t fscache_n_invalidates_run; + atomic_t fscache_n_updates; atomic_t fscache_n_updates_null; atomic_t fscache_n_updates_run; @@ -112,6 +115,7 @@ atomic_t fscache_n_cop_alloc_object; atomic_t fscache_n_cop_lookup_object; atomic_t fscache_n_cop_lookup_complete; atomic_t fscache_n_cop_grab_object; +atomic_t fscache_n_cop_invalidate_object; atomic_t fscache_n_cop_update_object; atomic_t fscache_n_cop_drop_object; atomic_t fscache_n_cop_put_object; @@ -168,6 +172,10 @@ static int fscache_stats_show(struct seq_file *m, void *v) atomic_read(&fscache_n_object_created), atomic_read(&fscache_n_object_lookups_timed_out)); + seq_printf(m, "Invals : n=%u run=%u\n", + atomic_read(&fscache_n_invalidates), + atomic_read(&fscache_n_invalidates_run)); + seq_printf(m, "Updates: n=%u nul=%u run=%u\n", atomic_read(&fscache_n_updates), atomic_read(&fscache_n_updates_null), @@ -246,7 +254,8 @@ static int fscache_stats_show(struct 
seq_file *m, void *v) atomic_read(&fscache_n_cop_lookup_object), atomic_read(&fscache_n_cop_lookup_complete), atomic_read(&fscache_n_cop_grab_object)); - seq_printf(m, "CacheOp: upo=%d dro=%d pto=%d atc=%d syn=%d\n", + seq_printf(m, "CacheOp: inv=%d upo=%d dro=%d pto=%d atc=%d syn=%d\n", + atomic_read(&fscache_n_cop_invalidate_object), atomic_read(&fscache_n_cop_update_object), atomic_read(&fscache_n_cop_drop_object), atomic_read(&fscache_n_cop_put_object), diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h index f5facd1d333f..1e454ad7a832 100644 --- a/include/linux/fscache-cache.h +++ b/include/linux/fscache-cache.h @@ -254,6 +254,9 @@ struct fscache_cache_ops { /* store the updated auxiliary data on an object */ void (*update_object)(struct fscache_object *object); + /* Invalidate an object */ + void (*invalidate_object)(struct fscache_operation *op); + /* discard the resources pinned by an object and effect retirement if * necessary */ void (*drop_object)(struct fscache_object *object); @@ -329,6 +332,7 @@ struct fscache_cookie { #define FSCACHE_COOKIE_FILLING 4 /* T if filling object incrementally */ #define FSCACHE_COOKIE_UNAVAILABLE 5 /* T if cookie is unavailable (error, etc) */ #define FSCACHE_COOKIE_WAITING_ON_READS 6 /* T if cookie is waiting on reads */ +#define FSCACHE_COOKIE_INVALIDATING 7 /* T if cookie is being invalidated */ }; extern struct fscache_cookie fscache_fsdef_index; @@ -345,6 +349,7 @@ struct fscache_object { /* active states */ FSCACHE_OBJECT_AVAILABLE, /* cleaning up object after creation */ FSCACHE_OBJECT_ACTIVE, /* object is usable */ + FSCACHE_OBJECT_INVALIDATING, /* object is invalidating */ FSCACHE_OBJECT_UPDATING, /* object is updating */ /* terminal states */ @@ -378,7 +383,8 @@ struct fscache_object { #define FSCACHE_OBJECT_EV_RELEASE 4 /* T if netfs requested object release */ #define FSCACHE_OBJECT_EV_RETIRE 5 /* T if netfs requested object retirement */ #define FSCACHE_OBJECT_EV_WITHDRAW 6 /* T if cache requested object withdrawal */ -#define FSCACHE_OBJECT_EVENTS_MASK 0x7f /* mask of all events*/ +#define FSCACHE_OBJECT_EV_INVALIDATE 7 /* T if cache requested object invalidation */ +#define FSCACHE_OBJECT_EVENTS_MASK 0xff /* mask of all events*/ unsigned long flags; #define FSCACHE_OBJECT_LOCK 0 /* T if object is busy being processed */ diff --git a/include/linux/fscache.h b/include/linux/fscache.h index f4b6353543bf..7a086235da4b 100644 --- a/include/linux/fscache.h +++ b/include/linux/fscache.h @@ -185,6 +185,8 @@ extern struct fscache_cookie *__fscache_acquire_cookie( extern void __fscache_relinquish_cookie(struct fscache_cookie *, int); extern void __fscache_update_cookie(struct fscache_cookie *); extern int __fscache_attr_changed(struct fscache_cookie *); +extern void __fscache_invalidate(struct fscache_cookie *); +extern void __fscache_wait_on_invalidate(struct fscache_cookie *); extern int __fscache_read_or_alloc_page(struct fscache_cookie *, struct page *, fscache_rw_complete_t, @@ -389,6 +391,42 @@ int fscache_attr_changed(struct fscache_cookie *cookie) return -ENOBUFS; } +/** + * fscache_invalidate - Notify cache that an object needs invalidation + * @cookie: The cookie representing the cache object + * + * Notify the cache that an object is needs to be invalidated and that it + * should abort any retrievals or stores it is doing on the cache. The object + * is then marked non-caching until such time as the invalidation is complete. + * + * This can be called with spinlocks held. 
+ * + * See Documentation/filesystems/caching/netfs-api.txt for a complete + * description. + */ +static inline +void fscache_invalidate(struct fscache_cookie *cookie) +{ + if (fscache_cookie_valid(cookie)) + __fscache_invalidate(cookie); +} + +/** + * fscache_wait_on_invalidate - Wait for invalidation to complete + * @cookie: The cookie representing the cache object + * + * Wait for the invalidation of an object to complete. + * + * See Documentation/filesystems/caching/netfs-api.txt for a complete + * description. + */ +static inline +void fscache_wait_on_invalidate(struct fscache_cookie *cookie) +{ + if (fscache_cookie_valid(cookie)) + __fscache_wait_on_invalidate(cookie); +} + /** * fscache_reserve_space - Reserve data space for a cached object * @cookie: The cookie representing the cache object -- cgit v1.2.3 From a02de9608595c8ef649ef03ae735b0b45e3d4396 Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 20 Dec 2012 21:52:36 +0000 Subject: VFS: Make more complete truncate operation available to CacheFiles Make a more complete truncate operation available to CacheFiles (including security checks and suchlike) so that it can use this to clear invalidated cache files. Signed-off-by: David Howells Acked-by: Al Viro --- fs/open.c | 50 +++++++++++++++++++++++++++----------------------- include/linux/fs.h | 1 + 2 files changed, 28 insertions(+), 23 deletions(-) (limited to 'include/linux') diff --git a/fs/open.c b/fs/open.c index 182d8667b7bd..c819bbdab47f 100644 --- a/fs/open.c +++ b/fs/open.c @@ -61,33 +61,22 @@ int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs, return ret; } -static long do_sys_truncate(const char __user *pathname, loff_t length) +long vfs_truncate(struct path *path, loff_t length) { - struct path path; struct inode *inode; - int error; - - error = -EINVAL; - if (length < 0) /* sorry, but loff_t says... */ - goto out; + long error; - error = user_path(pathname, &path); - if (error) - goto out; - inode = path.dentry->d_inode; + inode = path->dentry->d_inode; /* For directories it's -EISDIR, for other non-regulars - -EINVAL */ - error = -EISDIR; if (S_ISDIR(inode->i_mode)) - goto dput_and_out; - - error = -EINVAL; + return -EISDIR; if (!S_ISREG(inode->i_mode)) - goto dput_and_out; + return -EINVAL; - error = mnt_want_write(path.mnt); + error = mnt_want_write(path->mnt); if (error) - goto dput_and_out; + goto out; error = inode_permission(inode, MAY_WRITE); if (error) @@ -111,19 +100,34 @@ static long do_sys_truncate(const char __user *pathname, loff_t length) error = locks_verify_truncate(inode, NULL, length); if (!error) - error = security_path_truncate(&path); + error = security_path_truncate(path); if (!error) - error = do_truncate(path.dentry, length, 0, NULL); + error = do_truncate(path->dentry, length, 0, NULL); put_write_and_out: put_write_access(inode); mnt_drop_write_and_out: - mnt_drop_write(path.mnt); -dput_and_out: - path_put(&path); + mnt_drop_write(path->mnt); out: return error; } +EXPORT_SYMBOL_GPL(vfs_truncate); + +static long do_sys_truncate(const char __user *pathname, loff_t length) +{ + struct path path; + int error; + + if (length < 0) /* sorry, but loff_t says... 
*/ + return -EINVAL; + + error = user_path(pathname, &path); + if (!error) { + error = vfs_truncate(&path, length); + path_put(&path); + } + return error; +} SYSCALL_DEFINE2(truncate, const char __user *, path, long, length) { diff --git a/include/linux/fs.h b/include/linux/fs.h index a823d4be38e7..017a15b707e2 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1999,6 +1999,7 @@ struct filename { bool separate; /* should "name" be freed? */ }; +extern long vfs_truncate(struct path *, loff_t); extern int do_truncate(struct dentry *, loff_t start, unsigned int time_attrs, struct file *filp); extern int do_fallocate(struct file *file, int mode, loff_t offset, -- cgit v1.2.3 From 36a02de5d7981435931d4608ee3e510b752e072b Mon Sep 17 00:00:00 2001 From: David Howells Date: Wed, 5 Dec 2012 13:34:46 +0000 Subject: FS-Cache: Convert the object event ID #defines into an enum Convert the fscache_object event IDs from #defines into an enum. Also add an extra label to the enum to carry the event count and redefine the event mask in terms of that. Signed-off-by: David Howells --- include/linux/fscache-cache.h | 26 +++++++++++++++++--------- 1 file changed, 17 insertions(+), 9 deletions(-) (limited to 'include/linux') diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h index 1e454ad7a832..73e68c8d5df4 100644 --- a/include/linux/fscache-cache.h +++ b/include/linux/fscache-cache.h @@ -337,6 +337,23 @@ struct fscache_cookie { extern struct fscache_cookie fscache_fsdef_index; +/* + * Event list for fscache_object::{event_mask,events} + */ +enum { + FSCACHE_OBJECT_EV_REQUEUE, /* T if object should be requeued */ + FSCACHE_OBJECT_EV_UPDATE, /* T if object should be updated */ + FSCACHE_OBJECT_EV_INVALIDATE, /* T if cache requested object invalidation */ + FSCACHE_OBJECT_EV_CLEARED, /* T if accessors all gone */ + FSCACHE_OBJECT_EV_ERROR, /* T if fatal error occurred during processing */ + FSCACHE_OBJECT_EV_RELEASE, /* T if netfs requested object release */ + FSCACHE_OBJECT_EV_RETIRE, /* T if netfs requested object retirement */ + FSCACHE_OBJECT_EV_WITHDRAW, /* T if cache requested object withdrawal */ + NR_FSCACHE_OBJECT_EVENTS +}; + +#define FSCACHE_OBJECT_EVENTS_MASK ((1UL << NR_FSCACHE_OBJECT_EVENTS) - 1) + /* * on-disk cache file or index handle */ @@ -376,15 +393,6 @@ struct fscache_object { unsigned long event_mask; /* events this object is interested in */ unsigned long events; /* events to be processed by this object * (order is important - using fls) */ -#define FSCACHE_OBJECT_EV_REQUEUE 0 /* T if object should be requeued */ -#define FSCACHE_OBJECT_EV_UPDATE 1 /* T if object should be updated */ -#define FSCACHE_OBJECT_EV_CLEARED 2 /* T if accessors all gone */ -#define FSCACHE_OBJECT_EV_ERROR 3 /* T if fatal error occurred during processing */ -#define FSCACHE_OBJECT_EV_RELEASE 4 /* T if netfs requested object release */ -#define FSCACHE_OBJECT_EV_RETIRE 5 /* T if netfs requested object retirement */ -#define FSCACHE_OBJECT_EV_WITHDRAW 6 /* T if cache requested object withdrawal */ -#define FSCACHE_OBJECT_EV_INVALIDATE 7 /* T if cache requested object invalidation */ -#define FSCACHE_OBJECT_EVENTS_MASK 0xff /* mask of all events*/ unsigned long flags; #define FSCACHE_OBJECT_LOCK 0 /* T if object is busy being processed */ -- cgit v1.2.3 From 1f372dff1da37e2b36ae9085368fa46896398598 Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 13 Dec 2012 20:03:13 +0000 Subject: FS-Cache: Mark cancellation of in-progress operation Mark as cancelled an operation that is in 
progress rather than pending at the time it is cancelled, and call fscache_complete_op() to cancel an operation so that blocked ops can be started. Signed-off-by: David Howells --- fs/cachefiles/interface.c | 2 +- fs/fscache/operation.c | 7 ++++--- fs/fscache/page.c | 10 +++++----- include/linux/fscache-cache.h | 4 ++-- 4 files changed, 12 insertions(+), 11 deletions(-) (limited to 'include/linux') diff --git a/fs/cachefiles/interface.c b/fs/cachefiles/interface.c index 7a9d574b961c..746ce532e130 100644 --- a/fs/cachefiles/interface.c +++ b/fs/cachefiles/interface.c @@ -484,7 +484,7 @@ static void cachefiles_invalidate_object(struct fscache_operation *op) } } - fscache_op_complete(op); + fscache_op_complete(op, true); _leave(""); } diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c index 9e6b7d232bb1..36c59604130d 100644 --- a/fs/fscache/operation.c +++ b/fs/fscache/operation.c @@ -363,9 +363,9 @@ void fscache_cancel_all_ops(struct fscache_object *object) } /* - * Record the completion of an in-progress operation. + * Record the completion or cancellation of an in-progress operation. */ -void fscache_op_complete(struct fscache_operation *op) +void fscache_op_complete(struct fscache_operation *op, bool cancelled) { struct fscache_object *object = op->object; @@ -380,7 +380,8 @@ void fscache_op_complete(struct fscache_operation *op) spin_lock(&object->lock); - op->state = FSCACHE_OP_ST_COMPLETE; + op->state = cancelled ? + FSCACHE_OP_ST_CANCELLED : FSCACHE_OP_ST_COMPLETE; if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags)) object->n_exclusive--; diff --git a/fs/fscache/page.c b/fs/fscache/page.c index ef0218f5080d..8a92b9fabe83 100644 --- a/fs/fscache/page.c +++ b/fs/fscache/page.c @@ -171,7 +171,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op) fscache_abort_object(object); } - fscache_op_complete(op); + fscache_op_complete(op, true); _leave(""); } @@ -704,7 +704,7 @@ static void fscache_write_op(struct fscache_operation *_op) * exists, so we should just cancel this write operation. */ spin_unlock(&object->lock); - op->op.state = FSCACHE_OP_ST_CANCELLED; + fscache_op_complete(&op->op, false); _leave(" [inactive]"); return; } @@ -717,7 +717,7 @@ static void fscache_write_op(struct fscache_operation *_op) * cancel this write operation. 
*/ spin_unlock(&object->lock); - op->op.state = FSCACHE_OP_ST_CANCELLED; + fscache_op_complete(&op->op, false); _leave(" [cancel] op{f=%lx s=%u} obj{s=%u f=%lx}", _op->flags, _op->state, object->state, object->flags); return; @@ -755,7 +755,7 @@ static void fscache_write_op(struct fscache_operation *_op) fscache_end_page_write(object, page); if (ret < 0) { fscache_abort_object(object); - fscache_op_complete(&op->op); + fscache_op_complete(&op->op, true); } else { fscache_enqueue_operation(&op->op); } @@ -770,7 +770,7 @@ superseded: spin_unlock(&cookie->stores_lock); clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags); spin_unlock(&object->lock); - fscache_op_complete(&op->op); + fscache_op_complete(&op->op, true); _leave(""); } diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h index 73e68c8d5df4..5dfa0aa216b6 100644 --- a/include/linux/fscache-cache.h +++ b/include/linux/fscache-cache.h @@ -116,7 +116,7 @@ extern atomic_t fscache_op_debug_id; extern void fscache_op_work_func(struct work_struct *work); extern void fscache_enqueue_operation(struct fscache_operation *); -extern void fscache_op_complete(struct fscache_operation *); +extern void fscache_op_complete(struct fscache_operation *, bool); extern void fscache_put_operation(struct fscache_operation *); /** @@ -196,7 +196,7 @@ static inline void fscache_retrieval_complete(struct fscache_retrieval *op, { op->n_pages -= n_pages; if (op->n_pages <= 0) - fscache_op_complete(&op->op); + fscache_op_complete(&op->op, true); } /** -- cgit v1.2.3 From d30357f2f0ec0bfb67fd39f8f76d22d02d78631e Mon Sep 17 00:00:00 2001 From: Marco Stornelli Date: Sat, 15 Dec 2012 11:59:20 +0100 Subject: vfs: drop vmtruncate Removed vmtruncate Signed-off-by: Marco Stornelli Signed-off-by: Al Viro --- fs/libfs.c | 2 -- include/linux/fs.h | 1 - 2 files changed, 3 deletions(-) (limited to 'include/linux') diff --git a/fs/libfs.c b/fs/libfs.c index 35fc6e74cd88..916da8c4158b 100644 --- a/fs/libfs.c +++ b/fs/libfs.c @@ -369,8 +369,6 @@ int simple_setattr(struct dentry *dentry, struct iattr *iattr) struct inode *inode = dentry->d_inode; int error; - WARN_ON_ONCE(inode->i_op->truncate); - error = inode_change_ok(inode, iattr); if (error) return error; diff --git a/include/linux/fs.h b/include/linux/fs.h index a823d4be38e7..a0c5ba57ffc5 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1565,7 +1565,6 @@ struct inode_operations { int (*mknod) (struct inode *,struct dentry *,umode_t,dev_t); int (*rename) (struct inode *, struct dentry *, struct inode *, struct dentry *); - void (*truncate) (struct inode *); int (*setattr) (struct dentry *, struct iattr *); int (*getattr) (struct vfsmount *mnt, struct dentry *, struct kstat *); int (*setxattr) (struct dentry *, const char *,const void *,size_t,int); -- cgit v1.2.3 From 7898575fc81bd707ce0844cb06874d48e39bbe09 Mon Sep 17 00:00:00 2001 From: Marco Stornelli Date: Sat, 15 Dec 2012 12:00:02 +0100 Subject: mm: drop vmtruncate Removed vmtruncate Signed-off-by: Marco Stornelli Signed-off-by: Al Viro --- include/linux/mm.h | 1 - mm/truncate.c | 23 ----------------------- 2 files changed, 24 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mm.h b/include/linux/mm.h index 7f4f906190bd..63204078f72b 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1007,7 +1007,6 @@ static inline void unmap_shared_mapping_range(struct address_space *mapping, extern void truncate_pagecache(struct inode *inode, loff_t old, loff_t new); extern void truncate_setsize(struct inode *inode, 
loff_t newsize); -extern int vmtruncate(struct inode *inode, loff_t offset); void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end); int truncate_inode_page(struct address_space *mapping, struct page *page); int generic_error_remove_page(struct address_space *mapping, struct page *page); diff --git a/mm/truncate.c b/mm/truncate.c index d51ce92d6e83..c75b736e54b7 100644 --- a/mm/truncate.c +++ b/mm/truncate.c @@ -576,29 +576,6 @@ void truncate_setsize(struct inode *inode, loff_t newsize) } EXPORT_SYMBOL(truncate_setsize); -/** - * vmtruncate - unmap mappings "freed" by truncate() syscall - * @inode: inode of the file used - * @newsize: file offset to start truncating - * - * This function is deprecated and truncate_setsize or truncate_pagecache - * should be used instead, together with filesystem specific block truncation. - */ -int vmtruncate(struct inode *inode, loff_t newsize) -{ - int error; - - error = inode_newsize_ok(inode, newsize); - if (error) - return error; - - truncate_setsize(inode, newsize); - if (inode->i_op->truncate) - inode->i_op->truncate(inode); - return 0; -} -EXPORT_SYMBOL(vmtruncate); - /** * truncate_pagecache_range - unmap and remove pagecache that is hole-punched * @inode: inode -- cgit v1.2.3 From 471667391a92bf7bf2cd4ff31a3ad88e5dec934b Mon Sep 17 00:00:00 2001 From: Alessio Igor Bogani Date: Thu, 13 Dec 2012 12:22:39 +0100 Subject: vfs: Remove useless function prototypes Commit 8e22cc88d68ca1a46d7d582938f979eb640ed30f removes the (un)lock_super function definitions but forgets to remove their prototypes. Signed-off-by: Alessio Igor Bogani Signed-off-by: Al Viro --- include/linux/fs.h | 4 ---- 1 file changed, 4 deletions(-) (limited to 'include/linux') diff --git a/include/linux/fs.h b/include/linux/fs.h index a0c5ba57ffc5..05cd238ad941 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1445,10 +1445,6 @@ static inline void sb_start_intwrite(struct super_block *sb) extern bool inode_owner_or_capable(const struct inode *inode); -/* not quite ready to be deprecated, but... */ -extern void lock_super(struct super_block *); -extern void unlock_super(struct super_block *); - /* * VFS helper functions.. */ -- cgit v1.2.3 From b9d6ba94b875192ef5e2dab92d72beea33b83c3d Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Thu, 20 Dec 2012 14:59:40 -0500 Subject: vfs: add a retry_estale helper function to handle retries on ESTALE This function is expected to be called from path-based syscalls to help them decide whether to try the lookup and call again in the event that they got an -ESTALE return back on an earier try. Currently, we only retry the call once on an ESTALE error, but in the event that we decide that that's not enough in the future, we should be able to change the logic in this helper without too much effort. 
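As a sketch of the intended calling convention (the operation below is a hypothetical placeholder, not part of this patch; headers omitted), a path-based syscall would loop at most one extra time with LOOKUP_REVAL set:

	int do_example_path_op(const char __user *pathname)
	{
		struct path path;
		unsigned int lookup_flags = 0;
		int error;
	retry:
		error = user_path_at(AT_FDCWD, pathname, lookup_flags, &path);
		if (error)
			return error;
		error = example_operation(&path);	/* hypothetical helper */
		path_put(&path);
		if (retry_estale(error, lookup_flags)) {
			lookup_flags |= LOOKUP_REVAL;
			goto retry;
		}
		return error;
	}
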
Signed-off-by: Jeff Layton Signed-off-by: Al Viro --- include/linux/namei.h | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) (limited to 'include/linux') diff --git a/include/linux/namei.h b/include/linux/namei.h index 4bf19d8174ed..66542b644804 100644 --- a/include/linux/namei.h +++ b/include/linux/namei.h @@ -98,4 +98,20 @@ static inline void nd_terminate_link(void *name, size_t len, size_t maxlen) ((char *) name)[min(len, maxlen)] = '\0'; } +/** + * retry_estale - determine whether the caller should retry an operation + * @error: the error that would currently be returned + * @flags: flags being used for next lookup attempt + * + * Check to see if the error code was -ESTALE, and then determine whether + * to retry the call based on whether "flags" already has LOOKUP_REVAL set. + * + * Returns true if the caller should try the operation again. + */ +static inline bool +retry_estale(const long error, const unsigned int flags) +{ + return error == -ESTALE && !(flags & LOOKUP_REVAL); +} + #endif /* _LINUX_NAMEI_H */ -- cgit v1.2.3 From 1ac12b4b6d707937f9de6d09622823b2fd0c93ef Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Tue, 11 Dec 2012 12:10:06 -0500 Subject: vfs: turn is_dir argument to kern_path_create into a lookup_flags arg Where we can pass in LOOKUP_DIRECTORY or LOOKUP_REVAL. Any other flags passed in here are currently ignored. Signed-off-by: Jeff Layton Signed-off-by: Al Viro --- arch/powerpc/platforms/cell/spufs/syscalls.c | 2 +- drivers/base/devtmpfs.c | 2 +- fs/namei.c | 21 ++++++++++++++++----- include/linux/namei.h | 4 ++-- 4 files changed, 20 insertions(+), 9 deletions(-) (limited to 'include/linux') diff --git a/arch/powerpc/platforms/cell/spufs/syscalls.c b/arch/powerpc/platforms/cell/spufs/syscalls.c index 5b7d8ffbf890..baee994fe810 100644 --- a/arch/powerpc/platforms/cell/spufs/syscalls.c +++ b/arch/powerpc/platforms/cell/spufs/syscalls.c @@ -66,7 +66,7 @@ static long do_spu_create(const char __user *pathname, unsigned int flags, struct dentry *dentry; int ret; - dentry = user_path_create(AT_FDCWD, pathname, &path, 1); + dentry = user_path_create(AT_FDCWD, pathname, &path, LOOKUP_DIRECTORY); ret = PTR_ERR(dentry); if (!IS_ERR(dentry)) { ret = spufs_create(&path, dentry, flags, mode, neighbor); diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c index 147d1a4dd269..17cf7cad601e 100644 --- a/drivers/base/devtmpfs.c +++ b/drivers/base/devtmpfs.c @@ -148,7 +148,7 @@ static int dev_mkdir(const char *name, umode_t mode) struct path path; int err; - dentry = kern_path_create(AT_FDCWD, name, &path, 1); + dentry = kern_path_create(AT_FDCWD, name, &path, LOOKUP_DIRECTORY); if (IS_ERR(dentry)) return PTR_ERR(dentry); diff --git a/fs/namei.c b/fs/namei.c index 25a41e02984b..8f8e41f6eb52 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -3030,12 +3030,22 @@ struct file *do_file_open_root(struct dentry *dentry, struct vfsmount *mnt, return file; } -struct dentry *kern_path_create(int dfd, const char *pathname, struct path *path, int is_dir) +struct dentry *kern_path_create(int dfd, const char *pathname, + struct path *path, unsigned int lookup_flags) { struct dentry *dentry = ERR_PTR(-EEXIST); struct nameidata nd; int err2; - int error = do_path_lookup(dfd, pathname, LOOKUP_PARENT, &nd); + int error; + bool is_dir = (lookup_flags & LOOKUP_DIRECTORY); + + /* + * Note that only LOOKUP_REVAL and LOOKUP_DIRECTORY matter here. Any + * other flags passed in are ignored! 
+ */ + lookup_flags &= LOOKUP_REVAL; + + error = do_path_lookup(dfd, pathname, LOOKUP_PARENT|lookup_flags, &nd); if (error) return ERR_PTR(error); @@ -3099,13 +3109,14 @@ void done_path_create(struct path *path, struct dentry *dentry) } EXPORT_SYMBOL(done_path_create); -struct dentry *user_path_create(int dfd, const char __user *pathname, struct path *path, int is_dir) +struct dentry *user_path_create(int dfd, const char __user *pathname, + struct path *path, unsigned int lookup_flags) { struct filename *tmp = getname(pathname); struct dentry *res; if (IS_ERR(tmp)) return ERR_CAST(tmp); - res = kern_path_create(dfd, tmp->name, path, is_dir); + res = kern_path_create(dfd, tmp->name, path, lookup_flags); putname(tmp); return res; } @@ -3228,7 +3239,7 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, umode_t, mode) struct path path; int error; - dentry = user_path_create(dfd, pathname, &path, 1); + dentry = user_path_create(dfd, pathname, &path, LOOKUP_DIRECTORY); if (IS_ERR(dentry)) return PTR_ERR(dentry); diff --git a/include/linux/namei.h b/include/linux/namei.h index 66542b644804..e998c030061d 100644 --- a/include/linux/namei.h +++ b/include/linux/namei.h @@ -65,8 +65,8 @@ extern int user_path_at_empty(int, const char __user *, unsigned, struct path *, extern int kern_path(const char *, unsigned, struct path *); -extern struct dentry *kern_path_create(int, const char *, struct path *, int); -extern struct dentry *user_path_create(int, const char __user *, struct path *, int); +extern struct dentry *kern_path_create(int, const char *, struct path *, unsigned int); +extern struct dentry *user_path_create(int, const char __user *, struct path *, unsigned int); extern void done_path_create(struct path *, struct dentry *); extern struct dentry *kern_path_locked(const char *, struct path *); extern int vfs_path_lookup(struct dentry *, struct vfsmount *, -- cgit v1.2.3 From b66c5984017533316fd1951770302649baf1aa33 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Thu, 20 Dec 2012 15:05:16 -0800 Subject: exec: do not leave bprm->interp on stack If a series of scripts are executed, each triggering module loading via unprintable bytes in the script header, kernel stack contents can leak into the command line. Normally execution of binfmt_script and binfmt_misc happens recursively. However, when modules are enabled, and unprintable bytes exist in the bprm->buf, execution will restart after attempting to load matching binfmt modules. Unfortunately, the logic in binfmt_script and binfmt_misc does not expect to get restarted. They leave bprm->interp pointing to their local stack. This means on restart bprm->interp is left pointing into unused stack memory which can then be copied into the userspace argv areas. After additional study, it seems that both recursion and restart remains the desirable way to handle exec with scripts, misc, and modules. As such, we need to protect the changes to interp. This changes the logic to require allocation for any changes to the bprm->interp. To avoid adding a new kmalloc to every exec, the default value is left as-is. Only when passing through binfmt_script or binfmt_misc does an allocation take place. 
For a proof of concept, see DoTest.sh from: http://www.halfdog.net/Security/2012/LinuxKernelBinfmtScriptStackDataDisclosure/ Signed-off-by: Kees Cook Cc: halfdog Cc: P J P Cc: Alexander Viro Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/binfmt_misc.c | 5 ++++- fs/binfmt_script.c | 4 +++- fs/exec.c | 15 +++++++++++++++ include/linux/binfmts.h | 1 + 4 files changed, 23 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c index 9be335fb8a7c..0c8869fdd14e 100644 --- a/fs/binfmt_misc.c +++ b/fs/binfmt_misc.c @@ -172,7 +172,10 @@ static int load_misc_binary(struct linux_binprm *bprm) goto _error; bprm->argc ++; - bprm->interp = iname; /* for binfmt_script */ + /* Update interp in case binfmt_script needs it. */ + retval = bprm_change_interp(iname, bprm); + if (retval < 0) + goto _error; interp_file = open_exec (iname); retval = PTR_ERR (interp_file); diff --git a/fs/binfmt_script.c b/fs/binfmt_script.c index 1610a91637e5..5027a3e14922 100644 --- a/fs/binfmt_script.c +++ b/fs/binfmt_script.c @@ -80,7 +80,9 @@ static int load_script(struct linux_binprm *bprm) retval = copy_strings_kernel(1, &i_name, bprm); if (retval) return retval; bprm->argc++; - bprm->interp = interp; + retval = bprm_change_interp(interp, bprm); + if (retval < 0) + return retval; /* * OK, now restart the process with the interpreter's dentry. diff --git a/fs/exec.c b/fs/exec.c index d8e1191cb112..237d5342786c 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -1175,9 +1175,24 @@ void free_bprm(struct linux_binprm *bprm) mutex_unlock(¤t->signal->cred_guard_mutex); abort_creds(bprm->cred); } + /* If a binfmt changed the interp, free it. */ + if (bprm->interp != bprm->filename) + kfree(bprm->interp); kfree(bprm); } +int bprm_change_interp(char *interp, struct linux_binprm *bprm) +{ + /* If a binfmt changed the interp, free it first. */ + if (bprm->interp != bprm->filename) + kfree(bprm->interp); + bprm->interp = kstrdup(interp, GFP_KERNEL); + if (!bprm->interp) + return -ENOMEM; + return 0; +} +EXPORT_SYMBOL(bprm_change_interp); + /* * install the new credentials for this executable */ diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h index a4c2b565c835..bdf3965f0a29 100644 --- a/include/linux/binfmts.h +++ b/include/linux/binfmts.h @@ -112,6 +112,7 @@ extern int setup_arg_pages(struct linux_binprm * bprm, unsigned long stack_top, int executable_stack); extern int bprm_mm_init(struct linux_binprm *bprm); +extern int bprm_change_interp(char *interp, struct linux_binprm *bprm); extern int copy_strings_kernel(int argc, const char *const *argv, struct linux_binprm *bprm); extern int prepare_bprm_creds(struct linux_binprm *bprm); -- cgit v1.2.3 From c4e18497d8fd92eef2c6e7eadcc1a107ccd115ea Mon Sep 17 00:00:00 2001 From: Guenter Roeck Date: Thu, 20 Dec 2012 15:05:42 -0800 Subject: linux/kernel.h: fix DIV_ROUND_CLOSEST with unsigned divisors Commit 263a523d18bc ("linux/kernel.h: Fix warning seen with W=1 due to change in DIV_ROUND_CLOSEST") fixes a warning seen with W=1 due to change in DIV_ROUND_CLOSEST. Unfortunately, the C compiler converts divide operations with unsigned divisors to unsigned, even if the dividend is signed and negative (for example, -10 / 5U = 858993457). The C standard says "If one operand has unsigned int type, the other operand is converted to unsigned int", so the compiler is not to blame. 
As a result, DIV_ROUND_CLOSEST(0, 2U) and similar operations now return bad values, since the automatic conversion of expressions such as "0 - 2U/2" to unsigned was not taken into account. Fix by checking for the divisor variable type when deciding which operation to perform. This fixes DIV_ROUND_CLOSEST(0, 2U), but still returns bad values for negative dividends divided by unsigned divisors. Mark the latter case as unsupported. One observed effect of this problem is that the s2c_hwmon driver reports a value of 4198403 instead of 0 if the ADC reads 0. Other impact is unpredictable. Problem is seen if the divisor is an unsigned variable or constant and the dividend is less than (divisor/2). Signed-off-by: Guenter Roeck Reported-by: Juergen Beisert Tested-by: Juergen Beisert Cc: Jean Delvare Cc: [3.7.x] Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/kernel.h | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/kernel.h b/include/linux/kernel.h index d140e8fb075f..c566927efcbd 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -77,13 +77,15 @@ /* * Divide positive or negative dividend by positive divisor and round - * to closest integer. Result is undefined for negative divisors. + * to closest integer. Result is undefined for negative divisors and + * for negative dividends if the divisor variable type is unsigned. */ #define DIV_ROUND_CLOSEST(x, divisor)( \ { \ typeof(x) __x = x; \ typeof(divisor) __d = divisor; \ - (((typeof(x))-1) > 0 || (__x) > 0) ? \ + (((typeof(x))-1) > 0 || \ + ((typeof(divisor))-1) > 0 || (__x) > 0) ? \ (((__x) + ((__d) / 2)) / (__d)) : \ (((__x) - ((__d) / 2)) / (__d)); \ } \ -- cgit v1.2.3 From d54eaa5a0fde0a202e4e91f200f818edcef15bee Mon Sep 17 00:00:00 2001 From: Mike Snitzer Date: Fri, 21 Dec 2012 20:23:36 +0000 Subject: dm: prepare to support WRITE SAME Allow targets to opt in to WRITE SAME support by setting 'num_write_same_requests' in the dm_target structure. A dm device will only advertise WRITE SAME support if all its targets and all its underlying devices support it. 
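As a minimal sketch of the opt-in (the target name is hypothetical and not part of this patch), a pass-through style target would set the new field from its constructor alongside the existing flush/discard counters:

	static int example_ctr(struct dm_target *ti, unsigned argc, char **argv)
	{
		/* ... parse arguments, set up ti->private ... */
		ti->num_flush_requests = 1;
		ti->num_discard_requests = 1;
		ti->num_write_same_requests = 1;	/* advertise WRITE SAME support */
		return 0;
	}
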
Signed-off-by: Mike Snitzer Signed-off-by: Alasdair G Kergon --- drivers/md/dm-table.c | 30 +++++++++++++++++++++++++++++- include/linux/device-mapper.h | 5 +++++ 2 files changed, 34 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index fa2955790031..6be58b696377 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -1414,6 +1414,33 @@ static bool dm_table_all_devices_attribute(struct dm_table *t, return 1; } +static int device_not_write_same_capable(struct dm_target *ti, struct dm_dev *dev, + sector_t start, sector_t len, void *data) +{ + struct request_queue *q = bdev_get_queue(dev->bdev); + + return q && !q->limits.max_write_same_sectors; +} + +static bool dm_table_supports_write_same(struct dm_table *t) +{ + struct dm_target *ti; + unsigned i = 0; + + while (i < dm_table_get_num_targets(t)) { + ti = dm_table_get_target(t, i++); + + if (!ti->num_write_same_requests) + return false; + + if (!ti->type->iterate_devices || + !ti->type->iterate_devices(ti, device_not_write_same_capable, NULL)) + return false; + } + + return true; +} + void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, struct queue_limits *limits) { @@ -1445,7 +1472,8 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, else queue_flag_clear_unlocked(QUEUE_FLAG_NONROT, q); - q->limits.max_write_same_sectors = 0; + if (!dm_table_supports_write_same(t)) + q->limits.max_write_same_sectors = 0; dm_table_set_integrity(t); diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index 38d27a10aa5d..d1f6cd8486f2 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h @@ -205,6 +205,11 @@ struct dm_target { */ unsigned num_discard_requests; + /* + * The number of WRITE SAME requests that will be submitted to the target. + */ + unsigned num_write_same_requests; + /* target specific data */ void *private; -- cgit v1.2.3 From c0820cf5ad09522bdd9ff68e84841a09c9f339d8 Mon Sep 17 00:00:00 2001 From: Mikulas Patocka Date: Fri, 21 Dec 2012 20:23:38 +0000 Subject: dm: introduce per_bio_data Introduce a field per_bio_data_size in struct dm_target. Targets can set this field in the constructor. If a target sets this field to a non-zero value, "per_bio_data_size" bytes of auxiliary data are allocated for each bio submitted to the target. These data can be used for any purpose by the target and help us improve performance by removing some per-target mempools. Per-bio data is accessed with dm_per_bio_data. The argument data_size must be the same as the value per_bio_data_size in dm_target. If the target has a pointer to per_bio_data, it can get a pointer to the bio with dm_bio_from_per_bio_data() function (data_size must be the same as the value passed to dm_per_bio_data). 
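As a sketch, assuming a hypothetical target (the names below are illustrative only, headers are omitted, and the ->map signature shown is the simplified bio-only form introduced later in this series), per-bio data is sized in the constructor and retrieved in the map function:

	struct example_bio_data {		/* hypothetical per-bio state */
		unsigned long submit_jiffies;
	};

	static int example_ctr(struct dm_target *ti, unsigned argc, char **argv)
	{
		/* ... */
		ti->per_bio_data_size = sizeof(struct example_bio_data);
		return 0;
	}

	static int example_map(struct dm_target *ti, struct bio *bio)
	{
		struct example_bio_data *pb =
			dm_per_bio_data(bio, sizeof(struct example_bio_data));

		pb->submit_jiffies = jiffies;	/* record when the bio entered the target */
		/* ... remap bio->bi_bdev / bio->bi_sector as usual ... */
		return DM_MAPIO_REMAPPED;
	}
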
Signed-off-by: Mikulas Patocka Signed-off-by: Alasdair G Kergon --- drivers/md/dm-table.c | 11 ++++++++++- drivers/md/dm.c | 33 +++++++++++++++------------------ drivers/md/dm.h | 2 +- include/linux/device-mapper.h | 30 ++++++++++++++++++++++++++++++ 4 files changed, 56 insertions(+), 20 deletions(-) (limited to 'include/linux') diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 6be58b696377..daf25d0890b3 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -967,13 +967,22 @@ bool dm_table_request_based(struct dm_table *t) int dm_table_alloc_md_mempools(struct dm_table *t) { unsigned type = dm_table_get_type(t); + unsigned per_bio_data_size = 0; + struct dm_target *tgt; + unsigned i; if (unlikely(type == DM_TYPE_NONE)) { DMWARN("no table type is set, can't allocate mempools"); return -EINVAL; } - t->mempools = dm_alloc_md_mempools(type, t->integrity_supported); + if (type == DM_TYPE_BIO_BASED) + for (i = 0; i < t->num_targets; i++) { + tgt = t->targets + i; + per_bio_data_size = max(per_bio_data_size, tgt->per_bio_data_size); + } + + t->mempools = dm_alloc_md_mempools(type, t->integrity_supported, per_bio_data_size); if (!t->mempools) return -ENOMEM; diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 5401cdce0fc5..2765cf2ba0ff 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -62,18 +62,6 @@ struct dm_io { spinlock_t endio_lock; }; -/* - * For bio-based dm. - * One of these is allocated per target within a bio. Hopefully - * this will be simplified out one day. - */ -struct dm_target_io { - struct dm_io *io; - struct dm_target *ti; - union map_info info; - struct bio clone; -}; - /* * For request-based dm. * One of these is allocated per request. @@ -1980,13 +1968,20 @@ static void free_dev(struct mapped_device *md) static void __bind_mempools(struct mapped_device *md, struct dm_table *t) { - struct dm_md_mempools *p; + struct dm_md_mempools *p = dm_table_get_md_mempools(t); - if (md->io_pool && (md->tio_pool || dm_table_get_type(t) == DM_TYPE_BIO_BASED) && md->bs) - /* the md already has necessary mempools */ + if (md->io_pool && (md->tio_pool || dm_table_get_type(t) == DM_TYPE_BIO_BASED) && md->bs) { + /* + * The md already has necessary mempools. Reload just the + * bioset because front_pad may have changed because + * a different table was loaded. + */ + bioset_free(md->bs); + md->bs = p->bs; + p->bs = NULL; goto out; + } - p = dm_table_get_md_mempools(t); BUG_ON(!p || md->io_pool || md->tio_pool || md->bs); md->io_pool = p->io_pool; @@ -2745,7 +2740,7 @@ int dm_noflush_suspending(struct dm_target *ti) } EXPORT_SYMBOL_GPL(dm_noflush_suspending); -struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity) +struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity, unsigned per_bio_data_size) { struct dm_md_mempools *pools = kmalloc(sizeof(*pools), GFP_KERNEL); unsigned int pool_size = (type == DM_TYPE_BIO_BASED) ? 16 : MIN_IOS; @@ -2753,6 +2748,8 @@ struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity) if (!pools) return NULL; + per_bio_data_size = roundup(per_bio_data_size, __alignof__(struct dm_target_io)); + pools->io_pool = (type == DM_TYPE_BIO_BASED) ? mempool_create_slab_pool(MIN_IOS, _io_cache) : mempool_create_slab_pool(MIN_IOS, _rq_bio_info_cache); @@ -2768,7 +2765,7 @@ struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity) pools->bs = (type == DM_TYPE_BIO_BASED) ? 
bioset_create(pool_size, - offsetof(struct dm_target_io, clone)) : + per_bio_data_size + offsetof(struct dm_target_io, clone)) : bioset_create(pool_size, offsetof(struct dm_rq_clone_bio_info, clone)); if (!pools->bs) diff --git a/drivers/md/dm.h b/drivers/md/dm.h index 6a99fefaa743..45b97da1bd06 100644 --- a/drivers/md/dm.h +++ b/drivers/md/dm.h @@ -159,7 +159,7 @@ void dm_kcopyd_exit(void); /* * Mempool operations */ -struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity); +struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity, unsigned per_bio_data_size); void dm_free_md_mempools(struct dm_md_mempools *pools); #endif diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index d1f6cd8486f2..6f0e73b4a80d 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h @@ -210,6 +210,12 @@ struct dm_target { */ unsigned num_write_same_requests; + /* + * The minimum number of extra bytes allocated in each bio for the + * target to use. dm_per_bio_data returns the data location. + */ + unsigned per_bio_data_size; + /* target specific data */ void *private; @@ -246,6 +252,30 @@ struct dm_target_callbacks { int (*congested_fn) (struct dm_target_callbacks *, int); }; +/* + * For bio-based dm. + * One of these is allocated for each bio. + * This structure shouldn't be touched directly by target drivers. + * It is here so that we can inline dm_per_bio_data and + * dm_bio_from_per_bio_data + */ +struct dm_target_io { + struct dm_io *io; + struct dm_target *ti; + union map_info info; + struct bio clone; +}; + +static inline void *dm_per_bio_data(struct bio *bio, size_t data_size) +{ + return (char *)bio - offsetof(struct dm_target_io, clone) - data_size; +} + +static inline struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size) +{ + return (struct bio *)((char *)data + data_size + offsetof(struct dm_target_io, clone)); +} + int dm_register_target(struct target_type *t); void dm_unregister_target(struct target_type *t); -- cgit v1.2.3 From ddbd658f6446a35e4d6ba84812fd71023320cae2 Mon Sep 17 00:00:00 2001 From: Mikulas Patocka Date: Fri, 21 Dec 2012 20:23:39 +0000 Subject: dm: move target request nr to dm_target_io This patch moves target_request_nr from map_info to dm_target_io and makes it accessible with dm_bio_get_target_request_nr. This patch is a preparation for the next patch that removes map_info. 
Signed-off-by: Mikulas Patocka Signed-off-by: Alasdair G Kergon --- drivers/md/dm-snap.c | 2 +- drivers/md/dm-stripe.c | 4 ++-- drivers/md/dm.c | 3 ++- include/linux/device-mapper.h | 14 ++++++++++---- 4 files changed, 15 insertions(+), 8 deletions(-) (limited to 'include/linux') diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c index 5e88bc437be0..b7e179cdc5af 100644 --- a/drivers/md/dm-snap.c +++ b/drivers/md/dm-snap.c @@ -1682,7 +1682,7 @@ static int snapshot_merge_map(struct dm_target *ti, struct bio *bio, chunk_t chunk; if (bio->bi_rw & REQ_FLUSH) { - if (!map_context->target_request_nr) + if (!dm_bio_get_target_request_nr(bio)) bio->bi_bdev = s->origin->bdev; else bio->bi_bdev = s->cow->bdev; diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c index e2f876539743..4e7ba82146c0 100644 --- a/drivers/md/dm-stripe.c +++ b/drivers/md/dm-stripe.c @@ -279,13 +279,13 @@ static int stripe_map(struct dm_target *ti, struct bio *bio, unsigned target_request_nr; if (bio->bi_rw & REQ_FLUSH) { - target_request_nr = map_context->target_request_nr; + target_request_nr = dm_bio_get_target_request_nr(bio); BUG_ON(target_request_nr >= sc->stripes); bio->bi_bdev = sc->stripe[target_request_nr].dev->bdev; return DM_MAPIO_REMAPPED; } if (unlikely(bio->bi_rw & REQ_DISCARD)) { - target_request_nr = map_context->target_request_nr; + target_request_nr = dm_bio_get_target_request_nr(bio); BUG_ON(target_request_nr >= sc->stripes); return stripe_map_discard(sc, bio, target_request_nr); } diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 2765cf2ba0ff..5ee580b4f330 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -1099,6 +1099,7 @@ static struct dm_target_io *alloc_tio(struct clone_info *ci, tio->io = ci->io; tio->ti = ti; memset(&tio->info, 0, sizeof(tio->info)); + tio->target_request_nr = 0; return tio; } @@ -1109,7 +1110,7 @@ static void __issue_target_request(struct clone_info *ci, struct dm_target *ti, struct dm_target_io *tio = alloc_tio(ci, ti, ci->bio->bi_max_vecs); struct bio *clone = &tio->clone; - tio->info.target_request_nr = request_nr; + tio->target_request_nr = request_nr; /* * Discard requests require the bio's inline iovecs be initialized. diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index 6f0e73b4a80d..eb96ef6fd8b7 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h @@ -23,7 +23,6 @@ typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE } status_type_t; union map_info { void *ptr; unsigned long long ll; - unsigned target_request_nr; }; /* @@ -193,20 +192,21 @@ struct dm_target { * A number of zero-length barrier requests that will be submitted * to the target for the purpose of flushing cache. * - * The request number will be placed in union map_info->target_request_nr. + * The request number can be accessed with dm_bio_get_target_request_nr. * It is a responsibility of the target driver to remap these requests * to the real underlying devices. */ unsigned num_flush_requests; /* - * The number of discard requests that will be submitted to the - * target. map_info->request_nr is used just like num_flush_requests. + * The number of discard requests that will be submitted to the target. + * The request number can be accessed with dm_bio_get_target_request_nr. */ unsigned num_discard_requests; /* * The number of WRITE SAME requests that will be submitted to the target. + * The request number can be accessed with dm_bio_get_target_request_nr. 
*/ unsigned num_write_same_requests; @@ -263,6 +263,7 @@ struct dm_target_io { struct dm_io *io; struct dm_target *ti; union map_info info; + unsigned target_request_nr; struct bio clone; }; @@ -276,6 +277,11 @@ static inline struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size) return (struct bio *)((char *)data + data_size + offsetof(struct dm_target_io, clone)); } +static inline unsigned dm_bio_get_target_request_nr(const struct bio *bio) +{ + return container_of(bio, struct dm_target_io, clone)->target_request_nr; +} + int dm_register_target(struct target_type *t); void dm_unregister_target(struct target_type *t); -- cgit v1.2.3 From 7de3ee57da4b717050e79c9313a9bf66ccc72519 Mon Sep 17 00:00:00 2001 From: Mikulas Patocka Date: Fri, 21 Dec 2012 20:23:41 +0000 Subject: dm: remove map_info This patch removes map_info from bio-based device mapper targets. map_info is still used for request-based targets. Signed-off-by: Mikulas Patocka Signed-off-by: Alasdair G Kergon --- drivers/md/dm-crypt.c | 5 ++--- drivers/md/dm-delay.c | 5 ++--- drivers/md/dm-flakey.c | 6 ++---- drivers/md/dm-linear.c | 3 +-- drivers/md/dm-raid.c | 4 ++-- drivers/md/dm-raid1.c | 6 ++---- drivers/md/dm-snap.c | 12 ++++-------- drivers/md/dm-stripe.c | 6 ++---- drivers/md/dm-target.c | 5 ++--- drivers/md/dm-thin.c | 15 +++++---------- drivers/md/dm-verity.c | 3 +-- drivers/md/dm-zero.c | 5 ++--- drivers/md/dm.c | 4 ++-- include/linux/device-mapper.h | 6 ++---- 14 files changed, 31 insertions(+), 54 deletions(-) (limited to 'include/linux') diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index bbf459bca61d..f7369f9d8595 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -1689,8 +1689,7 @@ bad: return ret; } -static int crypt_map(struct dm_target *ti, struct bio *bio, - union map_info *map_context) +static int crypt_map(struct dm_target *ti, struct bio *bio) { struct dm_crypt_io *io; struct crypt_config *cc = ti->private; @@ -1846,7 +1845,7 @@ static int crypt_iterate_devices(struct dm_target *ti, static struct target_type crypt_target = { .name = "crypt", - .version = {1, 11, 0}, + .version = {1, 12, 0}, .module = THIS_MODULE, .ctr = crypt_ctr, .dtr = crypt_dtr, diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c index f53846f9ab50..cc1bd048acb2 100644 --- a/drivers/md/dm-delay.c +++ b/drivers/md/dm-delay.c @@ -274,8 +274,7 @@ static void delay_resume(struct dm_target *ti) atomic_set(&dc->may_delay, 1); } -static int delay_map(struct dm_target *ti, struct bio *bio, - union map_info *map_context) +static int delay_map(struct dm_target *ti, struct bio *bio) { struct delay_c *dc = ti->private; @@ -338,7 +337,7 @@ out: static struct target_type delay_target = { .name = "delay", - .version = {1, 1, 0}, + .version = {1, 2, 0}, .module = THIS_MODULE, .ctr = delay_ctr, .dtr = delay_dtr, diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c index 660f98167e7b..9721f2ffb1a2 100644 --- a/drivers/md/dm-flakey.c +++ b/drivers/md/dm-flakey.c @@ -270,8 +270,7 @@ static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc) } } -static int flakey_map(struct dm_target *ti, struct bio *bio, - union map_info *map_context) +static int flakey_map(struct dm_target *ti, struct bio *bio) { struct flakey_c *fc = ti->private; unsigned elapsed; @@ -321,8 +320,7 @@ map_bio: return DM_MAPIO_REMAPPED; } -static int flakey_end_io(struct dm_target *ti, struct bio *bio, - int error, union map_info *map_context) +static int flakey_end_io(struct dm_target *ti, struct bio *bio, int error) { struct 
flakey_c *fc = ti->private; struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data)); diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c index 82222a8cf750..328cad5617ab 100644 --- a/drivers/md/dm-linear.c +++ b/drivers/md/dm-linear.c @@ -88,8 +88,7 @@ static void linear_map_bio(struct dm_target *ti, struct bio *bio) bio->bi_sector = linear_map_sector(ti, bio->bi_sector); } -static int linear_map(struct dm_target *ti, struct bio *bio, - union map_info *map_context) +static int linear_map(struct dm_target *ti, struct bio *bio) { linear_map_bio(ti, bio); diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index 4a20bf8c72da..3d8984edeff7 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c @@ -1218,7 +1218,7 @@ static void raid_dtr(struct dm_target *ti) context_free(rs); } -static int raid_map(struct dm_target *ti, struct bio *bio, union map_info *map_context) +static int raid_map(struct dm_target *ti, struct bio *bio) { struct raid_set *rs = ti->private; struct mddev *mddev = &rs->md; @@ -1432,7 +1432,7 @@ static void raid_resume(struct dm_target *ti) static struct target_type raid_target = { .name = "raid", - .version = {1, 3, 1}, + .version = {1, 4, 0}, .module = THIS_MODULE, .ctr = raid_ctr, .dtr = raid_dtr, diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c index 57685cf0afa8..fa519185ebba 100644 --- a/drivers/md/dm-raid1.c +++ b/drivers/md/dm-raid1.c @@ -1142,8 +1142,7 @@ static void mirror_dtr(struct dm_target *ti) /* * Mirror mapping function */ -static int mirror_map(struct dm_target *ti, struct bio *bio, - union map_info *map_context) +static int mirror_map(struct dm_target *ti, struct bio *bio) { int r, rw = bio_rw(bio); struct mirror *m; @@ -1192,8 +1191,7 @@ static int mirror_map(struct dm_target *ti, struct bio *bio, return DM_MAPIO_REMAPPED; } -static int mirror_end_io(struct dm_target *ti, struct bio *bio, - int error, union map_info *map_context) +static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error) { int rw = bio_rw(bio); struct mirror_set *ms = (struct mirror_set *) ti->private; diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c index e1ecacf2456f..59fc18ae52c2 100644 --- a/drivers/md/dm-snap.c +++ b/drivers/md/dm-snap.c @@ -1567,8 +1567,7 @@ static void remap_exception(struct dm_snapshot *s, struct dm_exception *e, s->store->chunk_mask); } -static int snapshot_map(struct dm_target *ti, struct bio *bio, - union map_info *map_context) +static int snapshot_map(struct dm_target *ti, struct bio *bio) { struct dm_exception *e; struct dm_snapshot *s = ti->private; @@ -1683,8 +1682,7 @@ out: * If merging is currently taking place on the chunk in question, the * I/O is deferred by adding it to s->bios_queued_during_merge. 
*/ -static int snapshot_merge_map(struct dm_target *ti, struct bio *bio, - union map_info *map_context) +static int snapshot_merge_map(struct dm_target *ti, struct bio *bio) { struct dm_exception *e; struct dm_snapshot *s = ti->private; @@ -1744,8 +1742,7 @@ out_unlock: return r; } -static int snapshot_end_io(struct dm_target *ti, struct bio *bio, - int error, union map_info *map_context) +static int snapshot_end_io(struct dm_target *ti, struct bio *bio, int error) { struct dm_snapshot *s = ti->private; @@ -2119,8 +2116,7 @@ static void origin_dtr(struct dm_target *ti) dm_put_device(ti, dev); } -static int origin_map(struct dm_target *ti, struct bio *bio, - union map_info *map_context) +static int origin_map(struct dm_target *ti, struct bio *bio) { struct dm_dev *dev = ti->private; bio->bi_bdev = dev->bdev; diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c index 4e7ba82146c0..6b0e5ea38027 100644 --- a/drivers/md/dm-stripe.c +++ b/drivers/md/dm-stripe.c @@ -271,8 +271,7 @@ static int stripe_map_discard(struct stripe_c *sc, struct bio *bio, } } -static int stripe_map(struct dm_target *ti, struct bio *bio, - union map_info *map_context) +static int stripe_map(struct dm_target *ti, struct bio *bio) { struct stripe_c *sc = ti->private; uint32_t stripe; @@ -342,8 +341,7 @@ static int stripe_status(struct dm_target *ti, status_type_t type, return 0; } -static int stripe_end_io(struct dm_target *ti, struct bio *bio, - int error, union map_info *map_context) +static int stripe_end_io(struct dm_target *ti, struct bio *bio, int error) { unsigned i; char major_minor[16]; diff --git a/drivers/md/dm-target.c b/drivers/md/dm-target.c index 8da366cf381c..617d21a77256 100644 --- a/drivers/md/dm-target.c +++ b/drivers/md/dm-target.c @@ -126,15 +126,14 @@ static void io_err_dtr(struct dm_target *tt) /* empty */ } -static int io_err_map(struct dm_target *tt, struct bio *bio, - union map_info *map_context) +static int io_err_map(struct dm_target *tt, struct bio *bio) { return -EIO; } static struct target_type error_target = { .name = "error", - .version = {1, 0, 1}, + .version = {1, 1, 0}, .ctr = io_err_ctr, .dtr = io_err_dtr, .map = io_err_map, diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index e7743c69a24c..675ae5274016 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -1371,8 +1371,7 @@ static void thin_hook_bio(struct thin_c *tc, struct bio *bio) /* * Non-blocking function called from the thin target's map function. 
*/ -static int thin_bio_map(struct dm_target *ti, struct bio *bio, - union map_info *map_context) +static int thin_bio_map(struct dm_target *ti, struct bio *bio) { int r; struct thin_c *tc = ti->private; @@ -1980,8 +1979,7 @@ out_unlock: return r; } -static int pool_map(struct dm_target *ti, struct bio *bio, - union map_info *map_context) +static int pool_map(struct dm_target *ti, struct bio *bio) { int r; struct pool_c *pt = ti->private; @@ -2626,17 +2624,14 @@ out_unlock: return r; } -static int thin_map(struct dm_target *ti, struct bio *bio, - union map_info *map_context) +static int thin_map(struct dm_target *ti, struct bio *bio) { bio->bi_sector = dm_target_offset(ti, bio->bi_sector); - return thin_bio_map(ti, bio, map_context); + return thin_bio_map(ti, bio); } -static int thin_endio(struct dm_target *ti, - struct bio *bio, int err, - union map_info *map_context) +static int thin_endio(struct dm_target *ti, struct bio *bio, int err) { unsigned long flags; struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); diff --git a/drivers/md/dm-verity.c b/drivers/md/dm-verity.c index 78f349894b24..52cde982164a 100644 --- a/drivers/md/dm-verity.c +++ b/drivers/md/dm-verity.c @@ -458,8 +458,7 @@ no_prefetch_cluster: * Bio map function. It allocates dm_verity_io structure and bio vector and * fills them. Then it issues prefetches and the I/O. */ -static int verity_map(struct dm_target *ti, struct bio *bio, - union map_info *map_context) +static int verity_map(struct dm_target *ti, struct bio *bio) { struct dm_verity *v = ti->private; struct dm_verity_io *io; diff --git a/drivers/md/dm-zero.c b/drivers/md/dm-zero.c index cc2b3cb81946..69a5c3b3b340 100644 --- a/drivers/md/dm-zero.c +++ b/drivers/md/dm-zero.c @@ -33,8 +33,7 @@ static int zero_ctr(struct dm_target *ti, unsigned int argc, char **argv) /* * Return zeros only on reads */ -static int zero_map(struct dm_target *ti, struct bio *bio, - union map_info *map_context) +static int zero_map(struct dm_target *ti, struct bio *bio) { switch(bio_rw(bio)) { case READ: @@ -56,7 +55,7 @@ static int zero_map(struct dm_target *ti, struct bio *bio, static struct target_type zero_target = { .name = "zero", - .version = {1, 0, 0}, + .version = {1, 1, 0}, .module = THIS_MODULE, .ctr = zero_ctr, .map = zero_map, diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 5ee580b4f330..c72e4d5a9617 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -645,7 +645,7 @@ static void clone_endio(struct bio *bio, int error) error = -EIO; if (endio) { - r = endio(tio->ti, bio, error, &tio->info); + r = endio(tio->ti, bio, error); if (r < 0 || r == DM_ENDIO_REQUEUE) /* * error and requeue request are handled @@ -1004,7 +1004,7 @@ static void __map_bio(struct dm_target *ti, struct dm_target_io *tio) */ atomic_inc(&tio->io->io_count); sector = clone->bi_sector; - r = ti->type->map(ti, clone, &tio->info); + r = ti->type->map(ti, clone); if (r == DM_MAPIO_REMAPPED) { /* the bio has been remapped so dispatch it */ diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index eb96ef6fd8b7..bf6afa2fc432 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h @@ -45,8 +45,7 @@ typedef void (*dm_dtr_fn) (struct dm_target *ti); * = 1: simple remap complete * = 2: The target wants to push back the io */ -typedef int (*dm_map_fn) (struct dm_target *ti, struct bio *bio, - union map_info *map_context); +typedef int (*dm_map_fn) (struct dm_target *ti, struct bio *bio); typedef int (*dm_map_request_fn) (struct 
dm_target *ti, struct request *clone, union map_info *map_context); @@ -59,8 +58,7 @@ typedef int (*dm_map_request_fn) (struct dm_target *ti, struct request *clone, * 2 : The target wants to push back the io */ typedef int (*dm_endio_fn) (struct dm_target *ti, - struct bio *bio, int error, - union map_info *map_context); + struct bio *bio, int error); typedef int (*dm_request_endio_fn) (struct dm_target *ti, struct request *clone, int error, union map_info *map_context); -- cgit v1.2.3 From 30e6c9fa93cf3dbc7cc6df1d748ad25e4264545a Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 20 Dec 2012 17:25:08 +0000 Subject: net: devnet_rename_seq should be a seqcount Using a seqlock for devnet_rename_seq is not a good idea, as device_rename() can sleep. As we hold RTNL, we dont need a protection for writers, and only need a seqcount so that readers can catch a change done by a writer. Bug added in commit c91f6df2db4972d3 (sockopt: Change getsockopt() of SO_BINDTODEVICE to return an interface name) Reported-by: Dave Jones Signed-off-by: Eric Dumazet Cc: Brian Haley Signed-off-by: David S. Miller --- include/linux/netdevice.h | 2 +- net/core/dev.c | 18 +++++++++--------- net/core/sock.c | 4 ++-- 3 files changed, 12 insertions(+), 12 deletions(-) (limited to 'include/linux') diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 02e0f6b156c3..c599e4782d45 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -1576,7 +1576,7 @@ extern int call_netdevice_notifiers(unsigned long val, struct net_device *dev); extern rwlock_t dev_base_lock; /* Device list lock */ -extern seqlock_t devnet_rename_seq; /* Device rename lock */ +extern seqcount_t devnet_rename_seq; /* Device rename seq */ #define for_each_netdev(net, d) \ diff --git a/net/core/dev.c b/net/core/dev.c index d0cbc93fcf32..515473ee52cb 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -203,7 +203,7 @@ static struct list_head offload_base __read_mostly; DEFINE_RWLOCK(dev_base_lock); EXPORT_SYMBOL(dev_base_lock); -DEFINE_SEQLOCK(devnet_rename_seq); +seqcount_t devnet_rename_seq; static inline void dev_base_seq_inc(struct net *net) { @@ -1093,10 +1093,10 @@ int dev_change_name(struct net_device *dev, const char *newname) if (dev->flags & IFF_UP) return -EBUSY; - write_seqlock(&devnet_rename_seq); + write_seqcount_begin(&devnet_rename_seq); if (strncmp(newname, dev->name, IFNAMSIZ) == 0) { - write_sequnlock(&devnet_rename_seq); + write_seqcount_end(&devnet_rename_seq); return 0; } @@ -1104,7 +1104,7 @@ int dev_change_name(struct net_device *dev, const char *newname) err = dev_get_valid_name(net, dev, newname); if (err < 0) { - write_sequnlock(&devnet_rename_seq); + write_seqcount_end(&devnet_rename_seq); return err; } @@ -1112,11 +1112,11 @@ rollback: ret = device_rename(&dev->dev, dev->name); if (ret) { memcpy(dev->name, oldname, IFNAMSIZ); - write_sequnlock(&devnet_rename_seq); + write_seqcount_end(&devnet_rename_seq); return ret; } - write_sequnlock(&devnet_rename_seq); + write_seqcount_end(&devnet_rename_seq); write_lock_bh(&dev_base_lock); hlist_del_rcu(&dev->name_hlist); @@ -1135,7 +1135,7 @@ rollback: /* err >= 0 after dev_alloc_name() or stores the first errno */ if (err >= 0) { err = ret; - write_seqlock(&devnet_rename_seq); + write_seqcount_begin(&devnet_rename_seq); memcpy(dev->name, oldname, IFNAMSIZ); goto rollback; } else { @@ -4180,7 +4180,7 @@ static int dev_ifname(struct net *net, struct ifreq __user *arg) return -EFAULT; retry: - seq = read_seqbegin(&devnet_rename_seq); + seq = 
read_seqcount_begin(&devnet_rename_seq); rcu_read_lock(); dev = dev_get_by_index_rcu(net, ifr.ifr_ifindex); if (!dev) { @@ -4190,7 +4190,7 @@ retry: strcpy(ifr.ifr_name, dev->name); rcu_read_unlock(); - if (read_seqretry(&devnet_rename_seq, seq)) + if (read_seqcount_retry(&devnet_rename_seq, seq)) goto retry; if (copy_to_user(arg, &ifr, sizeof(struct ifreq))) diff --git a/net/core/sock.c b/net/core/sock.c index a692ef49c9bb..bc131d419683 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -583,7 +583,7 @@ static int sock_getbindtodevice(struct sock *sk, char __user *optval, goto out; retry: - seq = read_seqbegin(&devnet_rename_seq); + seq = read_seqcount_begin(&devnet_rename_seq); rcu_read_lock(); dev = dev_get_by_index_rcu(net, sk->sk_bound_dev_if); ret = -ENODEV; @@ -594,7 +594,7 @@ retry: strcpy(devname, dev->name); rcu_read_unlock(); - if (read_seqretry(&devnet_rename_seq, seq)) + if (read_seqcount_retry(&devnet_rename_seq, seq)) goto retry; len = strlen(devname) + 1; -- cgit v1.2.3 From 53e872681fed6a43047e71bf927f77d06f467988 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Tue, 25 Dec 2012 13:29:52 -0500 Subject: ext4: fix deadlock in journal_unmap_buffer() We cannot wait for transaction commit in journal_unmap_buffer() because we hold page lock which ranks below transaction start. We solve the issue by bailing out of journal_unmap_buffer() and jbd2_journal_invalidatepage() with -EBUSY. Caller is then responsible for waiting for transaction commit to finish and try invalidation again. Since the issue can happen only for page stradding i_size, it is simple enough to manually call jbd2_journal_invalidatepage() for such page from ext4_setattr(), check the return value and wait if necessary. Signed-off-by: Jan Kara Signed-off-by: "Theodore Ts'o" --- fs/ext4/inode.c | 82 ++++++++++++++++++++++++++++++++++++++++++++------- fs/jbd2/transaction.c | 27 +++++++++-------- include/linux/jbd2.h | 2 +- 3 files changed, 86 insertions(+), 25 deletions(-) (limited to 'include/linux') diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 12d3fbcff59f..cbfe13bf5b2a 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -2894,8 +2894,8 @@ static void ext4_invalidatepage(struct page *page, unsigned long offset) block_invalidatepage(page, offset); } -static void ext4_journalled_invalidatepage(struct page *page, - unsigned long offset) +static int __ext4_journalled_invalidatepage(struct page *page, + unsigned long offset) { journal_t *journal = EXT4_JOURNAL(page->mapping->host); @@ -2907,7 +2907,14 @@ static void ext4_journalled_invalidatepage(struct page *page, if (offset == 0) ClearPageChecked(page); - jbd2_journal_invalidatepage(journal, page, offset); + return jbd2_journal_invalidatepage(journal, page, offset); +} + +/* Wrapper for aops... */ +static void ext4_journalled_invalidatepage(struct page *page, + unsigned long offset) +{ + WARN_ON(__ext4_journalled_invalidatepage(page, offset) < 0); } static int ext4_releasepage(struct page *page, gfp_t wait) @@ -4313,6 +4320,47 @@ int ext4_write_inode(struct inode *inode, struct writeback_control *wbc) return err; } +/* + * In data=journal mode ext4_journalled_invalidatepage() may fail to invalidate + * buffers that are attached to a page stradding i_size and are undergoing + * commit. In that case we have to wait for commit to finish and try again. 
+ */ +static void ext4_wait_for_tail_page_commit(struct inode *inode) +{ + struct page *page; + unsigned offset; + journal_t *journal = EXT4_SB(inode->i_sb)->s_journal; + tid_t commit_tid = 0; + int ret; + + offset = inode->i_size & (PAGE_CACHE_SIZE - 1); + /* + * All buffers in the last page remain valid? Then there's nothing to + * do. We do the check mainly to optimize the common PAGE_CACHE_SIZE == + * blocksize case + */ + if (offset > PAGE_CACHE_SIZE - (1 << inode->i_blkbits)) + return; + while (1) { + page = find_lock_page(inode->i_mapping, + inode->i_size >> PAGE_CACHE_SHIFT); + if (!page) + return; + ret = __ext4_journalled_invalidatepage(page, offset); + unlock_page(page); + page_cache_release(page); + if (ret != -EBUSY) + return; + commit_tid = 0; + read_lock(&journal->j_state_lock); + if (journal->j_committing_transaction) + commit_tid = journal->j_committing_transaction->t_tid; + read_unlock(&journal->j_state_lock); + if (commit_tid) + jbd2_log_wait_commit(journal, commit_tid); + } +} + /* * ext4_setattr() * @@ -4426,16 +4474,28 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr) } if (attr->ia_valid & ATTR_SIZE) { - if (attr->ia_size != i_size_read(inode)) { - truncate_setsize(inode, attr->ia_size); - /* Inode size will be reduced, wait for dio in flight. - * Temporarily disable dioread_nolock to prevent - * livelock. */ + if (attr->ia_size != inode->i_size) { + loff_t oldsize = inode->i_size; + + i_size_write(inode, attr->ia_size); + /* + * Blocks are going to be removed from the inode. Wait + * for dio in flight. Temporarily disable + * dioread_nolock to prevent livelock. + */ if (orphan) { - ext4_inode_block_unlocked_dio(inode); - inode_dio_wait(inode); - ext4_inode_resume_unlocked_dio(inode); + if (!ext4_should_journal_data(inode)) { + ext4_inode_block_unlocked_dio(inode); + inode_dio_wait(inode); + ext4_inode_resume_unlocked_dio(inode); + } else + ext4_wait_for_tail_page_commit(inode); } + /* + * Truncate pagecache after we've waited for commit + * in data=journal mode to make pages freeable. + */ + truncate_pagecache(inode, oldsize, inode->i_size); } ext4_truncate(inode); } diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c index cd4485db42b3..ddc51a7f4508 100644 --- a/fs/jbd2/transaction.c +++ b/fs/jbd2/transaction.c @@ -1840,7 +1840,6 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh, BUFFER_TRACE(bh, "entry"); -retry: /* * It is safe to proceed here without the j_list_lock because the * buffers cannot be stolen by try_to_free_buffers as long as we are @@ -1935,14 +1934,11 @@ retry: * for commit and try again. */ if (partial_page) { - tid_t tid = journal->j_committing_transaction->t_tid; - jbd2_journal_put_journal_head(jh); spin_unlock(&journal->j_list_lock); jbd_unlock_bh_state(bh); write_unlock(&journal->j_state_lock); - jbd2_log_wait_commit(journal, tid); - goto retry; + return -EBUSY; } /* * OK, buffer won't be reachable after truncate. We just set @@ -2003,21 +1999,23 @@ zap_buffer_unlocked: * @page: page to flush * @offset: length of page to invalidate. * - * Reap page buffers containing data after offset in page. - * + * Reap page buffers containing data after offset in page. Can return -EBUSY + * if buffers are part of the committing transaction and the page is straddling + * i_size. Caller then has to wait for current commit and try again. 
*/ -void jbd2_journal_invalidatepage(journal_t *journal, - struct page *page, - unsigned long offset) +int jbd2_journal_invalidatepage(journal_t *journal, + struct page *page, + unsigned long offset) { struct buffer_head *head, *bh, *next; unsigned int curr_off = 0; int may_free = 1; + int ret = 0; if (!PageLocked(page)) BUG(); if (!page_has_buffers(page)) - return; + return 0; /* We will potentially be playing with lists other than just the * data lists (especially for journaled data mode), so be @@ -2031,9 +2029,11 @@ void jbd2_journal_invalidatepage(journal_t *journal, if (offset <= curr_off) { /* This block is wholly outside the truncation point */ lock_buffer(bh); - may_free &= journal_unmap_buffer(journal, bh, - offset > 0); + ret = journal_unmap_buffer(journal, bh, offset > 0); unlock_buffer(bh); + if (ret < 0) + return ret; + may_free &= ret; } curr_off = next_off; bh = next; @@ -2044,6 +2044,7 @@ void jbd2_journal_invalidatepage(journal_t *journal, if (may_free && try_to_free_buffers(page)) J_ASSERT(!page_has_buffers(page)); } + return 0; } /* diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index 1be23d9fdacb..e30b66346942 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h @@ -1098,7 +1098,7 @@ void jbd2_journal_set_triggers(struct buffer_head *, extern int jbd2_journal_dirty_metadata (handle_t *, struct buffer_head *); extern int jbd2_journal_forget (handle_t *, struct buffer_head *); extern void journal_sync_buffer (struct buffer_head *); -extern void jbd2_journal_invalidatepage(journal_t *, +extern int jbd2_journal_invalidatepage(journal_t *, struct page *, unsigned long); extern int jbd2_journal_try_to_free_buffers(journal_t *, struct page *, gfp_t); extern int jbd2_journal_stop(handle_t *); -- cgit v1.2.3 From 08b60f8438879a84246d7debded31c9cb7aea6e4 Mon Sep 17 00:00:00 2001 From: Stephen Warren Date: Mon, 24 Dec 2012 11:14:58 -0700 Subject: namei.h: include errno.h This solves: In file included from fs/ext3/symlink.c:20:0: include/linux/namei.h: In function 'retry_estale': include/linux/namei.h:114:19: error: 'ESTALE' undeclared (first use in this function) Signed-off-by: Stephen Warren Signed-off-by: Al Viro --- include/linux/namei.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/linux') diff --git a/include/linux/namei.h b/include/linux/namei.h index e998c030061d..5a5ff57ceed4 100644 --- a/include/linux/namei.h +++ b/include/linux/namei.h @@ -2,6 +2,7 @@ #define _LINUX_NAMEI_H #include +#include #include #include -- cgit v1.2.3 From c876ad7682155958d0c9c27afe9017925c230d64 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Fri, 21 Dec 2012 20:27:12 -0800 Subject: pidns: Stop pid allocation when init dies Oleg pointed out that in a pid namespace the sequence. - pid 1 becomes a zombie - setns(thepidns), fork,... - reaping pid 1. - The injected processes exiting. Can lead to processes attempting access their child reaper and instead following a stale pointer. That waitpid for init can return before all of the processes in the pid namespace have exited is also unfortunate. Avoid these problems by disabling the allocation of new pids in a pid namespace when init dies, instead of when the last process in a pid namespace is reaped. Pointed-out-by: Oleg Nesterov Reviewed-by: Oleg Nesterov Signed-off-by: "Eric W. 
Biederman" --- include/linux/pid.h | 1 + include/linux/pid_namespace.h | 4 +++- kernel/pid.c | 15 ++++++++++++--- kernel/pid_namespace.c | 4 ++++ 4 files changed, 20 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/include/linux/pid.h b/include/linux/pid.h index b152d44fb181..2381c973d897 100644 --- a/include/linux/pid.h +++ b/include/linux/pid.h @@ -121,6 +121,7 @@ int next_pidmap(struct pid_namespace *pid_ns, unsigned int last); extern struct pid *alloc_pid(struct pid_namespace *ns); extern void free_pid(struct pid *pid); +extern void disable_pid_allocation(struct pid_namespace *ns); /* * ns_of_pid() returns the pid namespace in which the specified pid was diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h index bf285999273a..215e5e3dda10 100644 --- a/include/linux/pid_namespace.h +++ b/include/linux/pid_namespace.h @@ -21,7 +21,7 @@ struct pid_namespace { struct kref kref; struct pidmap pidmap[PIDMAP_ENTRIES]; int last_pid; - int nr_hashed; + unsigned int nr_hashed; struct task_struct *child_reaper; struct kmem_cache *pid_cachep; unsigned int level; @@ -42,6 +42,8 @@ struct pid_namespace { extern struct pid_namespace init_pid_ns; +#define PIDNS_HASH_ADDING (1U << 31) + #ifdef CONFIG_PID_NS static inline struct pid_namespace *get_pid_ns(struct pid_namespace *ns) { diff --git a/kernel/pid.c b/kernel/pid.c index 36aa02ff17d6..de9af600006f 100644 --- a/kernel/pid.c +++ b/kernel/pid.c @@ -270,7 +270,6 @@ void free_pid(struct pid *pid) wake_up_process(ns->child_reaper); break; case 0: - ns->nr_hashed = -1; schedule_work(&ns->proc_work); break; } @@ -319,7 +318,7 @@ struct pid *alloc_pid(struct pid_namespace *ns) upid = pid->numbers + ns->level; spin_lock_irq(&pidmap_lock); - if (ns->nr_hashed < 0) + if (!(ns->nr_hashed & PIDNS_HASH_ADDING)) goto out_unlock; for ( ; upid >= pid->numbers; --upid) { hlist_add_head_rcu(&upid->pid_chain, @@ -342,6 +341,13 @@ out_free: goto out; } +void disable_pid_allocation(struct pid_namespace *ns) +{ + spin_lock_irq(&pidmap_lock); + ns->nr_hashed &= ~PIDNS_HASH_ADDING; + spin_unlock_irq(&pidmap_lock); +} + struct pid *find_pid_ns(int nr, struct pid_namespace *ns) { struct hlist_node *elem; @@ -573,6 +579,9 @@ void __init pidhash_init(void) void __init pidmap_init(void) { + /* Veryify no one has done anything silly */ + BUILD_BUG_ON(PID_MAX_LIMIT >= PIDNS_HASH_ADDING); + /* bump default and minimum pid_max based on number of cpus */ pid_max = min(pid_max_max, max_t(int, pid_max, PIDS_PER_CPU_DEFAULT * num_possible_cpus())); @@ -584,7 +593,7 @@ void __init pidmap_init(void) /* Reserve PID 0. 
We never call free_pidmap(0) */ set_bit(0, init_pid_ns.pidmap[0].page); atomic_dec(&init_pid_ns.pidmap[0].nr_free); - init_pid_ns.nr_hashed = 1; + init_pid_ns.nr_hashed = PIDNS_HASH_ADDING; init_pid_ns.pid_cachep = KMEM_CACHE(pid, SLAB_HWCACHE_ALIGN | SLAB_PANIC); diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c index fdbd0cdf271a..c1c3dc1c6023 100644 --- a/kernel/pid_namespace.c +++ b/kernel/pid_namespace.c @@ -115,6 +115,7 @@ static struct pid_namespace *create_pid_namespace(struct user_namespace *user_ns ns->level = level; ns->parent = get_pid_ns(parent_pid_ns); ns->user_ns = get_user_ns(user_ns); + ns->nr_hashed = PIDNS_HASH_ADDING; INIT_WORK(&ns->proc_work, proc_cleanup_work); set_bit(0, ns->pidmap[0].page); @@ -181,6 +182,9 @@ void zap_pid_ns_processes(struct pid_namespace *pid_ns) int rc; struct task_struct *task, *me = current; + /* Don't allow any more processes into the pid namespace */ + disable_pid_allocation(pid_ns); + /* Ignore SIGCHLD causing any terminated children to autoreap */ spin_lock_irq(&me->sighand->siglock); me->sighand->action[SIGCHLD - 1].sa.sa_handler = SIG_IGN; -- cgit v1.2.3 From 812089e01b9f65f90fc8fc670d8cce72a0e01fbb Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Sat, 1 Dec 2012 12:37:20 -0800 Subject: PCI: Reduce Ricoh 0xe822 SD card reader base clock frequency to 50MHz Otherwise it fails like this on cards like the Transcend 16GB SDHC card: mmc0: new SDHC card at address b368 mmcblk0: mmc0:b368 SDC 15.0 GiB mmcblk0: error -110 sending status command, retrying mmcblk0: error -84 transferring data, sector 0, nr 8, cmd response 0x900, card status 0xb0 Tested on my Lenovo x200 laptop. [bhelgaas: changelog] Signed-off-by: Andy Lutomirski Signed-off-by: Bjorn Helgaas Acked-by: Chris Ball CC: Manoj Iyer CC: stable@vger.kernel.org --- drivers/pci/quirks.c | 7 +++++-- include/linux/pci_ids.h | 1 + 2 files changed, 6 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 8f7a6344e79e..0369fb6fc1da 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -2725,7 +2725,7 @@ static void ricoh_mmc_fixup_r5c832(struct pci_dev *dev) if (PCI_FUNC(dev->devfn)) return; /* - * RICOH 0xe823 SD/MMC card reader fails to recognize + * RICOH 0xe822 and 0xe823 SD/MMC card readers fail to recognize * certain types of SD/MMC cards. Lowering the SD base * clock frequency from 200Mhz to 50Mhz fixes this issue. 
* @@ -2736,7 +2736,8 @@ static void ricoh_mmc_fixup_r5c832(struct pci_dev *dev) * 0xf9 - Key register for 0x150 * 0xfc - key register for 0xe1 */ - if (dev->device == PCI_DEVICE_ID_RICOH_R5CE823) { + if (dev->device == PCI_DEVICE_ID_RICOH_R5CE822 || + dev->device == PCI_DEVICE_ID_RICOH_R5CE823) { pci_write_config_byte(dev, 0xf9, 0xfc); pci_write_config_byte(dev, 0x150, 0x10); pci_write_config_byte(dev, 0xf9, 0x00); @@ -2763,6 +2764,8 @@ static void ricoh_mmc_fixup_r5c832(struct pci_dev *dev) } DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE822, ricoh_mmc_fixup_r5c832); +DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE822, ricoh_mmc_fixup_r5c832); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE823, ricoh_mmc_fixup_r5c832); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE823, ricoh_mmc_fixup_r5c832); #endif /*CONFIG_MMC_RICOH_MMC*/ diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 0f8447376ddb..0eb65796bcb9 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -1568,6 +1568,7 @@ #define PCI_DEVICE_ID_RICOH_RL5C476 0x0476 #define PCI_DEVICE_ID_RICOH_RL5C478 0x0478 #define PCI_DEVICE_ID_RICOH_R5C822 0x0822 +#define PCI_DEVICE_ID_RICOH_R5CE822 0xe822 #define PCI_DEVICE_ID_RICOH_R5CE823 0xe823 #define PCI_DEVICE_ID_RICOH_R5C832 0x0832 #define PCI_DEVICE_ID_RICOH_R5C843 0x0843 -- cgit v1.2.3 From ad4b3fb7ff9940bcdb1e4cd62bd189d10fa636ba Mon Sep 17 00:00:00 2001 From: Christoffer Dall Date: Fri, 21 Dec 2012 13:03:50 -0500 Subject: mm: Fix PageHead when !CONFIG_PAGEFLAGS_EXTENDED Unfortunately with !CONFIG_PAGEFLAGS_EXTENDED, (!PageHead) is false, and (PageHead) is true, for tail pages. If this is indeed the intended behavior, which I doubt because it breaks cache cleaning on some ARM systems, then the nomenclature is highly problematic. This patch makes sure PageHead is only true for head pages and PageTail is only true for tail pages, and neither is true for non-compound pages. [ This buglet seems ancient - seems to have been introduced back in Apr 2008 in commit 6a1e7f777f61: "pageflags: convert to the use of new macros". And the reason nobody noticed is because the PageHead() tests are almost all about just sanity-checking, and only used on pages that are actual page heads. The fact that the old code returned true for tail pages too was thus not really noticeable. - Linus ] Signed-off-by: Christoffer Dall Acked-by: Andrea Arcangeli Cc: Andrew Morton Cc: Will Deacon Cc: Steve Capper Cc: Christoph Lameter Cc: stable@kernel.org # 2.6.26+ Signed-off-by: Linus Torvalds --- include/linux/page-flags.h | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index b5d13841604e..70473da47b3f 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -362,7 +362,7 @@ static inline void ClearPageCompound(struct page *page) * pages on the LRU and/or pagecache. 
*/ TESTPAGEFLAG(Compound, compound) -__PAGEFLAG(Head, compound) +__SETPAGEFLAG(Head, compound) __CLEARPAGEFLAG(Head, compound) /* * PG_reclaim is used in combination with PG_compound to mark the @@ -374,8 +374,14 @@ __PAGEFLAG(Head, compound) * PG_compound & PG_reclaim => Tail page * PG_compound & ~PG_reclaim => Head page */ +#define PG_head_mask ((1L << PG_compound)) #define PG_head_tail_mask ((1L << PG_compound) | (1L << PG_reclaim)) +static inline int PageHead(struct page *page) +{ + return ((page->flags & PG_head_tail_mask) == PG_head_mask); +} + static inline int PageTail(struct page *page) { return ((page->flags & PG_head_tail_mask) == PG_head_tail_mask); -- cgit v1.2.3 From a7a88b23737095e6c18a20c5d4eef9e25ec5b829 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Wed, 2 Jan 2013 02:04:23 -0800 Subject: mempolicy: remove arg from mpol_parse_str, mpol_to_str Remove the unused argument (formerly no_context) from mpol_parse_str() and from mpol_to_str(). Signed-off-by: Hugh Dickins Signed-off-by: Linus Torvalds --- fs/proc/task_mmu.c | 2 +- include/linux/mempolicy.h | 11 ++++------- mm/mempolicy.c | 6 ++---- mm/shmem.c | 4 ++-- 4 files changed, 9 insertions(+), 14 deletions(-) (limited to 'include/linux') diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 448455b7fd91..ca5ce7f9f800 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -1278,7 +1278,7 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid) walk.mm = mm; pol = get_vma_policy(task, vma, vma->vm_start); - mpol_to_str(buffer, sizeof(buffer), pol, 0); + mpol_to_str(buffer, sizeof(buffer), pol); mpol_cond_put(pol); seq_printf(m, "%08lx %s", vma->vm_start, buffer); diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h index 9adc270de7ef..92bc9988a180 100644 --- a/include/linux/mempolicy.h +++ b/include/linux/mempolicy.h @@ -165,11 +165,10 @@ int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from, #ifdef CONFIG_TMPFS -extern int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context); +extern int mpol_parse_str(char *str, struct mempolicy **mpol); #endif -extern int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol, - int no_context); +extern int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol); /* Check if a vma is migratable */ static inline int vma_migratable(struct vm_area_struct *vma) @@ -296,15 +295,13 @@ static inline void check_highest_zone(int k) } #ifdef CONFIG_TMPFS -static inline int mpol_parse_str(char *str, struct mempolicy **mpol, - int no_context) +static inline int mpol_parse_str(char *str, struct mempolicy **mpol) { return 1; /* error */ } #endif -static inline int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol, - int no_context) +static inline int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol) { return 0; } diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 02c914cca53d..1cb200af3828 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -2612,14 +2612,13 @@ static const char * const policy_modes[] = * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option. * @str: string containing mempolicy to parse * @mpol: pointer to struct mempolicy pointer, returned on success. - * @unused: redundant argument, to be removed later. 
* * Format of input: * [=][:] * * On success, returns 0, else 1 */ -int mpol_parse_str(char *str, struct mempolicy **mpol, int unused) +int mpol_parse_str(char *str, struct mempolicy **mpol) { struct mempolicy *new = NULL; unsigned short mode; @@ -2747,13 +2746,12 @@ out: * @buffer: to contain formatted mempolicy string * @maxlen: length of @buffer * @pol: pointer to mempolicy to be formatted - * @unused: redundant argument, to be removed later. * * Convert a mempolicy into a string. * Returns the number of characters in buffer (if positive) * or an error (negative) */ -int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol, int unused) +int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol) { char *p = buffer; int l; diff --git a/mm/shmem.c b/mm/shmem.c index 5c90d84c2b02..5dd56f6efdbd 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -889,7 +889,7 @@ static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol) if (!mpol || mpol->mode == MPOL_DEFAULT) return; /* show nothing */ - mpol_to_str(buffer, sizeof(buffer), mpol, 1); + mpol_to_str(buffer, sizeof(buffer), mpol); seq_printf(seq, ",mpol=%s", buffer); } @@ -2463,7 +2463,7 @@ static int shmem_parse_options(char *options, struct shmem_sb_info *sbinfo, if (!gid_valid(sbinfo->gid)) goto bad_val; } else if (!strcmp(this_char,"mpol")) { - if (mpol_parse_str(value, &sbinfo->mpol, 1)) + if (mpol_parse_str(value, &sbinfo->mpol)) goto bad_val; } else { printk(KERN_ERR "tmpfs: Bad mount option %s\n", -- cgit v1.2.3 From 42288fe366c4f1ce7522bc9f27d0bc2a81c55264 Mon Sep 17 00:00:00 2001 From: Mel Gorman Date: Fri, 21 Dec 2012 23:10:25 +0000 Subject: mm: mempolicy: Convert shared_policy mutex to spinlock Sasha was fuzzing with trinity and reported the following problem: BUG: sleeping function called from invalid context at kernel/mutex.c:269 in_atomic(): 1, irqs_disabled(): 0, pid: 6361, name: trinity-main 2 locks held by trinity-main/6361: #0: (&mm->mmap_sem){++++++}, at: [] __do_page_fault+0x1e4/0x4f0 #1: (&(&mm->page_table_lock)->rlock){+.+...}, at: [] handle_pte_fault+0x3f7/0x6a0 Pid: 6361, comm: trinity-main Tainted: G W 3.7.0-rc2-next-20121024-sasha-00001-gd95ef01-dirty #74 Call Trace: __might_sleep+0x1c3/0x1e0 mutex_lock_nested+0x29/0x50 mpol_shared_policy_lookup+0x2e/0x90 shmem_get_policy+0x2e/0x30 get_vma_policy+0x5a/0xa0 mpol_misplaced+0x41/0x1d0 handle_pte_fault+0x465/0x6a0 This was triggered by a different version of automatic NUMA balancing but in theory the current version is vulnerable to the same problem. do_numa_page -> numa_migrate_prep -> mpol_misplaced -> get_vma_policy -> shmem_get_policy It's very unlikely this will happen as shared pages are not marked pte_numa -- see the page_mapcount() check in change_pte_range() -- but it is possible. To address this, this patch restores sp->lock as originally implemented by Kosaki Motohiro. In the path where get_vma_policy() is called, it should not be calling sp_alloc() so it is not necessary to treat the PTL specially. 
Signed-off-by: KOSAKI Motohiro Tested-by: KOSAKI Motohiro Signed-off-by: Mel Gorman Signed-off-by: Linus Torvalds --- include/linux/mempolicy.h | 2 +- mm/mempolicy.c | 68 +++++++++++++++++++++++++++++++++-------------- 2 files changed, 49 insertions(+), 21 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h index 92bc9988a180..0d7df39a5885 100644 --- a/include/linux/mempolicy.h +++ b/include/linux/mempolicy.h @@ -123,7 +123,7 @@ struct sp_node { struct shared_policy { struct rb_root root; - struct mutex mutex; + spinlock_t lock; }; void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol); diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 1cb200af3828..e2df1c1fb41f 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -2132,7 +2132,7 @@ bool __mpol_equal(struct mempolicy *a, struct mempolicy *b) */ /* lookup first element intersecting start-end */ -/* Caller holds sp->mutex */ +/* Caller holds sp->lock */ static struct sp_node * sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end) { @@ -2196,13 +2196,13 @@ mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx) if (!sp->root.rb_node) return NULL; - mutex_lock(&sp->mutex); + spin_lock(&sp->lock); sn = sp_lookup(sp, idx, idx+1); if (sn) { mpol_get(sn->policy); pol = sn->policy; } - mutex_unlock(&sp->mutex); + spin_unlock(&sp->lock); return pol; } @@ -2328,6 +2328,14 @@ static void sp_delete(struct shared_policy *sp, struct sp_node *n) sp_free(n); } +static void sp_node_init(struct sp_node *node, unsigned long start, + unsigned long end, struct mempolicy *pol) +{ + node->start = start; + node->end = end; + node->policy = pol; +} + static struct sp_node *sp_alloc(unsigned long start, unsigned long end, struct mempolicy *pol) { @@ -2344,10 +2352,7 @@ static struct sp_node *sp_alloc(unsigned long start, unsigned long end, return NULL; } newpol->flags |= MPOL_F_SHARED; - - n->start = start; - n->end = end; - n->policy = newpol; + sp_node_init(n, start, end, newpol); return n; } @@ -2357,9 +2362,12 @@ static int shared_policy_replace(struct shared_policy *sp, unsigned long start, unsigned long end, struct sp_node *new) { struct sp_node *n; + struct sp_node *n_new = NULL; + struct mempolicy *mpol_new = NULL; int ret = 0; - mutex_lock(&sp->mutex); +restart: + spin_lock(&sp->lock); n = sp_lookup(sp, start, end); /* Take care of old policies in the same range. */ while (n && n->start < end) { @@ -2372,14 +2380,16 @@ static int shared_policy_replace(struct shared_policy *sp, unsigned long start, } else { /* Old policy spanning whole new range. 
*/ if (n->end > end) { - struct sp_node *new2; - new2 = sp_alloc(end, n->end, n->policy); - if (!new2) { - ret = -ENOMEM; - goto out; - } + if (!n_new) + goto alloc_new; + + *mpol_new = *n->policy; + atomic_set(&mpol_new->refcnt, 1); + sp_node_init(n_new, n->end, end, mpol_new); + sp_insert(sp, n_new); n->end = start; - sp_insert(sp, new2); + n_new = NULL; + mpol_new = NULL; break; } else n->end = start; @@ -2390,9 +2400,27 @@ static int shared_policy_replace(struct shared_policy *sp, unsigned long start, } if (new) sp_insert(sp, new); -out: - mutex_unlock(&sp->mutex); + spin_unlock(&sp->lock); + ret = 0; + +err_out: + if (mpol_new) + mpol_put(mpol_new); + if (n_new) + kmem_cache_free(sn_cache, n_new); + return ret; + +alloc_new: + spin_unlock(&sp->lock); + ret = -ENOMEM; + n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL); + if (!n_new) + goto err_out; + mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL); + if (!mpol_new) + goto err_out; + goto restart; } /** @@ -2410,7 +2438,7 @@ void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol) int ret; sp->root = RB_ROOT; /* empty tree == default mempolicy */ - mutex_init(&sp->mutex); + spin_lock_init(&sp->lock); if (mpol) { struct vm_area_struct pvma; @@ -2476,14 +2504,14 @@ void mpol_free_shared_policy(struct shared_policy *p) if (!p->root.rb_node) return; - mutex_lock(&p->mutex); + spin_lock(&p->lock); next = rb_first(&p->root); while (next) { n = rb_entry(next, struct sp_node, nd); next = rb_next(&n->nd); sp_delete(p, n); } - mutex_unlock(&p->mutex); + spin_unlock(&p->lock); } #ifdef CONFIG_NUMA_BALANCING -- cgit v1.2.3 From 3d33fcc11bdd11b6949cf5c406726a094395dc4f Mon Sep 17 00:00:00 2001 From: David Howells Date: Wed, 2 Jan 2013 15:12:55 +0000 Subject: UAPI: Remove empty Kbuild files Empty files can get deleted by the patch program, so remove empty Kbuild files and their links from the parent Kbuilds. 
Signed-off-by: David Howells Signed-off-by: Linus Torvalds --- include/Kbuild | 3 --- include/linux/Kbuild | 5 ----- include/linux/hdlc/Kbuild | 0 include/linux/hsi/Kbuild | 0 include/linux/raid/Kbuild | 0 include/linux/usb/Kbuild | 0 include/rdma/Kbuild | 0 include/sound/Kbuild | 0 8 files changed, 8 deletions(-) delete mode 100644 include/linux/Kbuild delete mode 100644 include/linux/hdlc/Kbuild delete mode 100644 include/linux/hsi/Kbuild delete mode 100644 include/linux/raid/Kbuild delete mode 100644 include/linux/usb/Kbuild delete mode 100644 include/rdma/Kbuild delete mode 100644 include/sound/Kbuild (limited to 'include/linux') diff --git a/include/Kbuild b/include/Kbuild index 83256b64166a..1dfd33e8d43b 100644 --- a/include/Kbuild +++ b/include/Kbuild @@ -1,8 +1,5 @@ # Top-level Makefile calls into asm-$(ARCH) # List only non-arch directories below -header-y += linux/ -header-y += sound/ -header-y += rdma/ header-y += video/ header-y += scsi/ diff --git a/include/linux/Kbuild b/include/linux/Kbuild deleted file mode 100644 index 7fe2dae251e5..000000000000 --- a/include/linux/Kbuild +++ /dev/null @@ -1,5 +0,0 @@ -header-y += dvb/ -header-y += hdlc/ -header-y += hsi/ -header-y += raid/ -header-y += usb/ diff --git a/include/linux/hdlc/Kbuild b/include/linux/hdlc/Kbuild deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/include/linux/hsi/Kbuild b/include/linux/hsi/Kbuild deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/include/linux/raid/Kbuild b/include/linux/raid/Kbuild deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/include/linux/usb/Kbuild b/include/linux/usb/Kbuild deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/include/rdma/Kbuild b/include/rdma/Kbuild deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/include/sound/Kbuild b/include/sound/Kbuild deleted file mode 100644 index e69de29bb2d1..000000000000 -- cgit v1.2.3 From f568f6ca811fe681ecfd11c4ce78b6aa488020c0 Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Fri, 21 Dec 2012 15:02:05 -0800 Subject: pstore: remove __dev* attributes. CONFIG_HOTPLUG is going away as an option. As a result, the __dev* markings need to be removed. This change removes the use of __devinit from the pstore filesystem. Based on patches originally written by Bill Pemberton, but redone by me in order to handle some of the coding style issues better, by hand. 
Cc: Bill Pemberton Cc: Anton Vorontsov Cc: Colin Cross Cc: Kees Cook Cc: Tony Luck Signed-off-by: Greg Kroah-Hartman --- fs/pstore/ram.c | 14 ++++++-------- fs/pstore/ram_core.c | 9 ++++----- include/linux/pstore_ram.h | 5 ++--- 3 files changed, 12 insertions(+), 16 deletions(-) (limited to 'include/linux') diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c index f883e7e74305..7003e5266f25 100644 --- a/fs/pstore/ram.c +++ b/fs/pstore/ram.c @@ -291,9 +291,8 @@ static void ramoops_free_przs(struct ramoops_context *cxt) kfree(cxt->przs); } -static int __devinit ramoops_init_przs(struct device *dev, - struct ramoops_context *cxt, - phys_addr_t *paddr, size_t dump_mem_sz) +static int ramoops_init_przs(struct device *dev, struct ramoops_context *cxt, + phys_addr_t *paddr, size_t dump_mem_sz) { int err = -ENOMEM; int i; @@ -336,10 +335,9 @@ fail_prz: return err; } -static int __devinit ramoops_init_prz(struct device *dev, - struct ramoops_context *cxt, - struct persistent_ram_zone **prz, - phys_addr_t *paddr, size_t sz, u32 sig) +static int ramoops_init_prz(struct device *dev, struct ramoops_context *cxt, + struct persistent_ram_zone **prz, + phys_addr_t *paddr, size_t sz, u32 sig) { if (!sz) return 0; @@ -367,7 +365,7 @@ static int __devinit ramoops_init_prz(struct device *dev, return 0; } -static int __devinit ramoops_probe(struct platform_device *pdev) +static int ramoops_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct ramoops_platform_data *pdata = pdev->dev.platform_data; diff --git a/fs/pstore/ram_core.c b/fs/pstore/ram_core.c index eecd2a8a84dd..0306303be372 100644 --- a/fs/pstore/ram_core.c +++ b/fs/pstore/ram_core.c @@ -390,8 +390,8 @@ static int persistent_ram_buffer_map(phys_addr_t start, phys_addr_t size, return 0; } -static int __devinit persistent_ram_post_init(struct persistent_ram_zone *prz, - u32 sig, int ecc_size) +static int persistent_ram_post_init(struct persistent_ram_zone *prz, u32 sig, + int ecc_size) { int ret; @@ -443,9 +443,8 @@ void persistent_ram_free(struct persistent_ram_zone *prz) kfree(prz); } -struct persistent_ram_zone * __devinit persistent_ram_new(phys_addr_t start, - size_t size, u32 sig, - int ecc_size) +struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size, + u32 sig, int ecc_size) { struct persistent_ram_zone *prz; int ret = -ENOMEM; diff --git a/include/linux/pstore_ram.h b/include/linux/pstore_ram.h index 098d2a838296..cb6ab5feab67 100644 --- a/include/linux/pstore_ram.h +++ b/include/linux/pstore_ram.h @@ -46,9 +46,8 @@ struct persistent_ram_zone { size_t old_log_size; }; -struct persistent_ram_zone * __devinit persistent_ram_new(phys_addr_t start, - size_t size, u32 sig, - int ecc_size); +struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size, + u32 sig, int ecc_size); void persistent_ram_free(struct persistent_ram_zone *prz); void persistent_ram_zap(struct persistent_ram_zone *prz); -- cgit v1.2.3 From 0f58a01ddd5e8177255705ba15e64c3b74d67993 Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Fri, 21 Dec 2012 15:12:59 -0800 Subject: Drivers: bcma: remove __dev* attributes. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit CONFIG_HOTPLUG is going away as an option. As a result, the __dev* markings need to be removed. This change removes the use of __devinit, __devexit_p, and __devexit from these drivers. 
Based on patches originally written by Bill Pemberton, but redone by me in order to handle some of the coding style issues better, by hand. Cc: Bill Pemberton Cc: "Rafał Miłecki" Signed-off-by: Greg Kroah-Hartman --- drivers/bcma/bcma_private.h | 6 +++--- drivers/bcma/driver_gmac_cmn.c | 2 +- drivers/bcma/driver_pci.c | 4 ++-- drivers/bcma/driver_pci_host.c | 13 ++++++------- drivers/bcma/host_pci.c | 8 ++++---- drivers/bcma/main.c | 2 +- include/linux/bcma/bcma_driver_gmac_cmn.h | 2 +- include/linux/bcma/bcma_driver_pci.h | 2 +- 8 files changed, 19 insertions(+), 20 deletions(-) (limited to 'include/linux') diff --git a/drivers/bcma/bcma_private.h b/drivers/bcma/bcma_private.h index 4a2d72ec6d43..19e3fbfd5757 100644 --- a/drivers/bcma/bcma_private.h +++ b/drivers/bcma/bcma_private.h @@ -22,7 +22,7 @@ struct bcma_bus; /* main.c */ -int __devinit bcma_bus_register(struct bcma_bus *bus); +int bcma_bus_register(struct bcma_bus *bus); void bcma_bus_unregister(struct bcma_bus *bus); int __init bcma_bus_early_register(struct bcma_bus *bus, struct bcma_device *core_cc, @@ -87,8 +87,8 @@ u32 bcma_pcie_read(struct bcma_drv_pci *pc, u32 address); extern int bcma_chipco_watchdog_register(struct bcma_drv_cc *cc); #ifdef CONFIG_BCMA_DRIVER_PCI_HOSTMODE -bool __devinit bcma_core_pci_is_in_hostmode(struct bcma_drv_pci *pc); -void __devinit bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc); +bool bcma_core_pci_is_in_hostmode(struct bcma_drv_pci *pc); +void bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc); #endif /* CONFIG_BCMA_DRIVER_PCI_HOSTMODE */ #ifdef CONFIG_BCMA_DRIVER_GPIO diff --git a/drivers/bcma/driver_gmac_cmn.c b/drivers/bcma/driver_gmac_cmn.c index 834225f65e8f..dcb137926d31 100644 --- a/drivers/bcma/driver_gmac_cmn.c +++ b/drivers/bcma/driver_gmac_cmn.c @@ -8,7 +8,7 @@ #include "bcma_private.h" #include -void __devinit bcma_core_gmac_cmn_init(struct bcma_drv_gmac_cmn *gc) +void bcma_core_gmac_cmn_init(struct bcma_drv_gmac_cmn *gc) { mutex_init(&gc->phy_mutex); } diff --git a/drivers/bcma/driver_pci.c b/drivers/bcma/driver_pci.c index c39ee6d45850..cf7a476a519f 100644 --- a/drivers/bcma/driver_pci.c +++ b/drivers/bcma/driver_pci.c @@ -207,14 +207,14 @@ static void bcma_core_pci_config_fixup(struct bcma_drv_pci *pc) * Init. 
**************************************************/ -static void __devinit bcma_core_pci_clientmode_init(struct bcma_drv_pci *pc) +static void bcma_core_pci_clientmode_init(struct bcma_drv_pci *pc) { bcma_core_pci_fixcfg(pc); bcma_pcicore_serdes_workaround(pc); bcma_core_pci_config_fixup(pc); } -void __devinit bcma_core_pci_init(struct bcma_drv_pci *pc) +void bcma_core_pci_init(struct bcma_drv_pci *pc) { if (pc->setup_done) return; diff --git a/drivers/bcma/driver_pci_host.c b/drivers/bcma/driver_pci_host.c index e6b5c89469dc..af0c9fabee54 100644 --- a/drivers/bcma/driver_pci_host.c +++ b/drivers/bcma/driver_pci_host.c @@ -24,7 +24,7 @@ #define BCMA_PCI_SLOT_MAX 16 #define PCI_CONFIG_SPACE_SIZE 256 -bool __devinit bcma_core_pci_is_in_hostmode(struct bcma_drv_pci *pc) +bool bcma_core_pci_is_in_hostmode(struct bcma_drv_pci *pc) { struct bcma_bus *bus = pc->core->bus; u16 chipid_top; @@ -264,10 +264,9 @@ static int bcma_core_pci_hostmode_write_config(struct pci_bus *bus, } /* return cap_offset if requested capability exists in the PCI config space */ -static u8 __devinit bcma_find_pci_capability(struct bcma_drv_pci *pc, - unsigned int dev, - unsigned int func, u8 req_cap_id, - unsigned char *buf, u32 *buflen) +static u8 bcma_find_pci_capability(struct bcma_drv_pci *pc, unsigned int dev, + unsigned int func, u8 req_cap_id, + unsigned char *buf, u32 *buflen) { u8 cap_id; u8 cap_ptr = 0; @@ -334,7 +333,7 @@ static u8 __devinit bcma_find_pci_capability(struct bcma_drv_pci *pc, * Retry Status (CRS) Completion Status to software then * enable the feature. */ -static void __devinit bcma_core_pci_enable_crs(struct bcma_drv_pci *pc) +static void bcma_core_pci_enable_crs(struct bcma_drv_pci *pc) { struct bcma_bus *bus = pc->core->bus; u8 cap_ptr, root_ctrl, root_cap, dev; @@ -381,7 +380,7 @@ static void __devinit bcma_core_pci_enable_crs(struct bcma_drv_pci *pc) } } -void __devinit bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc) +void bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc) { struct bcma_bus *bus = pc->core->bus; struct bcma_drv_pci_host *pc_host; diff --git a/drivers/bcma/host_pci.c b/drivers/bcma/host_pci.c index 98fdc3e014e7..fbf2759e7e4e 100644 --- a/drivers/bcma/host_pci.c +++ b/drivers/bcma/host_pci.c @@ -155,8 +155,8 @@ static const struct bcma_host_ops bcma_host_pci_ops = { .awrite32 = bcma_host_pci_awrite32, }; -static int __devinit bcma_host_pci_probe(struct pci_dev *dev, - const struct pci_device_id *id) +static int bcma_host_pci_probe(struct pci_dev *dev, + const struct pci_device_id *id) { struct bcma_bus *bus; int err = -ENOMEM; @@ -226,7 +226,7 @@ err_kfree_bus: return err; } -static void __devexit bcma_host_pci_remove(struct pci_dev *dev) +static void bcma_host_pci_remove(struct pci_dev *dev) { struct bcma_bus *bus = pci_get_drvdata(dev); @@ -284,7 +284,7 @@ static struct pci_driver bcma_pci_bridge_driver = { .name = "bcma-pci-bridge", .id_table = bcma_pci_bridge_tbl, .probe = bcma_host_pci_probe, - .remove = __devexit_p(bcma_host_pci_remove), + .remove = bcma_host_pci_remove, .driver.pm = BCMA_PM_OPS, }; diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c index 53ba20ca17e0..4a92f647b58b 100644 --- a/drivers/bcma/main.c +++ b/drivers/bcma/main.c @@ -192,7 +192,7 @@ static void bcma_unregister_cores(struct bcma_bus *bus) platform_device_unregister(bus->drv_cc.watchdog); } -int __devinit bcma_bus_register(struct bcma_bus *bus) +int bcma_bus_register(struct bcma_bus *bus) { int err; struct bcma_device *core; diff --git a/include/linux/bcma/bcma_driver_gmac_cmn.h 
b/include/linux/bcma/bcma_driver_gmac_cmn.h index def894b83b0d..4dd1f33e36a2 100644 --- a/include/linux/bcma/bcma_driver_gmac_cmn.h +++ b/include/linux/bcma/bcma_driver_gmac_cmn.h @@ -92,7 +92,7 @@ struct bcma_drv_gmac_cmn { #define gmac_cmn_write32(gc, offset, val) bcma_write32((gc)->core, offset, val) #ifdef CONFIG_BCMA_DRIVER_GMAC_CMN -extern void __devinit bcma_core_gmac_cmn_init(struct bcma_drv_gmac_cmn *gc); +extern void bcma_core_gmac_cmn_init(struct bcma_drv_gmac_cmn *gc); #else static inline void bcma_core_gmac_cmn_init(struct bcma_drv_gmac_cmn *gc) { } #endif diff --git a/include/linux/bcma/bcma_driver_pci.h b/include/linux/bcma/bcma_driver_pci.h index 41da581e1612..c48d98d27b77 100644 --- a/include/linux/bcma/bcma_driver_pci.h +++ b/include/linux/bcma/bcma_driver_pci.h @@ -214,7 +214,7 @@ struct bcma_drv_pci { #define pcicore_write16(pc, offset, val) bcma_write16((pc)->core, offset, val) #define pcicore_write32(pc, offset, val) bcma_write32((pc)->core, offset, val) -extern void __devinit bcma_core_pci_init(struct bcma_drv_pci *pc); +extern void bcma_core_pci_init(struct bcma_drv_pci *pc); extern int bcma_core_pci_irq_ctl(struct bcma_drv_pci *pc, struct bcma_device *core, bool enable); extern void bcma_core_pci_extend_L1timer(struct bcma_drv_pci *pc, bool extend); -- cgit v1.2.3 From e389623a68622e3c9be440ab522fac1aa1ca3454 Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Fri, 21 Dec 2012 15:15:49 -0800 Subject: include: remove __dev* attributes. CONFIG_HOTPLUG is going away as an option. As a result, the __dev* markings need to be removed. This change removes the use of __devinit from some include files that were previously missed. Based on patches originally written by Bill Pemberton, but redone by me in order to handle some of the coding style issues better, by hand. Cc: Bill Pemberton Signed-off-by: Greg Kroah-Hartman --- include/asm-generic/parport.h | 4 ++-- include/linux/ata_platform.h | 12 ++++++------ 2 files changed, 8 insertions(+), 8 deletions(-) (limited to 'include/linux') diff --git a/include/asm-generic/parport.h b/include/asm-generic/parport.h index 40528cb977e8..2c9f9d4336ca 100644 --- a/include/asm-generic/parport.h +++ b/include/asm-generic/parport.h @@ -10,8 +10,8 @@ * to devices on the PCI bus. 
*/ -static int __devinit parport_pc_find_isa_ports(int autoirq, int autodma); -static int __devinit parport_pc_find_nonpci_ports(int autoirq, int autodma) +static int parport_pc_find_isa_ports(int autoirq, int autodma); +static int parport_pc_find_nonpci_ports(int autoirq, int autodma) { #ifdef CONFIG_ISA return parport_pc_find_isa_ports(autoirq, autodma); diff --git a/include/linux/ata_platform.h b/include/linux/ata_platform.h index fe9989636b62..b9fde17f767c 100644 --- a/include/linux/ata_platform.h +++ b/include/linux/ata_platform.h @@ -15,12 +15,12 @@ struct pata_platform_info { unsigned int irq_flags; }; -extern int __devinit __pata_platform_probe(struct device *dev, - struct resource *io_res, - struct resource *ctl_res, - struct resource *irq_res, - unsigned int ioport_shift, - int __pio_mask); +extern int __pata_platform_probe(struct device *dev, + struct resource *io_res, + struct resource *ctl_res, + struct resource *irq_res, + unsigned int ioport_shift, + int __pio_mask); /* * Marvell SATA private data -- cgit v1.2.3 From 03f595668017f1a1fb971c02fc37140bc6e7bb1c Mon Sep 17 00:00:00 2001 From: Stanislav Kinsbursky Date: Fri, 4 Jan 2013 15:34:50 -0800 Subject: ipc: add sysctl to specify desired next object id Add 3 new variables and sysctls to tune them (by one "next_id" variable for messages, semaphores and shared memory respectively). This variable can be used to set desired id for next allocated IPC object. By default it's equal to -1 and old behaviour is preserved. If this variable is non-negative, then desired idr will be extracted from it and used as a start value to search for free IDR slot. Notes: 1) this patch doesn't guarantee that the new object will have desired id. So it's up to user space how to handle new object with wrong id. 2) After a successful id allocation attempt, "next_id" will be set back to -1 (if it was non-negative). [akpm@linux-foundation.org: checkpatch fixes] Signed-off-by: Stanislav Kinsbursky Cc: Serge Hallyn Cc: "Eric W. Biederman" Cc: Pavel Emelyanov Cc: Al Viro Cc: KOSAKI Motohiro Cc: Michael Kerrisk Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/sysctl/kernel.txt | 19 +++++++++++++++++++ include/linux/ipc_namespace.h | 1 + ipc/ipc_sysctl.c | 32 ++++++++++++++++++++++++++++++++ ipc/util.c | 16 ++++++++++++---- ipc/util.h | 1 + 5 files changed, 65 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt index 2907ba6c3607..51b953a1b149 100644 --- a/Documentation/sysctl/kernel.txt +++ b/Documentation/sysctl/kernel.txt @@ -38,6 +38,7 @@ show up in /proc/sys/kernel: - l2cr [ PPC only ] - modprobe ==> Documentation/debugging-modules.txt - modules_disabled +- msg_next_id [ sysv ipc ] - msgmax - msgmnb - msgmni @@ -62,7 +63,9 @@ show up in /proc/sys/kernel: - rtsig-max - rtsig-nr - sem +- sem_next_id [ sysv ipc ] - sg-big-buff [ generic SCSI device (sg) ] +- shm_next_id [ sysv ipc ] - shm_rmid_forced - shmall - shmmax [ sysv ipc ] @@ -320,6 +323,22 @@ to false. ============================================================== +msg_next_id, sem_next_id, and shm_next_id: + +These three toggles allows to specify desired id for next allocated IPC +object: message, semaphore or shared memory respectively. + +By default they are equal to -1, which means generic allocation logic. +Possible values to set are in range {0..INT_MAX}. + +Notes: +1) kernel doesn't guarantee, that new object will have desired id. 
So, +it's up to userspace, how to handle an object with "wrong" id. +2) Toggle with non-default value will be set back to -1 by kernel after +successful IPC object allocation. + +============================================================== + nmi_watchdog: Enables/Disables the NMI watchdog on x86 systems. When the value is diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h index fe771978e877..ae221a7b5092 100644 --- a/include/linux/ipc_namespace.h +++ b/include/linux/ipc_namespace.h @@ -24,6 +24,7 @@ struct ipc_ids { unsigned short seq_max; struct rw_semaphore rw_mutex; struct idr ipcs_idr; + int next_id; }; struct ipc_namespace { diff --git a/ipc/ipc_sysctl.c b/ipc/ipc_sysctl.c index 00fba2bab87d..130dfece27ac 100644 --- a/ipc/ipc_sysctl.c +++ b/ipc/ipc_sysctl.c @@ -158,6 +158,9 @@ static int proc_ipcauto_dointvec_minmax(ctl_table *table, int write, static int zero; static int one = 1; +#ifdef CONFIG_CHECKPOINT_RESTORE +static int int_max = INT_MAX; +#endif static struct ctl_table ipc_kern_table[] = { { @@ -227,6 +230,35 @@ static struct ctl_table ipc_kern_table[] = { .extra1 = &zero, .extra2 = &one, }, +#ifdef CONFIG_CHECKPOINT_RESTORE + { + .procname = "sem_next_id", + .data = &init_ipc_ns.ids[IPC_SEM_IDS].next_id, + .maxlen = sizeof(init_ipc_ns.ids[IPC_SEM_IDS].next_id), + .mode = 0644, + .proc_handler = proc_ipc_dointvec_minmax, + .extra1 = &zero, + .extra2 = &int_max, + }, + { + .procname = "msg_next_id", + .data = &init_ipc_ns.ids[IPC_MSG_IDS].next_id, + .maxlen = sizeof(init_ipc_ns.ids[IPC_MSG_IDS].next_id), + .mode = 0644, + .proc_handler = proc_ipc_dointvec_minmax, + .extra1 = &zero, + .extra2 = &int_max, + }, + { + .procname = "shm_next_id", + .data = &init_ipc_ns.ids[IPC_SHM_IDS].next_id, + .maxlen = sizeof(init_ipc_ns.ids[IPC_SHM_IDS].next_id), + .mode = 0644, + .proc_handler = proc_ipc_dointvec_minmax, + .extra1 = &zero, + .extra2 = &int_max, + }, +#endif {} }; diff --git a/ipc/util.c b/ipc/util.c index 72fd0785ac94..74e1d9c7a98a 100644 --- a/ipc/util.c +++ b/ipc/util.c @@ -122,6 +122,7 @@ void ipc_init_ids(struct ipc_ids *ids) ids->in_use = 0; ids->seq = 0; + ids->next_id = -1; { int seq_limit = INT_MAX/SEQ_MULTIPLIER; if (seq_limit > USHRT_MAX) @@ -252,6 +253,7 @@ int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size) kuid_t euid; kgid_t egid; int id, err; + int next_id = ids->next_id; if (size > IPCMNI) size = IPCMNI; @@ -264,7 +266,8 @@ int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size) rcu_read_lock(); spin_lock(&new->lock); - err = idr_get_new(&ids->ipcs_idr, new, &id); + err = idr_get_new_above(&ids->ipcs_idr, new, + (next_id < 0) ? 
0 : ipcid_to_idx(next_id), &id); if (err) { spin_unlock(&new->lock); rcu_read_unlock(); @@ -277,9 +280,14 @@ int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size) new->cuid = new->uid = euid; new->gid = new->cgid = egid; - new->seq = ids->seq++; - if(ids->seq > ids->seq_max) - ids->seq = 0; + if (next_id < 0) { + new->seq = ids->seq++; + if (ids->seq > ids->seq_max) + ids->seq = 0; + } else { + new->seq = ipcid_to_seqx(next_id); + ids->next_id = -1; + } new->id = ipc_buildid(id, new->seq); return id; diff --git a/ipc/util.h b/ipc/util.h index c8fe2f7631e9..a61e0ca2bffd 100644 --- a/ipc/util.h +++ b/ipc/util.h @@ -92,6 +92,7 @@ void __init ipc_init_proc_interface(const char *path, const char *header, #define IPC_SHM_IDS 2 #define ipcid_to_idx(id) ((id) % SEQ_MULTIPLIER) +#define ipcid_to_seqx(id) ((id) / SEQ_MULTIPLIER) /* must be called with ids->rw_mutex acquired for writing */ int ipc_addid(struct ipc_ids *, struct kern_ipc_perm *, int); -- cgit v1.2.3 From f9dd87f4738c7555aca2cdf8cb2b2326cafb0cad Mon Sep 17 00:00:00 2001 From: Stanislav Kinsbursky Date: Fri, 4 Jan 2013 15:34:52 -0800 Subject: ipc: message queue receive cleanup Move all message related manipulation into one function msg_fill(). Actually, two functions because of the compat one. [akpm@linux-foundation.org: checkpatch fixes] Signed-off-by: Stanislav Kinsbursky Cc: Serge Hallyn Cc: "Eric W. Biederman" Cc: Pavel Emelyanov Cc: Al Viro Cc: KOSAKI Motohiro Cc: Michael Kerrisk Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/msg.h | 5 +++-- ipc/compat.c | 45 +++++++++++++++++++-------------------------- ipc/msg.c | 44 +++++++++++++++++++++++--------------------- 3 files changed, 45 insertions(+), 49 deletions(-) (limited to 'include/linux') diff --git a/include/linux/msg.h b/include/linux/msg.h index 7a4b9e97d29a..fc5743a554e6 100644 --- a/include/linux/msg.h +++ b/include/linux/msg.h @@ -34,7 +34,8 @@ struct msg_queue { /* Helper routines for sys_msgsnd and sys_msgrcv */ extern long do_msgsnd(int msqid, long mtype, void __user *mtext, size_t msgsz, int msgflg); -extern long do_msgrcv(int msqid, long *pmtype, void __user *mtext, - size_t msgsz, long msgtyp, int msgflg); +extern long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, + int msgflg, + long (*msg_fill)(void __user *, struct msg_msg *, size_t)); #endif /* _LINUX_MSG_H */ diff --git a/ipc/compat.c b/ipc/compat.c index ad9518eb26e0..eb3ea16d2d1d 100644 --- a/ipc/compat.c +++ b/ipc/compat.c @@ -306,6 +306,20 @@ static long do_compat_semctl(int first, int second, int third, u32 pad) return err; } +long compat_do_msg_fill(void __user *dest, struct msg_msg *msg, size_t bufsz) +{ + struct compat_msgbuf __user *msgp = dest; + size_t msgsz; + + if (put_user(msg->m_type, &msgp->mtype)) + return -EFAULT; + + msgsz = (bufsz > msg->m_ts) ? 
msg->m_ts : bufsz; + if (store_msg(msgp->mtext, msg, msgsz)) + return -EFAULT; + return msgsz; +} + #ifdef CONFIG_ARCH_WANT_OLD_COMPAT_IPC long compat_sys_semctl(int first, int second, int third, void __user *uptr) { @@ -337,10 +351,6 @@ long compat_sys_msgsnd(int first, int second, int third, void __user *uptr) long compat_sys_msgrcv(int first, int second, int msgtyp, int third, int version, void __user *uptr) { - struct compat_msgbuf __user *up; - long type; - int err; - if (first < 0) return -EINVAL; if (second < 0) @@ -348,23 +358,14 @@ long compat_sys_msgrcv(int first, int second, int msgtyp, int third, if (!version) { struct compat_ipc_kludge ipck; - err = -EINVAL; if (!uptr) - goto out; - err = -EFAULT; + return -EINVAL; if (copy_from_user (&ipck, uptr, sizeof(ipck))) - goto out; + return -EFAULT; uptr = compat_ptr(ipck.msgp); msgtyp = ipck.msgtyp; } - up = uptr; - err = do_msgrcv(first, &type, up->mtext, second, msgtyp, third); - if (err < 0) - goto out; - if (put_user(type, &up->mtype)) - err = -EFAULT; -out: - return err; + return do_msgrcv(first, uptr, second, msgtyp, third, compat_do_msg_fill); } #else long compat_sys_semctl(int semid, int semnum, int cmd, int arg) @@ -385,16 +386,8 @@ long compat_sys_msgsnd(int msqid, struct compat_msgbuf __user *msgp, long compat_sys_msgrcv(int msqid, struct compat_msgbuf __user *msgp, compat_ssize_t msgsz, long msgtyp, int msgflg) { - long err, mtype; - - err = do_msgrcv(msqid, &mtype, msgp->mtext, (ssize_t)msgsz, msgtyp, msgflg); - if (err < 0) - goto out; - - if (put_user(mtype, &msgp->mtype)) - err = -EFAULT; - out: - return err; + return do_msgrcv(msqid, msgp, (ssize_t)msgsz, msgtyp, msgflg, + compat_do_msg_fill); } #endif diff --git a/ipc/msg.c b/ipc/msg.c index 2f272fa76595..cefc24f46e3e 100644 --- a/ipc/msg.c +++ b/ipc/msg.c @@ -755,15 +755,30 @@ static inline int convert_mode(long *msgtyp, int msgflg) return SEARCH_EQUAL; } -long do_msgrcv(int msqid, long *pmtype, void __user *mtext, - size_t msgsz, long msgtyp, int msgflg) +static long do_msg_fill(void __user *dest, struct msg_msg *msg, size_t bufsz) +{ + struct msgbuf __user *msgp = dest; + size_t msgsz; + + if (put_user(msg->m_type, &msgp->mtype)) + return -EFAULT; + + msgsz = (bufsz > msg->m_ts) ? msg->m_ts : bufsz; + if (store_msg(msgp->mtext, msg, msgsz)) + return -EFAULT; + return msgsz; +} + +long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, + int msgflg, + long (*msg_handler)(void __user *, struct msg_msg *, size_t)) { struct msg_queue *msq; struct msg_msg *msg; int mode; struct ipc_namespace *ns; - if (msqid < 0 || (long) msgsz < 0) + if (msqid < 0 || (long) bufsz < 0) return -EINVAL; mode = convert_mode(&msgtyp, msgflg); ns = current->nsproxy->ipc_ns; @@ -804,7 +819,7 @@ long do_msgrcv(int msqid, long *pmtype, void __user *mtext, * Found a suitable message. * Unlink it from the queue. */ - if ((msgsz < msg->m_ts) && !(msgflg & MSG_NOERROR)) { + if ((bufsz < msg->m_ts) && !(msgflg & MSG_NOERROR)) { msg = ERR_PTR(-E2BIG); goto out_unlock; } @@ -831,7 +846,7 @@ long do_msgrcv(int msqid, long *pmtype, void __user *mtext, if (msgflg & MSG_NOERROR) msr_d.r_maxsize = INT_MAX; else - msr_d.r_maxsize = msgsz; + msr_d.r_maxsize = bufsz; msr_d.r_msg = ERR_PTR(-EAGAIN); current->state = TASK_INTERRUPTIBLE; msg_unlock(msq); @@ -894,29 +909,16 @@ out_unlock: if (IS_ERR(msg)) return PTR_ERR(msg); - msgsz = (msgsz > msg->m_ts) ? 
msg->m_ts : msgsz; - *pmtype = msg->m_type; - if (store_msg(mtext, msg, msgsz)) - msgsz = -EFAULT; - + bufsz = msg_handler(buf, msg, bufsz); free_msg(msg); - return msgsz; + return bufsz; } SYSCALL_DEFINE5(msgrcv, int, msqid, struct msgbuf __user *, msgp, size_t, msgsz, long, msgtyp, int, msgflg) { - long err, mtype; - - err = do_msgrcv(msqid, &mtype, msgp->mtext, msgsz, msgtyp, msgflg); - if (err < 0) - goto out; - - if (put_user(mtype, &msgp->mtype)) - err = -EFAULT; -out: - return err; + return do_msgrcv(msqid, msgp, msgsz, msgtyp, msgflg, do_msg_fill); } #ifdef CONFIG_PROC_FS -- cgit v1.2.3 From 3a665531a3b7c2ad2c87903b24646be6916340e4 Mon Sep 17 00:00:00 2001 From: Stanislav Kinsbursky Date: Fri, 4 Jan 2013 15:34:56 -0800 Subject: selftests: IPC message queue copy feature test This test can be used to check wheither kernel supports IPC message queue copy and restore features (required by CRIU project). Signed-off-by: Stanislav Kinsbursky Cc: Serge Hallyn Cc: "Eric W. Biederman" Cc: Pavel Emelyanov Cc: Al Viro Cc: KOSAKI Motohiro Cc: Michael Kerrisk Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/msg.h | 3 +- ipc/compat.c | 3 +- tools/testing/selftests/ipc/Makefile | 25 ++++ tools/testing/selftests/ipc/msgque.c | 246 +++++++++++++++++++++++++++++++++++ 4 files changed, 275 insertions(+), 2 deletions(-) create mode 100644 tools/testing/selftests/ipc/Makefile create mode 100644 tools/testing/selftests/ipc/msgque.c (limited to 'include/linux') diff --git a/include/linux/msg.h b/include/linux/msg.h index fc5743a554e6..391af8d11cce 100644 --- a/include/linux/msg.h +++ b/include/linux/msg.h @@ -36,6 +36,7 @@ extern long do_msgsnd(int msqid, long mtype, void __user *mtext, size_t msgsz, int msgflg); extern long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, int msgflg, - long (*msg_fill)(void __user *, struct msg_msg *, size_t)); + long (*msg_fill)(void __user *, struct msg_msg *, + size_t)); #endif /* _LINUX_MSG_H */ diff --git a/ipc/compat.c b/ipc/compat.c index eb3ea16d2d1d..2547f29dcd1b 100644 --- a/ipc/compat.c +++ b/ipc/compat.c @@ -365,7 +365,8 @@ long compat_sys_msgrcv(int first, int second, int msgtyp, int third, uptr = compat_ptr(ipck.msgp); msgtyp = ipck.msgtyp; } - return do_msgrcv(first, uptr, second, msgtyp, third, compat_do_msg_fill); + return do_msgrcv(first, uptr, second, msgtyp, third, + compat_do_msg_fill); } #else long compat_sys_semctl(int semid, int semnum, int cmd, int arg) diff --git a/tools/testing/selftests/ipc/Makefile b/tools/testing/selftests/ipc/Makefile new file mode 100644 index 000000000000..5386fd7c43ae --- /dev/null +++ b/tools/testing/selftests/ipc/Makefile @@ -0,0 +1,25 @@ +uname_M := $(shell uname -m 2>/dev/null || echo not) +ARCH ?= $(shell echo $(uname_M) | sed -e s/i.86/i386/) +ifeq ($(ARCH),i386) + ARCH := X86 + CFLAGS := -DCONFIG_X86_32 -D__i386__ +endif +ifeq ($(ARCH),x86_64) + ARCH := X86 + CFLAGS := -DCONFIG_X86_64 -D__x86_64__ +endif + +CFLAGS += -I../../../../usr/include/ + +all: +ifeq ($(ARCH),X86) + gcc $(CFLAGS) msgque.c -o msgque_test +else + echo "Not an x86 target, can't build msgque selftest" +endif + +run_tests: all + ./msgque_test + +clean: + rm -fr ./msgque_test diff --git a/tools/testing/selftests/ipc/msgque.c b/tools/testing/selftests/ipc/msgque.c new file mode 100644 index 000000000000..d66418237d21 --- /dev/null +++ b/tools/testing/selftests/ipc/msgque.c @@ -0,0 +1,246 @@ +#include +#include +#include +#include +#include +#include + +#define MAX_MSG_SIZE 32 + +struct msg1 { + int 
msize; + long mtype; + char mtext[MAX_MSG_SIZE]; +}; + +#define TEST_STRING "Test sysv5 msg" +#define MSG_TYPE 1 + +#define ANOTHER_TEST_STRING "Yet another test sysv5 msg" +#define ANOTHER_MSG_TYPE 26538 + +struct msgque_data { + key_t key; + int msq_id; + int qbytes; + int qnum; + int mode; + struct msg1 *messages; +}; + +int restore_queue(struct msgque_data *msgque) +{ + int fd, ret, id, i; + char buf[32]; + + fd = open("/proc/sys/kernel/msg_next_id", O_WRONLY); + if (fd == -1) { + printf("Failed to open /proc/sys/kernel/msg_next_id\n"); + return -errno; + } + sprintf(buf, "%d", msgque->msq_id); + + ret = write(fd, buf, strlen(buf)); + if (ret != strlen(buf)) { + printf("Failed to write to /proc/sys/kernel/msg_next_id\n"); + return -errno; + } + + id = msgget(msgque->key, msgque->mode | IPC_CREAT | IPC_EXCL); + if (id == -1) { + printf("Failed to create queue\n"); + return -errno; + } + + if (id != msgque->msq_id) { + printf("Restored queue has wrong id (%d instead of %d)\n", + id, msgque->msq_id); + ret = -EFAULT; + goto destroy; + } + + for (i = 0; i < msgque->qnum; i++) { + if (msgsnd(msgque->msq_id, &msgque->messages[i].mtype, + msgque->messages[i].msize, IPC_NOWAIT) != 0) { + printf("msgsnd failed (%m)\n"); + ret = -errno; + goto destroy; + }; + } + return 0; + +destroy: + if (msgctl(id, IPC_RMID, 0)) + printf("Failed to destroy queue: %d\n", -errno); + return ret; +} + +int check_and_destroy_queue(struct msgque_data *msgque) +{ + struct msg1 message; + int cnt = 0, ret; + + while (1) { + ret = msgrcv(msgque->msq_id, &message.mtype, MAX_MSG_SIZE, + 0, IPC_NOWAIT); + if (ret < 0) { + if (errno == ENOMSG) + break; + printf("Failed to read IPC message: %m\n"); + ret = -errno; + goto err; + } + if (ret != msgque->messages[cnt].msize) { + printf("Wrong message size: %d (expected %d)\n", ret, + msgque->messages[cnt].msize); + ret = -EINVAL; + goto err; + } + if (message.mtype != msgque->messages[cnt].mtype) { + printf("Wrong message type\n"); + ret = -EINVAL; + goto err; + } + if (memcmp(message.mtext, msgque->messages[cnt].mtext, ret)) { + printf("Wrong message content\n"); + ret = -EINVAL; + goto err; + } + cnt++; + } + + if (cnt != msgque->qnum) { + printf("Wrong message number\n"); + ret = -EINVAL; + goto err; + } + + ret = 0; +err: + if (msgctl(msgque->msq_id, IPC_RMID, 0)) { + printf("Failed to destroy queue: %d\n", -errno); + return -errno; + } + return ret; +} + +int dump_queue(struct msgque_data *msgque) +{ + struct msqid64_ds ds; + int kern_id; + int i, ret; + + for (kern_id = 0; kern_id < 256; kern_id++) { + ret = msgctl(kern_id, MSG_STAT, &ds); + if (ret < 0) { + if (errno == -EINVAL) + continue; + printf("Failed to get stats for IPC queue with id %d\n", + kern_id); + return -errno; + } + + if (ret == msgque->msq_id) + break; + } + + msgque->messages = malloc(sizeof(struct msg1) * ds.msg_qnum); + if (msgque->messages == NULL) { + printf("Failed to get stats for IPC queue\n"); + return -ENOMEM; + } + + msgque->qnum = ds.msg_qnum; + msgque->mode = ds.msg_perm.mode; + msgque->qbytes = ds.msg_qbytes; + + for (i = 0; i < msgque->qnum; i++) { + ret = msgrcv(msgque->msq_id, &msgque->messages[i].mtype, + MAX_MSG_SIZE, i, IPC_NOWAIT | MSG_COPY); + if (ret < 0) { + printf("Failed to copy IPC message: %m (%d)\n", errno); + return -errno; + } + msgque->messages[i].msize = ret; + } + return 0; +} + +int fill_msgque(struct msgque_data *msgque) +{ + struct msg1 msgbuf; + + msgbuf.mtype = MSG_TYPE; + memcpy(msgbuf.mtext, TEST_STRING, sizeof(TEST_STRING)); + if (msgsnd(msgque->msq_id, 
&msgbuf.mtype, sizeof(TEST_STRING), + IPC_NOWAIT) != 0) { + printf("First message send failed (%m)\n"); + return -errno; + }; + + msgbuf.mtype = ANOTHER_MSG_TYPE; + memcpy(msgbuf.mtext, ANOTHER_TEST_STRING, sizeof(ANOTHER_TEST_STRING)); + if (msgsnd(msgque->msq_id, &msgbuf.mtype, sizeof(ANOTHER_TEST_STRING), + IPC_NOWAIT) != 0) { + printf("Second message send failed (%m)\n"); + return -errno; + }; + return 0; +} + +int main(int argc, char **argv) +{ + int msg, pid, err; + struct msgque_data msgque; + + msgque.key = ftok(argv[0], 822155650); + if (msgque.key == -1) { + printf("Can't make key\n"); + return -errno; + } + + msgque.msq_id = msgget(msgque.key, IPC_CREAT | IPC_EXCL | 0666); + if (msgque.msq_id == -1) { + printf("Can't create queue\n"); + goto err_out; + } + + err = fill_msgque(&msgque); + if (err) { + printf("Failed to fill queue\n"); + goto err_destroy; + } + + err = dump_queue(&msgque); + if (err) { + printf("Failed to dump queue\n"); + goto err_destroy; + } + + err = check_and_destroy_queue(&msgque); + if (err) { + printf("Failed to check and destroy queue\n"); + goto err_out; + } + + err = restore_queue(&msgque); + if (err) { + printf("Failed to restore queue\n"); + goto err_destroy; + } + + err = check_and_destroy_queue(&msgque); + if (err) { + printf("Failed to test queue\n"); + goto err_out; + } + return 0; + +err_destroy: + if (msgctl(msgque.msq_id, IPC_RMID, 0)) { + printf("Failed to destroy queue: %d\n", -errno); + return -errno; + } +err_out: + return err; +} -- cgit v1.2.3 From a458431e176ddb27e8ef8b98c2a681b217337393 Mon Sep 17 00:00:00 2001 From: Bartlomiej Zolnierkiewicz Date: Fri, 4 Jan 2013 15:35:08 -0800 Subject: mm: fix zone_watermark_ok_safe() accounting of isolated pages Commit 702d1a6e0766 ("memory-hotplug: fix kswapd looping forever problem") added an isolated pageblocks counter (nr_pageblock_isolate in struct zone) and used it to adjust free pages counter in zone_watermark_ok_safe() to prevent kswapd looping forever problem. Then later, commit 2139cbe627b8 ("cma: fix counting of isolated pages") fixed accounting of isolated pages in global free pages counter. It made the previous zone_watermark_ok_safe() fix unnecessary and potentially harmful (cause now isolated pages may be accounted twice making free pages counter incorrect). This patch removes the special isolated pageblocks counter altogether which fixes zone_watermark_ok_safe() free pages check. Reported-by: Tomasz Stanislawski Signed-off-by: Bartlomiej Zolnierkiewicz Signed-off-by: Kyungmin Park Cc: Minchan Kim Cc: KOSAKI Motohiro Cc: Aaditya Kumar Cc: KAMEZAWA Hiroyuki Cc: Mel Gorman Cc: Michal Hocko Cc: Marek Szyprowski Cc: Michal Nazarewicz Cc: Hugh Dickins Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mmzone.h | 8 -------- mm/page_alloc.c | 27 --------------------------- mm/page_isolation.c | 26 ++------------------------ 3 files changed, 2 insertions(+), 59 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 4bec5be82cab..73b64a38b984 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -503,14 +503,6 @@ struct zone { * rarely used fields: */ const char *name; -#ifdef CONFIG_MEMORY_ISOLATION - /* - * the number of MIGRATE_ISOLATE *pageblock*. - * We need this for free page counting. Look at zone_watermark_ok_safe. 
- * It's protected by zone->lock - */ - int nr_pageblock_isolate; -#endif } ____cacheline_internodealigned_in_smp; typedef enum { diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 4ba5e37127fc..bc6cc0e913bd 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -221,11 +221,6 @@ EXPORT_SYMBOL(nr_online_nodes); int page_group_by_mobility_disabled __read_mostly; -/* - * NOTE: - * Don't use set_pageblock_migratetype(page, MIGRATE_ISOLATE) directly. - * Instead, use {un}set_pageblock_isolate. - */ void set_pageblock_migratetype(struct page *page, int migratetype) { @@ -1655,20 +1650,6 @@ static bool __zone_watermark_ok(struct zone *z, int order, unsigned long mark, return true; } -#ifdef CONFIG_MEMORY_ISOLATION -static inline unsigned long nr_zone_isolate_freepages(struct zone *zone) -{ - if (unlikely(zone->nr_pageblock_isolate)) - return zone->nr_pageblock_isolate * pageblock_nr_pages; - return 0; -} -#else -static inline unsigned long nr_zone_isolate_freepages(struct zone *zone) -{ - return 0; -} -#endif - bool zone_watermark_ok(struct zone *z, int order, unsigned long mark, int classzone_idx, int alloc_flags) { @@ -1684,14 +1665,6 @@ bool zone_watermark_ok_safe(struct zone *z, int order, unsigned long mark, if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark) free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES); - /* - * If the zone has MIGRATE_ISOLATE type free pages, we should consider - * it. nr_zone_isolate_freepages is never accurate so kswapd might not - * sleep although it could do so. But this is more desirable for memory - * hotplug than sleeping which can cause a livelock in the direct - * reclaim path. - */ - free_pages -= nr_zone_isolate_freepages(z); return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags, free_pages); } diff --git a/mm/page_isolation.c b/mm/page_isolation.c index 9d2264ea4606..383bdbb98b04 100644 --- a/mm/page_isolation.c +++ b/mm/page_isolation.c @@ -8,28 +8,6 @@ #include #include "internal.h" -/* called while holding zone->lock */ -static void set_pageblock_isolate(struct page *page) -{ - if (get_pageblock_migratetype(page) == MIGRATE_ISOLATE) - return; - - set_pageblock_migratetype(page, MIGRATE_ISOLATE); - page_zone(page)->nr_pageblock_isolate++; -} - -/* called while holding zone->lock */ -static void restore_pageblock_isolate(struct page *page, int migratetype) -{ - struct zone *zone = page_zone(page); - if (WARN_ON(get_pageblock_migratetype(page) != MIGRATE_ISOLATE)) - return; - - BUG_ON(zone->nr_pageblock_isolate <= 0); - set_pageblock_migratetype(page, migratetype); - zone->nr_pageblock_isolate--; -} - int set_migratetype_isolate(struct page *page, bool skip_hwpoisoned_pages) { struct zone *zone; @@ -80,7 +58,7 @@ out: unsigned long nr_pages; int migratetype = get_pageblock_migratetype(page); - set_pageblock_isolate(page); + set_pageblock_migratetype(page, MIGRATE_ISOLATE); nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE); __mod_zone_freepage_state(zone, -nr_pages, migratetype); @@ -103,7 +81,7 @@ void unset_migratetype_isolate(struct page *page, unsigned migratetype) goto out; nr_pages = move_freepages_block(zone, page, migratetype); __mod_zone_freepage_state(zone, nr_pages, migratetype); - restore_pageblock_isolate(page, migratetype); + set_pageblock_migratetype(page, migratetype); out: spin_unlock_irqrestore(&zone->lock, flags); } -- cgit v1.2.3 From 08c097fc3bb283299a6915a6a3795edab85979b1 Mon Sep 17 00:00:00 2001 From: Marc Dionne Date: Wed, 9 Jan 2013 14:16:30 +0000 Subject: cred: Remove tgcred pointer 
from struct cred Commit 3a50597de863 ("KEYS: Make the session and process keyrings per-thread") removed the definition of the thread_group_cred structure, but left a now unused pointer in struct cred. Signed-off-by: Marc Dionne Signed-off-by: David Howells Signed-off-by: Linus Torvalds --- include/linux/cred.h | 1 - 1 file changed, 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/cred.h b/include/linux/cred.h index abb2cd50f6b2..04421e825365 100644 --- a/include/linux/cred.h +++ b/include/linux/cred.h @@ -128,7 +128,6 @@ struct cred { struct key *process_keyring; /* keyring private to this process */ struct key *thread_keyring; /* keyring private to this thread */ struct key *request_key_auth; /* assumed request_key authority */ - struct thread_group_cred *tgcred; /* thread-group shared credentials */ #endif #ifdef CONFIG_SECURITY void *security; /* subjective LSM security */ -- cgit v1.2.3 From 54b956b903607f8f8878754dd4352da6a54a1da2 Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Thu, 10 Jan 2013 10:57:01 -0800 Subject: Remove __dev* markings from init.h Now that all in-kernel users of __dev* are gone, let's remove them from init.h to keep them from popping up again and again. Thanks to Bill Pemberton for doing all of the hard work to make removal of this possible. Cc: Bill Pemberton Cc: Stephen Rothwell Signed-off-by: Greg Kroah-Hartman --- include/linux/init.h | 20 -------------------- 1 file changed, 20 deletions(-) (limited to 'include/linux') diff --git a/include/linux/init.h b/include/linux/init.h index a799273714ac..10ed4f436458 100644 --- a/include/linux/init.h +++ b/include/linux/init.h @@ -93,14 +93,6 @@ #define __exit __section(.exit.text) __exitused __cold notrace -/* Used for HOTPLUG, but that is always enabled now, so just make them noops */ -#define __devinit -#define __devinitdata -#define __devinitconst -#define __devexit -#define __devexitdata -#define __devexitconst - /* Used for HOTPLUG_CPU */ #define __cpuinit __section(.cpuinit.text) __cold notrace #define __cpuinitdata __section(.cpuinit.data) @@ -337,18 +329,6 @@ void __init parse_early_options(char *cmdline); #define __INITRODATA_OR_MODULE __INITRODATA #endif /*CONFIG_MODULES*/ -/* Functions marked as __devexit may be discarded at kernel link time, depending - on config options. Newer versions of binutils detect references from - retained sections to discarded sections and flag an error. Pointers to - __devexit functions must use __devexit_p(function_name), the wrapper will - insert either the function_name or NULL, depending on the config options. - */ -#if defined(MODULE) || defined(CONFIG_HOTPLUG) -#define __devexit_p(x) x -#else -#define __devexit_p(x) NULL -#endif - #ifdef MODULE #define __exit_p(x) x #else -- cgit v1.2.3 From 896f97ea95c1d29c0520ee0766b66b7f64cb967c Mon Sep 17 00:00:00 2001 From: David Decotigny Date: Fri, 11 Jan 2013 14:31:36 -0800 Subject: lib: cpu_rmap: avoid flushing all workqueues In some cases, free_irq_cpu_rmap() is called while holding a lock (eg rtnl). This can lead to deadlocks, because it invokes flush_scheduled_work() which ends up waiting for whole system workqueue to flush, but some pending works might try to acquire the lock we are already holding. This commit uses reference-counting to replace irq_run_affinity_notifiers(). It also removes irq_run_affinity_notifiers() altogether. 
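As an aside, a minimal driver-side sketch of the resulting lifetime model (illustrative only, not part of the patch; the my_* identifiers are hypothetical and only the cpu_rmap calls themselves are real):

/* Sketch: rmap lifetime under the kref scheme; needs <linux/cpu_rmap.h>.
 * Error unwinding is omitted for brevity.
 */
static struct cpu_rmap *my_rmap;

static int my_setup_irqs(const int *irqs, unsigned int n)
{
        unsigned int i;
        int rc;

        my_rmap = alloc_cpu_rmap(n, GFP_KERNEL);  /* caller holds one reference */
        if (!my_rmap)
                return -ENOMEM;

        for (i = 0; i < n; i++) {
                rc = irq_cpu_rmap_add(my_rmap, irqs[i]);  /* each notifier takes a reference */
                if (rc)
                        return rc;
        }
        return 0;
}

static void my_teardown_irqs(void)
{
        /* Unregisters the affinity notifiers and drops the caller's
         * reference; no flush_scheduled_work(), so this is safe while
         * holding rtnl.  The rmap memory goes away only after the last
         * notifier's kref release has run.
         */
        free_irq_cpu_rmap(my_rmap);
        my_rmap = NULL;
}
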
[akpm@linux-foundation.org: eliminate free_cpu_rmap, rename cpu_rmap_reclaim() to cpu_rmap_release(), propagate kref_put() retval from cpu_rmap_put()] Signed-off-by: David Decotigny Reviewed-by: Ben Hutchings Acked-by: Eric Dumazet Reviewed-by: Josh Triplett Cc: "David S. Miller" Cc: Or Gerlitz Acked-by: Amir Vadai Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/cpu_rmap.h | 13 ++++-------- include/linux/interrupt.h | 5 ----- lib/cpu_rmap.c | 54 ++++++++++++++++++++++++++++++++++++++++++----- 3 files changed, 53 insertions(+), 19 deletions(-) (limited to 'include/linux') diff --git a/include/linux/cpu_rmap.h b/include/linux/cpu_rmap.h index ac3bbb5b9502..1739510d8994 100644 --- a/include/linux/cpu_rmap.h +++ b/include/linux/cpu_rmap.h @@ -13,9 +13,11 @@ #include #include #include +#include /** * struct cpu_rmap - CPU affinity reverse-map + * @refcount: kref for object * @size: Number of objects to be reverse-mapped * @used: Number of objects added * @obj: Pointer to array of object pointers @@ -23,6 +25,7 @@ * based on affinity masks */ struct cpu_rmap { + struct kref refcount; u16 size, used; void **obj; struct { @@ -33,15 +36,7 @@ struct cpu_rmap { #define CPU_RMAP_DIST_INF 0xffff extern struct cpu_rmap *alloc_cpu_rmap(unsigned int size, gfp_t flags); - -/** - * free_cpu_rmap - free CPU affinity reverse-map - * @rmap: Reverse-map allocated with alloc_cpu_rmap(), or %NULL - */ -static inline void free_cpu_rmap(struct cpu_rmap *rmap) -{ - kfree(rmap); -} +extern int cpu_rmap_put(struct cpu_rmap *rmap); extern int cpu_rmap_add(struct cpu_rmap *rmap, void *obj); extern int cpu_rmap_update(struct cpu_rmap *rmap, u16 index, diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 5e4e6170f43a..5fa5afeeb759 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -268,11 +268,6 @@ struct irq_affinity_notify { extern int irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify); -static inline void irq_run_affinity_notifiers(void) -{ - flush_scheduled_work(); -} - #else /* CONFIG_SMP */ static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m) diff --git a/lib/cpu_rmap.c b/lib/cpu_rmap.c index 145dec5267c9..5fbed5caba6e 100644 --- a/lib/cpu_rmap.c +++ b/lib/cpu_rmap.c @@ -45,6 +45,7 @@ struct cpu_rmap *alloc_cpu_rmap(unsigned int size, gfp_t flags) if (!rmap) return NULL; + kref_init(&rmap->refcount); rmap->obj = (void **)((char *)rmap + obj_offset); /* Initially assign CPUs to objects on a rota, since we have @@ -63,6 +64,35 @@ struct cpu_rmap *alloc_cpu_rmap(unsigned int size, gfp_t flags) } EXPORT_SYMBOL(alloc_cpu_rmap); +/** + * cpu_rmap_release - internal reclaiming helper called from kref_put + * @ref: kref to struct cpu_rmap + */ +static void cpu_rmap_release(struct kref *ref) +{ + struct cpu_rmap *rmap = container_of(ref, struct cpu_rmap, refcount); + kfree(rmap); +} + +/** + * cpu_rmap_get - internal helper to get new ref on a cpu_rmap + * @rmap: reverse-map allocated with alloc_cpu_rmap() + */ +static inline void cpu_rmap_get(struct cpu_rmap *rmap) +{ + kref_get(&rmap->refcount); +} + +/** + * cpu_rmap_put - release ref on a cpu_rmap + * @rmap: reverse-map allocated with alloc_cpu_rmap() + */ +int cpu_rmap_put(struct cpu_rmap *rmap) +{ + return kref_put(&rmap->refcount, cpu_rmap_release); +} +EXPORT_SYMBOL(cpu_rmap_put); + /* Reevaluate nearest object for given CPU, comparing with the given * neighbours at the given distance. 
*/ @@ -197,8 +227,7 @@ struct irq_glue { * free_irq_cpu_rmap - free a CPU affinity reverse-map used for IRQs * @rmap: Reverse-map allocated with alloc_irq_cpu_map(), or %NULL * - * Must be called in process context, before freeing the IRQs, and - * without holding any locks required by global workqueue items. + * Must be called in process context, before freeing the IRQs. */ void free_irq_cpu_rmap(struct cpu_rmap *rmap) { @@ -212,12 +241,18 @@ void free_irq_cpu_rmap(struct cpu_rmap *rmap) glue = rmap->obj[index]; irq_set_affinity_notifier(glue->notify.irq, NULL); } - irq_run_affinity_notifiers(); - kfree(rmap); + cpu_rmap_put(rmap); } EXPORT_SYMBOL(free_irq_cpu_rmap); +/** + * irq_cpu_rmap_notify - callback for IRQ subsystem when IRQ affinity updated + * @notify: struct irq_affinity_notify passed by irq/manage.c + * @mask: cpu mask for new SMP affinity + * + * This is executed in workqueue context. + */ static void irq_cpu_rmap_notify(struct irq_affinity_notify *notify, const cpumask_t *mask) { @@ -230,10 +265,16 @@ irq_cpu_rmap_notify(struct irq_affinity_notify *notify, const cpumask_t *mask) pr_warning("irq_cpu_rmap_notify: update failed: %d\n", rc); } +/** + * irq_cpu_rmap_release - reclaiming callback for IRQ subsystem + * @ref: kref to struct irq_affinity_notify passed by irq/manage.c + */ static void irq_cpu_rmap_release(struct kref *ref) { struct irq_glue *glue = container_of(ref, struct irq_glue, notify.kref); + + cpu_rmap_put(glue->rmap); kfree(glue); } @@ -258,10 +299,13 @@ int irq_cpu_rmap_add(struct cpu_rmap *rmap, int irq) glue->notify.notify = irq_cpu_rmap_notify; glue->notify.release = irq_cpu_rmap_release; glue->rmap = rmap; + cpu_rmap_get(rmap); glue->index = cpu_rmap_add(rmap, glue); rc = irq_set_affinity_notifier(irq, &glue->notify); - if (rc) + if (rc) { + cpu_rmap_put(glue->rmap); kfree(glue); + } return rc; } EXPORT_SYMBOL(irq_cpu_rmap_add); -- cgit v1.2.3 From 1b963c81b14509e330e0fe3218b645ece2738dc5 Mon Sep 17 00:00:00 2001 From: Jiri Kosina Date: Fri, 11 Jan 2013 14:31:56 -0800 Subject: lockdep, rwsem: provide down_write_nest_lock() down_write_nest_lock() provides a means to annotate locking scenario where an outer lock is guaranteed to serialize the order nested locks are being acquired. This is analogoue to already existing mutex_lock_nest_lock() and spin_lock_nest_lock(). 
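A minimal usage sketch (illustrative only; struct obj, struct grp and their initialization are hypothetical): an outer mutex guarantees the order in which a whole set of nested rwsems is taken, and down_write_nest_lock() tells lockdep about that guarantee.

struct obj {
        struct rw_semaphore     sem;
        /* ... per-object data ... */
};

struct grp {
        struct mutex            big_lock;       /* serializes nested lock order */
        struct obj              *objs;
        int                     nr;
};

static void grp_lock_all(struct grp *g)
{
        int i;

        mutex_lock(&g->big_lock);
        /* Assuming every obj->sem is initialized at the same call site,
         * they share a lock class, so taking a second one while holding
         * the first would normally look like a potential deadlock to
         * lockdep.  big_lock guarantees a single global acquisition order,
         * which is exactly what down_write_nest_lock() documents.
         */
        for (i = 0; i < g->nr; i++)
                down_write_nest_lock(&g->objs[i].sem, &g->big_lock);
}

static void grp_unlock_all(struct grp *g)
{
        int i;

        for (i = g->nr - 1; i >= 0; i--)
                up_write(&g->objs[i].sem);
        mutex_unlock(&g->big_lock);
}
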
Signed-off-by: Jiri Kosina Cc: Rik van Riel Cc: Ingo Molnar Cc: Peter Zijlstra Cc: Mel Gorman Tested-by: Sedat Dilek Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/lockdep.h | 3 +++ include/linux/rwsem.h | 9 +++++++++ kernel/rwsem.c | 10 ++++++++++ 3 files changed, 22 insertions(+) (limited to 'include/linux') diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index 00e46376e28f..2bca44b0893c 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h @@ -524,14 +524,17 @@ static inline void print_irqtrace_events(struct task_struct *curr) #ifdef CONFIG_DEBUG_LOCK_ALLOC # ifdef CONFIG_PROVE_LOCKING # define rwsem_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i) +# define rwsem_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 2, n, i) # define rwsem_acquire_read(l, s, t, i) lock_acquire(l, s, t, 1, 2, NULL, i) # else # define rwsem_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i) +# define rwsem_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 1, n, i) # define rwsem_acquire_read(l, s, t, i) lock_acquire(l, s, t, 1, 1, NULL, i) # endif # define rwsem_release(l, n, i) lock_release(l, n, i) #else # define rwsem_acquire(l, s, t, i) do { } while (0) +# define rwsem_acquire_nest(l, s, t, n, i) do { } while (0) # define rwsem_acquire_read(l, s, t, i) do { } while (0) # define rwsem_release(l, n, i) do { } while (0) #endif diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h index 54bd7cd7ecbd..413cc11e414a 100644 --- a/include/linux/rwsem.h +++ b/include/linux/rwsem.h @@ -125,8 +125,17 @@ extern void downgrade_write(struct rw_semaphore *sem); */ extern void down_read_nested(struct rw_semaphore *sem, int subclass); extern void down_write_nested(struct rw_semaphore *sem, int subclass); +extern void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest_lock); + +# define down_write_nest_lock(sem, nest_lock) \ +do { \ + typecheck(struct lockdep_map *, &(nest_lock)->dep_map); \ + _down_write_nest_lock(sem, &(nest_lock)->dep_map); \ +} while (0); + #else # define down_read_nested(sem, subclass) down_read(sem) +# define down_write_nest_lock(sem, nest_lock) down_read(sem) # define down_write_nested(sem, subclass) down_write(sem) #endif diff --git a/kernel/rwsem.c b/kernel/rwsem.c index 6850f53e02d8..b3c6c3fcd847 100644 --- a/kernel/rwsem.c +++ b/kernel/rwsem.c @@ -116,6 +116,16 @@ void down_read_nested(struct rw_semaphore *sem, int subclass) EXPORT_SYMBOL(down_read_nested); +void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest) +{ + might_sleep(); + rwsem_acquire_nest(&sem->dep_map, 0, 0, nest, _RET_IP_); + + LOCK_CONTENDED(sem, __down_write_trylock, __down_write); +} + +EXPORT_SYMBOL(_down_write_nest_lock); + void down_write_nested(struct rw_semaphore *sem, int subclass) { might_sleep(); -- cgit v1.2.3 From 7b9205bd775afc4439ed86d617f9042ee9e76a71 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Fri, 11 Jan 2013 14:32:05 -0800 Subject: audit: create explicit AUDIT_SECCOMP event type The seccomp path was using AUDIT_ANOM_ABEND from when seccomp mode 1 could only kill a process. While we still want to make sure an audit record is forced on a kill, this should use a separate record type since seccomp mode 2 introduces other behaviors. In the case of "handled" behaviors (process wasn't killed), only emit a record if the process is under inspection. 
This change also fixes userspace examination of seccomp audit events, since it was considered malformed due to missing fields of the AUDIT_ANOM_ABEND event type. Signed-off-by: Kees Cook Cc: Al Viro Cc: Eric Paris Cc: Jeff Layton Cc: "Eric W. Biederman" Cc: Julien Tinnes Acked-by: Will Drewry Acked-by: Steve Grubb Cc: Andrea Arcangeli Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/audit.h | 3 ++- include/uapi/linux/audit.h | 1 + kernel/auditsc.c | 14 +++++++++++--- 3 files changed, 14 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/include/linux/audit.h b/include/linux/audit.h index bce729afbcf9..9d5104d7aba9 100644 --- a/include/linux/audit.h +++ b/include/linux/audit.h @@ -157,7 +157,8 @@ void audit_core_dumps(long signr); static inline void audit_seccomp(unsigned long syscall, long signr, int code) { - if (unlikely(!audit_dummy_context())) + /* Force a record to be reported if a signal was delivered. */ + if (signr || unlikely(!audit_dummy_context())) __audit_seccomp(syscall, signr, code); } diff --git a/include/uapi/linux/audit.h b/include/uapi/linux/audit.h index 76352ac45f24..09a2d94ab113 100644 --- a/include/uapi/linux/audit.h +++ b/include/uapi/linux/audit.h @@ -106,6 +106,7 @@ #define AUDIT_MMAP 1323 /* Record showing descriptor and flags in mmap */ #define AUDIT_NETFILTER_PKT 1324 /* Packets traversing netfilter chains */ #define AUDIT_NETFILTER_CFG 1325 /* Netfilter chain modifications */ +#define AUDIT_SECCOMP 1326 /* Secure Computing event */ #define AUDIT_AVC 1400 /* SE Linux avc denial or grant */ #define AUDIT_SELINUX_ERR 1401 /* Internal SE Linux Errors */ diff --git a/kernel/auditsc.c b/kernel/auditsc.c index e37e6a12c5e3..3e46d1dec613 100644 --- a/kernel/auditsc.c +++ b/kernel/auditsc.c @@ -2675,7 +2675,7 @@ void __audit_mmap_fd(int fd, int flags) context->type = AUDIT_MMAP; } -static void audit_log_abend(struct audit_buffer *ab, char *reason, long signr) +static void audit_log_task(struct audit_buffer *ab) { kuid_t auid, uid; kgid_t gid; @@ -2693,6 +2693,11 @@ static void audit_log_abend(struct audit_buffer *ab, char *reason, long signr) audit_log_task_context(ab); audit_log_format(ab, " pid=%d comm=", current->pid); audit_log_untrustedstring(ab, current->comm); +} + +static void audit_log_abend(struct audit_buffer *ab, char *reason, long signr) +{ + audit_log_task(ab); audit_log_format(ab, " reason="); audit_log_string(ab, reason); audit_log_format(ab, " sig=%ld", signr); @@ -2723,8 +2728,11 @@ void __audit_seccomp(unsigned long syscall, long signr, int code) { struct audit_buffer *ab; - ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_ANOM_ABEND); - audit_log_abend(ab, "seccomp", signr); + ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_SECCOMP); + if (unlikely(!ab)) + return; + audit_log_task(ab); + audit_log_format(ab, " sig=%ld", signr); audit_log_format(ab, " syscall=%ld", syscall); audit_log_format(ab, " compat=%d", is_compat_task()); audit_log_format(ab, " ip=0x%lx", KSTK_EIP(current)); -- cgit v1.2.3 From c0a3a20b6c4b5229ef5d26fd9b1c4b1957632aa7 Mon Sep 17 00:00:00 2001 From: Mike Frysinger Date: Fri, 11 Jan 2013 14:32:13 -0800 Subject: linux/audit.h: move ptrace.h include to kernel header While the kernel internals want pt_regs (and so it includes linux/ptrace.h), the user version of audit.h does not need it. So move the include out of the uapi version. This avoids issues where people want the audit defines and userland ptrace api. 
Including both the kernel ptrace and the userland ptrace headers can easily lead to failure. Signed-off-by: Mike Frysinger Cc: Eric Paris Cc: Al Viro Reviewed-by: Kees Cook Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/audit.h | 1 + include/uapi/linux/audit.h | 1 - 2 files changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/audit.h b/include/linux/audit.h index 9d5104d7aba9..5a6d718adf34 100644 --- a/include/linux/audit.h +++ b/include/linux/audit.h @@ -24,6 +24,7 @@ #define _LINUX_AUDIT_H_ #include +#include #include struct audit_sig_info { diff --git a/include/uapi/linux/audit.h b/include/uapi/linux/audit.h index 09a2d94ab113..9f096f1c0907 100644 --- a/include/uapi/linux/audit.h +++ b/include/uapi/linux/audit.h @@ -26,7 +26,6 @@ #include #include -#include /* The netlink messages for the audit system is divided into blocks: * 1000 - 1099 are for commanding the audit system -- cgit v1.2.3 From 8fb74b9fb2b182d54beee592350d9ea1f325917a Mon Sep 17 00:00:00 2001 From: Mel Gorman Date: Fri, 11 Jan 2013 14:32:16 -0800 Subject: mm: compaction: partially revert capture of suitable high-order page Eric Wong reported on 3.7 and 3.8-rc2 that ppoll() got stuck when waiting for POLLIN on a local TCP socket. It was easier to trigger if there was disk IO and dirty pages at the same time and he bisected it to commit 1fb3f8ca0e92 ("mm: compaction: capture a suitable high-order page immediately when it is made available"). The intention of that patch was to improve high-order allocations under memory pressure after changes made to reclaim in 3.6 drastically hurt THP allocations but the approach was flawed. For Eric, the problem was that page->pfmemalloc was not being cleared for captured pages leading to a poor interaction with swap-over-NFS support causing the packets to be dropped. However, I identified a few more problems with the patch including the fact that it can increase contention on zone->lock in some cases which could result in async direct compaction being aborted early. In retrospect the capture patch took the wrong approach. What it should have done is mark the pageblock being migrated as MIGRATE_ISOLATE if it was allocating for THP and avoided races that way. While the patch was showing to improve allocation success rates at the time, the benefit is marginal given the relative complexity and it should be revisited from scratch in the context of the other reclaim-related changes that have taken place since the patch was first written and tested. This patch partially reverts commit 1fb3f8ca0e92 ("mm: compaction: capture a suitable high-order page immediately when it is made available"). 
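Restated in simplified form (a sketch of the compact_finished() hunk below under a hypothetical helper name, adding nothing beyond what the diff does): the direct compactor goes back to simply inspecting the zone's free lists instead of looking at a captured page.

/* Sketch: the "is a suitable page already free?" check restored by this revert. */
static int suitable_page_free(struct zone *zone, struct compact_control *cc)
{
        unsigned int order;

        for (order = cc->order; order < MAX_ORDER; order++) {
                struct free_area *area = &zone->free_area[order];

                /* A free page of the requested migratetype already exists. */
                if (!list_empty(&area->free_list[cc->migratetype]))
                        return COMPACT_PARTIAL;

                /* Or the allocation would take over a whole pageblock. */
                if (cc->order >= pageblock_order && area->nr_free)
                        return COMPACT_PARTIAL;
        }
        return COMPACT_CONTINUE;
}
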
Reported-and-tested-by: Eric Wong Tested-by: Eric Dumazet Cc: Signed-off-by: Mel Gorman Cc: David Miller Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/compaction.h | 4 +- include/linux/mm.h | 1 - mm/compaction.c | 92 +++++++--------------------------------------- mm/internal.h | 1 - mm/page_alloc.c | 35 ++++-------------- 5 files changed, 23 insertions(+), 110 deletions(-) (limited to 'include/linux') diff --git a/include/linux/compaction.h b/include/linux/compaction.h index 6ecb6dc2f303..cc7bddeaf553 100644 --- a/include/linux/compaction.h +++ b/include/linux/compaction.h @@ -22,7 +22,7 @@ extern int sysctl_extfrag_handler(struct ctl_table *table, int write, extern int fragmentation_index(struct zone *zone, unsigned int order); extern unsigned long try_to_compact_pages(struct zonelist *zonelist, int order, gfp_t gfp_mask, nodemask_t *mask, - bool sync, bool *contended, struct page **page); + bool sync, bool *contended); extern int compact_pgdat(pg_data_t *pgdat, int order); extern void reset_isolation_suitable(pg_data_t *pgdat); extern unsigned long compaction_suitable(struct zone *zone, int order); @@ -75,7 +75,7 @@ static inline bool compaction_restarting(struct zone *zone, int order) #else static inline unsigned long try_to_compact_pages(struct zonelist *zonelist, int order, gfp_t gfp_mask, nodemask_t *nodemask, - bool sync, bool *contended, struct page **page) + bool sync, bool *contended) { return COMPACT_CONTINUE; } diff --git a/include/linux/mm.h b/include/linux/mm.h index 63204078f72b..66e2f7c61e5c 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -455,7 +455,6 @@ void put_pages_list(struct list_head *pages); void split_page(struct page *page, unsigned int order); int split_free_page(struct page *page); -int capture_free_page(struct page *page, int alloc_order, int migratetype); /* * Compound pages have a destructor function. Provide a diff --git a/mm/compaction.c b/mm/compaction.c index f8f5c111b7d7..c62bd063d766 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -816,6 +816,7 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone, static int compact_finished(struct zone *zone, struct compact_control *cc) { + unsigned int order; unsigned long watermark; if (fatal_signal_pending(current)) @@ -850,22 +851,16 @@ static int compact_finished(struct zone *zone, return COMPACT_CONTINUE; /* Direct compactor: Is a suitable page free? */ - if (cc->page) { - /* Was a suitable page captured? 
*/ - if (*cc->page) + for (order = cc->order; order < MAX_ORDER; order++) { + struct free_area *area = &zone->free_area[order]; + + /* Job done if page is free of the right migratetype */ + if (!list_empty(&area->free_list[cc->migratetype])) + return COMPACT_PARTIAL; + + /* Job done if allocation would set block type */ + if (cc->order >= pageblock_order && area->nr_free) return COMPACT_PARTIAL; - } else { - unsigned int order; - for (order = cc->order; order < MAX_ORDER; order++) { - struct free_area *area = &zone->free_area[cc->order]; - /* Job done if page is free of the right migratetype */ - if (!list_empty(&area->free_list[cc->migratetype])) - return COMPACT_PARTIAL; - - /* Job done if allocation would set block type */ - if (cc->order >= pageblock_order && area->nr_free) - return COMPACT_PARTIAL; - } } return COMPACT_CONTINUE; @@ -921,60 +916,6 @@ unsigned long compaction_suitable(struct zone *zone, int order) return COMPACT_CONTINUE; } -static void compact_capture_page(struct compact_control *cc) -{ - unsigned long flags; - int mtype, mtype_low, mtype_high; - - if (!cc->page || *cc->page) - return; - - /* - * For MIGRATE_MOVABLE allocations we capture a suitable page ASAP - * regardless of the migratetype of the freelist is is captured from. - * This is fine because the order for a high-order MIGRATE_MOVABLE - * allocation is typically at least a pageblock size and overall - * fragmentation is not impaired. Other allocation types must - * capture pages from their own migratelist because otherwise they - * could pollute other pageblocks like MIGRATE_MOVABLE with - * difficult to move pages and making fragmentation worse overall. - */ - if (cc->migratetype == MIGRATE_MOVABLE) { - mtype_low = 0; - mtype_high = MIGRATE_PCPTYPES; - } else { - mtype_low = cc->migratetype; - mtype_high = cc->migratetype + 1; - } - - /* Speculatively examine the free lists without zone lock */ - for (mtype = mtype_low; mtype < mtype_high; mtype++) { - int order; - for (order = cc->order; order < MAX_ORDER; order++) { - struct page *page; - struct free_area *area; - area = &(cc->zone->free_area[order]); - if (list_empty(&area->free_list[mtype])) - continue; - - /* Take the lock and attempt capture of the page */ - if (!compact_trylock_irqsave(&cc->zone->lock, &flags, cc)) - return; - if (!list_empty(&area->free_list[mtype])) { - page = list_entry(area->free_list[mtype].next, - struct page, lru); - if (capture_free_page(page, cc->order, mtype)) { - spin_unlock_irqrestore(&cc->zone->lock, - flags); - *cc->page = page; - return; - } - } - spin_unlock_irqrestore(&cc->zone->lock, flags); - } - } -} - static int compact_zone(struct zone *zone, struct compact_control *cc) { int ret; @@ -1054,9 +995,6 @@ static int compact_zone(struct zone *zone, struct compact_control *cc) goto out; } } - - /* Capture a page now if it is a suitable size */ - compact_capture_page(cc); } out: @@ -1069,8 +1007,7 @@ out: static unsigned long compact_zone_order(struct zone *zone, int order, gfp_t gfp_mask, - bool sync, bool *contended, - struct page **page) + bool sync, bool *contended) { unsigned long ret; struct compact_control cc = { @@ -1080,7 +1017,6 @@ static unsigned long compact_zone_order(struct zone *zone, .migratetype = allocflags_to_migratetype(gfp_mask), .zone = zone, .sync = sync, - .page = page, }; INIT_LIST_HEAD(&cc.freepages); INIT_LIST_HEAD(&cc.migratepages); @@ -1110,7 +1046,7 @@ int sysctl_extfrag_threshold = 500; */ unsigned long try_to_compact_pages(struct zonelist *zonelist, int order, gfp_t gfp_mask, nodemask_t 
*nodemask, - bool sync, bool *contended, struct page **page) + bool sync, bool *contended) { enum zone_type high_zoneidx = gfp_zone(gfp_mask); int may_enter_fs = gfp_mask & __GFP_FS; @@ -1136,7 +1072,7 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist, int status; status = compact_zone_order(zone, order, gfp_mask, sync, - contended, page); + contended); rc = max(status, rc); /* If a normal allocation would succeed, stop compacting */ @@ -1192,7 +1128,6 @@ int compact_pgdat(pg_data_t *pgdat, int order) struct compact_control cc = { .order = order, .sync = false, - .page = NULL, }; return __compact_pgdat(pgdat, &cc); @@ -1203,7 +1138,6 @@ static int compact_node(int nid) struct compact_control cc = { .order = -1, .sync = true, - .page = NULL, }; return __compact_pgdat(NODE_DATA(nid), &cc); diff --git a/mm/internal.h b/mm/internal.h index d597f94cc205..9ba21100ebf3 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -135,7 +135,6 @@ struct compact_control { int migratetype; /* MOVABLE, RECLAIMABLE etc */ struct zone *zone; bool contended; /* True if a lock was contended */ - struct page **page; /* Page captured of requested size */ }; unsigned long diff --git a/mm/page_alloc.c b/mm/page_alloc.c index c957805a7f0e..df2022ff0c8a 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -1384,14 +1384,8 @@ void split_page(struct page *page, unsigned int order) set_page_refcounted(page + i); } -/* - * Similar to the split_page family of functions except that the page - * required at the given order and being isolated now to prevent races - * with parallel allocators - */ -int capture_free_page(struct page *page, int alloc_order, int migratetype) +static int __isolate_free_page(struct page *page, unsigned int order) { - unsigned int order; unsigned long watermark; struct zone *zone; int mt; @@ -1399,7 +1393,6 @@ int capture_free_page(struct page *page, int alloc_order, int migratetype) BUG_ON(!PageBuddy(page)); zone = page_zone(page); - order = page_order(page); mt = get_pageblock_migratetype(page); if (mt != MIGRATE_ISOLATE) { @@ -1408,7 +1401,7 @@ int capture_free_page(struct page *page, int alloc_order, int migratetype) if (!zone_watermark_ok(zone, 0, watermark, 0, 0)) return 0; - __mod_zone_freepage_state(zone, -(1UL << alloc_order), mt); + __mod_zone_freepage_state(zone, -(1UL << order), mt); } /* Remove page from free list */ @@ -1416,11 +1409,7 @@ int capture_free_page(struct page *page, int alloc_order, int migratetype) zone->free_area[order].nr_free--; rmv_page_order(page); - if (alloc_order != order) - expand(zone, page, alloc_order, order, - &zone->free_area[order], migratetype); - - /* Set the pageblock if the captured page is at least a pageblock */ + /* Set the pageblock if the isolated page is at least a pageblock */ if (order >= pageblock_order - 1) { struct page *endpage = page + (1 << order) - 1; for (; page < endpage; page += pageblock_nr_pages) { @@ -1431,7 +1420,7 @@ int capture_free_page(struct page *page, int alloc_order, int migratetype) } } - return 1UL << alloc_order; + return 1UL << order; } /* @@ -1449,10 +1438,9 @@ int split_free_page(struct page *page) unsigned int order; int nr_pages; - BUG_ON(!PageBuddy(page)); order = page_order(page); - nr_pages = capture_free_page(page, order, 0); + nr_pages = __isolate_free_page(page, order); if (!nr_pages) return 0; @@ -2136,8 +2124,6 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, bool *contended_compaction, bool *deferred_compaction, unsigned long *did_some_progress) { - struct page *page = NULL; - 
if (!order) return NULL; @@ -2149,16 +2135,12 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, current->flags |= PF_MEMALLOC; *did_some_progress = try_to_compact_pages(zonelist, order, gfp_mask, nodemask, sync_migration, - contended_compaction, &page); + contended_compaction); current->flags &= ~PF_MEMALLOC; - /* If compaction captured a page, prep and use it */ - if (page) { - prep_new_page(page, order, gfp_mask); - goto got_page; - } - if (*did_some_progress != COMPACT_SKIPPED) { + struct page *page; + /* Page migration frees to the PCP lists but we want merging */ drain_pages(get_cpu()); put_cpu(); @@ -2168,7 +2150,6 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, alloc_flags & ~ALLOC_NO_WATERMARKS, preferred_zone, migratetype); if (page) { -got_page: preferred_zone->compact_blockskip_flush = false; preferred_zone->compact_considered = 0; preferred_zone->compact_defer_shift = 0; -- cgit v1.2.3 From 3cb7a56344ca45ee56d71c5f8fe9f922306bff1f Mon Sep 17 00:00:00 2001 From: Michel Lespinasse Date: Fri, 11 Jan 2013 14:32:20 -0800 Subject: lib/rbtree.c: avoid the use of non-static __always_inline lib/rbtree.c declared __rb_erase_color() as __always_inline void, and then exported it with EXPORT_SYMBOL. This was because __rb_erase_color() must be exported for augmented rbtree users, but it must also be inlined into rb_erase() so that the dummy callback can get optimized out of that call site. (Actually with a modern compiler, none of the dummy callback functions should even be generated as separate text functions). The above usage is legal C, but it was unusual enough for some compilers to warn about it. This change makes things more explicit, with a static __always_inline ____rb_erase_color function for use in rb_erase(), and a separate non-inline __rb_erase_color function for use in rb_erase_augmented call sites. 
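The general shape of that arrangement is shown below (a stand-alone sketch of the idiom, not the rbtree code itself; do_work and friends are made-up names):

/* A dummy callback plus a static __always_inline core lets the fast path
 * drop the callback entirely, while an out-of-line wrapper stays available
 * for callers that supply a real callback.
 */
static void dummy_cb(int x) { }

static __always_inline void __do_work(int x, void (*cb)(int))
{
        /* ... the real work on x ... */
        cb(x);
}

/* Out-of-line, exportable version for augmented users. */
void do_work_augmented(int x, void (*cb)(int))
{
        __do_work(x, cb);
}

/* Plain version: __do_work() is inlined here and the call through the
 * constant dummy_cb pointer can be optimized away, so no separate dummy
 * callback needs to survive in the generated code.
 */
void do_work(int x)
{
        __do_work(x, dummy_cb);
}
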
Signed-off-by: Michel Lespinasse Reported-by: Wu Fengguang Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/rbtree_augmented.h | 14 +++++++++++--- lib/rbtree.c | 20 +++++++++++++++++--- 2 files changed, 28 insertions(+), 6 deletions(-) (limited to 'include/linux') diff --git a/include/linux/rbtree_augmented.h b/include/linux/rbtree_augmented.h index 2ac60c9cf644..fea49b5da12a 100644 --- a/include/linux/rbtree_augmented.h +++ b/include/linux/rbtree_augmented.h @@ -123,9 +123,9 @@ __rb_change_child(struct rb_node *old, struct rb_node *new, extern void __rb_erase_color(struct rb_node *parent, struct rb_root *root, void (*augment_rotate)(struct rb_node *old, struct rb_node *new)); -static __always_inline void -rb_erase_augmented(struct rb_node *node, struct rb_root *root, - const struct rb_augment_callbacks *augment) +static __always_inline struct rb_node * +__rb_erase_augmented(struct rb_node *node, struct rb_root *root, + const struct rb_augment_callbacks *augment) { struct rb_node *child = node->rb_right, *tmp = node->rb_left; struct rb_node *parent, *rebalance; @@ -217,6 +217,14 @@ rb_erase_augmented(struct rb_node *node, struct rb_root *root, } augment->propagate(tmp, NULL); + return rebalance; +} + +static __always_inline void +rb_erase_augmented(struct rb_node *node, struct rb_root *root, + const struct rb_augment_callbacks *augment) +{ + struct rb_node *rebalance = __rb_erase_augmented(node, root, augment); if (rebalance) __rb_erase_color(rebalance, root, augment->rotate); } diff --git a/lib/rbtree.c b/lib/rbtree.c index 4f56a11d67fa..c0e31fe2fabf 100644 --- a/lib/rbtree.c +++ b/lib/rbtree.c @@ -194,8 +194,12 @@ __rb_insert(struct rb_node *node, struct rb_root *root, } } -__always_inline void -__rb_erase_color(struct rb_node *parent, struct rb_root *root, +/* + * Inline version for rb_erase() use - we want to be able to inline + * and eliminate the dummy_rotate callback there + */ +static __always_inline void +____rb_erase_color(struct rb_node *parent, struct rb_root *root, void (*augment_rotate)(struct rb_node *old, struct rb_node *new)) { struct rb_node *node = NULL, *sibling, *tmp1, *tmp2; @@ -355,6 +359,13 @@ __rb_erase_color(struct rb_node *parent, struct rb_root *root, } } } + +/* Non-inline version for rb_erase_augmented() use */ +void __rb_erase_color(struct rb_node *parent, struct rb_root *root, + void (*augment_rotate)(struct rb_node *old, struct rb_node *new)) +{ + ____rb_erase_color(parent, root, augment_rotate); +} EXPORT_SYMBOL(__rb_erase_color); /* @@ -380,7 +391,10 @@ EXPORT_SYMBOL(rb_insert_color); void rb_erase(struct rb_node *node, struct rb_root *root) { - rb_erase_augmented(node, root, &dummy_callbacks); + struct rb_node *rebalance; + rebalance = __rb_erase_augmented(node, root, &dummy_callbacks); + if (rebalance) + ____rb_erase_color(rebalance, root, dummy_rotate); } EXPORT_SYMBOL(rb_erase); -- cgit v1.2.3 From d07d7507bfb4e23735c9b83e397c43e1e8a173e8 Mon Sep 17 00:00:00 2001 From: Stanislaw Gruszka Date: Thu, 10 Jan 2013 23:19:10 +0000 Subject: net, wireless: overwrite default_ethtool_ops Since: commit 2c60db037034d27f8c636403355d52872da92f81 Author: Eric Dumazet Date: Sun Sep 16 09:17:26 2012 +0000 net: provide a default dev->ethtool_ops wireless core does not correctly assign ethtool_ops. After alloc_netdev*() call, some cfg80211 drivers provide they own ethtool_ops, but some do not. 
For them, wireless core provide generic cfg80211_ethtool_ops, which is assigned in NETDEV_REGISTER notify call: if (!dev->ethtool_ops) dev->ethtool_ops = &cfg80211_ethtool_ops; But after Eric's commit, dev->ethtool_ops is no longer NULL (on cfg80211 drivers without custom ethtool_ops), but points to &default_ethtool_ops. In order to fix the problem, provide function which will overwrite default_ethtool_ops and use it by wireless core. Signed-off-by: Stanislaw Gruszka Acked-by: Johannes Berg Acked-by: Ben Hutchings Signed-off-by: David S. Miller --- include/linux/netdevice.h | 3 +++ net/core/dev.c | 8 ++++++++ net/wireless/core.c | 3 +-- 3 files changed, 12 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index c599e4782d45..9ef07d0868b6 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -60,6 +60,9 @@ struct wireless_dev; #define SET_ETHTOOL_OPS(netdev,ops) \ ( (netdev)->ethtool_ops = (ops) ) +extern void netdev_set_default_ethtool_ops(struct net_device *dev, + const struct ethtool_ops *ops); + /* hardware address assignment types */ #define NET_ADDR_PERM 0 /* address is permanent (default) */ #define NET_ADDR_RANDOM 1 /* address is generated randomly */ diff --git a/net/core/dev.c b/net/core/dev.c index 515473ee52cb..f64e439b4a00 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -6121,6 +6121,14 @@ struct netdev_queue *dev_ingress_queue_create(struct net_device *dev) static const struct ethtool_ops default_ethtool_ops; +void netdev_set_default_ethtool_ops(struct net_device *dev, + const struct ethtool_ops *ops) +{ + if (dev->ethtool_ops == &default_ethtool_ops) + dev->ethtool_ops = ops; +} +EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops); + /** * alloc_netdev_mqs - allocate network device * @sizeof_priv: size of private data to allocate space for diff --git a/net/wireless/core.c b/net/wireless/core.c index 14d990400354..b677eab55b68 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c @@ -866,8 +866,7 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb, /* allow mac80211 to determine the timeout */ wdev->ps_timeout = -1; - if (!dev->ethtool_ops) - dev->ethtool_ops = &cfg80211_ethtool_ops; + netdev_set_default_ethtool_ops(dev, &cfg80211_ethtool_ops); if ((wdev->iftype == NL80211_IFTYPE_STATION || wdev->iftype == NL80211_IFTYPE_P2P_CLIENT || -- cgit v1.2.3 From 0d21b0e3477395e7ff2acc269f15df6e6a8d356d Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Sat, 12 Jan 2013 11:38:44 +1030 Subject: module: add new state MODULE_STATE_UNFORMED. You should never look at such a module, so it's excised from all paths which traverse the modules list. We add the state at the end, to avoid gratuitous ABI break (ksplice). Signed-off-by: Rusty Russell --- include/linux/module.h | 10 ++++---- kernel/debug/kdb/kdb_main.c | 2 ++ kernel/module.c | 57 +++++++++++++++++++++++++++++++++++++++++---- 3 files changed, 59 insertions(+), 10 deletions(-) (limited to 'include/linux') diff --git a/include/linux/module.h b/include/linux/module.h index 7760c6d344a3..1375ee3f03aa 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -199,11 +199,11 @@ struct module_use { struct module *source, *target; }; -enum module_state -{ - MODULE_STATE_LIVE, - MODULE_STATE_COMING, - MODULE_STATE_GOING, +enum module_state { + MODULE_STATE_LIVE, /* Normal state. */ + MODULE_STATE_COMING, /* Full formed, running module_init. */ + MODULE_STATE_GOING, /* Going away. 
*/ + MODULE_STATE_UNFORMED, /* Still setting it up. */ }; /** diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c index 4d5f8d5612f3..8875254120b6 100644 --- a/kernel/debug/kdb/kdb_main.c +++ b/kernel/debug/kdb/kdb_main.c @@ -1970,6 +1970,8 @@ static int kdb_lsmod(int argc, const char **argv) kdb_printf("Module Size modstruct Used by\n"); list_for_each_entry(mod, kdb_modules, list) { + if (mod->state == MODULE_STATE_UNFORMED) + continue; kdb_printf("%-20s%8u 0x%p ", mod->name, mod->core_size, (void *)mod); diff --git a/kernel/module.c b/kernel/module.c index 41bc1189b061..c3a2ee8e3679 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -188,6 +188,7 @@ struct load_info { ongoing or failed initialization etc. */ static inline int strong_try_module_get(struct module *mod) { + BUG_ON(mod && mod->state == MODULE_STATE_UNFORMED); if (mod && mod->state == MODULE_STATE_COMING) return -EBUSY; if (try_module_get(mod)) @@ -343,6 +344,9 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr, #endif }; + if (mod->state == MODULE_STATE_UNFORMED) + continue; + if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data)) return true; } @@ -450,16 +454,24 @@ const struct kernel_symbol *find_symbol(const char *name, EXPORT_SYMBOL_GPL(find_symbol); /* Search for module by name: must hold module_mutex. */ -struct module *find_module(const char *name) +static struct module *find_module_all(const char *name, + bool even_unformed) { struct module *mod; list_for_each_entry(mod, &modules, list) { + if (!even_unformed && mod->state == MODULE_STATE_UNFORMED) + continue; if (strcmp(mod->name, name) == 0) return mod; } return NULL; } + +struct module *find_module(const char *name) +{ + return find_module_all(name, false); +} EXPORT_SYMBOL_GPL(find_module); #ifdef CONFIG_SMP @@ -525,6 +537,8 @@ bool is_module_percpu_address(unsigned long addr) preempt_disable(); list_for_each_entry_rcu(mod, &modules, list) { + if (mod->state == MODULE_STATE_UNFORMED) + continue; if (!mod->percpu_size) continue; for_each_possible_cpu(cpu) { @@ -1048,6 +1062,8 @@ static ssize_t show_initstate(struct module_attribute *mattr, case MODULE_STATE_GOING: state = "going"; break; + default: + BUG(); } return sprintf(buffer, "%s\n", state); } @@ -1786,6 +1802,8 @@ void set_all_modules_text_rw(void) mutex_lock(&module_mutex); list_for_each_entry_rcu(mod, &modules, list) { + if (mod->state == MODULE_STATE_UNFORMED) + continue; if ((mod->module_core) && (mod->core_text_size)) { set_page_attributes(mod->module_core, mod->module_core + mod->core_text_size, @@ -1807,6 +1825,8 @@ void set_all_modules_text_ro(void) mutex_lock(&module_mutex); list_for_each_entry_rcu(mod, &modules, list) { + if (mod->state == MODULE_STATE_UNFORMED) + continue; if ((mod->module_core) && (mod->core_text_size)) { set_page_attributes(mod->module_core, mod->module_core + mod->core_text_size, @@ -2998,7 +3018,8 @@ static bool finished_loading(const char *name) mutex_lock(&module_mutex); mod = find_module(name); - ret = !mod || mod->state != MODULE_STATE_COMING; + ret = !mod || mod->state == MODULE_STATE_LIVE + || mod->state == MODULE_STATE_GOING; mutex_unlock(&module_mutex); return ret; @@ -3361,6 +3382,8 @@ const char *module_address_lookup(unsigned long addr, preempt_disable(); list_for_each_entry_rcu(mod, &modules, list) { + if (mod->state == MODULE_STATE_UNFORMED) + continue; if (within_module_init(addr, mod) || within_module_core(addr, mod)) { if (modname) @@ -3384,6 +3407,8 @@ int lookup_module_symbol_name(unsigned long addr, char 
*symname) preempt_disable(); list_for_each_entry_rcu(mod, &modules, list) { + if (mod->state == MODULE_STATE_UNFORMED) + continue; if (within_module_init(addr, mod) || within_module_core(addr, mod)) { const char *sym; @@ -3408,6 +3433,8 @@ int lookup_module_symbol_attrs(unsigned long addr, unsigned long *size, preempt_disable(); list_for_each_entry_rcu(mod, &modules, list) { + if (mod->state == MODULE_STATE_UNFORMED) + continue; if (within_module_init(addr, mod) || within_module_core(addr, mod)) { const char *sym; @@ -3435,6 +3462,8 @@ int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type, preempt_disable(); list_for_each_entry_rcu(mod, &modules, list) { + if (mod->state == MODULE_STATE_UNFORMED) + continue; if (symnum < mod->num_symtab) { *value = mod->symtab[symnum].st_value; *type = mod->symtab[symnum].st_info; @@ -3477,9 +3506,12 @@ unsigned long module_kallsyms_lookup_name(const char *name) ret = mod_find_symname(mod, colon+1); *colon = ':'; } else { - list_for_each_entry_rcu(mod, &modules, list) + list_for_each_entry_rcu(mod, &modules, list) { + if (mod->state == MODULE_STATE_UNFORMED) + continue; if ((ret = mod_find_symname(mod, name)) != 0) break; + } } preempt_enable(); return ret; @@ -3494,6 +3526,8 @@ int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *, int ret; list_for_each_entry(mod, &modules, list) { + if (mod->state == MODULE_STATE_UNFORMED) + continue; for (i = 0; i < mod->num_symtab; i++) { ret = fn(data, mod->strtab + mod->symtab[i].st_name, mod, mod->symtab[i].st_value); @@ -3509,6 +3543,7 @@ static char *module_flags(struct module *mod, char *buf) { int bx = 0; + BUG_ON(mod->state == MODULE_STATE_UNFORMED); if (mod->taints || mod->state == MODULE_STATE_GOING || mod->state == MODULE_STATE_COMING) { @@ -3550,6 +3585,10 @@ static int m_show(struct seq_file *m, void *p) struct module *mod = list_entry(p, struct module, list); char buf[8]; + /* We always ignore unformed modules. 
*/ + if (mod->state == MODULE_STATE_UNFORMED) + return 0; + seq_printf(m, "%s %u", mod->name, mod->init_size + mod->core_size); print_unload_info(m, mod); @@ -3610,6 +3649,8 @@ const struct exception_table_entry *search_module_extables(unsigned long addr) preempt_disable(); list_for_each_entry_rcu(mod, &modules, list) { + if (mod->state == MODULE_STATE_UNFORMED) + continue; if (mod->num_exentries == 0) continue; @@ -3658,10 +3699,13 @@ struct module *__module_address(unsigned long addr) if (addr < module_addr_min || addr > module_addr_max) return NULL; - list_for_each_entry_rcu(mod, &modules, list) + list_for_each_entry_rcu(mod, &modules, list) { + if (mod->state == MODULE_STATE_UNFORMED) + continue; if (within_module_core(addr, mod) || within_module_init(addr, mod)) return mod; + } return NULL; } EXPORT_SYMBOL_GPL(__module_address); @@ -3714,8 +3758,11 @@ void print_modules(void) printk(KERN_DEFAULT "Modules linked in:"); /* Most callers should already have preempt disabled, but make sure */ preempt_disable(); - list_for_each_entry_rcu(mod, &modules, list) + list_for_each_entry_rcu(mod, &modules, list) { + if (mod->state == MODULE_STATE_UNFORMED) + continue; printk(" %s%s", mod->name, module_flags(mod, buf)); + } preempt_enable(); if (last_unloaded_module[0]) printk(" [last unloaded: %s]", last_unloaded_module); -- cgit v1.2.3 From 803739d25c2343da6d2f95eebdcbc08bf67097d4 Mon Sep 17 00:00:00 2001 From: Shane Huang Date: Mon, 17 Dec 2012 23:18:59 +0800 Subject: [libata] replace sata_settings with devslp_timing NCQ capability was used to check availability of SATA Settings page from Identify Device Data Log, which contains DevSlp timing variables. It does not work on some HDDs and leads to error messages. IDENTIFY word 78 bit 5(Hardware Feature Control) can't work either because it is only the sufficient condition of Identify Device data log, not the necessary condition. This patch replaced ata_device->sata_settings with ->devslp_timing to only save DevSlp timing variables(8 bytes), instead of the whole SATA Settings page(512 bytes). Addresses https://bugzilla.kernel.org/show_bug.cgi?id=51881 Reported-by: Borislav Petkov Signed-off-by: Shane Huang Cc: stable@vger.kernel.org Signed-off-by: Jeff Garzik --- drivers/ata/libahci.c | 6 +++--- drivers/ata/libata-core.c | 22 +++++++++++++--------- include/linux/ata.h | 8 +++++--- include/linux/libata.h | 4 ++-- 4 files changed, 23 insertions(+), 17 deletions(-) (limited to 'include/linux') diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c index 320712a7b9ea..6cd7805e47ca 100644 --- a/drivers/ata/libahci.c +++ b/drivers/ata/libahci.c @@ -1951,13 +1951,13 @@ static void ahci_set_aggressive_devslp(struct ata_port *ap, bool sleep) /* Use the nominal value 10 ms if the read MDAT is zero, * the nominal value of DETO is 20 ms. 
*/ - if (dev->sata_settings[ATA_LOG_DEVSLP_VALID] & + if (dev->devslp_timing[ATA_LOG_DEVSLP_VALID] & ATA_LOG_DEVSLP_VALID_MASK) { - mdat = dev->sata_settings[ATA_LOG_DEVSLP_MDAT] & + mdat = dev->devslp_timing[ATA_LOG_DEVSLP_MDAT] & ATA_LOG_DEVSLP_MDAT_MASK; if (!mdat) mdat = 10; - deto = dev->sata_settings[ATA_LOG_DEVSLP_DETO]; + deto = dev->devslp_timing[ATA_LOG_DEVSLP_DETO]; if (!deto) deto = 20; } else { diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 9e8b99af400d..46cd3f4c6aaa 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -2325,24 +2325,28 @@ int ata_dev_configure(struct ata_device *dev) } } - /* check and mark DevSlp capability */ - if (ata_id_has_devslp(dev->id)) - dev->flags |= ATA_DFLAG_DEVSLP; - - /* Obtain SATA Settings page from Identify Device Data Log, - * which contains DevSlp timing variables etc. - * Exclude old devices with ata_id_has_ncq() + /* Check and mark DevSlp capability. Get DevSlp timing variables + * from SATA Settings page of Identify Device Data Log. */ - if (ata_id_has_ncq(dev->id)) { + if (ata_id_has_devslp(dev->id)) { + u8 sata_setting[ATA_SECT_SIZE]; + int i, j; + + dev->flags |= ATA_DFLAG_DEVSLP; err_mask = ata_read_log_page(dev, ATA_LOG_SATA_ID_DEV_DATA, ATA_LOG_SATA_SETTINGS, - dev->sata_settings, + sata_setting, 1); if (err_mask) ata_dev_dbg(dev, "failed to get Identify Device Data, Emask 0x%x\n", err_mask); + else + for (i = 0; i < ATA_LOG_DEVSLP_SIZE; i++) { + j = ATA_LOG_DEVSLP_OFFSET + i; + dev->devslp_timing[i] = sata_setting[j]; + } } dev->cdb_len = 16; diff --git a/include/linux/ata.h b/include/linux/ata.h index 408da9502177..8f7a3d68371a 100644 --- a/include/linux/ata.h +++ b/include/linux/ata.h @@ -297,10 +297,12 @@ enum { ATA_LOG_SATA_NCQ = 0x10, ATA_LOG_SATA_ID_DEV_DATA = 0x30, ATA_LOG_SATA_SETTINGS = 0x08, - ATA_LOG_DEVSLP_MDAT = 0x30, + ATA_LOG_DEVSLP_OFFSET = 0x30, + ATA_LOG_DEVSLP_SIZE = 0x08, + ATA_LOG_DEVSLP_MDAT = 0x00, ATA_LOG_DEVSLP_MDAT_MASK = 0x1F, - ATA_LOG_DEVSLP_DETO = 0x31, - ATA_LOG_DEVSLP_VALID = 0x37, + ATA_LOG_DEVSLP_DETO = 0x01, + ATA_LOG_DEVSLP_VALID = 0x07, ATA_LOG_DEVSLP_VALID_MASK = 0x80, /* READ/WRITE LONG (obsolete) */ diff --git a/include/linux/libata.h b/include/linux/libata.h index 83ba0ab2c915..649e5f86b5f0 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -652,8 +652,8 @@ struct ata_device { u32 gscr[SATA_PMP_GSCR_DWORDS]; /* PMP GSCR block */ }; - /* Identify Device Data Log (30h), SATA Settings (page 08h) */ - u8 sata_settings[ATA_SECT_SIZE]; + /* DEVSLP Timing Variables from Identify Device Data Log */ + u8 devslp_timing[ATA_LOG_DEVSLP_SIZE]; /* error history */ int spdn_cnt; -- cgit v1.2.3 From 8aef33a7cf40ca9da188e8578b2abe7267a38c52 Mon Sep 17 00:00:00 2001 From: Daniel Lezcano Date: Tue, 15 Jan 2013 14:18:04 +0100 Subject: cpuidle: remove the power_specified field in the driver We realized that the power usage field is never filled and when it is filled for tegra, the power_specified flag is not set causing all of these values to be reset when the driver is initialized with set_power_state(). However, the power_specified flag can be simply removed under the assumption that the states are always backward sorted, which is the case with the current code. This change allows the menu governor select function and the cpuidle_play_dead() to be simplified. Moreover, the set_power_states() function can removed as it does not make sense any more. 
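(For illustration, a sketch of the simplified cpuidle_play_dead() search from the hunk below, relying only on the documented ordering: because drv->states[] is sorted by decreasing power consumption, the last state that supports enter_dead is the lowest-power one.)

        /* Scan backwards: the last enter_dead-capable state is the
         * lowest-power one under the ordering assumption.
         */
        for (i = drv->state_count - 1; i >= CPUIDLE_DRIVER_STATE_START; i--)
                if (drv->states[i].enter_dead)
                        return drv->states[i].enter_dead(dev, i);
        return -ENODEV;
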
Drop the power_specified flag from struct cpuidle_driver and make the related changes as described above. As a consequence, this also fixes the bug where on the dynamic C-states system, the power fields are not initialized. [rjw: Changelog] References: https://bugzilla.kernel.org/show_bug.cgi?id=42870 References: https://bugzilla.kernel.org/show_bug.cgi?id=43349 References: https://lkml.org/lkml/2012/10/16/518 Signed-off-by: Daniel Lezcano Signed-off-by: Rafael J. Wysocki --- drivers/cpuidle/cpuidle.c | 17 ++++------------- drivers/cpuidle/driver.c | 25 ------------------------- drivers/cpuidle/governors/menu.c | 8 ++------ include/linux/cpuidle.h | 2 +- 4 files changed, 7 insertions(+), 45 deletions(-) (limited to 'include/linux') diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c index fb4a7dd57f94..e1f6860e069c 100644 --- a/drivers/cpuidle/cpuidle.c +++ b/drivers/cpuidle/cpuidle.c @@ -69,24 +69,15 @@ int cpuidle_play_dead(void) { struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices); struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev); - int i, dead_state = -1; - int power_usage = INT_MAX; + int i; if (!drv) return -ENODEV; /* Find lowest-power state that supports long-term idle */ - for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++) { - struct cpuidle_state *s = &drv->states[i]; - - if (s->power_usage < power_usage && s->enter_dead) { - power_usage = s->power_usage; - dead_state = i; - } - } - - if (dead_state != -1) - return drv->states[dead_state].enter_dead(dev, dead_state); + for (i = drv->state_count - 1; i >= CPUIDLE_DRIVER_STATE_START; i--) + if (drv->states[i].enter_dead) + return drv->states[i].enter_dead(dev, i); return -ENODEV; } diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c index c2b281afe0ed..422c7b69ba7c 100644 --- a/drivers/cpuidle/driver.c +++ b/drivers/cpuidle/driver.c @@ -19,34 +19,9 @@ DEFINE_SPINLOCK(cpuidle_driver_lock); static void __cpuidle_set_cpu_driver(struct cpuidle_driver *drv, int cpu); static struct cpuidle_driver * __cpuidle_get_cpu_driver(int cpu); -static void set_power_states(struct cpuidle_driver *drv) -{ - int i; - - /* - * cpuidle driver should set the drv->power_specified bit - * before registering if the driver provides - * power_usage numbers. - * - * If power_specified is not set, - * we fill in power_usage with decreasing values as the - * cpuidle code has an implicit assumption that state Cn - * uses less power than C(n-1). - * - * With CONFIG_ARCH_HAS_CPU_RELAX, C0 is already assigned - * an power value of -1. So we use -2, -3, etc, for other - * c-states. 
- */ - for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++) - drv->states[i].power_usage = -1 - i; -} - static void __cpuidle_driver_init(struct cpuidle_driver *drv) { drv->refcnt = 0; - - if (!drv->power_specified) - set_power_states(drv); } static int __cpuidle_register_driver(struct cpuidle_driver *drv, int cpu) diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c index 20ea33afdda1..fe343a06b7da 100644 --- a/drivers/cpuidle/governors/menu.c +++ b/drivers/cpuidle/governors/menu.c @@ -312,7 +312,6 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev) { struct menu_device *data = &__get_cpu_var(menu_devices); int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY); - int power_usage = INT_MAX; int i; int multiplier; struct timespec t; @@ -383,11 +382,8 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev) if (s->exit_latency * multiplier > data->predicted_us) continue; - if (s->power_usage < power_usage) { - power_usage = s->power_usage; - data->last_state_idx = i; - data->exit_us = s->exit_latency; - } + data->last_state_idx = i; + data->exit_us = s->exit_latency; } /* not deepest C-state chosen for low predicted residency */ diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h index 3711b34dc4f9..24cd1037b6d6 100644 --- a/include/linux/cpuidle.h +++ b/include/linux/cpuidle.h @@ -126,9 +126,9 @@ struct cpuidle_driver { struct module *owner; int refcnt; - unsigned int power_specified:1; /* set to 1 to use the core cpuidle time keeping (for all states). */ unsigned int en_core_tk_irqen:1; + /* states array must be ordered in decreasing power consumption */ struct cpuidle_state states[CPUIDLE_STATE_MAX]; int state_count; int safe_state_index; -- cgit v1.2.3 From 774a1221e862b343388347bac9b318767336b20b Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 15 Jan 2013 18:52:51 -0800 Subject: module, async: async_synchronize_full() on module init iff async is used If the default iosched is built as module, the kernel may deadlock while trying to load the iosched module on device probe if the probing was running off async. This is because async_synchronize_full() at the end of module init ends up waiting for the async job which initiated the module loading. async A modprobe 1. finds a device 2. registers the block device 3. request_module(default iosched) 4. modprobe in userland 5. load and init module 6. async_synchronize_full() Async A waits for modprobe to finish in request_module() and modprobe waits for async A to finish in async_synchronize_full(). Because there's no easy to track dependency once control goes out to userland, implementing properly nested flushing is difficult. For now, make module init perform async_synchronize_full() iff module init has queued async jobs as suggested by Linus. This avoids the described deadlock because iosched module doesn't use async and thus wouldn't invoke async_synchronize_full(). This is hacky and incomplete. It will deadlock if async module loading nests; however, this works around the known problem case and seems to be the best of bad options. For more details, please refer to the following thread. 
http://thread.gmane.org/gmane.linux.kernel/1420814 Signed-off-by: Tejun Heo Reported-by: Alex Riesen Tested-by: Ming Lei Tested-by: Alex Riesen Cc: Arjan van de Ven Cc: Jens Axboe Signed-off-by: Linus Torvalds --- include/linux/sched.h | 1 + kernel/async.c | 3 +++ kernel/module.c | 27 +++++++++++++++++++++++++-- 3 files changed, 29 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/sched.h b/include/linux/sched.h index 206bb089c06b..6fc8f45de4e9 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1810,6 +1810,7 @@ extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, #define PF_MEMALLOC 0x00000800 /* Allocating memory */ #define PF_NPROC_EXCEEDED 0x00001000 /* set_user noticed that RLIMIT_NPROC was exceeded */ #define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */ +#define PF_USED_ASYNC 0x00004000 /* used async_schedule*(), used by module init */ #define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */ #define PF_FROZEN 0x00010000 /* frozen for system suspend */ #define PF_FSTRANS 0x00020000 /* inside a filesystem transaction */ diff --git a/kernel/async.c b/kernel/async.c index 9d3118384858..a1d585c351d6 100644 --- a/kernel/async.c +++ b/kernel/async.c @@ -196,6 +196,9 @@ static async_cookie_t __async_schedule(async_func_ptr *ptr, void *data, struct a atomic_inc(&entry_count); spin_unlock_irqrestore(&async_lock, flags); + /* mark that this task has queued an async job, used by module init */ + current->flags |= PF_USED_ASYNC; + /* schedule for execution */ queue_work(system_unbound_wq, &entry->work); diff --git a/kernel/module.c b/kernel/module.c index 250092c1d57d..b10b048367e1 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -3013,6 +3013,12 @@ static int do_init_module(struct module *mod) { int ret = 0; + /* + * We want to find out whether @mod uses async during init. Clear + * PF_USED_ASYNC. async_schedule*() will set it. + */ + current->flags &= ~PF_USED_ASYNC; + blocking_notifier_call_chain(&module_notify_list, MODULE_STATE_COMING, mod); @@ -3058,8 +3064,25 @@ static int do_init_module(struct module *mod) blocking_notifier_call_chain(&module_notify_list, MODULE_STATE_LIVE, mod); - /* We need to finish all async code before the module init sequence is done */ - async_synchronize_full(); + /* + * We need to finish all async code before the module init sequence + * is done. This has potential to deadlock. For example, a newly + * detected block device can trigger request_module() of the + * default iosched from async probing task. Once userland helper + * reaches here, async_synchronize_full() will wait on the async + * task waiting on request_module() and deadlock. + * + * This deadlock is avoided by perfomring async_synchronize_full() + * iff module init queued any async jobs. This isn't a full + * solution as it will deadlock the same if module loading from + * async jobs nests more than once; however, due to the various + * constraints, this hack seems to be the best option for now. + * Please refer to the following thread for details. + * + * http://thread.gmane.org/gmane.linux.kernel/1420814 + */ + if (current->flags & PF_USED_ASYNC) + async_synchronize_full(); mutex_lock(&module_mutex); /* Drop initial reference. 
*/ -- cgit v1.2.3 From e65b9ad222c280c031bc8d3642cc38dd3026fe06 Mon Sep 17 00:00:00 2001 From: Jiri Kosina Date: Tue, 15 Jan 2013 20:12:37 +0100 Subject: lockdep, rwsem: fix down_write_nest_lock() if !CONFIG_DEBUG_LOCK_ALLOC Commit 1b963c81b145 ("lockdep, rwsem: provide down_write_nest_lock()") contains a bug in a codepath when CONFIG_DEBUG_LOCK_ALLOC is disabled, which causes down_read() to be called instead of down_write() by mistake on such configurations. Fix that. Reported-and-tested-by: Andrew Clayton Reported-and-tested-by: Zlatko Calusic Signed-off-by: Jiri Kosina Reviewed-by: Rik van Riel Signed-off-by: Linus Torvalds --- include/linux/rwsem.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h index 413cc11e414a..8da67d625e13 100644 --- a/include/linux/rwsem.h +++ b/include/linux/rwsem.h @@ -135,7 +135,7 @@ do { \ #else # define down_read_nested(sem, subclass) down_read(sem) -# define down_write_nest_lock(sem, nest_lock) down_read(sem) +# define down_write_nest_lock(sem, nest_lock) down_write(sem) # define down_write_nested(sem, subclass) down_write(sem) #endif -- cgit v1.2.3 From edea0d03ee5f0ae0051b6adb6681ebdf976b1ca4 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Sun, 20 Jan 2013 20:25:47 +0100 Subject: ia64: kill thread_matches(), unexport ptrace_check_attach() The ia64 function "thread_matches()" has no users since commit e868a55c2a8c ("[IA64] remove find_thread_for_addr()"). Remove it. This allows us to make ptrace_check_attach() static to kernel/ptrace.c, which is good since we'll need to change the semantics of it and fix up all the callers. Signed-off-by: Oleg Nesterov Signed-off-by: Linus Torvalds --- arch/ia64/kernel/ptrace.c | 27 --------------------------- include/linux/ptrace.h | 1 - kernel/ptrace.c | 2 +- 3 files changed, 1 insertion(+), 29 deletions(-) (limited to 'include/linux') diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c index 4265ff64219b..b7a5fffe0924 100644 --- a/arch/ia64/kernel/ptrace.c +++ b/arch/ia64/kernel/ptrace.c @@ -672,33 +672,6 @@ ptrace_attach_sync_user_rbs (struct task_struct *child) read_unlock(&tasklist_lock); } -static inline int -thread_matches (struct task_struct *thread, unsigned long addr) -{ - unsigned long thread_rbs_end; - struct pt_regs *thread_regs; - - if (ptrace_check_attach(thread, 0) < 0) - /* - * If the thread is not in an attachable state, we'll - * ignore it. The net effect is that if ADDR happens - * to overlap with the portion of the thread's - * register backing store that is currently residing - * on the thread's kernel stack, then ptrace() may end - * up accessing a stale value. But if the thread - * isn't stopped, that's a problem anyhow, so we're - * doing as well as we can... - */ - return 0; - - thread_regs = task_pt_regs(thread); - thread_rbs_end = ia64_get_user_rbs_end(thread, thread_regs, NULL); - if (!on_kernel_rbs(addr, thread_regs->ar_bspstore, thread_rbs_end)) - return 0; - - return 1; /* looks like we've got a winner */ -} - /* * Write f32-f127 back to task->thread.fph if it has been modified. 
*/ diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h index 1693775ecfe8..89573a33ab3c 100644 --- a/include/linux/ptrace.h +++ b/include/linux/ptrace.h @@ -45,7 +45,6 @@ extern long arch_ptrace(struct task_struct *child, long request, extern int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len); extern int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len); extern void ptrace_disable(struct task_struct *); -extern int ptrace_check_attach(struct task_struct *task, bool ignore_state); extern int ptrace_request(struct task_struct *child, long request, unsigned long addr, unsigned long data); extern void ptrace_notify(int exit_code); diff --git a/kernel/ptrace.c b/kernel/ptrace.c index 1599157336a6..612a56126851 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c @@ -139,7 +139,7 @@ void __ptrace_unlink(struct task_struct *child) * RETURNS: * 0 on success, -ESRCH if %child is not ready. */ -int ptrace_check_attach(struct task_struct *child, bool ignore_state) +static int ptrace_check_attach(struct task_struct *child, bool ignore_state) { int ret = -ESRCH; -- cgit v1.2.3 From 910ffdb18a6408e14febbb6e4b6840fd2c928c82 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Mon, 21 Jan 2013 20:47:41 +0100 Subject: ptrace: introduce signal_wake_up_state() and ptrace_signal_wake_up() Cleanup and preparation for the next change. signal_wake_up(resume => true) is overused. None of ptrace/jctl callers actually want to wakeup a TASK_WAKEKILL task, but they can't specify the necessary mask. Turn signal_wake_up() into signal_wake_up_state(state), reintroduce signal_wake_up() as a trivial helper, and add ptrace_signal_wake_up() which adds __TASK_TRACED. This way ptrace_signal_wake_up() can work "inside" ptrace_request() even if the tracee doesn't have the TASK_WAKEKILL bit set. Signed-off-by: Oleg Nesterov Signed-off-by: Linus Torvalds --- include/linux/sched.h | 11 ++++++++++- kernel/ptrace.c | 8 ++++---- kernel/signal.c | 14 ++++---------- 3 files changed, 18 insertions(+), 15 deletions(-) (limited to 'include/linux') diff --git a/include/linux/sched.h b/include/linux/sched.h index 6fc8f45de4e9..d2112477ff5e 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -2714,7 +2714,16 @@ static inline void thread_group_cputime_init(struct signal_struct *sig) extern void recalc_sigpending_and_wake(struct task_struct *t); extern void recalc_sigpending(void); -extern void signal_wake_up(struct task_struct *t, int resume_stopped); +extern void signal_wake_up_state(struct task_struct *t, unsigned int state); + +static inline void signal_wake_up(struct task_struct *t, bool resume) +{ + signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0); +} +static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume) +{ + signal_wake_up_state(t, resume ? __TASK_TRACED : 0); +} /* * Wrappers for p->thread_info->cpu access. No-op on UP. diff --git a/kernel/ptrace.c b/kernel/ptrace.c index 612a56126851..62f7c2774b16 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c @@ -117,7 +117,7 @@ void __ptrace_unlink(struct task_struct *child) * TASK_KILLABLE sleeps. 
*/ if (child->jobctl & JOBCTL_STOP_PENDING || task_is_traced(child)) - signal_wake_up(child, task_is_traced(child)); + ptrace_signal_wake_up(child, true); spin_unlock(&child->sighand->siglock); } @@ -317,7 +317,7 @@ static int ptrace_attach(struct task_struct *task, long request, */ if (task_is_stopped(task) && task_set_jobctl_pending(task, JOBCTL_TRAP_STOP | JOBCTL_TRAPPING)) - signal_wake_up(task, 1); + signal_wake_up_state(task, __TASK_STOPPED); spin_unlock(&task->sighand->siglock); @@ -737,7 +737,7 @@ int ptrace_request(struct task_struct *child, long request, * tracee into STOP. */ if (likely(task_set_jobctl_pending(child, JOBCTL_TRAP_STOP))) - signal_wake_up(child, child->jobctl & JOBCTL_LISTENING); + ptrace_signal_wake_up(child, child->jobctl & JOBCTL_LISTENING); unlock_task_sighand(child, &flags); ret = 0; @@ -763,7 +763,7 @@ int ptrace_request(struct task_struct *child, long request, * start of this trap and now. Trigger re-trap. */ if (child->jobctl & JOBCTL_TRAP_NOTIFY) - signal_wake_up(child, true); + ptrace_signal_wake_up(child, true); ret = 0; } unlock_task_sighand(child, &flags); diff --git a/kernel/signal.c b/kernel/signal.c index 53cd5c4d1172..6e97aa6fa32c 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -680,23 +680,17 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info) * No need to set need_resched since signal event passing * goes through ->blocked */ -void signal_wake_up(struct task_struct *t, int resume) +void signal_wake_up_state(struct task_struct *t, unsigned int state) { - unsigned int mask; - set_tsk_thread_flag(t, TIF_SIGPENDING); - /* - * For SIGKILL, we want to wake it up in the stopped/traced/killable + * TASK_WAKEKILL also means wake it up in the stopped/traced/killable * case. We don't check t->state here because there is a race with it * executing another processor and just now entering stopped state. * By using wake_up_state, we ensure the process will wake up and * handle its death signal. */ - mask = TASK_INTERRUPTIBLE; - if (resume) - mask |= TASK_WAKEKILL; - if (!wake_up_state(t, mask)) + if (!wake_up_state(t, state | TASK_INTERRUPTIBLE)) kick_process(t); } @@ -844,7 +838,7 @@ static void ptrace_trap_notify(struct task_struct *t) assert_spin_locked(&t->sighand->siglock); task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY); - signal_wake_up(t, t->jobctl & JOBCTL_LISTENING); + ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING); } /* -- cgit v1.2.3
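As a self-contained illustration of the wrapper pattern described in the last commit above (one state-mask primitive backing several trivial inline helpers), the user-space C sketch below mirrors the shape of signal_wake_up_state(), signal_wake_up() and ptrace_signal_wake_up(). The struct, the STATE_* masks and wake_one_in_state() are simplified stand-ins invented for this example; they are not the kernel's TASK_* bits or wake_up_state().

/*
 * Minimal sketch, not kernel code: struct task, the STATE_* masks and
 * wake_one_in_state() are hypothetical stand-ins. Only the structure
 * (one primitive taking a state mask, plus thin wrappers that pick the
 * extra bit) mirrors the patch above.
 */
#include <stdbool.h>
#include <stdio.h>

#define STATE_INTERRUPTIBLE 0x01u
#define STATE_WAKEKILL      0x02u
#define STATE_TRACED        0x04u

struct task {
	unsigned int state;
	const char *name;
};

/* The single primitive: wake the task only if its state matches the mask. */
static bool wake_one_in_state(struct task *t, unsigned int mask)
{
	if (t->state & mask) {
		printf("waking %s (state 0x%x, mask 0x%x)\n",
		       t->name, t->state, mask);
		t->state = 0;
		return true;
	}
	printf("not waking %s (state 0x%x, mask 0x%x)\n",
	       t->name, t->state, mask);
	return false;
}

/* Interruptible sleepers are always eligible; callers add extra bits. */
static void demo_signal_wake_up_state(struct task *t, unsigned int state)
{
	wake_one_in_state(t, state | STATE_INTERRUPTIBLE);
}

/* Thin wrapper: "resume" means also waking kill-only sleepers. */
static void demo_signal_wake_up(struct task *t, bool resume)
{
	demo_signal_wake_up_state(t, resume ? STATE_WAKEKILL : 0);
}

/* Thin wrapper for the ptrace path: "resume" targets traced sleepers. */
static void demo_ptrace_signal_wake_up(struct task *t, bool resume)
{
	demo_signal_wake_up_state(t, resume ? STATE_TRACED : 0);
}

int main(void)
{
	struct task traced = { .state = STATE_TRACED, .name = "traced" };
	struct task killable = { .state = STATE_WAKEKILL, .name = "killable" };

	demo_signal_wake_up(&traced, true);        /* no wake: mask lacks TRACED */
	demo_ptrace_signal_wake_up(&traced, true); /* wakes the traced task */
	demo_signal_wake_up(&killable, true);      /* wakes the killable task */
	return 0;
}

The benefit of routing everything through one mask-taking primitive, as the commit does, is that a ptrace-side caller can target traced sleepers without being forced to pass the kill-related bit, while ordinary signal delivery keeps its old trivial interface.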