From: Peter Zijlstra
Subject: [PATCH 06/31] mm: kmem_alloc_estimate()
Patch-mainline: not yet

Feb 8 2011: Refreshed patch to accommodate an upstream change - commit
55136592 removed dynamic dma slab allocation.

Provide a method to get the upper bound on the pages needed to allocate
a given number of objects from a given kmem_cache.

This lays the foundation for a generic reserve framework as presented in
a later patch in this series. This framework needs to convert object demand
(kmalloc() bytes, kmem_cache_alloc() objects) to pages.

Signed-off-by: Peter Zijlstra
Signed-off-by: Suresh Jayaraman
---
 include/linux/slab.h |    4 ++
 mm/slab.c            |   75 +++++++++++++++++++++++++++++++++++++++++++
 mm/slob.c            |   67 +++++++++++++++++++++++++++++++++++++++
 mm/slub.c            |   87 +++++++++++++++++++++++++++++++++++++++++++++++++++
 4 files changed, 233 insertions(+)

Index: linux-2.6.37-master/include/linux/slab.h
===================================================================
--- linux-2.6.37-master.orig/include/linux/slab.h
+++ linux-2.6.37-master/include/linux/slab.h
@@ -106,6 +106,8 @@ int kmem_cache_shrink(struct kmem_cache
 void kmem_cache_free(struct kmem_cache *, void *);
 unsigned int kmem_cache_size(struct kmem_cache *);
 const char *kmem_cache_name(struct kmem_cache *);
+unsigned kmem_alloc_estimate(struct kmem_cache *cachep,
+			gfp_t flags, int objects);
 
 /*
  * Please use this macro to create slab caches. Simply specify the
@@ -142,6 +144,8 @@ void * __must_check krealloc(const void
 void kfree(const void *);
 void kzfree(const void *);
 size_t ksize(const void *);
+unsigned kmalloc_estimate_objs(size_t, gfp_t, int);
+unsigned kmalloc_estimate_bytes(gfp_t, size_t);
 
 /*
  * Allocator specific definitions. These are mainly used to establish optimized
Index: linux-2.6.37-master/mm/slab.c
===================================================================
--- linux-2.6.37-master.orig/mm/slab.c
+++ linux-2.6.37-master/mm/slab.c
@@ -3890,6 +3890,81 @@ const char *kmem_cache_name(struct kmem_
 EXPORT_SYMBOL_GPL(kmem_cache_name);
 
 /*
+ * Calculate the upper bound of pages required to sequentially allocate
+ * @objects objects from @cachep.
+ */
+unsigned kmem_alloc_estimate(struct kmem_cache *cachep,
+		gfp_t flags, int objects)
+{
+	/*
+	 * (1) memory for objects,
+	 */
+	unsigned nr_slabs = DIV_ROUND_UP(objects, cachep->num);
+	unsigned nr_pages = nr_slabs << cachep->gfporder;
+
+	/*
+	 * (2) memory for each per-cpu queue (nr_cpu_ids),
+	 * (3) memory for each per-node alien queues (nr_cpu_ids), and
+	 * (4) some amount of memory for the slab management structures
+	 *
+	 * XXX: truly account these
+	 */
+	nr_pages += 1 + ilog2(nr_pages);
+
+	return nr_pages;
+}
+
+/*
+ * Calculate the upper bound of pages required to sequentially allocate
+ * @count objects of @size bytes from kmalloc given @flags.
+ */
+unsigned kmalloc_estimate_objs(size_t size, gfp_t flags, int count)
+{
+	struct kmem_cache *s = kmem_find_general_cachep(size, flags);
+	if (!s)
+		return 0;
+
+	return kmem_alloc_estimate(s, flags, count);
+}
+EXPORT_SYMBOL_GPL(kmalloc_estimate_objs);
+
+/*
+ * Calculate the upper bound of pages required to sequentially allocate @bytes
+ * from kmalloc in an unspecified number of allocations of nonuniform size.
+ */
+unsigned kmalloc_estimate_bytes(gfp_t flags, size_t bytes)
+{
+	unsigned long pages;
+	struct cache_sizes *csizep = malloc_sizes;
+
+	/*
+	 * multiply by two, in order to account for the worst case slack space
+	 * due to the power-of-two allocation sizes.
+	 */
+	pages = DIV_ROUND_UP(2 * bytes, PAGE_SIZE);
+
+	/*
+	 * add the kmem_cache overhead of each possible kmalloc cache
+	 */
+	for (csizep = malloc_sizes; csizep->cs_cachep; csizep++) {
+		struct kmem_cache *s;
+
+#ifdef CONFIG_ZONE_DMA
+		if (unlikely(flags & __GFP_DMA))
+			s = csizep->cs_dmacachep;
+		else
+#endif
+			s = csizep->cs_cachep;
+
+		if (s)
+			pages += kmem_alloc_estimate(s, flags, 0);
+	}
+
+	return pages;
+}
+EXPORT_SYMBOL_GPL(kmalloc_estimate_bytes);
+
+/*
  * This initializes kmem_list3 or resizes various caches for all nodes.
  */
 static int alloc_kmemlist(struct kmem_cache *cachep, gfp_t gfp)
Index: linux-2.6.37-master/mm/slob.c
===================================================================
--- linux-2.6.37-master.orig/mm/slob.c
+++ linux-2.6.37-master/mm/slob.c
@@ -699,6 +699,73 @@ int slab_is_available(void)
 	return slob_ready;
 }
 
+static unsigned __slob_estimate(unsigned size, unsigned align, unsigned objects)
+{
+	unsigned nr_pages;
+
+	size = SLOB_UNIT * SLOB_UNITS(size + align - 1);
+
+	if (size <= PAGE_SIZE) {
+		nr_pages = DIV_ROUND_UP(objects, PAGE_SIZE / size);
+	} else {
+		nr_pages = objects << get_order(size);
+	}
+
+	return nr_pages;
+}
+
+/*
+ * Calculate the upper bound of pages required to sequentially allocate
+ * @objects objects from @c.
+ */
+unsigned kmem_alloc_estimate(struct kmem_cache *c, gfp_t flags, int objects)
+{
+	unsigned size = c->size;
+
+	if (c->flags & SLAB_DESTROY_BY_RCU)
+		size += sizeof(struct slob_rcu);
+
+	return __slob_estimate(size, c->align, objects);
+}
+
+/*
+ * Calculate the upper bound of pages required to sequentially allocate
+ * @count objects of @size bytes from kmalloc given @flags.
+ */
+unsigned kmalloc_estimate_objs(size_t size, gfp_t flags, int count)
+{
+	unsigned align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
+
+	return __slob_estimate(size, align, count);
+}
+EXPORT_SYMBOL_GPL(kmalloc_estimate_objs);
+
+/*
+ * Calculate the upper bound of pages required to sequentially allocate @bytes
+ * from kmalloc in an unspecified number of allocations of nonuniform size.
+ */
+unsigned kmalloc_estimate_bytes(gfp_t flags, size_t bytes)
+{
+	unsigned long pages;
+
+	/*
+	 * Multiply by two, in order to account for the worst case slack space
+	 * due to the power-of-two allocation sizes.
+	 *
+	 * While not true for slob, it cannot do worse than that for sequential
+	 * allocations.
+	 */
+	pages = DIV_ROUND_UP(2 * bytes, PAGE_SIZE);
+
+	/*
+	 * Our power of two series starts at PAGE_SIZE, so add one page.
+	 */
+	pages++;
+
+	return pages;
+}
+EXPORT_SYMBOL_GPL(kmalloc_estimate_bytes);
+
 void __init kmem_cache_init(void)
 {
 	slob_ready = 1;
Index: linux-2.6.37-master/mm/slub.c
===================================================================
--- linux-2.6.37-master.orig/mm/slub.c
+++ linux-2.6.37-master/mm/slub.c
@@ -2434,6 +2434,42 @@ const char *kmem_cache_name(struct kmem_
 }
 EXPORT_SYMBOL(kmem_cache_name);
 
+/*
+ * Calculate the upper bound of pages required to sequentially allocate
+ * @objects objects from @s.
+ *
+ * We should use s->min_objects because those are the least efficient.
+ */
+unsigned kmem_alloc_estimate(struct kmem_cache *s, gfp_t flags, int objects)
+{
+	unsigned long pages;
+	struct kmem_cache_order_objects x;
+
+	if (WARN_ON(!s) || WARN_ON(!oo_objects(s->min)))
+		return 0;
+
+	x = s->min;
+	pages = DIV_ROUND_UP(objects, oo_objects(x)) << oo_order(x);
+
+	/*
+	 * Account for the possible additional overhead if the slab holds more
+	 * than one object. Use s->max_objects because that's the worst case.
+	 */
+	x = s->oo;
+	if (oo_objects(x) > 1) {
+		/*
+		 * Account for the possible additional overhead if per cpu
+		 * slabs are currently empty and have to be allocated. This is
+		 * very unlikely but a possible scenario immediately after
+		 * kmem_cache_shrink.
+		 */
+		pages += num_possible_cpus() << oo_order(x);
+	}
+
+	return pages;
+}
+EXPORT_SYMBOL_GPL(kmem_alloc_estimate);
+
 static void list_slab_objects(struct kmem_cache *s, struct page *page,
 							const char *text)
 {
@@ -2783,6 +2819,57 @@ void kfree(const void *x)
 EXPORT_SYMBOL(kfree);
 
 /*
+ * Calculate the upper bound of pages required to sequentially allocate
+ * @count objects of @size bytes from kmalloc given @flags.
+ */
+unsigned kmalloc_estimate_objs(size_t size, gfp_t flags, int count)
+{
+	struct kmem_cache *s = get_slab(size, flags);
+	if (!s)
+		return 0;
+
+	return kmem_alloc_estimate(s, flags, count);
+
+}
+EXPORT_SYMBOL_GPL(kmalloc_estimate_objs);
+
+/*
+ * Calculate the upper bound of pages required to sequentially allocate @bytes
+ * from kmalloc in an unspecified number of allocations of nonuniform size.
+ */
+unsigned kmalloc_estimate_bytes(gfp_t flags, size_t bytes)
+{
+	int i;
+	unsigned long pages;
+
+	/*
+	 * multiply by two, in order to account for the worst case slack space
+	 * due to the power-of-two allocation sizes.
+	 */
+	pages = DIV_ROUND_UP(2 * bytes, PAGE_SIZE);
+
+	/*
+	 * add the kmem_cache overhead of each possible kmalloc cache
+	 */
+	for (i = 1; i < PAGE_SHIFT; i++) {
+		struct kmem_cache *s;
+
+#ifdef CONFIG_ZONE_DMA
+		if (unlikely(flags & SLUB_DMA))
+			s = kmalloc_dma_caches[i];
+		else
+#endif
+			s = kmalloc_caches[i];
+
+		if (s)
+			pages += kmem_alloc_estimate(s, flags, 0);
+	}
+
+	return pages;
+}
+EXPORT_SYMBOL_GPL(kmalloc_estimate_bytes);
+
+/*
+ * kmem_cache_shrink removes empty slabs from the partial lists and sorts
+ * the remaining slabs by the number of items in use. The slabs with the
+ * most items in use come first. New allocations will then fill those up
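
For illustration only, and not part of the diff above: a minimal sketch of how
a reserve-framework user might combine these interfaces to size a page
reserve. The function name and the demand figures below are hypothetical; only
the three estimate interfaces and their signatures come from this patch.

#include <linux/slab.h>

/*
 * Hypothetical usage sketch: upper bound of pages needed to back a reserve
 * that must satisfy 128 objects from @cachep, 64 kmalloc() allocations of
 * 256 bytes each, and 16KB of kmalloc() memory in chunks of unknown size.
 * All figures are made up for illustration.
 */
static unsigned long example_reserve_pages(struct kmem_cache *cachep)
{
	unsigned long pages = 0;

	pages += kmem_alloc_estimate(cachep, GFP_ATOMIC, 128);
	pages += kmalloc_estimate_objs(256, GFP_ATOMIC, 64);
	pages += kmalloc_estimate_bytes(GFP_ATOMIC, 16 * 1024);

	return pages;
}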