Mirror of git://git.openwrt.org/openwrt/openwrt.git (synced 2024-12-18 12:54:39 +00:00)
kernel: backport page fragment API changes from 4.10+ to 4.9
mt76 now relies on this API.

Signed-off-by: Felix Fietkau <nbd@nbd.name>
parent 888a15ff83
commit 18533ff415
@@ -0,0 +1,75 @@
From: Alexander Duyck <alexander.h.duyck@intel.com>
Date: Wed, 14 Dec 2016 15:05:26 -0800
Subject: [PATCH] mm: add support for releasing multiple instances of a page

Add a function that allows us to batch free a page that has multiple
references outstanding. Specifically this function can be used to drop
a page being used in the page frag alloc cache. With this drivers can
make use of functionality similar to the page frag alloc cache without
having to do any workarounds for the fact that there is no function that
frees multiple references.

Link: http://lkml.kernel.org/r/20161110113606.76501.70752.stgit@ahduyck-blue-test.jf.intel.com
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: "James E.J. Bottomley" <jejb@parisc-linux.org>
Cc: Chris Metcalf <cmetcalf@mellanox.com>
Cc: David Howells <dhowells@redhat.com>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Hans-Christian Noren Egtvedt <egtvedt@samfundet.no>
Cc: Helge Deller <deller@gmx.de>
Cc: James Hogan <james.hogan@imgtec.com>
Cc: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Cc: Jonas Bonn <jonas@southpole.se>
Cc: Keguang Zhang <keguang.zhang@gmail.com>
Cc: Ley Foon Tan <lftan@altera.com>
Cc: Mark Salter <msalter@redhat.com>
Cc: Max Filippov <jcmvbkbc@gmail.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Michal Simek <monstr@monstr.eu>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Rich Felker <dalias@libc.org>
Cc: Richard Kuo <rkuo@codeaurora.org>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Steven Miao <realmz6@gmail.com>
Cc: Tobias Klauser <tklauser@distanz.ch>
Cc: Vineet Gupta <vgupta@synopsys.com>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
---

--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -506,6 +506,8 @@ extern void free_hot_cold_page(struct pa
 extern void free_hot_cold_page_list(struct list_head *list, bool cold);
 
 struct page_frag_cache;
+extern void __page_frag_drain(struct page *page, unsigned int order,
+			      unsigned int count);
 extern void *__alloc_page_frag(struct page_frag_cache *nc,
 			       unsigned int fragsz, gfp_t gfp_mask);
 extern void __free_page_frag(void *addr);
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3946,6 +3946,20 @@ static struct page *__page_frag_refill(s
 	return page;
 }
 
+void __page_frag_drain(struct page *page, unsigned int order,
+		       unsigned int count)
+{
+	VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
+
+	if (page_ref_sub_and_test(page, count)) {
+		if (order == 0)
+			free_hot_cold_page(page, false);
+		else
+			__free_pages_ok(page, order);
+	}
+}
+EXPORT_SYMBOL(__page_frag_drain);
+
 void *__alloc_page_frag(struct page_frag_cache *nc,
 			unsigned int fragsz, gfp_t gfp_mask)
 {
@@ -0,0 +1,137 @@
From: Alexander Duyck <alexander.h.duyck@intel.com>
Date: Tue, 10 Jan 2017 16:58:06 -0800
Subject: [PATCH] mm: rename __alloc_page_frag to page_frag_alloc and
 __free_page_frag to page_frag_free

Patch series "Page fragment updates", v4.

This patch series takes care of a few cleanups for the page fragments
API.

First we do some renames so that things are much more consistent. First
we move the page_frag_ portion of the name to the front of the functions
names. Secondly we split out the cache specific functions from the
other page fragment functions by adding the word "cache" to the name.

Finally I added a bit of documentation that will hopefully help to
explain some of this. I plan to revisit this later as we get things
more ironed out in the near future with the changes planned for the DMA
setup to support eXpress Data Path.

This patch (of 3):

This patch renames the page frag functions to be more consistent with
other APIs. Specifically we place the name page_frag first in the name
and then have either an alloc or free call name that we append as the
suffix. This makes it a bit clearer in terms of naming.

In addition we drop the leading double underscores since we are
technically no longer a backing interface and instead the front end that
is called from the networking APIs.

Link: http://lkml.kernel.org/r/20170104023854.13451.67390.stgit@localhost.localdomain
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
---

--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -508,9 +508,9 @@ extern void free_hot_cold_page_list(stru
 struct page_frag_cache;
 extern void __page_frag_drain(struct page *page, unsigned int order,
 			      unsigned int count);
-extern void *__alloc_page_frag(struct page_frag_cache *nc,
-			       unsigned int fragsz, gfp_t gfp_mask);
-extern void __free_page_frag(void *addr);
+extern void *page_frag_alloc(struct page_frag_cache *nc,
+			     unsigned int fragsz, gfp_t gfp_mask);
+extern void page_frag_free(void *addr);
 
 #define __free_page(page) __free_pages((page), 0)
 #define free_page(addr) free_pages((addr), 0)
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -2471,7 +2471,7 @@ static inline struct sk_buff *netdev_all
 
 static inline void skb_free_frag(void *addr)
 {
-	__free_page_frag(addr);
+	page_frag_free(addr);
 }
 
 void *napi_alloc_frag(unsigned int fragsz);
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3960,8 +3960,8 @@ void __page_frag_drain(struct page *page
 }
 EXPORT_SYMBOL(__page_frag_drain);
 
-void *__alloc_page_frag(struct page_frag_cache *nc,
-			unsigned int fragsz, gfp_t gfp_mask)
+void *page_frag_alloc(struct page_frag_cache *nc,
+		      unsigned int fragsz, gfp_t gfp_mask)
 {
 	unsigned int size = PAGE_SIZE;
 	struct page *page;
@@ -4012,19 +4012,19 @@ refill:
 
 	return nc->va + offset;
 }
-EXPORT_SYMBOL(__alloc_page_frag);
+EXPORT_SYMBOL(page_frag_alloc);
 
 /*
  * Frees a page fragment allocated out of either a compound or order 0 page.
  */
-void __free_page_frag(void *addr)
+void page_frag_free(void *addr)
 {
 	struct page *page = virt_to_head_page(addr);
 
 	if (unlikely(put_page_testzero(page)))
 		__free_pages_ok(page, compound_order(page));
 }
-EXPORT_SYMBOL(__free_page_frag);
+EXPORT_SYMBOL(page_frag_free);
 
 static void *make_alloc_exact(unsigned long addr, unsigned int order,
 		size_t size)
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -369,7 +369,7 @@ static void *__netdev_alloc_frag(unsigne
 
 	local_irq_save(flags);
 	nc = this_cpu_ptr(&netdev_alloc_cache);
-	data = __alloc_page_frag(nc, fragsz, gfp_mask);
+	data = page_frag_alloc(nc, fragsz, gfp_mask);
 	local_irq_restore(flags);
 	return data;
 }
@@ -391,7 +391,7 @@ static void *__napi_alloc_frag(unsigned
 {
 	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
 
-	return __alloc_page_frag(&nc->page, fragsz, gfp_mask);
+	return page_frag_alloc(&nc->page, fragsz, gfp_mask);
 }
 
 void *napi_alloc_frag(unsigned int fragsz)
@@ -441,7 +441,7 @@ struct sk_buff *__netdev_alloc_skb(struc
 	local_irq_save(flags);
 
 	nc = this_cpu_ptr(&netdev_alloc_cache);
-	data = __alloc_page_frag(nc, len, gfp_mask);
+	data = page_frag_alloc(nc, len, gfp_mask);
 	pfmemalloc = nc->pfmemalloc;
 
 	local_irq_restore(flags);
@@ -505,7 +505,7 @@ struct sk_buff *__napi_alloc_skb(struct
 	if (sk_memalloc_socks())
 		gfp_mask |= __GFP_MEMALLOC;
 
-	data = __alloc_page_frag(&nc->page, len, gfp_mask);
+	data = page_frag_alloc(&nc->page, len, gfp_mask);
 	if (unlikely(!data))
 		return NULL;
 
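A minimal sketch of the renamed allocation pair, assuming a hypothetical private page_frag_cache and leaving serialization to the caller (the in-kernel users above disable IRQs around their per-CPU caches). Not part of the patch; example_alloc_buf and example_free_buf are made-up names.

#include <linux/gfp.h>
#include <linux/mm.h>

/* Hypothetical private cache -- illustration of the renamed API only. */
static struct page_frag_cache example_frag_cache;

static void *example_alloc_buf(unsigned int size, gfp_t gfp_mask)
{
	/* was __alloc_page_frag() before this patch */
	return page_frag_alloc(&example_frag_cache, size, gfp_mask);
}

static void example_free_buf(void *buf)
{
	/* was __free_page_frag(); handles order-0 and compound pages */
	page_frag_free(buf);
}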
@@ -0,0 +1,79 @@
From: Alexander Duyck <alexander.h.duyck@intel.com>
Date: Tue, 10 Jan 2017 16:58:09 -0800
Subject: [PATCH] mm: rename __page_frag functions to __page_frag_cache, drop
 order from drain

This patch does two things.

First it goes through and renames the __page_frag prefixed functions to
__page_frag_cache so that we can be clear that we are draining or
refilling the cache, not the frags themselves.

Second we drop the order parameter from __page_frag_cache_drain since we
don't actually need to pass it since all fragments are either order 0 or
must be a compound page.

Link: http://lkml.kernel.org/r/20170104023954.13451.5678.stgit@localhost.localdomain
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
---

--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -506,8 +506,7 @@ extern void free_hot_cold_page(struct pa
 extern void free_hot_cold_page_list(struct list_head *list, bool cold);
 
 struct page_frag_cache;
-extern void __page_frag_drain(struct page *page, unsigned int order,
-			      unsigned int count);
+extern void __page_frag_cache_drain(struct page *page, unsigned int count);
 extern void *page_frag_alloc(struct page_frag_cache *nc,
 			     unsigned int fragsz, gfp_t gfp_mask);
 extern void page_frag_free(void *addr);
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3925,8 +3925,8 @@ EXPORT_SYMBOL(free_pages);
  * drivers to provide a backing region of memory for use as either an
  * sk_buff->head, or to be used in the "frags" portion of skb_shared_info.
  */
-static struct page *__page_frag_refill(struct page_frag_cache *nc,
-				       gfp_t gfp_mask)
+static struct page *__page_frag_cache_refill(struct page_frag_cache *nc,
+					     gfp_t gfp_mask)
 {
 	struct page *page = NULL;
 	gfp_t gfp = gfp_mask;
@@ -3946,19 +3946,20 @@ static struct page *__page_frag_refill(s
 	return page;
 }
 
-void __page_frag_drain(struct page *page, unsigned int order,
-		       unsigned int count)
+void __page_frag_cache_drain(struct page *page, unsigned int count)
 {
 	VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
 
 	if (page_ref_sub_and_test(page, count)) {
+		unsigned int order = compound_order(page);
+
 		if (order == 0)
 			free_hot_cold_page(page, false);
 		else
 			__free_pages_ok(page, order);
 	}
 }
-EXPORT_SYMBOL(__page_frag_drain);
+EXPORT_SYMBOL(__page_frag_cache_drain);
 
 void *page_frag_alloc(struct page_frag_cache *nc,
 		      unsigned int fragsz, gfp_t gfp_mask)
@@ -3969,7 +3970,7 @@ void *page_frag_alloc(struct page_frag_c
 
 	if (unlikely(!nc->va)) {
 refill:
-		page = __page_frag_refill(nc, gfp_mask);
+		page = __page_frag_cache_refill(nc, gfp_mask);
 		if (!page)
 			return NULL;
 
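The end result for callers, sketched below with the same hypothetical names as before and not taken from the patch: the drain helper now takes only the page and the outstanding reference count, since the allocation order is recovered internally via compound_order().

#include <linux/gfp.h>
#include <linux/mm.h>

/* Hypothetical teardown path after the final rename: the caller no longer
 * needs to track the allocation order of the cache page. */
static void example_drain_frag_page(struct page *page, unsigned int bias)
{
	if (page)
		__page_frag_cache_drain(page, bias);
}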