Skip to content

Commit 47626f8

Browse files
minaNipaLocal
authored and
NipaLocal
committed
net: add get_netmem/put_netmem support
Currently net_iovs support only pp ref counts, and do not support a page ref equivalent. This is fine for the RX path as net_iovs are used exclusively with the pp and only pp refcounting is needed there. The TX path however does not use pp ref counts, thus, support for get_page/put_page equivalent is needed for netmem. Support get_netmem/put_netmem. Check the type of the netmem before passing it to page or net_iov specific code to obtain a page ref equivalent. For dmabuf net_iovs, we obtain a ref on the underlying binding. This ensures the entire binding doesn't disappear until all the net_iovs have been put_netmem'ed. We do not need to track the refcount of individual dmabuf net_iovs as we don't allocate/free them from a pool similar to what the buddy allocator does for pages. This code is written to be extensible by other net_iov implementers. get_netmem/put_netmem will check the type of the netmem and route it to the correct helper: pages -> [get|put]_page() dmabuf net_iovs -> net_devmem_[get|put]_net_iov() new net_iovs -> new helpers Signed-off-by: Mina Almasry <[email protected]> Acked-by: Stanislav Fomichev <[email protected]> Signed-off-by: NipaLocal <nipa@local>
1 parent 3a25f0f commit 47626f8

File tree

5 files changed

+65
-2
lines changed

5 files changed

+65
-2
lines changed

include/linux/skbuff_ref.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -17,7 +17,7 @@
1717
*/
1818
/* Take a reference on the netmem backing @frag. Routing through
 * get_netmem() (rather than get_page()) handles both pages and
 * net_iov-backed frags.
 */
static inline void __skb_frag_ref(skb_frag_t *frag)
{
	get_netmem(skb_frag_netmem(frag));
}
2222

2323
/**
@@ -40,7 +40,7 @@ static inline void skb_page_unref(netmem_ref netmem, bool recycle)
4040
if (recycle && napi_pp_put_page(netmem))
4141
return;
4242
#endif
43-
put_page(netmem_to_page(netmem));
43+
put_netmem(netmem);
4444
}
4545

4646
/**

include/net/netmem.h

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -264,4 +264,7 @@ static inline unsigned long netmem_get_dma_addr(netmem_ref netmem)
264264
return __netmem_clear_lsb(netmem)->dma_addr;
265265
}
266266

267+
void get_netmem(netmem_ref netmem);
268+
void put_netmem(netmem_ref netmem);
269+
267270
#endif /* _NET_NETMEM_H */

net/core/devmem.c

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -325,6 +325,16 @@ net_devmem_bind_dmabuf(struct net_device *dev, unsigned int dmabuf_fd,
325325
return ERR_PTR(err);
326326
}
327327

328+
/* Pin the dmabuf binding backing @niov. Individual dmabuf net_iovs are
 * not refcounted; instead a ref is taken on the whole binding so it
 * stays alive until every net_iov has been put.
 */
void net_devmem_get_net_iov(struct net_iov *niov)
{
	struct net_devmem_dmabuf_binding *binding;

	binding = net_devmem_iov_binding(niov);
	net_devmem_dmabuf_binding_get(binding);
}
332+
333+
/* Drop the binding ref taken by net_devmem_get_net_iov(). */
void net_devmem_put_net_iov(struct net_iov *niov)
{
	struct net_devmem_dmabuf_binding *binding;

	binding = net_devmem_iov_binding(niov);
	net_devmem_dmabuf_binding_put(binding);
}
337+
328338
/*** "Dmabuf devmem memory provider" ***/
329339

330340
int mp_dmabuf_devmem_init(struct page_pool *pool)

net/core/devmem.h

Lines changed: 20 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -29,6 +29,10 @@ struct net_devmem_dmabuf_binding {
2929
* The binding undoes itself and unmaps the underlying dmabuf once all
3030
* those refs are dropped and the binding is no longer desired or in
3131
* use.
32+
*
33+
* net_devmem_get_net_iov() on dmabuf net_iovs will increment this
34+
* reference, making sure that the binding remains alive until all the
35+
* net_iovs are no longer used.
3236
*/
3337
refcount_t ref;
3438

@@ -111,6 +115,9 @@ net_devmem_dmabuf_binding_put(struct net_devmem_dmabuf_binding *binding)
111115
__net_devmem_dmabuf_binding_free(binding);
112116
}
113117

118+
void net_devmem_get_net_iov(struct net_iov *niov);
119+
void net_devmem_put_net_iov(struct net_iov *niov);
120+
114121
struct net_iov *
115122
net_devmem_alloc_dmabuf(struct net_devmem_dmabuf_binding *binding);
116123
void net_devmem_free_dmabuf(struct net_iov *ppiov);
@@ -120,6 +127,19 @@ bool net_is_devmem_iov(struct net_iov *niov);
120127
#else
121128
struct net_devmem_dmabuf_binding;
122129

130+
/* !CONFIG_NET_DEVMEM stub: no binding exists, so there is nothing to put. */
static inline void
net_devmem_dmabuf_binding_put(struct net_devmem_dmabuf_binding *binding)
{
}
134+
135+
/* !CONFIG_NET_DEVMEM stub: dmabuf net_iovs cannot exist, no-op. */
static inline void net_devmem_get_net_iov(struct net_iov *niov)
{
}
138+
139+
/* !CONFIG_NET_DEVMEM stub: dmabuf net_iovs cannot exist, no-op. */
static inline void net_devmem_put_net_iov(struct net_iov *niov)
{
}
142+
123143
static inline void
124144
__net_devmem_dmabuf_binding_free(struct net_devmem_dmabuf_binding *binding)
125145
{

net/core/skbuff.c

Lines changed: 30 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -90,6 +90,7 @@
9090
#include <linux/textsearch.h>
9191

9292
#include "dev.h"
93+
#include "devmem.h"
9394
#include "netmem_priv.h"
9495
#include "sock_destructor.h"
9596

@@ -7254,3 +7255,32 @@ bool csum_and_copy_from_iter_full(void *addr, size_t bytes,
72547255
return false;
72557256
}
72567257
EXPORT_SYMBOL(csum_and_copy_from_iter_full);
7258+
7259+
/**
 * get_netmem - obtain a page-ref equivalent on a netmem_ref
 * @netmem: the netmem to take a reference on
 *
 * Plain pages get a get_page(). net_iovs are assumed to be devmem and
 * are routed to net_devmem_get_net_iov(), which pins the underlying
 * dmabuf binding. New net_iov implementers must add their own check
 * and helper here.
 */
void get_netmem(netmem_ref netmem)
{
	if (!netmem_is_net_iov(netmem)) {
		get_page(netmem_to_page(netmem));
		return;
	}

	net_devmem_get_net_iov(netmem_to_net_iov(netmem));
}
EXPORT_SYMBOL(get_netmem);
7272+
7273+
/**
 * put_netmem - release a reference taken with get_netmem()
 * @netmem: the netmem to release
 *
 * Mirrors get_netmem(): pages get a put_page(); net_iovs are assumed
 * to be devmem and drop a ref on their dmabuf binding via
 * net_devmem_put_net_iov(). New net_iov implementers must add their
 * own check and helper here.
 */
void put_netmem(netmem_ref netmem)
{
	if (!netmem_is_net_iov(netmem)) {
		put_page(netmem_to_page(netmem));
		return;
	}

	net_devmem_put_net_iov(netmem_to_net_iov(netmem));
}
EXPORT_SYMBOL(put_netmem);

0 commit comments

Comments
 (0)