 #include <net/netdev_rx_queue.h>
 #include <net/page_pool/helpers.h>
 #include <net/page_pool/memory_provider.h>
+#include <net/sock.h>
 #include <trace/events/page_pool.h>
 
 #include "devmem.h"
@@ -73,8 +74,10 @@ void __net_devmem_dmabuf_binding_free(struct net_devmem_dmabuf_binding *binding)
        dma_buf_detach(binding->dmabuf, binding->attachment);
        dma_buf_put(binding->dmabuf);
        xa_destroy(&binding->bound_rxqs);
+       kvfree(binding->tx_vec);
        kfree(binding);
 }
+EXPORT_SYMBOL(__net_devmem_dmabuf_binding_free);
 
 struct net_iov *
 net_devmem_alloc_dmabuf(struct net_devmem_dmabuf_binding *binding)
@@ -119,6 +122,13 @@ void net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding)
        unsigned long xa_idx;
        unsigned int rxq_idx;
 
+       xa_erase(&net_devmem_dmabuf_bindings, binding->id);
+
+       /* Ensure no tx net_devmem_lookup_dmabuf() are in flight after the
+        * erase.
+        */
+       synchronize_net();
+
        if (binding->list.next)
                list_del(&binding->list);
 
@@ -133,8 +143,6 @@ void net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding)
                WARN_ON(netdev_rx_queue_restart(binding->dev, rxq_idx));
        }
 
-       xa_erase(&net_devmem_dmabuf_bindings, binding->id);
-
        net_devmem_dmabuf_binding_put(binding);
 }
 
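Moving xa_erase() ahead of the queue teardown, followed by synchronize_net() (an RCU grace-period wait), means a concurrent TX-side net_devmem_lookup_dmabuf() either found the binding before the erase and took a reference, or will not find it at all. A minimal sketch of that retract-then-wait ordering, using illustrative names (devmem_bindings, struct my_binding, my_binding_unpublish) rather than the patch's own symbols:

```c
/* Sketch only: the generic "retract, wait, release" ordering assumed above.
 * All identifiers here are illustrative, not taken from the patch.
 */
#include <linux/xarray.h>
#include <linux/rcupdate.h>
#include <linux/refcount.h>
#include <linux/slab.h>

static DEFINE_XARRAY_FLAGS(devmem_bindings, XA_FLAGS_ALLOC1);

struct my_binding {
        u32 id;
        refcount_t ref;
};

static void my_binding_unpublish(struct my_binding *b)
{
        /* 1. Make the binding unreachable for new lookups. */
        xa_erase(&devmem_bindings, b->id);

        /* 2. Wait out RCU readers that may have already loaded the
         *    pointer from the XArray.
         */
        synchronize_rcu();

        /* 3. Drop the table's reference; readers that won a try-get
         *    still hold their own references.
         */
        if (refcount_dec_and_test(&b->ref))
                kfree(b);
}
```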
@@ -197,8 +205,9 @@ int net_devmem_bind_dmabuf_to_queue(struct net_device *dev, u32 rxq_idx,
 }
 
 struct net_devmem_dmabuf_binding *
-net_devmem_bind_dmabuf(struct net_device *dev, unsigned int dmabuf_fd,
-                      struct netlink_ext_ack *extack)
+net_devmem_bind_dmabuf(struct net_device *dev,
+                      enum dma_data_direction direction,
+                      unsigned int dmabuf_fd, struct netlink_ext_ack *extack)
 {
        struct net_devmem_dmabuf_binding *binding;
        static u32 id_alloc_next;
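The bind helper now takes an enum dma_data_direction so the same path can create RX and TX bindings. A hedged usage sketch; the caller context (a handler that already holds dev, dmabuf_fd and extack) is assumed and not shown in this diff:

```c
/* Illustrative call only; the actual callers are outside this hunk. */
binding = net_devmem_bind_dmabuf(dev, DMA_TO_DEVICE,  /* TX: device reads the buffer */
                                 dmabuf_fd, extack);
if (IS_ERR(binding))
        return PTR_ERR(binding);
/* RX bindings keep passing DMA_FROM_DEVICE. */
```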
@@ -241,7 +250,7 @@ net_devmem_bind_dmabuf(struct net_device *dev, unsigned int dmabuf_fd,
        }
 
        binding->sgt = dma_buf_map_attachment_unlocked(binding->attachment,
-                                                      DMA_FROM_DEVICE);
+                                                      direction);
        if (IS_ERR(binding->sgt)) {
                err = PTR_ERR(binding->sgt);
                NL_SET_ERR_MSG(extack, "Failed to map dmabuf attachment");
@@ -252,13 +261,23 @@ net_devmem_bind_dmabuf(struct net_device *dev, unsigned int dmabuf_fd,
         * binding can be much more flexible than that. We may be able to
         * allocate MTU sized chunks here. Leave that for future work...
         */
-       binding->chunk_pool =
-               gen_pool_create(PAGE_SHIFT, dev_to_node(&dev->dev));
+       binding->chunk_pool = gen_pool_create(PAGE_SHIFT,
+                                             dev_to_node(&dev->dev));
        if (!binding->chunk_pool) {
                err = -ENOMEM;
                goto err_unmap;
        }
 
+       if (direction == DMA_TO_DEVICE) {
+               binding->tx_vec = kvmalloc_array(dmabuf->size / PAGE_SIZE,
+                                                sizeof(struct net_iov *),
+                                                GFP_KERNEL);
+               if (!binding->tx_vec) {
+                       err = -ENOMEM;
+                       goto err_free_chunks;
+               }
+       }
+
        virtual = 0;
        for_each_sgtable_dma_sg(binding->sgt, sg, sg_idx) {
                dma_addr_t dma_addr = sg_dma_address(sg);
@@ -300,6 +319,8 @@ net_devmem_bind_dmabuf(struct net_device *dev, unsigned int dmabuf_fd,
                        niov->owner = &owner->area;
                        page_pool_set_dma_addr_netmem(net_iov_to_netmem(niov),
                                                      net_devmem_get_dma_addr(niov));
+                       if (direction == DMA_TO_DEVICE)
+                               binding->tx_vec[owner->area.base_virtual / PAGE_SIZE + i] = niov;
                }
 
                virtual += len;
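tx_vec holds one struct net_iov pointer per PAGE_SIZE of the dmabuf, and each niov is stored at its page index within the buffer, owner->area.base_virtual / PAGE_SIZE + i. A small standalone demonstration of that flat-index arithmetic; all sizes and offsets below are made-up example values:

```c
/* Standalone illustration of the tx_vec indexing assumed above. */
#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE 4096UL  /* example page size */

int main(void)
{
        size_t dmabuf_size  = 8UL * 1024 * 1024;       /* 8 MiB dmabuf          */
        size_t nr_slots     = dmabuf_size / PAGE_SIZE; /* tx_vec entries        */
        size_t base_virtual = 2UL * 1024 * 1024;       /* chunk starts 2 MiB in */
        size_t i            = 5;                       /* 6th niov in the chunk */

        /* Fill-side index: page offset of the chunk plus niov index. */
        size_t fill_idx = base_virtual / PAGE_SIZE + i;

        /* Lookup-side index for an arbitrary byte offset into the dmabuf. */
        size_t virt_addr  = base_virtual + i * PAGE_SIZE + 123;
        size_t lookup_idx = virt_addr / PAGE_SIZE;

        printf("tx_vec has %zu slots\n", nr_slots);
        printf("fill idx = %zu, lookup idx = %zu (match: %s)\n",
               fill_idx, lookup_idx, fill_idx == lookup_idx ? "yes" : "no");
        return 0;
}
```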
@@ -311,6 +332,8 @@ net_devmem_bind_dmabuf(struct net_device *dev, unsigned int dmabuf_fd,
        gen_pool_for_each_chunk(binding->chunk_pool,
                                net_devmem_dmabuf_free_chunk_owner, NULL);
        gen_pool_destroy(binding->chunk_pool);
+
+       kvfree(binding->tx_vec);
 err_unmap:
        dma_buf_unmap_attachment_unlocked(binding->attachment, binding->sgt,
                                          DMA_FROM_DEVICE);
@@ -325,6 +348,21 @@ net_devmem_bind_dmabuf(struct net_device *dev, unsigned int dmabuf_fd,
        return ERR_PTR(err);
 }
 
+struct net_devmem_dmabuf_binding *net_devmem_lookup_dmabuf(u32 id)
+{
+       struct net_devmem_dmabuf_binding *binding;
+
+       rcu_read_lock();
+       binding = xa_load(&net_devmem_dmabuf_bindings, id);
+       if (binding) {
+               if (!net_devmem_dmabuf_binding_get(binding))
+                       binding = NULL;
+       }
+       rcu_read_unlock();
+
+       return binding;
+}
+
 void net_devmem_get_net_iov(struct net_iov *niov)
 {
        net_devmem_dmabuf_binding_get(net_devmem_iov_binding(niov));
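net_devmem_lookup_dmabuf() is the reader-side counterpart of the teardown ordering above: under rcu_read_lock() the xa_load() may race with xa_erase(), so the reference must be taken with a try-get that can fail once the last reference is gone (net_devmem_dmabuf_binding_get() presumably wraps refcount_inc_not_zero(); its definition is not part of this diff). A reader-side sketch using the same illustrative struct my_binding:

```c
/* Sketch only: a lookup that tolerates racing with removal.
 * my_binding, my_binding_get and my_binding_lookup are illustrative names.
 */
#include <linux/xarray.h>
#include <linux/rcupdate.h>
#include <linux/refcount.h>

struct my_binding {
        u32 id;
        refcount_t ref;
};

static bool my_binding_get(struct my_binding *b)
{
        /* Fails if the last reference is already gone. */
        return refcount_inc_not_zero(&b->ref);
}

static struct my_binding *my_binding_lookup(struct xarray *xa, u32 id)
{
        struct my_binding *b;

        rcu_read_lock();
        b = xa_load(xa, id);            /* may race with xa_erase()      */
        if (b && !my_binding_get(b))    /* keep it only if still alive   */
                b = NULL;
        rcu_read_unlock();

        return b;                       /* caller must drop the ref      */
}
```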
@@ -335,6 +373,53 @@ void net_devmem_put_net_iov(struct net_iov *niov)
        net_devmem_dmabuf_binding_put(net_devmem_iov_binding(niov));
 }
 
+struct net_devmem_dmabuf_binding *net_devmem_get_binding(struct sock *sk,
+                                                        unsigned int dmabuf_id)
+{
+       struct net_devmem_dmabuf_binding *binding;
+       struct dst_entry *dst = __sk_dst_get(sk);
+       int err = 0;
+
+       binding = net_devmem_lookup_dmabuf(dmabuf_id);
+       if (!binding || !binding->tx_vec) {
+               err = -EINVAL;
+               goto out_err;
+       }
+
+       /* The dma-addrs in this binding are only reachable to the corresponding
+        * net_device.
+        */
+       if (!dst || !dst->dev || dst->dev->ifindex != binding->dev->ifindex) {
+               err = -ENODEV;
+               goto out_err;
+       }
+
+       return binding;
+
+out_err:
+       if (binding)
+               net_devmem_dmabuf_binding_put(binding);
+
+       return ERR_PTR(err);
+}
+
+struct net_iov *
+net_devmem_get_niov_at(struct net_devmem_dmabuf_binding *binding,
+                      size_t virt_addr, size_t *off, size_t *size)
+{
+       size_t idx;
+
+       if (virt_addr >= binding->dmabuf->size)
+               return NULL;
+
+       idx = virt_addr / PAGE_SIZE;
+
+       *off = virt_addr % PAGE_SIZE;
+       *size = PAGE_SIZE - *off;
+
+       return binding->tx_vec[idx];
+}
+
 /*** "Dmabuf devmem memory provider" ***/
 
 int mp_dmabuf_devmem_init(struct page_pool *pool)
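net_devmem_get_niov_at() maps a byte offset in the dmabuf to the covering niov plus the offset into that page (*off) and the bytes remaining in it (*size), which lets a caller walk an arbitrary span page by page. A hedged sketch of such a walk; devmem_walk_span() is a made-up caller, assumes the declarations added by this patch, and stands in for whatever the TX path actually does with each niov:

```c
/* Illustrative walk over [virt_addr, virt_addr + len) using the semantics of
 * net_devmem_get_niov_at(). Not part of the patch.
 */
static int devmem_walk_span(struct net_devmem_dmabuf_binding *binding,
                            size_t virt_addr, size_t len)
{
        while (len) {
                size_t off, size;
                struct net_iov *niov;

                niov = net_devmem_get_niov_at(binding, virt_addr, &off, &size);
                if (!niov)
                        return -EFAULT; /* offset past the end of the dmabuf */

                size = min(size, len);  /* don't run past the request */

                /* ... hand niov at (off, size) to the caller's TX machinery ... */

                virt_addr += size;
                len -= size;
        }
        return 0;
}
```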