@@ -1,20 +1,17 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2015-2016, Linaro Limited
+ * Copyright (c) 2015-2017, 2019-2021 Linaro Limited
  */
+#include <linux/anon_inodes.h>
 #include <linux/device.h>
-#include <linux/dma-buf.h>
-#include <linux/fdtable.h>
 #include <linux/idr.h>
+#include <linux/mm.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/tee_drv.h>
 #include <linux/uio.h>
-#include <linux/module.h>
 #include "tee_private.h"
 
-MODULE_IMPORT_NS(DMA_BUF);
-
 static void release_registered_pages(struct tee_shm *shm)
 {
 	if (shm->pages) {
@@ -31,16 +28,8 @@ static void release_registered_pages(struct tee_shm *shm)
 	}
 }
 
-static void tee_shm_release(struct tee_shm *shm)
+static void tee_shm_release(struct tee_device *teedev, struct tee_shm *shm)
 {
-	struct tee_device *teedev = shm->ctx->teedev;
-
-	if (shm->flags & TEE_SHM_DMA_BUF) {
-		mutex_lock(&teedev->mutex);
-		idr_remove(&teedev->idr, shm->id);
-		mutex_unlock(&teedev->mutex);
-	}
-
 	if (shm->flags & TEE_SHM_POOL) {
 		struct tee_shm_pool_mgr *poolm;
 
@@ -67,45 +56,6 @@ static void tee_shm_release(struct tee_shm *shm)
 	tee_device_put(teedev);
 }
 
-static struct sg_table *tee_shm_op_map_dma_buf(struct dma_buf_attachment
-			*attach, enum dma_data_direction dir)
-{
-	return NULL;
-}
-
-static void tee_shm_op_unmap_dma_buf(struct dma_buf_attachment *attach,
-				     struct sg_table *table,
-				     enum dma_data_direction dir)
-{
-}
-
-static void tee_shm_op_release(struct dma_buf *dmabuf)
-{
-	struct tee_shm *shm = dmabuf->priv;
-
-	tee_shm_release(shm);
-}
-
-static int tee_shm_op_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
-{
-	struct tee_shm *shm = dmabuf->priv;
-	size_t size = vma->vm_end - vma->vm_start;
-
-	/* Refuse sharing shared memory provided by application */
-	if (shm->flags & TEE_SHM_USER_MAPPED)
-		return -EINVAL;
-
-	return remap_pfn_range(vma, vma->vm_start, shm->paddr >> PAGE_SHIFT,
-			       size, vma->vm_page_prot);
-}
-
-static const struct dma_buf_ops tee_shm_dma_buf_ops = {
-	.map_dma_buf = tee_shm_op_map_dma_buf,
-	.unmap_dma_buf = tee_shm_op_unmap_dma_buf,
-	.release = tee_shm_op_release,
-	.mmap = tee_shm_op_mmap,
-};
-
 struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags)
 {
 	struct tee_device *teedev = ctx->teedev;
@@ -140,6 +90,7 @@ struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags)
 		goto err_dev_put;
 	}
 
+	refcount_set(&shm->refcount, 1);
 	shm->flags = flags | TEE_SHM_POOL;
 	shm->ctx = ctx;
 	if (flags & TEE_SHM_DMA_BUF)
@@ -153,39 +104,19 @@ struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags)
 		goto err_kfree;
 	}
 
-
 	if (flags & TEE_SHM_DMA_BUF) {
-		DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
-
 		mutex_lock(&teedev->mutex);
 		shm->id = idr_alloc(&teedev->idr, shm, 1, 0, GFP_KERNEL);
 		mutex_unlock(&teedev->mutex);
 		if (shm->id < 0) {
 			ret = ERR_PTR(shm->id);
 			goto err_pool_free;
 		}
-
-		exp_info.ops = &tee_shm_dma_buf_ops;
-		exp_info.size = shm->size;
-		exp_info.flags = O_RDWR;
-		exp_info.priv = shm;
-
-		shm->dmabuf = dma_buf_export(&exp_info);
-		if (IS_ERR(shm->dmabuf)) {
-			ret = ERR_CAST(shm->dmabuf);
-			goto err_rem;
-		}
 	}
 
 	teedev_ctx_get(ctx);
 
 	return shm;
-err_rem:
-	if (flags & TEE_SHM_DMA_BUF) {
-		mutex_lock(&teedev->mutex);
-		idr_remove(&teedev->idr, shm->id);
-		mutex_unlock(&teedev->mutex);
-	}
 err_pool_free:
 	poolm->ops->free(poolm, shm);
 err_kfree:
@@ -246,6 +177,7 @@ struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr,
 		goto err;
 	}
 
+	refcount_set(&shm->refcount, 1);
 	shm->flags = flags | TEE_SHM_REGISTER;
 	shm->ctx = ctx;
 	shm->id = -1;
@@ -306,22 +238,6 @@ struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr,
 		goto err;
 	}
 
-	if (flags & TEE_SHM_DMA_BUF) {
-		DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
-
-		exp_info.ops = &tee_shm_dma_buf_ops;
-		exp_info.size = shm->size;
-		exp_info.flags = O_RDWR;
-		exp_info.priv = shm;
-
-		shm->dmabuf = dma_buf_export(&exp_info);
-		if (IS_ERR(shm->dmabuf)) {
-			ret = ERR_CAST(shm->dmabuf);
-			teedev->desc->ops->shm_unregister(ctx, shm);
-			goto err;
-		}
-	}
-
 	return shm;
 err:
 	if (shm) {
@@ -339,6 +255,35 @@ struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr,
 }
 EXPORT_SYMBOL_GPL(tee_shm_register);
 
+static int tee_shm_fop_release(struct inode *inode, struct file *filp)
+{
+	tee_shm_put(filp->private_data);
+	return 0;
+}
+
+static int tee_shm_fop_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	struct tee_shm *shm = filp->private_data;
+	size_t size = vma->vm_end - vma->vm_start;
+
+	/* Refuse sharing shared memory provided by application */
+	if (shm->flags & TEE_SHM_USER_MAPPED)
+		return -EINVAL;
+
+	/* check for overflowing the buffer's size */
+	if (vma->vm_pgoff + vma_pages(vma) > shm->size >> PAGE_SHIFT)
+		return -EINVAL;
+
+	return remap_pfn_range(vma, vma->vm_start, shm->paddr >> PAGE_SHIFT,
+			       size, vma->vm_page_prot);
+}
+
+static const struct file_operations tee_shm_fops = {
+	.owner = THIS_MODULE,
+	.release = tee_shm_fop_release,
+	.mmap = tee_shm_fop_mmap,
+};
+
 /**
  * tee_shm_get_fd() - Increase reference count and return file descriptor
  * @shm:	Shared memory handle
@@ -351,10 +296,11 @@ int tee_shm_get_fd(struct tee_shm *shm)
 	if (!(shm->flags & TEE_SHM_DMA_BUF))
 		return -EINVAL;
 
-	get_dma_buf(shm->dmabuf);
-	fd = dma_buf_fd(shm->dmabuf, O_CLOEXEC);
+	/* matched by tee_shm_put() in tee_shm_fop_release() */
+	refcount_inc(&shm->refcount);
+	fd = anon_inode_getfd("tee_shm", &tee_shm_fops, shm, O_RDWR);
 	if (fd < 0)
-		dma_buf_put(shm->dmabuf);
+		tee_shm_put(shm);
 	return fd;
 }
 
@@ -364,17 +310,7 @@ int tee_shm_get_fd(struct tee_shm *shm)
  */
 void tee_shm_free(struct tee_shm *shm)
 {
-	/*
-	 * dma_buf_put() decreases the dmabuf reference counter and will
-	 * call tee_shm_release() when the last reference is gone.
-	 *
-	 * In the case of driver private memory we call tee_shm_release
-	 * directly instead as it doesn't have a reference counter.
-	 */
-	if (shm->flags & TEE_SHM_DMA_BUF)
-		dma_buf_put(shm->dmabuf);
-	else
-		tee_shm_release(shm);
+	tee_shm_put(shm);
 }
 EXPORT_SYMBOL_GPL(tee_shm_free);
 
@@ -481,10 +417,15 @@ struct tee_shm *tee_shm_get_from_id(struct tee_context *ctx, int id)
 	teedev = ctx->teedev;
 	mutex_lock(&teedev->mutex);
 	shm = idr_find(&teedev->idr, id);
+	/*
+	 * If the tee_shm was found in the IDR it must have a refcount
+	 * larger than 0 due to the guarantee in tee_shm_put() below. So
+	 * it's safe to use refcount_inc().
+	 */
 	if (!shm || shm->ctx != ctx)
 		shm = ERR_PTR(-EINVAL);
-	else if (shm->flags & TEE_SHM_DMA_BUF)
-		get_dma_buf(shm->dmabuf);
+	else
+		refcount_inc(&shm->refcount);
 	mutex_unlock(&teedev->mutex);
 	return shm;
 }
@@ -496,7 +437,24 @@ EXPORT_SYMBOL_GPL(tee_shm_get_from_id);
  */
 void tee_shm_put(struct tee_shm *shm)
 {
-	if (shm->flags & TEE_SHM_DMA_BUF)
-		dma_buf_put(shm->dmabuf);
+	struct tee_device *teedev = shm->ctx->teedev;
+	bool do_release = false;
+
+	mutex_lock(&teedev->mutex);
+	if (refcount_dec_and_test(&shm->refcount)) {
+		/*
+		 * refcount has reached 0, we must now remove it from the
+		 * IDR before releasing the mutex. This will guarantee that
+		 * the refcount_inc() in tee_shm_get_from_id() never starts
+		 * from 0.
+		 */
+		if (shm->flags & TEE_SHM_DMA_BUF)
+			idr_remove(&teedev->idr, shm->id);
+		do_release = true;
+	}
+	mutex_unlock(&teedev->mutex);
+
+	if (do_release)
+		tee_shm_release(teedev, shm);
 }
 EXPORT_SYMBOL_GPL(tee_shm_put);
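
Note (not part of the patch): the comments added in tee_shm_get_from_id() and tee_shm_put() above describe a lookup-table-plus-refcount pattern whose correctness hinges on unpublishing the object under the same mutex that lookups take their reference under. The userspace C sketch below is a rough, hypothetical analogue of that pattern only: a pthread mutex stands in for teedev->mutex, a fixed array for the IDR, a plain int for refcount_t, and obj_get()/obj_put() are made-up names rather than driver API. Its point is to show why a successful lookup can never increment a reference count that has already reached zero.

/*
 * Hypothetical userspace analogue of the pattern above -- NOT kernel code.
 * table[] stands in for the IDR, table_lock for teedev->mutex, and the
 * plain int refcount for refcount_t.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
	int id;
	int refcount;		/* the 0 transition is protected by table_lock */
};

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct obj *table[16];	/* stand-in for the kernel IDR */

static struct obj *obj_get(int id)
{
	struct obj *o;

	pthread_mutex_lock(&table_lock);
	o = (id >= 0 && id < 16) ? table[id] : NULL;
	if (o)
		o->refcount++;	/* never increments from 0: still published */
	pthread_mutex_unlock(&table_lock);
	return o;
}

static void obj_put(struct obj *o)
{
	int do_release = 0;

	pthread_mutex_lock(&table_lock);
	if (--o->refcount == 0) {
		table[o->id] = NULL;	/* unpublish before dropping the lock */
		do_release = 1;
	}
	pthread_mutex_unlock(&table_lock);

	if (do_release)
		free(o);		/* actual release happens outside the lock */
}

int main(void)
{
	struct obj *o = calloc(1, sizeof(*o));

	o->id = 1;
	o->refcount = 1;	/* initial reference held by the creator */
	table[o->id] = o;

	obj_put(obj_get(1));	/* lookup + drop: object stays alive */
	obj_put(o);		/* drop the initial reference: freed */
	printf("done\n");
	return 0;
}

The same reasoning applies in the patch: tee_shm_put() removes the shm from the IDR before releasing teedev->mutex, so the refcount_inc() in tee_shm_get_from_id() always starts from a nonzero count, and tee_shm_release() runs only after the object can no longer be looked up.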