@@ -2793,6 +2793,48 @@ static int irdma_reg_user_mr_type_mem(struct irdma_mr *iwmr, int access)
 	return err;
 }
 
+static struct irdma_mr *irdma_alloc_iwmr(struct ib_umem *region,
+					 struct ib_pd *pd, u64 virt,
+					 enum irdma_memreg_type reg_type)
+{
+	struct irdma_device *iwdev = to_iwdev(pd->device);
+	struct irdma_pbl *iwpbl = NULL;
+	struct irdma_mr *iwmr = NULL;
+	unsigned long pgsz_bitmap;
+
+	iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
+	if (!iwmr)
+		return ERR_PTR(-ENOMEM);
+
+	iwpbl = &iwmr->iwpbl;
+	iwpbl->iwmr = iwmr;
+	iwmr->region = region;
+	iwmr->ibmr.pd = pd;
+	iwmr->ibmr.device = pd->device;
+	iwmr->ibmr.iova = virt;
+	iwmr->type = reg_type;
+
+	pgsz_bitmap = (reg_type == IRDMA_MEMREG_TYPE_MEM) ?
+		iwdev->rf->sc_dev.hw_attrs.page_size_cap : PAGE_SIZE;
+
+	iwmr->page_size = ib_umem_find_best_pgsz(region, pgsz_bitmap, virt);
+	if (unlikely(!iwmr->page_size)) {
+		kfree(iwmr);
+		return ERR_PTR(-EOPNOTSUPP);
+	}
+
+	iwmr->len = region->length;
+	iwpbl->user_base = virt;
+	iwmr->page_cnt = ib_umem_num_dma_blocks(region, iwmr->page_size);
+
+	return iwmr;
+}
+
+static void irdma_free_iwmr(struct irdma_mr *iwmr)
+{
+	kfree(iwmr);
+}
+
 /**
  * irdma_reg_user_mr - Register a user memory region
  * @pd: ptr of pd
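Note: the new helper reports failures through the kernel's error-pointer encoding from <linux/err.h> (ERR_PTR()/IS_ERR()/PTR_ERR()), which stuffs a negative errno into the otherwise-invalid top page of the address space so a single pointer return can carry either a valid object or a precise failure reason. Below is a minimal userspace sketch of that idiom for illustration only; the three helpers are re-implementations, not the kernel header, and alloc_or_err() is a hypothetical stand-in for irdma_alloc_iwmr():

#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO	4095	/* same bound the kernel uses */

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	/* error pointers occupy the top MAX_ERRNO bytes of the address space */
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *alloc_or_err(int fail)
{
	if (fail)
		return ERR_PTR(-ENOMEM);	/* encode the precise errno */
	return "a valid object";
}

int main(void)
{
	void *p = alloc_or_err(1);

	if (IS_ERR(p))
		printf("allocation failed: errno %ld\n", -PTR_ERR(p));
	return 0;
}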
@@ -2838,34 +2880,13 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
 		return ERR_PTR(-EFAULT);
 	}
 
-	iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
-	if (!iwmr) {
+	iwmr = irdma_alloc_iwmr(region, pd, virt, req.reg_type);
+	if (IS_ERR(iwmr)) {
 		ib_umem_release(region);
-		return ERR_PTR(-ENOMEM);
+		return (struct ib_mr *)iwmr;
 	}
 
 	iwpbl = &iwmr->iwpbl;
-	iwpbl->iwmr = iwmr;
-	iwmr->region = region;
-	iwmr->ibmr.pd = pd;
-	iwmr->ibmr.device = pd->device;
-	iwmr->ibmr.iova = virt;
-	iwmr->page_size = PAGE_SIZE;
-
-	if (req.reg_type == IRDMA_MEMREG_TYPE_MEM) {
-		iwmr->page_size = ib_umem_find_best_pgsz(region,
-							 iwdev->rf->sc_dev.hw_attrs.page_size_cap,
-							 virt);
-		if (unlikely(!iwmr->page_size)) {
-			kfree(iwmr);
-			ib_umem_release(region);
-			return ERR_PTR(-EOPNOTSUPP);
-		}
-	}
-	iwmr->len = region->length;
-	iwpbl->user_base = virt;
-	iwmr->type = req.reg_type;
-	iwmr->page_cnt = ib_umem_num_dma_blocks(region, iwmr->page_size);
 
 	switch (req.reg_type) {
 	case IRDMA_MEMREG_TYPE_QP:
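Note: the cast in "return (struct ib_mr *)iwmr;" simply forwards the encoded errno to the caller as the expected return type. <linux/err.h> also provides ERR_CAST() for exactly this pointer-type conversion; an equivalent spelling of the same call site for comparison (an alternative, not what this patch uses):

	iwmr = irdma_alloc_iwmr(region, pd, virt, req.reg_type);
	if (IS_ERR(iwmr)) {
		ib_umem_release(region);
		return ERR_CAST(iwmr);	/* same bits as the explicit cast */
	}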
@@ -2918,13 +2939,10 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
 		goto error;
 	}
 
-	iwmr->type = req.reg_type;
-
 	return &iwmr->ibmr;
-
 error:
 	ib_umem_release(region);
-	kfree(iwmr);
+	irdma_free_iwmr(iwmr);
 
 	return ERR_PTR(err);
 }
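Ownership note on the split: irdma_alloc_iwmr() undoes only its own work on its internal failure path (kfree(iwmr) when no supported page size is found) and never touches the umem, so ib_umem_release(region) remains the caller's responsibility on every error path, as seen in both hunks above. irdma_free_iwmr() is a one-line wrapper today, but it gives the allocation helper a symmetric teardown point should more per-MR state move into irdma_alloc_iwmr() later.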