@@ -56,7 +56,7 @@ static void
 create_mkey_callback(int status, struct mlx5_async_work *context);
 static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, struct ib_umem *umem,
				      u64 iova, int access_flags,
-				      unsigned int page_size, bool populate,
+				      unsigned long page_size, bool populate,
				      int access_mode);
 static int __mlx5_ib_dereg_mr(struct ib_mr *ibmr);
 
@@ -1125,7 +1125,7 @@ static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd,
 	struct mlx5r_cache_rb_key rb_key = {};
 	struct mlx5_cache_ent *ent;
 	struct mlx5_ib_mr *mr;
-	unsigned int page_size;
+	unsigned long page_size;
 
 	if (umem->is_dmabuf)
 		page_size = mlx5_umem_dmabuf_default_pgsz(umem, iova);
@@ -1229,7 +1229,7 @@ reg_create_crossing_vhca_mr(struct ib_pd *pd, u64 iova, u64 length, int access_f
  */
 static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, struct ib_umem *umem,
				     u64 iova, int access_flags,
-				     unsigned int page_size, bool populate,
+				     unsigned long page_size, bool populate,
				     int access_mode)
 {
 	struct mlx5_ib_dev *dev = to_mdev(pd->device);
@@ -1435,7 +1435,7 @@ static struct ib_mr *create_real_mr(struct ib_pd *pd, struct ib_umem *umem,
 		mr = alloc_cacheable_mr(pd, umem, iova, access_flags,
					MLX5_MKC_ACCESS_MODE_MTT);
 	} else {
-		unsigned int page_size =
+		unsigned long page_size =
			mlx5_umem_mkc_find_best_pgsz(dev, umem, iova);
 
 		mutex_lock(&dev->slow_path_mutex);
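All four hunks make the same change: page_size is widened from unsigned int to unsigned long. As a minimal sketch of the motivation (not part of the patch): the RDMA core page-size helpers such as ib_umem_find_best_pgsz() return an unsigned long, and on a 64-bit kernel the selected page size can be 4 GiB or larger, so storing the result in an unsigned int silently truncates it. The 4 GiB value below is a hypothetical example compiled as a standalone userspace program.

#include <stdio.h>

int main(void)
{
	/* Hypothetical best page size of 4 GiB chosen by the pgsz helper. */
	unsigned long best_pgsz = 1UL << 32;

	unsigned int as_uint = best_pgsz;	/* old type: truncates to 0 */
	unsigned long as_ulong = best_pgsz;	/* new type: value preserved */

	printf("unsigned int : %u\n", as_uint);		/* prints 0 */
	printf("unsigned long: %lu\n", as_ulong);	/* prints 4294967296 */
	return 0;
}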