@@ -250,16 +250,11 @@ static void set_ep_sin6_addrs(struct c4iw_ep *ep,
 	}
 }
 
-static int dump_qp(int id, void *p, void *data)
+static int dump_qp(struct c4iw_qp *qp, struct c4iw_debugfs_data *qpd)
 {
-	struct c4iw_qp *qp = p;
-	struct c4iw_debugfs_data *qpd = data;
 	int space;
 	int cc;
 
-	if (id != qp->wq.sq.qid)
-		return 0;
-
 	space = qpd->bufsize - qpd->pos - 1;
 	if (space == 0)
 		return 1;
@@ -335,7 +330,9 @@ static int qp_release(struct inode *inode, struct file *file)
 
 static int qp_open(struct inode *inode, struct file *file)
 {
+	struct c4iw_qp *qp;
 	struct c4iw_debugfs_data *qpd;
+	unsigned long index;
 	int count = 1;
 
 	qpd = kmalloc(sizeof *qpd, GFP_KERNEL);
@@ -345,9 +342,12 @@ static int qp_open(struct inode *inode, struct file *file)
 	qpd->devp = inode->i_private;
 	qpd->pos = 0;
 
-	spin_lock_irq(&qpd->devp->lock);
-	idr_for_each(&qpd->devp->qpidr, count_idrs, &count);
-	spin_unlock_irq(&qpd->devp->lock);
+	/*
+	 * No need to lock; we drop the lock to call vmalloc so it's racy
+	 * anyway.  Someone who cares should switch this over to seq_file
+	 */
+	xa_for_each(&qpd->devp->qps, index, qp)
+		count++;
 
 	qpd->bufsize = count * 180;
 	qpd->buf = vmalloc(qpd->bufsize);
@@ -356,9 +356,10 @@ static int qp_open(struct inode *inode, struct file *file)
 		return -ENOMEM;
 	}
 
-	spin_lock_irq(&qpd->devp->lock);
-	idr_for_each(&qpd->devp->qpidr, dump_qp, qpd);
-	spin_unlock_irq(&qpd->devp->lock);
+	xa_lock_irq(&qpd->devp->qps);
+	xa_for_each(&qpd->devp->qps, index, qp)
+		dump_qp(qp, qpd);
+	xa_unlock_irq(&qpd->devp->qps);
 
 	qpd->buf[qpd->pos++] = 0;
 	file->private_data = qpd;
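
The two qp_open() hunks above amount to a two-pass dump: an unlocked xa_for_each() to size the buffer (the count may race, as the new comment concedes, and vmalloc() may sleep so the lock could not be held across it anyway), then a locked xa_for_each() so no entry is erased mid-dump. A minimal sketch of that shape, assuming a made-up my_dev/my_obj pair and a fixed per-entry byte budget (none of these names are driver symbols):

```c
#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include <linux/xarray.h>

struct my_obj {
	int id;
};

struct my_dev {
	struct xarray objs;	/* initialised with XA_FLAGS_LOCK_IRQ */
};

static int dump_one(struct my_obj *obj, char *p)
{
	return sprintf(p, "obj id %d\n", obj->id);
}

/* @per_entry must be at least the worst-case dump_one() output. */
static char *dump_all(struct my_dev *dev, size_t per_entry, size_t *len)
{
	struct my_obj *obj;
	unsigned long index;
	size_t count = 1;	/* slack for the trailing NUL */
	char *buf;

	/* Pass 1, unlocked: racy, but we only need a size hint. */
	xa_for_each(&dev->objs, index, obj)
		count++;

	buf = vmalloc(count * per_entry);	/* may sleep; no lock held */
	if (!buf)
		return NULL;

	/* Pass 2, locked: entries cannot be erased mid-dump. */
	*len = 0;
	xa_lock_irq(&dev->objs);
	xa_for_each(&dev->objs, index, obj) {
		if (*len + per_entry > count * per_entry)
			break;			/* array grew since pass 1 */
		*len += dump_one(obj, buf + *len);
	}
	xa_unlock_irq(&dev->objs);
	return buf;
}
```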
@@ -932,8 +933,7 @@ void c4iw_dealloc(struct uld_ctx *ctx)
 {
 	c4iw_rdev_close(&ctx->dev->rdev);
 	WARN_ON(!xa_empty(&ctx->dev->cqs));
-	WARN_ON_ONCE(!idr_is_empty(&ctx->dev->qpidr));
-	idr_destroy(&ctx->dev->qpidr);
+	WARN_ON(!xa_empty(&ctx->dev->qps));
 	WARN_ON_ONCE(!idr_is_empty(&ctx->dev->mmidr));
 	idr_destroy(&ctx->dev->mmidr);
 	wait_event(ctx->dev->wait, idr_is_empty(&ctx->dev->hwtid_idr));
@@ -1044,7 +1044,7 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
 	}
 
 	xa_init_flags(&devp->cqs, XA_FLAGS_LOCK_IRQ);
-	idr_init(&devp->qpidr);
+	xa_init_flags(&devp->qps, XA_FLAGS_LOCK_IRQ);
 	idr_init(&devp->mmidr);
 	idr_init(&devp->hwtid_idr);
 	idr_init(&devp->stid_idr);
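
XA_FLAGS_LOCK_IRQ marks the XArray's built-in spinlock as one taken with interrupts disabled, which is what lets the rest of the patch retire the separate devp->lock and use xa_lock_irq()/xa_lock_irqsave() directly; it also explains the c4iw_dealloc() hunk, since an emptied XArray holds no allocated nodes and needs no idr_destroy() equivalent. The store/erase side lives outside this file; a sketch of how such an XArray is presumably fed, with the hypothetical my_dev/my_obj names again:

```c
#include <linux/types.h>
#include <linux/xarray.h>

struct my_obj;

struct my_dev {
	struct xarray objs;
};

static void my_dev_init(struct my_dev *dev)
{
	/* Lock-class annotation: this XArray's lock is taken with
	 * IRQs disabled, so lockdep checks every user does so. */
	xa_init_flags(&dev->objs, XA_FLAGS_LOCK_IRQ);
}

static int track_obj(struct my_dev *dev, u32 id, struct my_obj *obj)
{
	/* xa_insert_irq() wraps the store in xa_lock_irq()/xa_unlock_irq()
	 * and fails if @id is already occupied. */
	return xa_insert_irq(&dev->objs, id, obj, GFP_KERNEL);
}

static void untrack_obj(struct my_dev *dev, u32 id)
{
	xa_erase_irq(&dev->objs, id);
}
```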
@@ -1264,34 +1264,21 @@ static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state)
 	return 0;
 }
 
-static int disable_qp_db(int id, void *p, void *data)
-{
-	struct c4iw_qp *qp = p;
-
-	t4_disable_wq_db(&qp->wq);
-	return 0;
-}
-
 static void stop_queues(struct uld_ctx *ctx)
 {
-	unsigned long flags;
+	struct c4iw_qp *qp;
+	unsigned long index, flags;
 
-	spin_lock_irqsave(&ctx->dev->lock, flags);
+	xa_lock_irqsave(&ctx->dev->qps, flags);
 	ctx->dev->rdev.stats.db_state_transitions++;
 	ctx->dev->db_state = STOPPED;
-	if (ctx->dev->rdev.flags & T4_STATUS_PAGE_DISABLED)
-		idr_for_each(&ctx->dev->qpidr, disable_qp_db, NULL);
-	else
+	if (ctx->dev->rdev.flags & T4_STATUS_PAGE_DISABLED) {
+		xa_for_each(&ctx->dev->qps, index, qp)
+			t4_disable_wq_db(&qp->wq);
+	} else {
 		ctx->dev->rdev.status_page->db_off = 1;
-	spin_unlock_irqrestore(&ctx->dev->lock, flags);
-}
-
-static int enable_qp_db(int id, void *p, void *data)
-{
-	struct c4iw_qp *qp = p;
-
-	t4_enable_wq_db(&qp->wq);
-	return 0;
+	}
+	xa_unlock_irqrestore(&ctx->dev->qps, flags);
 }
 
 static void resume_rc_qp(struct c4iw_qp *qp)
@@ -1321,18 +1308,21 @@ static void resume_a_chunk(struct uld_ctx *ctx)
 
 static void resume_queues(struct uld_ctx *ctx)
 {
-	spin_lock_irq(&ctx->dev->lock);
+	xa_lock_irq(&ctx->dev->qps);
 	if (ctx->dev->db_state != STOPPED)
 		goto out;
 	ctx->dev->db_state = FLOW_CONTROL;
 	while (1) {
 		if (list_empty(&ctx->dev->db_fc_list)) {
+			struct c4iw_qp *qp;
+			unsigned long index;
+
 			WARN_ON(ctx->dev->db_state != FLOW_CONTROL);
 			ctx->dev->db_state = NORMAL;
 			ctx->dev->rdev.stats.db_state_transitions++;
 			if (ctx->dev->rdev.flags & T4_STATUS_PAGE_DISABLED) {
-				idr_for_each(&ctx->dev->qpidr, enable_qp_db,
-					     NULL);
+				xa_for_each(&ctx->dev->qps, index, qp)
+					t4_enable_wq_db(&qp->wq);
 			} else {
 				ctx->dev->rdev.status_page->db_off = 0;
 			}
@@ -1344,12 +1334,12 @@ static void resume_queues(struct uld_ctx *ctx)
 			resume_a_chunk(ctx);
 		}
 		if (!list_empty(&ctx->dev->db_fc_list)) {
-			spin_unlock_irq(&ctx->dev->lock);
+			xa_unlock_irq(&ctx->dev->qps);
 			if (DB_FC_RESUME_DELAY) {
 				set_current_state(TASK_UNINTERRUPTIBLE);
 				schedule_timeout(DB_FC_RESUME_DELAY);
 			}
-			spin_lock_irq(&ctx->dev->lock);
+			xa_lock_irq(&ctx->dev->qps);
 			if (ctx->dev->db_state != FLOW_CONTROL)
 				break;
 		}
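
This hunk preserves a constraint the old code already honoured: schedule_timeout() sleeps, and xa_lock_irq() is a spinlock, so the delay has to happen with the lock dropped and db_state must be rechecked once the lock is retaken, since anything may have changed in between. The same shape in isolation (my_dev and MY_FLOW_CONTROL are hypothetical stand-ins for the driver's device struct and db_state value):

```c
#include <linux/sched.h>
#include <linux/xarray.h>

struct my_dev {
	struct xarray objs;
	int state;			/* stand-in for db_state */
};

#define MY_FLOW_CONTROL	1

/* Sleep for @delay jiffies without the XArray lock, retake it, and
 * report whether the state survived the nap unchanged. */
static bool sleep_and_revalidate(struct my_dev *dev, long delay)
{
	xa_unlock_irq(&dev->objs);	/* never sleep under a spinlock */
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule_timeout(delay);
	xa_lock_irq(&dev->objs);
	return dev->state == MY_FLOW_CONTROL;
}
```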
@@ -1358,31 +1348,14 @@ static void resume_queues(struct uld_ctx *ctx)
 out:
 	if (ctx->dev->db_state != NORMAL)
 		ctx->dev->rdev.stats.db_fc_interruptions++;
-	spin_unlock_irq(&ctx->dev->lock);
+	xa_unlock_irq(&ctx->dev->qps);
 }
 
 struct qp_list {
 	unsigned idx;
 	struct c4iw_qp **qps;
 };
 
-static int add_and_ref_qp(int id, void *p, void *data)
-{
-	struct qp_list *qp_listp = data;
-	struct c4iw_qp *qp = p;
-
-	c4iw_qp_add_ref(&qp->ibqp);
-	qp_listp->qps[qp_listp->idx++] = qp;
-	return 0;
-}
-
-static int count_qps(int id, void *p, void *data)
-{
-	unsigned *countp = data;
-	(*countp)++;
-	return 0;
-}
-
 static void deref_qps(struct qp_list *qp_list)
 {
 	int idx;
@@ -1399,7 +1372,7 @@ static void recover_lost_dbs(struct uld_ctx *ctx, struct qp_list *qp_list)
 	for (idx = 0; idx < qp_list->idx; idx++) {
 		struct c4iw_qp *qp = qp_list->qps[idx];
 
-		spin_lock_irq(&qp->rhp->lock);
+		xa_lock_irq(&qp->rhp->qps);
 		spin_lock(&qp->lock);
 		ret = cxgb4_sync_txq_pidx(qp->rhp->rdev.lldi.ports[0],
 					  qp->wq.sq.qid,
@@ -1409,7 +1382,7 @@ static void recover_lost_dbs(struct uld_ctx *ctx, struct qp_list *qp_list)
 			pr_err("%s: Fatal error - DB overflow recovery failed - error syncing SQ qid %u\n",
 			       pci_name(ctx->lldi.pdev), qp->wq.sq.qid);
 			spin_unlock(&qp->lock);
-			spin_unlock_irq(&qp->rhp->lock);
+			xa_unlock_irq(&qp->rhp->qps);
 			return;
 		}
 		qp->wq.sq.wq_pidx_inc = 0;
@@ -1423,12 +1396,12 @@ static void recover_lost_dbs(struct uld_ctx *ctx, struct qp_list *qp_list)
 			pr_err("%s: Fatal error - DB overflow recovery failed - error syncing RQ qid %u\n",
 			       pci_name(ctx->lldi.pdev), qp->wq.rq.qid);
 			spin_unlock(&qp->lock);
-			spin_unlock_irq(&qp->rhp->lock);
+			xa_unlock_irq(&qp->rhp->qps);
 			return;
 		}
 		qp->wq.rq.wq_pidx_inc = 0;
 		spin_unlock(&qp->lock);
-		spin_unlock_irq(&qp->rhp->lock);
+		xa_unlock_irq(&qp->rhp->qps);
 
 		/* Wait for the dbfifo to drain */
 		while (cxgb4_dbfifo_count(qp->rhp->rdev.lldi.ports[0], 1) > 0) {
@@ -1440,6 +1413,8 @@ static void recover_lost_dbs(struct uld_ctx *ctx, struct qp_list *qp_list)
 
 static void recover_queues(struct uld_ctx *ctx)
 {
+	struct c4iw_qp *qp;
+	unsigned long index;
 	int count = 0;
 	struct qp_list qp_list;
 	int ret;
@@ -1457,22 +1432,26 @@ static void recover_queues(struct uld_ctx *ctx)
 	}
 
 	/* Count active queues so we can build a list of queues to recover */
-	spin_lock_irq(&ctx->dev->lock);
+	xa_lock_irq(&ctx->dev->qps);
 	WARN_ON(ctx->dev->db_state != STOPPED);
 	ctx->dev->db_state = RECOVERY;
-	idr_for_each(&ctx->dev->qpidr, count_qps, &count);
+	xa_for_each(&ctx->dev->qps, index, qp)
+		count++;
 
 	qp_list.qps = kcalloc(count, sizeof(*qp_list.qps), GFP_ATOMIC);
 	if (!qp_list.qps) {
-		spin_unlock_irq(&ctx->dev->lock);
+		xa_unlock_irq(&ctx->dev->qps);
 		return;
 	}
 	qp_list.idx = 0;
 
 	/* add and ref each qp so it doesn't get freed */
-	idr_for_each(&ctx->dev->qpidr, add_and_ref_qp, &qp_list);
+	xa_for_each(&ctx->dev->qps, index, qp) {
+		c4iw_qp_add_ref(&qp->ibqp);
+		qp_list.qps[qp_list.idx++] = qp;
+	}
 
-	spin_unlock_irq(&ctx->dev->lock);
+	xa_unlock_irq(&ctx->dev->qps);
 
 	/* now traverse the list in a safe context to recover the db state*/
 	recover_lost_dbs(ctx, &qp_list);
@@ -1481,10 +1460,10 @@ static void recover_queues(struct uld_ctx *ctx)
 	deref_qps(&qp_list);
 	kfree(qp_list.qps);
 
-	spin_lock_irq(&ctx->dev->lock);
+	xa_lock_irq(&ctx->dev->qps);
 	WARN_ON(ctx->dev->db_state != RECOVERY);
 	ctx->dev->db_state = STOPPED;
-	spin_unlock_irq(&ctx->dev->lock);
+	xa_unlock_irq(&ctx->dev->qps);
 }
 
 static int c4iw_uld_control(void *handle, enum cxgb4_control control, ...)
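
The recover_queues() conversion is the template for doing sleeping work against XArray-tracked objects: count and snapshot the entries into a flat array under xa_lock_irq(), pinning each with a reference so nothing is freed underneath, then drop the lock and process the array. A condensed sketch with kref_get() standing in for c4iw_qp_add_ref() (again, my_dev/my_obj are hypothetical, not driver symbols):

```c
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/xarray.h>

struct my_obj {
	struct kref kref;
};

struct my_dev {
	struct xarray objs;
};

/* Snapshot every tracked object into a flat array, holding a reference
 * on each, so the caller can do sleeping work with no lock held.  The
 * caller must later drop each reference and kfree() the array. */
static struct my_obj **snapshot_objs(struct my_dev *dev, unsigned int *n)
{
	struct my_obj *obj, **arr;
	unsigned long index;
	unsigned int count = 0, i = 0;

	xa_lock_irq(&dev->objs);
	xa_for_each(&dev->objs, index, obj)
		count++;
	arr = kcalloc(count, sizeof(*arr), GFP_ATOMIC);	/* lock held: no GFP_KERNEL */
	if (!arr) {
		xa_unlock_irq(&dev->objs);
		return NULL;
	}
	xa_for_each(&dev->objs, index, obj) {
		kref_get(&obj->kref);	/* pin: entry may be erased, not freed */
		arr[i++] = obj;
	}
	xa_unlock_irq(&dev->objs);
	*n = i;
	return arr;
}
```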