@@ -592,11 +592,11 @@ static void arm_smmu_cmdq_poll_valid_map(struct arm_smmu_cmdq *cmdq,
 
 /* Wait for the command queue to become non-full */
 static int arm_smmu_cmdq_poll_until_not_full(struct arm_smmu_device *smmu,
+					     struct arm_smmu_cmdq *cmdq,
 					     struct arm_smmu_ll_queue *llq)
 {
 	unsigned long flags;
 	struct arm_smmu_queue_poll qp;
-	struct arm_smmu_cmdq *cmdq = arm_smmu_get_cmdq(smmu);
 	int ret = 0;
 
 	/*
@@ -627,11 +627,11 @@ static int arm_smmu_cmdq_poll_until_not_full(struct arm_smmu_device *smmu,
  * Must be called with the cmdq lock held in some capacity.
  */
 static int __arm_smmu_cmdq_poll_until_msi(struct arm_smmu_device *smmu,
+					  struct arm_smmu_cmdq *cmdq,
 					  struct arm_smmu_ll_queue *llq)
 {
 	int ret = 0;
 	struct arm_smmu_queue_poll qp;
-	struct arm_smmu_cmdq *cmdq = arm_smmu_get_cmdq(smmu);
 	u32 *cmd = (u32 *)(Q_ENT(&cmdq->q, llq->prod));
 
 	queue_poll_init(smmu, &qp);
@@ -651,10 +651,10 @@ static int __arm_smmu_cmdq_poll_until_msi(struct arm_smmu_device *smmu,
  * Must be called with the cmdq lock held in some capacity.
  */
 static int __arm_smmu_cmdq_poll_until_consumed(struct arm_smmu_device *smmu,
+					       struct arm_smmu_cmdq *cmdq,
 					       struct arm_smmu_ll_queue *llq)
 {
 	struct arm_smmu_queue_poll qp;
-	struct arm_smmu_cmdq *cmdq = arm_smmu_get_cmdq(smmu);
 	u32 prod = llq->prod;
 	int ret = 0;
 
@@ -701,12 +701,13 @@ static int __arm_smmu_cmdq_poll_until_consumed(struct arm_smmu_device *smmu,
 }
 
 static int arm_smmu_cmdq_poll_until_sync(struct arm_smmu_device *smmu,
+					 struct arm_smmu_cmdq *cmdq,
 					 struct arm_smmu_ll_queue *llq)
 {
 	if (smmu->options & ARM_SMMU_OPT_MSIPOLL)
-		return __arm_smmu_cmdq_poll_until_msi(smmu, llq);
+		return __arm_smmu_cmdq_poll_until_msi(smmu, cmdq, llq);
 
-	return __arm_smmu_cmdq_poll_until_consumed(smmu, llq);
+	return __arm_smmu_cmdq_poll_until_consumed(smmu, cmdq, llq);
 }
 
 static void arm_smmu_cmdq_write_entries(struct arm_smmu_cmdq *cmdq, u64 *cmds,
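With this hunk, the entire polling chain (not-full, MSI, consumed, sync) takes the target command queue from its caller rather than resolving it internally. For reference, the lookup that the removed lines performed lives outside these hunks; at this point in the tree it is presumably just a trivial accessor, sketched here:

	static struct arm_smmu_cmdq *arm_smmu_get_cmdq(struct arm_smmu_device *smmu)
	{
		/* assumed body: return the device's single built-in command queue */
		return &smmu->cmdq;
	}

Hoisting that lookup out to the entry points lets a caller aim the same polling code at a queue other than smmu->cmdq.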
@@ -743,13 +744,13 @@ static void arm_smmu_cmdq_write_entries(struct arm_smmu_cmdq *cmdq, u64 *cmds,
  * CPU will appear before any of the commands from the other CPU.
  */
 static int arm_smmu_cmdq_issue_cmdlist(struct arm_smmu_device *smmu,
+				       struct arm_smmu_cmdq *cmdq,
 				       u64 *cmds, int n, bool sync)
 {
 	u64 cmd_sync[CMDQ_ENT_DWORDS];
 	u32 prod;
 	unsigned long flags;
 	bool owner;
-	struct arm_smmu_cmdq *cmdq = arm_smmu_get_cmdq(smmu);
 	struct arm_smmu_ll_queue llq, head;
 	int ret = 0;
 
@@ -763,7 +764,7 @@ static int arm_smmu_cmdq_issue_cmdlist(struct arm_smmu_device *smmu,
 
 	while (!queue_has_space(&llq, n + sync)) {
 		local_irq_restore(flags);
-		if (arm_smmu_cmdq_poll_until_not_full(smmu, &llq))
+		if (arm_smmu_cmdq_poll_until_not_full(smmu, cmdq, &llq))
 			dev_err_ratelimited(smmu->dev, "CMDQ timeout\n");
 		local_irq_save(flags);
 	}
@@ -839,7 +840,7 @@ static int arm_smmu_cmdq_issue_cmdlist(struct arm_smmu_device *smmu,
 	/* 5. If we are inserting a CMD_SYNC, we must wait for it to complete */
 	if (sync) {
 		llq.prod = queue_inc_prod_n(&llq, n);
-		ret = arm_smmu_cmdq_poll_until_sync(smmu, &llq);
+		ret = arm_smmu_cmdq_poll_until_sync(smmu, cmdq, &llq);
 		if (ret) {
 			dev_err_ratelimited(smmu->dev,
 					    "CMD_SYNC timeout at 0x%08x [hwprod 0x%08x, hwcons 0x%08x]\n",
@@ -874,7 +875,8 @@ static int __arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu,
 		return -EINVAL;
 	}
 
-	return arm_smmu_cmdq_issue_cmdlist(smmu, cmd, 1, sync);
+	return arm_smmu_cmdq_issue_cmdlist(
+		smmu, arm_smmu_get_cmdq(smmu), cmd, 1, sync);
 }
 
 static int arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu,
@@ -889,6 +891,13 @@ static int arm_smmu_cmdq_issue_cmd_with_sync(struct arm_smmu_device *smmu,
 	return __arm_smmu_cmdq_issue_cmd(smmu, ent, true);
 }
 
+static void arm_smmu_cmdq_batch_init(struct arm_smmu_device *smmu,
+				     struct arm_smmu_cmdq_batch *cmds)
+{
+	cmds->num = 0;
+	cmds->cmdq = arm_smmu_get_cmdq(smmu);
+}
+
 static void arm_smmu_cmdq_batch_add(struct arm_smmu_device *smmu,
 				    struct arm_smmu_cmdq_batch *cmds,
 				    struct arm_smmu_cmdq_ent *cmd)
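The cmds->cmdq dereference introduced above implies that struct arm_smmu_cmdq_batch gains a cmdq member in arm-smmu-v3.h, a change these hunks don't show. A sketch of the expected layout (the cmds[] and num fields are pre-existing; only the cmdq pointer is new):

	struct arm_smmu_cmdq_batch {
		u64 cmds[CMDQ_BATCH_ENTRIES * CMDQ_ENT_DWORDS];
		struct arm_smmu_cmdq *cmdq;	/* latched once by arm_smmu_cmdq_batch_init() */
		int num;
	};

Latching the queue at init time means every intermediate flush in arm_smmu_cmdq_batch_add() and the final arm_smmu_cmdq_batch_submit() target the same queue.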
@@ -897,13 +906,15 @@ static void arm_smmu_cmdq_batch_add(struct arm_smmu_device *smmu,
 
 	if (cmds->num == CMDQ_BATCH_ENTRIES - 1 &&
 	    (smmu->options & ARM_SMMU_OPT_CMDQ_FORCE_SYNC)) {
-		arm_smmu_cmdq_issue_cmdlist(smmu, cmds->cmds, cmds->num, true);
-		cmds->num = 0;
+		arm_smmu_cmdq_issue_cmdlist(smmu, cmds->cmdq, cmds->cmds,
+					    cmds->num, true);
+		arm_smmu_cmdq_batch_init(smmu, cmds);
 	}
 
 	if (cmds->num == CMDQ_BATCH_ENTRIES) {
-		arm_smmu_cmdq_issue_cmdlist(smmu, cmds->cmds, cmds->num, false);
-		cmds->num = 0;
+		arm_smmu_cmdq_issue_cmdlist(smmu, cmds->cmdq, cmds->cmds,
+					    cmds->num, false);
+		arm_smmu_cmdq_batch_init(smmu, cmds);
 	}
 
 	index = cmds->num * CMDQ_ENT_DWORDS;
@@ -919,7 +930,8 @@ static void arm_smmu_cmdq_batch_add(struct arm_smmu_device *smmu,
 static int arm_smmu_cmdq_batch_submit(struct arm_smmu_device *smmu,
 				      struct arm_smmu_cmdq_batch *cmds)
 {
-	return arm_smmu_cmdq_issue_cmdlist(smmu, cmds->cmds, cmds->num, true);
+	return arm_smmu_cmdq_issue_cmdlist(smmu, cmds->cmdq, cmds->cmds,
+					   cmds->num, true);
 }
 
 static void arm_smmu_page_response(struct device *dev, struct iopf_fault *unused,
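Taken together, batch users now follow an init/add/submit lifecycle instead of zeroing cmds.num by hand, as the remaining hunks below show. Schematically, for a hypothetical caller (not part of the patch):

	struct arm_smmu_cmdq_batch cmds;
	struct arm_smmu_cmdq_ent cmd = { .opcode = CMDQ_OP_CFGI_CD };

	arm_smmu_cmdq_batch_init(smmu, &cmds);		/* num = 0, cmdq resolved once */
	arm_smmu_cmdq_batch_add(smmu, &cmds, &cmd);	/* flushes and re-inits when full */
	arm_smmu_cmdq_batch_submit(smmu, &cmds);	/* issues the remainder with a CMD_SYNC */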
@@ -1170,7 +1182,7 @@ static void arm_smmu_sync_cd(struct arm_smmu_master *master,
 		},
 	};
 
-	cmds.num = 0;
+	arm_smmu_cmdq_batch_init(smmu, &cmds);
 	for (i = 0; i < master->num_streams; i++) {
 		cmd.cfgi.sid = master->streams[i].id;
 		arm_smmu_cmdq_batch_add(smmu, &cmds, &cmd);
@@ -2021,7 +2033,7 @@ static int arm_smmu_atc_inv_master(struct arm_smmu_master *master,
 
 	arm_smmu_atc_inv_to_cmd(ssid, 0, 0, &cmd);
 
-	cmds.num = 0;
+	arm_smmu_cmdq_batch_init(master->smmu, &cmds);
 	for (i = 0; i < master->num_streams; i++) {
 		cmd.atc.sid = master->streams[i].id;
 		arm_smmu_cmdq_batch_add(master->smmu, &cmds, &cmd);
@@ -2059,7 +2071,7 @@ int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain,
 	if (!atomic_read(&smmu_domain->nr_ats_masters))
 		return 0;
 
-	cmds.num = 0;
+	arm_smmu_cmdq_batch_init(smmu_domain->smmu, &cmds);
 
 	spin_lock_irqsave(&smmu_domain->devices_lock, flags);
 	list_for_each_entry(master_domain, &smmu_domain->devices,
@@ -2141,7 +2153,7 @@ static void __arm_smmu_tlb_inv_range(struct arm_smmu_cmdq_ent *cmd,
 		num_pages++;
 	}
 
-	cmds.num = 0;
+	arm_smmu_cmdq_batch_init(smmu, &cmds);
 
 	while (iova < end) {
 		if (smmu->features & ARM_SMMU_FEAT_RANGE_INV) {