@@ -48,6 +48,7 @@ struct rfkill {
 	bool			persistent;
 	bool			polling_paused;
 	bool			suspended;
+	bool			need_sync;
 
 	const struct rfkill_ops	*ops;
 	void			*data;
@@ -368,6 +369,17 @@ static void rfkill_set_block(struct rfkill *rfkill, bool blocked)
 		rfkill_event(rfkill);
 }
 
+static void rfkill_sync(struct rfkill *rfkill)
+{
+	lockdep_assert_held(&rfkill_global_mutex);
+
+	if (!rfkill->need_sync)
+		return;
+
+	rfkill_set_block(rfkill, rfkill_global_states[rfkill->type].cur);
+	rfkill->need_sync = false;
+}
+
 static void rfkill_update_global_state(enum rfkill_type type, bool blocked)
 {
 	int i;
371383static void rfkill_update_global_state (enum rfkill_type type , bool blocked )
372384{
373385 int i ;
@@ -730,6 +742,10 @@ static ssize_t soft_show(struct device *dev, struct device_attribute *attr,
 {
 	struct rfkill *rfkill = to_rfkill(dev);
 
+	mutex_lock(&rfkill_global_mutex);
+	rfkill_sync(rfkill);
+	mutex_unlock(&rfkill_global_mutex);
+
 	return sysfs_emit(buf, "%d\n", (rfkill->state & RFKILL_BLOCK_SW) ? 1 : 0);
 }
735751
@@ -751,6 +767,7 @@ static ssize_t soft_store(struct device *dev, struct device_attribute *attr,
 		return -EINVAL;
 
 	mutex_lock(&rfkill_global_mutex);
+	rfkill_sync(rfkill);
 	rfkill_set_block(rfkill, state);
 	mutex_unlock(&rfkill_global_mutex);
@@ -783,6 +800,10 @@ static ssize_t state_show(struct device *dev, struct device_attribute *attr,
 {
 	struct rfkill *rfkill = to_rfkill(dev);
 
+	mutex_lock(&rfkill_global_mutex);
+	rfkill_sync(rfkill);
+	mutex_unlock(&rfkill_global_mutex);
+
 	return sysfs_emit(buf, "%d\n", user_state_from_blocked(rfkill->state));
 }
788809
@@ -805,6 +826,7 @@ static ssize_t state_store(struct device *dev, struct device_attribute *attr,
 		return -EINVAL;
 
 	mutex_lock(&rfkill_global_mutex);
+	rfkill_sync(rfkill);
 	rfkill_set_block(rfkill, state == RFKILL_USER_STATE_SOFT_BLOCKED);
 	mutex_unlock(&rfkill_global_mutex);
810832
@@ -1032,14 +1054,10 @@ static void rfkill_uevent_work(struct work_struct *work)
 
 static void rfkill_sync_work(struct work_struct *work)
 {
-	struct rfkill *rfkill;
-	bool cur;
-
-	rfkill = container_of(work, struct rfkill, sync_work);
+	struct rfkill *rfkill = container_of(work, struct rfkill, sync_work);
 
 	mutex_lock(&rfkill_global_mutex);
-	cur = rfkill_global_states[rfkill->type].cur;
-	rfkill_set_block(rfkill, cur);
+	rfkill_sync(rfkill);
 	mutex_unlock(&rfkill_global_mutex);
 }
10451063
@@ -1087,6 +1105,7 @@ int __must_check rfkill_register(struct rfkill *rfkill)
 				      round_jiffies_relative(POLL_INTERVAL));
 
 	if (!rfkill->persistent || rfkill_epo_lock_active) {
+		rfkill->need_sync = true;
 		schedule_work(&rfkill->sync_work);
 	} else {
 #ifdef CONFIG_RFKILL_INPUT
@@ -1171,6 +1190,7 @@ static int rfkill_fop_open(struct inode *inode, struct file *file)
 		ev = kzalloc(sizeof(*ev), GFP_KERNEL);
 		if (!ev)
 			goto free;
+		rfkill_sync(rfkill);
 		rfkill_fill_event(&ev->ev, rfkill, RFKILL_OP_ADD);
 		list_add_tail(&ev->list, &data->events);
 	}
0 commit comments