@@ -1816,19 +1816,16 @@ static void ufshcd_exit_clk_scaling(struct ufs_hba *hba)
 static void ufshcd_ungate_work(struct work_struct *work)
 {
 	int ret;
-	unsigned long flags;
 	struct ufs_hba *hba = container_of(work, struct ufs_hba,
 			clk_gating.ungate_work);
 
 	cancel_delayed_work_sync(&hba->clk_gating.gate_work);
 
-	spin_lock_irqsave(hba->host->host_lock, flags);
-	if (hba->clk_gating.state == CLKS_ON) {
-		spin_unlock_irqrestore(hba->host->host_lock, flags);
-		return;
+	scoped_guard(spinlock_irqsave, &hba->clk_gating.lock) {
+		if (hba->clk_gating.state == CLKS_ON)
+			return;
 	}
 
-	spin_unlock_irqrestore(hba->host->host_lock, flags);
 	ufshcd_hba_vreg_set_hpm(hba);
 	ufshcd_setup_clocks(hba, true);
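For reference, scoped_guard() comes from the kernel's cleanup helpers in <linux/cleanup.h>; the spinlock_irqsave guard variant is defined alongside the spinlock API and keeps the saved irq flags inside the guard object. The property the hunk above relies on is that the unlock runs on every exit from the braced scope, including the early return. A minimal sketch, with hypothetical names (demo_dev and its fields stand in for the clk_gating fields):

#include <linux/spinlock.h>

struct demo_dev {
	spinlock_t lock;
	int state;
};

static bool demo_already_on(struct demo_dev *d)
{
	scoped_guard(spinlock_irqsave, &d->lock) {
		if (d->state)
			return true;	/* unlock runs before this return */
	}
	/* unlock also ran here, when the scope ended normally */
	return false;
}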
@@ -1863,7 +1860,7 @@ void ufshcd_hold(struct ufs_hba *hba)
 	if (!ufshcd_is_clkgating_allowed(hba) ||
 	    !hba->clk_gating.is_initialized)
 		return;
-	spin_lock_irqsave(hba->host->host_lock, flags);
+	spin_lock_irqsave(&hba->clk_gating.lock, flags);
 	hba->clk_gating.active_reqs++;
 
 start:
@@ -1879,11 +1876,11 @@ void ufshcd_hold(struct ufs_hba *hba)
 		 */
 		if (ufshcd_can_hibern8_during_gating(hba) &&
		    ufshcd_is_link_hibern8(hba)) {
-			spin_unlock_irqrestore(hba->host->host_lock, flags);
+			spin_unlock_irqrestore(&hba->clk_gating.lock, flags);
 			flush_result = flush_work(&hba->clk_gating.ungate_work);
 			if (hba->clk_gating.is_suspended && !flush_result)
 				return;
-			spin_lock_irqsave(hba->host->host_lock, flags);
+			spin_lock_irqsave(&hba->clk_gating.lock, flags);
 			goto start;
 		}
 		break;
@@ -1912,48 +1909,50 @@ void ufshcd_hold(struct ufs_hba *hba)
 		 */
 		fallthrough;
 	case REQ_CLKS_ON:
-		spin_unlock_irqrestore(hba->host->host_lock, flags);
+		spin_unlock_irqrestore(&hba->clk_gating.lock, flags);
 		flush_work(&hba->clk_gating.ungate_work);
 		/* Make sure state is CLKS_ON before returning */
-		spin_lock_irqsave(hba->host->host_lock, flags);
+		spin_lock_irqsave(&hba->clk_gating.lock, flags);
 		goto start;
 	default:
 		dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
 			__func__, hba->clk_gating.state);
 		break;
 	}
-	spin_unlock_irqrestore(hba->host->host_lock, flags);
+	spin_unlock_irqrestore(&hba->clk_gating.lock, flags);
 }
 EXPORT_SYMBOL_GPL(ufshcd_hold);
 
 static void ufshcd_gate_work(struct work_struct *work)
 {
 	struct ufs_hba *hba = container_of(work, struct ufs_hba,
 			clk_gating.gate_work.work);
-	unsigned long flags;
 	int ret;
 
-	spin_lock_irqsave(hba->host->host_lock, flags);
-	/*
-	 * In case you are here to cancel this work the gating state
-	 * would be marked as REQ_CLKS_ON. In this case save time by
-	 * skipping the gating work and exit after changing the clock
-	 * state to CLKS_ON.
-	 */
-	if (hba->clk_gating.is_suspended ||
-	    (hba->clk_gating.state != REQ_CLKS_OFF)) {
-		hba->clk_gating.state = CLKS_ON;
-		trace_ufshcd_clk_gating(dev_name(hba->dev),
-					hba->clk_gating.state);
-		goto rel_lock;
-	}
+	scoped_guard(spinlock_irqsave, &hba->clk_gating.lock) {
+		/*
+		 * In case you are here to cancel this work the gating state
+		 * would be marked as REQ_CLKS_ON. In this case save time by
+		 * skipping the gating work and exit after changing the clock
+		 * state to CLKS_ON.
+		 */
+		if (hba->clk_gating.is_suspended ||
+		    hba->clk_gating.state != REQ_CLKS_OFF) {
+			hba->clk_gating.state = CLKS_ON;
+			trace_ufshcd_clk_gating(dev_name(hba->dev),
+						hba->clk_gating.state);
+			return;
+		}
 
-	if (ufshcd_is_ufs_dev_busy(hba) ||
-	    hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL ||
-	    hba->clk_gating.active_reqs)
-		goto rel_lock;
+		if (hba->clk_gating.active_reqs)
+			return;
+	}
 
-	spin_unlock_irqrestore(hba->host->host_lock, flags);
+	scoped_guard(spinlock_irqsave, hba->host->host_lock) {
+		if (ufshcd_is_ufs_dev_busy(hba) ||
+		    hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)
+			return;
+	}
 
 	/* put the link into hibern8 mode before turning off clocks */
 	if (ufshcd_can_hibern8_during_gating(hba)) {
@@ -1964,7 +1963,7 @@ static void ufshcd_gate_work(struct work_struct *work)
 				__func__, ret);
 			trace_ufshcd_clk_gating(dev_name(hba->dev),
 						hba->clk_gating.state);
-			goto out;
+			return;
 		}
 		ufshcd_set_link_hibern8(hba);
 	}
@@ -1984,32 +1983,34 @@ static void ufshcd_gate_work(struct work_struct *work)
 	 * prevent from doing cancel work multiple times when there are
 	 * new requests arriving before the current cancel work is done.
 	 */
-	spin_lock_irqsave(hba->host->host_lock, flags);
+	guard(spinlock_irqsave)(&hba->clk_gating.lock);
 	if (hba->clk_gating.state == REQ_CLKS_OFF) {
 		hba->clk_gating.state = CLKS_OFF;
 		trace_ufshcd_clk_gating(dev_name(hba->dev),
 					hba->clk_gating.state);
 	}
-rel_lock:
-	spin_unlock_irqrestore(hba->host->host_lock, flags);
-out:
-	return;
 }
 
-/* host lock must be held before calling this variant */
 static void __ufshcd_release(struct ufs_hba *hba)
 {
+	lockdep_assert_held(&hba->clk_gating.lock);
+
 	if (!ufshcd_is_clkgating_allowed(hba))
 		return;
 
 	hba->clk_gating.active_reqs--;
 
 	if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended ||
-	    hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL ||
-	    ufshcd_has_pending_tasks(hba) || !hba->clk_gating.is_initialized ||
+	    !hba->clk_gating.is_initialized ||
 	    hba->clk_gating.state == CLKS_OFF)
 		return;
 
+	scoped_guard(spinlock_irqsave, hba->host->host_lock) {
+		if (ufshcd_has_pending_tasks(hba) ||
+		    hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)
+			return;
+	}
+
 	hba->clk_gating.state = REQ_CLKS_OFF;
 	trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
 	queue_delayed_work(hba->clk_gating.clk_gating_workq,
@@ -2019,11 +2020,8 @@ static void __ufshcd_release(struct ufs_hba *hba)
 
 void ufshcd_release(struct ufs_hba *hba)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(hba->host->host_lock, flags);
+	guard(spinlock_irqsave)(&hba->clk_gating.lock);
 	__ufshcd_release(hba);
-	spin_unlock_irqrestore(hba->host->host_lock, flags);
 }
 EXPORT_SYMBOL_GPL(ufshcd_release);
 
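Two conversions above work as a pair: __ufshcd_release() trades the old "host lock must be held" comment for lockdep_assert_held() on the new lock, so the precondition is verified at runtime on lockdep-enabled builds, while ufshcd_release() uses the guard() form, which holds the lock from its declaration to the end of the enclosing scope (here, the whole function) rather than a braced block. A sketch of that caller/callee contract, again with hypothetical names:

#include <linux/spinlock.h>
#include <linux/lockdep.h>

struct demo_dev {
	spinlock_t lock;
	int refs;
};

/* Caller must hold d->lock; lockdep checks this on debug builds. */
static void __demo_release(struct demo_dev *d)
{
	lockdep_assert_held(&d->lock);
	d->refs--;
}

static void demo_release(struct demo_dev *d)
{
	guard(spinlock_irqsave)(&d->lock);	/* held until function return */
	__demo_release(d);
}					/* released here automatically */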
@@ -2038,11 +2036,9 @@ static ssize_t ufshcd_clkgate_delay_show(struct device *dev,
 void ufshcd_clkgate_delay_set(struct device *dev, unsigned long value)
 {
 	struct ufs_hba *hba = dev_get_drvdata(dev);
-	unsigned long flags;
 
-	spin_lock_irqsave(hba->host->host_lock, flags);
+	guard(spinlock_irqsave)(&hba->clk_gating.lock);
 	hba->clk_gating.delay_ms = value;
-	spin_unlock_irqrestore(hba->host->host_lock, flags);
 }
 EXPORT_SYMBOL_GPL(ufshcd_clkgate_delay_set);
 
@@ -2070,26 +2066,25 @@ static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
 		struct device_attribute *attr, const char *buf, size_t count)
 {
 	struct ufs_hba *hba = dev_get_drvdata(dev);
-	unsigned long flags;
 	u32 value;
 
 	if (kstrtou32(buf, 0, &value))
 		return -EINVAL;
 
 	value = !!value;
 
-	spin_lock_irqsave(hba->host->host_lock, flags);
+	guard(spinlock_irqsave)(&hba->clk_gating.lock);
+
 	if (value == hba->clk_gating.is_enabled)
-		goto out;
+		return count;
 
 	if (value)
 		__ufshcd_release(hba);
 	else
 		hba->clk_gating.active_reqs++;
 
 	hba->clk_gating.is_enabled = value;
-out:
-	spin_unlock_irqrestore(hba->host->host_lock, flags);
+
 	return count;
 }
 
@@ -2131,6 +2126,8 @@ static void ufshcd_init_clk_gating(struct ufs_hba *hba)
 	INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
 	INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);
 
+	spin_lock_init(&hba->clk_gating.lock);
+
 	hba->clk_gating.clk_gating_workq = alloc_ordered_workqueue(
 		"ufs_clk_gating_%d", WQ_MEM_RECLAIM | WQ_HIGHPRI,
 		hba->host->host_no);
@@ -9126,7 +9123,6 @@ static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
 	int ret = 0;
 	struct ufs_clk_info *clki;
 	struct list_head *head = &hba->clk_list_head;
-	unsigned long flags;
 	ktime_t start = ktime_get();
 	bool clk_state_changed = false;
 
@@ -9177,11 +9173,10 @@ static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
 				clk_disable_unprepare(clki->clk);
 		}
 	} else if (!ret && on) {
-		spin_lock_irqsave(hba->host->host_lock, flags);
-		hba->clk_gating.state = CLKS_ON;
+		scoped_guard(spinlock_irqsave, &hba->clk_gating.lock)
+			hba->clk_gating.state = CLKS_ON;
 		trace_ufshcd_clk_gating(dev_name(hba->dev),
 					hba->clk_gating.state);
-		spin_unlock_irqrestore(hba->host->host_lock, flags);
 	}
 
 	if (clk_state_changed)
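Net effect of the hunks above: the clock-gating bookkeeping (state, active_reqs, delay_ms, is_enabled) moves under the new hba->clk_gating.lock, while ufshcd_state and the pending-task check stay under the SCSI host_lock. Where both are needed, as in __ufshcd_release() called under ufshcd_release()'s guard, the order is clk_gating.lock outer, host_lock inner. A hedged sketch of that ordering, with hypothetical names (demo_hba and its fields are illustrative stand-ins):

#include <linux/spinlock.h>

struct demo_hba {
	spinlock_t gating_lock;		/* stands in for clk_gating.lock */
	spinlock_t *host_lock;		/* stands in for host->host_lock */
	int active_reqs;
	bool busy;
};

static void demo_release_path(struct demo_hba *hba)
{
	guard(spinlock_irqsave)(&hba->gating_lock);	/* outer lock */

	if (hba->active_reqs)
		return;			/* gating_lock released */

	scoped_guard(spinlock_irqsave, hba->host_lock) {	/* inner lock */
		if (hba->busy)
			return;		/* both released, inner first */
	}
	/* gating_lock still held here until function return */
}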