Skip to content

Commit 40fab1f

Browse files
change(freertos/smp): Update event_groups.c locking
Updated event_groups.c to use granular locking - Added xTaskSpinlock and xISRSpinlock - Replaced critical section macros with data group locking macros such as taskENTER/EXIT_CRITICAL() with taskLOCK/UNLOCK_DATA_GROUP(). - Added prvSuspendEventGroup() and prvResumeEventGroup() to suspend the event group when executing non-deterministic code. - xEventGroupSetBits() and vEventGroupDelete() access the kernel data group directly. Thus, added vTaskSuspendAll()/xTaskResumeAll() to these functions. Co-authored-by: Sudeep Mohanty <[email protected]>
1 parent 7cbf829 commit 40fab1f

File tree

2 files changed

+136
-17
lines changed

2 files changed

+136
-17
lines changed

event_groups.c

Lines changed: 132 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -63,10 +63,33 @@
6363
#if ( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
6464
uint8_t ucStaticallyAllocated; /**< Set to pdTRUE if the event group is statically allocated to ensure no attempt is made to free the memory. */
6565
#endif
66+
67+
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
68+
portSPINLOCK_TYPE xTaskSpinlock;
69+
portSPINLOCK_TYPE xISRSpinlock;
70+
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
6671
} EventGroup_t;
6772

6873
/*-----------------------------------------------------------*/
6974

75+
/*
76+
* Suspends an event group. Prevents other tasks from accessing the event group but allows
77+
* ISRs to pend access to the event group. Caller cannot be preempted by other tasks
78+
* after suspending the event group, thus allowing the caller to execute non-deterministic
79+
* operations.
80+
*/
81+
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
82+
static void prvSuspendEventGroup( EventGroup_t * pxEventBits ) PRIVILEGED_FUNCTION;
83+
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
84+
85+
/*
86+
* Resume an event group. Handles all pended access from ISRs, then reenables
87+
* preemption for the caller.
88+
*/
89+
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
90+
static void prvResumeEventGroup( EventGroup_t * pxEventBits ) PRIVILEGED_FUNCTION;
91+
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
92+
7093
/*
7194
* Test the bits set in uxCurrentEventBits to see if the wait condition is met.
7295
* The wait condition is defined by xWaitForAllBits. If xWaitForAllBits is
@@ -79,6 +102,29 @@
79102
const EventBits_t uxBitsToWaitFor,
80103
const BaseType_t xWaitForAllBits ) PRIVILEGED_FUNCTION;
81104

105+
/*-----------------------------------------------------------*/
106+
107+
/*
108+
* Macro used to suspend and resume an event group. When a task suspends an
109+
* event group. the task will can have thread safe non-deterministic access to
110+
* the event group.
111+
* - Concurrent access from tasks will be protected by the xTaskSpinlock
112+
* - Concurrent access from ISRs will be pended
113+
*
114+
* When the tasks resume the event group, all pended access attempts are handled.
115+
*/
116+
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
117+
#define eventSUSPEND( pxEventBits ) prvSuspendEventGroup( pxEventBits )
118+
#define eventRESUME( pxEventBits ) \
119+
( { \
120+
prvResumeEventGroup( pxEventBits ); \
121+
pdTRUE; \
122+
} )
123+
#else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
124+
#define eventSUSPEND( pxEventBits ) vTaskSuspendAll()
125+
#define eventRESUME( pxEventBits ) xTaskResumeAll()
126+
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
127+
82128
/*-----------------------------------------------------------*/
83129

84130
#if ( configSUPPORT_STATIC_ALLOCATION == 1 )
@@ -122,6 +168,13 @@
122168
}
123169
#endif /* configSUPPORT_DYNAMIC_ALLOCATION */
124170

171+
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
172+
{
173+
portINIT_EVENT_GROUP_TASK_SPINLOCK( &( pxEventBits->xTaskSpinlock ) );
174+
portINIT_EVENT_GROUP_ISR_SPINLOCK( &( pxEventBits->xISRSpinlock ) );
175+
}
176+
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
177+
125178
traceEVENT_GROUP_CREATE( pxEventBits );
126179
}
127180
else
@@ -167,6 +220,13 @@
167220
}
168221
#endif /* configSUPPORT_STATIC_ALLOCATION */
169222

223+
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
224+
{
225+
portINIT_EVENT_GROUP_TASK_SPINLOCK( &( pxEventBits->xTaskSpinlock ) );
226+
portINIT_EVENT_GROUP_ISR_SPINLOCK( &( pxEventBits->xISRSpinlock ) );
227+
}
228+
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
229+
170230
traceEVENT_GROUP_CREATE( pxEventBits );
171231
}
172232
else
@@ -202,7 +262,7 @@
202262
}
203263
#endif
204264

205-
vTaskSuspendAll();
265+
eventSUSPEND( pxEventBits );
206266
{
207267
uxOriginalBitValue = pxEventBits->uxEventBits;
208268

@@ -245,7 +305,7 @@
245305
}
246306
}
247307
}
248-
xAlreadyYielded = xTaskResumeAll();
308+
xAlreadyYielded = eventRESUME( pxEventBits );
249309

250310
if( xTicksToWait != ( TickType_t ) 0 )
251311
{
@@ -267,7 +327,7 @@
267327
if( ( uxReturn & eventUNBLOCKED_DUE_TO_BIT_SET ) == ( EventBits_t ) 0 )
268328
{
269329
/* The task timed out, just return the current event bit value. */
270-
taskENTER_CRITICAL();
330+
taskLOCK_DATA_GROUP( &( pxEventBits->xTaskSpinlock ), &( pxEventBits->xISRSpinlock ) );
271331
{
272332
uxReturn = pxEventBits->uxEventBits;
273333

@@ -284,7 +344,7 @@
284344
mtCOVERAGE_TEST_MARKER();
285345
}
286346
}
287-
taskEXIT_CRITICAL();
347+
taskUNLOCK_DATA_GROUP( &( pxEventBits->xTaskSpinlock ), &( pxEventBits->xISRSpinlock ) );
288348

289349
xTimeoutOccurred = pdTRUE;
290350
}
@@ -333,7 +393,7 @@
333393
}
334394
#endif
335395

336-
vTaskSuspendAll();
396+
eventSUSPEND( pxEventBits );
337397
{
338398
const EventBits_t uxCurrentEventBits = pxEventBits->uxEventBits;
339399

@@ -401,7 +461,7 @@
401461
traceEVENT_GROUP_WAIT_BITS_BLOCK( xEventGroup, uxBitsToWaitFor );
402462
}
403463
}
404-
xAlreadyYielded = xTaskResumeAll();
464+
xAlreadyYielded = eventRESUME( pxEventBits );
405465

406466
if( xTicksToWait != ( TickType_t ) 0 )
407467
{
@@ -422,7 +482,7 @@
422482

423483
if( ( uxReturn & eventUNBLOCKED_DUE_TO_BIT_SET ) == ( EventBits_t ) 0 )
424484
{
425-
taskENTER_CRITICAL();
485+
taskLOCK_DATA_GROUP( &( pxEventBits->xTaskSpinlock ), &( pxEventBits->xISRSpinlock ) );
426486
{
427487
/* The task timed out, just return the current event bit value. */
428488
uxReturn = pxEventBits->uxEventBits;
@@ -447,7 +507,7 @@
447507

448508
xTimeoutOccurred = pdTRUE;
449509
}
450-
taskEXIT_CRITICAL();
510+
taskUNLOCK_DATA_GROUP( &( pxEventBits->xTaskSpinlock ), &( pxEventBits->xISRSpinlock ) );
451511
}
452512
else
453513
{
@@ -482,7 +542,7 @@
482542
configASSERT( xEventGroup );
483543
configASSERT( ( uxBitsToClear & eventEVENT_BITS_CONTROL_BYTES ) == 0 );
484544

485-
taskENTER_CRITICAL();
545+
taskLOCK_DATA_GROUP( &( pxEventBits->xTaskSpinlock ), &( pxEventBits->xISRSpinlock ) );
486546
{
487547
traceEVENT_GROUP_CLEAR_BITS( xEventGroup, uxBitsToClear );
488548

@@ -493,7 +553,7 @@
493553
/* Clear the bits. */
494554
pxEventBits->uxEventBits &= ~uxBitsToClear;
495555
}
496-
taskEXIT_CRITICAL();
556+
taskUNLOCK_DATA_GROUP( &( pxEventBits->xTaskSpinlock ), &( pxEventBits->xISRSpinlock ) );
497557

498558
traceRETURN_xEventGroupClearBits( uxReturn );
499559

@@ -524,19 +584,19 @@
524584
EventBits_t xEventGroupGetBitsFromISR( EventGroupHandle_t xEventGroup )
525585
{
526586
UBaseType_t uxSavedInterruptStatus;
527-
EventGroup_t const * const pxEventBits = xEventGroup;
587+
EventGroup_t * const pxEventBits = xEventGroup;
528588
EventBits_t uxReturn;
529589

530590
traceENTER_xEventGroupGetBitsFromISR( xEventGroup );
531591

532592
/* MISRA Ref 4.7.1 [Return value shall be checked] */
533593
/* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#dir-47 */
534594
/* coverity[misra_c_2012_directive_4_7_violation] */
535-
uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR();
595+
uxSavedInterruptStatus = taskLOCK_DATA_GROUP_FROM_ISR( &( pxEventBits->xISRSpinlock ) );
536596
{
537597
uxReturn = pxEventBits->uxEventBits;
538598
}
539-
taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );
599+
taskUNLOCK_DATA_GROUP_FROM_ISR( uxSavedInterruptStatus, &( pxEventBits->xISRSpinlock ) );
540600

541601
traceRETURN_xEventGroupGetBitsFromISR( uxReturn );
542602

@@ -564,10 +624,17 @@
564624

565625
pxList = &( pxEventBits->xTasksWaitingForBits );
566626
pxListEnd = listGET_END_MARKER( pxList );
567-
vTaskSuspendAll();
627+
eventSUSPEND( pxEventBits );
568628
{
569629
traceEVENT_GROUP_SET_BITS( xEventGroup, uxBitsToSet );
570630

631+
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
632+
633+
/* We are about to access the kernel data group non-deterministically,
634+
* thus we suspend the kernel data group.*/
635+
vTaskSuspendAll();
636+
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
637+
571638
pxListItem = listGET_HEAD_ENTRY( pxList );
572639

573640
/* Set the bits. */
@@ -638,8 +705,12 @@
638705

639706
/* Snapshot resulting bits. */
640707
uxReturnBits = pxEventBits->uxEventBits;
708+
709+
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
710+
( void ) xTaskResumeAll();
711+
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
641712
}
642-
( void ) xTaskResumeAll();
713+
( void ) eventRESUME( pxEventBits );
643714

644715
traceRETURN_xEventGroupSetBits( uxReturnBits );
645716

@@ -658,19 +729,30 @@
658729

659730
pxTasksWaitingForBits = &( pxEventBits->xTasksWaitingForBits );
660731

661-
vTaskSuspendAll();
732+
eventSUSPEND( pxEventBits );
662733
{
663734
traceEVENT_GROUP_DELETE( xEventGroup );
664735

736+
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
737+
738+
/* We are about to access the kernel data group non-deterministically,
739+
* thus we suspend the kernel data group.*/
740+
vTaskSuspendAll();
741+
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
742+
665743
while( listCURRENT_LIST_LENGTH( pxTasksWaitingForBits ) > ( UBaseType_t ) 0 )
666744
{
667745
/* Unblock the task, returning 0 as the event list is being deleted
668746
* and cannot therefore have any bits set. */
669747
configASSERT( pxTasksWaitingForBits->xListEnd.pxNext != ( const ListItem_t * ) &( pxTasksWaitingForBits->xListEnd ) );
670748
vTaskRemoveFromUnorderedEventList( pxTasksWaitingForBits->xListEnd.pxNext, eventUNBLOCKED_DUE_TO_BIT_SET );
671749
}
750+
751+
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
752+
( void ) xTaskResumeAll();
753+
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
672754
}
673-
( void ) xTaskResumeAll();
755+
( void ) eventRESUME( pxEventBits );
674756

675757
#if ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) )
676758
{
@@ -775,6 +857,39 @@
775857
}
776858
/*-----------------------------------------------------------*/
777859

860+
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
861+
static void prvSuspendEventGroup( EventGroup_t * pxEventBits )
862+
{
863+
/* Disable preemption so that the current task cannot be preempted by another task */
864+
vTaskPreemptionDisable( NULL );
865+
866+
portDISABLE_INTERRUPTS();
867+
868+
/* Keep holding xTaskSpinlock to prevent tasks on other cores from accessing
869+
* the event group while it is suspended. */
870+
portGET_SPINLOCK( &( pxEventBits->xTaskSpinlock ) );
871+
872+
portENABLE_INTERRUPTS();
873+
}
874+
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
875+
/*-----------------------------------------------------------*/
876+
877+
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
878+
static void prvResumeEventGroup( EventGroup_t * pxEventBits )
879+
{
880+
portDISABLE_INTERRUPTS();
881+
882+
/* Release the previously held task spinlock */
883+
portRELEASE_SPINLOCK( &( pxEventBits->xTaskSpinlock ) );
884+
885+
portENABLE_INTERRUPTS();
886+
887+
/* Re-enable preemption so that the current task can once again be preempted by other tasks */
888+
vTaskPreemptionEnable( NULL );
889+
}
890+
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
891+
/*-----------------------------------------------------------*/
892+
778893
static BaseType_t prvTestWaitCondition( const EventBits_t uxCurrentEventBits,
779894
const EventBits_t uxBitsToWaitFor,
780895
const BaseType_t xWaitForAllBits )

include/FreeRTOS.h

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -3429,6 +3429,10 @@ typedef struct xSTATIC_EVENT_GROUP
34293429
#if ( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
34303430
uint8_t ucDummy4;
34313431
#endif
3432+
3433+
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
3434+
portSPINLOCK_TYPE xDummySpinlock[ 2 ];
3435+
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
34323436
} StaticEventGroup_t;
34333437

34343438
/*

0 commit comments

Comments
 (0)