63 | 63 | #if ( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
64 | 64 | uint8_t ucStaticallyAllocated; /**< Set to pdTRUE if the event group is statically allocated to ensure no attempt is made to free the memory. */
65 | 65 | #endif
   | 66 | +
   | 67 | + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
   | 68 | +     portSPINLOCK_TYPE xTaskSpinlock;
   | 69 | +     portSPINLOCK_TYPE xISRSpinlock;
   | 70 | + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
66 | 71 | } EventGroup_t;
67 | 72 |
68 | 73 | /*-----------------------------------------------------------*/
69 | 74 |
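The two new members give each event group its own pair of locks: xTaskSpinlock serializes task-level access, while xISRSpinlock serializes access from interrupt handlers, so contention on one event group no longer requires a kernel-wide critical section. The concrete spinlock type comes from the port layer. A minimal sketch of what a port might supply, assuming GCC atomic builtins (illustrative names only; real ports define portSPINLOCK_TYPE themselves):

```c
#include <stdint.h>

/* Hypothetical port-layer spinlock, for illustration only. */
typedef struct
{
    volatile uint32_t ulLock; /* 0 = free, 1 = held. */
} portSPINLOCK_TYPE_SKETCH;

static inline void vSpinlockSketchGet( portSPINLOCK_TYPE_SKETCH * pxLock )
{
    /* Spin until the 0 -> 1 transition succeeds. */
    while( __atomic_exchange_n( &( pxLock->ulLock ), 1U, __ATOMIC_ACQUIRE ) != 0U )
    {
        /* Busy-wait; a real port might insert a pause/yield hint here. */
    }
}

static inline void vSpinlockSketchRelease( portSPINLOCK_TYPE_SKETCH * pxLock )
{
    __atomic_store_n( &( pxLock->ulLock ), 0U, __ATOMIC_RELEASE );
}
```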
   | 75 | +/*
   | 76 | + * Suspends an event group. Prevents other tasks from accessing the event group,
   | 77 | + * but allows ISRs to pend access to the event group. The caller cannot be
   | 78 | + * preempted by other tasks after suspending the event group, allowing the caller
   | 79 | + * to execute non-deterministic operations.
   | 80 | + */
   | 81 | + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
   | 82 | +     static void prvSuspendEventGroup( EventGroup_t * pxEventBits ) PRIVILEGED_FUNCTION;
   | 83 | + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
   | 84 | +
   | 85 | +/*
   | 86 | + * Resumes an event group. Handles all access pended by ISRs, then re-enables
   | 87 | + * preemption for the caller.
   | 88 | + */
   | 89 | + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
   | 90 | +     static void prvResumeEventGroup( EventGroup_t * pxEventBits ) PRIVILEGED_FUNCTION;
   | 91 | + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
   | 92 | +
70 |  93 | /*
71 |  94 |  * Test the bits set in uxCurrentEventBits to see if the wait condition is met.
72 |  95 |  * The wait condition is defined by xWaitForAllBits. If xWaitForAllBits is

79 | 102 |                                         const EventBits_t uxBitsToWaitFor,
80 | 103 |                                         const BaseType_t xWaitForAllBits ) PRIVILEGED_FUNCTION;
81 | 104 |
   | 105 | +/*-----------------------------------------------------------*/
   | 106 | +
   | 107 | +/*
   | 108 | + * Macros used to suspend and resume an event group. When a task suspends an
   | 109 | + * event group, the task gains thread-safe, non-deterministic access to the
   | 110 | + * event group.
   | 111 | + * - Concurrent access from tasks will be protected by the xTaskSpinlock
   | 112 | + * - Concurrent access from ISRs will be pended
   | 113 | + *
   | 114 | + * When the task resumes the event group, all pended access attempts are handled.
   | 115 | + */
   | 116 | + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
   | 117 | +     #define eventSUSPEND( pxEventBits )    prvSuspendEventGroup( pxEventBits )
   | 118 | +     #define eventRESUME( pxEventBits ) \
   | 119 | +     ( { \
   | 120 | +         prvResumeEventGroup( pxEventBits ); \
   | 121 | +         pdTRUE; \
   | 122 | +     } )
   | 123 | + #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
   | 124 | +     #define eventSUSPEND( pxEventBits )    vTaskSuspendAll()
   | 125 | +     #define eventRESUME( pxEventBits )     xTaskResumeAll()
   | 126 | + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
   | 127 | +
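Under granular locks, eventRESUME() uses a GNU statement expression so that it evaluates to pdTRUE, keeping call sites such as `xAlreadyYielded = eventRESUME( pxEventBits );` source-compatible with the `xTaskResumeAll()` path, whose return value reports whether a context switch already occurred; presumably vTaskPreemptionEnable() performs any needed yield when preemption is re-enabled. The intended calling pattern, mirroring the call sites later in this diff (illustrative body, hypothetical function name):

```c
/* Illustrative only: the calling pattern for the suspend/resume pair. */
static void prvExampleNonDeterministicAccess( EventGroup_t * pxEventBits )
{
    eventSUSPEND( pxEventBits );          /* Other tasks locked out; ISR accesses pend. */
    {
        /* Unbounded work on the event group, e.g. walking the
         * xTasksWaitingForBits list, goes here. */
    }
    ( void ) eventRESUME( pxEventBits );  /* Pended ISR accesses are handled here. */
}
```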
82 | 128 | /*-----------------------------------------------------------*/
83 | 129 |
84 | 130 | #if ( configSUPPORT_STATIC_ALLOCATION == 1 )

122 | 168 | }
123 | 169 | #endif /* configSUPPORT_DYNAMIC_ALLOCATION */
124 | 170 |
    | 171 | + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
    | 172 | + {
    | 173 | +     portINIT_EVENT_GROUP_TASK_SPINLOCK( &( pxEventBits->xTaskSpinlock ) );
    | 174 | +     portINIT_EVENT_GROUP_ISR_SPINLOCK( &( pxEventBits->xISRSpinlock ) );
    | 175 | + }
    | 176 | + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
    | 177 | +
125 | 178 | traceEVENT_GROUP_CREATE( pxEventBits );
126 | 179 | }
127 | 180 | else

167 | 220 | }
168 | 221 | #endif /* configSUPPORT_STATIC_ALLOCATION */
169 | 222 |
    | 223 | + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
    | 224 | + {
    | 225 | +     portINIT_EVENT_GROUP_TASK_SPINLOCK( &( pxEventBits->xTaskSpinlock ) );
    | 226 | +     portINIT_EVENT_GROUP_ISR_SPINLOCK( &( pxEventBits->xISRSpinlock ) );
    | 227 | + }
    | 228 | + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
    | 229 | +
170 | 230 | traceEVENT_GROUP_CREATE( pxEventBits );
171 | 231 | }
172 | 232 | else

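Both creation paths initialize the two spinlocks through port-supplied macros. How they map is up to the port; one plausible mapping onto a generic initializer, continuing the hypothetical spinlock sketch from above (actual definitions live in each port layer):

```c
/* Hypothetical port mapping, for illustration only. */
#define portINIT_SPINLOCK_SKETCH( pxSpinlock )    do { ( pxSpinlock )->ulLock = 0U; } while( 0 )

#define portINIT_EVENT_GROUP_TASK_SPINLOCK( pxSpinlock )    portINIT_SPINLOCK_SKETCH( pxSpinlock )
#define portINIT_EVENT_GROUP_ISR_SPINLOCK( pxSpinlock )     portINIT_SPINLOCK_SKETCH( pxSpinlock )
```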
202 | 262 | }
203 | 263 | #endif
204 | 264 |
205 |     | - vTaskSuspendAll();
    | 265 | + eventSUSPEND( pxEventBits );
206 | 266 | {
207 | 267 | uxOriginalBitValue = pxEventBits->uxEventBits;
208 | 268 |

245 | 305 | }
246 | 306 | }
247 | 307 | }
248 |     | - xAlreadyYielded = xTaskResumeAll();
    | 308 | + xAlreadyYielded = eventRESUME( pxEventBits );
249 | 309 |
250 | 310 | if( xTicksToWait != ( TickType_t ) 0 )
251 | 311 | {

267 | 327 | if( ( uxReturn & eventUNBLOCKED_DUE_TO_BIT_SET ) == ( EventBits_t ) 0 )
268 | 328 | {
269 | 329 | /* The task timed out, just return the current event bit value. */
270 |     | - taskENTER_CRITICAL();
    | 330 | + taskLOCK_DATA_GROUP( &( pxEventBits->xTaskSpinlock ), &( pxEventBits->xISRSpinlock ) );
271 | 331 | {
272 | 332 | uxReturn = pxEventBits->uxEventBits;
273 | 333 |

284 | 344 | mtCOVERAGE_TEST_MARKER();
285 | 345 | }
286 | 346 | }
287 |     | - taskEXIT_CRITICAL();
    | 347 | + taskUNLOCK_DATA_GROUP( &( pxEventBits->xTaskSpinlock ), &( pxEventBits->xISRSpinlock ) );
288 | 348 |
289 | 349 | xTimeoutOccurred = pdTRUE;
290 | 350 | }

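In the timeout paths, taskENTER_CRITICAL()/taskEXIT_CRITICAL() is replaced by taskLOCK_DATA_GROUP()/taskUNLOCK_DATA_GROUP(), which locks only this event group's data rather than taking a kernel-wide critical section. The diff does not show the macro bodies; a plausible expansion, given the granular-locks pattern (a sketch, not the kernel's actual definition):

```c
/* Sketch: a data-group critical section masks interrupts on the local
 * core, then takes the group's task- and ISR-level spinlocks. */
#define taskLOCK_DATA_GROUP_SKETCH( pxTaskSpinlock, pxISRSpinlock ) \
    do {                                                            \
        portDISABLE_INTERRUPTS();                                   \
        portGET_SPINLOCK( pxTaskSpinlock );                         \
        portGET_SPINLOCK( pxISRSpinlock );                          \
    } while( 0 )

/* Release in the reverse order of acquisition. */
#define taskUNLOCK_DATA_GROUP_SKETCH( pxTaskSpinlock, pxISRSpinlock ) \
    do {                                                              \
        portRELEASE_SPINLOCK( pxISRSpinlock );                        \
        portRELEASE_SPINLOCK( pxTaskSpinlock );                       \
        portENABLE_INTERRUPTS();                                      \
    } while( 0 )
```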
333 | 393 | }
334 | 394 | #endif
335 | 395 |
336 |     | - vTaskSuspendAll();
    | 396 | + eventSUSPEND( pxEventBits );
337 | 397 | {
338 | 398 | const EventBits_t uxCurrentEventBits = pxEventBits->uxEventBits;
339 | 399 |

401 | 461 | traceEVENT_GROUP_WAIT_BITS_BLOCK( xEventGroup, uxBitsToWaitFor );
402 | 462 | }
403 | 463 | }
404 |     | - xAlreadyYielded = xTaskResumeAll();
    | 464 | + xAlreadyYielded = eventRESUME( pxEventBits );
405 | 465 |
406 | 466 | if( xTicksToWait != ( TickType_t ) 0 )
407 | 467 | {

422 | 482 |
423 | 483 | if( ( uxReturn & eventUNBLOCKED_DUE_TO_BIT_SET ) == ( EventBits_t ) 0 )
424 | 484 | {
425 |     | - taskENTER_CRITICAL();
    | 485 | + taskLOCK_DATA_GROUP( &( pxEventBits->xTaskSpinlock ), &( pxEventBits->xISRSpinlock ) );
426 | 486 | {
427 | 487 | /* The task timed out, just return the current event bit value. */
428 | 488 | uxReturn = pxEventBits->uxEventBits;

447 | 507 |
448 | 508 | xTimeoutOccurred = pdTRUE;
449 | 509 | }
450 |     | - taskEXIT_CRITICAL();
    | 510 | + taskUNLOCK_DATA_GROUP( &( pxEventBits->xTaskSpinlock ), &( pxEventBits->xISRSpinlock ) );
451 | 511 | }
452 | 512 | else
453 | 513 | {

482 | 542 | configASSERT( xEventGroup );
483 | 543 | configASSERT( ( uxBitsToClear & eventEVENT_BITS_CONTROL_BYTES ) == 0 );
484 | 544 |
485 |     | - taskENTER_CRITICAL();
    | 545 | + taskLOCK_DATA_GROUP( &( pxEventBits->xTaskSpinlock ), &( pxEventBits->xISRSpinlock ) );
486 | 546 | {
487 | 547 | traceEVENT_GROUP_CLEAR_BITS( xEventGroup, uxBitsToClear );
488 | 548 |

493 | 553 | /* Clear the bits. */
494 | 554 | pxEventBits->uxEventBits &= ~uxBitsToClear;
495 | 555 | }
496 |     | - taskEXIT_CRITICAL();
    | 556 | + taskUNLOCK_DATA_GROUP( &( pxEventBits->xTaskSpinlock ), &( pxEventBits->xISRSpinlock ) );
497 | 557 |
498 | 558 | traceRETURN_xEventGroupClearBits( uxReturn );
499 | 559 |

524 | 584 | EventBits_t xEventGroupGetBitsFromISR( EventGroupHandle_t xEventGroup )
525 | 585 | {
526 | 586 | UBaseType_t uxSavedInterruptStatus;
527 |     | - EventGroup_t const * const pxEventBits = xEventGroup;
    | 587 | + EventGroup_t * const pxEventBits = xEventGroup;
528 | 588 | EventBits_t uxReturn;
529 | 589 |
530 | 590 | traceENTER_xEventGroupGetBitsFromISR( xEventGroup );
531 | 591 |
532 | 592 | /* MISRA Ref 4.7.1 [Return value shall be checked] */
533 | 593 | /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#dir-47 */
534 | 594 | /* coverity[misra_c_2012_directive_4_7_violation] */
535 |     | - uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR();
    | 595 | + uxSavedInterruptStatus = taskLOCK_DATA_GROUP_FROM_ISR( &( pxEventBits->xISRSpinlock ) );
536 | 596 | {
537 | 597 | uxReturn = pxEventBits->uxEventBits;
538 | 598 | }
539 |     | - taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );
    | 599 | + taskUNLOCK_DATA_GROUP_FROM_ISR( uxSavedInterruptStatus, &( pxEventBits->xISRSpinlock ) );
540 | 600 |
541 | 601 | traceRETURN_xEventGroupGetBitsFromISR( uxReturn );
542 | 602 |

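The FROM_ISR variant takes only xISRSpinlock (ISRs never contend on the task-level lock) and returns the saved interrupt mask, mirroring the taskENTER_CRITICAL_FROM_ISR() contract it replaces. A sketch under the same assumptions as the previous one, reusing the real port macros for interrupt masking:

```c
/* Sketch only: mask interrupts, take the ISR spinlock, and yield the
 * saved mask as the expression value (GNU statement expression, matching
 * the style of eventRESUME() above). */
#define taskLOCK_DATA_GROUP_FROM_ISR_SKETCH( pxISRSpinlock )           \
    ( {                                                                \
        UBaseType_t uxSavedStatus = portSET_INTERRUPT_MASK_FROM_ISR(); \
        portGET_SPINLOCK( pxISRSpinlock );                             \
        uxSavedStatus;                                                 \
    } )

#define taskUNLOCK_DATA_GROUP_FROM_ISR_SKETCH( uxSavedStatus, pxISRSpinlock ) \
    do {                                                                      \
        portRELEASE_SPINLOCK( pxISRSpinlock );                                \
        portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedStatus );                   \
    } while( 0 )
```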
564 | 624 |
565 | 625 | pxList = &( pxEventBits->xTasksWaitingForBits );
566 | 626 | pxListEnd = listGET_END_MARKER( pxList );
567 |     | - vTaskSuspendAll();
    | 627 | + eventSUSPEND( pxEventBits );
568 | 628 | {
569 | 629 | traceEVENT_GROUP_SET_BITS( xEventGroup, uxBitsToSet );
570 | 630 |
    | 631 | + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
    | 632 | +
    | 633 | +     /* We are about to access the kernel data group non-deterministically,
    | 634 | +      * thus we suspend the kernel data group. */
    | 635 | +     vTaskSuspendAll();
    | 636 | + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
    | 637 | +
571 | 638 | pxListItem = listGET_HEAD_ENTRY( pxList );
572 | 639 |
573 | 640 | /* Set the bits. */

638 | 705 |
639 | 706 | /* Snapshot resulting bits. */
640 | 707 | uxReturnBits = pxEventBits->uxEventBits;
    | 708 | +
    | 709 | + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
    | 710 | +     ( void ) xTaskResumeAll();
    | 711 | + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
641 | 712 | }
642 |     | - ( void ) xTaskResumeAll();
    | 713 | + ( void ) eventRESUME( pxEventBits );
643 | 714 |
644 | 715 | traceRETURN_xEventGroupSetBits( uxReturnBits );
645 | 716 |

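Note the lock ordering in xEventGroupSetBits(): the event group's data group is suspended first, then the kernel data group via vTaskSuspendAll(), and the two are released in the reverse order. Unblocking waiters touches kernel-owned task lists, which is why the kernel data group must also be held:

```c
/* Locking order used above in the granular-locks SMP build. */
eventSUSPEND( pxEventBits );            /* 1. Event group data group.      */
vTaskSuspendAll();                      /* 2. Kernel data group.           */
/* ... walk xTasksWaitingForBits and unblock satisfied waiters ... */
( void ) xTaskResumeAll();              /* 2. Release the kernel first.    */
( void ) eventRESUME( pxEventBits );    /* 1. Then resume the event group. */
```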
658 | 729 |
659 | 730 | pxTasksWaitingForBits = &( pxEventBits->xTasksWaitingForBits );
660 | 731 |
661 |     | - vTaskSuspendAll();
    | 732 | + eventSUSPEND( pxEventBits );
662 | 733 | {
663 | 734 | traceEVENT_GROUP_DELETE( xEventGroup );
664 | 735 |
    | 736 | + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
    | 737 | +
    | 738 | +     /* We are about to access the kernel data group non-deterministically,
    | 739 | +      * thus we suspend the kernel data group. */
    | 740 | +     vTaskSuspendAll();
    | 741 | + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
    | 742 | +
665 | 743 | while( listCURRENT_LIST_LENGTH( pxTasksWaitingForBits ) > ( UBaseType_t ) 0 )
666 | 744 | {
667 | 745 | /* Unblock the task, returning 0 as the event list is being deleted
668 | 746 |  * and cannot therefore have any bits set. */
669 | 747 | configASSERT( pxTasksWaitingForBits->xListEnd.pxNext != ( const ListItem_t * ) &( pxTasksWaitingForBits->xListEnd ) );
670 | 748 | vTaskRemoveFromUnorderedEventList( pxTasksWaitingForBits->xListEnd.pxNext, eventUNBLOCKED_DUE_TO_BIT_SET );
671 | 749 | }
    | 750 | +
    | 751 | + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
    | 752 | +     ( void ) xTaskResumeAll();
    | 753 | + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
672 | 754 | }
673 |     | - ( void ) xTaskResumeAll();
    | 755 | + ( void ) eventRESUME( pxEventBits );
674 | 756 |
675 | 757 | #if ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) )
676 | 758 | {

775 | 857 | }
776 | 858 | /*-----------------------------------------------------------*/
777 | 859 |
    | 860 | + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
    | 861 | +     static void prvSuspendEventGroup( EventGroup_t * pxEventBits )
    | 862 | +     {
    | 863 | +         /* Disable preemption so that the current task cannot be preempted by another task. */
    | 864 | +         vTaskPreemptionDisable( NULL );
    | 865 | +
    | 866 | +         portDISABLE_INTERRUPTS();
    | 867 | +
    | 868 | +         /* Keep holding xTaskSpinlock to prevent tasks on other cores from accessing
    | 869 | +          * the event group while it is suspended. */
    | 870 | +         portGET_SPINLOCK( &( pxEventBits->xTaskSpinlock ) );
    | 871 | +
    | 872 | +         portENABLE_INTERRUPTS();
    | 873 | +     }
    | 874 | + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
    | 875 | +/*-----------------------------------------------------------*/
    | 876 | +
    | 877 | + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
    | 878 | +     static void prvResumeEventGroup( EventGroup_t * pxEventBits )
    | 879 | +     {
    | 880 | +         portDISABLE_INTERRUPTS();
    | 881 | +
    | 882 | +         /* Release the previously held task spinlock. */
    | 883 | +         portRELEASE_SPINLOCK( &( pxEventBits->xTaskSpinlock ) );
    | 884 | +
    | 885 | +         portENABLE_INTERRUPTS();
    | 886 | +
    | 887 | +         /* Re-enable preemption so that the current task can be preempted by other tasks again. */
    | 888 | +         vTaskPreemptionEnable( NULL );
    | 889 | +     }
    | 890 | + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
    | 891 | +/*-----------------------------------------------------------*/
    | 892 | +
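Preemption is disabled before xTaskSpinlock is taken so that the holder cannot be switched out while remote cores spin on the lock; interrupts are masked only around the acquisition and release themselves, so ISRs (which use xISRSpinlock) keep running while the group is suspended. What the pair buys a caller, as an illustrative helper (hypothetical, not part of this patch):

```c
/* Illustrative only: a snapshot-and-clear that needs unbounded time
 * with tasks on all cores locked out of the event group. */
static EventBits_t prvExampleSnapshotAndClear( EventGroup_t * pxEventBits )
{
    EventBits_t uxBits;

    prvSuspendEventGroup( pxEventBits );    /* Tasks locked out; ISR accesses pend. */
    {
        uxBits = pxEventBits->uxEventBits;
        pxEventBits->uxEventBits = ( EventBits_t ) 0;
    }
    prvResumeEventGroup( pxEventBits );     /* Pended ISR accesses handled. */

    return uxBits;
}
```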
778 | 893 | static BaseType_t prvTestWaitCondition( const EventBits_t uxCurrentEventBits,
779 | 894 |                                         const EventBits_t uxBitsToWaitFor,
780 | 895 |                                         const BaseType_t xWaitForAllBits )