63 | 63 | #if ( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
64 | 64 | uint8_t ucStaticallyAllocated; /**< Set to pdTRUE if the event group is statically allocated to ensure no attempt is made to free the memory. */
65 | 65 | #endif
| 66 | +
| 67 | + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
| 68 | +     portSPINLOCK_TYPE xTaskSpinlock;
| 69 | +     portSPINLOCK_TYPE xISRSpinlock;
| 70 | + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
66 | 71 | } EventGroup_t;
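The two spinlock members added above split contention by caller context. A minimal sketch of the intended asymmetry, assuming the portGET_SPINLOCK()/portGET_CORE_ID() port hooks used later in this diff (illustrative only, not part of the PR):

    /* A task-level critical section takes BOTH spinlocks, excluding other
     * tasks and ISRs; an ISR-level critical section takes only xISRSpinlock,
     * so a task that holds just xTaskSpinlock (egLOCK()) still lets ISRs
     * access the group. */
    static void vSketchTaskLevelEnter( EventGroup_t * pxGroup )
    {
        portGET_SPINLOCK( portGET_CORE_ID(), &( pxGroup->xTaskSpinlock ) );
        portGET_SPINLOCK( portGET_CORE_ID(), &( pxGroup->xISRSpinlock ) );
    }

    static void vSketchISRLevelEnter( EventGroup_t * pxGroup )
    {
        portGET_SPINLOCK( portGET_CORE_ID(), &( pxGroup->xISRSpinlock ) );
    }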
67 | 72 |
68 | 73 | /*-----------------------------------------------------------*/
69 | 74 |
| 75 | +/*
| 76 | + * Macro to mark the start of a critical code region.
| 77 | + */
| 78 | + #if ( portUSING_GRANULAR_LOCKS == 1 )
| 79 | +     #define egENTER_CRITICAL( pxEventBits )             portLOCK_DATA_GROUP( ( portSPINLOCK_TYPE * ) &( pxEventBits->xTaskSpinlock ), &( pxEventBits->xISRSpinlock ) )
| 80 | +     #define egENTER_CRITICAL_FROM_ISR( pxEventBits )    portLOCK_DATA_GROUP_FROM_ISR( ( portSPINLOCK_TYPE * ) &( pxEventBits->xISRSpinlock ) )
| 81 | + #else /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
| 82 | +     #define egENTER_CRITICAL( pxEventBits )             do { ( void ) pxEventBits; taskENTER_CRITICAL(); } while( 0 )
| 83 | +     #define egENTER_CRITICAL_FROM_ISR( pxEventBits )    taskENTER_CRITICAL_FROM_ISR() /* Must expand to an expression: callers assign the returned interrupt status. */
| 84 | + #endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
| 85 | +
| 86 | +/*
| 87 | + * Macro to mark the end of a critical code region.
| 88 | + */
| 89 | + #if ( portUSING_GRANULAR_LOCKS == 1 )
| 90 | +     #define egEXIT_CRITICAL( pxEventBits )                                      portUNLOCK_DATA_GROUP( ( portSPINLOCK_TYPE * ) &( pxEventBits->xTaskSpinlock ), &( pxEventBits->xISRSpinlock ) )
| 91 | +     #define egEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxEventBits )    portUNLOCK_DATA_GROUP_FROM_ISR( uxSavedInterruptStatus, ( portSPINLOCK_TYPE * ) &( pxEventBits->xISRSpinlock ) )
| 92 | + #else /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
| 93 | +     #define egEXIT_CRITICAL( pxEventBits )                                      do { ( void ) pxEventBits; taskEXIT_CRITICAL(); } while( 0 )
| 94 | +     #define egEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxEventBits )    do { ( void ) pxEventBits; taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); } while( 0 )
| 95 | + #endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
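The pairing these macros expect, shown as a hedged sketch distilled from the call sites later in this diff (the wrapper functions are illustrative, not PR source). Note the ISR-level enter macro returns a saved interrupt status that must be threaded through to the matching exit macro:

    static void vSketchTaskLevelAccess( EventGroup_t * pxEventBits )
    {
        egENTER_CRITICAL( pxEventBits );
        {
            /* Deterministic access to the group's state. */
            pxEventBits->uxEventBits |= ( EventBits_t ) 0x01;
        }
        egEXIT_CRITICAL( pxEventBits );
    }

    static EventBits_t uxSketchISRLevelAccess( EventGroup_t * pxEventBits )
    {
        UBaseType_t uxSavedInterruptStatus;
        EventBits_t uxReturn;

        uxSavedInterruptStatus = egENTER_CRITICAL_FROM_ISR( pxEventBits );
        {
            uxReturn = pxEventBits->uxEventBits;
        }
        egEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxEventBits );

        return uxReturn;
    }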
| 96 | +
| 97 | +/*
| 98 | + * Locks an event group for tasks. Prevents other tasks from accessing the event group but allows
| 99 | + * ISRs to pend access to the event group. The caller cannot be preempted by other tasks
| 100 | + * after locking the event group, allowing it to execute non-deterministic
| 101 | + * operations.
| 102 | + */
| 103 | + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
| 104 | +     static void prvLockEventGroupForTasks( EventGroup_t * pxEventBits ) PRIVILEGED_FUNCTION;
| 105 | + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
| 106 | +
| 107 | +/*
| 108 | + * Unlocks an event group for tasks. Handles all pended access from ISRs, then re-enables
| 109 | + * preemption for the caller.
| 110 | + */
| 111 | + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
| 112 | +     static void prvUnlockEventGroupForTasks( EventGroup_t * pxEventBits ) PRIVILEGED_FUNCTION;
| 113 | + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
| 114 | +
70 | 115 | /*
71 | 116 |  * Test the bits set in uxCurrentEventBits to see if the wait condition is met.
72 | 117 |  * The wait condition is defined by xWaitForAllBits. If xWaitForAllBits is
...
79 | 124 |                                     const EventBits_t uxBitsToWaitFor,
80 | 125 |                                     const BaseType_t xWaitForAllBits ) PRIVILEGED_FUNCTION;
81 | 126 |
| 127 | +/*-----------------------------------------------------------*/
| 128 | +
| 129 | +/*
| 130 | + * Macros used to lock and unlock an event group. When a task locks an
| 131 | + * event group, the task has thread-safe, non-deterministic access to
| 132 | + * the event group.
| 133 | + * - Concurrent access from other tasks is blocked by xTaskSpinlock.
| 134 | + * - Concurrent access from ISRs is pended.
| 135 | + *
| 136 | + * When the task unlocks the event group, all pended access attempts are handled.
| 137 | + */
| 138 | + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
| 139 | +     #define egLOCK( pxEventBits )      prvLockEventGroupForTasks( pxEventBits )
| 140 | +     #define egUNLOCK( pxEventBits )                     \
| 141 | +     ( {                                                 \
| 142 | +         prvUnlockEventGroupForTasks( pxEventBits );     \
| 143 | +         pdTRUE;                                         \
| 144 | +     } )
| 145 | + #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
| 146 | +     #define egLOCK( pxEventBits )      vTaskSuspendAll()
| 147 | +     #define egUNLOCK( pxEventBits )    xTaskResumeAll()
| 148 | + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
| 149 | +
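A note on egUNLOCK(): the ( { ... } ) form is a GNU C statement expression, used so the macro both calls prvUnlockEventGroupForTasks() and evaluates to pdTRUE, keeping it drop-in compatible with xTaskResumeAll() at the call sites below. Returning pdTRUE reports "a yield already happened", presumably because vTaskPreemptionEnable() inside the unlock path performs any pending context switch itself. The expected call pattern, as an assumed-usage sketch (not PR source):

    static void vSketchLockUsage( EventGroup_t * pxEventBits,
                                  TickType_t xTicksToWait )
    {
        BaseType_t xAlreadyYielded;

        egLOCK( pxEventBits );
        {
            /* Non-deterministic work, e.g. walking xTasksWaitingForBits. */
        }
        xAlreadyYielded = egUNLOCK( pxEventBits );

        if( ( xAlreadyYielded == pdFALSE ) && ( xTicksToWait != ( TickType_t ) 0 ) )
        {
            /* The caller would yield here, exactly as after xTaskResumeAll(). */
            portYIELD_WITHIN_API();
        }
    }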
82 | 150 | /*-----------------------------------------------------------*/
83 | 151 |
84 | 152 | #if ( configSUPPORT_STATIC_ALLOCATION == 1 )
...
122 | 190 | }
123 | 191 | #endif /* configSUPPORT_DYNAMIC_ALLOCATION */
124 | 192 |
| 193 | + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
| 194 | + {
| 195 | +     portINIT_EVENT_GROUP_TASK_SPINLOCK( &( pxEventBits->xTaskSpinlock ) );
| 196 | +     portINIT_EVENT_GROUP_ISR_SPINLOCK( &( pxEventBits->xISRSpinlock ) );
| 197 | + }
| 198 | + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
| 199 | +
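The portINIT_EVENT_GROUP_TASK_SPINLOCK()/portINIT_EVENT_GROUP_ISR_SPINLOCK() hooks are supplied by each port. One plausible shape, as a hedged sketch; the type, fields, and macro names here are hypothetical, and a real port defines its own spinlock representation:

    /* Hypothetical port-layer sketch only. */
    typedef struct
    {
        volatile uint32_t uxLockCount; /* 0 == free; > 0 allows recursive takes. */
        volatile int32_t lOwnerCore;   /* Core holding the lock, -1 when free. */
    } SketchSpinlock_t;

    #define sketchINIT_SPINLOCK( pxLock )     \
        do {                                  \
            ( pxLock )->uxLockCount = 0U;     \
            ( pxLock )->lOwnerCore = -1;      \
        } while( 0 )

    /* A port could then map both event-group hooks onto it:
     * #define portINIT_EVENT_GROUP_TASK_SPINLOCK( p )    sketchINIT_SPINLOCK( p )
     * #define portINIT_EVENT_GROUP_ISR_SPINLOCK( p )     sketchINIT_SPINLOCK( p ) */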
125 | 200 | traceEVENT_GROUP_CREATE( pxEventBits );
126 | 201 | }
127 | 202 | else
...
167 | 242 | }
168 | 243 | #endif /* configSUPPORT_STATIC_ALLOCATION */
169 | 244 |
| 245 | + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
| 246 | + {
| 247 | +     portINIT_EVENT_GROUP_TASK_SPINLOCK( &( pxEventBits->xTaskSpinlock ) );
| 248 | +     portINIT_EVENT_GROUP_ISR_SPINLOCK( &( pxEventBits->xISRSpinlock ) );
| 249 | + }
| 250 | + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
| 251 | +
170 | 252 | traceEVENT_GROUP_CREATE( pxEventBits );
171 | 253 | }
172 | 254 | else
...
202 | 284 | }
203 | 285 | #endif
204 | 286 |
205 | | - vTaskSuspendAll();
| 287 | + egLOCK( pxEventBits );
206 | 288 | {
207 | 289 | uxOriginalBitValue = pxEventBits->uxEventBits;
208 | 290 |
...
245 | 327 | }
246 | 328 | }
247 | 329 | }
248 | | - xAlreadyYielded = xTaskResumeAll();
| 330 | + xAlreadyYielded = egUNLOCK( pxEventBits );
249 | 331 |
250 | 332 | if( xTicksToWait != ( TickType_t ) 0 )
251 | 333 | {
...
267 | 349 | if( ( uxReturn & eventUNBLOCKED_DUE_TO_BIT_SET ) == ( EventBits_t ) 0 )
268 | 350 | {
269 | 351 | /* The task timed out, just return the current event bit value. */
270 | | - taskENTER_CRITICAL();
| 352 | + egENTER_CRITICAL( pxEventBits );
271 | 353 | {
272 | 354 | uxReturn = pxEventBits->uxEventBits;
273 | 355 |
...
284 | 366 | mtCOVERAGE_TEST_MARKER();
285 | 367 | }
286 | 368 | }
287 | | - taskEXIT_CRITICAL();
| 369 | + egEXIT_CRITICAL( pxEventBits );
288 | 370 |
289 | 371 | xTimeoutOccurred = pdTRUE;
290 | 372 | }
...
333 | 415 | }
334 | 416 | #endif
335 | 417 |
336 | | - vTaskSuspendAll();
| 418 | + egLOCK( pxEventBits );
337 | 419 | {
338 | 420 | const EventBits_t uxCurrentEventBits = pxEventBits->uxEventBits;
339 | 421 |
...
401 | 483 | traceEVENT_GROUP_WAIT_BITS_BLOCK( xEventGroup, uxBitsToWaitFor );
402 | 484 | }
403 | 485 | }
404 | | - xAlreadyYielded = xTaskResumeAll();
| 486 | + xAlreadyYielded = egUNLOCK( pxEventBits );
405 | 487 |
406 | 488 | if( xTicksToWait != ( TickType_t ) 0 )
407 | 489 | {
...
422 | 504 |
423 | 505 | if( ( uxReturn & eventUNBLOCKED_DUE_TO_BIT_SET ) == ( EventBits_t ) 0 )
424 | 506 | {
425 | | - taskENTER_CRITICAL();
| 507 | + egENTER_CRITICAL( pxEventBits );
426 | 508 | {
427 | 509 | /* The task timed out, just return the current event bit value. */
428 | 510 | uxReturn = pxEventBits->uxEventBits;
...
447 | 529 |
448 | 530 | xTimeoutOccurred = pdTRUE;
449 | 531 | }
450 | | - taskEXIT_CRITICAL();
| 532 | + egEXIT_CRITICAL( pxEventBits );
451 | 533 | }
452 | 534 | else
453 | 535 | {
...
482 | 564 | configASSERT( xEventGroup );
483 | 565 | configASSERT( ( uxBitsToClear & eventEVENT_BITS_CONTROL_BYTES ) == 0 );
484 | 566 |
485 | | - taskENTER_CRITICAL();
| 567 | + egENTER_CRITICAL( pxEventBits );
486 | 568 | {
487 | 569 | traceEVENT_GROUP_CLEAR_BITS( xEventGroup, uxBitsToClear );
488 | 570 |
...
493 | 575 | /* Clear the bits. */
494 | 576 | pxEventBits->uxEventBits &= ~uxBitsToClear;
495 | 577 | }
496 | | - taskEXIT_CRITICAL();
| 578 | + egEXIT_CRITICAL( pxEventBits );
497 | 579 |
498 | 580 | traceRETURN_xEventGroupClearBits( uxReturn );
499 | 581 |
...
524 | 606 | EventBits_t xEventGroupGetBitsFromISR( EventGroupHandle_t xEventGroup )
525 | 607 | {
526 | 608 | UBaseType_t uxSavedInterruptStatus;
527 | | - EventGroup_t const * const pxEventBits = xEventGroup;
| 609 | + EventGroup_t * const pxEventBits = xEventGroup;
528 | 610 | EventBits_t uxReturn;
529 | 611 |
530 | 612 | traceENTER_xEventGroupGetBitsFromISR( xEventGroup );
531 | 613 |
532 | 614 | /* MISRA Ref 4.7.1 [Return value shall be checked] */
533 | 615 | /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#dir-47 */
534 | 616 | /* coverity[misra_c_2012_directive_4_7_violation] */
535 | | - uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR();
| 617 | + uxSavedInterruptStatus = egENTER_CRITICAL_FROM_ISR( pxEventBits );
536 | 618 | {
537 | 619 | uxReturn = pxEventBits->uxEventBits;
538 | 620 | }
539 | | - taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );
| 621 | + egEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxEventBits );
540 | 622 |
541 | 623 | traceRETURN_xEventGroupGetBitsFromISR( uxReturn );
542 | 624 |
...
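xEventGroupGetBitsFromISR() drops the const qualifier on pxEventBits above because the ISR spinlock stored inside the structure must now be written. Usage of the public API is unchanged; a short caller sketch (the handler name and the xEventGroup handle are assumptions for illustration):

    extern EventGroupHandle_t xEventGroup; /* Assumed to be created elsewhere. */

    void vExampleInterruptHandler( void )
    {
        EventBits_t uxBits;

        /* Safe from an ISR: only the group's ISR spinlock (or, on builds
         * without granular locks, a brief interrupt mask) is taken. */
        uxBits = xEventGroupGetBitsFromISR( xEventGroup );

        if( ( uxBits & ( ( EventBits_t ) 0x01 ) ) != ( EventBits_t ) 0 )
        {
            /* Bit 0 is set: react accordingly. */
        }
    }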
564 | 646 |
565 | 647 | pxList = &( pxEventBits->xTasksWaitingForBits );
566 | 648 | pxListEnd = listGET_END_MARKER( pxList );
567 | | - vTaskSuspendAll();
| 649 | + egLOCK( pxEventBits );
568 | 650 | {
569 | 651 | traceEVENT_GROUP_SET_BITS( xEventGroup, uxBitsToSet );
570 | 652 |
| 653 | + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
| 654 | +
| 655 | +     /* We are about to access the kernel data group non-deterministically,
| 656 | +      * thus we suspend the kernel data group. */
| 657 | +     vTaskSuspendAll();
| 658 | + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
| 659 | +
571 | 660 | pxListItem = listGET_HEAD_ENTRY( pxList );
572 | 661 |
573 | 662 | /* Set the bits. */
...
638 | 727 |
639 | 728 | /* Snapshot resulting bits. */
640 | 729 | uxReturnBits = pxEventBits->uxEventBits;
| 730 | +
| 731 | + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
| 732 | +     ( void ) xTaskResumeAll();
| 733 | + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
641 | 734 | }
642 | | - ( void ) xTaskResumeAll();
| 735 | + ( void ) egUNLOCK( pxEventBits );
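The hunk above establishes a strict lock ordering for the granular-locks SMP build: the event group's own lock is taken before the kernel data group is suspended, and the two are released in reverse order. Condensed as a sketch (the names are the real ones from this diff; the wrapper function is illustrative):

    static void vSketchSetBitsOrdering( EventGroup_t * pxEventBits )
    {
        egLOCK( pxEventBits );            /* 1. Take xTaskSpinlock, preemption off. */
        {
            vTaskSuspendAll();            /* 2. Suspend the kernel data group. */

            /* ... walk xTasksWaitingForBits and unblock tasks ... */

            ( void ) xTaskResumeAll();    /* 3. Resume the kernel data group. */
        }
        ( void ) egUNLOCK( pxEventBits ); /* 4. Release xTaskSpinlock, preemption on. */
    }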
643 | 736 |
644 | 737 | traceRETURN_xEventGroupSetBits( uxReturnBits );
645 | 738 |
...
658 | 751 |
659 | 752 | pxTasksWaitingForBits = &( pxEventBits->xTasksWaitingForBits );
660 | 753 |
661 | | - vTaskSuspendAll();
| 754 | + egLOCK( pxEventBits );
662 | 755 | {
663 | 756 | traceEVENT_GROUP_DELETE( xEventGroup );
664 | 757 |
| 758 | + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
| 759 | +
| 760 | +     /* We are about to access the kernel data group non-deterministically,
| 761 | +      * thus we suspend the kernel data group. */
| 762 | +     vTaskSuspendAll();
| 763 | + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
| 764 | +
665 | 765 | while( listCURRENT_LIST_LENGTH( pxTasksWaitingForBits ) > ( UBaseType_t ) 0 )
666 | 766 | {
667 | 767 | /* Unblock the task, returning 0 as the event list is being deleted
668 | 768 |  * and cannot therefore have any bits set. */
669 | 769 | configASSERT( pxTasksWaitingForBits->xListEnd.pxNext != ( const ListItem_t * ) &( pxTasksWaitingForBits->xListEnd ) );
670 | 770 | vTaskRemoveFromUnorderedEventList( pxTasksWaitingForBits->xListEnd.pxNext, eventUNBLOCKED_DUE_TO_BIT_SET );
671 | 771 | }
| 772 | +
| 773 | + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
| 774 | +     ( void ) xTaskResumeAll();
| 775 | + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
672 | 776 | }
673 | | - ( void ) xTaskResumeAll();
| 777 | + ( void ) egUNLOCK( pxEventBits );
674 | 778 |
675 | 779 | #if ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) )
676 | 780 | {
...
775 | 879 | }
776 | 880 | /*-----------------------------------------------------------*/
777 | 881 |
| 882 | + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
| 883 | +     static void prvLockEventGroupForTasks( EventGroup_t * pxEventBits )
| 884 | +     {
| 885 | +         /* Disable preemption so that the current task cannot be preempted by another task. */
| 886 | +         vTaskPreemptionDisable( NULL );
| 887 | +
| 888 | +         portDISABLE_INTERRUPTS();
| 889 | +
| 890 | +         /* Keep holding xTaskSpinlock to prevent tasks on other cores from accessing
| 891 | +          * the event group while it is locked. */
| 892 | +         portGET_SPINLOCK( portGET_CORE_ID(), &( pxEventBits->xTaskSpinlock ) );
| 893 | +
| 894 | +         portENABLE_INTERRUPTS();
| 895 | +     }
| 896 | + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
| 897 | +/*-----------------------------------------------------------*/
| 898 | +
| 899 | + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
| 900 | +     static void prvUnlockEventGroupForTasks( EventGroup_t * pxEventBits )
| 901 | +     {
| 902 | +         portDISABLE_INTERRUPTS();
| 903 | +
| 904 | +         /* Release the previously held task spinlock. */
| 905 | +         portRELEASE_SPINLOCK( portGET_CORE_ID(), &( pxEventBits->xTaskSpinlock ) );
| 906 | +
| 907 | +         portENABLE_INTERRUPTS();
| 908 | +
| 909 | +         /* Re-enable preemption so that the current task can once again be preempted by other tasks. */
| 910 | +         vTaskPreemptionEnable( NULL );
| 911 | +     }
| 912 | + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
| 913 | +/*-----------------------------------------------------------*/
| 914 | +
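Interrupts are masked only across the spinlock get/release in the two helpers above, presumably so the local core cannot be interrupted part-way through the operation; holding xTaskSpinlock with interrupts enabled afterwards is safe because ISRs only ever take xISRSpinlock. The helpers are strictly paired, which egLOCK()/egUNLOCK() enforce. Sketched (the wrapper function is illustrative):

    static void vSketchLockedRegion( EventGroup_t * pxEventBits )
    {
        prvLockEventGroupForTasks( pxEventBits );   /* Preemption off, xTaskSpinlock held. */
        {
            /* Non-deterministic work; ISRs may still access the group
             * through xISRSpinlock while this region runs. */
        }
        prvUnlockEventGroupForTasks( pxEventBits ); /* Reverse order: spinlock released, preemption on. */
    }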
778 | 915 | static BaseType_t prvTestWaitCondition( const EventBits_t uxCurrentEventBits,
779 | 916 |                                         const EventBits_t uxBitsToWaitFor,
780 | 917 |                                         const BaseType_t xWaitForAllBits )