63 | 63 |     #if ( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
64 | 64 |         uint8_t ucStaticallyAllocated; /**< Set to pdTRUE if the event group is statically allocated to ensure no attempt is made to free the memory. */
65 | 65 |     #endif
| 66 | +
| 67 | +     #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
| 68 | +         portSPINLOCK_TYPE xTaskSpinlock;
| 69 | +         portSPINLOCK_TYPE xISRSpinlock;
| 70 | +     #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
66 | 71 | } EventGroup_t;
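Pieced together with the hunk above, the resulting structure looks roughly like this. The leading members are standard event-group state reproduced from the surrounding source for context; only the two spinlocks are new in this diff:

    typedef struct EventGroupDef_t
    {
        EventBits_t uxEventBits;       /* The event bits themselves. */
        List_t xTasksWaitingForBits;   /* Tasks blocked waiting for bits to be set. */

        #if ( configUSE_TRACE_FACILITY == 1 )
            UBaseType_t uxEventGroupNumber;
        #endif

        #if ( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
            uint8_t ucStaticallyAllocated;
        #endif

        /* New: per-instance locks. xTaskSpinlock serialises task-level access,
         * xISRSpinlock serialises ISR-level access to this event group. */
        #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
            portSPINLOCK_TYPE xTaskSpinlock;
            portSPINLOCK_TYPE xISRSpinlock;
        #endif
    } EventGroup_t;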
67 | 72 |
68 | 73 | /*-----------------------------------------------------------*/
69 | 74 |
| 75 | +/*
| 76 | + * Macros to mark the start and end of a critical code region.
| 77 | + */
| 78 | + #if ( portUSING_GRANULAR_LOCKS == 1 )
| 79 | +     #define event_groupsENTER_CRITICAL( pxEventBits )                                  taskDATA_GROUP_ENTER_CRITICAL( pxEventBits )
| 80 | +     #define event_groupsENTER_CRITICAL_FROM_ISR( pxEventBits )                         taskDATA_GROUP_ENTER_CRITICAL_FROM_ISR( pxEventBits )
| 81 | +     #define event_groupsEXIT_CRITICAL( pxEventBits )                                   taskDATA_GROUP_EXIT_CRITICAL( pxEventBits )
| 82 | +     #define event_groupsEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxEventBits )  taskDATA_GROUP_EXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxEventBits )
| 83 | + #else /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
| 84 | +     #define event_groupsENTER_CRITICAL( pxEventBits )                                  taskENTER_CRITICAL();
| 85 | +     #define event_groupsENTER_CRITICAL_FROM_ISR( pxEventBits )                         taskENTER_CRITICAL_FROM_ISR();
| 86 | +     #define event_groupsEXIT_CRITICAL( pxEventBits )                                   taskEXIT_CRITICAL();
| 87 | +     #define event_groupsEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxEventBits )  taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );
| 88 | + #endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
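These wrappers keep the call sites uniform: every caller passes the event group, even though the single-core fallbacks ignore the argument. The ISR variants follow the usual save/restore pattern, as seen in xEventGroupGetBitsFromISR() further down:

    UBaseType_t uxSavedInterruptStatus;

    uxSavedInterruptStatus = event_groupsENTER_CRITICAL_FROM_ISR( pxEventBits );
    {
        /* Short, deterministic access to the protected data. */
        uxReturn = pxEventBits->uxEventBits;
    }
    event_groupsEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxEventBits );

Because the granular-locks variants may write to the spinlocks embedded in the event group, the object pointer can no longer be pointer-to-const; that is why the const qualifier is dropped from pxEventBits in xEventGroupGetBitsFromISR() below.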
| 89 | +
| 90 | +/*
| 91 | + * Locks an event group for tasks. Prevents other tasks from accessing the event group, but allows
| 92 | + * ISRs to pend access to the event group. The caller cannot be preempted by other tasks
| 93 | + * after locking the event group, allowing the caller to execute non-deterministic
| 94 | + * operations.
| 95 | + */
| 96 | + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
| 97 | +     static void prvLockEventGroupForTasks( EventGroup_t * pxEventBits ) PRIVILEGED_FUNCTION;
| 98 | + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
| 99 | +
| 100 | +/*
| 101 | + * Unlocks an event group for tasks. Handles all pended access from ISRs, then re-enables
| 102 | + * preemption for the caller.
| 103 | + */
| 104 | + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
| 105 | +     static BaseType_t prvUnlockEventGroupForTasks( EventGroup_t * pxEventBits ) PRIVILEGED_FUNCTION;
| 106 | + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
| 107 | +
70 | 108 | /*
71 | 109 |  * Test the bits set in uxCurrentEventBits to see if the wait condition is met.
72 | 110 |  * The wait condition is defined by xWaitForAllBits. If xWaitForAllBits is

79 | 117 |                                         const EventBits_t uxBitsToWaitFor,
80 | 118 |                                         const BaseType_t xWaitForAllBits ) PRIVILEGED_FUNCTION;
81 | 119 |
| 120 | +/*-----------------------------------------------------------*/
| 121 | +
| 122 | +/*
| 123 | + * Macros used to lock and unlock an event group. When a task locks an
| 124 | + * event group, the task will have thread-safe, non-deterministic access to
| 125 | + * the event group.
| 126 | + * - Concurrent access from other tasks will be blocked by the xTaskSpinlock.
| 127 | + * - Concurrent access from ISRs will be pended.
| 128 | + *
| 129 | + * When the task unlocks the event group, all pended access attempts are handled.
| 130 | + */
| 131 | + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
| 132 | +     #define event_groupsLOCK( pxEventBits )      prvLockEventGroupForTasks( pxEventBits )
| 133 | +     #define event_groupsUNLOCK( pxEventBits )    prvUnlockEventGroupForTasks( pxEventBits )
| 134 | + #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
| 135 | +     #define event_groupsLOCK( pxEventBits )      vTaskSuspendAll()
| 136 | +     #define event_groupsUNLOCK( pxEventBits )    xTaskResumeAll()
| 137 | + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
| 138 | +
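On multi-core granular-locks builds the pair maps onto the private lock/unlock functions declared earlier; everywhere else it degenerates to plain scheduler suspension. Either way, the call sites below all share one shape:

    event_groupsLOCK( pxEventBits );
    {
        /* Non-deterministic work on the event group, e.g. walking the
         * list of tasks waiting for bits. */
    }
    xAlreadyYielded = event_groupsUNLOCK( pxEventBits );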
82 | 139 | /*-----------------------------------------------------------*/
83 | 140 |
84 | 141 | #if ( configSUPPORT_STATIC_ALLOCATION == 1 )

122 | 179 |             }
123 | 180 |             #endif /* configSUPPORT_DYNAMIC_ALLOCATION */
124 | 181 |
| 182 | +             #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
| 183 | +             {
| 184 | +                 portINIT_SPINLOCK( &( pxEventBits->xTaskSpinlock ) );
| 185 | +                 portINIT_SPINLOCK( &( pxEventBits->xISRSpinlock ) );
| 186 | +             }
| 187 | +             #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
| 188 | +
125 | 189 |             traceEVENT_GROUP_CREATE( pxEventBits );
126 | 190 |         }
127 | 191 |         else

167 | 231 |             }
168 | 232 |             #endif /* configSUPPORT_STATIC_ALLOCATION */
169 | 233 |
| 234 | +             #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
| 235 | +             {
| 236 | +                 portINIT_SPINLOCK( &( pxEventBits->xTaskSpinlock ) );
| 237 | +                 portINIT_SPINLOCK( &( pxEventBits->xISRSpinlock ) );
| 238 | +             }
| 239 | +             #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
| 240 | +
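Both creation paths initialise the two spinlocks to their unlocked state before the trace point. portSPINLOCK_TYPE and portINIT_SPINLOCK come from the port layer and are not defined in this file; purely as an illustration, a hypothetical SMP port might implement them along these lines (the field names here are invented, not the actual port API):

    /* Hypothetical port-layer sketch, for illustration only. */
    typedef struct
    {
        volatile uint32_t uxLockCount; /* Non-zero while held; allows recursive take. */
        volatile int32_t xOwnerCore;   /* ID of the owning core, -1 when free. */
    } portSPINLOCK_TYPE;

    #define portINIT_SPINLOCK( pxSpinlock )    \
        do                                     \
        {                                      \
            ( pxSpinlock )->uxLockCount = 0U;  \
            ( pxSpinlock )->xOwnerCore = -1;   \
        } while( 0 )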
170 | 241 |             traceEVENT_GROUP_CREATE( pxEventBits );
171 | 242 |         }
172 | 243 |         else

202 | 273 |     }
203 | 274 |     #endif
204 | 275 |
205 | | -     vTaskSuspendAll();
| 276 | +     event_groupsLOCK( pxEventBits );
206 | 277 |     {
207 | 278 |         uxOriginalBitValue = pxEventBits->uxEventBits;
208 | 279 |

245 | 316 |             }
246 | 317 |         }
247 | 318 |     }
248 | | -     xAlreadyYielded = xTaskResumeAll();
| 319 | +     xAlreadyYielded = event_groupsUNLOCK( pxEventBits );
249 | 320 |
250 | 321 |     if( xTicksToWait != ( TickType_t ) 0 )
251 | 322 |     {
267 | 338 |         if( ( uxReturn & eventUNBLOCKED_DUE_TO_BIT_SET ) == ( EventBits_t ) 0 )
268 | 339 |         {
269 | 340 |             /* The task timed out, just return the current event bit value. */
270 | | -             taskENTER_CRITICAL();
| 341 | +             event_groupsENTER_CRITICAL( pxEventBits );
271 | 342 |             {
272 | 343 |                 uxReturn = pxEventBits->uxEventBits;
273 | 344 |

284 | 355 |                     mtCOVERAGE_TEST_MARKER();
285 | 356 |                 }
286 | 357 |             }
287 | | -             taskEXIT_CRITICAL();
| 358 | +             event_groupsEXIT_CRITICAL( pxEventBits );
288 | 359 |
289 | 360 |             xTimeoutOccurred = pdTRUE;
290 | 361 |         }
333 | 404 |     }
334 | 405 |     #endif
335 | 406 |
336 | | -     vTaskSuspendAll();
| 407 | +     event_groupsLOCK( pxEventBits );
337 | 408 |     {
338 | 409 |         const EventBits_t uxCurrentEventBits = pxEventBits->uxEventBits;
339 | 410 |

401 | 472 |                 traceEVENT_GROUP_WAIT_BITS_BLOCK( xEventGroup, uxBitsToWaitFor );
402 | 473 |             }
403 | 474 |         }
404 | | -     xAlreadyYielded = xTaskResumeAll();
| 475 | +     xAlreadyYielded = event_groupsUNLOCK( pxEventBits );
405 | 476 |
406 | 477 |     if( xTicksToWait != ( TickType_t ) 0 )
407 | 478 |     {
422 | 493 |
423 | 494 |         if( ( uxReturn & eventUNBLOCKED_DUE_TO_BIT_SET ) == ( EventBits_t ) 0 )
424 | 495 |         {
425 | | -             taskENTER_CRITICAL();
| 496 | +             event_groupsENTER_CRITICAL( pxEventBits );
426 | 497 |             {
427 | 498 |                 /* The task timed out, just return the current event bit value. */
428 | 499 |                 uxReturn = pxEventBits->uxEventBits;

447 | 518 |
448 | 519 |                 xTimeoutOccurred = pdTRUE;
449 | 520 |             }
450 | | -             taskEXIT_CRITICAL();
| 521 | +             event_groupsEXIT_CRITICAL( pxEventBits );
451 | 522 |         }
452 | 523 |         else
453 | 524 |         {
482 | 553 |     configASSERT( xEventGroup );
483 | 554 |     configASSERT( ( uxBitsToClear & eventEVENT_BITS_CONTROL_BYTES ) == 0 );
484 | 555 |
485 | | -     taskENTER_CRITICAL();
| 556 | +     event_groupsENTER_CRITICAL( pxEventBits );
486 | 557 |     {
487 | 558 |         traceEVENT_GROUP_CLEAR_BITS( xEventGroup, uxBitsToClear );
488 | 559 |

493 | 564 |         /* Clear the bits. */
494 | 565 |         pxEventBits->uxEventBits &= ~uxBitsToClear;
495 | 566 |     }
496 | | -     taskEXIT_CRITICAL();
| 567 | +     event_groupsEXIT_CRITICAL( pxEventBits );
497 | 568 |
498 | 569 |     traceRETURN_xEventGroupClearBits( uxReturn );
499 | 570 |
524 | 595 | EventBits_t xEventGroupGetBitsFromISR( EventGroupHandle_t xEventGroup )
525 | 596 | {
526 | 597 |     UBaseType_t uxSavedInterruptStatus;
527 | | -     EventGroup_t const * const pxEventBits = xEventGroup;
| 598 | +     EventGroup_t * const pxEventBits = xEventGroup;
528 | 599 |     EventBits_t uxReturn;
529 | 600 |
530 | 601 |     traceENTER_xEventGroupGetBitsFromISR( xEventGroup );
531 | 602 |
532 | 603 |     /* MISRA Ref 4.7.1 [Return value shall be checked] */
533 | 604 |     /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#dir-47 */
534 | 605 |     /* coverity[misra_c_2012_directive_4_7_violation] */
535 | | -     uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR();
| 606 | +     uxSavedInterruptStatus = event_groupsENTER_CRITICAL_FROM_ISR( pxEventBits );
536 | 607 |     {
537 | 608 |         uxReturn = pxEventBits->uxEventBits;
538 | 609 |     }
539 | | -     taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );
| 610 | +     event_groupsEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxEventBits );
540 | 611 |
541 | 612 |     traceRETURN_xEventGroupGetBitsFromISR( uxReturn );
542 | 613 |
564 | 635 |
565 | 636 |     pxList = &( pxEventBits->xTasksWaitingForBits );
566 | 637 |     pxListEnd = listGET_END_MARKER( pxList );
567 | | -     vTaskSuspendAll();
| 638 | +     event_groupsLOCK( pxEventBits );
568 | 639 |     {
569 | 640 |         traceEVENT_GROUP_SET_BITS( xEventGroup, uxBitsToSet );
570 | 641 |
| 642 | +         #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
| 643 | +
| 644 | +             /* We are about to access the kernel data group non-deterministically,
| 645 | +              * so suspend the kernel data group too. */
| 646 | +             vTaskSuspendAll();
| 647 | +         #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
| 648 | +
571 | 649 |         pxListItem = listGET_HEAD_ENTRY( pxList );
572 | 650 |
573 | 651 |         /* Set the bits. */
638 | 716 |
639 | 717 |         /* Snapshot resulting bits. */
640 | 718 |         uxReturnBits = pxEventBits->uxEventBits;
| 719 | +
| 720 | +         #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
| 721 | +             ( void ) xTaskResumeAll();
| 722 | +         #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
641 | 723 |     }
642 | | -     ( void ) xTaskResumeAll();
| 724 | +     ( void ) event_groupsUNLOCK( pxEventBits );
643 | 725 |
644 | 726 |     traceRETURN_xEventGroupSetBits( uxReturnBits );
645 | 727 |
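The nesting that results in xEventGroupSetBits() (and, below, in vEventGroupDelete()) is worth spelling out. Under granular locks two protection domains are stacked; on all other builds the inner pair compiles away and event_groupsLOCK() itself is the scheduler suspension:

    event_groupsLOCK( pxEventBits );            /* Preemption off, xTaskSpinlock taken. */
    {
        vTaskSuspendAll();                      /* Kernel data group suspended too. */

        /* ... walk xTasksWaitingForBits and unblock tasks ... */

        ( void ) xTaskResumeAll();              /* Kernel data group resumed. */
    }
    ( void ) event_groupsUNLOCK( pxEventBits ); /* Spinlock released, preemption back on. */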
658 | 740 |
659 | 741 |     pxTasksWaitingForBits = &( pxEventBits->xTasksWaitingForBits );
660 | 742 |
661 | | -     vTaskSuspendAll();
| 743 | +     event_groupsLOCK( pxEventBits );
662 | 744 |     {
663 | 745 |         traceEVENT_GROUP_DELETE( xEventGroup );
664 | 746 |
| 747 | +         #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
| 748 | +
| 749 | +             /* We are about to access the kernel data group non-deterministically,
| 750 | +              * so suspend the kernel data group too. */
| 751 | +             vTaskSuspendAll();
| 752 | +         #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
| 753 | +
665 | 754 |         while( listCURRENT_LIST_LENGTH( pxTasksWaitingForBits ) > ( UBaseType_t ) 0 )
666 | 755 |         {
667 | 756 |             /* Unblock the task, returning 0 as the event list is being deleted
668 | 757 |              * and cannot therefore have any bits set. */
669 | 758 |             configASSERT( pxTasksWaitingForBits->xListEnd.pxNext != ( const ListItem_t * ) &( pxTasksWaitingForBits->xListEnd ) );
670 | 759 |             vTaskRemoveFromUnorderedEventList( pxTasksWaitingForBits->xListEnd.pxNext, eventUNBLOCKED_DUE_TO_BIT_SET );
671 | 760 |         }
| 761 | +
| 762 | +         #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
| 763 | +             ( void ) xTaskResumeAll();
| 764 | +         #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
672 | 765 |     }
673 | | -     ( void ) xTaskResumeAll();
| 766 | +     ( void ) event_groupsUNLOCK( pxEventBits );
674 | 767 |
675 | 768 |     #if ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) )
676 | 769 |     {

774 | 867 |     traceRETURN_vEventGroupClearBitsCallback();
775 | 868 | }
776 | 869 | /*-----------------------------------------------------------*/
| 870 | + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
| 871 | +     static void prvLockEventGroupForTasks( EventGroup_t * pxEventBits )
| 872 | +     {
| 873 | +         /* Disable preemption so that the current task cannot be preempted by another task. */
| 874 | +         vTaskPreemptionDisable( NULL );
| 875 | +
| 876 | +         /* Keep holding xTaskSpinlock to prevent tasks on other cores from accessing
| 877 | +          * the event group while it is suspended. */
| 878 | +         portGET_SPINLOCK( portGET_CORE_ID(), &( pxEventBits->xTaskSpinlock ) );
| 879 | +     }
| 880 | + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
| 881 | +/*-----------------------------------------------------------*/
| 882 | +
| 883 | + #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
| 884 | +     static BaseType_t prvUnlockEventGroupForTasks( EventGroup_t * pxEventBits )
| 885 | +     {
| 886 | +         /* Release the previously held task spinlock. */
| 887 | +         portRELEASE_SPINLOCK( portGET_CORE_ID(), &( pxEventBits->xTaskSpinlock ) );
| 888 | +
| 889 | +         /* Re-enable preemption. */
| 890 | +         vTaskPreemptionEnable( NULL );
| 891 | +
| 892 | +         /* Assume the task was preempted as soon as preemption was re-enabled. */
| 893 | +         return pdTRUE;
| 894 | +     }
| 895 | + #endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
| 896 | +/*-----------------------------------------------------------*/
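prvUnlockEventGroupForTasks() pessimistically returns pdTRUE: once preemption is re-enabled the caller may already have been preempted, which is exactly what the return value of xTaskResumeAll() reports on other builds. Callers that key off the result therefore keep working unchanged; the pattern used elsewhere in this file is along these lines:

    xAlreadyYielded = event_groupsUNLOCK( pxEventBits );

    if( xTicksToWait != ( TickType_t ) 0 )
    {
        if( xAlreadyYielded == pdFALSE )
        {
            /* Only force a yield if unlocking did not already cause one. */
            taskYIELD_WITHIN_API();
        }
    }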
777 | 897 |
778 | 898 | static BaseType_t prvTestWaitCondition( const EventBits_t uxCurrentEventBits,
779 | 899 |                                         const EventBits_t uxBitsToWaitFor,