Skip to content

Commit 2f58dd5

Browse files
change(freertos/smp): Update event_groups.c locking
Updated event_groups.c to use granular locking - Added xTaskSpinlock and xISRSpinlock - Replaced critical section macros with data group critical section macros such as taskENTER/EXIT_CRITICAL/_FROM_ISR() with event_groupsENTER/EXIT_CRITICAL/_FROM_ISR(). - Added vEventGroupsEnterCritical/FromISR() and vEventGroupsExitCritical/FromISR() functions that map to the data group critical section macros. - Added prvLockEventGroupForTasks() and prvUnlockEventGroupForTasks() to suspend the event group when executing non-deterministic code. - xEventGroupSetBits() and vEventGroupDelete() access the kernel data group directly. Thus, added vTaskSuspendAll()/xTaskResumeAll() to these functions. Co-authored-by: Sudeep Mohanty <sudeep.mohanty@espressif.com>
1 parent 3176808 commit 2f58dd5

File tree

2 files changed

+141
-17
lines changed

2 files changed

+141
-17
lines changed

event_groups.c

Lines changed: 137 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -63,10 +63,48 @@
6363
#if ( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
6464
uint8_t ucStaticallyAllocated; /**< Set to pdTRUE if the event group is statically allocated to ensure no attempt is made to free the memory. */
6565
#endif
66+
67+
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
68+
portSPINLOCK_TYPE xTaskSpinlock;
69+
portSPINLOCK_TYPE xISRSpinlock;
70+
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
6671
} EventGroup_t;
6772

6873
/*-----------------------------------------------------------*/
6974

75+
/*
76+
* Macros to mark the start and end of a critical code region.
77+
*/
78+
#if ( portUSING_GRANULAR_LOCKS == 1 )
79+
#define event_groupsENTER_CRITICAL( pxEventBits ) taskDATA_GROUP_ENTER_CRITICAL( pxEventBits )
80+
#define event_groupsENTER_CRITICAL_FROM_ISR( pxEventBits ) taskDATA_GROUP_ENTER_CRITICAL_FROM_ISR( pxEventBits )
81+
#define event_groupsEXIT_CRITICAL( pxEventBits ) taskDATA_GROUP_EXIT_CRITICAL( pxEventBits )
82+
#define event_groupsEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxEventBits ) taskDATA_GROUP_EXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxEventBits )
83+
#else /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
84+
#define event_groupsENTER_CRITICAL( pxEventBits ) taskENTER_CRITICAL();
85+
#define event_groupsENTER_CRITICAL_FROM_ISR( pxEventBits ) taskENTER_CRITICAL_FROM_ISR();
86+
#define event_groupsEXIT_CRITICAL( pxEventBits ) taskEXIT_CRITICAL();
87+
#define event_groupsEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxEventBits ) taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );
88+
#endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
89+
90+
/*
91+
* Locks an event group for tasks. Prevents other tasks from accessing the event group but allows
92+
* ISRs to pend access to the event group. Caller cannot be preempted by other tasks
93+
* after locking the event group, thus allowing the caller to execute non-deterministic
94+
* operations.
95+
*/
96+
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
97+
static void prvLockEventGroupForTasks( EventGroup_t * pxEventBits ) PRIVILEGED_FUNCTION;
98+
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
99+
100+
/*
101+
* Unlocks an event group for tasks. Handles all pended access from ISRs, then re-enables
102+
* preemption for the caller.
103+
*/
104+
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
105+
static BaseType_t prvUnlockEventGroupForTasks( EventGroup_t * pxEventBits ) PRIVILEGED_FUNCTION;
106+
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
107+
70108
/*
71109
* Test the bits set in uxCurrentEventBits to see if the wait condition is met.
72110
* The wait condition is defined by xWaitForAllBits. If xWaitForAllBits is
@@ -79,6 +117,25 @@
79117
const EventBits_t uxBitsToWaitFor,
80118
const BaseType_t xWaitForAllBits ) PRIVILEGED_FUNCTION;
81119

120+
/*-----------------------------------------------------------*/
121+
122+
/*
123+
* Macros used to lock and unlock an event group. When a task locks an
124+
* event group, the task will have thread safe non-deterministic access to
125+
* the event group.
126+
* - Concurrent access from other tasks will be blocked by the xTaskSpinlock
127+
* - Concurrent access from ISRs will be pended
128+
*
129+
* When the task unlocks the event group, all pended access attempts are handled.
130+
*/
131+
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
132+
#define event_groupsLOCK( pxEventBits ) prvLockEventGroupForTasks( pxEventBits )
133+
#define event_groupsUNLOCK( pxEventBits ) prvUnlockEventGroupForTasks( pxEventBits );
134+
#else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
135+
#define event_groupsLOCK( pxEventBits ) vTaskSuspendAll()
136+
#define event_groupsUNLOCK( pxEventBits ) xTaskResumeAll()
137+
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
138+
82139
/*-----------------------------------------------------------*/
83140

84141
#if ( configSUPPORT_STATIC_ALLOCATION == 1 )
@@ -122,6 +179,13 @@
122179
}
123180
#endif /* configSUPPORT_DYNAMIC_ALLOCATION */
124181

182+
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
183+
{
184+
portINIT_SPINLOCK( &( pxEventBits->xTaskSpinlock ) );
185+
portINIT_SPINLOCK( &( pxEventBits->xISRSpinlock ) );
186+
}
187+
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
188+
125189
traceEVENT_GROUP_CREATE( pxEventBits );
126190
}
127191
else
@@ -167,6 +231,13 @@
167231
}
168232
#endif /* configSUPPORT_STATIC_ALLOCATION */
169233

234+
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
235+
{
236+
portINIT_SPINLOCK( &( pxEventBits->xTaskSpinlock ) );
237+
portINIT_SPINLOCK( &( pxEventBits->xISRSpinlock ) );
238+
}
239+
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
240+
170241
traceEVENT_GROUP_CREATE( pxEventBits );
171242
}
172243
else
@@ -202,7 +273,7 @@
202273
}
203274
#endif
204275

205-
vTaskSuspendAll();
276+
event_groupsLOCK( pxEventBits );
206277
{
207278
uxOriginalBitValue = pxEventBits->uxEventBits;
208279

@@ -245,7 +316,7 @@
245316
}
246317
}
247318
}
248-
xAlreadyYielded = xTaskResumeAll();
319+
xAlreadyYielded = event_groupsUNLOCK( pxEventBits );
249320

250321
if( xTicksToWait != ( TickType_t ) 0 )
251322
{
@@ -267,7 +338,7 @@
267338
if( ( uxReturn & eventUNBLOCKED_DUE_TO_BIT_SET ) == ( EventBits_t ) 0 )
268339
{
269340
/* The task timed out, just return the current event bit value. */
270-
taskENTER_CRITICAL();
341+
event_groupsENTER_CRITICAL( pxEventBits );
271342
{
272343
uxReturn = pxEventBits->uxEventBits;
273344

@@ -284,7 +355,7 @@
284355
mtCOVERAGE_TEST_MARKER();
285356
}
286357
}
287-
taskEXIT_CRITICAL();
358+
event_groupsEXIT_CRITICAL( pxEventBits );
288359

289360
xTimeoutOccurred = pdTRUE;
290361
}
@@ -333,7 +404,7 @@
333404
}
334405
#endif
335406

336-
vTaskSuspendAll();
407+
event_groupsLOCK( pxEventBits );
337408
{
338409
const EventBits_t uxCurrentEventBits = pxEventBits->uxEventBits;
339410

@@ -401,7 +472,7 @@
401472
traceEVENT_GROUP_WAIT_BITS_BLOCK( xEventGroup, uxBitsToWaitFor );
402473
}
403474
}
404-
xAlreadyYielded = xTaskResumeAll();
475+
xAlreadyYielded = event_groupsUNLOCK( pxEventBits );
405476

406477
if( xTicksToWait != ( TickType_t ) 0 )
407478
{
@@ -422,7 +493,7 @@
422493

423494
if( ( uxReturn & eventUNBLOCKED_DUE_TO_BIT_SET ) == ( EventBits_t ) 0 )
424495
{
425-
taskENTER_CRITICAL();
496+
event_groupsENTER_CRITICAL( pxEventBits );
426497
{
427498
/* The task timed out, just return the current event bit value. */
428499
uxReturn = pxEventBits->uxEventBits;
@@ -447,7 +518,7 @@
447518

448519
xTimeoutOccurred = pdTRUE;
449520
}
450-
taskEXIT_CRITICAL();
521+
event_groupsEXIT_CRITICAL( pxEventBits );
451522
}
452523
else
453524
{
@@ -482,7 +553,7 @@
482553
configASSERT( xEventGroup );
483554
configASSERT( ( uxBitsToClear & eventEVENT_BITS_CONTROL_BYTES ) == 0 );
484555

485-
taskENTER_CRITICAL();
556+
event_groupsENTER_CRITICAL( pxEventBits );
486557
{
487558
traceEVENT_GROUP_CLEAR_BITS( xEventGroup, uxBitsToClear );
488559

@@ -493,7 +564,7 @@
493564
/* Clear the bits. */
494565
pxEventBits->uxEventBits &= ~uxBitsToClear;
495566
}
496-
taskEXIT_CRITICAL();
567+
event_groupsEXIT_CRITICAL( pxEventBits );
497568

498569
traceRETURN_xEventGroupClearBits( uxReturn );
499570

@@ -524,19 +595,19 @@
524595
EventBits_t xEventGroupGetBitsFromISR( EventGroupHandle_t xEventGroup )
525596
{
526597
UBaseType_t uxSavedInterruptStatus;
527-
EventGroup_t const * const pxEventBits = xEventGroup;
598+
EventGroup_t * const pxEventBits = xEventGroup;
528599
EventBits_t uxReturn;
529600

530601
traceENTER_xEventGroupGetBitsFromISR( xEventGroup );
531602

532603
/* MISRA Ref 4.7.1 [Return value shall be checked] */
533604
/* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#dir-47 */
534605
/* coverity[misra_c_2012_directive_4_7_violation] */
535-
uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR();
606+
uxSavedInterruptStatus = event_groupsENTER_CRITICAL_FROM_ISR( pxEventBits );
536607
{
537608
uxReturn = pxEventBits->uxEventBits;
538609
}
539-
taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );
610+
event_groupsEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxEventBits );
540611

541612
traceRETURN_xEventGroupGetBitsFromISR( uxReturn );
542613

@@ -564,10 +635,17 @@
564635

565636
pxList = &( pxEventBits->xTasksWaitingForBits );
566637
pxListEnd = listGET_END_MARKER( pxList );
567-
vTaskSuspendAll();
638+
event_groupsLOCK( pxEventBits );
568639
{
569640
traceEVENT_GROUP_SET_BITS( xEventGroup, uxBitsToSet );
570641

642+
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
643+
644+
/* We are about to access the kernel data group non-deterministically,
645+
* thus we suspend the kernel data group.*/
646+
vTaskSuspendAll();
647+
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
648+
571649
pxListItem = listGET_HEAD_ENTRY( pxList );
572650

573651
/* Set the bits. */
@@ -638,8 +716,12 @@
638716

639717
/* Snapshot resulting bits. */
640718
uxReturnBits = pxEventBits->uxEventBits;
719+
720+
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
721+
( void ) xTaskResumeAll();
722+
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
641723
}
642-
( void ) xTaskResumeAll();
724+
( void ) event_groupsUNLOCK( pxEventBits );
643725

644726
traceRETURN_xEventGroupSetBits( uxReturnBits );
645727

@@ -658,19 +740,30 @@
658740

659741
pxTasksWaitingForBits = &( pxEventBits->xTasksWaitingForBits );
660742

661-
vTaskSuspendAll();
743+
event_groupsLOCK( pxEventBits );
662744
{
663745
traceEVENT_GROUP_DELETE( xEventGroup );
664746

747+
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
748+
749+
/* We are about to access the kernel data group non-deterministically,
750+
* thus we suspend the kernel data group.*/
751+
vTaskSuspendAll();
752+
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
753+
665754
while( listCURRENT_LIST_LENGTH( pxTasksWaitingForBits ) > ( UBaseType_t ) 0 )
666755
{
667756
/* Unblock the task, returning 0 as the event list is being deleted
668757
* and cannot therefore have any bits set. */
669758
configASSERT( pxTasksWaitingForBits->xListEnd.pxNext != ( const ListItem_t * ) &( pxTasksWaitingForBits->xListEnd ) );
670759
vTaskRemoveFromUnorderedEventList( pxTasksWaitingForBits->xListEnd.pxNext, eventUNBLOCKED_DUE_TO_BIT_SET );
671760
}
761+
762+
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
763+
( void ) xTaskResumeAll();
764+
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
672765
}
673-
( void ) xTaskResumeAll();
766+
( void ) event_groupsUNLOCK( pxEventBits );
674767

675768
#if ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) )
676769
{
@@ -774,6 +867,33 @@
774867
traceRETURN_vEventGroupClearBitsCallback();
775868
}
776869
/*-----------------------------------------------------------*/
870+
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
871+
static void prvLockEventGroupForTasks( EventGroup_t * pxEventBits )
872+
{
873+
/* Disable preemption so that the current task cannot be preempted by another task */
874+
vTaskPreemptionDisable( NULL );
875+
876+
/* Keep holding xTaskSpinlock to prevent tasks on other cores from accessing
877+
* the event group while it is suspended. */
878+
portGET_SPINLOCK( portGET_CORE_ID(), &( pxEventBits->xTaskSpinlock ) );
879+
}
880+
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
881+
/*-----------------------------------------------------------*/
882+
883+
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
884+
static BaseType_t prvUnlockEventGroupForTasks( EventGroup_t * pxEventBits )
885+
{
886+
/* Release the previously held task spinlock */
887+
portRELEASE_SPINLOCK( portGET_CORE_ID(), &( pxEventBits->xTaskSpinlock ) );
888+
889+
/* Re-enable preemption */
890+
vTaskPreemptionEnable( NULL );
891+
892+
/* We assume that the task was preempted when preemption was enabled */
893+
return pdTRUE;
894+
}
895+
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
896+
/*-----------------------------------------------------------*/
777897

778898
static BaseType_t prvTestWaitCondition( const EventBits_t uxCurrentEventBits,
779899
const EventBits_t uxBitsToWaitFor,

include/FreeRTOS.h

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -3354,6 +3354,10 @@ typedef struct xSTATIC_EVENT_GROUP
33543354
#if ( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
33553355
uint8_t ucDummy4;
33563356
#endif
3357+
3358+
#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
3359+
portSPINLOCK_TYPE xDummySpinlock[ 2 ];
3360+
#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
33573361
} StaticEventGroup_t;
33583362

33593363
/*

0 commit comments

Comments
 (0)