Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 8 additions & 0 deletions examples/template_configuration/FreeRTOSConfig.h
Original file line number Diff line number Diff line change
Expand Up @@ -546,6 +546,14 @@
* vTaskPreemptionEnable APIs. */
#define configUSE_TASK_PREEMPTION_DISABLE 0

/* When using SMP (i.e. configNUMBER_OF_CORES is greater than one), set
 * configUSE_SCHEDULER_CORE_MASK to 1 to enable the scheduler core mask feature.
 * When enabled, the vTaskSetSchedulerCoreMask and uxTaskGetSchedulerCoreMask
 * APIs can be used to control which cores are allowed to run non-idle tasks
 * system-wide at run time. Set to 0 to exclude this feature from the build.
 * If left undefined this defaults to 1 in SMP builds (configNUMBER_OF_CORES
 * greater than one) and to 0 in single core builds. */
#define configUSE_SCHEDULER_CORE_MASK 1

/* When using SMP (i.e. configNUMBER_OF_CORES is greater than one), set
* configUSE_PASSIVE_IDLE_HOOK to 1 to allow the application writer to use
* the passive idle task hook to add background functionality without the
Expand Down
28 changes: 28 additions & 0 deletions include/FreeRTOS.h
Original file line number Diff line number Diff line change
Expand Up @@ -512,6 +512,14 @@
#define configUSE_CORE_AFFINITY 0
#endif /* configUSE_CORE_AFFINITY */

/* Default for configUSE_SCHEDULER_CORE_MASK: the scheduler core mask feature
 * is enabled by default in SMP builds (more than one core) and excluded from
 * single core builds, where masking cores has no meaning. */
#ifndef configUSE_SCHEDULER_CORE_MASK
#if ( configNUMBER_OF_CORES > 1 )
#define configUSE_SCHEDULER_CORE_MASK 1
#else
#define configUSE_SCHEDULER_CORE_MASK 0
#endif
#endif /* configUSE_SCHEDULER_CORE_MASK */

#if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) )
#ifndef configTASK_DEFAULT_CORE_AFFINITY
#define configTASK_DEFAULT_CORE_AFFINITY tskNO_AFFINITY
Expand Down Expand Up @@ -1846,6 +1854,22 @@
#define traceRETURN_vTaskCoreAffinityGet( uxCoreAffinityMask )
#endif

/* Default (empty) trace hooks for the scheduler core mask APIs.  Define any
 * of these in FreeRTOSConfig.h to instrument entry to and return from
 * vTaskSetSchedulerCoreMask() and uxTaskGetSchedulerCoreMask(). */
#ifndef traceENTER_vTaskSetSchedulerCoreMask
#define traceENTER_vTaskSetSchedulerCoreMask( uxCoreMask )
#endif

#ifndef traceRETURN_vTaskSetSchedulerCoreMask
#define traceRETURN_vTaskSetSchedulerCoreMask()
#endif

#ifndef traceENTER_uxTaskGetSchedulerCoreMask
#define traceENTER_uxTaskGetSchedulerCoreMask()
#endif

#ifndef traceRETURN_uxTaskGetSchedulerCoreMask
#define traceRETURN_uxTaskGetSchedulerCoreMask( uxCoreMask )
#endif

#ifndef traceENTER_vTaskPreemptionDisable
#define traceENTER_vTaskPreemptionDisable( xTask )
#endif
Expand Down Expand Up @@ -2902,6 +2926,10 @@
#error configUSE_CORE_AFFINITY is not supported in single core FreeRTOS
#endif

/* The scheduler core mask only makes sense when there is more than one core
 * to mask, so reject the feature in single core builds. */
#if ( ( configNUMBER_OF_CORES == 1 ) && ( configUSE_SCHEDULER_CORE_MASK != 0 ) )
#error configUSE_SCHEDULER_CORE_MASK is not supported in single core FreeRTOS
#endif

#if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_PORT_OPTIMISED_TASK_SELECTION != 0 ) )
#error configUSE_PORT_OPTIMISED_TASK_SELECTION is not supported in SMP FreeRTOS
#endif
Expand Down
48 changes: 48 additions & 0 deletions include/task.h
Original file line number Diff line number Diff line change
Expand Up @@ -1420,6 +1420,54 @@ BaseType_t xTaskResumeFromISR( TaskHandle_t xTaskToResume ) PRIVILEGED_FUNCTION;
UBaseType_t vTaskCoreAffinityGet( ConstTaskHandle_t xTask );
#endif

#if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_SCHEDULER_CORE_MASK == 1 ) )

/**
 * @brief Sets the global scheduler core mask.
 *
 * Controls which cores are allowed to run non-idle tasks system-wide.
 * Bit N = 1 means core N may run non-idle tasks; bit N = 0 means core N will
 * only run its idle task. configNUMBER_OF_CORES must be greater than 1 for
 * this function to be available.
 *
 * Masking a core (including core 0) does NOT power it off or stop its tick
 * ISR and scheduler from executing. All cores remain active; the mask only
 * controls whether the scheduler may dispatch a non-idle task onto a core.
 * A masked core continues to service its tick interrupt and enters the
 * scheduler normally, but will always be assigned the idle task.
 *
 * Passing 0 as the mask is valid; every core will run only its idle task
 * until a new mask is applied.
 *
 * If a core that is currently running a non-idle task becomes disabled by
 * the new mask, it is yielded immediately so the scheduler can replace the
 * running task with the idle task.
 *
 * @param uxCoreMask Bitmask of cores to enable. Cores are numbered 0 to
 * configNUMBER_OF_CORES - 1. Pass ( tskNO_AFFINITY ) to re-enable all cores.
 *
 * Example usage (4-core system, exclude core 2):
 *
 * // Allow scheduling on cores 0, 1 and 3 only.
 * vTaskSetSchedulerCoreMask( ( 1 << 0 ) | ( 1 << 1 ) | ( 1 << 3 ) );
 *
 * // Later, restore all four cores.
 * vTaskSetSchedulerCoreMask( ( 1 << 0 ) | ( 1 << 1 ) | ( 1 << 2 ) | ( 1 << 3 ) );
 */
void vTaskSetSchedulerCoreMask( UBaseType_t uxCoreMask ) PRIVILEGED_FUNCTION;

/**
 * @brief Gets the current global scheduler core mask.
 *
 * @return Bitmask where bit N = 1 means core N is currently allowed to run
 * non-idle tasks.
 *
 * Example usage:
 *
 * UBaseType_t uxMask = uxTaskGetSchedulerCoreMask();
 */
UBaseType_t uxTaskGetSchedulerCoreMask( void ) PRIVILEGED_FUNCTION;
#endif

#if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )

/**
Expand Down
131 changes: 117 additions & 14 deletions tasks.c
Original file line number Diff line number Diff line change
Expand Up @@ -511,6 +511,20 @@ PRIVILEGED_DATA static UBaseType_t uxTaskNumber = ( UBaseType_t ) 0U;
PRIVILEGED_DATA static volatile TickType_t xNextTaskUnblockTime = ( TickType_t ) 0U; /* Initialised to portMAX_DELAY before the scheduler starts. */
PRIVILEGED_DATA static TaskHandle_t xIdleTaskHandles[ configNUMBER_OF_CORES ]; /**< Holds the handles of the idle tasks. The idle tasks are created automatically when the scheduler is started. */

#if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_SCHEDULER_CORE_MASK == 1 ) )
/* Global scheduler core mask. Bit N = 1 means core N is allowed to run
 * non-idle tasks. Defaults to all cores enabled. Use
 * vTaskSetSchedulerCoreMask() / uxTaskGetSchedulerCoreMask() to change it
 * at run time.
 *
 * The mask is derived by right-shifting ~0 rather than left-shifting 1,
 * because left-shifting by a value equal to the type width is undefined
 * behaviour in C (C11 §6.5.7). Using UBaseType_t throughout also avoids
 * the assumption that unsigned long is at least as wide as UBaseType_t.
 *
 * NOTE(review): the type width is computed as sizeof( UBaseType_t ) * 8,
 * which assumes CHAR_BIT == 8 - confirm this holds on all targeted ports. */
PRIVILEGED_DATA static volatile UBaseType_t uxSchedulerCoreMask =
( ( UBaseType_t ) ( ~( UBaseType_t ) 0U ) >> ( ( sizeof( UBaseType_t ) * ( size_t ) 8U ) - ( size_t ) configNUMBER_OF_CORES ) );
#endif /* #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_SCHEDULER_CORE_MASK == 1 ) ) */

/* Improve support for OpenOCD. The kernel tracks Ready tasks via priority lists.
* For tracking the state of remote threads, OpenOCD uses uxTopUsedPriority
* to determine the number of priority lists to read back from the remote target. */
Expand Down Expand Up @@ -941,12 +955,19 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION;
if( ( pxTCB->uxCoreAffinityMask & ( ( UBaseType_t ) 1U << ( UBaseType_t ) xCoreID ) ) != 0U )
#endif
{
#if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
if( pxCurrentTCBs[ xCoreID ]->xPreemptionDisable == pdFALSE )
#if ( configUSE_SCHEDULER_CORE_MASK == 1 )
/* Non-idle tasks may not be scheduled on disabled cores. */
if( ( ( pxTCB->uxTaskAttributes & taskATTRIBUTE_IS_IDLE ) != 0U ) ||
( ( uxSchedulerCoreMask & ( ( UBaseType_t ) 1U << ( UBaseType_t ) xCoreID ) ) != 0U ) )
#endif
{
xLowestPriorityToPreempt = xCurrentCoreTaskPriority;
xLowestPriorityCore = xCoreID;
#if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
if( pxCurrentTCBs[ xCoreID ]->xPreemptionDisable == pdFALSE )
#endif
{
xLowestPriorityToPreempt = xCurrentCoreTaskPriority;
xLowestPriorityCore = xCoreID;
}
}
}
}
Expand Down Expand Up @@ -1090,14 +1111,21 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION;
if( ( pxTCB->uxCoreAffinityMask & ( ( UBaseType_t ) 1U << ( UBaseType_t ) xCoreID ) ) != 0U )
#endif
{
/* If the task is not being executed by any core swap it in. */
pxCurrentTCBs[ xCoreID ]->xTaskRunState = taskTASK_NOT_RUNNING;
#if ( configUSE_CORE_AFFINITY == 1 )
pxPreviousTCB = pxCurrentTCBs[ xCoreID ];
#if ( configUSE_SCHEDULER_CORE_MASK == 1 )
/* Non-idle tasks may not run on scheduler-disabled cores. */
if( ( ( pxTCB->uxTaskAttributes & taskATTRIBUTE_IS_IDLE ) != 0U ) ||
( ( uxSchedulerCoreMask & ( ( UBaseType_t ) 1U << ( UBaseType_t ) xCoreID ) ) != 0U ) )
#endif
pxTCB->xTaskRunState = xCoreID;
pxCurrentTCBs[ xCoreID ] = pxTCB;
xTaskScheduled = pdTRUE;
{
/* If the task is not being executed by any core swap it in. */
pxCurrentTCBs[ xCoreID ]->xTaskRunState = taskTASK_NOT_RUNNING;
#if ( configUSE_CORE_AFFINITY == 1 )
pxPreviousTCB = pxCurrentTCBs[ xCoreID ];
#endif
pxTCB->xTaskRunState = xCoreID;
pxCurrentTCBs[ xCoreID ] = pxTCB;
xTaskScheduled = pdTRUE;
}
}
}
else if( pxTCB == pxCurrentTCBs[ xCoreID ] )
Expand All @@ -1108,9 +1136,16 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION;
if( ( pxTCB->uxCoreAffinityMask & ( ( UBaseType_t ) 1U << ( UBaseType_t ) xCoreID ) ) != 0U )
#endif
{
/* The task is already running on this core, mark it as scheduled. */
pxTCB->xTaskRunState = xCoreID;
xTaskScheduled = pdTRUE;
#if ( configUSE_SCHEDULER_CORE_MASK == 1 )
/* Non-idle tasks may not continue on scheduler-disabled cores. */
if( ( ( pxTCB->uxTaskAttributes & taskATTRIBUTE_IS_IDLE ) != 0U ) ||
( ( uxSchedulerCoreMask & ( ( UBaseType_t ) 1U << ( UBaseType_t ) xCoreID ) ) != 0U ) )
#endif
{
/* The task is already running on this core, mark it as scheduled. */
pxTCB->xTaskRunState = xCoreID;
xTaskScheduled = pdTRUE;
}
}
}
else
Expand Down Expand Up @@ -3099,6 +3134,74 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,

/*-----------------------------------------------------------*/

#if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_SCHEDULER_CORE_MASK == 1 ) )

/* Sets the global scheduler core mask.  Bit N = 1 allows core N to run
 * non-idle tasks; a cleared bit restricts core N to its idle task.  Cores
 * that transition from enabled to disabled while running a non-idle task
 * are yielded immediately so the scheduler re-selects the idle task there. */
    void vTaskSetSchedulerCoreMask( UBaseType_t uxCoreMask )
    {
        BaseType_t xCoreID;
        UBaseType_t uxOldMask;
        UBaseType_t uxDisabledCores;

        traceENTER_vTaskSetSchedulerCoreMask( uxCoreMask );

        taskENTER_CRITICAL();
        {
            uxOldMask = uxSchedulerCoreMask;

            /* Clamp to the number of physical cores so stray bits are ignored.
             * The valid-core mask is derived by right-shifting ~0 to avoid
             * left-shift UB and unsigned long width assumptions (see the
             * uxSchedulerCoreMask initialiser for a full explanation). */
            uxSchedulerCoreMask = uxCoreMask &
                                  ( ( UBaseType_t ) ( ~( UBaseType_t ) 0U ) >> ( ( sizeof( UBaseType_t ) * ( size_t ) 8U ) - ( size_t ) configNUMBER_OF_CORES ) );

            if( xSchedulerRunning != pdFALSE )
            {
                /* For each core that was just disabled (was allowed, now
                 * disallowed), yield it immediately if it is running a
                 * non-idle task so the scheduler re-selects the idle task
                 * on that core. */
                uxDisabledCores = uxOldMask & ~uxSchedulerCoreMask;

                for( xCoreID = ( BaseType_t ) 0; xCoreID < ( BaseType_t ) configNUMBER_OF_CORES; xCoreID++ )
                {
                    if( ( uxDisabledCores & ( ( UBaseType_t ) 1U << ( UBaseType_t ) xCoreID ) ) != 0U )
                    {
                        if( ( pxCurrentTCBs[ xCoreID ]->uxTaskAttributes & taskATTRIBUTE_IS_IDLE ) == 0U )
                        {
                            prvYieldCore( xCoreID );
                        }
                    }
                }
            }
        }
        taskEXIT_CRITICAL();

        traceRETURN_vTaskSetSchedulerCoreMask();
    }
#endif /* #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_SCHEDULER_CORE_MASK == 1 ) ) */

/*-----------------------------------------------------------*/

#if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_SCHEDULER_CORE_MASK == 1 ) )

/* Returns a snapshot of the global scheduler core mask.  Bit N set in the
 * returned value means core N is currently permitted to run non-idle tasks.
 * The read is performed inside a critical section so a torn read cannot be
 * observed on ports where UBaseType_t accesses are not atomic. */
    UBaseType_t uxTaskGetSchedulerCoreMask( void )
    {
        UBaseType_t uxReturn;

        traceENTER_uxTaskGetSchedulerCoreMask();

        portBASE_TYPE_ENTER_CRITICAL();
        uxReturn = uxSchedulerCoreMask;
        portBASE_TYPE_EXIT_CRITICAL();

        traceRETURN_uxTaskGetSchedulerCoreMask( uxReturn );

        return uxReturn;
    }
#endif /* #if ( ( configNUMBER_OF_CORES > 1 ) && ( configUSE_SCHEDULER_CORE_MASK == 1 ) ) */

/*-----------------------------------------------------------*/

#if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )

void vTaskPreemptionDisable( const TaskHandle_t xTask )
Expand Down