// ostd/task/scheduler/mod.rs
// SPDX-License-Identifier: MPL-2.0
//! Scheduling subsystem (in-OSTD part).
//!
//! This module defines what OSTD expects from a scheduling implementation
//! and provides useful functions for controlling the execution flow.
mod fifo_scheduler;
pub mod info;
use spin::Once;
use super::{preempt::cpu_local, processor, Task};
use crate::{
cpu::{CpuId, CpuSet, PinCurrentCpu},
prelude::*,
task::disable_preempt,
timer,
};
/// Injects a scheduler implementation into framework.
///
/// This function can only be called once and must be called during the initialization of kernel.
pub fn inject_scheduler(scheduler: &'static dyn Scheduler<Task>) {
    SCHEDULER.call_once(|| scheduler);

    // Drive time-slice accounting from the timer: on every tick, let the
    // scheduler update the current task's statistics, and if it decides the
    // task should be preempted, raise the local need-preempt flag (which is
    // acted upon later, e.g. by `might_preempt`).
    //
    // NOTE(review): the callback is registered outside `call_once`, so a
    // second, contract-violating call to this function would register the
    // callback twice; the "called once" contract above is what prevents this.
    // The registration deliberately happens *after* `call_once` completes, so
    // `SCHEDULER.get().unwrap()` in the callback cannot observe an
    // uninitialized `Once`.
    timer::register_callback(|| {
        SCHEDULER.get().unwrap().local_mut_rq_with(&mut |local_rq| {
            if local_rq.update_current(UpdateFlags::Tick) {
                cpu_local::set_need_preempt();
            }
        })
    });
}
/// The injected scheduler implementation, set exactly once — either explicitly
/// via [`inject_scheduler`] or lazily by `run_new_task` falling back to the
/// FIFO scheduler.
static SCHEDULER: Once<&'static dyn Scheduler<Task>> = Once::new();
/// A per-CPU task scheduler.
pub trait Scheduler<T = Task>: Sync + Send {
    /// Enqueues a runnable task.
    ///
    /// Scheduler developers can perform load-balancing or some accounting work here.
    ///
    /// If the `current` of a CPU needs to be preempted, this method returns the id of
    /// that CPU.
    fn enqueue(&self, runnable: Arc<T>, flags: EnqueueFlags) -> Option<CpuId>;

    /// Gets an immutable access to the local runqueue of the current CPU core.
    ///
    /// The runqueue is only exposed to the callback `f` for the duration of
    /// the call; it cannot escape it.
    fn local_rq_with(&self, f: &mut dyn FnMut(&dyn LocalRunQueue<T>));

    /// Gets a mutable access to the local runqueue of the current CPU core.
    ///
    /// The runqueue is only exposed to the callback `f` for the duration of
    /// the call; it cannot escape it.
    fn local_mut_rq_with(&self, f: &mut dyn FnMut(&mut dyn LocalRunQueue<T>));
}
/// The _local_ view of a per-CPU runqueue.
///
/// This local view provides the interface for the runqueue of a CPU core
/// to be inspected and manipulated by the code running on this particular CPU core.
///
/// Conceptually, a local runqueue consists of two parts:
/// (1) a priority queue of runnable tasks;
/// (2) the current running task.
/// The exact definition of "priority" is left for the concrete implementation to decide.
pub trait LocalRunQueue<T = Task> {
    /// Gets the current runnable task.
    fn current(&self) -> Option<&Arc<T>>;

    /// Updates the current runnable task's scheduling statistics and potentially its
    /// position in the queue.
    ///
    /// If the current runnable task needs to be preempted, the method returns `true`.
    fn update_current(&mut self, flags: UpdateFlags) -> bool;

    /// Picks the next current runnable task.
    ///
    /// This method returns the chosen next current runnable task. If there is no
    /// candidate for next current runnable task, this method returns `None`.
    fn pick_next_current(&mut self) -> Option<&Arc<T>>;

    /// Removes the current runnable task from runqueue.
    ///
    /// This method returns the current runnable task. If there is no current runnable
    /// task, this method returns `None`.
    fn dequeue_current(&mut self) -> Option<Arc<T>>;
}
/// Possible triggers of an `enqueue` action.
//
// `Debug` is derived so the flag can appear in diagnostics/panic messages;
// `Eq` is derived because the equality is total (added alongside the existing
// `PartialEq`), allowing the type to be used where an `Eq` bound is required.
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub enum EnqueueFlags {
    /// Spawn a new task.
    Spawn,
    /// Wake a sleeping task.
    Wake,
}
/// Possible triggers of an `update_current` action.
//
// `Debug` is derived so the flag can appear in diagnostics/panic messages;
// `Eq` is derived because the equality is total (added alongside the existing
// `PartialEq`), allowing the type to be used where an `Eq` bound is required.
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub enum UpdateFlags {
    /// Timer interrupt.
    Tick,
    /// Task waiting.
    Wait,
    /// Task yielding.
    Yield,
}
/// Yields the CPU if the current task has been flagged for preemption.
///
/// This is a no-op when the local need-preempt flag is not set.
#[track_caller]
pub(crate) fn might_preempt() {
    if cpu_local::should_preempt() {
        yield_now();
    }
}
/// Blocks the current task unless `has_woken()` returns `true`.
///
/// Note that this method may return due to spurious wake events. It's the caller's responsibility
/// to detect them (if necessary).
#[track_caller]
pub(crate) fn park_current<F>(has_woken: F)
where
    F: Fn() -> bool,
{
    // The task dequeued on the first try; compared against the scheduler's
    // pick on later iterations so we never "switch" to ourselves.
    let mut current = None;
    let mut is_first_try = true;

    reschedule(|local_rq: &mut dyn LocalRunQueue| {
        if is_first_try {
            // Already woken before we did anything: keep running.
            if has_woken() {
                return ReschedAction::DoNothing;
            }

            // Note the race conditions: the current task may be woken after the above `has_woken`
            // check, but before the below `dequeue_current` action, we need to make sure that the
            // wakeup event isn't lost.
            //
            // Currently, for the FIFO scheduler, `Scheduler::enqueue` will try to lock `local_rq`
            // when the above race condition occurs, so it will wait until we finish calling the
            // `dequeue_current` method and nothing bad will happen. This may need to be revisited
            // after more complex schedulers are introduced.

            // Record that we are now waiting, then take ourselves off the runqueue.
            local_rq.update_current(UpdateFlags::Wait);
            current = local_rq.dequeue_current();
        }

        if let Some(next_task) = local_rq.pick_next_current() {
            // NOTE(review): `current` is assumed to be `Some` here because the
            // running task was just dequeued on the first try — confirm this
            // invariant holds for every `LocalRunQueue` implementation.
            if Arc::ptr_eq(current.as_ref().unwrap(), next_task) {
                // The scheduler picked us again (e.g. we were re-enqueued by a
                // concurrent wakeup): no context switch needed.
                return ReschedAction::DoNothing;
            }
            return ReschedAction::SwitchTo(next_task.clone());
        }

        // No runnable successor yet; spin via `Retry` until one appears.
        is_first_try = false;
        ReschedAction::Retry
    });
}
/// Unblocks a target task.
pub(crate) fn unpark_target(runnable: Arc<Task>) {
    // Hand the task back to the scheduler; it may ask for some CPU's current
    // task to be preempted in favor of the newly woken one.
    let scheduler = SCHEDULER.get().unwrap();
    if let Some(cpu_to_preempt) = scheduler.enqueue(runnable, EnqueueFlags::Wake) {
        set_need_preempt(cpu_to_preempt);
    }
}
/// Enqueues a newly built task.
///
/// Note that the new task is not guaranteed to run at once.
#[track_caller]
pub(super) fn run_new_task(runnable: Arc<Task>) {
    // FIXME: remove this check for `SCHEDULER`.
    // Currently OSTD cannot know whether its user has injected a scheduler.
    if !SCHEDULER.is_completed() {
        fifo_scheduler::init();
    }

    // Enqueue the task; the scheduler may ask for some CPU's current task to
    // be preempted in favor of the new one.
    let scheduler = SCHEDULER.get().unwrap();
    if let Some(cpu_to_preempt) = scheduler.enqueue(runnable, EnqueueFlags::Spawn) {
        set_need_preempt(cpu_to_preempt);
    }

    // The enqueue above may have flagged this very CPU for preemption.
    might_preempt();
}
/// Raises the need-preempt flag on the given CPU.
///
/// If `cpu_id` is the current CPU, the CPU-local flag is set directly;
/// otherwise an inter-processor call is issued so the target CPU sets its
/// own flag.
fn set_need_preempt(cpu_id: CpuId) {
    // Disable preemption so the "current CPU" cannot change under us.
    let guard = disable_preempt();

    if guard.current_cpu() == cpu_id {
        cpu_local::set_need_preempt();
        return;
    }

    crate::smp::inter_processor_call(&CpuSet::from(cpu_id), || {
        cpu_local::set_need_preempt();
    });
}
/// Dequeues the current task from its runqueue.
///
/// This should only be called if the current is to exit.
#[track_caller]
pub(super) fn exit_current() -> ! {
    reschedule(|local_rq: &mut dyn LocalRunQueue| {
        // The exiting task never runs again, so drop it from the runqueue
        // unconditionally and keep retrying until a successor shows up.
        let _ = local_rq.dequeue_current();
        match local_rq.pick_next_current() {
            Some(next_task) => ReschedAction::SwitchTo(next_task.clone()),
            None => ReschedAction::Retry,
        }
    });

    // An exited task is never scheduled again, so execution cannot come back
    // to this point.
    unreachable!()
}
/// Yields execution.
#[track_caller]
pub(super) fn yield_now() {
    reschedule(|local_rq| {
        // Tell the scheduler this is a voluntary yield, then let it pick a
        // successor. If there is no candidate, keep running the current task.
        local_rq.update_current(UpdateFlags::Yield);
        match local_rq.pick_next_current() {
            Some(next_task) => ReschedAction::SwitchTo(next_task.clone()),
            None => ReschedAction::DoNothing,
        }
    })
}
/// Do rescheduling by acting on the scheduling decision (`ReschedAction`) made by a
/// user-given closure.
///
/// The closure makes the scheduling decision by taking the local runqueue as its input.
/// It may be invoked multiple times (once per `Retry`).
#[track_caller]
fn reschedule<F>(mut f: F)
where
    F: FnMut(&mut dyn LocalRunQueue) -> ReschedAction,
{
    // Even if the decision below is `DoNothing`, we should clear this flag. Meanwhile, to avoid
    // race conditions, we should do this before making the decision.
    cpu_local::clear_need_preempt();

    let next_task = loop {
        // `local_mut_rq_with` only hands the runqueue to a callback, so the
        // decision has to be smuggled out through this local variable.
        let mut action = ReschedAction::DoNothing;
        SCHEDULER.get().unwrap().local_mut_rq_with(&mut |rq| {
            action = f(rq);
        });

        match action {
            ReschedAction::DoNothing => {
                return;
            }
            ReschedAction::Retry => {
                continue;
            }
            ReschedAction::SwitchTo(next_task) => {
                break next_task;
            }
        };
    };

    // `switch_to_task` will spin if it finds that the next task is still running on some CPU core,
    // which guarantees soundness regardless of the scheduler implementation.
    //
    // FIXME: The scheduler decision and context switching are not atomic, which can lead to some
    // strange behavior even if the scheduler is implemented correctly. See "Problem 2" at
    // <https://github.com/asterinas/asterinas/issues/1633> for details.
    processor::switch_to_task(next_task);
}
/// Possible actions of a rescheduling.
enum ReschedAction {
    /// Keep running current task and do nothing.
    DoNothing,
    /// Loop until finding a task to swap out the current
    /// (the decision closure is invoked again by `reschedule`).
    Retry,
    /// Switch to target task.
    SwitchTo(Arc<Task>),
}