scx_rlfifo/
main.rs

// Copyright (c) Andrea Righi <andrea.righi@linux.dev>

// This software may be used and distributed according to the terms of the
// GNU General Public License version 2.

//! # Round-Robin Linux kernel scheduler that runs in user-space
//!
//! ## Overview
//!
//! This is a fully functional Round-Robin scheduler for the Linux kernel that operates
//! in user-space and is 100% implemented in Rust.
//!
//! It dequeues tasks in FIFO order and assigns dynamic time slices, preempting and
//! re-enqueuing tasks to achieve basic Round-Robin behavior.
//!
//! The scheduler is designed to serve as a simple template for developers looking to implement
//! more advanced scheduling policies.
//!
//! It is based on `scx_rustland_core`, a framework that is specifically designed to simplify the
//! creation of user-space schedulers, leveraging the Linux kernel's `sched_ext` feature (a
//! technology that allows schedulers to be implemented in BPF).
//!
//! The `scx_rustland_core` crate offers an abstraction layer over `sched_ext`, enabling developers
//! to write schedulers in Rust without needing to interact directly with low-level kernel or BPF
//! internal details.
//!
//! ## scx_rustland_core API
//!
//! ### struct `BpfScheduler`
//!
//! The `BpfScheduler` struct is the core interface for interacting with `sched_ext` via BPF.
//!
//! - **Initialization**:
//!   - `BpfScheduler::init()` registers the scheduler and initializes the BPF component.
//!
//! - **Task Management**:
//!   - `dequeue_task()`: Consume a task that wants to run and return a `QueuedTask` object
//!   - `select_cpu(pid: i32, prev_cpu: i32, flags: u64)`: Select an idle CPU for a task
//!   - `dispatch_task(task: &DispatchedTask)`: Dispatch a task
//!
//! - **Completion Notification**:
//!   - `notify_complete(nr_pending: u64)`: Give control to the BPF component and report the number
//!     of tasks that are still pending (this function can sleep)
//!
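//! Putting these calls together, a rough sketch of the scheduling loop implemented by
//! dispatch_tasks() below, given a `BpfScheduler` instance named `bpf`:
//!
//!   while let Ok(Some(task)) = bpf.dequeue_task() {
//!       let mut dispatched_task = DispatchedTask::new(&task);
//!       let cpu = bpf.select_cpu(task.pid, task.cpu, task.flags);
//!       dispatched_task.cpu = if cpu >= 0 { cpu } else { RL_CPU_ANY };
//!       bpf.dispatch_task(&dispatched_task).unwrap();
//!   }
//!   bpf.notify_complete(0);
//!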
//! Each task received from `dequeue_task()` contains the following:
//!
//! struct QueuedTask {
//!     pub pid: i32,              // pid that uniquely identifies a task
//!     pub cpu: i32,              // CPU previously used by the task
//!     pub nr_cpus_allowed: u64,  // Number of CPUs that the task can use
//!     pub flags: u64,            // task's enqueue flags
//!     pub start_ts: u64,         // Timestamp of when the task last started running on a CPU (in ns)
//!     pub stop_ts: u64,          // Timestamp of when the task last released a CPU (in ns)
//!     pub exec_runtime: u64,     // Total cpu time since last sleep (in ns)
//!     pub weight: u64,           // Task priority in the range [1..10000] (default is 100)
//!     pub vtime: u64,            // Current task vruntime / deadline (set by the scheduler)
//! }
//!
//! Each task dispatched using `dispatch_task()` contains the following:
//!
//! struct DispatchedTask {
//!     pub pid: i32,      // pid that uniquely identifies a task
//!     pub cpu: i32,      // target CPU selected by the scheduler
//!                        // (RL_CPU_ANY = dispatch on the first CPU available)
//!     pub flags: u64,    // task's enqueue flags
//!     pub slice_ns: u64, // time slice in nanoseconds assigned to the task
//!                        // (0 = use default time slice)
//!     pub vtime: u64,    // this value can be used to send the task's vruntime or deadline
//!                        // directly to the underlying BPF dispatcher
//! }
//!
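//! For example, a scheduler with no CPU preference that is happy with the default time slice
//! could fill in a dispatched task as follows (a minimal sketch based on the fields above):
//!
//!   let mut dispatched_task = DispatchedTask::new(&task);
//!   dispatched_task.cpu = RL_CPU_ANY; // no preference: run on the first CPU available
//!   dispatched_task.slice_ns = 0;     // 0 = use the default time slice
//!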
//! Other internal statistics that can be used to implement better scheduling policies:
//!
//!  let n: u64 = *self.bpf.nr_online_cpus_mut();       // number of online CPUs
//!  let n: u64 = *self.bpf.nr_running_mut();           // number of currently running tasks
//!  let n: u64 = *self.bpf.nr_queued_mut();            // number of tasks queued to be scheduled
//!  let n: u64 = *self.bpf.nr_scheduled_mut();         // number of tasks managed by the user-space scheduler
//!  let n: u64 = *self.bpf.nr_user_dispatches_mut();   // number of user-space dispatches
//!  let n: u64 = *self.bpf.nr_kernel_dispatches_mut(); // number of kernel dispatches
//!  let n: u64 = *self.bpf.nr_cancel_dispatches_mut(); // number of cancelled dispatches
//!  let n: u64 = *self.bpf.nr_bounce_dispatches_mut(); // number of bounced dispatches
//!  let n: u64 = *self.bpf.nr_failed_dispatches_mut(); // number of failed dispatches
//!  let n: u64 = *self.bpf.nr_sched_congested_mut();   // number of scheduler congestion events
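//!
//! For instance, this scheduler reads nr_queued in dispatch_tasks() below to shrink each
//! task's time slice as the number of waiting tasks grows, and print_stats() periodically
//! reports the dispatch counters.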

mod bpf_skel;
pub use bpf_skel::*;
pub mod bpf_intf;

#[rustfmt::skip]
mod bpf;
use std::mem::MaybeUninit;
use std::time::SystemTime;

use anyhow::Result;
use bpf::*;
use libbpf_rs::OpenObject;
use scx_utils::UserExitInfo;

// Maximum time slice (in nanoseconds) that a task can use before it is re-enqueued.
const SLICE_NS: u64 = 5_000_000;

struct Scheduler<'a> {
    bpf: BpfScheduler<'a>, // Connector to the sched_ext BPF backend
}

impl<'a> Scheduler<'a> {
    fn init(open_object: &'a mut MaybeUninit<OpenObject>) -> Result<Self> {
        let bpf = BpfScheduler::init(
            open_object,
            0,     // exit_dump_len (buffer size of exit info, 0 = default)
            false, // partial (false = include all tasks)
            false, // debug (false = debug mode off)
            true,  // builtin_idle (true = allow BPF to use idle CPUs if available)
        )?;
        Ok(Self { bpf })
    }

    fn dispatch_tasks(&mut self) {
        // Get the number of tasks that are waiting to be scheduled.
        let nr_waiting = *self.bpf.nr_queued_mut();

        // Start consuming and dispatching tasks, until all the CPUs are busy or there are no more
        // tasks to be dispatched.
        while let Ok(Some(task)) = self.bpf.dequeue_task() {
            // Create a new task to be dispatched from the received enqueued task.
            let mut dispatched_task = DispatchedTask::new(&task);

            // Decide where the task needs to run (pick a target CPU).
            //
            // A call to select_cpu() will return the most suitable idle CPU for the task,
            // prioritizing its previously used CPU (task.cpu).
            //
            // If we can't find any idle CPU, run on the first CPU available.
            let cpu = self.bpf.select_cpu(task.pid, task.cpu, task.flags);
            dispatched_task.cpu = if cpu >= 0 { cpu } else { RL_CPU_ANY };

            // Determine the task's time slice: assign a value inversely proportional to the
            // number of tasks waiting to be scheduled.
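            // For example, with SLICE_NS = 5ms and nr_waiting = 4, each task gets a 1ms slice.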
            dispatched_task.slice_ns = SLICE_NS / (nr_waiting + 1);

            // Dispatch the task.
            self.bpf.dispatch_task(&dispatched_task).unwrap();
        }

        // Notify the BPF component that tasks have been dispatched.
        //
        // This function will put the scheduler to sleep until another task needs to run.
        self.bpf.notify_complete(0);
    }

    fn print_stats(&mut self) {
        // Internal scx_rustland_core statistics.
        let nr_user_dispatches = *self.bpf.nr_user_dispatches_mut();
        let nr_kernel_dispatches = *self.bpf.nr_kernel_dispatches_mut();
        let nr_cancel_dispatches = *self.bpf.nr_cancel_dispatches_mut();
        let nr_bounce_dispatches = *self.bpf.nr_bounce_dispatches_mut();
        let nr_failed_dispatches = *self.bpf.nr_failed_dispatches_mut();
        let nr_sched_congested = *self.bpf.nr_sched_congested_mut();

        println!(
            "user={} kernel={} cancel={} bounce={} fail={} cong={}",
            nr_user_dispatches,
            nr_kernel_dispatches,
            nr_cancel_dispatches,
            nr_bounce_dispatches,
            nr_failed_dispatches,
            nr_sched_congested,
        );
    }

    fn now() -> u64 {
        SystemTime::now()
            .duration_since(SystemTime::UNIX_EPOCH)
            .unwrap()
            .as_secs()
    }

    fn run(&mut self) -> Result<UserExitInfo> {
        let mut prev_ts = Self::now();

        while !self.bpf.exited() {
            self.dispatch_tasks();

            let curr_ts = Self::now();
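            // Print the scheduling statistics at most once per second.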
            if curr_ts > prev_ts {
                self.print_stats();
                prev_ts = curr_ts;
            }
        }
        self.bpf.shutdown_and_report()
    }
}

fn print_warning() {
    let warning = r#"
**************************************************************************

WARNING: The purpose of scx_rlfifo is to provide a simple scheduler
implementation based on scx_rustland_core, and it is not intended for
use in production environments. If you want to run a scheduler that makes
decisions in user space, it is recommended to use *scx_rustland* instead.

Please do not open GitHub issues in the event of poor performance or
scheduler eviction due to a runnable task timeout. However, if running this
scheduler results in a system crash or the entire system becoming unresponsive,
please open a GitHub issue.

**************************************************************************"#;

    println!("{}", warning);
}

fn main() -> Result<()> {
    print_warning();

    // Initialize and load the FIFO scheduler.
    let mut open_object = MaybeUninit::uninit();
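    // Keep re-initializing and running the scheduler until it exits without requesting a restart.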
    loop {
        let mut sched = Scheduler::init(&mut open_object)?;
        if !sched.run()?.should_restart() {
            break;
        }
    }

    Ok(())
}