scx_rustland/main.rs
// Copyright (c) Andrea Righi <andrea.righi@linux.dev>

// This software may be used and distributed according to the terms of the
// GNU General Public License version 2.
mod bpf_skel;
pub use bpf_skel::*;
pub mod bpf_intf;

#[rustfmt::skip]
mod bpf;
use bpf::*;

mod stats;
use std::collections::BTreeSet;
use std::io::{self};
use std::mem::MaybeUninit;
use std::time::Duration;
use std::time::SystemTime;

use anyhow::Result;
use clap::Parser;
use libbpf_rs::OpenObject;
use log::info;
use log::warn;
use procfs::process::Process;
use scx_stats::prelude::*;
use scx_utils::build_id;
use scx_utils::libbpf_clap_opts::LibbpfOpts;
use scx_utils::UserExitInfo;
use stats::Metrics;

const SCHEDULER_NAME: &str = "RustLand";

/// scx_rustland: user-space scheduler written in Rust
///
/// scx_rustland is designed to prioritize interactive workloads over background CPU-intensive
/// workloads. For this reason the typical use case of this scheduler involves low-latency
/// interactive applications, such as gaming, video conferencing and live streaming.
///
/// scx_rustland is also designed to be an "easy to read" template that can be used by any
/// developer to quickly experiment with more complex scheduling policies fully implemented in
/// Rust.
///
/// The scheduler is based on scx_rustland_core, which implements the low-level sched-ext
/// functionalities.
///
/// The scheduling policy implemented in user-space is based on a deadline, evaluated as follows:
///
/// deadline = vruntime + exec_runtime
///
/// Where vruntime reflects the task's total runtime scaled by weight (ensuring fairness), while
/// exec_runtime accounts for the CPU time used since the last sleep (capturing responsiveness).
/// Tasks are then dispatched from the lowest to the highest deadline.
///
/// This approach favors latency-sensitive tasks: those that frequently sleep will accumulate less
/// exec_runtime, resulting in earlier deadlines. In contrast, CPU-intensive tasks that don't sleep
/// accumulate a larger exec_runtime and thus get scheduled later.
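///
/// As a purely illustrative example (the numbers are hypothetical): if two tasks both sit at a
/// vruntime of 1,000,000 ns, but task A has accumulated only 100,000 ns of exec_runtime since its
/// last sleep while task B has accumulated 10,000,000 ns, their deadlines evaluate to 1,100,000 ns
/// and 11,000,000 ns respectively, so task A is dispatched first.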
///
/// All the tasks are stored in a BTreeSet (TaskTree), using the deadline as the ordering key.
/// Once the order of execution is determined, all tasks are sent back to the BPF counterpart
/// (scx_rustland_core) to be dispatched.
///
/// The BPF dispatcher is completely agnostic of the particular scheduling policy implemented in
/// user-space. For this reason, developers who want to use this scheduler to experiment with
/// scheduling policies should be able to simply modify the Rust component, without having to deal
/// with any internal kernel / BPF details.
///
/// === Troubleshooting ===
///
/// - Reduce the time slice (option `-s`) if you experience lag or crackling audio.
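///
/// For example, a possible invocation that reduces the time slice to 10 ms while printing
/// statistics every second:
///
///   $ scx_rustland -s 10000 --stats 1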
///
#[derive(Debug, Parser)]
struct Opts {
    /// Scheduling slice duration in microseconds.
    #[clap(short = 's', long, default_value = "20000")]
    slice_us: u64,

    /// Scheduling minimum slice duration in microseconds.
    #[clap(short = 'S', long, default_value = "1000")]
    slice_us_min: u64,

    /// If set, per-CPU tasks are dispatched directly to their only eligible CPU.
    /// This can help enforce affinity-based isolation for better performance.
    #[clap(short = 'l', long, action = clap::ArgAction::SetTrue)]
    percpu_local: bool,

    /// If specified, only tasks which have their scheduling policy set to SCHED_EXT using
    /// sched_setscheduler(2) are switched. Otherwise, all tasks are switched.
    #[clap(short = 'p', long, action = clap::ArgAction::SetTrue)]
    partial: bool,

    /// Exit debug dump buffer length. 0 indicates default.
    #[clap(long, default_value = "0")]
    exit_dump_len: u32,

    /// Enable verbose output, including libbpf details. Moreover, BPF scheduling events will be
    /// reported in tracefs (e.g., /sys/kernel/tracing/trace_pipe).
    #[clap(short = 'v', long, action = clap::ArgAction::SetTrue)]
    verbose: bool,

    /// Enable stats monitoring with the specified interval.
    #[clap(long)]
    stats: Option<f64>,

    /// Run in stats monitoring mode with the specified interval. Scheduler
    /// is not launched.
    #[clap(long)]
    monitor: Option<f64>,

    /// Show descriptions for statistics.
    #[clap(long)]
    help_stats: bool,

    /// Print scheduler version and exit.
    #[clap(short = 'V', long, action = clap::ArgAction::SetTrue)]
    version: bool,

    #[clap(flatten, next_help_heading = "Libbpf Options")]
    pub libbpf: LibbpfOpts,
}

// Time constants.
const NSEC_PER_USEC: u64 = 1_000;

#[derive(Debug, PartialEq, Eq, Clone)]
struct Task {
    qtask: QueuedTask, // queued task
    deadline: u64,     // task deadline (that determines the order in which tasks are dispatched)
    timestamp: u64,    // task enqueue timestamp
}

// Sort tasks by their deadline first (tasks with an earlier deadline are always scheduled before
// the others), then by their enqueue timestamp and lastly by their pid.
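//
// Note: tasks are kept in a BTreeSet, which treats elements that compare as equal as duplicates,
// so falling back to the enqueue timestamp and finally to the pid guarantees a total order and
// prevents two distinct tasks from ever comparing as equal and one of them being silently dropped.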
impl Ord for Task {
    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
        self.deadline
            .cmp(&other.deadline)
            .then_with(|| self.timestamp.cmp(&other.timestamp))
            .then_with(|| self.qtask.pid.cmp(&other.qtask.pid))
    }
}

impl PartialOrd for Task {
    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
        Some(self.cmp(other))
    }
}

// Main scheduler object
struct Scheduler<'a> {
    bpf: BpfScheduler<'a>,                  // BPF connector
    opts: &'a Opts,                         // scheduler options
    stats_server: StatsServer<(), Metrics>, // statistics
    tasks: BTreeSet<Task>,                  // tasks ordered by deadline
    vruntime_now: u64,                      // Tracks the latest observed (max) vruntime across tasks
    init_page_faults: u64,                  // Initial page faults counter
    slice_ns: u64,                          // Default time slice (in ns)
    slice_ns_min: u64,                      // Minimum time slice (in ns)
}

impl<'a> Scheduler<'a> {
    fn init(opts: &'a Opts, open_object: &'a mut MaybeUninit<OpenObject>) -> Result<Self> {
        let stats_server = StatsServer::new(stats::server_data()).launch()?;

        let slice_ns = opts.slice_us * NSEC_PER_USEC;
        let slice_ns_min = opts.slice_us_min * NSEC_PER_USEC;

        // Low-level BPF connector.
        let bpf = BpfScheduler::init(
            open_object,
            opts.libbpf.clone().into_bpf_open_opts(),
            opts.exit_dump_len,
            opts.partial,
            opts.verbose,
            true, // Enable built-in idle CPU selection policy
            slice_ns_min,
            "rustland",
        )?;

        info!(
            "{} version {} - scx_rustland_core {}",
            SCHEDULER_NAME,
            build_id::full_version(env!("CARGO_PKG_VERSION")),
            scx_rustland_core::VERSION
        );

        // Return scheduler object.
        Ok(Self {
            bpf,
            opts,
            stats_server,
            tasks: BTreeSet::new(),
            vruntime_now: 0,
            init_page_faults: 0,
            slice_ns,
            slice_ns_min,
        })
    }

    fn get_metrics(&mut self) -> Metrics {
        let page_faults = Self::get_page_faults().unwrap_or_default();
        if self.init_page_faults == 0 {
            self.init_page_faults = page_faults;
        }
        let nr_page_faults = page_faults - self.init_page_faults;

        Metrics {
            nr_running: *self.bpf.nr_running_mut(),
            nr_cpus: *self.bpf.nr_online_cpus_mut(),
            nr_queued: *self.bpf.nr_queued_mut(),
            nr_scheduled: *self.bpf.nr_scheduled_mut(),
            nr_page_faults,
            nr_user_dispatches: *self.bpf.nr_user_dispatches_mut(),
            nr_kernel_dispatches: *self.bpf.nr_kernel_dispatches_mut(),
            nr_cancel_dispatches: *self.bpf.nr_cancel_dispatches_mut(),
            nr_bounce_dispatches: *self.bpf.nr_bounce_dispatches_mut(),
            nr_failed_dispatches: *self.bpf.nr_failed_dispatches_mut(),
            nr_sched_congested: *self.bpf.nr_sched_congested_mut(),
        }
    }

    // Return current timestamp in ns.
    fn now() -> u64 {
        let ts = SystemTime::now()
            .duration_since(SystemTime::UNIX_EPOCH)
            .unwrap();
        ts.as_nanos() as u64
    }

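    // NOTE: task weights follow the sched_ext convention where the default priority (nice 0)
    // corresponds to a weight of 100, so the two helpers below are identity transforms for
    // default-priority tasks.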
    // Return a value proportional to the task's weight.
    fn scale_by_task_weight(task: &QueuedTask, value: u64) -> u64 {
        value * task.weight / 100
    }

    // Return a value inversely proportional to the task's weight.
    fn scale_by_task_weight_inverse(task: &QueuedTask, value: u64) -> u64 {
        value * 100 / task.weight
    }

    /// Updates a task's virtual runtime based on kernel information and
    /// returns the evaluated deadline.
    ///
    /// This method implements the main task ordering logic of the scheduler.
    fn update_enqueued(&mut self, task: &mut QueuedTask) -> u64 {
        // Update task's vruntime.
        task.vtime = if task.vtime == 0 {
            // Re-align new tasks to the current vruntime.
            self.vruntime_now
        } else {
            // Prevent sleeping tasks from gaining more than one full slice of vruntime credit.
            let vruntime_min = self.vruntime_now.saturating_sub(self.slice_ns);
            task.vtime.max(vruntime_min)
        };

        // Compute the time slice the task just consumed.
        let slice_ns = task.stop_ts.saturating_sub(task.start_ts);

        // Update task and global vruntimes.
        let vslice = Self::scale_by_task_weight_inverse(task, slice_ns);
        task.vtime += vslice;
        self.vruntime_now += vslice;

        // Compute the deadline, adding the accumulated runtime since the last sleep. Cap
        // exec_runtime to 100 time slices to prevent starvation of CPU-intensive tasks.
        task.vtime + task.exec_runtime.min(self.slice_ns.saturating_mul(100))
    }

    /// Dispatch the next task in the queue.
    ///
    /// Return true if dispatching succeeded or there was no task to dispatch, or false if
    /// dispatching failed (the task is automatically re-enqueued in that case).
    fn dispatch_task(&mut self) -> bool {
        // Retrieve the next task to dispatch, if any.
        let Some(task) = self.tasks.pop_first() else {
            return true;
        };

        // Initialize a dispatched task from the queued one.
        let mut dispatched_task = DispatchedTask::new(&task.qtask);

        // Assign the minimum time slice scaled by the task's priority.
        dispatched_task.slice_ns = Self::scale_by_task_weight(&task.qtask, self.slice_ns_min);

        // Propagate the evaluated deadline to the BPF backend.
        dispatched_task.vtime = task.deadline;

        // Attempt to select an idle CPU for the task (if percpu_local is enabled, send per-CPU
        // tasks directly to their only usable CPU).
        dispatched_task.cpu = if self.opts.percpu_local {
            task.qtask.cpu
        } else {
            match self
                .bpf
                .select_cpu(task.qtask.pid, task.qtask.cpu, task.qtask.flags)
            {
                cpu if cpu >= 0 => cpu,
                _ => RL_CPU_ANY,
            }
        };

        // Send the task to the BPF dispatcher.
        if self.bpf.dispatch_task(&dispatched_task).is_err() {
            // Dispatching failed: reinsert the task and stop dispatching.
            self.tasks.insert(task);
            return false;
        }

        true
    }

    // Drain all the tasks from the queued list, update their deadline (Self::update_enqueued()),
    // then push them all to the task pool (doing so will sort them by their deadline).
    fn drain_queued_tasks(&mut self) {
        loop {
            match self.bpf.dequeue_task() {
                Ok(Some(mut task)) => {
                    // Update task information and determine its deadline.
                    let deadline = self.update_enqueued(&mut task);
                    let timestamp = Self::now();

                    // Insert task in the task pool (ordered by deadline).
                    self.tasks.insert(Task {
                        qtask: task,
                        deadline,
                        timestamp,
                    });
                }
                Ok(None) => {
                    break;
                }
                Err(err) => {
                    warn!("Error: {err}");
                    break;
                }
            }
        }
    }

    // Main scheduling function (called in a loop to periodically drain tasks from the queued list
    // and dispatch them to the BPF part via the dispatched list).
    fn schedule(&mut self) {
        self.drain_queued_tasks();
        self.dispatch_task();

        // Notify the dispatcher if there are still pending tasks to be processed.
        self.bpf.notify_complete(self.tasks.len() as u64);
    }

    // Get total page faults from the process.
    fn get_page_faults() -> Result<u64, io::Error> {
        let myself = Process::myself().map_err(io::Error::other)?;
        let stat = myself.stat().map_err(io::Error::other)?;

        Ok(stat.minflt + stat.majflt)
    }

    fn run(&mut self) -> Result<UserExitInfo> {
        let (res_ch, req_ch) = self.stats_server.channels();

        while !self.bpf.exited() {
            // Call the main scheduler body.
            self.schedule();

            // Handle monitor requests asynchronously.
            if req_ch.try_recv().is_ok() {
                res_ch.send(self.get_metrics())?;
            }
        }

        self.bpf.shutdown_and_report()
    }
}

// Unregister the scheduler.
impl Drop for Scheduler<'_> {
    fn drop(&mut self) {
        info!("Unregister {SCHEDULER_NAME} scheduler");
    }
}

fn main() -> Result<()> {
    let opts = Opts::parse();

    if opts.version {
        println!(
            "{} version {} - scx_rustland_core {}",
            SCHEDULER_NAME,
            build_id::full_version(env!("CARGO_PKG_VERSION")),
            scx_rustland_core::VERSION
        );
        return Ok(());
    }

    if opts.help_stats {
        stats::server_data().describe_meta(&mut std::io::stdout(), None)?;
        return Ok(());
    }

    let loglevel = simplelog::LevelFilter::Info;

    let mut lcfg = simplelog::ConfigBuilder::new();
    lcfg.set_time_offset_to_local()
        .expect("Failed to set local time offset")
        .set_time_level(simplelog::LevelFilter::Error)
        .set_location_level(simplelog::LevelFilter::Off)
        .set_target_level(simplelog::LevelFilter::Off)
        .set_thread_level(simplelog::LevelFilter::Off);
    simplelog::TermLogger::init(
        loglevel,
        lcfg.build(),
        simplelog::TerminalMode::Stderr,
        simplelog::ColorChoice::Auto,
    )?;

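    // Start the stats monitor thread when requested: with --monitor the scheduler itself is never
    // launched and we only wait for the monitor to finish, while with --stats the monitor keeps
    // running alongside the scheduler.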
    if let Some(intv) = opts.monitor.or(opts.stats) {
        let jh = std::thread::spawn(move || stats::monitor(Duration::from_secs_f64(intv)).unwrap());
        if opts.monitor.is_some() {
            let _ = jh.join();
            return Ok(());
        }
    }

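    // Storage for the BPF open object, allocated once and reused across scheduler restarts.
    // Keep re-initializing and running the scheduler until its exit status no longer requests a
    // restart.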
    let mut open_object = MaybeUninit::uninit();
    loop {
        let mut sched = Scheduler::init(&opts, &mut open_object)?;
        if !sched.run()?.should_restart() {
            break;
        }
    }

    Ok(())
}