scx_wd40/main.rs

// Copyright (c) Meta Platforms, Inc. and affiliates.

// This software may be used and distributed according to the terms of the
// GNU General Public License version 2.
mod bpf_skel;
pub use bpf_skel::*;
pub mod bpf_intf;

mod domain;
use domain::DomainGroup;

pub mod tuner;
use tuner::Tuner;

pub mod load_balance;
use load_balance::LoadBalancer;

mod stats;
use std::collections::BTreeMap;
use std::mem::MaybeUninit;
use std::sync::atomic::AtomicBool;
use std::sync::atomic::Ordering;
use std::sync::Arc;
use std::time::Duration;
use std::time::Instant;
use std::time::SystemTime;
use std::time::UNIX_EPOCH;

use stats::ClusterStats;
use stats::NodeStats;

#[macro_use]
extern crate static_assertions;

use ::fb_procfs as procfs;
use anyhow::anyhow;
use anyhow::bail;
use anyhow::Context;
use anyhow::Result;
use clap::Parser;
use crossbeam::channel::RecvTimeoutError;
use libbpf_rs::skel::Skel;
use libbpf_rs::MapCore as _;
use libbpf_rs::OpenObject;
use libbpf_rs::ProgramInput;
use log::info;
use scx_arena::ArenaLib;
use scx_stats::prelude::*;
use scx_utils::build_id;
use scx_utils::compat;
use scx_utils::init_libbpf_logging;
use scx_utils::libbpf_clap_opts::LibbpfOpts;
use scx_utils::scx_enums;
use scx_utils::scx_ops_attach;
use scx_utils::scx_ops_load;
use scx_utils::scx_ops_open;
use scx_utils::uei_exited;
use scx_utils::uei_report;
use scx_utils::Cpumask;
use scx_utils::Topology;
use scx_utils::UserExitInfo;
use scx_utils::NR_CPU_IDS;

const SCHEDULER_NAME: &str = "scx_wd40";
const MAX_DOMS: usize = bpf_intf::consts_MAX_DOMS as usize;
const MAX_CPUS: usize = bpf_intf::consts_MAX_CPUS as usize;

// Number of u64 words in a BPF CPU mask.
static mut MASK_LEN: usize = 0;

/// scx_wd40: A fork of the scx_rusty multi-domain scheduler.
///
/// The message below is from the original scx_rusty codebase:
///
/// The BPF part does simple vtime or round robin scheduling in each domain
/// while tracking average load of each domain and duty cycle of each task.
///
/// The userspace part performs two roles. First, it makes higher frequency
/// (100ms) tuning decisions. It identifies CPUs which are not too heavily
/// loaded and marks them so that they can pull tasks from other overloaded
/// domains on the fly.
///
/// Second, it drives lower frequency (2s) load balancing. It determines
/// whether load balancing is necessary by comparing domain load averages.
/// If there are large enough load differences, it examines up to 1024
/// recently active tasks on the domain to determine which should be
/// migrated.
///
/// The overhead of userspace operations is low. Load balancing is not
/// performed frequently, but work-conservation is still maintained through
/// tuning and greedy execution. Load balancing itself is not that expensive
/// either. It only accesses per-domain load metrics to determine the domains
/// that need load balancing, as well as a limited number of per-task metrics
/// for each pushing domain.
///
/// An earlier variant of this scheduler was used to balance across six
/// domains, each representing a chiplet in a six-chiplet AMD processor, and
/// could match the performance of the production setup using CFS.
///
/// WARNING: scx_rusty currently assumes that all domains have equal
/// processing power and are at similar distances from each other. This
/// limitation will be removed in the future.
#[derive(Debug, Parser)]
struct Opts {
    /// Scheduling slice duration for under-utilized hosts, in microseconds.
    #[clap(short = 'u', long, default_value = "20000")]
    slice_us_underutil: u64,

    /// Scheduling slice duration for over-utilized hosts, in microseconds.
    #[clap(short = 'o', long, default_value = "1000")]
    slice_us_overutil: u64,

    /// Load balance interval in seconds.
    #[clap(short = 'i', long, default_value = "2.0")]
    interval: f64,

    /// Tuning interval in seconds. The tuner runs at a higher frequency than
    /// the load balancer to dynamically tune scheduling behavior.
    #[clap(short = 'I', long, default_value = "0.1")]
    tune_interval: f64,

    /// The half-life of task and domain load running averages in seconds.
    #[clap(short = 'l', long, default_value = "1.0")]
    load_half_life: f64,

    /// Build domains according to how CPUs are grouped at this cache level
    /// as determined by /sys/devices/system/cpu/cpuX/cache/indexI/id.
    #[clap(short = 'c', long, default_value = "3")]
    cache_level: u32,

    /// When non-zero, enable greedy task stealing. When a domain is idle, a CPU
    /// will attempt to steal tasks from another domain as follows:
    ///
    /// 1. Try to consume a task from the current domain
    /// 2. Try to consume a task from another domain in the current NUMA node
    ///    (or globally, if running on a single-socket system), if the domain
    ///    has at least this specified number of tasks enqueued.
    ///
    /// See greedy_threshold_x_numa to enable task stealing across NUMA nodes.
    /// Tasks stolen in this manner are not permanently stolen from their
    /// domain.
    #[clap(short = 'g', long, default_value = "1")]
    greedy_threshold: u32,

    /// When non-zero, enable greedy task stealing across NUMA nodes. The order
    /// of greedy task stealing follows greedy-threshold as described above, and
    /// greedy-threshold must be nonzero to enable task stealing across NUMA
    /// nodes.
    #[clap(long, default_value = "0")]
    greedy_threshold_x_numa: u32,

    /// Disable load balancing. Unless disabled, userspace will periodically
    /// calculate the load factor of each domain and instruct BPF which tasks
    /// to move.
    #[clap(long, action = clap::ArgAction::SetTrue)]
    no_load_balance: bool,

    /// Put per-cpu kthreads directly into local DSQs.
    #[clap(short = 'k', long, action = clap::ArgAction::SetTrue)]
    kthreads_local: bool,

    /// In recent kernels (>=v6.6), the kernel is responsible for balancing
    /// kworkers across L3 cache domains. Exclude them from load-balancing
    /// to avoid conflicting operations. Greedy executions still apply.
    #[clap(short = 'b', long, action = clap::ArgAction::SetTrue)]
    balanced_kworkers: bool,

    /// Use FIFO scheduling instead of weighted vtime scheduling.
    #[clap(short = 'f', long, action = clap::ArgAction::SetTrue)]
    fifo_sched: bool,

    /// Idle CPUs with utilization lower than this will get remote tasks
    /// directly pushed onto them. 0 disables, 100 always enables.
    #[clap(short = 'D', long, default_value = "90.0")]
    direct_greedy_under: f64,

    /// Idle CPUs with utilization lower than this may get kicked to
    /// accelerate stealing when a task is queued on a saturated remote
    /// domain. 0 disables, 100 always enables.
    #[clap(short = 'K', long, default_value = "100.0")]
    kick_greedy_under: f64,

    /// Whether tasks can be pushed directly to idle CPUs on NUMA nodes
    /// different from their domain's node. If direct-greedy-under is disabled,
    /// this option is a no-op. Otherwise, if this option is set to false
    /// (default), tasks will only be directly pushed to idle CPUs if they
    /// reside on the same NUMA node as the task's domain.
    #[clap(short = 'r', long, action = clap::ArgAction::SetTrue)]
    direct_greedy_numa: bool,

    /// If specified, only tasks which have their scheduling policy set to
    /// SCHED_EXT using sched_setscheduler(2) are switched. Otherwise, all
    /// tasks are switched.
    #[clap(short = 'p', long, action = clap::ArgAction::SetTrue)]
    partial: bool,

    /// Enables soft NUMA affinity for tasks that use set_mempolicy. This
    /// may improve performance in some scenarios when using mempolicies.
    #[clap(long, action = clap::ArgAction::SetTrue)]
    mempolicy_affinity: bool,

    /// Enable stats monitoring with the specified interval.
    #[clap(long)]
    stats: Option<f64>,

    /// Run in stats monitoring mode with the specified interval. The scheduler
    /// is not launched.
    #[clap(long)]
    monitor: Option<f64>,

    /// Exit debug dump buffer length. 0 indicates default.
    #[clap(long, default_value = "0")]
    exit_dump_len: u32,

    /// Enable verbose output, including libbpf details. Specify multiple
    /// times to increase verbosity.
    #[clap(short = 'v', long, action = clap::ArgAction::Count)]
    verbose: u8,

    /// Print version and exit.
    #[clap(long)]
    version: bool,

    /// Show descriptions for statistics.
    #[clap(long)]
    help_stats: bool,

    /// Tunable for prioritizing CPU performance by configuring the CPU frequency governor.
    /// Valid values are [0, 1024]. Higher values prioritize performance, lower values
    /// prioritize energy efficiency. When in doubt, use 0 or 1024.
    #[clap(long, default_value = "0")]
    perf: u32,

    #[clap(flatten, next_help_heading = "Libbpf Options")]
    pub libbpf: LibbpfOpts,
}

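// Parse /proc/stat's aggregate CPU line into cumulative (busy, total) time
// counters. Callers diff two readings taken at different times to compute
// utilization over the interval.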
fn read_cpu_busy_and_total(reader: &procfs::ProcReader) -> Result<(u64, u64)> {
    let cs = reader
        .read_stat()
        .context("Failed to read procfs")?
        .total_cpu
        .ok_or_else(|| anyhow!("Could not read total cpu stat in proc"))?;

    Ok(match cs {
        procfs::CpuStat {
            user_usec: Some(user),
            nice_usec: Some(nice),
            system_usec: Some(system),
            idle_usec: Some(idle),
            iowait_usec: Some(iowait),
            irq_usec: Some(irq),
            softirq_usec: Some(softirq),
            stolen_usec: Some(stolen),
            guest_usec: _,
            guest_nice_usec: _,
        } => {
            let busy = user + system + nice + irq + softirq + stolen;
            let total = busy + idle + iowait;
            (busy, total)
        }
        _ => bail!("Some procfs stats are not populated!"),
    })
}

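/// Saturating subtraction for monotonically increasing counters: returns
/// `curr - prev`, or 0 if the counter wrapped or was reset.
///
/// ```
/// assert_eq!(sub_or_zero(&10, &3), 7);
/// assert_eq!(sub_or_zero(&3, &10), 0); // counter went backwards
/// ```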
pub fn sub_or_zero(curr: &u64, prev: &u64) -> u64 {
    curr.checked_sub(*prev).unwrap_or(0)
}

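// Copy a userspace Cpumask into a BPF-side scx_bitmap. MASK_LEN is the mask
// length in u64 words, as chosen by the BPF program at load time.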
pub fn update_bpf_mask(bpfptr: *mut types::scx_bitmap, cpumask: &Cpumask) -> Result<()> {
    let bpfmask = unsafe { &mut *bpfptr };

    unsafe { cpumask.write_to_ptr(&raw mut bpfmask.bits as *mut u64, MASK_LEN)? };

    Ok(())
}

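// Snapshot of cumulative scheduler counters: CPU busy/total time from procfs,
// the BPF-side stat counters, and the userspace time consumed so far. Two
// snapshots are diffed via delta() to produce per-interval statistics.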
#[derive(Clone, Debug)]
struct StatsCtx {
    cpu_busy: u64,
    cpu_total: u64,
    bpf_stats: Vec<u64>,
    time_used: Duration,
}

impl StatsCtx {
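    // Read every per-CPU slot of each stat from the BPF stats map and sum
    // across CPUs, yielding one aggregate counter per stat index.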
    fn read_bpf_stats(skel: &BpfSkel) -> Result<Vec<u64>> {
        let stats_map = &skel.maps.stats;
        let mut stats: Vec<u64> = Vec::new();

        for stat in 0..bpf_intf::stat_idx_RUSTY_NR_STATS {
            let cpu_stat_vec = stats_map
                .lookup_percpu(&stat.to_ne_bytes(), libbpf_rs::MapFlags::ANY)
                .with_context(|| format!("Failed to lookup stat {}", stat))?
                .expect("per-cpu stat should exist");
            let sum = cpu_stat_vec
                .iter()
                .map(|val| {
                    u64::from_ne_bytes(
                        val.as_slice()
                            .try_into()
                            .expect("Invalid value length in stat map"),
                    )
                })
                .sum();
            stats.push(sum);
        }
        Ok(stats)
    }

    fn blank() -> Self {
        Self {
            cpu_busy: 0,
            cpu_total: 0,
            bpf_stats: vec![0u64; bpf_intf::stat_idx_RUSTY_NR_STATS as usize],
            time_used: Duration::default(),
        }
    }

    fn new(skel: &BpfSkel, proc_reader: &procfs::ProcReader, time_used: Duration) -> Result<Self> {
        let (cpu_busy, cpu_total) = read_cpu_busy_and_total(proc_reader)?;

        Ok(Self {
            cpu_busy,
            cpu_total,
            bpf_stats: Self::read_bpf_stats(skel)?,
            time_used,
        })
    }

    fn delta(&self, rhs: &Self) -> Self {
        Self {
            cpu_busy: sub_or_zero(&self.cpu_busy, &rhs.cpu_busy),
            cpu_total: sub_or_zero(&self.cpu_total, &rhs.cpu_total),
            bpf_stats: self
                .bpf_stats
                .iter()
                .zip(rhs.bpf_stats.iter())
                .map(|(lhs, rhs)| sub_or_zero(lhs, rhs))
                .collect(),
            time_used: self.time_used - rhs.time_used,
        }
    }
}

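// Userspace half of the scheduler: owns the loaded BPF skeleton, the
// struct_ops link that keeps it attached, and the tuner, load-balancer state,
// and stats server that drive it.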
struct Scheduler<'a> {
    skel: BpfSkel<'a>,
    struct_ops: Option<libbpf_rs::Link>,

    sched_interval: Duration,
    tune_interval: Duration,
    balance_load: bool,
    balanced_kworkers: bool,

    dom_group: Arc<DomainGroup>,

    proc_reader: procfs::ProcReader,

    lb_at: SystemTime,
    lb_stats: BTreeMap<usize, NodeStats>,
    time_used: Duration,

    tuner: Tuner,
    stats_server: StatsServer<StatsCtx, (StatsCtx, ClusterStats)>,
}

impl<'a> Scheduler<'a> {
    fn setup_wd40(skel: &mut BpfSkel<'a>) -> Result<()> {
        // Allocate the arena memory from the BPF side so userspace initializes it before starting
        // the scheduler. Despite the function call's name, this is neither a test nor a test run;
        // it's the recommended way of executing SEC("syscall") probes.
        let input = ProgramInput {
            ..Default::default()
        };
        let output = skel.progs.wd40_setup.test_run(input)?;
        if output.return_value != 0 {
            bail!(
                "Could not initialize WD40 arenas, wd40_setup returned {}",
                output.return_value as i32
            );
        }

        Ok(())
    }

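    // Size the arena allocator for this scheduler's task context type and the
    // number of possible CPUs, then run the wd40_setup syscall program to
    // initialize the BPF-side arena state.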
    fn setup_arenas(skel: &mut BpfSkel<'a>) -> Result<()> {
        let task_size = std::mem::size_of::<types::task_ctx>();
        let arenalib = ArenaLib::init(skel.object_mut(), task_size, *NR_CPU_IDS)?;
        arenalib.setup()?;

        Self::setup_wd40(skel)?;

        Ok(())
    }

    fn init(opts: &Opts, open_object: &'a mut MaybeUninit<OpenObject>) -> Result<Self> {
        // Open the BPF prog first for verification.
        let mut skel_builder = BpfSkelBuilder::default();
        skel_builder.obj_builder.debug(opts.verbose > 0);
        init_libbpf_logging(None);
        info!(
            "Running scx_wd40 (build ID: {})",
            build_id::full_version(env!("CARGO_PKG_VERSION"))
        );
        let open_opts = opts.libbpf.clone().into_bpf_open_opts();
        let mut skel = scx_ops_open!(skel_builder, open_object, wd40, open_opts)?;

        // Initialize skel according to @opts.
        let domains = Arc::new(DomainGroup::new(&Topology::new()?)?);

        if *NR_CPU_IDS > MAX_CPUS {
            bail!(
                "Num possible CPU IDs ({}) exceeds maximum of ({})",
                *NR_CPU_IDS,
                MAX_CPUS
            );
        }

        if domains.nr_doms() > MAX_DOMS {
            bail!(
                "nr_doms ({}) is greater than MAX_DOMS ({})",
                domains.nr_doms(),
                MAX_DOMS
            );
        }

        skel.maps.bss_data.as_mut().unwrap().slice_ns = scx_enums.SCX_SLICE_DFL;

        let rodata = skel.maps.rodata_data.as_mut().unwrap();
        rodata.nr_nodes = domains.nr_nodes() as u32;
        rodata.nr_doms = domains.nr_doms() as u32;
        rodata.nr_cpu_ids = *NR_CPU_IDS as u32;

        if opts.partial {
            skel.struct_ops.wd40_mut().flags |= *compat::SCX_OPS_SWITCH_PARTIAL;
        }
        skel.struct_ops.wd40_mut().exit_dump_len = opts.exit_dump_len;

        rodata.load_half_life = (opts.load_half_life * 1000000000.0) as u32;
        rodata.kthreads_local = opts.kthreads_local;
        rodata.fifo_sched = opts.fifo_sched;
        rodata.greedy_threshold = opts.greedy_threshold;
        rodata.greedy_threshold_x_numa = opts.greedy_threshold_x_numa;
        rodata.direct_greedy_numa = opts.direct_greedy_numa;
        rodata.mempolicy_affinity = opts.mempolicy_affinity;
        rodata.debug = opts.verbose as u32;
        rodata.wd40_perf_mode = opts.perf;

        let mut skel = scx_ops_load!(skel, wd40, uei)?;

        Self::setup_arenas(&mut skel)?;

        let bss_data = skel.maps.bss_data.as_mut().unwrap();
        info!(
            "Mask length {}, number of possible CPUs {}",
            bss_data.mask_size,
            skel.maps.rodata_data.as_mut().unwrap().nr_cpu_ids
        );
        // Read the mask length chosen by BPF. We count elements in the u64 array, like the BPF
        // program does.
        //
        // This write is safe because there is no concurrency in the program during initialization.
        unsafe { MASK_LEN = bss_data.mask_size as usize };

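        // Publish the topology to BPF: build each NUMA node's cpumask as the
        // union of its member domains' masks, then write the node and
        // per-domain masks into the BPF-side bitmaps.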
        let types::topo_level(index) = types::topo_level::TOPO_LLC;

        for numa in 0..domains.nr_nodes() {
            let mut numa_mask = Cpumask::new();
            let node_domains = domains.numa_doms(&numa);
            for dom in node_domains.iter() {
                let dom_mask = dom.mask();
                numa_mask = numa_mask.or(&dom_mask);
            }

            update_bpf_mask(bss_data.node_data[numa], &numa_mask)?;
            info!("NODE[{:02}] mask= {}", numa, numa_mask);

            for dom in node_domains.iter() {
                // XXX Remove this by using the topo node's cpumask.
                let ptr = bss_data.topo_nodes[index as usize][dom.id()];
                let domc = unsafe { std::mem::transmute::<u64, &mut types::dom_ctx>(ptr) };
                update_bpf_mask(domc.cpumask, &dom.mask())?;

                bss_data.dom_numa_id_map[dom.id()] =
                    numa.try_into().expect("NUMA ID could not fit into 32 bits");

                info!(" DOM[{:02}] mask= {}", dom.id(), dom.mask());
            }
        }

        // Actually start the scheduler.
        let struct_ops = Some(scx_ops_attach!(skel, wd40)?);
        let stats_server = StatsServer::new(stats::server_data()).launch()?;

        for (id, dom) in domains.doms().iter() {
            let mut ctx = dom.ctx.lock().unwrap();

            let ptr = skel.maps.bss_data.as_mut().unwrap().topo_nodes[index as usize][*id];
            let domc = unsafe { std::mem::transmute::<u64, &mut types::dom_ctx>(ptr) };
            *ctx = Some(domc);
        }

        info!("WD40 scheduler started! Run `scx_wd40 --monitor` for metrics.");

        // Set up the remaining userspace state.
        let proc_reader = procfs::ProcReader::new();

        Ok(Self {
            skel,
            struct_ops, // should be held to keep it attached

            sched_interval: Duration::from_secs_f64(opts.interval),
            tune_interval: Duration::from_secs_f64(opts.tune_interval),
            balance_load: !opts.no_load_balance,
            balanced_kworkers: opts.balanced_kworkers,

            dom_group: domains.clone(),
            proc_reader,

            lb_at: SystemTime::now(),
            lb_stats: BTreeMap::new(),
            time_used: Duration::default(),

            tuner: Tuner::new(
                domains,
                opts.direct_greedy_under,
                opts.kick_greedy_under,
                opts.slice_us_underutil * 1000,
                opts.slice_us_overutil * 1000,
            )?,
            stats_server,
        })
    }

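    // Turn a StatsCtx delta plus the latest per-node load-balancer stats into
    // a ClusterStats report. Dispatch-path counters are reported as
    // percentages of all dispatch decisions made during the interval.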
    fn cluster_stats(&self, sc: &StatsCtx, node_stats: BTreeMap<usize, NodeStats>) -> ClusterStats {
        let stat = |idx| sc.bpf_stats[idx as usize];
        let total = stat(bpf_intf::stat_idx_RUSTY_STAT_WAKE_SYNC)
            + stat(bpf_intf::stat_idx_RUSTY_STAT_SYNC_PREV_IDLE)
            + stat(bpf_intf::stat_idx_RUSTY_STAT_PREV_IDLE)
            + stat(bpf_intf::stat_idx_RUSTY_STAT_GREEDY_IDLE)
            + stat(bpf_intf::stat_idx_RUSTY_STAT_PINNED)
            + stat(bpf_intf::stat_idx_RUSTY_STAT_DIRECT_DISPATCH)
            + stat(bpf_intf::stat_idx_RUSTY_STAT_DIRECT_GREEDY)
            + stat(bpf_intf::stat_idx_RUSTY_STAT_DIRECT_GREEDY_FAR)
            + stat(bpf_intf::stat_idx_RUSTY_STAT_DSQ_DISPATCH)
            + stat(bpf_intf::stat_idx_RUSTY_STAT_GREEDY_LOCAL)
            + stat(bpf_intf::stat_idx_RUSTY_STAT_GREEDY_XNUMA);
        let stat_pct = |idx| stat(idx) as f64 / total as f64 * 100.0;

        let cpu_busy = if sc.cpu_total != 0 {
            (sc.cpu_busy as f64 / sc.cpu_total as f64) * 100.0
        } else {
            0.0
        };

        ClusterStats {
            at_us: SystemTime::now()
                .duration_since(UNIX_EPOCH)
                .unwrap()
                .as_micros()
                .try_into()
                .unwrap(),
            lb_at_us: self
                .lb_at
                .duration_since(UNIX_EPOCH)
                .unwrap()
                .as_micros()
                .try_into()
                .unwrap(),
            total,
            slice_us: self.tuner.slice_ns / 1000,

            cpu_busy,
            load: node_stats.iter().map(|(_k, v)| v.load).sum::<f64>(),
            nr_migrations: sc.bpf_stats[bpf_intf::stat_idx_RUSTY_STAT_LOAD_BALANCE as usize],

            task_get_err: sc.bpf_stats[bpf_intf::stat_idx_RUSTY_STAT_TASK_GET_ERR as usize],
            time_used: sc.time_used.as_secs_f64(),

            sync_prev_idle: stat_pct(bpf_intf::stat_idx_RUSTY_STAT_SYNC_PREV_IDLE),
            wake_sync: stat_pct(bpf_intf::stat_idx_RUSTY_STAT_WAKE_SYNC),
            prev_idle: stat_pct(bpf_intf::stat_idx_RUSTY_STAT_PREV_IDLE),
            greedy_idle: stat_pct(bpf_intf::stat_idx_RUSTY_STAT_GREEDY_IDLE),
            pinned: stat_pct(bpf_intf::stat_idx_RUSTY_STAT_PINNED),
            direct: stat_pct(bpf_intf::stat_idx_RUSTY_STAT_DIRECT_DISPATCH),
            greedy: stat_pct(bpf_intf::stat_idx_RUSTY_STAT_DIRECT_GREEDY),
            greedy_far: stat_pct(bpf_intf::stat_idx_RUSTY_STAT_DIRECT_GREEDY_FAR),
            dsq_dispatch: stat_pct(bpf_intf::stat_idx_RUSTY_STAT_DSQ_DISPATCH),
            greedy_local: stat_pct(bpf_intf::stat_idx_RUSTY_STAT_GREEDY_LOCAL),
            greedy_xnuma: stat_pct(bpf_intf::stat_idx_RUSTY_STAT_GREEDY_XNUMA),
            kick_greedy: stat_pct(bpf_intf::stat_idx_RUSTY_STAT_KICK_GREEDY),
            repatriate: stat_pct(bpf_intf::stat_idx_RUSTY_STAT_REPATRIATE),
            dl_clamp: stat_pct(bpf_intf::stat_idx_RUSTY_STAT_DL_CLAMP),
            dl_preset: stat_pct(bpf_intf::stat_idx_RUSTY_STAT_DL_PRESET),

            direct_greedy_cpus: self.tuner.direct_greedy_mask.as_raw_slice().to_owned(),
            kick_greedy_cpus: self.tuner.kick_greedy_mask.as_raw_slice().to_owned(),

            nodes: node_stats,
        }
    }

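    // Run one load-balancing pass and record when it ran along with the
    // resulting per-node stats.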
    fn lb_step(&mut self) -> Result<()> {
        let mut lb = LoadBalancer::new(
            &mut self.skel,
            self.dom_group.clone(),
            self.balanced_kworkers,
            self.tuner.fully_utilized,
            self.balance_load,
        );

        lb.load_balance()?;

        self.lb_at = SystemTime::now();
        self.lb_stats = lb.get_stats();
        Ok(())
    }

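    // Main scheduler loop: drive the tuner and load balancer on their
    // respective intervals and serve stats requests in between, until
    // shutdown is requested or the BPF scheduler exits.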
    fn run(&mut self, shutdown: Arc<AtomicBool>) -> Result<UserExitInfo> {
        let (res_ch, req_ch) = self.stats_server.channels();
        let now = Instant::now();
        let mut next_tune_at = now + self.tune_interval;
        let mut next_sched_at = now + self.sched_interval;

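        // Between deadlines, block on the stats request channel so stats
        // requests are served promptly without busy-waiting. If a timer slips
        // by more than one full interval, its deadline is re-anchored to the
        // current time.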
        while !shutdown.load(Ordering::Relaxed) && !uei_exited!(&self.skel, uei) {
            let now = Instant::now();

            if now >= next_tune_at {
                self.tuner.step(&mut self.skel)?;
                next_tune_at += self.tune_interval;
                if next_tune_at < now {
                    next_tune_at = now + self.tune_interval;
                }
            }

            if now >= next_sched_at {
                self.lb_step()?;
                next_sched_at += self.sched_interval;
                if next_sched_at < now {
                    next_sched_at = now + self.sched_interval;
                }
            }

            self.time_used += Instant::now().duration_since(now);

            match req_ch.recv_deadline(next_sched_at.min(next_tune_at)) {
                Ok(prev_sc) => {
                    let cur_sc = StatsCtx::new(&self.skel, &self.proc_reader, self.time_used)?;
                    let delta_sc = cur_sc.delta(&prev_sc);
                    let cstats = self.cluster_stats(&delta_sc, self.lb_stats.clone());
                    res_ch.send((cur_sc, cstats))?;
                }
                Err(RecvTimeoutError::Timeout) => {}
                Err(e) => Err(e)?,
            }
        }

        let _ = self.struct_ops.take();
        uei_report!(&self.skel, uei)
    }
}

impl Drop for Scheduler<'_> {
    fn drop(&mut self) {
        info!("Unregister {SCHEDULER_NAME} scheduler");

        if let Some(struct_ops) = self.struct_ops.take() {
            drop(struct_ops);
        }
    }
}

fn main() -> Result<()> {
    let opts = Opts::parse();

    if opts.version {
        println!(
            "scx_wd40: {}",
            build_id::full_version(env!("CARGO_PKG_VERSION"))
        );
        return Ok(());
    }

    if opts.help_stats {
        stats::server_data().describe_meta(&mut std::io::stdout(), None)?;
        return Ok(());
    }

    let llv = match opts.verbose {
        0 => simplelog::LevelFilter::Info,
        1 => simplelog::LevelFilter::Debug,
        _ => simplelog::LevelFilter::Trace,
    };
    let mut lcfg = simplelog::ConfigBuilder::new();
    lcfg.set_time_offset_to_local()
        .expect("Failed to set local time offset")
        .set_time_level(simplelog::LevelFilter::Error)
        .set_location_level(simplelog::LevelFilter::Off)
        .set_target_level(simplelog::LevelFilter::Off)
        .set_thread_level(simplelog::LevelFilter::Off);
    simplelog::TermLogger::init(
        llv,
        lcfg.build(),
        simplelog::TerminalMode::Stderr,
        simplelog::ColorChoice::Auto,
    )?;

    let shutdown = Arc::new(AtomicBool::new(false));
    let shutdown_clone = shutdown.clone();
    ctrlc::set_handler(move || {
        shutdown_clone.store(true, Ordering::Relaxed);
    })
    .context("Error setting Ctrl-C handler")?;

    if let Some(intv) = opts.monitor.or(opts.stats) {
        let shutdown_copy = shutdown.clone();
        let jh = std::thread::spawn(move || {
            stats::monitor(Duration::from_secs_f64(intv), shutdown_copy).unwrap()
        });
        if opts.monitor.is_some() {
            let _ = jh.join();
            return Ok(());
        }
    }

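    // Keep re-initializing and re-attaching the scheduler until it exits
    // without requesting a restart (e.g. after a kernel-initiated reload such
    // as on CPU hotplug).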
    let mut open_object = MaybeUninit::uninit();
    loop {
        let mut sched = Scheduler::init(&opts, &mut open_object)?;
        if !sched.run(shutdown.clone())?.should_restart() {
            break;
        }
    }
    Ok(())
}