1use std::io::Write;
7use std::sync::atomic::AtomicBool;
8use std::sync::atomic::Ordering;
9use std::sync::Arc;
10use std::time::Duration;
11
12use anyhow::Result;
13use scx_stats::prelude::*;
14use scx_stats_derive::stat_doc;
15use scx_stats_derive::Stats;
16use serde::Deserialize;
17use serde::Serialize;
18
#[stat_doc]
#[derive(Clone, Debug, Default, Serialize, Deserialize, Stats)]
#[stat(top)]
/// Top-level metrics snapshot served through `scx_stats`.
///
/// Most fields are event counters; [`Metrics::delta`] turns two successive
/// snapshots into a per-interval view. Exceptions that are reported as
/// instantaneous values rather than differenced: `nr_running`, the
/// `*_rounds` maxima, and the `tune_*` / `autotune_*` knob mirrors.
pub struct Metrics {
    // --- instantaneous load gauge ---
    #[stat(desc = "Number of running tasks")]
    pub nr_running: u64,
    #[stat(desc = "Total CPU runtime in ns")]
    pub total_runtime: u64,
    // --- per-lane dispatch counters ---
    #[stat(desc = "Tasks dispatched from the reserved positive-budget DSQ")]
    pub reserved_dispatches: u64,
    #[stat(desc = "Tasks dispatched from the urgent latency lane DSQ")]
    pub urgent_latency_dispatches: u64,
    #[stat(desc = "Urgent-latency dispatches granted through the bounded burst path")]
    pub urgent_latency_burst_grants: u64,
    #[stat(desc = "Urgent-latency dispatches that continued an already-active burst window")]
    pub urgent_latency_burst_continuations: u64,
    #[stat(desc = "Tasks dispatched from the dedicated latency lane DSQ")]
    pub latency_dispatches: u64,
    #[stat(desc = "Tasks dispatched from the shared DSQ")]
    pub shared_dispatches: u64,
    #[stat(desc = "Tasks dispatched from the contained throughput/background DSQ")]
    pub contained_dispatches: u64,
    #[stat(desc = "Tasks fast-dispatched to local DSQs")]
    pub local_fast_dispatches: u64,
    #[stat(desc = "Positive-budget wakeups sent to local DSQs with preempt kicks")]
    pub wake_preempt_dispatches: u64,
    // --- budget accounting ---
    #[stat(desc = "Wakeups that refilled task budget")]
    pub budget_refill_events: u64,
    #[stat(desc = "Times a task ran its budget down to zero or below")]
    pub budget_exhaustions: u64,
    #[stat(desc = "Wakeups that still had positive budget at enqueue time")]
    pub positive_budget_wakeups: u64,
    // --- latency-lane routing and latency-debt tracking ---
    #[stat(desc = "Latency-debt wakeups inserted into the urgent latency lane")]
    pub urgent_latency_enqueues: u64,
    #[stat(desc = "Interactive wakeups inserted into the dedicated latency lane")]
    pub latency_lane_enqueues: u64,
    #[stat(desc = "Soft latency-lane candidates observed before final routing decisions")]
    pub latency_lane_candidates: u64,
    #[stat(desc = "Latency-lane candidates that were consumed by the local reserved fast path")]
    pub latency_candidate_local_enqueues: u64,
    #[stat(desc = "Soft latency-lane candidates blocked because they were already contained hogs")]
    pub latency_candidate_hog_blocks: u64,
    #[stat(desc = "Interactive budget exhaustions that raised per-task latency debt")]
    pub latency_debt_raises: u64,
    #[stat(
        desc = "Times latency debt decayed after the scheduler gave a debt-bearing task latency service"
    )]
    pub latency_debt_decays: u64,
    #[stat(
        desc = "Latency-lane enqueues driven by accumulated latency debt rather than fresh wake credit alone"
    )]
    pub latency_debt_urgent_enqueues: u64,
    #[stat(
        desc = "Urgent-debt wakeups that still missed the urgent lane and fell back to ordinary routing"
    )]
    pub urgent_latency_misses: u64,
    // --- enqueue-path routing counters ---
    #[stat(desc = "Positive-budget tasks inserted directly into selected local DSQs")]
    pub reserved_local_enqueues: u64,
    #[stat(desc = "Positive-budget tasks enqueued to the reserved global DSQ")]
    pub reserved_global_enqueues: u64,
    #[stat(desc = "Wakeups that fell back to the shared DSQ")]
    pub shared_wakeup_enqueues: u64,
    #[stat(desc = "Runnable wakeups observed before enqueue/select_cpu decisions")]
    pub runnable_wakeups: u64,
    #[stat(desc = "Local DSQ tasks rescued during cpu_release")]
    pub cpu_release_reenqueues: u64,
    // --- burst-window gauges (current maxima across CPUs, not counters) ---
    #[stat(
        desc = "Maximum current consecutive urgent-latency dispatches within the bounded burst window across CPUs"
    )]
    pub urgent_latency_burst_rounds: u64,
    #[stat(
        desc = "Maximum current consecutive high-priority dispatches since shared or contained service last ran across CPUs"
    )]
    pub high_priority_burst_rounds: u64,
    #[stat(
        desc = "Maximum current consecutive ordinary local-reserved fast-path enqueues under pressure across CPUs"
    )]
    pub local_reserved_burst_rounds: u64,
    #[stat(
        desc = "Ordinary local-reserved fast-path grants that counted toward the local burst window"
    )]
    pub local_reserved_fast_grants: u64,
    #[stat(
        desc = "Ordinary local-reserved fast-path grants that continued an already-active local burst window"
    )]
    pub local_reserved_burst_continuations: u64,
    #[stat(desc = "Ordinary local-reserved fast-path enqueues skipped by the local burst cap")]
    pub local_quota_skips: u64,
    // --- quota / lane-rotation shaping ---
    #[stat(
        desc = "Dispatch rounds where a lower-lane quota check skipped the normal reserved/high-priority order"
    )]
    pub reserved_quota_skips: u64,
    #[stat(desc = "Shared-lane dispatches forced early by the bounded quota")]
    pub quota_shared_forces: u64,
    #[stat(desc = "Contained-lane dispatches forced early by the bounded quota")]
    pub quota_contained_forces: u64,
    // --- task lifecycle callbacks ---
    #[stat(desc = "Tasks initialized through init_task task storage setup")]
    pub init_task_events: u64,
    #[stat(desc = "Tasks explicitly initialized on entry into scx_flow")]
    pub enable_events: u64,
    #[stat(desc = "Tasks explicitly cleaned up on exit from scx_flow")]
    pub exit_task_events: u64,
    // --- CPU placement / migration tracking ---
    #[stat(desc = "Wakeups where select_cpu() biased toward the task's last CPU")]
    pub cpu_stability_biases: u64,
    #[stat(desc = "Wakeups where the chosen target CPU matched the task's last CPU")]
    pub last_cpu_matches: u64,
    #[stat(desc = "Observed task migrations between successive runs")]
    pub cpu_migrations: u64,
    // --- RT-sensitive lane ---
    #[stat(desc = "Pinned positive-budget wakeups classified into the RT-sensitive lane")]
    pub rt_sensitive_wakeups: u64,
    #[stat(desc = "RT-sensitive wakeups inserted directly into selected local DSQs")]
    pub rt_sensitive_local_enqueues: u64,
    #[stat(desc = "RT-sensitive wakeups that used the preempt path")]
    pub rt_sensitive_preempts: u64,
    // --- reserved-lane burst shaping ---
    #[stat(
        desc = "Maximum current consecutive dispatches from the reserved global DSQ across CPUs"
    )]
    pub reserved_lane_burst_rounds: u64,
    #[stat(
        desc = "Dispatches granted from the reserved global DSQ while reserved-lane shaping was active"
    )]
    pub reserved_lane_grants: u64,
    #[stat(
        desc = "Reserved global dispatches that continued an already-active reserved-lane burst window"
    )]
    pub reserved_lane_burst_continuations: u64,
    #[stat(
        desc = "Reserved global dispatches skipped because the reserved-lane burst cap engaged"
    )]
    pub reserved_lane_skips: u64,
    #[stat(desc = "Shared-lane dispatches forced by the reserved-lane burst cap")]
    pub reserved_lane_shared_forces: u64,
    #[stat(desc = "Contained-lane dispatches forced by the reserved-lane burst cap")]
    pub reserved_lane_contained_forces: u64,
    #[stat(
        desc = "Reserved-lane cap attempts that wanted shared service but found no immediately dispatchable shared work"
    )]
    pub reserved_lane_shared_misses: u64,
    #[stat(
        desc = "Reserved-lane cap attempts that wanted contained service but found no immediately dispatchable contained work"
    )]
    pub reserved_lane_contained_misses: u64,
    // --- starvation-driven head promotions ---
    #[stat(
        desc = "Contained-lane enqueues promoted to the head because the contained lane was already meaningfully starved"
    )]
    pub contained_starved_head_enqueues: u64,
    #[stat(
        desc = "Shared-lane enqueues promoted to the head because the shared lane was already meaningfully starved"
    )]
    pub shared_starved_head_enqueues: u64,
    // --- bounded direct-local front-door ---
    #[stat(
        desc = "Direct-local wakeups that were eligible for the bounded front-door before final fast-path checks"
    )]
    pub direct_local_candidates: u64,
    #[stat(
        desc = "Positive-budget wakeups routed through the bounded direct-local front-door without using the RT-sensitive path"
    )]
    pub direct_local_enqueues: u64,
    #[stat(
        desc = "Direct-local candidates that lost the front-door and decayed back toward ordinary routing"
    )]
    pub direct_local_rejections: u64,
    #[stat(
        desc = "Wakeups where the chosen target CPU did not match the remembered last CPU continuity hint"
    )]
    pub direct_local_mismatches: u64,
    // --- IPC-confidence path ---
    #[stat(desc = "Wakeups that qualified for the decayed IPC-confidence path")]
    pub ipc_wake_candidates: u64,
    #[stat(desc = "IPC-confidence wakeups inserted directly into selected local DSQs")]
    pub ipc_local_enqueues: u64,
    #[stat(
        desc = "Times the decayed IPC-confidence score strengthened after a short blocking run"
    )]
    pub ipc_score_raises: u64,
    #[stat(desc = "Local slice boosts granted through the IPC-confidence path")]
    pub ipc_boosts: u64,
    // --- hog containment ---
    #[stat(desc = "Tasks routed into the dedicated contained throughput/background DSQ")]
    pub contained_enqueues: u64,
    #[stat(desc = "Enqueues where a persistent hog-like task had latency privileges reduced")]
    pub hog_containment_enqueues: u64,
    #[stat(desc = "Times a previously contained hog-like task decayed back below containment")]
    pub hog_recoveries: u64,
    // --- fairness-floor gauges and rescues ---
    #[stat(
        desc = "Maximum current consecutive dispatch rounds since a contained/background task last ran across CPUs"
    )]
    pub contained_starvation_rounds: u64,
    #[stat(
        desc = "Maximum current consecutive dispatch rounds since a shared-fallback task last ran across CPUs"
    )]
    pub shared_starvation_rounds: u64,
    #[stat(desc = "Contained/background tasks rescued early by the fairness floor")]
    pub contained_rescue_dispatches: u64,
    #[stat(desc = "Shared-fallback tasks rescued early by the fairness floor")]
    pub shared_rescue_dispatches: u64,
    // --- current tuning-knob mirrors (instantaneous, not differenced) ---
    #[stat(desc = "Current latency-credit grant per strong interactive refill")]
    pub tune_latency_credit_grant: u64,
    #[stat(desc = "Current latency-credit decay applied when credit is consumed or exhausted")]
    pub tune_latency_credit_decay: u64,
    #[stat(
        desc = "Current debt threshold required before a wakeup qualifies for the urgent latency lane"
    )]
    pub tune_latency_debt_urgent_min: u64,
    #[stat(desc = "Current maximum consecutive urgent-latency dispatches allowed in one burst")]
    pub tune_urgent_latency_burst_max: u64,
    #[stat(
        desc = "Current maximum consecutive high-priority dispatches before lower-lane quota checks engage"
    )]
    pub tune_reserved_quota_burst_max: u64,
    #[stat(desc = "Current contained-lane fairness-floor threshold")]
    pub tune_contained_starvation_max: u64,
    #[stat(desc = "Current shared-lane fairness-floor threshold")]
    pub tune_shared_starvation_max: u64,
    #[stat(desc = "Current runnable-pressure cap for the ordinary local fast path")]
    pub tune_local_fast_nr_running_max: u64,
    #[stat(
        desc = "Current maximum consecutive ordinary local-reserved fast-path enqueues allowed under pressure"
    )]
    pub tune_local_reserved_burst_max: u64,
    #[stat(
        desc = "Current maximum consecutive dispatches allowed from the reserved global DSQ before forcing a lower-lane rotation"
    )]
    pub tune_reserved_lane_burst_max: u64,
    // --- autotune state ---
    #[stat(desc = "Adaptive tuning generation counter")]
    pub autotune_generation: u64,
    #[stat(desc = "Adaptive tuning mode (0=balanced, 1=latency, 2=throughput)")]
    pub autotune_mode: u64,
    #[stat(desc = "Current reserved slice cap in ns")]
    pub tune_reserved_max_ns: u64,
    #[stat(desc = "Current shared slice in ns")]
    pub tune_shared_slice_ns: u64,
    #[stat(desc = "Current interactive wake refill floor in ns")]
    pub tune_interactive_floor_ns: u64,
    #[stat(desc = "Current preempt budget threshold in ns")]
    pub tune_preempt_budget_min_ns: u64,
    #[stat(desc = "Current preempt refill threshold in ns")]
    pub tune_preempt_refill_min_ns: u64,
}
256
257impl Metrics {
258 fn autotune_mode_name(&self) -> &'static str {
259 match self.autotune_mode {
260 1 => "latency",
261 2 => "throughput",
262 _ => "balanced",
263 }
264 }
265
    /// Write this snapshot as a single `key=value` line to `w`.
    ///
    /// The positional arguments below MUST stay in exactly the same order as
    /// the `key={}` tokens in the format string — any reordering silently
    /// mislabels every subsequent column. The `*_ns` slice/threshold tunables
    /// are printed in microseconds (`/ 1000`); all other fields are printed
    /// raw. NOTE(review): `total_runtime` is the only field not included in
    /// this line — confirm that is intentional.
    fn format<W: Write>(&self, w: &mut W) -> Result<()> {
        writeln!(
            w,
            "[{}] mode={} gen={} run={} urgent_lat_disp={} urgent_grant={} urgent_cont={} latency_disp={} reserve_disp={} contained_disp={} shared_disp={} local_fast={} wake_preempt={} refill={} exhaust={} pos_wake={} urgent_lat_enq={} latency_enq={} latency_cand={} latency_local={} latency_hog_block={} debt_raise={} debt_decay={} debt_urgent={} urgent_miss={} reserve_local={} reserve_global={} shared_wake={} runnable={} cpu_release={} urgent_burst={} high_prio_burst={} reserved_burst={} reserved_grant={} reserved_cont={} reserved_skip={} reserved_shared={} reserved_contained={} reserved_miss_shared={} reserved_miss_contained={} contained_head={} shared_head={} local_burst={} local_grant={} local_cont={} local_quota_skip={} quota_skip={} quota_shared={} quota_contained={} init_task={} enable={} exit_task={} cpu_bias={} last_cpu_hit={} migrations={} rt_wake={} rt_local={} rt_preempt={} direct_cand={} direct_local={} direct_reject={} direct_mismatch={} ipc_wake={} ipc_local={} ipc_raise={} ipc_boost={} contained_enq={} hog_contain={} hog_recover={} contained_starve={} shared_starve={} contained_rescue={} shared_rescue={} reserve_cap_us={} shared_slice_us={} refill_floor_us={} preempt_budget_us={} preempt_refill_us={} credit_grant={} credit_decay={} debt_min={} urgent_burst_max={} reserved_quota_max={} reserved_lane_max={} contained_floor={} shared_floor={} local_fast_cap={} local_burst_max={}",
            crate::SCHEDULER_NAME,
            self.autotune_mode_name(),
            self.autotune_generation,
            self.nr_running,
            self.urgent_latency_dispatches,
            self.urgent_latency_burst_grants,
            self.urgent_latency_burst_continuations,
            self.latency_dispatches,
            self.reserved_dispatches,
            self.contained_dispatches,
            self.shared_dispatches,
            self.local_fast_dispatches,
            self.wake_preempt_dispatches,
            self.budget_refill_events,
            self.budget_exhaustions,
            self.positive_budget_wakeups,
            self.urgent_latency_enqueues,
            self.latency_lane_enqueues,
            self.latency_lane_candidates,
            self.latency_candidate_local_enqueues,
            self.latency_candidate_hog_blocks,
            self.latency_debt_raises,
            self.latency_debt_decays,
            self.latency_debt_urgent_enqueues,
            self.urgent_latency_misses,
            self.reserved_local_enqueues,
            self.reserved_global_enqueues,
            self.shared_wakeup_enqueues,
            self.runnable_wakeups,
            self.cpu_release_reenqueues,
            self.urgent_latency_burst_rounds,
            self.high_priority_burst_rounds,
            self.reserved_lane_burst_rounds,
            self.reserved_lane_grants,
            self.reserved_lane_burst_continuations,
            self.reserved_lane_skips,
            self.reserved_lane_shared_forces,
            self.reserved_lane_contained_forces,
            self.reserved_lane_shared_misses,
            self.reserved_lane_contained_misses,
            self.contained_starved_head_enqueues,
            self.shared_starved_head_enqueues,
            self.local_reserved_burst_rounds,
            self.local_reserved_fast_grants,
            self.local_reserved_burst_continuations,
            self.local_quota_skips,
            self.reserved_quota_skips,
            self.quota_shared_forces,
            self.quota_contained_forces,
            self.init_task_events,
            self.enable_events,
            self.exit_task_events,
            self.cpu_stability_biases,
            self.last_cpu_matches,
            self.cpu_migrations,
            self.rt_sensitive_wakeups,
            self.rt_sensitive_local_enqueues,
            self.rt_sensitive_preempts,
            self.direct_local_candidates,
            self.direct_local_enqueues,
            self.direct_local_rejections,
            self.direct_local_mismatches,
            self.ipc_wake_candidates,
            self.ipc_local_enqueues,
            self.ipc_score_raises,
            self.ipc_boosts,
            self.contained_enqueues,
            self.hog_containment_enqueues,
            self.hog_recoveries,
            self.contained_starvation_rounds,
            self.shared_starvation_rounds,
            self.contained_rescue_dispatches,
            self.shared_rescue_dispatches,
            // ns -> us conversions for the five time-based tunables.
            self.tune_reserved_max_ns / 1000,
            self.tune_shared_slice_ns / 1000,
            self.tune_interactive_floor_ns / 1000,
            self.tune_preempt_budget_min_ns / 1000,
            self.tune_preempt_refill_min_ns / 1000,
            self.tune_latency_credit_grant,
            self.tune_latency_credit_decay,
            self.tune_latency_debt_urgent_min,
            self.tune_urgent_latency_burst_max,
            self.tune_reserved_quota_burst_max,
            self.tune_reserved_lane_burst_max,
            self.tune_contained_starvation_max,
            self.tune_shared_starvation_max,
            self.tune_local_fast_nr_running_max,
            self.tune_local_reserved_burst_max,
        )?;
        Ok(())
    }
361
    /// Build a per-interval view of these metrics relative to an earlier
    /// sample `rhs` (the previous snapshot).
    ///
    /// Event counters are differenced with `wrapping_sub` so a counter wrap
    /// cannot panic in debug builds. Fields that are instantaneous gauges —
    /// `nr_running`, the `*_rounds` maxima, the `tune_*` knob mirrors and the
    /// `autotune_*` state — are copied through from `self` unchanged.
    pub fn delta(&self, rhs: &Self) -> Self {
        Self {
            // Gauge: current value, not a difference.
            nr_running: self.nr_running,
            total_runtime: self.total_runtime.wrapping_sub(rhs.total_runtime),
            reserved_dispatches: self
                .reserved_dispatches
                .wrapping_sub(rhs.reserved_dispatches),
            urgent_latency_dispatches: self
                .urgent_latency_dispatches
                .wrapping_sub(rhs.urgent_latency_dispatches),
            urgent_latency_burst_grants: self
                .urgent_latency_burst_grants
                .wrapping_sub(rhs.urgent_latency_burst_grants),
            urgent_latency_burst_continuations: self
                .urgent_latency_burst_continuations
                .wrapping_sub(rhs.urgent_latency_burst_continuations),
            latency_dispatches: self.latency_dispatches.wrapping_sub(rhs.latency_dispatches),
            contained_dispatches: self
                .contained_dispatches
                .wrapping_sub(rhs.contained_dispatches),
            shared_dispatches: self.shared_dispatches.wrapping_sub(rhs.shared_dispatches),
            local_fast_dispatches: self
                .local_fast_dispatches
                .wrapping_sub(rhs.local_fast_dispatches),
            wake_preempt_dispatches: self
                .wake_preempt_dispatches
                .wrapping_sub(rhs.wake_preempt_dispatches),
            budget_refill_events: self
                .budget_refill_events
                .wrapping_sub(rhs.budget_refill_events),
            budget_exhaustions: self.budget_exhaustions.wrapping_sub(rhs.budget_exhaustions),
            positive_budget_wakeups: self
                .positive_budget_wakeups
                .wrapping_sub(rhs.positive_budget_wakeups),
            urgent_latency_enqueues: self
                .urgent_latency_enqueues
                .wrapping_sub(rhs.urgent_latency_enqueues),
            latency_lane_enqueues: self
                .latency_lane_enqueues
                .wrapping_sub(rhs.latency_lane_enqueues),
            latency_lane_candidates: self
                .latency_lane_candidates
                .wrapping_sub(rhs.latency_lane_candidates),
            latency_candidate_local_enqueues: self
                .latency_candidate_local_enqueues
                .wrapping_sub(rhs.latency_candidate_local_enqueues),
            latency_candidate_hog_blocks: self
                .latency_candidate_hog_blocks
                .wrapping_sub(rhs.latency_candidate_hog_blocks),
            latency_debt_raises: self
                .latency_debt_raises
                .wrapping_sub(rhs.latency_debt_raises),
            latency_debt_decays: self
                .latency_debt_decays
                .wrapping_sub(rhs.latency_debt_decays),
            latency_debt_urgent_enqueues: self
                .latency_debt_urgent_enqueues
                .wrapping_sub(rhs.latency_debt_urgent_enqueues),
            urgent_latency_misses: self
                .urgent_latency_misses
                .wrapping_sub(rhs.urgent_latency_misses),
            reserved_local_enqueues: self
                .reserved_local_enqueues
                .wrapping_sub(rhs.reserved_local_enqueues),
            reserved_global_enqueues: self
                .reserved_global_enqueues
                .wrapping_sub(rhs.reserved_global_enqueues),
            shared_wakeup_enqueues: self
                .shared_wakeup_enqueues
                .wrapping_sub(rhs.shared_wakeup_enqueues),
            runnable_wakeups: self.runnable_wakeups.wrapping_sub(rhs.runnable_wakeups),
            cpu_release_reenqueues: self
                .cpu_release_reenqueues
                .wrapping_sub(rhs.cpu_release_reenqueues),
            // Gauges: current per-CPU burst maxima, passed through as-is.
            urgent_latency_burst_rounds: self.urgent_latency_burst_rounds,
            high_priority_burst_rounds: self.high_priority_burst_rounds,
            local_reserved_burst_rounds: self.local_reserved_burst_rounds,
            local_reserved_fast_grants: self
                .local_reserved_fast_grants
                .wrapping_sub(rhs.local_reserved_fast_grants),
            local_reserved_burst_continuations: self
                .local_reserved_burst_continuations
                .wrapping_sub(rhs.local_reserved_burst_continuations),
            local_quota_skips: self.local_quota_skips.wrapping_sub(rhs.local_quota_skips),
            reserved_quota_skips: self
                .reserved_quota_skips
                .wrapping_sub(rhs.reserved_quota_skips),
            quota_shared_forces: self
                .quota_shared_forces
                .wrapping_sub(rhs.quota_shared_forces),
            quota_contained_forces: self
                .quota_contained_forces
                .wrapping_sub(rhs.quota_contained_forces),
            init_task_events: self.init_task_events.wrapping_sub(rhs.init_task_events),
            enable_events: self.enable_events.wrapping_sub(rhs.enable_events),
            exit_task_events: self.exit_task_events.wrapping_sub(rhs.exit_task_events),
            cpu_stability_biases: self
                .cpu_stability_biases
                .wrapping_sub(rhs.cpu_stability_biases),
            last_cpu_matches: self.last_cpu_matches.wrapping_sub(rhs.last_cpu_matches),
            cpu_migrations: self.cpu_migrations.wrapping_sub(rhs.cpu_migrations),
            rt_sensitive_wakeups: self
                .rt_sensitive_wakeups
                .wrapping_sub(rhs.rt_sensitive_wakeups),
            rt_sensitive_local_enqueues: self
                .rt_sensitive_local_enqueues
                .wrapping_sub(rhs.rt_sensitive_local_enqueues),
            rt_sensitive_preempts: self
                .rt_sensitive_preempts
                .wrapping_sub(rhs.rt_sensitive_preempts),
            // Gauge: current reserved-lane burst maximum.
            reserved_lane_burst_rounds: self.reserved_lane_burst_rounds,
            reserved_lane_grants: self
                .reserved_lane_grants
                .wrapping_sub(rhs.reserved_lane_grants),
            reserved_lane_burst_continuations: self
                .reserved_lane_burst_continuations
                .wrapping_sub(rhs.reserved_lane_burst_continuations),
            reserved_lane_skips: self
                .reserved_lane_skips
                .wrapping_sub(rhs.reserved_lane_skips),
            reserved_lane_shared_forces: self
                .reserved_lane_shared_forces
                .wrapping_sub(rhs.reserved_lane_shared_forces),
            reserved_lane_contained_forces: self
                .reserved_lane_contained_forces
                .wrapping_sub(rhs.reserved_lane_contained_forces),
            reserved_lane_shared_misses: self
                .reserved_lane_shared_misses
                .wrapping_sub(rhs.reserved_lane_shared_misses),
            reserved_lane_contained_misses: self
                .reserved_lane_contained_misses
                .wrapping_sub(rhs.reserved_lane_contained_misses),
            contained_starved_head_enqueues: self
                .contained_starved_head_enqueues
                .wrapping_sub(rhs.contained_starved_head_enqueues),
            shared_starved_head_enqueues: self
                .shared_starved_head_enqueues
                .wrapping_sub(rhs.shared_starved_head_enqueues),
            direct_local_candidates: self
                .direct_local_candidates
                .wrapping_sub(rhs.direct_local_candidates),
            direct_local_enqueues: self
                .direct_local_enqueues
                .wrapping_sub(rhs.direct_local_enqueues),
            direct_local_rejections: self
                .direct_local_rejections
                .wrapping_sub(rhs.direct_local_rejections),
            direct_local_mismatches: self
                .direct_local_mismatches
                .wrapping_sub(rhs.direct_local_mismatches),
            ipc_wake_candidates: self
                .ipc_wake_candidates
                .wrapping_sub(rhs.ipc_wake_candidates),
            ipc_local_enqueues: self.ipc_local_enqueues.wrapping_sub(rhs.ipc_local_enqueues),
            ipc_score_raises: self.ipc_score_raises.wrapping_sub(rhs.ipc_score_raises),
            ipc_boosts: self.ipc_boosts.wrapping_sub(rhs.ipc_boosts),
            contained_enqueues: self.contained_enqueues.wrapping_sub(rhs.contained_enqueues),
            hog_containment_enqueues: self
                .hog_containment_enqueues
                .wrapping_sub(rhs.hog_containment_enqueues),
            hog_recoveries: self.hog_recoveries.wrapping_sub(rhs.hog_recoveries),
            // Gauges: current starvation-round maxima.
            contained_starvation_rounds: self.contained_starvation_rounds,
            shared_starvation_rounds: self.shared_starvation_rounds,
            contained_rescue_dispatches: self
                .contained_rescue_dispatches
                .wrapping_sub(rhs.contained_rescue_dispatches),
            shared_rescue_dispatches: self
                .shared_rescue_dispatches
                .wrapping_sub(rhs.shared_rescue_dispatches),
            // Tuning-knob mirrors and autotune state: instantaneous values,
            // never differenced.
            tune_latency_credit_grant: self.tune_latency_credit_grant,
            tune_latency_credit_decay: self.tune_latency_credit_decay,
            tune_latency_debt_urgent_min: self.tune_latency_debt_urgent_min,
            tune_urgent_latency_burst_max: self.tune_urgent_latency_burst_max,
            tune_reserved_quota_burst_max: self.tune_reserved_quota_burst_max,
            tune_contained_starvation_max: self.tune_contained_starvation_max,
            tune_shared_starvation_max: self.tune_shared_starvation_max,
            tune_local_fast_nr_running_max: self.tune_local_fast_nr_running_max,
            tune_local_reserved_burst_max: self.tune_local_reserved_burst_max,
            tune_reserved_lane_burst_max: self.tune_reserved_lane_burst_max,
            autotune_generation: self.autotune_generation,
            autotune_mode: self.autotune_mode,
            tune_reserved_max_ns: self.tune_reserved_max_ns,
            tune_shared_slice_ns: self.tune_shared_slice_ns,
            tune_interactive_floor_ns: self.tune_interactive_floor_ns,
            tune_preempt_budget_min_ns: self.tune_preempt_budget_min_ns,
            tune_preempt_refill_min_ns: self.tune_preempt_refill_min_ns,
        }
    }
550}
551
552pub fn server_data() -> StatsServerData<(), Metrics> {
553 let open: Box<dyn StatsOpener<(), Metrics>> = Box::new(move |(req_ch, res_ch)| {
554 req_ch.send(())?;
555 let mut prev = res_ch.recv()?;
556
557 let read: Box<dyn StatsReader<(), Metrics>> = Box::new(move |_args, (req_ch, res_ch)| {
558 req_ch.send(())?;
559 let cur = res_ch.recv()?;
560 let delta = cur.delta(&prev);
561 prev = cur;
562 delta.to_json()
563 });
564
565 Ok(read)
566 });
567
568 StatsServerData::new()
569 .add_meta(Metrics::meta())
570 .add_ops("top", StatsOps { open, close: None })
571}
572
573pub fn monitor(intv: Duration, shutdown: Arc<AtomicBool>) -> Result<()> {
574 scx_utils::monitor_stats::<Metrics>(
575 &[],
576 intv,
577 || shutdown.load(Ordering::Relaxed),
578 |metrics| metrics.format(&mut std::io::stdout()),
579 )
580}