author | V3n3RiX <venerix@koprulu.sector> | 2024-04-27 15:55:37 +0100
committer | V3n3RiX <venerix@koprulu.sector> | 2024-04-27 15:55:37 +0100
commit | dd730cbe97ed8f365595d6723ef2f9db83e0a141 (patch)
tree | 9d533c1661c52ee33b27fb724ebc78b42c81f0f3 /sys-kernel/linux-sources-redcore-lts/files
parent | 5e2fbf3a6be51fca0d8bb3e58ce4e2af0c0441b8 (diff)
sys-kernel/linux-{image,sources}-redcore-lts : revision bump, use the BORE scheduler (https://github.com/firelzrd/bore-scheduler)
Diffstat (limited to 'sys-kernel/linux-sources-redcore-lts/files')
8 files changed, 1983 insertions, 3 deletions
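For orientation before the raw patches: the BORE patches below derive a per-task "burst penalty" from how long a task has run since it last slept or yielded, then shift its effective nice-derived priority by `burst_penalty >> 2`. The following standalone userspace C sketch is my own illustration (not part of the commit): it replays `log2plus1_u64_u32f8()` and `calc_burst_penalty()` from the patches with their default sysctl values (`sched_burst_penalty_offset = 22`, `sched_burst_penalty_scale = 1280`); the helper name `fls64_user` and the sample burst times are made up for the demo.

```c
#include <stdint.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's fls64(): 1-based index of the MSB, 0 for 0. */
static uint32_t fls64_user(uint64_t v) {
	return v ? 64 - __builtin_clzll(v) : 0;
}

/* log2(v) + 1 in fixed point with 8 fractional bits, as in the patch. */
static uint32_t log2plus1_u64_u32f8(uint64_t v) {
	uint32_t msb = fls64_user(v);
	int32_t excess_bits = (int32_t)msb - 9;
	uint8_t fractional = (excess_bits >= 0) ? (uint8_t)(v >> excess_bits)
	                                        : (uint8_t)(v << -excess_bits);
	return msb << 8 | fractional;
}

/* Mirrors calc_burst_penalty(): subtract the offset, scale, clamp to 39 << 2. */
static uint32_t calc_burst_penalty(uint64_t burst_time_ns,
                                   uint8_t offset, uint32_t scale) {
	uint32_t greed = log2plus1_u64_u32f8(burst_time_ns);
	uint32_t tolerance = (uint32_t)offset << 8;
	int32_t diff = (int32_t)greed - (int32_t)tolerance;
	uint32_t penalty = diff > 0 ? (uint32_t)diff : 0;
	uint32_t scaled = penalty * scale >> 16;
	uint32_t max_penalty = 39u << 2;	/* MAX_BURST_PENALTY */
	return scaled < max_penalty ? scaled : max_penalty;
}

int main(void) {
	uint64_t samples_ns[] = { 100000ULL, 1000000ULL, 10000000ULL,
	                          100000000ULL, 1000000000ULL };
	for (unsigned i = 0; i < sizeof samples_ns / sizeof *samples_ns; i++) {
		uint32_t penalty = calc_burst_penalty(samples_ns[i], 22, 1280);
		/* burst_score = penalty >> 2 is added to the task's nice-derived prio. */
		printf("burst %11llu ns -> penalty %3u, score (prio shift) %2u\n",
		       (unsigned long long)samples_ns[i], penalty, penalty >> 2);
	}
	return 0;
}
```

With the default offset of 22, bursts up to roughly 2 ms accrue no penalty, a 10 ms burst shifts the effective priority by about 2 steps, and a 1 s burst by about 11 steps, which is the mechanism the Kconfig help text describes as prioritizing "less bursty" interactive tasks. The raw patches as committed follow.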
diff --git a/sys-kernel/linux-sources-redcore-lts/files/5.15-0001-linux5.15.y-bore5.1.0.patch b/sys-kernel/linux-sources-redcore-lts/files/5.15-0001-linux5.15.y-bore5.1.0.patch new file mode 100644 index 00000000..365ef1f2 --- /dev/null +++ b/sys-kernel/linux-sources-redcore-lts/files/5.15-0001-linux5.15.y-bore5.1.0.patch @@ -0,0 +1,530 @@ +From 60a78397ed1cce95e3d1b22b0ea4adf9f2141de7 Mon Sep 17 00:00:00 2001 +From: Masahito S <firelzrd@gmail.com> +Date: Thu, 28 Mar 2024 00:47:09 +0900 +Subject: [PATCH] linux5.15.y-bore5.1.0 + +--- + include/linux/sched.h | 10 +++ + init/Kconfig | 17 +++++ + kernel/sched/core.c | 143 ++++++++++++++++++++++++++++++++++++++++++ + kernel/sched/debug.c | 3 + + kernel/sched/fair.c | 122 ++++++++++++++++++++++++++++++----- + kernel/sysctl.c | 76 ++++++++++++++++++++++ + 6 files changed, 357 insertions(+), 14 deletions(-) + +diff --git a/include/linux/sched.h b/include/linux/sched.h +index 7bfc2b45cd..7058ceab13 100644 +--- a/include/linux/sched.h ++++ b/include/linux/sched.h +@@ -535,6 +535,16 @@ struct sched_entity { + u64 sum_exec_runtime; + u64 vruntime; + u64 prev_sum_exec_runtime; ++#ifdef CONFIG_SCHED_BORE ++ u64 burst_time; ++ u8 prev_burst_penalty; ++ u8 curr_burst_penalty; ++ u8 burst_penalty; ++ u8 burst_score; ++ u8 child_burst; ++ u32 child_burst_cnt; ++ u64 child_burst_last_cached; ++#endif // CONFIG_SCHED_BORE + + u64 nr_migrations; + +diff --git a/init/Kconfig b/init/Kconfig +index dafc3ba6fa..64bcc7e6e5 100644 +--- a/init/Kconfig ++++ b/init/Kconfig +@@ -1266,6 +1266,23 @@ config CHECKPOINT_RESTORE + + If unsure, say N here. + ++config SCHED_BORE ++ bool "Burst-Oriented Response Enhancer" ++ default y ++ help ++ In Desktop and Mobile computing, one might prefer interactive ++ tasks to keep responsive no matter what they run in the background. ++ ++ Enabling this kernel feature modifies the scheduler to discriminate ++ tasks by their burst time (runtime since it last went sleeping or ++ yielding state) and prioritize those that run less bursty. ++ Such tasks usually include window compositor, widgets backend, ++ terminal emulator, video playback, games and so on. ++ With a little impact to scheduling fairness, it may improve ++ responsiveness especially under heavy background workload. ++ ++ If unsure, say Y here. 
++ + config SCHED_AUTOGROUP + bool "Automatic process group scheduling" + select CGROUPS +diff --git a/kernel/sched/core.c b/kernel/sched/core.c +index 25b8ea9116..937c0abb86 100644 +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -4221,6 +4221,138 @@ int wake_up_state(struct task_struct *p, unsigned int state) + return try_to_wake_up(p, state, 0); + } + ++#ifdef CONFIG_SCHED_BORE ++extern u8 sched_burst_fork_atavistic; ++extern uint sched_burst_cache_lifetime; ++ ++static void __init sched_init_bore(void) { ++ init_task.se.burst_time = 0; ++ init_task.se.prev_burst_penalty = 0; ++ init_task.se.curr_burst_penalty = 0; ++ init_task.se.burst_penalty = 0; ++ init_task.se.burst_score = 0; ++ init_task.se.child_burst_last_cached = 0; ++} ++ ++void inline sched_fork_bore(struct task_struct *p) { ++ p->se.burst_time = 0; ++ p->se.curr_burst_penalty = 0; ++ p->se.burst_score = 0; ++ p->se.child_burst_last_cached = 0; ++} ++ ++static u32 count_child_tasks(struct task_struct *p) { ++ struct task_struct *child; ++ u32 cnt = 0; ++ list_for_each_entry(child, &p->children, sibling) {cnt++;} ++ return cnt; ++} ++ ++static inline bool task_is_inheritable(struct task_struct *p) { ++ return (p->sched_class == &fair_sched_class); ++} ++ ++static inline bool child_burst_cache_expired(struct task_struct *p, u64 now) { ++ u64 expiration_time = ++ p->se.child_burst_last_cached + sched_burst_cache_lifetime; ++ return ((s64)(expiration_time - now) < 0); ++} ++ ++static void __update_child_burst_cache( ++ struct task_struct *p, u32 cnt, u32 sum, u64 now) { ++ u8 avg = 0; ++ if (cnt) avg = sum / cnt; ++ p->se.child_burst = max(avg, p->se.burst_penalty); ++ p->se.child_burst_cnt = cnt; ++ p->se.child_burst_last_cached = now; ++} ++ ++static inline void update_child_burst_direct(struct task_struct *p, u64 now) { ++ struct task_struct *child; ++ u32 cnt = 0; ++ u32 sum = 0; ++ ++ list_for_each_entry(child, &p->children, sibling) { ++ if (!task_is_inheritable(child)) continue; ++ cnt++; ++ sum += child->se.burst_penalty; ++ } ++ ++ __update_child_burst_cache(p, cnt, sum, now); ++} ++ ++static inline u8 __inherit_burst_direct(struct task_struct *p, u64 now) { ++ struct task_struct *parent = p->real_parent; ++ if (child_burst_cache_expired(parent, now)) ++ update_child_burst_direct(parent, now); ++ ++ return parent->se.child_burst; ++} ++ ++static void update_child_burst_topological( ++ struct task_struct *p, u64 now, u32 depth, u32 *acnt, u32 *asum) { ++ struct task_struct *child, *dec; ++ u32 cnt = 0, dcnt = 0; ++ u32 sum = 0; ++ ++ list_for_each_entry(child, &p->children, sibling) { ++ dec = child; ++ while ((dcnt = count_child_tasks(dec)) == 1) ++ dec = list_first_entry(&dec->children, struct task_struct, sibling); ++ ++ if (!dcnt || !depth) { ++ if (!task_is_inheritable(dec)) continue; ++ cnt++; ++ sum += dec->se.burst_penalty; ++ continue; ++ } ++ if (!child_burst_cache_expired(dec, now)) { ++ cnt += dec->se.child_burst_cnt; ++ sum += (u32)dec->se.child_burst * dec->se.child_burst_cnt; ++ continue; ++ } ++ update_child_burst_topological(dec, now, depth - 1, &cnt, &sum); ++ } ++ ++ __update_child_burst_cache(p, cnt, sum, now); ++ *acnt += cnt; ++ *asum += sum; ++} ++ ++static inline u8 __inherit_burst_topological(struct task_struct *p, u64 now) { ++ struct task_struct *anc = p->real_parent; ++ u32 cnt = 0, sum = 0; ++ ++ while (anc->real_parent != anc && count_child_tasks(anc) == 1) ++ anc = anc->real_parent; ++ ++ if (child_burst_cache_expired(anc, now)) ++ update_child_burst_topological( ++ anc, now, 
sched_burst_fork_atavistic - 1, &cnt, &sum); ++ ++ return anc->se.child_burst; ++} ++ ++static inline void inherit_burst(struct task_struct *p) { ++ u8 burst_cache; ++ u64 now = ktime_get_ns(); ++ ++ read_lock(&tasklist_lock); ++ burst_cache = likely(sched_burst_fork_atavistic)? ++ __inherit_burst_topological(p, now): ++ __inherit_burst_direct(p, now); ++ read_unlock(&tasklist_lock); ++ ++ p->se.prev_burst_penalty = max(p->se.prev_burst_penalty, burst_cache); ++} ++ ++static void sched_post_fork_bore(struct task_struct *p) { ++ if (p->sched_class == &fair_sched_class) ++ inherit_burst(p); ++ p->se.burst_penalty = p->se.prev_burst_penalty; ++} ++#endif // CONFIG_SCHED_BORE ++ + /* + * Perform scheduler related setup for a newly forked process p. + * p is forked by current. +@@ -4237,6 +4369,9 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p) + p->se.prev_sum_exec_runtime = 0; + p->se.nr_migrations = 0; + p->se.vruntime = 0; ++#ifdef CONFIG_SCHED_BORE ++ sched_fork_bore(p); ++#endif // CONFIG_SCHED_BORE + INIT_LIST_HEAD(&p->se.group_node); + + #ifdef CONFIG_FAIR_GROUP_SCHED +@@ -4469,6 +4604,9 @@ void sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs) + + void sched_post_fork(struct task_struct *p) + { ++#ifdef CONFIG_SCHED_BORE ++ sched_post_fork_bore(p); ++#endif // CONFIG_SCHED_BORE + uclamp_post_fork(p); + } + +@@ -9381,6 +9519,11 @@ void __init sched_init(void) + BUG_ON(&dl_sched_class + 1 != &stop_sched_class); + #endif + ++#ifdef CONFIG_SCHED_BORE ++ sched_init_bore(); ++ printk(KERN_INFO "BORE (Burst-Oriented Response Enhancer) CPU Scheduler modification 5.1.0 by Masahito Suzuki"); ++#endif // CONFIG_SCHED_BORE ++ + wait_bit_init(); + + #ifdef CONFIG_FAIR_GROUP_SCHED +diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c +index 5ffe16ff03..de5345d03b 100644 +--- a/kernel/sched/debug.c ++++ b/kernel/sched/debug.c +@@ -545,6 +545,9 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p) + SPLIT_NS(p->se.sum_exec_runtime), + SPLIT_NS(schedstat_val_or_zero(p->stats.sum_sleep_runtime))); + ++#ifdef CONFIG_SCHED_BORE ++ SEQ_printf(m, " %2d", p->se.burst_score); ++#endif // CONFIG_SCHED_BORE + #ifdef CONFIG_NUMA_BALANCING + SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p)); + #endif +diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c +index fd8b565664..8c4c060510 100644 +--- a/kernel/sched/fair.c ++++ b/kernel/sched/fair.c +@@ -19,6 +19,9 @@ + * + * Adaptive scheduling granularity, math enhancements by Peter Zijlstra + * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra ++ * ++ * Burst-Oriented Response Enhancer (BORE) CPU Scheduler ++ * Copyright (C) 2021-2024 Masahito Suzuki <firelzrd@gmail.com> + */ + #include "sched.h" + +@@ -84,6 +87,74 @@ static unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL; + + const_debug unsigned int sysctl_sched_migration_cost = 500000UL; + ++#ifdef CONFIG_SCHED_BORE ++bool __read_mostly sched_bore = 1; ++bool __read_mostly sched_burst_smoothness_long = 1; ++bool __read_mostly sched_burst_smoothness_short = 0; ++u8 __read_mostly sched_burst_fork_atavistic = 2; ++u8 __read_mostly sched_burst_penalty_offset = 22; ++uint __read_mostly sched_burst_penalty_scale = 1280; ++uint __read_mostly sched_burst_cache_lifetime = 60000000; ++ ++#define MAX_BURST_PENALTY (39U <<2) ++ ++static inline u32 log2plus1_u64_u32f8(u64 v) { ++ u32 msb = fls64(v); ++ s32 excess_bits = msb - 9; ++ u8 fractional = (0 <= excess_bits)? 
v >> excess_bits: v << -excess_bits; ++ return msb << 8 | fractional; ++} ++ ++static inline u32 calc_burst_penalty(u64 burst_time) { ++ u32 greed, tolerance, penalty, scaled_penalty; ++ ++ greed = log2plus1_u64_u32f8(burst_time); ++ tolerance = sched_burst_penalty_offset << 8; ++ penalty = max(0, (s32)greed - (s32)tolerance); ++ scaled_penalty = penalty * sched_burst_penalty_scale >> 16; ++ ++ return min(MAX_BURST_PENALTY, scaled_penalty); ++} ++ ++static inline u64 scale_slice(u64 delta, struct sched_entity *se) { ++ return mul_u64_u32_shr(delta, sched_prio_to_wmult[se->burst_score], 22); ++} ++ ++static void update_burst_score(struct sched_entity *se) { ++ if (!entity_is_task(se)) return; ++ struct task_struct *p = task_of(se); ++ u8 prio = p->static_prio - MAX_RT_PRIO; ++ u8 prev_prio = min(39, prio + se->burst_score); ++ ++ se->burst_score = se->burst_penalty >> 2; ++ ++ u8 new_prio = min(39, prio + se->burst_score); ++ if (new_prio != prev_prio) ++ reweight_task(p, new_prio); ++} ++ ++static void update_burst_penalty(struct sched_entity *se) { ++ se->curr_burst_penalty = calc_burst_penalty(se->burst_time); ++ se->burst_penalty = max(se->prev_burst_penalty, se->curr_burst_penalty); ++ update_burst_score(se); ++} ++ ++static inline u32 binary_smooth(u32 new, u32 old) { ++ int increment = new - old; ++ return (0 <= increment)? ++ old + ( increment >> (int)sched_burst_smoothness_long): ++ old - (-increment >> (int)sched_burst_smoothness_short); ++} ++ ++static void restart_burst(struct sched_entity *se) { ++ se->burst_penalty = se->prev_burst_penalty = ++ binary_smooth(se->curr_burst_penalty, se->prev_burst_penalty); ++ se->curr_burst_penalty = 0; ++ se->burst_time = 0; ++ update_burst_score(se); ++} ++#endif // CONFIG_SCHED_BORE ++ + int sched_thermal_decay_shift; + static int __init setup_sched_thermal_decay_shift(char *str) + { +@@ -645,7 +716,6 @@ static inline u64 calc_delta_fair(u64 delta, struct sched_entity *se) + { + if (unlikely(se->load.weight != NICE_0_LOAD)) + delta = __calc_delta(delta, NICE_0_LOAD, &se->load); +- + return delta; + } + +@@ -848,7 +918,13 @@ static void update_curr(struct cfs_rq *cfs_rq) + curr->sum_exec_runtime += delta_exec; + schedstat_add(cfs_rq->exec_clock, delta_exec); + ++#ifdef CONFIG_SCHED_BORE ++ curr->burst_time += delta_exec; ++ update_burst_penalty(curr); ++ curr->vruntime += max(1ULL, calc_delta_fair(delta_exec, curr)); ++#else // !CONFIG_SCHED_BORE + curr->vruntime += calc_delta_fair(delta_exec, curr); ++#endif // CONFIG_SCHED_BORE + update_min_vruntime(cfs_rq); + + if (entity_is_task(curr)) { +@@ -5907,6 +5983,14 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags) + bool was_sched_idle = sched_idle_rq(rq); + + util_est_dequeue(&rq->cfs, p); ++#ifdef CONFIG_SCHED_BORE ++ if (task_sleep) { ++ cfs_rq = cfs_rq_of(se); ++ if (cfs_rq->curr == se) ++ update_curr(cfs_rq); ++ restart_burst(se); ++ } ++#endif // CONFIG_SCHED_BORE + + for_each_sched_entity(se) { + cfs_rq = cfs_rq_of(se); +@@ -7660,24 +7744,31 @@ static void yield_task_fair(struct rq *rq) + /* + * Are we the only task in the tree? + */ ++#if !defined(CONFIG_SCHED_BORE) + if (unlikely(rq->nr_running == 1)) + return; + + clear_buddies(cfs_rq, se); ++#endif // CONFIG_SCHED_BORE + +- if (curr->policy != SCHED_BATCH) { +- update_rq_clock(rq); +- /* +- * Update run-time statistics of the 'current'. 
+- */ +- update_curr(cfs_rq); +- /* +- * Tell update_rq_clock() that we've just updated, +- * so we don't do microscopic update in schedule() +- * and double the fastpath cost. +- */ +- rq_clock_skip_update(rq); +- } ++ update_rq_clock(rq); ++ /* ++ * Update run-time statistics of the 'current'. ++ */ ++ update_curr(cfs_rq); ++#ifdef CONFIG_SCHED_BORE ++ restart_burst(se); ++ if (unlikely(rq->nr_running == 1)) ++ return; ++ ++ clear_buddies(cfs_rq, se); ++#endif // CONFIG_SCHED_BORE ++ /* ++ * Tell update_rq_clock() that we've just updated, ++ * so we don't do microscopic update in schedule() ++ * and double the fastpath cost. ++ */ ++ rq_clock_skip_update(rq); + + set_skip_buddy(se); + } +@@ -11486,6 +11577,9 @@ static void task_fork_fair(struct task_struct *p) + update_curr(cfs_rq); + se->vruntime = curr->vruntime; + } ++#ifdef CONFIG_SCHED_BORE ++ update_burst_score(se); ++#endif // CONFIG_SCHED_BORE + place_entity(cfs_rq, se, 1); + + if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) { +diff --git a/kernel/sysctl.c b/kernel/sysctl.c +index 928798f89c..27d197c737 100644 +--- a/kernel/sysctl.c ++++ b/kernel/sysctl.c +@@ -133,6 +133,19 @@ static int minolduid; + static int ngroups_max = NGROUPS_MAX; + static const int cap_last_cap = CAP_LAST_CAP; + ++#ifdef CONFIG_SCHED_BORE ++extern u8 sched_bore; ++extern u8 sched_burst_smoothness_long; ++extern u8 sched_burst_smoothness_short; ++extern u8 sched_burst_fork_atavistic; ++extern u8 sched_burst_penalty_offset; ++extern uint sched_burst_penalty_scale; ++extern uint sched_burst_cache_lifetime; ++static int __maybe_unused three = 3; ++static int __maybe_unused sixty_four = 64; ++static int __maybe_unused maxval_12_bits = 4095; ++#endif // CONFIG_SCHED_BORE ++ + /* + * This is needed for proc_doulongvec_minmax of sysctl_hung_task_timeout_secs + * and hung_task_check_interval_secs +@@ -1775,6 +1788,69 @@ int proc_do_static_key(struct ctl_table *table, int write, + } + + static struct ctl_table kern_table[] = { ++#ifdef CONFIG_SCHED_BORE ++ { ++ .procname = "sched_bore", ++ .data = &sched_bore, ++ .maxlen = sizeof(u8), ++ .mode = 0644, ++ .proc_handler = proc_dou8vec_minmax, ++ .extra1 = SYSCTL_ONE, ++ .extra2 = SYSCTL_ONE, ++ }, ++ { ++ .procname = "sched_burst_smoothness_long", ++ .data = &sched_burst_smoothness_long, ++ .maxlen = sizeof(u8), ++ .mode = 0644, ++ .proc_handler = proc_dou8vec_minmax, ++ .extra1 = SYSCTL_ZERO, ++ .extra2 = SYSCTL_ONE, ++ }, ++ { ++ .procname = "sched_burst_smoothness_short", ++ .data = &sched_burst_smoothness_short, ++ .maxlen = sizeof(u8), ++ .mode = 0644, ++ .proc_handler = proc_dou8vec_minmax, ++ .extra1 = SYSCTL_ZERO, ++ .extra2 = SYSCTL_ONE, ++ }, ++ { ++ .procname = "sched_burst_fork_atavistic", ++ .data = &sched_burst_fork_atavistic, ++ .maxlen = sizeof(u8), ++ .mode = 0644, ++ .proc_handler = proc_dou8vec_minmax, ++ .extra1 = SYSCTL_ZERO, ++ .extra2 = &three, ++ }, ++ { ++ .procname = "sched_burst_penalty_offset", ++ .data = &sched_burst_penalty_offset, ++ .maxlen = sizeof(u8), ++ .mode = 0644, ++ .proc_handler = proc_dou8vec_minmax, ++ .extra1 = SYSCTL_ZERO, ++ .extra2 = &sixty_four, ++ }, ++ { ++ .procname = "sched_burst_penalty_scale", ++ .data = &sched_burst_penalty_scale, ++ .maxlen = sizeof(uint), ++ .mode = 0644, ++ .proc_handler = proc_douintvec_minmax, ++ .extra1 = SYSCTL_ZERO, ++ .extra2 = &maxval_12_bits, ++ }, ++ { ++ .procname = "sched_burst_cache_lifetime", ++ .data = &sched_burst_cache_lifetime, ++ .maxlen = sizeof(uint), ++ .mode = 0644, ++ .proc_handler = proc_douintvec, 
++ }, ++#endif // CONFIG_SCHED_BORE + { + .procname = "sched_child_runs_first", + .data = &sysctl_sched_child_runs_first, +-- +2.34.1 + diff --git a/sys-kernel/linux-sources-redcore-lts/files/5.15-0002-constgran-vanilla-max.patch b/sys-kernel/linux-sources-redcore-lts/files/5.15-0002-constgran-vanilla-max.patch new file mode 100644 index 00000000..aa20644d --- /dev/null +++ b/sys-kernel/linux-sources-redcore-lts/files/5.15-0002-constgran-vanilla-max.patch @@ -0,0 +1,86 @@ +From 250ba4a23f392dd37c673fd3879c6281a3a62291 Mon Sep 17 00:00:00 2001 +From: Masahito S <firelzrd@gmail.com> +Date: Mon, 15 May 2023 06:00:52 +0900 +Subject: [PATCH] constgran vanilla-max sched: Make latency / granularity + constant tunable_scaling = 1 (logarithmic) -> 0 (constant) latency_ns = + variable 6-24ms -> constant 24ms min_granularity_ns = variable 0.75-3ms -> + constant 3ms wakeup_granularity_ns = variable 1-4ms -> constant 4ms + +--- + kernel/sched/fair.c | 31 +++++++++++++++++++++++++++---- + 1 file changed, 27 insertions(+), 4 deletions(-) + +diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c +index 8b5bf7280..340f8dd14 100644 +--- a/kernel/sched/fair.c ++++ b/kernel/sched/fair.c +@@ -36,10 +36,16 @@ + * (to see the precise effective timeslice length of your workload, + * run vmstat and monitor the context-switches (cs) field) + * +- * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds) ++ * (BORE default: 24ms constant, units: nanoseconds) ++ * (CFS default: 6ms * (1 + ilog(ncpus)), units: nanoseconds) + */ ++#ifdef CONFIG_SCHED_BORE ++unsigned int sysctl_sched_latency = 24000000ULL; ++static unsigned int normalized_sysctl_sched_latency = 24000000ULL; ++#else // CONFIG_SCHED_BORE + unsigned int sysctl_sched_latency = 6000000ULL; + static unsigned int normalized_sysctl_sched_latency = 6000000ULL; ++#endif // CONFIG_SCHED_BORE + + /* + * The initial- and re-scaling of tunables is configurable +@@ -50,17 +56,28 @@ static unsigned int normalized_sysctl_sched_latency = 6000000ULL; + * SCHED_TUNABLESCALING_LOG - scaled logarithmical, *1+ilog(ncpus) + * SCHED_TUNABLESCALING_LINEAR - scaled linear, *ncpus + * +- * (default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus)) ++ * (BORE default SCHED_TUNABLESCALING_NONE = *1 constant) ++ * (CFS default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus)) + */ ++#ifdef CONFIG_SCHED_BORE ++unsigned int sysctl_sched_tunable_scaling = SCHED_TUNABLESCALING_NONE; ++#else // CONFIG_SCHED_BORE + unsigned int sysctl_sched_tunable_scaling = SCHED_TUNABLESCALING_LOG; ++#endif // CONFIG_SCHED_BORE + + /* + * Minimal preemption granularity for CPU-bound tasks: + * +- * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds) ++ * (BORE default: 3 msec constant, units: nanoseconds) ++ * (CFS default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds) + */ ++#ifdef CONFIG_SCHED_BORE ++unsigned int sysctl_sched_min_granularity = 3000000ULL; ++static unsigned int normalized_sysctl_sched_min_granularity = 3000000ULL; ++#else // CONFIG_SCHED_BORE + unsigned int sysctl_sched_min_granularity = 750000ULL; + static unsigned int normalized_sysctl_sched_min_granularity = 750000ULL; ++#endif // CONFIG_SCHED_BORE + + /* + * This value is kept at sysctl_sched_latency/sysctl_sched_min_granularity +@@ -80,10 +97,16 @@ unsigned int sysctl_sched_child_runs_first __read_mostly; + * and reduces their over-scheduling. Synchronous workloads will still + * have immediate wakeup/sleep latencies. 
+ * +- * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds) ++ * (BORE default: 4 msec constant, units: nanoseconds) ++ * (CFS default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds) + */ ++#ifdef CONFIG_SCHED_BORE ++unsigned int sysctl_sched_wakeup_granularity = 4000000UL; ++static unsigned int normalized_sysctl_sched_wakeup_granularity = 4000000UL; ++#else // CONFIG_SCHED_BORE + unsigned int sysctl_sched_wakeup_granularity = 1000000UL; + static unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL; ++#endif // CONFIG_SCHED_BORE + + const_debug unsigned int sysctl_sched_migration_cost = 500000UL; + +-- +2.25.1 + diff --git a/sys-kernel/linux-sources-redcore-lts/files/5.15-amd64.config b/sys-kernel/linux-sources-redcore-lts/files/5.15-amd64.config index 8a2e2543..0c1f9970 100644 --- a/sys-kernel/linux-sources-redcore-lts/files/5.15-amd64.config +++ b/sys-kernel/linux-sources-redcore-lts/files/5.15-amd64.config @@ -1,6 +1,6 @@ # # Automatically generated file; DO NOT EDIT. -# Linux/x86 5.15.156-redcore-lts Kernel Configuration +# Linux/x86 5.15.156-redcore-lts-r1 Kernel Configuration # CONFIG_CC_VERSION_TEXT="gcc (Gentoo Hardened 13.2.0-r15 p3) 13.2.0" CONFIG_CC_IS_GCC=y @@ -223,6 +223,7 @@ CONFIG_USER_NS_UNPRIVILEGED=y CONFIG_PID_NS=y CONFIG_NET_NS=y CONFIG_CHECKPOINT_RESTORE=y +CONFIG_SCHED_BORE=y CONFIG_SCHED_AUTOGROUP=y # CONFIG_SYSFS_DEPRECATED is not set CONFIG_RELAY=y diff --git a/sys-kernel/linux-sources-redcore-lts/files/6.1-0001-linux6.1.y-bore5.1.0.patch b/sys-kernel/linux-sources-redcore-lts/files/6.1-0001-linux6.1.y-bore5.1.0.patch new file mode 100644 index 00000000..10fdbde1 --- /dev/null +++ b/sys-kernel/linux-sources-redcore-lts/files/6.1-0001-linux6.1.y-bore5.1.0.patch @@ -0,0 +1,507 @@ +From b81d0a14e0383fb596314320f9853e9c416545bb Mon Sep 17 00:00:00 2001 +From: Masahito S <firelzrd@gmail.com> +Date: Thu, 28 Mar 2024 00:38:44 +0900 +Subject: [PATCH] linux6.1.y-bore5.1.0 + +--- + include/linux/sched.h | 10 +++ + init/Kconfig | 17 ++++ + kernel/sched/core.c | 143 ++++++++++++++++++++++++++++++++ + kernel/sched/debug.c | 3 + + kernel/sched/fair.c | 187 ++++++++++++++++++++++++++++++++++++++---- + 5 files changed, 346 insertions(+), 14 deletions(-) + +diff --git a/include/linux/sched.h b/include/linux/sched.h +index 0cac69902e..159733cf83 100644 +--- a/include/linux/sched.h ++++ b/include/linux/sched.h +@@ -555,6 +555,16 @@ struct sched_entity { + u64 sum_exec_runtime; + u64 vruntime; + u64 prev_sum_exec_runtime; ++#ifdef CONFIG_SCHED_BORE ++ u64 burst_time; ++ u8 prev_burst_penalty; ++ u8 curr_burst_penalty; ++ u8 burst_penalty; ++ u8 burst_score; ++ u8 child_burst; ++ u32 child_burst_cnt; ++ u64 child_burst_last_cached; ++#endif // CONFIG_SCHED_BORE + + u64 nr_migrations; + +diff --git a/init/Kconfig b/init/Kconfig +index 1487046402..31d5bf4f5e 100644 +--- a/init/Kconfig ++++ b/init/Kconfig +@@ -1280,6 +1280,23 @@ config CHECKPOINT_RESTORE + + If unsure, say N here. + ++config SCHED_BORE ++ bool "Burst-Oriented Response Enhancer" ++ default y ++ help ++ In Desktop and Mobile computing, one might prefer interactive ++ tasks to keep responsive no matter what they run in the background. ++ ++ Enabling this kernel feature modifies the scheduler to discriminate ++ tasks by their burst time (runtime since it last went sleeping or ++ yielding state) and prioritize those that run less bursty. ++ Such tasks usually include window compositor, widgets backend, ++ terminal emulator, video playback, games and so on. 
++ With a little impact to scheduling fairness, it may improve ++ responsiveness especially under heavy background workload. ++ ++ If unsure, say Y here. ++ + config SCHED_AUTOGROUP + bool "Automatic process group scheduling" + select CGROUPS +diff --git a/kernel/sched/core.c b/kernel/sched/core.c +index 18a4f8f28a..c9270d4a77 100644 +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -4368,6 +4368,138 @@ int wake_up_state(struct task_struct *p, unsigned int state) + return try_to_wake_up(p, state, 0); + } + ++#ifdef CONFIG_SCHED_BORE ++extern u8 sched_burst_fork_atavistic; ++extern uint sched_burst_cache_lifetime; ++ ++static void __init sched_init_bore(void) { ++ init_task.se.burst_time = 0; ++ init_task.se.prev_burst_penalty = 0; ++ init_task.se.curr_burst_penalty = 0; ++ init_task.se.burst_penalty = 0; ++ init_task.se.burst_score = 0; ++ init_task.se.child_burst_last_cached = 0; ++} ++ ++void inline sched_fork_bore(struct task_struct *p) { ++ p->se.burst_time = 0; ++ p->se.curr_burst_penalty = 0; ++ p->se.burst_score = 0; ++ p->se.child_burst_last_cached = 0; ++} ++ ++static u32 count_child_tasks(struct task_struct *p) { ++ struct task_struct *child; ++ u32 cnt = 0; ++ list_for_each_entry(child, &p->children, sibling) {cnt++;} ++ return cnt; ++} ++ ++static inline bool task_is_inheritable(struct task_struct *p) { ++ return (p->sched_class == &fair_sched_class); ++} ++ ++static inline bool child_burst_cache_expired(struct task_struct *p, u64 now) { ++ u64 expiration_time = ++ p->se.child_burst_last_cached + sched_burst_cache_lifetime; ++ return ((s64)(expiration_time - now) < 0); ++} ++ ++static void __update_child_burst_cache( ++ struct task_struct *p, u32 cnt, u32 sum, u64 now) { ++ u8 avg = 0; ++ if (cnt) avg = sum / cnt; ++ p->se.child_burst = max(avg, p->se.burst_penalty); ++ p->se.child_burst_cnt = cnt; ++ p->se.child_burst_last_cached = now; ++} ++ ++static inline void update_child_burst_direct(struct task_struct *p, u64 now) { ++ struct task_struct *child; ++ u32 cnt = 0; ++ u32 sum = 0; ++ ++ list_for_each_entry(child, &p->children, sibling) { ++ if (!task_is_inheritable(child)) continue; ++ cnt++; ++ sum += child->se.burst_penalty; ++ } ++ ++ __update_child_burst_cache(p, cnt, sum, now); ++} ++ ++static inline u8 __inherit_burst_direct(struct task_struct *p, u64 now) { ++ struct task_struct *parent = p->real_parent; ++ if (child_burst_cache_expired(parent, now)) ++ update_child_burst_direct(parent, now); ++ ++ return parent->se.child_burst; ++} ++ ++static void update_child_burst_topological( ++ struct task_struct *p, u64 now, u32 depth, u32 *acnt, u32 *asum) { ++ struct task_struct *child, *dec; ++ u32 cnt = 0, dcnt = 0; ++ u32 sum = 0; ++ ++ list_for_each_entry(child, &p->children, sibling) { ++ dec = child; ++ while ((dcnt = count_child_tasks(dec)) == 1) ++ dec = list_first_entry(&dec->children, struct task_struct, sibling); ++ ++ if (!dcnt || !depth) { ++ if (!task_is_inheritable(dec)) continue; ++ cnt++; ++ sum += dec->se.burst_penalty; ++ continue; ++ } ++ if (!child_burst_cache_expired(dec, now)) { ++ cnt += dec->se.child_burst_cnt; ++ sum += (u32)dec->se.child_burst * dec->se.child_burst_cnt; ++ continue; ++ } ++ update_child_burst_topological(dec, now, depth - 1, &cnt, &sum); ++ } ++ ++ __update_child_burst_cache(p, cnt, sum, now); ++ *acnt += cnt; ++ *asum += sum; ++} ++ ++static inline u8 __inherit_burst_topological(struct task_struct *p, u64 now) { ++ struct task_struct *anc = p->real_parent; ++ u32 cnt = 0, sum = 0; ++ ++ while (anc->real_parent != anc && 
count_child_tasks(anc) == 1) ++ anc = anc->real_parent; ++ ++ if (child_burst_cache_expired(anc, now)) ++ update_child_burst_topological( ++ anc, now, sched_burst_fork_atavistic - 1, &cnt, &sum); ++ ++ return anc->se.child_burst; ++} ++ ++static inline void inherit_burst(struct task_struct *p) { ++ u8 burst_cache; ++ u64 now = ktime_get_ns(); ++ ++ read_lock(&tasklist_lock); ++ burst_cache = likely(sched_burst_fork_atavistic)? ++ __inherit_burst_topological(p, now): ++ __inherit_burst_direct(p, now); ++ read_unlock(&tasklist_lock); ++ ++ p->se.prev_burst_penalty = max(p->se.prev_burst_penalty, burst_cache); ++} ++ ++static void sched_post_fork_bore(struct task_struct *p) { ++ if (p->sched_class == &fair_sched_class) ++ inherit_burst(p); ++ p->se.burst_penalty = p->se.prev_burst_penalty; ++} ++#endif // CONFIG_SCHED_BORE ++ + /* + * Perform scheduler related setup for a newly forked process p. + * p is forked by current. +@@ -4384,6 +4516,9 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p) + p->se.prev_sum_exec_runtime = 0; + p->se.nr_migrations = 0; + p->se.vruntime = 0; ++#ifdef CONFIG_SCHED_BORE ++ sched_fork_bore(p); ++#endif // CONFIG_SCHED_BORE + INIT_LIST_HEAD(&p->se.group_node); + + #ifdef CONFIG_FAIR_GROUP_SCHED +@@ -4689,6 +4824,9 @@ void sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs) + + void sched_post_fork(struct task_struct *p) + { ++#ifdef CONFIG_SCHED_BORE ++ sched_post_fork_bore(p); ++#endif // CONFIG_SCHED_BORE + uclamp_post_fork(p); + } + +@@ -9675,6 +9813,11 @@ void __init sched_init(void) + BUG_ON(&dl_sched_class != &stop_sched_class + 1); + #endif + ++#ifdef CONFIG_SCHED_BORE ++ sched_init_bore(); ++ printk(KERN_INFO "BORE (Burst-Oriented Response Enhancer) CPU Scheduler modification 5.1.0 by Masahito Suzuki"); ++#endif // CONFIG_SCHED_BORE ++ + wait_bit_init(); + + #ifdef CONFIG_FAIR_GROUP_SCHED +diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c +index 1637b65ba0..a0157ac1be 100644 +--- a/kernel/sched/debug.c ++++ b/kernel/sched/debug.c +@@ -547,6 +547,9 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p) + SPLIT_NS(schedstat_val_or_zero(p->stats.sum_sleep_runtime)), + SPLIT_NS(schedstat_val_or_zero(p->stats.sum_block_runtime))); + ++#ifdef CONFIG_SCHED_BORE ++ SEQ_printf(m, " %2d", p->se.burst_score); ++#endif // CONFIG_SCHED_BORE + #ifdef CONFIG_NUMA_BALANCING + SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p)); + #endif +diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c +index 2558ab9033..138e1901ea 100644 +--- a/kernel/sched/fair.c ++++ b/kernel/sched/fair.c +@@ -19,6 +19,9 @@ + * + * Adaptive scheduling granularity, math enhancements by Peter Zijlstra + * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra ++ * ++ * Burst-Oriented Response Enhancer (BORE) CPU Scheduler ++ * Copyright (C) 2021-2024 Masahito Suzuki <firelzrd@gmail.com> + */ + #include <linux/energy_model.h> + #include <linux/mmap_lock.h> +@@ -126,6 +129,76 @@ static unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL; + + const_debug unsigned int sysctl_sched_migration_cost = 500000UL; + ++#ifdef CONFIG_SCHED_BORE ++u8 __read_mostly sched_bore = 1; ++u8 __read_mostly sched_burst_smoothness_long = 1; ++u8 __read_mostly sched_burst_smoothness_short = 0; ++u8 __read_mostly sched_burst_fork_atavistic = 2; ++u8 __read_mostly sched_burst_penalty_offset = 22; ++uint __read_mostly sched_burst_penalty_scale = 1280; ++uint __read_mostly sched_burst_cache_lifetime = 60000000; ++static int 
__maybe_unused sixty_four = 64; ++static int __maybe_unused maxval_12_bits = 4095; ++ ++#define MAX_BURST_PENALTY (39U <<2) ++ ++static inline u32 log2plus1_u64_u32f8(u64 v) { ++ u32 msb = fls64(v); ++ s32 excess_bits = msb - 9; ++ u8 fractional = (0 <= excess_bits)? v >> excess_bits: v << -excess_bits; ++ return msb << 8 | fractional; ++} ++ ++static inline u32 calc_burst_penalty(u64 burst_time) { ++ u32 greed, tolerance, penalty, scaled_penalty; ++ ++ greed = log2plus1_u64_u32f8(burst_time); ++ tolerance = sched_burst_penalty_offset << 8; ++ penalty = max(0, (s32)greed - (s32)tolerance); ++ scaled_penalty = penalty * sched_burst_penalty_scale >> 16; ++ ++ return min(MAX_BURST_PENALTY, scaled_penalty); ++} ++ ++static inline u64 scale_slice(u64 delta, struct sched_entity *se) { ++ return mul_u64_u32_shr(delta, sched_prio_to_wmult[se->burst_score], 22); ++} ++ ++static void update_burst_score(struct sched_entity *se) { ++ if (!entity_is_task(se)) return; ++ struct task_struct *p = task_of(se); ++ u8 prio = p->static_prio - MAX_RT_PRIO; ++ u8 prev_prio = min(39, prio + se->burst_score); ++ ++ se->burst_score = se->burst_penalty >> 2; ++ ++ u8 new_prio = min(39, prio + se->burst_score); ++ if (new_prio != prev_prio) ++ reweight_task(p, new_prio); ++} ++ ++static void update_burst_penalty(struct sched_entity *se) { ++ se->curr_burst_penalty = calc_burst_penalty(se->burst_time); ++ se->burst_penalty = max(se->prev_burst_penalty, se->curr_burst_penalty); ++ update_burst_score(se); ++} ++ ++static inline u32 binary_smooth(u32 new, u32 old) { ++ int increment = new - old; ++ return (0 <= increment)? ++ old + ( increment >> (int)sched_burst_smoothness_long): ++ old - (-increment >> (int)sched_burst_smoothness_short); ++} ++ ++static void restart_burst(struct sched_entity *se) { ++ se->burst_penalty = se->prev_burst_penalty = ++ binary_smooth(se->curr_burst_penalty, se->prev_burst_penalty); ++ se->curr_burst_penalty = 0; ++ se->burst_time = 0; ++ update_burst_score(se); ++} ++#endif // CONFIG_SCHED_BORE ++ + int sched_thermal_decay_shift; + static int __init setup_sched_thermal_decay_shift(char *str) + { +@@ -180,6 +253,69 @@ static unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL; + + #ifdef CONFIG_SYSCTL + static struct ctl_table sched_fair_sysctls[] = { ++#ifdef CONFIG_SCHED_BORE ++ { ++ .procname = "sched_bore", ++ .data = &sched_bore, ++ .maxlen = sizeof(u8), ++ .mode = 0644, ++ .proc_handler = proc_dou8vec_minmax, ++ .extra1 = SYSCTL_ONE, ++ .extra2 = SYSCTL_ONE, ++ }, ++ { ++ .procname = "sched_burst_smoothness_long", ++ .data = &sched_burst_smoothness_long, ++ .maxlen = sizeof(u8), ++ .mode = 0644, ++ .proc_handler = proc_dou8vec_minmax, ++ .extra1 = SYSCTL_ZERO, ++ .extra2 = SYSCTL_ONE, ++ }, ++ { ++ .procname = "sched_burst_smoothness_short", ++ .data = &sched_burst_smoothness_short, ++ .maxlen = sizeof(u8), ++ .mode = 0644, ++ .proc_handler = proc_dou8vec_minmax, ++ .extra1 = SYSCTL_ZERO, ++ .extra2 = SYSCTL_ONE, ++ }, ++ { ++ .procname = "sched_burst_fork_atavistic", ++ .data = &sched_burst_fork_atavistic, ++ .maxlen = sizeof(u8), ++ .mode = 0644, ++ .proc_handler = proc_dou8vec_minmax, ++ .extra1 = SYSCTL_ZERO, ++ .extra2 = SYSCTL_THREE, ++ }, ++ { ++ .procname = "sched_burst_penalty_offset", ++ .data = &sched_burst_penalty_offset, ++ .maxlen = sizeof(u8), ++ .mode = 0644, ++ .proc_handler = proc_dou8vec_minmax, ++ .extra1 = SYSCTL_ZERO, ++ .extra2 = &sixty_four, ++ }, ++ { ++ .procname = "sched_burst_penalty_scale", ++ .data = &sched_burst_penalty_scale, ++ .maxlen = 
sizeof(uint), ++ .mode = 0644, ++ .proc_handler = proc_douintvec_minmax, ++ .extra1 = SYSCTL_ZERO, ++ .extra2 = &maxval_12_bits, ++ }, ++ { ++ .procname = "sched_burst_cache_lifetime", ++ .data = &sched_burst_cache_lifetime, ++ .maxlen = sizeof(uint), ++ .mode = 0644, ++ .proc_handler = proc_douintvec, ++ }, ++#endif // CONFIG_SCHED_BORE + { + .procname = "sched_child_runs_first", + .data = &sysctl_sched_child_runs_first, +@@ -695,7 +831,6 @@ static inline u64 calc_delta_fair(u64 delta, struct sched_entity *se) + { + if (unlikely(se->load.weight != NICE_0_LOAD)) + delta = __calc_delta(delta, NICE_0_LOAD, &se->load); +- + return delta; + } + +@@ -905,7 +1040,13 @@ static void update_curr(struct cfs_rq *cfs_rq) + curr->sum_exec_runtime += delta_exec; + schedstat_add(cfs_rq->exec_clock, delta_exec); + ++#ifdef CONFIG_SCHED_BORE ++ curr->burst_time += delta_exec; ++ update_burst_penalty(curr); ++ curr->vruntime += max(1ULL, calc_delta_fair(delta_exec, curr)); ++#else // !CONFIG_SCHED_BORE + curr->vruntime += calc_delta_fair(delta_exec, curr); ++#endif // CONFIG_SCHED_BORE + update_min_vruntime(cfs_rq); + + if (entity_is_task(curr)) { +@@ -6171,6 +6312,14 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags) + bool was_sched_idle = sched_idle_rq(rq); + + util_est_dequeue(&rq->cfs, p); ++#ifdef CONFIG_SCHED_BORE ++ if (task_sleep) { ++ cfs_rq = cfs_rq_of(se); ++ if (cfs_rq->curr == se) ++ update_curr(cfs_rq); ++ restart_burst(se); ++ } ++#endif // CONFIG_SCHED_BORE + + for_each_sched_entity(se) { + cfs_rq = cfs_rq_of(se); +@@ -7939,24 +8088,31 @@ static void yield_task_fair(struct rq *rq) + /* + * Are we the only task in the tree? + */ ++#if !defined(CONFIG_SCHED_BORE) + if (unlikely(rq->nr_running == 1)) + return; + + clear_buddies(cfs_rq, se); ++#endif // CONFIG_SCHED_BORE + +- if (curr->policy != SCHED_BATCH) { +- update_rq_clock(rq); +- /* +- * Update run-time statistics of the 'current'. +- */ +- update_curr(cfs_rq); +- /* +- * Tell update_rq_clock() that we've just updated, +- * so we don't do microscopic update in schedule() +- * and double the fastpath cost. +- */ +- rq_clock_skip_update(rq); +- } ++ update_rq_clock(rq); ++ /* ++ * Update run-time statistics of the 'current'. ++ */ ++ update_curr(cfs_rq); ++#ifdef CONFIG_SCHED_BORE ++ restart_burst(se); ++ if (unlikely(rq->nr_running == 1)) ++ return; ++ ++ clear_buddies(cfs_rq, se); ++#endif // CONFIG_SCHED_BORE ++ /* ++ * Tell update_rq_clock() that we've just updated, ++ * so we don't do microscopic update in schedule() ++ * and double the fastpath cost. 
++ */ ++ rq_clock_skip_update(rq); + + set_skip_buddy(se); + } +@@ -11869,6 +12025,9 @@ static void task_fork_fair(struct task_struct *p) + update_curr(cfs_rq); + se->vruntime = curr->vruntime; + } ++#ifdef CONFIG_SCHED_BORE ++ update_burst_score(se); ++#endif // CONFIG_SCHED_BORE + place_entity(cfs_rq, se, 1); + + if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) { +-- +2.34.1 + diff --git a/sys-kernel/linux-sources-redcore-lts/files/6.1-0002-constgran-vanilla-max.patch b/sys-kernel/linux-sources-redcore-lts/files/6.1-0002-constgran-vanilla-max.patch new file mode 100644 index 00000000..aa20644d --- /dev/null +++ b/sys-kernel/linux-sources-redcore-lts/files/6.1-0002-constgran-vanilla-max.patch @@ -0,0 +1,86 @@ +From 250ba4a23f392dd37c673fd3879c6281a3a62291 Mon Sep 17 00:00:00 2001 +From: Masahito S <firelzrd@gmail.com> +Date: Mon, 15 May 2023 06:00:52 +0900 +Subject: [PATCH] constgran vanilla-max sched: Make latency / granularity + constant tunable_scaling = 1 (logarithmic) -> 0 (constant) latency_ns = + variable 6-24ms -> constant 24ms min_granularity_ns = variable 0.75-3ms -> + constant 3ms wakeup_granularity_ns = variable 1-4ms -> constant 4ms + +--- + kernel/sched/fair.c | 31 +++++++++++++++++++++++++++---- + 1 file changed, 27 insertions(+), 4 deletions(-) + +diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c +index 8b5bf7280..340f8dd14 100644 +--- a/kernel/sched/fair.c ++++ b/kernel/sched/fair.c +@@ -36,10 +36,16 @@ + * (to see the precise effective timeslice length of your workload, + * run vmstat and monitor the context-switches (cs) field) + * +- * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds) ++ * (BORE default: 24ms constant, units: nanoseconds) ++ * (CFS default: 6ms * (1 + ilog(ncpus)), units: nanoseconds) + */ ++#ifdef CONFIG_SCHED_BORE ++unsigned int sysctl_sched_latency = 24000000ULL; ++static unsigned int normalized_sysctl_sched_latency = 24000000ULL; ++#else // CONFIG_SCHED_BORE + unsigned int sysctl_sched_latency = 6000000ULL; + static unsigned int normalized_sysctl_sched_latency = 6000000ULL; ++#endif // CONFIG_SCHED_BORE + + /* + * The initial- and re-scaling of tunables is configurable +@@ -50,17 +56,28 @@ static unsigned int normalized_sysctl_sched_latency = 6000000ULL; + * SCHED_TUNABLESCALING_LOG - scaled logarithmical, *1+ilog(ncpus) + * SCHED_TUNABLESCALING_LINEAR - scaled linear, *ncpus + * +- * (default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus)) ++ * (BORE default SCHED_TUNABLESCALING_NONE = *1 constant) ++ * (CFS default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus)) + */ ++#ifdef CONFIG_SCHED_BORE ++unsigned int sysctl_sched_tunable_scaling = SCHED_TUNABLESCALING_NONE; ++#else // CONFIG_SCHED_BORE + unsigned int sysctl_sched_tunable_scaling = SCHED_TUNABLESCALING_LOG; ++#endif // CONFIG_SCHED_BORE + + /* + * Minimal preemption granularity for CPU-bound tasks: + * +- * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds) ++ * (BORE default: 3 msec constant, units: nanoseconds) ++ * (CFS default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds) + */ ++#ifdef CONFIG_SCHED_BORE ++unsigned int sysctl_sched_min_granularity = 3000000ULL; ++static unsigned int normalized_sysctl_sched_min_granularity = 3000000ULL; ++#else // CONFIG_SCHED_BORE + unsigned int sysctl_sched_min_granularity = 750000ULL; + static unsigned int normalized_sysctl_sched_min_granularity = 750000ULL; ++#endif // CONFIG_SCHED_BORE + + /* + * This value is kept at sysctl_sched_latency/sysctl_sched_min_granularity +@@ -80,10 +97,16 @@ unsigned int 
sysctl_sched_child_runs_first __read_mostly; + * and reduces their over-scheduling. Synchronous workloads will still + * have immediate wakeup/sleep latencies. + * +- * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds) ++ * (BORE default: 4 msec constant, units: nanoseconds) ++ * (CFS default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds) + */ ++#ifdef CONFIG_SCHED_BORE ++unsigned int sysctl_sched_wakeup_granularity = 4000000UL; ++static unsigned int normalized_sysctl_sched_wakeup_granularity = 4000000UL; ++#else // CONFIG_SCHED_BORE + unsigned int sysctl_sched_wakeup_granularity = 1000000UL; + static unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL; ++#endif // CONFIG_SCHED_BORE + + const_debug unsigned int sysctl_sched_migration_cost = 500000UL; + +-- +2.25.1 + diff --git a/sys-kernel/linux-sources-redcore-lts/files/6.1-amd64.config b/sys-kernel/linux-sources-redcore-lts/files/6.1-amd64.config index dbb95b76..1a2598aa 100644 --- a/sys-kernel/linux-sources-redcore-lts/files/6.1-amd64.config +++ b/sys-kernel/linux-sources-redcore-lts/files/6.1-amd64.config @@ -1,6 +1,6 @@ # # Automatically generated file; DO NOT EDIT. -# Linux/x86 6.1.87-redcore-lts Kernel Configuration +# Linux/x86 6.1.87-redcore-lts-r1 Kernel Configuration # CONFIG_CC_VERSION_TEXT="gcc (Gentoo Hardened 13.2.0-r15 p3) 13.2.0" CONFIG_CC_IS_GCC=y @@ -231,6 +231,7 @@ CONFIG_USER_NS_UNPRIVILEGED=y CONFIG_PID_NS=y CONFIG_NET_NS=y CONFIG_CHECKPOINT_RESTORE=y +CONFIG_SCHED_BORE=y CONFIG_SCHED_AUTOGROUP=y # CONFIG_SYSFS_DEPRECATED is not set CONFIG_RELAY=y diff --git a/sys-kernel/linux-sources-redcore-lts/files/6.6-0001-linux6.6.y-bore5.1.0.patch b/sys-kernel/linux-sources-redcore-lts/files/6.6-0001-linux6.6.y-bore5.1.0.patch new file mode 100644 index 00000000..9ec609a8 --- /dev/null +++ b/sys-kernel/linux-sources-redcore-lts/files/6.6-0001-linux6.6.y-bore5.1.0.patch @@ -0,0 +1,768 @@ +From 8a12c22fd48d20301f22591bb36587499136717d Mon Sep 17 00:00:00 2001 +From: Masahito S <firelzrd@gmail.com> +Date: Mon, 22 Apr 2024 04:12:58 +0900 +Subject: [PATCH] linux6.6.y-bore5.1.0 + +--- + include/linux/sched.h | 10 ++ + init/Kconfig | 17 +++ + kernel/sched/core.c | 143 +++++++++++++++++++++++++ + kernel/sched/debug.c | 60 ++++++++++- + kernel/sched/fair.c | 230 ++++++++++++++++++++++++++++++++++++++-- + kernel/sched/features.h | 4 + + kernel/sched/sched.h | 7 ++ + 7 files changed, 462 insertions(+), 9 deletions(-) + +diff --git a/include/linux/sched.h b/include/linux/sched.h +index 77f01ac385..20fe8ee925 100644 +--- a/include/linux/sched.h ++++ b/include/linux/sched.h +@@ -559,6 +559,16 @@ struct sched_entity { + u64 sum_exec_runtime; + u64 prev_sum_exec_runtime; + u64 vruntime; ++#ifdef CONFIG_SCHED_BORE ++ u64 burst_time; ++ u8 prev_burst_penalty; ++ u8 curr_burst_penalty; ++ u8 burst_penalty; ++ u8 burst_score; ++ u8 child_burst; ++ u32 child_burst_cnt; ++ u64 child_burst_last_cached; ++#endif // CONFIG_SCHED_BORE + s64 vlag; + u64 slice; + +diff --git a/init/Kconfig b/init/Kconfig +index 6d35728b94..eb18e8f304 100644 +--- a/init/Kconfig ++++ b/init/Kconfig +@@ -1258,6 +1258,23 @@ config CHECKPOINT_RESTORE + + If unsure, say N here. + ++config SCHED_BORE ++ bool "Burst-Oriented Response Enhancer" ++ default y ++ help ++ In Desktop and Mobile computing, one might prefer interactive ++ tasks to keep responsive no matter what they run in the background. 
++ ++ Enabling this kernel feature modifies the scheduler to discriminate ++ tasks by their burst time (runtime since it last went sleeping or ++ yielding state) and prioritize those that run less bursty. ++ Such tasks usually include window compositor, widgets backend, ++ terminal emulator, video playback, games and so on. ++ With a little impact to scheduling fairness, it may improve ++ responsiveness especially under heavy background workload. ++ ++ If unsure, say Y here. ++ + config SCHED_AUTOGROUP + bool "Automatic process group scheduling" + select CGROUPS +diff --git a/kernel/sched/core.c b/kernel/sched/core.c +index a854b71836..40bf030523 100644 +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -4488,6 +4488,138 @@ int wake_up_state(struct task_struct *p, unsigned int state) + return try_to_wake_up(p, state, 0); + } + ++#ifdef CONFIG_SCHED_BORE ++extern u8 sched_burst_fork_atavistic; ++extern uint sched_burst_cache_lifetime; ++ ++static void __init sched_init_bore(void) { ++ init_task.se.burst_time = 0; ++ init_task.se.prev_burst_penalty = 0; ++ init_task.se.curr_burst_penalty = 0; ++ init_task.se.burst_penalty = 0; ++ init_task.se.burst_score = 0; ++ init_task.se.child_burst_last_cached = 0; ++} ++ ++void inline sched_fork_bore(struct task_struct *p) { ++ p->se.burst_time = 0; ++ p->se.curr_burst_penalty = 0; ++ p->se.burst_score = 0; ++ p->se.child_burst_last_cached = 0; ++} ++ ++static u32 count_child_tasks(struct task_struct *p) { ++ struct task_struct *child; ++ u32 cnt = 0; ++ list_for_each_entry(child, &p->children, sibling) {cnt++;} ++ return cnt; ++} ++ ++static inline bool task_is_inheritable(struct task_struct *p) { ++ return (p->sched_class == &fair_sched_class); ++} ++ ++static inline bool child_burst_cache_expired(struct task_struct *p, u64 now) { ++ u64 expiration_time = ++ p->se.child_burst_last_cached + sched_burst_cache_lifetime; ++ return ((s64)(expiration_time - now) < 0); ++} ++ ++static void __update_child_burst_cache( ++ struct task_struct *p, u32 cnt, u32 sum, u64 now) { ++ u8 avg = 0; ++ if (cnt) avg = sum / cnt; ++ p->se.child_burst = max(avg, p->se.burst_penalty); ++ p->se.child_burst_cnt = cnt; ++ p->se.child_burst_last_cached = now; ++} ++ ++static inline void update_child_burst_direct(struct task_struct *p, u64 now) { ++ struct task_struct *child; ++ u32 cnt = 0; ++ u32 sum = 0; ++ ++ list_for_each_entry(child, &p->children, sibling) { ++ if (!task_is_inheritable(child)) continue; ++ cnt++; ++ sum += child->se.burst_penalty; ++ } ++ ++ __update_child_burst_cache(p, cnt, sum, now); ++} ++ ++static inline u8 __inherit_burst_direct(struct task_struct *p, u64 now) { ++ struct task_struct *parent = p->real_parent; ++ if (child_burst_cache_expired(parent, now)) ++ update_child_burst_direct(parent, now); ++ ++ return parent->se.child_burst; ++} ++ ++static void update_child_burst_topological( ++ struct task_struct *p, u64 now, u32 depth, u32 *acnt, u32 *asum) { ++ struct task_struct *child, *dec; ++ u32 cnt = 0, dcnt = 0; ++ u32 sum = 0; ++ ++ list_for_each_entry(child, &p->children, sibling) { ++ dec = child; ++ while ((dcnt = count_child_tasks(dec)) == 1) ++ dec = list_first_entry(&dec->children, struct task_struct, sibling); ++ ++ if (!dcnt || !depth) { ++ if (!task_is_inheritable(dec)) continue; ++ cnt++; ++ sum += dec->se.burst_penalty; ++ continue; ++ } ++ if (!child_burst_cache_expired(dec, now)) { ++ cnt += dec->se.child_burst_cnt; ++ sum += (u32)dec->se.child_burst * dec->se.child_burst_cnt; ++ continue; ++ } ++ 
update_child_burst_topological(dec, now, depth - 1, &cnt, &sum); ++ } ++ ++ __update_child_burst_cache(p, cnt, sum, now); ++ *acnt += cnt; ++ *asum += sum; ++} ++ ++static inline u8 __inherit_burst_topological(struct task_struct *p, u64 now) { ++ struct task_struct *anc = p->real_parent; ++ u32 cnt = 0, sum = 0; ++ ++ while (anc->real_parent != anc && count_child_tasks(anc) == 1) ++ anc = anc->real_parent; ++ ++ if (child_burst_cache_expired(anc, now)) ++ update_child_burst_topological( ++ anc, now, sched_burst_fork_atavistic - 1, &cnt, &sum); ++ ++ return anc->se.child_burst; ++} ++ ++static inline void inherit_burst(struct task_struct *p) { ++ u8 burst_cache; ++ u64 now = ktime_get_ns(); ++ ++ read_lock(&tasklist_lock); ++ burst_cache = likely(sched_burst_fork_atavistic)? ++ __inherit_burst_topological(p, now): ++ __inherit_burst_direct(p, now); ++ read_unlock(&tasklist_lock); ++ ++ p->se.prev_burst_penalty = max(p->se.prev_burst_penalty, burst_cache); ++} ++ ++static void sched_post_fork_bore(struct task_struct *p) { ++ if (p->sched_class == &fair_sched_class) ++ inherit_burst(p); ++ p->se.burst_penalty = p->se.prev_burst_penalty; ++} ++#endif // CONFIG_SCHED_BORE ++ + /* + * Perform scheduler related setup for a newly forked process p. + * p is forked by current. +@@ -4504,6 +4636,9 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p) + p->se.prev_sum_exec_runtime = 0; + p->se.nr_migrations = 0; + p->se.vruntime = 0; ++#ifdef CONFIG_SCHED_BORE ++ sched_fork_bore(p); ++#endif // CONFIG_SCHED_BORE + p->se.vlag = 0; + p->se.slice = sysctl_sched_base_slice; + INIT_LIST_HEAD(&p->se.group_node); +@@ -4823,6 +4958,9 @@ void sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs) + + void sched_post_fork(struct task_struct *p) + { ++#ifdef CONFIG_SCHED_BORE ++ sched_post_fork_bore(p); ++#endif // CONFIG_SCHED_BORE + uclamp_post_fork(p); + } + +@@ -9922,6 +10060,11 @@ void __init sched_init(void) + BUG_ON(&dl_sched_class != &stop_sched_class + 1); + #endif + ++#ifdef CONFIG_SCHED_BORE ++ sched_init_bore(); ++ printk(KERN_INFO "BORE (Burst-Oriented Response Enhancer) CPU Scheduler modification 5.1.0 by Masahito Suzuki"); ++#endif // CONFIG_SCHED_BORE ++ + wait_bit_init(); + + #ifdef CONFIG_FAIR_GROUP_SCHED +diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c +index 4c3d0d9f3d..02c8816c26 100644 +--- a/kernel/sched/debug.c ++++ b/kernel/sched/debug.c +@@ -167,7 +167,52 @@ static const struct file_operations sched_feat_fops = { + }; + + #ifdef CONFIG_SMP ++#ifdef CONFIG_SCHED_BORE ++static ssize_t sched_min_base_slice_write(struct file *filp, const char __user *ubuf, ++ size_t cnt, loff_t *ppos) ++{ ++ char buf[16]; ++ unsigned int value; ++ ++ if (cnt > 15) ++ cnt = 15; ++ ++ if (copy_from_user(&buf, ubuf, cnt)) ++ return -EFAULT; ++ buf[cnt] = '\0'; ++ ++ if (kstrtouint(buf, 10, &value)) ++ return -EINVAL; + ++ if (!value) ++ return -EINVAL; ++ ++ sysctl_sched_min_base_slice = value; ++ sched_update_min_base_slice(); ++ ++ *ppos += cnt; ++ return cnt; ++} ++ ++static int sched_min_base_slice_show(struct seq_file *m, void *v) ++{ ++ seq_printf(m, "%d\n", sysctl_sched_min_base_slice); ++ return 0; ++} ++ ++static int sched_min_base_slice_open(struct inode *inode, struct file *filp) ++{ ++ return single_open(filp, sched_min_base_slice_show, NULL); ++} ++ ++static const struct file_operations sched_min_base_slice_fops = { ++ .open = sched_min_base_slice_open, ++ .write = sched_min_base_slice_write, ++ .read = seq_read, ++ .llseek = seq_lseek, ++ 
.release = single_release, ++}; ++#else // !CONFIG_SCHED_BORE + static ssize_t sched_scaling_write(struct file *filp, const char __user *ubuf, + size_t cnt, loff_t *ppos) + { +@@ -213,7 +258,7 @@ static const struct file_operations sched_scaling_fops = { + .llseek = seq_lseek, + .release = single_release, + }; +- ++#endif // CONFIG_SCHED_BORE + #endif /* SMP */ + + #ifdef CONFIG_PREEMPT_DYNAMIC +@@ -347,13 +392,20 @@ static __init int sched_init_debug(void) + debugfs_create_file("preempt", 0644, debugfs_sched, NULL, &sched_dynamic_fops); + #endif + ++#ifdef CONFIG_SCHED_BORE ++ debugfs_create_file("min_base_slice_ns", 0644, debugfs_sched, NULL, &sched_min_base_slice_fops); ++ debugfs_create_u32("base_slice_ns", 0400, debugfs_sched, &sysctl_sched_base_slice); ++#else // !CONFIG_SCHED_BORE + debugfs_create_u32("base_slice_ns", 0644, debugfs_sched, &sysctl_sched_base_slice); ++#endif // CONFIG_SCHED_BORE + + debugfs_create_u32("latency_warn_ms", 0644, debugfs_sched, &sysctl_resched_latency_warn_ms); + debugfs_create_u32("latency_warn_once", 0644, debugfs_sched, &sysctl_resched_latency_warn_once); + + #ifdef CONFIG_SMP ++#if !defined(CONFIG_SCHED_BORE) + debugfs_create_file("tunable_scaling", 0644, debugfs_sched, NULL, &sched_scaling_fops); ++#endif // CONFIG_SCHED_BORE + debugfs_create_u32("migration_cost_ns", 0644, debugfs_sched, &sysctl_sched_migration_cost); + debugfs_create_u32("nr_migrate", 0644, debugfs_sched, &sysctl_sched_nr_migrate); + +@@ -595,6 +647,9 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p) + SPLIT_NS(schedstat_val_or_zero(p->stats.sum_sleep_runtime)), + SPLIT_NS(schedstat_val_or_zero(p->stats.sum_block_runtime))); + ++#ifdef CONFIG_SCHED_BORE ++ SEQ_printf(m, " %2d", p->se.burst_score); ++#endif // CONFIG_SCHED_BORE + #ifdef CONFIG_NUMA_BALANCING + SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p)); + #endif +@@ -1068,6 +1123,9 @@ void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns, + + P(se.load.weight); + #ifdef CONFIG_SMP ++#ifdef CONFIG_SCHED_BORE ++ P(se.burst_score); ++#endif // CONFIG_SCHED_BORE + P(se.avg.load_sum); + P(se.avg.runnable_sum); + P(se.avg.util_sum); +diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c +index d336af9cba..3ef80eeb5e 100644 +--- a/kernel/sched/fair.c ++++ b/kernel/sched/fair.c +@@ -19,6 +19,9 @@ + * + * Adaptive scheduling granularity, math enhancements by Peter Zijlstra + * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra ++ * ++ * Burst-Oriented Response Enhancer (BORE) CPU Scheduler ++ * Copyright (C) 2021-2024 Masahito Suzuki <firelzrd@gmail.com> + */ + #include <linux/energy_model.h> + #include <linux/mmap_lock.h> +@@ -66,17 +69,29 @@ + * SCHED_TUNABLESCALING_LOG - scaled logarithmical, *1+ilog(ncpus) + * SCHED_TUNABLESCALING_LINEAR - scaled linear, *ncpus + * +- * (default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus)) ++ * (BORE default SCHED_TUNABLESCALING_NONE = *1 constant) ++ * (EEVDF default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus)) + */ ++#ifdef CONFIG_SCHED_BORE ++unsigned int sysctl_sched_tunable_scaling = SCHED_TUNABLESCALING_NONE; ++#else // !CONFIG_SCHED_BORE + unsigned int sysctl_sched_tunable_scaling = SCHED_TUNABLESCALING_LOG; ++#endif // CONFIG_SCHED_BORE + + /* + * Minimal preemption granularity for CPU-bound tasks: + * +- * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds) ++ * (BORE default: max(1 sec / HZ, min_base_slice) constant, units: nanoseconds) ++ * (EEVDF default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds) + */ ++#ifdef 
CONFIG_SCHED_BORE ++unsigned int sysctl_sched_base_slice = 1000000000ULL / HZ; ++static unsigned int configured_sched_base_slice = 1000000000ULL / HZ; ++unsigned int sysctl_sched_min_base_slice = 2000000ULL; ++#else // !CONFIG_SCHED_BORE + unsigned int sysctl_sched_base_slice = 750000ULL; + static unsigned int normalized_sysctl_sched_base_slice = 750000ULL; ++#endif // CONFIG_SCHED_BORE + + /* + * After fork, child runs first. If set to 0 (default) then +@@ -86,6 +101,99 @@ unsigned int sysctl_sched_child_runs_first __read_mostly; + + const_debug unsigned int sysctl_sched_migration_cost = 500000UL; + ++#ifdef CONFIG_SCHED_BORE ++u8 __read_mostly sched_bore = 1; ++u8 __read_mostly sched_burst_smoothness_long = 1; ++u8 __read_mostly sched_burst_smoothness_short = 0; ++u8 __read_mostly sched_burst_fork_atavistic = 2; ++u8 __read_mostly sched_burst_penalty_offset = 22; ++uint __read_mostly sched_burst_penalty_scale = 1280; ++uint __read_mostly sched_burst_cache_lifetime = 60000000; ++static int __maybe_unused sixty_four = 64; ++static int __maybe_unused maxval_12_bits = 4095; ++ ++#define MAX_BURST_PENALTY (39U <<2) ++ ++static inline u32 log2plus1_u64_u32f8(u64 v) { ++ u32 msb = fls64(v); ++ s32 excess_bits = msb - 9; ++ u8 fractional = (0 <= excess_bits)? v >> excess_bits: v << -excess_bits; ++ return msb << 8 | fractional; ++} ++ ++static inline u32 calc_burst_penalty(u64 burst_time) { ++ u32 greed, tolerance, penalty, scaled_penalty; ++ ++ greed = log2plus1_u64_u32f8(burst_time); ++ tolerance = sched_burst_penalty_offset << 8; ++ penalty = max(0, (s32)greed - (s32)tolerance); ++ scaled_penalty = penalty * sched_burst_penalty_scale >> 16; ++ ++ return min(MAX_BURST_PENALTY, scaled_penalty); ++} ++ ++static inline u64 scale_slice(u64 delta, struct sched_entity *se) { ++ return mul_u64_u32_shr(delta, sched_prio_to_wmult[se->burst_score], 22); ++} ++ ++static inline u64 __unscale_slice(u64 delta, u8 score) { ++ return mul_u64_u32_shr(delta, sched_prio_to_weight[score], 10); ++} ++ ++static inline u64 unscale_slice(u64 delta, struct sched_entity *se) { ++ return __unscale_slice(delta, se->burst_score); ++} ++ ++void reweight_task(struct task_struct *p, int prio); ++ ++static void update_burst_score(struct sched_entity *se) { ++ if (!entity_is_task(se)) return; ++ struct task_struct *p = task_of(se); ++ u8 prio = p->static_prio - MAX_RT_PRIO; ++ u8 prev_prio = min(39, prio + se->burst_score); ++ ++ se->burst_score = se->burst_penalty >> 2; ++ ++ u8 new_prio = min(39, prio + se->burst_score); ++ if (new_prio != prev_prio) ++ reweight_task(p, new_prio); ++} ++ ++static void update_burst_penalty(struct sched_entity *se) { ++ se->curr_burst_penalty = calc_burst_penalty(se->burst_time); ++ se->burst_penalty = max(se->prev_burst_penalty, se->curr_burst_penalty); ++ update_burst_score(se); ++} ++ ++static inline u32 binary_smooth(u32 new, u32 old) { ++ int increment = new - old; ++ return (0 <= increment)? 
++ old + ( increment >> (int)sched_burst_smoothness_long): ++ old - (-increment >> (int)sched_burst_smoothness_short); ++} ++ ++static void restart_burst(struct sched_entity *se) { ++ se->burst_penalty = se->prev_burst_penalty = ++ binary_smooth(se->curr_burst_penalty, se->prev_burst_penalty); ++ se->curr_burst_penalty = 0; ++ se->burst_time = 0; ++ update_burst_score(se); ++} ++ ++static void restart_burst_rescale_deadline(struct sched_entity *se) { ++ s64 vscaled, wremain, vremain = se->deadline - se->vruntime; ++ u8 prev_score = se->burst_score; ++ restart_burst(se); ++ if (prev_score > se->burst_score) { ++ wremain = __unscale_slice(abs(vremain), prev_score); ++ vscaled = scale_slice(wremain, se); ++ if (unlikely(vremain < 0)) ++ vscaled = -vscaled; ++ se->deadline = se->vruntime + vscaled; ++ } ++} ++#endif // CONFIG_SCHED_BORE ++ + int sched_thermal_decay_shift; + static int __init setup_sched_thermal_decay_shift(char *str) + { +@@ -145,6 +253,69 @@ static unsigned int sysctl_numa_balancing_promote_rate_limit = 65536; + + #ifdef CONFIG_SYSCTL + static struct ctl_table sched_fair_sysctls[] = { ++#ifdef CONFIG_SCHED_BORE ++ { ++ .procname = "sched_bore", ++ .data = &sched_bore, ++ .maxlen = sizeof(u8), ++ .mode = 0644, ++ .proc_handler = proc_dou8vec_minmax, ++ .extra1 = SYSCTL_ONE, ++ .extra2 = SYSCTL_ONE, ++ }, ++ { ++ .procname = "sched_burst_smoothness_long", ++ .data = &sched_burst_smoothness_long, ++ .maxlen = sizeof(u8), ++ .mode = 0644, ++ .proc_handler = proc_dou8vec_minmax, ++ .extra1 = SYSCTL_ZERO, ++ .extra2 = SYSCTL_ONE, ++ }, ++ { ++ .procname = "sched_burst_smoothness_short", ++ .data = &sched_burst_smoothness_short, ++ .maxlen = sizeof(u8), ++ .mode = 0644, ++ .proc_handler = proc_dou8vec_minmax, ++ .extra1 = SYSCTL_ZERO, ++ .extra2 = SYSCTL_ONE, ++ }, ++ { ++ .procname = "sched_burst_fork_atavistic", ++ .data = &sched_burst_fork_atavistic, ++ .maxlen = sizeof(u8), ++ .mode = 0644, ++ .proc_handler = proc_dou8vec_minmax, ++ .extra1 = SYSCTL_ZERO, ++ .extra2 = SYSCTL_THREE, ++ }, ++ { ++ .procname = "sched_burst_penalty_offset", ++ .data = &sched_burst_penalty_offset, ++ .maxlen = sizeof(u8), ++ .mode = 0644, ++ .proc_handler = proc_dou8vec_minmax, ++ .extra1 = SYSCTL_ZERO, ++ .extra2 = &sixty_four, ++ }, ++ { ++ .procname = "sched_burst_penalty_scale", ++ .data = &sched_burst_penalty_scale, ++ .maxlen = sizeof(uint), ++ .mode = 0644, ++ .proc_handler = proc_douintvec_minmax, ++ .extra1 = SYSCTL_ZERO, ++ .extra2 = &maxval_12_bits, ++ }, ++ { ++ .procname = "sched_burst_cache_lifetime", ++ .data = &sched_burst_cache_lifetime, ++ .maxlen = sizeof(uint), ++ .mode = 0644, ++ .proc_handler = proc_douintvec, ++ }, ++#endif // CONFIG_SCHED_BORE + { + .procname = "sched_child_runs_first", + .data = &sysctl_sched_child_runs_first, +@@ -210,6 +381,13 @@ static inline void update_load_set(struct load_weight *lw, unsigned long w) + * + * This idea comes from the SD scheduler of Con Kolivas: + */ ++#ifdef CONFIG_SCHED_BORE ++static void update_sysctl(void) { ++ sysctl_sched_base_slice = ++ max(sysctl_sched_min_base_slice, configured_sched_base_slice); ++} ++void sched_update_min_base_slice(void) { update_sysctl(); } ++#else // !CONFIG_SCHED_BORE + static unsigned int get_update_sysctl_factor(void) + { + unsigned int cpus = min_t(unsigned int, num_online_cpus(), 8); +@@ -240,6 +418,7 @@ static void update_sysctl(void) + SET_SYSCTL(sched_base_slice); + #undef SET_SYSCTL + } ++#endif // CONFIG_SCHED_BORE + + void __init sched_init_granularity(void) + { +@@ -715,6 +894,9 @@ static void 
update_entity_lag(struct cfs_rq *cfs_rq, struct sched_entity *se) + lag = avg_vruntime(cfs_rq) - se->vruntime; + + limit = calc_delta_fair(max_t(u64, 2*se->slice, TICK_NSEC), se); ++#ifdef CONFIG_SCHED_BORE ++ limit >>= 1; ++#endif // CONFIG_SCHED_BORE + se->vlag = clamp(lag, -limit, limit); + } + +@@ -996,6 +1178,7 @@ struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq) + * Scheduling class statistics methods: + */ + #ifdef CONFIG_SMP ++#if !defined(CONFIG_SCHED_BORE) + int sched_update_scaling(void) + { + unsigned int factor = get_update_sysctl_factor(); +@@ -1007,6 +1190,7 @@ int sched_update_scaling(void) + + return 0; + } ++#endif // CONFIG_SCHED_BORE + #endif + #endif + +@@ -1173,7 +1357,13 @@ static void update_curr(struct cfs_rq *cfs_rq) + curr->sum_exec_runtime += delta_exec; + schedstat_add(cfs_rq->exec_clock, delta_exec); + ++#ifdef CONFIG_SCHED_BORE ++ curr->burst_time += delta_exec; ++ update_burst_penalty(curr); ++ curr->vruntime += max(1ULL, calc_delta_fair(delta_exec, curr)); ++#else // !CONFIG_SCHED_BORE + curr->vruntime += calc_delta_fair(delta_exec, curr); ++#endif // CONFIG_SCHED_BORE + update_deadline(cfs_rq, curr); + update_min_vruntime(cfs_rq); + +@@ -3627,10 +3817,9 @@ dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { } + #endif + + static void reweight_eevdf(struct cfs_rq *cfs_rq, struct sched_entity *se, +- unsigned long weight) ++ unsigned long weight, u64 avruntime) + { + unsigned long old_weight = se->load.weight; +- u64 avruntime = avg_vruntime(cfs_rq); + s64 vlag, vslice; + + /* +@@ -3738,11 +3927,13 @@ static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, + { + bool curr = cfs_rq->curr == se; + ++ if (curr) ++ update_curr(cfs_rq); ++ u64 avruntime = avg_vruntime(cfs_rq); ++ + if (se->on_rq) { + /* commit outstanding execution time */ +- if (curr) +- update_curr(cfs_rq); +- else ++ if (!curr) + __dequeue_entity(cfs_rq, se); + update_load_sub(&cfs_rq->load, se->load.weight); + } +@@ -3755,7 +3946,7 @@ static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, + */ + se->vlag = div_s64(se->vlag * se->load.weight, weight); + } else { +- reweight_eevdf(cfs_rq, se, weight); ++ reweight_eevdf(cfs_rq, se, weight, avruntime); + } + + update_load_set(&se->load, weight); +@@ -5076,6 +5267,9 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) + * + * EEVDF: placement strategy #1 / #2 + */ ++#ifdef CONFIG_SCHED_BORE ++ if (se->vlag) ++#endif // CONFIG_SCHED_BORE + if (sched_feat(PLACE_LAG) && cfs_rq->nr_running) { + struct sched_entity *curr = cfs_rq->curr; + unsigned long load; +@@ -6698,6 +6892,14 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags) + bool was_sched_idle = sched_idle_rq(rq); + + util_est_dequeue(&rq->cfs, p); ++#ifdef CONFIG_SCHED_BORE ++ if (task_sleep) { ++ cfs_rq = cfs_rq_of(se); ++ if (cfs_rq->curr == se) ++ update_curr(cfs_rq); ++ restart_burst(se); ++ } ++#endif // CONFIG_SCHED_BORE + + for_each_sched_entity(se) { + cfs_rq = cfs_rq_of(se); +@@ -8429,16 +8631,25 @@ static void yield_task_fair(struct rq *rq) + /* + * Are we the only task in the tree? + */ ++#if !defined(CONFIG_SCHED_BORE) + if (unlikely(rq->nr_running == 1)) + return; + + clear_buddies(cfs_rq, se); ++#endif // CONFIG_SCHED_BORE + + update_rq_clock(rq); + /* + * Update run-time statistics of the 'current'. 
+ */ + update_curr(cfs_rq); ++#ifdef CONFIG_SCHED_BORE ++ restart_burst_rescale_deadline(se); ++ if (unlikely(rq->nr_running == 1)) ++ return; ++ ++ clear_buddies(cfs_rq, se); ++#endif // CONFIG_SCHED_BORE + /* + * Tell update_rq_clock() that we've just updated, + * so we don't do microscopic update in schedule() +@@ -12515,6 +12726,9 @@ static void task_fork_fair(struct task_struct *p) + curr = cfs_rq->curr; + if (curr) + update_curr(cfs_rq); ++#ifdef CONFIG_SCHED_BORE ++ update_burst_score(se); ++#endif // CONFIG_SCHED_BORE + place_entity(cfs_rq, se, ENQUEUE_INITIAL); + rq_unlock(rq, &rf); + } +diff --git a/kernel/sched/features.h b/kernel/sched/features.h +index f770168230..9979586b9a 100644 +--- a/kernel/sched/features.h ++++ b/kernel/sched/features.h +@@ -6,7 +6,11 @@ + */ + SCHED_FEAT(PLACE_LAG, true) + SCHED_FEAT(PLACE_DEADLINE_INITIAL, true) ++#ifdef CONFIG_SCHED_BORE ++SCHED_FEAT(RUN_TO_PARITY, false) ++#else // !CONFIG_SCHED_BORE + SCHED_FEAT(RUN_TO_PARITY, true) ++#endif // CONFIG_SCHED_BORE + + /* + * Prefer to schedule the task we woke last (assuming it failed +diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h +index 0484627240..71cc8c0795 100644 +--- a/kernel/sched/sched.h ++++ b/kernel/sched/sched.h +@@ -1945,7 +1945,11 @@ static inline void dirty_sched_domain_sysctl(int cpu) + } + #endif + ++#ifdef CONFIG_SCHED_BORE ++extern void sched_update_min_base_slice(void); ++#else // !CONFIG_SCHED_BORE + extern int sched_update_scaling(void); ++#endif // CONFIG_SCHED_BORE + + static inline const struct cpumask *task_user_cpus(struct task_struct *p) + { +@@ -2525,6 +2529,9 @@ extern const_debug unsigned int sysctl_sched_nr_migrate; + extern const_debug unsigned int sysctl_sched_migration_cost; + + extern unsigned int sysctl_sched_base_slice; ++#ifdef CONFIG_SCHED_BORE ++extern unsigned int sysctl_sched_min_base_slice; ++#endif // CONFIG_SCHED_BORE + + #ifdef CONFIG_SCHED_DEBUG + extern int sysctl_resched_latency_warn_ms; +-- +2.34.1 + diff --git a/sys-kernel/linux-sources-redcore-lts/files/6.6-amd64.config b/sys-kernel/linux-sources-redcore-lts/files/6.6-amd64.config index 15502964..681af895 100644 --- a/sys-kernel/linux-sources-redcore-lts/files/6.6-amd64.config +++ b/sys-kernel/linux-sources-redcore-lts/files/6.6-amd64.config @@ -1,6 +1,6 @@ # # Automatically generated file; DO NOT EDIT. -# Linux/x86 6.6.28-redcore-lts Kernel Configuration +# Linux/x86 6.6.28-redcore-lts-r1 Kernel Configuration # CONFIG_CC_VERSION_TEXT="gcc (Gentoo Hardened 13.2.0-r15 p3) 13.2.0" CONFIG_CC_IS_GCC=y @@ -231,6 +231,7 @@ CONFIG_USER_NS_UNPRIVILEGED=y CONFIG_PID_NS=y CONFIG_NET_NS=y CONFIG_CHECKPOINT_RESTORE=y +CONFIG_SCHED_BORE=y CONFIG_SCHED_AUTOGROUP=y CONFIG_RELAY=y CONFIG_BLK_DEV_INITRD=y |
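Note on the burst-penalty math added by this patch: calc_burst_penalty() takes the task's accumulated burst time (runtime since it last slept or yielded), converts it to a fixed-point log2 value via log2plus1_u64_u32f8(), subtracts the tolerance (sched_burst_penalty_offset << 8), scales the remainder by sched_burst_penalty_scale, and caps the result at MAX_BURST_PENALTY; update_burst_score() then shifts the penalty right by 2 and adds it to the task's static priority (capped at 39) for reweighting. The sketch below is a hedged userspace re-implementation of that arithmetic for exploring the curve with the patch's default tunables (offset 22, scale 1280); it is illustrative only, the in-kernel code above is the reference. fls64_() is a stand-in for the kernel's fls64() built on the GCC/Clang __builtin_clzll builtin, and the sample burst times are arbitrary.

/*
 * bore_penalty.c - illustrative userspace sketch of the BORE
 * burst-penalty calculation from the patch above. Not kernel code.
 */
#include <stdint.h>
#include <stdio.h>

#define MAX_BURST_PENALTY (39U << 2)

/* Userspace stand-in for the kernel's fls64(): 1-based index of the MSB. */
static uint32_t fls64_(uint64_t v)
{
	return v ? 64 - __builtin_clzll(v) : 0;
}

/* Fixed-point log2(v)+1 in 24.8 format, mirroring log2plus1_u64_u32f8(). */
static uint32_t log2plus1_u64_u32f8(uint64_t v)
{
	uint32_t msb = fls64_(v);
	int32_t excess_bits = msb - 9;
	uint8_t fractional = (excess_bits >= 0) ? v >> excess_bits
						: v << -excess_bits;
	return msb << 8 | fractional;
}

/* Mirrors calc_burst_penalty(): subtract tolerance, scale, cap. */
static uint32_t calc_burst_penalty(uint64_t burst_time_ns,
				   uint8_t offset, uint32_t scale)
{
	uint32_t greed = log2plus1_u64_u32f8(burst_time_ns);
	uint32_t tolerance = (uint32_t)offset << 8;
	uint32_t penalty = (greed > tolerance) ? greed - tolerance : 0;
	uint32_t scaled = penalty * scale >> 16;
	return scaled < MAX_BURST_PENALTY ? scaled : MAX_BURST_PENALTY;
}

int main(void)
{
	/* Arbitrary sample burst times: 1 us, 1 ms, 100 ms, 10 s. */
	uint64_t samples[] = { 1000ULL, 1000000ULL,
			       100000000ULL, 10000000000ULL };
	unsigned i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		uint32_t p = calc_burst_penalty(samples[i], 22, 1280);
		/* burst_score = penalty >> 2, as in update_burst_score(). */
		printf("burst %12llu ns -> penalty %3u, burst_score %2u\n",
		       (unsigned long long)samples[i], p, p >> 2);
	}
	return 0;
}

Built with any GCC- or Clang-compatible compiler (e.g. cc -O2 bore_penalty.c), the program shows that with the default offset of 22 no penalty accrues until the burst time passes roughly 2^21 ns (about 2 ms), after which the score grows logarithmically toward the cap; the resulting burst_score feeds the prio + burst_score reweighting (capped at nice-level index 39) seen in update_burst_score() above. The corresponding runtime tunables registered by the patch (sched_bore, sched_burst_penalty_offset, sched_burst_penalty_scale, and the smoothness/fork/cache knobs) appear under the kernel sysctl table, so on a booted redcore-lts kernel with CONFIG_SCHED_BORE=y they are expected to be adjustable via sysctl, e.g. kernel.sched_burst_penalty_offset.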