author    | V3n3RiX <venerix@koprulu.sector> | 2024-04-27 15:31:48 +0100
committer | V3n3RiX <venerix@koprulu.sector> | 2024-04-27 15:31:48 +0100
commit    | 5e2fbf3a6be51fca0d8bb3e58ce4e2af0c0441b8 (patch)
tree      | 0554665f0d8fc9105e20e517d216a1b6782341b4
parent    | 326d73402801de84140ae8bfad77009e69ee16c1 (diff)
sys-kernel/linux-{image,sources}-redcore : revision bump, use the BORE scheduler (https://github.com/firelzrd/bore-scheduler)
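For quick reference, the BORE patch bundled below registers a set of runtime tunables through the scheduler's sysctl table. A hedged sketch of inspecting them follows; the tunable names and defaults are taken verbatim from the `sched_fair_sysctls` entries in the fair.c hunk of the patch, while the `kernel.` prefix is an assumption that they land in the usual "kernel" sysctl namespace:

```sh
sysctl kernel.sched_bore                   # 1; min and max are both 1, so effectively pinned on
sysctl kernel.sched_burst_smoothness_long  # 1
sysctl kernel.sched_burst_smoothness_short # 0
sysctl kernel.sched_burst_fork_atavistic   # 2; 0 falls back to direct child-burst inheritance
sysctl kernel.sched_burst_penalty_offset   # 22 (range 0..64)
sysctl kernel.sched_burst_penalty_scale    # 1280 (range 0..4095)
sysctl kernel.sched_burst_cache_lifetime   # 60000000 ns
```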
-rw-r--r-- | metadata/md5-cache/sys-kernel/linux-image-redcore-6.8.7-r1 (renamed from metadata/md5-cache/sys-kernel/linux-image-redcore-6.8.7) | 2
-rw-r--r-- | metadata/md5-cache/sys-kernel/linux-sources-redcore-6.8.7-r1 (renamed from metadata/md5-cache/sys-kernel/linux-sources-redcore-6.8.7) | 2
-rw-r--r-- | metadata/pkg_desc_index | 4
-rw-r--r-- | sys-kernel/linux-image-redcore/files/6.8-0001-linux6.8.y-bore5.1.0.patch | 764
-rw-r--r-- | sys-kernel/linux-image-redcore/files/6.8-amd64.config | 1
-rw-r--r-- | sys-kernel/linux-image-redcore/linux-image-redcore-6.8.7-r1.ebuild (renamed from sys-kernel/linux-image-redcore/linux-image-redcore-6.8.7.ebuild) | 3
-rw-r--r-- | sys-kernel/linux-sources-redcore/files/6.8-0001-linux6.8.y-bore5.1.0.patch | 764
-rw-r--r-- | sys-kernel/linux-sources-redcore/files/6.8-amd64.config | 1
-rw-r--r-- | sys-kernel/linux-sources-redcore/linux-sources-redcore-6.8.7-r1.ebuild (renamed from sys-kernel/linux-sources-redcore/linux-sources-redcore-6.8.7.ebuild) | 3
9 files changed, 1538 insertions, 6 deletions
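Before the raw diff: the core of the patch is the burst-penalty math added to kernel/sched/fair.c. A task's accumulated runtime since it last slept or yielded (its "burst time") is mapped through a fixed-point log2 to a penalty capped at 39 << 2 = 156, and penalty >> 2 becomes a 0..39 score that reweights the task like a nice offset. The sketch below is a userspace re-implementation of those two helpers so the curve can be inspected; it is not kernel code: fls64() is emulated with __builtin_clzll(), and offset 22 / scale 1280 are the patch defaults.

```c
#include <stdint.h>
#include <stdio.h>

#define MAX_BURST_PENALTY (39U << 2)   /* 156, i.e. burst_score caps at 39 */

/* fls64() equivalent: 1-based index of the most significant set bit. */
static uint32_t fls64_(uint64_t v) { return v ? 64 - __builtin_clzll(v) : 0; }

/* 1 + floor(log2(v)) with 8 fractional bits, mirroring the patch's
 * log2plus1_u64_u32f8(); the fractional part truncates to u8 as upstream. */
static uint32_t log2plus1_u64_u32f8(uint64_t v)
{
	uint32_t msb = fls64_(v);
	int32_t excess = msb - 9;
	uint8_t frac = (excess >= 0) ? v >> excess : v << -excess;
	return msb << 8 | frac;
}

/* Mirror of calc_burst_penalty(): subtract the offset ("tolerance"),
 * apply the scale, and clamp to MAX_BURST_PENALTY. */
static uint32_t calc_burst_penalty(uint64_t burst_ns, uint32_t offset, uint32_t scale)
{
	uint32_t greed = log2plus1_u64_u32f8(burst_ns);
	uint32_t tolerance = offset << 8;
	int32_t penalty = (int32_t)greed - (int32_t)tolerance;
	if (penalty < 0)
		penalty = 0;
	uint32_t scaled = (uint32_t)penalty * scale >> 16;
	return scaled < MAX_BURST_PENALTY ? scaled : MAX_BURST_PENALTY;
}

int main(void)
{
	/* With the defaults, the penalty stays zero until roughly 2^21 ns
	 * (about 2 ms) of accumulated burst, then grows logarithmically. */
	for (uint64_t ns = 1000000ULL; ns <= 4000000000ULL; ns *= 4) {
		uint32_t p = calc_burst_penalty(ns, 22, 1280);
		printf("%11llu ns -> penalty %3u, score %2u\n",
		       (unsigned long long)ns, p, p >> 2);
	}
	return 0;
}
```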
diff --git a/metadata/md5-cache/sys-kernel/linux-image-redcore-6.8.7 b/metadata/md5-cache/sys-kernel/linux-image-redcore-6.8.7-r1 index 02ba16bd..369eaeb2 100644 --- a/metadata/md5-cache/sys-kernel/linux-image-redcore-6.8.7 +++ b/metadata/md5-cache/sys-kernel/linux-image-redcore-6.8.7-r1 @@ -12,4 +12,4 @@ RESTRICT=binchecks strip mirror SLOT=6.8 SRC_URI=https://cdn.kernel.org/pub/linux/kernel/v6.x/linux-6.8.7.tar.xz _eclasses_=desktop 021728fdc1b03b36357dbc89489e0f0d edos2unix 33e347e171066657f91f8b0c72ec8773 epatch 2b02655f061dfa25067b543539110259 eqawarn c9847c43b3253a276ae2eabddedab3d7 estack c61c368a76fdf3a82fdf8dbaebea3804 eutils 27d6d8292d4e729f95acaddba111de88 ltprune 97143780d341cc8d8f1d4c6187a36d29 multilib c19072c3cd7ac5cb21de013f7e9832e0 preserve-libs 21162ec96c87041004a75348d97342dd strip-linguas ac3ee41ee2d31d8c41a77c0838320cc7 toolchain-funcs e56c7649b804f051623c8bc1a1c44084 vcs-clean d271b7bc7e6a009758d7d4ef749174e3 wrapper 4a1902f969e5718126434fc35f3a0d9c -_md5_=3d600db74d353e7c65224f69a0144f60 +_md5_=9aac1c5df982c3a7ab7e0ca5e4530578 diff --git a/metadata/md5-cache/sys-kernel/linux-sources-redcore-6.8.7 b/metadata/md5-cache/sys-kernel/linux-sources-redcore-6.8.7-r1 index 72ad6253..3b60988b 100644 --- a/metadata/md5-cache/sys-kernel/linux-sources-redcore-6.8.7 +++ b/metadata/md5-cache/sys-kernel/linux-sources-redcore-6.8.7-r1 @@ -11,4 +11,4 @@ RESTRICT=strip mirror SLOT=6.8 SRC_URI=https://cdn.kernel.org/pub/linux/kernel/v6.x/linux-6.8.7.tar.xz _eclasses_=desktop 021728fdc1b03b36357dbc89489e0f0d edos2unix 33e347e171066657f91f8b0c72ec8773 epatch 2b02655f061dfa25067b543539110259 eqawarn c9847c43b3253a276ae2eabddedab3d7 estack c61c368a76fdf3a82fdf8dbaebea3804 eutils 27d6d8292d4e729f95acaddba111de88 ltprune 97143780d341cc8d8f1d4c6187a36d29 multilib c19072c3cd7ac5cb21de013f7e9832e0 preserve-libs 21162ec96c87041004a75348d97342dd strip-linguas ac3ee41ee2d31d8c41a77c0838320cc7 toolchain-funcs e56c7649b804f051623c8bc1a1c44084 vcs-clean d271b7bc7e6a009758d7d4ef749174e3 wrapper 4a1902f969e5718126434fc35f3a0d9c -_md5_=367f1124bd8a6bb5df1202919830b0bd +_md5_=d58292f21fbccd1a478b98d19b74e1dc diff --git a/metadata/pkg_desc_index b/metadata/pkg_desc_index index 84f80be6..4f49a394 100644 --- a/metadata/pkg_desc_index +++ b/metadata/pkg_desc_index @@ -66,9 +66,9 @@ sys-fs/zfs-utils 2.2.3: Userland utilities for ZFS Linux kernel module sys-kernel/bcmwl-dkms 6.30.223.271-r10: Broadcom's IEEE 802.11a/b/g/n hybrid Linux device driver source sys-kernel/dkms 2.3-r3: Dynamic Kernel Module Support sys-kernel/dracut 059-r15: Generic initramfs generation tool -sys-kernel/linux-image-redcore 6.7.12 6.8.7: Redcore Linux Kernel Image +sys-kernel/linux-image-redcore 6.7.12 6.8.7-r1: Redcore Linux Kernel Image sys-kernel/linux-image-redcore-lts 5.15.156 6.1.87 6.6.28: Redcore Linux LTS Kernel Image -sys-kernel/linux-sources-redcore 6.7.12 6.8.7: Redcore Linux Kernel Sources +sys-kernel/linux-sources-redcore 6.7.12 6.8.7-r1: Redcore Linux Kernel Sources sys-kernel/linux-sources-redcore-lts 5.15.156 6.1.87 6.6.28: Redcore Linux LTS Kernel Sources sys-kernel/nvidia-drivers-dkms 390.157-r6 470.239.06 535.171.04: NVIDIA driver sources for linux sys-kernel/tp_smapi-dkms 0.44: IBM ThinkPad SMAPI BIOS driver sources diff --git a/sys-kernel/linux-image-redcore/files/6.8-0001-linux6.8.y-bore5.1.0.patch b/sys-kernel/linux-image-redcore/files/6.8-0001-linux6.8.y-bore5.1.0.patch new file mode 100644 index 00000000..0fffdcea --- /dev/null +++ b/sys-kernel/linux-image-redcore/files/6.8-0001-linux6.8.y-bore5.1.0.patch @@ 
-0,0 +1,764 @@ +From feae72fd7f2403910c157dd679d6ec240ed1dfbf Mon Sep 17 00:00:00 2001 +From: Masahito S <firelzrd@gmail.com> +Date: Mon, 22 Apr 2024 04:12:58 +0900 +Subject: [PATCH] linux6.8.y-bore5.1.0 + +--- + include/linux/sched.h | 10 ++ + init/Kconfig | 17 +++ + kernel/sched/core.c | 143 +++++++++++++++++++++++++ + kernel/sched/debug.c | 60 ++++++++++- + kernel/sched/fair.c | 230 ++++++++++++++++++++++++++++++++++++++-- + kernel/sched/features.h | 4 + + kernel/sched/sched.h | 7 ++ + 7 files changed, 462 insertions(+), 9 deletions(-) + +diff --git a/include/linux/sched.h b/include/linux/sched.h +index ffe8f618ab..0ab0b04240 100644 +--- a/include/linux/sched.h ++++ b/include/linux/sched.h +@@ -547,6 +547,16 @@ struct sched_entity { + u64 sum_exec_runtime; + u64 prev_sum_exec_runtime; + u64 vruntime; ++#ifdef CONFIG_SCHED_BORE ++ u64 burst_time; ++ u8 prev_burst_penalty; ++ u8 curr_burst_penalty; ++ u8 burst_penalty; ++ u8 burst_score; ++ u8 child_burst; ++ u32 child_burst_cnt; ++ u64 child_burst_last_cached; ++#endif // CONFIG_SCHED_BORE + s64 vlag; + u64 slice; + +diff --git a/init/Kconfig b/init/Kconfig +index bee58f7468..13427dbb48 100644 +--- a/init/Kconfig ++++ b/init/Kconfig +@@ -1279,6 +1279,23 @@ config CHECKPOINT_RESTORE + + If unsure, say N here. + ++config SCHED_BORE ++ bool "Burst-Oriented Response Enhancer" ++ default y ++ help ++ In Desktop and Mobile computing, one might prefer interactive ++ tasks to keep responsive no matter what they run in the background. ++ ++ Enabling this kernel feature modifies the scheduler to discriminate ++ tasks by their burst time (runtime since it last went sleeping or ++ yielding state) and prioritize those that run less bursty. ++ Such tasks usually include window compositor, widgets backend, ++ terminal emulator, video playback, games and so on. ++ With a little impact to scheduling fairness, it may improve ++ responsiveness especially under heavy background workload. ++ ++ If unsure, say Y here. 
++ + config SCHED_AUTOGROUP + bool "Automatic process group scheduling" + select CGROUPS +diff --git a/kernel/sched/core.c b/kernel/sched/core.c +index 9116bcc903..d1711f75f8 100644 +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -4507,6 +4507,138 @@ int wake_up_state(struct task_struct *p, unsigned int state) + return try_to_wake_up(p, state, 0); + } + ++#ifdef CONFIG_SCHED_BORE ++extern u8 sched_burst_fork_atavistic; ++extern uint sched_burst_cache_lifetime; ++ ++static void __init sched_init_bore(void) { ++ init_task.se.burst_time = 0; ++ init_task.se.prev_burst_penalty = 0; ++ init_task.se.curr_burst_penalty = 0; ++ init_task.se.burst_penalty = 0; ++ init_task.se.burst_score = 0; ++ init_task.se.child_burst_last_cached = 0; ++} ++ ++void inline sched_fork_bore(struct task_struct *p) { ++ p->se.burst_time = 0; ++ p->se.curr_burst_penalty = 0; ++ p->se.burst_score = 0; ++ p->se.child_burst_last_cached = 0; ++} ++ ++static u32 count_child_tasks(struct task_struct *p) { ++ struct task_struct *child; ++ u32 cnt = 0; ++ list_for_each_entry(child, &p->children, sibling) {cnt++;} ++ return cnt; ++} ++ ++static inline bool task_is_inheritable(struct task_struct *p) { ++ return (p->sched_class == &fair_sched_class); ++} ++ ++static inline bool child_burst_cache_expired(struct task_struct *p, u64 now) { ++ u64 expiration_time = ++ p->se.child_burst_last_cached + sched_burst_cache_lifetime; ++ return ((s64)(expiration_time - now) < 0); ++} ++ ++static void __update_child_burst_cache( ++ struct task_struct *p, u32 cnt, u32 sum, u64 now) { ++ u8 avg = 0; ++ if (cnt) avg = sum / cnt; ++ p->se.child_burst = max(avg, p->se.burst_penalty); ++ p->se.child_burst_cnt = cnt; ++ p->se.child_burst_last_cached = now; ++} ++ ++static inline void update_child_burst_direct(struct task_struct *p, u64 now) { ++ struct task_struct *child; ++ u32 cnt = 0; ++ u32 sum = 0; ++ ++ list_for_each_entry(child, &p->children, sibling) { ++ if (!task_is_inheritable(child)) continue; ++ cnt++; ++ sum += child->se.burst_penalty; ++ } ++ ++ __update_child_burst_cache(p, cnt, sum, now); ++} ++ ++static inline u8 __inherit_burst_direct(struct task_struct *p, u64 now) { ++ struct task_struct *parent = p->real_parent; ++ if (child_burst_cache_expired(parent, now)) ++ update_child_burst_direct(parent, now); ++ ++ return parent->se.child_burst; ++} ++ ++static void update_child_burst_topological( ++ struct task_struct *p, u64 now, u32 depth, u32 *acnt, u32 *asum) { ++ struct task_struct *child, *dec; ++ u32 cnt = 0, dcnt = 0; ++ u32 sum = 0; ++ ++ list_for_each_entry(child, &p->children, sibling) { ++ dec = child; ++ while ((dcnt = count_child_tasks(dec)) == 1) ++ dec = list_first_entry(&dec->children, struct task_struct, sibling); ++ ++ if (!dcnt || !depth) { ++ if (!task_is_inheritable(dec)) continue; ++ cnt++; ++ sum += dec->se.burst_penalty; ++ continue; ++ } ++ if (!child_burst_cache_expired(dec, now)) { ++ cnt += dec->se.child_burst_cnt; ++ sum += (u32)dec->se.child_burst * dec->se.child_burst_cnt; ++ continue; ++ } ++ update_child_burst_topological(dec, now, depth - 1, &cnt, &sum); ++ } ++ ++ __update_child_burst_cache(p, cnt, sum, now); ++ *acnt += cnt; ++ *asum += sum; ++} ++ ++static inline u8 __inherit_burst_topological(struct task_struct *p, u64 now) { ++ struct task_struct *anc = p->real_parent; ++ u32 cnt = 0, sum = 0; ++ ++ while (anc->real_parent != anc && count_child_tasks(anc) == 1) ++ anc = anc->real_parent; ++ ++ if (child_burst_cache_expired(anc, now)) ++ update_child_burst_topological( ++ anc, now, 
sched_burst_fork_atavistic - 1, &cnt, &sum); ++ ++ return anc->se.child_burst; ++} ++ ++static inline void inherit_burst(struct task_struct *p) { ++ u8 burst_cache; ++ u64 now = ktime_get_ns(); ++ ++ read_lock(&tasklist_lock); ++ burst_cache = likely(sched_burst_fork_atavistic)? ++ __inherit_burst_topological(p, now): ++ __inherit_burst_direct(p, now); ++ read_unlock(&tasklist_lock); ++ ++ p->se.prev_burst_penalty = max(p->se.prev_burst_penalty, burst_cache); ++} ++ ++static void sched_post_fork_bore(struct task_struct *p) { ++ if (p->sched_class == &fair_sched_class) ++ inherit_burst(p); ++ p->se.burst_penalty = p->se.prev_burst_penalty; ++} ++#endif // CONFIG_SCHED_BORE ++ + /* + * Perform scheduler related setup for a newly forked process p. + * p is forked by current. +@@ -4523,6 +4655,9 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p) + p->se.prev_sum_exec_runtime = 0; + p->se.nr_migrations = 0; + p->se.vruntime = 0; ++#ifdef CONFIG_SCHED_BORE ++ sched_fork_bore(p); ++#endif // CONFIG_SCHED_BORE + p->se.vlag = 0; + p->se.slice = sysctl_sched_base_slice; + INIT_LIST_HEAD(&p->se.group_node); +@@ -4839,6 +4974,9 @@ void sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs) + + void sched_post_fork(struct task_struct *p) + { ++#ifdef CONFIG_SCHED_BORE ++ sched_post_fork_bore(p); ++#endif // CONFIG_SCHED_BORE + uclamp_post_fork(p); + } + +@@ -9910,6 +10048,11 @@ void __init sched_init(void) + BUG_ON(&dl_sched_class != &stop_sched_class + 1); + #endif + ++#ifdef CONFIG_SCHED_BORE ++ sched_init_bore(); ++ printk(KERN_INFO "BORE (Burst-Oriented Response Enhancer) CPU Scheduler modification 5.1.0 by Masahito Suzuki"); ++#endif // CONFIG_SCHED_BORE ++ + wait_bit_init(); + + #ifdef CONFIG_FAIR_GROUP_SCHED +diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c +index 8d5d98a583..b178612617 100644 +--- a/kernel/sched/debug.c ++++ b/kernel/sched/debug.c +@@ -167,7 +167,52 @@ static const struct file_operations sched_feat_fops = { + }; + + #ifdef CONFIG_SMP ++#ifdef CONFIG_SCHED_BORE ++static ssize_t sched_min_base_slice_write(struct file *filp, const char __user *ubuf, ++ size_t cnt, loff_t *ppos) ++{ ++ char buf[16]; ++ unsigned int value; ++ ++ if (cnt > 15) ++ cnt = 15; ++ ++ if (copy_from_user(&buf, ubuf, cnt)) ++ return -EFAULT; ++ buf[cnt] = '\0'; ++ ++ if (kstrtouint(buf, 10, &value)) ++ return -EINVAL; + ++ if (!value) ++ return -EINVAL; ++ ++ sysctl_sched_min_base_slice = value; ++ sched_update_min_base_slice(); ++ ++ *ppos += cnt; ++ return cnt; ++} ++ ++static int sched_min_base_slice_show(struct seq_file *m, void *v) ++{ ++ seq_printf(m, "%d\n", sysctl_sched_min_base_slice); ++ return 0; ++} ++ ++static int sched_min_base_slice_open(struct inode *inode, struct file *filp) ++{ ++ return single_open(filp, sched_min_base_slice_show, NULL); ++} ++ ++static const struct file_operations sched_min_base_slice_fops = { ++ .open = sched_min_base_slice_open, ++ .write = sched_min_base_slice_write, ++ .read = seq_read, ++ .llseek = seq_lseek, ++ .release = single_release, ++}; ++#else // !CONFIG_SCHED_BORE + static ssize_t sched_scaling_write(struct file *filp, const char __user *ubuf, + size_t cnt, loff_t *ppos) + { +@@ -213,7 +258,7 @@ static const struct file_operations sched_scaling_fops = { + .llseek = seq_lseek, + .release = single_release, + }; +- ++#endif // CONFIG_SCHED_BORE + #endif /* SMP */ + + #ifdef CONFIG_PREEMPT_DYNAMIC +@@ -347,13 +392,20 @@ static __init int sched_init_debug(void) + debugfs_create_file("preempt", 0644, 
debugfs_sched, NULL, &sched_dynamic_fops); + #endif + ++#ifdef CONFIG_SCHED_BORE ++ debugfs_create_file("min_base_slice_ns", 0644, debugfs_sched, NULL, &sched_min_base_slice_fops); ++ debugfs_create_u32("base_slice_ns", 0400, debugfs_sched, &sysctl_sched_base_slice); ++#else // !CONFIG_SCHED_BORE + debugfs_create_u32("base_slice_ns", 0644, debugfs_sched, &sysctl_sched_base_slice); ++#endif // CONFIG_SCHED_BORE + + debugfs_create_u32("latency_warn_ms", 0644, debugfs_sched, &sysctl_resched_latency_warn_ms); + debugfs_create_u32("latency_warn_once", 0644, debugfs_sched, &sysctl_resched_latency_warn_once); + + #ifdef CONFIG_SMP ++#if !defined(CONFIG_SCHED_BORE) + debugfs_create_file("tunable_scaling", 0644, debugfs_sched, NULL, &sched_scaling_fops); ++#endif // CONFIG_SCHED_BORE + debugfs_create_u32("migration_cost_ns", 0644, debugfs_sched, &sysctl_sched_migration_cost); + debugfs_create_u32("nr_migrate", 0644, debugfs_sched, &sysctl_sched_nr_migrate); + +@@ -595,6 +647,9 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p) + SPLIT_NS(schedstat_val_or_zero(p->stats.sum_sleep_runtime)), + SPLIT_NS(schedstat_val_or_zero(p->stats.sum_block_runtime))); + ++#ifdef CONFIG_SCHED_BORE ++ SEQ_printf(m, " %2d", p->se.burst_score); ++#endif // CONFIG_SCHED_BORE + #ifdef CONFIG_NUMA_BALANCING + SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p)); + #endif +@@ -1068,6 +1123,9 @@ void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns, + + P(se.load.weight); + #ifdef CONFIG_SMP ++#ifdef CONFIG_SCHED_BORE ++ P(se.burst_score); ++#endif // CONFIG_SCHED_BORE + P(se.avg.load_sum); + P(se.avg.runnable_sum); + P(se.avg.util_sum); +diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c +index 533547e3c9..a2346b1b44 100644 +--- a/kernel/sched/fair.c ++++ b/kernel/sched/fair.c +@@ -19,6 +19,9 @@ + * + * Adaptive scheduling granularity, math enhancements by Peter Zijlstra + * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra ++ * ++ * Burst-Oriented Response Enhancer (BORE) CPU Scheduler ++ * Copyright (C) 2021-2024 Masahito Suzuki <firelzrd@gmail.com> + */ + #include <linux/energy_model.h> + #include <linux/mmap_lock.h> +@@ -64,20 +67,125 @@ + * SCHED_TUNABLESCALING_LOG - scaled logarithmical, *1+ilog(ncpus) + * SCHED_TUNABLESCALING_LINEAR - scaled linear, *ncpus + * +- * (default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus)) ++ * (BORE default SCHED_TUNABLESCALING_NONE = *1 constant) ++ * (EEVDF default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus)) + */ ++#ifdef CONFIG_SCHED_BORE ++unsigned int sysctl_sched_tunable_scaling = SCHED_TUNABLESCALING_NONE; ++#else // !CONFIG_SCHED_BORE + unsigned int sysctl_sched_tunable_scaling = SCHED_TUNABLESCALING_LOG; ++#endif // CONFIG_SCHED_BORE + + /* + * Minimal preemption granularity for CPU-bound tasks: + * +- * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds) ++ * (BORE default: max(1 sec / HZ, min_base_slice) constant, units: nanoseconds) ++ * (EEVDF default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds) + */ ++#ifdef CONFIG_SCHED_BORE ++unsigned int sysctl_sched_base_slice = 1000000000ULL / HZ; ++static unsigned int configured_sched_base_slice = 1000000000ULL / HZ; ++unsigned int sysctl_sched_min_base_slice = 2000000ULL; ++#else // !CONFIG_SCHED_BORE + unsigned int sysctl_sched_base_slice = 750000ULL; + static unsigned int normalized_sysctl_sched_base_slice = 750000ULL; ++#endif // CONFIG_SCHED_BORE + + const_debug unsigned int sysctl_sched_migration_cost = 500000UL; + ++#ifdef CONFIG_SCHED_BORE ++u8 __read_mostly 
sched_bore = 1; ++u8 __read_mostly sched_burst_smoothness_long = 1; ++u8 __read_mostly sched_burst_smoothness_short = 0; ++u8 __read_mostly sched_burst_fork_atavistic = 2; ++u8 __read_mostly sched_burst_penalty_offset = 22; ++uint __read_mostly sched_burst_penalty_scale = 1280; ++uint __read_mostly sched_burst_cache_lifetime = 60000000; ++static int __maybe_unused sixty_four = 64; ++static int __maybe_unused maxval_12_bits = 4095; ++ ++#define MAX_BURST_PENALTY (39U <<2) ++ ++static inline u32 log2plus1_u64_u32f8(u64 v) { ++ u32 msb = fls64(v); ++ s32 excess_bits = msb - 9; ++ u8 fractional = (0 <= excess_bits)? v >> excess_bits: v << -excess_bits; ++ return msb << 8 | fractional; ++} ++ ++static inline u32 calc_burst_penalty(u64 burst_time) { ++ u32 greed, tolerance, penalty, scaled_penalty; ++ ++ greed = log2plus1_u64_u32f8(burst_time); ++ tolerance = sched_burst_penalty_offset << 8; ++ penalty = max(0, (s32)greed - (s32)tolerance); ++ scaled_penalty = penalty * sched_burst_penalty_scale >> 16; ++ ++ return min(MAX_BURST_PENALTY, scaled_penalty); ++} ++ ++static inline u64 scale_slice(u64 delta, struct sched_entity *se) { ++ return mul_u64_u32_shr(delta, sched_prio_to_wmult[se->burst_score], 22); ++} ++ ++static inline u64 __unscale_slice(u64 delta, u8 score) { ++ return mul_u64_u32_shr(delta, sched_prio_to_weight[score], 10); ++} ++ ++static inline u64 unscale_slice(u64 delta, struct sched_entity *se) { ++ return __unscale_slice(delta, se->burst_score); ++} ++ ++void reweight_task(struct task_struct *p, int prio); ++ ++static void update_burst_score(struct sched_entity *se) { ++ if (!entity_is_task(se)) return; ++ struct task_struct *p = task_of(se); ++ u8 prio = p->static_prio - MAX_RT_PRIO; ++ u8 prev_prio = min(39, prio + se->burst_score); ++ ++ se->burst_score = se->burst_penalty >> 2; ++ ++ u8 new_prio = min(39, prio + se->burst_score); ++ if (new_prio != prev_prio) ++ reweight_task(p, new_prio); ++} ++ ++static void update_burst_penalty(struct sched_entity *se) { ++ se->curr_burst_penalty = calc_burst_penalty(se->burst_time); ++ se->burst_penalty = max(se->prev_burst_penalty, se->curr_burst_penalty); ++ update_burst_score(se); ++} ++ ++static inline u32 binary_smooth(u32 new, u32 old) { ++ int increment = new - old; ++ return (0 <= increment)? 
++ old + ( increment >> (int)sched_burst_smoothness_long): ++ old - (-increment >> (int)sched_burst_smoothness_short); ++} ++ ++static void restart_burst(struct sched_entity *se) { ++ se->burst_penalty = se->prev_burst_penalty = ++ binary_smooth(se->curr_burst_penalty, se->prev_burst_penalty); ++ se->curr_burst_penalty = 0; ++ se->burst_time = 0; ++ update_burst_score(se); ++} ++ ++static void restart_burst_rescale_deadline(struct sched_entity *se) { ++ s64 vscaled, wremain, vremain = se->deadline - se->vruntime; ++ u8 prev_score = se->burst_score; ++ restart_burst(se); ++ if (prev_score > se->burst_score) { ++ wremain = __unscale_slice(abs(vremain), prev_score); ++ vscaled = scale_slice(wremain, se); ++ if (unlikely(vremain < 0)) ++ vscaled = -vscaled; ++ se->deadline = se->vruntime + vscaled; ++ } ++} ++#endif // CONFIG_SCHED_BORE ++ + int sched_thermal_decay_shift; + static int __init setup_sched_thermal_decay_shift(char *str) + { +@@ -137,6 +245,69 @@ static unsigned int sysctl_numa_balancing_promote_rate_limit = 65536; + + #ifdef CONFIG_SYSCTL + static struct ctl_table sched_fair_sysctls[] = { ++#ifdef CONFIG_SCHED_BORE ++ { ++ .procname = "sched_bore", ++ .data = &sched_bore, ++ .maxlen = sizeof(u8), ++ .mode = 0644, ++ .proc_handler = proc_dou8vec_minmax, ++ .extra1 = SYSCTL_ONE, ++ .extra2 = SYSCTL_ONE, ++ }, ++ { ++ .procname = "sched_burst_smoothness_long", ++ .data = &sched_burst_smoothness_long, ++ .maxlen = sizeof(u8), ++ .mode = 0644, ++ .proc_handler = proc_dou8vec_minmax, ++ .extra1 = SYSCTL_ZERO, ++ .extra2 = SYSCTL_ONE, ++ }, ++ { ++ .procname = "sched_burst_smoothness_short", ++ .data = &sched_burst_smoothness_short, ++ .maxlen = sizeof(u8), ++ .mode = 0644, ++ .proc_handler = proc_dou8vec_minmax, ++ .extra1 = SYSCTL_ZERO, ++ .extra2 = SYSCTL_ONE, ++ }, ++ { ++ .procname = "sched_burst_fork_atavistic", ++ .data = &sched_burst_fork_atavistic, ++ .maxlen = sizeof(u8), ++ .mode = 0644, ++ .proc_handler = proc_dou8vec_minmax, ++ .extra1 = SYSCTL_ZERO, ++ .extra2 = SYSCTL_THREE, ++ }, ++ { ++ .procname = "sched_burst_penalty_offset", ++ .data = &sched_burst_penalty_offset, ++ .maxlen = sizeof(u8), ++ .mode = 0644, ++ .proc_handler = proc_dou8vec_minmax, ++ .extra1 = SYSCTL_ZERO, ++ .extra2 = &sixty_four, ++ }, ++ { ++ .procname = "sched_burst_penalty_scale", ++ .data = &sched_burst_penalty_scale, ++ .maxlen = sizeof(uint), ++ .mode = 0644, ++ .proc_handler = proc_douintvec_minmax, ++ .extra1 = SYSCTL_ZERO, ++ .extra2 = &maxval_12_bits, ++ }, ++ { ++ .procname = "sched_burst_cache_lifetime", ++ .data = &sched_burst_cache_lifetime, ++ .maxlen = sizeof(uint), ++ .mode = 0644, ++ .proc_handler = proc_douintvec, ++ }, ++#endif // CONFIG_SCHED_BORE + #ifdef CONFIG_CFS_BANDWIDTH + { + .procname = "sched_cfs_bandwidth_slice_us", +@@ -195,6 +366,13 @@ static inline void update_load_set(struct load_weight *lw, unsigned long w) + * + * This idea comes from the SD scheduler of Con Kolivas: + */ ++#ifdef CONFIG_SCHED_BORE ++static void update_sysctl(void) { ++ sysctl_sched_base_slice = ++ max(sysctl_sched_min_base_slice, configured_sched_base_slice); ++} ++void sched_update_min_base_slice(void) { update_sysctl(); } ++#else // !CONFIG_SCHED_BORE + static unsigned int get_update_sysctl_factor(void) + { + unsigned int cpus = min_t(unsigned int, num_online_cpus(), 8); +@@ -225,6 +403,7 @@ static void update_sysctl(void) + SET_SYSCTL(sched_base_slice); + #undef SET_SYSCTL + } ++#endif // CONFIG_SCHED_BORE + + void __init sched_init_granularity(void) + { +@@ -704,6 +883,9 @@ static void 
update_entity_lag(struct cfs_rq *cfs_rq, struct sched_entity *se) + lag = avg_vruntime(cfs_rq) - se->vruntime; + + limit = calc_delta_fair(max_t(u64, 2*se->slice, TICK_NSEC), se); ++#ifdef CONFIG_SCHED_BORE ++ limit >>= 1; ++#endif // CONFIG_SCHED_BORE + se->vlag = clamp(lag, -limit, limit); + } + +@@ -955,6 +1137,7 @@ struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq) + * Scheduling class statistics methods: + */ + #ifdef CONFIG_SMP ++#if !defined(CONFIG_SCHED_BORE) + int sched_update_scaling(void) + { + unsigned int factor = get_update_sysctl_factor(); +@@ -966,6 +1149,7 @@ int sched_update_scaling(void) + + return 0; + } ++#endif // CONFIG_SCHED_BORE + #endif + #endif + +@@ -1165,7 +1349,13 @@ static void update_curr(struct cfs_rq *cfs_rq) + if (unlikely(delta_exec <= 0)) + return; + ++#ifdef CONFIG_SCHED_BORE ++ curr->burst_time += delta_exec; ++ update_burst_penalty(curr); ++ curr->vruntime += max(1ULL, calc_delta_fair(delta_exec, curr)); ++#else // !CONFIG_SCHED_BORE + curr->vruntime += calc_delta_fair(delta_exec, curr); ++#endif // CONFIG_SCHED_BORE + update_deadline(cfs_rq, curr); + update_min_vruntime(cfs_rq); + +@@ -3671,10 +3861,9 @@ dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { } + #endif + + static void reweight_eevdf(struct cfs_rq *cfs_rq, struct sched_entity *se, +- unsigned long weight) ++ unsigned long weight, u64 avruntime) + { + unsigned long old_weight = se->load.weight; +- u64 avruntime = avg_vruntime(cfs_rq); + s64 vlag, vslice; + + /* +@@ -3782,11 +3971,13 @@ static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, + { + bool curr = cfs_rq->curr == se; + ++ if (curr) ++ update_curr(cfs_rq); ++ u64 avruntime = avg_vruntime(cfs_rq); ++ + if (se->on_rq) { + /* commit outstanding execution time */ +- if (curr) +- update_curr(cfs_rq); +- else ++ if (!curr) + __dequeue_entity(cfs_rq, se); + update_load_sub(&cfs_rq->load, se->load.weight); + } +@@ -3799,7 +3990,7 @@ static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, + */ + se->vlag = div_s64(se->vlag * se->load.weight, weight); + } else { +- reweight_eevdf(cfs_rq, se, weight); ++ reweight_eevdf(cfs_rq, se, weight, avruntime); + } + + update_load_set(&se->load, weight); +@@ -5171,6 +5362,9 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) + * + * EEVDF: placement strategy #1 / #2 + */ ++#ifdef CONFIG_SCHED_BORE ++ if (se->vlag) ++#endif // CONFIG_SCHED_BORE + if (sched_feat(PLACE_LAG) && cfs_rq->nr_running) { + struct sched_entity *curr = cfs_rq->curr; + unsigned long load; +@@ -6803,6 +6997,14 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags) + bool was_sched_idle = sched_idle_rq(rq); + + util_est_dequeue(&rq->cfs, p); ++#ifdef CONFIG_SCHED_BORE ++ if (task_sleep) { ++ cfs_rq = cfs_rq_of(se); ++ if (cfs_rq->curr == se) ++ update_curr(cfs_rq); ++ restart_burst(se); ++ } ++#endif // CONFIG_SCHED_BORE + + for_each_sched_entity(se) { + cfs_rq = cfs_rq_of(se); +@@ -8552,16 +8754,25 @@ static void yield_task_fair(struct rq *rq) + /* + * Are we the only task in the tree? + */ ++#if !defined(CONFIG_SCHED_BORE) + if (unlikely(rq->nr_running == 1)) + return; + + clear_buddies(cfs_rq, se); ++#endif // CONFIG_SCHED_BORE + + update_rq_clock(rq); + /* + * Update run-time statistics of the 'current'. 
+ */ + update_curr(cfs_rq); ++#ifdef CONFIG_SCHED_BORE ++ restart_burst_rescale_deadline(se); ++ if (unlikely(rq->nr_running == 1)) ++ return; ++ ++ clear_buddies(cfs_rq, se); ++#endif // CONFIG_SCHED_BORE + /* + * Tell update_rq_clock() that we've just updated, + * so we don't do microscopic update in schedule() +@@ -12651,6 +12862,9 @@ static void task_fork_fair(struct task_struct *p) + curr = cfs_rq->curr; + if (curr) + update_curr(cfs_rq); ++#ifdef CONFIG_SCHED_BORE ++ update_burst_score(se); ++#endif // CONFIG_SCHED_BORE + place_entity(cfs_rq, se, ENQUEUE_INITIAL); + rq_unlock(rq, &rf); + } +diff --git a/kernel/sched/features.h b/kernel/sched/features.h +index 143f55df89..3f0fe409f5 100644 +--- a/kernel/sched/features.h ++++ b/kernel/sched/features.h +@@ -6,7 +6,11 @@ + */ + SCHED_FEAT(PLACE_LAG, true) + SCHED_FEAT(PLACE_DEADLINE_INITIAL, true) ++#ifdef CONFIG_SCHED_BORE ++SCHED_FEAT(RUN_TO_PARITY, false) ++#else // !CONFIG_SCHED_BORE + SCHED_FEAT(RUN_TO_PARITY, true) ++#endif // CONFIG_SCHED_BORE + + /* + * Prefer to schedule the task we woke last (assuming it failed +diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h +index 001fe047bd..da3ad1d4e1 100644 +--- a/kernel/sched/sched.h ++++ b/kernel/sched/sched.h +@@ -1965,7 +1965,11 @@ static inline void dirty_sched_domain_sysctl(int cpu) + } + #endif + ++#ifdef CONFIG_SCHED_BORE ++extern void sched_update_min_base_slice(void); ++#else // !CONFIG_SCHED_BORE + extern int sched_update_scaling(void); ++#endif // CONFIG_SCHED_BORE + + static inline const struct cpumask *task_user_cpus(struct task_struct *p) + { +@@ -2552,6 +2556,9 @@ extern const_debug unsigned int sysctl_sched_nr_migrate; + extern const_debug unsigned int sysctl_sched_migration_cost; + + extern unsigned int sysctl_sched_base_slice; ++#ifdef CONFIG_SCHED_BORE ++extern unsigned int sysctl_sched_min_base_slice; ++#endif // CONFIG_SCHED_BORE + + #ifdef CONFIG_SCHED_DEBUG + extern int sysctl_resched_latency_warn_ms; +-- +2.34.1 + diff --git a/sys-kernel/linux-image-redcore/files/6.8-amd64.config b/sys-kernel/linux-image-redcore/files/6.8-amd64.config index 4efc3471..7fa428ce 100644 --- a/sys-kernel/linux-image-redcore/files/6.8-amd64.config +++ b/sys-kernel/linux-image-redcore/files/6.8-amd64.config @@ -233,6 +233,7 @@ CONFIG_USER_NS_UNPRIVILEGED=y CONFIG_PID_NS=y CONFIG_NET_NS=y CONFIG_CHECKPOINT_RESTORE=y +CONFIG_SCHED_BORE=y CONFIG_SCHED_AUTOGROUP=y CONFIG_RELAY=y CONFIG_BLK_DEV_INITRD=y diff --git a/sys-kernel/linux-image-redcore/linux-image-redcore-6.8.7.ebuild b/sys-kernel/linux-image-redcore/linux-image-redcore-6.8.7-r1.ebuild index de25118a..4f06ed08 100644 --- a/sys-kernel/linux-image-redcore/linux-image-redcore-6.8.7.ebuild +++ b/sys-kernel/linux-image-redcore/linux-image-redcore-6.8.7-r1.ebuild @@ -5,7 +5,7 @@ EAPI=6 inherit eutils -EXTRAVERSION="redcore" +EXTRAVERSION="redcore-r1" KV_FULL="${PV}-${EXTRAVERSION}" KV_MAJOR="6.8" @@ -34,6 +34,7 @@ DEPEND=" RDEPEND="${DEPEND}" PATCHES=( + "${FILESDIR}"/"${KV_MAJOR}"-0001-linux6.8.y-bore5.1.0.patch "${FILESDIR}"/"${KV_MAJOR}"-ath10k-be-quiet.patch "${FILESDIR}"/"${KV_MAJOR}"-ata-fix-NCQ-LOG-strings-and-move-to-debug.patch "${FILESDIR}"/"${KV_MAJOR}"-acpi-use-kern_warning_even_when_error.patch diff --git a/sys-kernel/linux-sources-redcore/files/6.8-0001-linux6.8.y-bore5.1.0.patch b/sys-kernel/linux-sources-redcore/files/6.8-0001-linux6.8.y-bore5.1.0.patch new file mode 100644 index 00000000..0fffdcea --- /dev/null +++ b/sys-kernel/linux-sources-redcore/files/6.8-0001-linux6.8.y-bore5.1.0.patch @@ -0,0 +1,764 @@ 
[764-line patch body omitted: byte-for-byte identical to sys-kernel/linux-image-redcore/files/6.8-0001-linux6.8.y-bore5.1.0.patch above; both copies share blob index 0fffdcea]
diff --git a/sys-kernel/linux-sources-redcore/files/6.8-amd64.config b/sys-kernel/linux-sources-redcore/files/6.8-amd64.config
index 4efc3471..7fa428ce 100644
--- a/sys-kernel/linux-sources-redcore/files/6.8-amd64.config
+++ b/sys-kernel/linux-sources-redcore/files/6.8-amd64.config
@@ -233,6 +233,7 @@ CONFIG_USER_NS_UNPRIVILEGED=y
 CONFIG_PID_NS=y
 CONFIG_NET_NS=y
 CONFIG_CHECKPOINT_RESTORE=y
+CONFIG_SCHED_BORE=y
 CONFIG_SCHED_AUTOGROUP=y
 CONFIG_RELAY=y
 CONFIG_BLK_DEV_INITRD=y
diff --git a/sys-kernel/linux-sources-redcore/linux-sources-redcore-6.8.7.ebuild b/sys-kernel/linux-sources-redcore/linux-sources-redcore-6.8.7-r1.ebuild
index 9a4b2777..aef661c8 100644
--- a/sys-kernel/linux-sources-redcore/linux-sources-redcore-6.8.7.ebuild
+++ b/sys-kernel/linux-sources-redcore/linux-sources-redcore-6.8.7-r1.ebuild
@@ -5,7 +5,7 @@ EAPI=6
 inherit eutils
-EXTRAVERSION="redcore"
+EXTRAVERSION="redcore-r1"
 KV_FULL="${PV}-${EXTRAVERSION}"
 KV_MAJOR="6.8"
@@ -28,6 +28,7 @@ DEPEND="
 RDEPEND="${DEPEND}"
 PATCHES=(
+	"${FILESDIR}"/"${KV_MAJOR}"-0001-linux6.8.y-bore5.1.0.patch
 	"${FILESDIR}"/"${KV_MAJOR}"-ath10k-be-quiet.patch
 	"${FILESDIR}"/"${KV_MAJOR}"-ata-fix-NCQ-LOG-strings-and-move-to-debug.patch
 	"${FILESDIR}"/"${KV_MAJOR}"-acpi-use-kern_warning_even_when_error.patch
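Finally, a post-upgrade sanity check, sketched under the assumption of the usual Gentoo/Redcore flow: the banner string comes verbatim from the printk the patch adds to sched_init(), and the release string follows from EXTRAVERSION="redcore-r1" with KV_FULL="${PV}-${EXTRAVERSION}" in the renamed ebuilds.

```sh
# Rebuild against the bumped -r1 ebuilds, then reboot into the new kernel.
emerge -1av sys-kernel/linux-image-redcore sys-kernel/linux-sources-redcore

uname -r            # expected: 6.8.7-redcore-r1
dmesg | grep BORE   # expected: "BORE (Burst-Oriented Response Enhancer) CPU
                    # Scheduler modification 5.1.0 by Masahito Suzuki"
sysctl kernel.sched_bore   # expected: kernel.sched_bore = 1
```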