| author | V3n3RiX <venerix@koprulu.sector> | 2024-05-03 20:57:12 +0100 |
|---|---|---|
| committer | V3n3RiX <venerix@koprulu.sector> | 2024-05-03 20:57:12 +0100 |
| commit | 52ff08b9d52f7916ad7fd62942bd10d76e832dcb (patch) | |
| tree | 9bcbbfc69ff49168bdd4ecc917fa2a6f58b4569d /sys-kernel/linux-sources-redcore | |
| parent | e675071c2ef223b5cda9cc4b2cd2792bb5834fff (diff) | |
sys-kernel/linux-{image,sources}-redcore : version bump
Diffstat (limited to 'sys-kernel/linux-sources-redcore')
| Mode | File | Lines |
|---|---|---|
| -rw-r--r-- | sys-kernel/linux-sources-redcore/Manifest | 2 |
| -rw-r--r-- | sys-kernel/linux-sources-redcore/files/6.8-0001-linux6.8.y-bore5.1.0.patch | 764 |
| -rw-r--r-- | sys-kernel/linux-sources-redcore/files/6.8-amd64.config | 6 |
| -rw-r--r-- | sys-kernel/linux-sources-redcore/linux-sources-redcore-6.8.9.ebuild (renamed from sys-kernel/linux-sources-redcore/linux-sources-redcore-6.8.8.ebuild) | 1 |
4 files changed, 4 insertions, 769 deletions
```diff
diff --git a/sys-kernel/linux-sources-redcore/Manifest b/sys-kernel/linux-sources-redcore/Manifest
index 10938f80..ae8ee076 100644
--- a/sys-kernel/linux-sources-redcore/Manifest
+++ b/sys-kernel/linux-sources-redcore/Manifest
@@ -1,2 +1,2 @@
 DIST linux-6.7.12.tar.xz 141495752 BLAKE2B b2d8949eee0fc5df782f619f4518d3860d0e06b2233cadcccc7a93e8b48f649e671df523cd9d1bb4d18ceb2d3847fc3ca1044a4170be71b2c76357a405ecb875 SHA512 6375c0acbfd1452b3208273d47af4f0501c3953371ec335701662f619da2acac1f9b7d9ff7dfb5123a3867edd5765ba03428d3c018d9bd52b1f0d01dbd26d751
-DIST linux-6.8.8.tar.xz 142584076 BLAKE2B 3c04407c6875d1547c18024db7a020c450b332b10707c847208080819fa92624281fd9b628c1fefe7c16439f60d64dcc80010726a7d4320354b682ccaadb1f35 SHA512 f1c1aee18bfbb7213ef088d28f4ef25d2ff5c29dadea7d61ece3dfac05746a3f3c8a1cee43140ec4a1d97984bece423c1c00ba440f43e18aa1e20e88dc4647ce
+DIST linux-6.8.9.tar.xz 142582332 BLAKE2B 0305b6636a4e382855a2804cedc3984f1e891b26b24412e3533b1f29a07459d39be5121d4618da20098623290e922d502b7ac1b774f39e732f23a778a4b5b5be SHA512 67056eae13be9130e11ea7e4d394d1f0b6b1dccc4f080f72c136870d4486fdebc2c315d149ca4f1e57af4c79dedf849e31c439426166544691508edafca3d350
diff --git a/sys-kernel/linux-sources-redcore/files/6.8-0001-linux6.8.y-bore5.1.0.patch b/sys-kernel/linux-sources-redcore/files/6.8-0001-linux6.8.y-bore5.1.0.patch
deleted file mode 100644
index 0fffdcea..00000000
--- a/sys-kernel/linux-sources-redcore/files/6.8-0001-linux6.8.y-bore5.1.0.patch
+++ /dev/null
@@ -1,764 +0,0 @@
-From feae72fd7f2403910c157dd679d6ec240ed1dfbf Mon Sep 17 00:00:00 2001
-From: Masahito S <firelzrd@gmail.com>
-Date: Mon, 22 Apr 2024 04:12:58 +0900
-Subject: [PATCH] linux6.8.y-bore5.1.0
-
----
- include/linux/sched.h   |  10 ++
- init/Kconfig            |  17 +++
- kernel/sched/core.c     | 143 +++++++++++++++++++++++++
- kernel/sched/debug.c    |  60 ++++++++++-
- kernel/sched/fair.c     | 230 ++++++++++++++++++++++++++++++++++++++--
- kernel/sched/features.h |   4 +
- kernel/sched/sched.h    |   7 ++
- 7 files changed, 462 insertions(+), 9 deletions(-)
-
-diff --git a/include/linux/sched.h b/include/linux/sched.h
-index ffe8f618ab..0ab0b04240 100644
---- a/include/linux/sched.h
-+++ b/include/linux/sched.h
-@@ -547,6 +547,16 @@ struct sched_entity {
- 	u64 sum_exec_runtime;
- 	u64 prev_sum_exec_runtime;
- 	u64 vruntime;
-+#ifdef CONFIG_SCHED_BORE
-+	u64 burst_time;
-+	u8  prev_burst_penalty;
-+	u8  curr_burst_penalty;
-+	u8  burst_penalty;
-+	u8  burst_score;
-+	u8  child_burst;
-+	u32 child_burst_cnt;
-+	u64 child_burst_last_cached;
-+#endif // CONFIG_SCHED_BORE
- 	s64 vlag;
- 	u64 slice;
-
-diff --git a/init/Kconfig b/init/Kconfig
-index bee58f7468..13427dbb48 100644
---- a/init/Kconfig
-+++ b/init/Kconfig
-@@ -1279,6 +1279,23 @@ config CHECKPOINT_RESTORE
-
- 	  If unsure, say N here.
-
-+config SCHED_BORE
-+	bool "Burst-Oriented Response Enhancer"
-+	default y
-+	help
-+	  In Desktop and Mobile computing, one might prefer interactive
-+	  tasks to keep responsive no matter what they run in the background.
-+
-+	  Enabling this kernel feature modifies the scheduler to discriminate
-+	  tasks by their burst time (runtime since it last went sleeping or
-+	  yielding state) and prioritize those that run less bursty.
-+	  Such tasks usually include window compositor, widgets backend,
-+	  terminal emulator, video playback, games and so on.
-+	  With a little impact to scheduling fairness, it may improve
-+	  responsiveness especially under heavy background workload.
-+
-+	  If unsure, say Y here.
-+
- config SCHED_AUTOGROUP
- 	bool "Automatic process group scheduling"
- 	select CGROUPS
-diff --git a/kernel/sched/core.c b/kernel/sched/core.c
-index 9116bcc903..d1711f75f8 100644
---- a/kernel/sched/core.c
-+++ b/kernel/sched/core.c
-@@ -4507,6 +4507,138 @@ int wake_up_state(struct task_struct *p, unsigned int state)
- 	return try_to_wake_up(p, state, 0);
- }
-
-+#ifdef CONFIG_SCHED_BORE
-+extern u8   sched_burst_fork_atavistic;
-+extern uint sched_burst_cache_lifetime;
-+
-+static void __init sched_init_bore(void) {
-+	init_task.se.burst_time = 0;
-+	init_task.se.prev_burst_penalty = 0;
-+	init_task.se.curr_burst_penalty = 0;
-+	init_task.se.burst_penalty = 0;
-+	init_task.se.burst_score = 0;
-+	init_task.se.child_burst_last_cached = 0;
-+}
-+
-+void inline sched_fork_bore(struct task_struct *p) {
-+	p->se.burst_time = 0;
-+	p->se.curr_burst_penalty = 0;
-+	p->se.burst_score = 0;
-+	p->se.child_burst_last_cached = 0;
-+}
-+
-+static u32 count_child_tasks(struct task_struct *p) {
-+	struct task_struct *child;
-+	u32 cnt = 0;
-+	list_for_each_entry(child, &p->children, sibling) {cnt++;}
-+	return cnt;
-+}
-+
-+static inline bool task_is_inheritable(struct task_struct *p) {
-+	return (p->sched_class == &fair_sched_class);
-+}
-+
-+static inline bool child_burst_cache_expired(struct task_struct *p, u64 now) {
-+	u64 expiration_time =
-+		p->se.child_burst_last_cached + sched_burst_cache_lifetime;
-+	return ((s64)(expiration_time - now) < 0);
-+}
-+
-+static void __update_child_burst_cache(
-+	struct task_struct *p, u32 cnt, u32 sum, u64 now) {
-+	u8 avg = 0;
-+	if (cnt) avg = sum / cnt;
-+	p->se.child_burst = max(avg, p->se.burst_penalty);
-+	p->se.child_burst_cnt = cnt;
-+	p->se.child_burst_last_cached = now;
-+}
-+
-+static inline void update_child_burst_direct(struct task_struct *p, u64 now) {
-+	struct task_struct *child;
-+	u32 cnt = 0;
-+	u32 sum = 0;
-+
-+	list_for_each_entry(child, &p->children, sibling) {
-+		if (!task_is_inheritable(child)) continue;
-+		cnt++;
-+		sum += child->se.burst_penalty;
-+	}
-+
-+	__update_child_burst_cache(p, cnt, sum, now);
-+}
-+
-+static inline u8 __inherit_burst_direct(struct task_struct *p, u64 now) {
-+	struct task_struct *parent = p->real_parent;
-+	if (child_burst_cache_expired(parent, now))
-+		update_child_burst_direct(parent, now);
-+
-+	return parent->se.child_burst;
-+}
-+
-+static void update_child_burst_topological(
-+	struct task_struct *p, u64 now, u32 depth, u32 *acnt, u32 *asum) {
-+	struct task_struct *child, *dec;
-+	u32 cnt = 0, dcnt = 0;
-+	u32 sum = 0;
-+
-+	list_for_each_entry(child, &p->children, sibling) {
-+		dec = child;
-+		while ((dcnt = count_child_tasks(dec)) == 1)
-+			dec = list_first_entry(&dec->children, struct task_struct, sibling);
-+
-+		if (!dcnt || !depth) {
-+			if (!task_is_inheritable(dec)) continue;
-+			cnt++;
-+			sum += dec->se.burst_penalty;
-+			continue;
-+		}
-+		if (!child_burst_cache_expired(dec, now)) {
-+			cnt += dec->se.child_burst_cnt;
-+			sum += (u32)dec->se.child_burst * dec->se.child_burst_cnt;
-+			continue;
-+		}
-+		update_child_burst_topological(dec, now, depth - 1, &cnt, &sum);
-+	}
-+
-+	__update_child_burst_cache(p, cnt, sum, now);
-+	*acnt += cnt;
-+	*asum += sum;
-+}
-+
-+static inline u8 __inherit_burst_topological(struct task_struct *p, u64 now) {
-+	struct task_struct *anc = p->real_parent;
-+	u32 cnt = 0, sum = 0;
-+
-+	while (anc->real_parent != anc && count_child_tasks(anc) == 1)
-+		anc = anc->real_parent;
-+
-+	if (child_burst_cache_expired(anc, now))
-+		update_child_burst_topological(
-+			anc, now, sched_burst_fork_atavistic - 1, &cnt, &sum);
-+
-+	return anc->se.child_burst;
-+}
-+
-+static inline void inherit_burst(struct task_struct *p) {
-+	u8 burst_cache;
-+	u64 now = ktime_get_ns();
-+
-+	read_lock(&tasklist_lock);
-+	burst_cache = likely(sched_burst_fork_atavistic)?
-+		__inherit_burst_topological(p, now):
-+		__inherit_burst_direct(p, now);
-+	read_unlock(&tasklist_lock);
-+
-+	p->se.prev_burst_penalty = max(p->se.prev_burst_penalty, burst_cache);
-+}
-+
-+static void sched_post_fork_bore(struct task_struct *p) {
-+	if (p->sched_class == &fair_sched_class)
-+		inherit_burst(p);
-+	p->se.burst_penalty = p->se.prev_burst_penalty;
-+}
-+#endif // CONFIG_SCHED_BORE
-+
- /*
-  * Perform scheduler related setup for a newly forked process p.
-  * p is forked by current.
-@@ -4523,6 +4655,9 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
- 	p->se.prev_sum_exec_runtime = 0;
- 	p->se.nr_migrations = 0;
- 	p->se.vruntime = 0;
-+#ifdef CONFIG_SCHED_BORE
-+	sched_fork_bore(p);
-+#endif // CONFIG_SCHED_BORE
- 	p->se.vlag = 0;
- 	p->se.slice = sysctl_sched_base_slice;
- 	INIT_LIST_HEAD(&p->se.group_node);
-@@ -4839,6 +4974,9 @@ void sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs)
-
- void sched_post_fork(struct task_struct *p)
- {
-+#ifdef CONFIG_SCHED_BORE
-+	sched_post_fork_bore(p);
-+#endif // CONFIG_SCHED_BORE
- 	uclamp_post_fork(p);
- }
-
-@@ -9910,6 +10048,11 @@ void __init sched_init(void)
- 	BUG_ON(&dl_sched_class != &stop_sched_class + 1);
- #endif
-
-+#ifdef CONFIG_SCHED_BORE
-+	sched_init_bore();
-+	printk(KERN_INFO "BORE (Burst-Oriented Response Enhancer) CPU Scheduler modification 5.1.0 by Masahito Suzuki");
-+#endif // CONFIG_SCHED_BORE
-+
- 	wait_bit_init();
-
- #ifdef CONFIG_FAIR_GROUP_SCHED
-diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
-index 8d5d98a583..b178612617 100644
---- a/kernel/sched/debug.c
-+++ b/kernel/sched/debug.c
-@@ -167,7 +167,52 @@ static const struct file_operations sched_feat_fops = {
- };
-
- #ifdef CONFIG_SMP
-+#ifdef CONFIG_SCHED_BORE
-+static ssize_t sched_min_base_slice_write(struct file *filp, const char __user *ubuf,
-+				   size_t cnt, loff_t *ppos)
-+{
-+	char buf[16];
-+	unsigned int value;
-+
-+	if (cnt > 15)
-+		cnt = 15;
-+
-+	if (copy_from_user(&buf, ubuf, cnt))
-+		return -EFAULT;
-+	buf[cnt] = '\0';
-+
-+	if (kstrtouint(buf, 10, &value))
-+		return -EINVAL;
-
-+	if (!value)
-+		return -EINVAL;
-+
-+	sysctl_sched_min_base_slice = value;
-+	sched_update_min_base_slice();
-+
-+	*ppos += cnt;
-+	return cnt;
-+}
-+
-+static int sched_min_base_slice_show(struct seq_file *m, void *v)
-+{
-+	seq_printf(m, "%d\n", sysctl_sched_min_base_slice);
-+	return 0;
-+}
-+
-+static int sched_min_base_slice_open(struct inode *inode, struct file *filp)
-+{
-+	return single_open(filp, sched_min_base_slice_show, NULL);
-+}
-+
-+static const struct file_operations sched_min_base_slice_fops = {
-+	.open    = sched_min_base_slice_open,
-+	.write   = sched_min_base_slice_write,
-+	.read    = seq_read,
-+	.llseek  = seq_lseek,
-+	.release = single_release,
-+};
-+#else // !CONFIG_SCHED_BORE
- static ssize_t sched_scaling_write(struct file *filp, const char __user *ubuf,
- 				   size_t cnt, loff_t *ppos)
- {
-@@ -213,7 +258,7 @@ static const struct file_operations sched_scaling_fops = {
- 	.llseek  = seq_lseek,
- 	.release = single_release,
- };
--
-+#endif // CONFIG_SCHED_BORE
- #endif /* SMP */
-
- #ifdef CONFIG_PREEMPT_DYNAMIC
-@@ -347,13 +392,20 @@ static __init int sched_init_debug(void)
- 	debugfs_create_file("preempt", 0644, debugfs_sched, NULL, &sched_dynamic_fops);
- #endif
-
-+#ifdef CONFIG_SCHED_BORE
-+	debugfs_create_file("min_base_slice_ns", 0644, debugfs_sched, NULL, &sched_min_base_slice_fops);
-+	debugfs_create_u32("base_slice_ns", 0400, debugfs_sched, &sysctl_sched_base_slice);
-+#else // !CONFIG_SCHED_BORE
- 	debugfs_create_u32("base_slice_ns", 0644, debugfs_sched, &sysctl_sched_base_slice);
-+#endif // CONFIG_SCHED_BORE
-
- 	debugfs_create_u32("latency_warn_ms", 0644, debugfs_sched, &sysctl_resched_latency_warn_ms);
- 	debugfs_create_u32("latency_warn_once", 0644, debugfs_sched, &sysctl_resched_latency_warn_once);
-
- #ifdef CONFIG_SMP
-+#if !defined(CONFIG_SCHED_BORE)
- 	debugfs_create_file("tunable_scaling", 0644, debugfs_sched, NULL, &sched_scaling_fops);
-+#endif // CONFIG_SCHED_BORE
- 	debugfs_create_u32("migration_cost_ns", 0644, debugfs_sched, &sysctl_sched_migration_cost);
- 	debugfs_create_u32("nr_migrate", 0644, debugfs_sched, &sysctl_sched_nr_migrate);
-
-@@ -595,6 +647,9 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
- 		SPLIT_NS(schedstat_val_or_zero(p->stats.sum_sleep_runtime)),
- 		SPLIT_NS(schedstat_val_or_zero(p->stats.sum_block_runtime)));
-
-+#ifdef CONFIG_SCHED_BORE
-+	SEQ_printf(m, " %2d", p->se.burst_score);
-+#endif // CONFIG_SCHED_BORE
- #ifdef CONFIG_NUMA_BALANCING
- 	SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p));
- #endif
-@@ -1068,6 +1123,9 @@ void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
-
- 	P(se.load.weight);
- #ifdef CONFIG_SMP
-+#ifdef CONFIG_SCHED_BORE
-+	P(se.burst_score);
-+#endif // CONFIG_SCHED_BORE
- 	P(se.avg.load_sum);
- 	P(se.avg.runnable_sum);
- 	P(se.avg.util_sum);
-diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
-index 533547e3c9..a2346b1b44 100644
---- a/kernel/sched/fair.c
-+++ b/kernel/sched/fair.c
-@@ -19,6 +19,9 @@
-  *
-  *  Adaptive scheduling granularity, math enhancements by Peter Zijlstra
-  *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
-+ *
-+ *  Burst-Oriented Response Enhancer (BORE) CPU Scheduler
-+ *  Copyright (C) 2021-2024 Masahito Suzuki <firelzrd@gmail.com>
-  */
- #include <linux/energy_model.h>
- #include <linux/mmap_lock.h>
-@@ -64,20 +67,125 @@
-  * SCHED_TUNABLESCALING_LOG - scaled logarithmical, *1+ilog(ncpus)
-  * SCHED_TUNABLESCALING_LINEAR - scaled linear, *ncpus
-  *
-- * (default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus))
-+ * (BORE  default SCHED_TUNABLESCALING_NONE = *1 constant)
-+ * (EEVDF default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus))
-  */
-+#ifdef CONFIG_SCHED_BORE
-+unsigned int sysctl_sched_tunable_scaling = SCHED_TUNABLESCALING_NONE;
-+#else // !CONFIG_SCHED_BORE
- unsigned int sysctl_sched_tunable_scaling = SCHED_TUNABLESCALING_LOG;
-+#endif // CONFIG_SCHED_BORE
-
- /*
-  * Minimal preemption granularity for CPU-bound tasks:
-  *
-- * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
-+ * (BORE  default: max(1 sec / HZ, min_base_slice) constant, units: nanoseconds)
-+ * (EEVDF default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
-  */
-+#ifdef CONFIG_SCHED_BORE
-+unsigned int sysctl_sched_base_slice = 1000000000ULL / HZ;
-+static unsigned int configured_sched_base_slice = 1000000000ULL / HZ;
-+unsigned int sysctl_sched_min_base_slice = 2000000ULL;
-+#else // !CONFIG_SCHED_BORE
- unsigned int sysctl_sched_base_slice = 750000ULL;
- static unsigned int normalized_sysctl_sched_base_slice = 750000ULL;
-+#endif // CONFIG_SCHED_BORE
-
- const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
-
-+#ifdef CONFIG_SCHED_BORE
-+u8   __read_mostly sched_bore = 1;
-+u8   __read_mostly sched_burst_smoothness_long = 1;
-+u8   __read_mostly sched_burst_smoothness_short = 0;
-+u8   __read_mostly sched_burst_fork_atavistic = 2;
-+u8   __read_mostly sched_burst_penalty_offset = 22;
-+uint __read_mostly sched_burst_penalty_scale = 1280;
-+uint __read_mostly sched_burst_cache_lifetime = 60000000;
-+static int __maybe_unused sixty_four = 64;
-+static int __maybe_unused maxval_12_bits = 4095;
-+
-+#define MAX_BURST_PENALTY (39U <<2)
-+
-+static inline u32 log2plus1_u64_u32f8(u64 v) {
-+	u32 msb = fls64(v);
-+	s32 excess_bits = msb - 9;
-+	u8  fractional = (0 <= excess_bits)? v >> excess_bits: v << -excess_bits;
-+	return msb << 8 | fractional;
-+}
-+
-+static inline u32 calc_burst_penalty(u64 burst_time) {
-+	u32 greed, tolerance, penalty, scaled_penalty;
-+
-+	greed = log2plus1_u64_u32f8(burst_time);
-+	tolerance = sched_burst_penalty_offset << 8;
-+	penalty = max(0, (s32)greed - (s32)tolerance);
-+	scaled_penalty = penalty * sched_burst_penalty_scale >> 16;
-+
-+	return min(MAX_BURST_PENALTY, scaled_penalty);
-+}
-+
-+static inline u64 scale_slice(u64 delta, struct sched_entity *se) {
-+	return mul_u64_u32_shr(delta, sched_prio_to_wmult[se->burst_score], 22);
-+}
-+
-+static inline u64 __unscale_slice(u64 delta, u8 score) {
-+	return mul_u64_u32_shr(delta, sched_prio_to_weight[score], 10);
-+}
-+
-+static inline u64 unscale_slice(u64 delta, struct sched_entity *se) {
-+	return __unscale_slice(delta, se->burst_score);
-+}
-+
-+void reweight_task(struct task_struct *p, int prio);
-+
-+static void update_burst_score(struct sched_entity *se) {
-+	if (!entity_is_task(se)) return;
-+	struct task_struct *p = task_of(se);
-+	u8 prio = p->static_prio - MAX_RT_PRIO;
-+	u8 prev_prio = min(39, prio + se->burst_score);
-+
-+	se->burst_score = se->burst_penalty >> 2;
-+
-+	u8 new_prio = min(39, prio + se->burst_score);
-+	if (new_prio != prev_prio)
-+		reweight_task(p, new_prio);
-+}
-+
-+static void update_burst_penalty(struct sched_entity *se) {
-+	se->curr_burst_penalty = calc_burst_penalty(se->burst_time);
-+	se->burst_penalty = max(se->prev_burst_penalty, se->curr_burst_penalty);
-+	update_burst_score(se);
-+}
-+
-+static inline u32 binary_smooth(u32 new, u32 old) {
-+	int increment = new - old;
-+	return (0 <= increment)?
-+		old + ( increment >> (int)sched_burst_smoothness_long):
-+		old - (-increment >> (int)sched_burst_smoothness_short);
-+}
-+
-+static void restart_burst(struct sched_entity *se) {
-+	se->burst_penalty = se->prev_burst_penalty =
-+		binary_smooth(se->curr_burst_penalty, se->prev_burst_penalty);
-+	se->curr_burst_penalty = 0;
-+	se->burst_time = 0;
-+	update_burst_score(se);
-+}
-+
-+static void restart_burst_rescale_deadline(struct sched_entity *se) {
-+	s64 vscaled, wremain, vremain = se->deadline - se->vruntime;
-+	u8 prev_score = se->burst_score;
-+	restart_burst(se);
-+	if (prev_score > se->burst_score) {
-+		wremain = __unscale_slice(abs(vremain), prev_score);
-+		vscaled = scale_slice(wremain, se);
-+		if (unlikely(vremain < 0))
-+			vscaled = -vscaled;
-+		se->deadline = se->vruntime + vscaled;
-+	}
-+}
-+#endif // CONFIG_SCHED_BORE
-+
- int sched_thermal_decay_shift;
- static int __init setup_sched_thermal_decay_shift(char *str)
- {
-@@ -137,6 +245,69 @@ static unsigned int sysctl_numa_balancing_promote_rate_limit = 65536;
-
- #ifdef CONFIG_SYSCTL
- static struct ctl_table sched_fair_sysctls[] = {
-+#ifdef CONFIG_SCHED_BORE
-+	{
-+		.procname	= "sched_bore",
-+		.data		= &sched_bore,
-+		.maxlen		= sizeof(u8),
-+		.mode		= 0644,
-+		.proc_handler	= proc_dou8vec_minmax,
-+		.extra1		= SYSCTL_ONE,
-+		.extra2		= SYSCTL_ONE,
-+	},
-+	{
-+		.procname	= "sched_burst_smoothness_long",
-+		.data		= &sched_burst_smoothness_long,
-+		.maxlen		= sizeof(u8),
-+		.mode		= 0644,
-+		.proc_handler	= proc_dou8vec_minmax,
-+		.extra1		= SYSCTL_ZERO,
-+		.extra2		= SYSCTL_ONE,
-+	},
-+	{
-+		.procname	= "sched_burst_smoothness_short",
-+		.data		= &sched_burst_smoothness_short,
-+		.maxlen		= sizeof(u8),
-+		.mode		= 0644,
-+		.proc_handler	= proc_dou8vec_minmax,
-+		.extra1		= SYSCTL_ZERO,
-+		.extra2		= SYSCTL_ONE,
-+	},
-+	{
-+		.procname	= "sched_burst_fork_atavistic",
-+		.data		= &sched_burst_fork_atavistic,
-+		.maxlen		= sizeof(u8),
-+		.mode		= 0644,
-+		.proc_handler	= proc_dou8vec_minmax,
-+		.extra1		= SYSCTL_ZERO,
-+		.extra2		= SYSCTL_THREE,
-+	},
-+	{
-+		.procname	= "sched_burst_penalty_offset",
-+		.data		= &sched_burst_penalty_offset,
-+		.maxlen		= sizeof(u8),
-+		.mode		= 0644,
-+		.proc_handler	= proc_dou8vec_minmax,
-+		.extra1		= SYSCTL_ZERO,
-+		.extra2		= &sixty_four,
-+	},
-+	{
-+		.procname	= "sched_burst_penalty_scale",
-+		.data		= &sched_burst_penalty_scale,
-+		.maxlen		= sizeof(uint),
-+		.mode		= 0644,
-+		.proc_handler	= proc_douintvec_minmax,
-+		.extra1		= SYSCTL_ZERO,
-+		.extra2		= &maxval_12_bits,
-+	},
-+	{
-+		.procname	= "sched_burst_cache_lifetime",
-+		.data		= &sched_burst_cache_lifetime,
-+		.maxlen		= sizeof(uint),
-+		.mode		= 0644,
-+		.proc_handler	= proc_douintvec,
-+	},
-+#endif // CONFIG_SCHED_BORE
- #ifdef CONFIG_CFS_BANDWIDTH
- 	{
- 		.procname       = "sched_cfs_bandwidth_slice_us",
-@@ -195,6 +366,13 @@ static inline void update_load_set(struct load_weight *lw, unsigned long w)
-  *
-  * This idea comes from the SD scheduler of Con Kolivas:
-  */
-+#ifdef CONFIG_SCHED_BORE
-+static void update_sysctl(void) {
-+	sysctl_sched_base_slice =
-+		max(sysctl_sched_min_base_slice, configured_sched_base_slice);
-+}
-+void sched_update_min_base_slice(void) { update_sysctl(); }
-+#else // !CONFIG_SCHED_BORE
- static unsigned int get_update_sysctl_factor(void)
- {
- 	unsigned int cpus = min_t(unsigned int, num_online_cpus(), 8);
-@@ -225,6 +403,7 @@ static void update_sysctl(void)
- 	SET_SYSCTL(sched_base_slice);
- #undef SET_SYSCTL
- }
-+#endif // CONFIG_SCHED_BORE
-
- void __init sched_init_granularity(void)
- {
-@@ -704,6 +883,9 @@ static void update_entity_lag(struct cfs_rq *cfs_rq, struct sched_entity *se)
- 	lag = avg_vruntime(cfs_rq) - se->vruntime;
-
- 	limit = calc_delta_fair(max_t(u64, 2*se->slice, TICK_NSEC), se);
-+#ifdef CONFIG_SCHED_BORE
-+	limit >>= 1;
-+#endif // CONFIG_SCHED_BORE
- 	se->vlag = clamp(lag, -limit, limit);
- }
-
-@@ -955,6 +1137,7 @@ struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
-  * Scheduling class statistics methods:
-  */
- #ifdef CONFIG_SMP
-+#if !defined(CONFIG_SCHED_BORE)
- int sched_update_scaling(void)
- {
- 	unsigned int factor = get_update_sysctl_factor();
-@@ -966,6 +1149,7 @@ int sched_update_scaling(void)
-
- 	return 0;
- }
-+#endif // CONFIG_SCHED_BORE
- #endif
- #endif
-
-@@ -1165,7 +1349,13 @@ static void update_curr(struct cfs_rq *cfs_rq)
- 	if (unlikely(delta_exec <= 0))
- 		return;
-
-+#ifdef CONFIG_SCHED_BORE
-+	curr->burst_time += delta_exec;
-+	update_burst_penalty(curr);
-+	curr->vruntime += max(1ULL, calc_delta_fair(delta_exec, curr));
-+#else // !CONFIG_SCHED_BORE
- 	curr->vruntime += calc_delta_fair(delta_exec, curr);
-+#endif // CONFIG_SCHED_BORE
- 	update_deadline(cfs_rq, curr);
- 	update_min_vruntime(cfs_rq);
-
-@@ -3671,10 +3861,9 @@ dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { }
- #endif
-
- static void reweight_eevdf(struct cfs_rq *cfs_rq, struct sched_entity *se,
--			   unsigned long weight)
-+			   unsigned long weight, u64 avruntime)
- {
- 	unsigned long old_weight = se->load.weight;
--	u64 avruntime = avg_vruntime(cfs_rq);
- 	s64 vlag, vslice;
-
- 	/*
-@@ -3782,11 +3971,13 @@ static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
- {
- 	bool curr = cfs_rq->curr == se;
-
-+	if (curr)
-+		update_curr(cfs_rq);
-+	u64 avruntime = avg_vruntime(cfs_rq);
-+
- 	if (se->on_rq) {
- 		/* commit outstanding execution time */
--		if (curr)
--			update_curr(cfs_rq);
--		else
-+		if (!curr)
- 			__dequeue_entity(cfs_rq, se);
- 		update_load_sub(&cfs_rq->load, se->load.weight);
- 	}
-@@ -3799,7 +3990,7 @@ static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
- 		 */
- 		se->vlag = div_s64(se->vlag * se->load.weight, weight);
- 	} else {
--		reweight_eevdf(cfs_rq, se, weight);
-+		reweight_eevdf(cfs_rq, se, weight, avruntime);
- 	}
-
- 	update_load_set(&se->load, weight);
-@@ -5171,6 +5362,9 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
- 	 *
- 	 * EEVDF: placement strategy #1 / #2
- 	 */
-+#ifdef CONFIG_SCHED_BORE
-+	if (se->vlag)
-+#endif // CONFIG_SCHED_BORE
- 	if (sched_feat(PLACE_LAG) && cfs_rq->nr_running) {
- 		struct sched_entity *curr = cfs_rq->curr;
- 		unsigned long load;
-@@ -6803,6 +6997,14 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
- 	bool was_sched_idle = sched_idle_rq(rq);
-
- 	util_est_dequeue(&rq->cfs, p);
-+#ifdef CONFIG_SCHED_BORE
-+	if (task_sleep) {
-+		cfs_rq = cfs_rq_of(se);
-+		if (cfs_rq->curr == se)
-+			update_curr(cfs_rq);
-+		restart_burst(se);
-+	}
-+#endif // CONFIG_SCHED_BORE
-
- 	for_each_sched_entity(se) {
- 		cfs_rq = cfs_rq_of(se);
-@@ -8552,16 +8754,25 @@ static void yield_task_fair(struct rq *rq)
- 	/*
- 	 * Are we the only task in the tree?
- 	 */
-+#if !defined(CONFIG_SCHED_BORE)
- 	if (unlikely(rq->nr_running == 1))
- 		return;
-
- 	clear_buddies(cfs_rq, se);
-+#endif // CONFIG_SCHED_BORE
-
- 	update_rq_clock(rq);
- 	/*
- 	 * Update run-time statistics of the 'current'.
- 	 */
- 	update_curr(cfs_rq);
-+#ifdef CONFIG_SCHED_BORE
-+	restart_burst_rescale_deadline(se);
-+	if (unlikely(rq->nr_running == 1))
-+		return;
-+
-+	clear_buddies(cfs_rq, se);
-+#endif // CONFIG_SCHED_BORE
- 	/*
- 	 * Tell update_rq_clock() that we've just updated,
- 	 * so we don't do microscopic update in schedule()
-@@ -12651,6 +12862,9 @@ static void task_fork_fair(struct task_struct *p)
- 	curr = cfs_rq->curr;
- 	if (curr)
- 		update_curr(cfs_rq);
-+#ifdef CONFIG_SCHED_BORE
-+	update_burst_score(se);
-+#endif // CONFIG_SCHED_BORE
- 	place_entity(cfs_rq, se, ENQUEUE_INITIAL);
- 	rq_unlock(rq, &rf);
- }
-diff --git a/kernel/sched/features.h b/kernel/sched/features.h
-index 143f55df89..3f0fe409f5 100644
---- a/kernel/sched/features.h
-+++ b/kernel/sched/features.h
-@@ -6,7 +6,11 @@
-  */
- SCHED_FEAT(PLACE_LAG, true)
- SCHED_FEAT(PLACE_DEADLINE_INITIAL, true)
-+#ifdef CONFIG_SCHED_BORE
-+SCHED_FEAT(RUN_TO_PARITY, false)
-+#else // !CONFIG_SCHED_BORE
- SCHED_FEAT(RUN_TO_PARITY, true)
-+#endif // CONFIG_SCHED_BORE
-
- /*
-  * Prefer to schedule the task we woke last (assuming it failed
-diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
-index 001fe047bd..da3ad1d4e1 100644
---- a/kernel/sched/sched.h
-+++ b/kernel/sched/sched.h
-@@ -1965,7 +1965,11 @@ static inline void dirty_sched_domain_sysctl(int cpu)
- }
- #endif
-
-+#ifdef CONFIG_SCHED_BORE
-+extern void sched_update_min_base_slice(void);
-+#else // !CONFIG_SCHED_BORE
- extern int sched_update_scaling(void);
-+#endif // CONFIG_SCHED_BORE
-
- static inline const struct cpumask *task_user_cpus(struct task_struct *p)
- {
-@@ -2552,6 +2556,9 @@ extern const_debug unsigned int sysctl_sched_nr_migrate;
- extern const_debug unsigned int sysctl_sched_migration_cost;
-
- extern unsigned int sysctl_sched_base_slice;
-+#ifdef CONFIG_SCHED_BORE
-+extern unsigned int sysctl_sched_min_base_slice;
-+#endif // CONFIG_SCHED_BORE
-
- #ifdef CONFIG_SCHED_DEBUG
- extern int sysctl_resched_latency_warn_ms;
--
-2.34.1
-
diff --git a/sys-kernel/linux-sources-redcore/files/6.8-amd64.config b/sys-kernel/linux-sources-redcore/files/6.8-amd64.config
index 3c8ea00f..da53b1c0 100644
--- a/sys-kernel/linux-sources-redcore/files/6.8-amd64.config
+++ b/sys-kernel/linux-sources-redcore/files/6.8-amd64.config
@@ -1,6 +1,6 @@
 #
 # Automatically generated file; DO NOT EDIT.
-# Linux/x86 6.8.7-redcore-r1 Kernel Configuration
+# Linux/x86 6.8.9-redcore Kernel Configuration
 #
 CONFIG_CC_VERSION_TEXT="gcc (Gentoo Hardened 13.2.0-r15 p3) 13.2.0"
 CONFIG_CC_IS_GCC=y
@@ -233,7 +233,6 @@ CONFIG_USER_NS_UNPRIVILEGED=y
 CONFIG_PID_NS=y
 CONFIG_NET_NS=y
 CONFIG_CHECKPOINT_RESTORE=y
-CONFIG_SCHED_BORE=y
 CONFIG_SCHED_AUTOGROUP=y
 CONFIG_RELAY=y
 CONFIG_BLK_DEV_INITRD=y
@@ -524,7 +523,7 @@ CONFIG_CALL_PADDING=y
 CONFIG_HAVE_CALL_THUNKS=y
 CONFIG_CALL_THUNKS=y
 CONFIG_PREFIX_SYMBOLS=y
-CONFIG_SPECULATION_MITIGATIONS=y
+CONFIG_CPU_MITIGATIONS=y
 CONFIG_PAGE_TABLE_ISOLATION=y
 CONFIG_RETPOLINE=y
 CONFIG_RETHUNK=y
@@ -750,6 +749,7 @@ CONFIG_AS_SHA256_NI=y
 CONFIG_AS_TPAUSE=y
 CONFIG_AS_GFNI=y
 CONFIG_AS_WRUSS=y
+CONFIG_ARCH_CONFIGURES_CPU_MITIGATIONS=y
 
 #
 # General architecture-dependent options
diff --git a/sys-kernel/linux-sources-redcore/linux-sources-redcore-6.8.8.ebuild b/sys-kernel/linux-sources-redcore/linux-sources-redcore-6.8.9.ebuild
index 3ab9b1d4..9a4b2777 100644
--- a/sys-kernel/linux-sources-redcore/linux-sources-redcore-6.8.8.ebuild
+++ b/sys-kernel/linux-sources-redcore/linux-sources-redcore-6.8.9.ebuild
@@ -28,7 +28,6 @@ DEPEND="
 RDEPEND="${DEPEND}"
 
 PATCHES=(
-	"${FILESDIR}"/"${KV_MAJOR}"-0001-linux6.8.y-bore5.1.0.patch
 	"${FILESDIR}"/"${KV_MAJOR}"-ath10k-be-quiet.patch
 	"${FILESDIR}"/"${KV_MAJOR}"-ata-fix-NCQ-LOG-strings-and-move-to-debug.patch
 	"${FILESDIR}"/"${KV_MAJOR}"-acpi-use-kern_warning_even_when_error.patch
```
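The bulk of this commit is the deletion of the 6.8 BORE patch, which also disappears from the ebuild's `PATCHES` array and (as `CONFIG_SCHED_BORE=y`) from the shipped kernel config. For a sense of what the dropped patch computed, here is a minimal userspace C sketch of its burst-penalty math (`log2plus1_u64_u32f8()` and `calc_burst_penalty()` above), using the patch's default `sched_burst_penalty_offset = 22` and `sched_burst_penalty_scale = 1280`. The `fls64_()` helper and the sample burst durations are illustrative stand-ins, not part of the patch:

```c
#include <stdint.h>
#include <stdio.h>

/* Defaults from the removed patch. */
static const uint32_t penalty_offset = 22;   /* sched_burst_penalty_offset */
static const uint32_t penalty_scale  = 1280; /* sched_burst_penalty_scale  */
#define MAX_BURST_PENALTY (39U << 2)

/* Portable stand-in for the kernel's fls64(): 1-based index of the
 * most significant set bit, 0 when v == 0. */
static uint32_t fls64_(uint64_t v) {
    uint32_t msb = 0;
    while (v) { msb++; v >>= 1; }
    return msb;
}

/* log2(v)+1 as unsigned fixed-point with 8 fractional bits,
 * mirroring log2plus1_u64_u32f8() in the patch (fractional part
 * deliberately truncates to 8 bits, as the kernel code's u8 does). */
static uint32_t log2plus1_u64_u32f8(uint64_t v) {
    uint32_t msb = fls64_(v);
    int32_t excess_bits = msb - 9;
    uint8_t fractional = (0 <= excess_bits) ? v >> excess_bits : v << -excess_bits;
    return msb << 8 | fractional;
}

/* calc_burst_penalty() from the patch: the penalty grows with the
 * log2 of the burst time past a fixed offset, scaled and capped. */
static uint32_t calc_burst_penalty(uint64_t burst_time_ns) {
    uint32_t greed = log2plus1_u64_u32f8(burst_time_ns);
    uint32_t tolerance = penalty_offset << 8;
    int32_t diff = (int32_t)greed - (int32_t)tolerance;
    uint32_t penalty = diff > 0 ? (uint32_t)diff : 0;
    uint32_t scaled = penalty * penalty_scale >> 16;
    return scaled < MAX_BURST_PENALTY ? scaled : MAX_BURST_PENALTY;
}

int main(void) {
    /* Sample burst lengths (illustrative, not from the source). */
    const uint64_t samples_ns[] = { 1000000ULL, 100000000ULL, 10000000000ULL };
    for (unsigned i = 0; i < 3; i++) {
        uint32_t p = calc_burst_penalty(samples_ns[i]);
        printf("burst %11llu ns -> penalty %3u, score %2u\n",
               (unsigned long long)samples_ns[i], (unsigned)p, (unsigned)(p >> 2));
    }
    return 0;
}
```

With these defaults the penalty stays at zero until a task has run roughly 2^21 ns (about 2 ms) without sleeping or yielding, then grows logarithmically up to the cap of 156; `burst_score = penalty >> 2` is the nice-like offset (0 to 39) that the patch added on top of the task's static priority and fed into `reweight_task()`.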