authorV3n3RiX <venerix@redcorelinux.org>2019-05-18 00:37:02 +0100
committerV3n3RiX <venerix@redcorelinux.org>2019-05-18 00:37:02 +0100
commit4b2cc4a30a78eda5b747932226cf16edd8c125b6 (patch)
treeb7412a5ac3b7911e1590dd81059ca0277725b178
parentbe8120dc323a345ad0eb2d0f25d62ea8f824d74c (diff)
sys-kernel/linux-{image,sources}-redcore-lts : drop 4.14
-rw-r--r--sys-kernel/linux-image-redcore-lts/files/4.14-0001-MuQSS-version-0.162-CPU-scheduler-linux-hardened.patch9560
-rw-r--r--sys-kernel/linux-image-redcore-lts/files/4.14-0002-Make-preemptible-kernel-default.patch733
-rw-r--r--sys-kernel/linux-image-redcore-lts/files/4.14-0003-Expose-vmsplit-for-our-poor-32-bit-users.patch48
-rw-r--r--sys-kernel/linux-image-redcore-lts/files/4.14-0004-Create-highres-timeout-variants-of-schedule_timeout-.patch153
-rw-r--r--sys-kernel/linux-image-redcore-lts/files/4.14-0005-Special-case-calls-of-schedule_timeout-1-to-use-the-.patch50
-rw-r--r--sys-kernel/linux-image-redcore-lts/files/4.14-0006-Convert-msleep-to-use-hrtimers-when-active.patch54
-rw-r--r--sys-kernel/linux-image-redcore-lts/files/4.14-0007-Replace-all-schedule-timeout-1-with-schedule_min_hrt.patch529
-rw-r--r--sys-kernel/linux-image-redcore-lts/files/4.14-0008-Replace-all-calls-to-schedule_timeout_interruptible-.patch311
-rw-r--r--sys-kernel/linux-image-redcore-lts/files/4.14-0009-Replace-all-calls-to-schedule_timeout_uninterruptibl.patch160
-rw-r--r--sys-kernel/linux-image-redcore-lts/files/4.14-0010-Don-t-use-hrtimer-overlay-when-pm_freezing-since-som.patch69
-rw-r--r--sys-kernel/linux-image-redcore-lts/files/4.14-0011-Make-hrtimer-granularity-and-minimum-hrtimeout-confi.patch136
-rw-r--r--sys-kernel/linux-image-redcore-lts/files/4.14-0012-Reinstate-default-Hz-of-100-in-combination-with-MuQS.patch81
-rw-r--r--sys-kernel/linux-image-redcore-lts/files/4.14-0013-Make-threaded-IRQs-optionally-the-default-which-can-.patch61
-rw-r--r--sys-kernel/linux-image-redcore-lts/files/4.14-0014-Swap-sucks.patch25
-rw-r--r--sys-kernel/linux-image-redcore-lts/files/4.14-0015-MuQSS.c-needs-irq_regs.h-to-use-get_irq_regs.patch19
-rw-r--r--sys-kernel/linux-image-redcore-lts/files/4.14-0016-unfuck-MuQSS-on-linux-4_14_15+.patch48
-rw-r--r--sys-kernel/linux-image-redcore-lts/files/4.14-0017-unfuck-MuQSS-on-linux-4_14_75+.patch14
-rw-r--r--sys-kernel/linux-image-redcore-lts/files/4.14-Revert-ath10k-activate-user-space-firmware-loading.patch13
-rw-r--r--sys-kernel/linux-image-redcore-lts/files/4.14-amd64.config9101
-rw-r--r--sys-kernel/linux-image-redcore-lts/files/4.14-introduce-NUMA-identity-node-sched-domain.patch46
-rw-r--r--sys-kernel/linux-image-redcore-lts/files/4.14-k10temp-add-ZEN-support.patch177
-rw-r--r--sys-kernel/linux-image-redcore-lts/files/4.14-linux-hardened.patch2868
-rw-r--r--sys-kernel/linux-image-redcore-lts/files/4.14-mute-pps_state_mismatch.patch16
-rw-r--r--sys-kernel/linux-image-redcore-lts/files/4.14-restore-SD_PREFER_SIBLING-on-MC-domains.patch12
-rw-r--r--sys-kernel/linux-image-redcore-lts/files/4.14-uksm-linux-hardened.patch6919
-rw-r--r--sys-kernel/linux-image-redcore-lts/linux-image-redcore-lts-4.14.95-r1.ebuild165
-rw-r--r--sys-kernel/linux-image-redcore-lts/linux-image-redcore-lts-4.19.20-r2.ebuild (renamed from sys-kernel/linux-image-redcore-lts/linux-image-redcore-lts-4.19.20-r1.ebuild)2
-rw-r--r--sys-kernel/linux-sources-redcore-lts/files/4.14-0001-MuQSS-version-0.162-CPU-scheduler-linux-hardened.patch9560
-rw-r--r--sys-kernel/linux-sources-redcore-lts/files/4.14-0002-Make-preemptible-kernel-default.patch733
-rw-r--r--sys-kernel/linux-sources-redcore-lts/files/4.14-0003-Expose-vmsplit-for-our-poor-32-bit-users.patch48
-rw-r--r--sys-kernel/linux-sources-redcore-lts/files/4.14-0004-Create-highres-timeout-variants-of-schedule_timeout-.patch153
-rw-r--r--sys-kernel/linux-sources-redcore-lts/files/4.14-0005-Special-case-calls-of-schedule_timeout-1-to-use-the-.patch50
-rw-r--r--sys-kernel/linux-sources-redcore-lts/files/4.14-0006-Convert-msleep-to-use-hrtimers-when-active.patch54
-rw-r--r--sys-kernel/linux-sources-redcore-lts/files/4.14-0007-Replace-all-schedule-timeout-1-with-schedule_min_hrt.patch529
-rw-r--r--sys-kernel/linux-sources-redcore-lts/files/4.14-0008-Replace-all-calls-to-schedule_timeout_interruptible-.patch311
-rw-r--r--sys-kernel/linux-sources-redcore-lts/files/4.14-0009-Replace-all-calls-to-schedule_timeout_uninterruptibl.patch160
-rw-r--r--sys-kernel/linux-sources-redcore-lts/files/4.14-0010-Don-t-use-hrtimer-overlay-when-pm_freezing-since-som.patch69
-rw-r--r--sys-kernel/linux-sources-redcore-lts/files/4.14-0011-Make-hrtimer-granularity-and-minimum-hrtimeout-confi.patch136
-rw-r--r--sys-kernel/linux-sources-redcore-lts/files/4.14-0012-Reinstate-default-Hz-of-100-in-combination-with-MuQS.patch81
-rw-r--r--sys-kernel/linux-sources-redcore-lts/files/4.14-0013-Make-threaded-IRQs-optionally-the-default-which-can-.patch61
-rw-r--r--sys-kernel/linux-sources-redcore-lts/files/4.14-0014-Swap-sucks.patch25
-rw-r--r--sys-kernel/linux-sources-redcore-lts/files/4.14-0015-MuQSS.c-needs-irq_regs.h-to-use-get_irq_regs.patch19
-rw-r--r--sys-kernel/linux-sources-redcore-lts/files/4.14-0016-unfuck-MuQSS-on-linux-4_14_15+.patch48
-rw-r--r--sys-kernel/linux-sources-redcore-lts/files/4.14-0017-unfuck-MuQSS-on-linux-4_14_75+.patch14
-rw-r--r--sys-kernel/linux-sources-redcore-lts/files/4.14-Revert-ath10k-activate-user-space-firmware-loading.patch13
-rw-r--r--sys-kernel/linux-sources-redcore-lts/files/4.14-amd64.config9101
-rw-r--r--sys-kernel/linux-sources-redcore-lts/files/4.14-introduce-NUMA-identity-node-sched-domain.patch46
-rw-r--r--sys-kernel/linux-sources-redcore-lts/files/4.14-k10temp-add-ZEN-support.patch177
-rw-r--r--sys-kernel/linux-sources-redcore-lts/files/4.14-linux-hardened.patch2868
-rw-r--r--sys-kernel/linux-sources-redcore-lts/files/4.14-mute-pps_state_mismatch.patch16
-rw-r--r--sys-kernel/linux-sources-redcore-lts/files/4.14-restore-SD_PREFER_SIBLING-on-MC-domains.patch12
-rw-r--r--sys-kernel/linux-sources-redcore-lts/files/4.14-uksm-linux-hardened.patch6919
-rw-r--r--sys-kernel/linux-sources-redcore-lts/linux-sources-redcore-lts-4.14.95-r1.ebuild90
-rw-r--r--sys-kernel/linux-sources-redcore-lts/linux-sources-redcore-lts-4.19.20-r2.ebuild (renamed from sys-kernel/linux-sources-redcore-lts/linux-sources-redcore-lts-4.19.20-r1.ebuild)2
54 files changed, 2 insertions, 62663 deletions
diff --git a/sys-kernel/linux-image-redcore-lts/files/4.14-0001-MuQSS-version-0.162-CPU-scheduler-linux-hardened.patch b/sys-kernel/linux-image-redcore-lts/files/4.14-0001-MuQSS-version-0.162-CPU-scheduler-linux-hardened.patch
deleted file mode 100644
index a81dbeac..00000000
--- a/sys-kernel/linux-image-redcore-lts/files/4.14-0001-MuQSS-version-0.162-CPU-scheduler-linux-hardened.patch
+++ /dev/null
@@ -1,9560 +0,0 @@
-diff -Nur a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
---- a/arch/powerpc/platforms/cell/spufs/sched.c 2018-12-21 13:13:19.000000000 +0000
-+++ b/arch/powerpc/platforms/cell/spufs/sched.c 2019-01-05 20:22:51.089998199 +0000
-@@ -65,11 +65,6 @@
- static struct timer_list spuloadavg_timer;
-
- /*
-- * Priority of a normal, non-rt, non-niced'd process (aka nice level 0).
-- */
--#define NORMAL_PRIO 120
--
--/*
- * Frequency of the spu scheduler tick. By default we do one SPU scheduler
- * tick for every 10 CPU scheduler ticks.
- */
-diff -Nur a/arch/x86/Kconfig b/arch/x86/Kconfig
---- a/arch/x86/Kconfig 2019-01-05 20:17:13.829237906 +0000
-+++ b/arch/x86/Kconfig 2019-01-05 20:30:14.244135060 +0000
-@@ -957,6 +957,20 @@
- config SCHED_SMT
- def_bool y if SMP
-
-+config SMT_NICE
-+ bool "SMT (Hyperthreading) aware nice priority and policy support"
-+ depends on SCHED_MUQSS && SCHED_SMT
-+ default y
-+ ---help---
-+ Enabling Hyperthreading on Intel CPUs decreases the effectiveness
-+ of the use of 'nice' levels and different scheduling policies
-+ (e.g. realtime) due to sharing of CPU power between hyperthreads.
-+ SMT nice support makes each logical CPU aware of what is running on
-+ its hyperthread siblings, maintaining appropriate distribution of
-+ CPU according to nice levels and scheduling policies at the expense
-+ of slightly increased overhead.
-+ If unsure say Y here.
-+
- config SCHED_MC
- def_bool y
- prompt "Multi-core scheduler support"
-diff -Nur a/Documentation/scheduler/sched-BFS.txt b/Documentation/scheduler/sched-BFS.txt
---- a/Documentation/scheduler/sched-BFS.txt 1970-01-01 01:00:00.000000000 +0100
-+++ b/Documentation/scheduler/sched-BFS.txt 2019-01-05 20:22:51.089998199 +0000
-@@ -0,0 +1,351 @@
-+BFS - The Brain Fuck Scheduler by Con Kolivas.
-+
-+Goals.
-+
-+The goal of the Brain Fuck Scheduler, referred to as BFS from here on, is to
-+completely do away with the complex designs of the past for the cpu process
-+scheduler and instead implement one that is very simple in basic design.
-+The main focus of BFS is to achieve excellent desktop interactivity and
-+responsiveness without heuristics and tuning knobs that are difficult to
-+understand, impossible to model and predict the effect of, and when tuned to
-+one workload cause massive detriment to another.
-+
-+
-+Design summary.
-+
-+BFS is best described as a single runqueue, O(n) lookup, earliest effective
-+virtual deadline first design, loosely based on EEVDF (earliest eligible virtual
-+deadline first) and my previous Staircase Deadline scheduler. Each component
-+shall be described in order to understand the significance of, and reasoning for
-+it. The codebase when the first stable version was released was approximately
-+9000 lines less code than the existing mainline linux kernel scheduler (in
-+2.6.31). This does not even take into account the removal of documentation and
-+the cgroups code that is not used.
-+
-+Design reasoning.
-+
-+The single runqueue refers to the queued but not running processes for the
-+entire system, regardless of the number of CPUs. The reason for going back to
-+a single runqueue design is that once multiple runqueues are introduced,
-+per-CPU or otherwise, there will be complex interactions as each runqueue will
-+be responsible for the scheduling latency and fairness of the tasks only on its
-+own runqueue, and to achieve fairness and low latency across multiple CPUs, any
-+advantage in throughput of having CPU local tasks causes other disadvantages.
-+This is due to requiring a very complex balancing system to at best achieve some
-+semblance of fairness across CPUs and can only maintain relatively low latency
-+for tasks bound to the same CPUs, not across them. To increase said fairness
-+and latency across CPUs, the advantage of local runqueue locking, which makes
-+for better scalability, is lost due to having to grab multiple locks.
-+
-+A significant feature of BFS is that all accounting is done purely based on CPU
-+used and nowhere is sleep time used in any way to determine entitlement or
-+interactivity. Interactivity "estimators" that use some kind of sleep/run
-+algorithm are doomed to fail to detect all interactive tasks, and to falsely tag
-+tasks that aren't interactive as being so. The reason for this is that it is
-+close to impossible to determine, when a task is sleeping, whether it is
-+doing it voluntarily, as in a userspace application waiting for input in the
-+form of a mouse click or otherwise, or involuntarily, because it is waiting for
-+another thread, process, I/O, kernel activity or whatever. Thus, such an
-+estimator will introduce corner cases, and more heuristics will be required to
-+cope with those corner cases, introducing more corner cases and failed
-+interactivity detection and so on. Interactivity in BFS is built into the design
-+by virtue of the fact that tasks that are waking up have not used up their quota
-+of CPU time, and have earlier effective deadlines, thereby making it very likely
-+they will preempt any CPU bound task of equivalent nice level. See below for
-+more information on the virtual deadline mechanism. Even if they do not preempt
-+a running task, because the rr interval is guaranteed to have a bounded upper
-+limit on how long a task will wait for, it will be scheduled within a timeframe
-+that will not cause visible interface jitter.
-+
-+
-+Design details.
-+
-+Task insertion.
-+
-+BFS inserts tasks into each relevant queue as an O(1) insertion into a doubly
-+linked list. On insertion, *every* running queue is checked to see if the newly
-+queued task can run on any idle queue, or preempt the lowest running task on the
-+system. This is how the cross-CPU scheduling of BFS achieves significantly lower
-+latency per extra CPU the system has. In this case the lookup is, in the worst
-+case scenario, O(n) where n is the number of CPUs on the system.
-+
-+Data protection.
-+
-+BFS has one single lock protecting the process local data of every task in the
-+global queue. Thus every insertion, removal and modification of task data in the
-+global runqueue needs to grab the global lock. However, once a task is taken by
-+a CPU, the CPU has its own local data copy of the running process' accounting
-+information which only that CPU accesses and modifies (such as during a
-+timer tick) thus allowing the accounting data to be updated lockless. Once a
-+CPU has taken a task to run, it removes it from the global queue. Thus the
-+global queue only ever has, at most,
-+
-+ (number of tasks requesting cpu time) - (number of logical CPUs) + 1
-+
-+tasks in the global queue. This value is relevant for the time taken to look up
-+tasks during scheduling. This value will increase if many tasks with CPU
-+affinity set in their policy outnumber the CPUs they are limited to run
-+on. The +1 is because when rescheduling a task, the CPU's
-+currently running task is put back on the queue. Lookup will be described after
-+the virtual deadline mechanism is explained.
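-+For example, on a machine with 4 logical CPUs and 16 tasks requesting cpu
-+time, this works out to at most 16 - 4 + 1 = 13 queued tasks to scan at
-+lookup time.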
-+
-+Virtual deadline.
-+
-+The key to achieving low latency, scheduling fairness, and "nice level"
-+distribution in BFS is entirely in the virtual deadline mechanism. The one
-+tunable in BFS is the rr_interval, or "round robin interval". This is the
-+maximum time two SCHED_OTHER (or SCHED_NORMAL, the common scheduling policy)
-+tasks of the same nice level will be running for, or looking at it the other
-+way around, the longest duration two tasks of the same nice level will be
-+delayed for. When a task requests cpu time, it is given a quota (time_slice)
-+equal to the rr_interval and a virtual deadline. The virtual deadline is
-+offset from the current time in jiffies by this equation:
-+
-+ jiffies + (prio_ratio * rr_interval)
-+
-+The prio_ratio is determined as a ratio compared to the baseline of nice -20
-+and increases by 10% per nice level. The deadline is a virtual one only in that
-+no guarantee is placed that a task will actually be scheduled by this time, but
-+it is used to compare which task should go next. There are three components to
-+how a task is next chosen. First is time_slice expiration. If a task runs out
-+of its time_slice, it is descheduled, the time_slice is refilled, and the
-+deadline reset to that formula above. Second is sleep, where a task no longer
-+is requesting CPU for whatever reason. The time_slice and deadline are _not_
-+adjusted in this case and are just carried over for when the task is next
-+scheduled. Third is preemption, and that is when a newly waking task is deemed
-+higher priority than a currently running task on any cpu by virtue of the fact
-+that it has an earlier virtual deadline than the currently running task. The
-+earlier deadline is the key to which task is next chosen for the first and
-+second cases. Once a task is descheduled, it is put back on the queue, and an
-+O(n) lookup of all queued-but-not-running tasks is done to determine which has
-+the earliest deadline and that task is chosen to receive CPU next.
-+
-+The CPU proportion of different nice tasks works out to be approximately the
-+
-+ (prio_ratio difference)^2
-+
-+The reason it is squared is that a task's deadline does not change while it is
-+running unless it runs out of time_slice. Thus, even if the time actually
-+passes the deadline of another task that is queued, it will not get CPU time
-+unless the current running task deschedules, and the time "base" (jiffies) is
-+constantly moving.
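-+
-+As a concrete illustration, the ratio table and deadline offset described
-+above can be sketched in C. This is not the BFS code itself: the
-+percent-based table and every name below are assumptions made purely for
-+illustration.
-+
-+    /* Hypothetical sketch, not kernel code. The ratio baseline is
-+     * 100% at nice -20 and rises by 10% per nice level. */
-+    static unsigned long example_prio_ratio[41]; /* index 0 == nice -20 */
-+
-+    static void example_init_prio_ratios(void)
-+    {
-+            int i;
-+
-+            example_prio_ratio[0] = 100;
-+            for (i = 1; i < 41; i++)
-+                    example_prio_ratio[i] =
-+                            (example_prio_ratio[i - 1] * 11) / 10;
-+    }
-+
-+    /* Virtual deadline: jiffies + (prio_ratio * rr_interval). */
-+    static unsigned long example_deadline(unsigned long jiffies_now,
-+                                          int nice,
-+                                          unsigned long rr_interval)
-+    {
-+            return jiffies_now +
-+                   (example_prio_ratio[nice + 20] * rr_interval) / 100;
-+    }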
-+
-+Task lookup.
-+
-+BFS has 103 priority queues. 100 of these are dedicated to the static priority
-+of realtime tasks, and the remaining 3 are, in order of best to worst priority,
-+SCHED_ISO (isochronous), SCHED_NORMAL, and SCHED_IDLEPRIO (idle priority
-+scheduling). When a task of these priorities is queued, a bitmap of running
-+priorities is set showing which of these priorities has tasks waiting for CPU
-+time. When a CPU is made to reschedule, the lookup for the next task to get
-+CPU time is performed in the following way:
-+
-+First the bitmap is checked to see what static priority tasks are queued. If
-+any realtime priorities are found, the corresponding queue is checked and the
-+first task listed there is taken (provided CPU affinity is suitable) and lookup
-+is complete. If the priority corresponds to a SCHED_ISO task, they are also
-+taken in FIFO order (as they behave like SCHED_RR). If the priority corresponds
-+to either SCHED_NORMAL or SCHED_IDLEPRIO, then the lookup becomes O(n). At this
-+stage, every task in the runlist that corresponds to that priority is checked
-+to see which has the earliest set deadline, and (provided it has suitable CPU
-+affinity) it is taken off the runqueue and given the CPU. If a task has an
-+expired deadline, it is taken and the rest of the lookup aborted (as they are
-+chosen in FIFO order).
-+
-+Thus, the lookup is O(n) in the worst case only, where n is as described
-+earlier, as tasks may be chosen before the whole task list is looked over.
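-+
-+A hedged sketch of that bitmap-guided selection in C follows; every name
-+is invented for illustration (the real code uses the kernel's own bitmap
-+helpers):
-+
-+    /* Hypothetical sketch, not BFS code: 100 realtime queues, then
-+     * ISO, NORMAL and IDLEPRIO, exactly as described above. */
-+    #define EXAMPLE_PRIO_QUEUES 103
-+    #define EXAMPLE_LONG_BITS (8 * sizeof(unsigned long))
-+
-+    static int example_best_prio(const unsigned long *bitmap)
-+    {
-+            unsigned int prio;
-+
-+            for (prio = 0; prio < EXAMPLE_PRIO_QUEUES; prio++)
-+                    if (bitmap[prio / EXAMPLE_LONG_BITS] &
-+                        (1UL << (prio % EXAMPLE_LONG_BITS)))
-+                            return prio; /* best queue with waiters */
-+            return -1; /* nothing queued */
-+    }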
-+
-+
-+Scalability.
-+
-+The major limitations of BFS will be that of scalability, as the separate
-+runqueue designs will have less lock contention as the number of CPUs rises.
-+However they do not scale linearly even with separate runqueues as multiple
-+runqueues will need to be locked concurrently on such designs to be able to
-+achieve fair CPU balancing, to try and achieve some sort of nice-level fairness
-+across CPUs, and to achieve low enough latency for tasks on a busy CPU when
-+other CPUs would be more suited. BFS has the advantage that it requires no
-+balancing algorithm whatsoever, as balancing occurs by proxy simply because
-+all CPUs draw off the global runqueue, in priority and deadline order. Despite
-+the fact that scalability is _not_ the prime concern of BFS, it both shows very
-+good scalability to smaller numbers of CPUs and is likely a more scalable design
-+at these numbers of CPUs.
-+
-+It also has some very low overhead scalability features built into the design
-+when it has been deemed their overhead is so marginal that they're worth adding.
-+The first is the local copy of the running process' data to the CPU it's running
-+on to allow that data to be updated lockless where possible. Then there is
-+deference paid to the last CPU a task was running on, by trying that CPU first
-+when looking for an idle CPU to use the next time it's scheduled. Finally there
-+is the notion of cache locality beyond the last running CPU. The sched_domains
-+information is used to determine the relative virtual "cache distance" that
-+other CPUs have from the last CPU a task was running on. CPUs with shared
-+caches, such as SMT siblings, or multicore CPUs with shared caches, are treated
-+as cache local. CPUs without shared caches are treated as not cache local, and
-+CPUs on different NUMA nodes are treated as very distant. This "relative cache
-+distance" is used by modifying the virtual deadline value when doing lookups.
-+Effectively, the deadline is unaltered between "cache local" CPUs, doubled for
-+"cache distant" CPUs, and quadrupled for "very distant" CPUs. The reasoning
-+behind the doubling of deadlines is as follows. The real cost of migrating a
-+task from one CPU to another is entirely dependant on the cache footprint of
-+the task, how cache intensive the task is, how long it's been running on that
-+CPU to take up the bulk of its cache, how big the CPU cache is, how fast and
-+how layered the CPU cache is, how fast a context switch is... and so on. In
-+other words, it's close to random in the real world where we do more than just
-+one sole workload. The only thing we can be sure of is that it's not free. So
-+BFS uses the principle that an idle CPU is a wasted CPU and utilising idle CPUs
-+is more important than cache locality, and cache locality only plays a part
-+after that. Doubling the effective deadline is based on the premise that the
-+"cache local" CPUs will tend to work on the same tasks up to double the number
-+of cache local CPUs, and once the workload is beyond that amount, it is likely
-+that none of the tasks are cache warm anywhere anyway. The quadrupling for NUMA
-+is a value I pulled out of my arse.
-+
-+When choosing an idle CPU for a waking task, the cache locality is determined
-+according to where the task last ran and then idle CPUs are ranked from best
-+to worst to choose the most suitable idle CPU based on cache locality, NUMA
-+node locality and hyperthread sibling busyness. They are chosen in the
-+following preference (if idle):
-+
-+* Same core, idle or busy cache, idle threads
-+* Other core, same cache, idle or busy cache, idle threads.
-+* Same node, other CPU, idle cache, idle threads.
-+* Same node, other CPU, busy cache, idle threads.
-+* Same core, busy threads.
-+* Other core, same cache, busy threads.
-+* Same node, other CPU, busy threads.
-+* Other node, other CPU, idle cache, idle threads.
-+* Other node, other CPU, busy cache, idle threads.
-+* Other node, other CPU, busy threads.
-+
-+This shows the SMT or "hyperthread" awareness in the design as well which will
-+choose a real idle core first before a logical SMT sibling which already has
-+tasks on the physical CPU.
-+
-+Early benchmarking of BFS suggested scalability dropped off at the 16 CPU mark.
-+However this benchmarking was performed on an earlier design that was far less
-+scalable than the current one so it's hard to know how scalable it is in terms
-+of both CPUs (due to the global runqueue) and heavily loaded machines (due to
-+O(n) lookup) at this stage. Note that in terms of scalability, the number of
-+_logical_ CPUs matters, not the number of _physical_ CPUs. Thus, a dual (2x)
-+quad core (4X) hyperthreaded (2X) machine is effectively a 16X. Newer benchmark
-+results are very promising indeed, without needing to tweak any knobs, features
-+or options. Benchmark contributions are most welcome.
-+
-+
-+Features
-+
-+As the initial prime target audience for BFS was the average desktop user, it
-+was designed to not need tweaking, tuning or have features set to obtain benefit
-+from it. Thus the number of knobs and features has been kept to an absolute
-+minimum and should not require extra user input for the vast majority of cases.
-+There are precisely 2 tunables, and 2 extra scheduling policies. The rr_interval
-+and iso_cpu tunables, and the SCHED_ISO and SCHED_IDLEPRIO policies. In addition
-+to this, BFS also uses sub-tick accounting. What BFS does _not_ now feature is
-+support for CGROUPS. The average user should neither need to know what these
-+are, nor should they need to be using them to have good desktop behaviour.
-+
-+rr_interval
-+
-+There is only one "scheduler" tunable, the round robin interval. This can be
-+accessed in
-+
-+ /proc/sys/kernel/rr_interval
-+
-+The value is in milliseconds, and the default value is set to 6 on a
-+uniprocessor machine, and automatically set to a progressively higher value on
-+multiprocessor machines. The reasoning behind increasing the value on more CPUs
-+is that the effective latency is decreased by virtue of there being more CPUs on
-+BFS (for reasons explained above), and increasing the value allows for less
-+cache contention and more throughput. Valid values are from 1 to 1000.
-+Decreasing the value will decrease latencies at the cost of decreasing
-+throughput, while increasing it will improve throughput, but at the cost of
-+worsening latencies. The accuracy of the rr interval is limited by HZ resolution
-+of the kernel configuration. Thus, the worst case latencies are usually slightly
-+higher than this actual value. The default value of 6 is not an arbitrary one.
-+It is based on the fact that humans can detect jitter at approximately 7ms, so
-+aiming for much lower latencies is pointless under most circumstances. It is
-+worth noting this fact when comparing the latency performance of BFS to other
-+schedulers. Worst case latencies being higher than 7ms are far worse than
-+average latencies not being in the microsecond range.
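-+
-+For example, lowering the interval on a latency-sensitive machine is a
-+single procfs write (as root):
-+
-+    echo 2 > /proc/sys/kernel/rr_interval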
-+
-+Isochronous scheduling.
-+
-+Isochronous scheduling is a unique scheduling policy designed to provide
-+near-real-time performance to unprivileged (ie non-root) users without the
-+ability to starve the machine indefinitely. Isochronous tasks (which means
-+"same time") are set using, for example, the schedtool application like so:
-+
-+ schedtool -I -e amarok
-+
-+This will start the audio application "amarok" as SCHED_ISO. How SCHED_ISO works
-+is that it has a priority level between true realtime tasks and SCHED_NORMAL
-+which would allow them to preempt all normal tasks, in a SCHED_RR fashion (ie,
-+if multiple SCHED_ISO tasks are running, they purely round robin at rr_interval
-+rate). However if ISO tasks run for more than a tunable finite amount of time,
-+they are then demoted back to SCHED_NORMAL scheduling. This finite amount of
-+time is the percentage of _total CPU_ available across the machine, configurable
-+as a percentage in the following "resource handling" tunable (as opposed to a
-+scheduler tunable):
-+
-+ /proc/sys/kernel/iso_cpu
-+
-+and is set to 70% by default. It is calculated over a rolling 5 second average.
-+Because it is the total CPU available, it means that on a multi CPU machine, it
-+is possible to have an ISO task running as realtime scheduling indefinitely on
-+just one CPU, as the other CPUs will be available. Setting this to 100 is the
-+equivalent of giving all users SCHED_RR access and setting it to 0 removes the
-+ability to run any pseudo-realtime tasks.
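-+
-+For example, halving the default cap is simply:
-+
-+    echo 35 > /proc/sys/kernel/iso_cpu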
-+
-+A feature of BFS is that it detects when an application tries to obtain a
-+realtime policy (SCHED_RR or SCHED_FIFO) and the caller does not have the
-+appropriate privileges to use those policies. When it detects this, it will
-+give the task SCHED_ISO policy instead. Thus it is transparent to the user.
-+Because some applications constantly set their policy as well as their nice
-+level, there is potential for them to undo the override specified by the user
-+on the command line when setting the policy to SCHED_ISO. To counter this, once
-+a task has been set to SCHED_ISO policy, it needs superuser privileges to set
-+it back to SCHED_NORMAL. This will ensure the task remains ISO and all child
-+processes and threads will also inherit the ISO policy.
-+
-+Idleprio scheduling.
-+
-+Idleprio scheduling is a scheduling policy designed to give out CPU to a task
-+_only_ when the CPU would be otherwise idle. The idea behind this is to allow
-+ultra low priority tasks to be run in the background that have virtually no
-+effect on the foreground tasks. This is ideally suited to distributed computing
-+clients (like setiathome, folding, mprime etc) but can also be used to start
-+a video encode or so on without any slowdown of other tasks. To prevent this
-+policy from grabbing shared resources and holding them indefinitely, if it
-+detects a state where the task is waiting on I/O, the machine is about to
-+suspend to ram and so on, it will transiently schedule them as SCHED_NORMAL. As
-+per the Isochronous task management, once a task has been scheduled as IDLEPRIO,
-+it cannot be put back to SCHED_NORMAL without superuser privileges. Tasks can
-+be set to start as SCHED_IDLEPRIO with the schedtool command like so:
-+
-+ schedtool -D -e ./mprime
-+
-+Subtick accounting.
-+
-+It is surprisingly difficult to get accurate CPU accounting, and in many cases,
-+the accounting is done by simply determining what is happening at the precise
-+moment a timer tick fires off. This becomes increasingly inaccurate as the
-+timer tick frequency (HZ) is lowered. It is possible to create an application
-+which uses almost 100% CPU, yet by being descheduled at the right time, records
-+zero CPU usage. While the main problem with this is that there are possible
-+security implications, it is also difficult to determine how much CPU a task
-+really does use. BFS tries to use the sub-tick accounting from the TSC clock,
-+where possible, to determine real CPU usage. This is not entirely reliable, but
-+is far more likely to produce accurate CPU usage data than the existing designs
-+and will not show tasks as consuming no CPU usage when they actually are. Thus,
-+the amount of CPU reported as being used by BFS will more accurately represent
-+how much CPU the task itself is using (as is shown for example by the 'time'
-+application), so the reported values may be quite different to other schedulers.
-+Values reported as the 'load' are more prone to problems with this design, but
-+per process values are closer to real usage. When comparing throughput of BFS
-+to other designs, it is important to compare the actual completed work in terms
-+of total wall clock time taken and total work done, rather than the reported
-+"cpu usage".
-+
-+
-+Con Kolivas <kernel@kolivas.org> Fri Aug 27 2010
-diff -Nur a/Documentation/scheduler/sched-MuQSS.txt b/Documentation/scheduler/sched-MuQSS.txt
---- a/Documentation/scheduler/sched-MuQSS.txt 1970-01-01 01:00:00.000000000 +0100
-+++ b/Documentation/scheduler/sched-MuQSS.txt 2019-01-05 20:22:51.089998199 +0000
-@@ -0,0 +1,347 @@
-+MuQSS - The Multiple Queue Skiplist Scheduler by Con Kolivas.
-+
-+MuQSS is a per-cpu runqueue variant of the original BFS scheduler with
-+one 8 level skiplist per runqueue, and fine grained locking for much more
-+scalability.
-+
-+
-+Goals.
-+
-+The goal of the Multiple Queue Skiplist Scheduler, referred to as MuQSS from
-+here on (pronounced mux) is to completely do away with the complex designs of
-+the past for the cpu process scheduler and instead implement one that is very
-+simple in basic design. The main focus of MuQSS is to achieve excellent desktop
-+interactivity and responsiveness without heuristics and tuning knobs that are
-+difficult to understand, impossible to model and predict the effect of, and when
-+tuned to one workload cause massive detriment to another, while still being
-+scalable to many CPUs and processes.
-+
-+
-+Design summary.
-+
-+MuQSS is best described as per-cpu multiple runqueue, O(log n) insertion, O(1)
-+lookup, earliest effective virtual deadline first tickless design, loosely based
-+on EEVDF (earliest eligible virtual deadline first) and my previous Staircase
-+Deadline scheduler, and evolved from the single runqueue O(n) BFS scheduler.
-+Each component shall be described in order to understand the significance of,
-+and reasoning for it.
-+
-+
-+Design reasoning.
-+
-+In BFS, the use of a single runqueue across all CPUs meant that each CPU would
-+need to scan the entire runqueue looking for the process with the earliest
-+deadline and schedule that next, regardless of which CPU it originally came
-+from. This made BFS deterministic with respect to latency and provided
-+guaranteed latencies dependent on number of processes and CPUs. The single
-+runqueue, however, meant that all CPUs would compete for the single lock
-+protecting it, which would lead to increasing lock contention as the number of
-+CPUs rose and appeared to limit scalability of common workloads beyond 16
-+logical CPUs. Additionally, the O(n) lookup of the runqueue list obviously
-+increased overhead proportionate to the number of queued processes and led to
-+cache thrashing while iterating over the linked list.
-+
-+MuQSS is an evolution of BFS, designed to maintain the same scheduling
-+decision mechanism and be virtually deterministic without relying on the
-+constrained design of the single runqueue by splitting out the single runqueue
-+to be per-CPU and use skiplists instead of linked lists.
-+
-+The original reason for going back to a single runqueue design for BFS was that
-+once multiple runqueues are introduced, per-CPU or otherwise, there will be
-+complex interactions as each runqueue will be responsible for the scheduling
-+latency and fairness of the tasks only on its own runqueue, and to achieve
-+fairness and low latency across multiple CPUs, any advantage in throughput of
-+having CPU local tasks causes other disadvantages. This is due to requiring a
-+very complex balancing system to at best achieve some semblance of fairness
-+across CPUs and can only maintain relatively low latency for tasks bound to the
-+same CPUs, not across them. To increase said fairness and latency across CPUs,
-+the advantage of local runqueue locking, which makes for better scalability, is
-+lost due to having to grab multiple locks.
-+
-+MuQSS works around the problems inherent in multiple runqueue designs by
-+making its skip lists priority ordered and through novel use of lockless
-+examination of each other runqueue it can decide if it should take the earliest
-+deadline task from another runqueue for latency reasons, or for CPU balancing
-+reasons. It still does not have a balancing system, choosing to allow the
-+next task scheduling decision and task wakeup CPU choice to allow balancing to
-+happen by virtue of its choices.
-+
-+
-+Design details.
-+
-+Custom skip list implementation:
-+
-+To avoid the overhead of building up and tearing down skip list structures,
-+the variant used by MuQSS has a number of optimisations making it specific for
-+its use case in the scheduler. It uses static arrays of 8 'levels' instead of
-+building up and tearing down structures dynamically. This makes each runqueue
-+only scale O(log N) up to 64k tasks. However as there is one runqueue per CPU
-+it means that it scales O(log N) up to 64k x number of logical CPUs which is
-+far beyond the realistic task limits each CPU could handle. By being 8 levels
-+it also makes the array exactly one cacheline in size. Additionally, each
-+skip list node is bidirectional making insertion and removal amortised O(1),
-+being O(k) where k is 1-8. Uniquely, we are only ever interested in the very
-+first entry in each list at all times with MuQSS, so there is never a need to
-+do a search and thus look up is always O(1). In interactive mode, the queues
-+will be searched beyond their first entry if the first task is not suitable
-+for affinity or SMT nice reasons.
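-+
-+Under those constraints, such a node might look like the following sketch
-+(field names are assumptions, not the actual MuQSS definitions):
-+
-+    /* Hypothetical sketch: static 8-level, bidirectional node. On a
-+     * 64-bit machine the next[] array alone is one 64-byte cacheline. */
-+    #define EXAMPLE_SKIPLIST_LEVELS 8
-+
-+    struct example_skiplist_node {
-+            struct example_skiplist_node *next[EXAMPLE_SKIPLIST_LEVELS];
-+            struct example_skiplist_node *prev[EXAMPLE_SKIPLIST_LEVELS];
-+            int level;               /* highest level this node occupies */
-+            unsigned long long key;  /* static prio, then virtual deadline */
-+    };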
-+
-+Task insertion:
-+
-+MuQSS inserts tasks into a per CPU runqueue as an O(log N) insertion into
-+a custom skip list as described above (based on the original design by William
-+Pugh). Insertion is ordered in such a way that there is never a need to do a
-+search by ordering tasks according to static priority primarily, and then
-+virtual deadline at the time of insertion.
-+
-+Niffies:
-+
-+Niffies are a monotonic forward moving timer not unlike the "jiffies" but are
-+of nanosecond resolution. Niffies are calculated per-runqueue from the high
-+resolution TSC timers, and in order to maintain fairness are synchronised
-+between CPUs whenever both runqueues are locked concurrently.
-+
-+Virtual deadline:
-+
-+The key to achieving low latency, scheduling fairness, and "nice level"
-+distribution in MuQSS is entirely in the virtual deadline mechanism. The one
-+tunable in MuQSS is the rr_interval, or "round robin interval". This is the
-+maximum time two SCHED_OTHER (or SCHED_NORMAL, the common scheduling policy)
-+tasks of the same nice level will be running for, or looking at it the other
-+way around, the longest duration two tasks of the same nice level will be
-+delayed for. When a task requests cpu time, it is given a quota (time_slice)
-+equal to the rr_interval and a virtual deadline. The virtual deadline is
-+offset from the current time in niffies by this equation:
-+
-+ niffies + (prio_ratio * rr_interval)
-+
-+The prio_ratio is determined as a ratio compared to the baseline of nice -20
-+and increases by 10% per nice level. The deadline is a virtual one only in that
-+no guarantee is placed that a task will actually be scheduled by this time, but
-+it is used to compare which task should go next. There are three components to
-+how a task is next chosen. First is time_slice expiration. If a task runs out
-+of its time_slice, it is descheduled, the time_slice is refilled, and the
-+deadline reset to that formula above. Second is sleep, where a task no longer
-+is requesting CPU for whatever reason. The time_slice and deadline are _not_
-+adjusted in this case and are just carried over for when the task is next
-+scheduled. Third is preemption, and that is when a newly waking task is deemed
-+higher priority than a currently running task on any cpu by virtue of the fact
-+that it has an earlier virtual deadline than the currently running task. The
-+earlier deadline is the key to which task is next chosen for the first and
-+second cases.
-+
-+The CPU proportion of different nice tasks works out to be approximately the
-+
-+ (prio_ratio difference)^2
-+
-+The reason it is squared is that a task's deadline does not change while it is
-+running unless it runs out of time_slice. Thus, even if the time actually
-+passes the deadline of another task that is queued, it will not get CPU time
-+unless the current running task deschedules, and the time "base" (niffies) is
-+constantly moving.
-+
-+Task lookup:
-+
-+As tasks are already pre-ordered according to anticipated scheduling order in
-+the skip lists, lookup for the next suitable task per-runqueue is always a
-+matter of simply selecting the first task in the 0th level skip list entry.
-+In order to maintain optimal latency and fairness across CPUs, MuQSS does a
-+novel examination of every other runqueue in cache locality order, choosing the
-+best task across all runqueues. This provides near-determinism of how long any
-+task across the entire system may wait before receiving CPU time. The other
-+runqueues are first examined locklessly and then trylocked to minimise the
-+potential lock contention if they are likely to have a suitable better task.
-+Each other runqueue lock is only held for as long as it takes to examine the
-+entry for suitability. In "interactive" mode, the default setting, MuQSS will
-+look for the best deadline task across all CPUs, while in !interactive mode,
-+it will only select a better deadline task from another CPU if it is more
-+heavily laden than the current one.
-+
-+Lookup is therefore O(k) where k is number of CPUs.
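-+
-+In rough pseudo-C that pass looks like the following; every helper here is
-+a hypothetical stand-in, not a MuQSS function:
-+
-+    /* Hypothetical sketch of the examine-then-trylock scan above. */
-+    best = first_queued_task(this_rq);
-+    for_each_runqueue_in_cache_order(rq, this_rq) {
-+            if (!looks_better_lockless(rq, best))   /* no lock taken */
-+                    continue;
-+            if (!trylock_rq(rq))                    /* skip on contention */
-+                    continue;
-+            if (still_better_under_lock(rq, best))  /* re-check held */
-+                    best = earliest_deadline_task(rq);
-+            unlock_rq(rq);
-+    }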
-+
-+
-+Latency.
-+
-+Through the use of virtual deadlines to govern the scheduling order of normal
-+tasks, queue-to-activation latency per runqueue is guaranteed to be bound by
-+the rr_interval tunable which is set to 6ms by default. This means that the
-+longest a CPU bound task will wait for more CPU is proportional to the number
-+of running tasks and in the common case of 0-2 running tasks per CPU, will be
-+under the 7ms threshold for human perception of jitter. Additionally, as newly
-+woken tasks will have an early deadline from their previous runtime, the very
-+tasks that are usually latency sensitive will have the shortest interval for
-+activation, usually preempting any existing CPU bound tasks.
-+
-+Tickless expiry:
-+
-+A feature of MuQSS is that it is not tied to the resolution of the chosen tick
-+rate in Hz, instead depending entirely on the high resolution timers where
-+possible for sub-millisecond accuracy on timeouts regardless of the underlying
-+tick rate. This allows MuQSS to be run with the low overhead of low Hz rates
-+such as 100 by default, benefiting from the improved throughput and lower
-+power usage it provides. Another advantage of this approach is that in
-+combination with the Full No HZ option, which disables ticks on running task
-+CPUs instead of just idle CPUs, the tick can be disabled at all times
-+regardless of how many tasks are running instead of being limited to just one
-+running task. Note that this option is NOT recommended for regular desktop
-+users.
-+
-+
-+Scalability and balancing.
-+
-+Unlike traditional approaches where balancing is a combination of CPU selection
-+at task wakeup and intermittent balancing based on a vast array of rules set
-+according to architecture, busyness calculations and special case management,
-+MuQSS indirectly balances on the fly at task wakeup and next task selection.
-+During initialisation, MuQSS creates a cache coherency ordered list of CPUs for
-+each logical CPU and uses this to aid task/CPU selection when CPUs are busy.
-+Additionally it selects any idle CPUs, if they are available, at any time over
-+busy CPUs according to the following preference:
-+
-+ * Same thread, idle or busy cache, idle or busy threads
-+ * Other core, same cache, idle or busy cache, idle threads.
-+ * Same node, other CPU, idle cache, idle threads.
-+ * Same node, other CPU, busy cache, idle threads.
-+ * Other core, same cache, busy threads.
-+ * Same node, other CPU, busy threads.
-+ * Other node, other CPU, idle cache, idle threads.
-+ * Other node, other CPU, busy cache, idle threads.
-+ * Other node, other CPU, busy threads.
-+
-+Mux is therefore SMT, MC and NUMA aware without the need for extra
-+intermittent balancing to keep CPUs busy and make the most of cache
-+coherency.
-+
-+
-+Features
-+
-+As the initial prime target audience for MuQSS was the average desktop user, it
-+was designed to not need tweaking, tuning or have features set to obtain benefit
-+from it. Thus the number of knobs and features has been kept to an absolute
-+minimum and should not require extra user input for the vast majority of cases.
-+There are 3 optional tunables, and 2 extra scheduling policies. The rr_interval,
-+interactive, and iso_cpu tunables, and the SCHED_ISO and SCHED_IDLEPRIO
-+policies. In addition to this, MuQSS also uses sub-tick accounting. What MuQSS
-+does _not_ now feature is support for CGROUPS. The average user should neither
-+need to know what these are, nor should they need to be using them to have good
-+desktop behaviour. However since some applications refuse to work without
-+cgroups, they can be enabled with MuQSS as a stub; the cgroup filesystem will
-+be created, which allows such applications to work.
-+
-+rr_interval:
-+
-+ /proc/sys/kernel/rr_interval
-+
-+The value is in milliseconds, and the default value is set to 6. Valid values
-+are from 1 to 1000. Decreasing the value will decrease latencies at the cost of
-+decreasing throughput, while increasing it will improve throughput, but at the
-+cost of worsening latencies. It is based on the fact that humans can detect
-+jitter at approximately 7ms, so aiming for much lower latencies is pointless
-+under most circumstances. It is worth noting this fact when comparing the
-+latency performance of MuQSS to other schedulers. Worst case latencies being
-+higher than 7ms are far worse than average latencies not being in the
-+microsecond range.
-+
-+interactive:
-+
-+ /proc/sys/kernel/interactive
-+
-+The value is a simple boolean of 1 for on and 0 for off and is set to on by
-+default. Disabling this will disable the near-determinism of MuQSS when
-+selecting the next task by not examining all CPUs for the earliest deadline
-+task, or which CPU to wake to, instead prioritising CPU balancing for improved
-+throughput. Latency will still be bound by rr_interval, but on a per-CPU basis
-+instead of across the whole system.
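-+
-+For example, trading the global deadline search for throughput:
-+
-+    echo 0 > /proc/sys/kernel/interactive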
-+
-+Isochronous scheduling:
-+
-+Isochronous scheduling is a unique scheduling policy designed to provide
-+near-real-time performance to unprivileged (ie non-root) users without the
-+ability to starve the machine indefinitely. Isochronous tasks (which means
-+"same time") are set using, for example, the schedtool application like so:
-+
-+ schedtool -I -e amarok
-+
-+This will start the audio application "amarok" as SCHED_ISO. How SCHED_ISO works
-+is that it has a priority level between true realtime tasks and SCHED_NORMAL
-+which would allow them to preempt all normal tasks, in a SCHED_RR fashion (ie,
-+if multiple SCHED_ISO tasks are running, they purely round robin at rr_interval
-+rate). However if ISO tasks run for more than a tunable finite amount of time,
-+they are then demoted back to SCHED_NORMAL scheduling. This finite amount of
-+time is the percentage of CPU available per CPU, configurable as a percentage in
-+the following "resource handling" tunable (as opposed to a scheduler tunable):
-+
-+iso_cpu:
-+
-+ /proc/sys/kernel/iso_cpu
-+
-+and is set to 70% by default. It is calculated over a rolling 5 second average.
-+Because it is the total CPU available, it means that on a multi CPU machine, it
-+is possible to have an ISO task running as realtime scheduling indefinitely on
-+just one CPU, as the other CPUs will be available. Setting this to 100 is the
-+equivalent of giving all users SCHED_RR access and setting it to 0 removes the
-+ability to run any pseudo-realtime tasks.
-+
-+A feature of MuQSS is that it detects when an application tries to obtain a
-+realtime policy (SCHED_RR or SCHED_FIFO) and the caller does not have the
-+appropriate privileges to use those policies. When it detects this, it will
-+give the task SCHED_ISO policy instead. Thus it is transparent to the user.
-+
-+
-+Idleprio scheduling:
-+
-+Idleprio scheduling is a scheduling policy designed to give out CPU to a task
-+_only_ when the CPU would be otherwise idle. The idea behind this is to allow
-+ultra low priority tasks to be run in the background that have virtually no
-+effect on the foreground tasks. This is ideally suited to distributed computing
-+clients (like setiathome, folding, mprime etc) but can also be used to start a
-+video encode or so on without any slowdown of other tasks. To prevent this policy
-+from grabbing shared resources and holding them indefinitely, if it detects a
-+state where the task is waiting on I/O, the machine is about to suspend to ram
-+and so on, it will transiently schedule them as SCHED_NORMAL. Once a task has
-+been scheduled as IDLEPRIO, it cannot be put back to SCHED_NORMAL without
-+superuser privileges since it is effectively a lower scheduling policy. Tasks
-+can be set to start as SCHED_IDLEPRIO with the schedtool command like so:
-+
-+schedtool -D -e ./mprime
-+
-+Subtick accounting:
-+
-+It is surprisingly difficult to get accurate CPU accounting, and in many cases,
-+the accounting is done by simply determining what is happening at the precise
-+moment a timer tick fires off. This becomes increasingly inaccurate as the timer
-+tick frequency (HZ) is lowered. It is possible to create an application which
-+uses almost 100% CPU, yet by being descheduled at the right time, records zero
-+CPU usage. While the main problem with this is that there are possible security
-+implications, it is also difficult to determine how much CPU a task really does
-+use. Mux uses sub-tick accounting from the TSC clock to determine real CPU
-+usage. Thus, the amount of CPU reported as being used by MuQSS will more
-+accurately represent how much CPU the task itself is using (as is shown for
-+example by the 'time' application), so the reported values may be quite
-+different to other schedulers. When comparing throughput of MuQSS to other
-+designs, it is important to compare the actual completed work in terms of total
-+wall clock time taken and total work done, rather than the reported "cpu usage".
-+
-+Symmetric MultiThreading (SMT) aware nice:
-+
-+SMT, a.k.a. hyperthreading, is a very common feature on modern CPUs. While the
-+logical CPU count rises by adding thread units to each CPU core, allowing more
-+than one task to be run simultaneously on the same core, the disadvantage of it
-+is that the CPU power is shared between the tasks, not summating to the power
-+of two CPUs. The practical upshot of this is that two tasks running on
-+separate threads of the same core run significantly slower than if they had one
-+core each to run on. While smart CPU selection allows each task to have a core
-+to itself whenever available (as is done on MuQSS), it cannot offset the
-+slowdown that occurs when the cores are all loaded and only a thread is left.
-+Most of the time this is harmless as the CPU is effectively overloaded at this
-+point and the extra thread is of benefit. However when running a niced task in
-+the presence of an un-niced task (say nice 19 v nice 0), the nice task gets
-+precisely the same amount of CPU power as the unniced one. MuQSS has an
-+optional configuration feature known as SMT-NICE which selectively idles the
-+secondary niced thread for a period proportional to the nice difference,
-+allowing CPU distribution according to nice level to be maintained, at the
-+expense of a small amount of extra overhead. If this is configured in on a
-+machine without SMT threads, the overhead is minimal.
-+
-+
-+Con Kolivas <kernel@kolivas.org> Sat, 29th October 2016
-diff -Nur a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt
---- a/Documentation/sysctl/kernel.txt 2019-01-05 20:17:13.829237906 +0000
-+++ b/Documentation/sysctl/kernel.txt 2019-01-05 20:22:51.089998199 +0000
-@@ -39,6 +39,7 @@
- - hung_task_timeout_secs
- - hung_task_warnings
- - kexec_load_disabled
-+- iso_cpu
- - kptr_restrict
- - l2cr [ PPC only ]
- - modprobe ==> Documentation/debugging-modules.txt
-@@ -73,6 +74,7 @@
- - randomize_va_space
- - real-root-dev ==> Documentation/admin-guide/initrd.rst
- - reboot-cmd [ SPARC only ]
-+- rr_interval
- - rtsig-max
- - rtsig-nr
- - seccomp/ ==> Documentation/userspace-api/seccomp_filter.rst
-@@ -95,6 +97,7 @@
- - unknown_nmi_panic
- - watchdog
- - watchdog_thresh
-+- yield_type
- - version
-
- ==============================================================
-@@ -397,6 +400,16 @@
-
- ==============================================================
-
-+iso_cpu: (MuQSS CPU scheduler only).
-+
-+This sets the percentage cpu that the unprivileged SCHED_ISO tasks can
-+run effectively at realtime priority, averaged over a rolling five
-+seconds over the -whole- system, meaning all cpus.
-+
-+Set to 70 (percent) by default.
-+
-+==============================================================
-+
- l2cr: (PPC only)
-
- This flag controls the L2 cache of G3 processor boards. If
-@@ -823,6 +836,20 @@
-
- ==============================================================
-
-+rr_interval: (MuQSS CPU scheduler only)
-+
-+This is the smallest duration that any cpu process scheduling unit
-+will run for. Increasing this value can increase throughput of cpu
-+bound tasks substantially but at the expense of increased latencies
-+overall. Conversely decreasing it will decrease average and maximum
-+latencies but at the expense of throughput. This value is in
-+milliseconds and the default value chosen depends on the number of
-+cpus available at scheduler initialisation with a minimum of 6.
-+
-+Valid values are from 1-1000.
-+
-+==============================================================
-+
- rtsig-max & rtsig-nr:
-
- The file rtsig-max can be used to tune the maximum number
-@@ -1081,3 +1108,13 @@
- tunable to zero will disable lockup detection altogether.
-
- ==============================================================
-+
-+yield_type: (MuQSS CPU scheduler only)
-+
-+This determines what type of yield calls to sched_yield will perform.
-+
-+ 0: No yield.
-+ 1: Yield only to better priority/deadline tasks. (default)
-+ 2: Expire timeslice and recalculate deadline.
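-+
-+For example, selecting mode 2:
-+
-+    echo 2 > /proc/sys/kernel/yield_type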
-+
-+==============================================================
-diff -Nur a/fs/proc/base.c b/fs/proc/base.c
---- a/fs/proc/base.c 2018-12-21 13:13:19.000000000 +0000
-+++ b/fs/proc/base.c 2019-01-05 20:22:51.089998199 +0000
-@@ -481,7 +481,7 @@
- seq_printf(m, "0 0 0\n");
- else
- seq_printf(m, "%llu %llu %lu\n",
-- (unsigned long long)task->se.sum_exec_runtime,
-+ (unsigned long long)tsk_seruntime(task),
- (unsigned long long)task->sched_info.run_delay,
- task->sched_info.pcount);
-
-diff -Nur a/include/linux/init_task.h b/include/linux/init_task.h
---- a/include/linux/init_task.h 2018-12-21 13:13:19.000000000 +0000
-+++ b/include/linux/init_task.h 2019-01-05 20:22:51.089998199 +0000
-@@ -172,8 +172,6 @@
- # define INIT_VTIME(tsk)
- #endif
-
--#define INIT_TASK_COMM "swapper"
--
- #ifdef CONFIG_RT_MUTEXES
- # define INIT_RT_MUTEXES(tsk) \
- .pi_waiters = RB_ROOT_CACHED, \
-@@ -223,6 +221,80 @@
- * INIT_TASK is used to set up the first task table, touch at
- * your own risk!. Base=0, limit=0x1fffff (=2MB)
- */
-+#ifdef CONFIG_SCHED_MUQSS
-+#define INIT_TASK_COMM "MuQSS"
-+#define INIT_TASK(tsk) \
-+{ \
-+ INIT_TASK_TI(tsk) \
-+ .state = 0, \
-+ .stack = init_stack, \
-+ .usage = ATOMIC_INIT(2), \
-+ .flags = PF_KTHREAD, \
-+ .prio = NORMAL_PRIO, \
-+ .static_prio = MAX_PRIO-20, \
-+ .normal_prio = NORMAL_PRIO, \
-+ .deadline = 0, \
-+ .policy = SCHED_NORMAL, \
-+ .cpus_allowed = CPU_MASK_ALL, \
-+ .mm = NULL, \
-+ .active_mm = &init_mm, \
-+ .restart_block = { \
-+ .fn = do_no_restart_syscall, \
-+ }, \
-+ .time_slice = 1000000, \
-+ .tasks = LIST_HEAD_INIT(tsk.tasks), \
-+ INIT_PUSHABLE_TASKS(tsk) \
-+ .ptraced = LIST_HEAD_INIT(tsk.ptraced), \
-+ .ptrace_entry = LIST_HEAD_INIT(tsk.ptrace_entry), \
-+ .real_parent = &tsk, \
-+ .parent = &tsk, \
-+ .children = LIST_HEAD_INIT(tsk.children), \
-+ .sibling = LIST_HEAD_INIT(tsk.sibling), \
-+ .group_leader = &tsk, \
-+ RCU_POINTER_INITIALIZER(real_cred, &init_cred), \
-+ RCU_POINTER_INITIALIZER(cred, &init_cred), \
-+ .comm = INIT_TASK_COMM, \
-+ .thread = INIT_THREAD, \
-+ .fs = &init_fs, \
-+ .files = &init_files, \
-+ .signal = &init_signals, \
-+ .sighand = &init_sighand, \
-+ .nsproxy = &init_nsproxy, \
-+ .pending = { \
-+ .list = LIST_HEAD_INIT(tsk.pending.list), \
-+ .signal = {{0}}}, \
-+ .blocked = {{0}}, \
-+ .alloc_lock = __SPIN_LOCK_UNLOCKED(tsk.alloc_lock), \
-+ .journal_info = NULL, \
-+ INIT_CPU_TIMERS(tsk) \
-+ .pi_lock = __RAW_SPIN_LOCK_UNLOCKED(tsk.pi_lock), \
-+ .timer_slack_ns = 50000, /* 50 usec default slack */ \
-+ .pids = { \
-+ [PIDTYPE_PID] = INIT_PID_LINK(PIDTYPE_PID), \
-+ [PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID), \
-+ [PIDTYPE_SID] = INIT_PID_LINK(PIDTYPE_SID), \
-+ }, \
-+ .thread_group = LIST_HEAD_INIT(tsk.thread_group), \
-+ .thread_node = LIST_HEAD_INIT(init_signals.thread_head), \
-+ INIT_IDS \
-+ INIT_PERF_EVENTS(tsk) \
-+ INIT_TRACE_IRQFLAGS \
-+ INIT_LOCKDEP \
-+ INIT_FTRACE_GRAPH \
-+ INIT_TRACE_RECURSION \
-+ INIT_TASK_RCU_PREEMPT(tsk) \
-+ INIT_TASK_RCU_TASKS(tsk) \
-+ INIT_CPUSET_SEQ(tsk) \
-+ INIT_RT_MUTEXES(tsk) \
-+ INIT_PREV_CPUTIME(tsk) \
-+ INIT_VTIME(tsk) \
-+ INIT_NUMA_BALANCING(tsk) \
-+ INIT_KASAN(tsk) \
-+ INIT_LIVEPATCH(tsk) \
-+ INIT_TASK_SECURITY \
-+}
-+#else /* CONFIG_SCHED_MUQSS */
-+#define INIT_TASK_COMM "swapper"
- #define INIT_TASK(tsk) \
- { \
- INIT_TASK_TI(tsk) \
-@@ -300,7 +372,7 @@
- INIT_LIVEPATCH(tsk) \
- INIT_TASK_SECURITY \
- }
--
-+#endif /* CONFIG_SCHED_MUQSS */
-
- /* Attach to the init_task data structure for proper alignment */
- #define __init_task_data __attribute__((__section__(".data..init_task")))
-diff -Nur a/include/linux/ioprio.h b/include/linux/ioprio.h
---- a/include/linux/ioprio.h 2018-12-21 13:13:19.000000000 +0000
-+++ b/include/linux/ioprio.h 2019-01-05 20:22:51.089998199 +0000
-@@ -52,6 +52,8 @@
- */
- static inline int task_nice_ioprio(struct task_struct *task)
- {
-+ if (iso_task(task))
-+ return 0;
- return (task_nice(task) + 20) / 5;
- }
-
-diff -Nur a/include/linux/sched/nohz.h b/include/linux/sched/nohz.h
---- a/include/linux/sched/nohz.h 2018-12-21 13:13:19.000000000 +0000
-+++ b/include/linux/sched/nohz.h 2019-01-05 20:22:51.089998199 +0000
-@@ -6,7 +6,7 @@
- * This is the interface between the scheduler and nohz/dynticks:
- */
-
--#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
-+#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON) && !defined(CONFIG_SCHED_MUQSS)
- extern void cpu_load_update_nohz_start(void);
- extern void cpu_load_update_nohz_stop(void);
- #else
-@@ -23,7 +23,7 @@
- static inline void set_cpu_sd_state_idle(void) { }
- #endif
-
--#ifdef CONFIG_NO_HZ_COMMON
-+#if defined(CONFIG_NO_HZ_COMMON) && !defined(CONFIG_SCHED_MUQSS)
- void calc_load_nohz_start(void);
- void calc_load_nohz_stop(void);
- #else
-diff -Nur a/include/linux/sched/prio.h b/include/linux/sched/prio.h
---- a/include/linux/sched/prio.h 2018-12-21 13:13:19.000000000 +0000
-+++ b/include/linux/sched/prio.h 2019-01-05 20:22:51.089998199 +0000
-@@ -20,8 +20,20 @@
- */
-
- #define MAX_USER_RT_PRIO 100
-+
-+#ifdef CONFIG_SCHED_MUQSS
-+/* Note different MAX_RT_PRIO */
-+#define MAX_RT_PRIO (MAX_USER_RT_PRIO + 1)
-+
-+#define ISO_PRIO (MAX_RT_PRIO)
-+#define NORMAL_PRIO (MAX_RT_PRIO + 1)
-+#define IDLE_PRIO (MAX_RT_PRIO + 2)
-+#define PRIO_LIMIT ((IDLE_PRIO) + 1)
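-+/*
-+ * Illustration (not in the original patch): with MAX_USER_RT_PRIO = 100
-+ * these work out to MAX_RT_PRIO = ISO_PRIO = 101, NORMAL_PRIO = 102,
-+ * IDLE_PRIO = 103 and PRIO_LIMIT = 104, slotting ISO and the normal/idle
-+ * policies directly above the realtime range.
-+ */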
-+#else /* CONFIG_SCHED_MUQSS */
- #define MAX_RT_PRIO MAX_USER_RT_PRIO
-
-+#endif /* CONFIG_SCHED_MUQSS */
-+
- #define MAX_PRIO (MAX_RT_PRIO + NICE_WIDTH)
- #define DEFAULT_PRIO (MAX_RT_PRIO + NICE_WIDTH / 2)
-
-diff -Nur a/include/linux/sched/task.h b/include/linux/sched/task.h
---- a/include/linux/sched/task.h 2018-12-21 13:13:19.000000000 +0000
-+++ b/include/linux/sched/task.h 2019-01-05 20:22:51.089998199 +0000
-@@ -80,7 +80,7 @@
- extern void free_task(struct task_struct *tsk);
-
- /* sched_exec is called by processes performing an exec */
--#ifdef CONFIG_SMP
-+#if defined(CONFIG_SMP) && !defined(CONFIG_SCHED_MUQSS)
- extern void sched_exec(void);
- #else
- #define sched_exec() {}
-diff -Nur a/include/linux/sched.h b/include/linux/sched.h
---- a/include/linux/sched.h 2018-12-21 13:13:19.000000000 +0000
-+++ b/include/linux/sched.h 2019-01-05 20:22:51.089998199 +0000
-@@ -27,6 +27,9 @@
- #include <linux/signal_types.h>
- #include <linux/mm_types_task.h>
- #include <linux/task_io_accounting.h>
-+#ifdef CONFIG_SCHED_MUQSS
-+#include <linux/skip_list.h>
-+#endif
-
- /* task_struct member predeclarations (sorted alphabetically): */
- struct audit_context;
-@@ -579,9 +582,11 @@
- unsigned int flags;
- unsigned int ptrace;
-
-+#if defined(CONFIG_SMP) || defined(CONFIG_SCHED_MUQSS)
-+ int on_cpu;
-+#endif
- #ifdef CONFIG_SMP
- struct llist_node wake_entry;
-- int on_cpu;
- #ifdef CONFIG_THREAD_INFO_IN_TASK
- /* Current CPU: */
- unsigned int cpu;
-@@ -598,10 +603,25 @@
- int static_prio;
- int normal_prio;
- unsigned int rt_priority;
-+#ifdef CONFIG_SCHED_MUQSS
-+ int time_slice;
-+ u64 deadline;
-+ skiplist_node node; /* Skip list node */
-+ u64 last_ran;
-+ u64 sched_time; /* sched_clock time spent running */
-+#ifdef CONFIG_SMT_NICE
-+ int smt_bias; /* Policy/nice level bias across smt siblings */
-+#endif
-+#ifdef CONFIG_HOTPLUG_CPU
-+ bool zerobound; /* Bound to CPU0 for hotplug */
-+#endif
-+ unsigned long rt_timeout;
-+#else /* CONFIG_SCHED_MUQSS */
-
- const struct sched_class *sched_class;
- struct sched_entity se;
- struct sched_rt_entity rt;
-+#endif
- #ifdef CONFIG_CGROUP_SCHED
- struct task_group *sched_task_group;
- #endif
-@@ -751,6 +771,10 @@
- u64 utimescaled;
- u64 stimescaled;
- #endif
-+#ifdef CONFIG_SCHED_MUQSS
-+ /* Unbanked cpu time */
-+ unsigned long utime_ns, stime_ns;
-+#endif
- u64 gtime;
- struct prev_cputime prev_cputime;
- #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
-@@ -1155,6 +1179,40 @@
- */
- };
-
-+#ifdef CONFIG_SCHED_MUQSS
-+#define tsk_seruntime(t) ((t)->sched_time)
-+#define tsk_rttimeout(t) ((t)->rt_timeout)
-+
-+static inline void tsk_cpus_current(struct task_struct *p)
-+{
-+}
-+
-+void print_scheduler_version(void);
-+
-+static inline bool iso_task(struct task_struct *p)
-+{
-+ return (p->policy == SCHED_ISO);
-+}
-+#else /* CFS */
-+#define tsk_seruntime(t) ((t)->se.sum_exec_runtime)
-+#define tsk_rttimeout(t) ((t)->rt.timeout)
-+
-+static inline void tsk_cpus_current(struct task_struct *p)
-+{
-+ p->nr_cpus_allowed = current->nr_cpus_allowed;
-+}
-+
-+static inline void print_scheduler_version(void)
-+{
-+ printk(KERN_INFO "CFS CPU scheduler.\n");
-+}
-+
-+static inline bool iso_task(struct task_struct *p)
-+{
-+ return false;
-+}
-+#endif /* CONFIG_SCHED_MUQSS */
-+
- static inline struct pid *task_pid(struct task_struct *task)
- {
- return task->pids[PIDTYPE_PID].pid;
-diff -Nur a/include/linux/skip_list.h b/include/linux/skip_list.h
---- a/include/linux/skip_list.h 1970-01-01 01:00:00.000000000 +0100
-+++ b/include/linux/skip_list.h 2019-01-05 20:22:51.089998199 +0000
-@@ -0,0 +1,33 @@
-+#ifndef _LINUX_SKIP_LISTS_H
-+#define _LINUX_SKIP_LISTS_H
-+typedef u64 keyType;
-+typedef void *valueType;
-+
-+typedef struct nodeStructure skiplist_node;
-+
-+struct nodeStructure {
-+ int level; /* Levels in this structure */
-+ keyType key;
-+ valueType value;
-+ skiplist_node *next[8];
-+ skiplist_node *prev[8];
-+};
-+
-+typedef struct listStructure {
-+ int entries;
-+ int level; /* Maximum level of the list
-+ (1 more than the number of levels in the list) */
-+ skiplist_node *header; /* pointer to header */
-+} skiplist;
-+
-+void skiplist_init(skiplist_node *slnode);
-+skiplist *new_skiplist(skiplist_node *slnode);
-+void free_skiplist(skiplist *l);
-+void skiplist_node_init(skiplist_node *node);
-+void skiplist_insert(skiplist *l, skiplist_node *node, keyType key, valueType value, unsigned int randseed);
-+void skiplist_delete(skiplist *l, skiplist_node *node);
-+
-+static inline bool skiplist_node_empty(skiplist_node *node) {
-+ return (!node->next[0]);
-+}
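-+
-+/*
-+ * Typical usage sketch (illustrative only; the MuQSS runqueues are the
-+ * real consumer). The list is built around an embedded header node and
-+ * every value carries its own node, so deletion needs no lookup:
-+ *
-+ *	skiplist_node header;
-+ *	skiplist *sl;
-+ *
-+ *	skiplist_init(&header);
-+ *	sl = new_skiplist(&header);
-+ *	skiplist_node_init(&p->node);
-+ *	skiplist_insert(sl, &p->node, key, p, randseed);
-+ *	...
-+ *	skiplist_delete(sl, &p->node);
-+ */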
-+#endif /* _LINUX_SKIP_LISTS_H */
-diff -Nur a/include/uapi/linux/sched.h b/include/uapi/linux/sched.h
---- a/include/uapi/linux/sched.h 2018-12-21 13:13:19.000000000 +0000
-+++ b/include/uapi/linux/sched.h 2019-01-05 20:22:51.089998199 +0000
-@@ -37,9 +37,16 @@
- #define SCHED_FIFO 1
- #define SCHED_RR 2
- #define SCHED_BATCH 3
--/* SCHED_ISO: reserved but not implemented yet */
-+/* SCHED_ISO: Implemented on MuQSS only */
- #define SCHED_IDLE 5
-+#ifdef CONFIG_SCHED_MUQSS
-+#define SCHED_ISO 4
-+#define SCHED_IDLEPRIO SCHED_IDLE
-+#define SCHED_MAX (SCHED_IDLEPRIO)
-+#define SCHED_RANGE(policy) ((policy) <= SCHED_MAX)
-+#else /* CONFIG_SCHED_MUQSS */
- #define SCHED_DEADLINE 6
-+#endif /* CONFIG_SCHED_MUQSS */
-
- /* Can be ORed in to make sure the process is reverted back to SCHED_NORMAL on fork */
- #define SCHED_RESET_ON_FORK 0x40000000
-diff -Nur a/init/Kconfig b/init/Kconfig
---- a/init/Kconfig 2019-01-05 20:17:13.849238543 +0000
-+++ b/init/Kconfig 2019-01-05 20:22:51.089998199 +0000
-@@ -38,6 +38,18 @@
-
- menu "General setup"
-
-+config SCHED_MUQSS
-+ bool "MuQSS cpu scheduler"
-+ select HIGH_RES_TIMERS
-+ ---help---
-+ The Multiple Queue Skiplist Scheduler for excellent interactivity and
-+ responsiveness on the desktop and highly scalable deterministic
-+ low latency on any hardware.
-+
-+ Say Y here.
-+ default y
-+
-+
- config BROKEN
- bool
-
-@@ -621,6 +633,7 @@
- depends on ARCH_SUPPORTS_NUMA_BALANCING
- depends on !ARCH_WANT_NUMA_VARIABLE_LOCALITY
- depends on SMP && NUMA && MIGRATION
-+ depends on !SCHED_MUQSS
- help
- This option adds support for automatic NUMA aware memory/task placement.
- The mechanism is quite primitive and is based on migrating memory when
-@@ -723,9 +736,13 @@
- help
- This feature lets CPU scheduler recognize task groups and control CPU
- bandwidth allocation to such task groups. It uses cgroups to group
-- tasks.
-+ tasks. In combination with MuQSS this is purely a STUB to create the
-+ files associated with the CPU controller cgroup but most of the
-+ controls do nothing. This is useful for working in environments and
-+ with applications that will only work if this control group is
-+ present.
-
--if CGROUP_SCHED
-+if CGROUP_SCHED && !SCHED_MUQSS
- config FAIR_GROUP_SCHED
- bool "Group scheduling for SCHED_OTHER"
- depends on CGROUP_SCHED
-@@ -832,6 +849,7 @@
-
- config CGROUP_CPUACCT
- bool "Simple CPU accounting controller"
-+ depends on !SCHED_MUQSS
- help
- Provides a simple controller for monitoring the
- total CPU consumed by the tasks in a cgroup.
-@@ -950,6 +968,7 @@
-
- config SCHED_AUTOGROUP
- bool "Automatic process group scheduling"
-+ depends on !SCHED_MUQSS
- select CGROUPS
- select CGROUP_SCHED
- select FAIR_GROUP_SCHED
-diff -Nur a/init/main.c b/init/main.c
---- a/init/main.c 2018-12-21 13:13:19.000000000 +0000
-+++ b/init/main.c 2019-01-05 20:22:51.089998199 +0000
-@@ -841,7 +841,6 @@
- return ret;
- }
-
--
- extern initcall_t __initcall_start[];
- extern initcall_t __initcall0_start[];
- extern initcall_t __initcall1_start[];
-@@ -1008,6 +1007,8 @@
-
- rcu_end_inkernel_boot();
-
-+ print_scheduler_version();
-+
- if (ramdisk_execute_command) {
- ret = run_init_process(ramdisk_execute_command);
- if (!ret)
-diff -Nur a/kernel/delayacct.c b/kernel/delayacct.c
---- a/kernel/delayacct.c 2018-12-21 13:13:19.000000000 +0000
-+++ b/kernel/delayacct.c 2019-01-05 20:22:51.089998199 +0000
-@@ -115,7 +115,7 @@
- */
- t1 = tsk->sched_info.pcount;
- t2 = tsk->sched_info.run_delay;
-- t3 = tsk->se.sum_exec_runtime;
-+ t3 = tsk_seruntime(tsk);
-
- d->cpu_count += t1;
-
-diff -Nur a/kernel/exit.c b/kernel/exit.c
---- a/kernel/exit.c 2018-12-21 13:13:19.000000000 +0000
-+++ b/kernel/exit.c 2019-01-05 20:22:51.089998199 +0000
-@@ -129,7 +129,7 @@
- sig->curr_target = next_thread(tsk);
- }
-
-- add_device_randomness((const void*) &tsk->se.sum_exec_runtime,
-+ add_device_randomness((const void*) &tsk_seruntime(tsk),
- sizeof(unsigned long long));
-
- /*
-@@ -150,7 +150,7 @@
- sig->inblock += task_io_get_inblock(tsk);
- sig->oublock += task_io_get_oublock(tsk);
- task_io_accounting_add(&sig->ioac, &tsk->ioac);
-- sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
-+ sig->sum_sched_runtime += tsk_seruntime(tsk);
- sig->nr_threads--;
- __unhash_process(tsk, group_dead);
- write_sequnlock(&sig->stats_lock);
-diff -Nur a/kernel/kthread.c b/kernel/kthread.c
---- a/kernel/kthread.c 2018-12-21 13:13:19.000000000 +0000
-+++ b/kernel/kthread.c 2019-01-05 20:22:51.099998516 +0000
-@@ -410,6 +410,34 @@
- }
- EXPORT_SYMBOL(kthread_bind);
-
-+#if defined(CONFIG_SCHED_MUQSS) && defined(CONFIG_SMP)
-+extern void __do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask);
-+
-+/*
-+ * new_kthread_bind is a special variant of __kthread_bind_mask.
-+ * For new threads to work on MuQSS we want to call do_set_cpus_allowed
-+ * without the task_cpu being set, and without the task being rescheduled
-+ * until it reschedules on its own, so we call __do_set_cpus_allowed
-+ * directly, which only changes the cpumask. This is particularly
-+ * important for smpboot threads to work.
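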
-+ */
-+static void new_kthread_bind(struct task_struct *p, unsigned int cpu)
-+{
-+ unsigned long flags;
-+
-+ if (WARN_ON(!wait_task_inactive(p, TASK_UNINTERRUPTIBLE)))
-+ return;
-+
-+ /* It's safe because the task is inactive. */
-+ raw_spin_lock_irqsave(&p->pi_lock, flags);
-+ __do_set_cpus_allowed(p, cpumask_of(cpu));
-+ p->flags |= PF_NO_SETAFFINITY;
-+ raw_spin_unlock_irqrestore(&p->pi_lock, flags);
-+}
-+#else
-+#define new_kthread_bind(p, cpu) kthread_bind(p, cpu)
-+#endif
-+
- /**
- * kthread_create_on_cpu - Create a cpu bound kthread
- * @threadfn: the function to run until signal_pending(current).
-@@ -431,7 +459,7 @@
- cpu);
- if (IS_ERR(p))
- return p;
-- kthread_bind(p, cpu);
-+ new_kthread_bind(p, cpu);
- /* CPU hotplug need to bind once again when unparking the thread. */
- set_bit(KTHREAD_IS_PER_CPU, &to_kthread(p)->flags);
- to_kthread(p)->cpu = cpu;
-diff -Nur a/kernel/livepatch/transition.c b/kernel/livepatch/transition.c
---- a/kernel/livepatch/transition.c 2018-12-21 13:13:19.000000000 +0000
-+++ b/kernel/livepatch/transition.c 2019-01-05 20:22:51.099998516 +0000
-@@ -277,6 +277,12 @@
- return 0;
- }
-
-+#ifdef CONFIG_SCHED_MUQSS
-+typedef unsigned long rq_flags_t;
-+#else
-+typedef struct rq_flags rq_flags_t;
-+#endif
-+
- /*
- * Try to safely switch a task to the target patch state. If it's currently
- * running, or it's sleeping on a to-be-patched or to-be-unpatched function, or
-@@ -285,7 +291,7 @@
- static bool klp_try_switch_task(struct task_struct *task)
- {
- struct rq *rq;
-- struct rq_flags flags;
-+ rq_flags_t flags;
- int ret;
- bool success = false;
- char err_buf[STACK_ERR_BUF_SIZE];
-diff -Nur a/kernel/Makefile b/kernel/Makefile
---- a/kernel/Makefile 2018-12-21 13:13:19.000000000 +0000
-+++ b/kernel/Makefile 2019-01-05 20:22:51.099998516 +0000
-@@ -10,7 +10,7 @@
- extable.o params.o \
- kthread.o sys_ni.o nsproxy.o \
- notifier.o ksysfs.o cred.o reboot.o \
-- async.o range.o smpboot.o ucount.o
-+ async.o range.o smpboot.o ucount.o skip_list.o
-
- obj-$(CONFIG_MODULES) += kmod.o
- obj-$(CONFIG_MULTIUSER) += groups.o
-diff -Nur a/kernel/rcu/Kconfig b/kernel/rcu/Kconfig
---- a/kernel/rcu/Kconfig 2018-12-21 13:13:19.000000000 +0000
-+++ b/kernel/rcu/Kconfig 2019-01-05 20:22:51.099998516 +0000
-@@ -93,7 +93,7 @@
- config CONTEXT_TRACKING_FORCE
- bool "Force context tracking"
- depends on CONTEXT_TRACKING
-- default y if !NO_HZ_FULL
-+ default y if !NO_HZ_FULL && !SCHED_MUQSS
- help
- The major pre-requirement for full dynticks to work is to
- support the context tracking subsystem. But there are also
-diff -Nur a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
---- a/kernel/sched/cpufreq_schedutil.c 2018-12-21 13:13:19.000000000 +0000
-+++ b/kernel/sched/cpufreq_schedutil.c 2019-01-05 20:22:51.099998516 +0000
-@@ -176,6 +176,17 @@
- return cpufreq_driver_resolve_freq(policy, freq);
- }
-
-+#ifdef CONFIG_SCHED_MUQSS
-+static void sugov_get_util(unsigned long *util, unsigned long *max, int cpu)
-+{
-+ struct rq *rq = cpu_rq(cpu);
-+
-+ *util = rq->load_avg;
-+ if (*util > SCHED_CAPACITY_SCALE)
-+ *util = SCHED_CAPACITY_SCALE;
-+ *max = SCHED_CAPACITY_SCALE;
-+}
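-+/*
-+ * Illustration (not in the original patch): MuQSS has no per-entity load
-+ * tracking, so the variant above hands schedutil the runqueue's rolling
-+ * load_avg directly; e.g. a load_avg of 512 reports utilization at half
-+ * of SCHED_CAPACITY_SCALE (1024).
-+ */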
-+#else /* CONFIG_SCHED_MUQSS */
- static void sugov_get_util(unsigned long *util, unsigned long *max, int cpu)
- {
- struct rq *rq = cpu_rq(cpu);
-@@ -186,6 +197,7 @@
- *util = min(rq->cfs.avg.util_avg, cfs_max);
- *max = cfs_max;
- }
-+#endif /* CONFIG_SCHED_MUQSS */
-
- static void sugov_set_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,
- unsigned int flags)
-diff -Nur a/kernel/sched/cputime.c b/kernel/sched/cputime.c
---- a/kernel/sched/cputime.c 2018-12-21 13:13:19.000000000 +0000
-+++ b/kernel/sched/cputime.c 2019-01-05 20:22:51.099998516 +0000
-@@ -270,26 +270,6 @@
- return accounted;
- }
-
--#ifdef CONFIG_64BIT
--static inline u64 read_sum_exec_runtime(struct task_struct *t)
--{
-- return t->se.sum_exec_runtime;
--}
--#else
--static u64 read_sum_exec_runtime(struct task_struct *t)
--{
-- u64 ns;
-- struct rq_flags rf;
-- struct rq *rq;
--
-- rq = task_rq_lock(t, &rf);
-- ns = t->se.sum_exec_runtime;
-- task_rq_unlock(rq, t, &rf);
--
-- return ns;
--}
--#endif
--
- /*
- * Accumulate raw cputime values of dead tasks (sig->[us]time) and live
- * tasks (sum on group iteration) belonging to @tsk's group.
-@@ -661,7 +641,7 @@
- void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
- {
- struct task_cputime cputime = {
-- .sum_exec_runtime = p->se.sum_exec_runtime,
-+ .sum_exec_runtime = tsk_seruntime(p),
- };
-
- task_cputime(p, &cputime.utime, &cputime.stime);
-diff -Nur a/kernel/sched/idle.c b/kernel/sched/idle.c
---- a/kernel/sched/idle.c 2018-12-21 13:13:19.000000000 +0000
-+++ b/kernel/sched/idle.c 2019-01-05 20:22:51.099998516 +0000
-@@ -209,6 +209,9 @@
- */
- static void do_idle(void)
- {
-+ int cpu = smp_processor_id();
-+ bool pending = false;
-+
- /*
- * If the arch has a polling bit, we maintain an invariant:
- *
-@@ -220,13 +223,16 @@
-
- __current_set_polling();
- quiet_vmstat();
-- tick_nohz_idle_enter();
-+ if (unlikely(softirq_pending(cpu)))
-+ pending = true;
-+ else
-+ tick_nohz_idle_enter();
-
- while (!need_resched()) {
- check_pgt_cache();
- rmb();
-
-- if (cpu_is_offline(smp_processor_id())) {
-+ if (cpu_is_offline(cpu)) {
- cpuhp_report_idle_dead();
- arch_cpu_idle_dead();
- }
-@@ -255,7 +261,8 @@
- * an IPI to fold the state for us.
- */
- preempt_set_need_resched();
-- tick_nohz_idle_exit();
-+ if (!pending)
-+ tick_nohz_idle_exit();
- __current_clr_polling();
-
- /*
-diff -Nur a/kernel/sched/Makefile b/kernel/sched/Makefile
---- a/kernel/sched/Makefile 2018-12-21 13:13:19.000000000 +0000
-+++ b/kernel/sched/Makefile 2019-01-05 20:22:51.099998516 +0000
-@@ -16,14 +16,20 @@
- CFLAGS_core.o := $(PROFILING) -fno-omit-frame-pointer
- endif
-
--obj-y += core.o loadavg.o clock.o cputime.o
-+ifdef CONFIG_SCHED_MUQSS
-+obj-y += MuQSS.o clock.o
-+else
-+obj-y += core.o loadavg.o clock.o
- obj-y += idle_task.o fair.o rt.o deadline.o
--obj-y += wait.o wait_bit.o swait.o completion.o idle.o
--obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o topology.o stop_task.o
-+obj-$(CONFIG_SMP) += cpudeadline.o stop_task.o
- obj-$(CONFIG_SCHED_AUTOGROUP) += autogroup.o
--obj-$(CONFIG_SCHEDSTATS) += stats.o
- obj-$(CONFIG_SCHED_DEBUG) += debug.o
- obj-$(CONFIG_CGROUP_CPUACCT) += cpuacct.o
-+endif
-+obj-y += cputime.o
-+obj-y += wait.o wait_bit.o swait.o completion.o idle.o
-+obj-$(CONFIG_SMP) += cpupri.o topology.o
-+obj-$(CONFIG_SCHEDSTATS) += stats.o
- obj-$(CONFIG_CPU_FREQ) += cpufreq.o
- obj-$(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) += cpufreq_schedutil.o
- obj-$(CONFIG_MEMBARRIER) += membarrier.o
-diff -Nur a/kernel/sched/MuQSS.c b/kernel/sched/MuQSS.c
---- a/kernel/sched/MuQSS.c 1970-01-01 01:00:00.000000000 +0100
-+++ b/kernel/sched/MuQSS.c 2019-01-05 20:22:51.099998516 +0000
-@@ -0,0 +1,6923 @@
-+// SPDX-License-Identifier: GPL-2.0
-+/*
-+ * kernel/sched/MuQSS.c, was kernel/sched.c
-+ *
-+ * Kernel scheduler and related syscalls
-+ *
-+ * Copyright (C) 1991-2002 Linus Torvalds
-+ *
-+ * 1996-12-23 Modified by Dave Grothe to fix bugs in semaphores and
-+ * make semaphores SMP safe
-+ * 1998-11-19 Implemented schedule_timeout() and related stuff
-+ * by Andrea Arcangeli
-+ * 2002-01-04 New ultra-scalable O(1) scheduler by Ingo Molnar:
-+ * hybrid priority-list and round-robin design with
-+ * an array-switch method of distributing timeslices
-+ * and per-CPU runqueues. Cleanups and useful suggestions
-+ * by Davide Libenzi, preemptible kernel bits by Robert Love.
-+ * 2003-09-03 Interactivity tuning by Con Kolivas.
-+ * 2004-04-02 Scheduler domains code by Nick Piggin
-+ * 2007-04-15 Work begun on replacing all interactivity tuning with a
-+ * fair scheduling design by Con Kolivas.
-+ * 2007-05-05 Load balancing (smp-nice) and other improvements
-+ * by Peter Williams
-+ * 2007-05-06 Interactivity improvements to CFS by Mike Galbraith
-+ * 2007-07-01 Group scheduling enhancements by Srivatsa Vaddagiri
-+ * 2007-11-29 RT balancing improvements by Steven Rostedt, Gregory Haskins,
-+ * Thomas Gleixner, Mike Kravetz
-+ * 2009-08-13 Brainfuck deadline scheduling policy by Con Kolivas deletes
-+ * a whole lot of those previous things.
-+ * 2016-10-01 Multiple Queue Skiplist Scheduler scalable evolution of BFS
-+ * scheduler by Con Kolivas.
-+ */
-+
-+#include <linux/sched.h>
-+#include <linux/sched/clock.h>
-+#include <uapi/linux/sched/types.h>
-+#include <linux/sched/loadavg.h>
-+#include <linux/sched/hotplug.h>
-+#include <linux/wait_bit.h>
-+#include <linux/cpuset.h>
-+#include <linux/delayacct.h>
-+#include <linux/init_task.h>
-+#include <linux/binfmts.h>
-+#include <linux/context_tracking.h>
-+#include <linux/rcupdate_wait.h>
-+#include <linux/skip_list.h>
-+
-+#include <linux/blkdev.h>
-+#include <linux/kprobes.h>
-+#include <linux/mmu_context.h>
-+#include <linux/module.h>
-+#include <linux/nmi.h>
-+#include <linux/prefetch.h>
-+#include <linux/profile.h>
-+#include <linux/security.h>
-+#include <linux/syscalls.h>
-+#include <linux/tick.h>
-+
-+#include <asm/switch_to.h>
-+#include <asm/tlb.h>
-+#ifdef CONFIG_PARAVIRT
-+#include <asm/paravirt.h>
-+#endif
-+
-+#include "../workqueue_internal.h"
-+#include "../smpboot.h"
-+
-+#define CREATE_TRACE_POINTS
-+#include <trace/events/sched.h>
-+
-+#include "MuQSS.h"
-+
-+#define rt_prio(prio) unlikely((prio) < MAX_RT_PRIO)
-+#define rt_task(p) rt_prio((p)->prio)
-+#define batch_task(p) (unlikely((p)->policy == SCHED_BATCH))
-+#define is_rt_policy(policy) ((policy) == SCHED_FIFO || \
-+ (policy) == SCHED_RR)
-+#define has_rt_policy(p) unlikely(is_rt_policy((p)->policy))
-+
-+#define is_idle_policy(policy) ((policy) == SCHED_IDLEPRIO)
-+#define idleprio_task(p) unlikely(is_idle_policy((p)->policy))
-+#define task_running_idle(p) unlikely((p)->prio == IDLE_PRIO)
-+
-+#define is_iso_policy(policy) ((policy) == SCHED_ISO)
-+#define iso_task(p) unlikely(is_iso_policy((p)->policy))
-+#define task_running_iso(p) unlikely((p)->prio == ISO_PRIO)
-+
-+#define rq_idle(rq) ((rq)->rq_prio == PRIO_LIMIT)
-+
-+#define ISO_PERIOD (5 * HZ)
-+
-+#define STOP_PRIO (MAX_RT_PRIO - 1)
-+
-+/*
-+ * Some helpers for converting to/from various scales. Use shifts to get
-+ * approximate multiples of ten for less overhead.
-+ */
-+#define JIFFIES_TO_NS(TIME) ((TIME) * (1073741824 / HZ))
-+#define JIFFY_NS (1073741824 / HZ)
-+#define JIFFY_US (1048576 / HZ)
-+#define NS_TO_JIFFIES(TIME) ((TIME) / JIFFY_NS)
-+#define HALF_JIFFY_NS (1073741824 / HZ / 2)
-+#define HALF_JIFFY_US (1048576 / HZ / 2)
-+#define MS_TO_NS(TIME) ((TIME) << 20)
-+#define MS_TO_US(TIME) ((TIME) << 10)
-+#define NS_TO_MS(TIME) ((TIME) >> 20)
-+#define NS_TO_US(TIME) ((TIME) >> 10)
-+#define US_TO_NS(TIME) ((TIME) << 10)
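-+/*
-+ * Worked example (illustrative, not in the original patch): at HZ=100 a
-+ * jiffy converts to 1073741824 / 100 ~= 10.74ms rather than an exact
-+ * 10ms, since 2^30 stands in for 10^9 so every conversion reduces to
-+ * shifts; likewise MS_TO_NS(6) = 6 << 20 ~= 6.29ms and
-+ * US_TO_NS(100) ~= 102.4us.
-+ */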
-+
-+#define RESCHED_US (100) /* Reschedule if less than this many μs left */
-+
-+void print_scheduler_version(void)
-+{
-+ printk(KERN_INFO "MuQSS CPU scheduler v0.162 by Con Kolivas.\n");
-+}
-+
-+/*
-+ * This is the time all tasks within the same priority round robin.
-+ * Value is in ms and set to a minimum of 6ms.
-+ * Tunable via /proc interface.
-+ */
-+int rr_interval __read_mostly = 6;
-+
-+/*
-+ * Tunable to choose whether to prioritise latency or throughput, simple
-+ * binary yes or no
-+ */
-+int sched_interactive __read_mostly = 1;
-+
-+/*
-+ * sched_iso_cpu - sysctl which determines the cpu percentage SCHED_ISO tasks
-+ * are allowed to run five seconds as real time tasks. This is the total over
-+ * all online cpus.
-+ */
-+int sched_iso_cpu __read_mostly = 70;
-+
-+/*
-+ * sched_yield_type - Choose what sort of yield sched_yield will perform.
-+ * 0: No yield.
-+ * 1: Yield only to better priority/deadline tasks. (default)
-+ * 2: Expire timeslice and recalculate deadline.
-+ */
-+int sched_yield_type __read_mostly = 1;
-+
-+/*
-+ * The relative length of deadline for each priority(nice) level.
-+ */
-+static int prio_ratios[NICE_WIDTH] __read_mostly;
-+
-+/*
-+ * The quota handed out to tasks of all priority levels when refilling their
-+ * time_slice.
-+ */
-+static inline int timeslice(void)
-+{
-+ return MS_TO_US(rr_interval);
-+}
-+
-+#ifdef CONFIG_SMP
-+static cpumask_t cpu_idle_map ____cacheline_aligned_in_smp;
-+#endif
-+
-+/* CPUs with isolated domains */
-+cpumask_var_t cpu_isolated_map;
-+
-+DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
-+#ifdef CONFIG_SMP
-+struct rq *cpu_rq(int cpu)
-+{
-+ return &per_cpu(runqueues, (cpu));
-+}
-+#define cpu_curr(cpu) (cpu_rq(cpu)->curr)
-+
-+/*
-+ * For asym packing, by default the lower numbered cpu has higher priority.
-+ */
-+int __weak arch_asym_cpu_priority(int cpu)
-+{
-+ return -cpu;
-+}
-+
-+int __weak arch_sd_sibling_asym_packing(void)
-+{
-+ return 0*SD_ASYM_PACKING;
-+}
-+#else
-+struct rq *uprq;
-+#endif /* CONFIG_SMP */
-+
-+#include "stats.h"
-+
-+#ifndef prepare_arch_switch
-+# define prepare_arch_switch(next) do { } while (0)
-+#endif
-+#ifndef finish_arch_switch
-+# define finish_arch_switch(prev) do { } while (0)
-+#endif
-+#ifndef finish_arch_post_lock_switch
-+# define finish_arch_post_lock_switch() do { } while (0)
-+#endif
-+
-+/*
-+ * All common locking functions performed on rq->lock. rq->clock is local to
-+ * the CPU accessing it so it can be modified just with interrupts disabled
-+ * when we're not updating niffies.
-+ * Looking up task_rq must be done under rq->lock to be safe.
-+ */
-+
-+/*
-+ * RQ-clock updating methods:
-+ */
-+
-+static void update_rq_clock_task(struct rq *rq, s64 delta)
-+{
-+/*
-+ * In theory, the compiler should just see 0 here, and optimize out the call
-+ * to sched_rt_avg_update. But I don't trust it...
-+ */
-+#ifdef CONFIG_IRQ_TIME_ACCOUNTING
-+ s64 irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;
-+
-+ /*
-+ * Since irq_time is only updated on {soft,}irq_exit, we might run into
-+ * this case when a previous update_rq_clock() happened inside a
-+ * {soft,}irq region.
-+ *
-+ * When this happens, we stop ->clock_task and only update the
-+ * prev_irq_time stamp to account for the part that fit, so that a next
-+ * update will consume the rest. This ensures ->clock_task is
-+ * monotonic.
-+ *
-+ * It does however cause some slight miss-attribution of {soft,}irq
-+ * time, a more accurate solution would be to update the irq_time using
-+ * the current rq->clock timestamp, except that would require using
-+ * atomic ops.
-+ */
-+ if (irq_delta > delta)
-+ irq_delta = delta;
-+
-+ rq->prev_irq_time += irq_delta;
-+ delta -= irq_delta;
-+#endif
-+#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
-+ if (static_key_false((&paravirt_steal_rq_enabled))) {
-+ s64 steal = paravirt_steal_clock(cpu_of(rq));
-+
-+ steal -= rq->prev_steal_time_rq;
-+
-+ if (unlikely(steal > delta))
-+ steal = delta;
-+
-+ rq->prev_steal_time_rq += steal;
-+
-+ delta -= steal;
-+ }
-+#endif
-+ rq->clock_task += delta;
-+}
-+
-+static inline void update_rq_clock(struct rq *rq)
-+{
-+ s64 delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
-+
-+ if (unlikely(delta < 0))
-+ return;
-+ rq->clock += delta;
-+ update_rq_clock_task(rq, delta);
-+}
-+
-+/*
-+ * Niffies are a globally increasing nanosecond counter. They're only used by
-+ * update_load_avg and time_slice_expired; however, deadlines are based on them
-+ * across CPUs. Update them whenever we will call one of those functions, and
-+ * synchronise them across CPUs whenever we hold both runqueue locks.
-+ */
-+static inline void update_clocks(struct rq *rq)
-+{
-+ s64 ndiff, minndiff;
-+ long jdiff;
-+
-+ update_rq_clock(rq);
-+ ndiff = rq->clock - rq->old_clock;
-+ rq->old_clock = rq->clock;
-+ jdiff = jiffies - rq->last_jiffy;
-+
-+ /* Subtract any niffies added by balancing with other rqs */
-+ ndiff -= rq->niffies - rq->last_niffy;
-+ minndiff = JIFFIES_TO_NS(jdiff) - rq->niffies + rq->last_jiffy_niffies;
-+ if (minndiff < 0)
-+ minndiff = 0;
-+ ndiff = max(ndiff, minndiff);
-+ rq->niffies += ndiff;
-+ rq->last_niffy = rq->niffies;
-+ if (jdiff) {
-+ rq->last_jiffy += jdiff;
-+ rq->last_jiffy_niffies = rq->niffies;
-+ }
-+}
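-+
-+/*
-+ * Illustration (not in the original patch): if 3 jiffies have elapsed but
-+ * rq->clock only advanced 25ms at HZ=100, minndiff lifts the advance to
-+ * the full 3-jiffy floor (~32ms), so niffies may run ahead of, but never
-+ * fall behind, the jiffy-derived minimum.
-+ */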
-+
-+static inline int task_on_rq_queued(struct task_struct *p)
-+{
-+ return p->on_rq == TASK_ON_RQ_QUEUED;
-+}
-+
-+static inline int task_on_rq_migrating(struct task_struct *p)
-+{
-+ return p->on_rq == TASK_ON_RQ_MIGRATING;
-+}
-+
-+static inline int rq_trylock(struct rq *rq)
-+ __acquires(rq->lock)
-+{
-+ return raw_spin_trylock(&rq->lock);
-+}
-+
-+/*
-+ * Any time we have two runqueues locked we use that as an opportunity to
-+ * synchronise niffies to the highest value as idle ticks may have artificially
-+ * kept niffies low on one CPU and the truth can only be later.
-+ */
-+static inline void synchronise_niffies(struct rq *rq1, struct rq *rq2)
-+{
-+ if (rq1->niffies > rq2->niffies)
-+ rq2->niffies = rq1->niffies;
-+ else
-+ rq1->niffies = rq2->niffies;
-+}
-+
-+/*
-+ * double_rq_lock - safely lock two runqueues
-+ *
-+ * Note this does not disable interrupts like task_rq_lock,
-+ * you need to do so manually before calling.
-+ */
-+
-+/* For when we know rq1 != rq2 */
-+static inline void __double_rq_lock(struct rq *rq1, struct rq *rq2)
-+ __acquires(rq1->lock)
-+ __acquires(rq2->lock)
-+{
-+ if (rq1 < rq2) {
-+ raw_spin_lock(&rq1->lock);
-+ raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
-+ } else {
-+ raw_spin_lock(&rq2->lock);
-+ raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
-+ }
-+}
-+
-+static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
-+ __acquires(rq1->lock)
-+ __acquires(rq2->lock)
-+{
-+ BUG_ON(!irqs_disabled());
-+ if (rq1 == rq2) {
-+ raw_spin_lock(&rq1->lock);
-+ __acquire(rq2->lock); /* Fake it out ;) */
-+ } else
-+ __double_rq_lock(rq1, rq2);
-+ synchronise_niffies(rq1, rq2);
-+}
-+
-+/*
-+ * double_rq_unlock - safely unlock two runqueues
-+ *
-+ * Note this does not restore interrupts like task_rq_unlock,
-+ * you need to do so manually after calling.
-+ */
-+static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
-+ __releases(rq1->lock)
-+ __releases(rq2->lock)
-+{
-+ raw_spin_unlock(&rq1->lock);
-+ if (rq1 != rq2)
-+ raw_spin_unlock(&rq2->lock);
-+ else
-+ __release(rq2->lock);
-+}
-+
-+static inline void lock_all_rqs(void)
-+{
-+ int cpu;
-+
-+ preempt_disable();
-+ for_each_possible_cpu(cpu) {
-+ struct rq *rq = cpu_rq(cpu);
-+
-+ do_raw_spin_lock(&rq->lock);
-+ }
-+}
-+
-+static inline void unlock_all_rqs(void)
-+{
-+ int cpu;
-+
-+ for_each_possible_cpu(cpu) {
-+ struct rq *rq = cpu_rq(cpu);
-+
-+ do_raw_spin_unlock(&rq->lock);
-+ }
-+ preempt_enable();
-+}
-+
-+/* Specially nest trylock an rq */
-+static inline bool trylock_rq(struct rq *this_rq, struct rq *rq)
-+{
-+ if (unlikely(!do_raw_spin_trylock(&rq->lock)))
-+ return false;
-+ spin_acquire(&rq->lock.dep_map, SINGLE_DEPTH_NESTING, 1, _RET_IP_);
-+ synchronise_niffies(this_rq, rq);
-+ return true;
-+}
-+
-+/* Unlock a specially nested trylocked rq */
-+static inline void unlock_rq(struct rq *rq)
-+{
-+ spin_release(&rq->lock.dep_map, 1, _RET_IP_);
-+ do_raw_spin_unlock(&rq->lock);
-+}
-+
-+/*
-+ * cmpxchg based fetch_or, macro so it works for different integer types
-+ */
-+#define fetch_or(ptr, mask) \
-+ ({ \
-+ typeof(ptr) _ptr = (ptr); \
-+ typeof(mask) _mask = (mask); \
-+ typeof(*_ptr) _old, _val = *_ptr; \
-+ \
-+ for (;;) { \
-+ _old = cmpxchg(_ptr, _val, _val | _mask); \
-+ if (_old == _val) \
-+ break; \
-+ _val = _old; \
-+ } \
-+ _old; \
-+})
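-+
-+/*
-+ * Illustration (not in the original patch): fetch_or(ptr, mask) behaves
-+ * like an atomic { old = *ptr; *ptr = old | mask; return old; }, retrying
-+ * the cmpxchg until no other CPU modified *ptr in between; this is what
-+ * lets the polling helpers below test-and-set flags without a lock.
-+ */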
-+
-+#if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG)
-+/*
-+ * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG,
-+ * this avoids any races wrt polling state changes and thereby avoids
-+ * spurious IPIs.
-+ */
-+static bool set_nr_and_not_polling(struct task_struct *p)
-+{
-+ struct thread_info *ti = task_thread_info(p);
-+ return !(fetch_or(&ti->flags, _TIF_NEED_RESCHED) & _TIF_POLLING_NRFLAG);
-+}
-+
-+/*
-+ * Atomically set TIF_NEED_RESCHED if TIF_POLLING_NRFLAG is set.
-+ *
-+ * If this returns true, then the idle task promises to call
-+ * sched_ttwu_pending() and reschedule soon.
-+ */
-+static bool set_nr_if_polling(struct task_struct *p)
-+{
-+ struct thread_info *ti = task_thread_info(p);
-+ typeof(ti->flags) old, val = READ_ONCE(ti->flags);
-+
-+ for (;;) {
-+ if (!(val & _TIF_POLLING_NRFLAG))
-+ return false;
-+ if (val & _TIF_NEED_RESCHED)
-+ return true;
-+ old = cmpxchg(&ti->flags, val, val | _TIF_NEED_RESCHED);
-+ if (old == val)
-+ break;
-+ val = old;
-+ }
-+ return true;
-+}
-+
-+#else
-+static bool set_nr_and_not_polling(struct task_struct *p)
-+{
-+ set_tsk_need_resched(p);
-+ return true;
-+}
-+
-+#ifdef CONFIG_SMP
-+static bool set_nr_if_polling(struct task_struct *p)
-+{
-+ return false;
-+}
-+#endif
-+#endif
-+
-+void wake_q_add(struct wake_q_head *head, struct task_struct *task)
-+{
-+ struct wake_q_node *node = &task->wake_q;
-+
-+ /*
-+ * Atomically grab the task, if ->wake_q is !nil already it means
-+ * its already queued (either by us or someone else) and will get the
-+ * wakeup due to that.
-+ *
-+ * This cmpxchg() implies a full barrier, which pairs with the write
-+ * barrier implied by the wakeup in wake_up_q().
-+ */
-+ if (cmpxchg(&node->next, NULL, WAKE_Q_TAIL))
-+ return;
-+
-+ get_task_struct(task);
-+
-+ /*
-+ * The head is context local, there can be no concurrency.
-+ */
-+ *head->lastp = node;
-+ head->lastp = &node->next;
-+}
-+
-+void wake_up_q(struct wake_q_head *head)
-+{
-+ struct wake_q_node *node = head->first;
-+
-+ while (node != WAKE_Q_TAIL) {
-+ struct task_struct *task;
-+
-+ task = container_of(node, struct task_struct, wake_q);
-+ BUG_ON(!task);
-+ /* Task can safely be re-inserted now */
-+ node = node->next;
-+ task->wake_q.next = NULL;
-+
-+ /*
-+ * wake_up_process() implies a wmb() to pair with the queueing
-+ * in wake_q_add() so as not to miss wakeups.
-+ */
-+ wake_up_process(task);
-+ put_task_struct(task);
-+ }
-+}
-+
-+static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
-+{
-+ next->on_cpu = 1;
-+}
-+
-+static inline void smp_sched_reschedule(int cpu)
-+{
-+ if (likely(cpu_online(cpu)))
-+ smp_send_reschedule(cpu);
-+}
-+
-+/*
-+ * resched_task - mark a task 'to be rescheduled now'.
-+ *
-+ * On UP this means the setting of the need_resched flag, on SMP it
-+ * might also involve a cross-CPU call to trigger the scheduler on
-+ * the target CPU.
-+ */
-+void resched_task(struct task_struct *p)
-+{
-+ int cpu;
-+#ifdef CONFIG_LOCKDEP
-+	/*
-+	 * Kernel threads call this when creating workqueues while still
-+	 * inactive from __kthread_bind_mask, holding only the pi_lock.
-+	 */
-+ if (!(p->flags & PF_KTHREAD)) {
-+ struct rq *rq = task_rq(p);
-+
-+ lockdep_assert_held(&rq->lock);
-+ }
-+#endif
-+ if (test_tsk_need_resched(p))
-+ return;
-+
-+ cpu = task_cpu(p);
-+ if (cpu == smp_processor_id()) {
-+ set_tsk_need_resched(p);
-+ set_preempt_need_resched();
-+ return;
-+ }
-+
-+ if (set_nr_and_not_polling(p))
-+ smp_sched_reschedule(cpu);
-+ else
-+ trace_sched_wake_idle_without_ipi(cpu);
-+}
-+
-+/*
-+ * A task that is not running or queued will not have a node set.
-+ * A task that is queued but not running will have a node set.
-+ * A task that is currently running will have ->on_cpu set but no node set.
-+ */
-+static inline bool task_queued(struct task_struct *p)
-+{
-+ return !skiplist_node_empty(&p->node);
-+}
-+
-+static void enqueue_task(struct rq *rq, struct task_struct *p, int flags);
-+static inline void resched_if_idle(struct rq *rq);
-+
-+/* Dodgy workaround till we figure out where the softirqs are going */
-+static inline void do_pending_softirq(struct rq *rq, struct task_struct *next)
-+{
-+ if (unlikely(next == rq->idle && local_softirq_pending() && !in_interrupt()))
-+ do_softirq_own_stack();
-+}
-+
-+static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
-+{
-+#ifdef CONFIG_SMP
-+ /*
-+ * After ->on_cpu is cleared, the task can be moved to a different CPU.
-+ * We must ensure this doesn't happen until the switch is completely
-+ * finished.
-+ *
-+ * In particular, the load of prev->state in finish_task_switch() must
-+ * happen before this.
-+ *
-+ * Pairs with the smp_cond_load_acquire() in try_to_wake_up().
-+ */
-+ smp_store_release(&prev->on_cpu, 0);
-+#endif
-+#ifdef CONFIG_DEBUG_SPINLOCK
-+ /* this is a valid case when another task releases the spinlock */
-+ rq->lock.owner = current;
-+#endif
-+ /*
-+ * If we are tracking spinlock dependencies then we have to
-+ * fix up the runqueue lock - which gets 'carried over' from
-+ * prev into current:
-+ */
-+ spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
-+
-+#ifdef CONFIG_SMP
-+ /*
-+ * If prev was marked as migrating to another CPU in return_task, drop
-+ * the local runqueue lock but leave interrupts disabled and grab the
-+ * remote lock we're migrating it to before enabling them.
-+ */
-+ if (unlikely(task_on_rq_migrating(prev))) {
-+ sched_info_dequeued(rq, prev);
-+ /*
-+ * We move the ownership of prev to the new cpu now. ttwu can't
-+ * activate prev to the wrong cpu since it has to grab this
-+ * runqueue in ttwu_remote.
-+ */
-+#ifdef CONFIG_THREAD_INFO_IN_TASK
-+ prev->cpu = prev->wake_cpu;
-+#else
-+ task_thread_info(prev)->cpu = prev->wake_cpu;
-+#endif
-+ raw_spin_unlock(&rq->lock);
-+
-+ raw_spin_lock(&prev->pi_lock);
-+ rq = __task_rq_lock(prev);
-+ /* Check that someone else hasn't already queued prev */
-+ if (likely(!task_queued(prev))) {
-+ enqueue_task(rq, prev, 0);
-+ prev->on_rq = TASK_ON_RQ_QUEUED;
-+ /* Wake up the CPU if it's not already running */
-+ resched_if_idle(rq);
-+ }
-+ raw_spin_unlock(&prev->pi_lock);
-+ }
-+#endif
-+ /* Accurately set nr_running here for load average calculations */
-+ rq->nr_running = rq->sl->entries + !rq_idle(rq);
-+ rq_unlock(rq);
-+
-+ do_pending_softirq(rq, current);
-+
-+ local_irq_enable();
-+}
-+
-+static inline bool deadline_before(u64 deadline, u64 time)
-+{
-+ return (deadline < time);
-+}
-+
-+/*
-+ * Deadline is "now" in niffies + (offset by priority). Setting the deadline
-+ * is the key to everything. It distributes cpu fairly amongst tasks of the
-+ * same nice value, it proportions cpu according to nice level, it means the
-+ * task that last woke up the longest ago has the earliest deadline, thus
-+ * ensuring that interactive tasks get low latency on wake up. The CPU
-+ * proportion works out to the square of the virtual deadline difference, so
-+ * this equation will give nice 19 3% CPU compared to nice 0.
-+ */
-+static inline u64 prio_deadline_diff(int user_prio)
-+{
-+ return (prio_ratios[user_prio] * rr_interval * (MS_TO_NS(1) / 128));
-+}
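-+
-+/*
-+ * Worked example (illustrative, assuming the usual MuQSS table where
-+ * prio_ratios[0] is 128 and each level is ~10% longer than the last): at
-+ * the default rr_interval of 6, nice -20 gets 128 * 6 * 8192 ~= 6.3ms of
-+ * deadline offset while nice 0 gets ~42ms, roughly 6.7 times longer,
-+ * which is how CPU time is proportioned between nice levels.
-+ */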
-+
-+static inline u64 task_deadline_diff(struct task_struct *p)
-+{
-+ return prio_deadline_diff(TASK_USER_PRIO(p));
-+}
-+
-+static inline u64 static_deadline_diff(int static_prio)
-+{
-+ return prio_deadline_diff(USER_PRIO(static_prio));
-+}
-+
-+static inline int longest_deadline_diff(void)
-+{
-+ return prio_deadline_diff(39);
-+}
-+
-+static inline int ms_longest_deadline_diff(void)
-+{
-+ return NS_TO_MS(longest_deadline_diff());
-+}
-+
-+static inline bool rq_local(struct rq *rq);
-+
-+#ifndef SCHED_CAPACITY_SCALE
-+#define SCHED_CAPACITY_SCALE 1024
-+#endif
-+
-+static inline int rq_load(struct rq *rq)
-+{
-+ return rq->nr_running;
-+}
-+
-+/*
-+ * Update the load average for feeding into cpu frequency governors. Use a
-+ * rough estimate of a rolling average with ~ time constant of 32ms.
-+ * 80/128 ~ 0.63. * 80 / 32768 / 128 == * 5 / 262144
-+ * Make sure a call to update_clocks has been made before calling this to get
-+ * an updated rq->niffies.
-+ */
-+static void update_load_avg(struct rq *rq, unsigned int flags)
-+{
-+ unsigned long us_interval, curload;
-+ long load;
-+
-+ if (unlikely(rq->niffies <= rq->load_update))
-+ return;
-+
-+ us_interval = NS_TO_US(rq->niffies - rq->load_update);
-+ curload = rq_load(rq);
-+ load = rq->load_avg - (rq->load_avg * us_interval * 5 / 262144);
-+ if (unlikely(load < 0))
-+ load = 0;
-+ load += curload * curload * SCHED_CAPACITY_SCALE * us_interval * 5 / 262144;
-+ rq->load_avg = load;
-+
-+ rq->load_update = rq->niffies;
-+ if (likely(rq_local(rq)))
-+ cpufreq_trigger(rq, flags);
-+}
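-+
-+/*
-+ * Worked numbers (illustrative, not in the original patch): over a
-+ * 32768us (~32ms) interval the old average sheds 32768 * 5 / 262144 =
-+ * 0.625 of its weight, matching the ~0.63 time constant noted above; for
-+ * longer gaps the linear subtraction can exceed the old value, hence the
-+ * clamp to zero.
-+ */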
-+
-+/*
-+ * Removing from the runqueue. Enter with rq locked. Deleting a task
-+ * from the skip list is done via the stored node reference in the task struct
-+ * and does not require a full look up. Thus it occurs in O(k) time where k
-+ * is the "level" of the list the task was stored at - usually < 4, max 8.
-+ */
-+static void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
-+{
-+ skiplist_delete(rq->sl, &p->node);
-+ rq->best_key = rq->node.next[0]->key;
-+ update_clocks(rq);
-+
-+ if (!(flags & DEQUEUE_SAVE))
-+ sched_info_dequeued(task_rq(p), p);
-+ update_load_avg(rq, flags);
-+}
-+
-+#ifdef CONFIG_PREEMPT_RCU
-+static bool rcu_read_critical(struct task_struct *p)
-+{
-+ return p->rcu_read_unlock_special.b.blocked;
-+}
-+#else /* CONFIG_PREEMPT_RCU */
-+#define rcu_read_critical(p) (false)
-+#endif /* CONFIG_PREEMPT_RCU */
-+
-+/*
-+ * To determine if it's safe for a task of SCHED_IDLEPRIO to actually run as
-+ * an idle task, we ensure none of the following conditions are met.
-+ */
-+static bool idleprio_suitable(struct task_struct *p)
-+{
-+ return (!(task_contributes_to_load(p)) && !(p->flags & (PF_EXITING)) &&
-+ !signal_pending(p) && !rcu_read_critical(p) && !freezing(p));
-+}
-+
-+/*
-+ * To determine if a task of SCHED_ISO can run in pseudo-realtime, we check
-+ * that the iso_refractory flag is not set.
-+ */
-+static inline bool isoprio_suitable(struct rq *rq)
-+{
-+ return !rq->iso_refractory;
-+}
-+
-+/*
-+ * Adding to the runqueue. Enter with rq locked.
-+ */
-+static void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
-+{
-+ unsigned int randseed, cflags = 0;
-+ u64 sl_id;
-+
-+ if (!rt_task(p)) {
-+ /* Check it hasn't gotten rt from PI */
-+ if ((idleprio_task(p) && idleprio_suitable(p)) ||
-+ (iso_task(p) && isoprio_suitable(rq)))
-+ p->prio = p->normal_prio;
-+ else
-+ p->prio = NORMAL_PRIO;
-+ }
-+ /*
-+ * The sl_id key passed to the skiplist generates a sorted list.
-+ * Realtime and sched iso tasks run FIFO so they only need be sorted
-+ * according to priority. The skiplist will put tasks of the same
-+ * key inserted later in FIFO order. Tasks of sched normal, batch
-+ * and idleprio are sorted according to their deadlines. Idleprio
-+ * tasks are offset by an impossibly large deadline value ensuring
-+ * they get sorted into last positions, but still according to their
-+ * own deadlines. This creates a "landscape" of skiplists running
-+ * from priority 0 realtime in first place to the lowest priority
-+ * idleprio tasks last. Skiplist insertion is an O(log n) process.
-+ */
-+ if (p->prio <= ISO_PRIO) {
-+ sl_id = p->prio;
-+ cflags = SCHED_CPUFREQ_RT;
-+ } else {
-+ sl_id = p->deadline;
-+ if (idleprio_task(p)) {
-+ if (p->prio == IDLE_PRIO)
-+ sl_id |= 0xF000000000000000;
-+ else
-+ sl_id += longest_deadline_diff();
-+ }
-+ }
-+ /*
-+ * Some architectures don't have better than microsecond resolution
-+ * so mask out ~microseconds as the random seed for skiplist insertion.
-+ */
-+ update_clocks(rq);
-+ if (!(flags & ENQUEUE_RESTORE))
-+ sched_info_queued(rq, p);
-+ randseed = (rq->niffies >> 10) & 0xFFFFFFFF;
-+ skiplist_insert(rq->sl, &p->node, sl_id, p, randseed);
-+ rq->best_key = rq->node.next[0]->key;
-+ if (p->in_iowait)
-+ cflags |= SCHED_CPUFREQ_IOWAIT;
-+ update_load_avg(rq, cflags);
-+}
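-+
-+/*
-+ * Key layout example (illustrative, not in the original patch): realtime
-+ * and ISO tasks insert with their priority number as the key, a nice 0
-+ * task inserts with its deadline in niffies, and an idleprio task
-+ * currently at IDLE_PRIO has 0xF000000000000000 or'd in, sorting it after
-+ * every realistic deadline while keeping idleprio tasks ordered among
-+ * themselves.
-+ */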
-+
-+/*
-+ * Returns the relative length of deadline all compared to the shortest
-+ * deadline which is that of nice -20.
-+ */
-+static inline int task_prio_ratio(struct task_struct *p)
-+{
-+ return prio_ratios[TASK_USER_PRIO(p)];
-+}
-+
-+/*
-+ * task_timeslice - the timeslice actually handed out on refill is the same
-+ * for all priorities (see timeslice() above); this helper returns
-+ * rr_interval scaled by the task's priority ratio. CPU distribution is
-+ * handled by giving different deadlines to tasks of different priorities.
-+ * Use 128 as the base value for fast shifts.
-+ */
-+static inline int task_timeslice(struct task_struct *p)
-+{
-+ return (rr_interval * task_prio_ratio(p) / 128);
-+}
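-+
-+/*
-+ * Illustration (assuming the usual ~10%-per-level prio_ratios table): at
-+ * rr_interval = 6, this works out to ~6ms for nice -20 and ~40ms for
-+ * nice 0.
-+ */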
-+
-+#ifdef CONFIG_SMP
-+/* Entered with rq locked */
-+static inline void resched_if_idle(struct rq *rq)
-+{
-+ if (rq_idle(rq))
-+ resched_task(rq->curr);
-+}
-+
-+static inline bool rq_local(struct rq *rq)
-+{
-+ return (rq->cpu == smp_processor_id());
-+}
-+#ifdef CONFIG_SMT_NICE
-+static const cpumask_t *thread_cpumask(int cpu);
-+
-+/* Find the best real time priority running on any SMT siblings of cpu and if
-+ * none are running, the static priority of the best deadline task running.
-+ * The lookups to the other runqueues are done locklessly as the occasional wrong
-+ * value would be harmless. */
-+static int best_smt_bias(struct rq *this_rq)
-+{
-+ int other_cpu, best_bias = 0;
-+
-+ for_each_cpu(other_cpu, &this_rq->thread_mask) {
-+ struct rq *rq = cpu_rq(other_cpu);
-+
-+ if (rq_idle(rq))
-+ continue;
-+ if (unlikely(!rq->online))
-+ continue;
-+ if (!rq->rq_mm)
-+ continue;
-+ if (likely(rq->rq_smt_bias > best_bias))
-+ best_bias = rq->rq_smt_bias;
-+ }
-+ return best_bias;
-+}
-+
-+static int task_prio_bias(struct task_struct *p)
-+{
-+ if (rt_task(p))
-+ return 1 << 30;
-+ else if (task_running_iso(p))
-+ return 1 << 29;
-+ else if (task_running_idle(p))
-+ return 0;
-+ return MAX_PRIO - p->static_prio;
-+}
-+
-+static bool smt_always_schedule(struct task_struct __maybe_unused *p, struct rq __maybe_unused *this_rq)
-+{
-+ return true;
-+}
-+
-+static bool (*smt_schedule)(struct task_struct *p, struct rq *this_rq) = &smt_always_schedule;
-+
-+/* We've already decided p can run on CPU, now test if it shouldn't for SMT
-+ * nice reasons. */
-+static bool smt_should_schedule(struct task_struct *p, struct rq *this_rq)
-+{
-+ int best_bias, task_bias;
-+
-+ /* Kernel threads always run */
-+ if (unlikely(!p->mm))
-+ return true;
-+ if (rt_task(p))
-+ return true;
-+ if (!idleprio_suitable(p))
-+ return true;
-+ best_bias = best_smt_bias(this_rq);
-+ /* The smt siblings are all idle or running IDLEPRIO */
-+ if (best_bias < 1)
-+ return true;
-+ task_bias = task_prio_bias(p);
-+ if (task_bias < 1)
-+ return false;
-+ if (task_bias >= best_bias)
-+ return true;
-+ /* Dither 25% cpu of normal tasks regardless of nice difference */
-+ if (best_bias % 4 == 1)
-+ return true;
-+ /* Sorry, you lose */
-+ return false;
-+}
-+#else /* CONFIG_SMT_NICE */
-+#define smt_schedule(p, this_rq) (true)
-+#endif /* CONFIG_SMT_NICE */
-+
-+static inline void atomic_set_cpu(int cpu, cpumask_t *cpumask)
-+{
-+ set_bit(cpu, (volatile unsigned long *)cpumask);
-+}
-+
-+/*
-+ * The cpu_idle_map stores a bitmap of all the CPUs currently idle to
-+ * allow easy lookup of whether any suitable idle CPUs are available.
-+ * It's cheaper to maintain a simple yes/no indication of whether any idle
-+ * CPUs exist than to do a full bitmask check when we are busy. The bits
-+ * are set atomically but read locklessly as an occasional false positive
-+ * or negative is harmless.
-+ */
-+static inline void set_cpuidle_map(int cpu)
-+{
-+ if (likely(cpu_online(cpu)))
-+ atomic_set_cpu(cpu, &cpu_idle_map);
-+}
-+
-+static inline void atomic_clear_cpu(int cpu, cpumask_t *cpumask)
-+{
-+ clear_bit(cpu, (volatile unsigned long *)cpumask);
-+}
-+
-+static inline void clear_cpuidle_map(int cpu)
-+{
-+ atomic_clear_cpu(cpu, &cpu_idle_map);
-+}
-+
-+static bool suitable_idle_cpus(struct task_struct *p)
-+{
-+ return (cpumask_intersects(&p->cpus_allowed, &cpu_idle_map));
-+}
-+
-+/*
-+ * Resched current on rq. We don't know if rq is local to this CPU nor if it
-+ * is locked so we do not use an intermediate variable for the task to avoid
-+ * having it dereferenced.
-+ */
-+static void resched_curr(struct rq *rq)
-+{
-+ int cpu;
-+
-+ if (test_tsk_need_resched(rq->curr))
-+ return;
-+
-+ rq->preempt = rq->curr;
-+ cpu = rq->cpu;
-+
-+ /* We're doing this without holding the rq lock if it's not task_rq */
-+
-+ if (cpu == smp_processor_id()) {
-+ set_tsk_need_resched(rq->curr);
-+ set_preempt_need_resched();
-+ return;
-+ }
-+
-+ if (set_nr_and_not_polling(rq->curr))
-+ smp_sched_reschedule(cpu);
-+ else
-+ trace_sched_wake_idle_without_ipi(cpu);
-+}
-+
-+#define CPUIDLE_DIFF_THREAD (1)
-+#define CPUIDLE_DIFF_CORE (2)
-+#define CPUIDLE_CACHE_BUSY (4)
-+#define CPUIDLE_DIFF_CPU (8)
-+#define CPUIDLE_THREAD_BUSY (16)
-+#define CPUIDLE_DIFF_NODE (32)
-+
-+/*
-+ * The best idle CPU is chosen according to the CPUIDLE ranking above where the
-+ * lowest value would give the most suitable CPU to schedule p onto next. The
-+ * order works out to be the following:
-+ *
-+ * Same thread, idle or busy cache, idle or busy threads
-+ * Other core, same cache, idle or busy cache, idle threads.
-+ * Same node, other CPU, idle cache, idle threads.
-+ * Same node, other CPU, busy cache, idle threads.
-+ * Other core, same cache, busy threads.
-+ * Same node, other CPU, busy threads.
-+ * Other node, other CPU, idle cache, idle threads.
-+ * Other node, other CPU, busy cache, idle threads.
-+ * Other node, other CPU, busy threads.
-+ */
-+static int best_mask_cpu(int best_cpu, struct rq *rq, cpumask_t *tmpmask)
-+{
-+ int best_ranking = CPUIDLE_DIFF_NODE | CPUIDLE_THREAD_BUSY |
-+ CPUIDLE_DIFF_CPU | CPUIDLE_CACHE_BUSY | CPUIDLE_DIFF_CORE |
-+ CPUIDLE_DIFF_THREAD;
-+ int cpu_tmp;
-+
-+ if (cpumask_test_cpu(best_cpu, tmpmask))
-+ goto out;
-+
-+ for_each_cpu(cpu_tmp, tmpmask) {
-+ int ranking, locality;
-+ struct rq *tmp_rq;
-+
-+ ranking = 0;
-+ tmp_rq = cpu_rq(cpu_tmp);
-+
-+ locality = rq->cpu_locality[cpu_tmp];
-+#ifdef CONFIG_NUMA
-+ if (locality > 3)
-+ ranking |= CPUIDLE_DIFF_NODE;
-+ else
-+#endif
-+ if (locality > 2)
-+ ranking |= CPUIDLE_DIFF_CPU;
-+#ifdef CONFIG_SCHED_MC
-+ else if (locality == 2)
-+ ranking |= CPUIDLE_DIFF_CORE;
-+ else if (!(tmp_rq->cache_idle(tmp_rq)))
-+ ranking |= CPUIDLE_CACHE_BUSY;
-+#endif
-+#ifdef CONFIG_SCHED_SMT
-+ if (locality == 1)
-+ ranking |= CPUIDLE_DIFF_THREAD;
-+ if (!(tmp_rq->siblings_idle(tmp_rq)))
-+ ranking |= CPUIDLE_THREAD_BUSY;
-+#endif
-+ if (ranking < best_ranking) {
-+ best_cpu = cpu_tmp;
-+ best_ranking = ranking;
-+ }
-+ }
-+out:
-+ return best_cpu;
-+}
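-+
-+/*
-+ * Ranking arithmetic example (illustrative, not in the original patch):
-+ * an idle thread on another core behind a busy shared cache scores
-+ * CPUIDLE_DIFF_CORE | CPUIDLE_CACHE_BUSY = 6, which still beats an idle
-+ * cache on another CPU of the same node (CPUIDLE_DIFF_CPU = 8), matching
-+ * the preference order listed above.
-+ */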
-+
-+bool cpus_share_cache(int this_cpu, int that_cpu)
-+{
-+ struct rq *this_rq = cpu_rq(this_cpu);
-+
-+ return (this_rq->cpu_locality[that_cpu] < 3);
-+}
-+
-+/* As per resched_curr but only will resched idle task */
-+static inline void resched_idle(struct rq *rq)
-+{
-+ if (test_tsk_need_resched(rq->idle))
-+ return;
-+
-+ rq->preempt = rq->idle;
-+
-+ set_tsk_need_resched(rq->idle);
-+
-+ if (rq_local(rq)) {
-+ set_preempt_need_resched();
-+ return;
-+ }
-+
-+ smp_sched_reschedule(rq->cpu);
-+}
-+
-+static struct rq *resched_best_idle(struct task_struct *p, int cpu)
-+{
-+ cpumask_t tmpmask;
-+ struct rq *rq;
-+ int best_cpu;
-+
-+ cpumask_and(&tmpmask, &p->cpus_allowed, &cpu_idle_map);
-+ best_cpu = best_mask_cpu(cpu, task_rq(p), &tmpmask);
-+ rq = cpu_rq(best_cpu);
-+ if (!smt_schedule(p, rq))
-+ return NULL;
-+ rq->preempt = p;
-+ resched_idle(rq);
-+ return rq;
-+}
-+
-+static inline void resched_suitable_idle(struct task_struct *p)
-+{
-+ if (suitable_idle_cpus(p))
-+ resched_best_idle(p, task_cpu(p));
-+}
-+
-+static inline struct rq *rq_order(struct rq *rq, int cpu)
-+{
-+ return rq->rq_order[cpu];
-+}
-+#else /* CONFIG_SMP */
-+static inline void set_cpuidle_map(int cpu)
-+{
-+}
-+
-+static inline void clear_cpuidle_map(int cpu)
-+{
-+}
-+
-+static inline bool suitable_idle_cpus(struct task_struct *p)
-+{
-+ return uprq->curr == uprq->idle;
-+}
-+
-+static inline void resched_suitable_idle(struct task_struct *p)
-+{
-+}
-+
-+static inline void resched_curr(struct rq *rq)
-+{
-+ resched_task(rq->curr);
-+}
-+
-+static inline void resched_if_idle(struct rq *rq)
-+{
-+}
-+
-+static inline bool rq_local(struct rq *rq)
-+{
-+ return true;
-+}
-+
-+static inline struct rq *rq_order(struct rq *rq, int cpu)
-+{
-+ return rq;
-+}
-+
-+static inline bool smt_schedule(struct task_struct *p, struct rq *rq)
-+{
-+ return true;
-+}
-+#endif /* CONFIG_SMP */
-+
-+static inline int normal_prio(struct task_struct *p)
-+{
-+ if (has_rt_policy(p))
-+ return MAX_RT_PRIO - 1 - p->rt_priority;
-+ if (idleprio_task(p))
-+ return IDLE_PRIO;
-+ if (iso_task(p))
-+ return ISO_PRIO;
-+ return NORMAL_PRIO;
-+}
-+
-+/*
-+ * Calculate the current priority, i.e. the priority
-+ * taken into account by the scheduler. This value might be
-+ * boosted by priority inheritance, and will be RT if the task
-+ * got RT-boosted. If not then it returns p->normal_prio.
-+ */
-+static int effective_prio(struct task_struct *p)
-+{
-+ p->normal_prio = normal_prio(p);
-+ /*
-+ * If we are RT tasks or we were boosted to RT priority,
-+ * keep the priority unchanged. Otherwise, update priority
-+ * to the normal priority:
-+ */
-+ if (!rt_prio(p->prio))
-+ return p->normal_prio;
-+ return p->prio;
-+}
-+
-+/*
-+ * activate_task - move a task to the runqueue. Enter with rq locked.
-+ */
-+static void activate_task(struct task_struct *p, struct rq *rq)
-+{
-+ resched_if_idle(rq);
-+
-+ /*
-+ * Sleep time is in units of nanosecs, so shift by 20 to get a
-+ * milliseconds-range estimation of the amount of time that the task
-+ * spent sleeping:
-+ */
-+ if (unlikely(prof_on == SLEEP_PROFILING)) {
-+ if (p->state == TASK_UNINTERRUPTIBLE)
-+ profile_hits(SLEEP_PROFILING, (void *)get_wchan(p),
-+ (rq->niffies - p->last_ran) >> 20);
-+ }
-+
-+ p->prio = effective_prio(p);
-+ if (task_contributes_to_load(p))
-+ rq->nr_uninterruptible--;
-+
-+ enqueue_task(rq, p, 0);
-+ p->on_rq = TASK_ON_RQ_QUEUED;
-+}
-+
-+/*
-+ * deactivate_task - The task is the one currently running, so it is not
-+ * queued in the skip list; deactivating it only updates the accounting.
-+ * Enter with rq locked.
-+ */
-+static inline void deactivate_task(struct task_struct *p, struct rq *rq)
-+{
-+ if (task_contributes_to_load(p))
-+ rq->nr_uninterruptible++;
-+
-+ p->on_rq = 0;
-+ sched_info_dequeued(rq, p);
-+}
-+
-+#ifdef CONFIG_SMP
-+void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
-+{
-+ struct rq *rq;
-+
-+ if (task_cpu(p) == new_cpu)
-+ return;
-+
-+ /* Do NOT call set_task_cpu on a currently queued task as we will not
-+ * be reliably holding the rq lock after changing CPU. */
-+ BUG_ON(task_queued(p));
-+ rq = task_rq(p);
-+
-+#ifdef CONFIG_LOCKDEP
-+ /*
-+ * The caller should hold either p->pi_lock or rq->lock, when changing
-+ * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
-+ *
-+ * Furthermore, all task_rq users should acquire both locks, see
-+ * task_rq_lock().
-+ */
-+ WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
-+ lockdep_is_held(&rq->lock)));
-+#endif
-+
-+ trace_sched_migrate_task(p, new_cpu);
-+ perf_event_task_migrate(p);
-+
-+ /*
-+ * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
-+ * successfully executed on another CPU. We must ensure that updates of
-+ * per-task data have been completed by this moment.
-+ */
-+ smp_wmb();
-+
-+ p->wake_cpu = new_cpu;
-+
-+ if (task_running(rq, p)) {
-+ /*
-+ * We should only be calling this on a running task if we're
-+ * holding rq lock.
-+ */
-+ lockdep_assert_held(&rq->lock);
-+
-+ /*
-+ * We can't change the task_thread_info CPU on a running task
-+ * as p will still be protected by the rq lock of the CPU it
-+ * is still running on so we only set the wake_cpu for it to be
-+ * lazily updated once off the CPU.
-+ */
-+ return;
-+ }
-+
-+#ifdef CONFIG_THREAD_INFO_IN_TASK
-+ p->cpu = new_cpu;
-+#else
-+ task_thread_info(p)->cpu = new_cpu;
-+#endif
-+ /* We're no longer protecting p after this point since we're holding
-+ * the wrong runqueue lock. */
-+}
-+#endif /* CONFIG_SMP */
-+
-+/*
-+ * Move a task off the runqueue and take it to a cpu where it will
-+ * become the running task.
-+ */
-+static inline void take_task(struct rq *rq, int cpu, struct task_struct *p)
-+{
-+ struct rq *p_rq = task_rq(p);
-+
-+ dequeue_task(p_rq, p, DEQUEUE_SAVE);
-+ if (p_rq != rq) {
-+ sched_info_dequeued(p_rq, p);
-+ sched_info_queued(rq, p);
-+ }
-+ set_task_cpu(p, cpu);
-+}
-+
-+/*
-+ * Returns a descheduling task to the runqueue unless it is being
-+ * deactivated.
-+ */
-+static inline void return_task(struct task_struct *p, struct rq *rq,
-+ int cpu, bool deactivate)
-+{
-+ if (deactivate)
-+ deactivate_task(p, rq);
-+ else {
-+#ifdef CONFIG_SMP
-+ /*
-+ * set_task_cpu was called on the running task that doesn't
-+ * want to deactivate so it has to be enqueued to a different
-+ * CPU and we need its lock. Tag it to be moved as the
-+ * lock is dropped in finish_lock_switch.
-+ */
-+ if (unlikely(p->wake_cpu != cpu))
-+ p->on_rq = TASK_ON_RQ_MIGRATING;
-+ else
-+#endif
-+ enqueue_task(rq, p, ENQUEUE_RESTORE);
-+ }
-+}
-+
-+/* Enter with rq lock held. We know p is on the local cpu */
-+static inline void __set_tsk_resched(struct task_struct *p)
-+{
-+ set_tsk_need_resched(p);
-+ set_preempt_need_resched();
-+}
-+
-+/**
-+ * task_curr - is this task currently executing on a CPU?
-+ * @p: the task in question.
-+ *
-+ * Return: 1 if the task is currently executing. 0 otherwise.
-+ */
-+inline int task_curr(const struct task_struct *p)
-+{
-+ return cpu_curr(task_cpu(p)) == p;
-+}
-+
-+#ifdef CONFIG_SMP
-+/*
-+ * wait_task_inactive - wait for a thread to unschedule.
-+ *
-+ * If @match_state is nonzero, it's the @p->state value just checked and
-+ * not expected to change. If it changes, i.e. @p might have woken up,
-+ * then return zero. When we succeed in waiting for @p to be off its CPU,
-+ * we return a positive number (its total switch count). If a second call
-+ * a short while later returns the same number, the caller can be sure that
-+ * @p has remained unscheduled the whole time.
-+ *
-+ * The caller must ensure that the task *will* unschedule sometime soon,
-+ * else this function might spin for a *long* time. This function can't
-+ * be called with interrupts off, or it may introduce deadlock with
-+ * smp_call_function() if an IPI is sent by the same process we are
-+ * waiting to become inactive.
-+ */
-+unsigned long wait_task_inactive(struct task_struct *p, long match_state)
-+{
-+ int running, queued;
-+ unsigned long flags;
-+ unsigned long ncsw;
-+ struct rq *rq;
-+
-+ for (;;) {
-+ rq = task_rq(p);
-+
-+ /*
-+ * If the task is actively running on another CPU
-+ * still, just relax and busy-wait without holding
-+ * any locks.
-+ *
-+ * NOTE! Since we don't hold any locks, it's not
-+ * even sure that "rq" stays as the right runqueue!
-+ * But we don't care, since this will return false
-+ * if the runqueue has changed and p is actually now
-+ * running somewhere else!
-+ */
-+ while (task_running(rq, p)) {
-+ if (match_state && unlikely(p->state != match_state))
-+ return 0;
-+ cpu_relax();
-+ }
-+
-+ /*
-+ * Ok, time to look more closely! We need the rq
-+ * lock now, to be *sure*. If we're wrong, we'll
-+ * just go back and repeat.
-+ */
-+ rq = task_rq_lock(p, &flags);
-+ trace_sched_wait_task(p);
-+ running = task_running(rq, p);
-+ queued = task_on_rq_queued(p);
-+ ncsw = 0;
-+ if (!match_state || p->state == match_state)
-+ ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
-+ task_rq_unlock(rq, p, &flags);
-+
-+ /*
-+ * If it changed from the expected state, bail out now.
-+ */
-+ if (unlikely(!ncsw))
-+ break;
-+
-+ /*
-+ * Was it really running after all now that we
-+ * checked with the proper locks actually held?
-+ *
-+ * Oops. Go back and try again..
-+ */
-+ if (unlikely(running)) {
-+ cpu_relax();
-+ continue;
-+ }
-+
-+ /*
-+ * It's not enough that it's not actively running,
-+ * it must be off the runqueue _entirely_, and not
-+ * preempted!
-+ *
-+ * So if it was still runnable (but just not actively
-+ * running right now), it's preempted, and we should
-+ * yield - it could be a while.
-+ */
-+ if (unlikely(queued)) {
-+ ktime_t to = NSEC_PER_SEC / HZ;
-+
-+ set_current_state(TASK_UNINTERRUPTIBLE);
-+ schedule_hrtimeout(&to, HRTIMER_MODE_REL);
-+ continue;
-+ }
-+
-+ /*
-+ * Ahh, all good. It wasn't running, and it wasn't
-+ * runnable, which means that it will never become
-+ * running in the future either. We're all done!
-+ */
-+ break;
-+ }
-+
-+ return ncsw;
-+}
-+
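-+/*
-+ * A minimal usage sketch of the switch-count contract documented above
-+ * (illustrative only, not part of this patch): sample ncsw twice and
-+ * compare; an unchanged nonzero value proves @p stayed unscheduled in
-+ * between. The helper name is hypothetical.
-+ *
-+ *	ncsw = wait_task_inactive(p, TASK_UNINTERRUPTIBLE);
-+ *	...
-+ *	if (ncsw && wait_task_inactive(p, TASK_UNINTERRUPTIBLE) == ncsw)
-+ *		p_stayed_inactive();
-+ */
-+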
-+/***
-+ * kick_process - kick a running thread to enter/exit the kernel
-+ * @p: the to-be-kicked thread
-+ *
-+ * Cause a process which is running on another CPU to enter
-+ * kernel-mode, without any delay. (to get signals handled.)
-+ *
-+ * NOTE: this function doesn't have to take the runqueue lock,
-+ * because all it wants to ensure is that the remote task enters
-+ * the kernel. If the IPI races and the task has been migrated
-+ * to another CPU then no harm is done and the purpose has been
-+ * achieved as well.
-+ */
-+void kick_process(struct task_struct *p)
-+{
-+ int cpu;
-+
-+ preempt_disable();
-+ cpu = task_cpu(p);
-+ if ((cpu != smp_processor_id()) && task_curr(p))
-+ smp_sched_reschedule(cpu);
-+ preempt_enable();
-+}
-+EXPORT_SYMBOL_GPL(kick_process);
-+#endif
-+
-+/*
-+ * RT tasks preempt purely on priority. SCHED_NORMAL tasks preempt on the
-+ * basis of earlier deadlines. SCHED_IDLEPRIO tasks don't preempt anything
-+ * else or each other; they cooperatively multitask. An idle rq scores as
-+ * prio PRIO_LIMIT so it is always preempted.
-+ */
-+static inline bool
-+can_preempt(struct task_struct *p, int prio, u64 deadline)
-+{
-+ /* Better static priority RT task or better policy preemption */
-+ if (p->prio < prio)
-+ return true;
-+ if (p->prio > prio)
-+ return false;
-+ if (p->policy == SCHED_BATCH)
-+ return false;
-+ /* SCHED_NORMAL and ISO will preempt based on deadline */
-+ if (!deadline_before(p->deadline, deadline))
-+ return false;
-+ return true;
-+}
-+
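-+/*
-+ * Worked example of the ordering in can_preempt() (priority values assumed
-+ * from the usual kernel convention that a lower ->prio is better, with RT
-+ * priorities below the SCHED_NORMAL range): a task at prio 98 preempts an
-+ * rq running at prio 120 on priority alone; two SCHED_NORMAL tasks at
-+ * equal prio fall through to the deadline check, so p preempts only when
-+ * deadline_before(p->deadline, rq->rq_deadline). An idle rq advertises
-+ * PRIO_LIMIT and therefore always loses the first comparison.
-+ */
-+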
-+#ifdef CONFIG_SMP
-+/*
-+ * Check to see if p can run on cpu, and if not, whether there are any online
-+ * CPUs it can run on instead.
-+ */
-+static inline bool needs_other_cpu(struct task_struct *p, int cpu)
-+{
-+ if (unlikely(!cpumask_test_cpu(cpu, &p->cpus_allowed)))
-+ return true;
-+ return false;
-+}
-+#define cpu_online_map (*(cpumask_t *)cpu_online_mask)
-+
-+static void try_preempt(struct task_struct *p, struct rq *this_rq)
-+{
-+ int i, this_entries = rq_load(this_rq);
-+ cpumask_t tmp;
-+
-+ if (suitable_idle_cpus(p) && resched_best_idle(p, task_cpu(p)))
-+ return;
-+
-+ /* IDLEPRIO tasks never preempt anything but idle */
-+ if (p->policy == SCHED_IDLEPRIO)
-+ return;
-+
-+ cpumask_and(&tmp, &cpu_online_map, &p->cpus_allowed);
-+
-+ for (i = 0; i < num_possible_cpus(); i++) {
-+ struct rq *rq = this_rq->rq_order[i];
-+
-+ if (!cpumask_test_cpu(rq->cpu, &tmp))
-+ continue;
-+
-+ if (!sched_interactive && rq != this_rq && rq_load(rq) <= this_entries)
-+ continue;
-+ if (smt_schedule(p, rq) && can_preempt(p, rq->rq_prio, rq->rq_deadline)) {
-+ /* We set rq->preempting lockless, it's a hint only */
-+ rq->preempting = p;
-+ resched_curr(rq);
-+ return;
-+ }
-+ }
-+}
-+
-+static int __set_cpus_allowed_ptr(struct task_struct *p,
-+ const struct cpumask *new_mask, bool check);
-+#else /* CONFIG_SMP */
-+static inline bool needs_other_cpu(struct task_struct *p, int cpu)
-+{
-+ return false;
-+}
-+
-+static void try_preempt(struct task_struct *p, struct rq *this_rq)
-+{
-+ if (p->policy == SCHED_IDLEPRIO)
-+ return;
-+ if (can_preempt(p, uprq->rq_prio, uprq->rq_deadline))
-+ resched_curr(uprq);
-+}
-+
-+static inline int __set_cpus_allowed_ptr(struct task_struct *p,
-+ const struct cpumask *new_mask, bool check)
-+{
-+ return set_cpus_allowed_ptr(p, new_mask);
-+}
-+#endif /* CONFIG_SMP */
-+
-+/*
-+ * wake flags
-+ */
-+#define WF_SYNC 0x01 /* waker goes to sleep after wakeup */
-+#define WF_FORK 0x02 /* child wakeup after fork */
-+#define WF_MIGRATED 0x04 /* internal use, task got migrated */
-+
-+static void
-+ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
-+{
-+ struct rq *rq;
-+
-+ if (!schedstat_enabled())
-+ return;
-+
-+ rq = this_rq();
-+
-+#ifdef CONFIG_SMP
-+ if (cpu == rq->cpu)
-+ schedstat_inc(rq->ttwu_local);
-+ else {
-+ struct sched_domain *sd;
-+
-+ rcu_read_lock();
-+ for_each_domain(rq->cpu, sd) {
-+ if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
-+ schedstat_inc(sd->ttwu_wake_remote);
-+ break;
-+ }
-+ }
-+ rcu_read_unlock();
-+ }
-+
-+#endif /* CONFIG_SMP */
-+
-+ schedstat_inc(rq->ttwu_count);
-+}
-+
-+static inline void ttwu_activate(struct rq *rq, struct task_struct *p)
-+{
-+ activate_task(p, rq);
-+
-+ /* if a worker is waking up, notify the workqueue */
-+ if (p->flags & PF_WQ_WORKER)
-+ wq_worker_waking_up(p, cpu_of(rq));
-+}
-+
-+/*
-+ * Mark the task runnable and perform wakeup-preemption.
-+ */
-+static void ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
-+{
-+ /*
-+ * Sync wakeups (i.e. those types of wakeups where the waker
-+ * has indicated that it will leave the CPU in short order)
-+ * don't trigger a preemption if there are no idle cpus,
-+ * instead waiting for current to deschedule.
-+ */
-+ if (wake_flags & WF_SYNC)
-+ resched_suitable_idle(p);
-+ else
-+ try_preempt(p, rq);
-+ p->state = TASK_RUNNING;
-+ trace_sched_wakeup(p);
-+}
-+
-+static void
-+ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags)
-+{
-+ lockdep_assert_held(&rq->lock);
-+
-+#ifdef CONFIG_SMP
-+ if (p->sched_contributes_to_load)
-+ rq->nr_uninterruptible--;
-+#endif
-+
-+ ttwu_activate(rq, p);
-+ ttwu_do_wakeup(rq, p, wake_flags);
-+}
-+
-+/*
-+ * Called in case the task @p isn't fully descheduled from its runqueue,
-+ * in this case we must do a remote wakeup. It's a 'light' wakeup though,
-+ * since all we need to do is flip p->state to TASK_RUNNING, since
-+ * the task is still ->on_rq.
-+ */
-+static int ttwu_remote(struct task_struct *p, int wake_flags)
-+{
-+ struct rq *rq;
-+ int ret = 0;
-+
-+ rq = __task_rq_lock(p);
-+ if (likely(task_on_rq_queued(p))) {
-+ ttwu_do_wakeup(rq, p, wake_flags);
-+ ret = 1;
-+ }
-+ __task_rq_unlock(rq);
-+
-+ return ret;
-+}
-+
-+#ifdef CONFIG_SMP
-+void sched_ttwu_pending(void)
-+{
-+ struct rq *rq = this_rq();
-+ struct llist_node *llist = llist_del_all(&rq->wake_list);
-+ struct task_struct *p, *t;
-+ unsigned long flags;
-+
-+ if (!llist)
-+ return;
-+
-+ rq_lock_irqsave(rq, &flags);
-+
-+ llist_for_each_entry_safe(p, t, llist, wake_entry)
-+ ttwu_do_activate(rq, p, 0);
-+
-+ rq_unlock_irqrestore(rq, &flags);
-+}
-+
-+void scheduler_ipi(void)
-+{
-+ /*
-+ * Fold TIF_NEED_RESCHED into the preempt_count; anybody setting
-+ * TIF_NEED_RESCHED remotely (for the first time) will also send
-+ * this IPI.
-+ */
-+ preempt_fold_need_resched();
-+
-+ if (llist_empty(&this_rq()->wake_list) && (!idle_cpu(smp_processor_id()) || need_resched()))
-+ return;
-+
-+ /*
-+ * Not all reschedule IPI handlers call irq_enter/irq_exit, since
-+ * traditionally all their work was done from the interrupt return
-+ * path. Now that we actually do some work, we need to make sure
-+ * we do call them.
-+ *
-+ * Some archs already do call them, luckily irq_enter/exit nest
-+ * properly.
-+ *
-+ * Arguably we should visit all archs and update all handlers,
-+ * however a fair share of IPIs are still resched only so this would
-+ * somewhat pessimize the simple resched case.
-+ */
-+ irq_enter();
-+ sched_ttwu_pending();
-+ irq_exit();
-+}
-+
-+static void ttwu_queue_remote(struct task_struct *p, int cpu, int wake_flags)
-+{
-+ struct rq *rq = cpu_rq(cpu);
-+
-+ if (llist_add(&p->wake_entry, &cpu_rq(cpu)->wake_list)) {
-+ if (!set_nr_if_polling(rq->idle))
-+ smp_sched_reschedule(cpu);
-+ else
-+ trace_sched_wake_idle_without_ipi(cpu);
-+ }
-+}
-+
-+void wake_up_if_idle(int cpu)
-+{
-+ struct rq *rq = cpu_rq(cpu);
-+ unsigned long flags;
-+
-+ rcu_read_lock();
-+
-+ if (!is_idle_task(rcu_dereference(rq->curr)))
-+ goto out;
-+
-+ if (set_nr_if_polling(rq->idle)) {
-+ trace_sched_wake_idle_without_ipi(cpu);
-+ } else {
-+ rq_lock_irqsave(rq, &flags);
-+ if (likely(is_idle_task(rq->curr)))
-+ smp_sched_reschedule(cpu);
-+ /* Else cpu is not in idle, do nothing here */
-+ rq_unlock_irqrestore(rq, &flags);
-+ }
-+
-+out:
-+ rcu_read_unlock();
-+}
-+
-+static int valid_task_cpu(struct task_struct *p)
-+{
-+ cpumask_t valid_mask;
-+
-+ if (p->flags & PF_KTHREAD)
-+ cpumask_and(&valid_mask, &p->cpus_allowed, cpu_online_mask);
-+ else
-+ cpumask_and(&valid_mask, &p->cpus_allowed, cpu_active_mask);
-+
-+ if (unlikely(!cpumask_weight(&valid_mask))) {
-+ /* Hotplug boot threads do this before the CPU is up */
-+ printk(KERN_INFO "SCHED: No cpumask for %s/%d weight %d\n", p->comm, p->pid, cpumask_weight(&p->cpus_allowed));
-+ return cpumask_any(&p->cpus_allowed);
-+ }
-+ return cpumask_any(&valid_mask);
-+}
-+
-+/*
-+ * For a task that's just being woken up we have a valuable balancing
-+ * opportunity, so choose the most lightly loaded runqueue in the nearest
-+ * cache locality.
-+ * Entered with rq locked and returns with the chosen runqueue locked.
-+ */
-+static inline int select_best_cpu(struct task_struct *p)
-+{
-+ unsigned int idlest = ~0U;
-+ struct rq *rq = NULL;
-+ int i;
-+
-+ if (suitable_idle_cpus(p)) {
-+ int cpu = task_cpu(p);
-+
-+ if (unlikely(needs_other_cpu(p, cpu)))
-+ cpu = valid_task_cpu(p);
-+ rq = resched_best_idle(p, cpu);
-+ if (likely(rq))
-+ return rq->cpu;
-+ }
-+
-+ for (i = 0; i < num_possible_cpus(); i++) {
-+ struct rq *other_rq = task_rq(p)->rq_order[i];
-+ int entries;
-+
-+ if (!other_rq->online)
-+ continue;
-+ if (needs_other_cpu(p, other_rq->cpu))
-+ continue;
-+ entries = rq_load(other_rq);
-+ if (entries >= idlest)
-+ continue;
-+ idlest = entries;
-+ rq = other_rq;
-+ }
-+ if (unlikely(!rq))
-+ return task_cpu(p);
-+ return rq->cpu;
-+}
-+#else /* CONFIG_SMP */
-+static int valid_task_cpu(struct task_struct *p)
-+{
-+ return 0;
-+}
-+
-+static inline int select_best_cpu(struct task_struct *p)
-+{
-+ return 0;
-+}
-+
-+static struct rq *resched_best_idle(struct task_struct *p, int cpu)
-+{
-+ return NULL;
-+}
-+#endif /* CONFIG_SMP */
-+
-+static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags)
-+{
-+ struct rq *rq = cpu_rq(cpu);
-+
-+#if defined(CONFIG_SMP)
-+ if (!cpus_share_cache(smp_processor_id(), cpu)) {
-+ sched_clock_cpu(cpu); /* Sync clocks across CPUs */
-+ ttwu_queue_remote(p, cpu, wake_flags);
-+ return;
-+ }
-+#endif
-+ rq_lock(rq);
-+ ttwu_do_activate(rq, p, wake_flags);
-+ rq_unlock(rq);
-+}
-+
-+/***
-+ * try_to_wake_up - wake up a thread
-+ * @p: the thread to be awakened
-+ * @state: the mask of task states that can be woken
-+ * @wake_flags: wake modifier flags (WF_*)
-+ *
-+ * Put it on the run-queue if it's not already there. The "current"
-+ * thread is always on the run-queue (except when the actual
-+ * re-schedule is in progress), and as such you're allowed to do
-+ * the simpler "current->state = TASK_RUNNING" to mark yourself
-+ * runnable without the overhead of this.
-+ *
-+ * Return: %true if @p was woken up, %false if it was already running,
-+ * or @state didn't match @p's state.
-+ */
-+static int
-+try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
-+{
-+ unsigned long flags;
-+ int cpu, success = 0;
-+
-+ /*
-+ * If we are going to wake up a thread waiting for CONDITION we
-+ * need to ensure that CONDITION=1 done by the caller can not be
-+ * reordered with p->state check below. This pairs with mb() in
-+ * set_current_state() the waiting thread does.
-+ */
-+ raw_spin_lock_irqsave(&p->pi_lock, flags);
-+ smp_mb__after_spinlock();
-+	/* state is a volatile long, why, I don't know */
-+ if (!((unsigned int)p->state & state))
-+ goto out;
-+
-+ trace_sched_waking(p);
-+
-+ /* We're going to change ->state: */
-+ success = 1;
-+ cpu = task_cpu(p);
-+
-+ /*
-+ * Ensure we load p->on_rq _after_ p->state, otherwise it would
-+ * be possible to, falsely, observe p->on_rq == 0 and get stuck
-+ * in smp_cond_load_acquire() below.
-+ *
-+ * sched_ttwu_pending() try_to_wake_up()
-+ * [S] p->on_rq = 1; [L] p->state
-+ * UNLOCK rq->lock -----.
-+ * \
-+ * +--- RMB
-+ * schedule() /
-+ * LOCK rq->lock -----'
-+ * UNLOCK rq->lock
-+ *
-+ * [task p]
-+ * [S] p->state = UNINTERRUPTIBLE [L] p->on_rq
-+ *
-+ * Pairs with the UNLOCK+LOCK on rq->lock from the
-+ * last wakeup of our task and the schedule that got our task
-+ * current.
-+ */
-+ smp_rmb();
-+ if (p->on_rq && ttwu_remote(p, wake_flags))
-+ goto stat;
-+
-+#ifdef CONFIG_SMP
-+ /*
-+ * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be
-+ * possible to, falsely, observe p->on_cpu == 0.
-+ *
-+ * One must be running (->on_cpu == 1) in order to remove oneself
-+ * from the runqueue.
-+ *
-+ * [S] ->on_cpu = 1; [L] ->on_rq
-+ * UNLOCK rq->lock
-+ * RMB
-+ * LOCK rq->lock
-+ * [S] ->on_rq = 0; [L] ->on_cpu
-+ *
-+ * Pairs with the full barrier implied in the UNLOCK+LOCK on rq->lock
-+ * from the consecutive calls to schedule(); the first switching to our
-+ * task, the second putting it to sleep.
-+ */
-+ smp_rmb();
-+
-+ /*
-+ * If the owning (remote) CPU is still in the middle of schedule() with
-+ * this task as prev, wait until its done referencing the task.
-+ *
-+ * Pairs with the smp_store_release() in finish_lock_switch().
-+ *
-+ * This ensures that tasks getting woken will be fully ordered against
-+ * their previous state and preserve Program Order.
-+ */
-+ smp_cond_load_acquire(&p->on_cpu, !VAL);
-+
-+ p->sched_contributes_to_load = !!task_contributes_to_load(p);
-+ p->state = TASK_WAKING;
-+
-+ if (p->in_iowait) {
-+ delayacct_blkio_end();
-+ atomic_dec(&task_rq(p)->nr_iowait);
-+ }
-+
-+ cpu = select_best_cpu(p);
-+ if (task_cpu(p) != cpu)
-+ set_task_cpu(p, cpu);
-+
-+#else /* CONFIG_SMP */
-+
-+ if (p->in_iowait) {
-+ delayacct_blkio_end();
-+ atomic_dec(&task_rq(p)->nr_iowait);
-+ }
-+
-+#endif /* CONFIG_SMP */
-+
-+ ttwu_queue(p, cpu, wake_flags);
-+stat:
-+ ttwu_stat(p, cpu, wake_flags);
-+out:
-+ raw_spin_unlock_irqrestore(&p->pi_lock, flags);
-+
-+ return success;
-+}
-+
-+/**
-+ * try_to_wake_up_local - try to wake up a local task with rq lock held
-+ * @p: the thread to be awakened
-+ *
-+ * Put @p on the run-queue if it's not already there. The caller must
-+ * ensure that rq is locked and @p is not the current task.
-+ * rq stays locked over invocation.
-+ */
-+static void try_to_wake_up_local(struct task_struct *p)
-+{
-+ struct rq *rq = task_rq(p);
-+
-+ if (WARN_ON_ONCE(rq != this_rq()) ||
-+ WARN_ON_ONCE(p == current))
-+ return;
-+
-+ lockdep_assert_held(&rq->lock);
-+
-+ if (!raw_spin_trylock(&p->pi_lock)) {
-+ /*
-+ * This is OK, because current is on_cpu, which avoids it being
-+ * picked for load-balance and preemption/IRQs are still
-+ * disabled avoiding further scheduler activity on it and we've
-+ * not yet picked a replacement task.
-+ */
-+ rq_unlock(rq);
-+ raw_spin_lock(&p->pi_lock);
-+ rq_lock(rq);
-+ }
-+
-+ if (!(p->state & TASK_NORMAL))
-+ goto out;
-+
-+ trace_sched_waking(p);
-+
-+ if (!task_on_rq_queued(p)) {
-+ if (p->in_iowait) {
-+ delayacct_blkio_end();
-+ atomic_dec(&rq->nr_iowait);
-+ }
-+ ttwu_activate(rq, p);
-+ }
-+
-+ ttwu_do_wakeup(rq, p, 0);
-+ ttwu_stat(p, smp_processor_id(), 0);
-+out:
-+ raw_spin_unlock(&p->pi_lock);
-+}
-+
-+/**
-+ * wake_up_process - Wake up a specific process
-+ * @p: The process to be woken up.
-+ *
-+ * Attempt to wake up the nominated process and move it to the set of runnable
-+ * processes.
-+ *
-+ * Return: 1 if the process was woken up, 0 if it was already running.
-+ *
-+ * It may be assumed that this function implies a write memory barrier before
-+ * changing the task state if and only if any tasks are woken up.
-+ */
-+int wake_up_process(struct task_struct *p)
-+{
-+ return try_to_wake_up(p, TASK_NORMAL, 0);
-+}
-+EXPORT_SYMBOL(wake_up_process);
-+
-+int wake_up_state(struct task_struct *p, unsigned int state)
-+{
-+ return try_to_wake_up(p, state, 0);
-+}
-+
-+static void time_slice_expired(struct task_struct *p, struct rq *rq);
-+
-+/*
-+ * Perform scheduler related setup for a newly forked process p.
-+ * p is forked by current.
-+ */
-+int sched_fork(unsigned long __maybe_unused clone_flags, struct task_struct *p)
-+{
-+ unsigned long flags;
-+ int cpu = get_cpu();
-+
-+#ifdef CONFIG_PREEMPT_NOTIFIERS
-+ INIT_HLIST_HEAD(&p->preempt_notifiers);
-+#endif
-+ /*
-+ * We mark the process as NEW here. This guarantees that
-+ * nobody will actually run it, and a signal or other external
-+ * event cannot wake it up and insert it on the runqueue either.
-+ */
-+ p->state = TASK_NEW;
-+
-+ /* Should be reset in fork.c but done here for ease of MuQSS patching */
-+ p->on_cpu =
-+ p->on_rq =
-+ p->utime =
-+ p->stime =
-+ p->sched_time =
-+ p->stime_ns =
-+ p->utime_ns = 0;
-+ skiplist_node_init(&p->node);
-+
-+ /*
-+ * Revert to default priority/policy on fork if requested.
-+ */
-+ if (unlikely(p->sched_reset_on_fork)) {
-+ if (p->policy == SCHED_FIFO || p->policy == SCHED_RR) {
-+ p->policy = SCHED_NORMAL;
-+ p->normal_prio = normal_prio(p);
-+ }
-+
-+ if (PRIO_TO_NICE(p->static_prio) < 0) {
-+ p->static_prio = NICE_TO_PRIO(0);
-+ p->normal_prio = p->static_prio;
-+ }
-+
-+ /*
-+ * We don't need the reset flag anymore after the fork. It has
-+ * fulfilled its duty:
-+ */
-+ p->sched_reset_on_fork = 0;
-+ }
-+
-+ /*
-+ * Silence PROVE_RCU.
-+ */
-+ raw_spin_lock_irqsave(&p->pi_lock, flags);
-+ set_task_cpu(p, cpu);
-+ raw_spin_unlock_irqrestore(&p->pi_lock, flags);
-+
-+#ifdef CONFIG_SCHED_INFO
-+ if (unlikely(sched_info_on()))
-+ memset(&p->sched_info, 0, sizeof(p->sched_info));
-+#endif
-+ init_task_preempt_count(p);
-+
-+ put_cpu();
-+ return 0;
-+}
-+
-+#ifdef CONFIG_SCHEDSTATS
-+
-+DEFINE_STATIC_KEY_FALSE(sched_schedstats);
-+static bool __initdata __sched_schedstats = false;
-+
-+static void set_schedstats(bool enabled)
-+{
-+ if (enabled)
-+ static_branch_enable(&sched_schedstats);
-+ else
-+ static_branch_disable(&sched_schedstats);
-+}
-+
-+void force_schedstat_enabled(void)
-+{
-+ if (!schedstat_enabled()) {
-+ pr_info("kernel profiling enabled schedstats, disable via kernel.sched_schedstats.\n");
-+ static_branch_enable(&sched_schedstats);
-+ }
-+}
-+
-+static int __init setup_schedstats(char *str)
-+{
-+ int ret = 0;
-+ if (!str)
-+ goto out;
-+
-+ /*
-+ * This code is called before jump labels have been set up, so we can't
-+ * change the static branch directly just yet. Instead set a temporary
-+ * variable so init_schedstats() can do it later.
-+ */
-+ if (!strcmp(str, "enable")) {
-+ __sched_schedstats = true;
-+ ret = 1;
-+ } else if (!strcmp(str, "disable")) {
-+ __sched_schedstats = false;
-+ ret = 1;
-+ }
-+out:
-+ if (!ret)
-+ pr_warn("Unable to parse schedstats=\n");
-+
-+ return ret;
-+}
-+__setup("schedstats=", setup_schedstats);
-+
-+static void __init init_schedstats(void)
-+{
-+ set_schedstats(__sched_schedstats);
-+}
-+
-+#ifdef CONFIG_PROC_SYSCTL
-+int sysctl_schedstats(struct ctl_table *table, int write,
-+ void __user *buffer, size_t *lenp, loff_t *ppos)
-+{
-+ struct ctl_table t;
-+ int err;
-+ int state = static_branch_likely(&sched_schedstats);
-+
-+ if (write && !capable(CAP_SYS_ADMIN))
-+ return -EPERM;
-+
-+ t = *table;
-+ t.data = &state;
-+ err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
-+ if (err < 0)
-+ return err;
-+ if (write)
-+ set_schedstats(state);
-+ return err;
-+}
-+#endif /* CONFIG_PROC_SYSCTL */
-+#else /* !CONFIG_SCHEDSTATS */
-+static inline void init_schedstats(void) {}
-+#endif /* CONFIG_SCHEDSTATS */
-+
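-+/*
-+ * Usage sketch for the schedstats knobs above (standard interfaces; exact
-+ * output assumed to depend on this config): enable at boot with
-+ * "schedstats=enable" on the kernel command line, or at runtime with
-+ *
-+ *	sysctl kernel.sched_schedstats=1
-+ *
-+ * after which counters such as ttwu_count are exposed via /proc/schedstat.
-+ */
-+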
-+static void update_cpu_clock_switch(struct rq *rq, struct task_struct *p);
-+
-+static void account_task_cpu(struct rq *rq, struct task_struct *p)
-+{
-+ update_clocks(rq);
-+ /* This isn't really a context switch but accounting is the same */
-+ update_cpu_clock_switch(rq, p);
-+ p->last_ran = rq->niffies;
-+}
-+
-+bool sched_smp_initialized __read_mostly;
-+
-+static inline int hrexpiry_enabled(struct rq *rq)
-+{
-+ if (unlikely(!cpu_active(cpu_of(rq)) || !sched_smp_initialized))
-+ return 0;
-+ return hrtimer_is_hres_active(&rq->hrexpiry_timer);
-+}
-+
-+/*
-+ * Use HR-timers to deliver accurate preemption points.
-+ */
-+static inline void hrexpiry_clear(struct rq *rq)
-+{
-+ if (!hrexpiry_enabled(rq))
-+ return;
-+ if (hrtimer_active(&rq->hrexpiry_timer))
-+ hrtimer_cancel(&rq->hrexpiry_timer);
-+}
-+
-+/*
-+ * High-resolution time_slice expiry.
-+ * Runs from hardirq context with interrupts disabled.
-+ */
-+static enum hrtimer_restart hrexpiry(struct hrtimer *timer)
-+{
-+ struct rq *rq = container_of(timer, struct rq, hrexpiry_timer);
-+ struct task_struct *p;
-+
-+ /* This can happen during CPU hotplug / resume */
-+ if (unlikely(cpu_of(rq) != smp_processor_id()))
-+ goto out;
-+
-+ /*
-+ * We're doing this without the runqueue lock but this should always
-+ * be run on the local CPU. Time slice should run out in __schedule
-+ * but we set it to zero here in case niffies is slightly less.
-+ */
-+ p = rq->curr;
-+ p->time_slice = 0;
-+ __set_tsk_resched(p);
-+out:
-+ return HRTIMER_NORESTART;
-+}
-+
-+/*
-+ * Called to set the hrexpiry timer state.
-+ *
-+ * called with irqs disabled from the local CPU only
-+ */
-+static void hrexpiry_start(struct rq *rq, u64 delay)
-+{
-+ if (!hrexpiry_enabled(rq))
-+ return;
-+
-+ hrtimer_start(&rq->hrexpiry_timer, ns_to_ktime(delay),
-+ HRTIMER_MODE_REL_PINNED);
-+}
-+
-+static void init_rq_hrexpiry(struct rq *rq)
-+{
-+ hrtimer_init(&rq->hrexpiry_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-+ rq->hrexpiry_timer.function = hrexpiry;
-+}
-+
-+static inline int rq_dither(struct rq *rq)
-+{
-+ if (!hrexpiry_enabled(rq))
-+ return HALF_JIFFY_US;
-+ return 0;
-+}
-+
-+/*
-+ * wake_up_new_task - wake up a newly created task for the first time.
-+ *
-+ * This function will do some initial scheduler statistics housekeeping
-+ * that must be done for every newly created context, then puts the task
-+ * on the runqueue and wakes it.
-+ */
-+void wake_up_new_task(struct task_struct *p)
-+{
-+ struct task_struct *parent, *rq_curr;
-+ struct rq *rq, *new_rq;
-+ unsigned long flags;
-+
-+ parent = p->parent;
-+
-+ raw_spin_lock_irqsave(&p->pi_lock, flags);
-+ p->state = TASK_RUNNING;
-+	/* task_rq can't change yet on a new task */
-+ new_rq = rq = task_rq(p);
-+ if (unlikely(needs_other_cpu(p, task_cpu(p)))) {
-+ set_task_cpu(p, valid_task_cpu(p));
-+ new_rq = task_rq(p);
-+ }
-+
-+ double_rq_lock(rq, new_rq);
-+ rq_curr = rq->curr;
-+
-+ /*
-+ * Make sure we do not leak PI boosting priority to the child.
-+ */
-+ p->prio = rq_curr->normal_prio;
-+
-+ trace_sched_wakeup_new(p);
-+
-+ /*
-+ * Share the timeslice between parent and child, thus the
-+ * total amount of pending timeslices in the system doesn't change,
-+ * resulting in more scheduling fairness. If it's negative, it won't
-+ * matter since that's the same as being 0. rq->rq_deadline is only
-+ * modified within schedule() so it is always equal to
-+ * current->deadline.
-+ */
-+ account_task_cpu(rq, rq_curr);
-+ p->last_ran = rq_curr->last_ran;
-+ if (likely(rq_curr->policy != SCHED_FIFO)) {
-+ rq_curr->time_slice /= 2;
-+ if (rq_curr->time_slice < RESCHED_US) {
-+ /*
-+ * Forking task has run out of timeslice. Reschedule it and
-+ * start its child with a new time slice and deadline. The
-+ * child will end up running first because its deadline will
-+ * be slightly earlier.
-+ */
-+ __set_tsk_resched(rq_curr);
-+ time_slice_expired(p, new_rq);
-+ if (suitable_idle_cpus(p))
-+ resched_best_idle(p, task_cpu(p));
-+ else if (unlikely(rq != new_rq))
-+ try_preempt(p, new_rq);
-+ } else {
-+ p->time_slice = rq_curr->time_slice;
-+ if (rq_curr == parent && rq == new_rq && !suitable_idle_cpus(p)) {
-+ /*
-+ * The VM isn't cloned, so we're in a good position to
-+ * do child-runs-first in anticipation of an exec. This
-+ * usually avoids a lot of COW overhead.
-+ */
-+ __set_tsk_resched(rq_curr);
-+ } else {
-+ /*
-+ * Adjust the hrexpiry since rq_curr will keep
-+ * running and its timeslice has been shortened.
-+ */
-+ hrexpiry_start(rq, US_TO_NS(rq_curr->time_slice));
-+ try_preempt(p, new_rq);
-+ }
-+ }
-+ } else {
-+ time_slice_expired(p, new_rq);
-+ try_preempt(p, new_rq);
-+ }
-+ activate_task(p, new_rq);
-+ double_rq_unlock(rq, new_rq);
-+ raw_spin_unlock_irqrestore(&p->pi_lock, flags);
-+}
-+
-+#ifdef CONFIG_PREEMPT_NOTIFIERS
-+
-+static struct static_key preempt_notifier_key = STATIC_KEY_INIT_FALSE;
-+
-+void preempt_notifier_inc(void)
-+{
-+ static_key_slow_inc(&preempt_notifier_key);
-+}
-+EXPORT_SYMBOL_GPL(preempt_notifier_inc);
-+
-+void preempt_notifier_dec(void)
-+{
-+ static_key_slow_dec(&preempt_notifier_key);
-+}
-+EXPORT_SYMBOL_GPL(preempt_notifier_dec);
-+
-+/**
-+ * preempt_notifier_register - tell me when current is being preempted & rescheduled
-+ * @notifier: notifier struct to register
-+ */
-+void preempt_notifier_register(struct preempt_notifier *notifier)
-+{
-+ if (!static_key_false(&preempt_notifier_key))
-+ WARN(1, "registering preempt_notifier while notifiers disabled\n");
-+
-+ hlist_add_head(&notifier->link, &current->preempt_notifiers);
-+}
-+EXPORT_SYMBOL_GPL(preempt_notifier_register);
-+
-+/**
-+ * preempt_notifier_unregister - no longer interested in preemption notifications
-+ * @notifier: notifier struct to unregister
-+ *
-+ * This is *not* safe to call from within a preemption notifier.
-+ */
-+void preempt_notifier_unregister(struct preempt_notifier *notifier)
-+{
-+ hlist_del(&notifier->link);
-+}
-+EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
-+
-+static void __fire_sched_in_preempt_notifiers(struct task_struct *curr)
-+{
-+ struct preempt_notifier *notifier;
-+
-+ hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
-+ notifier->ops->sched_in(notifier, raw_smp_processor_id());
-+}
-+
-+static __always_inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
-+{
-+ if (static_key_false(&preempt_notifier_key))
-+ __fire_sched_in_preempt_notifiers(curr);
-+}
-+
-+static void
-+__fire_sched_out_preempt_notifiers(struct task_struct *curr,
-+ struct task_struct *next)
-+{
-+ struct preempt_notifier *notifier;
-+
-+ hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
-+ notifier->ops->sched_out(notifier, next);
-+}
-+
-+static __always_inline void
-+fire_sched_out_preempt_notifiers(struct task_struct *curr,
-+ struct task_struct *next)
-+{
-+ if (static_key_false(&preempt_notifier_key))
-+ __fire_sched_out_preempt_notifiers(curr, next);
-+}
-+
-+#else /* !CONFIG_PREEMPT_NOTIFIERS */
-+
-+static inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
-+{
-+}
-+
-+static inline void
-+fire_sched_out_preempt_notifiers(struct task_struct *curr,
-+ struct task_struct *next)
-+{
-+}
-+
-+#endif /* CONFIG_PREEMPT_NOTIFIERS */
-+
-+/**
-+ * prepare_task_switch - prepare to switch tasks
-+ * @rq: the runqueue preparing to switch
-+ * @next: the task we are going to switch to.
-+ *
-+ * This is called with the rq lock held and interrupts off. It must
-+ * be paired with a subsequent finish_task_switch after the context
-+ * switch.
-+ *
-+ * prepare_task_switch sets up locking and calls architecture specific
-+ * hooks.
-+ */
-+static inline void
-+prepare_task_switch(struct rq *rq, struct task_struct *prev,
-+ struct task_struct *next)
-+{
-+ sched_info_switch(rq, prev, next);
-+ perf_event_task_sched_out(prev, next);
-+ fire_sched_out_preempt_notifiers(prev, next);
-+ prepare_lock_switch(rq, next);
-+ prepare_arch_switch(next);
-+}
-+
-+/**
-+ * finish_task_switch - clean up after a task-switch
-+ * @rq: runqueue associated with task-switch
-+ * @prev: the thread we just switched away from.
-+ *
-+ * finish_task_switch must be called after the context switch, paired
-+ * with a prepare_task_switch call before the context switch.
-+ * finish_task_switch will reconcile locking set up by prepare_task_switch,
-+ * and do any other architecture-specific cleanup actions.
-+ *
-+ * Note that we may have delayed dropping an mm in context_switch(). If
-+ * so, we finish that here outside of the runqueue lock. (Doing it
-+ * with the lock held can cause deadlocks; see schedule() for
-+ * details.)
-+ *
-+ * The context switch has flipped the stack from under us and restored the
-+ * local variables which were saved when this task called schedule() in the
-+ * past. prev == current is still correct but we need to recalculate this_rq
-+ * because prev may have moved to another CPU.
-+ */
-+static void finish_task_switch(struct task_struct *prev)
-+ __releases(rq->lock)
-+{
-+ struct rq *rq = this_rq();
-+ struct mm_struct *mm = rq->prev_mm;
-+ long prev_state;
-+
-+ /*
-+ * The previous task will have left us with a preempt_count of 2
-+ * because it left us after:
-+ *
-+ * schedule()
-+ * preempt_disable(); // 1
-+ * __schedule()
-+ * raw_spin_lock_irq(&rq->lock) // 2
-+ *
-+ * Also, see FORK_PREEMPT_COUNT.
-+ */
-+ if (WARN_ONCE(preempt_count() != 2*PREEMPT_DISABLE_OFFSET,
-+ "corrupted preempt_count: %s/%d/0x%x\n",
-+ current->comm, current->pid, preempt_count()))
-+ preempt_count_set(FORK_PREEMPT_COUNT);
-+
-+ rq->prev_mm = NULL;
-+
-+ /*
-+ * A task struct has one reference for the use as "current".
-+ * If a task dies, then it sets TASK_DEAD in tsk->state and calls
-+ * schedule one last time. The schedule call will never return, and
-+ * the scheduled task must drop that reference.
-+ *
-+ * We must observe prev->state before clearing prev->on_cpu (in
-+ * finish_lock_switch), otherwise a concurrent wakeup can get prev
-+ * running on another CPU and we could race with its RUNNING -> DEAD
-+ * transition, resulting in a double drop.
-+ */
-+ prev_state = prev->state;
-+ vtime_task_switch(prev);
-+ perf_event_task_sched_in(prev, current);
-+ /*
-+ * The membarrier system call requires a full memory barrier
-+ * after storing to rq->curr, before going back to user-space.
-+ *
-+ * TODO: This smp_mb__after_unlock_lock can go away if PPC end
-+ * up adding a full barrier to switch_mm(), or we should figure
-+ * out if a smp_mb__after_unlock_lock is really the proper API
-+ * to use.
-+ */
-+ smp_mb__after_unlock_lock();
-+ finish_lock_switch(rq, prev);
-+ finish_arch_post_lock_switch();
-+
-+ fire_sched_in_preempt_notifiers(current);
-+ if (mm)
-+ mmdrop(mm);
-+ if (unlikely(prev_state == TASK_DEAD)) {
-+ /*
-+ * Remove function-return probe instances associated with this
-+ * task and put them back on the free list.
-+ */
-+ kprobe_flush_task(prev);
-+
-+ /* Task is done with its stack. */
-+ put_task_stack(prev);
-+
-+ put_task_struct(prev);
-+ }
-+}
-+
-+/**
-+ * schedule_tail - first thing a freshly forked thread must call.
-+ * @prev: the thread we just switched away from.
-+ */
-+asmlinkage __visible void schedule_tail(struct task_struct *prev)
-+{
-+ /*
-+ * New tasks start with FORK_PREEMPT_COUNT, see there and
-+ * finish_task_switch() for details.
-+ *
-+ * finish_task_switch() will drop rq->lock() and lower preempt_count
-+ * and the preempt_enable() will end up enabling preemption (on
-+ * PREEMPT_COUNT kernels).
-+ */
-+
-+ finish_task_switch(prev);
-+ preempt_enable();
-+
-+ if (current->set_child_tid)
-+ put_user(task_pid_vnr(current), current->set_child_tid);
-+}
-+
-+/*
-+ * context_switch - switch to the new MM and the new thread's register state.
-+ */
-+static __always_inline void
-+context_switch(struct rq *rq, struct task_struct *prev,
-+ struct task_struct *next)
-+{
-+ struct mm_struct *mm, *oldmm;
-+
-+ prepare_task_switch(rq, prev, next);
-+
-+ mm = next->mm;
-+ oldmm = prev->active_mm;
-+ /*
-+ * For paravirt, this is coupled with an exit in switch_to to
-+ * combine the page table reload and the switch backend into
-+ * one hypercall.
-+ */
-+ arch_start_context_switch(prev);
-+
-+ if (!mm) {
-+ next->active_mm = oldmm;
-+ mmgrab(oldmm);
-+ enter_lazy_tlb(oldmm, next);
-+ } else
-+ switch_mm_irqs_off(oldmm, mm, next);
-+
-+ if (!prev->mm) {
-+ prev->active_mm = NULL;
-+ rq->prev_mm = oldmm;
-+ }
-+ /*
-+	 * The runqueue lock will be released by the next
-+ * task (which is an invalid locking op but in the case
-+ * of the scheduler it's an obvious special-case), so we
-+ * do an early lockdep release here:
-+ */
-+ spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
-+
-+ /* Here we just switch the register state and the stack. */
-+ switch_to(prev, next, prev);
-+ barrier();
-+
-+ finish_task_switch(prev);
-+}
-+
-+/*
-+ * nr_running, nr_uninterruptible and nr_context_switches:
-+ *
-+ * externally visible scheduler statistics: current number of runnable
-+ * threads, total number of context switches performed since bootup.
-+ */
-+unsigned long nr_running(void)
-+{
-+ unsigned long i, sum = 0;
-+
-+ for_each_online_cpu(i)
-+ sum += cpu_rq(i)->nr_running;
-+
-+ return sum;
-+}
-+
-+static unsigned long nr_uninterruptible(void)
-+{
-+ unsigned long i, sum = 0;
-+
-+ for_each_online_cpu(i)
-+ sum += cpu_rq(i)->nr_uninterruptible;
-+
-+ return sum;
-+}
-+
-+/*
-+ * Check if only the current task is running on the CPU.
-+ *
-+ * Caution: this function does not check that the caller has disabled
-+ * preemption, thus the result might have a time-of-check-to-time-of-use
-+ * race. The caller is responsible to use it correctly, for example:
-+ *
-+ * - from a non-preemptable section (of course)
-+ *
-+ * - from a thread that is bound to a single CPU
-+ *
-+ * - in a loop with very short iterations (e.g. a polling loop)
-+ */
-+bool single_task_running(void)
-+{
-+ struct rq *rq = cpu_rq(smp_processor_id());
-+
-+	return rq_load(rq) == 1;
-+}
-+EXPORT_SYMBOL(single_task_running);
-+
-+unsigned long long nr_context_switches(void)
-+{
-+ int i;
-+ unsigned long long sum = 0;
-+
-+ for_each_possible_cpu(i)
-+ sum += cpu_rq(i)->nr_switches;
-+
-+ return sum;
-+}
-+
-+/*
-+ * IO-wait accounting, and how it's mostly bollocks (on SMP).
-+ *
-+ * The idea behind IO-wait accounting is to account the idle time that we
-+ * could have spent running if it were not for IO. That is, if we were to
-+ * improve the storage performance, we'd have a proportional reduction in
-+ * IO-wait time.
-+ *
-+ * This all works nicely on UP, where, when a task blocks on IO, we account
-+ * idle time as IO-wait, because if the storage were faster, it could've been
-+ * running and we'd not be idle.
-+ *
-+ * This has been extended to SMP, by doing the same for each CPU. This however
-+ * is broken.
-+ *
-+ * Imagine for instance the case where two tasks block on one CPU, only the one
-+ * CPU will have IO-wait accounted, while the other has regular idle. Even
-+ * though, if the storage were faster, both could've run at the same time,
-+ * utilising both CPUs.
-+ *
-+ * This means that, when looking globally, the current IO-wait accounting on
-+ * SMP is a lower bound, due to under-accounting.
-+ *
-+ * Worse, since the numbers are provided per CPU, they are sometimes
-+ * interpreted per CPU, and that is nonsensical. A blocked task isn't strictly
-+ * associated with any one particular CPU, it can wake to another CPU than it
-+ * blocked on. This means the per CPU IO-wait number is meaningless.
-+ *
-+ * Task CPU affinities can make all that even more 'interesting'.
-+ */
-+
-+unsigned long nr_iowait(void)
-+{
-+ unsigned long i, sum = 0;
-+
-+ for_each_possible_cpu(i)
-+ sum += atomic_read(&cpu_rq(i)->nr_iowait);
-+
-+ return sum;
-+}
-+
-+/*
-+ * Consumers of these two interfaces, like for example the cpufreq menu
-+ * governor, are using nonsensical data: they boost frequency for a CPU
-+ * with IO-wait even though that CPU might not end up running the task
-+ * when it does become runnable.
-+ */
-+
-+unsigned long nr_iowait_cpu(int cpu)
-+{
-+ struct rq *this = cpu_rq(cpu);
-+ return atomic_read(&this->nr_iowait);
-+}
-+
-+unsigned long nr_active(void)
-+{
-+ return nr_running() + nr_uninterruptible();
-+}
-+
-+/*
-+ * I/O wait is the number of running or queued tasks whose ->rq pointer is
-+ * set to this cpu, that being the CPU they're most likely to run on.
-+ */
-+void get_iowait_load(unsigned long *nr_waiters, unsigned long *load)
-+{
-+ struct rq *rq = this_rq();
-+
-+ *nr_waiters = atomic_read(&rq->nr_iowait);
-+ *load = rq_load(rq);
-+}
-+
-+/* Variables and functions for calc_load */
-+static unsigned long calc_load_update;
-+unsigned long avenrun[3];
-+EXPORT_SYMBOL(avenrun);
-+
-+/**
-+ * get_avenrun - get the load average array
-+ * @loads: pointer to dest load array
-+ * @offset: offset to add
-+ * @shift: shift count to shift the result left
-+ *
-+ * These values are estimates at best, so no need for locking.
-+ */
-+void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
-+{
-+ loads[0] = (avenrun[0] + offset) << shift;
-+ loads[1] = (avenrun[1] + offset) << shift;
-+ loads[2] = (avenrun[2] + offset) << shift;
-+}
-+
-+static unsigned long
-+calc_load(unsigned long load, unsigned long exp, unsigned long active)
-+{
-+ unsigned long newload;
-+
-+ newload = load * exp + active * (FIXED_1 - exp);
-+ if (active >= load)
-+ newload += FIXED_1-1;
-+
-+ return newload / FIXED_1;
-+}
-+
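-+/*
-+ * Worked example for calc_load() (constants assumed from the usual loadavg
-+ * fixed-point definitions, FIXED_1 = 2048 and EXP_1 = 1884): with a
-+ * previous 1-minute load of 1.0 (2048) and 3 runnable tasks
-+ * (active = 3 * 2048 = 6144),
-+ *
-+ *	newload = 2048 * 1884 + 6144 * (2048 - 1884) + (FIXED_1 - 1)
-+ *	        = 4868095, and 4868095 / 2048 = 2376, i.e. ~1.16
-+ *
-+ * so the average decays smoothly toward the instantaneous value of 3.0.
-+ */
-+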
-+/*
-+ * calc_global_load - update the avenrun load estimates every LOAD_FREQ ticks.
-+ */
-+void calc_global_load(unsigned long ticks)
-+{
-+ long active;
-+
-+ if (time_before(jiffies, READ_ONCE(calc_load_update)))
-+ return;
-+ active = nr_active() * FIXED_1;
-+
-+ avenrun[0] = calc_load(avenrun[0], EXP_1, active);
-+ avenrun[1] = calc_load(avenrun[1], EXP_5, active);
-+ avenrun[2] = calc_load(avenrun[2], EXP_15, active);
-+
-+ calc_load_update = jiffies + LOAD_FREQ;
-+}
-+
-+DEFINE_PER_CPU(struct kernel_stat, kstat);
-+DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
-+
-+EXPORT_PER_CPU_SYMBOL(kstat);
-+EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
-+
-+#ifdef CONFIG_PARAVIRT
-+static inline u64 steal_ticks(u64 steal)
-+{
-+ if (unlikely(steal > NSEC_PER_SEC))
-+ return div_u64(steal, TICK_NSEC);
-+
-+ return __iter_div_u64_rem(steal, TICK_NSEC, &steal);
-+}
-+#endif
-+
-+#ifndef nsecs_to_cputime
-+# define nsecs_to_cputime(__nsecs) nsecs_to_jiffies(__nsecs)
-+#endif
-+
-+/*
-+ * On each tick, add the number of nanoseconds to the unbanked variables and
-+ * once one tick's worth has accumulated, account it allowing for accurate
-+ * sub-tick accounting and totals.
-+ */
-+static void pc_idle_time(struct rq *rq, struct task_struct *idle, unsigned long ns)
-+{
-+ u64 *cpustat = kcpustat_this_cpu->cpustat;
-+ unsigned long ticks;
-+
-+ if (atomic_read(&rq->nr_iowait) > 0) {
-+ rq->iowait_ns += ns;
-+ if (rq->iowait_ns >= JIFFY_NS) {
-+ ticks = NS_TO_JIFFIES(rq->iowait_ns);
-+ cpustat[CPUTIME_IOWAIT] += (__force u64)TICK_NSEC * ticks;
-+ rq->iowait_ns %= JIFFY_NS;
-+ }
-+ } else {
-+ rq->idle_ns += ns;
-+ if (rq->idle_ns >= JIFFY_NS) {
-+ ticks = NS_TO_JIFFIES(rq->idle_ns);
-+ cpustat[CPUTIME_IDLE] += (__force u64)TICK_NSEC * ticks;
-+ rq->idle_ns %= JIFFY_NS;
-+ }
-+ }
-+ acct_update_integrals(idle);
-+}
-+
-+static void pc_system_time(struct rq *rq, struct task_struct *p,
-+ int hardirq_offset, unsigned long ns)
-+{
-+ u64 *cpustat = kcpustat_this_cpu->cpustat;
-+ unsigned long ticks;
-+
-+ p->stime_ns += ns;
-+ if (p->stime_ns >= JIFFY_NS) {
-+ ticks = NS_TO_JIFFIES(p->stime_ns);
-+ p->stime_ns %= JIFFY_NS;
-+ p->stime += (__force u64)TICK_NSEC * ticks;
-+ account_group_system_time(p, TICK_NSEC * ticks);
-+ }
-+ p->sched_time += ns;
-+ account_group_exec_runtime(p, ns);
-+
-+ if (hardirq_count() - hardirq_offset) {
-+ rq->irq_ns += ns;
-+ if (rq->irq_ns >= JIFFY_NS) {
-+ ticks = NS_TO_JIFFIES(rq->irq_ns);
-+ cpustat[CPUTIME_IRQ] += (__force u64)TICK_NSEC * ticks;
-+ rq->irq_ns %= JIFFY_NS;
-+ }
-+ } else if (in_serving_softirq()) {
-+ rq->softirq_ns += ns;
-+ if (rq->softirq_ns >= JIFFY_NS) {
-+ ticks = NS_TO_JIFFIES(rq->softirq_ns);
-+ cpustat[CPUTIME_SOFTIRQ] += (__force u64)TICK_NSEC * ticks;
-+ rq->softirq_ns %= JIFFY_NS;
-+ }
-+ } else {
-+ rq->system_ns += ns;
-+ if (rq->system_ns >= JIFFY_NS) {
-+ ticks = NS_TO_JIFFIES(rq->system_ns);
-+ cpustat[CPUTIME_SYSTEM] += (__force u64)TICK_NSEC * ticks;
-+ rq->system_ns %= JIFFY_NS;
-+ }
-+ }
-+ acct_update_integrals(p);
-+}
-+
-+static void pc_user_time(struct rq *rq, struct task_struct *p, unsigned long ns)
-+{
-+ u64 *cpustat = kcpustat_this_cpu->cpustat;
-+ unsigned long ticks;
-+
-+ p->utime_ns += ns;
-+ if (p->utime_ns >= JIFFY_NS) {
-+ ticks = NS_TO_JIFFIES(p->utime_ns);
-+ p->utime_ns %= JIFFY_NS;
-+ p->utime += (__force u64)TICK_NSEC * ticks;
-+ account_group_user_time(p, TICK_NSEC * ticks);
-+ }
-+ p->sched_time += ns;
-+ account_group_exec_runtime(p, ns);
-+
-+ if (this_cpu_ksoftirqd() == p) {
-+ /*
-+		 * ksoftirqd time does not get accounted in cpu_softirq_time.
-+ * So, we have to handle it separately here.
-+ */
-+ rq->softirq_ns += ns;
-+ if (rq->softirq_ns >= JIFFY_NS) {
-+ ticks = NS_TO_JIFFIES(rq->softirq_ns);
-+ cpustat[CPUTIME_SOFTIRQ] += (__force u64)TICK_NSEC * ticks;
-+ rq->softirq_ns %= JIFFY_NS;
-+ }
-+ }
-+
-+ if (task_nice(p) > 0 || idleprio_task(p)) {
-+ rq->nice_ns += ns;
-+ if (rq->nice_ns >= JIFFY_NS) {
-+ ticks = NS_TO_JIFFIES(rq->nice_ns);
-+ cpustat[CPUTIME_NICE] += (__force u64)TICK_NSEC * ticks;
-+ rq->nice_ns %= JIFFY_NS;
-+ }
-+ } else {
-+ rq->user_ns += ns;
-+ if (rq->user_ns >= JIFFY_NS) {
-+ ticks = NS_TO_JIFFIES(rq->user_ns);
-+ cpustat[CPUTIME_USER] += (__force u64)TICK_NSEC * ticks;
-+ rq->user_ns %= JIFFY_NS;
-+ }
-+ }
-+ acct_update_integrals(p);
-+}
-+
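-+/*
-+ * Worked example of the banking scheme shared by the pc_*_time() functions
-+ * (HZ value assumed): with HZ = 250, JIFFY_NS is 4000000. A task that has
-+ * banked 9500000 ns gets 2 whole ticks accounted (2 * TICK_NSEC added to
-+ * its totals) while the 1500000 ns remainder stays banked for the next
-+ * tick or switch, so sub-tick amounts are never lost.
-+ */
-+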
-+/*
-+ * This is called on clock ticks.
-+ * Bank in p->sched_time the ns elapsed since the last tick or switch.
-+ * CPU scheduler quota accounting is also performed here in microseconds.
-+ */
-+static void update_cpu_clock_tick(struct rq *rq, struct task_struct *p)
-+{
-+ s64 account_ns = rq->niffies - p->last_ran;
-+ struct task_struct *idle = rq->idle;
-+
-+ /* Accurate tick timekeeping */
-+ if (user_mode(get_irq_regs()))
-+ pc_user_time(rq, p, account_ns);
-+ else if (p != idle || (irq_count() != HARDIRQ_OFFSET)) {
-+ pc_system_time(rq, p, HARDIRQ_OFFSET, account_ns);
-+ } else
-+ pc_idle_time(rq, idle, account_ns);
-+
-+ /* time_slice accounting is done in usecs to avoid overflow on 32bit */
-+ if (p->policy != SCHED_FIFO && p != idle)
-+ p->time_slice -= NS_TO_US(account_ns);
-+
-+ p->last_ran = rq->niffies;
-+}
-+
-+/*
-+ * This is called on context switches.
-+ * Bank in p->sched_time the ns elapsed since the last tick or switch.
-+ * CPU scheduler quota accounting is also performed here in microseconds.
-+ */
-+static void update_cpu_clock_switch(struct rq *rq, struct task_struct *p)
-+{
-+ s64 account_ns = rq->niffies - p->last_ran;
-+ struct task_struct *idle = rq->idle;
-+
-+ /* Accurate subtick timekeeping */
-+ if (p != idle)
-+ pc_user_time(rq, p, account_ns);
-+ else
-+ pc_idle_time(rq, idle, account_ns);
-+
-+ /* time_slice accounting is done in usecs to avoid overflow on 32bit */
-+ if (p->policy != SCHED_FIFO && p != idle)
-+ p->time_slice -= NS_TO_US(account_ns);
-+}
-+
-+/*
-+ * Return any ns on the sched_clock that have not yet been accounted in
-+ * @p in case that task is currently running.
-+ *
-+ * Called with task_rq_lock(p) held.
-+ */
-+static inline u64 do_task_delta_exec(struct task_struct *p, struct rq *rq)
-+{
-+ u64 ns = 0;
-+
-+ /*
-+ * Must be ->curr _and_ ->on_rq. If dequeued, we would
-+ * project cycles that may never be accounted to this
-+ * thread, breaking clock_gettime().
-+ */
-+ if (p == rq->curr && task_on_rq_queued(p)) {
-+ update_clocks(rq);
-+ ns = rq->niffies - p->last_ran;
-+ }
-+
-+ return ns;
-+}
-+
-+/*
-+ * Return accounted runtime for the task.
-+ * Return separately the current's pending runtime that has not been
-+ * accounted yet.
-+ */
-+unsigned long long task_sched_runtime(struct task_struct *p)
-+{
-+ unsigned long flags;
-+ struct rq *rq;
-+ u64 ns;
-+
-+#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
-+ /*
-+ * 64-bit doesn't need locks to atomically read a 64bit value.
-+	 * So we have an optimization chance when the task's delta_exec is 0.
-+ * Reading ->on_cpu is racy, but this is ok.
-+ *
-+ * If we race with it leaving CPU, we'll take a lock. So we're correct.
-+ * If we race with it entering CPU, unaccounted time is 0. This is
-+ * indistinguishable from the read occurring a few cycles earlier.
-+ * If we see ->on_cpu without ->on_rq, the task is leaving, and has
-+ * been accounted, so we're correct here as well.
-+ */
-+ if (!p->on_cpu || !task_on_rq_queued(p))
-+ return tsk_seruntime(p);
-+#endif
-+
-+ rq = task_rq_lock(p, &flags);
-+ ns = p->sched_time + do_task_delta_exec(p, rq);
-+ task_rq_unlock(rq, p, &flags);
-+
-+ return ns;
-+}
-+
-+/*
-+ * Functions to test for when SCHED_ISO tasks have used their allocated
-+ * quota as real time scheduling and convert them back to SCHED_NORMAL. All
-+ * data is modified only by the local runqueue during scheduler_tick with
-+ * interrupts disabled.
-+ */
-+
-+/*
-+ * Test if SCHED_ISO tasks have run longer than their allotted period as RT
-+ * tasks and set the refractory flag if necessary. There is 10% hysteresis
-+ * for unsetting the flag. 115/128 is ~90/100 as a fast shift instead of a
-+ * slow division.
-+ */
-+static inline void iso_tick(struct rq *rq)
-+{
-+ rq->iso_ticks = rq->iso_ticks * (ISO_PERIOD - 1) / ISO_PERIOD;
-+ rq->iso_ticks += 100;
-+ if (rq->iso_ticks > ISO_PERIOD * sched_iso_cpu) {
-+ rq->iso_refractory = true;
-+ if (unlikely(rq->iso_ticks > ISO_PERIOD * 100))
-+ rq->iso_ticks = ISO_PERIOD * 100;
-+ }
-+}
-+
-+/* No SCHED_ISO task was running so decrease rq->iso_ticks */
-+static inline void no_iso_tick(struct rq *rq, int ticks)
-+{
-+ if (rq->iso_ticks > 0 || rq->iso_refractory) {
-+ rq->iso_ticks = rq->iso_ticks * (ISO_PERIOD - ticks) / ISO_PERIOD;
-+ if (rq->iso_ticks < ISO_PERIOD * (sched_iso_cpu * 115 / 128)) {
-+ rq->iso_refractory = false;
-+ if (unlikely(rq->iso_ticks < 0))
-+ rq->iso_ticks = 0;
-+ }
-+ }
-+}
-+
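-+/*
-+ * Worked example of the hysteresis above (the default sched_iso_cpu of 70
-+ * is assumed): iso_refractory is set once iso_ticks exceeds 70% of
-+ * ISO_PERIOD and, since 70 * 115 / 128 = 62, is only cleared again below
-+ * ~62%, preventing the flag from flapping around a single threshold.
-+ */
-+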
-+/* This manages tasks that have run out of timeslice during a scheduler_tick */
-+static void task_running_tick(struct rq *rq)
-+{
-+ struct task_struct *p = rq->curr;
-+
-+ /*
-+ * If a SCHED_ISO task is running we increment the iso_ticks. In
-+ * order to prevent SCHED_ISO tasks from causing starvation in the
-+ * presence of true RT tasks we account those as iso_ticks as well.
-+ */
-+ if (rt_task(p) || task_running_iso(p))
-+ iso_tick(rq);
-+ else
-+ no_iso_tick(rq, 1);
-+
-+ /* SCHED_FIFO tasks never run out of timeslice. */
-+ if (p->policy == SCHED_FIFO)
-+ return;
-+
-+ if (iso_task(p)) {
-+ if (task_running_iso(p)) {
-+ if (rq->iso_refractory) {
-+ /*
-+ * SCHED_ISO task is running as RT and limit
-+ * has been hit. Force it to reschedule as
-+ * SCHED_NORMAL by zeroing its time_slice
-+ */
-+ p->time_slice = 0;
-+ }
-+ } else if (!rq->iso_refractory) {
-+ /* Can now run again ISO. Reschedule to pick up prio */
-+ goto out_resched;
-+ }
-+ }
-+
-+ /*
-+ * Tasks that were scheduled in the first half of a tick are not
-+ * allowed to run into the 2nd half of the next tick if they will
-+ * run out of time slice in the interim. Otherwise, if they have
-+ * less than RESCHED_US μs of time slice left they will be rescheduled.
-+ * Dither is used as a backup for when hrexpiry is disabled or high res
-+ * timers not configured in.
-+ */
-+ if (p->time_slice - rq->dither >= RESCHED_US)
-+ return;
-+out_resched:
-+ rq_lock(rq);
-+ __set_tsk_resched(p);
-+ rq_unlock(rq);
-+}
-+
-+#ifdef CONFIG_NO_HZ_FULL
-+/*
-+ * We can stop the timer tick any time highres timers are active since
-+ * we rely entirely on highres timeouts for task expiry rescheduling.
-+ */
-+static void sched_stop_tick(struct rq *rq, int cpu)
-+{
-+ if (!hrexpiry_enabled(rq))
-+ return;
-+ if (!tick_nohz_full_enabled())
-+ return;
-+ if (!tick_nohz_full_cpu(cpu))
-+ return;
-+ tick_nohz_dep_clear_cpu(cpu, TICK_DEP_BIT_SCHED);
-+}
-+
-+static inline void sched_start_tick(struct rq *rq, int cpu)
-+{
-+ tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_SCHED);
-+}
-+
-+/**
-+ * scheduler_tick_max_deferment
-+ *
-+ * Keep at least one tick per second when a single
-+ * active task is running.
-+ *
-+ * This makes sure that uptime continues to move forward, even
-+ * with a very low granularity.
-+ *
-+ * Return: Maximum deferment in nanoseconds.
-+ */
-+u64 scheduler_tick_max_deferment(void)
-+{
-+ struct rq *rq = this_rq();
-+ unsigned long next, now = READ_ONCE(jiffies);
-+
-+ next = rq->last_jiffy + HZ;
-+
-+ if (time_before_eq(next, now))
-+ return 0;
-+
-+ return jiffies_to_nsecs(next - now);
-+}
-+#else
-+static inline void sched_stop_tick(struct rq *rq, int cpu)
-+{
-+}
-+
-+static inline void sched_start_tick(struct rq *rq, int cpu)
-+{
-+}
-+#endif
-+
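-+/*
-+ * Example of the bound in scheduler_tick_max_deferment(): if rq->last_jiffy
-+ * was updated on this very tick, next - now == HZ, so the tick may be
-+ * deferred by jiffies_to_nsecs(HZ), i.e. one full second; once that second
-+ * has elapsed the function returns 0 and the tick must run.
-+ */
-+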
-+/*
-+ * This function gets called by the timer code, with HZ frequency.
-+ * We call it with interrupts disabled.
-+ */
-+void scheduler_tick(void)
-+{
-+ int cpu __maybe_unused = smp_processor_id();
-+ struct rq *rq = cpu_rq(cpu);
-+
-+ sched_clock_tick();
-+ update_clocks(rq);
-+ update_load_avg(rq, 0);
-+ update_cpu_clock_tick(rq, rq->curr);
-+ if (!rq_idle(rq))
-+ task_running_tick(rq);
-+ else if (rq->last_jiffy > rq->last_scheduler_tick)
-+ no_iso_tick(rq, rq->last_jiffy - rq->last_scheduler_tick);
-+ rq->last_scheduler_tick = rq->last_jiffy;
-+ rq->last_tick = rq->clock;
-+ perf_event_task_tick();
-+ sched_stop_tick(rq, cpu);
-+}
-+
-+#if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
-+ defined(CONFIG_PREEMPT_TRACER))
-+/*
-+ * If the value passed in is equal to the current preempt count
-+ * then we just disabled preemption. Start timing the latency.
-+ */
-+static inline void preempt_latency_start(int val)
-+{
-+ if (preempt_count() == val) {
-+ unsigned long ip = get_lock_parent_ip();
-+#ifdef CONFIG_DEBUG_PREEMPT
-+ current->preempt_disable_ip = ip;
-+#endif
-+ trace_preempt_off(CALLER_ADDR0, ip);
-+ }
-+}
-+
-+void preempt_count_add(int val)
-+{
-+#ifdef CONFIG_DEBUG_PREEMPT
-+ /*
-+ * Underflow?
-+ */
-+ if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
-+ return;
-+#endif
-+ __preempt_count_add(val);
-+#ifdef CONFIG_DEBUG_PREEMPT
-+ /*
-+ * Spinlock count overflowing soon?
-+ */
-+ DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
-+ PREEMPT_MASK - 10);
-+#endif
-+ preempt_latency_start(val);
-+}
-+EXPORT_SYMBOL(preempt_count_add);
-+NOKPROBE_SYMBOL(preempt_count_add);
-+
-+/*
-+ * If the value passed in is equal to the current preempt count
-+ * then we just enabled preemption. Stop timing the latency.
-+ */
-+static inline void preempt_latency_stop(int val)
-+{
-+ if (preempt_count() == val)
-+ trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
-+}
-+
-+void preempt_count_sub(int val)
-+{
-+#ifdef CONFIG_DEBUG_PREEMPT
-+ /*
-+ * Underflow?
-+ */
-+ if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
-+ return;
-+ /*
-+ * Is the spinlock portion underflowing?
-+ */
-+ if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
-+ !(preempt_count() & PREEMPT_MASK)))
-+ return;
-+#endif
-+
-+ preempt_latency_stop(val);
-+ __preempt_count_sub(val);
-+}
-+EXPORT_SYMBOL(preempt_count_sub);
-+NOKPROBE_SYMBOL(preempt_count_sub);
-+
-+#else
-+static inline void preempt_latency_start(int val) { }
-+static inline void preempt_latency_stop(int val) { }
-+#endif
-+
-+static inline unsigned long get_preempt_disable_ip(struct task_struct *p)
-+{
-+#ifdef CONFIG_DEBUG_PREEMPT
-+ return p->preempt_disable_ip;
-+#else
-+ return 0;
-+#endif
-+}
-+
-+/*
-+ * The time_slice is only refilled when it is empty and that is when we set a
-+ * new deadline. Make sure update_clocks has been called recently to update
-+ * rq->niffies.
-+ */
-+static void time_slice_expired(struct task_struct *p, struct rq *rq)
-+{
-+ p->time_slice = timeslice();
-+ p->deadline = rq->niffies + task_deadline_diff(p);
-+#ifdef CONFIG_SMT_NICE
-+ if (!p->mm)
-+ p->smt_bias = 0;
-+ else if (rt_task(p))
-+ p->smt_bias = 1 << 30;
-+ else if (task_running_iso(p))
-+ p->smt_bias = 1 << 29;
-+ else if (idleprio_task(p)) {
-+ if (task_running_idle(p))
-+ p->smt_bias = 0;
-+ else
-+ p->smt_bias = 1;
-+ } else if (--p->smt_bias < 1)
-+ p->smt_bias = MAX_PRIO - p->static_prio;
-+#endif
-+}
-+
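-+/*
-+ * The resulting smt_bias ordering, summarised from the code above: kernel
-+ * threads (no mm) get 0, realtime tasks 1 << 30, running ISO tasks 1 << 29,
-+ * idleprio tasks 0 or 1, and SCHED_NORMAL tasks decay from
-+ * MAX_PRIO - p->static_prio, so on an SMT sibling a realtime task always
-+ * outweighs any SCHED_NORMAL task.
-+ */
-+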
-+/*
-+ * Timeslices below RESCHED_US are considered as good as expired as there's no
-+ * point rescheduling when there's so little time left. SCHED_BATCH tasks
-+ * have been flagged as not latency sensitive and likely to be fully CPU
-+ * bound so every time they're rescheduled they have their time_slice
-+ * refilled, but get a new later deadline to have little effect on
-+ * SCHED_NORMAL tasks.
-+ */
-+static inline void check_deadline(struct task_struct *p, struct rq *rq)
-+{
-+ if (p->time_slice < RESCHED_US || batch_task(p))
-+ time_slice_expired(p, rq);
-+}
-+
-+/*
-+ * Task selection with skiplists is a simple matter of picking off the first
-+ * task in the sorted list, an O(1) operation. The lookup is amortised O(1),
-+ * bounded by the number of processors.
-+ *
-+ * Runqueues are selectively locked based on their unlocked data and then
-+ * unlocked if not needed. At most 3 locks will be held at any time and are
-+ * released as soon as they're no longer needed. All balancing between CPUs
-+ * is thus done here in an extremely simple first come best fit manner.
-+ *
-+ * This iterates over runqueues in cache locality order. In interactive mode
-+ * it iterates over all CPUs and finds the task with the best key/deadline.
-+ * In non-interactive mode it will only take a task if it's from the current
-+ * runqueue or a runqueue with more tasks than the current one with a better
-+ * key/deadline.
-+ */
-+#ifdef CONFIG_SMP
-+static inline struct task_struct
-+*earliest_deadline_task(struct rq *rq, int cpu, struct task_struct *idle)
-+{
-+ struct rq *locked = NULL, *chosen = NULL;
-+ struct task_struct *edt = idle;
-+ int i, best_entries = 0;
-+ u64 best_key = ~0ULL;
-+
-+ for (i = 0; i < num_possible_cpus(); i++) {
-+ struct rq *other_rq = rq_order(rq, i);
-+ int entries = other_rq->sl->entries;
-+ skiplist_node *next;
-+
-+ /*
-+		 * Check for queued entries lockless first. The local runqueue
-+ * is locked so entries will always be accurate.
-+ */
-+ if (!sched_interactive) {
-+ /*
-+ * Don't reschedule balance across nodes unless the CPU
-+ * is idle.
-+ */
-+ if (edt != idle && rq->cpu_locality[other_rq->cpu] > 3)
-+ break;
-+ if (entries <= best_entries)
-+ continue;
-+ } else if (!entries)
-+ continue;
-+
-+ /* if (i) implies other_rq != rq */
-+ if (i) {
-+			/* Check for best key queued lockless first */
-+ if (other_rq->best_key >= best_key)
-+ continue;
-+
-+ if (unlikely(!trylock_rq(rq, other_rq)))
-+ continue;
-+
-+ /* Need to reevaluate entries after locking */
-+ entries = other_rq->sl->entries;
-+ if (unlikely(!entries)) {
-+ unlock_rq(other_rq);
-+ continue;
-+ }
-+ }
-+
-+ next = &other_rq->node;
-+ /*
-+ * In interactive mode we check beyond the best entry on other
-+ * runqueues if we can't get the best for smt or affinity
-+ * reasons.
-+ */
-+ while ((next = next->next[0]) != &other_rq->node) {
-+ struct task_struct *p;
-+ u64 key = next->key;
-+
-+ /* Reevaluate key after locking */
-+ if (key >= best_key)
-+ break;
-+
-+ p = next->value;
-+ if (!smt_schedule(p, rq)) {
-+ if (i && !sched_interactive)
-+ break;
-+ continue;
-+ }
-+
-+ /* Make sure affinity is ok */
-+ if (i) {
-+ if (needs_other_cpu(p, cpu)) {
-+ if (sched_interactive)
-+ continue;
-+ break;
-+ }
-+ /* From this point on p is the best so far */
-+ if (locked)
-+ unlock_rq(locked);
-+ chosen = locked = other_rq;
-+ }
-+ best_entries = entries;
-+ best_key = key;
-+ edt = p;
-+ break;
-+ }
-+		/*
-+		 * rq->preempting is a hint only as the state may have changed
-+		 * since it was set with the resched call but if we have met
-+		 * the condition we can break out here.
-+		 */
-+ if (edt == rq->preempting)
-+ break;
-+ if (i && other_rq != chosen)
-+ unlock_rq(other_rq);
-+ }
-+
-+ if (likely(edt != idle))
-+ take_task(rq, cpu, edt);
-+
-+ if (locked)
-+ unlock_rq(locked);
-+
-+ rq->preempting = NULL;
-+
-+ return edt;
-+}
-+#else /* CONFIG_SMP */
-+static inline struct task_struct
-+*earliest_deadline_task(struct rq *rq, int cpu, struct task_struct *idle)
-+{
-+ struct task_struct *edt;
-+
-+ if (unlikely(!rq->sl->entries))
-+ return idle;
-+ edt = rq->node.next[0]->value;
-+ take_task(rq, cpu, edt);
-+ return edt;
-+}
-+#endif /* CONFIG_SMP */
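-+
-+/*
-+ * A sketch of the locking discipline above, as read from the code: the
-+ * local rq lock is held on entry, at most one remote rq is trylocked per
-+ * iteration, and a third lock ('chosen'/'locked') is retained only while
-+ * a remote task remains the best candidate. That is where the "at most 3
-+ * locks" bound comes from.
-+ */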
-+
-+/*
-+ * Print scheduling while atomic bug:
-+ */
-+static noinline void __schedule_bug(struct task_struct *prev)
-+{
-+ /* Save this before calling printk(), since that will clobber it */
-+ unsigned long preempt_disable_ip = get_preempt_disable_ip(current);
-+
-+ if (oops_in_progress)
-+ return;
-+
-+ printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
-+ prev->comm, prev->pid, preempt_count());
-+
-+ debug_show_held_locks(prev);
-+ print_modules();
-+ if (irqs_disabled())
-+ print_irqtrace_events(prev);
-+ if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)
-+ && in_atomic_preempt_off()) {
-+ pr_err("Preemption disabled at:");
-+ print_ip_sym(preempt_disable_ip);
-+ pr_cont("\n");
-+ }
-+ dump_stack();
-+ add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
-+}
-+
-+/*
-+ * Various schedule()-time debugging checks and statistics:
-+ */
-+static inline void schedule_debug(struct task_struct *prev)
-+{
-+#ifdef CONFIG_SCHED_STACK_END_CHECK
-+ if (task_stack_end_corrupted(prev))
-+ panic("corrupted stack end detected inside scheduler\n");
-+#endif
-+
-+ if (unlikely(in_atomic_preempt_off())) {
-+ __schedule_bug(prev);
-+ preempt_count_set(PREEMPT_DISABLED);
-+ }
-+ rcu_sleep_check();
-+
-+ profile_hit(SCHED_PROFILING, __builtin_return_address(0));
-+
-+ schedstat_inc(this_rq()->sched_count);
-+}
-+
-+/*
-+ * The currently running task's information is all stored in rq local data
-+ * which is only modified by the local CPU.
-+ */
-+static inline void set_rq_task(struct rq *rq, struct task_struct *p)
-+{
-+ if (p == rq->idle || p->policy == SCHED_FIFO)
-+ hrexpiry_clear(rq);
-+ else
-+ hrexpiry_start(rq, US_TO_NS(p->time_slice));
-+ if (rq->clock - rq->last_tick > HALF_JIFFY_NS)
-+ rq->dither = 0;
-+ else
-+ rq->dither = rq_dither(rq);
-+
-+ rq->rq_deadline = p->deadline;
-+ rq->rq_prio = p->prio;
-+#ifdef CONFIG_SMT_NICE
-+ rq->rq_mm = p->mm;
-+ rq->rq_smt_bias = p->smt_bias;
-+#endif
-+}
-+
-+#ifdef CONFIG_SMT_NICE
-+static void check_no_siblings(struct rq __maybe_unused *this_rq) {}
-+static void wake_no_siblings(struct rq __maybe_unused *this_rq) {}
-+static void (*check_siblings)(struct rq *this_rq) = &check_no_siblings;
-+static void (*wake_siblings)(struct rq *this_rq) = &wake_no_siblings;
-+
-+/*
-+ * Iterate over SMT siblings when we've scheduled a process on cpu and decide
-+ * whether they should continue running or be descheduled.
-+ */
-+static void check_smt_siblings(struct rq *this_rq)
-+{
-+ int other_cpu;
-+
-+ for_each_cpu(other_cpu, &this_rq->thread_mask) {
-+ struct task_struct *p;
-+ struct rq *rq;
-+
-+ rq = cpu_rq(other_cpu);
-+ if (rq_idle(rq))
-+ continue;
-+ p = rq->curr;
-+ if (!smt_schedule(p, this_rq))
-+ resched_curr(rq);
-+ }
-+}
-+
-+static void wake_smt_siblings(struct rq *this_rq)
-+{
-+ int other_cpu;
-+
-+ for_each_cpu(other_cpu, &this_rq->thread_mask) {
-+ struct rq *rq;
-+
-+ rq = cpu_rq(other_cpu);
-+ if (rq_idle(rq))
-+ resched_idle(rq);
-+ }
-+}
-+#else
-+static void check_siblings(struct rq __maybe_unused *this_rq) {}
-+static void wake_siblings(struct rq __maybe_unused *this_rq) {}
-+#endif
-+
-+/*
-+ * schedule() is the main scheduler function.
-+ *
-+ * The main means of driving the scheduler and thus entering this function are:
-+ *
-+ * 1. Explicit blocking: mutex, semaphore, waitqueue, etc.
-+ *
-+ * 2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return
-+ * paths. For example, see arch/x86/entry_64.S.
-+ *
-+ * To drive preemption between tasks, the scheduler sets the flag in timer
-+ * interrupt handler scheduler_tick().
-+ *
-+ * 3. Wakeups don't really cause entry into schedule(). They add a
-+ * task to the run-queue and that's it.
-+ *
-+ * Now, if the new task added to the run-queue preempts the current
-+ * task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets
-+ * called on the nearest possible occasion:
-+ *
-+ * - If the kernel is preemptible (CONFIG_PREEMPT=y):
-+ *
-+ * - in syscall or exception context, at the next outmost
-+ * preempt_enable(). (this might be as soon as the wake_up()'s
-+ * spin_unlock()!)
-+ *
-+ * - in IRQ context, return from interrupt-handler to
-+ * preemptible context
-+ *
-+ * - If the kernel is not preemptible (CONFIG_PREEMPT is not set)
-+ * then at the next:
-+ *
-+ * - cond_resched() call
-+ * - explicit schedule() call
-+ * - return from syscall or exception to user-space
-+ * - return from interrupt-handler to user-space
-+ *
-+ * WARNING: must be called with preemption disabled!
-+ */
-+static void __sched notrace __schedule(bool preempt)
-+{
-+ struct task_struct *prev, *next, *idle;
-+ unsigned long *switch_count;
-+ bool deactivate = false;
-+ struct rq *rq;
-+ u64 niffies;
-+ int cpu;
-+
-+ cpu = smp_processor_id();
-+ rq = cpu_rq(cpu);
-+ prev = rq->curr;
-+ idle = rq->idle;
-+
-+ schedule_debug(prev);
-+
-+ local_irq_disable();
-+ rcu_note_context_switch(preempt);
-+
-+ /*
-+ * Make sure that signal_pending_state()->signal_pending() below
-+ * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE)
-+ * done by the caller to avoid the race with signal_wake_up().
-+ */
-+ rq_lock(rq);
-+ smp_mb__after_spinlock();
-+#ifdef CONFIG_SMP
-+ if (rq->preempt) {
-+ /*
-+ * Make sure resched_curr hasn't triggered a preemption
-+ * locklessly on a task that has since scheduled away. Spurious
-+ * wakeup of idle is okay though.
-+ */
-+ if (unlikely(preempt && prev != idle && !test_tsk_need_resched(prev))) {
-+ rq->preempt = NULL;
-+ clear_preempt_need_resched();
-+ rq_unlock_irq(rq);
-+ return;
-+ }
-+ rq->preempt = NULL;
-+ }
-+#endif
-+
-+ switch_count = &prev->nivcsw;
-+ if (!preempt && prev->state) {
-+ if (unlikely(signal_pending_state(prev->state, prev))) {
-+ prev->state = TASK_RUNNING;
-+ } else {
-+ deactivate = true;
-+ prev->on_rq = 0;
-+
-+ if (prev->in_iowait) {
-+ atomic_inc(&rq->nr_iowait);
-+ delayacct_blkio_start();
-+ }
-+
-+ /*
-+ * If a worker is going to sleep, notify and
-+ * ask workqueue whether it wants to wake up a
-+ * task to maintain concurrency. If so, wake
-+ * up the task.
-+ */
-+ if (prev->flags & PF_WQ_WORKER) {
-+ struct task_struct *to_wakeup;
-+
-+ to_wakeup = wq_worker_sleeping(prev);
-+ if (to_wakeup)
-+ try_to_wake_up_local(to_wakeup);
-+ }
-+ }
-+ switch_count = &prev->nvcsw;
-+ }
-+
-+ /*
-+ * Store the niffy value here for use by the next task's last_ran
-+ * below to avoid losing niffies due to update_clocks being called
-+ * again after this point.
-+ */
-+ update_clocks(rq);
-+ niffies = rq->niffies;
-+ update_cpu_clock_switch(rq, prev);
-+
-+ clear_tsk_need_resched(prev);
-+ clear_preempt_need_resched();
-+
-+ if (idle != prev) {
-+ check_deadline(prev, rq);
-+ return_task(prev, rq, cpu, deactivate);
-+ }
-+
-+ next = earliest_deadline_task(rq, cpu, idle);
-+ if (likely(next->prio != PRIO_LIMIT))
-+ clear_cpuidle_map(cpu);
-+ else {
-+ set_cpuidle_map(cpu);
-+ update_load_avg(rq, 0);
-+ }
-+
-+ set_rq_task(rq, next);
-+ next->last_ran = niffies;
-+
-+ if (likely(prev != next)) {
-+ /*
-+ * Don't reschedule an idle task or deactivated tasks
-+ */
-+ if (prev != idle && !deactivate)
-+ resched_suitable_idle(prev);
-+ if (next != idle)
-+ check_siblings(rq);
-+ else
-+ wake_siblings(rq);
-+ rq->nr_switches++;
-+ rq->curr = next;
-+ /*
-+ * The membarrier system call requires each architecture
-+ * to have a full memory barrier after updating
-+ * rq->curr, before returning to user-space. For TSO
-+ * (e.g. x86), the architecture must provide its own
-+ * barrier in switch_mm(). For weakly ordered machines
-+ * for which spin_unlock() acts as a full memory
-+ * barrier, finish_lock_switch() in common code takes
-+ * care of this barrier. For weakly ordered machines for
-+ * which spin_unlock() acts as a RELEASE barrier (only
-+ * arm64 and PowerPC), arm64 has a full barrier in
-+ * switch_to(), and PowerPC has
-+ * smp_mb__after_unlock_lock() before
-+ * finish_lock_switch().
-+ */
-+ ++*switch_count;
-+
-+ trace_sched_switch(preempt, prev, next);
-+ context_switch(rq, prev, next); /* unlocks the rq */
-+ } else {
-+ check_siblings(rq);
-+ rq_unlock(rq);
-+ do_pending_softirq(rq, next);
-+ local_irq_enable();
-+ }
-+}
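-+
-+/*
-+ * Summary of the flow above, for reference: after the preemption and
-+ * signal checks, the previous task (if not idle) has its deadline
-+ * checked and is either requeued or deactivated via return_task();
-+ * earliest_deadline_task() then picks the next task (possibly idle),
-+ * set_rq_task() arms the hrexpiry timer for its slice, and either
-+ * context_switch() or the plain unlock path finishes depending on
-+ * whether prev != next.
-+ */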
-+
-+void __noreturn do_task_dead(void)
-+{
-+ /*
-+ * The setting of TASK_RUNNING by try_to_wake_up() may be delayed
-+ * when the following two conditions become true.
-+ * - there is a race condition on mmap_sem (it is acquired by
-+ * exit_mm()), and
-+ * - an SMI occurs before setting TASK_RUNNING
-+ * (or the hypervisor of a virtual machine switches to another guest).
-+ * As a result, we may become TASK_RUNNING after becoming TASK_DEAD.
-+ *
-+ * To avoid it, we have to wait for releasing tsk->pi_lock which
-+ * is held by try_to_wake_up()
-+ */
-+ raw_spin_lock_irq(&current->pi_lock);
-+ raw_spin_unlock_irq(&current->pi_lock);
-+
-+ /* Causes final put_task_struct in finish_task_switch(). */
-+ __set_current_state(TASK_DEAD);
-+
-+ /* Tell freezer to ignore us: */
-+ current->flags |= PF_NOFREEZE;
-+ __schedule(false);
-+ BUG();
-+
-+ /* Avoid "noreturn function does return" - but don't continue if BUG() is a NOP: */
-+ for (;;)
-+ cpu_relax();
-+}
-+
-+static inline void sched_submit_work(struct task_struct *tsk)
-+{
-+ if (!tsk->state || tsk_is_pi_blocked(tsk) ||
-+ preempt_count() ||
-+ signal_pending_state(tsk->state, tsk))
-+ return;
-+
-+ /*
-+ * If we are going to sleep and we have plugged IO queued,
-+ * make sure to submit it to avoid deadlocks.
-+ */
-+ if (blk_needs_flush_plug(tsk))
-+ blk_schedule_flush_plug(tsk);
-+}
-+
-+asmlinkage __visible void __sched schedule(void)
-+{
-+ struct task_struct *tsk = current;
-+
-+ sched_submit_work(tsk);
-+ do {
-+ preempt_disable();
-+ __schedule(false);
-+ sched_preempt_enable_no_resched();
-+ } while (need_resched());
-+}
-+
-+EXPORT_SYMBOL(schedule);
-+
-+/*
-+ * synchronize_rcu_tasks() makes sure that no task is stuck in preempted
-+ * state (have scheduled out non-voluntarily) by making sure that all
-+ * tasks have either left the run queue or have gone into user space.
-+ * As idle tasks do not do either, they must not ever be preempted
-+ * (schedule out non-voluntarily).
-+ *
-+ * schedule_idle() is similar to schedule_preempt_disable() except that it
-+ * never enables preemption because it does not call sched_submit_work().
-+ */
-+void __sched schedule_idle(void)
-+{
-+ /*
-+ * As this skips calling sched_submit_work(), which the idle task does
-+ * regardless because that function is a nop when the task is in a
-+ * TASK_RUNNING state, make sure this isn't used someplace that the
-+ * current task can be in any other state. Note, idle is always in the
-+ * TASK_RUNNING state.
-+ */
-+ WARN_ON_ONCE(current->state);
-+ do {
-+ __schedule(false);
-+ } while (need_resched());
-+}
-+
-+#ifdef CONFIG_CONTEXT_TRACKING
-+asmlinkage __visible void __sched schedule_user(void)
-+{
-+ /*
-+ * If we come here after a random call to set_need_resched(),
-+ * or we have been woken up remotely but the IPI has not yet arrived,
-+ * we haven't yet exited the RCU idle mode. Do it here manually until
-+ * we find a better solution.
-+ *
-+ * NB: There are buggy callers of this function. Ideally we
-+ * should warn if prev_state != IN_USER, but that will trigger
-+ * too frequently to make sense yet.
-+ */
-+ enum ctx_state prev_state = exception_enter();
-+ schedule();
-+ exception_exit(prev_state);
-+}
-+#endif
-+
-+/**
-+ * schedule_preempt_disabled - called with preemption disabled
-+ *
-+ * Returns with preemption disabled. Note: preempt_count must be 1
-+ */
-+void __sched schedule_preempt_disabled(void)
-+{
-+ sched_preempt_enable_no_resched();
-+ schedule();
-+ preempt_disable();
-+}
-+
-+static void __sched notrace preempt_schedule_common(void)
-+{
-+ do {
-+ /*
-+ * Because the function tracer can trace preempt_count_sub()
-+ * and it also uses preempt_enable/disable_notrace(), if
-+ * NEED_RESCHED is set, the preempt_enable_notrace() called
-+ * by the function tracer will call this function again and
-+ * cause infinite recursion.
-+ *
-+ * Preemption must be disabled here before the function
-+ * tracer can trace. Break up preempt_disable() into two
-+ * calls. One to disable preemption without fear of being
-+ * traced. The other to still record the preemption latency,
-+ * which can also be traced by the function tracer.
-+ */
-+ preempt_disable_notrace();
-+ preempt_latency_start(1);
-+ __schedule(true);
-+ preempt_latency_stop(1);
-+ preempt_enable_no_resched_notrace();
-+
-+ /*
-+ * Check again in case we missed a preemption opportunity
-+ * between schedule and now.
-+ */
-+ } while (need_resched());
-+}
-+
-+#ifdef CONFIG_PREEMPT
-+/*
-+ * this is the entry point to schedule() from in-kernel preemption
-+ * off of preempt_enable. Kernel preemptions off return from interrupt
-+ * occur there and call schedule directly.
-+ */
-+asmlinkage __visible void __sched notrace preempt_schedule(void)
-+{
-+ /*
-+ * If there is a non-zero preempt_count or interrupts are disabled,
-+ * we do not want to preempt the current task. Just return..
-+ */
-+ if (likely(!preemptible()))
-+ return;
-+
-+ preempt_schedule_common();
-+}
-+NOKPROBE_SYMBOL(preempt_schedule);
-+EXPORT_SYMBOL(preempt_schedule);
-+
-+/**
-+ * preempt_schedule_notrace - preempt_schedule called by tracing
-+ *
-+ * The tracing infrastructure uses preempt_enable_notrace to prevent
-+ * recursion and tracing preempt enabling caused by the tracing
-+ * infrastructure itself. But as tracing can happen in areas coming
-+ * from userspace or just about to enter userspace, a preempt enable
-+ * can occur before user_exit() is called. This will cause the scheduler
-+ * to be called when the system is still in usermode.
-+ *
-+ * To prevent this, the preempt_enable_notrace will use this function
-+ * instead of preempt_schedule() to exit user context if needed before
-+ * calling the scheduler.
-+ */
-+asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
-+{
-+ enum ctx_state prev_ctx;
-+
-+ if (likely(!preemptible()))
-+ return;
-+
-+ do {
-+ /*
-+ * Because the function tracer can trace preempt_count_sub()
-+ * and it also uses preempt_enable/disable_notrace(), if
-+ * NEED_RESCHED is set, the preempt_enable_notrace() called
-+ * by the function tracer will call this function again and
-+ * cause infinite recursion.
-+ *
-+ * Preemption must be disabled here before the function
-+ * tracer can trace. Break up preempt_disable() into two
-+ * calls. One to disable preemption without fear of being
-+ * traced. The other to still record the preemption latency,
-+ * which can also be traced by the function tracer.
-+ */
-+ preempt_disable_notrace();
-+ preempt_latency_start(1);
-+ /*
-+ * Needs preempt disabled in case user_exit() is traced
-+ * and the tracer calls preempt_enable_notrace() causing
-+ * an infinite recursion.
-+ */
-+ prev_ctx = exception_enter();
-+ __schedule(true);
-+ exception_exit(prev_ctx);
-+
-+ preempt_latency_stop(1);
-+ preempt_enable_no_resched_notrace();
-+ } while (need_resched());
-+}
-+EXPORT_SYMBOL_GPL(preempt_schedule_notrace);
-+
-+#endif /* CONFIG_PREEMPT */
-+
-+/*
-+ * this is the entry point to schedule() from kernel preemption
-+ * off of irq context.
-+ * Note that this is called and returns with IRQs disabled. This will
-+ * protect us against recursive calling from irq.
-+ */
-+asmlinkage __visible void __sched preempt_schedule_irq(void)
-+{
-+ enum ctx_state prev_state;
-+
-+ /* Catch callers which need to be fixed */
-+ BUG_ON(preempt_count() || !irqs_disabled());
-+
-+ prev_state = exception_enter();
-+
-+ do {
-+ preempt_disable();
-+ local_irq_enable();
-+ __schedule(true);
-+ local_irq_disable();
-+ sched_preempt_enable_no_resched();
-+ } while (need_resched());
-+
-+ exception_exit(prev_state);
-+}
-+
-+int default_wake_function(wait_queue_entry_t *curr, unsigned mode, int wake_flags,
-+ void *key)
-+{
-+ return try_to_wake_up(curr->private, mode, wake_flags);
-+}
-+EXPORT_SYMBOL(default_wake_function);
-+
-+#ifdef CONFIG_RT_MUTEXES
-+
-+static inline int __rt_effective_prio(struct task_struct *pi_task, int prio)
-+{
-+ if (pi_task)
-+ prio = min(prio, pi_task->prio);
-+
-+ return prio;
-+}
-+
-+static inline int rt_effective_prio(struct task_struct *p, int prio)
-+{
-+ struct task_struct *pi_task = rt_mutex_get_top_task(p);
-+
-+ return __rt_effective_prio(pi_task, prio);
-+}
-+
-+/*
-+ * rt_mutex_setprio - set the current priority of a task
-+ * @p: task to boost
-+ * @pi_task: donor task
-+ *
-+ * This function changes the 'effective' priority of a task. It does
-+ * not touch ->normal_prio like __setscheduler().
-+ *
-+ * Used by the rt_mutex code to implement priority inheritance
-+ * logic. Call site only calls if the priority of the task changed.
-+ */
-+void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
-+{
-+ int prio, oldprio;
-+ struct rq *rq;
-+
-+ /* XXX used to be waiter->prio, not waiter->task->prio */
-+ prio = __rt_effective_prio(pi_task, p->normal_prio);
-+
-+ /*
-+ * If nothing changed; bail early.
-+ */
-+ if (p->pi_top_task == pi_task && prio == p->prio)
-+ return;
-+
-+ rq = __task_rq_lock(p);
-+ update_rq_clock(rq);
-+ /*
-+ * Set under pi_lock && rq->lock, such that the value can be used under
-+ * either lock.
-+ *
-+ * Note that there is loads of trickiness to make this pointer cache work
-+ * right. rt_mutex_slowunlock()+rt_mutex_postunlock() work together to
-+ * ensure a task is de-boosted (pi_task is set to NULL) before the
-+ * task is allowed to run again (and can exit). This ensures the pointer
-+ * points to a blocked task -- which guarantees the task is present.
-+ */
-+ p->pi_top_task = pi_task;
-+
-+ /*
-+ * For FIFO/RR we only need to set prio, if that matches we're done.
-+ */
-+ if (prio == p->prio)
-+ goto out_unlock;
-+
-+ /*
-+ * Idle task boosting is a no-no in general. There is one
-+ * exception, when PREEMPT_RT and NOHZ is active:
-+ *
-+ * The idle task calls get_next_timer_interrupt() and holds
-+ * the timer wheel base->lock on the CPU and another CPU wants
-+ * to access the timer (probably to cancel it). We can safely
-+ * ignore the boosting request, as the idle CPU runs this code
-+ * with interrupts disabled and will complete the lock
-+ * protected section without being interrupted. So there is no
-+ * real need to boost.
-+ */
-+ if (unlikely(p == rq->idle)) {
-+ WARN_ON(p != rq->curr);
-+ WARN_ON(p->pi_blocked_on);
-+ goto out_unlock;
-+ }
-+
-+ trace_sched_pi_setprio(p, pi_task);
-+ oldprio = p->prio;
-+ p->prio = prio;
-+ if (task_running(rq, p)) {
-+ if (prio > oldprio)
-+ resched_task(p);
-+ } else if (task_queued(p)) {
-+ dequeue_task(rq, p, DEQUEUE_SAVE);
-+ enqueue_task(rq, p, ENQUEUE_RESTORE);
-+ if (prio < oldprio)
-+ try_preempt(p, rq);
-+ }
-+out_unlock:
-+ __task_rq_unlock(rq);
-+}
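-+
-+/*
-+ * Illustrative PI scenario (hypothetical, drawn from the helpers above):
-+ * if a SCHED_NORMAL task holds an rt_mutex that a SCHED_FIFO waiter
-+ * blocks on, pi_task points at that waiter and p->prio is lifted to
-+ * min(p->prio, pi_task->prio) by __rt_effective_prio() until the mutex
-+ * is released and rt_mutex_setprio() runs again to de-boost.
-+ */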
-+#else
-+static inline int rt_effective_prio(struct task_struct *p, int prio)
-+{
-+ return prio;
-+}
-+#endif
-+
-+/*
-+ * Adjust the deadline for when the priority is to change, before it's
-+ * changed.
-+ */
-+static inline void adjust_deadline(struct task_struct *p, int new_prio)
-+{
-+ p->deadline += static_deadline_diff(new_prio) - task_deadline_diff(p);
-+}
-+
-+void set_user_nice(struct task_struct *p, long nice)
-+{
-+ int new_static, old_static;
-+ unsigned long flags;
-+ struct rq *rq;
-+
-+ if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE)
-+ return;
-+ new_static = NICE_TO_PRIO(nice);
-+ /*
-+ * We have to be careful, if called from sys_setpriority(),
-+ * the task might be in the middle of scheduling on another CPU.
-+ */
-+ rq = task_rq_lock(p, &flags);
-+ update_rq_clock(rq);
-+
-+ /*
-+ * The RT priorities are set via sched_setscheduler(), but we still
-+ * allow the 'normal' nice value to be set - but as expected
-+ * it won't have any effect on scheduling until the task is
-+ * SCHED_NORMAL/SCHED_BATCH again:
-+ */
-+ if (has_rt_policy(p)) {
-+ p->static_prio = new_static;
-+ goto out_unlock;
-+ }
-+
-+ adjust_deadline(p, new_static);
-+ old_static = p->static_prio;
-+ p->static_prio = new_static;
-+ p->prio = effective_prio(p);
-+
-+ if (task_queued(p)) {
-+ dequeue_task(rq, p, DEQUEUE_SAVE);
-+ enqueue_task(rq, p, ENQUEUE_RESTORE);
-+ if (new_static < old_static)
-+ try_preempt(p, rq);
-+ } else if (task_running(rq, p)) {
-+ set_rq_task(rq, p);
-+ if (old_static < new_static)
-+ resched_task(p);
-+ }
-+out_unlock:
-+ task_rq_unlock(rq, p, &flags);
-+}
-+EXPORT_SYMBOL(set_user_nice);
-+
-+/*
-+ * can_nice - check if a task can reduce its nice value
-+ * @p: task
-+ * @nice: nice value
-+ */
-+int can_nice(const struct task_struct *p, const int nice)
-+{
-+ /* Convert nice value [19,-20] to rlimit style value [1,40] */
-+ int nice_rlim = nice_to_rlimit(nice);
-+
-+ return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
-+ capable(CAP_SYS_NICE));
-+}
-+
-+#ifdef __ARCH_WANT_SYS_NICE
-+
-+/*
-+ * sys_nice - change the priority of the current process.
-+ * @increment: priority increment
-+ *
-+ * sys_setpriority is a more generic, but much slower function that
-+ * does similar things.
-+ */
-+SYSCALL_DEFINE1(nice, int, increment)
-+{
-+ long nice, retval;
-+
-+ /*
-+ * Setpriority might change our priority at the same moment.
-+ * We don't have to worry. Conceptually one call occurs first
-+ * and we have a single winner.
-+ */
-+
-+ increment = clamp(increment, -NICE_WIDTH, NICE_WIDTH);
-+ nice = task_nice(current) + increment;
-+
-+ nice = clamp_val(nice, MIN_NICE, MAX_NICE);
-+ if (increment < 0 && !can_nice(current, nice))
-+ return -EPERM;
-+
-+ retval = security_task_setnice(current, nice);
-+ if (retval)
-+ return retval;
-+
-+ set_user_nice(current, nice);
-+ return 0;
-+}
-+
-+#endif
-+
-+/**
-+ * task_prio - return the priority value of a given task.
-+ * @p: the task in question.
-+ *
-+ * Return: The priority value as seen by users in /proc.
-+ * RT tasks are offset by -100. Normal tasks are centered around 1, value goes
-+ * from 0 (SCHED_ISO) up to 82 (nice +19 SCHED_IDLEPRIO).
-+ */
-+int task_prio(const struct task_struct *p)
-+{
-+ int delta, prio = p->prio - MAX_RT_PRIO;
-+
-+ /* rt tasks and iso tasks */
-+ if (prio <= 0)
-+ goto out;
-+
-+ /* Convert to ms to avoid overflows */
-+ delta = NS_TO_MS(p->deadline - task_rq(p)->niffies);
-+ if (unlikely(delta < 0))
-+ delta = 0;
-+ delta = delta * 40 / ms_longest_deadline_diff();
-+ if (delta <= 80)
-+ prio += delta;
-+ if (idleprio_task(p))
-+ prio += 40;
-+out:
-+ return prio;
-+}
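-+
-+/*
-+ * Worked example for the scaling above (figures assumed): if the task's
-+ * deadline lies half of ms_longest_deadline_diff() in the future, delta
-+ * evaluates to 40 / 2 = 20, which is added on top of the task's base
-+ * value (p->prio - MAX_RT_PRIO); idleprio tasks add a further 40.
-+ */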
-+
-+/**
-+ * idle_cpu - is a given CPU idle currently?
-+ * @cpu: the processor in question.
-+ *
-+ * Return: 1 if the CPU is currently idle. 0 otherwise.
-+ */
-+int idle_cpu(int cpu)
-+{
-+ return cpu_curr(cpu) == cpu_rq(cpu)->idle;
-+}
-+
-+/**
-+ * idle_task - return the idle task for a given CPU.
-+ * @cpu: the processor in question.
-+ *
-+ * Return: The idle task for the CPU @cpu.
-+ */
-+struct task_struct *idle_task(int cpu)
-+{
-+ return cpu_rq(cpu)->idle;
-+}
-+
-+/**
-+ * find_process_by_pid - find a process with a matching PID value.
-+ * @pid: the pid in question.
-+ *
-+ * The task of @pid, if found. %NULL otherwise.
-+ */
-+static inline struct task_struct *find_process_by_pid(pid_t pid)
-+{
-+ return pid ? find_task_by_vpid(pid) : current;
-+}
-+
-+/* Actually do priority change: must hold rq lock. */
-+static void __setscheduler(struct task_struct *p, struct rq *rq, int policy,
-+ int prio, bool keep_boost)
-+{
-+ int oldrtprio, oldprio;
-+
-+ p->policy = policy;
-+ oldrtprio = p->rt_priority;
-+ p->rt_priority = prio;
-+ p->normal_prio = normal_prio(p);
-+ oldprio = p->prio;
-+ /*
-+ * Keep a potential priority boosting if called from
-+ * sched_setscheduler().
-+ */
-+ p->prio = normal_prio(p);
-+ if (keep_boost)
-+ p->prio = rt_effective_prio(p, p->prio);
-+
-+ if (task_running(rq, p)) {
-+ set_rq_task(rq, p);
-+ resched_task(p);
-+ } else if (task_queued(p)) {
-+ dequeue_task(rq, p, DEQUEUE_SAVE);
-+ enqueue_task(rq, p, ENQUEUE_RESTORE);
-+ if (p->prio < oldprio || p->rt_priority > oldrtprio)
-+ try_preempt(p, rq);
-+ }
-+}
-+
-+/*
-+ * Check the target process has a UID that matches the current process's
-+ */
-+static bool check_same_owner(struct task_struct *p)
-+{
-+ const struct cred *cred = current_cred(), *pcred;
-+ bool match;
-+
-+ rcu_read_lock();
-+ pcred = __task_cred(p);
-+ match = (uid_eq(cred->euid, pcred->euid) ||
-+ uid_eq(cred->euid, pcred->uid));
-+ rcu_read_unlock();
-+ return match;
-+}
-+
-+static int
-+__sched_setscheduler(struct task_struct *p, int policy,
-+ const struct sched_param *param, bool user, bool pi)
-+{
-+ struct sched_param zero_param = { .sched_priority = 0 };
-+ unsigned long flags, rlim_rtprio = 0;
-+ int retval, oldpolicy = -1;
-+ int reset_on_fork;
-+ struct rq *rq;
-+
-+ /* The pi code expects interrupts enabled */
-+ BUG_ON(pi && in_interrupt());
-+
-+ if (is_rt_policy(policy) && !capable(CAP_SYS_NICE)) {
-+ unsigned long lflags;
-+
-+ if (!lock_task_sighand(p, &lflags))
-+ return -ESRCH;
-+ rlim_rtprio = task_rlimit(p, RLIMIT_RTPRIO);
-+ unlock_task_sighand(p, &lflags);
-+ if (rlim_rtprio)
-+ goto recheck;
-+ /*
-+ * If the caller requested an RT policy without having the
-+ * necessary rights, we downgrade the policy to SCHED_ISO.
-+ * We also set the parameter to zero to pass the checks.
-+ */
-+ policy = SCHED_ISO;
-+ param = &zero_param;
-+ }
-+recheck:
-+ /* Double check policy once rq lock held */
-+ if (policy < 0) {
-+ reset_on_fork = p->sched_reset_on_fork;
-+ policy = oldpolicy = p->policy;
-+ } else {
-+ reset_on_fork = !!(policy & SCHED_RESET_ON_FORK);
-+ policy &= ~SCHED_RESET_ON_FORK;
-+
-+ if (!SCHED_RANGE(policy))
-+ return -EINVAL;
-+ }
-+
-+ /*
-+ * Valid priorities for SCHED_FIFO and SCHED_RR are
-+ * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL and
-+ * SCHED_BATCH is 0.
-+ */
-+ if (param->sched_priority < 0 ||
-+ (p->mm && param->sched_priority > MAX_USER_RT_PRIO - 1) ||
-+ (!p->mm && param->sched_priority > MAX_RT_PRIO - 1))
-+ return -EINVAL;
-+ if (is_rt_policy(policy) != (param->sched_priority != 0))
-+ return -EINVAL;
-+
-+ /*
-+ * Allow unprivileged RT tasks to decrease priority:
-+ */
-+ if (user && !capable(CAP_SYS_NICE)) {
-+ if (is_rt_policy(policy)) {
-+ unsigned long rlim_rtprio =
-+ task_rlimit(p, RLIMIT_RTPRIO);
-+
-+ /* Can't set/change the rt policy */
-+ if (policy != p->policy && !rlim_rtprio)
-+ return -EPERM;
-+
-+ /* Can't increase priority */
-+ if (param->sched_priority > p->rt_priority &&
-+ param->sched_priority > rlim_rtprio)
-+ return -EPERM;
-+ } else {
-+ switch (p->policy) {
-+ /*
-+ * Can only downgrade policies but not back to
-+ * SCHED_NORMAL
-+ */
-+ case SCHED_ISO:
-+ if (policy == SCHED_ISO)
-+ goto out;
-+ if (policy != SCHED_NORMAL)
-+ return -EPERM;
-+ break;
-+ case SCHED_BATCH:
-+ if (policy == SCHED_BATCH)
-+ goto out;
-+ if (policy != SCHED_IDLEPRIO)
-+ return -EPERM;
-+ break;
-+ case SCHED_IDLEPRIO:
-+ if (policy == SCHED_IDLEPRIO)
-+ goto out;
-+ return -EPERM;
-+ default:
-+ break;
-+ }
-+ }
-+
-+ /* Can't change other user's priorities */
-+ if (!check_same_owner(p))
-+ return -EPERM;
-+
-+ /* Normal users shall not reset the sched_reset_on_fork flag: */
-+ if (p->sched_reset_on_fork && !reset_on_fork)
-+ return -EPERM;
-+ }
-+
-+ if (user) {
-+ retval = security_task_setscheduler(p);
-+ if (retval)
-+ return retval;
-+ }
-+
-+ /*
-+ * Make sure no PI-waiters arrive (or leave) while we are
-+ * changing the priority of the task:
-+ *
-+ * To be able to change p->policy safely, the runqueue lock must be
-+ * held.
-+ */
-+ rq = task_rq_lock(p, &flags);
-+ update_rq_clock(rq);
-+
-+ /*
-+ * Changing the policy of the stop threads is a very bad idea:
-+ */
-+ if (p == rq->stop) {
-+ task_rq_unlock(rq, p, &flags);
-+ return -EINVAL;
-+ }
-+
-+ /*
-+ * If not changing anything there's no need to proceed further:
-+ */
-+ if (unlikely(policy == p->policy && (!is_rt_policy(policy) ||
-+ param->sched_priority == p->rt_priority))) {
-+ task_rq_unlock(rq, p, &flags);
-+ return 0;
-+ }
-+
-+ /* Re-check policy now with rq lock held */
-+ if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
-+ policy = oldpolicy = -1;
-+ task_rq_unlock(rq, p, &flags);
-+ goto recheck;
-+ }
-+ p->sched_reset_on_fork = reset_on_fork;
-+
-+ __setscheduler(p, rq, policy, param->sched_priority, pi);
-+ task_rq_unlock(rq, p, &flags);
-+
-+ if (pi)
-+ rt_mutex_adjust_pi(p);
-+out:
-+ return 0;
-+}
-+
-+/**
-+ * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
-+ * @p: the task in question.
-+ * @policy: new policy.
-+ * @param: structure containing the new RT priority.
-+ *
-+ * Return: 0 on success. An error code otherwise.
-+ *
-+ * NOTE that the task may be already dead.
-+ */
-+int sched_setscheduler(struct task_struct *p, int policy,
-+ const struct sched_param *param)
-+{
-+ return __sched_setscheduler(p, policy, param, true, true);
-+}
-+
-+EXPORT_SYMBOL_GPL(sched_setscheduler);
-+
-+int sched_setattr(struct task_struct *p, const struct sched_attr *attr)
-+{
-+ const struct sched_param param = { .sched_priority = attr->sched_priority };
-+ int policy = attr->sched_policy;
-+
-+ return __sched_setscheduler(p, policy, &param, true, true);
-+}
-+EXPORT_SYMBOL_GPL(sched_setattr);
-+
-+/**
-+ * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
-+ * @p: the task in question.
-+ * @policy: new policy.
-+ * @param: structure containing the new RT priority.
-+ *
-+ * Just like sched_setscheduler, only don't bother checking if the
-+ * current context has permission. For example, this is needed in
-+ * stop_machine(): we create temporary high priority worker threads,
-+ * but our caller might not have that capability.
-+ *
-+ * Return: 0 on success. An error code otherwise.
-+ */
-+int sched_setscheduler_nocheck(struct task_struct *p, int policy,
-+ const struct sched_param *param)
-+{
-+ return __sched_setscheduler(p, policy, param, false, true);
-+}
-+EXPORT_SYMBOL_GPL(sched_setscheduler_nocheck);
-+
-+static int
-+do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
-+{
-+ struct sched_param lparam;
-+ struct task_struct *p;
-+ int retval;
-+
-+ if (!param || pid < 0)
-+ return -EINVAL;
-+ if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
-+ return -EFAULT;
-+
-+ rcu_read_lock();
-+ retval = -ESRCH;
-+ p = find_process_by_pid(pid);
-+ if (p != NULL)
-+ retval = sched_setscheduler(p, policy, &lparam);
-+ rcu_read_unlock();
-+
-+ return retval;
-+}
-+
-+/*
-+ * Mimics kernel/events/core.c perf_copy_attr().
-+ */
-+static int sched_copy_attr(struct sched_attr __user *uattr,
-+ struct sched_attr *attr)
-+{
-+ u32 size;
-+ int ret;
-+
-+ if (!access_ok(VERIFY_WRITE, uattr, SCHED_ATTR_SIZE_VER0))
-+ return -EFAULT;
-+
-+ /* Zero the full structure, so that a short copy will be nice: */
-+ memset(attr, 0, sizeof(*attr));
-+
-+ ret = get_user(size, &uattr->size);
-+ if (ret)
-+ return ret;
-+
-+ /* Bail out on silly large sizes: */
-+ if (size > PAGE_SIZE)
-+ goto err_size;
-+
-+ /* ABI compatibility quirk: */
-+ if (!size)
-+ size = SCHED_ATTR_SIZE_VER0;
-+
-+ if (size < SCHED_ATTR_SIZE_VER0)
-+ goto err_size;
-+
-+ /*
-+ * If we're handed a bigger struct than we know of,
-+ * ensure all the unknown bits are 0 - i.e. new
-+ * user-space does not rely on any kernel feature
-+ * extensions we don't know about yet.
-+ */
-+ if (size > sizeof(*attr)) {
-+ unsigned char __user *addr;
-+ unsigned char __user *end;
-+ unsigned char val;
-+
-+ addr = (void __user *)uattr + sizeof(*attr);
-+ end = (void __user *)uattr + size;
-+
-+ for (; addr < end; addr++) {
-+ ret = get_user(val, addr);
-+ if (ret)
-+ return ret;
-+ if (val)
-+ goto err_size;
-+ }
-+ size = sizeof(*attr);
-+ }
-+
-+ ret = copy_from_user(attr, uattr, size);
-+ if (ret)
-+ return -EFAULT;
-+
-+ /*
-+ * XXX: Do we want to be lenient like existing syscalls; or do we want
-+ * to be strict and return an error on out-of-bounds values?
-+ */
-+ attr->sched_nice = clamp(attr->sched_nice, -20, 19);
-+
-+ /* sched/core.c uses zero here but we already know ret is zero */
-+ return 0;
-+
-+err_size:
-+ put_user(sizeof(*attr), &uattr->size);
-+ return -E2BIG;
-+}
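-+
-+/*
-+ * Note on the compat loop above: when userspace passes a struct larger
-+ * than the kernel's sched_attr, every trailing byte is probed with
-+ * get_user() and must be zero, mirroring perf_copy_attr(); a single
-+ * non-zero byte yields -E2BIG, with the kernel's size written back so
-+ * callers can retry with a trimmed structure.
-+ */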
-+
-+/*
-+ * sched_setparam() passes in -1 for its policy, to let the functions
-+ * it calls know not to change it.
-+ */
-+#define SETPARAM_POLICY -1
-+
-+/**
-+ * sys_sched_setscheduler - set/change the scheduler policy and RT priority
-+ * @pid: the pid in question.
-+ * @policy: new policy.
-+ * @param: structure containing the new RT priority.
-+ *
-+ * Return: 0 on success. An error code otherwise.
-+ */
-+SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy, struct sched_param __user *, param)
-+{
-+ if (policy < 0)
-+ return -EINVAL;
-+
-+ return do_sched_setscheduler(pid, policy, param);
-+}
-+
-+/**
-+ * sys_sched_setparam - set/change the RT priority of a thread
-+ * @pid: the pid in question.
-+ * @param: structure containing the new RT priority.
-+ *
-+ * Return: 0 on success. An error code otherwise.
-+ */
-+SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
-+{
-+ return do_sched_setscheduler(pid, SETPARAM_POLICY, param);
-+}
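-+
-+/*
-+ * A minimal userspace sketch of driving these syscalls, assuming the
-+ * glibc sched_setscheduler() wrapper (illustrative, not part of this
-+ * file):
-+ *
-+ *	struct sched_param sp = { .sched_priority = 10 };
-+ *	if (sched_setscheduler(0, SCHED_FIFO, &sp) == -1)
-+ *		perror("sched_setscheduler");
-+ */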
-+
-+/**
-+ * sys_sched_setattr - same as above, but with extended sched_attr
-+ * @pid: the pid in question.
-+ * @uattr: structure containing the extended parameters.
-+ */
-+SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr,
-+ unsigned int, flags)
-+{
-+ struct sched_attr attr;
-+ struct task_struct *p;
-+ int retval;
-+
-+ if (!uattr || pid < 0 || flags)
-+ return -EINVAL;
-+
-+ retval = sched_copy_attr(uattr, &attr);
-+ if (retval)
-+ return retval;
-+
-+ if ((int)attr.sched_policy < 0)
-+ return -EINVAL;
-+
-+ rcu_read_lock();
-+ retval = -ESRCH;
-+ p = find_process_by_pid(pid);
-+ if (p != NULL)
-+ retval = sched_setattr(p, &attr);
-+ rcu_read_unlock();
-+
-+ return retval;
-+}
-+
-+/**
-+ * sys_sched_getscheduler - get the policy (scheduling class) of a thread
-+ * @pid: the pid in question.
-+ *
-+ * Return: On success, the policy of the thread. Otherwise, a negative error
-+ * code.
-+ */
-+SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
-+{
-+ struct task_struct *p;
-+ int retval = -EINVAL;
-+
-+ if (pid < 0)
-+ goto out_nounlock;
-+
-+ retval = -ESRCH;
-+ rcu_read_lock();
-+ p = find_process_by_pid(pid);
-+ if (p) {
-+ retval = security_task_getscheduler(p);
-+ if (!retval)
-+ retval = p->policy;
-+ }
-+ rcu_read_unlock();
-+
-+out_nounlock:
-+ return retval;
-+}
-+
-+/**
-+ * sys_sched_getparam - get the RT priority of a thread
-+ * @pid: the pid in question.
-+ * @param: structure containing the RT priority.
-+ *
-+ * Return: On success, 0 and the RT priority is in @param. Otherwise, an error
-+ * code.
-+ */
-+SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
-+{
-+ struct sched_param lp = { .sched_priority = 0 };
-+ struct task_struct *p;
-+ int retval = -EINVAL;
-+
-+ if (!param || pid < 0)
-+ goto out_nounlock;
-+
-+ rcu_read_lock();
-+ p = find_process_by_pid(pid);
-+ retval = -ESRCH;
-+ if (!p)
-+ goto out_unlock;
-+
-+ retval = security_task_getscheduler(p);
-+ if (retval)
-+ goto out_unlock;
-+
-+ if (has_rt_policy(p))
-+ lp.sched_priority = p->rt_priority;
-+ rcu_read_unlock();
-+
-+ /*
-+ * This one might sleep, we cannot do it with a spinlock held ...
-+ */
-+ retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
-+
-+out_nounlock:
-+ return retval;
-+
-+out_unlock:
-+ rcu_read_unlock();
-+ return retval;
-+}
-+
-+static int sched_read_attr(struct sched_attr __user *uattr,
-+ struct sched_attr *attr,
-+ unsigned int usize)
-+{
-+ int ret;
-+
-+ if (!access_ok(VERIFY_WRITE, uattr, usize))
-+ return -EFAULT;
-+
-+ /*
-+ * If we're handed a smaller struct than we know of,
-+ * ensure all the unknown bits are 0 - i.e. old
-+ * user-space does not get incomplete information.
-+ */
-+ if (usize < sizeof(*attr)) {
-+ unsigned char *addr;
-+ unsigned char *end;
-+
-+ addr = (void *)attr + usize;
-+ end = (void *)attr + sizeof(*attr);
-+
-+ for (; addr < end; addr++) {
-+ if (*addr)
-+ return -EFBIG;
-+ }
-+
-+ attr->size = usize;
-+ }
-+
-+ ret = copy_to_user(uattr, attr, attr->size);
-+ if (ret)
-+ return -EFAULT;
-+
-+ /* sched/core.c uses zero here but we already know ret is zero */
-+ return ret;
-+}
-+
-+/**
-+ * sys_sched_getattr - similar to sched_getparam, but with sched_attr
-+ * @pid: the pid in question.
-+ * @uattr: structure containing the extended parameters.
-+ * @size: sizeof(attr) for fwd/bwd comp.
-+ * @flags: for future extension.
-+ */
-+SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
-+ unsigned int, size, unsigned int, flags)
-+{
-+ struct sched_attr attr = {
-+ .size = sizeof(struct sched_attr),
-+ };
-+ struct task_struct *p;
-+ int retval;
-+
-+ if (!uattr || pid < 0 || size > PAGE_SIZE ||
-+ size < SCHED_ATTR_SIZE_VER0 || flags)
-+ return -EINVAL;
-+
-+ rcu_read_lock();
-+ p = find_process_by_pid(pid);
-+ retval = -ESRCH;
-+ if (!p)
-+ goto out_unlock;
-+
-+ retval = security_task_getscheduler(p);
-+ if (retval)
-+ goto out_unlock;
-+
-+ attr.sched_policy = p->policy;
-+ if (rt_task(p))
-+ attr.sched_priority = p->rt_priority;
-+ else
-+ attr.sched_nice = task_nice(p);
-+
-+ rcu_read_unlock();
-+
-+ retval = sched_read_attr(uattr, &attr, size);
-+ return retval;
-+
-+out_unlock:
-+ rcu_read_unlock();
-+ return retval;
-+}
-+
-+long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
-+{
-+ cpumask_var_t cpus_allowed, new_mask;
-+ struct task_struct *p;
-+ int retval;
-+
-+ rcu_read_lock();
-+
-+ p = find_process_by_pid(pid);
-+ if (!p) {
-+ rcu_read_unlock();
-+ return -ESRCH;
-+ }
-+
-+ /* Prevent p going away */
-+ get_task_struct(p);
-+ rcu_read_unlock();
-+
-+ if (p->flags & PF_NO_SETAFFINITY) {
-+ retval = -EINVAL;
-+ goto out_put_task;
-+ }
-+ if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
-+ retval = -ENOMEM;
-+ goto out_put_task;
-+ }
-+ if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
-+ retval = -ENOMEM;
-+ goto out_free_cpus_allowed;
-+ }
-+ retval = -EPERM;
-+ if (!check_same_owner(p)) {
-+ rcu_read_lock();
-+ if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) {
-+ rcu_read_unlock();
-+ goto out_unlock;
-+ }
-+ rcu_read_unlock();
-+ }
-+
-+ retval = security_task_setscheduler(p);
-+ if (retval)
-+ goto out_unlock;
-+
-+ cpuset_cpus_allowed(p, cpus_allowed);
-+ cpumask_and(new_mask, in_mask, cpus_allowed);
-+again:
-+ retval = __set_cpus_allowed_ptr(p, new_mask, true);
-+
-+ if (!retval) {
-+ cpuset_cpus_allowed(p, cpus_allowed);
-+ if (!cpumask_subset(new_mask, cpus_allowed)) {
-+ /*
-+ * We must have raced with a concurrent cpuset
-+ * update. Just reset the cpus_allowed to the
-+ * cpuset's cpus_allowed
-+ */
-+ cpumask_copy(new_mask, cpus_allowed);
-+ goto again;
-+ }
-+ }
-+out_unlock:
-+ free_cpumask_var(new_mask);
-+out_free_cpus_allowed:
-+ free_cpumask_var(cpus_allowed);
-+out_put_task:
-+ put_task_struct(p);
-+ return retval;
-+}
-+
-+static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
-+ cpumask_t *new_mask)
-+{
-+ if (len < cpumask_size())
-+ cpumask_clear(new_mask);
-+ else if (len > cpumask_size())
-+ len = cpumask_size();
-+
-+ return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
-+}
-+
-+/**
-+ * sys_sched_setaffinity - set the CPU affinity of a process
-+ * @pid: pid of the process
-+ * @len: length in bytes of the bitmask pointed to by user_mask_ptr
-+ * @user_mask_ptr: user-space pointer to the new CPU mask
-+ *
-+ * Return: 0 on success. An error code otherwise.
-+ */
-+SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
-+ unsigned long __user *, user_mask_ptr)
-+{
-+ cpumask_var_t new_mask;
-+ int retval;
-+
-+ if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
-+ return -ENOMEM;
-+
-+ retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
-+ if (retval == 0)
-+ retval = sched_setaffinity(pid, new_mask);
-+ free_cpumask_var(new_mask);
-+ return retval;
-+}
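-+
-+/*
-+ * Userspace usage sketch (assuming the glibc cpu_set_t helpers; shown
-+ * for illustration only):
-+ *
-+ *	cpu_set_t set;
-+ *	CPU_ZERO(&set);
-+ *	CPU_SET(2, &set);
-+ *	if (sched_setaffinity(0, sizeof(set), &set) == -1)
-+ *		perror("sched_setaffinity");
-+ */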
-+
-+long sched_getaffinity(pid_t pid, cpumask_t *mask)
-+{
-+ struct task_struct *p;
-+ unsigned long flags;
-+ int retval;
-+
-+ get_online_cpus();
-+ rcu_read_lock();
-+
-+ retval = -ESRCH;
-+ p = find_process_by_pid(pid);
-+ if (!p)
-+ goto out_unlock;
-+
-+ retval = security_task_getscheduler(p);
-+ if (retval)
-+ goto out_unlock;
-+
-+ raw_spin_lock_irqsave(&p->pi_lock, flags);
-+ cpumask_and(mask, &p->cpus_allowed, cpu_active_mask);
-+ raw_spin_unlock_irqrestore(&p->pi_lock, flags);
-+
-+out_unlock:
-+ rcu_read_unlock();
-+ put_online_cpus();
-+
-+ return retval;
-+}
-+
-+/**
-+ * sys_sched_getaffinity - get the CPU affinity of a process
-+ * @pid: pid of the process
-+ * @len: length in bytes of the bitmask pointed to by user_mask_ptr
-+ * @user_mask_ptr: user-space pointer to hold the current CPU mask
-+ *
-+ * Return: 0 on success. An error code otherwise.
-+ */
-+SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
-+ unsigned long __user *, user_mask_ptr)
-+{
-+ int ret;
-+ cpumask_var_t mask;
-+
-+ if ((len * BITS_PER_BYTE) < nr_cpu_ids)
-+ return -EINVAL;
-+ if (len & (sizeof(unsigned long)-1))
-+ return -EINVAL;
-+
-+ if (!alloc_cpumask_var(&mask, GFP_KERNEL))
-+ return -ENOMEM;
-+
-+ ret = sched_getaffinity(pid, mask);
-+ if (ret == 0) {
-+ size_t retlen = min_t(size_t, len, cpumask_size());
-+
-+ if (copy_to_user(user_mask_ptr, mask, retlen))
-+ ret = -EFAULT;
-+ else
-+ ret = retlen;
-+ }
-+ free_cpumask_var(mask);
-+
-+ return ret;
-+}
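-+
-+/*
-+ * Note: on success this syscall returns the number of bytes copied into
-+ * user_mask_ptr (retlen) rather than 0; the glibc wrapper hides this
-+ * and returns 0 to callers.
-+ */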
-+
-+/**
-+ * sys_sched_yield - yield the current processor to other threads.
-+ *
-+ * This function yields the current CPU to other tasks. It does this by
-+ * scheduling away the current task. If it still has the earliest deadline
-+ * it will be scheduled again as the next task.
-+ *
-+ * Return: 0.
-+ */
-+SYSCALL_DEFINE0(sched_yield)
-+{
-+ struct rq *rq;
-+
-+ if (!sched_yield_type)
-+ goto out;
-+
-+ local_irq_disable();
-+ rq = this_rq();
-+ rq_lock(rq);
-+
-+ if (sched_yield_type > 1)
-+ time_slice_expired(current, rq);
-+ schedstat_inc(rq->yld_count);
-+
-+ /*
-+ * Since we are going to call schedule() anyway, there's
-+ * no need to preempt or enable interrupts:
-+ */
-+ preempt_disable();
-+ rq_unlock(rq);
-+ sched_preempt_enable_no_resched();
-+
-+ schedule();
-+out:
-+ return 0;
-+}
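-+
-+/*
-+ * sched_yield_type semantics as implemented above: 0 makes sched_yield()
-+ * a no-op, 1 reschedules without touching the timeslice, and anything
-+ * greater also expires the current timeslice so the task is requeued
-+ * with a new deadline.
-+ */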
-+
-+#ifndef CONFIG_PREEMPT
-+int __sched _cond_resched(void)
-+{
-+ if (should_resched(0)) {
-+ preempt_schedule_common();
-+ return 1;
-+ }
-+ return 0;
-+}
-+EXPORT_SYMBOL(_cond_resched);
-+#endif
-+
-+/*
-+ * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
-+ * call schedule, and on return reacquire the lock.
-+ *
-+ * This works OK both with and without CONFIG_PREEMPT. We do strange low-level
-+ * operations here to prevent schedule() from being called twice (once via
-+ * spin_unlock(), once by hand).
-+ */
-+int __cond_resched_lock(spinlock_t *lock)
-+{
-+ int resched = should_resched(PREEMPT_LOCK_OFFSET);
-+ int ret = 0;
-+
-+ lockdep_assert_held(lock);
-+
-+ if (spin_needbreak(lock) || resched) {
-+ spin_unlock(lock);
-+ if (resched)
-+ preempt_schedule_common();
-+ else
-+ cpu_relax();
-+ ret = 1;
-+ spin_lock(lock);
-+ }
-+ return ret;
-+}
-+EXPORT_SYMBOL(__cond_resched_lock);
-+
-+int __sched __cond_resched_softirq(void)
-+{
-+ BUG_ON(!in_softirq());
-+
-+ if (should_resched(SOFTIRQ_DISABLE_OFFSET)) {
-+ local_bh_enable();
-+ preempt_schedule_common();
-+ local_bh_disable();
-+ return 1;
-+ }
-+ return 0;
-+}
-+EXPORT_SYMBOL(__cond_resched_softirq);
-+
-+/**
-+ * yield - yield the current processor to other threads.
-+ *
-+ * Do not ever use this function, there's a 99% chance you're doing it wrong.
-+ *
-+ * The scheduler is at all times free to pick the calling task as the most
-+ * eligible task to run, if removing the yield() call from your code breaks
-+ * it, it's already broken.
-+ *
-+ * Typical broken usage is:
-+ *
-+ * while (!event)
-+ * yield();
-+ *
-+ * where one assumes that yield() will let 'the other' process run that will
-+ * make event true. If the current task is a SCHED_FIFO task that will never
-+ * happen. Never use yield() as a progress guarantee!!
-+ *
-+ * If you want to use yield() to wait for something, use wait_event().
-+ * If you want to use yield() to be 'nice' for others, use cond_resched().
-+ * If you still want to use yield(), do not!
-+ */
-+void __sched yield(void)
-+{
-+ set_current_state(TASK_RUNNING);
-+ sys_sched_yield();
-+}
-+EXPORT_SYMBOL(yield);
-+
-+/**
-+ * yield_to - yield the current processor to another thread in
-+ * your thread group, or accelerate that thread toward the
-+ * processor it's on.
-+ * @p: target task
-+ * @preempt: whether task preemption is allowed or not
-+ *
-+ * It's the caller's job to ensure that the target task struct
-+ * can't go away on us before we can do any checks.
-+ *
-+ * Return:
-+ * true (>0) if we indeed boosted the target task.
-+ * false (0) if we failed to boost the target.
-+ * -ESRCH if there's no task to yield to.
-+ */
-+int __sched yield_to(struct task_struct *p, bool preempt)
-+{
-+ struct task_struct *rq_p;
-+ struct rq *rq, *p_rq;
-+ unsigned long flags;
-+ int yielded = 0;
-+
-+ local_irq_save(flags);
-+ rq = this_rq();
-+
-+again:
-+ p_rq = task_rq(p);
-+ /*
-+ * If the target task is already running, or is not runnable at
-+ * all, there's no point in yielding to it.
-+ */
-+ if (task_running(p_rq, p) || p->state) {
-+ yielded = -ESRCH;
-+ goto out_irq;
-+ }
-+
-+ double_rq_lock(rq, p_rq);
-+ if (unlikely(task_rq(p) != p_rq)) {
-+ double_rq_unlock(rq, p_rq);
-+ goto again;
-+ }
-+
-+ yielded = 1;
-+ schedstat_inc(rq->yld_count);
-+ rq_p = rq->curr;
-+ if (p->deadline > rq_p->deadline)
-+ p->deadline = rq_p->deadline;
-+ p->time_slice += rq_p->time_slice;
-+ if (p->time_slice > timeslice())
-+ p->time_slice = timeslice();
-+ time_slice_expired(rq_p, rq);
-+ if (preempt && rq != p_rq)
-+ resched_task(p_rq->curr);
-+ double_rq_unlock(rq, p_rq);
-+out_irq:
-+ local_irq_restore(flags);
-+
-+ if (yielded > 0)
-+ schedule();
-+ return yielded;
-+}
-+EXPORT_SYMBOL_GPL(yield_to);
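-+
-+/*
-+ * Illustrative effect of yield_to() above: the target inherits the
-+ * yielder's deadline if it is earlier, absorbs the yielder's remaining
-+ * time_slice (capped at a full timeslice()), and the yielder's own
-+ * slice is expired, so the target becomes the preferred next pick.
-+ */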
-+
-+int io_schedule_prepare(void)
-+{
-+ int old_iowait = current->in_iowait;
-+
-+ current->in_iowait = 1;
-+ blk_schedule_flush_plug(current);
-+
-+ return old_iowait;
-+}
-+
-+void io_schedule_finish(int token)
-+{
-+ current->in_iowait = token;
-+}
-+
-+/*
-+ * This task is about to go to sleep on IO. Increment rq->nr_iowait so
-+ * that process accounting knows that this is a task in IO wait state.
-+ *
-+ * But don't do that if it is a deliberate, throttling IO wait (this task
-+ * has set its backing_dev_info: the queue against which it should throttle)
-+ */
-+
-+long __sched io_schedule_timeout(long timeout)
-+{
-+ int token;
-+ long ret;
-+
-+ token = io_schedule_prepare();
-+ ret = schedule_timeout(timeout);
-+ io_schedule_finish(token);
-+
-+ return ret;
-+}
-+EXPORT_SYMBOL(io_schedule_timeout);
-+
-+void io_schedule(void)
-+{
-+ int token;
-+
-+ token = io_schedule_prepare();
-+ schedule();
-+ io_schedule_finish(token);
-+}
-+EXPORT_SYMBOL(io_schedule);
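-+
-+/*
-+ * Usage sketch of the prepare/finish pair, following what io_schedule()
-+ * itself does above: callers that sleep in several steps can bracket
-+ * the whole region instead of calling io_schedule() repeatedly:
-+ *
-+ *	int tok = io_schedule_prepare();
-+ *	...sleep one or more times...
-+ *	io_schedule_finish(tok);
-+ */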
-+
-+/**
-+ * sys_sched_get_priority_max - return maximum RT priority.
-+ * @policy: scheduling class.
-+ *
-+ * Return: On success, this syscall returns the maximum
-+ * rt_priority that can be used by a given scheduling class.
-+ * On failure, a negative error code is returned.
-+ */
-+SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
-+{
-+ int ret = -EINVAL;
-+
-+ switch (policy) {
-+ case SCHED_FIFO:
-+ case SCHED_RR:
-+ ret = MAX_USER_RT_PRIO-1;
-+ break;
-+ case SCHED_NORMAL:
-+ case SCHED_BATCH:
-+ case SCHED_ISO:
-+ case SCHED_IDLEPRIO:
-+ ret = 0;
-+ break;
-+ }
-+ return ret;
-+}
-+
-+/**
-+ * sys_sched_get_priority_min - return minimum RT priority.
-+ * @policy: scheduling class.
-+ *
-+ * Return: On success, this syscall returns the minimum
-+ * rt_priority that can be used by a given scheduling class.
-+ * On failure, a negative error code is returned.
-+ */
-+SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
-+{
-+ int ret = -EINVAL;
-+
-+ switch (policy) {
-+ case SCHED_FIFO:
-+ case SCHED_RR:
-+ ret = 1;
-+ break;
-+ case SCHED_NORMAL:
-+ case SCHED_BATCH:
-+ case SCHED_ISO:
-+ case SCHED_IDLEPRIO:
-+ ret = 0;
-+ break;
-+ }
-+ return ret;
-+}
-+
-+/**
-+ * sys_sched_rr_get_interval - return the default timeslice of a process.
-+ * @pid: pid of the process.
-+ * @interval: userspace pointer to the timeslice value.
-+ *
-+ * Return: On success, 0 and the timeslice is in @interval. Otherwise,
-+ * an error code.
-+ */
-+SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
-+ struct timespec __user *, interval)
-+{
-+ struct task_struct *p;
-+ unsigned int time_slice;
-+ unsigned long flags;
-+ struct timespec t;
-+ struct rq *rq;
-+ int retval;
-+
-+ if (pid < 0)
-+ return -EINVAL;
-+
-+ retval = -ESRCH;
-+ rcu_read_lock();
-+ p = find_process_by_pid(pid);
-+ if (!p)
-+ goto out_unlock;
-+
-+ retval = security_task_getscheduler(p);
-+ if (retval)
-+ goto out_unlock;
-+
-+ rq = task_rq_lock(p, &flags);
-+ time_slice = p->policy == SCHED_FIFO ? 0 : MS_TO_NS(task_timeslice(p));
-+ task_rq_unlock(rq, p, &flags);
-+
-+ rcu_read_unlock();
-+ t = ns_to_timespec(time_slice);
-+ retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
-+ return retval;
-+
-+out_unlock:
-+ rcu_read_unlock();
-+ return retval;
-+}
-+
-+void sched_show_task(struct task_struct *p)
-+{
-+ unsigned long free = 0;
-+ int ppid;
-+
-+ if (!try_get_task_stack(p))
-+ return;
-+
-+ printk(KERN_INFO "%-15.15s %c", p->comm, task_state_to_char(p));
-+
-+ if (p->state == TASK_RUNNING)
-+ printk(KERN_CONT " running task ");
-+#ifdef CONFIG_DEBUG_STACK_USAGE
-+ free = stack_not_used(p);
-+#endif
-+ ppid = 0;
-+ rcu_read_lock();
-+ if (pid_alive(p))
-+ ppid = task_pid_nr(rcu_dereference(p->real_parent));
-+ rcu_read_unlock();
-+ printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free,
-+ task_pid_nr(p), ppid,
-+ (unsigned long)task_thread_info(p)->flags);
-+
-+ print_worker_info(KERN_INFO, p);
-+ show_stack(p, NULL);
-+ put_task_stack(p);
-+}
-+
-+static inline bool
-+state_filter_match(unsigned long state_filter, struct task_struct *p)
-+{
-+ /* no filter, everything matches */
-+ if (!state_filter)
-+ return true;
-+
-+ /* filter, but doesn't match */
-+ if (!(p->state & state_filter))
-+ return false;
-+
-+ /*
-+ * When looking for TASK_UNINTERRUPTIBLE skip TASK_IDLE (allows
-+ * TASK_KILLABLE).
-+ */
-+ if (state_filter == TASK_UNINTERRUPTIBLE && p->state == TASK_IDLE)
-+ return false;
-+
-+ return true;
-+}
-+
-+void show_state_filter(unsigned long state_filter)
-+{
-+ struct task_struct *g, *p;
-+
-+#if BITS_PER_LONG == 32
-+ printk(KERN_INFO
-+ "  task                PC stack   pid father\n");
-+#else
-+ printk(KERN_INFO
-+ "  task                        PC stack   pid father\n");
-+#endif
-+ rcu_read_lock();
-+ for_each_process_thread(g, p) {
-+ /*
-+ * reset the NMI-timeout, listing all files on a slow
-+ * console might take a lot of time:
-+ * Also, reset softlockup watchdogs on all CPUs, because
-+ * another CPU might be blocked waiting for us to process
-+ * an IPI.
-+ */
-+ touch_nmi_watchdog();
-+ touch_all_softlockup_watchdogs();
-+ if (state_filter_match(state_filter, p))
-+ sched_show_task(p);
-+ }
-+
-+ rcu_read_unlock();
-+ /*
-+ * Only show locks if all tasks are dumped:
-+ */
-+ if (!state_filter)
-+ debug_show_all_locks();
-+}
-+
-+void dump_cpu_task(int cpu)
-+{
-+ pr_info("Task dump for CPU %d:\n", cpu);
-+ sched_show_task(cpu_curr(cpu));
-+}
-+
-+#ifdef CONFIG_SMP
-+void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask)
-+{
-+ cpumask_copy(&p->cpus_allowed, new_mask);
-+ p->nr_cpus_allowed = cpumask_weight(new_mask);
-+}
-+
-+void __do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
-+{
-+ struct rq *rq = task_rq(p);
-+
-+ lockdep_assert_held(&p->pi_lock);
-+
-+ cpumask_copy(&p->cpus_allowed, new_mask);
-+
-+ if (task_queued(p)) {
-+ /*
-+ * Because __kthread_bind() calls this on blocked tasks without
-+ * holding rq->lock.
-+ */
-+ lockdep_assert_held(&rq->lock);
-+ }
-+}
-+
-+/*
-+ * do_set_cpus_allowed() is for callers outside the scheduler core; it
-+ * must not be used on a running or queued task. We should be holding
-+ * pi_lock.
-+ */
-+void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
-+{
-+ __do_set_cpus_allowed(p, new_mask);
-+ if (needs_other_cpu(p, task_cpu(p))) {
-+ struct rq *rq;
-+
-+ rq = __task_rq_lock(p);
-+ set_task_cpu(p, valid_task_cpu(p));
-+ resched_task(p);
-+ __task_rq_unlock(rq);
-+ }
-+}
-+#endif
-+
-+/**
-+ * init_idle - set up an idle thread for a given CPU
-+ * @idle: task in question
-+ * @cpu: cpu the idle task belongs to
-+ *
-+ * NOTE: this function does not set the idle thread's NEED_RESCHED
-+ * flag, to make booting more robust.
-+ */
-+void init_idle(struct task_struct *idle, int cpu)
-+{
-+ struct rq *rq = cpu_rq(cpu);
-+ unsigned long flags;
-+
-+ raw_spin_lock_irqsave(&idle->pi_lock, flags);
-+ raw_spin_lock(&rq->lock);
-+ idle->last_ran = rq->niffies;
-+ time_slice_expired(idle, rq);
-+ idle->state = TASK_RUNNING;
-+ /* Setting prio to illegal value shouldn't matter when never queued */
-+ idle->prio = PRIO_LIMIT;
-+
-+ kasan_unpoison_task_stack(idle);
-+
-+#ifdef CONFIG_SMP
-+ /*
-+ * It's possible that init_idle() gets called multiple times on a task,
-+ * in that case do_set_cpus_allowed() will not do the right thing.
-+ *
-+ * And since this is boot we can forgo the serialisation.
-+ */
-+ set_cpus_allowed_common(idle, cpumask_of(cpu));
-+#ifdef CONFIG_SMT_NICE
-+ idle->smt_bias = 0;
-+#endif
-+#endif
-+ set_rq_task(rq, idle);
-+
-+ /* Silence PROVE_RCU */
-+ rcu_read_lock();
-+ set_task_cpu(idle, cpu);
-+ rcu_read_unlock();
-+
-+ rq->curr = rq->idle = idle;
-+ idle->on_rq = TASK_ON_RQ_QUEUED;
-+ raw_spin_unlock(&rq->lock);
-+ raw_spin_unlock_irqrestore(&idle->pi_lock, flags);
-+
-+ /* Set the preempt count _outside_ the spinlocks! */
-+ init_idle_preempt_count(idle, cpu);
-+
-+ ftrace_graph_init_idle_task(idle, cpu);
-+ vtime_init_idle(idle, cpu);
-+#ifdef CONFIG_SMP
-+ sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
-+#endif
-+}
-+
-+int cpuset_cpumask_can_shrink(const struct cpumask __maybe_unused *cur,
-+ const struct cpumask __maybe_unused *trial)
-+{
-+ return 1;
-+}
-+
-+int task_can_attach(struct task_struct *p,
-+ const struct cpumask *cs_cpus_allowed)
-+{
-+ int ret = 0;
-+
-+ /*
-+ * Kthreads which disallow setaffinity shouldn't be moved
-+ * to a new cpuset; we don't want to change their CPU
-+ * affinity and isolating such threads by their set of
-+ * allowed nodes is unnecessary. Thus, cpusets are not
-+ * applicable for such threads. This prevents checking for
-+ * success of set_cpus_allowed_ptr() on all attached tasks
-+ * before cpus_allowed may be changed.
-+ */
-+ if (p->flags & PF_NO_SETAFFINITY)
-+ ret = -EINVAL;
-+
-+ return ret;
-+}
-+
-+void resched_cpu(int cpu)
-+{
-+ struct rq *rq = cpu_rq(cpu);
-+ unsigned long flags;
-+
-+ rq_lock_irqsave(rq, &flags);
-+ resched_task(cpu_curr(cpu));
-+ rq_unlock_irqrestore(rq, &flags);
-+}
-+
-+#ifdef CONFIG_SMP
-+#ifdef CONFIG_NO_HZ_COMMON
-+void nohz_balance_enter_idle(int cpu)
-+{
-+}
-+
-+void select_nohz_load_balancer(int stop_tick)
-+{
-+}
-+
-+void set_cpu_sd_state_idle(void) {}
-+
-+/*
-+ * In the semi idle case, use the nearest busy CPU for migrating timers
-+ * from an idle CPU. This is good for power-savings.
-+ *
-+ * We don't do similar optimization for completely idle system, as
-+ * selecting an idle CPU will add more delays to the timers than intended
-+ * (as that CPU's timer base may not be uptodate wrt jiffies etc).
-+ */
-+int get_nohz_timer_target(void)
-+{
-+ int i, cpu = smp_processor_id();
-+ struct sched_domain *sd;
-+
-+ if (!idle_cpu(cpu) && is_housekeeping_cpu(cpu))
-+ return cpu;
-+
-+ rcu_read_lock();
-+ for_each_domain(cpu, sd) {
-+ for_each_cpu(i, sched_domain_span(sd)) {
-+ if (cpu == i)
-+ continue;
-+
-+ if (!idle_cpu(i) && is_housekeeping_cpu(i)) {
-+ cpu = i;
-+ goto unlock;
-+ }
-+ }
-+ }
-+
-+ if (!is_housekeeping_cpu(cpu))
-+ cpu = housekeeping_any_cpu();
-+unlock:
-+ rcu_read_unlock();
-+ return cpu;
-+}
-+
-+/*
-+ * When add_timer_on() enqueues a timer into the timer wheel of an
-+ * idle CPU then this timer might expire before the next timer event
-+ * which is scheduled to wake up that CPU. In case of a completely
-+ * idle system the next event might even be infinite time into the
-+ * future. wake_up_idle_cpu() ensures that the CPU is woken up and
-+ * leaves the inner idle loop so the newly added timer is taken into
-+ * account when the CPU goes back to idle and evaluates the timer
-+ * wheel for the next timer event.
-+ */
-+void wake_up_idle_cpu(int cpu)
-+{
-+ if (cpu == smp_processor_id())
-+ return;
-+
-+ if (set_nr_and_not_polling(cpu_rq(cpu)->idle))
-+ smp_sched_reschedule(cpu);
-+ else
-+ trace_sched_wake_idle_without_ipi(cpu);
-+}
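For illustration only, not part of the patch: a minimal sketch of the situation the comment above describes, where a driver arms a timer on a possibly idle remote CPU. The names demo_timer, demo_timer_fn and arm_demo_timer_on are hypothetical; timer_setup() and add_timer_on() are the stock timer API of this kernel era, and add_timer_on() is the path that ends up kicking the idle CPU via wake_up_nohz_cpu()/wake_up_idle_cpu().

    #include <linux/kernel.h>
    #include <linux/timer.h>
    #include <linux/jiffies.h>

    static struct timer_list demo_timer;        /* hypothetical */

    static void demo_timer_fn(struct timer_list *t)
    {
        pr_info("demo timer fired on CPU %d\n", smp_processor_id());
    }

    static void arm_demo_timer_on(int cpu)
    {
        timer_setup(&demo_timer, demo_timer_fn, 0);
        demo_timer.expires = jiffies + HZ;      /* roughly one second out */
        /* Enqueues on @cpu's timer wheel and wakes it if it is idle */
        add_timer_on(&demo_timer, cpu);
    }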
-+
-+static bool wake_up_full_nohz_cpu(int cpu)
-+{
-+ /*
-+ * We just need the target to call irq_exit() and re-evaluate
-+ * the next tick. The nohz full kick at least implies that.
-+ * If needed we can still optimize that later with an
-+ * empty IRQ.
-+ */
-+ if (cpu_is_offline(cpu))
-+ return true; /* Don't try to wake offline CPUs. */
-+ if (tick_nohz_full_cpu(cpu)) {
-+ if (cpu != smp_processor_id() ||
-+ tick_nohz_tick_stopped())
-+ tick_nohz_full_kick_cpu(cpu);
-+ return true;
-+ }
-+
-+ return false;
-+}
-+
-+/*
-+ * Wake up the specified CPU. If the CPU is going offline, it is the
-+ * caller's responsibility to deal with the lost wakeup, for example,
-+ * by hooking into the CPU_DEAD notifier like timers and hrtimers do.
-+ */
-+void wake_up_nohz_cpu(int cpu)
-+{
-+ if (!wake_up_full_nohz_cpu(cpu))
-+ wake_up_idle_cpu(cpu);
-+}
-+#endif /* CONFIG_NO_HZ_COMMON */
-+
-+/*
-+ * Change a given task's CPU affinity. Migrate the thread to a
-+ * proper CPU and schedule it away if the CPU it's executing on
-+ * is removed from the allowed bitmask.
-+ *
-+ * NOTE: the caller must have a valid reference to the task, the
-+ * task must not exit() & deallocate itself prematurely. The
-+ * call is not atomic; no spinlocks may be held.
-+ */
-+static int __set_cpus_allowed_ptr(struct task_struct *p,
-+ const struct cpumask *new_mask, bool check)
-+{
-+ const struct cpumask *cpu_valid_mask = cpu_active_mask;
-+ bool queued = false, running_wrong = false, kthread;
-+ struct cpumask old_mask;
-+ unsigned long flags;
-+ struct rq *rq;
-+ int ret = 0;
-+
-+ rq = task_rq_lock(p, &flags);
-+ update_rq_clock(rq);
-+
-+ kthread = !!(p->flags & PF_KTHREAD);
-+ if (kthread) {
-+ /*
-+ * Kernel threads are allowed on online && !active CPUs
-+ */
-+ cpu_valid_mask = cpu_online_mask;
-+ }
-+
-+ /*
-+ * Must re-check here, to close a race against __kthread_bind(),
-+ * sched_setaffinity() is not guaranteed to observe the flag.
-+ */
-+ if (check && (p->flags & PF_NO_SETAFFINITY)) {
-+ ret = -EINVAL;
-+ goto out;
-+ }
-+
-+ cpumask_copy(&old_mask, &p->cpus_allowed);
-+ if (cpumask_equal(&old_mask, new_mask))
-+ goto out;
-+
-+ if (!cpumask_intersects(new_mask, cpu_valid_mask)) {
-+ ret = -EINVAL;
-+ goto out;
-+ }
-+
-+ queued = task_queued(p);
-+ __do_set_cpus_allowed(p, new_mask);
-+
-+ if (kthread) {
-+ /*
-+ * For kernel threads that do indeed end up on online &&
-+ * !active we want to ensure they are strict per-CPU threads.
-+ */
-+ WARN_ON(cpumask_intersects(new_mask, cpu_online_mask) &&
-+ !cpumask_intersects(new_mask, cpu_active_mask) &&
-+ p->nr_cpus_allowed != 1);
-+ }
-+
-+ /* Can the task run on the task's current CPU? If so, we're done */
-+ if (cpumask_test_cpu(task_cpu(p), new_mask))
-+ goto out;
-+
-+ if (task_running(rq, p)) {
-+ /* Task is running on the wrong cpu now, reschedule it. */
-+ if (rq == this_rq()) {
-+ set_tsk_need_resched(p);
-+ running_wrong = true;
-+ } else
-+ resched_task(p);
-+ } else {
-+ int cpu = cpumask_any_and(cpu_valid_mask, new_mask);
-+
-+ if (queued) {
-+ /*
-+ * Switch runqueue locks after dequeueing the task
-+ * here, while still holding the pi_lock, so that we hold
-+ * the correct lock for enqueueing.
-+ */
-+ dequeue_task(rq, p, 0);
-+ rq_unlock(rq);
-+
-+ rq = cpu_rq(cpu);
-+ rq_lock(rq);
-+ }
-+ set_task_cpu(p, cpu);
-+ if (queued)
-+ enqueue_task(rq, p, 0);
-+ }
-+ if (queued)
-+ try_preempt(p, rq);
-+ if (running_wrong)
-+ preempt_disable();
-+out:
-+ task_rq_unlock(rq, p, &flags);
-+
-+ if (running_wrong) {
-+ __schedule(true);
-+ preempt_enable();
-+ }
-+
-+ return ret;
-+}
-+
-+int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
-+{
-+ return __set_cpus_allowed_ptr(p, new_mask, false);
-+}
-+EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
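A minimal, hypothetical caller of the interface exported above, honouring the NOTE before __set_cpus_allowed_ptr(): the caller holds a valid reference to the task and no spinlocks.

    #include <linux/cpumask.h>
    #include <linux/sched.h>

    /* Pin @p to a single CPU; returns 0 on success or -EINVAL as above. */
    static int pin_task_to_cpu(struct task_struct *p, int cpu)
    {
        return set_cpus_allowed_ptr(p, cpumask_of(cpu));
    }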
-+
-+#ifdef CONFIG_HOTPLUG_CPU
-+/*
-+ * Run through task list and find tasks affined to the dead cpu, then remove
-+ * that cpu from the list, enable cpu0 and set the zerobound flag. Must hold
-+ * cpu 0 and src_cpu's runqueue locks.
-+ */
-+static void bind_zero(int src_cpu)
-+{
-+ struct task_struct *p, *t;
-+ struct rq *rq0;
-+ int bound = 0;
-+
-+ if (src_cpu == 0)
-+ return;
-+
-+ rq0 = cpu_rq(0);
-+
-+ do_each_thread(t, p) {
-+ if (cpumask_test_cpu(src_cpu, &p->cpus_allowed)) {
-+ bool local = (task_cpu(p) == src_cpu);
-+ struct rq *rq = task_rq(p);
-+
-+ /* The only task still running locally here is the cpu stopper thread */
-+ if (local && task_running(rq, p))
-+ continue;
-+ atomic_clear_cpu(src_cpu, &p->cpus_allowed);
-+ atomic_set_cpu(0, &p->cpus_allowed);
-+ p->zerobound = true;
-+ bound++;
-+ if (local) {
-+ bool queued = task_queued(p);
-+
-+ if (queued)
-+ dequeue_task(rq, p, 0);
-+ set_task_cpu(p, 0);
-+ if (queued)
-+ enqueue_task(rq0, p, 0);
-+ }
-+ }
-+ } while_each_thread(t, p);
-+
-+ if (bound) {
-+ printk(KERN_INFO "Removed affinity for %d processes to cpu %d\n",
-+ bound, src_cpu);
-+ }
-+}
-+
-+/* Find processes with the zerobound flag and re-enable their affinity for
-+ * the CPU coming back online. */
-+static void unbind_zero(int src_cpu)
-+{
-+ int unbound = 0, zerobound = 0;
-+ struct task_struct *p, *t;
-+
-+ if (src_cpu == 0)
-+ return;
-+
-+ do_each_thread(t, p) {
-+ if (!p->mm)
-+ p->zerobound = false;
-+ if (p->zerobound) {
-+ unbound++;
-+ cpumask_set_cpu(src_cpu, &p->cpus_allowed);
-+ /* Once every CPU affinity has been re-enabled, remove
-+ * the zerobound flag */
-+ if (cpumask_subset(cpu_possible_mask, &p->cpus_allowed)) {
-+ p->zerobound = false;
-+ zerobound++;
-+ }
-+ }
-+ } while_each_thread(t, p);
-+
-+ if (unbound) {
-+ printk(KERN_INFO "Added affinity for %d processes to cpu %d\n",
-+ unbound, src_cpu);
-+ }
-+ if (zerobound) {
-+ printk(KERN_INFO "Released forced binding to cpu0 for %d processes\n",
-+ zerobound);
-+ }
-+}
-+
-+/*
-+ * Ensure that the idle task is using init_mm right before its cpu goes
-+ * offline.
-+ */
-+void idle_task_exit(void)
-+{
-+ struct mm_struct *mm = current->active_mm;
-+
-+ BUG_ON(cpu_online(smp_processor_id()));
-+
-+ if (mm != &init_mm) {
-+ switch_mm(mm, &init_mm, current);
-+ finish_arch_post_lock_switch();
-+ }
-+ mmdrop(mm);
-+}
-+#else /* CONFIG_HOTPLUG_CPU */
-+static void unbind_zero(int src_cpu) {}
-+#endif /* CONFIG_HOTPLUG_CPU */
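To exercise the bind_zero()/unbind_zero() paths above from userspace: offlining a CPU through sysfs forces its affine tasks onto cpu0, and onlining it again releases them. A hedged sketch using the standard sysfs hotplug files; error handling is elided.

    #include <stdio.h>

    /* Write 0/1 into /sys/devices/system/cpu/cpuN/online. */
    static void set_cpu_online_state(int cpu, int online)
    {
        char path[64];
        FILE *f;

        snprintf(path, sizeof(path),
                 "/sys/devices/system/cpu/cpu%d/online", cpu);
        f = fopen(path, "w");
        if (f) {
            fprintf(f, "%d\n", online);
            fclose(f);
        }
    }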
-+
-+void sched_set_stop_task(int cpu, struct task_struct *stop)
-+{
-+ struct sched_param stop_param = { .sched_priority = STOP_PRIO };
-+ struct sched_param start_param = { .sched_priority = 0 };
-+ struct task_struct *old_stop = cpu_rq(cpu)->stop;
-+
-+ if (stop) {
-+ /*
-+ * Make it appear like a SCHED_FIFO task, it's something
-+ * userspace knows about and won't get confused about.
-+ *
-+ * Also, it will make PI more or less work without too
-+ * much confusion -- but then, stop work should not
-+ * rely on PI working anyway.
-+ */
-+ sched_setscheduler_nocheck(stop, SCHED_FIFO, &stop_param);
-+ }
-+
-+ cpu_rq(cpu)->stop = stop;
-+
-+ if (old_stop) {
-+ /*
-+ * Reset it back to a normal scheduling policy so that
-+ * it can die in pieces.
-+ */
-+ sched_setscheduler_nocheck(old_stop, SCHED_NORMAL, &start_param);
-+ }
-+}
-+
-+#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
-+
-+static struct ctl_table sd_ctl_dir[] = {
-+ {
-+ .procname = "sched_domain",
-+ .mode = 0555,
-+ },
-+ {}
-+};
-+
-+static struct ctl_table sd_ctl_root[] = {
-+ {
-+ .procname = "kernel",
-+ .mode = 0555,
-+ .child = sd_ctl_dir,
-+ },
-+ {}
-+};
-+
-+static struct ctl_table *sd_alloc_ctl_entry(int n)
-+{
-+ struct ctl_table *entry =
-+ kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
-+
-+ return entry;
-+}
-+
-+static void sd_free_ctl_entry(struct ctl_table **tablep)
-+{
-+ struct ctl_table *entry;
-+
-+ /*
-+ * In the intermediate directories, both the child directory and
-+ * procname are dynamically allocated and could fail but the mode
-+ * will always be set. In the lowest directory the names are
-+ * static strings and all have proc handlers.
-+ */
-+ for (entry = *tablep; entry->mode; entry++) {
-+ if (entry->child)
-+ sd_free_ctl_entry(&entry->child);
-+ if (entry->proc_handler == NULL)
-+ kfree(entry->procname);
-+ }
-+
-+ kfree(*tablep);
-+ *tablep = NULL;
-+}
-+
-+#define CPU_LOAD_IDX_MAX 5
-+static int min_load_idx = 0;
-+static int max_load_idx = CPU_LOAD_IDX_MAX-1;
-+
-+static void
-+set_table_entry(struct ctl_table *entry,
-+ const char *procname, void *data, int maxlen,
-+ umode_t mode, proc_handler *proc_handler,
-+ bool load_idx)
-+{
-+ entry->procname = procname;
-+ entry->data = data;
-+ entry->maxlen = maxlen;
-+ entry->mode = mode;
-+ entry->proc_handler = proc_handler;
-+
-+ if (load_idx) {
-+ entry->extra1 = &min_load_idx;
-+ entry->extra2 = &max_load_idx;
-+ }
-+}
-+
-+static struct ctl_table *
-+sd_alloc_ctl_domain_table(struct sched_domain *sd)
-+{
-+ struct ctl_table *table = sd_alloc_ctl_entry(14);
-+
-+ if (table == NULL)
-+ return NULL;
-+
-+ set_table_entry(&table[0], "min_interval", &sd->min_interval,
-+ sizeof(long), 0644, proc_doulongvec_minmax, false);
-+ set_table_entry(&table[1], "max_interval", &sd->max_interval,
-+ sizeof(long), 0644, proc_doulongvec_minmax, false);
-+ set_table_entry(&table[2], "busy_idx", &sd->busy_idx,
-+ sizeof(int), 0644, proc_dointvec_minmax, true);
-+ set_table_entry(&table[3], "idle_idx", &sd->idle_idx,
-+ sizeof(int), 0644, proc_dointvec_minmax, true);
-+ set_table_entry(&table[4], "newidle_idx", &sd->newidle_idx,
-+ sizeof(int), 0644, proc_dointvec_minmax, true);
-+ set_table_entry(&table[5], "wake_idx", &sd->wake_idx,
-+ sizeof(int), 0644, proc_dointvec_minmax, true);
-+ set_table_entry(&table[6], "forkexec_idx", &sd->forkexec_idx,
-+ sizeof(int), 0644, proc_dointvec_minmax, true);
-+ set_table_entry(&table[7], "busy_factor", &sd->busy_factor,
-+ sizeof(int), 0644, proc_dointvec_minmax, false);
-+ set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct,
-+ sizeof(int), 0644, proc_dointvec_minmax, false);
-+ set_table_entry(&table[9], "cache_nice_tries",
-+ &sd->cache_nice_tries,
-+ sizeof(int), 0644, proc_dointvec_minmax, false);
-+ set_table_entry(&table[10], "flags", &sd->flags,
-+ sizeof(int), 0644, proc_dointvec_minmax, false);
-+ set_table_entry(&table[11], "max_newidle_lb_cost",
-+ &sd->max_newidle_lb_cost,
-+ sizeof(long), 0644, proc_doulongvec_minmax, false);
-+ set_table_entry(&table[12], "name", sd->name,
-+ CORENAME_MAX_SIZE, 0444, proc_dostring, false);
-+ /* &table[13] is terminator */
-+
-+ return table;
-+}
-+
-+static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu)
-+{
-+ struct ctl_table *entry, *table;
-+ struct sched_domain *sd;
-+ int domain_num = 0, i;
-+ char buf[32];
-+
-+ for_each_domain(cpu, sd)
-+ domain_num++;
-+ entry = table = sd_alloc_ctl_entry(domain_num + 1);
-+ if (table == NULL)
-+ return NULL;
-+
-+ i = 0;
-+ for_each_domain(cpu, sd) {
-+ snprintf(buf, 32, "domain%d", i);
-+ entry->procname = kstrdup(buf, GFP_KERNEL);
-+ entry->mode = 0555;
-+ entry->child = sd_alloc_ctl_domain_table(sd);
-+ entry++;
-+ i++;
-+ }
-+ return table;
-+}
-+
-+static cpumask_var_t sd_sysctl_cpus;
-+static struct ctl_table_header *sd_sysctl_header;
-+
-+void register_sched_domain_sysctl(void)
-+{
-+ static struct ctl_table *cpu_entries;
-+ static struct ctl_table **cpu_idx;
-+ char buf[32];
-+ int i;
-+
-+ if (!cpu_entries) {
-+ cpu_entries = sd_alloc_ctl_entry(num_possible_cpus() + 1);
-+ if (!cpu_entries)
-+ return;
-+
-+ WARN_ON(sd_ctl_dir[0].child);
-+ sd_ctl_dir[0].child = cpu_entries;
-+ }
-+
-+ if (!cpu_idx) {
-+ struct ctl_table *e = cpu_entries;
-+
-+ cpu_idx = kcalloc(nr_cpu_ids, sizeof(struct ctl_table*), GFP_KERNEL);
-+ if (!cpu_idx)
-+ return;
-+
-+ /* deal with sparse possible map */
-+ for_each_possible_cpu(i) {
-+ cpu_idx[i] = e;
-+ e++;
-+ }
-+ }
-+
-+ if (!cpumask_available(sd_sysctl_cpus)) {
-+ if (!alloc_cpumask_var(&sd_sysctl_cpus, GFP_KERNEL))
-+ return;
-+
-+ /* init to possible to not have holes in @cpu_entries */
-+ cpumask_copy(sd_sysctl_cpus, cpu_possible_mask);
-+ }
-+
-+ for_each_cpu(i, sd_sysctl_cpus) {
-+ struct ctl_table *e = cpu_idx[i];
-+
-+ if (e->child)
-+ sd_free_ctl_entry(&e->child);
-+
-+ if (!e->procname) {
-+ snprintf(buf, 32, "cpu%d", i);
-+ e->procname = kstrdup(buf, GFP_KERNEL);
-+ }
-+ e->mode = 0555;
-+ e->child = sd_alloc_ctl_cpu_table(i);
-+
-+ __cpumask_clear_cpu(i, sd_sysctl_cpus);
-+ }
-+
-+ WARN_ON(sd_sysctl_header);
-+ sd_sysctl_header = register_sysctl_table(sd_ctl_root);
-+}
-+
-+void dirty_sched_domain_sysctl(int cpu)
-+{
-+ if (cpumask_available(sd_sysctl_cpus))
-+ __cpumask_set_cpu(cpu, sd_sysctl_cpus);
-+}
-+
-+/* may be called multiple times per register */
-+void unregister_sched_domain_sysctl(void)
-+{
-+ unregister_sysctl_table(sd_sysctl_header);
-+ sd_sysctl_header = NULL;
-+}
-+#endif /* CONFIG_SCHED_DEBUG && CONFIG_SYSCTL */
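The sd_ctl_root/sd_ctl_dir construction above follows the stock pre-5.x sysctl pattern. As a self-contained sketch of that same pattern (the demo_* names are hypothetical; register_sysctl_table() and the .child directory linkage are the real interface of this kernel era):

    #include <linux/errno.h>
    #include <linux/init.h>
    #include <linux/sysctl.h>

    static int demo_value;                      /* hypothetical tunable */

    static struct ctl_table demo_table[] = {
        {
            .procname     = "demo_value",
            .data         = &demo_value,
            .maxlen       = sizeof(int),
            .mode         = 0644,
            .proc_handler = proc_dointvec,
        },
        { }
    };

    static struct ctl_table demo_root[] = {
        {
            .procname = "kernel",               /* /proc/sys/kernel/ */
            .mode     = 0555,
            .child    = demo_table,
        },
        { }
    };

    static struct ctl_table_header *demo_hdr;

    static int __init demo_sysctl_init(void)
    {
        demo_hdr = register_sysctl_table(demo_root);
        return demo_hdr ? 0 : -ENOMEM;
    }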
-+
-+void set_rq_online(struct rq *rq)
-+{
-+ if (!rq->online) {
-+ cpumask_set_cpu(cpu_of(rq), rq->rd->online);
-+ rq->online = true;
-+ }
-+}
-+
-+void set_rq_offline(struct rq *rq)
-+{
-+ if (rq->online) {
-+ int cpu = cpu_of(rq);
-+
-+ cpumask_clear_cpu(cpu, rq->rd->online);
-+ rq->online = false;
-+ clear_cpuidle_map(cpu);
-+ }
-+}
-+
-+/*
-+ * used to mark begin/end of suspend/resume:
-+ */
-+static int num_cpus_frozen;
-+
-+/*
-+ * Update cpusets according to cpu_active mask. If cpusets are
-+ * disabled, cpuset_update_active_cpus() becomes a simple wrapper
-+ * around partition_sched_domains().
-+ *
-+ * If we come here as part of a suspend/resume, don't touch cpusets because we
-+ * want to restore it back to its original state upon resume anyway.
-+ */
-+static void cpuset_cpu_active(void)
-+{
-+ if (cpuhp_tasks_frozen) {
-+ /*
-+ * num_cpus_frozen tracks how many CPUs are involved in the
-+ * suspend/resume sequence. As long as this is not the last online
-+ * operation in the resume sequence, just build a single sched
-+ * domain, ignoring cpusets.
-+ */
-+ partition_sched_domains(1, NULL, NULL);
-+ if (--num_cpus_frozen)
-+ return;
-+ /*
-+ * This is the last CPU online operation. So fall through and
-+ * restore the original sched domains by considering the
-+ * cpuset configurations.
-+ */
-+ cpuset_force_rebuild();
-+ }
-+
-+ cpuset_update_active_cpus();
-+}
-+
-+static int cpuset_cpu_inactive(unsigned int cpu)
-+{
-+ if (!cpuhp_tasks_frozen) {
-+ cpuset_update_active_cpus();
-+ } else {
-+ num_cpus_frozen++;
-+ partition_sched_domains(1, NULL, NULL);
-+ }
-+ return 0;
-+}
-+
-+int sched_cpu_activate(unsigned int cpu)
-+{
-+ struct rq *rq = cpu_rq(cpu);
-+ unsigned long flags;
-+
-+ set_cpu_active(cpu, true);
-+
-+ if (sched_smp_initialized) {
-+ sched_domains_numa_masks_set(cpu);
-+ cpuset_cpu_active();
-+ }
-+
-+ /*
-+ * Put the rq online, if not already. This happens:
-+ *
-+ * 1) In the early boot process, because we build the real domains
-+ * after all CPUs have been brought up.
-+ *
-+ * 2) At runtime, if cpuset_cpu_active() fails to rebuild the
-+ * domains.
-+ */
-+ rq_lock_irqsave(rq, &flags);
-+ if (rq->rd) {
-+ BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
-+ set_rq_online(rq);
-+ }
-+ unbind_zero(cpu);
-+ rq_unlock_irqrestore(rq, &flags);
-+
-+ return 0;
-+}
-+
-+int sched_cpu_deactivate(unsigned int cpu)
-+{
-+ int ret;
-+
-+ set_cpu_active(cpu, false);
-+ /*
-+ * We've cleared cpu_active_mask, wait for all preempt-disabled and RCU
-+ * users of this state to go away such that all new such users will
-+ * observe it.
-+ *
-+ * Do the sync before parking smpboot threads to take care of the rcu boost case.
-+ */
-+ synchronize_rcu_mult(call_rcu, call_rcu_sched);
-+
-+ if (!sched_smp_initialized)
-+ return 0;
-+
-+ ret = cpuset_cpu_inactive(cpu);
-+ if (ret) {
-+ set_cpu_active(cpu, true);
-+ return ret;
-+ }
-+ sched_domains_numa_masks_clear(cpu);
-+ return 0;
-+}
-+
-+int sched_cpu_starting(unsigned int __maybe_unused cpu)
-+{
-+ return 0;
-+}
-+
-+#ifdef CONFIG_HOTPLUG_CPU
-+int sched_cpu_dying(unsigned int cpu)
-+{
-+ struct rq *rq = cpu_rq(cpu);
-+ unsigned long flags;
-+
-+ local_irq_save(flags);
-+ double_rq_lock(rq, cpu_rq(0));
-+ if (rq->rd) {
-+ BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
-+ set_rq_offline(rq);
-+ }
-+ bind_zero(cpu);
-+ double_rq_unlock(rq, cpu_rq(0));
-+ sched_start_tick(rq, cpu);
-+ hrexpiry_clear(rq);
-+ local_irq_restore(flags);
-+
-+ return 0;
-+}
-+#endif
-+
-+#if defined(CONFIG_SCHED_SMT) || defined(CONFIG_SCHED_MC)
-+/*
-+ * Cheaper version of the below functions in case support for SMT and MC is
-+ * compiled in but CPUs have no siblings.
-+ */
-+static bool sole_cpu_idle(struct rq *rq)
-+{
-+ return rq_idle(rq);
-+}
-+#endif
-+#ifdef CONFIG_SCHED_SMT
-+static const cpumask_t *thread_cpumask(int cpu)
-+{
-+ return topology_sibling_cpumask(cpu);
-+}
-+/* All this CPU's SMT siblings are idle */
-+static bool siblings_cpu_idle(struct rq *rq)
-+{
-+ return cpumask_subset(&rq->thread_mask, &cpu_idle_map);
-+}
-+#endif
-+#ifdef CONFIG_SCHED_MC
-+static const cpumask_t *core_cpumask(int cpu)
-+{
-+ return topology_core_cpumask(cpu);
-+}
-+/* All this CPU's shared cache siblings are idle */
-+static bool cache_cpu_idle(struct rq *rq)
-+{
-+ return cpumask_subset(&rq->core_mask, &cpu_idle_map);
-+}
-+#endif
-+
-+enum sched_domain_level {
-+ SD_LV_NONE = 0,
-+ SD_LV_SIBLING,
-+ SD_LV_MC,
-+ SD_LV_BOOK,
-+ SD_LV_CPU,
-+ SD_LV_NODE,
-+ SD_LV_ALLNODES,
-+ SD_LV_MAX
-+};
-+
-+void __init sched_init_smp(void)
-+{
-+ struct sched_domain *sd;
-+ int cpu, other_cpu;
-+#ifdef CONFIG_SCHED_SMT
-+ bool smt_threads = false;
-+#endif
-+ cpumask_var_t non_isolated_cpus;
-+ struct rq *rq;
-+
-+ alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL);
-+
-+ sched_init_numa();
-+
-+ /*
-+ * There's no userspace yet to cause hotplug operations; hence all the
-+ * cpu masks are stable and all blatant races in the below code cannot
-+ * happen.
-+ */
-+ mutex_lock(&sched_domains_mutex);
-+ sched_init_domains(cpu_active_mask);
-+ cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map);
-+ if (cpumask_empty(non_isolated_cpus))
-+ cpumask_set_cpu(smp_processor_id(), non_isolated_cpus);
-+ mutex_unlock(&sched_domains_mutex);
-+
-+ /* Move init over to a non-isolated CPU */
-+ if (set_cpus_allowed_ptr(current, non_isolated_cpus) < 0)
-+ BUG();
-+ free_cpumask_var(non_isolated_cpus);
-+
-+ mutex_lock(&sched_domains_mutex);
-+ local_irq_disable();
-+ lock_all_rqs();
-+ /*
-+ * Set up the relative cache distance of each online cpu from each
-+ * other in a simple array for quick lookup. Locality is determined
-+ * by the closest sched_domain that CPUs are separated by. CPUs with
-+ * shared cache in SMT and MC are treated as local. Separate CPUs
-+ * (within the same package or physically) within the same node are
-+ * treated as not local. CPUs not even in the same domain (different
-+ * nodes) are treated as very distant.
-+ */
-+ for_each_online_cpu(cpu) {
-+ rq = cpu_rq(cpu);
-+
-+ /* First check if this cpu is in the same node */
-+ for_each_domain(cpu, sd) {
-+ if (sd->level > SD_LV_MC)
-+ continue;
-+ /* Set locality to local node if not already found lower */
-+ for_each_cpu(other_cpu, sched_domain_span(sd)) {
-+ if (rq->cpu_locality[other_cpu] > 3)
-+ rq->cpu_locality[other_cpu] = 3;
-+ }
-+ }
-+
-+ /*
-+ * Each runqueue has its own function in case it doesn't have
-+ * siblings of its own allowing mixed topologies.
-+ */
-+#ifdef CONFIG_SCHED_MC
-+ for_each_cpu(other_cpu, core_cpumask(cpu)) {
-+ if (rq->cpu_locality[other_cpu] > 2)
-+ rq->cpu_locality[other_cpu] = 2;
-+ }
-+ if (cpumask_weight(core_cpumask(cpu)) > 1) {
-+ cpumask_copy(&rq->core_mask, core_cpumask(cpu));
-+ cpumask_clear_cpu(cpu, &rq->core_mask);
-+ rq->cache_idle = cache_cpu_idle;
-+ }
-+#endif
-+#ifdef CONFIG_SCHED_SMT
-+ if (cpumask_weight(thread_cpumask(cpu)) > 1) {
-+ cpumask_copy(&rq->thread_mask, thread_cpumask(cpu));
-+ cpumask_clear_cpu(cpu, &rq->thread_mask);
-+ for_each_cpu(other_cpu, thread_cpumask(cpu))
-+ rq->cpu_locality[other_cpu] = 1;
-+ rq->siblings_idle = siblings_cpu_idle;
-+ smt_threads = true;
-+ }
-+#endif
-+ }
-+ for_each_possible_cpu(cpu) {
-+ int total_cpus = 1, locality;
-+
-+ rq = cpu_rq(cpu);
-+ for (locality = 1; locality <= 4; locality++) {
-+ for_each_possible_cpu(other_cpu) {
-+ if (rq->cpu_locality[other_cpu] == locality)
-+ rq->rq_order[total_cpus++] = cpu_rq(other_cpu);
-+ }
-+ }
-+ }
-+#ifdef CONFIG_SMT_NICE
-+ if (smt_threads) {
-+ check_siblings = &check_smt_siblings;
-+ wake_siblings = &wake_smt_siblings;
-+ smt_schedule = &smt_should_schedule;
-+ }
-+#endif
-+ unlock_all_rqs();
-+ local_irq_enable();
-+ mutex_unlock(&sched_domains_mutex);
-+
-+ for_each_online_cpu(cpu) {
-+ rq = cpu_rq(cpu);
-+
-+ for_each_online_cpu(other_cpu) {
-+ if (other_cpu <= cpu)
-+ continue;
-+ printk(KERN_DEBUG "MuQSS locality CPU %d to %d: %d\n", cpu, other_cpu, rq->cpu_locality[other_cpu]);
-+ }
-+ }
-+
-+ sched_smp_initialized = true;
-+}
-+#else
-+void __init sched_init_smp(void)
-+{
-+ sched_smp_initialized = true;
-+}
-+#endif /* CONFIG_SMP */
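To make the locality values assigned in sched_init_smp() concrete: on a hypothetical single-package machine with two cores of two SMT threads each, the resulting cpu_locality matrix would look as below (0 = self, 1 = SMT sibling, 2 = shared cache, 3 = same node, 4 = distant).

    /* Illustration only -- cpu_locality[cpu][other] for cpus 0-3:
     *
     *            cpu0  cpu1  cpu2  cpu3
     *   cpu0   {   0,    1,    2,    2 }   cpu1 is cpu0's SMT sibling
     *   cpu1   {   1,    0,    2,    2 }
     *   cpu2   {   2,    2,    0,    1 }   cpu3 is cpu2's SMT sibling
     *   cpu3   {   2,    2,    1,    0 }
     *
     * rq_order for cpu0 would then be rq0, rq1, rq2, rq3: self first,
     * then ascending locality.
     */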
-+
-+int in_sched_functions(unsigned long addr)
-+{
-+ return in_lock_functions(addr) ||
-+ (addr >= (unsigned long)__sched_text_start
-+ && addr < (unsigned long)__sched_text_end);
-+}
-+
-+#ifdef CONFIG_CGROUP_SCHED
-+/* task group related information */
-+struct task_group {
-+ struct cgroup_subsys_state css;
-+
-+ struct rcu_head rcu;
-+ struct list_head list;
-+
-+ struct task_group *parent;
-+ struct list_head siblings;
-+ struct list_head children;
-+};
-+
-+/*
-+ * Default task group.
-+ * Every task in system belongs to this group at bootup.
-+ */
-+struct task_group root_task_group;
-+LIST_HEAD(task_groups);
-+
-+/* Cacheline aligned slab cache for task_group */
-+static struct kmem_cache *task_group_cache __read_mostly;
-+#endif /* CONFIG_CGROUP_SCHED */
-+
-+void __init sched_init(void)
-+{
-+#ifdef CONFIG_SMP
-+ int cpu_ids;
-+#endif
-+ int i;
-+ struct rq *rq;
-+
-+ sched_clock_init();
-+
-+ wait_bit_init();
-+
-+ prio_ratios[0] = 128;
-+ for (i = 1 ; i < NICE_WIDTH ; i++)
-+ prio_ratios[i] = prio_ratios[i - 1] * 11 / 10;
-+
-+ skiplist_node_init(&init_task.node);
-+
-+#ifdef CONFIG_SMP
-+ init_defrootdomain();
-+ cpumask_clear(&cpu_idle_map);
-+#else
-+ uprq = &per_cpu(runqueues, 0);
-+#endif
-+
-+#ifdef CONFIG_CGROUP_SCHED
-+ task_group_cache = KMEM_CACHE(task_group, 0);
-+
-+ list_add(&root_task_group.list, &task_groups);
-+ INIT_LIST_HEAD(&root_task_group.children);
-+ INIT_LIST_HEAD(&root_task_group.siblings);
-+#endif /* CONFIG_CGROUP_SCHED */
-+ for_each_possible_cpu(i) {
-+ rq = cpu_rq(i);
-+ skiplist_init(&rq->node);
-+ rq->sl = new_skiplist(&rq->node);
-+ raw_spin_lock_init(&rq->lock);
-+ rq->nr_running = 0;
-+ rq->nr_uninterruptible = 0;
-+ rq->nr_switches = 0;
-+ rq->clock = rq->old_clock = rq->last_niffy = rq->niffies = 0;
-+ rq->last_jiffy = jiffies;
-+ rq->user_ns = rq->nice_ns = rq->softirq_ns = rq->system_ns =
-+ rq->iowait_ns = rq->idle_ns = 0;
-+ rq->dither = 0;
-+ set_rq_task(rq, &init_task);
-+ rq->iso_ticks = 0;
-+ rq->iso_refractory = false;
-+#ifdef CONFIG_SMP
-+ rq->sd = NULL;
-+ rq->rd = NULL;
-+ rq->online = false;
-+ rq->cpu = i;
-+ rq_attach_root(rq, &def_root_domain);
-+#endif
-+ init_rq_hrexpiry(rq);
-+ atomic_set(&rq->nr_iowait, 0);
-+ }
-+
-+#ifdef CONFIG_SMP
-+ cpu_ids = i;
-+ /*
-+ * Set the base locality for cpu cache distance calculation to
-+ * "distant" (3). Make sure the distance from a CPU to itself is 0.
-+ */
-+ for_each_possible_cpu(i) {
-+ int j;
-+
-+ rq = cpu_rq(i);
-+#ifdef CONFIG_SCHED_SMT
-+ rq->siblings_idle = sole_cpu_idle;
-+#endif
-+#ifdef CONFIG_SCHED_MC
-+ rq->cache_idle = sole_cpu_idle;
-+#endif
-+ rq->cpu_locality = kmalloc(cpu_ids * sizeof(int), GFP_ATOMIC);
-+ for_each_possible_cpu(j) {
-+ if (i == j)
-+ rq->cpu_locality[j] = 0;
-+ else
-+ rq->cpu_locality[j] = 4;
-+ }
-+ rq->rq_order = kmalloc(cpu_ids * sizeof(struct rq *), GFP_ATOMIC);
-+ rq->rq_order[0] = rq;
-+ for (j = 1; j < cpu_ids; j++)
-+ rq->rq_order[j] = cpu_rq(j);
-+ }
-+#endif
-+
-+ /*
-+ * The boot idle thread does lazy MMU switching as well:
-+ */
-+ mmgrab(&init_mm);
-+ enter_lazy_tlb(&init_mm, current);
-+
-+ /*
-+ * Make us the idle thread. Technically, schedule() should not be
-+ * called from this thread, however somewhere below it might be,
-+ * but because we are the idle thread, we just pick up running again
-+ * when this runqueue becomes "idle".
-+ */
-+ init_idle(current, smp_processor_id());
-+
-+#ifdef CONFIG_SMP
-+ /* May be allocated at isolcpus cmdline parse time */
-+ if (cpu_isolated_map == NULL)
-+ zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
-+ idle_thread_set_boot_cpu();
-+#endif /* SMP */
-+
-+ init_schedstats();
-+}
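For reference, the prio_ratios loop in sched_init() compounds roughly 10% per nice level in integer arithmetic; MuQSS uses these ratios elsewhere to scale task deadlines by priority. The first few values work out as:

    /* prio_ratios[i] = prio_ratios[i - 1] * 11 / 10, starting at 128:
     *
     *   prio_ratios[0] = 128    nice-level baseline
     *   prio_ratios[1] = 140    128 * 11 / 10
     *   prio_ratios[2] = 154    140 * 11 / 10
     *   prio_ratios[3] = 169    154 * 11 / 10 (1694 / 10, truncated)
     *   prio_ratios[4] = 185
     *
     * so each step away from the baseline costs roughly 10%, compounded.
     */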
-+
-+#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
-+static inline int preempt_count_equals(int preempt_offset)
-+{
-+ int nested = preempt_count() + rcu_preempt_depth();
-+
-+ return (nested == preempt_offset);
-+}
-+
-+void __might_sleep(const char *file, int line, int preempt_offset)
-+{
-+ /*
-+ * Blocking primitives will set (and therefore destroy) current->state,
-+ * since we will exit with TASK_RUNNING make sure we enter with it,
-+ * otherwise we will destroy state.
-+ */
-+ WARN_ONCE(current->state != TASK_RUNNING && current->task_state_change,
-+ "do not call blocking ops when !TASK_RUNNING; "
-+ "state=%lx set at [<%p>] %pS\n",
-+ current->state,
-+ (void *)current->task_state_change,
-+ (void *)current->task_state_change);
-+
-+ ___might_sleep(file, line, preempt_offset);
-+}
-+EXPORT_SYMBOL(__might_sleep);
-+
-+void ___might_sleep(const char *file, int line, int preempt_offset)
-+{
-+ /* Ratelimiting timestamp: */
-+ static unsigned long prev_jiffy;
-+
-+ unsigned long preempt_disable_ip;
-+
-+ /* WARN_ON_ONCE() by default, no rate limit required: */
-+ rcu_sleep_check();
-+
-+ if ((preempt_count_equals(preempt_offset) && !irqs_disabled() &&
-+ !is_idle_task(current)) ||
-+ system_state == SYSTEM_BOOTING || system_state > SYSTEM_RUNNING ||
-+ oops_in_progress)
-+ return;
-+
-+ if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
-+ return;
-+ prev_jiffy = jiffies;
-+
-+ /* Save this before calling printk(), since that will clobber it: */
-+ preempt_disable_ip = get_preempt_disable_ip(current);
-+
-+ printk(KERN_ERR
-+ "BUG: sleeping function called from invalid context at %s:%d\n",
-+ file, line);
-+ printk(KERN_ERR
-+ "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
-+ in_atomic(), irqs_disabled(),
-+ current->pid, current->comm);
-+
-+ if (task_stack_end_corrupted(current))
-+ printk(KERN_EMERG "Thread overran stack, or stack corrupted\n");
-+
-+ debug_show_held_locks(current);
-+ if (irqs_disabled())
-+ print_irqtrace_events(current);
-+ if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)
-+ && !preempt_count_equals(preempt_offset)) {
-+ pr_err("Preemption disabled at:");
-+ print_ip_sym(preempt_disable_ip);
-+ pr_cont("\n");
-+ }
-+ dump_stack();
-+ add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
-+}
-+EXPORT_SYMBOL(___might_sleep);
-+#endif
-+
-+#ifdef CONFIG_MAGIC_SYSRQ
-+static inline void normalise_rt_tasks(void)
-+{
-+ struct task_struct *g, *p;
-+ unsigned long flags;
-+ struct rq *rq;
-+
-+ read_lock(&tasklist_lock);
-+ for_each_process_thread(g, p) {
-+ /*
-+ * Only normalize user tasks:
-+ */
-+ if (p->flags & PF_KTHREAD)
-+ continue;
-+
-+ if (!rt_task(p) && !iso_task(p))
-+ continue;
-+
-+ rq = task_rq_lock(p, &flags);
-+ __setscheduler(p, rq, SCHED_NORMAL, 0, false);
-+ task_rq_unlock(rq, p, &flags);
-+ }
-+ read_unlock(&tasklist_lock);
-+}
-+
-+void normalize_rt_tasks(void)
-+{
-+ normalise_rt_tasks();
-+}
-+#endif /* CONFIG_MAGIC_SYSRQ */
-+
-+#if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB)
-+/*
-+ * These functions are only useful for the IA64 MCA handling, or kdb.
-+ *
-+ * They can only be called when the whole system has been
-+ * stopped - every CPU needs to be quiescent, and no scheduling
-+ * activity can take place. Using them for anything else would
-+ * be a serious bug, and as a result, they aren't even visible
-+ * under any other configuration.
-+ */
-+
-+/**
-+ * curr_task - return the current task for a given CPU.
-+ * @cpu: the processor in question.
-+ *
-+ * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
-+ *
-+ * Return: The current task for @cpu.
-+ */
-+struct task_struct *curr_task(int cpu)
-+{
-+ return cpu_curr(cpu);
-+}
-+
-+#endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */
-+
-+#ifdef CONFIG_IA64
-+/**
-+ * set_curr_task - set the current task for a given CPU.
-+ * @cpu: the processor in question.
-+ * @p: the task pointer to set.
-+ *
-+ * Description: This function must only be used when non-maskable interrupts
-+ * are serviced on a separate stack. It allows the architecture to switch the
-+ * notion of the current task on a CPU in a non-blocking manner. This function
-+ * must be called with all CPUs synchronised and interrupts disabled; the
-+ * caller must save the original value of the current task (see
-+ * curr_task() above) and restore that value before re-enabling interrupts and
-+ * re-starting the system.
-+ *
-+ * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
-+ */
-+void ia64_set_curr_task(int cpu, struct task_struct *p)
-+{
-+ cpu_curr(cpu) = p;
-+}
-+
-+#endif
-+
-+void init_idle_bootup_task(struct task_struct *idle)
-+{}
-+
-+#ifdef CONFIG_SCHED_DEBUG
-+__read_mostly bool sched_debug_enabled;
-+
-+void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
-+ struct seq_file *m)
-+{}
-+
-+void proc_sched_set_task(struct task_struct *p)
-+{}
-+#endif
-+
-+#ifdef CONFIG_SMP
-+#define SCHED_LOAD_SHIFT (10)
-+#define SCHED_LOAD_SCALE (1L << SCHED_LOAD_SHIFT)
-+
-+unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu)
-+{
-+ return SCHED_LOAD_SCALE;
-+}
-+
-+unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu)
-+{
-+ unsigned long weight = cpumask_weight(sched_domain_span(sd));
-+ unsigned long smt_gain = sd->smt_gain;
-+
-+ smt_gain /= weight;
-+
-+ return smt_gain;
-+}
-+#endif
-+
-+#ifdef CONFIG_CGROUP_SCHED
-+static void sched_free_group(struct task_group *tg)
-+{
-+ kmem_cache_free(task_group_cache, tg);
-+}
-+
-+/* allocate runqueue etc for a new task group */
-+struct task_group *sched_create_group(struct task_group *parent)
-+{
-+ struct task_group *tg;
-+
-+ tg = kmem_cache_alloc(task_group_cache, GFP_KERNEL | __GFP_ZERO);
-+ if (!tg)
-+ return ERR_PTR(-ENOMEM);
-+
-+ return tg;
-+}
-+
-+void sched_online_group(struct task_group *tg, struct task_group *parent)
-+{
-+}
-+
-+/* rcu callback to free various structures associated with a task group */
-+static void sched_free_group_rcu(struct rcu_head *rhp)
-+{
-+ /* Now it should be safe to free the task group */
-+ sched_free_group(container_of(rhp, struct task_group, rcu));
-+}
-+
-+void sched_destroy_group(struct task_group *tg)
-+{
-+ /* Wait for possible concurrent references to the group to complete */
-+ call_rcu(&tg->rcu, sched_free_group_rcu);
-+}
-+
-+void sched_offline_group(struct task_group *tg)
-+{
-+}
-+
-+static inline struct task_group *css_tg(struct cgroup_subsys_state *css)
-+{
-+ return css ? container_of(css, struct task_group, css) : NULL;
-+}
-+
-+static struct cgroup_subsys_state *
-+cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
-+{
-+ struct task_group *parent = css_tg(parent_css);
-+ struct task_group *tg;
-+
-+ if (!parent) {
-+ /* This is early initialization for the top cgroup */
-+ return &root_task_group.css;
-+ }
-+
-+ tg = sched_create_group(parent);
-+ if (IS_ERR(tg))
-+ return ERR_PTR(-ENOMEM);
-+ return &tg->css;
-+}
-+
-+/* Expose task group only after completing cgroup initialization */
-+static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
-+{
-+ struct task_group *tg = css_tg(css);
-+ struct task_group *parent = css_tg(css->parent);
-+
-+ if (parent)
-+ sched_online_group(tg, parent);
-+ return 0;
-+}
-+
-+static void cpu_cgroup_css_released(struct cgroup_subsys_state *css)
-+{
-+ struct task_group *tg = css_tg(css);
-+
-+ sched_offline_group(tg);
-+}
-+
-+static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
-+{
-+ struct task_group *tg = css_tg(css);
-+
-+ /*
-+ * Relies on the RCU grace period between css_released() and this.
-+ */
-+ sched_free_group(tg);
-+}
-+
-+static void cpu_cgroup_fork(struct task_struct *task)
-+{
-+}
-+
-+static int cpu_cgroup_can_attach(struct cgroup_taskset *tset)
-+{
-+ return 0;
-+}
-+
-+static void cpu_cgroup_attach(struct cgroup_taskset *tset)
-+{
-+}
-+
-+static struct cftype cpu_files[] = {
-+ { } /* Terminate */
-+};
-+
-+struct cgroup_subsys cpu_cgrp_subsys = {
-+ .css_alloc = cpu_cgroup_css_alloc,
-+ .css_online = cpu_cgroup_css_online,
-+ .css_released = cpu_cgroup_css_released,
-+ .css_free = cpu_cgroup_css_free,
-+ .fork = cpu_cgroup_fork,
-+ .can_attach = cpu_cgroup_can_attach,
-+ .attach = cpu_cgroup_attach,
-+ .legacy_cftypes = cpu_files,
-+ .early_init = true,
-+};
-+#endif /* CONFIG_CGROUP_SCHED */
-diff -Nur a/kernel/sched/MuQSS.h b/kernel/sched/MuQSS.h
---- a/kernel/sched/MuQSS.h 1970-01-01 01:00:00.000000000 +0100
-+++ b/kernel/sched/MuQSS.h 2019-01-05 20:22:51.099998516 +0000
-@@ -0,0 +1,725 @@
-+/* SPDX-License-Identifier: GPL-2.0 */
-+#include <linux/sched.h>
-+#include <linux/cpuidle.h>
-+#include <linux/freezer.h>
-+#include <linux/interrupt.h>
-+#include <linux/skip_list.h>
-+#include <linux/stop_machine.h>
-+#include <linux/sched/topology.h>
-+#include <linux/u64_stats_sync.h>
-+#include <linux/tsacct_kern.h>
-+#include <linux/sched/clock.h>
-+#include <linux/sched/wake_q.h>
-+#include <linux/sched/signal.h>
-+#include <linux/sched/mm.h>
-+#include <linux/sched/cpufreq.h>
-+#include <linux/sched/stat.h>
-+#include <linux/sched/nohz.h>
-+#include <linux/sched/debug.h>
-+#include <linux/sched/hotplug.h>
-+#include <linux/sched/task.h>
-+#include <linux/sched/task_stack.h>
-+#include <linux/sched/cputime.h>
-+#include <linux/sched/init.h>
-+
-+#include <linux/kernel_stat.h>
-+#include <linux/tick.h>
-+#include <linux/slab.h>
-+
-+#ifdef CONFIG_PARAVIRT
-+#include <asm/paravirt.h>
-+#endif
-+
-+#include "cpuacct.h"
-+
-+#ifndef MUQSS_SCHED_H
-+#define MUQSS_SCHED_H
-+
-+#ifdef CONFIG_SCHED_DEBUG
-+# define SCHED_WARN_ON(x) WARN_ONCE(x, #x)
-+#else
-+# define SCHED_WARN_ON(x) ((void)(x))
-+#endif
-+
-+/* task_struct::on_rq states: */
-+#define TASK_ON_RQ_QUEUED 1
-+#define TASK_ON_RQ_MIGRATING 2
-+
-+struct rq;
-+
-+#ifdef CONFIG_SMP
-+
-+static inline bool sched_asym_prefer(int a, int b)
-+{
-+ return arch_asym_cpu_priority(a) > arch_asym_cpu_priority(b);
-+}
-+
-+/*
-+ * We add the notion of a root-domain which will be used to define per-domain
-+ * variables. Each exclusive cpuset essentially defines an island domain by
-+ * fully partitioning the member cpus from any other cpuset. Whenever a new
-+ * exclusive cpuset is created, we also create and attach a new root-domain
-+ * object.
-+ *
-+ */
-+struct root_domain {
-+ atomic_t refcount;
-+ atomic_t rto_count;
-+ struct rcu_head rcu;
-+ cpumask_var_t span;
-+ cpumask_var_t online;
-+
-+ /* Indicate more than one runnable task for any CPU */
-+ bool overload;
-+
-+ /*
-+ * The bit corresponding to a CPU gets set here if such CPU has more
-+ * than one runnable -deadline task (as it is below for RT tasks).
-+ */
-+ cpumask_var_t dlo_mask;
-+ atomic_t dlo_count;
-+ /* Replace unused CFS structures with void */
-+ //struct dl_bw dl_bw;
-+ //struct cpudl cpudl;
-+ void *dl_bw;
-+ void *cpudl;
-+
-+ /*
-+ * The "RT overload" flag: it gets set if a CPU has more than
-+ * one runnable RT task.
-+ */
-+ cpumask_var_t rto_mask;
-+ //struct cpupri cpupri;
-+ void *cpupri;
-+
-+ unsigned long max_cpu_capacity;
-+};
-+
-+extern struct root_domain def_root_domain;
-+extern struct mutex sched_domains_mutex;
-+
-+extern void init_defrootdomain(void);
-+extern int sched_init_domains(const struct cpumask *cpu_map);
-+extern void rq_attach_root(struct rq *rq, struct root_domain *rd);
-+
-+static inline void cpupri_cleanup(void __maybe_unused *cpupri)
-+{
-+}
-+
-+static inline void cpudl_cleanup(void __maybe_unused *cpudl)
-+{
-+}
-+
-+static inline void init_dl_bw(void __maybe_unused *dl_bw)
-+{
-+}
-+
-+static inline int cpudl_init(void __maybe_unused *dl_bw)
-+{
-+ return 0;
-+}
-+
-+static inline int cpupri_init(void __maybe_unused *cpupri)
-+{
-+ return 0;
-+}
-+#endif /* CONFIG_SMP */
-+
-+/*
-+ * This is the main, per-CPU runqueue data structure.
-+ * This data should only be modified by the local cpu.
-+ */
-+struct rq {
-+ raw_spinlock_t lock;
-+
-+ struct task_struct *curr, *idle, *stop;
-+ struct mm_struct *prev_mm;
-+
-+ unsigned int nr_running;
-+ /*
-+ * This is part of a global counter where only the total sum
-+ * over all CPUs matters. A task can increase this counter on
-+ * one CPU and if it got migrated afterwards it may decrease
-+ * it on another CPU. Always updated under the runqueue lock:
-+ */
-+ unsigned long nr_uninterruptible;
-+ u64 nr_switches;
-+
-+ /* Stored data about rq->curr to work outside rq lock */
-+ u64 rq_deadline;
-+ int rq_prio;
-+
-+ /* Best queued id for use outside lock */
-+ u64 best_key;
-+
-+ unsigned long last_scheduler_tick; /* Last jiffy this RQ ticked */
-+ unsigned long last_jiffy; /* Last jiffy this RQ updated rq clock */
-+ u64 niffies; /* Last time this RQ updated rq clock */
-+ u64 last_niffy; /* Last niffies as updated by local clock */
-+ u64 last_jiffy_niffies; /* Niffies @ last_jiffy */
-+
-+ u64 load_update; /* When we last updated load */
-+ unsigned long load_avg; /* Rolling load average */
-+#ifdef CONFIG_SMT_NICE
-+ struct mm_struct *rq_mm;
-+ int rq_smt_bias; /* Policy/nice level bias across smt siblings */
-+#endif
-+ /* Accurate timekeeping data */
-+ unsigned long user_ns, nice_ns, irq_ns, softirq_ns, system_ns,
-+ iowait_ns, idle_ns;
-+ atomic_t nr_iowait;
-+
-+ skiplist_node node;
-+ skiplist *sl;
-+#ifdef CONFIG_SMP
-+ struct task_struct *preempt; /* Preempt triggered on this task */
-+ struct task_struct *preempting; /* Hint only, what task is preempting */
-+
-+ int cpu; /* cpu of this runqueue */
-+ bool online;
-+
-+ struct root_domain *rd;
-+ struct sched_domain *sd;
-+
-+ unsigned long cpu_capacity_orig;
-+
-+ int *cpu_locality; /* CPU relative cache distance */
-+ struct rq **rq_order; /* RQs ordered by relative cache distance */
-+
-+#ifdef CONFIG_SCHED_SMT
-+ cpumask_t thread_mask;
-+ bool (*siblings_idle)(struct rq *rq);
-+ /* See if all smt siblings are idle */
-+#endif /* CONFIG_SCHED_SMT */
-+#ifdef CONFIG_SCHED_MC
-+ cpumask_t core_mask;
-+ bool (*cache_idle)(struct rq *rq);
-+ /* See if all cache siblings are idle */
-+#endif /* CONFIG_SCHED_MC */
-+#endif /* CONFIG_SMP */
-+#ifdef CONFIG_IRQ_TIME_ACCOUNTING
-+ u64 prev_irq_time;
-+#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
-+#ifdef CONFIG_PARAVIRT
-+ u64 prev_steal_time;
-+#endif /* CONFIG_PARAVIRT */
-+#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
-+ u64 prev_steal_time_rq;
-+#endif /* CONFIG_PARAVIRT_TIME_ACCOUNTING */
-+
-+ u64 clock, old_clock, last_tick;
-+ u64 clock_task;
-+ int dither;
-+
-+ int iso_ticks;
-+ bool iso_refractory;
-+
-+#ifdef CONFIG_HIGH_RES_TIMERS
-+ struct hrtimer hrexpiry_timer;
-+#endif
-+
-+#ifdef CONFIG_SCHEDSTATS
-+
-+ /* latency stats */
-+ struct sched_info rq_sched_info;
-+ unsigned long long rq_cpu_time;
-+ /* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */
-+
-+ /* sys_sched_yield() stats */
-+ unsigned int yld_count;
-+
-+ /* schedule() stats */
-+ unsigned int sched_switch;
-+ unsigned int sched_count;
-+ unsigned int sched_goidle;
-+
-+ /* try_to_wake_up() stats */
-+ unsigned int ttwu_count;
-+ unsigned int ttwu_local;
-+#endif /* CONFIG_SCHEDSTATS */
-+
-+#ifdef CONFIG_SMP
-+ struct llist_head wake_list;
-+#endif
-+
-+#ifdef CONFIG_CPU_IDLE
-+ /* Must be inspected within a rcu lock section */
-+ struct cpuidle_state *idle_state;
-+#endif
-+};
-+
-+#ifdef CONFIG_SMP
-+struct rq *cpu_rq(int cpu);
-+#endif
-+
-+#ifndef CONFIG_SMP
-+extern struct rq *uprq;
-+#define cpu_rq(cpu) (uprq)
-+#define this_rq() (uprq)
-+#define raw_rq() (uprq)
-+#define task_rq(p) (uprq)
-+#define cpu_curr(cpu) ((uprq)->curr)
-+#else /* CONFIG_SMP */
-+DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
-+#define this_rq() this_cpu_ptr(&runqueues)
-+#define raw_rq() raw_cpu_ptr(&runqueues)
-+#define task_rq(p) cpu_rq(task_cpu(p))
-+#endif /* CONFIG_SMP */
-+
-+static inline int task_current(struct rq *rq, struct task_struct *p)
-+{
-+ return rq->curr == p;
-+}
-+
-+static inline int task_running(struct rq *rq, struct task_struct *p)
-+{
-+#ifdef CONFIG_SMP
-+ return p->on_cpu;
-+#else
-+ return task_current(rq, p);
-+#endif
-+}
-+
-+static inline void rq_lock(struct rq *rq)
-+ __acquires(rq->lock)
-+{
-+ raw_spin_lock(&rq->lock);
-+}
-+
-+static inline void rq_unlock(struct rq *rq)
-+ __releases(rq->lock)
-+{
-+ raw_spin_unlock(&rq->lock);
-+}
-+
-+static inline void rq_lock_irq(struct rq *rq)
-+ __acquires(rq->lock)
-+{
-+ raw_spin_lock_irq(&rq->lock);
-+}
-+
-+static inline void rq_unlock_irq(struct rq *rq)
-+ __releases(rq->lock)
-+{
-+ raw_spin_unlock_irq(&rq->lock);
-+}
-+
-+static inline void rq_lock_irqsave(struct rq *rq, unsigned long *flags)
-+ __acquires(rq->lock)
-+{
-+ raw_spin_lock_irqsave(&rq->lock, *flags);
-+}
-+
-+static inline void rq_unlock_irqrestore(struct rq *rq, unsigned long *flags)
-+ __releases(rq->lock)
-+{
-+ raw_spin_unlock_irqrestore(&rq->lock, *flags);
-+}
-+
-+static inline struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
-+ __acquires(p->pi_lock)
-+ __acquires(rq->lock)
-+{
-+ struct rq *rq;
-+
-+ while (42) {
-+ raw_spin_lock_irqsave(&p->pi_lock, *flags);
-+ rq = task_rq(p);
-+ raw_spin_lock(&rq->lock);
-+ if (likely(rq == task_rq(p)))
-+ break;
-+ raw_spin_unlock(&rq->lock);
-+ raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
-+ }
-+ return rq;
-+}
-+
-+static inline void task_rq_unlock(struct rq *rq, struct task_struct *p, unsigned long *flags)
-+ __releases(rq->lock)
-+ __releases(p->pi_lock)
-+{
-+ rq_unlock(rq);
-+ raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
-+}
-+
-+static inline struct rq *__task_rq_lock(struct task_struct *p)
-+ __acquires(rq->lock)
-+{
-+ struct rq *rq;
-+
-+ lockdep_assert_held(&p->pi_lock);
-+
-+ while (42) {
-+ rq = task_rq(p);
-+ raw_spin_lock(&rq->lock);
-+ if (likely(rq == task_rq(p)))
-+ break;
-+ raw_spin_unlock(&rq->lock);
-+ }
-+ return rq;
-+}
-+
-+static inline void __task_rq_unlock(struct rq *rq)
-+{
-+ rq_unlock(rq);
-+}
-+
-+/*
-+ * {de,en}queue flags: most are not used by MuQSS.
-+ *
-+ * DEQUEUE_SLEEP - task is no longer runnable
-+ * ENQUEUE_WAKEUP - task just became runnable
-+ *
-+ * SAVE/RESTORE - an otherwise spurious dequeue/enqueue, done to ensure tasks
-+ * are in a known state which allows modification. Such pairs
-+ * should preserve as much state as possible.
-+ *
-+ * MOVE - paired with SAVE/RESTORE, explicitly does not preserve the location
-+ * in the runqueue.
-+ *
-+ * ENQUEUE_HEAD - place at front of runqueue (tail if not specified)
-+ * ENQUEUE_REPLENISH - CBS (replenish runtime and postpone deadline)
-+ * ENQUEUE_MIGRATED - the task was migrated during wakeup
-+ *
-+ */
-+
-+#define DEQUEUE_SAVE 0x02 /* matches ENQUEUE_RESTORE */
-+
-+#define ENQUEUE_RESTORE 0x02
-+
-+static inline u64 __rq_clock_broken(struct rq *rq)
-+{
-+ return READ_ONCE(rq->clock);
-+}
-+
-+static inline u64 rq_clock(struct rq *rq)
-+{
-+ lockdep_assert_held(&rq->lock);
-+
-+ return rq->clock;
-+}
-+
-+static inline u64 rq_clock_task(struct rq *rq)
-+{
-+ lockdep_assert_held(&rq->lock);
-+
-+ return rq->clock_task;
-+}
-+
-+#ifdef CONFIG_NUMA
-+enum numa_topology_type {
-+ NUMA_DIRECT,
-+ NUMA_GLUELESS_MESH,
-+ NUMA_BACKPLANE,
-+};
-+extern enum numa_topology_type sched_numa_topology_type;
-+extern int sched_max_numa_distance;
-+extern bool find_numa_distance(int distance);
-+
-+extern void sched_init_numa(void);
-+extern void sched_domains_numa_masks_set(unsigned int cpu);
-+extern void sched_domains_numa_masks_clear(unsigned int cpu);
-+#else
-+static inline void sched_init_numa(void) { }
-+static inline void sched_domains_numa_masks_set(unsigned int cpu) { }
-+static inline void sched_domains_numa_masks_clear(unsigned int cpu) { }
-+#endif
-+
-+extern struct mutex sched_domains_mutex;
-+extern struct static_key_false sched_schedstats;
-+
-+#define rcu_dereference_check_sched_domain(p) \
-+ rcu_dereference_check((p), \
-+ lockdep_is_held(&sched_domains_mutex))
-+
-+#ifdef CONFIG_SMP
-+
-+/*
-+ * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
-+ * See detach_destroy_domains: synchronize_sched for details.
-+ *
-+ * The domain tree of any CPU may only be accessed from within
-+ * preempt-disabled sections.
-+ */
-+#define for_each_domain(cpu, __sd) \
-+ for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); \
-+ __sd; __sd = __sd->parent)
-+
-+#define for_each_lower_domain(sd) for (; sd; sd = sd->child)
-+
-+/**
-+ * highest_flag_domain - Return highest sched_domain containing flag.
-+ * @cpu: The cpu whose highest level of sched domain is to
-+ * be returned.
-+ * @flag: The flag to check for the highest sched_domain
-+ * for the given cpu.
-+ *
-+ * Returns the highest sched_domain of a cpu which contains the given flag.
-+ */
-+static inline struct sched_domain *highest_flag_domain(int cpu, int flag)
-+{
-+ struct sched_domain *sd, *hsd = NULL;
-+
-+ for_each_domain(cpu, sd) {
-+ if (!(sd->flags & flag))
-+ break;
-+ hsd = sd;
-+ }
-+
-+ return hsd;
-+}
-+
-+static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)
-+{
-+ struct sched_domain *sd;
-+
-+ for_each_domain(cpu, sd) {
-+ if (sd->flags & flag)
-+ break;
-+ }
-+
-+ return sd;
-+}
-+
-+DECLARE_PER_CPU(struct sched_domain *, sd_llc);
-+DECLARE_PER_CPU(int, sd_llc_size);
-+DECLARE_PER_CPU(int, sd_llc_id);
-+DECLARE_PER_CPU(struct sched_domain_shared *, sd_llc_shared);
-+DECLARE_PER_CPU(struct sched_domain *, sd_numa);
-+DECLARE_PER_CPU(struct sched_domain *, sd_asym);
-+
-+struct sched_group_capacity {
-+ atomic_t ref;
-+ /*
-+ * CPU capacity of this group, SCHED_CAPACITY_SCALE being max capacity
-+ * for a single CPU.
-+ */
-+ unsigned long capacity;
-+ unsigned long min_capacity; /* Min per-CPU capacity in group */
-+ unsigned long next_update;
-+ int imbalance; /* XXX unrelated to capacity but shared group state */
-+
-+#ifdef CONFIG_SCHED_DEBUG
-+ int id;
-+#endif
-+
-+ unsigned long cpumask[0]; /* balance mask */
-+};
-+
-+struct sched_group {
-+ struct sched_group *next; /* Must be a circular list */
-+ atomic_t ref;
-+
-+ unsigned int group_weight;
-+ struct sched_group_capacity *sgc;
-+ int asym_prefer_cpu; /* cpu of highest priority in group */
-+
-+ /*
-+ * The CPUs this group covers.
-+ *
-+ * NOTE: this field is variable length. (Allocated dynamically
-+ * by attaching extra space to the end of the structure,
-+ * depending on how many CPUs the kernel has booted up with)
-+ */
-+ unsigned long cpumask[0];
-+};
-+
-+static inline struct cpumask *sched_group_span(struct sched_group *sg)
-+{
-+ return to_cpumask(sg->cpumask);
-+}
-+
-+/*
-+ * See build_balance_mask().
-+ */
-+static inline struct cpumask *group_balance_mask(struct sched_group *sg)
-+{
-+ return to_cpumask(sg->sgc->cpumask);
-+}
-+
-+/**
-+ * group_first_cpu - Returns the first cpu in the cpumask of a sched_group.
-+ * @group: The group whose first cpu is to be returned.
-+ */
-+static inline unsigned int group_first_cpu(struct sched_group *group)
-+{
-+ return cpumask_first(sched_group_span(group));
-+}
-+
-+
-+#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
-+void register_sched_domain_sysctl(void);
-+void dirty_sched_domain_sysctl(int cpu);
-+void unregister_sched_domain_sysctl(void);
-+#else
-+static inline void register_sched_domain_sysctl(void)
-+{
-+}
-+static inline void dirty_sched_domain_sysctl(int cpu)
-+{
-+}
-+static inline void unregister_sched_domain_sysctl(void)
-+{
-+}
-+#endif
-+
-+extern void sched_ttwu_pending(void);
-+extern void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask);
-+extern void set_rq_online(struct rq *rq);
-+extern void set_rq_offline(struct rq *rq);
-+extern bool sched_smp_initialized;
-+
-+static inline void update_group_capacity(struct sched_domain *sd, int cpu)
-+{
-+}
-+
-+static inline void trigger_load_balance(struct rq *rq)
-+{
-+}
-+
-+#define sched_feat(x) 0
-+
-+#else /* CONFIG_SMP */
-+
-+static inline void sched_ttwu_pending(void) { }
-+
-+#endif /* CONFIG_SMP */
-+
-+#ifdef CONFIG_CPU_IDLE
-+static inline void idle_set_state(struct rq *rq,
-+ struct cpuidle_state *idle_state)
-+{
-+ rq->idle_state = idle_state;
-+}
-+
-+static inline struct cpuidle_state *idle_get_state(struct rq *rq)
-+{
-+ SCHED_WARN_ON(!rcu_read_lock_held());
-+ return rq->idle_state;
-+}
-+#else
-+static inline void idle_set_state(struct rq *rq,
-+ struct cpuidle_state *idle_state)
-+{
-+}
-+
-+static inline struct cpuidle_state *idle_get_state(struct rq *rq)
-+{
-+ return NULL;
-+}
-+#endif
-+
-+#ifdef CONFIG_SCHED_DEBUG
-+extern bool sched_debug_enabled;
-+#endif
-+
-+extern void schedule_idle(void);
-+
-+#ifdef CONFIG_IRQ_TIME_ACCOUNTING
-+struct irqtime {
-+ u64 total;
-+ u64 tick_delta;
-+ u64 irq_start_time;
-+ struct u64_stats_sync sync;
-+};
-+
-+DECLARE_PER_CPU(struct irqtime, cpu_irqtime);
-+
-+/*
-+ * Returns the irqtime minus the softirq time computed by ksoftirqd.
-+ * Otherwise ksoftirqd's sum_exec_runtime would have its own runtime
-+ * subtracted from it and would never move forward.
-+ */
-+static inline u64 irq_time_read(int cpu)
-+{
-+ struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu);
-+ unsigned int seq;
-+ u64 total;
-+
-+ do {
-+ seq = __u64_stats_fetch_begin(&irqtime->sync);
-+ total = irqtime->total;
-+ } while (__u64_stats_fetch_retry(&irqtime->sync, seq));
-+
-+ return total;
-+}
-+#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
-+
-+#ifdef CONFIG_SMP
-+static inline int cpu_of(struct rq *rq)
-+{
-+ return rq->cpu;
-+}
-+#else /* CONFIG_SMP */
-+static inline int cpu_of(struct rq *rq)
-+{
-+ return 0;
-+}
-+#endif
-+
-+#ifdef CONFIG_CPU_FREQ
-+DECLARE_PER_CPU(struct update_util_data *, cpufreq_update_util_data);
-+
-+static inline void cpufreq_trigger(struct rq *rq, unsigned int flags)
-+{
-+ struct update_util_data *data;
-+
-+ data = rcu_dereference_sched(*per_cpu_ptr(&cpufreq_update_util_data,
-+ cpu_of(rq)));
-+
-+ if (data)
-+ data->func(data, rq->niffies, flags);
-+}
-+#else
-+static inline void cpufreq_trigger(struct rq *rq, unsigned int flag)
-+{
-+}
-+#endif /* CONFIG_CPU_FREQ */
-+
-+#ifdef arch_scale_freq_capacity
-+#ifndef arch_scale_freq_invariant
-+#define arch_scale_freq_invariant() (true)
-+#endif
-+#else /* arch_scale_freq_capacity */
-+#define arch_scale_freq_invariant() (false)
-+#endif
-+
-+/*
-+ * This should only be called when current == rq->idle. Dodgy workaround for
-+ * when softirqs are pending and we are in the idle loop. Setting need_resched
-+ * on current will kick us out of the idle loop and the softirqs will be serviced
-+ * on our next pass through schedule().
-+ */
-+static inline bool softirq_pending(int cpu)
-+{
-+ if (likely(!local_softirq_pending()))
-+ return false;
-+ set_tsk_need_resched(current);
-+ return true;
-+}
-+
-+#ifdef CONFIG_64BIT
-+static inline u64 read_sum_exec_runtime(struct task_struct *t)
-+{
-+ return tsk_seruntime(t);
-+}
-+#else
-+struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags);
-+void task_rq_unlock(struct rq *rq, struct task_struct *p, unsigned long *flags);
-+
-+static inline u64 read_sum_exec_runtime(struct task_struct *t)
-+{
-+ unsigned long flags;
-+ u64 ns;
-+ struct rq *rq;
-+
-+ rq = task_rq_lock(t, &flags);
-+ ns = tsk_seruntime(t);
-+ task_rq_unlock(rq, t, &flags);
-+
-+ return ns;
-+}
-+#endif
-+
-+#endif /* MUQSS_SCHED_H */
-diff -Nur a/kernel/sched/sched.h b/kernel/sched/sched.h
---- a/kernel/sched/sched.h 2018-12-21 13:13:19.000000000 +0000
-+++ b/kernel/sched/sched.h 2019-01-05 20:22:51.099998516 +0000
-@@ -1,5 +1,8 @@
- /* SPDX-License-Identifier: GPL-2.0 */
-
-+#ifdef CONFIG_SCHED_MUQSS
-+#include "MuQSS.h"
-+#else /* CONFIG_SCHED_MUQSS */
- #include <linux/sched.h>
- #include <linux/sched/autogroup.h>
- #include <linux/sched/sysctl.h>
-@@ -2103,3 +2106,29 @@
- #else /* arch_scale_freq_capacity */
- #define arch_scale_freq_invariant() (false)
- #endif
-+
-+static inline bool softirq_pending(int cpu)
-+{
-+ return false;
-+}
-+
-+#ifdef CONFIG_64BIT
-+static inline u64 read_sum_exec_runtime(struct task_struct *t)
-+{
-+ return t->se.sum_exec_runtime;
-+}
-+#else
-+static inline u64 read_sum_exec_runtime(struct task_struct *t)
-+{
-+ u64 ns;
-+ struct rq_flags rf;
-+ struct rq *rq;
-+
-+ rq = task_rq_lock(t, &rf);
-+ ns = t->se.sum_exec_runtime;
-+ task_rq_unlock(rq, t, &rf);
-+
-+ return ns;
-+}
-+#endif
-+#endif /* CONFIG_SCHED_MUQSS */
-diff -Nur a/kernel/skip_list.c b/kernel/skip_list.c
---- a/kernel/skip_list.c 1970-01-01 01:00:00.000000000 +0100
-+++ b/kernel/skip_list.c 2019-01-05 20:22:51.099998516 +0000
-@@ -0,0 +1,148 @@
-+/*
-+ Copyright (C) 2011,2016 Con Kolivas.
-+
-+ Code based on example originally by William Pugh.
-+
-+Skip Lists are a probabilistic alternative to balanced trees, as
-+described in the June 1990 issue of CACM and were invented by
-+William Pugh in 1987.
-+
-+A couple of comments about this implementation:
-+The routine randomLevel has been hard-coded to generate random
-+levels using p=0.25. It can be easily changed.
-+
-+The insertion routine has been implemented so as to use the
-+dirty hack described in the CACM paper: if a random level is
-+generated that is more than the current maximum level, the
-+current maximum level plus one is used instead.
-+
-+Levels start at zero and go up to MaxLevel (which is equal to
-+MaxNumberOfLevels-1).
-+
-+The routines defined in this file are:
-+
-+init: defines slnode
-+
-+new_skiplist: returns a new, empty list
-+
-+randomLevel: Returns a random level based on a u64 random seed passed to it.
-+In MuQSS, the "niffy" time is used for this purpose.
-+
-+insert(l,key, value): inserts the binding (key, value) into l. This operation
-+occurs in O(log n) time.
-+
-+delnode(slnode, l, node): deletes any binding of key from the l based on the
-+actual node value. This operation occurs in O(k) time where k is the
-+number of levels of the node in question (max 8). The original delete
-+function occurred in O(log n) time and involved a search.
-+
-+MuQSS Notes: In this implementation of skiplists, there are bidirectional
-+next/prev pointers and the insert function returns a pointer to the actual
-+node where the value is stored. The key here is chosen by the scheduler so as to
-+sort tasks according to the priority list requirements and is no longer used
-+by the scheduler after insertion. The scheduler lookup, however, occurs in
-+O(1) time because it is always the first item in the level 0 linked list.
-+Since the task struct stores a copy of the node pointer upon skiplist_insert,
-+it can also remove it much faster than the original implementation with the
-+aid of prev<->next pointer manipulation and no searching.
-+
-+*/
-+
-+#include <linux/slab.h>
-+#include <linux/skip_list.h>
-+
-+#define MaxNumberOfLevels 8
-+#define MaxLevel (MaxNumberOfLevels - 1)
-+
-+void skiplist_init(skiplist_node *slnode)
-+{
-+ int i;
-+
-+ slnode->key = 0xFFFFFFFFFFFFFFFF;
-+ slnode->level = 0;
-+ slnode->value = NULL;
-+ for (i = 0; i < MaxNumberOfLevels; i++)
-+ slnode->next[i] = slnode->prev[i] = slnode;
-+}
-+
-+skiplist *new_skiplist(skiplist_node *slnode)
-+{
-+ skiplist *l = kzalloc(sizeof(skiplist), GFP_ATOMIC);
-+
-+ BUG_ON(!l);
-+ l->header = slnode;
-+ return l;
-+}
-+
-+void free_skiplist(skiplist *l)
-+{
-+ skiplist_node *p, *q;
-+
-+ p = l->header;
-+ do {
-+ q = p->next[0];
-+ p->next[0]->prev[0] = q->prev[0];
-+ skiplist_node_init(p);
-+ p = q;
-+ } while (p != l->header);
-+ kfree(l);
-+}
-+
-+void skiplist_node_init(skiplist_node *node)
-+{
-+ memset(node, 0, sizeof(skiplist_node));
-+}
-+
-+static inline unsigned int randomLevel(const long unsigned int randseed)
-+{
-+ return find_first_bit(&randseed, MaxLevel) / 2;
-+}
-+
-+void skiplist_insert(skiplist *l, skiplist_node *node, keyType key, valueType value, unsigned int randseed)
-+{
-+ skiplist_node *update[MaxNumberOfLevels];
-+ skiplist_node *p, *q;
-+ int k = l->level;
-+
-+ p = l->header;
-+ do {
-+ while (q = p->next[k], q->key <= key)
-+ p = q;
-+ update[k] = p;
-+ } while (--k >= 0);
-+
-+ ++l->entries;
-+ k = randomLevel(randseed);
-+ if (k > l->level) {
-+ k = ++l->level;
-+ update[k] = l->header;
-+ }
-+
-+ node->level = k;
-+ node->key = key;
-+ node->value = value;
-+ do {
-+ p = update[k];
-+ node->next[k] = p->next[k];
-+ p->next[k] = node;
-+ node->prev[k] = p;
-+ node->next[k]->prev[k] = node;
-+ } while (--k >= 0);
-+}
-+
-+void skiplist_delete(skiplist *l, skiplist_node *node)
-+{
-+ int k, m = node->level;
-+
-+ for (k = 0; k <= m; k++) {
-+ node->prev[k]->next[k] = node->next[k];
-+ node->next[k]->prev[k] = node->prev[k];
-+ }
-+ skiplist_node_init(node);
-+ if (m == l->level) {
-+ while (l->header->next[m] == l->header && l->header->prev[m] == l->header && m > 0)
-+ m--;
-+ l->level = m;
-+ }
-+ l->entries--;
-+}
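
For orientation, here is a minimal sketch of how the skip list API above is
driven. It is illustrative only: the demo_* names and the constant seed are
assumptions, and in MuQSS proper the node is embedded in the task struct while
the seed is the scheduler's "niffy" clock.

	/* Sketch only: demo_* identifiers are not part of the patch. */
	static skiplist_node demo_header;	/* becomes the sentinel, key = ~0 */
	static skiplist_node demo_node;		/* embedded node, never kmalloc'd */
	static int demo_payload;

	static void demo_skiplist_usage(void)
	{
		skiplist *sl;
		void *best = NULL;

		skiplist_init(&demo_header);
		sl = new_skiplist(&demo_header);	/* GFP_ATOMIC allocation */

		/* O(log n): the key orders entries, the seed picks the node level */
		skiplist_insert(sl, &demo_node, 42, &demo_payload, 7);

		/* O(1) lookup: the best entry is first on the level-0 list */
		if (sl->header->next[0] != sl->header)
			best = sl->header->next[0]->value;

		/* O(k) removal through the stored node pointer, no search */
		skiplist_delete(sl, &demo_node);
		free_skiplist(sl);
		(void)best;
	}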
-diff -Nur a/kernel/sysctl.c b/kernel/sysctl.c
---- a/kernel/sysctl.c 2019-01-05 20:17:13.859238862 +0000
-+++ b/kernel/sysctl.c 2019-01-05 20:22:51.099998516 +0000
-@@ -135,6 +135,12 @@
- static unsigned long one_ul __read_only = 1;
- static int one_hundred __read_only = 100;
- static int one_thousand __read_only = 1000;
-+#ifdef CONFIG_SCHED_MUQSS
-+extern int rr_interval;
-+extern int sched_interactive;
-+extern int sched_iso_cpu;
-+extern int sched_yield_type;
-+#endif
- #ifdef CONFIG_PRINTK
- static int ten_thousand __read_only = 10000;
- #endif
-@@ -296,7 +302,7 @@
- { }
- };
-
--#ifdef CONFIG_SCHED_DEBUG
-+#if defined(CONFIG_SCHED_DEBUG) && !defined(CONFIG_SCHED_MUQSS)
- static int min_sched_granularity_ns __read_only = 100000; /* 100 usecs */
- static int max_sched_granularity_ns __read_only = NSEC_PER_SEC; /* 1 second */
- static int min_wakeup_granularity_ns __read_only; /* 0 usecs */
-@@ -313,6 +319,7 @@
- #endif
-
- static struct ctl_table kern_table[] = {
-+#ifndef CONFIG_SCHED_MUQSS
- {
- .procname = "sched_child_runs_first",
- .data = &sysctl_sched_child_runs_first,
-@@ -475,6 +482,7 @@
- .extra1 = &one,
- },
- #endif
-+#endif /* !CONFIG_SCHED_MUQSS */
- #ifdef CONFIG_PROVE_LOCKING
- {
- .procname = "prove_locking",
-@@ -1073,6 +1081,44 @@
- .proc_handler = proc_dointvec,
- },
- #endif
-+#ifdef CONFIG_SCHED_MUQSS
-+ {
-+ .procname = "rr_interval",
-+ .data = &rr_interval,
-+ .maxlen = sizeof (int),
-+ .mode = 0644,
-+ .proc_handler = &proc_dointvec_minmax,
-+ .extra1 = &one,
-+ .extra2 = &one_thousand,
-+ },
-+ {
-+ .procname = "interactive",
-+ .data = &sched_interactive,
-+ .maxlen = sizeof(int),
-+ .mode = 0644,
-+ .proc_handler = &proc_dointvec_minmax,
-+ .extra1 = &zero,
-+ .extra2 = &one,
-+ },
-+ {
-+ .procname = "iso_cpu",
-+ .data = &sched_iso_cpu,
-+ .maxlen = sizeof (int),
-+ .mode = 0644,
-+ .proc_handler = &proc_dointvec_minmax,
-+ .extra1 = &zero,
-+ .extra2 = &one_hundred,
-+ },
-+ {
-+ .procname = "yield_type",
-+ .data = &sched_yield_type,
-+ .maxlen = sizeof (int),
-+ .mode = 0644,
-+ .proc_handler = &proc_dointvec_minmax,
-+ .extra1 = &zero,
-+ .extra2 = &two,
-+ },
-+#endif
- #if defined(CONFIG_S390) && defined(CONFIG_SMP)
- {
- .procname = "spin_retry",
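
The four tunables registered above surface as /proc/sys/kernel/<procname> once
a MuQSS kernel boots. A small userspace sketch for reading them (the file
names come from the .procname fields above; the helper itself is illustrative):

	#include <stdio.h>

	/* Works for rr_interval, interactive, iso_cpu and yield_type,
	 * which all expose a single int via proc_dointvec_minmax. */
	static int read_muqss_tunable(const char *name, int *val)
	{
		char path[64];
		FILE *f;
		int ok;

		snprintf(path, sizeof(path), "/proc/sys/kernel/%s", name);
		f = fopen(path, "r");
		if (!f)
			return -1;
		ok = (fscanf(f, "%d", val) == 1) ? 0 : -1;
		fclose(f);
		return ok;
	}

	int main(void)
	{
		int rr;

		if (read_muqss_tunable("rr_interval", &rr) == 0)
			printf("rr_interval = %d\n", rr);	/* clamped to 1..1000 above */
		return 0;
	}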
-diff -Nur a/kernel/time/clockevents.c b/kernel/time/clockevents.c
---- a/kernel/time/clockevents.c 2018-12-21 13:13:19.000000000 +0000
-+++ b/kernel/time/clockevents.c 2019-01-05 20:22:51.099998516 +0000
-@@ -198,8 +198,13 @@
-
- #ifdef CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST
-
-+#ifdef CONFIG_SCHED_MUQSS
-+/* Limit min_delta to 100us */
-+#define MIN_DELTA_LIMIT (NSEC_PER_SEC / 10000)
-+#else
- /* Limit min_delta to a jiffie */
- #define MIN_DELTA_LIMIT (NSEC_PER_SEC / HZ)
-+#endif
-
- /**
- * clockevents_increase_min_delta - raise minimum delta of a clock event device
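
As a quick check of the constant above: NSEC_PER_SEC / 10000 = 10^9 / 10^4 ns
= 100,000 ns = 100 us, whereas the stock NSEC_PER_SEC / HZ limit is 10 ms at
HZ=100. MuQSS therefore lets misbehaving clock event devices keep a minimum
delta two orders of magnitude shorter, in line with the series' reliance on
fine-grained hrtimer wakeups.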
-diff -Nur a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
---- a/kernel/time/posix-cpu-timers.c 2018-12-21 13:13:19.000000000 +0000
-+++ b/kernel/time/posix-cpu-timers.c 2019-01-05 20:22:51.109998835 +0000
-@@ -818,7 +818,7 @@
- tsk_expires->virt_exp = expires;
-
- tsk_expires->sched_exp = check_timers_list(++timers, firing,
-- tsk->se.sum_exec_runtime);
-+ tsk_seruntime(tsk));
-
- /*
- * Check for the special case thread timers.
-@@ -828,7 +828,7 @@
- unsigned long hard = task_rlimit_max(tsk, RLIMIT_RTTIME);
-
- if (hard != RLIM_INFINITY &&
-- tsk->rt.timeout > DIV_ROUND_UP(hard, USEC_PER_SEC/HZ)) {
-+ tsk_rttimeout(tsk) > DIV_ROUND_UP(hard, USEC_PER_SEC/HZ)) {
- /*
- * At the hard limit, we just die.
- * No need to calculate anything else now.
-@@ -840,7 +840,7 @@
- __group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
- return;
- }
-- if (tsk->rt.timeout > DIV_ROUND_UP(soft, USEC_PER_SEC/HZ)) {
-+ if (tsk_rttimeout(tsk) > DIV_ROUND_UP(soft, USEC_PER_SEC/HZ)) {
- /*
- * At the soft limit, send a SIGXCPU every second.
- */
-@@ -1081,7 +1081,7 @@
- struct task_cputime task_sample;
-
- task_cputime(tsk, &task_sample.utime, &task_sample.stime);
-- task_sample.sum_exec_runtime = tsk->se.sum_exec_runtime;
-+ task_sample.sum_exec_runtime = tsk_seruntime(tsk);
- if (task_cputime_expired(&task_sample, &tsk->cputime_expires))
- return 1;
- }
-diff -Nur a/kernel/time/timer.c b/kernel/time/timer.c
---- a/kernel/time/timer.c 2019-01-05 20:17:13.859238862 +0000
-+++ b/kernel/time/timer.c 2019-01-05 20:22:51.109998835 +0000
-@@ -1434,7 +1434,7 @@
- * Check, if the next hrtimer event is before the next timer wheel
- * event:
- */
--static u64 cmp_next_hrtimer_event(u64 basem, u64 expires)
-+static u64 cmp_next_hrtimer_event(struct timer_base *base, u64 basem, u64 expires)
- {
- u64 nextevt = hrtimer_get_next_event();
-
-@@ -1452,6 +1452,9 @@
- if (nextevt <= basem)
- return basem;
-
-+ if (nextevt < expires && nextevt - basem <= TICK_NSEC)
-+ base->is_idle = false;
-+
- /*
- * Round up to the next jiffie. High resolution timers are
- * off, so the hrtimers are expired in the tick and we need to
-@@ -1521,7 +1524,7 @@
- }
- raw_spin_unlock(&base->lock);
-
-- return cmp_next_hrtimer_event(basem, expires);
-+ return cmp_next_hrtimer_event(base, basem, expires);
- }
-
- /**
-diff -Nur a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
---- a/kernel/trace/trace_selftest.c 2018-12-21 13:13:19.000000000 +0000
-+++ b/kernel/trace/trace_selftest.c 2019-01-05 20:22:51.109998835 +0000
-@@ -1041,10 +1041,15 @@
- {
- /* Make this a -deadline thread */
- static const struct sched_attr attr = {
-+#ifdef CONFIG_SCHED_MUQSS
-+ /* No deadline on MuQSS, use RR */
-+ .sched_policy = SCHED_RR,
-+#else
- .sched_policy = SCHED_DEADLINE,
- .sched_runtime = 100000ULL,
- .sched_deadline = 10000000ULL,
- .sched_period = 10000000ULL
-+#endif
- };
- struct wakeup_test_data *x = data;
-
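
MuQSS does not implement SCHED_DEADLINE (per the comment above), so the
wakeup-latency selftest falls back to SCHED_RR; the runtime/deadline/period
fields are simply omitted, as they carry no meaning for a round-robin RT thread.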
diff --git a/sys-kernel/linux-image-redcore-lts/files/4.14-0002-Make-preemptible-kernel-default.patch b/sys-kernel/linux-image-redcore-lts/files/4.14-0002-Make-preemptible-kernel-default.patch
deleted file mode 100644
index 69abb373..00000000
--- a/sys-kernel/linux-image-redcore-lts/files/4.14-0002-Make-preemptible-kernel-default.patch
+++ /dev/null
@@ -1,733 +0,0 @@
-From e8e37da685f7988182d7920a711e00dd2457af65 Mon Sep 17 00:00:00 2001
-From: Con Kolivas <kernel@kolivas.org>
-Date: Sat, 29 Oct 2016 11:20:37 +1100
-Subject: [PATCH 02/16] Make preemptible kernel default.
-
-Make full preempt default on all arches.
----
- arch/arc/configs/tb10x_defconfig | 2 +-
- arch/arm/configs/bcm2835_defconfig | 2 +-
- arch/arm/configs/imx_v6_v7_defconfig | 2 +-
- arch/arm/configs/mps2_defconfig | 2 +-
- arch/arm/configs/mxs_defconfig | 2 +-
- arch/blackfin/configs/BF518F-EZBRD_defconfig | 2 +-
- arch/blackfin/configs/BF526-EZBRD_defconfig | 2 +-
- arch/blackfin/configs/BF527-EZKIT-V2_defconfig | 2 +-
- arch/blackfin/configs/BF527-EZKIT_defconfig | 2 +-
- arch/blackfin/configs/BF527-TLL6527M_defconfig | 2 +-
- arch/blackfin/configs/BF533-EZKIT_defconfig | 2 +-
- arch/blackfin/configs/BF533-STAMP_defconfig | 2 +-
- arch/blackfin/configs/BF537-STAMP_defconfig | 2 +-
- arch/blackfin/configs/BF538-EZKIT_defconfig | 2 +-
- arch/blackfin/configs/BF548-EZKIT_defconfig | 2 +-
- arch/blackfin/configs/BF561-ACVILON_defconfig | 2 +-
- arch/blackfin/configs/BF561-EZKIT-SMP_defconfig | 2 +-
- arch/blackfin/configs/BF561-EZKIT_defconfig | 2 +-
- arch/blackfin/configs/BF609-EZKIT_defconfig | 2 +-
- arch/blackfin/configs/BlackStamp_defconfig | 2 +-
- arch/blackfin/configs/CM-BF527_defconfig | 2 +-
- arch/blackfin/configs/PNAV-10_defconfig | 2 +-
- arch/blackfin/configs/SRV1_defconfig | 2 +-
- arch/blackfin/configs/TCM-BF518_defconfig | 2 +-
- arch/mips/configs/fuloong2e_defconfig | 3 ++-
- arch/mips/configs/gpr_defconfig | 3 ++-
- arch/mips/configs/ip22_defconfig | 3 ++-
- arch/mips/configs/ip28_defconfig | 3 ++-
- arch/mips/configs/jazz_defconfig | 3 ++-
- arch/mips/configs/mtx1_defconfig | 3 ++-
- arch/mips/configs/nlm_xlr_defconfig | 2 +-
- arch/mips/configs/pic32mzda_defconfig | 2 +-
- arch/mips/configs/pistachio_defconfig | 2 +-
- arch/mips/configs/pnx8335_stb225_defconfig | 2 +-
- arch/mips/configs/rm200_defconfig | 3 ++-
- arch/parisc/configs/712_defconfig | 2 +-
- arch/parisc/configs/c3000_defconfig | 2 +-
- arch/parisc/configs/default_defconfig | 2 +-
- arch/powerpc/configs/c2k_defconfig | 2 +-
- arch/powerpc/configs/ppc6xx_defconfig | 2 +-
- arch/score/configs/spct6600_defconfig | 2 +-
- arch/sh/configs/se7712_defconfig | 2 +-
- arch/sh/configs/se7721_defconfig | 2 +-
- arch/sh/configs/titan_defconfig | 2 +-
- arch/sparc/configs/sparc64_defconfig | 2 +-
- arch/tile/configs/tilegx_defconfig | 2 +-
- arch/tile/configs/tilepro_defconfig | 2 +-
- arch/x86/configs/i386_defconfig | 2 +-
- arch/x86/configs/x86_64_defconfig | 2 +-
- kernel/Kconfig.preempt | 7 ++++---
- 50 files changed, 60 insertions(+), 52 deletions(-)
-
-diff --git a/arch/arc/configs/tb10x_defconfig b/arch/arc/configs/tb10x_defconfig
-index f30182549395..42910f628869 100644
---- a/arch/arc/configs/tb10x_defconfig
-+++ b/arch/arc/configs/tb10x_defconfig
-@@ -28,7 +28,7 @@ CONFIG_ARC_PLAT_TB10X=y
- CONFIG_ARC_CACHE_LINE_SHIFT=5
- CONFIG_HZ=250
- CONFIG_ARC_BUILTIN_DTB_NAME="abilis_tb100_dvk"
--CONFIG_PREEMPT_VOLUNTARY=y
-+CONFIG_PREEMPT=y
- # CONFIG_COMPACTION is not set
- CONFIG_NET=y
- CONFIG_PACKET=y
-diff --git a/arch/arm/configs/bcm2835_defconfig b/arch/arm/configs/bcm2835_defconfig
-index 43dab4890ad3..44a52166ca5e 100644
---- a/arch/arm/configs/bcm2835_defconfig
-+++ b/arch/arm/configs/bcm2835_defconfig
-@@ -29,7 +29,7 @@ CONFIG_MODULE_UNLOAD=y
- CONFIG_ARCH_MULTI_V6=y
- CONFIG_ARCH_BCM=y
- CONFIG_ARCH_BCM2835=y
--CONFIG_PREEMPT_VOLUNTARY=y
-+CONFIG_PREEMPT=y
- CONFIG_AEABI=y
- CONFIG_KSM=y
- CONFIG_CLEANCACHE=y
-diff --git a/arch/arm/configs/imx_v6_v7_defconfig b/arch/arm/configs/imx_v6_v7_defconfig
-index 32acac9ab81a..1482bb312987 100644
---- a/arch/arm/configs/imx_v6_v7_defconfig
-+++ b/arch/arm/configs/imx_v6_v7_defconfig
-@@ -47,7 +47,7 @@ CONFIG_PCI_MSI=y
- CONFIG_PCI_IMX6=y
- CONFIG_SMP=y
- CONFIG_ARM_PSCI=y
--CONFIG_PREEMPT_VOLUNTARY=y
-+CONFIG_PREEMPT=y
- CONFIG_AEABI=y
- CONFIG_HIGHMEM=y
- CONFIG_CMA=y
-diff --git a/arch/arm/configs/mps2_defconfig b/arch/arm/configs/mps2_defconfig
-index 0bcdec7cc169..10ceaefa51e0 100644
---- a/arch/arm/configs/mps2_defconfig
-+++ b/arch/arm/configs/mps2_defconfig
-@@ -18,7 +18,7 @@ CONFIG_ARCH_MPS2=y
- CONFIG_SET_MEM_PARAM=y
- CONFIG_DRAM_BASE=0x21000000
- CONFIG_DRAM_SIZE=0x1000000
--CONFIG_PREEMPT_VOLUNTARY=y
-+CONFIG_PREEMPT=y
- # CONFIG_ATAGS is not set
- CONFIG_ZBOOT_ROM_TEXT=0x0
- CONFIG_ZBOOT_ROM_BSS=0x0
-diff --git a/arch/arm/configs/mxs_defconfig b/arch/arm/configs/mxs_defconfig
-index e5822ab01b7d..3e77e02f678f 100644
---- a/arch/arm/configs/mxs_defconfig
-+++ b/arch/arm/configs/mxs_defconfig
-@@ -27,7 +27,7 @@ CONFIG_BLK_DEV_INTEGRITY=y
- # CONFIG_ARCH_MULTI_V7 is not set
- CONFIG_ARCH_MXS=y
- # CONFIG_ARM_THUMB is not set
--CONFIG_PREEMPT_VOLUNTARY=y
-+CONFIG_PREEMPT=y
- CONFIG_AEABI=y
- CONFIG_NET=y
- CONFIG_PACKET=y
-diff --git a/arch/blackfin/configs/BF518F-EZBRD_defconfig b/arch/blackfin/configs/BF518F-EZBRD_defconfig
-index 99c00d835f47..39b91dfa55b5 100644
---- a/arch/blackfin/configs/BF518F-EZBRD_defconfig
-+++ b/arch/blackfin/configs/BF518F-EZBRD_defconfig
-@@ -19,7 +19,7 @@ CONFIG_MODULE_UNLOAD=y
- # CONFIG_BLK_DEV_BSG is not set
- # CONFIG_IOSCHED_DEADLINE is not set
- # CONFIG_IOSCHED_CFQ is not set
--CONFIG_PREEMPT_VOLUNTARY=y
-+CONFIG_PREEMPT=y
- CONFIG_BF518=y
- CONFIG_IRQ_TIMER0=12
- # CONFIG_CYCLES_CLOCKSOURCE is not set
-diff --git a/arch/blackfin/configs/BF526-EZBRD_defconfig b/arch/blackfin/configs/BF526-EZBRD_defconfig
-index e66ba31ef84d..675cadb3a0c4 100644
---- a/arch/blackfin/configs/BF526-EZBRD_defconfig
-+++ b/arch/blackfin/configs/BF526-EZBRD_defconfig
-@@ -19,7 +19,7 @@ CONFIG_MODULE_UNLOAD=y
- # CONFIG_BLK_DEV_BSG is not set
- # CONFIG_IOSCHED_DEADLINE is not set
- # CONFIG_IOSCHED_CFQ is not set
--CONFIG_PREEMPT_VOLUNTARY=y
-+CONFIG_PREEMPT=y
- CONFIG_BF526=y
- CONFIG_IRQ_TIMER0=12
- CONFIG_BFIN526_EZBRD=y
-diff --git a/arch/blackfin/configs/BF527-EZKIT-V2_defconfig b/arch/blackfin/configs/BF527-EZKIT-V2_defconfig
-index 0207c588c19f..4c517c443af5 100644
---- a/arch/blackfin/configs/BF527-EZKIT-V2_defconfig
-+++ b/arch/blackfin/configs/BF527-EZKIT-V2_defconfig
-@@ -19,7 +19,7 @@ CONFIG_MODULE_UNLOAD=y
- # CONFIG_BLK_DEV_BSG is not set
- # CONFIG_IOSCHED_DEADLINE is not set
- # CONFIG_IOSCHED_CFQ is not set
--CONFIG_PREEMPT_VOLUNTARY=y
-+CONFIG_PREEMPT=y
- CONFIG_BF527=y
- CONFIG_BF_REV_0_2=y
- CONFIG_BFIN527_EZKIT_V2=y
-diff --git a/arch/blackfin/configs/BF527-EZKIT_defconfig b/arch/blackfin/configs/BF527-EZKIT_defconfig
-index 99c131ba7d90..bf8df3e6cf02 100644
---- a/arch/blackfin/configs/BF527-EZKIT_defconfig
-+++ b/arch/blackfin/configs/BF527-EZKIT_defconfig
-@@ -19,7 +19,7 @@ CONFIG_MODULE_UNLOAD=y
- # CONFIG_BLK_DEV_BSG is not set
- # CONFIG_IOSCHED_DEADLINE is not set
- # CONFIG_IOSCHED_CFQ is not set
--CONFIG_PREEMPT_VOLUNTARY=y
-+CONFIG_PREEMPT=y
- CONFIG_BF527=y
- CONFIG_BF_REV_0_1=y
- CONFIG_IRQ_USB_INT0=11
-diff --git a/arch/blackfin/configs/BF527-TLL6527M_defconfig b/arch/blackfin/configs/BF527-TLL6527M_defconfig
-index cdeb51856f26..0220b3b15c53 100644
---- a/arch/blackfin/configs/BF527-TLL6527M_defconfig
-+++ b/arch/blackfin/configs/BF527-TLL6527M_defconfig
-@@ -21,7 +21,7 @@ CONFIG_MODULE_UNLOAD=y
- # CONFIG_LBDAF is not set
- # CONFIG_BLK_DEV_BSG is not set
- # CONFIG_IOSCHED_DEADLINE is not set
--CONFIG_PREEMPT_VOLUNTARY=y
-+CONFIG_PREEMPT=y
- CONFIG_BF527=y
- CONFIG_BF_REV_0_2=y
- CONFIG_BFIN527_TLL6527M=y
-diff --git a/arch/blackfin/configs/BF533-EZKIT_defconfig b/arch/blackfin/configs/BF533-EZKIT_defconfig
-index ed7d2c096739..6023e3fd2c48 100644
---- a/arch/blackfin/configs/BF533-EZKIT_defconfig
-+++ b/arch/blackfin/configs/BF533-EZKIT_defconfig
-@@ -19,7 +19,7 @@ CONFIG_MODULE_UNLOAD=y
- # CONFIG_BLK_DEV_BSG is not set
- # CONFIG_IOSCHED_DEADLINE is not set
- # CONFIG_IOSCHED_CFQ is not set
--CONFIG_PREEMPT_VOLUNTARY=y
-+CONFIG_PREEMPT=y
- CONFIG_BFIN533_EZKIT=y
- CONFIG_TIMER0=11
- CONFIG_CLKIN_HZ=27000000
-diff --git a/arch/blackfin/configs/BF533-STAMP_defconfig b/arch/blackfin/configs/BF533-STAMP_defconfig
-index 0c241f4d28d7..f5cd0f18b711 100644
---- a/arch/blackfin/configs/BF533-STAMP_defconfig
-+++ b/arch/blackfin/configs/BF533-STAMP_defconfig
-@@ -19,7 +19,7 @@ CONFIG_MODULE_UNLOAD=y
- # CONFIG_BLK_DEV_BSG is not set
- # CONFIG_IOSCHED_DEADLINE is not set
- # CONFIG_IOSCHED_CFQ is not set
--CONFIG_PREEMPT_VOLUNTARY=y
-+CONFIG_PREEMPT=y
- CONFIG_TIMER0=11
- CONFIG_HIGH_RES_TIMERS=y
- CONFIG_NOMMU_INITIAL_TRIM_EXCESS=0
-diff --git a/arch/blackfin/configs/BF537-STAMP_defconfig b/arch/blackfin/configs/BF537-STAMP_defconfig
-index e5360b30e39a..48085fde7f9e 100644
---- a/arch/blackfin/configs/BF537-STAMP_defconfig
-+++ b/arch/blackfin/configs/BF537-STAMP_defconfig
-@@ -19,7 +19,7 @@ CONFIG_MODULE_UNLOAD=y
- # CONFIG_BLK_DEV_BSG is not set
- # CONFIG_IOSCHED_DEADLINE is not set
- # CONFIG_IOSCHED_CFQ is not set
--CONFIG_PREEMPT_VOLUNTARY=y
-+CONFIG_PREEMPT=y
- CONFIG_BF537=y
- CONFIG_HIGH_RES_TIMERS=y
- CONFIG_NOMMU_INITIAL_TRIM_EXCESS=0
-diff --git a/arch/blackfin/configs/BF538-EZKIT_defconfig b/arch/blackfin/configs/BF538-EZKIT_defconfig
-index 60f6fb86125c..12deeaaef3cb 100644
---- a/arch/blackfin/configs/BF538-EZKIT_defconfig
-+++ b/arch/blackfin/configs/BF538-EZKIT_defconfig
-@@ -21,7 +21,7 @@ CONFIG_MODULE_UNLOAD=y
- # CONFIG_BLK_DEV_BSG is not set
- # CONFIG_IOSCHED_DEADLINE is not set
- # CONFIG_IOSCHED_CFQ is not set
--CONFIG_PREEMPT_VOLUNTARY=y
-+CONFIG_PREEMPT=y
- CONFIG_BF538=y
- CONFIG_IRQ_TIMER0=12
- CONFIG_IRQ_TIMER1=12
-diff --git a/arch/blackfin/configs/BF548-EZKIT_defconfig b/arch/blackfin/configs/BF548-EZKIT_defconfig
-index 38cb17d218d4..6a68ffc55b5a 100644
---- a/arch/blackfin/configs/BF548-EZKIT_defconfig
-+++ b/arch/blackfin/configs/BF548-EZKIT_defconfig
-@@ -19,7 +19,7 @@ CONFIG_MODULE_UNLOAD=y
- # CONFIG_BLK_DEV_BSG is not set
- # CONFIG_IOSCHED_DEADLINE is not set
- # CONFIG_IOSCHED_CFQ is not set
--CONFIG_PREEMPT_VOLUNTARY=y
-+CONFIG_PREEMPT=y
- CONFIG_BF548_std=y
- CONFIG_IRQ_TIMER0=11
- # CONFIG_CYCLES_CLOCKSOURCE is not set
-diff --git a/arch/blackfin/configs/BF561-ACVILON_defconfig b/arch/blackfin/configs/BF561-ACVILON_defconfig
-index 78f6bc79f910..e9f3ba783a4e 100644
---- a/arch/blackfin/configs/BF561-ACVILON_defconfig
-+++ b/arch/blackfin/configs/BF561-ACVILON_defconfig
-@@ -20,7 +20,7 @@ CONFIG_MODULE_UNLOAD=y
- # CONFIG_LBDAF is not set
- # CONFIG_BLK_DEV_BSG is not set
- # CONFIG_IOSCHED_DEADLINE is not set
--CONFIG_PREEMPT_VOLUNTARY=y
-+CONFIG_PREEMPT=y
- CONFIG_BF561=y
- CONFIG_BF_REV_0_5=y
- CONFIG_IRQ_TIMER0=10
-diff --git a/arch/blackfin/configs/BF561-EZKIT-SMP_defconfig b/arch/blackfin/configs/BF561-EZKIT-SMP_defconfig
-index fac8bb578249..89b75a6c3fab 100644
---- a/arch/blackfin/configs/BF561-EZKIT-SMP_defconfig
-+++ b/arch/blackfin/configs/BF561-EZKIT-SMP_defconfig
-@@ -19,7 +19,7 @@ CONFIG_MODULE_UNLOAD=y
- # CONFIG_BLK_DEV_BSG is not set
- # CONFIG_IOSCHED_DEADLINE is not set
- # CONFIG_IOSCHED_CFQ is not set
--CONFIG_PREEMPT_VOLUNTARY=y
-+CONFIG_PREEMPT=y
- CONFIG_BF561=y
- CONFIG_SMP=y
- CONFIG_IRQ_TIMER0=10
-diff --git a/arch/blackfin/configs/BF561-EZKIT_defconfig b/arch/blackfin/configs/BF561-EZKIT_defconfig
-index 2a2e4d0cebc1..67b3d2f419ba 100644
---- a/arch/blackfin/configs/BF561-EZKIT_defconfig
-+++ b/arch/blackfin/configs/BF561-EZKIT_defconfig
-@@ -19,7 +19,7 @@ CONFIG_MODULE_UNLOAD=y
- # CONFIG_BLK_DEV_BSG is not set
- # CONFIG_IOSCHED_DEADLINE is not set
- # CONFIG_IOSCHED_CFQ is not set
--CONFIG_PREEMPT_VOLUNTARY=y
-+CONFIG_PREEMPT=y
- CONFIG_BF561=y
- CONFIG_IRQ_TIMER0=10
- CONFIG_CLKIN_HZ=30000000
-diff --git a/arch/blackfin/configs/BF609-EZKIT_defconfig b/arch/blackfin/configs/BF609-EZKIT_defconfig
-index 3ce77f07208a..8cc75d4218fb 100644
---- a/arch/blackfin/configs/BF609-EZKIT_defconfig
-+++ b/arch/blackfin/configs/BF609-EZKIT_defconfig
-@@ -20,7 +20,7 @@ CONFIG_MODULE_UNLOAD=y
- # CONFIG_BLK_DEV_BSG is not set
- # CONFIG_IOSCHED_DEADLINE is not set
- # CONFIG_IOSCHED_CFQ is not set
--CONFIG_PREEMPT_VOLUNTARY=y
-+CONFIG_PREEMPT=y
- CONFIG_BF609=y
- CONFIG_PINT1_ASSIGN=0x01010000
- CONFIG_PINT2_ASSIGN=0x07000101
-diff --git a/arch/blackfin/configs/BlackStamp_defconfig b/arch/blackfin/configs/BlackStamp_defconfig
-index f4a9200e1ab1..9faf0ec7007f 100644
---- a/arch/blackfin/configs/BlackStamp_defconfig
-+++ b/arch/blackfin/configs/BlackStamp_defconfig
-@@ -17,7 +17,7 @@ CONFIG_MODULE_UNLOAD=y
- CONFIG_MODULE_FORCE_UNLOAD=y
- # CONFIG_BLK_DEV_BSG is not set
- # CONFIG_IOSCHED_DEADLINE is not set
--CONFIG_PREEMPT_VOLUNTARY=y
-+CONFIG_PREEMPT=y
- CONFIG_BF532=y
- CONFIG_BF_REV_0_5=y
- CONFIG_BLACKSTAMP=y
-diff --git a/arch/blackfin/configs/CM-BF527_defconfig b/arch/blackfin/configs/CM-BF527_defconfig
-index 1902bb05d086..4a1ad4fd7bb2 100644
---- a/arch/blackfin/configs/CM-BF527_defconfig
-+++ b/arch/blackfin/configs/CM-BF527_defconfig
-@@ -19,7 +19,7 @@ CONFIG_MODULES=y
- CONFIG_MODULE_UNLOAD=y
- # CONFIG_BLK_DEV_BSG is not set
- # CONFIG_IOSCHED_DEADLINE is not set
--CONFIG_PREEMPT_VOLUNTARY=y
-+CONFIG_PREEMPT=y
- CONFIG_BF527=y
- CONFIG_BF_REV_0_1=y
- CONFIG_IRQ_TIMER0=12
-diff --git a/arch/blackfin/configs/PNAV-10_defconfig b/arch/blackfin/configs/PNAV-10_defconfig
-index c7926812971c..9d787e28bbe8 100644
---- a/arch/blackfin/configs/PNAV-10_defconfig
-+++ b/arch/blackfin/configs/PNAV-10_defconfig
-@@ -15,7 +15,7 @@ CONFIG_MODULE_UNLOAD=y
- # CONFIG_BLK_DEV_BSG is not set
- # CONFIG_IOSCHED_DEADLINE is not set
- # CONFIG_IOSCHED_CFQ is not set
--CONFIG_PREEMPT_VOLUNTARY=y
-+CONFIG_PREEMPT=y
- CONFIG_BF537=y
- CONFIG_IRQ_TIMER0=12
- CONFIG_PNAV10=y
-diff --git a/arch/blackfin/configs/SRV1_defconfig b/arch/blackfin/configs/SRV1_defconfig
-index 23fdc57d657a..225df32dc9a8 100644
---- a/arch/blackfin/configs/SRV1_defconfig
-+++ b/arch/blackfin/configs/SRV1_defconfig
-@@ -13,7 +13,7 @@ CONFIG_MMAP_ALLOW_UNINITIALIZED=y
- CONFIG_MODULES=y
- CONFIG_MODULE_UNLOAD=y
- # CONFIG_IOSCHED_DEADLINE is not set
--CONFIG_PREEMPT_VOLUNTARY=y
-+CONFIG_PREEMPT=y
- CONFIG_BF537=y
- CONFIG_IRQ_TIMER0=12
- CONFIG_BOOT_LOAD=0x400000
-diff --git a/arch/blackfin/configs/TCM-BF518_defconfig b/arch/blackfin/configs/TCM-BF518_defconfig
-index e28959479fe0..425c24e43c34 100644
---- a/arch/blackfin/configs/TCM-BF518_defconfig
-+++ b/arch/blackfin/configs/TCM-BF518_defconfig
-@@ -23,7 +23,7 @@ CONFIG_MODULE_UNLOAD=y
- # CONFIG_BLK_DEV_BSG is not set
- # CONFIG_IOSCHED_DEADLINE is not set
- # CONFIG_IOSCHED_CFQ is not set
--CONFIG_PREEMPT_VOLUNTARY=y
-+CONFIG_PREEMPT=y
- CONFIG_BF518=y
- CONFIG_BF_REV_0_1=y
- CONFIG_BFIN518F_TCM=y
-diff --git a/arch/mips/configs/fuloong2e_defconfig b/arch/mips/configs/fuloong2e_defconfig
-index 499f51498ecb..f7cb39b0662c 100644
---- a/arch/mips/configs/fuloong2e_defconfig
-+++ b/arch/mips/configs/fuloong2e_defconfig
-@@ -2,7 +2,8 @@ CONFIG_MACH_LOONGSON64=y
- CONFIG_64BIT=y
- CONFIG_NO_HZ=y
- CONFIG_HIGH_RES_TIMERS=y
--CONFIG_PREEMPT_VOLUNTARY=y
-+CONFIG_PREEMPT=y
-+CONFIG_EXPERIMENTAL=y
- CONFIG_LOCALVERSION="-fuloong2e"
- # CONFIG_LOCALVERSION_AUTO is not set
- CONFIG_SYSVIPC=y
-diff --git a/arch/mips/configs/gpr_defconfig b/arch/mips/configs/gpr_defconfig
-index 55438fc9991e..db03ef4f737d 100644
---- a/arch/mips/configs/gpr_defconfig
-+++ b/arch/mips/configs/gpr_defconfig
-@@ -1,7 +1,8 @@
- CONFIG_MIPS_ALCHEMY=y
- CONFIG_MIPS_GPR=y
- CONFIG_HIGH_RES_TIMERS=y
--CONFIG_PREEMPT_VOLUNTARY=y
-+CONFIG_PREEMPT=y
-+CONFIG_EXPERIMENTAL=y
- # CONFIG_LOCALVERSION_AUTO is not set
- CONFIG_SYSVIPC=y
- CONFIG_POSIX_MQUEUE=y
-diff --git a/arch/mips/configs/ip22_defconfig b/arch/mips/configs/ip22_defconfig
-index 83e8fe2064aa..93e7b167433b 100644
---- a/arch/mips/configs/ip22_defconfig
-+++ b/arch/mips/configs/ip22_defconfig
-@@ -3,7 +3,8 @@ CONFIG_CPU_R5000=y
- CONFIG_NO_HZ=y
- CONFIG_HIGH_RES_TIMERS=y
- CONFIG_HZ_1000=y
--CONFIG_PREEMPT_VOLUNTARY=y
-+CONFIG_PREEMPT=y
-+CONFIG_EXPERIMENTAL=y
- CONFIG_SYSVIPC=y
- CONFIG_IKCONFIG=y
- CONFIG_IKCONFIG_PROC=y
-diff --git a/arch/mips/configs/ip28_defconfig b/arch/mips/configs/ip28_defconfig
-index d0a4c2cfacf8..6f0600e99c25 100644
---- a/arch/mips/configs/ip28_defconfig
-+++ b/arch/mips/configs/ip28_defconfig
-@@ -1,6 +1,7 @@
- CONFIG_SGI_IP28=y
- CONFIG_ARC_CONSOLE=y
--CONFIG_PREEMPT_VOLUNTARY=y
-+CONFIG_PREEMPT=y
-+CONFIG_EXPERIMENTAL=y
- CONFIG_SYSVIPC=y
- CONFIG_IKCONFIG=y
- CONFIG_IKCONFIG_PROC=y
-diff --git a/arch/mips/configs/jazz_defconfig b/arch/mips/configs/jazz_defconfig
-index 9ad1c94376c8..1d62ce7ff5dc 100644
---- a/arch/mips/configs/jazz_defconfig
-+++ b/arch/mips/configs/jazz_defconfig
-@@ -1,6 +1,7 @@
- CONFIG_MACH_JAZZ=y
- CONFIG_OLIVETTI_M700=y
--CONFIG_PREEMPT_VOLUNTARY=y
-+CONFIG_PREEMPT=y
-+CONFIG_EXPERIMENTAL=y
- CONFIG_SYSVIPC=y
- CONFIG_POSIX_MQUEUE=y
- CONFIG_BSD_PROCESS_ACCT=y
-diff --git a/arch/mips/configs/mtx1_defconfig b/arch/mips/configs/mtx1_defconfig
-index c3d0d0a6e044..aa3426d5f7d7 100644
---- a/arch/mips/configs/mtx1_defconfig
-+++ b/arch/mips/configs/mtx1_defconfig
-@@ -1,6 +1,7 @@
- CONFIG_MIPS_ALCHEMY=y
- CONFIG_MIPS_MTX1=y
--CONFIG_PREEMPT_VOLUNTARY=y
-+CONFIG_PREEMPT=y
-+CONFIG_EXPERIMENTAL=y
- # CONFIG_LOCALVERSION_AUTO is not set
- CONFIG_SYSVIPC=y
- CONFIG_POSIX_MQUEUE=y
-diff --git a/arch/mips/configs/nlm_xlr_defconfig b/arch/mips/configs/nlm_xlr_defconfig
-index 1e18fd7de209..b514e91e5426 100644
---- a/arch/mips/configs/nlm_xlr_defconfig
-+++ b/arch/mips/configs/nlm_xlr_defconfig
-@@ -5,7 +5,7 @@ CONFIG_DEFAULT_MMAP_MIN_ADDR=65536
- CONFIG_SMP=y
- CONFIG_NO_HZ=y
- CONFIG_HIGH_RES_TIMERS=y
--CONFIG_PREEMPT_VOLUNTARY=y
-+CONFIG_PREEMPT=y
- CONFIG_KEXEC=y
- CONFIG_CROSS_COMPILE=""
- # CONFIG_LOCALVERSION_AUTO is not set
-diff --git a/arch/mips/configs/pic32mzda_defconfig b/arch/mips/configs/pic32mzda_defconfig
-index 52192c632ae8..96b087498dab 100644
---- a/arch/mips/configs/pic32mzda_defconfig
-+++ b/arch/mips/configs/pic32mzda_defconfig
-@@ -1,7 +1,7 @@
- CONFIG_MACH_PIC32=y
- CONFIG_DTB_PIC32_MZDA_SK=y
- CONFIG_HZ_100=y
--CONFIG_PREEMPT_VOLUNTARY=y
-+CONFIG_PREEMPT=y
- # CONFIG_SECCOMP is not set
- CONFIG_SYSVIPC=y
- CONFIG_NO_HZ=y
-diff --git a/arch/mips/configs/pistachio_defconfig b/arch/mips/configs/pistachio_defconfig
-index b22a3cf149b6..cfffca3d37f4 100644
---- a/arch/mips/configs/pistachio_defconfig
-+++ b/arch/mips/configs/pistachio_defconfig
-@@ -5,7 +5,7 @@ CONFIG_MIPS_CPS=y
- CONFIG_DEFAULT_MMAP_MIN_ADDR=32768
- CONFIG_ZSMALLOC=y
- CONFIG_NR_CPUS=4
--CONFIG_PREEMPT_VOLUNTARY=y
-+CONFIG_PREEMPT=y
- # CONFIG_LOCALVERSION_AUTO is not set
- CONFIG_DEFAULT_HOSTNAME="localhost"
- CONFIG_SYSVIPC=y
-diff --git a/arch/mips/configs/pnx8335_stb225_defconfig b/arch/mips/configs/pnx8335_stb225_defconfig
-index 81b5eb89446c..19f8cea849a1 100644
---- a/arch/mips/configs/pnx8335_stb225_defconfig
-+++ b/arch/mips/configs/pnx8335_stb225_defconfig
-@@ -3,7 +3,7 @@ CONFIG_CPU_LITTLE_ENDIAN=y
- CONFIG_NO_HZ=y
- CONFIG_HIGH_RES_TIMERS=y
- CONFIG_HZ_128=y
--CONFIG_PREEMPT_VOLUNTARY=y
-+CONFIG_PREEMPT=y
- # CONFIG_SECCOMP is not set
- # CONFIG_LOCALVERSION_AUTO is not set
- # CONFIG_SWAP is not set
-diff --git a/arch/mips/configs/rm200_defconfig b/arch/mips/configs/rm200_defconfig
-index 99679e514042..2ced507a8ba7 100644
---- a/arch/mips/configs/rm200_defconfig
-+++ b/arch/mips/configs/rm200_defconfig
-@@ -2,7 +2,8 @@ CONFIG_SNI_RM=y
- CONFIG_CPU_LITTLE_ENDIAN=y
- CONFIG_ARC_CONSOLE=y
- CONFIG_HZ_1000=y
--CONFIG_PREEMPT_VOLUNTARY=y
-+CONFIG_PREEMPT=y
-+CONFIG_EXPERIMENTAL=y
- CONFIG_SYSVIPC=y
- CONFIG_POSIX_MQUEUE=y
- CONFIG_BSD_PROCESS_ACCT=y
-diff --git a/arch/parisc/configs/712_defconfig b/arch/parisc/configs/712_defconfig
-index ccc109761f44..a6a5b0b7a9c9 100644
---- a/arch/parisc/configs/712_defconfig
-+++ b/arch/parisc/configs/712_defconfig
-@@ -13,7 +13,7 @@ CONFIG_MODULES=y
- CONFIG_MODULE_UNLOAD=y
- CONFIG_MODULE_FORCE_UNLOAD=y
- CONFIG_PA7100LC=y
--CONFIG_PREEMPT_VOLUNTARY=y
-+CONFIG_PREEMPT=y
- CONFIG_GSC_LASI=y
- # CONFIG_PDC_CHASSIS is not set
- CONFIG_BINFMT_MISC=m
-diff --git a/arch/parisc/configs/c3000_defconfig b/arch/parisc/configs/c3000_defconfig
-index 8d41a73bd71b..b8e0a6662ff9 100644
---- a/arch/parisc/configs/c3000_defconfig
-+++ b/arch/parisc/configs/c3000_defconfig
-@@ -13,7 +13,7 @@ CONFIG_MODULES=y
- CONFIG_MODULE_UNLOAD=y
- CONFIG_MODULE_FORCE_UNLOAD=y
- CONFIG_PA8X00=y
--CONFIG_PREEMPT_VOLUNTARY=y
-+CONFIG_PREEMPT=y
- # CONFIG_GSC is not set
- CONFIG_PCI=y
- CONFIG_PCI_LBA=y
-diff --git a/arch/parisc/configs/default_defconfig b/arch/parisc/configs/default_defconfig
-index 52c9050a7c5c..8d86d2e989f4 100644
---- a/arch/parisc/configs/default_defconfig
-+++ b/arch/parisc/configs/default_defconfig
-@@ -14,7 +14,7 @@ CONFIG_MODULE_UNLOAD=y
- CONFIG_MODULE_FORCE_UNLOAD=y
- # CONFIG_BLK_DEV_BSG is not set
- CONFIG_PA7100LC=y
--CONFIG_PREEMPT_VOLUNTARY=y
-+CONFIG_PREEMPT=y
- CONFIG_IOMMU_CCIO=y
- CONFIG_GSC_LASI=y
- CONFIG_GSC_WAX=y
-diff --git a/arch/powerpc/configs/c2k_defconfig b/arch/powerpc/configs/c2k_defconfig
-index f1552af9eecc..f8505e6ec7b3 100644
---- a/arch/powerpc/configs/c2k_defconfig
-+++ b/arch/powerpc/configs/c2k_defconfig
-@@ -29,7 +29,7 @@ CONFIG_CPU_FREQ_GOV_POWERSAVE=m
- CONFIG_CPU_FREQ_GOV_ONDEMAND=m
- CONFIG_GEN_RTC=y
- CONFIG_HIGHMEM=y
--CONFIG_PREEMPT_VOLUNTARY=y
-+CONFIG_PREEMPT=y
- CONFIG_BINFMT_MISC=y
- CONFIG_PM=y
- CONFIG_PCI_MSI=y
-diff --git a/arch/powerpc/configs/ppc6xx_defconfig b/arch/powerpc/configs/ppc6xx_defconfig
-index da0e8d535eb8..c016af41ab4f 100644
---- a/arch/powerpc/configs/ppc6xx_defconfig
-+++ b/arch/powerpc/configs/ppc6xx_defconfig
-@@ -74,7 +74,7 @@ CONFIG_QE_GPIO=y
- CONFIG_MCU_MPC8349EMITX=y
- CONFIG_HIGHMEM=y
- CONFIG_HZ_1000=y
--CONFIG_PREEMPT_VOLUNTARY=y
-+CONFIG_PREEMPT=y
- CONFIG_BINFMT_MISC=y
- CONFIG_HIBERNATION=y
- CONFIG_PM_DEBUG=y
-diff --git a/arch/score/configs/spct6600_defconfig b/arch/score/configs/spct6600_defconfig
-index b2d8802f43b4..46434ca1fa10 100644
---- a/arch/score/configs/spct6600_defconfig
-+++ b/arch/score/configs/spct6600_defconfig
-@@ -1,5 +1,5 @@
- CONFIG_HZ_100=y
--CONFIG_PREEMPT_VOLUNTARY=y
-+CONFIG_PREEMPT=y
- CONFIG_EXPERIMENTAL=y
- # CONFIG_LOCALVERSION_AUTO is not set
- CONFIG_SYSVIPC=y
-diff --git a/arch/sh/configs/se7712_defconfig b/arch/sh/configs/se7712_defconfig
-index 5a1097641247..eb5fbf554e7f 100644
---- a/arch/sh/configs/se7712_defconfig
-+++ b/arch/sh/configs/se7712_defconfig
-@@ -23,7 +23,7 @@ CONFIG_FLATMEM_MANUAL=y
- CONFIG_SH_SOLUTION_ENGINE=y
- CONFIG_SH_PCLK_FREQ=66666666
- CONFIG_HEARTBEAT=y
--CONFIG_PREEMPT_VOLUNTARY=y
-+CONFIG_PREEMPT=y
- CONFIG_CMDLINE_OVERWRITE=y
- CONFIG_CMDLINE="console=ttySC0,115200 root=/dev/sda1"
- CONFIG_NET=y
-diff --git a/arch/sh/configs/se7721_defconfig b/arch/sh/configs/se7721_defconfig
-index 9c0ef13bee10..cbaa65c8bf9e 100644
---- a/arch/sh/configs/se7721_defconfig
-+++ b/arch/sh/configs/se7721_defconfig
-@@ -23,7 +23,7 @@ CONFIG_FLATMEM_MANUAL=y
- CONFIG_SH_7721_SOLUTION_ENGINE=y
- CONFIG_SH_PCLK_FREQ=33333333
- CONFIG_HEARTBEAT=y
--CONFIG_PREEMPT_VOLUNTARY=y
-+CONFIG_PREEMPT=y
- CONFIG_CMDLINE_OVERWRITE=y
- CONFIG_CMDLINE="console=ttySC0,115200 root=/dev/sda2"
- CONFIG_NET=y
-diff --git a/arch/sh/configs/titan_defconfig b/arch/sh/configs/titan_defconfig
-index ceb48e9b70f4..1a69eda6610c 100644
---- a/arch/sh/configs/titan_defconfig
-+++ b/arch/sh/configs/titan_defconfig
-@@ -20,7 +20,7 @@ CONFIG_SH_TITAN=y
- CONFIG_SH_PCLK_FREQ=30000000
- CONFIG_SH_DMA=y
- CONFIG_SH_DMA_API=y
--CONFIG_PREEMPT_VOLUNTARY=y
-+CONFIG_PREEMPT=y
- CONFIG_CMDLINE_OVERWRITE=y
- CONFIG_CMDLINE="console=ttySC1,38400N81 root=/dev/nfs ip=:::::eth1:autoconf rw"
- CONFIG_PCI=y
-diff --git a/arch/sparc/configs/sparc64_defconfig b/arch/sparc/configs/sparc64_defconfig
-index 4d4e1cc6402f..04bea1d28ba7 100644
---- a/arch/sparc/configs/sparc64_defconfig
-+++ b/arch/sparc/configs/sparc64_defconfig
-@@ -22,7 +22,7 @@ CONFIG_NO_HZ=y
- CONFIG_HIGH_RES_TIMERS=y
- CONFIG_NUMA=y
- CONFIG_DEFAULT_MMAP_MIN_ADDR=8192
--CONFIG_PREEMPT_VOLUNTARY=y
-+CONFIG_PREEMPT=y
- CONFIG_SUN_LDOMS=y
- CONFIG_PCI=y
- CONFIG_PCI_MSI=y
-diff --git a/arch/tile/configs/tilegx_defconfig b/arch/tile/configs/tilegx_defconfig
-index 9f94435cc44f..aa78ee6cd5eb 100644
---- a/arch/tile/configs/tilegx_defconfig
-+++ b/arch/tile/configs/tilegx_defconfig
-@@ -47,7 +47,7 @@ CONFIG_CFQ_GROUP_IOSCHED=y
- CONFIG_NR_CPUS=100
- CONFIG_HZ_100=y
- # CONFIG_COMPACTION is not set
--CONFIG_PREEMPT_VOLUNTARY=y
-+CONFIG_PREEMPT=y
- CONFIG_TILE_PCI_IO=y
- CONFIG_PCI_DEBUG=y
- # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
-diff --git a/arch/tile/configs/tilepro_defconfig b/arch/tile/configs/tilepro_defconfig
-index 1c5bd4f8ffca..38005862062c 100644
---- a/arch/tile/configs/tilepro_defconfig
-+++ b/arch/tile/configs/tilepro_defconfig
-@@ -44,7 +44,7 @@ CONFIG_KARMA_PARTITION=y
- CONFIG_CFQ_GROUP_IOSCHED=y
- CONFIG_HZ_100=y
- # CONFIG_COMPACTION is not set
--CONFIG_PREEMPT_VOLUNTARY=y
-+CONFIG_PREEMPT=y
- CONFIG_PCI_DEBUG=y
- # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
- CONFIG_BINFMT_MISC=y
-diff --git a/arch/x86/configs/i386_defconfig b/arch/x86/configs/i386_defconfig
-index 0eb9f92f3717..e5890ae917e5 100644
---- a/arch/x86/configs/i386_defconfig
-+++ b/arch/x86/configs/i386_defconfig
-@@ -41,7 +41,7 @@ CONFIG_SMP=y
- CONFIG_X86_GENERIC=y
- CONFIG_HPET_TIMER=y
- CONFIG_SCHED_SMT=y
--CONFIG_PREEMPT_VOLUNTARY=y
-+CONFIG_PREEMPT=y
- CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y
- CONFIG_X86_MCE=y
- CONFIG_X86_REBOOTFIXUPS=y
-diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig
-index 4a4b16e56d35..7452dcadda74 100644
---- a/arch/x86/configs/x86_64_defconfig
-+++ b/arch/x86/configs/x86_64_defconfig
-@@ -40,7 +40,7 @@ CONFIG_SMP=y
- CONFIG_CALGARY_IOMMU=y
- CONFIG_NR_CPUS=64
- CONFIG_SCHED_SMT=y
--CONFIG_PREEMPT_VOLUNTARY=y
-+CONFIG_PREEMPT=y
- CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y
- CONFIG_X86_MCE=y
- CONFIG_MICROCODE=y
-diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt
-index 3f9c97419f02..1dc79ec7ad09 100644
---- a/kernel/Kconfig.preempt
-+++ b/kernel/Kconfig.preempt
-@@ -1,7 +1,7 @@
-
- choice
- prompt "Preemption Model"
-- default PREEMPT_NONE
-+ default PREEMPT
-
- config PREEMPT_NONE
- bool "No Forced Preemption (Server)"
-@@ -17,7 +17,7 @@ config PREEMPT_NONE
- latencies.
-
- config PREEMPT_VOLUNTARY
-- bool "Voluntary Kernel Preemption (Desktop)"
-+ bool "Voluntary Kernel Preemption (Nothing)"
- help
- This option reduces the latency of the kernel by adding more
- "explicit preemption points" to the kernel code. These new
-@@ -31,7 +31,8 @@ config PREEMPT_VOLUNTARY
- applications to run more 'smoothly' even when the system is
- under load.
-
-- Select this if you are building a kernel for a desktop system.
-+ Select this for no system in particular (choose Preemptible
-+ instead on a desktop if you know what's good for you).
-
- config PREEMPT
- bool "Preemptible Kernel (Low-Latency Desktop)"
---
-2.11.0
-
diff --git a/sys-kernel/linux-image-redcore-lts/files/4.14-0003-Expose-vmsplit-for-our-poor-32-bit-users.patch b/sys-kernel/linux-image-redcore-lts/files/4.14-0003-Expose-vmsplit-for-our-poor-32-bit-users.patch
deleted file mode 100644
index b7897dbe..00000000
--- a/sys-kernel/linux-image-redcore-lts/files/4.14-0003-Expose-vmsplit-for-our-poor-32-bit-users.patch
+++ /dev/null
@@ -1,48 +0,0 @@
-From 44fc740a3ff85d378c28a416a076cc7e019d7b8c Mon Sep 17 00:00:00 2001
-From: Con Kolivas <kernel@kolivas.org>
-Date: Fri, 12 May 2017 13:07:37 +1000
-Subject: [PATCH 03/16] Expose vmsplit for our poor 32 bit users.
-
----
- arch/x86/Kconfig | 12 ++++++------
- 1 file changed, 6 insertions(+), 6 deletions(-)
-
-diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
-index e06a7b4e1dc4..931aba4fc567 100644
---- a/arch/x86/Kconfig
-+++ b/arch/x86/Kconfig
-@@ -1361,7 +1361,7 @@ config HIGHMEM64G
- endchoice
-
- choice
-- prompt "Memory split" if EXPERT
-+ prompt "Memory split"
- default VMSPLIT_3G
- depends on X86_32
- ---help---
-@@ -1381,17 +1381,17 @@ choice
- option alone!
-
- config VMSPLIT_3G
-- bool "3G/1G user/kernel split"
-+ bool "Default 896MB lowmem (3G/1G user/kernel split)"
- config VMSPLIT_3G_OPT
- depends on !X86_PAE
-- bool "3G/1G user/kernel split (for full 1G low memory)"
-+ bool "1GB lowmem (3G/1G user/kernel split)"
- config VMSPLIT_2G
-- bool "2G/2G user/kernel split"
-+ bool "2GB lowmem (2G/2G user/kernel split)"
- config VMSPLIT_2G_OPT
- depends on !X86_PAE
-- bool "2G/2G user/kernel split (for full 2G low memory)"
-+ bool "2GB lowmem (2G/2G user/kernel split)"
- config VMSPLIT_1G
-- bool "1G/3G user/kernel split"
-+ bool "3GB lowmem (1G/3G user/kernel split)"
- endchoice
-
- config PAGE_OFFSET
---
-2.11.0
-
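
The renamed prompts spell out what each split actually buys on 32-bit x86: the
default 3G/1G split leaves 896 MB of directly mapped lowmem (the top 128 MB of
the kernel's 1 GB is reserved for vmalloc and fixmaps), while the 1G/3G split
trades per-process address space for 3 GB of lowmem. Exposing the choice
unconditionally, rather than only under EXPERT, is the point of the patch.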
diff --git a/sys-kernel/linux-image-redcore-lts/files/4.14-0004-Create-highres-timeout-variants-of-schedule_timeout-.patch b/sys-kernel/linux-image-redcore-lts/files/4.14-0004-Create-highres-timeout-variants-of-schedule_timeout-.patch
deleted file mode 100644
index 3c182fbe..00000000
--- a/sys-kernel/linux-image-redcore-lts/files/4.14-0004-Create-highres-timeout-variants-of-schedule_timeout-.patch
+++ /dev/null
@@ -1,153 +0,0 @@
-From d27b58b0707ac311be5a51594fc6f22ed1d109e5 Mon Sep 17 00:00:00 2001
-From: Con Kolivas <kernel@kolivas.org>
-Date: Sat, 12 Aug 2017 11:53:39 +1000
-Subject: [PATCH 04/16] Create highres timeout variants of schedule_timeout
- functions.
-
----
- include/linux/freezer.h | 1 +
- include/linux/sched.h | 31 +++++++++++++++++++--
- kernel/time/hrtimer.c | 71 +++++++++++++++++++++++++++++++++++++++++++++++++
- 3 files changed, 101 insertions(+), 2 deletions(-)
-
-diff --git a/include/linux/freezer.h b/include/linux/freezer.h
-index 3995df1d068f..f8645e8f2444 100644
---- a/include/linux/freezer.h
-+++ b/include/linux/freezer.h
-@@ -297,6 +297,7 @@ static inline void set_freezable(void) {}
- #define wait_event_freezekillable_unsafe(wq, condition) \
- wait_event_killable(wq, condition)
-
-+#define pm_freezing (false)
- #endif /* !CONFIG_FREEZER */
-
- #endif /* FREEZER_H_INCLUDED */
-diff --git a/include/linux/sched.h b/include/linux/sched.h
-index 35dc91a0e2ed..38852ebfa864 100644
---- a/include/linux/sched.h
-+++ b/include/linux/sched.h
-@@ -173,13 +173,40 @@ extern cpumask_var_t cpu_isolated_map;
-
- extern void scheduler_tick(void);
-
--#define MAX_SCHEDULE_TIMEOUT LONG_MAX
--
-+#define MAX_SCHEDULE_TIMEOUT LONG_MAX
- extern long schedule_timeout(long timeout);
- extern long schedule_timeout_interruptible(long timeout);
- extern long schedule_timeout_killable(long timeout);
- extern long schedule_timeout_uninterruptible(long timeout);
- extern long schedule_timeout_idle(long timeout);
-+
-+#ifdef CONFIG_HIGH_RES_TIMERS
-+extern long schedule_msec_hrtimeout(long timeout);
-+extern long schedule_min_hrtimeout(void);
-+extern long schedule_msec_hrtimeout_interruptible(long timeout);
-+extern long schedule_msec_hrtimeout_uninterruptible(long timeout);
-+#else
-+static inline long schedule_msec_hrtimeout(long timeout)
-+{
-+ return schedule_timeout(msecs_to_jiffies(timeout));
-+}
-+
-+static inline long schedule_min_hrtimeout(void)
-+{
-+ return schedule_timeout(1);
-+}
-+
-+static inline long schedule_msec_hrtimeout_interruptible(long timeout)
-+{
-+ return schedule_timeout_interruptible(msecs_to_jiffies(timeout));
-+}
-+
-+static inline long schedule_msec_hrtimeout_uninterruptible(long timeout)
-+{
-+ return schedule_timeout_uninterruptible(msecs_to_jiffies(timeout));
-+}
-+#endif
-+
- asmlinkage void schedule(void);
- extern void schedule_preempt_disabled(void);
-
-diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
-index 88f75f92ef36..13227cf2814c 100644
---- a/kernel/time/hrtimer.c
-+++ b/kernel/time/hrtimer.c
-@@ -1787,3 +1787,74 @@ int __sched schedule_hrtimeout(ktime_t *expires,
- return schedule_hrtimeout_range(expires, 0, mode);
- }
- EXPORT_SYMBOL_GPL(schedule_hrtimeout);
-+
-+/*
-+ * As per schedule_hrtimeout but takes a millisecond value and returns how
-+ * many milliseconds are left.
-+ */
-+long __sched schedule_msec_hrtimeout(long timeout)
-+{
-+ struct hrtimer_sleeper t;
-+ int delta, secs, jiffs;
-+ ktime_t expires;
-+
-+ if (!timeout) {
-+ __set_current_state(TASK_RUNNING);
-+ return 0;
-+ }
-+
-+ jiffs = msecs_to_jiffies(timeout);
-+ /*
-+ * If regular timer resolution is adequate or hrtimer resolution is not
-+ * (yet) better than Hz, as would occur during startup, use regular
-+ * timers.
-+ */
-+ if (jiffs > 4 || hrtimer_resolution >= NSEC_PER_SEC / HZ)
-+ return schedule_timeout(jiffs);
-+
-+ secs = timeout / 1000;
-+ delta = (timeout % 1000) * NSEC_PER_MSEC;
-+ expires = ktime_set(secs, delta);
-+
-+ hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-+ hrtimer_set_expires_range_ns(&t.timer, expires, delta);
-+
-+ hrtimer_init_sleeper(&t, current);
-+
-+ hrtimer_start_expires(&t.timer, HRTIMER_MODE_REL);
-+
-+ if (likely(t.task))
-+ schedule();
-+
-+ hrtimer_cancel(&t.timer);
-+ destroy_hrtimer_on_stack(&t.timer);
-+
-+ __set_current_state(TASK_RUNNING);
-+
-+ expires = hrtimer_expires_remaining(&t.timer);
-+ timeout = ktime_to_ms(expires);
-+ return timeout < 0 ? 0 : timeout;
-+}
-+
-+EXPORT_SYMBOL(schedule_msec_hrtimeout);
-+
-+long __sched schedule_min_hrtimeout(void)
-+{
-+ return schedule_msec_hrtimeout(1);
-+}
-+
-+EXPORT_SYMBOL(schedule_min_hrtimeout);
-+
-+long __sched schedule_msec_hrtimeout_interruptible(long timeout)
-+{
-+ __set_current_state(TASK_INTERRUPTIBLE);
-+ return schedule_msec_hrtimeout(timeout);
-+}
-+EXPORT_SYMBOL(schedule_msec_hrtimeout_interruptible);
-+
-+long __sched schedule_msec_hrtimeout_uninterruptible(long timeout)
-+{
-+ __set_current_state(TASK_UNINTERRUPTIBLE);
-+ return schedule_msec_hrtimeout(timeout);
-+}
-+EXPORT_SYMBOL(schedule_msec_hrtimeout_uninterruptible);
---
-2.11.0
-
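
For orientation, a sketch of a driver-style poll loop built on the helpers
added above. demo_hw_ready() and the 5 ms period are assumptions; on
!CONFIG_HIGH_RES_TIMERS kernels the call degrades to tick-based
schedule_timeout() exactly as the stubs above specify.

	#include <linux/sched.h>
	#include <linux/errno.h>

	/* Sketch: poll a (hypothetical) ready flag roughly every 5 ms. */
	static int demo_wait_for_hw(void)
	{
		int tries = 100;

		while (!demo_hw_ready() && tries--) {
			/* sets TASK_UNINTERRUPTIBLE, then sleeps ~5 ms via hrtimers */
			schedule_msec_hrtimeout_uninterruptible(5);
		}
		return demo_hw_ready() ? 0 : -ETIMEDOUT;
	}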
diff --git a/sys-kernel/linux-image-redcore-lts/files/4.14-0005-Special-case-calls-of-schedule_timeout-1-to-use-the-.patch b/sys-kernel/linux-image-redcore-lts/files/4.14-0005-Special-case-calls-of-schedule_timeout-1-to-use-the-.patch
deleted file mode 100644
index 3c889719..00000000
--- a/sys-kernel/linux-image-redcore-lts/files/4.14-0005-Special-case-calls-of-schedule_timeout-1-to-use-the-.patch
+++ /dev/null
@@ -1,50 +0,0 @@
-From 5da7d1778b96c514394334c92de9b3d8d71f4a29 Mon Sep 17 00:00:00 2001
-From: Con Kolivas <kernel@kolivas.org>
-Date: Sat, 5 Nov 2016 09:27:36 +1100
-Subject: [PATCH 05/16] Special case calls of schedule_timeout(1) to use the
- min hrtimeout of 1ms, working around low Hz resolutions.
-
----
- kernel/time/timer.c | 17 +++++++++++++++--
- 1 file changed, 15 insertions(+), 2 deletions(-)
-
-diff --git a/kernel/time/timer.c b/kernel/time/timer.c
-index 9c18e16059a3..dd4d1b193286 100644
---- a/kernel/time/timer.c
-+++ b/kernel/time/timer.c
-@@ -1741,6 +1741,19 @@ signed long __sched schedule_timeout(signed long timeout)
-
- expire = timeout + jiffies;
-
-+#ifdef CONFIG_HIGH_RES_TIMERS
-+ if (timeout == 1 && hrtimer_resolution < NSEC_PER_SEC / HZ) {
-+ /*
-+ * Special case 1 as being a request for the minimum timeout
-+ * and use highres timers to timeout after 1ms to workaround
-+ * the granularity of low Hz tick timers.
-+ */
-+ if (!schedule_min_hrtimeout())
-+ return 0;
-+ goto out_timeout;
-+ }
-+#endif
-+
- setup_timer_on_stack(&timer, process_timeout, (unsigned long)current);
- __mod_timer(&timer, expire, false);
- schedule();
-@@ -1748,10 +1761,10 @@ signed long __sched schedule_timeout(signed long timeout)
-
- /* Remove the timer from the object tracker */
- destroy_timer_on_stack(&timer);
--
-+out_timeout:
- timeout = expire - jiffies;
-
-- out:
-+out:
- return timeout < 0 ? 0 : timeout;
- }
- EXPORT_SYMBOL(schedule_timeout);
---
-2.11.0
-
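
Context for the special case above: schedule_timeout(1) is the idiom drivers
use for "shortest possible sleep", but at HZ=100 it just waits for the next
tick, anywhere up to 10 ms away. Routing it through schedule_min_hrtimeout()
bounds the wait near 1 ms whenever hrtimer resolution beats the tick, without
touching the many call sites directly (patch 0007 converts those explicitly).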
diff --git a/sys-kernel/linux-image-redcore-lts/files/4.14-0006-Convert-msleep-to-use-hrtimers-when-active.patch b/sys-kernel/linux-image-redcore-lts/files/4.14-0006-Convert-msleep-to-use-hrtimers-when-active.patch
deleted file mode 100644
index 2f065652..00000000
--- a/sys-kernel/linux-image-redcore-lts/files/4.14-0006-Convert-msleep-to-use-hrtimers-when-active.patch
+++ /dev/null
@@ -1,54 +0,0 @@
-From 9df803c28bb8ccb2588c0ccaf857b9e673175fed Mon Sep 17 00:00:00 2001
-From: Con Kolivas <kernel@kolivas.org>
-Date: Fri, 4 Nov 2016 09:25:54 +1100
-Subject: [PATCH 06/16] Convert msleep to use hrtimers when active.
-
----
- kernel/time/timer.c | 24 ++++++++++++++++++++++--
- 1 file changed, 22 insertions(+), 2 deletions(-)
-
-diff --git a/kernel/time/timer.c b/kernel/time/timer.c
-index dd4d1b193286..c68cb9307f64 100644
---- a/kernel/time/timer.c
-+++ b/kernel/time/timer.c
-@@ -1884,7 +1884,19 @@ void __init init_timers(void)
- */
- void msleep(unsigned int msecs)
- {
-- unsigned long timeout = msecs_to_jiffies(msecs) + 1;
-+ int jiffs = msecs_to_jiffies(msecs);
-+ unsigned long timeout;
-+
-+ /*
-+ * Use high resolution timers where the resolution of tick based
-+ * timers is inadequate.
-+ */
-+ if (jiffs < 5 && hrtimer_resolution < NSEC_PER_SEC / HZ) {
-+ while (msecs)
-+ msecs = schedule_msec_hrtimeout_uninterruptible(msecs);
-+ return;
-+ }
-+ timeout = msecs_to_jiffies(msecs) + 1;
-
- while (timeout)
- timeout = schedule_timeout_uninterruptible(timeout);
-@@ -1898,7 +1910,15 @@ EXPORT_SYMBOL(msleep);
- */
- unsigned long msleep_interruptible(unsigned int msecs)
- {
-- unsigned long timeout = msecs_to_jiffies(msecs) + 1;
-+ int jiffs = msecs_to_jiffies(msecs);
-+ unsigned long timeout;
-+
-+ if (jiffs < 5 && hrtimer_resolution < NSEC_PER_SEC / HZ) {
-+ while (msecs && !signal_pending(current))
-+ msecs = schedule_msec_hrtimeout_interruptible(msecs);
-+ return msecs;
-+ }
-+ timeout = msecs_to_jiffies(msecs) + 1;
-
- while (timeout && !signal_pending(current))
- timeout = schedule_timeout_interruptible(timeout);
---
-2.11.0
-
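
For the threshold above: msecs_to_jiffies(msecs) < 5 routes any request shorter
than five ticks through hrtimers, i.e. sleeps under 50 ms at HZ=100 or under
20 ms at HZ=250. That is precisely the range where the old
msecs_to_jiffies(msecs) + 1 rounding hurt most, turning msleep(1) into a sleep
of up to two full ticks.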
diff --git a/sys-kernel/linux-image-redcore-lts/files/4.14-0007-Replace-all-schedule-timeout-1-with-schedule_min_hrt.patch b/sys-kernel/linux-image-redcore-lts/files/4.14-0007-Replace-all-schedule-timeout-1-with-schedule_min_hrt.patch
deleted file mode 100644
index ff071da8..00000000
--- a/sys-kernel/linux-image-redcore-lts/files/4.14-0007-Replace-all-schedule-timeout-1-with-schedule_min_hrt.patch
+++ /dev/null
@@ -1,529 +0,0 @@
-diff -Nur a/drivers/block/swim.c b/drivers/block/swim.c
---- a/drivers/block/swim.c 2018-10-10 07:54:28.000000000 +0100
-+++ b/drivers/block/swim.c 2018-11-03 16:30:39.471807304 +0000
-@@ -332,7 +332,7 @@
- if (swim_readbit(base, MOTOR_ON))
- break;
- current->state = TASK_INTERRUPTIBLE;
-- schedule_timeout(1);
-+ schedule_min_hrtimeout();
- }
- } else if (action == OFF) {
- swim_action(base, MOTOR_OFF);
-@@ -351,7 +351,7 @@
- if (!swim_readbit(base, DISK_IN))
- break;
- current->state = TASK_INTERRUPTIBLE;
-- schedule_timeout(1);
-+ schedule_min_hrtimeout();
- }
- swim_select(base, RELAX);
- }
-@@ -375,7 +375,7 @@
- for (wait = 0; wait < HZ; wait++) {
-
- current->state = TASK_INTERRUPTIBLE;
-- schedule_timeout(1);
-+ schedule_min_hrtimeout();
-
- swim_select(base, RELAX);
- if (!swim_readbit(base, STEP))
-diff -Nur a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
---- a/drivers/bluetooth/hci_qca.c 2018-10-10 07:54:28.000000000 +0100
-+++ b/drivers/bluetooth/hci_qca.c 2018-11-03 16:31:56.065260061 +0000
-@@ -880,7 +880,7 @@
- * then host can communicate with new baudrate to controller
- */
- set_current_state(TASK_UNINTERRUPTIBLE);
-- schedule_timeout(msecs_to_jiffies(BAUDRATE_SETTLE_TIMEOUT_MS));
-+ schedule_msec_hrtimeout((BAUDRATE_SETTLE_TIMEOUT_MS));
- set_current_state(TASK_RUNNING);
-
- return 0;
-diff -Nur a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
---- a/drivers/char/ipmi/ipmi_msghandler.c 2018-10-10 07:54:28.000000000 +0100
-+++ b/drivers/char/ipmi/ipmi_msghandler.c 2018-11-03 16:30:39.473807368 +0000
-@@ -2953,7 +2953,7 @@
- /* Current message first, to preserve order */
- while (intf->curr_msg && !list_empty(&intf->waiting_rcv_msgs)) {
- /* Wait for the message to clear out. */
-- schedule_timeout(1);
-+ schedule_min_hrtimeout();
- }
-
- /* No need for locks, the interface is down. */
-diff -Nur a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
---- a/drivers/char/ipmi/ipmi_ssif.c 2018-10-10 07:54:28.000000000 +0100
-+++ b/drivers/char/ipmi/ipmi_ssif.c 2018-11-03 16:30:39.473807368 +0000
-@@ -1200,7 +1200,7 @@
-
- /* make sure the driver is not looking for flags any more. */
- while (ssif_info->ssif_state != SSIF_NORMAL)
-- schedule_timeout(1);
-+ schedule_min_hrtimeout();
-
- ssif_info->stopping = true;
- del_timer_sync(&ssif_info->retry_timer);
-diff -Nur a/drivers/char/snsc.c b/drivers/char/snsc.c
---- a/drivers/char/snsc.c 2018-10-10 07:54:28.000000000 +0100
-+++ b/drivers/char/snsc.c 2018-11-03 16:30:39.474807400 +0000
-@@ -198,7 +198,7 @@
- add_wait_queue(&sd->sd_rq, &wait);
- spin_unlock_irqrestore(&sd->sd_rlock, flags);
-
-- schedule_timeout(msecs_to_jiffies(SCDRV_TIMEOUT));
-+ schedule_msec_hrtimeout((SCDRV_TIMEOUT));
-
- remove_wait_queue(&sd->sd_rq, &wait);
- if (signal_pending(current)) {
-@@ -294,7 +294,7 @@
- add_wait_queue(&sd->sd_wq, &wait);
- spin_unlock_irqrestore(&sd->sd_wlock, flags);
-
-- schedule_timeout(msecs_to_jiffies(SCDRV_TIMEOUT));
-+ schedule_msec_hrtimeout((SCDRV_TIMEOUT));
-
- remove_wait_queue(&sd->sd_wq, &wait);
- if (signal_pending(current)) {
-diff -Nur a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
---- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c 2018-10-10 07:54:28.000000000 +0100
-+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c 2018-11-03 16:30:39.474807400 +0000
-@@ -235,7 +235,7 @@
- DRM_ERROR("SVGA device lockup.\n");
- break;
- }
-- schedule_timeout(1);
-+ schedule_min_hrtimeout();
- if (interruptible && signal_pending(current)) {
- ret = -ERESTARTSYS;
- break;
-diff -Nur a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
---- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c 2018-10-10 07:54:28.000000000 +0100
-+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c 2018-11-03 16:30:39.474807400 +0000
-@@ -202,7 +202,7 @@
- break;
- }
- if (lazy)
-- schedule_timeout(1);
-+ schedule_min_hrtimeout();
- else if ((++count & 0x0F) == 0) {
- /**
- * FIXME: Use schedule_hr_timeout here for
-diff -Nur a/drivers/media/pci/ivtv/ivtv-ioctl.c b/drivers/media/pci/ivtv/ivtv-ioctl.c
---- a/drivers/media/pci/ivtv/ivtv-ioctl.c 2018-10-10 07:54:28.000000000 +0100
-+++ b/drivers/media/pci/ivtv/ivtv-ioctl.c 2018-11-03 16:30:39.475807432 +0000
-@@ -1154,7 +1154,7 @@
- TASK_UNINTERRUPTIBLE);
- if ((read_reg(IVTV_REG_DEC_LINE_FIELD) >> 16) < 100)
- break;
-- schedule_timeout(msecs_to_jiffies(25));
-+ schedule_msec_hrtimeout((25));
- }
- finish_wait(&itv->vsync_waitq, &wait);
- mutex_lock(&itv->serialize_lock);
-diff -Nur a/drivers/media/pci/ivtv/ivtv-streams.c b/drivers/media/pci/ivtv/ivtv-streams.c
---- a/drivers/media/pci/ivtv/ivtv-streams.c 2018-10-10 07:54:28.000000000 +0100
-+++ b/drivers/media/pci/ivtv/ivtv-streams.c 2018-11-03 16:30:39.475807432 +0000
-@@ -834,7 +834,7 @@
- while (!test_bit(IVTV_F_I_EOS, &itv->i_flags) &&
- time_before(jiffies,
- then + msecs_to_jiffies(2000))) {
-- schedule_timeout(msecs_to_jiffies(10));
-+ schedule_msec_hrtimeout((10));
- }
-
- /* To convert jiffies to ms, we must multiply by 1000
-diff -Nur a/drivers/mfd/ucb1x00-core.c b/drivers/mfd/ucb1x00-core.c
---- a/drivers/mfd/ucb1x00-core.c 2018-10-10 07:54:28.000000000 +0100
-+++ b/drivers/mfd/ucb1x00-core.c 2018-11-03 16:30:39.476807464 +0000
-@@ -253,7 +253,7 @@
- break;
- /* yield to other processes */
- set_current_state(TASK_INTERRUPTIBLE);
-- schedule_timeout(1);
-+ schedule_min_hrtimeout();
- }
-
- return UCB_ADC_DAT(val);
-diff -Nur a/drivers/misc/sgi-xp/xpc_channel.c b/drivers/misc/sgi-xp/xpc_channel.c
---- a/drivers/misc/sgi-xp/xpc_channel.c 2018-10-10 07:54:28.000000000 +0100
-+++ b/drivers/misc/sgi-xp/xpc_channel.c 2018-11-03 16:30:39.476807464 +0000
-@@ -837,7 +837,7 @@
-
- atomic_inc(&ch->n_on_msg_allocate_wq);
- prepare_to_wait(&ch->msg_allocate_wq, &wait, TASK_INTERRUPTIBLE);
-- ret = schedule_timeout(1);
-+ ret = schedule_min_hrtimeout();
- finish_wait(&ch->msg_allocate_wq, &wait);
- atomic_dec(&ch->n_on_msg_allocate_wq);
-
-diff -Nur a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c
---- a/drivers/net/caif/caif_hsi.c 2018-10-10 07:54:28.000000000 +0100
-+++ b/drivers/net/caif/caif_hsi.c 2018-11-03 16:30:39.477807497 +0000
-@@ -940,7 +940,7 @@
- break;
-
- set_current_state(TASK_INTERRUPTIBLE);
-- schedule_timeout(1);
-+ schedule_min_hrtimeout();
- retry--;
- }
-
-diff -Nur a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c
---- a/drivers/net/can/usb/peak_usb/pcan_usb.c 2018-10-10 07:54:28.000000000 +0100
-+++ b/drivers/net/can/usb/peak_usb/pcan_usb.c 2018-11-03 16:30:39.477807497 +0000
-@@ -250,7 +250,7 @@
- } else {
- /* the PCAN-USB needs time to init */
- set_current_state(TASK_INTERRUPTIBLE);
-- schedule_timeout(msecs_to_jiffies(PCAN_USB_STARTUP_TIMEOUT));
-+ schedule_msec_hrtimeout((PCAN_USB_STARTUP_TIMEOUT));
- }
-
- return err;
-diff -Nur a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
---- a/drivers/net/usb/lan78xx.c 2018-10-10 07:54:28.000000000 +0100
-+++ b/drivers/net/usb/lan78xx.c 2018-11-03 16:30:39.478807529 +0000
-@@ -2567,7 +2567,7 @@
- while (!skb_queue_empty(&dev->rxq) &&
- !skb_queue_empty(&dev->txq) &&
- !skb_queue_empty(&dev->done)) {
-- schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
-+ schedule_msec_hrtimeout((UNLINK_TIMEOUT_MS));
- set_current_state(TASK_UNINTERRUPTIBLE);
- netif_dbg(dev, ifdown, dev->net,
- "waited for %d urb completions\n", temp);
-diff -Nur a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
---- a/drivers/net/usb/usbnet.c 2018-10-10 07:54:28.000000000 +0100
-+++ b/drivers/net/usb/usbnet.c 2018-11-03 16:30:39.479807561 +0000
-@@ -772,7 +772,7 @@
- spin_lock_irqsave(&q->lock, flags);
- while (!skb_queue_empty(q)) {
- spin_unlock_irqrestore(&q->lock, flags);
-- schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
-+ schedule_msec_hrtimeout((UNLINK_TIMEOUT_MS));
- set_current_state(TASK_UNINTERRUPTIBLE);
- spin_lock_irqsave(&q->lock, flags);
- }
-diff -Nur a/drivers/ntb/test/ntb_perf.c b/drivers/ntb/test/ntb_perf.c
---- a/drivers/ntb/test/ntb_perf.c 2018-10-10 07:54:28.000000000 +0100
-+++ b/drivers/ntb/test/ntb_perf.c 2018-11-03 16:30:39.479807561 +0000
-@@ -310,7 +310,7 @@
- if (unlikely((jiffies - last_sleep) > 5 * HZ)) {
- last_sleep = jiffies;
- set_current_state(TASK_INTERRUPTIBLE);
-- schedule_timeout(1);
-+ schedule_min_hrtimeout();
- }
-
- if (unlikely(kthread_should_stop()))
-diff -Nur a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
---- a/drivers/scsi/fnic/fnic_scsi.c 2018-10-10 07:54:28.000000000 +0100
-+++ b/drivers/scsi/fnic/fnic_scsi.c 2018-11-03 16:30:39.480807592 +0000
-@@ -217,7 +217,7 @@
-
- /* wait for io cmpl */
- while (atomic_read(&fnic->in_flight))
-- schedule_timeout(msecs_to_jiffies(1));
-+ schedule_msec_hrtimeout((1));
-
- spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
-
-@@ -2255,7 +2255,7 @@
- }
- }
-
-- schedule_timeout(msecs_to_jiffies(2 * fnic->config.ed_tov));
-+ schedule_msec_hrtimeout((2 * fnic->config.ed_tov));
-
- /* walk again to check, if IOs are still pending in fw */
- if (fnic_is_abts_pending(fnic, lr_sc))
-diff -Nur a/drivers/scsi/snic/snic_scsi.c b/drivers/scsi/snic/snic_scsi.c
---- a/drivers/scsi/snic/snic_scsi.c 2018-10-10 07:54:28.000000000 +0100
-+++ b/drivers/scsi/snic/snic_scsi.c 2018-11-03 16:30:39.481807625 +0000
-@@ -2354,7 +2354,7 @@
-
- /* Wait for all the IOs that are entered in Qcmd */
- while (atomic_read(&snic->ios_inflight))
-- schedule_timeout(msecs_to_jiffies(1));
-+ schedule_msec_hrtimeout((1));
-
- ret = snic_issue_hba_reset(snic, sc);
- if (ret) {
-diff -Nur a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c
---- a/drivers/staging/comedi/drivers/ni_mio_common.c 2018-10-10 07:54:28.000000000 +0100
-+++ b/drivers/staging/comedi/drivers/ni_mio_common.c 2018-11-03 16:30:39.483807688 +0000
-@@ -4657,7 +4657,7 @@
- if ((status & NI67XX_CAL_STATUS_BUSY) == 0)
- break;
- set_current_state(TASK_INTERRUPTIBLE);
-- if (schedule_timeout(1))
-+ if (schedule_min_hrtimeout())
- return -EIO;
- }
- if (i == timeout) {
-diff -Nur a/drivers/staging/lustre/lnet/lnet/lib-eq.c b/drivers/staging/lustre/lnet/lnet/lib-eq.c
---- a/drivers/staging/lustre/lnet/lnet/lib-eq.c 2018-10-10 07:54:28.000000000 +0100
-+++ b/drivers/staging/lustre/lnet/lnet/lib-eq.c 2018-11-03 16:30:39.483807688 +0000
-@@ -329,7 +329,7 @@
- schedule();
- } else {
- now = jiffies;
-- schedule_timeout(msecs_to_jiffies(tms));
-+ schedule_msec_hrtimeout((tms));
- tms -= jiffies_to_msecs(jiffies - now);
- if (tms < 0) /* no more wait but may have new event */
- tms = 0;
-diff -Nur a/drivers/staging/rts5208/rtsx.c b/drivers/staging/rts5208/rtsx.c
---- a/drivers/staging/rts5208/rtsx.c 2018-10-10 07:54:28.000000000 +0100
-+++ b/drivers/staging/rts5208/rtsx.c 2018-11-03 16:30:39.483807688 +0000
-@@ -524,7 +524,7 @@
-
- for (;;) {
- set_current_state(TASK_INTERRUPTIBLE);
-- schedule_timeout(msecs_to_jiffies(POLLING_INTERVAL));
-+ schedule_msec_hrtimeout((POLLING_INTERVAL));
-
- /* lock the device pointers */
- mutex_lock(&dev->dev_mutex);
-diff -Nur a/drivers/staging/speakup/speakup_acntpc.c b/drivers/staging/speakup/speakup_acntpc.c
---- a/drivers/staging/speakup/speakup_acntpc.c 2018-10-10 07:54:28.000000000 +0100
-+++ b/drivers/staging/speakup/speakup_acntpc.c 2018-11-03 16:30:39.484807721 +0000
-@@ -206,7 +206,7 @@
- full_time_val = full_time->u.n.value;
- spin_unlock_irqrestore(&speakup_info.spinlock, flags);
- if (synth_full()) {
-- schedule_timeout(msecs_to_jiffies(full_time_val));
-+ schedule_msec_hrtimeout((full_time_val));
- continue;
- }
- set_current_state(TASK_RUNNING);
-@@ -234,7 +234,7 @@
- jiffy_delta_val = jiffy_delta->u.n.value;
- delay_time_val = delay_time->u.n.value;
- spin_unlock_irqrestore(&speakup_info.spinlock, flags);
-- schedule_timeout(msecs_to_jiffies(delay_time_val));
-+ schedule_msec_hrtimeout(delay_time_val);
- jiff_max = jiffies + jiffy_delta_val;
- }
- }
-diff -Nur a/drivers/staging/speakup/speakup_apollo.c b/drivers/staging/speakup/speakup_apollo.c
---- a/drivers/staging/speakup/speakup_apollo.c 2018-10-10 07:54:28.000000000 +0100
-+++ b/drivers/staging/speakup/speakup_apollo.c 2018-11-03 16:30:39.484807721 +0000
-@@ -174,7 +174,7 @@
- if (!synth->io_ops->synth_out(synth, ch)) {
- synth->io_ops->tiocmset(0, UART_MCR_RTS);
- synth->io_ops->tiocmset(UART_MCR_RTS, 0);
-- schedule_timeout(msecs_to_jiffies(full_time_val));
-+ schedule_msec_hrtimeout(full_time_val);
- continue;
- }
- if (time_after_eq(jiffies, jiff_max) && (ch == SPACE)) {
-diff -Nur a/drivers/staging/speakup/speakup_decext.c b/drivers/staging/speakup/speakup_decext.c
---- a/drivers/staging/speakup/speakup_decext.c 2018-10-10 07:54:28.000000000 +0100
-+++ b/drivers/staging/speakup/speakup_decext.c 2018-11-03 16:30:39.484807721 +0000
-@@ -185,7 +185,7 @@
- if (ch == '\n')
- ch = 0x0D;
- if (synth_full() || !synth->io_ops->synth_out(synth, ch)) {
-- schedule_timeout(msecs_to_jiffies(delay_time_val));
-+ schedule_msec_hrtimeout(delay_time_val);
- continue;
- }
- set_current_state(TASK_RUNNING);
-diff -Nur a/drivers/staging/speakup/speakup_decpc.c b/drivers/staging/speakup/speakup_decpc.c
---- a/drivers/staging/speakup/speakup_decpc.c 2018-10-10 07:54:28.000000000 +0100
-+++ b/drivers/staging/speakup/speakup_decpc.c 2018-11-03 16:30:39.484807721 +0000
-@@ -403,7 +403,7 @@
- if (ch == '\n')
- ch = 0x0D;
- if (dt_sendchar(ch)) {
-- schedule_timeout(msecs_to_jiffies(delay_time_val));
-+ schedule_msec_hrtimeout((delay_time_val));
- continue;
- }
- set_current_state(TASK_RUNNING);
-diff -Nur a/drivers/staging/speakup/speakup_dectlk.c b/drivers/staging/speakup/speakup_dectlk.c
---- a/drivers/staging/speakup/speakup_dectlk.c 2018-10-10 07:54:28.000000000 +0100
-+++ b/drivers/staging/speakup/speakup_dectlk.c 2018-11-03 16:30:39.485807753 +0000
-@@ -253,7 +253,7 @@
- if (ch == '\n')
- ch = 0x0D;
- if (synth_full_val || !synth->io_ops->synth_out(synth, ch)) {
-- schedule_timeout(msecs_to_jiffies(delay_time_val));
-+ schedule_msec_hrtimeout(delay_time_val);
- continue;
- }
- set_current_state(TASK_RUNNING);
-diff -Nur a/drivers/staging/speakup/speakup_dtlk.c b/drivers/staging/speakup/speakup_dtlk.c
---- a/drivers/staging/speakup/speakup_dtlk.c 2018-10-10 07:54:28.000000000 +0100
-+++ b/drivers/staging/speakup/speakup_dtlk.c 2018-11-03 16:30:39.485807753 +0000
-@@ -220,7 +220,7 @@
- delay_time_val = delay_time->u.n.value;
- spin_unlock_irqrestore(&speakup_info.spinlock, flags);
- if (synth_full()) {
-- schedule_timeout(msecs_to_jiffies(delay_time_val));
-+ schedule_msec_hrtimeout((delay_time_val));
- continue;
- }
- set_current_state(TASK_RUNNING);
-@@ -236,7 +236,7 @@
- delay_time_val = delay_time->u.n.value;
- jiffy_delta_val = jiffy_delta->u.n.value;
- spin_unlock_irqrestore(&speakup_info.spinlock, flags);
-- schedule_timeout(msecs_to_jiffies(delay_time_val));
-+ schedule_msec_hrtimeout((delay_time_val));
- jiff_max = jiffies + jiffy_delta_val;
- }
- }
-diff -Nur a/drivers/staging/speakup/speakup_keypc.c b/drivers/staging/speakup/speakup_keypc.c
---- a/drivers/staging/speakup/speakup_keypc.c 2018-10-10 07:54:28.000000000 +0100
-+++ b/drivers/staging/speakup/speakup_keypc.c 2018-11-03 16:30:39.485807753 +0000
-@@ -208,7 +208,7 @@
- full_time_val = full_time->u.n.value;
- spin_unlock_irqrestore(&speakup_info.spinlock, flags);
- if (synth_full()) {
-- schedule_timeout(msecs_to_jiffies(full_time_val));
-+ schedule_msec_hrtimeout((full_time_val));
- continue;
- }
- set_current_state(TASK_RUNNING);
-@@ -241,7 +241,7 @@
- jiffy_delta_val = jiffy_delta->u.n.value;
- delay_time_val = delay_time->u.n.value;
- spin_unlock_irqrestore(&speakup_info.spinlock, flags);
-- schedule_timeout(msecs_to_jiffies(delay_time_val));
-+ schedule_msec_hrtimeout((delay_time_val));
- jiff_max = jiffies+jiffy_delta_val;
- }
- }
-diff -Nur a/drivers/staging/speakup/synth.c b/drivers/staging/speakup/synth.c
---- a/drivers/staging/speakup/synth.c 2018-10-10 07:54:28.000000000 +0100
-+++ b/drivers/staging/speakup/synth.c 2018-11-03 16:30:39.486807785 +0000
-@@ -92,7 +92,7 @@
- if (ch == '\n')
- ch = synth->procspeech;
- if (!synth->io_ops->synth_out(synth, ch)) {
-- schedule_timeout(msecs_to_jiffies(full_time_val));
-+ schedule_msec_hrtimeout(full_time_val);
- continue;
- }
- if (time_after_eq(jiffies, jiff_max) && (ch == SPACE)) {
-diff -Nur a/drivers/staging/unisys/visornic/visornic_main.c b/drivers/staging/unisys/visornic/visornic_main.c
---- a/drivers/staging/unisys/visornic/visornic_main.c 2018-10-10 07:54:28.000000000 +0100
-+++ b/drivers/staging/unisys/visornic/visornic_main.c 2018-11-03 16:30:39.486807785 +0000
-@@ -556,7 +556,7 @@
- }
- set_current_state(TASK_INTERRUPTIBLE);
- spin_unlock_irqrestore(&devdata->priv_lock, flags);
-- wait += schedule_timeout(msecs_to_jiffies(10));
-+ wait += schedule_msec_hrtimeout((10));
- spin_lock_irqsave(&devdata->priv_lock, flags);
- }
-
-@@ -567,7 +567,7 @@
- while (1) {
- set_current_state(TASK_INTERRUPTIBLE);
- spin_unlock_irqrestore(&devdata->priv_lock, flags);
-- schedule_timeout(msecs_to_jiffies(10));
-+ schedule_msec_hrtimeout((10));
- spin_lock_irqsave(&devdata->priv_lock, flags);
- if (atomic_read(&devdata->usage))
- break;
-@@ -721,7 +721,7 @@
- }
- set_current_state(TASK_INTERRUPTIBLE);
- spin_unlock_irqrestore(&devdata->priv_lock, flags);
-- wait += schedule_timeout(msecs_to_jiffies(10));
-+ wait += schedule_msec_hrtimeout((10));
- spin_lock_irqsave(&devdata->priv_lock, flags);
- }
-
-diff -Nur a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
---- a/drivers/target/target_core_user.c 2018-10-10 07:54:28.000000000 +0100
-+++ b/drivers/target/target_core_user.c 2018-11-03 16:30:39.487807817 +0000
-@@ -808,10 +808,9 @@
- pr_debug("sleeping for ring space\n");
- mutex_unlock(&udev->cmdr_lock);
- if (udev->cmd_time_out)
-- ret = schedule_timeout(
-- msecs_to_jiffies(udev->cmd_time_out));
-+ ret = schedule_msec_hrtimeout(udev->cmd_time_out);
- else
-- ret = schedule_timeout(msecs_to_jiffies(TCMU_TIME_OUT));
-+ ret = schedule_msec_hrtimeout(TCMU_TIME_OUT);
- finish_wait(&udev->wait_cmdr, &__wait);
- if (!ret) {
- pr_warn("tcmu: command timed out\n");
-diff -Nur a/drivers/video/fbdev/omap/hwa742.c b/drivers/video/fbdev/omap/hwa742.c
---- a/drivers/video/fbdev/omap/hwa742.c 2018-10-10 07:54:28.000000000 +0100
-+++ b/drivers/video/fbdev/omap/hwa742.c 2018-11-03 16:30:39.487807817 +0000
-@@ -926,7 +926,7 @@
- if (hwa742_read_reg(HWA742_PLL_DIV_REG) & (1 << 7))
- break;
- set_current_state(TASK_UNINTERRUPTIBLE);
-- schedule_timeout(msecs_to_jiffies(5));
-+ schedule_msec_hrtimeout((5));
- }
- hwa742_set_update_mode(hwa742.update_mode_before_suspend);
- }
-diff -Nur a/drivers/video/fbdev/pxafb.c b/drivers/video/fbdev/pxafb.c
---- a/drivers/video/fbdev/pxafb.c 2018-10-10 07:54:28.000000000 +0100
-+++ b/drivers/video/fbdev/pxafb.c 2018-11-03 16:30:39.488807849 +0000
-@@ -1286,7 +1286,7 @@
- mutex_unlock(&fbi->ctrlr_lock);
-
- set_current_state(TASK_INTERRUPTIBLE);
-- schedule_timeout(msecs_to_jiffies(30));
-+ schedule_msec_hrtimeout((30));
- }
-
- pr_debug("%s(): task ending\n", __func__);
-diff -Nur a/fs/afs/vlocation.c b/fs/afs/vlocation.c
---- a/fs/afs/vlocation.c 2018-10-10 07:54:28.000000000 +0100
-+++ b/fs/afs/vlocation.c 2018-11-03 16:30:39.488807849 +0000
-@@ -129,7 +129,7 @@
- if (vl->upd_busy_cnt > 1) {
- /* second+ BUSY - sleep a little bit */
- set_current_state(TASK_UNINTERRUPTIBLE);
-- schedule_timeout(1);
-+ schedule_min_hrtimeout();
- }
- continue;
- }
-diff -Nur a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
---- a/fs/btrfs/extent-tree.c 2018-10-10 07:54:28.000000000 +0100
-+++ b/fs/btrfs/extent-tree.c 2018-11-03 16:30:39.491807945 +0000
-@@ -6106,7 +6106,7 @@
-
- if (flush != BTRFS_RESERVE_NO_FLUSH &&
- btrfs_transaction_in_commit(fs_info))
-- schedule_timeout(1);
-+ schedule_min_hrtimeout();
-
- if (delalloc_lock)
- mutex_lock(&inode->delalloc_mutex);
-diff -Nur a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c
---- a/fs/btrfs/inode-map.c 2018-10-10 07:54:28.000000000 +0100
-+++ b/fs/btrfs/inode-map.c 2018-11-03 16:30:39.492807977 +0000
-@@ -89,7 +89,7 @@
- btrfs_release_path(path);
- root->ino_cache_progress = last;
- up_read(&fs_info->commit_root_sem);
-- schedule_timeout(1);
-+ schedule_min_hrtimeout();
- goto again;
- } else
- continue;
-diff -Nur a/sound/usb/line6/pcm.c b/sound/usb/line6/pcm.c
---- a/sound/usb/line6/pcm.c 2018-10-10 07:54:28.000000000 +0100
-+++ b/sound/usb/line6/pcm.c 2018-11-03 16:30:39.492807977 +0000
-@@ -131,7 +131,7 @@
- if (!alive)
- break;
- set_current_state(TASK_UNINTERRUPTIBLE);
-- schedule_timeout(1);
-+ schedule_min_hrtimeout();
- } while (--timeout > 0);
- if (alive)
- dev_err(line6pcm->line6->ifcdev,
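[annotation] The hunks above are one mechanical substitution: schedule_timeout(msecs_to_jiffies(n)) becomes schedule_msec_hrtimeout((n)) (the doubled parentheses are an artifact of the scripted replacement), and bare schedule_timeout(1) becomes schedule_min_hrtimeout(). A minimal sketch of the converted idiom, assuming the hrtimeout helpers added earlier in this series keep schedule_timeout()'s task-state contract; they are not mainline APIs:

    #include <linux/jiffies.h>
    #include <linux/sched.h>

    static void poll_delay_sketch(void)
    {
            /* before: sleeps whole jiffies, so at HZ=100 a "1 ms" wait is ~10 ms */
            set_current_state(TASK_UNINTERRUPTIBLE);
            schedule_timeout(msecs_to_jiffies(1));

            /* after: hrtimer-backed, wakes close to the requested minimum */
            set_current_state(TASK_UNINTERRUPTIBLE);
            schedule_min_hrtimeout();
    }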
diff --git a/sys-kernel/linux-image-redcore-lts/files/4.14-0008-Replace-all-calls-to-schedule_timeout_interruptible-.patch b/sys-kernel/linux-image-redcore-lts/files/4.14-0008-Replace-all-calls-to-schedule_timeout_interruptible-.patch
deleted file mode 100644
index f9f274ce..00000000
--- a/sys-kernel/linux-image-redcore-lts/files/4.14-0008-Replace-all-calls-to-schedule_timeout_interruptible-.patch
+++ /dev/null
@@ -1,311 +0,0 @@
-From 3ef5df78c2f425115b87f0f2f59fd189c0f1bbe3 Mon Sep 17 00:00:00 2001
-From: Con Kolivas <kernel@kolivas.org>
-Date: Mon, 20 Feb 2017 13:30:07 +1100
-Subject: [PATCH 08/16] Replace all calls to schedule_timeout_interruptible of
- potentially under 50ms to use schedule_msec_hrtimeout_interruptible.
-
----
- drivers/hwmon/fam15h_power.c | 2 +-
- drivers/iio/light/tsl2563.c | 6 +-----
- drivers/media/i2c/msp3400-driver.c | 4 ++--
- drivers/media/pci/ivtv/ivtv-gpio.c | 6 +++---
- drivers/media/radio/radio-mr800.c | 2 +-
- drivers/media/radio/radio-tea5777.c | 2 +-
- drivers/media/radio/tea575x.c | 2 +-
- drivers/parport/ieee1284.c | 2 +-
- drivers/parport/ieee1284_ops.c | 2 +-
- drivers/platform/x86/intel_ips.c | 8 ++++----
- net/core/pktgen.c | 2 +-
- sound/soc/codecs/wm8350.c | 12 ++++++------
- sound/soc/codecs/wm8900.c | 2 +-
- sound/soc/codecs/wm9713.c | 4 ++--
- 14 files changed, 26 insertions(+), 30 deletions(-)
-
-diff --git a/drivers/hwmon/fam15h_power.c b/drivers/hwmon/fam15h_power.c
-index 9545a346044f..c24cf1302ec7 100644
---- a/drivers/hwmon/fam15h_power.c
-+++ b/drivers/hwmon/fam15h_power.c
-@@ -237,7 +237,7 @@ static ssize_t power1_average_show(struct device *dev,
- prev_ptsc[cu] = data->cpu_sw_pwr_ptsc[cu];
- }
-
-- leftover = schedule_timeout_interruptible(msecs_to_jiffies(data->power_period));
-+ leftover = schedule_msec_hrtimeout_interruptible((data->power_period));
- if (leftover)
- return 0;
-
-diff --git a/drivers/iio/light/tsl2563.c b/drivers/iio/light/tsl2563.c
-index 7599693f7fe9..452090739138 100644
---- a/drivers/iio/light/tsl2563.c
-+++ b/drivers/iio/light/tsl2563.c
-@@ -282,11 +282,7 @@ static void tsl2563_wait_adc(struct tsl2563_chip *chip)
- default:
- delay = 402;
- }
-- /*
-- * TODO: Make sure that we wait at least required delay but why we
-- * have to extend it one tick more?
-- */
-- schedule_timeout_interruptible(msecs_to_jiffies(delay) + 2);
-+ schedule_msec_hrtimeout_interruptible(delay + 1);
- }
-
- static int tsl2563_adjust_gainlevel(struct tsl2563_chip *chip, u16 adc)
-diff --git a/drivers/media/i2c/msp3400-driver.c b/drivers/media/i2c/msp3400-driver.c
-index 3db966db83eb..f0fab7676f72 100644
---- a/drivers/media/i2c/msp3400-driver.c
-+++ b/drivers/media/i2c/msp3400-driver.c
-@@ -179,7 +179,7 @@ static int msp_read(struct i2c_client *client, int dev, int addr)
- break;
- dev_warn(&client->dev, "I/O error #%d (read 0x%02x/0x%02x)\n", err,
- dev, addr);
-- schedule_timeout_interruptible(msecs_to_jiffies(10));
-+ schedule_msec_hrtimeout_interruptible((10));
- }
- if (err == 3) {
- dev_warn(&client->dev, "resetting chip, sound will go off.\n");
-@@ -220,7 +220,7 @@ static int msp_write(struct i2c_client *client, int dev, int addr, int val)
- break;
- dev_warn(&client->dev, "I/O error #%d (write 0x%02x/0x%02x)\n", err,
- dev, addr);
-- schedule_timeout_interruptible(msecs_to_jiffies(10));
-+ schedule_msec_hrtimeout_interruptible((10));
- }
- if (err == 3) {
- dev_warn(&client->dev, "resetting chip, sound will go off.\n");
-diff --git a/drivers/media/pci/ivtv/ivtv-gpio.c b/drivers/media/pci/ivtv/ivtv-gpio.c
-index f752f3993687..23372af61ebf 100644
---- a/drivers/media/pci/ivtv/ivtv-gpio.c
-+++ b/drivers/media/pci/ivtv/ivtv-gpio.c
-@@ -117,7 +117,7 @@ void ivtv_reset_ir_gpio(struct ivtv *itv)
- curout = (curout & ~0xF) | 1;
- write_reg(curout, IVTV_REG_GPIO_OUT);
- /* We could use something else for smaller time */
-- schedule_timeout_interruptible(msecs_to_jiffies(1));
-+ schedule_msec_hrtimeout_interruptible((1));
- curout |= 2;
- write_reg(curout, IVTV_REG_GPIO_OUT);
- curdir &= ~0x80;
-@@ -137,11 +137,11 @@ int ivtv_reset_tuner_gpio(void *dev, int component, int cmd, int value)
- curout = read_reg(IVTV_REG_GPIO_OUT);
- curout &= ~(1 << itv->card->xceive_pin);
- write_reg(curout, IVTV_REG_GPIO_OUT);
-- schedule_timeout_interruptible(msecs_to_jiffies(1));
-+ schedule_msec_hrtimeout_interruptible((1));
-
- curout |= 1 << itv->card->xceive_pin;
- write_reg(curout, IVTV_REG_GPIO_OUT);
-- schedule_timeout_interruptible(msecs_to_jiffies(1));
-+ schedule_msec_hrtimeout_interruptible((1));
- return 0;
- }
-
-diff --git a/drivers/media/radio/radio-mr800.c b/drivers/media/radio/radio-mr800.c
-index c9f59129af79..cb6f8394a5c2 100644
---- a/drivers/media/radio/radio-mr800.c
-+++ b/drivers/media/radio/radio-mr800.c
-@@ -378,7 +378,7 @@ static int vidioc_s_hw_freq_seek(struct file *file, void *priv,
- retval = -ENODATA;
- break;
- }
-- if (schedule_timeout_interruptible(msecs_to_jiffies(10))) {
-+ if (schedule_msec_hrtimeout_interruptible((10))) {
- retval = -ERESTARTSYS;
- break;
- }
-diff --git a/drivers/media/radio/radio-tea5777.c b/drivers/media/radio/radio-tea5777.c
-index 04ed1a5d1177..d593d28dc286 100644
---- a/drivers/media/radio/radio-tea5777.c
-+++ b/drivers/media/radio/radio-tea5777.c
-@@ -245,7 +245,7 @@ static int radio_tea5777_update_read_reg(struct radio_tea5777 *tea, int wait)
- }
-
- if (wait) {
-- if (schedule_timeout_interruptible(msecs_to_jiffies(wait)))
-+ if (schedule_msec_hrtimeout_interruptible((wait)))
- return -ERESTARTSYS;
- }
-
-diff --git a/drivers/media/radio/tea575x.c b/drivers/media/radio/tea575x.c
-index 4dc2067bce14..29f4416fb9ae 100644
---- a/drivers/media/radio/tea575x.c
-+++ b/drivers/media/radio/tea575x.c
-@@ -416,7 +416,7 @@ int snd_tea575x_s_hw_freq_seek(struct file *file, struct snd_tea575x *tea,
- for (;;) {
- if (time_after(jiffies, timeout))
- break;
-- if (schedule_timeout_interruptible(msecs_to_jiffies(10))) {
-+ if (schedule_msec_hrtimeout_interruptible((10))) {
- /* some signal arrived, stop search */
- tea->val &= ~TEA575X_BIT_SEARCH;
- snd_tea575x_set_freq(tea);
-diff --git a/drivers/parport/ieee1284.c b/drivers/parport/ieee1284.c
-index 74cc6dd982d2..c22c4d5f08d0 100644
---- a/drivers/parport/ieee1284.c
-+++ b/drivers/parport/ieee1284.c
-@@ -215,7 +215,7 @@ int parport_wait_peripheral(struct parport *port,
- /* parport_wait_event didn't time out, but the
- * peripheral wasn't actually ready either.
- * Wait for another 10ms. */
-- schedule_timeout_interruptible(msecs_to_jiffies(10));
-+ schedule_msec_hrtimeout_interruptible((10));
- }
- }
-
-diff --git a/drivers/parport/ieee1284_ops.c b/drivers/parport/ieee1284_ops.c
-index 5d41dda6da4e..34705f6b423f 100644
---- a/drivers/parport/ieee1284_ops.c
-+++ b/drivers/parport/ieee1284_ops.c
-@@ -537,7 +537,7 @@ size_t parport_ieee1284_ecp_read_data (struct parport *port,
- /* Yield the port for a while. */
- if (count && dev->port->irq != PARPORT_IRQ_NONE) {
- parport_release (dev);
-- schedule_timeout_interruptible(msecs_to_jiffies(40));
-+ schedule_msec_hrtimeout_interruptible((40));
- parport_claim_or_block (dev);
- }
- else
-diff --git a/drivers/platform/x86/intel_ips.c b/drivers/platform/x86/intel_ips.c
-index 58dcee562d64..b661b7c071bb 100644
---- a/drivers/platform/x86/intel_ips.c
-+++ b/drivers/platform/x86/intel_ips.c
-@@ -813,7 +813,7 @@ static int ips_adjust(void *data)
- ips_gpu_lower(ips);
-
- sleep:
-- schedule_timeout_interruptible(msecs_to_jiffies(IPS_ADJUST_PERIOD));
-+ schedule_msec_hrtimeout_interruptible((IPS_ADJUST_PERIOD));
- } while (!kthread_should_stop());
-
- dev_dbg(&ips->dev->dev, "ips-adjust thread stopped\n");
-@@ -992,7 +992,7 @@ static int ips_monitor(void *data)
- seqno_timestamp = get_jiffies_64();
-
- old_cpu_power = thm_readl(THM_CEC);
-- schedule_timeout_interruptible(msecs_to_jiffies(IPS_SAMPLE_PERIOD));
-+ schedule_msec_hrtimeout_interruptible((IPS_SAMPLE_PERIOD));
-
- /* Collect an initial average */
- for (i = 0; i < IPS_SAMPLE_COUNT; i++) {
-@@ -1019,7 +1019,7 @@ static int ips_monitor(void *data)
- mchp_samples[i] = mchp;
- }
-
-- schedule_timeout_interruptible(msecs_to_jiffies(IPS_SAMPLE_PERIOD));
-+ schedule_msec_hrtimeout_interruptible((IPS_SAMPLE_PERIOD));
- if (kthread_should_stop())
- break;
- }
-@@ -1046,7 +1046,7 @@ static int ips_monitor(void *data)
- * us to reduce the sample frequency if the CPU and GPU are idle.
- */
- old_cpu_power = thm_readl(THM_CEC);
-- schedule_timeout_interruptible(msecs_to_jiffies(IPS_SAMPLE_PERIOD));
-+ schedule_msec_hrtimeout_interruptible((IPS_SAMPLE_PERIOD));
- last_sample_period = IPS_SAMPLE_PERIOD;
-
- setup_deferrable_timer_on_stack(&timer, monitor_timeout,
-diff --git a/net/core/pktgen.c b/net/core/pktgen.c
-index 6e1e10ff433a..be5d6f7142e4 100644
---- a/net/core/pktgen.c
-+++ b/net/core/pktgen.c
-@@ -1992,7 +1992,7 @@ static void pktgen_mark_device(const struct pktgen_net *pn, const char *ifname)
- mutex_unlock(&pktgen_thread_lock);
- pr_debug("%s: waiting for %s to disappear....\n",
- __func__, ifname);
-- schedule_timeout_interruptible(msecs_to_jiffies(msec_per_try));
-+ schedule_msec_hrtimeout_interruptible((msec_per_try));
- mutex_lock(&pktgen_thread_lock);
-
- if (++i >= max_tries) {
-diff --git a/sound/soc/codecs/wm8350.c b/sound/soc/codecs/wm8350.c
-index 2efc5b41ad0f..3e3248c48c6b 100644
---- a/sound/soc/codecs/wm8350.c
-+++ b/sound/soc/codecs/wm8350.c
-@@ -236,10 +236,10 @@ static void wm8350_pga_work(struct work_struct *work)
- out2->ramp == WM8350_RAMP_UP) {
- /* delay is longer over 0dB as increases are larger */
- if (i >= WM8350_OUTn_0dB)
-- schedule_timeout_interruptible(msecs_to_jiffies
-+ schedule_msec_hrtimeout_interruptible(
- (2));
- else
-- schedule_timeout_interruptible(msecs_to_jiffies
-+ schedule_msec_hrtimeout_interruptible(
- (1));
- } else
- udelay(50); /* doesn't matter if we delay longer */
-@@ -1123,7 +1123,7 @@ static int wm8350_set_bias_level(struct snd_soc_codec *codec,
- (platform->dis_out4 << 6));
-
- /* wait for discharge */
-- schedule_timeout_interruptible(msecs_to_jiffies
-+ schedule_msec_hrtimeout_interruptible(
- (platform->
- cap_discharge_msecs));
-
-@@ -1139,7 +1139,7 @@ static int wm8350_set_bias_level(struct snd_soc_codec *codec,
- WM8350_VBUFEN);
-
- /* wait for vmid */
-- schedule_timeout_interruptible(msecs_to_jiffies
-+ schedule_msec_hrtimeout_interruptible(
- (platform->
- vmid_charge_msecs));
-
-@@ -1190,7 +1190,7 @@ static int wm8350_set_bias_level(struct snd_soc_codec *codec,
- wm8350_reg_write(wm8350, WM8350_POWER_MGMT_1, pm1);
-
- /* wait */
-- schedule_timeout_interruptible(msecs_to_jiffies
-+ schedule_msec_hrtimeout_interruptible(
- (platform->
- vmid_discharge_msecs));
-
-@@ -1208,7 +1208,7 @@ static int wm8350_set_bias_level(struct snd_soc_codec *codec,
- pm1 | WM8350_OUTPUT_DRAIN_EN);
-
- /* wait */
-- schedule_timeout_interruptible(msecs_to_jiffies
-+ schedule_msec_hrtimeout_interruptible(
- (platform->drain_msecs));
-
- pm1 &= ~WM8350_BIASEN;
-diff --git a/sound/soc/codecs/wm8900.c b/sound/soc/codecs/wm8900.c
-index c77b49a29311..fc50456e90a9 100644
---- a/sound/soc/codecs/wm8900.c
-+++ b/sound/soc/codecs/wm8900.c
-@@ -1112,7 +1112,7 @@ static int wm8900_set_bias_level(struct snd_soc_codec *codec,
- /* Need to let things settle before stopping the clock
- * to ensure that restart works, see "Stopping the
- * master clock" in the datasheet. */
-- schedule_timeout_interruptible(msecs_to_jiffies(1));
-+ schedule_msec_hrtimeout_interruptible((1));
- snd_soc_write(codec, WM8900_REG_POWER2,
- WM8900_REG_POWER2_SYSCLK_ENA);
- break;
-diff --git a/sound/soc/codecs/wm9713.c b/sound/soc/codecs/wm9713.c
-index 7e4822185feb..0c85a207446a 100644
---- a/sound/soc/codecs/wm9713.c
-+++ b/sound/soc/codecs/wm9713.c
-@@ -199,7 +199,7 @@ static int wm9713_voice_shutdown(struct snd_soc_dapm_widget *w,
-
- /* Gracefully shut down the voice interface. */
- snd_soc_update_bits(codec, AC97_HANDSET_RATE, 0x0f00, 0x0200);
-- schedule_timeout_interruptible(msecs_to_jiffies(1));
-+ schedule_msec_hrtimeout_interruptible((1));
- snd_soc_update_bits(codec, AC97_HANDSET_RATE, 0x0f00, 0x0f00);
- snd_soc_update_bits(codec, AC97_EXTENDED_MID, 0x1000, 0x1000);
-
-@@ -868,7 +868,7 @@ static int wm9713_set_pll(struct snd_soc_codec *codec,
- wm9713->pll_in = freq_in;
-
- /* wait 10ms AC97 link frames for the link to stabilise */
-- schedule_timeout_interruptible(msecs_to_jiffies(10));
-+ schedule_msec_hrtimeout_interruptible((10));
- return 0;
- }
-
---
-2.11.0
-
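[annotation] As the call sites above rely on, the interruptible variant keeps schedule_timeout_interruptible()'s convention: zero means the full wait elapsed, non-zero means a signal cut the sleep short. A hedged sketch of that usage; wait_for_chip() and the 10 ms figure are illustrative, not from the patch:

    #include <linux/errno.h>
    #include <linux/sched.h>

    static int wait_for_chip(void)
    {
            /* schedule_msec_hrtimeout_interruptible() is added by this
             * series; non-zero return means we were interrupted early */
            if (schedule_msec_hrtimeout_interruptible(10))
                    return -ERESTARTSYS;
            return 0;
    }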
diff --git a/sys-kernel/linux-image-redcore-lts/files/4.14-0009-Replace-all-calls-to-schedule_timeout_uninterruptibl.patch b/sys-kernel/linux-image-redcore-lts/files/4.14-0009-Replace-all-calls-to-schedule_timeout_uninterruptibl.patch
deleted file mode 100644
index c910f3df..00000000
--- a/sys-kernel/linux-image-redcore-lts/files/4.14-0009-Replace-all-calls-to-schedule_timeout_uninterruptibl.patch
+++ /dev/null
@@ -1,160 +0,0 @@
-From 6044370cf4bbc5e05f5d78f5772c1d88e3153603 Mon Sep 17 00:00:00 2001
-From: Con Kolivas <kernel@kolivas.org>
-Date: Mon, 20 Feb 2017 13:30:32 +1100
-Subject: [PATCH 09/16] Replace all calls to schedule_timeout_uninterruptible
- of potentially under 50ms to use schedule_msec_hrtimeout_uninterruptible
-
----
- drivers/media/pci/cx18/cx18-gpio.c | 4 ++--
- drivers/net/wireless/intel/ipw2x00/ipw2100.c | 4 ++--
- drivers/rtc/rtc-wm8350.c | 6 +++---
- drivers/scsi/lpfc/lpfc_scsi.c | 2 +-
- sound/pci/maestro3.c | 4 ++--
- sound/soc/codecs/rt5631.c | 4 ++--
- sound/soc/soc-dapm.c | 2 +-
- 7 files changed, 13 insertions(+), 13 deletions(-)
-
-diff --git a/drivers/media/pci/cx18/cx18-gpio.c b/drivers/media/pci/cx18/cx18-gpio.c
-index 012859e6dc7b..206bd08265a5 100644
---- a/drivers/media/pci/cx18/cx18-gpio.c
-+++ b/drivers/media/pci/cx18/cx18-gpio.c
-@@ -90,11 +90,11 @@ static void gpio_reset_seq(struct cx18 *cx, u32 active_lo, u32 active_hi,
-
- /* Assert */
- gpio_update(cx, mask, ~active_lo);
-- schedule_timeout_uninterruptible(msecs_to_jiffies(assert_msecs));
-+ schedule_msec_hrtimeout_uninterruptible((assert_msecs));
-
- /* Deassert */
- gpio_update(cx, mask, ~active_hi);
-- schedule_timeout_uninterruptible(msecs_to_jiffies(recovery_msecs));
-+ schedule_msec_hrtimeout_uninterruptible((recovery_msecs));
- }
-
- /*
-diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2100.c b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
-index 19c442cb93e4..448f41782060 100644
---- a/drivers/net/wireless/intel/ipw2x00/ipw2100.c
-+++ b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
-@@ -830,7 +830,7 @@ static int ipw2100_hw_send_command(struct ipw2100_priv *priv,
- * doesn't seem to have as many firmware restart cycles...
- *
- * As a test, we're sticking in a 1/100s delay here */
-- schedule_timeout_uninterruptible(msecs_to_jiffies(10));
-+ schedule_msec_hrtimeout_uninterruptible((10));
-
- return 0;
-
-@@ -1281,7 +1281,7 @@ static int ipw2100_start_adapter(struct ipw2100_priv *priv)
- IPW_DEBUG_FW("Waiting for f/w initialization to complete...\n");
- i = 5000;
- do {
-- schedule_timeout_uninterruptible(msecs_to_jiffies(40));
-+ schedule_msec_hrtimeout_uninterruptible((40));
- /* Todo... wait for sync command ... */
-
- read_register(priv->net_dev, IPW_REG_INTA, &inta);
-diff --git a/drivers/rtc/rtc-wm8350.c b/drivers/rtc/rtc-wm8350.c
-index 483c7993516b..fddbaa475066 100644
---- a/drivers/rtc/rtc-wm8350.c
-+++ b/drivers/rtc/rtc-wm8350.c
-@@ -119,7 +119,7 @@ static int wm8350_rtc_settime(struct device *dev, struct rtc_time *tm)
- /* Wait until confirmation of stopping */
- do {
- rtc_ctrl = wm8350_reg_read(wm8350, WM8350_RTC_TIME_CONTROL);
-- schedule_timeout_uninterruptible(msecs_to_jiffies(1));
-+ schedule_msec_hrtimeout_uninterruptible((1));
- } while (--retries && !(rtc_ctrl & WM8350_RTC_STS));
-
- if (!retries) {
-@@ -202,7 +202,7 @@ static int wm8350_rtc_stop_alarm(struct wm8350 *wm8350)
- /* Wait until confirmation of stopping */
- do {
- rtc_ctrl = wm8350_reg_read(wm8350, WM8350_RTC_TIME_CONTROL);
-- schedule_timeout_uninterruptible(msecs_to_jiffies(1));
-+ schedule_msec_hrtimeout_uninterruptible((1));
- } while (retries-- && !(rtc_ctrl & WM8350_RTC_ALMSTS));
-
- if (!(rtc_ctrl & WM8350_RTC_ALMSTS))
-@@ -225,7 +225,7 @@ static int wm8350_rtc_start_alarm(struct wm8350 *wm8350)
- /* Wait until confirmation */
- do {
- rtc_ctrl = wm8350_reg_read(wm8350, WM8350_RTC_TIME_CONTROL);
-- schedule_timeout_uninterruptible(msecs_to_jiffies(1));
-+ schedule_msec_hrtimeout_uninterruptible((1));
- } while (retries-- && rtc_ctrl & WM8350_RTC_ALMSTS);
-
- if (rtc_ctrl & WM8350_RTC_ALMSTS)
-diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
-index 1a6f122bb25d..c0db66302a3e 100644
---- a/drivers/scsi/lpfc/lpfc_scsi.c
-+++ b/drivers/scsi/lpfc/lpfc_scsi.c
-@@ -5131,7 +5131,7 @@ lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id,
- tgt_id, lun_id, context);
- later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
- while (time_after(later, jiffies) && cnt) {
-- schedule_timeout_uninterruptible(msecs_to_jiffies(20));
-+ schedule_msec_hrtimeout_uninterruptible((20));
- cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
- }
- if (cnt) {
-diff --git a/sound/pci/maestro3.c b/sound/pci/maestro3.c
-index 8f20dec97843..944ce63431b0 100644
---- a/sound/pci/maestro3.c
-+++ b/sound/pci/maestro3.c
-@@ -2016,7 +2016,7 @@ static void snd_m3_ac97_reset(struct snd_m3 *chip)
- outw(0, io + GPIO_DATA);
- outw(dir | GPO_PRIMARY_AC97, io + GPIO_DIRECTION);
-
-- schedule_timeout_uninterruptible(msecs_to_jiffies(delay1));
-+ schedule_msec_hrtimeout_uninterruptible((delay1));
-
- outw(GPO_PRIMARY_AC97, io + GPIO_DATA);
- udelay(5);
-@@ -2024,7 +2024,7 @@ static void snd_m3_ac97_reset(struct snd_m3 *chip)
- outw(IO_SRAM_ENABLE | SERIAL_AC_LINK_ENABLE, io + RING_BUS_CTRL_A);
- outw(~0, io + GPIO_MASK);
-
-- schedule_timeout_uninterruptible(msecs_to_jiffies(delay2));
-+ schedule_msec_hrtimeout_uninterruptible((delay2));
-
- if (! snd_m3_try_read_vendor(chip))
- break;
-diff --git a/sound/soc/codecs/rt5631.c b/sound/soc/codecs/rt5631.c
-index 55b04c55fb4b..2ed02ad6ac41 100644
---- a/sound/soc/codecs/rt5631.c
-+++ b/sound/soc/codecs/rt5631.c
-@@ -419,7 +419,7 @@ static void onebit_depop_mute_stage(struct snd_soc_codec *codec, int enable)
- hp_zc = snd_soc_read(codec, RT5631_INT_ST_IRQ_CTRL_2);
- snd_soc_write(codec, RT5631_INT_ST_IRQ_CTRL_2, hp_zc & 0xf7ff);
- if (enable) {
-- schedule_timeout_uninterruptible(msecs_to_jiffies(10));
-+ schedule_msec_hrtimeout_uninterruptible((10));
- /* config one-bit depop parameter */
- rt5631_write_index(codec, RT5631_SPK_INTL_CTRL, 0x307f);
- snd_soc_update_bits(codec, RT5631_HP_OUT_VOL,
-@@ -529,7 +529,7 @@ static void depop_seq_mute_stage(struct snd_soc_codec *codec, int enable)
- hp_zc = snd_soc_read(codec, RT5631_INT_ST_IRQ_CTRL_2);
- snd_soc_write(codec, RT5631_INT_ST_IRQ_CTRL_2, hp_zc & 0xf7ff);
- if (enable) {
-- schedule_timeout_uninterruptible(msecs_to_jiffies(10));
-+ schedule_msec_hrtimeout_uninterruptible((10));
-
- /* config depop sequence parameter */
- rt5631_write_index(codec, RT5631_SPK_INTL_CTRL, 0x302f);
-diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
-index dcef67a9bd48..11c2bb48c8f2 100644
---- a/sound/soc/soc-dapm.c
-+++ b/sound/soc/soc-dapm.c
-@@ -134,7 +134,7 @@ static void dapm_assert_locked(struct snd_soc_dapm_context *dapm)
- static void pop_wait(u32 pop_time)
- {
- if (pop_time)
-- schedule_timeout_uninterruptible(msecs_to_jiffies(pop_time));
-+ schedule_msec_hrtimeout_uninterruptible((pop_time));
- }
-
- static void pop_dbg(struct device *dev, u32 pop_time, const char *fmt, ...)
---
-2.11.0
-
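[annotation] For the bounded polls above (the rtc-wm8350 hunks, for instance) only the per-iteration sleep changes; the retry budget stays in iterations. A sketch under the same assumption that the helper sleeps roughly the requested milliseconds; my_dev, read_status() and DONE are hypothetical stand-ins:

    static void wait_until_done(struct my_dev *dev)
    {
            int retries = 5;

            do {
                    schedule_msec_hrtimeout_uninterruptible(1);
            } while (--retries && !(read_status(dev) & DONE));
    }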
diff --git a/sys-kernel/linux-image-redcore-lts/files/4.14-0010-Don-t-use-hrtimer-overlay-when-pm_freezing-since-som.patch b/sys-kernel/linux-image-redcore-lts/files/4.14-0010-Don-t-use-hrtimer-overlay-when-pm_freezing-since-som.patch
deleted file mode 100644
index 260bb98d..00000000
--- a/sys-kernel/linux-image-redcore-lts/files/4.14-0010-Don-t-use-hrtimer-overlay-when-pm_freezing-since-som.patch
+++ /dev/null
@@ -1,69 +0,0 @@
-From 071486de633698dcdd163295173ce4663ec9158c Mon Sep 17 00:00:00 2001
-From: Con Kolivas <kernel@kolivas.org>
-Date: Mon, 20 Feb 2017 13:32:58 +1100
-Subject: [PATCH 10/16] Don't use hrtimer overlay when pm_freezing since some
- drivers still don't correctly use freezable timeouts.
-
----
- kernel/time/hrtimer.c | 2 +-
- kernel/time/timer.c | 9 +++++----
- 2 files changed, 6 insertions(+), 5 deletions(-)
-
-diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
-index 13227cf2814c..66456c72bace 100644
---- a/kernel/time/hrtimer.c
-+++ b/kernel/time/hrtimer.c
-@@ -1809,7 +1809,7 @@ long __sched schedule_msec_hrtimeout(long timeout)
- * (yet) better than Hz, as would occur during startup, use regular
- * timers.
- */
-- if (jiffs > 4 || hrtimer_resolution >= NSEC_PER_SEC / HZ)
-+ if (jiffs > 4 || hrtimer_resolution >= NSEC_PER_SEC / HZ || pm_freezing)
- return schedule_timeout(jiffs);
-
- secs = timeout / 1000;
-diff --git a/kernel/time/timer.c b/kernel/time/timer.c
-index c68cb9307f64..2f2c96b03efe 100644
---- a/kernel/time/timer.c
-+++ b/kernel/time/timer.c
-@@ -44,6 +44,7 @@
- #include <linux/sched/debug.h>
- #include <linux/slab.h>
- #include <linux/compat.h>
-+#include <linux/freezer.h>
-
- #include <linux/uaccess.h>
- #include <asm/unistd.h>
-@@ -1891,12 +1892,12 @@ void msleep(unsigned int msecs)
- * Use high resolution timers where the resolution of tick based
- * timers is inadequate.
- */
-- if (jiffs < 5 && hrtimer_resolution < NSEC_PER_SEC / HZ) {
-+ if (jiffs < 5 && hrtimer_resolution < NSEC_PER_SEC / HZ && !pm_freezing) {
- while (msecs)
- msecs = schedule_msec_hrtimeout_uninterruptible(msecs);
- return;
- }
-- timeout = msecs_to_jiffies(msecs) + 1;
-+ timeout = jiffs + 1;
-
- while (timeout)
- timeout = schedule_timeout_uninterruptible(timeout);
-@@ -1913,12 +1914,12 @@ unsigned long msleep_interruptible(unsigned int msecs)
- int jiffs = msecs_to_jiffies(msecs);
- unsigned long timeout;
-
-- if (jiffs < 5 && hrtimer_resolution < NSEC_PER_SEC / HZ) {
-+ if (jiffs < 5 && hrtimer_resolution < NSEC_PER_SEC / HZ && !pm_freezing) {
- while (msecs && !signal_pending(current))
- msecs = schedule_msec_hrtimeout_interruptible(msecs);
- return msecs;
- }
-- timeout = msecs_to_jiffies(msecs) + 1;
-+ timeout = jiffs + 1;
-
- while (timeout && !signal_pending(current))
- timeout = schedule_timeout_interruptible(timeout);
---
-2.11.0
-
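[annotation] The pm_freezing guard only matters on the hrtimer fast path. Working the condition through with concrete numbers, assuming HZ=100 and a high-resolution clockevent where hrtimer_resolution is far below one tick:

    /* fall back to tick timers when:
     *    jiffs > 4                                (long sleep, ticks suffice)
     * or hrtimer_resolution >= NSEC_PER_SEC / HZ  (no real high-res support;
     *                                              10,000,000 ns at HZ=100)
     * or pm_freezing                              (suspend in progress)
     *
     * e.g. a 30 ms request: msecs_to_jiffies(30) == 3 at HZ=100, so the
     * hrtimer path is normally taken, but during freeze it reverts to the
     * tick path so drivers with non-freezable timeouts behave as before. */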
diff --git a/sys-kernel/linux-image-redcore-lts/files/4.14-0011-Make-hrtimer-granularity-and-minimum-hrtimeout-confi.patch b/sys-kernel/linux-image-redcore-lts/files/4.14-0011-Make-hrtimer-granularity-and-minimum-hrtimeout-confi.patch
deleted file mode 100644
index 5ac20300..00000000
--- a/sys-kernel/linux-image-redcore-lts/files/4.14-0011-Make-hrtimer-granularity-and-minimum-hrtimeout-confi.patch
+++ /dev/null
@@ -1,136 +0,0 @@
-diff -Nur a/kernel/sysctl.c b/kernel/sysctl.c
---- a/kernel/sysctl.c 2018-11-03 17:03:07.433069521 +0000
-+++ b/kernel/sysctl.c 2018-11-03 17:02:11.020267246 +0000
-@@ -141,7 +141,9 @@
- extern int sched_iso_cpu;
- extern int sched_yield_type;
- #endif
--#ifdef CONFIG_PRINTK
-+extern int hrtimer_granularity_us;
-+extern int hrtimeout_min_us;
-+#if defined(CONFIG_PRINTK) || defined(CONFIG_SCHED_MUQSS)
- static int ten_thousand __read_only = 10000;
- #endif
- #ifdef CONFIG_PERF_EVENTS
-@@ -1119,6 +1121,24 @@
- .extra2 = &two,
- },
- #endif
-+ {
-+ .procname = "hrtimer_granularity_us",
-+ .data = &hrtimer_granularity_us,
-+ .maxlen = sizeof(int),
-+ .mode = 0644,
-+ .proc_handler = &proc_dointvec_minmax,
-+ .extra1 = &one,
-+ .extra2 = &ten_thousand,
-+ },
-+ {
-+ .procname = "hrtimeout_min_us",
-+ .data = &hrtimeout_min_us,
-+ .maxlen = sizeof(int),
-+ .mode = 0644,
-+ .proc_handler = &proc_dointvec_minmax,
-+ .extra1 = &one,
-+ .extra2 = &ten_thousand,
-+ },
- #if defined(CONFIG_S390) && defined(CONFIG_SMP)
- {
- .procname = "spin_retry",
-diff -Nur a/kernel/time/clockevents.c b/kernel/time/clockevents.c
---- a/kernel/time/clockevents.c 2018-11-03 17:03:07.433069521 +0000
-+++ b/kernel/time/clockevents.c 2018-11-03 16:58:17.283800909 +0000
-@@ -198,13 +198,9 @@
-
- #ifdef CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST
-
--#ifdef CONFIG_SCHED_MUQSS
-+int __read_mostly hrtimer_granularity_us = 100;
- /* Limit min_delta to 100us */
--#define MIN_DELTA_LIMIT (NSEC_PER_SEC / 10000)
--#else
--/* Limit min_delta to a jiffie */
--#define MIN_DELTA_LIMIT (NSEC_PER_SEC / HZ)
--#endif
-+#define MIN_DELTA_LIMIT (hrtimer_granularity_us * NSEC_PER_USEC)
-
- /**
- * clockevents_increase_min_delta - raise minimum delta of a clock event device
-diff -Nur a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
---- a/kernel/time/hrtimer.c 2018-11-03 17:04:16.448274547 +0000
-+++ b/kernel/time/hrtimer.c 2018-11-03 16:58:17.283800909 +0000
-@@ -1803,7 +1803,7 @@
- long __sched schedule_msec_hrtimeout(long timeout)
- {
- struct hrtimer_sleeper t;
-- int delta, secs, jiffs;
-+ int delta, jiffs;
- ktime_t expires;
-
- if (!timeout) {
-@@ -1820,9 +1820,8 @@
- if (jiffs > 4 || hrtimer_resolution >= NSEC_PER_SEC / HZ || pm_freezing)
- return schedule_timeout(jiffs);
-
-- secs = timeout / 1000;
- delta = (timeout % 1000) * NSEC_PER_MSEC;
-- expires = ktime_set(secs, delta);
-+ expires = ktime_set(0, delta);
-
- hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- hrtimer_set_expires_range_ns(&t.timer, expires, delta);
-@@ -1846,9 +1845,53 @@
-
- EXPORT_SYMBOL(schedule_msec_hrtimeout);
-
-+#define USECS_PER_SEC 1000000
-+extern int hrtimer_granularity_us;
-+
-+static inline long schedule_usec_hrtimeout(long timeout)
-+{
-+ struct hrtimer_sleeper t;
-+ ktime_t expires;
-+ int delta;
-+
-+ if (!timeout) {
-+ __set_current_state(TASK_RUNNING);
-+ return 0;
-+ }
-+
-+ if (hrtimer_resolution >= NSEC_PER_SEC / HZ)
-+ return schedule_timeout(usecs_to_jiffies(timeout));
-+
-+ if (timeout < hrtimer_granularity_us)
-+ timeout = hrtimer_granularity_us;
-+ delta = (timeout % USECS_PER_SEC) * NSEC_PER_USEC;
-+ expires = ktime_set(0, delta);
-+
-+ hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-+ hrtimer_set_expires_range_ns(&t.timer, expires, delta);
-+
-+ hrtimer_init_sleeper(&t, current);
-+
-+ hrtimer_start_expires(&t.timer, HRTIMER_MODE_REL);
-+
-+ if (likely(t.task))
-+ schedule();
-+
-+ hrtimer_cancel(&t.timer);
-+ destroy_hrtimer_on_stack(&t.timer);
-+
-+ __set_current_state(TASK_RUNNING);
-+
-+ expires = hrtimer_expires_remaining(&t.timer);
-+ timeout = ktime_to_us(expires);
-+ return timeout < 0 ? 0 : timeout;
-+}
-+
-+int __read_mostly hrtimeout_min_us = 1000;
-+
- long __sched schedule_min_hrtimeout(void)
- {
-- return schedule_msec_hrtimeout(1);
-+ return usecs_to_jiffies(schedule_usec_hrtimeout(hrtimeout_min_us));
- }
-
- EXPORT_SYMBOL(schedule_min_hrtimeout);
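[annotation] Both knobs land in kern_table, so, assuming the usual sysctl plumbing, they should surface as /proc/sys/kernel/hrtimer_granularity_us and /proc/sys/kernel/hrtimeout_min_us with the 1..10000 range enforced above. A small user-space sketch of tuning the minimum hrtimeout down to 500 us:

    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/proc/sys/kernel/hrtimeout_min_us", "w");

            if (!f)
                    return 1;        /* kernel lacks the patch, or not root */
            fprintf(f, "500\n");     /* floor for schedule_min_hrtimeout() */
            return fclose(f) ? 1 : 0;
    }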
diff --git a/sys-kernel/linux-image-redcore-lts/files/4.14-0012-Reinstate-default-Hz-of-100-in-combination-with-MuQS.patch b/sys-kernel/linux-image-redcore-lts/files/4.14-0012-Reinstate-default-Hz-of-100-in-combination-with-MuQS.patch
deleted file mode 100644
index 99b28d65..00000000
--- a/sys-kernel/linux-image-redcore-lts/files/4.14-0012-Reinstate-default-Hz-of-100-in-combination-with-MuQS.patch
+++ /dev/null
@@ -1,81 +0,0 @@
-From 9e47a80f690080c12ce607158b96c305707543b8 Mon Sep 17 00:00:00 2001
-From: Con Kolivas <kernel@kolivas.org>
-Date: Wed, 7 Dec 2016 21:23:01 +1100
-Subject: [PATCH 12/16] Reinstate default Hz of 100 in combination with MuQSS
- and -ck patches.
-
----
- kernel/Kconfig.hz | 25 ++++++++++++++++++-------
- 1 file changed, 18 insertions(+), 7 deletions(-)
-
-diff --git a/kernel/Kconfig.hz b/kernel/Kconfig.hz
-index 2a202a846757..1806fcac8f14 100644
---- a/kernel/Kconfig.hz
-+++ b/kernel/Kconfig.hz
-@@ -4,7 +4,8 @@
-
- choice
- prompt "Timer frequency"
-- default HZ_250
-+ default HZ_100 if SCHED_MUQSS
-+ default HZ_250_NODEF if !SCHED_MUQSS
- help
- Allows the configuration of the timer frequency. It is customary
- to have the timer interrupt run at 1000 Hz but 100 Hz may be more
-@@ -19,11 +20,18 @@ choice
- config HZ_100
- bool "100 HZ"
- help
-+ 100 Hz is a suitable choice in combination with MuQSS which does
-+ not rely on ticks for rescheduling interrupts, and is not Hz limited
-+ for timeouts and sleeps from both the kernel and userspace.
-+ This allows us to benefit from the lower overhead and higher
-+ throughput of fewer timer ticks.
-+
-+ Non-MuQSS kernels:
- 100 Hz is a typical choice for servers, SMP and NUMA systems
- with lots of processors that may show reduced performance if
- too many timer interrupts are occurring.
-
-- config HZ_250
-+ config HZ_250_NODEF
- bool "250 HZ"
- help
- 250 Hz is a good compromise choice allowing server performance
-@@ -31,7 +39,10 @@ choice
- on SMP and NUMA systems. If you are going to be using NTSC video
- or multimedia, selected 300Hz instead.
-
-- config HZ_300
-+ 250 Hz is the default choice for the mainline scheduler but not
-+ advantageous in combination with MuQSS.
-+
-+ config HZ_300_NODEF
- bool "300 HZ"
- help
- 300 Hz is a good compromise choice allowing server performance
-@@ -39,7 +50,7 @@ choice
- on SMP and NUMA systems and exactly dividing by both PAL and
- NTSC frame rates for video and multimedia work.
-
-- config HZ_1000
-+ config HZ_1000_NODEF
- bool "1000 HZ"
- help
- 1000 Hz is the preferred choice for desktop systems and other
-@@ -50,9 +61,9 @@ endchoice
- config HZ
- int
- default 100 if HZ_100
-- default 250 if HZ_250
-- default 300 if HZ_300
-- default 1000 if HZ_1000
-+ default 250 if HZ_250_NODEF
-+ default 300 if HZ_300_NODEF
-+ default 1000 if HZ_1000_NODEF
-
- config SCHED_HRTICK
- def_bool HIGH_RES_TIMERS
---
-2.11.0
-
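[annotation] The help-text claim that MuQSS is "not Hz limited" rests on the hrtimeout patches earlier in this series; without them, tick-based sleeps round up to whole jiffies, so HZ=100 makes short sleeps roughly ten times too long. A quick illustration of that rounding, approximating the generic msecs_to_jiffies() formula:

    static unsigned long ms_to_jiffies(unsigned int ms, unsigned int hz)
    {
            return (ms * hz + 999) / 1000;          /* rounds up */
    }

    /* ms_to_jiffies(1, 1000) == 1  ->  sleep ~1 ms
     * ms_to_jiffies(1,  250) == 1  ->  sleep ~4 ms  (one jiffy)
     * ms_to_jiffies(1,  100) == 1  ->  sleep ~10 ms (one jiffy) */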
diff --git a/sys-kernel/linux-image-redcore-lts/files/4.14-0013-Make-threaded-IRQs-optionally-the-default-which-can-.patch b/sys-kernel/linux-image-redcore-lts/files/4.14-0013-Make-threaded-IRQs-optionally-the-default-which-can-.patch
deleted file mode 100644
index 63ec9fdf..00000000
--- a/sys-kernel/linux-image-redcore-lts/files/4.14-0013-Make-threaded-IRQs-optionally-the-default-which-can-.patch
+++ /dev/null
@@ -1,61 +0,0 @@
-From 5902b315d4061ebbe73a62c52e6d3b618066cebc Mon Sep 17 00:00:00 2001
-From: Con Kolivas <kernel@kolivas.org>
-Date: Wed, 7 Dec 2016 21:13:16 +1100
-Subject: [PATCH 13/16] Make threaded IRQs optionally the default which can be
- disabled.
-
----
- kernel/irq/Kconfig | 14 ++++++++++++++
- kernel/irq/manage.c | 10 ++++++++++
- 2 files changed, 24 insertions(+)
-
-diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig
-index a117adf7084b..0984c54fd4e9 100644
---- a/kernel/irq/Kconfig
-+++ b/kernel/irq/Kconfig
-@@ -111,6 +111,20 @@ config IRQ_DOMAIN_DEBUG
- config IRQ_FORCED_THREADING
- bool
-
-+config FORCE_IRQ_THREADING
-+ bool "Make IRQ threading compulsory"
-+ depends on IRQ_FORCED_THREADING
-+ default y
-+ ---help---
-+
-+ Make IRQ threading mandatory for any IRQ handlers that support it
-+ instead of being optional and requiring the threadirqs kernel
-+ parameter. Instead they can be optionally disabled with the
-+ nothreadirqs kernel parameter.
-+
-+ Enable if you are building for a desktop or low latency system,
-+ otherwise say N.
-+
- config SPARSE_IRQ
- bool "Support sparse irq numbering" if MAY_HAVE_SPARSE_IRQ
- ---help---
-diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
-index 4bff6a10ae8e..5a6df0dd23c4 100644
---- a/kernel/irq/manage.c
-+++ b/kernel/irq/manage.c
-@@ -24,7 +24,17 @@
- #include "internals.h"
-
- #ifdef CONFIG_IRQ_FORCED_THREADING
-+#ifdef CONFIG_FORCE_IRQ_THREADING
-+__read_mostly bool force_irqthreads = true;
-+#else
- __read_mostly bool force_irqthreads;
-+#endif
-+static int __init setup_noforced_irqthreads(char *arg)
-+{
-+ force_irqthreads = false;
-+ return 0;
-+}
-+early_param("nothreadirqs", setup_noforced_irqthreads);
-
- static int __init setup_forced_irqthreads(char *arg)
- {
---
-2.11.0
-
diff --git a/sys-kernel/linux-image-redcore-lts/files/4.14-0014-Swap-sucks.patch b/sys-kernel/linux-image-redcore-lts/files/4.14-0014-Swap-sucks.patch
deleted file mode 100644
index 6bf5bcda..00000000
--- a/sys-kernel/linux-image-redcore-lts/files/4.14-0014-Swap-sucks.patch
+++ /dev/null
@@ -1,25 +0,0 @@
-From ed0ab4c80fcb6fa4abb4f2f897e591df6eaa2d0e Mon Sep 17 00:00:00 2001
-From: Con Kolivas <kernel@kolivas.org>
-Date: Sat, 12 Aug 2017 12:02:04 +1000
-Subject: [PATCH 14/16] Swap sucks.
-
----
- mm/vmscan.c | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/mm/vmscan.c b/mm/vmscan.c
-index eb2f0315b8c0..67d03efab288 100644
---- a/mm/vmscan.c
-+++ b/mm/vmscan.c
-@@ -149,7 +149,7 @@ struct scan_control {
- /*
- * From 0 .. 100. Higher means more swappy.
- */
--int vm_swappiness = 60;
-+int vm_swappiness = 33;
- /*
- * The total number of pages which are beyond the high watermark within all
- * zones.
---
-2.11.0
-
diff --git a/sys-kernel/linux-image-redcore-lts/files/4.14-0015-MuQSS.c-needs-irq_regs.h-to-use-get_irq_regs.patch b/sys-kernel/linux-image-redcore-lts/files/4.14-0015-MuQSS.c-needs-irq_regs.h-to-use-get_irq_regs.patch
deleted file mode 100644
index bfa509a5..00000000
--- a/sys-kernel/linux-image-redcore-lts/files/4.14-0015-MuQSS.c-needs-irq_regs.h-to-use-get_irq_regs.patch
+++ /dev/null
@@ -1,19 +0,0 @@
-diff --git a/kernel/sched/MuQSS.c b/kernel/sched/MuQSS.c
-index e84d700709ff6..16364915cff53 100644
---- a/kernel/sched/MuQSS.c
-+++ b/kernel/sched/MuQSS.c
-@@ -70,6 +70,14 @@
-
- #include "MuQSS.h"
-
-+/* needing to include irq_regs.h, "because reasons"...
-+ * implicit declaration of function ‘get_irq_regs’;
-+ * did you mean ‘get_ibs_caps’?
-+ * [-Werror=implicit-function-declaration]
-+ * ^ this is because autodetect is not flawless
-+ */
-+#include <asm/irq_regs.h>
-+
- #define rt_prio(prio) unlikely((prio) < MAX_RT_PRIO)
- #define rt_task(p) rt_prio((p)->prio)
- #define batch_task(p) (unlikely((p)->policy == SCHED_BATCH))
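[annotation] For context on the include: get_irq_regs(), declared in <asm/irq_regs.h>, returns the per-CPU register snapshot saved at interrupt entry (it is only meaningful in interrupt context), and schedulers typically use it to tell whether a tick landed in user or kernel mode. A minimal sketch:

    #include <asm/irq_regs.h>
    #include <asm/ptrace.h>

    static bool tick_hit_user_mode(void)
    {
            struct pt_regs *regs = get_irq_regs();  /* valid in IRQ context */

            return regs && user_mode(regs);
    }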
diff --git a/sys-kernel/linux-image-redcore-lts/files/4.14-0016-unfuck-MuQSS-on-linux-4_14_15+.patch b/sys-kernel/linux-image-redcore-lts/files/4.14-0016-unfuck-MuQSS-on-linux-4_14_15+.patch
deleted file mode 100644
index f7dc1d1c..00000000
--- a/sys-kernel/linux-image-redcore-lts/files/4.14-0016-unfuck-MuQSS-on-linux-4_14_15+.patch
+++ /dev/null
@@ -1,48 +0,0 @@
-diff --git a/kernel/sched/MuQSS.c b/kernel/sched/MuQSS.c
-index e84d700709ff6..b0be7fcfe41f9 100644
---- a/kernel/sched/MuQSS.c
-+++ b/kernel/sched/MuQSS.c
-@@ -55,6 +55,7 @@
- #include <linux/security.h>
- #include <linux/syscalls.h>
- #include <linux/tick.h>
-+#include <linux/version.h>
-
- #include <asm/switch_to.h>
- #include <asm/tlb.h>
-@@ -1959,7 +1960,11 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
- p->state = TASK_WAKING;
-
- if (p->in_iowait) {
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 15)
- delayacct_blkio_end();
-+#else
-+ delayacct_blkio_end(p);
-+#endif
- atomic_dec(&task_rq(p)->nr_iowait);
- }
-
-@@ -1970,7 +1975,11 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
- #else /* CONFIG_SMP */
-
- if (p->in_iowait) {
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 15)
- delayacct_blkio_end();
-+#else
-+ delayacct_blkio_end(p);
-+#endif
- atomic_dec(&task_rq(p)->nr_iowait);
- }
-
-@@ -2022,7 +2031,11 @@ static void try_to_wake_up_local(struct task_struct *p)
-
- if (!task_on_rq_queued(p)) {
- if (p->in_iowait) {
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 15)
- delayacct_blkio_end();
-+#else
-+ delayacct_blkio_end(p);
-+#endif
- atomic_dec(&rq->nr_iowait);
- }
- ttwu_activate(rq, p);
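[annotation] The version guard above is the standard pattern for tracking a signature change inside a stable series: 4.14.15 changed delayacct_blkio_end() to take the woken task. Factored out as a sketch (the patch itself inlines the guard at each call site):

    #include <linux/delayacct.h>
    #include <linux/version.h>

    static inline void compat_delayacct_blkio_end(struct task_struct *p)
    {
    #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 15)
            delayacct_blkio_end();          /* pre-4.14.15: no argument */
    #else
            delayacct_blkio_end(p);         /* 4.14.15+: takes the task */
    #endif
    }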
diff --git a/sys-kernel/linux-image-redcore-lts/files/4.14-0017-unfuck-MuQSS-on-linux-4_14_75+.patch b/sys-kernel/linux-image-redcore-lts/files/4.14-0017-unfuck-MuQSS-on-linux-4_14_75+.patch
deleted file mode 100644
index 1a1717bf..00000000
--- a/sys-kernel/linux-image-redcore-lts/files/4.14-0017-unfuck-MuQSS-on-linux-4_14_75+.patch
+++ /dev/null
@@ -1,14 +0,0 @@
-diff -Nur a/kernel/sched/MuQSS.c b/kernel/sched/MuQSS.c
---- a/kernel/sched/MuQSS.c 2019-01-05 22:51:24.547448624 +0000
-+++ b/kernel/sched/MuQSS.c 2019-01-05 22:58:29.821451056 +0000
-@@ -1021,6 +1021,10 @@
- #define CPUIDLE_THREAD_BUSY (16)
- #define CPUIDLE_DIFF_NODE (32)
-
-+#ifdef CONFIG_SCHED_SMT
-+DEFINE_STATIC_KEY_FALSE(sched_smt_present);
-+#endif
-+
- /*
- * The best idle CPU is chosen according to the CPUIDLE ranking above where the
- * lowest value would give the most suitable CPU to schedule p onto next. The
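[annotation] The fix defines sched_smt_present, presumably because 4.14.75+ stable code references that static key while MuQSS replaces the scheduler file that normally provides it. For reference, the jump-label API such a definition feeds, shown with a hypothetical key and helper:

    #include <linux/jump_label.h>

    DEFINE_STATIC_KEY_FALSE(my_smt_key);            /* hypothetical, starts off */

    static void do_smt_work(void) { }               /* hypothetical stub */

    static void hot_path(void)
    {
            if (static_branch_unlikely(&my_smt_key))
                    do_smt_work();
    }

    static void cpu_bringup(void)
    {
            static_branch_enable(&my_smt_key);      /* patches branch sites live */
    }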
diff --git a/sys-kernel/linux-image-redcore-lts/files/4.14-Revert-ath10k-activate-user-space-firmware-loading.patch b/sys-kernel/linux-image-redcore-lts/files/4.14-Revert-ath10k-activate-user-space-firmware-loading.patch
deleted file mode 100644
index 28f9b2f6..00000000
--- a/sys-kernel/linux-image-redcore-lts/files/4.14-Revert-ath10k-activate-user-space-firmware-loading.patch
+++ /dev/null
@@ -1,13 +0,0 @@
-diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
-index a4f635820..9b4c4facf 100644
---- a/drivers/net/wireless/ath/ath10k/core.c
-+++ b/drivers/net/wireless/ath/ath10k/core.c
-@@ -519,7 +519,7 @@ static const struct firmware *ath10k_fetch_fw_file(struct ath10k *ar,
- dir = ".";
-
- snprintf(filename, sizeof(filename), "%s/%s", dir, file);
-- ret = request_firmware(&fw, filename, ar->dev);
-+ ret = request_firmware_direct(&fw, filename, ar->dev);
- ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot fw request '%s': %d\n",
- filename, ret);
-
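[annotation] This revert swaps request_firmware() back to request_firmware_direct(): the direct variant searches only the kernel's firmware paths and fails quietly, whereas request_firmware() may additionally fall back to the user-space (sysfs) firmware loader that the reverted upstream change wanted to allow. Side by side, shown together only for comparison; the firmware name is illustrative:

    #include <linux/firmware.h>

    static int load_fw_sketch(struct ath10k *ar)
    {
            const struct firmware *fw;
            int ret;

            /* direct: kernel paths only, no user-space fallback,
             * and a missing file fails without a warning */
            ret = request_firmware_direct(&fw, "ath10k/fw.bin", ar->dev);

            /* full: same search, plus the optional user-space fallback */
            ret = request_firmware(&fw, "ath10k/fw.bin", ar->dev);

            return ret;
    }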
diff --git a/sys-kernel/linux-image-redcore-lts/files/4.14-amd64.config b/sys-kernel/linux-image-redcore-lts/files/4.14-amd64.config
deleted file mode 100644
index 307b0bd9..00000000
--- a/sys-kernel/linux-image-redcore-lts/files/4.14-amd64.config
+++ /dev/null
@@ -1,9101 +0,0 @@
-#
-# Automatically generated file; DO NOT EDIT.
-# Linux/x86 4.14.95-redcore-lts-r1 Kernel Configuration
-#
-CONFIG_64BIT=y
-CONFIG_X86_64=y
-CONFIG_X86=y
-CONFIG_INSTRUCTION_DECODER=y
-CONFIG_OUTPUT_FORMAT="elf64-x86-64"
-CONFIG_ARCH_DEFCONFIG="arch/x86/configs/x86_64_defconfig"
-CONFIG_LOCKDEP_SUPPORT=y
-CONFIG_STACKTRACE_SUPPORT=y
-CONFIG_MMU=y
-CONFIG_ARCH_MMAP_RND_BITS_MIN=28
-CONFIG_ARCH_MMAP_RND_BITS_MAX=32
-CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN=8
-CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX=16
-CONFIG_NEED_DMA_MAP_STATE=y
-CONFIG_NEED_SG_DMA_LENGTH=y
-CONFIG_GENERIC_ISA_DMA=y
-CONFIG_GENERIC_BUG=y
-CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y
-CONFIG_GENERIC_HWEIGHT=y
-CONFIG_ARCH_MAY_HAVE_PC_FDC=y
-CONFIG_RWSEM_XCHGADD_ALGORITHM=y
-CONFIG_GENERIC_CALIBRATE_DELAY=y
-CONFIG_ARCH_HAS_CPU_RELAX=y
-CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y
-CONFIG_HAVE_SETUP_PER_CPU_AREA=y
-CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y
-CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y
-CONFIG_ARCH_HIBERNATION_POSSIBLE=y
-CONFIG_ARCH_SUSPEND_POSSIBLE=y
-CONFIG_ARCH_WANT_HUGE_PMD_SHARE=y
-CONFIG_ARCH_WANT_GENERAL_HUGETLB=y
-CONFIG_ZONE_DMA32=y
-CONFIG_AUDIT_ARCH=y
-CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING=y
-CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y
-CONFIG_HAVE_INTEL_TXT=y
-CONFIG_X86_64_SMP=y
-CONFIG_ARCH_SUPPORTS_UPROBES=y
-CONFIG_FIX_EARLYCON_MEM=y
-CONFIG_PGTABLE_LEVELS=4
-CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
-CONFIG_IRQ_WORK=y
-CONFIG_BUILDTIME_EXTABLE_SORT=y
-CONFIG_THREAD_INFO_IN_TASK=y
-
-#
-# General setup
-#
-CONFIG_SCHED_MUQSS=y
-CONFIG_INIT_ENV_ARG_LIMIT=32
-CONFIG_CROSS_COMPILE=""
-# CONFIG_COMPILE_TEST is not set
-CONFIG_LOCALVERSION=""
-CONFIG_LOCALVERSION_AUTO=y
-CONFIG_HAVE_KERNEL_GZIP=y
-CONFIG_HAVE_KERNEL_BZIP2=y
-CONFIG_HAVE_KERNEL_LZMA=y
-CONFIG_HAVE_KERNEL_XZ=y
-CONFIG_HAVE_KERNEL_LZO=y
-CONFIG_HAVE_KERNEL_LZ4=y
-# CONFIG_KERNEL_GZIP is not set
-# CONFIG_KERNEL_BZIP2 is not set
-# CONFIG_KERNEL_LZMA is not set
-# CONFIG_KERNEL_XZ is not set
-# CONFIG_KERNEL_LZO is not set
-CONFIG_KERNEL_LZ4=y
-CONFIG_DEFAULT_HOSTNAME="(none)"
-CONFIG_SWAP=y
-CONFIG_SYSVIPC=y
-CONFIG_SYSVIPC_SYSCTL=y
-CONFIG_POSIX_MQUEUE=y
-CONFIG_POSIX_MQUEUE_SYSCTL=y
-CONFIG_CROSS_MEMORY_ATTACH=y
-CONFIG_FHANDLE=y
-# CONFIG_USELIB is not set
-CONFIG_AUDIT=y
-CONFIG_HAVE_ARCH_AUDITSYSCALL=y
-CONFIG_AUDITSYSCALL=y
-CONFIG_AUDIT_WATCH=y
-CONFIG_AUDIT_TREE=y
-
-#
-# IRQ subsystem
-#
-CONFIG_GENERIC_IRQ_PROBE=y
-CONFIG_GENERIC_IRQ_SHOW=y
-CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y
-CONFIG_GENERIC_PENDING_IRQ=y
-CONFIG_GENERIC_IRQ_MIGRATION=y
-CONFIG_GENERIC_IRQ_CHIP=y
-CONFIG_IRQ_DOMAIN=y
-CONFIG_IRQ_SIM=y
-CONFIG_IRQ_DOMAIN_HIERARCHY=y
-CONFIG_GENERIC_MSI_IRQ=y
-CONFIG_GENERIC_MSI_IRQ_DOMAIN=y
-# CONFIG_IRQ_DOMAIN_DEBUG is not set
-CONFIG_IRQ_FORCED_THREADING=y
-CONFIG_FORCE_IRQ_THREADING=y
-CONFIG_SPARSE_IRQ=y
-# CONFIG_GENERIC_IRQ_DEBUGFS is not set
-CONFIG_CLOCKSOURCE_WATCHDOG=y
-CONFIG_ARCH_CLOCKSOURCE_DATA=y
-CONFIG_CLOCKSOURCE_VALIDATE_LAST_CYCLE=y
-CONFIG_GENERIC_TIME_VSYSCALL=y
-CONFIG_GENERIC_CLOCKEVENTS=y
-CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y
-CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST=y
-CONFIG_GENERIC_CMOS_UPDATE=y
-
-#
-# Timers subsystem
-#
-CONFIG_TICK_ONESHOT=y
-CONFIG_HZ_PERIODIC=y
-# CONFIG_NO_HZ_IDLE is not set
-# CONFIG_NO_HZ_FULL is not set
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
-
-#
-# CPU/Task time and stats accounting
-#
-CONFIG_VIRT_CPU_ACCOUNTING=y
-# CONFIG_TICK_CPU_ACCOUNTING is not set
-CONFIG_VIRT_CPU_ACCOUNTING_GEN=y
-CONFIG_IRQ_TIME_ACCOUNTING=y
-CONFIG_BSD_PROCESS_ACCT=y
-# CONFIG_BSD_PROCESS_ACCT_V3 is not set
-CONFIG_TASKSTATS=y
-CONFIG_TASK_DELAY_ACCT=y
-CONFIG_TASK_XACCT=y
-CONFIG_TASK_IO_ACCOUNTING=y
-
-#
-# RCU Subsystem
-#
-CONFIG_PREEMPT_RCU=y
-# CONFIG_RCU_EXPERT is not set
-CONFIG_SRCU=y
-CONFIG_TREE_SRCU=y
-CONFIG_TASKS_RCU=y
-CONFIG_RCU_STALL_COMMON=y
-CONFIG_RCU_NEED_SEGCBLIST=y
-CONFIG_CONTEXT_TRACKING=y
-# CONFIG_CONTEXT_TRACKING_FORCE is not set
-CONFIG_BUILD_BIN2C=y
-CONFIG_IKCONFIG=y
-CONFIG_IKCONFIG_PROC=y
-CONFIG_LOG_BUF_SHIFT=17
-CONFIG_LOG_CPU_MAX_BUF_SHIFT=13
-CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT=13
-CONFIG_HAVE_UNSTABLE_SCHED_CLOCK=y
-CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y
-CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH=y
-CONFIG_ARCH_SUPPORTS_INT128=y
-CONFIG_CGROUPS=y
-CONFIG_PAGE_COUNTER=y
-CONFIG_MEMCG=y
-CONFIG_MEMCG_SWAP=y
-CONFIG_MEMCG_SWAP_ENABLED=y
-CONFIG_BLK_CGROUP=y
-# CONFIG_DEBUG_BLK_CGROUP is not set
-CONFIG_CGROUP_WRITEBACK=y
-CONFIG_CGROUP_SCHED=y
-CONFIG_CGROUP_PIDS=y
-# CONFIG_CGROUP_RDMA is not set
-CONFIG_CGROUP_FREEZER=y
-CONFIG_CGROUP_HUGETLB=y
-CONFIG_CPUSETS=y
-CONFIG_PROC_PID_CPUSET=y
-CONFIG_CGROUP_DEVICE=y
-CONFIG_CGROUP_PERF=y
-CONFIG_CGROUP_BPF=y
-# CONFIG_CGROUP_DEBUG is not set
-CONFIG_SOCK_CGROUP_DATA=y
-# CONFIG_CHECKPOINT_RESTORE is not set
-CONFIG_NAMESPACES=y
-CONFIG_UTS_NS=y
-CONFIG_IPC_NS=y
-CONFIG_USER_NS=y
-CONFIG_PID_NS=y
-CONFIG_NET_NS=y
-# CONFIG_SYSFS_DEPRECATED is not set
-CONFIG_RELAY=y
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE=""
-CONFIG_RD_GZIP=y
-CONFIG_RD_BZIP2=y
-CONFIG_RD_LZMA=y
-CONFIG_RD_XZ=y
-CONFIG_RD_LZO=y
-CONFIG_RD_LZ4=y
-CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-# CONFIG_LOCAL_INIT is not set
-CONFIG_SYSCTL=y
-CONFIG_ANON_INODES=y
-CONFIG_HAVE_UID16=y
-CONFIG_SYSCTL_EXCEPTION_TRACE=y
-CONFIG_HAVE_PCSPKR_PLATFORM=y
-CONFIG_BPF=y
-# CONFIG_EXPERT is not set
-CONFIG_UID16=y
-CONFIG_MULTIUSER=y
-CONFIG_SGETMASK_SYSCALL=y
-CONFIG_SYSFS_SYSCALL=y
-# CONFIG_SYSCTL_SYSCALL is not set
-CONFIG_POSIX_TIMERS=y
-CONFIG_KALLSYMS=y
-# CONFIG_KALLSYMS_ALL is not set
-CONFIG_KALLSYMS_ABSOLUTE_PERCPU=y
-CONFIG_KALLSYMS_BASE_RELATIVE=y
-CONFIG_PRINTK=y
-CONFIG_PRINTK_NMI=y
-CONFIG_BUG=y
-CONFIG_ELF_CORE=y
-CONFIG_PCSPKR_PLATFORM=y
-CONFIG_BASE_FULL=y
-CONFIG_FUTEX=y
-CONFIG_FUTEX_PI=y
-CONFIG_EPOLL=y
-CONFIG_SIGNALFD=y
-CONFIG_TIMERFD=y
-CONFIG_EVENTFD=y
-CONFIG_BPF_SYSCALL=y
-CONFIG_BPF_JIT_ALWAYS_ON=y
-CONFIG_SHMEM=y
-CONFIG_AIO=y
-CONFIG_ADVISE_SYSCALLS=y
-CONFIG_USERFAULTFD=y
-CONFIG_PCI_QUIRKS=y
-CONFIG_MEMBARRIER=y
-# CONFIG_EMBEDDED is not set
-CONFIG_HAVE_PERF_EVENTS=y
-# CONFIG_PC104 is not set
-
-#
-# Kernel Performance Events And Counters
-#
-CONFIG_PERF_EVENTS=y
-# CONFIG_DEBUG_PERF_USE_VMALLOC is not set
-CONFIG_VM_EVENT_COUNTERS=y
-CONFIG_SLUB_DEBUG=y
-# CONFIG_SLUB_MEMCG_SYSFS_ON is not set
-# CONFIG_COMPAT_BRK is not set
-# CONFIG_SLAB is not set
-CONFIG_SLUB=y
-CONFIG_SLAB_MERGE_DEFAULT=y
-CONFIG_SLAB_FREELIST_RANDOM=y
-CONFIG_SLAB_FREELIST_HARDENED=y
-CONFIG_SLAB_HARDENED=y
-CONFIG_SLAB_SANITIZE=y
-CONFIG_SLAB_SANITIZE_VERIFY=y
-CONFIG_SLUB_CPU_PARTIAL=y
-CONFIG_SYSTEM_DATA_VERIFICATION=y
-CONFIG_PROFILING=y
-CONFIG_TRACEPOINTS=y
-CONFIG_CRASH_CORE=y
-CONFIG_KEXEC_CORE=y
-CONFIG_HOTPLUG_SMT=y
-# CONFIG_OPROFILE is not set
-CONFIG_HAVE_OPROFILE=y
-CONFIG_OPROFILE_NMI_TIMER=y
-CONFIG_KPROBES=y
-CONFIG_JUMP_LABEL=y
-# CONFIG_STATIC_KEYS_SELFTEST is not set
-CONFIG_OPTPROBES=y
-CONFIG_UPROBES=y
-# CONFIG_HAVE_64BIT_ALIGNED_ACCESS is not set
-CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y
-CONFIG_ARCH_USE_BUILTIN_BSWAP=y
-CONFIG_KRETPROBES=y
-CONFIG_USER_RETURN_NOTIFIER=y
-CONFIG_HAVE_IOREMAP_PROT=y
-CONFIG_HAVE_KPROBES=y
-CONFIG_HAVE_KRETPROBES=y
-CONFIG_HAVE_OPTPROBES=y
-CONFIG_HAVE_KPROBES_ON_FTRACE=y
-CONFIG_HAVE_NMI=y
-CONFIG_HAVE_ARCH_TRACEHOOK=y
-CONFIG_HAVE_DMA_CONTIGUOUS=y
-CONFIG_GENERIC_SMP_IDLE_THREAD=y
-CONFIG_ARCH_HAS_FORTIFY_SOURCE=y
-CONFIG_ARCH_HAS_SET_MEMORY=y
-CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT=y
-CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y
-CONFIG_HAVE_CLK=y
-CONFIG_HAVE_DMA_API_DEBUG=y
-CONFIG_HAVE_HW_BREAKPOINT=y
-CONFIG_HAVE_MIXED_BREAKPOINTS_REGS=y
-CONFIG_HAVE_USER_RETURN_NOTIFIER=y
-CONFIG_HAVE_PERF_EVENTS_NMI=y
-CONFIG_HAVE_HARDLOCKUP_DETECTOR_PERF=y
-CONFIG_HAVE_PERF_REGS=y
-CONFIG_HAVE_PERF_USER_STACK_DUMP=y
-CONFIG_HAVE_ARCH_JUMP_LABEL=y
-CONFIG_HAVE_RCU_TABLE_FREE=y
-CONFIG_HAVE_RCU_TABLE_INVALIDATE=y
-CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y
-CONFIG_HAVE_ALIGNED_STRUCT_PAGE=y
-CONFIG_HAVE_CMPXCHG_LOCAL=y
-CONFIG_HAVE_CMPXCHG_DOUBLE=y
-CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION=y
-CONFIG_ARCH_WANT_OLD_COMPAT_IPC=y
-CONFIG_HAVE_ARCH_SECCOMP_FILTER=y
-CONFIG_SECCOMP_FILTER=y
-CONFIG_HAVE_GCC_PLUGINS=y
-# CONFIG_GCC_PLUGINS is not set
-CONFIG_HAVE_CC_STACKPROTECTOR=y
-CONFIG_CC_STACKPROTECTOR=y
-# CONFIG_CC_STACKPROTECTOR_NONE is not set
-# CONFIG_CC_STACKPROTECTOR_REGULAR is not set
-CONFIG_CC_STACKPROTECTOR_STRONG=y
-CONFIG_THIN_ARCHIVES=y
-CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES=y
-CONFIG_HAVE_CONTEXT_TRACKING=y
-CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y
-CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y
-CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y
-CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD=y
-CONFIG_HAVE_ARCH_HUGE_VMAP=y
-CONFIG_HAVE_ARCH_SOFT_DIRTY=y
-CONFIG_HAVE_MOD_ARCH_SPECIFIC=y
-CONFIG_MODULES_USE_ELF_RELA=y
-CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK=y
-CONFIG_ARCH_HAS_ELF_RANDOMIZE=y
-CONFIG_HAVE_ARCH_MMAP_RND_BITS=y
-CONFIG_HAVE_EXIT_THREAD=y
-CONFIG_ARCH_MMAP_RND_BITS=32
-CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS=y
-CONFIG_ARCH_MMAP_RND_COMPAT_BITS=16
-CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES=y
-CONFIG_HAVE_COPY_THREAD_TLS=y
-CONFIG_HAVE_STACK_VALIDATION=y
-# CONFIG_HAVE_ARCH_HASH is not set
-# CONFIG_ISA_BUS_API is not set
-CONFIG_OLD_SIGSUSPEND3=y
-CONFIG_COMPAT_OLD_SIGACTION=y
-# CONFIG_CPU_NO_EFFICIENT_FFS is not set
-CONFIG_HAVE_ARCH_VMAP_STACK=y
-CONFIG_VMAP_STACK=y
-# CONFIG_ARCH_OPTIONAL_KERNEL_RWX is not set
-# CONFIG_ARCH_OPTIONAL_KERNEL_RWX_DEFAULT is not set
-CONFIG_ARCH_HAS_STRICT_KERNEL_RWX=y
-CONFIG_STRICT_KERNEL_RWX=y
-CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y
-CONFIG_STRICT_MODULE_RWX=y
-CONFIG_ARCH_HAS_REFCOUNT=y
-CONFIG_REFCOUNT_FULL=y
-
-#
-# GCOV-based kernel profiling
-#
-# CONFIG_GCOV_KERNEL is not set
-CONFIG_ARCH_HAS_GCOV_PROFILE_ALL=y
-# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
-CONFIG_SLABINFO=y
-CONFIG_RT_MUTEXES=y
-CONFIG_BASE_SMALL=0
-CONFIG_MODULES=y
-CONFIG_MODULE_FORCE_LOAD=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_MODULE_FORCE_UNLOAD=y
-CONFIG_MODVERSIONS=y
-CONFIG_MODULE_SRCVERSION_ALL=y
-CONFIG_MODULE_SIG=y
-# CONFIG_MODULE_SIG_FORCE is not set
-CONFIG_MODULE_SIG_ALL=y
-# CONFIG_MODULE_SIG_SHA1 is not set
-# CONFIG_MODULE_SIG_SHA224 is not set
-# CONFIG_MODULE_SIG_SHA256 is not set
-# CONFIG_MODULE_SIG_SHA384 is not set
-CONFIG_MODULE_SIG_SHA512=y
-CONFIG_MODULE_SIG_HASH="sha512"
-CONFIG_MODULE_COMPRESS=y
-CONFIG_MODULE_COMPRESS_GZIP=y
-# CONFIG_MODULE_COMPRESS_XZ is not set
-# CONFIG_TRIM_UNUSED_KSYMS is not set
-CONFIG_MODULES_TREE_LOOKUP=y
-CONFIG_BLOCK=y
-CONFIG_BLK_SCSI_REQUEST=y
-CONFIG_BLK_DEV_BSG=y
-CONFIG_BLK_DEV_BSGLIB=y
-CONFIG_BLK_DEV_INTEGRITY=y
-CONFIG_BLK_DEV_ZONED=y
-CONFIG_BLK_DEV_THROTTLING=y
-# CONFIG_BLK_DEV_THROTTLING_LOW is not set
-CONFIG_BLK_CMDLINE_PARSER=y
-CONFIG_BLK_WBT=y
-CONFIG_BLK_WBT_SQ=y
-CONFIG_BLK_WBT_MQ=y
-CONFIG_BLK_DEBUG_FS=y
-# CONFIG_BLK_SED_OPAL is not set
-
-#
-# Partition Types
-#
-CONFIG_PARTITION_ADVANCED=y
-# CONFIG_ACORN_PARTITION is not set
-# CONFIG_AIX_PARTITION is not set
-# CONFIG_OSF_PARTITION is not set
-# CONFIG_AMIGA_PARTITION is not set
-# CONFIG_ATARI_PARTITION is not set
-# CONFIG_MAC_PARTITION is not set
-CONFIG_MSDOS_PARTITION=y
-# CONFIG_BSD_DISKLABEL is not set
-# CONFIG_MINIX_SUBPARTITION is not set
-# CONFIG_SOLARIS_X86_PARTITION is not set
-# CONFIG_UNIXWARE_DISKLABEL is not set
-CONFIG_LDM_PARTITION=y
-CONFIG_LDM_DEBUG=y
-# CONFIG_SGI_PARTITION is not set
-# CONFIG_ULTRIX_PARTITION is not set
-# CONFIG_SUN_PARTITION is not set
-# CONFIG_KARMA_PARTITION is not set
-CONFIG_EFI_PARTITION=y
-# CONFIG_SYSV68_PARTITION is not set
-CONFIG_CMDLINE_PARTITION=y
-CONFIG_BLOCK_COMPAT=y
-CONFIG_BLK_MQ_PCI=y
-CONFIG_BLK_MQ_VIRTIO=y
-CONFIG_BLK_MQ_RDMA=y
-
-#
-# IO Schedulers
-#
-CONFIG_IOSCHED_NOOP=y
-CONFIG_IOSCHED_DEADLINE=y
-CONFIG_IOSCHED_CFQ=y
-CONFIG_CFQ_GROUP_IOSCHED=y
-# CONFIG_DEFAULT_DEADLINE is not set
-CONFIG_DEFAULT_CFQ=y
-# CONFIG_DEFAULT_NOOP is not set
-CONFIG_DEFAULT_IOSCHED="cfq"
-CONFIG_MQ_IOSCHED_DEADLINE=y
-# CONFIG_MQ_IOSCHED_KYBER is not set
-CONFIG_IOSCHED_BFQ=y
-CONFIG_BFQ_GROUP_IOSCHED=y
-CONFIG_PREEMPT_NOTIFIERS=y
-CONFIG_PADATA=y
-CONFIG_ASN1=y
-CONFIG_UNINLINE_SPIN_UNLOCK=y
-CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y
-CONFIG_MUTEX_SPIN_ON_OWNER=y
-CONFIG_RWSEM_SPIN_ON_OWNER=y
-CONFIG_LOCK_SPIN_ON_OWNER=y
-CONFIG_ARCH_USE_QUEUED_SPINLOCKS=y
-CONFIG_QUEUED_SPINLOCKS=y
-CONFIG_ARCH_USE_QUEUED_RWLOCKS=y
-CONFIG_QUEUED_RWLOCKS=y
-CONFIG_FREEZER=y
-
-#
-# Processor type and features
-#
-CONFIG_ZONE_DMA=y
-CONFIG_SMP=y
-CONFIG_X86_FEATURE_NAMES=y
-CONFIG_X86_FAST_FEATURE_TESTS=y
-CONFIG_X86_X2APIC=y
-CONFIG_X86_MPPARSE=y
-# CONFIG_GOLDFISH is not set
-CONFIG_RETPOLINE=y
-CONFIG_INTEL_RDT=y
-# CONFIG_X86_EXTENDED_PLATFORM is not set
-CONFIG_X86_INTEL_LPSS=y
-CONFIG_X86_AMD_PLATFORM_DEVICE=y
-CONFIG_IOSF_MBI=y
-# CONFIG_IOSF_MBI_DEBUG is not set
-CONFIG_X86_SUPPORTS_MEMORY_FAILURE=y
-CONFIG_SCHED_OMIT_FRAME_POINTER=y
-CONFIG_HYPERVISOR_GUEST=y
-CONFIG_PARAVIRT=y
-# CONFIG_PARAVIRT_DEBUG is not set
-# CONFIG_PARAVIRT_SPINLOCKS is not set
-# CONFIG_XEN is not set
-CONFIG_KVM_GUEST=y
-# CONFIG_KVM_DEBUG_FS is not set
-# CONFIG_PARAVIRT_TIME_ACCOUNTING is not set
-CONFIG_PARAVIRT_CLOCK=y
-CONFIG_NO_BOOTMEM=y
-# CONFIG_MK8 is not set
-# CONFIG_MPSC is not set
-# CONFIG_MCORE2 is not set
-# CONFIG_MATOM is not set
-CONFIG_GENERIC_CPU=y
-CONFIG_X86_INTERNODE_CACHE_SHIFT=6
-CONFIG_X86_L1_CACHE_SHIFT=6
-CONFIG_X86_TSC=y
-CONFIG_X86_CMPXCHG64=y
-CONFIG_X86_CMOV=y
-CONFIG_X86_MINIMUM_CPU_FAMILY=64
-CONFIG_X86_DEBUGCTLMSR=y
-CONFIG_CPU_SUP_INTEL=y
-CONFIG_CPU_SUP_AMD=y
-CONFIG_CPU_SUP_CENTAUR=y
-CONFIG_HPET_TIMER=y
-CONFIG_HPET_EMULATE_RTC=y
-CONFIG_DMI=y
-CONFIG_GART_IOMMU=y
-CONFIG_CALGARY_IOMMU=y
-CONFIG_CALGARY_IOMMU_ENABLED_BY_DEFAULT=y
-CONFIG_SWIOTLB=y
-CONFIG_IOMMU_HELPER=y
-CONFIG_MAXSMP=y
-CONFIG_NR_CPUS=8192
-CONFIG_SCHED_SMT=y
-CONFIG_SMT_NICE=y
-CONFIG_SCHED_MC=y
-CONFIG_SCHED_MC_PRIO=y
-# CONFIG_PREEMPT_NONE is not set
-# CONFIG_PREEMPT_VOLUNTARY is not set
-CONFIG_PREEMPT=y
-CONFIG_PREEMPT_COUNT=y
-CONFIG_X86_LOCAL_APIC=y
-CONFIG_X86_IO_APIC=y
-CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y
-CONFIG_X86_MCE=y
-# CONFIG_X86_MCELOG_LEGACY is not set
-CONFIG_X86_MCE_INTEL=y
-CONFIG_X86_MCE_AMD=y
-CONFIG_X86_MCE_THRESHOLD=y
-# CONFIG_X86_MCE_INJECT is not set
-CONFIG_X86_THERMAL_VECTOR=y
-
-#
-# Performance monitoring
-#
-CONFIG_PERF_EVENTS_INTEL_UNCORE=y
-CONFIG_PERF_EVENTS_INTEL_RAPL=y
-CONFIG_PERF_EVENTS_INTEL_CSTATE=y
-CONFIG_PERF_EVENTS_AMD_POWER=m
-# CONFIG_VM86 is not set
-# CONFIG_X86_16BIT is not set
-CONFIG_X86_VSYSCALL_EMULATION=y
-CONFIG_I8K=m
-CONFIG_MICROCODE=y
-CONFIG_MICROCODE_INTEL=y
-CONFIG_MICROCODE_AMD=y
-CONFIG_MICROCODE_OLD_INTERFACE=y
-CONFIG_X86_MSR=m
-CONFIG_X86_CPUID=m
-# CONFIG_X86_5LEVEL is not set
-CONFIG_ARCH_PHYS_ADDR_T_64BIT=y
-CONFIG_ARCH_DMA_ADDR_T_64BIT=y
-CONFIG_X86_DIRECT_GBPAGES=y
-CONFIG_ARCH_HAS_MEM_ENCRYPT=y
-CONFIG_AMD_MEM_ENCRYPT=y
-# CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT is not set
-CONFIG_ARCH_USE_MEMREMAP_PROT=y
-CONFIG_NUMA=y
-CONFIG_AMD_NUMA=y
-CONFIG_X86_64_ACPI_NUMA=y
-CONFIG_NODES_SPAN_OTHER_NODES=y
-# CONFIG_NUMA_EMU is not set
-CONFIG_NODES_SHIFT=10
-CONFIG_ARCH_SPARSEMEM_ENABLE=y
-CONFIG_ARCH_SPARSEMEM_DEFAULT=y
-CONFIG_ARCH_SELECT_MEMORY_MODEL=y
-CONFIG_ARCH_MEMORY_PROBE=y
-CONFIG_ARCH_PROC_KCORE_TEXT=y
-CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000
-CONFIG_SELECT_MEMORY_MODEL=y
-CONFIG_SPARSEMEM_MANUAL=y
-CONFIG_SPARSEMEM=y
-CONFIG_NEED_MULTIPLE_NODES=y
-CONFIG_HAVE_MEMORY_PRESENT=y
-CONFIG_SPARSEMEM_EXTREME=y
-CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y
-CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER=y
-CONFIG_SPARSEMEM_VMEMMAP=y
-CONFIG_HAVE_MEMBLOCK=y
-CONFIG_HAVE_MEMBLOCK_NODE_MAP=y
-CONFIG_HAVE_GENERIC_GUP=y
-CONFIG_ARCH_DISCARD_MEMBLOCK=y
-CONFIG_MEMORY_ISOLATION=y
-CONFIG_HAVE_BOOTMEM_INFO_NODE=y
-CONFIG_MEMORY_HOTPLUG=y
-CONFIG_MEMORY_HOTPLUG_SPARSE=y
-# CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE is not set
-CONFIG_MEMORY_HOTREMOVE=y
-CONFIG_SPLIT_PTLOCK_CPUS=4
-CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK=y
-CONFIG_MEMORY_BALLOON=y
-CONFIG_BALLOON_COMPACTION=y
-CONFIG_COMPACTION=y
-CONFIG_MIGRATION=y
-CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION=y
-CONFIG_ARCH_ENABLE_THP_MIGRATION=y
-CONFIG_PHYS_ADDR_T_64BIT=y
-CONFIG_BOUNCE=y
-CONFIG_VIRT_TO_BUS=y
-CONFIG_MMU_NOTIFIER=y
-CONFIG_KSM=y
-CONFIG_UKSM=y
-# CONFIG_KSM_LEGACY is not set
-CONFIG_DEFAULT_MMAP_MIN_ADDR=65536
-CONFIG_ARCH_SUPPORTS_MEMORY_FAILURE=y
-CONFIG_MEMORY_FAILURE=y
-# CONFIG_HWPOISON_INJECT is not set
-CONFIG_TRANSPARENT_HUGEPAGE=y
-CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS=y
-# CONFIG_TRANSPARENT_HUGEPAGE_MADVISE is not set
-CONFIG_ARCH_WANTS_THP_SWAP=y
-CONFIG_THP_SWAP=y
-CONFIG_TRANSPARENT_HUGE_PAGECACHE=y
-CONFIG_CLEANCACHE=y
-CONFIG_FRONTSWAP=y
-CONFIG_CMA=y
-# CONFIG_CMA_DEBUG is not set
-# CONFIG_CMA_DEBUGFS is not set
-CONFIG_CMA_AREAS=7
-# CONFIG_ZSWAP is not set
-CONFIG_ZPOOL=m
-CONFIG_ZBUD=m
-CONFIG_Z3FOLD=m
-CONFIG_ZSMALLOC=y
-# CONFIG_PGTABLE_MAPPING is not set
-# CONFIG_ZSMALLOC_STAT is not set
-CONFIG_GENERIC_EARLY_IOREMAP=y
-CONFIG_ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT=y
-# CONFIG_DEFERRED_STRUCT_PAGE_INIT is not set
-# CONFIG_IDLE_PAGE_TRACKING is not set
-CONFIG_ARCH_HAS_ZONE_DEVICE=y
-# CONFIG_ZONE_DEVICE is not set
-CONFIG_FRAME_VECTOR=y
-CONFIG_ARCH_USES_HIGH_VMA_FLAGS=y
-CONFIG_ARCH_HAS_PKEYS=y
-# CONFIG_PERCPU_STATS is not set
-CONFIG_X86_PMEM_LEGACY_DEVICE=y
-CONFIG_X86_PMEM_LEGACY=m
-CONFIG_X86_CHECK_BIOS_CORRUPTION=y
-CONFIG_X86_BOOTPARAM_MEMORY_CORRUPTION_CHECK=y
-CONFIG_X86_RESERVE_LOW=64
-CONFIG_MTRR=y
-CONFIG_MTRR_SANITIZER=y
-CONFIG_MTRR_SANITIZER_ENABLE_DEFAULT=0
-CONFIG_MTRR_SANITIZER_SPARE_REG_NR_DEFAULT=1
-CONFIG_X86_PAT=y
-CONFIG_ARCH_USES_PG_UNCACHED=y
-CONFIG_ARCH_RANDOM=y
-CONFIG_X86_SMAP=y
-CONFIG_X86_INTEL_MPX=y
-CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS=y
-CONFIG_EFI=y
-CONFIG_EFI_STUB=y
-CONFIG_EFI_MIXED=y
-CONFIG_SECCOMP=y
-CONFIG_HZ_100=y
-# CONFIG_HZ_250_NODEF is not set
-# CONFIG_HZ_300_NODEF is not set
-# CONFIG_HZ_1000_NODEF is not set
-CONFIG_HZ=100
-CONFIG_SCHED_HRTICK=y
-CONFIG_KEXEC=y
-# CONFIG_CRASH_DUMP is not set
-# CONFIG_KEXEC_JUMP is not set
-CONFIG_PHYSICAL_START=0x1000000
-CONFIG_RELOCATABLE=y
-CONFIG_RANDOMIZE_BASE=y
-CONFIG_X86_NEED_RELOCS=y
-CONFIG_PHYSICAL_ALIGN=0x1000000
-CONFIG_RANDOMIZE_MEMORY=y
-CONFIG_RANDOMIZE_MEMORY_PHYSICAL_PADDING=0xa
-CONFIG_HOTPLUG_CPU=y
-CONFIG_BOOTPARAM_HOTPLUG_CPU0=y
-# CONFIG_DEBUG_HOTPLUG_CPU0 is not set
-# CONFIG_COMPAT_VDSO is not set
-CONFIG_LEGACY_VSYSCALL_NATIVE=y
-# CONFIG_LEGACY_VSYSCALL_EMULATE is not set
-# CONFIG_LEGACY_VSYSCALL_NONE is not set
-# CONFIG_CMDLINE_BOOL is not set
-CONFIG_MODIFY_LDT_SYSCALL=y
-CONFIG_HAVE_LIVEPATCH=y
-CONFIG_ARCH_HAS_ADD_PAGES=y
-CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
-CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y
-CONFIG_USE_PERCPU_NUMA_NODE_ID=y
-
-#
-# Power management and ACPI options
-#
-CONFIG_ARCH_HIBERNATION_HEADER=y
-CONFIG_SUSPEND=y
-CONFIG_SUSPEND_FREEZER=y
-CONFIG_HIBERNATE_CALLBACKS=y
-CONFIG_HIBERNATION=y
-CONFIG_PM_STD_PARTITION=""
-CONFIG_PM_SLEEP=y
-CONFIG_PM_SLEEP_SMP=y
-CONFIG_PM_AUTOSLEEP=y
-CONFIG_PM_WAKELOCKS=y
-CONFIG_PM_WAKELOCKS_LIMIT=100
-CONFIG_PM_WAKELOCKS_GC=y
-CONFIG_PM=y
-# CONFIG_PM_DEBUG is not set
-CONFIG_PM_OPP=y
-CONFIG_PM_CLK=y
-CONFIG_PM_GENERIC_DOMAINS=y
-# CONFIG_WQ_POWER_EFFICIENT_DEFAULT is not set
-CONFIG_PM_GENERIC_DOMAINS_SLEEP=y
-CONFIG_ACPI=y
-CONFIG_ACPI_LEGACY_TABLES_LOOKUP=y
-CONFIG_ARCH_MIGHT_HAVE_ACPI_PDC=y
-CONFIG_ACPI_SYSTEM_POWER_STATES_SUPPORT=y
-# CONFIG_ACPI_DEBUGGER is not set
-CONFIG_ACPI_SLEEP=y
-# CONFIG_ACPI_PROCFS_POWER is not set
-CONFIG_ACPI_REV_OVERRIDE_POSSIBLE=y
-# CONFIG_ACPI_EC_DEBUGFS is not set
-CONFIG_ACPI_AC=m
-CONFIG_ACPI_BATTERY=m
-CONFIG_ACPI_BUTTON=m
-CONFIG_ACPI_VIDEO=m
-CONFIG_ACPI_FAN=m
-CONFIG_ACPI_DOCK=y
-CONFIG_ACPI_CPU_FREQ_PSS=y
-CONFIG_ACPI_PROCESSOR_CSTATE=y
-CONFIG_ACPI_PROCESSOR_IDLE=y
-CONFIG_ACPI_CPPC_LIB=y
-CONFIG_ACPI_PROCESSOR=y
-CONFIG_ACPI_IPMI=m
-CONFIG_ACPI_HOTPLUG_CPU=y
-CONFIG_ACPI_PROCESSOR_AGGREGATOR=m
-CONFIG_ACPI_THERMAL=m
-CONFIG_ACPI_NUMA=y
-# CONFIG_ACPI_CUSTOM_DSDT is not set
-CONFIG_ARCH_HAS_ACPI_TABLE_UPGRADE=y
-CONFIG_ACPI_TABLE_UPGRADE=y
-# CONFIG_ACPI_DEBUG is not set
-CONFIG_ACPI_PCI_SLOT=y
-CONFIG_X86_PM_TIMER=y
-CONFIG_ACPI_CONTAINER=y
-CONFIG_ACPI_HOTPLUG_MEMORY=y
-CONFIG_ACPI_HOTPLUG_IOAPIC=y
-CONFIG_ACPI_SBS=m
-CONFIG_ACPI_HED=y
-# CONFIG_ACPI_CUSTOM_METHOD is not set
-CONFIG_ACPI_BGRT=y
-# CONFIG_ACPI_REDUCED_HARDWARE_ONLY is not set
-CONFIG_ACPI_NFIT=m
-CONFIG_HAVE_ACPI_APEI=y
-CONFIG_HAVE_ACPI_APEI_NMI=y
-CONFIG_ACPI_APEI=y
-CONFIG_ACPI_APEI_GHES=y
-CONFIG_ACPI_APEI_PCIEAER=y
-CONFIG_ACPI_APEI_MEMORY_FAILURE=y
-# CONFIG_ACPI_APEI_EINJ is not set
-# CONFIG_ACPI_APEI_ERST_DEBUG is not set
-CONFIG_DPTF_POWER=m
-CONFIG_ACPI_WATCHDOG=y
-CONFIG_ACPI_EXTLOG=m
-CONFIG_PMIC_OPREGION=y
-# CONFIG_XPOWER_PMIC_OPREGION is not set
-# CONFIG_BXT_WC_PMIC_OPREGION is not set
-CONFIG_ACPI_CONFIGFS=m
-CONFIG_SFI=y
-
-#
-# CPU Frequency scaling
-#
-CONFIG_CPU_FREQ=y
-CONFIG_CPU_FREQ_GOV_ATTR_SET=y
-CONFIG_CPU_FREQ_GOV_COMMON=y
-CONFIG_CPU_FREQ_STAT=y
-CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y
-# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set
-# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set
-# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set
-# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set
-# CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL is not set
-CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
-CONFIG_CPU_FREQ_GOV_POWERSAVE=y
-CONFIG_CPU_FREQ_GOV_USERSPACE=y
-CONFIG_CPU_FREQ_GOV_ONDEMAND=y
-CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
-CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
-
-#
-# CPU frequency scaling drivers
-#
-CONFIG_X86_INTEL_PSTATE=y
-CONFIG_X86_PCC_CPUFREQ=m
-CONFIG_X86_ACPI_CPUFREQ=m
-CONFIG_X86_ACPI_CPUFREQ_CPB=y
-CONFIG_X86_POWERNOW_K8=m
-CONFIG_X86_AMD_FREQ_SENSITIVITY=m
-# CONFIG_X86_SPEEDSTEP_CENTRINO is not set
-# CONFIG_X86_P4_CLOCKMOD is not set
-
-#
-# shared options
-#
-# CONFIG_X86_SPEEDSTEP_LIB is not set
-
-#
-# CPU Idle
-#
-CONFIG_CPU_IDLE=y
-CONFIG_CPU_IDLE_GOV_LADDER=y
-CONFIG_CPU_IDLE_GOV_MENU=y
-# CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED is not set
-CONFIG_INTEL_IDLE=y
-
-#
-# Bus options (PCI etc.)
-#
-CONFIG_PCI=y
-CONFIG_PCI_DIRECT=y
-CONFIG_PCI_MMCONFIG=y
-CONFIG_PCI_DOMAINS=y
-CONFIG_PCIEPORTBUS=y
-CONFIG_HOTPLUG_PCI_PCIE=y
-CONFIG_PCIEAER=y
-CONFIG_PCIE_ECRC=y
-CONFIG_PCIEAER_INJECT=m
-CONFIG_PCIEASPM=y
-# CONFIG_PCIEASPM_DEBUG is not set
-CONFIG_PCIEASPM_DEFAULT=y
-# CONFIG_PCIEASPM_POWERSAVE is not set
-# CONFIG_PCIEASPM_POWER_SUPERSAVE is not set
-# CONFIG_PCIEASPM_PERFORMANCE is not set
-CONFIG_PCIE_PME=y
-CONFIG_PCIE_DPC=y
-CONFIG_PCIE_PTM=y
-CONFIG_PCI_BUS_ADDR_T_64BIT=y
-CONFIG_PCI_MSI=y
-CONFIG_PCI_MSI_IRQ_DOMAIN=y
-# CONFIG_PCI_DEBUG is not set
-CONFIG_PCI_REALLOC_ENABLE_AUTO=y
-CONFIG_PCI_STUB=m
-CONFIG_HT_IRQ=y
-CONFIG_PCI_ATS=y
-CONFIG_PCI_LOCKLESS_CONFIG=y
-CONFIG_PCI_IOV=y
-CONFIG_PCI_PRI=y
-CONFIG_PCI_PASID=y
-CONFIG_PCI_LABEL=y
-CONFIG_PCI_HYPERV=m
-CONFIG_HOTPLUG_PCI=y
-CONFIG_HOTPLUG_PCI_ACPI=y
-CONFIG_HOTPLUG_PCI_ACPI_IBM=m
-CONFIG_HOTPLUG_PCI_CPCI=y
-CONFIG_HOTPLUG_PCI_CPCI_ZT5550=m
-CONFIG_HOTPLUG_PCI_CPCI_GENERIC=m
-CONFIG_HOTPLUG_PCI_SHPC=m
-
-#
-# DesignWare PCI Core Support
-#
-# CONFIG_PCIE_DW_PLAT is not set
-
-#
-# PCI host controller drivers
-#
-CONFIG_VMD=m
-
-#
-# PCI Endpoint
-#
-# CONFIG_PCI_ENDPOINT is not set
-
-#
-# PCI switch controller drivers
-#
-CONFIG_PCI_SW_SWITCHTEC=m
-CONFIG_ISA_DMA_API=y
-CONFIG_AMD_NB=y
-CONFIG_PCCARD=m
-CONFIG_PCMCIA=m
-CONFIG_PCMCIA_LOAD_CIS=y
-CONFIG_CARDBUS=y
-
-#
-# PC-card bridges
-#
-CONFIG_YENTA=m
-CONFIG_YENTA_O2=y
-CONFIG_YENTA_RICOH=y
-CONFIG_YENTA_TI=y
-CONFIG_YENTA_ENE_TUNE=y
-CONFIG_YENTA_TOSHIBA=y
-CONFIG_PD6729=m
-CONFIG_I82092=m
-CONFIG_PCCARD_NONSTATIC=y
-CONFIG_RAPIDIO=y
-CONFIG_RAPIDIO_TSI721=y
-CONFIG_RAPIDIO_DISC_TIMEOUT=30
-CONFIG_RAPIDIO_ENABLE_RX_TX_PORTS=y
-CONFIG_RAPIDIO_DMA_ENGINE=y
-# CONFIG_RAPIDIO_DEBUG is not set
-CONFIG_RAPIDIO_ENUM_BASIC=m
-CONFIG_RAPIDIO_CHMAN=m
-CONFIG_RAPIDIO_MPORT_CDEV=m
-
-#
-# RapidIO Switch drivers
-#
-CONFIG_RAPIDIO_TSI57X=y
-CONFIG_RAPIDIO_CPS_XX=y
-CONFIG_RAPIDIO_TSI568=y
-CONFIG_RAPIDIO_CPS_GEN2=y
-CONFIG_RAPIDIO_RXS_GEN3=m
-CONFIG_X86_SYSFB=y
-
-#
-# Executable file formats / Emulations
-#
-CONFIG_BINFMT_ELF=y
-CONFIG_COMPAT_BINFMT_ELF=y
-CONFIG_ELFCORE=y
-CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
-CONFIG_BINFMT_SCRIPT=y
-# CONFIG_HAVE_AOUT is not set
-CONFIG_BINFMT_MISC=y
-CONFIG_COREDUMP=y
-CONFIG_IA32_EMULATION=y
-CONFIG_IA32_AOUT=y
-CONFIG_X86_X32=y
-CONFIG_COMPAT_32=y
-CONFIG_COMPAT=y
-CONFIG_COMPAT_FOR_U64_ALIGNMENT=y
-CONFIG_SYSVIPC_COMPAT=y
-CONFIG_X86_DEV_DMA_OPS=y
-CONFIG_NET=y
-CONFIG_COMPAT_NETLINK_MESSAGES=y
-CONFIG_NET_INGRESS=y
-CONFIG_NET_EGRESS=y
-
-#
-# Networking options
-#
-CONFIG_PACKET=m
-CONFIG_PACKET_DIAG=m
-CONFIG_UNIX=m
-CONFIG_UNIX_DIAG=m
-CONFIG_TLS=m
-CONFIG_XFRM=y
-CONFIG_XFRM_OFFLOAD=y
-CONFIG_XFRM_ALGO=m
-CONFIG_XFRM_USER=m
-CONFIG_XFRM_SUB_POLICY=y
-CONFIG_XFRM_MIGRATE=y
-CONFIG_XFRM_STATISTICS=y
-CONFIG_XFRM_IPCOMP=m
-CONFIG_NET_KEY=m
-CONFIG_NET_KEY_MIGRATE=y
-CONFIG_SMC=m
-CONFIG_SMC_DIAG=m
-CONFIG_INET=y
-CONFIG_IP_MULTICAST=y
-CONFIG_IP_ADVANCED_ROUTER=y
-CONFIG_IP_FIB_TRIE_STATS=y
-CONFIG_IP_MULTIPLE_TABLES=y
-CONFIG_IP_ROUTE_MULTIPATH=y
-CONFIG_IP_ROUTE_VERBOSE=y
-CONFIG_IP_ROUTE_CLASSID=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-CONFIG_IP_PNP_BOOTP=y
-CONFIG_IP_PNP_RARP=y
-CONFIG_NET_IPIP=m
-CONFIG_NET_IPGRE_DEMUX=m
-CONFIG_NET_IP_TUNNEL=m
-CONFIG_NET_IPGRE=m
-CONFIG_NET_IPGRE_BROADCAST=y
-CONFIG_IP_MROUTE=y
-CONFIG_IP_MROUTE_MULTIPLE_TABLES=y
-CONFIG_IP_PIMSM_V1=y
-CONFIG_IP_PIMSM_V2=y
-CONFIG_SYN_COOKIES=y
-CONFIG_NET_IPVTI=m
-CONFIG_NET_UDP_TUNNEL=m
-CONFIG_NET_FOU=m
-CONFIG_NET_FOU_IP_TUNNELS=y
-CONFIG_INET_AH=m
-CONFIG_INET_ESP=m
-CONFIG_INET_ESP_OFFLOAD=m
-CONFIG_INET_IPCOMP=m
-CONFIG_INET_XFRM_TUNNEL=m
-CONFIG_INET_TUNNEL=m
-CONFIG_INET_XFRM_MODE_TRANSPORT=m
-CONFIG_INET_XFRM_MODE_TUNNEL=m
-CONFIG_INET_XFRM_MODE_BEET=m
-CONFIG_INET_DIAG=m
-CONFIG_INET_TCP_DIAG=m
-CONFIG_INET_UDP_DIAG=m
-# CONFIG_INET_RAW_DIAG is not set
-# CONFIG_INET_DIAG_DESTROY is not set
-CONFIG_TCP_CONG_ADVANCED=y
-CONFIG_TCP_CONG_BIC=m
-CONFIG_TCP_CONG_CUBIC=m
-CONFIG_TCP_CONG_WESTWOOD=m
-CONFIG_TCP_CONG_HTCP=m
-CONFIG_TCP_CONG_HSTCP=m
-CONFIG_TCP_CONG_HYBLA=m
-CONFIG_TCP_CONG_VEGAS=m
-CONFIG_TCP_CONG_NV=m
-CONFIG_TCP_CONG_SCALABLE=m
-CONFIG_TCP_CONG_LP=m
-CONFIG_TCP_CONG_VENO=m
-CONFIG_TCP_CONG_YEAH=m
-CONFIG_TCP_CONG_ILLINOIS=m
-CONFIG_TCP_CONG_DCTCP=m
-# CONFIG_TCP_CONG_CDG is not set
-CONFIG_TCP_CONG_BBR=m
-CONFIG_DEFAULT_RENO=y
-CONFIG_DEFAULT_TCP_CONG="reno"
-CONFIG_TCP_MD5SIG=y
-CONFIG_IPV6=m
-CONFIG_IPV6_ROUTER_PREF=y
-CONFIG_IPV6_ROUTE_INFO=y
-CONFIG_IPV6_OPTIMISTIC_DAD=y
-CONFIG_INET6_AH=m
-CONFIG_INET6_ESP=m
-CONFIG_INET6_ESP_OFFLOAD=m
-CONFIG_INET6_IPCOMP=m
-CONFIG_IPV6_MIP6=m
-CONFIG_IPV6_ILA=m
-CONFIG_INET6_XFRM_TUNNEL=m
-CONFIG_INET6_TUNNEL=m
-CONFIG_INET6_XFRM_MODE_TRANSPORT=m
-CONFIG_INET6_XFRM_MODE_TUNNEL=m
-CONFIG_INET6_XFRM_MODE_BEET=m
-CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
-CONFIG_IPV6_VTI=m
-CONFIG_IPV6_SIT=m
-CONFIG_IPV6_SIT_6RD=y
-CONFIG_IPV6_NDISC_NODETYPE=y
-CONFIG_IPV6_TUNNEL=m
-CONFIG_IPV6_GRE=m
-CONFIG_IPV6_FOU=m
-CONFIG_IPV6_FOU_TUNNEL=m
-CONFIG_IPV6_MULTIPLE_TABLES=y
-CONFIG_IPV6_SUBTREES=y
-CONFIG_IPV6_MROUTE=y
-CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y
-CONFIG_IPV6_PIMSM_V2=y
-CONFIG_IPV6_SEG6_LWTUNNEL=y
-CONFIG_IPV6_SEG6_HMAC=y
-# CONFIG_NETLABEL is not set
-CONFIG_NETWORK_SECMARK=y
-CONFIG_NET_PTP_CLASSIFY=y
-CONFIG_NETWORK_PHY_TIMESTAMPING=y
-CONFIG_NETFILTER=y
-CONFIG_NETFILTER_ADVANCED=y
-CONFIG_BRIDGE_NETFILTER=m
-
-#
-# Core Netfilter Configuration
-#
-CONFIG_NETFILTER_INGRESS=y
-CONFIG_NETFILTER_NETLINK=m
-CONFIG_NETFILTER_NETLINK_ACCT=m
-CONFIG_NETFILTER_NETLINK_QUEUE=m
-CONFIG_NETFILTER_NETLINK_LOG=m
-CONFIG_NF_CONNTRACK=m
-CONFIG_NF_LOG_COMMON=m
-CONFIG_NF_LOG_NETDEV=m
-CONFIG_NF_CONNTRACK_MARK=y
-CONFIG_NF_CONNTRACK_SECMARK=y
-CONFIG_NF_CONNTRACK_ZONES=y
-CONFIG_NF_CONNTRACK_PROCFS=y
-CONFIG_NF_CONNTRACK_EVENTS=y
-CONFIG_NF_CONNTRACK_TIMEOUT=y
-CONFIG_NF_CONNTRACK_TIMESTAMP=y
-CONFIG_NF_CONNTRACK_LABELS=y
-CONFIG_NF_CT_PROTO_DCCP=y
-CONFIG_NF_CT_PROTO_GRE=m
-CONFIG_NF_CT_PROTO_SCTP=y
-CONFIG_NF_CT_PROTO_UDPLITE=y
-CONFIG_NF_CONNTRACK_AMANDA=m
-CONFIG_NF_CONNTRACK_FTP=m
-CONFIG_NF_CONNTRACK_H323=m
-CONFIG_NF_CONNTRACK_IRC=m
-CONFIG_NF_CONNTRACK_BROADCAST=m
-CONFIG_NF_CONNTRACK_NETBIOS_NS=m
-CONFIG_NF_CONNTRACK_SNMP=m
-CONFIG_NF_CONNTRACK_PPTP=m
-CONFIG_NF_CONNTRACK_SANE=m
-CONFIG_NF_CONNTRACK_SIP=m
-CONFIG_NF_CONNTRACK_TFTP=m
-CONFIG_NF_CT_NETLINK=m
-CONFIG_NF_CT_NETLINK_TIMEOUT=m
-CONFIG_NF_CT_NETLINK_HELPER=m
-CONFIG_NETFILTER_NETLINK_GLUE_CT=y
-CONFIG_NF_NAT=m
-CONFIG_NF_NAT_NEEDED=y
-CONFIG_NF_NAT_PROTO_DCCP=y
-CONFIG_NF_NAT_PROTO_UDPLITE=y
-CONFIG_NF_NAT_PROTO_SCTP=y
-CONFIG_NF_NAT_AMANDA=m
-CONFIG_NF_NAT_FTP=m
-CONFIG_NF_NAT_IRC=m
-CONFIG_NF_NAT_SIP=m
-CONFIG_NF_NAT_TFTP=m
-CONFIG_NF_NAT_REDIRECT=m
-CONFIG_NETFILTER_SYNPROXY=m
-CONFIG_NF_TABLES=m
-CONFIG_NF_TABLES_INET=m
-CONFIG_NF_TABLES_NETDEV=m
-CONFIG_NFT_EXTHDR=m
-CONFIG_NFT_META=m
-CONFIG_NFT_RT=m
-CONFIG_NFT_NUMGEN=m
-CONFIG_NFT_CT=m
-CONFIG_NFT_SET_RBTREE=m
-CONFIG_NFT_SET_HASH=m
-CONFIG_NFT_SET_BITMAP=m
-CONFIG_NFT_COUNTER=m
-CONFIG_NFT_LOG=m
-CONFIG_NFT_LIMIT=m
-CONFIG_NFT_MASQ=m
-CONFIG_NFT_REDIR=m
-CONFIG_NFT_NAT=m
-CONFIG_NFT_OBJREF=m
-CONFIG_NFT_QUEUE=m
-CONFIG_NFT_QUOTA=m
-CONFIG_NFT_REJECT=m
-CONFIG_NFT_REJECT_INET=m
-CONFIG_NFT_COMPAT=m
-CONFIG_NFT_HASH=m
-CONFIG_NFT_FIB=m
-CONFIG_NFT_FIB_INET=m
-CONFIG_NF_DUP_NETDEV=m
-CONFIG_NFT_DUP_NETDEV=m
-CONFIG_NFT_FWD_NETDEV=m
-CONFIG_NFT_FIB_NETDEV=m
-CONFIG_NETFILTER_XTABLES=m
-
-#
-# Xtables combined modules
-#
-CONFIG_NETFILTER_XT_MARK=m
-CONFIG_NETFILTER_XT_CONNMARK=m
-CONFIG_NETFILTER_XT_SET=m
-
-#
-# Xtables targets
-#
-CONFIG_NETFILTER_XT_TARGET_AUDIT=m
-CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
-CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
-CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
-CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m
-CONFIG_NETFILTER_XT_TARGET_CT=m
-CONFIG_NETFILTER_XT_TARGET_DSCP=m
-CONFIG_NETFILTER_XT_TARGET_HL=m
-CONFIG_NETFILTER_XT_TARGET_HMARK=m
-CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
-CONFIG_NETFILTER_XT_TARGET_LED=m
-CONFIG_NETFILTER_XT_TARGET_LOG=m
-CONFIG_NETFILTER_XT_TARGET_MARK=m
-CONFIG_NETFILTER_XT_NAT=m
-CONFIG_NETFILTER_XT_TARGET_NETMAP=m
-CONFIG_NETFILTER_XT_TARGET_NFLOG=m
-CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
-CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
-CONFIG_NETFILTER_XT_TARGET_RATEEST=m
-CONFIG_NETFILTER_XT_TARGET_REDIRECT=m
-CONFIG_NETFILTER_XT_TARGET_TEE=m
-CONFIG_NETFILTER_XT_TARGET_TPROXY=m
-CONFIG_NETFILTER_XT_TARGET_TRACE=m
-CONFIG_NETFILTER_XT_TARGET_SECMARK=m
-CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
-CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
-
-#
-# Xtables matches
-#
-CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
-CONFIG_NETFILTER_XT_MATCH_BPF=m
-CONFIG_NETFILTER_XT_MATCH_CGROUP=m
-CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
-CONFIG_NETFILTER_XT_MATCH_COMMENT=m
-CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
-CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m
-CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
-CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
-CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
-CONFIG_NETFILTER_XT_MATCH_CPU=m
-CONFIG_NETFILTER_XT_MATCH_DCCP=m
-CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m
-CONFIG_NETFILTER_XT_MATCH_DSCP=m
-CONFIG_NETFILTER_XT_MATCH_ECN=m
-CONFIG_NETFILTER_XT_MATCH_ESP=m
-CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
-CONFIG_NETFILTER_XT_MATCH_HELPER=m
-CONFIG_NETFILTER_XT_MATCH_HL=m
-CONFIG_NETFILTER_XT_MATCH_IPCOMP=m
-CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
-CONFIG_NETFILTER_XT_MATCH_IPVS=m
-CONFIG_NETFILTER_XT_MATCH_L2TP=m
-CONFIG_NETFILTER_XT_MATCH_LENGTH=m
-CONFIG_NETFILTER_XT_MATCH_LIMIT=m
-CONFIG_NETFILTER_XT_MATCH_MAC=m
-CONFIG_NETFILTER_XT_MATCH_MARK=m
-CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
-CONFIG_NETFILTER_XT_MATCH_NFACCT=m
-CONFIG_NETFILTER_XT_MATCH_OSF=m
-CONFIG_NETFILTER_XT_MATCH_OWNER=m
-CONFIG_NETFILTER_XT_MATCH_POLICY=m
-CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
-CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
-CONFIG_NETFILTER_XT_MATCH_QUOTA=m
-CONFIG_NETFILTER_XT_MATCH_RATEEST=m
-CONFIG_NETFILTER_XT_MATCH_REALM=m
-CONFIG_NETFILTER_XT_MATCH_RECENT=m
-CONFIG_NETFILTER_XT_MATCH_SCTP=m
-CONFIG_NETFILTER_XT_MATCH_SOCKET=m
-CONFIG_NETFILTER_XT_MATCH_STATE=m
-CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
-CONFIG_NETFILTER_XT_MATCH_STRING=m
-CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
-CONFIG_NETFILTER_XT_MATCH_TIME=m
-CONFIG_NETFILTER_XT_MATCH_U32=m
-CONFIG_IP_SET=m
-CONFIG_IP_SET_MAX=256
-CONFIG_IP_SET_BITMAP_IP=m
-CONFIG_IP_SET_BITMAP_IPMAC=m
-CONFIG_IP_SET_BITMAP_PORT=m
-CONFIG_IP_SET_HASH_IP=m
-CONFIG_IP_SET_HASH_IPMARK=m
-CONFIG_IP_SET_HASH_IPPORT=m
-CONFIG_IP_SET_HASH_IPPORTIP=m
-CONFIG_IP_SET_HASH_IPPORTNET=m
-CONFIG_IP_SET_HASH_IPMAC=m
-CONFIG_IP_SET_HASH_MAC=m
-CONFIG_IP_SET_HASH_NETPORTNET=m
-CONFIG_IP_SET_HASH_NET=m
-CONFIG_IP_SET_HASH_NETNET=m
-CONFIG_IP_SET_HASH_NETPORT=m
-CONFIG_IP_SET_HASH_NETIFACE=m
-CONFIG_IP_SET_LIST_SET=m
-CONFIG_IP_VS=m
-CONFIG_IP_VS_IPV6=y
-# CONFIG_IP_VS_DEBUG is not set
-CONFIG_IP_VS_TAB_BITS=12
-
-#
-# IPVS transport protocol load balancing support
-#
-CONFIG_IP_VS_PROTO_TCP=y
-CONFIG_IP_VS_PROTO_UDP=y
-CONFIG_IP_VS_PROTO_AH_ESP=y
-CONFIG_IP_VS_PROTO_ESP=y
-CONFIG_IP_VS_PROTO_AH=y
-CONFIG_IP_VS_PROTO_SCTP=y
-
-#
-# IPVS scheduler
-#
-CONFIG_IP_VS_RR=m
-CONFIG_IP_VS_WRR=m
-CONFIG_IP_VS_LC=m
-CONFIG_IP_VS_WLC=m
-CONFIG_IP_VS_FO=m
-CONFIG_IP_VS_OVF=m
-CONFIG_IP_VS_LBLC=m
-CONFIG_IP_VS_LBLCR=m
-CONFIG_IP_VS_DH=m
-CONFIG_IP_VS_SH=m
-CONFIG_IP_VS_SED=m
-CONFIG_IP_VS_NQ=m
-
-#
-# IPVS SH scheduler
-#
-CONFIG_IP_VS_SH_TAB_BITS=8
-
-#
-# IPVS application helper
-#
-CONFIG_IP_VS_FTP=m
-CONFIG_IP_VS_NFCT=y
-CONFIG_IP_VS_PE_SIP=m
-
-#
-# IP: Netfilter Configuration
-#
-CONFIG_NF_DEFRAG_IPV4=m
-CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_NF_SOCKET_IPV4=m
-CONFIG_NF_TABLES_IPV4=m
-CONFIG_NFT_CHAIN_ROUTE_IPV4=m
-CONFIG_NFT_REJECT_IPV4=m
-CONFIG_NFT_DUP_IPV4=m
-CONFIG_NFT_FIB_IPV4=m
-CONFIG_NF_TABLES_ARP=m
-CONFIG_NF_DUP_IPV4=m
-CONFIG_NF_LOG_ARP=m
-CONFIG_NF_LOG_IPV4=m
-CONFIG_NF_REJECT_IPV4=m
-CONFIG_NF_NAT_IPV4=m
-CONFIG_NFT_CHAIN_NAT_IPV4=m
-CONFIG_NF_NAT_MASQUERADE_IPV4=m
-CONFIG_NFT_MASQ_IPV4=m
-CONFIG_NFT_REDIR_IPV4=m
-CONFIG_NF_NAT_SNMP_BASIC=m
-CONFIG_NF_NAT_PROTO_GRE=m
-CONFIG_NF_NAT_PPTP=m
-CONFIG_NF_NAT_H323=m
-CONFIG_IP_NF_IPTABLES=m
-CONFIG_IP_NF_MATCH_AH=m
-CONFIG_IP_NF_MATCH_ECN=m
-CONFIG_IP_NF_MATCH_RPFILTER=m
-CONFIG_IP_NF_MATCH_TTL=m
-CONFIG_IP_NF_FILTER=m
-CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_IP_NF_TARGET_SYNPROXY=m
-CONFIG_IP_NF_NAT=m
-CONFIG_IP_NF_TARGET_MASQUERADE=m
-CONFIG_IP_NF_TARGET_NETMAP=m
-CONFIG_IP_NF_TARGET_REDIRECT=m
-CONFIG_IP_NF_MANGLE=m
-CONFIG_IP_NF_TARGET_CLUSTERIP=m
-CONFIG_IP_NF_TARGET_ECN=m
-CONFIG_IP_NF_TARGET_TTL=m
-CONFIG_IP_NF_RAW=m
-# CONFIG_IP_NF_SECURITY is not set
-CONFIG_IP_NF_ARPTABLES=m
-CONFIG_IP_NF_ARPFILTER=m
-CONFIG_IP_NF_ARP_MANGLE=m
-
-#
-# IPv6: Netfilter Configuration
-#
-CONFIG_NF_DEFRAG_IPV6=m
-CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_NF_SOCKET_IPV6=m
-CONFIG_NF_TABLES_IPV6=m
-CONFIG_NFT_CHAIN_ROUTE_IPV6=m
-CONFIG_NFT_CHAIN_NAT_IPV6=m
-CONFIG_NFT_MASQ_IPV6=m
-CONFIG_NFT_REDIR_IPV6=m
-CONFIG_NFT_REJECT_IPV6=m
-CONFIG_NFT_DUP_IPV6=m
-CONFIG_NFT_FIB_IPV6=m
-CONFIG_NF_DUP_IPV6=m
-CONFIG_NF_REJECT_IPV6=m
-CONFIG_NF_LOG_IPV6=m
-CONFIG_NF_NAT_IPV6=m
-CONFIG_NF_NAT_MASQUERADE_IPV6=m
-CONFIG_IP6_NF_IPTABLES=m
-CONFIG_IP6_NF_MATCH_AH=m
-CONFIG_IP6_NF_MATCH_EUI64=m
-CONFIG_IP6_NF_MATCH_FRAG=m
-CONFIG_IP6_NF_MATCH_OPTS=m
-CONFIG_IP6_NF_MATCH_HL=m
-CONFIG_IP6_NF_MATCH_IPV6HEADER=m
-CONFIG_IP6_NF_MATCH_MH=m
-CONFIG_IP6_NF_MATCH_RPFILTER=m
-CONFIG_IP6_NF_MATCH_RT=m
-CONFIG_IP6_NF_TARGET_HL=m
-CONFIG_IP6_NF_FILTER=m
-CONFIG_IP6_NF_TARGET_REJECT=m
-CONFIG_IP6_NF_TARGET_SYNPROXY=m
-CONFIG_IP6_NF_MANGLE=m
-CONFIG_IP6_NF_RAW=m
-# CONFIG_IP6_NF_SECURITY is not set
-CONFIG_IP6_NF_NAT=m
-CONFIG_IP6_NF_TARGET_MASQUERADE=m
-CONFIG_IP6_NF_TARGET_NPT=m
-
-#
-# DECnet: Netfilter Configuration
-#
-CONFIG_DECNET_NF_GRABULATOR=m
-CONFIG_NF_TABLES_BRIDGE=m
-CONFIG_NFT_BRIDGE_META=m
-CONFIG_NFT_BRIDGE_REJECT=m
-CONFIG_NF_LOG_BRIDGE=m
-CONFIG_BRIDGE_NF_EBTABLES=m
-CONFIG_BRIDGE_EBT_BROUTE=m
-CONFIG_BRIDGE_EBT_T_FILTER=m
-CONFIG_BRIDGE_EBT_T_NAT=m
-CONFIG_BRIDGE_EBT_802_3=m
-CONFIG_BRIDGE_EBT_AMONG=m
-CONFIG_BRIDGE_EBT_ARP=m
-CONFIG_BRIDGE_EBT_IP=m
-CONFIG_BRIDGE_EBT_IP6=m
-CONFIG_BRIDGE_EBT_LIMIT=m
-CONFIG_BRIDGE_EBT_MARK=m
-CONFIG_BRIDGE_EBT_PKTTYPE=m
-CONFIG_BRIDGE_EBT_STP=m
-CONFIG_BRIDGE_EBT_VLAN=m
-CONFIG_BRIDGE_EBT_ARPREPLY=m
-CONFIG_BRIDGE_EBT_DNAT=m
-CONFIG_BRIDGE_EBT_MARK_T=m
-CONFIG_BRIDGE_EBT_REDIRECT=m
-CONFIG_BRIDGE_EBT_SNAT=m
-CONFIG_BRIDGE_EBT_LOG=m
-CONFIG_BRIDGE_EBT_NFLOG=m
-CONFIG_IP_DCCP=m
-CONFIG_INET_DCCP_DIAG=m
-
-#
-# DCCP CCIDs Configuration
-#
-# CONFIG_IP_DCCP_CCID2_DEBUG is not set
-CONFIG_IP_DCCP_CCID3=y
-# CONFIG_IP_DCCP_CCID3_DEBUG is not set
-CONFIG_IP_DCCP_TFRC_LIB=y
-
-#
-# DCCP Kernel Hacking
-#
-# CONFIG_IP_DCCP_DEBUG is not set
-# CONFIG_NET_DCCPPROBE is not set
-CONFIG_IP_SCTP=m
-# CONFIG_NET_SCTPPROBE is not set
-# CONFIG_SCTP_DBG_OBJCNT is not set
-CONFIG_SCTP_DEFAULT_COOKIE_HMAC_MD5=y
-# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_SHA1 is not set
-# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_NONE is not set
-CONFIG_SCTP_COOKIE_HMAC_MD5=y
-CONFIG_SCTP_COOKIE_HMAC_SHA1=y
-CONFIG_INET_SCTP_DIAG=m
-CONFIG_RDS=m
-CONFIG_RDS_RDMA=m
-CONFIG_RDS_TCP=m
-# CONFIG_RDS_DEBUG is not set
-CONFIG_TIPC=m
-CONFIG_TIPC_MEDIA_IB=y
-CONFIG_TIPC_MEDIA_UDP=y
-CONFIG_ATM=m
-CONFIG_ATM_CLIP=m
-# CONFIG_ATM_CLIP_NO_ICMP is not set
-CONFIG_ATM_LANE=m
-CONFIG_ATM_MPOA=m
-CONFIG_ATM_BR2684=m
-# CONFIG_ATM_BR2684_IPFILTER is not set
-CONFIG_L2TP=m
-# CONFIG_L2TP_DEBUGFS is not set
-CONFIG_L2TP_V3=y
-CONFIG_L2TP_IP=m
-CONFIG_L2TP_ETH=m
-CONFIG_STP=m
-CONFIG_GARP=m
-CONFIG_MRP=m
-CONFIG_BRIDGE=m
-CONFIG_BRIDGE_IGMP_SNOOPING=y
-CONFIG_BRIDGE_VLAN_FILTERING=y
-CONFIG_HAVE_NET_DSA=y
-CONFIG_NET_DSA=m
-CONFIG_NET_DSA_TAG_DSA=y
-CONFIG_NET_DSA_TAG_EDSA=y
-CONFIG_NET_DSA_TAG_KSZ=y
-CONFIG_NET_DSA_TAG_LAN9303=y
-CONFIG_NET_DSA_TAG_MTK=y
-CONFIG_NET_DSA_TAG_TRAILER=y
-CONFIG_NET_DSA_TAG_QCA=y
-CONFIG_VLAN_8021Q=m
-CONFIG_VLAN_8021Q_GVRP=y
-CONFIG_VLAN_8021Q_MVRP=y
-CONFIG_DECNET=m
-CONFIG_DECNET_ROUTER=y
-CONFIG_LLC=m
-CONFIG_LLC2=m
-CONFIG_IPX=m
-CONFIG_IPX_INTERN=y
-CONFIG_ATALK=m
-CONFIG_DEV_APPLETALK=m
-CONFIG_IPDDP=m
-CONFIG_IPDDP_ENCAP=y
-CONFIG_X25=m
-CONFIG_LAPB=m
-CONFIG_PHONET=m
-CONFIG_6LOWPAN=m
-# CONFIG_6LOWPAN_DEBUGFS is not set
-CONFIG_6LOWPAN_NHC=m
-CONFIG_6LOWPAN_NHC_DEST=m
-CONFIG_6LOWPAN_NHC_FRAGMENT=m
-CONFIG_6LOWPAN_NHC_HOP=m
-CONFIG_6LOWPAN_NHC_IPV6=m
-CONFIG_6LOWPAN_NHC_MOBILITY=m
-CONFIG_6LOWPAN_NHC_ROUTING=m
-CONFIG_6LOWPAN_NHC_UDP=m
-CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m
-CONFIG_6LOWPAN_GHC_UDP=m
-CONFIG_6LOWPAN_GHC_ICMPV6=m
-CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m
-CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
-CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
-CONFIG_IEEE802154=m
-CONFIG_IEEE802154_NL802154_EXPERIMENTAL=y
-CONFIG_IEEE802154_SOCKET=m
-CONFIG_IEEE802154_6LOWPAN=m
-CONFIG_MAC802154=m
-CONFIG_NET_SCHED=y
-
-#
-# Queueing/Scheduling
-#
-CONFIG_NET_SCH_CBQ=m
-CONFIG_NET_SCH_HTB=m
-CONFIG_NET_SCH_HFSC=m
-CONFIG_NET_SCH_ATM=m
-CONFIG_NET_SCH_PRIO=m
-CONFIG_NET_SCH_MULTIQ=m
-CONFIG_NET_SCH_RED=m
-CONFIG_NET_SCH_SFB=m
-CONFIG_NET_SCH_SFQ=m
-CONFIG_NET_SCH_TEQL=m
-CONFIG_NET_SCH_TBF=m
-CONFIG_NET_SCH_GRED=m
-CONFIG_NET_SCH_DSMARK=m
-CONFIG_NET_SCH_NETEM=m
-CONFIG_NET_SCH_DRR=m
-CONFIG_NET_SCH_MQPRIO=m
-CONFIG_NET_SCH_CHOKE=m
-CONFIG_NET_SCH_QFQ=m
-CONFIG_NET_SCH_CODEL=m
-CONFIG_NET_SCH_FQ_CODEL=m
-CONFIG_NET_SCH_FQ=m
-CONFIG_NET_SCH_HHF=m
-CONFIG_NET_SCH_PIE=m
-CONFIG_NET_SCH_INGRESS=m
-CONFIG_NET_SCH_PLUG=m
-# CONFIG_NET_SCH_DEFAULT is not set
-
-#
-# Classification
-#
-CONFIG_NET_CLS=y
-CONFIG_NET_CLS_BASIC=m
-CONFIG_NET_CLS_TCINDEX=m
-CONFIG_NET_CLS_ROUTE4=m
-CONFIG_NET_CLS_FW=m
-CONFIG_NET_CLS_U32=m
-CONFIG_CLS_U32_PERF=y
-CONFIG_CLS_U32_MARK=y
-CONFIG_NET_CLS_RSVP=m
-CONFIG_NET_CLS_RSVP6=m
-CONFIG_NET_CLS_FLOW=m
-CONFIG_NET_CLS_CGROUP=m
-CONFIG_NET_CLS_BPF=m
-CONFIG_NET_CLS_FLOWER=m
-CONFIG_NET_CLS_MATCHALL=m
-CONFIG_NET_EMATCH=y
-CONFIG_NET_EMATCH_STACK=32
-CONFIG_NET_EMATCH_CMP=m
-CONFIG_NET_EMATCH_NBYTE=m
-CONFIG_NET_EMATCH_U32=m
-CONFIG_NET_EMATCH_META=m
-CONFIG_NET_EMATCH_TEXT=m
-CONFIG_NET_EMATCH_CANID=m
-CONFIG_NET_EMATCH_IPSET=m
-CONFIG_NET_CLS_ACT=y
-CONFIG_NET_ACT_POLICE=m
-CONFIG_NET_ACT_GACT=m
-CONFIG_GACT_PROB=y
-CONFIG_NET_ACT_MIRRED=m
-CONFIG_NET_ACT_SAMPLE=m
-CONFIG_NET_ACT_IPT=m
-CONFIG_NET_ACT_NAT=m
-CONFIG_NET_ACT_PEDIT=m
-# CONFIG_NET_ACT_SIMP is not set
-CONFIG_NET_ACT_SKBEDIT=m
-CONFIG_NET_ACT_CSUM=m
-CONFIG_NET_ACT_VLAN=m
-CONFIG_NET_ACT_BPF=m
-CONFIG_NET_ACT_CONNMARK=m
-CONFIG_NET_ACT_SKBMOD=m
-CONFIG_NET_ACT_IFE=m
-CONFIG_NET_ACT_TUNNEL_KEY=m
-CONFIG_NET_IFE_SKBMARK=m
-CONFIG_NET_IFE_SKBPRIO=m
-CONFIG_NET_IFE_SKBTCINDEX=m
-CONFIG_NET_CLS_IND=y
-CONFIG_NET_SCH_FIFO=y
-CONFIG_DCB=y
-CONFIG_DNS_RESOLVER=y
-CONFIG_BATMAN_ADV=m
-# CONFIG_BATMAN_ADV_BATMAN_V is not set
-CONFIG_BATMAN_ADV_BLA=y
-CONFIG_BATMAN_ADV_DAT=y
-CONFIG_BATMAN_ADV_NC=y
-CONFIG_BATMAN_ADV_MCAST=y
-CONFIG_BATMAN_ADV_DEBUGFS=y
-# CONFIG_BATMAN_ADV_DEBUG is not set
-CONFIG_OPENVSWITCH=m
-CONFIG_OPENVSWITCH_GRE=m
-CONFIG_OPENVSWITCH_VXLAN=m
-CONFIG_OPENVSWITCH_GENEVE=m
-CONFIG_VSOCKETS=m
-CONFIG_VMWARE_VMCI_VSOCKETS=m
-CONFIG_VIRTIO_VSOCKETS=m
-CONFIG_VIRTIO_VSOCKETS_COMMON=m
-CONFIG_HYPERV_VSOCKETS=m
-CONFIG_NETLINK_DIAG=m
-CONFIG_MPLS=y
-CONFIG_NET_MPLS_GSO=m
-CONFIG_MPLS_ROUTING=m
-CONFIG_MPLS_IPTUNNEL=m
-CONFIG_NET_NSH=m
-CONFIG_HSR=m
-CONFIG_NET_SWITCHDEV=y
-CONFIG_NET_L3_MASTER_DEV=y
-# CONFIG_NET_NCSI is not set
-CONFIG_RPS=y
-CONFIG_RFS_ACCEL=y
-CONFIG_XPS=y
-CONFIG_CGROUP_NET_PRIO=y
-CONFIG_CGROUP_NET_CLASSID=y
-CONFIG_NET_RX_BUSY_POLL=y
-CONFIG_BQL=y
-CONFIG_BPF_JIT=y
-# CONFIG_BPF_STREAM_PARSER is not set
-CONFIG_NET_FLOW_LIMIT=y
-
-#
-# Network testing
-#
-# CONFIG_NET_PKTGEN is not set
-# CONFIG_NET_TCPPROBE is not set
-# CONFIG_NET_DROP_MONITOR is not set
-CONFIG_HAMRADIO=y
-
-#
-# Packet Radio protocols
-#
-CONFIG_AX25=m
-CONFIG_AX25_DAMA_SLAVE=y
-CONFIG_NETROM=m
-CONFIG_ROSE=m
-
-#
-# AX.25 network device drivers
-#
-CONFIG_MKISS=m
-CONFIG_6PACK=m
-CONFIG_BPQETHER=m
-CONFIG_BAYCOM_SER_FDX=m
-CONFIG_BAYCOM_SER_HDX=m
-CONFIG_BAYCOM_PAR=m
-CONFIG_YAM=m
-CONFIG_CAN=m
-CONFIG_CAN_RAW=m
-CONFIG_CAN_BCM=m
-CONFIG_CAN_GW=m
-
-#
-# CAN Device Drivers
-#
-CONFIG_CAN_VCAN=m
-CONFIG_CAN_VXCAN=m
-CONFIG_CAN_SLCAN=m
-CONFIG_CAN_DEV=m
-CONFIG_CAN_CALC_BITTIMING=y
-CONFIG_CAN_LEDS=y
-CONFIG_CAN_JANZ_ICAN3=m
-CONFIG_CAN_C_CAN=m
-CONFIG_CAN_C_CAN_PLATFORM=m
-CONFIG_CAN_C_CAN_PCI=m
-CONFIG_CAN_CC770=m
-CONFIG_CAN_CC770_ISA=m
-CONFIG_CAN_CC770_PLATFORM=m
-CONFIG_CAN_IFI_CANFD=m
-CONFIG_CAN_M_CAN=m
-CONFIG_CAN_PEAK_PCIEFD=m
-CONFIG_CAN_SJA1000=m
-CONFIG_CAN_SJA1000_ISA=m
-CONFIG_CAN_SJA1000_PLATFORM=m
-CONFIG_CAN_EMS_PCMCIA=m
-CONFIG_CAN_EMS_PCI=m
-CONFIG_CAN_PEAK_PCMCIA=m
-CONFIG_CAN_PEAK_PCI=m
-CONFIG_CAN_PEAK_PCIEC=y
-CONFIG_CAN_KVASER_PCI=m
-CONFIG_CAN_PLX_PCI=m
-CONFIG_CAN_SOFTING=m
-CONFIG_CAN_SOFTING_CS=m
-
-#
-# CAN SPI interfaces
-#
-CONFIG_CAN_HI311X=m
-CONFIG_CAN_MCP251X=m
-
-#
-# CAN USB interfaces
-#
-CONFIG_CAN_EMS_USB=m
-CONFIG_CAN_ESD_USB2=m
-CONFIG_CAN_GS_USB=m
-CONFIG_CAN_KVASER_USB=m
-CONFIG_CAN_PEAK_USB=m
-CONFIG_CAN_8DEV_USB=m
-CONFIG_CAN_MCBA_USB=m
-# CONFIG_CAN_DEBUG_DEVICES is not set
-CONFIG_BT=m
-CONFIG_BT_BREDR=y
-CONFIG_BT_RFCOMM=m
-CONFIG_BT_RFCOMM_TTY=y
-CONFIG_BT_BNEP=m
-CONFIG_BT_BNEP_MC_FILTER=y
-CONFIG_BT_BNEP_PROTO_FILTER=y
-CONFIG_BT_CMTP=m
-CONFIG_BT_HIDP=m
-CONFIG_BT_HS=y
-CONFIG_BT_LE=y
-CONFIG_BT_6LOWPAN=m
-CONFIG_BT_LEDS=y
-# CONFIG_BT_SELFTEST is not set
-CONFIG_BT_DEBUGFS=y
-
-#
-# Bluetooth device drivers
-#
-CONFIG_BT_INTEL=m
-CONFIG_BT_BCM=m
-CONFIG_BT_RTL=m
-CONFIG_BT_QCA=m
-CONFIG_BT_HCIBTUSB=m
-CONFIG_BT_HCIBTUSB_BCM=y
-CONFIG_BT_HCIBTUSB_RTL=y
-CONFIG_BT_HCIBTSDIO=m
-CONFIG_BT_HCIUART=m
-CONFIG_BT_HCIUART_SERDEV=y
-CONFIG_BT_HCIUART_H4=y
-CONFIG_BT_HCIUART_NOKIA=m
-CONFIG_BT_HCIUART_BCSP=y
-CONFIG_BT_HCIUART_ATH3K=y
-CONFIG_BT_HCIUART_LL=y
-CONFIG_BT_HCIUART_3WIRE=y
-CONFIG_BT_HCIUART_INTEL=y
-CONFIG_BT_HCIUART_BCM=y
-CONFIG_BT_HCIUART_QCA=y
-CONFIG_BT_HCIUART_AG6XX=y
-CONFIG_BT_HCIUART_MRVL=y
-CONFIG_BT_HCIBCM203X=m
-CONFIG_BT_HCIBPA10X=m
-CONFIG_BT_HCIBFUSB=m
-CONFIG_BT_HCIDTL1=m
-CONFIG_BT_HCIBT3C=m
-CONFIG_BT_HCIBLUECARD=m
-CONFIG_BT_HCIBTUART=m
-CONFIG_BT_HCIVHCI=m
-CONFIG_BT_MRVL=m
-CONFIG_BT_MRVL_SDIO=m
-CONFIG_BT_ATH3K=m
-CONFIG_BT_WILINK=m
-CONFIG_AF_RXRPC=m
-CONFIG_AF_RXRPC_IPV6=y
-# CONFIG_AF_RXRPC_INJECT_LOSS is not set
-# CONFIG_AF_RXRPC_DEBUG is not set
-# CONFIG_RXKAD is not set
-CONFIG_AF_KCM=m
-CONFIG_STREAM_PARSER=m
-CONFIG_FIB_RULES=y
-CONFIG_WIRELESS=y
-CONFIG_WIRELESS_EXT=y
-CONFIG_WEXT_CORE=y
-CONFIG_WEXT_PROC=y
-CONFIG_WEXT_SPY=y
-CONFIG_WEXT_PRIV=y
-CONFIG_CFG80211=m
-CONFIG_NL80211_TESTMODE=y
-# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set
-CONFIG_CFG80211_DEFAULT_PS=y
-# CONFIG_CFG80211_DEBUGFS is not set
-# CONFIG_CFG80211_INTERNAL_REGDB is not set
-CONFIG_CFG80211_CRDA_SUPPORT=y
-CONFIG_CFG80211_WEXT=y
-CONFIG_CFG80211_WEXT_EXPORT=y
-CONFIG_LIB80211=m
-CONFIG_LIB80211_CRYPT_WEP=m
-CONFIG_LIB80211_CRYPT_CCMP=m
-CONFIG_LIB80211_CRYPT_TKIP=m
-# CONFIG_LIB80211_DEBUG is not set
-CONFIG_MAC80211=m
-CONFIG_MAC80211_HAS_RC=y
-CONFIG_MAC80211_RC_MINSTREL=y
-CONFIG_MAC80211_RC_MINSTREL_HT=y
-# CONFIG_MAC80211_RC_MINSTREL_VHT is not set
-CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y
-CONFIG_MAC80211_RC_DEFAULT="minstrel_ht"
-CONFIG_MAC80211_MESH=y
-CONFIG_MAC80211_LEDS=y
-# CONFIG_MAC80211_DEBUGFS is not set
-# CONFIG_MAC80211_MESSAGE_TRACING is not set
-# CONFIG_MAC80211_DEBUG_MENU is not set
-CONFIG_MAC80211_STA_HASH_MAX_SIZE=0
-CONFIG_WIMAX=m
-CONFIG_WIMAX_DEBUG_LEVEL=8
-CONFIG_RFKILL=m
-CONFIG_RFKILL_LEDS=y
-CONFIG_RFKILL_INPUT=y
-CONFIG_RFKILL_GPIO=m
-CONFIG_NET_9P=m
-CONFIG_NET_9P_VIRTIO=m
-CONFIG_NET_9P_RDMA=m
-# CONFIG_NET_9P_DEBUG is not set
-CONFIG_CAIF=m
-# CONFIG_CAIF_DEBUG is not set
-CONFIG_CAIF_NETDEV=m
-CONFIG_CAIF_USB=m
-CONFIG_CEPH_LIB=m
-# CONFIG_CEPH_LIB_PRETTYDEBUG is not set
-CONFIG_CEPH_LIB_USE_DNS_RESOLVER=y
-CONFIG_NFC=m
-CONFIG_NFC_DIGITAL=m
-CONFIG_NFC_NCI=m
-CONFIG_NFC_NCI_SPI=m
-CONFIG_NFC_NCI_UART=m
-CONFIG_NFC_HCI=m
-CONFIG_NFC_SHDLC=y
-
-#
-# Near Field Communication (NFC) devices
-#
-CONFIG_NFC_TRF7970A=m
-CONFIG_NFC_MEI_PHY=m
-CONFIG_NFC_SIM=m
-CONFIG_NFC_PORT100=m
-CONFIG_NFC_FDP=m
-CONFIG_NFC_FDP_I2C=m
-CONFIG_NFC_PN544=m
-CONFIG_NFC_PN544_I2C=m
-CONFIG_NFC_PN544_MEI=m
-CONFIG_NFC_PN533=m
-CONFIG_NFC_PN533_USB=m
-CONFIG_NFC_PN533_I2C=m
-CONFIG_NFC_MICROREAD=m
-CONFIG_NFC_MICROREAD_I2C=m
-CONFIG_NFC_MICROREAD_MEI=m
-CONFIG_NFC_MRVL=m
-CONFIG_NFC_MRVL_USB=m
-CONFIG_NFC_MRVL_UART=m
-CONFIG_NFC_MRVL_I2C=m
-CONFIG_NFC_MRVL_SPI=m
-CONFIG_NFC_ST21NFCA=m
-CONFIG_NFC_ST21NFCA_I2C=m
-CONFIG_NFC_ST_NCI=m
-CONFIG_NFC_ST_NCI_I2C=m
-CONFIG_NFC_ST_NCI_SPI=m
-CONFIG_NFC_NXP_NCI=m
-CONFIG_NFC_NXP_NCI_I2C=m
-CONFIG_NFC_S3FWRN5=m
-CONFIG_NFC_S3FWRN5_I2C=m
-CONFIG_NFC_ST95HF=m
-CONFIG_PSAMPLE=m
-CONFIG_NET_IFE=m
-CONFIG_LWTUNNEL=y
-CONFIG_LWTUNNEL_BPF=y
-CONFIG_DST_CACHE=y
-CONFIG_GRO_CELLS=y
-CONFIG_NET_DEVLINK=m
-CONFIG_MAY_USE_DEVLINK=m
-CONFIG_HAVE_EBPF_JIT=y
-
-#
-# Device Drivers
-#
-
-#
-# Generic Driver Options
-#
-# CONFIG_UEVENT_HELPER is not set
-CONFIG_DEVTMPFS=y
-CONFIG_DEVTMPFS_MOUNT=y
-CONFIG_STANDALONE=y
-CONFIG_PREVENT_FIRMWARE_BUILD=y
-CONFIG_FW_LOADER=y
-# CONFIG_FIRMWARE_IN_KERNEL is not set
-CONFIG_EXTRA_FIRMWARE=""
-CONFIG_FW_LOADER_USER_HELPER=y
-# CONFIG_FW_LOADER_USER_HELPER_FALLBACK is not set
-CONFIG_WANT_DEV_COREDUMP=y
-CONFIG_ALLOW_DEV_COREDUMP=y
-CONFIG_DEV_COREDUMP=y
-# CONFIG_DEBUG_DRIVER is not set
-# CONFIG_DEBUG_DEVRES is not set
-# CONFIG_DEBUG_TEST_DRIVER_REMOVE is not set
-CONFIG_TEST_ASYNC_DRIVER_PROBE=m
-# CONFIG_SYS_HYPERVISOR is not set
-# CONFIG_GENERIC_CPU_DEVICES is not set
-CONFIG_GENERIC_CPU_AUTOPROBE=y
-CONFIG_GENERIC_CPU_VULNERABILITIES=y
-CONFIG_REGMAP=y
-CONFIG_REGMAP_I2C=m
-CONFIG_REGMAP_SPI=y
-CONFIG_REGMAP_SPMI=m
-CONFIG_REGMAP_W1=m
-CONFIG_REGMAP_MMIO=y
-CONFIG_REGMAP_IRQ=y
-CONFIG_DMA_SHARED_BUFFER=y
-# CONFIG_DMA_FENCE_TRACE is not set
-# CONFIG_DMA_CMA is not set
-
-#
-# Bus devices
-#
-CONFIG_CONNECTOR=m
-CONFIG_MTD=m
-CONFIG_MTD_TESTS=m
-CONFIG_MTD_REDBOOT_PARTS=m
-CONFIG_MTD_REDBOOT_DIRECTORY_BLOCK=-1
-CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED=y
-CONFIG_MTD_REDBOOT_PARTS_READONLY=y
-CONFIG_MTD_CMDLINE_PARTS=m
-CONFIG_MTD_AR7_PARTS=m
-
-#
-# Partition parsers
-#
-
-#
-# User Modules And Translation Layers
-#
-CONFIG_MTD_BLKDEVS=m
-CONFIG_MTD_BLOCK=m
-CONFIG_MTD_BLOCK_RO=m
-CONFIG_FTL=m
-CONFIG_NFTL=m
-CONFIG_NFTL_RW=y
-CONFIG_INFTL=m
-CONFIG_RFD_FTL=m
-CONFIG_SSFDC=m
-CONFIG_SM_FTL=m
-CONFIG_MTD_OOPS=m
-CONFIG_MTD_SWAP=m
-# CONFIG_MTD_PARTITIONED_MASTER is not set
-
-#
-# RAM/ROM/Flash chip drivers
-#
-CONFIG_MTD_CFI=m
-CONFIG_MTD_JEDECPROBE=m
-CONFIG_MTD_GEN_PROBE=m
-# CONFIG_MTD_CFI_ADV_OPTIONS is not set
-CONFIG_MTD_MAP_BANK_WIDTH_1=y
-CONFIG_MTD_MAP_BANK_WIDTH_2=y
-CONFIG_MTD_MAP_BANK_WIDTH_4=y
-# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
-# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
-# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
-CONFIG_MTD_CFI_I1=y
-CONFIG_MTD_CFI_I2=y
-# CONFIG_MTD_CFI_I4 is not set
-# CONFIG_MTD_CFI_I8 is not set
-CONFIG_MTD_CFI_INTELEXT=m
-CONFIG_MTD_CFI_AMDSTD=m
-CONFIG_MTD_CFI_STAA=m
-CONFIG_MTD_CFI_UTIL=m
-CONFIG_MTD_RAM=m
-CONFIG_MTD_ROM=m
-CONFIG_MTD_ABSENT=m
-
-#
-# Mapping drivers for chip access
-#
-CONFIG_MTD_COMPLEX_MAPPINGS=y
-CONFIG_MTD_PHYSMAP=m
-# CONFIG_MTD_PHYSMAP_COMPAT is not set
-CONFIG_MTD_SBC_GXX=m
-CONFIG_MTD_AMD76XROM=m
-CONFIG_MTD_ICHXROM=m
-CONFIG_MTD_ESB2ROM=m
-CONFIG_MTD_CK804XROM=m
-CONFIG_MTD_SCB2_FLASH=m
-CONFIG_MTD_NETtel=m
-CONFIG_MTD_L440GX=m
-CONFIG_MTD_PCI=m
-CONFIG_MTD_PCMCIA=m
-# CONFIG_MTD_PCMCIA_ANONYMOUS is not set
-CONFIG_MTD_GPIO_ADDR=m
-CONFIG_MTD_INTEL_VR_NOR=m
-CONFIG_MTD_PLATRAM=m
-CONFIG_MTD_LATCH_ADDR=m
-
-#
-# Self-contained MTD device drivers
-#
-CONFIG_MTD_PMC551=m
-CONFIG_MTD_PMC551_BUGFIX=y
-# CONFIG_MTD_PMC551_DEBUG is not set
-CONFIG_MTD_DATAFLASH=m
-CONFIG_MTD_DATAFLASH_WRITE_VERIFY=y
-CONFIG_MTD_DATAFLASH_OTP=y
-CONFIG_MTD_M25P80=m
-CONFIG_MTD_MCHP23K256=m
-CONFIG_MTD_SST25L=m
-CONFIG_MTD_SLRAM=m
-CONFIG_MTD_PHRAM=m
-CONFIG_MTD_MTDRAM=m
-CONFIG_MTDRAM_TOTAL_SIZE=4096
-CONFIG_MTDRAM_ERASE_SIZE=128
-CONFIG_MTD_BLOCK2MTD=m
-
-#
-# Disk-On-Chip Device Drivers
-#
-CONFIG_MTD_DOCG3=m
-CONFIG_BCH_CONST_M=14
-CONFIG_BCH_CONST_T=4
-CONFIG_MTD_NAND_ECC=m
-CONFIG_MTD_NAND_ECC_SMC=y
-CONFIG_MTD_NAND=m
-CONFIG_MTD_NAND_BCH=m
-CONFIG_MTD_NAND_ECC_BCH=y
-CONFIG_MTD_SM_COMMON=m
-CONFIG_MTD_NAND_DENALI=m
-CONFIG_MTD_NAND_DENALI_PCI=m
-CONFIG_MTD_NAND_GPIO=m
-# CONFIG_MTD_NAND_OMAP_BCH_BUILD is not set
-CONFIG_MTD_NAND_RICOH=m
-CONFIG_MTD_NAND_DISKONCHIP=m
-CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADVANCED=y
-CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADDRESS=0x0
-CONFIG_MTD_NAND_DISKONCHIP_PROBE_HIGH=y
-CONFIG_MTD_NAND_DISKONCHIP_BBTWRITE=y
-CONFIG_MTD_NAND_DOCG4=m
-CONFIG_MTD_NAND_CAFE=m
-CONFIG_MTD_NAND_NANDSIM=m
-CONFIG_MTD_NAND_PLATFORM=m
-CONFIG_MTD_ONENAND=m
-CONFIG_MTD_ONENAND_VERIFY_WRITE=y
-CONFIG_MTD_ONENAND_GENERIC=m
-CONFIG_MTD_ONENAND_OTP=y
-CONFIG_MTD_ONENAND_2X_PROGRAM=y
-
-#
-# LPDDR & LPDDR2 PCM memory drivers
-#
-CONFIG_MTD_LPDDR=m
-CONFIG_MTD_QINFO_PROBE=m
-CONFIG_MTD_SPI_NOR=m
-CONFIG_MTD_MT81xx_NOR=m
-CONFIG_MTD_SPI_NOR_USE_4K_SECTORS=y
-CONFIG_MTD_UBI=m
-CONFIG_MTD_UBI_WL_THRESHOLD=4096
-CONFIG_MTD_UBI_BEB_LIMIT=20
-CONFIG_MTD_UBI_FASTMAP=y
-# CONFIG_MTD_UBI_GLUEBI is not set
-CONFIG_MTD_UBI_BLOCK=y
-# CONFIG_OF is not set
-CONFIG_ARCH_MIGHT_HAVE_PC_PARPORT=y
-CONFIG_PARPORT=m
-CONFIG_PARPORT_PC=m
-CONFIG_PARPORT_SERIAL=m
-CONFIG_PARPORT_PC_FIFO=y
-CONFIG_PARPORT_PC_SUPERIO=y
-CONFIG_PARPORT_PC_PCMCIA=m
-# CONFIG_PARPORT_GSC is not set
-CONFIG_PARPORT_AX88796=m
-CONFIG_PARPORT_1284=y
-CONFIG_PARPORT_NOT_PC=y
-CONFIG_PNP=y
-# CONFIG_PNP_DEBUG_MESSAGES is not set
-
-#
-# Protocols
-#
-CONFIG_PNPACPI=y
-CONFIG_BLK_DEV=y
-# CONFIG_BLK_DEV_NULL_BLK is not set
-CONFIG_BLK_DEV_FD=m
-CONFIG_PARIDE=m
-
-#
-# Parallel IDE high-level drivers
-#
-CONFIG_PARIDE_PD=m
-CONFIG_PARIDE_PCD=m
-CONFIG_PARIDE_PF=m
-CONFIG_PARIDE_PT=m
-CONFIG_PARIDE_PG=m
-
-#
-# Parallel IDE protocol modules
-#
-CONFIG_PARIDE_ATEN=m
-CONFIG_PARIDE_BPCK=m
-CONFIG_PARIDE_COMM=m
-CONFIG_PARIDE_DSTR=m
-CONFIG_PARIDE_FIT2=m
-CONFIG_PARIDE_FIT3=m
-CONFIG_PARIDE_EPAT=m
-CONFIG_PARIDE_EPATC8=y
-CONFIG_PARIDE_EPIA=m
-CONFIG_PARIDE_FRIQ=m
-CONFIG_PARIDE_FRPW=m
-CONFIG_PARIDE_KBIC=m
-CONFIG_PARIDE_KTTI=m
-CONFIG_PARIDE_ON20=m
-CONFIG_PARIDE_ON26=m
-CONFIG_BLK_DEV_PCIESSD_MTIP32XX=m
-CONFIG_ZRAM=m
-CONFIG_ZRAM_WRITEBACK=y
-CONFIG_BLK_DEV_DAC960=m
-CONFIG_BLK_DEV_UMEM=m
-# CONFIG_BLK_DEV_COW_COMMON is not set
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_LOOP_MIN_COUNT=8
-CONFIG_BLK_DEV_CRYPTOLOOP=m
-CONFIG_BLK_DEV_DRBD=m
-# CONFIG_DRBD_FAULT_INJECTION is not set
-CONFIG_BLK_DEV_NBD=m
-CONFIG_BLK_DEV_SKD=m
-CONFIG_BLK_DEV_SX8=m
-CONFIG_BLK_DEV_RAM=m
-CONFIG_BLK_DEV_RAM_COUNT=16
-CONFIG_BLK_DEV_RAM_SIZE=16384
-CONFIG_BLK_DEV_RAM_DAX=y
-CONFIG_CDROM_PKTCDVD=m
-CONFIG_CDROM_PKTCDVD_BUFFERS=8
-CONFIG_CDROM_PKTCDVD_WCACHE=y
-CONFIG_ATA_OVER_ETH=m
-CONFIG_VIRTIO_BLK=m
-# CONFIG_VIRTIO_BLK_SCSI is not set
-CONFIG_BLK_DEV_RBD=m
-CONFIG_BLK_DEV_RSXX=m
-CONFIG_NVME_CORE=m
-CONFIG_BLK_DEV_NVME=m
-CONFIG_NVME_FABRICS=m
-CONFIG_NVME_RDMA=m
-CONFIG_NVME_FC=m
-CONFIG_NVME_TARGET=m
-CONFIG_NVME_TARGET_LOOP=m
-CONFIG_NVME_TARGET_RDMA=m
-CONFIG_NVME_TARGET_FC=m
-CONFIG_NVME_TARGET_FCLOOP=m
-
-#
-# Misc devices
-#
-CONFIG_SENSORS_LIS3LV02D=m
-CONFIG_AD525X_DPOT=m
-CONFIG_AD525X_DPOT_I2C=m
-CONFIG_AD525X_DPOT_SPI=m
-# CONFIG_DUMMY_IRQ is not set
-CONFIG_IBM_ASM=m
-CONFIG_PHANTOM=m
-CONFIG_SGI_IOC4=m
-CONFIG_TIFM_CORE=m
-CONFIG_TIFM_7XX1=m
-CONFIG_ICS932S401=m
-CONFIG_ENCLOSURE_SERVICES=m
-CONFIG_HP_ILO=m
-CONFIG_APDS9802ALS=m
-CONFIG_ISL29003=m
-CONFIG_ISL29020=m
-CONFIG_SENSORS_TSL2550=m
-CONFIG_SENSORS_BH1770=m
-CONFIG_SENSORS_APDS990X=m
-CONFIG_HMC6352=m
-CONFIG_DS1682=m
-CONFIG_TI_DAC7512=m
-CONFIG_VMWARE_BALLOON=m
-CONFIG_USB_SWITCH_FSA9480=m
-CONFIG_LATTICE_ECP3_CONFIG=m
-CONFIG_SRAM=y
-CONFIG_PCI_ENDPOINT_TEST=m
-CONFIG_C2PORT=m
-CONFIG_C2PORT_DURAMAR_2150=m
-
-#
-# EEPROM support
-#
-CONFIG_EEPROM_AT24=m
-CONFIG_EEPROM_AT25=m
-CONFIG_EEPROM_LEGACY=m
-CONFIG_EEPROM_MAX6875=m
-CONFIG_EEPROM_93CX6=m
-CONFIG_EEPROM_93XX46=m
-CONFIG_EEPROM_IDT_89HPESX=m
-CONFIG_CB710_CORE=m
-# CONFIG_CB710_DEBUG is not set
-CONFIG_CB710_DEBUG_ASSUMPTIONS=y
-
-#
-# Texas Instruments shared transport line discipline
-#
-CONFIG_TI_ST=m
-CONFIG_SENSORS_LIS3_I2C=m
-
-#
-# Altera FPGA firmware download module
-#
-CONFIG_ALTERA_STAPL=m
-CONFIG_INTEL_MEI=y
-CONFIG_INTEL_MEI_ME=y
-CONFIG_INTEL_MEI_TXE=m
-CONFIG_VMWARE_VMCI=m
-
-#
-# Intel MIC Bus Driver
-#
-CONFIG_INTEL_MIC_BUS=m
-
-#
-# SCIF Bus Driver
-#
-CONFIG_SCIF_BUS=m
-
-#
-# VOP Bus Driver
-#
-CONFIG_VOP_BUS=m
-
-#
-# Intel MIC Host Driver
-#
-CONFIG_INTEL_MIC_HOST=m
-
-#
-# Intel MIC Card Driver
-#
-CONFIG_INTEL_MIC_CARD=m
-
-#
-# SCIF Driver
-#
-CONFIG_SCIF=m
-
-#
-# Intel MIC Coprocessor State Management (COSM) Drivers
-#
-CONFIG_MIC_COSM=m
-
-#
-# VOP Driver
-#
-CONFIG_VOP=m
-CONFIG_VHOST_RING=m
-CONFIG_GENWQE=m
-CONFIG_GENWQE_PLATFORM_ERROR_RECOVERY=0
-CONFIG_ECHO=m
-# CONFIG_CXL_BASE is not set
-# CONFIG_CXL_AFU_DRIVER_OPS is not set
-# CONFIG_CXL_LIB is not set
-CONFIG_HAVE_IDE=y
-# CONFIG_IDE is not set
-
-#
-# SCSI device support
-#
-CONFIG_SCSI_MOD=m
-CONFIG_RAID_ATTRS=m
-CONFIG_SCSI=m
-CONFIG_SCSI_DMA=y
-CONFIG_SCSI_NETLINK=y
-# CONFIG_SCSI_MQ_DEFAULT is not set
-CONFIG_SCSI_PROC_FS=y
-
-#
-# SCSI support type (disk, tape, CD-ROM)
-#
-CONFIG_BLK_DEV_SD=m
-CONFIG_CHR_DEV_ST=m
-CONFIG_CHR_DEV_OSST=m
-CONFIG_BLK_DEV_SR=m
-CONFIG_BLK_DEV_SR_VENDOR=y
-CONFIG_CHR_DEV_SG=m
-CONFIG_CHR_DEV_SCH=m
-CONFIG_SCSI_ENCLOSURE=m
-CONFIG_SCSI_CONSTANTS=y
-CONFIG_SCSI_LOGGING=y
-CONFIG_SCSI_SCAN_ASYNC=y
-
-#
-# SCSI Transports
-#
-CONFIG_SCSI_SPI_ATTRS=m
-CONFIG_SCSI_FC_ATTRS=m
-CONFIG_SCSI_ISCSI_ATTRS=m
-CONFIG_SCSI_SAS_ATTRS=m
-CONFIG_SCSI_SAS_LIBSAS=m
-CONFIG_SCSI_SAS_ATA=y
-CONFIG_SCSI_SAS_HOST_SMP=y
-CONFIG_SCSI_SRP_ATTRS=m
-CONFIG_SCSI_LOWLEVEL=y
-CONFIG_ISCSI_TCP=m
-CONFIG_ISCSI_BOOT_SYSFS=m
-CONFIG_SCSI_CXGB3_ISCSI=m
-CONFIG_SCSI_CXGB4_ISCSI=m
-CONFIG_SCSI_BNX2_ISCSI=m
-CONFIG_SCSI_BNX2X_FCOE=m
-CONFIG_BE2ISCSI=m
-CONFIG_BLK_DEV_3W_XXXX_RAID=m
-CONFIG_SCSI_HPSA=m
-CONFIG_SCSI_3W_9XXX=m
-CONFIG_SCSI_3W_SAS=m
-CONFIG_SCSI_ACARD=m
-CONFIG_SCSI_AACRAID=m
-CONFIG_SCSI_AIC7XXX=m
-CONFIG_AIC7XXX_CMDS_PER_DEVICE=4
-CONFIG_AIC7XXX_RESET_DELAY_MS=15000
-# CONFIG_AIC7XXX_DEBUG_ENABLE is not set
-CONFIG_AIC7XXX_DEBUG_MASK=0
-# CONFIG_AIC7XXX_REG_PRETTY_PRINT is not set
-CONFIG_SCSI_AIC79XX=m
-CONFIG_AIC79XX_CMDS_PER_DEVICE=4
-CONFIG_AIC79XX_RESET_DELAY_MS=15000
-# CONFIG_AIC79XX_DEBUG_ENABLE is not set
-CONFIG_AIC79XX_DEBUG_MASK=0
-# CONFIG_AIC79XX_REG_PRETTY_PRINT is not set
-CONFIG_SCSI_AIC94XX=m
-# CONFIG_AIC94XX_DEBUG is not set
-CONFIG_SCSI_MVSAS=m
-# CONFIG_SCSI_MVSAS_DEBUG is not set
-CONFIG_SCSI_MVSAS_TASKLET=y
-CONFIG_SCSI_MVUMI=m
-CONFIG_SCSI_DPT_I2O=m
-CONFIG_SCSI_ADVANSYS=m
-CONFIG_SCSI_ARCMSR=m
-CONFIG_SCSI_ESAS2R=m
-CONFIG_MEGARAID_NEWGEN=y
-CONFIG_MEGARAID_MM=m
-CONFIG_MEGARAID_MAILBOX=m
-CONFIG_MEGARAID_LEGACY=m
-CONFIG_MEGARAID_SAS=m
-CONFIG_SCSI_MPT3SAS=m
-CONFIG_SCSI_MPT2SAS_MAX_SGE=128
-CONFIG_SCSI_MPT3SAS_MAX_SGE=128
-CONFIG_SCSI_MPT2SAS=m
-CONFIG_SCSI_SMARTPQI=m
-CONFIG_SCSI_UFSHCD=m
-CONFIG_SCSI_UFSHCD_PCI=m
-CONFIG_SCSI_UFS_DWC_TC_PCI=m
-CONFIG_SCSI_UFSHCD_PLATFORM=m
-CONFIG_SCSI_UFS_DWC_TC_PLATFORM=m
-CONFIG_SCSI_HPTIOP=m
-CONFIG_SCSI_BUSLOGIC=m
-CONFIG_SCSI_FLASHPOINT=y
-CONFIG_VMWARE_PVSCSI=m
-CONFIG_HYPERV_STORAGE=m
-CONFIG_LIBFC=m
-CONFIG_LIBFCOE=m
-CONFIG_FCOE=m
-CONFIG_FCOE_FNIC=m
-CONFIG_SCSI_SNIC=m
-# CONFIG_SCSI_SNIC_DEBUG_FS is not set
-CONFIG_SCSI_DMX3191D=m
-CONFIG_SCSI_EATA=m
-# CONFIG_SCSI_EATA_TAGGED_QUEUE is not set
-# CONFIG_SCSI_EATA_LINKED_COMMANDS is not set
-CONFIG_SCSI_EATA_MAX_TAGS=16
-CONFIG_SCSI_FUTURE_DOMAIN=m
-CONFIG_SCSI_GDTH=m
-CONFIG_SCSI_ISCI=m
-CONFIG_SCSI_IPS=m
-CONFIG_SCSI_INITIO=m
-CONFIG_SCSI_INIA100=m
-CONFIG_SCSI_PPA=m
-CONFIG_SCSI_IMM=m
-# CONFIG_SCSI_IZIP_EPP16 is not set
-# CONFIG_SCSI_IZIP_SLOW_CTR is not set
-CONFIG_SCSI_STEX=m
-CONFIG_SCSI_SYM53C8XX_2=m
-CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=1
-CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16
-CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64
-CONFIG_SCSI_SYM53C8XX_MMIO=y
-CONFIG_SCSI_IPR=m
-# CONFIG_SCSI_IPR_TRACE is not set
-# CONFIG_SCSI_IPR_DUMP is not set
-CONFIG_SCSI_QLOGIC_1280=m
-CONFIG_SCSI_QLA_FC=m
-CONFIG_TCM_QLA2XXX=m
-# CONFIG_TCM_QLA2XXX_DEBUG is not set
-CONFIG_SCSI_QLA_ISCSI=m
-CONFIG_QEDI=m
-CONFIG_QEDF=m
-CONFIG_SCSI_LPFC=m
-# CONFIG_SCSI_LPFC_DEBUG_FS is not set
-CONFIG_SCSI_DC395x=m
-CONFIG_SCSI_AM53C974=m
-CONFIG_SCSI_WD719X=m
-# CONFIG_SCSI_DEBUG is not set
-CONFIG_SCSI_PMCRAID=m
-CONFIG_SCSI_PM8001=m
-CONFIG_SCSI_BFA_FC=m
-CONFIG_SCSI_VIRTIO=m
-CONFIG_SCSI_CHELSIO_FCOE=m
-CONFIG_SCSI_LOWLEVEL_PCMCIA=y
-CONFIG_PCMCIA_AHA152X=m
-CONFIG_PCMCIA_FDOMAIN=m
-CONFIG_PCMCIA_QLOGIC=m
-CONFIG_PCMCIA_SYM53C500=m
-CONFIG_SCSI_DH=y
-CONFIG_SCSI_DH_RDAC=m
-CONFIG_SCSI_DH_HP_SW=m
-CONFIG_SCSI_DH_EMC=m
-CONFIG_SCSI_DH_ALUA=m
-CONFIG_SCSI_OSD_INITIATOR=m
-CONFIG_SCSI_OSD_ULD=m
-CONFIG_SCSI_OSD_DPRINT_SENSE=1
-# CONFIG_SCSI_OSD_DEBUG is not set
-CONFIG_ATA=m
-# CONFIG_ATA_NONSTANDARD is not set
-CONFIG_ATA_VERBOSE_ERROR=y
-CONFIG_ATA_ACPI=y
-CONFIG_SATA_ZPODD=y
-CONFIG_SATA_PMP=y
-
-#
-# Controllers with non-SFF native interface
-#
-CONFIG_SATA_AHCI=m
-CONFIG_SATA_AHCI_PLATFORM=m
-CONFIG_SATA_INIC162X=m
-CONFIG_SATA_ACARD_AHCI=m
-CONFIG_SATA_SIL24=m
-CONFIG_ATA_SFF=y
-
-#
-# SFF controllers with custom DMA interface
-#
-CONFIG_PDC_ADMA=m
-CONFIG_SATA_QSTOR=m
-CONFIG_SATA_SX4=m
-CONFIG_ATA_BMDMA=y
-
-#
-# SATA SFF controllers with BMDMA
-#
-CONFIG_ATA_PIIX=m
-CONFIG_SATA_DWC=m
-# CONFIG_SATA_DWC_OLD_DMA is not set
-# CONFIG_SATA_DWC_DEBUG is not set
-CONFIG_SATA_MV=m
-CONFIG_SATA_NV=m
-CONFIG_SATA_PROMISE=m
-CONFIG_SATA_SIL=m
-CONFIG_SATA_SIS=m
-CONFIG_SATA_SVW=m
-CONFIG_SATA_ULI=m
-CONFIG_SATA_VIA=m
-CONFIG_SATA_VITESSE=m
-
-#
-# PATA SFF controllers with BMDMA
-#
-CONFIG_PATA_ALI=m
-CONFIG_PATA_AMD=m
-CONFIG_PATA_ARTOP=m
-CONFIG_PATA_ATIIXP=m
-CONFIG_PATA_ATP867X=m
-CONFIG_PATA_CMD64X=m
-CONFIG_PATA_CYPRESS=m
-CONFIG_PATA_EFAR=m
-CONFIG_PATA_HPT366=m
-CONFIG_PATA_HPT37X=m
-CONFIG_PATA_HPT3X2N=m
-CONFIG_PATA_HPT3X3=m
-CONFIG_PATA_HPT3X3_DMA=y
-CONFIG_PATA_IT8213=m
-CONFIG_PATA_IT821X=m
-CONFIG_PATA_JMICRON=m
-CONFIG_PATA_MARVELL=m
-CONFIG_PATA_NETCELL=m
-CONFIG_PATA_NINJA32=m
-CONFIG_PATA_NS87415=m
-CONFIG_PATA_OLDPIIX=m
-CONFIG_PATA_OPTIDMA=m
-CONFIG_PATA_PDC2027X=m
-CONFIG_PATA_PDC_OLD=m
-CONFIG_PATA_RADISYS=m
-CONFIG_PATA_RDC=m
-CONFIG_PATA_SCH=m
-CONFIG_PATA_SERVERWORKS=m
-CONFIG_PATA_SIL680=m
-CONFIG_PATA_SIS=m
-CONFIG_PATA_TOSHIBA=m
-CONFIG_PATA_TRIFLEX=m
-CONFIG_PATA_VIA=m
-CONFIG_PATA_WINBOND=m
-
-#
-# PIO-only SFF controllers
-#
-CONFIG_PATA_CMD640_PCI=m
-CONFIG_PATA_MPIIX=m
-CONFIG_PATA_NS87410=m
-CONFIG_PATA_OPTI=m
-CONFIG_PATA_PCMCIA=m
-CONFIG_PATA_RZ1000=m
-
-#
-# Generic fallback / legacy drivers
-#
-CONFIG_PATA_ACPI=m
-CONFIG_ATA_GENERIC=m
-CONFIG_PATA_LEGACY=m
-CONFIG_MD=y
-CONFIG_BLK_DEV_MD=m
-CONFIG_MD_LINEAR=m
-CONFIG_MD_RAID0=m
-CONFIG_MD_RAID1=m
-CONFIG_MD_RAID10=m
-CONFIG_MD_RAID456=m
-CONFIG_MD_MULTIPATH=m
-CONFIG_MD_FAULTY=m
-CONFIG_MD_CLUSTER=m
-CONFIG_BCACHE=m
-# CONFIG_BCACHE_DEBUG is not set
-# CONFIG_BCACHE_CLOSURES_DEBUG is not set
-CONFIG_BLK_DEV_DM_BUILTIN=y
-CONFIG_BLK_DEV_DM=m
-# CONFIG_DM_MQ_DEFAULT is not set
-# CONFIG_DM_DEBUG is not set
-CONFIG_DM_BUFIO=m
-# CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING is not set
-CONFIG_DM_BIO_PRISON=m
-CONFIG_DM_PERSISTENT_DATA=m
-CONFIG_DM_CRYPT=m
-CONFIG_DM_SNAPSHOT=m
-CONFIG_DM_THIN_PROVISIONING=m
-CONFIG_DM_CACHE=m
-CONFIG_DM_CACHE_SMQ=m
-CONFIG_DM_ERA=m
-CONFIG_DM_MIRROR=m
-CONFIG_DM_LOG_USERSPACE=m
-CONFIG_DM_RAID=m
-CONFIG_DM_ZERO=m
-CONFIG_DM_MULTIPATH=m
-CONFIG_DM_MULTIPATH_QL=m
-CONFIG_DM_MULTIPATH_ST=m
-CONFIG_DM_DELAY=m
-CONFIG_DM_UEVENT=y
-CONFIG_DM_FLAKEY=m
-CONFIG_DM_VERITY=m
-# CONFIG_DM_VERITY_FEC is not set
-CONFIG_DM_SWITCH=m
-CONFIG_DM_LOG_WRITES=m
-CONFIG_DM_INTEGRITY=m
-CONFIG_DM_ZONED=m
-CONFIG_TARGET_CORE=m
-CONFIG_TCM_IBLOCK=m
-CONFIG_TCM_FILEIO=m
-CONFIG_TCM_PSCSI=m
-CONFIG_TCM_USER2=m
-CONFIG_LOOPBACK_TARGET=m
-CONFIG_TCM_FC=m
-CONFIG_ISCSI_TARGET=m
-CONFIG_ISCSI_TARGET_CXGB4=m
-CONFIG_SBP_TARGET=m
-CONFIG_FUSION=y
-CONFIG_FUSION_SPI=m
-CONFIG_FUSION_FC=m
-CONFIG_FUSION_SAS=m
-CONFIG_FUSION_MAX_SGE=128
-CONFIG_FUSION_CTL=m
-CONFIG_FUSION_LAN=m
-CONFIG_FUSION_LOGGING=y
-
-#
-# IEEE 1394 (FireWire) support
-#
-CONFIG_FIREWIRE=m
-CONFIG_FIREWIRE_OHCI=m
-CONFIG_FIREWIRE_SBP2=m
-CONFIG_FIREWIRE_NET=m
-CONFIG_FIREWIRE_NOSY=m
-CONFIG_MACINTOSH_DRIVERS=y
-CONFIG_MAC_EMUMOUSEBTN=m
-CONFIG_NETDEVICES=y
-CONFIG_MII=m
-CONFIG_NET_CORE=y
-CONFIG_BONDING=m
-CONFIG_DUMMY=m
-CONFIG_EQUALIZER=m
-CONFIG_NET_FC=y
-CONFIG_IFB=m
-CONFIG_NET_TEAM=m
-CONFIG_NET_TEAM_MODE_BROADCAST=m
-CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
-CONFIG_NET_TEAM_MODE_RANDOM=m
-CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
-CONFIG_NET_TEAM_MODE_LOADBALANCE=m
-CONFIG_MACVLAN=m
-CONFIG_MACVTAP=m
-CONFIG_IPVLAN=m
-CONFIG_IPVTAP=m
-CONFIG_VXLAN=m
-CONFIG_GENEVE=m
-CONFIG_GTP=m
-CONFIG_MACSEC=m
-CONFIG_NETCONSOLE=m
-CONFIG_NETCONSOLE_DYNAMIC=y
-CONFIG_NETPOLL=y
-CONFIG_NET_POLL_CONTROLLER=y
-# CONFIG_NTB_NETDEV is not set
-CONFIG_RIONET=m
-CONFIG_RIONET_TX_SIZE=128
-CONFIG_RIONET_RX_SIZE=128
-CONFIG_TUN=m
-CONFIG_TAP=m
-# CONFIG_TUN_VNET_CROSS_LE is not set
-CONFIG_VETH=m
-CONFIG_VIRTIO_NET=m
-CONFIG_NLMON=m
-CONFIG_NET_VRF=m
-CONFIG_VSOCKMON=m
-CONFIG_SUNGEM_PHY=m
-CONFIG_ARCNET=m
-CONFIG_ARCNET_1201=m
-CONFIG_ARCNET_1051=m
-CONFIG_ARCNET_RAW=m
-CONFIG_ARCNET_CAP=m
-CONFIG_ARCNET_COM90xx=m
-CONFIG_ARCNET_COM90xxIO=m
-CONFIG_ARCNET_RIM_I=m
-CONFIG_ARCNET_COM20020=m
-CONFIG_ARCNET_COM20020_PCI=m
-CONFIG_ARCNET_COM20020_CS=m
-CONFIG_ATM_DRIVERS=y
-# CONFIG_ATM_DUMMY is not set
-CONFIG_ATM_TCP=m
-CONFIG_ATM_LANAI=m
-CONFIG_ATM_ENI=m
-# CONFIG_ATM_ENI_DEBUG is not set
-# CONFIG_ATM_ENI_TUNE_BURST is not set
-CONFIG_ATM_FIRESTREAM=m
-CONFIG_ATM_ZATM=m
-# CONFIG_ATM_ZATM_DEBUG is not set
-CONFIG_ATM_NICSTAR=m
-CONFIG_ATM_NICSTAR_USE_SUNI=y
-CONFIG_ATM_NICSTAR_USE_IDT77105=y
-CONFIG_ATM_IDT77252=m
-# CONFIG_ATM_IDT77252_DEBUG is not set
-# CONFIG_ATM_IDT77252_RCV_ALL is not set
-CONFIG_ATM_IDT77252_USE_SUNI=y
-CONFIG_ATM_AMBASSADOR=m
-# CONFIG_ATM_AMBASSADOR_DEBUG is not set
-CONFIG_ATM_HORIZON=m
-# CONFIG_ATM_HORIZON_DEBUG is not set
-CONFIG_ATM_IA=m
-# CONFIG_ATM_IA_DEBUG is not set
-CONFIG_ATM_FORE200E=m
-CONFIG_ATM_FORE200E_USE_TASKLET=y
-CONFIG_ATM_FORE200E_TX_RETRY=16
-CONFIG_ATM_FORE200E_DEBUG=0
-CONFIG_ATM_HE=m
-CONFIG_ATM_HE_USE_SUNI=y
-CONFIG_ATM_SOLOS=m
-
-#
-# CAIF transport drivers
-#
-CONFIG_CAIF_TTY=m
-CONFIG_CAIF_SPI_SLAVE=m
-CONFIG_CAIF_SPI_SYNC=y
-CONFIG_CAIF_HSI=m
-CONFIG_CAIF_VIRTIO=m
-
-#
-# Distributed Switch Architecture drivers
-#
-CONFIG_B53=m
-CONFIG_B53_SPI_DRIVER=m
-CONFIG_B53_MDIO_DRIVER=m
-CONFIG_B53_MMAP_DRIVER=m
-CONFIG_B53_SRAB_DRIVER=m
-CONFIG_NET_DSA_LOOP=m
-CONFIG_NET_DSA_MT7530=m
-CONFIG_NET_DSA_MV88E6060=m
-CONFIG_MICROCHIP_KSZ=m
-CONFIG_MICROCHIP_KSZ_SPI_DRIVER=m
-CONFIG_NET_DSA_MV88E6XXX=m
-CONFIG_NET_DSA_MV88E6XXX_GLOBAL2=y
-CONFIG_NET_DSA_QCA8K=m
-CONFIG_NET_DSA_SMSC_LAN9303=m
-CONFIG_NET_DSA_SMSC_LAN9303_I2C=m
-CONFIG_NET_DSA_SMSC_LAN9303_MDIO=m
-CONFIG_ETHERNET=y
-CONFIG_MDIO=m
-CONFIG_NET_VENDOR_3COM=y
-CONFIG_PCMCIA_3C574=m
-CONFIG_PCMCIA_3C589=m
-CONFIG_VORTEX=m
-CONFIG_TYPHOON=m
-CONFIG_NET_VENDOR_ADAPTEC=y
-CONFIG_ADAPTEC_STARFIRE=m
-CONFIG_NET_VENDOR_AGERE=y
-CONFIG_ET131X=m
-CONFIG_NET_VENDOR_ALACRITECH=y
-CONFIG_SLICOSS=m
-CONFIG_NET_VENDOR_ALTEON=y
-CONFIG_ACENIC=m
-# CONFIG_ACENIC_OMIT_TIGON_I is not set
-CONFIG_ALTERA_TSE=m
-CONFIG_NET_VENDOR_AMAZON=y
-CONFIG_ENA_ETHERNET=m
-CONFIG_NET_VENDOR_AMD=y
-CONFIG_AMD8111_ETH=m
-CONFIG_PCNET32=m
-CONFIG_PCMCIA_NMCLAN=m
-CONFIG_AMD_XGBE=m
-CONFIG_AMD_XGBE_DCB=y
-CONFIG_AMD_XGBE_HAVE_ECC=y
-CONFIG_NET_VENDOR_AQUANTIA=y
-CONFIG_AQTION=m
-CONFIG_NET_VENDOR_ARC=y
-CONFIG_NET_VENDOR_ATHEROS=y
-CONFIG_ATL2=m
-CONFIG_ATL1=m
-CONFIG_ATL1E=m
-CONFIG_ATL1C=m
-CONFIG_ALX=m
-CONFIG_NET_VENDOR_AURORA=y
-CONFIG_AURORA_NB8800=m
-CONFIG_NET_CADENCE=y
-CONFIG_MACB=m
-CONFIG_MACB_USE_HWSTAMP=y
-CONFIG_MACB_PCI=m
-CONFIG_NET_VENDOR_BROADCOM=y
-CONFIG_B44=m
-CONFIG_B44_PCI_AUTOSELECT=y
-CONFIG_B44_PCICORE_AUTOSELECT=y
-CONFIG_B44_PCI=y
-CONFIG_BNX2=m
-CONFIG_CNIC=m
-CONFIG_TIGON3=m
-CONFIG_TIGON3_HWMON=y
-CONFIG_BNX2X=m
-CONFIG_BNX2X_SRIOV=y
-CONFIG_BNXT=m
-CONFIG_BNXT_SRIOV=y
-CONFIG_BNXT_FLOWER_OFFLOAD=y
-CONFIG_BNXT_DCB=y
-CONFIG_NET_VENDOR_BROCADE=y
-CONFIG_BNA=m
-CONFIG_NET_VENDOR_CAVIUM=y
-CONFIG_THUNDER_NIC_PF=m
-CONFIG_THUNDER_NIC_VF=m
-CONFIG_THUNDER_NIC_BGX=m
-CONFIG_THUNDER_NIC_RGX=m
-CONFIG_LIQUIDIO=m
-CONFIG_LIQUIDIO_VF=m
-CONFIG_NET_VENDOR_CHELSIO=y
-CONFIG_CHELSIO_T1=m
-CONFIG_CHELSIO_T1_1G=y
-CONFIG_CHELSIO_T3=m
-CONFIG_CHELSIO_T4=m
-CONFIG_CHELSIO_T4_DCB=y
-# CONFIG_CHELSIO_T4_FCOE is not set
-CONFIG_CHELSIO_T4VF=m
-CONFIG_CHELSIO_LIB=m
-CONFIG_NET_VENDOR_CISCO=y
-CONFIG_ENIC=m
-CONFIG_CX_ECAT=m
-CONFIG_DNET=m
-CONFIG_NET_VENDOR_DEC=y
-CONFIG_NET_TULIP=y
-CONFIG_DE2104X=m
-CONFIG_DE2104X_DSL=0
-CONFIG_TULIP=m
-CONFIG_TULIP_MWI=y
-CONFIG_TULIP_MMIO=y
-CONFIG_TULIP_NAPI=y
-CONFIG_TULIP_NAPI_HW_MITIGATION=y
-CONFIG_DE4X5=m
-CONFIG_WINBOND_840=m
-CONFIG_DM9102=m
-CONFIG_ULI526X=m
-CONFIG_PCMCIA_XIRCOM=m
-CONFIG_NET_VENDOR_DLINK=y
-CONFIG_DL2K=m
-CONFIG_SUNDANCE=m
-# CONFIG_SUNDANCE_MMIO is not set
-CONFIG_NET_VENDOR_EMULEX=y
-CONFIG_BE2NET=m
-CONFIG_BE2NET_HWMON=y
-CONFIG_NET_VENDOR_EZCHIP=y
-CONFIG_NET_VENDOR_EXAR=y
-CONFIG_S2IO=m
-CONFIG_VXGE=m
-# CONFIG_VXGE_DEBUG_TRACE_ALL is not set
-CONFIG_NET_VENDOR_FUJITSU=y
-CONFIG_PCMCIA_FMVJ18X=m
-CONFIG_NET_VENDOR_HP=y
-CONFIG_HP100=m
-CONFIG_NET_VENDOR_HUAWEI=y
-CONFIG_HINIC=m
-CONFIG_NET_VENDOR_INTEL=y
-CONFIG_E100=m
-CONFIG_E1000=m
-CONFIG_E1000E=m
-CONFIG_E1000E_HWTS=y
-CONFIG_IGB=m
-CONFIG_IGB_HWMON=y
-CONFIG_IGB_DCA=y
-CONFIG_IGBVF=m
-CONFIG_IXGB=m
-CONFIG_IXGBE=m
-CONFIG_IXGBE_HWMON=y
-CONFIG_IXGBE_DCA=y
-CONFIG_IXGBE_DCB=y
-CONFIG_IXGBEVF=m
-CONFIG_I40E=m
-CONFIG_I40E_DCB=y
-CONFIG_I40EVF=m
-CONFIG_FM10K=m
-CONFIG_NET_VENDOR_I825XX=y
-CONFIG_JME=m
-CONFIG_NET_VENDOR_MARVELL=y
-CONFIG_MVMDIO=m
-CONFIG_SKGE=m
-# CONFIG_SKGE_DEBUG is not set
-CONFIG_SKGE_GENESIS=y
-CONFIG_SKY2=m
-# CONFIG_SKY2_DEBUG is not set
-CONFIG_NET_VENDOR_MELLANOX=y
-CONFIG_MLX4_EN=m
-CONFIG_MLX4_EN_DCB=y
-CONFIG_MLX4_CORE=m
-CONFIG_MLX4_DEBUG=y
-CONFIG_MLX5_CORE=m
-CONFIG_MLX5_ACCEL=y
-CONFIG_MLX5_FPGA=y
-# CONFIG_MLX5_CORE_EN is not set
-CONFIG_MLXSW_CORE=m
-CONFIG_MLXSW_CORE_HWMON=y
-CONFIG_MLXSW_CORE_THERMAL=y
-CONFIG_MLXSW_PCI=m
-CONFIG_MLXSW_I2C=m
-CONFIG_MLXSW_SWITCHIB=m
-CONFIG_MLXSW_SWITCHX2=m
-CONFIG_MLXSW_SPECTRUM=m
-CONFIG_MLXSW_SPECTRUM_DCB=y
-CONFIG_MLXSW_MINIMAL=m
-CONFIG_MLXFW=m
-CONFIG_NET_VENDOR_MICREL=y
-CONFIG_KS8842=m
-CONFIG_KS8851=m
-CONFIG_KS8851_MLL=m
-CONFIG_KSZ884X_PCI=m
-CONFIG_NET_VENDOR_MICROCHIP=y
-CONFIG_ENC28J60=m
-# CONFIG_ENC28J60_WRITEVERIFY is not set
-CONFIG_ENCX24J600=m
-CONFIG_NET_VENDOR_MYRI=y
-CONFIG_MYRI10GE=m
-CONFIG_MYRI10GE_DCA=y
-CONFIG_FEALNX=m
-CONFIG_NET_VENDOR_NATSEMI=y
-CONFIG_NATSEMI=m
-CONFIG_NS83820=m
-CONFIG_NET_VENDOR_NETRONOME=y
-CONFIG_NFP=m
-# CONFIG_NFP_APP_FLOWER is not set
-# CONFIG_NFP_DEBUG is not set
-CONFIG_NET_VENDOR_8390=y
-CONFIG_PCMCIA_AXNET=m
-CONFIG_NE2K_PCI=m
-CONFIG_PCMCIA_PCNET=m
-CONFIG_NET_VENDOR_NVIDIA=y
-CONFIG_FORCEDETH=m
-CONFIG_NET_VENDOR_OKI=y
-CONFIG_ETHOC=m
-CONFIG_NET_PACKET_ENGINE=y
-CONFIG_HAMACHI=m
-CONFIG_YELLOWFIN=m
-CONFIG_NET_VENDOR_QLOGIC=y
-CONFIG_QLA3XXX=m
-CONFIG_QLCNIC=m
-CONFIG_QLCNIC_SRIOV=y
-CONFIG_QLCNIC_DCB=y
-CONFIG_QLCNIC_HWMON=y
-CONFIG_QLGE=m
-CONFIG_NETXEN_NIC=m
-CONFIG_QED=m
-CONFIG_QED_LL2=y
-CONFIG_QED_SRIOV=y
-CONFIG_QEDE=m
-CONFIG_QED_RDMA=y
-CONFIG_QED_ISCSI=y
-CONFIG_QED_FCOE=y
-CONFIG_NET_VENDOR_QUALCOMM=y
-CONFIG_QCOM_EMAC=m
-CONFIG_RMNET=m
-CONFIG_NET_VENDOR_REALTEK=y
-CONFIG_ATP=m
-CONFIG_8139CP=m
-CONFIG_8139TOO=m
-# CONFIG_8139TOO_PIO is not set
-CONFIG_8139TOO_TUNE_TWISTER=y
-CONFIG_8139TOO_8129=y
-# CONFIG_8139_OLD_RX_RESET is not set
-CONFIG_R8169=m
-CONFIG_NET_VENDOR_RENESAS=y
-CONFIG_NET_VENDOR_RDC=y
-CONFIG_R6040=m
-CONFIG_NET_VENDOR_ROCKER=y
-CONFIG_ROCKER=m
-CONFIG_NET_VENDOR_SAMSUNG=y
-CONFIG_SXGBE_ETH=m
-CONFIG_NET_VENDOR_SEEQ=y
-CONFIG_NET_VENDOR_SILAN=y
-CONFIG_SC92031=m
-CONFIG_NET_VENDOR_SIS=y
-CONFIG_SIS900=m
-CONFIG_SIS190=m
-CONFIG_NET_VENDOR_SOLARFLARE=y
-CONFIG_SFC=m
-CONFIG_SFC_MTD=y
-CONFIG_SFC_MCDI_MON=y
-CONFIG_SFC_SRIOV=y
-CONFIG_SFC_MCDI_LOGGING=y
-CONFIG_SFC_FALCON=m
-CONFIG_SFC_FALCON_MTD=y
-CONFIG_NET_VENDOR_SMSC=y
-CONFIG_PCMCIA_SMC91C92=m
-CONFIG_EPIC100=m
-CONFIG_SMSC911X=m
-# CONFIG_SMSC911X_ARCH_HOOKS is not set
-CONFIG_SMSC9420=m
-CONFIG_NET_VENDOR_STMICRO=y
-CONFIG_STMMAC_ETH=m
-CONFIG_STMMAC_PLATFORM=m
-CONFIG_DWMAC_GENERIC=m
-CONFIG_STMMAC_PCI=m
-CONFIG_NET_VENDOR_SUN=y
-CONFIG_HAPPYMEAL=m
-CONFIG_SUNGEM=m
-CONFIG_CASSINI=m
-CONFIG_NIU=m
-CONFIG_NET_VENDOR_TEHUTI=y
-CONFIG_TEHUTI=m
-CONFIG_NET_VENDOR_TI=y
-CONFIG_TI_CPSW_ALE=m
-CONFIG_TLAN=m
-CONFIG_NET_VENDOR_VIA=y
-CONFIG_VIA_RHINE=m
-CONFIG_VIA_RHINE_MMIO=y
-CONFIG_VIA_VELOCITY=m
-CONFIG_NET_VENDOR_WIZNET=y
-CONFIG_WIZNET_W5100=m
-CONFIG_WIZNET_W5300=m
-# CONFIG_WIZNET_BUS_DIRECT is not set
-# CONFIG_WIZNET_BUS_INDIRECT is not set
-CONFIG_WIZNET_BUS_ANY=y
-CONFIG_WIZNET_W5100_SPI=m
-CONFIG_NET_VENDOR_XIRCOM=y
-CONFIG_PCMCIA_XIRC2PS=m
-CONFIG_NET_VENDOR_SYNOPSYS=y
-CONFIG_DWC_XLGMAC=m
-CONFIG_DWC_XLGMAC_PCI=m
-CONFIG_FDDI=m
-CONFIG_DEFXX=m
-# CONFIG_DEFXX_MMIO is not set
-CONFIG_SKFP=m
-CONFIG_HIPPI=y
-CONFIG_ROADRUNNER=m
-CONFIG_ROADRUNNER_LARGE_RINGS=y
-CONFIG_NET_SB1000=m
-CONFIG_MDIO_DEVICE=m
-CONFIG_MDIO_BUS=m
-CONFIG_MDIO_BITBANG=m
-CONFIG_MDIO_CAVIUM=m
-CONFIG_MDIO_GPIO=m
-CONFIG_MDIO_THUNDER=m
-CONFIG_PHYLIB=m
-CONFIG_SWPHY=y
-CONFIG_LED_TRIGGER_PHY=y
-
-#
-# MII PHY device drivers
-#
-CONFIG_AMD_PHY=m
-CONFIG_AQUANTIA_PHY=m
-CONFIG_AT803X_PHY=m
-CONFIG_BCM7XXX_PHY=m
-CONFIG_BCM87XX_PHY=m
-CONFIG_BCM_NET_PHYLIB=m
-CONFIG_BROADCOM_PHY=m
-CONFIG_CICADA_PHY=m
-CONFIG_CORTINA_PHY=m
-CONFIG_DAVICOM_PHY=m
-CONFIG_DP83848_PHY=m
-CONFIG_DP83867_PHY=m
-CONFIG_FIXED_PHY=m
-CONFIG_ICPLUS_PHY=m
-CONFIG_INTEL_XWAY_PHY=m
-CONFIG_LSI_ET1011C_PHY=m
-CONFIG_LXT_PHY=m
-CONFIG_MARVELL_PHY=m
-CONFIG_MARVELL_10G_PHY=m
-CONFIG_MICREL_PHY=m
-CONFIG_MICROCHIP_PHY=m
-CONFIG_MICROSEMI_PHY=m
-CONFIG_NATIONAL_PHY=m
-CONFIG_QSEMI_PHY=m
-CONFIG_REALTEK_PHY=m
-CONFIG_ROCKCHIP_PHY=m
-CONFIG_SMSC_PHY=m
-CONFIG_STE10XP=m
-CONFIG_TERANETICS_PHY=m
-CONFIG_VITESSE_PHY=m
-CONFIG_XILINX_GMII2RGMII=m
-CONFIG_MICREL_KS8995MA=m
-CONFIG_PLIP=m
-CONFIG_PPP=m
-CONFIG_PPP_BSDCOMP=m
-CONFIG_PPP_DEFLATE=m
-CONFIG_PPP_FILTER=y
-CONFIG_PPP_MPPE=m
-CONFIG_PPP_MULTILINK=y
-CONFIG_PPPOATM=m
-CONFIG_PPPOE=m
-CONFIG_PPTP=m
-CONFIG_PPPOL2TP=m
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_SYNC_TTY=m
-CONFIG_SLIP=m
-CONFIG_SLHC=m
-CONFIG_SLIP_COMPRESSED=y
-CONFIG_SLIP_SMART=y
-CONFIG_SLIP_MODE_SLIP6=y
-
-#
-# Host-side USB support is needed for USB Network Adapter support
-#
-CONFIG_USB_NET_DRIVERS=m
-CONFIG_USB_CATC=m
-CONFIG_USB_KAWETH=m
-CONFIG_USB_PEGASUS=m
-CONFIG_USB_RTL8150=m
-CONFIG_USB_RTL8152=m
-CONFIG_USB_LAN78XX=m
-CONFIG_USB_USBNET=m
-CONFIG_USB_NET_AX8817X=m
-CONFIG_USB_NET_AX88179_178A=m
-CONFIG_USB_NET_CDCETHER=m
-CONFIG_USB_NET_CDC_EEM=m
-CONFIG_USB_NET_CDC_NCM=m
-CONFIG_USB_NET_HUAWEI_CDC_NCM=m
-CONFIG_USB_NET_CDC_MBIM=m
-CONFIG_USB_NET_DM9601=m
-CONFIG_USB_NET_SR9700=m
-CONFIG_USB_NET_SR9800=m
-CONFIG_USB_NET_SMSC75XX=m
-CONFIG_USB_NET_SMSC95XX=m
-CONFIG_USB_NET_GL620A=m
-CONFIG_USB_NET_NET1080=m
-CONFIG_USB_NET_PLUSB=m
-CONFIG_USB_NET_MCS7830=m
-CONFIG_USB_NET_RNDIS_HOST=m
-CONFIG_USB_NET_CDC_SUBSET_ENABLE=m
-CONFIG_USB_NET_CDC_SUBSET=m
-CONFIG_USB_ALI_M5632=y
-CONFIG_USB_AN2720=y
-CONFIG_USB_BELKIN=y
-CONFIG_USB_ARMLINUX=y
-CONFIG_USB_EPSON2888=y
-CONFIG_USB_KC2190=y
-CONFIG_USB_NET_ZAURUS=m
-CONFIG_USB_NET_CX82310_ETH=m
-CONFIG_USB_NET_KALMIA=m
-CONFIG_USB_NET_QMI_WWAN=m
-CONFIG_USB_HSO=m
-CONFIG_USB_NET_INT51X1=m
-CONFIG_USB_CDC_PHONET=m
-CONFIG_USB_IPHETH=m
-CONFIG_USB_SIERRA_NET=m
-CONFIG_USB_VL600=m
-CONFIG_USB_NET_CH9200=m
-CONFIG_WLAN=y
-CONFIG_WLAN_VENDOR_ADMTEK=y
-CONFIG_ADM8211=m
-CONFIG_ATH_COMMON=m
-CONFIG_WLAN_VENDOR_ATH=y
-# CONFIG_ATH_DEBUG is not set
-CONFIG_ATH5K=m
-# CONFIG_ATH5K_DEBUG is not set
-# CONFIG_ATH5K_TRACER is not set
-CONFIG_ATH5K_PCI=y
-CONFIG_ATH9K_HW=m
-CONFIG_ATH9K_COMMON=m
-CONFIG_ATH9K_BTCOEX_SUPPORT=y
-CONFIG_ATH9K=m
-CONFIG_ATH9K_PCI=y
-CONFIG_ATH9K_AHB=y
-# CONFIG_ATH9K_DEBUGFS is not set
-CONFIG_ATH9K_DYNACK=y
-CONFIG_ATH9K_WOW=y
-CONFIG_ATH9K_RFKILL=y
-CONFIG_ATH9K_CHANNEL_CONTEXT=y
-CONFIG_ATH9K_PCOEM=y
-CONFIG_ATH9K_HTC=m
-# CONFIG_ATH9K_HTC_DEBUGFS is not set
-CONFIG_ATH9K_HWRNG=y
-CONFIG_CARL9170=m
-CONFIG_CARL9170_LEDS=y
-CONFIG_CARL9170_WPC=y
-# CONFIG_CARL9170_HWRNG is not set
-CONFIG_ATH6KL=m
-CONFIG_ATH6KL_SDIO=m
-CONFIG_ATH6KL_USB=m
-# CONFIG_ATH6KL_DEBUG is not set
-# CONFIG_ATH6KL_TRACING is not set
-CONFIG_AR5523=m
-CONFIG_WIL6210=m
-CONFIG_WIL6210_ISR_COR=y
-CONFIG_WIL6210_TRACING=y
-CONFIG_WIL6210_DEBUGFS=y
-CONFIG_ATH10K=m
-CONFIG_ATH10K_PCI=m
-CONFIG_ATH10K_SDIO=m
-CONFIG_ATH10K_USB=m
-# CONFIG_ATH10K_DEBUG is not set
-# CONFIG_ATH10K_DEBUGFS is not set
-# CONFIG_ATH10K_TRACING is not set
-CONFIG_WCN36XX=m
-# CONFIG_WCN36XX_DEBUGFS is not set
-CONFIG_WLAN_VENDOR_ATMEL=y
-CONFIG_ATMEL=m
-CONFIG_PCI_ATMEL=m
-CONFIG_PCMCIA_ATMEL=m
-CONFIG_AT76C50X_USB=m
-CONFIG_WLAN_VENDOR_BROADCOM=y
-CONFIG_B43=m
-CONFIG_B43_BCMA=y
-CONFIG_B43_SSB=y
-CONFIG_B43_BUSES_BCMA_AND_SSB=y
-# CONFIG_B43_BUSES_BCMA is not set
-# CONFIG_B43_BUSES_SSB is not set
-CONFIG_B43_PCI_AUTOSELECT=y
-CONFIG_B43_PCICORE_AUTOSELECT=y
-CONFIG_B43_SDIO=y
-CONFIG_B43_BCMA_PIO=y
-CONFIG_B43_PIO=y
-CONFIG_B43_PHY_G=y
-CONFIG_B43_PHY_N=y
-CONFIG_B43_PHY_LP=y
-CONFIG_B43_PHY_HT=y
-CONFIG_B43_LEDS=y
-CONFIG_B43_HWRNG=y
-# CONFIG_B43_DEBUG is not set
-CONFIG_B43LEGACY=m
-CONFIG_B43LEGACY_PCI_AUTOSELECT=y
-CONFIG_B43LEGACY_PCICORE_AUTOSELECT=y
-CONFIG_B43LEGACY_LEDS=y
-CONFIG_B43LEGACY_HWRNG=y
-# CONFIG_B43LEGACY_DEBUG is not set
-CONFIG_B43LEGACY_DMA=y
-CONFIG_B43LEGACY_PIO=y
-CONFIG_B43LEGACY_DMA_AND_PIO_MODE=y
-# CONFIG_B43LEGACY_DMA_MODE is not set
-# CONFIG_B43LEGACY_PIO_MODE is not set
-CONFIG_BRCMUTIL=m
-CONFIG_BRCMSMAC=m
-CONFIG_BRCMFMAC=m
-CONFIG_BRCMFMAC_PROTO_BCDC=y
-CONFIG_BRCMFMAC_PROTO_MSGBUF=y
-CONFIG_BRCMFMAC_SDIO=y
-CONFIG_BRCMFMAC_USB=y
-CONFIG_BRCMFMAC_PCIE=y
-CONFIG_BRCM_TRACING=y
-# CONFIG_BRCMDBG is not set
-CONFIG_WLAN_VENDOR_CISCO=y
-CONFIG_AIRO=m
-CONFIG_AIRO_CS=m
-CONFIG_WLAN_VENDOR_INTEL=y
-CONFIG_IPW2100=m
-CONFIG_IPW2100_MONITOR=y
-# CONFIG_IPW2100_DEBUG is not set
-CONFIG_IPW2200=m
-CONFIG_IPW2200_MONITOR=y
-CONFIG_IPW2200_RADIOTAP=y
-CONFIG_IPW2200_PROMISCUOUS=y
-CONFIG_IPW2200_QOS=y
-# CONFIG_IPW2200_DEBUG is not set
-CONFIG_LIBIPW=m
-# CONFIG_LIBIPW_DEBUG is not set
-CONFIG_IWLEGACY=m
-CONFIG_IWL4965=m
-CONFIG_IWL3945=m
-
-#
-# iwl3945 / iwl4965 Debugging Options
-#
-# CONFIG_IWLEGACY_DEBUG is not set
-CONFIG_IWLWIFI=m
-CONFIG_IWLWIFI_LEDS=y
-CONFIG_IWLDVM=m
-CONFIG_IWLMVM=m
-CONFIG_IWLWIFI_OPMODE_MODULAR=y
-CONFIG_IWLWIFI_BCAST_FILTERING=y
-
-#
-# Debugging Options
-#
-# CONFIG_IWLWIFI_DEBUG is not set
-CONFIG_IWLWIFI_DEVICE_TRACING=y
-CONFIG_WLAN_VENDOR_INTERSIL=y
-CONFIG_HOSTAP=m
-CONFIG_HOSTAP_FIRMWARE=y
-CONFIG_HOSTAP_FIRMWARE_NVRAM=y
-CONFIG_HOSTAP_PLX=m
-CONFIG_HOSTAP_PCI=m
-CONFIG_HOSTAP_CS=m
-CONFIG_HERMES=m
-CONFIG_HERMES_PRISM=y
-CONFIG_HERMES_CACHE_FW_ON_INIT=y
-CONFIG_PLX_HERMES=m
-CONFIG_TMD_HERMES=m
-CONFIG_NORTEL_HERMES=m
-CONFIG_PCI_HERMES=m
-CONFIG_PCMCIA_HERMES=m
-CONFIG_PCMCIA_SPECTRUM=m
-CONFIG_ORINOCO_USB=m
-CONFIG_P54_COMMON=m
-CONFIG_P54_USB=m
-CONFIG_P54_PCI=m
-CONFIG_P54_SPI=m
-CONFIG_P54_SPI_DEFAULT_EEPROM=y
-CONFIG_P54_LEDS=y
-CONFIG_PRISM54=m
-CONFIG_WLAN_VENDOR_MARVELL=y
-CONFIG_LIBERTAS=m
-CONFIG_LIBERTAS_USB=m
-CONFIG_LIBERTAS_CS=m
-CONFIG_LIBERTAS_SDIO=m
-CONFIG_LIBERTAS_SPI=m
-# CONFIG_LIBERTAS_DEBUG is not set
-CONFIG_LIBERTAS_MESH=y
-CONFIG_LIBERTAS_THINFIRM=m
-# CONFIG_LIBERTAS_THINFIRM_DEBUG is not set
-CONFIG_LIBERTAS_THINFIRM_USB=m
-CONFIG_MWIFIEX=m
-CONFIG_MWIFIEX_SDIO=m
-CONFIG_MWIFIEX_PCIE=m
-CONFIG_MWIFIEX_USB=m
-CONFIG_MWL8K=m
-CONFIG_WLAN_VENDOR_MEDIATEK=y
-CONFIG_MT7601U=m
-CONFIG_WLAN_VENDOR_RALINK=y
-CONFIG_RT2X00=m
-CONFIG_RT2400PCI=m
-CONFIG_RT2500PCI=m
-CONFIG_RT61PCI=m
-CONFIG_RT2800PCI=m
-CONFIG_RT2800PCI_RT33XX=y
-CONFIG_RT2800PCI_RT35XX=y
-CONFIG_RT2800PCI_RT53XX=y
-CONFIG_RT2800PCI_RT3290=y
-CONFIG_RT2500USB=m
-CONFIG_RT73USB=m
-CONFIG_RT2800USB=m
-CONFIG_RT2800USB_RT33XX=y
-CONFIG_RT2800USB_RT35XX=y
-CONFIG_RT2800USB_RT3573=y
-CONFIG_RT2800USB_RT53XX=y
-CONFIG_RT2800USB_RT55XX=y
-CONFIG_RT2800USB_UNKNOWN=y
-CONFIG_RT2800_LIB=m
-CONFIG_RT2800_LIB_MMIO=m
-CONFIG_RT2X00_LIB_MMIO=m
-CONFIG_RT2X00_LIB_PCI=m
-CONFIG_RT2X00_LIB_USB=m
-CONFIG_RT2X00_LIB=m
-CONFIG_RT2X00_LIB_FIRMWARE=y
-CONFIG_RT2X00_LIB_CRYPTO=y
-CONFIG_RT2X00_LIB_LEDS=y
-# CONFIG_RT2X00_DEBUG is not set
-CONFIG_WLAN_VENDOR_REALTEK=y
-CONFIG_RTL8180=m
-CONFIG_RTL8187=m
-CONFIG_RTL8187_LEDS=y
-CONFIG_RTL_CARDS=m
-CONFIG_RTL8192CE=m
-CONFIG_RTL8192SE=m
-CONFIG_RTL8192DE=m
-CONFIG_RTL8723AE=m
-CONFIG_RTL8723BE=m
-CONFIG_RTL8188EE=m
-CONFIG_RTL8192EE=m
-CONFIG_RTL8821AE=m
-CONFIG_RTL8192CU=m
-CONFIG_RTLWIFI=m
-CONFIG_RTLWIFI_PCI=m
-CONFIG_RTLWIFI_USB=m
-# CONFIG_RTLWIFI_DEBUG is not set
-CONFIG_RTL8192C_COMMON=m
-CONFIG_RTL8723_COMMON=m
-CONFIG_RTLBTCOEXIST=m
-CONFIG_RTL8XXXU=m
-CONFIG_RTL8XXXU_UNTESTED=y
-CONFIG_WLAN_VENDOR_RSI=y
-CONFIG_RSI_91X=m
-# CONFIG_RSI_DEBUGFS is not set
-CONFIG_RSI_SDIO=m
-CONFIG_RSI_USB=m
-CONFIG_WLAN_VENDOR_ST=y
-CONFIG_CW1200=m
-CONFIG_CW1200_WLAN_SDIO=m
-CONFIG_CW1200_WLAN_SPI=m
-CONFIG_WLAN_VENDOR_TI=y
-CONFIG_WL1251=m
-CONFIG_WL1251_SPI=m
-CONFIG_WL1251_SDIO=m
-CONFIG_WL12XX=m
-CONFIG_WL18XX=m
-CONFIG_WLCORE=m
-CONFIG_WLCORE_SDIO=m
-CONFIG_WILINK_PLATFORM_DATA=y
-CONFIG_WLAN_VENDOR_ZYDAS=y
-CONFIG_USB_ZD1201=m
-CONFIG_ZD1211RW=m
-# CONFIG_ZD1211RW_DEBUG is not set
-CONFIG_WLAN_VENDOR_QUANTENNA=y
-CONFIG_QTNFMAC=m
-CONFIG_QTNFMAC_PEARL_PCIE=m
-CONFIG_PCMCIA_RAYCS=m
-CONFIG_PCMCIA_WL3501=m
-# CONFIG_MAC80211_HWSIM is not set
-CONFIG_USB_NET_RNDIS_WLAN=m
-
-#
-# WiMAX Wireless Broadband devices
-#
-CONFIG_WIMAX_I2400M=m
-CONFIG_WIMAX_I2400M_USB=m
-CONFIG_WIMAX_I2400M_DEBUG_LEVEL=8
-CONFIG_WAN=y
-CONFIG_LANMEDIA=m
-CONFIG_HDLC=m
-CONFIG_HDLC_RAW=m
-CONFIG_HDLC_RAW_ETH=m
-CONFIG_HDLC_CISCO=m
-CONFIG_HDLC_FR=m
-CONFIG_HDLC_PPP=m
-CONFIG_HDLC_X25=m
-CONFIG_PCI200SYN=m
-CONFIG_WANXL=m
-CONFIG_PC300TOO=m
-CONFIG_FARSYNC=m
-CONFIG_DSCC4=m
-CONFIG_DSCC4_PCISYNC=y
-CONFIG_DSCC4_PCI_RST=y
-CONFIG_DLCI=m
-CONFIG_DLCI_MAX=8
-CONFIG_LAPBETHER=m
-CONFIG_X25_ASY=m
-CONFIG_SBNI=m
-CONFIG_SBNI_MULTILINE=y
-CONFIG_IEEE802154_DRIVERS=m
-CONFIG_IEEE802154_FAKELB=m
-CONFIG_IEEE802154_AT86RF230=m
-# CONFIG_IEEE802154_AT86RF230_DEBUGFS is not set
-CONFIG_IEEE802154_MRF24J40=m
-CONFIG_IEEE802154_CC2520=m
-CONFIG_IEEE802154_ATUSB=m
-CONFIG_IEEE802154_ADF7242=m
-CONFIG_IEEE802154_CA8210=m
-# CONFIG_IEEE802154_CA8210_DEBUGFS is not set
-CONFIG_VMXNET3=m
-CONFIG_FUJITSU_ES=m
-CONFIG_HYPERV_NET=m
-CONFIG_ISDN=y
-CONFIG_ISDN_I4L=m
-CONFIG_ISDN_PPP=y
-CONFIG_ISDN_PPP_VJ=y
-CONFIG_ISDN_MPP=y
-CONFIG_IPPP_FILTER=y
-CONFIG_ISDN_PPP_BSDCOMP=m
-CONFIG_ISDN_AUDIO=y
-CONFIG_ISDN_TTY_FAX=y
-CONFIG_ISDN_X25=y
-
-#
-# ISDN feature submodules
-#
-CONFIG_ISDN_DIVERSION=m
-
-#
-# ISDN4Linux hardware drivers
-#
-
-#
-# Passive cards
-#
-CONFIG_ISDN_DRV_HISAX=m
-
-#
-# D-channel protocol features
-#
-CONFIG_HISAX_EURO=y
-CONFIG_DE_AOC=y
-# CONFIG_HISAX_NO_SENDCOMPLETE is not set
-# CONFIG_HISAX_NO_LLC is not set
-# CONFIG_HISAX_NO_KEYPAD is not set
-CONFIG_HISAX_1TR6=y
-CONFIG_HISAX_NI1=y
-CONFIG_HISAX_MAX_CARDS=8
-
-#
-# HiSax supported cards
-#
-CONFIG_HISAX_16_3=y
-CONFIG_HISAX_TELESPCI=y
-CONFIG_HISAX_S0BOX=y
-CONFIG_HISAX_FRITZPCI=y
-CONFIG_HISAX_AVM_A1_PCMCIA=y
-CONFIG_HISAX_ELSA=y
-CONFIG_HISAX_DIEHLDIVA=y
-CONFIG_HISAX_SEDLBAUER=y
-CONFIG_HISAX_NETJET=y
-CONFIG_HISAX_NETJET_U=y
-CONFIG_HISAX_NICCY=y
-CONFIG_HISAX_BKM_A4T=y
-CONFIG_HISAX_SCT_QUADRO=y
-CONFIG_HISAX_GAZEL=y
-CONFIG_HISAX_HFC_PCI=y
-CONFIG_HISAX_W6692=y
-CONFIG_HISAX_HFC_SX=y
-CONFIG_HISAX_ENTERNOW_PCI=y
-# CONFIG_HISAX_DEBUG is not set
-
-#
-# HiSax PCMCIA card service modules
-#
-CONFIG_HISAX_SEDLBAUER_CS=m
-CONFIG_HISAX_ELSA_CS=m
-CONFIG_HISAX_AVM_A1_CS=m
-CONFIG_HISAX_TELES_CS=m
-
-#
-# HiSax sub driver modules
-#
-CONFIG_HISAX_ST5481=m
-CONFIG_HISAX_HFCUSB=m
-CONFIG_HISAX_HFC4S8S=m
-CONFIG_HISAX_FRITZ_PCIPNP=m
-CONFIG_ISDN_CAPI=m
-CONFIG_CAPI_TRACE=y
-CONFIG_ISDN_CAPI_CAPI20=m
-CONFIG_ISDN_CAPI_MIDDLEWARE=y
-CONFIG_ISDN_CAPI_CAPIDRV=m
-# CONFIG_ISDN_CAPI_CAPIDRV_VERBOSE is not set
-
-#
-# CAPI hardware drivers
-#
-CONFIG_CAPI_AVM=y
-CONFIG_ISDN_DRV_AVMB1_B1PCI=m
-CONFIG_ISDN_DRV_AVMB1_B1PCIV4=y
-CONFIG_ISDN_DRV_AVMB1_B1PCMCIA=m
-CONFIG_ISDN_DRV_AVMB1_AVM_CS=m
-CONFIG_ISDN_DRV_AVMB1_T1PCI=m
-CONFIG_ISDN_DRV_AVMB1_C4=m
-CONFIG_CAPI_EICON=y
-CONFIG_ISDN_DIVAS=m
-CONFIG_ISDN_DIVAS_BRIPCI=y
-CONFIG_ISDN_DIVAS_PRIPCI=y
-CONFIG_ISDN_DIVAS_DIVACAPI=m
-CONFIG_ISDN_DIVAS_USERIDI=m
-CONFIG_ISDN_DIVAS_MAINT=m
-CONFIG_ISDN_DRV_GIGASET=m
-CONFIG_GIGASET_CAPI=y
-# CONFIG_GIGASET_I4L is not set
-# CONFIG_GIGASET_DUMMYLL is not set
-CONFIG_GIGASET_BASE=m
-CONFIG_GIGASET_M105=m
-CONFIG_GIGASET_M101=m
-# CONFIG_GIGASET_DEBUG is not set
-CONFIG_HYSDN=m
-CONFIG_HYSDN_CAPI=y
-CONFIG_MISDN=m
-CONFIG_MISDN_DSP=m
-CONFIG_MISDN_L1OIP=m
-
-#
-# mISDN hardware drivers
-#
-CONFIG_MISDN_HFCPCI=m
-CONFIG_MISDN_HFCMULTI=m
-CONFIG_MISDN_HFCUSB=m
-CONFIG_MISDN_AVMFRITZ=m
-CONFIG_MISDN_SPEEDFAX=m
-CONFIG_MISDN_INFINEON=m
-CONFIG_MISDN_W6692=m
-CONFIG_MISDN_NETJET=m
-CONFIG_MISDN_IPAC=m
-CONFIG_MISDN_ISAR=m
-CONFIG_ISDN_HDLC=m
-CONFIG_NVM=y
-# CONFIG_NVM_DEBUG is not set
-CONFIG_NVM_RRPC=m
-CONFIG_NVM_PBLK=m
-
-#
-# Input device support
-#
-CONFIG_INPUT=y
-CONFIG_INPUT_LEDS=y
-CONFIG_INPUT_FF_MEMLESS=m
-CONFIG_INPUT_POLLDEV=m
-CONFIG_INPUT_SPARSEKMAP=m
-CONFIG_INPUT_MATRIXKMAP=m
-
-#
-# Userland interfaces
-#
-CONFIG_INPUT_MOUSEDEV=y
-CONFIG_INPUT_MOUSEDEV_PSAUX=y
-CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
-CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
-CONFIG_INPUT_JOYDEV=m
-CONFIG_INPUT_EVDEV=m
-# CONFIG_INPUT_EVBUG is not set
-
-#
-# Input Device Drivers
-#
-CONFIG_INPUT_KEYBOARD=y
-CONFIG_KEYBOARD_ADC=m
-CONFIG_KEYBOARD_ADP5588=m
-CONFIG_KEYBOARD_ADP5589=m
-CONFIG_KEYBOARD_ATKBD=y
-CONFIG_KEYBOARD_QT1070=m
-CONFIG_KEYBOARD_QT2160=m
-CONFIG_KEYBOARD_DLINK_DIR685=m
-CONFIG_KEYBOARD_LKKBD=m
-CONFIG_KEYBOARD_GPIO=m
-CONFIG_KEYBOARD_GPIO_POLLED=m
-CONFIG_KEYBOARD_TCA6416=m
-CONFIG_KEYBOARD_TCA8418=m
-CONFIG_KEYBOARD_MATRIX=m
-CONFIG_KEYBOARD_LM8323=m
-CONFIG_KEYBOARD_LM8333=m
-CONFIG_KEYBOARD_MAX7359=m
-CONFIG_KEYBOARD_MCS=m
-CONFIG_KEYBOARD_MPR121=m
-CONFIG_KEYBOARD_NEWTON=m
-CONFIG_KEYBOARD_OPENCORES=m
-CONFIG_KEYBOARD_SAMSUNG=m
-CONFIG_KEYBOARD_STOWAWAY=m
-CONFIG_KEYBOARD_SUNKBD=m
-CONFIG_KEYBOARD_TM2_TOUCHKEY=m
-CONFIG_KEYBOARD_XTKBD=m
-CONFIG_KEYBOARD_CROS_EC=m
-CONFIG_INPUT_MOUSE=y
-CONFIG_MOUSE_PS2=m
-CONFIG_MOUSE_PS2_ALPS=y
-CONFIG_MOUSE_PS2_BYD=y
-CONFIG_MOUSE_PS2_LOGIPS2PP=y
-CONFIG_MOUSE_PS2_SYNAPTICS=y
-CONFIG_MOUSE_PS2_SYNAPTICS_SMBUS=y
-CONFIG_MOUSE_PS2_CYPRESS=y
-CONFIG_MOUSE_PS2_LIFEBOOK=y
-CONFIG_MOUSE_PS2_TRACKPOINT=y
-CONFIG_MOUSE_PS2_ELANTECH=y
-CONFIG_MOUSE_PS2_SENTELIC=y
-CONFIG_MOUSE_PS2_TOUCHKIT=y
-CONFIG_MOUSE_PS2_FOCALTECH=y
-# CONFIG_MOUSE_PS2_VMMOUSE is not set
-CONFIG_MOUSE_PS2_SMBUS=y
-CONFIG_MOUSE_SERIAL=m
-CONFIG_MOUSE_APPLETOUCH=m
-CONFIG_MOUSE_BCM5974=m
-CONFIG_MOUSE_CYAPA=m
-CONFIG_MOUSE_ELAN_I2C=m
-CONFIG_MOUSE_ELAN_I2C_I2C=y
-CONFIG_MOUSE_ELAN_I2C_SMBUS=y
-CONFIG_MOUSE_VSXXXAA=m
-CONFIG_MOUSE_GPIO=m
-CONFIG_MOUSE_SYNAPTICS_I2C=m
-CONFIG_MOUSE_SYNAPTICS_USB=m
-CONFIG_INPUT_JOYSTICK=y
-CONFIG_JOYSTICK_ANALOG=m
-CONFIG_JOYSTICK_A3D=m
-CONFIG_JOYSTICK_ADI=m
-CONFIG_JOYSTICK_COBRA=m
-CONFIG_JOYSTICK_GF2K=m
-CONFIG_JOYSTICK_GRIP=m
-CONFIG_JOYSTICK_GRIP_MP=m
-CONFIG_JOYSTICK_GUILLEMOT=m
-CONFIG_JOYSTICK_INTERACT=m
-CONFIG_JOYSTICK_SIDEWINDER=m
-CONFIG_JOYSTICK_TMDC=m
-CONFIG_JOYSTICK_IFORCE=m
-CONFIG_JOYSTICK_IFORCE_USB=y
-CONFIG_JOYSTICK_IFORCE_232=y
-CONFIG_JOYSTICK_WARRIOR=m
-CONFIG_JOYSTICK_MAGELLAN=m
-CONFIG_JOYSTICK_SPACEORB=m
-CONFIG_JOYSTICK_SPACEBALL=m
-CONFIG_JOYSTICK_STINGER=m
-CONFIG_JOYSTICK_TWIDJOY=m
-CONFIG_JOYSTICK_ZHENHUA=m
-CONFIG_JOYSTICK_DB9=m
-CONFIG_JOYSTICK_GAMECON=m
-CONFIG_JOYSTICK_TURBOGRAFX=m
-CONFIG_JOYSTICK_AS5011=m
-# CONFIG_JOYSTICK_JOYDUMP is not set
-CONFIG_JOYSTICK_XPAD=m
-CONFIG_JOYSTICK_XPAD_FF=y
-CONFIG_JOYSTICK_XPAD_LEDS=y
-CONFIG_JOYSTICK_WALKERA0701=m
-CONFIG_JOYSTICK_PSXPAD_SPI=m
-CONFIG_JOYSTICK_PSXPAD_SPI_FF=y
-CONFIG_INPUT_TABLET=y
-CONFIG_TABLET_USB_ACECAD=m
-CONFIG_TABLET_USB_AIPTEK=m
-CONFIG_TABLET_USB_GTCO=m
-CONFIG_TABLET_USB_HANWANG=m
-CONFIG_TABLET_USB_KBTAB=m
-CONFIG_TABLET_USB_PEGASUS=m
-CONFIG_TABLET_SERIAL_WACOM4=m
-CONFIG_INPUT_TOUCHSCREEN=y
-CONFIG_TOUCHSCREEN_PROPERTIES=y
-CONFIG_TOUCHSCREEN_ADS7846=m
-CONFIG_TOUCHSCREEN_AD7877=m
-CONFIG_TOUCHSCREEN_AD7879=m
-CONFIG_TOUCHSCREEN_AD7879_I2C=m
-CONFIG_TOUCHSCREEN_AD7879_SPI=m
-CONFIG_TOUCHSCREEN_ATMEL_MXT=m
-# CONFIG_TOUCHSCREEN_ATMEL_MXT_T37 is not set
-CONFIG_TOUCHSCREEN_AUO_PIXCIR=m
-CONFIG_TOUCHSCREEN_BU21013=m
-CONFIG_TOUCHSCREEN_CY8CTMG110=m
-CONFIG_TOUCHSCREEN_CYTTSP_CORE=m
-CONFIG_TOUCHSCREEN_CYTTSP_I2C=m
-CONFIG_TOUCHSCREEN_CYTTSP_SPI=m
-CONFIG_TOUCHSCREEN_CYTTSP4_CORE=m
-CONFIG_TOUCHSCREEN_CYTTSP4_I2C=m
-CONFIG_TOUCHSCREEN_CYTTSP4_SPI=m
-CONFIG_TOUCHSCREEN_DA9052=m
-CONFIG_TOUCHSCREEN_DYNAPRO=m
-CONFIG_TOUCHSCREEN_HAMPSHIRE=m
-CONFIG_TOUCHSCREEN_EETI=m
-CONFIG_TOUCHSCREEN_EGALAX_SERIAL=m
-CONFIG_TOUCHSCREEN_FUJITSU=m
-CONFIG_TOUCHSCREEN_GOODIX=m
-CONFIG_TOUCHSCREEN_ILI210X=m
-CONFIG_TOUCHSCREEN_GUNZE=m
-CONFIG_TOUCHSCREEN_EKTF2127=m
-CONFIG_TOUCHSCREEN_ELAN=m
-CONFIG_TOUCHSCREEN_ELO=m
-CONFIG_TOUCHSCREEN_WACOM_W8001=m
-CONFIG_TOUCHSCREEN_WACOM_I2C=m
-CONFIG_TOUCHSCREEN_MAX11801=m
-CONFIG_TOUCHSCREEN_MCS5000=m
-CONFIG_TOUCHSCREEN_MMS114=m
-CONFIG_TOUCHSCREEN_MELFAS_MIP4=m
-CONFIG_TOUCHSCREEN_MTOUCH=m
-CONFIG_TOUCHSCREEN_INEXIO=m
-CONFIG_TOUCHSCREEN_MK712=m
-CONFIG_TOUCHSCREEN_PENMOUNT=m
-CONFIG_TOUCHSCREEN_EDT_FT5X06=m
-CONFIG_TOUCHSCREEN_TOUCHRIGHT=m
-CONFIG_TOUCHSCREEN_TOUCHWIN=m
-CONFIG_TOUCHSCREEN_TI_AM335X_TSC=m
-CONFIG_TOUCHSCREEN_UCB1400=m
-CONFIG_TOUCHSCREEN_PIXCIR=m
-CONFIG_TOUCHSCREEN_WDT87XX_I2C=m
-CONFIG_TOUCHSCREEN_WM831X=m
-CONFIG_TOUCHSCREEN_WM97XX=m
-CONFIG_TOUCHSCREEN_WM9705=y
-CONFIG_TOUCHSCREEN_WM9712=y
-CONFIG_TOUCHSCREEN_WM9713=y
-CONFIG_TOUCHSCREEN_USB_COMPOSITE=m
-CONFIG_TOUCHSCREEN_MC13783=m
-CONFIG_TOUCHSCREEN_USB_EGALAX=y
-CONFIG_TOUCHSCREEN_USB_PANJIT=y
-CONFIG_TOUCHSCREEN_USB_3M=y
-CONFIG_TOUCHSCREEN_USB_ITM=y
-CONFIG_TOUCHSCREEN_USB_ETURBO=y
-CONFIG_TOUCHSCREEN_USB_GUNZE=y
-CONFIG_TOUCHSCREEN_USB_DMC_TSC10=y
-CONFIG_TOUCHSCREEN_USB_IRTOUCH=y
-CONFIG_TOUCHSCREEN_USB_IDEALTEK=y
-CONFIG_TOUCHSCREEN_USB_GENERAL_TOUCH=y
-CONFIG_TOUCHSCREEN_USB_GOTOP=y
-CONFIG_TOUCHSCREEN_USB_JASTEC=y
-CONFIG_TOUCHSCREEN_USB_ELO=y
-CONFIG_TOUCHSCREEN_USB_E2I=y
-CONFIG_TOUCHSCREEN_USB_ZYTRONIC=y
-CONFIG_TOUCHSCREEN_USB_ETT_TC45USB=y
-CONFIG_TOUCHSCREEN_USB_NEXIO=y
-CONFIG_TOUCHSCREEN_USB_EASYTOUCH=y
-CONFIG_TOUCHSCREEN_TOUCHIT213=m
-CONFIG_TOUCHSCREEN_TSC_SERIO=m
-CONFIG_TOUCHSCREEN_TSC200X_CORE=m
-CONFIG_TOUCHSCREEN_TSC2004=m
-CONFIG_TOUCHSCREEN_TSC2005=m
-CONFIG_TOUCHSCREEN_TSC2007=m
-CONFIG_TOUCHSCREEN_TSC2007_IIO=y
-CONFIG_TOUCHSCREEN_PCAP=m
-CONFIG_TOUCHSCREEN_RM_TS=m
-CONFIG_TOUCHSCREEN_SILEAD=m
-CONFIG_TOUCHSCREEN_SIS_I2C=m
-CONFIG_TOUCHSCREEN_ST1232=m
-CONFIG_TOUCHSCREEN_STMFTS=m
-CONFIG_TOUCHSCREEN_SUR40=m
-CONFIG_TOUCHSCREEN_SURFACE3_SPI=m
-CONFIG_TOUCHSCREEN_SX8654=m
-CONFIG_TOUCHSCREEN_TPS6507X=m
-CONFIG_TOUCHSCREEN_ZET6223=m
-CONFIG_TOUCHSCREEN_ZFORCE=m
-CONFIG_TOUCHSCREEN_ROHM_BU21023=m
-CONFIG_INPUT_MISC=y
-CONFIG_INPUT_88PM80X_ONKEY=m
-CONFIG_INPUT_AD714X=m
-CONFIG_INPUT_AD714X_I2C=m
-CONFIG_INPUT_AD714X_SPI=m
-CONFIG_INPUT_ARIZONA_HAPTICS=m
-CONFIG_INPUT_BMA150=m
-CONFIG_INPUT_E3X0_BUTTON=m
-CONFIG_INPUT_PCSPKR=m
-CONFIG_INPUT_MAX77693_HAPTIC=m
-CONFIG_INPUT_MC13783_PWRBUTTON=m
-CONFIG_INPUT_MMA8450=m
-CONFIG_INPUT_APANEL=m
-CONFIG_INPUT_GP2A=m
-CONFIG_INPUT_GPIO_BEEPER=m
-CONFIG_INPUT_GPIO_TILT_POLLED=m
-CONFIG_INPUT_GPIO_DECODER=m
-CONFIG_INPUT_ATLAS_BTNS=m
-CONFIG_INPUT_ATI_REMOTE2=m
-CONFIG_INPUT_KEYSPAN_REMOTE=m
-CONFIG_INPUT_KXTJ9=m
-CONFIG_INPUT_KXTJ9_POLLED_MODE=y
-CONFIG_INPUT_POWERMATE=m
-CONFIG_INPUT_YEALINK=m
-CONFIG_INPUT_CM109=m
-CONFIG_INPUT_REGULATOR_HAPTIC=m
-CONFIG_INPUT_RETU_PWRBUTTON=m
-CONFIG_INPUT_TPS65218_PWRBUTTON=m
-CONFIG_INPUT_AXP20X_PEK=m
-CONFIG_INPUT_UINPUT=m
-CONFIG_INPUT_PCF50633_PMU=m
-CONFIG_INPUT_PCF8574=m
-CONFIG_INPUT_PWM_BEEPER=m
-CONFIG_INPUT_PWM_VIBRA=m
-CONFIG_INPUT_GPIO_ROTARY_ENCODER=m
-CONFIG_INPUT_DA9052_ONKEY=m
-CONFIG_INPUT_DA9063_ONKEY=m
-CONFIG_INPUT_WM831X_ON=m
-CONFIG_INPUT_PCAP=m
-CONFIG_INPUT_ADXL34X=m
-CONFIG_INPUT_ADXL34X_I2C=m
-CONFIG_INPUT_ADXL34X_SPI=m
-CONFIG_INPUT_IMS_PCU=m
-CONFIG_INPUT_CMA3000=m
-CONFIG_INPUT_CMA3000_I2C=m
-CONFIG_INPUT_IDEAPAD_SLIDEBAR=m
-CONFIG_INPUT_SOC_BUTTON_ARRAY=m
-CONFIG_INPUT_DRV260X_HAPTICS=m
-CONFIG_INPUT_DRV2665_HAPTICS=m
-CONFIG_INPUT_DRV2667_HAPTICS=m
-CONFIG_RMI4_CORE=m
-CONFIG_RMI4_I2C=m
-CONFIG_RMI4_SPI=m
-CONFIG_RMI4_SMB=m
-CONFIG_RMI4_F03=y
-CONFIG_RMI4_F03_SERIO=m
-CONFIG_RMI4_2D_SENSOR=y
-CONFIG_RMI4_F11=y
-CONFIG_RMI4_F12=y
-CONFIG_RMI4_F30=y
-CONFIG_RMI4_F34=y
-CONFIG_RMI4_F54=y
-CONFIG_RMI4_F55=y
-
-#
-# Hardware I/O ports
-#
-CONFIG_SERIO=y
-CONFIG_ARCH_MIGHT_HAVE_PC_SERIO=y
-CONFIG_SERIO_I8042=y
-CONFIG_SERIO_SERPORT=m
-CONFIG_SERIO_CT82C710=m
-CONFIG_SERIO_PARKBD=m
-CONFIG_SERIO_PCIPS2=m
-CONFIG_SERIO_LIBPS2=y
-CONFIG_SERIO_RAW=m
-CONFIG_SERIO_ALTERA_PS2=m
-CONFIG_SERIO_PS2MULT=m
-CONFIG_SERIO_ARC_PS2=m
-CONFIG_HYPERV_KEYBOARD=m
-CONFIG_SERIO_GPIO_PS2=m
-CONFIG_USERIO=m
-CONFIG_GAMEPORT=m
-CONFIG_GAMEPORT_NS558=m
-CONFIG_GAMEPORT_L4=m
-CONFIG_GAMEPORT_EMU10K1=m
-CONFIG_GAMEPORT_FM801=m
-
-#
-# Character devices
-#
-CONFIG_TTY=y
-CONFIG_VT=y
-CONFIG_CONSOLE_TRANSLATIONS=y
-CONFIG_VT_CONSOLE=y
-CONFIG_VT_CONSOLE_SLEEP=y
-CONFIG_HW_CONSOLE=y
-CONFIG_VT_HW_CONSOLE_BINDING=y
-CONFIG_UNIX98_PTYS=y
-CONFIG_LEGACY_PTYS=y
-CONFIG_LEGACY_PTY_COUNT=256
-CONFIG_SERIAL_NONSTANDARD=y
-CONFIG_ROCKETPORT=m
-CONFIG_CYCLADES=m
-CONFIG_CYZ_INTR=y
-CONFIG_MOXA_INTELLIO=m
-CONFIG_MOXA_SMARTIO=m
-CONFIG_SYNCLINK=m
-CONFIG_SYNCLINKMP=m
-CONFIG_SYNCLINK_GT=m
-CONFIG_NOZOMI=m
-CONFIG_ISI=m
-CONFIG_N_HDLC=m
-CONFIG_N_GSM=m
-CONFIG_TRACE_ROUTER=m
-CONFIG_TRACE_SINK=m
-CONFIG_DEVMEM=y
-# CONFIG_DEVKMEM is not set
-
-#
-# Serial drivers
-#
-CONFIG_SERIAL_EARLYCON=y
-CONFIG_SERIAL_8250=y
-# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
-CONFIG_SERIAL_8250_PNP=y
-CONFIG_SERIAL_8250_FINTEK=y
-CONFIG_SERIAL_8250_CONSOLE=y
-CONFIG_SERIAL_8250_DMA=y
-CONFIG_SERIAL_8250_PCI=y
-CONFIG_SERIAL_8250_EXAR=y
-CONFIG_SERIAL_8250_CS=m
-CONFIG_SERIAL_8250_MEN_MCB=m
-CONFIG_SERIAL_8250_NR_UARTS=4
-CONFIG_SERIAL_8250_RUNTIME_UARTS=4
-CONFIG_SERIAL_8250_EXTENDED=y
-CONFIG_SERIAL_8250_MANY_PORTS=y
-CONFIG_SERIAL_8250_SHARE_IRQ=y
-CONFIG_SERIAL_8250_DETECT_IRQ=y
-CONFIG_SERIAL_8250_RSA=y
-# CONFIG_SERIAL_8250_FSL is not set
-CONFIG_SERIAL_8250_DW=m
-CONFIG_SERIAL_8250_RT288X=y
-CONFIG_SERIAL_8250_LPSS=y
-CONFIG_SERIAL_8250_MID=y
-CONFIG_SERIAL_8250_MOXA=m
-
-#
-# Non-8250 serial port support
-#
-CONFIG_SERIAL_MAX3100=m
-CONFIG_SERIAL_MAX310X=y
-CONFIG_SERIAL_UARTLITE=m
-CONFIG_SERIAL_UARTLITE_NR_UARTS=1
-CONFIG_SERIAL_CORE=y
-CONFIG_SERIAL_CORE_CONSOLE=y
-CONFIG_SERIAL_JSM=m
-CONFIG_SERIAL_SCCNXP=m
-CONFIG_SERIAL_SC16IS7XX_CORE=m
-CONFIG_SERIAL_SC16IS7XX=m
-CONFIG_SERIAL_SC16IS7XX_I2C=y
-CONFIG_SERIAL_SC16IS7XX_SPI=y
-CONFIG_SERIAL_ALTERA_JTAGUART=m
-CONFIG_SERIAL_ALTERA_UART=m
-CONFIG_SERIAL_ALTERA_UART_MAXPORTS=4
-CONFIG_SERIAL_ALTERA_UART_BAUDRATE=115200
-CONFIG_SERIAL_IFX6X60=m
-CONFIG_SERIAL_ARC=m
-CONFIG_SERIAL_ARC_NR_PORTS=1
-CONFIG_SERIAL_RP2=m
-CONFIG_SERIAL_RP2_NR_UARTS=32
-CONFIG_SERIAL_FSL_LPUART=m
-CONFIG_SERIAL_MEN_Z135=m
-CONFIG_SERIAL_DEV_BUS=m
-CONFIG_PRINTER=m
-CONFIG_LP_CONSOLE=y
-CONFIG_PPDEV=m
-CONFIG_HVC_DRIVER=y
-CONFIG_VIRTIO_CONSOLE=m
-CONFIG_IPMI_HANDLER=m
-CONFIG_IPMI_DMI_DECODE=y
-CONFIG_IPMI_PANIC_EVENT=y
-CONFIG_IPMI_PANIC_STRING=y
-CONFIG_IPMI_DEVICE_INTERFACE=m
-CONFIG_IPMI_SI=m
-CONFIG_IPMI_SSIF=m
-CONFIG_IPMI_WATCHDOG=m
-CONFIG_IPMI_POWEROFF=m
-CONFIG_HW_RANDOM=m
-CONFIG_HW_RANDOM_TIMERIOMEM=m
-CONFIG_HW_RANDOM_INTEL=m
-CONFIG_HW_RANDOM_AMD=m
-CONFIG_HW_RANDOM_VIA=m
-CONFIG_HW_RANDOM_VIRTIO=m
-CONFIG_HW_RANDOM_TPM=m
-CONFIG_NVRAM=m
-CONFIG_R3964=m
-CONFIG_APPLICOM=m
-
-#
-# PCMCIA character devices
-#
-CONFIG_SYNCLINK_CS=m
-CONFIG_CARDMAN_4000=m
-CONFIG_CARDMAN_4040=m
-CONFIG_SCR24X=m
-CONFIG_IPWIRELESS=m
-CONFIG_MWAVE=m
-CONFIG_RAW_DRIVER=m
-CONFIG_MAX_RAW_DEVS=256
-CONFIG_HPET=y
-CONFIG_HPET_MMAP=y
-CONFIG_HPET_MMAP_DEFAULT=y
-CONFIG_HANGCHECK_TIMER=m
-CONFIG_TCG_TPM=m
-CONFIG_TCG_TIS_CORE=m
-CONFIG_TCG_TIS=m
-CONFIG_TCG_TIS_SPI=m
-CONFIG_TCG_TIS_I2C_ATMEL=m
-CONFIG_TCG_TIS_I2C_INFINEON=m
-CONFIG_TCG_TIS_I2C_NUVOTON=m
-CONFIG_TCG_NSC=m
-CONFIG_TCG_ATMEL=m
-CONFIG_TCG_INFINEON=m
-CONFIG_TCG_CRB=m
-CONFIG_TCG_VTPM_PROXY=m
-CONFIG_TCG_TIS_ST33ZP24=m
-CONFIG_TCG_TIS_ST33ZP24_I2C=m
-CONFIG_TCG_TIS_ST33ZP24_SPI=m
-CONFIG_TELCLOCK=m
-CONFIG_DEVPORT=y
-CONFIG_XILLYBUS=m
-CONFIG_XILLYBUS_PCIE=m
-
-#
-# I2C support
-#
-CONFIG_I2C=m
-CONFIG_I2C_BOARDINFO=y
-CONFIG_I2C_COMPAT=y
-CONFIG_I2C_CHARDEV=m
-CONFIG_I2C_MUX=m
-
-#
-# Multiplexer I2C Chip support
-#
-CONFIG_I2C_MUX_GPIO=m
-CONFIG_I2C_MUX_LTC4306=m
-CONFIG_I2C_MUX_PCA9541=m
-CONFIG_I2C_MUX_PCA954x=m
-CONFIG_I2C_MUX_REG=m
-CONFIG_I2C_MUX_MLXCPLD=m
-CONFIG_I2C_HELPER_AUTO=y
-CONFIG_I2C_SMBUS=m
-CONFIG_I2C_ALGOBIT=m
-CONFIG_I2C_ALGOPCA=m
-
-#
-# I2C Hardware Bus support
-#
-
-#
-# PC SMBus host controller drivers
-#
-CONFIG_I2C_ALI1535=m
-CONFIG_I2C_ALI1563=m
-CONFIG_I2C_ALI15X3=m
-CONFIG_I2C_AMD756=m
-CONFIG_I2C_AMD756_S4882=m
-CONFIG_I2C_AMD8111=m
-CONFIG_I2C_I801=m
-CONFIG_I2C_ISCH=m
-CONFIG_I2C_ISMT=m
-CONFIG_I2C_PIIX4=m
-CONFIG_I2C_NFORCE2=m
-CONFIG_I2C_NFORCE2_S4985=m
-CONFIG_I2C_SIS5595=m
-CONFIG_I2C_SIS630=m
-CONFIG_I2C_SIS96X=m
-CONFIG_I2C_VIA=m
-CONFIG_I2C_VIAPRO=m
-
-#
-# ACPI drivers
-#
-CONFIG_I2C_SCMI=m
-
-#
-# I2C system bus drivers (mostly embedded / system-on-chip)
-#
-CONFIG_I2C_CBUS_GPIO=m
-CONFIG_I2C_DESIGNWARE_CORE=m
-CONFIG_I2C_DESIGNWARE_PLATFORM=m
-# CONFIG_I2C_DESIGNWARE_SLAVE is not set
-CONFIG_I2C_DESIGNWARE_PCI=m
-# CONFIG_I2C_DESIGNWARE_BAYTRAIL is not set
-CONFIG_I2C_EMEV2=m
-CONFIG_I2C_GPIO=m
-CONFIG_I2C_KEMPLD=m
-CONFIG_I2C_OCORES=m
-CONFIG_I2C_PCA_PLATFORM=m
-# CONFIG_I2C_PXA_PCI is not set
-CONFIG_I2C_SIMTEC=m
-CONFIG_I2C_XILINX=m
-
-#
-# External I2C/SMBus adapter drivers
-#
-CONFIG_I2C_DIOLAN_U2C=m
-CONFIG_I2C_DLN2=m
-CONFIG_I2C_PARPORT=m
-CONFIG_I2C_PARPORT_LIGHT=m
-CONFIG_I2C_ROBOTFUZZ_OSIF=m
-CONFIG_I2C_TAOS_EVM=m
-CONFIG_I2C_TINY_USB=m
-CONFIG_I2C_VIPERBOARD=m
-
-#
-# Other I2C/SMBus bus drivers
-#
-CONFIG_I2C_MLXCPLD=m
-CONFIG_I2C_CROS_EC_TUNNEL=m
-# CONFIG_I2C_STUB is not set
-CONFIG_I2C_SLAVE=y
-CONFIG_I2C_SLAVE_EEPROM=m
-# CONFIG_I2C_DEBUG_CORE is not set
-# CONFIG_I2C_DEBUG_ALGO is not set
-# CONFIG_I2C_DEBUG_BUS is not set
-CONFIG_SPI=y
-# CONFIG_SPI_DEBUG is not set
-CONFIG_SPI_MASTER=y
-
-#
-# SPI Master Controller Drivers
-#
-CONFIG_SPI_ALTERA=m
-CONFIG_SPI_AXI_SPI_ENGINE=m
-CONFIG_SPI_BITBANG=m
-CONFIG_SPI_BUTTERFLY=m
-CONFIG_SPI_CADENCE=m
-CONFIG_SPI_DESIGNWARE=m
-CONFIG_SPI_DW_PCI=m
-CONFIG_SPI_DW_MID_DMA=y
-CONFIG_SPI_DW_MMIO=m
-CONFIG_SPI_DLN2=m
-CONFIG_SPI_GPIO=m
-CONFIG_SPI_LM70_LLP=m
-CONFIG_SPI_OC_TINY=m
-CONFIG_SPI_PXA2XX=m
-CONFIG_SPI_PXA2XX_PCI=m
-CONFIG_SPI_ROCKCHIP=m
-CONFIG_SPI_SC18IS602=m
-CONFIG_SPI_XCOMM=m
-CONFIG_SPI_XILINX=m
-CONFIG_SPI_ZYNQMP_GQSPI=m
-
-#
-# SPI Protocol Masters
-#
-CONFIG_SPI_SPIDEV=m
-CONFIG_SPI_LOOPBACK_TEST=m
-CONFIG_SPI_TLE62X0=m
-CONFIG_SPI_SLAVE=y
-CONFIG_SPI_SLAVE_TIME=m
-CONFIG_SPI_SLAVE_SYSTEM_CONTROL=m
-CONFIG_SPMI=m
-CONFIG_HSI=m
-CONFIG_HSI_BOARDINFO=y
-
-#
-# HSI controllers
-#
-
-#
-# HSI clients
-#
-CONFIG_HSI_CHAR=m
-CONFIG_PPS=m
-# CONFIG_PPS_DEBUG is not set
-# CONFIG_NTP_PPS is not set
-
-#
-# PPS clients support
-#
-# CONFIG_PPS_CLIENT_KTIMER is not set
-CONFIG_PPS_CLIENT_LDISC=m
-CONFIG_PPS_CLIENT_PARPORT=m
-CONFIG_PPS_CLIENT_GPIO=m
-
-#
-# PPS generators support
-#
-
-#
-# PTP clock support
-#
-CONFIG_PTP_1588_CLOCK=m
-CONFIG_DP83640_PHY=m
-CONFIG_PTP_1588_CLOCK_KVM=m
-CONFIG_PINCTRL=y
-
-#
-# Pin controllers
-#
-CONFIG_PINMUX=y
-CONFIG_PINCONF=y
-CONFIG_GENERIC_PINCONF=y
-# CONFIG_DEBUG_PINCTRL is not set
-CONFIG_PINCTRL_AMD=m
-CONFIG_PINCTRL_MCP23S08=m
-CONFIG_PINCTRL_BAYTRAIL=y
-CONFIG_PINCTRL_CHERRYVIEW=m
-CONFIG_PINCTRL_INTEL=m
-CONFIG_PINCTRL_BROXTON=m
-CONFIG_PINCTRL_CANNONLAKE=m
-CONFIG_PINCTRL_DENVERTON=m
-CONFIG_PINCTRL_GEMINILAKE=m
-CONFIG_PINCTRL_LEWISBURG=m
-CONFIG_PINCTRL_SUNRISEPOINT=m
-CONFIG_GPIOLIB=y
-CONFIG_GPIO_ACPI=y
-CONFIG_GPIOLIB_IRQCHIP=y
-# CONFIG_DEBUG_GPIO is not set
-CONFIG_GPIO_SYSFS=y
-CONFIG_GPIO_GENERIC=m
-CONFIG_GPIO_MAX730X=m
-
-#
-# Memory mapped GPIO drivers
-#
-CONFIG_GPIO_AMDPT=m
-CONFIG_GPIO_AXP209=m
-CONFIG_GPIO_DWAPB=m
-CONFIG_GPIO_EXAR=m
-CONFIG_GPIO_GENERIC_PLATFORM=m
-CONFIG_GPIO_ICH=m
-CONFIG_GPIO_LYNXPOINT=y
-CONFIG_GPIO_MENZ127=m
-CONFIG_GPIO_MOCKUP=m
-CONFIG_GPIO_VX855=m
-
-#
-# Port-mapped I/O GPIO drivers
-#
-CONFIG_GPIO_F7188X=m
-CONFIG_GPIO_IT87=m
-CONFIG_GPIO_SCH=m
-CONFIG_GPIO_SCH311X=m
-
-#
-# I2C GPIO expanders
-#
-CONFIG_GPIO_ADP5588=m
-CONFIG_GPIO_MAX7300=m
-CONFIG_GPIO_MAX732X=m
-CONFIG_GPIO_PCA953X=m
-CONFIG_GPIO_PCF857X=m
-CONFIG_GPIO_TPIC2810=m
-
-#
-# MFD GPIO expanders
-#
-CONFIG_GPIO_ARIZONA=m
-CONFIG_GPIO_BD9571MWV=m
-CONFIG_GPIO_DA9052=m
-CONFIG_GPIO_DLN2=m
-CONFIG_GPIO_JANZ_TTL=m
-CONFIG_GPIO_KEMPLD=m
-CONFIG_GPIO_LP3943=m
-CONFIG_GPIO_LP873X=m
-CONFIG_GPIO_TPS65086=m
-CONFIG_GPIO_TPS65218=m
-CONFIG_GPIO_TPS65912=m
-CONFIG_GPIO_UCB1400=m
-CONFIG_GPIO_WHISKEY_COVE=m
-CONFIG_GPIO_WM831X=m
-CONFIG_GPIO_WM8994=m
-
-#
-# PCI GPIO expanders
-#
-CONFIG_GPIO_AMD8111=m
-CONFIG_GPIO_ML_IOH=m
-CONFIG_GPIO_PCI_IDIO_16=m
-CONFIG_GPIO_RDC321X=m
-
-#
-# SPI GPIO expanders
-#
-CONFIG_GPIO_MAX7301=m
-CONFIG_GPIO_MC33880=m
-CONFIG_GPIO_PISOSR=m
-CONFIG_GPIO_XRA1403=m
-
-#
-# USB GPIO expanders
-#
-CONFIG_GPIO_VIPERBOARD=m
-CONFIG_W1=m
-CONFIG_W1_CON=y
-
-#
-# 1-wire Bus Masters
-#
-CONFIG_W1_MASTER_MATROX=m
-CONFIG_W1_MASTER_DS2490=m
-CONFIG_W1_MASTER_DS2482=m
-CONFIG_W1_MASTER_DS1WM=m
-CONFIG_W1_MASTER_GPIO=m
-
-#
-# 1-wire Slaves
-#
-CONFIG_W1_SLAVE_THERM=m
-CONFIG_W1_SLAVE_SMEM=m
-# CONFIG_W1_SLAVE_DS2405 is not set
-CONFIG_W1_SLAVE_DS2408=m
-# CONFIG_W1_SLAVE_DS2408_READBACK is not set
-CONFIG_W1_SLAVE_DS2413=m
-CONFIG_W1_SLAVE_DS2406=m
-CONFIG_W1_SLAVE_DS2423=m
-CONFIG_W1_SLAVE_DS2805=m
-CONFIG_W1_SLAVE_DS2431=m
-CONFIG_W1_SLAVE_DS2433=m
-CONFIG_W1_SLAVE_DS2433_CRC=y
-CONFIG_W1_SLAVE_DS2438=m
-CONFIG_W1_SLAVE_DS2760=m
-CONFIG_W1_SLAVE_DS2780=m
-CONFIG_W1_SLAVE_DS2781=m
-CONFIG_W1_SLAVE_DS28E04=m
-CONFIG_POWER_AVS=y
-CONFIG_POWER_RESET=y
-# CONFIG_POWER_RESET_RESTART is not set
-CONFIG_POWER_SUPPLY=y
-# CONFIG_POWER_SUPPLY_DEBUG is not set
-CONFIG_PDA_POWER=m
-CONFIG_GENERIC_ADC_BATTERY=m
-CONFIG_WM831X_BACKUP=m
-CONFIG_WM831X_POWER=m
-# CONFIG_TEST_POWER is not set
-CONFIG_BATTERY_DS2760=m
-CONFIG_BATTERY_DS2780=m
-CONFIG_BATTERY_DS2781=m
-CONFIG_BATTERY_DS2782=m
-CONFIG_BATTERY_SBS=m
-CONFIG_CHARGER_SBS=m
-CONFIG_BATTERY_BQ27XXX=m
-CONFIG_BATTERY_BQ27XXX_I2C=m
-CONFIG_BATTERY_BQ27XXX_HDQ=m
-# CONFIG_BATTERY_BQ27XXX_DT_UPDATES_NVM is not set
-CONFIG_BATTERY_DA9052=m
-CONFIG_CHARGER_DA9150=m
-CONFIG_BATTERY_DA9150=m
-CONFIG_CHARGER_AXP20X=m
-CONFIG_BATTERY_AXP20X=m
-CONFIG_AXP20X_POWER=m
-CONFIG_AXP288_CHARGER=m
-CONFIG_AXP288_FUEL_GAUGE=m
-CONFIG_BATTERY_MAX17040=m
-CONFIG_BATTERY_MAX17042=m
-CONFIG_BATTERY_MAX1721X=m
-CONFIG_CHARGER_PCF50633=m
-CONFIG_CHARGER_ISP1704=m
-CONFIG_CHARGER_MAX8903=m
-CONFIG_CHARGER_LP8727=m
-CONFIG_CHARGER_GPIO=m
-CONFIG_CHARGER_MANAGER=y
-CONFIG_CHARGER_LTC3651=m
-CONFIG_CHARGER_MAX14577=m
-CONFIG_CHARGER_MAX77693=m
-CONFIG_CHARGER_BQ2415X=m
-CONFIG_CHARGER_BQ24190=m
-CONFIG_CHARGER_BQ24257=m
-CONFIG_CHARGER_BQ24735=m
-CONFIG_CHARGER_BQ25890=m
-CONFIG_CHARGER_SMB347=m
-CONFIG_CHARGER_TPS65217=m
-CONFIG_BATTERY_GAUGE_LTC2941=m
-CONFIG_BATTERY_RT5033=m
-CONFIG_CHARGER_RT9455=m
-CONFIG_HWMON=m
-CONFIG_HWMON_VID=m
-# CONFIG_HWMON_DEBUG_CHIP is not set
-
-#
-# Native drivers
-#
-CONFIG_SENSORS_ABITUGURU=m
-CONFIG_SENSORS_ABITUGURU3=m
-CONFIG_SENSORS_AD7314=m
-CONFIG_SENSORS_AD7414=m
-CONFIG_SENSORS_AD7418=m
-CONFIG_SENSORS_ADM1021=m
-CONFIG_SENSORS_ADM1025=m
-CONFIG_SENSORS_ADM1026=m
-CONFIG_SENSORS_ADM1029=m
-CONFIG_SENSORS_ADM1031=m
-CONFIG_SENSORS_ADM9240=m
-CONFIG_SENSORS_ADT7X10=m
-CONFIG_SENSORS_ADT7310=m
-CONFIG_SENSORS_ADT7410=m
-CONFIG_SENSORS_ADT7411=m
-CONFIG_SENSORS_ADT7462=m
-CONFIG_SENSORS_ADT7470=m
-CONFIG_SENSORS_ADT7475=m
-CONFIG_SENSORS_ASC7621=m
-CONFIG_SENSORS_K8TEMP=m
-CONFIG_SENSORS_K10TEMP=m
-CONFIG_SENSORS_FAM15H_POWER=m
-CONFIG_SENSORS_APPLESMC=m
-CONFIG_SENSORS_ASB100=m
-CONFIG_SENSORS_ASPEED=m
-CONFIG_SENSORS_ATXP1=m
-CONFIG_SENSORS_DS620=m
-CONFIG_SENSORS_DS1621=m
-CONFIG_SENSORS_DELL_SMM=m
-CONFIG_SENSORS_DA9052_ADC=m
-CONFIG_SENSORS_I5K_AMB=m
-CONFIG_SENSORS_F71805F=m
-CONFIG_SENSORS_F71882FG=m
-CONFIG_SENSORS_F75375S=m
-CONFIG_SENSORS_MC13783_ADC=m
-CONFIG_SENSORS_FSCHMD=m
-CONFIG_SENSORS_FTSTEUTATES=m
-CONFIG_SENSORS_GL518SM=m
-CONFIG_SENSORS_GL520SM=m
-CONFIG_SENSORS_G760A=m
-CONFIG_SENSORS_G762=m
-CONFIG_SENSORS_GPIO_FAN=m
-CONFIG_SENSORS_HIH6130=m
-CONFIG_SENSORS_IBMAEM=m
-CONFIG_SENSORS_IBMPEX=m
-CONFIG_SENSORS_IIO_HWMON=m
-CONFIG_SENSORS_I5500=m
-CONFIG_SENSORS_CORETEMP=m
-CONFIG_SENSORS_IT87=m
-CONFIG_SENSORS_JC42=m
-CONFIG_SENSORS_POWR1220=m
-CONFIG_SENSORS_LINEAGE=m
-CONFIG_SENSORS_LTC2945=m
-CONFIG_SENSORS_LTC2990=m
-CONFIG_SENSORS_LTC4151=m
-CONFIG_SENSORS_LTC4215=m
-CONFIG_SENSORS_LTC4222=m
-CONFIG_SENSORS_LTC4245=m
-CONFIG_SENSORS_LTC4260=m
-CONFIG_SENSORS_LTC4261=m
-CONFIG_SENSORS_MAX1111=m
-CONFIG_SENSORS_MAX16065=m
-CONFIG_SENSORS_MAX1619=m
-CONFIG_SENSORS_MAX1668=m
-CONFIG_SENSORS_MAX197=m
-CONFIG_SENSORS_MAX31722=m
-CONFIG_SENSORS_MAX6639=m
-CONFIG_SENSORS_MAX6642=m
-CONFIG_SENSORS_MAX6650=m
-CONFIG_SENSORS_MAX6697=m
-CONFIG_SENSORS_MAX31790=m
-CONFIG_SENSORS_MCP3021=m
-CONFIG_SENSORS_TC654=m
-CONFIG_SENSORS_MENF21BMC_HWMON=m
-CONFIG_SENSORS_ADCXX=m
-CONFIG_SENSORS_LM63=m
-CONFIG_SENSORS_LM70=m
-CONFIG_SENSORS_LM73=m
-CONFIG_SENSORS_LM75=m
-CONFIG_SENSORS_LM77=m
-CONFIG_SENSORS_LM78=m
-CONFIG_SENSORS_LM80=m
-CONFIG_SENSORS_LM83=m
-CONFIG_SENSORS_LM85=m
-CONFIG_SENSORS_LM87=m
-CONFIG_SENSORS_LM90=m
-CONFIG_SENSORS_LM92=m
-CONFIG_SENSORS_LM93=m
-CONFIG_SENSORS_LM95234=m
-CONFIG_SENSORS_LM95241=m
-CONFIG_SENSORS_LM95245=m
-CONFIG_SENSORS_PC87360=m
-CONFIG_SENSORS_PC87427=m
-CONFIG_SENSORS_NTC_THERMISTOR=m
-CONFIG_SENSORS_NCT6683=m
-CONFIG_SENSORS_NCT6775=m
-CONFIG_SENSORS_NCT7802=m
-CONFIG_SENSORS_NCT7904=m
-CONFIG_SENSORS_PCF8591=m
-CONFIG_PMBUS=m
-CONFIG_SENSORS_PMBUS=m
-CONFIG_SENSORS_ADM1275=m
-CONFIG_SENSORS_IBM_CFFPS=m
-CONFIG_SENSORS_IR35221=m
-CONFIG_SENSORS_LM25066=m
-CONFIG_SENSORS_LTC2978=m
-CONFIG_SENSORS_LTC2978_REGULATOR=y
-CONFIG_SENSORS_LTC3815=m
-CONFIG_SENSORS_MAX16064=m
-CONFIG_SENSORS_MAX20751=m
-CONFIG_SENSORS_MAX34440=m
-CONFIG_SENSORS_MAX8688=m
-CONFIG_SENSORS_TPS40422=m
-CONFIG_SENSORS_TPS53679=m
-CONFIG_SENSORS_UCD9000=m
-CONFIG_SENSORS_UCD9200=m
-CONFIG_SENSORS_ZL6100=m
-CONFIG_SENSORS_SHT15=m
-CONFIG_SENSORS_SHT21=m
-CONFIG_SENSORS_SHT3x=m
-CONFIG_SENSORS_SHTC1=m
-CONFIG_SENSORS_SIS5595=m
-CONFIG_SENSORS_DME1737=m
-CONFIG_SENSORS_EMC1403=m
-CONFIG_SENSORS_EMC2103=m
-CONFIG_SENSORS_EMC6W201=m
-CONFIG_SENSORS_SMSC47M1=m
-CONFIG_SENSORS_SMSC47M192=m
-CONFIG_SENSORS_SMSC47B397=m
-CONFIG_SENSORS_SCH56XX_COMMON=m
-CONFIG_SENSORS_SCH5627=m
-CONFIG_SENSORS_SCH5636=m
-CONFIG_SENSORS_STTS751=m
-CONFIG_SENSORS_SMM665=m
-CONFIG_SENSORS_ADC128D818=m
-CONFIG_SENSORS_ADS1015=m
-CONFIG_SENSORS_ADS7828=m
-CONFIG_SENSORS_ADS7871=m
-CONFIG_SENSORS_AMC6821=m
-CONFIG_SENSORS_INA209=m
-CONFIG_SENSORS_INA2XX=m
-CONFIG_SENSORS_INA3221=m
-CONFIG_SENSORS_TC74=m
-CONFIG_SENSORS_THMC50=m
-CONFIG_SENSORS_TMP102=m
-CONFIG_SENSORS_TMP103=m
-CONFIG_SENSORS_TMP108=m
-CONFIG_SENSORS_TMP401=m
-CONFIG_SENSORS_TMP421=m
-CONFIG_SENSORS_VIA_CPUTEMP=m
-CONFIG_SENSORS_VIA686A=m
-CONFIG_SENSORS_VT1211=m
-CONFIG_SENSORS_VT8231=m
-CONFIG_SENSORS_W83781D=m
-CONFIG_SENSORS_W83791D=m
-CONFIG_SENSORS_W83792D=m
-CONFIG_SENSORS_W83793=m
-CONFIG_SENSORS_W83795=m
-# CONFIG_SENSORS_W83795_FANCTRL is not set
-CONFIG_SENSORS_W83L785TS=m
-CONFIG_SENSORS_W83L786NG=m
-CONFIG_SENSORS_W83627HF=m
-CONFIG_SENSORS_W83627EHF=m
-CONFIG_SENSORS_WM831X=m
-CONFIG_SENSORS_XGENE=m
-
-#
-# ACPI drivers
-#
-CONFIG_SENSORS_ACPI_POWER=m
-CONFIG_SENSORS_ATK0110=m
-CONFIG_THERMAL=y
-CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS=0
-CONFIG_THERMAL_WRITABLE_TRIPS=y
-CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y
-# CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE is not set
-# CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE is not set
-# CONFIG_THERMAL_DEFAULT_GOV_POWER_ALLOCATOR is not set
-CONFIG_THERMAL_GOV_FAIR_SHARE=y
-CONFIG_THERMAL_GOV_STEP_WISE=y
-CONFIG_THERMAL_GOV_BANG_BANG=y
-CONFIG_THERMAL_GOV_USER_SPACE=y
-# CONFIG_THERMAL_GOV_POWER_ALLOCATOR is not set
-CONFIG_CLOCK_THERMAL=y
-CONFIG_DEVFREQ_THERMAL=y
-# CONFIG_THERMAL_EMULATION is not set
-CONFIG_INTEL_POWERCLAMP=m
-CONFIG_X86_PKG_TEMP_THERMAL=m
-CONFIG_INTEL_SOC_DTS_IOSF_CORE=m
-CONFIG_INTEL_SOC_DTS_THERMAL=m
-
-#
-# ACPI INT340X thermal drivers
-#
-CONFIG_INT340X_THERMAL=m
-CONFIG_ACPI_THERMAL_REL=m
-CONFIG_INT3406_THERMAL=m
-CONFIG_INTEL_BXT_PMIC_THERMAL=m
-CONFIG_INTEL_PCH_THERMAL=m
-CONFIG_GENERIC_ADC_THERMAL=m
-CONFIG_WATCHDOG=y
-CONFIG_WATCHDOG_CORE=y
-# CONFIG_WATCHDOG_NOWAYOUT is not set
-CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED=y
-# CONFIG_WATCHDOG_SYSFS is not set
-
-#
-# Watchdog Device Drivers
-#
-CONFIG_SOFT_WATCHDOG=m
-# CONFIG_SOFT_WATCHDOG_PRETIMEOUT is not set
-CONFIG_DA9052_WATCHDOG=m
-CONFIG_DA9063_WATCHDOG=m
-CONFIG_DA9062_WATCHDOG=m
-CONFIG_MENF21BMC_WATCHDOG=m
-CONFIG_WDAT_WDT=m
-CONFIG_WM831X_WATCHDOG=m
-CONFIG_XILINX_WATCHDOG=m
-CONFIG_ZIIRAVE_WATCHDOG=m
-CONFIG_CADENCE_WATCHDOG=m
-CONFIG_DW_WATCHDOG=m
-CONFIG_MAX63XX_WATCHDOG=m
-CONFIG_RETU_WATCHDOG=m
-CONFIG_ACQUIRE_WDT=m
-CONFIG_ADVANTECH_WDT=m
-CONFIG_ALIM1535_WDT=m
-CONFIG_ALIM7101_WDT=m
-CONFIG_F71808E_WDT=m
-# CONFIG_SP5100_TCO is not set
-CONFIG_SBC_FITPC2_WATCHDOG=m
-CONFIG_EUROTECH_WDT=m
-CONFIG_IB700_WDT=m
-CONFIG_IBMASR=m
-CONFIG_WAFER_WDT=m
-CONFIG_I6300ESB_WDT=m
-CONFIG_IE6XX_WDT=m
-CONFIG_ITCO_WDT=m
-CONFIG_ITCO_VENDOR_SUPPORT=y
-CONFIG_IT8712F_WDT=m
-CONFIG_IT87_WDT=m
-CONFIG_HP_WATCHDOG=m
-CONFIG_KEMPLD_WDT=m
-CONFIG_HPWDT_NMI_DECODING=y
-CONFIG_SC1200_WDT=m
-CONFIG_PC87413_WDT=m
-CONFIG_NV_TCO=m
-CONFIG_60XX_WDT=m
-CONFIG_CPU5_WDT=m
-CONFIG_SMSC_SCH311X_WDT=m
-CONFIG_SMSC37B787_WDT=m
-CONFIG_VIA_WDT=m
-CONFIG_W83627HF_WDT=m
-CONFIG_W83877F_WDT=m
-CONFIG_W83977F_WDT=m
-CONFIG_MACHZ_WDT=m
-CONFIG_SBC_EPX_C3_WATCHDOG=m
-CONFIG_INTEL_MEI_WDT=m
-CONFIG_NI903X_WDT=m
-CONFIG_NIC7018_WDT=m
-CONFIG_MEN_A21_WDT=m
-
-#
-# PCI-based Watchdog Cards
-#
-CONFIG_PCIPCWATCHDOG=m
-CONFIG_WDTPCI=m
-
-#
-# USB-based Watchdog Cards
-#
-CONFIG_USBPCWATCHDOG=m
-
-#
-# Watchdog Pretimeout Governors
-#
-CONFIG_WATCHDOG_PRETIMEOUT_GOV=y
-# CONFIG_WATCHDOG_PRETIMEOUT_DEFAULT_GOV_NOOP is not set
-CONFIG_WATCHDOG_PRETIMEOUT_DEFAULT_GOV_PANIC=y
-CONFIG_WATCHDOG_PRETIMEOUT_GOV_NOOP=m
-CONFIG_WATCHDOG_PRETIMEOUT_GOV_PANIC=y
-CONFIG_SSB_POSSIBLE=y
-
-#
-# Sonics Silicon Backplane
-#
-CONFIG_SSB=m
-CONFIG_SSB_SPROM=y
-CONFIG_SSB_BLOCKIO=y
-CONFIG_SSB_PCIHOST_POSSIBLE=y
-CONFIG_SSB_PCIHOST=y
-CONFIG_SSB_B43_PCI_BRIDGE=y
-CONFIG_SSB_PCMCIAHOST_POSSIBLE=y
-CONFIG_SSB_PCMCIAHOST=y
-CONFIG_SSB_SDIOHOST_POSSIBLE=y
-CONFIG_SSB_SDIOHOST=y
-# CONFIG_SSB_DEBUG is not set
-CONFIG_SSB_DRIVER_PCICORE_POSSIBLE=y
-CONFIG_SSB_DRIVER_PCICORE=y
-CONFIG_SSB_DRIVER_GPIO=y
-CONFIG_BCMA_POSSIBLE=y
-CONFIG_BCMA=m
-CONFIG_BCMA_BLOCKIO=y
-CONFIG_BCMA_HOST_PCI_POSSIBLE=y
-CONFIG_BCMA_HOST_PCI=y
-CONFIG_BCMA_HOST_SOC=y
-CONFIG_BCMA_DRIVER_PCI=y
-CONFIG_BCMA_SFLASH=y
-CONFIG_BCMA_DRIVER_GMAC_CMN=y
-CONFIG_BCMA_DRIVER_GPIO=y
-# CONFIG_BCMA_DEBUG is not set
-
-#
-# Multifunction device drivers
-#
-CONFIG_MFD_CORE=y
-CONFIG_MFD_BCM590XX=m
-CONFIG_MFD_BD9571MWV=m
-CONFIG_MFD_AXP20X=m
-CONFIG_MFD_AXP20X_I2C=m
-CONFIG_MFD_CROS_EC=m
-CONFIG_MFD_CROS_EC_I2C=m
-CONFIG_MFD_CROS_EC_SPI=m
-CONFIG_PMIC_DA9052=y
-CONFIG_MFD_DA9052_SPI=y
-CONFIG_MFD_DA9062=m
-CONFIG_MFD_DA9063=m
-CONFIG_MFD_DA9150=m
-CONFIG_MFD_DLN2=m
-CONFIG_MFD_MC13XXX=m
-CONFIG_MFD_MC13XXX_SPI=m
-CONFIG_MFD_MC13XXX_I2C=m
-CONFIG_HTC_PASIC3=m
-CONFIG_MFD_INTEL_QUARK_I2C_GPIO=m
-CONFIG_LPC_ICH=m
-CONFIG_LPC_SCH=m
-CONFIG_INTEL_SOC_PMIC_BXTWC=m
-CONFIG_MFD_INTEL_LPSS=m
-CONFIG_MFD_INTEL_LPSS_ACPI=m
-CONFIG_MFD_INTEL_LPSS_PCI=m
-CONFIG_MFD_JANZ_CMODIO=m
-CONFIG_MFD_KEMPLD=m
-CONFIG_MFD_88PM800=m
-CONFIG_MFD_88PM805=m
-CONFIG_MFD_MAX14577=m
-CONFIG_MFD_MAX77693=m
-CONFIG_MFD_MAX8907=m
-CONFIG_MFD_MT6397=m
-CONFIG_MFD_MENF21BMC=m
-CONFIG_EZX_PCAP=y
-CONFIG_MFD_VIPERBOARD=m
-CONFIG_MFD_RETU=m
-CONFIG_MFD_PCF50633=m
-CONFIG_PCF50633_ADC=m
-CONFIG_PCF50633_GPIO=m
-CONFIG_UCB1400_CORE=m
-CONFIG_MFD_RDC321X=m
-CONFIG_MFD_RTSX_PCI=m
-CONFIG_MFD_RT5033=m
-CONFIG_MFD_RTSX_USB=m
-CONFIG_MFD_SI476X_CORE=m
-CONFIG_MFD_SM501=m
-CONFIG_MFD_SM501_GPIO=y
-CONFIG_MFD_SKY81452=m
-CONFIG_ABX500_CORE=y
-CONFIG_MFD_SYSCON=y
-CONFIG_MFD_TI_AM335X_TSCADC=m
-CONFIG_MFD_LP3943=m
-CONFIG_MFD_TI_LMU=m
-CONFIG_TPS6105X=m
-CONFIG_TPS65010=m
-CONFIG_TPS6507X=m
-CONFIG_MFD_TPS65086=m
-CONFIG_MFD_TPS65217=m
-CONFIG_MFD_TI_LP873X=m
-CONFIG_MFD_TPS65218=m
-CONFIG_MFD_TPS65912=y
-CONFIG_MFD_TPS65912_I2C=m
-CONFIG_MFD_TPS65912_SPI=y
-CONFIG_MFD_WL1273_CORE=m
-CONFIG_MFD_LM3533=m
-# CONFIG_MFD_TMIO is not set
-CONFIG_MFD_VX855=m
-CONFIG_MFD_ARIZONA=y
-CONFIG_MFD_ARIZONA_I2C=m
-CONFIG_MFD_ARIZONA_SPI=m
-CONFIG_MFD_CS47L24=y
-CONFIG_MFD_WM5102=y
-CONFIG_MFD_WM5110=y
-CONFIG_MFD_WM8997=y
-CONFIG_MFD_WM8998=y
-CONFIG_MFD_WM831X=y
-CONFIG_MFD_WM831X_SPI=y
-CONFIG_MFD_WM8994=m
-CONFIG_REGULATOR=y
-# CONFIG_REGULATOR_DEBUG is not set
-CONFIG_REGULATOR_FIXED_VOLTAGE=m
-CONFIG_REGULATOR_VIRTUAL_CONSUMER=m
-CONFIG_REGULATOR_USERSPACE_CONSUMER=m
-CONFIG_REGULATOR_88PM800=m
-CONFIG_REGULATOR_ACT8865=m
-CONFIG_REGULATOR_AD5398=m
-CONFIG_REGULATOR_ANATOP=m
-CONFIG_REGULATOR_ARIZONA_LDO1=m
-CONFIG_REGULATOR_ARIZONA_MICSUPP=m
-CONFIG_REGULATOR_AXP20X=m
-CONFIG_REGULATOR_BCM590XX=m
-CONFIG_REGULATOR_BD9571MWV=m
-CONFIG_REGULATOR_DA9052=m
-CONFIG_REGULATOR_DA9062=m
-CONFIG_REGULATOR_DA9063=m
-CONFIG_REGULATOR_DA9210=m
-CONFIG_REGULATOR_DA9211=m
-CONFIG_REGULATOR_FAN53555=m
-CONFIG_REGULATOR_GPIO=m
-CONFIG_REGULATOR_ISL9305=m
-CONFIG_REGULATOR_ISL6271A=m
-CONFIG_REGULATOR_LM363X=m
-CONFIG_REGULATOR_LP3971=m
-CONFIG_REGULATOR_LP3972=m
-CONFIG_REGULATOR_LP872X=m
-CONFIG_REGULATOR_LP8755=m
-CONFIG_REGULATOR_LTC3589=m
-CONFIG_REGULATOR_LTC3676=m
-CONFIG_REGULATOR_MAX14577=m
-CONFIG_REGULATOR_MAX1586=m
-CONFIG_REGULATOR_MAX8649=m
-CONFIG_REGULATOR_MAX8660=m
-CONFIG_REGULATOR_MAX8907=m
-CONFIG_REGULATOR_MAX8952=m
-CONFIG_REGULATOR_MAX77693=m
-CONFIG_REGULATOR_MC13XXX_CORE=m
-CONFIG_REGULATOR_MC13783=m
-CONFIG_REGULATOR_MC13892=m
-CONFIG_REGULATOR_MT6311=m
-CONFIG_REGULATOR_MT6323=m
-CONFIG_REGULATOR_MT6397=m
-CONFIG_REGULATOR_PCAP=m
-CONFIG_REGULATOR_PCF50633=m
-CONFIG_REGULATOR_PFUZE100=m
-CONFIG_REGULATOR_PV88060=m
-CONFIG_REGULATOR_PV88080=m
-CONFIG_REGULATOR_PV88090=m
-CONFIG_REGULATOR_PWM=m
-CONFIG_REGULATOR_QCOM_SPMI=m
-CONFIG_REGULATOR_RT5033=m
-CONFIG_REGULATOR_SKY81452=m
-CONFIG_REGULATOR_TPS51632=m
-CONFIG_REGULATOR_TPS6105X=m
-CONFIG_REGULATOR_TPS62360=m
-CONFIG_REGULATOR_TPS65023=m
-CONFIG_REGULATOR_TPS6507X=m
-CONFIG_REGULATOR_TPS65086=m
-CONFIG_REGULATOR_TPS65132=m
-CONFIG_REGULATOR_TPS65217=m
-CONFIG_REGULATOR_TPS6524X=m
-CONFIG_REGULATOR_TPS65912=m
-CONFIG_REGULATOR_WM831X=m
-CONFIG_REGULATOR_WM8994=m
-CONFIG_CEC_CORE=m
-CONFIG_RC_CORE=m
-CONFIG_RC_MAP=m
-CONFIG_RC_DECODERS=y
-CONFIG_LIRC=m
-CONFIG_IR_LIRC_CODEC=m
-CONFIG_IR_NEC_DECODER=m
-CONFIG_IR_RC5_DECODER=m
-CONFIG_IR_RC6_DECODER=m
-CONFIG_IR_JVC_DECODER=m
-CONFIG_IR_SONY_DECODER=m
-CONFIG_IR_SANYO_DECODER=m
-CONFIG_IR_SHARP_DECODER=m
-CONFIG_IR_MCE_KBD_DECODER=m
-CONFIG_IR_XMP_DECODER=m
-CONFIG_RC_DEVICES=y
-CONFIG_RC_ATI_REMOTE=m
-CONFIG_IR_ENE=m
-CONFIG_IR_HIX5HD2=m
-CONFIG_IR_IMON=m
-CONFIG_IR_MCEUSB=m
-CONFIG_IR_ITE_CIR=m
-CONFIG_IR_FINTEK=m
-CONFIG_IR_NUVOTON=m
-CONFIG_IR_REDRAT3=m
-CONFIG_IR_SPI=m
-CONFIG_IR_STREAMZAP=m
-CONFIG_IR_WINBOND_CIR=m
-CONFIG_IR_IGORPLUGUSB=m
-CONFIG_IR_IGUANA=m
-CONFIG_IR_TTUSBIR=m
-CONFIG_RC_LOOPBACK=m
-CONFIG_IR_GPIO_CIR=m
-CONFIG_IR_GPIO_TX=m
-CONFIG_IR_PWM_TX=m
-CONFIG_IR_SERIAL=m
-CONFIG_IR_SERIAL_TRANSMITTER=y
-CONFIG_IR_SIR=m
-CONFIG_MEDIA_SUPPORT=m
-
-#
-# Multimedia core support
-#
-CONFIG_MEDIA_CAMERA_SUPPORT=y
-CONFIG_MEDIA_ANALOG_TV_SUPPORT=y
-CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y
-CONFIG_MEDIA_RADIO_SUPPORT=y
-CONFIG_MEDIA_SDR_SUPPORT=y
-CONFIG_MEDIA_CEC_SUPPORT=y
-CONFIG_MEDIA_CEC_RC=y
-CONFIG_MEDIA_CONTROLLER=y
-CONFIG_MEDIA_CONTROLLER_DVB=y
-CONFIG_VIDEO_DEV=m
-CONFIG_VIDEO_V4L2_SUBDEV_API=y
-CONFIG_VIDEO_V4L2=m
-# CONFIG_VIDEO_ADV_DEBUG is not set
-# CONFIG_VIDEO_FIXED_MINOR_RANGES is not set
-CONFIG_VIDEO_PCI_SKELETON=m
-CONFIG_VIDEO_TUNER=m
-CONFIG_V4L2_MEM2MEM_DEV=m
-CONFIG_V4L2_FLASH_LED_CLASS=m
-CONFIG_V4L2_FWNODE=m
-CONFIG_VIDEOBUF_GEN=m
-CONFIG_VIDEOBUF_DMA_SG=m
-CONFIG_VIDEOBUF_VMALLOC=m
-CONFIG_VIDEOBUF_DVB=m
-CONFIG_VIDEOBUF2_CORE=m
-CONFIG_VIDEOBUF2_MEMOPS=m
-CONFIG_VIDEOBUF2_DMA_CONTIG=m
-CONFIG_VIDEOBUF2_VMALLOC=m
-CONFIG_VIDEOBUF2_DMA_SG=m
-CONFIG_VIDEOBUF2_DVB=m
-CONFIG_DVB_CORE=m
-CONFIG_DVB_NET=y
-CONFIG_TTPCI_EEPROM=m
-CONFIG_DVB_MAX_ADAPTERS=8
-CONFIG_DVB_DYNAMIC_MINORS=y
-# CONFIG_DVB_DEMUX_SECTION_LOSS_LOG is not set
-
-#
-# Media drivers
-#
-CONFIG_MEDIA_USB_SUPPORT=y
-
-#
-# Webcam devices
-#
-CONFIG_USB_VIDEO_CLASS=m
-CONFIG_USB_VIDEO_CLASS_INPUT_EVDEV=y
-CONFIG_USB_GSPCA=m
-CONFIG_USB_M5602=m
-CONFIG_USB_STV06XX=m
-CONFIG_USB_GL860=m
-CONFIG_USB_GSPCA_BENQ=m
-CONFIG_USB_GSPCA_CONEX=m
-CONFIG_USB_GSPCA_CPIA1=m
-CONFIG_USB_GSPCA_DTCS033=m
-CONFIG_USB_GSPCA_ETOMS=m
-CONFIG_USB_GSPCA_FINEPIX=m
-CONFIG_USB_GSPCA_JEILINJ=m
-CONFIG_USB_GSPCA_JL2005BCD=m
-CONFIG_USB_GSPCA_KINECT=m
-CONFIG_USB_GSPCA_KONICA=m
-CONFIG_USB_GSPCA_MARS=m
-CONFIG_USB_GSPCA_MR97310A=m
-CONFIG_USB_GSPCA_NW80X=m
-CONFIG_USB_GSPCA_OV519=m
-CONFIG_USB_GSPCA_OV534=m
-CONFIG_USB_GSPCA_OV534_9=m
-CONFIG_USB_GSPCA_PAC207=m
-CONFIG_USB_GSPCA_PAC7302=m
-CONFIG_USB_GSPCA_PAC7311=m
-CONFIG_USB_GSPCA_SE401=m
-CONFIG_USB_GSPCA_SN9C2028=m
-CONFIG_USB_GSPCA_SN9C20X=m
-CONFIG_USB_GSPCA_SONIXB=m
-CONFIG_USB_GSPCA_SONIXJ=m
-CONFIG_USB_GSPCA_SPCA500=m
-CONFIG_USB_GSPCA_SPCA501=m
-CONFIG_USB_GSPCA_SPCA505=m
-CONFIG_USB_GSPCA_SPCA506=m
-CONFIG_USB_GSPCA_SPCA508=m
-CONFIG_USB_GSPCA_SPCA561=m
-CONFIG_USB_GSPCA_SPCA1528=m
-CONFIG_USB_GSPCA_SQ905=m
-CONFIG_USB_GSPCA_SQ905C=m
-CONFIG_USB_GSPCA_SQ930X=m
-CONFIG_USB_GSPCA_STK014=m
-CONFIG_USB_GSPCA_STK1135=m
-CONFIG_USB_GSPCA_STV0680=m
-CONFIG_USB_GSPCA_SUNPLUS=m
-CONFIG_USB_GSPCA_T613=m
-CONFIG_USB_GSPCA_TOPRO=m
-CONFIG_USB_GSPCA_TOUPTEK=m
-CONFIG_USB_GSPCA_TV8532=m
-CONFIG_USB_GSPCA_VC032X=m
-CONFIG_USB_GSPCA_VICAM=m
-CONFIG_USB_GSPCA_XIRLINK_CIT=m
-CONFIG_USB_GSPCA_ZC3XX=m
-CONFIG_USB_PWC=m
-# CONFIG_USB_PWC_DEBUG is not set
-CONFIG_USB_PWC_INPUT_EVDEV=y
-CONFIG_VIDEO_CPIA2=m
-CONFIG_USB_ZR364XX=m
-CONFIG_USB_STKWEBCAM=m
-CONFIG_USB_S2255=m
-CONFIG_VIDEO_USBTV=m
-
-#
-# Analog TV USB devices
-#
-CONFIG_VIDEO_PVRUSB2=m
-CONFIG_VIDEO_PVRUSB2_SYSFS=y
-CONFIG_VIDEO_PVRUSB2_DVB=y
-# CONFIG_VIDEO_PVRUSB2_DEBUGIFC is not set
-CONFIG_VIDEO_HDPVR=m
-CONFIG_VIDEO_USBVISION=m
-CONFIG_VIDEO_STK1160_COMMON=m
-CONFIG_VIDEO_STK1160=m
-CONFIG_VIDEO_GO7007=m
-CONFIG_VIDEO_GO7007_USB=m
-CONFIG_VIDEO_GO7007_LOADER=m
-CONFIG_VIDEO_GO7007_USB_S2250_BOARD=m
-
-#
-# Analog/digital TV USB devices
-#
-CONFIG_VIDEO_AU0828=m
-CONFIG_VIDEO_AU0828_V4L2=y
-CONFIG_VIDEO_AU0828_RC=y
-CONFIG_VIDEO_CX231XX=m
-CONFIG_VIDEO_CX231XX_RC=y
-CONFIG_VIDEO_CX231XX_ALSA=m
-CONFIG_VIDEO_CX231XX_DVB=m
-CONFIG_VIDEO_TM6000=m
-CONFIG_VIDEO_TM6000_ALSA=m
-CONFIG_VIDEO_TM6000_DVB=m
-
-#
-# Digital TV USB devices
-#
-CONFIG_DVB_USB=m
-# CONFIG_DVB_USB_DEBUG is not set
-CONFIG_DVB_USB_DIB3000MC=m
-CONFIG_DVB_USB_A800=m
-CONFIG_DVB_USB_DIBUSB_MB=m
-CONFIG_DVB_USB_DIBUSB_MB_FAULTY=y
-CONFIG_DVB_USB_DIBUSB_MC=m
-CONFIG_DVB_USB_DIB0700=m
-CONFIG_DVB_USB_UMT_010=m
-CONFIG_DVB_USB_CXUSB=m
-CONFIG_DVB_USB_M920X=m
-CONFIG_DVB_USB_DIGITV=m
-CONFIG_DVB_USB_VP7045=m
-CONFIG_DVB_USB_VP702X=m
-CONFIG_DVB_USB_GP8PSK=m
-CONFIG_DVB_USB_NOVA_T_USB2=m
-CONFIG_DVB_USB_TTUSB2=m
-CONFIG_DVB_USB_DTT200U=m
-CONFIG_DVB_USB_OPERA1=m
-CONFIG_DVB_USB_AF9005=m
-CONFIG_DVB_USB_AF9005_REMOTE=m
-CONFIG_DVB_USB_PCTV452E=m
-CONFIG_DVB_USB_DW2102=m
-CONFIG_DVB_USB_CINERGY_T2=m
-CONFIG_DVB_USB_DTV5100=m
-CONFIG_DVB_USB_FRIIO=m
-CONFIG_DVB_USB_AZ6027=m
-CONFIG_DVB_USB_TECHNISAT_USB2=m
-CONFIG_DVB_USB_V2=m
-CONFIG_DVB_USB_AF9015=m
-CONFIG_DVB_USB_AF9035=m
-CONFIG_DVB_USB_ANYSEE=m
-CONFIG_DVB_USB_AU6610=m
-CONFIG_DVB_USB_AZ6007=m
-CONFIG_DVB_USB_CE6230=m
-CONFIG_DVB_USB_EC168=m
-CONFIG_DVB_USB_GL861=m
-CONFIG_DVB_USB_LME2510=m
-CONFIG_DVB_USB_MXL111SF=m
-CONFIG_DVB_USB_RTL28XXU=m
-CONFIG_DVB_USB_DVBSKY=m
-CONFIG_DVB_USB_ZD1301=m
-CONFIG_DVB_TTUSB_BUDGET=m
-CONFIG_DVB_TTUSB_DEC=m
-CONFIG_SMS_USB_DRV=m
-CONFIG_DVB_B2C2_FLEXCOP_USB=m
-# CONFIG_DVB_B2C2_FLEXCOP_USB_DEBUG is not set
-CONFIG_DVB_AS102=m
-
-#
-# Webcam, TV (analog/digital) USB devices
-#
-CONFIG_VIDEO_EM28XX=m
-CONFIG_VIDEO_EM28XX_V4L2=m
-CONFIG_VIDEO_EM28XX_ALSA=m
-CONFIG_VIDEO_EM28XX_DVB=m
-CONFIG_VIDEO_EM28XX_RC=m
-
-#
-# Software defined radio USB devices
-#
-CONFIG_USB_AIRSPY=m
-CONFIG_USB_HACKRF=m
-CONFIG_USB_MSI2500=m
-
-#
-# USB HDMI CEC adapters
-#
-CONFIG_USB_PULSE8_CEC=m
-CONFIG_USB_RAINSHADOW_CEC=m
-CONFIG_MEDIA_PCI_SUPPORT=y
-
-#
-# Media capture support
-#
-CONFIG_VIDEO_MEYE=m
-CONFIG_VIDEO_SOLO6X10=m
-CONFIG_VIDEO_TW5864=m
-CONFIG_VIDEO_TW68=m
-CONFIG_VIDEO_TW686X=m
-CONFIG_VIDEO_ZORAN=m
-CONFIG_VIDEO_ZORAN_DC30=m
-CONFIG_VIDEO_ZORAN_ZR36060=m
-CONFIG_VIDEO_ZORAN_BUZ=m
-CONFIG_VIDEO_ZORAN_DC10=m
-CONFIG_VIDEO_ZORAN_LML33=m
-CONFIG_VIDEO_ZORAN_LML33R10=m
-CONFIG_VIDEO_ZORAN_AVS6EYES=m
-
-#
-# Media capture/analog TV support
-#
-CONFIG_VIDEO_IVTV=m
-# CONFIG_VIDEO_IVTV_DEPRECATED_IOCTLS is not set
-CONFIG_VIDEO_IVTV_ALSA=m
-CONFIG_VIDEO_FB_IVTV=m
-CONFIG_VIDEO_HEXIUM_GEMINI=m
-CONFIG_VIDEO_HEXIUM_ORION=m
-CONFIG_VIDEO_MXB=m
-CONFIG_VIDEO_DT3155=m
-
-#
-# Media capture/analog/hybrid TV support
-#
-CONFIG_VIDEO_CX18=m
-CONFIG_VIDEO_CX18_ALSA=m
-CONFIG_VIDEO_CX23885=m
-CONFIG_MEDIA_ALTERA_CI=m
-CONFIG_VIDEO_CX25821=m
-CONFIG_VIDEO_CX25821_ALSA=m
-CONFIG_VIDEO_CX88=m
-CONFIG_VIDEO_CX88_ALSA=m
-CONFIG_VIDEO_CX88_BLACKBIRD=m
-CONFIG_VIDEO_CX88_DVB=m
-CONFIG_VIDEO_CX88_ENABLE_VP3054=y
-CONFIG_VIDEO_CX88_VP3054=m
-CONFIG_VIDEO_CX88_MPEG=m
-CONFIG_VIDEO_BT848=m
-CONFIG_DVB_BT8XX=m
-CONFIG_VIDEO_SAA7134=m
-CONFIG_VIDEO_SAA7134_ALSA=m
-CONFIG_VIDEO_SAA7134_RC=y
-CONFIG_VIDEO_SAA7134_DVB=m
-CONFIG_VIDEO_SAA7134_GO7007=m
-CONFIG_VIDEO_SAA7164=m
-# CONFIG_VIDEO_COBALT is not set
-
-#
-# Media digital TV PCI Adapters
-#
-CONFIG_DVB_AV7110_IR=y
-CONFIG_DVB_AV7110=m
-CONFIG_DVB_AV7110_OSD=y
-CONFIG_DVB_BUDGET_CORE=m
-CONFIG_DVB_BUDGET=m
-CONFIG_DVB_BUDGET_CI=m
-CONFIG_DVB_BUDGET_AV=m
-CONFIG_DVB_BUDGET_PATCH=m
-CONFIG_DVB_B2C2_FLEXCOP_PCI=m
-# CONFIG_DVB_B2C2_FLEXCOP_PCI_DEBUG is not set
-CONFIG_DVB_PLUTO2=m
-CONFIG_DVB_DM1105=m
-CONFIG_DVB_PT1=m
-CONFIG_DVB_PT3=m
-CONFIG_MANTIS_CORE=m
-CONFIG_DVB_MANTIS=m
-CONFIG_DVB_HOPPER=m
-CONFIG_DVB_NGENE=m
-CONFIG_DVB_DDBRIDGE=m
-# CONFIG_DVB_DDBRIDGE_MSIENABLE is not set
-CONFIG_DVB_SMIPCIE=m
-CONFIG_DVB_NETUP_UNIDVB=m
-CONFIG_V4L_PLATFORM_DRIVERS=y
-CONFIG_VIDEO_CAFE_CCIC=m
-CONFIG_SOC_CAMERA=m
-CONFIG_SOC_CAMERA_PLATFORM=m
-CONFIG_V4L_MEM2MEM_DRIVERS=y
-CONFIG_VIDEO_MEM2MEM_DEINTERLACE=m
-CONFIG_VIDEO_SH_VEU=m
-# CONFIG_V4L_TEST_DRIVERS is not set
-CONFIG_DVB_PLATFORM_DRIVERS=y
-CONFIG_CEC_PLATFORM_DRIVERS=y
-CONFIG_SDR_PLATFORM_DRIVERS=y
-
-#
-# Supported MMC/SDIO adapters
-#
-CONFIG_SMS_SDIO_DRV=m
-CONFIG_RADIO_ADAPTERS=y
-CONFIG_RADIO_TEA575X=m
-CONFIG_RADIO_SI470X=y
-CONFIG_USB_SI470X=m
-CONFIG_I2C_SI470X=m
-CONFIG_RADIO_SI4713=m
-CONFIG_USB_SI4713=m
-CONFIG_PLATFORM_SI4713=m
-CONFIG_I2C_SI4713=m
-CONFIG_RADIO_SI476X=m
-CONFIG_USB_MR800=m
-CONFIG_USB_DSBR=m
-CONFIG_RADIO_MAXIRADIO=m
-CONFIG_RADIO_SHARK=m
-CONFIG_RADIO_SHARK2=m
-CONFIG_USB_KEENE=m
-CONFIG_USB_RAREMONO=m
-CONFIG_USB_MA901=m
-CONFIG_RADIO_TEA5764=m
-CONFIG_RADIO_SAA7706H=m
-CONFIG_RADIO_TEF6862=m
-CONFIG_RADIO_WL1273=m
-
-#
-# Texas Instruments WL128x FM driver (ST based)
-#
-CONFIG_RADIO_WL128X=m
-
-#
-# Supported FireWire (IEEE 1394) Adapters
-#
-CONFIG_DVB_FIREDTV=m
-CONFIG_DVB_FIREDTV_INPUT=y
-CONFIG_MEDIA_COMMON_OPTIONS=y
-
-#
-# common driver options
-#
-CONFIG_VIDEO_CX2341X=m
-CONFIG_VIDEO_TVEEPROM=m
-CONFIG_CYPRESS_FIRMWARE=m
-CONFIG_DVB_B2C2_FLEXCOP=m
-CONFIG_VIDEO_SAA7146=m
-CONFIG_VIDEO_SAA7146_VV=m
-CONFIG_SMS_SIANO_MDTV=m
-CONFIG_SMS_SIANO_RC=y
-# CONFIG_SMS_SIANO_DEBUGFS is not set
-
-#
-# Media ancillary drivers (tuners, sensors, i2c, spi, frontends)
-#
-CONFIG_MEDIA_SUBDRV_AUTOSELECT=y
-CONFIG_MEDIA_ATTACH=y
-CONFIG_VIDEO_IR_I2C=m
-
-#
-# Audio decoders, processors and mixers
-#
-CONFIG_VIDEO_TVAUDIO=m
-CONFIG_VIDEO_TDA7432=m
-CONFIG_VIDEO_TDA9840=m
-CONFIG_VIDEO_TEA6415C=m
-CONFIG_VIDEO_TEA6420=m
-CONFIG_VIDEO_MSP3400=m
-CONFIG_VIDEO_CS3308=m
-CONFIG_VIDEO_CS5345=m
-CONFIG_VIDEO_CS53L32A=m
-CONFIG_VIDEO_UDA1342=m
-CONFIG_VIDEO_WM8775=m
-CONFIG_VIDEO_WM8739=m
-CONFIG_VIDEO_VP27SMPX=m
-CONFIG_VIDEO_SONY_BTF_MPX=m
-
-#
-# RDS decoders
-#
-CONFIG_VIDEO_SAA6588=m
-
-#
-# Video decoders
-#
-CONFIG_VIDEO_BT819=m
-CONFIG_VIDEO_BT856=m
-CONFIG_VIDEO_BT866=m
-CONFIG_VIDEO_KS0127=m
-CONFIG_VIDEO_SAA7110=m
-CONFIG_VIDEO_SAA711X=m
-CONFIG_VIDEO_TVP5150=m
-CONFIG_VIDEO_TW2804=m
-CONFIG_VIDEO_TW9903=m
-CONFIG_VIDEO_TW9906=m
-CONFIG_VIDEO_VPX3220=m
-
-#
-# Video and audio decoders
-#
-CONFIG_VIDEO_SAA717X=m
-CONFIG_VIDEO_CX25840=m
-
-#
-# Video encoders
-#
-CONFIG_VIDEO_SAA7127=m
-CONFIG_VIDEO_SAA7185=m
-CONFIG_VIDEO_ADV7170=m
-CONFIG_VIDEO_ADV7175=m
-
-#
-# Camera sensor devices
-#
-CONFIG_VIDEO_OV2640=m
-CONFIG_VIDEO_OV7640=m
-CONFIG_VIDEO_OV7670=m
-CONFIG_VIDEO_MT9M111=m
-CONFIG_VIDEO_MT9V011=m
-
-#
-# Flash devices
-#
-
-#
-# Video improvement chips
-#
-CONFIG_VIDEO_UPD64031A=m
-CONFIG_VIDEO_UPD64083=m
-
-#
-# Audio/Video compression chips
-#
-CONFIG_VIDEO_SAA6752HS=m
-
-#
-# SDR tuner chips
-#
-
-#
-# Miscellaneous helper chips
-#
-CONFIG_VIDEO_M52790=m
-
-#
-# Sensors used on soc_camera driver
-#
-
-#
-# soc_camera sensor drivers
-#
-CONFIG_SOC_CAMERA_IMX074=m
-CONFIG_SOC_CAMERA_MT9M001=m
-CONFIG_SOC_CAMERA_MT9M111=m
-CONFIG_SOC_CAMERA_MT9T031=m
-CONFIG_SOC_CAMERA_MT9T112=m
-CONFIG_SOC_CAMERA_MT9V022=m
-CONFIG_SOC_CAMERA_OV5642=m
-CONFIG_SOC_CAMERA_OV772X=m
-CONFIG_SOC_CAMERA_OV9640=m
-CONFIG_SOC_CAMERA_OV9740=m
-CONFIG_SOC_CAMERA_RJ54N1=m
-CONFIG_SOC_CAMERA_TW9910=m
-CONFIG_MEDIA_TUNER=m
-CONFIG_MEDIA_TUNER_SIMPLE=m
-CONFIG_MEDIA_TUNER_TDA8290=m
-CONFIG_MEDIA_TUNER_TDA827X=m
-CONFIG_MEDIA_TUNER_TDA18271=m
-CONFIG_MEDIA_TUNER_TDA9887=m
-CONFIG_MEDIA_TUNER_TEA5761=m
-CONFIG_MEDIA_TUNER_TEA5767=m
-CONFIG_MEDIA_TUNER_MSI001=m
-CONFIG_MEDIA_TUNER_MT20XX=m
-CONFIG_MEDIA_TUNER_MT2060=m
-CONFIG_MEDIA_TUNER_MT2063=m
-CONFIG_MEDIA_TUNER_MT2266=m
-CONFIG_MEDIA_TUNER_MT2131=m
-CONFIG_MEDIA_TUNER_QT1010=m
-CONFIG_MEDIA_TUNER_XC2028=m
-CONFIG_MEDIA_TUNER_XC5000=m
-CONFIG_MEDIA_TUNER_XC4000=m
-CONFIG_MEDIA_TUNER_MXL5005S=m
-CONFIG_MEDIA_TUNER_MXL5007T=m
-CONFIG_MEDIA_TUNER_MC44S803=m
-CONFIG_MEDIA_TUNER_MAX2165=m
-CONFIG_MEDIA_TUNER_TDA18218=m
-CONFIG_MEDIA_TUNER_FC0011=m
-CONFIG_MEDIA_TUNER_FC0012=m
-CONFIG_MEDIA_TUNER_FC0013=m
-CONFIG_MEDIA_TUNER_TDA18212=m
-CONFIG_MEDIA_TUNER_E4000=m
-CONFIG_MEDIA_TUNER_FC2580=m
-CONFIG_MEDIA_TUNER_M88RS6000T=m
-CONFIG_MEDIA_TUNER_TUA9001=m
-CONFIG_MEDIA_TUNER_SI2157=m
-CONFIG_MEDIA_TUNER_IT913X=m
-CONFIG_MEDIA_TUNER_R820T=m
-CONFIG_MEDIA_TUNER_MXL301RF=m
-CONFIG_MEDIA_TUNER_QM1D1C0042=m
-
-#
-# Multistandard (satellite) frontends
-#
-CONFIG_DVB_STB0899=m
-CONFIG_DVB_STB6100=m
-CONFIG_DVB_STV090x=m
-CONFIG_DVB_STV0910=m
-CONFIG_DVB_STV6110x=m
-CONFIG_DVB_STV6111=m
-CONFIG_DVB_MXL5XX=m
-CONFIG_DVB_M88DS3103=m
-
-#
-# Multistandard (cable + terrestrial) frontends
-#
-CONFIG_DVB_DRXK=m
-CONFIG_DVB_TDA18271C2DD=m
-CONFIG_DVB_SI2165=m
-CONFIG_DVB_MN88472=m
-CONFIG_DVB_MN88473=m
-
-#
-# DVB-S (satellite) frontends
-#
-CONFIG_DVB_CX24110=m
-CONFIG_DVB_CX24123=m
-CONFIG_DVB_MT312=m
-CONFIG_DVB_ZL10036=m
-CONFIG_DVB_ZL10039=m
-CONFIG_DVB_S5H1420=m
-CONFIG_DVB_STV0288=m
-CONFIG_DVB_STB6000=m
-CONFIG_DVB_STV0299=m
-CONFIG_DVB_STV6110=m
-CONFIG_DVB_STV0900=m
-CONFIG_DVB_TDA8083=m
-CONFIG_DVB_TDA10086=m
-CONFIG_DVB_TDA8261=m
-CONFIG_DVB_VES1X93=m
-CONFIG_DVB_TUNER_ITD1000=m
-CONFIG_DVB_TUNER_CX24113=m
-CONFIG_DVB_TDA826X=m
-CONFIG_DVB_TUA6100=m
-CONFIG_DVB_CX24116=m
-CONFIG_DVB_CX24117=m
-CONFIG_DVB_CX24120=m
-CONFIG_DVB_SI21XX=m
-CONFIG_DVB_TS2020=m
-CONFIG_DVB_DS3000=m
-CONFIG_DVB_MB86A16=m
-CONFIG_DVB_TDA10071=m
-
-#
-# DVB-T (terrestrial) frontends
-#
-CONFIG_DVB_SP8870=m
-CONFIG_DVB_SP887X=m
-CONFIG_DVB_CX22700=m
-CONFIG_DVB_CX22702=m
-CONFIG_DVB_DRXD=m
-CONFIG_DVB_L64781=m
-CONFIG_DVB_TDA1004X=m
-CONFIG_DVB_NXT6000=m
-CONFIG_DVB_MT352=m
-CONFIG_DVB_ZL10353=m
-CONFIG_DVB_DIB3000MB=m
-CONFIG_DVB_DIB3000MC=m
-CONFIG_DVB_DIB7000M=m
-CONFIG_DVB_DIB7000P=m
-CONFIG_DVB_TDA10048=m
-CONFIG_DVB_AF9013=m
-CONFIG_DVB_EC100=m
-CONFIG_DVB_STV0367=m
-CONFIG_DVB_CXD2820R=m
-CONFIG_DVB_CXD2841ER=m
-CONFIG_DVB_RTL2830=m
-CONFIG_DVB_RTL2832=m
-CONFIG_DVB_RTL2832_SDR=m
-CONFIG_DVB_SI2168=m
-CONFIG_DVB_AS102_FE=m
-CONFIG_DVB_ZD1301_DEMOD=m
-CONFIG_DVB_GP8PSK_FE=m
-
-#
-# DVB-C (cable) frontends
-#
-CONFIG_DVB_VES1820=m
-CONFIG_DVB_TDA10021=m
-CONFIG_DVB_TDA10023=m
-CONFIG_DVB_STV0297=m
-
-#
-# ATSC (North American/Korean Terrestrial/Cable DTV) frontends
-#
-CONFIG_DVB_NXT200X=m
-CONFIG_DVB_OR51211=m
-CONFIG_DVB_OR51132=m
-CONFIG_DVB_BCM3510=m
-CONFIG_DVB_LGDT330X=m
-CONFIG_DVB_LGDT3305=m
-CONFIG_DVB_LGDT3306A=m
-CONFIG_DVB_LG2160=m
-CONFIG_DVB_S5H1409=m
-CONFIG_DVB_AU8522=m
-CONFIG_DVB_AU8522_DTV=m
-CONFIG_DVB_AU8522_V4L=m
-CONFIG_DVB_S5H1411=m
-
-#
-# ISDB-T (terrestrial) frontends
-#
-CONFIG_DVB_S921=m
-CONFIG_DVB_DIB8000=m
-CONFIG_DVB_MB86A20S=m
-
-#
-# ISDB-S (satellite) & ISDB-T (terrestrial) frontends
-#
-CONFIG_DVB_TC90522=m
-
-#
-# Digital terrestrial only tuners/PLL
-#
-CONFIG_DVB_PLL=m
-CONFIG_DVB_TUNER_DIB0070=m
-CONFIG_DVB_TUNER_DIB0090=m
-
-#
-# SEC control devices for DVB-S
-#
-CONFIG_DVB_DRX39XYJ=m
-CONFIG_DVB_LNBH25=m
-CONFIG_DVB_LNBP21=m
-CONFIG_DVB_LNBP22=m
-CONFIG_DVB_ISL6405=m
-CONFIG_DVB_ISL6421=m
-CONFIG_DVB_ISL6423=m
-CONFIG_DVB_A8293=m
-CONFIG_DVB_SP2=m
-CONFIG_DVB_LGS8GXX=m
-CONFIG_DVB_ATBM8830=m
-CONFIG_DVB_TDA665x=m
-CONFIG_DVB_IX2505V=m
-CONFIG_DVB_M88RS2000=m
-CONFIG_DVB_AF9033=m
-CONFIG_DVB_HORUS3A=m
-CONFIG_DVB_ASCOT2E=m
-CONFIG_DVB_HELENE=m
-
-#
-# Tools to develop new frontends
-#
-# CONFIG_DVB_DUMMY_FE is not set
-
-#
-# Graphics support
-#
-CONFIG_AGP=m
-CONFIG_AGP_AMD64=m
-CONFIG_AGP_INTEL=m
-CONFIG_AGP_SIS=m
-CONFIG_AGP_VIA=m
-CONFIG_INTEL_GTT=m
-CONFIG_VGA_ARB=y
-CONFIG_VGA_ARB_MAX_GPUS=16
-CONFIG_VGA_SWITCHEROO=y
-CONFIG_DRM=m
-CONFIG_DRM_MIPI_DSI=y
-CONFIG_DRM_DP_AUX_CHARDEV=y
-# CONFIG_DRM_DEBUG_MM_SELFTEST is not set
-CONFIG_DRM_KMS_HELPER=m
-CONFIG_DRM_KMS_FB_HELPER=y
-CONFIG_DRM_FBDEV_EMULATION=y
-CONFIG_DRM_FBDEV_OVERALLOC=100
-# CONFIG_DRM_LOAD_EDID_FIRMWARE is not set
-CONFIG_DRM_TTM=m
-CONFIG_DRM_GEM_CMA_HELPER=y
-CONFIG_DRM_KMS_CMA_HELPER=y
-CONFIG_DRM_VM=y
-
-#
-# I2C encoder or helper chips
-#
-CONFIG_DRM_I2C_CH7006=m
-CONFIG_DRM_I2C_SIL164=m
-CONFIG_DRM_I2C_NXP_TDA998X=m
-CONFIG_DRM_RADEON=m
-# CONFIG_DRM_RADEON_USERPTR is not set
-CONFIG_DRM_AMDGPU=m
-CONFIG_DRM_AMDGPU_SI=y
-CONFIG_DRM_AMDGPU_CIK=y
-CONFIG_DRM_AMDGPU_USERPTR=y
-# CONFIG_DRM_AMDGPU_GART_DEBUGFS is not set
-
-#
-# ACP (Audio CoProcessor) Configuration
-#
-CONFIG_DRM_AMD_ACP=y
-CONFIG_DRM_NOUVEAU=m
-CONFIG_NOUVEAU_DEBUG=5
-CONFIG_NOUVEAU_DEBUG_DEFAULT=3
-CONFIG_DRM_NOUVEAU_BACKLIGHT=y
-CONFIG_DRM_I915=m
-# CONFIG_DRM_I915_ALPHA_SUPPORT is not set
-CONFIG_DRM_I915_CAPTURE_ERROR=y
-CONFIG_DRM_I915_COMPRESS_ERROR=y
-CONFIG_DRM_I915_USERPTR=y
-CONFIG_DRM_I915_GVT=y
-CONFIG_DRM_I915_GVT_KVMGT=m
-CONFIG_DRM_VGEM=m
-CONFIG_DRM_VMWGFX=m
-CONFIG_DRM_VMWGFX_FBCON=y
-CONFIG_DRM_GMA500=m
-CONFIG_DRM_GMA600=y
-CONFIG_DRM_GMA3600=y
-CONFIG_DRM_UDL=m
-CONFIG_DRM_AST=m
-CONFIG_DRM_MGAG200=m
-CONFIG_DRM_CIRRUS_QEMU=m
-CONFIG_DRM_QXL=m
-CONFIG_DRM_BOCHS=m
-CONFIG_DRM_VIRTIO_GPU=m
-CONFIG_DRM_PANEL=y
-
-#
-# Display Panels
-#
-CONFIG_DRM_BRIDGE=y
-CONFIG_DRM_PANEL_BRIDGE=y
-
-#
-# Display Interface Bridges
-#
-CONFIG_DRM_ANALOGIX_ANX78XX=m
-CONFIG_HSA_AMD=m
-CONFIG_DRM_HISI_HIBMC=m
-CONFIG_DRM_TINYDRM=m
-CONFIG_TINYDRM_MIPI_DBI=m
-CONFIG_TINYDRM_MI0283QT=m
-CONFIG_TINYDRM_REPAPER=m
-CONFIG_TINYDRM_ST7586=m
-# CONFIG_DRM_LEGACY is not set
-# CONFIG_DRM_LIB_RANDOM is not set
-
-#
-# Frame buffer Devices
-#
-CONFIG_FB=y
-CONFIG_FIRMWARE_EDID=y
-CONFIG_FB_CMDLINE=y
-CONFIG_FB_NOTIFY=y
-# CONFIG_FB_DDC is not set
-CONFIG_FB_BOOT_VESA_SUPPORT=y
-CONFIG_FB_CFB_FILLRECT=y
-CONFIG_FB_CFB_COPYAREA=y
-CONFIG_FB_CFB_IMAGEBLIT=y
-# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
-CONFIG_FB_SYS_FILLRECT=m
-CONFIG_FB_SYS_COPYAREA=m
-CONFIG_FB_SYS_IMAGEBLIT=m
-# CONFIG_FB_PROVIDE_GET_FB_UNMAPPED_AREA is not set
-CONFIG_FB_FOREIGN_ENDIAN=y
-CONFIG_FB_BOTH_ENDIAN=y
-# CONFIG_FB_BIG_ENDIAN is not set
-# CONFIG_FB_LITTLE_ENDIAN is not set
-CONFIG_FB_SYS_FOPS=m
-CONFIG_FB_DEFERRED_IO=y
-# CONFIG_FB_SVGALIB is not set
-# CONFIG_FB_MACMODES is not set
-CONFIG_FB_BACKLIGHT=y
-CONFIG_FB_MODE_HELPERS=y
-CONFIG_FB_TILEBLITTING=y
-
-#
-# Frame buffer hardware drivers
-#
-# CONFIG_FB_CIRRUS is not set
-# CONFIG_FB_PM2 is not set
-# CONFIG_FB_CYBER2000 is not set
-# CONFIG_FB_ARC is not set
-# CONFIG_FB_ASILIANT is not set
-# CONFIG_FB_IMSTT is not set
-# CONFIG_FB_VGA16 is not set
-# CONFIG_FB_UVESA is not set
-CONFIG_FB_VESA=y
-CONFIG_FB_EFI=y
-# CONFIG_FB_N411 is not set
-# CONFIG_FB_HGA is not set
-# CONFIG_FB_OPENCORES is not set
-# CONFIG_FB_S1D13XXX is not set
-# CONFIG_FB_NVIDIA is not set
-# CONFIG_FB_RIVA is not set
-# CONFIG_FB_I740 is not set
-# CONFIG_FB_LE80578 is not set
-# CONFIG_FB_MATROX is not set
-# CONFIG_FB_RADEON is not set
-# CONFIG_FB_ATY128 is not set
-# CONFIG_FB_ATY is not set
-# CONFIG_FB_S3 is not set
-# CONFIG_FB_SAVAGE is not set
-# CONFIG_FB_SIS is not set
-# CONFIG_FB_VIA is not set
-# CONFIG_FB_NEOMAGIC is not set
-# CONFIG_FB_KYRO is not set
-# CONFIG_FB_3DFX is not set
-# CONFIG_FB_VOODOO1 is not set
-# CONFIG_FB_VT8623 is not set
-# CONFIG_FB_TRIDENT is not set
-# CONFIG_FB_ARK is not set
-# CONFIG_FB_PM3 is not set
-# CONFIG_FB_CARMINE is not set
-# CONFIG_FB_SM501 is not set
-# CONFIG_FB_SMSCUFX is not set
-# CONFIG_FB_UDL is not set
-# CONFIG_FB_IBM_GXT4500 is not set
-# CONFIG_FB_VIRTUAL is not set
-# CONFIG_FB_METRONOME is not set
-# CONFIG_FB_MB862XX is not set
-# CONFIG_FB_BROADSHEET is not set
-# CONFIG_FB_AUO_K190X is not set
-# CONFIG_FB_HYPERV is not set
-CONFIG_FB_SIMPLE=y
-# CONFIG_FB_SM712 is not set
-CONFIG_BACKLIGHT_LCD_SUPPORT=y
-CONFIG_LCD_CLASS_DEVICE=m
-CONFIG_LCD_L4F00242T03=m
-CONFIG_LCD_LMS283GF05=m
-CONFIG_LCD_LTV350QV=m
-CONFIG_LCD_ILI922X=m
-CONFIG_LCD_ILI9320=m
-CONFIG_LCD_TDO24M=m
-CONFIG_LCD_VGG2432A4=m
-CONFIG_LCD_PLATFORM=m
-CONFIG_LCD_S6E63M0=m
-CONFIG_LCD_LD9040=m
-CONFIG_LCD_AMS369FG06=m
-CONFIG_LCD_LMS501KF03=m
-CONFIG_LCD_HX8357=m
-CONFIG_BACKLIGHT_CLASS_DEVICE=y
-CONFIG_BACKLIGHT_GENERIC=m
-CONFIG_BACKLIGHT_LM3533=m
-CONFIG_BACKLIGHT_PWM=m
-CONFIG_BACKLIGHT_DA9052=m
-CONFIG_BACKLIGHT_APPLE=m
-CONFIG_BACKLIGHT_PM8941_WLED=m
-CONFIG_BACKLIGHT_SAHARA=m
-CONFIG_BACKLIGHT_WM831X=m
-CONFIG_BACKLIGHT_ADP8860=m
-CONFIG_BACKLIGHT_ADP8870=m
-CONFIG_BACKLIGHT_PCF50633=m
-CONFIG_BACKLIGHT_LM3630A=m
-CONFIG_BACKLIGHT_LM3639=m
-CONFIG_BACKLIGHT_LP855X=m
-CONFIG_BACKLIGHT_SKY81452=m
-CONFIG_BACKLIGHT_TPS65217=m
-CONFIG_BACKLIGHT_GPIO=m
-CONFIG_BACKLIGHT_LV5207LP=m
-CONFIG_BACKLIGHT_BD6107=m
-CONFIG_BACKLIGHT_ARCXCNN=m
-# CONFIG_VGASTATE is not set
-CONFIG_HDMI=y
-
-#
-# Console display driver support
-#
-CONFIG_VGA_CONSOLE=y
-CONFIG_VGACON_SOFT_SCROLLBACK=y
-CONFIG_VGACON_SOFT_SCROLLBACK_SIZE=64
-# CONFIG_VGACON_SOFT_SCROLLBACK_PERSISTENT_ENABLE_BY_DEFAULT is not set
-CONFIG_DUMMY_CONSOLE=y
-CONFIG_DUMMY_CONSOLE_COLUMNS=80
-CONFIG_DUMMY_CONSOLE_ROWS=25
-CONFIG_FRAMEBUFFER_CONSOLE=y
-CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
-CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
-CONFIG_LOGO=y
-CONFIG_LOGO_LINUX_MONO=y
-CONFIG_LOGO_LINUX_VGA16=y
-CONFIG_LOGO_LINUX_CLUT224=y
-CONFIG_SOUND=m
-CONFIG_SOUND_OSS_CORE=y
-CONFIG_SOUND_OSS_CORE_PRECLAIM=y
-CONFIG_SND=m
-CONFIG_SND_TIMER=m
-CONFIG_SND_PCM=m
-CONFIG_SND_PCM_ELD=y
-CONFIG_SND_PCM_IEC958=y
-CONFIG_SND_DMAENGINE_PCM=m
-CONFIG_SND_HWDEP=m
-CONFIG_SND_SEQ_DEVICE=m
-CONFIG_SND_RAWMIDI=m
-CONFIG_SND_COMPRESS_OFFLOAD=m
-CONFIG_SND_JACK=y
-CONFIG_SND_JACK_INPUT_DEV=y
-CONFIG_SND_OSSEMUL=y
-CONFIG_SND_MIXER_OSS=m
-CONFIG_SND_PCM_OSS=m
-CONFIG_SND_PCM_OSS_PLUGINS=y
-CONFIG_SND_PCM_TIMER=y
-CONFIG_SND_HRTIMER=m
-CONFIG_SND_DYNAMIC_MINORS=y
-CONFIG_SND_MAX_CARDS=32
-CONFIG_SND_SUPPORT_OLD_API=y
-CONFIG_SND_PROC_FS=y
-CONFIG_SND_VERBOSE_PROCFS=y
-# CONFIG_SND_VERBOSE_PRINTK is not set
-# CONFIG_SND_DEBUG is not set
-CONFIG_SND_VMASTER=y
-CONFIG_SND_DMA_SGBUF=y
-CONFIG_SND_SEQUENCER=m
-CONFIG_SND_SEQ_DUMMY=m
-CONFIG_SND_SEQUENCER_OSS=m
-CONFIG_SND_SEQ_HRTIMER_DEFAULT=y
-CONFIG_SND_SEQ_MIDI_EVENT=m
-CONFIG_SND_SEQ_MIDI=m
-CONFIG_SND_SEQ_MIDI_EMUL=m
-CONFIG_SND_SEQ_VIRMIDI=m
-CONFIG_SND_MPU401_UART=m
-CONFIG_SND_OPL3_LIB=m
-CONFIG_SND_OPL3_LIB_SEQ=m
-# CONFIG_SND_OPL4_LIB_SEQ is not set
-CONFIG_SND_VX_LIB=m
-CONFIG_SND_AC97_CODEC=m
-CONFIG_SND_DRIVERS=y
-# CONFIG_SND_PCSP is not set
-CONFIG_SND_DUMMY=m
-CONFIG_SND_ALOOP=m
-CONFIG_SND_VIRMIDI=m
-CONFIG_SND_MTPAV=m
-CONFIG_SND_MTS64=m
-CONFIG_SND_SERIAL_U16550=m
-CONFIG_SND_MPU401=m
-CONFIG_SND_PORTMAN2X4=m
-CONFIG_SND_AC97_POWER_SAVE=y
-CONFIG_SND_AC97_POWER_SAVE_DEFAULT=0
-CONFIG_SND_SB_COMMON=m
-CONFIG_SND_PCI=y
-CONFIG_SND_AD1889=m
-CONFIG_SND_ALS300=m
-CONFIG_SND_ALS4000=m
-CONFIG_SND_ALI5451=m
-CONFIG_SND_ASIHPI=m
-CONFIG_SND_ATIIXP=m
-CONFIG_SND_ATIIXP_MODEM=m
-CONFIG_SND_AU8810=m
-CONFIG_SND_AU8820=m
-CONFIG_SND_AU8830=m
-CONFIG_SND_AW2=m
-CONFIG_SND_AZT3328=m
-CONFIG_SND_BT87X=m
-CONFIG_SND_BT87X_OVERCLOCK=y
-CONFIG_SND_CA0106=m
-CONFIG_SND_CMIPCI=m
-CONFIG_SND_OXYGEN_LIB=m
-CONFIG_SND_OXYGEN=m
-CONFIG_SND_CS4281=m
-CONFIG_SND_CS46XX=m
-CONFIG_SND_CS46XX_NEW_DSP=y
-CONFIG_SND_CTXFI=m
-CONFIG_SND_DARLA20=m
-CONFIG_SND_GINA20=m
-CONFIG_SND_LAYLA20=m
-CONFIG_SND_DARLA24=m
-CONFIG_SND_GINA24=m
-CONFIG_SND_LAYLA24=m
-CONFIG_SND_MONA=m
-CONFIG_SND_MIA=m
-CONFIG_SND_ECHO3G=m
-CONFIG_SND_INDIGO=m
-CONFIG_SND_INDIGOIO=m
-CONFIG_SND_INDIGODJ=m
-CONFIG_SND_INDIGOIOX=m
-CONFIG_SND_INDIGODJX=m
-CONFIG_SND_EMU10K1=m
-CONFIG_SND_EMU10K1_SEQ=m
-CONFIG_SND_EMU10K1X=m
-CONFIG_SND_ENS1370=m
-CONFIG_SND_ENS1371=m
-CONFIG_SND_ES1938=m
-CONFIG_SND_ES1968=m
-CONFIG_SND_ES1968_INPUT=y
-CONFIG_SND_ES1968_RADIO=y
-CONFIG_SND_FM801=m
-CONFIG_SND_FM801_TEA575X_BOOL=y
-CONFIG_SND_HDSP=m
-CONFIG_SND_HDSPM=m
-CONFIG_SND_ICE1712=m
-CONFIG_SND_ICE1724=m
-CONFIG_SND_INTEL8X0=m
-CONFIG_SND_INTEL8X0M=m
-CONFIG_SND_KORG1212=m
-CONFIG_SND_LOLA=m
-CONFIG_SND_LX6464ES=m
-CONFIG_SND_MAESTRO3=m
-CONFIG_SND_MAESTRO3_INPUT=y
-CONFIG_SND_MIXART=m
-CONFIG_SND_NM256=m
-CONFIG_SND_PCXHR=m
-CONFIG_SND_RIPTIDE=m
-CONFIG_SND_RME32=m
-CONFIG_SND_RME96=m
-CONFIG_SND_RME9652=m
-CONFIG_SND_SONICVIBES=m
-CONFIG_SND_TRIDENT=m
-CONFIG_SND_VIA82XX=m
-CONFIG_SND_VIA82XX_MODEM=m
-CONFIG_SND_VIRTUOSO=m
-CONFIG_SND_VX222=m
-CONFIG_SND_YMFPCI=m
-
-#
-# HD-Audio
-#
-CONFIG_SND_HDA=m
-CONFIG_SND_HDA_INTEL=m
-CONFIG_SND_HDA_HWDEP=y
-CONFIG_SND_HDA_RECONFIG=y
-CONFIG_SND_HDA_INPUT_BEEP=y
-CONFIG_SND_HDA_INPUT_BEEP_MODE=1
-CONFIG_SND_HDA_PATCH_LOADER=y
-CONFIG_SND_HDA_CODEC_REALTEK=m
-CONFIG_SND_HDA_CODEC_ANALOG=m
-CONFIG_SND_HDA_CODEC_SIGMATEL=m
-CONFIG_SND_HDA_CODEC_VIA=m
-CONFIG_SND_HDA_CODEC_HDMI=m
-CONFIG_SND_HDA_CODEC_CIRRUS=m
-CONFIG_SND_HDA_CODEC_CONEXANT=m
-CONFIG_SND_HDA_CODEC_CA0110=m
-CONFIG_SND_HDA_CODEC_CA0132=m
-CONFIG_SND_HDA_CODEC_CA0132_DSP=y
-CONFIG_SND_HDA_CODEC_CMEDIA=m
-CONFIG_SND_HDA_CODEC_SI3054=m
-CONFIG_SND_HDA_GENERIC=m
-CONFIG_SND_HDA_POWER_SAVE_DEFAULT=0
-CONFIG_SND_HDA_CORE=m
-CONFIG_SND_HDA_DSP_LOADER=y
-CONFIG_SND_HDA_I915=y
-CONFIG_SND_HDA_EXT_CORE=m
-CONFIG_SND_HDA_PREALLOC_SIZE=64
-CONFIG_SND_SPI=y
-CONFIG_SND_USB=y
-CONFIG_SND_USB_AUDIO=m
-CONFIG_SND_USB_UA101=m
-CONFIG_SND_USB_USX2Y=m
-CONFIG_SND_USB_CAIAQ=m
-CONFIG_SND_USB_CAIAQ_INPUT=y
-CONFIG_SND_USB_US122L=m
-CONFIG_SND_USB_6FIRE=m
-CONFIG_SND_USB_HIFACE=m
-CONFIG_SND_BCD2000=m
-CONFIG_SND_USB_LINE6=m
-CONFIG_SND_USB_POD=m
-CONFIG_SND_USB_PODHD=m
-CONFIG_SND_USB_TONEPORT=m
-CONFIG_SND_USB_VARIAX=m
-CONFIG_SND_FIREWIRE=y
-CONFIG_SND_FIREWIRE_LIB=m
-CONFIG_SND_DICE=m
-CONFIG_SND_OXFW=m
-CONFIG_SND_ISIGHT=m
-CONFIG_SND_FIREWORKS=m
-CONFIG_SND_BEBOB=m
-CONFIG_SND_FIREWIRE_DIGI00X=m
-CONFIG_SND_FIREWIRE_TASCAM=m
-CONFIG_SND_FIREWIRE_MOTU=m
-CONFIG_SND_FIREFACE=m
-CONFIG_SND_PCMCIA=y
-CONFIG_SND_VXPOCKET=m
-CONFIG_SND_PDAUDIOCF=m
-CONFIG_SND_SOC=m
-CONFIG_SND_SOC_GENERIC_DMAENGINE_PCM=y
-CONFIG_SND_SOC_COMPRESS=y
-CONFIG_SND_SOC_TOPOLOGY=y
-CONFIG_SND_SOC_AMD_ACP=m
-CONFIG_SND_ATMEL_SOC=m
-CONFIG_SND_DESIGNWARE_I2S=m
-CONFIG_SND_DESIGNWARE_PCM=y
-
-#
-# SoC Audio for Freescale CPUs
-#
-
-#
-# Common SoC Audio options for Freescale CPUs:
-#
-CONFIG_SND_SOC_FSL_ASRC=m
-CONFIG_SND_SOC_FSL_SAI=m
-CONFIG_SND_SOC_FSL_SSI=m
-CONFIG_SND_SOC_FSL_SPDIF=m
-CONFIG_SND_SOC_FSL_ESAI=m
-CONFIG_SND_SOC_IMX_AUDMUX=m
-CONFIG_SND_I2S_HI6210_I2S=m
-CONFIG_SND_SOC_IMG=y
-CONFIG_SND_SOC_IMG_I2S_IN=m
-CONFIG_SND_SOC_IMG_I2S_OUT=m
-CONFIG_SND_SOC_IMG_PARALLEL_OUT=m
-CONFIG_SND_SOC_IMG_SPDIF_IN=m
-CONFIG_SND_SOC_IMG_SPDIF_OUT=m
-CONFIG_SND_SOC_IMG_PISTACHIO_INTERNAL_DAC=m
-CONFIG_SND_SST_ATOM_HIFI2_PLATFORM=m
-CONFIG_SND_SST_IPC=m
-CONFIG_SND_SST_IPC_ACPI=m
-CONFIG_SND_SOC_INTEL_SST=m
-CONFIG_SND_SOC_INTEL_SST_FIRMWARE=m
-CONFIG_SND_SOC_INTEL_SST_ACPI=m
-CONFIG_SND_SOC_INTEL_SST_MATCH=m
-CONFIG_SND_SOC_INTEL_HASWELL=m
-CONFIG_SND_SOC_INTEL_HASWELL_MACH=m
-CONFIG_SND_SOC_INTEL_BXT_DA7219_MAX98357A_MACH=m
-CONFIG_SND_SOC_INTEL_BXT_RT298_MACH=m
-CONFIG_SND_SOC_INTEL_BDW_RT5677_MACH=m
-CONFIG_SND_SOC_INTEL_BROADWELL_MACH=m
-CONFIG_SND_SOC_INTEL_BYTCR_RT5640_MACH=m
-CONFIG_SND_SOC_INTEL_BYTCR_RT5651_MACH=m
-CONFIG_SND_SOC_INTEL_CHT_BSW_RT5672_MACH=m
-CONFIG_SND_SOC_INTEL_CHT_BSW_RT5645_MACH=m
-CONFIG_SND_SOC_INTEL_CHT_BSW_MAX98090_TI_MACH=m
-CONFIG_SND_SOC_INTEL_BYT_CHT_DA7213_MACH=m
-CONFIG_SND_SOC_INTEL_BYT_CHT_ES8316_MACH=m
-CONFIG_SND_SOC_INTEL_BYT_CHT_NOCODEC_MACH=m
-CONFIG_SND_SOC_INTEL_KBL_RT5663_MAX98927_MACH=m
-CONFIG_SND_SOC_INTEL_KBL_RT5663_RT5514_MAX98927_MACH=m
-CONFIG_SND_SOC_INTEL_SKYLAKE=m
-CONFIG_SND_SOC_INTEL_SKL_RT286_MACH=m
-CONFIG_SND_SOC_INTEL_SKL_NAU88L25_SSM4567_MACH=m
-CONFIG_SND_SOC_INTEL_SKL_NAU88L25_MAX98357A_MACH=m
-
-#
-# STMicroelectronics STM32 SOC audio support
-#
-CONFIG_SND_SOC_XTFPGA_I2S=m
-CONFIG_ZX_TDM=m
-CONFIG_SND_SOC_I2C_AND_SPI=m
-
-#
-# CODEC drivers
-#
-# CONFIG_SND_SOC_AC97_CODEC is not set
-CONFIG_SND_SOC_ADAU_UTILS=m
-CONFIG_SND_SOC_ADAU1701=m
-CONFIG_SND_SOC_ADAU17X1=m
-CONFIG_SND_SOC_ADAU1761=m
-CONFIG_SND_SOC_ADAU1761_I2C=m
-CONFIG_SND_SOC_ADAU1761_SPI=m
-CONFIG_SND_SOC_ADAU7002=m
-CONFIG_SND_SOC_AK4104=m
-CONFIG_SND_SOC_AK4554=m
-CONFIG_SND_SOC_AK4613=m
-CONFIG_SND_SOC_AK4642=m
-CONFIG_SND_SOC_AK5386=m
-CONFIG_SND_SOC_ALC5623=m
-# CONFIG_SND_SOC_BT_SCO is not set
-CONFIG_SND_SOC_CS35L32=m
-CONFIG_SND_SOC_CS35L33=m
-CONFIG_SND_SOC_CS35L34=m
-CONFIG_SND_SOC_CS35L35=m
-CONFIG_SND_SOC_CS42L42=m
-CONFIG_SND_SOC_CS42L51=m
-CONFIG_SND_SOC_CS42L51_I2C=m
-CONFIG_SND_SOC_CS42L52=m
-CONFIG_SND_SOC_CS42L56=m
-CONFIG_SND_SOC_CS42L73=m
-CONFIG_SND_SOC_CS4265=m
-CONFIG_SND_SOC_CS4270=m
-CONFIG_SND_SOC_CS4271=m
-CONFIG_SND_SOC_CS4271_I2C=m
-CONFIG_SND_SOC_CS4271_SPI=m
-CONFIG_SND_SOC_CS42XX8=m
-CONFIG_SND_SOC_CS42XX8_I2C=m
-CONFIG_SND_SOC_CS43130=m
-CONFIG_SND_SOC_CS4349=m
-CONFIG_SND_SOC_CS53L30=m
-CONFIG_SND_SOC_DA7213=m
-CONFIG_SND_SOC_DA7219=m
-CONFIG_SND_SOC_DIO2125=m
-CONFIG_SND_SOC_DMIC=m
-CONFIG_SND_SOC_HDMI_CODEC=m
-CONFIG_SND_SOC_ES7134=m
-CONFIG_SND_SOC_ES8316=m
-CONFIG_SND_SOC_ES8328=m
-CONFIG_SND_SOC_ES8328_I2C=m
-CONFIG_SND_SOC_ES8328_SPI=m
-CONFIG_SND_SOC_GTM601=m
-CONFIG_SND_SOC_HDAC_HDMI=m
-CONFIG_SND_SOC_INNO_RK3036=m
-CONFIG_SND_SOC_MAX98090=m
-CONFIG_SND_SOC_MAX98357A=m
-CONFIG_SND_SOC_MAX98504=m
-CONFIG_SND_SOC_MAX98927=m
-CONFIG_SND_SOC_MAX9860=m
-CONFIG_SND_SOC_MSM8916_WCD_ANALOG=m
-CONFIG_SND_SOC_MSM8916_WCD_DIGITAL=m
-CONFIG_SND_SOC_PCM1681=m
-CONFIG_SND_SOC_PCM179X=m
-CONFIG_SND_SOC_PCM179X_I2C=m
-CONFIG_SND_SOC_PCM179X_SPI=m
-CONFIG_SND_SOC_PCM3168A=m
-CONFIG_SND_SOC_PCM3168A_I2C=m
-CONFIG_SND_SOC_PCM3168A_SPI=m
-CONFIG_SND_SOC_PCM512x=m
-CONFIG_SND_SOC_PCM512x_I2C=m
-CONFIG_SND_SOC_PCM512x_SPI=m
-CONFIG_SND_SOC_RL6231=m
-CONFIG_SND_SOC_RL6347A=m
-CONFIG_SND_SOC_RT286=m
-CONFIG_SND_SOC_RT298=m
-CONFIG_SND_SOC_RT5514=m
-CONFIG_SND_SOC_RT5514_SPI=m
-CONFIG_SND_SOC_RT5616=m
-CONFIG_SND_SOC_RT5631=m
-CONFIG_SND_SOC_RT5640=m
-CONFIG_SND_SOC_RT5645=m
-CONFIG_SND_SOC_RT5651=m
-CONFIG_SND_SOC_RT5663=m
-CONFIG_SND_SOC_RT5670=m
-CONFIG_SND_SOC_RT5677=m
-CONFIG_SND_SOC_RT5677_SPI=m
-CONFIG_SND_SOC_SGTL5000=m
-CONFIG_SND_SOC_SI476X=m
-CONFIG_SND_SOC_SIGMADSP=m
-CONFIG_SND_SOC_SIGMADSP_I2C=m
-CONFIG_SND_SOC_SIGMADSP_REGMAP=m
-CONFIG_SND_SOC_SIRF_AUDIO_CODEC=m
-CONFIG_SND_SOC_SPDIF=m
-CONFIG_SND_SOC_SSM2602=m
-CONFIG_SND_SOC_SSM2602_SPI=m
-CONFIG_SND_SOC_SSM2602_I2C=m
-CONFIG_SND_SOC_SSM4567=m
-CONFIG_SND_SOC_STA32X=m
-CONFIG_SND_SOC_STA350=m
-CONFIG_SND_SOC_STI_SAS=m
-CONFIG_SND_SOC_TAS2552=m
-CONFIG_SND_SOC_TAS5086=m
-CONFIG_SND_SOC_TAS571X=m
-CONFIG_SND_SOC_TAS5720=m
-CONFIG_SND_SOC_TFA9879=m
-CONFIG_SND_SOC_TLV320AIC23=m
-CONFIG_SND_SOC_TLV320AIC23_I2C=m
-CONFIG_SND_SOC_TLV320AIC23_SPI=m
-CONFIG_SND_SOC_TLV320AIC31XX=m
-CONFIG_SND_SOC_TLV320AIC3X=m
-CONFIG_SND_SOC_TS3A227E=m
-CONFIG_SND_SOC_WM8510=m
-CONFIG_SND_SOC_WM8523=m
-CONFIG_SND_SOC_WM8524=m
-CONFIG_SND_SOC_WM8580=m
-CONFIG_SND_SOC_WM8711=m
-CONFIG_SND_SOC_WM8728=m
-CONFIG_SND_SOC_WM8731=m
-CONFIG_SND_SOC_WM8737=m
-CONFIG_SND_SOC_WM8741=m
-CONFIG_SND_SOC_WM8750=m
-CONFIG_SND_SOC_WM8753=m
-CONFIG_SND_SOC_WM8770=m
-CONFIG_SND_SOC_WM8776=m
-CONFIG_SND_SOC_WM8804=m
-CONFIG_SND_SOC_WM8804_I2C=m
-CONFIG_SND_SOC_WM8804_SPI=m
-CONFIG_SND_SOC_WM8903=m
-CONFIG_SND_SOC_WM8960=m
-CONFIG_SND_SOC_WM8962=m
-CONFIG_SND_SOC_WM8974=m
-CONFIG_SND_SOC_WM8978=m
-CONFIG_SND_SOC_WM8985=m
-CONFIG_SND_SOC_ZX_AUD96P22=m
-CONFIG_SND_SOC_NAU8540=m
-CONFIG_SND_SOC_NAU8810=m
-CONFIG_SND_SOC_NAU8824=m
-CONFIG_SND_SOC_NAU8825=m
-CONFIG_SND_SOC_TPA6130A2=m
-CONFIG_SND_SIMPLE_CARD_UTILS=m
-CONFIG_SND_SIMPLE_CARD=m
-CONFIG_SND_X86=y
-CONFIG_HDMI_LPE_AUDIO=m
-CONFIG_SND_SYNTH_EMUX=m
-CONFIG_AC97_BUS=m
-
-#
-# HID support
-#
-CONFIG_HID=m
-CONFIG_HID_BATTERY_STRENGTH=y
-CONFIG_HIDRAW=y
-CONFIG_UHID=m
-CONFIG_HID_GENERIC=m
-
-#
-# Special HID drivers
-#
-CONFIG_HID_A4TECH=m
-CONFIG_HID_ACCUTOUCH=m
-CONFIG_HID_ACRUX=m
-CONFIG_HID_ACRUX_FF=y
-CONFIG_HID_APPLE=m
-CONFIG_HID_APPLEIR=m
-CONFIG_HID_ASUS=m
-CONFIG_HID_AUREAL=m
-CONFIG_HID_BELKIN=m
-CONFIG_HID_BETOP_FF=m
-CONFIG_HID_CHERRY=m
-CONFIG_HID_CHICONY=m
-CONFIG_HID_CORSAIR=m
-CONFIG_HID_PRODIKEYS=m
-CONFIG_HID_CMEDIA=m
-CONFIG_HID_CP2112=m
-CONFIG_HID_CYPRESS=m
-CONFIG_HID_DRAGONRISE=m
-CONFIG_DRAGONRISE_FF=y
-CONFIG_HID_EMS_FF=m
-CONFIG_HID_ELECOM=m
-CONFIG_HID_ELO=m
-CONFIG_HID_EZKEY=m
-CONFIG_HID_GEMBIRD=m
-CONFIG_HID_GFRM=m
-CONFIG_HID_HOLTEK=m
-CONFIG_HOLTEK_FF=y
-CONFIG_HID_GT683R=m
-CONFIG_HID_KEYTOUCH=m
-CONFIG_HID_KYE=m
-CONFIG_HID_UCLOGIC=m
-CONFIG_HID_WALTOP=m
-CONFIG_HID_GYRATION=m
-CONFIG_HID_ICADE=m
-CONFIG_HID_ITE=m
-CONFIG_HID_TWINHAN=m
-CONFIG_HID_KENSINGTON=m
-CONFIG_HID_LCPOWER=m
-CONFIG_HID_LED=m
-CONFIG_HID_LENOVO=m
-CONFIG_HID_LOGITECH=m
-CONFIG_HID_LOGITECH_DJ=m
-CONFIG_HID_LOGITECH_HIDPP=m
-CONFIG_LOGITECH_FF=y
-CONFIG_LOGIRUMBLEPAD2_FF=y
-CONFIG_LOGIG940_FF=y
-CONFIG_LOGIWHEELS_FF=y
-CONFIG_HID_MAGICMOUSE=m
-CONFIG_HID_MAYFLASH=m
-CONFIG_HID_MICROSOFT=m
-CONFIG_HID_MONTEREY=m
-CONFIG_HID_MULTITOUCH=m
-CONFIG_HID_NTI=m
-CONFIG_HID_NTRIG=m
-CONFIG_HID_ORTEK=m
-CONFIG_HID_PANTHERLORD=m
-CONFIG_PANTHERLORD_FF=y
-CONFIG_HID_PENMOUNT=m
-CONFIG_HID_PETALYNX=m
-CONFIG_HID_PICOLCD=m
-CONFIG_HID_PICOLCD_FB=y
-CONFIG_HID_PICOLCD_BACKLIGHT=y
-CONFIG_HID_PICOLCD_LCD=y
-CONFIG_HID_PICOLCD_LEDS=y
-CONFIG_HID_PICOLCD_CIR=y
-CONFIG_HID_PLANTRONICS=m
-CONFIG_HID_PRIMAX=m
-CONFIG_HID_RETRODE=m
-CONFIG_HID_ROCCAT=m
-CONFIG_HID_SAITEK=m
-CONFIG_HID_SAMSUNG=m
-CONFIG_HID_SONY=m
-CONFIG_SONY_FF=y
-CONFIG_HID_SPEEDLINK=m
-CONFIG_HID_STEELSERIES=m
-CONFIG_HID_SUNPLUS=m
-CONFIG_HID_RMI=m
-CONFIG_HID_GREENASIA=m
-CONFIG_GREENASIA_FF=y
-CONFIG_HID_HYPERV_MOUSE=m
-CONFIG_HID_SMARTJOYPLUS=m
-CONFIG_SMARTJOYPLUS_FF=y
-CONFIG_HID_TIVO=m
-CONFIG_HID_TOPSEED=m
-CONFIG_HID_THINGM=m
-CONFIG_HID_THRUSTMASTER=m
-CONFIG_THRUSTMASTER_FF=y
-CONFIG_HID_UDRAW_PS3=m
-CONFIG_HID_WACOM=m
-CONFIG_HID_WIIMOTE=m
-CONFIG_HID_XINMO=m
-CONFIG_HID_ZEROPLUS=m
-CONFIG_ZEROPLUS_FF=y
-CONFIG_HID_ZYDACRON=m
-CONFIG_HID_SENSOR_HUB=m
-CONFIG_HID_SENSOR_CUSTOM_SENSOR=m
-CONFIG_HID_ALPS=m
-
-#
-# USB HID support
-#
-CONFIG_USB_HID=m
-CONFIG_HID_PID=y
-CONFIG_USB_HIDDEV=y
-
-#
-# I2C HID support
-#
-CONFIG_I2C_HID=m
-
-#
-# Intel ISH HID support
-#
-CONFIG_INTEL_ISH_HID=m
-CONFIG_USB_OHCI_LITTLE_ENDIAN=y
-CONFIG_USB_SUPPORT=y
-CONFIG_USB_COMMON=y
-CONFIG_USB_ARCH_HAS_HCD=y
-CONFIG_USB=m
-CONFIG_USB_PCI=y
-CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
-
-#
-# Miscellaneous USB options
-#
-CONFIG_USB_DEFAULT_PERSIST=y
-CONFIG_USB_DYNAMIC_MINORS=y
-CONFIG_USB_OTG=y
-# CONFIG_USB_OTG_WHITELIST is not set
-# CONFIG_USB_OTG_BLACKLIST_HUB is not set
-CONFIG_USB_OTG_FSM=m
-CONFIG_USB_LEDS_TRIGGER_USBPORT=m
-CONFIG_USB_MON=m
-CONFIG_USB_WUSB=m
-CONFIG_USB_WUSB_CBAF=m
-# CONFIG_USB_WUSB_CBAF_DEBUG is not set
-
-#
-# USB Host Controller Drivers
-#
-CONFIG_USB_C67X00_HCD=m
-CONFIG_USB_XHCI_HCD=m
-CONFIG_USB_XHCI_PCI=m
-CONFIG_USB_XHCI_PLATFORM=m
-CONFIG_USB_EHCI_HCD=m
-CONFIG_USB_EHCI_ROOT_HUB_TT=y
-CONFIG_USB_EHCI_TT_NEWSCHED=y
-CONFIG_USB_EHCI_PCI=m
-CONFIG_USB_EHCI_HCD_PLATFORM=m
-CONFIG_USB_OXU210HP_HCD=m
-CONFIG_USB_ISP116X_HCD=m
-CONFIG_USB_ISP1362_HCD=m
-CONFIG_USB_FOTG210_HCD=m
-CONFIG_USB_MAX3421_HCD=m
-CONFIG_USB_OHCI_HCD=m
-CONFIG_USB_OHCI_HCD_PCI=m
-CONFIG_USB_OHCI_HCD_SSB=y
-CONFIG_USB_OHCI_HCD_PLATFORM=m
-CONFIG_USB_UHCI_HCD=m
-CONFIG_USB_U132_HCD=m
-CONFIG_USB_SL811_HCD=m
-# CONFIG_USB_SL811_HCD_ISO is not set
-CONFIG_USB_SL811_CS=m
-CONFIG_USB_R8A66597_HCD=m
-CONFIG_USB_WHCI_HCD=m
-CONFIG_USB_HWA_HCD=m
-CONFIG_USB_HCD_BCMA=m
-CONFIG_USB_HCD_SSB=m
-# CONFIG_USB_HCD_TEST_MODE is not set
-
-#
-# USB Device Class drivers
-#
-CONFIG_USB_ACM=m
-CONFIG_USB_PRINTER=m
-CONFIG_USB_WDM=m
-CONFIG_USB_TMC=m
-
-#
-# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
-# also be needed; see USB_STORAGE Help for more info
-#
-CONFIG_USB_STORAGE=m
-# CONFIG_USB_STORAGE_DEBUG is not set
-CONFIG_USB_STORAGE_REALTEK=m
-CONFIG_REALTEK_AUTOPM=y
-CONFIG_USB_STORAGE_DATAFAB=m
-CONFIG_USB_STORAGE_FREECOM=m
-CONFIG_USB_STORAGE_ISD200=m
-CONFIG_USB_STORAGE_USBAT=m
-CONFIG_USB_STORAGE_SDDR09=m
-CONFIG_USB_STORAGE_SDDR55=m
-CONFIG_USB_STORAGE_JUMPSHOT=m
-CONFIG_USB_STORAGE_ALAUDA=m
-CONFIG_USB_STORAGE_ONETOUCH=m
-CONFIG_USB_STORAGE_KARMA=m
-CONFIG_USB_STORAGE_CYPRESS_ATACB=m
-CONFIG_USB_STORAGE_ENE_UB6250=m
-CONFIG_USB_UAS=m
-
-#
-# USB Imaging devices
-#
-CONFIG_USB_MDC800=m
-CONFIG_USB_MICROTEK=m
-CONFIG_USBIP_CORE=m
-CONFIG_USBIP_VHCI_HCD=m
-CONFIG_USBIP_VHCI_HC_PORTS=8
-CONFIG_USBIP_VHCI_NR_HCS=1
-CONFIG_USBIP_HOST=m
-CONFIG_USBIP_VUDC=m
-# CONFIG_USBIP_DEBUG is not set
-CONFIG_USB_MUSB_HDRC=m
-# CONFIG_USB_MUSB_HOST is not set
-# CONFIG_USB_MUSB_GADGET is not set
-CONFIG_USB_MUSB_DUAL_ROLE=y
-
-#
-# Platform Glue Layer
-#
-
-#
-# MUSB DMA mode
-#
-CONFIG_MUSB_PIO_ONLY=y
-CONFIG_USB_DWC3=m
-# CONFIG_USB_DWC3_ULPI is not set
-# CONFIG_USB_DWC3_HOST is not set
-# CONFIG_USB_DWC3_GADGET is not set
-CONFIG_USB_DWC3_DUAL_ROLE=y
-
-#
-# Platform Glue Driver Support
-#
-CONFIG_USB_DWC3_PCI=m
-CONFIG_USB_DWC2=m
-# CONFIG_USB_DWC2_HOST is not set
-
-#
-# Gadget/Dual-role mode requires USB Gadget support to be enabled
-#
-# CONFIG_USB_DWC2_PERIPHERAL is not set
-CONFIG_USB_DWC2_DUAL_ROLE=y
-CONFIG_USB_DWC2_PCI=m
-# CONFIG_USB_DWC2_DEBUG is not set
-# CONFIG_USB_DWC2_TRACK_MISSED_SOFS is not set
-CONFIG_USB_CHIPIDEA=m
-CONFIG_USB_CHIPIDEA_PCI=m
-CONFIG_USB_CHIPIDEA_UDC=y
-CONFIG_USB_CHIPIDEA_HOST=y
-# CONFIG_USB_CHIPIDEA_ULPI is not set
-CONFIG_USB_ISP1760=m
-CONFIG_USB_ISP1760_HCD=y
-CONFIG_USB_ISP1761_UDC=y
-# CONFIG_USB_ISP1760_HOST_ROLE is not set
-# CONFIG_USB_ISP1760_GADGET_ROLE is not set
-CONFIG_USB_ISP1760_DUAL_ROLE=y
-
-#
-# USB port drivers
-#
-CONFIG_USB_USS720=m
-CONFIG_USB_SERIAL=m
-CONFIG_USB_SERIAL_GENERIC=y
-CONFIG_USB_SERIAL_SIMPLE=m
-CONFIG_USB_SERIAL_AIRCABLE=m
-CONFIG_USB_SERIAL_ARK3116=m
-CONFIG_USB_SERIAL_BELKIN=m
-CONFIG_USB_SERIAL_CH341=m
-CONFIG_USB_SERIAL_WHITEHEAT=m
-CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m
-CONFIG_USB_SERIAL_CP210X=m
-CONFIG_USB_SERIAL_CYPRESS_M8=m
-CONFIG_USB_SERIAL_EMPEG=m
-CONFIG_USB_SERIAL_FTDI_SIO=m
-CONFIG_USB_SERIAL_VISOR=m
-CONFIG_USB_SERIAL_IPAQ=m
-CONFIG_USB_SERIAL_IR=m
-CONFIG_USB_SERIAL_EDGEPORT=m
-CONFIG_USB_SERIAL_EDGEPORT_TI=m
-CONFIG_USB_SERIAL_F81232=m
-CONFIG_USB_SERIAL_F8153X=m
-CONFIG_USB_SERIAL_GARMIN=m
-CONFIG_USB_SERIAL_IPW=m
-CONFIG_USB_SERIAL_IUU=m
-CONFIG_USB_SERIAL_KEYSPAN_PDA=m
-CONFIG_USB_SERIAL_KEYSPAN=m
-CONFIG_USB_SERIAL_KLSI=m
-CONFIG_USB_SERIAL_KOBIL_SCT=m
-CONFIG_USB_SERIAL_MCT_U232=m
-CONFIG_USB_SERIAL_METRO=m
-CONFIG_USB_SERIAL_MOS7720=m
-CONFIG_USB_SERIAL_MOS7715_PARPORT=y
-CONFIG_USB_SERIAL_MOS7840=m
-CONFIG_USB_SERIAL_MXUPORT=m
-CONFIG_USB_SERIAL_NAVMAN=m
-CONFIG_USB_SERIAL_PL2303=m
-CONFIG_USB_SERIAL_OTI6858=m
-CONFIG_USB_SERIAL_QCAUX=m
-CONFIG_USB_SERIAL_QUALCOMM=m
-CONFIG_USB_SERIAL_SPCP8X5=m
-CONFIG_USB_SERIAL_SAFE=m
-CONFIG_USB_SERIAL_SAFE_PADDED=y
-CONFIG_USB_SERIAL_SIERRAWIRELESS=m
-CONFIG_USB_SERIAL_SYMBOL=m
-CONFIG_USB_SERIAL_TI=m
-CONFIG_USB_SERIAL_CYBERJACK=m
-CONFIG_USB_SERIAL_XIRCOM=m
-CONFIG_USB_SERIAL_WWAN=m
-CONFIG_USB_SERIAL_OPTION=m
-CONFIG_USB_SERIAL_OMNINET=m
-CONFIG_USB_SERIAL_OPTICON=m
-CONFIG_USB_SERIAL_XSENS_MT=m
-CONFIG_USB_SERIAL_WISHBONE=m
-CONFIG_USB_SERIAL_SSU100=m
-CONFIG_USB_SERIAL_QT2=m
-# CONFIG_USB_SERIAL_UPD78F0730 is not set
-# CONFIG_USB_SERIAL_DEBUG is not set
-
-#
-# USB Miscellaneous drivers
-#
-CONFIG_USB_EMI62=m
-CONFIG_USB_EMI26=m
-CONFIG_USB_ADUTUX=m
-CONFIG_USB_SEVSEG=m
-CONFIG_USB_RIO500=m
-CONFIG_USB_LEGOTOWER=m
-CONFIG_USB_LCD=m
-CONFIG_USB_CYPRESS_CY7C63=m
-CONFIG_USB_CYTHERM=m
-CONFIG_USB_IDMOUSE=m
-CONFIG_USB_FTDI_ELAN=m
-CONFIG_USB_APPLEDISPLAY=m
-CONFIG_USB_SISUSBVGA=m
-CONFIG_USB_SISUSBVGA_CON=y
-CONFIG_USB_LD=m
-CONFIG_USB_TRANCEVIBRATOR=m
-CONFIG_USB_IOWARRIOR=m
-CONFIG_USB_TEST=m
-CONFIG_USB_EHSET_TEST_FIXTURE=m
-CONFIG_USB_ISIGHTFW=m
-CONFIG_USB_YUREX=m
-CONFIG_USB_EZUSB_FX2=m
-CONFIG_USB_HUB_USB251XB=m
-CONFIG_USB_HSIC_USB3503=m
-CONFIG_USB_HSIC_USB4604=m
-CONFIG_USB_LINK_LAYER_TEST=m
-CONFIG_USB_CHAOSKEY=m
-CONFIG_USB_ATM=m
-CONFIG_USB_SPEEDTOUCH=m
-CONFIG_USB_CXACRU=m
-CONFIG_USB_UEAGLEATM=m
-CONFIG_USB_XUSBATM=m
-
-#
-# USB Physical Layer drivers
-#
-CONFIG_USB_PHY=y
-CONFIG_NOP_USB_XCEIV=m
-CONFIG_USB_GPIO_VBUS=m
-CONFIG_TAHVO_USB=m
-CONFIG_TAHVO_USB_HOST_BY_DEFAULT=y
-CONFIG_USB_ISP1301=m
-CONFIG_USB_GADGET=m
-# CONFIG_USB_GADGET_DEBUG is not set
-# CONFIG_USB_GADGET_DEBUG_FILES is not set
-# CONFIG_USB_GADGET_DEBUG_FS is not set
-CONFIG_USB_GADGET_VBUS_DRAW=2
-CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS=2
-CONFIG_U_SERIAL_CONSOLE=y
-
-#
-# USB Peripheral Controller
-#
-CONFIG_USB_FOTG210_UDC=m
-CONFIG_USB_GR_UDC=m
-CONFIG_USB_R8A66597=m
-CONFIG_USB_PXA27X=m
-CONFIG_USB_MV_UDC=m
-CONFIG_USB_MV_U3D=m
-CONFIG_USB_SNP_CORE=m
-CONFIG_USB_M66592=m
-CONFIG_USB_BDC_UDC=m
-
-#
-# Platform Support
-#
-CONFIG_USB_BDC_PCI=m
-CONFIG_USB_AMD5536UDC=m
-CONFIG_USB_NET2272=m
-CONFIG_USB_NET2272_DMA=y
-CONFIG_USB_NET2280=m
-CONFIG_USB_GOKU=m
-CONFIG_USB_EG20T=m
-# CONFIG_USB_DUMMY_HCD is not set
-CONFIG_USB_LIBCOMPOSITE=m
-CONFIG_USB_F_ACM=m
-CONFIG_USB_F_SS_LB=m
-CONFIG_USB_U_SERIAL=m
-CONFIG_USB_U_ETHER=m
-CONFIG_USB_U_AUDIO=m
-CONFIG_USB_F_SERIAL=m
-CONFIG_USB_F_OBEX=m
-CONFIG_USB_F_NCM=m
-CONFIG_USB_F_ECM=m
-CONFIG_USB_F_PHONET=m
-CONFIG_USB_F_EEM=m
-CONFIG_USB_F_SUBSET=m
-CONFIG_USB_F_RNDIS=m
-CONFIG_USB_F_MASS_STORAGE=m
-CONFIG_USB_F_FS=m
-CONFIG_USB_F_UAC1=m
-CONFIG_USB_F_UAC2=m
-CONFIG_USB_F_UVC=m
-CONFIG_USB_F_MIDI=m
-CONFIG_USB_F_HID=m
-CONFIG_USB_F_PRINTER=m
-CONFIG_USB_F_TCM=m
-CONFIG_USB_CONFIGFS=m
-CONFIG_USB_CONFIGFS_SERIAL=y
-CONFIG_USB_CONFIGFS_ACM=y
-CONFIG_USB_CONFIGFS_OBEX=y
-CONFIG_USB_CONFIGFS_NCM=y
-CONFIG_USB_CONFIGFS_ECM=y
-CONFIG_USB_CONFIGFS_ECM_SUBSET=y
-CONFIG_USB_CONFIGFS_RNDIS=y
-CONFIG_USB_CONFIGFS_EEM=y
-CONFIG_USB_CONFIGFS_PHONET=y
-CONFIG_USB_CONFIGFS_MASS_STORAGE=y
-CONFIG_USB_CONFIGFS_F_LB_SS=y
-CONFIG_USB_CONFIGFS_F_FS=y
-CONFIG_USB_CONFIGFS_F_UAC1=y
-# CONFIG_USB_CONFIGFS_F_UAC1_LEGACY is not set
-CONFIG_USB_CONFIGFS_F_UAC2=y
-CONFIG_USB_CONFIGFS_F_MIDI=y
-CONFIG_USB_CONFIGFS_F_HID=y
-CONFIG_USB_CONFIGFS_F_UVC=y
-CONFIG_USB_CONFIGFS_F_PRINTER=y
-CONFIG_USB_CONFIGFS_F_TCM=y
-CONFIG_USB_ZERO=m
-CONFIG_USB_ZERO_HNPTEST=y
-CONFIG_USB_AUDIO=m
-CONFIG_GADGET_UAC1=y
-# CONFIG_GADGET_UAC1_LEGACY is not set
-CONFIG_USB_ETH=m
-CONFIG_USB_ETH_RNDIS=y
-CONFIG_USB_ETH_EEM=y
-CONFIG_USB_G_NCM=m
-CONFIG_USB_GADGETFS=m
-CONFIG_USB_FUNCTIONFS=m
-CONFIG_USB_FUNCTIONFS_ETH=y
-CONFIG_USB_FUNCTIONFS_RNDIS=y
-CONFIG_USB_FUNCTIONFS_GENERIC=y
-CONFIG_USB_MASS_STORAGE=m
-CONFIG_USB_GADGET_TARGET=m
-CONFIG_USB_G_SERIAL=m
-CONFIG_USB_MIDI_GADGET=m
-CONFIG_USB_G_PRINTER=m
-CONFIG_USB_CDC_COMPOSITE=m
-CONFIG_USB_G_NOKIA=m
-CONFIG_USB_G_ACM_MS=m
-CONFIG_USB_G_MULTI=m
-CONFIG_USB_G_MULTI_RNDIS=y
-CONFIG_USB_G_MULTI_CDC=y
-CONFIG_USB_G_HID=m
-# CONFIG_USB_G_DBGP is not set
-CONFIG_USB_G_WEBCAM=m
-
-#
-# USB Power Delivery and Type-C drivers
-#
-CONFIG_TYPEC=m
-CONFIG_TYPEC_UCSI=m
-CONFIG_UCSI_ACPI=m
-CONFIG_USB_LED_TRIG=y
-CONFIG_USB_ULPI_BUS=m
-CONFIG_UWB=m
-CONFIG_UWB_HWA=m
-CONFIG_UWB_WHCI=m
-CONFIG_UWB_I1480U=m
-CONFIG_MMC=m
-CONFIG_MMC_BLOCK=m
-CONFIG_MMC_BLOCK_MINORS=8
-CONFIG_SDIO_UART=m
-# CONFIG_MMC_TEST is not set
-
-#
-# MMC/SD/SDIO Host Controller Drivers
-#
-# CONFIG_MMC_DEBUG is not set
-CONFIG_MMC_SDHCI=m
-CONFIG_MMC_SDHCI_PCI=m
-CONFIG_MMC_RICOH_MMC=y
-CONFIG_MMC_SDHCI_ACPI=m
-CONFIG_MMC_SDHCI_PLTFM=m
-CONFIG_MMC_WBSD=m
-CONFIG_MMC_TIFM_SD=m
-CONFIG_MMC_SPI=m
-CONFIG_MMC_SDRICOH_CS=m
-CONFIG_MMC_CB710=m
-CONFIG_MMC_VIA_SDMMC=m
-CONFIG_MMC_VUB300=m
-CONFIG_MMC_USHC=m
-CONFIG_MMC_USDHI6ROL0=m
-CONFIG_MMC_REALTEK_PCI=m
-CONFIG_MMC_REALTEK_USB=m
-CONFIG_MMC_TOSHIBA_PCI=m
-CONFIG_MMC_MTK=m
-CONFIG_MMC_SDHCI_XENON=m
-CONFIG_MEMSTICK=m
-# CONFIG_MEMSTICK_DEBUG is not set
-
-#
-# MemoryStick drivers
-#
-# CONFIG_MEMSTICK_UNSAFE_RESUME is not set
-CONFIG_MSPRO_BLOCK=m
-CONFIG_MS_BLOCK=m
-
-#
-# MemoryStick Host Controller Drivers
-#
-CONFIG_MEMSTICK_TIFM_MS=m
-CONFIG_MEMSTICK_JMICRON_38X=m
-CONFIG_MEMSTICK_R592=m
-CONFIG_MEMSTICK_REALTEK_PCI=m
-CONFIG_MEMSTICK_REALTEK_USB=m
-CONFIG_NEW_LEDS=y
-CONFIG_LEDS_CLASS=y
-CONFIG_LEDS_CLASS_FLASH=m
-# CONFIG_LEDS_BRIGHTNESS_HW_CHANGED is not set
-
-#
-# LED drivers
-#
-CONFIG_LEDS_AS3645A=m
-CONFIG_LEDS_LM3530=m
-CONFIG_LEDS_LM3533=m
-CONFIG_LEDS_LM3642=m
-CONFIG_LEDS_MT6323=m
-CONFIG_LEDS_PCA9532=m
-CONFIG_LEDS_PCA9532_GPIO=y
-CONFIG_LEDS_GPIO=m
-CONFIG_LEDS_LP3944=m
-CONFIG_LEDS_LP3952=m
-CONFIG_LEDS_LP55XX_COMMON=m
-CONFIG_LEDS_LP5521=m
-CONFIG_LEDS_LP5523=m
-CONFIG_LEDS_LP5562=m
-CONFIG_LEDS_LP8501=m
-CONFIG_LEDS_LP8860=m
-CONFIG_LEDS_CLEVO_MAIL=m
-CONFIG_LEDS_PCA955X=m
-# CONFIG_LEDS_PCA955X_GPIO is not set
-CONFIG_LEDS_PCA963X=m
-CONFIG_LEDS_WM831X_STATUS=m
-CONFIG_LEDS_DA9052=m
-CONFIG_LEDS_DAC124S085=m
-CONFIG_LEDS_PWM=m
-CONFIG_LEDS_REGULATOR=m
-CONFIG_LEDS_BD2802=m
-CONFIG_LEDS_INTEL_SS4200=m
-CONFIG_LEDS_LT3593=m
-CONFIG_LEDS_MC13783=m
-CONFIG_LEDS_TCA6507=m
-CONFIG_LEDS_TLC591XX=m
-CONFIG_LEDS_LM355x=m
-CONFIG_LEDS_MENF21BMC=m
-
-#
-# LED driver for blink(1) USB RGB LED is under Special HID drivers (HID_THINGM)
-#
-CONFIG_LEDS_BLINKM=m
-CONFIG_LEDS_MLXCPLD=m
-CONFIG_LEDS_USER=m
-CONFIG_LEDS_NIC78BX=m
-
-#
-# LED Triggers
-#
-CONFIG_LEDS_TRIGGERS=y
-CONFIG_LEDS_TRIGGER_TIMER=m
-CONFIG_LEDS_TRIGGER_ONESHOT=m
-CONFIG_LEDS_TRIGGER_DISK=y
-# CONFIG_LEDS_TRIGGER_MTD is not set
-CONFIG_LEDS_TRIGGER_HEARTBEAT=m
-CONFIG_LEDS_TRIGGER_BACKLIGHT=m
-CONFIG_LEDS_TRIGGER_CPU=y
-CONFIG_LEDS_TRIGGER_GPIO=m
-CONFIG_LEDS_TRIGGER_DEFAULT_ON=m
-
-#
-# iptables trigger is under Netfilter config (LED target)
-#
-CONFIG_LEDS_TRIGGER_TRANSIENT=m
-CONFIG_LEDS_TRIGGER_CAMERA=m
-CONFIG_LEDS_TRIGGER_PANIC=y
-CONFIG_ACCESSIBILITY=y
-CONFIG_A11Y_BRAILLE_CONSOLE=y
-CONFIG_INFINIBAND=m
-CONFIG_INFINIBAND_USER_MAD=m
-CONFIG_INFINIBAND_USER_ACCESS=m
-# CONFIG_INFINIBAND_EXP_USER_ACCESS is not set
-CONFIG_INFINIBAND_USER_MEM=y
-CONFIG_INFINIBAND_ON_DEMAND_PAGING=y
-CONFIG_INFINIBAND_ADDR_TRANS=y
-CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS=y
-CONFIG_INFINIBAND_MTHCA=m
-CONFIG_INFINIBAND_MTHCA_DEBUG=y
-CONFIG_INFINIBAND_QIB=m
-CONFIG_INFINIBAND_QIB_DCA=y
-CONFIG_INFINIBAND_CXGB3=m
-# CONFIG_INFINIBAND_CXGB3_DEBUG is not set
-CONFIG_INFINIBAND_CXGB4=m
-CONFIG_INFINIBAND_I40IW=m
-CONFIG_MLX4_INFINIBAND=m
-CONFIG_MLX5_INFINIBAND=m
-CONFIG_INFINIBAND_NES=m
-# CONFIG_INFINIBAND_NES_DEBUG is not set
-CONFIG_INFINIBAND_OCRDMA=m
-CONFIG_INFINIBAND_VMWARE_PVRDMA=m
-CONFIG_INFINIBAND_USNIC=m
-CONFIG_INFINIBAND_IPOIB=m
-CONFIG_INFINIBAND_IPOIB_CM=y
-CONFIG_INFINIBAND_IPOIB_DEBUG=y
-# CONFIG_INFINIBAND_IPOIB_DEBUG_DATA is not set
-CONFIG_INFINIBAND_SRP=m
-CONFIG_INFINIBAND_SRPT=m
-CONFIG_INFINIBAND_ISER=m
-CONFIG_INFINIBAND_ISERT=m
-CONFIG_INFINIBAND_OPA_VNIC=m
-CONFIG_INFINIBAND_RDMAVT=m
-CONFIG_RDMA_RXE=m
-CONFIG_INFINIBAND_HFI1=m
-# CONFIG_HFI1_DEBUG_SDMA_ORDER is not set
-# CONFIG_SDMA_VERBOSITY is not set
-CONFIG_INFINIBAND_QEDR=m
-CONFIG_INFINIBAND_BNXT_RE=m
-CONFIG_EDAC_ATOMIC_SCRUB=y
-CONFIG_EDAC_SUPPORT=y
-CONFIG_EDAC=y
-CONFIG_EDAC_LEGACY_SYSFS=y
-# CONFIG_EDAC_DEBUG is not set
-CONFIG_EDAC_DECODE_MCE=m
-CONFIG_EDAC_GHES=y
-CONFIG_EDAC_AMD64=m
-CONFIG_EDAC_AMD64_ERROR_INJECTION=y
-CONFIG_EDAC_E752X=m
-CONFIG_EDAC_I82975X=m
-CONFIG_EDAC_I3000=m
-CONFIG_EDAC_I3200=m
-CONFIG_EDAC_IE31200=m
-CONFIG_EDAC_X38=m
-CONFIG_EDAC_I5400=m
-CONFIG_EDAC_I7CORE=m
-CONFIG_EDAC_I5000=m
-CONFIG_EDAC_I5100=m
-CONFIG_EDAC_I7300=m
-CONFIG_EDAC_SBRIDGE=m
-CONFIG_EDAC_SKX=m
-CONFIG_EDAC_PND2=m
-CONFIG_RTC_LIB=y
-CONFIG_RTC_MC146818_LIB=y
-CONFIG_RTC_CLASS=y
-CONFIG_RTC_HCTOSYS=y
-CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
-CONFIG_RTC_SYSTOHC=y
-CONFIG_RTC_SYSTOHC_DEVICE="rtc0"
-# CONFIG_RTC_DEBUG is not set
-CONFIG_RTC_NVMEM=y
-
-#
-# RTC interfaces
-#
-CONFIG_RTC_INTF_SYSFS=y
-CONFIG_RTC_INTF_PROC=y
-CONFIG_RTC_INTF_DEV=y
-CONFIG_RTC_INTF_DEV_UIE_EMUL=y
-# CONFIG_RTC_DRV_TEST is not set
-
-#
-# I2C RTC drivers
-#
-CONFIG_RTC_DRV_88PM80X=m
-CONFIG_RTC_DRV_ABB5ZES3=m
-CONFIG_RTC_DRV_ABX80X=m
-CONFIG_RTC_DRV_DS1307=m
-CONFIG_RTC_DRV_DS1307_HWMON=y
-# CONFIG_RTC_DRV_DS1307_CENTURY is not set
-CONFIG_RTC_DRV_DS1374=m
-CONFIG_RTC_DRV_DS1374_WDT=y
-CONFIG_RTC_DRV_DS1672=m
-CONFIG_RTC_DRV_MAX6900=m
-CONFIG_RTC_DRV_MAX8907=m
-CONFIG_RTC_DRV_RS5C372=m
-CONFIG_RTC_DRV_ISL1208=m
-CONFIG_RTC_DRV_ISL12022=m
-CONFIG_RTC_DRV_X1205=m
-CONFIG_RTC_DRV_PCF8523=m
-CONFIG_RTC_DRV_PCF85063=m
-CONFIG_RTC_DRV_PCF8563=m
-CONFIG_RTC_DRV_PCF8583=m
-CONFIG_RTC_DRV_M41T80=m
-CONFIG_RTC_DRV_M41T80_WDT=y
-CONFIG_RTC_DRV_BQ32K=m
-CONFIG_RTC_DRV_S35390A=m
-CONFIG_RTC_DRV_FM3130=m
-CONFIG_RTC_DRV_RX8010=m
-CONFIG_RTC_DRV_RX8581=m
-CONFIG_RTC_DRV_RX8025=m
-CONFIG_RTC_DRV_EM3027=m
-CONFIG_RTC_DRV_RV8803=m
-
-#
-# SPI RTC drivers
-#
-CONFIG_RTC_DRV_M41T93=m
-CONFIG_RTC_DRV_M41T94=m
-CONFIG_RTC_DRV_DS1302=m
-CONFIG_RTC_DRV_DS1305=m
-CONFIG_RTC_DRV_DS1343=m
-CONFIG_RTC_DRV_DS1347=m
-CONFIG_RTC_DRV_DS1390=m
-CONFIG_RTC_DRV_MAX6916=m
-CONFIG_RTC_DRV_R9701=m
-CONFIG_RTC_DRV_RX4581=m
-CONFIG_RTC_DRV_RX6110=m
-CONFIG_RTC_DRV_RS5C348=m
-CONFIG_RTC_DRV_MAX6902=m
-CONFIG_RTC_DRV_PCF2123=m
-CONFIG_RTC_DRV_MCP795=m
-CONFIG_RTC_I2C_AND_SPI=m
-
-#
-# SPI and I2C RTC drivers
-#
-CONFIG_RTC_DRV_DS3232=m
-CONFIG_RTC_DRV_DS3232_HWMON=y
-CONFIG_RTC_DRV_PCF2127=m
-CONFIG_RTC_DRV_RV3029C2=m
-CONFIG_RTC_DRV_RV3029_HWMON=y
-
-#
-# Platform RTC drivers
-#
-CONFIG_RTC_DRV_CMOS=y
-CONFIG_RTC_DRV_DS1286=m
-CONFIG_RTC_DRV_DS1511=m
-CONFIG_RTC_DRV_DS1553=m
-CONFIG_RTC_DRV_DS1685_FAMILY=m
-CONFIG_RTC_DRV_DS1685=y
-# CONFIG_RTC_DRV_DS1689 is not set
-# CONFIG_RTC_DRV_DS17285 is not set
-# CONFIG_RTC_DRV_DS17485 is not set
-# CONFIG_RTC_DRV_DS17885 is not set
-CONFIG_RTC_DS1685_PROC_REGS=y
-CONFIG_RTC_DS1685_SYSFS_REGS=y
-CONFIG_RTC_DRV_DS1742=m
-CONFIG_RTC_DRV_DS2404=m
-CONFIG_RTC_DRV_DA9052=m
-CONFIG_RTC_DRV_DA9063=m
-CONFIG_RTC_DRV_STK17TA8=m
-CONFIG_RTC_DRV_M48T86=m
-CONFIG_RTC_DRV_M48T35=m
-CONFIG_RTC_DRV_M48T59=m
-CONFIG_RTC_DRV_MSM6242=m
-CONFIG_RTC_DRV_BQ4802=m
-CONFIG_RTC_DRV_RP5C01=m
-CONFIG_RTC_DRV_V3020=m
-CONFIG_RTC_DRV_WM831X=m
-CONFIG_RTC_DRV_PCF50633=m
-
-#
-# on-CPU RTC drivers
-#
-CONFIG_RTC_DRV_FTRTC010=m
-CONFIG_RTC_DRV_PCAP=m
-CONFIG_RTC_DRV_MC13XXX=m
-CONFIG_RTC_DRV_MT6397=m
-
-#
-# HID Sensor RTC drivers
-#
-CONFIG_RTC_DRV_HID_SENSOR_TIME=m
-CONFIG_DMADEVICES=y
-# CONFIG_DMADEVICES_DEBUG is not set
-
-#
-# DMA Devices
-#
-CONFIG_DMA_ENGINE=y
-CONFIG_DMA_VIRTUAL_CHANNELS=y
-CONFIG_DMA_ACPI=y
-CONFIG_ALTERA_MSGDMA=m
-CONFIG_INTEL_IDMA64=m
-CONFIG_INTEL_IOATDMA=m
-CONFIG_INTEL_MIC_X100_DMA=m
-CONFIG_QCOM_HIDMA_MGMT=m
-CONFIG_QCOM_HIDMA=m
-CONFIG_DW_DMAC_CORE=y
-CONFIG_DW_DMAC=m
-CONFIG_DW_DMAC_PCI=y
-CONFIG_HSU_DMA=y
-
-#
-# DMA Clients
-#
-CONFIG_ASYNC_TX_DMA=y
-# CONFIG_DMATEST is not set
-CONFIG_DMA_ENGINE_RAID=y
-
-#
-# DMABUF options
-#
-CONFIG_SYNC_FILE=y
-# CONFIG_SW_SYNC is not set
-CONFIG_DCA=m
-CONFIG_AUXDISPLAY=y
-CONFIG_HD44780=m
-CONFIG_KS0108=m
-CONFIG_KS0108_PORT=0x378
-CONFIG_KS0108_DELAY=2
-CONFIG_CFAG12864B=m
-CONFIG_CFAG12864B_RATE=20
-CONFIG_IMG_ASCII_LCD=m
-CONFIG_PANEL=m
-CONFIG_PANEL_PARPORT=0
-CONFIG_PANEL_PROFILE=5
-# CONFIG_PANEL_CHANGE_MESSAGE is not set
-CONFIG_CHARLCD=m
-CONFIG_UIO=m
-CONFIG_UIO_CIF=m
-CONFIG_UIO_PDRV_GENIRQ=m
-CONFIG_UIO_DMEM_GENIRQ=m
-CONFIG_UIO_AEC=m
-CONFIG_UIO_SERCOS3=m
-CONFIG_UIO_PCI_GENERIC=m
-CONFIG_UIO_NETX=m
-CONFIG_UIO_PRUSS=m
-CONFIG_UIO_MF624=m
-CONFIG_UIO_HV_GENERIC=m
-CONFIG_VFIO_IOMMU_TYPE1=m
-CONFIG_VFIO_VIRQFD=m
-CONFIG_VFIO=m
-# CONFIG_VFIO_NOIOMMU is not set
-CONFIG_VFIO_PCI=m
-CONFIG_VFIO_PCI_VGA=y
-CONFIG_VFIO_PCI_MMAP=y
-CONFIG_VFIO_PCI_INTX=y
-CONFIG_VFIO_PCI_IGD=y
-CONFIG_VFIO_MDEV=m
-CONFIG_VFIO_MDEV_DEVICE=m
-CONFIG_IRQ_BYPASS_MANAGER=m
-CONFIG_VIRT_DRIVERS=y
-CONFIG_VIRTIO=m
-
-#
-# Virtio drivers
-#
-CONFIG_VIRTIO_PCI=m
-CONFIG_VIRTIO_PCI_LEGACY=y
-CONFIG_VIRTIO_BALLOON=m
-CONFIG_VIRTIO_INPUT=m
-CONFIG_VIRTIO_MMIO=m
-CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y
-
-#
-# Microsoft Hyper-V guest support
-#
-CONFIG_HYPERV=m
-CONFIG_HYPERV_TSCPAGE=y
-CONFIG_HYPERV_UTILS=m
-CONFIG_HYPERV_BALLOON=m
-CONFIG_STAGING=y
-CONFIG_IRDA=m
-
-#
-# IrDA protocols
-#
-CONFIG_IRLAN=m
-CONFIG_IRNET=m
-CONFIG_IRCOMM=m
-CONFIG_IRDA_ULTRA=y
-
-#
-# IrDA options
-#
-CONFIG_IRDA_CACHE_LAST_LSAP=y
-CONFIG_IRDA_FAST_RR=y
-# CONFIG_IRDA_DEBUG is not set
-
-#
-# Infrared-port device drivers
-#
-
-#
-# SIR device drivers
-#
-CONFIG_IRTTY_SIR=m
-
-#
-# Dongle support
-#
-CONFIG_DONGLE=y
-CONFIG_ESI_DONGLE=m
-CONFIG_ACTISYS_DONGLE=m
-CONFIG_TEKRAM_DONGLE=m
-CONFIG_TOIM3232_DONGLE=m
-CONFIG_LITELINK_DONGLE=m
-CONFIG_MA600_DONGLE=m
-CONFIG_GIRBIL_DONGLE=m
-CONFIG_MCP2120_DONGLE=m
-CONFIG_OLD_BELKIN_DONGLE=m
-CONFIG_ACT200L_DONGLE=m
-CONFIG_KINGSUN_DONGLE=m
-CONFIG_KSDAZZLE_DONGLE=m
-CONFIG_KS959_DONGLE=m
-
-#
-# FIR device drivers
-#
-CONFIG_USB_IRDA=m
-CONFIG_SIGMATEL_FIR=m
-CONFIG_NSC_FIR=m
-CONFIG_WINBOND_FIR=m
-CONFIG_SMC_IRCC_FIR=m
-CONFIG_ALI_FIR=m
-CONFIG_VLSI_FIR=m
-CONFIG_VIA_FIR=m
-CONFIG_MCS_FIR=m
-CONFIG_PRISM2_USB=m
-CONFIG_COMEDI=m
-# CONFIG_COMEDI_DEBUG is not set
-CONFIG_COMEDI_DEFAULT_BUF_SIZE_KB=2048
-CONFIG_COMEDI_DEFAULT_BUF_MAXSIZE_KB=20480
-CONFIG_COMEDI_MISC_DRIVERS=y
-CONFIG_COMEDI_BOND=m
-CONFIG_COMEDI_TEST=m
-CONFIG_COMEDI_PARPORT=m
-CONFIG_COMEDI_SERIAL2002=m
-CONFIG_COMEDI_ISA_DRIVERS=y
-CONFIG_COMEDI_PCL711=m
-CONFIG_COMEDI_PCL724=m
-CONFIG_COMEDI_PCL726=m
-CONFIG_COMEDI_PCL730=m
-CONFIG_COMEDI_PCL812=m
-CONFIG_COMEDI_PCL816=m
-CONFIG_COMEDI_PCL818=m
-CONFIG_COMEDI_PCM3724=m
-CONFIG_COMEDI_AMPLC_DIO200_ISA=m
-CONFIG_COMEDI_AMPLC_PC236_ISA=m
-CONFIG_COMEDI_AMPLC_PC263_ISA=m
-CONFIG_COMEDI_RTI800=m
-CONFIG_COMEDI_RTI802=m
-CONFIG_COMEDI_DAC02=m
-CONFIG_COMEDI_DAS16M1=m
-CONFIG_COMEDI_DAS08_ISA=m
-CONFIG_COMEDI_DAS16=m
-CONFIG_COMEDI_DAS800=m
-CONFIG_COMEDI_DAS1800=m
-CONFIG_COMEDI_DAS6402=m
-CONFIG_COMEDI_DT2801=m
-CONFIG_COMEDI_DT2811=m
-CONFIG_COMEDI_DT2814=m
-CONFIG_COMEDI_DT2815=m
-CONFIG_COMEDI_DT2817=m
-CONFIG_COMEDI_DT282X=m
-CONFIG_COMEDI_DMM32AT=m
-CONFIG_COMEDI_FL512=m
-CONFIG_COMEDI_AIO_AIO12_8=m
-CONFIG_COMEDI_AIO_IIRO_16=m
-CONFIG_COMEDI_II_PCI20KC=m
-CONFIG_COMEDI_C6XDIGIO=m
-CONFIG_COMEDI_MPC624=m
-CONFIG_COMEDI_ADQ12B=m
-CONFIG_COMEDI_NI_AT_A2150=m
-CONFIG_COMEDI_NI_AT_AO=m
-CONFIG_COMEDI_NI_ATMIO=m
-CONFIG_COMEDI_NI_ATMIO16D=m
-CONFIG_COMEDI_NI_LABPC_ISA=m
-CONFIG_COMEDI_PCMAD=m
-CONFIG_COMEDI_PCMDA12=m
-CONFIG_COMEDI_PCMMIO=m
-CONFIG_COMEDI_PCMUIO=m
-CONFIG_COMEDI_MULTIQ3=m
-CONFIG_COMEDI_S526=m
-CONFIG_COMEDI_PCI_DRIVERS=m
-CONFIG_COMEDI_8255_PCI=m
-CONFIG_COMEDI_ADDI_WATCHDOG=m
-CONFIG_COMEDI_ADDI_APCI_1032=m
-CONFIG_COMEDI_ADDI_APCI_1500=m
-CONFIG_COMEDI_ADDI_APCI_1516=m
-CONFIG_COMEDI_ADDI_APCI_1564=m
-CONFIG_COMEDI_ADDI_APCI_16XX=m
-CONFIG_COMEDI_ADDI_APCI_2032=m
-CONFIG_COMEDI_ADDI_APCI_2200=m
-CONFIG_COMEDI_ADDI_APCI_3120=m
-CONFIG_COMEDI_ADDI_APCI_3501=m
-CONFIG_COMEDI_ADDI_APCI_3XXX=m
-CONFIG_COMEDI_ADL_PCI6208=m
-CONFIG_COMEDI_ADL_PCI7X3X=m
-CONFIG_COMEDI_ADL_PCI8164=m
-CONFIG_COMEDI_ADL_PCI9111=m
-CONFIG_COMEDI_ADL_PCI9118=m
-CONFIG_COMEDI_ADV_PCI1710=m
-CONFIG_COMEDI_ADV_PCI1720=m
-CONFIG_COMEDI_ADV_PCI1723=m
-CONFIG_COMEDI_ADV_PCI1724=m
-CONFIG_COMEDI_ADV_PCI1760=m
-CONFIG_COMEDI_ADV_PCI_DIO=m
-CONFIG_COMEDI_AMPLC_DIO200_PCI=m
-CONFIG_COMEDI_AMPLC_PC236_PCI=m
-CONFIG_COMEDI_AMPLC_PC263_PCI=m
-CONFIG_COMEDI_AMPLC_PCI224=m
-CONFIG_COMEDI_AMPLC_PCI230=m
-CONFIG_COMEDI_CONTEC_PCI_DIO=m
-CONFIG_COMEDI_DAS08_PCI=m
-CONFIG_COMEDI_DT3000=m
-CONFIG_COMEDI_DYNA_PCI10XX=m
-CONFIG_COMEDI_GSC_HPDI=m
-CONFIG_COMEDI_MF6X4=m
-CONFIG_COMEDI_ICP_MULTI=m
-CONFIG_COMEDI_DAQBOARD2000=m
-CONFIG_COMEDI_JR3_PCI=m
-CONFIG_COMEDI_KE_COUNTER=m
-CONFIG_COMEDI_CB_PCIDAS64=m
-CONFIG_COMEDI_CB_PCIDAS=m
-CONFIG_COMEDI_CB_PCIDDA=m
-CONFIG_COMEDI_CB_PCIMDAS=m
-CONFIG_COMEDI_CB_PCIMDDA=m
-CONFIG_COMEDI_ME4000=m
-CONFIG_COMEDI_ME_DAQ=m
-CONFIG_COMEDI_NI_6527=m
-CONFIG_COMEDI_NI_65XX=m
-CONFIG_COMEDI_NI_660X=m
-CONFIG_COMEDI_NI_670X=m
-CONFIG_COMEDI_NI_LABPC_PCI=m
-CONFIG_COMEDI_NI_PCIDIO=m
-CONFIG_COMEDI_NI_PCIMIO=m
-CONFIG_COMEDI_RTD520=m
-CONFIG_COMEDI_S626=m
-CONFIG_COMEDI_MITE=m
-CONFIG_COMEDI_NI_TIOCMD=m
-CONFIG_COMEDI_PCMCIA_DRIVERS=m
-CONFIG_COMEDI_CB_DAS16_CS=m
-CONFIG_COMEDI_DAS08_CS=m
-CONFIG_COMEDI_NI_DAQ_700_CS=m
-CONFIG_COMEDI_NI_DAQ_DIO24_CS=m
-CONFIG_COMEDI_NI_LABPC_CS=m
-CONFIG_COMEDI_NI_MIO_CS=m
-CONFIG_COMEDI_QUATECH_DAQP_CS=m
-CONFIG_COMEDI_USB_DRIVERS=m
-CONFIG_COMEDI_DT9812=m
-CONFIG_COMEDI_NI_USB6501=m
-CONFIG_COMEDI_USBDUX=m
-CONFIG_COMEDI_USBDUXFAST=m
-CONFIG_COMEDI_USBDUXSIGMA=m
-CONFIG_COMEDI_VMK80XX=m
-CONFIG_COMEDI_8254=m
-CONFIG_COMEDI_8255=m
-CONFIG_COMEDI_8255_SA=m
-CONFIG_COMEDI_KCOMEDILIB=m
-CONFIG_COMEDI_AMPLC_DIO200=m
-CONFIG_COMEDI_AMPLC_PC236=m
-CONFIG_COMEDI_DAS08=m
-CONFIG_COMEDI_ISADMA=m
-CONFIG_COMEDI_NI_LABPC=m
-CONFIG_COMEDI_NI_LABPC_ISADMA=m
-CONFIG_COMEDI_NI_TIO=m
-CONFIG_RTL8192U=m
-CONFIG_RTLLIB=m
-CONFIG_RTLLIB_CRYPTO_CCMP=m
-CONFIG_RTLLIB_CRYPTO_TKIP=m
-CONFIG_RTLLIB_CRYPTO_WEP=m
-CONFIG_RTL8192E=m
-CONFIG_RTL8723BS=m
-CONFIG_R8712U=m
-CONFIG_R8188EU=m
-CONFIG_88EU_AP_MODE=y
-CONFIG_R8822BE=m
-CONFIG_RTLHALMAC_ST=m
-CONFIG_RTLPHYDM_ST=m
-CONFIG_RTLWIFI_DEBUG_ST=y
-CONFIG_RTS5208=m
-CONFIG_VT6655=m
-CONFIG_VT6656=m
-
-#
-# IIO staging drivers
-#
-
-#
-# Accelerometers
-#
-CONFIG_ADIS16201=m
-CONFIG_ADIS16203=m
-CONFIG_ADIS16209=m
-CONFIG_ADIS16240=m
-
-#
-# Analog to digital converters
-#
-CONFIG_AD7606=m
-CONFIG_AD7606_IFACE_PARALLEL=m
-CONFIG_AD7606_IFACE_SPI=m
-CONFIG_AD7780=m
-CONFIG_AD7816=m
-CONFIG_AD7192=m
-CONFIG_AD7280=m
-
-#
-# Analog/digital bi-directional converters
-#
-CONFIG_ADT7316=m
-CONFIG_ADT7316_SPI=m
-CONFIG_ADT7316_I2C=m
-
-#
-# Capacitance to digital converters
-#
-CONFIG_AD7150=m
-CONFIG_AD7152=m
-CONFIG_AD7746=m
-
-#
-# Direct Digital Synthesis
-#
-CONFIG_AD9832=m
-CONFIG_AD9834=m
-
-#
-# Digital gyroscope sensors
-#
-CONFIG_ADIS16060=m
-
-#
-# Network Analyzer, Impedance Converters
-#
-CONFIG_AD5933=m
-
-#
-# Light sensors
-#
-CONFIG_TSL2x7x=m
-
-#
-# Active energy metering IC
-#
-CONFIG_ADE7753=m
-CONFIG_ADE7754=m
-CONFIG_ADE7758=m
-CONFIG_ADE7759=m
-CONFIG_ADE7854=m
-CONFIG_ADE7854_I2C=m
-CONFIG_ADE7854_SPI=m
-
-#
-# Resolver to digital converters
-#
-CONFIG_AD2S90=m
-CONFIG_AD2S1200=m
-CONFIG_AD2S1210=m
-
-#
-# Triggers - standalone
-#
-CONFIG_FB_SM750=m
-CONFIG_FB_XGI=m
-
-#
-# Speakup console speech
-#
-CONFIG_SPEAKUP=m
-CONFIG_SPEAKUP_SYNTH_ACNTSA=m
-CONFIG_SPEAKUP_SYNTH_APOLLO=m
-CONFIG_SPEAKUP_SYNTH_AUDPTR=m
-CONFIG_SPEAKUP_SYNTH_BNS=m
-CONFIG_SPEAKUP_SYNTH_DECTLK=m
-CONFIG_SPEAKUP_SYNTH_DECEXT=m
-CONFIG_SPEAKUP_SYNTH_LTLK=m
-CONFIG_SPEAKUP_SYNTH_SOFT=m
-CONFIG_SPEAKUP_SYNTH_SPKOUT=m
-CONFIG_SPEAKUP_SYNTH_TXPRT=m
-# CONFIG_SPEAKUP_SYNTH_DUMMY is not set
-CONFIG_STAGING_MEDIA=y
-CONFIG_INTEL_ATOMISP=y
-CONFIG_VIDEO_ATOMISP=m
-CONFIG_VIDEO_OV5693=m
-CONFIG_VIDEO_IMX=m
-CONFIG_VIDEO_OV2722=m
-CONFIG_VIDEO_GC2235=m
-CONFIG_VIDEO_OV8858=m
-CONFIG_VIDEO_MSRLIST_HELPER=m
-CONFIG_VIDEO_MT9M114=m
-CONFIG_VIDEO_AP1302=m
-CONFIG_VIDEO_GC0310=m
-CONFIG_VIDEO_OV2680=m
-CONFIG_VIDEO_LM3554=m
-CONFIG_I2C_BCM2048=m
-CONFIG_DVB_CXD2099=m
-CONFIG_LIRC_STAGING=y
-CONFIG_LIRC_ZILOG=m
-
-#
-# Android
-#
-CONFIG_LTE_GDM724X=m
-CONFIG_FIREWIRE_SERIAL=m
-CONFIG_FWTTY_MAX_TOTAL_PORTS=64
-CONFIG_FWTTY_MAX_CARD_PORTS=32
-CONFIG_MTD_SPINAND_MT29F=m
-CONFIG_MTD_SPINAND_ONDIEECC=y
-CONFIG_LNET=m
-CONFIG_LNET_MAX_PAYLOAD=1048576
-# CONFIG_LNET_SELFTEST is not set
-CONFIG_LNET_XPRT_IB=m
-CONFIG_LUSTRE_FS=m
-# CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK is not set
-CONFIG_DGNC=m
-CONFIG_GS_FPGABOOT=m
-CONFIG_CRYPTO_SKEIN=m
-CONFIG_UNISYSSPAR=y
-# CONFIG_UNISYS_VISORBUS is not set
-CONFIG_FB_TFT=m
-CONFIG_FB_TFT_AGM1264K_FL=m
-CONFIG_FB_TFT_BD663474=m
-CONFIG_FB_TFT_HX8340BN=m
-CONFIG_FB_TFT_HX8347D=m
-CONFIG_FB_TFT_HX8353D=m
-# CONFIG_FB_TFT_HX8357D is not set
-# CONFIG_FB_TFT_ILI9163 is not set
-CONFIG_FB_TFT_ILI9320=m
-CONFIG_FB_TFT_ILI9325=m
-CONFIG_FB_TFT_ILI9340=m
-CONFIG_FB_TFT_ILI9341=m
-CONFIG_FB_TFT_ILI9481=m
-CONFIG_FB_TFT_ILI9486=m
-CONFIG_FB_TFT_PCD8544=m
-CONFIG_FB_TFT_RA8875=m
-CONFIG_FB_TFT_S6D02A1=m
-CONFIG_FB_TFT_S6D1121=m
-CONFIG_FB_TFT_SH1106=m
-CONFIG_FB_TFT_SSD1289=m
-CONFIG_FB_TFT_SSD1305=m
-CONFIG_FB_TFT_SSD1306=m
-CONFIG_FB_TFT_SSD1325=m
-CONFIG_FB_TFT_SSD1331=m
-CONFIG_FB_TFT_SSD1351=m
-CONFIG_FB_TFT_ST7735R=m
-CONFIG_FB_TFT_ST7789V=m
-CONFIG_FB_TFT_TINYLCD=m
-CONFIG_FB_TFT_TLS8204=m
-CONFIG_FB_TFT_UC1611=m
-CONFIG_FB_TFT_UC1701=m
-CONFIG_FB_TFT_UPD161704=m
-CONFIG_FB_TFT_WATTEROTT=m
-CONFIG_FB_FLEX=m
-CONFIG_FB_TFT_FBTFT_DEVICE=m
-CONFIG_WILC1000=m
-CONFIG_WILC1000_SDIO=m
-CONFIG_WILC1000_SPI=m
-# CONFIG_WILC1000_HW_OOB_INTR is not set
-CONFIG_MOST=m
-CONFIG_MOSTCORE=m
-CONFIG_AIM_CDEV=m
-CONFIG_AIM_NETWORK=m
-CONFIG_AIM_SOUND=m
-CONFIG_AIM_V4L2=m
-CONFIG_HDM_DIM2=m
-CONFIG_HDM_I2C=m
-CONFIG_HDM_USB=m
-CONFIG_KS7010=m
-CONFIG_GREYBUS=m
-CONFIG_GREYBUS_ES2=m
-CONFIG_GREYBUS_AUDIO=m
-CONFIG_GREYBUS_BOOTROM=m
-CONFIG_GREYBUS_FIRMWARE=m
-CONFIG_GREYBUS_HID=m
-CONFIG_GREYBUS_LIGHT=m
-CONFIG_GREYBUS_LOG=m
-CONFIG_GREYBUS_LOOPBACK=m
-CONFIG_GREYBUS_POWER=m
-CONFIG_GREYBUS_RAW=m
-CONFIG_GREYBUS_VIBRATOR=m
-CONFIG_GREYBUS_BRIDGED_PHY=m
-CONFIG_GREYBUS_GPIO=m
-CONFIG_GREYBUS_I2C=m
-CONFIG_GREYBUS_PWM=m
-CONFIG_GREYBUS_SDIO=m
-CONFIG_GREYBUS_SPI=m
-CONFIG_GREYBUS_UART=m
-CONFIG_GREYBUS_USB=m
-
-#
-# USB Power Delivery and Type-C drivers
-#
-CONFIG_TYPEC_TCPM=m
-CONFIG_TYPEC_TCPCI=m
-CONFIG_TYPEC_FUSB302=m
-CONFIG_DRM_VBOXVIDEO=m
-CONFIG_PI433=m
-CONFIG_X86_PLATFORM_DEVICES=y
-CONFIG_ACER_WMI=m
-CONFIG_ACERHDF=m
-CONFIG_ALIENWARE_WMI=m
-CONFIG_ASUS_LAPTOP=m
-CONFIG_DELL_SMBIOS=m
-CONFIG_DELL_LAPTOP=m
-CONFIG_DELL_WMI=m
-CONFIG_DELL_WMI_AIO=m
-CONFIG_DELL_WMI_LED=m
-CONFIG_DELL_SMO8800=m
-CONFIG_DELL_RBTN=m
-CONFIG_FUJITSU_LAPTOP=m
-CONFIG_FUJITSU_TABLET=m
-CONFIG_AMILO_RFKILL=m
-CONFIG_HP_ACCEL=m
-CONFIG_HP_WIRELESS=m
-CONFIG_HP_WMI=m
-CONFIG_MSI_LAPTOP=m
-CONFIG_PANASONIC_LAPTOP=m
-CONFIG_COMPAL_LAPTOP=m
-CONFIG_SONY_LAPTOP=m
-CONFIG_SONYPI_COMPAT=y
-CONFIG_IDEAPAD_LAPTOP=m
-CONFIG_SURFACE3_WMI=m
-CONFIG_THINKPAD_ACPI=m
-CONFIG_THINKPAD_ACPI_ALSA_SUPPORT=y
-# CONFIG_THINKPAD_ACPI_DEBUGFACILITIES is not set
-# CONFIG_THINKPAD_ACPI_DEBUG is not set
-# CONFIG_THINKPAD_ACPI_UNSAFE_LEDS is not set
-CONFIG_THINKPAD_ACPI_VIDEO=y
-CONFIG_THINKPAD_ACPI_HOTKEY_POLL=y
-CONFIG_SENSORS_HDAPS=m
-CONFIG_INTEL_MENLOW=m
-CONFIG_EEEPC_LAPTOP=m
-CONFIG_ASUS_WMI=m
-CONFIG_ASUS_NB_WMI=m
-CONFIG_EEEPC_WMI=m
-CONFIG_ASUS_WIRELESS=m
-CONFIG_ACPI_WMI=m
-CONFIG_WMI_BMOF=m
-CONFIG_MSI_WMI=m
-CONFIG_PEAQ_WMI=m
-CONFIG_TOPSTAR_LAPTOP=m
-CONFIG_ACPI_TOSHIBA=m
-CONFIG_TOSHIBA_BT_RFKILL=m
-CONFIG_TOSHIBA_HAPS=m
-CONFIG_TOSHIBA_WMI=m
-CONFIG_ACPI_CMPC=m
-CONFIG_INTEL_CHT_INT33FE=m
-CONFIG_INTEL_INT0002_VGPIO=m
-CONFIG_INTEL_HID_EVENT=m
-CONFIG_INTEL_VBTN=m
-CONFIG_INTEL_IPS=m
-CONFIG_INTEL_PMC_CORE=y
-CONFIG_IBM_RTL=m
-CONFIG_SAMSUNG_LAPTOP=m
-CONFIG_MXM_WMI=m
-CONFIG_INTEL_OAKTRAIL=m
-CONFIG_SAMSUNG_Q10=m
-CONFIG_APPLE_GMUX=m
-CONFIG_INTEL_RST=m
-CONFIG_INTEL_SMARTCONNECT=m
-CONFIG_PVPANIC=m
-CONFIG_INTEL_PMC_IPC=m
-CONFIG_INTEL_BXTWC_PMIC_TMU=m
-CONFIG_SURFACE_PRO3_BUTTON=m
-CONFIG_SURFACE_3_BUTTON=m
-CONFIG_INTEL_PUNIT_IPC=m
-CONFIG_INTEL_TELEMETRY=m
-CONFIG_MLX_PLATFORM=m
-CONFIG_MLX_CPLD_PLATFORM=m
-# CONFIG_INTEL_TURBO_MAX_3 is not set
-CONFIG_PMC_ATOM=y
-CONFIG_CHROME_PLATFORMS=y
-CONFIG_CHROMEOS_LAPTOP=m
-CONFIG_CHROMEOS_PSTORE=m
-CONFIG_CROS_EC_CHARDEV=m
-CONFIG_CROS_EC_LPC=m
-CONFIG_CROS_EC_LPC_MEC=y
-CONFIG_CROS_EC_PROTO=y
-CONFIG_CROS_KBD_LED_BACKLIGHT=m
-CONFIG_CLKDEV_LOOKUP=y
-CONFIG_HAVE_CLK_PREPARE=y
-CONFIG_COMMON_CLK=y
-
-#
-# Common Clock Framework
-#
-CONFIG_COMMON_CLK_WM831X=m
-CONFIG_COMMON_CLK_SI5351=m
-CONFIG_COMMON_CLK_CDCE706=m
-CONFIG_COMMON_CLK_CS2000_CP=m
-# CONFIG_COMMON_CLK_NXP is not set
-CONFIG_COMMON_CLK_PWM=m
-# CONFIG_COMMON_CLK_PXA is not set
-# CONFIG_COMMON_CLK_PIC32 is not set
-CONFIG_HWSPINLOCK=m
-
-#
-# Clock Source drivers
-#
-CONFIG_CLKEVT_I8253=y
-CONFIG_I8253_LOCK=y
-CONFIG_CLKBLD_I8253=y
-# CONFIG_ATMEL_PIT is not set
-# CONFIG_SH_TIMER_CMT is not set
-# CONFIG_SH_TIMER_MTU2 is not set
-# CONFIG_SH_TIMER_TMU is not set
-# CONFIG_EM_TIMER_STI is not set
-CONFIG_MAILBOX=y
-CONFIG_PCC=y
-CONFIG_ALTERA_MBOX=m
-CONFIG_IOMMU_API=y
-CONFIG_IOMMU_SUPPORT=y
-
-#
-# Generic IOMMU Pagetable Support
-#
-CONFIG_IOMMU_IOVA=y
-CONFIG_AMD_IOMMU=y
-CONFIG_AMD_IOMMU_V2=m
-CONFIG_DMAR_TABLE=y
-CONFIG_INTEL_IOMMU=y
-CONFIG_INTEL_IOMMU_SVM=y
-# CONFIG_INTEL_IOMMU_DEFAULT_ON is not set
-CONFIG_INTEL_IOMMU_FLOPPY_WA=y
-CONFIG_IRQ_REMAP=y
-
-#
-# Remoteproc drivers
-#
-CONFIG_REMOTEPROC=m
-
-#
-# Rpmsg drivers
-#
-CONFIG_RPMSG=m
-CONFIG_RPMSG_CHAR=m
-CONFIG_RPMSG_QCOM_GLINK_NATIVE=m
-CONFIG_RPMSG_QCOM_GLINK_RPM=m
-
-#
-# SOC (System On Chip) specific Drivers
-#
-
-#
-# Amlogic SoC drivers
-#
-
-#
-# Broadcom SoC drivers
-#
-
-#
-# i.MX SoC drivers
-#
-
-#
-# Qualcomm SoC drivers
-#
-# CONFIG_SUNXI_SRAM is not set
-CONFIG_SOC_TI=y
-CONFIG_PM_DEVFREQ=y
-
-#
-# DEVFREQ Governors
-#
-CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y
-CONFIG_DEVFREQ_GOV_PERFORMANCE=y
-CONFIG_DEVFREQ_GOV_POWERSAVE=y
-CONFIG_DEVFREQ_GOV_USERSPACE=y
-CONFIG_DEVFREQ_GOV_PASSIVE=m
-
-#
-# DEVFREQ Drivers
-#
-CONFIG_PM_DEVFREQ_EVENT=y
-CONFIG_EXTCON=y
-
-#
-# Extcon Device Drivers
-#
-CONFIG_EXTCON_ADC_JACK=m
-CONFIG_EXTCON_ARIZONA=m
-CONFIG_EXTCON_AXP288=m
-CONFIG_EXTCON_GPIO=m
-# CONFIG_EXTCON_INTEL_INT3496 is not set
-CONFIG_EXTCON_MAX14577=m
-CONFIG_EXTCON_MAX3355=m
-CONFIG_EXTCON_MAX77693=m
-CONFIG_EXTCON_RT8973A=m
-CONFIG_EXTCON_SM5502=m
-# CONFIG_EXTCON_USB_GPIO is not set
-CONFIG_EXTCON_USBC_CROS_EC=m
-CONFIG_MEMORY=y
-CONFIG_IIO=m
-CONFIG_IIO_BUFFER=y
-CONFIG_IIO_BUFFER_CB=m
-CONFIG_IIO_KFIFO_BUF=m
-CONFIG_IIO_TRIGGERED_BUFFER=m
-CONFIG_IIO_CONFIGFS=m
-CONFIG_IIO_TRIGGER=y
-CONFIG_IIO_CONSUMERS_PER_TRIGGER=2
-CONFIG_IIO_SW_DEVICE=m
-CONFIG_IIO_SW_TRIGGER=m
-CONFIG_IIO_TRIGGERED_EVENT=m
-
-#
-# Accelerometers
-#
-CONFIG_BMA180=m
-CONFIG_BMA220=m
-CONFIG_BMC150_ACCEL=m
-CONFIG_BMC150_ACCEL_I2C=m
-CONFIG_BMC150_ACCEL_SPI=m
-CONFIG_DA280=m
-CONFIG_DA311=m
-CONFIG_DMARD09=m
-CONFIG_DMARD10=m
-CONFIG_HID_SENSOR_ACCEL_3D=m
-CONFIG_IIO_ST_ACCEL_3AXIS=m
-CONFIG_IIO_ST_ACCEL_I2C_3AXIS=m
-CONFIG_IIO_ST_ACCEL_SPI_3AXIS=m
-CONFIG_KXSD9=m
-CONFIG_KXSD9_SPI=m
-CONFIG_KXSD9_I2C=m
-CONFIG_KXCJK1013=m
-CONFIG_MC3230=m
-CONFIG_MMA7455=m
-CONFIG_MMA7455_I2C=m
-CONFIG_MMA7455_SPI=m
-CONFIG_MMA7660=m
-CONFIG_MMA8452=m
-CONFIG_MMA9551_CORE=m
-CONFIG_MMA9551=m
-CONFIG_MMA9553=m
-CONFIG_MXC4005=m
-CONFIG_MXC6255=m
-CONFIG_SCA3000=m
-CONFIG_STK8312=m
-CONFIG_STK8BA50=m
-
-#
-# Analog to digital converters
-#
-CONFIG_AD_SIGMA_DELTA=m
-CONFIG_AD7266=m
-CONFIG_AD7291=m
-CONFIG_AD7298=m
-CONFIG_AD7476=m
-CONFIG_AD7766=m
-CONFIG_AD7791=m
-CONFIG_AD7793=m
-CONFIG_AD7887=m
-CONFIG_AD7923=m
-CONFIG_AD799X=m
-CONFIG_AXP20X_ADC=m
-CONFIG_AXP288_ADC=m
-CONFIG_CC10001_ADC=m
-CONFIG_DA9150_GPADC=m
-CONFIG_DLN2_ADC=m
-CONFIG_HI8435=m
-CONFIG_HX711=m
-CONFIG_INA2XX_ADC=m
-CONFIG_LTC2471=m
-CONFIG_LTC2485=m
-CONFIG_LTC2497=m
-CONFIG_MAX1027=m
-CONFIG_MAX11100=m
-CONFIG_MAX1118=m
-CONFIG_MAX1363=m
-CONFIG_MAX9611=m
-CONFIG_MCP320X=m
-CONFIG_MCP3422=m
-CONFIG_MEN_Z188_ADC=m
-CONFIG_NAU7802=m
-CONFIG_QCOM_VADC_COMMON=m
-CONFIG_QCOM_SPMI_IADC=m
-CONFIG_QCOM_SPMI_VADC=m
-CONFIG_TI_ADC081C=m
-CONFIG_TI_ADC0832=m
-CONFIG_TI_ADC084S021=m
-CONFIG_TI_ADC12138=m
-CONFIG_TI_ADC108S102=m
-CONFIG_TI_ADC128S052=m
-CONFIG_TI_ADC161S626=m
-CONFIG_TI_ADS1015=m
-CONFIG_TI_ADS7950=m
-CONFIG_TI_AM335X_ADC=m
-CONFIG_TI_TLC4541=m
-CONFIG_VIPERBOARD_ADC=m
-
-#
-# Amplifiers
-#
-CONFIG_AD8366=m
-
-#
-# Chemical Sensors
-#
-CONFIG_ATLAS_PH_SENSOR=m
-CONFIG_CCS811=m
-CONFIG_IAQCORE=m
-CONFIG_VZ89X=m
-CONFIG_IIO_CROS_EC_SENSORS_CORE=m
-CONFIG_IIO_CROS_EC_SENSORS=m
-
-#
-# Hid Sensor IIO Common
-#
-CONFIG_HID_SENSOR_IIO_COMMON=m
-CONFIG_HID_SENSOR_IIO_TRIGGER=m
-CONFIG_IIO_MS_SENSORS_I2C=m
-
-#
-# SSP Sensor Common
-#
-CONFIG_IIO_SSP_SENSORS_COMMONS=m
-CONFIG_IIO_SSP_SENSORHUB=m
-CONFIG_IIO_ST_SENSORS_I2C=m
-CONFIG_IIO_ST_SENSORS_SPI=m
-CONFIG_IIO_ST_SENSORS_CORE=m
-
-#
-# Counters
-#
-
-#
-# Digital to analog converters
-#
-CONFIG_AD5064=m
-CONFIG_AD5360=m
-CONFIG_AD5380=m
-CONFIG_AD5421=m
-CONFIG_AD5446=m
-CONFIG_AD5449=m
-CONFIG_AD5592R_BASE=m
-CONFIG_AD5592R=m
-CONFIG_AD5593R=m
-CONFIG_AD5504=m
-CONFIG_AD5624R_SPI=m
-CONFIG_LTC2632=m
-CONFIG_AD5686=m
-CONFIG_AD5755=m
-CONFIG_AD5761=m
-CONFIG_AD5764=m
-CONFIG_AD5791=m
-CONFIG_AD7303=m
-CONFIG_AD8801=m
-CONFIG_M62332=m
-CONFIG_MAX517=m
-CONFIG_MCP4725=m
-CONFIG_MCP4922=m
-
-#
-# IIO dummy driver
-#
-# CONFIG_IIO_SIMPLE_DUMMY is not set
-
-#
-# Frequency Synthesizers DDS/PLL
-#
-
-#
-# Clock Generator/Distribution
-#
-CONFIG_AD9523=m
-
-#
-# Phase-Locked Loop (PLL) frequency synthesizers
-#
-CONFIG_ADF4350=m
-
-#
-# Digital gyroscope sensors
-#
-CONFIG_ADIS16080=m
-CONFIG_ADIS16130=m
-CONFIG_ADIS16136=m
-CONFIG_ADIS16260=m
-CONFIG_ADXRS450=m
-CONFIG_BMG160=m
-CONFIG_BMG160_I2C=m
-CONFIG_BMG160_SPI=m
-CONFIG_HID_SENSOR_GYRO_3D=m
-CONFIG_MPU3050=m
-CONFIG_MPU3050_I2C=m
-CONFIG_IIO_ST_GYRO_3AXIS=m
-CONFIG_IIO_ST_GYRO_I2C_3AXIS=m
-CONFIG_IIO_ST_GYRO_SPI_3AXIS=m
-CONFIG_ITG3200=m
-
-#
-# Health Sensors
-#
-
-#
-# Heart Rate Monitors
-#
-CONFIG_AFE4403=m
-CONFIG_AFE4404=m
-CONFIG_MAX30100=m
-CONFIG_MAX30102=m
-
-#
-# Humidity sensors
-#
-CONFIG_AM2315=m
-CONFIG_DHT11=m
-CONFIG_HDC100X=m
-CONFIG_HID_SENSOR_HUMIDITY=m
-CONFIG_HTS221=m
-CONFIG_HTS221_I2C=m
-CONFIG_HTS221_SPI=m
-CONFIG_HTU21=m
-CONFIG_SI7005=m
-CONFIG_SI7020=m
-
-#
-# Inertial measurement units
-#
-CONFIG_ADIS16400=m
-CONFIG_ADIS16480=m
-CONFIG_BMI160=m
-CONFIG_BMI160_I2C=m
-CONFIG_BMI160_SPI=m
-CONFIG_KMX61=m
-CONFIG_INV_MPU6050_IIO=m
-CONFIG_INV_MPU6050_I2C=m
-CONFIG_INV_MPU6050_SPI=m
-CONFIG_IIO_ST_LSM6DSX=m
-CONFIG_IIO_ST_LSM6DSX_I2C=m
-CONFIG_IIO_ST_LSM6DSX_SPI=m
-CONFIG_IIO_ADIS_LIB=m
-CONFIG_IIO_ADIS_LIB_BUFFER=y
-
-#
-# Light sensors
-#
-# CONFIG_ACPI_ALS is not set
-CONFIG_ADJD_S311=m
-CONFIG_AL3320A=m
-CONFIG_APDS9300=m
-CONFIG_APDS9960=m
-CONFIG_BH1750=m
-CONFIG_BH1780=m
-CONFIG_CM32181=m
-CONFIG_CM3232=m
-CONFIG_CM3323=m
-CONFIG_CM36651=m
-CONFIG_IIO_CROS_EC_LIGHT_PROX=m
-CONFIG_GP2AP020A00F=m
-CONFIG_SENSORS_ISL29018=m
-CONFIG_SENSORS_ISL29028=m
-CONFIG_ISL29125=m
-CONFIG_HID_SENSOR_ALS=m
-CONFIG_HID_SENSOR_PROX=m
-CONFIG_JSA1212=m
-CONFIG_RPR0521=m
-CONFIG_SENSORS_LM3533=m
-CONFIG_LTR501=m
-CONFIG_MAX44000=m
-CONFIG_OPT3001=m
-CONFIG_PA12203001=m
-CONFIG_SI1145=m
-CONFIG_STK3310=m
-CONFIG_TCS3414=m
-CONFIG_TCS3472=m
-CONFIG_SENSORS_TSL2563=m
-CONFIG_TSL2583=m
-CONFIG_TSL4531=m
-CONFIG_US5182D=m
-CONFIG_VCNL4000=m
-CONFIG_VEML6070=m
-CONFIG_VL6180=m
-
-#
-# Magnetometer sensors
-#
-CONFIG_AK8975=m
-CONFIG_AK09911=m
-CONFIG_BMC150_MAGN=m
-CONFIG_BMC150_MAGN_I2C=m
-CONFIG_BMC150_MAGN_SPI=m
-CONFIG_MAG3110=m
-CONFIG_HID_SENSOR_MAGNETOMETER_3D=m
-CONFIG_MMC35240=m
-CONFIG_IIO_ST_MAGN_3AXIS=m
-CONFIG_IIO_ST_MAGN_I2C_3AXIS=m
-CONFIG_IIO_ST_MAGN_SPI_3AXIS=m
-CONFIG_SENSORS_HMC5843=m
-CONFIG_SENSORS_HMC5843_I2C=m
-CONFIG_SENSORS_HMC5843_SPI=m
-
-#
-# Multiplexers
-#
-
-#
-# Inclinometer sensors
-#
-CONFIG_HID_SENSOR_INCLINOMETER_3D=m
-CONFIG_HID_SENSOR_DEVICE_ROTATION=m
-
-#
-# Triggers - standalone
-#
-CONFIG_IIO_HRTIMER_TRIGGER=m
-CONFIG_IIO_INTERRUPT_TRIGGER=m
-CONFIG_IIO_TIGHTLOOP_TRIGGER=m
-CONFIG_IIO_SYSFS_TRIGGER=m
-
-#
-# Digital potentiometers
-#
-CONFIG_DS1803=m
-CONFIG_MAX5481=m
-CONFIG_MAX5487=m
-CONFIG_MCP4131=m
-CONFIG_MCP4531=m
-CONFIG_TPL0102=m
-
-#
-# Digital potentiostats
-#
-CONFIG_LMP91000=m
-
-#
-# Pressure sensors
-#
-CONFIG_ABP060MG=m
-CONFIG_BMP280=m
-CONFIG_BMP280_I2C=m
-CONFIG_BMP280_SPI=m
-CONFIG_IIO_CROS_EC_BARO=m
-CONFIG_HID_SENSOR_PRESS=m
-CONFIG_HP03=m
-CONFIG_MPL115=m
-CONFIG_MPL115_I2C=m
-CONFIG_MPL115_SPI=m
-CONFIG_MPL3115=m
-CONFIG_MS5611=m
-CONFIG_MS5611_I2C=m
-CONFIG_MS5611_SPI=m
-CONFIG_MS5637=m
-CONFIG_IIO_ST_PRESS=m
-CONFIG_IIO_ST_PRESS_I2C=m
-CONFIG_IIO_ST_PRESS_SPI=m
-CONFIG_T5403=m
-CONFIG_HP206C=m
-CONFIG_ZPA2326=m
-CONFIG_ZPA2326_I2C=m
-CONFIG_ZPA2326_SPI=m
-
-#
-# Lightning sensors
-#
-CONFIG_AS3935=m
-
-#
-# Proximity and distance sensors
-#
-CONFIG_LIDAR_LITE_V2=m
-CONFIG_SRF04=m
-CONFIG_SX9500=m
-CONFIG_SRF08=m
-
-#
-# Temperature sensors
-#
-CONFIG_MAXIM_THERMOCOUPLE=m
-CONFIG_HID_SENSOR_TEMP=m
-CONFIG_MLX90614=m
-CONFIG_TMP006=m
-CONFIG_TMP007=m
-CONFIG_TSYS01=m
-CONFIG_TSYS02D=m
-CONFIG_NTB=m
-CONFIG_NTB_AMD=m
-CONFIG_NTB_IDT=m
-CONFIG_NTB_INTEL=m
-CONFIG_NTB_PINGPONG=m
-CONFIG_NTB_TOOL=m
-CONFIG_NTB_PERF=m
-CONFIG_NTB_TRANSPORT=m
-CONFIG_VME_BUS=y
-
-#
-# VME Bridge Drivers
-#
-CONFIG_VME_CA91CX42=m
-CONFIG_VME_TSI148=m
-CONFIG_VME_FAKE=m
-
-#
-# VME Board Drivers
-#
-CONFIG_VMIVME_7805=m
-
-#
-# VME Device Drivers
-#
-CONFIG_VME_USER=m
-CONFIG_VME_PIO2=m
-CONFIG_PWM=y
-CONFIG_PWM_SYSFS=y
-CONFIG_PWM_CROS_EC=m
-CONFIG_PWM_LP3943=m
-CONFIG_PWM_LPSS=m
-CONFIG_PWM_LPSS_PCI=m
-CONFIG_PWM_LPSS_PLATFORM=m
-CONFIG_PWM_PCA9685=m
-CONFIG_ARM_GIC_MAX_NR=1
-CONFIG_IPACK_BUS=m
-CONFIG_BOARD_TPCI200=m
-CONFIG_SERIAL_IPOCTAL=m
-CONFIG_RESET_CONTROLLER=y
-# CONFIG_RESET_ATH79 is not set
-# CONFIG_RESET_BERLIN is not set
-# CONFIG_RESET_IMX7 is not set
-# CONFIG_RESET_LANTIQ is not set
-# CONFIG_RESET_LPC18XX is not set
-# CONFIG_RESET_MESON is not set
-# CONFIG_RESET_PISTACHIO is not set
-# CONFIG_RESET_SOCFPGA is not set
-# CONFIG_RESET_STM32 is not set
-# CONFIG_RESET_SUNXI is not set
-CONFIG_RESET_TI_SYSCON=m
-# CONFIG_RESET_ZYNQ is not set
-# CONFIG_RESET_TEGRA_BPMP is not set
-CONFIG_FMC=m
-CONFIG_FMC_FAKEDEV=m
-CONFIG_FMC_TRIVIAL=m
-CONFIG_FMC_WRITE_EEPROM=m
-CONFIG_FMC_CHARDEV=m
-
-#
-# PHY Subsystem
-#
-CONFIG_GENERIC_PHY=y
-CONFIG_BCM_KONA_USB2_PHY=m
-CONFIG_PHY_PXA_28NM_HSIC=m
-CONFIG_PHY_PXA_28NM_USB2=m
-CONFIG_PHY_CPCAP_USB=m
-CONFIG_PHY_QCOM_USB_HS=m
-CONFIG_PHY_QCOM_USB_HSIC=m
-CONFIG_PHY_SAMSUNG_USB2=m
-# CONFIG_PHY_EXYNOS4210_USB2 is not set
-# CONFIG_PHY_EXYNOS4X12_USB2 is not set
-# CONFIG_PHY_EXYNOS5250_USB2 is not set
-CONFIG_PHY_TUSB1210=m
-CONFIG_POWERCAP=y
-CONFIG_INTEL_RAPL=m
-CONFIG_MCB=m
-CONFIG_MCB_PCI=m
-CONFIG_MCB_LPC=m
-
-#
-# Performance monitor support
-#
-CONFIG_RAS=y
-CONFIG_RAS_CEC=y
-CONFIG_THUNDERBOLT=m
-
-#
-# Android
-#
-# CONFIG_ANDROID is not set
-CONFIG_LIBNVDIMM=m
-CONFIG_BLK_DEV_PMEM=m
-CONFIG_ND_BLK=m
-CONFIG_ND_CLAIM=y
-CONFIG_ND_BTT=m
-CONFIG_BTT=y
-CONFIG_DAX=y
-CONFIG_DEV_DAX=m
-CONFIG_NVMEM=y
-CONFIG_STM=m
-# CONFIG_STM_DUMMY is not set
-CONFIG_STM_SOURCE_CONSOLE=m
-CONFIG_STM_SOURCE_HEARTBEAT=m
-CONFIG_INTEL_TH=m
-CONFIG_INTEL_TH_PCI=m
-CONFIG_INTEL_TH_GTH=m
-CONFIG_INTEL_TH_STH=m
-CONFIG_INTEL_TH_MSU=m
-CONFIG_INTEL_TH_PTI=m
-# CONFIG_INTEL_TH_DEBUG is not set
-CONFIG_FPGA=m
-CONFIG_FPGA_MGR_ALTERA_CVP=m
-CONFIG_FPGA_MGR_ALTERA_PS_SPI=m
-CONFIG_FPGA_MGR_XILINX_SPI=m
-CONFIG_ALTERA_PR_IP_CORE=m
-
-#
-# FSI support
-#
-CONFIG_FSI=m
-CONFIG_FSI_MASTER_GPIO=m
-CONFIG_FSI_MASTER_HUB=m
-CONFIG_FSI_SCOM=m
-
-#
-# Firmware Drivers
-#
-CONFIG_EDD=m
-# CONFIG_EDD_OFF is not set
-CONFIG_FIRMWARE_MEMMAP=y
-CONFIG_DELL_RBU=m
-CONFIG_DCDBAS=m
-CONFIG_DMIID=y
-CONFIG_DMI_SYSFS=m
-CONFIG_DMI_SCAN_MACHINE_NON_EFI_FALLBACK=y
-CONFIG_ISCSI_IBFT_FIND=y
-CONFIG_ISCSI_IBFT=m
-CONFIG_FW_CFG_SYSFS=m
-# CONFIG_FW_CFG_SYSFS_CMDLINE is not set
-CONFIG_GOOGLE_FIRMWARE=y
-CONFIG_GOOGLE_SMI=m
-CONFIG_GOOGLE_COREBOOT_TABLE=m
-CONFIG_GOOGLE_COREBOOT_TABLE_ACPI=m
-CONFIG_GOOGLE_MEMCONSOLE=m
-CONFIG_GOOGLE_MEMCONSOLE_X86_LEGACY=m
-CONFIG_GOOGLE_MEMCONSOLE_COREBOOT=m
-CONFIG_GOOGLE_VPD=m
-
-#
-# EFI (Extensible Firmware Interface) Support
-#
-CONFIG_EFI_VARS=m
-CONFIG_EFI_ESRT=y
-CONFIG_EFI_VARS_PSTORE=m
-CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE=y
-CONFIG_EFI_RUNTIME_MAP=y
-# CONFIG_EFI_FAKE_MEMMAP is not set
-CONFIG_EFI_RUNTIME_WRAPPERS=y
-CONFIG_EFI_BOOTLOADER_CONTROL=m
-CONFIG_EFI_CAPSULE_LOADER=m
-CONFIG_EFI_TEST=m
-CONFIG_APPLE_PROPERTIES=y
-CONFIG_RESET_ATTACK_MITIGATION=y
-CONFIG_UEFI_CPER=y
-CONFIG_EFI_DEV_PATH_PARSER=y
-
-#
-# Tegra firmware driver
-#
-
-#
-# File systems
-#
-CONFIG_DCACHE_WORD_ACCESS=y
-CONFIG_FS_IOMAP=y
-CONFIG_EXT2_FS=m
-CONFIG_EXT2_FS_XATTR=y
-CONFIG_EXT2_FS_POSIX_ACL=y
-CONFIG_EXT2_FS_SECURITY=y
-CONFIG_EXT3_FS=m
-CONFIG_EXT3_FS_POSIX_ACL=y
-CONFIG_EXT3_FS_SECURITY=y
-CONFIG_EXT4_FS=m
-CONFIG_EXT4_FS_POSIX_ACL=y
-CONFIG_EXT4_FS_SECURITY=y
-CONFIG_EXT4_ENCRYPTION=y
-CONFIG_EXT4_FS_ENCRYPTION=y
-# CONFIG_EXT4_DEBUG is not set
-CONFIG_JBD2=m
-# CONFIG_JBD2_DEBUG is not set
-CONFIG_FS_MBCACHE=m
-# CONFIG_REISERFS_FS is not set
-# CONFIG_JFS_FS is not set
-CONFIG_XFS_FS=m
-CONFIG_XFS_QUOTA=y
-CONFIG_XFS_POSIX_ACL=y
-CONFIG_XFS_RT=y
-# CONFIG_XFS_WARN is not set
-# CONFIG_XFS_DEBUG is not set
-# CONFIG_GFS2_FS is not set
-# CONFIG_OCFS2_FS is not set
-CONFIG_BTRFS_FS=m
-CONFIG_BTRFS_FS_POSIX_ACL=y
-# CONFIG_BTRFS_FS_CHECK_INTEGRITY is not set
-# CONFIG_BTRFS_FS_RUN_SANITY_TESTS is not set
-# CONFIG_BTRFS_DEBUG is not set
-# CONFIG_BTRFS_ASSERT is not set
-CONFIG_NILFS2_FS=m
-CONFIG_F2FS_FS=m
-CONFIG_F2FS_STAT_FS=y
-CONFIG_F2FS_FS_XATTR=y
-CONFIG_F2FS_FS_POSIX_ACL=y
-CONFIG_F2FS_FS_SECURITY=y
-CONFIG_F2FS_CHECK_FS=y
-CONFIG_F2FS_FS_ENCRYPTION=y
-# CONFIG_F2FS_FAULT_INJECTION is not set
-CONFIG_FS_DAX=y
-CONFIG_FS_POSIX_ACL=y
-CONFIG_EXPORTFS=y
-CONFIG_EXPORTFS_BLOCK_OPS=y
-CONFIG_FILE_LOCKING=y
-CONFIG_MANDATORY_FILE_LOCKING=y
-CONFIG_FS_ENCRYPTION=m
-CONFIG_FSNOTIFY=y
-CONFIG_DNOTIFY=y
-CONFIG_INOTIFY_USER=y
-CONFIG_FANOTIFY=y
-# CONFIG_FANOTIFY_ACCESS_PERMISSIONS is not set
-CONFIG_QUOTA=y
-CONFIG_QUOTA_NETLINK_INTERFACE=y
-# CONFIG_PRINT_QUOTA_WARNING is not set
-# CONFIG_QUOTA_DEBUG is not set
-CONFIG_QUOTA_TREE=m
-CONFIG_QFMT_V1=m
-CONFIG_QFMT_V2=m
-CONFIG_QUOTACTL=y
-CONFIG_QUOTACTL_COMPAT=y
-CONFIG_AUTOFS4_FS=m
-CONFIG_FUSE_FS=m
-CONFIG_CUSE=m
-CONFIG_OVERLAY_FS=m
-# CONFIG_OVERLAY_FS_REDIRECT_DIR is not set
-CONFIG_OVERLAY_FS_INDEX=y
-
-#
-# Caches
-#
-CONFIG_FSCACHE=m
-CONFIG_FSCACHE_STATS=y
-# CONFIG_FSCACHE_HISTOGRAM is not set
-# CONFIG_FSCACHE_DEBUG is not set
-# CONFIG_FSCACHE_OBJECT_LIST is not set
-CONFIG_CACHEFILES=m
-# CONFIG_CACHEFILES_DEBUG is not set
-# CONFIG_CACHEFILES_HISTOGRAM is not set
-
-#
-# CD-ROM/DVD Filesystems
-#
-CONFIG_ISO9660_FS=m
-CONFIG_JOLIET=y
-CONFIG_ZISOFS=y
-CONFIG_UDF_FS=m
-CONFIG_UDF_NLS=y
-
-#
-# DOS/FAT/NT Filesystems
-#
-CONFIG_FAT_FS=m
-CONFIG_MSDOS_FS=m
-CONFIG_VFAT_FS=m
-CONFIG_FAT_DEFAULT_CODEPAGE=437
-CONFIG_FAT_DEFAULT_IOCHARSET="utf8"
-# CONFIG_FAT_DEFAULT_UTF8 is not set
-CONFIG_NTFS_FS=m
-# CONFIG_NTFS_DEBUG is not set
-CONFIG_NTFS_RW=y
-
-#
-# Pseudo filesystems
-#
-CONFIG_PROC_FS=y
-CONFIG_PROC_KCORE=y
-CONFIG_PROC_SYSCTL=y
-CONFIG_PROC_PAGE_MONITOR=y
-# CONFIG_PROC_CHILDREN is not set
-CONFIG_KERNFS=y
-CONFIG_SYSFS=y
-CONFIG_TMPFS=y
-CONFIG_TMPFS_POSIX_ACL=y
-CONFIG_TMPFS_XATTR=y
-CONFIG_HUGETLBFS=y
-CONFIG_HUGETLB_PAGE=y
-CONFIG_ARCH_HAS_GIGANTIC_PAGE=y
-CONFIG_CONFIGFS_FS=m
-CONFIG_EFIVAR_FS=m
-CONFIG_MISC_FILESYSTEMS=y
-CONFIG_ORANGEFS_FS=m
-CONFIG_ADFS_FS=m
-# CONFIG_ADFS_FS_RW is not set
-CONFIG_AFFS_FS=m
-CONFIG_ECRYPT_FS=m
-CONFIG_ECRYPT_FS_MESSAGING=y
-CONFIG_HFS_FS=m
-CONFIG_HFSPLUS_FS=m
-CONFIG_HFSPLUS_FS_POSIX_ACL=y
-CONFIG_BEFS_FS=m
-# CONFIG_BEFS_DEBUG is not set
-CONFIG_BFS_FS=m
-CONFIG_EFS_FS=m
-CONFIG_JFFS2_FS=m
-CONFIG_JFFS2_FS_DEBUG=0
-CONFIG_JFFS2_FS_WRITEBUFFER=y
-CONFIG_JFFS2_FS_WBUF_VERIFY=y
-CONFIG_JFFS2_SUMMARY=y
-CONFIG_JFFS2_FS_XATTR=y
-CONFIG_JFFS2_FS_POSIX_ACL=y
-CONFIG_JFFS2_FS_SECURITY=y
-CONFIG_JFFS2_COMPRESSION_OPTIONS=y
-CONFIG_JFFS2_ZLIB=y
-CONFIG_JFFS2_LZO=y
-CONFIG_JFFS2_RTIME=y
-CONFIG_JFFS2_RUBIN=y
-# CONFIG_JFFS2_CMODE_NONE is not set
-CONFIG_JFFS2_CMODE_PRIORITY=y
-# CONFIG_JFFS2_CMODE_SIZE is not set
-# CONFIG_JFFS2_CMODE_FAVOURLZO is not set
-CONFIG_UBIFS_FS=m
-CONFIG_UBIFS_FS_ADVANCED_COMPR=y
-CONFIG_UBIFS_FS_LZO=y
-CONFIG_UBIFS_FS_ZLIB=y
-CONFIG_UBIFS_ATIME_SUPPORT=y
-CONFIG_UBIFS_FS_ENCRYPTION=y
-CONFIG_UBIFS_FS_SECURITY=y
-CONFIG_CRAMFS=m
-CONFIG_SQUASHFS=m
-# CONFIG_SQUASHFS_FILE_CACHE is not set
-CONFIG_SQUASHFS_FILE_DIRECT=y
-# CONFIG_SQUASHFS_DECOMP_SINGLE is not set
-# CONFIG_SQUASHFS_DECOMP_MULTI is not set
-CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU=y
-CONFIG_SQUASHFS_XATTR=y
-CONFIG_SQUASHFS_ZLIB=y
-CONFIG_SQUASHFS_LZ4=y
-CONFIG_SQUASHFS_LZO=y
-CONFIG_SQUASHFS_XZ=y
-CONFIG_SQUASHFS_ZSTD=y
-CONFIG_SQUASHFS_4K_DEVBLK_SIZE=y
-CONFIG_SQUASHFS_EMBEDDED=y
-CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3
-CONFIG_VXFS_FS=m
-CONFIG_MINIX_FS=m
-CONFIG_OMFS_FS=m
-CONFIG_HPFS_FS=m
-CONFIG_QNX4FS_FS=m
-CONFIG_QNX6FS_FS=m
-# CONFIG_QNX6FS_DEBUG is not set
-CONFIG_ROMFS_FS=m
-# CONFIG_ROMFS_BACKED_BY_BLOCK is not set
-# CONFIG_ROMFS_BACKED_BY_MTD is not set
-CONFIG_ROMFS_BACKED_BY_BOTH=y
-CONFIG_ROMFS_ON_BLOCK=y
-CONFIG_ROMFS_ON_MTD=y
-CONFIG_PSTORE=y
-# CONFIG_PSTORE_ZLIB_COMPRESS is not set
-# CONFIG_PSTORE_LZO_COMPRESS is not set
-CONFIG_PSTORE_LZ4_COMPRESS=y
-# CONFIG_PSTORE_CONSOLE is not set
-CONFIG_PSTORE_PMSG=y
-CONFIG_PSTORE_RAM=m
-CONFIG_SYSV_FS=m
-CONFIG_UFS_FS=m
-# CONFIG_UFS_FS_WRITE is not set
-# CONFIG_UFS_DEBUG is not set
-CONFIG_EXOFS_FS=m
-# CONFIG_EXOFS_DEBUG is not set
-CONFIG_ORE=m
-CONFIG_NETWORK_FILESYSTEMS=y
-CONFIG_NFS_FS=m
-CONFIG_NFS_V2=m
-CONFIG_NFS_V3=m
-CONFIG_NFS_V3_ACL=y
-CONFIG_NFS_V4=m
-CONFIG_NFS_SWAP=y
-CONFIG_NFS_V4_1=y
-CONFIG_NFS_V4_2=y
-CONFIG_PNFS_FILE_LAYOUT=m
-CONFIG_PNFS_BLOCK=m
-CONFIG_PNFS_FLEXFILE_LAYOUT=m
-CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN="kernel.org"
-CONFIG_NFS_V4_1_MIGRATION=y
-CONFIG_NFS_V4_SECURITY_LABEL=y
-CONFIG_NFS_FSCACHE=y
-# CONFIG_NFS_USE_LEGACY_DNS is not set
-CONFIG_NFS_USE_KERNEL_DNS=y
-CONFIG_NFSD=m
-CONFIG_NFSD_V2_ACL=y
-CONFIG_NFSD_V3=y
-CONFIG_NFSD_V3_ACL=y
-CONFIG_NFSD_V4=y
-CONFIG_NFSD_PNFS=y
-CONFIG_NFSD_BLOCKLAYOUT=y
-CONFIG_NFSD_SCSILAYOUT=y
-CONFIG_NFSD_FLEXFILELAYOUT=y
-# CONFIG_NFSD_V4_SECURITY_LABEL is not set
-# CONFIG_NFSD_FAULT_INJECTION is not set
-CONFIG_GRACE_PERIOD=m
-CONFIG_LOCKD=m
-CONFIG_LOCKD_V4=y
-CONFIG_NFS_ACL_SUPPORT=m
-CONFIG_NFS_COMMON=y
-CONFIG_SUNRPC=m
-CONFIG_SUNRPC_GSS=m
-CONFIG_SUNRPC_BACKCHANNEL=y
-CONFIG_SUNRPC_SWAP=y
-CONFIG_RPCSEC_GSS_KRB5=m
-# CONFIG_SUNRPC_DEBUG is not set
-CONFIG_SUNRPC_XPRT_RDMA=m
-CONFIG_CEPH_FS=m
-CONFIG_CEPH_FSCACHE=y
-CONFIG_CEPH_FS_POSIX_ACL=y
-CONFIG_CIFS=m
-CONFIG_CIFS_STATS=y
-CONFIG_CIFS_STATS2=y
-CONFIG_CIFS_WEAK_PW_HASH=y
-CONFIG_CIFS_UPCALL=y
-CONFIG_CIFS_XATTR=y
-CONFIG_CIFS_ACL=y
-# CONFIG_CIFS_DEBUG is not set
-CONFIG_CIFS_DFS_UPCALL=y
-CONFIG_CIFS_SMB311=y
-CONFIG_CIFS_FSCACHE=y
-CONFIG_NCP_FS=m
-CONFIG_NCPFS_PACKET_SIGNING=y
-CONFIG_NCPFS_IOCTL_LOCKING=y
-CONFIG_NCPFS_STRONG=y
-CONFIG_NCPFS_NFS_NS=y
-CONFIG_NCPFS_OS2_NS=y
-CONFIG_NCPFS_SMALLDOS=y
-CONFIG_NCPFS_NLS=y
-CONFIG_NCPFS_EXTRAS=y
-CONFIG_CODA_FS=m
-CONFIG_AFS_FS=m
-# CONFIG_AFS_DEBUG is not set
-CONFIG_AFS_FSCACHE=y
-CONFIG_9P_FS=m
-CONFIG_9P_FSCACHE=y
-CONFIG_9P_FS_POSIX_ACL=y
-CONFIG_9P_FS_SECURITY=y
-CONFIG_NLS=y
-CONFIG_NLS_DEFAULT="utf8"
-CONFIG_NLS_CODEPAGE_437=m
-CONFIG_NLS_CODEPAGE_737=m
-CONFIG_NLS_CODEPAGE_775=m
-CONFIG_NLS_CODEPAGE_850=m
-CONFIG_NLS_CODEPAGE_852=m
-CONFIG_NLS_CODEPAGE_855=m
-CONFIG_NLS_CODEPAGE_857=m
-CONFIG_NLS_CODEPAGE_860=m
-CONFIG_NLS_CODEPAGE_861=m
-CONFIG_NLS_CODEPAGE_862=m
-CONFIG_NLS_CODEPAGE_863=m
-CONFIG_NLS_CODEPAGE_864=m
-CONFIG_NLS_CODEPAGE_865=m
-CONFIG_NLS_CODEPAGE_866=m
-CONFIG_NLS_CODEPAGE_869=m
-CONFIG_NLS_CODEPAGE_936=m
-CONFIG_NLS_CODEPAGE_950=m
-CONFIG_NLS_CODEPAGE_932=m
-CONFIG_NLS_CODEPAGE_949=m
-CONFIG_NLS_CODEPAGE_874=m
-CONFIG_NLS_ISO8859_8=m
-CONFIG_NLS_CODEPAGE_1250=m
-CONFIG_NLS_CODEPAGE_1251=m
-CONFIG_NLS_ASCII=m
-CONFIG_NLS_ISO8859_1=m
-CONFIG_NLS_ISO8859_2=m
-CONFIG_NLS_ISO8859_3=m
-CONFIG_NLS_ISO8859_4=m
-CONFIG_NLS_ISO8859_5=m
-CONFIG_NLS_ISO8859_6=m
-CONFIG_NLS_ISO8859_7=m
-CONFIG_NLS_ISO8859_9=m
-CONFIG_NLS_ISO8859_13=m
-CONFIG_NLS_ISO8859_14=m
-CONFIG_NLS_ISO8859_15=m
-CONFIG_NLS_KOI8_R=m
-CONFIG_NLS_KOI8_U=m
-CONFIG_NLS_MAC_ROMAN=m
-CONFIG_NLS_MAC_CELTIC=m
-CONFIG_NLS_MAC_CENTEURO=m
-CONFIG_NLS_MAC_CROATIAN=m
-CONFIG_NLS_MAC_CYRILLIC=m
-CONFIG_NLS_MAC_GAELIC=m
-CONFIG_NLS_MAC_GREEK=m
-CONFIG_NLS_MAC_ICELAND=m
-CONFIG_NLS_MAC_INUIT=m
-CONFIG_NLS_MAC_ROMANIAN=m
-CONFIG_NLS_MAC_TURKISH=m
-CONFIG_NLS_UTF8=m
-CONFIG_DLM=m
-# CONFIG_DLM_DEBUG is not set
-
-#
-# Kernel hacking
-#
-CONFIG_TRACE_IRQFLAGS_SUPPORT=y
-
-#
-# printk and dmesg options
-#
-# CONFIG_PRINTK_TIME is not set
-CONFIG_CONSOLE_LOGLEVEL_DEFAULT=1
-CONFIG_MESSAGE_LOGLEVEL_DEFAULT=1
-# CONFIG_BOOT_PRINTK_DELAY is not set
-# CONFIG_DYNAMIC_DEBUG is not set
-
-#
-# Compile-time checks and compiler options
-#
-# CONFIG_DEBUG_INFO is not set
-# CONFIG_ENABLE_WARN_DEPRECATED is not set
-# CONFIG_ENABLE_MUST_CHECK is not set
-CONFIG_FRAME_WARN=0
-CONFIG_STRIP_ASM_SYMS=y
-# CONFIG_READABLE_ASM is not set
-# CONFIG_UNUSED_SYMBOLS is not set
-# CONFIG_PAGE_OWNER is not set
-CONFIG_DEBUG_FS=y
-# CONFIG_HEADERS_CHECK is not set
-# CONFIG_DEBUG_SECTION_MISMATCH is not set
-CONFIG_SECTION_MISMATCH_WARN_ONLY=y
-CONFIG_STACK_VALIDATION=y
-# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x1
-CONFIG_MAGIC_SYSRQ_SERIAL=y
-CONFIG_DEBUG_KERNEL=y
-
-#
-# Memory Debugging
-#
-# CONFIG_PAGE_EXTENSION is not set
-# CONFIG_DEBUG_PAGEALLOC is not set
-# CONFIG_PAGE_POISONING is not set
-# CONFIG_DEBUG_PAGE_REF is not set
-# CONFIG_DEBUG_RODATA_TEST is not set
-# CONFIG_DEBUG_OBJECTS is not set
-CONFIG_SLUB_DEBUG_ON=y
-# CONFIG_SLUB_STATS is not set
-CONFIG_HAVE_DEBUG_KMEMLEAK=y
-# CONFIG_DEBUG_KMEMLEAK is not set
-# CONFIG_DEBUG_STACK_USAGE is not set
-# CONFIG_DEBUG_VM is not set
-CONFIG_ARCH_HAS_DEBUG_VIRTUAL=y
-# CONFIG_DEBUG_VIRTUAL is not set
-CONFIG_DEBUG_MEMORY_INIT=y
-# CONFIG_DEBUG_PER_CPU_MAPS is not set
-CONFIG_HAVE_DEBUG_STACKOVERFLOW=y
-# CONFIG_DEBUG_STACKOVERFLOW is not set
-CONFIG_HAVE_ARCH_KASAN=y
-# CONFIG_KASAN is not set
-CONFIG_ARCH_HAS_KCOV=y
-# CONFIG_KCOV is not set
-# CONFIG_DEBUG_SHIRQ is not set
-
-#
-# Debug Lockups and Hangs
-#
-# CONFIG_SOFTLOCKUP_DETECTOR is not set
-CONFIG_HARDLOCKUP_CHECK_TIMESTAMP=y
-# CONFIG_HARDLOCKUP_DETECTOR is not set
-# CONFIG_DETECT_HUNG_TASK is not set
-# CONFIG_WQ_WATCHDOG is not set
-# CONFIG_PANIC_ON_OOPS is not set
-CONFIG_PANIC_ON_OOPS_VALUE=0
-CONFIG_PANIC_TIMEOUT=0
-CONFIG_SCHED_DEBUG=y
-CONFIG_SCHED_INFO=y
-CONFIG_SCHEDSTATS=y
-CONFIG_SCHED_STACK_END_CHECK=y
-# CONFIG_DEBUG_TIMEKEEPING is not set
-# CONFIG_DEBUG_PREEMPT is not set
-
-#
-# Lock Debugging (spinlocks, mutexes, etc...)
-#
-# CONFIG_DEBUG_RT_MUTEXES is not set
-# CONFIG_DEBUG_SPINLOCK is not set
-# CONFIG_DEBUG_MUTEXES is not set
-# CONFIG_DEBUG_WW_MUTEX_SLOWPATH is not set
-# CONFIG_DEBUG_LOCK_ALLOC is not set
-# CONFIG_PROVE_LOCKING is not set
-# CONFIG_LOCK_STAT is not set
-# CONFIG_DEBUG_ATOMIC_SLEEP is not set
-# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
-# CONFIG_LOCK_TORTURE_TEST is not set
-# CONFIG_WW_MUTEX_SELFTEST is not set
-CONFIG_STACKTRACE=y
-# CONFIG_WARN_ALL_UNSEEDED_RANDOM is not set
-# CONFIG_DEBUG_KOBJECT is not set
-CONFIG_DEBUG_BUGVERBOSE=y
-CONFIG_DEBUG_LIST=y
-CONFIG_DEBUG_PI_LIST=y
-CONFIG_DEBUG_SG=y
-CONFIG_DEBUG_NOTIFIERS=y
-CONFIG_DEBUG_CREDENTIALS=y
-
-#
-# RCU Debugging
-#
-# CONFIG_PROVE_RCU is not set
-CONFIG_TORTURE_TEST=m
-CONFIG_RCU_PERF_TEST=m
-# CONFIG_RCU_TORTURE_TEST is not set
-CONFIG_RCU_CPU_STALL_TIMEOUT=60
-# CONFIG_RCU_TRACE is not set
-# CONFIG_RCU_EQS_DEBUG is not set
-# CONFIG_DEBUG_WQ_FORCE_RR_CPU is not set
-# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
-# CONFIG_CPU_HOTPLUG_STATE_CONTROL is not set
-# CONFIG_NOTIFIER_ERROR_INJECTION is not set
-# CONFIG_FAULT_INJECTION is not set
-# CONFIG_LATENCYTOP is not set
-CONFIG_USER_STACKTRACE_SUPPORT=y
-CONFIG_NOP_TRACER=y
-CONFIG_HAVE_FUNCTION_TRACER=y
-CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
-CONFIG_HAVE_DYNAMIC_FTRACE=y
-CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS=y
-CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
-CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
-CONFIG_HAVE_FENTRY=y
-CONFIG_HAVE_C_RECORDMCOUNT=y
-CONFIG_TRACE_CLOCK=y
-CONFIG_RING_BUFFER=y
-CONFIG_EVENT_TRACING=y
-CONFIG_CONTEXT_SWITCH_TRACER=y
-CONFIG_TRACING=y
-CONFIG_TRACING_SUPPORT=y
-CONFIG_FTRACE=y
-# CONFIG_FUNCTION_TRACER is not set
-# CONFIG_IRQSOFF_TRACER is not set
-# CONFIG_PREEMPT_TRACER is not set
-# CONFIG_SCHED_TRACER is not set
-# CONFIG_HWLAT_TRACER is not set
-# CONFIG_ENABLE_DEFAULT_TRACERS is not set
-# CONFIG_FTRACE_SYSCALLS is not set
-# CONFIG_TRACER_SNAPSHOT is not set
-CONFIG_BRANCH_PROFILE_NONE=y
-# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
-# CONFIG_STACK_TRACER is not set
-# CONFIG_BLK_DEV_IO_TRACE is not set
-CONFIG_KPROBE_EVENTS=y
-CONFIG_UPROBE_EVENTS=y
-CONFIG_BPF_EVENTS=y
-CONFIG_PROBE_EVENTS=y
-# CONFIG_MMIOTRACE is not set
-# CONFIG_HIST_TRIGGERS is not set
-# CONFIG_TRACEPOINT_BENCHMARK is not set
-# CONFIG_RING_BUFFER_BENCHMARK is not set
-# CONFIG_RING_BUFFER_STARTUP_TEST is not set
-# CONFIG_TRACE_EVAL_MAP_FILE is not set
-# CONFIG_TRACING_EVENTS_GPIO is not set
-# CONFIG_PROVIDE_OHCI1394_DMA_INIT is not set
-# CONFIG_DMA_API_DEBUG is not set
-
-#
-# Runtime Testing
-#
-CONFIG_LKDTM=m
-# CONFIG_TEST_LIST_SORT is not set
-# CONFIG_TEST_SORT is not set
-# CONFIG_KPROBES_SANITY_TEST is not set
-# CONFIG_BACKTRACE_SELF_TEST is not set
-# CONFIG_RBTREE_TEST is not set
-# CONFIG_INTERVAL_TREE_TEST is not set
-# CONFIG_PERCPU_TEST is not set
-# CONFIG_ATOMIC64_SELFTEST is not set
-# CONFIG_ASYNC_RAID6_TEST is not set
-# CONFIG_TEST_HEXDUMP is not set
-# CONFIG_TEST_STRING_HELPERS is not set
-# CONFIG_TEST_KSTRTOX is not set
-CONFIG_TEST_PRINTF=m
-CONFIG_TEST_BITMAP=m
-CONFIG_TEST_UUID=m
-# CONFIG_TEST_RHASHTABLE is not set
-CONFIG_TEST_HASH=m
-# CONFIG_TEST_PARMAN is not set
-CONFIG_TEST_LKM=m
-# CONFIG_TEST_USER_COPY is not set
-# CONFIG_TEST_BPF is not set
-# CONFIG_TEST_FIRMWARE is not set
-CONFIG_TEST_SYSCTL=m
-# CONFIG_TEST_UDELAY is not set
-CONFIG_TEST_STATIC_KEYS=m
-CONFIG_TEST_KMOD=m
-CONFIG_MEMTEST=y
-# CONFIG_BUG_ON_DATA_CORRUPTION is not set
-# CONFIG_SAMPLES is not set
-CONFIG_HAVE_ARCH_KGDB=y
-# CONFIG_KGDB is not set
-CONFIG_ARCH_HAS_UBSAN_SANITIZE_ALL=y
-# CONFIG_ARCH_WANTS_UBSAN_NO_NULL is not set
-# CONFIG_UBSAN is not set
-CONFIG_ARCH_HAS_DEVMEM_IS_ALLOWED=y
-CONFIG_STRICT_DEVMEM=y
-CONFIG_IO_STRICT_DEVMEM=y
-CONFIG_EARLY_PRINTK_USB=y
-CONFIG_X86_VERBOSE_BOOTUP=y
-CONFIG_EARLY_PRINTK=y
-# CONFIG_EARLY_PRINTK_DBGP is not set
-# CONFIG_EARLY_PRINTK_EFI is not set
-CONFIG_EARLY_PRINTK_USB_XDBC=y
-CONFIG_X86_PTDUMP_CORE=y
-# CONFIG_X86_PTDUMP is not set
-# CONFIG_EFI_PGT_DUMP is not set
-CONFIG_DEBUG_WX=y
-CONFIG_DOUBLEFAULT=y
-# CONFIG_DEBUG_TLBFLUSH is not set
-# CONFIG_IOMMU_DEBUG is not set
-# CONFIG_IOMMU_STRESS is not set
-CONFIG_HAVE_MMIOTRACE_SUPPORT=y
-# CONFIG_X86_DECODER_SELFTEST is not set
-CONFIG_IO_DELAY_TYPE_0X80=0
-CONFIG_IO_DELAY_TYPE_0XED=1
-CONFIG_IO_DELAY_TYPE_UDELAY=2
-CONFIG_IO_DELAY_TYPE_NONE=3
-CONFIG_IO_DELAY_0X80=y
-# CONFIG_IO_DELAY_0XED is not set
-# CONFIG_IO_DELAY_UDELAY is not set
-# CONFIG_IO_DELAY_NONE is not set
-CONFIG_DEFAULT_IO_DELAY_TYPE=0
-# CONFIG_DEBUG_BOOT_PARAMS is not set
-# CONFIG_CPA_DEBUG is not set
-# CONFIG_OPTIMIZE_INLINING is not set
-# CONFIG_DEBUG_ENTRY is not set
-# CONFIG_DEBUG_NMI_SELFTEST is not set
-CONFIG_X86_DEBUG_FPU=y
-# CONFIG_PUNIT_ATOM_DEBUG is not set
-CONFIG_UNWINDER_ORC=y
-# CONFIG_UNWINDER_FRAME_POINTER is not set
-
-#
-# Security options
-#
-CONFIG_KEYS=y
-CONFIG_KEYS_COMPAT=y
-CONFIG_PERSISTENT_KEYRINGS=y
-# CONFIG_BIG_KEYS is not set
-CONFIG_TRUSTED_KEYS=m
-CONFIG_ENCRYPTED_KEYS=m
-# CONFIG_KEY_DH_OPERATIONS is not set
-CONFIG_SECURITY_DMESG_RESTRICT=y
-CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
-CONFIG_SECURITY_TIOCSTI_RESTRICT=y
-CONFIG_SECURITY=y
-# CONFIG_SECURITY_WRITABLE_HOOKS is not set
-CONFIG_SECURITYFS=y
-CONFIG_SECURITY_NETWORK=y
-CONFIG_PAGE_TABLE_ISOLATION=y
-# CONFIG_SECURITY_INFINIBAND is not set
-# CONFIG_SECURITY_NETWORK_XFRM is not set
-CONFIG_SECURITY_PATH=y
-CONFIG_INTEL_TXT=y
-CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR=y
-CONFIG_HARDENED_USERCOPY=y
-CONFIG_FORTIFY_SOURCE=y
-CONFIG_PAGE_SANITIZE=y
-CONFIG_PAGE_SANITIZE_VERIFY=y
-# CONFIG_STATIC_USERMODEHELPER is not set
-# CONFIG_SECURITY_SELINUX is not set
-# CONFIG_SECURITY_SMACK is not set
-# CONFIG_SECURITY_TOMOYO is not set
-CONFIG_SECURITY_APPARMOR=y
-CONFIG_SECURITY_APPARMOR_BOOTPARAM_VALUE=1
-CONFIG_SECURITY_APPARMOR_HASH=y
-CONFIG_SECURITY_APPARMOR_HASH_DEFAULT=y
-# CONFIG_SECURITY_APPARMOR_DEBUG is not set
-# CONFIG_SECURITY_LOADPIN is not set
-CONFIG_SECURITY_YAMA=y
-# CONFIG_INTEGRITY is not set
-CONFIG_DEFAULT_SECURITY_APPARMOR=y
-# CONFIG_DEFAULT_SECURITY_DAC is not set
-CONFIG_DEFAULT_SECURITY="apparmor"
-CONFIG_XOR_BLOCKS=m
-CONFIG_ASYNC_CORE=m
-CONFIG_ASYNC_MEMCPY=m
-CONFIG_ASYNC_XOR=m
-CONFIG_ASYNC_PQ=m
-CONFIG_ASYNC_RAID6_RECOV=m
-CONFIG_CRYPTO=y
-
-#
-# Crypto core or helper
-#
-CONFIG_CRYPTO_ALGAPI=y
-CONFIG_CRYPTO_ALGAPI2=y
-CONFIG_CRYPTO_AEAD=m
-CONFIG_CRYPTO_AEAD2=y
-CONFIG_CRYPTO_BLKCIPHER=y
-CONFIG_CRYPTO_BLKCIPHER2=y
-CONFIG_CRYPTO_HASH=y
-CONFIG_CRYPTO_HASH2=y
-CONFIG_CRYPTO_RNG=m
-CONFIG_CRYPTO_RNG2=y
-CONFIG_CRYPTO_RNG_DEFAULT=m
-CONFIG_CRYPTO_AKCIPHER2=y
-CONFIG_CRYPTO_AKCIPHER=y
-CONFIG_CRYPTO_KPP2=y
-CONFIG_CRYPTO_KPP=m
-CONFIG_CRYPTO_ACOMP2=y
-CONFIG_CRYPTO_RSA=y
-CONFIG_CRYPTO_DH=m
-CONFIG_CRYPTO_ECDH=m
-CONFIG_CRYPTO_MANAGER=y
-CONFIG_CRYPTO_MANAGER2=y
-CONFIG_CRYPTO_USER=m
-CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y
-CONFIG_CRYPTO_GF128MUL=m
-CONFIG_CRYPTO_NULL=m
-CONFIG_CRYPTO_NULL2=y
-CONFIG_CRYPTO_PCRYPT=m
-CONFIG_CRYPTO_WORKQUEUE=y
-CONFIG_CRYPTO_CRYPTD=m
-CONFIG_CRYPTO_MCRYPTD=m
-CONFIG_CRYPTO_AUTHENC=m
-CONFIG_CRYPTO_TEST=m
-CONFIG_CRYPTO_ABLK_HELPER=m
-CONFIG_CRYPTO_SIMD=m
-CONFIG_CRYPTO_GLUE_HELPER_X86=m
-CONFIG_CRYPTO_ENGINE=m
-
-#
-# Authenticated Encryption with Associated Data
-#
-CONFIG_CRYPTO_CCM=m
-CONFIG_CRYPTO_GCM=m
-CONFIG_CRYPTO_CHACHA20POLY1305=m
-CONFIG_CRYPTO_SEQIV=m
-CONFIG_CRYPTO_ECHAINIV=m
-
-#
-# Block modes
-#
-CONFIG_CRYPTO_CBC=m
-CONFIG_CRYPTO_CTR=m
-CONFIG_CRYPTO_CTS=m
-CONFIG_CRYPTO_ECB=y
-CONFIG_CRYPTO_LRW=m
-CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_XTS=m
-CONFIG_CRYPTO_KEYWRAP=m
-
-#
-# Hash modes
-#
-CONFIG_CRYPTO_CMAC=m
-CONFIG_CRYPTO_HMAC=m
-CONFIG_CRYPTO_XCBC=m
-CONFIG_CRYPTO_VMAC=m
-
-#
-# Digest
-#
-CONFIG_CRYPTO_CRC32C=m
-CONFIG_CRYPTO_CRC32C_INTEL=m
-CONFIG_CRYPTO_CRC32=m
-CONFIG_CRYPTO_CRC32_PCLMUL=m
-CONFIG_CRYPTO_CRCT10DIF=y
-CONFIG_CRYPTO_CRCT10DIF_PCLMUL=m
-CONFIG_CRYPTO_GHASH=m
-CONFIG_CRYPTO_POLY1305=m
-CONFIG_CRYPTO_POLY1305_X86_64=m
-CONFIG_CRYPTO_MD4=m
-CONFIG_CRYPTO_MD5=y
-CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_RMD128=m
-CONFIG_CRYPTO_RMD160=m
-CONFIG_CRYPTO_RMD256=m
-CONFIG_CRYPTO_RMD320=m
-CONFIG_CRYPTO_SHA1=y
-CONFIG_CRYPTO_SHA1_SSSE3=m
-CONFIG_CRYPTO_SHA256_SSSE3=m
-CONFIG_CRYPTO_SHA512_SSSE3=m
-CONFIG_CRYPTO_SHA1_MB=m
-CONFIG_CRYPTO_SHA256_MB=m
-CONFIG_CRYPTO_SHA512_MB=m
-CONFIG_CRYPTO_SHA256=m
-CONFIG_CRYPTO_SHA512=y
-CONFIG_CRYPTO_SHA3=m
-CONFIG_CRYPTO_TGR192=m
-CONFIG_CRYPTO_WP512=m
-CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL=m
-
-#
-# Ciphers
-#
-CONFIG_CRYPTO_AES=y
-CONFIG_CRYPTO_AES_TI=m
-CONFIG_CRYPTO_AES_X86_64=m
-CONFIG_CRYPTO_AES_NI_INTEL=m
-CONFIG_CRYPTO_ANUBIS=m
-CONFIG_CRYPTO_ARC4=m
-CONFIG_CRYPTO_BLOWFISH=m
-CONFIG_CRYPTO_BLOWFISH_COMMON=m
-CONFIG_CRYPTO_BLOWFISH_X86_64=m
-CONFIG_CRYPTO_CAMELLIA=m
-CONFIG_CRYPTO_CAMELLIA_X86_64=m
-CONFIG_CRYPTO_CAMELLIA_AESNI_AVX_X86_64=m
-CONFIG_CRYPTO_CAMELLIA_AESNI_AVX2_X86_64=m
-CONFIG_CRYPTO_CAST_COMMON=m
-CONFIG_CRYPTO_CAST5=m
-CONFIG_CRYPTO_CAST5_AVX_X86_64=m
-CONFIG_CRYPTO_CAST6=m
-CONFIG_CRYPTO_CAST6_AVX_X86_64=m
-CONFIG_CRYPTO_DES=m
-CONFIG_CRYPTO_DES3_EDE_X86_64=m
-CONFIG_CRYPTO_FCRYPT=m
-CONFIG_CRYPTO_KHAZAD=m
-CONFIG_CRYPTO_SALSA20=m
-CONFIG_CRYPTO_CHACHA20=m
-CONFIG_CRYPTO_CHACHA20_X86_64=m
-CONFIG_CRYPTO_SEED=m
-CONFIG_CRYPTO_SERPENT=m
-CONFIG_CRYPTO_SERPENT_SSE2_X86_64=m
-CONFIG_CRYPTO_SERPENT_AVX_X86_64=m
-CONFIG_CRYPTO_SERPENT_AVX2_X86_64=m
-CONFIG_CRYPTO_TEA=m
-CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_TWOFISH_COMMON=m
-CONFIG_CRYPTO_TWOFISH_X86_64=m
-CONFIG_CRYPTO_TWOFISH_X86_64_3WAY=m
-CONFIG_CRYPTO_TWOFISH_AVX_X86_64=m
-
-#
-# Compression
-#
-CONFIG_CRYPTO_DEFLATE=m
-CONFIG_CRYPTO_LZO=y
-CONFIG_CRYPTO_842=m
-CONFIG_CRYPTO_LZ4=m
-CONFIG_CRYPTO_LZ4HC=m
-
-#
-# Random Number Generation
-#
-CONFIG_CRYPTO_ANSI_CPRNG=m
-CONFIG_CRYPTO_DRBG_MENU=m
-CONFIG_CRYPTO_DRBG_HMAC=y
-CONFIG_CRYPTO_DRBG_HASH=y
-CONFIG_CRYPTO_DRBG_CTR=y
-CONFIG_CRYPTO_DRBG=m
-CONFIG_CRYPTO_JITTERENTROPY=m
-CONFIG_CRYPTO_USER_API=m
-CONFIG_CRYPTO_USER_API_HASH=m
-CONFIG_CRYPTO_USER_API_SKCIPHER=m
-CONFIG_CRYPTO_USER_API_RNG=m
-CONFIG_CRYPTO_USER_API_AEAD=m
-CONFIG_CRYPTO_HASH_INFO=y
-CONFIG_CRYPTO_HW=y
-CONFIG_CRYPTO_DEV_PADLOCK=m
-CONFIG_CRYPTO_DEV_PADLOCK_AES=m
-CONFIG_CRYPTO_DEV_PADLOCK_SHA=m
-# CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC is not set
-CONFIG_CRYPTO_DEV_CCP=y
-CONFIG_CRYPTO_DEV_CCP_DD=m
-CONFIG_CRYPTO_DEV_SP_CCP=y
-CONFIG_CRYPTO_DEV_CCP_CRYPTO=m
-CONFIG_CRYPTO_DEV_QAT=m
-CONFIG_CRYPTO_DEV_QAT_DH895xCC=m
-CONFIG_CRYPTO_DEV_QAT_C3XXX=m
-CONFIG_CRYPTO_DEV_QAT_C62X=m
-CONFIG_CRYPTO_DEV_QAT_DH895xCCVF=m
-CONFIG_CRYPTO_DEV_QAT_C3XXXVF=m
-CONFIG_CRYPTO_DEV_QAT_C62XVF=m
-CONFIG_CRYPTO_DEV_NITROX=m
-CONFIG_CRYPTO_DEV_NITROX_CNN55XX=m
-CONFIG_CRYPTO_DEV_CHELSIO=m
-CONFIG_CRYPTO_DEV_VIRTIO=m
-CONFIG_ASYMMETRIC_KEY_TYPE=y
-CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y
-CONFIG_X509_CERTIFICATE_PARSER=y
-CONFIG_PKCS7_MESSAGE_PARSER=y
-# CONFIG_PKCS7_TEST_KEY is not set
-# CONFIG_SIGNED_PE_FILE_VERIFICATION is not set
-
-#
-# Certificates for signature checking
-#
-CONFIG_MODULE_SIG_KEY="certs/signing_key.pem"
-CONFIG_SYSTEM_TRUSTED_KEYRING=y
-CONFIG_SYSTEM_TRUSTED_KEYS=""
-# CONFIG_SYSTEM_EXTRA_CERTIFICATE is not set
-CONFIG_SECONDARY_TRUSTED_KEYRING=y
-CONFIG_SYSTEM_BLACKLIST_KEYRING=y
-CONFIG_SYSTEM_BLACKLIST_HASH_LIST=""
-CONFIG_HAVE_KVM=y
-CONFIG_HAVE_KVM_IRQCHIP=y
-CONFIG_HAVE_KVM_IRQFD=y
-CONFIG_HAVE_KVM_IRQ_ROUTING=y
-CONFIG_HAVE_KVM_EVENTFD=y
-CONFIG_KVM_MMIO=y
-CONFIG_KVM_ASYNC_PF=y
-CONFIG_HAVE_KVM_MSI=y
-CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT=y
-CONFIG_KVM_VFIO=y
-CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT=y
-CONFIG_KVM_COMPAT=y
-CONFIG_HAVE_KVM_IRQ_BYPASS=y
-CONFIG_VIRTUALIZATION=y
-CONFIG_KVM=m
-CONFIG_KVM_INTEL=m
-CONFIG_KVM_AMD=m
-# CONFIG_KVM_MMU_AUDIT is not set
-CONFIG_VHOST_NET=m
-CONFIG_VHOST_SCSI=m
-CONFIG_VHOST_VSOCK=m
-CONFIG_VHOST=m
-# CONFIG_VHOST_CROSS_ENDIAN_LEGACY is not set
-CONFIG_BINARY_PRINTF=y
-
-#
-# Library routines
-#
-CONFIG_RAID6_PQ=m
-CONFIG_BITREVERSE=y
-# CONFIG_HAVE_ARCH_BITREVERSE is not set
-CONFIG_RATIONAL=y
-CONFIG_GENERIC_STRNCPY_FROM_USER=y
-CONFIG_GENERIC_STRNLEN_USER=y
-CONFIG_GENERIC_NET_UTILS=y
-CONFIG_GENERIC_FIND_FIRST_BIT=y
-CONFIG_GENERIC_PCI_IOMAP=y
-CONFIG_GENERIC_IOMAP=y
-CONFIG_GENERIC_IO=y
-CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y
-CONFIG_ARCH_HAS_FAST_MULTIPLIER=y
-CONFIG_CRC_CCITT=m
-CONFIG_CRC16=m
-CONFIG_CRC_T10DIF=y
-CONFIG_CRC_ITU_T=m
-CONFIG_CRC32=y
-# CONFIG_CRC32_SELFTEST is not set
-CONFIG_CRC32_SLICEBY8=y
-# CONFIG_CRC32_SLICEBY4 is not set
-# CONFIG_CRC32_SARWATE is not set
-# CONFIG_CRC32_BIT is not set
-CONFIG_CRC4=m
-CONFIG_CRC7=m
-CONFIG_LIBCRC32C=m
-CONFIG_CRC8=m
-CONFIG_XXHASH=m
-# CONFIG_AUDIT_ARCH_COMPAT_GENERIC is not set
-# CONFIG_RANDOM32_SELFTEST is not set
-CONFIG_842_COMPRESS=m
-CONFIG_842_DECOMPRESS=m
-CONFIG_ZLIB_INFLATE=y
-CONFIG_ZLIB_DEFLATE=y
-CONFIG_LZO_COMPRESS=y
-CONFIG_LZO_DECOMPRESS=y
-CONFIG_LZ4_COMPRESS=y
-CONFIG_LZ4HC_COMPRESS=m
-CONFIG_LZ4_DECOMPRESS=y
-CONFIG_ZSTD_COMPRESS=m
-CONFIG_ZSTD_DECOMPRESS=m
-CONFIG_XZ_DEC=y
-CONFIG_XZ_DEC_X86=y
-CONFIG_XZ_DEC_POWERPC=y
-CONFIG_XZ_DEC_IA64=y
-CONFIG_XZ_DEC_ARM=y
-CONFIG_XZ_DEC_ARMTHUMB=y
-CONFIG_XZ_DEC_SPARC=y
-CONFIG_XZ_DEC_BCJ=y
-# CONFIG_XZ_DEC_TEST is not set
-CONFIG_DECOMPRESS_GZIP=y
-CONFIG_DECOMPRESS_BZIP2=y
-CONFIG_DECOMPRESS_LZMA=y
-CONFIG_DECOMPRESS_XZ=y
-CONFIG_DECOMPRESS_LZO=y
-CONFIG_DECOMPRESS_LZ4=y
-CONFIG_GENERIC_ALLOCATOR=y
-CONFIG_REED_SOLOMON=m
-CONFIG_REED_SOLOMON_ENC8=y
-CONFIG_REED_SOLOMON_DEC8=y
-CONFIG_REED_SOLOMON_DEC16=y
-CONFIG_BCH=m
-CONFIG_BCH_CONST_PARAMS=y
-CONFIG_TEXTSEARCH=y
-CONFIG_TEXTSEARCH_KMP=m
-CONFIG_TEXTSEARCH_BM=m
-CONFIG_TEXTSEARCH_FSM=m
-CONFIG_BTREE=y
-CONFIG_INTERVAL_TREE=y
-CONFIG_RADIX_TREE_MULTIORDER=y
-CONFIG_ASSOCIATIVE_ARRAY=y
-CONFIG_HAS_IOMEM=y
-CONFIG_HAS_IOPORT_MAP=y
-CONFIG_HAS_DMA=y
-# CONFIG_DMA_NOOP_OPS is not set
-CONFIG_DMA_VIRT_OPS=y
-CONFIG_CHECK_SIGNATURE=y
-CONFIG_CPUMASK_OFFSTACK=y
-CONFIG_CPU_RMAP=y
-CONFIG_DQL=y
-CONFIG_GLOB=y
-# CONFIG_GLOB_SELFTEST is not set
-CONFIG_NLATTR=y
-CONFIG_LRU_CACHE=m
-CONFIG_CLZ_TAB=y
-CONFIG_CORDIC=m
-CONFIG_DDR=y
-CONFIG_IRQ_POLL=y
-CONFIG_MPILIB=y
-CONFIG_OID_REGISTRY=y
-CONFIG_UCS2_STRING=y
-CONFIG_FONT_SUPPORT=y
-CONFIG_FONTS=y
-CONFIG_FONT_8x8=y
-CONFIG_FONT_8x16=y
-# CONFIG_FONT_6x11 is not set
-# CONFIG_FONT_7x14 is not set
-# CONFIG_FONT_PEARL_8x8 is not set
-# CONFIG_FONT_ACORN_8x8 is not set
-# CONFIG_FONT_MINI_4x6 is not set
-# CONFIG_FONT_6x10 is not set
-# CONFIG_FONT_10x18 is not set
-# CONFIG_FONT_SUN8x16 is not set
-# CONFIG_FONT_SUN12x22 is not set
-# CONFIG_SG_SPLIT is not set
-CONFIG_SG_POOL=y
-CONFIG_ARCH_HAS_SG_CHAIN=y
-CONFIG_ARCH_HAS_PMEM_API=y
-CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE=y
-CONFIG_SBITMAP=y
-CONFIG_PARMAN=m
-# CONFIG_STRING_SELFTEST is not set
diff --git a/sys-kernel/linux-image-redcore-lts/files/4.14-introduce-NUMA-identity-node-sched-domain.patch b/sys-kernel/linux-image-redcore-lts/files/4.14-introduce-NUMA-identity-node-sched-domain.patch
deleted file mode 100644
index 2376edae..00000000
--- a/sys-kernel/linux-image-redcore-lts/files/4.14-introduce-NUMA-identity-node-sched-domain.patch
+++ /dev/null
@@ -1,46 +0,0 @@
-diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
-index 808998fe1..18d3321ef 100644
---- a/kernel/sched/topology.c
-+++ b/kernel/sched/topology.c
-@@ -1339,6 +1339,10 @@ void sched_init_numa(void)
- if (!sched_domains_numa_distance)
- return;
-
-+ /* Includes NUMA identity node at level 0. */
-+ sched_domains_numa_distance[level++] = curr_distance;
-+ sched_domains_numa_levels = level;
-+
- /*
- * O(nr_nodes^2) deduplicating selection sort -- in order to find the
- * unique distances in the node_distance() table.
-@@ -1386,8 +1390,7 @@ void sched_init_numa(void)
- return;
-
- /*
-- * 'level' contains the number of unique distances, excluding the
-- * identity distance node_distance(i,i).
-+ * 'level' contains the number of unique distances
- *
- * The sched_domains_numa_distance[] array includes the actual distance
- * numbers.
-@@ -1448,10 +1451,19 @@ void sched_init_numa(void)
- for (i = 0; sched_domain_topology[i].mask; i++)
- tl[i] = sched_domain_topology[i];
-
-+ /*
-+ * Add the NUMA identity distance, aka single NODE.
-+ */
-+ tl[i++] = (struct sched_domain_topology_level){
-+ .mask = sd_numa_mask,
-+ .numa_level = 0,
-+ SD_INIT_NAME(NODE)
-+ };
-+
- /*
- * .. and append 'j' levels of NUMA goodness.
- */
-- for (j = 0; j < level; i++, j++) {
-+ for (j = 1; j < level; i++, j++) {
- tl[i] = (struct sched_domain_topology_level){
- .mask = sd_numa_mask,
- .sd_flags = cpu_numa_flags,
diff --git a/sys-kernel/linux-image-redcore-lts/files/4.14-k10temp-add-ZEN-support.patch b/sys-kernel/linux-image-redcore-lts/files/4.14-k10temp-add-ZEN-support.patch
deleted file mode 100644
index b1e8a9b0..00000000
--- a/sys-kernel/linux-image-redcore-lts/files/4.14-k10temp-add-ZEN-support.patch
+++ /dev/null
@@ -1,177 +0,0 @@
-diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
-index ce3b91f22..0721e1756 100644
---- a/drivers/hwmon/k10temp.c
-+++ b/drivers/hwmon/k10temp.c
-@@ -36,6 +36,10 @@ MODULE_PARM_DESC(force, "force loading on processors with erratum 319");
- /* Provide lock for writing to NB_SMU_IND_ADDR */
- static DEFINE_MUTEX(nb_smu_ind_mutex);
-
-+#ifndef PCI_DEVICE_ID_AMD_17H_DF_F3
-+#define PCI_DEVICE_ID_AMD_17H_DF_F3 0x1463
-+#endif
-+
- /* CPUID function 0x80000001, ebx */
- #define CPUID_PKGTYPE_MASK 0xf0000000
- #define CPUID_PKGTYPE_F 0x00000000
-@@ -61,31 +65,72 @@ static DEFINE_MUTEX(nb_smu_ind_mutex);
- */
- #define F15H_M60H_REPORTED_TEMP_CTRL_OFFSET 0xd8200ca4
-
--static void amd_nb_smu_index_read(struct pci_dev *pdev, unsigned int devfn,
-- int offset, u32 *val)
-+/* F17h M01h Access through SMN */
-+#define F17H_M01H_REPORTED_TEMP_CTRL_OFFSET 0x00059800
-+
-+struct k10temp_data {
-+ struct pci_dev *pdev;
-+ void (*read_tempreg)(struct pci_dev *pdev, u32 *regval);
-+ int temp_offset;
-+};
-+
-+struct tctl_offset {
-+ u8 model;
-+ char const *id;
-+ int offset;
-+};
-+
-+static const struct tctl_offset tctl_offset_table[] = {
-+ { 0x17, "AMD Ryzen 5 1600X", 20000 },
-+ { 0x17, "AMD Ryzen 7 1700X", 20000 },
-+ { 0x17, "AMD Ryzen 7 1800X", 20000 },
-+ { 0x17, "AMD Ryzen Threadripper 1950X", 27000 },
-+ { 0x17, "AMD Ryzen Threadripper 1920X", 27000 },
-+ { 0x17, "AMD Ryzen Threadripper 1950", 10000 },
-+ { 0x17, "AMD Ryzen Threadripper 1920", 10000 },
-+ { 0x17, "AMD Ryzen Threadripper 1910", 10000 },
-+};
-+
-+static void read_tempreg_pci(struct pci_dev *pdev, u32 *regval)
-+{
-+ pci_read_config_dword(pdev, REG_REPORTED_TEMPERATURE, regval);
-+}
-+
-+static void amd_nb_index_read(struct pci_dev *pdev, unsigned int devfn,
-+ unsigned int base, int offset, u32 *val)
- {
- mutex_lock(&nb_smu_ind_mutex);
- pci_bus_write_config_dword(pdev->bus, devfn,
-- 0xb8, offset);
-+ base, offset);
- pci_bus_read_config_dword(pdev->bus, devfn,
-- 0xbc, val);
-+ base + 4, val);
- mutex_unlock(&nb_smu_ind_mutex);
- }
-
-+static void read_tempreg_nb_f15(struct pci_dev *pdev, u32 *regval)
-+{
-+ amd_nb_index_read(pdev, PCI_DEVFN(0, 0), 0xb8,
-+ F15H_M60H_REPORTED_TEMP_CTRL_OFFSET, regval);
-+}
-+
-+static void read_tempreg_nb_f17(struct pci_dev *pdev, u32 *regval)
-+{
-+ amd_nb_index_read(pdev, PCI_DEVFN(0, 0), 0x60,
-+ F17H_M01H_REPORTED_TEMP_CTRL_OFFSET, regval);
-+}
-+
- static ssize_t temp1_input_show(struct device *dev,
- struct device_attribute *attr, char *buf)
- {
-+ struct k10temp_data *data = dev_get_drvdata(dev);
- u32 regval;
-- struct pci_dev *pdev = dev_get_drvdata(dev);
--
-- if (boot_cpu_data.x86 == 0x15 && boot_cpu_data.x86_model == 0x60) {
-- amd_nb_smu_index_read(pdev, PCI_DEVFN(0, 0),
-- F15H_M60H_REPORTED_TEMP_CTRL_OFFSET,
-- &regval);
-- } else {
-- pci_read_config_dword(pdev, REG_REPORTED_TEMPERATURE, &regval);
-- }
-- return sprintf(buf, "%u\n", (regval >> 21) * 125);
-+ unsigned int temp;
-+
-+ data->read_tempreg(data->pdev, &regval);
-+ temp = (regval >> 21) * 125;
-+ temp -= data->temp_offset;
-+
-+ return sprintf(buf, "%u\n", temp);
- }
-
- static ssize_t temp1_max_show(struct device *dev,
-@@ -98,11 +143,12 @@ static ssize_t show_temp_crit(struct device *dev,
- struct device_attribute *devattr, char *buf)
- {
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
-+ struct k10temp_data *data = dev_get_drvdata(dev);
- int show_hyst = attr->index;
- u32 regval;
- int value;
-
-- pci_read_config_dword(dev_get_drvdata(dev),
-+ pci_read_config_dword(data->pdev,
- REG_HARDWARE_THERMAL_CONTROL, &regval);
- value = ((regval >> 16) & 0x7f) * 500 + 52000;
- if (show_hyst)
-@@ -119,7 +165,8 @@ static umode_t k10temp_is_visible(struct kobject *kobj,
- struct attribute *attr, int index)
- {
- struct device *dev = container_of(kobj, struct device, kobj);
-- struct pci_dev *pdev = dev_get_drvdata(dev);
-+ struct k10temp_data *data = dev_get_drvdata(dev);
-+ struct pci_dev *pdev = data->pdev;
-
- if (index >= 2) {
- u32 reg_caps, reg_htc;
-@@ -187,7 +234,9 @@ static int k10temp_probe(struct pci_dev *pdev,
- {
- int unreliable = has_erratum_319(pdev);
- struct device *dev = &pdev->dev;
-+ struct k10temp_data *data;
- struct device *hwmon_dev;
-+ int i;
-
- if (unreliable) {
- if (!force) {
-@@ -199,7 +248,31 @@ static int k10temp_probe(struct pci_dev *pdev,
- "unreliable CPU thermal sensor; check erratum 319\n");
- }
-
-- hwmon_dev = devm_hwmon_device_register_with_groups(dev, "k10temp", pdev,
-+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
-+ if (!data)
-+ return -ENOMEM;
-+
-+ data->pdev = pdev;
-+
-+ if (boot_cpu_data.x86 == 0x15 && (boot_cpu_data.x86_model == 0x60 ||
-+ boot_cpu_data.x86_model == 0x70))
-+ data->read_tempreg = read_tempreg_nb_f15;
-+ else if (boot_cpu_data.x86 == 0x17)
-+ data->read_tempreg = read_tempreg_nb_f17;
-+ else
-+ data->read_tempreg = read_tempreg_pci;
-+
-+ for (i = 0; i < ARRAY_SIZE(tctl_offset_table); i++) {
-+ const struct tctl_offset *entry = &tctl_offset_table[i];
-+
-+ if (boot_cpu_data.x86 == entry->model &&
-+ strstr(boot_cpu_data.x86_model_id, entry->id)) {
-+ data->temp_offset = entry->offset;
-+ break;
-+ }
-+ }
-+
-+ hwmon_dev = devm_hwmon_device_register_with_groups(dev, "k10temp", data,
- k10temp_groups);
- return PTR_ERR_OR_ZERO(hwmon_dev);
- }
-@@ -214,6 +287,7 @@ static const struct pci_device_id k10temp_id_table[] = {
- { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F3) },
- { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
- { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
-+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) },
- {}
- };
- MODULE_DEVICE_TABLE(pci, k10temp_id_table);
diff --git a/sys-kernel/linux-image-redcore-lts/files/4.14-linux-hardened.patch b/sys-kernel/linux-image-redcore-lts/files/4.14-linux-hardened.patch
deleted file mode 100644
index 9280791e..00000000
--- a/sys-kernel/linux-image-redcore-lts/files/4.14-linux-hardened.patch
+++ /dev/null
@@ -1,2868 +0,0 @@
-diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
-index 7d8b17ce8804..7e4f071c3bf2 100644
---- a/Documentation/admin-guide/kernel-parameters.txt
-+++ b/Documentation/admin-guide/kernel-parameters.txt
-@@ -490,16 +490,6 @@
- nosocket -- Disable socket memory accounting.
- nokmem -- Disable kernel memory accounting.
-
-- checkreqprot [SELINUX] Set initial checkreqprot flag value.
-- Format: { "0" | "1" }
-- See security/selinux/Kconfig help text.
-- 0 -- check protection applied by kernel (includes
-- any implied execute protection).
-- 1 -- check protection requested by application.
-- Default value is set via a kernel config option.
-- Value can be changed at runtime via
-- /selinux/checkreqprot.
--
- cio_ignore= [S390]
- See Documentation/s390/CommonIO for details.
- clk_ignore_unused
-@@ -2984,6 +2974,11 @@
- the specified number of seconds. This is to be used if
- your oopses keep scrolling off the screen.
-
-+ extra_latent_entropy
-+ Enable a very simple form of latent entropy extraction
-+ from the first 4GB of memory as the bootmem allocator
-+ passes the memory pages to the buddy allocator.
-+
- pcbit= [HW,ISDN]
-
- pcd. [PARIDE]
-diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt
-index 694968c7523c..002d86416ef8 100644
---- a/Documentation/sysctl/kernel.txt
-+++ b/Documentation/sysctl/kernel.txt
-@@ -91,6 +91,7 @@ show up in /proc/sys/kernel:
- - sysctl_writes_strict
- - tainted
- - threads-max
-+- tiocsti_restrict
- - unknown_nmi_panic
- - watchdog
- - watchdog_thresh
-@@ -999,6 +1000,26 @@ available RAM pages threads-max is reduced accordingly.
-
- ==============================================================
-
-+tiocsti_restrict:
-+
-+This toggle indicates whether unprivileged users are prevented
-+from using the TIOCSTI ioctl to inject commands into other processes
-+which share a tty session.
-+
-+When tiocsti_restrict is set to (0) there are no restrictions (except
-+the default restriction of only being able to inject commands into
-+one's own tty). When tiocsti_restrict is set to (1), users must
-+have CAP_SYS_ADMIN to use the TIOCSTI ioctl.
-+
-+When user namespaces are in use, the check for the capability
-+CAP_SYS_ADMIN is done against the user namespace that originally
-+opened the tty.
-+
-+The kernel config option CONFIG_SECURITY_TIOCSTI_RESTRICT sets the
-+default value of tiocsti_restrict.
-+
-+==============================================================
-+
- unknown_nmi_panic:
-
- The value in this file affects behavior of handling NMI. When the
-diff --git a/Makefile b/Makefile
-index 70cc37cb3e99..edc3de99b3cd 100644
---- a/Makefile
-+++ b/Makefile
-@@ -714,6 +714,9 @@ endif
- KBUILD_CFLAGS += $(stackp-flag)
-
- ifeq ($(cc-name),clang)
-+ifdef CONFIG_LOCAL_INIT
-+KBUILD_CFLAGS += -fsanitize=local-init
-+endif
- KBUILD_CPPFLAGS += $(call cc-option,-Qunused-arguments,)
- KBUILD_CFLAGS += $(call cc-disable-warning, format-invalid-specifier)
- KBUILD_CFLAGS += $(call cc-disable-warning, gnu)
-diff --git a/arch/Kconfig b/arch/Kconfig
-index 77b3e21c4844..3dff252446ac 100644
---- a/arch/Kconfig
-+++ b/arch/Kconfig
-@@ -446,6 +446,11 @@ config GCC_PLUGIN_LATENT_ENTROPY
- is some slowdown of the boot process (about 0.5%) and fork and
- irq processing.
-
-+ When extra_latent_entropy is passed on the kernel command line,
-+ entropy will be extracted from up to the first 4GB of RAM while the
-+ runtime memory allocator is being initialized. This costs even more
-+ slowdown of the boot process.
-+
- Note that entropy extracted this way is not cryptographically
- secure!
-
-@@ -539,7 +544,7 @@ config CC_STACKPROTECTOR
- choice
- prompt "Stack Protector buffer overflow detection"
- depends on HAVE_CC_STACKPROTECTOR
-- default CC_STACKPROTECTOR_NONE
-+ default CC_STACKPROTECTOR_STRONG
- help
- This option turns on the "stack-protector" GCC feature. This
- feature puts, at the beginning of functions, a canary value on
-@@ -741,7 +746,7 @@ config ARCH_MMAP_RND_BITS
- int "Number of bits to use for ASLR of mmap base address" if EXPERT
- range ARCH_MMAP_RND_BITS_MIN ARCH_MMAP_RND_BITS_MAX
- default ARCH_MMAP_RND_BITS_DEFAULT if ARCH_MMAP_RND_BITS_DEFAULT
-- default ARCH_MMAP_RND_BITS_MIN
-+ default ARCH_MMAP_RND_BITS_MAX
- depends on HAVE_ARCH_MMAP_RND_BITS
- help
- This value can be used to select the number of bits to use to
-@@ -775,7 +780,7 @@ config ARCH_MMAP_RND_COMPAT_BITS
- int "Number of bits to use for ASLR of mmap base address for compatible applications" if EXPERT
- range ARCH_MMAP_RND_COMPAT_BITS_MIN ARCH_MMAP_RND_COMPAT_BITS_MAX
- default ARCH_MMAP_RND_COMPAT_BITS_DEFAULT if ARCH_MMAP_RND_COMPAT_BITS_DEFAULT
-- default ARCH_MMAP_RND_COMPAT_BITS_MIN
-+ default ARCH_MMAP_RND_COMPAT_BITS_MAX
- depends on HAVE_ARCH_MMAP_RND_COMPAT_BITS
- help
- This value can be used to select the number of bits to use to
-@@ -958,6 +963,7 @@ config ARCH_HAS_REFCOUNT
-
- config REFCOUNT_FULL
- bool "Perform full reference count validation at the expense of speed"
-+ default y
- help
- Enabling this switches the refcounting infrastructure from a fast
- unchecked atomic_t implementation to a fully state checked
-diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
-index c30cd78b6918..ba32a283f027 100644
---- a/arch/arm64/Kconfig
-+++ b/arch/arm64/Kconfig
-@@ -926,6 +926,7 @@ endif
-
- config ARM64_SW_TTBR0_PAN
- bool "Emulate Privileged Access Never using TTBR0_EL1 switching"
-+ default y
- help
- Enabling this option prevents the kernel from accessing
- user-space memory directly by pointing TTBR0_EL1 to a reserved
-@@ -1052,6 +1053,7 @@ config RANDOMIZE_BASE
- bool "Randomize the address of the kernel image"
- select ARM64_MODULE_PLTS if MODULES
- select RELOCATABLE
-+ default y
- help
- Randomizes the virtual address at which the kernel image is
- loaded, as a security feature that deters exploit attempts
-diff --git a/arch/arm64/Kconfig.debug b/arch/arm64/Kconfig.debug
-index cc6bd559af85..01d5442d4722 100644
---- a/arch/arm64/Kconfig.debug
-+++ b/arch/arm64/Kconfig.debug
-@@ -45,6 +45,7 @@ config ARM64_RANDOMIZE_TEXT_OFFSET
- config DEBUG_WX
- bool "Warn on W+X mappings at boot"
- select ARM64_PTDUMP_CORE
-+ default y
- ---help---
- Generate a warning if any W+X mappings are found at boot.
-
-diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
-index b05796578e7a..8f6e2099717d 100644
---- a/arch/arm64/configs/defconfig
-+++ b/arch/arm64/configs/defconfig
-@@ -1,4 +1,3 @@
--CONFIG_SYSVIPC=y
- CONFIG_POSIX_MQUEUE=y
- CONFIG_AUDIT=y
- CONFIG_NO_HZ_IDLE=y
-diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h
-index 33be513ef24c..6f0c0e3ef0dd 100644
---- a/arch/arm64/include/asm/elf.h
-+++ b/arch/arm64/include/asm/elf.h
-@@ -114,10 +114,10 @@
-
- /*
- * This is the base location for PIE (ET_DYN with INTERP) loads. On
-- * 64-bit, this is above 4GB to leave the entire 32-bit address
-+ * 64-bit, this is raised to 4GB to leave the entire 32-bit address
- * space open for things that want to use the area for 32-bit pointers.
- */
--#define ELF_ET_DYN_BASE (2 * TASK_SIZE_64 / 3)
-+#define ELF_ET_DYN_BASE 0x100000000UL
-
- #ifndef __ASSEMBLY__
-
-@@ -158,10 +158,10 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
- /* 1GB of VA */
- #ifdef CONFIG_COMPAT
- #define STACK_RND_MASK (test_thread_flag(TIF_32BIT) ? \
-- 0x7ff >> (PAGE_SHIFT - 12) : \
-- 0x3ffff >> (PAGE_SHIFT - 12))
-+ ((1UL << mmap_rnd_compat_bits) - 1) >> (PAGE_SHIFT - 12) : \
-+ ((1UL << mmap_rnd_bits) - 1) >> (PAGE_SHIFT - 12))
- #else
--#define STACK_RND_MASK (0x3ffff >> (PAGE_SHIFT - 12))
-+#define STACK_RND_MASK (((1UL << mmap_rnd_bits) - 1) >> (PAGE_SHIFT - 12))
- #endif
-
- #ifdef __AARCH64EB__
-diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
-index 9e773732520c..91359f45b5fc 100644
---- a/arch/arm64/kernel/process.c
-+++ b/arch/arm64/kernel/process.c
-@@ -419,9 +419,9 @@ unsigned long arch_align_stack(unsigned long sp)
- unsigned long arch_randomize_brk(struct mm_struct *mm)
- {
- if (is_compat_task())
-- return randomize_page(mm->brk, SZ_32M);
-+ return mm->brk + get_random_long() % SZ_32M + PAGE_SIZE;
- else
-- return randomize_page(mm->brk, SZ_1G);
-+ return mm->brk + get_random_long() % SZ_1G + PAGE_SIZE;
- }
-
- /*
-diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
-index 4f393eb9745f..1a31f8fc82ed 100644
---- a/arch/x86/Kconfig
-+++ b/arch/x86/Kconfig
-@@ -1145,8 +1145,7 @@ config VM86
- default X86_LEGACY_VM86
-
- config X86_16BIT
-- bool "Enable support for 16-bit segments" if EXPERT
-- default y
-+ bool "Enable support for 16-bit segments"
- depends on MODIFY_LDT_SYSCALL
- ---help---
- This option is required by programs like Wine to run 16-bit
-@@ -2220,7 +2219,7 @@ config COMPAT_VDSO
- choice
- prompt "vsyscall table for legacy applications"
- depends on X86_64
-- default LEGACY_VSYSCALL_EMULATE
-+ default LEGACY_VSYSCALL_NONE
- help
- Legacy user code that does not know how to find the vDSO expects
- to be able to issue three syscalls by calling fixed addresses in
-@@ -2310,8 +2309,7 @@ config CMDLINE_OVERRIDE
- be set to 'N' under normal conditions.
-
- config MODIFY_LDT_SYSCALL
-- bool "Enable the LDT (local descriptor table)" if EXPERT
-- default y
-+ bool "Enable the LDT (local descriptor table)"
- ---help---
- Linux can allow user programs to install a per-process x86
- Local Descriptor Table (LDT) using the modify_ldt(2) system
-diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
-index 6293a8768a91..add82e0f1df3 100644
---- a/arch/x86/Kconfig.debug
-+++ b/arch/x86/Kconfig.debug
-@@ -101,6 +101,7 @@ config EFI_PGT_DUMP
- config DEBUG_WX
- bool "Warn on W+X mappings at boot"
- select X86_PTDUMP_CORE
-+ default y
- ---help---
- Generate a warning if any W+X mappings are found at boot.
-
-diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig
-index e32fc1f274d8..d08acc76502a 100644
---- a/arch/x86/configs/x86_64_defconfig
-+++ b/arch/x86/configs/x86_64_defconfig
-@@ -1,5 +1,4 @@
- # CONFIG_LOCALVERSION_AUTO is not set
--CONFIG_SYSVIPC=y
- CONFIG_POSIX_MQUEUE=y
- CONFIG_BSD_PROCESS_ACCT=y
- CONFIG_TASKSTATS=y
-diff --git a/arch/x86/entry/vdso/vma.c b/arch/x86/entry/vdso/vma.c
-index 1911310959f8..bba8dbbc07a8 100644
---- a/arch/x86/entry/vdso/vma.c
-+++ b/arch/x86/entry/vdso/vma.c
-@@ -203,55 +203,9 @@ static int map_vdso(const struct vdso_image *image, unsigned long addr)
- }
-
- #ifdef CONFIG_X86_64
--/*
-- * Put the vdso above the (randomized) stack with another randomized
-- * offset. This way there is no hole in the middle of address space.
-- * To save memory make sure it is still in the same PTE as the stack
-- * top. This doesn't give that many random bits.
-- *
-- * Note that this algorithm is imperfect: the distribution of the vdso
-- * start address within a PMD is biased toward the end.
-- *
-- * Only used for the 64-bit and x32 vdsos.
-- */
--static unsigned long vdso_addr(unsigned long start, unsigned len)
--{
-- unsigned long addr, end;
-- unsigned offset;
--
-- /*
-- * Round up the start address. It can start out unaligned as a result
-- * of stack start randomization.
-- */
-- start = PAGE_ALIGN(start);
--
-- /* Round the lowest possible end address up to a PMD boundary. */
-- end = (start + len + PMD_SIZE - 1) & PMD_MASK;
-- if (end >= TASK_SIZE_MAX)
-- end = TASK_SIZE_MAX;
-- end -= len;
--
-- if (end > start) {
-- offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1);
-- addr = start + (offset << PAGE_SHIFT);
-- } else {
-- addr = start;
-- }
--
-- /*
-- * Forcibly align the final address in case we have a hardware
-- * issue that requires alignment for performance reasons.
-- */
-- addr = align_vdso_addr(addr);
--
-- return addr;
--}
--
- static int map_vdso_randomized(const struct vdso_image *image)
- {
-- unsigned long addr = vdso_addr(current->mm->start_stack, image->size-image->sym_vvar_start);
--
-- return map_vdso(image, addr);
-+ return map_vdso(image, 0);
- }
- #endif
-
-diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
-index 3a091cea36c5..0931c05a3348 100644
---- a/arch/x86/include/asm/elf.h
-+++ b/arch/x86/include/asm/elf.h
-@@ -249,11 +249,11 @@ extern int force_personality32;
-
- /*
- * This is the base location for PIE (ET_DYN with INTERP) loads. On
-- * 64-bit, this is above 4GB to leave the entire 32-bit address
-+ * 64-bit, this is raised to 4GB to leave the entire 32-bit address
- * space open for things that want to use the area for 32-bit pointers.
- */
- #define ELF_ET_DYN_BASE (mmap_is_ia32() ? 0x000400000UL : \
-- (DEFAULT_MAP_WINDOW / 3 * 2))
-+ 0x100000000UL)
-
- /* This yields a mask that user programs can use to figure out what
- instruction set this CPU supports. This could be done in user space,
-@@ -312,8 +312,8 @@ extern unsigned long get_mmap_base(int is_legacy);
-
- #ifdef CONFIG_X86_32
-
--#define __STACK_RND_MASK(is32bit) (0x7ff)
--#define STACK_RND_MASK (0x7ff)
-+#define __STACK_RND_MASK(is32bit) ((1UL << mmap_rnd_bits) - 1)
-+#define STACK_RND_MASK ((1UL << mmap_rnd_bits) - 1)
-
- #define ARCH_DLINFO ARCH_DLINFO_IA32
-
-@@ -322,7 +322,11 @@ extern unsigned long get_mmap_base(int is_legacy);
- #else /* CONFIG_X86_32 */
-
- /* 1GB for 64bit, 8MB for 32bit */
--#define __STACK_RND_MASK(is32bit) ((is32bit) ? 0x7ff : 0x3fffff)
-+#ifdef CONFIG_COMPAT
-+#define __STACK_RND_MASK(is32bit) ((is32bit) ? (1UL << mmap_rnd_compat_bits) - 1 : (1UL << mmap_rnd_bits) - 1)
-+#else
-+#define __STACK_RND_MASK(is32bit) ((1UL << mmap_rnd_bits) - 1)
-+#endif
- #define STACK_RND_MASK __STACK_RND_MASK(mmap_is_ia32())
-
- #define ARCH_DLINFO \
-@@ -380,5 +384,4 @@ struct va_alignment {
- } ____cacheline_aligned;
-
- extern struct va_alignment va_align;
--extern unsigned long align_vdso_addr(unsigned long);
- #endif /* _ASM_X86_ELF_H */
-diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
-index e31040333f0c..14f3f214c9d1 100644
---- a/arch/x86/include/asm/tlbflush.h
-+++ b/arch/x86/include/asm/tlbflush.h
-@@ -302,6 +302,7 @@ static inline void cr4_set_bits(unsigned long mask)
- unsigned long cr4;
-
- cr4 = this_cpu_read(cpu_tlbstate.cr4);
-+ BUG_ON(cr4 != __read_cr4());
- if ((cr4 | mask) != cr4) {
- cr4 |= mask;
- this_cpu_write(cpu_tlbstate.cr4, cr4);
-@@ -315,6 +316,7 @@ static inline void cr4_clear_bits(unsigned long mask)
- unsigned long cr4;
-
- cr4 = this_cpu_read(cpu_tlbstate.cr4);
-+ BUG_ON(cr4 != __read_cr4());
- if ((cr4 & ~mask) != cr4) {
- cr4 &= ~mask;
- this_cpu_write(cpu_tlbstate.cr4, cr4);
-@@ -327,6 +329,7 @@ static inline void cr4_toggle_bits(unsigned long mask)
- unsigned long cr4;
-
- cr4 = this_cpu_read(cpu_tlbstate.cr4);
-+ BUG_ON(cr4 != __read_cr4());
- cr4 ^= mask;
- this_cpu_write(cpu_tlbstate.cr4, cr4);
- __write_cr4(cr4);
-@@ -435,6 +438,7 @@ static inline void __native_flush_tlb_global(void)
- raw_local_irq_save(flags);
-
- cr4 = this_cpu_read(cpu_tlbstate.cr4);
-+ BUG_ON(cr4 != __read_cr4());
- /* toggle PGE */
- native_write_cr4(cr4 ^ X86_CR4_PGE);
- /* write old PGE again and flush TLBs */
-diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
-index 51e49f6fe8e1..7ee813033624 100644
---- a/arch/x86/kernel/cpu/common.c
-+++ b/arch/x86/kernel/cpu/common.c
-@@ -1669,7 +1669,6 @@ void cpu_init(void)
- wrmsrl(MSR_KERNEL_GS_BASE, 0);
- barrier();
-
-- x86_configure_nx();
- x2apic_setup();
-
- /*
-diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
-index a98d1cdd6299..7426eb5d1c03 100644
---- a/arch/x86/kernel/process.c
-+++ b/arch/x86/kernel/process.c
-@@ -40,6 +40,8 @@
- #include <asm/desc.h>
- #include <asm/prctl.h>
- #include <asm/spec-ctrl.h>
-+#include <asm/elf.h>
-+#include <linux/sizes.h>
-
- #include "process.h"
-
-@@ -782,7 +784,10 @@ unsigned long arch_align_stack(unsigned long sp)
-
- unsigned long arch_randomize_brk(struct mm_struct *mm)
- {
-- return randomize_page(mm->brk, 0x02000000);
-+ if (mmap_is_ia32())
-+ return mm->brk + get_random_long() % SZ_32M + PAGE_SIZE;
-+ else
-+ return mm->brk + get_random_long() % SZ_1G + PAGE_SIZE;
- }
-
- /*
-diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
-index a63fe77b3217..e1085e76043e 100644
---- a/arch/x86/kernel/sys_x86_64.c
-+++ b/arch/x86/kernel/sys_x86_64.c
-@@ -54,13 +54,6 @@ static unsigned long get_align_bits(void)
- return va_align.bits & get_align_mask();
- }
-
--unsigned long align_vdso_addr(unsigned long addr)
--{
-- unsigned long align_mask = get_align_mask();
-- addr = (addr + align_mask) & ~align_mask;
-- return addr | get_align_bits();
--}
--
- static int __init control_va_addr_alignment(char *str)
- {
- /* guard against enabling this on other CPU families */
-@@ -122,10 +115,7 @@ static void find_start_end(unsigned long addr, unsigned long flags,
- }
-
- *begin = get_mmap_base(1);
-- if (in_compat_syscall())
-- *end = task_size_32bit();
-- else
-- *end = task_size_64bit(addr > DEFAULT_MAP_WINDOW);
-+ *end = get_mmap_base(0);
- }
-
- unsigned long
-@@ -206,7 +196,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
-
- info.flags = VM_UNMAPPED_AREA_TOPDOWN;
- info.length = len;
-- info.low_limit = PAGE_SIZE;
-+ info.low_limit = get_mmap_base(1);
- info.high_limit = get_mmap_base(0);
-
- /*
-diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
-index 3141e67ec24c..e93173193f60 100644
---- a/arch/x86/mm/init_32.c
-+++ b/arch/x86/mm/init_32.c
-@@ -558,7 +558,7 @@ static void __init pagetable_init(void)
- permanent_kmaps_init(pgd_base);
- }
-
--pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL);
-+pteval_t __supported_pte_mask __ro_after_init = ~(_PAGE_NX | _PAGE_GLOBAL);
- EXPORT_SYMBOL_GPL(__supported_pte_mask);
-
- /* user-defined highmem size */
-@@ -865,7 +865,7 @@ int arch_remove_memory(u64 start, u64 size)
- #endif
- #endif
-
--int kernel_set_to_readonly __read_mostly;
-+int kernel_set_to_readonly __ro_after_init;
-
- void set_kernel_text_rw(void)
- {
-@@ -917,12 +917,11 @@ void mark_rodata_ro(void)
- unsigned long start = PFN_ALIGN(_text);
- unsigned long size = PFN_ALIGN(_etext) - start;
-
-+ kernel_set_to_readonly = 1;
- set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
- printk(KERN_INFO "Write protecting the kernel text: %luk\n",
- size >> 10);
-
-- kernel_set_to_readonly = 1;
--
- #ifdef CONFIG_CPA_DEBUG
- printk(KERN_INFO "Testing CPA: Reverting %lx-%lx\n",
- start, start+size);
-diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
-index 624edfbff02d..54bb0705dd53 100644
---- a/arch/x86/mm/init_64.c
-+++ b/arch/x86/mm/init_64.c
-@@ -65,7 +65,7 @@
- * around without checking the pgd every time.
- */
-
--pteval_t __supported_pte_mask __read_mostly = ~0;
-+pteval_t __supported_pte_mask __ro_after_init = ~0;
- EXPORT_SYMBOL_GPL(__supported_pte_mask);
-
- int force_personality32;
-@@ -1179,7 +1179,7 @@ void __init mem_init(void)
- mem_init_print_info(NULL);
- }
-
--int kernel_set_to_readonly;
-+int kernel_set_to_readonly __ro_after_init;
-
- void set_kernel_text_rw(void)
- {
-@@ -1228,9 +1228,8 @@ void mark_rodata_ro(void)
-
- printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
- (end - start) >> 10);
-- set_memory_ro(start, (end - start) >> PAGE_SHIFT);
--
- kernel_set_to_readonly = 1;
-+ set_memory_ro(start, (end - start) >> PAGE_SHIFT);
-
- /*
- * The rodata/data/bss/brk section (but not the kernel text!)
-diff --git a/block/blk-softirq.c b/block/blk-softirq.c
-index 01e2b353a2b9..9aeddca4a29f 100644
---- a/block/blk-softirq.c
-+++ b/block/blk-softirq.c
-@@ -20,7 +20,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
- * Softirq action handler - move entries to local list and loop over them
- * while passing them to the queue registered handler.
- */
--static __latent_entropy void blk_done_softirq(struct softirq_action *h)
-+static __latent_entropy void blk_done_softirq(void)
- {
- struct list_head *cpu_list, local_list;
-
-diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
-index 04f406d7e973..60d8c59fa824 100644
---- a/drivers/ata/libata-core.c
-+++ b/drivers/ata/libata-core.c
-@@ -5148,7 +5148,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
- struct ata_port *ap;
- unsigned int tag;
-
-- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
-+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
- ap = qc->ap;
-
- qc->flags = 0;
-@@ -5165,7 +5165,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
- struct ata_port *ap;
- struct ata_link *link;
-
-- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
-+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
- WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
- ap = qc->ap;
- link = qc->dev->link;
-diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
-index c28dca0c613d..d4813f0d25ca 100644
---- a/drivers/char/Kconfig
-+++ b/drivers/char/Kconfig
-@@ -9,7 +9,6 @@ source "drivers/tty/Kconfig"
-
- config DEVMEM
- bool "/dev/mem virtual device support"
-- default y
- help
- Say Y here if you want to support the /dev/mem device.
- The /dev/mem device is used to access areas of physical
-@@ -568,7 +567,6 @@ config TELCLOCK
- config DEVPORT
- bool "/dev/port character device"
- depends on ISA || PCI
-- default y
- help
- Say Y here if you want to support the /dev/port device. The /dev/port
- device is similar to /dev/mem, but for I/O ports.
-diff --git a/drivers/media/dvb-frontends/cx24116.c b/drivers/media/dvb-frontends/cx24116.c
-index e105532bfba8..e07d52bb9b62 100644
---- a/drivers/media/dvb-frontends/cx24116.c
-+++ b/drivers/media/dvb-frontends/cx24116.c
-@@ -1462,7 +1462,7 @@ static int cx24116_tune(struct dvb_frontend *fe, bool re_tune,
- return cx24116_read_status(fe, status);
- }
-
--static int cx24116_get_algo(struct dvb_frontend *fe)
-+static enum dvbfe_algo cx24116_get_algo(struct dvb_frontend *fe)
- {
- return DVBFE_ALGO_HW;
- }
-diff --git a/drivers/media/dvb-frontends/cx24117.c b/drivers/media/dvb-frontends/cx24117.c
-index d37cb7762bd6..97e0feff0ede 100644
---- a/drivers/media/dvb-frontends/cx24117.c
-+++ b/drivers/media/dvb-frontends/cx24117.c
-@@ -1555,7 +1555,7 @@ static int cx24117_tune(struct dvb_frontend *fe, bool re_tune,
- return cx24117_read_status(fe, status);
- }
-
--static int cx24117_get_algo(struct dvb_frontend *fe)
-+static enum dvbfe_algo cx24117_get_algo(struct dvb_frontend *fe)
- {
- return DVBFE_ALGO_HW;
- }
-diff --git a/drivers/media/dvb-frontends/cx24120.c b/drivers/media/dvb-frontends/cx24120.c
-index 7f11dcc94d85..01da670760ba 100644
---- a/drivers/media/dvb-frontends/cx24120.c
-+++ b/drivers/media/dvb-frontends/cx24120.c
-@@ -1491,7 +1491,7 @@ static int cx24120_tune(struct dvb_frontend *fe, bool re_tune,
- return cx24120_read_status(fe, status);
- }
-
--static int cx24120_get_algo(struct dvb_frontend *fe)
-+static enum dvbfe_algo cx24120_get_algo(struct dvb_frontend *fe)
- {
- return DVBFE_ALGO_HW;
- }
-diff --git a/drivers/media/dvb-frontends/cx24123.c b/drivers/media/dvb-frontends/cx24123.c
-index 1d59d1d3bd82..41cd0e9ea199 100644
---- a/drivers/media/dvb-frontends/cx24123.c
-+++ b/drivers/media/dvb-frontends/cx24123.c
-@@ -1005,7 +1005,7 @@ static int cx24123_tune(struct dvb_frontend *fe,
- return retval;
- }
-
--static int cx24123_get_algo(struct dvb_frontend *fe)
-+static enum dvbfe_algo cx24123_get_algo(struct dvb_frontend *fe)
- {
- return DVBFE_ALGO_HW;
- }
-diff --git a/drivers/media/dvb-frontends/cxd2820r_core.c b/drivers/media/dvb-frontends/cxd2820r_core.c
-index f6ebbb47b9b2..3e0d8cbd76da 100644
---- a/drivers/media/dvb-frontends/cxd2820r_core.c
-+++ b/drivers/media/dvb-frontends/cxd2820r_core.c
-@@ -403,7 +403,7 @@ static enum dvbfe_search cxd2820r_search(struct dvb_frontend *fe)
- return DVBFE_ALGO_SEARCH_ERROR;
- }
-
--static int cxd2820r_get_frontend_algo(struct dvb_frontend *fe)
-+static enum dvbfe_algo cxd2820r_get_frontend_algo(struct dvb_frontend *fe)
- {
- return DVBFE_ALGO_CUSTOM;
- }
-diff --git a/drivers/media/dvb-frontends/mb86a20s.c b/drivers/media/dvb-frontends/mb86a20s.c
-index e8ac8c3e2ec0..e0f4ba8302d1 100644
---- a/drivers/media/dvb-frontends/mb86a20s.c
-+++ b/drivers/media/dvb-frontends/mb86a20s.c
-@@ -2055,7 +2055,7 @@ static void mb86a20s_release(struct dvb_frontend *fe)
- kfree(state);
- }
-
--static int mb86a20s_get_frontend_algo(struct dvb_frontend *fe)
-+static enum dvbfe_algo mb86a20s_get_frontend_algo(struct dvb_frontend *fe)
- {
- return DVBFE_ALGO_HW;
- }
-diff --git a/drivers/media/dvb-frontends/s921.c b/drivers/media/dvb-frontends/s921.c
-index 274544a3ae0e..9ef9b9bc1bd2 100644
---- a/drivers/media/dvb-frontends/s921.c
-+++ b/drivers/media/dvb-frontends/s921.c
-@@ -464,7 +464,7 @@ static int s921_tune(struct dvb_frontend *fe,
- return rc;
- }
-
--static int s921_get_algo(struct dvb_frontend *fe)
-+static enum dvbfe_algo s921_get_algo(struct dvb_frontend *fe)
- {
- return DVBFE_ALGO_HW;
- }
-diff --git a/drivers/media/pci/bt8xx/dst.c b/drivers/media/pci/bt8xx/dst.c
-index 7166d2279465..fa682f9fdc4b 100644
---- a/drivers/media/pci/bt8xx/dst.c
-+++ b/drivers/media/pci/bt8xx/dst.c
-@@ -1657,7 +1657,7 @@ static int dst_tune_frontend(struct dvb_frontend* fe,
- return 0;
- }
-
--static int dst_get_tuning_algo(struct dvb_frontend *fe)
-+static enum dvbfe_algo dst_get_tuning_algo(struct dvb_frontend *fe)
- {
- return dst_algo ? DVBFE_ALGO_HW : DVBFE_ALGO_SW;
- }
-diff --git a/drivers/media/pci/pt1/va1j5jf8007s.c b/drivers/media/pci/pt1/va1j5jf8007s.c
-index f75f69556be7..d913a6050e8c 100644
---- a/drivers/media/pci/pt1/va1j5jf8007s.c
-+++ b/drivers/media/pci/pt1/va1j5jf8007s.c
-@@ -98,7 +98,7 @@ static int va1j5jf8007s_read_snr(struct dvb_frontend *fe, u16 *snr)
- return 0;
- }
-
--static int va1j5jf8007s_get_frontend_algo(struct dvb_frontend *fe)
-+static enum dvbfe_algo va1j5jf8007s_get_frontend_algo(struct dvb_frontend *fe)
- {
- return DVBFE_ALGO_HW;
- }
-diff --git a/drivers/media/pci/pt1/va1j5jf8007t.c b/drivers/media/pci/pt1/va1j5jf8007t.c
-index 63fda79a75c0..4115c3ccd4a8 100644
---- a/drivers/media/pci/pt1/va1j5jf8007t.c
-+++ b/drivers/media/pci/pt1/va1j5jf8007t.c
-@@ -88,7 +88,7 @@ static int va1j5jf8007t_read_snr(struct dvb_frontend *fe, u16 *snr)
- return 0;
- }
-
--static int va1j5jf8007t_get_frontend_algo(struct dvb_frontend *fe)
-+static enum dvbfe_algo va1j5jf8007t_get_frontend_algo(struct dvb_frontend *fe)
- {
- return DVBFE_ALGO_HW;
- }
-diff --git a/drivers/misc/lkdtm_core.c b/drivers/misc/lkdtm_core.c
-index 981b3ef71e47..9883da1da383 100644
---- a/drivers/misc/lkdtm_core.c
-+++ b/drivers/misc/lkdtm_core.c
-@@ -78,7 +78,7 @@ static irqreturn_t jp_handle_irq_event(unsigned int irq,
- return 0;
- }
-
--static void jp_tasklet_action(struct softirq_action *a)
-+static void jp_tasklet_action(void)
- {
- lkdtm_handler();
- jprobe_return();
-diff --git a/drivers/tty/Kconfig b/drivers/tty/Kconfig
-index b811442c5ce6..4f62a63cbcb1 100644
---- a/drivers/tty/Kconfig
-+++ b/drivers/tty/Kconfig
-@@ -122,7 +122,6 @@ config UNIX98_PTYS
-
- config LEGACY_PTYS
- bool "Legacy (BSD) PTY support"
-- default y
- ---help---
- A pseudo terminal (PTY) is a software device consisting of two
- 	  halves: a master and a slave. The slave device behaves identically to
-diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
-index 417b81c67fe9..4e9bb7851ab1 100644
---- a/drivers/tty/tty_io.c
-+++ b/drivers/tty/tty_io.c
-@@ -171,6 +171,7 @@ static void free_tty_struct(struct tty_struct *tty)
- put_device(tty->dev);
- kfree(tty->write_buf);
- tty->magic = 0xDEADDEAD;
-+ put_user_ns(tty->owner_user_ns);
- kfree(tty);
- }
-
-@@ -2167,11 +2168,19 @@ static int tty_fasync(int fd, struct file *filp, int on)
- * FIXME: may race normal receive processing
- */
-
-+int tiocsti_restrict = IS_ENABLED(CONFIG_SECURITY_TIOCSTI_RESTRICT);
-+
- static int tiocsti(struct tty_struct *tty, char __user *p)
- {
- char ch, mbz = 0;
- struct tty_ldisc *ld;
-
-+ if (tiocsti_restrict &&
-+ !ns_capable(tty->owner_user_ns, CAP_SYS_ADMIN)) {
-+ dev_warn_ratelimited(tty->dev,
-+ "Denied TIOCSTI ioctl for non-privileged process\n");
-+ return -EPERM;
-+ }
- if ((current->signal->tty != tty) && !capable(CAP_SYS_ADMIN))
- return -EPERM;
- if (get_user(ch, p))
-@@ -2854,6 +2863,7 @@ struct tty_struct *alloc_tty_struct(struct tty_driver *driver, int idx)
- tty->index = idx;
- tty_line_name(driver, idx, tty->name);
- tty->dev = tty_get_device(tty);
-+ tty->owner_user_ns = get_user_ns(current_user_ns());
-
- return tty;
- }
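
The tiocsti_restrict knob added above gates the classic TIOCSTI injection
primitive, sketched below from userspace: bytes pushed with TIOCSTI appear in
the tty input queue as if typed, letting a process inject commands into its
controlling terminal. With the restriction active, the ioctl fails with EPERM
unless the caller holds CAP_SYS_ADMIN in the tty's owning user namespace.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	const char *cmd = "id\n";
	int fd = open("/dev/tty", O_RDWR);

	if (fd < 0) {
		perror("open /dev/tty");
		return 1;
	}
	for (; *cmd; cmd++) {
		if (ioctl(fd, TIOCSTI, cmd) < 0) { /* EPERM when restricted */
			perror("ioctl(TIOCSTI)");
			close(fd);
			return 1;
		}
	}
	close(fd);
	return 0;
}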
-diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
-index a073cb5be013..e9dfece7b7ce 100644
---- a/drivers/usb/core/hub.c
-+++ b/drivers/usb/core/hub.c
-@@ -38,6 +38,8 @@
- #define USB_VENDOR_GENESYS_LOGIC 0x05e3
- #define HUB_QUIRK_CHECK_PORT_AUTOSUSPEND 0x01
-
-+extern int deny_new_usb;
-+
- /* Protect struct usb_device->state and ->children members
- * Note: Both are also protected by ->dev.sem, except that ->state can
- * change to USB_STATE_NOTATTACHED even when the semaphore isn't held. */
-@@ -4818,6 +4820,12 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
- goto done;
- return;
- }
-+
-+ if (deny_new_usb) {
-+ dev_err(&port_dev->dev, "denied insert of USB device on port %d\n", port1);
-+ goto done;
-+ }
-+
- if (hub_is_superspeed(hub->hdev))
- unit_load = 150;
- else
-diff --git a/fs/exec.c b/fs/exec.c
-index 0da4d748b4e6..69fcee853363 100644
---- a/fs/exec.c
-+++ b/fs/exec.c
-@@ -62,6 +62,7 @@
- #include <linux/oom.h>
- #include <linux/compat.h>
- #include <linux/vmalloc.h>
-+#include <linux/random.h>
-
- #include <linux/uaccess.h>
- #include <asm/mmu_context.h>
-@@ -321,6 +322,8 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
- arch_bprm_mm_init(mm, vma);
- up_write(&mm->mmap_sem);
- bprm->p = vma->vm_end - sizeof(void *);
-+ if (randomize_va_space)
-+ bprm->p ^= get_random_int() & ~PAGE_MASK;
- return 0;
- err:
- up_write(&mm->mmap_sem);
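
The __bprm_mm_init() hunk above jitters the initial stack pointer within its
page: ~PAGE_MASK selects the bits below the page size, so the XOR randomizes
the sub-page offset without moving the stack to a different page. A userspace
sketch of the arithmetic, with rand() standing in for get_random_int():

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long p = 0x7ffffffff000UL - sizeof(void *);

	printf("before: %#lx\n", p);
	p ^= (unsigned long)rand() & ~PAGE_MASK; /* randomize bits below the page size */
	printf("after:  %#lx (same page, random sub-page offset)\n", p);
	return 0;
}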
-diff --git a/fs/namei.c b/fs/namei.c
-index d1e467b7b9de..0d96ad71b700 100644
---- a/fs/namei.c
-+++ b/fs/namei.c
-@@ -902,10 +902,10 @@ static inline void put_link(struct nameidata *nd)
- path_put(&last->link);
- }
-
--int sysctl_protected_symlinks __read_mostly = 0;
--int sysctl_protected_hardlinks __read_mostly = 0;
--int sysctl_protected_fifos __read_mostly;
--int sysctl_protected_regular __read_mostly;
-+int sysctl_protected_symlinks __read_mostly = 1;
-+int sysctl_protected_hardlinks __read_mostly = 1;
-+int sysctl_protected_fifos __read_mostly = 2;
-+int sysctl_protected_regular __read_mostly = 2;
-
- /**
- * may_follow_link - Check symlink following for unsafe situations
-diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig
-index 5f93cfacb3d1..cea0d7d3b23e 100644
---- a/fs/nfs/Kconfig
-+++ b/fs/nfs/Kconfig
-@@ -195,4 +195,3 @@ config NFS_DEBUG
- bool
- depends on NFS_FS && SUNRPC_DEBUG
- select CRC32
-- default y
-diff --git a/fs/pipe.c b/fs/pipe.c
-index 8ef7d7bef775..b82f305ec13d 100644
---- a/fs/pipe.c
-+++ b/fs/pipe.c
-@@ -38,7 +38,7 @@ unsigned int pipe_max_size = 1048576;
- /*
- * Minimum pipe size, as required by POSIX
- */
--unsigned int pipe_min_size = PAGE_SIZE;
-+unsigned int pipe_min_size __read_only = PAGE_SIZE;
-
- /* Maximum allocatable pages per user. Hard limit is unset by default, soft
- * matches default values.
-diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
-index 1ade1206bb89..60b0f76dec47 100644
---- a/fs/proc/Kconfig
-+++ b/fs/proc/Kconfig
-@@ -39,7 +39,6 @@ config PROC_KCORE
- config PROC_VMCORE
- bool "/proc/vmcore support"
- depends on PROC_FS && CRASH_DUMP
-- default y
- help
- Exports the dump image of crashed kernel in ELF format.
-
-diff --git a/fs/stat.c b/fs/stat.c
-index 873785dae022..d3c2ada8b9c7 100644
---- a/fs/stat.c
-+++ b/fs/stat.c
-@@ -40,8 +40,13 @@ void generic_fillattr(struct inode *inode, struct kstat *stat)
- stat->gid = inode->i_gid;
- stat->rdev = inode->i_rdev;
- stat->size = i_size_read(inode);
-- stat->atime = inode->i_atime;
-- stat->mtime = inode->i_mtime;
-+ if (is_sidechannel_device(inode) && !capable_noaudit(CAP_MKNOD)) {
-+ stat->atime = inode->i_ctime;
-+ stat->mtime = inode->i_ctime;
-+ } else {
-+ stat->atime = inode->i_atime;
-+ stat->mtime = inode->i_mtime;
-+ }
- stat->ctime = inode->i_ctime;
- stat->blksize = i_blocksize(inode);
- stat->blocks = inode->i_blocks;
-@@ -75,9 +80,14 @@ int vfs_getattr_nosec(const struct path *path, struct kstat *stat,
- stat->result_mask |= STATX_BASIC_STATS;
- request_mask &= STATX_ALL;
- query_flags &= KSTAT_QUERY_FLAGS;
-- if (inode->i_op->getattr)
-- return inode->i_op->getattr(path, stat, request_mask,
-- query_flags);
-+ if (inode->i_op->getattr) {
-+ int retval = inode->i_op->getattr(path, stat, request_mask, query_flags);
-+ if (!retval && is_sidechannel_device(inode) && !capable_noaudit(CAP_MKNOD)) {
-+ stat->atime = stat->ctime;
-+ stat->mtime = stat->ctime;
-+ }
-+ return retval;
-+ }
-
- generic_fillattr(inode, stat);
- return 0;
-diff --git a/include/linux/cache.h b/include/linux/cache.h
-index 750621e41d1c..e7157c18c62c 100644
---- a/include/linux/cache.h
-+++ b/include/linux/cache.h
-@@ -31,6 +31,8 @@
- #define __ro_after_init __attribute__((__section__(".data..ro_after_init")))
- #endif
-
-+#define __read_only __ro_after_init
-+
- #ifndef ____cacheline_aligned
- #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
- #endif
-diff --git a/include/linux/capability.h b/include/linux/capability.h
-index f640dcbc880c..2b4f5d651f19 100644
---- a/include/linux/capability.h
-+++ b/include/linux/capability.h
-@@ -207,6 +207,7 @@ extern bool has_capability_noaudit(struct task_struct *t, int cap);
- extern bool has_ns_capability_noaudit(struct task_struct *t,
- struct user_namespace *ns, int cap);
- extern bool capable(int cap);
-+extern bool capable_noaudit(int cap);
- extern bool ns_capable(struct user_namespace *ns, int cap);
- extern bool ns_capable_noaudit(struct user_namespace *ns, int cap);
- #else
-@@ -232,6 +233,10 @@ static inline bool capable(int cap)
- {
- return true;
- }
-+static inline bool capable_noaudit(int cap)
-+{
-+ return true;
-+}
- static inline bool ns_capable(struct user_namespace *ns, int cap)
- {
- return true;
-diff --git a/include/linux/fs.h b/include/linux/fs.h
-index f6a577edec67..fa3a6caeca6c 100644
---- a/include/linux/fs.h
-+++ b/include/linux/fs.h
-@@ -3383,4 +3383,15 @@ static inline bool dir_relax_shared(struct inode *inode)
- extern bool path_noexec(const struct path *path);
- extern void inode_nohighmem(struct inode *inode);
-
-+extern int device_sidechannel_restrict;
-+
-+static inline bool is_sidechannel_device(const struct inode *inode)
-+{
-+ umode_t mode;
-+ if (!device_sidechannel_restrict)
-+ return false;
-+ mode = inode->i_mode;
-+ return ((S_ISCHR(mode) || S_ISBLK(mode)) && (mode & (S_IROTH | S_IWOTH)));
-+}
-+
- #endif /* _LINUX_FS_H */
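
is_sidechannel_device() classifies an inode as a side-channel target when it
is a character or block device that is world-readable or world-writable. The
same predicate, restated as a small userspace checker:

#include <stdio.h>
#include <sys/stat.h>

static int is_sidechannel_device(mode_t mode)
{
	return (S_ISCHR(mode) || S_ISBLK(mode)) && (mode & (S_IROTH | S_IWOTH));
}

int main(int argc, char **argv)
{
	struct stat st;

	if (argc < 2) {
		fprintf(stderr, "usage: %s <path>\n", argv[0]);
		return 1;
	}
	if (stat(argv[1], &st) != 0) {
		perror("stat");
		return 1;
	}
	printf("%s: %s\n", argv[1], is_sidechannel_device(st.st_mode)
	       ? "side-channel device (timestamps would be clamped)"
	       : "not restricted");
	return 0;
}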
-diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
-index bdaf22582f6e..326ff15d4637 100644
---- a/include/linux/fsnotify.h
-+++ b/include/linux/fsnotify.h
-@@ -181,6 +181,9 @@ static inline void fsnotify_access(struct file *file)
- struct inode *inode = path->dentry->d_inode;
- __u32 mask = FS_ACCESS;
-
-+ if (is_sidechannel_device(inode))
-+ return;
-+
- if (S_ISDIR(inode->i_mode))
- mask |= FS_ISDIR;
-
-@@ -199,6 +202,9 @@ static inline void fsnotify_modify(struct file *file)
- struct inode *inode = path->dentry->d_inode;
- __u32 mask = FS_MODIFY;
-
-+ if (is_sidechannel_device(inode))
-+ return;
-+
- if (S_ISDIR(inode->i_mode))
- mask |= FS_ISDIR;
-
-diff --git a/include/linux/gfp.h b/include/linux/gfp.h
-index b041f94678de..fd8bb5a78b75 100644
---- a/include/linux/gfp.h
-+++ b/include/linux/gfp.h
-@@ -518,9 +518,9 @@ extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
- extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order);
- extern unsigned long get_zeroed_page(gfp_t gfp_mask);
-
--void *alloc_pages_exact(size_t size, gfp_t gfp_mask);
-+void *alloc_pages_exact(size_t size, gfp_t gfp_mask) __attribute__((alloc_size(1)));
- void free_pages_exact(void *virt, size_t size);
--void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask);
-+void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask) __attribute__((alloc_size(2)));
-
- #define __get_free_page(gfp_mask) \
- __get_free_pages((gfp_mask), 0)
-diff --git a/include/linux/highmem.h b/include/linux/highmem.h
-index 776f90f3a1cd..3f5c47000059 100644
---- a/include/linux/highmem.h
-+++ b/include/linux/highmem.h
-@@ -191,6 +191,13 @@ static inline void clear_highpage(struct page *page)
- kunmap_atomic(kaddr);
- }
-
-+static inline void verify_zero_highpage(struct page *page)
-+{
-+ void *kaddr = kmap_atomic(page);
-+ BUG_ON(memchr_inv(kaddr, 0, PAGE_SIZE));
-+ kunmap_atomic(kaddr);
-+}
-+
- static inline void zero_user_segments(struct page *page,
- unsigned start1, unsigned end1,
- unsigned start2, unsigned end2)
-diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
-index 69c238210325..ee487ea4f48f 100644
---- a/include/linux/interrupt.h
-+++ b/include/linux/interrupt.h
-@@ -485,7 +485,7 @@ extern const char * const softirq_to_name[NR_SOFTIRQS];
-
- struct softirq_action
- {
-- void (*action)(struct softirq_action *);
-+ void (*action)(void);
- };
-
- asmlinkage void do_softirq(void);
-@@ -500,7 +500,7 @@ static inline void do_softirq_own_stack(void)
- }
- #endif
-
--extern void open_softirq(int nr, void (*action)(struct softirq_action *));
-+extern void __init open_softirq(int nr, void (*action)(void));
- extern void softirq_init(void);
- extern void __raise_softirq_irqoff(unsigned int nr);
-
-diff --git a/include/linux/kobject_ns.h b/include/linux/kobject_ns.h
-index df32d2508290..c992d130b94d 100644
---- a/include/linux/kobject_ns.h
-+++ b/include/linux/kobject_ns.h
-@@ -46,7 +46,7 @@ struct kobj_ns_type_operations {
- void (*drop_ns)(void *);
- };
-
--int kobj_ns_type_register(const struct kobj_ns_type_operations *ops);
-+int __init kobj_ns_type_register(const struct kobj_ns_type_operations *ops);
- int kobj_ns_type_registered(enum kobj_ns_type type);
- const struct kobj_ns_type_operations *kobj_child_ns_ops(struct kobject *parent);
- const struct kobj_ns_type_operations *kobj_ns_ops(struct kobject *kobj);
-diff --git a/include/linux/mm.h b/include/linux/mm.h
-index 58f2263de4de..e90dc5d98c7f 100644
---- a/include/linux/mm.h
-+++ b/include/linux/mm.h
-@@ -525,7 +525,7 @@ static inline int is_vmalloc_or_module_addr(const void *x)
- }
- #endif
-
--extern void *kvmalloc_node(size_t size, gfp_t flags, int node);
-+extern void *kvmalloc_node(size_t size, gfp_t flags, int node) __attribute__((alloc_size(1)));
- static inline void *kvmalloc(size_t size, gfp_t flags)
- {
- return kvmalloc_node(size, flags, NUMA_NO_NODE);
-diff --git a/include/linux/percpu.h b/include/linux/percpu.h
-index 296bbe49d5d1..b26652c9a98d 100644
---- a/include/linux/percpu.h
-+++ b/include/linux/percpu.h
-@@ -129,7 +129,7 @@ extern int __init pcpu_page_first_chunk(size_t reserved_size,
- pcpu_fc_populate_pte_fn_t populate_pte_fn);
- #endif
-
--extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align);
-+extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align) __attribute__((alloc_size(1)));
- extern bool __is_kernel_percpu_address(unsigned long addr, unsigned long *can_addr);
- extern bool is_kernel_percpu_address(unsigned long addr);
-
-@@ -137,8 +137,8 @@ extern bool is_kernel_percpu_address(unsigned long addr);
- extern void __init setup_per_cpu_areas(void);
- #endif
-
--extern void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp);
--extern void __percpu *__alloc_percpu(size_t size, size_t align);
-+extern void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp) __attribute__((alloc_size(1)));
-+extern void __percpu *__alloc_percpu(size_t size, size_t align) __attribute__((alloc_size(1)));
- extern void free_percpu(void __percpu *__pdata);
- extern phys_addr_t per_cpu_ptr_to_phys(void *addr);
-
-diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
-index 8e22f24ded6a..b7fecdfa6de5 100644
---- a/include/linux/perf_event.h
-+++ b/include/linux/perf_event.h
-@@ -1165,6 +1165,11 @@ extern int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
- int perf_event_max_stack_handler(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos);
-
-+static inline bool perf_paranoid_any(void)
-+{
-+ return sysctl_perf_event_paranoid > 2;
-+}
-+
- static inline bool perf_paranoid_tracepoint_raw(void)
- {
- return sysctl_perf_event_paranoid > -1;
-diff --git a/include/linux/slab.h b/include/linux/slab.h
-index ae5ed6492d54..fd0786124504 100644
---- a/include/linux/slab.h
-+++ b/include/linux/slab.h
-@@ -146,8 +146,8 @@ void memcg_destroy_kmem_caches(struct mem_cgroup *);
- /*
- * Common kmalloc functions provided by all allocators
- */
--void * __must_check __krealloc(const void *, size_t, gfp_t);
--void * __must_check krealloc(const void *, size_t, gfp_t);
-+void * __must_check __krealloc(const void *, size_t, gfp_t) __attribute__((alloc_size(2)));
-+void * __must_check krealloc(const void *, size_t, gfp_t) __attribute__((alloc_size(2)));
- void kfree(const void *);
- void kzfree(const void *);
- size_t ksize(const void *);
-@@ -324,7 +324,7 @@ static __always_inline int kmalloc_index(size_t size)
- }
- #endif /* !CONFIG_SLOB */
-
--void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __malloc;
-+void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __malloc __attribute__((alloc_size(1)));
- void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags) __assume_slab_alignment __malloc;
- void kmem_cache_free(struct kmem_cache *, void *);
-
-@@ -348,7 +348,7 @@ static __always_inline void kfree_bulk(size_t size, void **p)
- }
-
- #ifdef CONFIG_NUMA
--void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment __malloc;
-+void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment __malloc __attribute__((alloc_size(1)));
- void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node) __assume_slab_alignment __malloc;
- #else
- static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
-@@ -473,7 +473,7 @@ static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
- * for general use, and so are not documented here. For a full list of
- * potential flags, always refer to linux/gfp.h.
- */
--static __always_inline void *kmalloc(size_t size, gfp_t flags)
-+static __always_inline __attribute__((alloc_size(1))) void *kmalloc(size_t size, gfp_t flags)
- {
- if (__builtin_constant_p(size)) {
- if (size > KMALLOC_MAX_CACHE_SIZE)
-@@ -513,7 +513,7 @@ static __always_inline int kmalloc_size(int n)
- return 0;
- }
-
--static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
-+static __always_inline __attribute__((alloc_size(1))) void *kmalloc_node(size_t size, gfp_t flags, int node)
- {
- #ifndef CONFIG_SLOB
- if (__builtin_constant_p(size) &&
-diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
-index f8ced87a2efe..cd61c8d2aa6e 100644
---- a/include/linux/slub_def.h
-+++ b/include/linux/slub_def.h
-@@ -121,6 +121,11 @@ struct kmem_cache {
- unsigned long random;
- #endif
-
-+#ifdef CONFIG_SLAB_CANARY
-+ unsigned long random_active;
-+ unsigned long random_inactive;
-+#endif
-+
- #ifdef CONFIG_NUMA
- /*
- * Defragmentation by allocating from a remote node.
-diff --git a/include/linux/string.h b/include/linux/string.h
-index 96115bf561b4..f93d908c5bbc 100644
---- a/include/linux/string.h
-+++ b/include/linux/string.h
-@@ -234,10 +234,16 @@ void __read_overflow2(void) __compiletime_error("detected read beyond size of ob
- void __read_overflow3(void) __compiletime_error("detected read beyond size of object passed as 3rd parameter");
- void __write_overflow(void) __compiletime_error("detected write beyond size of object passed as 1st parameter");
-
-+#ifdef CONFIG_FORTIFY_SOURCE_STRICT_STRING
-+#define __string_size(p) __builtin_object_size(p, 1)
-+#else
-+#define __string_size(p) __builtin_object_size(p, 0)
-+#endif
-+
- #if !defined(__NO_FORTIFY) && defined(__OPTIMIZE__) && defined(CONFIG_FORTIFY_SOURCE)
- __FORTIFY_INLINE char *strncpy(char *p, const char *q, __kernel_size_t size)
- {
-- size_t p_size = __builtin_object_size(p, 0);
-+ size_t p_size = __string_size(p);
- if (__builtin_constant_p(size) && p_size < size)
- __write_overflow();
- if (p_size < size)
-@@ -247,7 +253,7 @@ __FORTIFY_INLINE char *strncpy(char *p, const char *q, __kernel_size_t size)
-
- __FORTIFY_INLINE char *strcat(char *p, const char *q)
- {
-- size_t p_size = __builtin_object_size(p, 0);
-+ size_t p_size = __string_size(p);
- if (p_size == (size_t)-1)
- return __builtin_strcat(p, q);
- if (strlcat(p, q, p_size) >= p_size)
-@@ -258,7 +264,7 @@ __FORTIFY_INLINE char *strcat(char *p, const char *q)
- __FORTIFY_INLINE __kernel_size_t strlen(const char *p)
- {
- __kernel_size_t ret;
-- size_t p_size = __builtin_object_size(p, 0);
-+ size_t p_size = __string_size(p);
-
- /* Work around gcc excess stack consumption issue */
- if (p_size == (size_t)-1 ||
-@@ -273,7 +279,7 @@ __FORTIFY_INLINE __kernel_size_t strlen(const char *p)
- extern __kernel_size_t __real_strnlen(const char *, __kernel_size_t) __RENAME(strnlen);
- __FORTIFY_INLINE __kernel_size_t strnlen(const char *p, __kernel_size_t maxlen)
- {
-- size_t p_size = __builtin_object_size(p, 0);
-+ size_t p_size = __string_size(p);
- __kernel_size_t ret = __real_strnlen(p, maxlen < p_size ? maxlen : p_size);
- if (p_size <= ret && maxlen != ret)
- fortify_panic(__func__);
-@@ -285,8 +291,8 @@ extern size_t __real_strlcpy(char *, const char *, size_t) __RENAME(strlcpy);
- __FORTIFY_INLINE size_t strlcpy(char *p, const char *q, size_t size)
- {
- size_t ret;
-- size_t p_size = __builtin_object_size(p, 0);
-- size_t q_size = __builtin_object_size(q, 0);
-+ size_t p_size = __string_size(p);
-+ size_t q_size = __string_size(q);
- if (p_size == (size_t)-1 && q_size == (size_t)-1)
- return __real_strlcpy(p, q, size);
- ret = strlen(q);
-@@ -306,8 +312,8 @@ __FORTIFY_INLINE size_t strlcpy(char *p, const char *q, size_t size)
- __FORTIFY_INLINE char *strncat(char *p, const char *q, __kernel_size_t count)
- {
- size_t p_len, copy_len;
-- size_t p_size = __builtin_object_size(p, 0);
-- size_t q_size = __builtin_object_size(q, 0);
-+ size_t p_size = __string_size(p);
-+ size_t q_size = __string_size(q);
- if (p_size == (size_t)-1 && q_size == (size_t)-1)
- return __builtin_strncat(p, q, count);
- p_len = strlen(p);
-@@ -420,8 +426,8 @@ __FORTIFY_INLINE void *kmemdup(const void *p, size_t size, gfp_t gfp)
- /* defined after fortified strlen and memcpy to reuse them */
- __FORTIFY_INLINE char *strcpy(char *p, const char *q)
- {
-- size_t p_size = __builtin_object_size(p, 0);
-- size_t q_size = __builtin_object_size(q, 0);
-+ size_t p_size = __string_size(p);
-+ size_t q_size = __string_size(q);
- if (p_size == (size_t)-1 && q_size == (size_t)-1)
- return __builtin_strcpy(p, q);
- memcpy(p, q, strlen(q) + 1);
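
What CONFIG_FORTIFY_SOURCE_STRICT_STRING changes is the __builtin_object_size()
mode: mode 1 bounds an access to the addressed member, while mode 0 only bounds
it to the enclosing object, so an overflow from one field into an adjacent one
is caught only in strict mode. A compilable illustration (gcc or clang, -O2):

#include <stdio.h>

struct msg {
	char name[8];
	char rest[8];
};

int main(void)
{
	struct msg m;

	/* mode 0: distance to the end of the whole object (16 bytes) */
	printf("mode 0: %zu\n", __builtin_object_size(m.name, 0));
	/* mode 1: size of the addressed member only (8 bytes) */
	printf("mode 1: %zu\n", __builtin_object_size(m.name, 1));
	return 0;
}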
-diff --git a/include/linux/tty.h b/include/linux/tty.h
-index 1dd587ba6d88..9a9a04fb641d 100644
---- a/include/linux/tty.h
-+++ b/include/linux/tty.h
-@@ -13,6 +13,7 @@
- #include <uapi/linux/tty.h>
- #include <linux/rwsem.h>
- #include <linux/llist.h>
-+#include <linux/user_namespace.h>
-
-
- /*
-@@ -335,6 +336,7 @@ struct tty_struct {
- /* If the tty has a pending do_SAK, queue it here - akpm */
- struct work_struct SAK_work;
- struct tty_port *port;
-+ struct user_namespace *owner_user_ns;
- } __randomize_layout;
-
- /* Each of a tty's open files has private_data pointing to tty_file_private */
-@@ -344,6 +346,8 @@ struct tty_file_private {
- struct list_head list;
- };
-
-+extern int tiocsti_restrict;
-+
- /* tty magic number */
- #define TTY_MAGIC 0x5401
-
-diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
-index 1e5d8c392f15..66d0e49c9987 100644
---- a/include/linux/vmalloc.h
-+++ b/include/linux/vmalloc.h
-@@ -68,19 +68,19 @@ static inline void vmalloc_init(void)
- }
- #endif
-
--extern void *vmalloc(unsigned long size);
--extern void *vzalloc(unsigned long size);
--extern void *vmalloc_user(unsigned long size);
--extern void *vmalloc_node(unsigned long size, int node);
--extern void *vzalloc_node(unsigned long size, int node);
--extern void *vmalloc_exec(unsigned long size);
--extern void *vmalloc_32(unsigned long size);
--extern void *vmalloc_32_user(unsigned long size);
--extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
-+extern void *vmalloc(unsigned long size) __attribute__((alloc_size(1)));
-+extern void *vzalloc(unsigned long size) __attribute__((alloc_size(1)));
-+extern void *vmalloc_user(unsigned long size) __attribute__((alloc_size(1)));
-+extern void *vmalloc_node(unsigned long size, int node) __attribute__((alloc_size(1)));
-+extern void *vzalloc_node(unsigned long size, int node) __attribute__((alloc_size(1)));
-+extern void *vmalloc_exec(unsigned long size) __attribute__((alloc_size(1)));
-+extern void *vmalloc_32(unsigned long size) __attribute__((alloc_size(1)));
-+extern void *vmalloc_32_user(unsigned long size) __attribute__((alloc_size(1)));
-+extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot) __attribute__((alloc_size(1)));
- extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
- unsigned long start, unsigned long end, gfp_t gfp_mask,
- pgprot_t prot, unsigned long vm_flags, int node,
-- const void *caller);
-+ const void *caller) __attribute__((alloc_size(1)));
- #ifndef CONFIG_MMU
- extern void *__vmalloc_node_flags(unsigned long size, int node, gfp_t flags);
- static inline void *__vmalloc_node_flags_caller(unsigned long size, int node,
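
The alloc_size() annotations teach the compiler how large the returned buffer
is, which lets __builtin_object_size() and the fortified string helpers bound
accesses through the pointer. A userspace sketch; xmalloc() is a hypothetical
wrapper used only for illustration, and without optimization the size may fold
to (size_t)-1:

#include <stdio.h>
#include <stdlib.h>

__attribute__((malloc, alloc_size(1)))
static void *xmalloc(size_t n) /* hypothetical wrapper, illustration only */
{
	void *p = malloc(n);

	if (!p)
		abort();
	return p;
}

int main(void)
{
	char *buf = xmalloc(16);

	/* with -O2, alloc_size() lets this fold to 16 */
	printf("known allocation size: %zu\n", __builtin_object_size(buf, 0));
	free(buf);
	return 0;
}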
-diff --git a/init/Kconfig b/init/Kconfig
-index 46075327c165..0c78750bc76d 100644
---- a/init/Kconfig
-+++ b/init/Kconfig
-@@ -309,6 +309,7 @@ config USELIB
- config AUDIT
- bool "Auditing support"
- depends on NET
-+ default y
- help
- Enable auditing infrastructure that can be used with another
- kernel subsystem, such as SELinux (which requires this for
-@@ -1052,6 +1053,12 @@ config CC_OPTIMIZE_FOR_SIZE
-
- endchoice
-
-+config LOCAL_INIT
-+ bool "Zero uninitialized locals"
-+ help
-+ Zero-fill uninitialized local variables, other than variable-length
-+ arrays. Requires compiler support.
-+
- config SYSCTL
- bool
-
-@@ -1361,8 +1368,7 @@ config SHMEM
- which may be appropriate on small systems without swap.
-
- config AIO
-- bool "Enable AIO support" if EXPERT
-- default y
-+ bool "Enable AIO support"
- help
- 	  This option enables POSIX asynchronous I/O which may be used
- by some high performance threaded applications. Disabling
-@@ -1491,7 +1497,7 @@ config VM_EVENT_COUNTERS
-
- config SLUB_DEBUG
- default y
-- bool "Enable SLUB debugging support" if EXPERT
-+ bool "Enable SLUB debugging support"
- depends on SLUB && SYSFS
- help
- SLUB has extensive debug support features. Disabling these can
-@@ -1515,7 +1521,6 @@ config SLUB_MEMCG_SYSFS_ON
-
- config COMPAT_BRK
- bool "Disable heap randomization"
-- default y
- help
- Randomizing heap placement makes heap exploits harder, but it
- also breaks ancient binaries (including anything libc5 based).
-@@ -1562,7 +1567,6 @@ endchoice
-
- config SLAB_MERGE_DEFAULT
- bool "Allow slab caches to be merged"
-- default y
- help
- For reduced kernel memory fragmentation, slab caches can be
- merged when they share the same size and other characteristics.
-@@ -1575,9 +1579,9 @@ config SLAB_MERGE_DEFAULT
- command line.
-
- config SLAB_FREELIST_RANDOM
-- default n
- depends on SLAB || SLUB
- bool "SLAB freelist randomization"
-+ default y
- help
- Randomizes the freelist order used on creating new pages. This
- security feature reduces the predictability of the kernel slab
-@@ -1586,12 +1590,56 @@ config SLAB_FREELIST_RANDOM
- config SLAB_FREELIST_HARDENED
- bool "Harden slab freelist metadata"
- depends on SLUB
-+ default y
- help
- Many kernel heap attacks try to target slab cache metadata and
- 	  other infrastructure. This option makes minor performance
- 	  sacrifices to harden the kernel slab allocator against common
- freelist exploit methods.
-
-+config SLAB_HARDENED
-+ default y
-+ depends on SLUB
-+ bool "Hardened SLAB infrastructure"
-+ help
-+ Make minor performance sacrifices to harden the kernel slab
-+ allocator.
-+
-+config SLAB_CANARY
-+ depends on SLUB
-+ depends on !SLAB_MERGE_DEFAULT
-+ bool "SLAB canaries"
-+ default y
-+ help
-+ Place canaries at the end of kernel slab allocations, sacrificing
-+ some performance and memory usage for security.
-+
-+ Canaries can detect some forms of heap corruption when allocations
-+	  are freed and as part of the HARDENED_USERCOPY feature. This provides
-+ basic use-after-free detection for HARDENED_USERCOPY.
-+
-+ Canaries absorb small overflows (rendering them harmless), mitigate
-+ non-NUL terminated C string overflows on 64-bit via a guaranteed zero
-+ byte and provide basic double-free detection.
-+
-+config SLAB_SANITIZE
-+ bool "Sanitize SLAB allocations"
-+ depends on SLUB
-+ default y
-+ help
-+	  Zero-fill slab allocations on free, reducing the lifetime of
-+ sensitive data and helping to mitigate use-after-free bugs.
-+
-+	  For slabs with debug poisoning enabled, this has no impact.
-+
-+config SLAB_SANITIZE_VERIFY
-+ depends on SLAB_SANITIZE && PAGE_SANITIZE
-+ default y
-+ bool "Verify sanitized SLAB allocations"
-+ help
-+ Verify that newly allocated slab allocations are zeroed to detect
-+ write-after-free bugs.
-+
- config SLUB_CPU_PARTIAL
- default y
- depends on SLUB && SMP
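
A toy model of the SLAB_CANARY scheme described above: a secret tied to the
cache and the object's address is placed after the object and checked on free,
so a small linear overflow is absorbed and detected rather than corrupting the
next object. This is only a sketch of the idea; the in-kernel version also
guarantees a zero byte on 64-bit.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define OBJ_SIZE 32

static unsigned long cache_random = 0x5aa5f00dUL; /* per-cache secret */

static void *canary_alloc(void)
{
	unsigned char *p = malloc(OBJ_SIZE + sizeof(unsigned long));
	unsigned long canary;

	if (!p)
		abort();
	canary = cache_random ^ (uintptr_t)p; /* tied to cache and address */
	memcpy(p + OBJ_SIZE, &canary, sizeof(canary));
	return p;
}

static void canary_free(void *obj)
{
	unsigned char *p = obj;
	unsigned long want = cache_random ^ (uintptr_t)p, got;

	memcpy(&got, p + OBJ_SIZE, sizeof(got));
	assert(got == want && "slab canary corrupted");
	free(p);
}

int main(void)
{
	char *p = canary_alloc();

	memset(p, 'A', OBJ_SIZE); /* in-bounds use is fine */
	/* a one-byte overflow into p[OBJ_SIZE] would trip the assert */
	canary_free(p);
	puts("canary intact");
	return 0;
}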
-diff --git a/kernel/audit.c b/kernel/audit.c
-index d301276bca58..d55a1e290cea 100644
---- a/kernel/audit.c
-+++ b/kernel/audit.c
-@@ -1575,6 +1575,9 @@ static int __init audit_enable(char *str)
- audit_default = !!simple_strtol(str, NULL, 0);
- if (!audit_default)
- audit_initialized = AUDIT_DISABLED;
-+ else
-+ audit_initialized = AUDIT_UNINITIALIZED;
-+
- audit_enabled = audit_default;
- audit_ever_enabled = !!audit_enabled;
-
-diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
-index d203a5d6b726..2a6c3e2c57a6 100644
---- a/kernel/bpf/core.c
-+++ b/kernel/bpf/core.c
-@@ -539,7 +539,7 @@ void __weak bpf_jit_free(struct bpf_prog *fp)
- bpf_prog_unlock_free(fp);
- }
-
--int bpf_jit_harden __read_mostly;
-+int bpf_jit_harden __read_mostly = 2;
-
- static int bpf_jit_blind_insn(const struct bpf_insn *from,
- const struct bpf_insn *aux,
-diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
-index 5c9deed4524e..6d90aabecfc7 100644
---- a/kernel/bpf/syscall.c
-+++ b/kernel/bpf/syscall.c
-@@ -37,7 +37,7 @@ static DEFINE_SPINLOCK(prog_idr_lock);
- static DEFINE_IDR(map_idr);
- static DEFINE_SPINLOCK(map_idr_lock);
-
--int sysctl_unprivileged_bpf_disabled __read_mostly;
-+int sysctl_unprivileged_bpf_disabled __read_mostly = 1;
-
- static const struct bpf_map_ops * const bpf_map_types[] = {
- #define BPF_PROG_TYPE(_id, _ops)
-diff --git a/kernel/capability.c b/kernel/capability.c
-index 1e1c0236f55b..452062fe45ce 100644
---- a/kernel/capability.c
-+++ b/kernel/capability.c
-@@ -431,6 +431,12 @@ bool capable(int cap)
- return ns_capable(&init_user_ns, cap);
- }
- EXPORT_SYMBOL(capable);
-+
-+bool capable_noaudit(int cap)
-+{
-+ return ns_capable_noaudit(&init_user_ns, cap);
-+}
-+EXPORT_SYMBOL(capable_noaudit);
- #endif /* CONFIG_MULTIUSER */
-
- /**
-diff --git a/kernel/events/core.c b/kernel/events/core.c
-index 991af683ef9e..66f66b648707 100644
---- a/kernel/events/core.c
-+++ b/kernel/events/core.c
-@@ -397,8 +397,13 @@ static cpumask_var_t perf_online_mask;
- * 0 - disallow raw tracepoint access for unpriv
- * 1 - disallow cpu events for unpriv
- * 2 - disallow kernel profiling for unpriv
-+ * 3 - disallow all unpriv perf event use
- */
-+#ifdef CONFIG_SECURITY_PERF_EVENTS_RESTRICT
-+int sysctl_perf_event_paranoid __read_mostly = 3;
-+#else
- int sysctl_perf_event_paranoid __read_mostly = 2;
-+#endif
-
- /* Minimum for 512 kiB + 1 user control page */
- int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */
-@@ -9984,6 +9989,9 @@ SYSCALL_DEFINE5(perf_event_open,
- if (flags & ~PERF_FLAG_ALL)
- return -EINVAL;
-
-+ if (perf_paranoid_any() && !capable(CAP_SYS_ADMIN))
-+ return -EACCES;
-+
- err = perf_copy_attr(attr_uptr, &attr);
- if (err)
- return err;
-diff --git a/kernel/fork.c b/kernel/fork.c
-index 6d6ce2c3a364..951a76b3dc32 100644
---- a/kernel/fork.c
-+++ b/kernel/fork.c
-@@ -102,6 +102,11 @@
-
- #define CREATE_TRACE_POINTS
- #include <trace/events/task.h>
-+#ifdef CONFIG_USER_NS
-+extern int unprivileged_userns_clone;
-+#else
-+#define unprivileged_userns_clone 0
-+#endif
-
- /*
- * Minimum number of threads to boot the kernel
-@@ -1555,6 +1560,10 @@ static __latent_entropy struct task_struct *copy_process(
- if ((clone_flags & (CLONE_NEWUSER|CLONE_FS)) == (CLONE_NEWUSER|CLONE_FS))
- return ERR_PTR(-EINVAL);
-
-+ if ((clone_flags & CLONE_NEWUSER) && !unprivileged_userns_clone)
-+ if (!capable(CAP_SYS_ADMIN))
-+ return ERR_PTR(-EPERM);
-+
- /*
- * Thread groups must share signals as well, and detached threads
- * can only be started up within the thread group.
-@@ -2357,6 +2366,12 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
- if (unshare_flags & CLONE_NEWNS)
- unshare_flags |= CLONE_FS;
-
-+ if ((unshare_flags & CLONE_NEWUSER) && !unprivileged_userns_clone) {
-+ err = -EPERM;
-+ if (!capable(CAP_SYS_ADMIN))
-+ goto bad_unshare_out;
-+ }
-+
- err = check_unshare_flags(unshare_flags);
- if (err)
- goto bad_unshare_out;
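
A quick probe for the unprivileged_userns_clone gate added above: with the
sysctl at 0, an unprivileged unshare(CLONE_NEWUSER) fails with EPERM; with it
at 1, or with CAP_SYS_ADMIN, it succeeds.

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
	if (unshare(CLONE_NEWUSER) != 0) {
		perror("unshare(CLONE_NEWUSER)"); /* EPERM when the knob is 0 */
		return 1;
	}
	puts("entered a new user namespace");
	return 0;
}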
-diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
-index 0972a8e09d08..00dde7aad47a 100644
---- a/kernel/power/snapshot.c
-+++ b/kernel/power/snapshot.c
-@@ -1136,7 +1136,7 @@ void free_basic_memory_bitmaps(void)
-
- void clear_free_pages(void)
- {
--#ifdef CONFIG_PAGE_POISONING_ZERO
-+#if defined(CONFIG_PAGE_POISONING_ZERO) || defined(CONFIG_PAGE_SANITIZE)
- struct memory_bitmap *bm = free_pages_map;
- unsigned long pfn;
-
-@@ -1153,7 +1153,7 @@ void clear_free_pages(void)
- }
- memory_bm_position_reset(bm);
- pr_info("PM: free pages cleared after restore\n");
--#endif /* PAGE_POISONING_ZERO */
-+#endif /* PAGE_POISONING_ZERO || PAGE_SANITIZE */
- }
-
- /**
-diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c
-index a64eee0db39e..4d7de378fe4c 100644
---- a/kernel/rcu/tiny.c
-+++ b/kernel/rcu/tiny.c
-@@ -164,7 +164,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
- }
- }
-
--static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused)
-+static __latent_entropy void rcu_process_callbacks(void)
- {
- __rcu_process_callbacks(&rcu_sched_ctrlblk);
- __rcu_process_callbacks(&rcu_bh_ctrlblk);
-diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
-index 710ce1d6b982..4013b634e820 100644
---- a/kernel/rcu/tree.c
-+++ b/kernel/rcu/tree.c
-@@ -2927,7 +2927,7 @@ __rcu_process_callbacks(struct rcu_state *rsp)
- /*
- * Do RCU core processing for the current CPU.
- */
--static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused)
-+static __latent_entropy void rcu_process_callbacks(void)
- {
- struct rcu_state *rsp;
-
-diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
-index f33b24080b1c..99c5e423906f 100644
---- a/kernel/sched/fair.c
-+++ b/kernel/sched/fair.c
-@@ -8982,7 +8982,7 @@ static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) { }
- * run_rebalance_domains is triggered when needed from the scheduler tick.
- * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
- */
--static __latent_entropy void run_rebalance_domains(struct softirq_action *h)
-+static __latent_entropy void run_rebalance_domains(void)
- {
- struct rq *this_rq = this_rq();
- enum cpu_idle_type idle = this_rq->idle_balance ?
-diff --git a/kernel/softirq.c b/kernel/softirq.c
-index a4c87cf27f9d..efb97a8dc568 100644
---- a/kernel/softirq.c
-+++ b/kernel/softirq.c
-@@ -53,7 +53,7 @@ irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
- EXPORT_SYMBOL(irq_stat);
- #endif
-
--static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
-+static struct softirq_action softirq_vec[NR_SOFTIRQS] __ro_after_init __aligned(PAGE_SIZE);
-
- DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
-
-@@ -285,7 +285,7 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)
- kstat_incr_softirqs_this_cpu(vec_nr);
-
- trace_softirq_entry(vec_nr);
-- h->action(h);
-+ h->action();
- trace_softirq_exit(vec_nr);
- if (unlikely(prev_count != preempt_count())) {
- pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
-@@ -448,7 +448,7 @@ void __raise_softirq_irqoff(unsigned int nr)
- or_softirq_pending(1UL << nr);
- }
-
--void open_softirq(int nr, void (*action)(struct softirq_action *))
-+void __init open_softirq(int nr, void (*action)(void))
- {
- softirq_vec[nr].action = action;
- }
-@@ -490,7 +490,7 @@ void __tasklet_hi_schedule(struct tasklet_struct *t)
- }
- EXPORT_SYMBOL(__tasklet_hi_schedule);
-
--static __latent_entropy void tasklet_action(struct softirq_action *a)
-+static __latent_entropy void tasklet_action(void)
- {
- struct tasklet_struct *list;
-
-@@ -526,7 +526,7 @@ static __latent_entropy void tasklet_action(struct softirq_action *a)
- }
- }
-
--static __latent_entropy void tasklet_hi_action(struct softirq_action *a)
-+static __latent_entropy void tasklet_hi_action(void)
- {
- struct tasklet_struct *list;
-
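
The softirq hunks above shrink the attack surface in two ways: handlers lose
the struct softirq_action argument (it carried no information), and the handler
table becomes write-once, populated only from __init code and kept in
page-aligned read-only memory afterwards. The shape of the resulting API,
modeled in plain C as a sketch:

#include <stdio.h>

#define NR_SOFTIRQS 10

struct softirq_action {
	void (*action)(void); /* no unused softirq_action pointer argument */
};

static struct softirq_action softirq_vec[NR_SOFTIRQS];

static void open_softirq(int nr, void (*action)(void)) /* __init in the patch */
{
	softirq_vec[nr].action = action;
}

static void run_timer_softirq(void)
{
	puts("timer softirq ran");
}

int main(void)
{
	open_softirq(1, run_timer_softirq);
	softirq_vec[1].action(); /* h->action() with no argument */
	return 0;
}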
-diff --git a/kernel/sysctl.c b/kernel/sysctl.c
-index d330b1ce3b94..050278b12928 100644
---- a/kernel/sysctl.c
-+++ b/kernel/sysctl.c
-@@ -66,6 +66,7 @@
- #include <linux/kexec.h>
- #include <linux/bpf.h>
- #include <linux/mount.h>
-+#include <linux/tty.h>
-
- #include <linux/uaccess.h>
- #include <asm/processor.h>
-@@ -98,12 +99,19 @@
- #if defined(CONFIG_SYSCTL)
-
- /* External variables not in a header file. */
-+#if IS_ENABLED(CONFIG_USB)
-+int deny_new_usb __read_mostly = 0;
-+EXPORT_SYMBOL(deny_new_usb);
-+#endif
- extern int suid_dumpable;
- #ifdef CONFIG_COREDUMP
- extern int core_uses_pid;
- extern char core_pattern[];
- extern unsigned int core_pipe_limit;
- #endif
-+#ifdef CONFIG_USER_NS
-+extern int unprivileged_userns_clone;
-+#endif
- extern int pid_max;
- extern int pid_max_min, pid_max_max;
- extern int percpu_pagelist_fraction;
-@@ -115,40 +123,43 @@ extern int sysctl_nr_trim_pages;
-
- /* Constants used for minimum and maximum */
- #ifdef CONFIG_LOCKUP_DETECTOR
--static int sixty = 60;
-+static int sixty __read_only = 60;
- #endif
-
--static int __maybe_unused neg_one = -1;
-+static int __maybe_unused neg_one __read_only = -1;
-
- static int zero;
--static int __maybe_unused one = 1;
--static int __maybe_unused two = 2;
--static int __maybe_unused four = 4;
--static unsigned long one_ul = 1;
--static int one_hundred = 100;
--static int one_thousand = 1000;
-+static int __maybe_unused one __read_only = 1;
-+static int __maybe_unused two __read_only = 2;
-+static int __maybe_unused four __read_only = 4;
-+static unsigned long one_ul __read_only = 1;
-+static int one_hundred __read_only = 100;
-+static int one_thousand __read_only = 1000;
- #ifdef CONFIG_PRINTK
--static int ten_thousand = 10000;
-+static int ten_thousand __read_only = 10000;
- #endif
- #ifdef CONFIG_PERF_EVENTS
--static int six_hundred_forty_kb = 640 * 1024;
-+static int six_hundred_forty_kb __read_only = 640 * 1024;
- #endif
-
- /* this is needed for the proc_doulongvec_minmax of vm_dirty_bytes */
--static unsigned long dirty_bytes_min = 2 * PAGE_SIZE;
-+static unsigned long dirty_bytes_min __read_only = 2 * PAGE_SIZE;
-
- /* this is needed for the proc_dointvec_minmax for [fs_]overflow UID and GID */
--static int maxolduid = 65535;
--static int minolduid;
-+static int maxolduid __read_only = 65535;
-+static int minolduid __read_only;
-
--static int ngroups_max = NGROUPS_MAX;
-+static int ngroups_max __read_only = NGROUPS_MAX;
- static const int cap_last_cap = CAP_LAST_CAP;
-
- /*this is needed for proc_doulongvec_minmax of sysctl_hung_task_timeout_secs */
- #ifdef CONFIG_DETECT_HUNG_TASK
--static unsigned long hung_task_timeout_max = (LONG_MAX/HZ);
-+static unsigned long hung_task_timeout_max __read_only = (LONG_MAX/HZ);
- #endif
-
-+int device_sidechannel_restrict __read_mostly = 1;
-+EXPORT_SYMBOL(device_sidechannel_restrict);
-+
- #ifdef CONFIG_INOTIFY_USER
- #include <linux/inotify.h>
- #endif
-@@ -286,19 +297,19 @@ static struct ctl_table sysctl_base_table[] = {
- };
-
- #ifdef CONFIG_SCHED_DEBUG
--static int min_sched_granularity_ns = 100000; /* 100 usecs */
--static int max_sched_granularity_ns = NSEC_PER_SEC; /* 1 second */
--static int min_wakeup_granularity_ns; /* 0 usecs */
--static int max_wakeup_granularity_ns = NSEC_PER_SEC; /* 1 second */
-+static int min_sched_granularity_ns __read_only = 100000; /* 100 usecs */
-+static int max_sched_granularity_ns __read_only = NSEC_PER_SEC; /* 1 second */
-+static int min_wakeup_granularity_ns __read_only; /* 0 usecs */
-+static int max_wakeup_granularity_ns __read_only = NSEC_PER_SEC; /* 1 second */
- #ifdef CONFIG_SMP
--static int min_sched_tunable_scaling = SCHED_TUNABLESCALING_NONE;
--static int max_sched_tunable_scaling = SCHED_TUNABLESCALING_END-1;
-+static int min_sched_tunable_scaling __read_only = SCHED_TUNABLESCALING_NONE;
-+static int max_sched_tunable_scaling __read_only = SCHED_TUNABLESCALING_END-1;
- #endif /* CONFIG_SMP */
- #endif /* CONFIG_SCHED_DEBUG */
-
- #ifdef CONFIG_COMPACTION
--static int min_extfrag_threshold;
--static int max_extfrag_threshold = 1000;
-+static int min_extfrag_threshold __read_only;
-+static int max_extfrag_threshold __read_only = 1000;
- #endif
-
- static struct ctl_table kern_table[] = {
-@@ -512,6 +523,15 @@ static struct ctl_table kern_table[] = {
- .proc_handler = proc_dointvec,
- },
- #endif
-+#ifdef CONFIG_USER_NS
-+ {
-+ .procname = "unprivileged_userns_clone",
-+ .data = &unprivileged_userns_clone,
-+ .maxlen = sizeof(int),
-+ .mode = 0644,
-+ .proc_handler = proc_dointvec,
-+ },
-+#endif
- #ifdef CONFIG_PROC_SYSCTL
- {
- .procname = "tainted",
-@@ -853,6 +873,37 @@ static struct ctl_table kern_table[] = {
- .extra1 = &zero,
- .extra2 = &two,
- },
-+#endif
-+#if defined CONFIG_TTY
-+ {
-+ .procname = "tiocsti_restrict",
-+ .data = &tiocsti_restrict,
-+ .maxlen = sizeof(int),
-+ .mode = 0644,
-+ .proc_handler = proc_dointvec_minmax_sysadmin,
-+ .extra1 = &zero,
-+ .extra2 = &one,
-+ },
-+#endif
-+ {
-+ .procname = "device_sidechannel_restrict",
-+ .data = &device_sidechannel_restrict,
-+ .maxlen = sizeof(int),
-+ .mode = 0644,
-+ .proc_handler = proc_dointvec_minmax_sysadmin,
-+ .extra1 = &zero,
-+ .extra2 = &one,
-+ },
-+#if IS_ENABLED(CONFIG_USB)
-+ {
-+ .procname = "deny_new_usb",
-+ .data = &deny_new_usb,
-+ .maxlen = sizeof(int),
-+ .mode = 0644,
-+ .proc_handler = proc_dointvec_minmax_sysadmin,
-+ .extra1 = &zero,
-+ .extra2 = &one,
-+ },
- #endif
- {
- .procname = "ngroups_max",
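
The knobs registered above land under /proc/sys/kernel/ and use
proc_dointvec_minmax_sysadmin, so writes require CAP_SYS_ADMIN. A minimal
sketch of flipping deny_new_usb at runtime, assuming the patched kernel:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/proc/sys/kernel/deny_new_usb", O_WRONLY);

	if (fd < 0) {
		perror("open"); /* needs CAP_SYS_ADMIN and the patched kernel */
		return 1;
	}
	if (write(fd, "1\n", 2) != 2) {
		perror("write");
		close(fd);
		return 1;
	}
	close(fd);
	puts("new USB device connections are now denied");
	return 0;
}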
-diff --git a/kernel/time/timer.c b/kernel/time/timer.c
-index f17c76a1a05f..50f079d11488 100644
---- a/kernel/time/timer.c
-+++ b/kernel/time/timer.c
-@@ -1640,7 +1640,7 @@ static inline void __run_timers(struct timer_base *base)
- /*
- * This function runs timers and the timer-tq in bottom half context.
- */
--static __latent_entropy void run_timer_softirq(struct softirq_action *h)
-+static __latent_entropy void run_timer_softirq(void)
- {
- struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
-
-diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
-index ed80a88980f0..ff6d27d06af0 100644
---- a/kernel/user_namespace.c
-+++ b/kernel/user_namespace.c
-@@ -24,6 +24,9 @@
- #include <linux/projid.h>
- #include <linux/fs_struct.h>
-
-+/* sysctl */
-+int unprivileged_userns_clone;
-+
- static struct kmem_cache *user_ns_cachep __read_mostly;
- static DEFINE_MUTEX(userns_state_mutex);
-
-diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
-index 62d0e25c054c..3953072277eb 100644
---- a/lib/Kconfig.debug
-+++ b/lib/Kconfig.debug
-@@ -937,6 +937,7 @@ endmenu # "Debug lockups and hangs"
-
- config PANIC_ON_OOPS
- bool "Panic on Oops"
-+ default y
- help
- Say Y here to enable the kernel to panic when it oopses. This
- has the same effect as setting oops=panic on the kernel command
-@@ -946,7 +947,7 @@ config PANIC_ON_OOPS
- anything erroneous after an oops which could result in data
- corruption or other issues.
-
-- Say N if unsure.
-+ Say Y if unsure.
-
- config PANIC_ON_OOPS_VALUE
- int
-@@ -1319,6 +1320,7 @@ config DEBUG_BUGVERBOSE
- config DEBUG_LIST
- bool "Debug linked list manipulation"
- depends on DEBUG_KERNEL || BUG_ON_DATA_CORRUPTION
-+ default y
- help
- Enable this to turn on extended checks in the linked-list
- walking routines.
-@@ -1932,6 +1934,7 @@ config MEMTEST
- config BUG_ON_DATA_CORRUPTION
- bool "Trigger a BUG when data corruption is detected"
- select DEBUG_LIST
-+ default y
- help
- Select this option if the kernel should BUG when it encounters
- data corruption in kernel memory structures when they get checked
-@@ -1952,7 +1955,7 @@ config STRICT_DEVMEM
- bool "Filter access to /dev/mem"
- depends on MMU && DEVMEM
- depends on ARCH_HAS_DEVMEM_IS_ALLOWED
-- default y if TILE || PPC
-+ default y
- ---help---
- If this option is disabled, you allow userspace (root) access to all
- of memory, including kernel and userspace memory. Accidental
-@@ -1971,6 +1974,7 @@ config STRICT_DEVMEM
- config IO_STRICT_DEVMEM
- bool "Filter I/O access to /dev/mem"
- depends on STRICT_DEVMEM
-+ default y
- ---help---
- If this option is disabled, you allow userspace (root) access to all
- io-memory regardless of whether a driver is actively using that
-diff --git a/lib/irq_poll.c b/lib/irq_poll.c
-index 86a709954f5a..6f15787fcb1b 100644
---- a/lib/irq_poll.c
-+++ b/lib/irq_poll.c
-@@ -75,7 +75,7 @@ void irq_poll_complete(struct irq_poll *iop)
- }
- EXPORT_SYMBOL(irq_poll_complete);
-
--static void __latent_entropy irq_poll_softirq(struct softirq_action *h)
-+static void __latent_entropy irq_poll_softirq(void)
- {
- struct list_head *list = this_cpu_ptr(&blk_cpu_iopoll);
- int rearm = 0, budget = irq_poll_budget;
-diff --git a/lib/kobject.c b/lib/kobject.c
-index bbbb067de8ec..fec2f780cf9b 100644
---- a/lib/kobject.c
-+++ b/lib/kobject.c
-@@ -956,9 +956,9 @@ EXPORT_SYMBOL_GPL(kset_create_and_add);
-
-
- static DEFINE_SPINLOCK(kobj_ns_type_lock);
--static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES];
-+static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES] __ro_after_init;
-
--int kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
-+int __init kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
- {
- enum kobj_ns_type type = ops->type;
- int error;
-diff --git a/lib/nlattr.c b/lib/nlattr.c
-index 3d8295c85505..3fa3b3409d69 100644
---- a/lib/nlattr.c
-+++ b/lib/nlattr.c
-@@ -341,6 +341,8 @@ int nla_memcpy(void *dest, const struct nlattr *src, int count)
- {
- int minlen = min_t(int, count, nla_len(src));
-
-+ BUG_ON(minlen < 0);
-+
- memcpy(dest, nla_data(src), minlen);
- if (count > minlen)
- memset(dest + minlen, 0, count - minlen);
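
Why the added BUG_ON(minlen < 0) is worth a hard stop: if nla_len() ever
returned a negative value, the signed int would convert to an enormous size_t
at the memcpy() call and overwrite far past the destination. The conversion in
isolation:

#include <stdio.h>

int main(void)
{
	int minlen = -1;           /* a hypothetical negative nla_len() result */
	size_t n = (size_t)minlen; /* the implicit conversion at memcpy() */

	printf("memcpy() would be asked to copy %zu bytes\n", n); /* SIZE_MAX */
	return 0;
}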
-diff --git a/lib/vsprintf.c b/lib/vsprintf.c
-index 4a990f3fd345..3df8db5af0ba 100644
---- a/lib/vsprintf.c
-+++ b/lib/vsprintf.c
-@@ -1588,7 +1588,7 @@ char *device_node_string(char *buf, char *end, struct device_node *dn,
- return widen_string(buf, buf - buf_start, end, spec);
- }
-
--int kptr_restrict __read_mostly;
-+int kptr_restrict __read_mostly = 2;
-
- /*
- * Show a '%p' thing. A kernel extension is that the '%p' is followed
-diff --git a/mm/Kconfig b/mm/Kconfig
-index 59efbd3337e0..c070e14ec83d 100644
---- a/mm/Kconfig
-+++ b/mm/Kconfig
-@@ -319,7 +319,8 @@ config KSM
- config DEFAULT_MMAP_MIN_ADDR
- int "Low address space to protect from user allocation"
- depends on MMU
-- default 4096
-+ default 32768 if ARM || (ARM64 && COMPAT)
-+ default 65536
- help
- This is the portion of low virtual memory which should be protected
- from userspace allocation. Keeping a user from writing to low pages
-diff --git a/mm/mmap.c b/mm/mmap.c
-index 2398776195d2..a8ffa2223ad1 100644
---- a/mm/mmap.c
-+++ b/mm/mmap.c
-@@ -220,6 +220,13 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
-
- newbrk = PAGE_ALIGN(brk);
- oldbrk = PAGE_ALIGN(mm->brk);
-+ /* properly handle unaligned min_brk as an empty heap */
-+ if (min_brk & ~PAGE_MASK) {
-+ if (brk == min_brk)
-+ newbrk -= PAGE_SIZE;
-+ if (mm->brk == min_brk)
-+ oldbrk -= PAGE_SIZE;
-+ }
- if (oldbrk == newbrk)
- goto set_brk;
-
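
A sketch of the alignment arithmetic behind this hunk, under the stated
assumption of an unaligned min_brk: PAGE_ALIGN() rounds up, so an empty heap
starting mid-page would otherwise appear to extend to the next page boundary;
backing the value off by one page keeps the empty heap at zero mapped pages.

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
	unsigned long min_brk = 0x1000100UL; /* unaligned heap start */
	unsigned long brk = min_brk;         /* empty heap: brk == min_brk */
	unsigned long newbrk = PAGE_ALIGN(brk);

	printf("rounded up:    %#lx\n", newbrk);
	if ((min_brk & ~PAGE_MASK) && brk == min_brk)
		newbrk -= PAGE_SIZE; /* treat the partial page as not owned */
	printf("after the fix: %#lx (empty heap maps no pages)\n", newbrk);
	return 0;
}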
-diff --git a/mm/page_alloc.c b/mm/page_alloc.c
-index a2f365f40433..5e726e59de20 100644
---- a/mm/page_alloc.c
-+++ b/mm/page_alloc.c
-@@ -67,6 +67,7 @@
- #include <linux/ftrace.h>
- #include <linux/lockdep.h>
- #include <linux/nmi.h>
-+#include <linux/random.h>
-
- #include <asm/sections.h>
- #include <asm/tlbflush.h>
-@@ -98,6 +99,15 @@ int _node_numa_mem_[MAX_NUMNODES];
- DEFINE_MUTEX(pcpu_drain_mutex);
- DEFINE_PER_CPU(struct work_struct, pcpu_drain);
-
-+bool __meminitdata extra_latent_entropy;
-+
-+static int __init setup_extra_latent_entropy(char *str)
-+{
-+ extra_latent_entropy = true;
-+ return 0;
-+}
-+early_param("extra_latent_entropy", setup_extra_latent_entropy);
-+
- #ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY
- volatile unsigned long latent_entropy __latent_entropy;
- EXPORT_SYMBOL(latent_entropy);
-@@ -1063,6 +1073,13 @@ static __always_inline bool free_pages_prepare(struct page *page,
- debug_check_no_obj_freed(page_address(page),
- PAGE_SIZE << order);
- }
-+
-+ if (IS_ENABLED(CONFIG_PAGE_SANITIZE)) {
-+ int i;
-+ for (i = 0; i < (1 << order); i++)
-+ clear_highpage(page + i);
-+ }
-+
- arch_free_page(page, order);
- kernel_poison_pages(page, 1 << order, 0);
- kernel_map_pages(page, 1 << order, 0);
-@@ -1278,6 +1295,21 @@ static void __init __free_pages_boot_core(struct page *page, unsigned int order)
- __ClearPageReserved(p);
- set_page_count(p, 0);
-
-+ if (extra_latent_entropy && !PageHighMem(page) && page_to_pfn(page) < 0x100000) {
-+ unsigned long hash = 0;
-+ size_t index, end = PAGE_SIZE * nr_pages / sizeof hash;
-+ const unsigned long *data = lowmem_page_address(page);
-+
-+ for (index = 0; index < end; index++)
-+ hash ^= hash + data[index];
-+#ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY
-+ latent_entropy ^= hash;
-+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
-+#else
-+ add_device_randomness((const void *)&hash, sizeof(hash));
-+#endif
-+ }
-+
- page_zone(page)->managed_pages += nr_pages;
- set_page_refcounted(page);
- __free_pages(page, order);
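The loop above folds the contents of each boot-freed low page into one word with hash ^= hash + word; it is a fast mixer, not a cryptographic hash, and the result only seeds add_device_randomness(). A standalone sketch of the same fold (illustration only):

    #include <stdio.h>

    #define PAGE_SIZE 4096

    static unsigned long hash_page(const unsigned long *data)
    {
        unsigned long hash = 0;
        size_t i, end = PAGE_SIZE / sizeof(hash);

        for (i = 0; i < end; i++)
            hash ^= hash + data[i]; /* same fold as the patch */
        return hash;
    }

    int main(void)
    {
        static unsigned long page[PAGE_SIZE / sizeof(unsigned long)];

        page[0] = 42; /* stand-in for boot-time page contents */
        printf("hash = %lx\n", hash_page(page));
        return 0;
    }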
-@@ -1718,8 +1750,8 @@ static inline int check_new_page(struct page *page)
-
- static inline bool free_pages_prezeroed(void)
- {
-- return IS_ENABLED(CONFIG_PAGE_POISONING_ZERO) &&
-- page_poisoning_enabled();
-+ return IS_ENABLED(CONFIG_PAGE_SANITIZE) ||
-+ (IS_ENABLED(CONFIG_PAGE_POISONING_ZERO) && page_poisoning_enabled());
- }
-
- #ifdef CONFIG_DEBUG_VM
-@@ -1776,6 +1808,11 @@ static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags
-
- post_alloc_hook(page, order, gfp_flags);
-
-+ if (IS_ENABLED(CONFIG_PAGE_SANITIZE_VERIFY)) {
-+ for (i = 0; i < (1 << order); i++)
-+ verify_zero_highpage(page + i);
-+ }
-+
- if (!free_pages_prezeroed() && (gfp_flags & __GFP_ZERO))
- for (i = 0; i < (1 << order); i++)
- clear_highpage(page + i);
-diff --git a/mm/slab.h b/mm/slab.h
-index 485d9fbb8802..436461588804 100644
---- a/mm/slab.h
-+++ b/mm/slab.h
-@@ -311,7 +311,11 @@ static inline bool is_root_cache(struct kmem_cache *s)
- static inline bool slab_equal_or_root(struct kmem_cache *s,
- struct kmem_cache *p)
- {
-+#ifdef CONFIG_SLAB_HARDENED
-+ return p == s;
-+#else
- return true;
-+#endif
- }
-
- static inline const char *cache_name(struct kmem_cache *s)
-@@ -363,18 +367,26 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
- * to not do even the assignment. In that case, slab_equal_or_root
- * will also be a constant.
- */
-- if (!memcg_kmem_enabled() &&
-+ if (!IS_ENABLED(CONFIG_SLAB_HARDENED) &&
-+ !memcg_kmem_enabled() &&
- !unlikely(s->flags & SLAB_CONSISTENCY_CHECKS))
- return s;
-
- page = virt_to_head_page(x);
-+#ifdef CONFIG_SLAB_HARDENED
-+ BUG_ON(!PageSlab(page));
-+#endif
- cachep = page->slab_cache;
- if (slab_equal_or_root(cachep, s))
- return cachep;
-
- pr_err("%s: Wrong slab cache. %s but object is from %s\n",
- __func__, s->name, cachep->name);
-+#ifdef CONFIG_BUG_ON_DATA_CORRUPTION
-+ BUG_ON(1);
-+#else
- WARN_ON_ONCE(1);
-+#endif
- return s;
- }
-
-@@ -399,7 +411,7 @@ static inline size_t slab_ksize(const struct kmem_cache *s)
- * back there or track user information then we can
- * only use the space before that information.
- */
-- if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
-+ if ((s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER)) || IS_ENABLED(CONFIG_SLAB_CANARY))
- return s->inuse;
- /*
- * Else we can use all the padding etc for the allocation
-diff --git a/mm/slab_common.c b/mm/slab_common.c
-index f6764cf162b8..015c8e4df318 100644
---- a/mm/slab_common.c
-+++ b/mm/slab_common.c
-@@ -26,10 +26,10 @@
-
- #include "slab.h"
-
--enum slab_state slab_state;
-+enum slab_state slab_state __ro_after_init;
- LIST_HEAD(slab_caches);
- DEFINE_MUTEX(slab_mutex);
--struct kmem_cache *kmem_cache;
-+struct kmem_cache *kmem_cache __ro_after_init;
-
- static LIST_HEAD(slab_caches_to_rcu_destroy);
- static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work);
-@@ -49,7 +49,7 @@ static DECLARE_WORK(slab_caches_to_rcu_destroy_work,
- /*
- * Merge control. If this is set then no merging of slab caches will occur.
- */
--static bool slab_nomerge = !IS_ENABLED(CONFIG_SLAB_MERGE_DEFAULT);
-+static bool slab_nomerge __ro_after_init = !IS_ENABLED(CONFIG_SLAB_MERGE_DEFAULT);
-
- static int __init setup_slab_nomerge(char *str)
- {
-@@ -931,7 +931,7 @@ EXPORT_SYMBOL(kmalloc_dma_caches);
- * of two cache sizes there. The size of larger slabs can be determined using
- * fls.
- */
--static s8 size_index[24] = {
-+static s8 size_index[24] __ro_after_init = {
- 3, /* 8 */
- 4, /* 16 */
- 5, /* 24 */
-diff --git a/mm/slub.c b/mm/slub.c
-index 220d42e592ef..3decf87b1cf2 100644
---- a/mm/slub.c
-+++ b/mm/slub.c
-@@ -125,6 +125,16 @@ static inline int kmem_cache_debug(struct kmem_cache *s)
- #endif
- }
-
-+static inline bool has_sanitize(struct kmem_cache *s)
-+{
-+ return IS_ENABLED(CONFIG_SLAB_SANITIZE) && !(s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON));
-+}
-+
-+static inline bool has_sanitize_verify(struct kmem_cache *s)
-+{
-+ return IS_ENABLED(CONFIG_SLAB_SANITIZE_VERIFY) && has_sanitize(s);
-+}
-+
- void *fixup_red_left(struct kmem_cache *s, void *p)
- {
- if (kmem_cache_debug(s) && s->flags & SLAB_RED_ZONE)
-@@ -297,6 +307,35 @@ static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
- *(void **)freeptr_addr = freelist_ptr(s, fp, freeptr_addr);
- }
-
-+#ifdef CONFIG_SLAB_CANARY
-+static inline unsigned long *get_canary(struct kmem_cache *s, void *object)
-+{
-+ if (s->offset)
-+ return object + s->offset + sizeof(void *);
-+ return object + s->inuse;
-+}
-+
-+static inline unsigned long get_canary_value(const void *canary, unsigned long value)
-+{
-+ return (value ^ (unsigned long)canary) & CANARY_MASK;
-+}
-+
-+static inline void set_canary(struct kmem_cache *s, void *object, unsigned long value)
-+{
-+ unsigned long *canary = get_canary(s, object);
-+ *canary = get_canary_value(canary, value);
-+}
-+
-+static inline void check_canary(struct kmem_cache *s, void *object, unsigned long value)
-+{
-+ unsigned long *canary = get_canary(s, object);
-+ BUG_ON(*canary != get_canary_value(canary, value));
-+}
-+#else
-+#define set_canary(s, object, value)
-+#define check_canary(s, object, value)
-+#endif
-+
- /* Loop over all objects in a slab */
- #define for_each_object(__p, __s, __addr, __objects) \
- for (__p = fixup_red_left(__s, __addr); \
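The canary helpers above XOR a per-cache random secret with the canary slot's own address, so a canary value harvested from one object does not validate at another address. A userspace model (the CANARY_MASK value here is an assumption for illustration; the real mask is defined elsewhere in the patch set):

    #include <assert.h>

    #define CANARY_MASK (~0xffUL) /* assumed mask, for illustration only */

    static unsigned long canary_value(const void *slot, unsigned long secret)
    {
        return (secret ^ (unsigned long)slot) & CANARY_MASK;
    }

    int main(void)
    {
        unsigned long secret = 0x5ec2e7UL; /* stands in for get_random_long() */
        unsigned long slot[2];

        slot[0] = canary_value(&slot[0], secret);          /* set_canary()   */
        assert(slot[0] == canary_value(&slot[0], secret)); /* check_canary() */
        /* the same bits stored at &slot[1] would fail the check there */
        return 0;
    }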
-@@ -484,13 +523,13 @@ static inline void *restore_red_left(struct kmem_cache *s, void *p)
- * Debug settings:
- */
- #if defined(CONFIG_SLUB_DEBUG_ON)
--static int slub_debug = DEBUG_DEFAULT_FLAGS;
-+static int slub_debug __ro_after_init = DEBUG_DEFAULT_FLAGS;
- #else
--static int slub_debug;
-+static int slub_debug __ro_after_init;
- #endif
-
--static char *slub_debug_slabs;
--static int disable_higher_order_debug;
-+static char *slub_debug_slabs __ro_after_init;
-+static int disable_higher_order_debug __ro_after_init;
-
- /*
- * slub is about to manipulate internal object metadata. This memory lies
-@@ -550,6 +589,9 @@ static struct track *get_track(struct kmem_cache *s, void *object,
- else
- p = object + s->inuse;
-
-+ if (IS_ENABLED(CONFIG_SLAB_CANARY))
-+ p = (void *)p + sizeof(void *);
-+
- return p + alloc;
- }
-
-@@ -688,6 +730,9 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
- else
- off = s->inuse;
-
-+ if (IS_ENABLED(CONFIG_SLAB_CANARY))
-+ off += sizeof(void *);
-+
- if (s->flags & SLAB_STORE_USER)
- off += 2 * sizeof(struct track);
-
-@@ -817,6 +862,9 @@ static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p)
- /* Freepointer is placed after the object. */
- off += sizeof(void *);
-
-+ if (IS_ENABLED(CONFIG_SLAB_CANARY))
-+ off += sizeof(void *);
-+
- if (s->flags & SLAB_STORE_USER)
- /* We also have user information there */
- off += 2 * sizeof(struct track);
-@@ -1416,8 +1464,9 @@ static void setup_object(struct kmem_cache *s, struct page *page,
- void *object)
- {
- setup_object_debug(s, page, object);
-+ set_canary(s, object, s->random_inactive);
- kasan_init_slab_obj(s, object);
-- if (unlikely(s->ctor)) {
-+ if (unlikely(s->ctor) && !has_sanitize_verify(s)) {
- kasan_unpoison_object_data(s, object);
- s->ctor(object);
- kasan_poison_object_data(s, object);
-@@ -2717,9 +2766,21 @@ static __always_inline void *slab_alloc_node(struct kmem_cache *s,
- stat(s, ALLOC_FASTPATH);
- }
-
-- if (unlikely(gfpflags & __GFP_ZERO) && object)
-+ if (has_sanitize_verify(s) && object) {
-+ size_t offset = s->offset ? 0 : sizeof(void *);
-+ BUG_ON(memchr_inv(object + offset, 0, s->object_size - offset));
-+ if (s->ctor)
-+ s->ctor(object);
-+ if (unlikely(gfpflags & __GFP_ZERO) && offset)
-+ memset(object, 0, sizeof(void *));
-+ } else if (unlikely(gfpflags & __GFP_ZERO) && object)
- memset(object, 0, s->object_size);
-
-+ if (object) {
-+ check_canary(s, object, s->random_inactive);
-+ set_canary(s, object, s->random_active);
-+ }
-+
- slab_post_alloc_hook(s, gfpflags, 1, &object);
-
- return object;
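The verify branch above checks that an object coming off the freelist is still zero, skipping the first word when the freelist pointer lives inside the object (s->offset == 0), since that word is legitimately dirtied by the freelist link. A userspace model of the check, with memchr_inv() written out as a loop (illustration only):

    #include <assert.h>
    #include <string.h>

    static const void *memchr_inv_model(const void *start, int c, size_t bytes)
    {
        const unsigned char *p = start;
        size_t i;

        for (i = 0; i < bytes; i++)
            if (p[i] != (unsigned char)c)
                return p + i;
        return NULL; /* every byte matches c */
    }

    int main(void)
    {
        unsigned char object[64] = { 0 };
        size_t offset = sizeof(void *); /* skip the in-object freelist word */

        memset(object, 0xAA, offset); /* the freelist link may be non-zero */
        assert(!memchr_inv_model(object + offset, 0, sizeof(object) - offset));
        return 0;
    }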
-@@ -2926,6 +2987,27 @@ static __always_inline void do_slab_free(struct kmem_cache *s,
- void *tail_obj = tail ? : head;
- struct kmem_cache_cpu *c;
- unsigned long tid;
-+ bool sanitize = has_sanitize(s);
-+
-+ if (IS_ENABLED(CONFIG_SLAB_CANARY) || sanitize) {
-+ __maybe_unused int offset = s->offset ? 0 : sizeof(void *);
-+ void *x = head;
-+
-+ while (1) {
-+ check_canary(s, x, s->random_active);
-+ set_canary(s, x, s->random_inactive);
-+
-+ if (sanitize) {
-+ memset(x + offset, 0, s->object_size - offset);
-+ if (!IS_ENABLED(CONFIG_SLAB_SANITIZE_VERIFY) && s->ctor)
-+ s->ctor(x);
-+ }
-+ if (x == tail_obj)
-+ break;
-+ x = get_freepointer(s, x);
-+ }
-+ }
-+
- redo:
- /*
- * Determine the currently cpus per cpu slab.
-@@ -3104,7 +3186,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
- void **p)
- {
- struct kmem_cache_cpu *c;
-- int i;
-+ int i, k;
-
- /* memcg and kmem_cache debug support */
- s = slab_pre_alloc_hook(s, flags);
-@@ -3141,13 +3223,29 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
- local_irq_enable();
-
- /* Clear memory outside IRQ disabled fastpath loop */
-- if (unlikely(flags & __GFP_ZERO)) {
-+ if (has_sanitize_verify(s)) {
-+ int j;
-+
-+ for (j = 0; j < i; j++) {
-+ size_t offset = s->offset ? 0 : sizeof(void *);
-+ BUG_ON(memchr_inv(p[j] + offset, 0, s->object_size - offset));
-+ if (s->ctor)
-+ s->ctor(p[j]);
-+ if (unlikely(flags & __GFP_ZERO) && offset)
-+ memset(p[j], 0, sizeof(void *));
-+ }
-+ } else if (unlikely(flags & __GFP_ZERO)) {
- int j;
-
- for (j = 0; j < i; j++)
- memset(p[j], 0, s->object_size);
- }
-
-+ for (k = 0; k < i; k++) {
-+ check_canary(s, p[k], s->random_inactive);
-+ set_canary(s, p[k], s->random_active);
-+ }
-+
- /* memcg and kmem_cache debug support */
- slab_post_alloc_hook(s, flags, size, p);
- return i;
-@@ -3179,9 +3277,9 @@ EXPORT_SYMBOL(kmem_cache_alloc_bulk);
- * and increases the number of allocations possible without having to
- * take the list_lock.
- */
--static int slub_min_order;
--static int slub_max_order = PAGE_ALLOC_COSTLY_ORDER;
--static int slub_min_objects;
-+static int slub_min_order __ro_after_init;
-+static int slub_max_order __ro_after_init = PAGE_ALLOC_COSTLY_ORDER;
-+static int slub_min_objects __ro_after_init;
-
- /*
- * Calculate the order of allocation given an slab object size.
-@@ -3351,6 +3449,7 @@ static void early_kmem_cache_node_alloc(int node)
- init_object(kmem_cache_node, n, SLUB_RED_ACTIVE);
- init_tracking(kmem_cache_node, n);
- #endif
-+ set_canary(kmem_cache_node, n, kmem_cache_node->random_active);
- kasan_kmalloc(kmem_cache_node, n, sizeof(struct kmem_cache_node),
- GFP_KERNEL);
- init_kmem_cache_node(n);
-@@ -3507,6 +3606,9 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
- size += sizeof(void *);
- }
-
-+ if (IS_ENABLED(CONFIG_SLAB_CANARY))
-+ size += sizeof(void *);
-+
- #ifdef CONFIG_SLUB_DEBUG
- if (flags & SLAB_STORE_USER)
- /*
-@@ -3577,6 +3679,10 @@ static int kmem_cache_open(struct kmem_cache *s, unsigned long flags)
- #ifdef CONFIG_SLAB_FREELIST_HARDENED
- s->random = get_random_long();
- #endif
-+#ifdef CONFIG_SLAB_CANARY
-+ s->random_active = get_random_long();
-+ s->random_inactive = get_random_long();
-+#endif
-
- if (need_reserve_slab_rcu && (s->flags & SLAB_TYPESAFE_BY_RCU))
- s->reserved = sizeof(struct rcu_head);
-@@ -3841,6 +3947,8 @@ const char *__check_heap_object(const void *ptr, unsigned long n,
- offset -= s->red_left_pad;
- }
-
-+ check_canary(s, (void *)ptr - offset, s->random_active);
-+
- /* Allow address range falling entirely within object size. */
- if (offset <= object_size && n <= object_size - offset)
- return NULL;
-@@ -3859,7 +3967,11 @@ static size_t __ksize(const void *object)
- page = virt_to_head_page(object);
-
- if (unlikely(!PageSlab(page))) {
-+#ifdef CONFIG_BUG_ON_DATA_CORRUPTION
-+ BUG_ON(!PageCompound(page));
-+#else
- WARN_ON(!PageCompound(page));
-+#endif
- return PAGE_SIZE << compound_order(page);
- }
-
-@@ -4724,7 +4836,7 @@ enum slab_stat_type {
- #define SO_TOTAL (1 << SL_TOTAL)
-
- #ifdef CONFIG_MEMCG
--static bool memcg_sysfs_enabled = IS_ENABLED(CONFIG_SLUB_MEMCG_SYSFS_ON);
-+static bool memcg_sysfs_enabled __ro_after_init = IS_ENABLED(CONFIG_SLUB_MEMCG_SYSFS_ON);
-
- static int __init setup_slub_memcg_sysfs(char *str)
- {
-diff --git a/mm/swap.c b/mm/swap.c
-index a77d68f2c1b6..d1f1d75f4d1f 100644
---- a/mm/swap.c
-+++ b/mm/swap.c
-@@ -92,6 +92,13 @@ static void __put_compound_page(struct page *page)
- if (!PageHuge(page))
- __page_cache_release(page);
- dtor = get_compound_page_dtor(page);
-+ if (!PageHuge(page))
-+ BUG_ON(dtor != free_compound_page
-+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-+ && dtor != free_transhuge_page
-+#endif
-+ );
-+
- (*dtor)(page);
- }
-
-diff --git a/net/core/dev.c b/net/core/dev.c
-index 4337450a5fdb..5a3c7d217719 100644
---- a/net/core/dev.c
-+++ b/net/core/dev.c
-@@ -4117,7 +4117,7 @@ int netif_rx_ni(struct sk_buff *skb)
- }
- EXPORT_SYMBOL(netif_rx_ni);
-
--static __latent_entropy void net_tx_action(struct softirq_action *h)
-+static __latent_entropy void net_tx_action(void)
- {
- struct softnet_data *sd = this_cpu_ptr(&softnet_data);
-
-@@ -5635,7 +5635,7 @@ static int napi_poll(struct napi_struct *n, struct list_head *repoll)
- return work;
- }
-
--static __latent_entropy void net_rx_action(struct softirq_action *h)
-+static __latent_entropy void net_rx_action(void)
- {
- struct softnet_data *sd = this_cpu_ptr(&softnet_data);
- unsigned long time_limit = jiffies +
-diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
-index f48fe6fc7e8c..d78c52835c08 100644
---- a/net/ipv4/Kconfig
-+++ b/net/ipv4/Kconfig
-@@ -261,6 +261,7 @@ config IP_PIMSM_V2
-
- config SYN_COOKIES
- bool "IP: TCP syncookie support"
-+ default y
- ---help---
- Normal TCP/IP networking is open to an attack known as "SYN
- flooding". This denial-of-service attack prevents legitimate remote
-diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
-index 18bc8738e989..d2866f6dd736 100644
---- a/scripts/mod/modpost.c
-+++ b/scripts/mod/modpost.c
-@@ -37,6 +37,7 @@ static int vmlinux_section_warnings = 1;
- static int warn_unresolved = 0;
- /* How a symbol is exported */
- static int sec_mismatch_count = 0;
-+static int writable_fptr_count = 0;
- static int sec_mismatch_verbose = 1;
- static int sec_mismatch_fatal = 0;
- /* ignore missing files */
-@@ -965,6 +966,7 @@ enum mismatch {
- ANY_EXIT_TO_ANY_INIT,
- EXPORT_TO_INIT_EXIT,
- EXTABLE_TO_NON_TEXT,
-+ DATA_TO_TEXT
- };
-
- /**
-@@ -1091,6 +1093,12 @@ static const struct sectioncheck sectioncheck[] = {
- .good_tosec = {ALL_TEXT_SECTIONS , NULL},
- .mismatch = EXTABLE_TO_NON_TEXT,
- .handler = extable_mismatch_handler,
-+},
-+/* Do not reference code from writable data */
-+{
-+ .fromsec = { DATA_SECTIONS, NULL },
-+ .bad_tosec = { ALL_TEXT_SECTIONS, NULL },
-+ .mismatch = DATA_TO_TEXT
- }
- };
-
-@@ -1240,10 +1248,10 @@ static Elf_Sym *find_elf_symbol(struct elf_info *elf, Elf64_Sword addr,
- continue;
- if (ELF_ST_TYPE(sym->st_info) == STT_SECTION)
- continue;
-- if (sym->st_value == addr)
-- return sym;
- /* Find a symbol nearby - addr are maybe negative */
- d = sym->st_value - addr;
-+ if (d == 0)
-+ return sym;
- if (d < 0)
- d = addr - sym->st_value;
- if (d < distance) {
-@@ -1402,7 +1410,11 @@ static void report_sec_mismatch(const char *modname,
- char *prl_from;
- char *prl_to;
-
-- sec_mismatch_count++;
-+ if (mismatch->mismatch == DATA_TO_TEXT)
-+ writable_fptr_count++;
-+ else
-+ sec_mismatch_count++;
-+
- if (!sec_mismatch_verbose)
- return;
-
-@@ -1526,6 +1538,14 @@ static void report_sec_mismatch(const char *modname,
- fatal("There's a special handler for this mismatch type, "
- "we should never get here.");
- break;
-+ case DATA_TO_TEXT:
-+#if 0
-+ fprintf(stderr,
-+ "The %s %s:%s references\n"
-+ "the %s %s:%s%s\n",
-+ from, fromsec, fromsym, to, tosec, tosym, to_p);
-+#endif
-+ break;
- }
- fprintf(stderr, "\n");
- }
-@@ -2539,6 +2559,14 @@ int main(int argc, char **argv)
- }
- }
- free(buf.p);
-+ if (writable_fptr_count) {
-+ if (!sec_mismatch_verbose) {
-+ warn("modpost: Found %d writable function pointer(s).\n"
-+ "To see full details build your kernel with:\n"
-+ "'make CONFIG_DEBUG_SECTION_MISMATCH=y'\n",
-+ writable_fptr_count);
-+ }
-+ }
-
- return err;
- }
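What the new DATA_TO_TEXT check flags, sketched in plain C (illustration only): a mutable structure of function pointers lands in the writable .data section and references .text, so modpost counts it; declaring it const moves it to .rodata and avoids the count.

    #include <stdio.h>

    static void greet(void) { puts("hi"); }

    struct ops { void (*fn)(void); };

    /* writable .data referencing .text -- counted by writable_fptr_count */
    static struct ops writable_ops = { .fn = greet };

    /* const: placed in .rodata, not flagged */
    static const struct ops ro_ops = { .fn = greet };

    int main(void)
    {
        writable_ops.fn();
        ro_ops.fn();
        return 0;
    }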
-diff --git a/security/Kconfig b/security/Kconfig
-index 87f2a6f842fd..7bdbb7edf5bf 100644
---- a/security/Kconfig
-+++ b/security/Kconfig
-@@ -8,7 +8,7 @@ source security/keys/Kconfig
-
- config SECURITY_DMESG_RESTRICT
- bool "Restrict unprivileged access to the kernel syslog"
-- default n
-+ default y
- help
- This enforces restrictions on unprivileged users reading the kernel
- syslog via dmesg(8).
-@@ -18,10 +18,34 @@ config SECURITY_DMESG_RESTRICT
-
- If you are unsure how to answer this question, answer N.
-
-+config SECURITY_PERF_EVENTS_RESTRICT
-+ bool "Restrict unprivileged use of performance events"
-+ depends on PERF_EVENTS
-+ default y
-+ help
-+ If you say Y here, the kernel.perf_event_paranoid sysctl
-+ will be set to 3 by default, and no unprivileged use of the
-+ perf_event_open syscall will be permitted unless it is
-+ changed.
-+
-+config SECURITY_TIOCSTI_RESTRICT
-+ bool "Restrict unprivileged use of tiocsti command injection"
-+ default y
-+ help
-+ This enforces restrictions on unprivileged users injecting commands
-+ into other processes which share a tty session using the TIOCSTI
-+ ioctl. This option makes TIOCSTI use require CAP_SYS_ADMIN.
-+
-+ If this option is not selected, no restrictions will be enforced
-+ unless the tiocsti_restrict sysctl is explicitly set to (1).
-+
-+ If you are unsure how to answer this question, answer N.
-+
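The attack SECURITY_TIOCSTI_RESTRICT addresses, sketched for illustration: any process holding a tty file descriptor can push bytes into that tty's input queue as if the user had typed them. With the restriction active, the ioctl fails for callers without CAP_SYS_ADMIN.

    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <termios.h>

    int main(void)
    {
        const char *cmd = "id\n";
        const char *p;

        for (p = cmd; *p; p++)
            if (ioctl(0, TIOCSTI, p) < 0) { /* inject one byte into stdin's tty */
                perror("TIOCSTI");          /* EPERM when restricted */
                return 1;
            }
        return 0;
    }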
- config SECURITY
- bool "Enable different security models"
- depends on SYSFS
- depends on MULTIUSER
-+ default y
- help
- This allows you to choose different security modules to be
- configured into your kernel.
-@@ -48,6 +72,7 @@ config SECURITYFS
- config SECURITY_NETWORK
- bool "Socket and Networking Security Hooks"
- depends on SECURITY
-+ default y
- help
- This enables the socket and networking security hooks.
- If enabled, a security module can use these hooks to
-@@ -155,6 +180,7 @@ config HARDENED_USERCOPY
- depends on HAVE_HARDENED_USERCOPY_ALLOCATOR
- select BUG
- imply STRICT_DEVMEM
-+ default y
- help
- This option checks for obviously wrong memory regions when
- copying memory to/from the kernel (via copy_to_user() and
-@@ -178,10 +204,36 @@ config HARDENED_USERCOPY_PAGESPAN
- config FORTIFY_SOURCE
- bool "Harden common str/mem functions against buffer overflows"
- depends on ARCH_HAS_FORTIFY_SOURCE
-+ default y
- help
- Detect overflows of buffers in common string and memory functions
- where the compiler can determine and validate the buffer sizes.
-
-+config FORTIFY_SOURCE_STRICT_STRING
-+ bool "Harden common functions against buffer overflows"
-+ depends on FORTIFY_SOURCE
-+ depends on EXPERT
-+ help
-+ Perform stricter overflow checks catching overflows within objects
-+ for common C string functions rather than only between objects.
-+
-+ This is not yet intended for production use, only bug finding.
-+
-+config PAGE_SANITIZE
-+ bool "Sanitize pages"
-+ default y
-+ help
-+ Zero fill page allocations on free, reducing the lifetime of
-+ sensitive data and helping to mitigate use-after-free bugs.
-+
-+config PAGE_SANITIZE_VERIFY
-+ bool "Verify sanitized pages"
-+ depends on PAGE_SANITIZE
-+ default y
-+ help
-+ Verify that newly allocated pages are zeroed to detect
-+ write-after-free bugs.
-+
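PAGE_SANITIZE and PAGE_SANITIZE_VERIFY pair zero-on-free with verify-on-alloc; the second catches writes made through dangling pointers after the free. A userspace model of the pairing (illustration only):

    #include <assert.h>
    #include <string.h>

    #define OBJ_SIZE 64

    static unsigned char slot[OBJ_SIZE]; /* stands in for one freed page */

    static void sanitize_on_free(void)
    {
        memset(slot, 0, OBJ_SIZE); /* PAGE_SANITIZE */
    }

    static void verify_on_alloc(void)
    {
        size_t i;

        for (i = 0; i < OBJ_SIZE; i++)
            assert(slot[i] == 0); /* PAGE_SANITIZE_VERIFY trips on write-after-free */
    }

    int main(void)
    {
        sanitize_on_free();
        /* a use-after-free write to slot[] here would be detected below */
        verify_on_alloc();
        return 0;
    }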
- config STATIC_USERMODEHELPER
- bool "Force all usermode helper calls through a single binary"
- help
-diff --git a/security/selinux/Kconfig b/security/selinux/Kconfig
-index 8af7a690eb40..6539694b0fd3 100644
---- a/security/selinux/Kconfig
-+++ b/security/selinux/Kconfig
-@@ -2,7 +2,7 @@ config SECURITY_SELINUX
- bool "NSA SELinux Support"
- depends on SECURITY_NETWORK && AUDIT && NET && INET
- select NETWORK_SECMARK
-- default n
-+ default y
- help
- This selects NSA Security-Enhanced Linux (SELinux).
- You will also need a policy configuration and a labeled filesystem.
-@@ -79,23 +79,3 @@ config SECURITY_SELINUX_AVC_STATS
- This option collects access vector cache statistics to
- /selinux/avc/cache_stats, which may be monitored via
- tools such as avcstat.
--
--config SECURITY_SELINUX_CHECKREQPROT_VALUE
-- int "NSA SELinux checkreqprot default value"
-- depends on SECURITY_SELINUX
-- range 0 1
-- default 0
-- help
-- This option sets the default value for the 'checkreqprot' flag
-- that determines whether SELinux checks the protection requested
-- by the application or the protection that will be applied by the
-- kernel (including any implied execute for read-implies-exec) for
-- mmap and mprotect calls. If this option is set to 0 (zero),
-- SELinux will default to checking the protection that will be applied
-- by the kernel. If this option is set to 1 (one), SELinux will
-- default to checking the protection requested by the application.
-- The checkreqprot flag may be changed from the default via the
-- 'checkreqprot=' boot parameter. It may also be changed at runtime
-- via /selinux/checkreqprot if authorized by policy.
--
-- If you are unsure how to answer this question, answer 0.
-diff --git a/security/selinux/include/objsec.h b/security/selinux/include/objsec.h
-index 1649cd18eb0b..067f35559aa7 100644
---- a/security/selinux/include/objsec.h
-+++ b/security/selinux/include/objsec.h
-@@ -150,6 +150,6 @@ struct pkey_security_struct {
- u32 sid; /* SID of pkey */
- };
-
--extern unsigned int selinux_checkreqprot;
-+extern const unsigned int selinux_checkreqprot;
-
- #endif /* _SELINUX_OBJSEC_H_ */
-diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
-index 00eed842c491..8f7b8d7e6f91 100644
---- a/security/selinux/selinuxfs.c
-+++ b/security/selinux/selinuxfs.c
-@@ -41,16 +41,7 @@
- #include "objsec.h"
- #include "conditional.h"
-
--unsigned int selinux_checkreqprot = CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE;
--
--static int __init checkreqprot_setup(char *str)
--{
-- unsigned long checkreqprot;
-- if (!kstrtoul(str, 0, &checkreqprot))
-- selinux_checkreqprot = checkreqprot ? 1 : 0;
-- return 1;
--}
--__setup("checkreqprot=", checkreqprot_setup);
-+const unsigned int selinux_checkreqprot;
-
- static DEFINE_MUTEX(sel_mutex);
-
-@@ -610,10 +601,9 @@ static ssize_t sel_write_checkreqprot(struct file *file, const char __user *buf,
- return PTR_ERR(page);
-
- length = -EINVAL;
-- if (sscanf(page, "%u", &new_value) != 1)
-+ if (sscanf(page, "%u", &new_value) != 1 || new_value)
- goto out;
-
-- selinux_checkreqprot = new_value ? 1 : 0;
- length = count;
- out:
- kfree(page);
-diff --git a/security/yama/Kconfig b/security/yama/Kconfig
-index 96b27405558a..485c1b85c325 100644
---- a/security/yama/Kconfig
-+++ b/security/yama/Kconfig
-@@ -1,7 +1,7 @@
- config SECURITY_YAMA
- bool "Yama support"
- depends on SECURITY
-- default n
-+ default y
- help
- This selects Yama, which extends DAC support with additional
- system-wide security settings beyond regular Linux discretionary
diff --git a/sys-kernel/linux-image-redcore-lts/files/4.14-mute-pps_state_mismatch.patch b/sys-kernel/linux-image-redcore-lts/files/4.14-mute-pps_state_mismatch.patch
deleted file mode 100644
index dc1d254b..00000000
--- a/sys-kernel/linux-image-redcore-lts/files/4.14-mute-pps_state_mismatch.patch
+++ /dev/null
@@ -1,16 +0,0 @@
-diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
-index 09f274419..595bc5844 100644
---- a/drivers/gpu/drm/i915/intel_dp.c
-+++ b/drivers/gpu/drm/i915/intel_dp.c
-@@ -5249,7 +5249,10 @@ intel_pps_verify_state(struct drm_i915_private *dev_priv,
-
- if (hw.t1_t3 != sw->t1_t3 || hw.t8 != sw->t8 || hw.t9 != sw->t9 ||
- hw.t10 != sw->t10 || hw.t11_t12 != sw->t11_t12) {
-- DRM_ERROR("PPS state mismatch\n");
-+		/* seems buggy on 4.14.x .. mute this for now,
-+		 * even though it is not a real solution ..
-+ * DRM_ERROR("PPS state mismatch\n");
-+ */
- intel_pps_dump_state("sw", sw);
- intel_pps_dump_state("hw", &hw);
- }
diff --git a/sys-kernel/linux-image-redcore-lts/files/4.14-restore-SD_PREFER_SIBLING-on-MC-domains.patch b/sys-kernel/linux-image-redcore-lts/files/4.14-restore-SD_PREFER_SIBLING-on-MC-domains.patch
deleted file mode 100644
index b6be46cc..00000000
--- a/sys-kernel/linux-image-redcore-lts/files/4.14-restore-SD_PREFER_SIBLING-on-MC-domains.patch
+++ /dev/null
@@ -1,12 +0,0 @@
-diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
-index 093f2ceba..808998fe1 100644
---- a/kernel/sched/topology.c
-+++ b/kernel/sched/topology.c
-@@ -1164,6 +1164,7 @@ sd_init(struct sched_domain_topology_level *tl,
- sd->smt_gain = 1178; /* ~15% */
-
- } else if (sd->flags & SD_SHARE_PKG_RESOURCES) {
-+ sd->flags |= SD_PREFER_SIBLING;
- sd->imbalance_pct = 117;
- sd->cache_nice_tries = 1;
- sd->busy_idx = 2;
diff --git a/sys-kernel/linux-image-redcore-lts/files/4.14-uksm-linux-hardened.patch b/sys-kernel/linux-image-redcore-lts/files/4.14-uksm-linux-hardened.patch
deleted file mode 100644
index f0596117..00000000
--- a/sys-kernel/linux-image-redcore-lts/files/4.14-uksm-linux-hardened.patch
+++ /dev/null
@@ -1,6919 +0,0 @@
-diff -Nur a/Documentation/vm/00-INDEX b/Documentation/vm/00-INDEX
---- a/Documentation/vm/00-INDEX 2018-05-25 15:18:02.000000000 +0100
-+++ b/Documentation/vm/00-INDEX 2018-05-26 19:30:55.783140311 +0100
-@@ -20,6 +20,8 @@
- - description of the idle page tracking feature.
- ksm.txt
- - how to use the Kernel Samepage Merging feature.
-+uksm.txt
-+ - Introduction to Ultra KSM
- numa
- - information about NUMA specific code in the Linux vm.
- numa_memory_policy.txt
-diff -Nur a/Documentation/vm/uksm.txt b/Documentation/vm/uksm.txt
---- a/Documentation/vm/uksm.txt 1970-01-01 01:00:00.000000000 +0100
-+++ b/Documentation/vm/uksm.txt 2018-05-26 19:30:55.783140311 +0100
-@@ -0,0 +1,61 @@
-+The Ultra Kernel Samepage Merging feature
-+----------------------------------------------
-+/*
-+ * Ultra KSM. Copyright (C) 2011-2012 Nai Xia
-+ *
-+ * This is an improvement upon KSM. Some basic data structures and routines
-+ * are borrowed from ksm.c .
-+ *
-+ * Its new features:
-+ * 1. Full system scan:
-+ * It automatically scans all user processes' anonymous VMAs. Kernel-user
-+ * interaction to submit a memory area to KSM is no longer needed.
-+ *
-+ * 2. Rich area detection:
-+ *	It automatically detects rich areas containing abundant duplicated
-+ *	pages. Rich areas are given full scan speed. Poor areas are
-+ * sampled at a reasonable speed with very low CPU consumption.
-+ *
-+ * 3. Ultra Per-page scan speed improvement:
-+ * A new hash algorithm is proposed. As a result, on a machine with
-+ *	Core(TM)2 Quad Q9300 CPU in 32-bit mode and 800MHz DDR2 main memory, it
-+ *	can scan memory areas that do not contain duplicated pages at a speed of
-+ *	627MB/sec ~ 2445MB/sec and can merge duplicated areas at a speed of
-+ * 477MB/sec ~ 923MB/sec.
-+ *
-+ * 4. Thrashing area avoidance:
-+ *	A thrashing area (a VMA that has frequent KSM page break-outs) can be
-+ *	filtered out. My benchmark shows it is more efficient than KSM's per-page
-+ *	hash-value-based volatile page detection.
-+ *
-+ *
-+ * 5. Misc changes upon KSM:
-+ *   * It has a fully x86-optimized memcmp dedicated to 4-byte-aligned page
-+ *     comparison. It is much faster than the default C version on x86.
-+ *   * rmap_item now has a struct page *member to loosely cache an
-+ *     address-->page mapping, which avoids many time-costly calls to
-+ *     follow_page().
-+ * * The VMA creation/exit procedures are hooked to let the Ultra KSM know.
-+ * * try_to_merge_two_pages() now can revert a pte if it fails. No break_
-+ * ksm is needed for this case.
-+ *
-+ * 6. Full Zero Page consideration (contributed by Figo Zhang)
-+ *    Now uksmd considers full zero pages as special pages and merges them into
-+ *    a special unswappable uksm zero page.
-+ */
-+
-+ChangeLog:
-+
-+2012-05-05 The creation of this Doc
-+2012-05-08 UKSM 0.1.1.1 libc crash bug fix, api clean up, doc clean up.
-+2012-05-28 UKSM 0.1.1.2 bug fix release
-+2012-06-26 UKSM 0.1.2-beta1 first beta release for 0.1.2
-+2012-07-2 UKSM 0.1.2-beta2
-+2012-07-10 UKSM 0.1.2-beta3
-+2012-07-26 UKSM 0.1.2 Fine grained speed control, more scan optimization.
-+2012-10-13 UKSM 0.1.2.1 Bug fixes.
-+2012-12-31 UKSM 0.1.2.2 Minor bug fixes.
-+2014-07-02 UKSM 0.1.2.3 Fix a " __this_cpu_read() in preemptible bug".
-+2015-04-22 UKSM 0.1.2.4 Fix a race condition that can sometimes trigger annoying warnings.
-+2016-09-10 UKSM 0.1.2.5 Fix a bug in dedup ratio calculation.
-+2017-02-26 UKSM 0.1.2.6 Fix a bug in hugetlbpage handling and a race bug with page migration.
-diff -Nur a/fs/exec.c b/fs/exec.c
---- a/fs/exec.c 2018-05-26 19:24:34.831782903 +0100
-+++ b/fs/exec.c 2018-05-26 19:31:18.404873956 +0100
-@@ -63,6 +63,7 @@
- #include <linux/compat.h>
- #include <linux/vmalloc.h>
- #include <linux/random.h>
-+#include <linux/ksm.h>
-
- #include <linux/uaccess.h>
- #include <asm/mmu_context.h>
-@@ -1377,6 +1378,7 @@
- /* An exec changes our domain. We are no longer part of the thread
- group */
- current->self_exec_id++;
-+
- flush_signal_handlers(current, 0);
- }
- EXPORT_SYMBOL(setup_new_exec);
-diff -Nur a/fs/proc/meminfo.c b/fs/proc/meminfo.c
---- a/fs/proc/meminfo.c 2018-05-25 15:18:02.000000000 +0100
-+++ b/fs/proc/meminfo.c 2018-05-26 19:30:55.784140344 +0100
-@@ -118,6 +118,10 @@
- global_zone_page_state(NR_KERNEL_STACK_KB));
- show_val_kb(m, "PageTables: ",
- global_zone_page_state(NR_PAGETABLE));
-+#ifdef CONFIG_UKSM
-+ show_val_kb(m, "KsmZeroPages: ",
-+ global_zone_page_state(NR_UKSM_ZERO_PAGES));
-+#endif
- #ifdef CONFIG_QUICKLIST
- show_val_kb(m, "Quicklists: ", quicklist_total_size());
- #endif
-diff -Nur a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
---- a/include/asm-generic/pgtable.h 2018-05-25 15:18:02.000000000 +0100
-+++ b/include/asm-generic/pgtable.h 2018-05-26 19:30:55.784140344 +0100
-@@ -789,12 +789,25 @@
- extern void untrack_pfn_moved(struct vm_area_struct *vma);
- #endif
-
-+#ifdef CONFIG_UKSM
-+static inline int is_uksm_zero_pfn(unsigned long pfn)
-+{
-+ extern unsigned long uksm_zero_pfn;
-+ return pfn == uksm_zero_pfn;
-+}
-+#else
-+static inline int is_uksm_zero_pfn(unsigned long pfn)
-+{
-+ return 0;
-+}
-+#endif
-+
- #ifdef __HAVE_COLOR_ZERO_PAGE
- static inline int is_zero_pfn(unsigned long pfn)
- {
- extern unsigned long zero_pfn;
- unsigned long offset_from_zero_pfn = pfn - zero_pfn;
-- return offset_from_zero_pfn <= (zero_page_mask >> PAGE_SHIFT);
-+ return offset_from_zero_pfn <= (zero_page_mask >> PAGE_SHIFT) || is_uksm_zero_pfn(pfn);
- }
-
- #define my_zero_pfn(addr) page_to_pfn(ZERO_PAGE(addr))
-@@ -803,7 +816,7 @@
- static inline int is_zero_pfn(unsigned long pfn)
- {
- extern unsigned long zero_pfn;
-- return pfn == zero_pfn;
-+ return (pfn == zero_pfn) || (is_uksm_zero_pfn(pfn));
- }
-
- static inline unsigned long my_zero_pfn(unsigned long addr)
-diff -Nur a/include/linux/ksm.h b/include/linux/ksm.h
---- a/include/linux/ksm.h 2018-05-25 15:18:02.000000000 +0100
-+++ b/include/linux/ksm.h 2018-05-26 19:30:55.784140344 +0100
-@@ -21,21 +21,6 @@
- #ifdef CONFIG_KSM
- int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
- unsigned long end, int advice, unsigned long *vm_flags);
--int __ksm_enter(struct mm_struct *mm);
--void __ksm_exit(struct mm_struct *mm);
--
--static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
--{
-- if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags))
-- return __ksm_enter(mm);
-- return 0;
--}
--
--static inline void ksm_exit(struct mm_struct *mm)
--{
-- if (test_bit(MMF_VM_MERGEABLE, &mm->flags))
-- __ksm_exit(mm);
--}
-
- static inline struct stable_node *page_stable_node(struct page *page)
- {
-@@ -65,6 +50,33 @@
- void rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc);
- void ksm_migrate_page(struct page *newpage, struct page *oldpage);
-
-+#ifdef CONFIG_KSM_LEGACY
-+int __ksm_enter(struct mm_struct *mm);
-+void __ksm_exit(struct mm_struct *mm);
-+static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
-+{
-+ if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags))
-+ return __ksm_enter(mm);
-+ return 0;
-+}
-+
-+static inline void ksm_exit(struct mm_struct *mm)
-+{
-+ if (test_bit(MMF_VM_MERGEABLE, &mm->flags))
-+ __ksm_exit(mm);
-+}
-+
-+#elif defined(CONFIG_UKSM)
-+static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
-+{
-+ return 0;
-+}
-+
-+static inline void ksm_exit(struct mm_struct *mm)
-+{
-+}
-+#endif /* !CONFIG_UKSM */
-+
- #else /* !CONFIG_KSM */
-
- static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
-@@ -106,4 +118,6 @@
- #endif /* CONFIG_MMU */
- #endif /* !CONFIG_KSM */
-
-+#include <linux/uksm.h>
-+
- #endif /* __LINUX_KSM_H */
-diff -Nur a/include/linux/mm_types.h b/include/linux/mm_types.h
---- a/include/linux/mm_types.h 2018-05-25 15:18:02.000000000 +0100
-+++ b/include/linux/mm_types.h 2018-05-26 19:30:55.784140344 +0100
-@@ -337,6 +337,9 @@
- struct mempolicy *vm_policy; /* NUMA policy for the VMA */
- #endif
- struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
-+#ifdef CONFIG_UKSM
-+ struct vma_slot *uksm_vma_slot;
-+#endif
- } __randomize_layout;
-
- struct core_thread {
-diff -Nur a/include/linux/mmzone.h b/include/linux/mmzone.h
---- a/include/linux/mmzone.h 2018-05-25 15:18:02.000000000 +0100
-+++ b/include/linux/mmzone.h 2018-05-26 19:30:55.785140376 +0100
-@@ -148,6 +148,9 @@
- NR_ZSPAGES, /* allocated in zsmalloc */
- #endif
- NR_FREE_CMA_PAGES,
-+#ifdef CONFIG_UKSM
-+ NR_UKSM_ZERO_PAGES,
-+#endif
- NR_VM_ZONE_STAT_ITEMS };
-
- enum node_stat_item {
-@@ -872,7 +875,7 @@
- }
-
- /**
-- * is_highmem - helper function to quickly check if a struct zone is a
-+ * is_highmem - helper function to quickly check if a struct zone is a
- * highmem zone or not. This is an attempt to keep references
- * to ZONE_{DMA/NORMAL/HIGHMEM/etc} in general code to a minimum.
- * @zone - pointer to struct zone variable
-diff -Nur a/include/linux/sradix-tree.h b/include/linux/sradix-tree.h
---- a/include/linux/sradix-tree.h 1970-01-01 01:00:00.000000000 +0100
-+++ b/include/linux/sradix-tree.h 2018-05-26 19:30:55.785140376 +0100
-@@ -0,0 +1,77 @@
-+#ifndef _LINUX_SRADIX_TREE_H
-+#define _LINUX_SRADIX_TREE_H
-+
-+
-+#define INIT_SRADIX_TREE(root, mask) \
-+do { \
-+ (root)->height = 0; \
-+ (root)->gfp_mask = (mask); \
-+ (root)->rnode = NULL; \
-+} while (0)
-+
-+#define ULONG_BITS (sizeof(unsigned long) * 8)
-+#define SRADIX_TREE_INDEX_BITS (8 /* CHAR_BIT */ * sizeof(unsigned long))
-+//#define SRADIX_TREE_MAP_SHIFT 6
-+//#define SRADIX_TREE_MAP_SIZE (1UL << SRADIX_TREE_MAP_SHIFT)
-+//#define SRADIX_TREE_MAP_MASK (SRADIX_TREE_MAP_SIZE-1)
-+
-+struct sradix_tree_node {
-+ unsigned int height; /* Height from the bottom */
-+ unsigned int count;
-+ unsigned int fulls; /* Number of full sublevel trees */
-+ struct sradix_tree_node *parent;
-+ void *stores[0];
-+};
-+
-+/* A simple radix tree implementation */
-+struct sradix_tree_root {
-+ unsigned int height;
-+ struct sradix_tree_node *rnode;
-+
-+ /* Where found to have available empty stores in its sublevels */
-+ struct sradix_tree_node *enter_node;
-+ unsigned int shift;
-+ unsigned int stores_size;
-+ unsigned int mask;
-+ unsigned long min; /* The first hole index */
-+ unsigned long num;
-+ //unsigned long *height_to_maxindex;
-+
-+ /* How the node is allocated and freed. */
-+ struct sradix_tree_node *(*alloc)(void);
-+ void (*free)(struct sradix_tree_node *node);
-+
-+ /* When a new node is added and removed */
-+ void (*extend)(struct sradix_tree_node *parent, struct sradix_tree_node *child);
-+ void (*assign)(struct sradix_tree_node *node, unsigned int index, void *item);
-+ void (*rm)(struct sradix_tree_node *node, unsigned int offset);
-+};
-+
-+struct sradix_tree_path {
-+ struct sradix_tree_node *node;
-+ int offset;
-+};
-+
-+static inline
-+void init_sradix_tree_root(struct sradix_tree_root *root, unsigned long shift)
-+{
-+ root->height = 0;
-+ root->rnode = NULL;
-+ root->shift = shift;
-+ root->stores_size = 1UL << shift;
-+ root->mask = root->stores_size - 1;
-+}
-+
-+
-+extern void *sradix_tree_next(struct sradix_tree_root *root,
-+ struct sradix_tree_node *node, unsigned long index,
-+ int (*iter)(void *, unsigned long));
-+
-+extern int sradix_tree_enter(struct sradix_tree_root *root, void **item, int num);
-+
-+extern void sradix_tree_delete_from_leaf(struct sradix_tree_root *root,
-+ struct sradix_tree_node *node, unsigned long index);
-+
-+extern void *sradix_tree_lookup(struct sradix_tree_root *root, unsigned long index);
-+
-+#endif /* _LINUX_SRADIX_TREE_H */
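A hypothetical wiring of the callbacks this header expects (illustration only; it assumes the declarations above are in scope and is not code from the patch): the caller sizes each node for 1 << shift store slots and supplies the alloc/free hooks.

    #include <stdlib.h>

    #define TREE_SHIFT 4 /* 16 stores per node; an arbitrary example value */

    static struct sradix_tree_node *node_alloc(void)
    {
        return calloc(1, sizeof(struct sradix_tree_node) +
                         (1UL << TREE_SHIFT) * sizeof(void *));
    }

    static void node_free(struct sradix_tree_node *node)
    {
        free(node);
    }

    static void tree_setup(struct sradix_tree_root *root)
    {
        init_sradix_tree_root(root, TREE_SHIFT);
        root->alloc = node_alloc;
        root->free = node_free;
        /* the extend/assign/rm hooks are optional and left NULL here */
    }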
-diff -Nur a/include/linux/uksm.h b/include/linux/uksm.h
---- a/include/linux/uksm.h 1970-01-01 01:00:00.000000000 +0100
-+++ b/include/linux/uksm.h 2018-05-26 19:30:55.785140376 +0100
-@@ -0,0 +1,149 @@
-+#ifndef __LINUX_UKSM_H
-+#define __LINUX_UKSM_H
-+/*
-+ * Memory merging support.
-+ *
-+ * This code enables dynamic sharing of identical pages found in different
-+ * memory areas, even if they are not shared by fork().
-+ */
-+
-+/* if !CONFIG_UKSM this file should not be compiled at all. */
-+#ifdef CONFIG_UKSM
-+
-+#include <linux/bitops.h>
-+#include <linux/mm.h>
-+#include <linux/pagemap.h>
-+#include <linux/rmap.h>
-+#include <linux/sched.h>
-+
-+extern unsigned long zero_pfn __read_mostly;
-+extern unsigned long uksm_zero_pfn __read_mostly;
-+extern struct page *empty_uksm_zero_page;
-+
-+/* must be done before linked to mm */
-+extern void uksm_vma_add_new(struct vm_area_struct *vma);
-+extern void uksm_remove_vma(struct vm_area_struct *vma);
-+
-+#define UKSM_SLOT_NEED_SORT (1 << 0)
-+#define UKSM_SLOT_NEED_RERAND (1 << 1)
-+#define UKSM_SLOT_SCANNED (1 << 2) /* It's scanned in this round */
-+#define UKSM_SLOT_FUL_SCANNED (1 << 3)
-+#define UKSM_SLOT_IN_UKSM (1 << 4)
-+
-+struct vma_slot {
-+ struct sradix_tree_node *snode;
-+ unsigned long sindex;
-+
-+ struct list_head slot_list;
-+ unsigned long fully_scanned_round;
-+ unsigned long dedup_num;
-+ unsigned long pages_scanned;
-+ unsigned long this_sampled;
-+ unsigned long last_scanned;
-+ unsigned long pages_to_scan;
-+ struct scan_rung *rung;
-+ struct page **rmap_list_pool;
-+ unsigned int *pool_counts;
-+ unsigned long pool_size;
-+ struct vm_area_struct *vma;
-+ struct mm_struct *mm;
-+ unsigned long ctime_j;
-+ unsigned long pages;
-+ unsigned long flags;
-+ unsigned long pages_cowed; /* pages cowed this round */
-+ unsigned long pages_merged; /* pages merged this round */
-+ unsigned long pages_bemerged;
-+
-+ /* when it has page merged in this eval round */
-+ struct list_head dedup_list;
-+};
-+
-+static inline void uksm_unmap_zero_page(pte_t pte)
-+{
-+ if (pte_pfn(pte) == uksm_zero_pfn)
-+ __dec_zone_page_state(empty_uksm_zero_page, NR_UKSM_ZERO_PAGES);
-+}
-+
-+static inline void uksm_map_zero_page(pte_t pte)
-+{
-+ if (pte_pfn(pte) == uksm_zero_pfn)
-+ __inc_zone_page_state(empty_uksm_zero_page, NR_UKSM_ZERO_PAGES);
-+}
-+
-+static inline void uksm_cow_page(struct vm_area_struct *vma, struct page *page)
-+{
-+ if (vma->uksm_vma_slot && PageKsm(page))
-+ vma->uksm_vma_slot->pages_cowed++;
-+}
-+
-+static inline void uksm_cow_pte(struct vm_area_struct *vma, pte_t pte)
-+{
-+ if (vma->uksm_vma_slot && pte_pfn(pte) == uksm_zero_pfn)
-+ vma->uksm_vma_slot->pages_cowed++;
-+}
-+
-+static inline int uksm_flags_can_scan(unsigned long vm_flags)
-+{
-+#ifdef VM_SAO
-+ if (vm_flags & VM_SAO)
-+ return 0;
-+#endif
-+
-+ return !(vm_flags & (VM_PFNMAP | VM_IO | VM_DONTEXPAND |
-+ VM_HUGETLB | VM_MIXEDMAP | VM_SHARED
-+ | VM_MAYSHARE | VM_GROWSUP | VM_GROWSDOWN));
-+}
-+
-+static inline void uksm_vm_flags_mod(unsigned long *vm_flags_p)
-+{
-+ if (uksm_flags_can_scan(*vm_flags_p))
-+ *vm_flags_p |= VM_MERGEABLE;
-+}
-+
-+/*
-+ * Just a wrapper for BUG_ON for where ksm_zeropage must not be. TODO: it will
-+ * be removed when uksm zero page patch is stable enough.
-+ */
-+static inline void uksm_bugon_zeropage(pte_t pte)
-+{
-+ BUG_ON(pte_pfn(pte) == uksm_zero_pfn);
-+}
-+#else
-+static inline void uksm_vma_add_new(struct vm_area_struct *vma)
-+{
-+}
-+
-+static inline void uksm_remove_vma(struct vm_area_struct *vma)
-+{
-+}
-+
-+static inline void uksm_unmap_zero_page(pte_t pte)
-+{
-+}
-+
-+static inline void uksm_map_zero_page(pte_t pte)
-+{
-+}
-+
-+static inline void uksm_cow_page(struct vm_area_struct *vma, struct page *page)
-+{
-+}
-+
-+static inline void uksm_cow_pte(struct vm_area_struct *vma, pte_t pte)
-+{
-+}
-+
-+static inline int uksm_flags_can_scan(unsigned long vm_flags)
-+{
-+ return 0;
-+}
-+
-+static inline void uksm_vm_flags_mod(unsigned long *vm_flags_p)
-+{
-+}
-+
-+static inline void uksm_bugon_zeropage(pte_t pte)
-+{
-+}
-+#endif /* !CONFIG_UKSM */
-+#endif /* __LINUX_UKSM_H */
-diff -Nur a/kernel/fork.c b/kernel/fork.c
---- a/kernel/fork.c 2018-05-26 19:24:34.840783196 +0100
-+++ b/kernel/fork.c 2018-05-26 19:30:55.785140376 +0100
-@@ -655,7 +655,7 @@
- goto fail_nomem;
- charge = len;
- }
-- tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
-+ tmp = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
- if (!tmp)
- goto fail_nomem;
- *tmp = *mpnt;
-@@ -714,7 +714,7 @@
- __vma_link_rb(mm, tmp, rb_link, rb_parent);
- rb_link = &tmp->vm_rb.rb_right;
- rb_parent = &tmp->vm_rb;
--
-+ uksm_vma_add_new(tmp);
- mm->map_count++;
- if (!(tmp->vm_flags & VM_WIPEONFORK))
- retval = copy_page_range(mm, oldmm, mpnt);
-diff -Nur a/lib/Makefile b/lib/Makefile
---- a/lib/Makefile 2018-05-25 15:18:02.000000000 +0100
-+++ b/lib/Makefile 2018-05-26 19:30:55.786140408 +0100
-@@ -18,7 +18,7 @@
- KCOV_INSTRUMENT_dynamic_debug.o := n
-
- lib-y := ctype.o string.o vsprintf.o cmdline.o \
-- rbtree.o radix-tree.o dump_stack.o timerqueue.o\
-+ rbtree.o radix-tree.o sradix-tree.o dump_stack.o timerqueue.o\
- idr.o int_sqrt.o extable.o \
- sha1.o chacha20.o irq_regs.o argv_split.o \
- flex_proportions.o ratelimit.o show_mem.o \
-diff -Nur a/lib/sradix-tree.c b/lib/sradix-tree.c
---- a/lib/sradix-tree.c 1970-01-01 01:00:00.000000000 +0100
-+++ b/lib/sradix-tree.c 2018-05-26 19:30:55.786140408 +0100
-@@ -0,0 +1,476 @@
-+#include <linux/errno.h>
-+#include <linux/mm.h>
-+#include <linux/mman.h>
-+#include <linux/spinlock.h>
-+#include <linux/slab.h>
-+#include <linux/gcd.h>
-+#include <linux/sradix-tree.h>
-+
-+static inline int sradix_node_full(struct sradix_tree_root *root, struct sradix_tree_node *node)
-+{
-+ return node->fulls == root->stores_size ||
-+ (node->height == 1 && node->count == root->stores_size);
-+}
-+
-+/*
-+ * Extend a sradix tree so it can store key @index.
-+ */
-+static int sradix_tree_extend(struct sradix_tree_root *root, unsigned long index)
-+{
-+ struct sradix_tree_node *node;
-+ unsigned int height;
-+
-+ if (unlikely(root->rnode == NULL)) {
-+ if (!(node = root->alloc()))
-+ return -ENOMEM;
-+
-+ node->height = 1;
-+ root->rnode = node;
-+ root->height = 1;
-+ }
-+
-+ /* Figure out what the height should be. */
-+ height = root->height;
-+ index >>= root->shift * height;
-+
-+ while (index) {
-+ index >>= root->shift;
-+ height++;
-+ }
-+
-+ while (height > root->height) {
-+ unsigned int newheight;
-+
-+ if (!(node = root->alloc()))
-+ return -ENOMEM;
-+
-+ /* Increase the height. */
-+ node->stores[0] = root->rnode;
-+ root->rnode->parent = node;
-+ if (root->extend)
-+ root->extend(node, root->rnode);
-+
-+ newheight = root->height + 1;
-+ node->height = newheight;
-+ node->count = 1;
-+ if (sradix_node_full(root, root->rnode))
-+ node->fulls = 1;
-+
-+ root->rnode = node;
-+ root->height = newheight;
-+ }
-+
-+ return 0;
-+}
-+
-+/*
-+ * Search for the next item from the current node that is not NULL
-+ * and satisfies root->iter().
-+ */
-+void *sradix_tree_next(struct sradix_tree_root *root,
-+ struct sradix_tree_node *node, unsigned long index,
-+ int (*iter)(void *item, unsigned long height))
-+{
-+ unsigned long offset;
-+ void *item;
-+
-+ if (unlikely(node == NULL)) {
-+ node = root->rnode;
-+ for (offset = 0; offset < root->stores_size; offset++) {
-+ item = node->stores[offset];
-+ if (item && (!iter || iter(item, node->height)))
-+ break;
-+ }
-+
-+ if (unlikely(offset >= root->stores_size))
-+ return NULL;
-+
-+ if (node->height == 1)
-+ return item;
-+ else
-+ goto go_down;
-+ }
-+
-+ while (node) {
-+ offset = (index & root->mask) + 1;
-+ for (; offset < root->stores_size; offset++) {
-+ item = node->stores[offset];
-+ if (item && (!iter || iter(item, node->height)))
-+ break;
-+ }
-+
-+ if (offset < root->stores_size)
-+ break;
-+
-+ node = node->parent;
-+ index >>= root->shift;
-+ }
-+
-+ if (!node)
-+ return NULL;
-+
-+ while (node->height > 1) {
-+go_down:
-+ node = item;
-+ for (offset = 0; offset < root->stores_size; offset++) {
-+ item = node->stores[offset];
-+ if (item && (!iter || iter(item, node->height)))
-+ break;
-+ }
-+
-+ if (unlikely(offset >= root->stores_size))
-+ return NULL;
-+ }
-+
-+ BUG_ON(offset > root->stores_size);
-+
-+ return item;
-+}
-+
-+/*
-+ * Blindly insert the item into the tree. Typically, we reuse the
-+ * first empty store item.
-+ */
-+int sradix_tree_enter(struct sradix_tree_root *root, void **item, int num)
-+{
-+ unsigned long index;
-+ unsigned int height;
-+ struct sradix_tree_node *node, *tmp = NULL;
-+ int offset, offset_saved;
-+ void **store = NULL;
-+ int error, i, j, shift;
-+
-+go_on:
-+ index = root->min;
-+
-+ if (root->enter_node && !sradix_node_full(root, root->enter_node)) {
-+ node = root->enter_node;
-+ BUG_ON((index >> (root->shift * root->height)));
-+ } else {
-+ node = root->rnode;
-+ if (node == NULL || (index >> (root->shift * root->height))
-+ || sradix_node_full(root, node)) {
-+ error = sradix_tree_extend(root, index);
-+ if (error)
-+ return error;
-+
-+ node = root->rnode;
-+ }
-+ }
-+
-+
-+ height = node->height;
-+ shift = (height - 1) * root->shift;
-+ offset = (index >> shift) & root->mask;
-+ while (shift > 0) {
-+ offset_saved = offset;
-+ for (; offset < root->stores_size; offset++) {
-+ store = &node->stores[offset];
-+ tmp = *store;
-+
-+ if (!tmp || !sradix_node_full(root, tmp))
-+ break;
-+ }
-+ BUG_ON(offset >= root->stores_size);
-+
-+ if (offset != offset_saved) {
-+ index += (offset - offset_saved) << shift;
-+ index &= ~((1UL << shift) - 1);
-+ }
-+
-+ if (!tmp) {
-+ if (!(tmp = root->alloc()))
-+ return -ENOMEM;
-+
-+ tmp->height = shift / root->shift;
-+ *store = tmp;
-+ tmp->parent = node;
-+ node->count++;
-+// if (root->extend)
-+// root->extend(node, tmp);
-+ }
-+
-+ node = tmp;
-+ shift -= root->shift;
-+ offset = (index >> shift) & root->mask;
-+ }
-+
-+ BUG_ON(node->height != 1);
-+
-+
-+ store = &node->stores[offset];
-+ for (i = 0, j = 0;
-+ j < root->stores_size - node->count &&
-+ i < root->stores_size - offset && j < num; i++) {
-+ if (!store[i]) {
-+ store[i] = item[j];
-+ if (root->assign)
-+ root->assign(node, index + i, item[j]);
-+ j++;
-+ }
-+ }
-+
-+ node->count += j;
-+ root->num += j;
-+ num -= j;
-+
-+ while (sradix_node_full(root, node)) {
-+ node = node->parent;
-+ if (!node)
-+ break;
-+
-+ node->fulls++;
-+ }
-+
-+ if (unlikely(!node)) {
-+ /* All nodes are full */
-+ root->min = 1 << (root->height * root->shift);
-+ root->enter_node = NULL;
-+ } else {
-+ root->min = index + i - 1;
-+ root->min |= (1UL << (node->height - 1)) - 1;
-+ root->min++;
-+ root->enter_node = node;
-+ }
-+
-+ if (num) {
-+ item += j;
-+ goto go_on;
-+ }
-+
-+ return 0;
-+}
-+
-+
-+/**
-+ * sradix_tree_shrink - shrink height of a sradix tree to minimal
-+ * @root sradix tree root
-+ *
-+ */
-+static inline void sradix_tree_shrink(struct sradix_tree_root *root)
-+{
-+ /* try to shrink tree height */
-+ while (root->height > 1) {
-+ struct sradix_tree_node *to_free = root->rnode;
-+
-+ /*
-+		 * If the candidate node has more than one child, or its child
-+		 * is not at the leftmost store, we cannot shrink.
-+ */
-+ if (to_free->count != 1 || !to_free->stores[0])
-+ break;
-+
-+ root->rnode = to_free->stores[0];
-+ root->rnode->parent = NULL;
-+ root->height--;
-+ if (unlikely(root->enter_node == to_free))
-+ root->enter_node = NULL;
-+ root->free(to_free);
-+ }
-+}
-+
-+/*
-+ * Del the item on the known leaf node and index
-+ */
-+void sradix_tree_delete_from_leaf(struct sradix_tree_root *root,
-+ struct sradix_tree_node *node, unsigned long index)
-+{
-+ unsigned int offset;
-+ struct sradix_tree_node *start, *end;
-+
-+ BUG_ON(node->height != 1);
-+
-+ start = node;
-+ while (node && !(--node->count))
-+ node = node->parent;
-+
-+ end = node;
-+ if (!node) {
-+ root->rnode = NULL;
-+ root->height = 0;
-+ root->min = 0;
-+ root->num = 0;
-+ root->enter_node = NULL;
-+ } else {
-+ offset = (index >> (root->shift * (node->height - 1))) & root->mask;
-+ if (root->rm)
-+ root->rm(node, offset);
-+ node->stores[offset] = NULL;
-+ root->num--;
-+ if (root->min > index) {
-+ root->min = index;
-+ root->enter_node = node;
-+ }
-+ }
-+
-+ if (start != end) {
-+ do {
-+ node = start;
-+ start = start->parent;
-+ if (unlikely(root->enter_node == node))
-+ root->enter_node = end;
-+ root->free(node);
-+ } while (start != end);
-+
-+ /*
-+ * Note that shrink may free "end", so enter_node still need to
-+ * be checked inside.
-+ */
-+ sradix_tree_shrink(root);
-+ } else if (node->count == root->stores_size - 1) {
-+ /* It WAS a full leaf node. Update the ancestors */
-+ node = node->parent;
-+ while (node) {
-+ node->fulls--;
-+ if (node->fulls != root->stores_size - 1)
-+ break;
-+
-+ node = node->parent;
-+ }
-+ }
-+}
-+
-+void *sradix_tree_lookup(struct sradix_tree_root *root, unsigned long index)
-+{
-+ unsigned int height, offset;
-+ struct sradix_tree_node *node;
-+ int shift;
-+
-+ node = root->rnode;
-+ if (node == NULL || (index >> (root->shift * root->height)))
-+ return NULL;
-+
-+ height = root->height;
-+ shift = (height - 1) * root->shift;
-+
-+ do {
-+ offset = (index >> shift) & root->mask;
-+ node = node->stores[offset];
-+ if (!node)
-+ return NULL;
-+
-+ shift -= root->shift;
-+ } while (shift >= 0);
-+
-+ return node;
-+}
-+
-+/*
-+ * Return the item if it exists, otherwise create it in place
-+ * and return the created item.
-+ */
-+void *sradix_tree_lookup_create(struct sradix_tree_root *root,
-+ unsigned long index, void *(*item_alloc)(void))
-+{
-+ unsigned int height, offset;
-+ struct sradix_tree_node *node, *tmp;
-+ void *item;
-+ int shift, error;
-+
-+ if (root->rnode == NULL || (index >> (root->shift * root->height))) {
-+ if (item_alloc) {
-+ error = sradix_tree_extend(root, index);
-+ if (error)
-+ return NULL;
-+ } else {
-+ return NULL;
-+ }
-+ }
-+
-+ node = root->rnode;
-+ height = root->height;
-+ shift = (height - 1) * root->shift;
-+
-+ do {
-+ offset = (index >> shift) & root->mask;
-+ if (!node->stores[offset]) {
-+ if (!(tmp = root->alloc()))
-+ return NULL;
-+
-+ tmp->height = shift / root->shift;
-+ node->stores[offset] = tmp;
-+ tmp->parent = node;
-+ node->count++;
-+ node = tmp;
-+ } else {
-+ node = node->stores[offset];
-+ }
-+
-+ shift -= root->shift;
-+ } while (shift > 0);
-+
-+ BUG_ON(node->height != 1);
-+ offset = index & root->mask;
-+ if (node->stores[offset]) {
-+ return node->stores[offset];
-+ } else if (item_alloc) {
-+ if (!(item = item_alloc()))
-+ return NULL;
-+
-+ node->stores[offset] = item;
-+
-+ /*
-+ * NOTE: we do NOT call root->assign here, since this item is
-+ * newly created by us having no meaning. Caller can call this
-+ * if it's necessary to do so.
-+ */
-+
-+ node->count++;
-+ root->num++;
-+
-+ while (sradix_node_full(root, node)) {
-+ node = node->parent;
-+ if (!node)
-+ break;
-+
-+ node->fulls++;
-+ }
-+
-+ if (unlikely(!node)) {
-+ /* All nodes are full */
-+ root->min = 1 << (root->height * root->shift);
-+ } else {
-+ if (root->min == index) {
-+ root->min |= (1UL << (node->height - 1)) - 1;
-+ root->min++;
-+ root->enter_node = node;
-+ }
-+ }
-+
-+ return item;
-+ } else {
-+ return NULL;
-+ }
-+
-+}
-+
-+int sradix_tree_delete(struct sradix_tree_root *root, unsigned long index)
-+{
-+ unsigned int height, offset;
-+ struct sradix_tree_node *node;
-+ int shift;
-+
-+ node = root->rnode;
-+ if (node == NULL || (index >> (root->shift * root->height)))
-+ return -ENOENT;
-+
-+ height = root->height;
-+ shift = (height - 1) * root->shift;
-+
-+ do {
-+ offset = (index >> shift) & root->mask;
-+ node = node->stores[offset];
-+ if (!node)
-+ return -ENOENT;
-+
-+ shift -= root->shift;
-+ } while (shift > 0);
-+
-+ offset = index & root->mask;
-+ if (!node->stores[offset])
-+ return -ENOENT;
-+
-+ sradix_tree_delete_from_leaf(root, node, index);
-+
-+ return 0;
-+}
-diff -Nur a/mm/Kconfig b/mm/Kconfig
---- a/mm/Kconfig 2018-05-26 19:24:34.846783391 +0100
-+++ b/mm/Kconfig 2018-05-26 19:30:55.786140408 +0100
-@@ -315,6 +315,32 @@
- See Documentation/vm/ksm.txt for more information: KSM is inactive
- until a program has madvised that an area is MADV_MERGEABLE, and
- root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
-+choice
-+ prompt "Choose UKSM/KSM strategy"
-+ default UKSM
-+ depends on KSM
-+ help
-+	  This option allows you to select a UKSM/KSM strategy.
-+
-+config UKSM
-+ bool "Ultra-KSM for page merging"
-+ depends on KSM
-+ help
-+	  UKSM is inspired by the Linux kernel's KSM (Kernel Samepage
-+	  Merging), but with a fundamentally rewritten core algorithm. With
-+	  the new algorithm, UKSM can transparently scan all anonymously
-+	  mapped user-space applications with significantly improved scan speed
-+	  and CPU efficiency. Since KVM is friendly to KSM, KVM can also benefit from
-+	  UKSM. UKSM now has its first stable release and its first real-world enterprise user.
-+	  For more information, please go to its project page.
-+ (www.kerneldedup.org)
-+
-+config KSM_LEGACY
-+ bool "Legacy KSM implementation"
-+ depends on KSM
-+ help
-+ The legacy KSM implementation from Red Hat.
-+endchoice
-
- config DEFAULT_MMAP_MIN_ADDR
- int "Low address space to protect from user allocation"
-diff -Nur a/mm/Makefile b/mm/Makefile
---- a/mm/Makefile 2018-05-25 15:18:02.000000000 +0100
-+++ b/mm/Makefile 2018-05-26 19:30:55.786140408 +0100
-@@ -65,7 +65,8 @@
- obj-$(CONFIG_SPARSEMEM_VMEMMAP) += sparse-vmemmap.o
- obj-$(CONFIG_SLOB) += slob.o
- obj-$(CONFIG_MMU_NOTIFIER) += mmu_notifier.o
--obj-$(CONFIG_KSM) += ksm.o
-+obj-$(CONFIG_KSM_LEGACY) += ksm.o
-+obj-$(CONFIG_UKSM) += uksm.o
- obj-$(CONFIG_PAGE_POISONING) += page_poison.o
- obj-$(CONFIG_SLAB) += slab.o
- obj-$(CONFIG_SLUB) += slub.o
-diff -Nur a/mm/memory.c b/mm/memory.c
---- a/mm/memory.c 2018-05-25 15:18:02.000000000 +0100
-+++ b/mm/memory.c 2018-05-26 19:30:55.787140441 +0100
-@@ -129,6 +129,25 @@
-
- unsigned long highest_memmap_pfn __read_mostly;
-
-+#ifdef CONFIG_UKSM
-+unsigned long uksm_zero_pfn __read_mostly;
-+EXPORT_SYMBOL_GPL(uksm_zero_pfn);
-+struct page *empty_uksm_zero_page;
-+
-+static int __init setup_uksm_zero_page(void)
-+{
-+ empty_uksm_zero_page = alloc_pages(__GFP_ZERO & ~__GFP_MOVABLE, 0);
-+ if (!empty_uksm_zero_page)
-+ panic("Oh boy, that early out of memory?");
-+
-+ SetPageReserved(empty_uksm_zero_page);
-+ uksm_zero_pfn = page_to_pfn(empty_uksm_zero_page);
-+
-+ return 0;
-+}
-+core_initcall(setup_uksm_zero_page);
-+#endif
-+
- /*
- * CONFIG_MMU architectures set up ZERO_PAGE in their paging_init()
- */
-@@ -140,6 +159,7 @@
- core_initcall(init_zero_pfn);
-
-
-+
- #if defined(SPLIT_RSS_COUNTING)
-
- void sync_mm_rss(struct mm_struct *mm)
-@@ -1035,6 +1055,9 @@
- get_page(page);
- page_dup_rmap(page, false);
- rss[mm_counter(page)]++;
-+
-+ /* Should return NULL in vm_normal_page() */
-+ uksm_bugon_zeropage(pte);
- } else if (pte_devmap(pte)) {
- page = pte_page(pte);
-
-@@ -1048,6 +1071,8 @@
- page_dup_rmap(page, false);
- rss[mm_counter(page)]++;
- }
-+ } else {
-+ uksm_map_zero_page(pte);
- }
-
- out_set_pte:
-@@ -1317,8 +1342,10 @@
- ptent = ptep_get_and_clear_full(mm, addr, pte,
- tlb->fullmm);
- tlb_remove_tlb_entry(tlb, pte, addr);
-- if (unlikely(!page))
-+ if (unlikely(!page)) {
-+ uksm_unmap_zero_page(ptent);
- continue;
-+ }
-
- if (!PageAnon(page)) {
- if (pte_dirty(ptent)) {
-@@ -2318,8 +2345,10 @@
- clear_page(kaddr);
- kunmap_atomic(kaddr);
- flush_dcache_page(dst);
-- } else
-+ } else {
- copy_user_highpage(dst, src, va, vma);
-+ uksm_cow_page(vma, src);
-+ }
- }
-
- static gfp_t __get_fault_gfp_mask(struct vm_area_struct *vma)
-@@ -2468,6 +2497,7 @@
- vmf->address);
- if (!new_page)
- goto oom;
-+ uksm_cow_pte(vma, vmf->orig_pte);
- } else {
- new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma,
- vmf->address);
-@@ -2494,7 +2524,9 @@
- mm_counter_file(old_page));
- inc_mm_counter_fast(mm, MM_ANONPAGES);
- }
-+ uksm_bugon_zeropage(vmf->orig_pte);
- } else {
-+ uksm_unmap_zero_page(vmf->orig_pte);
- inc_mm_counter_fast(mm, MM_ANONPAGES);
- }
- flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
-diff -Nur a/mm/mmap.c b/mm/mmap.c
---- a/mm/mmap.c 2018-05-26 19:24:34.847783423 +0100
-+++ b/mm/mmap.c 2018-05-26 19:30:55.788140473 +0100
-@@ -45,6 +45,7 @@
- #include <linux/moduleparam.h>
- #include <linux/pkeys.h>
- #include <linux/oom.h>
-+#include <linux/ksm.h>
-
- #include <linux/uaccess.h>
- #include <asm/cacheflush.h>
-@@ -173,6 +174,7 @@
- if (vma->vm_file)
- fput(vma->vm_file);
- mpol_put(vma_policy(vma));
-+ uksm_remove_vma(vma);
- kmem_cache_free(vm_area_cachep, vma);
- return next;
- }
-@@ -699,9 +701,16 @@
- long adjust_next = 0;
- int remove_next = 0;
-
-+/*
-+ * to avoid deadlock, ksm_remove_vma must be done before any spin_lock is
-+ * acquired
-+ */
-+ uksm_remove_vma(vma);
-+
- if (next && !insert) {
- struct vm_area_struct *exporter = NULL, *importer = NULL;
-
-+ uksm_remove_vma(next);
- if (end >= next->vm_end) {
- /*
- * vma expands, overlapping all the next, and
-@@ -834,6 +843,7 @@
- end_changed = true;
- }
- vma->vm_pgoff = pgoff;
-+
- if (adjust_next) {
- next->vm_start += adjust_next << PAGE_SHIFT;
- next->vm_pgoff += adjust_next;
-@@ -939,6 +949,7 @@
- if (remove_next == 2) {
- remove_next = 1;
- end = next->vm_end;
-+ uksm_remove_vma(next);
- goto again;
- }
- else if (next)
-@@ -965,10 +976,14 @@
- */
- VM_WARN_ON(mm->highest_vm_end != vm_end_gap(vma));
- }
-+ } else {
-+ if (next && !insert)
-+ uksm_vma_add_new(next);
- }
- if (insert && file)
- uprobe_mmap(insert);
-
-+ uksm_vma_add_new(vma);
- validate_mm(mm);
-
- return 0;
-@@ -1385,6 +1400,9 @@
- vm_flags |= calc_vm_prot_bits(prot, pkey) | calc_vm_flag_bits(flags) |
- mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
-
-+ /* If uksm is enabled, we add VM_MERGEABLE to new VMAs. */
-+ uksm_vm_flags_mod(&vm_flags);
-+
- if (flags & MAP_LOCKED)
- if (!can_do_mlock())
- return -EPERM;
-@@ -1724,6 +1742,7 @@
- allow_write_access(file);
- }
- file = vma->vm_file;
-+ uksm_vma_add_new(vma);
- out:
- perf_event_mmap(vma);
-
-@@ -1765,6 +1784,7 @@
- if (vm_flags & VM_DENYWRITE)
- allow_write_access(file);
- free_vma:
-+ uksm_remove_vma(vma);
- kmem_cache_free(vm_area_cachep, vma);
- unacct_error:
- if (charged)
-@@ -2589,6 +2609,8 @@
- else
- err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
-
-+ uksm_vma_add_new(new);
-+
- /* Success. */
- if (!err)
- return 0;
-@@ -2881,6 +2903,7 @@
- if ((flags & (~VM_EXEC)) != 0)
- return -EINVAL;
- flags |= VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
-+ uksm_vm_flags_mod(&flags);
-
- error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
- if (offset_in_page(error))
-@@ -2938,6 +2961,7 @@
- vma->vm_flags = flags;
- vma->vm_page_prot = vm_get_page_prot(flags);
- vma_link(mm, vma, prev, rb_link, rb_parent);
-+ uksm_vma_add_new(vma);
- out:
- perf_event_mmap(vma);
- mm->total_vm += len >> PAGE_SHIFT;
-@@ -3015,6 +3039,12 @@
- up_write(&mm->mmap_sem);
- }
-
-+ /*
-+ * Taking write lock on mmap_sem does not harm others,
-+ * but it's crucial for uksm to avoid races.
-+ */
-+ down_write(&mm->mmap_sem);
-+
- if (mm->locked_vm) {
- vma = mm->mmap;
- while (vma) {
-@@ -3049,6 +3079,11 @@
- vma = remove_vma(vma);
- }
- vm_unacct_memory(nr_accounted);
-+
-+ mm->mmap = NULL;
-+ mm->mm_rb = RB_ROOT;
-+ vmacache_invalidate(mm);
-+ up_write(&mm->mmap_sem);
- }
-
- /* Insert vm structure into process list sorted by address
-@@ -3158,6 +3193,7 @@
- new_vma->vm_ops->open(new_vma);
- vma_link(mm, new_vma, prev, rb_link, rb_parent);
- *need_rmap_locks = false;
-+ uksm_vma_add_new(new_vma);
- }
- return new_vma;
-
-@@ -3308,6 +3344,7 @@
- vm_stat_account(mm, vma->vm_flags, len >> PAGE_SHIFT);
-
- perf_event_mmap(vma);
-+ uksm_vma_add_new(vma);
-
- return vma;
-
-diff -Nur a/mm/rmap.c b/mm/rmap.c
---- a/mm/rmap.c 2018-05-25 15:18:02.000000000 +0100
-+++ b/mm/rmap.c 2018-05-26 19:30:55.788140473 +0100
-@@ -1013,9 +1013,9 @@
-
- /**
- * __page_set_anon_rmap - set up new anonymous rmap
-- * @page: Page to add to rmap
-+ * @page: Page to add to rmap
- * @vma: VM area to add page to.
-- * @address: User virtual address of the mapping
-+ * @address: User virtual address of the mapping
- * @exclusive: the page is exclusively owned by the current process
- */
- static void __page_set_anon_rmap(struct page *page,
-diff -Nur a/mm/uksm.c b/mm/uksm.c
---- a/mm/uksm.c 1970-01-01 01:00:00.000000000 +0100
-+++ b/mm/uksm.c 2018-05-26 19:30:55.791140570 +0100
-@@ -0,0 +1,5584 @@
-+/*
-+ * Ultra KSM. Copyright (C) 2011-2012 Nai Xia
-+ *
-+ * This is an improvement upon KSM. Some basic data structures and routines
-+ * are borrowed from ksm.c .
-+ *
-+ * Its new features:
-+ * 1. Full system scan:
-+ * It automatically scans all user processes' anonymous VMAs. Kernel-user
-+ * interaction to submit a memory area to KSM is no longer needed.
-+ *
-+ * 2. Rich area detection:
-+ * It automatically detects rich areas containing abundant duplicated
-+ * pages. Rich areas are given a full scan speed. Poor areas are
-+ * sampled at a reasonable speed with very low CPU consumption.
-+ *
-+ * 3. Ultra Per-page scan speed improvement:
-+ * A new hash algorithm is proposed. As a result, on a machine with
-+ * Core(TM)2 Quad Q9300 CPU in 32-bit mode with 800MHz DDR2 main memory,
-+ * it can scan memory areas that do not contain duplicated pages at
-+ * 627MB/sec ~ 2445MB/sec and can merge duplicated areas at
-+ * 477MB/sec ~ 923MB/sec.
-+ *
-+ * 4. Thrashing area avoidance:
-+ * A thrashing area (a VMA with frequent KSM page break-outs) can be
-+ * filtered out. My benchmark shows it's more efficient than KSM's per-page
-+ * hash value based volatile page detection.
-+ *
-+ *
-+ * 5. Misc changes upon KSM:
-+ * * It has a fully x86-optimized memcmp dedicated to 4-byte-aligned page
-+ * comparison. It's much faster than the default C version on x86.
-+ * * rmap_item now has a struct page * member to loosely cache an
-+ * address-->page mapping, which avoids many time-costly calls to
-+ * follow_page().
-+ * * The VMA creation/exit procedures are hooked to let the Ultra KSM know.
-+ * * try_to_merge_two_pages() can now revert a pte if it fails. No break_
-+ * ksm is needed for this case.
-+ *
-+ * 6. Full Zero Page consideration (contributed by Figo Zhang)
-+ * Now uksmd considers full zero pages as special pages and merges them
-+ * into a special unswappable uksm zero page.
-+ */
-+
-+#include <linux/errno.h>
-+#include <linux/mm.h>
-+#include <linux/fs.h>
-+#include <linux/mman.h>
-+#include <linux/sched.h>
-+#include <linux/sched/mm.h>
-+#include <linux/sched/coredump.h>
-+#include <linux/sched/cputime.h>
-+#include <linux/rwsem.h>
-+#include <linux/pagemap.h>
-+#include <linux/rmap.h>
-+#include <linux/spinlock.h>
-+#include <linux/jhash.h>
-+#include <linux/delay.h>
-+#include <linux/kthread.h>
-+#include <linux/wait.h>
-+#include <linux/slab.h>
-+#include <linux/rbtree.h>
-+#include <linux/memory.h>
-+#include <linux/mmu_notifier.h>
-+#include <linux/swap.h>
-+#include <linux/ksm.h>
-+#include <linux/crypto.h>
-+#include <linux/scatterlist.h>
-+#include <crypto/hash.h>
-+#include <linux/random.h>
-+#include <linux/math64.h>
-+#include <linux/gcd.h>
-+#include <linux/freezer.h>
-+#include <linux/oom.h>
-+#include <linux/numa.h>
-+#include <linux/sradix-tree.h>
-+
-+#include <asm/tlbflush.h>
-+#include "internal.h"
-+
-+#ifdef CONFIG_X86
-+#undef memcmp
-+
-+#ifdef CONFIG_X86_32
-+#define memcmp memcmpx86_32
-+/*
-+ * Compare 4-byte-aligned address s1 and s2, with length n
-+ */
-+int memcmpx86_32(void *s1, void *s2, size_t n)
-+{
-+ size_t num = n / 4;
-+ register int res;
-+
-+ __asm__ __volatile__
-+ (
-+ "testl %3,%3\n\t"
-+ "repe; cmpsd\n\t"
-+ "je 1f\n\t"
-+ "sbbl %0,%0\n\t"
-+ "orl $1,%0\n"
-+ "1:"
-+ : "=&a" (res), "+&S" (s1), "+&D" (s2), "+&c" (num)
-+ : "0" (0)
-+ : "cc");
-+
-+ return res;
-+}
-+
-+/*
-+ * Check whether the page is all zero.
-+ */
-+static int is_full_zero(const void *s1, size_t len)
-+{
-+ unsigned char same;
-+
-+ len /= 4;
-+
-+ __asm__ __volatile__
-+ ("repe; scasl;"
-+ "sete %0"
-+ : "=qm" (same), "+D" (s1), "+c" (len)
-+ : "a" (0)
-+ : "cc");
-+
-+ return same;
-+}
-+
-+
-+#elif defined(CONFIG_X86_64)
-+#define memcmp memcmpx86_64
-+/*
-+ * Compare 8-byte-aligned address s1 and s2, with length n
-+ */
-+int memcmpx86_64(void *s1, void *s2, size_t n)
-+{
-+ size_t num = n / 8;
-+ register int res;
-+
-+ __asm__ __volatile__
-+ (
-+ "testq %q3,%q3\n\t"
-+ "repe; cmpsq\n\t"
-+ "je 1f\n\t"
-+ "sbbq %q0,%q0\n\t"
-+ "orq $1,%q0\n"
-+ "1:"
-+ : "=&a" (res), "+&S" (s1), "+&D" (s2), "+&c" (num)
-+ : "0" (0)
-+ : "cc");
-+
-+ return res;
-+}
-+
-+static int is_full_zero(const void *s1, size_t len)
-+{
-+ unsigned char same;
-+
-+ len /= 8;
-+
-+ __asm__ __volatile__
-+ ("repe; scasq;"
-+ "sete %0"
-+ : "=qm" (same), "+D" (s1), "+c" (len)
-+ : "a" (0)
-+ : "cc");
-+
-+ return same;
-+}
-+
-+#endif
-+#else
-+static int is_full_zero(const void *s1, size_t len)
-+{
-+ unsigned long *src = s1;
-+ int i;
-+
-+ len /= sizeof(*src);
-+
-+ for (i = 0; i < len; i++) {
-+ if (src[i])
-+ return 0;
-+ }
-+
-+ return 1;
-+}
-+#endif
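/*
 * An illustrative sketch, not part of this patch: all three is_full_zero()
 * variants above agree in behaviour. A minimal userspace check of that
 * contract, assuming a 4 KiB page, might look like this:
 */
#include <assert.h>
#include <stddef.h>

static int is_full_zero_ref(const void *s1, size_t len)
{
	const unsigned char *p = s1;
	size_t i;

	for (i = 0; i < len; i++)
		if (p[i])
			return 0;	/* found a non-zero byte */
	return 1;			/* page is entirely zero */
}

int main(void)
{
	static unsigned char page[4096];	/* zero-initialized */

	assert(is_full_zero_ref(page, sizeof(page)) == 1);
	page[100] = 1;				/* dirty a single byte */
	assert(is_full_zero_ref(page, sizeof(page)) == 0);
	return 0;
}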
-+
-+#define UKSM_RUNG_ROUND_FINISHED (1 << 0)
-+#define TIME_RATIO_SCALE 10000
-+
-+#define SLOT_TREE_NODE_SHIFT 8
-+#define SLOT_TREE_NODE_STORE_SIZE (1UL << SLOT_TREE_NODE_SHIFT)
-+struct slot_tree_node {
-+ unsigned long size;
-+ struct sradix_tree_node snode;
-+ void *stores[SLOT_TREE_NODE_STORE_SIZE];
-+};
-+
-+static struct kmem_cache *slot_tree_node_cachep;
-+
-+static struct sradix_tree_node *slot_tree_node_alloc(void)
-+{
-+ struct slot_tree_node *p;
-+
-+ p = kmem_cache_zalloc(slot_tree_node_cachep, GFP_KERNEL |
-+ __GFP_NORETRY | __GFP_NOWARN);
-+ if (!p)
-+ return NULL;
-+
-+ return &p->snode;
-+}
-+
-+static void slot_tree_node_free(struct sradix_tree_node *node)
-+{
-+ struct slot_tree_node *p;
-+
-+ p = container_of(node, struct slot_tree_node, snode);
-+ kmem_cache_free(slot_tree_node_cachep, p);
-+}
-+
-+static void slot_tree_node_extend(struct sradix_tree_node *parent,
-+ struct sradix_tree_node *child)
-+{
-+ struct slot_tree_node *p, *c;
-+
-+ p = container_of(parent, struct slot_tree_node, snode);
-+ c = container_of(child, struct slot_tree_node, snode);
-+
-+ p->size += c->size;
-+}
-+
-+void slot_tree_node_assign(struct sradix_tree_node *node,
-+ unsigned int index, void *item)
-+{
-+ struct vma_slot *slot = item;
-+ struct slot_tree_node *cur;
-+
-+ slot->snode = node;
-+ slot->sindex = index;
-+
-+ while (node) {
-+ cur = container_of(node, struct slot_tree_node, snode);
-+ cur->size += slot->pages;
-+ node = node->parent;
-+ }
-+}
-+
-+void slot_tree_node_rm(struct sradix_tree_node *node, unsigned int offset)
-+{
-+ struct vma_slot *slot;
-+ struct slot_tree_node *cur;
-+ unsigned long pages;
-+
-+ if (node->height == 1) {
-+ slot = node->stores[offset];
-+ pages = slot->pages;
-+ } else {
-+ cur = container_of(node->stores[offset],
-+ struct slot_tree_node, snode);
-+ pages = cur->size;
-+ }
-+
-+ while (node) {
-+ cur = container_of(node, struct slot_tree_node, snode);
-+ cur->size -= pages;
-+ node = node->parent;
-+ }
-+}
-+
-+unsigned long slot_iter_index;
-+int slot_iter(void *item, unsigned long height)
-+{
-+ struct slot_tree_node *node;
-+ struct vma_slot *slot;
-+
-+ if (height == 1) {
-+ slot = item;
-+ if (slot_iter_index < slot->pages) {
-+ /*in this one*/
-+ return 1;
-+ } else {
-+ slot_iter_index -= slot->pages;
-+ return 0;
-+ }
-+
-+ } else {
-+ node = container_of(item, struct slot_tree_node, snode);
-+ if (slot_iter_index < node->size) {
-+ /*in this one*/
-+ return 1;
-+ } else {
-+ slot_iter_index -= node->size;
-+ return 0;
-+ }
-+ }
-+}
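/*
 * An illustrative sketch, not part of this patch: slot_iter() performs a
 * weighted descent. Given a page index drawn from [0, total_pages), the
 * walk subtracts each subtree's page count until the index falls inside
 * one slot, so slots are selected with probability proportional to their
 * size. A flattened, hypothetical equivalent over a plain array:
 */
struct demo_slot { unsigned long pages; };

static struct demo_slot *pick_weighted(struct demo_slot *slots, int nr,
				       unsigned long index)
{
	int i;

	for (i = 0; i < nr; i++) {
		if (index < slots[i].pages)
			return &slots[i];	/* index lands in this slot */
		index -= slots[i].pages;	/* skip past this slot */
	}
	return NULL;	/* index was >= total pages */
}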
-+
-+
-+static inline void slot_tree_init_root(struct sradix_tree_root *root)
-+{
-+ init_sradix_tree_root(root, SLOT_TREE_NODE_SHIFT);
-+ root->alloc = slot_tree_node_alloc;
-+ root->free = slot_tree_node_free;
-+ root->extend = slot_tree_node_extend;
-+ root->assign = slot_tree_node_assign;
-+ root->rm = slot_tree_node_rm;
-+}
-+
-+void slot_tree_init(void)
-+{
-+ slot_tree_node_cachep = kmem_cache_create("slot_tree_node",
-+ sizeof(struct slot_tree_node), 0,
-+ SLAB_PANIC | SLAB_RECLAIM_ACCOUNT,
-+ NULL);
-+}
-+
-+
-+/* Each rung of this ladder is a list of VMAs having the same scan ratio */
-+struct scan_rung {
-+ //struct list_head scanned_list;
-+ struct sradix_tree_root vma_root;
-+ struct sradix_tree_root vma_root2;
-+
-+ struct vma_slot *current_scan;
-+ unsigned long current_offset;
-+
-+ /*
-+ * The initial value for current_offset; it should loop over
-+ * [0, step - 1] to give every slot a chance to be scanned.
-+ */
-+ unsigned long offset_init;
-+ unsigned long step; /* dynamic step for current_offset */
-+ unsigned int flags;
-+ unsigned long pages_to_scan;
-+ //unsigned long fully_scanned_slots;
-+ /*
-+ * A little bit tricky - if cpu_ratio > 0, then the value is the
-+ * cpu time ratio it can spend in rung_i for every scan
-+ * period. If < 0, then it is the cpu time ratio relative to the
-+ * max cpu percentage the user specified. Both are in units of
-+ * 1/TIME_RATIO_SCALE.
-+ */
-+ int cpu_ratio;
-+
-+ /*
-+ * How long will it take for all slots in this rung to be fully
-+ * scanned? If it's zero, we don't care about the cover time:
-+ * the rung is considered fully scanned.
-+ */
-+ unsigned int cover_msecs;
-+ //unsigned long vma_num;
-+ //unsigned long pages; /* Sum of all slot's pages in rung */
-+};
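/*
 * An illustrative sketch, not part of this patch, of how a rung's
 * cpu_ratio is meant to be read per the comment above; the helper name
 * is hypothetical. Values are in units of 1/TIME_RATIO_SCALE.
 */
static long rung_effective_ratio(int cpu_ratio, unsigned int max_cpu_percent)
{
	if (cpu_ratio >= 0)
		return cpu_ratio;	/* absolute share of scan time */

	/* negative: a fraction of the user-specified max CPU percentage,
	 * e.g. -10000 with max_cpu_percent = 95 yields 9500, i.e. 95%.
	 */
	return (long)(-cpu_ratio) * max_cpu_percent / 100;
}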
-+
-+/**
-+ * node of either the stable or unstable rbtree
-+ *
-+ */
-+struct tree_node {
-+ struct rb_node node; /* link in the main (un)stable rbtree */
-+ struct rb_root sub_root; /* rb_root for sublevel collision rbtree */
-+ u32 hash;
-+ unsigned long count; /* TODO: merged with sub_root */
-+ struct list_head all_list; /* all tree nodes in stable/unstable tree */
-+};
-+
-+/**
-+ * struct stable_node - node of the stable rbtree
-+ * @node: rb node of this ksm page in the stable tree
-+ * @hlist: hlist head of rmap_items using this ksm page
-+ * @kpfn: page frame number of this ksm page
-+ */
-+struct stable_node {
-+ struct rb_node node; /* link in sub-rbtree */
-+ struct tree_node *tree_node; /* it's tree node root in stable tree, NULL if it's in hell list */
-+ struct hlist_head hlist;
-+ unsigned long kpfn;
-+ u32 hash_max; /* if ==0 then it's not been calculated yet */
-+ struct list_head all_list; /* in a list for all stable nodes */
-+};
-+
-+/**
-+ * struct node_vma - group rmap_items linked in the same stable
-+ * node together.
-+ */
-+struct node_vma {
-+ union {
-+ struct vma_slot *slot;
-+ unsigned long key; /* slot is used as key sorted on hlist */
-+ };
-+ struct hlist_node hlist;
-+ struct hlist_head rmap_hlist;
-+ struct stable_node *head;
-+};
-+
-+/**
-+ * struct rmap_item - reverse mapping item for virtual addresses
-+ * @rmap_list: next rmap_item in mm_slot's singly-linked rmap_list
-+ * @anon_vma: pointer to anon_vma for this mm,address, when in stable tree
-+ * @mm: the memory structure this rmap_item is pointing into
-+ * @address: the virtual address this rmap_item tracks (+ flags in low bits)
-+ * @node: rb node of this rmap_item in the unstable tree
-+ * @head: pointer to stable_node heading this list in the stable tree
-+ * @hlist: link into hlist of rmap_items hanging off that stable_node
-+ */
-+struct rmap_item {
-+ struct vma_slot *slot;
-+ struct page *page;
-+ unsigned long address; /* + low bits used for flags below */
-+ unsigned long hash_round;
-+ unsigned long entry_index;
-+ union {
-+ struct {/* when in unstable tree */
-+ struct rb_node node;
-+ struct tree_node *tree_node;
-+ u32 hash_max;
-+ };
-+ struct { /* when in stable tree */
-+ struct node_vma *head;
-+ struct hlist_node hlist;
-+ struct anon_vma *anon_vma;
-+ };
-+ };
-+} __aligned(4);
-+
-+struct rmap_list_entry {
-+ union {
-+ struct rmap_item *item;
-+ unsigned long addr;
-+ };
-+ /* lowest bit is used for is_addr tag */
-+} __aligned(4); /* 4-aligned to fit into pages */
-+
-+
-+/* Basic data structure definition ends */
-+
-+
-+/*
-+ * Flags for rmap_item to judge if it's listed in the stable/unstable tree.
-+ * The flags use the low bits of rmap_item.address
-+ */
-+#define UNSTABLE_FLAG 0x1
-+#define STABLE_FLAG 0x2
-+#define get_rmap_addr(x) ((x)->address & PAGE_MASK)
-+
-+/*
-+ * rmap_list_entry helpers
-+ */
-+#define IS_ADDR_FLAG 1
-+#define is_addr(ptr) ((unsigned long)(ptr) & IS_ADDR_FLAG)
-+#define set_is_addr(ptr) ((ptr) |= IS_ADDR_FLAG)
-+#define get_clean_addr(ptr) (((ptr) & ~(__typeof__(ptr))IS_ADDR_FLAG))
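/*
 * An illustrative sketch, not part of this patch: because rmap_items are
 * at least 4-byte aligned, their lowest pointer bit is free, and the
 * helpers above use it to tag a union member as a bare address. The
 * function names below are hypothetical.
 */
static void entry_store_addr(struct rmap_list_entry *entry, unsigned long addr)
{
	set_is_addr(addr);	/* set the low tag bit */
	entry->addr = addr;
}

static unsigned long entry_load_addr(struct rmap_list_entry *entry)
{
	BUG_ON(!is_addr(entry->addr));		/* must be a tagged address */
	return get_clean_addr(entry->addr);	/* strip the tag bit */
}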
-+
-+
-+/*
-+ * High speed caches for frequently allocated and freed structs
-+ */
-+static struct kmem_cache *rmap_item_cache;
-+static struct kmem_cache *stable_node_cache;
-+static struct kmem_cache *node_vma_cache;
-+static struct kmem_cache *vma_slot_cache;
-+static struct kmem_cache *tree_node_cache;
-+#define UKSM_KMEM_CACHE(__struct, __flags) kmem_cache_create("uksm_"#__struct,\
-+ sizeof(struct __struct), __alignof__(struct __struct),\
-+ (__flags), NULL)
-+
-+/* Array of all scan_rung, uksm_scan_ladder[0] having the minimum scan ratio */
-+#define SCAN_LADDER_SIZE 4
-+static struct scan_rung uksm_scan_ladder[SCAN_LADDER_SIZE];
-+
-+/* The evaluation rounds uksmd has finished */
-+static unsigned long long uksm_eval_round = 1;
-+
-+/*
-+ * we add 1 to this var when we consider we should rebuild the whole
-+ * unstable tree.
-+ */
-+static unsigned long uksm_hash_round = 1;
-+
-+/*
-+ * How many times the whole memory is scanned.
-+ */
-+static unsigned long long fully_scanned_round = 1;
-+
-+/* The total number of virtual pages of all vma slots */
-+static u64 uksm_pages_total;
-+
-+/* The number of pages scanned since startup */
-+static u64 uksm_pages_scanned;
-+
-+static u64 scanned_virtual_pages;
-+
-+/* The number of pages scanned since the last encode_benefit call */
-+static u64 uksm_pages_scanned_last;
-+
-+/* If the scanned number is too large, we encode it here */
-+static u64 pages_scanned_stored;
-+
-+static unsigned long pages_scanned_base;
-+
-+/* The number of nodes in the stable tree */
-+static unsigned long uksm_pages_shared;
-+
-+/* The number of page slots additionally sharing those nodes */
-+static unsigned long uksm_pages_sharing;
-+
-+/* The number of nodes in the unstable tree */
-+static unsigned long uksm_pages_unshared;
-+
-+/*
-+ * Milliseconds ksmd should sleep between scans,
-+ * >= 100ms to be consistent with
-+ * scan_time_to_sleep_msec()
-+ */
-+static unsigned int uksm_sleep_jiffies;
-+
-+/* The real value for the uksmd next sleep */
-+static unsigned int uksm_sleep_real;
-+
-+/* Saved value for user input uksm_sleep_jiffies when it's enlarged */
-+static unsigned int uksm_sleep_saved;
-+
-+/* Max percentage of cpu utilization ksmd can take to scan in one batch */
-+static unsigned int uksm_max_cpu_percentage;
-+
-+static int uksm_cpu_governor;
-+
-+static char *uksm_cpu_governor_str[4] = { "full", "medium", "low", "quiet" };
-+
-+struct uksm_cpu_preset_s {
-+ int cpu_ratio[SCAN_LADDER_SIZE];
-+ unsigned int cover_msecs[SCAN_LADDER_SIZE];
-+ unsigned int max_cpu; /* percentage */
-+};
-+
-+struct uksm_cpu_preset_s uksm_cpu_preset[4] = {
-+ { {20, 40, -2500, -10000}, {1000, 500, 200, 50}, 95},
-+ { {20, 30, -2500, -10000}, {1000, 500, 400, 100}, 50},
-+ { {10, 20, -5000, -10000}, {1500, 1000, 1000, 250}, 20},
-+ { {10, 20, 40, 75}, {2000, 1000, 1000, 1000}, 1},
-+};
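/*
 * An illustrative reading of the table above, not part of this patch:
 * with the "quiet" preset (index 3), the four rungs get absolute ratios
 * of 10/20/40/75 out of TIME_RATIO_SCALE (10000), i.e. 0.1% to 0.75% of
 * scan time each, with total CPU use capped at max_cpu = 1%.
 */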
-+
-+/* The default value for uksm_ema_page_time if it's not initialized */
-+#define UKSM_PAGE_TIME_DEFAULT 500
-+
-+/* Cost to scan one page, as an exponential moving average, in nsecs */
-+static unsigned long uksm_ema_page_time = UKSM_PAGE_TIME_DEFAULT;
-+
-+/* The exponential moving average alpha weight, in percent. */
-+#define EMA_ALPHA 20
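/*
 * An illustrative sketch, not part of this patch: the usual EMA update
 * with a percentage weight, as suggested by EMA_ALPHA above. The helper
 * name is hypothetical; samples are per-page scan costs in nsecs.
 */
static unsigned long ema_update(unsigned long cur_ema, unsigned long sample)
{
	/* new = EMA_ALPHA% of the fresh sample + the rest from history */
	return (EMA_ALPHA * sample + (100 - EMA_ALPHA) * cur_ema) / 100;
}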
-+
-+/*
-+ * The threshold used to filter out thrashing areas,
-+ * If it == 0, filtering is disabled, otherwise it's the percentage up-bound
-+ * of the thrashing ratio of all areas. Any area with a bigger thrashing ratio
-+ * will be considered as having a zero duplication ratio.
-+ */
-+static unsigned int uksm_thrash_threshold = 50;
-+
-+/* The dedup ratio above which an area is considered abundant */
-+static unsigned int uksm_abundant_threshold = 10;
-+
-+/* All slots having merged pages in this eval round. */
-+struct list_head vma_slot_dedup = LIST_HEAD_INIT(vma_slot_dedup);
-+
-+/* How many times the ksmd has slept since startup */
-+static unsigned long long uksm_sleep_times;
-+
-+#define UKSM_RUN_STOP 0
-+#define UKSM_RUN_MERGE 1
-+static unsigned int uksm_run = 1;
-+
-+static DECLARE_WAIT_QUEUE_HEAD(uksm_thread_wait);
-+static DEFINE_MUTEX(uksm_thread_mutex);
-+
-+/*
-+ * List vma_slot_new is for newly created vma_slot waiting to be added by
-+ * ksmd. If one cannot be added (e.g. because it's too small), it's moved to
-+ * vma_slot_noadd. vma_slot_del is the list for vma_slot whose corresponding
-+ * VMA has been removed/freed.
-+ */
-+struct list_head vma_slot_new = LIST_HEAD_INIT(vma_slot_new);
-+struct list_head vma_slot_noadd = LIST_HEAD_INIT(vma_slot_noadd);
-+struct list_head vma_slot_del = LIST_HEAD_INIT(vma_slot_del);
-+static DEFINE_SPINLOCK(vma_slot_list_lock);
-+
-+/* The unstable tree heads */
-+static struct rb_root root_unstable_tree = RB_ROOT;
-+
-+/*
-+ * All tree_nodes are in a list to be freed at once when the unstable tree is
-+ * freed after each scan round.
-+ */
-+static struct list_head unstable_tree_node_list =
-+ LIST_HEAD_INIT(unstable_tree_node_list);
-+
-+/* List contains all stable nodes */
-+static struct list_head stable_node_list = LIST_HEAD_INIT(stable_node_list);
-+
-+/*
-+ * When the hash strength is changed, the stable tree must be delta_hashed and
-+ * re-structured. We use two sets of the structs below to speed up the
-+ * re-structuring of the stable tree.
-+ */
-+static struct list_head
-+stable_tree_node_list[2] = {LIST_HEAD_INIT(stable_tree_node_list[0]),
-+ LIST_HEAD_INIT(stable_tree_node_list[1])};
-+
-+static struct list_head *stable_tree_node_listp = &stable_tree_node_list[0];
-+static struct rb_root root_stable_tree[2] = {RB_ROOT, RB_ROOT};
-+static struct rb_root *root_stable_treep = &root_stable_tree[0];
-+static unsigned long stable_tree_index;
-+
-+/* The hash strength needed to hash a full page */
-+#define HASH_STRENGTH_FULL (PAGE_SIZE / sizeof(u32))
-+
-+/* The hash strength needed for loop-back hashing */
-+#define HASH_STRENGTH_MAX (HASH_STRENGTH_FULL + 10)
-+
-+/* The random offsets in a page */
-+static u32 *random_nums;
-+
-+/* The hash strength */
-+static unsigned long hash_strength = HASH_STRENGTH_FULL >> 4;
-+
-+/* The delta value each time the hash strength increases or decreases */
-+static unsigned long hash_strength_delta;
-+#define HASH_STRENGTH_DELTA_MAX 5
-+
-+/* The time we have saved due to random_sample_hash */
-+static u64 rshash_pos;
-+
-+/* The time we have wasted due to hash collision */
-+static u64 rshash_neg;
-+
-+struct uksm_benefit {
-+ u64 pos;
-+ u64 neg;
-+ u64 scanned;
-+ unsigned long base;
-+} benefit;
-+
-+/*
-+ * The relative cost of memcmp, compared to 1 time unit of random sample
-+ * hash; this value is measured when the ksm module is initialized
-+ */
-+static unsigned long memcmp_cost;
-+
-+static unsigned long rshash_neg_cont_zero;
-+static unsigned long rshash_cont_obscure;
-+
-+/* The possible states of hash strength adjustment heuristic */
-+enum rshash_states {
-+ RSHASH_STILL,
-+ RSHASH_TRYUP,
-+ RSHASH_TRYDOWN,
-+ RSHASH_NEW,
-+ RSHASH_PRE_STILL,
-+};
-+
-+/* The possible direction we are about to adjust hash strength */
-+enum rshash_direct {
-+ GO_UP,
-+ GO_DOWN,
-+ OBSCURE,
-+ STILL,
-+};
-+
-+/* random sampling hash state machine */
-+static struct {
-+ enum rshash_states state;
-+ enum rshash_direct pre_direct;
-+ u8 below_count;
-+ /* Keep a lookup window of size 5; if above_count/below_count > 3
-+ * in this window, we stop trying.
-+ */
-+ u8 lookup_window_index;
-+ u64 stable_benefit;
-+ unsigned long turn_point_down;
-+ unsigned long turn_benefit_down;
-+ unsigned long turn_point_up;
-+ unsigned long turn_benefit_up;
-+ unsigned long stable_point;
-+} rshash_state;
-+
-+/*zero page hash table, hash_strength [0 ~ HASH_STRENGTH_MAX]*/
-+static u32 *zero_hash_table;
-+
-+static inline struct node_vma *alloc_node_vma(void)
-+{
-+ struct node_vma *node_vma;
-+
-+ node_vma = kmem_cache_zalloc(node_vma_cache, GFP_KERNEL |
-+ __GFP_NORETRY | __GFP_NOWARN);
-+ if (node_vma) {
-+ INIT_HLIST_HEAD(&node_vma->rmap_hlist);
-+ INIT_HLIST_NODE(&node_vma->hlist);
-+ }
-+ return node_vma;
-+}
-+
-+static inline void free_node_vma(struct node_vma *node_vma)
-+{
-+ kmem_cache_free(node_vma_cache, node_vma);
-+}
-+
-+
-+static inline struct vma_slot *alloc_vma_slot(void)
-+{
-+ struct vma_slot *slot;
-+
-+ /*
-+ * In case ksm is not initialized yet.
-+ * TODO: reconsider the call site of uksm_init() in the future.
-+ */
-+ if (!vma_slot_cache)
-+ return NULL;
-+
-+ slot = kmem_cache_zalloc(vma_slot_cache, GFP_KERNEL |
-+ __GFP_NORETRY | __GFP_NOWARN);
-+ if (slot) {
-+ INIT_LIST_HEAD(&slot->slot_list);
-+ INIT_LIST_HEAD(&slot->dedup_list);
-+ slot->flags |= UKSM_SLOT_NEED_RERAND;
-+ }
-+ return slot;
-+}
-+
-+static inline void free_vma_slot(struct vma_slot *vma_slot)
-+{
-+ kmem_cache_free(vma_slot_cache, vma_slot);
-+}
-+
-+
-+
-+static inline struct rmap_item *alloc_rmap_item(void)
-+{
-+ struct rmap_item *rmap_item;
-+
-+ rmap_item = kmem_cache_zalloc(rmap_item_cache, GFP_KERNEL |
-+ __GFP_NORETRY | __GFP_NOWARN);
-+ if (rmap_item) {
-+ /* BUG if the lowest bit is not clear, since it is used for flags */
-+ BUG_ON(is_addr(rmap_item));
-+ }
-+ return rmap_item;
-+}
-+
-+static inline void free_rmap_item(struct rmap_item *rmap_item)
-+{
-+ rmap_item->slot = NULL; /* debug safety */
-+ kmem_cache_free(rmap_item_cache, rmap_item);
-+}
-+
-+static inline struct stable_node *alloc_stable_node(void)
-+{
-+ struct stable_node *node;
-+
-+ node = kmem_cache_alloc(stable_node_cache, GFP_KERNEL |
-+ __GFP_NORETRY | __GFP_NOWARN);
-+ if (!node)
-+ return NULL;
-+
-+ INIT_HLIST_HEAD(&node->hlist);
-+ list_add(&node->all_list, &stable_node_list);
-+ return node;
-+}
-+
-+static inline void free_stable_node(struct stable_node *stable_node)
-+{
-+ list_del(&stable_node->all_list);
-+ kmem_cache_free(stable_node_cache, stable_node);
-+}
-+
-+static inline struct tree_node *alloc_tree_node(struct list_head *list)
-+{
-+ struct tree_node *node;
-+
-+ node = kmem_cache_zalloc(tree_node_cache, GFP_KERNEL |
-+ __GFP_NORETRY | __GFP_NOWARN);
-+ if (!node)
-+ return NULL;
-+
-+ list_add(&node->all_list, list);
-+ return node;
-+}
-+
-+static inline void free_tree_node(struct tree_node *node)
-+{
-+ list_del(&node->all_list);
-+ kmem_cache_free(tree_node_cache, node);
-+}
-+
-+static void uksm_drop_anon_vma(struct rmap_item *rmap_item)
-+{
-+ struct anon_vma *anon_vma = rmap_item->anon_vma;
-+
-+ put_anon_vma(anon_vma);
-+}
-+
-+
-+/**
-+ * Remove a stable node from stable_tree, may unlink from its tree_node and
-+ * may remove its parent tree_node if no other stable node is pending.
-+ *
-+ * @stable_node The node to be removed
-+ * @unlink_rb Will this node be unlinked from the rbtree?
-+ * @remove_tree_node Will its tree_node be removed if empty?
-+ */
-+static void remove_node_from_stable_tree(struct stable_node *stable_node,
-+ int unlink_rb, int remove_tree_node)
-+{
-+ struct node_vma *node_vma;
-+ struct rmap_item *rmap_item;
-+ struct hlist_node *n;
-+
-+ if (!hlist_empty(&stable_node->hlist)) {
-+ hlist_for_each_entry_safe(node_vma, n,
-+ &stable_node->hlist, hlist) {
-+ hlist_for_each_entry(rmap_item, &node_vma->rmap_hlist, hlist) {
-+ uksm_pages_sharing--;
-+
-+ uksm_drop_anon_vma(rmap_item);
-+ rmap_item->address &= PAGE_MASK;
-+ }
-+ free_node_vma(node_vma);
-+ cond_resched();
-+ }
-+
-+ /* the last one is counted as shared */
-+ uksm_pages_shared--;
-+ uksm_pages_sharing++;
-+ }
-+
-+ if (stable_node->tree_node && unlink_rb) {
-+ rb_erase(&stable_node->node,
-+ &stable_node->tree_node->sub_root);
-+
-+ if (RB_EMPTY_ROOT(&stable_node->tree_node->sub_root) &&
-+ remove_tree_node) {
-+ rb_erase(&stable_node->tree_node->node,
-+ root_stable_treep);
-+ free_tree_node(stable_node->tree_node);
-+ } else {
-+ stable_node->tree_node->count--;
-+ }
-+ }
-+
-+ free_stable_node(stable_node);
-+}
-+
-+
-+/*
-+ * get_uksm_page: checks if the page indicated by the stable node
-+ * is still its ksm page, despite having held no reference to it.
-+ * In which case we can trust the content of the page, and it
-+ * returns the gotten page; but if the page has now been zapped,
-+ * remove the stale node from the stable tree and return NULL.
-+ *
-+ * You would expect the stable_node to hold a reference to the ksm page.
-+ * But if it increments the page's count, swapping out has to wait for
-+ * ksmd to come around again before it can free the page, which may take
-+ * seconds or even minutes: much too unresponsive. So instead we use a
-+ * "keyhole reference": access to the ksm page from the stable node peeps
-+ * out through its keyhole to see if that page still holds the right key,
-+ * pointing back to this stable node. This relies on freeing a PageAnon
-+ * page to reset its page->mapping to NULL, and relies on no other use of
-+ * a page to put something that might look like our key in page->mapping.
-+ *
-+ * include/linux/pagemap.h page_cache_get_speculative() is a good reference,
-+ * but this is different - made simpler by uksm_thread_mutex being held, but
-+ * interesting for assuming that no other use of the struct page could ever
-+ * put our expected_mapping into page->mapping (or a field of the union which
-+ * coincides with page->mapping). The RCU calls are not for KSM at all, but
-+ * to keep the page_count protocol described with page_cache_get_speculative.
-+ *
-+ * Note: it is possible that get_uksm_page() will return NULL one moment,
-+ * then page the next, if the page is in between page_freeze_refs() and
-+ * page_unfreeze_refs(): this shouldn't be a problem anywhere, the page
-+ * is on its way to being freed; but it is an anomaly to bear in mind.
-+ *
-+ * @unlink_rb: if the removal of this node will firstly unlink from
-+ * its rbtree. stable_node_reinsert will prevent this when restructuring the
-+ * node from its old tree.
-+ *
-+ * @remove_tree_node: if this is the last one of its tree_node, will the
-+ * tree_node be freed ? If we are inserting stable node, this tree_node may
-+ * be reused, so don't free it.
-+ */
-+static struct page *get_uksm_page(struct stable_node *stable_node,
-+ int unlink_rb, int remove_tree_node)
-+{
-+ struct page *page;
-+ void *expected_mapping;
-+ unsigned long kpfn;
-+
-+ expected_mapping = (void *)((unsigned long)stable_node |
-+ PAGE_MAPPING_KSM);
-+again:
-+ kpfn = READ_ONCE(stable_node->kpfn);
-+ page = pfn_to_page(kpfn);
-+
-+ /*
-+ * page is computed from kpfn, so on most architectures reading
-+ * page->mapping is naturally ordered after reading node->kpfn,
-+ * but on Alpha we need to be more careful.
-+ */
-+ smp_read_barrier_depends();
-+
-+ if (READ_ONCE(page->mapping) != expected_mapping)
-+ goto stale;
-+
-+ /*
-+ * We cannot do anything with the page while its refcount is 0.
-+ * Usually 0 means free, or tail of a higher-order page: in which
-+ * case this node is no longer referenced, and should be freed;
-+ * however, it might mean that the page is under page_freeze_refs().
-+ * The __remove_mapping() case is easy, again the node is now stale;
-+ * but if page is swapcache in migrate_page_move_mapping(), it might
-+ * still be our page, in which case it's essential to keep the node.
-+ */
-+ while (!get_page_unless_zero(page)) {
-+ /*
-+ * Another check for page->mapping != expected_mapping would
-+ * work here too. We have chosen the !PageSwapCache test to
-+ * optimize the common case, when the page is or is about to
-+ * be freed: PageSwapCache is cleared (under spin_lock_irq)
-+ * in the freeze_refs section of __remove_mapping(); but Anon
-+ * page->mapping reset to NULL later, in free_pages_prepare().
-+ */
-+ if (!PageSwapCache(page))
-+ goto stale;
-+ cpu_relax();
-+ }
-+
-+ if (READ_ONCE(page->mapping) != expected_mapping) {
-+ put_page(page);
-+ goto stale;
-+ }
-+
-+ lock_page(page);
-+ if (READ_ONCE(page->mapping) != expected_mapping) {
-+ unlock_page(page);
-+ put_page(page);
-+ goto stale;
-+ }
-+ unlock_page(page);
-+ return page;
-+stale:
-+ /*
-+ * We come here from above when page->mapping or !PageSwapCache
-+ * suggests that the node is stale; but it might be under migration.
-+ * We need smp_rmb(), matching the smp_wmb() in ksm_migrate_page(),
-+ * before checking whether node->kpfn has been changed.
-+ */
-+ smp_rmb();
-+ if (stable_node->kpfn != kpfn)
-+ goto again;
-+
-+ remove_node_from_stable_tree(stable_node, unlink_rb, remove_tree_node);
-+
-+ return NULL;
-+}
-+
-+/*
-+ * Removing rmap_item from stable or unstable tree.
-+ * This function will clean the information from the stable/unstable tree.
-+ */
-+static inline void remove_rmap_item_from_tree(struct rmap_item *rmap_item)
-+{
-+ if (rmap_item->address & STABLE_FLAG) {
-+ struct stable_node *stable_node;
-+ struct node_vma *node_vma;
-+ struct page *page;
-+
-+ node_vma = rmap_item->head;
-+ stable_node = node_vma->head;
-+ page = get_uksm_page(stable_node, 1, 1);
-+ if (!page)
-+ goto out;
-+
-+ /*
-+ * page lock is needed because it's racing with
-+ * try_to_unmap_ksm(), etc.
-+ */
-+ lock_page(page);
-+ hlist_del(&rmap_item->hlist);
-+
-+ if (hlist_empty(&node_vma->rmap_hlist)) {
-+ hlist_del(&node_vma->hlist);
-+ free_node_vma(node_vma);
-+ }
-+ unlock_page(page);
-+
-+ put_page(page);
-+ if (hlist_empty(&stable_node->hlist)) {
-+ /* do NOT call remove_node_from_stable_tree() here,
-+ * it's possible for a forked rmap_item not in
-+ * stable tree while the in-tree rmap_items were
-+ * deleted.
-+ */
-+ uksm_pages_shared--;
-+ } else
-+ uksm_pages_sharing--;
-+
-+
-+ uksm_drop_anon_vma(rmap_item);
-+ } else if (rmap_item->address & UNSTABLE_FLAG) {
-+ if (rmap_item->hash_round == uksm_hash_round) {
-+
-+ rb_erase(&rmap_item->node,
-+ &rmap_item->tree_node->sub_root);
-+ if (RB_EMPTY_ROOT(&rmap_item->tree_node->sub_root)) {
-+ rb_erase(&rmap_item->tree_node->node,
-+ &root_unstable_tree);
-+
-+ free_tree_node(rmap_item->tree_node);
-+ } else
-+ rmap_item->tree_node->count--;
-+ }
-+ uksm_pages_unshared--;
-+ }
-+
-+ rmap_item->address &= PAGE_MASK;
-+ rmap_item->hash_max = 0;
-+
-+out:
-+ cond_resched(); /* we're called from many long loops */
-+}
-+
-+static inline int slot_in_uksm(struct vma_slot *slot)
-+{
-+ return list_empty(&slot->slot_list);
-+}
-+
-+/*
-+ * Test if the mm is exiting
-+ */
-+static inline bool uksm_test_exit(struct mm_struct *mm)
-+{
-+ return atomic_read(&mm->mm_users) == 0;
-+}
-+
-+static inline unsigned long vma_pool_size(struct vma_slot *slot)
-+{
-+ return round_up(sizeof(struct rmap_list_entry) * slot->pages,
-+ PAGE_SIZE) >> PAGE_SHIFT;
-+}
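/*
 * A worked example, not part of this patch: with 4 KiB pages and an
 * assumed 8-byte rmap_list_entry (64-bit), a 1000-page VMA needs
 * round_up(1000 * 8, 4096) >> PAGE_SHIFT = 8192 >> 12 = 2 pool pages
 * to track its entries.
 */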
-+
-+#define CAN_OVERFLOW_U64(x, delta) (U64_MAX - (x) < (delta))
-+
-+/* must be done with sem locked */
-+static int slot_pool_alloc(struct vma_slot *slot)
-+{
-+ unsigned long pool_size;
-+
-+ if (slot->rmap_list_pool)
-+ return 0;
-+
-+ pool_size = vma_pool_size(slot);
-+ slot->rmap_list_pool = kcalloc(pool_size, sizeof(struct page *),
-+ GFP_KERNEL);
-+ if (!slot->rmap_list_pool)
-+ return -ENOMEM;
-+
-+ slot->pool_counts = kcalloc(pool_size, sizeof(unsigned int),
-+ GFP_KERNEL);
-+ if (!slot->pool_counts) {
-+ kfree(slot->rmap_list_pool);
-+ return -ENOMEM;
-+ }
-+
-+ slot->pool_size = pool_size;
-+ BUG_ON(CAN_OVERFLOW_U64(uksm_pages_total, slot->pages));
-+ slot->flags |= UKSM_SLOT_IN_UKSM;
-+ uksm_pages_total += slot->pages;
-+
-+ return 0;
-+}
-+
-+/*
-+ * Called after vma is unlinked from its mm
-+ */
-+void uksm_remove_vma(struct vm_area_struct *vma)
-+{
-+ struct vma_slot *slot;
-+
-+ if (!vma->uksm_vma_slot)
-+ return;
-+
-+ spin_lock(&vma_slot_list_lock);
-+ slot = vma->uksm_vma_slot;
-+ if (!slot)
-+ goto out;
-+
-+ if (slot_in_uksm(slot)) {
-+ /**
-+ * This slot has been added by ksmd, so move to the del list
-+ * waiting for ksmd to free it.
-+ */
-+ list_add_tail(&slot->slot_list, &vma_slot_del);
-+ } else {
-+ /**
-+ * It's still on new list. It's ok to free slot directly.
-+ */
-+ list_del(&slot->slot_list);
-+ free_vma_slot(slot);
-+ }
-+out:
-+ vma->uksm_vma_slot = NULL;
-+ spin_unlock(&vma_slot_list_lock);
-+}
-+
-+/**
-+ * Need to do two things:
-+ * 1. check if slot was moved to del list
-+ * 2. make sure the mmap_sem is manipulated under valid vma.
-+ *
-+ * My concern here is that in some cases, this may cause
-+ * vma_slot_list_lock() waiters to be serialized further by some
-+ * sem->wait_lock; can this really be expensive?
-+ *
-+ *
-+ * @return
-+ * 0: if successfully locked mmap_sem
-+ * -ENOENT: this slot was moved to del list
-+ * -EBUSY: vma lock failed
-+ */
-+static int try_down_read_slot_mmap_sem(struct vma_slot *slot)
-+{
-+ struct vm_area_struct *vma;
-+ struct mm_struct *mm;
-+ struct rw_semaphore *sem;
-+
-+ spin_lock(&vma_slot_list_lock);
-+
-+ /* The slot_list was removed and re-initialized from the new list when
-+ * it entered uksm_list. If it's not empty now, it must have been moved
-+ * to the del list.
-+ */
-+ if (!slot_in_uksm(slot)) {
-+ spin_unlock(&vma_slot_list_lock);
-+ return -ENOENT;
-+ }
-+
-+ BUG_ON(slot->pages != vma_pages(slot->vma));
-+ /* Ok, vma still valid */
-+ vma = slot->vma;
-+ mm = vma->vm_mm;
-+ sem = &mm->mmap_sem;
-+
-+ if (uksm_test_exit(mm)) {
-+ spin_unlock(&vma_slot_list_lock);
-+ return -ENOENT;
-+ }
-+
-+ if (down_read_trylock(sem)) {
-+ spin_unlock(&vma_slot_list_lock);
-+ if (slot_pool_alloc(slot)) {
-+ uksm_remove_vma(vma);
-+ up_read(sem);
-+ return -ENOENT;
-+ }
-+ return 0;
-+ }
-+
-+ spin_unlock(&vma_slot_list_lock);
-+ return -EBUSY;
-+}
-+
-+static inline unsigned long
-+vma_page_address(struct page *page, struct vm_area_struct *vma)
-+{
-+ pgoff_t pgoff = page->index;
-+ unsigned long address;
-+
-+ address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
-+ if (unlikely(address < vma->vm_start || address >= vma->vm_end)) {
-+ /* page should be within @vma mapping range */
-+ return -EFAULT;
-+ }
-+ return address;
-+}
-+
-+
-+/* return 0 on success with the item's mmap_sem locked */
-+static inline int get_mergeable_page_lock_mmap(struct rmap_item *item)
-+{
-+ struct mm_struct *mm;
-+ struct vma_slot *slot = item->slot;
-+ int err = -EINVAL;
-+
-+ struct page *page;
-+
-+ /*
-+ * try_down_read_slot_mmap_sem() returns non-zero if the slot
-+ * has been removed by uksm_remove_vma().
-+ */
-+ if (try_down_read_slot_mmap_sem(slot))
-+ return -EBUSY;
-+
-+ mm = slot->vma->vm_mm;
-+
-+ if (uksm_test_exit(mm))
-+ goto failout_up;
-+
-+ page = item->page;
-+ rcu_read_lock();
-+ if (!get_page_unless_zero(page)) {
-+ rcu_read_unlock();
-+ goto failout_up;
-+ }
-+
-+ /* No need to consider huge page here. */
-+ if (item->slot->vma->anon_vma != page_anon_vma(page) ||
-+ vma_page_address(page, item->slot->vma) != get_rmap_addr(item)) {
-+ /*
-+ * TODO:
-+ * should we release this item because of its stale page
-+ * mapping?
-+ */
-+ put_page(page);
-+ rcu_read_unlock();
-+ goto failout_up;
-+ }
-+ rcu_read_unlock();
-+ return 0;
-+
-+failout_up:
-+ up_read(&mm->mmap_sem);
-+ return err;
-+}
-+
-+/*
-+ * What kind of VMA is considered?
-+ */
-+static inline int vma_can_enter(struct vm_area_struct *vma)
-+{
-+ return uksm_flags_can_scan(vma->vm_flags);
-+}
-+
-+/*
-+ * Called whenever a fresh new vma is created. A new vma_slot
-+ * is created and inserted into a global list. Must be called
-+ * after the vma is inserted into its mm.
-+ */
-+void uksm_vma_add_new(struct vm_area_struct *vma)
-+{
-+ struct vma_slot *slot;
-+
-+ if (!vma_can_enter(vma)) {
-+ vma->uksm_vma_slot = NULL;
-+ return;
-+ }
-+
-+ slot = alloc_vma_slot();
-+ if (!slot) {
-+ vma->uksm_vma_slot = NULL;
-+ return;
-+ }
-+
-+ vma->uksm_vma_slot = slot;
-+ vma->vm_flags |= VM_MERGEABLE;
-+ slot->vma = vma;
-+ slot->mm = vma->vm_mm;
-+ slot->ctime_j = jiffies;
-+ slot->pages = vma_pages(vma);
-+ spin_lock(&vma_slot_list_lock);
-+ list_add_tail(&slot->slot_list, &vma_slot_new);
-+ spin_unlock(&vma_slot_list_lock);
-+}
-+
-+/* 32/3 < they < 32/2 */
-+#define shiftl 8
-+#define shiftr 12
-+
-+#define HASH_FROM_TO(from, to) \
-+for (index = from; index < to; index++) { \
-+ pos = random_nums[index]; \
-+ hash += key[pos]; \
-+ hash += (hash << shiftl); \
-+ hash ^= (hash >> shiftr); \
-+}
-+
-+
-+#define HASH_FROM_DOWN_TO(from, to) \
-+for (index = from - 1; index >= to; index--) { \
-+ hash ^= (hash >> shiftr); \
-+ hash ^= (hash >> (shiftr*2)); \
-+ hash -= (hash << shiftl); \
-+ hash += (hash << (shiftl*2)); \
-+ pos = random_nums[index]; \
-+ hash -= key[pos]; \
-+}
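/*
 * An illustrative sketch, not part of this patch: HASH_FROM_DOWN_TO
 * inverts HASH_FROM_TO step by step modulo 2^32. "hash ^= hash >> 12"
 * is undone by xoring in the 12- and 24-bit shifts, and
 * "hash += hash << 8" (a multiply by 1 + 2^8) is undone because
 * (1 + 2^8) * (1 - 2^8) * (1 + 2^16) == 1 (mod 2^32). A round-trip
 * check over a single sampled position:
 */
static int hash_step_roundtrip(const u32 *key, int pos)
{
	u32 hash = 0xdeadbeef, saved = hash;

	/* forward step, as in HASH_FROM_TO */
	hash += key[pos];
	hash += (hash << shiftl);
	hash ^= (hash >> shiftr);

	/* backward step, as in HASH_FROM_DOWN_TO */
	hash ^= (hash >> shiftr);
	hash ^= (hash >> (shiftr * 2));
	hash -= (hash << shiftl);
	hash += (hash << (shiftl * 2));
	hash -= key[pos];

	return hash == saved;	/* always 1 */
}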
-+
-+/*
-+ * The main random sample hash function.
-+ */
-+static u32 random_sample_hash(void *addr, u32 hash_strength)
-+{
-+ u32 hash = 0xdeadbeef;
-+ int index, pos, loop = hash_strength;
-+ u32 *key = (u32 *)addr;
-+
-+ if (loop > HASH_STRENGTH_FULL)
-+ loop = HASH_STRENGTH_FULL;
-+
-+ HASH_FROM_TO(0, loop);
-+
-+ if (hash_strength > HASH_STRENGTH_FULL) {
-+ loop = hash_strength - HASH_STRENGTH_FULL;
-+ HASH_FROM_TO(0, loop);
-+ }
-+
-+ return hash;
-+}
-+
-+
-+/**
-+ * It's used when hash strength is adjusted
-+ *
-+ * @addr The page's virtual address
-+ * @from The original hash strength
-+ * @to The hash strength changed to
-+ * @hash The hash value generated with the "from" hash strength
-+ *
-+ * return the hash value
-+ */
-+static u32 delta_hash(void *addr, int from, int to, u32 hash)
-+{
-+ u32 *key = (u32 *)addr;
-+ int index, pos; /* make sure they are int type */
-+
-+ if (to > from) {
-+ if (from >= HASH_STRENGTH_FULL) {
-+ from -= HASH_STRENGTH_FULL;
-+ to -= HASH_STRENGTH_FULL;
-+ HASH_FROM_TO(from, to);
-+ } else if (to <= HASH_STRENGTH_FULL) {
-+ HASH_FROM_TO(from, to);
-+ } else {
-+ HASH_FROM_TO(from, HASH_STRENGTH_FULL);
-+ HASH_FROM_TO(0, to - HASH_STRENGTH_FULL);
-+ }
-+ } else {
-+ if (from <= HASH_STRENGTH_FULL) {
-+ HASH_FROM_DOWN_TO(from, to);
-+ } else if (to >= HASH_STRENGTH_FULL) {
-+ from -= HASH_STRENGTH_FULL;
-+ to -= HASH_STRENGTH_FULL;
-+ HASH_FROM_DOWN_TO(from, to);
-+ } else {
-+ HASH_FROM_DOWN_TO(from - HASH_STRENGTH_FULL, 0);
-+ HASH_FROM_DOWN_TO(HASH_STRENGTH_FULL, to);
-+ }
-+ }
-+
-+ return hash;
-+}
-+
-+/**
-+ *
-+ * Called when rshash_pos or rshash_neg is about to overflow, or a scan
-+ * round has finished.
-+ *
-+ * return 0 if no page has been scanned since last call, 1 otherwise.
-+ */
-+static inline int encode_benefit(void)
-+{
-+ u64 scanned_delta, pos_delta, neg_delta;
-+ unsigned long base = benefit.base;
-+
-+ scanned_delta = uksm_pages_scanned - uksm_pages_scanned_last;
-+
-+ if (!scanned_delta)
-+ return 0;
-+
-+ scanned_delta >>= base;
-+ pos_delta = rshash_pos >> base;
-+ neg_delta = rshash_neg >> base;
-+
-+ if (CAN_OVERFLOW_U64(benefit.pos, pos_delta) ||
-+ CAN_OVERFLOW_U64(benefit.neg, neg_delta) ||
-+ CAN_OVERFLOW_U64(benefit.scanned, scanned_delta)) {
-+ benefit.scanned >>= 1;
-+ benefit.neg >>= 1;
-+ benefit.pos >>= 1;
-+ benefit.base++;
-+ scanned_delta >>= 1;
-+ pos_delta >>= 1;
-+ neg_delta >>= 1;
-+ }
-+
-+ benefit.pos += pos_delta;
-+ benefit.neg += neg_delta;
-+ benefit.scanned += scanned_delta;
-+
-+ BUG_ON(!benefit.scanned);
-+
-+ rshash_pos = rshash_neg = 0;
-+ uksm_pages_scanned_last = uksm_pages_scanned;
-+
-+ return 1;
-+}
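/*
 * An illustrative sketch, not part of this patch: because pos, neg and
 * scanned are all shifted right by the same "base", their ratios stay
 * meaningful after rescaling. A hypothetical helper deriving the share
 * of hash time saved:
 */
static u64 benefit_pos_percent(void)
{
	u64 total = benefit.pos + benefit.neg;

	if (!total)
		return 0;
	/* div64_u64() comes from <linux/math64.h>, already included above */
	return div64_u64(benefit.pos * 100, total);
}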
-+
-+static inline void reset_benefit(void)
-+{
-+ benefit.pos = 0;
-+ benefit.neg = 0;
-+ benefit.base = 0;
-+ benefit.scanned = 0;
-+}
-+
-+static inline void inc_rshash_pos(unsigned long delta)
-+{
-+ if (CAN_OVERFLOW_U64(rshash_pos, delta))
-+ encode_benefit();
-+
-+ rshash_pos += delta;
-+}
-+
-+static inline void inc_rshash_neg(unsigned long delta)
-+{
-+ if (CAN_OVERFLOW_U64(rshash_neg, delta))
-+ encode_benefit();
-+
-+ rshash_neg += delta;
-+}
-+
-+
-+static inline u32 page_hash(struct page *page, unsigned long hash_strength,
-+ int cost_accounting)
-+{
-+ u32 val;
-+ unsigned long delta;
-+
-+ void *addr = kmap_atomic(page);
-+
-+ val = random_sample_hash(addr, hash_strength);
-+ kunmap_atomic(addr);
-+
-+ if (cost_accounting) {
-+ if (hash_strength < HASH_STRENGTH_FULL)
-+ delta = HASH_STRENGTH_FULL - hash_strength;
-+ else
-+ delta = 0;
-+
-+ inc_rshash_pos(delta);
-+ }
-+
-+ return val;
-+}
-+
-+static int memcmp_pages(struct page *page1, struct page *page2,
-+ int cost_accounting)
-+{
-+ char *addr1, *addr2;
-+ int ret;
-+
-+ addr1 = kmap_atomic(page1);
-+ addr2 = kmap_atomic(page2);
-+ ret = memcmp(addr1, addr2, PAGE_SIZE);
-+ kunmap_atomic(addr2);
-+ kunmap_atomic(addr1);
-+
-+ if (cost_accounting)
-+ inc_rshash_neg(memcmp_cost);
-+
-+ return ret;
-+}
-+
-+static inline int pages_identical(struct page *page1, struct page *page2)
-+{
-+ return !memcmp_pages(page1, page2, 0);
-+}
-+
-+static inline int is_page_full_zero(struct page *page)
-+{
-+ char *addr;
-+ int ret;
-+
-+ addr = kmap_atomic(page);
-+ ret = is_full_zero(addr, PAGE_SIZE);
-+ kunmap_atomic(addr);
-+
-+ return ret;
-+}
-+
-+static int write_protect_page(struct vm_area_struct *vma, struct page *page,
-+ pte_t *orig_pte, pte_t *old_pte)
-+{
-+ struct mm_struct *mm = vma->vm_mm;
-+ struct page_vma_mapped_walk pvmw = {
-+ .page = page,
-+ .vma = vma,
-+ };
-+ int swapped;
-+ int err = -EFAULT;
-+ unsigned long mmun_start; /* For mmu_notifiers */
-+ unsigned long mmun_end; /* For mmu_notifiers */
-+
-+ pvmw.address = page_address_in_vma(page, vma);
-+ if (pvmw.address == -EFAULT)
-+ goto out;
-+
-+ BUG_ON(PageTransCompound(page));
-+
-+ mmun_start = pvmw.address;
-+ mmun_end = pvmw.address + PAGE_SIZE;
-+ mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
-+
-+ if (!page_vma_mapped_walk(&pvmw))
-+ goto out_mn;
-+ if (WARN_ONCE(!pvmw.pte, "Unexpected PMD mapping?"))
-+ goto out_unlock;
-+
-+ if (old_pte)
-+ *old_pte = *pvmw.pte;
-+
-+ if (pte_write(*pvmw.pte) || pte_dirty(*pvmw.pte) ||
-+ (pte_protnone(*pvmw.pte) && pte_savedwrite(*pvmw.pte)) || mm_tlb_flush_pending(mm)) {
-+ pte_t entry;
-+
-+ swapped = PageSwapCache(page);
-+ flush_cache_page(vma, pvmw.address, page_to_pfn(page));
-+ /*
-+ * Ok this is tricky, when get_user_pages_fast() runs it doesn't
-+ * take any lock, therefore the check that we are going to make
-+ * with the pagecount against the mapcount is racy and
-+ * O_DIRECT can happen right after the check.
-+ * So we clear the pte and flush the tlb before the check;
-+ * this assures us that no O_DIRECT can happen after the check
-+ * or in the middle of the check.
-+ */
-+ entry = ptep_clear_flush_notify(vma, pvmw.address, pvmw.pte);
-+ /*
-+ * Check that no O_DIRECT or similar I/O is in progress on the
-+ * page
-+ */
-+ if (page_mapcount(page) + 1 + swapped != page_count(page)) {
-+ set_pte_at(mm, pvmw.address, pvmw.pte, entry);
-+ goto out_unlock;
-+ }
-+ if (pte_dirty(entry))
-+ set_page_dirty(page);
-+
-+ if (pte_protnone(entry))
-+ entry = pte_mkclean(pte_clear_savedwrite(entry));
-+ else
-+ entry = pte_mkclean(pte_wrprotect(entry));
-+
-+ set_pte_at_notify(mm, pvmw.address, pvmw.pte, entry);
-+ }
-+ *orig_pte = *pvmw.pte;
-+ err = 0;
-+
-+out_unlock:
-+ page_vma_mapped_walk_done(&pvmw);
-+out_mn:
-+ mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
-+out:
-+ return err;
-+}
-+
-+#define MERGE_ERR_PGERR 1 /* the page is invalid, cannot continue */
-+#define MERGE_ERR_COLLI 2 /* there is a collision */
-+#define MERGE_ERR_COLLI_MAX 3 /* collision at the max hash strength */
-+#define MERGE_ERR_CHANGED 4 /* the page has changed since last hash */
-+
-+
-+/**
-+ * replace_page - replace page in vma by new ksm page
-+ * @vma: vma that holds the pte pointing to page
-+ * @page: the page we are replacing by kpage
-+ * @kpage: the ksm page we replace page by
-+ * @orig_pte: the original value of the pte
-+ *
-+ * Returns 0 on success, MERGE_ERR_PGERR on failure.
-+ */
-+static int replace_page(struct vm_area_struct *vma, struct page *page,
-+ struct page *kpage, pte_t orig_pte)
-+{
-+ struct mm_struct *mm = vma->vm_mm;
-+ pgd_t *pgd;
-+ p4d_t *p4d;
-+ pud_t *pud;
-+ pmd_t *pmd;
-+ pte_t *ptep;
-+ spinlock_t *ptl;
-+ pte_t entry;
-+
-+ unsigned long addr;
-+ int err = MERGE_ERR_PGERR;
-+ unsigned long mmun_start; /* For mmu_notifiers */
-+ unsigned long mmun_end; /* For mmu_notifiers */
-+
-+ addr = page_address_in_vma(page, vma);
-+ if (addr == -EFAULT)
-+ goto out;
-+
-+ pgd = pgd_offset(mm, addr);
-+ if (!pgd_present(*pgd))
-+ goto out;
-+
-+ p4d = p4d_offset(pgd, addr);
-+ pud = pud_offset(p4d, addr);
-+ if (!pud_present(*pud))
-+ goto out;
-+
-+ pmd = pmd_offset(pud, addr);
-+ BUG_ON(pmd_trans_huge(*pmd));
-+ if (!pmd_present(*pmd))
-+ goto out;
-+
-+ mmun_start = addr;
-+ mmun_end = addr + PAGE_SIZE;
-+ mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
-+
-+ ptep = pte_offset_map_lock(mm, pmd, addr, &ptl);
-+ if (!pte_same(*ptep, orig_pte)) {
-+ pte_unmap_unlock(ptep, ptl);
-+ goto out_mn;
-+ }
-+
-+ flush_cache_page(vma, addr, pte_pfn(*ptep));
-+ ptep_clear_flush_notify(vma, addr, ptep);
-+ entry = mk_pte(kpage, vma->vm_page_prot);
-+
-+ /* special treatment is needed for zero_page */
-+ if ((page_to_pfn(kpage) == uksm_zero_pfn) ||
-+ (page_to_pfn(kpage) == zero_pfn)) {
-+ entry = pte_mkspecial(entry);
-+ dec_mm_counter(mm, MM_ANONPAGES);
-+ inc_zone_page_state(page, NR_UKSM_ZERO_PAGES);
-+ } else {
-+ get_page(kpage);
-+ page_add_anon_rmap(kpage, vma, addr, false);
-+ }
-+
-+ set_pte_at_notify(mm, addr, ptep, entry);
-+
-+ page_remove_rmap(page, false);
-+ if (!page_mapped(page))
-+ try_to_free_swap(page);
-+ put_page(page);
-+
-+ pte_unmap_unlock(ptep, ptl);
-+ err = 0;
-+out_mn:
-+ mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
-+out:
-+ return err;
-+}
-+
-+
-+/**
-+ * Fully hash a page with HASH_STRENGTH_MAX and return a non-zero hash
-+ * value. The zero hash value at HASH_STRENGTH_MAX is used to indicate
-+ * that its hash_max member has not been calculated yet.
-+ *
-+ * @page The page to be hashed
-+ * @hash_old The hash value calculated with current hash strength
-+ *
-+ * return the new hash value calculated at HASH_STRENGTH_MAX
-+ */
-+static inline u32 page_hash_max(struct page *page, u32 hash_old)
-+{
-+ u32 hash_max = 0;
-+ void *addr;
-+
-+ addr = kmap_atomic(page);
-+ hash_max = delta_hash(addr, hash_strength,
-+ HASH_STRENGTH_MAX, hash_old);
-+
-+ kunmap_atomic(addr);
-+
-+ if (!hash_max)
-+ hash_max = 1;
-+
-+ inc_rshash_neg(HASH_STRENGTH_MAX - hash_strength);
-+ return hash_max;
-+}
-+
-+/*
-+ * We compare the hash again, to ensure that it is really a hash collision
-+ * instead of being caused by a page write.
-+ */
-+static inline int check_collision(struct rmap_item *rmap_item,
-+ u32 hash)
-+{
-+ int err;
-+ struct page *page = rmap_item->page;
-+
-+ /* If this rmap_item has already been hash_maxed, then the collision
-+ * must appear in the second-level rbtree search. In this case we check
-+ * if its hash_max value has been changed. Otherwise, the collision
-+ * happens in the first-level rbtree search, so we check against its
-+ * current hash value.
-+ */
-+ if (rmap_item->hash_max) {
-+ inc_rshash_neg(memcmp_cost);
-+ inc_rshash_neg(HASH_STRENGTH_MAX - hash_strength);
-+
-+ if (rmap_item->hash_max == page_hash_max(page, hash))
-+ err = MERGE_ERR_COLLI;
-+ else
-+ err = MERGE_ERR_CHANGED;
-+ } else {
-+ inc_rshash_neg(memcmp_cost + hash_strength);
-+
-+ if (page_hash(page, hash_strength, 0) == hash)
-+ err = MERGE_ERR_COLLI;
-+ else
-+ err = MERGE_ERR_CHANGED;
-+ }
-+
-+ return err;
-+}
-+
-+/**
-+ * Try to merge a rmap_item.page with a kpage in stable node. kpage must
-+ * already be a ksm page.
-+ *
-+ * @return 0 if the pages were merged, -EFAULT otherwise.
-+ */
-+static int try_to_merge_with_uksm_page(struct rmap_item *rmap_item,
-+ struct page *kpage, u32 hash)
-+{
-+ struct vm_area_struct *vma = rmap_item->slot->vma;
-+ struct mm_struct *mm = vma->vm_mm;
-+ pte_t orig_pte = __pte(0);
-+ int err = MERGE_ERR_PGERR;
-+ struct page *page;
-+
-+ if (uksm_test_exit(mm))
-+ goto out;
-+
-+ page = rmap_item->page;
-+
-+ if (page == kpage) { /* ksm page forked */
-+ err = 0;
-+ goto out;
-+ }
-+
-+ /*
-+ * We need the page lock to read a stable PageSwapCache in
-+ * write_protect_page(). We use trylock_page() instead of
-+ * lock_page() because we don't want to wait here - we
-+ * prefer to continue scanning and merging different pages,
-+ * then come back to this page when it is unlocked.
-+ */
-+ if (!trylock_page(page))
-+ goto out;
-+
-+ if (!PageAnon(page) || !PageKsm(kpage))
-+ goto out_unlock;
-+
-+ if (PageTransCompound(page)) {
-+ err = split_huge_page(page);
-+ if (err)
-+ goto out_unlock;
-+ }
-+
-+ /*
-+ * If this anonymous page is mapped only here, its pte may need
-+ * to be write-protected. If it's mapped elsewhere, all of its
-+ * ptes are necessarily already write-protected. But in either
-+ * case, we need to lock and check page_count is not raised.
-+ */
-+ if (write_protect_page(vma, page, &orig_pte, NULL) == 0) {
-+ if (pages_identical(page, kpage))
-+ err = replace_page(vma, page, kpage, orig_pte);
-+ else
-+ err = check_collision(rmap_item, hash);
-+ }
-+
-+ if ((vma->vm_flags & VM_LOCKED) && kpage && !err) {
-+ munlock_vma_page(page);
-+ if (!PageMlocked(kpage)) {
-+ unlock_page(page);
-+ lock_page(kpage);
-+ mlock_vma_page(kpage);
-+ page = kpage; /* for final unlock */
-+ }
-+ }
-+
-+out_unlock:
-+ unlock_page(page);
-+out:
-+ return err;
-+}
-+
-+
-+
-+/**
-+ * If two pages fail to merge in try_to_merge_two_pages, then we have a chance
-+ * to restore a page mapping that has been changed in try_to_merge_two_pages.
-+ *
-+ * @return 0 on success.
-+ */
-+static int restore_uksm_page_pte(struct vm_area_struct *vma, unsigned long addr,
-+ pte_t orig_pte, pte_t wprt_pte)
-+{
-+ struct mm_struct *mm = vma->vm_mm;
-+ pgd_t *pgd;
-+ p4d_t *p4d;
-+ pud_t *pud;
-+ pmd_t *pmd;
-+ pte_t *ptep;
-+ spinlock_t *ptl;
-+
-+ int err = -EFAULT;
-+
-+ pgd = pgd_offset(mm, addr);
-+ if (!pgd_present(*pgd))
-+ goto out;
-+
-+ p4d = p4d_offset(pgd, addr);
-+ pud = pud_offset(p4d, addr);
-+ if (!pud_present(*pud))
-+ goto out;
-+
-+ pmd = pmd_offset(pud, addr);
-+ if (!pmd_present(*pmd))
-+ goto out;
-+
-+ ptep = pte_offset_map_lock(mm, pmd, addr, &ptl);
-+ if (!pte_same(*ptep, wprt_pte)) {
-+ /* already copied, let it be */
-+ pte_unmap_unlock(ptep, ptl);
-+ goto out;
-+ }
-+
-+ /*
-+ * Good boy, still here. While we still hold the ksm page, it does not
-+ * return to the free page pool, so there is no way that a pte was
-+ * changed to another page and then back to this page. And remember
-+ * that a ksm page is not reused in do_wp_page(). So it's safe to
-+ * restore the original pte.
-+ */
-+ flush_cache_page(vma, addr, pte_pfn(*ptep));
-+ ptep_clear_flush_notify(vma, addr, ptep);
-+ set_pte_at_notify(mm, addr, ptep, orig_pte);
-+
-+ pte_unmap_unlock(ptep, ptl);
-+ err = 0;
-+out:
-+ return err;
-+}
-+
-+/**
-+ * try_to_merge_two_pages() - take two identical pages and prepare
-+ * them to be merged into one page (rmap_item->page)
-+ *
-+ * @return 0 if we successfully merged two identical pages into
-+ * one ksm page. MERGE_ERR_COLLI if it was only a hash collision
-+ * in the rbtree search. MERGE_ERR_CHANGED if rmap_item has
-+ * changed since it was hashed. MERGE_ERR_PGERR otherwise.
-+ *
-+ */
-+static int try_to_merge_two_pages(struct rmap_item *rmap_item,
-+ struct rmap_item *tree_rmap_item,
-+ u32 hash)
-+{
-+ pte_t orig_pte1 = __pte(0), orig_pte2 = __pte(0);
-+ pte_t wprt_pte1 = __pte(0), wprt_pte2 = __pte(0);
-+ struct vm_area_struct *vma1 = rmap_item->slot->vma;
-+ struct vm_area_struct *vma2 = tree_rmap_item->slot->vma;
-+ struct page *page = rmap_item->page;
-+ struct page *tree_page = tree_rmap_item->page;
-+ int err = MERGE_ERR_PGERR;
-+ struct address_space *saved_mapping;
-+
-+
-+ if (rmap_item->page == tree_rmap_item->page)
-+ goto out;
-+
-+ if (!trylock_page(page))
-+ goto out;
-+
-+ if (!PageAnon(page))
-+ goto out_unlock;
-+
-+ if (PageTransCompound(page)) {
-+ err = split_huge_page(page);
-+ if (err)
-+ goto out_unlock;
-+ }
-+
-+ if (write_protect_page(vma1, page, &wprt_pte1, &orig_pte1) != 0) {
-+ unlock_page(page);
-+ goto out;
-+ }
-+
-+ /*
-+ * While we hold page lock, upgrade page from
-+ * PageAnon+anon_vma to PageKsm+NULL stable_node:
-+ * stable_tree_insert() will update stable_node.
-+ */
-+ saved_mapping = page->mapping;
-+ set_page_stable_node(page, NULL);
-+ mark_page_accessed(page);
-+ if (!PageDirty(page))
-+ SetPageDirty(page);
-+
-+ unlock_page(page);
-+
-+ if (!trylock_page(tree_page))
-+ goto restore_out;
-+
-+ if (!PageAnon(tree_page)) {
-+ unlock_page(tree_page);
-+ goto restore_out;
-+ }
-+
-+ if (PageTransCompound(tree_page)) {
-+ err = split_huge_page(tree_page);
-+ if (err) {
-+ unlock_page(tree_page);
-+ goto restore_out;
-+ }
-+ }
-+
-+ if (write_protect_page(vma2, tree_page, &wprt_pte2, &orig_pte2) != 0) {
-+ unlock_page(tree_page);
-+ goto restore_out;
-+ }
-+
-+ if (pages_identical(page, tree_page)) {
-+ err = replace_page(vma2, tree_page, page, wprt_pte2);
-+ if (err) {
-+ unlock_page(tree_page);
-+ goto restore_out;
-+ }
-+
-+ if ((vma2->vm_flags & VM_LOCKED)) {
-+ munlock_vma_page(tree_page);
-+ if (!PageMlocked(page)) {
-+ unlock_page(tree_page);
-+ lock_page(page);
-+ mlock_vma_page(page);
-+ tree_page = page; /* for final unlock */
-+ }
-+ }
-+
-+ unlock_page(tree_page);
-+
-+ goto out; /* success */
-+
-+ } else {
-+ if (tree_rmap_item->hash_max &&
-+ tree_rmap_item->hash_max == rmap_item->hash_max) {
-+ err = MERGE_ERR_COLLI_MAX;
-+ } else if (page_hash(page, hash_strength, 0) ==
-+ page_hash(tree_page, hash_strength, 0)) {
-+ inc_rshash_neg(memcmp_cost + hash_strength * 2);
-+ err = MERGE_ERR_COLLI;
-+ } else {
-+ err = MERGE_ERR_CHANGED;
-+ }
-+
-+ unlock_page(tree_page);
-+ }
-+
-+restore_out:
-+ lock_page(page);
-+ if (!restore_uksm_page_pte(vma1, get_rmap_addr(rmap_item),
-+ orig_pte1, wprt_pte1))
-+ page->mapping = saved_mapping;
-+
-+out_unlock:
-+ unlock_page(page);
-+out:
-+ return err;
-+}
-+
-+static inline int hash_cmp(u32 new_val, u32 node_val)
-+{
-+ if (new_val > node_val)
-+ return 1;
-+ else if (new_val < node_val)
-+ return -1;
-+ else
-+ return 0;
-+}
-+
-+static inline u32 rmap_item_hash_max(struct rmap_item *item, u32 hash)
-+{
-+ u32 hash_max = item->hash_max;
-+
-+ if (!hash_max) {
-+ hash_max = page_hash_max(item->page, hash);
-+
-+ item->hash_max = hash_max;
-+ }
-+
-+ return hash_max;
-+}
-+
-+
-+
-+/**
-+ * stable_tree_search() - search the stable tree for a page
-+ *
-+ * @item: the rmap_item we are comparing with
-+ * @hash: the hash value of this item->page already calculated
-+ *
-+ * @return the page we found, NULL otherwise. A reference has been
-+ * taken on the returned page.
-+ */
-+static struct page *stable_tree_search(struct rmap_item *item, u32 hash)
-+{
-+ struct rb_node *node = root_stable_treep->rb_node;
-+ struct tree_node *tree_node;
-+ unsigned long hash_max;
-+ struct page *page = item->page;
-+ struct stable_node *stable_node;
-+
-+ stable_node = page_stable_node(page);
-+ if (stable_node) {
-+ /* The ksm page was forked, that is
-+ *   if (PageKsm(page) && !in_stable_tree(rmap_item)).
-+ * A reference has actually already been taken outside.
-+ */
-+ get_page(page);
-+ return page;
-+ }
-+
-+ while (node) {
-+ int cmp;
-+
-+ tree_node = rb_entry(node, struct tree_node, node);
-+
-+ cmp = hash_cmp(hash, tree_node->hash);
-+
-+ if (cmp < 0)
-+ node = node->rb_left;
-+ else if (cmp > 0)
-+ node = node->rb_right;
-+ else
-+ break;
-+ }
-+
-+ if (!node)
-+ return NULL;
-+
-+ if (tree_node->count == 1) {
-+ stable_node = rb_entry(tree_node->sub_root.rb_node,
-+ struct stable_node, node);
-+ BUG_ON(!stable_node);
-+
-+ goto get_page_out;
-+ }
-+
-+ /*
-+ * ok, we have to search the second
-+ * level subtree, hash the page to a
-+ * full strength.
-+ */
-+ node = tree_node->sub_root.rb_node;
-+ BUG_ON(!node);
-+ hash_max = rmap_item_hash_max(item, hash);
-+
-+ while (node) {
-+ int cmp;
-+
-+ stable_node = rb_entry(node, struct stable_node, node);
-+
-+ cmp = hash_cmp(hash_max, stable_node->hash_max);
-+
-+ if (cmp < 0)
-+ node = node->rb_left;
-+ else if (cmp > 0)
-+ node = node->rb_right;
-+ else
-+ goto get_page_out;
-+ }
-+
-+ return NULL;
-+
-+get_page_out:
-+ page = get_uksm_page(stable_node, 1, 1);
-+ return page;
-+}
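-+
-+/*
-+ * Note on the two-level lookup above (an explanatory sketch, not new
-+ * logic): the first rbtree is keyed by the sampling hash at the current
-+ * hash_strength; when a tree_node holds more than one stable node, a
-+ * second-level rbtree keyed by hash_max (the full-strength hash)
-+ * resolves the collision. For example, two distinct pages whose sampled
-+ * hashes collide end up as siblings in the same tree_node's sub_root,
-+ * separated by their hash_max values.
-+ */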
-+
-+static int try_merge_rmap_item(struct rmap_item *item,
-+ struct page *kpage,
-+ struct page *tree_page)
-+{
-+ struct vm_area_struct *vma = item->slot->vma;
-+ struct page_vma_mapped_walk pvmw = {
-+ .page = kpage,
-+ .vma = vma,
-+ };
-+
-+ pvmw.address = get_rmap_addr(item);
-+ if (!page_vma_mapped_walk(&pvmw))
-+ return 0;
-+
-+ if (pte_write(*pvmw.pte)) {
-+ /* has changed, abort! */
-+ page_vma_mapped_walk_done(&pvmw);
-+ return 0;
-+ }
-+
-+ get_page(tree_page);
-+ page_add_anon_rmap(tree_page, vma, pvmw.address, false);
-+
-+ flush_cache_page(vma, pvmw.address, page_to_pfn(kpage));
-+ ptep_clear_flush_notify(vma, pvmw.address, pvmw.pte);
-+ set_pte_at_notify(vma->vm_mm, pvmw.address, pvmw.pte,
-+ mk_pte(tree_page, vma->vm_page_prot));
-+
-+ page_remove_rmap(kpage, false);
-+ put_page(kpage);
-+
-+ page_vma_mapped_walk_done(&pvmw);
-+
-+ return 1;
-+}
-+
-+/**
-+ * try_merge_with_stable() - when two rmap_items need to be inserted into
-+ * the stable tree and their page was found identical to a stable ksm
-+ * page, this is the last chance to merge them into one.
-+ *
-+ * @item1: the rmap_item holding the page we want to insert
-+ * into the stable tree
-+ * @item2: the other rmap_item found during the unstable tree search
-+ * @kpage: the page currently mapped by the two rmap_items
-+ * @tree_page: the identical page found in the stable tree node
-+ * @success1: returns whether item1 was successfully merged
-+ * @success2: returns whether item2 was successfully merged
-+ */
-+static void try_merge_with_stable(struct rmap_item *item1,
-+ struct rmap_item *item2,
-+ struct page **kpage,
-+ struct page *tree_page,
-+ int *success1, int *success2)
-+{
-+ struct vm_area_struct *vma1 = item1->slot->vma;
-+ struct vm_area_struct *vma2 = item2->slot->vma;
-+ *success1 = 0;
-+ *success2 = 0;
-+
-+ if (unlikely(*kpage == tree_page)) {
-+ /* I don't think this can really happen */
-+ pr_warn("UKSM: unexpected condition detected in "
-+ "%s -- *kpage == tree_page !\n", __func__);
-+ *success1 = 1;
-+ *success2 = 1;
-+ return;
-+ }
-+
-+ if (!PageAnon(*kpage) || !PageKsm(*kpage))
-+ goto failed;
-+
-+ if (!trylock_page(tree_page))
-+ goto failed;
-+
-+ /* If the old page is still a ksm page, still pointed to from the
-+ * right place, and still write protected, we can be confident it
-+ * has not changed: no need to memcmp again.
-+ * Beware: we cannot take nested pte locks, that would risk
-+ * deadlock.
-+ */
-+ if (!try_merge_rmap_item(item1, *kpage, tree_page))
-+ goto unlock_failed;
-+
-+ /* ok, then vma2, remind that pte1 already set */
-+ if (!try_merge_rmap_item(item2, *kpage, tree_page))
-+ goto success_1;
-+
-+ *success2 = 1;
-+success_1:
-+ *success1 = 1;
-+
-+
-+ if ((*success1 && vma1->vm_flags & VM_LOCKED) ||
-+ (*success2 && vma2->vm_flags & VM_LOCKED)) {
-+ munlock_vma_page(*kpage);
-+ if (!PageMlocked(tree_page))
-+ mlock_vma_page(tree_page);
-+ }
-+
-+ /*
-+ * We do not need oldpage any more in the caller, so can break the lock
-+ * now.
-+ */
-+ unlock_page(*kpage);
-+ *kpage = tree_page; /* Get unlocked outside. */
-+ return;
-+
-+unlock_failed:
-+ unlock_page(tree_page);
-+failed:
-+ return;
-+}
-+
-+static inline void stable_node_hash_max(struct stable_node *node,
-+ struct page *page, u32 hash)
-+{
-+ u32 hash_max = node->hash_max;
-+
-+ if (!hash_max) {
-+ hash_max = page_hash_max(page, hash);
-+ node->hash_max = hash_max;
-+ }
-+}
-+
-+static inline
-+struct stable_node *new_stable_node(struct tree_node *tree_node,
-+ struct page *kpage, u32 hash_max)
-+{
-+ struct stable_node *new_stable_node;
-+
-+ new_stable_node = alloc_stable_node();
-+ if (!new_stable_node)
-+ return NULL;
-+
-+ new_stable_node->kpfn = page_to_pfn(kpage);
-+ new_stable_node->hash_max = hash_max;
-+ new_stable_node->tree_node = tree_node;
-+ set_page_stable_node(kpage, new_stable_node);
-+
-+ return new_stable_node;
-+}
-+
-+static inline
-+struct stable_node *first_level_insert(struct tree_node *tree_node,
-+ struct rmap_item *rmap_item,
-+ struct rmap_item *tree_rmap_item,
-+ struct page **kpage, u32 hash,
-+ int *success1, int *success2)
-+{
-+ int cmp;
-+ struct page *tree_page;
-+ u32 hash_max = 0;
-+ struct stable_node *stable_node, *new_snode;
-+ struct rb_node *parent = NULL, **new;
-+
-+ /* this tree node contains no sub-tree yet */
-+ stable_node = rb_entry(tree_node->sub_root.rb_node,
-+ struct stable_node, node);
-+
-+ tree_page = get_uksm_page(stable_node, 1, 0);
-+ if (tree_page) {
-+ cmp = memcmp_pages(*kpage, tree_page, 1);
-+ if (!cmp) {
-+ try_merge_with_stable(rmap_item, tree_rmap_item, kpage,
-+ tree_page, success1, success2);
-+ put_page(tree_page);
-+ if (!*success1 && !*success2)
-+ goto failed;
-+
-+ return stable_node;
-+
-+ } else {
-+ /*
-+ * Collision in the first level; try to create a subtree.
-+ * A new node needs to be created.
-+ */
-+ put_page(tree_page);
-+
-+ stable_node_hash_max(stable_node, tree_page,
-+ tree_node->hash);
-+ hash_max = rmap_item_hash_max(rmap_item, hash);
-+ cmp = hash_cmp(hash_max, stable_node->hash_max);
-+
-+ parent = &stable_node->node;
-+ if (cmp < 0)
-+ new = &parent->rb_left;
-+ else if (cmp > 0)
-+ new = &parent->rb_right;
-+ else
-+ goto failed;
-+ }
-+
-+ } else {
-+ /* The only stable_node was deleted; we reuse its tree_node.
-+ */
-+ parent = NULL;
-+ new = &tree_node->sub_root.rb_node;
-+ }
-+
-+ new_snode = new_stable_node(tree_node, *kpage, hash_max);
-+ if (!new_snode)
-+ goto failed;
-+
-+ rb_link_node(&new_snode->node, parent, new);
-+ rb_insert_color(&new_snode->node, &tree_node->sub_root);
-+ tree_node->count++;
-+ *success1 = *success2 = 1;
-+
-+ return new_snode;
-+
-+failed:
-+ return NULL;
-+}
-+
-+static inline
-+struct stable_node *stable_subtree_insert(struct tree_node *tree_node,
-+ struct rmap_item *rmap_item,
-+ struct rmap_item *tree_rmap_item,
-+ struct page **kpage, u32 hash,
-+ int *success1, int *success2)
-+{
-+ struct page *tree_page;
-+ u32 hash_max;
-+ struct stable_node *stable_node, *new_snode;
-+ struct rb_node *parent, **new;
-+
-+research:
-+ parent = NULL;
-+ new = &tree_node->sub_root.rb_node;
-+ BUG_ON(!*new);
-+ hash_max = rmap_item_hash_max(rmap_item, hash);
-+ while (*new) {
-+ int cmp;
-+
-+ stable_node = rb_entry(*new, struct stable_node, node);
-+
-+ cmp = hash_cmp(hash_max, stable_node->hash_max);
-+
-+ if (cmp < 0) {
-+ parent = *new;
-+ new = &parent->rb_left;
-+ } else if (cmp > 0) {
-+ parent = *new;
-+ new = &parent->rb_right;
-+ } else {
-+ tree_page = get_uksm_page(stable_node, 1, 0);
-+ if (tree_page) {
-+ cmp = memcmp_pages(*kpage, tree_page, 1);
-+ if (!cmp) {
-+ try_merge_with_stable(rmap_item,
-+ tree_rmap_item, kpage,
-+ tree_page, success1, success2);
-+
-+ put_page(tree_page);
-+ if (!*success1 && !*success2)
-+ goto failed;
-+ /*
-+ * successfully merged with a stable
-+ * node
-+ */
-+ return stable_node;
-+ } else {
-+ put_page(tree_page);
-+ goto failed;
-+ }
-+ } else {
-+ /*
-+ * The stable node may have been deleted
-+ * and the subtree may have been
-+ * restructured; we cannot continue, so
-+ * re-search it.
-+ */
-+ if (tree_node->count) {
-+ goto research;
-+ } else {
-+ /* reuse the tree node */
-+ parent = NULL;
-+ new = &tree_node->sub_root.rb_node;
-+ }
-+ }
-+ }
-+ }
-+
-+ new_snode = new_stable_node(tree_node, *kpage, hash_max);
-+ if (!new_snode)
-+ goto failed;
-+
-+ rb_link_node(&new_snode->node, parent, new);
-+ rb_insert_color(&new_snode->node, &tree_node->sub_root);
-+ tree_node->count++;
-+ *success1 = *success2 = 1;
-+
-+ return new_snode;
-+
-+failed:
-+ return NULL;
-+}
-+
-+
-+/**
-+ * stable_tree_insert() - try to insert a page just merged in the
-+ * unstable tree into the stable tree
-+ *
-+ * @kpage: the page that needs to be inserted
-+ * @hash: the current hash of this page
-+ * @rmap_item: the rmap_item being scanned
-+ * @tree_rmap_item: the rmap_item found in the unstable tree
-+ * @success1: returns whether rmap_item was merged
-+ * @success2: returns whether tree_rmap_item was merged
-+ *
-+ * @return the stable_node in the stable tree if at least one
-+ * rmap_item was inserted into it, NULL otherwise.
-+ */
-+static struct stable_node *
-+stable_tree_insert(struct page **kpage, u32 hash,
-+ struct rmap_item *rmap_item,
-+ struct rmap_item *tree_rmap_item,
-+ int *success1, int *success2)
-+{
-+ struct rb_node **new = &root_stable_treep->rb_node;
-+ struct rb_node *parent = NULL;
-+ struct stable_node *stable_node;
-+ struct tree_node *tree_node;
-+ u32 hash_max = 0;
-+
-+ *success1 = *success2 = 0;
-+
-+ while (*new) {
-+ int cmp;
-+
-+ tree_node = rb_entry(*new, struct tree_node, node);
-+
-+ cmp = hash_cmp(hash, tree_node->hash);
-+
-+ if (cmp < 0) {
-+ parent = *new;
-+ new = &parent->rb_left;
-+ } else if (cmp > 0) {
-+ parent = *new;
-+ new = &parent->rb_right;
-+ } else
-+ break;
-+ }
-+
-+ if (*new) {
-+ if (tree_node->count == 1) {
-+ stable_node = first_level_insert(tree_node, rmap_item,
-+ tree_rmap_item, kpage,
-+ hash, success1, success2);
-+ } else {
-+ stable_node = stable_subtree_insert(tree_node,
-+ rmap_item, tree_rmap_item, kpage,
-+ hash, success1, success2);
-+ }
-+ } else {
-+
-+ /* no tree node found */
-+ tree_node = alloc_tree_node(stable_tree_node_listp);
-+ if (!tree_node) {
-+ stable_node = NULL;
-+ goto out;
-+ }
-+
-+ stable_node = new_stable_node(tree_node, *kpage, hash_max);
-+ if (!stable_node) {
-+ free_tree_node(tree_node);
-+ goto out;
-+ }
-+
-+ tree_node->hash = hash;
-+ rb_link_node(&tree_node->node, parent, new);
-+ rb_insert_color(&tree_node->node, root_stable_treep);
-+ parent = NULL;
-+ new = &tree_node->sub_root.rb_node;
-+
-+ rb_link_node(&stable_node->node, parent, new);
-+ rb_insert_color(&stable_node->node, &tree_node->sub_root);
-+ tree_node->count++;
-+ *success1 = *success2 = 1;
-+ }
-+
-+out:
-+ return stable_node;
-+}
-+
-+
-+/**
-+ * get_tree_rmap_item_page() - try to get the page and lock the mmap_sem
-+ *
-+ * @return 0 on success, -EBUSY if unable to lock the mmap_sem,
-+ * -EINVAL if the page mapping has been changed.
-+ */
-+static inline int get_tree_rmap_item_page(struct rmap_item *tree_rmap_item)
-+{
-+ int err;
-+
-+ err = get_mergeable_page_lock_mmap(tree_rmap_item);
-+
-+ if (err == -EINVAL) {
-+ /* its page mapping has changed, remove it */
-+ remove_rmap_item_from_tree(tree_rmap_item);
-+ }
-+
-+ /* On success, the page has been gotten and the mmap_sem is locked. */
-+ return err;
-+}
-+
-+
-+/**
-+ * unstable_tree_search_insert() - search an unstable tree rmap_item with the
-+ * same hash value. Get its page and trylock the mmap_sem
-+ */
-+static inline
-+struct rmap_item *unstable_tree_search_insert(struct rmap_item *rmap_item,
-+ u32 hash)
-+
-+{
-+ struct rb_node **new = &root_unstable_tree.rb_node;
-+ struct rb_node *parent = NULL;
-+ struct tree_node *tree_node;
-+ u32 hash_max;
-+ struct rmap_item *tree_rmap_item;
-+
-+ while (*new) {
-+ int cmp;
-+
-+ tree_node = rb_entry(*new, struct tree_node, node);
-+
-+ cmp = hash_cmp(hash, tree_node->hash);
-+
-+ if (cmp < 0) {
-+ parent = *new;
-+ new = &parent->rb_left;
-+ } else if (cmp > 0) {
-+ parent = *new;
-+ new = &parent->rb_right;
-+ } else
-+ break;
-+ }
-+
-+ if (*new) {
-+ /* got the tree_node */
-+ if (tree_node->count == 1) {
-+ tree_rmap_item = rb_entry(tree_node->sub_root.rb_node,
-+ struct rmap_item, node);
-+ BUG_ON(!tree_rmap_item);
-+
-+ goto get_page_out;
-+ }
-+
-+ /* well, search the collision subtree */
-+ new = &tree_node->sub_root.rb_node;
-+ BUG_ON(!*new);
-+ hash_max = rmap_item_hash_max(rmap_item, hash);
-+
-+ while (*new) {
-+ int cmp;
-+
-+ tree_rmap_item = rb_entry(*new, struct rmap_item,
-+ node);
-+
-+ cmp = hash_cmp(hash_max, tree_rmap_item->hash_max);
-+ parent = *new;
-+ if (cmp < 0)
-+ new = &parent->rb_left;
-+ else if (cmp > 0)
-+ new = &parent->rb_right;
-+ else
-+ goto get_page_out;
-+ }
-+ } else {
-+ /* alloc a new tree_node */
-+ tree_node = alloc_tree_node(&unstable_tree_node_list);
-+ if (!tree_node)
-+ return NULL;
-+
-+ tree_node->hash = hash;
-+ rb_link_node(&tree_node->node, parent, new);
-+ rb_insert_color(&tree_node->node, &root_unstable_tree);
-+ parent = NULL;
-+ new = &tree_node->sub_root.rb_node;
-+ }
-+
-+ /* not found even in the sub-tree */
-+ rmap_item->tree_node = tree_node;
-+ rmap_item->address |= UNSTABLE_FLAG;
-+ rmap_item->hash_round = uksm_hash_round;
-+ rb_link_node(&rmap_item->node, parent, new);
-+ rb_insert_color(&rmap_item->node, &tree_node->sub_root);
-+
-+ uksm_pages_unshared++;
-+ return NULL;
-+
-+get_page_out:
-+ if (tree_rmap_item->page == rmap_item->page)
-+ return NULL;
-+
-+ if (get_tree_rmap_item_page(tree_rmap_item))
-+ return NULL;
-+
-+ return tree_rmap_item;
-+}
-+
-+static void hold_anon_vma(struct rmap_item *rmap_item,
-+ struct anon_vma *anon_vma)
-+{
-+ rmap_item->anon_vma = anon_vma;
-+ get_anon_vma(anon_vma);
-+}
-+
-+
-+/**
-+ * stable_tree_append() - append an rmap_item to a stable node.
-+ * Deduplication ratio statistics are also updated in this function.
-+ */
-+static void stable_tree_append(struct rmap_item *rmap_item,
-+ struct stable_node *stable_node, int logdedup)
-+{
-+ struct node_vma *node_vma = NULL, *new_node_vma, *node_vma_cont = NULL;
-+ unsigned long key = (unsigned long)rmap_item->slot;
-+ unsigned long factor = rmap_item->slot->rung->step;
-+
-+ BUG_ON(!stable_node);
-+ rmap_item->address |= STABLE_FLAG;
-+
-+ if (hlist_empty(&stable_node->hlist)) {
-+ uksm_pages_shared++;
-+ goto node_vma_new;
-+ } else {
-+ uksm_pages_sharing++;
-+ }
-+
-+ hlist_for_each_entry(node_vma, &stable_node->hlist, hlist) {
-+ if (node_vma->key >= key)
-+ break;
-+
-+ if (logdedup) {
-+ node_vma->slot->pages_bemerged += factor;
-+ if (list_empty(&node_vma->slot->dedup_list))
-+ list_add(&node_vma->slot->dedup_list,
-+ &vma_slot_dedup);
-+ }
-+ }
-+
-+ if (node_vma) {
-+ if (node_vma->key == key) {
-+ node_vma_cont = hlist_entry_safe(node_vma->hlist.next, struct node_vma, hlist);
-+ goto node_vma_ok;
-+ } else if (node_vma->key > key) {
-+ node_vma_cont = node_vma;
-+ }
-+ }
-+
-+node_vma_new:
-+ /* no node_vma for this vma in the node yet, alloc a new node_vma */
-+ new_node_vma = alloc_node_vma();
-+ BUG_ON(!new_node_vma);
-+ new_node_vma->head = stable_node;
-+ new_node_vma->slot = rmap_item->slot;
-+
-+ if (!node_vma) {
-+ hlist_add_head(&new_node_vma->hlist, &stable_node->hlist);
-+ } else if (node_vma->key != key) {
-+ if (node_vma->key < key)
-+ hlist_add_behind(&new_node_vma->hlist, &node_vma->hlist);
-+ else {
-+ hlist_add_before(&new_node_vma->hlist,
-+ &node_vma->hlist);
-+ }
-+
-+ }
-+ node_vma = new_node_vma;
-+
-+node_vma_ok: /* ok, ready to add to the list */
-+ rmap_item->head = node_vma;
-+ hlist_add_head(&rmap_item->hlist, &node_vma->rmap_hlist);
-+ hold_anon_vma(rmap_item, rmap_item->slot->vma->anon_vma);
-+ if (logdedup) {
-+ rmap_item->slot->pages_merged++;
-+ if (node_vma_cont) {
-+ node_vma = node_vma_cont;
-+ hlist_for_each_entry_continue(node_vma, hlist) {
-+ node_vma->slot->pages_bemerged += factor;
-+ if (list_empty(&node_vma->slot->dedup_list))
-+ list_add(&node_vma->slot->dedup_list,
-+ &vma_slot_dedup);
-+ }
-+ }
-+ }
-+}
-+
-+/*
-+ * We use break_ksm to break COW on a ksm page: it's a stripped down
-+ *
-+ * if (get_user_pages(addr, 1, 1, 1, &page, NULL) == 1)
-+ * put_page(page);
-+ *
-+ * but taking great care only to touch a ksm page, in a VM_MERGEABLE vma,
-+ * in case the application has unmapped and remapped mm,addr meanwhile.
-+ * Could a ksm page appear anywhere else? Actually yes, in a VM_PFNMAP
-+ * mmap of /dev/mem or /dev/kmem, where we would not want to touch it.
-+ */
-+static int break_ksm(struct vm_area_struct *vma, unsigned long addr)
-+{
-+ struct page *page;
-+ int ret = 0;
-+
-+ do {
-+ cond_resched();
-+ page = follow_page(vma, addr, FOLL_GET | FOLL_MIGRATION | FOLL_REMOTE);
-+ if (IS_ERR_OR_NULL(page))
-+ break;
-+ if (PageKsm(page)) {
-+ ret = handle_mm_fault(vma, addr,
-+ FAULT_FLAG_WRITE | FAULT_FLAG_REMOTE);
-+ } else
-+ ret = VM_FAULT_WRITE;
-+ put_page(page);
-+ } while (!(ret & (VM_FAULT_WRITE | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | VM_FAULT_OOM)));
-+ /*
-+ * We must loop because handle_mm_fault() may back out if there's
-+ * any difficulty e.g. if pte accessed bit gets updated concurrently.
-+ *
-+ * VM_FAULT_WRITE is what we have been hoping for: it indicates that
-+ * COW has been broken, even if the vma does not permit VM_WRITE;
-+ * but note that a concurrent fault might break PageKsm for us.
-+ *
-+ * VM_FAULT_SIGBUS could occur if we race with truncation of the
-+ * backing file, which also invalidates anonymous pages: that's
-+ * okay, that truncation will have unmapped the PageKsm for us.
-+ *
-+ * VM_FAULT_OOM: at the time of writing (late July 2009), setting
-+ * aside mem_cgroup limits, VM_FAULT_OOM would only be set if the
-+ * current task has TIF_MEMDIE set, and will be OOM killed on return
-+ * to user; and ksmd, having no mm, would never be chosen for that.
-+ *
-+ * But if the mm is in a limited mem_cgroup, then the fault may fail
-+ * with VM_FAULT_OOM even if the current task is not TIF_MEMDIE; and
-+ * even ksmd can fail in this way - though it's usually breaking ksm
-+ * just to undo a merge it made a moment before, so unlikely to oom.
-+ *
-+ * That's a pity: we might therefore have more kernel pages allocated
-+ * than we're counting as nodes in the stable tree; but uksm_do_scan
-+ * will retry to break_cow on each pass, so should recover the page
-+ * in due course. The important thing is to not let VM_MERGEABLE
-+ * be cleared while any such pages might remain in the area.
-+ */
-+ return (ret & VM_FAULT_OOM) ? -ENOMEM : 0;
-+}
-+
-+static void break_cow(struct rmap_item *rmap_item)
-+{
-+ struct vm_area_struct *vma = rmap_item->slot->vma;
-+ struct mm_struct *mm = vma->vm_mm;
-+ unsigned long addr = get_rmap_addr(rmap_item);
-+
-+ if (uksm_test_exit(mm))
-+ goto out;
-+
-+ break_ksm(vma, addr);
-+out:
-+ return;
-+}
-+
-+/*
-+ * Though it's very tempting to unmerge in_stable_tree(rmap_item)s rather
-+ * than check every pte of a given vma, the locking doesn't quite work for
-+ * that - an rmap_item is assigned to the stable tree after inserting ksm
-+ * page and upping mmap_sem. Nor does it fit with the way we skip dup'ing
-+ * rmap_items from parent to child at fork time (so as not to waste time
-+ * if exit comes before the next scan reaches it).
-+ *
-+ * Similarly, although we'd like to remove rmap_items (so updating counts
-+ * and freeing memory) when unmerging an area, it's easier to leave that
-+ * to the next pass of ksmd - consider, for example, how ksmd might be
-+ * in cmp_and_merge_page on one of the rmap_items we would be removing.
-+ */
-+inline int unmerge_uksm_pages(struct vm_area_struct *vma,
-+ unsigned long start, unsigned long end)
-+{
-+ unsigned long addr;
-+ int err = 0;
-+
-+ for (addr = start; addr < end && !err; addr += PAGE_SIZE) {
-+ if (uksm_test_exit(vma->vm_mm))
-+ break;
-+ if (signal_pending(current))
-+ err = -ERESTARTSYS;
-+ else
-+ err = break_ksm(vma, addr);
-+ }
-+ return err;
-+}
-+
-+static inline void inc_uksm_pages_scanned(void)
-+{
-+ u64 delta;
-+
-+
-+ if (uksm_pages_scanned == U64_MAX) {
-+ encode_benefit();
-+
-+ delta = uksm_pages_scanned >> pages_scanned_base;
-+
-+ if (CAN_OVERFLOW_U64(pages_scanned_stored, delta)) {
-+ pages_scanned_stored >>= 1;
-+ delta >>= 1;
-+ pages_scanned_base++;
-+ }
-+
-+ pages_scanned_stored += delta;
-+
-+ uksm_pages_scanned = uksm_pages_scanned_last = 0;
-+ }
-+
-+ uksm_pages_scanned++;
-+}
-+
-+static inline int find_zero_page_hash(int strength, u32 hash)
-+{
-+ return (zero_hash_table[strength] == hash);
-+}
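-+
-+/*
-+ * zero_hash_table (presumably precomputed earlier in this patch) holds
-+ * the hash of an all-zero page at each hash strength, so a match here is
-+ * only a cheap hint that the page may be full of zeros; it is confirmed
-+ * by is_page_full_zero() in cmp_and_merge_zero_page() below before the
-+ * page is re-mapped to the zero page.
-+ */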
-+
-+static
-+int cmp_and_merge_zero_page(struct vm_area_struct *vma, struct page *page)
-+{
-+ struct page *zero_page = empty_uksm_zero_page;
-+ struct mm_struct *mm = vma->vm_mm;
-+ pte_t orig_pte = __pte(0);
-+ int err = -EFAULT;
-+
-+ if (uksm_test_exit(mm))
-+ goto out;
-+
-+ if (!trylock_page(page))
-+ goto out;
-+
-+ if (!PageAnon(page))
-+ goto out_unlock;
-+
-+ if (PageTransCompound(page)) {
-+ err = split_huge_page(page);
-+ if (err)
-+ goto out_unlock;
-+ }
-+
-+ if (write_protect_page(vma, page, &orig_pte, 0) == 0) {
-+ if (is_page_full_zero(page))
-+ err = replace_page(vma, page, zero_page, orig_pte);
-+ }
-+
-+out_unlock:
-+ unlock_page(page);
-+out:
-+ return err;
-+}
-+
-+/*
-+ * cmp_and_merge_page() - first see if the page can be merged into the
-+ * stable tree; if not, compare the hash to the previous one and, if it
-+ * is the same, see if the page can be inserted into the unstable tree,
-+ * or merged with a page already there and both transferred to the
-+ * stable tree.
-+ *
-+ * @rmap_item: the reverse mapping of the page we are searching an
-+ * identical page for
-+ * @hash: the hash value of rmap_item->page, already calculated
-+ */
-+static void cmp_and_merge_page(struct rmap_item *rmap_item, u32 hash)
-+{
-+ struct rmap_item *tree_rmap_item;
-+ struct page *page;
-+ struct page *kpage = NULL;
-+ u32 hash_max;
-+ int err;
-+ unsigned int success1, success2;
-+ struct stable_node *snode;
-+ int cmp;
-+ struct rb_node *parent = NULL, **new;
-+
-+ remove_rmap_item_from_tree(rmap_item);
-+ page = rmap_item->page;
-+
-+ /* We first start with searching the page inside the stable tree */
-+ kpage = stable_tree_search(rmap_item, hash);
-+ if (kpage) {
-+ err = try_to_merge_with_uksm_page(rmap_item, kpage,
-+ hash);
-+ if (!err) {
-+ /*
-+ * The page was successfully merged, add
-+ * its rmap_item to the stable tree.
-+ * page lock is needed because it's
-+ * racing with try_to_unmap_ksm(), etc.
-+ */
-+ lock_page(kpage);
-+ snode = page_stable_node(kpage);
-+ stable_tree_append(rmap_item, snode, 1);
-+ unlock_page(kpage);
-+ put_page(kpage);
-+ return; /* success */
-+ }
-+ put_page(kpage);
-+
-+ /*
-+ * If it was a collision and the sub-rbtree has already been
-+ * searched (hash_max != 0), we want to abort: even if it were
-+ * successfully merged in the unstable tree, the collision
-+ * would tend to happen again.
-+ */
-+ if (err == MERGE_ERR_COLLI && rmap_item->hash_max)
-+ return;
-+ }
-+
-+ tree_rmap_item =
-+ unstable_tree_search_insert(rmap_item, hash);
-+ if (tree_rmap_item) {
-+ err = try_to_merge_two_pages(rmap_item, tree_rmap_item, hash);
-+ /*
-+ * As soon as we merge this page, we want to remove the
-+ * rmap_item of the page we have merged with from the unstable
-+ * tree, and insert it instead as new node in the stable tree.
-+ */
-+ if (!err) {
-+ kpage = page;
-+ remove_rmap_item_from_tree(tree_rmap_item);
-+ lock_page(kpage);
-+ snode = stable_tree_insert(&kpage, hash,
-+ rmap_item, tree_rmap_item,
-+ &success1, &success2);
-+
-+ /*
-+ * Do not log dedup for tree item, it's not counted as
-+ * scanned in this round.
-+ */
-+ if (success2)
-+ stable_tree_append(tree_rmap_item, snode, 0);
-+
-+ /*
-+ * The order of these two stable_tree_append() calls is
-+ * important: rmap_item is the one being scanned.
-+ */
-+ if (success1)
-+ stable_tree_append(rmap_item, snode, 1);
-+
-+ /*
-+ * The original kpage may be unlocked inside
-+ * stable_tree_insert() already. This page
-+ * should be unlocked before doing
-+ * break_cow().
-+ */
-+ unlock_page(kpage);
-+
-+ if (!success1)
-+ break_cow(rmap_item);
-+
-+ if (!success2)
-+ break_cow(tree_rmap_item);
-+
-+ } else if (err == MERGE_ERR_COLLI) {
-+ BUG_ON(tree_rmap_item->tree_node->count > 1);
-+
-+ rmap_item_hash_max(tree_rmap_item,
-+ tree_rmap_item->tree_node->hash);
-+
-+ hash_max = rmap_item_hash_max(rmap_item, hash);
-+ cmp = hash_cmp(hash_max, tree_rmap_item->hash_max);
-+ parent = &tree_rmap_item->node;
-+ if (cmp < 0)
-+ new = &parent->rb_left;
-+ else if (cmp > 0)
-+ new = &parent->rb_right;
-+ else
-+ goto put_up_out;
-+
-+ rmap_item->tree_node = tree_rmap_item->tree_node;
-+ rmap_item->address |= UNSTABLE_FLAG;
-+ rmap_item->hash_round = uksm_hash_round;
-+ rb_link_node(&rmap_item->node, parent, new);
-+ rb_insert_color(&rmap_item->node,
-+ &tree_rmap_item->tree_node->sub_root);
-+ rmap_item->tree_node->count++;
-+ } else {
-+ /*
-+ * Either one of the pages has changed, or they collide
-+ * at the max hash; we consider them ill items.
-+ */
-+ remove_rmap_item_from_tree(tree_rmap_item);
-+ }
-+put_up_out:
-+ put_page(tree_rmap_item->page);
-+ up_read(&tree_rmap_item->slot->vma->vm_mm->mmap_sem);
-+ }
-+}
-+
-+
-+
-+
-+static inline unsigned long get_pool_index(struct vma_slot *slot,
-+ unsigned long index)
-+{
-+ unsigned long pool_index;
-+
-+ pool_index = (sizeof(struct rmap_list_entry *) * index) >> PAGE_SHIFT;
-+ if (pool_index >= slot->pool_size)
-+ BUG();
-+ return pool_index;
-+}
-+
-+static inline unsigned long index_page_offset(unsigned long index)
-+{
-+ return offset_in_page(sizeof(struct rmap_list_entry *) * index);
-+}
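-+
-+/*
-+ * Worked example for the two helpers above (assuming, for illustration,
-+ * 4 KiB pages and 8-byte pointers, i.e. 512 entries per pool page):
-+ * index 1000 gives 1000 * 8 = 8000 bytes, so get_pool_index() returns
-+ * 8000 >> PAGE_SHIFT = 1 and index_page_offset() returns
-+ * offset_in_page(8000) = 3904.
-+ */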
-+
-+static inline
-+struct rmap_list_entry *get_rmap_list_entry(struct vma_slot *slot,
-+ unsigned long index, int need_alloc)
-+{
-+ unsigned long pool_index;
-+ struct page *page;
-+ void *addr;
-+
-+
-+ pool_index = get_pool_index(slot, index);
-+ if (!slot->rmap_list_pool[pool_index]) {
-+ if (!need_alloc)
-+ return NULL;
-+
-+ page = alloc_page(GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN);
-+ if (!page)
-+ return NULL;
-+
-+ slot->rmap_list_pool[pool_index] = page;
-+ }
-+
-+ addr = kmap(slot->rmap_list_pool[pool_index]);
-+ addr += index_page_offset(index);
-+
-+ return addr;
-+}
-+
-+static inline void put_rmap_list_entry(struct vma_slot *slot,
-+ unsigned long index)
-+{
-+ unsigned long pool_index;
-+
-+ pool_index = get_pool_index(slot, index);
-+ BUG_ON(!slot->rmap_list_pool[pool_index]);
-+ kunmap(slot->rmap_list_pool[pool_index]);
-+}
-+
-+static inline int entry_is_new(struct rmap_list_entry *entry)
-+{
-+ return !entry->item;
-+}
-+
-+static inline unsigned long get_index_orig_addr(struct vma_slot *slot,
-+ unsigned long index)
-+{
-+ return slot->vma->vm_start + (index << PAGE_SHIFT);
-+}
-+
-+static inline unsigned long get_entry_address(struct rmap_list_entry *entry)
-+{
-+ unsigned long addr;
-+
-+ if (is_addr(entry->addr))
-+ addr = get_clean_addr(entry->addr);
-+ else if (entry->item)
-+ addr = get_rmap_addr(entry->item);
-+ else
-+ BUG();
-+
-+ return addr;
-+}
-+
-+static inline struct rmap_item *get_entry_item(struct rmap_list_entry *entry)
-+{
-+ if (is_addr(entry->addr))
-+ return NULL;
-+
-+ return entry->item;
-+}
-+
-+static inline void inc_rmap_list_pool_count(struct vma_slot *slot,
-+ unsigned long index)
-+{
-+ unsigned long pool_index;
-+
-+ pool_index = get_pool_index(slot, index);
-+ BUG_ON(!slot->rmap_list_pool[pool_index]);
-+ slot->pool_counts[pool_index]++;
-+}
-+
-+static inline void dec_rmap_list_pool_count(struct vma_slot *slot,
-+ unsigned long index)
-+{
-+ unsigned long pool_index;
-+
-+ pool_index = get_pool_index(slot, index);
-+ BUG_ON(!slot->rmap_list_pool[pool_index]);
-+ BUG_ON(!slot->pool_counts[pool_index]);
-+ slot->pool_counts[pool_index]--;
-+}
-+
-+static inline int entry_has_rmap(struct rmap_list_entry *entry)
-+{
-+ return !is_addr(entry->addr) && entry->item;
-+}
-+
-+static inline void swap_entries(struct rmap_list_entry *entry1,
-+ unsigned long index1,
-+ struct rmap_list_entry *entry2,
-+ unsigned long index2)
-+{
-+ struct rmap_list_entry tmp;
-+
-+ /* swapping two new entries is meaningless */
-+ BUG_ON(entry_is_new(entry1) && entry_is_new(entry2));
-+
-+ tmp = *entry1;
-+ *entry1 = *entry2;
-+ *entry2 = tmp;
-+
-+ if (entry_has_rmap(entry1))
-+ entry1->item->entry_index = index1;
-+
-+ if (entry_has_rmap(entry2))
-+ entry2->item->entry_index = index2;
-+
-+ if (entry_has_rmap(entry1) && !entry_has_rmap(entry2)) {
-+ inc_rmap_list_pool_count(entry1->item->slot, index1);
-+ dec_rmap_list_pool_count(entry1->item->slot, index2);
-+ } else if (!entry_has_rmap(entry1) && entry_has_rmap(entry2)) {
-+ inc_rmap_list_pool_count(entry2->item->slot, index2);
-+ dec_rmap_list_pool_count(entry2->item->slot, index1);
-+ }
-+}
-+
-+static inline void free_entry_item(struct rmap_list_entry *entry)
-+{
-+ unsigned long index;
-+ struct rmap_item *item;
-+
-+ if (!is_addr(entry->addr)) {
-+ BUG_ON(!entry->item);
-+ item = entry->item;
-+ entry->addr = get_rmap_addr(item);
-+ set_is_addr(entry->addr);
-+ index = item->entry_index;
-+ remove_rmap_item_from_tree(item);
-+ dec_rmap_list_pool_count(item->slot, index);
-+ free_rmap_item(item);
-+ }
-+}
-+
-+static inline int pool_entry_boundary(unsigned long index)
-+{
-+ unsigned long linear_addr;
-+
-+ linear_addr = sizeof(struct rmap_list_entry *) * index;
-+ return index && !offset_in_page(linear_addr);
-+}
-+
-+static inline void try_free_last_pool(struct vma_slot *slot,
-+ unsigned long index)
-+{
-+ unsigned long pool_index;
-+
-+ pool_index = get_pool_index(slot, index);
-+ if (slot->rmap_list_pool[pool_index] &&
-+ !slot->pool_counts[pool_index]) {
-+ __free_page(slot->rmap_list_pool[pool_index]);
-+ slot->rmap_list_pool[pool_index] = NULL;
-+ slot->flags |= UKSM_SLOT_NEED_SORT;
-+ }
-+
-+}
-+
-+static inline unsigned long vma_item_index(struct vm_area_struct *vma,
-+ struct rmap_item *item)
-+{
-+ return (get_rmap_addr(item) - vma->vm_start) >> PAGE_SHIFT;
-+}
-+
-+static int within_same_pool(struct vma_slot *slot,
-+ unsigned long i, unsigned long j)
-+{
-+ unsigned long pool_i, pool_j;
-+
-+ pool_i = get_pool_index(slot, i);
-+ pool_j = get_pool_index(slot, j);
-+
-+ return (pool_i == pool_j);
-+}
-+
-+static void sort_rmap_entry_list(struct vma_slot *slot)
-+{
-+ unsigned long i, j;
-+ struct rmap_list_entry *entry, *swap_entry;
-+
-+ entry = get_rmap_list_entry(slot, 0, 0);
-+ for (i = 0; i < slot->pages; ) {
-+
-+ if (!entry)
-+ goto skip_whole_pool;
-+
-+ if (entry_is_new(entry))
-+ goto next_entry;
-+
-+ if (is_addr(entry->addr)) {
-+ entry->addr = 0;
-+ goto next_entry;
-+ }
-+
-+ j = vma_item_index(slot->vma, entry->item);
-+ if (j == i)
-+ goto next_entry;
-+
-+ if (within_same_pool(slot, i, j))
-+ swap_entry = entry + j - i;
-+ else
-+ swap_entry = get_rmap_list_entry(slot, j, 1);
-+
-+ swap_entries(entry, i, swap_entry, j);
-+ if (!within_same_pool(slot, i, j))
-+ put_rmap_list_entry(slot, j);
-+ continue;
-+
-+skip_whole_pool:
-+ i += PAGE_SIZE / sizeof(*entry);
-+ if (i < slot->pages)
-+ entry = get_rmap_list_entry(slot, i, 0);
-+ continue;
-+
-+next_entry:
-+ if (i >= slot->pages - 1 ||
-+ !within_same_pool(slot, i, i + 1)) {
-+ put_rmap_list_entry(slot, i);
-+ if (i + 1 < slot->pages)
-+ entry = get_rmap_list_entry(slot, i + 1, 0);
-+ } else
-+ entry++;
-+ i++;
-+ continue;
-+ }
-+
-+ /* free empty pool pages which contain no rmap_item */
-+ /* TODO: can be simplified to rely only on pool_counts once bug-free */
-+ for (i = 0; i < slot->pool_size; i++) {
-+ unsigned char has_rmap;
-+ void *addr;
-+
-+ if (!slot->rmap_list_pool[i])
-+ continue;
-+
-+ has_rmap = 0;
-+ addr = kmap(slot->rmap_list_pool[i]);
-+ BUG_ON(!addr);
-+ for (j = 0; j < PAGE_SIZE / sizeof(*entry); j++) {
-+ entry = (struct rmap_list_entry *)addr + j;
-+ if (is_addr(entry->addr))
-+ continue;
-+ if (!entry->item)
-+ continue;
-+ has_rmap = 1;
-+ }
-+ kunmap(slot->rmap_list_pool[i]);
-+ if (!has_rmap) {
-+ BUG_ON(slot->pool_counts[i]);
-+ __free_page(slot->rmap_list_pool[i]);
-+ slot->rmap_list_pool[i] = NULL;
-+ }
-+ }
-+
-+ slot->flags &= ~UKSM_SLOT_NEED_SORT;
-+}
-+
-+/*
-+ * vma_fully_scanned() - return true if all the pages in this slot have
-+ * been scanned.
-+ */
-+static inline int vma_fully_scanned(struct vma_slot *slot)
-+{
-+ return slot->pages_scanned == slot->pages;
-+}
-+
-+/**
-+ * get_next_rmap_item() - Get the next rmap_item in a vma_slot according
-+ * to its random permutation. The random permutation index management
-+ * code is embedded in this function.
-+ */
-+static struct rmap_item *get_next_rmap_item(struct vma_slot *slot, u32 *hash)
-+{
-+ unsigned long rand_range, addr, swap_index, scan_index;
-+ struct rmap_item *item = NULL;
-+ struct rmap_list_entry *scan_entry, *swap_entry = NULL;
-+ struct page *page;
-+
-+ scan_index = swap_index = slot->pages_scanned % slot->pages;
-+
-+ if (pool_entry_boundary(scan_index))
-+ try_free_last_pool(slot, scan_index - 1);
-+
-+ if (vma_fully_scanned(slot)) {
-+ if (slot->flags & UKSM_SLOT_NEED_SORT)
-+ slot->flags |= UKSM_SLOT_NEED_RERAND;
-+ else
-+ slot->flags &= ~UKSM_SLOT_NEED_RERAND;
-+ if (slot->flags & UKSM_SLOT_NEED_SORT)
-+ sort_rmap_entry_list(slot);
-+ }
-+
-+ scan_entry = get_rmap_list_entry(slot, scan_index, 1);
-+ if (!scan_entry)
-+ return NULL;
-+
-+ if (entry_is_new(scan_entry)) {
-+ scan_entry->addr = get_index_orig_addr(slot, scan_index);
-+ set_is_addr(scan_entry->addr);
-+ }
-+
-+ if (slot->flags & UKSM_SLOT_NEED_RERAND) {
-+ rand_range = slot->pages - scan_index;
-+ BUG_ON(!rand_range);
-+ swap_index = scan_index + (prandom_u32() % rand_range);
-+ }
-+
-+ if (swap_index != scan_index) {
-+ swap_entry = get_rmap_list_entry(slot, swap_index, 1);
-+ if (entry_is_new(swap_entry)) {
-+ swap_entry->addr = get_index_orig_addr(slot,
-+ swap_index);
-+ set_is_addr(swap_entry->addr);
-+ }
-+ swap_entries(scan_entry, scan_index, swap_entry, swap_index);
-+ }
-+
-+ addr = get_entry_address(scan_entry);
-+ item = get_entry_item(scan_entry);
-+ BUG_ON(addr > slot->vma->vm_end || addr < slot->vma->vm_start);
-+
-+ page = follow_page(slot->vma, addr, FOLL_GET);
-+ if (IS_ERR_OR_NULL(page))
-+ goto nopage;
-+
-+ if (!PageAnon(page))
-+ goto putpage;
-+
-+ /* check whether this is the zero_page pfn or the uksm_zero_page pfn */
-+ if ((page_to_pfn(page) == zero_pfn)
-+ || (page_to_pfn(page) == uksm_zero_pfn))
-+ goto putpage;
-+
-+ flush_anon_page(slot->vma, page, addr);
-+ flush_dcache_page(page);
-+
-+
-+ *hash = page_hash(page, hash_strength, 1);
-+ inc_uksm_pages_scanned();
-+ /* if the page content is all zero, re-map it to the zero page */
-+ if (find_zero_page_hash(hash_strength, *hash)) {
-+ if (!cmp_and_merge_zero_page(slot->vma, page)) {
-+ slot->pages_merged++;
-+
-+ /* For full-zero pages, no need to create rmap item */
-+ goto putpage;
-+ } else {
-+ inc_rshash_neg(memcmp_cost / 2);
-+ }
-+ }
-+
-+ if (!item) {
-+ item = alloc_rmap_item();
-+ if (item) {
-+ /* It has already been zeroed */
-+ item->slot = slot;
-+ item->address = addr;
-+ item->entry_index = scan_index;
-+ scan_entry->item = item;
-+ inc_rmap_list_pool_count(slot, scan_index);
-+ } else
-+ goto putpage;
-+ }
-+
-+ BUG_ON(item->slot != slot);
-+ /* the page may have changed */
-+ item->page = page;
-+ put_rmap_list_entry(slot, scan_index);
-+ if (swap_entry)
-+ put_rmap_list_entry(slot, swap_index);
-+ return item;
-+
-+putpage:
-+ put_page(page);
-+ page = NULL;
-+nopage:
-+ /* no page, store addr back and free rmap_item if possible */
-+ free_entry_item(scan_entry);
-+ put_rmap_list_entry(slot, scan_index);
-+ if (swap_entry)
-+ put_rmap_list_entry(slot, swap_index);
-+ return NULL;
-+}
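-+
-+/*
-+ * The scan order above is, in effect, an incremental Fisher-Yates
-+ * shuffle: the entry at scan_index is swapped with a uniformly random
-+ * entry in [scan_index, pages), so over one full round every page is
-+ * visited exactly once, in random order.
-+ */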
-+
-+static inline int in_stable_tree(struct rmap_item *rmap_item)
-+{
-+ return rmap_item->address & STABLE_FLAG;
-+}
-+
-+/**
-+ * scan_vma_one_page() - scan the next page in a vma_slot. Called with
-+ * mmap_sem locked.
-+ */
-+static noinline void scan_vma_one_page(struct vma_slot *slot)
-+{
-+ u32 hash;
-+ struct mm_struct *mm;
-+ struct rmap_item *rmap_item = NULL;
-+ struct vm_area_struct *vma = slot->vma;
-+
-+ mm = vma->vm_mm;
-+ BUG_ON(!mm);
-+ BUG_ON(!slot);
-+
-+ rmap_item = get_next_rmap_item(slot, &hash);
-+ if (!rmap_item)
-+ goto out1;
-+
-+ if (PageKsm(rmap_item->page) && in_stable_tree(rmap_item))
-+ goto out2;
-+
-+ cmp_and_merge_page(rmap_item, hash);
-+out2:
-+ put_page(rmap_item->page);
-+out1:
-+ slot->pages_scanned++;
-+ slot->this_sampled++;
-+ if (slot->fully_scanned_round != fully_scanned_round)
-+ scanned_virtual_pages++;
-+
-+ if (vma_fully_scanned(slot))
-+ slot->fully_scanned_round = fully_scanned_round;
-+}
-+
-+static inline unsigned long rung_get_pages(struct scan_rung *rung)
-+{
-+ struct slot_tree_node *node;
-+
-+ if (!rung->vma_root.rnode)
-+ return 0;
-+
-+ node = container_of(rung->vma_root.rnode, struct slot_tree_node, snode);
-+
-+ return node->size;
-+}
-+
-+#define RUNG_SAMPLED_MIN 3
-+
-+static inline
-+void uksm_calc_rung_step(struct scan_rung *rung,
-+ unsigned long page_time, unsigned long ratio)
-+{
-+ unsigned long sampled, pages;
-+
-+ /* will it be fully scanned? */
-+ if (!rung->cover_msecs) {
-+ rung->step = 1;
-+ return;
-+ }
-+
-+ sampled = rung->cover_msecs * (NSEC_PER_MSEC / TIME_RATIO_SCALE)
-+ * ratio / page_time;
-+
-+ /*
-+ * Before we finish a scan round and its expensive per-round jobs,
-+ * we need a chance to estimate the per-page time, so the sampled
-+ * number cannot be too small.
-+ */
-+ if (sampled < RUNG_SAMPLED_MIN)
-+ sampled = RUNG_SAMPLED_MIN;
-+
-+ pages = rung_get_pages(rung);
-+ if (likely(pages > sampled))
-+ rung->step = pages / sampled;
-+ else
-+ rung->step = 1;
-+}
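-+
-+/*
-+ * Illustration (with made-up numbers): if the time budget allows
-+ * sampled = 10000 pages within the rung's cover_msecs and the rung
-+ * holds 1000000 pages, then step = 100, i.e. roughly one page in every
-+ * hundred is sampled per pass over the rung.
-+ */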
-+
-+static inline int step_need_recalc(struct scan_rung *rung)
-+{
-+ unsigned long pages, stepmax;
-+
-+ pages = rung_get_pages(rung);
-+ stepmax = pages / RUNG_SAMPLED_MIN;
-+
-+ return pages && (rung->step > pages ||
-+ (stepmax && rung->step > stepmax));
-+}
-+
-+static inline
-+void reset_current_scan(struct scan_rung *rung, int finished, int step_recalc)
-+{
-+ struct vma_slot *slot;
-+
-+ if (finished)
-+ rung->flags |= UKSM_RUNG_ROUND_FINISHED;
-+
-+ if (step_recalc || step_need_recalc(rung)) {
-+ uksm_calc_rung_step(rung, uksm_ema_page_time, rung->cpu_ratio);
-+ BUG_ON(step_need_recalc(rung));
-+ }
-+
-+ slot_iter_index = prandom_u32() % rung->step;
-+ BUG_ON(!rung->vma_root.rnode);
-+ slot = sradix_tree_next(&rung->vma_root, NULL, 0, slot_iter);
-+ BUG_ON(!slot);
-+
-+ rung->current_scan = slot;
-+ rung->current_offset = slot_iter_index;
-+}
-+
-+static inline struct sradix_tree_root *slot_get_root(struct vma_slot *slot)
-+{
-+ return &slot->rung->vma_root;
-+}
-+
-+/*
-+ * Return 1 if the current scan was reset, 0 otherwise.
-+ */
-+static int advance_current_scan(struct scan_rung *rung)
-+{
-+ unsigned short n;
-+ struct vma_slot *slot, *next = NULL;
-+
-+ BUG_ON(!rung->vma_root.num);
-+
-+ slot = rung->current_scan;
-+ n = (slot->pages - rung->current_offset) % rung->step;
-+ slot_iter_index = rung->step - n;
-+ next = sradix_tree_next(&rung->vma_root, slot->snode,
-+ slot->sindex, slot_iter);
-+
-+ if (next) {
-+ rung->current_offset = slot_iter_index;
-+ rung->current_scan = next;
-+ return 0;
-+ } else {
-+ reset_current_scan(rung, 1, 0);
-+ return 1;
-+ }
-+}
-+
-+static inline void rung_rm_slot(struct vma_slot *slot)
-+{
-+ struct scan_rung *rung = slot->rung;
-+ struct sradix_tree_root *root;
-+
-+ if (rung->current_scan == slot)
-+ advance_current_scan(rung);
-+
-+ root = slot_get_root(slot);
-+ sradix_tree_delete_from_leaf(root, slot->snode, slot->sindex);
-+ slot->snode = NULL;
-+ if (step_need_recalc(rung)) {
-+ uksm_calc_rung_step(rung, uksm_ema_page_time, rung->cpu_ratio);
-+ BUG_ON(step_need_recalc(rung));
-+ }
-+
-+ /* In case advance_current_scan() looped back to this slot again */
-+ if (rung->vma_root.num && rung->current_scan == slot)
-+ reset_current_scan(slot->rung, 1, 0);
-+}
-+
-+static inline void rung_add_new_slots(struct scan_rung *rung,
-+ struct vma_slot **slots, unsigned long num)
-+{
-+ int err;
-+ struct vma_slot *slot;
-+ unsigned long i;
-+ struct sradix_tree_root *root = &rung->vma_root;
-+
-+ err = sradix_tree_enter(root, (void **)slots, num);
-+ BUG_ON(err);
-+
-+ for (i = 0; i < num; i++) {
-+ slot = slots[i];
-+ slot->rung = rung;
-+ BUG_ON(vma_fully_scanned(slot));
-+ }
-+
-+ if (rung->vma_root.num == num)
-+ reset_current_scan(rung, 0, 1);
-+}
-+
-+static inline int rung_add_one_slot(struct scan_rung *rung,
-+ struct vma_slot *slot)
-+{
-+ int err;
-+
-+ err = sradix_tree_enter(&rung->vma_root, (void **)&slot, 1);
-+ if (err)
-+ return err;
-+
-+ slot->rung = rung;
-+ if (rung->vma_root.num == 1)
-+ reset_current_scan(rung, 0, 1);
-+
-+ return 0;
-+}
-+
-+/*
-+ * Return true if the slot is deleted from its rung.
-+ */
-+static inline int vma_rung_enter(struct vma_slot *slot, struct scan_rung *rung)
-+{
-+ struct scan_rung *old_rung = slot->rung;
-+ int err;
-+
-+ if (old_rung == rung)
-+ return 0;
-+
-+ rung_rm_slot(slot);
-+ err = rung_add_one_slot(rung, slot);
-+ if (err) {
-+ err = rung_add_one_slot(old_rung, slot);
-+ WARN_ON(err); /* OOPS, badly OOM, we lost this slot */
-+ }
-+
-+ return 1;
-+}
-+
-+static inline int vma_rung_up(struct vma_slot *slot)
-+{
-+ struct scan_rung *rung;
-+
-+ rung = slot->rung;
-+ if (slot->rung != &uksm_scan_ladder[SCAN_LADDER_SIZE-1])
-+ rung++;
-+
-+ return vma_rung_enter(slot, rung);
-+}
-+
-+static inline int vma_rung_down(struct vma_slot *slot)
-+{
-+ struct scan_rung *rung;
-+
-+ rung = slot->rung;
-+ if (slot->rung != &uksm_scan_ladder[0])
-+ rung--;
-+
-+ return vma_rung_enter(slot, rung);
-+}
-+
-+/**
-+ * cal_dedup_ratio() - Calculate the deduplication ratio for this slot.
-+ */
-+static unsigned long cal_dedup_ratio(struct vma_slot *slot)
-+{
-+ unsigned long ret;
-+ unsigned long pages;
-+
-+ pages = slot->this_sampled;
-+ if (!pages)
-+ return 0;
-+
-+ BUG_ON(slot->pages_scanned == slot->last_scanned);
-+
-+ ret = slot->pages_merged;
-+
-+ /* Thrashing area filtering */
-+ if (ret && uksm_thrash_threshold) {
-+ if (slot->pages_cowed * 100 / slot->pages_merged
-+ > uksm_thrash_threshold) {
-+ ret = 0;
-+ } else {
-+ ret = slot->pages_merged - slot->pages_cowed;
-+ }
-+ }
-+
-+ return ret * 100 / pages;
-+}
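-+
-+/*
-+ * Worked example (with made-up numbers): this_sampled = 200,
-+ * pages_merged = 50, pages_cowed = 10. The COW rate is
-+ * 10 * 100 / 50 = 20; if uksm_thrash_threshold were 30, the slot is
-+ * kept and ret = 50 - 10 = 40, so the ratio is 40 * 100 / 200 = 20.
-+ */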
-+
-+/**
-+ * cal_dedup_ratio_old() - Calculate the deduplication ratio for this
-+ * slot, based on the pages merged by others (pages_bemerged).
-+ */
-+static unsigned long cal_dedup_ratio_old(struct vma_slot *slot)
-+{
-+ unsigned long ret;
-+ unsigned long pages;
-+
-+ pages = slot->pages;
-+ if (!pages)
-+ return 0;
-+
-+ ret = slot->pages_bemerged;
-+
-+ /* Thrashing area filtering */
-+ if (ret && uksm_thrash_threshold) {
-+ if (slot->pages_cowed * 100 / slot->pages_bemerged
-+ > uksm_thrash_threshold) {
-+ ret = 0;
-+ } else {
-+ ret = slot->pages_bemerged - slot->pages_cowed;
-+ }
-+ }
-+
-+ return ret * 100 / pages;
-+}
-+
-+/**
-+ * stable_node_reinsert() - When the hash_strength has been adjusted, the
-+ * stable tree needs to be restructured; this function re-inserts a
-+ * stable node.
-+ */
-+static inline void stable_node_reinsert(struct stable_node *new_node,
-+ struct page *page,
-+ struct rb_root *root_treep,
-+ struct list_head *tree_node_listp,
-+ u32 hash)
-+{
-+ struct rb_node **new = &root_treep->rb_node;
-+ struct rb_node *parent = NULL;
-+ struct stable_node *stable_node;
-+ struct tree_node *tree_node;
-+ struct page *tree_page;
-+ int cmp;
-+
-+ while (*new) {
-+ int cmp;
-+
-+ tree_node = rb_entry(*new, struct tree_node, node);
-+
-+ cmp = hash_cmp(hash, tree_node->hash);
-+
-+ if (cmp < 0) {
-+ parent = *new;
-+ new = &parent->rb_left;
-+ } else if (cmp > 0) {
-+ parent = *new;
-+ new = &parent->rb_right;
-+ } else
-+ break;
-+ }
-+
-+ if (*new) {
-+ /* found a stable tree node with the same first-level hash value */
-+ stable_node_hash_max(new_node, page, hash);
-+ if (tree_node->count == 1) {
-+ stable_node = rb_entry(tree_node->sub_root.rb_node,
-+ struct stable_node, node);
-+ tree_page = get_uksm_page(stable_node, 1, 0);
-+ if (tree_page) {
-+ stable_node_hash_max(stable_node,
-+ tree_page, hash);
-+ put_page(tree_page);
-+
-+ /* prepare for stable node insertion */
-+
-+ cmp = hash_cmp(new_node->hash_max,
-+ stable_node->hash_max);
-+ parent = &stable_node->node;
-+ if (cmp < 0)
-+ new = &parent->rb_left;
-+ else if (cmp > 0)
-+ new = &parent->rb_right;
-+ else
-+ goto failed;
-+
-+ goto add_node;
-+ } else {
-+ /* The only stable_node was deleted; the tree
-+ * node was not.
-+ */
-+ goto tree_node_reuse;
-+ }
-+ }
-+
-+ /* well, search the collision subtree */
-+ new = &tree_node->sub_root.rb_node;
-+ parent = NULL;
-+ BUG_ON(!*new);
-+ while (*new) {
-+ int cmp;
-+
-+ stable_node = rb_entry(*new, struct stable_node, node);
-+
-+ cmp = hash_cmp(new_node->hash_max,
-+ stable_node->hash_max);
-+
-+ if (cmp < 0) {
-+ parent = *new;
-+ new = &parent->rb_left;
-+ } else if (cmp > 0) {
-+ parent = *new;
-+ new = &parent->rb_right;
-+ } else {
-+ /* oh, no, still a collision */
-+ goto failed;
-+ }
-+ }
-+
-+ goto add_node;
-+ }
-+
-+ /* no tree node found */
-+ tree_node = alloc_tree_node(tree_node_listp);
-+ if (!tree_node) {
-+ pr_err("UKSM: memory allocation error!\n");
-+ goto failed;
-+ } else {
-+ tree_node->hash = hash;
-+ rb_link_node(&tree_node->node, parent, new);
-+ rb_insert_color(&tree_node->node, root_treep);
-+
-+tree_node_reuse:
-+ /* prepare for stable node insertion */
-+ parent = NULL;
-+ new = &tree_node->sub_root.rb_node;
-+ }
-+
-+add_node:
-+ rb_link_node(&new_node->node, parent, new);
-+ rb_insert_color(&new_node->node, &tree_node->sub_root);
-+ new_node->tree_node = tree_node;
-+ tree_node->count++;
-+ return;
-+
-+failed:
-+ /* This can only happen when two nodes have collided
-+ * at both hash levels.
-+ */
-+ new_node->tree_node = NULL;
-+ return;
-+}
-+
-+static inline void free_all_tree_nodes(struct list_head *list)
-+{
-+ struct tree_node *node, *tmp;
-+
-+ list_for_each_entry_safe(node, tmp, list, all_list) {
-+ free_tree_node(node);
-+ }
-+}
-+
-+/**
-+ * stable_tree_delta_hash() - Delta hash the stable tree from the
-+ * previous hash strength to the current hash_strength. It restructures
-+ * the whole tree.
-+ */
-+static inline void stable_tree_delta_hash(u32 prev_hash_strength)
-+{
-+ struct stable_node *node, *tmp;
-+ struct rb_root *root_new_treep;
-+ struct list_head *new_tree_node_listp;
-+
-+ stable_tree_index = (stable_tree_index + 1) % 2;
-+ root_new_treep = &root_stable_tree[stable_tree_index];
-+ new_tree_node_listp = &stable_tree_node_list[stable_tree_index];
-+ *root_new_treep = RB_ROOT;
-+ BUG_ON(!list_empty(new_tree_node_listp));
-+
-+ /*
-+ * We must iterate safely: a node could be removed by get_uksm_page().
-+ */
-+ list_for_each_entry_safe(node, tmp, &stable_node_list, all_list) {
-+ void *addr;
-+ struct page *node_page;
-+ u32 hash;
-+
-+ /*
-+ * We are completely restructuring the stable nodes into a new
-+ * stable tree. We don't want to touch the old tree's links or
-+ * its old tree_nodes; the old tree_nodes will be freed all at
-+ * once.
-+ */
-+ node_page = get_uksm_page(node, 0, 0);
-+ if (!node_page)
-+ continue;
-+
-+ if (node->tree_node) {
-+ hash = node->tree_node->hash;
-+
-+ addr = kmap_atomic(node_page);
-+
-+ hash = delta_hash(addr, prev_hash_strength,
-+ hash_strength, hash);
-+ kunmap_atomic(addr);
-+ } else {
-+ /*
-+ * It was not inserted into the rbtree due to a
-+ * collision in the last scan round.
-+ */
-+ hash = page_hash(node_page, hash_strength, 0);
-+ }
-+
-+ stable_node_reinsert(node, node_page, root_new_treep,
-+ new_tree_node_listp, hash);
-+ put_page(node_page);
-+ }
-+
-+ root_stable_treep = root_new_treep;
-+ free_all_tree_nodes(stable_tree_node_listp);
-+ BUG_ON(!list_empty(stable_tree_node_listp));
-+ stable_tree_node_listp = new_tree_node_listp;
-+}
-+
-+static inline void inc_hash_strength(unsigned long delta)
-+{
-+ hash_strength += 1 << delta;
-+ if (hash_strength > HASH_STRENGTH_MAX)
-+ hash_strength = HASH_STRENGTH_MAX;
-+}
-+
-+static inline void dec_hash_strength(unsigned long delta)
-+{
-+ unsigned long change = 1 << delta;
-+
-+ if (hash_strength <= change + 1)
-+ hash_strength = 1;
-+ else
-+ hash_strength -= change;
-+}
-+
-+static inline void inc_hash_strength_delta(void)
-+{
-+ hash_strength_delta++;
-+ if (hash_strength_delta > HASH_STRENGTH_DELTA_MAX)
-+ hash_strength_delta = HASH_STRENGTH_DELTA_MAX;
-+}
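-+
-+/*
-+ * Together, the helpers above implement an exponential probe: each
-+ * adjustment moves hash_strength by 1 << hash_strength_delta and then
-+ * increments the delta (capped at HASH_STRENGTH_DELTA_MAX), so
-+ * consecutive moves in the same direction accelerate as 1, 2, 4, 8...
-+ * rshash_adjust() below resets the delta to 0 whenever the direction
-+ * reverses.
-+ */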
-+
-+static inline unsigned long get_current_neg_ratio(void)
-+{
-+ u64 pos = benefit.pos;
-+ u64 neg = benefit.neg;
-+
-+ if (!neg)
-+ return 0;
-+
-+ if (!pos || neg > pos)
-+ return 100;
-+
-+ if (neg > div64_u64(U64_MAX, 100))
-+ pos = div64_u64(pos, 100);
-+ else
-+ neg *= 100;
-+
-+ return div64_u64(neg, pos);
-+}
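-+
-+/*
-+ * The branch above computes 100 * neg / pos without overflowing u64:
-+ * when neg is too large to be multiplied by 100, pos is divided by 100
-+ * instead, which yields (almost) the same percentage.
-+ */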
-+
-+static inline unsigned long get_current_benefit(void)
-+{
-+ u64 pos = benefit.pos;
-+ u64 neg = benefit.neg;
-+ u64 scanned = benefit.scanned;
-+
-+ if (neg > pos)
-+ return 0;
-+
-+ return div64_u64((pos - neg), scanned);
-+}
-+
-+static inline int judge_rshash_direction(void)
-+{
-+ u64 current_neg_ratio, stable_benefit;
-+ u64 current_benefit, delta = 0;
-+ int ret = STILL;
-+
-+ /*
-+ * Try to probe a value shortly after boot, and periodically again
-+ * in case the system stays still for a long time.
-+ */
-+ if ((fully_scanned_round & 0xFFULL) == 10) {
-+ ret = OBSCURE;
-+ goto out;
-+ }
-+
-+ current_neg_ratio = get_current_neg_ratio();
-+
-+ if (current_neg_ratio == 0) {
-+ rshash_neg_cont_zero++;
-+ if (rshash_neg_cont_zero > 2)
-+ return GO_DOWN;
-+ else
-+ return STILL;
-+ }
-+ rshash_neg_cont_zero = 0;
-+
-+ if (current_neg_ratio > 90) {
-+ ret = GO_UP;
-+ goto out;
-+ }
-+
-+ current_benefit = get_current_benefit();
-+ stable_benefit = rshash_state.stable_benefit;
-+
-+ if (!stable_benefit) {
-+ ret = OBSCURE;
-+ goto out;
-+ }
-+
-+ if (current_benefit > stable_benefit)
-+ delta = current_benefit - stable_benefit;
-+ else if (current_benefit < stable_benefit)
-+ delta = stable_benefit - current_benefit;
-+
-+ delta = div64_u64(100 * delta, stable_benefit);
-+
-+ if (delta > 50) {
-+ rshash_cont_obscure++;
-+ if (rshash_cont_obscure > 2)
-+ return OBSCURE;
-+ else
-+ return STILL;
-+ }
-+
-+out:
-+ rshash_cont_obscure = 0;
-+ return ret;
-+}
-+
-+/**
-+ * rshash_adjust() - The main function to control the random sampling
-+ * state machine for hash strength adaptation.
-+ *
-+ * Return true if hash_strength has changed.
-+ */
-+static inline int rshash_adjust(void)
-+{
-+ unsigned long prev_hash_strength = hash_strength;
-+
-+ if (!encode_benefit())
-+ return 0;
-+
-+ switch (rshash_state.state) {
-+ case RSHASH_STILL:
-+ switch (judge_rshash_direction()) {
-+ case GO_UP:
-+ if (rshash_state.pre_direct == GO_DOWN)
-+ hash_strength_delta = 0;
-+
-+ inc_hash_strength(hash_strength_delta);
-+ inc_hash_strength_delta();
-+ rshash_state.stable_benefit = get_current_benefit();
-+ rshash_state.pre_direct = GO_UP;
-+ break;
-+
-+ case GO_DOWN:
-+ if (rshash_state.pre_direct == GO_UP)
-+ hash_strength_delta = 0;
-+
-+ dec_hash_strength(hash_strength_delta);
-+ inc_hash_strength_delta();
-+ rshash_state.stable_benefit = get_current_benefit();
-+ rshash_state.pre_direct = GO_DOWN;
-+ break;
-+
-+ case OBSCURE:
-+ rshash_state.stable_point = hash_strength;
-+ rshash_state.turn_point_down = hash_strength;
-+ rshash_state.turn_point_up = hash_strength;
-+ rshash_state.turn_benefit_down = get_current_benefit();
-+ rshash_state.turn_benefit_up = get_current_benefit();
-+ rshash_state.lookup_window_index = 0;
-+ rshash_state.state = RSHASH_TRYDOWN;
-+ dec_hash_strength(hash_strength_delta);
-+ inc_hash_strength_delta();
-+ break;
-+
-+ case STILL:
-+ break;
-+ default:
-+ BUG();
-+ }
-+ break;
-+
-+ case RSHASH_TRYDOWN:
-+ if (rshash_state.lookup_window_index++ % 5 == 0)
-+ rshash_state.below_count = 0;
-+
-+ if (get_current_benefit() < rshash_state.stable_benefit)
-+ rshash_state.below_count++;
-+ else if (get_current_benefit() >
-+ rshash_state.turn_benefit_down) {
-+ rshash_state.turn_point_down = hash_strength;
-+ rshash_state.turn_benefit_down = get_current_benefit();
-+ }
-+
-+ if (rshash_state.below_count >= 3 ||
-+ judge_rshash_direction() == GO_UP ||
-+ hash_strength == 1) {
-+ hash_strength = rshash_state.stable_point;
-+ hash_strength_delta = 0;
-+ inc_hash_strength(hash_strength_delta);
-+ inc_hash_strength_delta();
-+ rshash_state.lookup_window_index = 0;
-+ rshash_state.state = RSHASH_TRYUP;
-+ hash_strength_delta = 0;
-+ } else {
-+ dec_hash_strength(hash_strength_delta);
-+ inc_hash_strength_delta();
-+ }
-+ break;
-+
-+ case RSHASH_TRYUP:
-+ if (rshash_state.lookup_window_index++ % 5 == 0)
-+ rshash_state.below_count = 0;
-+
-+ if (get_current_benefit() < rshash_state.turn_benefit_down)
-+ rshash_state.below_count++;
-+ else if (get_current_benefit() > rshash_state.turn_benefit_up) {
-+ rshash_state.turn_point_up = hash_strength;
-+ rshash_state.turn_benefit_up = get_current_benefit();
-+ }
-+
-+ if (rshash_state.below_count >= 3 ||
-+ judge_rshash_direction() == GO_DOWN ||
-+ hash_strength == HASH_STRENGTH_MAX) {
-+ hash_strength = rshash_state.turn_benefit_up >
-+ rshash_state.turn_benefit_down ?
-+ rshash_state.turn_point_up :
-+ rshash_state.turn_point_down;
-+
-+ rshash_state.state = RSHASH_PRE_STILL;
-+ } else {
-+ inc_hash_strength(hash_strength_delta);
-+ inc_hash_strength_delta();
-+ }
-+
-+ break;
-+
-+ case RSHASH_NEW:
-+ case RSHASH_PRE_STILL:
-+ rshash_state.stable_benefit = get_current_benefit();
-+ rshash_state.state = RSHASH_STILL;
-+ hash_strength_delta = 0;
-+ break;
-+ default:
-+ BUG();
-+ }
-+
-+ /* rshash_neg = rshash_pos = 0; */
-+ reset_benefit();
-+
-+ if (prev_hash_strength != hash_strength)
-+ stable_tree_delta_hash(prev_hash_strength);
-+
-+ return prev_hash_strength != hash_strength;
-+}
-+
-+/**
-+ * round_update_ladder() - The main function to update all the
-+ * adjustments whenever a scan round is finished.
-+ */
-+static noinline void round_update_ladder(void)
-+{
-+ int i;
-+ unsigned long dedup;
-+ struct vma_slot *slot, *tmp_slot;
-+
-+ for (i = 0; i < SCAN_LADDER_SIZE; i++)
-+ uksm_scan_ladder[i].flags &= ~UKSM_RUNG_ROUND_FINISHED;
-+
-+ list_for_each_entry_safe(slot, tmp_slot, &vma_slot_dedup, dedup_list) {
-+
-+ /* the slot may have been removed by rung_rm_slot() when its mm exited */
-+ if (slot->snode) {
-+ dedup = cal_dedup_ratio_old(slot);
-+ if (dedup && dedup >= uksm_abundant_threshold)
-+ vma_rung_up(slot);
-+ }
-+
-+ slot->pages_bemerged = 0;
-+ slot->pages_cowed = 0;
-+
-+ list_del_init(&slot->dedup_list);
-+ }
-+}
-+
-+static void uksm_del_vma_slot(struct vma_slot *slot)
-+{
-+ int i, j;
-+ struct rmap_list_entry *entry;
-+
-+ if (slot->snode) {
-+ /*
-+		 * If the slot failed while entering the rung it has no
-+		 * snode, in which case this removal is unnecessary.
-+ */
-+ rung_rm_slot(slot);
-+ }
-+
-+ if (!list_empty(&slot->dedup_list))
-+ list_del(&slot->dedup_list);
-+
-+ if (!slot->rmap_list_pool || !slot->pool_counts) {
-+ /* In case it OOMed in uksm_vma_enter() */
-+ goto out;
-+ }
-+
-+ for (i = 0; i < slot->pool_size; i++) {
-+ void *addr;
-+
-+ if (!slot->rmap_list_pool[i])
-+ continue;
-+
-+ addr = kmap(slot->rmap_list_pool[i]);
-+ for (j = 0; j < PAGE_SIZE / sizeof(*entry); j++) {
-+ entry = (struct rmap_list_entry *)addr + j;
-+ if (is_addr(entry->addr))
-+ continue;
-+ if (!entry->item)
-+ continue;
-+
-+ remove_rmap_item_from_tree(entry->item);
-+ free_rmap_item(entry->item);
-+ slot->pool_counts[i]--;
-+ }
-+ BUG_ON(slot->pool_counts[i]);
-+ kunmap(slot->rmap_list_pool[i]);
-+ __free_page(slot->rmap_list_pool[i]);
-+ }
-+ kfree(slot->rmap_list_pool);
-+ kfree(slot->pool_counts);
-+
-+out:
-+ slot->rung = NULL;
-+ if (slot->flags & UKSM_SLOT_IN_UKSM) {
-+ BUG_ON(uksm_pages_total < slot->pages);
-+ uksm_pages_total -= slot->pages;
-+ }
-+
-+ if (slot->fully_scanned_round == fully_scanned_round)
-+ scanned_virtual_pages -= slot->pages;
-+ else
-+ scanned_virtual_pages -= slot->pages_scanned;
-+ free_vma_slot(slot);
-+}
-+
-+
-+#define SPIN_LOCK_PERIOD 32
-+static struct vma_slot *cleanup_slots[SPIN_LOCK_PERIOD];
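-+
-+/*
-+ * Slots are drained from vma_slot_del in batches of SPIN_LOCK_PERIOD:
-+ * the spinlock is dropped between batches because uksm_del_vma_slot()
-+ * does work (e.g. kmap) that must not run under the lock.
-+ */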
-+static inline void cleanup_vma_slots(void)
-+{
-+ struct vma_slot *slot;
-+ int i;
-+
-+ i = 0;
-+ spin_lock(&vma_slot_list_lock);
-+ while (!list_empty(&vma_slot_del)) {
-+ slot = list_entry(vma_slot_del.next,
-+ struct vma_slot, slot_list);
-+ list_del(&slot->slot_list);
-+ cleanup_slots[i++] = slot;
-+ if (i == SPIN_LOCK_PERIOD) {
-+ spin_unlock(&vma_slot_list_lock);
-+ while (--i >= 0)
-+ uksm_del_vma_slot(cleanup_slots[i]);
-+ i = 0;
-+ spin_lock(&vma_slot_list_lock);
-+ }
-+ }
-+ spin_unlock(&vma_slot_list_lock);
-+
-+ while (--i >= 0)
-+ uksm_del_vma_slot(cleanup_slots[i]);
-+}
-+
-+/*
-+ * Exponential moving average formula
-+ */
-+static inline unsigned long ema(unsigned long curr, unsigned long last_ema)
-+{
-+ /*
-+	 * For a very high burst even the ema cannot work well: a falsely
-+	 * high per-page time estimate feeds back into very high context
-+	 * switch and rung update overhead, which in turn raises the
-+	 * per-page time further, so the estimate may not converge.
-+ *
-+ * Instead, we try to approach this value in a binary manner.
-+ */
-+ if (curr > last_ema * 10)
-+ return last_ema * 2;
-+
-+ return (EMA_ALPHA * curr + (100 - EMA_ALPHA) * last_ema) / 100;
-+}
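-+
-+/*
-+ * Worked example (illustrative only; EMA_ALPHA is defined elsewhere in
-+ * this patch -- assume 20 here): ema(2000, 1000) =
-+ * (20 * 2000 + 80 * 1000) / 100 = 1200. For a 50x burst such as
-+ * ema(50000, 1000), curr > last_ema * 10, so 2000 is returned instead:
-+ * the estimate doubles per round rather than jumping.
-+ */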
-+
-+/*
-+ * Convert a cpu ratio, given by the user in units of 1/TIME_RATIO_SCALE,
-+ * to a scan budget in nanoseconds based on current uksm_sleep_jiffies.
-+ */
-+static inline unsigned long cpu_ratio_to_nsec(unsigned int ratio)
-+{
-+ return NSEC_PER_USEC * jiffies_to_usecs(uksm_sleep_jiffies) /
-+ (TIME_RATIO_SCALE - ratio) * ratio;
-+}
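-+
-+/*
-+ * Worked example (illustrative; assuming TIME_RATIO_SCALE is 1000):
-+ * with uksm_sleep_jiffies equal to 100ms and ratio = 200, the scan
-+ * budget is 100ms * 200 / (1000 - 200) = 25ms = 25,000,000ns, so
-+ * scanning occupies ratio/TIME_RATIO_SCALE of total wall time.
-+ */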
-+
-+
-+static inline unsigned long rung_real_ratio(int cpu_time_ratio)
-+{
-+ unsigned long ret;
-+
-+ BUG_ON(!cpu_time_ratio);
-+
-+ if (cpu_time_ratio > 0)
-+ ret = cpu_time_ratio;
-+ else
-+ ret = (unsigned long)(-cpu_time_ratio) *
-+ uksm_max_cpu_percentage / 100UL;
-+
-+ return ret ? ret : 1;
-+}
-+
-+static noinline void uksm_calc_scan_pages(void)
-+{
-+ struct scan_rung *ladder = uksm_scan_ladder;
-+ unsigned long sleep_usecs, nsecs;
-+ unsigned long ratio;
-+ int i;
-+ unsigned long per_page;
-+
-+ if (uksm_ema_page_time > 100000 ||
-+ (((unsigned long) uksm_eval_round & (256UL - 1)) == 0UL))
-+ uksm_ema_page_time = UKSM_PAGE_TIME_DEFAULT;
-+
-+ per_page = uksm_ema_page_time;
-+ BUG_ON(!per_page);
-+
-+ /*
-+	 * Every 8 eval rounds, re-probe uksm_sleep_jiffies from the
-+	 * value saved from user input.
-+ */
-+ if (((unsigned long) uksm_eval_round & (8UL - 1)) == 0UL)
-+ uksm_sleep_jiffies = uksm_sleep_saved;
-+
-+	/* We require each rung to scan at least one page per period. */
-+ nsecs = per_page;
-+ ratio = rung_real_ratio(ladder[0].cpu_ratio);
-+ if (cpu_ratio_to_nsec(ratio) < nsecs) {
-+ sleep_usecs = nsecs * (TIME_RATIO_SCALE - ratio) / ratio
-+ / NSEC_PER_USEC;
-+ uksm_sleep_jiffies = usecs_to_jiffies(sleep_usecs) + 1;
-+ }
-+
-+ for (i = 0; i < SCAN_LADDER_SIZE; i++) {
-+ ratio = rung_real_ratio(ladder[i].cpu_ratio);
-+ ladder[i].pages_to_scan = cpu_ratio_to_nsec(ratio) /
-+ per_page;
-+ BUG_ON(!ladder[i].pages_to_scan);
-+ uksm_calc_rung_step(&ladder[i], per_page, ratio);
-+ }
-+}
-+
-+/*
-+ * Convert the scan time of this round (ns) into the next expected
-+ * minimum sleep time (ms), being careful of possible overflows. ratio
-+ * is taken from rung_real_ratio().
-+ */
-+static inline
-+unsigned int scan_time_to_sleep(unsigned long long scan_time, unsigned long ratio)
-+{
-+	scan_time >>= 20; /* ns -> ~ms: 2^20 is close to 10^6 */
-+ BUG_ON(scan_time > (ULONG_MAX / TIME_RATIO_SCALE));
-+
-+ return (unsigned int) ((unsigned long) scan_time *
-+ (TIME_RATIO_SCALE - ratio) / ratio);
-+}
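-+
-+/*
-+ * Worked example (illustrative): scan_time = 50,000,000ns >> 20 is
-+ * roughly 47ms. With ratio = 200 out of an assumed TIME_RATIO_SCALE
-+ * of 1000, the next sleep is 47 * 800 / 200 = 188ms, keeping the
-+ * scan/total ratio near the configured 20%.
-+ */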
-+
-+#define __round_mask(x, y) ((__typeof__(x))((y)-1))
-+#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1)
-+
-+static void uksm_vma_enter(struct vma_slot **slots, unsigned long num)
-+{
-+ struct scan_rung *rung;
-+
-+ rung = &uksm_scan_ladder[0];
-+ rung_add_new_slots(rung, slots, num);
-+}
-+
-+static struct vma_slot *batch_slots[SLOT_TREE_NODE_STORE_SIZE];
-+
-+static void uksm_enter_all_slots(void)
-+{
-+ struct vma_slot *slot;
-+ unsigned long index;
-+ struct list_head empty_vma_list;
-+ int i;
-+
-+ i = 0;
-+ index = 0;
-+ INIT_LIST_HEAD(&empty_vma_list);
-+
-+ spin_lock(&vma_slot_list_lock);
-+ while (!list_empty(&vma_slot_new)) {
-+ slot = list_entry(vma_slot_new.next,
-+ struct vma_slot, slot_list);
-+
-+ if (!slot->vma->anon_vma) {
-+ list_move(&slot->slot_list, &empty_vma_list);
-+ } else if (vma_can_enter(slot->vma)) {
-+ batch_slots[index++] = slot;
-+ list_del_init(&slot->slot_list);
-+ } else {
-+ list_move(&slot->slot_list, &vma_slot_noadd);
-+ }
-+
-+ if (++i == SPIN_LOCK_PERIOD ||
-+ (index && !(index % SLOT_TREE_NODE_STORE_SIZE))) {
-+ spin_unlock(&vma_slot_list_lock);
-+
-+ if (index && !(index % SLOT_TREE_NODE_STORE_SIZE)) {
-+ uksm_vma_enter(batch_slots, index);
-+ index = 0;
-+ }
-+ i = 0;
-+ cond_resched();
-+ spin_lock(&vma_slot_list_lock);
-+ }
-+ }
-+
-+ list_splice(&empty_vma_list, &vma_slot_new);
-+
-+ spin_unlock(&vma_slot_list_lock);
-+
-+ if (index)
-+ uksm_vma_enter(batch_slots, index);
-+
-+}
-+
-+static inline int rung_round_finished(struct scan_rung *rung)
-+{
-+ return rung->flags & UKSM_RUNG_ROUND_FINISHED;
-+}
-+
-+static inline void judge_slot(struct vma_slot *slot)
-+{
-+ struct scan_rung *rung = slot->rung;
-+ unsigned long dedup;
-+ int deleted;
-+
-+ dedup = cal_dedup_ratio(slot);
-+ if (vma_fully_scanned(slot) && uksm_thrash_threshold)
-+ deleted = vma_rung_enter(slot, &uksm_scan_ladder[0]);
-+ else if (dedup && dedup >= uksm_abundant_threshold)
-+ deleted = vma_rung_up(slot);
-+ else
-+ deleted = vma_rung_down(slot);
-+
-+ slot->pages_merged = 0;
-+ slot->pages_cowed = 0;
-+ slot->this_sampled = 0;
-+
-+ if (vma_fully_scanned(slot))
-+ slot->pages_scanned = 0;
-+
-+ slot->last_scanned = slot->pages_scanned;
-+
-+	/* If it was deleted above, the rung was already advanced. */
-+ if (!deleted)
-+ advance_current_scan(rung);
-+}
-+
-+
-+static inline int hash_round_finished(void)
-+{
-+ if (scanned_virtual_pages > (uksm_pages_total >> 2)) {
-+ scanned_virtual_pages = 0;
-+ if (uksm_pages_scanned)
-+ fully_scanned_round++;
-+
-+ return 1;
-+ } else {
-+ return 0;
-+ }
-+}
-+
-+#define UKSM_MMSEM_BATCH 5
-+#define BUSY_RETRY 100
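-+
-+/*
-+ * UKSM_MMSEM_BATCH keeps one mm's mmap_sem held across up to 5
-+ * consecutive page scans instead of retaking it for every page;
-+ * BUSY_RETRY bounds how many slots are skipped past a busy mm
-+ * before the rung's scan round is considered finished.
-+ */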
-+
-+/**
-+ * uksm_do_scan() - the main worker function.
-+ */
-+static noinline void uksm_do_scan(void)
-+{
-+ struct vma_slot *slot, *iter;
-+ struct mm_struct *busy_mm;
-+	unsigned char round_finished, all_rungs_empty;
-+ int i, err, mmsem_batch;
-+ unsigned long pcost;
-+ long long delta_exec;
-+ unsigned long vpages, max_cpu_ratio;
-+ unsigned long long start_time, end_time, scan_time;
-+ unsigned int expected_jiffies;
-+
-+ might_sleep();
-+
-+ vpages = 0;
-+
-+ start_time = task_sched_runtime(current);
-+ max_cpu_ratio = 0;
-+ mmsem_batch = 0;
-+
-+ for (i = 0; i < SCAN_LADDER_SIZE;) {
-+ struct scan_rung *rung = &uksm_scan_ladder[i];
-+ unsigned long ratio;
-+ int busy_retry;
-+
-+ if (!rung->pages_to_scan) {
-+ i++;
-+ continue;
-+ }
-+
-+ if (!rung->vma_root.num) {
-+ rung->pages_to_scan = 0;
-+ i++;
-+ continue;
-+ }
-+
-+ ratio = rung_real_ratio(rung->cpu_ratio);
-+ if (ratio > max_cpu_ratio)
-+ max_cpu_ratio = ratio;
-+
-+ busy_retry = BUSY_RETRY;
-+ /*
-+		 * Do not consider rung_round_finished() here, just use
-+		 * up the rung->pages_to_scan quota.
-+ */
-+ while (rung->pages_to_scan && rung->vma_root.num &&
-+ likely(!freezing(current))) {
-+ int reset = 0;
-+
-+ slot = rung->current_scan;
-+
-+ BUG_ON(vma_fully_scanned(slot));
-+
-+ if (mmsem_batch)
-+ err = 0;
-+ else
-+ err = try_down_read_slot_mmap_sem(slot);
-+
-+ if (err == -ENOENT) {
-+rm_slot:
-+ rung_rm_slot(slot);
-+ continue;
-+ }
-+
-+ busy_mm = slot->mm;
-+
-+ if (err == -EBUSY) {
-+ /* skip other vmas on the same mm */
-+ do {
-+ reset = advance_current_scan(rung);
-+ iter = rung->current_scan;
-+ busy_retry--;
-+ if (iter->vma->vm_mm != busy_mm ||
-+ !busy_retry || reset)
-+ break;
-+ } while (1);
-+
-+ if (iter->vma->vm_mm != busy_mm) {
-+ continue;
-+ } else {
-+					/* scan round finished */
-+ break;
-+ }
-+ }
-+
-+ BUG_ON(!vma_can_enter(slot->vma));
-+ if (uksm_test_exit(slot->vma->vm_mm)) {
-+ mmsem_batch = 0;
-+ up_read(&slot->vma->vm_mm->mmap_sem);
-+ goto rm_slot;
-+ }
-+
-+ if (mmsem_batch)
-+ mmsem_batch--;
-+ else
-+ mmsem_batch = UKSM_MMSEM_BATCH;
-+
-+			/* OK, we have taken the mmap_sem, ready to scan */
-+ scan_vma_one_page(slot);
-+ rung->pages_to_scan--;
-+ vpages++;
-+
-+ if (rung->current_offset + rung->step > slot->pages - 1
-+ || vma_fully_scanned(slot)) {
-+ up_read(&slot->vma->vm_mm->mmap_sem);
-+ judge_slot(slot);
-+ mmsem_batch = 0;
-+ } else {
-+ rung->current_offset += rung->step;
-+ if (!mmsem_batch)
-+ up_read(&slot->vma->vm_mm->mmap_sem);
-+ }
-+
-+ busy_retry = BUSY_RETRY;
-+ cond_resched();
-+ }
-+
-+ if (mmsem_batch) {
-+ up_read(&slot->vma->vm_mm->mmap_sem);
-+ mmsem_batch = 0;
-+ }
-+
-+ if (freezing(current))
-+ break;
-+
-+ cond_resched();
-+ }
-+ end_time = task_sched_runtime(current);
-+ delta_exec = end_time - start_time;
-+
-+ if (freezing(current))
-+ return;
-+
-+ cleanup_vma_slots();
-+ uksm_enter_all_slots();
-+
-+ round_finished = 1;
-+	all_rungs_empty = 1;
-+ for (i = 0; i < SCAN_LADDER_SIZE; i++) {
-+ struct scan_rung *rung = &uksm_scan_ladder[i];
-+
-+ if (rung->vma_root.num) {
-+			all_rungs_empty = 0;
-+ if (!rung_round_finished(rung))
-+ round_finished = 0;
-+ }
-+ }
-+
-+	if (all_rungs_empty)
-+ round_finished = 0;
-+
-+ if (round_finished) {
-+ round_update_ladder();
-+ uksm_eval_round++;
-+
-+ if (hash_round_finished() && rshash_adjust()) {
-+ /* Reset the unstable root iff hash strength changed */
-+ uksm_hash_round++;
-+ root_unstable_tree = RB_ROOT;
-+ free_all_tree_nodes(&unstable_tree_node_list);
-+ }
-+
-+ /*
-+ * A number of pages can hang around indefinitely on per-cpu
-+ * pagevecs, raised page count preventing write_protect_page
-+ * from merging them. Though it doesn't really matter much,
-+ * it is puzzling to see some stuck in pages_volatile until
-+ * other activity jostles them out, and they also prevented
-+ * LTP's KSM test from succeeding deterministically; so drain
-+ * them here (here rather than on entry to uksm_do_scan(),
-+ * so we don't IPI too often when pages_to_scan is set low).
-+ */
-+ lru_add_drain_all();
-+ }
-+
-+
-+ if (vpages && delta_exec > 0) {
-+ pcost = (unsigned long) delta_exec / vpages;
-+ if (likely(uksm_ema_page_time))
-+ uksm_ema_page_time = ema(pcost, uksm_ema_page_time);
-+ else
-+ uksm_ema_page_time = pcost;
-+ }
-+
-+ uksm_calc_scan_pages();
-+ uksm_sleep_real = uksm_sleep_jiffies;
-+ /* in case of radical cpu bursts, apply the upper bound */
-+ end_time = task_sched_runtime(current);
-+ if (max_cpu_ratio && end_time > start_time) {
-+ scan_time = end_time - start_time;
-+ expected_jiffies = msecs_to_jiffies(
-+ scan_time_to_sleep(scan_time, max_cpu_ratio));
-+
-+ if (expected_jiffies > uksm_sleep_real)
-+ uksm_sleep_real = expected_jiffies;
-+
-+		/* We have a 1 second upper bound for responsiveness. */
-+ if (jiffies_to_msecs(uksm_sleep_real) > MSEC_PER_SEC)
-+ uksm_sleep_real = msecs_to_jiffies(1000);
-+ }
-+
-+ return;
-+}
-+
-+static int ksmd_should_run(void)
-+{
-+ return uksm_run & UKSM_RUN_MERGE;
-+}
-+
-+static int uksm_scan_thread(void *nothing)
-+{
-+ set_freezable();
-+ set_user_nice(current, 5);
-+
-+ while (!kthread_should_stop()) {
-+ mutex_lock(&uksm_thread_mutex);
-+ if (ksmd_should_run())
-+ uksm_do_scan();
-+ mutex_unlock(&uksm_thread_mutex);
-+
-+ try_to_freeze();
-+
-+ if (ksmd_should_run()) {
-+ schedule_timeout_interruptible(uksm_sleep_real);
-+ uksm_sleep_times++;
-+ } else {
-+ wait_event_freezable(uksm_thread_wait,
-+ ksmd_should_run() || kthread_should_stop());
-+ }
-+ }
-+ return 0;
-+}
-+
-+void rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc)
-+{
-+ struct stable_node *stable_node;
-+ struct node_vma *node_vma;
-+ struct rmap_item *rmap_item;
-+ int search_new_forks = 0;
-+ unsigned long address;
-+
-+ VM_BUG_ON_PAGE(!PageKsm(page), page);
-+ VM_BUG_ON_PAGE(!PageLocked(page), page);
-+
-+ stable_node = page_stable_node(page);
-+ if (!stable_node)
-+ return;
-+again:
-+ hlist_for_each_entry(node_vma, &stable_node->hlist, hlist) {
-+ hlist_for_each_entry(rmap_item, &node_vma->rmap_hlist, hlist) {
-+ struct anon_vma *anon_vma = rmap_item->anon_vma;
-+ struct anon_vma_chain *vmac;
-+ struct vm_area_struct *vma;
-+
-+ cond_resched();
-+ anon_vma_lock_read(anon_vma);
-+ anon_vma_interval_tree_foreach(vmac, &anon_vma->rb_root,
-+ 0, ULONG_MAX) {
-+ cond_resched();
-+ vma = vmac->vma;
-+ address = get_rmap_addr(rmap_item);
-+
-+ if (address < vma->vm_start ||
-+ address >= vma->vm_end)
-+ continue;
-+
-+ if ((rmap_item->slot->vma == vma) ==
-+ search_new_forks)
-+ continue;
-+
-+ if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
-+ continue;
-+
-+ if (!rwc->rmap_one(page, vma, address, rwc->arg)) {
-+ anon_vma_unlock_read(anon_vma);
-+ return;
-+ }
-+
-+ if (rwc->done && rwc->done(page)) {
-+ anon_vma_unlock_read(anon_vma);
-+ return;
-+ }
-+ }
-+ anon_vma_unlock_read(anon_vma);
-+ }
-+ }
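-+	/*
-+	 * Pass 1 visits the VMA each rmap_item was merged from; pass 2
-+	 * (search_new_forks) revisits the anon_vma trees to catch forked
-+	 * mms sharing those pages.
-+	 */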
-+ if (!search_new_forks++)
-+ goto again;
-+}
-+
-+#ifdef CONFIG_MIGRATION
-+/* Common ksm interface but may be specific to uksm */
-+void ksm_migrate_page(struct page *newpage, struct page *oldpage)
-+{
-+ struct stable_node *stable_node;
-+
-+ VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage);
-+ VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
-+ VM_BUG_ON(newpage->mapping != oldpage->mapping);
-+
-+ stable_node = page_stable_node(newpage);
-+ if (stable_node) {
-+ VM_BUG_ON(stable_node->kpfn != page_to_pfn(oldpage));
-+ stable_node->kpfn = page_to_pfn(newpage);
-+ /*
-+ * newpage->mapping was set in advance; now we need smp_wmb()
-+ * to make sure that the new stable_node->kpfn is visible
-+ * to get_ksm_page() before it can see that oldpage->mapping
-+ * has gone stale (or that PageSwapCache has been cleared).
-+ */
-+ smp_wmb();
-+ set_page_stable_node(oldpage, NULL);
-+ }
-+}
-+#endif /* CONFIG_MIGRATION */
-+
-+#ifdef CONFIG_MEMORY_HOTREMOVE
-+static struct stable_node *uksm_check_stable_tree(unsigned long start_pfn,
-+ unsigned long end_pfn)
-+{
-+ struct rb_node *node;
-+
-+ for (node = rb_first(root_stable_treep); node; node = rb_next(node)) {
-+ struct stable_node *stable_node;
-+
-+ stable_node = rb_entry(node, struct stable_node, node);
-+ if (stable_node->kpfn >= start_pfn &&
-+ stable_node->kpfn < end_pfn)
-+ return stable_node;
-+ }
-+ return NULL;
-+}
-+
-+static int uksm_memory_callback(struct notifier_block *self,
-+ unsigned long action, void *arg)
-+{
-+ struct memory_notify *mn = arg;
-+ struct stable_node *stable_node;
-+
-+ switch (action) {
-+ case MEM_GOING_OFFLINE:
-+ /*
-+ * Keep it very simple for now: just lock out ksmd and
-+ * MADV_UNMERGEABLE while any memory is going offline.
-+ * mutex_lock_nested() is necessary because lockdep was alarmed
-+ * that here we take uksm_thread_mutex inside notifier chain
-+ * mutex, and later take notifier chain mutex inside
-+ * uksm_thread_mutex to unlock it. But that's safe because both
-+ * are inside mem_hotplug_mutex.
-+ */
-+ mutex_lock_nested(&uksm_thread_mutex, SINGLE_DEPTH_NESTING);
-+ break;
-+
-+ case MEM_OFFLINE:
-+ /*
-+ * Most of the work is done by page migration; but there might
-+ * be a few stable_nodes left over, still pointing to struct
-+ * pages which have been offlined: prune those from the tree.
-+ */
-+ while ((stable_node = uksm_check_stable_tree(mn->start_pfn,
-+ mn->start_pfn + mn->nr_pages)) != NULL)
-+ remove_node_from_stable_tree(stable_node, 1, 1);
-+ /* fallthrough */
-+
-+ case MEM_CANCEL_OFFLINE:
-+ mutex_unlock(&uksm_thread_mutex);
-+ break;
-+ }
-+ return NOTIFY_OK;
-+}
-+#endif /* CONFIG_MEMORY_HOTREMOVE */
-+
-+#ifdef CONFIG_SYSFS
-+/*
-+ * This all compiles without CONFIG_SYSFS, but is a waste of space.
-+ */
-+
-+#define UKSM_ATTR_RO(_name) \
-+ static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
-+#define UKSM_ATTR(_name) \
-+ static struct kobj_attribute _name##_attr = \
-+ __ATTR(_name, 0644, _name##_show, _name##_store)
-+
-+static ssize_t max_cpu_percentage_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ return sprintf(buf, "%u\n", uksm_max_cpu_percentage);
-+}
-+
-+static ssize_t max_cpu_percentage_store(struct kobject *kobj,
-+ struct kobj_attribute *attr,
-+ const char *buf, size_t count)
-+{
-+ unsigned long max_cpu_percentage;
-+ int err;
-+
-+ err = kstrtoul(buf, 10, &max_cpu_percentage);
-+ if (err || max_cpu_percentage > 100)
-+ return -EINVAL;
-+
-+ if (max_cpu_percentage == 100)
-+ max_cpu_percentage = 99;
-+ else if (max_cpu_percentage < 10)
-+ max_cpu_percentage = 10;
-+
-+ uksm_max_cpu_percentage = max_cpu_percentage;
-+
-+ return count;
-+}
-+UKSM_ATTR(max_cpu_percentage);
-+
-+static ssize_t sleep_millisecs_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ return sprintf(buf, "%u\n", jiffies_to_msecs(uksm_sleep_jiffies));
-+}
-+
-+static ssize_t sleep_millisecs_store(struct kobject *kobj,
-+ struct kobj_attribute *attr,
-+ const char *buf, size_t count)
-+{
-+ unsigned long msecs;
-+ int err;
-+
-+ err = kstrtoul(buf, 10, &msecs);
-+ if (err || msecs > MSEC_PER_SEC)
-+ return -EINVAL;
-+
-+ uksm_sleep_jiffies = msecs_to_jiffies(msecs);
-+ uksm_sleep_saved = uksm_sleep_jiffies;
-+
-+ return count;
-+}
-+UKSM_ATTR(sleep_millisecs);
-+
-+
-+static ssize_t cpu_governor_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ int n = sizeof(uksm_cpu_governor_str) / sizeof(char *);
-+ int i;
-+
-+ buf[0] = '\0';
-+ for (i = 0; i < n ; i++) {
-+ if (uksm_cpu_governor == i)
-+ strcat(buf, "[");
-+
-+ strcat(buf, uksm_cpu_governor_str[i]);
-+
-+ if (uksm_cpu_governor == i)
-+ strcat(buf, "]");
-+
-+ strcat(buf, " ");
-+ }
-+ strcat(buf, "\n");
-+
-+ return strlen(buf);
-+}
-+
-+static inline void init_performance_values(void)
-+{
-+ int i;
-+ struct scan_rung *rung;
-+ struct uksm_cpu_preset_s *preset = uksm_cpu_preset + uksm_cpu_governor;
-+
-+
-+ for (i = 0; i < SCAN_LADDER_SIZE; i++) {
-+ rung = uksm_scan_ladder + i;
-+ rung->cpu_ratio = preset->cpu_ratio[i];
-+ rung->cover_msecs = preset->cover_msecs[i];
-+ }
-+
-+ uksm_max_cpu_percentage = preset->max_cpu;
-+}
-+
-+static ssize_t cpu_governor_store(struct kobject *kobj,
-+ struct kobj_attribute *attr,
-+ const char *buf, size_t count)
-+{
-+ int n = sizeof(uksm_cpu_governor_str) / sizeof(char *);
-+
-+ for (n--; n >= 0 ; n--) {
-+ if (!strncmp(buf, uksm_cpu_governor_str[n],
-+ strlen(uksm_cpu_governor_str[n])))
-+ break;
-+ }
-+
-+ if (n < 0)
-+ return -EINVAL;
-+ else
-+ uksm_cpu_governor = n;
-+
-+ init_performance_values();
-+
-+ return count;
-+}
-+UKSM_ATTR(cpu_governor);
-+
-+static ssize_t run_show(struct kobject *kobj, struct kobj_attribute *attr,
-+ char *buf)
-+{
-+ return sprintf(buf, "%u\n", uksm_run);
-+}
-+
-+static ssize_t run_store(struct kobject *kobj, struct kobj_attribute *attr,
-+ const char *buf, size_t count)
-+{
-+ int err;
-+ unsigned long flags;
-+
-+ err = kstrtoul(buf, 10, &flags);
-+ if (err || flags > UINT_MAX)
-+ return -EINVAL;
-+ if (flags > UKSM_RUN_MERGE)
-+ return -EINVAL;
-+
-+ mutex_lock(&uksm_thread_mutex);
-+ if (uksm_run != flags)
-+ uksm_run = flags;
-+ mutex_unlock(&uksm_thread_mutex);
-+
-+ if (flags & UKSM_RUN_MERGE)
-+ wake_up_interruptible(&uksm_thread_wait);
-+
-+ return count;
-+}
-+UKSM_ATTR(run);
-+
-+static ssize_t abundant_threshold_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ return sprintf(buf, "%u\n", uksm_abundant_threshold);
-+}
-+
-+static ssize_t abundant_threshold_store(struct kobject *kobj,
-+ struct kobj_attribute *attr,
-+ const char *buf, size_t count)
-+{
-+ int err;
-+ unsigned long flags;
-+
-+ err = kstrtoul(buf, 10, &flags);
-+ if (err || flags > 99)
-+ return -EINVAL;
-+
-+ uksm_abundant_threshold = flags;
-+
-+ return count;
-+}
-+UKSM_ATTR(abundant_threshold);
-+
-+static ssize_t thrash_threshold_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ return sprintf(buf, "%u\n", uksm_thrash_threshold);
-+}
-+
-+static ssize_t thrash_threshold_store(struct kobject *kobj,
-+ struct kobj_attribute *attr,
-+ const char *buf, size_t count)
-+{
-+ int err;
-+ unsigned long flags;
-+
-+ err = kstrtoul(buf, 10, &flags);
-+ if (err || flags > 99)
-+ return -EINVAL;
-+
-+ uksm_thrash_threshold = flags;
-+
-+ return count;
-+}
-+UKSM_ATTR(thrash_threshold);
-+
-+static ssize_t cpu_ratios_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ int i, size;
-+ struct scan_rung *rung;
-+ char *p = buf;
-+
-+ for (i = 0; i < SCAN_LADDER_SIZE; i++) {
-+ rung = &uksm_scan_ladder[i];
-+
-+ if (rung->cpu_ratio > 0)
-+ size = sprintf(p, "%d ", rung->cpu_ratio);
-+ else
-+ size = sprintf(p, "MAX/%d ",
-+ TIME_RATIO_SCALE / -rung->cpu_ratio);
-+
-+ p += size;
-+ }
-+
-+ *p++ = '\n';
-+ *p = '\0';
-+
-+ return p - buf;
-+}
-+
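-+/*
-+ * Expected input: SCAN_LADDER_SIZE space-separated fields, each either
-+ * a plain ratio in (0, TIME_RATIO_SCALE] or "MAX/<n>" meaning 1/n of
-+ * the maximum CPU budget, e.g. (illustrative) "20 50 MAX/4 MAX/1".
-+ */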
-+static ssize_t cpu_ratios_store(struct kobject *kobj,
-+ struct kobj_attribute *attr,
-+ const char *buf, size_t count)
-+{
-+	int i, cpuratios[SCAN_LADDER_SIZE], err;
-+	unsigned long value;
-+	struct scan_rung *rung;
-+	char *base, *p, *end = NULL;
-+	ssize_t ret = count;
-+
-+	/* +1 keeps the buffer NUL-terminated for strchr/strstr */
-+	base = kzalloc(count + 1, GFP_KERNEL);
-+	if (!base)
-+		return -ENOMEM;
-+
-+	memcpy(base, buf, count);
-+	p = base;
-+
-+	for (i = 0; i < SCAN_LADDER_SIZE; i++) {
-+		if (i != SCAN_LADDER_SIZE - 1) {
-+			end = strchr(p, ' ');
-+			if (!end) {
-+				ret = -EINVAL;
-+				goto out;
-+			}
-+
-+			*end = '\0';
-+		}
-+
-+		if (strstr(p, "MAX/")) {
-+			p = strchr(p, '/') + 1;
-+			err = kstrtoul(p, 10, &value);
-+			if (err || value > TIME_RATIO_SCALE || !value) {
-+				ret = -EINVAL;
-+				goto out;
-+			}
-+
-+			cpuratios[i] = -(int) (TIME_RATIO_SCALE / value);
-+		} else {
-+			err = kstrtoul(p, 10, &value);
-+			if (err || value > TIME_RATIO_SCALE || !value) {
-+				ret = -EINVAL;
-+				goto out;
-+			}
-+
-+			cpuratios[i] = value;
-+		}
-+
-+		p = end + 1;
-+	}
-+
-+	for (i = 0; i < SCAN_LADDER_SIZE; i++) {
-+		rung = &uksm_scan_ladder[i];
-+
-+		rung->cpu_ratio = cpuratios[i];
-+	}
-+
-+out:
-+	kfree(base);
-+	return ret;
-+}
-+UKSM_ATTR(cpu_ratios);
-+
-+static ssize_t eval_intervals_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ int i, size;
-+ struct scan_rung *rung;
-+ char *p = buf;
-+
-+ for (i = 0; i < SCAN_LADDER_SIZE; i++) {
-+ rung = &uksm_scan_ladder[i];
-+ size = sprintf(p, "%u ", rung->cover_msecs);
-+ p += size;
-+ }
-+
-+ *p++ = '\n';
-+ *p = '\0';
-+
-+ return p - buf;
-+}
-+
-+static ssize_t eval_intervals_store(struct kobject *kobj,
-+ struct kobj_attribute *attr,
-+ const char *buf, size_t count)
-+{
-+ int i, err;
-+ unsigned long values[SCAN_LADDER_SIZE];
-+ struct scan_rung *rung;
-+	char *base, *p, *end = NULL;
-+ ssize_t ret = count;
-+
-+	base = kzalloc(count + 2, GFP_KERNEL);
-+	if (!base)
-+		return -ENOMEM;
-+
-+	memcpy(base, buf, count);
-+	p = base;
-+
-+ for (i = 0; i < SCAN_LADDER_SIZE; i++) {
-+ if (i != SCAN_LADDER_SIZE - 1) {
-+ end = strchr(p, ' ');
-+ if (!end) {
-+ ret = -EINVAL;
-+ goto out;
-+ }
-+
-+ *end = '\0';
-+ }
-+
-+ err = kstrtoul(p, 10, &values[i]);
-+ if (err) {
-+ ret = -EINVAL;
-+ goto out;
-+ }
-+
-+ p = end + 1;
-+ }
-+
-+ for (i = 0; i < SCAN_LADDER_SIZE; i++) {
-+ rung = &uksm_scan_ladder[i];
-+
-+ rung->cover_msecs = values[i];
-+ }
-+
-+out:
-+	kfree(base);
-+ return ret;
-+}
-+UKSM_ATTR(eval_intervals);
-+
-+static ssize_t ema_per_page_time_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ return sprintf(buf, "%lu\n", uksm_ema_page_time);
-+}
-+UKSM_ATTR_RO(ema_per_page_time);
-+
-+static ssize_t pages_shared_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ return sprintf(buf, "%lu\n", uksm_pages_shared);
-+}
-+UKSM_ATTR_RO(pages_shared);
-+
-+static ssize_t pages_sharing_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ return sprintf(buf, "%lu\n", uksm_pages_sharing);
-+}
-+UKSM_ATTR_RO(pages_sharing);
-+
-+static ssize_t pages_unshared_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ return sprintf(buf, "%lu\n", uksm_pages_unshared);
-+}
-+UKSM_ATTR_RO(pages_unshared);
-+
-+static ssize_t full_scans_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ return sprintf(buf, "%llu\n", fully_scanned_round);
-+}
-+UKSM_ATTR_RO(full_scans);
-+
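-+/*
-+ * The lifetime scanned-pages counter is kept as <value> * 2^<base> so
-+ * it can grow beyond ULONG_MAX; e.g. an output of "123 * 2^4" stands
-+ * for roughly 1968 pages scanned.
-+ */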
-+static ssize_t pages_scanned_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ unsigned long base = 0;
-+ u64 delta, ret;
-+
-+ if (pages_scanned_stored) {
-+ base = pages_scanned_base;
-+ ret = pages_scanned_stored;
-+ delta = uksm_pages_scanned >> base;
-+ if (CAN_OVERFLOW_U64(ret, delta)) {
-+ ret >>= 1;
-+ delta >>= 1;
-+ base++;
-+ ret += delta;
-+ }
-+ } else {
-+ ret = uksm_pages_scanned;
-+ }
-+
-+ while (ret > ULONG_MAX) {
-+ ret >>= 1;
-+ base++;
-+ }
-+
-+ if (base)
-+ return sprintf(buf, "%lu * 2^%lu\n", (unsigned long)ret, base);
-+ else
-+ return sprintf(buf, "%lu\n", (unsigned long)ret);
-+}
-+UKSM_ATTR_RO(pages_scanned);
-+
-+static ssize_t hash_strength_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ return sprintf(buf, "%lu\n", hash_strength);
-+}
-+UKSM_ATTR_RO(hash_strength);
-+
-+static ssize_t sleep_times_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ return sprintf(buf, "%llu\n", uksm_sleep_times);
-+}
-+UKSM_ATTR_RO(sleep_times);
-+
-+
-+static struct attribute *uksm_attrs[] = {
-+ &max_cpu_percentage_attr.attr,
-+ &sleep_millisecs_attr.attr,
-+ &cpu_governor_attr.attr,
-+ &run_attr.attr,
-+ &ema_per_page_time_attr.attr,
-+ &pages_shared_attr.attr,
-+ &pages_sharing_attr.attr,
-+ &pages_unshared_attr.attr,
-+ &full_scans_attr.attr,
-+ &pages_scanned_attr.attr,
-+ &hash_strength_attr.attr,
-+ &sleep_times_attr.attr,
-+ &thrash_threshold_attr.attr,
-+ &abundant_threshold_attr.attr,
-+ &cpu_ratios_attr.attr,
-+ &eval_intervals_attr.attr,
-+ NULL,
-+};
-+
-+static struct attribute_group uksm_attr_group = {
-+ .attrs = uksm_attrs,
-+ .name = "uksm",
-+};
-+#endif /* CONFIG_SYSFS */
-+
-+static inline void init_scan_ladder(void)
-+{
-+ int i;
-+ struct scan_rung *rung;
-+
-+ for (i = 0; i < SCAN_LADDER_SIZE; i++) {
-+ rung = uksm_scan_ladder + i;
-+ slot_tree_init_root(&rung->vma_root);
-+ }
-+
-+ init_performance_values();
-+ uksm_calc_scan_pages();
-+}
-+
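-+/*
-+ * Calibrate the cost of a full-page memcmp relative to one unit of
-+ * hash strength: memcmp_cost ~= HASH_STRENGTH_FULL * t(memcmp) /
-+ * t(full-strength hash), with both times measured in jiffies below.
-+ */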
-+static inline int cal_positive_negative_costs(void)
-+{
-+ struct page *p1, *p2;
-+ unsigned char *addr1, *addr2;
-+ unsigned long i, time_start, hash_cost;
-+ unsigned long loopnum = 0;
-+
-+	/* IMPORTANT: volatile is needed to prevent over-optimization by gcc. */
-+ volatile u32 hash;
-+ volatile int ret;
-+
-+ p1 = alloc_page(GFP_KERNEL);
-+ if (!p1)
-+ return -ENOMEM;
-+
-+	p2 = alloc_page(GFP_KERNEL);
-+	if (!p2) {
-+		__free_page(p1);
-+		return -ENOMEM;
-+	}
-+
-+ addr1 = kmap_atomic(p1);
-+ addr2 = kmap_atomic(p2);
-+ memset(addr1, prandom_u32(), PAGE_SIZE);
-+ memcpy(addr2, addr1, PAGE_SIZE);
-+
-+ /* make sure that the two pages differ in last byte */
-+ addr2[PAGE_SIZE-1] = ~addr2[PAGE_SIZE-1];
-+ kunmap_atomic(addr2);
-+ kunmap_atomic(addr1);
-+
-+ time_start = jiffies;
-+ while (jiffies - time_start < 100) {
-+ for (i = 0; i < 100; i++)
-+ hash = page_hash(p1, HASH_STRENGTH_FULL, 0);
-+ loopnum += 100;
-+ }
-+ hash_cost = (jiffies - time_start);
-+
-+ time_start = jiffies;
-+ for (i = 0; i < loopnum; i++)
-+ ret = pages_identical(p1, p2);
-+ memcmp_cost = HASH_STRENGTH_FULL * (jiffies - time_start);
-+ memcmp_cost /= hash_cost;
-+	pr_info("UKSM: relative memcmp_cost = %lu hash=%u cmp_ret=%d.\n",
-+		memcmp_cost, hash, ret);
-+
-+ __free_page(p1);
-+ __free_page(p2);
-+ return 0;
-+}
-+
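-+/*
-+ * Precompute the hash of the all-zero page at every strength so a
-+ * candidate page can be recognised as a zero page by comparing its
-+ * hash against this table, avoiding a memcmp with an actual zero page.
-+ */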
-+static int init_zeropage_hash_table(void)
-+{
-+ struct page *page;
-+ char *addr;
-+ int i;
-+
-+ page = alloc_page(GFP_KERNEL);
-+ if (!page)
-+ return -ENOMEM;
-+
-+ addr = kmap_atomic(page);
-+ memset(addr, 0, PAGE_SIZE);
-+ kunmap_atomic(addr);
-+
-+ zero_hash_table = kmalloc_array(HASH_STRENGTH_MAX, sizeof(u32),
-+ GFP_KERNEL);
-+	if (!zero_hash_table) {
-+		__free_page(page);
-+		return -ENOMEM;
-+	}
-+
-+ for (i = 0; i < HASH_STRENGTH_MAX; i++)
-+ zero_hash_table[i] = page_hash(page, i, 0);
-+
-+ __free_page(page);
-+
-+ return 0;
-+}
-+
-+static inline int init_random_sampling(void)
-+{
-+ unsigned long i;
-+
-+ random_nums = kmalloc(PAGE_SIZE, GFP_KERNEL);
-+ if (!random_nums)
-+ return -ENOMEM;
-+
-+ for (i = 0; i < HASH_STRENGTH_FULL; i++)
-+ random_nums[i] = i;
-+
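-+	/* Fisher-Yates shuffle: uniformly permute the sampling order */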
-+ for (i = 0; i < HASH_STRENGTH_FULL; i++) {
-+ unsigned long rand_range, swap_index, tmp;
-+
-+ rand_range = HASH_STRENGTH_FULL - i;
-+ swap_index = i + prandom_u32() % rand_range;
-+ tmp = random_nums[i];
-+ random_nums[i] = random_nums[swap_index];
-+ random_nums[swap_index] = tmp;
-+ }
-+
-+ rshash_state.state = RSHASH_NEW;
-+ rshash_state.below_count = 0;
-+ rshash_state.lookup_window_index = 0;
-+
-+ return cal_positive_negative_costs();
-+}
-+
-+static int __init uksm_slab_init(void)
-+{
-+ rmap_item_cache = UKSM_KMEM_CACHE(rmap_item, 0);
-+ if (!rmap_item_cache)
-+ goto out;
-+
-+ stable_node_cache = UKSM_KMEM_CACHE(stable_node, 0);
-+ if (!stable_node_cache)
-+ goto out_free1;
-+
-+ node_vma_cache = UKSM_KMEM_CACHE(node_vma, 0);
-+ if (!node_vma_cache)
-+ goto out_free2;
-+
-+ vma_slot_cache = UKSM_KMEM_CACHE(vma_slot, 0);
-+ if (!vma_slot_cache)
-+ goto out_free3;
-+
-+ tree_node_cache = UKSM_KMEM_CACHE(tree_node, 0);
-+ if (!tree_node_cache)
-+ goto out_free4;
-+
-+ return 0;
-+
-+out_free4:
-+ kmem_cache_destroy(vma_slot_cache);
-+out_free3:
-+ kmem_cache_destroy(node_vma_cache);
-+out_free2:
-+ kmem_cache_destroy(stable_node_cache);
-+out_free1:
-+ kmem_cache_destroy(rmap_item_cache);
-+out:
-+ return -ENOMEM;
-+}
-+
-+static void __init uksm_slab_free(void)
-+{
-+ kmem_cache_destroy(stable_node_cache);
-+ kmem_cache_destroy(rmap_item_cache);
-+ kmem_cache_destroy(node_vma_cache);
-+ kmem_cache_destroy(vma_slot_cache);
-+ kmem_cache_destroy(tree_node_cache);
-+}
-+
-+/* Common interface shared with ksm, though the behaviour differs. */
-+int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
-+ unsigned long end, int advice, unsigned long *vm_flags)
-+{
-+ int err;
-+
-+ switch (advice) {
-+ case MADV_MERGEABLE:
-+ return 0; /* just ignore the advice */
-+
-+ case MADV_UNMERGEABLE:
-+ if (!(*vm_flags & VM_MERGEABLE) || !uksm_flags_can_scan(*vm_flags))
-+ return 0; /* just ignore the advice */
-+
-+ if (vma->anon_vma) {
-+ err = unmerge_uksm_pages(vma, start, end);
-+ if (err)
-+ return err;
-+ }
-+
-+ uksm_remove_vma(vma);
-+ *vm_flags &= ~VM_MERGEABLE;
-+ break;
-+ }
-+
-+ return 0;
-+}
-+
-+/* Common interface to ksm, actually the same. */
-+struct page *ksm_might_need_to_copy(struct page *page,
-+ struct vm_area_struct *vma, unsigned long address)
-+{
-+ struct anon_vma *anon_vma = page_anon_vma(page);
-+ struct page *new_page;
-+
-+ if (PageKsm(page)) {
-+ if (page_stable_node(page))
-+ return page; /* no need to copy it */
-+ } else if (!anon_vma) {
-+ return page; /* no need to copy it */
-+ } else if (anon_vma->root == vma->anon_vma->root &&
-+ page->index == linear_page_index(vma, address)) {
-+ return page; /* still no need to copy it */
-+ }
-+ if (!PageUptodate(page))
-+ return page; /* let do_swap_page report the error */
-+
-+ new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
-+ if (new_page) {
-+ copy_user_highpage(new_page, page, address, vma);
-+
-+ SetPageDirty(new_page);
-+ __SetPageUptodate(new_page);
-+ __SetPageLocked(new_page);
-+ }
-+
-+ return new_page;
-+}
-+
-+static int __init uksm_init(void)
-+{
-+ struct task_struct *uksm_thread;
-+ int err;
-+
-+ uksm_sleep_jiffies = msecs_to_jiffies(100);
-+ uksm_sleep_saved = uksm_sleep_jiffies;
-+
-+ slot_tree_init();
-+ init_scan_ladder();
-+
-+
-+ err = init_random_sampling();
-+ if (err)
-+ goto out_free2;
-+
-+ err = uksm_slab_init();
-+ if (err)
-+ goto out_free1;
-+
-+ err = init_zeropage_hash_table();
-+ if (err)
-+ goto out_free0;
-+
-+ uksm_thread = kthread_run(uksm_scan_thread, NULL, "uksmd");
-+ if (IS_ERR(uksm_thread)) {
-+ pr_err("uksm: creating kthread failed\n");
-+ err = PTR_ERR(uksm_thread);
-+ goto out_free;
-+ }
-+
-+#ifdef CONFIG_SYSFS
-+ err = sysfs_create_group(mm_kobj, &uksm_attr_group);
-+ if (err) {
-+ pr_err("uksm: register sysfs failed\n");
-+ kthread_stop(uksm_thread);
-+ goto out_free;
-+ }
-+#else
-+ uksm_run = UKSM_RUN_MERGE; /* no way for user to start it */
-+
-+#endif /* CONFIG_SYSFS */
-+
-+#ifdef CONFIG_MEMORY_HOTREMOVE
-+ /*
-+ * Choose a high priority since the callback takes uksm_thread_mutex:
-+ * later callbacks could only be taking locks which nest within that.
-+ */
-+ hotplug_memory_notifier(uksm_memory_callback, 100);
-+#endif
-+ return 0;
-+
-+out_free:
-+ kfree(zero_hash_table);
-+out_free0:
-+ uksm_slab_free();
-+out_free1:
-+ kfree(random_nums);
-+out_free2:
-+ kfree(uksm_scan_ladder);
-+ return err;
-+}
-+
-+#ifdef MODULE
-+subsys_initcall(uksm_init);
-+#else
-+late_initcall(uksm_init);
-+#endif
-+
-diff -Nur a/mm/vmstat.c b/mm/vmstat.c
---- a/mm/vmstat.c 2018-05-25 15:18:02.000000000 +0100
-+++ b/mm/vmstat.c 2018-05-26 19:30:55.791140570 +0100
-@@ -1091,6 +1091,9 @@
- "nr_dirtied",
- "nr_written",
-
-+#ifdef CONFIG_UKSM
-+ "nr_uksm_zero_pages",
-+#endif
- /* enum writeback_stat_item counters */
- "nr_dirty_threshold",
- "nr_dirty_background_threshold",
diff --git a/sys-kernel/linux-image-redcore-lts/linux-image-redcore-lts-4.14.95-r1.ebuild b/sys-kernel/linux-image-redcore-lts/linux-image-redcore-lts-4.14.95-r1.ebuild
deleted file mode 100644
index 333a4b50..00000000
--- a/sys-kernel/linux-image-redcore-lts/linux-image-redcore-lts-4.14.95-r1.ebuild
+++ /dev/null
@@ -1,165 +0,0 @@
-# Copyright 1999-2017 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-EAPI=6
-
-inherit eutils
-
-EXTRAVERSION="redcore-lts-r1"
-KV_FULL="${PV}-${EXTRAVERSION}"
-KV_MAJOR="4.14"
-
-DESCRIPTION="Official Redcore Linux Kernel Image"
-HOMEPAGE="https://redcorelinux.org"
-SRC_URI="https://cdn.kernel.org/pub/linux/kernel/v4.x/linux-${PV}.tar.xz"
-
-KEYWORDS="amd64"
-LICENSE="GPL-2"
-SLOT="${PVR}"
-IUSE="+cryptsetup +dmraid +dracut +dkms +mdadm"
-
-RESTRICT="binchecks strip mirror"
-DEPEND="
- app-arch/lz4
- app-arch/xz-utils
- sys-devel/autoconf
- sys-devel/bc
- sys-devel/make
- cryptsetup? ( sys-fs/cryptsetup )
- dmraid? ( sys-fs/dmraid )
- dracut? ( >=sys-kernel/dracut-0.44-r8 )
- dkms? ( sys-kernel/dkms sys-kernel/linux-sources-redcore-lts:${SLOT} )
- mdadm? ( sys-fs/mdadm )
- >=sys-kernel/linux-firmware-20180314"
-RDEPEND="${DEPEND}"
-
-PATCHES=(
- "${FILESDIR}"/"${KV_MAJOR}"-introduce-NUMA-identity-node-sched-domain.patch
- "${FILESDIR}"/"${KV_MAJOR}"-k10temp-add-ZEN-support.patch
- "${FILESDIR}"/"${KV_MAJOR}"-mute-pps_state_mismatch.patch
- "${FILESDIR}"/"${KV_MAJOR}"-restore-SD_PREFER_SIBLING-on-MC-domains.patch
- "${FILESDIR}"/"${KV_MAJOR}"-Revert-ath10k-activate-user-space-firmware-loading.patch
- "${FILESDIR}"/"${KV_MAJOR}"-linux-hardened.patch
- "${FILESDIR}"/"${KV_MAJOR}"-uksm-linux-hardened.patch
- "${FILESDIR}"/"${KV_MAJOR}"-0001-MuQSS-version-0.162-CPU-scheduler-linux-hardened.patch
- "${FILESDIR}"/"${KV_MAJOR}"-0002-Make-preemptible-kernel-default.patch
- "${FILESDIR}"/"${KV_MAJOR}"-0003-Expose-vmsplit-for-our-poor-32-bit-users.patch
- "${FILESDIR}"/"${KV_MAJOR}"-0004-Create-highres-timeout-variants-of-schedule_timeout-.patch
- "${FILESDIR}"/"${KV_MAJOR}"-0005-Special-case-calls-of-schedule_timeout-1-to-use-the-.patch
- "${FILESDIR}"/"${KV_MAJOR}"-0006-Convert-msleep-to-use-hrtimers-when-active.patch
- "${FILESDIR}"/"${KV_MAJOR}"-0007-Replace-all-schedule-timeout-1-with-schedule_min_hrt.patch
- "${FILESDIR}"/"${KV_MAJOR}"-0008-Replace-all-calls-to-schedule_timeout_interruptible-.patch
- "${FILESDIR}"/"${KV_MAJOR}"-0009-Replace-all-calls-to-schedule_timeout_uninterruptibl.patch
- "${FILESDIR}"/"${KV_MAJOR}"-0010-Don-t-use-hrtimer-overlay-when-pm_freezing-since-som.patch
- "${FILESDIR}"/"${KV_MAJOR}"-0011-Make-hrtimer-granularity-and-minimum-hrtimeout-confi.patch
- "${FILESDIR}"/"${KV_MAJOR}"-0012-Reinstate-default-Hz-of-100-in-combination-with-MuQS.patch
- "${FILESDIR}"/"${KV_MAJOR}"-0013-Make-threaded-IRQs-optionally-the-default-which-can-.patch
- "${FILESDIR}"/"${KV_MAJOR}"-0014-Swap-sucks.patch
- "${FILESDIR}"/"${KV_MAJOR}"-0015-MuQSS.c-needs-irq_regs.h-to-use-get_irq_regs.patch
- "${FILESDIR}"/"${KV_MAJOR}"-0016-unfuck-MuQSS-on-linux-4_14_15+.patch
- "${FILESDIR}"/"${KV_MAJOR}"-0017-unfuck-MuQSS-on-linux-4_14_75+.patch
-)
-
-S="${WORKDIR}"/linux-"${PV}"
-
-pkg_setup() {
- export KBUILD_BUILD_USER="nexus"
- export KBUILD_BUILD_HOST="nexus.redcorelinux.org"
-
- export REAL_ARCH="$ARCH"
- unset ARCH ; unset LDFLAGS #will interfere with Makefile if set
-}
-
-src_prepare() {
- default
- emake mrproper
- sed -ri "s|^(EXTRAVERSION =).*|\1 -${EXTRAVERSION}|" Makefile
- cp "${FILESDIR}"/"${KV_MAJOR}"-amd64.config .config
-	find . -type f -name "*.orig" -delete
-}
-
-src_compile() {
- emake prepare modules_prepare bzImage modules
-}
-
-src_install() {
- dodir boot
- insinto boot
- newins .config config-"${KV_FULL}"
- newins System.map System.map-"${KV_FULL}"
- newins arch/x86/boot/bzImage vmlinuz-"${KV_FULL}"
-
- dodir usr/src/linux-"${KV_FULL}"
- insinto usr/src/linux-"${KV_FULL}"
- doins Module.symvers
- doins System.map
- exeinto usr/src/linux-"${KV_FULL}"
- doexe vmlinux
-
- emake INSTALL_MOD_PATH="${D}" modules_install
-
- rm -f "${D}"lib/modules/"${KV_FULL}"/build
- rm -f "${D}"lib/modules/"${KV_FULL}"/source
-	local KSYMS
- for KSYMS in build source ; do
- dosym ../../../usr/src/linux-"${KV_FULL}" lib/modules/"${KV_FULL}"/"${KSYMS}"
- done
-}
-
-_grub2_update_grubcfg() {
- if [[ -x $(which grub2-mkconfig) ]]; then
- elog "Updating GRUB-2 bootloader configuration, please wait"
- grub2-mkconfig -o "${ROOT}"boot/grub/grub.cfg
- else
-		elog "It looks like you're not using GRUB-2; you must update the bootloader configuration by hand"
- fi
-}
-
-_dracut_initrd_create() {
- if [[ -x $(which dracut) ]]; then
-		elog "Generating initrd for ${KV_FULL}, please wait"
- addpredict /etc/ld.so.cache~
- dracut -N -f --kver="${KV_FULL}" "${ROOT}"boot/initrd-"${KV_FULL}"
- else
-		elog "It looks like you're not using dracut; you must generate an initrd by hand"
- fi
-}
-
-_dracut_initrd_delete() {
- rm -rf "${ROOT}"boot/initrd-"${KV_FULL}"
-}
-
-_dkms_modules_delete() {
- if [[ -x $(which dkms) ]] ; then
-		local DKMSMOD
- for DKMSMOD in $(dkms status | cut -d " " -f1,2 | sed -e 's/,//g' | sed -e 's/ /\//g' | sed -e 's/://g') ; do
- dkms remove "${DKMSMOD}" -k "${KV_FULL}"
- done
- fi
-}
-
-_kernel_modules_delete() {
- rm -rf "${ROOT}"lib/modules/"${KV_FULL}"
-}
-
-pkg_postinst() {
- if [ $(stat -c %d:%i /) == $(stat -c %d:%i /proc/1/root/.) ]; then
- if use dracut; then
- _dracut_initrd_create
- fi
- _grub2_update_grubcfg
- fi
-}
-
-pkg_postrm() {
- if [ $(stat -c %d:%i /) == $(stat -c %d:%i /proc/1/root/.) ]; then
- if use dracut; then
- _dracut_initrd_delete
- fi
- _grub2_update_grubcfg
- fi
- if use dkms; then
- _dkms_modules_delete
- fi
- _kernel_modules_delete
-}
diff --git a/sys-kernel/linux-image-redcore-lts/linux-image-redcore-lts-4.19.20-r1.ebuild b/sys-kernel/linux-image-redcore-lts/linux-image-redcore-lts-4.19.20-r2.ebuild
index 19bc96c9..0e5c5507 100644
--- a/sys-kernel/linux-image-redcore-lts/linux-image-redcore-lts-4.19.20-r1.ebuild
+++ b/sys-kernel/linux-image-redcore-lts/linux-image-redcore-lts-4.19.20-r2.ebuild
@@ -5,7 +5,7 @@ EAPI=6
inherit eutils
-EXTRAVERSION="redcore-lts-r1"
+EXTRAVERSION="redcore-lts-r2"
KV_FULL="${PV}-${EXTRAVERSION}"
KV_MAJOR="4.19"
diff --git a/sys-kernel/linux-sources-redcore-lts/files/4.14-0001-MuQSS-version-0.162-CPU-scheduler-linux-hardened.patch b/sys-kernel/linux-sources-redcore-lts/files/4.14-0001-MuQSS-version-0.162-CPU-scheduler-linux-hardened.patch
deleted file mode 100644
index a81dbeac..00000000
--- a/sys-kernel/linux-sources-redcore-lts/files/4.14-0001-MuQSS-version-0.162-CPU-scheduler-linux-hardened.patch
+++ /dev/null
@@ -1,9560 +0,0 @@
-diff -Nur a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
---- a/arch/powerpc/platforms/cell/spufs/sched.c 2018-12-21 13:13:19.000000000 +0000
-+++ b/arch/powerpc/platforms/cell/spufs/sched.c 2019-01-05 20:22:51.089998199 +0000
-@@ -65,11 +65,6 @@
- static struct timer_list spuloadavg_timer;
-
- /*
-- * Priority of a normal, non-rt, non-niced'd process (aka nice level 0).
-- */
--#define NORMAL_PRIO 120
--
--/*
- * Frequency of the spu scheduler tick. By default we do one SPU scheduler
- * tick for every 10 CPU scheduler ticks.
- */
-diff -Nur a/arch/x86/Kconfig b/arch/x86/Kconfig
---- a/arch/x86/Kconfig 2019-01-05 20:17:13.829237906 +0000
-+++ b/arch/x86/Kconfig 2019-01-05 20:30:14.244135060 +0000
-@@ -957,6 +957,20 @@
- config SCHED_SMT
- def_bool y if SMP
-
-+config SMT_NICE
-+ bool "SMT (Hyperthreading) aware nice priority and policy support"
-+ depends on SCHED_MUQSS && SCHED_SMT
-+ default y
-+ ---help---
-+ Enabling Hyperthreading on Intel CPUs decreases the effectiveness
-+ of the use of 'nice' levels and different scheduling policies
-+ (e.g. realtime) due to sharing of CPU power between hyperthreads.
-+ SMT nice support makes each logical CPU aware of what is running on
-+ its hyperthread siblings, maintaining appropriate distribution of
-+ CPU according to nice levels and scheduling policies at the expense
-+ of slightly increased overhead.
-+ If unsure say Y here.
-+
- config SCHED_MC
- def_bool y
- prompt "Multi-core scheduler support"
-diff -Nur a/Documentation/scheduler/sched-BFS.txt b/Documentation/scheduler/sched-BFS.txt
---- a/Documentation/scheduler/sched-BFS.txt 1970-01-01 01:00:00.000000000 +0100
-+++ b/Documentation/scheduler/sched-BFS.txt 2019-01-05 20:22:51.089998199 +0000
-@@ -0,0 +1,351 @@
-+BFS - The Brain Fuck Scheduler by Con Kolivas.
-+
-+Goals.
-+
-+The goal of the Brain Fuck Scheduler, referred to as BFS from here on, is to
-+completely do away with the complex designs of the past for the cpu process
-+scheduler and instead implement one that is very simple in basic design.
-+The main focus of BFS is to achieve excellent desktop interactivity and
-+responsiveness without heuristics and tuning knobs that are difficult to
-+understand, impossible to model and predict the effect of, and when tuned to
-+one workload cause massive detriment to another.
-+
-+
-+Design summary.
-+
-+BFS is best described as a single runqueue, O(n) lookup, earliest effective
-+virtual deadline first design, loosely based on EEVDF (earliest eligible virtual
-+deadline first) and my previous Staircase Deadline scheduler. Each component
-+shall be described in order to understand the significance of, and reasoning for
-+it. The codebase when the first stable version was released was approximately
-+9000 lines less code than the existing mainline linux kernel scheduler (in
-+2.6.31). This does not even take into account the removal of documentation and
-+the cgroups code that is not used.
-+
-+Design reasoning.
-+
-+The single runqueue refers to the queued but not running processes for the
-+entire system, regardless of the number of CPUs. The reason for going back to
-+a single runqueue design is that once multiple runqueues are introduced,
-+per-CPU or otherwise, there will be complex interactions as each runqueue will
-+be responsible for the scheduling latency and fairness of the tasks only on its
-+own runqueue, and to achieve fairness and low latency across multiple CPUs, any
-+advantage in throughput of having CPU local tasks causes other disadvantages.
-+This is due to requiring a very complex balancing system to at best achieve some
-+semblance of fairness across CPUs and can only maintain relatively low latency
-+for tasks bound to the same CPUs, not across them. To increase said fairness
-+and latency across CPUs, the advantage of local runqueue locking, which makes
-+for better scalability, is lost due to having to grab multiple locks.
-+
-+A significant feature of BFS is that all accounting is done purely based on CPU
-+used and nowhere is sleep time used in any way to determine entitlement or
-+interactivity. Interactivity "estimators" that use some kind of sleep/run
-+algorithm are doomed to fail to detect all interactive tasks, and to falsely tag
-+tasks that aren't interactive as being so. The reason for this is that it is
-+close to impossible to determine that when a task is sleeping, whether it is
-+doing it voluntarily, as in a userspace application waiting for input in the
-+form of a mouse click or otherwise, or involuntarily, because it is waiting for
-+another thread, process, I/O, kernel activity or whatever. Thus, such an
-+estimator will introduce corner cases, and more heuristics will be required to
-+cope with those corner cases, introducing more corner cases and failed
-+interactivity detection and so on. Interactivity in BFS is built into the design
-+by virtue of the fact that tasks that are waking up have not used up their quota
-+of CPU time, and have earlier effective deadlines, thereby making it very likely
-+they will preempt any CPU bound task of equivalent nice level. See below for
-+more information on the virtual deadline mechanism. Even if they do not preempt
-+a running task, because the rr interval is guaranteed to have a bound upper
-+limit on how long a task will wait for, it will be scheduled within a timeframe
-+that will not cause visible interface jitter.
-+
-+
-+Design details.
-+
-+Task insertion.
-+
-+BFS inserts tasks into each relevant queue as an O(1) insertion into a double
-+linked list. On insertion, *every* running queue is checked to see if the newly
-+queued task can run on any idle queue, or preempt the lowest running task on the
-+system. This is how the cross-CPU scheduling of BFS achieves significantly lower
-+latency per extra CPU the system has. In this case the lookup is, in the worst
-+case scenario, O(n) where n is the number of CPUs on the system.
-+
-+Data protection.
-+
-+BFS has one single lock protecting the process local data of every task in the
-+global queue. Thus every insertion, removal and modification of task data in the
-+global runqueue needs to grab the global lock. However, once a task is taken by
-+a CPU, the CPU has its own local data copy of the running process' accounting
-+information which only that CPU accesses and modifies (such as during a
-+timer tick) thus allowing the accounting data to be updated lockless. Once a
-+CPU has taken a task to run, it removes it from the global queue. Thus the
-+global queue only ever has, at most,
-+
-+ (number of tasks requesting cpu time) - (number of logical CPUs) + 1
-+
-+tasks in the global queue. This value is relevant for the time taken to look up
-+tasks during scheduling. This will increase if many tasks with CPU affinity set
-+in their policy to limit which CPUs they're allowed to run on if they outnumber
-+the number of CPUs. The +1 is because when rescheduling a task, the CPU's
-+currently running task is put back on the queue. Lookup will be described after
-+the virtual deadline mechanism is explained.
-+
-+Virtual deadline.
-+
-+The key to achieving low latency, scheduling fairness, and "nice level"
-+distribution in BFS is entirely in the virtual deadline mechanism. The one
-+tunable in BFS is the rr_interval, or "round robin interval". This is the
-+maximum time two SCHED_OTHER (or SCHED_NORMAL, the common scheduling policy)
-+tasks of the same nice level will be running for, or looking at it the other
-+way around, the longest duration two tasks of the same nice level will be
-+delayed for. When a task requests cpu time, it is given a quota (time_slice)
-+equal to the rr_interval and a virtual deadline. The virtual deadline is
-+offset from the current time in jiffies by this equation:
-+
-+ jiffies + (prio_ratio * rr_interval)
-+
-+The prio_ratio is determined as a ratio compared to the baseline of nice -20
-+and increases by 10% per nice level. The deadline is a virtual one only in that
-+no guarantee is placed that a task will actually be scheduled by this time, but
-+it is used to compare which task should go next. There are three components to
-+how a task is next chosen. First is time_slice expiration. If a task runs out
-+of its time_slice, it is descheduled, the time_slice is refilled, and the
-+deadline reset to that formula above. Second is sleep, where a task no longer
-+is requesting CPU for whatever reason. The time_slice and deadline are _not_
-+adjusted in this case and are just carried over for when the task is next
-+scheduled. Third is preemption, and that is when a newly waking task is deemed
-+higher priority than a currently running task on any cpu by virtue of the fact
-+that it has an earlier virtual deadline than the currently running task. The
-+earlier deadline is the key to which task is next chosen for the first and
-+second cases. Once a task is descheduled, it is put back on the queue, and an
-+O(n) lookup of all queued-but-not-running tasks is done to determine which has
-+the earliest deadline and that task is chosen to receive CPU next.
-+
-+The CPU proportion of different nice tasks works out to be approximately
-+
-+ (prio_ratio difference)^2
-+
-+The reason it is squared is that a task's deadline does not change while it is
-+running unless it runs out of time_slice. Thus, even if the time actually
-+passes the deadline of another task that is queued, it will not get CPU time
-+unless the current running task deschedules, and the time "base" (jiffies) is
-+constantly moving.
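-+
-+As a rough sketch (illustrative C only, not the kernel's code; the task
-+structure, the prio_ratio scaling and the helpers are assumed), the
-+bookkeeping described above amounts to:
-+
-+	/* assumed: ratio is 100 at nice -20, rising ~10% per nice level */
-+	static unsigned long prio_ratio(int nice)
-+	{
-+		return 100 + (nice + 20) * 10;
-+	}
-+
-+	/* refill the quota and push the virtual deadline out */
-+	static void refill_time_slice(struct task *p)
-+	{
-+		p->time_slice = rr_interval;
-+		p->deadline = jiffies + prio_ratio(p->nice) * rr_interval / 100;
-+	}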
-+
-+Task lookup.
-+
-+BFS has 103 priority queues. 100 of these are dedicated to the static priority
-+of realtime tasks, and the remaining 3 are, in order of best to worst priority,
-+SCHED_ISO (isochronous), SCHED_NORMAL, and SCHED_IDLEPRIO (idle priority
-+scheduling). When a task of these priorities is queued, a bitmap of running
-+priorities is set showing which of these priorities has tasks waiting for CPU
-+time. When a CPU is made to reschedule, the lookup for the next task to get
-+CPU time is performed in the following way:
-+
-+First the bitmap is checked to see what static priority tasks are queued. If
-+any realtime priorities are found, the corresponding queue is checked and the
-+first task listed there is taken (provided CPU affinity is suitable) and lookup
-+is complete. If the priority corresponds to a SCHED_ISO task, they are also
-+taken in FIFO order (as they behave like SCHED_RR). If the priority corresponds
-+to either SCHED_NORMAL or SCHED_IDLEPRIO, then the lookup becomes O(n). At this
-+stage, every task in the runlist that corresponds to that priority is checked
-+to see which has the earliest set deadline, and (provided it has suitable CPU
-+affinity) it is taken off the runqueue and given the CPU. If a task has an
-+expired deadline, it is taken and the rest of the lookup aborted (as they are
-+chosen in FIFO order).
-+
-+Thus, the lookup is O(n) in the worst case only, where n is as described
-+earlier, as tasks may be chosen before the whole task list is looked over.
-+
-+
-+Scalability.
-+
-+The major limitations of BFS will be that of scalability, as the separate
-+runqueue designs will have less lock contention as the number of CPUs rises.
-+However they do not scale linearly even with separate runqueues as multiple
-+runqueues will need to be locked concurrently on such designs to be able to
-+achieve fair CPU balancing, to try and achieve some sort of nice-level fairness
-+across CPUs, and to achieve low enough latency for tasks on a busy CPU when
-+other CPUs would be more suited. BFS has the advantage that it requires no
-+balancing algorithm whatsoever, as balancing occurs by proxy simply because
-+all CPUs draw off the global runqueue, in priority and deadline order. Despite
-+the fact that scalability is _not_ the prime concern of BFS, it both shows very
-+good scalability to smaller numbers of CPUs and is likely a more scalable design
-+at these numbers of CPUs.
-+
-+It also has some very low overhead scalability features built into the design
-+when it has been deemed their overhead is so marginal that they're worth adding.
-+The first is the local copy of the running process' data to the CPU it's running
-+on to allow that data to be updated lockless where possible. Then there is
-+deference paid to the last CPU a task was running on, by trying that CPU first
-+when looking for an idle CPU to use the next time it's scheduled. Finally there
-+is the notion of cache locality beyond the last running CPU. The sched_domains
-+information is used to determine the relative virtual "cache distance" that
-+other CPUs have from the last CPU a task was running on. CPUs with shared
-+caches, such as SMT siblings, or multicore CPUs with shared caches, are treated
-+as cache local. CPUs without shared caches are treated as not cache local, and
-+CPUs on different NUMA nodes are treated as very distant. This "relative cache
-+distance" is used by modifying the virtual deadline value when doing lookups.
-+Effectively, the deadline is unaltered between "cache local" CPUs, doubled for
-+"cache distant" CPUs, and quadrupled for "very distant" CPUs. The reasoning
-+behind the doubling of deadlines is as follows. The real cost of migrating a
-+task from one CPU to another is entirely dependant on the cache footprint of
-+the task, how cache intensive the task is, how long it's been running on that
-+CPU to take up the bulk of its cache, how big the CPU cache is, how fast and
-+how layered the CPU cache is, how fast a context switch is... and so on. In
-+other words, it's close to random in the real world where we do more than just
-+one sole workload. The only thing we can be sure of is that it's not free. So
-+BFS uses the principle that an idle CPU is a wasted CPU and utilising idle CPUs
-+is more important than cache locality, and cache locality only plays a part
-+after that. Doubling the effective deadline is based on the premise that the
-+"cache local" CPUs will tend to work on the same tasks up to double the number
-+of cache local CPUs, and once the workload is beyond that amount, it is likely
-+that none of the tasks are cache warm anywhere anyway. The quadrupling for NUMA
-+is a value I pulled out of my arse.
-+
-+When choosing an idle CPU for a waking task, the cache locality is determined
-+according to where the task last ran and then idle CPUs are ranked from best
-+to worst to choose the most suitable idle CPU based on cache locality, NUMA
-+node locality and hyperthread sibling busyness. They are chosen in the
-+following preference (if idle):
-+
-+* Same core, idle or busy cache, idle threads
-+* Other core, same cache, idle or busy cache, idle threads.
-+* Same node, other CPU, idle cache, idle threads.
-+* Same node, other CPU, busy cache, idle threads.
-+* Same core, busy threads.
-+* Other core, same cache, busy threads.
-+* Same node, other CPU, busy threads.
-+* Other node, other CPU, idle cache, idle threads.
-+* Other node, other CPU, busy cache, idle threads.
-+* Other node, other CPU, busy threads.
-+
-+This shows the SMT or "hyperthread" awareness in the design as well which will
-+choose a real idle core first before a logical SMT sibling which already has
-+tasks on the physical CPU.
-+
-+Early benchmarking of BFS suggested scalability dropped off at the 16 CPU mark.
-+However this benchmarking was performed on an earlier design that was far less
-+scalable than the current one so it's hard to know how scalable it is in terms
-+of both CPUs (due to the global runqueue) and heavily loaded machines (due to
-+O(n) lookup) at this stage. Note that in terms of scalability, the number of
-+_logical_ CPUs matters, not the number of _physical_ CPUs. Thus, a dual (2x)
-+quad core (4x) hyperthreaded (2x) machine is effectively a 16x. Newer benchmark
-+results are very promising indeed, without needing to tweak any knobs, features
-+or options. Benchmark contributions are most welcome.
-+
-+
-+Features
-+
-+As the initial prime target audience for BFS was the average desktop user, it
-+was designed to not need tweaking, tuning or have features set to obtain benefit
-+from it. Thus the number of knobs and features has been kept to an absolute
-+minimum and should not require extra user input for the vast majority of cases.
-+There are precisely 2 tunables and 2 extra scheduling policies: the rr_interval
-+and iso_cpu tunables, and the SCHED_ISO and SCHED_IDLEPRIO policies. In addition
-+to this, BFS also uses sub-tick accounting. What BFS does _not_ now feature is
-+support for CGROUPS. The average user should neither need to know what these
-+are, nor should they need to be using them to have good desktop behaviour.
-+
-+rr_interval
-+
-+There is only one "scheduler" tunable, the round robin interval. This can be
-+accessed in
-+
-+ /proc/sys/kernel/rr_interval
-+
-+The value is in milliseconds, and the default value is set to 6 on a
-+uniprocessor machine, and automatically set to a progressively higher value on
-+multiprocessor machines. The reasoning behind increasing the value on more CPUs
-+is that the effective latency is decreased by virtue of there being more CPUs on
-+BFS (for reasons explained above), and increasing the value allows for less
-+cache contention and more throughput. Valid values are from 1 to 1000.
-+Decreasing the value will decrease latencies at the cost of decreasing
-+throughput, while increasing it will improve throughput, but at the cost of
-+worsening latencies. The accuracy of the rr interval is limited by HZ resolution
-+of the kernel configuration. Thus, the worst case latencies are usually slightly
-+higher than this actual value. The default value of 6 is not an arbitrary one.
-+It is based on the fact that humans can detect jitter at approximately 7ms, so
-+aiming for much lower latencies is pointless under most circumstances. It is
-+worth noting this fact when comparing the latency performance of BFS to other
-+schedulers. Worst case latencies being higher than 7ms are far worse than
-+average latencies not being in the microsecond range.
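-+
-+For example, a latency-sensitive test might lower the interval (any value from
-+1 to 1000 is accepted):
-+
-+ echo 2 > /proc/sys/kernel/rr_interval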
-+
-+Isochronous scheduling.
-+
-+Isochronous scheduling is a unique scheduling policy designed to provide
-+near-real-time performance to unprivileged (ie non-root) users without the
-+ability to starve the machine indefinitely. Isochronous tasks (which means
-+"same time") are set using, for example, the schedtool application like so:
-+
-+ schedtool -I -e amarok
-+
-+This will start the audio application "amarok" as SCHED_ISO. How SCHED_ISO works
-+is that it has a priority level between true realtime tasks and SCHED_NORMAL,
-+allowing ISO tasks to preempt all normal tasks, in a SCHED_RR fashion (ie,
-+if multiple SCHED_ISO tasks are running, they purely round robin at rr_interval
-+rate). However if ISO tasks run for more than a tunable finite amount of time,
-+they are then demoted back to SCHED_NORMAL scheduling. This finite amount of
-+time is the percentage of _total CPU_ available across the machine, configurable
-+as a percentage in the following "resource handling" tunable (as opposed to a
-+scheduler tunable):
-+
-+ /proc/sys/kernel/iso_cpu
-+
-+and is set to 70% by default. It is calculated over a rolling 5 second average.
-+Because it is the total CPU available, it means that on a multi CPU machine, it
-+is possible to have an ISO task running as realtime scheduling indefinitely on
-+just one CPU, as the other CPUs will be available. Setting this to 100 is the
-+equivalent of giving all users SCHED_RR access and setting it to 0 removes the
-+ability to run any pseudo-realtime tasks.
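-+
-+For example, to restrict unprivileged ISO tasks to half of the total CPU
-+across the machine:
-+
-+ echo 50 > /proc/sys/kernel/iso_cpu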
-+
-+A feature of BFS is that it detects when an application tries to obtain a
-+realtime policy (SCHED_RR or SCHED_FIFO) and the caller does not have the
-+appropriate privileges to use those policies. When it detects this, it will
-+give the task SCHED_ISO policy instead. Thus it is transparent to the user.
-+Because some applications constantly set their policy as well as their nice
-+level, there is potential for them to undo the override specified by the user
-+on the command line of setting the policy to SCHED_ISO. To counter this, once
-+a task has been set to SCHED_ISO policy, it needs superuser privileges to set
-+it back to SCHED_NORMAL. This will ensure the task remains ISO and all child
-+processes and threads will also inherit the ISO policy.
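-+
-+Programmatically, a task can also request SCHED_ISO itself. A minimal
-+userspace sketch follows; SCHED_ISO is policy number 4 on BFS kernels but is
-+absent from glibc headers, so it is defined by hand here, and a priority of 0
-+is assumed as for the other non-realtime policies:
-+
-+ #include <sched.h>
-+ #include <stdio.h>
-+
-+ #ifndef SCHED_ISO
-+ #define SCHED_ISO 4 /* BFS/MuQSS only; not in glibc headers */
-+ #endif
-+
-+ int main(void)
-+ {
-+         struct sched_param sp = { .sched_priority = 0 };
-+
-+         /* Request ISO for the calling process; no privileges required */
-+         if (sched_setscheduler(0, SCHED_ISO, &sp) == -1)
-+                 perror("sched_setscheduler");
-+         return 0;
-+ }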
-+
-+Idleprio scheduling.
-+
-+Idleprio scheduling is a scheduling policy designed to give out CPU to a task
-+_only_ when the CPU would be otherwise idle. The idea behind this is to allow
-+ultra low priority tasks to be run in the background that have virtually no
-+effect on the foreground tasks. This is ideally suited to distributed computing
-+clients (like setiathome, folding, mprime etc) but can also be used to start
-+a video encode or so on without any slowdown of other tasks. To prevent this
-+policy from grabbing shared resources and holding them indefinitely, if it
-+detects a state where the task is waiting on I/O, the machine is about to
-+suspend to ram and so on, it will transiently schedule them as SCHED_NORMAL. As
-+per the Isochronous task management, once a task has been scheduled as IDLEPRIO,
-+it cannot be put back to SCHED_NORMAL without superuser privileges. Tasks can
-+be set to start as SCHED_IDLEPRIO with the schedtool command like so:
-+
-+ schedtool -D -e ./mprime
-+
-+Subtick accounting.
-+
-+It is surprisingly difficult to get accurate CPU accounting, and in many cases,
-+the accounting is done by simply determining what is happening at the precise
-+moment a timer tick fires off. This becomes increasingly inaccurate as the
-+timer tick frequency (HZ) is lowered. It is possible to create an application
-+which uses almost 100% CPU, yet by being descheduled at the right time, records
-+zero CPU usage. While the main problem with this is that there are possible
-+security implications, it is also difficult to determine how much CPU a task
-+really does use. BFS tries to use the sub-tick accounting from the TSC clock,
-+where possible, to determine real CPU usage. This is not entirely reliable, but
-+is far more likely to produce accurate CPU usage data than the existing designs
-+and will not show tasks as consuming no CPU usage when they actually are. Thus,
-+the amount of CPU reported as being used by BFS will more accurately represent
-+how much CPU the task itself is using (as is shown for example by the 'time'
-+application), so the reported values may be quite different to other schedulers.
-+Values reported as the 'load' are more prone to problems with this design, but
-+per process values are closer to real usage. When comparing throughput of BFS
-+to other designs, it is important to compare the actual completed work in terms
-+of total wall clock time taken and total work done, rather than the reported
-+"cpu usage".
-+
-+
-+Con Kolivas <kernel@kolivas.org> Fri Aug 27 2010
-diff -Nur a/Documentation/scheduler/sched-MuQSS.txt b/Documentation/scheduler/sched-MuQSS.txt
---- a/Documentation/scheduler/sched-MuQSS.txt 1970-01-01 01:00:00.000000000 +0100
-+++ b/Documentation/scheduler/sched-MuQSS.txt 2019-01-05 20:22:51.089998199 +0000
-@@ -0,0 +1,347 @@
-+MuQSS - The Multiple Queue Skiplist Scheduler by Con Kolivas.
-+
-+MuQSS is a per-cpu runqueue variant of the original BFS scheduler with
-+one 8 level skiplist per runqueue, and fine grained locking for much more
-+scalability.
-+
-+
-+Goals.
-+
-+The goal of the Multiple Queue Skiplist Scheduler, referred to as MuQSS from
-+here on (pronounced mux), is to completely do away with the complex designs of
-+the past for the cpu process scheduler and instead implement one that is very
-+simple in basic design. The main focus of MuQSS is to achieve excellent desktop
-+interactivity and responsiveness without heuristics and tuning knobs that are
-+difficult to understand, impossible to model and predict the effect of, and when
-+tuned to one workload cause massive detriment to another, while still being
-+scalable to many CPUs and processes.
-+
-+
-+Design summary.
-+
-+MuQSS is best described as per-cpu multiple runqueue, O(log n) insertion, O(1)
-+lookup, earliest effective virtual deadline first tickless design, loosely based
-+on EEVDF (earliest eligible virtual deadline first) and my previous Staircase
-+Deadline scheduler, and evolved from the single runqueue O(n) BFS scheduler.
-+Each component shall be described in order to understand the significance of,
-+and reasoning for it.
-+
-+
-+Design reasoning.
-+
-+In BFS, the use of a single runqueue across all CPUs meant that each CPU would
-+need to scan the entire runqueue looking for the process with the earliest
-+deadline and schedule that next, regardless of which CPU it originally came
-+from. This made BFS deterministic with respect to latency and provided
-+guaranteed latencies dependent on number of processes and CPUs. The single
-+runqueue, however, meant that all CPUs would compete for the single lock
-+protecting it, which would lead to increasing lock contention as the number of
-+CPUs rose and appeared to limit scalability of common workloads beyond 16
-+logical CPUs. Additionally, the O(n) lookup of the runqueue list obviously
-+increased overhead proportionate to the number of queued processes and led to
-+cache thrashing while iterating over the linked list.
-+
-+MuQSS is an evolution of BFS, designed to maintain the same scheduling
-+decision mechanism and be virtually deterministic without relying on the
-+constrained design of the single runqueue by splitting out the single runqueue
-+to be per-CPU and use skiplists instead of linked lists.
-+
-+The original reason for going back to a single runqueue design for BFS was
-+that once multiple runqueues are introduced, per-CPU or otherwise, there will
-+be complex interactions, as each runqueue becomes responsible for the
-+scheduling latency and fairness of the tasks only on its own runqueue. To
-+achieve fairness and low latency across multiple CPUs, any throughput
-+advantage of keeping tasks CPU-local brings other disadvantages: a very
-+complex balancing system is required to achieve, at best, some semblance of
-+fairness across CPUs, and it can only maintain relatively low latency for
-+tasks bound to the same CPUs, not across them. To improve fairness and
-+latency across CPUs, the advantage of local runqueue locking, which makes for
-+better scalability, is lost due to having to grab multiple locks.
-+
-+MuQSS works around the problems inherent in multiple runqueue designs by
-+making its skip lists priority ordered and through novel use of lockless
-+examination of each other runqueue it can decide if it should take the earliest
-+deadline task from another runqueue for latency reasons, or for CPU balancing
-+reasons. It still does not have a balancing system, instead letting balancing
-+happen as a by-product of its next task scheduling decisions and its choice of
-+wakeup CPU.
-+
-+
-+Design details.
-+
-+Custom skip list implementation:
-+
-+To avoid the overhead of building up and tearing down skip list structures,
-+the variant used by MuQSS has a number of optimisations making it specific for
-+its use case in the scheduler. It uses static arrays of 8 'levels' instead of
-+building up and tearing down structures dynamically. This makes each runqueue
-+only scale O(log N) up to 64k tasks. However as there is one runqueue per CPU
-+it means that it scales O(log N) up to 64k x number of logical CPUs which is
-+far beyond the realistic task limits each CPU could handle. By being 8 levels
-+it also makes the array exactly one cacheline in size. Additionally, each
-+skip list node is bidirectional making insertion and removal amortised O(1),
-+being O(k) where k is 1-8. Uniquely, we are only ever interested in the very
-+first entry in each list at all times with MuQSS, so there is never a need to
-+do a search and thus lookup is always O(1). In interactive mode, the queues
-+will be searched beyond their first entry if the first task is not suitable
-+for affinity or SMT nice reasons.
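-+
-+Because only the head of each queue matters, lookup reduces to a single
-+pointer dereference. A sketch using the node layout from the
-+include/linux/skip_list.h header added by this patch (the skiplist_first name
-+is illustrative only):
-+
-+ /* Illustrative sketch: the best entry is always first at level 0 */
-+ static inline skiplist_node *skiplist_first(skiplist *l)
-+ {
-+         return l->header->next[0];      /* O(1): no search required */
-+ }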
-+
-+Task insertion:
-+
-+MuQSS inserts tasks into a per CPU runqueue as an O(log N) insertion into
-+a custom skip list as described above (based on the original design by William
-+Pugh). Insertion is ordered in such a way that there is never a need to do a
-+search by ordering tasks according to static priority primarily, and then
-+virtual deadline at the time of insertion.
-+
-+Niffies:
-+
-+Niffies are a monotonic forward moving timer not unlike the "jiffies" but are
-+of nanosecond resolution. Niffies are calculated per-runqueue from the high
-+resolution TSC timers, and in order to maintain fairness are synchronised
-+between two CPUs whenever both of their runqueues are locked concurrently.
-+
-+Virtual deadline:
-+
-+The key to achieving low latency, scheduling fairness, and "nice level"
-+distribution in MuQSS is entirely in the virtual deadline mechanism. The one
-+tunable in MuQSS is the rr_interval, or "round robin interval". This is the
-+maximum time two SCHED_OTHER (or SCHED_NORMAL, the common scheduling policy)
-+tasks of the same nice level will be running for, or looking at it the other
-+way around, the longest duration two tasks of the same nice level will be
-+delayed for. When a task requests cpu time, it is given a quota (time_slice)
-+equal to the rr_interval and a virtual deadline. The virtual deadline is
-+offset from the current time in niffies by this equation:
-+
-+ niffies + (prio_ratio * rr_interval)
-+
-+The prio_ratio is determined as a ratio compared to the baseline of nice -20
-+and increases by 10% per nice level. The deadline is a virtual one only in that
-+no guarantee is placed that a task will actually be scheduled by this time, but
-+it is used to compare which task should go next. There are three components to
-+how a task is next chosen. First is time_slice expiration. If a task runs out
-+of its time_slice, it is descheduled, the time_slice is refilled, and the
-+deadline reset to that formula above. Second is sleep, where a task no longer
-+is requesting CPU for whatever reason. The time_slice and deadline are _not_
-+adjusted in this case and are just carried over for when the task is next
-+scheduled. Third is preemption, and that is when a newly waking task is deemed
-+higher priority than a currently running task on any cpu by virtue of the fact
-+that it has an earlier virtual deadline than the currently running task. The
-+earlier deadline is the key to which task is next chosen for the first and
-+second cases.
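-+
-+As a concrete illustration (the prio_ratio() helper is hypothetical and the
-+in-kernel arithmetic differs in its fixed-point details), with prio_ratio
-+expressed as a percentage of the nice -20 baseline of 100:
-+
-+ /* Illustrative sketch: offset the new deadline from current niffies */
-+ static u64 virtual_deadline(struct rq *rq, struct task_struct *p)
-+ {
-+         /* prio_ratio: 100 at nice -20, ~10% larger per nice level */
-+         return rq->niffies + MS_TO_NS(rr_interval) * prio_ratio(p) / 100;
-+ }
-+
-+Two tasks ten nice levels apart thus have deadline offsets roughly 1.1^10, or
-+about 2.6 times, apart.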
-+
-+The CPU proportion of different nice tasks works out to be approximately the
-+
-+ (prio_ratio difference)^2
-+
-+The reason it is squared is that a task's deadline does not change while it is
-+running unless it runs out of time_slice. Thus, even if the time actually
-+passes the deadline of another task that is queued, it will not get CPU time
-+unless the current running task deschedules, and the time "base" (niffies) is
-+constantly moving.
-+
-+Task lookup:
-+
-+As tasks are already pre-ordered according to anticipated scheduling order in
-+the skip lists, lookup for the next suitable task per-runqueue is always a
-+matter of simply selecting the first task in the 0th level skip list entry.
-+In order to maintain optimal latency and fairness across CPUs, MuQSS does a
-+novel examination of every other runqueue in cache locality order, choosing the
-+best task across all runqueues. This provides near-determinism of how long any
-+task across the entire system may wait before receiving CPU time. The other
-+runqueues are first examined locklessly and then trylocked to minimise the
-+potential lock contention if they are likely to have a suitably better task.
-+Each other runqueue lock is only held for as long as it takes to examine the
-+entry for suitability. In "interactive" mode, the default setting, MuQSS will
-+look for the best deadline task across all CPUs, while in !interactive mode,
-+it will only select a better deadline task from another CPU if it is more
-+heavily laden than the current one.
-+
-+Lookup is therefore O(k) where k is number of CPUs.
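-+
-+A sketch of that scan (illustrative only: best_across_rqs, cpu_order,
-+best_deadline, deadline_before and try_steal_earliest are assumed names, and
-+the real code also checks affinity and suitability under the lock):
-+
-+ /* Illustrative sketch: peek locklessly, trylock only when promising */
-+ static struct task_struct *best_across_rqs(struct rq *rq,
-+                                            struct task_struct *best)
-+ {
-+         int i;
-+
-+         for (i = 0; i < num_online_cpus(); i++) {
-+                 struct rq *other = rq->cpu_order[i]; /* locality order */
-+
-+                 if (!deadline_before(other->best_deadline, best->deadline))
-+                         continue;       /* lockless peek: nothing better */
-+                 if (!raw_spin_trylock(&other->lock))
-+                         continue;       /* contended: skip, don't wait */
-+                 /* recheck under the lock; take the task if still better */
-+                 best = try_steal_earliest(other, best);
-+                 raw_spin_unlock(&other->lock);
-+         }
-+         return best;
-+ }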
-+
-+
-+Latency.
-+
-+Through the use of virtual deadlines to govern the scheduling order of normal
-+tasks, queue-to-activation latency per runqueue is guaranteed to be bound by
-+the rr_interval tunable which is set to 6ms by default. This means that the
-+longest a CPU bound task will wait for more CPU is proportional to the number
-+of running tasks and in the common case of 0-2 running tasks per CPU, will be
-+under the 7ms threshold for human perception of jitter. Additionally, as newly
-+woken tasks will have an early deadline from their previous runtime, the very
-+tasks that are usually latency sensitive will have the shortest interval for
-+activation, usually preempting any existing CPU bound tasks.
-+
-+Tickless expiry:
-+
-+A feature of MuQSS is that it is not tied to the resolution of the chosen tick
-+rate in Hz, instead depending entirely on the high resolution timers where
-+possible for sub-millisecond accuracy on timeouts regardless of the underlying
-+tick rate. This allows MuQSS to be run with the low overhead of low Hz rates
-+such as 100 by default, benefiting from the improved throughput and lower
-+power usage it provides. Another advantage of this approach is that in
-+combination with the Full No HZ option, which disables ticks on running task
-+CPUs instead of just idle CPUs, the tick can be disabled at all times
-+regardless of how many tasks are running instead of being limited to just one
-+running task. Note that this option is NOT recommended for regular desktop
-+users.
-+
-+
-+Scalability and balancing.
-+
-+Unlike traditional approaches where balancing is a combination of CPU selection
-+at task wakeup and intermittent balancing based on a vast array of rules set
-+according to architecture, busyness calculations and special case management,
-+MuQSS indirectly balances on the fly at task wakeup and next task selection.
-+During initialisation, MuQSS creates a cache coherency ordered list of CPUs for
-+each logical CPU and uses this to aid task/CPU selection when CPUs are busy.
-+Additionally it selects any idle CPUs, if they are available, at any time over
-+busy CPUs according to the following preference:
-+
-+ * Same thread, idle or busy cache, idle or busy threads
-+ * Other core, same cache, idle or busy cache, idle threads.
-+ * Same node, other CPU, idle cache, idle threads.
-+ * Same node, other CPU, busy cache, idle threads.
-+ * Other core, same cache, busy threads.
-+ * Same node, other CPU, busy threads.
-+ * Other node, other CPU, idle cache, idle threads.
-+ * Other node, other CPU, busy cache, idle threads.
-+ * Other node, other CPU, busy threads.
-+
-+Mux is therefore SMT, MC and NUMA aware without the need for extra
-+intermittent balancing to keep CPUs busy and make the most of cache
-+coherency.
-+
-+
-+Features
-+
-+As the initial prime target audience for MuQSS was the average desktop user, it
-+was designed to not need tweaking, tuning or have features set to obtain benefit
-+from it. Thus the number of knobs and features has been kept to an absolute
-+minimum and should not require extra user input for the vast majority of cases.
-+There are 3 optional tunables and 2 extra scheduling policies: the rr_interval,
-+interactive, and iso_cpu tunables, and the SCHED_ISO and SCHED_IDLEPRIO
-+policies. In addition to this, MuQSS also uses sub-tick accounting. What MuQSS
-+does _not_ now feature is support for CGROUPS. The average user should neither
-+need to know what these are, nor should they need to be using them to have good
-+desktop behaviour. However since some applications refuse to work without
-+cgroups, one can enable them with MuQSS as a stub and the filesystem will be
-+created which will allow the applications to work.
-+
-+rr_interval:
-+
-+ /proc/sys/kernel/rr_interval
-+
-+The value is in milliseconds, and the default value is set to 6. Valid values
-+are from 1 to 1000. Decreasing the value will decrease latencies at the cost of
-+decreasing throughput, while increasing it will improve throughput, but at the
-+cost of worsening latencies. It is based on the fact that humans can detect
-+jitter at approximately 7ms, so aiming for much lower latencies is pointless
-+under most circumstances. It is worth noting this fact when comparing the
-+latency performance of MuQSS to other schedulers. Worst case latencies being
-+higher than 7ms are far worse than average latencies not being in the
-+microsecond range.
-+
-+interactive:
-+
-+ /proc/sys/kernel/interactive
-+
-+The value is a simple boolean of 1 for on and 0 for off and is set to on by
-+default. Disabling this will disable the near-determinism of MuQSS when
-+selecting the next task by not examining all CPUs for the earliest deadline
-+task, or which CPU to wake to, instead prioritising CPU balancing for improved
-+throughput. Latency will still be bound by rr_interval, but on a per-CPU basis
-+instead of across the whole system.
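-+
-+For example, to favour throughput over the global latency bound:
-+
-+ echo 0 > /proc/sys/kernel/interactive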
-+
-+Isochronous scheduling:
-+
-+Isochronous scheduling is a unique scheduling policy designed to provide
-+near-real-time performance to unprivileged (ie non-root) users without the
-+ability to starve the machine indefinitely. Isochronous tasks (which means
-+"same time") are set using, for example, the schedtool application like so:
-+
-+ schedtool -I -e amarok
-+
-+This will start the audio application "amarok" as SCHED_ISO. How SCHED_ISO works
-+is that it has a priority level between true realtime tasks and SCHED_NORMAL,
-+allowing ISO tasks to preempt all normal tasks, in a SCHED_RR fashion (ie,
-+if multiple SCHED_ISO tasks are running, they purely round robin at rr_interval
-+rate). However if ISO tasks run for more than a tunable finite amount of time,
-+they are then demoted back to SCHED_NORMAL scheduling. This finite amount of
-+time is the percentage of _total CPU_ available across the machine,
-+configurable as a percentage in the following "resource handling" tunable (as
-+opposed to a scheduler tunable):
-+
-+iso_cpu:
-+
-+ /proc/sys/kernel/iso_cpu
-+
-+and is set to 70% by default. It is calculated over a rolling 5 second average.
-+Because it is the total CPU available, it means that on a multi CPU machine, it
-+is possible to have an ISO task running as realtime scheduling indefinitely on
-+just one CPU, as the other CPUs will be available. Setting this to 100 is the
-+equivalent of giving all users SCHED_RR access and setting it to 0 removes the
-+ability to run any pseudo-realtime tasks.
-+
-+A feature of MuQSS is that it detects when an application tries to obtain a
-+realtime policy (SCHED_RR or SCHED_FIFO) and the caller does not have the
-+appropriate privileges to use those policies. When it detects this, it will
-+give the task SCHED_ISO policy instead. Thus it is transparent to the user.
-+
-+
-+Idleprio scheduling:
-+
-+Idleprio scheduling is a scheduling policy designed to give out CPU to a task
-+_only_ when the CPU would be otherwise idle. The idea behind this is to allow
-+ultra low priority tasks to be run in the background that have virtually no
-+effect on the foreground tasks. This is ideally suited to distributed computing
-+clients (like setiathome, folding, mprime etc) but can also be used to start a
-+video encode or so on without any slowdown of other tasks. To prevent this policy
-+from grabbing shared resources and holding them indefinitely, if it detects a
-+state where the task is waiting on I/O, the machine is about to suspend to ram
-+and so on, it will transiently schedule them as SCHED_NORMAL. Once a task has
-+been scheduled as IDLEPRIO, it cannot be put back to SCHED_NORMAL without
-+superuser privileges since it is effectively a lower scheduling policy. Tasks
-+can be set to start as SCHED_IDLEPRIO with the schedtool command like so:
-+
-+ schedtool -D -e ./mprime
-+
-+Subtick accounting:
-+
-+It is surprisingly difficult to get accurate CPU accounting, and in many cases,
-+the accounting is done by simply determining what is happening at the precise
-+moment a timer tick fires off. This becomes increasingly inaccurate as the timer
-+tick frequency (HZ) is lowered. It is possible to create an application which
-+uses almost 100% CPU, yet by being descheduled at the right time, records zero
-+CPU usage. While the main problem with this is that there are possible security
-+implications, it is also difficult to determine how much CPU a task really does
-+use. Mux uses sub-tick accounting from the TSC clock to determine real CPU
-+usage. Thus, the amount of CPU reported as being used by MuQSS will more
-+accurately represent how much CPU the task itself is using (as is shown for
-+example by the 'time' application), so the reported values may be quite
-+different to other schedulers. When comparing throughput of MuQSS to other
-+designs, it is important to compare the actual completed work in terms of total
-+wall clock time taken and total work done, rather than the reported "cpu usage".
-+
-+Symmetric MultiThreading (SMT) aware nice:
-+
-+SMT, a.k.a. hyperthreading, is a very common feature on modern CPUs. While the
-+logical CPU count rises by adding thread units to each CPU core, allowing more
-+than one task to be run simultaneously on the same core, the disadvantage of it
-+is that the CPU power is shared between the tasks, not summing to the power
-+of two CPUs. The practical upshot of this is that two tasks running on
-+separate threads of the same core run significantly slower than if they had one
-+core each to run on. While smart CPU selection allows each task to have a core
-+to itself whenever available (as is done on MuQSS), it cannot offset the
-+slowdown that occurs when the cores are all loaded and only a thread is left.
-+Most of the time this is harmless as the CPU is effectively overloaded at this
-+point and the extra thread is of benefit. However when running a niced task in
-+the presence of an un-niced task (say nice 19 v nice 0), the niced task gets
-+precisely the same amount of CPU power as the unniced one. MuQSS has an
-+optional configuration feature known as SMT-NICE which selectively idles the
-+secondary niced thread for a period proportional to the nice difference,
-+allowing CPU distribution according to nice level to be maintained, at the
-+expense of a small amount of extra overhead. If this is configured in on a
-+machine without SMT threads, the overhead is minimal.
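-+
-+In sketch form, the decision reduces to comparing the per-task smt_bias values
-+this patch adds to task_struct (the smt_schedule_ok helper is illustrative
-+only and ignores the proportional idling period):
-+
-+ /* Illustrative sketch: may this task run beside its SMT sibling's task? */
-+ static bool smt_schedule_ok(struct task_struct *p, struct task_struct *rival)
-+ {
-+         /* transiently idle this thread if the sibling's task outranks us */
-+         return p->smt_bias >= rival->smt_bias;
-+ }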
-+
-+
-+Con Kolivas <kernel@kolivas.org> Sat, 29th October 2016
-diff -Nur a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt
---- a/Documentation/sysctl/kernel.txt 2019-01-05 20:17:13.829237906 +0000
-+++ b/Documentation/sysctl/kernel.txt 2019-01-05 20:22:51.089998199 +0000
-@@ -39,6 +39,7 @@
- - hung_task_timeout_secs
- - hung_task_warnings
- - kexec_load_disabled
-+- iso_cpu
- - kptr_restrict
- - l2cr [ PPC only ]
- - modprobe ==> Documentation/debugging-modules.txt
-@@ -73,6 +74,7 @@
- - randomize_va_space
- - real-root-dev ==> Documentation/admin-guide/initrd.rst
- - reboot-cmd [ SPARC only ]
-+- rr_interval
- - rtsig-max
- - rtsig-nr
- - seccomp/ ==> Documentation/userspace-api/seccomp_filter.rst
-@@ -95,6 +97,7 @@
- - unknown_nmi_panic
- - watchdog
- - watchdog_thresh
-+- yield_type
- - version
-
- ==============================================================
-@@ -397,6 +400,16 @@
-
- ==============================================================
-
-+iso_cpu: (MuQSS CPU scheduler only).
-+
-+This sets the percentage cpu that the unprivileged SCHED_ISO tasks can
-+run effectively at realtime priority, averaged over a rolling five
-+seconds over the -whole- system, meaning all cpus.
-+
-+Set to 70 (percent) by default.
-+
-+==============================================================
-+
- l2cr: (PPC only)
-
- This flag controls the L2 cache of G3 processor boards. If
-@@ -823,6 +836,20 @@
-
- ==============================================================
-
-+rr_interval: (MuQSS CPU scheduler only)
-+
-+This is the smallest duration that any cpu process scheduling unit
-+will run for. Increasing this value can increase throughput of cpu
-+bound tasks substantially but at the expense of increased latencies
-+overall. Conversely decreasing it will decrease average and maximum
-+latencies but at the expense of throughput. This value is in
-+milliseconds and the default value chosen depends on the number of
-+cpus available at scheduler initialisation with a minimum of 6.
-+
-+Valid values are from 1-1000.
-+
-+==============================================================
-+
- rtsig-max & rtsig-nr:
-
- The file rtsig-max can be used to tune the maximum number
-@@ -1081,3 +1108,13 @@
- tunable to zero will disable lockup detection altogether.
-
- ==============================================================
-+
-+yield_type: (MuQSS CPU scheduler only)
-+
-+This determines what type of yield calls to sched_yield will perform.
-+
-+ 0: No yield.
-+ 1: Yield only to better priority/deadline tasks. (default)
-+ 2: Expire timeslice and recalculate deadline.
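-+
-+For example, to make sched_yield expire the timeslice:
-+
-+ echo 2 > /proc/sys/kernel/yield_type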
-+
-+==============================================================
-diff -Nur a/fs/proc/base.c b/fs/proc/base.c
---- a/fs/proc/base.c 2018-12-21 13:13:19.000000000 +0000
-+++ b/fs/proc/base.c 2019-01-05 20:22:51.089998199 +0000
-@@ -481,7 +481,7 @@
- seq_printf(m, "0 0 0\n");
- else
- seq_printf(m, "%llu %llu %lu\n",
-- (unsigned long long)task->se.sum_exec_runtime,
-+ (unsigned long long)tsk_seruntime(task),
- (unsigned long long)task->sched_info.run_delay,
- task->sched_info.pcount);
-
-diff -Nur a/include/linux/init_task.h b/include/linux/init_task.h
---- a/include/linux/init_task.h 2018-12-21 13:13:19.000000000 +0000
-+++ b/include/linux/init_task.h 2019-01-05 20:22:51.089998199 +0000
-@@ -172,8 +172,6 @@
- # define INIT_VTIME(tsk)
- #endif
-
--#define INIT_TASK_COMM "swapper"
--
- #ifdef CONFIG_RT_MUTEXES
- # define INIT_RT_MUTEXES(tsk) \
- .pi_waiters = RB_ROOT_CACHED, \
-@@ -223,6 +221,80 @@
- * INIT_TASK is used to set up the first task table, touch at
- * your own risk!. Base=0, limit=0x1fffff (=2MB)
- */
-+#ifdef CONFIG_SCHED_MUQSS
-+#define INIT_TASK_COMM "MuQSS"
-+#define INIT_TASK(tsk) \
-+{ \
-+ INIT_TASK_TI(tsk) \
-+ .state = 0, \
-+ .stack = init_stack, \
-+ .usage = ATOMIC_INIT(2), \
-+ .flags = PF_KTHREAD, \
-+ .prio = NORMAL_PRIO, \
-+ .static_prio = MAX_PRIO-20, \
-+ .normal_prio = NORMAL_PRIO, \
-+ .deadline = 0, \
-+ .policy = SCHED_NORMAL, \
-+ .cpus_allowed = CPU_MASK_ALL, \
-+ .mm = NULL, \
-+ .active_mm = &init_mm, \
-+ .restart_block = { \
-+ .fn = do_no_restart_syscall, \
-+ }, \
-+ .time_slice = 1000000, \
-+ .tasks = LIST_HEAD_INIT(tsk.tasks), \
-+ INIT_PUSHABLE_TASKS(tsk) \
-+ .ptraced = LIST_HEAD_INIT(tsk.ptraced), \
-+ .ptrace_entry = LIST_HEAD_INIT(tsk.ptrace_entry), \
-+ .real_parent = &tsk, \
-+ .parent = &tsk, \
-+ .children = LIST_HEAD_INIT(tsk.children), \
-+ .sibling = LIST_HEAD_INIT(tsk.sibling), \
-+ .group_leader = &tsk, \
-+ RCU_POINTER_INITIALIZER(real_cred, &init_cred), \
-+ RCU_POINTER_INITIALIZER(cred, &init_cred), \
-+ .comm = INIT_TASK_COMM, \
-+ .thread = INIT_THREAD, \
-+ .fs = &init_fs, \
-+ .files = &init_files, \
-+ .signal = &init_signals, \
-+ .sighand = &init_sighand, \
-+ .nsproxy = &init_nsproxy, \
-+ .pending = { \
-+ .list = LIST_HEAD_INIT(tsk.pending.list), \
-+ .signal = {{0}}}, \
-+ .blocked = {{0}}, \
-+ .alloc_lock = __SPIN_LOCK_UNLOCKED(tsk.alloc_lock), \
-+ .journal_info = NULL, \
-+ INIT_CPU_TIMERS(tsk) \
-+ .pi_lock = __RAW_SPIN_LOCK_UNLOCKED(tsk.pi_lock), \
-+ .timer_slack_ns = 50000, /* 50 usec default slack */ \
-+ .pids = { \
-+ [PIDTYPE_PID] = INIT_PID_LINK(PIDTYPE_PID), \
-+ [PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID), \
-+ [PIDTYPE_SID] = INIT_PID_LINK(PIDTYPE_SID), \
-+ }, \
-+ .thread_group = LIST_HEAD_INIT(tsk.thread_group), \
-+ .thread_node = LIST_HEAD_INIT(init_signals.thread_head), \
-+ INIT_IDS \
-+ INIT_PERF_EVENTS(tsk) \
-+ INIT_TRACE_IRQFLAGS \
-+ INIT_LOCKDEP \
-+ INIT_FTRACE_GRAPH \
-+ INIT_TRACE_RECURSION \
-+ INIT_TASK_RCU_PREEMPT(tsk) \
-+ INIT_TASK_RCU_TASKS(tsk) \
-+ INIT_CPUSET_SEQ(tsk) \
-+ INIT_RT_MUTEXES(tsk) \
-+ INIT_PREV_CPUTIME(tsk) \
-+ INIT_VTIME(tsk) \
-+ INIT_NUMA_BALANCING(tsk) \
-+ INIT_KASAN(tsk) \
-+ INIT_LIVEPATCH(tsk) \
-+ INIT_TASK_SECURITY \
-+}
-+#else /* CONFIG_SCHED_MUQSS */
-+#define INIT_TASK_COMM "swapper"
- #define INIT_TASK(tsk) \
- { \
- INIT_TASK_TI(tsk) \
-@@ -300,7 +372,7 @@
- INIT_LIVEPATCH(tsk) \
- INIT_TASK_SECURITY \
- }
--
-+#endif /* CONFIG_SCHED_MUQSS */
-
- /* Attach to the init_task data structure for proper alignment */
- #define __init_task_data __attribute__((__section__(".data..init_task")))
-diff -Nur a/include/linux/ioprio.h b/include/linux/ioprio.h
---- a/include/linux/ioprio.h 2018-12-21 13:13:19.000000000 +0000
-+++ b/include/linux/ioprio.h 2019-01-05 20:22:51.089998199 +0000
-@@ -52,6 +52,8 @@
- */
- static inline int task_nice_ioprio(struct task_struct *task)
- {
-+ if (iso_task(task))
-+ return 0;
- return (task_nice(task) + 20) / 5;
- }
-
-diff -Nur a/include/linux/sched/nohz.h b/include/linux/sched/nohz.h
---- a/include/linux/sched/nohz.h 2018-12-21 13:13:19.000000000 +0000
-+++ b/include/linux/sched/nohz.h 2019-01-05 20:22:51.089998199 +0000
-@@ -6,7 +6,7 @@
- * This is the interface between the scheduler and nohz/dynticks:
- */
-
--#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
-+#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON) && !defined(CONFIG_SCHED_MUQSS)
- extern void cpu_load_update_nohz_start(void);
- extern void cpu_load_update_nohz_stop(void);
- #else
-@@ -23,7 +23,7 @@
- static inline void set_cpu_sd_state_idle(void) { }
- #endif
-
--#ifdef CONFIG_NO_HZ_COMMON
-+#if defined(CONFIG_NO_HZ_COMMON) && !defined(CONFIG_SCHED_MUQSS)
- void calc_load_nohz_start(void);
- void calc_load_nohz_stop(void);
- #else
-diff -Nur a/include/linux/sched/prio.h b/include/linux/sched/prio.h
---- a/include/linux/sched/prio.h 2018-12-21 13:13:19.000000000 +0000
-+++ b/include/linux/sched/prio.h 2019-01-05 20:22:51.089998199 +0000
-@@ -20,8 +20,20 @@
- */
-
- #define MAX_USER_RT_PRIO 100
-+
-+#ifdef CONFIG_SCHED_MUQSS
-+/* Note different MAX_RT_PRIO */
-+#define MAX_RT_PRIO (MAX_USER_RT_PRIO + 1)
-+
-+#define ISO_PRIO (MAX_RT_PRIO)
-+#define NORMAL_PRIO (MAX_RT_PRIO + 1)
-+#define IDLE_PRIO (MAX_RT_PRIO + 2)
-+#define PRIO_LIMIT ((IDLE_PRIO) + 1)
-+#else /* CONFIG_SCHED_MUQSS */
- #define MAX_RT_PRIO MAX_USER_RT_PRIO
-
-+#endif /* CONFIG_SCHED_MUQSS */
-+
- #define MAX_PRIO (MAX_RT_PRIO + NICE_WIDTH)
- #define DEFAULT_PRIO (MAX_RT_PRIO + NICE_WIDTH / 2)
-
-diff -Nur a/include/linux/sched/task.h b/include/linux/sched/task.h
---- a/include/linux/sched/task.h 2018-12-21 13:13:19.000000000 +0000
-+++ b/include/linux/sched/task.h 2019-01-05 20:22:51.089998199 +0000
-@@ -80,7 +80,7 @@
- extern void free_task(struct task_struct *tsk);
-
- /* sched_exec is called by processes performing an exec */
--#ifdef CONFIG_SMP
-+#if defined(CONFIG_SMP) && !defined(CONFIG_SCHED_MUQSS)
- extern void sched_exec(void);
- #else
- #define sched_exec() {}
-diff -Nur a/include/linux/sched.h b/include/linux/sched.h
---- a/include/linux/sched.h 2018-12-21 13:13:19.000000000 +0000
-+++ b/include/linux/sched.h 2019-01-05 20:22:51.089998199 +0000
-@@ -27,6 +27,9 @@
- #include <linux/signal_types.h>
- #include <linux/mm_types_task.h>
- #include <linux/task_io_accounting.h>
-+#ifdef CONFIG_SCHED_MUQSS
-+#include <linux/skip_list.h>
-+#endif
-
- /* task_struct member predeclarations (sorted alphabetically): */
- struct audit_context;
-@@ -579,9 +582,11 @@
- unsigned int flags;
- unsigned int ptrace;
-
-+#if defined(CONFIG_SMP) || defined(CONFIG_SCHED_MUQSS)
-+ int on_cpu;
-+#endif
- #ifdef CONFIG_SMP
- struct llist_node wake_entry;
-- int on_cpu;
- #ifdef CONFIG_THREAD_INFO_IN_TASK
- /* Current CPU: */
- unsigned int cpu;
-@@ -598,10 +603,25 @@
- int static_prio;
- int normal_prio;
- unsigned int rt_priority;
-+#ifdef CONFIG_SCHED_MUQSS
-+ int time_slice;
-+ u64 deadline;
-+ skiplist_node node; /* Skip list node */
-+ u64 last_ran;
-+ u64 sched_time; /* sched_clock time spent running */
-+#ifdef CONFIG_SMT_NICE
-+ int smt_bias; /* Policy/nice level bias across smt siblings */
-+#endif
-+#ifdef CONFIG_HOTPLUG_CPU
-+ bool zerobound; /* Bound to CPU0 for hotplug */
-+#endif
-+ unsigned long rt_timeout;
-+#else /* CONFIG_SCHED_MUQSS */
-
- const struct sched_class *sched_class;
- struct sched_entity se;
- struct sched_rt_entity rt;
-+#endif
- #ifdef CONFIG_CGROUP_SCHED
- struct task_group *sched_task_group;
- #endif
-@@ -751,6 +771,10 @@
- u64 utimescaled;
- u64 stimescaled;
- #endif
-+#ifdef CONFIG_SCHED_MUQSS
-+ /* Unbanked cpu time */
-+ unsigned long utime_ns, stime_ns;
-+#endif
- u64 gtime;
- struct prev_cputime prev_cputime;
- #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
-@@ -1155,6 +1179,40 @@
- */
- };
-
-+#ifdef CONFIG_SCHED_MUQSS
-+#define tsk_seruntime(t) ((t)->sched_time)
-+#define tsk_rttimeout(t) ((t)->rt_timeout)
-+
-+static inline void tsk_cpus_current(struct task_struct *p)
-+{
-+}
-+
-+void print_scheduler_version(void);
-+
-+static inline bool iso_task(struct task_struct *p)
-+{
-+ return (p->policy == SCHED_ISO);
-+}
-+#else /* CFS */
-+#define tsk_seruntime(t) ((t)->se.sum_exec_runtime)
-+#define tsk_rttimeout(t) ((t)->rt.timeout)
-+
-+static inline void tsk_cpus_current(struct task_struct *p)
-+{
-+ p->nr_cpus_allowed = current->nr_cpus_allowed;
-+}
-+
-+static inline void print_scheduler_version(void)
-+{
-+ printk(KERN_INFO "CFS CPU scheduler.\n");
-+}
-+
-+static inline bool iso_task(struct task_struct *p)
-+{
-+ return false;
-+}
-+#endif /* CONFIG_SCHED_MUQSS */
-+
- static inline struct pid *task_pid(struct task_struct *task)
- {
- return task->pids[PIDTYPE_PID].pid;
-diff -Nur a/include/linux/skip_list.h b/include/linux/skip_list.h
---- a/include/linux/skip_list.h 1970-01-01 01:00:00.000000000 +0100
-+++ b/include/linux/skip_list.h 2019-01-05 20:22:51.089998199 +0000
-@@ -0,0 +1,33 @@
-+#ifndef _LINUX_SKIP_LISTS_H
-+#define _LINUX_SKIP_LISTS_H
-+typedef u64 keyType;
-+typedef void *valueType;
-+
-+typedef struct nodeStructure skiplist_node;
-+
-+struct nodeStructure {
-+ int level; /* Levels in this structure */
-+ keyType key;
-+ valueType value;
-+ skiplist_node *next[8];
-+ skiplist_node *prev[8];
-+};
-+
-+typedef struct listStructure {
-+ int entries;
-+ int level; /* Maximum level of the list
-+ (1 more than the number of levels in the list) */
-+ skiplist_node *header; /* pointer to header */
-+} skiplist;
-+
-+void skiplist_init(skiplist_node *slnode);
-+skiplist *new_skiplist(skiplist_node *slnode);
-+void free_skiplist(skiplist *l);
-+void skiplist_node_init(skiplist_node *node);
-+void skiplist_insert(skiplist *l, skiplist_node *node, keyType key, valueType value, unsigned int randseed);
-+void skiplist_delete(skiplist *l, skiplist_node *node);
-+
-+static inline bool skiplist_node_empty(skiplist_node *node)
-+{
-+ return (!node->next[0]);
-+}
-+#endif /* _LINUX_SKIP_LISTS_H */
-diff -Nur a/include/uapi/linux/sched.h b/include/uapi/linux/sched.h
---- a/include/uapi/linux/sched.h 2018-12-21 13:13:19.000000000 +0000
-+++ b/include/uapi/linux/sched.h 2019-01-05 20:22:51.089998199 +0000
-@@ -37,9 +37,16 @@
- #define SCHED_FIFO 1
- #define SCHED_RR 2
- #define SCHED_BATCH 3
--/* SCHED_ISO: reserved but not implemented yet */
-+/* SCHED_ISO: Implemented on MuQSS only */
- #define SCHED_IDLE 5
-+#ifdef CONFIG_SCHED_MUQSS
-+#define SCHED_ISO 4
-+#define SCHED_IDLEPRIO SCHED_IDLE
-+#define SCHED_MAX (SCHED_IDLEPRIO)
-+#define SCHED_RANGE(policy) ((policy) <= SCHED_MAX)
-+#else /* CONFIG_SCHED_MUQSS */
- #define SCHED_DEADLINE 6
-+#endif /* CONFIG_SCHED_MUQSS */
-
- /* Can be ORed in to make sure the process is reverted back to SCHED_NORMAL on fork */
- #define SCHED_RESET_ON_FORK 0x40000000
-diff -Nur a/init/Kconfig b/init/Kconfig
---- a/init/Kconfig 2019-01-05 20:17:13.849238543 +0000
-+++ b/init/Kconfig 2019-01-05 20:22:51.089998199 +0000
-@@ -38,6 +38,18 @@
-
- menu "General setup"
-
-+config SCHED_MUQSS
-+ bool "MuQSS cpu scheduler"
-+ select HIGH_RES_TIMERS
-+ ---help---
-+ The Multiple Queue Skiplist Scheduler for excellent interactivity and
-+ responsiveness on the desktop and highly scalable deterministic
-+ low latency on any hardware.
-+
-+ Say Y here.
-+ default y
-+
-+
- config BROKEN
- bool
-
-@@ -621,6 +633,7 @@
- depends on ARCH_SUPPORTS_NUMA_BALANCING
- depends on !ARCH_WANT_NUMA_VARIABLE_LOCALITY
- depends on SMP && NUMA && MIGRATION
-+ depends on !SCHED_MUQSS
- help
- This option adds support for automatic NUMA aware memory/task placement.
- The mechanism is quite primitive and is based on migrating memory when
-@@ -723,9 +736,13 @@
- help
- This feature lets CPU scheduler recognize task groups and control CPU
- bandwidth allocation to such task groups. It uses cgroups to group
-- tasks.
-+ tasks. In combination with MuQSS this is purely a STUB to create the
-+ files associated with the CPU controller cgroup but most of the
-+ controls do nothing. This is useful for working in environments and
-+ with applications that will only work if this control group is
-+ present.
-
--if CGROUP_SCHED
-+if CGROUP_SCHED && !SCHED_MUQSS
- config FAIR_GROUP_SCHED
- bool "Group scheduling for SCHED_OTHER"
- depends on CGROUP_SCHED
-@@ -832,6 +849,7 @@
-
- config CGROUP_CPUACCT
- bool "Simple CPU accounting controller"
-+ depends on !SCHED_MUQSS
- help
- Provides a simple controller for monitoring the
- total CPU consumed by the tasks in a cgroup.
-@@ -950,6 +968,7 @@
-
- config SCHED_AUTOGROUP
- bool "Automatic process group scheduling"
-+ depends on !SCHED_MUQSS
- select CGROUPS
- select CGROUP_SCHED
- select FAIR_GROUP_SCHED
-diff -Nur a/init/main.c b/init/main.c
---- a/init/main.c 2018-12-21 13:13:19.000000000 +0000
-+++ b/init/main.c 2019-01-05 20:22:51.089998199 +0000
-@@ -841,7 +841,6 @@
- return ret;
- }
-
--
- extern initcall_t __initcall_start[];
- extern initcall_t __initcall0_start[];
- extern initcall_t __initcall1_start[];
-@@ -1008,6 +1007,8 @@
-
- rcu_end_inkernel_boot();
-
-+ print_scheduler_version();
-+
- if (ramdisk_execute_command) {
- ret = run_init_process(ramdisk_execute_command);
- if (!ret)
-diff -Nur a/kernel/delayacct.c b/kernel/delayacct.c
---- a/kernel/delayacct.c 2018-12-21 13:13:19.000000000 +0000
-+++ b/kernel/delayacct.c 2019-01-05 20:22:51.089998199 +0000
-@@ -115,7 +115,7 @@
- */
- t1 = tsk->sched_info.pcount;
- t2 = tsk->sched_info.run_delay;
-- t3 = tsk->se.sum_exec_runtime;
-+ t3 = tsk_seruntime(tsk);
-
- d->cpu_count += t1;
-
-diff -Nur a/kernel/exit.c b/kernel/exit.c
---- a/kernel/exit.c 2018-12-21 13:13:19.000000000 +0000
-+++ b/kernel/exit.c 2019-01-05 20:22:51.089998199 +0000
-@@ -129,7 +129,7 @@
- sig->curr_target = next_thread(tsk);
- }
-
-- add_device_randomness((const void*) &tsk->se.sum_exec_runtime,
-+ add_device_randomness((const void*) &tsk_seruntime(tsk),
- sizeof(unsigned long long));
-
- /*
-@@ -150,7 +150,7 @@
- sig->inblock += task_io_get_inblock(tsk);
- sig->oublock += task_io_get_oublock(tsk);
- task_io_accounting_add(&sig->ioac, &tsk->ioac);
-- sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
-+ sig->sum_sched_runtime += tsk_seruntime(tsk);
- sig->nr_threads--;
- __unhash_process(tsk, group_dead);
- write_sequnlock(&sig->stats_lock);
-diff -Nur a/kernel/kthread.c b/kernel/kthread.c
---- a/kernel/kthread.c 2018-12-21 13:13:19.000000000 +0000
-+++ b/kernel/kthread.c 2019-01-05 20:22:51.099998516 +0000
-@@ -410,6 +410,34 @@
- }
- EXPORT_SYMBOL(kthread_bind);
-
-+#if defined(CONFIG_SCHED_MUQSS) && defined(CONFIG_SMP)
-+extern void __do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask);
-+
-+/*
-+ * new_kthread_bind is a special variant of __kthread_bind_mask.
-+ * For new threads to work on muqss we want to call do_set_cpus_allowed
-+ * without the task_cpu being set and the task rescheduled until they're
-+ * rescheduled on their own so we call __do_set_cpus_allowed directly which
-+ * only changes the cpumask. This is particularly important for smpboot threads
-+ * to work.
-+ */
-+static void new_kthread_bind(struct task_struct *p, unsigned int cpu)
-+{
-+ unsigned long flags;
-+
-+ if (WARN_ON(!wait_task_inactive(p, TASK_UNINTERRUPTIBLE)))
-+ return;
-+
-+ /* It's safe because the task is inactive. */
-+ raw_spin_lock_irqsave(&p->pi_lock, flags);
-+ __do_set_cpus_allowed(p, cpumask_of(cpu));
-+ p->flags |= PF_NO_SETAFFINITY;
-+ raw_spin_unlock_irqrestore(&p->pi_lock, flags);
-+}
-+#else
-+#define new_kthread_bind(p, cpu) kthread_bind(p, cpu)
-+#endif
-+
- /**
- * kthread_create_on_cpu - Create a cpu bound kthread
- * @threadfn: the function to run until signal_pending(current).
-@@ -431,7 +459,7 @@
- cpu);
- if (IS_ERR(p))
- return p;
-- kthread_bind(p, cpu);
-+ new_kthread_bind(p, cpu);
- /* CPU hotplug need to bind once again when unparking the thread. */
- set_bit(KTHREAD_IS_PER_CPU, &to_kthread(p)->flags);
- to_kthread(p)->cpu = cpu;
-diff -Nur a/kernel/livepatch/transition.c b/kernel/livepatch/transition.c
---- a/kernel/livepatch/transition.c 2018-12-21 13:13:19.000000000 +0000
-+++ b/kernel/livepatch/transition.c 2019-01-05 20:22:51.099998516 +0000
-@@ -277,6 +277,12 @@
- return 0;
- }
-
-+#ifdef CONFIG_SCHED_MUQSS
-+typedef unsigned long rq_flags_t;
-+#else
-+typedef struct rq_flags rq_flags_t;
-+#endif
-+
- /*
- * Try to safely switch a task to the target patch state. If it's currently
- * running, or it's sleeping on a to-be-patched or to-be-unpatched function, or
-@@ -285,7 +291,7 @@
- static bool klp_try_switch_task(struct task_struct *task)
- {
- struct rq *rq;
-- struct rq_flags flags;
-+ rq_flags_t flags;
- int ret;
- bool success = false;
- char err_buf[STACK_ERR_BUF_SIZE];
-diff -Nur a/kernel/Makefile b/kernel/Makefile
---- a/kernel/Makefile 2018-12-21 13:13:19.000000000 +0000
-+++ b/kernel/Makefile 2019-01-05 20:22:51.099998516 +0000
-@@ -10,7 +10,7 @@
- extable.o params.o \
- kthread.o sys_ni.o nsproxy.o \
- notifier.o ksysfs.o cred.o reboot.o \
-- async.o range.o smpboot.o ucount.o
-+ async.o range.o smpboot.o ucount.o skip_list.o
-
- obj-$(CONFIG_MODULES) += kmod.o
- obj-$(CONFIG_MULTIUSER) += groups.o
-diff -Nur a/kernel/rcu/Kconfig b/kernel/rcu/Kconfig
---- a/kernel/rcu/Kconfig 2018-12-21 13:13:19.000000000 +0000
-+++ b/kernel/rcu/Kconfig 2019-01-05 20:22:51.099998516 +0000
-@@ -93,7 +93,7 @@
- config CONTEXT_TRACKING_FORCE
- bool "Force context tracking"
- depends on CONTEXT_TRACKING
-- default y if !NO_HZ_FULL
-+ default y if !NO_HZ_FULL && !SCHED_MUQSS
- help
- The major pre-requirement for full dynticks to work is to
- support the context tracking subsystem. But there are also
-diff -Nur a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
---- a/kernel/sched/cpufreq_schedutil.c 2018-12-21 13:13:19.000000000 +0000
-+++ b/kernel/sched/cpufreq_schedutil.c 2019-01-05 20:22:51.099998516 +0000
-@@ -176,6 +176,17 @@
- return cpufreq_driver_resolve_freq(policy, freq);
- }
-
-+#ifdef CONFIG_SCHED_MUQSS
-+static void sugov_get_util(unsigned long *util, unsigned long *max, int cpu)
-+{
-+ struct rq *rq = cpu_rq(cpu);
-+
-+ *util = rq->load_avg;
-+ if (*util > SCHED_CAPACITY_SCALE)
-+ *util = SCHED_CAPACITY_SCALE;
-+ *max = SCHED_CAPACITY_SCALE;
-+}
-+#else /* CONFIG_SCHED_MUQSS */
- static void sugov_get_util(unsigned long *util, unsigned long *max, int cpu)
- {
- struct rq *rq = cpu_rq(cpu);
-@@ -186,6 +197,7 @@
- *util = min(rq->cfs.avg.util_avg, cfs_max);
- *max = cfs_max;
- }
-+#endif /* CONFIG_SCHED_MUQSS */
-
- static void sugov_set_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,
- unsigned int flags)
-diff -Nur a/kernel/sched/cputime.c b/kernel/sched/cputime.c
---- a/kernel/sched/cputime.c 2018-12-21 13:13:19.000000000 +0000
-+++ b/kernel/sched/cputime.c 2019-01-05 20:22:51.099998516 +0000
-@@ -270,26 +270,6 @@
- return accounted;
- }
-
--#ifdef CONFIG_64BIT
--static inline u64 read_sum_exec_runtime(struct task_struct *t)
--{
-- return t->se.sum_exec_runtime;
--}
--#else
--static u64 read_sum_exec_runtime(struct task_struct *t)
--{
-- u64 ns;
-- struct rq_flags rf;
-- struct rq *rq;
--
-- rq = task_rq_lock(t, &rf);
-- ns = t->se.sum_exec_runtime;
-- task_rq_unlock(rq, t, &rf);
--
-- return ns;
--}
--#endif
--
- /*
- * Accumulate raw cputime values of dead tasks (sig->[us]time) and live
- * tasks (sum on group iteration) belonging to @tsk's group.
-@@ -661,7 +641,7 @@
- void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
- {
- struct task_cputime cputime = {
-- .sum_exec_runtime = p->se.sum_exec_runtime,
-+ .sum_exec_runtime = tsk_seruntime(p),
- };
-
- task_cputime(p, &cputime.utime, &cputime.stime);
-diff -Nur a/kernel/sched/idle.c b/kernel/sched/idle.c
---- a/kernel/sched/idle.c 2018-12-21 13:13:19.000000000 +0000
-+++ b/kernel/sched/idle.c 2019-01-05 20:22:51.099998516 +0000
-@@ -209,6 +209,9 @@
- */
- static void do_idle(void)
- {
-+ int cpu = smp_processor_id();
-+ bool pending = false;
-+
- /*
- * If the arch has a polling bit, we maintain an invariant:
- *
-@@ -220,13 +223,16 @@
-
- __current_set_polling();
- quiet_vmstat();
-- tick_nohz_idle_enter();
-+ if (unlikely(softirq_pending(cpu)))
-+ pending = true;
-+ else
-+ tick_nohz_idle_enter();
-
- while (!need_resched()) {
- check_pgt_cache();
- rmb();
-
-- if (cpu_is_offline(smp_processor_id())) {
-+ if (cpu_is_offline(cpu)) {
- cpuhp_report_idle_dead();
- arch_cpu_idle_dead();
- }
-@@ -255,7 +261,8 @@
- * an IPI to fold the state for us.
- */
- preempt_set_need_resched();
-- tick_nohz_idle_exit();
-+ if (!pending)
-+ tick_nohz_idle_exit();
- __current_clr_polling();
-
- /*
-diff -Nur a/kernel/sched/Makefile b/kernel/sched/Makefile
---- a/kernel/sched/Makefile 2018-12-21 13:13:19.000000000 +0000
-+++ b/kernel/sched/Makefile 2019-01-05 20:22:51.099998516 +0000
-@@ -16,14 +16,20 @@
- CFLAGS_core.o := $(PROFILING) -fno-omit-frame-pointer
- endif
-
--obj-y += core.o loadavg.o clock.o cputime.o
-+ifdef CONFIG_SCHED_MUQSS
-+obj-y += MuQSS.o clock.o
-+else
-+obj-y += core.o loadavg.o clock.o
- obj-y += idle_task.o fair.o rt.o deadline.o
--obj-y += wait.o wait_bit.o swait.o completion.o idle.o
--obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o topology.o stop_task.o
-+obj-$(CONFIG_SMP) += cpudeadline.o stop_task.o
- obj-$(CONFIG_SCHED_AUTOGROUP) += autogroup.o
--obj-$(CONFIG_SCHEDSTATS) += stats.o
- obj-$(CONFIG_SCHED_DEBUG) += debug.o
- obj-$(CONFIG_CGROUP_CPUACCT) += cpuacct.o
-+endif
-+obj-y += cputime.o
-+obj-y += wait.o wait_bit.o swait.o completion.o idle.o
-+obj-$(CONFIG_SMP) += cpupri.o topology.o
-+obj-$(CONFIG_SCHEDSTATS) += stats.o
- obj-$(CONFIG_CPU_FREQ) += cpufreq.o
- obj-$(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) += cpufreq_schedutil.o
- obj-$(CONFIG_MEMBARRIER) += membarrier.o
-diff -Nur a/kernel/sched/MuQSS.c b/kernel/sched/MuQSS.c
---- a/kernel/sched/MuQSS.c 1970-01-01 01:00:00.000000000 +0100
-+++ b/kernel/sched/MuQSS.c 2019-01-05 20:22:51.099998516 +0000
-@@ -0,0 +1,6923 @@
-+// SPDX-License-Identifier: GPL-2.0
-+/*
-+ * kernel/sched/MuQSS.c, was kernel/sched.c
-+ *
-+ * Kernel scheduler and related syscalls
-+ *
-+ * Copyright (C) 1991-2002 Linus Torvalds
-+ *
-+ * 1996-12-23 Modified by Dave Grothe to fix bugs in semaphores and
-+ * make semaphores SMP safe
-+ * 1998-11-19 Implemented schedule_timeout() and related stuff
-+ * by Andrea Arcangeli
-+ * 2002-01-04 New ultra-scalable O(1) scheduler by Ingo Molnar:
-+ * hybrid priority-list and round-robin design with
-+ * an array-switch method of distributing timeslices
-+ * and per-CPU runqueues. Cleanups and useful suggestions
-+ * by Davide Libenzi, preemptible kernel bits by Robert Love.
-+ * 2003-09-03 Interactivity tuning by Con Kolivas.
-+ * 2004-04-02 Scheduler domains code by Nick Piggin
-+ * 2007-04-15 Work begun on replacing all interactivity tuning with a
-+ * fair scheduling design by Con Kolivas.
-+ * 2007-05-05 Load balancing (smp-nice) and other improvements
-+ * by Peter Williams
-+ * 2007-05-06 Interactivity improvements to CFS by Mike Galbraith
-+ * 2007-07-01 Group scheduling enhancements by Srivatsa Vaddagiri
-+ * 2007-11-29 RT balancing improvements by Steven Rostedt, Gregory Haskins,
-+ * Thomas Gleixner, Mike Kravetz
-+ * 2009-08-13 Brainfuck deadline scheduling policy by Con Kolivas deletes
-+ * a whole lot of those previous things.
-+ * 2016-10-01 Multiple Queue Skiplist Scheduler scalable evolution of BFS
-+ * scheduler by Con Kolivas.
-+ */
-+
-+#include <linux/sched.h>
-+#include <linux/sched/clock.h>
-+#include <uapi/linux/sched/types.h>
-+#include <linux/sched/loadavg.h>
-+#include <linux/sched/hotplug.h>
-+#include <linux/wait_bit.h>
-+#include <linux/cpuset.h>
-+#include <linux/delayacct.h>
-+#include <linux/init_task.h>
-+#include <linux/binfmts.h>
-+#include <linux/context_tracking.h>
-+#include <linux/rcupdate_wait.h>
-+#include <linux/skip_list.h>
-+
-+#include <linux/blkdev.h>
-+#include <linux/kprobes.h>
-+#include <linux/mmu_context.h>
-+#include <linux/module.h>
-+#include <linux/nmi.h>
-+#include <linux/prefetch.h>
-+#include <linux/profile.h>
-+#include <linux/security.h>
-+#include <linux/syscalls.h>
-+#include <linux/tick.h>
-+
-+#include <asm/switch_to.h>
-+#include <asm/tlb.h>
-+#ifdef CONFIG_PARAVIRT
-+#include <asm/paravirt.h>
-+#endif
-+
-+#include "../workqueue_internal.h"
-+#include "../smpboot.h"
-+
-+#define CREATE_TRACE_POINTS
-+#include <trace/events/sched.h>
-+
-+#include "MuQSS.h"
-+
-+#define rt_prio(prio) unlikely((prio) < MAX_RT_PRIO)
-+#define rt_task(p) rt_prio((p)->prio)
-+#define batch_task(p) (unlikely((p)->policy == SCHED_BATCH))
-+#define is_rt_policy(policy) ((policy) == SCHED_FIFO || \
-+ (policy) == SCHED_RR)
-+#define has_rt_policy(p) unlikely(is_rt_policy((p)->policy))
-+
-+#define is_idle_policy(policy) ((policy) == SCHED_IDLEPRIO)
-+#define idleprio_task(p) unlikely(is_idle_policy((p)->policy))
-+#define task_running_idle(p) unlikely((p)->prio == IDLE_PRIO)
-+
-+#define is_iso_policy(policy) ((policy) == SCHED_ISO)
-+#define iso_task(p) unlikely(is_iso_policy((p)->policy))
-+#define task_running_iso(p) unlikely((p)->prio == ISO_PRIO)
-+
-+#define rq_idle(rq) ((rq)->rq_prio == PRIO_LIMIT)
-+
-+#define ISO_PERIOD (5 * HZ)
-+
-+#define STOP_PRIO (MAX_RT_PRIO - 1)
-+
-+/*
-+ * Some helpers for converting to/from various scales. Use shifts to get
-+ * approximate multiples of ten for less overhead.
-+ */
-+#define JIFFIES_TO_NS(TIME) ((TIME) * (1073741824 / HZ))
-+#define JIFFY_NS (1073741824 / HZ)
-+#define JIFFY_US (1048576 / HZ)
-+#define NS_TO_JIFFIES(TIME) ((TIME) / JIFFY_NS)
-+#define HALF_JIFFY_NS (1073741824 / HZ / 2)
-+#define HALF_JIFFY_US (1048576 / HZ / 2)
-+#define MS_TO_NS(TIME) ((TIME) << 20)
-+#define MS_TO_US(TIME) ((TIME) << 10)
-+#define NS_TO_MS(TIME) ((TIME) >> 20)
-+#define NS_TO_US(TIME) ((TIME) >> 10)
-+#define US_TO_NS(TIME) ((TIME) << 10)
-+
-+#define RESCHED_US (100) /* Reschedule if less than this many μs left */
-+
-+void print_scheduler_version(void)
-+{
-+ printk(KERN_INFO "MuQSS CPU scheduler v0.162 by Con Kolivas.\n");
-+}
-+
-+/*
-+ * This is the time all tasks within the same priority round robin.
-+ * Value is in ms and set to a minimum of 6ms.
-+ * Tunable via /proc interface.
-+ */
-+int rr_interval __read_mostly = 6;
-+
-+/*
-+ * Tunable to choose whether to prioritise latency or throughput, simple
-+ * binary yes or no
-+ */
-+int sched_interactive __read_mostly = 1;
-+
-+/*
-+ * sched_iso_cpu - sysctl which determines the cpu percentage SCHED_ISO tasks
-+ * are allowed to run five seconds as real time tasks. This is the total over
-+ * all online cpus.
-+ */
-+int sched_iso_cpu __read_mostly = 70;
-+
-+/*
-+ * sched_yield_type - Choose what sort of yield sched_yield will perform.
-+ * 0: No yield.
-+ * 1: Yield only to better priority/deadline tasks. (default)
-+ * 2: Expire timeslice and recalculate deadline.
-+ */
-+int sched_yield_type __read_mostly = 1;
-+
-+/*
-+ * The relative length of deadline for each priority(nice) level.
-+ */
-+static int prio_ratios[NICE_WIDTH] __read_mostly;
-+
-+/*
-+ * The quota handed out to tasks of all priority levels when refilling their
-+ * time_slice.
-+ */
-+static inline int timeslice(void)
-+{
-+ return MS_TO_US(rr_interval);
-+}
-+
-+#ifdef CONFIG_SMP
-+static cpumask_t cpu_idle_map ____cacheline_aligned_in_smp;
-+#endif
-+
-+/* CPUs with isolated domains */
-+cpumask_var_t cpu_isolated_map;
-+
-+DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
-+#ifdef CONFIG_SMP
-+struct rq *cpu_rq(int cpu)
-+{
-+ return &per_cpu(runqueues, (cpu));
-+}
-+#define cpu_curr(cpu) (cpu_rq(cpu)->curr)
-+
-+/*
-+ * For asym packing, by default the lower numbered cpu has higher priority.
-+ */
-+int __weak arch_asym_cpu_priority(int cpu)
-+{
-+ return -cpu;
-+}
-+
-+int __weak arch_sd_sibling_asym_packing(void)
-+{
-+ return 0*SD_ASYM_PACKING;
-+}
-+#else
-+struct rq *uprq;
-+#endif /* CONFIG_SMP */
-+
-+#include "stats.h"
-+
-+#ifndef prepare_arch_switch
-+# define prepare_arch_switch(next) do { } while (0)
-+#endif
-+#ifndef finish_arch_switch
-+# define finish_arch_switch(prev) do { } while (0)
-+#endif
-+#ifndef finish_arch_post_lock_switch
-+# define finish_arch_post_lock_switch() do { } while (0)
-+#endif
-+
-+/*
-+ * All common locking functions performed on rq->lock. rq->clock is local to
-+ * the CPU accessing it so it can be modified just with interrupts disabled
-+ * when we're not updating niffies.
-+ * Looking up task_rq must be done under rq->lock to be safe.
-+ */
-+
-+/*
-+ * RQ-clock updating methods:
-+ */
-+
-+static void update_rq_clock_task(struct rq *rq, s64 delta)
-+{
-+/*
-+ * In theory, the compiler should just see 0 here, and optimize out the call
-+ * to sched_rt_avg_update. But I don't trust it...
-+ */
-+#ifdef CONFIG_IRQ_TIME_ACCOUNTING
-+ s64 irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;
-+
-+ /*
-+ * Since irq_time is only updated on {soft,}irq_exit, we might run into
-+ * this case when a previous update_rq_clock() happened inside a
-+ * {soft,}irq region.
-+ *
-+ * When this happens, we stop ->clock_task and only update the
-+ * prev_irq_time stamp to account for the part that fit, so that a next
-+ * update will consume the rest. This ensures ->clock_task is
-+ * monotonic.
-+ *
-+ * It does however cause some slight misattribution of {soft,}irq
-+ * time; a more accurate solution would be to update the irq_time using
-+ * the current rq->clock timestamp, except that would require using
-+ * atomic ops.
-+ */
-+ if (irq_delta > delta)
-+ irq_delta = delta;
-+
-+ rq->prev_irq_time += irq_delta;
-+ delta -= irq_delta;
-+#endif
-+#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
-+ if (static_key_false((&paravirt_steal_rq_enabled))) {
-+ s64 steal = paravirt_steal_clock(cpu_of(rq));
-+
-+ steal -= rq->prev_steal_time_rq;
-+
-+ if (unlikely(steal > delta))
-+ steal = delta;
-+
-+ rq->prev_steal_time_rq += steal;
-+
-+ delta -= steal;
-+ }
-+#endif
-+ rq->clock_task += delta;
-+}
-+
-+static inline void update_rq_clock(struct rq *rq)
-+{
-+ s64 delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
-+
-+ if (unlikely(delta < 0))
-+ return;
-+ rq->clock += delta;
-+ update_rq_clock_task(rq, delta);
-+}
-+
-+/*
-+ * Niffies are a globally increasing nanosecond counter. They're only used by
-+ * update_load_avg and time_slice_expired; however, deadlines are based on
-+ * them across CPUs. Update them whenever we will call one of those
-+ * functions, and synchronise them across CPUs whenever we hold both
-+ * runqueue locks.
-+ */
-+static inline void update_clocks(struct rq *rq)
-+{
-+ s64 ndiff, minndiff;
-+ long jdiff;
-+
-+ update_rq_clock(rq);
-+ ndiff = rq->clock - rq->old_clock;
-+ rq->old_clock = rq->clock;
-+ jdiff = jiffies - rq->last_jiffy;
-+
-+ /* Subtract any niffies added by balancing with other rqs */
-+ ndiff -= rq->niffies - rq->last_niffy;
-+ minndiff = JIFFIES_TO_NS(jdiff) - rq->niffies + rq->last_jiffy_niffies;
-+ if (minndiff < 0)
-+ minndiff = 0;
-+ ndiff = max(ndiff, minndiff);
-+ rq->niffies += ndiff;
-+ rq->last_niffy = rq->niffies;
-+ if (jdiff) {
-+ rq->last_jiffy += jdiff;
-+ rq->last_jiffy_niffies = rq->niffies;
-+ }
-+}
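-+
-+/*
-+ * Illustrative note: the max(ndiff, minndiff) above acts as a floor so
-+ * niffies never lag the jiffy clock. E.g. if rq->clock barely advanced
-+ * while two jiffies elapsed, ndiff would be ~0 but minndiff would be
-+ * ~2 * JIFFY_NS, so niffies still move forward two jiffies' worth.
-+ */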
-+
-+static inline int task_on_rq_queued(struct task_struct *p)
-+{
-+ return p->on_rq == TASK_ON_RQ_QUEUED;
-+}
-+
-+static inline int task_on_rq_migrating(struct task_struct *p)
-+{
-+ return p->on_rq == TASK_ON_RQ_MIGRATING;
-+}
-+
-+static inline int rq_trylock(struct rq *rq)
-+ __acquires(rq->lock)
-+{
-+ return raw_spin_trylock(&rq->lock);
-+}
-+
-+/*
-+ * Any time we have two runqueues locked we use that as an opportunity to
-+ * synchronise niffies to the highest value, as idle ticks may have
-+ * artificially kept niffies low on one CPU and the true value can only
-+ * ever be later, never earlier.
-+ */
-+static inline void synchronise_niffies(struct rq *rq1, struct rq *rq2)
-+{
-+ if (rq1->niffies > rq2->niffies)
-+ rq2->niffies = rq1->niffies;
-+ else
-+ rq1->niffies = rq2->niffies;
-+}
-+
-+/*
-+ * double_rq_lock - safely lock two runqueues
-+ *
-+ * Note this does not disable interrupts like task_rq_lock,
-+ * you need to do so manually before calling.
-+ */
-+
-+/* For when we know rq1 != rq2 */
-+static inline void __double_rq_lock(struct rq *rq1, struct rq *rq2)
-+ __acquires(rq1->lock)
-+ __acquires(rq2->lock)
-+{
-+ if (rq1 < rq2) {
-+ raw_spin_lock(&rq1->lock);
-+ raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
-+ } else {
-+ raw_spin_lock(&rq2->lock);
-+ raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
-+ }
-+}
-+
-+static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
-+ __acquires(rq1->lock)
-+ __acquires(rq2->lock)
-+{
-+ BUG_ON(!irqs_disabled());
-+ if (rq1 == rq2) {
-+ raw_spin_lock(&rq1->lock);
-+ __acquire(rq2->lock); /* Fake it out ;) */
-+ } else
-+ __double_rq_lock(rq1, rq2);
-+ synchronise_niffies(rq1, rq2);
-+}
-+
-+/*
-+ * double_rq_unlock - safely unlock two runqueues
-+ *
-+ * Note this does not restore interrupts like task_rq_unlock,
-+ * you need to do so manually after calling.
-+ */
-+static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
-+ __releases(rq1->lock)
-+ __releases(rq2->lock)
-+{
-+ raw_spin_unlock(&rq1->lock);
-+ if (rq1 != rq2)
-+ raw_spin_unlock(&rq2->lock);
-+ else
-+ __release(rq2->lock);
-+}
-+
-+static inline void lock_all_rqs(void)
-+{
-+ int cpu;
-+
-+ preempt_disable();
-+ for_each_possible_cpu(cpu) {
-+ struct rq *rq = cpu_rq(cpu);
-+
-+ do_raw_spin_lock(&rq->lock);
-+ }
-+}
-+
-+static inline void unlock_all_rqs(void)
-+{
-+ int cpu;
-+
-+ for_each_possible_cpu(cpu) {
-+ struct rq *rq = cpu_rq(cpu);
-+
-+ do_raw_spin_unlock(&rq->lock);
-+ }
-+ preempt_enable();
-+}
-+
-+/* Trylock an rq with a nested locking annotation */
-+static inline bool trylock_rq(struct rq *this_rq, struct rq *rq)
-+{
-+ if (unlikely(!do_raw_spin_trylock(&rq->lock)))
-+ return false;
-+ spin_acquire(&rq->lock.dep_map, SINGLE_DEPTH_NESTING, 1, _RET_IP_);
-+ synchronise_niffies(this_rq, rq);
-+ return true;
-+}
-+
-+/* Unlock a specially nested trylocked rq */
-+static inline void unlock_rq(struct rq *rq)
-+{
-+ spin_release(&rq->lock.dep_map, 1, _RET_IP_);
-+ do_raw_spin_unlock(&rq->lock);
-+}
-+
-+/*
-+ * cmpxchg based fetch_or, macro so it works for different integer types
-+ */
-+#define fetch_or(ptr, mask) \
-+ ({ \
-+ typeof(ptr) _ptr = (ptr); \
-+ typeof(mask) _mask = (mask); \
-+ typeof(*_ptr) _old, _val = *_ptr; \
-+ \
-+ for (;;) { \
-+ _old = cmpxchg(_ptr, _val, _val | _mask); \
-+ if (_old == _val) \
-+ break; \
-+ _val = _old; \
-+ } \
-+ _old; \
-+})
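-+
-+/*
-+ * Usage sketch (illustrative): fetch_or(&ti->flags, _TIF_NEED_RESCHED)
-+ * atomically sets the resched bit and returns the flags as they were
-+ * beforehand, which is how set_nr_and_not_polling() below can test
-+ * _TIF_POLLING_NRFLAG from the same atomic operation.
-+ */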
-+
-+#if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG)
-+/*
-+ * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG,
-+ * this avoids any races wrt polling state changes and thereby avoids
-+ * spurious IPIs.
-+ */
-+static bool set_nr_and_not_polling(struct task_struct *p)
-+{
-+ struct thread_info *ti = task_thread_info(p);
-+ return !(fetch_or(&ti->flags, _TIF_NEED_RESCHED) & _TIF_POLLING_NRFLAG);
-+}
-+
-+/*
-+ * Atomically set TIF_NEED_RESCHED if TIF_POLLING_NRFLAG is set.
-+ *
-+ * If this returns true, then the idle task promises to call
-+ * sched_ttwu_pending() and reschedule soon.
-+ */
-+static bool set_nr_if_polling(struct task_struct *p)
-+{
-+ struct thread_info *ti = task_thread_info(p);
-+ typeof(ti->flags) old, val = READ_ONCE(ti->flags);
-+
-+ for (;;) {
-+ if (!(val & _TIF_POLLING_NRFLAG))
-+ return false;
-+ if (val & _TIF_NEED_RESCHED)
-+ return true;
-+ old = cmpxchg(&ti->flags, val, val | _TIF_NEED_RESCHED);
-+ if (old == val)
-+ break;
-+ val = old;
-+ }
-+ return true;
-+}
-+
-+#else
-+static bool set_nr_and_not_polling(struct task_struct *p)
-+{
-+ set_tsk_need_resched(p);
-+ return true;
-+}
-+
-+#ifdef CONFIG_SMP
-+static bool set_nr_if_polling(struct task_struct *p)
-+{
-+ return false;
-+}
-+#endif
-+#endif
-+
-+void wake_q_add(struct wake_q_head *head, struct task_struct *task)
-+{
-+ struct wake_q_node *node = &task->wake_q;
-+
-+ /*
-+ * Atomically grab the task; if ->wake_q is already non-nil it means
-+ * it's already queued (either by us or someone else) and will get the
-+ * wakeup due to that.
-+ *
-+ * This cmpxchg() implies a full barrier, which pairs with the write
-+ * barrier implied by the wakeup in wake_up_q().
-+ */
-+ if (cmpxchg(&node->next, NULL, WAKE_Q_TAIL))
-+ return;
-+
-+ get_task_struct(task);
-+
-+ /*
-+ * The head is context local, there can be no concurrency.
-+ */
-+ *head->lastp = node;
-+ head->lastp = &node->next;
-+}
-+
-+void wake_up_q(struct wake_q_head *head)
-+{
-+ struct wake_q_node *node = head->first;
-+
-+ while (node != WAKE_Q_TAIL) {
-+ struct task_struct *task;
-+
-+ task = container_of(node, struct task_struct, wake_q);
-+ BUG_ON(!task);
-+ /* Task can safely be re-inserted now */
-+ node = node->next;
-+ task->wake_q.next = NULL;
-+
-+ /*
-+ * wake_up_process() implies a wmb() to pair with the queueing
-+ * in wake_q_add() so as not to miss wakeups.
-+ */
-+ wake_up_process(task);
-+ put_task_struct(task);
-+ }
-+}
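-+
-+/*
-+ * Usage sketch (illustrative): a caller batches wakeups while holding a
-+ * lock via wake_q_add(&head, p), then calls wake_up_q(&head) after
-+ * dropping it, so wake_up_process() never runs under the caller's lock.
-+ */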
-+
-+static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
-+{
-+ next->on_cpu = 1;
-+}
-+
-+static inline void smp_sched_reschedule(int cpu)
-+{
-+ if (likely(cpu_online(cpu)))
-+ smp_send_reschedule(cpu);
-+}
-+
-+/*
-+ * resched_task - mark a task 'to be rescheduled now'.
-+ *
-+ * On UP this means the setting of the need_resched flag, on SMP it
-+ * might also involve a cross-CPU call to trigger the scheduler on
-+ * the target CPU.
-+ */
-+void resched_task(struct task_struct *p)
-+{
-+ int cpu;
-+#ifdef CONFIG_LOCKDEP
-+ /*
-+ * Kernel threads call this when creating workqueues while still
-+ * inactive from __kthread_bind_mask, holding only the pi_lock.
-+ */
-+ if (!(p->flags & PF_KTHREAD)) {
-+ struct rq *rq = task_rq(p);
-+
-+ lockdep_assert_held(&rq->lock);
-+ }
-+#endif
-+ if (test_tsk_need_resched(p))
-+ return;
-+
-+ cpu = task_cpu(p);
-+ if (cpu == smp_processor_id()) {
-+ set_tsk_need_resched(p);
-+ set_preempt_need_resched();
-+ return;
-+ }
-+
-+ if (set_nr_and_not_polling(p))
-+ smp_sched_reschedule(cpu);
-+ else
-+ trace_sched_wake_idle_without_ipi(cpu);
-+}
-+
-+/*
-+ * A task that is not running or queued will not have a node set.
-+ * A task that is queued but not running will have a node set.
-+ * A task that is currently running will have ->on_cpu set but no node set.
-+ */
-+static inline bool task_queued(struct task_struct *p)
-+{
-+ return !skiplist_node_empty(&p->node);
-+}
-+
-+static void enqueue_task(struct rq *rq, struct task_struct *p, int flags);
-+static inline void resched_if_idle(struct rq *rq);
-+
-+/* Dodgy workaround till we figure out where the softirqs are going */
-+static inline void do_pending_softirq(struct rq *rq, struct task_struct *next)
-+{
-+ if (unlikely(next == rq->idle && local_softirq_pending() && !in_interrupt()))
-+ do_softirq_own_stack();
-+}
-+
-+static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
-+{
-+#ifdef CONFIG_SMP
-+ /*
-+ * After ->on_cpu is cleared, the task can be moved to a different CPU.
-+ * We must ensure this doesn't happen until the switch is completely
-+ * finished.
-+ *
-+ * In particular, the load of prev->state in finish_task_switch() must
-+ * happen before this.
-+ *
-+ * Pairs with the smp_cond_load_acquire() in try_to_wake_up().
-+ */
-+ smp_store_release(&prev->on_cpu, 0);
-+#endif
-+#ifdef CONFIG_DEBUG_SPINLOCK
-+ /* this is a valid case when another task releases the spinlock */
-+ rq->lock.owner = current;
-+#endif
-+ /*
-+ * If we are tracking spinlock dependencies then we have to
-+ * fix up the runqueue lock - which gets 'carried over' from
-+ * prev into current:
-+ */
-+ spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
-+
-+#ifdef CONFIG_SMP
-+ /*
-+ * If prev was marked as migrating to another CPU in return_task, drop
-+ * the local runqueue lock but leave interrupts disabled and grab the
-+ * remote lock we're migrating it to before enabling them.
-+ */
-+ if (unlikely(task_on_rq_migrating(prev))) {
-+ sched_info_dequeued(rq, prev);
-+ /*
-+ * We move the ownership of prev to the new cpu now. ttwu can't
-+ * activate prev to the wrong cpu since it has to grab this
-+ * runqueue in ttwu_remote.
-+ */
-+#ifdef CONFIG_THREAD_INFO_IN_TASK
-+ prev->cpu = prev->wake_cpu;
-+#else
-+ task_thread_info(prev)->cpu = prev->wake_cpu;
-+#endif
-+ raw_spin_unlock(&rq->lock);
-+
-+ raw_spin_lock(&prev->pi_lock);
-+ rq = __task_rq_lock(prev);
-+ /* Check that someone else hasn't already queued prev */
-+ if (likely(!task_queued(prev))) {
-+ enqueue_task(rq, prev, 0);
-+ prev->on_rq = TASK_ON_RQ_QUEUED;
-+ /* Wake up the CPU if it's not already running */
-+ resched_if_idle(rq);
-+ }
-+ raw_spin_unlock(&prev->pi_lock);
-+ }
-+#endif
-+ /* Accurately set nr_running here for load average calculations */
-+ rq->nr_running = rq->sl->entries + !rq_idle(rq);
-+ rq_unlock(rq);
-+
-+ do_pending_softirq(rq, current);
-+
-+ local_irq_enable();
-+}
-+
-+static inline bool deadline_before(u64 deadline, u64 time)
-+{
-+ return (deadline < time);
-+}
-+
-+/*
-+ * Deadline is "now" in niffies + (offset by priority). Setting the deadline
-+ * is the key to everything. It distributes cpu fairly amongst tasks of the
-+ * same nice value, proportions cpu according to nice level, and gives the
-+ * task that last woke up the longest ago the earliest deadline, thus
-+ * ensuring that interactive tasks get low latency on wake up. The CPU
-+ * proportion works out to the square of the virtual deadline difference, so
-+ * this equation gives nice 19 roughly 3% CPU compared to nice 0.
-+ */
-+static inline u64 prio_deadline_diff(int user_prio)
-+{
-+ return (prio_ratios[user_prio] * rr_interval * (MS_TO_NS(1) / 128));
-+}
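-+
-+/*
-+ * Worked example (illustrative, assuming the default rr_interval of 6
-+ * and a base prio_ratios[0] of 128 as set up at init, not shown here):
-+ * nice -20 gives 128 * 6 * (MS_TO_NS(1) / 128) == 6 << 20 ns ~= 6.3ms,
-+ * and each weaker nice level scales that offset up by its prio_ratio.
-+ */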
-+
-+static inline u64 task_deadline_diff(struct task_struct *p)
-+{
-+ return prio_deadline_diff(TASK_USER_PRIO(p));
-+}
-+
-+static inline u64 static_deadline_diff(int static_prio)
-+{
-+ return prio_deadline_diff(USER_PRIO(static_prio));
-+}
-+
-+static inline int longest_deadline_diff(void)
-+{
-+ return prio_deadline_diff(39);
-+}
-+
-+static inline int ms_longest_deadline_diff(void)
-+{
-+ return NS_TO_MS(longest_deadline_diff());
-+}
-+
-+static inline bool rq_local(struct rq *rq);
-+
-+#ifndef SCHED_CAPACITY_SCALE
-+#define SCHED_CAPACITY_SCALE 1024
-+#endif
-+
-+static inline int rq_load(struct rq *rq)
-+{
-+ return rq->nr_running;
-+}
-+
-+/*
-+ * Update the load average for feeding into cpu frequency governors. Use a
-+ * rough estimate of a rolling average with ~ time constant of 32ms.
-+ * Decaying by 80/128 (~0.63, roughly 1 - 1/e) per 32768us works out to
-+ * subtracting load * us_interval * 5 / 262144 each update, since
-+ * 80 / 32768 / 128 == 5 / 262144.
-+ * Make sure a call to update_clocks has been made before calling this to get
-+ * an updated rq->niffies.
-+ */
-+static void update_load_avg(struct rq *rq, unsigned int flags)
-+{
-+ unsigned long us_interval, curload;
-+ long load;
-+
-+ if (unlikely(rq->niffies <= rq->load_update))
-+ return;
-+
-+ us_interval = NS_TO_US(rq->niffies - rq->load_update);
-+ curload = rq_load(rq);
-+ load = rq->load_avg - (rq->load_avg * us_interval * 5 / 262144);
-+ if (unlikely(load < 0))
-+ load = 0;
-+ load += curload * curload * SCHED_CAPACITY_SCALE * us_interval * 5 / 262144;
-+ rq->load_avg = load;
-+
-+ rq->load_update = rq->niffies;
-+ if (likely(rq_local(rq)))
-+ cpufreq_trigger(rq, flags);
-+}
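-+
-+/*
-+ * Worked example (illustrative): over an idle 32768us interval the
-+ * decay term is load_avg * 32768 * 5 / 262144 == load_avg * 0.625, so
-+ * ~63% of the average drains per ~32ms, matching the one-time-constant
-+ * (1 - 1/e) behaviour described above.
-+ */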
-+
-+/*
-+ * Removing from the runqueue. Enter with rq locked. Deleting a task
-+ * from the skip list is done via the stored node reference in the task struct
-+ * and does not require a full look up. Thus it occurs in O(k) time where k
-+ * is the "level" of the list the task was stored at - usually < 4, max 8.
-+ */
-+static void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
-+{
-+ skiplist_delete(rq->sl, &p->node);
-+ rq->best_key = rq->node.next[0]->key;
-+ update_clocks(rq);
-+
-+ if (!(flags & DEQUEUE_SAVE))
-+ sched_info_dequeued(task_rq(p), p);
-+ update_load_avg(rq, flags);
-+}
-+
-+#ifdef CONFIG_PREEMPT_RCU
-+static bool rcu_read_critical(struct task_struct *p)
-+{
-+ return p->rcu_read_unlock_special.b.blocked;
-+}
-+#else /* CONFIG_PREEMPT_RCU */
-+#define rcu_read_critical(p) (false)
-+#endif /* CONFIG_PREEMPT_RCU */
-+
-+/*
-+ * To determine if it's safe for a task of SCHED_IDLEPRIO to actually run as
-+ * an idle task, we check that it does not contribute to load, is not
-+ * exiting, has no signal pending, is not in an RCU read-side critical
-+ * section and is not freezing.
-+ */
-+static bool idleprio_suitable(struct task_struct *p)
-+{
-+ return (!(task_contributes_to_load(p)) && !(p->flags & (PF_EXITING)) &&
-+ !signal_pending(p) && !rcu_read_critical(p) && !freezing(p));
-+}
-+
-+/*
-+ * To determine if a task of SCHED_ISO can run in pseudo-realtime, we check
-+ * that the iso_refractory flag is not set.
-+ */
-+static inline bool isoprio_suitable(struct rq *rq)
-+{
-+ return !rq->iso_refractory;
-+}
-+
-+/*
-+ * Adding to the runqueue. Enter with rq locked.
-+ */
-+static void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
-+{
-+ unsigned int randseed, cflags = 0;
-+ u64 sl_id;
-+
-+ if (!rt_task(p)) {
-+ /* Check it hasn't gotten rt from PI */
-+ if ((idleprio_task(p) && idleprio_suitable(p)) ||
-+ (iso_task(p) && isoprio_suitable(rq)))
-+ p->prio = p->normal_prio;
-+ else
-+ p->prio = NORMAL_PRIO;
-+ }
-+ /*
-+ * The sl_id key passed to the skiplist generates a sorted list.
-+ * Realtime and sched iso tasks run FIFO so they only need be sorted
-+ * according to priority. The skiplist will put tasks of the same
-+ * key inserted later in FIFO order. Tasks of sched normal, batch
-+ * and idleprio are sorted according to their deadlines. Idleprio
-+ * tasks are offset by an impossibly large deadline value ensuring
-+ * they get sorted into last positions, but still according to their
-+ * own deadlines. This creates a "landscape" of skiplists running
-+ * from priority 0 realtime in first place to the lowest priority
-+ * idleprio tasks last. Skiplist insertion is an O(log n) process.
-+ */
-+ if (p->prio <= ISO_PRIO) {
-+ sl_id = p->prio;
-+ cflags = SCHED_CPUFREQ_RT;
-+ } else {
-+ sl_id = p->deadline;
-+ if (idleprio_task(p)) {
-+ if (p->prio == IDLE_PRIO)
-+ sl_id |= 0xF000000000000000;
-+ else
-+ sl_id += longest_deadline_diff();
-+ }
-+ }
-+ /*
-+ * Some architectures don't have better than microsecond resolution
-+ * so shift out the sub-microsecond bits and use the ~microsecond
-+ * bits as the random seed for skiplist insertion.
-+ */
-+ update_clocks(rq);
-+ if (!(flags & ENQUEUE_RESTORE))
-+ sched_info_queued(rq, p);
-+ randseed = (rq->niffies >> 10) & 0xFFFFFFFF;
-+ skiplist_insert(rq->sl, &p->node, sl_id, p, randseed);
-+ rq->best_key = rq->node.next[0]->key;
-+ if (p->in_iowait)
-+ cflags |= SCHED_CPUFREQ_IOWAIT;
-+ update_load_avg(rq, cflags);
-+}
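-+
-+/*
-+ * Illustrative key layout: an RT task at prio 10 is inserted with
-+ * sl_id == 10, a SCHED_NORMAL task with its deadline in niffies (a far
-+ * larger number), and an idleprio task running at IDLE_PRIO has
-+ * 0xF000000000000000 OR-ed in, placing it beyond any real deadline.
-+ */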
-+
-+/*
-+ * Returns the relative length of deadline compared to the shortest
-+ * deadline, which is that of nice -20.
-+ */
-+static inline int task_prio_ratio(struct task_struct *p)
-+{
-+ return prio_ratios[TASK_USER_PRIO(p)];
-+}
-+
-+/*
-+ * task_timeslice - the base rr_interval scaled by the task's prio ratio.
-+ * CPU distribution is handled by giving different deadlines to tasks of
-+ * different priorities. Use 128 as the base value for fast shifts.
-+ */
-+static inline int task_timeslice(struct task_struct *p)
-+{
-+ return (rr_interval * task_prio_ratio(p) / 128);
-+}
-+
-+#ifdef CONFIG_SMP
-+/* Entered with rq locked */
-+static inline void resched_if_idle(struct rq *rq)
-+{
-+ if (rq_idle(rq))
-+ resched_task(rq->curr);
-+}
-+
-+static inline bool rq_local(struct rq *rq)
-+{
-+ return (rq->cpu == smp_processor_id());
-+}
-+#ifdef CONFIG_SMT_NICE
-+static const cpumask_t *thread_cpumask(int cpu);
-+
-+/*
-+ * Find the best real time priority running on any SMT siblings of cpu and,
-+ * if none are running, the static priority of the best deadline task running.
-+ * The lookups of the other runqueues are done locklessly as the occasional
-+ * wrong value would be harmless.
-+ */
-+static int best_smt_bias(struct rq *this_rq)
-+{
-+ int other_cpu, best_bias = 0;
-+
-+ for_each_cpu(other_cpu, &this_rq->thread_mask) {
-+ struct rq *rq = cpu_rq(other_cpu);
-+
-+ if (rq_idle(rq))
-+ continue;
-+ if (unlikely(!rq->online))
-+ continue;
-+ if (!rq->rq_mm)
-+ continue;
-+ if (likely(rq->rq_smt_bias > best_bias))
-+ best_bias = rq->rq_smt_bias;
-+ }
-+ return best_bias;
-+}
-+
-+static int task_prio_bias(struct task_struct *p)
-+{
-+ if (rt_task(p))
-+ return 1 << 30;
-+ else if (task_running_iso(p))
-+ return 1 << 29;
-+ else if (task_running_idle(p))
-+ return 0;
-+ return MAX_PRIO - p->static_prio;
-+}
-+
-+static bool smt_always_schedule(struct task_struct __maybe_unused *p, struct rq __maybe_unused *this_rq)
-+{
-+ return true;
-+}
-+
-+static bool (*smt_schedule)(struct task_struct *p, struct rq *this_rq) = &smt_always_schedule;
-+
-+/*
-+ * We've already decided p can run on this CPU; now test whether it
-+ * shouldn't for SMT nice reasons.
-+ */
-+static bool smt_should_schedule(struct task_struct *p, struct rq *this_rq)
-+{
-+ int best_bias, task_bias;
-+
-+ /* Kernel threads always run */
-+ if (unlikely(!p->mm))
-+ return true;
-+ if (rt_task(p))
-+ return true;
-+ if (!idleprio_suitable(p))
-+ return true;
-+ best_bias = best_smt_bias(this_rq);
-+ /* The smt siblings are all idle or running IDLEPRIO */
-+ if (best_bias < 1)
-+ return true;
-+ task_bias = task_prio_bias(p);
-+ if (task_bias < 1)
-+ return false;
-+ if (task_bias >= best_bias)
-+ return true;
-+ /* Dither 25% cpu of normal tasks regardless of nice difference */
-+ if (best_bias % 4 == 1)
-+ return true;
-+ /* Sorry, you lose */
-+ return false;
-+}
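-+
-+/*
-+ * Illustrative note: the "best_bias % 4 == 1" dither above holds for
-+ * roughly a quarter of bias values, so a lower-priority normal task
-+ * still wins ~25% of sibling slots regardless of the nice difference.
-+ */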
-+#else /* CONFIG_SMT_NICE */
-+#define smt_schedule(p, this_rq) (true)
-+#endif /* CONFIG_SMT_NICE */
-+
-+static inline void atomic_set_cpu(int cpu, cpumask_t *cpumask)
-+{
-+ set_bit(cpu, (volatile unsigned long *)cpumask);
-+}
-+
-+/*
-+ * The cpu_idle_map stores a bitmap of all the CPUs currently idle to
-+ * allow easy lookup of whether any suitable idle CPUs are available.
-+ * It's cheaper to consult this map for a yes/no on idle CPUs than to scan
-+ * every runqueue when we are busy. The bits are set atomically but read
-+ * locklessly as an occasional false positive or negative is harmless.
-+ */
-+static inline void set_cpuidle_map(int cpu)
-+{
-+ if (likely(cpu_online(cpu)))
-+ atomic_set_cpu(cpu, &cpu_idle_map);
-+}
-+
-+static inline void atomic_clear_cpu(int cpu, cpumask_t *cpumask)
-+{
-+ clear_bit(cpu, (volatile unsigned long *)cpumask);
-+}
-+
-+static inline void clear_cpuidle_map(int cpu)
-+{
-+ atomic_clear_cpu(cpu, &cpu_idle_map);
-+}
-+
-+static bool suitable_idle_cpus(struct task_struct *p)
-+{
-+ return (cpumask_intersects(&p->cpus_allowed, &cpu_idle_map));
-+}
-+
-+/*
-+ * Resched current on rq. We don't know if rq is local to this CPU nor if it
-+ * is locked, so we do not use an intermediate variable for the task to
-+ * avoid having a stale dereference after rq->curr changes under us.
-+ */
-+static void resched_curr(struct rq *rq)
-+{
-+ int cpu;
-+
-+ if (test_tsk_need_resched(rq->curr))
-+ return;
-+
-+ rq->preempt = rq->curr;
-+ cpu = rq->cpu;
-+
-+ /* We're doing this without holding the rq lock if it's not task_rq */
-+
-+ if (cpu == smp_processor_id()) {
-+ set_tsk_need_resched(rq->curr);
-+ set_preempt_need_resched();
-+ return;
-+ }
-+
-+ if (set_nr_and_not_polling(rq->curr))
-+ smp_sched_reschedule(cpu);
-+ else
-+ trace_sched_wake_idle_without_ipi(cpu);
-+}
-+
-+#define CPUIDLE_DIFF_THREAD (1)
-+#define CPUIDLE_DIFF_CORE (2)
-+#define CPUIDLE_CACHE_BUSY (4)
-+#define CPUIDLE_DIFF_CPU (8)
-+#define CPUIDLE_THREAD_BUSY (16)
-+#define CPUIDLE_DIFF_NODE (32)
-+
-+/*
-+ * The best idle CPU is chosen according to the CPUIDLE ranking above where the
-+ * lowest value would give the most suitable CPU to schedule p onto next. The
-+ * order works out to be the following:
-+ *
-+ * Same thread, idle or busy cache, idle or busy threads
-+ * Other core, same cache, idle or busy cache, idle threads.
-+ * Same node, other CPU, idle cache, idle threads.
-+ * Same node, other CPU, busy cache, idle threads.
-+ * Other core, same cache, busy threads.
-+ * Same node, other CPU, busy threads.
-+ * Other node, other CPU, idle cache, idle threads.
-+ * Other node, other CPU, busy cache, idle threads.
-+ * Other node, other CPU, busy threads.
-+ */
-+static int best_mask_cpu(int best_cpu, struct rq *rq, cpumask_t *tmpmask)
-+{
-+ int best_ranking = CPUIDLE_DIFF_NODE | CPUIDLE_THREAD_BUSY |
-+ CPUIDLE_DIFF_CPU | CPUIDLE_CACHE_BUSY | CPUIDLE_DIFF_CORE |
-+ CPUIDLE_DIFF_THREAD;
-+ int cpu_tmp;
-+
-+ if (cpumask_test_cpu(best_cpu, tmpmask))
-+ goto out;
-+
-+ for_each_cpu(cpu_tmp, tmpmask) {
-+ int ranking, locality;
-+ struct rq *tmp_rq;
-+
-+ ranking = 0;
-+ tmp_rq = cpu_rq(cpu_tmp);
-+
-+ locality = rq->cpu_locality[cpu_tmp];
-+#ifdef CONFIG_NUMA
-+ if (locality > 3)
-+ ranking |= CPUIDLE_DIFF_NODE;
-+ else
-+#endif
-+ if (locality > 2)
-+ ranking |= CPUIDLE_DIFF_CPU;
-+#ifdef CONFIG_SCHED_MC
-+ else if (locality == 2)
-+ ranking |= CPUIDLE_DIFF_CORE;
-+ else if (!(tmp_rq->cache_idle(tmp_rq)))
-+ ranking |= CPUIDLE_CACHE_BUSY;
-+#endif
-+#ifdef CONFIG_SCHED_SMT
-+ if (locality == 1)
-+ ranking |= CPUIDLE_DIFF_THREAD;
-+ if (!(tmp_rq->siblings_idle(tmp_rq)))
-+ ranking |= CPUIDLE_THREAD_BUSY;
-+#endif
-+ if (ranking < best_ranking) {
-+ best_cpu = cpu_tmp;
-+ best_ranking = ranking;
-+ }
-+ }
-+out:
-+ return best_cpu;
-+}
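-+
-+/*
-+ * Illustrative example: an idle core sharing cache (locality 2) ranks
-+ * CPUIDLE_DIFF_CORE == 2, beating a same-node separate CPU which ranks
-+ * CPUIDLE_DIFF_CPU == 8, so cache-sharing cores are preferred in line
-+ * with the ordering above.
-+ */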
-+
-+bool cpus_share_cache(int this_cpu, int that_cpu)
-+{
-+ struct rq *this_rq = cpu_rq(this_cpu);
-+
-+ return (this_rq->cpu_locality[that_cpu] < 3);
-+}
-+
-+/* As per resched_curr but will only resched an idle task */
-+static inline void resched_idle(struct rq *rq)
-+{
-+ if (test_tsk_need_resched(rq->idle))
-+ return;
-+
-+ rq->preempt = rq->idle;
-+
-+ set_tsk_need_resched(rq->idle);
-+
-+ if (rq_local(rq)) {
-+ set_preempt_need_resched();
-+ return;
-+ }
-+
-+ smp_sched_reschedule(rq->cpu);
-+}
-+
-+static struct rq *resched_best_idle(struct task_struct *p, int cpu)
-+{
-+ cpumask_t tmpmask;
-+ struct rq *rq;
-+ int best_cpu;
-+
-+ cpumask_and(&tmpmask, &p->cpus_allowed, &cpu_idle_map);
-+ best_cpu = best_mask_cpu(cpu, task_rq(p), &tmpmask);
-+ rq = cpu_rq(best_cpu);
-+ if (!smt_schedule(p, rq))
-+ return NULL;
-+ rq->preempt = p;
-+ resched_idle(rq);
-+ return rq;
-+}
-+
-+static inline void resched_suitable_idle(struct task_struct *p)
-+{
-+ if (suitable_idle_cpus(p))
-+ resched_best_idle(p, task_cpu(p));
-+}
-+
-+static inline struct rq *rq_order(struct rq *rq, int cpu)
-+{
-+ return rq->rq_order[cpu];
-+}
-+#else /* CONFIG_SMP */
-+static inline void set_cpuidle_map(int cpu)
-+{
-+}
-+
-+static inline void clear_cpuidle_map(int cpu)
-+{
-+}
-+
-+static inline bool suitable_idle_cpus(struct task_struct *p)
-+{
-+ return uprq->curr == uprq->idle;
-+}
-+
-+static inline void resched_suitable_idle(struct task_struct *p)
-+{
-+}
-+
-+static inline void resched_curr(struct rq *rq)
-+{
-+ resched_task(rq->curr);
-+}
-+
-+static inline void resched_if_idle(struct rq *rq)
-+{
-+}
-+
-+static inline bool rq_local(struct rq *rq)
-+{
-+ return true;
-+}
-+
-+static inline struct rq *rq_order(struct rq *rq, int cpu)
-+{
-+ return rq;
-+}
-+
-+static inline bool smt_schedule(struct task_struct *p, struct rq *rq)
-+{
-+ return true;
-+}
-+#endif /* CONFIG_SMP */
-+
-+static inline int normal_prio(struct task_struct *p)
-+{
-+ if (has_rt_policy(p))
-+ return MAX_RT_PRIO - 1 - p->rt_priority;
-+ if (idleprio_task(p))
-+ return IDLE_PRIO;
-+ if (iso_task(p))
-+ return ISO_PRIO;
-+ return NORMAL_PRIO;
-+}
-+
-+/*
-+ * Calculate the current priority, i.e. the priority
-+ * taken into account by the scheduler. This value might
-+ * be boosted by RT tasks as it will be RT if the task got
-+ * RT-boosted. If not then it returns p->normal_prio.
-+ */
-+static int effective_prio(struct task_struct *p)
-+{
-+ p->normal_prio = normal_prio(p);
-+ /*
-+ * If we are RT tasks or we were boosted to RT priority,
-+ * keep the priority unchanged. Otherwise, update priority
-+ * to the normal priority:
-+ */
-+ if (!rt_prio(p->prio))
-+ return p->normal_prio;
-+ return p->prio;
-+}
-+
-+/*
-+ * activate_task - move a task to the runqueue. Enter with rq locked.
-+ */
-+static void activate_task(struct task_struct *p, struct rq *rq)
-+{
-+ resched_if_idle(rq);
-+
-+ /*
-+ * Sleep time is in units of nanosecs, so shift by 20 to get a
-+ * milliseconds-range estimation of the amount of time that the task
-+ * spent sleeping:
-+ */
-+ if (unlikely(prof_on == SLEEP_PROFILING)) {
-+ if (p->state == TASK_UNINTERRUPTIBLE)
-+ profile_hits(SLEEP_PROFILING, (void *)get_wchan(p),
-+ (rq->niffies - p->last_ran) >> 20);
-+ }
-+
-+ p->prio = effective_prio(p);
-+ if (task_contributes_to_load(p))
-+ rq->nr_uninterruptible--;
-+
-+ enqueue_task(rq, p, 0);
-+ p->on_rq = TASK_ON_RQ_QUEUED;
-+}
-+
-+/*
-+ * deactivate_task - The task is running so it is already off the runqueue;
-+ * all we need to do is adjust the accounting. Enter with rq locked.
-+ */
-+static inline void deactivate_task(struct task_struct *p, struct rq *rq)
-+{
-+ if (task_contributes_to_load(p))
-+ rq->nr_uninterruptible++;
-+
-+ p->on_rq = 0;
-+ sched_info_dequeued(rq, p);
-+}
-+
-+#ifdef CONFIG_SMP
-+void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
-+{
-+ struct rq *rq;
-+
-+ if (task_cpu(p) == new_cpu)
-+ return;
-+
-+ /* Do NOT call set_task_cpu on a currently queued task as we will not
-+ * be reliably holding the rq lock after changing CPU. */
-+ BUG_ON(task_queued(p));
-+ rq = task_rq(p);
-+
-+#ifdef CONFIG_LOCKDEP
-+ /*
-+ * The caller should hold either p->pi_lock or rq->lock, when changing
-+ * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
-+ *
-+ * Furthermore, all task_rq users should acquire both locks, see
-+ * task_rq_lock().
-+ */
-+ WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
-+ lockdep_is_held(&rq->lock)));
-+#endif
-+
-+ trace_sched_migrate_task(p, new_cpu);
-+ perf_event_task_migrate(p);
-+
-+ /*
-+ * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
-+ * successfully executed on another CPU. We must ensure that updates of
-+ * per-task data have been completed by this moment.
-+ */
-+ smp_wmb();
-+
-+ p->wake_cpu = new_cpu;
-+
-+ if (task_running(rq, p)) {
-+ /*
-+ * We should only be calling this on a running task if we're
-+ * holding rq lock.
-+ */
-+ lockdep_assert_held(&rq->lock);
-+
-+ /*
-+ * We can't change the task_thread_info CPU on a running task
-+ * as p will still be protected by the rq lock of the CPU it
-+ * is still running on so we only set the wake_cpu for it to be
-+ * lazily updated once off the CPU.
-+ */
-+ return;
-+ }
-+
-+#ifdef CONFIG_THREAD_INFO_IN_TASK
-+ p->cpu = new_cpu;
-+#else
-+ task_thread_info(p)->cpu = new_cpu;
-+#endif
-+ /* We're no longer protecting p after this point since we're holding
-+ * the wrong runqueue lock. */
-+}
-+#endif /* CONFIG_SMP */
-+
-+/*
-+ * Move a task off the runqueue and take it to a cpu where it will
-+ * become the running task.
-+ */
-+static inline void take_task(struct rq *rq, int cpu, struct task_struct *p)
-+{
-+ struct rq *p_rq = task_rq(p);
-+
-+ dequeue_task(p_rq, p, DEQUEUE_SAVE);
-+ if (p_rq != rq) {
-+ sched_info_dequeued(p_rq, p);
-+ sched_info_queued(rq, p);
-+ }
-+ set_task_cpu(p, cpu);
-+}
-+
-+/*
-+ * Returns a descheduling task to the runqueue unless it is being
-+ * deactivated.
-+ */
-+static inline void return_task(struct task_struct *p, struct rq *rq,
-+ int cpu, bool deactivate)
-+{
-+ if (deactivate)
-+ deactivate_task(p, rq);
-+ else {
-+#ifdef CONFIG_SMP
-+ /*
-+ * set_task_cpu was called on the running task that doesn't
-+ * want to deactivate so it has to be enqueued to a different
-+ * CPU and we need its lock. Tag it to be moved with as the
-+ * lock is dropped in finish_lock_switch.
-+ */
-+ if (unlikely(p->wake_cpu != cpu))
-+ p->on_rq = TASK_ON_RQ_MIGRATING;
-+ else
-+#endif
-+ enqueue_task(rq, p, ENQUEUE_RESTORE);
-+ }
-+}
-+
-+/* Enter with rq lock held. We know p is on the local cpu */
-+static inline void __set_tsk_resched(struct task_struct *p)
-+{
-+ set_tsk_need_resched(p);
-+ set_preempt_need_resched();
-+}
-+
-+/**
-+ * task_curr - is this task currently executing on a CPU?
-+ * @p: the task in question.
-+ *
-+ * Return: 1 if the task is currently executing. 0 otherwise.
-+ */
-+inline int task_curr(const struct task_struct *p)
-+{
-+ return cpu_curr(task_cpu(p)) == p;
-+}
-+
-+#ifdef CONFIG_SMP
-+/*
-+ * wait_task_inactive - wait for a thread to unschedule.
-+ *
-+ * If @match_state is nonzero, it's the @p->state value just checked and
-+ * not expected to change. If it changes, i.e. @p might have woken up,
-+ * then return zero. When we succeed in waiting for @p to be off its CPU,
-+ * we return a positive number (its total switch count). If a second call
-+ * a short while later returns the same number, the caller can be sure that
-+ * @p has remained unscheduled the whole time.
-+ *
-+ * The caller must ensure that the task *will* unschedule sometime soon,
-+ * else this function might spin for a *long* time. This function can't
-+ * be called with interrupts off, or it may introduce deadlock with
-+ * smp_call_function() if an IPI is sent by the same process we are
-+ * waiting to become inactive.
-+ */
-+unsigned long wait_task_inactive(struct task_struct *p, long match_state)
-+{
-+ int running, queued;
-+ unsigned long flags;
-+ unsigned long ncsw;
-+ struct rq *rq;
-+
-+ for (;;) {
-+ rq = task_rq(p);
-+
-+ /*
-+ * If the task is actively running on another CPU
-+ * still, just relax and busy-wait without holding
-+ * any locks.
-+ *
-+ * NOTE! Since we don't hold any locks, it's not
-+ * even sure that "rq" stays as the right runqueue!
-+ * But we don't care, since this will return false
-+ * if the runqueue has changed and p is actually now
-+ * running somewhere else!
-+ */
-+ while (task_running(rq, p)) {
-+ if (match_state && unlikely(p->state != match_state))
-+ return 0;
-+ cpu_relax();
-+ }
-+
-+ /*
-+ * Ok, time to look more closely! We need the rq
-+ * lock now, to be *sure*. If we're wrong, we'll
-+ * just go back and repeat.
-+ */
-+ rq = task_rq_lock(p, &flags);
-+ trace_sched_wait_task(p);
-+ running = task_running(rq, p);
-+ queued = task_on_rq_queued(p);
-+ ncsw = 0;
-+ if (!match_state || p->state == match_state)
-+ ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
-+ task_rq_unlock(rq, p, &flags);
-+
-+ /*
-+ * If it changed from the expected state, bail out now.
-+ */
-+ if (unlikely(!ncsw))
-+ break;
-+
-+ /*
-+ * Was it really running after all now that we
-+ * checked with the proper locks actually held?
-+ *
-+ * Oops. Go back and try again..
-+ */
-+ if (unlikely(running)) {
-+ cpu_relax();
-+ continue;
-+ }
-+
-+ /*
-+ * It's not enough that it's not actively running,
-+ * it must be off the runqueue _entirely_, and not
-+ * preempted!
-+ *
-+ * So if it was still runnable (but just not actively
-+ * running right now), it's preempted, and we should
-+ * yield - it could be a while.
-+ */
-+ if (unlikely(queued)) {
-+ ktime_t to = NSEC_PER_SEC / HZ;
-+
-+ set_current_state(TASK_UNINTERRUPTIBLE);
-+ schedule_hrtimeout(&to, HRTIMER_MODE_REL);
-+ continue;
-+ }
-+
-+ /*
-+ * Ahh, all good. It wasn't running, and it wasn't
-+ * runnable, which means that it will never become
-+ * running in the future either. We're all done!
-+ */
-+ break;
-+ }
-+
-+ return ncsw;
-+}
-+
-+/***
-+ * kick_process - kick a running thread to enter/exit the kernel
-+ * @p: the to-be-kicked thread
-+ *
-+ * Cause a process which is running on another CPU to enter
-+ * kernel-mode, without any delay. (to get signals handled.)
-+ *
-+ * NOTE: this function doesn't have to take the runqueue lock,
-+ * because all it wants to ensure is that the remote task enters
-+ * the kernel. If the IPI races and the task has been migrated
-+ * to another CPU then no harm is done and the purpose has been
-+ * achieved as well.
-+ */
-+void kick_process(struct task_struct *p)
-+{
-+ int cpu;
-+
-+ preempt_disable();
-+ cpu = task_cpu(p);
-+ if ((cpu != smp_processor_id()) && task_curr(p))
-+ smp_sched_reschedule(cpu);
-+ preempt_enable();
-+}
-+EXPORT_SYMBOL_GPL(kick_process);
-+#endif
-+
-+/*
-+ * RT tasks preempt purely on priority. SCHED_NORMAL tasks preempt on the
-+ * basis of earlier deadlines. SCHED_IDLEPRIO don't preempt anything else or
-+ * between themselves, they cooperatively multitask. An idle rq scores as
-+ * prio PRIO_LIMIT so it is always preempted.
-+ */
-+static inline bool
-+can_preempt(struct task_struct *p, int prio, u64 deadline)
-+{
-+ /* Better static priority RT task or better policy preemption */
-+ if (p->prio < prio)
-+ return true;
-+ if (p->prio > prio)
-+ return false;
-+ if (p->policy == SCHED_BATCH)
-+ return false;
-+ /* SCHED_NORMAL and ISO will preempt based on deadline */
-+ if (!deadline_before(p->deadline, deadline))
-+ return false;
-+ return true;
-+}
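-+
-+/*
-+ * Illustrative example: a waking SCHED_NORMAL task at the same prio as
-+ * the running task only preempts when its deadline is strictly earlier;
-+ * an equal deadline leaves current running and avoids needless churn.
-+ */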
-+
-+#ifdef CONFIG_SMP
-+/*
-+ * Check to see if p can run on cpu, and if not, whether there are any online
-+ * CPUs it can run on instead.
-+ */
-+static inline bool needs_other_cpu(struct task_struct *p, int cpu)
-+{
-+ if (unlikely(!cpumask_test_cpu(cpu, &p->cpus_allowed)))
-+ return true;
-+ return false;
-+}
-+#define cpu_online_map (*(cpumask_t *)cpu_online_mask)
-+
-+static void try_preempt(struct task_struct *p, struct rq *this_rq)
-+{
-+ int i, this_entries = rq_load(this_rq);
-+ cpumask_t tmp;
-+
-+ if (suitable_idle_cpus(p) && resched_best_idle(p, task_cpu(p)))
-+ return;
-+
-+ /* IDLEPRIO tasks never preempt anything but idle */
-+ if (p->policy == SCHED_IDLEPRIO)
-+ return;
-+
-+ cpumask_and(&tmp, &cpu_online_map, &p->cpus_allowed);
-+
-+ for (i = 0; i < num_possible_cpus(); i++) {
-+ struct rq *rq = this_rq->rq_order[i];
-+
-+ if (!cpumask_test_cpu(rq->cpu, &tmp))
-+ continue;
-+
-+ if (!sched_interactive && rq != this_rq && rq_load(rq) <= this_entries)
-+ continue;
-+ if (smt_schedule(p, rq) && can_preempt(p, rq->rq_prio, rq->rq_deadline)) {
-+ /* We set rq->preempting lockless, it's a hint only */
-+ rq->preempting = p;
-+ resched_curr(rq);
-+ return;
-+ }
-+ }
-+}
-+
-+static int __set_cpus_allowed_ptr(struct task_struct *p,
-+ const struct cpumask *new_mask, bool check);
-+#else /* CONFIG_SMP */
-+static inline bool needs_other_cpu(struct task_struct *p, int cpu)
-+{
-+ return false;
-+}
-+
-+static void try_preempt(struct task_struct *p, struct rq *this_rq)
-+{
-+ if (p->policy == SCHED_IDLEPRIO)
-+ return;
-+ if (can_preempt(p, uprq->rq_prio, uprq->rq_deadline))
-+ resched_curr(uprq);
-+}
-+
-+static inline int __set_cpus_allowed_ptr(struct task_struct *p,
-+ const struct cpumask *new_mask, bool check)
-+{
-+ return set_cpus_allowed_ptr(p, new_mask);
-+}
-+#endif /* CONFIG_SMP */
-+
-+/*
-+ * wake flags
-+ */
-+#define WF_SYNC 0x01 /* waker goes to sleep after wakeup */
-+#define WF_FORK 0x02 /* child wakeup after fork */
-+#define WF_MIGRATED 0x04 /* internal use, task got migrated */
-+
-+static void
-+ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
-+{
-+ struct rq *rq;
-+
-+ if (!schedstat_enabled())
-+ return;
-+
-+ rq = this_rq();
-+
-+#ifdef CONFIG_SMP
-+ if (cpu == rq->cpu)
-+ schedstat_inc(rq->ttwu_local);
-+ else {
-+ struct sched_domain *sd;
-+
-+ rcu_read_lock();
-+ for_each_domain(rq->cpu, sd) {
-+ if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
-+ schedstat_inc(sd->ttwu_wake_remote);
-+ break;
-+ }
-+ }
-+ rcu_read_unlock();
-+ }
-+
-+#endif /* CONFIG_SMP */
-+
-+ schedstat_inc(rq->ttwu_count);
-+}
-+
-+static inline void ttwu_activate(struct rq *rq, struct task_struct *p)
-+{
-+ activate_task(p, rq);
-+
-+ /* if a worker is waking up, notify the workqueue */
-+ if (p->flags & PF_WQ_WORKER)
-+ wq_worker_waking_up(p, cpu_of(rq));
-+}
-+
-+/*
-+ * Mark the task runnable and perform wakeup-preemption.
-+ */
-+static void ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
-+{
-+ /*
-+ * Sync wakeups (i.e. those types of wakeups where the waker
-+ * has indicated that it will leave the CPU in short order)
-+ * don't trigger a preemption if there are no idle cpus,
-+ * instead waiting for current to deschedule.
-+ */
-+ if (wake_flags & WF_SYNC)
-+ resched_suitable_idle(p);
-+ else
-+ try_preempt(p, rq);
-+ p->state = TASK_RUNNING;
-+ trace_sched_wakeup(p);
-+}
-+
-+static void
-+ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags)
-+{
-+ lockdep_assert_held(&rq->lock);
-+
-+#ifdef CONFIG_SMP
-+ if (p->sched_contributes_to_load)
-+ rq->nr_uninterruptible--;
-+#endif
-+
-+ ttwu_activate(rq, p);
-+ ttwu_do_wakeup(rq, p, wake_flags);
-+}
-+
-+/*
-+ * Called in case the task @p isn't fully descheduled from its runqueue,
-+ * in this case we must do a remote wakeup. Its a 'light' wakeup though,
-+ * since all we need to do is flip p->state to TASK_RUNNING, since
-+ * the task is still ->on_rq.
-+ */
-+static int ttwu_remote(struct task_struct *p, int wake_flags)
-+{
-+ struct rq *rq;
-+ int ret = 0;
-+
-+ rq = __task_rq_lock(p);
-+ if (likely(task_on_rq_queued(p))) {
-+ ttwu_do_wakeup(rq, p, wake_flags);
-+ ret = 1;
-+ }
-+ __task_rq_unlock(rq);
-+
-+ return ret;
-+}
-+
-+#ifdef CONFIG_SMP
-+void sched_ttwu_pending(void)
-+{
-+ struct rq *rq = this_rq();
-+ struct llist_node *llist = llist_del_all(&rq->wake_list);
-+ struct task_struct *p, *t;
-+ unsigned long flags;
-+
-+ if (!llist)
-+ return;
-+
-+ rq_lock_irqsave(rq, &flags);
-+
-+ llist_for_each_entry_safe(p, t, llist, wake_entry)
-+ ttwu_do_activate(rq, p, 0);
-+
-+ rq_unlock_irqrestore(rq, &flags);
-+}
-+
-+void scheduler_ipi(void)
-+{
-+ /*
-+ * Fold TIF_NEED_RESCHED into the preempt_count; anybody setting
-+ * TIF_NEED_RESCHED remotely (for the first time) will also send
-+ * this IPI.
-+ */
-+ preempt_fold_need_resched();
-+
-+ if (llist_empty(&this_rq()->wake_list) && (!idle_cpu(smp_processor_id()) || need_resched()))
-+ return;
-+
-+ /*
-+ * Not all reschedule IPI handlers call irq_enter/irq_exit, since
-+ * traditionally all their work was done from the interrupt return
-+ * path. Now that we actually do some work, we need to make sure
-+ * we do call them.
-+ *
-+ * Some archs already do call them, luckily irq_enter/exit nest
-+ * properly.
-+ *
-+ * Arguably we should visit all archs and update all handlers,
-+ * however a fair share of IPIs are still resched only so this would
-+ * somewhat pessimize the simple resched case.
-+ */
-+ irq_enter();
-+ sched_ttwu_pending();
-+ irq_exit();
-+}
-+
-+static void ttwu_queue_remote(struct task_struct *p, int cpu, int wake_flags)
-+{
-+ struct rq *rq = cpu_rq(cpu);
-+
-+ if (llist_add(&p->wake_entry, &cpu_rq(cpu)->wake_list)) {
-+ if (!set_nr_if_polling(rq->idle))
-+ smp_sched_reschedule(cpu);
-+ else
-+ trace_sched_wake_idle_without_ipi(cpu);
-+ }
-+}
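-+
-+/*
-+ * Illustrative note: llist_add() returns true only when the list was
-+ * previously empty, so just the first waker in a batch sends the IPI;
-+ * later wakers piggyback on the pending sched_ttwu_pending() pass.
-+ */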
-+
-+void wake_up_if_idle(int cpu)
-+{
-+ struct rq *rq = cpu_rq(cpu);
-+ unsigned long flags;
-+
-+ rcu_read_lock();
-+
-+ if (!is_idle_task(rcu_dereference(rq->curr)))
-+ goto out;
-+
-+ if (set_nr_if_polling(rq->idle)) {
-+ trace_sched_wake_idle_without_ipi(cpu);
-+ } else {
-+ rq_lock_irqsave(rq, &flags);
-+ if (likely(is_idle_task(rq->curr)))
-+ smp_sched_reschedule(cpu);
-+ /* Else cpu is not in idle, do nothing here */
-+ rq_unlock_irqrestore(rq, &flags);
-+ }
-+
-+out:
-+ rcu_read_unlock();
-+}
-+
-+static int valid_task_cpu(struct task_struct *p)
-+{
-+ cpumask_t valid_mask;
-+
-+ if (p->flags & PF_KTHREAD)
-+ cpumask_and(&valid_mask, &p->cpus_allowed, cpu_online_mask);
-+ else
-+ cpumask_and(&valid_mask, &p->cpus_allowed, cpu_active_mask);
-+
-+ if (unlikely(!cpumask_weight(&valid_mask))) {
-+ /* Hotplug boot threads do this before the CPU is up */
-+ printk(KERN_INFO "SCHED: No cpumask for %s/%d weight %d\n", p->comm, p->pid, cpumask_weight(&p->cpus_allowed));
-+ return cpumask_any(&p->cpus_allowed);
-+ }
-+ return cpumask_any(&valid_mask);
-+}
-+
-+/*
-+ * For a task that's just being woken up we have a valuable balancing
-+ * opportunity, so choose the most lightly loaded runqueue sharing the
-+ * nearest cache.
-+ */
-+static inline int select_best_cpu(struct task_struct *p)
-+{
-+ unsigned int idlest = ~0U;
-+ struct rq *rq = NULL;
-+ int i;
-+
-+ if (suitable_idle_cpus(p)) {
-+ int cpu = task_cpu(p);
-+
-+ if (unlikely(needs_other_cpu(p, cpu)))
-+ cpu = valid_task_cpu(p);
-+ rq = resched_best_idle(p, cpu);
-+ if (likely(rq))
-+ return rq->cpu;
-+ }
-+
-+ for (i = 0; i < num_possible_cpus(); i++) {
-+ struct rq *other_rq = task_rq(p)->rq_order[i];
-+ int entries;
-+
-+ if (!other_rq->online)
-+ continue;
-+ if (needs_other_cpu(p, other_rq->cpu))
-+ continue;
-+ entries = rq_load(other_rq);
-+ if (entries >= idlest)
-+ continue;
-+ idlest = entries;
-+ rq = other_rq;
-+ }
-+ if (unlikely(!rq))
-+ return task_cpu(p);
-+ return rq->cpu;
-+}
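-+
-+/*
-+ * Illustrative example: with no suitable idle CPUs, the scan above
-+ * walks rq_order (nearest rqs first) and keeps the first rq with the
-+ * strictly fewest running tasks, so ties resolve to the closer cache.
-+ */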
-+#else /* CONFIG_SMP */
-+static int valid_task_cpu(struct task_struct *p)
-+{
-+ return 0;
-+}
-+
-+static inline int select_best_cpu(struct task_struct *p)
-+{
-+ return 0;
-+}
-+
-+static struct rq *resched_best_idle(struct task_struct *p, int cpu)
-+{
-+ return NULL;
-+}
-+#endif /* CONFIG_SMP */
-+
-+static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags)
-+{
-+ struct rq *rq = cpu_rq(cpu);
-+
-+#if defined(CONFIG_SMP)
-+ if (!cpus_share_cache(smp_processor_id(), cpu)) {
-+ sched_clock_cpu(cpu); /* Sync clocks across CPUs */
-+ ttwu_queue_remote(p, cpu, wake_flags);
-+ return;
-+ }
-+#endif
-+ rq_lock(rq);
-+ ttwu_do_activate(rq, p, wake_flags);
-+ rq_unlock(rq);
-+}
-+
-+/***
-+ * try_to_wake_up - wake up a thread
-+ * @p: the thread to be awakened
-+ * @state: the mask of task states that can be woken
-+ * @wake_flags: wake modifier flags (WF_*)
-+ *
-+ * Put it on the run-queue if it's not already there. The "current"
-+ * thread is always on the run-queue (except when the actual
-+ * re-schedule is in progress), and as such you're allowed to do
-+ * the simpler "current->state = TASK_RUNNING" to mark yourself
-+ * runnable without the overhead of this.
-+ *
-+ * Return: %true if @p was woken up, %false if it was already running
-+ * or @state didn't match @p's state.
-+ */
-+static int
-+try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
-+{
-+ unsigned long flags;
-+ int cpu, success = 0;
-+
-+ /*
-+ * If we are going to wake up a thread waiting for CONDITION we
-+ * need to ensure that CONDITION=1 done by the caller can not be
-+ * reordered with p->state check below. This pairs with mb() in
-+ * set_current_state() the waiting thread does.
-+ */
-+ raw_spin_lock_irqsave(&p->pi_lock, flags);
-+ smp_mb__after_spinlock();
-+ /* state is a volatile long; why, we don't know */
-+ if (!((unsigned int)p->state & state))
-+ goto out;
-+
-+ trace_sched_waking(p);
-+
-+ /* We're going to change ->state: */
-+ success = 1;
-+ cpu = task_cpu(p);
-+
-+ /*
-+ * Ensure we load p->on_rq _after_ p->state, otherwise it would
-+ * be possible to, falsely, observe p->on_rq == 0 and get stuck
-+ * in smp_cond_load_acquire() below.
-+ *
-+ * sched_ttwu_pending() try_to_wake_up()
-+ * [S] p->on_rq = 1; [L] p->state
-+ * UNLOCK rq->lock -----.
-+ * \
-+ * +--- RMB
-+ * schedule() /
-+ * LOCK rq->lock -----'
-+ * UNLOCK rq->lock
-+ *
-+ * [task p]
-+ * [S] p->state = UNINTERRUPTIBLE [L] p->on_rq
-+ *
-+ * Pairs with the UNLOCK+LOCK on rq->lock from the
-+ * last wakeup of our task and the schedule that got our task
-+ * current.
-+ */
-+ smp_rmb();
-+ if (p->on_rq && ttwu_remote(p, wake_flags))
-+ goto stat;
-+
-+#ifdef CONFIG_SMP
-+ /*
-+ * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be
-+ * possible to, falsely, observe p->on_cpu == 0.
-+ *
-+ * One must be running (->on_cpu == 1) in order to remove oneself
-+ * from the runqueue.
-+ *
-+ * [S] ->on_cpu = 1; [L] ->on_rq
-+ * UNLOCK rq->lock
-+ * RMB
-+ * LOCK rq->lock
-+ * [S] ->on_rq = 0; [L] ->on_cpu
-+ *
-+ * Pairs with the full barrier implied in the UNLOCK+LOCK on rq->lock
-+ * from the consecutive calls to schedule(); the first switching to our
-+ * task, the second putting it to sleep.
-+ */
-+ smp_rmb();
-+
-+ /*
-+ * If the owning (remote) CPU is still in the middle of schedule() with
-+ * this task as prev, wait until it's done referencing the task.
-+ *
-+ * Pairs with the smp_store_release() in finish_lock_switch().
-+ *
-+ * This ensures that tasks getting woken will be fully ordered against
-+ * their previous state and preserve Program Order.
-+ */
-+ smp_cond_load_acquire(&p->on_cpu, !VAL);
-+
-+ p->sched_contributes_to_load = !!task_contributes_to_load(p);
-+ p->state = TASK_WAKING;
-+
-+ if (p->in_iowait) {
-+ delayacct_blkio_end();
-+ atomic_dec(&task_rq(p)->nr_iowait);
-+ }
-+
-+ cpu = select_best_cpu(p);
-+ if (task_cpu(p) != cpu)
-+ set_task_cpu(p, cpu);
-+
-+#else /* CONFIG_SMP */
-+
-+ if (p->in_iowait) {
-+ delayacct_blkio_end();
-+ atomic_dec(&task_rq(p)->nr_iowait);
-+ }
-+
-+#endif /* CONFIG_SMP */
-+
-+ ttwu_queue(p, cpu, wake_flags);
-+stat:
-+ ttwu_stat(p, cpu, wake_flags);
-+out:
-+ raw_spin_unlock_irqrestore(&p->pi_lock, flags);
-+
-+ return success;
-+}
-+
-+/**
-+ * try_to_wake_up_local - try to wake up a local task with rq lock held
-+ * @p: the thread to be awakened
-+ *
-+ * Put @p on the run-queue if it's not already there. The caller must
-+ * ensure that rq is locked and @p is not the current task.
-+ * rq stays locked over invocation.
-+ */
-+static void try_to_wake_up_local(struct task_struct *p)
-+{
-+ struct rq *rq = task_rq(p);
-+
-+ if (WARN_ON_ONCE(rq != this_rq()) ||
-+ WARN_ON_ONCE(p == current))
-+ return;
-+
-+ lockdep_assert_held(&rq->lock);
-+
-+ if (!raw_spin_trylock(&p->pi_lock)) {
-+ /*
-+ * This is OK, because current is on_cpu, which avoids it being
-+ * picked for load-balance and preemption/IRQs are still
-+ * disabled avoiding further scheduler activity on it and we've
-+ * not yet picked a replacement task.
-+ */
-+ rq_unlock(rq);
-+ raw_spin_lock(&p->pi_lock);
-+ rq_lock(rq);
-+ }
-+
-+ if (!(p->state & TASK_NORMAL))
-+ goto out;
-+
-+ trace_sched_waking(p);
-+
-+ if (!task_on_rq_queued(p)) {
-+ if (p->in_iowait) {
-+ delayacct_blkio_end();
-+ atomic_dec(&rq->nr_iowait);
-+ }
-+ ttwu_activate(rq, p);
-+ }
-+
-+ ttwu_do_wakeup(rq, p, 0);
-+ ttwu_stat(p, smp_processor_id(), 0);
-+out:
-+ raw_spin_unlock(&p->pi_lock);
-+}
-+
-+/**
-+ * wake_up_process - Wake up a specific process
-+ * @p: The process to be woken up.
-+ *
-+ * Attempt to wake up the nominated process and move it to the set of runnable
-+ * processes.
-+ *
-+ * Return: 1 if the process was woken up, 0 if it was already running.
-+ *
-+ * It may be assumed that this function implies a write memory barrier before
-+ * changing the task state if and only if any tasks are woken up.
-+ */
-+int wake_up_process(struct task_struct *p)
-+{
-+ return try_to_wake_up(p, TASK_NORMAL, 0);
-+}
-+EXPORT_SYMBOL(wake_up_process);
-+
-+int wake_up_state(struct task_struct *p, unsigned int state)
-+{
-+ return try_to_wake_up(p, state, 0);
-+}
-+
-+static void time_slice_expired(struct task_struct *p, struct rq *rq);
-+
-+/*
-+ * Perform scheduler related setup for a newly forked process p.
-+ * p is forked by current.
-+ */
-+int sched_fork(unsigned long __maybe_unused clone_flags, struct task_struct *p)
-+{
-+ unsigned long flags;
-+ int cpu = get_cpu();
-+
-+#ifdef CONFIG_PREEMPT_NOTIFIERS
-+ INIT_HLIST_HEAD(&p->preempt_notifiers);
-+#endif
-+ /*
-+ * We mark the process as NEW here. This guarantees that
-+ * nobody will actually run it, and a signal or other external
-+ * event cannot wake it up and insert it on the runqueue either.
-+ */
-+ p->state = TASK_NEW;
-+
-+ /* Should be reset in fork.c but done here for ease of MuQSS patching */
-+ p->on_cpu =
-+ p->on_rq =
-+ p->utime =
-+ p->stime =
-+ p->sched_time =
-+ p->stime_ns =
-+ p->utime_ns = 0;
-+ skiplist_node_init(&p->node);
-+
-+ /*
-+ * Revert to default priority/policy on fork if requested.
-+ */
-+ if (unlikely(p->sched_reset_on_fork)) {
-+ if (p->policy == SCHED_FIFO || p->policy == SCHED_RR) {
-+ p->policy = SCHED_NORMAL;
-+ p->normal_prio = normal_prio(p);
-+ }
-+
-+ if (PRIO_TO_NICE(p->static_prio) < 0) {
-+ p->static_prio = NICE_TO_PRIO(0);
-+ p->normal_prio = p->static_prio;
-+ }
-+
-+ /*
-+ * We don't need the reset flag anymore after the fork. It has
-+ * fulfilled its duty:
-+ */
-+ p->sched_reset_on_fork = 0;
-+ }
-+
-+ /*
-+ * Silence PROVE_RCU.
-+ */
-+ raw_spin_lock_irqsave(&p->pi_lock, flags);
-+ set_task_cpu(p, cpu);
-+ raw_spin_unlock_irqrestore(&p->pi_lock, flags);
-+
-+#ifdef CONFIG_SCHED_INFO
-+ if (unlikely(sched_info_on()))
-+ memset(&p->sched_info, 0, sizeof(p->sched_info));
-+#endif
-+ init_task_preempt_count(p);
-+
-+ put_cpu();
-+ return 0;
-+}
-+
-+#ifdef CONFIG_SCHEDSTATS
-+
-+DEFINE_STATIC_KEY_FALSE(sched_schedstats);
-+static bool __initdata __sched_schedstats = false;
-+
-+static void set_schedstats(bool enabled)
-+{
-+ if (enabled)
-+ static_branch_enable(&sched_schedstats);
-+ else
-+ static_branch_disable(&sched_schedstats);
-+}
-+
-+void force_schedstat_enabled(void)
-+{
-+ if (!schedstat_enabled()) {
-+ pr_info("kernel profiling enabled schedstats, disable via kernel.sched_schedstats.\n");
-+ static_branch_enable(&sched_schedstats);
-+ }
-+}
-+
-+static int __init setup_schedstats(char *str)
-+{
-+ int ret = 0;
-+ if (!str)
-+ goto out;
-+
-+ /*
-+ * This code is called before jump labels have been set up, so we can't
-+ * change the static branch directly just yet. Instead set a temporary
-+ * variable so init_schedstats() can do it later.
-+ */
-+ if (!strcmp(str, "enable")) {
-+ __sched_schedstats = true;
-+ ret = 1;
-+ } else if (!strcmp(str, "disable")) {
-+ __sched_schedstats = false;
-+ ret = 1;
-+ }
-+out:
-+ if (!ret)
-+ pr_warn("Unable to parse schedstats=\n");
-+
-+ return ret;
-+}
-+__setup("schedstats=", setup_schedstats);
-+
-+static void __init init_schedstats(void)
-+{
-+ set_schedstats(__sched_schedstats);
-+}
-+
-+#ifdef CONFIG_PROC_SYSCTL
-+int sysctl_schedstats(struct ctl_table *table, int write,
-+ void __user *buffer, size_t *lenp, loff_t *ppos)
-+{
-+ struct ctl_table t;
-+ int err;
-+ int state = static_branch_likely(&sched_schedstats);
-+
-+ if (write && !capable(CAP_SYS_ADMIN))
-+ return -EPERM;
-+
-+ t = *table;
-+ t.data = &state;
-+ err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
-+ if (err < 0)
-+ return err;
-+ if (write)
-+ set_schedstats(state);
-+ return err;
-+}
-+#endif /* CONFIG_PROC_SYSCTL */
-+#else /* !CONFIG_SCHEDSTATS */
-+static inline void init_schedstats(void) {}
-+#endif /* CONFIG_SCHEDSTATS */
-+
-+static void update_cpu_clock_switch(struct rq *rq, struct task_struct *p);
-+
-+static void account_task_cpu(struct rq *rq, struct task_struct *p)
-+{
-+ update_clocks(rq);
-+ /* This isn't really a context switch but accounting is the same */
-+ update_cpu_clock_switch(rq, p);
-+ p->last_ran = rq->niffies;
-+}
-+
-+bool sched_smp_initialized __read_mostly;
-+
-+static inline int hrexpiry_enabled(struct rq *rq)
-+{
-+ if (unlikely(!cpu_active(cpu_of(rq)) || !sched_smp_initialized))
-+ return 0;
-+ return hrtimer_is_hres_active(&rq->hrexpiry_timer);
-+}
-+
-+/*
-+ * Use HR-timers to deliver accurate preemption points.
-+ */
-+static inline void hrexpiry_clear(struct rq *rq)
-+{
-+ if (!hrexpiry_enabled(rq))
-+ return;
-+ if (hrtimer_active(&rq->hrexpiry_timer))
-+ hrtimer_cancel(&rq->hrexpiry_timer);
-+}
-+
-+/*
-+ * High-resolution time_slice expiry.
-+ * Runs from hardirq context with interrupts disabled.
-+ */
-+static enum hrtimer_restart hrexpiry(struct hrtimer *timer)
-+{
-+ struct rq *rq = container_of(timer, struct rq, hrexpiry_timer);
-+ struct task_struct *p;
-+
-+ /* This can happen during CPU hotplug / resume */
-+ if (unlikely(cpu_of(rq) != smp_processor_id()))
-+ goto out;
-+
-+ /*
-+ * We're doing this without the runqueue lock but this should always
-+ * be run on the local CPU. Time slice should run out in __schedule
-+ * but we set it to zero here in case niffies is slightly less.
-+ */
-+ p = rq->curr;
-+ p->time_slice = 0;
-+ __set_tsk_resched(p);
-+out:
-+ return HRTIMER_NORESTART;
-+}
-+
-+/*
-+ * Called to set the hrexpiry timer state.
-+ *
-+ * called with irqs disabled from the local CPU only
-+ */
-+static void hrexpiry_start(struct rq *rq, u64 delay)
-+{
-+ if (!hrexpiry_enabled(rq))
-+ return;
-+
-+ hrtimer_start(&rq->hrexpiry_timer, ns_to_ktime(delay),
-+ HRTIMER_MODE_REL_PINNED);
-+}
-+
-+static void init_rq_hrexpiry(struct rq *rq)
-+{
-+ hrtimer_init(&rq->hrexpiry_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-+ rq->hrexpiry_timer.function = hrexpiry;
-+}
-+
-+static inline int rq_dither(struct rq *rq)
-+{
-+ if (!hrexpiry_enabled(rq))
-+ return HALF_JIFFY_US;
-+ return 0;
-+}
-+
-+/*
-+ * wake_up_new_task - wake up a newly created task for the first time.
-+ *
-+ * This function will do some initial scheduler statistics housekeeping
-+ * that must be done for every newly created context, then puts the task
-+ * on the runqueue and wakes it.
-+ */
-+void wake_up_new_task(struct task_struct *p)
-+{
-+ struct task_struct *parent, *rq_curr;
-+ struct rq *rq, *new_rq;
-+ unsigned long flags;
-+
-+ parent = p->parent;
-+
-+ raw_spin_lock_irqsave(&p->pi_lock, flags);
-+ p->state = TASK_RUNNING;
-+ /* task_rq can't change yet on a new task */
-+ new_rq = rq = task_rq(p);
-+ if (unlikely(needs_other_cpu(p, task_cpu(p)))) {
-+ set_task_cpu(p, valid_task_cpu(p));
-+ new_rq = task_rq(p);
-+ }
-+
-+ double_rq_lock(rq, new_rq);
-+ rq_curr = rq->curr;
-+
-+ /*
-+ * Make sure we do not leak PI boosting priority to the child.
-+ */
-+ p->prio = rq_curr->normal_prio;
-+
-+ trace_sched_wakeup_new(p);
-+
-+ /*
-+ * Share the timeslice between parent and child, thus the
-+ * total amount of pending timeslices in the system doesn't change,
-+ * resulting in more scheduling fairness. If it's negative, it won't
-+ * matter since that's the same as being 0. rq->rq_deadline is only
-+ * modified within schedule() so it is always equal to
-+ * current->deadline.
-+ */
-+ account_task_cpu(rq, rq_curr);
-+ p->last_ran = rq_curr->last_ran;
-+ if (likely(rq_curr->policy != SCHED_FIFO)) {
-+ rq_curr->time_slice /= 2;
-+ if (rq_curr->time_slice < RESCHED_US) {
-+ /*
-+ * Forking task has run out of timeslice. Reschedule it and
-+ * start its child with a new time slice and deadline. The
-+ * child will end up running first because its deadline will
-+ * be slightly earlier.
-+ */
-+ __set_tsk_resched(rq_curr);
-+ time_slice_expired(p, new_rq);
-+ if (suitable_idle_cpus(p))
-+ resched_best_idle(p, task_cpu(p));
-+ else if (unlikely(rq != new_rq))
-+ try_preempt(p, new_rq);
-+ } else {
-+ p->time_slice = rq_curr->time_slice;
-+ if (rq_curr == parent && rq == new_rq && !suitable_idle_cpus(p)) {
-+ /*
-+ * The VM isn't cloned, so we're in a good position to
-+ * do child-runs-first in anticipation of an exec. This
-+ * usually avoids a lot of COW overhead.
-+ */
-+ __set_tsk_resched(rq_curr);
-+ } else {
-+ /*
-+ * Adjust the hrexpiry since rq_curr will keep
-+ * running and its timeslice has been shortened.
-+ */
-+ hrexpiry_start(rq, US_TO_NS(rq_curr->time_slice));
-+ try_preempt(p, new_rq);
-+ }
-+ }
-+ } else {
-+ time_slice_expired(p, new_rq);
-+ try_preempt(p, new_rq);
-+ }
-+ activate_task(p, new_rq);
-+ double_rq_unlock(rq, new_rq);
-+ raw_spin_unlock_irqrestore(&p->pi_lock, flags);
-+}
-+
-+#ifdef CONFIG_PREEMPT_NOTIFIERS
-+
-+static struct static_key preempt_notifier_key = STATIC_KEY_INIT_FALSE;
-+
-+void preempt_notifier_inc(void)
-+{
-+ static_key_slow_inc(&preempt_notifier_key);
-+}
-+EXPORT_SYMBOL_GPL(preempt_notifier_inc);
-+
-+void preempt_notifier_dec(void)
-+{
-+ static_key_slow_dec(&preempt_notifier_key);
-+}
-+EXPORT_SYMBOL_GPL(preempt_notifier_dec);
-+
-+/**
-+ * preempt_notifier_register - tell me when current is being preempted & rescheduled
-+ * @notifier: notifier struct to register
-+ */
-+void preempt_notifier_register(struct preempt_notifier *notifier)
-+{
-+ if (!static_key_false(&preempt_notifier_key))
-+ WARN(1, "registering preempt_notifier while notifiers disabled\n");
-+
-+ hlist_add_head(&notifier->link, &current->preempt_notifiers);
-+}
-+EXPORT_SYMBOL_GPL(preempt_notifier_register);
-+
-+/**
-+ * preempt_notifier_unregister - no longer interested in preemption notifications
-+ * @notifier: notifier struct to unregister
-+ *
-+ * This is *not* safe to call from within a preemption notifier.
-+ */
-+void preempt_notifier_unregister(struct preempt_notifier *notifier)
-+{
-+ hlist_del(&notifier->link);
-+}
-+EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
-+
-+static void __fire_sched_in_preempt_notifiers(struct task_struct *curr)
-+{
-+ struct preempt_notifier *notifier;
-+
-+ hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
-+ notifier->ops->sched_in(notifier, raw_smp_processor_id());
-+}
-+
-+static __always_inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
-+{
-+ if (static_key_false(&preempt_notifier_key))
-+ __fire_sched_in_preempt_notifiers(curr);
-+}
-+
-+static void
-+__fire_sched_out_preempt_notifiers(struct task_struct *curr,
-+ struct task_struct *next)
-+{
-+ struct preempt_notifier *notifier;
-+
-+ hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
-+ notifier->ops->sched_out(notifier, next);
-+}
-+
-+static __always_inline void
-+fire_sched_out_preempt_notifiers(struct task_struct *curr,
-+ struct task_struct *next)
-+{
-+ if (static_key_false(&preempt_notifier_key))
-+ __fire_sched_out_preempt_notifiers(curr, next);
-+}
-+
-+#else /* !CONFIG_PREEMPT_NOTIFIERS */
-+
-+static inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
-+{
-+}
-+
-+static inline void
-+fire_sched_out_preempt_notifiers(struct task_struct *curr,
-+ struct task_struct *next)
-+{
-+}
-+
-+#endif /* CONFIG_PREEMPT_NOTIFIERS */
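
For context, a hedged sketch of how a consumer (KVM is the main in-tree user) would drive the notifier API above from kernel code. The callback names here are hypothetical; the ops layout and the preempt_notifier_init()/preempt_notifier_inc() helpers match include/linux/preempt.h:

/*
 * Illustrative sketch (kernel-module context), not part of this patch.
 */
#include <linux/preempt.h>
#include <linux/sched.h>

static void demo_sched_in(struct preempt_notifier *pn, int cpu)
{
        /* current was just (re)scheduled onto @cpu */
}

static void demo_sched_out(struct preempt_notifier *pn,
                           struct task_struct *next)
{
        /* current is being preempted in favour of @next */
}

static struct preempt_notifier_ops demo_ops = {
        .sched_in       = demo_sched_in,
        .sched_out      = demo_sched_out,
};

static struct preempt_notifier demo_notifier;

/* Must be called by the task that wants the callbacks (i.e. current) */
static void demo_attach_current(void)
{
        preempt_notifier_inc();         /* enable the static key first */
        preempt_notifier_init(&demo_notifier, &demo_ops);
        preempt_notifier_register(&demo_notifier);
}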
-+
-+/**
-+ * prepare_task_switch - prepare to switch tasks
-+ * @rq: the runqueue preparing to switch
-+ * @prev: the task we are switching away from
-+ * @next: the task we are going to switch to.
-+ *
-+ * This is called with the rq lock held and interrupts off. It must
-+ * be paired with a subsequent finish_task_switch after the context
-+ * switch.
-+ *
-+ * prepare_task_switch sets up locking and calls architecture specific
-+ * hooks.
-+ */
-+static inline void
-+prepare_task_switch(struct rq *rq, struct task_struct *prev,
-+ struct task_struct *next)
-+{
-+ sched_info_switch(rq, prev, next);
-+ perf_event_task_sched_out(prev, next);
-+ fire_sched_out_preempt_notifiers(prev, next);
-+ prepare_lock_switch(rq, next);
-+ prepare_arch_switch(next);
-+}
-+
-+/**
-+ * finish_task_switch - clean up after a task-switch
-+ * @rq: runqueue associated with task-switch
-+ * @prev: the thread we just switched away from.
-+ *
-+ * finish_task_switch must be called after the context switch, paired
-+ * with a prepare_task_switch call before the context switch.
-+ * finish_task_switch will reconcile locking set up by prepare_task_switch,
-+ * and do any other architecture-specific cleanup actions.
-+ *
-+ * Note that we may have delayed dropping an mm in context_switch(). If
-+ * so, we finish that here outside of the runqueue lock. (Doing it
-+ * with the lock held can cause deadlocks; see schedule() for
-+ * details.)
-+ *
-+ * The context switch has flipped the stack from under us and restored the
-+ * local variables which were saved when this task called schedule() in the
-+ * past. prev == current is still correct but we need to recalculate this_rq
-+ * because prev may have moved to another CPU.
-+ */
-+static void finish_task_switch(struct task_struct *prev)
-+ __releases(rq->lock)
-+{
-+ struct rq *rq = this_rq();
-+ struct mm_struct *mm = rq->prev_mm;
-+ long prev_state;
-+
-+ /*
-+ * The previous task will have left us with a preempt_count of 2
-+ * because it left us after:
-+ *
-+ * schedule()
-+ * preempt_disable(); // 1
-+ * __schedule()
-+ * raw_spin_lock_irq(&rq->lock) // 2
-+ *
-+ * Also, see FORK_PREEMPT_COUNT.
-+ */
-+ if (WARN_ONCE(preempt_count() != 2*PREEMPT_DISABLE_OFFSET,
-+ "corrupted preempt_count: %s/%d/0x%x\n",
-+ current->comm, current->pid, preempt_count()))
-+ preempt_count_set(FORK_PREEMPT_COUNT);
-+
-+ rq->prev_mm = NULL;
-+
-+ /*
-+ * A task struct has one reference for the use as "current".
-+ * If a task dies, then it sets TASK_DEAD in tsk->state and calls
-+ * schedule one last time. The schedule call will never return, and
-+ * the scheduled task must drop that reference.
-+ *
-+ * We must observe prev->state before clearing prev->on_cpu (in
-+ * finish_lock_switch), otherwise a concurrent wakeup can get prev
-+ * running on another CPU and we could race with its RUNNING -> DEAD
-+ * transition, resulting in a double drop.
-+ */
-+ prev_state = prev->state;
-+ vtime_task_switch(prev);
-+ perf_event_task_sched_in(prev, current);
-+ /*
-+ * The membarrier system call requires a full memory barrier
-+ * after storing to rq->curr, before going back to user-space.
-+ *
-+ * TODO: This smp_mb__after_unlock_lock can go away if PPC ends
-+ * up adding a full barrier to switch_mm(), or we should figure
-+ * out if an smp_mb__after_unlock_lock is really the proper API
-+ * to use.
-+ */
-+ smp_mb__after_unlock_lock();
-+ finish_lock_switch(rq, prev);
-+ finish_arch_post_lock_switch();
-+
-+ fire_sched_in_preempt_notifiers(current);
-+ if (mm)
-+ mmdrop(mm);
-+ if (unlikely(prev_state == TASK_DEAD)) {
-+ /*
-+ * Remove function-return probe instances associated with this
-+ * task and put them back on the free list.
-+ */
-+ kprobe_flush_task(prev);
-+
-+ /* Task is done with its stack. */
-+ put_task_stack(prev);
-+
-+ put_task_struct(prev);
-+ }
-+}
-+
-+/**
-+ * schedule_tail - first thing a freshly forked thread must call.
-+ * @prev: the thread we just switched away from.
-+ */
-+asmlinkage __visible void schedule_tail(struct task_struct *prev)
-+{
-+ /*
-+ * New tasks start with FORK_PREEMPT_COUNT, see there and
-+ * finish_task_switch() for details.
-+ *
-+ * finish_task_switch() will drop rq->lock() and lower preempt_count
-+ * and the preempt_enable() will end up enabling preemption (on
-+ * PREEMPT_COUNT kernels).
-+ */
-+
-+ finish_task_switch(prev);
-+ preempt_enable();
-+
-+ if (current->set_child_tid)
-+ put_user(task_pid_vnr(current), current->set_child_tid);
-+}
-+
-+/*
-+ * context_switch - switch to the new MM and the new thread's register state.
-+ */
-+static __always_inline void
-+context_switch(struct rq *rq, struct task_struct *prev,
-+ struct task_struct *next)
-+{
-+ struct mm_struct *mm, *oldmm;
-+
-+ prepare_task_switch(rq, prev, next);
-+
-+ mm = next->mm;
-+ oldmm = prev->active_mm;
-+ /*
-+ * For paravirt, this is coupled with an exit in switch_to to
-+ * combine the page table reload and the switch backend into
-+ * one hypercall.
-+ */
-+ arch_start_context_switch(prev);
-+
-+ if (!mm) {
-+ next->active_mm = oldmm;
-+ mmgrab(oldmm);
-+ enter_lazy_tlb(oldmm, next);
-+ } else
-+ switch_mm_irqs_off(oldmm, mm, next);
-+
-+ if (!prev->mm) {
-+ prev->active_mm = NULL;
-+ rq->prev_mm = oldmm;
-+ }
-+ /*
-+ * Since the runqueue lock will be released by the next
-+ * task (which is an invalid locking op but in the case
-+ * of the scheduler it's an obvious special-case), so we
-+ * do an early lockdep release here:
-+ */
-+ spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
-+
-+ /* Here we just switch the register state and the stack. */
-+ switch_to(prev, next, prev);
-+ barrier();
-+
-+ finish_task_switch(prev);
-+}
-+
-+/*
-+ * nr_running, nr_uninterruptible and nr_context_switches:
-+ *
-+ * externally visible scheduler statistics: current number of runnable
-+ * threads, number of threads in uninterruptible sleep, and total number
-+ * of context switches performed since bootup.
-+ */
-+unsigned long nr_running(void)
-+{
-+ unsigned long i, sum = 0;
-+
-+ for_each_online_cpu(i)
-+ sum += cpu_rq(i)->nr_running;
-+
-+ return sum;
-+}
-+
-+static unsigned long nr_uninterruptible(void)
-+{
-+ unsigned long i, sum = 0;
-+
-+ for_each_online_cpu(i)
-+ sum += cpu_rq(i)->nr_uninterruptible;
-+
-+ return sum;
-+}
-+
-+/*
-+ * Check if only the current task is running on the CPU.
-+ *
-+ * Caution: this function does not check that the caller has disabled
-+ * preemption, thus the result might have a time-of-check-to-time-of-use
-+ * race. The caller is responsible to use it correctly, for example:
-+ *
-+ * - from a non-preemptable section (of course)
-+ *
-+ * - from a thread that is bound to a single CPU
-+ *
-+ * - in a loop with very short iterations (e.g. a polling loop)
-+ */
-+bool single_task_running(void)
-+{
-+ struct rq *rq = cpu_rq(smp_processor_id());
-+
-+ if (rq_load(rq) == 1)
-+ return true;
-+ else
-+ return false;
-+}
-+EXPORT_SYMBOL(single_task_running);
-+
-+unsigned long long nr_context_switches(void)
-+{
-+ int i;
-+ unsigned long long sum = 0;
-+
-+ for_each_possible_cpu(i)
-+ sum += cpu_rq(i)->nr_switches;
-+
-+ return sum;
-+}
-+
-+/*
-+ * IO-wait accounting, and how it's mostly bollocks (on SMP).
-+ *
-+ * The idea behind IO-wait accounting is to account the idle time that we
-+ * could have spent running if it were not for IO. That is, if we were to
-+ * improve the storage performance, we'd have a proportional reduction in
-+ * IO-wait time.
-+ *
-+ * This all works nicely on UP, where, when a task blocks on IO, we account
-+ * idle time as IO-wait, because if the storage were faster, it could've been
-+ * running and we'd not be idle.
-+ *
-+ * This has been extended to SMP, by doing the same for each CPU. This however
-+ * is broken.
-+ *
-+ * Imagine for instance the case where two tasks block on one CPU; only that
-+ * CPU will have IO-wait accounted, while the other has regular idle. Even
-+ * though, if the storage were faster, both could've run at the same time,
-+ * utilising both CPUs.
-+ *
-+ * This means that, when looking globally, the current IO-wait accounting on
-+ * SMP is a lower bound, by reason of under-accounting.
-+ *
-+ * Worse, since the numbers are provided per CPU, they are sometimes
-+ * interpreted per CPU, and that is nonsensical. A blocked task isn't strictly
-+ * associated with any one particular CPU; it can wake up on a different CPU
-+ * than the one it blocked on. This means the per-CPU IO-wait number is
-+ * meaningless.
-+ *
-+ * Task CPU affinities can make all that even more 'interesting'.
-+ */
-+
-+unsigned long nr_iowait(void)
-+{
-+ unsigned long i, sum = 0;
-+
-+ for_each_possible_cpu(i)
-+ sum += atomic_read(&cpu_rq(i)->nr_iowait);
-+
-+ return sum;
-+}
-+
-+/*
-+ * Consumers of these two interfaces, like for example the cpufreq menu
-+ * governor, are using nonsensical data: boosting frequency for a CPU with
-+ * IO-wait may not help, since the task might not even end up running on
-+ * that CPU when it does become runnable.
-+ */
-+
-+unsigned long nr_iowait_cpu(int cpu)
-+{
-+ struct rq *this = cpu_rq(cpu);
-+ return atomic_read(&this->nr_iowait);
-+}
-+
-+unsigned long nr_active(void)
-+{
-+ return nr_running() + nr_uninterruptible();
-+}
-+
-+/*
-+ * I/O wait is the number of running or queued tasks with their ->rq pointer
-+ * set to this CPU as being the CPU they're more likely to run on.
-+ */
-+void get_iowait_load(unsigned long *nr_waiters, unsigned long *load)
-+{
-+ struct rq *rq = this_rq();
-+
-+ *nr_waiters = atomic_read(&rq->nr_iowait);
-+ *load = rq_load(rq);
-+}
-+
-+/* Variables and functions for calc_load */
-+static unsigned long calc_load_update;
-+unsigned long avenrun[3];
-+EXPORT_SYMBOL(avenrun);
-+
-+/**
-+ * get_avenrun - get the load average array
-+ * @loads: pointer to dest load array
-+ * @offset: offset to add
-+ * @shift: shift count to shift the result left
-+ *
-+ * These values are estimates at best, so no need for locking.
-+ */
-+void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
-+{
-+ loads[0] = (avenrun[0] + offset) << shift;
-+ loads[1] = (avenrun[1] + offset) << shift;
-+ loads[2] = (avenrun[2] + offset) << shift;
-+}
-+
-+static unsigned long
-+calc_load(unsigned long load, unsigned long exp, unsigned long active)
-+{
-+ unsigned long newload;
-+
-+ newload = load * exp + active * (FIXED_1 - exp);
-+ if (active >= load)
-+ newload += FIXED_1-1;
-+
-+ return newload / FIXED_1;
-+}
-+
-+/*
-+ * calc_load - update the avenrun load estimates every LOAD_FREQ seconds.
-+ */
-+void calc_global_load(unsigned long ticks)
-+{
-+ long active;
-+
-+ if (time_before(jiffies, READ_ONCE(calc_load_update)))
-+ return;
-+ active = nr_active() * FIXED_1;
-+
-+ avenrun[0] = calc_load(avenrun[0], EXP_1, active);
-+ avenrun[1] = calc_load(avenrun[1], EXP_5, active);
-+ avenrun[2] = calc_load(avenrun[2], EXP_15, active);
-+
-+ calc_load_update = jiffies + LOAD_FREQ;
-+}
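
The fixed-point scheme above is the classic loadavg arithmetic; a small standalone sketch of one decay chain and the /proc/loadavg-style decoding. The constants FSHIFT = 11 and EXP_1 = 1884 are assumed from mainline's include/linux/sched/loadavg.h, and LOAD_FREQ is roughly five seconds:

/*
 * Illustrative sketch, runnable as plain userspace C; not part of
 * this patch.
 */
#include <stdio.h>

#define FSHIFT  11
#define FIXED_1 (1UL << FSHIFT)        /* 2048 */
#define EXP_1   1884                   /* fixed-point 1/exp(5sec/1min) */

static unsigned long calc_load(unsigned long load, unsigned long exp,
                               unsigned long active)
{
        unsigned long newload = load * exp + active * (FIXED_1 - exp);

        if (active >= load)
                newload += FIXED_1 - 1;        /* round up while load rises */
        return newload / FIXED_1;
}

int main(void)
{
        unsigned long avenrun0 = 0;
        int interval;

        /* Two runnable tasks held for 24 LOAD_FREQ intervals (~2 minutes) */
        for (interval = 0; interval < 24; interval++)
                avenrun0 = calc_load(avenrun0, EXP_1, 2 * FIXED_1);

        /* Decode the way /proc/loadavg does: integer part, two decimals */
        printf("%lu.%02lu\n", avenrun0 >> FSHIFT,
               ((avenrun0 & (FIXED_1 - 1)) * 100) >> FSHIFT);
        return 0;
}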
-+
-+DEFINE_PER_CPU(struct kernel_stat, kstat);
-+DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
-+
-+EXPORT_PER_CPU_SYMBOL(kstat);
-+EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
-+
-+#ifdef CONFIG_PARAVIRT
-+static inline u64 steal_ticks(u64 steal)
-+{
-+ if (unlikely(steal > NSEC_PER_SEC))
-+ return div_u64(steal, TICK_NSEC);
-+
-+ return __iter_div_u64_rem(steal, TICK_NSEC, &steal);
-+}
-+#endif
-+
-+#ifndef nsecs_to_cputime
-+# define nsecs_to_cputime(__nsecs) nsecs_to_jiffies(__nsecs)
-+#endif
-+
-+/*
-+ * On each tick, add the number of nanoseconds to the unbanked variables and
-+ * once one tick's worth has accumulated, account it allowing for accurate
-+ * sub-tick accounting and totals.
-+ */
-+static void pc_idle_time(struct rq *rq, struct task_struct *idle, unsigned long ns)
-+{
-+ u64 *cpustat = kcpustat_this_cpu->cpustat;
-+ unsigned long ticks;
-+
-+ if (atomic_read(&rq->nr_iowait) > 0) {
-+ rq->iowait_ns += ns;
-+ if (rq->iowait_ns >= JIFFY_NS) {
-+ ticks = NS_TO_JIFFIES(rq->iowait_ns);
-+ cpustat[CPUTIME_IOWAIT] += (__force u64)TICK_NSEC * ticks;
-+ rq->iowait_ns %= JIFFY_NS;
-+ }
-+ } else {
-+ rq->idle_ns += ns;
-+ if (rq->idle_ns >= JIFFY_NS) {
-+ ticks = NS_TO_JIFFIES(rq->idle_ns);
-+ cpustat[CPUTIME_IDLE] += (__force u64)TICK_NSEC * ticks;
-+ rq->idle_ns %= JIFFY_NS;
-+ }
-+ }
-+ acct_update_integrals(idle);
-+}
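
The banking pattern described above (accumulate nanoseconds, pay out whole ticks, keep the remainder) is easiest to see in isolation. A standalone sketch, assuming HZ=100 so a jiffy is 10 ms:

/*
 * Illustrative sketch of the accumulate-and-carry accounting used by
 * pc_idle_time()/pc_system_time()/pc_user_time(); not part of this
 * patch.
 */
#include <stdio.h>

#define JIFFY_NS        10000000ULL    /* 10 ms, assumes HZ=100 */

int main(void)
{
        unsigned long long banked = 0, ticks_paid = 0;
        unsigned long long deltas[] = { 4000000, 7000000, 12000000 };
        int i;

        for (i = 0; i < 3; i++) {
                banked += deltas[i];
                if (banked >= JIFFY_NS) {
                        ticks_paid += banked / JIFFY_NS;
                        banked %= JIFFY_NS;    /* keep sub-tick remainder */
                }
        }
        /* 23 ms total -> 2 ticks accounted, 3 ms still banked */
        printf("ticks=%llu banked=%llu ns\n", ticks_paid, banked);
        return 0;
}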
-+
-+static void pc_system_time(struct rq *rq, struct task_struct *p,
-+ int hardirq_offset, unsigned long ns)
-+{
-+ u64 *cpustat = kcpustat_this_cpu->cpustat;
-+ unsigned long ticks;
-+
-+ p->stime_ns += ns;
-+ if (p->stime_ns >= JIFFY_NS) {
-+ ticks = NS_TO_JIFFIES(p->stime_ns);
-+ p->stime_ns %= JIFFY_NS;
-+ p->stime += (__force u64)TICK_NSEC * ticks;
-+ account_group_system_time(p, TICK_NSEC * ticks);
-+ }
-+ p->sched_time += ns;
-+ account_group_exec_runtime(p, ns);
-+
-+ if (hardirq_count() - hardirq_offset) {
-+ rq->irq_ns += ns;
-+ if (rq->irq_ns >= JIFFY_NS) {
-+ ticks = NS_TO_JIFFIES(rq->irq_ns);
-+ cpustat[CPUTIME_IRQ] += (__force u64)TICK_NSEC * ticks;
-+ rq->irq_ns %= JIFFY_NS;
-+ }
-+ } else if (in_serving_softirq()) {
-+ rq->softirq_ns += ns;
-+ if (rq->softirq_ns >= JIFFY_NS) {
-+ ticks = NS_TO_JIFFIES(rq->softirq_ns);
-+ cpustat[CPUTIME_SOFTIRQ] += (__force u64)TICK_NSEC * ticks;
-+ rq->softirq_ns %= JIFFY_NS;
-+ }
-+ } else {
-+ rq->system_ns += ns;
-+ if (rq->system_ns >= JIFFY_NS) {
-+ ticks = NS_TO_JIFFIES(rq->system_ns);
-+ cpustat[CPUTIME_SYSTEM] += (__force u64)TICK_NSEC * ticks;
-+ rq->system_ns %= JIFFY_NS;
-+ }
-+ }
-+ acct_update_integrals(p);
-+}
-+
-+static void pc_user_time(struct rq *rq, struct task_struct *p, unsigned long ns)
-+{
-+ u64 *cpustat = kcpustat_this_cpu->cpustat;
-+ unsigned long ticks;
-+
-+ p->utime_ns += ns;
-+ if (p->utime_ns >= JIFFY_NS) {
-+ ticks = NS_TO_JIFFIES(p->utime_ns);
-+ p->utime_ns %= JIFFY_NS;
-+ p->utime += (__force u64)TICK_NSEC * ticks;
-+ account_group_user_time(p, TICK_NSEC * ticks);
-+ }
-+ p->sched_time += ns;
-+ account_group_exec_runtime(p, ns);
-+
-+ if (this_cpu_ksoftirqd() == p) {
-+ /*
-+ * ksoftirqd time does not get accounted in cpu_softirq_time,
-+ * so we have to handle it separately here.
-+ */
-+ rq->softirq_ns += ns;
-+ if (rq->softirq_ns >= JIFFY_NS) {
-+ ticks = NS_TO_JIFFIES(rq->softirq_ns);
-+ cpustat[CPUTIME_SOFTIRQ] += (__force u64)TICK_NSEC * ticks;
-+ rq->softirq_ns %= JIFFY_NS;
-+ }
-+ }
-+
-+ if (task_nice(p) > 0 || idleprio_task(p)) {
-+ rq->nice_ns += ns;
-+ if (rq->nice_ns >= JIFFY_NS) {
-+ ticks = NS_TO_JIFFIES(rq->nice_ns);
-+ cpustat[CPUTIME_NICE] += (__force u64)TICK_NSEC * ticks;
-+ rq->nice_ns %= JIFFY_NS;
-+ }
-+ } else {
-+ rq->user_ns += ns;
-+ if (rq->user_ns >= JIFFY_NS) {
-+ ticks = NS_TO_JIFFIES(rq->user_ns);
-+ cpustat[CPUTIME_USER] += (__force u64)TICK_NSEC * ticks;
-+ rq->user_ns %= JIFFY_NS;
-+ }
-+ }
-+ acct_update_integrals(p);
-+}
-+
-+/*
-+ * This is called on clock ticks.
-+ * Bank in p->sched_time the ns elapsed since the last tick or switch.
-+ * CPU scheduler quota accounting is also performed here in microseconds.
-+ */
-+static void update_cpu_clock_tick(struct rq *rq, struct task_struct *p)
-+{
-+ s64 account_ns = rq->niffies - p->last_ran;
-+ struct task_struct *idle = rq->idle;
-+
-+ /* Accurate tick timekeeping */
-+ if (user_mode(get_irq_regs()))
-+ pc_user_time(rq, p, account_ns);
-+ else if (p != idle || (irq_count() != HARDIRQ_OFFSET)) {
-+ pc_system_time(rq, p, HARDIRQ_OFFSET, account_ns);
-+ } else
-+ pc_idle_time(rq, idle, account_ns);
-+
-+ /* time_slice accounting is done in usecs to avoid overflow on 32bit */
-+ if (p->policy != SCHED_FIFO && p != idle)
-+ p->time_slice -= NS_TO_US(account_ns);
-+
-+ p->last_ran = rq->niffies;
-+}
-+
-+/*
-+ * This is called on context switches.
-+ * Bank in p->sched_time the ns elapsed since the last tick or switch.
-+ * CPU scheduler quota accounting is also performed here in microseconds.
-+ */
-+static void update_cpu_clock_switch(struct rq *rq, struct task_struct *p)
-+{
-+ s64 account_ns = rq->niffies - p->last_ran;
-+ struct task_struct *idle = rq->idle;
-+
-+ /* Accurate subtick timekeeping */
-+ if (p != idle)
-+ pc_user_time(rq, p, account_ns);
-+ else
-+ pc_idle_time(rq, idle, account_ns);
-+
-+ /* time_slice accounting is done in usecs to avoid overflow on 32bit */
-+ if (p->policy != SCHED_FIFO && p != idle)
-+ p->time_slice -= NS_TO_US(account_ns);
-+}
-+
-+/*
-+ * Return any ns on the sched_clock that have not yet been accounted in
-+ * @p in case that task is currently running.
-+ *
-+ * Called with task_rq_lock(p) held.
-+ */
-+static inline u64 do_task_delta_exec(struct task_struct *p, struct rq *rq)
-+{
-+ u64 ns = 0;
-+
-+ /*
-+ * Must be ->curr _and_ ->on_rq. If dequeued, we would
-+ * project cycles that may never be accounted to this
-+ * thread, breaking clock_gettime().
-+ */
-+ if (p == rq->curr && task_on_rq_queued(p)) {
-+ update_clocks(rq);
-+ ns = rq->niffies - p->last_ran;
-+ }
-+
-+ return ns;
-+}
-+
-+/*
-+ * Return accounted runtime for the task.
-+ * If the task is currently running, also include its pending runtime
-+ * that has not been accounted yet.
-+ */
-+unsigned long long task_sched_runtime(struct task_struct *p)
-+{
-+ unsigned long flags;
-+ struct rq *rq;
-+ u64 ns;
-+
-+#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
-+ /*
-+ * 64-bit doesn't need locks to atomically read a 64-bit value.
-+ * So we have an optimization chance when the task's delta_exec is 0.
-+ * Reading ->on_cpu is racy, but this is ok.
-+ *
-+ * If we race with it leaving CPU, we'll take a lock. So we're correct.
-+ * If we race with it entering CPU, unaccounted time is 0. This is
-+ * indistinguishable from the read occurring a few cycles earlier.
-+ * If we see ->on_cpu without ->on_rq, the task is leaving, and has
-+ * been accounted, so we're correct here as well.
-+ */
-+ if (!p->on_cpu || !task_on_rq_queued(p))
-+ return tsk_seruntime(p);
-+#endif
-+
-+ rq = task_rq_lock(p, &flags);
-+ ns = p->sched_time + do_task_delta_exec(p, rq);
-+ task_rq_unlock(rq, p, &flags);
-+
-+ return ns;
-+}
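
task_sched_runtime() is what the per-task CPU clocks are built on; from userspace the same accounting surfaces through clock_gettime(). A minimal POSIX sketch (link with -lrt on older glibc):

/*
 * Illustrative sketch, not part of this patch: read this thread's
 * accounted CPU time back from the kernel.
 */
#include <stdio.h>
#include <time.h>

int main(void)
{
        struct timespec ts;
        volatile unsigned long spin;

        for (spin = 0; spin < 100000000UL; spin++)
                ;       /* burn some CPU so there is something to account */

        if (clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts) == 0)
                printf("thread cpu time: %ld.%09ld s\n",
                       (long)ts.tv_sec, ts.tv_nsec);
        return 0;
}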
-+
-+/*
-+ * Functions to test for when SCHED_ISO tasks have used their allocated
-+ * quota as real time scheduling and convert them back to SCHED_NORMAL. All
-+ * data is modified only by the local runqueue during scheduler_tick with
-+ * interrupts disabled.
-+ */
-+
-+/*
-+ * Test if SCHED_ISO tasks have run longer than their allotted period as RT
-+ * tasks and set the refractory flag if necessary. There is 10% hysteresis
-+ * for unsetting the flag. 115/128 is ~90/100 as a fast shift instead of a
-+ * slow division.
-+ */
-+static inline void iso_tick(struct rq *rq)
-+{
-+ rq->iso_ticks = rq->iso_ticks * (ISO_PERIOD - 1) / ISO_PERIOD;
-+ rq->iso_ticks += 100;
-+ if (rq->iso_ticks > ISO_PERIOD * sched_iso_cpu) {
-+ rq->iso_refractory = true;
-+ if (unlikely(rq->iso_ticks > ISO_PERIOD * 100))
-+ rq->iso_ticks = ISO_PERIOD * 100;
-+ }
-+}
-+
-+/* No SCHED_ISO task was running so decrease rq->iso_ticks */
-+static inline void no_iso_tick(struct rq *rq, int ticks)
-+{
-+ if (rq->iso_ticks > 0 || rq->iso_refractory) {
-+ rq->iso_ticks = rq->iso_ticks * (ISO_PERIOD - ticks) / ISO_PERIOD;
-+ if (rq->iso_ticks < ISO_PERIOD * (sched_iso_cpu * 115 / 128)) {
-+ rq->iso_refractory = false;
-+ if (unlikely(rq->iso_ticks < 0))
-+ rq->iso_ticks = 0;
-+ }
-+ }
-+}
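
To make the hysteresis concrete, a standalone sketch of the two thresholds. ISO_PERIOD's exact definition and the default sched_iso_cpu of 70% are assumptions here, not taken from this hunk:

/*
 * Illustrative sketch of the refractory thresholds used by
 * iso_tick()/no_iso_tick(); not part of this patch.
 */
#include <stdio.h>

int main(void)
{
        const int hz = 100;                    /* assumes HZ=100 */
        const int iso_period = 5 * hz;         /* assumed ~5 s of ticks */
        const int sched_iso_cpu = 70;          /* assumed default limit, % */
        int set_at = iso_period * sched_iso_cpu;
        int unset_at = iso_period * (sched_iso_cpu * 115 / 128);

        /* 115/128 = 0.898..., a cheap shift-based stand-in for 90% */
        printf("refractory set above   %d iso_ticks\n", set_at);
        printf("refractory cleared below %d iso_ticks (115/128 ~ 89.8%%)\n",
               unset_at);
        return 0;
}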
-+
-+/* This manages tasks that have run out of timeslice during a scheduler_tick */
-+static void task_running_tick(struct rq *rq)
-+{
-+ struct task_struct *p = rq->curr;
-+
-+ /*
-+ * If a SCHED_ISO task is running we increment the iso_ticks. In
-+ * order to prevent SCHED_ISO tasks from causing starvation in the
-+ * presence of true RT tasks we account those as iso_ticks as well.
-+ */
-+ if (rt_task(p) || task_running_iso(p))
-+ iso_tick(rq);
-+ else
-+ no_iso_tick(rq, 1);
-+
-+ /* SCHED_FIFO tasks never run out of timeslice. */
-+ if (p->policy == SCHED_FIFO)
-+ return;
-+
-+ if (iso_task(p)) {
-+ if (task_running_iso(p)) {
-+ if (rq->iso_refractory) {
-+ /*
-+ * SCHED_ISO task is running as RT and limit
-+ * has been hit. Force it to reschedule as
-+ * SCHED_NORMAL by zeroing its time_slice
-+ */
-+ p->time_slice = 0;
-+ }
-+ } else if (!rq->iso_refractory) {
-+ /* Can now run again ISO. Reschedule to pick up prio */
-+ goto out_resched;
-+ }
-+ }
-+
-+ /*
-+ * Tasks that were scheduled in the first half of a tick are not
-+ * allowed to run into the 2nd half of the next tick if they will
-+ * run out of time slice in the interim. Otherwise, if they have
-+ * less than RESCHED_US μs of time slice left they will be rescheduled.
-+ * Dither is used as a backup for when hrexpiry is disabled or high-res
-+ * timers are not configured in.
-+ */
-+ if (p->time_slice - rq->dither >= RESCHED_US)
-+ return;
-+out_resched:
-+ rq_lock(rq);
-+ __set_tsk_resched(p);
-+ rq_unlock(rq);
-+}
-+
-+#ifdef CONFIG_NO_HZ_FULL
-+/*
-+ * We can stop the timer tick any time highres timers are active since
-+ * we rely entirely on highres timeouts for task expiry rescheduling.
-+ */
-+static void sched_stop_tick(struct rq *rq, int cpu)
-+{
-+ if (!hrexpiry_enabled(rq))
-+ return;
-+ if (!tick_nohz_full_enabled())
-+ return;
-+ if (!tick_nohz_full_cpu(cpu))
-+ return;
-+ tick_nohz_dep_clear_cpu(cpu, TICK_DEP_BIT_SCHED);
-+}
-+
-+static inline void sched_start_tick(struct rq *rq, int cpu)
-+{
-+ tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_SCHED);
-+}
-+
-+/**
-+ * scheduler_tick_max_deferment
-+ *
-+ * Keep at least one tick per second when a single
-+ * active task is running.
-+ *
-+ * This makes sure that uptime continues to move forward, even
-+ * with a very low granularity.
-+ *
-+ * Return: Maximum deferment in nanoseconds.
-+ */
-+u64 scheduler_tick_max_deferment(void)
-+{
-+ struct rq *rq = this_rq();
-+ unsigned long next, now = READ_ONCE(jiffies);
-+
-+ next = rq->last_jiffy + HZ;
-+
-+ if (time_before_eq(next, now))
-+ return 0;
-+
-+ return jiffies_to_nsecs(next - now);
-+}
-+#else
-+static inline void sched_stop_tick(struct rq *rq, int cpu)
-+{
-+}
-+
-+static inline void sched_start_tick(struct rq *rq, int cpu)
-+{
-+}
-+#endif
-+
-+/*
-+ * This function gets called by the timer code, with HZ frequency.
-+ * We call it with interrupts disabled.
-+ */
-+void scheduler_tick(void)
-+{
-+ int cpu __maybe_unused = smp_processor_id();
-+ struct rq *rq = cpu_rq(cpu);
-+
-+ sched_clock_tick();
-+ update_clocks(rq);
-+ update_load_avg(rq, 0);
-+ update_cpu_clock_tick(rq, rq->curr);
-+ if (!rq_idle(rq))
-+ task_running_tick(rq);
-+ else if (rq->last_jiffy > rq->last_scheduler_tick)
-+ no_iso_tick(rq, rq->last_jiffy - rq->last_scheduler_tick);
-+ rq->last_scheduler_tick = rq->last_jiffy;
-+ rq->last_tick = rq->clock;
-+ perf_event_task_tick();
-+ sched_stop_tick(rq, cpu);
-+}
-+
-+#if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
-+ defined(CONFIG_PREEMPT_TRACER))
-+/*
-+ * If the value passed in is equal to the current preempt count
-+ * then we just disabled preemption. Start timing the latency.
-+ */
-+static inline void preempt_latency_start(int val)
-+{
-+ if (preempt_count() == val) {
-+ unsigned long ip = get_lock_parent_ip();
-+#ifdef CONFIG_DEBUG_PREEMPT
-+ current->preempt_disable_ip = ip;
-+#endif
-+ trace_preempt_off(CALLER_ADDR0, ip);
-+ }
-+}
-+
-+void preempt_count_add(int val)
-+{
-+#ifdef CONFIG_DEBUG_PREEMPT
-+ /*
-+ * Underflow?
-+ */
-+ if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
-+ return;
-+#endif
-+ __preempt_count_add(val);
-+#ifdef CONFIG_DEBUG_PREEMPT
-+ /*
-+ * Spinlock count overflowing soon?
-+ */
-+ DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
-+ PREEMPT_MASK - 10);
-+#endif
-+ preempt_latency_start(val);
-+}
-+EXPORT_SYMBOL(preempt_count_add);
-+NOKPROBE_SYMBOL(preempt_count_add);
-+
-+/*
-+ * If the value passed in equals the current preempt count
-+ * then we just enabled preemption. Stop timing the latency.
-+ */
-+static inline void preempt_latency_stop(int val)
-+{
-+ if (preempt_count() == val)
-+ trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
-+}
-+
-+void preempt_count_sub(int val)
-+{
-+#ifdef CONFIG_DEBUG_PREEMPT
-+ /*
-+ * Underflow?
-+ */
-+ if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
-+ return;
-+ /*
-+ * Is the spinlock portion underflowing?
-+ */
-+ if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
-+ !(preempt_count() & PREEMPT_MASK)))
-+ return;
-+#endif
-+
-+ preempt_latency_stop(val);
-+ __preempt_count_sub(val);
-+}
-+EXPORT_SYMBOL(preempt_count_sub);
-+NOKPROBE_SYMBOL(preempt_count_sub);
-+
-+#else
-+static inline void preempt_latency_start(int val) { }
-+static inline void preempt_latency_stop(int val) { }
-+#endif
-+
-+static inline unsigned long get_preempt_disable_ip(struct task_struct *p)
-+{
-+#ifdef CONFIG_DEBUG_PREEMPT
-+ return p->preempt_disable_ip;
-+#else
-+ return 0;
-+#endif
-+}
-+
-+/*
-+ * The time_slice is only refilled when it is empty and that is when we set a
-+ * new deadline. Make sure update_clocks has been called recently to update
-+ * rq->niffies.
-+ */
-+static void time_slice_expired(struct task_struct *p, struct rq *rq)
-+{
-+ p->time_slice = timeslice();
-+ p->deadline = rq->niffies + task_deadline_diff(p);
-+#ifdef CONFIG_SMT_NICE
-+ if (!p->mm)
-+ p->smt_bias = 0;
-+ else if (rt_task(p))
-+ p->smt_bias = 1 << 30;
-+ else if (task_running_iso(p))
-+ p->smt_bias = 1 << 29;
-+ else if (idleprio_task(p)) {
-+ if (task_running_idle(p))
-+ p->smt_bias = 0;
-+ else
-+ p->smt_bias = 1;
-+ } else if (--p->smt_bias < 1)
-+ p->smt_bias = MAX_PRIO - p->static_prio;
-+#endif
-+}
-+
-+/*
-+ * Timeslices below RESCHED_US are considered as good as expired as there's no
-+ * point rescheduling when there's so little time left. SCHED_BATCH tasks
-+ * have been flagged as not latency sensitive and likely to be fully CPU
-+ * bound, so every time they're rescheduled they have their time_slice
-+ * refilled, but get a new, later deadline so they have little effect on
-+ * SCHED_NORMAL tasks.
-+ */
-+static inline void check_deadline(struct task_struct *p, struct rq *rq)
-+{
-+ if (p->time_slice < RESCHED_US || batch_task(p))
-+ time_slice_expired(p, rq);
-+}
-+
-+/*
-+ * Task selection with skiplists is a simple matter of picking off the first
-+ * task in the sorted list, an O(1) operation. The lookup is amortised O(1),
-+ * being bounded by the number of processors.
-+ *
-+ * Runqueues are selectively locked based on their unlocked data and then
-+ * unlocked if not needed. At most 3 locks will be held at any time and are
-+ * released as soon as they're no longer needed. All balancing between CPUs
-+ * is thus done here in an extremely simple first-come, best-fit manner.
-+ *
-+ * This iterates over runqueues in cache locality order. In interactive mode
-+ * it iterates over all CPUs and finds the task with the best key/deadline.
-+ * In non-interactive mode it will only take a task if it's from the current
-+ * runqueue or a runqueue with more tasks than the current one with a better
-+ * key/deadline.
-+ */
-+#ifdef CONFIG_SMP
-+static inline struct task_struct
-+*earliest_deadline_task(struct rq *rq, int cpu, struct task_struct *idle)
-+{
-+ struct rq *locked = NULL, *chosen = NULL;
-+ struct task_struct *edt = idle;
-+ int i, best_entries = 0;
-+ u64 best_key = ~0ULL;
-+
-+ for (i = 0; i < num_possible_cpus(); i++) {
-+ struct rq *other_rq = rq_order(rq, i);
-+ int entries = other_rq->sl->entries;
-+ skiplist_node *next;
-+
-+ /*
-+ * Check for queued entries lockless first. The local runqueue
-+ * is locked so entries will always be accurate.
-+ */
-+ if (!sched_interactive) {
-+ /*
-+ * Don't reschedule balance across nodes unless the CPU
-+ * is idle.
-+ */
-+ if (edt != idle && rq->cpu_locality[other_rq->cpu] > 3)
-+ break;
-+ if (entries <= best_entries)
-+ continue;
-+ } else if (!entries)
-+ continue;
-+
-+ /* if (i) implies other_rq != rq */
-+ if (i) {
-+ /* Check for best key queued lockless first */
-+ if (other_rq->best_key >= best_key)
-+ continue;
-+
-+ if (unlikely(!trylock_rq(rq, other_rq)))
-+ continue;
-+
-+ /* Need to reevaluate entries after locking */
-+ entries = other_rq->sl->entries;
-+ if (unlikely(!entries)) {
-+ unlock_rq(other_rq);
-+ continue;
-+ }
-+ }
-+
-+ next = &other_rq->node;
-+ /*
-+ * In interactive mode we check beyond the best entry on other
-+ * runqueues if we can't get the best for smt or affinity
-+ * reasons.
-+ */
-+ while ((next = next->next[0]) != &other_rq->node) {
-+ struct task_struct *p;
-+ u64 key = next->key;
-+
-+ /* Reevaluate key after locking */
-+ if (key >= best_key)
-+ break;
-+
-+ p = next->value;
-+ if (!smt_schedule(p, rq)) {
-+ if (i && !sched_interactive)
-+ break;
-+ continue;
-+ }
-+
-+ /* Make sure affinity is ok */
-+ if (i) {
-+ if (needs_other_cpu(p, cpu)) {
-+ if (sched_interactive)
-+ continue;
-+ break;
-+ }
-+ /* From this point on p is the best so far */
-+ if (locked)
-+ unlock_rq(locked);
-+ chosen = locked = other_rq;
-+ }
-+ best_entries = entries;
-+ best_key = key;
-+ edt = p;
-+ break;
-+ }
-+ /*
-+ * rq->preempting is a hint only as the state may have changed
-+ * since it was set with the resched call, but if we have met
-+ * the condition we can break out here.
-+ */
-+ if (edt == rq->preempting)
-+ break;
-+ if (i && other_rq != chosen)
-+ unlock_rq(other_rq);
-+ }
-+
-+ if (likely(edt != idle))
-+ take_task(rq, cpu, edt);
-+
-+ if (locked)
-+ unlock_rq(locked);
-+
-+ rq->preempting = NULL;
-+
-+ return edt;
-+}
-+#else /* CONFIG_SMP */
-+static inline struct task_struct
-+*earliest_deadline_task(struct rq *rq, int cpu, struct task_struct *idle)
-+{
-+ struct task_struct *edt;
-+
-+ if (unlikely(!rq->sl->entries))
-+ return idle;
-+ edt = rq->node.next[0]->value;
-+ take_task(rq, cpu, edt);
-+ return edt;
-+}
-+#endif /* CONFIG_SMP */
-+
-+/*
-+ * Print scheduling while atomic bug:
-+ */
-+static noinline void __schedule_bug(struct task_struct *prev)
-+{
-+ /* Save this before calling printk(), since that will clobber it */
-+ unsigned long preempt_disable_ip = get_preempt_disable_ip(current);
-+
-+ if (oops_in_progress)
-+ return;
-+
-+ printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
-+ prev->comm, prev->pid, preempt_count());
-+
-+ debug_show_held_locks(prev);
-+ print_modules();
-+ if (irqs_disabled())
-+ print_irqtrace_events(prev);
-+ if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)
-+ && in_atomic_preempt_off()) {
-+ pr_err("Preemption disabled at:");
-+ print_ip_sym(preempt_disable_ip);
-+ pr_cont("\n");
-+ }
-+ dump_stack();
-+ add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
-+}
-+
-+/*
-+ * Various schedule()-time debugging checks and statistics:
-+ */
-+static inline void schedule_debug(struct task_struct *prev)
-+{
-+#ifdef CONFIG_SCHED_STACK_END_CHECK
-+ if (task_stack_end_corrupted(prev))
-+ panic("corrupted stack end detected inside scheduler\n");
-+#endif
-+
-+ if (unlikely(in_atomic_preempt_off())) {
-+ __schedule_bug(prev);
-+ preempt_count_set(PREEMPT_DISABLED);
-+ }
-+ rcu_sleep_check();
-+
-+ profile_hit(SCHED_PROFILING, __builtin_return_address(0));
-+
-+ schedstat_inc(this_rq()->sched_count);
-+}
-+
-+/*
-+ * The currently running task's information is all stored in rq local data
-+ * which is only modified by the local CPU.
-+ */
-+static inline void set_rq_task(struct rq *rq, struct task_struct *p)
-+{
-+ if (p == rq->idle || p->policy == SCHED_FIFO)
-+ hrexpiry_clear(rq);
-+ else
-+ hrexpiry_start(rq, US_TO_NS(p->time_slice));
-+ if (rq->clock - rq->last_tick > HALF_JIFFY_NS)
-+ rq->dither = 0;
-+ else
-+ rq->dither = rq_dither(rq);
-+
-+ rq->rq_deadline = p->deadline;
-+ rq->rq_prio = p->prio;
-+#ifdef CONFIG_SMT_NICE
-+ rq->rq_mm = p->mm;
-+ rq->rq_smt_bias = p->smt_bias;
-+#endif
-+}
-+
-+#ifdef CONFIG_SMT_NICE
-+static void check_no_siblings(struct rq __maybe_unused *this_rq) {}
-+static void wake_no_siblings(struct rq __maybe_unused *this_rq) {}
-+static void (*check_siblings)(struct rq *this_rq) = &check_no_siblings;
-+static void (*wake_siblings)(struct rq *this_rq) = &wake_no_siblings;
-+
-+/*
-+ * Iterate over SMT siblings when we've scheduled a process on a CPU and
-+ * decide whether they should continue running or be descheduled.
-+ */
-+static void check_smt_siblings(struct rq *this_rq)
-+{
-+ int other_cpu;
-+
-+ for_each_cpu(other_cpu, &this_rq->thread_mask) {
-+ struct task_struct *p;
-+ struct rq *rq;
-+
-+ rq = cpu_rq(other_cpu);
-+ if (rq_idle(rq))
-+ continue;
-+ p = rq->curr;
-+ if (!smt_schedule(p, this_rq))
-+ resched_curr(rq);
-+ }
-+}
-+
-+static void wake_smt_siblings(struct rq *this_rq)
-+{
-+ int other_cpu;
-+
-+ for_each_cpu(other_cpu, &this_rq->thread_mask) {
-+ struct rq *rq;
-+
-+ rq = cpu_rq(other_cpu);
-+ if (rq_idle(rq))
-+ resched_idle(rq);
-+ }
-+}
-+#else
-+static void check_siblings(struct rq __maybe_unused *this_rq) {}
-+static void wake_siblings(struct rq __maybe_unused *this_rq) {}
-+#endif
-+
-+/*
-+ * schedule() is the main scheduler function.
-+ *
-+ * The main means of driving the scheduler and thus entering this function are:
-+ *
-+ * 1. Explicit blocking: mutex, semaphore, waitqueue, etc.
-+ *
-+ * 2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return
-+ * paths. For example, see arch/x86/entry_64.S.
-+ *
-+ * To drive preemption between tasks, the scheduler sets the flag in timer
-+ * interrupt handler scheduler_tick().
-+ *
-+ * 3. Wakeups don't really cause entry into schedule(). They add a
-+ * task to the run-queue and that's it.
-+ *
-+ * Now, if the new task added to the run-queue preempts the current
-+ * task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets
-+ * called on the nearest possible occasion:
-+ *
-+ * - If the kernel is preemptible (CONFIG_PREEMPT=y):
-+ *
-+ * - in syscall or exception context, at the next outermost
-+ * preempt_enable(). (this might be as soon as the wake_up()'s
-+ * spin_unlock()!)
-+ *
-+ * - in IRQ context, return from interrupt-handler to
-+ * preemptible context
-+ *
-+ * - If the kernel is not preemptible (CONFIG_PREEMPT is not set)
-+ * then at the next:
-+ *
-+ * - cond_resched() call
-+ * - explicit schedule() call
-+ * - return from syscall or exception to user-space
-+ * - return from interrupt-handler to user-space
-+ *
-+ * WARNING: must be called with preemption disabled!
-+ */
-+static void __sched notrace __schedule(bool preempt)
-+{
-+ struct task_struct *prev, *next, *idle;
-+ unsigned long *switch_count;
-+ bool deactivate = false;
-+ struct rq *rq;
-+ u64 niffies;
-+ int cpu;
-+
-+ cpu = smp_processor_id();
-+ rq = cpu_rq(cpu);
-+ prev = rq->curr;
-+ idle = rq->idle;
-+
-+ schedule_debug(prev);
-+
-+ local_irq_disable();
-+ rcu_note_context_switch(preempt);
-+
-+ /*
-+ * Make sure that signal_pending_state()->signal_pending() below
-+ * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE)
-+ * done by the caller to avoid the race with signal_wake_up().
-+ */
-+ rq_lock(rq);
-+ smp_mb__after_spinlock();
-+#ifdef CONFIG_SMP
-+ if (rq->preempt) {
-+ /*
-+ * Make sure resched_curr hasn't triggered a preemption
-+ * locklessly on a task that has since scheduled away. Spurious
-+ * wakeup of idle is okay though.
-+ */
-+ if (unlikely(preempt && prev != idle && !test_tsk_need_resched(prev))) {
-+ rq->preempt = NULL;
-+ clear_preempt_need_resched();
-+ rq_unlock_irq(rq);
-+ return;
-+ }
-+ rq->preempt = NULL;
-+ }
-+#endif
-+
-+ switch_count = &prev->nivcsw;
-+ if (!preempt && prev->state) {
-+ if (unlikely(signal_pending_state(prev->state, prev))) {
-+ prev->state = TASK_RUNNING;
-+ } else {
-+ deactivate = true;
-+ prev->on_rq = 0;
-+
-+ if (prev->in_iowait) {
-+ atomic_inc(&rq->nr_iowait);
-+ delayacct_blkio_start();
-+ }
-+
-+ /*
-+ * If a worker is going to sleep, notify and
-+ * ask workqueue whether it wants to wake up a
-+ * task to maintain concurrency. If so, wake
-+ * up the task.
-+ */
-+ if (prev->flags & PF_WQ_WORKER) {
-+ struct task_struct *to_wakeup;
-+
-+ to_wakeup = wq_worker_sleeping(prev);
-+ if (to_wakeup)
-+ try_to_wake_up_local(to_wakeup);
-+ }
-+ }
-+ switch_count = &prev->nvcsw;
-+ }
-+
-+ /*
-+ * Store the niffy value here for use by the next task's last_ran
-+ * below to avoid losing niffies due to update_clocks being called
-+ * again after this point.
-+ */
-+ update_clocks(rq);
-+ niffies = rq->niffies;
-+ update_cpu_clock_switch(rq, prev);
-+
-+ clear_tsk_need_resched(prev);
-+ clear_preempt_need_resched();
-+
-+ if (idle != prev) {
-+ check_deadline(prev, rq);
-+ return_task(prev, rq, cpu, deactivate);
-+ }
-+
-+ next = earliest_deadline_task(rq, cpu, idle);
-+ if (likely(next->prio != PRIO_LIMIT))
-+ clear_cpuidle_map(cpu);
-+ else {
-+ set_cpuidle_map(cpu);
-+ update_load_avg(rq, 0);
-+ }
-+
-+ set_rq_task(rq, next);
-+ next->last_ran = niffies;
-+
-+ if (likely(prev != next)) {
-+ /*
-+ * Don't reschedule an idle task or deactivated tasks
-+ */
-+ if (prev != idle && !deactivate)
-+ resched_suitable_idle(prev);
-+ if (next != idle)
-+ check_siblings(rq);
-+ else
-+ wake_siblings(rq);
-+ rq->nr_switches++;
-+ rq->curr = next;
-+ /*
-+ * The membarrier system call requires each architecture
-+ * to have a full memory barrier after updating
-+ * rq->curr, before returning to user-space. For TSO
-+ * (e.g. x86), the architecture must provide its own
-+ * barrier in switch_mm(). For weakly ordered machines
-+ * for which spin_unlock() acts as a full memory
-+ * barrier, finish_lock_switch() in common code takes
-+ * care of this barrier. For weakly ordered machines for
-+ * which spin_unlock() acts as a RELEASE barrier (only
-+ * arm64 and PowerPC), arm64 has a full barrier in
-+ * switch_to(), and PowerPC has
-+ * smp_mb__after_unlock_lock() before
-+ * finish_lock_switch().
-+ */
-+ ++*switch_count;
-+
-+ trace_sched_switch(preempt, prev, next);
-+ context_switch(rq, prev, next); /* unlocks the rq */
-+ } else {
-+ check_siblings(rq);
-+ rq_unlock(rq);
-+ do_pending_softirq(rq, next);
-+ local_irq_enable();
-+ }
-+}
-+
-+void __noreturn do_task_dead(void)
-+{
-+ /*
-+ * The setting of TASK_RUNNING by try_to_wake_up() may be delayed
-+ * when the following two conditions become true:
-+ * - There is a race condition on mmap_sem (it is acquired by
-+ * exit_mm()), and
-+ * - an SMI occurs before setting TASK_RUNNING
-+ * (or the hypervisor of a virtual machine switches to another guest).
-+ * As a result, we may become TASK_RUNNING after becoming TASK_DEAD.
-+ *
-+ * To avoid it, we have to wait for releasing tsk->pi_lock which
-+ * is held by try_to_wake_up()
-+ */
-+ raw_spin_lock_irq(&current->pi_lock);
-+ raw_spin_unlock_irq(&current->pi_lock);
-+
-+ /* Causes final put_task_struct in finish_task_switch(). */
-+ __set_current_state(TASK_DEAD);
-+
-+ /* Tell freezer to ignore us: */
-+ current->flags |= PF_NOFREEZE;
-+ __schedule(false);
-+ BUG();
-+
-+ /* Avoid "noreturn function does return" - but don't continue if BUG() is a NOP: */
-+ for (;;)
-+ cpu_relax();
-+}
-+
-+static inline void sched_submit_work(struct task_struct *tsk)
-+{
-+ if (!tsk->state || tsk_is_pi_blocked(tsk) ||
-+ preempt_count() ||
-+ signal_pending_state(tsk->state, tsk))
-+ return;
-+
-+ /*
-+ * If we are going to sleep and we have plugged IO queued,
-+ * make sure to submit it to avoid deadlocks.
-+ */
-+ if (blk_needs_flush_plug(tsk))
-+ blk_schedule_flush_plug(tsk);
-+}
-+
-+asmlinkage __visible void __sched schedule(void)
-+{
-+ struct task_struct *tsk = current;
-+
-+ sched_submit_work(tsk);
-+ do {
-+ preempt_disable();
-+ __schedule(false);
-+ sched_preempt_enable_no_resched();
-+ } while (need_resched());
-+}
-+
-+EXPORT_SYMBOL(schedule);
-+
-+/*
-+ * synchronize_rcu_tasks() makes sure that no task is stuck in preempted
-+ * state (have scheduled out non-voluntarily) by making sure that all
-+ * tasks have either left the run queue or have gone into user space.
-+ * As idle tasks do not do either, they must not ever be preempted
-+ * (schedule out non-voluntarily).
-+ *
-+ * schedule_idle() is similar to schedule_preempt_disabled() except that it
-+ * never enables preemption because it does not call sched_submit_work().
-+ */
-+void __sched schedule_idle(void)
-+{
-+ /*
-+ * As this skips calling sched_submit_work(), which the idle task does
-+ * regardless because that function is a nop when the task is in a
-+ * TASK_RUNNING state, make sure this isn't used someplace that the
-+ * current task can be in any other state. Note, idle is always in the
-+ * TASK_RUNNING state.
-+ */
-+ WARN_ON_ONCE(current->state);
-+ do {
-+ __schedule(false);
-+ } while (need_resched());
-+}
-+
-+#ifdef CONFIG_CONTEXT_TRACKING
-+asmlinkage __visible void __sched schedule_user(void)
-+{
-+ /*
-+ * If we come here after a random call to set_need_resched(),
-+ * or we have been woken up remotely but the IPI has not yet arrived,
-+ * we haven't yet exited the RCU idle mode. Do it here manually until
-+ * we find a better solution.
-+ *
-+ * NB: There are buggy callers of this function. Ideally we
-+ * should warn if prev_state != IN_USER, but that will trigger
-+ * too frequently to make sense yet.
-+ */
-+ enum ctx_state prev_state = exception_enter();
-+ schedule();
-+ exception_exit(prev_state);
-+}
-+#endif
-+
-+/**
-+ * schedule_preempt_disabled - called with preemption disabled
-+ *
-+ * Returns with preemption disabled. Note: preempt_count must be 1
-+ */
-+void __sched schedule_preempt_disabled(void)
-+{
-+ sched_preempt_enable_no_resched();
-+ schedule();
-+ preempt_disable();
-+}
-+
-+static void __sched notrace preempt_schedule_common(void)
-+{
-+ do {
-+ /*
-+ * Because the function tracer can trace preempt_count_sub()
-+ * and it also uses preempt_enable/disable_notrace(), if
-+ * NEED_RESCHED is set, the preempt_enable_notrace() called
-+ * by the function tracer will call this function again and
-+ * cause infinite recursion.
-+ *
-+ * Preemption must be disabled here before the function
-+ * tracer can trace. Break up preempt_disable() into two
-+ * calls. One to disable preemption without fear of being
-+ * traced. The other to still record the preemption latency,
-+ * which can also be traced by the function tracer.
-+ */
-+ preempt_disable_notrace();
-+ preempt_latency_start(1);
-+ __schedule(true);
-+ preempt_latency_stop(1);
-+ preempt_enable_no_resched_notrace();
-+
-+ /*
-+ * Check again in case we missed a preemption opportunity
-+ * between schedule and now.
-+ */
-+ } while (need_resched());
-+}
-+
-+#ifdef CONFIG_PREEMPT
-+/*
-+ * This is the entry point to schedule() from in-kernel preemption
-+ * off of preempt_enable(). Kernel preemptions off of return-from-interrupt
-+ * paths occur in preempt_schedule_irq() and call schedule() directly.
-+ */
-+asmlinkage __visible void __sched notrace preempt_schedule(void)
-+{
-+ /*
-+ * If there is a non-zero preempt_count or interrupts are disabled,
-+ * we do not want to preempt the current task. Just return..
-+ */
-+ if (likely(!preemptible()))
-+ return;
-+
-+ preempt_schedule_common();
-+}
-+NOKPROBE_SYMBOL(preempt_schedule);
-+EXPORT_SYMBOL(preempt_schedule);
-+
-+/**
-+ * preempt_schedule_notrace - preempt_schedule called by tracing
-+ *
-+ * The tracing infrastructure uses preempt_enable_notrace to prevent
-+ * recursion and tracing preempt enabling caused by the tracing
-+ * infrastructure itself. But as tracing can happen in areas coming
-+ * from userspace or just about to enter userspace, a preempt enable
-+ * can occur before user_exit() is called. This will cause the scheduler
-+ * to be called when the system is still in usermode.
-+ *
-+ * To prevent this, the preempt_enable_notrace will use this function
-+ * instead of preempt_schedule() to exit user context if needed before
-+ * calling the scheduler.
-+ */
-+asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
-+{
-+ enum ctx_state prev_ctx;
-+
-+ if (likely(!preemptible()))
-+ return;
-+
-+ do {
-+ /*
-+ * Because the function tracer can trace preempt_count_sub()
-+ * and it also uses preempt_enable/disable_notrace(), if
-+ * NEED_RESCHED is set, the preempt_enable_notrace() called
-+ * by the function tracer will call this function again and
-+ * cause infinite recursion.
-+ *
-+ * Preemption must be disabled here before the function
-+ * tracer can trace. Break up preempt_disable() into two
-+ * calls. One to disable preemption without fear of being
-+ * traced. The other to still record the preemption latency,
-+ * which can also be traced by the function tracer.
-+ */
-+ preempt_disable_notrace();
-+ preempt_latency_start(1);
-+ /*
-+ * Needs preempt disabled in case user_exit() is traced
-+ * and the tracer calls preempt_enable_notrace() causing
-+ * an infinite recursion.
-+ */
-+ prev_ctx = exception_enter();
-+ __schedule(true);
-+ exception_exit(prev_ctx);
-+
-+ preempt_latency_stop(1);
-+ preempt_enable_no_resched_notrace();
-+ } while (need_resched());
-+}
-+EXPORT_SYMBOL_GPL(preempt_schedule_notrace);
-+
-+#endif /* CONFIG_PREEMPT */
-+
-+/*
-+ * This is the entry point to schedule() from kernel preemption
-+ * off of IRQ context.
-+ * Note that this is called and returns with IRQs disabled. This
-+ * protects us against recursive calls from IRQ context.
-+ */
-+asmlinkage __visible void __sched preempt_schedule_irq(void)
-+{
-+ enum ctx_state prev_state;
-+
-+ /* Catch callers which need to be fixed */
-+ BUG_ON(preempt_count() || !irqs_disabled());
-+
-+ prev_state = exception_enter();
-+
-+ do {
-+ preempt_disable();
-+ local_irq_enable();
-+ __schedule(true);
-+ local_irq_disable();
-+ sched_preempt_enable_no_resched();
-+ } while (need_resched());
-+
-+ exception_exit(prev_state);
-+}
-+
-+int default_wake_function(wait_queue_entry_t *curr, unsigned mode, int wake_flags,
-+ void *key)
-+{
-+ return try_to_wake_up(curr->private, mode, wake_flags);
-+}
-+EXPORT_SYMBOL(default_wake_function);
-+
-+#ifdef CONFIG_RT_MUTEXES
-+
-+static inline int __rt_effective_prio(struct task_struct *pi_task, int prio)
-+{
-+ if (pi_task)
-+ prio = min(prio, pi_task->prio);
-+
-+ return prio;
-+}
-+
-+static inline int rt_effective_prio(struct task_struct *p, int prio)
-+{
-+ struct task_struct *pi_task = rt_mutex_get_top_task(p);
-+
-+ return __rt_effective_prio(pi_task, prio);
-+}
-+
-+/*
-+ * rt_mutex_setprio - set the current priority of a task
-+ * @p: task to boost
-+ * @pi_task: donor task
-+ *
-+ * This function changes the 'effective' priority of a task. It does
-+ * not touch ->normal_prio like __setscheduler().
-+ *
-+ * Used by the rt_mutex code to implement priority inheritance
-+ * logic. Call site only calls if the priority of the task changed.
-+ */
-+void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
-+{
-+ int prio, oldprio;
-+ struct rq *rq;
-+
-+ /* XXX used to be waiter->prio, not waiter->task->prio */
-+ prio = __rt_effective_prio(pi_task, p->normal_prio);
-+
-+ /*
-+ * If nothing changed; bail early.
-+ */
-+ if (p->pi_top_task == pi_task && prio == p->prio)
-+ return;
-+
-+ rq = __task_rq_lock(p);
-+ update_rq_clock(rq);
-+ /*
-+ * Set under pi_lock && rq->lock, such that the value can be used under
-+ * either lock.
-+ *
-+ * Note that there is plenty of trickiness in making this pointer cache work
-+ * right. rt_mutex_slowunlock()+rt_mutex_postunlock() work together to
-+ * ensure a task is de-boosted (pi_task is set to NULL) before the
-+ * task is allowed to run again (and can exit). This ensures the pointer
-+ * points to a blocked task -- which guarantees the task is present.
-+ */
-+ p->pi_top_task = pi_task;
-+
-+ /*
-+ * For FIFO/RR we only need to set prio, if that matches we're done.
-+ */
-+ if (prio == p->prio)
-+ goto out_unlock;
-+
-+ /*
-+ * Idle task boosting is a nono in general. There is one
-+ * exception, when PREEMPT_RT and NOHZ is active:
-+ *
-+ * The idle task calls get_next_timer_interrupt() and holds
-+ * the timer wheel base->lock on the CPU and another CPU wants
-+ * to access the timer (probably to cancel it). We can safely
-+ * ignore the boosting request, as the idle CPU runs this code
-+ * with interrupts disabled and will complete the lock
-+ * protected section without being interrupted. So there is no
-+ * real need to boost.
-+ */
-+ if (unlikely(p == rq->idle)) {
-+ WARN_ON(p != rq->curr);
-+ WARN_ON(p->pi_blocked_on);
-+ goto out_unlock;
-+ }
-+
-+ trace_sched_pi_setprio(p, pi_task);
-+ oldprio = p->prio;
-+ p->prio = prio;
-+ if (task_running(rq, p)) {
-+ if (prio > oldprio)
-+ resched_task(p);
-+ } else if (task_queued(p)) {
-+ dequeue_task(rq, p, DEQUEUE_SAVE);
-+ enqueue_task(rq, p, ENQUEUE_RESTORE);
-+ if (prio < oldprio)
-+ try_preempt(p, rq);
-+ }
-+out_unlock:
-+ __task_rq_unlock(rq);
-+}
-+#else
-+static inline int rt_effective_prio(struct task_struct *p, int prio)
-+{
-+ return prio;
-+}
-+#endif
-+
-+/*
-+ * Adjust the deadline for when the priority is to change, before it's
-+ * changed.
-+ */
-+static inline void adjust_deadline(struct task_struct *p, int new_prio)
-+{
-+ p->deadline += static_deadline_diff(new_prio) - task_deadline_diff(p);
-+}
-+
-+void set_user_nice(struct task_struct *p, long nice)
-+{
-+ int new_static, old_static;
-+ unsigned long flags;
-+ struct rq *rq;
-+
-+ if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE)
-+ return;
-+ new_static = NICE_TO_PRIO(nice);
-+ /*
-+ * We have to be careful, if called from sys_setpriority(),
-+ * the task might be in the middle of scheduling on another CPU.
-+ */
-+ rq = task_rq_lock(p, &flags);
-+ update_rq_clock(rq);
-+
-+ /*
-+ * The RT priorities are set via sched_setscheduler(), but we still
-+ * allow the 'normal' nice value to be set - but as expected
-+ * it won't have any effect on scheduling until the task becomes
-+ * SCHED_NORMAL/SCHED_BATCH again:
-+ */
-+ if (has_rt_policy(p)) {
-+ p->static_prio = new_static;
-+ goto out_unlock;
-+ }
-+
-+ adjust_deadline(p, new_static);
-+ old_static = p->static_prio;
-+ p->static_prio = new_static;
-+ p->prio = effective_prio(p);
-+
-+ if (task_queued(p)) {
-+ dequeue_task(rq, p, DEQUEUE_SAVE);
-+ enqueue_task(rq, p, ENQUEUE_RESTORE);
-+ if (new_static < old_static)
-+ try_preempt(p, rq);
-+ } else if (task_running(rq, p)) {
-+ set_rq_task(rq, p);
-+ if (old_static < new_static)
-+ resched_task(p);
-+ }
-+out_unlock:
-+ task_rq_unlock(rq, p, &flags);
-+}
-+EXPORT_SYMBOL(set_user_nice);
-+
-+/*
-+ * can_nice - check if a task can reduce its nice value
-+ * @p: task
-+ * @nice: nice value
-+ */
-+int can_nice(const struct task_struct *p, const int nice)
-+{
-+ /* Convert nice value [19,-20] to rlimit style value [1,40] */
-+ int nice_rlim = nice_to_rlimit(nice);
-+
-+ return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
-+ capable(CAP_SYS_NICE));
-+}
-+
-+#ifdef __ARCH_WANT_SYS_NICE
-+
-+/*
-+ * sys_nice - change the priority of the current process.
-+ * @increment: priority increment
-+ *
-+ * sys_setpriority is a more generic, but much slower function that
-+ * does similar things.
-+ */
-+SYSCALL_DEFINE1(nice, int, increment)
-+{
-+ long nice, retval;
-+
-+ /*
-+ * Setpriority might change our priority at the same moment.
-+ * We don't have to worry. Conceptually one call occurs first
-+ * and we have a single winner.
-+ */
-+
-+ increment = clamp(increment, -NICE_WIDTH, NICE_WIDTH);
-+ nice = task_nice(current) + increment;
-+
-+ nice = clamp_val(nice, MIN_NICE, MAX_NICE);
-+ if (increment < 0 && !can_nice(current, nice))
-+ return -EPERM;
-+
-+ retval = security_task_setnice(current, nice);
-+ if (retval)
-+ return retval;
-+
-+ set_user_nice(current, nice);
-+ return 0;
-+}
-+
-+#endif
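
From userspace the syscall above is reached via the nice() wrapper; a small sketch showing the permission asymmetry enforced by can_nice(). Note that glibc's nice() returns the new nice value, so -1 is ambiguous and errno must be checked:

/*
 * Illustrative sketch, not part of this patch.
 */
#include <errno.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        errno = 0;
        if (nice(5) == -1 && errno)     /* lowering priority always allowed */
                perror("nice(+5)");

        errno = 0;
        if (nice(-10) == -1 && errno)   /* likely EPERM without CAP_SYS_NICE */
                perror("nice(-10)");

        printf("resulting nice: %d\n", nice(0));
        return 0;
}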
-+
-+/**
-+ * task_prio - return the priority value of a given task.
-+ * @p: the task in question.
-+ *
-+ * Return: The priority value as seen by users in /proc.
-+ * RT tasks are offset by -100. Normal tasks are centered around 1, value goes
-+ * from 0 (SCHED_ISO) up to 82 (nice +19 SCHED_IDLEPRIO).
-+ */
-+int task_prio(const struct task_struct *p)
-+{
-+ int delta, prio = p->prio - MAX_RT_PRIO;
-+
-+ /* rt tasks and iso tasks */
-+ if (prio <= 0)
-+ goto out;
-+
-+ /* Convert to ms to avoid overflows */
-+ delta = NS_TO_MS(p->deadline - task_rq(p)->niffies);
-+ if (unlikely(delta < 0))
-+ delta = 0;
-+ delta = delta * 40 / ms_longest_deadline_diff();
-+ if (delta <= 80)
-+ prio += delta;
-+ if (idleprio_task(p))
-+ prio += 40;
-+out:
-+ return prio;
-+}
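-+
-+/*
-+ * Worked example (editorial): a SCHED_NORMAL task with p->prio one above
-+ * MAX_RT_PRIO whose deadline lies half of ms_longest_deadline_diff() in the
-+ * future gets delta = 40 / 2 = 20, so /proc reports priority 21; an
-+ * idleprio task would gain a further +40.
-+ */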
-+
-+/**
-+ * idle_cpu - is a given CPU idle currently?
-+ * @cpu: the processor in question.
-+ *
-+ * Return: 1 if the CPU is currently idle. 0 otherwise.
-+ */
-+int idle_cpu(int cpu)
-+{
-+ return cpu_curr(cpu) == cpu_rq(cpu)->idle;
-+}
-+
-+/**
-+ * idle_task - return the idle task for a given CPU.
-+ * @cpu: the processor in question.
-+ *
-+ * Return: The idle task for the CPU @cpu.
-+ */
-+struct task_struct *idle_task(int cpu)
-+{
-+ return cpu_rq(cpu)->idle;
-+}
-+
-+/**
-+ * find_process_by_pid - find a process with a matching PID value.
-+ * @pid: the pid in question.
-+ *
-+ * The task of @pid, if found. %NULL otherwise.
-+ */
-+static inline struct task_struct *find_process_by_pid(pid_t pid)
-+{
-+ return pid ? find_task_by_vpid(pid) : current;
-+}
-+
-+/* Actually do priority change: must hold rq lock. */
-+static void __setscheduler(struct task_struct *p, struct rq *rq, int policy,
-+ int prio, bool keep_boost)
-+{
-+ int oldrtprio, oldprio;
-+
-+ p->policy = policy;
-+ oldrtprio = p->rt_priority;
-+ p->rt_priority = prio;
-+ p->normal_prio = normal_prio(p);
-+ oldprio = p->prio;
-+ /*
-+ * Keep a potential priority boosting if called from
-+ * sched_setscheduler().
-+ */
-+ p->prio = normal_prio(p);
-+ if (keep_boost)
-+ p->prio = rt_effective_prio(p, p->prio);
-+
-+ if (task_running(rq, p)) {
-+ set_rq_task(rq, p);
-+ resched_task(p);
-+ } else if (task_queued(p)) {
-+ dequeue_task(rq, p, DEQUEUE_SAVE);
-+ enqueue_task(rq, p, ENQUEUE_RESTORE);
-+ if (p->prio < oldprio || p->rt_priority > oldrtprio)
-+ try_preempt(p, rq);
-+ }
-+}
-+
-+/*
-+ * Check the target process has a UID that matches the current process's
-+ */
-+static bool check_same_owner(struct task_struct *p)
-+{
-+ const struct cred *cred = current_cred(), *pcred;
-+ bool match;
-+
-+ rcu_read_lock();
-+ pcred = __task_cred(p);
-+ match = (uid_eq(cred->euid, pcred->euid) ||
-+ uid_eq(cred->euid, pcred->uid));
-+ rcu_read_unlock();
-+ return match;
-+}
-+
-+static int
-+__sched_setscheduler(struct task_struct *p, int policy,
-+ const struct sched_param *param, bool user, bool pi)
-+{
-+ struct sched_param zero_param = { .sched_priority = 0 };
-+ unsigned long flags, rlim_rtprio = 0;
-+ int retval, oldpolicy = -1;
-+ int reset_on_fork;
-+ struct rq *rq;
-+
-+ /* The pi code expects interrupts enabled */
-+ BUG_ON(pi && in_interrupt());
-+
-+ if (is_rt_policy(policy) && !capable(CAP_SYS_NICE)) {
-+ unsigned long lflags;
-+
-+ if (!lock_task_sighand(p, &lflags))
-+ return -ESRCH;
-+ rlim_rtprio = task_rlimit(p, RLIMIT_RTPRIO);
-+ unlock_task_sighand(p, &lflags);
-+ if (rlim_rtprio)
-+ goto recheck;
-+ /*
-+ * If the caller requested an RT policy without having the
-+ * necessary rights, we downgrade the policy to SCHED_ISO.
-+ * We also set the parameter to zero to pass the checks.
-+ */
-+ policy = SCHED_ISO;
-+ param = &zero_param;
-+ }
-+recheck:
-+ /* Double check policy once rq lock held */
-+ if (policy < 0) {
-+ reset_on_fork = p->sched_reset_on_fork;
-+ policy = oldpolicy = p->policy;
-+ } else {
-+ reset_on_fork = !!(policy & SCHED_RESET_ON_FORK);
-+ policy &= ~SCHED_RESET_ON_FORK;
-+
-+ if (!SCHED_RANGE(policy))
-+ return -EINVAL;
-+ }
-+
-+ /*
-+ * Valid priorities for SCHED_FIFO and SCHED_RR are
-+ * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL and
-+ * SCHED_BATCH is 0.
-+ */
-+ if (param->sched_priority < 0 ||
-+ (p->mm && param->sched_priority > MAX_USER_RT_PRIO - 1) ||
-+ (!p->mm && param->sched_priority > MAX_RT_PRIO - 1))
-+ return -EINVAL;
-+ if (is_rt_policy(policy) != (param->sched_priority != 0))
-+ return -EINVAL;
-+
-+ /*
-+ * Allow unprivileged RT tasks to decrease priority:
-+ */
-+ if (user && !capable(CAP_SYS_NICE)) {
-+ if (is_rt_policy(policy)) {
-+ unsigned long rlim_rtprio =
-+ task_rlimit(p, RLIMIT_RTPRIO);
-+
-+ /* Can't set/change the rt policy */
-+ if (policy != p->policy && !rlim_rtprio)
-+ return -EPERM;
-+
-+ /* Can't increase priority */
-+ if (param->sched_priority > p->rt_priority &&
-+ param->sched_priority > rlim_rtprio)
-+ return -EPERM;
-+ } else {
-+ switch (p->policy) {
-+ /*
-+ * Can only downgrade policies but not back to
-+ * SCHED_NORMAL
-+ */
-+ case SCHED_ISO:
-+ if (policy == SCHED_ISO)
-+ goto out;
-+ if (policy != SCHED_NORMAL)
-+ return -EPERM;
-+ break;
-+ case SCHED_BATCH:
-+ if (policy == SCHED_BATCH)
-+ goto out;
-+ if (policy != SCHED_IDLEPRIO)
-+ return -EPERM;
-+ break;
-+ case SCHED_IDLEPRIO:
-+ if (policy == SCHED_IDLEPRIO)
-+ goto out;
-+ return -EPERM;
-+ default:
-+ break;
-+ }
-+ }
-+
-+ /* Can't change other user's priorities */
-+ if (!check_same_owner(p))
-+ return -EPERM;
-+
-+ /* Normal users shall not reset the sched_reset_on_fork flag: */
-+ if (p->sched_reset_on_fork && !reset_on_fork)
-+ return -EPERM;
-+ }
-+
-+ if (user) {
-+ retval = security_task_setscheduler(p);
-+ if (retval)
-+ return retval;
-+ }
-+
-+ /*
-+ * Make sure no PI-waiters arrive (or leave) while we are
-+ * changing the priority of the task:
-+ *
-+ * To be able to change p->policy safely, the runqueue lock must be
-+ * held.
-+ */
-+ rq = task_rq_lock(p, &flags);
-+ update_rq_clock(rq);
-+
-+ /*
-+ * Changing the policy of the stop thread is a very bad idea:
-+ */
-+ if (p == rq->stop) {
-+ task_rq_unlock(rq, p, &flags);
-+ return -EINVAL;
-+ }
-+
-+ /*
-+ * If not changing anything there's no need to proceed further:
-+ */
-+ if (unlikely(policy == p->policy && (!is_rt_policy(policy) ||
-+ param->sched_priority == p->rt_priority))) {
-+ task_rq_unlock(rq, p, &flags);
-+ return 0;
-+ }
-+
-+ /* Re-check policy now with rq lock held */
-+ if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
-+ policy = oldpolicy = -1;
-+ task_rq_unlock(rq, p, &flags);
-+ goto recheck;
-+ }
-+ p->sched_reset_on_fork = reset_on_fork;
-+
-+ __setscheduler(p, rq, policy, param->sched_priority, pi);
-+ task_rq_unlock(rq, p, &flags);
-+
-+ if (pi)
-+ rt_mutex_adjust_pi(p);
-+out:
-+ return 0;
-+}
-+
-+/**
-+ * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
-+ * @p: the task in question.
-+ * @policy: new policy.
-+ * @param: structure containing the new RT priority.
-+ *
-+ * Return: 0 on success. An error code otherwise.
-+ *
-+ * NOTE that the task may be already dead.
-+ */
-+int sched_setscheduler(struct task_struct *p, int policy,
-+ const struct sched_param *param)
-+{
-+ return __sched_setscheduler(p, policy, param, true, true);
-+}
-+
-+EXPORT_SYMBOL_GPL(sched_setscheduler);
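-+
-+/*
-+ * Usage sketch (editorial): typical in-kernel call to make a thread
-+ * realtime; the priority value is illustrative:
-+ *
-+ *	struct sched_param sp = { .sched_priority = 50 };
-+ *	sched_setscheduler(task, SCHED_FIFO, &sp);
-+ */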
-+
-+int sched_setattr(struct task_struct *p, const struct sched_attr *attr)
-+{
-+ const struct sched_param param = { .sched_priority = attr->sched_priority };
-+ int policy = attr->sched_policy;
-+
-+ return __sched_setscheduler(p, policy, &param, true, true);
-+}
-+EXPORT_SYMBOL_GPL(sched_setattr);
-+
-+/**
-+ * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
-+ * @p: the task in question.
-+ * @policy: new policy.
-+ * @param: structure containing the new RT priority.
-+ *
-+ * Just like sched_setscheduler, only don't bother checking if the
-+ * current context has permission. For example, this is needed in
-+ * stop_machine(): we create temporary high priority worker threads,
-+ * but our caller might not have that capability.
-+ *
-+ * Return: 0 on success. An error code otherwise.
-+ */
-+int sched_setscheduler_nocheck(struct task_struct *p, int policy,
-+ const struct sched_param *param)
-+{
-+ return __sched_setscheduler(p, policy, param, false, true);
-+}
-+EXPORT_SYMBOL_GPL(sched_setscheduler_nocheck);
-+
-+static int
-+do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
-+{
-+ struct sched_param lparam;
-+ struct task_struct *p;
-+ int retval;
-+
-+ if (!param || pid < 0)
-+ return -EINVAL;
-+ if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
-+ return -EFAULT;
-+
-+ rcu_read_lock();
-+ retval = -ESRCH;
-+ p = find_process_by_pid(pid);
-+ if (p != NULL)
-+ retval = sched_setscheduler(p, policy, &lparam);
-+ rcu_read_unlock();
-+
-+ return retval;
-+}
-+
-+/*
-+ * Mimics kernel/events/core.c perf_copy_attr().
-+ */
-+static int sched_copy_attr(struct sched_attr __user *uattr,
-+ struct sched_attr *attr)
-+{
-+ u32 size;
-+ int ret;
-+
-+ if (!access_ok(VERIFY_WRITE, uattr, SCHED_ATTR_SIZE_VER0))
-+ return -EFAULT;
-+
-+ /* Zero the full structure, so that a short copy will be nice: */
-+ memset(attr, 0, sizeof(*attr));
-+
-+ ret = get_user(size, &uattr->size);
-+ if (ret)
-+ return ret;
-+
-+ /* Bail out on silly large: */
-+ if (size > PAGE_SIZE)
-+ goto err_size;
-+
-+ /* ABI compatibility quirk: */
-+ if (!size)
-+ size = SCHED_ATTR_SIZE_VER0;
-+
-+ if (size < SCHED_ATTR_SIZE_VER0)
-+ goto err_size;
-+
-+ /*
-+ * If we're handed a bigger struct than we know of,
-+ * ensure all the unknown bits are 0 - i.e. new
-+ * user-space does not rely on any kernel feature
-+ * extensions we don't know about yet.
-+ */
-+ if (size > sizeof(*attr)) {
-+ unsigned char __user *addr;
-+ unsigned char __user *end;
-+ unsigned char val;
-+
-+ addr = (void __user *)uattr + sizeof(*attr);
-+ end = (void __user *)uattr + size;
-+
-+ for (; addr < end; addr++) {
-+ ret = get_user(val, addr);
-+ if (ret)
-+ return ret;
-+ if (val)
-+ goto err_size;
-+ }
-+ size = sizeof(*attr);
-+ }
-+
-+ ret = copy_from_user(attr, uattr, size);
-+ if (ret)
-+ return -EFAULT;
-+
-+ /*
-+ * XXX: Do we want to be lenient like existing syscalls; or do we want
-+ * to be strict and return an error on out-of-bounds values?
-+ */
-+ attr->sched_nice = clamp(attr->sched_nice, -20, 19);
-+
-+ /* sched/core.c uses zero here but we already know ret is zero */
-+ return 0;
-+
-+err_size:
-+ put_user(sizeof(*attr), &uattr->size);
-+ return -E2BIG;
-+}
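-+
-+/*
-+ * ABI note (editorial): newer userspace passing a sched_attr larger than
-+ * the kernel's must zero the trailing bytes; otherwise the tail scan above
-+ * fails the call with -E2BIG after writing the kernel's sizeof(*attr) back
-+ * to uattr->size, letting the caller retry with a truncated struct.
-+ */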
-+
-+/*
-+ * sched_setparam() passes in -1 for its policy, to let the functions
-+ * it calls know not to change it.
-+ */
-+#define SETPARAM_POLICY -1
-+
-+/**
-+ * sys_sched_setscheduler - set/change the scheduler policy and RT priority
-+ * @pid: the pid in question.
-+ * @policy: new policy.
-+ * @param: structure containing the new RT priority.
-+ *
-+ * Return: 0 on success. An error code otherwise.
-+ */
-+SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy, struct sched_param __user *, param)
-+{
-+ if (policy < 0)
-+ return -EINVAL;
-+
-+ return do_sched_setscheduler(pid, policy, param);
-+}
-+
-+/**
-+ * sys_sched_setparam - set/change the RT priority of a thread
-+ * @pid: the pid in question.
-+ * @param: structure containing the new RT priority.
-+ *
-+ * Return: 0 on success. An error code otherwise.
-+ */
-+SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
-+{
-+ return do_sched_setscheduler(pid, SETPARAM_POLICY, param);
-+}
-+
-+/**
-+ * sys_sched_setattr - same as above, but with extended sched_attr
-+ * @pid: the pid in question.
-+ * @uattr: structure containing the extended parameters.
-+ * @flags: for future extension.
-+ */
-+SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr,
-+ unsigned int, flags)
-+{
-+ struct sched_attr attr;
-+ struct task_struct *p;
-+ int retval;
-+
-+ if (!uattr || pid < 0 || flags)
-+ return -EINVAL;
-+
-+ retval = sched_copy_attr(uattr, &attr);
-+ if (retval)
-+ return retval;
-+
-+ if ((int)attr.sched_policy < 0)
-+ return -EINVAL;
-+
-+ rcu_read_lock();
-+ retval = -ESRCH;
-+ p = find_process_by_pid(pid);
-+ if (p != NULL)
-+ retval = sched_setattr(p, &attr);
-+ rcu_read_unlock();
-+
-+ return retval;
-+}
-+
-+/**
-+ * sys_sched_getscheduler - get the policy (scheduling class) of a thread
-+ * @pid: the pid in question.
-+ *
-+ * Return: On success, the policy of the thread. Otherwise, a negative error
-+ * code.
-+ */
-+SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
-+{
-+ struct task_struct *p;
-+ int retval = -EINVAL;
-+
-+ if (pid < 0)
-+ goto out_nounlock;
-+
-+ retval = -ESRCH;
-+ rcu_read_lock();
-+ p = find_process_by_pid(pid);
-+ if (p) {
-+ retval = security_task_getscheduler(p);
-+ if (!retval)
-+ retval = p->policy;
-+ }
-+ rcu_read_unlock();
-+
-+out_nounlock:
-+ return retval;
-+}
-+
-+/**
-+ * sys_sched_getparam - get the RT priority of a thread
-+ * @pid: the pid in question.
-+ * @param: structure containing the RT priority.
-+ *
-+ * Return: On success, 0 and the RT priority is in @param. Otherwise, an error
-+ * code.
-+ */
-+SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
-+{
-+ struct sched_param lp = { .sched_priority = 0 };
-+ struct task_struct *p;
-+ int retval = -EINVAL;
-+
-+ if (!param || pid < 0)
-+ goto out_nounlock;
-+
-+ rcu_read_lock();
-+ p = find_process_by_pid(pid);
-+ retval = -ESRCH;
-+ if (!p)
-+ goto out_unlock;
-+
-+ retval = security_task_getscheduler(p);
-+ if (retval)
-+ goto out_unlock;
-+
-+ if (has_rt_policy(p))
-+ lp.sched_priority = p->rt_priority;
-+ rcu_read_unlock();
-+
-+ /*
-+ * This one might sleep, we cannot do it with a spinlock held ...
-+ */
-+ retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
-+
-+out_nounlock:
-+ return retval;
-+
-+out_unlock:
-+ rcu_read_unlock();
-+ return retval;
-+}
-+
-+static int sched_read_attr(struct sched_attr __user *uattr,
-+ struct sched_attr *attr,
-+ unsigned int usize)
-+{
-+ int ret;
-+
-+ if (!access_ok(VERIFY_WRITE, uattr, usize))
-+ return -EFAULT;
-+
-+ /*
-+ * If we're handed a smaller struct than we know of,
-+ * ensure all the unknown bits are 0 - i.e. old
-+ * user-space does not get incomplete information.
-+ */
-+ if (usize < sizeof(*attr)) {
-+ unsigned char *addr;
-+ unsigned char *end;
-+
-+ addr = (void *)attr + usize;
-+ end = (void *)attr + sizeof(*attr);
-+
-+ for (; addr < end; addr++) {
-+ if (*addr)
-+ return -EFBIG;
-+ }
-+
-+ attr->size = usize;
-+ }
-+
-+ ret = copy_to_user(uattr, attr, attr->size);
-+ if (ret)
-+ return -EFAULT;
-+
-+ /* sched/core.c uses zero here but we already know ret is zero */
-+ return ret;
-+}
-+
-+/**
-+ * sys_sched_getattr - similar to sched_getparam, but with sched_attr
-+ * @pid: the pid in question.
-+ * @uattr: structure containing the extended parameters.
-+ * @size: sizeof(attr) for fwd/bwd comp.
-+ * @flags: for future extension.
-+ */
-+SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
-+ unsigned int, size, unsigned int, flags)
-+{
-+ struct sched_attr attr = {
-+ .size = sizeof(struct sched_attr),
-+ };
-+ struct task_struct *p;
-+ int retval;
-+
-+ if (!uattr || pid < 0 || size > PAGE_SIZE ||
-+ size < SCHED_ATTR_SIZE_VER0 || flags)
-+ return -EINVAL;
-+
-+ rcu_read_lock();
-+ p = find_process_by_pid(pid);
-+ retval = -ESRCH;
-+ if (!p)
-+ goto out_unlock;
-+
-+ retval = security_task_getscheduler(p);
-+ if (retval)
-+ goto out_unlock;
-+
-+ attr.sched_policy = p->policy;
-+ if (rt_task(p))
-+ attr.sched_priority = p->rt_priority;
-+ else
-+ attr.sched_nice = task_nice(p);
-+
-+ rcu_read_unlock();
-+
-+ retval = sched_read_attr(uattr, &attr, size);
-+ return retval;
-+
-+out_unlock:
-+ rcu_read_unlock();
-+ return retval;
-+}
-+
-+long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
-+{
-+ cpumask_var_t cpus_allowed, new_mask;
-+ struct task_struct *p;
-+ int retval;
-+
-+ rcu_read_lock();
-+
-+ p = find_process_by_pid(pid);
-+ if (!p) {
-+ rcu_read_unlock();
-+ return -ESRCH;
-+ }
-+
-+ /* Prevent p going away */
-+ get_task_struct(p);
-+ rcu_read_unlock();
-+
-+ if (p->flags & PF_NO_SETAFFINITY) {
-+ retval = -EINVAL;
-+ goto out_put_task;
-+ }
-+ if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
-+ retval = -ENOMEM;
-+ goto out_put_task;
-+ }
-+ if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
-+ retval = -ENOMEM;
-+ goto out_free_cpus_allowed;
-+ }
-+ retval = -EPERM;
-+ if (!check_same_owner(p)) {
-+ rcu_read_lock();
-+ if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) {
-+ rcu_read_unlock();
-+ goto out_unlock;
-+ }
-+ rcu_read_unlock();
-+ }
-+
-+ retval = security_task_setscheduler(p);
-+ if (retval)
-+ goto out_unlock;
-+
-+ cpuset_cpus_allowed(p, cpus_allowed);
-+ cpumask_and(new_mask, in_mask, cpus_allowed);
-+again:
-+ retval = __set_cpus_allowed_ptr(p, new_mask, true);
-+
-+ if (!retval) {
-+ cpuset_cpus_allowed(p, cpus_allowed);
-+ if (!cpumask_subset(new_mask, cpus_allowed)) {
-+ /*
-+ * We must have raced with a concurrent cpuset
-+ * update. Just reset the cpus_allowed to the
-+ * cpuset's cpus_allowed
-+ */
-+ cpumask_copy(new_mask, cpus_allowed);
-+ goto again;
-+ }
-+ }
-+out_unlock:
-+ free_cpumask_var(new_mask);
-+out_free_cpus_allowed:
-+ free_cpumask_var(cpus_allowed);
-+out_put_task:
-+ put_task_struct(p);
-+ return retval;
-+}
-+
-+static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
-+ cpumask_t *new_mask)
-+{
-+ if (len < cpumask_size())
-+ cpumask_clear(new_mask);
-+ else if (len > cpumask_size())
-+ len = cpumask_size();
-+
-+ return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
-+}
-+
-+
-+/**
-+ * sys_sched_setaffinity - set the CPU affinity of a process
-+ * @pid: pid of the process
-+ * @len: length in bytes of the bitmask pointed to by user_mask_ptr
-+ * @user_mask_ptr: user-space pointer to the new CPU mask
-+ *
-+ * Return: 0 on success. An error code otherwise.
-+ */
-+SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
-+ unsigned long __user *, user_mask_ptr)
-+{
-+ cpumask_var_t new_mask;
-+ int retval;
-+
-+ if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
-+ return -ENOMEM;
-+
-+ retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
-+ if (retval == 0)
-+ retval = sched_setaffinity(pid, new_mask);
-+ free_cpumask_var(new_mask);
-+ return retval;
-+}
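-+
-+/*
-+ * Userspace sketch (editorial; illustrative only): pinning the calling
-+ * thread to CPU 2 through the glibc wrapper for this syscall:
-+ *
-+ *	cpu_set_t set;
-+ *	CPU_ZERO(&set);
-+ *	CPU_SET(2, &set);
-+ *	if (sched_setaffinity(0, sizeof(set), &set))
-+ *		perror("sched_setaffinity");
-+ */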
-+
-+long sched_getaffinity(pid_t pid, cpumask_t *mask)
-+{
-+ struct task_struct *p;
-+ unsigned long flags;
-+ int retval;
-+
-+ get_online_cpus();
-+ rcu_read_lock();
-+
-+ retval = -ESRCH;
-+ p = find_process_by_pid(pid);
-+ if (!p)
-+ goto out_unlock;
-+
-+ retval = security_task_getscheduler(p);
-+ if (retval)
-+ goto out_unlock;
-+
-+ raw_spin_lock_irqsave(&p->pi_lock, flags);
-+ cpumask_and(mask, &p->cpus_allowed, cpu_active_mask);
-+ raw_spin_unlock_irqrestore(&p->pi_lock, flags);
-+
-+out_unlock:
-+ rcu_read_unlock();
-+ put_online_cpus();
-+
-+ return retval;
-+}
-+
-+/**
-+ * sys_sched_getaffinity - get the CPU affinity of a process
-+ * @pid: pid of the process
-+ * @len: length in bytes of the bitmask pointed to by user_mask_ptr
-+ * @user_mask_ptr: user-space pointer to hold the current CPU mask
-+ *
-+ * Return: size of CPU mask copied to user_mask_ptr on success. An error
-+ * code otherwise.
-+ */
-+SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
-+ unsigned long __user *, user_mask_ptr)
-+{
-+ int ret;
-+ cpumask_var_t mask;
-+
-+ if ((len * BITS_PER_BYTE) < nr_cpu_ids)
-+ return -EINVAL;
-+ if (len & (sizeof(unsigned long)-1))
-+ return -EINVAL;
-+
-+ if (!alloc_cpumask_var(&mask, GFP_KERNEL))
-+ return -ENOMEM;
-+
-+ ret = sched_getaffinity(pid, mask);
-+ if (ret == 0) {
-+ size_t retlen = min_t(size_t, len, cpumask_size());
-+
-+ if (copy_to_user(user_mask_ptr, mask, retlen))
-+ ret = -EFAULT;
-+ else
-+ ret = retlen;
-+ }
-+ free_cpumask_var(mask);
-+
-+ return ret;
-+}
-+
-+/**
-+ * sys_sched_yield - yield the current processor to other threads.
-+ *
-+ * This function yields the current CPU to other tasks. It does this by
-+ * scheduling away the current task. If it still has the earliest deadline
-+ * it will be scheduled again as the next task.
-+ *
-+ * Return: 0.
-+ */
-+SYSCALL_DEFINE0(sched_yield)
-+{
-+ struct rq *rq;
-+
-+ if (!sched_yield_type)
-+ goto out;
-+
-+ local_irq_disable();
-+ rq = this_rq();
-+ rq_lock(rq);
-+
-+ if (sched_yield_type > 1)
-+ time_slice_expired(current, rq);
-+ schedstat_inc(rq->yld_count);
-+
-+ /*
-+ * Since we are going to call schedule() anyway, there's
-+ * no need to preempt or enable interrupts:
-+ */
-+ preempt_disable();
-+ rq_unlock(rq);
-+ sched_preempt_enable_no_resched();
-+
-+ schedule();
-+out:
-+ return 0;
-+}
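-+
-+/*
-+ * Tunable note (editorial): per the checks above, a sched_yield_type of 0
-+ * turns sched_yield() into a no-op, while values above 1 also expire the
-+ * caller's timeslice first. The knob is exposed elsewhere in this patch
-+ * set (registration not shown here), conventionally as
-+ * /proc/sys/kernel/yield_type.
-+ */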
-+
-+#ifndef CONFIG_PREEMPT
-+int __sched _cond_resched(void)
-+{
-+ if (should_resched(0)) {
-+ preempt_schedule_common();
-+ return 1;
-+ }
-+ return 0;
-+}
-+EXPORT_SYMBOL(_cond_resched);
-+#endif
-+
-+/*
-+ * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
-+ * call schedule, and on return reacquire the lock.
-+ *
-+ * This works OK both with and without CONFIG_PREEMPT. We do strange low-level
-+ * operations here to prevent schedule() from being called twice (once via
-+ * spin_unlock(), once by hand).
-+ */
-+int __cond_resched_lock(spinlock_t *lock)
-+{
-+ int resched = should_resched(PREEMPT_LOCK_OFFSET);
-+ int ret = 0;
-+
-+ lockdep_assert_held(lock);
-+
-+ if (spin_needbreak(lock) || resched) {
-+ spin_unlock(lock);
-+ if (resched)
-+ preempt_schedule_common();
-+ else
-+ cpu_relax();
-+ ret = 1;
-+ spin_lock(lock);
-+ }
-+ return ret;
-+}
-+EXPORT_SYMBOL(__cond_resched_lock);
-+
-+int __sched __cond_resched_softirq(void)
-+{
-+ BUG_ON(!in_softirq());
-+
-+ if (should_resched(SOFTIRQ_DISABLE_OFFSET)) {
-+ local_bh_enable();
-+ preempt_schedule_common();
-+ local_bh_disable();
-+ return 1;
-+ }
-+ return 0;
-+}
-+EXPORT_SYMBOL(__cond_resched_softirq);
-+
-+/**
-+ * yield - yield the current processor to other threads.
-+ *
-+ * Do not ever use this function, there's a 99% chance you're doing it wrong.
-+ *
-+ * The scheduler is at all times free to pick the calling task as the most
-+ * eligible task to run, if removing the yield() call from your code breaks
-+ * it, it's already broken.
-+ *
-+ * Typical broken usage is:
-+ *
-+ * while (!event)
-+ * yield();
-+ *
-+ * where one assumes that yield() will let 'the other' process run that will
-+ * make event true. If the current task is a SCHED_FIFO task that will never
-+ * happen. Never use yield() as a progress guarantee!!
-+ *
-+ * If you want to use yield() to wait for something, use wait_event().
-+ * If you want to use yield() to be 'nice' for others, use cond_resched().
-+ * If you still want to use yield(), do not!
-+ */
-+void __sched yield(void)
-+{
-+ set_current_state(TASK_RUNNING);
-+ sys_sched_yield();
-+}
-+EXPORT_SYMBOL(yield);
-+
-+/**
-+ * yield_to - yield the current processor to another thread in
-+ * your thread group, or accelerate that thread toward the
-+ * processor it's on.
-+ * @p: target task
-+ * @preempt: whether task preemption is allowed or not
-+ *
-+ * It's the caller's job to ensure that the target task struct
-+ * can't go away on us before we can do any checks.
-+ *
-+ * Return:
-+ * true (>0) if we indeed boosted the target task.
-+ * false (0) if we failed to boost the target.
-+ * -ESRCH if there's no task to yield to.
-+ */
-+int __sched yield_to(struct task_struct *p, bool preempt)
-+{
-+ struct task_struct *rq_p;
-+ struct rq *rq, *p_rq;
-+ unsigned long flags;
-+ int yielded = 0;
-+
-+ local_irq_save(flags);
-+ rq = this_rq();
-+
-+again:
-+ p_rq = task_rq(p);
-+ /*
-+ * If the target task is already running, or is not runnable at all,
-+ * there is nothing to yield to.
-+ */
-+ if (task_running(p_rq, p) || p->state) {
-+ yielded = -ESRCH;
-+ goto out_irq;
-+ }
-+
-+ double_rq_lock(rq, p_rq);
-+ if (unlikely(task_rq(p) != p_rq)) {
-+ double_rq_unlock(rq, p_rq);
-+ goto again;
-+ }
-+
-+ yielded = 1;
-+ schedstat_inc(rq->yld_count);
-+ rq_p = rq->curr;
-+ if (p->deadline > rq_p->deadline)
-+ p->deadline = rq_p->deadline;
-+ p->time_slice += rq_p->time_slice;
-+ if (p->time_slice > timeslice())
-+ p->time_slice = timeslice();
-+ time_slice_expired(rq_p, rq);
-+ if (preempt && rq != p_rq)
-+ resched_task(p_rq->curr);
-+ double_rq_unlock(rq, p_rq);
-+out_irq:
-+ local_irq_restore(flags);
-+
-+ if (yielded > 0)
-+ schedule();
-+ return yielded;
-+}
-+EXPORT_SYMBOL_GPL(yield_to);
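-+
-+/*
-+ * Usage sketch (editorial): the canonical caller is KVM, which directs a
-+ * spinning vcpu at the lock holder's task, roughly:
-+ *
-+ *	task = get_pid_task(pid, PIDTYPE_PID);
-+ *	if (task) {
-+ *		yield_to(task, 1);
-+ *		put_task_struct(task);
-+ *	}
-+ */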
-+
-+int io_schedule_prepare(void)
-+{
-+ int old_iowait = current->in_iowait;
-+
-+ current->in_iowait = 1;
-+ blk_schedule_flush_plug(current);
-+
-+ return old_iowait;
-+}
-+
-+void io_schedule_finish(int token)
-+{
-+ current->in_iowait = token;
-+}
-+
-+/*
-+ * This task is about to go to sleep on IO. Increment rq->nr_iowait so
-+ * that process accounting knows that this is a task in IO wait state.
-+ *
-+ * But don't do that if it is a deliberate, throttling IO wait (this task
-+ * has set its backing_dev_info: the queue against which it should throttle)
-+ */
-+
-+long __sched io_schedule_timeout(long timeout)
-+{
-+ int token;
-+ long ret;
-+
-+ token = io_schedule_prepare();
-+ ret = schedule_timeout(timeout);
-+ io_schedule_finish(token);
-+
-+ return ret;
-+}
-+EXPORT_SYMBOL(io_schedule_timeout);
-+
-+void io_schedule(void)
-+{
-+ int token;
-+
-+ token = io_schedule_prepare();
-+ schedule();
-+ io_schedule_finish(token);
-+}
-+EXPORT_SYMBOL(io_schedule);
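-+
-+/*
-+ * Usage sketch (editorial): callers that sleep on storage through some
-+ * other primitive can open-code the prepare/finish pair, in the style of
-+ * mutex_lock_io():
-+ *
-+ *	int token = io_schedule_prepare();
-+ *	wait_for_completion(&done);
-+ *	io_schedule_finish(token);
-+ */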
-+
-+/**
-+ * sys_sched_get_priority_max - return maximum RT priority.
-+ * @policy: scheduling class.
-+ *
-+ * Return: On success, this syscall returns the maximum
-+ * rt_priority that can be used by a given scheduling class.
-+ * On failure, a negative error code is returned.
-+ */
-+SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
-+{
-+ int ret = -EINVAL;
-+
-+ switch (policy) {
-+ case SCHED_FIFO:
-+ case SCHED_RR:
-+ ret = MAX_USER_RT_PRIO-1;
-+ break;
-+ case SCHED_NORMAL:
-+ case SCHED_BATCH:
-+ case SCHED_ISO:
-+ case SCHED_IDLEPRIO:
-+ ret = 0;
-+ break;
-+ }
-+ return ret;
-+}
-+
-+/**
-+ * sys_sched_get_priority_min - return minimum RT priority.
-+ * @policy: scheduling class.
-+ *
-+ * Return: On success, this syscall returns the minimum
-+ * rt_priority that can be used by a given scheduling class.
-+ * On failure, a negative error code is returned.
-+ */
-+SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
-+{
-+ int ret = -EINVAL;
-+
-+ switch (policy) {
-+ case SCHED_FIFO:
-+ case SCHED_RR:
-+ ret = 1;
-+ break;
-+ case SCHED_NORMAL:
-+ case SCHED_BATCH:
-+ case SCHED_ISO:
-+ case SCHED_IDLEPRIO:
-+ ret = 0;
-+ break;
-+ }
-+ return ret;
-+}
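-+
-+/*
-+ * Userspace sketch (editorial; illustrative only): querying the valid RT
-+ * range before calling sched_setscheduler(2):
-+ *
-+ *	int max = sched_get_priority_max(SCHED_FIFO); // 99 when MAX_USER_RT_PRIO is 100
-+ *	int min = sched_get_priority_min(SCHED_FIFO); // 1
-+ */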
-+
-+/**
-+ * sys_sched_rr_get_interval - return the default timeslice of a process.
-+ * @pid: pid of the process.
-+ * @interval: userspace pointer to the timeslice value.
-+ *
-+ * Return: On success, 0 and the timeslice is in @interval. Otherwise,
-+ * an error code.
-+ */
-+SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
-+ struct timespec __user *, interval)
-+{
-+ struct task_struct *p;
-+ unsigned int time_slice;
-+ unsigned long flags;
-+ struct timespec t;
-+ struct rq *rq;
-+ int retval;
-+
-+ if (pid < 0)
-+ return -EINVAL;
-+
-+ retval = -ESRCH;
-+ rcu_read_lock();
-+ p = find_process_by_pid(pid);
-+ if (!p)
-+ goto out_unlock;
-+
-+ retval = security_task_getscheduler(p);
-+ if (retval)
-+ goto out_unlock;
-+
-+ rq = task_rq_lock(p, &flags);
-+ time_slice = p->policy == SCHED_FIFO ? 0 : MS_TO_NS(task_timeslice(p));
-+ task_rq_unlock(rq, p, &flags);
-+
-+ rcu_read_unlock();
-+ t = ns_to_timespec(time_slice);
-+ retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
-+ return retval;
-+
-+out_unlock:
-+ rcu_read_unlock();
-+ return retval;
-+}
-+
-+void sched_show_task(struct task_struct *p)
-+{
-+ unsigned long free = 0;
-+ int ppid;
-+
-+ if (!try_get_task_stack(p))
-+ return;
-+
-+ printk(KERN_INFO "%-15.15s %c", p->comm, task_state_to_char(p));
-+
-+ if (p->state == TASK_RUNNING)
-+ printk(KERN_CONT " running task ");
-+#ifdef CONFIG_DEBUG_STACK_USAGE
-+ free = stack_not_used(p);
-+#endif
-+ ppid = 0;
-+ rcu_read_lock();
-+ if (pid_alive(p))
-+ ppid = task_pid_nr(rcu_dereference(p->real_parent));
-+ rcu_read_unlock();
-+ printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free,
-+ task_pid_nr(p), ppid,
-+ (unsigned long)task_thread_info(p)->flags);
-+
-+ print_worker_info(KERN_INFO, p);
-+ show_stack(p, NULL);
-+ put_task_stack(p);
-+}
-+
-+static inline bool
-+state_filter_match(unsigned long state_filter, struct task_struct *p)
-+{
-+ /* no filter, everything matches */
-+ if (!state_filter)
-+ return true;
-+
-+ /* filter, but doesn't match */
-+ if (!(p->state & state_filter))
-+ return false;
-+
-+ /*
-+ * When looking for TASK_UNINTERRUPTIBLE skip TASK_IDLE (allows
-+ * TASK_KILLABLE).
-+ */
-+ if (state_filter == TASK_UNINTERRUPTIBLE && p->state == TASK_IDLE)
-+ return false;
-+
-+ return true;
-+}
-+
-+void show_state_filter(unsigned long state_filter)
-+{
-+ struct task_struct *g, *p;
-+
-+#if BITS_PER_LONG == 32
-+ printk(KERN_INFO
-+ "  task                PC stack   pid father\n");
-+#else
-+ printk(KERN_INFO
-+ "  task                        PC stack   pid father\n");
-+#endif
-+ rcu_read_lock();
-+ for_each_process_thread(g, p) {
-+ /*
-+ * reset the NMI-timeout, listing all files on a slow
-+ * console might take a lot of time:
-+ * Also, reset softlockup watchdogs on all CPUs, because
-+ * another CPU might be blocked waiting for us to process
-+ * an IPI.
-+ */
-+ touch_nmi_watchdog();
-+ touch_all_softlockup_watchdogs();
-+ if (state_filter_match(state_filter, p))
-+ sched_show_task(p);
-+ }
-+
-+ rcu_read_unlock();
-+ /*
-+ * Only show locks if all tasks are dumped:
-+ */
-+ if (!state_filter)
-+ debug_show_all_locks();
-+}
-+
-+void dump_cpu_task(int cpu)
-+{
-+ pr_info("Task dump for CPU %d:\n", cpu);
-+ sched_show_task(cpu_curr(cpu));
-+}
-+
-+#ifdef CONFIG_SMP
-+void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask)
-+{
-+ cpumask_copy(&p->cpus_allowed, new_mask);
-+ p->nr_cpus_allowed = cpumask_weight(new_mask);
-+}
-+
-+void __do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
-+{
-+ struct rq *rq = task_rq(p);
-+
-+ lockdep_assert_held(&p->pi_lock);
-+
-+ cpumask_copy(&p->cpus_allowed, new_mask);
-+
-+ if (task_queued(p)) {
-+ /*
-+ * Because __kthread_bind() calls this on blocked tasks without
-+ * holding rq->lock.
-+ */
-+ lockdep_assert_held(&rq->lock);
-+ }
-+}
-+
-+/*
-+ * do_set_cpus_allowed() may be called from outside the scheduler code, but
-+ * should not be called on a running or queued task. We should be holding
-+ * pi_lock.
-+ */
-+void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
-+{
-+ __do_set_cpus_allowed(p, new_mask);
-+ if (needs_other_cpu(p, task_cpu(p))) {
-+ struct rq *rq;
-+
-+ rq = __task_rq_lock(p);
-+ set_task_cpu(p, valid_task_cpu(p));
-+ resched_task(p);
-+ __task_rq_unlock(rq);
-+ }
-+}
-+#endif
-+
-+/**
-+ * init_idle - set up an idle thread for a given CPU
-+ * @idle: task in question
-+ * @cpu: cpu the idle task belongs to
-+ *
-+ * NOTE: this function does not set the idle thread's NEED_RESCHED
-+ * flag, to make booting more robust.
-+ */
-+void init_idle(struct task_struct *idle, int cpu)
-+{
-+ struct rq *rq = cpu_rq(cpu);
-+ unsigned long flags;
-+
-+ raw_spin_lock_irqsave(&idle->pi_lock, flags);
-+ raw_spin_lock(&rq->lock);
-+ idle->last_ran = rq->niffies;
-+ time_slice_expired(idle, rq);
-+ idle->state = TASK_RUNNING;
-+ /* Setting prio to illegal value shouldn't matter when never queued */
-+ idle->prio = PRIO_LIMIT;
-+
-+ kasan_unpoison_task_stack(idle);
-+
-+#ifdef CONFIG_SMP
-+ /*
-+ * It's possible that init_idle() gets called multiple times on a task,
-+ * in that case do_set_cpus_allowed() will not do the right thing.
-+ *
-+ * And since this is boot we can forgo the serialisation.
-+ */
-+ set_cpus_allowed_common(idle, cpumask_of(cpu));
-+#ifdef CONFIG_SMT_NICE
-+ idle->smt_bias = 0;
-+#endif
-+#endif
-+ set_rq_task(rq, idle);
-+
-+ /* Silence PROVE_RCU */
-+ rcu_read_lock();
-+ set_task_cpu(idle, cpu);
-+ rcu_read_unlock();
-+
-+ rq->curr = rq->idle = idle;
-+ idle->on_rq = TASK_ON_RQ_QUEUED;
-+ raw_spin_unlock(&rq->lock);
-+ raw_spin_unlock_irqrestore(&idle->pi_lock, flags);
-+
-+ /* Set the preempt count _outside_ the spinlocks! */
-+ init_idle_preempt_count(idle, cpu);
-+
-+ ftrace_graph_init_idle_task(idle, cpu);
-+ vtime_init_idle(idle, cpu);
-+#ifdef CONFIG_SMP
-+ sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
-+#endif
-+}
-+
-+int cpuset_cpumask_can_shrink(const struct cpumask __maybe_unused *cur,
-+ const struct cpumask __maybe_unused *trial)
-+{
-+ return 1;
-+}
-+
-+int task_can_attach(struct task_struct *p,
-+ const struct cpumask *cs_cpus_allowed)
-+{
-+ int ret = 0;
-+
-+ /*
-+ * Kthreads which disallow setaffinity shouldn't be moved
-+ * to a new cpuset; we don't want to change their CPU
-+ * affinity and isolating such threads by their set of
-+ * allowed nodes is unnecessary. Thus, cpusets are not
-+ * applicable for such threads. This prevents checking for
-+ * success of set_cpus_allowed_ptr() on all attached tasks
-+ * before cpus_allowed may be changed.
-+ */
-+ if (p->flags & PF_NO_SETAFFINITY)
-+ ret = -EINVAL;
-+
-+ return ret;
-+}
-+
-+void resched_cpu(int cpu)
-+{
-+ struct rq *rq = cpu_rq(cpu);
-+ unsigned long flags;
-+
-+ rq_lock_irqsave(rq, &flags);
-+ resched_task(cpu_curr(cpu));
-+ rq_unlock_irqrestore(rq, &flags);
-+}
-+
-+#ifdef CONFIG_SMP
-+#ifdef CONFIG_NO_HZ_COMMON
-+void nohz_balance_enter_idle(int cpu)
-+{
-+}
-+
-+void select_nohz_load_balancer(int stop_tick)
-+{
-+}
-+
-+void set_cpu_sd_state_idle(void) {}
-+
-+/*
-+ * In the semi idle case, use the nearest busy CPU for migrating timers
-+ * from an idle CPU. This is good for power-savings.
-+ *
-+ * We don't do similar optimization for completely idle system, as
-+ * selecting an idle CPU will add more delays to the timers than intended
-+ * (as that CPU's timer base may not be up to date wrt jiffies etc).
-+ */
-+int get_nohz_timer_target(void)
-+{
-+ int i, cpu = smp_processor_id();
-+ struct sched_domain *sd;
-+
-+ if (!idle_cpu(cpu) && is_housekeeping_cpu(cpu))
-+ return cpu;
-+
-+ rcu_read_lock();
-+ for_each_domain(cpu, sd) {
-+ for_each_cpu(i, sched_domain_span(sd)) {
-+ if (cpu == i)
-+ continue;
-+
-+ if (!idle_cpu(i) && is_housekeeping_cpu(i)) {
-+ cpu = i;
-+ goto unlock;
-+ }
-+ }
-+ }
-+
-+ if (!is_housekeeping_cpu(cpu))
-+ cpu = housekeeping_any_cpu();
-+unlock:
-+ rcu_read_unlock();
-+ return cpu;
-+}
-+
-+/*
-+ * When add_timer_on() enqueues a timer into the timer wheel of an
-+ * idle CPU then this timer might expire before the next timer event
-+ * which is scheduled to wake up that CPU. In case of a completely
-+ * idle system the next event might even be infinite time into the
-+ * future. wake_up_idle_cpu() ensures that the CPU is woken up and
-+ * leaves the inner idle loop so the newly added timer is taken into
-+ * account when the CPU goes back to idle and evaluates the timer
-+ * wheel for the next timer event.
-+ */
-+void wake_up_idle_cpu(int cpu)
-+{
-+ if (cpu == smp_processor_id())
-+ return;
-+
-+ if (set_nr_and_not_polling(cpu_rq(cpu)->idle))
-+ smp_sched_reschedule(cpu);
-+ else
-+ trace_sched_wake_idle_without_ipi(cpu);
-+}
-+
-+static bool wake_up_full_nohz_cpu(int cpu)
-+{
-+ /*
-+ * We just need the target to call irq_exit() and re-evaluate
-+ * the next tick. The nohz full kick at least implies that.
-+ * If needed we can still optimize that later with an
-+ * empty IRQ.
-+ */
-+ if (cpu_is_offline(cpu))
-+ return true; /* Don't try to wake offline CPUs. */
-+ if (tick_nohz_full_cpu(cpu)) {
-+ if (cpu != smp_processor_id() ||
-+ tick_nohz_tick_stopped())
-+ tick_nohz_full_kick_cpu(cpu);
-+ return true;
-+ }
-+
-+ return false;
-+}
-+
-+/*
-+ * Wake up the specified CPU. If the CPU is going offline, it is the
-+ * caller's responsibility to deal with the lost wakeup, for example,
-+ * by hooking into the CPU_DEAD notifier like timers and hrtimers do.
-+ */
-+void wake_up_nohz_cpu(int cpu)
-+{
-+ if (!wake_up_full_nohz_cpu(cpu))
-+ wake_up_idle_cpu(cpu);
-+}
-+#endif /* CONFIG_NO_HZ_COMMON */
-+
-+/*
-+ * Change a given task's CPU affinity. Migrate the thread to a
-+ * proper CPU and schedule it away if the CPU it's executing on
-+ * is removed from the allowed bitmask.
-+ *
-+ * NOTE: the caller must have a valid reference to the task, the
-+ * task must not exit() & deallocate itself prematurely. The
-+ * call is not atomic; no spinlocks may be held.
-+ */
-+static int __set_cpus_allowed_ptr(struct task_struct *p,
-+ const struct cpumask *new_mask, bool check)
-+{
-+ const struct cpumask *cpu_valid_mask = cpu_active_mask;
-+ bool queued = false, running_wrong = false, kthread;
-+ struct cpumask old_mask;
-+ unsigned long flags;
-+ struct rq *rq;
-+ int ret = 0;
-+
-+ rq = task_rq_lock(p, &flags);
-+ update_rq_clock(rq);
-+
-+ kthread = !!(p->flags & PF_KTHREAD);
-+ if (kthread) {
-+ /*
-+ * Kernel threads are allowed on online && !active CPUs
-+ */
-+ cpu_valid_mask = cpu_online_mask;
-+ }
-+
-+ /*
-+ * Must re-check here, to close a race against __kthread_bind(),
-+ * sched_setaffinity() is not guaranteed to observe the flag.
-+ */
-+ if (check && (p->flags & PF_NO_SETAFFINITY)) {
-+ ret = -EINVAL;
-+ goto out;
-+ }
-+
-+ cpumask_copy(&old_mask, &p->cpus_allowed);
-+ if (cpumask_equal(&old_mask, new_mask))
-+ goto out;
-+
-+ if (!cpumask_intersects(new_mask, cpu_valid_mask)) {
-+ ret = -EINVAL;
-+ goto out;
-+ }
-+
-+ queued = task_queued(p);
-+ __do_set_cpus_allowed(p, new_mask);
-+
-+ if (kthread) {
-+ /*
-+ * For kernel threads that do indeed end up on online &&
-+ * !active we want to ensure they are strict per-CPU threads.
-+ */
-+ WARN_ON(cpumask_intersects(new_mask, cpu_online_mask) &&
-+ !cpumask_intersects(new_mask, cpu_active_mask) &&
-+ p->nr_cpus_allowed != 1);
-+ }
-+
-+ /* Can the task run on the task's current CPU? If so, we're done */
-+ if (cpumask_test_cpu(task_cpu(p), new_mask))
-+ goto out;
-+
-+ if (task_running(rq, p)) {
-+ /* Task is running on the wrong cpu now, reschedule it. */
-+ if (rq == this_rq()) {
-+ set_tsk_need_resched(p);
-+ running_wrong = true;
-+ } else
-+ resched_task(p);
-+ } else {
-+ int cpu = cpumask_any_and(cpu_valid_mask, new_mask);
-+
-+ if (queued) {
-+ /*
-+ * Switch runqueue locks after dequeueing the task
-+ * here, while still holding pi_lock, so that we hold
-+ * the correct lock for enqueueing.
-+ */
-+ dequeue_task(rq, p, 0);
-+ rq_unlock(rq);
-+
-+ rq = cpu_rq(cpu);
-+ rq_lock(rq);
-+ }
-+ set_task_cpu(p, cpu);
-+ if (queued)
-+ enqueue_task(rq, p, 0);
-+ }
-+ if (queued)
-+ try_preempt(p, rq);
-+ if (running_wrong)
-+ preempt_disable();
-+out:
-+ task_rq_unlock(rq, p, &flags);
-+
-+ if (running_wrong) {
-+ __schedule(true);
-+ preempt_enable();
-+ }
-+
-+ return ret;
-+}
-+
-+int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
-+{
-+ return __set_cpus_allowed_ptr(p, new_mask, false);
-+}
-+EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
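-+
-+/*
-+ * Usage sketch (editorial): the common kernel-side pattern for restricting
-+ * a task to a single CPU:
-+ *
-+ *	set_cpus_allowed_ptr(p, cpumask_of(cpu));
-+ */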
-+
-+#ifdef CONFIG_HOTPLUG_CPU
-+/*
-+ * Run through task list and find tasks affined to the dead cpu, then remove
-+ * that cpu from the list, enable cpu0 and set the zerobound flag. Must hold
-+ * cpu 0 and src_cpu's runqueue locks.
-+ */
-+static void bind_zero(int src_cpu)
-+{
-+ struct task_struct *p, *t;
-+ struct rq *rq0;
-+ int bound = 0;
-+
-+ if (src_cpu == 0)
-+ return;
-+
-+ rq0 = cpu_rq(0);
-+
-+ do_each_thread(t, p) {
-+ if (cpumask_test_cpu(src_cpu, &p->cpus_allowed)) {
-+ bool local = (task_cpu(p) == src_cpu);
-+ struct rq *rq = task_rq(p);
-+
-+ /* A task still running here can only be the cpu stopper thread */
-+ if (local && task_running(rq, p))
-+ continue;
-+ atomic_clear_cpu(src_cpu, &p->cpus_allowed);
-+ atomic_set_cpu(0, &p->cpus_allowed);
-+ p->zerobound = true;
-+ bound++;
-+ if (local) {
-+ bool queued = task_queued(p);
-+
-+ if (queued)
-+ dequeue_task(rq, p, 0);
-+ set_task_cpu(p, 0);
-+ if (queued)
-+ enqueue_task(rq0, p, 0);
-+ }
-+ }
-+ } while_each_thread(t, p);
-+
-+ if (bound) {
-+ printk(KERN_INFO "Removed affinity for %d processes to cpu %d\n",
-+ bound, src_cpu);
-+ }
-+}
-+
-+/*
-+ * Find processes with the zerobound flag and re-enable their affinity for
-+ * the CPU coming alive.
-+ */
-+static void unbind_zero(int src_cpu)
-+{
-+ int unbound = 0, zerobound = 0;
-+ struct task_struct *p, *t;
-+
-+ if (src_cpu == 0)
-+ return;
-+
-+ do_each_thread(t, p) {
-+ if (!p->mm)
-+ p->zerobound = false;
-+ if (p->zerobound) {
-+ unbound++;
-+ cpumask_set_cpu(src_cpu, &p->cpus_allowed);
-+ /*
-+  * Once affinity to every possible CPU has been re-enabled,
-+  * remove the zerobound flag.
-+  */
-+ if (cpumask_subset(cpu_possible_mask, &p->cpus_allowed)) {
-+ p->zerobound = false;
-+ zerobound++;
-+ }
-+ }
-+ } while_each_thread(t, p);
-+
-+ if (unbound) {
-+ printk(KERN_INFO "Added affinity for %d processes to cpu %d\n",
-+ unbound, src_cpu);
-+ }
-+ if (zerobound) {
-+ printk(KERN_INFO "Released forced binding to cpu0 for %d processes\n",
-+ zerobound);
-+ }
-+}
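-+
-+/*
-+ * Example output (editorial; rendered from the printk formats above):
-+ *
-+ *	Removed affinity for 16 processes to cpu 3
-+ *	Added affinity for 16 processes to cpu 3
-+ *	Released forced binding to cpu0 for 16 processes
-+ */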
-+
-+/*
-+ * Ensure that the idle task is using init_mm right before its cpu goes
-+ * offline.
-+ */
-+void idle_task_exit(void)
-+{
-+ struct mm_struct *mm = current->active_mm;
-+
-+ BUG_ON(cpu_online(smp_processor_id()));
-+
-+ if (mm != &init_mm) {
-+ switch_mm(mm, &init_mm, current);
-+ finish_arch_post_lock_switch();
-+ }
-+ mmdrop(mm);
-+}
-+#else /* CONFIG_HOTPLUG_CPU */
-+static void unbind_zero(int src_cpu) {}
-+#endif /* CONFIG_HOTPLUG_CPU */
-+
-+void sched_set_stop_task(int cpu, struct task_struct *stop)
-+{
-+ struct sched_param stop_param = { .sched_priority = STOP_PRIO };
-+ struct sched_param start_param = { .sched_priority = 0 };
-+ struct task_struct *old_stop = cpu_rq(cpu)->stop;
-+
-+ if (stop) {
-+ /*
-+ * Make it appear like a SCHED_FIFO task; it's something
-+ * userspace knows about and won't get confused about.
-+ *
-+ * Also, it will make PI more or less work without too
-+ * much confusion -- but then, stop work should not
-+ * rely on PI working anyway.
-+ */
-+ sched_setscheduler_nocheck(stop, SCHED_FIFO, &stop_param);
-+ }
-+
-+ cpu_rq(cpu)->stop = stop;
-+
-+ if (old_stop) {
-+ /*
-+ * Reset it back to a normal scheduling policy so that
-+ * it can die in pieces.
-+ */
-+ sched_setscheduler_nocheck(old_stop, SCHED_NORMAL, &start_param);
-+ }
-+}
-+
-+#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
-+
-+static struct ctl_table sd_ctl_dir[] = {
-+ {
-+ .procname = "sched_domain",
-+ .mode = 0555,
-+ },
-+ {}
-+};
-+
-+static struct ctl_table sd_ctl_root[] = {
-+ {
-+ .procname = "kernel",
-+ .mode = 0555,
-+ .child = sd_ctl_dir,
-+ },
-+ {}
-+};
-+
-+static struct ctl_table *sd_alloc_ctl_entry(int n)
-+{
-+ struct ctl_table *entry =
-+ kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
-+
-+ return entry;
-+}
-+
-+static void sd_free_ctl_entry(struct ctl_table **tablep)
-+{
-+ struct ctl_table *entry;
-+
-+ /*
-+ * In the intermediate directories, both the child directory and
-+ * procname are dynamically allocated and could fail but the mode
-+ * will always be set. In the lowest directory the names are
-+ * static strings and all have proc handlers.
-+ */
-+ for (entry = *tablep; entry->mode; entry++) {
-+ if (entry->child)
-+ sd_free_ctl_entry(&entry->child);
-+ if (entry->proc_handler == NULL)
-+ kfree(entry->procname);
-+ }
-+
-+ kfree(*tablep);
-+ *tablep = NULL;
-+}
-+
-+#define CPU_LOAD_IDX_MAX 5
-+static int min_load_idx = 0;
-+static int max_load_idx = CPU_LOAD_IDX_MAX-1;
-+
-+static void
-+set_table_entry(struct ctl_table *entry,
-+ const char *procname, void *data, int maxlen,
-+ umode_t mode, proc_handler *proc_handler,
-+ bool load_idx)
-+{
-+ entry->procname = procname;
-+ entry->data = data;
-+ entry->maxlen = maxlen;
-+ entry->mode = mode;
-+ entry->proc_handler = proc_handler;
-+
-+ if (load_idx) {
-+ entry->extra1 = &min_load_idx;
-+ entry->extra2 = &max_load_idx;
-+ }
-+}
-+
-+static struct ctl_table *
-+sd_alloc_ctl_domain_table(struct sched_domain *sd)
-+{
-+ struct ctl_table *table = sd_alloc_ctl_entry(14);
-+
-+ if (table == NULL)
-+ return NULL;
-+
-+ set_table_entry(&table[0], "min_interval", &sd->min_interval,
-+ sizeof(long), 0644, proc_doulongvec_minmax, false);
-+ set_table_entry(&table[1], "max_interval", &sd->max_interval,
-+ sizeof(long), 0644, proc_doulongvec_minmax, false);
-+ set_table_entry(&table[2], "busy_idx", &sd->busy_idx,
-+ sizeof(int), 0644, proc_dointvec_minmax, true);
-+ set_table_entry(&table[3], "idle_idx", &sd->idle_idx,
-+ sizeof(int), 0644, proc_dointvec_minmax, true);
-+ set_table_entry(&table[4], "newidle_idx", &sd->newidle_idx,
-+ sizeof(int), 0644, proc_dointvec_minmax, true);
-+ set_table_entry(&table[5], "wake_idx", &sd->wake_idx,
-+ sizeof(int), 0644, proc_dointvec_minmax, true);
-+ set_table_entry(&table[6], "forkexec_idx", &sd->forkexec_idx,
-+ sizeof(int), 0644, proc_dointvec_minmax, true);
-+ set_table_entry(&table[7], "busy_factor", &sd->busy_factor,
-+ sizeof(int), 0644, proc_dointvec_minmax, false);
-+ set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct,
-+ sizeof(int), 0644, proc_dointvec_minmax, false);
-+ set_table_entry(&table[9], "cache_nice_tries",
-+ &sd->cache_nice_tries,
-+ sizeof(int), 0644, proc_dointvec_minmax, false);
-+ set_table_entry(&table[10], "flags", &sd->flags,
-+ sizeof(int), 0644, proc_dointvec_minmax, false);
-+ set_table_entry(&table[11], "max_newidle_lb_cost",
-+ &sd->max_newidle_lb_cost,
-+ sizeof(long), 0644, proc_doulongvec_minmax, false);
-+ set_table_entry(&table[12], "name", sd->name,
-+ CORENAME_MAX_SIZE, 0444, proc_dostring, false);
-+ /* &table[13] is terminator */
-+
-+ return table;
-+}
-+
-+static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu)
-+{
-+ struct ctl_table *entry, *table;
-+ struct sched_domain *sd;
-+ int domain_num = 0, i;
-+ char buf[32];
-+
-+ for_each_domain(cpu, sd)
-+ domain_num++;
-+ entry = table = sd_alloc_ctl_entry(domain_num + 1);
-+ if (table == NULL)
-+ return NULL;
-+
-+ i = 0;
-+ for_each_domain(cpu, sd) {
-+ snprintf(buf, 32, "domain%d", i);
-+ entry->procname = kstrdup(buf, GFP_KERNEL);
-+ entry->mode = 0555;
-+ entry->child = sd_alloc_ctl_domain_table(sd);
-+ entry++;
-+ i++;
-+ }
-+ return table;
-+}
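-+
-+/*
-+ * Resulting layout (editorial): hooked under sd_ctl_root/sd_ctl_dir above
-+ * and registered in register_sched_domain_sysctl() below, these tables
-+ * surface as e.g.
-+ *
-+ *	/proc/sys/kernel/sched_domain/cpu0/domain0/min_interval
-+ *	/proc/sys/kernel/sched_domain/cpu0/domain0/imbalance_pct
-+ */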
-+
-+static cpumask_var_t sd_sysctl_cpus;
-+static struct ctl_table_header *sd_sysctl_header;
-+
-+void register_sched_domain_sysctl(void)
-+{
-+ static struct ctl_table *cpu_entries;
-+ static struct ctl_table **cpu_idx;
-+ char buf[32];
-+ int i;
-+
-+ if (!cpu_entries) {
-+ cpu_entries = sd_alloc_ctl_entry(num_possible_cpus() + 1);
-+ if (!cpu_entries)
-+ return;
-+
-+ WARN_ON(sd_ctl_dir[0].child);
-+ sd_ctl_dir[0].child = cpu_entries;
-+ }
-+
-+ if (!cpu_idx) {
-+ struct ctl_table *e = cpu_entries;
-+
-+ cpu_idx = kcalloc(nr_cpu_ids, sizeof(struct ctl_table*), GFP_KERNEL);
-+ if (!cpu_idx)
-+ return;
-+
-+ /* deal with sparse possible map */
-+ for_each_possible_cpu(i) {
-+ cpu_idx[i] = e;
-+ e++;
-+ }
-+ }
-+
-+ if (!cpumask_available(sd_sysctl_cpus)) {
-+ if (!alloc_cpumask_var(&sd_sysctl_cpus, GFP_KERNEL))
-+ return;
-+
-+ /* init to cpu_possible_mask so there are no holes in @cpu_entries */
-+ cpumask_copy(sd_sysctl_cpus, cpu_possible_mask);
-+ }
-+
-+ for_each_cpu(i, sd_sysctl_cpus) {
-+ struct ctl_table *e = cpu_idx[i];
-+
-+ if (e->child)
-+ sd_free_ctl_entry(&e->child);
-+
-+ if (!e->procname) {
-+ snprintf(buf, 32, "cpu%d", i);
-+ e->procname = kstrdup(buf, GFP_KERNEL);
-+ }
-+ e->mode = 0555;
-+ e->child = sd_alloc_ctl_cpu_table(i);
-+
-+ __cpumask_clear_cpu(i, sd_sysctl_cpus);
-+ }
-+
-+ WARN_ON(sd_sysctl_header);
-+ sd_sysctl_header = register_sysctl_table(sd_ctl_root);
-+}
-+
-+void dirty_sched_domain_sysctl(int cpu)
-+{
-+ if (cpumask_available(sd_sysctl_cpus))
-+ __cpumask_set_cpu(cpu, sd_sysctl_cpus);
-+}
-+
-+/* may be called multiple times per register */
-+void unregister_sched_domain_sysctl(void)
-+{
-+ unregister_sysctl_table(sd_sysctl_header);
-+ sd_sysctl_header = NULL;
-+}
-+#endif /* CONFIG_SYSCTL */
-+
-+void set_rq_online(struct rq *rq)
-+{
-+ if (!rq->online) {
-+ cpumask_set_cpu(cpu_of(rq), rq->rd->online);
-+ rq->online = true;
-+ }
-+}
-+
-+void set_rq_offline(struct rq *rq)
-+{
-+ if (rq->online) {
-+ int cpu = cpu_of(rq);
-+
-+ cpumask_clear_cpu(cpu, rq->rd->online);
-+ rq->online = false;
-+ clear_cpuidle_map(cpu);
-+ }
-+}
-+
-+/*
-+ * used to mark begin/end of suspend/resume:
-+ */
-+static int num_cpus_frozen;
-+
-+/*
-+ * Update cpusets according to cpu_active mask. If cpusets are
-+ * disabled, cpuset_update_active_cpus() becomes a simple wrapper
-+ * around partition_sched_domains().
-+ *
-+ * If we come here as part of a suspend/resume, don't touch cpusets because we
-+ * want to restore them to their original state upon resume anyway.
-+ */
-+static void cpuset_cpu_active(void)
-+{
-+ if (cpuhp_tasks_frozen) {
-+ /*
-+ * num_cpus_frozen tracks how many CPUs are involved in suspend
-+ * resume sequence. As long as this is not the last online
-+ * operation in the resume sequence, just build a single sched
-+ * domain, ignoring cpusets.
-+ */
-+ partition_sched_domains(1, NULL, NULL);
-+ if (--num_cpus_frozen)
-+ return;
-+ /*
-+ * This is the last CPU online operation. So fall through and
-+ * restore the original sched domains by considering the
-+ * cpuset configurations.
-+ */
-+ cpuset_force_rebuild();
-+ }
-+
-+ cpuset_update_active_cpus();
-+}
-+
-+static int cpuset_cpu_inactive(unsigned int cpu)
-+{
-+ if (!cpuhp_tasks_frozen) {
-+ cpuset_update_active_cpus();
-+ } else {
-+ num_cpus_frozen++;
-+ partition_sched_domains(1, NULL, NULL);
-+ }
-+ return 0;
-+}
-+
-+int sched_cpu_activate(unsigned int cpu)
-+{
-+ struct rq *rq = cpu_rq(cpu);
-+ unsigned long flags;
-+
-+ set_cpu_active(cpu, true);
-+
-+ if (sched_smp_initialized) {
-+ sched_domains_numa_masks_set(cpu);
-+ cpuset_cpu_active();
-+ }
-+
-+ /*
-+ * Put the rq online, if not already. This happens:
-+ *
-+ * 1) In the early boot process, because we build the real domains
-+ * after all CPUs have been brought up.
-+ *
-+ * 2) At runtime, if cpuset_cpu_active() fails to rebuild the
-+ * domains.
-+ */
-+ rq_lock_irqsave(rq, &flags);
-+ if (rq->rd) {
-+ BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
-+ set_rq_online(rq);
-+ }
-+ unbind_zero(cpu);
-+ rq_unlock_irqrestore(rq, &flags);
-+
-+ return 0;
-+}
-+
-+int sched_cpu_deactivate(unsigned int cpu)
-+{
-+ int ret;
-+
-+ set_cpu_active(cpu, false);
-+ /*
-+ * We've cleared cpu_active_mask, wait for all preempt-disabled and RCU
-+ * users of this state to go away such that all new such users will
-+ * observe it.
-+ *
-+ * Do sync before parking smpboot threads to take care of the rcu boost case.
-+ */
-+ synchronize_rcu_mult(call_rcu, call_rcu_sched);
-+
-+ if (!sched_smp_initialized)
-+ return 0;
-+
-+ ret = cpuset_cpu_inactive(cpu);
-+ if (ret) {
-+ set_cpu_active(cpu, true);
-+ return ret;
-+ }
-+ sched_domains_numa_masks_clear(cpu);
-+ return 0;
-+}
-+
-+int sched_cpu_starting(unsigned int __maybe_unused cpu)
-+{
-+ return 0;
-+}
-+
-+#ifdef CONFIG_HOTPLUG_CPU
-+int sched_cpu_dying(unsigned int cpu)
-+{
-+ struct rq *rq = cpu_rq(cpu);
-+ unsigned long flags;
-+
-+ local_irq_save(flags);
-+ double_rq_lock(rq, cpu_rq(0));
-+ if (rq->rd) {
-+ BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
-+ set_rq_offline(rq);
-+ }
-+ bind_zero(cpu);
-+ double_rq_unlock(rq, cpu_rq(0));
-+ sched_start_tick(rq, cpu);
-+ hrexpiry_clear(rq);
-+ local_irq_restore(flags);
-+
-+ return 0;
-+}
-+#endif
-+
-+#if defined(CONFIG_SCHED_SMT) || defined(CONFIG_SCHED_MC)
-+/*
-+ * Cheaper version of the below functions in case support for SMT and MC is
-+ * compiled in but CPUs have no siblings.
-+ */
-+static bool sole_cpu_idle(struct rq *rq)
-+{
-+ return rq_idle(rq);
-+}
-+#endif
-+#ifdef CONFIG_SCHED_SMT
-+static const cpumask_t *thread_cpumask(int cpu)
-+{
-+ return topology_sibling_cpumask(cpu);
-+}
-+/* All this CPU's SMT siblings are idle */
-+static bool siblings_cpu_idle(struct rq *rq)
-+{
-+ return cpumask_subset(&rq->thread_mask, &cpu_idle_map);
-+}
-+#endif
-+#ifdef CONFIG_SCHED_MC
-+static const cpumask_t *core_cpumask(int cpu)
-+{
-+ return topology_core_cpumask(cpu);
-+}
-+/* All this CPU's shared cache siblings are idle */
-+static bool cache_cpu_idle(struct rq *rq)
-+{
-+ return cpumask_subset(&rq->core_mask, &cpu_idle_map);
-+}
-+#endif
-+
-+enum sched_domain_level {
-+ SD_LV_NONE = 0,
-+ SD_LV_SIBLING,
-+ SD_LV_MC,
-+ SD_LV_BOOK,
-+ SD_LV_CPU,
-+ SD_LV_NODE,
-+ SD_LV_ALLNODES,
-+ SD_LV_MAX
-+};
-+
-+void __init sched_init_smp(void)
-+{
-+ struct sched_domain *sd;
-+ int cpu, other_cpu;
-+#ifdef CONFIG_SCHED_SMT
-+ bool smt_threads = false;
-+#endif
-+ cpumask_var_t non_isolated_cpus;
-+ struct rq *rq;
-+
-+ alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL);
-+
-+ sched_init_numa();
-+
-+ /*
-+ * There's no userspace yet to cause hotplug operations; hence all the
-+ * cpu masks are stable and all blatant races in the below code cannot
-+ * happen.
-+ */
-+ mutex_lock(&sched_domains_mutex);
-+ sched_init_domains(cpu_active_mask);
-+ cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map);
-+ if (cpumask_empty(non_isolated_cpus))
-+ cpumask_set_cpu(smp_processor_id(), non_isolated_cpus);
-+ mutex_unlock(&sched_domains_mutex);
-+
-+ /* Move init over to a non-isolated CPU */
-+ if (set_cpus_allowed_ptr(current, non_isolated_cpus) < 0)
-+ BUG();
-+ free_cpumask_var(non_isolated_cpus);
-+
-+ mutex_lock(&sched_domains_mutex);
-+ local_irq_disable();
-+ lock_all_rqs();
-+ /*
-+ * Set up the relative cache distance of each online cpu from each
-+ * other in a simple array for quick lookup. Locality is determined
-+ * by the closest sched_domain that CPUs are separated by. CPUs with
-+ * shared cache in SMT and MC are treated as local. Separate CPUs
-+ * (within the same package or physically) within the same node are
-+ * treated as not local. CPUs not even in the same domain (different
-+ * nodes) are treated as very distant.
-+ */
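-+ /*
-+  * Concretely (editorial; derived from the assignments below and in
-+  * sched_init()): locality 0 = same CPU, 1 = SMT sibling, 2 = shared
-+  * cache (MC), 3 = same node, 4 = different node.
-+  */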
-+ for_each_online_cpu(cpu) {
-+ rq = cpu_rq(cpu);
-+
-+ /* First check if this cpu is in the same node */
-+ for_each_domain(cpu, sd) {
-+ if (sd->level > SD_LV_MC)
-+ continue;
-+ /* Set locality to local node if not already found lower */
-+ for_each_cpu(other_cpu, sched_domain_span(sd)) {
-+ if (rq->cpu_locality[other_cpu] > 3)
-+ rq->cpu_locality[other_cpu] = 3;
-+ }
-+ }
-+
-+ /*
-+ * Each runqueue has its own function in case it doesn't have
-+ * siblings of its own allowing mixed topologies.
-+ */
-+#ifdef CONFIG_SCHED_MC
-+ for_each_cpu(other_cpu, core_cpumask(cpu)) {
-+ if (rq->cpu_locality[other_cpu] > 2)
-+ rq->cpu_locality[other_cpu] = 2;
-+ }
-+ if (cpumask_weight(core_cpumask(cpu)) > 1) {
-+ cpumask_copy(&rq->core_mask, core_cpumask(cpu));
-+ cpumask_clear_cpu(cpu, &rq->core_mask);
-+ rq->cache_idle = cache_cpu_idle;
-+ }
-+#endif
-+#ifdef CONFIG_SCHED_SMT
-+ if (cpumask_weight(thread_cpumask(cpu)) > 1) {
-+ cpumask_copy(&rq->thread_mask, thread_cpumask(cpu));
-+ cpumask_clear_cpu(cpu, &rq->thread_mask);
-+ for_each_cpu(other_cpu, thread_cpumask(cpu))
-+ rq->cpu_locality[other_cpu] = 1;
-+ rq->siblings_idle = siblings_cpu_idle;
-+ smt_threads = true;
-+ }
-+#endif
-+ }
-+ for_each_possible_cpu(cpu) {
-+ int total_cpus = 1, locality;
-+
-+ rq = cpu_rq(cpu);
-+ for (locality = 1; locality <= 4; locality++) {
-+ for_each_possible_cpu(other_cpu) {
-+ if (rq->cpu_locality[other_cpu] == locality)
-+ rq->rq_order[total_cpus++] = cpu_rq(other_cpu);
-+ }
-+ }
-+ }
-+#ifdef CONFIG_SMT_NICE
-+ if (smt_threads) {
-+ check_siblings = &check_smt_siblings;
-+ wake_siblings = &wake_smt_siblings;
-+ smt_schedule = &smt_should_schedule;
-+ }
-+#endif
-+ unlock_all_rqs();
-+ local_irq_enable();
-+ mutex_unlock(&sched_domains_mutex);
-+
-+ for_each_online_cpu(cpu) {
-+ rq = cpu_rq(cpu);
-+
-+ for_each_online_cpu(other_cpu) {
-+ if (other_cpu <= cpu)
-+ continue;
-+ printk(KERN_DEBUG "MuQSS locality CPU %d to %d: %d\n", cpu, other_cpu, rq->cpu_locality[other_cpu]);
-+ }
-+ }
-+
-+ sched_smp_initialized = true;
-+}
-+#else
-+void __init sched_init_smp(void)
-+{
-+ sched_smp_initialized = true;
-+}
-+#endif /* CONFIG_SMP */
-+
-+int in_sched_functions(unsigned long addr)
-+{
-+ return in_lock_functions(addr) ||
-+ (addr >= (unsigned long)__sched_text_start
-+ && addr < (unsigned long)__sched_text_end);
-+}
-+
-+#ifdef CONFIG_CGROUP_SCHED
-+/* task group related information */
-+struct task_group {
-+ struct cgroup_subsys_state css;
-+
-+ struct rcu_head rcu;
-+ struct list_head list;
-+
-+ struct task_group *parent;
-+ struct list_head siblings;
-+ struct list_head children;
-+};
-+
-+/*
-+ * Default task group.
-+ * Every task in system belongs to this group at bootup.
-+ */
-+struct task_group root_task_group;
-+LIST_HEAD(task_groups);
-+
-+/* Cacheline aligned slab cache for task_group */
-+static struct kmem_cache *task_group_cache __read_mostly;
-+#endif /* CONFIG_CGROUP_SCHED */
-+
-+void __init sched_init(void)
-+{
-+#ifdef CONFIG_SMP
-+ int cpu_ids;
-+#endif
-+ int i;
-+ struct rq *rq;
-+
-+ sched_clock_init();
-+
-+ wait_bit_init();
-+
-+ prio_ratios[0] = 128;
-+ for (i = 1 ; i < NICE_WIDTH ; i++)
-+ prio_ratios[i] = prio_ratios[i - 1] * 11 / 10;
-+
-+ skiplist_node_init(&init_task.node);
-+
-+#ifdef CONFIG_SMP
-+ init_defrootdomain();
-+ cpumask_clear(&cpu_idle_map);
-+#else
-+ uprq = &per_cpu(runqueues, 0);
-+#endif
-+
-+#ifdef CONFIG_CGROUP_SCHED
-+ task_group_cache = KMEM_CACHE(task_group, 0);
-+
-+ list_add(&root_task_group.list, &task_groups);
-+ INIT_LIST_HEAD(&root_task_group.children);
-+ INIT_LIST_HEAD(&root_task_group.siblings);
-+#endif /* CONFIG_CGROUP_SCHED */
-+ for_each_possible_cpu(i) {
-+ rq = cpu_rq(i);
-+ skiplist_init(&rq->node);
-+ rq->sl = new_skiplist(&rq->node);
-+ raw_spin_lock_init(&rq->lock);
-+ rq->nr_running = 0;
-+ rq->nr_uninterruptible = 0;
-+ rq->nr_switches = 0;
-+ rq->clock = rq->old_clock = rq->last_niffy = rq->niffies = 0;
-+ rq->last_jiffy = jiffies;
-+ rq->user_ns = rq->nice_ns = rq->softirq_ns = rq->system_ns =
-+ rq->iowait_ns = rq->idle_ns = 0;
-+ rq->dither = 0;
-+ set_rq_task(rq, &init_task);
-+ rq->iso_ticks = 0;
-+ rq->iso_refractory = false;
-+#ifdef CONFIG_SMP
-+ rq->sd = NULL;
-+ rq->rd = NULL;
-+ rq->online = false;
-+ rq->cpu = i;
-+ rq_attach_root(rq, &def_root_domain);
-+#endif
-+ init_rq_hrexpiry(rq);
-+ atomic_set(&rq->nr_iowait, 0);
-+ }
-+
-+#ifdef CONFIG_SMP
-+ cpu_ids = i;
-+ /*
-+ * Set the base locality for cpu cache distance calculation to
-+ * "distant" (3). Make sure the distance from a CPU to itself is 0.
-+ */
-+ for_each_possible_cpu(i) {
-+ int j;
-+
-+ rq = cpu_rq(i);
-+#ifdef CONFIG_SCHED_SMT
-+ rq->siblings_idle = sole_cpu_idle;
-+#endif
-+#ifdef CONFIG_SCHED_MC
-+ rq->cache_idle = sole_cpu_idle;
-+#endif
-+ rq->cpu_locality = kmalloc(cpu_ids * sizeof(int), GFP_ATOMIC);
-+ for_each_possible_cpu(j) {
-+ if (i == j)
-+ rq->cpu_locality[j] = 0;
-+ else
-+ rq->cpu_locality[j] = 4;
-+ }
-+ rq->rq_order = kmalloc(cpu_ids * sizeof(struct rq *), GFP_ATOMIC);
-+ rq->rq_order[0] = rq;
-+ for (j = 1; j < cpu_ids; j++)
-+ rq->rq_order[j] = cpu_rq(j);
-+ }
-+#endif
-+
-+ /*
-+ * The boot idle thread does lazy MMU switching as well:
-+ */
-+ mmgrab(&init_mm);
-+ enter_lazy_tlb(&init_mm, current);
-+
-+ /*
-+ * Make us the idle thread. Technically, schedule() should not be
-+ * called from this thread, however somewhere below it might be,
-+ * but because we are the idle thread, we just pick up running again
-+ * when this runqueue becomes "idle".
-+ */
-+ init_idle(current, smp_processor_id());
-+
-+#ifdef CONFIG_SMP
-+ /* May be allocated at isolcpus cmdline parse time */
-+ if (cpu_isolated_map == NULL)
-+ zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
-+ idle_thread_set_boot_cpu();
-+#endif /* SMP */
-+
-+ init_schedstats();
-+}
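
The prio_ratios[] table initialised near the top of sched_init() grows by 10% per nice level. A minimal userspace sketch of the same integer arithmetic (assuming NICE_WIDTH is 40, as in the kernel) shows what the full range works out to:

    #include <stdio.h>

    int main(void)
    {
            int ratio = 128, i;

            /* Same truncating integer arithmetic as the sched_init() loop. */
            for (i = 1; i < 40; i++)
                    ratio = ratio * 11 / 10;
            printf("prio_ratios[39] = %d\n", ratio);    /* prints 5089 */
            return 0;
    }

So the two ends of the nice range differ by roughly 5089/128, about 40:1, in the weighting MuQSS applies when spacing out virtual deadlines.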
-+
-+#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
-+static inline int preempt_count_equals(int preempt_offset)
-+{
-+ int nested = preempt_count() + rcu_preempt_depth();
-+
-+ return (nested == preempt_offset);
-+}
-+
-+void __might_sleep(const char *file, int line, int preempt_offset)
-+{
-+ /*
-+ * Blocking primitives will set (and therefore destroy) current->state,
-+ * since we will exit with TASK_RUNNING make sure we enter with it,
-+ * otherwise we will destroy state.
-+ */
-+ WARN_ONCE(current->state != TASK_RUNNING && current->task_state_change,
-+ "do not call blocking ops when !TASK_RUNNING; "
-+ "state=%lx set at [<%p>] %pS\n",
-+ current->state,
-+ (void *)current->task_state_change,
-+ (void *)current->task_state_change);
-+
-+ ___might_sleep(file, line, preempt_offset);
-+}
-+EXPORT_SYMBOL(__might_sleep);
-+
-+void ___might_sleep(const char *file, int line, int preempt_offset)
-+{
-+ /* Ratelimiting timestamp: */
-+ static unsigned long prev_jiffy;
-+
-+ unsigned long preempt_disable_ip;
-+
-+ /* WARN_ON_ONCE() by default, no rate limit required: */
-+ rcu_sleep_check();
-+
-+ if ((preempt_count_equals(preempt_offset) && !irqs_disabled() &&
-+ !is_idle_task(current)) ||
-+ system_state == SYSTEM_BOOTING || system_state > SYSTEM_RUNNING ||
-+ oops_in_progress)
-+ return;
-+
-+ if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
-+ return;
-+ prev_jiffy = jiffies;
-+
-+ /* Save this before calling printk(), since that will clobber it: */
-+ preempt_disable_ip = get_preempt_disable_ip(current);
-+
-+ printk(KERN_ERR
-+ "BUG: sleeping function called from invalid context at %s:%d\n",
-+ file, line);
-+ printk(KERN_ERR
-+ "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
-+ in_atomic(), irqs_disabled(),
-+ current->pid, current->comm);
-+
-+ if (task_stack_end_corrupted(current))
-+ printk(KERN_EMERG "Thread overran stack, or stack corrupted\n");
-+
-+ debug_show_held_locks(current);
-+ if (irqs_disabled())
-+ print_irqtrace_events(current);
-+ if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)
-+ && !preempt_count_equals(preempt_offset)) {
-+ pr_err("Preemption disabled at:");
-+ print_ip_sym(preempt_disable_ip);
-+ pr_cont("\n");
-+ }
-+ dump_stack();
-+ add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
-+}
-+EXPORT_SYMBOL(___might_sleep);
-+#endif
-+
-+#ifdef CONFIG_MAGIC_SYSRQ
-+static inline void normalise_rt_tasks(void)
-+{
-+ struct task_struct *g, *p;
-+ unsigned long flags;
-+ struct rq *rq;
-+
-+ read_lock(&tasklist_lock);
-+ for_each_process_thread(g, p) {
-+ /*
-+ * Only normalize user tasks:
-+ */
-+ if (p->flags & PF_KTHREAD)
-+ continue;
-+
-+ if (!rt_task(p) && !iso_task(p))
-+ continue;
-+
-+ rq = task_rq_lock(p, &flags);
-+ __setscheduler(p, rq, SCHED_NORMAL, 0, false);
-+ task_rq_unlock(rq, p, &flags);
-+ }
-+ read_unlock(&tasklist_lock);
-+}
-+
-+void normalize_rt_tasks(void)
-+{
-+ normalise_rt_tasks();
-+}
-+#endif /* CONFIG_MAGIC_SYSRQ */
-+
-+#if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB)
-+/*
-+ * These functions are only useful for the IA64 MCA handling, or kdb.
-+ *
-+ * They can only be called when the whole system has been
-+ * stopped - every CPU needs to be quiescent, and no scheduling
-+ * activity can take place. Using them for anything else would
-+ * be a serious bug, and as a result, they aren't even visible
-+ * under any other configuration.
-+ */
-+
-+/**
-+ * curr_task - return the current task for a given CPU.
-+ * @cpu: the processor in question.
-+ *
-+ * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
-+ *
-+ * Return: The current task for @cpu.
-+ */
-+struct task_struct *curr_task(int cpu)
-+{
-+ return cpu_curr(cpu);
-+}
-+
-+#endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */
-+
-+#ifdef CONFIG_IA64
-+/**
-+ * set_curr_task - set the current task for a given CPU.
-+ * @cpu: the processor in question.
-+ * @p: the task pointer to set.
-+ *
-+ * Description: This function must only be used when non-maskable interrupts
-+ * are serviced on a separate stack. It allows the architecture to switch the
-+ * notion of the current task on a CPU in a non-blocking manner. This function
-+ * must be called with all CPUs synchronised and interrupts disabled; the
-+ * caller must save the original value of the current task (see
-+ * curr_task() above) and restore that value before reenabling interrupts and
-+ * re-starting the system.
-+ *
-+ * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
-+ */
-+void ia64_set_curr_task(int cpu, struct task_struct *p)
-+{
-+ cpu_curr(cpu) = p;
-+}
-+
-+#endif
-+
-+void init_idle_bootup_task(struct task_struct *idle)
-+{}
-+
-+#ifdef CONFIG_SCHED_DEBUG
-+__read_mostly bool sched_debug_enabled;
-+
-+void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
-+ struct seq_file *m)
-+{}
-+
-+void proc_sched_set_task(struct task_struct *p)
-+{}
-+#endif
-+
-+#ifdef CONFIG_SMP
-+#define SCHED_LOAD_SHIFT (10)
-+#define SCHED_LOAD_SCALE (1L << SCHED_LOAD_SHIFT)
-+
-+unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu)
-+{
-+ return SCHED_LOAD_SCALE;
-+}
-+
-+unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu)
-+{
-+ unsigned long weight = cpumask_weight(sched_domain_span(sd));
-+ unsigned long smt_gain = sd->smt_gain;
-+
-+ smt_gain /= weight;
-+
-+ return smt_gain;
-+}
-+#endif
-+
-+#ifdef CONFIG_CGROUP_SCHED
-+static void sched_free_group(struct task_group *tg)
-+{
-+ kmem_cache_free(task_group_cache, tg);
-+}
-+
-+/* allocate runqueue etc for a new task group */
-+struct task_group *sched_create_group(struct task_group *parent)
-+{
-+ struct task_group *tg;
-+
-+ tg = kmem_cache_alloc(task_group_cache, GFP_KERNEL | __GFP_ZERO);
-+ if (!tg)
-+ return ERR_PTR(-ENOMEM);
-+
-+ return tg;
-+}
-+
-+void sched_online_group(struct task_group *tg, struct task_group *parent)
-+{
-+}
-+
-+/* rcu callback to free various structures associated with a task group */
-+static void sched_free_group_rcu(struct rcu_head *rhp)
-+{
-+ /* Now it should be safe to free those cfs_rqs */
-+ sched_free_group(container_of(rhp, struct task_group, rcu));
-+}
-+
-+void sched_destroy_group(struct task_group *tg)
-+{
-+	/* Wait for possible concurrent references to cfs_rqs to complete */
-+ call_rcu(&tg->rcu, sched_free_group_rcu);
-+}
-+
-+void sched_offline_group(struct task_group *tg)
-+{
-+}
-+
-+static inline struct task_group *css_tg(struct cgroup_subsys_state *css)
-+{
-+ return css ? container_of(css, struct task_group, css) : NULL;
-+}
-+
-+static struct cgroup_subsys_state *
-+cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
-+{
-+ struct task_group *parent = css_tg(parent_css);
-+ struct task_group *tg;
-+
-+ if (!parent) {
-+ /* This is early initialization for the top cgroup */
-+ return &root_task_group.css;
-+ }
-+
-+ tg = sched_create_group(parent);
-+ if (IS_ERR(tg))
-+ return ERR_PTR(-ENOMEM);
-+ return &tg->css;
-+}
-+
-+/* Expose task group only after completing cgroup initialization */
-+static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
-+{
-+ struct task_group *tg = css_tg(css);
-+ struct task_group *parent = css_tg(css->parent);
-+
-+ if (parent)
-+ sched_online_group(tg, parent);
-+ return 0;
-+}
-+
-+static void cpu_cgroup_css_released(struct cgroup_subsys_state *css)
-+{
-+ struct task_group *tg = css_tg(css);
-+
-+ sched_offline_group(tg);
-+}
-+
-+static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
-+{
-+ struct task_group *tg = css_tg(css);
-+
-+ /*
-+ * Relies on the RCU grace period between css_released() and this.
-+ */
-+ sched_free_group(tg);
-+}
-+
-+static void cpu_cgroup_fork(struct task_struct *task)
-+{
-+}
-+
-+static int cpu_cgroup_can_attach(struct cgroup_taskset *tset)
-+{
-+ return 0;
-+}
-+
-+static void cpu_cgroup_attach(struct cgroup_taskset *tset)
-+{
-+}
-+
-+static struct cftype cpu_files[] = {
-+ { } /* Terminate */
-+};
-+
-+struct cgroup_subsys cpu_cgrp_subsys = {
-+ .css_alloc = cpu_cgroup_css_alloc,
-+ .css_online = cpu_cgroup_css_online,
-+ .css_released = cpu_cgroup_css_released,
-+ .css_free = cpu_cgroup_css_free,
-+ .fork = cpu_cgroup_fork,
-+ .can_attach = cpu_cgroup_can_attach,
-+ .attach = cpu_cgroup_attach,
-+ .legacy_cftypes = cpu_files,
-+ .early_init = true,
-+};
-+#endif /* CONFIG_CGROUP_SCHED */
-diff -Nur a/kernel/sched/MuQSS.h b/kernel/sched/MuQSS.h
---- a/kernel/sched/MuQSS.h 1970-01-01 01:00:00.000000000 +0100
-+++ b/kernel/sched/MuQSS.h 2019-01-05 20:22:51.099998516 +0000
-@@ -0,0 +1,725 @@
-+/* SPDX-License-Identifier: GPL-2.0 */
-+#include <linux/sched.h>
-+#include <linux/cpuidle.h>
-+#include <linux/freezer.h>
-+#include <linux/interrupt.h>
-+#include <linux/skip_list.h>
-+#include <linux/stop_machine.h>
-+#include <linux/sched/topology.h>
-+#include <linux/u64_stats_sync.h>
-+#include <linux/tsacct_kern.h>
-+#include <linux/sched/clock.h>
-+#include <linux/sched/wake_q.h>
-+#include <linux/sched/signal.h>
-+#include <linux/sched/mm.h>
-+#include <linux/sched/cpufreq.h>
-+#include <linux/sched/stat.h>
-+#include <linux/sched/nohz.h>
-+#include <linux/sched/debug.h>
-+#include <linux/sched/hotplug.h>
-+#include <linux/sched/task.h>
-+#include <linux/sched/task_stack.h>
-+#include <linux/sched/cputime.h>
-+#include <linux/sched/init.h>
-+
-+#include <linux/kernel_stat.h>
-+#include <linux/tick.h>
-+#include <linux/slab.h>
-+
-+#ifdef CONFIG_PARAVIRT
-+#include <asm/paravirt.h>
-+#endif
-+
-+#include "cpuacct.h"
-+
-+#ifndef MUQSS_SCHED_H
-+#define MUQSS_SCHED_H
-+
-+#ifdef CONFIG_SCHED_DEBUG
-+# define SCHED_WARN_ON(x) WARN_ONCE(x, #x)
-+#else
-+# define SCHED_WARN_ON(x) ((void)(x))
-+#endif
-+
-+/* task_struct::on_rq states: */
-+#define TASK_ON_RQ_QUEUED 1
-+#define TASK_ON_RQ_MIGRATING 2
-+
-+struct rq;
-+
-+#ifdef CONFIG_SMP
-+
-+static inline bool sched_asym_prefer(int a, int b)
-+{
-+ return arch_asym_cpu_priority(a) > arch_asym_cpu_priority(b);
-+}
-+
-+/*
-+ * We add the notion of a root-domain which will be used to define per-domain
-+ * variables. Each exclusive cpuset essentially defines an island domain by
-+ * fully partitioning the member cpus from any other cpuset. Whenever a new
-+ * exclusive cpuset is created, we also create and attach a new root-domain
-+ * object.
-+ *
-+ */
-+struct root_domain {
-+ atomic_t refcount;
-+ atomic_t rto_count;
-+ struct rcu_head rcu;
-+ cpumask_var_t span;
-+ cpumask_var_t online;
-+
-+ /* Indicate more than one runnable task for any CPU */
-+ bool overload;
-+
-+ /*
-+ * The bit corresponding to a CPU gets set here if such CPU has more
-+ * than one runnable -deadline task (as it is below for RT tasks).
-+ */
-+ cpumask_var_t dlo_mask;
-+ atomic_t dlo_count;
-+ /* Replace unused CFS structures with void */
-+ //struct dl_bw dl_bw;
-+ //struct cpudl cpudl;
-+ void *dl_bw;
-+ void *cpudl;
-+
-+ /*
-+ * The "RT overload" flag: it gets set if a CPU has more than
-+ * one runnable RT task.
-+ */
-+ cpumask_var_t rto_mask;
-+ //struct cpupri cpupri;
-+ void *cpupri;
-+
-+ unsigned long max_cpu_capacity;
-+};
-+
-+extern struct root_domain def_root_domain;
-+extern struct mutex sched_domains_mutex;
-+
-+extern void init_defrootdomain(void);
-+extern int sched_init_domains(const struct cpumask *cpu_map);
-+extern void rq_attach_root(struct rq *rq, struct root_domain *rd);
-+
-+static inline void cpupri_cleanup(void __maybe_unused *cpupri)
-+{
-+}
-+
-+static inline void cpudl_cleanup(void __maybe_unused *cpudl)
-+{
-+}
-+
-+static inline void init_dl_bw(void __maybe_unused *dl_bw)
-+{
-+}
-+
-+static inline int cpudl_init(void __maybe_unused *dl_bw)
-+{
-+ return 0;
-+}
-+
-+static inline int cpupri_init(void __maybe_unused *cpupri)
-+{
-+ return 0;
-+}
-+#endif /* CONFIG_SMP */
-+
-+/*
-+ * This is the main, per-CPU runqueue data structure.
-+ * This data should only be modified by the local cpu.
-+ */
-+struct rq {
-+ raw_spinlock_t lock;
-+
-+ struct task_struct *curr, *idle, *stop;
-+ struct mm_struct *prev_mm;
-+
-+ unsigned int nr_running;
-+ /*
-+ * This is part of a global counter where only the total sum
-+ * over all CPUs matters. A task can increase this counter on
-+ * one CPU and if it got migrated afterwards it may decrease
-+ * it on another CPU. Always updated under the runqueue lock:
-+ */
-+ unsigned long nr_uninterruptible;
-+ u64 nr_switches;
-+
-+ /* Stored data about rq->curr to work outside rq lock */
-+ u64 rq_deadline;
-+ int rq_prio;
-+
-+ /* Best queued id for use outside lock */
-+ u64 best_key;
-+
-+ unsigned long last_scheduler_tick; /* Last jiffy this RQ ticked */
-+ unsigned long last_jiffy; /* Last jiffy this RQ updated rq clock */
-+ u64 niffies; /* Last time this RQ updated rq clock */
-+ u64 last_niffy; /* Last niffies as updated by local clock */
-+ u64 last_jiffy_niffies; /* Niffies @ last_jiffy */
-+
-+ u64 load_update; /* When we last updated load */
-+ unsigned long load_avg; /* Rolling load average */
-+#ifdef CONFIG_SMT_NICE
-+ struct mm_struct *rq_mm;
-+ int rq_smt_bias; /* Policy/nice level bias across smt siblings */
-+#endif
-+ /* Accurate timekeeping data */
-+ unsigned long user_ns, nice_ns, irq_ns, softirq_ns, system_ns,
-+ iowait_ns, idle_ns;
-+ atomic_t nr_iowait;
-+
-+ skiplist_node node;
-+ skiplist *sl;
-+#ifdef CONFIG_SMP
-+ struct task_struct *preempt; /* Preempt triggered on this task */
-+ struct task_struct *preempting; /* Hint only, what task is preempting */
-+
-+ int cpu; /* cpu of this runqueue */
-+ bool online;
-+
-+ struct root_domain *rd;
-+ struct sched_domain *sd;
-+
-+ unsigned long cpu_capacity_orig;
-+
-+ int *cpu_locality; /* CPU relative cache distance */
-+ struct rq **rq_order; /* RQs ordered by relative cache distance */
-+
-+#ifdef CONFIG_SCHED_SMT
-+ cpumask_t thread_mask;
-+ bool (*siblings_idle)(struct rq *rq);
-+ /* See if all smt siblings are idle */
-+#endif /* CONFIG_SCHED_SMT */
-+#ifdef CONFIG_SCHED_MC
-+ cpumask_t core_mask;
-+ bool (*cache_idle)(struct rq *rq);
-+ /* See if all cache siblings are idle */
-+#endif /* CONFIG_SCHED_MC */
-+#endif /* CONFIG_SMP */
-+#ifdef CONFIG_IRQ_TIME_ACCOUNTING
-+ u64 prev_irq_time;
-+#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
-+#ifdef CONFIG_PARAVIRT
-+ u64 prev_steal_time;
-+#endif /* CONFIG_PARAVIRT */
-+#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
-+ u64 prev_steal_time_rq;
-+#endif /* CONFIG_PARAVIRT_TIME_ACCOUNTING */
-+
-+ u64 clock, old_clock, last_tick;
-+ u64 clock_task;
-+ int dither;
-+
-+ int iso_ticks;
-+ bool iso_refractory;
-+
-+#ifdef CONFIG_HIGH_RES_TIMERS
-+ struct hrtimer hrexpiry_timer;
-+#endif
-+
-+#ifdef CONFIG_SCHEDSTATS
-+
-+ /* latency stats */
-+ struct sched_info rq_sched_info;
-+ unsigned long long rq_cpu_time;
-+ /* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */
-+
-+ /* sys_sched_yield() stats */
-+ unsigned int yld_count;
-+
-+ /* schedule() stats */
-+ unsigned int sched_switch;
-+ unsigned int sched_count;
-+ unsigned int sched_goidle;
-+
-+ /* try_to_wake_up() stats */
-+ unsigned int ttwu_count;
-+ unsigned int ttwu_local;
-+#endif /* CONFIG_SCHEDSTATS */
-+
-+#ifdef CONFIG_SMP
-+ struct llist_head wake_list;
-+#endif
-+
-+#ifdef CONFIG_CPU_IDLE
-+ /* Must be inspected within a rcu lock section */
-+ struct cpuidle_state *idle_state;
-+#endif
-+};
-+
-+#ifdef CONFIG_SMP
-+struct rq *cpu_rq(int cpu);
-+#endif
-+
-+#ifndef CONFIG_SMP
-+extern struct rq *uprq;
-+#define cpu_rq(cpu) (uprq)
-+#define this_rq() (uprq)
-+#define raw_rq() (uprq)
-+#define task_rq(p) (uprq)
-+#define cpu_curr(cpu) ((uprq)->curr)
-+#else /* CONFIG_SMP */
-+DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
-+#define this_rq() this_cpu_ptr(&runqueues)
-+#define raw_rq() raw_cpu_ptr(&runqueues)
-+#define task_rq(p) cpu_rq(task_cpu(p))
-+#endif /* CONFIG_SMP */
-+
-+static inline int task_current(struct rq *rq, struct task_struct *p)
-+{
-+ return rq->curr == p;
-+}
-+
-+static inline int task_running(struct rq *rq, struct task_struct *p)
-+{
-+#ifdef CONFIG_SMP
-+ return p->on_cpu;
-+#else
-+ return task_current(rq, p);
-+#endif
-+}
-+
-+static inline void rq_lock(struct rq *rq)
-+ __acquires(rq->lock)
-+{
-+ raw_spin_lock(&rq->lock);
-+}
-+
-+static inline void rq_unlock(struct rq *rq)
-+ __releases(rq->lock)
-+{
-+ raw_spin_unlock(&rq->lock);
-+}
-+
-+static inline void rq_lock_irq(struct rq *rq)
-+ __acquires(rq->lock)
-+{
-+ raw_spin_lock_irq(&rq->lock);
-+}
-+
-+static inline void rq_unlock_irq(struct rq *rq)
-+ __releases(rq->lock)
-+{
-+ raw_spin_unlock_irq(&rq->lock);
-+}
-+
-+static inline void rq_lock_irqsave(struct rq *rq, unsigned long *flags)
-+ __acquires(rq->lock)
-+{
-+ raw_spin_lock_irqsave(&rq->lock, *flags);
-+}
-+
-+static inline void rq_unlock_irqrestore(struct rq *rq, unsigned long *flags)
-+ __releases(rq->lock)
-+{
-+ raw_spin_unlock_irqrestore(&rq->lock, *flags);
-+}
-+
-+static inline struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
-+ __acquires(p->pi_lock)
-+ __acquires(rq->lock)
-+{
-+ struct rq *rq;
-+
-+ while (42) {
-+ raw_spin_lock_irqsave(&p->pi_lock, *flags);
-+ rq = task_rq(p);
-+ raw_spin_lock(&rq->lock);
-+ if (likely(rq == task_rq(p)))
-+ break;
-+ raw_spin_unlock(&rq->lock);
-+ raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
-+ }
-+ return rq;
-+}
-+
-+static inline void task_rq_unlock(struct rq *rq, struct task_struct *p, unsigned long *flags)
-+ __releases(rq->lock)
-+ __releases(p->pi_lock)
-+{
-+ rq_unlock(rq);
-+ raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
-+}
-+
-+static inline struct rq *__task_rq_lock(struct task_struct *p)
-+ __acquires(rq->lock)
-+{
-+ struct rq *rq;
-+
-+ lockdep_assert_held(&p->pi_lock);
-+
-+ while (42) {
-+ rq = task_rq(p);
-+ raw_spin_lock(&rq->lock);
-+ if (likely(rq == task_rq(p)))
-+ break;
-+ raw_spin_unlock(&rq->lock);
-+ }
-+ return rq;
-+}
-+
-+static inline void __task_rq_unlock(struct rq *rq)
-+{
-+ rq_unlock(rq);
-+}
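
A minimal sketch of the calling pattern these helpers expect, mirroring normalise_rt_tasks() earlier in the patch (frob_task() is a hypothetical caller, not part of the API):

    static void frob_task(struct task_struct *p)
    {
            unsigned long flags;
            struct rq *rq;

            /* Retries until the rq locked is still task_rq(p). */
            rq = task_rq_lock(p, &flags);
            /* ... p cannot change runqueue here; safe to modify ... */
            task_rq_unlock(rq, p, &flags);
    }

Taking p->pi_lock first and re-checking task_rq(p) after acquiring rq->lock is what makes the pair stable against a concurrent migration between the two lock acquisitions.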
-+
-+/*
-+ * {de,en}queue flags: Most are not used on MuQSS.
-+ *
-+ * DEQUEUE_SLEEP - task is no longer runnable
-+ * ENQUEUE_WAKEUP - task just became runnable
-+ *
-+ * SAVE/RESTORE - an otherwise spurious dequeue/enqueue, done to ensure tasks
-+ * are in a known state which allows modification. Such pairs
-+ * should preserve as much state as possible.
-+ *
-+ * MOVE - paired with SAVE/RESTORE, explicitly does not preserve the location
-+ * in the runqueue.
-+ *
-+ * ENQUEUE_HEAD - place at front of runqueue (tail if not specified)
-+ * ENQUEUE_REPLENISH - CBS (replenish runtime and postpone deadline)
-+ * ENQUEUE_MIGRATED - the task was migrated during wakeup
-+ *
-+ */
-+
-+#define DEQUEUE_SAVE 0x02 /* matches ENQUEUE_RESTORE */
-+
-+#define ENQUEUE_RESTORE 0x02
-+
-+static inline u64 __rq_clock_broken(struct rq *rq)
-+{
-+ return READ_ONCE(rq->clock);
-+}
-+
-+static inline u64 rq_clock(struct rq *rq)
-+{
-+ lockdep_assert_held(&rq->lock);
-+
-+ return rq->clock;
-+}
-+
-+static inline u64 rq_clock_task(struct rq *rq)
-+{
-+ lockdep_assert_held(&rq->lock);
-+
-+ return rq->clock_task;
-+}
-+
-+#ifdef CONFIG_NUMA
-+enum numa_topology_type {
-+ NUMA_DIRECT,
-+ NUMA_GLUELESS_MESH,
-+ NUMA_BACKPLANE,
-+};
-+extern enum numa_topology_type sched_numa_topology_type;
-+extern int sched_max_numa_distance;
-+extern bool find_numa_distance(int distance);
-+
-+extern void sched_init_numa(void);
-+extern void sched_domains_numa_masks_set(unsigned int cpu);
-+extern void sched_domains_numa_masks_clear(unsigned int cpu);
-+#else
-+static inline void sched_init_numa(void) { }
-+static inline void sched_domains_numa_masks_set(unsigned int cpu) { }
-+static inline void sched_domains_numa_masks_clear(unsigned int cpu) { }
-+#endif
-+
-+extern struct mutex sched_domains_mutex;
-+extern struct static_key_false sched_schedstats;
-+
-+#define rcu_dereference_check_sched_domain(p) \
-+ rcu_dereference_check((p), \
-+ lockdep_is_held(&sched_domains_mutex))
-+
-+#ifdef CONFIG_SMP
-+
-+/*
-+ * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
-+ * See detach_destroy_domains: synchronize_sched for details.
-+ *
-+ * The domain tree of any CPU may only be accessed from within
-+ * preempt-disabled sections.
-+ */
-+#define for_each_domain(cpu, __sd) \
-+ for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); \
-+ __sd; __sd = __sd->parent)
-+
-+#define for_each_lower_domain(sd) for (; sd; sd = sd->child)
-+
-+/**
-+ * highest_flag_domain - Return highest sched_domain containing flag.
-+ * @cpu: The cpu whose highest level of sched domain is to
-+ * be returned.
-+ * @flag: The flag to check for the highest sched_domain
-+ * for the given cpu.
-+ *
-+ * Returns the highest sched_domain of a cpu which contains the given flag.
-+ */
-+static inline struct sched_domain *highest_flag_domain(int cpu, int flag)
-+{
-+ struct sched_domain *sd, *hsd = NULL;
-+
-+ for_each_domain(cpu, sd) {
-+ if (!(sd->flags & flag))
-+ break;
-+ hsd = sd;
-+ }
-+
-+ return hsd;
-+}
-+
-+static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)
-+{
-+ struct sched_domain *sd;
-+
-+ for_each_domain(cpu, sd) {
-+ if (sd->flags & flag)
-+ break;
-+ }
-+
-+ return sd;
-+}
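
A sketch of how such helpers are typically used, modelled on mainline's update_top_cache_domain() (update_llc() is hypothetical; SD_SHARE_PKG_RESOURCES marks domains whose CPUs share a cache, and the per-CPU variables are those declared just below):

    static void update_llc(int cpu)
    {
            struct sched_domain *sd;
            int size = 1;

            /* The highest domain still sharing package resources is the LLC. */
            sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);
            if (sd)
                    size = cpumask_weight(sched_domain_span(sd));

            rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
            per_cpu(sd_llc_size, cpu) = size;
    }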
-+
-+DECLARE_PER_CPU(struct sched_domain *, sd_llc);
-+DECLARE_PER_CPU(int, sd_llc_size);
-+DECLARE_PER_CPU(int, sd_llc_id);
-+DECLARE_PER_CPU(struct sched_domain_shared *, sd_llc_shared);
-+DECLARE_PER_CPU(struct sched_domain *, sd_numa);
-+DECLARE_PER_CPU(struct sched_domain *, sd_asym);
-+
-+struct sched_group_capacity {
-+ atomic_t ref;
-+ /*
-+ * CPU capacity of this group, SCHED_CAPACITY_SCALE being max capacity
-+ * for a single CPU.
-+ */
-+ unsigned long capacity;
-+ unsigned long min_capacity; /* Min per-CPU capacity in group */
-+ unsigned long next_update;
-+ int imbalance; /* XXX unrelated to capacity but shared group state */
-+
-+#ifdef CONFIG_SCHED_DEBUG
-+ int id;
-+#endif
-+
-+ unsigned long cpumask[0]; /* balance mask */
-+};
-+
-+struct sched_group {
-+ struct sched_group *next; /* Must be a circular list */
-+ atomic_t ref;
-+
-+ unsigned int group_weight;
-+ struct sched_group_capacity *sgc;
-+ int asym_prefer_cpu; /* cpu of highest priority in group */
-+
-+ /*
-+ * The CPUs this group covers.
-+ *
-+ * NOTE: this field is variable length. (Allocated dynamically
-+ * by attaching extra space to the end of the structure,
-+ * depending on how many CPUs the kernel has booted up with)
-+ */
-+ unsigned long cpumask[0];
-+};
-+
-+static inline struct cpumask *sched_group_span(struct sched_group *sg)
-+{
-+ return to_cpumask(sg->cpumask);
-+}
-+
-+/*
-+ * See build_balance_mask().
-+ */
-+static inline struct cpumask *group_balance_mask(struct sched_group *sg)
-+{
-+ return to_cpumask(sg->sgc->cpumask);
-+}
-+
-+/**
-+ * group_first_cpu - Returns the first cpu in the cpumask of a sched_group.
-+ * @group: The group whose first cpu is to be returned.
-+ */
-+static inline unsigned int group_first_cpu(struct sched_group *group)
-+{
-+ return cpumask_first(sched_group_span(group));
-+}
-+
-+#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
-+void register_sched_domain_sysctl(void);
-+void dirty_sched_domain_sysctl(int cpu);
-+void unregister_sched_domain_sysctl(void);
-+#else
-+static inline void register_sched_domain_sysctl(void)
-+{
-+}
-+static inline void dirty_sched_domain_sysctl(int cpu)
-+{
-+}
-+static inline void unregister_sched_domain_sysctl(void)
-+{
-+}
-+#endif
-+
-+extern void sched_ttwu_pending(void);
-+extern void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask);
-+extern void set_rq_online(struct rq *rq);
-+extern void set_rq_offline(struct rq *rq);
-+extern bool sched_smp_initialized;
-+
-+static inline void update_group_capacity(struct sched_domain *sd, int cpu)
-+{
-+}
-+
-+static inline void trigger_load_balance(struct rq *rq)
-+{
-+}
-+
-+#define sched_feat(x) 0
-+
-+#else /* CONFIG_SMP */
-+
-+static inline void sched_ttwu_pending(void) { }
-+
-+#endif /* CONFIG_SMP */
-+
-+#ifdef CONFIG_CPU_IDLE
-+static inline void idle_set_state(struct rq *rq,
-+ struct cpuidle_state *idle_state)
-+{
-+ rq->idle_state = idle_state;
-+}
-+
-+static inline struct cpuidle_state *idle_get_state(struct rq *rq)
-+{
-+ SCHED_WARN_ON(!rcu_read_lock_held());
-+ return rq->idle_state;
-+}
-+#else
-+static inline void idle_set_state(struct rq *rq,
-+ struct cpuidle_state *idle_state)
-+{
-+}
-+
-+static inline struct cpuidle_state *idle_get_state(struct rq *rq)
-+{
-+ return NULL;
-+}
-+#endif
-+
-+#ifdef CONFIG_SCHED_DEBUG
-+extern bool sched_debug_enabled;
-+#endif
-+
-+extern void schedule_idle(void);
-+
-+#ifdef CONFIG_IRQ_TIME_ACCOUNTING
-+struct irqtime {
-+ u64 total;
-+ u64 tick_delta;
-+ u64 irq_start_time;
-+ struct u64_stats_sync sync;
-+};
-+
-+DECLARE_PER_CPU(struct irqtime, cpu_irqtime);
-+
-+/*
-+ * Returns the irqtime minus the softirq time computed by ksoftirqd.
-+ * Otherwise ksoftirqd's sum_exec_runtime would have its own runtime
-+ * subtracted and would never move forward.
-+ */
-+static inline u64 irq_time_read(int cpu)
-+{
-+ struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu);
-+ unsigned int seq;
-+ u64 total;
-+
-+ do {
-+ seq = __u64_stats_fetch_begin(&irqtime->sync);
-+ total = irqtime->total;
-+ } while (__u64_stats_fetch_retry(&irqtime->sync, seq));
-+
-+ return total;
-+}
-+#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
-+
-+#ifdef CONFIG_SMP
-+static inline int cpu_of(struct rq *rq)
-+{
-+ return rq->cpu;
-+}
-+#else /* CONFIG_SMP */
-+static inline int cpu_of(struct rq *rq)
-+{
-+ return 0;
-+}
-+#endif
-+
-+#ifdef CONFIG_CPU_FREQ
-+DECLARE_PER_CPU(struct update_util_data *, cpufreq_update_util_data);
-+
-+static inline void cpufreq_trigger(struct rq *rq, unsigned int flags)
-+{
-+ struct update_util_data *data;
-+
-+ data = rcu_dereference_sched(*per_cpu_ptr(&cpufreq_update_util_data,
-+ cpu_of(rq)));
-+
-+ if (data)
-+ data->func(data, rq->niffies, flags);
-+}
-+#else
-+static inline void cpufreq_trigger(struct rq *rq, unsigned int flag)
-+{
-+}
-+#endif /* CONFIG_CPU_FREQ */
-+
-+#ifdef arch_scale_freq_capacity
-+#ifndef arch_scale_freq_invariant
-+#define arch_scale_freq_invariant() (true)
-+#endif
-+#else /* arch_scale_freq_capacity */
-+#define arch_scale_freq_invariant() (false)
-+#endif
-+
-+/*
-+ * This should only be called when current == rq->idle. Dodgy workaround for
-+ * when softirqs are pending and we are in the idle loop. Setting the resched
-+ * flag on current will kick us out of the idle loop and the softirqs will be
-+ * serviced on our next pass through schedule().
-+ */
-+static inline bool softirq_pending(int cpu)
-+{
-+ if (likely(!local_softirq_pending()))
-+ return false;
-+ set_tsk_need_resched(current);
-+ return true;
-+}
-+
-+#ifdef CONFIG_64BIT
-+static inline u64 read_sum_exec_runtime(struct task_struct *t)
-+{
-+ return tsk_seruntime(t);
-+}
-+#else
-+struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags);
-+void task_rq_unlock(struct rq *rq, struct task_struct *p, unsigned long *flags);
-+
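
The 32-bit fallback below exists because a u64 load is not atomic there: without the runqueue lock a reader could pair the low word of one update with the high word of another, so only CONFIG_64BIT gets the lock-free fast path.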
-+static inline u64 read_sum_exec_runtime(struct task_struct *t)
-+{
-+ unsigned long flags;
-+ u64 ns;
-+ struct rq *rq;
-+
-+ rq = task_rq_lock(t, &flags);
-+ ns = tsk_seruntime(t);
-+ task_rq_unlock(rq, t, &flags);
-+
-+ return ns;
-+}
-+#endif
-+
-+#endif /* MUQSS_SCHED_H */
-diff -Nur a/kernel/sched/sched.h b/kernel/sched/sched.h
---- a/kernel/sched/sched.h 2018-12-21 13:13:19.000000000 +0000
-+++ b/kernel/sched/sched.h 2019-01-05 20:22:51.099998516 +0000
-@@ -1,5 +1,8 @@
- /* SPDX-License-Identifier: GPL-2.0 */
-
-+#ifdef CONFIG_SCHED_MUQSS
-+#include "MuQSS.h"
-+#else /* CONFIG_SCHED_MUQSS */
- #include <linux/sched.h>
- #include <linux/sched/autogroup.h>
- #include <linux/sched/sysctl.h>
-@@ -2103,3 +2106,29 @@
- #else /* arch_scale_freq_capacity */
- #define arch_scale_freq_invariant() (false)
- #endif
-+
-+static inline bool softirq_pending(int cpu)
-+{
-+ return false;
-+}
-+
-+#ifdef CONFIG_64BIT
-+static inline u64 read_sum_exec_runtime(struct task_struct *t)
-+{
-+ return t->se.sum_exec_runtime;
-+}
-+#else
-+static inline u64 read_sum_exec_runtime(struct task_struct *t)
-+{
-+ u64 ns;
-+ struct rq_flags rf;
-+ struct rq *rq;
-+
-+ rq = task_rq_lock(t, &rf);
-+ ns = t->se.sum_exec_runtime;
-+ task_rq_unlock(rq, t, &rf);
-+
-+ return ns;
-+}
-+#endif
-+#endif /* CONFIG_SCHED_MUQSS */
-diff -Nur a/kernel/skip_list.c b/kernel/skip_list.c
---- a/kernel/skip_list.c 1970-01-01 01:00:00.000000000 +0100
-+++ b/kernel/skip_list.c 2019-01-05 20:22:51.099998516 +0000
-@@ -0,0 +1,148 @@
-+/*
-+ Copyright (C) 2011,2016 Con Kolivas.
-+
-+ Code based on example originally by William Pugh.
-+
-+Skip lists are a probabilistic alternative to balanced trees, as
-+described in the June 1990 issue of CACM; they were invented by
-+William Pugh in 1987.
-+
-+A couple of comments about this implementation:
-+The routine randomLevel has been hard-coded to generate random
-+levels using p=0.25. It can be easily changed.
-+
-+The insertion routine has been implemented so as to use the
-+dirty hack described in the CACM paper: if a random level is
-+generated that is more than the current maximum level, the
-+current maximum level plus one is used instead.
-+
-+Levels start at zero and go up to MaxLevel (which is equal to
-+MaxNumberOfLevels-1).
-+
-+The routines defined in this file are:
-+
-+init: defines slnode
-+
-+new_skiplist: returns a new, empty list
-+
-+randomLevel: Returns a random level based on a u64 random seed passed to it.
-+In MuQSS, the "niffy" time is used for this purpose.
-+
-+insert(l,key, value): inserts the binding (key, value) into l. This operation
-+occurs in O(log n) time.
-+
-+delnode(slnode, l, node): deletes any binding of key from the list l based on the
-+actual node value. This operation occurs in O(k) time where k is the
-+number of levels of the node in question (max 8). The original delete
-+function occurred in O(log n) time and involved a search.
-+
-+MuQSS Notes: In this implementation of skiplists, there are bidirectional
-+next/prev pointers and the insert function returns a pointer to the actual
-+node where the value is stored. The key here is chosen by the scheduler so as to
-+sort tasks according to the priority list requirements and is no longer used
-+by the scheduler after insertion. The scheduler lookup, however, occurs in
-+O(1) time because it is always the first item in the level 0 linked list.
-+Since the task struct stores a copy of the node pointer upon skiplist_insert,
-+it can also remove it much faster than the original implementation with the
-+aid of prev<->next pointer manipulation and no searching.
-+
-+*/
-+
-+#include <linux/slab.h>
-+#include <linux/skip_list.h>
-+
-+#define MaxNumberOfLevels 8
-+#define MaxLevel (MaxNumberOfLevels - 1)
-+
-+void skiplist_init(skiplist_node *slnode)
-+{
-+ int i;
-+
-+ slnode->key = 0xFFFFFFFFFFFFFFFF;
-+ slnode->level = 0;
-+ slnode->value = NULL;
-+ for (i = 0; i < MaxNumberOfLevels; i++)
-+ slnode->next[i] = slnode->prev[i] = slnode;
-+}
-+
-+skiplist *new_skiplist(skiplist_node *slnode)
-+{
-+ skiplist *l = kzalloc(sizeof(skiplist), GFP_ATOMIC);
-+
-+ BUG_ON(!l);
-+ l->header = slnode;
-+ return l;
-+}
-+
-+void free_skiplist(skiplist *l)
-+{
-+ skiplist_node *p, *q;
-+
-+ p = l->header;
-+ do {
-+ q = p->next[0];
-+ p->next[0]->prev[0] = q->prev[0];
-+ skiplist_node_init(p);
-+ p = q;
-+ } while (p != l->header);
-+ kfree(l);
-+}
-+
-+void skiplist_node_init(skiplist_node *node)
-+{
-+ memset(node, 0, sizeof(skiplist_node));
-+}
-+
-+static inline unsigned int randomLevel(const long unsigned int randseed)
-+{
-+ return find_first_bit(&randseed, MaxLevel) / 2;
-+}
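
To see the p = 0.25 promotion rate the header comment mentions: find_first_bit() returns the index of the lowest set bit, and for an ideally random seed that index is k with probability 2^-(k+1); reaching level >= L therefore requires the low 2L bits to be clear, which happens with probability 2^-2L = 4^-L.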
-+
-+void skiplist_insert(skiplist *l, skiplist_node *node, keyType key, valueType value, unsigned int randseed)
-+{
-+ skiplist_node *update[MaxNumberOfLevels];
-+ skiplist_node *p, *q;
-+ int k = l->level;
-+
-+ p = l->header;
-+ do {
-+ while (q = p->next[k], q->key <= key)
-+ p = q;
-+ update[k] = p;
-+ } while (--k >= 0);
-+
-+ ++l->entries;
-+ k = randomLevel(randseed);
-+ if (k > l->level) {
-+ k = ++l->level;
-+ update[k] = l->header;
-+ }
-+
-+ node->level = k;
-+ node->key = key;
-+ node->value = value;
-+ do {
-+ p = update[k];
-+ node->next[k] = p->next[k];
-+ p->next[k] = node;
-+ node->prev[k] = p;
-+ node->next[k]->prev[k] = node;
-+ } while (--k >= 0);
-+}
-+
-+void skiplist_delete(skiplist *l, skiplist_node *node)
-+{
-+ int k, m = node->level;
-+
-+ for (k = 0; k <= m; k++) {
-+ node->prev[k]->next[k] = node->next[k];
-+ node->next[k]->prev[k] = node->prev[k];
-+ }
-+ skiplist_node_init(node);
-+ if (m == l->level) {
-+ while (l->header->next[m] == l->header && l->header->prev[m] == l->header && m > 0)
-+ m--;
-+ l->level = m;
-+ }
-+ l->entries--;
-+}
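
A minimal usage sketch of this API (assuming, per the MuQSS headers, that keyType is u64 and valueType is void *; skiplist_demo() is hypothetical - in the scheduler proper the node is embedded in the task struct and the niffy clock supplies randseed):

    static void skiplist_demo(struct task_struct *p, u64 key, u64 niffies)
    {
            static skiplist_node header;
            skiplist_node node;
            skiplist *sl;

            skiplist_init(&header);
            sl = new_skiplist(&header);

            skiplist_node_init(&node);
            skiplist_insert(sl, &node, key, p, niffies);    /* O(log n) */
            /* The lowest key is always sl->header->next[0]: O(1) lookup. */
            skiplist_delete(sl, &node);                     /* O(k) */
            free_skiplist(sl);
    }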
-diff -Nur a/kernel/sysctl.c b/kernel/sysctl.c
---- a/kernel/sysctl.c 2019-01-05 20:17:13.859238862 +0000
-+++ b/kernel/sysctl.c 2019-01-05 20:22:51.099998516 +0000
-@@ -135,6 +135,12 @@
- static unsigned long one_ul __read_only = 1;
- static int one_hundred __read_only = 100;
- static int one_thousand __read_only = 1000;
-+#ifdef CONFIG_SCHED_MUQSS
-+extern int rr_interval;
-+extern int sched_interactive;
-+extern int sched_iso_cpu;
-+extern int sched_yield_type;
-+#endif
- #ifdef CONFIG_PRINTK
- static int ten_thousand __read_only = 10000;
- #endif
-@@ -296,7 +302,7 @@
- { }
- };
-
--#ifdef CONFIG_SCHED_DEBUG
-+#if defined(CONFIG_SCHED_DEBUG) && !defined(CONFIG_SCHED_MUQSS)
- static int min_sched_granularity_ns __read_only = 100000; /* 100 usecs */
- static int max_sched_granularity_ns __read_only = NSEC_PER_SEC; /* 1 second */
- static int min_wakeup_granularity_ns __read_only; /* 0 usecs */
-@@ -313,6 +319,7 @@
- #endif
-
- static struct ctl_table kern_table[] = {
-+#ifndef CONFIG_SCHED_MUQSS
- {
- .procname = "sched_child_runs_first",
- .data = &sysctl_sched_child_runs_first,
-@@ -475,6 +482,7 @@
- .extra1 = &one,
- },
- #endif
-+#endif /* !CONFIG_SCHED_MUQSS */
- #ifdef CONFIG_PROVE_LOCKING
- {
- .procname = "prove_locking",
-@@ -1073,6 +1081,44 @@
- .proc_handler = proc_dointvec,
- },
- #endif
-+#ifdef CONFIG_SCHED_MUQSS
-+ {
-+ .procname = "rr_interval",
-+ .data = &rr_interval,
-+ .maxlen = sizeof (int),
-+ .mode = 0644,
-+ .proc_handler = &proc_dointvec_minmax,
-+ .extra1 = &one,
-+ .extra2 = &one_thousand,
-+ },
-+ {
-+ .procname = "interactive",
-+ .data = &sched_interactive,
-+ .maxlen = sizeof(int),
-+ .mode = 0644,
-+ .proc_handler = &proc_dointvec_minmax,
-+ .extra1 = &zero,
-+ .extra2 = &one,
-+ },
-+ {
-+ .procname = "iso_cpu",
-+ .data = &sched_iso_cpu,
-+ .maxlen = sizeof (int),
-+ .mode = 0644,
-+ .proc_handler = &proc_dointvec_minmax,
-+ .extra1 = &zero,
-+ .extra2 = &one_hundred,
-+ },
-+ {
-+ .procname = "yield_type",
-+ .data = &sched_yield_type,
-+ .maxlen = sizeof (int),
-+ .mode = 0644,
-+ .proc_handler = &proc_dointvec_minmax,
-+ .extra1 = &zero,
-+ .extra2 = &two,
-+ },
-+#endif
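
For reference, these entries surface under /proc/sys/kernel/ like any other sysctl, so the scheduler can be tuned at runtime; a hypothetical snippet setting the round-robin interval to 6 (milliseconds) within the 1..1000 bounds enforced by extra1/extra2 above:

    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/proc/sys/kernel/rr_interval", "w");

            if (!f)
                    return 1;
            fprintf(f, "%d\n", 6);
            return fclose(f) ? 1 : 0;
    }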
- #if defined(CONFIG_S390) && defined(CONFIG_SMP)
- {
- .procname = "spin_retry",
-diff -Nur a/kernel/time/clockevents.c b/kernel/time/clockevents.c
---- a/kernel/time/clockevents.c 2018-12-21 13:13:19.000000000 +0000
-+++ b/kernel/time/clockevents.c 2019-01-05 20:22:51.099998516 +0000
-@@ -198,8 +198,13 @@
-
- #ifdef CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST
-
-+#ifdef CONFIG_SCHED_MUQSS
-+/* Limit min_delta to 100us */
-+#define MIN_DELTA_LIMIT (NSEC_PER_SEC / 10000)
-+#else
- /* Limit min_delta to a jiffie */
- #define MIN_DELTA_LIMIT (NSEC_PER_SEC / HZ)
-+#endif
-
- /**
- * clockevents_increase_min_delta - raise minimum delta of a clock event device
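
The arithmetic behind the override: at HZ=100 the stock limit is NSEC_PER_SEC/HZ = 10,000,000 ns, a 10 ms floor on clock event deltas, which would round sub-jiffy high-resolution timeouts up to whole ticks; NSEC_PER_SEC/10000 = 100,000 ns lowers that floor to the 100 us named in the comment.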
-diff -Nur a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
---- a/kernel/time/posix-cpu-timers.c 2018-12-21 13:13:19.000000000 +0000
-+++ b/kernel/time/posix-cpu-timers.c 2019-01-05 20:22:51.109998835 +0000
-@@ -818,7 +818,7 @@
- tsk_expires->virt_exp = expires;
-
- tsk_expires->sched_exp = check_timers_list(++timers, firing,
-- tsk->se.sum_exec_runtime);
-+ tsk_seruntime(tsk));
-
- /*
- * Check for the special case thread timers.
-@@ -828,7 +828,7 @@
- unsigned long hard = task_rlimit_max(tsk, RLIMIT_RTTIME);
-
- if (hard != RLIM_INFINITY &&
-- tsk->rt.timeout > DIV_ROUND_UP(hard, USEC_PER_SEC/HZ)) {
-+ tsk_rttimeout(tsk) > DIV_ROUND_UP(hard, USEC_PER_SEC/HZ)) {
- /*
- * At the hard limit, we just die.
- * No need to calculate anything else now.
-@@ -840,7 +840,7 @@
- __group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
- return;
- }
-- if (tsk->rt.timeout > DIV_ROUND_UP(soft, USEC_PER_SEC/HZ)) {
-+ if (tsk_rttimeout(tsk) > DIV_ROUND_UP(soft, USEC_PER_SEC/HZ)) {
- /*
- * At the soft limit, send a SIGXCPU every second.
- */
-@@ -1081,7 +1081,7 @@
- struct task_cputime task_sample;
-
- task_cputime(tsk, &task_sample.utime, &task_sample.stime);
-- task_sample.sum_exec_runtime = tsk->se.sum_exec_runtime;
-+ task_sample.sum_exec_runtime = tsk_seruntime(tsk);
- if (task_cputime_expired(&task_sample, &tsk->cputime_expires))
- return 1;
- }
-diff -Nur a/kernel/time/timer.c b/kernel/time/timer.c
---- a/kernel/time/timer.c 2019-01-05 20:17:13.859238862 +0000
-+++ b/kernel/time/timer.c 2019-01-05 20:22:51.109998835 +0000
-@@ -1434,7 +1434,7 @@
- * Check, if the next hrtimer event is before the next timer wheel
- * event:
- */
--static u64 cmp_next_hrtimer_event(u64 basem, u64 expires)
-+static u64 cmp_next_hrtimer_event(struct timer_base *base, u64 basem, u64 expires)
- {
- u64 nextevt = hrtimer_get_next_event();
-
-@@ -1452,6 +1452,9 @@
- if (nextevt <= basem)
- return basem;
-
-+ if (nextevt < expires && nextevt - basem <= TICK_NSEC)
-+ base->is_idle = false;
-+
- /*
- * Round up to the next jiffie. High resolution timers are
- * off, so the hrtimers are expired in the tick and we need to
-@@ -1521,7 +1524,7 @@
- }
- raw_spin_unlock(&base->lock);
-
-- return cmp_next_hrtimer_event(basem, expires);
-+ return cmp_next_hrtimer_event(base, basem, expires);
- }
-
- /**
-diff -Nur a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
---- a/kernel/trace/trace_selftest.c 2018-12-21 13:13:19.000000000 +0000
-+++ b/kernel/trace/trace_selftest.c 2019-01-05 20:22:51.109998835 +0000
-@@ -1041,10 +1041,15 @@
- {
- /* Make this a -deadline thread */
- static const struct sched_attr attr = {
-+#ifdef CONFIG_SCHED_MUQSS
-+ /* No deadline on MuQSS, use RR */
-+ .sched_policy = SCHED_RR,
-+#else
- .sched_policy = SCHED_DEADLINE,
- .sched_runtime = 100000ULL,
- .sched_deadline = 10000000ULL,
- .sched_period = 10000000ULL
-+#endif
- };
- struct wakeup_test_data *x = data;
-
diff --git a/sys-kernel/linux-sources-redcore-lts/files/4.14-0002-Make-preemptible-kernel-default.patch b/sys-kernel/linux-sources-redcore-lts/files/4.14-0002-Make-preemptible-kernel-default.patch
deleted file mode 100644
index 69abb373..00000000
--- a/sys-kernel/linux-sources-redcore-lts/files/4.14-0002-Make-preemptible-kernel-default.patch
+++ /dev/null
@@ -1,733 +0,0 @@
-From e8e37da685f7988182d7920a711e00dd2457af65 Mon Sep 17 00:00:00 2001
-From: Con Kolivas <kernel@kolivas.org>
-Date: Sat, 29 Oct 2016 11:20:37 +1100
-Subject: [PATCH 02/16] Make preemptible kernel default.
-
-Make full preempt default on all arches.
----
- arch/arc/configs/tb10x_defconfig | 2 +-
- arch/arm/configs/bcm2835_defconfig | 2 +-
- arch/arm/configs/imx_v6_v7_defconfig | 2 +-
- arch/arm/configs/mps2_defconfig | 2 +-
- arch/arm/configs/mxs_defconfig | 2 +-
- arch/blackfin/configs/BF518F-EZBRD_defconfig | 2 +-
- arch/blackfin/configs/BF526-EZBRD_defconfig | 2 +-
- arch/blackfin/configs/BF527-EZKIT-V2_defconfig | 2 +-
- arch/blackfin/configs/BF527-EZKIT_defconfig | 2 +-
- arch/blackfin/configs/BF527-TLL6527M_defconfig | 2 +-
- arch/blackfin/configs/BF533-EZKIT_defconfig | 2 +-
- arch/blackfin/configs/BF533-STAMP_defconfig | 2 +-
- arch/blackfin/configs/BF537-STAMP_defconfig | 2 +-
- arch/blackfin/configs/BF538-EZKIT_defconfig | 2 +-
- arch/blackfin/configs/BF548-EZKIT_defconfig | 2 +-
- arch/blackfin/configs/BF561-ACVILON_defconfig | 2 +-
- arch/blackfin/configs/BF561-EZKIT-SMP_defconfig | 2 +-
- arch/blackfin/configs/BF561-EZKIT_defconfig | 2 +-
- arch/blackfin/configs/BF609-EZKIT_defconfig | 2 +-
- arch/blackfin/configs/BlackStamp_defconfig | 2 +-
- arch/blackfin/configs/CM-BF527_defconfig | 2 +-
- arch/blackfin/configs/PNAV-10_defconfig | 2 +-
- arch/blackfin/configs/SRV1_defconfig | 2 +-
- arch/blackfin/configs/TCM-BF518_defconfig | 2 +-
- arch/mips/configs/fuloong2e_defconfig | 3 ++-
- arch/mips/configs/gpr_defconfig | 3 ++-
- arch/mips/configs/ip22_defconfig | 3 ++-
- arch/mips/configs/ip28_defconfig | 3 ++-
- arch/mips/configs/jazz_defconfig | 3 ++-
- arch/mips/configs/mtx1_defconfig | 3 ++-
- arch/mips/configs/nlm_xlr_defconfig | 2 +-
- arch/mips/configs/pic32mzda_defconfig | 2 +-
- arch/mips/configs/pistachio_defconfig | 2 +-
- arch/mips/configs/pnx8335_stb225_defconfig | 2 +-
- arch/mips/configs/rm200_defconfig | 3 ++-
- arch/parisc/configs/712_defconfig | 2 +-
- arch/parisc/configs/c3000_defconfig | 2 +-
- arch/parisc/configs/default_defconfig | 2 +-
- arch/powerpc/configs/c2k_defconfig | 2 +-
- arch/powerpc/configs/ppc6xx_defconfig | 2 +-
- arch/score/configs/spct6600_defconfig | 2 +-
- arch/sh/configs/se7712_defconfig | 2 +-
- arch/sh/configs/se7721_defconfig | 2 +-
- arch/sh/configs/titan_defconfig | 2 +-
- arch/sparc/configs/sparc64_defconfig | 2 +-
- arch/tile/configs/tilegx_defconfig | 2 +-
- arch/tile/configs/tilepro_defconfig | 2 +-
- arch/x86/configs/i386_defconfig | 2 +-
- arch/x86/configs/x86_64_defconfig | 2 +-
- kernel/Kconfig.preempt | 7 ++++---
- 50 files changed, 60 insertions(+), 52 deletions(-)
-
-diff --git a/arch/arc/configs/tb10x_defconfig b/arch/arc/configs/tb10x_defconfig
-index f30182549395..42910f628869 100644
---- a/arch/arc/configs/tb10x_defconfig
-+++ b/arch/arc/configs/tb10x_defconfig
-@@ -28,7 +28,7 @@ CONFIG_ARC_PLAT_TB10X=y
- CONFIG_ARC_CACHE_LINE_SHIFT=5
- CONFIG_HZ=250
- CONFIG_ARC_BUILTIN_DTB_NAME="abilis_tb100_dvk"
--CONFIG_PREEMPT_VOLUNTARY=y
-+CONFIG_PREEMPT=y
- # CONFIG_COMPACTION is not set
- CONFIG_NET=y
- CONFIG_PACKET=y
-diff --git a/arch/arm/configs/bcm2835_defconfig b/arch/arm/configs/bcm2835_defconfig
-index 43dab4890ad3..44a52166ca5e 100644
---- a/arch/arm/configs/bcm2835_defconfig
-+++ b/arch/arm/configs/bcm2835_defconfig
-@@ -29,7 +29,7 @@ CONFIG_MODULE_UNLOAD=y
- CONFIG_ARCH_MULTI_V6=y
- CONFIG_ARCH_BCM=y
- CONFIG_ARCH_BCM2835=y
--CONFIG_PREEMPT_VOLUNTARY=y
-+CONFIG_PREEMPT=y
- CONFIG_AEABI=y
- CONFIG_KSM=y
- CONFIG_CLEANCACHE=y
-diff --git a/arch/arm/configs/imx_v6_v7_defconfig b/arch/arm/configs/imx_v6_v7_defconfig
-index 32acac9ab81a..1482bb312987 100644
---- a/arch/arm/configs/imx_v6_v7_defconfig
-+++ b/arch/arm/configs/imx_v6_v7_defconfig
-@@ -47,7 +47,7 @@ CONFIG_PCI_MSI=y
- CONFIG_PCI_IMX6=y
- CONFIG_SMP=y
- CONFIG_ARM_PSCI=y
--CONFIG_PREEMPT_VOLUNTARY=y
-+CONFIG_PREEMPT=y
- CONFIG_AEABI=y
- CONFIG_HIGHMEM=y
- CONFIG_CMA=y
-diff --git a/arch/arm/configs/mps2_defconfig b/arch/arm/configs/mps2_defconfig
-index 0bcdec7cc169..10ceaefa51e0 100644
---- a/arch/arm/configs/mps2_defconfig
-+++ b/arch/arm/configs/mps2_defconfig
-@@ -18,7 +18,7 @@ CONFIG_ARCH_MPS2=y
- CONFIG_SET_MEM_PARAM=y
- CONFIG_DRAM_BASE=0x21000000
- CONFIG_DRAM_SIZE=0x1000000
--CONFIG_PREEMPT_VOLUNTARY=y
-+CONFIG_PREEMPT=y
- # CONFIG_ATAGS is not set
- CONFIG_ZBOOT_ROM_TEXT=0x0
- CONFIG_ZBOOT_ROM_BSS=0x0
-diff --git a/arch/arm/configs/mxs_defconfig b/arch/arm/configs/mxs_defconfig
-index e5822ab01b7d..3e77e02f678f 100644
---- a/arch/arm/configs/mxs_defconfig
-+++ b/arch/arm/configs/mxs_defconfig
-@@ -27,7 +27,7 @@ CONFIG_BLK_DEV_INTEGRITY=y
- # CONFIG_ARCH_MULTI_V7 is not set
- CONFIG_ARCH_MXS=y
- # CONFIG_ARM_THUMB is not set
--CONFIG_PREEMPT_VOLUNTARY=y
-+CONFIG_PREEMPT=y
- CONFIG_AEABI=y
- CONFIG_NET=y
- CONFIG_PACKET=y
-diff --git a/arch/blackfin/configs/BF518F-EZBRD_defconfig b/arch/blackfin/configs/BF518F-EZBRD_defconfig
-index 99c00d835f47..39b91dfa55b5 100644
---- a/arch/blackfin/configs/BF518F-EZBRD_defconfig
-+++ b/arch/blackfin/configs/BF518F-EZBRD_defconfig
-@@ -19,7 +19,7 @@ CONFIG_MODULE_UNLOAD=y
- # CONFIG_BLK_DEV_BSG is not set
- # CONFIG_IOSCHED_DEADLINE is not set
- # CONFIG_IOSCHED_CFQ is not set
--CONFIG_PREEMPT_VOLUNTARY=y
-+CONFIG_PREEMPT=y
- CONFIG_BF518=y
- CONFIG_IRQ_TIMER0=12
- # CONFIG_CYCLES_CLOCKSOURCE is not set
-diff --git a/arch/blackfin/configs/BF526-EZBRD_defconfig b/arch/blackfin/configs/BF526-EZBRD_defconfig
-index e66ba31ef84d..675cadb3a0c4 100644
---- a/arch/blackfin/configs/BF526-EZBRD_defconfig
-+++ b/arch/blackfin/configs/BF526-EZBRD_defconfig
-@@ -19,7 +19,7 @@ CONFIG_MODULE_UNLOAD=y
- # CONFIG_BLK_DEV_BSG is not set
- # CONFIG_IOSCHED_DEADLINE is not set
- # CONFIG_IOSCHED_CFQ is not set
--CONFIG_PREEMPT_VOLUNTARY=y
-+CONFIG_PREEMPT=y
- CONFIG_BF526=y
- CONFIG_IRQ_TIMER0=12
- CONFIG_BFIN526_EZBRD=y
-diff --git a/arch/blackfin/configs/BF527-EZKIT-V2_defconfig b/arch/blackfin/configs/BF527-EZKIT-V2_defconfig
-index 0207c588c19f..4c517c443af5 100644
---- a/arch/blackfin/configs/BF527-EZKIT-V2_defconfig
-+++ b/arch/blackfin/configs/BF527-EZKIT-V2_defconfig
-@@ -19,7 +19,7 @@ CONFIG_MODULE_UNLOAD=y
- # CONFIG_BLK_DEV_BSG is not set
- # CONFIG_IOSCHED_DEADLINE is not set
- # CONFIG_IOSCHED_CFQ is not set
--CONFIG_PREEMPT_VOLUNTARY=y
-+CONFIG_PREEMPT=y
- CONFIG_BF527=y
- CONFIG_BF_REV_0_2=y
- CONFIG_BFIN527_EZKIT_V2=y
-diff --git a/arch/blackfin/configs/BF527-EZKIT_defconfig b/arch/blackfin/configs/BF527-EZKIT_defconfig
-index 99c131ba7d90..bf8df3e6cf02 100644
---- a/arch/blackfin/configs/BF527-EZKIT_defconfig
-+++ b/arch/blackfin/configs/BF527-EZKIT_defconfig
-@@ -19,7 +19,7 @@ CONFIG_MODULE_UNLOAD=y
- # CONFIG_BLK_DEV_BSG is not set
- # CONFIG_IOSCHED_DEADLINE is not set
- # CONFIG_IOSCHED_CFQ is not set
--CONFIG_PREEMPT_VOLUNTARY=y
-+CONFIG_PREEMPT=y
- CONFIG_BF527=y
- CONFIG_BF_REV_0_1=y
- CONFIG_IRQ_USB_INT0=11
-diff --git a/arch/blackfin/configs/BF527-TLL6527M_defconfig b/arch/blackfin/configs/BF527-TLL6527M_defconfig
-index cdeb51856f26..0220b3b15c53 100644
---- a/arch/blackfin/configs/BF527-TLL6527M_defconfig
-+++ b/arch/blackfin/configs/BF527-TLL6527M_defconfig
-@@ -21,7 +21,7 @@ CONFIG_MODULE_UNLOAD=y
- # CONFIG_LBDAF is not set
- # CONFIG_BLK_DEV_BSG is not set
- # CONFIG_IOSCHED_DEADLINE is not set
--CONFIG_PREEMPT_VOLUNTARY=y
-+CONFIG_PREEMPT=y
- CONFIG_BF527=y
- CONFIG_BF_REV_0_2=y
- CONFIG_BFIN527_TLL6527M=y
-diff --git a/arch/blackfin/configs/BF533-EZKIT_defconfig b/arch/blackfin/configs/BF533-EZKIT_defconfig
-index ed7d2c096739..6023e3fd2c48 100644
---- a/arch/blackfin/configs/BF533-EZKIT_defconfig
-+++ b/arch/blackfin/configs/BF533-EZKIT_defconfig
-@@ -19,7 +19,7 @@ CONFIG_MODULE_UNLOAD=y
- # CONFIG_BLK_DEV_BSG is not set
- # CONFIG_IOSCHED_DEADLINE is not set
- # CONFIG_IOSCHED_CFQ is not set
--CONFIG_PREEMPT_VOLUNTARY=y
-+CONFIG_PREEMPT=y
- CONFIG_BFIN533_EZKIT=y
- CONFIG_TIMER0=11
- CONFIG_CLKIN_HZ=27000000
-diff --git a/arch/blackfin/configs/BF533-STAMP_defconfig b/arch/blackfin/configs/BF533-STAMP_defconfig
-index 0c241f4d28d7..f5cd0f18b711 100644
---- a/arch/blackfin/configs/BF533-STAMP_defconfig
-+++ b/arch/blackfin/configs/BF533-STAMP_defconfig
-@@ -19,7 +19,7 @@ CONFIG_MODULE_UNLOAD=y
- # CONFIG_BLK_DEV_BSG is not set
- # CONFIG_IOSCHED_DEADLINE is not set
- # CONFIG_IOSCHED_CFQ is not set
--CONFIG_PREEMPT_VOLUNTARY=y
-+CONFIG_PREEMPT=y
- CONFIG_TIMER0=11
- CONFIG_HIGH_RES_TIMERS=y
- CONFIG_NOMMU_INITIAL_TRIM_EXCESS=0
-diff --git a/arch/blackfin/configs/BF537-STAMP_defconfig b/arch/blackfin/configs/BF537-STAMP_defconfig
-index e5360b30e39a..48085fde7f9e 100644
---- a/arch/blackfin/configs/BF537-STAMP_defconfig
-+++ b/arch/blackfin/configs/BF537-STAMP_defconfig
-@@ -19,7 +19,7 @@ CONFIG_MODULE_UNLOAD=y
- # CONFIG_BLK_DEV_BSG is not set
- # CONFIG_IOSCHED_DEADLINE is not set
- # CONFIG_IOSCHED_CFQ is not set
--CONFIG_PREEMPT_VOLUNTARY=y
-+CONFIG_PREEMPT=y
- CONFIG_BF537=y
- CONFIG_HIGH_RES_TIMERS=y
- CONFIG_NOMMU_INITIAL_TRIM_EXCESS=0
-diff --git a/arch/blackfin/configs/BF538-EZKIT_defconfig b/arch/blackfin/configs/BF538-EZKIT_defconfig
-index 60f6fb86125c..12deeaaef3cb 100644
---- a/arch/blackfin/configs/BF538-EZKIT_defconfig
-+++ b/arch/blackfin/configs/BF538-EZKIT_defconfig
-@@ -21,7 +21,7 @@ CONFIG_MODULE_UNLOAD=y
- # CONFIG_BLK_DEV_BSG is not set
- # CONFIG_IOSCHED_DEADLINE is not set
- # CONFIG_IOSCHED_CFQ is not set
--CONFIG_PREEMPT_VOLUNTARY=y
-+CONFIG_PREEMPT=y
- CONFIG_BF538=y
- CONFIG_IRQ_TIMER0=12
- CONFIG_IRQ_TIMER1=12
-diff --git a/arch/blackfin/configs/BF548-EZKIT_defconfig b/arch/blackfin/configs/BF548-EZKIT_defconfig
-index 38cb17d218d4..6a68ffc55b5a 100644
---- a/arch/blackfin/configs/BF548-EZKIT_defconfig
-+++ b/arch/blackfin/configs/BF548-EZKIT_defconfig
-@@ -19,7 +19,7 @@ CONFIG_MODULE_UNLOAD=y
- # CONFIG_BLK_DEV_BSG is not set
- # CONFIG_IOSCHED_DEADLINE is not set
- # CONFIG_IOSCHED_CFQ is not set
--CONFIG_PREEMPT_VOLUNTARY=y
-+CONFIG_PREEMPT=y
- CONFIG_BF548_std=y
- CONFIG_IRQ_TIMER0=11
- # CONFIG_CYCLES_CLOCKSOURCE is not set
-diff --git a/arch/blackfin/configs/BF561-ACVILON_defconfig b/arch/blackfin/configs/BF561-ACVILON_defconfig
-index 78f6bc79f910..e9f3ba783a4e 100644
---- a/arch/blackfin/configs/BF561-ACVILON_defconfig
-+++ b/arch/blackfin/configs/BF561-ACVILON_defconfig
-@@ -20,7 +20,7 @@ CONFIG_MODULE_UNLOAD=y
- # CONFIG_LBDAF is not set
- # CONFIG_BLK_DEV_BSG is not set
- # CONFIG_IOSCHED_DEADLINE is not set
--CONFIG_PREEMPT_VOLUNTARY=y
-+CONFIG_PREEMPT=y
- CONFIG_BF561=y
- CONFIG_BF_REV_0_5=y
- CONFIG_IRQ_TIMER0=10
-diff --git a/arch/blackfin/configs/BF561-EZKIT-SMP_defconfig b/arch/blackfin/configs/BF561-EZKIT-SMP_defconfig
-index fac8bb578249..89b75a6c3fab 100644
---- a/arch/blackfin/configs/BF561-EZKIT-SMP_defconfig
-+++ b/arch/blackfin/configs/BF561-EZKIT-SMP_defconfig
-@@ -19,7 +19,7 @@ CONFIG_MODULE_UNLOAD=y
- # CONFIG_BLK_DEV_BSG is not set
- # CONFIG_IOSCHED_DEADLINE is not set
- # CONFIG_IOSCHED_CFQ is not set
--CONFIG_PREEMPT_VOLUNTARY=y
-+CONFIG_PREEMPT=y
- CONFIG_BF561=y
- CONFIG_SMP=y
- CONFIG_IRQ_TIMER0=10
-diff --git a/arch/blackfin/configs/BF561-EZKIT_defconfig b/arch/blackfin/configs/BF561-EZKIT_defconfig
-index 2a2e4d0cebc1..67b3d2f419ba 100644
---- a/arch/blackfin/configs/BF561-EZKIT_defconfig
-+++ b/arch/blackfin/configs/BF561-EZKIT_defconfig
-@@ -19,7 +19,7 @@ CONFIG_MODULE_UNLOAD=y
- # CONFIG_BLK_DEV_BSG is not set
- # CONFIG_IOSCHED_DEADLINE is not set
- # CONFIG_IOSCHED_CFQ is not set
--CONFIG_PREEMPT_VOLUNTARY=y
-+CONFIG_PREEMPT=y
- CONFIG_BF561=y
- CONFIG_IRQ_TIMER0=10
- CONFIG_CLKIN_HZ=30000000
-diff --git a/arch/blackfin/configs/BF609-EZKIT_defconfig b/arch/blackfin/configs/BF609-EZKIT_defconfig
-index 3ce77f07208a..8cc75d4218fb 100644
---- a/arch/blackfin/configs/BF609-EZKIT_defconfig
-+++ b/arch/blackfin/configs/BF609-EZKIT_defconfig
-@@ -20,7 +20,7 @@ CONFIG_MODULE_UNLOAD=y
- # CONFIG_BLK_DEV_BSG is not set
- # CONFIG_IOSCHED_DEADLINE is not set
- # CONFIG_IOSCHED_CFQ is not set
--CONFIG_PREEMPT_VOLUNTARY=y
-+CONFIG_PREEMPT=y
- CONFIG_BF609=y
- CONFIG_PINT1_ASSIGN=0x01010000
- CONFIG_PINT2_ASSIGN=0x07000101
-diff --git a/arch/blackfin/configs/BlackStamp_defconfig b/arch/blackfin/configs/BlackStamp_defconfig
-index f4a9200e1ab1..9faf0ec7007f 100644
---- a/arch/blackfin/configs/BlackStamp_defconfig
-+++ b/arch/blackfin/configs/BlackStamp_defconfig
-@@ -17,7 +17,7 @@ CONFIG_MODULE_UNLOAD=y
- CONFIG_MODULE_FORCE_UNLOAD=y
- # CONFIG_BLK_DEV_BSG is not set
- # CONFIG_IOSCHED_DEADLINE is not set
--CONFIG_PREEMPT_VOLUNTARY=y
-+CONFIG_PREEMPT=y
- CONFIG_BF532=y
- CONFIG_BF_REV_0_5=y
- CONFIG_BLACKSTAMP=y
-diff --git a/arch/blackfin/configs/CM-BF527_defconfig b/arch/blackfin/configs/CM-BF527_defconfig
-index 1902bb05d086..4a1ad4fd7bb2 100644
---- a/arch/blackfin/configs/CM-BF527_defconfig
-+++ b/arch/blackfin/configs/CM-BF527_defconfig
-@@ -19,7 +19,7 @@ CONFIG_MODULES=y
- CONFIG_MODULE_UNLOAD=y
- # CONFIG_BLK_DEV_BSG is not set
- # CONFIG_IOSCHED_DEADLINE is not set
--CONFIG_PREEMPT_VOLUNTARY=y
-+CONFIG_PREEMPT=y
- CONFIG_BF527=y
- CONFIG_BF_REV_0_1=y
- CONFIG_IRQ_TIMER0=12
-diff --git a/arch/blackfin/configs/PNAV-10_defconfig b/arch/blackfin/configs/PNAV-10_defconfig
-index c7926812971c..9d787e28bbe8 100644
---- a/arch/blackfin/configs/PNAV-10_defconfig
-+++ b/arch/blackfin/configs/PNAV-10_defconfig
-@@ -15,7 +15,7 @@ CONFIG_MODULE_UNLOAD=y
- # CONFIG_BLK_DEV_BSG is not set
- # CONFIG_IOSCHED_DEADLINE is not set
- # CONFIG_IOSCHED_CFQ is not set
--CONFIG_PREEMPT_VOLUNTARY=y
-+CONFIG_PREEMPT=y
- CONFIG_BF537=y
- CONFIG_IRQ_TIMER0=12
- CONFIG_PNAV10=y
-diff --git a/arch/blackfin/configs/SRV1_defconfig b/arch/blackfin/configs/SRV1_defconfig
-index 23fdc57d657a..225df32dc9a8 100644
---- a/arch/blackfin/configs/SRV1_defconfig
-+++ b/arch/blackfin/configs/SRV1_defconfig
-@@ -13,7 +13,7 @@ CONFIG_MMAP_ALLOW_UNINITIALIZED=y
- CONFIG_MODULES=y
- CONFIG_MODULE_UNLOAD=y
- # CONFIG_IOSCHED_DEADLINE is not set
--CONFIG_PREEMPT_VOLUNTARY=y
-+CONFIG_PREEMPT=y
- CONFIG_BF537=y
- CONFIG_IRQ_TIMER0=12
- CONFIG_BOOT_LOAD=0x400000
-diff --git a/arch/blackfin/configs/TCM-BF518_defconfig b/arch/blackfin/configs/TCM-BF518_defconfig
-index e28959479fe0..425c24e43c34 100644
---- a/arch/blackfin/configs/TCM-BF518_defconfig
-+++ b/arch/blackfin/configs/TCM-BF518_defconfig
-@@ -23,7 +23,7 @@ CONFIG_MODULE_UNLOAD=y
- # CONFIG_BLK_DEV_BSG is not set
- # CONFIG_IOSCHED_DEADLINE is not set
- # CONFIG_IOSCHED_CFQ is not set
--CONFIG_PREEMPT_VOLUNTARY=y
-+CONFIG_PREEMPT=y
- CONFIG_BF518=y
- CONFIG_BF_REV_0_1=y
- CONFIG_BFIN518F_TCM=y
-diff --git a/arch/mips/configs/fuloong2e_defconfig b/arch/mips/configs/fuloong2e_defconfig
-index 499f51498ecb..f7cb39b0662c 100644
---- a/arch/mips/configs/fuloong2e_defconfig
-+++ b/arch/mips/configs/fuloong2e_defconfig
-@@ -2,7 +2,8 @@ CONFIG_MACH_LOONGSON64=y
- CONFIG_64BIT=y
- CONFIG_NO_HZ=y
- CONFIG_HIGH_RES_TIMERS=y
--CONFIG_PREEMPT_VOLUNTARY=y
-+CONFIG_PREEMPT=y
-+CONFIG_EXPERIMENTAL=y
- CONFIG_LOCALVERSION="-fuloong2e"
- # CONFIG_LOCALVERSION_AUTO is not set
- CONFIG_SYSVIPC=y
-diff --git a/arch/mips/configs/gpr_defconfig b/arch/mips/configs/gpr_defconfig
-index 55438fc9991e..db03ef4f737d 100644
---- a/arch/mips/configs/gpr_defconfig
-+++ b/arch/mips/configs/gpr_defconfig
-@@ -1,7 +1,8 @@
- CONFIG_MIPS_ALCHEMY=y
- CONFIG_MIPS_GPR=y
- CONFIG_HIGH_RES_TIMERS=y
--CONFIG_PREEMPT_VOLUNTARY=y
-+CONFIG_PREEMPT=y
-+CONFIG_EXPERIMENTAL=y
- # CONFIG_LOCALVERSION_AUTO is not set
- CONFIG_SYSVIPC=y
- CONFIG_POSIX_MQUEUE=y
-diff --git a/arch/mips/configs/ip22_defconfig b/arch/mips/configs/ip22_defconfig
-index 83e8fe2064aa..93e7b167433b 100644
---- a/arch/mips/configs/ip22_defconfig
-+++ b/arch/mips/configs/ip22_defconfig
-@@ -3,7 +3,8 @@ CONFIG_CPU_R5000=y
- CONFIG_NO_HZ=y
- CONFIG_HIGH_RES_TIMERS=y
- CONFIG_HZ_1000=y
--CONFIG_PREEMPT_VOLUNTARY=y
-+CONFIG_PREEMPT=y
-+CONFIG_EXPERIMENTAL=y
- CONFIG_SYSVIPC=y
- CONFIG_IKCONFIG=y
- CONFIG_IKCONFIG_PROC=y
-diff --git a/arch/mips/configs/ip28_defconfig b/arch/mips/configs/ip28_defconfig
-index d0a4c2cfacf8..6f0600e99c25 100644
---- a/arch/mips/configs/ip28_defconfig
-+++ b/arch/mips/configs/ip28_defconfig
-@@ -1,6 +1,7 @@
- CONFIG_SGI_IP28=y
- CONFIG_ARC_CONSOLE=y
--CONFIG_PREEMPT_VOLUNTARY=y
-+CONFIG_PREEMPT=y
-+CONFIG_EXPERIMENTAL=y
- CONFIG_SYSVIPC=y
- CONFIG_IKCONFIG=y
- CONFIG_IKCONFIG_PROC=y
-diff --git a/arch/mips/configs/jazz_defconfig b/arch/mips/configs/jazz_defconfig
-index 9ad1c94376c8..1d62ce7ff5dc 100644
---- a/arch/mips/configs/jazz_defconfig
-+++ b/arch/mips/configs/jazz_defconfig
-@@ -1,6 +1,7 @@
- CONFIG_MACH_JAZZ=y
- CONFIG_OLIVETTI_M700=y
--CONFIG_PREEMPT_VOLUNTARY=y
-+CONFIG_PREEMPT=y
-+CONFIG_EXPERIMENTAL=y
- CONFIG_SYSVIPC=y
- CONFIG_POSIX_MQUEUE=y
- CONFIG_BSD_PROCESS_ACCT=y
-diff --git a/arch/mips/configs/mtx1_defconfig b/arch/mips/configs/mtx1_defconfig
-index c3d0d0a6e044..aa3426d5f7d7 100644
---- a/arch/mips/configs/mtx1_defconfig
-+++ b/arch/mips/configs/mtx1_defconfig
-@@ -1,6 +1,7 @@
- CONFIG_MIPS_ALCHEMY=y
- CONFIG_MIPS_MTX1=y
--CONFIG_PREEMPT_VOLUNTARY=y
-+CONFIG_PREEMPT=y
-+CONFIG_EXPERIMENTAL=y
- # CONFIG_LOCALVERSION_AUTO is not set
- CONFIG_SYSVIPC=y
- CONFIG_POSIX_MQUEUE=y
-diff --git a/arch/mips/configs/nlm_xlr_defconfig b/arch/mips/configs/nlm_xlr_defconfig
-index 1e18fd7de209..b514e91e5426 100644
---- a/arch/mips/configs/nlm_xlr_defconfig
-+++ b/arch/mips/configs/nlm_xlr_defconfig
-@@ -5,7 +5,7 @@ CONFIG_DEFAULT_MMAP_MIN_ADDR=65536
- CONFIG_SMP=y
- CONFIG_NO_HZ=y
- CONFIG_HIGH_RES_TIMERS=y
--CONFIG_PREEMPT_VOLUNTARY=y
-+CONFIG_PREEMPT=y
- CONFIG_KEXEC=y
- CONFIG_CROSS_COMPILE=""
- # CONFIG_LOCALVERSION_AUTO is not set
-diff --git a/arch/mips/configs/pic32mzda_defconfig b/arch/mips/configs/pic32mzda_defconfig
-index 52192c632ae8..96b087498dab 100644
---- a/arch/mips/configs/pic32mzda_defconfig
-+++ b/arch/mips/configs/pic32mzda_defconfig
-@@ -1,7 +1,7 @@
- CONFIG_MACH_PIC32=y
- CONFIG_DTB_PIC32_MZDA_SK=y
- CONFIG_HZ_100=y
--CONFIG_PREEMPT_VOLUNTARY=y
-+CONFIG_PREEMPT=y
- # CONFIG_SECCOMP is not set
- CONFIG_SYSVIPC=y
- CONFIG_NO_HZ=y
-diff --git a/arch/mips/configs/pistachio_defconfig b/arch/mips/configs/pistachio_defconfig
-index b22a3cf149b6..cfffca3d37f4 100644
---- a/arch/mips/configs/pistachio_defconfig
-+++ b/arch/mips/configs/pistachio_defconfig
-@@ -5,7 +5,7 @@ CONFIG_MIPS_CPS=y
- CONFIG_DEFAULT_MMAP_MIN_ADDR=32768
- CONFIG_ZSMALLOC=y
- CONFIG_NR_CPUS=4
--CONFIG_PREEMPT_VOLUNTARY=y
-+CONFIG_PREEMPT=y
- # CONFIG_LOCALVERSION_AUTO is not set
- CONFIG_DEFAULT_HOSTNAME="localhost"
- CONFIG_SYSVIPC=y
-diff --git a/arch/mips/configs/pnx8335_stb225_defconfig b/arch/mips/configs/pnx8335_stb225_defconfig
-index 81b5eb89446c..19f8cea849a1 100644
---- a/arch/mips/configs/pnx8335_stb225_defconfig
-+++ b/arch/mips/configs/pnx8335_stb225_defconfig
-@@ -3,7 +3,7 @@ CONFIG_CPU_LITTLE_ENDIAN=y
- CONFIG_NO_HZ=y
- CONFIG_HIGH_RES_TIMERS=y
- CONFIG_HZ_128=y
--CONFIG_PREEMPT_VOLUNTARY=y
-+CONFIG_PREEMPT=y
- # CONFIG_SECCOMP is not set
- # CONFIG_LOCALVERSION_AUTO is not set
- # CONFIG_SWAP is not set
-diff --git a/arch/mips/configs/rm200_defconfig b/arch/mips/configs/rm200_defconfig
-index 99679e514042..2ced507a8ba7 100644
---- a/arch/mips/configs/rm200_defconfig
-+++ b/arch/mips/configs/rm200_defconfig
-@@ -2,7 +2,8 @@ CONFIG_SNI_RM=y
- CONFIG_CPU_LITTLE_ENDIAN=y
- CONFIG_ARC_CONSOLE=y
- CONFIG_HZ_1000=y
--CONFIG_PREEMPT_VOLUNTARY=y
-+CONFIG_PREEMPT=y
-+CONFIG_EXPERIMENTAL=y
- CONFIG_SYSVIPC=y
- CONFIG_POSIX_MQUEUE=y
- CONFIG_BSD_PROCESS_ACCT=y
-diff --git a/arch/parisc/configs/712_defconfig b/arch/parisc/configs/712_defconfig
-index ccc109761f44..a6a5b0b7a9c9 100644
---- a/arch/parisc/configs/712_defconfig
-+++ b/arch/parisc/configs/712_defconfig
-@@ -13,7 +13,7 @@ CONFIG_MODULES=y
- CONFIG_MODULE_UNLOAD=y
- CONFIG_MODULE_FORCE_UNLOAD=y
- CONFIG_PA7100LC=y
--CONFIG_PREEMPT_VOLUNTARY=y
-+CONFIG_PREEMPT=y
- CONFIG_GSC_LASI=y
- # CONFIG_PDC_CHASSIS is not set
- CONFIG_BINFMT_MISC=m
-diff --git a/arch/parisc/configs/c3000_defconfig b/arch/parisc/configs/c3000_defconfig
-index 8d41a73bd71b..b8e0a6662ff9 100644
---- a/arch/parisc/configs/c3000_defconfig
-+++ b/arch/parisc/configs/c3000_defconfig
-@@ -13,7 +13,7 @@ CONFIG_MODULES=y
- CONFIG_MODULE_UNLOAD=y
- CONFIG_MODULE_FORCE_UNLOAD=y
- CONFIG_PA8X00=y
--CONFIG_PREEMPT_VOLUNTARY=y
-+CONFIG_PREEMPT=y
- # CONFIG_GSC is not set
- CONFIG_PCI=y
- CONFIG_PCI_LBA=y
-diff --git a/arch/parisc/configs/default_defconfig b/arch/parisc/configs/default_defconfig
-index 52c9050a7c5c..8d86d2e989f4 100644
---- a/arch/parisc/configs/default_defconfig
-+++ b/arch/parisc/configs/default_defconfig
-@@ -14,7 +14,7 @@ CONFIG_MODULE_UNLOAD=y
- CONFIG_MODULE_FORCE_UNLOAD=y
- # CONFIG_BLK_DEV_BSG is not set
- CONFIG_PA7100LC=y
--CONFIG_PREEMPT_VOLUNTARY=y
-+CONFIG_PREEMPT=y
- CONFIG_IOMMU_CCIO=y
- CONFIG_GSC_LASI=y
- CONFIG_GSC_WAX=y
-diff --git a/arch/powerpc/configs/c2k_defconfig b/arch/powerpc/configs/c2k_defconfig
-index f1552af9eecc..f8505e6ec7b3 100644
---- a/arch/powerpc/configs/c2k_defconfig
-+++ b/arch/powerpc/configs/c2k_defconfig
-@@ -29,7 +29,7 @@ CONFIG_CPU_FREQ_GOV_POWERSAVE=m
- CONFIG_CPU_FREQ_GOV_ONDEMAND=m
- CONFIG_GEN_RTC=y
- CONFIG_HIGHMEM=y
--CONFIG_PREEMPT_VOLUNTARY=y
-+CONFIG_PREEMPT=y
- CONFIG_BINFMT_MISC=y
- CONFIG_PM=y
- CONFIG_PCI_MSI=y
-diff --git a/arch/powerpc/configs/ppc6xx_defconfig b/arch/powerpc/configs/ppc6xx_defconfig
-index da0e8d535eb8..c016af41ab4f 100644
---- a/arch/powerpc/configs/ppc6xx_defconfig
-+++ b/arch/powerpc/configs/ppc6xx_defconfig
-@@ -74,7 +74,7 @@ CONFIG_QE_GPIO=y
- CONFIG_MCU_MPC8349EMITX=y
- CONFIG_HIGHMEM=y
- CONFIG_HZ_1000=y
--CONFIG_PREEMPT_VOLUNTARY=y
-+CONFIG_PREEMPT=y
- CONFIG_BINFMT_MISC=y
- CONFIG_HIBERNATION=y
- CONFIG_PM_DEBUG=y
-diff --git a/arch/score/configs/spct6600_defconfig b/arch/score/configs/spct6600_defconfig
-index b2d8802f43b4..46434ca1fa10 100644
---- a/arch/score/configs/spct6600_defconfig
-+++ b/arch/score/configs/spct6600_defconfig
-@@ -1,5 +1,5 @@
- CONFIG_HZ_100=y
--CONFIG_PREEMPT_VOLUNTARY=y
-+CONFIG_PREEMPT=y
- CONFIG_EXPERIMENTAL=y
- # CONFIG_LOCALVERSION_AUTO is not set
- CONFIG_SYSVIPC=y
-diff --git a/arch/sh/configs/se7712_defconfig b/arch/sh/configs/se7712_defconfig
-index 5a1097641247..eb5fbf554e7f 100644
---- a/arch/sh/configs/se7712_defconfig
-+++ b/arch/sh/configs/se7712_defconfig
-@@ -23,7 +23,7 @@ CONFIG_FLATMEM_MANUAL=y
- CONFIG_SH_SOLUTION_ENGINE=y
- CONFIG_SH_PCLK_FREQ=66666666
- CONFIG_HEARTBEAT=y
--CONFIG_PREEMPT_VOLUNTARY=y
-+CONFIG_PREEMPT=y
- CONFIG_CMDLINE_OVERWRITE=y
- CONFIG_CMDLINE="console=ttySC0,115200 root=/dev/sda1"
- CONFIG_NET=y
-diff --git a/arch/sh/configs/se7721_defconfig b/arch/sh/configs/se7721_defconfig
-index 9c0ef13bee10..cbaa65c8bf9e 100644
---- a/arch/sh/configs/se7721_defconfig
-+++ b/arch/sh/configs/se7721_defconfig
-@@ -23,7 +23,7 @@ CONFIG_FLATMEM_MANUAL=y
- CONFIG_SH_7721_SOLUTION_ENGINE=y
- CONFIG_SH_PCLK_FREQ=33333333
- CONFIG_HEARTBEAT=y
--CONFIG_PREEMPT_VOLUNTARY=y
-+CONFIG_PREEMPT=y
- CONFIG_CMDLINE_OVERWRITE=y
- CONFIG_CMDLINE="console=ttySC0,115200 root=/dev/sda2"
- CONFIG_NET=y
-diff --git a/arch/sh/configs/titan_defconfig b/arch/sh/configs/titan_defconfig
-index ceb48e9b70f4..1a69eda6610c 100644
---- a/arch/sh/configs/titan_defconfig
-+++ b/arch/sh/configs/titan_defconfig
-@@ -20,7 +20,7 @@ CONFIG_SH_TITAN=y
- CONFIG_SH_PCLK_FREQ=30000000
- CONFIG_SH_DMA=y
- CONFIG_SH_DMA_API=y
--CONFIG_PREEMPT_VOLUNTARY=y
-+CONFIG_PREEMPT=y
- CONFIG_CMDLINE_OVERWRITE=y
- CONFIG_CMDLINE="console=ttySC1,38400N81 root=/dev/nfs ip=:::::eth1:autoconf rw"
- CONFIG_PCI=y
-diff --git a/arch/sparc/configs/sparc64_defconfig b/arch/sparc/configs/sparc64_defconfig
-index 4d4e1cc6402f..04bea1d28ba7 100644
---- a/arch/sparc/configs/sparc64_defconfig
-+++ b/arch/sparc/configs/sparc64_defconfig
-@@ -22,7 +22,7 @@ CONFIG_NO_HZ=y
- CONFIG_HIGH_RES_TIMERS=y
- CONFIG_NUMA=y
- CONFIG_DEFAULT_MMAP_MIN_ADDR=8192
--CONFIG_PREEMPT_VOLUNTARY=y
-+CONFIG_PREEMPT=y
- CONFIG_SUN_LDOMS=y
- CONFIG_PCI=y
- CONFIG_PCI_MSI=y
-diff --git a/arch/tile/configs/tilegx_defconfig b/arch/tile/configs/tilegx_defconfig
-index 9f94435cc44f..aa78ee6cd5eb 100644
---- a/arch/tile/configs/tilegx_defconfig
-+++ b/arch/tile/configs/tilegx_defconfig
-@@ -47,7 +47,7 @@ CONFIG_CFQ_GROUP_IOSCHED=y
- CONFIG_NR_CPUS=100
- CONFIG_HZ_100=y
- # CONFIG_COMPACTION is not set
--CONFIG_PREEMPT_VOLUNTARY=y
-+CONFIG_PREEMPT=y
- CONFIG_TILE_PCI_IO=y
- CONFIG_PCI_DEBUG=y
- # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
-diff --git a/arch/tile/configs/tilepro_defconfig b/arch/tile/configs/tilepro_defconfig
-index 1c5bd4f8ffca..38005862062c 100644
---- a/arch/tile/configs/tilepro_defconfig
-+++ b/arch/tile/configs/tilepro_defconfig
-@@ -44,7 +44,7 @@ CONFIG_KARMA_PARTITION=y
- CONFIG_CFQ_GROUP_IOSCHED=y
- CONFIG_HZ_100=y
- # CONFIG_COMPACTION is not set
--CONFIG_PREEMPT_VOLUNTARY=y
-+CONFIG_PREEMPT=y
- CONFIG_PCI_DEBUG=y
- # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
- CONFIG_BINFMT_MISC=y
-diff --git a/arch/x86/configs/i386_defconfig b/arch/x86/configs/i386_defconfig
-index 0eb9f92f3717..e5890ae917e5 100644
---- a/arch/x86/configs/i386_defconfig
-+++ b/arch/x86/configs/i386_defconfig
-@@ -41,7 +41,7 @@ CONFIG_SMP=y
- CONFIG_X86_GENERIC=y
- CONFIG_HPET_TIMER=y
- CONFIG_SCHED_SMT=y
--CONFIG_PREEMPT_VOLUNTARY=y
-+CONFIG_PREEMPT=y
- CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y
- CONFIG_X86_MCE=y
- CONFIG_X86_REBOOTFIXUPS=y
-diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig
-index 4a4b16e56d35..7452dcadda74 100644
---- a/arch/x86/configs/x86_64_defconfig
-+++ b/arch/x86/configs/x86_64_defconfig
-@@ -40,7 +40,7 @@ CONFIG_SMP=y
- CONFIG_CALGARY_IOMMU=y
- CONFIG_NR_CPUS=64
- CONFIG_SCHED_SMT=y
--CONFIG_PREEMPT_VOLUNTARY=y
-+CONFIG_PREEMPT=y
- CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y
- CONFIG_X86_MCE=y
- CONFIG_MICROCODE=y
-diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt
-index 3f9c97419f02..1dc79ec7ad09 100644
---- a/kernel/Kconfig.preempt
-+++ b/kernel/Kconfig.preempt
-@@ -1,7 +1,7 @@
-
- choice
- prompt "Preemption Model"
-- default PREEMPT_NONE
-+ default PREEMPT
-
- config PREEMPT_NONE
- bool "No Forced Preemption (Server)"
-@@ -17,7 +17,7 @@ config PREEMPT_NONE
- latencies.
-
- config PREEMPT_VOLUNTARY
-- bool "Voluntary Kernel Preemption (Desktop)"
-+ bool "Voluntary Kernel Preemption (Nothing)"
- help
- This option reduces the latency of the kernel by adding more
- "explicit preemption points" to the kernel code. These new
-@@ -31,7 +31,8 @@ config PREEMPT_VOLUNTARY
- applications to run more 'smoothly' even when the system is
- under load.
-
-- Select this if you are building a kernel for a desktop system.
-+ Select this for no system in particular (choose Preemptible
-+ instead on a desktop if you know what's good for you).
-
- config PREEMPT
- bool "Preemptible Kernel (Low-Latency Desktop)"
---
-2.11.0
-
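For context on what the deleted patch flipped: under PREEMPT_VOLUNTARY the kernel only reschedules at explicit preemption points, while CONFIG_PREEMPT (the default this patch selected) lets kernel code be preempted almost anywhere preemption is not disabled. A minimal sketch of the difference follows; `my_dev` and `process_item()` are illustrative names, not real kernel symbols, while `cond_resched()` is the genuine voluntary-preemption primitive.

#include <linux/sched.h>

/* Sketch only: under PREEMPT_VOLUNTARY this loop yields solely at the
 * cond_resched() call; under CONFIG_PREEMPT it can additionally be
 * preempted between any two instructions (outside preempt-disabled
 * sections), which is what lowers worst-case latency. */
static void hypothetical_long_scan(struct my_dev *dev)
{
        int i;

        for (i = 0; i < dev->nr_items; i++) {
                process_item(dev, i);   /* illustrative per-item work */
                cond_resched();         /* explicit (voluntary) preemption point */
        }
}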
diff --git a/sys-kernel/linux-sources-redcore-lts/files/4.14-0003-Expose-vmsplit-for-our-poor-32-bit-users.patch b/sys-kernel/linux-sources-redcore-lts/files/4.14-0003-Expose-vmsplit-for-our-poor-32-bit-users.patch
deleted file mode 100644
index b7897dbe..00000000
--- a/sys-kernel/linux-sources-redcore-lts/files/4.14-0003-Expose-vmsplit-for-our-poor-32-bit-users.patch
+++ /dev/null
@@ -1,48 +0,0 @@
-From 44fc740a3ff85d378c28a416a076cc7e019d7b8c Mon Sep 17 00:00:00 2001
-From: Con Kolivas <kernel@kolivas.org>
-Date: Fri, 12 May 2017 13:07:37 +1000
-Subject: [PATCH 03/16] Expose vmsplit for our poor 32 bit users.
-
----
- arch/x86/Kconfig | 12 ++++++------
- 1 file changed, 6 insertions(+), 6 deletions(-)
-
-diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
-index e06a7b4e1dc4..931aba4fc567 100644
---- a/arch/x86/Kconfig
-+++ b/arch/x86/Kconfig
-@@ -1361,7 +1361,7 @@ config HIGHMEM64G
- endchoice
-
- choice
-- prompt "Memory split" if EXPERT
-+ prompt "Memory split"
- default VMSPLIT_3G
- depends on X86_32
- ---help---
-@@ -1381,17 +1381,17 @@ choice
- option alone!
-
- config VMSPLIT_3G
-- bool "3G/1G user/kernel split"
-+ bool "Default 896MB lowmem (3G/1G user/kernel split)"
- config VMSPLIT_3G_OPT
- depends on !X86_PAE
-- bool "3G/1G user/kernel split (for full 1G low memory)"
-+ bool "1GB lowmem (3G/1G user/kernel split)"
- config VMSPLIT_2G
-- bool "2G/2G user/kernel split"
-+ bool "2GB lowmem (2G/2G user/kernel split)"
- config VMSPLIT_2G_OPT
- depends on !X86_PAE
-- bool "2G/2G user/kernel split (for full 2G low memory)"
-+ bool "2GB lowmem (2G/2G user/kernel split)"
- config VMSPLIT_1G
-- bool "1G/3G user/kernel split"
-+ bool "3GB lowmem (1G/3G user/kernel split)"
- endchoice
-
- config PAGE_OFFSET
---
-2.11.0
-
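The relabeled prompts above encode simple address-space arithmetic; a sketch of where the lowmem figures come from, assuming the customary 128MB vmalloc reserve on x86-32 (an assumption -- the exact reserve is tunable, and the _OPT variants shift PAGE_OFFSET to reclaim it):

/* Lowmem arithmetic behind the prompt strings (assumes the common
 * 128MB VMALLOC_RESERVE on x86-32):
 *   VMSPLIT_3G:     1024MB kernel space - 128MB vmalloc = 896MB lowmem
 *   VMSPLIT_3G_OPT: PAGE_OFFSET moved so the full ~1GB is direct-mapped
 *   VMSPLIT_2G:     2048MB kernel space - reserve       -> ~2GB lowmem
 *   VMSPLIT_1G:     3072MB kernel space - reserve       -> ~3GB lowmem
 */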
diff --git a/sys-kernel/linux-sources-redcore-lts/files/4.14-0004-Create-highres-timeout-variants-of-schedule_timeout-.patch b/sys-kernel/linux-sources-redcore-lts/files/4.14-0004-Create-highres-timeout-variants-of-schedule_timeout-.patch
deleted file mode 100644
index 3c182fbe..00000000
--- a/sys-kernel/linux-sources-redcore-lts/files/4.14-0004-Create-highres-timeout-variants-of-schedule_timeout-.patch
+++ /dev/null
@@ -1,153 +0,0 @@
-From d27b58b0707ac311be5a51594fc6f22ed1d109e5 Mon Sep 17 00:00:00 2001
-From: Con Kolivas <kernel@kolivas.org>
-Date: Sat, 12 Aug 2017 11:53:39 +1000
-Subject: [PATCH 04/16] Create highres timeout variants of schedule_timeout
- functions.
-
----
- include/linux/freezer.h | 1 +
- include/linux/sched.h | 31 +++++++++++++++++++--
- kernel/time/hrtimer.c | 71 +++++++++++++++++++++++++++++++++++++++++++++++++
- 3 files changed, 101 insertions(+), 2 deletions(-)
-
-diff --git a/include/linux/freezer.h b/include/linux/freezer.h
-index 3995df1d068f..f8645e8f2444 100644
---- a/include/linux/freezer.h
-+++ b/include/linux/freezer.h
-@@ -297,6 +297,7 @@ static inline void set_freezable(void) {}
- #define wait_event_freezekillable_unsafe(wq, condition) \
- wait_event_killable(wq, condition)
-
-+#define pm_freezing (false)
- #endif /* !CONFIG_FREEZER */
-
- #endif /* FREEZER_H_INCLUDED */
-diff --git a/include/linux/sched.h b/include/linux/sched.h
-index 35dc91a0e2ed..38852ebfa864 100644
---- a/include/linux/sched.h
-+++ b/include/linux/sched.h
-@@ -173,13 +173,40 @@ extern cpumask_var_t cpu_isolated_map;
-
- extern void scheduler_tick(void);
-
--#define MAX_SCHEDULE_TIMEOUT LONG_MAX
--
-+#define MAX_SCHEDULE_TIMEOUT LONG_MAX
- extern long schedule_timeout(long timeout);
- extern long schedule_timeout_interruptible(long timeout);
- extern long schedule_timeout_killable(long timeout);
- extern long schedule_timeout_uninterruptible(long timeout);
- extern long schedule_timeout_idle(long timeout);
-+
-+#ifdef CONFIG_HIGH_RES_TIMERS
-+extern long schedule_msec_hrtimeout(long timeout);
-+extern long schedule_min_hrtimeout(void);
-+extern long schedule_msec_hrtimeout_interruptible(long timeout);
-+extern long schedule_msec_hrtimeout_uninterruptible(long timeout);
-+#else
-+static inline long schedule_msec_hrtimeout(long timeout)
-+{
-+ return schedule_timeout(msecs_to_jiffies(timeout));
-+}
-+
-+static inline long schedule_min_hrtimeout(void)
-+{
-+ return schedule_timeout(1);
-+}
-+
-+static inline long schedule_msec_hrtimeout_interruptible(long timeout)
-+{
-+ return schedule_timeout_interruptible(msecs_to_jiffies(timeout));
-+}
-+
-+static inline long schedule_msec_hrtimeout_uninterruptible(long timeout)
-+{
-+ return schedule_timeout_uninterruptible(msecs_to_jiffies(timeout));
-+}
-+#endif
-+
- asmlinkage void schedule(void);
- extern void schedule_preempt_disabled(void);
-
-diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
-index 88f75f92ef36..13227cf2814c 100644
---- a/kernel/time/hrtimer.c
-+++ b/kernel/time/hrtimer.c
-@@ -1787,3 +1787,74 @@ int __sched schedule_hrtimeout(ktime_t *expires,
- return schedule_hrtimeout_range(expires, 0, mode);
- }
- EXPORT_SYMBOL_GPL(schedule_hrtimeout);
-+
-+/*
-+ * As per schedule_hrtimeout but takes a millisecond value and returns how
-+ * As per schedule_hrtimeout but takes a millisecond value and returns how
-+ * many milliseconds are left.
-+ */
-+long __sched schedule_msec_hrtimeout(long timeout)
-+{
-+ struct hrtimer_sleeper t;
-+ int delta, secs, jiffs;
-+ ktime_t expires;
-+
-+ if (!timeout) {
-+ __set_current_state(TASK_RUNNING);
-+ return 0;
-+ }
-+
-+ jiffs = msecs_to_jiffies(timeout);
-+ /*
-+ * If regular timer resolution is adequate or hrtimer resolution is not
-+ * (yet) better than Hz, as would occur during startup, use regular
-+ * timers.
-+ */
-+ if (jiffs > 4 || hrtimer_resolution >= NSEC_PER_SEC / HZ)
-+ return schedule_timeout(jiffs);
-+
-+ secs = timeout / 1000;
-+ delta = (timeout % 1000) * NSEC_PER_MSEC;
-+ expires = ktime_set(secs, delta);
-+
-+ hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-+ hrtimer_set_expires_range_ns(&t.timer, expires, delta);
-+
-+ hrtimer_init_sleeper(&t, current);
-+
-+ hrtimer_start_expires(&t.timer, HRTIMER_MODE_REL);
-+
-+ if (likely(t.task))
-+ schedule();
-+
-+ hrtimer_cancel(&t.timer);
-+ destroy_hrtimer_on_stack(&t.timer);
-+
-+ __set_current_state(TASK_RUNNING);
-+
-+ expires = hrtimer_expires_remaining(&t.timer);
-+ timeout = ktime_to_ms(expires);
-+ return timeout < 0 ? 0 : timeout;
-+}
-+
-+EXPORT_SYMBOL(schedule_msec_hrtimeout);
-+
-+long __sched schedule_min_hrtimeout(void)
-+{
-+ return schedule_msec_hrtimeout(1);
-+}
-+
-+EXPORT_SYMBOL(schedule_min_hrtimeout);
-+
-+long __sched schedule_msec_hrtimeout_interruptible(long timeout)
-+{
-+ __set_current_state(TASK_INTERRUPTIBLE);
-+ return schedule_msec_hrtimeout(timeout);
-+}
-+EXPORT_SYMBOL(schedule_msec_hrtimeout_interruptible);
-+
-+long __sched schedule_msec_hrtimeout_uninterruptible(long timeout)
-+{
-+ __set_current_state(TASK_UNINTERRUPTIBLE);
-+ return schedule_msec_hrtimeout(timeout);
-+}
-+EXPORT_SYMBOL(schedule_msec_hrtimeout_uninterruptible);
---
-2.11.0
-
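Usage of the helpers the deleted patch introduced, as a minimal sketch: the schedule_msec_hrtimeout* functions exist only with this patch series applied, not in mainline, and the wrapping function name here is hypothetical.

#include <linux/sched.h>

/* Sketch: sleep ~5ms with hrtimer accuracy even at HZ=100, where a
 * tick-based schedule_timeout() would round up to a whole 10ms jiffy.
 * As with schedule_timeout(), the caller sets the task state first;
 * the _interruptible/_uninterruptible wrappers set it themselves. */
static long hypothetical_short_wait(void)
{
        set_current_state(TASK_UNINTERRUPTIBLE);
        return schedule_msec_hrtimeout(5);      /* 0 on expiry, else ms left */
}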
diff --git a/sys-kernel/linux-sources-redcore-lts/files/4.14-0005-Special-case-calls-of-schedule_timeout-1-to-use-the-.patch b/sys-kernel/linux-sources-redcore-lts/files/4.14-0005-Special-case-calls-of-schedule_timeout-1-to-use-the-.patch
deleted file mode 100644
index 3c889719..00000000
--- a/sys-kernel/linux-sources-redcore-lts/files/4.14-0005-Special-case-calls-of-schedule_timeout-1-to-use-the-.patch
+++ /dev/null
@@ -1,50 +0,0 @@
-From 5da7d1778b96c514394334c92de9b3d8d71f4a29 Mon Sep 17 00:00:00 2001
-From: Con Kolivas <kernel@kolivas.org>
-Date: Sat, 5 Nov 2016 09:27:36 +1100
-Subject: [PATCH 05/16] Special case calls of schedule_timeout(1) to use the
- min hrtimeout of 1ms, working around low Hz resolutions.
-
----
- kernel/time/timer.c | 17 +++++++++++++++--
- 1 file changed, 15 insertions(+), 2 deletions(-)
-
-diff --git a/kernel/time/timer.c b/kernel/time/timer.c
-index 9c18e16059a3..dd4d1b193286 100644
---- a/kernel/time/timer.c
-+++ b/kernel/time/timer.c
-@@ -1741,6 +1741,19 @@ signed long __sched schedule_timeout(signed long timeout)
-
- expire = timeout + jiffies;
-
-+#ifdef CONFIG_HIGH_RES_TIMERS
-+ if (timeout == 1 && hrtimer_resolution < NSEC_PER_SEC / HZ) {
-+ /*
-+ * Special case 1 as being a request for the minimum timeout
-+ * and use highres timers to time out after 1ms to work around
-+ * the granularity of low Hz tick timers.
-+ */
-+ if (!schedule_min_hrtimeout())
-+ return 0;
-+ goto out_timeout;
-+ }
-+#endif
-+
- setup_timer_on_stack(&timer, process_timeout, (unsigned long)current);
- __mod_timer(&timer, expire, false);
- schedule();
-@@ -1748,10 +1761,10 @@ signed long __sched schedule_timeout(signed long timeout)
-
- /* Remove the timer from the object tracker */
- destroy_timer_on_stack(&timer);
--
-+out_timeout:
- timeout = expire - jiffies;
-
-- out:
-+out:
- return timeout < 0 ? 0 : timeout;
- }
- EXPORT_SYMBOL(schedule_timeout);
---
-2.11.0
-
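The effect of that deleted special case, sketched for an HZ=100 kernel (the figures assume high-resolution timers are available, per the hrtimer_resolution check in the hunk above; the function name is hypothetical):

#include <linux/sched.h>

/* Sketch at HZ=100 with working hrtimers: before this patch a bare
 * schedule_timeout(1) slept one full ~10ms jiffy; with it, the
 * timeout==1 case is routed to schedule_min_hrtimeout(), a ~1ms
 * high-resolution sleep, with no change needed at call sites. */
static void hypothetical_min_sleep(void)
{
        set_current_state(TASK_UNINTERRUPTIBLE);
        schedule_timeout(1);    /* ~1ms here, not one 10ms tick */
}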
diff --git a/sys-kernel/linux-sources-redcore-lts/files/4.14-0006-Convert-msleep-to-use-hrtimers-when-active.patch b/sys-kernel/linux-sources-redcore-lts/files/4.14-0006-Convert-msleep-to-use-hrtimers-when-active.patch
deleted file mode 100644
index 2f065652..00000000
--- a/sys-kernel/linux-sources-redcore-lts/files/4.14-0006-Convert-msleep-to-use-hrtimers-when-active.patch
+++ /dev/null
@@ -1,54 +0,0 @@
-From 9df803c28bb8ccb2588c0ccaf857b9e673175fed Mon Sep 17 00:00:00 2001
-From: Con Kolivas <kernel@kolivas.org>
-Date: Fri, 4 Nov 2016 09:25:54 +1100
-Subject: [PATCH 06/16] Convert msleep to use hrtimers when active.
-
----
- kernel/time/timer.c | 24 ++++++++++++++++++++++--
- 1 file changed, 22 insertions(+), 2 deletions(-)
-
-diff --git a/kernel/time/timer.c b/kernel/time/timer.c
-index dd4d1b193286..c68cb9307f64 100644
---- a/kernel/time/timer.c
-+++ b/kernel/time/timer.c
-@@ -1884,7 +1884,19 @@ void __init init_timers(void)
- */
- void msleep(unsigned int msecs)
- {
-- unsigned long timeout = msecs_to_jiffies(msecs) + 1;
-+ int jiffs = msecs_to_jiffies(msecs);
-+ unsigned long timeout;
-+
-+ /*
-+ * Use high resolution timers where the resolution of tick based
-+ * timers is inadequate.
-+ */
-+ if (jiffs < 5 && hrtimer_resolution < NSEC_PER_SEC / HZ) {
-+ while (msecs)
-+ msecs = schedule_msec_hrtimeout_uninterruptible(msecs);
-+ return;
-+ }
-+ timeout = msecs_to_jiffies(msecs) + 1;
-
- while (timeout)
- timeout = schedule_timeout_uninterruptible(timeout);
-@@ -1898,7 +1910,15 @@ EXPORT_SYMBOL(msleep);
- */
- unsigned long msleep_interruptible(unsigned int msecs)
- {
-- unsigned long timeout = msecs_to_jiffies(msecs) + 1;
-+ int jiffs = msecs_to_jiffies(msecs);
-+ unsigned long timeout;
-+
-+ if (jiffs < 5 && hrtimer_resolution < NSEC_PER_SEC / HZ) {
-+ while (msecs && !signal_pending(current))
-+ msecs = schedule_msec_hrtimeout_interruptible(msecs);
-+ return msecs;
-+ }
-+ timeout = msecs_to_jiffies(msecs) + 1;
-
- while (timeout && !signal_pending(current))
- timeout = schedule_timeout_interruptible(timeout);
---
-2.11.0
-
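What the deleted msleep() conversion bought, sketched for HZ=100 -- the ~20ms figure follows from the old msecs_to_jiffies(msecs) + 1 rounding the hunk replaces (the wrapper name is hypothetical):

#include <linux/delay.h>

/* Sketch at HZ=100: msleep(2) used to round to msecs_to_jiffies(2) + 1
 * = 2 jiffies, roughly 20ms; with the hrtimer path (jiffs < 5) it
 * sleeps close to the requested 2ms. */
static void hypothetical_settle_delay(void)
{
        msleep(2);                      /* ~2ms instead of ~20ms */
        msleep_interruptible(3);        /* likewise, still bails on signals */
}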
diff --git a/sys-kernel/linux-sources-redcore-lts/files/4.14-0007-Replace-all-schedule-timeout-1-with-schedule_min_hrt.patch b/sys-kernel/linux-sources-redcore-lts/files/4.14-0007-Replace-all-schedule-timeout-1-with-schedule_min_hrt.patch
deleted file mode 100644
index ff071da8..00000000
--- a/sys-kernel/linux-sources-redcore-lts/files/4.14-0007-Replace-all-schedule-timeout-1-with-schedule_min_hrt.patch
+++ /dev/null
@@ -1,529 +0,0 @@
-diff -Nur a/drivers/block/swim.c b/drivers/block/swim.c
---- a/drivers/block/swim.c 2018-10-10 07:54:28.000000000 +0100
-+++ b/drivers/block/swim.c 2018-11-03 16:30:39.471807304 +0000
-@@ -332,7 +332,7 @@
- if (swim_readbit(base, MOTOR_ON))
- break;
- current->state = TASK_INTERRUPTIBLE;
-- schedule_timeout(1);
-+ schedule_min_hrtimeout();
- }
- } else if (action == OFF) {
- swim_action(base, MOTOR_OFF);
-@@ -351,7 +351,7 @@
- if (!swim_readbit(base, DISK_IN))
- break;
- current->state = TASK_INTERRUPTIBLE;
-- schedule_timeout(1);
-+ schedule_min_hrtimeout();
- }
- swim_select(base, RELAX);
- }
-@@ -375,7 +375,7 @@
- for (wait = 0; wait < HZ; wait++) {
-
- current->state = TASK_INTERRUPTIBLE;
-- schedule_timeout(1);
-+ schedule_min_hrtimeout();
-
- swim_select(base, RELAX);
- if (!swim_readbit(base, STEP))
-diff -Nur a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
---- a/drivers/bluetooth/hci_qca.c 2018-10-10 07:54:28.000000000 +0100
-+++ b/drivers/bluetooth/hci_qca.c 2018-11-03 16:31:56.065260061 +0000
-@@ -880,7 +880,7 @@
- * then host can communicate with new baudrate to controller
- */
- set_current_state(TASK_UNINTERRUPTIBLE);
-- schedule_timeout(msecs_to_jiffies(BAUDRATE_SETTLE_TIMEOUT_MS));
-+ schedule_msec_hrtimeout((BAUDRATE_SETTLE_TIMEOUT_MS));
- set_current_state(TASK_RUNNING);
-
- return 0;
-diff -Nur a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
---- a/drivers/char/ipmi/ipmi_msghandler.c 2018-10-10 07:54:28.000000000 +0100
-+++ b/drivers/char/ipmi/ipmi_msghandler.c 2018-11-03 16:30:39.473807368 +0000
-@@ -2953,7 +2953,7 @@
- /* Current message first, to preserve order */
- while (intf->curr_msg && !list_empty(&intf->waiting_rcv_msgs)) {
- /* Wait for the message to clear out. */
-- schedule_timeout(1);
-+ schedule_min_hrtimeout();
- }
-
- /* No need for locks, the interface is down. */
-diff -Nur a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
---- a/drivers/char/ipmi/ipmi_ssif.c 2018-10-10 07:54:28.000000000 +0100
-+++ b/drivers/char/ipmi/ipmi_ssif.c 2018-11-03 16:30:39.473807368 +0000
-@@ -1200,7 +1200,7 @@
-
- /* make sure the driver is not looking for flags any more. */
- while (ssif_info->ssif_state != SSIF_NORMAL)
-- schedule_timeout(1);
-+ schedule_min_hrtimeout();
-
- ssif_info->stopping = true;
- del_timer_sync(&ssif_info->retry_timer);
-diff -Nur a/drivers/char/snsc.c b/drivers/char/snsc.c
---- a/drivers/char/snsc.c 2018-10-10 07:54:28.000000000 +0100
-+++ b/drivers/char/snsc.c 2018-11-03 16:30:39.474807400 +0000
-@@ -198,7 +198,7 @@
- add_wait_queue(&sd->sd_rq, &wait);
- spin_unlock_irqrestore(&sd->sd_rlock, flags);
-
-- schedule_timeout(msecs_to_jiffies(SCDRV_TIMEOUT));
-+ schedule_msec_hrtimeout((SCDRV_TIMEOUT));
-
- remove_wait_queue(&sd->sd_rq, &wait);
- if (signal_pending(current)) {
-@@ -294,7 +294,7 @@
- add_wait_queue(&sd->sd_wq, &wait);
- spin_unlock_irqrestore(&sd->sd_wlock, flags);
-
-- schedule_timeout(msecs_to_jiffies(SCDRV_TIMEOUT));
-+ schedule_msec_hrtimeout((SCDRV_TIMEOUT));
-
- remove_wait_queue(&sd->sd_wq, &wait);
- if (signal_pending(current)) {
-diff -Nur a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
---- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c 2018-10-10 07:54:28.000000000 +0100
-+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c 2018-11-03 16:30:39.474807400 +0000
-@@ -235,7 +235,7 @@
- DRM_ERROR("SVGA device lockup.\n");
- break;
- }
-- schedule_timeout(1);
-+ schedule_min_hrtimeout();
- if (interruptible && signal_pending(current)) {
- ret = -ERESTARTSYS;
- break;
-diff -Nur a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
---- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c 2018-10-10 07:54:28.000000000 +0100
-+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c 2018-11-03 16:30:39.474807400 +0000
-@@ -202,7 +202,7 @@
- break;
- }
- if (lazy)
-- schedule_timeout(1);
-+ schedule_min_hrtimeout();
- else if ((++count & 0x0F) == 0) {
- /**
- * FIXME: Use schedule_hr_timeout here for
-diff -Nur a/drivers/media/pci/ivtv/ivtv-ioctl.c b/drivers/media/pci/ivtv/ivtv-ioctl.c
---- a/drivers/media/pci/ivtv/ivtv-ioctl.c 2018-10-10 07:54:28.000000000 +0100
-+++ b/drivers/media/pci/ivtv/ivtv-ioctl.c 2018-11-03 16:30:39.475807432 +0000
-@@ -1154,7 +1154,7 @@
- TASK_UNINTERRUPTIBLE);
- if ((read_reg(IVTV_REG_DEC_LINE_FIELD) >> 16) < 100)
- break;
-- schedule_timeout(msecs_to_jiffies(25));
-+ schedule_msec_hrtimeout((25));
- }
- finish_wait(&itv->vsync_waitq, &wait);
- mutex_lock(&itv->serialize_lock);
-diff -Nur a/drivers/media/pci/ivtv/ivtv-streams.c b/drivers/media/pci/ivtv/ivtv-streams.c
---- a/drivers/media/pci/ivtv/ivtv-streams.c 2018-10-10 07:54:28.000000000 +0100
-+++ b/drivers/media/pci/ivtv/ivtv-streams.c 2018-11-03 16:30:39.475807432 +0000
-@@ -834,7 +834,7 @@
- while (!test_bit(IVTV_F_I_EOS, &itv->i_flags) &&
- time_before(jiffies,
- then + msecs_to_jiffies(2000))) {
-- schedule_timeout(msecs_to_jiffies(10));
-+ schedule_msec_hrtimeout((10));
- }
-
- /* To convert jiffies to ms, we must multiply by 1000
-diff -Nur a/drivers/mfd/ucb1x00-core.c b/drivers/mfd/ucb1x00-core.c
---- a/drivers/mfd/ucb1x00-core.c 2018-10-10 07:54:28.000000000 +0100
-+++ b/drivers/mfd/ucb1x00-core.c 2018-11-03 16:30:39.476807464 +0000
-@@ -253,7 +253,7 @@
- break;
- /* yield to other processes */
- set_current_state(TASK_INTERRUPTIBLE);
-- schedule_timeout(1);
-+ schedule_min_hrtimeout();
- }
-
- return UCB_ADC_DAT(val);
-diff -Nur a/drivers/misc/sgi-xp/xpc_channel.c b/drivers/misc/sgi-xp/xpc_channel.c
---- a/drivers/misc/sgi-xp/xpc_channel.c 2018-10-10 07:54:28.000000000 +0100
-+++ b/drivers/misc/sgi-xp/xpc_channel.c 2018-11-03 16:30:39.476807464 +0000
-@@ -837,7 +837,7 @@
-
- atomic_inc(&ch->n_on_msg_allocate_wq);
- prepare_to_wait(&ch->msg_allocate_wq, &wait, TASK_INTERRUPTIBLE);
-- ret = schedule_timeout(1);
-+ ret = schedule_min_hrtimeout();
- finish_wait(&ch->msg_allocate_wq, &wait);
- atomic_dec(&ch->n_on_msg_allocate_wq);
-
-diff -Nur a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c
---- a/drivers/net/caif/caif_hsi.c 2018-10-10 07:54:28.000000000 +0100
-+++ b/drivers/net/caif/caif_hsi.c 2018-11-03 16:30:39.477807497 +0000
-@@ -940,7 +940,7 @@
- break;
-
- set_current_state(TASK_INTERRUPTIBLE);
-- schedule_timeout(1);
-+ schedule_min_hrtimeout();
- retry--;
- }
-
-diff -Nur a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c
---- a/drivers/net/can/usb/peak_usb/pcan_usb.c 2018-10-10 07:54:28.000000000 +0100
-+++ b/drivers/net/can/usb/peak_usb/pcan_usb.c 2018-11-03 16:30:39.477807497 +0000
-@@ -250,7 +250,7 @@
- } else {
- /* the PCAN-USB needs time to init */
- set_current_state(TASK_INTERRUPTIBLE);
-- schedule_timeout(msecs_to_jiffies(PCAN_USB_STARTUP_TIMEOUT));
-+ schedule_msec_hrtimeout((PCAN_USB_STARTUP_TIMEOUT));
- }
-
- return err;
-diff -Nur a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
---- a/drivers/net/usb/lan78xx.c 2018-10-10 07:54:28.000000000 +0100
-+++ b/drivers/net/usb/lan78xx.c 2018-11-03 16:30:39.478807529 +0000
-@@ -2567,7 +2567,7 @@
- while (!skb_queue_empty(&dev->rxq) &&
- !skb_queue_empty(&dev->txq) &&
- !skb_queue_empty(&dev->done)) {
-- schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
-+ schedule_msec_hrtimeout((UNLINK_TIMEOUT_MS));
- set_current_state(TASK_UNINTERRUPTIBLE);
- netif_dbg(dev, ifdown, dev->net,
- "waited for %d urb completions\n", temp);
-diff -Nur a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
---- a/drivers/net/usb/usbnet.c 2018-10-10 07:54:28.000000000 +0100
-+++ b/drivers/net/usb/usbnet.c 2018-11-03 16:30:39.479807561 +0000
-@@ -772,7 +772,7 @@
- spin_lock_irqsave(&q->lock, flags);
- while (!skb_queue_empty(q)) {
- spin_unlock_irqrestore(&q->lock, flags);
-- schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
-+ schedule_msec_hrtimeout((UNLINK_TIMEOUT_MS));
- set_current_state(TASK_UNINTERRUPTIBLE);
- spin_lock_irqsave(&q->lock, flags);
- }
-diff -Nur a/drivers/ntb/test/ntb_perf.c b/drivers/ntb/test/ntb_perf.c
---- a/drivers/ntb/test/ntb_perf.c 2018-10-10 07:54:28.000000000 +0100
-+++ b/drivers/ntb/test/ntb_perf.c 2018-11-03 16:30:39.479807561 +0000
-@@ -310,7 +310,7 @@
- if (unlikely((jiffies - last_sleep) > 5 * HZ)) {
- last_sleep = jiffies;
- set_current_state(TASK_INTERRUPTIBLE);
-- schedule_timeout(1);
-+ schedule_min_hrtimeout();
- }
-
- if (unlikely(kthread_should_stop()))
-diff -Nur a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
---- a/drivers/scsi/fnic/fnic_scsi.c 2018-10-10 07:54:28.000000000 +0100
-+++ b/drivers/scsi/fnic/fnic_scsi.c 2018-11-03 16:30:39.480807592 +0000
-@@ -217,7 +217,7 @@
-
- /* wait for io cmpl */
- while (atomic_read(&fnic->in_flight))
-- schedule_timeout(msecs_to_jiffies(1));
-+ schedule_msec_hrtimeout((1));
-
- spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
-
-@@ -2255,7 +2255,7 @@
- }
- }
-
-- schedule_timeout(msecs_to_jiffies(2 * fnic->config.ed_tov));
-+ schedule_msec_hrtimeout((2 * fnic->config.ed_tov));
-
- /* walk again to check, if IOs are still pending in fw */
- if (fnic_is_abts_pending(fnic, lr_sc))
-diff -Nur a/drivers/scsi/snic/snic_scsi.c b/drivers/scsi/snic/snic_scsi.c
---- a/drivers/scsi/snic/snic_scsi.c 2018-10-10 07:54:28.000000000 +0100
-+++ b/drivers/scsi/snic/snic_scsi.c 2018-11-03 16:30:39.481807625 +0000
-@@ -2354,7 +2354,7 @@
-
- /* Wait for all the IOs that are entered in Qcmd */
- while (atomic_read(&snic->ios_inflight))
-- schedule_timeout(msecs_to_jiffies(1));
-+ schedule_msec_hrtimeout((1));
-
- ret = snic_issue_hba_reset(snic, sc);
- if (ret) {
-diff -Nur a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c
---- a/drivers/staging/comedi/drivers/ni_mio_common.c 2018-10-10 07:54:28.000000000 +0100
-+++ b/drivers/staging/comedi/drivers/ni_mio_common.c 2018-11-03 16:30:39.483807688 +0000
-@@ -4657,7 +4657,7 @@
- if ((status & NI67XX_CAL_STATUS_BUSY) == 0)
- break;
- set_current_state(TASK_INTERRUPTIBLE);
-- if (schedule_timeout(1))
-+ if (schedule_min_hrtimeout())
- return -EIO;
- }
- if (i == timeout) {
-diff -Nur a/drivers/staging/lustre/lnet/lnet/lib-eq.c b/drivers/staging/lustre/lnet/lnet/lib-eq.c
---- a/drivers/staging/lustre/lnet/lnet/lib-eq.c 2018-10-10 07:54:28.000000000 +0100
-+++ b/drivers/staging/lustre/lnet/lnet/lib-eq.c 2018-11-03 16:30:39.483807688 +0000
-@@ -329,7 +329,7 @@
- schedule();
- } else {
- now = jiffies;
-- schedule_timeout(msecs_to_jiffies(tms));
-+ schedule_msec_hrtimeout((tms));
- tms -= jiffies_to_msecs(jiffies - now);
- if (tms < 0) /* no more wait but may have new event */
- tms = 0;
-diff -Nur a/drivers/staging/rts5208/rtsx.c b/drivers/staging/rts5208/rtsx.c
---- a/drivers/staging/rts5208/rtsx.c 2018-10-10 07:54:28.000000000 +0100
-+++ b/drivers/staging/rts5208/rtsx.c 2018-11-03 16:30:39.483807688 +0000
-@@ -524,7 +524,7 @@
-
- for (;;) {
- set_current_state(TASK_INTERRUPTIBLE);
-- schedule_timeout(msecs_to_jiffies(POLLING_INTERVAL));
-+ schedule_msec_hrtimeout((POLLING_INTERVAL));
-
- /* lock the device pointers */
- mutex_lock(&dev->dev_mutex);
-diff -Nur a/drivers/staging/speakup/speakup_acntpc.c b/drivers/staging/speakup/speakup_acntpc.c
---- a/drivers/staging/speakup/speakup_acntpc.c 2018-10-10 07:54:28.000000000 +0100
-+++ b/drivers/staging/speakup/speakup_acntpc.c 2018-11-03 16:30:39.484807721 +0000
-@@ -206,7 +206,7 @@
- full_time_val = full_time->u.n.value;
- spin_unlock_irqrestore(&speakup_info.spinlock, flags);
- if (synth_full()) {
-- schedule_timeout(msecs_to_jiffies(full_time_val));
-+ schedule_msec_hrtimeout((full_time_val));
- continue;
- }
- set_current_state(TASK_RUNNING);
-@@ -234,7 +234,7 @@
- jiffy_delta_val = jiffy_delta->u.n.value;
- delay_time_val = delay_time->u.n.value;
- spin_unlock_irqrestore(&speakup_info.spinlock, flags);
-- schedule_timeout(msecs_to_jiffies(delay_time_val));
-+ schedule_msec_hrtimeout(delay_time_val);
- jiff_max = jiffies + jiffy_delta_val;
- }
- }
-diff -Nur a/drivers/staging/speakup/speakup_apollo.c b/drivers/staging/speakup/speakup_apollo.c
---- a/drivers/staging/speakup/speakup_apollo.c 2018-10-10 07:54:28.000000000 +0100
-+++ b/drivers/staging/speakup/speakup_apollo.c 2018-11-03 16:30:39.484807721 +0000
-@@ -174,7 +174,7 @@
- if (!synth->io_ops->synth_out(synth, ch)) {
- synth->io_ops->tiocmset(0, UART_MCR_RTS);
- synth->io_ops->tiocmset(UART_MCR_RTS, 0);
-- schedule_timeout(msecs_to_jiffies(full_time_val));
-+ schedule_msec_hrtimeout(full_time_val);
- continue;
- }
- if (time_after_eq(jiffies, jiff_max) && (ch == SPACE)) {
-diff -Nur a/drivers/staging/speakup/speakup_decext.c b/drivers/staging/speakup/speakup_decext.c
---- a/drivers/staging/speakup/speakup_decext.c 2018-10-10 07:54:28.000000000 +0100
-+++ b/drivers/staging/speakup/speakup_decext.c 2018-11-03 16:30:39.484807721 +0000
-@@ -185,7 +185,7 @@
- if (ch == '\n')
- ch = 0x0D;
- if (synth_full() || !synth->io_ops->synth_out(synth, ch)) {
-- schedule_timeout(msecs_to_jiffies(delay_time_val));
-+ schedule_msec_hrtimeout(delay_time_val);
- continue;
- }
- set_current_state(TASK_RUNNING);
-diff -Nur a/drivers/staging/speakup/speakup_decpc.c b/drivers/staging/speakup/speakup_decpc.c
---- a/drivers/staging/speakup/speakup_decpc.c 2018-10-10 07:54:28.000000000 +0100
-+++ b/drivers/staging/speakup/speakup_decpc.c 2018-11-03 16:30:39.484807721 +0000
-@@ -403,7 +403,7 @@
- if (ch == '\n')
- ch = 0x0D;
- if (dt_sendchar(ch)) {
-- schedule_timeout(msecs_to_jiffies(delay_time_val));
-+ schedule_msec_hrtimeout((delay_time_val));
- continue;
- }
- set_current_state(TASK_RUNNING);
-diff -Nur a/drivers/staging/speakup/speakup_dectlk.c b/drivers/staging/speakup/speakup_dectlk.c
---- a/drivers/staging/speakup/speakup_dectlk.c 2018-10-10 07:54:28.000000000 +0100
-+++ b/drivers/staging/speakup/speakup_dectlk.c 2018-11-03 16:30:39.485807753 +0000
-@@ -253,7 +253,7 @@
- if (ch == '\n')
- ch = 0x0D;
- if (synth_full_val || !synth->io_ops->synth_out(synth, ch)) {
-- schedule_timeout(msecs_to_jiffies(delay_time_val));
-+ schedule_msec_hrtimeout(delay_time_val);
- continue;
- }
- set_current_state(TASK_RUNNING);
-diff -Nur a/drivers/staging/speakup/speakup_dtlk.c b/drivers/staging/speakup/speakup_dtlk.c
---- a/drivers/staging/speakup/speakup_dtlk.c 2018-10-10 07:54:28.000000000 +0100
-+++ b/drivers/staging/speakup/speakup_dtlk.c 2018-11-03 16:30:39.485807753 +0000
-@@ -220,7 +220,7 @@
- delay_time_val = delay_time->u.n.value;
- spin_unlock_irqrestore(&speakup_info.spinlock, flags);
- if (synth_full()) {
-- schedule_timeout(msecs_to_jiffies(delay_time_val));
-+ schedule_msec_hrtimeout((delay_time_val));
- continue;
- }
- set_current_state(TASK_RUNNING);
-@@ -236,7 +236,7 @@
- delay_time_val = delay_time->u.n.value;
- jiffy_delta_val = jiffy_delta->u.n.value;
- spin_unlock_irqrestore(&speakup_info.spinlock, flags);
-- schedule_timeout(msecs_to_jiffies(delay_time_val));
-+ schedule_msec_hrtimeout((delay_time_val));
- jiff_max = jiffies + jiffy_delta_val;
- }
- }
-diff -Nur a/drivers/staging/speakup/speakup_keypc.c b/drivers/staging/speakup/speakup_keypc.c
---- a/drivers/staging/speakup/speakup_keypc.c 2018-10-10 07:54:28.000000000 +0100
-+++ b/drivers/staging/speakup/speakup_keypc.c 2018-11-03 16:30:39.485807753 +0000
-@@ -208,7 +208,7 @@
- full_time_val = full_time->u.n.value;
- spin_unlock_irqrestore(&speakup_info.spinlock, flags);
- if (synth_full()) {
-- schedule_timeout(msecs_to_jiffies(full_time_val));
-+ schedule_msec_hrtimeout((full_time_val));
- continue;
- }
- set_current_state(TASK_RUNNING);
-@@ -241,7 +241,7 @@
- jiffy_delta_val = jiffy_delta->u.n.value;
- delay_time_val = delay_time->u.n.value;
- spin_unlock_irqrestore(&speakup_info.spinlock, flags);
-- schedule_timeout(msecs_to_jiffies(delay_time_val));
-+ schedule_msec_hrtimeout((delay_time_val));
- jiff_max = jiffies+jiffy_delta_val;
- }
- }
-diff -Nur a/drivers/staging/speakup/synth.c b/drivers/staging/speakup/synth.c
---- a/drivers/staging/speakup/synth.c 2018-10-10 07:54:28.000000000 +0100
-+++ b/drivers/staging/speakup/synth.c 2018-11-03 16:30:39.486807785 +0000
-@@ -92,7 +92,7 @@
- if (ch == '\n')
- ch = synth->procspeech;
- if (!synth->io_ops->synth_out(synth, ch)) {
-- schedule_timeout(msecs_to_jiffies(full_time_val));
-+ schedule_msec_hrtimeout(full_time_val);
- continue;
- }
- if (time_after_eq(jiffies, jiff_max) && (ch == SPACE)) {
-diff -Nur a/drivers/staging/unisys/visornic/visornic_main.c b/drivers/staging/unisys/visornic/visornic_main.c
---- a/drivers/staging/unisys/visornic/visornic_main.c 2018-10-10 07:54:28.000000000 +0100
-+++ b/drivers/staging/unisys/visornic/visornic_main.c 2018-11-03 16:30:39.486807785 +0000
-@@ -556,7 +556,7 @@
- }
- set_current_state(TASK_INTERRUPTIBLE);
- spin_unlock_irqrestore(&devdata->priv_lock, flags);
-- wait += schedule_timeout(msecs_to_jiffies(10));
-+ wait += schedule_msec_hrtimeout((10));
- spin_lock_irqsave(&devdata->priv_lock, flags);
- }
-
-@@ -567,7 +567,7 @@
- while (1) {
- set_current_state(TASK_INTERRUPTIBLE);
- spin_unlock_irqrestore(&devdata->priv_lock, flags);
-- schedule_timeout(msecs_to_jiffies(10));
-+ schedule_msec_hrtimeout((10));
- spin_lock_irqsave(&devdata->priv_lock, flags);
- if (atomic_read(&devdata->usage))
- break;
-@@ -721,7 +721,7 @@
- }
- set_current_state(TASK_INTERRUPTIBLE);
- spin_unlock_irqrestore(&devdata->priv_lock, flags);
-- wait += schedule_timeout(msecs_to_jiffies(10));
-+ wait += schedule_msec_hrtimeout((10));
- spin_lock_irqsave(&devdata->priv_lock, flags);
- }
-
-diff -Nur a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
---- a/drivers/target/target_core_user.c 2018-10-10 07:54:28.000000000 +0100
-+++ b/drivers/target/target_core_user.c 2018-11-03 16:30:39.487807817 +0000
-@@ -808,10 +808,9 @@
- pr_debug("sleeping for ring space\n");
- mutex_unlock(&udev->cmdr_lock);
- if (udev->cmd_time_out)
-- ret = schedule_timeout(
-- msecs_to_jiffies(udev->cmd_time_out));
-+ ret = schedule_msec_hrtimeout(udev->cmd_time_out);
- else
-- ret = schedule_timeout(msecs_to_jiffies(TCMU_TIME_OUT));
-+ ret = schedule_msec_hrtimeout(TCMU_TIME_OUT);
- finish_wait(&udev->wait_cmdr, &__wait);
- if (!ret) {
- pr_warn("tcmu: command timed out\n");
-diff -Nur a/drivers/video/fbdev/omap/hwa742.c b/drivers/video/fbdev/omap/hwa742.c
---- a/drivers/video/fbdev/omap/hwa742.c 2018-10-10 07:54:28.000000000 +0100
-+++ b/drivers/video/fbdev/omap/hwa742.c 2018-11-03 16:30:39.487807817 +0000
-@@ -926,7 +926,7 @@
- if (hwa742_read_reg(HWA742_PLL_DIV_REG) & (1 << 7))
- break;
- set_current_state(TASK_UNINTERRUPTIBLE);
-- schedule_timeout(msecs_to_jiffies(5));
-+ schedule_msec_hrtimeout((5));
- }
- hwa742_set_update_mode(hwa742.update_mode_before_suspend);
- }
-diff -Nur a/drivers/video/fbdev/pxafb.c b/drivers/video/fbdev/pxafb.c
---- a/drivers/video/fbdev/pxafb.c 2018-10-10 07:54:28.000000000 +0100
-+++ b/drivers/video/fbdev/pxafb.c 2018-11-03 16:30:39.488807849 +0000
-@@ -1286,7 +1286,7 @@
- mutex_unlock(&fbi->ctrlr_lock);
-
- set_current_state(TASK_INTERRUPTIBLE);
-- schedule_timeout(msecs_to_jiffies(30));
-+ schedule_msec_hrtimeout((30));
- }
-
- pr_debug("%s(): task ending\n", __func__);
-diff -Nur a/fs/afs/vlocation.c b/fs/afs/vlocation.c
---- a/fs/afs/vlocation.c 2018-10-10 07:54:28.000000000 +0100
-+++ b/fs/afs/vlocation.c 2018-11-03 16:30:39.488807849 +0000
-@@ -129,7 +129,7 @@
- if (vl->upd_busy_cnt > 1) {
- /* second+ BUSY - sleep a little bit */
- set_current_state(TASK_UNINTERRUPTIBLE);
-- schedule_timeout(1);
-+ schedule_min_hrtimeout();
- }
- continue;
- }
-diff -Nur a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
---- a/fs/btrfs/extent-tree.c 2018-10-10 07:54:28.000000000 +0100
-+++ b/fs/btrfs/extent-tree.c 2018-11-03 16:30:39.491807945 +0000
-@@ -6106,7 +6106,7 @@
-
- if (flush != BTRFS_RESERVE_NO_FLUSH &&
- btrfs_transaction_in_commit(fs_info))
-- schedule_timeout(1);
-+ schedule_min_hrtimeout();
-
- if (delalloc_lock)
- mutex_lock(&inode->delalloc_mutex);
-diff -Nur a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c
---- a/fs/btrfs/inode-map.c 2018-10-10 07:54:28.000000000 +0100
-+++ b/fs/btrfs/inode-map.c 2018-11-03 16:30:39.492807977 +0000
-@@ -89,7 +89,7 @@
- btrfs_release_path(path);
- root->ino_cache_progress = last;
- up_read(&fs_info->commit_root_sem);
-- schedule_timeout(1);
-+ schedule_min_hrtimeout();
- goto again;
- } else
- continue;
-diff -Nur a/sound/usb/line6/pcm.c b/sound/usb/line6/pcm.c
---- a/sound/usb/line6/pcm.c 2018-10-10 07:54:28.000000000 +0100
-+++ b/sound/usb/line6/pcm.c 2018-11-03 16:30:39.492807977 +0000
-@@ -131,7 +131,7 @@
- if (!alive)
- break;
- set_current_state(TASK_UNINTERRUPTIBLE);
-- schedule_timeout(1);
-+ schedule_min_hrtimeout();
- } while (--timeout > 0);
- if (alive)
- dev_err(line6pcm->line6->ifcdev,
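The mechanical rule that deleted patch applied across drivers, shown as an illustrative before/after rather than any single file's hunk:

/* Tree-wide substitution pattern (illustrative):
 *
 *   schedule_timeout(1);                     ->  schedule_min_hrtimeout();
 *   schedule_timeout(msecs_to_jiffies(ms));  ->  schedule_msec_hrtimeout(ms);
 *
 * Both helpers come from patch 04 in this series; task-state handling
 * at the call sites is unchanged. */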
diff --git a/sys-kernel/linux-sources-redcore-lts/files/4.14-0008-Replace-all-calls-to-schedule_timeout_interruptible-.patch b/sys-kernel/linux-sources-redcore-lts/files/4.14-0008-Replace-all-calls-to-schedule_timeout_interruptible-.patch
deleted file mode 100644
index f9f274ce..00000000
--- a/sys-kernel/linux-sources-redcore-lts/files/4.14-0008-Replace-all-calls-to-schedule_timeout_interruptible-.patch
+++ /dev/null
@@ -1,311 +0,0 @@
-From 3ef5df78c2f425115b87f0f2f59fd189c0f1bbe3 Mon Sep 17 00:00:00 2001
-From: Con Kolivas <kernel@kolivas.org>
-Date: Mon, 20 Feb 2017 13:30:07 +1100
-Subject: [PATCH 08/16] Replace all calls to schedule_timeout_interruptible of
- potentially under 50ms to use schedule_msec_hrtimeout_interruptible.
-
----
- drivers/hwmon/fam15h_power.c | 2 +-
- drivers/iio/light/tsl2563.c | 6 +-----
- drivers/media/i2c/msp3400-driver.c | 4 ++--
- drivers/media/pci/ivtv/ivtv-gpio.c | 6 +++---
- drivers/media/radio/radio-mr800.c | 2 +-
- drivers/media/radio/radio-tea5777.c | 2 +-
- drivers/media/radio/tea575x.c | 2 +-
- drivers/parport/ieee1284.c | 2 +-
- drivers/parport/ieee1284_ops.c | 2 +-
- drivers/platform/x86/intel_ips.c | 8 ++++----
- net/core/pktgen.c | 2 +-
- sound/soc/codecs/wm8350.c | 12 ++++++------
- sound/soc/codecs/wm8900.c | 2 +-
- sound/soc/codecs/wm9713.c | 4 ++--
- 14 files changed, 26 insertions(+), 30 deletions(-)
-
-diff --git a/drivers/hwmon/fam15h_power.c b/drivers/hwmon/fam15h_power.c
-index 9545a346044f..c24cf1302ec7 100644
---- a/drivers/hwmon/fam15h_power.c
-+++ b/drivers/hwmon/fam15h_power.c
-@@ -237,7 +237,7 @@ static ssize_t power1_average_show(struct device *dev,
- prev_ptsc[cu] = data->cpu_sw_pwr_ptsc[cu];
- }
-
-- leftover = schedule_timeout_interruptible(msecs_to_jiffies(data->power_period));
-+ leftover = schedule_msec_hrtimeout_interruptible((data->power_period));
- if (leftover)
- return 0;
-
-diff --git a/drivers/iio/light/tsl2563.c b/drivers/iio/light/tsl2563.c
-index 7599693f7fe9..452090739138 100644
---- a/drivers/iio/light/tsl2563.c
-+++ b/drivers/iio/light/tsl2563.c
-@@ -282,11 +282,7 @@ static void tsl2563_wait_adc(struct tsl2563_chip *chip)
- default:
- delay = 402;
- }
-- /*
-- * TODO: Make sure that we wait at least required delay but why we
-- * have to extend it one tick more?
-- */
-- schedule_timeout_interruptible(msecs_to_jiffies(delay) + 2);
-+ schedule_msec_hrtimeout_interruptible(delay + 1);
- }
-
- static int tsl2563_adjust_gainlevel(struct tsl2563_chip *chip, u16 adc)
-diff --git a/drivers/media/i2c/msp3400-driver.c b/drivers/media/i2c/msp3400-driver.c
-index 3db966db83eb..f0fab7676f72 100644
---- a/drivers/media/i2c/msp3400-driver.c
-+++ b/drivers/media/i2c/msp3400-driver.c
-@@ -179,7 +179,7 @@ static int msp_read(struct i2c_client *client, int dev, int addr)
- break;
- dev_warn(&client->dev, "I/O error #%d (read 0x%02x/0x%02x)\n", err,
- dev, addr);
-- schedule_timeout_interruptible(msecs_to_jiffies(10));
-+ schedule_msec_hrtimeout_interruptible((10));
- }
- if (err == 3) {
- dev_warn(&client->dev, "resetting chip, sound will go off.\n");
-@@ -220,7 +220,7 @@ static int msp_write(struct i2c_client *client, int dev, int addr, int val)
- break;
- dev_warn(&client->dev, "I/O error #%d (write 0x%02x/0x%02x)\n", err,
- dev, addr);
-- schedule_timeout_interruptible(msecs_to_jiffies(10));
-+ schedule_msec_hrtimeout_interruptible((10));
- }
- if (err == 3) {
- dev_warn(&client->dev, "resetting chip, sound will go off.\n");
-diff --git a/drivers/media/pci/ivtv/ivtv-gpio.c b/drivers/media/pci/ivtv/ivtv-gpio.c
-index f752f3993687..23372af61ebf 100644
---- a/drivers/media/pci/ivtv/ivtv-gpio.c
-+++ b/drivers/media/pci/ivtv/ivtv-gpio.c
-@@ -117,7 +117,7 @@ void ivtv_reset_ir_gpio(struct ivtv *itv)
- curout = (curout & ~0xF) | 1;
- write_reg(curout, IVTV_REG_GPIO_OUT);
- /* We could use something else for smaller time */
-- schedule_timeout_interruptible(msecs_to_jiffies(1));
-+ schedule_msec_hrtimeout_interruptible((1));
- curout |= 2;
- write_reg(curout, IVTV_REG_GPIO_OUT);
- curdir &= ~0x80;
-@@ -137,11 +137,11 @@ int ivtv_reset_tuner_gpio(void *dev, int component, int cmd, int value)
- curout = read_reg(IVTV_REG_GPIO_OUT);
- curout &= ~(1 << itv->card->xceive_pin);
- write_reg(curout, IVTV_REG_GPIO_OUT);
-- schedule_timeout_interruptible(msecs_to_jiffies(1));
-+ schedule_msec_hrtimeout_interruptible((1));
-
- curout |= 1 << itv->card->xceive_pin;
- write_reg(curout, IVTV_REG_GPIO_OUT);
-- schedule_timeout_interruptible(msecs_to_jiffies(1));
-+ schedule_msec_hrtimeout_interruptible((1));
- return 0;
- }
-
-diff --git a/drivers/media/radio/radio-mr800.c b/drivers/media/radio/radio-mr800.c
-index c9f59129af79..cb6f8394a5c2 100644
---- a/drivers/media/radio/radio-mr800.c
-+++ b/drivers/media/radio/radio-mr800.c
-@@ -378,7 +378,7 @@ static int vidioc_s_hw_freq_seek(struct file *file, void *priv,
- retval = -ENODATA;
- break;
- }
-- if (schedule_timeout_interruptible(msecs_to_jiffies(10))) {
-+ if (schedule_msec_hrtimeout_interruptible((10))) {
- retval = -ERESTARTSYS;
- break;
- }
-diff --git a/drivers/media/radio/radio-tea5777.c b/drivers/media/radio/radio-tea5777.c
-index 04ed1a5d1177..d593d28dc286 100644
---- a/drivers/media/radio/radio-tea5777.c
-+++ b/drivers/media/radio/radio-tea5777.c
-@@ -245,7 +245,7 @@ static int radio_tea5777_update_read_reg(struct radio_tea5777 *tea, int wait)
- }
-
- if (wait) {
-- if (schedule_timeout_interruptible(msecs_to_jiffies(wait)))
-+ if (schedule_msec_hrtimeout_interruptible((wait)))
- return -ERESTARTSYS;
- }
-
-diff --git a/drivers/media/radio/tea575x.c b/drivers/media/radio/tea575x.c
-index 4dc2067bce14..29f4416fb9ae 100644
---- a/drivers/media/radio/tea575x.c
-+++ b/drivers/media/radio/tea575x.c
-@@ -416,7 +416,7 @@ int snd_tea575x_s_hw_freq_seek(struct file *file, struct snd_tea575x *tea,
- for (;;) {
- if (time_after(jiffies, timeout))
- break;
-- if (schedule_timeout_interruptible(msecs_to_jiffies(10))) {
-+ if (schedule_msec_hrtimeout_interruptible((10))) {
- /* some signal arrived, stop search */
- tea->val &= ~TEA575X_BIT_SEARCH;
- snd_tea575x_set_freq(tea);
-diff --git a/drivers/parport/ieee1284.c b/drivers/parport/ieee1284.c
-index 74cc6dd982d2..c22c4d5f08d0 100644
---- a/drivers/parport/ieee1284.c
-+++ b/drivers/parport/ieee1284.c
-@@ -215,7 +215,7 @@ int parport_wait_peripheral(struct parport *port,
- /* parport_wait_event didn't time out, but the
- * peripheral wasn't actually ready either.
- * Wait for another 10ms. */
-- schedule_timeout_interruptible(msecs_to_jiffies(10));
-+ schedule_msec_hrtimeout_interruptible((10));
- }
- }
-
-diff --git a/drivers/parport/ieee1284_ops.c b/drivers/parport/ieee1284_ops.c
-index 5d41dda6da4e..34705f6b423f 100644
---- a/drivers/parport/ieee1284_ops.c
-+++ b/drivers/parport/ieee1284_ops.c
-@@ -537,7 +537,7 @@ size_t parport_ieee1284_ecp_read_data (struct parport *port,
- /* Yield the port for a while. */
- if (count && dev->port->irq != PARPORT_IRQ_NONE) {
- parport_release (dev);
-- schedule_timeout_interruptible(msecs_to_jiffies(40));
-+ schedule_msec_hrtimeout_interruptible((40));
- parport_claim_or_block (dev);
- }
- else
-diff --git a/drivers/platform/x86/intel_ips.c b/drivers/platform/x86/intel_ips.c
-index 58dcee562d64..b661b7c071bb 100644
---- a/drivers/platform/x86/intel_ips.c
-+++ b/drivers/platform/x86/intel_ips.c
-@@ -813,7 +813,7 @@ static int ips_adjust(void *data)
- ips_gpu_lower(ips);
-
- sleep:
-- schedule_timeout_interruptible(msecs_to_jiffies(IPS_ADJUST_PERIOD));
-+ schedule_msec_hrtimeout_interruptible((IPS_ADJUST_PERIOD));
- } while (!kthread_should_stop());
-
- dev_dbg(&ips->dev->dev, "ips-adjust thread stopped\n");
-@@ -992,7 +992,7 @@ static int ips_monitor(void *data)
- seqno_timestamp = get_jiffies_64();
-
- old_cpu_power = thm_readl(THM_CEC);
-- schedule_timeout_interruptible(msecs_to_jiffies(IPS_SAMPLE_PERIOD));
-+ schedule_msec_hrtimeout_interruptible((IPS_SAMPLE_PERIOD));
-
- /* Collect an initial average */
- for (i = 0; i < IPS_SAMPLE_COUNT; i++) {
-@@ -1019,7 +1019,7 @@ static int ips_monitor(void *data)
- mchp_samples[i] = mchp;
- }
-
-- schedule_timeout_interruptible(msecs_to_jiffies(IPS_SAMPLE_PERIOD));
-+ schedule_msec_hrtimeout_interruptible((IPS_SAMPLE_PERIOD));
- if (kthread_should_stop())
- break;
- }
-@@ -1046,7 +1046,7 @@ static int ips_monitor(void *data)
- * us to reduce the sample frequency if the CPU and GPU are idle.
- */
- old_cpu_power = thm_readl(THM_CEC);
-- schedule_timeout_interruptible(msecs_to_jiffies(IPS_SAMPLE_PERIOD));
-+ schedule_msec_hrtimeout_interruptible((IPS_SAMPLE_PERIOD));
- last_sample_period = IPS_SAMPLE_PERIOD;
-
- setup_deferrable_timer_on_stack(&timer, monitor_timeout,
-diff --git a/net/core/pktgen.c b/net/core/pktgen.c
-index 6e1e10ff433a..be5d6f7142e4 100644
---- a/net/core/pktgen.c
-+++ b/net/core/pktgen.c
-@@ -1992,7 +1992,7 @@ static void pktgen_mark_device(const struct pktgen_net *pn, const char *ifname)
- mutex_unlock(&pktgen_thread_lock);
- pr_debug("%s: waiting for %s to disappear....\n",
- __func__, ifname);
-- schedule_timeout_interruptible(msecs_to_jiffies(msec_per_try));
-+ schedule_msec_hrtimeout_interruptible((msec_per_try));
- mutex_lock(&pktgen_thread_lock);
-
- if (++i >= max_tries) {
-diff --git a/sound/soc/codecs/wm8350.c b/sound/soc/codecs/wm8350.c
-index 2efc5b41ad0f..3e3248c48c6b 100644
---- a/sound/soc/codecs/wm8350.c
-+++ b/sound/soc/codecs/wm8350.c
-@@ -236,10 +236,10 @@ static void wm8350_pga_work(struct work_struct *work)
- out2->ramp == WM8350_RAMP_UP) {
- /* delay is longer over 0dB as increases are larger */
- if (i >= WM8350_OUTn_0dB)
-- schedule_timeout_interruptible(msecs_to_jiffies
-+ schedule_msec_hrtimeout_interruptible(
- (2));
- else
-- schedule_timeout_interruptible(msecs_to_jiffies
-+ schedule_msec_hrtimeout_interruptible(
- (1));
- } else
- udelay(50); /* doesn't matter if we delay longer */
-@@ -1123,7 +1123,7 @@ static int wm8350_set_bias_level(struct snd_soc_codec *codec,
- (platform->dis_out4 << 6));
-
- /* wait for discharge */
-- schedule_timeout_interruptible(msecs_to_jiffies
-+ schedule_msec_hrtimeout_interruptible(
- (platform->
- cap_discharge_msecs));
-
-@@ -1139,7 +1139,7 @@ static int wm8350_set_bias_level(struct snd_soc_codec *codec,
- WM8350_VBUFEN);
-
- /* wait for vmid */
-- schedule_timeout_interruptible(msecs_to_jiffies
-+ schedule_msec_hrtimeout_interruptible(
- (platform->
- vmid_charge_msecs));
-
-@@ -1190,7 +1190,7 @@ static int wm8350_set_bias_level(struct snd_soc_codec *codec,
- wm8350_reg_write(wm8350, WM8350_POWER_MGMT_1, pm1);
-
- /* wait */
-- schedule_timeout_interruptible(msecs_to_jiffies
-+ schedule_msec_hrtimeout_interruptible(
- (platform->
- vmid_discharge_msecs));
-
-@@ -1208,7 +1208,7 @@ static int wm8350_set_bias_level(struct snd_soc_codec *codec,
- pm1 | WM8350_OUTPUT_DRAIN_EN);
-
- /* wait */
-- schedule_timeout_interruptible(msecs_to_jiffies
-+ schedule_msec_hrtimeout_interruptible(
- (platform->drain_msecs));
-
- pm1 &= ~WM8350_BIASEN;
-diff --git a/sound/soc/codecs/wm8900.c b/sound/soc/codecs/wm8900.c
-index c77b49a29311..fc50456e90a9 100644
---- a/sound/soc/codecs/wm8900.c
-+++ b/sound/soc/codecs/wm8900.c
-@@ -1112,7 +1112,7 @@ static int wm8900_set_bias_level(struct snd_soc_codec *codec,
- /* Need to let things settle before stopping the clock
- * to ensure that restart works, see "Stopping the
- * master clock" in the datasheet. */
-- schedule_timeout_interruptible(msecs_to_jiffies(1));
-+ schedule_msec_hrtimeout_interruptible((1));
- snd_soc_write(codec, WM8900_REG_POWER2,
- WM8900_REG_POWER2_SYSCLK_ENA);
- break;
-diff --git a/sound/soc/codecs/wm9713.c b/sound/soc/codecs/wm9713.c
-index 7e4822185feb..0c85a207446a 100644
---- a/sound/soc/codecs/wm9713.c
-+++ b/sound/soc/codecs/wm9713.c
-@@ -199,7 +199,7 @@ static int wm9713_voice_shutdown(struct snd_soc_dapm_widget *w,
-
- /* Gracefully shut down the voice interface. */
- snd_soc_update_bits(codec, AC97_HANDSET_RATE, 0x0f00, 0x0200);
-- schedule_timeout_interruptible(msecs_to_jiffies(1));
-+ schedule_msec_hrtimeout_interruptible((1));
- snd_soc_update_bits(codec, AC97_HANDSET_RATE, 0x0f00, 0x0f00);
- snd_soc_update_bits(codec, AC97_EXTENDED_MID, 0x1000, 0x1000);
-
-@@ -868,7 +868,7 @@ static int wm9713_set_pll(struct snd_soc_codec *codec,
- wm9713->pll_in = freq_in;
-
- /* wait 10ms AC97 link frames for the link to stabilise */
-- schedule_timeout_interruptible(msecs_to_jiffies(10));
-+ schedule_msec_hrtimeout_interruptible((10));
- return 0;
- }
-
---
-2.11.0
-
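The same rule for the interruptible sites, preserving the return contract of the code it replaces (nonzero means a signal ended the sleep early). A sketch assuming patch 04's helper; the function name is hypothetical:

#include <linux/sched.h>
#include <linux/errno.h>

/* Sketch: schedule_msec_hrtimeout_interruptible() sets
 * TASK_INTERRUPTIBLE itself and returns the remaining milliseconds if
 * a signal cut the sleep short, matching the
 * schedule_timeout_interruptible() contract. */
static int hypothetical_poll_step(void)
{
        if (schedule_msec_hrtimeout_interruptible(10))
                return -ERESTARTSYS;    /* interrupted by a signal */
        return 0;
}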
diff --git a/sys-kernel/linux-sources-redcore-lts/files/4.14-0009-Replace-all-calls-to-schedule_timeout_uninterruptibl.patch b/sys-kernel/linux-sources-redcore-lts/files/4.14-0009-Replace-all-calls-to-schedule_timeout_uninterruptibl.patch
deleted file mode 100644
index c910f3df..00000000
--- a/sys-kernel/linux-sources-redcore-lts/files/4.14-0009-Replace-all-calls-to-schedule_timeout_uninterruptibl.patch
+++ /dev/null
@@ -1,160 +0,0 @@
-From 6044370cf4bbc5e05f5d78f5772c1d88e3153603 Mon Sep 17 00:00:00 2001
-From: Con Kolivas <kernel@kolivas.org>
-Date: Mon, 20 Feb 2017 13:30:32 +1100
-Subject: [PATCH 09/16] Replace all calls to schedule_timeout_uninterruptible
- of potentially under 50ms to use schedule_msec_hrtimeout_uninterruptible
-
----
- drivers/media/pci/cx18/cx18-gpio.c | 4 ++--
- drivers/net/wireless/intel/ipw2x00/ipw2100.c | 4 ++--
- drivers/rtc/rtc-wm8350.c | 6 +++---
- drivers/scsi/lpfc/lpfc_scsi.c | 2 +-
- sound/pci/maestro3.c | 4 ++--
- sound/soc/codecs/rt5631.c | 4 ++--
- sound/soc/soc-dapm.c | 2 +-
- 7 files changed, 13 insertions(+), 13 deletions(-)
-
-diff --git a/drivers/media/pci/cx18/cx18-gpio.c b/drivers/media/pci/cx18/cx18-gpio.c
-index 012859e6dc7b..206bd08265a5 100644
---- a/drivers/media/pci/cx18/cx18-gpio.c
-+++ b/drivers/media/pci/cx18/cx18-gpio.c
-@@ -90,11 +90,11 @@ static void gpio_reset_seq(struct cx18 *cx, u32 active_lo, u32 active_hi,
-
- /* Assert */
- gpio_update(cx, mask, ~active_lo);
-- schedule_timeout_uninterruptible(msecs_to_jiffies(assert_msecs));
-+ schedule_msec_hrtimeout_uninterruptible((assert_msecs));
-
- /* Deassert */
- gpio_update(cx, mask, ~active_hi);
-- schedule_timeout_uninterruptible(msecs_to_jiffies(recovery_msecs));
-+ schedule_msec_hrtimeout_uninterruptible((recovery_msecs));
- }
-
- /*
-diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2100.c b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
-index 19c442cb93e4..448f41782060 100644
---- a/drivers/net/wireless/intel/ipw2x00/ipw2100.c
-+++ b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
-@@ -830,7 +830,7 @@ static int ipw2100_hw_send_command(struct ipw2100_priv *priv,
- * doesn't seem to have as many firmware restart cycles...
- *
- * As a test, we're sticking in a 1/100s delay here */
-- schedule_timeout_uninterruptible(msecs_to_jiffies(10));
-+ schedule_msec_hrtimeout_uninterruptible((10));
-
- return 0;
-
-@@ -1281,7 +1281,7 @@ static int ipw2100_start_adapter(struct ipw2100_priv *priv)
- IPW_DEBUG_FW("Waiting for f/w initialization to complete...\n");
- i = 5000;
- do {
-- schedule_timeout_uninterruptible(msecs_to_jiffies(40));
-+ schedule_msec_hrtimeout_uninterruptible((40));
- /* Todo... wait for sync command ... */
-
- read_register(priv->net_dev, IPW_REG_INTA, &inta);
-diff --git a/drivers/rtc/rtc-wm8350.c b/drivers/rtc/rtc-wm8350.c
-index 483c7993516b..fddbaa475066 100644
---- a/drivers/rtc/rtc-wm8350.c
-+++ b/drivers/rtc/rtc-wm8350.c
-@@ -119,7 +119,7 @@ static int wm8350_rtc_settime(struct device *dev, struct rtc_time *tm)
- /* Wait until confirmation of stopping */
- do {
- rtc_ctrl = wm8350_reg_read(wm8350, WM8350_RTC_TIME_CONTROL);
-- schedule_timeout_uninterruptible(msecs_to_jiffies(1));
-+ schedule_msec_hrtimeout_uninterruptible((1));
- } while (--retries && !(rtc_ctrl & WM8350_RTC_STS));
-
- if (!retries) {
-@@ -202,7 +202,7 @@ static int wm8350_rtc_stop_alarm(struct wm8350 *wm8350)
- /* Wait until confirmation of stopping */
- do {
- rtc_ctrl = wm8350_reg_read(wm8350, WM8350_RTC_TIME_CONTROL);
-- schedule_timeout_uninterruptible(msecs_to_jiffies(1));
-+ schedule_msec_hrtimeout_uninterruptible((1));
- } while (retries-- && !(rtc_ctrl & WM8350_RTC_ALMSTS));
-
- if (!(rtc_ctrl & WM8350_RTC_ALMSTS))
-@@ -225,7 +225,7 @@ static int wm8350_rtc_start_alarm(struct wm8350 *wm8350)
- /* Wait until confirmation */
- do {
- rtc_ctrl = wm8350_reg_read(wm8350, WM8350_RTC_TIME_CONTROL);
-- schedule_timeout_uninterruptible(msecs_to_jiffies(1));
-+ schedule_msec_hrtimeout_uninterruptible((1));
- } while (retries-- && rtc_ctrl & WM8350_RTC_ALMSTS);
-
- if (rtc_ctrl & WM8350_RTC_ALMSTS)
-diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
-index 1a6f122bb25d..c0db66302a3e 100644
---- a/drivers/scsi/lpfc/lpfc_scsi.c
-+++ b/drivers/scsi/lpfc/lpfc_scsi.c
-@@ -5131,7 +5131,7 @@ lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id,
- tgt_id, lun_id, context);
- later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
- while (time_after(later, jiffies) && cnt) {
-- schedule_timeout_uninterruptible(msecs_to_jiffies(20));
-+ schedule_msec_hrtimeout_uninterruptible((20));
- cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
- }
- if (cnt) {
-diff --git a/sound/pci/maestro3.c b/sound/pci/maestro3.c
-index 8f20dec97843..944ce63431b0 100644
---- a/sound/pci/maestro3.c
-+++ b/sound/pci/maestro3.c
-@@ -2016,7 +2016,7 @@ static void snd_m3_ac97_reset(struct snd_m3 *chip)
- outw(0, io + GPIO_DATA);
- outw(dir | GPO_PRIMARY_AC97, io + GPIO_DIRECTION);
-
-- schedule_timeout_uninterruptible(msecs_to_jiffies(delay1));
-+ schedule_msec_hrtimeout_uninterruptible((delay1));
-
- outw(GPO_PRIMARY_AC97, io + GPIO_DATA);
- udelay(5);
-@@ -2024,7 +2024,7 @@ static void snd_m3_ac97_reset(struct snd_m3 *chip)
- outw(IO_SRAM_ENABLE | SERIAL_AC_LINK_ENABLE, io + RING_BUS_CTRL_A);
- outw(~0, io + GPIO_MASK);
-
-- schedule_timeout_uninterruptible(msecs_to_jiffies(delay2));
-+ schedule_msec_hrtimeout_uninterruptible((delay2));
-
- if (! snd_m3_try_read_vendor(chip))
- break;
-diff --git a/sound/soc/codecs/rt5631.c b/sound/soc/codecs/rt5631.c
-index 55b04c55fb4b..2ed02ad6ac41 100644
---- a/sound/soc/codecs/rt5631.c
-+++ b/sound/soc/codecs/rt5631.c
-@@ -419,7 +419,7 @@ static void onebit_depop_mute_stage(struct snd_soc_codec *codec, int enable)
- hp_zc = snd_soc_read(codec, RT5631_INT_ST_IRQ_CTRL_2);
- snd_soc_write(codec, RT5631_INT_ST_IRQ_CTRL_2, hp_zc & 0xf7ff);
- if (enable) {
-- schedule_timeout_uninterruptible(msecs_to_jiffies(10));
-+ schedule_msec_hrtimeout_uninterruptible((10));
- /* config one-bit depop parameter */
- rt5631_write_index(codec, RT5631_SPK_INTL_CTRL, 0x307f);
- snd_soc_update_bits(codec, RT5631_HP_OUT_VOL,
-@@ -529,7 +529,7 @@ static void depop_seq_mute_stage(struct snd_soc_codec *codec, int enable)
- hp_zc = snd_soc_read(codec, RT5631_INT_ST_IRQ_CTRL_2);
- snd_soc_write(codec, RT5631_INT_ST_IRQ_CTRL_2, hp_zc & 0xf7ff);
- if (enable) {
-- schedule_timeout_uninterruptible(msecs_to_jiffies(10));
-+ schedule_msec_hrtimeout_uninterruptible((10));
-
- /* config depop sequence parameter */
- rt5631_write_index(codec, RT5631_SPK_INTL_CTRL, 0x302f);
-diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
-index dcef67a9bd48..11c2bb48c8f2 100644
---- a/sound/soc/soc-dapm.c
-+++ b/sound/soc/soc-dapm.c
-@@ -134,7 +134,7 @@ static void dapm_assert_locked(struct snd_soc_dapm_context *dapm)
- static void pop_wait(u32 pop_time)
- {
- if (pop_time)
-- schedule_timeout_uninterruptible(msecs_to_jiffies(pop_time));
-+ schedule_msec_hrtimeout_uninterruptible((pop_time));
- }
-
- static void pop_dbg(struct device *dev, u32 pop_time, const char *fmt, ...)
---
-2.11.0
-
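
The uninterruptible conversions follow the same shape, most often inside bounded polling loops such as the rtc-wm8350 hunks above; the uninterruptible variant is chosen where a signal must not cut the wait short. A sketch of that idiom under the same assumption that the hrtimeout helpers exist; the register read and status bit are hypothetical:

#include <linux/errno.h>
#include <linux/sched.h>

#define EXAMPLE_READY 0x01                      /* hypothetical status bit */
extern unsigned int example_read_status(void);  /* hypothetical register read */

static int example_wait_ready(void)
{
        unsigned int status;
        int retries = 5;

        do {
                status = example_read_status();
                schedule_msec_hrtimeout_uninterruptible(1);
        } while (--retries && !(status & EXAMPLE_READY));

        return (status & EXAMPLE_READY) ? 0 : -ETIMEDOUT;
}
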
diff --git a/sys-kernel/linux-sources-redcore-lts/files/4.14-0010-Don-t-use-hrtimer-overlay-when-pm_freezing-since-som.patch b/sys-kernel/linux-sources-redcore-lts/files/4.14-0010-Don-t-use-hrtimer-overlay-when-pm_freezing-since-som.patch
deleted file mode 100644
index 260bb98d..00000000
--- a/sys-kernel/linux-sources-redcore-lts/files/4.14-0010-Don-t-use-hrtimer-overlay-when-pm_freezing-since-som.patch
+++ /dev/null
@@ -1,69 +0,0 @@
-From 071486de633698dcdd163295173ce4663ec9158c Mon Sep 17 00:00:00 2001
-From: Con Kolivas <kernel@kolivas.org>
-Date: Mon, 20 Feb 2017 13:32:58 +1100
-Subject: [PATCH 10/16] Don't use hrtimer overlay when pm_freezing since some
- drivers still don't correctly use freezable timeouts.
-
----
- kernel/time/hrtimer.c | 2 +-
- kernel/time/timer.c | 9 +++++----
- 2 files changed, 6 insertions(+), 5 deletions(-)
-
-diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
-index 13227cf2814c..66456c72bace 100644
---- a/kernel/time/hrtimer.c
-+++ b/kernel/time/hrtimer.c
-@@ -1809,7 +1809,7 @@ long __sched schedule_msec_hrtimeout(long timeout)
- * (yet) better than Hz, as would occur during startup, use regular
- * timers.
- */
-- if (jiffs > 4 || hrtimer_resolution >= NSEC_PER_SEC / HZ)
-+ if (jiffs > 4 || hrtimer_resolution >= NSEC_PER_SEC / HZ || pm_freezing)
- return schedule_timeout(jiffs);
-
- secs = timeout / 1000;
-diff --git a/kernel/time/timer.c b/kernel/time/timer.c
-index c68cb9307f64..2f2c96b03efe 100644
---- a/kernel/time/timer.c
-+++ b/kernel/time/timer.c
-@@ -44,6 +44,7 @@
- #include <linux/sched/debug.h>
- #include <linux/slab.h>
- #include <linux/compat.h>
-+#include <linux/freezer.h>
-
- #include <linux/uaccess.h>
- #include <asm/unistd.h>
-@@ -1891,12 +1892,12 @@ void msleep(unsigned int msecs)
- * Use high resolution timers where the resolution of tick based
- * timers is inadequate.
- */
-- if (jiffs < 5 && hrtimer_resolution < NSEC_PER_SEC / HZ) {
-+ if (jiffs < 5 && hrtimer_resolution < NSEC_PER_SEC / HZ && !pm_freezing) {
- while (msecs)
- msecs = schedule_msec_hrtimeout_uninterruptible(msecs);
- return;
- }
-- timeout = msecs_to_jiffies(msecs) + 1;
-+ timeout = jiffs + 1;
-
- while (timeout)
- timeout = schedule_timeout_uninterruptible(timeout);
-@@ -1913,12 +1914,12 @@ unsigned long msleep_interruptible(unsigned int msecs)
- int jiffs = msecs_to_jiffies(msecs);
- unsigned long timeout;
-
-- if (jiffs < 5 && hrtimer_resolution < NSEC_PER_SEC / HZ) {
-+ if (jiffs < 5 && hrtimer_resolution < NSEC_PER_SEC / HZ && !pm_freezing) {
- while (msecs && !signal_pending(current))
- msecs = schedule_msec_hrtimeout_interruptible(msecs);
- return msecs;
- }
-- timeout = msecs_to_jiffies(msecs) + 1;
-+ timeout = jiffs + 1;
-
- while (timeout && !signal_pending(current))
- timeout = schedule_timeout_interruptible(timeout);
---
-2.11.0
-
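
Patch 0010 narrows when the hrtimer overlay is used at all: msleep(), msleep_interruptible() and schedule_msec_hrtimeout() fall back to plain tick timeouts while pm_freezing is set, because some drivers still sleep with non-freezable timeouts during suspend. The three-way test it settles on, pulled out into a helper purely for illustration (the patch open-codes it at each site):

#include <linux/freezer.h>      /* pm_freezing */
#include <linux/hrtimer.h>      /* hrtimer_resolution */
#include <linux/kernel.h>       /* HZ */
#include <linux/types.h>

static bool example_use_tick_timeout(int jiffs)
{
        return jiffs > 4 ||                               /* long sleep: ticks suffice    */
               hrtimer_resolution >= NSEC_PER_SEC / HZ || /* hrtimers no finer than tick  */
               pm_freezing;                               /* suspend/hibernate in progress */
}
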
diff --git a/sys-kernel/linux-sources-redcore-lts/files/4.14-0011-Make-hrtimer-granularity-and-minimum-hrtimeout-confi.patch b/sys-kernel/linux-sources-redcore-lts/files/4.14-0011-Make-hrtimer-granularity-and-minimum-hrtimeout-confi.patch
deleted file mode 100644
index 5ac20300..00000000
--- a/sys-kernel/linux-sources-redcore-lts/files/4.14-0011-Make-hrtimer-granularity-and-minimum-hrtimeout-confi.patch
+++ /dev/null
@@ -1,136 +0,0 @@
-diff -Nur a/kernel/sysctl.c b/kernel/sysctl.c
---- a/kernel/sysctl.c 2018-11-03 17:03:07.433069521 +0000
-+++ b/kernel/sysctl.c 2018-11-03 17:02:11.020267246 +0000
-@@ -141,7 +141,9 @@
- extern int sched_iso_cpu;
- extern int sched_yield_type;
- #endif
--#ifdef CONFIG_PRINTK
-+extern int hrtimer_granularity_us;
-+extern int hrtimeout_min_us;
-+#if defined(CONFIG_PRINTK) || defined(CONFIG_SCHED_MUQSS)
- static int ten_thousand __read_only = 10000;
- #endif
- #ifdef CONFIG_PERF_EVENTS
-@@ -1119,6 +1121,24 @@
- .extra2 = &two,
- },
- #endif
-+ {
-+ .procname = "hrtimer_granularity_us",
-+ .data = &hrtimer_granularity_us,
-+ .maxlen = sizeof(int),
-+ .mode = 0644,
-+ .proc_handler = &proc_dointvec_minmax,
-+ .extra1 = &one,
-+ .extra2 = &ten_thousand,
-+ },
-+ {
-+ .procname = "hrtimeout_min_us",
-+ .data = &hrtimeout_min_us,
-+ .maxlen = sizeof(int),
-+ .mode = 0644,
-+ .proc_handler = &proc_dointvec_minmax,
-+ .extra1 = &one,
-+ .extra2 = &ten_thousand,
-+ },
- #if defined(CONFIG_S390) && defined(CONFIG_SMP)
- {
- .procname = "spin_retry",
-diff -Nur a/kernel/time/clockevents.c b/kernel/time/clockevents.c
---- a/kernel/time/clockevents.c 2018-11-03 17:03:07.433069521 +0000
-+++ b/kernel/time/clockevents.c 2018-11-03 16:58:17.283800909 +0000
-@@ -198,13 +198,9 @@
-
- #ifdef CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST
-
--#ifdef CONFIG_SCHED_MUQSS
-+int __read_mostly hrtimer_granularity_us = 100;
- /* Limit min_delta to 100us */
--#define MIN_DELTA_LIMIT (NSEC_PER_SEC / 10000)
--#else
--/* Limit min_delta to a jiffie */
--#define MIN_DELTA_LIMIT (NSEC_PER_SEC / HZ)
--#endif
-+#define MIN_DELTA_LIMIT (hrtimer_granularity_us * NSEC_PER_USEC)
-
- /**
- * clockevents_increase_min_delta - raise minimum delta of a clock event device
-diff -Nur a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
---- a/kernel/time/hrtimer.c 2018-11-03 17:04:16.448274547 +0000
-+++ b/kernel/time/hrtimer.c 2018-11-03 16:58:17.283800909 +0000
-@@ -1803,7 +1803,7 @@
- long __sched schedule_msec_hrtimeout(long timeout)
- {
- struct hrtimer_sleeper t;
-- int delta, secs, jiffs;
-+ int delta, jiffs;
- ktime_t expires;
-
- if (!timeout) {
-@@ -1820,9 +1820,8 @@
- if (jiffs > 4 || hrtimer_resolution >= NSEC_PER_SEC / HZ || pm_freezing)
- return schedule_timeout(jiffs);
-
-- secs = timeout / 1000;
- delta = (timeout % 1000) * NSEC_PER_MSEC;
-- expires = ktime_set(secs, delta);
-+ expires = ktime_set(0, delta);
-
- hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- hrtimer_set_expires_range_ns(&t.timer, expires, delta);
-@@ -1846,9 +1845,53 @@
-
- EXPORT_SYMBOL(schedule_msec_hrtimeout);
-
-+#define USECS_PER_SEC 1000000
-+extern int hrtimer_granularity_us;
-+
-+static inline long schedule_usec_hrtimeout(long timeout)
-+{
-+ struct hrtimer_sleeper t;
-+ ktime_t expires;
-+ int delta;
-+
-+ if (!timeout) {
-+ __set_current_state(TASK_RUNNING);
-+ return 0;
-+ }
-+
-+ if (hrtimer_resolution >= NSEC_PER_SEC / HZ)
-+ return schedule_timeout(usecs_to_jiffies(timeout));
-+
-+ if (timeout < hrtimer_granularity_us)
-+ timeout = hrtimer_granularity_us;
-+ delta = (timeout % USECS_PER_SEC) * NSEC_PER_USEC;
-+ expires = ktime_set(0, delta);
-+
-+ hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-+ hrtimer_set_expires_range_ns(&t.timer, expires, delta);
-+
-+ hrtimer_init_sleeper(&t, current);
-+
-+ hrtimer_start_expires(&t.timer, HRTIMER_MODE_REL);
-+
-+ if (likely(t.task))
-+ schedule();
-+
-+ hrtimer_cancel(&t.timer);
-+ destroy_hrtimer_on_stack(&t.timer);
-+
-+ __set_current_state(TASK_RUNNING);
-+
-+ expires = hrtimer_expires_remaining(&t.timer);
-+ timeout = ktime_to_us(expires);
-+ return timeout < 0 ? 0 : timeout;
-+}
-+
-+int __read_mostly hrtimeout_min_us = 1000;
-+
- long __sched schedule_min_hrtimeout(void)
- {
-- return schedule_msec_hrtimeout(1);
-+ return usecs_to_jiffies(schedule_usec_hrtimeout(hrtimeout_min_us));
- }
-
- EXPORT_SYMBOL(schedule_min_hrtimeout);
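
With this patch the minimum schedulable sleep becomes a runtime tunable: hrtimer_granularity_us bounds how far clockevent min_delta may be raised, and hrtimeout_min_us (default 1000) sets the floor that schedule_min_hrtimeout() sleeps; both are exposed through the sysctl table above, so presumably as /proc/sys/kernel/hrtimer_granularity_us and /proc/sys/kernel/hrtimeout_min_us. A hypothetical caller, in the style the earlier patches of this series use schedule_min_hrtimeout():

#include <linux/sched.h>
#include <linux/types.h>

/* Each pass sleeps the configured floor (hrtimeout_min_us, default
 * 1000 us, adjustable at runtime through the sysctl table above). */
static void example_wait_for(volatile bool *flag)
{
        while (!*flag) {
                set_current_state(TASK_UNINTERRUPTIBLE);
                schedule_min_hrtimeout();
        }
        __set_current_state(TASK_RUNNING);
}
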
diff --git a/sys-kernel/linux-sources-redcore-lts/files/4.14-0012-Reinstate-default-Hz-of-100-in-combination-with-MuQS.patch b/sys-kernel/linux-sources-redcore-lts/files/4.14-0012-Reinstate-default-Hz-of-100-in-combination-with-MuQS.patch
deleted file mode 100644
index 99b28d65..00000000
--- a/sys-kernel/linux-sources-redcore-lts/files/4.14-0012-Reinstate-default-Hz-of-100-in-combination-with-MuQS.patch
+++ /dev/null
@@ -1,81 +0,0 @@
-From 9e47a80f690080c12ce607158b96c305707543b8 Mon Sep 17 00:00:00 2001
-From: Con Kolivas <kernel@kolivas.org>
-Date: Wed, 7 Dec 2016 21:23:01 +1100
-Subject: [PATCH 12/16] Reinstate default Hz of 100 in combination with MuQSS
- and -ck patches.
-
----
- kernel/Kconfig.hz | 25 ++++++++++++++++++-------
- 1 file changed, 18 insertions(+), 7 deletions(-)
-
-diff --git a/kernel/Kconfig.hz b/kernel/Kconfig.hz
-index 2a202a846757..1806fcac8f14 100644
---- a/kernel/Kconfig.hz
-+++ b/kernel/Kconfig.hz
-@@ -4,7 +4,8 @@
-
- choice
- prompt "Timer frequency"
-- default HZ_250
-+ default HZ_100 if SCHED_MUQSS
-+ default HZ_250_NODEF if !SCHED_MUQSS
- help
- Allows the configuration of the timer frequency. It is customary
- to have the timer interrupt run at 1000 Hz but 100 Hz may be more
-@@ -19,11 +20,18 @@ choice
- config HZ_100
- bool "100 HZ"
- help
-+ 100 Hz is a suitable choice in combination with MuQSS which does
-+ not rely on ticks for rescheduling interrupts, and is not Hz limited
-+ for timeouts and sleeps from both the kernel and userspace.
-+ This allows us to benefit from the lower overhead and higher
-+ throughput of fewer timer ticks.
-+
-+ Non-MuQSS kernels:
- 100 Hz is a typical choice for servers, SMP and NUMA systems
- with lots of processors that may show reduced performance if
- too many timer interrupts are occurring.
-
-- config HZ_250
-+ config HZ_250_NODEF
- bool "250 HZ"
- help
- 250 Hz is a good compromise choice allowing server performance
-@@ -31,7 +39,10 @@ choice
- on SMP and NUMA systems. If you are going to be using NTSC video
- or multimedia, selected 300Hz instead.
-     or multimedia, select 300Hz instead.

-- config HZ_300
-+ 250 Hz is the default choice for the mainline scheduler but not
-+ advantageous in combination with MuQSS.
-+
-+ config HZ_300_NODEF
- bool "300 HZ"
- help
- 300 Hz is a good compromise choice allowing server performance
-@@ -39,7 +50,7 @@ choice
- on SMP and NUMA systems and exactly dividing by both PAL and
- NTSC frame rates for video and multimedia work.
-
-- config HZ_1000
-+ config HZ_1000_NODEF
- bool "1000 HZ"
- help
- 1000 Hz is the preferred choice for desktop systems and other
-@@ -50,9 +61,9 @@ endchoice
- config HZ
- int
- default 100 if HZ_100
-- default 250 if HZ_250
-- default 300 if HZ_300
-- default 1000 if HZ_1000
-+ default 250 if HZ_250_NODEF
-+ default 300 if HZ_300_NODEF
-+ default 1000 if HZ_1000_NODEF
-
- config SCHED_HRTICK
- def_bool HIGH_RES_TIMERS
---
-2.11.0
-
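
The Kconfig rename (HZ_250 to HZ_250_NODEF and so on) exists purely so MuQSS kernels default to 100Hz without disturbing the mainline default elsewhere. The help text's claim that MuQSS "is not Hz limited" rests on the hrtimeout series above; without it, tick-based sleeps round every request up to a whole tick. A small userspace demonstration of that rounding, assuming HZ=100 and the kernel's round-up conversion:

#include <stdio.h>

int main(void)
{
        const int hz = 100;     /* assumed tick rate for the demo */
        int ms;

        for (ms = 1; ms <= 10; ms++) {
                int ticks = (ms * hz + 999) / 1000;     /* round up, as the kernel does */
                printf("%2d ms requested -> %d tick(s) = %d ms slept\n",
                       ms, ticks, ticks * 1000 / hz);
        }
        return 0;
}

Every request from 1ms to 10ms comes out as one full 10ms tick, which is exactly the cost the hrtimeout primitives remove.
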
diff --git a/sys-kernel/linux-sources-redcore-lts/files/4.14-0013-Make-threaded-IRQs-optionally-the-default-which-can-.patch b/sys-kernel/linux-sources-redcore-lts/files/4.14-0013-Make-threaded-IRQs-optionally-the-default-which-can-.patch
deleted file mode 100644
index 63ec9fdf..00000000
--- a/sys-kernel/linux-sources-redcore-lts/files/4.14-0013-Make-threaded-IRQs-optionally-the-default-which-can-.patch
+++ /dev/null
@@ -1,61 +0,0 @@
-From 5902b315d4061ebbe73a62c52e6d3b618066cebc Mon Sep 17 00:00:00 2001
-From: Con Kolivas <kernel@kolivas.org>
-Date: Wed, 7 Dec 2016 21:13:16 +1100
-Subject: [PATCH 13/16] Make threaded IRQs optionally the default which can be
- disabled.
-
----
- kernel/irq/Kconfig | 14 ++++++++++++++
- kernel/irq/manage.c | 10 ++++++++++
- 2 files changed, 24 insertions(+)
-
-diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig
-index a117adf7084b..0984c54fd4e9 100644
---- a/kernel/irq/Kconfig
-+++ b/kernel/irq/Kconfig
-@@ -111,6 +111,20 @@ config IRQ_DOMAIN_DEBUG
- config IRQ_FORCED_THREADING
- bool
-
-+config FORCE_IRQ_THREADING
-+ bool "Make IRQ threading compulsory"
-+ depends on IRQ_FORCED_THREADING
-+ default y
-+ ---help---
-+
-+ Make IRQ threading mandatory for any IRQ handlers that support it
-+ instead of being optional and requiring the threadirqs kernel
-+ parameter. Instead they can be optionally disabled with the
-+ nothreadirqs kernel parameter.
-+
-+ Enable if you are building for a desktop or low latency system,
-+ otherwise say N.
-+
- config SPARSE_IRQ
- bool "Support sparse irq numbering" if MAY_HAVE_SPARSE_IRQ
- ---help---
-diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
-index 4bff6a10ae8e..5a6df0dd23c4 100644
---- a/kernel/irq/manage.c
-+++ b/kernel/irq/manage.c
-@@ -24,7 +24,17 @@
- #include "internals.h"
-
- #ifdef CONFIG_IRQ_FORCED_THREADING
-+#ifdef CONFIG_FORCE_IRQ_THREADING
-+__read_mostly bool force_irqthreads = true;
-+#else
- __read_mostly bool force_irqthreads;
-+#endif
-+static int __init setup_noforced_irqthreads(char *arg)
-+{
-+ force_irqthreads = false;
-+ return 0;
-+}
-+early_param("nothreadirqs", setup_noforced_irqthreads);
-
- static int __init setup_forced_irqthreads(char *arg)
- {
---
-2.11.0
-
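
The Kconfig option flips the compile-time default of force_irqthreads, while the new nothreadirqs early_param restores mainline behaviour at boot. The same default-on/boot-veto shape, with hypothetical names throughout:

#include <linux/cache.h>        /* __read_mostly */
#include <linux/init.h>         /* early_param */
#include <linux/types.h>

static bool __read_mostly example_feature = true;       /* default on */

static int __init example_feature_off(char *arg)
{
        example_feature = false;
        return 0;
}
early_param("noexamplefeature", example_feature_off);

early_param handlers run before most of the kernel has initialised, which is why a plain bool assignment is all that is needed here.
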
diff --git a/sys-kernel/linux-sources-redcore-lts/files/4.14-0014-Swap-sucks.patch b/sys-kernel/linux-sources-redcore-lts/files/4.14-0014-Swap-sucks.patch
deleted file mode 100644
index 6bf5bcda..00000000
--- a/sys-kernel/linux-sources-redcore-lts/files/4.14-0014-Swap-sucks.patch
+++ /dev/null
@@ -1,25 +0,0 @@
-From ed0ab4c80fcb6fa4abb4f2f897e591df6eaa2d0e Mon Sep 17 00:00:00 2001
-From: Con Kolivas <kernel@kolivas.org>
-Date: Sat, 12 Aug 2017 12:02:04 +1000
-Subject: [PATCH 14/16] Swap sucks.
-
----
- mm/vmscan.c | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/mm/vmscan.c b/mm/vmscan.c
-index eb2f0315b8c0..67d03efab288 100644
---- a/mm/vmscan.c
-+++ b/mm/vmscan.c
-@@ -149,7 +149,7 @@ struct scan_control {
- /*
- * From 0 .. 100. Higher means more swappy.
- */
--int vm_swappiness = 60;
-+int vm_swappiness = 33;
- /*
- * The total number of pages which are beyond the high watermark within all
- * zones.
---
-2.11.0
-
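
The one-liner only changes the compiled-in seed of vm_swappiness; it remains a live sysctl, so the shipped value of 33 can still be inspected or overridden at runtime (sysctl vm.swappiness=60 restores the mainline default). A trivial userspace check against the standard procfs path:

#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/proc/sys/vm/swappiness", "r");
        int val;

        if (!f || fscanf(f, "%d", &val) != 1) {
                perror("vm.swappiness");
                return 1;
        }
        fclose(f);
        printf("vm.swappiness = %d\n", val);    /* 33 with this patch applied */
        return 0;
}
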
diff --git a/sys-kernel/linux-sources-redcore-lts/files/4.14-0015-MuQSS.c-needs-irq_regs.h-to-use-get_irq_regs.patch b/sys-kernel/linux-sources-redcore-lts/files/4.14-0015-MuQSS.c-needs-irq_regs.h-to-use-get_irq_regs.patch
deleted file mode 100644
index bfa509a5..00000000
--- a/sys-kernel/linux-sources-redcore-lts/files/4.14-0015-MuQSS.c-needs-irq_regs.h-to-use-get_irq_regs.patch
+++ /dev/null
@@ -1,19 +0,0 @@
-diff --git a/kernel/sched/MuQSS.c b/kernel/sched/MuQSS.c
-index e84d700709ff6..16364915cff53 100644
---- a/kernel/sched/MuQSS.c
-+++ b/kernel/sched/MuQSS.c
-@@ -70,6 +70,14 @@
-
- #include "MuQSS.h"
-
-+/* MuQSS.c must include irq_regs.h explicitly, otherwise the build
-+ * fails with:
-+ *   implicit declaration of function ‘get_irq_regs’;
-+ *   did you mean ‘get_ibs_caps’?
-+ *   [-Werror=implicit-function-declaration]
-+ */
-+#include <asm/irq_regs.h>
-+
- #define rt_prio(prio) unlikely((prio) < MAX_RT_PRIO)
- #define rt_task(p) rt_prio((p)->prio)
- #define batch_task(p) (unlikely((p)->policy == SCHED_BATCH))
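
The fix is nothing more than making the get_irq_regs() declaration visible before MuQSS.c calls it; under -Werror=implicit-function-declaration a missing prototype is fatal. What the header provides, shown with a hypothetical helper:

#include <asm/irq_regs.h>       /* declares get_irq_regs() */
#include <linux/types.h>

static inline bool example_in_irq_handler(void)
{
        /* The saved register set is non-NULL only while an interrupt
         * is being serviced on this CPU. */
        return get_irq_regs() != NULL;
}
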
diff --git a/sys-kernel/linux-sources-redcore-lts/files/4.14-0016-unfuck-MuQSS-on-linux-4_14_15+.patch b/sys-kernel/linux-sources-redcore-lts/files/4.14-0016-unfuck-MuQSS-on-linux-4_14_15+.patch
deleted file mode 100644
index f7dc1d1c..00000000
--- a/sys-kernel/linux-sources-redcore-lts/files/4.14-0016-unfuck-MuQSS-on-linux-4_14_15+.patch
+++ /dev/null
@@ -1,48 +0,0 @@
-diff --git a/kernel/sched/MuQSS.c b/kernel/sched/MuQSS.c
-index e84d700709ff6..b0be7fcfe41f9 100644
---- a/kernel/sched/MuQSS.c
-+++ b/kernel/sched/MuQSS.c
-@@ -55,6 +55,7 @@
- #include <linux/security.h>
- #include <linux/syscalls.h>
- #include <linux/tick.h>
-+#include <linux/version.h>
-
- #include <asm/switch_to.h>
- #include <asm/tlb.h>
-@@ -1959,7 +1960,11 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
- p->state = TASK_WAKING;
-
- if (p->in_iowait) {
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 15)
- delayacct_blkio_end();
-+#else
-+ delayacct_blkio_end(p);
-+#endif
- atomic_dec(&task_rq(p)->nr_iowait);
- }
-
-@@ -1970,7 +1975,11 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
- #else /* CONFIG_SMP */
-
- if (p->in_iowait) {
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 15)
- delayacct_blkio_end();
-+#else
-+ delayacct_blkio_end(p);
-+#endif
- atomic_dec(&task_rq(p)->nr_iowait);
- }
-
-@@ -2022,7 +2031,11 @@ static void try_to_wake_up_local(struct task_struct *p)
-
- if (!task_on_rq_queued(p)) {
- if (p->in_iowait) {
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 15)
- delayacct_blkio_end();
-+#else
-+ delayacct_blkio_end(p);
-+#endif
- atomic_dec(&rq->nr_iowait);
- }
- ttwu_activate(rq, p);
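
Stable release 4.14.15 backported a delayacct change that gave delayacct_blkio_end() an explicit task argument, so a patch that must build across the whole 4.14.x range has to pick the call at compile time. The hunks above open-code that check three times; factored into a hypothetical wrapper, the idiom is:

#include <linux/delayacct.h>
#include <linux/sched.h>
#include <linux/version.h>

static inline void example_blkio_end(struct task_struct *p)
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 15)
        delayacct_blkio_end();          /* old API: acts on 'current' */
#else
        delayacct_blkio_end(p);         /* 4.14.15+: explicit task */
#endif
}
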
diff --git a/sys-kernel/linux-sources-redcore-lts/files/4.14-0017-unfuck-MuQSS-on-linux-4_14_75+.patch b/sys-kernel/linux-sources-redcore-lts/files/4.14-0017-unfuck-MuQSS-on-linux-4_14_75+.patch
deleted file mode 100644
index 1a1717bf..00000000
--- a/sys-kernel/linux-sources-redcore-lts/files/4.14-0017-unfuck-MuQSS-on-linux-4_14_75+.patch
+++ /dev/null
@@ -1,14 +0,0 @@
-diff -Nur a/kernel/sched/MuQSS.c b/kernel/sched/MuQSS.c
---- a/kernel/sched/MuQSS.c 2019-01-05 22:51:24.547448624 +0000
-+++ b/kernel/sched/MuQSS.c 2019-01-05 22:58:29.821451056 +0000
-@@ -1021,6 +1021,10 @@
- #define CPUIDLE_THREAD_BUSY (16)
- #define CPUIDLE_DIFF_NODE (32)
-
-+#ifdef CONFIG_SCHED_SMT
-+DEFINE_STATIC_KEY_FALSE(sched_smt_present);
-+#endif
-+
- /*
- * The best idle CPU is chosen according to the CPUIDLE ranking above where the
- * lowest value would give the most suitable CPU to schedule p onto next. The
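
From 4.14.75 on, stable backports began consulting the sched_smt_present static key, which mainline defines in the scheduler core that MuQSS replaces, so the patch has to define the key itself. How such a key is consumed, with a hypothetical key name; static_branch_likely() compiles down to a patchable jump, so the disabled case costs a single no-op:

#include <linux/jump_label.h>
#include <linux/types.h>

DEFINE_STATIC_KEY_FALSE(example_smt_key);

static bool example_smt_active(void)
{
        return static_branch_likely(&example_smt_key);
}
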
diff --git a/sys-kernel/linux-sources-redcore-lts/files/4.14-Revert-ath10k-activate-user-space-firmware-loading.patch b/sys-kernel/linux-sources-redcore-lts/files/4.14-Revert-ath10k-activate-user-space-firmware-loading.patch
deleted file mode 100644
index 28f9b2f6..00000000
--- a/sys-kernel/linux-sources-redcore-lts/files/4.14-Revert-ath10k-activate-user-space-firmware-loading.patch
+++ /dev/null
@@ -1,13 +0,0 @@
-diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
-index a4f635820..9b4c4facf 100644
---- a/drivers/net/wireless/ath/ath10k/core.c
-+++ b/drivers/net/wireless/ath/ath10k/core.c
-@@ -519,7 +519,7 @@ static const struct firmware *ath10k_fetch_fw_file(struct ath10k *ar,
- dir = ".";
-
- snprintf(filename, sizeof(filename), "%s/%s", dir, file);
-- ret = request_firmware(&fw, filename, ar->dev);
-+ ret = request_firmware_direct(&fw, filename, ar->dev);
- ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot fw request '%s': %d\n",
- filename, ret);
-
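
request_firmware() may fall back to the user-space firmware helper, which can stall early boot when no helper is present; request_firmware_direct() only searches the kernel's built-in firmware paths and fails fast, which is the behaviour this revert enforces for ath10k. The pattern in general form, with hypothetical device and file names:

#include <linux/device.h>
#include <linux/firmware.h>

static int example_load_fw(struct device *dev)
{
        const struct firmware *fw;
        int ret = request_firmware_direct(&fw, "vendor/board.bin", dev);

        if (ret)
                return ret;     /* no sysfs/helper fallback was attempted */
        /* ... copy fw->data (fw->size bytes) to the device ... */
        release_firmware(fw);
        return 0;
}
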
diff --git a/sys-kernel/linux-sources-redcore-lts/files/4.14-amd64.config b/sys-kernel/linux-sources-redcore-lts/files/4.14-amd64.config
deleted file mode 100644
index 307b0bd9..00000000
--- a/sys-kernel/linux-sources-redcore-lts/files/4.14-amd64.config
+++ /dev/null
@@ -1,9101 +0,0 @@
-#
-# Automatically generated file; DO NOT EDIT.
-# Linux/x86 4.14.95-redcore-lts-r1 Kernel Configuration
-#
-CONFIG_64BIT=y
-CONFIG_X86_64=y
-CONFIG_X86=y
-CONFIG_INSTRUCTION_DECODER=y
-CONFIG_OUTPUT_FORMAT="elf64-x86-64"
-CONFIG_ARCH_DEFCONFIG="arch/x86/configs/x86_64_defconfig"
-CONFIG_LOCKDEP_SUPPORT=y
-CONFIG_STACKTRACE_SUPPORT=y
-CONFIG_MMU=y
-CONFIG_ARCH_MMAP_RND_BITS_MIN=28
-CONFIG_ARCH_MMAP_RND_BITS_MAX=32
-CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN=8
-CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX=16
-CONFIG_NEED_DMA_MAP_STATE=y
-CONFIG_NEED_SG_DMA_LENGTH=y
-CONFIG_GENERIC_ISA_DMA=y
-CONFIG_GENERIC_BUG=y
-CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y
-CONFIG_GENERIC_HWEIGHT=y
-CONFIG_ARCH_MAY_HAVE_PC_FDC=y
-CONFIG_RWSEM_XCHGADD_ALGORITHM=y
-CONFIG_GENERIC_CALIBRATE_DELAY=y
-CONFIG_ARCH_HAS_CPU_RELAX=y
-CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y
-CONFIG_HAVE_SETUP_PER_CPU_AREA=y
-CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y
-CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y
-CONFIG_ARCH_HIBERNATION_POSSIBLE=y
-CONFIG_ARCH_SUSPEND_POSSIBLE=y
-CONFIG_ARCH_WANT_HUGE_PMD_SHARE=y
-CONFIG_ARCH_WANT_GENERAL_HUGETLB=y
-CONFIG_ZONE_DMA32=y
-CONFIG_AUDIT_ARCH=y
-CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING=y
-CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y
-CONFIG_HAVE_INTEL_TXT=y
-CONFIG_X86_64_SMP=y
-CONFIG_ARCH_SUPPORTS_UPROBES=y
-CONFIG_FIX_EARLYCON_MEM=y
-CONFIG_PGTABLE_LEVELS=4
-CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
-CONFIG_IRQ_WORK=y
-CONFIG_BUILDTIME_EXTABLE_SORT=y
-CONFIG_THREAD_INFO_IN_TASK=y
-
-#
-# General setup
-#
-CONFIG_SCHED_MUQSS=y
-CONFIG_INIT_ENV_ARG_LIMIT=32
-CONFIG_CROSS_COMPILE=""
-# CONFIG_COMPILE_TEST is not set
-CONFIG_LOCALVERSION=""
-CONFIG_LOCALVERSION_AUTO=y
-CONFIG_HAVE_KERNEL_GZIP=y
-CONFIG_HAVE_KERNEL_BZIP2=y
-CONFIG_HAVE_KERNEL_LZMA=y
-CONFIG_HAVE_KERNEL_XZ=y
-CONFIG_HAVE_KERNEL_LZO=y
-CONFIG_HAVE_KERNEL_LZ4=y
-# CONFIG_KERNEL_GZIP is not set
-# CONFIG_KERNEL_BZIP2 is not set
-# CONFIG_KERNEL_LZMA is not set
-# CONFIG_KERNEL_XZ is not set
-# CONFIG_KERNEL_LZO is not set
-CONFIG_KERNEL_LZ4=y
-CONFIG_DEFAULT_HOSTNAME="(none)"
-CONFIG_SWAP=y
-CONFIG_SYSVIPC=y
-CONFIG_SYSVIPC_SYSCTL=y
-CONFIG_POSIX_MQUEUE=y
-CONFIG_POSIX_MQUEUE_SYSCTL=y
-CONFIG_CROSS_MEMORY_ATTACH=y
-CONFIG_FHANDLE=y
-# CONFIG_USELIB is not set
-CONFIG_AUDIT=y
-CONFIG_HAVE_ARCH_AUDITSYSCALL=y
-CONFIG_AUDITSYSCALL=y
-CONFIG_AUDIT_WATCH=y
-CONFIG_AUDIT_TREE=y
-
-#
-# IRQ subsystem
-#
-CONFIG_GENERIC_IRQ_PROBE=y
-CONFIG_GENERIC_IRQ_SHOW=y
-CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y
-CONFIG_GENERIC_PENDING_IRQ=y
-CONFIG_GENERIC_IRQ_MIGRATION=y
-CONFIG_GENERIC_IRQ_CHIP=y
-CONFIG_IRQ_DOMAIN=y
-CONFIG_IRQ_SIM=y
-CONFIG_IRQ_DOMAIN_HIERARCHY=y
-CONFIG_GENERIC_MSI_IRQ=y
-CONFIG_GENERIC_MSI_IRQ_DOMAIN=y
-# CONFIG_IRQ_DOMAIN_DEBUG is not set
-CONFIG_IRQ_FORCED_THREADING=y
-CONFIG_FORCE_IRQ_THREADING=y
-CONFIG_SPARSE_IRQ=y
-# CONFIG_GENERIC_IRQ_DEBUGFS is not set
-CONFIG_CLOCKSOURCE_WATCHDOG=y
-CONFIG_ARCH_CLOCKSOURCE_DATA=y
-CONFIG_CLOCKSOURCE_VALIDATE_LAST_CYCLE=y
-CONFIG_GENERIC_TIME_VSYSCALL=y
-CONFIG_GENERIC_CLOCKEVENTS=y
-CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y
-CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST=y
-CONFIG_GENERIC_CMOS_UPDATE=y
-
-#
-# Timers subsystem
-#
-CONFIG_TICK_ONESHOT=y
-CONFIG_HZ_PERIODIC=y
-# CONFIG_NO_HZ_IDLE is not set
-# CONFIG_NO_HZ_FULL is not set
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
-
-#
-# CPU/Task time and stats accounting
-#
-CONFIG_VIRT_CPU_ACCOUNTING=y
-# CONFIG_TICK_CPU_ACCOUNTING is not set
-CONFIG_VIRT_CPU_ACCOUNTING_GEN=y
-CONFIG_IRQ_TIME_ACCOUNTING=y
-CONFIG_BSD_PROCESS_ACCT=y
-# CONFIG_BSD_PROCESS_ACCT_V3 is not set
-CONFIG_TASKSTATS=y
-CONFIG_TASK_DELAY_ACCT=y
-CONFIG_TASK_XACCT=y
-CONFIG_TASK_IO_ACCOUNTING=y
-
-#
-# RCU Subsystem
-#
-CONFIG_PREEMPT_RCU=y
-# CONFIG_RCU_EXPERT is not set
-CONFIG_SRCU=y
-CONFIG_TREE_SRCU=y
-CONFIG_TASKS_RCU=y
-CONFIG_RCU_STALL_COMMON=y
-CONFIG_RCU_NEED_SEGCBLIST=y
-CONFIG_CONTEXT_TRACKING=y
-# CONFIG_CONTEXT_TRACKING_FORCE is not set
-CONFIG_BUILD_BIN2C=y
-CONFIG_IKCONFIG=y
-CONFIG_IKCONFIG_PROC=y
-CONFIG_LOG_BUF_SHIFT=17
-CONFIG_LOG_CPU_MAX_BUF_SHIFT=13
-CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT=13
-CONFIG_HAVE_UNSTABLE_SCHED_CLOCK=y
-CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y
-CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH=y
-CONFIG_ARCH_SUPPORTS_INT128=y
-CONFIG_CGROUPS=y
-CONFIG_PAGE_COUNTER=y
-CONFIG_MEMCG=y
-CONFIG_MEMCG_SWAP=y
-CONFIG_MEMCG_SWAP_ENABLED=y
-CONFIG_BLK_CGROUP=y
-# CONFIG_DEBUG_BLK_CGROUP is not set
-CONFIG_CGROUP_WRITEBACK=y
-CONFIG_CGROUP_SCHED=y
-CONFIG_CGROUP_PIDS=y
-# CONFIG_CGROUP_RDMA is not set
-CONFIG_CGROUP_FREEZER=y
-CONFIG_CGROUP_HUGETLB=y
-CONFIG_CPUSETS=y
-CONFIG_PROC_PID_CPUSET=y
-CONFIG_CGROUP_DEVICE=y
-CONFIG_CGROUP_PERF=y
-CONFIG_CGROUP_BPF=y
-# CONFIG_CGROUP_DEBUG is not set
-CONFIG_SOCK_CGROUP_DATA=y
-# CONFIG_CHECKPOINT_RESTORE is not set
-CONFIG_NAMESPACES=y
-CONFIG_UTS_NS=y
-CONFIG_IPC_NS=y
-CONFIG_USER_NS=y
-CONFIG_PID_NS=y
-CONFIG_NET_NS=y
-# CONFIG_SYSFS_DEPRECATED is not set
-CONFIG_RELAY=y
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE=""
-CONFIG_RD_GZIP=y
-CONFIG_RD_BZIP2=y
-CONFIG_RD_LZMA=y
-CONFIG_RD_XZ=y
-CONFIG_RD_LZO=y
-CONFIG_RD_LZ4=y
-CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-# CONFIG_LOCAL_INIT is not set
-CONFIG_SYSCTL=y
-CONFIG_ANON_INODES=y
-CONFIG_HAVE_UID16=y
-CONFIG_SYSCTL_EXCEPTION_TRACE=y
-CONFIG_HAVE_PCSPKR_PLATFORM=y
-CONFIG_BPF=y
-# CONFIG_EXPERT is not set
-CONFIG_UID16=y
-CONFIG_MULTIUSER=y
-CONFIG_SGETMASK_SYSCALL=y
-CONFIG_SYSFS_SYSCALL=y
-# CONFIG_SYSCTL_SYSCALL is not set
-CONFIG_POSIX_TIMERS=y
-CONFIG_KALLSYMS=y
-# CONFIG_KALLSYMS_ALL is not set
-CONFIG_KALLSYMS_ABSOLUTE_PERCPU=y
-CONFIG_KALLSYMS_BASE_RELATIVE=y
-CONFIG_PRINTK=y
-CONFIG_PRINTK_NMI=y
-CONFIG_BUG=y
-CONFIG_ELF_CORE=y
-CONFIG_PCSPKR_PLATFORM=y
-CONFIG_BASE_FULL=y
-CONFIG_FUTEX=y
-CONFIG_FUTEX_PI=y
-CONFIG_EPOLL=y
-CONFIG_SIGNALFD=y
-CONFIG_TIMERFD=y
-CONFIG_EVENTFD=y
-CONFIG_BPF_SYSCALL=y
-CONFIG_BPF_JIT_ALWAYS_ON=y
-CONFIG_SHMEM=y
-CONFIG_AIO=y
-CONFIG_ADVISE_SYSCALLS=y
-CONFIG_USERFAULTFD=y
-CONFIG_PCI_QUIRKS=y
-CONFIG_MEMBARRIER=y
-# CONFIG_EMBEDDED is not set
-CONFIG_HAVE_PERF_EVENTS=y
-# CONFIG_PC104 is not set
-
-#
-# Kernel Performance Events And Counters
-#
-CONFIG_PERF_EVENTS=y
-# CONFIG_DEBUG_PERF_USE_VMALLOC is not set
-CONFIG_VM_EVENT_COUNTERS=y
-CONFIG_SLUB_DEBUG=y
-# CONFIG_SLUB_MEMCG_SYSFS_ON is not set
-# CONFIG_COMPAT_BRK is not set
-# CONFIG_SLAB is not set
-CONFIG_SLUB=y
-CONFIG_SLAB_MERGE_DEFAULT=y
-CONFIG_SLAB_FREELIST_RANDOM=y
-CONFIG_SLAB_FREELIST_HARDENED=y
-CONFIG_SLAB_HARDENED=y
-CONFIG_SLAB_SANITIZE=y
-CONFIG_SLAB_SANITIZE_VERIFY=y
-CONFIG_SLUB_CPU_PARTIAL=y
-CONFIG_SYSTEM_DATA_VERIFICATION=y
-CONFIG_PROFILING=y
-CONFIG_TRACEPOINTS=y
-CONFIG_CRASH_CORE=y
-CONFIG_KEXEC_CORE=y
-CONFIG_HOTPLUG_SMT=y
-# CONFIG_OPROFILE is not set
-CONFIG_HAVE_OPROFILE=y
-CONFIG_OPROFILE_NMI_TIMER=y
-CONFIG_KPROBES=y
-CONFIG_JUMP_LABEL=y
-# CONFIG_STATIC_KEYS_SELFTEST is not set
-CONFIG_OPTPROBES=y
-CONFIG_UPROBES=y
-# CONFIG_HAVE_64BIT_ALIGNED_ACCESS is not set
-CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y
-CONFIG_ARCH_USE_BUILTIN_BSWAP=y
-CONFIG_KRETPROBES=y
-CONFIG_USER_RETURN_NOTIFIER=y
-CONFIG_HAVE_IOREMAP_PROT=y
-CONFIG_HAVE_KPROBES=y
-CONFIG_HAVE_KRETPROBES=y
-CONFIG_HAVE_OPTPROBES=y
-CONFIG_HAVE_KPROBES_ON_FTRACE=y
-CONFIG_HAVE_NMI=y
-CONFIG_HAVE_ARCH_TRACEHOOK=y
-CONFIG_HAVE_DMA_CONTIGUOUS=y
-CONFIG_GENERIC_SMP_IDLE_THREAD=y
-CONFIG_ARCH_HAS_FORTIFY_SOURCE=y
-CONFIG_ARCH_HAS_SET_MEMORY=y
-CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT=y
-CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y
-CONFIG_HAVE_CLK=y
-CONFIG_HAVE_DMA_API_DEBUG=y
-CONFIG_HAVE_HW_BREAKPOINT=y
-CONFIG_HAVE_MIXED_BREAKPOINTS_REGS=y
-CONFIG_HAVE_USER_RETURN_NOTIFIER=y
-CONFIG_HAVE_PERF_EVENTS_NMI=y
-CONFIG_HAVE_HARDLOCKUP_DETECTOR_PERF=y
-CONFIG_HAVE_PERF_REGS=y
-CONFIG_HAVE_PERF_USER_STACK_DUMP=y
-CONFIG_HAVE_ARCH_JUMP_LABEL=y
-CONFIG_HAVE_RCU_TABLE_FREE=y
-CONFIG_HAVE_RCU_TABLE_INVALIDATE=y
-CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y
-CONFIG_HAVE_ALIGNED_STRUCT_PAGE=y
-CONFIG_HAVE_CMPXCHG_LOCAL=y
-CONFIG_HAVE_CMPXCHG_DOUBLE=y
-CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION=y
-CONFIG_ARCH_WANT_OLD_COMPAT_IPC=y
-CONFIG_HAVE_ARCH_SECCOMP_FILTER=y
-CONFIG_SECCOMP_FILTER=y
-CONFIG_HAVE_GCC_PLUGINS=y
-# CONFIG_GCC_PLUGINS is not set
-CONFIG_HAVE_CC_STACKPROTECTOR=y
-CONFIG_CC_STACKPROTECTOR=y
-# CONFIG_CC_STACKPROTECTOR_NONE is not set
-# CONFIG_CC_STACKPROTECTOR_REGULAR is not set
-CONFIG_CC_STACKPROTECTOR_STRONG=y
-CONFIG_THIN_ARCHIVES=y
-CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES=y
-CONFIG_HAVE_CONTEXT_TRACKING=y
-CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y
-CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y
-CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y
-CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD=y
-CONFIG_HAVE_ARCH_HUGE_VMAP=y
-CONFIG_HAVE_ARCH_SOFT_DIRTY=y
-CONFIG_HAVE_MOD_ARCH_SPECIFIC=y
-CONFIG_MODULES_USE_ELF_RELA=y
-CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK=y
-CONFIG_ARCH_HAS_ELF_RANDOMIZE=y
-CONFIG_HAVE_ARCH_MMAP_RND_BITS=y
-CONFIG_HAVE_EXIT_THREAD=y
-CONFIG_ARCH_MMAP_RND_BITS=32
-CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS=y
-CONFIG_ARCH_MMAP_RND_COMPAT_BITS=16
-CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES=y
-CONFIG_HAVE_COPY_THREAD_TLS=y
-CONFIG_HAVE_STACK_VALIDATION=y
-# CONFIG_HAVE_ARCH_HASH is not set
-# CONFIG_ISA_BUS_API is not set
-CONFIG_OLD_SIGSUSPEND3=y
-CONFIG_COMPAT_OLD_SIGACTION=y
-# CONFIG_CPU_NO_EFFICIENT_FFS is not set
-CONFIG_HAVE_ARCH_VMAP_STACK=y
-CONFIG_VMAP_STACK=y
-# CONFIG_ARCH_OPTIONAL_KERNEL_RWX is not set
-# CONFIG_ARCH_OPTIONAL_KERNEL_RWX_DEFAULT is not set
-CONFIG_ARCH_HAS_STRICT_KERNEL_RWX=y
-CONFIG_STRICT_KERNEL_RWX=y
-CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y
-CONFIG_STRICT_MODULE_RWX=y
-CONFIG_ARCH_HAS_REFCOUNT=y
-CONFIG_REFCOUNT_FULL=y
-
-#
-# GCOV-based kernel profiling
-#
-# CONFIG_GCOV_KERNEL is not set
-CONFIG_ARCH_HAS_GCOV_PROFILE_ALL=y
-# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
-CONFIG_SLABINFO=y
-CONFIG_RT_MUTEXES=y
-CONFIG_BASE_SMALL=0
-CONFIG_MODULES=y
-CONFIG_MODULE_FORCE_LOAD=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_MODULE_FORCE_UNLOAD=y
-CONFIG_MODVERSIONS=y
-CONFIG_MODULE_SRCVERSION_ALL=y
-CONFIG_MODULE_SIG=y
-# CONFIG_MODULE_SIG_FORCE is not set
-CONFIG_MODULE_SIG_ALL=y
-# CONFIG_MODULE_SIG_SHA1 is not set
-# CONFIG_MODULE_SIG_SHA224 is not set
-# CONFIG_MODULE_SIG_SHA256 is not set
-# CONFIG_MODULE_SIG_SHA384 is not set
-CONFIG_MODULE_SIG_SHA512=y
-CONFIG_MODULE_SIG_HASH="sha512"
-CONFIG_MODULE_COMPRESS=y
-CONFIG_MODULE_COMPRESS_GZIP=y
-# CONFIG_MODULE_COMPRESS_XZ is not set
-# CONFIG_TRIM_UNUSED_KSYMS is not set
-CONFIG_MODULES_TREE_LOOKUP=y
-CONFIG_BLOCK=y
-CONFIG_BLK_SCSI_REQUEST=y
-CONFIG_BLK_DEV_BSG=y
-CONFIG_BLK_DEV_BSGLIB=y
-CONFIG_BLK_DEV_INTEGRITY=y
-CONFIG_BLK_DEV_ZONED=y
-CONFIG_BLK_DEV_THROTTLING=y
-# CONFIG_BLK_DEV_THROTTLING_LOW is not set
-CONFIG_BLK_CMDLINE_PARSER=y
-CONFIG_BLK_WBT=y
-CONFIG_BLK_WBT_SQ=y
-CONFIG_BLK_WBT_MQ=y
-CONFIG_BLK_DEBUG_FS=y
-# CONFIG_BLK_SED_OPAL is not set
-
-#
-# Partition Types
-#
-CONFIG_PARTITION_ADVANCED=y
-# CONFIG_ACORN_PARTITION is not set
-# CONFIG_AIX_PARTITION is not set
-# CONFIG_OSF_PARTITION is not set
-# CONFIG_AMIGA_PARTITION is not set
-# CONFIG_ATARI_PARTITION is not set
-# CONFIG_MAC_PARTITION is not set
-CONFIG_MSDOS_PARTITION=y
-# CONFIG_BSD_DISKLABEL is not set
-# CONFIG_MINIX_SUBPARTITION is not set
-# CONFIG_SOLARIS_X86_PARTITION is not set
-# CONFIG_UNIXWARE_DISKLABEL is not set
-CONFIG_LDM_PARTITION=y
-CONFIG_LDM_DEBUG=y
-# CONFIG_SGI_PARTITION is not set
-# CONFIG_ULTRIX_PARTITION is not set
-# CONFIG_SUN_PARTITION is not set
-# CONFIG_KARMA_PARTITION is not set
-CONFIG_EFI_PARTITION=y
-# CONFIG_SYSV68_PARTITION is not set
-CONFIG_CMDLINE_PARTITION=y
-CONFIG_BLOCK_COMPAT=y
-CONFIG_BLK_MQ_PCI=y
-CONFIG_BLK_MQ_VIRTIO=y
-CONFIG_BLK_MQ_RDMA=y
-
-#
-# IO Schedulers
-#
-CONFIG_IOSCHED_NOOP=y
-CONFIG_IOSCHED_DEADLINE=y
-CONFIG_IOSCHED_CFQ=y
-CONFIG_CFQ_GROUP_IOSCHED=y
-# CONFIG_DEFAULT_DEADLINE is not set
-CONFIG_DEFAULT_CFQ=y
-# CONFIG_DEFAULT_NOOP is not set
-CONFIG_DEFAULT_IOSCHED="cfq"
-CONFIG_MQ_IOSCHED_DEADLINE=y
-# CONFIG_MQ_IOSCHED_KYBER is not set
-CONFIG_IOSCHED_BFQ=y
-CONFIG_BFQ_GROUP_IOSCHED=y
-CONFIG_PREEMPT_NOTIFIERS=y
-CONFIG_PADATA=y
-CONFIG_ASN1=y
-CONFIG_UNINLINE_SPIN_UNLOCK=y
-CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y
-CONFIG_MUTEX_SPIN_ON_OWNER=y
-CONFIG_RWSEM_SPIN_ON_OWNER=y
-CONFIG_LOCK_SPIN_ON_OWNER=y
-CONFIG_ARCH_USE_QUEUED_SPINLOCKS=y
-CONFIG_QUEUED_SPINLOCKS=y
-CONFIG_ARCH_USE_QUEUED_RWLOCKS=y
-CONFIG_QUEUED_RWLOCKS=y
-CONFIG_FREEZER=y
-
-#
-# Processor type and features
-#
-CONFIG_ZONE_DMA=y
-CONFIG_SMP=y
-CONFIG_X86_FEATURE_NAMES=y
-CONFIG_X86_FAST_FEATURE_TESTS=y
-CONFIG_X86_X2APIC=y
-CONFIG_X86_MPPARSE=y
-# CONFIG_GOLDFISH is not set
-CONFIG_RETPOLINE=y
-CONFIG_INTEL_RDT=y
-# CONFIG_X86_EXTENDED_PLATFORM is not set
-CONFIG_X86_INTEL_LPSS=y
-CONFIG_X86_AMD_PLATFORM_DEVICE=y
-CONFIG_IOSF_MBI=y
-# CONFIG_IOSF_MBI_DEBUG is not set
-CONFIG_X86_SUPPORTS_MEMORY_FAILURE=y
-CONFIG_SCHED_OMIT_FRAME_POINTER=y
-CONFIG_HYPERVISOR_GUEST=y
-CONFIG_PARAVIRT=y
-# CONFIG_PARAVIRT_DEBUG is not set
-# CONFIG_PARAVIRT_SPINLOCKS is not set
-# CONFIG_XEN is not set
-CONFIG_KVM_GUEST=y
-# CONFIG_KVM_DEBUG_FS is not set
-# CONFIG_PARAVIRT_TIME_ACCOUNTING is not set
-CONFIG_PARAVIRT_CLOCK=y
-CONFIG_NO_BOOTMEM=y
-# CONFIG_MK8 is not set
-# CONFIG_MPSC is not set
-# CONFIG_MCORE2 is not set
-# CONFIG_MATOM is not set
-CONFIG_GENERIC_CPU=y
-CONFIG_X86_INTERNODE_CACHE_SHIFT=6
-CONFIG_X86_L1_CACHE_SHIFT=6
-CONFIG_X86_TSC=y
-CONFIG_X86_CMPXCHG64=y
-CONFIG_X86_CMOV=y
-CONFIG_X86_MINIMUM_CPU_FAMILY=64
-CONFIG_X86_DEBUGCTLMSR=y
-CONFIG_CPU_SUP_INTEL=y
-CONFIG_CPU_SUP_AMD=y
-CONFIG_CPU_SUP_CENTAUR=y
-CONFIG_HPET_TIMER=y
-CONFIG_HPET_EMULATE_RTC=y
-CONFIG_DMI=y
-CONFIG_GART_IOMMU=y
-CONFIG_CALGARY_IOMMU=y
-CONFIG_CALGARY_IOMMU_ENABLED_BY_DEFAULT=y
-CONFIG_SWIOTLB=y
-CONFIG_IOMMU_HELPER=y
-CONFIG_MAXSMP=y
-CONFIG_NR_CPUS=8192
-CONFIG_SCHED_SMT=y
-CONFIG_SMT_NICE=y
-CONFIG_SCHED_MC=y
-CONFIG_SCHED_MC_PRIO=y
-# CONFIG_PREEMPT_NONE is not set
-# CONFIG_PREEMPT_VOLUNTARY is not set
-CONFIG_PREEMPT=y
-CONFIG_PREEMPT_COUNT=y
-CONFIG_X86_LOCAL_APIC=y
-CONFIG_X86_IO_APIC=y
-CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y
-CONFIG_X86_MCE=y
-# CONFIG_X86_MCELOG_LEGACY is not set
-CONFIG_X86_MCE_INTEL=y
-CONFIG_X86_MCE_AMD=y
-CONFIG_X86_MCE_THRESHOLD=y
-# CONFIG_X86_MCE_INJECT is not set
-CONFIG_X86_THERMAL_VECTOR=y
-
-#
-# Performance monitoring
-#
-CONFIG_PERF_EVENTS_INTEL_UNCORE=y
-CONFIG_PERF_EVENTS_INTEL_RAPL=y
-CONFIG_PERF_EVENTS_INTEL_CSTATE=y
-CONFIG_PERF_EVENTS_AMD_POWER=m
-# CONFIG_VM86 is not set
-# CONFIG_X86_16BIT is not set
-CONFIG_X86_VSYSCALL_EMULATION=y
-CONFIG_I8K=m
-CONFIG_MICROCODE=y
-CONFIG_MICROCODE_INTEL=y
-CONFIG_MICROCODE_AMD=y
-CONFIG_MICROCODE_OLD_INTERFACE=y
-CONFIG_X86_MSR=m
-CONFIG_X86_CPUID=m
-# CONFIG_X86_5LEVEL is not set
-CONFIG_ARCH_PHYS_ADDR_T_64BIT=y
-CONFIG_ARCH_DMA_ADDR_T_64BIT=y
-CONFIG_X86_DIRECT_GBPAGES=y
-CONFIG_ARCH_HAS_MEM_ENCRYPT=y
-CONFIG_AMD_MEM_ENCRYPT=y
-# CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT is not set
-CONFIG_ARCH_USE_MEMREMAP_PROT=y
-CONFIG_NUMA=y
-CONFIG_AMD_NUMA=y
-CONFIG_X86_64_ACPI_NUMA=y
-CONFIG_NODES_SPAN_OTHER_NODES=y
-# CONFIG_NUMA_EMU is not set
-CONFIG_NODES_SHIFT=10
-CONFIG_ARCH_SPARSEMEM_ENABLE=y
-CONFIG_ARCH_SPARSEMEM_DEFAULT=y
-CONFIG_ARCH_SELECT_MEMORY_MODEL=y
-CONFIG_ARCH_MEMORY_PROBE=y
-CONFIG_ARCH_PROC_KCORE_TEXT=y
-CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000
-CONFIG_SELECT_MEMORY_MODEL=y
-CONFIG_SPARSEMEM_MANUAL=y
-CONFIG_SPARSEMEM=y
-CONFIG_NEED_MULTIPLE_NODES=y
-CONFIG_HAVE_MEMORY_PRESENT=y
-CONFIG_SPARSEMEM_EXTREME=y
-CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y
-CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER=y
-CONFIG_SPARSEMEM_VMEMMAP=y
-CONFIG_HAVE_MEMBLOCK=y
-CONFIG_HAVE_MEMBLOCK_NODE_MAP=y
-CONFIG_HAVE_GENERIC_GUP=y
-CONFIG_ARCH_DISCARD_MEMBLOCK=y
-CONFIG_MEMORY_ISOLATION=y
-CONFIG_HAVE_BOOTMEM_INFO_NODE=y
-CONFIG_MEMORY_HOTPLUG=y
-CONFIG_MEMORY_HOTPLUG_SPARSE=y
-# CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE is not set
-CONFIG_MEMORY_HOTREMOVE=y
-CONFIG_SPLIT_PTLOCK_CPUS=4
-CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK=y
-CONFIG_MEMORY_BALLOON=y
-CONFIG_BALLOON_COMPACTION=y
-CONFIG_COMPACTION=y
-CONFIG_MIGRATION=y
-CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION=y
-CONFIG_ARCH_ENABLE_THP_MIGRATION=y
-CONFIG_PHYS_ADDR_T_64BIT=y
-CONFIG_BOUNCE=y
-CONFIG_VIRT_TO_BUS=y
-CONFIG_MMU_NOTIFIER=y
-CONFIG_KSM=y
-CONFIG_UKSM=y
-# CONFIG_KSM_LEGACY is not set
-CONFIG_DEFAULT_MMAP_MIN_ADDR=65536
-CONFIG_ARCH_SUPPORTS_MEMORY_FAILURE=y
-CONFIG_MEMORY_FAILURE=y
-# CONFIG_HWPOISON_INJECT is not set
-CONFIG_TRANSPARENT_HUGEPAGE=y
-CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS=y
-# CONFIG_TRANSPARENT_HUGEPAGE_MADVISE is not set
-CONFIG_ARCH_WANTS_THP_SWAP=y
-CONFIG_THP_SWAP=y
-CONFIG_TRANSPARENT_HUGE_PAGECACHE=y
-CONFIG_CLEANCACHE=y
-CONFIG_FRONTSWAP=y
-CONFIG_CMA=y
-# CONFIG_CMA_DEBUG is not set
-# CONFIG_CMA_DEBUGFS is not set
-CONFIG_CMA_AREAS=7
-# CONFIG_ZSWAP is not set
-CONFIG_ZPOOL=m
-CONFIG_ZBUD=m
-CONFIG_Z3FOLD=m
-CONFIG_ZSMALLOC=y
-# CONFIG_PGTABLE_MAPPING is not set
-# CONFIG_ZSMALLOC_STAT is not set
-CONFIG_GENERIC_EARLY_IOREMAP=y
-CONFIG_ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT=y
-# CONFIG_DEFERRED_STRUCT_PAGE_INIT is not set
-# CONFIG_IDLE_PAGE_TRACKING is not set
-CONFIG_ARCH_HAS_ZONE_DEVICE=y
-# CONFIG_ZONE_DEVICE is not set
-CONFIG_FRAME_VECTOR=y
-CONFIG_ARCH_USES_HIGH_VMA_FLAGS=y
-CONFIG_ARCH_HAS_PKEYS=y
-# CONFIG_PERCPU_STATS is not set
-CONFIG_X86_PMEM_LEGACY_DEVICE=y
-CONFIG_X86_PMEM_LEGACY=m
-CONFIG_X86_CHECK_BIOS_CORRUPTION=y
-CONFIG_X86_BOOTPARAM_MEMORY_CORRUPTION_CHECK=y
-CONFIG_X86_RESERVE_LOW=64
-CONFIG_MTRR=y
-CONFIG_MTRR_SANITIZER=y
-CONFIG_MTRR_SANITIZER_ENABLE_DEFAULT=0
-CONFIG_MTRR_SANITIZER_SPARE_REG_NR_DEFAULT=1
-CONFIG_X86_PAT=y
-CONFIG_ARCH_USES_PG_UNCACHED=y
-CONFIG_ARCH_RANDOM=y
-CONFIG_X86_SMAP=y
-CONFIG_X86_INTEL_MPX=y
-CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS=y
-CONFIG_EFI=y
-CONFIG_EFI_STUB=y
-CONFIG_EFI_MIXED=y
-CONFIG_SECCOMP=y
-CONFIG_HZ_100=y
-# CONFIG_HZ_250_NODEF is not set
-# CONFIG_HZ_300_NODEF is not set
-# CONFIG_HZ_1000_NODEF is not set
-CONFIG_HZ=100
-CONFIG_SCHED_HRTICK=y
-CONFIG_KEXEC=y
-# CONFIG_CRASH_DUMP is not set
-# CONFIG_KEXEC_JUMP is not set
-CONFIG_PHYSICAL_START=0x1000000
-CONFIG_RELOCATABLE=y
-CONFIG_RANDOMIZE_BASE=y
-CONFIG_X86_NEED_RELOCS=y
-CONFIG_PHYSICAL_ALIGN=0x1000000
-CONFIG_RANDOMIZE_MEMORY=y
-CONFIG_RANDOMIZE_MEMORY_PHYSICAL_PADDING=0xa
-CONFIG_HOTPLUG_CPU=y
-CONFIG_BOOTPARAM_HOTPLUG_CPU0=y
-# CONFIG_DEBUG_HOTPLUG_CPU0 is not set
-# CONFIG_COMPAT_VDSO is not set
-CONFIG_LEGACY_VSYSCALL_NATIVE=y
-# CONFIG_LEGACY_VSYSCALL_EMULATE is not set
-# CONFIG_LEGACY_VSYSCALL_NONE is not set
-# CONFIG_CMDLINE_BOOL is not set
-CONFIG_MODIFY_LDT_SYSCALL=y
-CONFIG_HAVE_LIVEPATCH=y
-CONFIG_ARCH_HAS_ADD_PAGES=y
-CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
-CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y
-CONFIG_USE_PERCPU_NUMA_NODE_ID=y
-
-#
-# Power management and ACPI options
-#
-CONFIG_ARCH_HIBERNATION_HEADER=y
-CONFIG_SUSPEND=y
-CONFIG_SUSPEND_FREEZER=y
-CONFIG_HIBERNATE_CALLBACKS=y
-CONFIG_HIBERNATION=y
-CONFIG_PM_STD_PARTITION=""
-CONFIG_PM_SLEEP=y
-CONFIG_PM_SLEEP_SMP=y
-CONFIG_PM_AUTOSLEEP=y
-CONFIG_PM_WAKELOCKS=y
-CONFIG_PM_WAKELOCKS_LIMIT=100
-CONFIG_PM_WAKELOCKS_GC=y
-CONFIG_PM=y
-# CONFIG_PM_DEBUG is not set
-CONFIG_PM_OPP=y
-CONFIG_PM_CLK=y
-CONFIG_PM_GENERIC_DOMAINS=y
-# CONFIG_WQ_POWER_EFFICIENT_DEFAULT is not set
-CONFIG_PM_GENERIC_DOMAINS_SLEEP=y
-CONFIG_ACPI=y
-CONFIG_ACPI_LEGACY_TABLES_LOOKUP=y
-CONFIG_ARCH_MIGHT_HAVE_ACPI_PDC=y
-CONFIG_ACPI_SYSTEM_POWER_STATES_SUPPORT=y
-# CONFIG_ACPI_DEBUGGER is not set
-CONFIG_ACPI_SLEEP=y
-# CONFIG_ACPI_PROCFS_POWER is not set
-CONFIG_ACPI_REV_OVERRIDE_POSSIBLE=y
-# CONFIG_ACPI_EC_DEBUGFS is not set
-CONFIG_ACPI_AC=m
-CONFIG_ACPI_BATTERY=m
-CONFIG_ACPI_BUTTON=m
-CONFIG_ACPI_VIDEO=m
-CONFIG_ACPI_FAN=m
-CONFIG_ACPI_DOCK=y
-CONFIG_ACPI_CPU_FREQ_PSS=y
-CONFIG_ACPI_PROCESSOR_CSTATE=y
-CONFIG_ACPI_PROCESSOR_IDLE=y
-CONFIG_ACPI_CPPC_LIB=y
-CONFIG_ACPI_PROCESSOR=y
-CONFIG_ACPI_IPMI=m
-CONFIG_ACPI_HOTPLUG_CPU=y
-CONFIG_ACPI_PROCESSOR_AGGREGATOR=m
-CONFIG_ACPI_THERMAL=m
-CONFIG_ACPI_NUMA=y
-# CONFIG_ACPI_CUSTOM_DSDT is not set
-CONFIG_ARCH_HAS_ACPI_TABLE_UPGRADE=y
-CONFIG_ACPI_TABLE_UPGRADE=y
-# CONFIG_ACPI_DEBUG is not set
-CONFIG_ACPI_PCI_SLOT=y
-CONFIG_X86_PM_TIMER=y
-CONFIG_ACPI_CONTAINER=y
-CONFIG_ACPI_HOTPLUG_MEMORY=y
-CONFIG_ACPI_HOTPLUG_IOAPIC=y
-CONFIG_ACPI_SBS=m
-CONFIG_ACPI_HED=y
-# CONFIG_ACPI_CUSTOM_METHOD is not set
-CONFIG_ACPI_BGRT=y
-# CONFIG_ACPI_REDUCED_HARDWARE_ONLY is not set
-CONFIG_ACPI_NFIT=m
-CONFIG_HAVE_ACPI_APEI=y
-CONFIG_HAVE_ACPI_APEI_NMI=y
-CONFIG_ACPI_APEI=y
-CONFIG_ACPI_APEI_GHES=y
-CONFIG_ACPI_APEI_PCIEAER=y
-CONFIG_ACPI_APEI_MEMORY_FAILURE=y
-# CONFIG_ACPI_APEI_EINJ is not set
-# CONFIG_ACPI_APEI_ERST_DEBUG is not set
-CONFIG_DPTF_POWER=m
-CONFIG_ACPI_WATCHDOG=y
-CONFIG_ACPI_EXTLOG=m
-CONFIG_PMIC_OPREGION=y
-# CONFIG_XPOWER_PMIC_OPREGION is not set
-# CONFIG_BXT_WC_PMIC_OPREGION is not set
-CONFIG_ACPI_CONFIGFS=m
-CONFIG_SFI=y
-
-#
-# CPU Frequency scaling
-#
-CONFIG_CPU_FREQ=y
-CONFIG_CPU_FREQ_GOV_ATTR_SET=y
-CONFIG_CPU_FREQ_GOV_COMMON=y
-CONFIG_CPU_FREQ_STAT=y
-CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y
-# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set
-# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set
-# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set
-# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set
-# CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL is not set
-CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
-CONFIG_CPU_FREQ_GOV_POWERSAVE=y
-CONFIG_CPU_FREQ_GOV_USERSPACE=y
-CONFIG_CPU_FREQ_GOV_ONDEMAND=y
-CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
-CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
-
-#
-# CPU frequency scaling drivers
-#
-CONFIG_X86_INTEL_PSTATE=y
-CONFIG_X86_PCC_CPUFREQ=m
-CONFIG_X86_ACPI_CPUFREQ=m
-CONFIG_X86_ACPI_CPUFREQ_CPB=y
-CONFIG_X86_POWERNOW_K8=m
-CONFIG_X86_AMD_FREQ_SENSITIVITY=m
-# CONFIG_X86_SPEEDSTEP_CENTRINO is not set
-# CONFIG_X86_P4_CLOCKMOD is not set
-
-#
-# shared options
-#
-# CONFIG_X86_SPEEDSTEP_LIB is not set
-
-#
-# CPU Idle
-#
-CONFIG_CPU_IDLE=y
-CONFIG_CPU_IDLE_GOV_LADDER=y
-CONFIG_CPU_IDLE_GOV_MENU=y
-# CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED is not set
-CONFIG_INTEL_IDLE=y
-
-#
-# Bus options (PCI etc.)
-#
-CONFIG_PCI=y
-CONFIG_PCI_DIRECT=y
-CONFIG_PCI_MMCONFIG=y
-CONFIG_PCI_DOMAINS=y
-CONFIG_PCIEPORTBUS=y
-CONFIG_HOTPLUG_PCI_PCIE=y
-CONFIG_PCIEAER=y
-CONFIG_PCIE_ECRC=y
-CONFIG_PCIEAER_INJECT=m
-CONFIG_PCIEASPM=y
-# CONFIG_PCIEASPM_DEBUG is not set
-CONFIG_PCIEASPM_DEFAULT=y
-# CONFIG_PCIEASPM_POWERSAVE is not set
-# CONFIG_PCIEASPM_POWER_SUPERSAVE is not set
-# CONFIG_PCIEASPM_PERFORMANCE is not set
-CONFIG_PCIE_PME=y
-CONFIG_PCIE_DPC=y
-CONFIG_PCIE_PTM=y
-CONFIG_PCI_BUS_ADDR_T_64BIT=y
-CONFIG_PCI_MSI=y
-CONFIG_PCI_MSI_IRQ_DOMAIN=y
-# CONFIG_PCI_DEBUG is not set
-CONFIG_PCI_REALLOC_ENABLE_AUTO=y
-CONFIG_PCI_STUB=m
-CONFIG_HT_IRQ=y
-CONFIG_PCI_ATS=y
-CONFIG_PCI_LOCKLESS_CONFIG=y
-CONFIG_PCI_IOV=y
-CONFIG_PCI_PRI=y
-CONFIG_PCI_PASID=y
-CONFIG_PCI_LABEL=y
-CONFIG_PCI_HYPERV=m
-CONFIG_HOTPLUG_PCI=y
-CONFIG_HOTPLUG_PCI_ACPI=y
-CONFIG_HOTPLUG_PCI_ACPI_IBM=m
-CONFIG_HOTPLUG_PCI_CPCI=y
-CONFIG_HOTPLUG_PCI_CPCI_ZT5550=m
-CONFIG_HOTPLUG_PCI_CPCI_GENERIC=m
-CONFIG_HOTPLUG_PCI_SHPC=m
-
-#
-# DesignWare PCI Core Support
-#
-# CONFIG_PCIE_DW_PLAT is not set
-
-#
-# PCI host controller drivers
-#
-CONFIG_VMD=m
-
-#
-# PCI Endpoint
-#
-# CONFIG_PCI_ENDPOINT is not set
-
-#
-# PCI switch controller drivers
-#
-CONFIG_PCI_SW_SWITCHTEC=m
-CONFIG_ISA_DMA_API=y
-CONFIG_AMD_NB=y
-CONFIG_PCCARD=m
-CONFIG_PCMCIA=m
-CONFIG_PCMCIA_LOAD_CIS=y
-CONFIG_CARDBUS=y
-
-#
-# PC-card bridges
-#
-CONFIG_YENTA=m
-CONFIG_YENTA_O2=y
-CONFIG_YENTA_RICOH=y
-CONFIG_YENTA_TI=y
-CONFIG_YENTA_ENE_TUNE=y
-CONFIG_YENTA_TOSHIBA=y
-CONFIG_PD6729=m
-CONFIG_I82092=m
-CONFIG_PCCARD_NONSTATIC=y
-CONFIG_RAPIDIO=y
-CONFIG_RAPIDIO_TSI721=y
-CONFIG_RAPIDIO_DISC_TIMEOUT=30
-CONFIG_RAPIDIO_ENABLE_RX_TX_PORTS=y
-CONFIG_RAPIDIO_DMA_ENGINE=y
-# CONFIG_RAPIDIO_DEBUG is not set
-CONFIG_RAPIDIO_ENUM_BASIC=m
-CONFIG_RAPIDIO_CHMAN=m
-CONFIG_RAPIDIO_MPORT_CDEV=m
-
-#
-# RapidIO Switch drivers
-#
-CONFIG_RAPIDIO_TSI57X=y
-CONFIG_RAPIDIO_CPS_XX=y
-CONFIG_RAPIDIO_TSI568=y
-CONFIG_RAPIDIO_CPS_GEN2=y
-CONFIG_RAPIDIO_RXS_GEN3=m
-CONFIG_X86_SYSFB=y
-
-#
-# Executable file formats / Emulations
-#
-CONFIG_BINFMT_ELF=y
-CONFIG_COMPAT_BINFMT_ELF=y
-CONFIG_ELFCORE=y
-CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
-CONFIG_BINFMT_SCRIPT=y
-# CONFIG_HAVE_AOUT is not set
-CONFIG_BINFMT_MISC=y
-CONFIG_COREDUMP=y
-CONFIG_IA32_EMULATION=y
-CONFIG_IA32_AOUT=y
-CONFIG_X86_X32=y
-CONFIG_COMPAT_32=y
-CONFIG_COMPAT=y
-CONFIG_COMPAT_FOR_U64_ALIGNMENT=y
-CONFIG_SYSVIPC_COMPAT=y
-CONFIG_X86_DEV_DMA_OPS=y
-CONFIG_NET=y
-CONFIG_COMPAT_NETLINK_MESSAGES=y
-CONFIG_NET_INGRESS=y
-CONFIG_NET_EGRESS=y
-
-#
-# Networking options
-#
-CONFIG_PACKET=m
-CONFIG_PACKET_DIAG=m
-CONFIG_UNIX=m
-CONFIG_UNIX_DIAG=m
-CONFIG_TLS=m
-CONFIG_XFRM=y
-CONFIG_XFRM_OFFLOAD=y
-CONFIG_XFRM_ALGO=m
-CONFIG_XFRM_USER=m
-CONFIG_XFRM_SUB_POLICY=y
-CONFIG_XFRM_MIGRATE=y
-CONFIG_XFRM_STATISTICS=y
-CONFIG_XFRM_IPCOMP=m
-CONFIG_NET_KEY=m
-CONFIG_NET_KEY_MIGRATE=y
-CONFIG_SMC=m
-CONFIG_SMC_DIAG=m
-CONFIG_INET=y
-CONFIG_IP_MULTICAST=y
-CONFIG_IP_ADVANCED_ROUTER=y
-CONFIG_IP_FIB_TRIE_STATS=y
-CONFIG_IP_MULTIPLE_TABLES=y
-CONFIG_IP_ROUTE_MULTIPATH=y
-CONFIG_IP_ROUTE_VERBOSE=y
-CONFIG_IP_ROUTE_CLASSID=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-CONFIG_IP_PNP_BOOTP=y
-CONFIG_IP_PNP_RARP=y
-CONFIG_NET_IPIP=m
-CONFIG_NET_IPGRE_DEMUX=m
-CONFIG_NET_IP_TUNNEL=m
-CONFIG_NET_IPGRE=m
-CONFIG_NET_IPGRE_BROADCAST=y
-CONFIG_IP_MROUTE=y
-CONFIG_IP_MROUTE_MULTIPLE_TABLES=y
-CONFIG_IP_PIMSM_V1=y
-CONFIG_IP_PIMSM_V2=y
-CONFIG_SYN_COOKIES=y
-CONFIG_NET_IPVTI=m
-CONFIG_NET_UDP_TUNNEL=m
-CONFIG_NET_FOU=m
-CONFIG_NET_FOU_IP_TUNNELS=y
-CONFIG_INET_AH=m
-CONFIG_INET_ESP=m
-CONFIG_INET_ESP_OFFLOAD=m
-CONFIG_INET_IPCOMP=m
-CONFIG_INET_XFRM_TUNNEL=m
-CONFIG_INET_TUNNEL=m
-CONFIG_INET_XFRM_MODE_TRANSPORT=m
-CONFIG_INET_XFRM_MODE_TUNNEL=m
-CONFIG_INET_XFRM_MODE_BEET=m
-CONFIG_INET_DIAG=m
-CONFIG_INET_TCP_DIAG=m
-CONFIG_INET_UDP_DIAG=m
-# CONFIG_INET_RAW_DIAG is not set
-# CONFIG_INET_DIAG_DESTROY is not set
-CONFIG_TCP_CONG_ADVANCED=y
-CONFIG_TCP_CONG_BIC=m
-CONFIG_TCP_CONG_CUBIC=m
-CONFIG_TCP_CONG_WESTWOOD=m
-CONFIG_TCP_CONG_HTCP=m
-CONFIG_TCP_CONG_HSTCP=m
-CONFIG_TCP_CONG_HYBLA=m
-CONFIG_TCP_CONG_VEGAS=m
-CONFIG_TCP_CONG_NV=m
-CONFIG_TCP_CONG_SCALABLE=m
-CONFIG_TCP_CONG_LP=m
-CONFIG_TCP_CONG_VENO=m
-CONFIG_TCP_CONG_YEAH=m
-CONFIG_TCP_CONG_ILLINOIS=m
-CONFIG_TCP_CONG_DCTCP=m
-# CONFIG_TCP_CONG_CDG is not set
-CONFIG_TCP_CONG_BBR=m
-CONFIG_DEFAULT_RENO=y
-CONFIG_DEFAULT_TCP_CONG="reno"
-CONFIG_TCP_MD5SIG=y
-CONFIG_IPV6=m
-CONFIG_IPV6_ROUTER_PREF=y
-CONFIG_IPV6_ROUTE_INFO=y
-CONFIG_IPV6_OPTIMISTIC_DAD=y
-CONFIG_INET6_AH=m
-CONFIG_INET6_ESP=m
-CONFIG_INET6_ESP_OFFLOAD=m
-CONFIG_INET6_IPCOMP=m
-CONFIG_IPV6_MIP6=m
-CONFIG_IPV6_ILA=m
-CONFIG_INET6_XFRM_TUNNEL=m
-CONFIG_INET6_TUNNEL=m
-CONFIG_INET6_XFRM_MODE_TRANSPORT=m
-CONFIG_INET6_XFRM_MODE_TUNNEL=m
-CONFIG_INET6_XFRM_MODE_BEET=m
-CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
-CONFIG_IPV6_VTI=m
-CONFIG_IPV6_SIT=m
-CONFIG_IPV6_SIT_6RD=y
-CONFIG_IPV6_NDISC_NODETYPE=y
-CONFIG_IPV6_TUNNEL=m
-CONFIG_IPV6_GRE=m
-CONFIG_IPV6_FOU=m
-CONFIG_IPV6_FOU_TUNNEL=m
-CONFIG_IPV6_MULTIPLE_TABLES=y
-CONFIG_IPV6_SUBTREES=y
-CONFIG_IPV6_MROUTE=y
-CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y
-CONFIG_IPV6_PIMSM_V2=y
-CONFIG_IPV6_SEG6_LWTUNNEL=y
-CONFIG_IPV6_SEG6_HMAC=y
-# CONFIG_NETLABEL is not set
-CONFIG_NETWORK_SECMARK=y
-CONFIG_NET_PTP_CLASSIFY=y
-CONFIG_NETWORK_PHY_TIMESTAMPING=y
-CONFIG_NETFILTER=y
-CONFIG_NETFILTER_ADVANCED=y
-CONFIG_BRIDGE_NETFILTER=m
-
-#
-# Core Netfilter Configuration
-#
-CONFIG_NETFILTER_INGRESS=y
-CONFIG_NETFILTER_NETLINK=m
-CONFIG_NETFILTER_NETLINK_ACCT=m
-CONFIG_NETFILTER_NETLINK_QUEUE=m
-CONFIG_NETFILTER_NETLINK_LOG=m
-CONFIG_NF_CONNTRACK=m
-CONFIG_NF_LOG_COMMON=m
-CONFIG_NF_LOG_NETDEV=m
-CONFIG_NF_CONNTRACK_MARK=y
-CONFIG_NF_CONNTRACK_SECMARK=y
-CONFIG_NF_CONNTRACK_ZONES=y
-CONFIG_NF_CONNTRACK_PROCFS=y
-CONFIG_NF_CONNTRACK_EVENTS=y
-CONFIG_NF_CONNTRACK_TIMEOUT=y
-CONFIG_NF_CONNTRACK_TIMESTAMP=y
-CONFIG_NF_CONNTRACK_LABELS=y
-CONFIG_NF_CT_PROTO_DCCP=y
-CONFIG_NF_CT_PROTO_GRE=m
-CONFIG_NF_CT_PROTO_SCTP=y
-CONFIG_NF_CT_PROTO_UDPLITE=y
-CONFIG_NF_CONNTRACK_AMANDA=m
-CONFIG_NF_CONNTRACK_FTP=m
-CONFIG_NF_CONNTRACK_H323=m
-CONFIG_NF_CONNTRACK_IRC=m
-CONFIG_NF_CONNTRACK_BROADCAST=m
-CONFIG_NF_CONNTRACK_NETBIOS_NS=m
-CONFIG_NF_CONNTRACK_SNMP=m
-CONFIG_NF_CONNTRACK_PPTP=m
-CONFIG_NF_CONNTRACK_SANE=m
-CONFIG_NF_CONNTRACK_SIP=m
-CONFIG_NF_CONNTRACK_TFTP=m
-CONFIG_NF_CT_NETLINK=m
-CONFIG_NF_CT_NETLINK_TIMEOUT=m
-CONFIG_NF_CT_NETLINK_HELPER=m
-CONFIG_NETFILTER_NETLINK_GLUE_CT=y
-CONFIG_NF_NAT=m
-CONFIG_NF_NAT_NEEDED=y
-CONFIG_NF_NAT_PROTO_DCCP=y
-CONFIG_NF_NAT_PROTO_UDPLITE=y
-CONFIG_NF_NAT_PROTO_SCTP=y
-CONFIG_NF_NAT_AMANDA=m
-CONFIG_NF_NAT_FTP=m
-CONFIG_NF_NAT_IRC=m
-CONFIG_NF_NAT_SIP=m
-CONFIG_NF_NAT_TFTP=m
-CONFIG_NF_NAT_REDIRECT=m
-CONFIG_NETFILTER_SYNPROXY=m
-CONFIG_NF_TABLES=m
-CONFIG_NF_TABLES_INET=m
-CONFIG_NF_TABLES_NETDEV=m
-CONFIG_NFT_EXTHDR=m
-CONFIG_NFT_META=m
-CONFIG_NFT_RT=m
-CONFIG_NFT_NUMGEN=m
-CONFIG_NFT_CT=m
-CONFIG_NFT_SET_RBTREE=m
-CONFIG_NFT_SET_HASH=m
-CONFIG_NFT_SET_BITMAP=m
-CONFIG_NFT_COUNTER=m
-CONFIG_NFT_LOG=m
-CONFIG_NFT_LIMIT=m
-CONFIG_NFT_MASQ=m
-CONFIG_NFT_REDIR=m
-CONFIG_NFT_NAT=m
-CONFIG_NFT_OBJREF=m
-CONFIG_NFT_QUEUE=m
-CONFIG_NFT_QUOTA=m
-CONFIG_NFT_REJECT=m
-CONFIG_NFT_REJECT_INET=m
-CONFIG_NFT_COMPAT=m
-CONFIG_NFT_HASH=m
-CONFIG_NFT_FIB=m
-CONFIG_NFT_FIB_INET=m
-CONFIG_NF_DUP_NETDEV=m
-CONFIG_NFT_DUP_NETDEV=m
-CONFIG_NFT_FWD_NETDEV=m
-CONFIG_NFT_FIB_NETDEV=m
-CONFIG_NETFILTER_XTABLES=m
-
-#
-# Xtables combined modules
-#
-CONFIG_NETFILTER_XT_MARK=m
-CONFIG_NETFILTER_XT_CONNMARK=m
-CONFIG_NETFILTER_XT_SET=m
-
-#
-# Xtables targets
-#
-CONFIG_NETFILTER_XT_TARGET_AUDIT=m
-CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
-CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
-CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
-CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m
-CONFIG_NETFILTER_XT_TARGET_CT=m
-CONFIG_NETFILTER_XT_TARGET_DSCP=m
-CONFIG_NETFILTER_XT_TARGET_HL=m
-CONFIG_NETFILTER_XT_TARGET_HMARK=m
-CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
-CONFIG_NETFILTER_XT_TARGET_LED=m
-CONFIG_NETFILTER_XT_TARGET_LOG=m
-CONFIG_NETFILTER_XT_TARGET_MARK=m
-CONFIG_NETFILTER_XT_NAT=m
-CONFIG_NETFILTER_XT_TARGET_NETMAP=m
-CONFIG_NETFILTER_XT_TARGET_NFLOG=m
-CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
-CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
-CONFIG_NETFILTER_XT_TARGET_RATEEST=m
-CONFIG_NETFILTER_XT_TARGET_REDIRECT=m
-CONFIG_NETFILTER_XT_TARGET_TEE=m
-CONFIG_NETFILTER_XT_TARGET_TPROXY=m
-CONFIG_NETFILTER_XT_TARGET_TRACE=m
-CONFIG_NETFILTER_XT_TARGET_SECMARK=m
-CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
-CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
-
-#
-# Xtables matches
-#
-CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
-CONFIG_NETFILTER_XT_MATCH_BPF=m
-CONFIG_NETFILTER_XT_MATCH_CGROUP=m
-CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
-CONFIG_NETFILTER_XT_MATCH_COMMENT=m
-CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
-CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m
-CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
-CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
-CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
-CONFIG_NETFILTER_XT_MATCH_CPU=m
-CONFIG_NETFILTER_XT_MATCH_DCCP=m
-CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m
-CONFIG_NETFILTER_XT_MATCH_DSCP=m
-CONFIG_NETFILTER_XT_MATCH_ECN=m
-CONFIG_NETFILTER_XT_MATCH_ESP=m
-CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
-CONFIG_NETFILTER_XT_MATCH_HELPER=m
-CONFIG_NETFILTER_XT_MATCH_HL=m
-CONFIG_NETFILTER_XT_MATCH_IPCOMP=m
-CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
-CONFIG_NETFILTER_XT_MATCH_IPVS=m
-CONFIG_NETFILTER_XT_MATCH_L2TP=m
-CONFIG_NETFILTER_XT_MATCH_LENGTH=m
-CONFIG_NETFILTER_XT_MATCH_LIMIT=m
-CONFIG_NETFILTER_XT_MATCH_MAC=m
-CONFIG_NETFILTER_XT_MATCH_MARK=m
-CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
-CONFIG_NETFILTER_XT_MATCH_NFACCT=m
-CONFIG_NETFILTER_XT_MATCH_OSF=m
-CONFIG_NETFILTER_XT_MATCH_OWNER=m
-CONFIG_NETFILTER_XT_MATCH_POLICY=m
-CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
-CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
-CONFIG_NETFILTER_XT_MATCH_QUOTA=m
-CONFIG_NETFILTER_XT_MATCH_RATEEST=m
-CONFIG_NETFILTER_XT_MATCH_REALM=m
-CONFIG_NETFILTER_XT_MATCH_RECENT=m
-CONFIG_NETFILTER_XT_MATCH_SCTP=m
-CONFIG_NETFILTER_XT_MATCH_SOCKET=m
-CONFIG_NETFILTER_XT_MATCH_STATE=m
-CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
-CONFIG_NETFILTER_XT_MATCH_STRING=m
-CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
-CONFIG_NETFILTER_XT_MATCH_TIME=m
-CONFIG_NETFILTER_XT_MATCH_U32=m
-CONFIG_IP_SET=m
-CONFIG_IP_SET_MAX=256
-CONFIG_IP_SET_BITMAP_IP=m
-CONFIG_IP_SET_BITMAP_IPMAC=m
-CONFIG_IP_SET_BITMAP_PORT=m
-CONFIG_IP_SET_HASH_IP=m
-CONFIG_IP_SET_HASH_IPMARK=m
-CONFIG_IP_SET_HASH_IPPORT=m
-CONFIG_IP_SET_HASH_IPPORTIP=m
-CONFIG_IP_SET_HASH_IPPORTNET=m
-CONFIG_IP_SET_HASH_IPMAC=m
-CONFIG_IP_SET_HASH_MAC=m
-CONFIG_IP_SET_HASH_NETPORTNET=m
-CONFIG_IP_SET_HASH_NET=m
-CONFIG_IP_SET_HASH_NETNET=m
-CONFIG_IP_SET_HASH_NETPORT=m
-CONFIG_IP_SET_HASH_NETIFACE=m
-CONFIG_IP_SET_LIST_SET=m
-CONFIG_IP_VS=m
-CONFIG_IP_VS_IPV6=y
-# CONFIG_IP_VS_DEBUG is not set
-CONFIG_IP_VS_TAB_BITS=12
-
-#
-# IPVS transport protocol load balancing support
-#
-CONFIG_IP_VS_PROTO_TCP=y
-CONFIG_IP_VS_PROTO_UDP=y
-CONFIG_IP_VS_PROTO_AH_ESP=y
-CONFIG_IP_VS_PROTO_ESP=y
-CONFIG_IP_VS_PROTO_AH=y
-CONFIG_IP_VS_PROTO_SCTP=y
-
-#
-# IPVS scheduler
-#
-CONFIG_IP_VS_RR=m
-CONFIG_IP_VS_WRR=m
-CONFIG_IP_VS_LC=m
-CONFIG_IP_VS_WLC=m
-CONFIG_IP_VS_FO=m
-CONFIG_IP_VS_OVF=m
-CONFIG_IP_VS_LBLC=m
-CONFIG_IP_VS_LBLCR=m
-CONFIG_IP_VS_DH=m
-CONFIG_IP_VS_SH=m
-CONFIG_IP_VS_SED=m
-CONFIG_IP_VS_NQ=m
-
-#
-# IPVS SH scheduler
-#
-CONFIG_IP_VS_SH_TAB_BITS=8
-
-#
-# IPVS application helper
-#
-CONFIG_IP_VS_FTP=m
-CONFIG_IP_VS_NFCT=y
-CONFIG_IP_VS_PE_SIP=m
-
-#
-# IP: Netfilter Configuration
-#
-CONFIG_NF_DEFRAG_IPV4=m
-CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_NF_SOCKET_IPV4=m
-CONFIG_NF_TABLES_IPV4=m
-CONFIG_NFT_CHAIN_ROUTE_IPV4=m
-CONFIG_NFT_REJECT_IPV4=m
-CONFIG_NFT_DUP_IPV4=m
-CONFIG_NFT_FIB_IPV4=m
-CONFIG_NF_TABLES_ARP=m
-CONFIG_NF_DUP_IPV4=m
-CONFIG_NF_LOG_ARP=m
-CONFIG_NF_LOG_IPV4=m
-CONFIG_NF_REJECT_IPV4=m
-CONFIG_NF_NAT_IPV4=m
-CONFIG_NFT_CHAIN_NAT_IPV4=m
-CONFIG_NF_NAT_MASQUERADE_IPV4=m
-CONFIG_NFT_MASQ_IPV4=m
-CONFIG_NFT_REDIR_IPV4=m
-CONFIG_NF_NAT_SNMP_BASIC=m
-CONFIG_NF_NAT_PROTO_GRE=m
-CONFIG_NF_NAT_PPTP=m
-CONFIG_NF_NAT_H323=m
-CONFIG_IP_NF_IPTABLES=m
-CONFIG_IP_NF_MATCH_AH=m
-CONFIG_IP_NF_MATCH_ECN=m
-CONFIG_IP_NF_MATCH_RPFILTER=m
-CONFIG_IP_NF_MATCH_TTL=m
-CONFIG_IP_NF_FILTER=m
-CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_IP_NF_TARGET_SYNPROXY=m
-CONFIG_IP_NF_NAT=m
-CONFIG_IP_NF_TARGET_MASQUERADE=m
-CONFIG_IP_NF_TARGET_NETMAP=m
-CONFIG_IP_NF_TARGET_REDIRECT=m
-CONFIG_IP_NF_MANGLE=m
-CONFIG_IP_NF_TARGET_CLUSTERIP=m
-CONFIG_IP_NF_TARGET_ECN=m
-CONFIG_IP_NF_TARGET_TTL=m
-CONFIG_IP_NF_RAW=m
-# CONFIG_IP_NF_SECURITY is not set
-CONFIG_IP_NF_ARPTABLES=m
-CONFIG_IP_NF_ARPFILTER=m
-CONFIG_IP_NF_ARP_MANGLE=m
-
-#
-# IPv6: Netfilter Configuration
-#
-CONFIG_NF_DEFRAG_IPV6=m
-CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_NF_SOCKET_IPV6=m
-CONFIG_NF_TABLES_IPV6=m
-CONFIG_NFT_CHAIN_ROUTE_IPV6=m
-CONFIG_NFT_CHAIN_NAT_IPV6=m
-CONFIG_NFT_MASQ_IPV6=m
-CONFIG_NFT_REDIR_IPV6=m
-CONFIG_NFT_REJECT_IPV6=m
-CONFIG_NFT_DUP_IPV6=m
-CONFIG_NFT_FIB_IPV6=m
-CONFIG_NF_DUP_IPV6=m
-CONFIG_NF_REJECT_IPV6=m
-CONFIG_NF_LOG_IPV6=m
-CONFIG_NF_NAT_IPV6=m
-CONFIG_NF_NAT_MASQUERADE_IPV6=m
-CONFIG_IP6_NF_IPTABLES=m
-CONFIG_IP6_NF_MATCH_AH=m
-CONFIG_IP6_NF_MATCH_EUI64=m
-CONFIG_IP6_NF_MATCH_FRAG=m
-CONFIG_IP6_NF_MATCH_OPTS=m
-CONFIG_IP6_NF_MATCH_HL=m
-CONFIG_IP6_NF_MATCH_IPV6HEADER=m
-CONFIG_IP6_NF_MATCH_MH=m
-CONFIG_IP6_NF_MATCH_RPFILTER=m
-CONFIG_IP6_NF_MATCH_RT=m
-CONFIG_IP6_NF_TARGET_HL=m
-CONFIG_IP6_NF_FILTER=m
-CONFIG_IP6_NF_TARGET_REJECT=m
-CONFIG_IP6_NF_TARGET_SYNPROXY=m
-CONFIG_IP6_NF_MANGLE=m
-CONFIG_IP6_NF_RAW=m
-# CONFIG_IP6_NF_SECURITY is not set
-CONFIG_IP6_NF_NAT=m
-CONFIG_IP6_NF_TARGET_MASQUERADE=m
-CONFIG_IP6_NF_TARGET_NPT=m
-
-#
-# DECnet: Netfilter Configuration
-#
-CONFIG_DECNET_NF_GRABULATOR=m
-CONFIG_NF_TABLES_BRIDGE=m
-CONFIG_NFT_BRIDGE_META=m
-CONFIG_NFT_BRIDGE_REJECT=m
-CONFIG_NF_LOG_BRIDGE=m
-CONFIG_BRIDGE_NF_EBTABLES=m
-CONFIG_BRIDGE_EBT_BROUTE=m
-CONFIG_BRIDGE_EBT_T_FILTER=m
-CONFIG_BRIDGE_EBT_T_NAT=m
-CONFIG_BRIDGE_EBT_802_3=m
-CONFIG_BRIDGE_EBT_AMONG=m
-CONFIG_BRIDGE_EBT_ARP=m
-CONFIG_BRIDGE_EBT_IP=m
-CONFIG_BRIDGE_EBT_IP6=m
-CONFIG_BRIDGE_EBT_LIMIT=m
-CONFIG_BRIDGE_EBT_MARK=m
-CONFIG_BRIDGE_EBT_PKTTYPE=m
-CONFIG_BRIDGE_EBT_STP=m
-CONFIG_BRIDGE_EBT_VLAN=m
-CONFIG_BRIDGE_EBT_ARPREPLY=m
-CONFIG_BRIDGE_EBT_DNAT=m
-CONFIG_BRIDGE_EBT_MARK_T=m
-CONFIG_BRIDGE_EBT_REDIRECT=m
-CONFIG_BRIDGE_EBT_SNAT=m
-CONFIG_BRIDGE_EBT_LOG=m
-CONFIG_BRIDGE_EBT_NFLOG=m
-CONFIG_IP_DCCP=m
-CONFIG_INET_DCCP_DIAG=m
-
-#
-# DCCP CCIDs Configuration
-#
-# CONFIG_IP_DCCP_CCID2_DEBUG is not set
-CONFIG_IP_DCCP_CCID3=y
-# CONFIG_IP_DCCP_CCID3_DEBUG is not set
-CONFIG_IP_DCCP_TFRC_LIB=y
-
-#
-# DCCP Kernel Hacking
-#
-# CONFIG_IP_DCCP_DEBUG is not set
-# CONFIG_NET_DCCPPROBE is not set
-CONFIG_IP_SCTP=m
-# CONFIG_NET_SCTPPROBE is not set
-# CONFIG_SCTP_DBG_OBJCNT is not set
-CONFIG_SCTP_DEFAULT_COOKIE_HMAC_MD5=y
-# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_SHA1 is not set
-# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_NONE is not set
-CONFIG_SCTP_COOKIE_HMAC_MD5=y
-CONFIG_SCTP_COOKIE_HMAC_SHA1=y
-CONFIG_INET_SCTP_DIAG=m
-CONFIG_RDS=m
-CONFIG_RDS_RDMA=m
-CONFIG_RDS_TCP=m
-# CONFIG_RDS_DEBUG is not set
-CONFIG_TIPC=m
-CONFIG_TIPC_MEDIA_IB=y
-CONFIG_TIPC_MEDIA_UDP=y
-CONFIG_ATM=m
-CONFIG_ATM_CLIP=m
-# CONFIG_ATM_CLIP_NO_ICMP is not set
-CONFIG_ATM_LANE=m
-CONFIG_ATM_MPOA=m
-CONFIG_ATM_BR2684=m
-# CONFIG_ATM_BR2684_IPFILTER is not set
-CONFIG_L2TP=m
-# CONFIG_L2TP_DEBUGFS is not set
-CONFIG_L2TP_V3=y
-CONFIG_L2TP_IP=m
-CONFIG_L2TP_ETH=m
-CONFIG_STP=m
-CONFIG_GARP=m
-CONFIG_MRP=m
-CONFIG_BRIDGE=m
-CONFIG_BRIDGE_IGMP_SNOOPING=y
-CONFIG_BRIDGE_VLAN_FILTERING=y
-CONFIG_HAVE_NET_DSA=y
-CONFIG_NET_DSA=m
-CONFIG_NET_DSA_TAG_DSA=y
-CONFIG_NET_DSA_TAG_EDSA=y
-CONFIG_NET_DSA_TAG_KSZ=y
-CONFIG_NET_DSA_TAG_LAN9303=y
-CONFIG_NET_DSA_TAG_MTK=y
-CONFIG_NET_DSA_TAG_TRAILER=y
-CONFIG_NET_DSA_TAG_QCA=y
-CONFIG_VLAN_8021Q=m
-CONFIG_VLAN_8021Q_GVRP=y
-CONFIG_VLAN_8021Q_MVRP=y
-CONFIG_DECNET=m
-CONFIG_DECNET_ROUTER=y
-CONFIG_LLC=m
-CONFIG_LLC2=m
-CONFIG_IPX=m
-CONFIG_IPX_INTERN=y
-CONFIG_ATALK=m
-CONFIG_DEV_APPLETALK=m
-CONFIG_IPDDP=m
-CONFIG_IPDDP_ENCAP=y
-CONFIG_X25=m
-CONFIG_LAPB=m
-CONFIG_PHONET=m
-CONFIG_6LOWPAN=m
-# CONFIG_6LOWPAN_DEBUGFS is not set
-CONFIG_6LOWPAN_NHC=m
-CONFIG_6LOWPAN_NHC_DEST=m
-CONFIG_6LOWPAN_NHC_FRAGMENT=m
-CONFIG_6LOWPAN_NHC_HOP=m
-CONFIG_6LOWPAN_NHC_IPV6=m
-CONFIG_6LOWPAN_NHC_MOBILITY=m
-CONFIG_6LOWPAN_NHC_ROUTING=m
-CONFIG_6LOWPAN_NHC_UDP=m
-CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m
-CONFIG_6LOWPAN_GHC_UDP=m
-CONFIG_6LOWPAN_GHC_ICMPV6=m
-CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m
-CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
-CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
-CONFIG_IEEE802154=m
-CONFIG_IEEE802154_NL802154_EXPERIMENTAL=y
-CONFIG_IEEE802154_SOCKET=m
-CONFIG_IEEE802154_6LOWPAN=m
-CONFIG_MAC802154=m
-CONFIG_NET_SCHED=y
-
-#
-# Queueing/Scheduling
-#
-CONFIG_NET_SCH_CBQ=m
-CONFIG_NET_SCH_HTB=m
-CONFIG_NET_SCH_HFSC=m
-CONFIG_NET_SCH_ATM=m
-CONFIG_NET_SCH_PRIO=m
-CONFIG_NET_SCH_MULTIQ=m
-CONFIG_NET_SCH_RED=m
-CONFIG_NET_SCH_SFB=m
-CONFIG_NET_SCH_SFQ=m
-CONFIG_NET_SCH_TEQL=m
-CONFIG_NET_SCH_TBF=m
-CONFIG_NET_SCH_GRED=m
-CONFIG_NET_SCH_DSMARK=m
-CONFIG_NET_SCH_NETEM=m
-CONFIG_NET_SCH_DRR=m
-CONFIG_NET_SCH_MQPRIO=m
-CONFIG_NET_SCH_CHOKE=m
-CONFIG_NET_SCH_QFQ=m
-CONFIG_NET_SCH_CODEL=m
-CONFIG_NET_SCH_FQ_CODEL=m
-CONFIG_NET_SCH_FQ=m
-CONFIG_NET_SCH_HHF=m
-CONFIG_NET_SCH_PIE=m
-CONFIG_NET_SCH_INGRESS=m
-CONFIG_NET_SCH_PLUG=m
-# CONFIG_NET_SCH_DEFAULT is not set
-
-#
-# Classification
-#
-CONFIG_NET_CLS=y
-CONFIG_NET_CLS_BASIC=m
-CONFIG_NET_CLS_TCINDEX=m
-CONFIG_NET_CLS_ROUTE4=m
-CONFIG_NET_CLS_FW=m
-CONFIG_NET_CLS_U32=m
-CONFIG_CLS_U32_PERF=y
-CONFIG_CLS_U32_MARK=y
-CONFIG_NET_CLS_RSVP=m
-CONFIG_NET_CLS_RSVP6=m
-CONFIG_NET_CLS_FLOW=m
-CONFIG_NET_CLS_CGROUP=m
-CONFIG_NET_CLS_BPF=m
-CONFIG_NET_CLS_FLOWER=m
-CONFIG_NET_CLS_MATCHALL=m
-CONFIG_NET_EMATCH=y
-CONFIG_NET_EMATCH_STACK=32
-CONFIG_NET_EMATCH_CMP=m
-CONFIG_NET_EMATCH_NBYTE=m
-CONFIG_NET_EMATCH_U32=m
-CONFIG_NET_EMATCH_META=m
-CONFIG_NET_EMATCH_TEXT=m
-CONFIG_NET_EMATCH_CANID=m
-CONFIG_NET_EMATCH_IPSET=m
-CONFIG_NET_CLS_ACT=y
-CONFIG_NET_ACT_POLICE=m
-CONFIG_NET_ACT_GACT=m
-CONFIG_GACT_PROB=y
-CONFIG_NET_ACT_MIRRED=m
-CONFIG_NET_ACT_SAMPLE=m
-CONFIG_NET_ACT_IPT=m
-CONFIG_NET_ACT_NAT=m
-CONFIG_NET_ACT_PEDIT=m
-# CONFIG_NET_ACT_SIMP is not set
-CONFIG_NET_ACT_SKBEDIT=m
-CONFIG_NET_ACT_CSUM=m
-CONFIG_NET_ACT_VLAN=m
-CONFIG_NET_ACT_BPF=m
-CONFIG_NET_ACT_CONNMARK=m
-CONFIG_NET_ACT_SKBMOD=m
-CONFIG_NET_ACT_IFE=m
-CONFIG_NET_ACT_TUNNEL_KEY=m
-CONFIG_NET_IFE_SKBMARK=m
-CONFIG_NET_IFE_SKBPRIO=m
-CONFIG_NET_IFE_SKBTCINDEX=m
-CONFIG_NET_CLS_IND=y
-CONFIG_NET_SCH_FIFO=y
-CONFIG_DCB=y
-CONFIG_DNS_RESOLVER=y
-CONFIG_BATMAN_ADV=m
-# CONFIG_BATMAN_ADV_BATMAN_V is not set
-CONFIG_BATMAN_ADV_BLA=y
-CONFIG_BATMAN_ADV_DAT=y
-CONFIG_BATMAN_ADV_NC=y
-CONFIG_BATMAN_ADV_MCAST=y
-CONFIG_BATMAN_ADV_DEBUGFS=y
-# CONFIG_BATMAN_ADV_DEBUG is not set
-CONFIG_OPENVSWITCH=m
-CONFIG_OPENVSWITCH_GRE=m
-CONFIG_OPENVSWITCH_VXLAN=m
-CONFIG_OPENVSWITCH_GENEVE=m
-CONFIG_VSOCKETS=m
-CONFIG_VMWARE_VMCI_VSOCKETS=m
-CONFIG_VIRTIO_VSOCKETS=m
-CONFIG_VIRTIO_VSOCKETS_COMMON=m
-CONFIG_HYPERV_VSOCKETS=m
-CONFIG_NETLINK_DIAG=m
-CONFIG_MPLS=y
-CONFIG_NET_MPLS_GSO=m
-CONFIG_MPLS_ROUTING=m
-CONFIG_MPLS_IPTUNNEL=m
-CONFIG_NET_NSH=m
-CONFIG_HSR=m
-CONFIG_NET_SWITCHDEV=y
-CONFIG_NET_L3_MASTER_DEV=y
-# CONFIG_NET_NCSI is not set
-CONFIG_RPS=y
-CONFIG_RFS_ACCEL=y
-CONFIG_XPS=y
-CONFIG_CGROUP_NET_PRIO=y
-CONFIG_CGROUP_NET_CLASSID=y
-CONFIG_NET_RX_BUSY_POLL=y
-CONFIG_BQL=y
-CONFIG_BPF_JIT=y
-# CONFIG_BPF_STREAM_PARSER is not set
-CONFIG_NET_FLOW_LIMIT=y
-
-#
-# Network testing
-#
-# CONFIG_NET_PKTGEN is not set
-# CONFIG_NET_TCPPROBE is not set
-# CONFIG_NET_DROP_MONITOR is not set
-CONFIG_HAMRADIO=y
-
-#
-# Packet Radio protocols
-#
-CONFIG_AX25=m
-CONFIG_AX25_DAMA_SLAVE=y
-CONFIG_NETROM=m
-CONFIG_ROSE=m
-
-#
-# AX.25 network device drivers
-#
-CONFIG_MKISS=m
-CONFIG_6PACK=m
-CONFIG_BPQETHER=m
-CONFIG_BAYCOM_SER_FDX=m
-CONFIG_BAYCOM_SER_HDX=m
-CONFIG_BAYCOM_PAR=m
-CONFIG_YAM=m
-CONFIG_CAN=m
-CONFIG_CAN_RAW=m
-CONFIG_CAN_BCM=m
-CONFIG_CAN_GW=m
-
-#
-# CAN Device Drivers
-#
-CONFIG_CAN_VCAN=m
-CONFIG_CAN_VXCAN=m
-CONFIG_CAN_SLCAN=m
-CONFIG_CAN_DEV=m
-CONFIG_CAN_CALC_BITTIMING=y
-CONFIG_CAN_LEDS=y
-CONFIG_CAN_JANZ_ICAN3=m
-CONFIG_CAN_C_CAN=m
-CONFIG_CAN_C_CAN_PLATFORM=m
-CONFIG_CAN_C_CAN_PCI=m
-CONFIG_CAN_CC770=m
-CONFIG_CAN_CC770_ISA=m
-CONFIG_CAN_CC770_PLATFORM=m
-CONFIG_CAN_IFI_CANFD=m
-CONFIG_CAN_M_CAN=m
-CONFIG_CAN_PEAK_PCIEFD=m
-CONFIG_CAN_SJA1000=m
-CONFIG_CAN_SJA1000_ISA=m
-CONFIG_CAN_SJA1000_PLATFORM=m
-CONFIG_CAN_EMS_PCMCIA=m
-CONFIG_CAN_EMS_PCI=m
-CONFIG_CAN_PEAK_PCMCIA=m
-CONFIG_CAN_PEAK_PCI=m
-CONFIG_CAN_PEAK_PCIEC=y
-CONFIG_CAN_KVASER_PCI=m
-CONFIG_CAN_PLX_PCI=m
-CONFIG_CAN_SOFTING=m
-CONFIG_CAN_SOFTING_CS=m
-
-#
-# CAN SPI interfaces
-#
-CONFIG_CAN_HI311X=m
-CONFIG_CAN_MCP251X=m
-
-#
-# CAN USB interfaces
-#
-CONFIG_CAN_EMS_USB=m
-CONFIG_CAN_ESD_USB2=m
-CONFIG_CAN_GS_USB=m
-CONFIG_CAN_KVASER_USB=m
-CONFIG_CAN_PEAK_USB=m
-CONFIG_CAN_8DEV_USB=m
-CONFIG_CAN_MCBA_USB=m
-# CONFIG_CAN_DEBUG_DEVICES is not set
-CONFIG_BT=m
-CONFIG_BT_BREDR=y
-CONFIG_BT_RFCOMM=m
-CONFIG_BT_RFCOMM_TTY=y
-CONFIG_BT_BNEP=m
-CONFIG_BT_BNEP_MC_FILTER=y
-CONFIG_BT_BNEP_PROTO_FILTER=y
-CONFIG_BT_CMTP=m
-CONFIG_BT_HIDP=m
-CONFIG_BT_HS=y
-CONFIG_BT_LE=y
-CONFIG_BT_6LOWPAN=m
-CONFIG_BT_LEDS=y
-# CONFIG_BT_SELFTEST is not set
-CONFIG_BT_DEBUGFS=y
-
-#
-# Bluetooth device drivers
-#
-CONFIG_BT_INTEL=m
-CONFIG_BT_BCM=m
-CONFIG_BT_RTL=m
-CONFIG_BT_QCA=m
-CONFIG_BT_HCIBTUSB=m
-CONFIG_BT_HCIBTUSB_BCM=y
-CONFIG_BT_HCIBTUSB_RTL=y
-CONFIG_BT_HCIBTSDIO=m
-CONFIG_BT_HCIUART=m
-CONFIG_BT_HCIUART_SERDEV=y
-CONFIG_BT_HCIUART_H4=y
-CONFIG_BT_HCIUART_NOKIA=m
-CONFIG_BT_HCIUART_BCSP=y
-CONFIG_BT_HCIUART_ATH3K=y
-CONFIG_BT_HCIUART_LL=y
-CONFIG_BT_HCIUART_3WIRE=y
-CONFIG_BT_HCIUART_INTEL=y
-CONFIG_BT_HCIUART_BCM=y
-CONFIG_BT_HCIUART_QCA=y
-CONFIG_BT_HCIUART_AG6XX=y
-CONFIG_BT_HCIUART_MRVL=y
-CONFIG_BT_HCIBCM203X=m
-CONFIG_BT_HCIBPA10X=m
-CONFIG_BT_HCIBFUSB=m
-CONFIG_BT_HCIDTL1=m
-CONFIG_BT_HCIBT3C=m
-CONFIG_BT_HCIBLUECARD=m
-CONFIG_BT_HCIBTUART=m
-CONFIG_BT_HCIVHCI=m
-CONFIG_BT_MRVL=m
-CONFIG_BT_MRVL_SDIO=m
-CONFIG_BT_ATH3K=m
-CONFIG_BT_WILINK=m
-CONFIG_AF_RXRPC=m
-CONFIG_AF_RXRPC_IPV6=y
-# CONFIG_AF_RXRPC_INJECT_LOSS is not set
-# CONFIG_AF_RXRPC_DEBUG is not set
-# CONFIG_RXKAD is not set
-CONFIG_AF_KCM=m
-CONFIG_STREAM_PARSER=m
-CONFIG_FIB_RULES=y
-CONFIG_WIRELESS=y
-CONFIG_WIRELESS_EXT=y
-CONFIG_WEXT_CORE=y
-CONFIG_WEXT_PROC=y
-CONFIG_WEXT_SPY=y
-CONFIG_WEXT_PRIV=y
-CONFIG_CFG80211=m
-CONFIG_NL80211_TESTMODE=y
-# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set
-CONFIG_CFG80211_DEFAULT_PS=y
-# CONFIG_CFG80211_DEBUGFS is not set
-# CONFIG_CFG80211_INTERNAL_REGDB is not set
-CONFIG_CFG80211_CRDA_SUPPORT=y
-CONFIG_CFG80211_WEXT=y
-CONFIG_CFG80211_WEXT_EXPORT=y
-CONFIG_LIB80211=m
-CONFIG_LIB80211_CRYPT_WEP=m
-CONFIG_LIB80211_CRYPT_CCMP=m
-CONFIG_LIB80211_CRYPT_TKIP=m
-# CONFIG_LIB80211_DEBUG is not set
-CONFIG_MAC80211=m
-CONFIG_MAC80211_HAS_RC=y
-CONFIG_MAC80211_RC_MINSTREL=y
-CONFIG_MAC80211_RC_MINSTREL_HT=y
-# CONFIG_MAC80211_RC_MINSTREL_VHT is not set
-CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y
-CONFIG_MAC80211_RC_DEFAULT="minstrel_ht"
-CONFIG_MAC80211_MESH=y
-CONFIG_MAC80211_LEDS=y
-# CONFIG_MAC80211_DEBUGFS is not set
-# CONFIG_MAC80211_MESSAGE_TRACING is not set
-# CONFIG_MAC80211_DEBUG_MENU is not set
-CONFIG_MAC80211_STA_HASH_MAX_SIZE=0
-CONFIG_WIMAX=m
-CONFIG_WIMAX_DEBUG_LEVEL=8
-CONFIG_RFKILL=m
-CONFIG_RFKILL_LEDS=y
-CONFIG_RFKILL_INPUT=y
-CONFIG_RFKILL_GPIO=m
-CONFIG_NET_9P=m
-CONFIG_NET_9P_VIRTIO=m
-CONFIG_NET_9P_RDMA=m
-# CONFIG_NET_9P_DEBUG is not set
-CONFIG_CAIF=m
-# CONFIG_CAIF_DEBUG is not set
-CONFIG_CAIF_NETDEV=m
-CONFIG_CAIF_USB=m
-CONFIG_CEPH_LIB=m
-# CONFIG_CEPH_LIB_PRETTYDEBUG is not set
-CONFIG_CEPH_LIB_USE_DNS_RESOLVER=y
-CONFIG_NFC=m
-CONFIG_NFC_DIGITAL=m
-CONFIG_NFC_NCI=m
-CONFIG_NFC_NCI_SPI=m
-CONFIG_NFC_NCI_UART=m
-CONFIG_NFC_HCI=m
-CONFIG_NFC_SHDLC=y
-
-#
-# Near Field Communication (NFC) devices
-#
-CONFIG_NFC_TRF7970A=m
-CONFIG_NFC_MEI_PHY=m
-CONFIG_NFC_SIM=m
-CONFIG_NFC_PORT100=m
-CONFIG_NFC_FDP=m
-CONFIG_NFC_FDP_I2C=m
-CONFIG_NFC_PN544=m
-CONFIG_NFC_PN544_I2C=m
-CONFIG_NFC_PN544_MEI=m
-CONFIG_NFC_PN533=m
-CONFIG_NFC_PN533_USB=m
-CONFIG_NFC_PN533_I2C=m
-CONFIG_NFC_MICROREAD=m
-CONFIG_NFC_MICROREAD_I2C=m
-CONFIG_NFC_MICROREAD_MEI=m
-CONFIG_NFC_MRVL=m
-CONFIG_NFC_MRVL_USB=m
-CONFIG_NFC_MRVL_UART=m
-CONFIG_NFC_MRVL_I2C=m
-CONFIG_NFC_MRVL_SPI=m
-CONFIG_NFC_ST21NFCA=m
-CONFIG_NFC_ST21NFCA_I2C=m
-CONFIG_NFC_ST_NCI=m
-CONFIG_NFC_ST_NCI_I2C=m
-CONFIG_NFC_ST_NCI_SPI=m
-CONFIG_NFC_NXP_NCI=m
-CONFIG_NFC_NXP_NCI_I2C=m
-CONFIG_NFC_S3FWRN5=m
-CONFIG_NFC_S3FWRN5_I2C=m
-CONFIG_NFC_ST95HF=m
-CONFIG_PSAMPLE=m
-CONFIG_NET_IFE=m
-CONFIG_LWTUNNEL=y
-CONFIG_LWTUNNEL_BPF=y
-CONFIG_DST_CACHE=y
-CONFIG_GRO_CELLS=y
-CONFIG_NET_DEVLINK=m
-CONFIG_MAY_USE_DEVLINK=m
-CONFIG_HAVE_EBPF_JIT=y
-
-#
-# Device Drivers
-#
-
-#
-# Generic Driver Options
-#
-# CONFIG_UEVENT_HELPER is not set
-CONFIG_DEVTMPFS=y
-CONFIG_DEVTMPFS_MOUNT=y
-CONFIG_STANDALONE=y
-CONFIG_PREVENT_FIRMWARE_BUILD=y
-CONFIG_FW_LOADER=y
-# CONFIG_FIRMWARE_IN_KERNEL is not set
-CONFIG_EXTRA_FIRMWARE=""
-CONFIG_FW_LOADER_USER_HELPER=y
-# CONFIG_FW_LOADER_USER_HELPER_FALLBACK is not set
-CONFIG_WANT_DEV_COREDUMP=y
-CONFIG_ALLOW_DEV_COREDUMP=y
-CONFIG_DEV_COREDUMP=y
-# CONFIG_DEBUG_DRIVER is not set
-# CONFIG_DEBUG_DEVRES is not set
-# CONFIG_DEBUG_TEST_DRIVER_REMOVE is not set
-CONFIG_TEST_ASYNC_DRIVER_PROBE=m
-# CONFIG_SYS_HYPERVISOR is not set
-# CONFIG_GENERIC_CPU_DEVICES is not set
-CONFIG_GENERIC_CPU_AUTOPROBE=y
-CONFIG_GENERIC_CPU_VULNERABILITIES=y
-CONFIG_REGMAP=y
-CONFIG_REGMAP_I2C=m
-CONFIG_REGMAP_SPI=y
-CONFIG_REGMAP_SPMI=m
-CONFIG_REGMAP_W1=m
-CONFIG_REGMAP_MMIO=y
-CONFIG_REGMAP_IRQ=y
-CONFIG_DMA_SHARED_BUFFER=y
-# CONFIG_DMA_FENCE_TRACE is not set
-# CONFIG_DMA_CMA is not set
-
-#
-# Bus devices
-#
-CONFIG_CONNECTOR=m
-CONFIG_MTD=m
-CONFIG_MTD_TESTS=m
-CONFIG_MTD_REDBOOT_PARTS=m
-CONFIG_MTD_REDBOOT_DIRECTORY_BLOCK=-1
-CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED=y
-CONFIG_MTD_REDBOOT_PARTS_READONLY=y
-CONFIG_MTD_CMDLINE_PARTS=m
-CONFIG_MTD_AR7_PARTS=m
-
-#
-# Partition parsers
-#
-
-#
-# User Modules And Translation Layers
-#
-CONFIG_MTD_BLKDEVS=m
-CONFIG_MTD_BLOCK=m
-CONFIG_MTD_BLOCK_RO=m
-CONFIG_FTL=m
-CONFIG_NFTL=m
-CONFIG_NFTL_RW=y
-CONFIG_INFTL=m
-CONFIG_RFD_FTL=m
-CONFIG_SSFDC=m
-CONFIG_SM_FTL=m
-CONFIG_MTD_OOPS=m
-CONFIG_MTD_SWAP=m
-# CONFIG_MTD_PARTITIONED_MASTER is not set
-
-#
-# RAM/ROM/Flash chip drivers
-#
-CONFIG_MTD_CFI=m
-CONFIG_MTD_JEDECPROBE=m
-CONFIG_MTD_GEN_PROBE=m
-# CONFIG_MTD_CFI_ADV_OPTIONS is not set
-CONFIG_MTD_MAP_BANK_WIDTH_1=y
-CONFIG_MTD_MAP_BANK_WIDTH_2=y
-CONFIG_MTD_MAP_BANK_WIDTH_4=y
-# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
-# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
-# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
-CONFIG_MTD_CFI_I1=y
-CONFIG_MTD_CFI_I2=y
-# CONFIG_MTD_CFI_I4 is not set
-# CONFIG_MTD_CFI_I8 is not set
-CONFIG_MTD_CFI_INTELEXT=m
-CONFIG_MTD_CFI_AMDSTD=m
-CONFIG_MTD_CFI_STAA=m
-CONFIG_MTD_CFI_UTIL=m
-CONFIG_MTD_RAM=m
-CONFIG_MTD_ROM=m
-CONFIG_MTD_ABSENT=m
-
-#
-# Mapping drivers for chip access
-#
-CONFIG_MTD_COMPLEX_MAPPINGS=y
-CONFIG_MTD_PHYSMAP=m
-# CONFIG_MTD_PHYSMAP_COMPAT is not set
-CONFIG_MTD_SBC_GXX=m
-CONFIG_MTD_AMD76XROM=m
-CONFIG_MTD_ICHXROM=m
-CONFIG_MTD_ESB2ROM=m
-CONFIG_MTD_CK804XROM=m
-CONFIG_MTD_SCB2_FLASH=m
-CONFIG_MTD_NETtel=m
-CONFIG_MTD_L440GX=m
-CONFIG_MTD_PCI=m
-CONFIG_MTD_PCMCIA=m
-# CONFIG_MTD_PCMCIA_ANONYMOUS is not set
-CONFIG_MTD_GPIO_ADDR=m
-CONFIG_MTD_INTEL_VR_NOR=m
-CONFIG_MTD_PLATRAM=m
-CONFIG_MTD_LATCH_ADDR=m
-
-#
-# Self-contained MTD device drivers
-#
-CONFIG_MTD_PMC551=m
-CONFIG_MTD_PMC551_BUGFIX=y
-# CONFIG_MTD_PMC551_DEBUG is not set
-CONFIG_MTD_DATAFLASH=m
-CONFIG_MTD_DATAFLASH_WRITE_VERIFY=y
-CONFIG_MTD_DATAFLASH_OTP=y
-CONFIG_MTD_M25P80=m
-CONFIG_MTD_MCHP23K256=m
-CONFIG_MTD_SST25L=m
-CONFIG_MTD_SLRAM=m
-CONFIG_MTD_PHRAM=m
-CONFIG_MTD_MTDRAM=m
-CONFIG_MTDRAM_TOTAL_SIZE=4096
-CONFIG_MTDRAM_ERASE_SIZE=128
-CONFIG_MTD_BLOCK2MTD=m
-
-#
-# Disk-On-Chip Device Drivers
-#
-CONFIG_MTD_DOCG3=m
-CONFIG_BCH_CONST_M=14
-CONFIG_BCH_CONST_T=4
-CONFIG_MTD_NAND_ECC=m
-CONFIG_MTD_NAND_ECC_SMC=y
-CONFIG_MTD_NAND=m
-CONFIG_MTD_NAND_BCH=m
-CONFIG_MTD_NAND_ECC_BCH=y
-CONFIG_MTD_SM_COMMON=m
-CONFIG_MTD_NAND_DENALI=m
-CONFIG_MTD_NAND_DENALI_PCI=m
-CONFIG_MTD_NAND_GPIO=m
-# CONFIG_MTD_NAND_OMAP_BCH_BUILD is not set
-CONFIG_MTD_NAND_RICOH=m
-CONFIG_MTD_NAND_DISKONCHIP=m
-CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADVANCED=y
-CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADDRESS=0x0
-CONFIG_MTD_NAND_DISKONCHIP_PROBE_HIGH=y
-CONFIG_MTD_NAND_DISKONCHIP_BBTWRITE=y
-CONFIG_MTD_NAND_DOCG4=m
-CONFIG_MTD_NAND_CAFE=m
-CONFIG_MTD_NAND_NANDSIM=m
-CONFIG_MTD_NAND_PLATFORM=m
-CONFIG_MTD_ONENAND=m
-CONFIG_MTD_ONENAND_VERIFY_WRITE=y
-CONFIG_MTD_ONENAND_GENERIC=m
-CONFIG_MTD_ONENAND_OTP=y
-CONFIG_MTD_ONENAND_2X_PROGRAM=y
-
-#
-# LPDDR & LPDDR2 PCM memory drivers
-#
-CONFIG_MTD_LPDDR=m
-CONFIG_MTD_QINFO_PROBE=m
-CONFIG_MTD_SPI_NOR=m
-CONFIG_MTD_MT81xx_NOR=m
-CONFIG_MTD_SPI_NOR_USE_4K_SECTORS=y
-CONFIG_MTD_UBI=m
-CONFIG_MTD_UBI_WL_THRESHOLD=4096
-CONFIG_MTD_UBI_BEB_LIMIT=20
-CONFIG_MTD_UBI_FASTMAP=y
-# CONFIG_MTD_UBI_GLUEBI is not set
-CONFIG_MTD_UBI_BLOCK=y
-# CONFIG_OF is not set
-CONFIG_ARCH_MIGHT_HAVE_PC_PARPORT=y
-CONFIG_PARPORT=m
-CONFIG_PARPORT_PC=m
-CONFIG_PARPORT_SERIAL=m
-CONFIG_PARPORT_PC_FIFO=y
-CONFIG_PARPORT_PC_SUPERIO=y
-CONFIG_PARPORT_PC_PCMCIA=m
-# CONFIG_PARPORT_GSC is not set
-CONFIG_PARPORT_AX88796=m
-CONFIG_PARPORT_1284=y
-CONFIG_PARPORT_NOT_PC=y
-CONFIG_PNP=y
-# CONFIG_PNP_DEBUG_MESSAGES is not set
-
-#
-# Protocols
-#
-CONFIG_PNPACPI=y
-CONFIG_BLK_DEV=y
-# CONFIG_BLK_DEV_NULL_BLK is not set
-CONFIG_BLK_DEV_FD=m
-CONFIG_PARIDE=m
-
-#
-# Parallel IDE high-level drivers
-#
-CONFIG_PARIDE_PD=m
-CONFIG_PARIDE_PCD=m
-CONFIG_PARIDE_PF=m
-CONFIG_PARIDE_PT=m
-CONFIG_PARIDE_PG=m
-
-#
-# Parallel IDE protocol modules
-#
-CONFIG_PARIDE_ATEN=m
-CONFIG_PARIDE_BPCK=m
-CONFIG_PARIDE_COMM=m
-CONFIG_PARIDE_DSTR=m
-CONFIG_PARIDE_FIT2=m
-CONFIG_PARIDE_FIT3=m
-CONFIG_PARIDE_EPAT=m
-CONFIG_PARIDE_EPATC8=y
-CONFIG_PARIDE_EPIA=m
-CONFIG_PARIDE_FRIQ=m
-CONFIG_PARIDE_FRPW=m
-CONFIG_PARIDE_KBIC=m
-CONFIG_PARIDE_KTTI=m
-CONFIG_PARIDE_ON20=m
-CONFIG_PARIDE_ON26=m
-CONFIG_BLK_DEV_PCIESSD_MTIP32XX=m
-CONFIG_ZRAM=m
-CONFIG_ZRAM_WRITEBACK=y
-CONFIG_BLK_DEV_DAC960=m
-CONFIG_BLK_DEV_UMEM=m
-# CONFIG_BLK_DEV_COW_COMMON is not set
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_LOOP_MIN_COUNT=8
-CONFIG_BLK_DEV_CRYPTOLOOP=m
-CONFIG_BLK_DEV_DRBD=m
-# CONFIG_DRBD_FAULT_INJECTION is not set
-CONFIG_BLK_DEV_NBD=m
-CONFIG_BLK_DEV_SKD=m
-CONFIG_BLK_DEV_SX8=m
-CONFIG_BLK_DEV_RAM=m
-CONFIG_BLK_DEV_RAM_COUNT=16
-CONFIG_BLK_DEV_RAM_SIZE=16384
-CONFIG_BLK_DEV_RAM_DAX=y
-CONFIG_CDROM_PKTCDVD=m
-CONFIG_CDROM_PKTCDVD_BUFFERS=8
-CONFIG_CDROM_PKTCDVD_WCACHE=y
-CONFIG_ATA_OVER_ETH=m
-CONFIG_VIRTIO_BLK=m
-# CONFIG_VIRTIO_BLK_SCSI is not set
-CONFIG_BLK_DEV_RBD=m
-CONFIG_BLK_DEV_RSXX=m
-CONFIG_NVME_CORE=m
-CONFIG_BLK_DEV_NVME=m
-CONFIG_NVME_FABRICS=m
-CONFIG_NVME_RDMA=m
-CONFIG_NVME_FC=m
-CONFIG_NVME_TARGET=m
-CONFIG_NVME_TARGET_LOOP=m
-CONFIG_NVME_TARGET_RDMA=m
-CONFIG_NVME_TARGET_FC=m
-CONFIG_NVME_TARGET_FCLOOP=m
-
-#
-# Misc devices
-#
-CONFIG_SENSORS_LIS3LV02D=m
-CONFIG_AD525X_DPOT=m
-CONFIG_AD525X_DPOT_I2C=m
-CONFIG_AD525X_DPOT_SPI=m
-# CONFIG_DUMMY_IRQ is not set
-CONFIG_IBM_ASM=m
-CONFIG_PHANTOM=m
-CONFIG_SGI_IOC4=m
-CONFIG_TIFM_CORE=m
-CONFIG_TIFM_7XX1=m
-CONFIG_ICS932S401=m
-CONFIG_ENCLOSURE_SERVICES=m
-CONFIG_HP_ILO=m
-CONFIG_APDS9802ALS=m
-CONFIG_ISL29003=m
-CONFIG_ISL29020=m
-CONFIG_SENSORS_TSL2550=m
-CONFIG_SENSORS_BH1770=m
-CONFIG_SENSORS_APDS990X=m
-CONFIG_HMC6352=m
-CONFIG_DS1682=m
-CONFIG_TI_DAC7512=m
-CONFIG_VMWARE_BALLOON=m
-CONFIG_USB_SWITCH_FSA9480=m
-CONFIG_LATTICE_ECP3_CONFIG=m
-CONFIG_SRAM=y
-CONFIG_PCI_ENDPOINT_TEST=m
-CONFIG_C2PORT=m
-CONFIG_C2PORT_DURAMAR_2150=m
-
-#
-# EEPROM support
-#
-CONFIG_EEPROM_AT24=m
-CONFIG_EEPROM_AT25=m
-CONFIG_EEPROM_LEGACY=m
-CONFIG_EEPROM_MAX6875=m
-CONFIG_EEPROM_93CX6=m
-CONFIG_EEPROM_93XX46=m
-CONFIG_EEPROM_IDT_89HPESX=m
-CONFIG_CB710_CORE=m
-# CONFIG_CB710_DEBUG is not set
-CONFIG_CB710_DEBUG_ASSUMPTIONS=y
-
-#
-# Texas Instruments shared transport line discipline
-#
-CONFIG_TI_ST=m
-CONFIG_SENSORS_LIS3_I2C=m
-
-#
-# Altera FPGA firmware download module
-#
-CONFIG_ALTERA_STAPL=m
-CONFIG_INTEL_MEI=y
-CONFIG_INTEL_MEI_ME=y
-CONFIG_INTEL_MEI_TXE=m
-CONFIG_VMWARE_VMCI=m
-
-#
-# Intel MIC Bus Driver
-#
-CONFIG_INTEL_MIC_BUS=m
-
-#
-# SCIF Bus Driver
-#
-CONFIG_SCIF_BUS=m
-
-#
-# VOP Bus Driver
-#
-CONFIG_VOP_BUS=m
-
-#
-# Intel MIC Host Driver
-#
-CONFIG_INTEL_MIC_HOST=m
-
-#
-# Intel MIC Card Driver
-#
-CONFIG_INTEL_MIC_CARD=m
-
-#
-# SCIF Driver
-#
-CONFIG_SCIF=m
-
-#
-# Intel MIC Coprocessor State Management (COSM) Drivers
-#
-CONFIG_MIC_COSM=m
-
-#
-# VOP Driver
-#
-CONFIG_VOP=m
-CONFIG_VHOST_RING=m
-CONFIG_GENWQE=m
-CONFIG_GENWQE_PLATFORM_ERROR_RECOVERY=0
-CONFIG_ECHO=m
-# CONFIG_CXL_BASE is not set
-# CONFIG_CXL_AFU_DRIVER_OPS is not set
-# CONFIG_CXL_LIB is not set
-CONFIG_HAVE_IDE=y
-# CONFIG_IDE is not set
-
-#
-# SCSI device support
-#
-CONFIG_SCSI_MOD=m
-CONFIG_RAID_ATTRS=m
-CONFIG_SCSI=m
-CONFIG_SCSI_DMA=y
-CONFIG_SCSI_NETLINK=y
-# CONFIG_SCSI_MQ_DEFAULT is not set
-CONFIG_SCSI_PROC_FS=y
-
-#
-# SCSI support type (disk, tape, CD-ROM)
-#
-CONFIG_BLK_DEV_SD=m
-CONFIG_CHR_DEV_ST=m
-CONFIG_CHR_DEV_OSST=m
-CONFIG_BLK_DEV_SR=m
-CONFIG_BLK_DEV_SR_VENDOR=y
-CONFIG_CHR_DEV_SG=m
-CONFIG_CHR_DEV_SCH=m
-CONFIG_SCSI_ENCLOSURE=m
-CONFIG_SCSI_CONSTANTS=y
-CONFIG_SCSI_LOGGING=y
-CONFIG_SCSI_SCAN_ASYNC=y
-
-#
-# SCSI Transports
-#
-CONFIG_SCSI_SPI_ATTRS=m
-CONFIG_SCSI_FC_ATTRS=m
-CONFIG_SCSI_ISCSI_ATTRS=m
-CONFIG_SCSI_SAS_ATTRS=m
-CONFIG_SCSI_SAS_LIBSAS=m
-CONFIG_SCSI_SAS_ATA=y
-CONFIG_SCSI_SAS_HOST_SMP=y
-CONFIG_SCSI_SRP_ATTRS=m
-CONFIG_SCSI_LOWLEVEL=y
-CONFIG_ISCSI_TCP=m
-CONFIG_ISCSI_BOOT_SYSFS=m
-CONFIG_SCSI_CXGB3_ISCSI=m
-CONFIG_SCSI_CXGB4_ISCSI=m
-CONFIG_SCSI_BNX2_ISCSI=m
-CONFIG_SCSI_BNX2X_FCOE=m
-CONFIG_BE2ISCSI=m
-CONFIG_BLK_DEV_3W_XXXX_RAID=m
-CONFIG_SCSI_HPSA=m
-CONFIG_SCSI_3W_9XXX=m
-CONFIG_SCSI_3W_SAS=m
-CONFIG_SCSI_ACARD=m
-CONFIG_SCSI_AACRAID=m
-CONFIG_SCSI_AIC7XXX=m
-CONFIG_AIC7XXX_CMDS_PER_DEVICE=4
-CONFIG_AIC7XXX_RESET_DELAY_MS=15000
-# CONFIG_AIC7XXX_DEBUG_ENABLE is not set
-CONFIG_AIC7XXX_DEBUG_MASK=0
-# CONFIG_AIC7XXX_REG_PRETTY_PRINT is not set
-CONFIG_SCSI_AIC79XX=m
-CONFIG_AIC79XX_CMDS_PER_DEVICE=4
-CONFIG_AIC79XX_RESET_DELAY_MS=15000
-# CONFIG_AIC79XX_DEBUG_ENABLE is not set
-CONFIG_AIC79XX_DEBUG_MASK=0
-# CONFIG_AIC79XX_REG_PRETTY_PRINT is not set
-CONFIG_SCSI_AIC94XX=m
-# CONFIG_AIC94XX_DEBUG is not set
-CONFIG_SCSI_MVSAS=m
-# CONFIG_SCSI_MVSAS_DEBUG is not set
-CONFIG_SCSI_MVSAS_TASKLET=y
-CONFIG_SCSI_MVUMI=m
-CONFIG_SCSI_DPT_I2O=m
-CONFIG_SCSI_ADVANSYS=m
-CONFIG_SCSI_ARCMSR=m
-CONFIG_SCSI_ESAS2R=m
-CONFIG_MEGARAID_NEWGEN=y
-CONFIG_MEGARAID_MM=m
-CONFIG_MEGARAID_MAILBOX=m
-CONFIG_MEGARAID_LEGACY=m
-CONFIG_MEGARAID_SAS=m
-CONFIG_SCSI_MPT3SAS=m
-CONFIG_SCSI_MPT2SAS_MAX_SGE=128
-CONFIG_SCSI_MPT3SAS_MAX_SGE=128
-CONFIG_SCSI_MPT2SAS=m
-CONFIG_SCSI_SMARTPQI=m
-CONFIG_SCSI_UFSHCD=m
-CONFIG_SCSI_UFSHCD_PCI=m
-CONFIG_SCSI_UFS_DWC_TC_PCI=m
-CONFIG_SCSI_UFSHCD_PLATFORM=m
-CONFIG_SCSI_UFS_DWC_TC_PLATFORM=m
-CONFIG_SCSI_HPTIOP=m
-CONFIG_SCSI_BUSLOGIC=m
-CONFIG_SCSI_FLASHPOINT=y
-CONFIG_VMWARE_PVSCSI=m
-CONFIG_HYPERV_STORAGE=m
-CONFIG_LIBFC=m
-CONFIG_LIBFCOE=m
-CONFIG_FCOE=m
-CONFIG_FCOE_FNIC=m
-CONFIG_SCSI_SNIC=m
-# CONFIG_SCSI_SNIC_DEBUG_FS is not set
-CONFIG_SCSI_DMX3191D=m
-CONFIG_SCSI_EATA=m
-# CONFIG_SCSI_EATA_TAGGED_QUEUE is not set
-# CONFIG_SCSI_EATA_LINKED_COMMANDS is not set
-CONFIG_SCSI_EATA_MAX_TAGS=16
-CONFIG_SCSI_FUTURE_DOMAIN=m
-CONFIG_SCSI_GDTH=m
-CONFIG_SCSI_ISCI=m
-CONFIG_SCSI_IPS=m
-CONFIG_SCSI_INITIO=m
-CONFIG_SCSI_INIA100=m
-CONFIG_SCSI_PPA=m
-CONFIG_SCSI_IMM=m
-# CONFIG_SCSI_IZIP_EPP16 is not set
-# CONFIG_SCSI_IZIP_SLOW_CTR is not set
-CONFIG_SCSI_STEX=m
-CONFIG_SCSI_SYM53C8XX_2=m
-CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=1
-CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16
-CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64
-CONFIG_SCSI_SYM53C8XX_MMIO=y
-CONFIG_SCSI_IPR=m
-# CONFIG_SCSI_IPR_TRACE is not set
-# CONFIG_SCSI_IPR_DUMP is not set
-CONFIG_SCSI_QLOGIC_1280=m
-CONFIG_SCSI_QLA_FC=m
-CONFIG_TCM_QLA2XXX=m
-# CONFIG_TCM_QLA2XXX_DEBUG is not set
-CONFIG_SCSI_QLA_ISCSI=m
-CONFIG_QEDI=m
-CONFIG_QEDF=m
-CONFIG_SCSI_LPFC=m
-# CONFIG_SCSI_LPFC_DEBUG_FS is not set
-CONFIG_SCSI_DC395x=m
-CONFIG_SCSI_AM53C974=m
-CONFIG_SCSI_WD719X=m
-# CONFIG_SCSI_DEBUG is not set
-CONFIG_SCSI_PMCRAID=m
-CONFIG_SCSI_PM8001=m
-CONFIG_SCSI_BFA_FC=m
-CONFIG_SCSI_VIRTIO=m
-CONFIG_SCSI_CHELSIO_FCOE=m
-CONFIG_SCSI_LOWLEVEL_PCMCIA=y
-CONFIG_PCMCIA_AHA152X=m
-CONFIG_PCMCIA_FDOMAIN=m
-CONFIG_PCMCIA_QLOGIC=m
-CONFIG_PCMCIA_SYM53C500=m
-CONFIG_SCSI_DH=y
-CONFIG_SCSI_DH_RDAC=m
-CONFIG_SCSI_DH_HP_SW=m
-CONFIG_SCSI_DH_EMC=m
-CONFIG_SCSI_DH_ALUA=m
-CONFIG_SCSI_OSD_INITIATOR=m
-CONFIG_SCSI_OSD_ULD=m
-CONFIG_SCSI_OSD_DPRINT_SENSE=1
-# CONFIG_SCSI_OSD_DEBUG is not set
-CONFIG_ATA=m
-# CONFIG_ATA_NONSTANDARD is not set
-CONFIG_ATA_VERBOSE_ERROR=y
-CONFIG_ATA_ACPI=y
-CONFIG_SATA_ZPODD=y
-CONFIG_SATA_PMP=y
-
-#
-# Controllers with non-SFF native interface
-#
-CONFIG_SATA_AHCI=m
-CONFIG_SATA_AHCI_PLATFORM=m
-CONFIG_SATA_INIC162X=m
-CONFIG_SATA_ACARD_AHCI=m
-CONFIG_SATA_SIL24=m
-CONFIG_ATA_SFF=y
-
-#
-# SFF controllers with custom DMA interface
-#
-CONFIG_PDC_ADMA=m
-CONFIG_SATA_QSTOR=m
-CONFIG_SATA_SX4=m
-CONFIG_ATA_BMDMA=y
-
-#
-# SATA SFF controllers with BMDMA
-#
-CONFIG_ATA_PIIX=m
-CONFIG_SATA_DWC=m
-# CONFIG_SATA_DWC_OLD_DMA is not set
-# CONFIG_SATA_DWC_DEBUG is not set
-CONFIG_SATA_MV=m
-CONFIG_SATA_NV=m
-CONFIG_SATA_PROMISE=m
-CONFIG_SATA_SIL=m
-CONFIG_SATA_SIS=m
-CONFIG_SATA_SVW=m
-CONFIG_SATA_ULI=m
-CONFIG_SATA_VIA=m
-CONFIG_SATA_VITESSE=m
-
-#
-# PATA SFF controllers with BMDMA
-#
-CONFIG_PATA_ALI=m
-CONFIG_PATA_AMD=m
-CONFIG_PATA_ARTOP=m
-CONFIG_PATA_ATIIXP=m
-CONFIG_PATA_ATP867X=m
-CONFIG_PATA_CMD64X=m
-CONFIG_PATA_CYPRESS=m
-CONFIG_PATA_EFAR=m
-CONFIG_PATA_HPT366=m
-CONFIG_PATA_HPT37X=m
-CONFIG_PATA_HPT3X2N=m
-CONFIG_PATA_HPT3X3=m
-CONFIG_PATA_HPT3X3_DMA=y
-CONFIG_PATA_IT8213=m
-CONFIG_PATA_IT821X=m
-CONFIG_PATA_JMICRON=m
-CONFIG_PATA_MARVELL=m
-CONFIG_PATA_NETCELL=m
-CONFIG_PATA_NINJA32=m
-CONFIG_PATA_NS87415=m
-CONFIG_PATA_OLDPIIX=m
-CONFIG_PATA_OPTIDMA=m
-CONFIG_PATA_PDC2027X=m
-CONFIG_PATA_PDC_OLD=m
-CONFIG_PATA_RADISYS=m
-CONFIG_PATA_RDC=m
-CONFIG_PATA_SCH=m
-CONFIG_PATA_SERVERWORKS=m
-CONFIG_PATA_SIL680=m
-CONFIG_PATA_SIS=m
-CONFIG_PATA_TOSHIBA=m
-CONFIG_PATA_TRIFLEX=m
-CONFIG_PATA_VIA=m
-CONFIG_PATA_WINBOND=m
-
-#
-# PIO-only SFF controllers
-#
-CONFIG_PATA_CMD640_PCI=m
-CONFIG_PATA_MPIIX=m
-CONFIG_PATA_NS87410=m
-CONFIG_PATA_OPTI=m
-CONFIG_PATA_PCMCIA=m
-CONFIG_PATA_RZ1000=m
-
-#
-# Generic fallback / legacy drivers
-#
-CONFIG_PATA_ACPI=m
-CONFIG_ATA_GENERIC=m
-CONFIG_PATA_LEGACY=m
-CONFIG_MD=y
-CONFIG_BLK_DEV_MD=m
-CONFIG_MD_LINEAR=m
-CONFIG_MD_RAID0=m
-CONFIG_MD_RAID1=m
-CONFIG_MD_RAID10=m
-CONFIG_MD_RAID456=m
-CONFIG_MD_MULTIPATH=m
-CONFIG_MD_FAULTY=m
-CONFIG_MD_CLUSTER=m
-CONFIG_BCACHE=m
-# CONFIG_BCACHE_DEBUG is not set
-# CONFIG_BCACHE_CLOSURES_DEBUG is not set
-CONFIG_BLK_DEV_DM_BUILTIN=y
-CONFIG_BLK_DEV_DM=m
-# CONFIG_DM_MQ_DEFAULT is not set
-# CONFIG_DM_DEBUG is not set
-CONFIG_DM_BUFIO=m
-# CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING is not set
-CONFIG_DM_BIO_PRISON=m
-CONFIG_DM_PERSISTENT_DATA=m
-CONFIG_DM_CRYPT=m
-CONFIG_DM_SNAPSHOT=m
-CONFIG_DM_THIN_PROVISIONING=m
-CONFIG_DM_CACHE=m
-CONFIG_DM_CACHE_SMQ=m
-CONFIG_DM_ERA=m
-CONFIG_DM_MIRROR=m
-CONFIG_DM_LOG_USERSPACE=m
-CONFIG_DM_RAID=m
-CONFIG_DM_ZERO=m
-CONFIG_DM_MULTIPATH=m
-CONFIG_DM_MULTIPATH_QL=m
-CONFIG_DM_MULTIPATH_ST=m
-CONFIG_DM_DELAY=m
-CONFIG_DM_UEVENT=y
-CONFIG_DM_FLAKEY=m
-CONFIG_DM_VERITY=m
-# CONFIG_DM_VERITY_FEC is not set
-CONFIG_DM_SWITCH=m
-CONFIG_DM_LOG_WRITES=m
-CONFIG_DM_INTEGRITY=m
-CONFIG_DM_ZONED=m
-CONFIG_TARGET_CORE=m
-CONFIG_TCM_IBLOCK=m
-CONFIG_TCM_FILEIO=m
-CONFIG_TCM_PSCSI=m
-CONFIG_TCM_USER2=m
-CONFIG_LOOPBACK_TARGET=m
-CONFIG_TCM_FC=m
-CONFIG_ISCSI_TARGET=m
-CONFIG_ISCSI_TARGET_CXGB4=m
-CONFIG_SBP_TARGET=m
-CONFIG_FUSION=y
-CONFIG_FUSION_SPI=m
-CONFIG_FUSION_FC=m
-CONFIG_FUSION_SAS=m
-CONFIG_FUSION_MAX_SGE=128
-CONFIG_FUSION_CTL=m
-CONFIG_FUSION_LAN=m
-CONFIG_FUSION_LOGGING=y
-
-#
-# IEEE 1394 (FireWire) support
-#
-CONFIG_FIREWIRE=m
-CONFIG_FIREWIRE_OHCI=m
-CONFIG_FIREWIRE_SBP2=m
-CONFIG_FIREWIRE_NET=m
-CONFIG_FIREWIRE_NOSY=m
-CONFIG_MACINTOSH_DRIVERS=y
-CONFIG_MAC_EMUMOUSEBTN=m
-CONFIG_NETDEVICES=y
-CONFIG_MII=m
-CONFIG_NET_CORE=y
-CONFIG_BONDING=m
-CONFIG_DUMMY=m
-CONFIG_EQUALIZER=m
-CONFIG_NET_FC=y
-CONFIG_IFB=m
-CONFIG_NET_TEAM=m
-CONFIG_NET_TEAM_MODE_BROADCAST=m
-CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
-CONFIG_NET_TEAM_MODE_RANDOM=m
-CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
-CONFIG_NET_TEAM_MODE_LOADBALANCE=m
-CONFIG_MACVLAN=m
-CONFIG_MACVTAP=m
-CONFIG_IPVLAN=m
-CONFIG_IPVTAP=m
-CONFIG_VXLAN=m
-CONFIG_GENEVE=m
-CONFIG_GTP=m
-CONFIG_MACSEC=m
-CONFIG_NETCONSOLE=m
-CONFIG_NETCONSOLE_DYNAMIC=y
-CONFIG_NETPOLL=y
-CONFIG_NET_POLL_CONTROLLER=y
-# CONFIG_NTB_NETDEV is not set
-CONFIG_RIONET=m
-CONFIG_RIONET_TX_SIZE=128
-CONFIG_RIONET_RX_SIZE=128
-CONFIG_TUN=m
-CONFIG_TAP=m
-# CONFIG_TUN_VNET_CROSS_LE is not set
-CONFIG_VETH=m
-CONFIG_VIRTIO_NET=m
-CONFIG_NLMON=m
-CONFIG_NET_VRF=m
-CONFIG_VSOCKMON=m
-CONFIG_SUNGEM_PHY=m
-CONFIG_ARCNET=m
-CONFIG_ARCNET_1201=m
-CONFIG_ARCNET_1051=m
-CONFIG_ARCNET_RAW=m
-CONFIG_ARCNET_CAP=m
-CONFIG_ARCNET_COM90xx=m
-CONFIG_ARCNET_COM90xxIO=m
-CONFIG_ARCNET_RIM_I=m
-CONFIG_ARCNET_COM20020=m
-CONFIG_ARCNET_COM20020_PCI=m
-CONFIG_ARCNET_COM20020_CS=m
-CONFIG_ATM_DRIVERS=y
-# CONFIG_ATM_DUMMY is not set
-CONFIG_ATM_TCP=m
-CONFIG_ATM_LANAI=m
-CONFIG_ATM_ENI=m
-# CONFIG_ATM_ENI_DEBUG is not set
-# CONFIG_ATM_ENI_TUNE_BURST is not set
-CONFIG_ATM_FIRESTREAM=m
-CONFIG_ATM_ZATM=m
-# CONFIG_ATM_ZATM_DEBUG is not set
-CONFIG_ATM_NICSTAR=m
-CONFIG_ATM_NICSTAR_USE_SUNI=y
-CONFIG_ATM_NICSTAR_USE_IDT77105=y
-CONFIG_ATM_IDT77252=m
-# CONFIG_ATM_IDT77252_DEBUG is not set
-# CONFIG_ATM_IDT77252_RCV_ALL is not set
-CONFIG_ATM_IDT77252_USE_SUNI=y
-CONFIG_ATM_AMBASSADOR=m
-# CONFIG_ATM_AMBASSADOR_DEBUG is not set
-CONFIG_ATM_HORIZON=m
-# CONFIG_ATM_HORIZON_DEBUG is not set
-CONFIG_ATM_IA=m
-# CONFIG_ATM_IA_DEBUG is not set
-CONFIG_ATM_FORE200E=m
-CONFIG_ATM_FORE200E_USE_TASKLET=y
-CONFIG_ATM_FORE200E_TX_RETRY=16
-CONFIG_ATM_FORE200E_DEBUG=0
-CONFIG_ATM_HE=m
-CONFIG_ATM_HE_USE_SUNI=y
-CONFIG_ATM_SOLOS=m
-
-#
-# CAIF transport drivers
-#
-CONFIG_CAIF_TTY=m
-CONFIG_CAIF_SPI_SLAVE=m
-CONFIG_CAIF_SPI_SYNC=y
-CONFIG_CAIF_HSI=m
-CONFIG_CAIF_VIRTIO=m
-
-#
-# Distributed Switch Architecture drivers
-#
-CONFIG_B53=m
-CONFIG_B53_SPI_DRIVER=m
-CONFIG_B53_MDIO_DRIVER=m
-CONFIG_B53_MMAP_DRIVER=m
-CONFIG_B53_SRAB_DRIVER=m
-CONFIG_NET_DSA_LOOP=m
-CONFIG_NET_DSA_MT7530=m
-CONFIG_NET_DSA_MV88E6060=m
-CONFIG_MICROCHIP_KSZ=m
-CONFIG_MICROCHIP_KSZ_SPI_DRIVER=m
-CONFIG_NET_DSA_MV88E6XXX=m
-CONFIG_NET_DSA_MV88E6XXX_GLOBAL2=y
-CONFIG_NET_DSA_QCA8K=m
-CONFIG_NET_DSA_SMSC_LAN9303=m
-CONFIG_NET_DSA_SMSC_LAN9303_I2C=m
-CONFIG_NET_DSA_SMSC_LAN9303_MDIO=m
-CONFIG_ETHERNET=y
-CONFIG_MDIO=m
-CONFIG_NET_VENDOR_3COM=y
-CONFIG_PCMCIA_3C574=m
-CONFIG_PCMCIA_3C589=m
-CONFIG_VORTEX=m
-CONFIG_TYPHOON=m
-CONFIG_NET_VENDOR_ADAPTEC=y
-CONFIG_ADAPTEC_STARFIRE=m
-CONFIG_NET_VENDOR_AGERE=y
-CONFIG_ET131X=m
-CONFIG_NET_VENDOR_ALACRITECH=y
-CONFIG_SLICOSS=m
-CONFIG_NET_VENDOR_ALTEON=y
-CONFIG_ACENIC=m
-# CONFIG_ACENIC_OMIT_TIGON_I is not set
-CONFIG_ALTERA_TSE=m
-CONFIG_NET_VENDOR_AMAZON=y
-CONFIG_ENA_ETHERNET=m
-CONFIG_NET_VENDOR_AMD=y
-CONFIG_AMD8111_ETH=m
-CONFIG_PCNET32=m
-CONFIG_PCMCIA_NMCLAN=m
-CONFIG_AMD_XGBE=m
-CONFIG_AMD_XGBE_DCB=y
-CONFIG_AMD_XGBE_HAVE_ECC=y
-CONFIG_NET_VENDOR_AQUANTIA=y
-CONFIG_AQTION=m
-CONFIG_NET_VENDOR_ARC=y
-CONFIG_NET_VENDOR_ATHEROS=y
-CONFIG_ATL2=m
-CONFIG_ATL1=m
-CONFIG_ATL1E=m
-CONFIG_ATL1C=m
-CONFIG_ALX=m
-CONFIG_NET_VENDOR_AURORA=y
-CONFIG_AURORA_NB8800=m
-CONFIG_NET_CADENCE=y
-CONFIG_MACB=m
-CONFIG_MACB_USE_HWSTAMP=y
-CONFIG_MACB_PCI=m
-CONFIG_NET_VENDOR_BROADCOM=y
-CONFIG_B44=m
-CONFIG_B44_PCI_AUTOSELECT=y
-CONFIG_B44_PCICORE_AUTOSELECT=y
-CONFIG_B44_PCI=y
-CONFIG_BNX2=m
-CONFIG_CNIC=m
-CONFIG_TIGON3=m
-CONFIG_TIGON3_HWMON=y
-CONFIG_BNX2X=m
-CONFIG_BNX2X_SRIOV=y
-CONFIG_BNXT=m
-CONFIG_BNXT_SRIOV=y
-CONFIG_BNXT_FLOWER_OFFLOAD=y
-CONFIG_BNXT_DCB=y
-CONFIG_NET_VENDOR_BROCADE=y
-CONFIG_BNA=m
-CONFIG_NET_VENDOR_CAVIUM=y
-CONFIG_THUNDER_NIC_PF=m
-CONFIG_THUNDER_NIC_VF=m
-CONFIG_THUNDER_NIC_BGX=m
-CONFIG_THUNDER_NIC_RGX=m
-CONFIG_LIQUIDIO=m
-CONFIG_LIQUIDIO_VF=m
-CONFIG_NET_VENDOR_CHELSIO=y
-CONFIG_CHELSIO_T1=m
-CONFIG_CHELSIO_T1_1G=y
-CONFIG_CHELSIO_T3=m
-CONFIG_CHELSIO_T4=m
-CONFIG_CHELSIO_T4_DCB=y
-# CONFIG_CHELSIO_T4_FCOE is not set
-CONFIG_CHELSIO_T4VF=m
-CONFIG_CHELSIO_LIB=m
-CONFIG_NET_VENDOR_CISCO=y
-CONFIG_ENIC=m
-CONFIG_CX_ECAT=m
-CONFIG_DNET=m
-CONFIG_NET_VENDOR_DEC=y
-CONFIG_NET_TULIP=y
-CONFIG_DE2104X=m
-CONFIG_DE2104X_DSL=0
-CONFIG_TULIP=m
-CONFIG_TULIP_MWI=y
-CONFIG_TULIP_MMIO=y
-CONFIG_TULIP_NAPI=y
-CONFIG_TULIP_NAPI_HW_MITIGATION=y
-CONFIG_DE4X5=m
-CONFIG_WINBOND_840=m
-CONFIG_DM9102=m
-CONFIG_ULI526X=m
-CONFIG_PCMCIA_XIRCOM=m
-CONFIG_NET_VENDOR_DLINK=y
-CONFIG_DL2K=m
-CONFIG_SUNDANCE=m
-# CONFIG_SUNDANCE_MMIO is not set
-CONFIG_NET_VENDOR_EMULEX=y
-CONFIG_BE2NET=m
-CONFIG_BE2NET_HWMON=y
-CONFIG_NET_VENDOR_EZCHIP=y
-CONFIG_NET_VENDOR_EXAR=y
-CONFIG_S2IO=m
-CONFIG_VXGE=m
-# CONFIG_VXGE_DEBUG_TRACE_ALL is not set
-CONFIG_NET_VENDOR_FUJITSU=y
-CONFIG_PCMCIA_FMVJ18X=m
-CONFIG_NET_VENDOR_HP=y
-CONFIG_HP100=m
-CONFIG_NET_VENDOR_HUAWEI=y
-CONFIG_HINIC=m
-CONFIG_NET_VENDOR_INTEL=y
-CONFIG_E100=m
-CONFIG_E1000=m
-CONFIG_E1000E=m
-CONFIG_E1000E_HWTS=y
-CONFIG_IGB=m
-CONFIG_IGB_HWMON=y
-CONFIG_IGB_DCA=y
-CONFIG_IGBVF=m
-CONFIG_IXGB=m
-CONFIG_IXGBE=m
-CONFIG_IXGBE_HWMON=y
-CONFIG_IXGBE_DCA=y
-CONFIG_IXGBE_DCB=y
-CONFIG_IXGBEVF=m
-CONFIG_I40E=m
-CONFIG_I40E_DCB=y
-CONFIG_I40EVF=m
-CONFIG_FM10K=m
-CONFIG_NET_VENDOR_I825XX=y
-CONFIG_JME=m
-CONFIG_NET_VENDOR_MARVELL=y
-CONFIG_MVMDIO=m
-CONFIG_SKGE=m
-# CONFIG_SKGE_DEBUG is not set
-CONFIG_SKGE_GENESIS=y
-CONFIG_SKY2=m
-# CONFIG_SKY2_DEBUG is not set
-CONFIG_NET_VENDOR_MELLANOX=y
-CONFIG_MLX4_EN=m
-CONFIG_MLX4_EN_DCB=y
-CONFIG_MLX4_CORE=m
-CONFIG_MLX4_DEBUG=y
-CONFIG_MLX5_CORE=m
-CONFIG_MLX5_ACCEL=y
-CONFIG_MLX5_FPGA=y
-# CONFIG_MLX5_CORE_EN is not set
-CONFIG_MLXSW_CORE=m
-CONFIG_MLXSW_CORE_HWMON=y
-CONFIG_MLXSW_CORE_THERMAL=y
-CONFIG_MLXSW_PCI=m
-CONFIG_MLXSW_I2C=m
-CONFIG_MLXSW_SWITCHIB=m
-CONFIG_MLXSW_SWITCHX2=m
-CONFIG_MLXSW_SPECTRUM=m
-CONFIG_MLXSW_SPECTRUM_DCB=y
-CONFIG_MLXSW_MINIMAL=m
-CONFIG_MLXFW=m
-CONFIG_NET_VENDOR_MICREL=y
-CONFIG_KS8842=m
-CONFIG_KS8851=m
-CONFIG_KS8851_MLL=m
-CONFIG_KSZ884X_PCI=m
-CONFIG_NET_VENDOR_MICROCHIP=y
-CONFIG_ENC28J60=m
-# CONFIG_ENC28J60_WRITEVERIFY is not set
-CONFIG_ENCX24J600=m
-CONFIG_NET_VENDOR_MYRI=y
-CONFIG_MYRI10GE=m
-CONFIG_MYRI10GE_DCA=y
-CONFIG_FEALNX=m
-CONFIG_NET_VENDOR_NATSEMI=y
-CONFIG_NATSEMI=m
-CONFIG_NS83820=m
-CONFIG_NET_VENDOR_NETRONOME=y
-CONFIG_NFP=m
-# CONFIG_NFP_APP_FLOWER is not set
-# CONFIG_NFP_DEBUG is not set
-CONFIG_NET_VENDOR_8390=y
-CONFIG_PCMCIA_AXNET=m
-CONFIG_NE2K_PCI=m
-CONFIG_PCMCIA_PCNET=m
-CONFIG_NET_VENDOR_NVIDIA=y
-CONFIG_FORCEDETH=m
-CONFIG_NET_VENDOR_OKI=y
-CONFIG_ETHOC=m
-CONFIG_NET_PACKET_ENGINE=y
-CONFIG_HAMACHI=m
-CONFIG_YELLOWFIN=m
-CONFIG_NET_VENDOR_QLOGIC=y
-CONFIG_QLA3XXX=m
-CONFIG_QLCNIC=m
-CONFIG_QLCNIC_SRIOV=y
-CONFIG_QLCNIC_DCB=y
-CONFIG_QLCNIC_HWMON=y
-CONFIG_QLGE=m
-CONFIG_NETXEN_NIC=m
-CONFIG_QED=m
-CONFIG_QED_LL2=y
-CONFIG_QED_SRIOV=y
-CONFIG_QEDE=m
-CONFIG_QED_RDMA=y
-CONFIG_QED_ISCSI=y
-CONFIG_QED_FCOE=y
-CONFIG_NET_VENDOR_QUALCOMM=y
-CONFIG_QCOM_EMAC=m
-CONFIG_RMNET=m
-CONFIG_NET_VENDOR_REALTEK=y
-CONFIG_ATP=m
-CONFIG_8139CP=m
-CONFIG_8139TOO=m
-# CONFIG_8139TOO_PIO is not set
-CONFIG_8139TOO_TUNE_TWISTER=y
-CONFIG_8139TOO_8129=y
-# CONFIG_8139_OLD_RX_RESET is not set
-CONFIG_R8169=m
-CONFIG_NET_VENDOR_RENESAS=y
-CONFIG_NET_VENDOR_RDC=y
-CONFIG_R6040=m
-CONFIG_NET_VENDOR_ROCKER=y
-CONFIG_ROCKER=m
-CONFIG_NET_VENDOR_SAMSUNG=y
-CONFIG_SXGBE_ETH=m
-CONFIG_NET_VENDOR_SEEQ=y
-CONFIG_NET_VENDOR_SILAN=y
-CONFIG_SC92031=m
-CONFIG_NET_VENDOR_SIS=y
-CONFIG_SIS900=m
-CONFIG_SIS190=m
-CONFIG_NET_VENDOR_SOLARFLARE=y
-CONFIG_SFC=m
-CONFIG_SFC_MTD=y
-CONFIG_SFC_MCDI_MON=y
-CONFIG_SFC_SRIOV=y
-CONFIG_SFC_MCDI_LOGGING=y
-CONFIG_SFC_FALCON=m
-CONFIG_SFC_FALCON_MTD=y
-CONFIG_NET_VENDOR_SMSC=y
-CONFIG_PCMCIA_SMC91C92=m
-CONFIG_EPIC100=m
-CONFIG_SMSC911X=m
-# CONFIG_SMSC911X_ARCH_HOOKS is not set
-CONFIG_SMSC9420=m
-CONFIG_NET_VENDOR_STMICRO=y
-CONFIG_STMMAC_ETH=m
-CONFIG_STMMAC_PLATFORM=m
-CONFIG_DWMAC_GENERIC=m
-CONFIG_STMMAC_PCI=m
-CONFIG_NET_VENDOR_SUN=y
-CONFIG_HAPPYMEAL=m
-CONFIG_SUNGEM=m
-CONFIG_CASSINI=m
-CONFIG_NIU=m
-CONFIG_NET_VENDOR_TEHUTI=y
-CONFIG_TEHUTI=m
-CONFIG_NET_VENDOR_TI=y
-CONFIG_TI_CPSW_ALE=m
-CONFIG_TLAN=m
-CONFIG_NET_VENDOR_VIA=y
-CONFIG_VIA_RHINE=m
-CONFIG_VIA_RHINE_MMIO=y
-CONFIG_VIA_VELOCITY=m
-CONFIG_NET_VENDOR_WIZNET=y
-CONFIG_WIZNET_W5100=m
-CONFIG_WIZNET_W5300=m
-# CONFIG_WIZNET_BUS_DIRECT is not set
-# CONFIG_WIZNET_BUS_INDIRECT is not set
-CONFIG_WIZNET_BUS_ANY=y
-CONFIG_WIZNET_W5100_SPI=m
-CONFIG_NET_VENDOR_XIRCOM=y
-CONFIG_PCMCIA_XIRC2PS=m
-CONFIG_NET_VENDOR_SYNOPSYS=y
-CONFIG_DWC_XLGMAC=m
-CONFIG_DWC_XLGMAC_PCI=m
-CONFIG_FDDI=m
-CONFIG_DEFXX=m
-# CONFIG_DEFXX_MMIO is not set
-CONFIG_SKFP=m
-CONFIG_HIPPI=y
-CONFIG_ROADRUNNER=m
-CONFIG_ROADRUNNER_LARGE_RINGS=y
-CONFIG_NET_SB1000=m
-CONFIG_MDIO_DEVICE=m
-CONFIG_MDIO_BUS=m
-CONFIG_MDIO_BITBANG=m
-CONFIG_MDIO_CAVIUM=m
-CONFIG_MDIO_GPIO=m
-CONFIG_MDIO_THUNDER=m
-CONFIG_PHYLIB=m
-CONFIG_SWPHY=y
-CONFIG_LED_TRIGGER_PHY=y
-
-#
-# MII PHY device drivers
-#
-CONFIG_AMD_PHY=m
-CONFIG_AQUANTIA_PHY=m
-CONFIG_AT803X_PHY=m
-CONFIG_BCM7XXX_PHY=m
-CONFIG_BCM87XX_PHY=m
-CONFIG_BCM_NET_PHYLIB=m
-CONFIG_BROADCOM_PHY=m
-CONFIG_CICADA_PHY=m
-CONFIG_CORTINA_PHY=m
-CONFIG_DAVICOM_PHY=m
-CONFIG_DP83848_PHY=m
-CONFIG_DP83867_PHY=m
-CONFIG_FIXED_PHY=m
-CONFIG_ICPLUS_PHY=m
-CONFIG_INTEL_XWAY_PHY=m
-CONFIG_LSI_ET1011C_PHY=m
-CONFIG_LXT_PHY=m
-CONFIG_MARVELL_PHY=m
-CONFIG_MARVELL_10G_PHY=m
-CONFIG_MICREL_PHY=m
-CONFIG_MICROCHIP_PHY=m
-CONFIG_MICROSEMI_PHY=m
-CONFIG_NATIONAL_PHY=m
-CONFIG_QSEMI_PHY=m
-CONFIG_REALTEK_PHY=m
-CONFIG_ROCKCHIP_PHY=m
-CONFIG_SMSC_PHY=m
-CONFIG_STE10XP=m
-CONFIG_TERANETICS_PHY=m
-CONFIG_VITESSE_PHY=m
-CONFIG_XILINX_GMII2RGMII=m
-CONFIG_MICREL_KS8995MA=m
-CONFIG_PLIP=m
-CONFIG_PPP=m
-CONFIG_PPP_BSDCOMP=m
-CONFIG_PPP_DEFLATE=m
-CONFIG_PPP_FILTER=y
-CONFIG_PPP_MPPE=m
-CONFIG_PPP_MULTILINK=y
-CONFIG_PPPOATM=m
-CONFIG_PPPOE=m
-CONFIG_PPTP=m
-CONFIG_PPPOL2TP=m
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_SYNC_TTY=m
-CONFIG_SLIP=m
-CONFIG_SLHC=m
-CONFIG_SLIP_COMPRESSED=y
-CONFIG_SLIP_SMART=y
-CONFIG_SLIP_MODE_SLIP6=y
-
-#
-# Host-side USB support is needed for USB Network Adapter support
-#
-CONFIG_USB_NET_DRIVERS=m
-CONFIG_USB_CATC=m
-CONFIG_USB_KAWETH=m
-CONFIG_USB_PEGASUS=m
-CONFIG_USB_RTL8150=m
-CONFIG_USB_RTL8152=m
-CONFIG_USB_LAN78XX=m
-CONFIG_USB_USBNET=m
-CONFIG_USB_NET_AX8817X=m
-CONFIG_USB_NET_AX88179_178A=m
-CONFIG_USB_NET_CDCETHER=m
-CONFIG_USB_NET_CDC_EEM=m
-CONFIG_USB_NET_CDC_NCM=m
-CONFIG_USB_NET_HUAWEI_CDC_NCM=m
-CONFIG_USB_NET_CDC_MBIM=m
-CONFIG_USB_NET_DM9601=m
-CONFIG_USB_NET_SR9700=m
-CONFIG_USB_NET_SR9800=m
-CONFIG_USB_NET_SMSC75XX=m
-CONFIG_USB_NET_SMSC95XX=m
-CONFIG_USB_NET_GL620A=m
-CONFIG_USB_NET_NET1080=m
-CONFIG_USB_NET_PLUSB=m
-CONFIG_USB_NET_MCS7830=m
-CONFIG_USB_NET_RNDIS_HOST=m
-CONFIG_USB_NET_CDC_SUBSET_ENABLE=m
-CONFIG_USB_NET_CDC_SUBSET=m
-CONFIG_USB_ALI_M5632=y
-CONFIG_USB_AN2720=y
-CONFIG_USB_BELKIN=y
-CONFIG_USB_ARMLINUX=y
-CONFIG_USB_EPSON2888=y
-CONFIG_USB_KC2190=y
-CONFIG_USB_NET_ZAURUS=m
-CONFIG_USB_NET_CX82310_ETH=m
-CONFIG_USB_NET_KALMIA=m
-CONFIG_USB_NET_QMI_WWAN=m
-CONFIG_USB_HSO=m
-CONFIG_USB_NET_INT51X1=m
-CONFIG_USB_CDC_PHONET=m
-CONFIG_USB_IPHETH=m
-CONFIG_USB_SIERRA_NET=m
-CONFIG_USB_VL600=m
-CONFIG_USB_NET_CH9200=m
-CONFIG_WLAN=y
-CONFIG_WLAN_VENDOR_ADMTEK=y
-CONFIG_ADM8211=m
-CONFIG_ATH_COMMON=m
-CONFIG_WLAN_VENDOR_ATH=y
-# CONFIG_ATH_DEBUG is not set
-CONFIG_ATH5K=m
-# CONFIG_ATH5K_DEBUG is not set
-# CONFIG_ATH5K_TRACER is not set
-CONFIG_ATH5K_PCI=y
-CONFIG_ATH9K_HW=m
-CONFIG_ATH9K_COMMON=m
-CONFIG_ATH9K_BTCOEX_SUPPORT=y
-CONFIG_ATH9K=m
-CONFIG_ATH9K_PCI=y
-CONFIG_ATH9K_AHB=y
-# CONFIG_ATH9K_DEBUGFS is not set
-CONFIG_ATH9K_DYNACK=y
-CONFIG_ATH9K_WOW=y
-CONFIG_ATH9K_RFKILL=y
-CONFIG_ATH9K_CHANNEL_CONTEXT=y
-CONFIG_ATH9K_PCOEM=y
-CONFIG_ATH9K_HTC=m
-# CONFIG_ATH9K_HTC_DEBUGFS is not set
-CONFIG_ATH9K_HWRNG=y
-CONFIG_CARL9170=m
-CONFIG_CARL9170_LEDS=y
-CONFIG_CARL9170_WPC=y
-# CONFIG_CARL9170_HWRNG is not set
-CONFIG_ATH6KL=m
-CONFIG_ATH6KL_SDIO=m
-CONFIG_ATH6KL_USB=m
-# CONFIG_ATH6KL_DEBUG is not set
-# CONFIG_ATH6KL_TRACING is not set
-CONFIG_AR5523=m
-CONFIG_WIL6210=m
-CONFIG_WIL6210_ISR_COR=y
-CONFIG_WIL6210_TRACING=y
-CONFIG_WIL6210_DEBUGFS=y
-CONFIG_ATH10K=m
-CONFIG_ATH10K_PCI=m
-CONFIG_ATH10K_SDIO=m
-CONFIG_ATH10K_USB=m
-# CONFIG_ATH10K_DEBUG is not set
-# CONFIG_ATH10K_DEBUGFS is not set
-# CONFIG_ATH10K_TRACING is not set
-CONFIG_WCN36XX=m
-# CONFIG_WCN36XX_DEBUGFS is not set
-CONFIG_WLAN_VENDOR_ATMEL=y
-CONFIG_ATMEL=m
-CONFIG_PCI_ATMEL=m
-CONFIG_PCMCIA_ATMEL=m
-CONFIG_AT76C50X_USB=m
-CONFIG_WLAN_VENDOR_BROADCOM=y
-CONFIG_B43=m
-CONFIG_B43_BCMA=y
-CONFIG_B43_SSB=y
-CONFIG_B43_BUSES_BCMA_AND_SSB=y
-# CONFIG_B43_BUSES_BCMA is not set
-# CONFIG_B43_BUSES_SSB is not set
-CONFIG_B43_PCI_AUTOSELECT=y
-CONFIG_B43_PCICORE_AUTOSELECT=y
-CONFIG_B43_SDIO=y
-CONFIG_B43_BCMA_PIO=y
-CONFIG_B43_PIO=y
-CONFIG_B43_PHY_G=y
-CONFIG_B43_PHY_N=y
-CONFIG_B43_PHY_LP=y
-CONFIG_B43_PHY_HT=y
-CONFIG_B43_LEDS=y
-CONFIG_B43_HWRNG=y
-# CONFIG_B43_DEBUG is not set
-CONFIG_B43LEGACY=m
-CONFIG_B43LEGACY_PCI_AUTOSELECT=y
-CONFIG_B43LEGACY_PCICORE_AUTOSELECT=y
-CONFIG_B43LEGACY_LEDS=y
-CONFIG_B43LEGACY_HWRNG=y
-# CONFIG_B43LEGACY_DEBUG is not set
-CONFIG_B43LEGACY_DMA=y
-CONFIG_B43LEGACY_PIO=y
-CONFIG_B43LEGACY_DMA_AND_PIO_MODE=y
-# CONFIG_B43LEGACY_DMA_MODE is not set
-# CONFIG_B43LEGACY_PIO_MODE is not set
-CONFIG_BRCMUTIL=m
-CONFIG_BRCMSMAC=m
-CONFIG_BRCMFMAC=m
-CONFIG_BRCMFMAC_PROTO_BCDC=y
-CONFIG_BRCMFMAC_PROTO_MSGBUF=y
-CONFIG_BRCMFMAC_SDIO=y
-CONFIG_BRCMFMAC_USB=y
-CONFIG_BRCMFMAC_PCIE=y
-CONFIG_BRCM_TRACING=y
-# CONFIG_BRCMDBG is not set
-CONFIG_WLAN_VENDOR_CISCO=y
-CONFIG_AIRO=m
-CONFIG_AIRO_CS=m
-CONFIG_WLAN_VENDOR_INTEL=y
-CONFIG_IPW2100=m
-CONFIG_IPW2100_MONITOR=y
-# CONFIG_IPW2100_DEBUG is not set
-CONFIG_IPW2200=m
-CONFIG_IPW2200_MONITOR=y
-CONFIG_IPW2200_RADIOTAP=y
-CONFIG_IPW2200_PROMISCUOUS=y
-CONFIG_IPW2200_QOS=y
-# CONFIG_IPW2200_DEBUG is not set
-CONFIG_LIBIPW=m
-# CONFIG_LIBIPW_DEBUG is not set
-CONFIG_IWLEGACY=m
-CONFIG_IWL4965=m
-CONFIG_IWL3945=m
-
-#
-# iwl3945 / iwl4965 Debugging Options
-#
-# CONFIG_IWLEGACY_DEBUG is not set
-CONFIG_IWLWIFI=m
-CONFIG_IWLWIFI_LEDS=y
-CONFIG_IWLDVM=m
-CONFIG_IWLMVM=m
-CONFIG_IWLWIFI_OPMODE_MODULAR=y
-CONFIG_IWLWIFI_BCAST_FILTERING=y
-
-#
-# Debugging Options
-#
-# CONFIG_IWLWIFI_DEBUG is not set
-CONFIG_IWLWIFI_DEVICE_TRACING=y
-CONFIG_WLAN_VENDOR_INTERSIL=y
-CONFIG_HOSTAP=m
-CONFIG_HOSTAP_FIRMWARE=y
-CONFIG_HOSTAP_FIRMWARE_NVRAM=y
-CONFIG_HOSTAP_PLX=m
-CONFIG_HOSTAP_PCI=m
-CONFIG_HOSTAP_CS=m
-CONFIG_HERMES=m
-CONFIG_HERMES_PRISM=y
-CONFIG_HERMES_CACHE_FW_ON_INIT=y
-CONFIG_PLX_HERMES=m
-CONFIG_TMD_HERMES=m
-CONFIG_NORTEL_HERMES=m
-CONFIG_PCI_HERMES=m
-CONFIG_PCMCIA_HERMES=m
-CONFIG_PCMCIA_SPECTRUM=m
-CONFIG_ORINOCO_USB=m
-CONFIG_P54_COMMON=m
-CONFIG_P54_USB=m
-CONFIG_P54_PCI=m
-CONFIG_P54_SPI=m
-CONFIG_P54_SPI_DEFAULT_EEPROM=y
-CONFIG_P54_LEDS=y
-CONFIG_PRISM54=m
-CONFIG_WLAN_VENDOR_MARVELL=y
-CONFIG_LIBERTAS=m
-CONFIG_LIBERTAS_USB=m
-CONFIG_LIBERTAS_CS=m
-CONFIG_LIBERTAS_SDIO=m
-CONFIG_LIBERTAS_SPI=m
-# CONFIG_LIBERTAS_DEBUG is not set
-CONFIG_LIBERTAS_MESH=y
-CONFIG_LIBERTAS_THINFIRM=m
-# CONFIG_LIBERTAS_THINFIRM_DEBUG is not set
-CONFIG_LIBERTAS_THINFIRM_USB=m
-CONFIG_MWIFIEX=m
-CONFIG_MWIFIEX_SDIO=m
-CONFIG_MWIFIEX_PCIE=m
-CONFIG_MWIFIEX_USB=m
-CONFIG_MWL8K=m
-CONFIG_WLAN_VENDOR_MEDIATEK=y
-CONFIG_MT7601U=m
-CONFIG_WLAN_VENDOR_RALINK=y
-CONFIG_RT2X00=m
-CONFIG_RT2400PCI=m
-CONFIG_RT2500PCI=m
-CONFIG_RT61PCI=m
-CONFIG_RT2800PCI=m
-CONFIG_RT2800PCI_RT33XX=y
-CONFIG_RT2800PCI_RT35XX=y
-CONFIG_RT2800PCI_RT53XX=y
-CONFIG_RT2800PCI_RT3290=y
-CONFIG_RT2500USB=m
-CONFIG_RT73USB=m
-CONFIG_RT2800USB=m
-CONFIG_RT2800USB_RT33XX=y
-CONFIG_RT2800USB_RT35XX=y
-CONFIG_RT2800USB_RT3573=y
-CONFIG_RT2800USB_RT53XX=y
-CONFIG_RT2800USB_RT55XX=y
-CONFIG_RT2800USB_UNKNOWN=y
-CONFIG_RT2800_LIB=m
-CONFIG_RT2800_LIB_MMIO=m
-CONFIG_RT2X00_LIB_MMIO=m
-CONFIG_RT2X00_LIB_PCI=m
-CONFIG_RT2X00_LIB_USB=m
-CONFIG_RT2X00_LIB=m
-CONFIG_RT2X00_LIB_FIRMWARE=y
-CONFIG_RT2X00_LIB_CRYPTO=y
-CONFIG_RT2X00_LIB_LEDS=y
-# CONFIG_RT2X00_DEBUG is not set
-CONFIG_WLAN_VENDOR_REALTEK=y
-CONFIG_RTL8180=m
-CONFIG_RTL8187=m
-CONFIG_RTL8187_LEDS=y
-CONFIG_RTL_CARDS=m
-CONFIG_RTL8192CE=m
-CONFIG_RTL8192SE=m
-CONFIG_RTL8192DE=m
-CONFIG_RTL8723AE=m
-CONFIG_RTL8723BE=m
-CONFIG_RTL8188EE=m
-CONFIG_RTL8192EE=m
-CONFIG_RTL8821AE=m
-CONFIG_RTL8192CU=m
-CONFIG_RTLWIFI=m
-CONFIG_RTLWIFI_PCI=m
-CONFIG_RTLWIFI_USB=m
-# CONFIG_RTLWIFI_DEBUG is not set
-CONFIG_RTL8192C_COMMON=m
-CONFIG_RTL8723_COMMON=m
-CONFIG_RTLBTCOEXIST=m
-CONFIG_RTL8XXXU=m
-CONFIG_RTL8XXXU_UNTESTED=y
-CONFIG_WLAN_VENDOR_RSI=y
-CONFIG_RSI_91X=m
-# CONFIG_RSI_DEBUGFS is not set
-CONFIG_RSI_SDIO=m
-CONFIG_RSI_USB=m
-CONFIG_WLAN_VENDOR_ST=y
-CONFIG_CW1200=m
-CONFIG_CW1200_WLAN_SDIO=m
-CONFIG_CW1200_WLAN_SPI=m
-CONFIG_WLAN_VENDOR_TI=y
-CONFIG_WL1251=m
-CONFIG_WL1251_SPI=m
-CONFIG_WL1251_SDIO=m
-CONFIG_WL12XX=m
-CONFIG_WL18XX=m
-CONFIG_WLCORE=m
-CONFIG_WLCORE_SDIO=m
-CONFIG_WILINK_PLATFORM_DATA=y
-CONFIG_WLAN_VENDOR_ZYDAS=y
-CONFIG_USB_ZD1201=m
-CONFIG_ZD1211RW=m
-# CONFIG_ZD1211RW_DEBUG is not set
-CONFIG_WLAN_VENDOR_QUANTENNA=y
-CONFIG_QTNFMAC=m
-CONFIG_QTNFMAC_PEARL_PCIE=m
-CONFIG_PCMCIA_RAYCS=m
-CONFIG_PCMCIA_WL3501=m
-# CONFIG_MAC80211_HWSIM is not set
-CONFIG_USB_NET_RNDIS_WLAN=m
-
-#
-# WiMAX Wireless Broadband devices
-#
-CONFIG_WIMAX_I2400M=m
-CONFIG_WIMAX_I2400M_USB=m
-CONFIG_WIMAX_I2400M_DEBUG_LEVEL=8
-CONFIG_WAN=y
-CONFIG_LANMEDIA=m
-CONFIG_HDLC=m
-CONFIG_HDLC_RAW=m
-CONFIG_HDLC_RAW_ETH=m
-CONFIG_HDLC_CISCO=m
-CONFIG_HDLC_FR=m
-CONFIG_HDLC_PPP=m
-CONFIG_HDLC_X25=m
-CONFIG_PCI200SYN=m
-CONFIG_WANXL=m
-CONFIG_PC300TOO=m
-CONFIG_FARSYNC=m
-CONFIG_DSCC4=m
-CONFIG_DSCC4_PCISYNC=y
-CONFIG_DSCC4_PCI_RST=y
-CONFIG_DLCI=m
-CONFIG_DLCI_MAX=8
-CONFIG_LAPBETHER=m
-CONFIG_X25_ASY=m
-CONFIG_SBNI=m
-CONFIG_SBNI_MULTILINE=y
-CONFIG_IEEE802154_DRIVERS=m
-CONFIG_IEEE802154_FAKELB=m
-CONFIG_IEEE802154_AT86RF230=m
-# CONFIG_IEEE802154_AT86RF230_DEBUGFS is not set
-CONFIG_IEEE802154_MRF24J40=m
-CONFIG_IEEE802154_CC2520=m
-CONFIG_IEEE802154_ATUSB=m
-CONFIG_IEEE802154_ADF7242=m
-CONFIG_IEEE802154_CA8210=m
-# CONFIG_IEEE802154_CA8210_DEBUGFS is not set
-CONFIG_VMXNET3=m
-CONFIG_FUJITSU_ES=m
-CONFIG_HYPERV_NET=m
-CONFIG_ISDN=y
-CONFIG_ISDN_I4L=m
-CONFIG_ISDN_PPP=y
-CONFIG_ISDN_PPP_VJ=y
-CONFIG_ISDN_MPP=y
-CONFIG_IPPP_FILTER=y
-CONFIG_ISDN_PPP_BSDCOMP=m
-CONFIG_ISDN_AUDIO=y
-CONFIG_ISDN_TTY_FAX=y
-CONFIG_ISDN_X25=y
-
-#
-# ISDN feature submodules
-#
-CONFIG_ISDN_DIVERSION=m
-
-#
-# ISDN4Linux hardware drivers
-#
-
-#
-# Passive cards
-#
-CONFIG_ISDN_DRV_HISAX=m
-
-#
-# D-channel protocol features
-#
-CONFIG_HISAX_EURO=y
-CONFIG_DE_AOC=y
-# CONFIG_HISAX_NO_SENDCOMPLETE is not set
-# CONFIG_HISAX_NO_LLC is not set
-# CONFIG_HISAX_NO_KEYPAD is not set
-CONFIG_HISAX_1TR6=y
-CONFIG_HISAX_NI1=y
-CONFIG_HISAX_MAX_CARDS=8
-
-#
-# HiSax supported cards
-#
-CONFIG_HISAX_16_3=y
-CONFIG_HISAX_TELESPCI=y
-CONFIG_HISAX_S0BOX=y
-CONFIG_HISAX_FRITZPCI=y
-CONFIG_HISAX_AVM_A1_PCMCIA=y
-CONFIG_HISAX_ELSA=y
-CONFIG_HISAX_DIEHLDIVA=y
-CONFIG_HISAX_SEDLBAUER=y
-CONFIG_HISAX_NETJET=y
-CONFIG_HISAX_NETJET_U=y
-CONFIG_HISAX_NICCY=y
-CONFIG_HISAX_BKM_A4T=y
-CONFIG_HISAX_SCT_QUADRO=y
-CONFIG_HISAX_GAZEL=y
-CONFIG_HISAX_HFC_PCI=y
-CONFIG_HISAX_W6692=y
-CONFIG_HISAX_HFC_SX=y
-CONFIG_HISAX_ENTERNOW_PCI=y
-# CONFIG_HISAX_DEBUG is not set
-
-#
-# HiSax PCMCIA card service modules
-#
-CONFIG_HISAX_SEDLBAUER_CS=m
-CONFIG_HISAX_ELSA_CS=m
-CONFIG_HISAX_AVM_A1_CS=m
-CONFIG_HISAX_TELES_CS=m
-
-#
-# HiSax sub driver modules
-#
-CONFIG_HISAX_ST5481=m
-CONFIG_HISAX_HFCUSB=m
-CONFIG_HISAX_HFC4S8S=m
-CONFIG_HISAX_FRITZ_PCIPNP=m
-CONFIG_ISDN_CAPI=m
-CONFIG_CAPI_TRACE=y
-CONFIG_ISDN_CAPI_CAPI20=m
-CONFIG_ISDN_CAPI_MIDDLEWARE=y
-CONFIG_ISDN_CAPI_CAPIDRV=m
-# CONFIG_ISDN_CAPI_CAPIDRV_VERBOSE is not set
-
-#
-# CAPI hardware drivers
-#
-CONFIG_CAPI_AVM=y
-CONFIG_ISDN_DRV_AVMB1_B1PCI=m
-CONFIG_ISDN_DRV_AVMB1_B1PCIV4=y
-CONFIG_ISDN_DRV_AVMB1_B1PCMCIA=m
-CONFIG_ISDN_DRV_AVMB1_AVM_CS=m
-CONFIG_ISDN_DRV_AVMB1_T1PCI=m
-CONFIG_ISDN_DRV_AVMB1_C4=m
-CONFIG_CAPI_EICON=y
-CONFIG_ISDN_DIVAS=m
-CONFIG_ISDN_DIVAS_BRIPCI=y
-CONFIG_ISDN_DIVAS_PRIPCI=y
-CONFIG_ISDN_DIVAS_DIVACAPI=m
-CONFIG_ISDN_DIVAS_USERIDI=m
-CONFIG_ISDN_DIVAS_MAINT=m
-CONFIG_ISDN_DRV_GIGASET=m
-CONFIG_GIGASET_CAPI=y
-# CONFIG_GIGASET_I4L is not set
-# CONFIG_GIGASET_DUMMYLL is not set
-CONFIG_GIGASET_BASE=m
-CONFIG_GIGASET_M105=m
-CONFIG_GIGASET_M101=m
-# CONFIG_GIGASET_DEBUG is not set
-CONFIG_HYSDN=m
-CONFIG_HYSDN_CAPI=y
-CONFIG_MISDN=m
-CONFIG_MISDN_DSP=m
-CONFIG_MISDN_L1OIP=m
-
-#
-# mISDN hardware drivers
-#
-CONFIG_MISDN_HFCPCI=m
-CONFIG_MISDN_HFCMULTI=m
-CONFIG_MISDN_HFCUSB=m
-CONFIG_MISDN_AVMFRITZ=m
-CONFIG_MISDN_SPEEDFAX=m
-CONFIG_MISDN_INFINEON=m
-CONFIG_MISDN_W6692=m
-CONFIG_MISDN_NETJET=m
-CONFIG_MISDN_IPAC=m
-CONFIG_MISDN_ISAR=m
-CONFIG_ISDN_HDLC=m
-CONFIG_NVM=y
-# CONFIG_NVM_DEBUG is not set
-CONFIG_NVM_RRPC=m
-CONFIG_NVM_PBLK=m
-
-#
-# Input device support
-#
-CONFIG_INPUT=y
-CONFIG_INPUT_LEDS=y
-CONFIG_INPUT_FF_MEMLESS=m
-CONFIG_INPUT_POLLDEV=m
-CONFIG_INPUT_SPARSEKMAP=m
-CONFIG_INPUT_MATRIXKMAP=m
-
-#
-# Userland interfaces
-#
-CONFIG_INPUT_MOUSEDEV=y
-CONFIG_INPUT_MOUSEDEV_PSAUX=y
-CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
-CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
-CONFIG_INPUT_JOYDEV=m
-CONFIG_INPUT_EVDEV=m
-# CONFIG_INPUT_EVBUG is not set
-
-#
-# Input Device Drivers
-#
-CONFIG_INPUT_KEYBOARD=y
-CONFIG_KEYBOARD_ADC=m
-CONFIG_KEYBOARD_ADP5588=m
-CONFIG_KEYBOARD_ADP5589=m
-CONFIG_KEYBOARD_ATKBD=y
-CONFIG_KEYBOARD_QT1070=m
-CONFIG_KEYBOARD_QT2160=m
-CONFIG_KEYBOARD_DLINK_DIR685=m
-CONFIG_KEYBOARD_LKKBD=m
-CONFIG_KEYBOARD_GPIO=m
-CONFIG_KEYBOARD_GPIO_POLLED=m
-CONFIG_KEYBOARD_TCA6416=m
-CONFIG_KEYBOARD_TCA8418=m
-CONFIG_KEYBOARD_MATRIX=m
-CONFIG_KEYBOARD_LM8323=m
-CONFIG_KEYBOARD_LM8333=m
-CONFIG_KEYBOARD_MAX7359=m
-CONFIG_KEYBOARD_MCS=m
-CONFIG_KEYBOARD_MPR121=m
-CONFIG_KEYBOARD_NEWTON=m
-CONFIG_KEYBOARD_OPENCORES=m
-CONFIG_KEYBOARD_SAMSUNG=m
-CONFIG_KEYBOARD_STOWAWAY=m
-CONFIG_KEYBOARD_SUNKBD=m
-CONFIG_KEYBOARD_TM2_TOUCHKEY=m
-CONFIG_KEYBOARD_XTKBD=m
-CONFIG_KEYBOARD_CROS_EC=m
-CONFIG_INPUT_MOUSE=y
-CONFIG_MOUSE_PS2=m
-CONFIG_MOUSE_PS2_ALPS=y
-CONFIG_MOUSE_PS2_BYD=y
-CONFIG_MOUSE_PS2_LOGIPS2PP=y
-CONFIG_MOUSE_PS2_SYNAPTICS=y
-CONFIG_MOUSE_PS2_SYNAPTICS_SMBUS=y
-CONFIG_MOUSE_PS2_CYPRESS=y
-CONFIG_MOUSE_PS2_LIFEBOOK=y
-CONFIG_MOUSE_PS2_TRACKPOINT=y
-CONFIG_MOUSE_PS2_ELANTECH=y
-CONFIG_MOUSE_PS2_SENTELIC=y
-CONFIG_MOUSE_PS2_TOUCHKIT=y
-CONFIG_MOUSE_PS2_FOCALTECH=y
-# CONFIG_MOUSE_PS2_VMMOUSE is not set
-CONFIG_MOUSE_PS2_SMBUS=y
-CONFIG_MOUSE_SERIAL=m
-CONFIG_MOUSE_APPLETOUCH=m
-CONFIG_MOUSE_BCM5974=m
-CONFIG_MOUSE_CYAPA=m
-CONFIG_MOUSE_ELAN_I2C=m
-CONFIG_MOUSE_ELAN_I2C_I2C=y
-CONFIG_MOUSE_ELAN_I2C_SMBUS=y
-CONFIG_MOUSE_VSXXXAA=m
-CONFIG_MOUSE_GPIO=m
-CONFIG_MOUSE_SYNAPTICS_I2C=m
-CONFIG_MOUSE_SYNAPTICS_USB=m
-CONFIG_INPUT_JOYSTICK=y
-CONFIG_JOYSTICK_ANALOG=m
-CONFIG_JOYSTICK_A3D=m
-CONFIG_JOYSTICK_ADI=m
-CONFIG_JOYSTICK_COBRA=m
-CONFIG_JOYSTICK_GF2K=m
-CONFIG_JOYSTICK_GRIP=m
-CONFIG_JOYSTICK_GRIP_MP=m
-CONFIG_JOYSTICK_GUILLEMOT=m
-CONFIG_JOYSTICK_INTERACT=m
-CONFIG_JOYSTICK_SIDEWINDER=m
-CONFIG_JOYSTICK_TMDC=m
-CONFIG_JOYSTICK_IFORCE=m
-CONFIG_JOYSTICK_IFORCE_USB=y
-CONFIG_JOYSTICK_IFORCE_232=y
-CONFIG_JOYSTICK_WARRIOR=m
-CONFIG_JOYSTICK_MAGELLAN=m
-CONFIG_JOYSTICK_SPACEORB=m
-CONFIG_JOYSTICK_SPACEBALL=m
-CONFIG_JOYSTICK_STINGER=m
-CONFIG_JOYSTICK_TWIDJOY=m
-CONFIG_JOYSTICK_ZHENHUA=m
-CONFIG_JOYSTICK_DB9=m
-CONFIG_JOYSTICK_GAMECON=m
-CONFIG_JOYSTICK_TURBOGRAFX=m
-CONFIG_JOYSTICK_AS5011=m
-# CONFIG_JOYSTICK_JOYDUMP is not set
-CONFIG_JOYSTICK_XPAD=m
-CONFIG_JOYSTICK_XPAD_FF=y
-CONFIG_JOYSTICK_XPAD_LEDS=y
-CONFIG_JOYSTICK_WALKERA0701=m
-CONFIG_JOYSTICK_PSXPAD_SPI=m
-CONFIG_JOYSTICK_PSXPAD_SPI_FF=y
-CONFIG_INPUT_TABLET=y
-CONFIG_TABLET_USB_ACECAD=m
-CONFIG_TABLET_USB_AIPTEK=m
-CONFIG_TABLET_USB_GTCO=m
-CONFIG_TABLET_USB_HANWANG=m
-CONFIG_TABLET_USB_KBTAB=m
-CONFIG_TABLET_USB_PEGASUS=m
-CONFIG_TABLET_SERIAL_WACOM4=m
-CONFIG_INPUT_TOUCHSCREEN=y
-CONFIG_TOUCHSCREEN_PROPERTIES=y
-CONFIG_TOUCHSCREEN_ADS7846=m
-CONFIG_TOUCHSCREEN_AD7877=m
-CONFIG_TOUCHSCREEN_AD7879=m
-CONFIG_TOUCHSCREEN_AD7879_I2C=m
-CONFIG_TOUCHSCREEN_AD7879_SPI=m
-CONFIG_TOUCHSCREEN_ATMEL_MXT=m
-# CONFIG_TOUCHSCREEN_ATMEL_MXT_T37 is not set
-CONFIG_TOUCHSCREEN_AUO_PIXCIR=m
-CONFIG_TOUCHSCREEN_BU21013=m
-CONFIG_TOUCHSCREEN_CY8CTMG110=m
-CONFIG_TOUCHSCREEN_CYTTSP_CORE=m
-CONFIG_TOUCHSCREEN_CYTTSP_I2C=m
-CONFIG_TOUCHSCREEN_CYTTSP_SPI=m
-CONFIG_TOUCHSCREEN_CYTTSP4_CORE=m
-CONFIG_TOUCHSCREEN_CYTTSP4_I2C=m
-CONFIG_TOUCHSCREEN_CYTTSP4_SPI=m
-CONFIG_TOUCHSCREEN_DA9052=m
-CONFIG_TOUCHSCREEN_DYNAPRO=m
-CONFIG_TOUCHSCREEN_HAMPSHIRE=m
-CONFIG_TOUCHSCREEN_EETI=m
-CONFIG_TOUCHSCREEN_EGALAX_SERIAL=m
-CONFIG_TOUCHSCREEN_FUJITSU=m
-CONFIG_TOUCHSCREEN_GOODIX=m
-CONFIG_TOUCHSCREEN_ILI210X=m
-CONFIG_TOUCHSCREEN_GUNZE=m
-CONFIG_TOUCHSCREEN_EKTF2127=m
-CONFIG_TOUCHSCREEN_ELAN=m
-CONFIG_TOUCHSCREEN_ELO=m
-CONFIG_TOUCHSCREEN_WACOM_W8001=m
-CONFIG_TOUCHSCREEN_WACOM_I2C=m
-CONFIG_TOUCHSCREEN_MAX11801=m
-CONFIG_TOUCHSCREEN_MCS5000=m
-CONFIG_TOUCHSCREEN_MMS114=m
-CONFIG_TOUCHSCREEN_MELFAS_MIP4=m
-CONFIG_TOUCHSCREEN_MTOUCH=m
-CONFIG_TOUCHSCREEN_INEXIO=m
-CONFIG_TOUCHSCREEN_MK712=m
-CONFIG_TOUCHSCREEN_PENMOUNT=m
-CONFIG_TOUCHSCREEN_EDT_FT5X06=m
-CONFIG_TOUCHSCREEN_TOUCHRIGHT=m
-CONFIG_TOUCHSCREEN_TOUCHWIN=m
-CONFIG_TOUCHSCREEN_TI_AM335X_TSC=m
-CONFIG_TOUCHSCREEN_UCB1400=m
-CONFIG_TOUCHSCREEN_PIXCIR=m
-CONFIG_TOUCHSCREEN_WDT87XX_I2C=m
-CONFIG_TOUCHSCREEN_WM831X=m
-CONFIG_TOUCHSCREEN_WM97XX=m
-CONFIG_TOUCHSCREEN_WM9705=y
-CONFIG_TOUCHSCREEN_WM9712=y
-CONFIG_TOUCHSCREEN_WM9713=y
-CONFIG_TOUCHSCREEN_USB_COMPOSITE=m
-CONFIG_TOUCHSCREEN_MC13783=m
-CONFIG_TOUCHSCREEN_USB_EGALAX=y
-CONFIG_TOUCHSCREEN_USB_PANJIT=y
-CONFIG_TOUCHSCREEN_USB_3M=y
-CONFIG_TOUCHSCREEN_USB_ITM=y
-CONFIG_TOUCHSCREEN_USB_ETURBO=y
-CONFIG_TOUCHSCREEN_USB_GUNZE=y
-CONFIG_TOUCHSCREEN_USB_DMC_TSC10=y
-CONFIG_TOUCHSCREEN_USB_IRTOUCH=y
-CONFIG_TOUCHSCREEN_USB_IDEALTEK=y
-CONFIG_TOUCHSCREEN_USB_GENERAL_TOUCH=y
-CONFIG_TOUCHSCREEN_USB_GOTOP=y
-CONFIG_TOUCHSCREEN_USB_JASTEC=y
-CONFIG_TOUCHSCREEN_USB_ELO=y
-CONFIG_TOUCHSCREEN_USB_E2I=y
-CONFIG_TOUCHSCREEN_USB_ZYTRONIC=y
-CONFIG_TOUCHSCREEN_USB_ETT_TC45USB=y
-CONFIG_TOUCHSCREEN_USB_NEXIO=y
-CONFIG_TOUCHSCREEN_USB_EASYTOUCH=y
-CONFIG_TOUCHSCREEN_TOUCHIT213=m
-CONFIG_TOUCHSCREEN_TSC_SERIO=m
-CONFIG_TOUCHSCREEN_TSC200X_CORE=m
-CONFIG_TOUCHSCREEN_TSC2004=m
-CONFIG_TOUCHSCREEN_TSC2005=m
-CONFIG_TOUCHSCREEN_TSC2007=m
-CONFIG_TOUCHSCREEN_TSC2007_IIO=y
-CONFIG_TOUCHSCREEN_PCAP=m
-CONFIG_TOUCHSCREEN_RM_TS=m
-CONFIG_TOUCHSCREEN_SILEAD=m
-CONFIG_TOUCHSCREEN_SIS_I2C=m
-CONFIG_TOUCHSCREEN_ST1232=m
-CONFIG_TOUCHSCREEN_STMFTS=m
-CONFIG_TOUCHSCREEN_SUR40=m
-CONFIG_TOUCHSCREEN_SURFACE3_SPI=m
-CONFIG_TOUCHSCREEN_SX8654=m
-CONFIG_TOUCHSCREEN_TPS6507X=m
-CONFIG_TOUCHSCREEN_ZET6223=m
-CONFIG_TOUCHSCREEN_ZFORCE=m
-CONFIG_TOUCHSCREEN_ROHM_BU21023=m
-CONFIG_INPUT_MISC=y
-CONFIG_INPUT_88PM80X_ONKEY=m
-CONFIG_INPUT_AD714X=m
-CONFIG_INPUT_AD714X_I2C=m
-CONFIG_INPUT_AD714X_SPI=m
-CONFIG_INPUT_ARIZONA_HAPTICS=m
-CONFIG_INPUT_BMA150=m
-CONFIG_INPUT_E3X0_BUTTON=m
-CONFIG_INPUT_PCSPKR=m
-CONFIG_INPUT_MAX77693_HAPTIC=m
-CONFIG_INPUT_MC13783_PWRBUTTON=m
-CONFIG_INPUT_MMA8450=m
-CONFIG_INPUT_APANEL=m
-CONFIG_INPUT_GP2A=m
-CONFIG_INPUT_GPIO_BEEPER=m
-CONFIG_INPUT_GPIO_TILT_POLLED=m
-CONFIG_INPUT_GPIO_DECODER=m
-CONFIG_INPUT_ATLAS_BTNS=m
-CONFIG_INPUT_ATI_REMOTE2=m
-CONFIG_INPUT_KEYSPAN_REMOTE=m
-CONFIG_INPUT_KXTJ9=m
-CONFIG_INPUT_KXTJ9_POLLED_MODE=y
-CONFIG_INPUT_POWERMATE=m
-CONFIG_INPUT_YEALINK=m
-CONFIG_INPUT_CM109=m
-CONFIG_INPUT_REGULATOR_HAPTIC=m
-CONFIG_INPUT_RETU_PWRBUTTON=m
-CONFIG_INPUT_TPS65218_PWRBUTTON=m
-CONFIG_INPUT_AXP20X_PEK=m
-CONFIG_INPUT_UINPUT=m
-CONFIG_INPUT_PCF50633_PMU=m
-CONFIG_INPUT_PCF8574=m
-CONFIG_INPUT_PWM_BEEPER=m
-CONFIG_INPUT_PWM_VIBRA=m
-CONFIG_INPUT_GPIO_ROTARY_ENCODER=m
-CONFIG_INPUT_DA9052_ONKEY=m
-CONFIG_INPUT_DA9063_ONKEY=m
-CONFIG_INPUT_WM831X_ON=m
-CONFIG_INPUT_PCAP=m
-CONFIG_INPUT_ADXL34X=m
-CONFIG_INPUT_ADXL34X_I2C=m
-CONFIG_INPUT_ADXL34X_SPI=m
-CONFIG_INPUT_IMS_PCU=m
-CONFIG_INPUT_CMA3000=m
-CONFIG_INPUT_CMA3000_I2C=m
-CONFIG_INPUT_IDEAPAD_SLIDEBAR=m
-CONFIG_INPUT_SOC_BUTTON_ARRAY=m
-CONFIG_INPUT_DRV260X_HAPTICS=m
-CONFIG_INPUT_DRV2665_HAPTICS=m
-CONFIG_INPUT_DRV2667_HAPTICS=m
-CONFIG_RMI4_CORE=m
-CONFIG_RMI4_I2C=m
-CONFIG_RMI4_SPI=m
-CONFIG_RMI4_SMB=m
-CONFIG_RMI4_F03=y
-CONFIG_RMI4_F03_SERIO=m
-CONFIG_RMI4_2D_SENSOR=y
-CONFIG_RMI4_F11=y
-CONFIG_RMI4_F12=y
-CONFIG_RMI4_F30=y
-CONFIG_RMI4_F34=y
-CONFIG_RMI4_F54=y
-CONFIG_RMI4_F55=y
-
-#
-# Hardware I/O ports
-#
-CONFIG_SERIO=y
-CONFIG_ARCH_MIGHT_HAVE_PC_SERIO=y
-CONFIG_SERIO_I8042=y
-CONFIG_SERIO_SERPORT=m
-CONFIG_SERIO_CT82C710=m
-CONFIG_SERIO_PARKBD=m
-CONFIG_SERIO_PCIPS2=m
-CONFIG_SERIO_LIBPS2=y
-CONFIG_SERIO_RAW=m
-CONFIG_SERIO_ALTERA_PS2=m
-CONFIG_SERIO_PS2MULT=m
-CONFIG_SERIO_ARC_PS2=m
-CONFIG_HYPERV_KEYBOARD=m
-CONFIG_SERIO_GPIO_PS2=m
-CONFIG_USERIO=m
-CONFIG_GAMEPORT=m
-CONFIG_GAMEPORT_NS558=m
-CONFIG_GAMEPORT_L4=m
-CONFIG_GAMEPORT_EMU10K1=m
-CONFIG_GAMEPORT_FM801=m
-
-#
-# Character devices
-#
-CONFIG_TTY=y
-CONFIG_VT=y
-CONFIG_CONSOLE_TRANSLATIONS=y
-CONFIG_VT_CONSOLE=y
-CONFIG_VT_CONSOLE_SLEEP=y
-CONFIG_HW_CONSOLE=y
-CONFIG_VT_HW_CONSOLE_BINDING=y
-CONFIG_UNIX98_PTYS=y
-CONFIG_LEGACY_PTYS=y
-CONFIG_LEGACY_PTY_COUNT=256
-CONFIG_SERIAL_NONSTANDARD=y
-CONFIG_ROCKETPORT=m
-CONFIG_CYCLADES=m
-CONFIG_CYZ_INTR=y
-CONFIG_MOXA_INTELLIO=m
-CONFIG_MOXA_SMARTIO=m
-CONFIG_SYNCLINK=m
-CONFIG_SYNCLINKMP=m
-CONFIG_SYNCLINK_GT=m
-CONFIG_NOZOMI=m
-CONFIG_ISI=m
-CONFIG_N_HDLC=m
-CONFIG_N_GSM=m
-CONFIG_TRACE_ROUTER=m
-CONFIG_TRACE_SINK=m
-CONFIG_DEVMEM=y
-# CONFIG_DEVKMEM is not set
-
-#
-# Serial drivers
-#
-CONFIG_SERIAL_EARLYCON=y
-CONFIG_SERIAL_8250=y
-# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
-CONFIG_SERIAL_8250_PNP=y
-CONFIG_SERIAL_8250_FINTEK=y
-CONFIG_SERIAL_8250_CONSOLE=y
-CONFIG_SERIAL_8250_DMA=y
-CONFIG_SERIAL_8250_PCI=y
-CONFIG_SERIAL_8250_EXAR=y
-CONFIG_SERIAL_8250_CS=m
-CONFIG_SERIAL_8250_MEN_MCB=m
-CONFIG_SERIAL_8250_NR_UARTS=4
-CONFIG_SERIAL_8250_RUNTIME_UARTS=4
-CONFIG_SERIAL_8250_EXTENDED=y
-CONFIG_SERIAL_8250_MANY_PORTS=y
-CONFIG_SERIAL_8250_SHARE_IRQ=y
-CONFIG_SERIAL_8250_DETECT_IRQ=y
-CONFIG_SERIAL_8250_RSA=y
-# CONFIG_SERIAL_8250_FSL is not set
-CONFIG_SERIAL_8250_DW=m
-CONFIG_SERIAL_8250_RT288X=y
-CONFIG_SERIAL_8250_LPSS=y
-CONFIG_SERIAL_8250_MID=y
-CONFIG_SERIAL_8250_MOXA=m
-
-#
-# Non-8250 serial port support
-#
-CONFIG_SERIAL_MAX3100=m
-CONFIG_SERIAL_MAX310X=y
-CONFIG_SERIAL_UARTLITE=m
-CONFIG_SERIAL_UARTLITE_NR_UARTS=1
-CONFIG_SERIAL_CORE=y
-CONFIG_SERIAL_CORE_CONSOLE=y
-CONFIG_SERIAL_JSM=m
-CONFIG_SERIAL_SCCNXP=m
-CONFIG_SERIAL_SC16IS7XX_CORE=m
-CONFIG_SERIAL_SC16IS7XX=m
-CONFIG_SERIAL_SC16IS7XX_I2C=y
-CONFIG_SERIAL_SC16IS7XX_SPI=y
-CONFIG_SERIAL_ALTERA_JTAGUART=m
-CONFIG_SERIAL_ALTERA_UART=m
-CONFIG_SERIAL_ALTERA_UART_MAXPORTS=4
-CONFIG_SERIAL_ALTERA_UART_BAUDRATE=115200
-CONFIG_SERIAL_IFX6X60=m
-CONFIG_SERIAL_ARC=m
-CONFIG_SERIAL_ARC_NR_PORTS=1
-CONFIG_SERIAL_RP2=m
-CONFIG_SERIAL_RP2_NR_UARTS=32
-CONFIG_SERIAL_FSL_LPUART=m
-CONFIG_SERIAL_MEN_Z135=m
-CONFIG_SERIAL_DEV_BUS=m
-CONFIG_PRINTER=m
-CONFIG_LP_CONSOLE=y
-CONFIG_PPDEV=m
-CONFIG_HVC_DRIVER=y
-CONFIG_VIRTIO_CONSOLE=m
-CONFIG_IPMI_HANDLER=m
-CONFIG_IPMI_DMI_DECODE=y
-CONFIG_IPMI_PANIC_EVENT=y
-CONFIG_IPMI_PANIC_STRING=y
-CONFIG_IPMI_DEVICE_INTERFACE=m
-CONFIG_IPMI_SI=m
-CONFIG_IPMI_SSIF=m
-CONFIG_IPMI_WATCHDOG=m
-CONFIG_IPMI_POWEROFF=m
-CONFIG_HW_RANDOM=m
-CONFIG_HW_RANDOM_TIMERIOMEM=m
-CONFIG_HW_RANDOM_INTEL=m
-CONFIG_HW_RANDOM_AMD=m
-CONFIG_HW_RANDOM_VIA=m
-CONFIG_HW_RANDOM_VIRTIO=m
-CONFIG_HW_RANDOM_TPM=m
-CONFIG_NVRAM=m
-CONFIG_R3964=m
-CONFIG_APPLICOM=m
-
-#
-# PCMCIA character devices
-#
-CONFIG_SYNCLINK_CS=m
-CONFIG_CARDMAN_4000=m
-CONFIG_CARDMAN_4040=m
-CONFIG_SCR24X=m
-CONFIG_IPWIRELESS=m
-CONFIG_MWAVE=m
-CONFIG_RAW_DRIVER=m
-CONFIG_MAX_RAW_DEVS=256
-CONFIG_HPET=y
-CONFIG_HPET_MMAP=y
-CONFIG_HPET_MMAP_DEFAULT=y
-CONFIG_HANGCHECK_TIMER=m
-CONFIG_TCG_TPM=m
-CONFIG_TCG_TIS_CORE=m
-CONFIG_TCG_TIS=m
-CONFIG_TCG_TIS_SPI=m
-CONFIG_TCG_TIS_I2C_ATMEL=m
-CONFIG_TCG_TIS_I2C_INFINEON=m
-CONFIG_TCG_TIS_I2C_NUVOTON=m
-CONFIG_TCG_NSC=m
-CONFIG_TCG_ATMEL=m
-CONFIG_TCG_INFINEON=m
-CONFIG_TCG_CRB=m
-CONFIG_TCG_VTPM_PROXY=m
-CONFIG_TCG_TIS_ST33ZP24=m
-CONFIG_TCG_TIS_ST33ZP24_I2C=m
-CONFIG_TCG_TIS_ST33ZP24_SPI=m
-CONFIG_TELCLOCK=m
-CONFIG_DEVPORT=y
-CONFIG_XILLYBUS=m
-CONFIG_XILLYBUS_PCIE=m
-
-#
-# I2C support
-#
-CONFIG_I2C=m
-CONFIG_I2C_BOARDINFO=y
-CONFIG_I2C_COMPAT=y
-CONFIG_I2C_CHARDEV=m
-CONFIG_I2C_MUX=m
-
-#
-# Multiplexer I2C Chip support
-#
-CONFIG_I2C_MUX_GPIO=m
-CONFIG_I2C_MUX_LTC4306=m
-CONFIG_I2C_MUX_PCA9541=m
-CONFIG_I2C_MUX_PCA954x=m
-CONFIG_I2C_MUX_REG=m
-CONFIG_I2C_MUX_MLXCPLD=m
-CONFIG_I2C_HELPER_AUTO=y
-CONFIG_I2C_SMBUS=m
-CONFIG_I2C_ALGOBIT=m
-CONFIG_I2C_ALGOPCA=m
-
-#
-# I2C Hardware Bus support
-#
-
-#
-# PC SMBus host controller drivers
-#
-CONFIG_I2C_ALI1535=m
-CONFIG_I2C_ALI1563=m
-CONFIG_I2C_ALI15X3=m
-CONFIG_I2C_AMD756=m
-CONFIG_I2C_AMD756_S4882=m
-CONFIG_I2C_AMD8111=m
-CONFIG_I2C_I801=m
-CONFIG_I2C_ISCH=m
-CONFIG_I2C_ISMT=m
-CONFIG_I2C_PIIX4=m
-CONFIG_I2C_NFORCE2=m
-CONFIG_I2C_NFORCE2_S4985=m
-CONFIG_I2C_SIS5595=m
-CONFIG_I2C_SIS630=m
-CONFIG_I2C_SIS96X=m
-CONFIG_I2C_VIA=m
-CONFIG_I2C_VIAPRO=m
-
-#
-# ACPI drivers
-#
-CONFIG_I2C_SCMI=m
-
-#
-# I2C system bus drivers (mostly embedded / system-on-chip)
-#
-CONFIG_I2C_CBUS_GPIO=m
-CONFIG_I2C_DESIGNWARE_CORE=m
-CONFIG_I2C_DESIGNWARE_PLATFORM=m
-# CONFIG_I2C_DESIGNWARE_SLAVE is not set
-CONFIG_I2C_DESIGNWARE_PCI=m
-# CONFIG_I2C_DESIGNWARE_BAYTRAIL is not set
-CONFIG_I2C_EMEV2=m
-CONFIG_I2C_GPIO=m
-CONFIG_I2C_KEMPLD=m
-CONFIG_I2C_OCORES=m
-CONFIG_I2C_PCA_PLATFORM=m
-# CONFIG_I2C_PXA_PCI is not set
-CONFIG_I2C_SIMTEC=m
-CONFIG_I2C_XILINX=m
-
-#
-# External I2C/SMBus adapter drivers
-#
-CONFIG_I2C_DIOLAN_U2C=m
-CONFIG_I2C_DLN2=m
-CONFIG_I2C_PARPORT=m
-CONFIG_I2C_PARPORT_LIGHT=m
-CONFIG_I2C_ROBOTFUZZ_OSIF=m
-CONFIG_I2C_TAOS_EVM=m
-CONFIG_I2C_TINY_USB=m
-CONFIG_I2C_VIPERBOARD=m
-
-#
-# Other I2C/SMBus bus drivers
-#
-CONFIG_I2C_MLXCPLD=m
-CONFIG_I2C_CROS_EC_TUNNEL=m
-# CONFIG_I2C_STUB is not set
-CONFIG_I2C_SLAVE=y
-CONFIG_I2C_SLAVE_EEPROM=m
-# CONFIG_I2C_DEBUG_CORE is not set
-# CONFIG_I2C_DEBUG_ALGO is not set
-# CONFIG_I2C_DEBUG_BUS is not set
-CONFIG_SPI=y
-# CONFIG_SPI_DEBUG is not set
-CONFIG_SPI_MASTER=y
-
-#
-# SPI Master Controller Drivers
-#
-CONFIG_SPI_ALTERA=m
-CONFIG_SPI_AXI_SPI_ENGINE=m
-CONFIG_SPI_BITBANG=m
-CONFIG_SPI_BUTTERFLY=m
-CONFIG_SPI_CADENCE=m
-CONFIG_SPI_DESIGNWARE=m
-CONFIG_SPI_DW_PCI=m
-CONFIG_SPI_DW_MID_DMA=y
-CONFIG_SPI_DW_MMIO=m
-CONFIG_SPI_DLN2=m
-CONFIG_SPI_GPIO=m
-CONFIG_SPI_LM70_LLP=m
-CONFIG_SPI_OC_TINY=m
-CONFIG_SPI_PXA2XX=m
-CONFIG_SPI_PXA2XX_PCI=m
-CONFIG_SPI_ROCKCHIP=m
-CONFIG_SPI_SC18IS602=m
-CONFIG_SPI_XCOMM=m
-CONFIG_SPI_XILINX=m
-CONFIG_SPI_ZYNQMP_GQSPI=m
-
-#
-# SPI Protocol Masters
-#
-CONFIG_SPI_SPIDEV=m
-CONFIG_SPI_LOOPBACK_TEST=m
-CONFIG_SPI_TLE62X0=m
-CONFIG_SPI_SLAVE=y
-CONFIG_SPI_SLAVE_TIME=m
-CONFIG_SPI_SLAVE_SYSTEM_CONTROL=m
-CONFIG_SPMI=m
-CONFIG_HSI=m
-CONFIG_HSI_BOARDINFO=y
-
-#
-# HSI controllers
-#
-
-#
-# HSI clients
-#
-CONFIG_HSI_CHAR=m
-CONFIG_PPS=m
-# CONFIG_PPS_DEBUG is not set
-# CONFIG_NTP_PPS is not set
-
-#
-# PPS clients support
-#
-# CONFIG_PPS_CLIENT_KTIMER is not set
-CONFIG_PPS_CLIENT_LDISC=m
-CONFIG_PPS_CLIENT_PARPORT=m
-CONFIG_PPS_CLIENT_GPIO=m
-
-#
-# PPS generators support
-#
-
-#
-# PTP clock support
-#
-CONFIG_PTP_1588_CLOCK=m
-CONFIG_DP83640_PHY=m
-CONFIG_PTP_1588_CLOCK_KVM=m
-CONFIG_PINCTRL=y
-
-#
-# Pin controllers
-#
-CONFIG_PINMUX=y
-CONFIG_PINCONF=y
-CONFIG_GENERIC_PINCONF=y
-# CONFIG_DEBUG_PINCTRL is not set
-CONFIG_PINCTRL_AMD=m
-CONFIG_PINCTRL_MCP23S08=m
-CONFIG_PINCTRL_BAYTRAIL=y
-CONFIG_PINCTRL_CHERRYVIEW=m
-CONFIG_PINCTRL_INTEL=m
-CONFIG_PINCTRL_BROXTON=m
-CONFIG_PINCTRL_CANNONLAKE=m
-CONFIG_PINCTRL_DENVERTON=m
-CONFIG_PINCTRL_GEMINILAKE=m
-CONFIG_PINCTRL_LEWISBURG=m
-CONFIG_PINCTRL_SUNRISEPOINT=m
-CONFIG_GPIOLIB=y
-CONFIG_GPIO_ACPI=y
-CONFIG_GPIOLIB_IRQCHIP=y
-# CONFIG_DEBUG_GPIO is not set
-CONFIG_GPIO_SYSFS=y
-CONFIG_GPIO_GENERIC=m
-CONFIG_GPIO_MAX730X=m
-
-#
-# Memory mapped GPIO drivers
-#
-CONFIG_GPIO_AMDPT=m
-CONFIG_GPIO_AXP209=m
-CONFIG_GPIO_DWAPB=m
-CONFIG_GPIO_EXAR=m
-CONFIG_GPIO_GENERIC_PLATFORM=m
-CONFIG_GPIO_ICH=m
-CONFIG_GPIO_LYNXPOINT=y
-CONFIG_GPIO_MENZ127=m
-CONFIG_GPIO_MOCKUP=m
-CONFIG_GPIO_VX855=m
-
-#
-# Port-mapped I/O GPIO drivers
-#
-CONFIG_GPIO_F7188X=m
-CONFIG_GPIO_IT87=m
-CONFIG_GPIO_SCH=m
-CONFIG_GPIO_SCH311X=m
-
-#
-# I2C GPIO expanders
-#
-CONFIG_GPIO_ADP5588=m
-CONFIG_GPIO_MAX7300=m
-CONFIG_GPIO_MAX732X=m
-CONFIG_GPIO_PCA953X=m
-CONFIG_GPIO_PCF857X=m
-CONFIG_GPIO_TPIC2810=m
-
-#
-# MFD GPIO expanders
-#
-CONFIG_GPIO_ARIZONA=m
-CONFIG_GPIO_BD9571MWV=m
-CONFIG_GPIO_DA9052=m
-CONFIG_GPIO_DLN2=m
-CONFIG_GPIO_JANZ_TTL=m
-CONFIG_GPIO_KEMPLD=m
-CONFIG_GPIO_LP3943=m
-CONFIG_GPIO_LP873X=m
-CONFIG_GPIO_TPS65086=m
-CONFIG_GPIO_TPS65218=m
-CONFIG_GPIO_TPS65912=m
-CONFIG_GPIO_UCB1400=m
-CONFIG_GPIO_WHISKEY_COVE=m
-CONFIG_GPIO_WM831X=m
-CONFIG_GPIO_WM8994=m
-
-#
-# PCI GPIO expanders
-#
-CONFIG_GPIO_AMD8111=m
-CONFIG_GPIO_ML_IOH=m
-CONFIG_GPIO_PCI_IDIO_16=m
-CONFIG_GPIO_RDC321X=m
-
-#
-# SPI GPIO expanders
-#
-CONFIG_GPIO_MAX7301=m
-CONFIG_GPIO_MC33880=m
-CONFIG_GPIO_PISOSR=m
-CONFIG_GPIO_XRA1403=m
-
-#
-# USB GPIO expanders
-#
-CONFIG_GPIO_VIPERBOARD=m
-CONFIG_W1=m
-CONFIG_W1_CON=y
-
-#
-# 1-wire Bus Masters
-#
-CONFIG_W1_MASTER_MATROX=m
-CONFIG_W1_MASTER_DS2490=m
-CONFIG_W1_MASTER_DS2482=m
-CONFIG_W1_MASTER_DS1WM=m
-CONFIG_W1_MASTER_GPIO=m
-
-#
-# 1-wire Slaves
-#
-CONFIG_W1_SLAVE_THERM=m
-CONFIG_W1_SLAVE_SMEM=m
-# CONFIG_W1_SLAVE_DS2405 is not set
-CONFIG_W1_SLAVE_DS2408=m
-# CONFIG_W1_SLAVE_DS2408_READBACK is not set
-CONFIG_W1_SLAVE_DS2413=m
-CONFIG_W1_SLAVE_DS2406=m
-CONFIG_W1_SLAVE_DS2423=m
-CONFIG_W1_SLAVE_DS2805=m
-CONFIG_W1_SLAVE_DS2431=m
-CONFIG_W1_SLAVE_DS2433=m
-CONFIG_W1_SLAVE_DS2433_CRC=y
-CONFIG_W1_SLAVE_DS2438=m
-CONFIG_W1_SLAVE_DS2760=m
-CONFIG_W1_SLAVE_DS2780=m
-CONFIG_W1_SLAVE_DS2781=m
-CONFIG_W1_SLAVE_DS28E04=m
-CONFIG_POWER_AVS=y
-CONFIG_POWER_RESET=y
-# CONFIG_POWER_RESET_RESTART is not set
-CONFIG_POWER_SUPPLY=y
-# CONFIG_POWER_SUPPLY_DEBUG is not set
-CONFIG_PDA_POWER=m
-CONFIG_GENERIC_ADC_BATTERY=m
-CONFIG_WM831X_BACKUP=m
-CONFIG_WM831X_POWER=m
-# CONFIG_TEST_POWER is not set
-CONFIG_BATTERY_DS2760=m
-CONFIG_BATTERY_DS2780=m
-CONFIG_BATTERY_DS2781=m
-CONFIG_BATTERY_DS2782=m
-CONFIG_BATTERY_SBS=m
-CONFIG_CHARGER_SBS=m
-CONFIG_BATTERY_BQ27XXX=m
-CONFIG_BATTERY_BQ27XXX_I2C=m
-CONFIG_BATTERY_BQ27XXX_HDQ=m
-# CONFIG_BATTERY_BQ27XXX_DT_UPDATES_NVM is not set
-CONFIG_BATTERY_DA9052=m
-CONFIG_CHARGER_DA9150=m
-CONFIG_BATTERY_DA9150=m
-CONFIG_CHARGER_AXP20X=m
-CONFIG_BATTERY_AXP20X=m
-CONFIG_AXP20X_POWER=m
-CONFIG_AXP288_CHARGER=m
-CONFIG_AXP288_FUEL_GAUGE=m
-CONFIG_BATTERY_MAX17040=m
-CONFIG_BATTERY_MAX17042=m
-CONFIG_BATTERY_MAX1721X=m
-CONFIG_CHARGER_PCF50633=m
-CONFIG_CHARGER_ISP1704=m
-CONFIG_CHARGER_MAX8903=m
-CONFIG_CHARGER_LP8727=m
-CONFIG_CHARGER_GPIO=m
-CONFIG_CHARGER_MANAGER=y
-CONFIG_CHARGER_LTC3651=m
-CONFIG_CHARGER_MAX14577=m
-CONFIG_CHARGER_MAX77693=m
-CONFIG_CHARGER_BQ2415X=m
-CONFIG_CHARGER_BQ24190=m
-CONFIG_CHARGER_BQ24257=m
-CONFIG_CHARGER_BQ24735=m
-CONFIG_CHARGER_BQ25890=m
-CONFIG_CHARGER_SMB347=m
-CONFIG_CHARGER_TPS65217=m
-CONFIG_BATTERY_GAUGE_LTC2941=m
-CONFIG_BATTERY_RT5033=m
-CONFIG_CHARGER_RT9455=m
-CONFIG_HWMON=m
-CONFIG_HWMON_VID=m
-# CONFIG_HWMON_DEBUG_CHIP is not set
-
-#
-# Native drivers
-#
-CONFIG_SENSORS_ABITUGURU=m
-CONFIG_SENSORS_ABITUGURU3=m
-CONFIG_SENSORS_AD7314=m
-CONFIG_SENSORS_AD7414=m
-CONFIG_SENSORS_AD7418=m
-CONFIG_SENSORS_ADM1021=m
-CONFIG_SENSORS_ADM1025=m
-CONFIG_SENSORS_ADM1026=m
-CONFIG_SENSORS_ADM1029=m
-CONFIG_SENSORS_ADM1031=m
-CONFIG_SENSORS_ADM9240=m
-CONFIG_SENSORS_ADT7X10=m
-CONFIG_SENSORS_ADT7310=m
-CONFIG_SENSORS_ADT7410=m
-CONFIG_SENSORS_ADT7411=m
-CONFIG_SENSORS_ADT7462=m
-CONFIG_SENSORS_ADT7470=m
-CONFIG_SENSORS_ADT7475=m
-CONFIG_SENSORS_ASC7621=m
-CONFIG_SENSORS_K8TEMP=m
-CONFIG_SENSORS_K10TEMP=m
-CONFIG_SENSORS_FAM15H_POWER=m
-CONFIG_SENSORS_APPLESMC=m
-CONFIG_SENSORS_ASB100=m
-CONFIG_SENSORS_ASPEED=m
-CONFIG_SENSORS_ATXP1=m
-CONFIG_SENSORS_DS620=m
-CONFIG_SENSORS_DS1621=m
-CONFIG_SENSORS_DELL_SMM=m
-CONFIG_SENSORS_DA9052_ADC=m
-CONFIG_SENSORS_I5K_AMB=m
-CONFIG_SENSORS_F71805F=m
-CONFIG_SENSORS_F71882FG=m
-CONFIG_SENSORS_F75375S=m
-CONFIG_SENSORS_MC13783_ADC=m
-CONFIG_SENSORS_FSCHMD=m
-CONFIG_SENSORS_FTSTEUTATES=m
-CONFIG_SENSORS_GL518SM=m
-CONFIG_SENSORS_GL520SM=m
-CONFIG_SENSORS_G760A=m
-CONFIG_SENSORS_G762=m
-CONFIG_SENSORS_GPIO_FAN=m
-CONFIG_SENSORS_HIH6130=m
-CONFIG_SENSORS_IBMAEM=m
-CONFIG_SENSORS_IBMPEX=m
-CONFIG_SENSORS_IIO_HWMON=m
-CONFIG_SENSORS_I5500=m
-CONFIG_SENSORS_CORETEMP=m
-CONFIG_SENSORS_IT87=m
-CONFIG_SENSORS_JC42=m
-CONFIG_SENSORS_POWR1220=m
-CONFIG_SENSORS_LINEAGE=m
-CONFIG_SENSORS_LTC2945=m
-CONFIG_SENSORS_LTC2990=m
-CONFIG_SENSORS_LTC4151=m
-CONFIG_SENSORS_LTC4215=m
-CONFIG_SENSORS_LTC4222=m
-CONFIG_SENSORS_LTC4245=m
-CONFIG_SENSORS_LTC4260=m
-CONFIG_SENSORS_LTC4261=m
-CONFIG_SENSORS_MAX1111=m
-CONFIG_SENSORS_MAX16065=m
-CONFIG_SENSORS_MAX1619=m
-CONFIG_SENSORS_MAX1668=m
-CONFIG_SENSORS_MAX197=m
-CONFIG_SENSORS_MAX31722=m
-CONFIG_SENSORS_MAX6639=m
-CONFIG_SENSORS_MAX6642=m
-CONFIG_SENSORS_MAX6650=m
-CONFIG_SENSORS_MAX6697=m
-CONFIG_SENSORS_MAX31790=m
-CONFIG_SENSORS_MCP3021=m
-CONFIG_SENSORS_TC654=m
-CONFIG_SENSORS_MENF21BMC_HWMON=m
-CONFIG_SENSORS_ADCXX=m
-CONFIG_SENSORS_LM63=m
-CONFIG_SENSORS_LM70=m
-CONFIG_SENSORS_LM73=m
-CONFIG_SENSORS_LM75=m
-CONFIG_SENSORS_LM77=m
-CONFIG_SENSORS_LM78=m
-CONFIG_SENSORS_LM80=m
-CONFIG_SENSORS_LM83=m
-CONFIG_SENSORS_LM85=m
-CONFIG_SENSORS_LM87=m
-CONFIG_SENSORS_LM90=m
-CONFIG_SENSORS_LM92=m
-CONFIG_SENSORS_LM93=m
-CONFIG_SENSORS_LM95234=m
-CONFIG_SENSORS_LM95241=m
-CONFIG_SENSORS_LM95245=m
-CONFIG_SENSORS_PC87360=m
-CONFIG_SENSORS_PC87427=m
-CONFIG_SENSORS_NTC_THERMISTOR=m
-CONFIG_SENSORS_NCT6683=m
-CONFIG_SENSORS_NCT6775=m
-CONFIG_SENSORS_NCT7802=m
-CONFIG_SENSORS_NCT7904=m
-CONFIG_SENSORS_PCF8591=m
-CONFIG_PMBUS=m
-CONFIG_SENSORS_PMBUS=m
-CONFIG_SENSORS_ADM1275=m
-CONFIG_SENSORS_IBM_CFFPS=m
-CONFIG_SENSORS_IR35221=m
-CONFIG_SENSORS_LM25066=m
-CONFIG_SENSORS_LTC2978=m
-CONFIG_SENSORS_LTC2978_REGULATOR=y
-CONFIG_SENSORS_LTC3815=m
-CONFIG_SENSORS_MAX16064=m
-CONFIG_SENSORS_MAX20751=m
-CONFIG_SENSORS_MAX34440=m
-CONFIG_SENSORS_MAX8688=m
-CONFIG_SENSORS_TPS40422=m
-CONFIG_SENSORS_TPS53679=m
-CONFIG_SENSORS_UCD9000=m
-CONFIG_SENSORS_UCD9200=m
-CONFIG_SENSORS_ZL6100=m
-CONFIG_SENSORS_SHT15=m
-CONFIG_SENSORS_SHT21=m
-CONFIG_SENSORS_SHT3x=m
-CONFIG_SENSORS_SHTC1=m
-CONFIG_SENSORS_SIS5595=m
-CONFIG_SENSORS_DME1737=m
-CONFIG_SENSORS_EMC1403=m
-CONFIG_SENSORS_EMC2103=m
-CONFIG_SENSORS_EMC6W201=m
-CONFIG_SENSORS_SMSC47M1=m
-CONFIG_SENSORS_SMSC47M192=m
-CONFIG_SENSORS_SMSC47B397=m
-CONFIG_SENSORS_SCH56XX_COMMON=m
-CONFIG_SENSORS_SCH5627=m
-CONFIG_SENSORS_SCH5636=m
-CONFIG_SENSORS_STTS751=m
-CONFIG_SENSORS_SMM665=m
-CONFIG_SENSORS_ADC128D818=m
-CONFIG_SENSORS_ADS1015=m
-CONFIG_SENSORS_ADS7828=m
-CONFIG_SENSORS_ADS7871=m
-CONFIG_SENSORS_AMC6821=m
-CONFIG_SENSORS_INA209=m
-CONFIG_SENSORS_INA2XX=m
-CONFIG_SENSORS_INA3221=m
-CONFIG_SENSORS_TC74=m
-CONFIG_SENSORS_THMC50=m
-CONFIG_SENSORS_TMP102=m
-CONFIG_SENSORS_TMP103=m
-CONFIG_SENSORS_TMP108=m
-CONFIG_SENSORS_TMP401=m
-CONFIG_SENSORS_TMP421=m
-CONFIG_SENSORS_VIA_CPUTEMP=m
-CONFIG_SENSORS_VIA686A=m
-CONFIG_SENSORS_VT1211=m
-CONFIG_SENSORS_VT8231=m
-CONFIG_SENSORS_W83781D=m
-CONFIG_SENSORS_W83791D=m
-CONFIG_SENSORS_W83792D=m
-CONFIG_SENSORS_W83793=m
-CONFIG_SENSORS_W83795=m
-# CONFIG_SENSORS_W83795_FANCTRL is not set
-CONFIG_SENSORS_W83L785TS=m
-CONFIG_SENSORS_W83L786NG=m
-CONFIG_SENSORS_W83627HF=m
-CONFIG_SENSORS_W83627EHF=m
-CONFIG_SENSORS_WM831X=m
-CONFIG_SENSORS_XGENE=m
-
-#
-# ACPI drivers
-#
-CONFIG_SENSORS_ACPI_POWER=m
-CONFIG_SENSORS_ATK0110=m
-CONFIG_THERMAL=y
-CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS=0
-CONFIG_THERMAL_WRITABLE_TRIPS=y
-CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y
-# CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE is not set
-# CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE is not set
-# CONFIG_THERMAL_DEFAULT_GOV_POWER_ALLOCATOR is not set
-CONFIG_THERMAL_GOV_FAIR_SHARE=y
-CONFIG_THERMAL_GOV_STEP_WISE=y
-CONFIG_THERMAL_GOV_BANG_BANG=y
-CONFIG_THERMAL_GOV_USER_SPACE=y
-# CONFIG_THERMAL_GOV_POWER_ALLOCATOR is not set
-CONFIG_CLOCK_THERMAL=y
-CONFIG_DEVFREQ_THERMAL=y
-# CONFIG_THERMAL_EMULATION is not set
-CONFIG_INTEL_POWERCLAMP=m
-CONFIG_X86_PKG_TEMP_THERMAL=m
-CONFIG_INTEL_SOC_DTS_IOSF_CORE=m
-CONFIG_INTEL_SOC_DTS_THERMAL=m
-
-#
-# ACPI INT340X thermal drivers
-#
-CONFIG_INT340X_THERMAL=m
-CONFIG_ACPI_THERMAL_REL=m
-CONFIG_INT3406_THERMAL=m
-CONFIG_INTEL_BXT_PMIC_THERMAL=m
-CONFIG_INTEL_PCH_THERMAL=m
-CONFIG_GENERIC_ADC_THERMAL=m
-CONFIG_WATCHDOG=y
-CONFIG_WATCHDOG_CORE=y
-# CONFIG_WATCHDOG_NOWAYOUT is not set
-CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED=y
-# CONFIG_WATCHDOG_SYSFS is not set
-
-#
-# Watchdog Device Drivers
-#
-CONFIG_SOFT_WATCHDOG=m
-# CONFIG_SOFT_WATCHDOG_PRETIMEOUT is not set
-CONFIG_DA9052_WATCHDOG=m
-CONFIG_DA9063_WATCHDOG=m
-CONFIG_DA9062_WATCHDOG=m
-CONFIG_MENF21BMC_WATCHDOG=m
-CONFIG_WDAT_WDT=m
-CONFIG_WM831X_WATCHDOG=m
-CONFIG_XILINX_WATCHDOG=m
-CONFIG_ZIIRAVE_WATCHDOG=m
-CONFIG_CADENCE_WATCHDOG=m
-CONFIG_DW_WATCHDOG=m
-CONFIG_MAX63XX_WATCHDOG=m
-CONFIG_RETU_WATCHDOG=m
-CONFIG_ACQUIRE_WDT=m
-CONFIG_ADVANTECH_WDT=m
-CONFIG_ALIM1535_WDT=m
-CONFIG_ALIM7101_WDT=m
-CONFIG_F71808E_WDT=m
-# CONFIG_SP5100_TCO is not set
-CONFIG_SBC_FITPC2_WATCHDOG=m
-CONFIG_EUROTECH_WDT=m
-CONFIG_IB700_WDT=m
-CONFIG_IBMASR=m
-CONFIG_WAFER_WDT=m
-CONFIG_I6300ESB_WDT=m
-CONFIG_IE6XX_WDT=m
-CONFIG_ITCO_WDT=m
-CONFIG_ITCO_VENDOR_SUPPORT=y
-CONFIG_IT8712F_WDT=m
-CONFIG_IT87_WDT=m
-CONFIG_HP_WATCHDOG=m
-CONFIG_KEMPLD_WDT=m
-CONFIG_HPWDT_NMI_DECODING=y
-CONFIG_SC1200_WDT=m
-CONFIG_PC87413_WDT=m
-CONFIG_NV_TCO=m
-CONFIG_60XX_WDT=m
-CONFIG_CPU5_WDT=m
-CONFIG_SMSC_SCH311X_WDT=m
-CONFIG_SMSC37B787_WDT=m
-CONFIG_VIA_WDT=m
-CONFIG_W83627HF_WDT=m
-CONFIG_W83877F_WDT=m
-CONFIG_W83977F_WDT=m
-CONFIG_MACHZ_WDT=m
-CONFIG_SBC_EPX_C3_WATCHDOG=m
-CONFIG_INTEL_MEI_WDT=m
-CONFIG_NI903X_WDT=m
-CONFIG_NIC7018_WDT=m
-CONFIG_MEN_A21_WDT=m
-
-#
-# PCI-based Watchdog Cards
-#
-CONFIG_PCIPCWATCHDOG=m
-CONFIG_WDTPCI=m
-
-#
-# USB-based Watchdog Cards
-#
-CONFIG_USBPCWATCHDOG=m
-
-#
-# Watchdog Pretimeout Governors
-#
-CONFIG_WATCHDOG_PRETIMEOUT_GOV=y
-# CONFIG_WATCHDOG_PRETIMEOUT_DEFAULT_GOV_NOOP is not set
-CONFIG_WATCHDOG_PRETIMEOUT_DEFAULT_GOV_PANIC=y
-CONFIG_WATCHDOG_PRETIMEOUT_GOV_NOOP=m
-CONFIG_WATCHDOG_PRETIMEOUT_GOV_PANIC=y
-CONFIG_SSB_POSSIBLE=y
-
-#
-# Sonics Silicon Backplane
-#
-CONFIG_SSB=m
-CONFIG_SSB_SPROM=y
-CONFIG_SSB_BLOCKIO=y
-CONFIG_SSB_PCIHOST_POSSIBLE=y
-CONFIG_SSB_PCIHOST=y
-CONFIG_SSB_B43_PCI_BRIDGE=y
-CONFIG_SSB_PCMCIAHOST_POSSIBLE=y
-CONFIG_SSB_PCMCIAHOST=y
-CONFIG_SSB_SDIOHOST_POSSIBLE=y
-CONFIG_SSB_SDIOHOST=y
-# CONFIG_SSB_DEBUG is not set
-CONFIG_SSB_DRIVER_PCICORE_POSSIBLE=y
-CONFIG_SSB_DRIVER_PCICORE=y
-CONFIG_SSB_DRIVER_GPIO=y
-CONFIG_BCMA_POSSIBLE=y
-CONFIG_BCMA=m
-CONFIG_BCMA_BLOCKIO=y
-CONFIG_BCMA_HOST_PCI_POSSIBLE=y
-CONFIG_BCMA_HOST_PCI=y
-CONFIG_BCMA_HOST_SOC=y
-CONFIG_BCMA_DRIVER_PCI=y
-CONFIG_BCMA_SFLASH=y
-CONFIG_BCMA_DRIVER_GMAC_CMN=y
-CONFIG_BCMA_DRIVER_GPIO=y
-# CONFIG_BCMA_DEBUG is not set
-
-#
-# Multifunction device drivers
-#
-CONFIG_MFD_CORE=y
-CONFIG_MFD_BCM590XX=m
-CONFIG_MFD_BD9571MWV=m
-CONFIG_MFD_AXP20X=m
-CONFIG_MFD_AXP20X_I2C=m
-CONFIG_MFD_CROS_EC=m
-CONFIG_MFD_CROS_EC_I2C=m
-CONFIG_MFD_CROS_EC_SPI=m
-CONFIG_PMIC_DA9052=y
-CONFIG_MFD_DA9052_SPI=y
-CONFIG_MFD_DA9062=m
-CONFIG_MFD_DA9063=m
-CONFIG_MFD_DA9150=m
-CONFIG_MFD_DLN2=m
-CONFIG_MFD_MC13XXX=m
-CONFIG_MFD_MC13XXX_SPI=m
-CONFIG_MFD_MC13XXX_I2C=m
-CONFIG_HTC_PASIC3=m
-CONFIG_MFD_INTEL_QUARK_I2C_GPIO=m
-CONFIG_LPC_ICH=m
-CONFIG_LPC_SCH=m
-CONFIG_INTEL_SOC_PMIC_BXTWC=m
-CONFIG_MFD_INTEL_LPSS=m
-CONFIG_MFD_INTEL_LPSS_ACPI=m
-CONFIG_MFD_INTEL_LPSS_PCI=m
-CONFIG_MFD_JANZ_CMODIO=m
-CONFIG_MFD_KEMPLD=m
-CONFIG_MFD_88PM800=m
-CONFIG_MFD_88PM805=m
-CONFIG_MFD_MAX14577=m
-CONFIG_MFD_MAX77693=m
-CONFIG_MFD_MAX8907=m
-CONFIG_MFD_MT6397=m
-CONFIG_MFD_MENF21BMC=m
-CONFIG_EZX_PCAP=y
-CONFIG_MFD_VIPERBOARD=m
-CONFIG_MFD_RETU=m
-CONFIG_MFD_PCF50633=m
-CONFIG_PCF50633_ADC=m
-CONFIG_PCF50633_GPIO=m
-CONFIG_UCB1400_CORE=m
-CONFIG_MFD_RDC321X=m
-CONFIG_MFD_RTSX_PCI=m
-CONFIG_MFD_RT5033=m
-CONFIG_MFD_RTSX_USB=m
-CONFIG_MFD_SI476X_CORE=m
-CONFIG_MFD_SM501=m
-CONFIG_MFD_SM501_GPIO=y
-CONFIG_MFD_SKY81452=m
-CONFIG_ABX500_CORE=y
-CONFIG_MFD_SYSCON=y
-CONFIG_MFD_TI_AM335X_TSCADC=m
-CONFIG_MFD_LP3943=m
-CONFIG_MFD_TI_LMU=m
-CONFIG_TPS6105X=m
-CONFIG_TPS65010=m
-CONFIG_TPS6507X=m
-CONFIG_MFD_TPS65086=m
-CONFIG_MFD_TPS65217=m
-CONFIG_MFD_TI_LP873X=m
-CONFIG_MFD_TPS65218=m
-CONFIG_MFD_TPS65912=y
-CONFIG_MFD_TPS65912_I2C=m
-CONFIG_MFD_TPS65912_SPI=y
-CONFIG_MFD_WL1273_CORE=m
-CONFIG_MFD_LM3533=m
-# CONFIG_MFD_TMIO is not set
-CONFIG_MFD_VX855=m
-CONFIG_MFD_ARIZONA=y
-CONFIG_MFD_ARIZONA_I2C=m
-CONFIG_MFD_ARIZONA_SPI=m
-CONFIG_MFD_CS47L24=y
-CONFIG_MFD_WM5102=y
-CONFIG_MFD_WM5110=y
-CONFIG_MFD_WM8997=y
-CONFIG_MFD_WM8998=y
-CONFIG_MFD_WM831X=y
-CONFIG_MFD_WM831X_SPI=y
-CONFIG_MFD_WM8994=m
-CONFIG_REGULATOR=y
-# CONFIG_REGULATOR_DEBUG is not set
-CONFIG_REGULATOR_FIXED_VOLTAGE=m
-CONFIG_REGULATOR_VIRTUAL_CONSUMER=m
-CONFIG_REGULATOR_USERSPACE_CONSUMER=m
-CONFIG_REGULATOR_88PM800=m
-CONFIG_REGULATOR_ACT8865=m
-CONFIG_REGULATOR_AD5398=m
-CONFIG_REGULATOR_ANATOP=m
-CONFIG_REGULATOR_ARIZONA_LDO1=m
-CONFIG_REGULATOR_ARIZONA_MICSUPP=m
-CONFIG_REGULATOR_AXP20X=m
-CONFIG_REGULATOR_BCM590XX=m
-CONFIG_REGULATOR_BD9571MWV=m
-CONFIG_REGULATOR_DA9052=m
-CONFIG_REGULATOR_DA9062=m
-CONFIG_REGULATOR_DA9063=m
-CONFIG_REGULATOR_DA9210=m
-CONFIG_REGULATOR_DA9211=m
-CONFIG_REGULATOR_FAN53555=m
-CONFIG_REGULATOR_GPIO=m
-CONFIG_REGULATOR_ISL9305=m
-CONFIG_REGULATOR_ISL6271A=m
-CONFIG_REGULATOR_LM363X=m
-CONFIG_REGULATOR_LP3971=m
-CONFIG_REGULATOR_LP3972=m
-CONFIG_REGULATOR_LP872X=m
-CONFIG_REGULATOR_LP8755=m
-CONFIG_REGULATOR_LTC3589=m
-CONFIG_REGULATOR_LTC3676=m
-CONFIG_REGULATOR_MAX14577=m
-CONFIG_REGULATOR_MAX1586=m
-CONFIG_REGULATOR_MAX8649=m
-CONFIG_REGULATOR_MAX8660=m
-CONFIG_REGULATOR_MAX8907=m
-CONFIG_REGULATOR_MAX8952=m
-CONFIG_REGULATOR_MAX77693=m
-CONFIG_REGULATOR_MC13XXX_CORE=m
-CONFIG_REGULATOR_MC13783=m
-CONFIG_REGULATOR_MC13892=m
-CONFIG_REGULATOR_MT6311=m
-CONFIG_REGULATOR_MT6323=m
-CONFIG_REGULATOR_MT6397=m
-CONFIG_REGULATOR_PCAP=m
-CONFIG_REGULATOR_PCF50633=m
-CONFIG_REGULATOR_PFUZE100=m
-CONFIG_REGULATOR_PV88060=m
-CONFIG_REGULATOR_PV88080=m
-CONFIG_REGULATOR_PV88090=m
-CONFIG_REGULATOR_PWM=m
-CONFIG_REGULATOR_QCOM_SPMI=m
-CONFIG_REGULATOR_RT5033=m
-CONFIG_REGULATOR_SKY81452=m
-CONFIG_REGULATOR_TPS51632=m
-CONFIG_REGULATOR_TPS6105X=m
-CONFIG_REGULATOR_TPS62360=m
-CONFIG_REGULATOR_TPS65023=m
-CONFIG_REGULATOR_TPS6507X=m
-CONFIG_REGULATOR_TPS65086=m
-CONFIG_REGULATOR_TPS65132=m
-CONFIG_REGULATOR_TPS65217=m
-CONFIG_REGULATOR_TPS6524X=m
-CONFIG_REGULATOR_TPS65912=m
-CONFIG_REGULATOR_WM831X=m
-CONFIG_REGULATOR_WM8994=m
-CONFIG_CEC_CORE=m
-CONFIG_RC_CORE=m
-CONFIG_RC_MAP=m
-CONFIG_RC_DECODERS=y
-CONFIG_LIRC=m
-CONFIG_IR_LIRC_CODEC=m
-CONFIG_IR_NEC_DECODER=m
-CONFIG_IR_RC5_DECODER=m
-CONFIG_IR_RC6_DECODER=m
-CONFIG_IR_JVC_DECODER=m
-CONFIG_IR_SONY_DECODER=m
-CONFIG_IR_SANYO_DECODER=m
-CONFIG_IR_SHARP_DECODER=m
-CONFIG_IR_MCE_KBD_DECODER=m
-CONFIG_IR_XMP_DECODER=m
-CONFIG_RC_DEVICES=y
-CONFIG_RC_ATI_REMOTE=m
-CONFIG_IR_ENE=m
-CONFIG_IR_HIX5HD2=m
-CONFIG_IR_IMON=m
-CONFIG_IR_MCEUSB=m
-CONFIG_IR_ITE_CIR=m
-CONFIG_IR_FINTEK=m
-CONFIG_IR_NUVOTON=m
-CONFIG_IR_REDRAT3=m
-CONFIG_IR_SPI=m
-CONFIG_IR_STREAMZAP=m
-CONFIG_IR_WINBOND_CIR=m
-CONFIG_IR_IGORPLUGUSB=m
-CONFIG_IR_IGUANA=m
-CONFIG_IR_TTUSBIR=m
-CONFIG_RC_LOOPBACK=m
-CONFIG_IR_GPIO_CIR=m
-CONFIG_IR_GPIO_TX=m
-CONFIG_IR_PWM_TX=m
-CONFIG_IR_SERIAL=m
-CONFIG_IR_SERIAL_TRANSMITTER=y
-CONFIG_IR_SIR=m
-CONFIG_MEDIA_SUPPORT=m
-
-#
-# Multimedia core support
-#
-CONFIG_MEDIA_CAMERA_SUPPORT=y
-CONFIG_MEDIA_ANALOG_TV_SUPPORT=y
-CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y
-CONFIG_MEDIA_RADIO_SUPPORT=y
-CONFIG_MEDIA_SDR_SUPPORT=y
-CONFIG_MEDIA_CEC_SUPPORT=y
-CONFIG_MEDIA_CEC_RC=y
-CONFIG_MEDIA_CONTROLLER=y
-CONFIG_MEDIA_CONTROLLER_DVB=y
-CONFIG_VIDEO_DEV=m
-CONFIG_VIDEO_V4L2_SUBDEV_API=y
-CONFIG_VIDEO_V4L2=m
-# CONFIG_VIDEO_ADV_DEBUG is not set
-# CONFIG_VIDEO_FIXED_MINOR_RANGES is not set
-CONFIG_VIDEO_PCI_SKELETON=m
-CONFIG_VIDEO_TUNER=m
-CONFIG_V4L2_MEM2MEM_DEV=m
-CONFIG_V4L2_FLASH_LED_CLASS=m
-CONFIG_V4L2_FWNODE=m
-CONFIG_VIDEOBUF_GEN=m
-CONFIG_VIDEOBUF_DMA_SG=m
-CONFIG_VIDEOBUF_VMALLOC=m
-CONFIG_VIDEOBUF_DVB=m
-CONFIG_VIDEOBUF2_CORE=m
-CONFIG_VIDEOBUF2_MEMOPS=m
-CONFIG_VIDEOBUF2_DMA_CONTIG=m
-CONFIG_VIDEOBUF2_VMALLOC=m
-CONFIG_VIDEOBUF2_DMA_SG=m
-CONFIG_VIDEOBUF2_DVB=m
-CONFIG_DVB_CORE=m
-CONFIG_DVB_NET=y
-CONFIG_TTPCI_EEPROM=m
-CONFIG_DVB_MAX_ADAPTERS=8
-CONFIG_DVB_DYNAMIC_MINORS=y
-# CONFIG_DVB_DEMUX_SECTION_LOSS_LOG is not set
-
-#
-# Media drivers
-#
-CONFIG_MEDIA_USB_SUPPORT=y
-
-#
-# Webcam devices
-#
-CONFIG_USB_VIDEO_CLASS=m
-CONFIG_USB_VIDEO_CLASS_INPUT_EVDEV=y
-CONFIG_USB_GSPCA=m
-CONFIG_USB_M5602=m
-CONFIG_USB_STV06XX=m
-CONFIG_USB_GL860=m
-CONFIG_USB_GSPCA_BENQ=m
-CONFIG_USB_GSPCA_CONEX=m
-CONFIG_USB_GSPCA_CPIA1=m
-CONFIG_USB_GSPCA_DTCS033=m
-CONFIG_USB_GSPCA_ETOMS=m
-CONFIG_USB_GSPCA_FINEPIX=m
-CONFIG_USB_GSPCA_JEILINJ=m
-CONFIG_USB_GSPCA_JL2005BCD=m
-CONFIG_USB_GSPCA_KINECT=m
-CONFIG_USB_GSPCA_KONICA=m
-CONFIG_USB_GSPCA_MARS=m
-CONFIG_USB_GSPCA_MR97310A=m
-CONFIG_USB_GSPCA_NW80X=m
-CONFIG_USB_GSPCA_OV519=m
-CONFIG_USB_GSPCA_OV534=m
-CONFIG_USB_GSPCA_OV534_9=m
-CONFIG_USB_GSPCA_PAC207=m
-CONFIG_USB_GSPCA_PAC7302=m
-CONFIG_USB_GSPCA_PAC7311=m
-CONFIG_USB_GSPCA_SE401=m
-CONFIG_USB_GSPCA_SN9C2028=m
-CONFIG_USB_GSPCA_SN9C20X=m
-CONFIG_USB_GSPCA_SONIXB=m
-CONFIG_USB_GSPCA_SONIXJ=m
-CONFIG_USB_GSPCA_SPCA500=m
-CONFIG_USB_GSPCA_SPCA501=m
-CONFIG_USB_GSPCA_SPCA505=m
-CONFIG_USB_GSPCA_SPCA506=m
-CONFIG_USB_GSPCA_SPCA508=m
-CONFIG_USB_GSPCA_SPCA561=m
-CONFIG_USB_GSPCA_SPCA1528=m
-CONFIG_USB_GSPCA_SQ905=m
-CONFIG_USB_GSPCA_SQ905C=m
-CONFIG_USB_GSPCA_SQ930X=m
-CONFIG_USB_GSPCA_STK014=m
-CONFIG_USB_GSPCA_STK1135=m
-CONFIG_USB_GSPCA_STV0680=m
-CONFIG_USB_GSPCA_SUNPLUS=m
-CONFIG_USB_GSPCA_T613=m
-CONFIG_USB_GSPCA_TOPRO=m
-CONFIG_USB_GSPCA_TOUPTEK=m
-CONFIG_USB_GSPCA_TV8532=m
-CONFIG_USB_GSPCA_VC032X=m
-CONFIG_USB_GSPCA_VICAM=m
-CONFIG_USB_GSPCA_XIRLINK_CIT=m
-CONFIG_USB_GSPCA_ZC3XX=m
-CONFIG_USB_PWC=m
-# CONFIG_USB_PWC_DEBUG is not set
-CONFIG_USB_PWC_INPUT_EVDEV=y
-CONFIG_VIDEO_CPIA2=m
-CONFIG_USB_ZR364XX=m
-CONFIG_USB_STKWEBCAM=m
-CONFIG_USB_S2255=m
-CONFIG_VIDEO_USBTV=m
-
-#
-# Analog TV USB devices
-#
-CONFIG_VIDEO_PVRUSB2=m
-CONFIG_VIDEO_PVRUSB2_SYSFS=y
-CONFIG_VIDEO_PVRUSB2_DVB=y
-# CONFIG_VIDEO_PVRUSB2_DEBUGIFC is not set
-CONFIG_VIDEO_HDPVR=m
-CONFIG_VIDEO_USBVISION=m
-CONFIG_VIDEO_STK1160_COMMON=m
-CONFIG_VIDEO_STK1160=m
-CONFIG_VIDEO_GO7007=m
-CONFIG_VIDEO_GO7007_USB=m
-CONFIG_VIDEO_GO7007_LOADER=m
-CONFIG_VIDEO_GO7007_USB_S2250_BOARD=m
-
-#
-# Analog/digital TV USB devices
-#
-CONFIG_VIDEO_AU0828=m
-CONFIG_VIDEO_AU0828_V4L2=y
-CONFIG_VIDEO_AU0828_RC=y
-CONFIG_VIDEO_CX231XX=m
-CONFIG_VIDEO_CX231XX_RC=y
-CONFIG_VIDEO_CX231XX_ALSA=m
-CONFIG_VIDEO_CX231XX_DVB=m
-CONFIG_VIDEO_TM6000=m
-CONFIG_VIDEO_TM6000_ALSA=m
-CONFIG_VIDEO_TM6000_DVB=m
-
-#
-# Digital TV USB devices
-#
-CONFIG_DVB_USB=m
-# CONFIG_DVB_USB_DEBUG is not set
-CONFIG_DVB_USB_DIB3000MC=m
-CONFIG_DVB_USB_A800=m
-CONFIG_DVB_USB_DIBUSB_MB=m
-CONFIG_DVB_USB_DIBUSB_MB_FAULTY=y
-CONFIG_DVB_USB_DIBUSB_MC=m
-CONFIG_DVB_USB_DIB0700=m
-CONFIG_DVB_USB_UMT_010=m
-CONFIG_DVB_USB_CXUSB=m
-CONFIG_DVB_USB_M920X=m
-CONFIG_DVB_USB_DIGITV=m
-CONFIG_DVB_USB_VP7045=m
-CONFIG_DVB_USB_VP702X=m
-CONFIG_DVB_USB_GP8PSK=m
-CONFIG_DVB_USB_NOVA_T_USB2=m
-CONFIG_DVB_USB_TTUSB2=m
-CONFIG_DVB_USB_DTT200U=m
-CONFIG_DVB_USB_OPERA1=m
-CONFIG_DVB_USB_AF9005=m
-CONFIG_DVB_USB_AF9005_REMOTE=m
-CONFIG_DVB_USB_PCTV452E=m
-CONFIG_DVB_USB_DW2102=m
-CONFIG_DVB_USB_CINERGY_T2=m
-CONFIG_DVB_USB_DTV5100=m
-CONFIG_DVB_USB_FRIIO=m
-CONFIG_DVB_USB_AZ6027=m
-CONFIG_DVB_USB_TECHNISAT_USB2=m
-CONFIG_DVB_USB_V2=m
-CONFIG_DVB_USB_AF9015=m
-CONFIG_DVB_USB_AF9035=m
-CONFIG_DVB_USB_ANYSEE=m
-CONFIG_DVB_USB_AU6610=m
-CONFIG_DVB_USB_AZ6007=m
-CONFIG_DVB_USB_CE6230=m
-CONFIG_DVB_USB_EC168=m
-CONFIG_DVB_USB_GL861=m
-CONFIG_DVB_USB_LME2510=m
-CONFIG_DVB_USB_MXL111SF=m
-CONFIG_DVB_USB_RTL28XXU=m
-CONFIG_DVB_USB_DVBSKY=m
-CONFIG_DVB_USB_ZD1301=m
-CONFIG_DVB_TTUSB_BUDGET=m
-CONFIG_DVB_TTUSB_DEC=m
-CONFIG_SMS_USB_DRV=m
-CONFIG_DVB_B2C2_FLEXCOP_USB=m
-# CONFIG_DVB_B2C2_FLEXCOP_USB_DEBUG is not set
-CONFIG_DVB_AS102=m
-
-#
-# Webcam, TV (analog/digital) USB devices
-#
-CONFIG_VIDEO_EM28XX=m
-CONFIG_VIDEO_EM28XX_V4L2=m
-CONFIG_VIDEO_EM28XX_ALSA=m
-CONFIG_VIDEO_EM28XX_DVB=m
-CONFIG_VIDEO_EM28XX_RC=m
-
-#
-# Software defined radio USB devices
-#
-CONFIG_USB_AIRSPY=m
-CONFIG_USB_HACKRF=m
-CONFIG_USB_MSI2500=m
-
-#
-# USB HDMI CEC adapters
-#
-CONFIG_USB_PULSE8_CEC=m
-CONFIG_USB_RAINSHADOW_CEC=m
-CONFIG_MEDIA_PCI_SUPPORT=y
-
-#
-# Media capture support
-#
-CONFIG_VIDEO_MEYE=m
-CONFIG_VIDEO_SOLO6X10=m
-CONFIG_VIDEO_TW5864=m
-CONFIG_VIDEO_TW68=m
-CONFIG_VIDEO_TW686X=m
-CONFIG_VIDEO_ZORAN=m
-CONFIG_VIDEO_ZORAN_DC30=m
-CONFIG_VIDEO_ZORAN_ZR36060=m
-CONFIG_VIDEO_ZORAN_BUZ=m
-CONFIG_VIDEO_ZORAN_DC10=m
-CONFIG_VIDEO_ZORAN_LML33=m
-CONFIG_VIDEO_ZORAN_LML33R10=m
-CONFIG_VIDEO_ZORAN_AVS6EYES=m
-
-#
-# Media capture/analog TV support
-#
-CONFIG_VIDEO_IVTV=m
-# CONFIG_VIDEO_IVTV_DEPRECATED_IOCTLS is not set
-CONFIG_VIDEO_IVTV_ALSA=m
-CONFIG_VIDEO_FB_IVTV=m
-CONFIG_VIDEO_HEXIUM_GEMINI=m
-CONFIG_VIDEO_HEXIUM_ORION=m
-CONFIG_VIDEO_MXB=m
-CONFIG_VIDEO_DT3155=m
-
-#
-# Media capture/analog/hybrid TV support
-#
-CONFIG_VIDEO_CX18=m
-CONFIG_VIDEO_CX18_ALSA=m
-CONFIG_VIDEO_CX23885=m
-CONFIG_MEDIA_ALTERA_CI=m
-CONFIG_VIDEO_CX25821=m
-CONFIG_VIDEO_CX25821_ALSA=m
-CONFIG_VIDEO_CX88=m
-CONFIG_VIDEO_CX88_ALSA=m
-CONFIG_VIDEO_CX88_BLACKBIRD=m
-CONFIG_VIDEO_CX88_DVB=m
-CONFIG_VIDEO_CX88_ENABLE_VP3054=y
-CONFIG_VIDEO_CX88_VP3054=m
-CONFIG_VIDEO_CX88_MPEG=m
-CONFIG_VIDEO_BT848=m
-CONFIG_DVB_BT8XX=m
-CONFIG_VIDEO_SAA7134=m
-CONFIG_VIDEO_SAA7134_ALSA=m
-CONFIG_VIDEO_SAA7134_RC=y
-CONFIG_VIDEO_SAA7134_DVB=m
-CONFIG_VIDEO_SAA7134_GO7007=m
-CONFIG_VIDEO_SAA7164=m
-# CONFIG_VIDEO_COBALT is not set
-
-#
-# Media digital TV PCI Adapters
-#
-CONFIG_DVB_AV7110_IR=y
-CONFIG_DVB_AV7110=m
-CONFIG_DVB_AV7110_OSD=y
-CONFIG_DVB_BUDGET_CORE=m
-CONFIG_DVB_BUDGET=m
-CONFIG_DVB_BUDGET_CI=m
-CONFIG_DVB_BUDGET_AV=m
-CONFIG_DVB_BUDGET_PATCH=m
-CONFIG_DVB_B2C2_FLEXCOP_PCI=m
-# CONFIG_DVB_B2C2_FLEXCOP_PCI_DEBUG is not set
-CONFIG_DVB_PLUTO2=m
-CONFIG_DVB_DM1105=m
-CONFIG_DVB_PT1=m
-CONFIG_DVB_PT3=m
-CONFIG_MANTIS_CORE=m
-CONFIG_DVB_MANTIS=m
-CONFIG_DVB_HOPPER=m
-CONFIG_DVB_NGENE=m
-CONFIG_DVB_DDBRIDGE=m
-# CONFIG_DVB_DDBRIDGE_MSIENABLE is not set
-CONFIG_DVB_SMIPCIE=m
-CONFIG_DVB_NETUP_UNIDVB=m
-CONFIG_V4L_PLATFORM_DRIVERS=y
-CONFIG_VIDEO_CAFE_CCIC=m
-CONFIG_SOC_CAMERA=m
-CONFIG_SOC_CAMERA_PLATFORM=m
-CONFIG_V4L_MEM2MEM_DRIVERS=y
-CONFIG_VIDEO_MEM2MEM_DEINTERLACE=m
-CONFIG_VIDEO_SH_VEU=m
-# CONFIG_V4L_TEST_DRIVERS is not set
-CONFIG_DVB_PLATFORM_DRIVERS=y
-CONFIG_CEC_PLATFORM_DRIVERS=y
-CONFIG_SDR_PLATFORM_DRIVERS=y
-
-#
-# Supported MMC/SDIO adapters
-#
-CONFIG_SMS_SDIO_DRV=m
-CONFIG_RADIO_ADAPTERS=y
-CONFIG_RADIO_TEA575X=m
-CONFIG_RADIO_SI470X=y
-CONFIG_USB_SI470X=m
-CONFIG_I2C_SI470X=m
-CONFIG_RADIO_SI4713=m
-CONFIG_USB_SI4713=m
-CONFIG_PLATFORM_SI4713=m
-CONFIG_I2C_SI4713=m
-CONFIG_RADIO_SI476X=m
-CONFIG_USB_MR800=m
-CONFIG_USB_DSBR=m
-CONFIG_RADIO_MAXIRADIO=m
-CONFIG_RADIO_SHARK=m
-CONFIG_RADIO_SHARK2=m
-CONFIG_USB_KEENE=m
-CONFIG_USB_RAREMONO=m
-CONFIG_USB_MA901=m
-CONFIG_RADIO_TEA5764=m
-CONFIG_RADIO_SAA7706H=m
-CONFIG_RADIO_TEF6862=m
-CONFIG_RADIO_WL1273=m
-
-#
-# Texas Instruments WL128x FM driver (ST based)
-#
-CONFIG_RADIO_WL128X=m
-
-#
-# Supported FireWire (IEEE 1394) Adapters
-#
-CONFIG_DVB_FIREDTV=m
-CONFIG_DVB_FIREDTV_INPUT=y
-CONFIG_MEDIA_COMMON_OPTIONS=y
-
-#
-# common driver options
-#
-CONFIG_VIDEO_CX2341X=m
-CONFIG_VIDEO_TVEEPROM=m
-CONFIG_CYPRESS_FIRMWARE=m
-CONFIG_DVB_B2C2_FLEXCOP=m
-CONFIG_VIDEO_SAA7146=m
-CONFIG_VIDEO_SAA7146_VV=m
-CONFIG_SMS_SIANO_MDTV=m
-CONFIG_SMS_SIANO_RC=y
-# CONFIG_SMS_SIANO_DEBUGFS is not set
-
-#
-# Media ancillary drivers (tuners, sensors, i2c, spi, frontends)
-#
-CONFIG_MEDIA_SUBDRV_AUTOSELECT=y
-CONFIG_MEDIA_ATTACH=y
-CONFIG_VIDEO_IR_I2C=m
-
-#
-# Audio decoders, processors and mixers
-#
-CONFIG_VIDEO_TVAUDIO=m
-CONFIG_VIDEO_TDA7432=m
-CONFIG_VIDEO_TDA9840=m
-CONFIG_VIDEO_TEA6415C=m
-CONFIG_VIDEO_TEA6420=m
-CONFIG_VIDEO_MSP3400=m
-CONFIG_VIDEO_CS3308=m
-CONFIG_VIDEO_CS5345=m
-CONFIG_VIDEO_CS53L32A=m
-CONFIG_VIDEO_UDA1342=m
-CONFIG_VIDEO_WM8775=m
-CONFIG_VIDEO_WM8739=m
-CONFIG_VIDEO_VP27SMPX=m
-CONFIG_VIDEO_SONY_BTF_MPX=m
-
-#
-# RDS decoders
-#
-CONFIG_VIDEO_SAA6588=m
-
-#
-# Video decoders
-#
-CONFIG_VIDEO_BT819=m
-CONFIG_VIDEO_BT856=m
-CONFIG_VIDEO_BT866=m
-CONFIG_VIDEO_KS0127=m
-CONFIG_VIDEO_SAA7110=m
-CONFIG_VIDEO_SAA711X=m
-CONFIG_VIDEO_TVP5150=m
-CONFIG_VIDEO_TW2804=m
-CONFIG_VIDEO_TW9903=m
-CONFIG_VIDEO_TW9906=m
-CONFIG_VIDEO_VPX3220=m
-
-#
-# Video and audio decoders
-#
-CONFIG_VIDEO_SAA717X=m
-CONFIG_VIDEO_CX25840=m
-
-#
-# Video encoders
-#
-CONFIG_VIDEO_SAA7127=m
-CONFIG_VIDEO_SAA7185=m
-CONFIG_VIDEO_ADV7170=m
-CONFIG_VIDEO_ADV7175=m
-
-#
-# Camera sensor devices
-#
-CONFIG_VIDEO_OV2640=m
-CONFIG_VIDEO_OV7640=m
-CONFIG_VIDEO_OV7670=m
-CONFIG_VIDEO_MT9M111=m
-CONFIG_VIDEO_MT9V011=m
-
-#
-# Flash devices
-#
-
-#
-# Video improvement chips
-#
-CONFIG_VIDEO_UPD64031A=m
-CONFIG_VIDEO_UPD64083=m
-
-#
-# Audio/Video compression chips
-#
-CONFIG_VIDEO_SAA6752HS=m
-
-#
-# SDR tuner chips
-#
-
-#
-# Miscellaneous helper chips
-#
-CONFIG_VIDEO_M52790=m
-
-#
-# Sensors used on soc_camera driver
-#
-
-#
-# soc_camera sensor drivers
-#
-CONFIG_SOC_CAMERA_IMX074=m
-CONFIG_SOC_CAMERA_MT9M001=m
-CONFIG_SOC_CAMERA_MT9M111=m
-CONFIG_SOC_CAMERA_MT9T031=m
-CONFIG_SOC_CAMERA_MT9T112=m
-CONFIG_SOC_CAMERA_MT9V022=m
-CONFIG_SOC_CAMERA_OV5642=m
-CONFIG_SOC_CAMERA_OV772X=m
-CONFIG_SOC_CAMERA_OV9640=m
-CONFIG_SOC_CAMERA_OV9740=m
-CONFIG_SOC_CAMERA_RJ54N1=m
-CONFIG_SOC_CAMERA_TW9910=m
-CONFIG_MEDIA_TUNER=m
-CONFIG_MEDIA_TUNER_SIMPLE=m
-CONFIG_MEDIA_TUNER_TDA8290=m
-CONFIG_MEDIA_TUNER_TDA827X=m
-CONFIG_MEDIA_TUNER_TDA18271=m
-CONFIG_MEDIA_TUNER_TDA9887=m
-CONFIG_MEDIA_TUNER_TEA5761=m
-CONFIG_MEDIA_TUNER_TEA5767=m
-CONFIG_MEDIA_TUNER_MSI001=m
-CONFIG_MEDIA_TUNER_MT20XX=m
-CONFIG_MEDIA_TUNER_MT2060=m
-CONFIG_MEDIA_TUNER_MT2063=m
-CONFIG_MEDIA_TUNER_MT2266=m
-CONFIG_MEDIA_TUNER_MT2131=m
-CONFIG_MEDIA_TUNER_QT1010=m
-CONFIG_MEDIA_TUNER_XC2028=m
-CONFIG_MEDIA_TUNER_XC5000=m
-CONFIG_MEDIA_TUNER_XC4000=m
-CONFIG_MEDIA_TUNER_MXL5005S=m
-CONFIG_MEDIA_TUNER_MXL5007T=m
-CONFIG_MEDIA_TUNER_MC44S803=m
-CONFIG_MEDIA_TUNER_MAX2165=m
-CONFIG_MEDIA_TUNER_TDA18218=m
-CONFIG_MEDIA_TUNER_FC0011=m
-CONFIG_MEDIA_TUNER_FC0012=m
-CONFIG_MEDIA_TUNER_FC0013=m
-CONFIG_MEDIA_TUNER_TDA18212=m
-CONFIG_MEDIA_TUNER_E4000=m
-CONFIG_MEDIA_TUNER_FC2580=m
-CONFIG_MEDIA_TUNER_M88RS6000T=m
-CONFIG_MEDIA_TUNER_TUA9001=m
-CONFIG_MEDIA_TUNER_SI2157=m
-CONFIG_MEDIA_TUNER_IT913X=m
-CONFIG_MEDIA_TUNER_R820T=m
-CONFIG_MEDIA_TUNER_MXL301RF=m
-CONFIG_MEDIA_TUNER_QM1D1C0042=m
-
-#
-# Multistandard (satellite) frontends
-#
-CONFIG_DVB_STB0899=m
-CONFIG_DVB_STB6100=m
-CONFIG_DVB_STV090x=m
-CONFIG_DVB_STV0910=m
-CONFIG_DVB_STV6110x=m
-CONFIG_DVB_STV6111=m
-CONFIG_DVB_MXL5XX=m
-CONFIG_DVB_M88DS3103=m
-
-#
-# Multistandard (cable + terrestrial) frontends
-#
-CONFIG_DVB_DRXK=m
-CONFIG_DVB_TDA18271C2DD=m
-CONFIG_DVB_SI2165=m
-CONFIG_DVB_MN88472=m
-CONFIG_DVB_MN88473=m
-
-#
-# DVB-S (satellite) frontends
-#
-CONFIG_DVB_CX24110=m
-CONFIG_DVB_CX24123=m
-CONFIG_DVB_MT312=m
-CONFIG_DVB_ZL10036=m
-CONFIG_DVB_ZL10039=m
-CONFIG_DVB_S5H1420=m
-CONFIG_DVB_STV0288=m
-CONFIG_DVB_STB6000=m
-CONFIG_DVB_STV0299=m
-CONFIG_DVB_STV6110=m
-CONFIG_DVB_STV0900=m
-CONFIG_DVB_TDA8083=m
-CONFIG_DVB_TDA10086=m
-CONFIG_DVB_TDA8261=m
-CONFIG_DVB_VES1X93=m
-CONFIG_DVB_TUNER_ITD1000=m
-CONFIG_DVB_TUNER_CX24113=m
-CONFIG_DVB_TDA826X=m
-CONFIG_DVB_TUA6100=m
-CONFIG_DVB_CX24116=m
-CONFIG_DVB_CX24117=m
-CONFIG_DVB_CX24120=m
-CONFIG_DVB_SI21XX=m
-CONFIG_DVB_TS2020=m
-CONFIG_DVB_DS3000=m
-CONFIG_DVB_MB86A16=m
-CONFIG_DVB_TDA10071=m
-
-#
-# DVB-T (terrestrial) frontends
-#
-CONFIG_DVB_SP8870=m
-CONFIG_DVB_SP887X=m
-CONFIG_DVB_CX22700=m
-CONFIG_DVB_CX22702=m
-CONFIG_DVB_DRXD=m
-CONFIG_DVB_L64781=m
-CONFIG_DVB_TDA1004X=m
-CONFIG_DVB_NXT6000=m
-CONFIG_DVB_MT352=m
-CONFIG_DVB_ZL10353=m
-CONFIG_DVB_DIB3000MB=m
-CONFIG_DVB_DIB3000MC=m
-CONFIG_DVB_DIB7000M=m
-CONFIG_DVB_DIB7000P=m
-CONFIG_DVB_TDA10048=m
-CONFIG_DVB_AF9013=m
-CONFIG_DVB_EC100=m
-CONFIG_DVB_STV0367=m
-CONFIG_DVB_CXD2820R=m
-CONFIG_DVB_CXD2841ER=m
-CONFIG_DVB_RTL2830=m
-CONFIG_DVB_RTL2832=m
-CONFIG_DVB_RTL2832_SDR=m
-CONFIG_DVB_SI2168=m
-CONFIG_DVB_AS102_FE=m
-CONFIG_DVB_ZD1301_DEMOD=m
-CONFIG_DVB_GP8PSK_FE=m
-
-#
-# DVB-C (cable) frontends
-#
-CONFIG_DVB_VES1820=m
-CONFIG_DVB_TDA10021=m
-CONFIG_DVB_TDA10023=m
-CONFIG_DVB_STV0297=m
-
-#
-# ATSC (North American/Korean Terrestrial/Cable DTV) frontends
-#
-CONFIG_DVB_NXT200X=m
-CONFIG_DVB_OR51211=m
-CONFIG_DVB_OR51132=m
-CONFIG_DVB_BCM3510=m
-CONFIG_DVB_LGDT330X=m
-CONFIG_DVB_LGDT3305=m
-CONFIG_DVB_LGDT3306A=m
-CONFIG_DVB_LG2160=m
-CONFIG_DVB_S5H1409=m
-CONFIG_DVB_AU8522=m
-CONFIG_DVB_AU8522_DTV=m
-CONFIG_DVB_AU8522_V4L=m
-CONFIG_DVB_S5H1411=m
-
-#
-# ISDB-T (terrestrial) frontends
-#
-CONFIG_DVB_S921=m
-CONFIG_DVB_DIB8000=m
-CONFIG_DVB_MB86A20S=m
-
-#
-# ISDB-S (satellite) & ISDB-T (terrestrial) frontends
-#
-CONFIG_DVB_TC90522=m
-
-#
-# Digital terrestrial only tuners/PLL
-#
-CONFIG_DVB_PLL=m
-CONFIG_DVB_TUNER_DIB0070=m
-CONFIG_DVB_TUNER_DIB0090=m
-
-#
-# SEC control devices for DVB-S
-#
-CONFIG_DVB_DRX39XYJ=m
-CONFIG_DVB_LNBH25=m
-CONFIG_DVB_LNBP21=m
-CONFIG_DVB_LNBP22=m
-CONFIG_DVB_ISL6405=m
-CONFIG_DVB_ISL6421=m
-CONFIG_DVB_ISL6423=m
-CONFIG_DVB_A8293=m
-CONFIG_DVB_SP2=m
-CONFIG_DVB_LGS8GXX=m
-CONFIG_DVB_ATBM8830=m
-CONFIG_DVB_TDA665x=m
-CONFIG_DVB_IX2505V=m
-CONFIG_DVB_M88RS2000=m
-CONFIG_DVB_AF9033=m
-CONFIG_DVB_HORUS3A=m
-CONFIG_DVB_ASCOT2E=m
-CONFIG_DVB_HELENE=m
-
-#
-# Tools to develop new frontends
-#
-# CONFIG_DVB_DUMMY_FE is not set
-
-#
-# Graphics support
-#
-CONFIG_AGP=m
-CONFIG_AGP_AMD64=m
-CONFIG_AGP_INTEL=m
-CONFIG_AGP_SIS=m
-CONFIG_AGP_VIA=m
-CONFIG_INTEL_GTT=m
-CONFIG_VGA_ARB=y
-CONFIG_VGA_ARB_MAX_GPUS=16
-CONFIG_VGA_SWITCHEROO=y
-CONFIG_DRM=m
-CONFIG_DRM_MIPI_DSI=y
-CONFIG_DRM_DP_AUX_CHARDEV=y
-# CONFIG_DRM_DEBUG_MM_SELFTEST is not set
-CONFIG_DRM_KMS_HELPER=m
-CONFIG_DRM_KMS_FB_HELPER=y
-CONFIG_DRM_FBDEV_EMULATION=y
-CONFIG_DRM_FBDEV_OVERALLOC=100
-# CONFIG_DRM_LOAD_EDID_FIRMWARE is not set
-CONFIG_DRM_TTM=m
-CONFIG_DRM_GEM_CMA_HELPER=y
-CONFIG_DRM_KMS_CMA_HELPER=y
-CONFIG_DRM_VM=y
-
-#
-# I2C encoder or helper chips
-#
-CONFIG_DRM_I2C_CH7006=m
-CONFIG_DRM_I2C_SIL164=m
-CONFIG_DRM_I2C_NXP_TDA998X=m
-CONFIG_DRM_RADEON=m
-# CONFIG_DRM_RADEON_USERPTR is not set
-CONFIG_DRM_AMDGPU=m
-CONFIG_DRM_AMDGPU_SI=y
-CONFIG_DRM_AMDGPU_CIK=y
-CONFIG_DRM_AMDGPU_USERPTR=y
-# CONFIG_DRM_AMDGPU_GART_DEBUGFS is not set
-
-#
-# ACP (Audio CoProcessor) Configuration
-#
-CONFIG_DRM_AMD_ACP=y
-CONFIG_DRM_NOUVEAU=m
-CONFIG_NOUVEAU_DEBUG=5
-CONFIG_NOUVEAU_DEBUG_DEFAULT=3
-CONFIG_DRM_NOUVEAU_BACKLIGHT=y
-CONFIG_DRM_I915=m
-# CONFIG_DRM_I915_ALPHA_SUPPORT is not set
-CONFIG_DRM_I915_CAPTURE_ERROR=y
-CONFIG_DRM_I915_COMPRESS_ERROR=y
-CONFIG_DRM_I915_USERPTR=y
-CONFIG_DRM_I915_GVT=y
-CONFIG_DRM_I915_GVT_KVMGT=m
-CONFIG_DRM_VGEM=m
-CONFIG_DRM_VMWGFX=m
-CONFIG_DRM_VMWGFX_FBCON=y
-CONFIG_DRM_GMA500=m
-CONFIG_DRM_GMA600=y
-CONFIG_DRM_GMA3600=y
-CONFIG_DRM_UDL=m
-CONFIG_DRM_AST=m
-CONFIG_DRM_MGAG200=m
-CONFIG_DRM_CIRRUS_QEMU=m
-CONFIG_DRM_QXL=m
-CONFIG_DRM_BOCHS=m
-CONFIG_DRM_VIRTIO_GPU=m
-CONFIG_DRM_PANEL=y
-
-#
-# Display Panels
-#
-CONFIG_DRM_BRIDGE=y
-CONFIG_DRM_PANEL_BRIDGE=y
-
-#
-# Display Interface Bridges
-#
-CONFIG_DRM_ANALOGIX_ANX78XX=m
-CONFIG_HSA_AMD=m
-CONFIG_DRM_HISI_HIBMC=m
-CONFIG_DRM_TINYDRM=m
-CONFIG_TINYDRM_MIPI_DBI=m
-CONFIG_TINYDRM_MI0283QT=m
-CONFIG_TINYDRM_REPAPER=m
-CONFIG_TINYDRM_ST7586=m
-# CONFIG_DRM_LEGACY is not set
-# CONFIG_DRM_LIB_RANDOM is not set
-
-#
-# Frame buffer Devices
-#
-CONFIG_FB=y
-CONFIG_FIRMWARE_EDID=y
-CONFIG_FB_CMDLINE=y
-CONFIG_FB_NOTIFY=y
-# CONFIG_FB_DDC is not set
-CONFIG_FB_BOOT_VESA_SUPPORT=y
-CONFIG_FB_CFB_FILLRECT=y
-CONFIG_FB_CFB_COPYAREA=y
-CONFIG_FB_CFB_IMAGEBLIT=y
-# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
-CONFIG_FB_SYS_FILLRECT=m
-CONFIG_FB_SYS_COPYAREA=m
-CONFIG_FB_SYS_IMAGEBLIT=m
-# CONFIG_FB_PROVIDE_GET_FB_UNMAPPED_AREA is not set
-CONFIG_FB_FOREIGN_ENDIAN=y
-CONFIG_FB_BOTH_ENDIAN=y
-# CONFIG_FB_BIG_ENDIAN is not set
-# CONFIG_FB_LITTLE_ENDIAN is not set
-CONFIG_FB_SYS_FOPS=m
-CONFIG_FB_DEFERRED_IO=y
-# CONFIG_FB_SVGALIB is not set
-# CONFIG_FB_MACMODES is not set
-CONFIG_FB_BACKLIGHT=y
-CONFIG_FB_MODE_HELPERS=y
-CONFIG_FB_TILEBLITTING=y
-
-#
-# Frame buffer hardware drivers
-#
-# CONFIG_FB_CIRRUS is not set
-# CONFIG_FB_PM2 is not set
-# CONFIG_FB_CYBER2000 is not set
-# CONFIG_FB_ARC is not set
-# CONFIG_FB_ASILIANT is not set
-# CONFIG_FB_IMSTT is not set
-# CONFIG_FB_VGA16 is not set
-# CONFIG_FB_UVESA is not set
-CONFIG_FB_VESA=y
-CONFIG_FB_EFI=y
-# CONFIG_FB_N411 is not set
-# CONFIG_FB_HGA is not set
-# CONFIG_FB_OPENCORES is not set
-# CONFIG_FB_S1D13XXX is not set
-# CONFIG_FB_NVIDIA is not set
-# CONFIG_FB_RIVA is not set
-# CONFIG_FB_I740 is not set
-# CONFIG_FB_LE80578 is not set
-# CONFIG_FB_MATROX is not set
-# CONFIG_FB_RADEON is not set
-# CONFIG_FB_ATY128 is not set
-# CONFIG_FB_ATY is not set
-# CONFIG_FB_S3 is not set
-# CONFIG_FB_SAVAGE is not set
-# CONFIG_FB_SIS is not set
-# CONFIG_FB_VIA is not set
-# CONFIG_FB_NEOMAGIC is not set
-# CONFIG_FB_KYRO is not set
-# CONFIG_FB_3DFX is not set
-# CONFIG_FB_VOODOO1 is not set
-# CONFIG_FB_VT8623 is not set
-# CONFIG_FB_TRIDENT is not set
-# CONFIG_FB_ARK is not set
-# CONFIG_FB_PM3 is not set
-# CONFIG_FB_CARMINE is not set
-# CONFIG_FB_SM501 is not set
-# CONFIG_FB_SMSCUFX is not set
-# CONFIG_FB_UDL is not set
-# CONFIG_FB_IBM_GXT4500 is not set
-# CONFIG_FB_VIRTUAL is not set
-# CONFIG_FB_METRONOME is not set
-# CONFIG_FB_MB862XX is not set
-# CONFIG_FB_BROADSHEET is not set
-# CONFIG_FB_AUO_K190X is not set
-# CONFIG_FB_HYPERV is not set
-CONFIG_FB_SIMPLE=y
-# CONFIG_FB_SM712 is not set
-CONFIG_BACKLIGHT_LCD_SUPPORT=y
-CONFIG_LCD_CLASS_DEVICE=m
-CONFIG_LCD_L4F00242T03=m
-CONFIG_LCD_LMS283GF05=m
-CONFIG_LCD_LTV350QV=m
-CONFIG_LCD_ILI922X=m
-CONFIG_LCD_ILI9320=m
-CONFIG_LCD_TDO24M=m
-CONFIG_LCD_VGG2432A4=m
-CONFIG_LCD_PLATFORM=m
-CONFIG_LCD_S6E63M0=m
-CONFIG_LCD_LD9040=m
-CONFIG_LCD_AMS369FG06=m
-CONFIG_LCD_LMS501KF03=m
-CONFIG_LCD_HX8357=m
-CONFIG_BACKLIGHT_CLASS_DEVICE=y
-CONFIG_BACKLIGHT_GENERIC=m
-CONFIG_BACKLIGHT_LM3533=m
-CONFIG_BACKLIGHT_PWM=m
-CONFIG_BACKLIGHT_DA9052=m
-CONFIG_BACKLIGHT_APPLE=m
-CONFIG_BACKLIGHT_PM8941_WLED=m
-CONFIG_BACKLIGHT_SAHARA=m
-CONFIG_BACKLIGHT_WM831X=m
-CONFIG_BACKLIGHT_ADP8860=m
-CONFIG_BACKLIGHT_ADP8870=m
-CONFIG_BACKLIGHT_PCF50633=m
-CONFIG_BACKLIGHT_LM3630A=m
-CONFIG_BACKLIGHT_LM3639=m
-CONFIG_BACKLIGHT_LP855X=m
-CONFIG_BACKLIGHT_SKY81452=m
-CONFIG_BACKLIGHT_TPS65217=m
-CONFIG_BACKLIGHT_GPIO=m
-CONFIG_BACKLIGHT_LV5207LP=m
-CONFIG_BACKLIGHT_BD6107=m
-CONFIG_BACKLIGHT_ARCXCNN=m
-# CONFIG_VGASTATE is not set
-CONFIG_HDMI=y
-
-#
-# Console display driver support
-#
-CONFIG_VGA_CONSOLE=y
-CONFIG_VGACON_SOFT_SCROLLBACK=y
-CONFIG_VGACON_SOFT_SCROLLBACK_SIZE=64
-# CONFIG_VGACON_SOFT_SCROLLBACK_PERSISTENT_ENABLE_BY_DEFAULT is not set
-CONFIG_DUMMY_CONSOLE=y
-CONFIG_DUMMY_CONSOLE_COLUMNS=80
-CONFIG_DUMMY_CONSOLE_ROWS=25
-CONFIG_FRAMEBUFFER_CONSOLE=y
-CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
-CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
-CONFIG_LOGO=y
-CONFIG_LOGO_LINUX_MONO=y
-CONFIG_LOGO_LINUX_VGA16=y
-CONFIG_LOGO_LINUX_CLUT224=y
-CONFIG_SOUND=m
-CONFIG_SOUND_OSS_CORE=y
-CONFIG_SOUND_OSS_CORE_PRECLAIM=y
-CONFIG_SND=m
-CONFIG_SND_TIMER=m
-CONFIG_SND_PCM=m
-CONFIG_SND_PCM_ELD=y
-CONFIG_SND_PCM_IEC958=y
-CONFIG_SND_DMAENGINE_PCM=m
-CONFIG_SND_HWDEP=m
-CONFIG_SND_SEQ_DEVICE=m
-CONFIG_SND_RAWMIDI=m
-CONFIG_SND_COMPRESS_OFFLOAD=m
-CONFIG_SND_JACK=y
-CONFIG_SND_JACK_INPUT_DEV=y
-CONFIG_SND_OSSEMUL=y
-CONFIG_SND_MIXER_OSS=m
-CONFIG_SND_PCM_OSS=m
-CONFIG_SND_PCM_OSS_PLUGINS=y
-CONFIG_SND_PCM_TIMER=y
-CONFIG_SND_HRTIMER=m
-CONFIG_SND_DYNAMIC_MINORS=y
-CONFIG_SND_MAX_CARDS=32
-CONFIG_SND_SUPPORT_OLD_API=y
-CONFIG_SND_PROC_FS=y
-CONFIG_SND_VERBOSE_PROCFS=y
-# CONFIG_SND_VERBOSE_PRINTK is not set
-# CONFIG_SND_DEBUG is not set
-CONFIG_SND_VMASTER=y
-CONFIG_SND_DMA_SGBUF=y
-CONFIG_SND_SEQUENCER=m
-CONFIG_SND_SEQ_DUMMY=m
-CONFIG_SND_SEQUENCER_OSS=m
-CONFIG_SND_SEQ_HRTIMER_DEFAULT=y
-CONFIG_SND_SEQ_MIDI_EVENT=m
-CONFIG_SND_SEQ_MIDI=m
-CONFIG_SND_SEQ_MIDI_EMUL=m
-CONFIG_SND_SEQ_VIRMIDI=m
-CONFIG_SND_MPU401_UART=m
-CONFIG_SND_OPL3_LIB=m
-CONFIG_SND_OPL3_LIB_SEQ=m
-# CONFIG_SND_OPL4_LIB_SEQ is not set
-CONFIG_SND_VX_LIB=m
-CONFIG_SND_AC97_CODEC=m
-CONFIG_SND_DRIVERS=y
-# CONFIG_SND_PCSP is not set
-CONFIG_SND_DUMMY=m
-CONFIG_SND_ALOOP=m
-CONFIG_SND_VIRMIDI=m
-CONFIG_SND_MTPAV=m
-CONFIG_SND_MTS64=m
-CONFIG_SND_SERIAL_U16550=m
-CONFIG_SND_MPU401=m
-CONFIG_SND_PORTMAN2X4=m
-CONFIG_SND_AC97_POWER_SAVE=y
-CONFIG_SND_AC97_POWER_SAVE_DEFAULT=0
-CONFIG_SND_SB_COMMON=m
-CONFIG_SND_PCI=y
-CONFIG_SND_AD1889=m
-CONFIG_SND_ALS300=m
-CONFIG_SND_ALS4000=m
-CONFIG_SND_ALI5451=m
-CONFIG_SND_ASIHPI=m
-CONFIG_SND_ATIIXP=m
-CONFIG_SND_ATIIXP_MODEM=m
-CONFIG_SND_AU8810=m
-CONFIG_SND_AU8820=m
-CONFIG_SND_AU8830=m
-CONFIG_SND_AW2=m
-CONFIG_SND_AZT3328=m
-CONFIG_SND_BT87X=m
-CONFIG_SND_BT87X_OVERCLOCK=y
-CONFIG_SND_CA0106=m
-CONFIG_SND_CMIPCI=m
-CONFIG_SND_OXYGEN_LIB=m
-CONFIG_SND_OXYGEN=m
-CONFIG_SND_CS4281=m
-CONFIG_SND_CS46XX=m
-CONFIG_SND_CS46XX_NEW_DSP=y
-CONFIG_SND_CTXFI=m
-CONFIG_SND_DARLA20=m
-CONFIG_SND_GINA20=m
-CONFIG_SND_LAYLA20=m
-CONFIG_SND_DARLA24=m
-CONFIG_SND_GINA24=m
-CONFIG_SND_LAYLA24=m
-CONFIG_SND_MONA=m
-CONFIG_SND_MIA=m
-CONFIG_SND_ECHO3G=m
-CONFIG_SND_INDIGO=m
-CONFIG_SND_INDIGOIO=m
-CONFIG_SND_INDIGODJ=m
-CONFIG_SND_INDIGOIOX=m
-CONFIG_SND_INDIGODJX=m
-CONFIG_SND_EMU10K1=m
-CONFIG_SND_EMU10K1_SEQ=m
-CONFIG_SND_EMU10K1X=m
-CONFIG_SND_ENS1370=m
-CONFIG_SND_ENS1371=m
-CONFIG_SND_ES1938=m
-CONFIG_SND_ES1968=m
-CONFIG_SND_ES1968_INPUT=y
-CONFIG_SND_ES1968_RADIO=y
-CONFIG_SND_FM801=m
-CONFIG_SND_FM801_TEA575X_BOOL=y
-CONFIG_SND_HDSP=m
-CONFIG_SND_HDSPM=m
-CONFIG_SND_ICE1712=m
-CONFIG_SND_ICE1724=m
-CONFIG_SND_INTEL8X0=m
-CONFIG_SND_INTEL8X0M=m
-CONFIG_SND_KORG1212=m
-CONFIG_SND_LOLA=m
-CONFIG_SND_LX6464ES=m
-CONFIG_SND_MAESTRO3=m
-CONFIG_SND_MAESTRO3_INPUT=y
-CONFIG_SND_MIXART=m
-CONFIG_SND_NM256=m
-CONFIG_SND_PCXHR=m
-CONFIG_SND_RIPTIDE=m
-CONFIG_SND_RME32=m
-CONFIG_SND_RME96=m
-CONFIG_SND_RME9652=m
-CONFIG_SND_SONICVIBES=m
-CONFIG_SND_TRIDENT=m
-CONFIG_SND_VIA82XX=m
-CONFIG_SND_VIA82XX_MODEM=m
-CONFIG_SND_VIRTUOSO=m
-CONFIG_SND_VX222=m
-CONFIG_SND_YMFPCI=m
-
-#
-# HD-Audio
-#
-CONFIG_SND_HDA=m
-CONFIG_SND_HDA_INTEL=m
-CONFIG_SND_HDA_HWDEP=y
-CONFIG_SND_HDA_RECONFIG=y
-CONFIG_SND_HDA_INPUT_BEEP=y
-CONFIG_SND_HDA_INPUT_BEEP_MODE=1
-CONFIG_SND_HDA_PATCH_LOADER=y
-CONFIG_SND_HDA_CODEC_REALTEK=m
-CONFIG_SND_HDA_CODEC_ANALOG=m
-CONFIG_SND_HDA_CODEC_SIGMATEL=m
-CONFIG_SND_HDA_CODEC_VIA=m
-CONFIG_SND_HDA_CODEC_HDMI=m
-CONFIG_SND_HDA_CODEC_CIRRUS=m
-CONFIG_SND_HDA_CODEC_CONEXANT=m
-CONFIG_SND_HDA_CODEC_CA0110=m
-CONFIG_SND_HDA_CODEC_CA0132=m
-CONFIG_SND_HDA_CODEC_CA0132_DSP=y
-CONFIG_SND_HDA_CODEC_CMEDIA=m
-CONFIG_SND_HDA_CODEC_SI3054=m
-CONFIG_SND_HDA_GENERIC=m
-CONFIG_SND_HDA_POWER_SAVE_DEFAULT=0
-CONFIG_SND_HDA_CORE=m
-CONFIG_SND_HDA_DSP_LOADER=y
-CONFIG_SND_HDA_I915=y
-CONFIG_SND_HDA_EXT_CORE=m
-CONFIG_SND_HDA_PREALLOC_SIZE=64
-CONFIG_SND_SPI=y
-CONFIG_SND_USB=y
-CONFIG_SND_USB_AUDIO=m
-CONFIG_SND_USB_UA101=m
-CONFIG_SND_USB_USX2Y=m
-CONFIG_SND_USB_CAIAQ=m
-CONFIG_SND_USB_CAIAQ_INPUT=y
-CONFIG_SND_USB_US122L=m
-CONFIG_SND_USB_6FIRE=m
-CONFIG_SND_USB_HIFACE=m
-CONFIG_SND_BCD2000=m
-CONFIG_SND_USB_LINE6=m
-CONFIG_SND_USB_POD=m
-CONFIG_SND_USB_PODHD=m
-CONFIG_SND_USB_TONEPORT=m
-CONFIG_SND_USB_VARIAX=m
-CONFIG_SND_FIREWIRE=y
-CONFIG_SND_FIREWIRE_LIB=m
-CONFIG_SND_DICE=m
-CONFIG_SND_OXFW=m
-CONFIG_SND_ISIGHT=m
-CONFIG_SND_FIREWORKS=m
-CONFIG_SND_BEBOB=m
-CONFIG_SND_FIREWIRE_DIGI00X=m
-CONFIG_SND_FIREWIRE_TASCAM=m
-CONFIG_SND_FIREWIRE_MOTU=m
-CONFIG_SND_FIREFACE=m
-CONFIG_SND_PCMCIA=y
-CONFIG_SND_VXPOCKET=m
-CONFIG_SND_PDAUDIOCF=m
-CONFIG_SND_SOC=m
-CONFIG_SND_SOC_GENERIC_DMAENGINE_PCM=y
-CONFIG_SND_SOC_COMPRESS=y
-CONFIG_SND_SOC_TOPOLOGY=y
-CONFIG_SND_SOC_AMD_ACP=m
-CONFIG_SND_ATMEL_SOC=m
-CONFIG_SND_DESIGNWARE_I2S=m
-CONFIG_SND_DESIGNWARE_PCM=y
-
-#
-# SoC Audio for Freescale CPUs
-#
-
-#
-# Common SoC Audio options for Freescale CPUs:
-#
-CONFIG_SND_SOC_FSL_ASRC=m
-CONFIG_SND_SOC_FSL_SAI=m
-CONFIG_SND_SOC_FSL_SSI=m
-CONFIG_SND_SOC_FSL_SPDIF=m
-CONFIG_SND_SOC_FSL_ESAI=m
-CONFIG_SND_SOC_IMX_AUDMUX=m
-CONFIG_SND_I2S_HI6210_I2S=m
-CONFIG_SND_SOC_IMG=y
-CONFIG_SND_SOC_IMG_I2S_IN=m
-CONFIG_SND_SOC_IMG_I2S_OUT=m
-CONFIG_SND_SOC_IMG_PARALLEL_OUT=m
-CONFIG_SND_SOC_IMG_SPDIF_IN=m
-CONFIG_SND_SOC_IMG_SPDIF_OUT=m
-CONFIG_SND_SOC_IMG_PISTACHIO_INTERNAL_DAC=m
-CONFIG_SND_SST_ATOM_HIFI2_PLATFORM=m
-CONFIG_SND_SST_IPC=m
-CONFIG_SND_SST_IPC_ACPI=m
-CONFIG_SND_SOC_INTEL_SST=m
-CONFIG_SND_SOC_INTEL_SST_FIRMWARE=m
-CONFIG_SND_SOC_INTEL_SST_ACPI=m
-CONFIG_SND_SOC_INTEL_SST_MATCH=m
-CONFIG_SND_SOC_INTEL_HASWELL=m
-CONFIG_SND_SOC_INTEL_HASWELL_MACH=m
-CONFIG_SND_SOC_INTEL_BXT_DA7219_MAX98357A_MACH=m
-CONFIG_SND_SOC_INTEL_BXT_RT298_MACH=m
-CONFIG_SND_SOC_INTEL_BDW_RT5677_MACH=m
-CONFIG_SND_SOC_INTEL_BROADWELL_MACH=m
-CONFIG_SND_SOC_INTEL_BYTCR_RT5640_MACH=m
-CONFIG_SND_SOC_INTEL_BYTCR_RT5651_MACH=m
-CONFIG_SND_SOC_INTEL_CHT_BSW_RT5672_MACH=m
-CONFIG_SND_SOC_INTEL_CHT_BSW_RT5645_MACH=m
-CONFIG_SND_SOC_INTEL_CHT_BSW_MAX98090_TI_MACH=m
-CONFIG_SND_SOC_INTEL_BYT_CHT_DA7213_MACH=m
-CONFIG_SND_SOC_INTEL_BYT_CHT_ES8316_MACH=m
-CONFIG_SND_SOC_INTEL_BYT_CHT_NOCODEC_MACH=m
-CONFIG_SND_SOC_INTEL_KBL_RT5663_MAX98927_MACH=m
-CONFIG_SND_SOC_INTEL_KBL_RT5663_RT5514_MAX98927_MACH=m
-CONFIG_SND_SOC_INTEL_SKYLAKE=m
-CONFIG_SND_SOC_INTEL_SKL_RT286_MACH=m
-CONFIG_SND_SOC_INTEL_SKL_NAU88L25_SSM4567_MACH=m
-CONFIG_SND_SOC_INTEL_SKL_NAU88L25_MAX98357A_MACH=m
-
-#
-# STMicroelectronics STM32 SOC audio support
-#
-CONFIG_SND_SOC_XTFPGA_I2S=m
-CONFIG_ZX_TDM=m
-CONFIG_SND_SOC_I2C_AND_SPI=m
-
-#
-# CODEC drivers
-#
-# CONFIG_SND_SOC_AC97_CODEC is not set
-CONFIG_SND_SOC_ADAU_UTILS=m
-CONFIG_SND_SOC_ADAU1701=m
-CONFIG_SND_SOC_ADAU17X1=m
-CONFIG_SND_SOC_ADAU1761=m
-CONFIG_SND_SOC_ADAU1761_I2C=m
-CONFIG_SND_SOC_ADAU1761_SPI=m
-CONFIG_SND_SOC_ADAU7002=m
-CONFIG_SND_SOC_AK4104=m
-CONFIG_SND_SOC_AK4554=m
-CONFIG_SND_SOC_AK4613=m
-CONFIG_SND_SOC_AK4642=m
-CONFIG_SND_SOC_AK5386=m
-CONFIG_SND_SOC_ALC5623=m
-# CONFIG_SND_SOC_BT_SCO is not set
-CONFIG_SND_SOC_CS35L32=m
-CONFIG_SND_SOC_CS35L33=m
-CONFIG_SND_SOC_CS35L34=m
-CONFIG_SND_SOC_CS35L35=m
-CONFIG_SND_SOC_CS42L42=m
-CONFIG_SND_SOC_CS42L51=m
-CONFIG_SND_SOC_CS42L51_I2C=m
-CONFIG_SND_SOC_CS42L52=m
-CONFIG_SND_SOC_CS42L56=m
-CONFIG_SND_SOC_CS42L73=m
-CONFIG_SND_SOC_CS4265=m
-CONFIG_SND_SOC_CS4270=m
-CONFIG_SND_SOC_CS4271=m
-CONFIG_SND_SOC_CS4271_I2C=m
-CONFIG_SND_SOC_CS4271_SPI=m
-CONFIG_SND_SOC_CS42XX8=m
-CONFIG_SND_SOC_CS42XX8_I2C=m
-CONFIG_SND_SOC_CS43130=m
-CONFIG_SND_SOC_CS4349=m
-CONFIG_SND_SOC_CS53L30=m
-CONFIG_SND_SOC_DA7213=m
-CONFIG_SND_SOC_DA7219=m
-CONFIG_SND_SOC_DIO2125=m
-CONFIG_SND_SOC_DMIC=m
-CONFIG_SND_SOC_HDMI_CODEC=m
-CONFIG_SND_SOC_ES7134=m
-CONFIG_SND_SOC_ES8316=m
-CONFIG_SND_SOC_ES8328=m
-CONFIG_SND_SOC_ES8328_I2C=m
-CONFIG_SND_SOC_ES8328_SPI=m
-CONFIG_SND_SOC_GTM601=m
-CONFIG_SND_SOC_HDAC_HDMI=m
-CONFIG_SND_SOC_INNO_RK3036=m
-CONFIG_SND_SOC_MAX98090=m
-CONFIG_SND_SOC_MAX98357A=m
-CONFIG_SND_SOC_MAX98504=m
-CONFIG_SND_SOC_MAX98927=m
-CONFIG_SND_SOC_MAX9860=m
-CONFIG_SND_SOC_MSM8916_WCD_ANALOG=m
-CONFIG_SND_SOC_MSM8916_WCD_DIGITAL=m
-CONFIG_SND_SOC_PCM1681=m
-CONFIG_SND_SOC_PCM179X=m
-CONFIG_SND_SOC_PCM179X_I2C=m
-CONFIG_SND_SOC_PCM179X_SPI=m
-CONFIG_SND_SOC_PCM3168A=m
-CONFIG_SND_SOC_PCM3168A_I2C=m
-CONFIG_SND_SOC_PCM3168A_SPI=m
-CONFIG_SND_SOC_PCM512x=m
-CONFIG_SND_SOC_PCM512x_I2C=m
-CONFIG_SND_SOC_PCM512x_SPI=m
-CONFIG_SND_SOC_RL6231=m
-CONFIG_SND_SOC_RL6347A=m
-CONFIG_SND_SOC_RT286=m
-CONFIG_SND_SOC_RT298=m
-CONFIG_SND_SOC_RT5514=m
-CONFIG_SND_SOC_RT5514_SPI=m
-CONFIG_SND_SOC_RT5616=m
-CONFIG_SND_SOC_RT5631=m
-CONFIG_SND_SOC_RT5640=m
-CONFIG_SND_SOC_RT5645=m
-CONFIG_SND_SOC_RT5651=m
-CONFIG_SND_SOC_RT5663=m
-CONFIG_SND_SOC_RT5670=m
-CONFIG_SND_SOC_RT5677=m
-CONFIG_SND_SOC_RT5677_SPI=m
-CONFIG_SND_SOC_SGTL5000=m
-CONFIG_SND_SOC_SI476X=m
-CONFIG_SND_SOC_SIGMADSP=m
-CONFIG_SND_SOC_SIGMADSP_I2C=m
-CONFIG_SND_SOC_SIGMADSP_REGMAP=m
-CONFIG_SND_SOC_SIRF_AUDIO_CODEC=m
-CONFIG_SND_SOC_SPDIF=m
-CONFIG_SND_SOC_SSM2602=m
-CONFIG_SND_SOC_SSM2602_SPI=m
-CONFIG_SND_SOC_SSM2602_I2C=m
-CONFIG_SND_SOC_SSM4567=m
-CONFIG_SND_SOC_STA32X=m
-CONFIG_SND_SOC_STA350=m
-CONFIG_SND_SOC_STI_SAS=m
-CONFIG_SND_SOC_TAS2552=m
-CONFIG_SND_SOC_TAS5086=m
-CONFIG_SND_SOC_TAS571X=m
-CONFIG_SND_SOC_TAS5720=m
-CONFIG_SND_SOC_TFA9879=m
-CONFIG_SND_SOC_TLV320AIC23=m
-CONFIG_SND_SOC_TLV320AIC23_I2C=m
-CONFIG_SND_SOC_TLV320AIC23_SPI=m
-CONFIG_SND_SOC_TLV320AIC31XX=m
-CONFIG_SND_SOC_TLV320AIC3X=m
-CONFIG_SND_SOC_TS3A227E=m
-CONFIG_SND_SOC_WM8510=m
-CONFIG_SND_SOC_WM8523=m
-CONFIG_SND_SOC_WM8524=m
-CONFIG_SND_SOC_WM8580=m
-CONFIG_SND_SOC_WM8711=m
-CONFIG_SND_SOC_WM8728=m
-CONFIG_SND_SOC_WM8731=m
-CONFIG_SND_SOC_WM8737=m
-CONFIG_SND_SOC_WM8741=m
-CONFIG_SND_SOC_WM8750=m
-CONFIG_SND_SOC_WM8753=m
-CONFIG_SND_SOC_WM8770=m
-CONFIG_SND_SOC_WM8776=m
-CONFIG_SND_SOC_WM8804=m
-CONFIG_SND_SOC_WM8804_I2C=m
-CONFIG_SND_SOC_WM8804_SPI=m
-CONFIG_SND_SOC_WM8903=m
-CONFIG_SND_SOC_WM8960=m
-CONFIG_SND_SOC_WM8962=m
-CONFIG_SND_SOC_WM8974=m
-CONFIG_SND_SOC_WM8978=m
-CONFIG_SND_SOC_WM8985=m
-CONFIG_SND_SOC_ZX_AUD96P22=m
-CONFIG_SND_SOC_NAU8540=m
-CONFIG_SND_SOC_NAU8810=m
-CONFIG_SND_SOC_NAU8824=m
-CONFIG_SND_SOC_NAU8825=m
-CONFIG_SND_SOC_TPA6130A2=m
-CONFIG_SND_SIMPLE_CARD_UTILS=m
-CONFIG_SND_SIMPLE_CARD=m
-CONFIG_SND_X86=y
-CONFIG_HDMI_LPE_AUDIO=m
-CONFIG_SND_SYNTH_EMUX=m
-CONFIG_AC97_BUS=m
-
-#
-# HID support
-#
-CONFIG_HID=m
-CONFIG_HID_BATTERY_STRENGTH=y
-CONFIG_HIDRAW=y
-CONFIG_UHID=m
-CONFIG_HID_GENERIC=m
-
-#
-# Special HID drivers
-#
-CONFIG_HID_A4TECH=m
-CONFIG_HID_ACCUTOUCH=m
-CONFIG_HID_ACRUX=m
-CONFIG_HID_ACRUX_FF=y
-CONFIG_HID_APPLE=m
-CONFIG_HID_APPLEIR=m
-CONFIG_HID_ASUS=m
-CONFIG_HID_AUREAL=m
-CONFIG_HID_BELKIN=m
-CONFIG_HID_BETOP_FF=m
-CONFIG_HID_CHERRY=m
-CONFIG_HID_CHICONY=m
-CONFIG_HID_CORSAIR=m
-CONFIG_HID_PRODIKEYS=m
-CONFIG_HID_CMEDIA=m
-CONFIG_HID_CP2112=m
-CONFIG_HID_CYPRESS=m
-CONFIG_HID_DRAGONRISE=m
-CONFIG_DRAGONRISE_FF=y
-CONFIG_HID_EMS_FF=m
-CONFIG_HID_ELECOM=m
-CONFIG_HID_ELO=m
-CONFIG_HID_EZKEY=m
-CONFIG_HID_GEMBIRD=m
-CONFIG_HID_GFRM=m
-CONFIG_HID_HOLTEK=m
-CONFIG_HOLTEK_FF=y
-CONFIG_HID_GT683R=m
-CONFIG_HID_KEYTOUCH=m
-CONFIG_HID_KYE=m
-CONFIG_HID_UCLOGIC=m
-CONFIG_HID_WALTOP=m
-CONFIG_HID_GYRATION=m
-CONFIG_HID_ICADE=m
-CONFIG_HID_ITE=m
-CONFIG_HID_TWINHAN=m
-CONFIG_HID_KENSINGTON=m
-CONFIG_HID_LCPOWER=m
-CONFIG_HID_LED=m
-CONFIG_HID_LENOVO=m
-CONFIG_HID_LOGITECH=m
-CONFIG_HID_LOGITECH_DJ=m
-CONFIG_HID_LOGITECH_HIDPP=m
-CONFIG_LOGITECH_FF=y
-CONFIG_LOGIRUMBLEPAD2_FF=y
-CONFIG_LOGIG940_FF=y
-CONFIG_LOGIWHEELS_FF=y
-CONFIG_HID_MAGICMOUSE=m
-CONFIG_HID_MAYFLASH=m
-CONFIG_HID_MICROSOFT=m
-CONFIG_HID_MONTEREY=m
-CONFIG_HID_MULTITOUCH=m
-CONFIG_HID_NTI=m
-CONFIG_HID_NTRIG=m
-CONFIG_HID_ORTEK=m
-CONFIG_HID_PANTHERLORD=m
-CONFIG_PANTHERLORD_FF=y
-CONFIG_HID_PENMOUNT=m
-CONFIG_HID_PETALYNX=m
-CONFIG_HID_PICOLCD=m
-CONFIG_HID_PICOLCD_FB=y
-CONFIG_HID_PICOLCD_BACKLIGHT=y
-CONFIG_HID_PICOLCD_LCD=y
-CONFIG_HID_PICOLCD_LEDS=y
-CONFIG_HID_PICOLCD_CIR=y
-CONFIG_HID_PLANTRONICS=m
-CONFIG_HID_PRIMAX=m
-CONFIG_HID_RETRODE=m
-CONFIG_HID_ROCCAT=m
-CONFIG_HID_SAITEK=m
-CONFIG_HID_SAMSUNG=m
-CONFIG_HID_SONY=m
-CONFIG_SONY_FF=y
-CONFIG_HID_SPEEDLINK=m
-CONFIG_HID_STEELSERIES=m
-CONFIG_HID_SUNPLUS=m
-CONFIG_HID_RMI=m
-CONFIG_HID_GREENASIA=m
-CONFIG_GREENASIA_FF=y
-CONFIG_HID_HYPERV_MOUSE=m
-CONFIG_HID_SMARTJOYPLUS=m
-CONFIG_SMARTJOYPLUS_FF=y
-CONFIG_HID_TIVO=m
-CONFIG_HID_TOPSEED=m
-CONFIG_HID_THINGM=m
-CONFIG_HID_THRUSTMASTER=m
-CONFIG_THRUSTMASTER_FF=y
-CONFIG_HID_UDRAW_PS3=m
-CONFIG_HID_WACOM=m
-CONFIG_HID_WIIMOTE=m
-CONFIG_HID_XINMO=m
-CONFIG_HID_ZEROPLUS=m
-CONFIG_ZEROPLUS_FF=y
-CONFIG_HID_ZYDACRON=m
-CONFIG_HID_SENSOR_HUB=m
-CONFIG_HID_SENSOR_CUSTOM_SENSOR=m
-CONFIG_HID_ALPS=m
-
-#
-# USB HID support
-#
-CONFIG_USB_HID=m
-CONFIG_HID_PID=y
-CONFIG_USB_HIDDEV=y
-
-#
-# I2C HID support
-#
-CONFIG_I2C_HID=m
-
-#
-# Intel ISH HID support
-#
-CONFIG_INTEL_ISH_HID=m
-CONFIG_USB_OHCI_LITTLE_ENDIAN=y
-CONFIG_USB_SUPPORT=y
-CONFIG_USB_COMMON=y
-CONFIG_USB_ARCH_HAS_HCD=y
-CONFIG_USB=m
-CONFIG_USB_PCI=y
-CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
-
-#
-# Miscellaneous USB options
-#
-CONFIG_USB_DEFAULT_PERSIST=y
-CONFIG_USB_DYNAMIC_MINORS=y
-CONFIG_USB_OTG=y
-# CONFIG_USB_OTG_WHITELIST is not set
-# CONFIG_USB_OTG_BLACKLIST_HUB is not set
-CONFIG_USB_OTG_FSM=m
-CONFIG_USB_LEDS_TRIGGER_USBPORT=m
-CONFIG_USB_MON=m
-CONFIG_USB_WUSB=m
-CONFIG_USB_WUSB_CBAF=m
-# CONFIG_USB_WUSB_CBAF_DEBUG is not set
-
-#
-# USB Host Controller Drivers
-#
-CONFIG_USB_C67X00_HCD=m
-CONFIG_USB_XHCI_HCD=m
-CONFIG_USB_XHCI_PCI=m
-CONFIG_USB_XHCI_PLATFORM=m
-CONFIG_USB_EHCI_HCD=m
-CONFIG_USB_EHCI_ROOT_HUB_TT=y
-CONFIG_USB_EHCI_TT_NEWSCHED=y
-CONFIG_USB_EHCI_PCI=m
-CONFIG_USB_EHCI_HCD_PLATFORM=m
-CONFIG_USB_OXU210HP_HCD=m
-CONFIG_USB_ISP116X_HCD=m
-CONFIG_USB_ISP1362_HCD=m
-CONFIG_USB_FOTG210_HCD=m
-CONFIG_USB_MAX3421_HCD=m
-CONFIG_USB_OHCI_HCD=m
-CONFIG_USB_OHCI_HCD_PCI=m
-CONFIG_USB_OHCI_HCD_SSB=y
-CONFIG_USB_OHCI_HCD_PLATFORM=m
-CONFIG_USB_UHCI_HCD=m
-CONFIG_USB_U132_HCD=m
-CONFIG_USB_SL811_HCD=m
-# CONFIG_USB_SL811_HCD_ISO is not set
-CONFIG_USB_SL811_CS=m
-CONFIG_USB_R8A66597_HCD=m
-CONFIG_USB_WHCI_HCD=m
-CONFIG_USB_HWA_HCD=m
-CONFIG_USB_HCD_BCMA=m
-CONFIG_USB_HCD_SSB=m
-# CONFIG_USB_HCD_TEST_MODE is not set
-
-#
-# USB Device Class drivers
-#
-CONFIG_USB_ACM=m
-CONFIG_USB_PRINTER=m
-CONFIG_USB_WDM=m
-CONFIG_USB_TMC=m
-
-#
-# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
-#
-
-#
-# also be needed; see USB_STORAGE Help for more info
-#
-CONFIG_USB_STORAGE=m
-# CONFIG_USB_STORAGE_DEBUG is not set
-CONFIG_USB_STORAGE_REALTEK=m
-CONFIG_REALTEK_AUTOPM=y
-CONFIG_USB_STORAGE_DATAFAB=m
-CONFIG_USB_STORAGE_FREECOM=m
-CONFIG_USB_STORAGE_ISD200=m
-CONFIG_USB_STORAGE_USBAT=m
-CONFIG_USB_STORAGE_SDDR09=m
-CONFIG_USB_STORAGE_SDDR55=m
-CONFIG_USB_STORAGE_JUMPSHOT=m
-CONFIG_USB_STORAGE_ALAUDA=m
-CONFIG_USB_STORAGE_ONETOUCH=m
-CONFIG_USB_STORAGE_KARMA=m
-CONFIG_USB_STORAGE_CYPRESS_ATACB=m
-CONFIG_USB_STORAGE_ENE_UB6250=m
-CONFIG_USB_UAS=m
-
-#
-# USB Imaging devices
-#
-CONFIG_USB_MDC800=m
-CONFIG_USB_MICROTEK=m
-CONFIG_USBIP_CORE=m
-CONFIG_USBIP_VHCI_HCD=m
-CONFIG_USBIP_VHCI_HC_PORTS=8
-CONFIG_USBIP_VHCI_NR_HCS=1
-CONFIG_USBIP_HOST=m
-CONFIG_USBIP_VUDC=m
-# CONFIG_USBIP_DEBUG is not set
-CONFIG_USB_MUSB_HDRC=m
-# CONFIG_USB_MUSB_HOST is not set
-# CONFIG_USB_MUSB_GADGET is not set
-CONFIG_USB_MUSB_DUAL_ROLE=y
-
-#
-# Platform Glue Layer
-#
-
-#
-# MUSB DMA mode
-#
-CONFIG_MUSB_PIO_ONLY=y
-CONFIG_USB_DWC3=m
-# CONFIG_USB_DWC3_ULPI is not set
-# CONFIG_USB_DWC3_HOST is not set
-# CONFIG_USB_DWC3_GADGET is not set
-CONFIG_USB_DWC3_DUAL_ROLE=y
-
-#
-# Platform Glue Driver Support
-#
-CONFIG_USB_DWC3_PCI=m
-CONFIG_USB_DWC2=m
-# CONFIG_USB_DWC2_HOST is not set
-
-#
-# Gadget/Dual-role mode requires USB Gadget support to be enabled
-#
-# CONFIG_USB_DWC2_PERIPHERAL is not set
-CONFIG_USB_DWC2_DUAL_ROLE=y
-CONFIG_USB_DWC2_PCI=m
-# CONFIG_USB_DWC2_DEBUG is not set
-# CONFIG_USB_DWC2_TRACK_MISSED_SOFS is not set
-CONFIG_USB_CHIPIDEA=m
-CONFIG_USB_CHIPIDEA_PCI=m
-CONFIG_USB_CHIPIDEA_UDC=y
-CONFIG_USB_CHIPIDEA_HOST=y
-# CONFIG_USB_CHIPIDEA_ULPI is not set
-CONFIG_USB_ISP1760=m
-CONFIG_USB_ISP1760_HCD=y
-CONFIG_USB_ISP1761_UDC=y
-# CONFIG_USB_ISP1760_HOST_ROLE is not set
-# CONFIG_USB_ISP1760_GADGET_ROLE is not set
-CONFIG_USB_ISP1760_DUAL_ROLE=y
-
-#
-# USB port drivers
-#
-CONFIG_USB_USS720=m
-CONFIG_USB_SERIAL=m
-CONFIG_USB_SERIAL_GENERIC=y
-CONFIG_USB_SERIAL_SIMPLE=m
-CONFIG_USB_SERIAL_AIRCABLE=m
-CONFIG_USB_SERIAL_ARK3116=m
-CONFIG_USB_SERIAL_BELKIN=m
-CONFIG_USB_SERIAL_CH341=m
-CONFIG_USB_SERIAL_WHITEHEAT=m
-CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m
-CONFIG_USB_SERIAL_CP210X=m
-CONFIG_USB_SERIAL_CYPRESS_M8=m
-CONFIG_USB_SERIAL_EMPEG=m
-CONFIG_USB_SERIAL_FTDI_SIO=m
-CONFIG_USB_SERIAL_VISOR=m
-CONFIG_USB_SERIAL_IPAQ=m
-CONFIG_USB_SERIAL_IR=m
-CONFIG_USB_SERIAL_EDGEPORT=m
-CONFIG_USB_SERIAL_EDGEPORT_TI=m
-CONFIG_USB_SERIAL_F81232=m
-CONFIG_USB_SERIAL_F8153X=m
-CONFIG_USB_SERIAL_GARMIN=m
-CONFIG_USB_SERIAL_IPW=m
-CONFIG_USB_SERIAL_IUU=m
-CONFIG_USB_SERIAL_KEYSPAN_PDA=m
-CONFIG_USB_SERIAL_KEYSPAN=m
-CONFIG_USB_SERIAL_KLSI=m
-CONFIG_USB_SERIAL_KOBIL_SCT=m
-CONFIG_USB_SERIAL_MCT_U232=m
-CONFIG_USB_SERIAL_METRO=m
-CONFIG_USB_SERIAL_MOS7720=m
-CONFIG_USB_SERIAL_MOS7715_PARPORT=y
-CONFIG_USB_SERIAL_MOS7840=m
-CONFIG_USB_SERIAL_MXUPORT=m
-CONFIG_USB_SERIAL_NAVMAN=m
-CONFIG_USB_SERIAL_PL2303=m
-CONFIG_USB_SERIAL_OTI6858=m
-CONFIG_USB_SERIAL_QCAUX=m
-CONFIG_USB_SERIAL_QUALCOMM=m
-CONFIG_USB_SERIAL_SPCP8X5=m
-CONFIG_USB_SERIAL_SAFE=m
-CONFIG_USB_SERIAL_SAFE_PADDED=y
-CONFIG_USB_SERIAL_SIERRAWIRELESS=m
-CONFIG_USB_SERIAL_SYMBOL=m
-CONFIG_USB_SERIAL_TI=m
-CONFIG_USB_SERIAL_CYBERJACK=m
-CONFIG_USB_SERIAL_XIRCOM=m
-CONFIG_USB_SERIAL_WWAN=m
-CONFIG_USB_SERIAL_OPTION=m
-CONFIG_USB_SERIAL_OMNINET=m
-CONFIG_USB_SERIAL_OPTICON=m
-CONFIG_USB_SERIAL_XSENS_MT=m
-CONFIG_USB_SERIAL_WISHBONE=m
-CONFIG_USB_SERIAL_SSU100=m
-CONFIG_USB_SERIAL_QT2=m
-# CONFIG_USB_SERIAL_UPD78F0730 is not set
-# CONFIG_USB_SERIAL_DEBUG is not set
-
-#
-# USB Miscellaneous drivers
-#
-CONFIG_USB_EMI62=m
-CONFIG_USB_EMI26=m
-CONFIG_USB_ADUTUX=m
-CONFIG_USB_SEVSEG=m
-CONFIG_USB_RIO500=m
-CONFIG_USB_LEGOTOWER=m
-CONFIG_USB_LCD=m
-CONFIG_USB_CYPRESS_CY7C63=m
-CONFIG_USB_CYTHERM=m
-CONFIG_USB_IDMOUSE=m
-CONFIG_USB_FTDI_ELAN=m
-CONFIG_USB_APPLEDISPLAY=m
-CONFIG_USB_SISUSBVGA=m
-CONFIG_USB_SISUSBVGA_CON=y
-CONFIG_USB_LD=m
-CONFIG_USB_TRANCEVIBRATOR=m
-CONFIG_USB_IOWARRIOR=m
-CONFIG_USB_TEST=m
-CONFIG_USB_EHSET_TEST_FIXTURE=m
-CONFIG_USB_ISIGHTFW=m
-CONFIG_USB_YUREX=m
-CONFIG_USB_EZUSB_FX2=m
-CONFIG_USB_HUB_USB251XB=m
-CONFIG_USB_HSIC_USB3503=m
-CONFIG_USB_HSIC_USB4604=m
-CONFIG_USB_LINK_LAYER_TEST=m
-CONFIG_USB_CHAOSKEY=m
-CONFIG_USB_ATM=m
-CONFIG_USB_SPEEDTOUCH=m
-CONFIG_USB_CXACRU=m
-CONFIG_USB_UEAGLEATM=m
-CONFIG_USB_XUSBATM=m
-
-#
-# USB Physical Layer drivers
-#
-CONFIG_USB_PHY=y
-CONFIG_NOP_USB_XCEIV=m
-CONFIG_USB_GPIO_VBUS=m
-CONFIG_TAHVO_USB=m
-CONFIG_TAHVO_USB_HOST_BY_DEFAULT=y
-CONFIG_USB_ISP1301=m
-CONFIG_USB_GADGET=m
-# CONFIG_USB_GADGET_DEBUG is not set
-# CONFIG_USB_GADGET_DEBUG_FILES is not set
-# CONFIG_USB_GADGET_DEBUG_FS is not set
-CONFIG_USB_GADGET_VBUS_DRAW=2
-CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS=2
-CONFIG_U_SERIAL_CONSOLE=y
-
-#
-# USB Peripheral Controller
-#
-CONFIG_USB_FOTG210_UDC=m
-CONFIG_USB_GR_UDC=m
-CONFIG_USB_R8A66597=m
-CONFIG_USB_PXA27X=m
-CONFIG_USB_MV_UDC=m
-CONFIG_USB_MV_U3D=m
-CONFIG_USB_SNP_CORE=m
-CONFIG_USB_M66592=m
-CONFIG_USB_BDC_UDC=m
-
-#
-# Platform Support
-#
-CONFIG_USB_BDC_PCI=m
-CONFIG_USB_AMD5536UDC=m
-CONFIG_USB_NET2272=m
-CONFIG_USB_NET2272_DMA=y
-CONFIG_USB_NET2280=m
-CONFIG_USB_GOKU=m
-CONFIG_USB_EG20T=m
-# CONFIG_USB_DUMMY_HCD is not set
-CONFIG_USB_LIBCOMPOSITE=m
-CONFIG_USB_F_ACM=m
-CONFIG_USB_F_SS_LB=m
-CONFIG_USB_U_SERIAL=m
-CONFIG_USB_U_ETHER=m
-CONFIG_USB_U_AUDIO=m
-CONFIG_USB_F_SERIAL=m
-CONFIG_USB_F_OBEX=m
-CONFIG_USB_F_NCM=m
-CONFIG_USB_F_ECM=m
-CONFIG_USB_F_PHONET=m
-CONFIG_USB_F_EEM=m
-CONFIG_USB_F_SUBSET=m
-CONFIG_USB_F_RNDIS=m
-CONFIG_USB_F_MASS_STORAGE=m
-CONFIG_USB_F_FS=m
-CONFIG_USB_F_UAC1=m
-CONFIG_USB_F_UAC2=m
-CONFIG_USB_F_UVC=m
-CONFIG_USB_F_MIDI=m
-CONFIG_USB_F_HID=m
-CONFIG_USB_F_PRINTER=m
-CONFIG_USB_F_TCM=m
-CONFIG_USB_CONFIGFS=m
-CONFIG_USB_CONFIGFS_SERIAL=y
-CONFIG_USB_CONFIGFS_ACM=y
-CONFIG_USB_CONFIGFS_OBEX=y
-CONFIG_USB_CONFIGFS_NCM=y
-CONFIG_USB_CONFIGFS_ECM=y
-CONFIG_USB_CONFIGFS_ECM_SUBSET=y
-CONFIG_USB_CONFIGFS_RNDIS=y
-CONFIG_USB_CONFIGFS_EEM=y
-CONFIG_USB_CONFIGFS_PHONET=y
-CONFIG_USB_CONFIGFS_MASS_STORAGE=y
-CONFIG_USB_CONFIGFS_F_LB_SS=y
-CONFIG_USB_CONFIGFS_F_FS=y
-CONFIG_USB_CONFIGFS_F_UAC1=y
-# CONFIG_USB_CONFIGFS_F_UAC1_LEGACY is not set
-CONFIG_USB_CONFIGFS_F_UAC2=y
-CONFIG_USB_CONFIGFS_F_MIDI=y
-CONFIG_USB_CONFIGFS_F_HID=y
-CONFIG_USB_CONFIGFS_F_UVC=y
-CONFIG_USB_CONFIGFS_F_PRINTER=y
-CONFIG_USB_CONFIGFS_F_TCM=y
-CONFIG_USB_ZERO=m
-CONFIG_USB_ZERO_HNPTEST=y
-CONFIG_USB_AUDIO=m
-CONFIG_GADGET_UAC1=y
-# CONFIG_GADGET_UAC1_LEGACY is not set
-CONFIG_USB_ETH=m
-CONFIG_USB_ETH_RNDIS=y
-CONFIG_USB_ETH_EEM=y
-CONFIG_USB_G_NCM=m
-CONFIG_USB_GADGETFS=m
-CONFIG_USB_FUNCTIONFS=m
-CONFIG_USB_FUNCTIONFS_ETH=y
-CONFIG_USB_FUNCTIONFS_RNDIS=y
-CONFIG_USB_FUNCTIONFS_GENERIC=y
-CONFIG_USB_MASS_STORAGE=m
-CONFIG_USB_GADGET_TARGET=m
-CONFIG_USB_G_SERIAL=m
-CONFIG_USB_MIDI_GADGET=m
-CONFIG_USB_G_PRINTER=m
-CONFIG_USB_CDC_COMPOSITE=m
-CONFIG_USB_G_NOKIA=m
-CONFIG_USB_G_ACM_MS=m
-CONFIG_USB_G_MULTI=m
-CONFIG_USB_G_MULTI_RNDIS=y
-CONFIG_USB_G_MULTI_CDC=y
-CONFIG_USB_G_HID=m
-# CONFIG_USB_G_DBGP is not set
-CONFIG_USB_G_WEBCAM=m
-
-#
-# USB Power Delivery and Type-C drivers
-#
-CONFIG_TYPEC=m
-CONFIG_TYPEC_UCSI=m
-CONFIG_UCSI_ACPI=m
-CONFIG_USB_LED_TRIG=y
-CONFIG_USB_ULPI_BUS=m
-CONFIG_UWB=m
-CONFIG_UWB_HWA=m
-CONFIG_UWB_WHCI=m
-CONFIG_UWB_I1480U=m
-CONFIG_MMC=m
-CONFIG_MMC_BLOCK=m
-CONFIG_MMC_BLOCK_MINORS=8
-CONFIG_SDIO_UART=m
-# CONFIG_MMC_TEST is not set
-
-#
-# MMC/SD/SDIO Host Controller Drivers
-#
-# CONFIG_MMC_DEBUG is not set
-CONFIG_MMC_SDHCI=m
-CONFIG_MMC_SDHCI_PCI=m
-CONFIG_MMC_RICOH_MMC=y
-CONFIG_MMC_SDHCI_ACPI=m
-CONFIG_MMC_SDHCI_PLTFM=m
-CONFIG_MMC_WBSD=m
-CONFIG_MMC_TIFM_SD=m
-CONFIG_MMC_SPI=m
-CONFIG_MMC_SDRICOH_CS=m
-CONFIG_MMC_CB710=m
-CONFIG_MMC_VIA_SDMMC=m
-CONFIG_MMC_VUB300=m
-CONFIG_MMC_USHC=m
-CONFIG_MMC_USDHI6ROL0=m
-CONFIG_MMC_REALTEK_PCI=m
-CONFIG_MMC_REALTEK_USB=m
-CONFIG_MMC_TOSHIBA_PCI=m
-CONFIG_MMC_MTK=m
-CONFIG_MMC_SDHCI_XENON=m
-CONFIG_MEMSTICK=m
-# CONFIG_MEMSTICK_DEBUG is not set
-
-#
-# MemoryStick drivers
-#
-# CONFIG_MEMSTICK_UNSAFE_RESUME is not set
-CONFIG_MSPRO_BLOCK=m
-CONFIG_MS_BLOCK=m
-
-#
-# MemoryStick Host Controller Drivers
-#
-CONFIG_MEMSTICK_TIFM_MS=m
-CONFIG_MEMSTICK_JMICRON_38X=m
-CONFIG_MEMSTICK_R592=m
-CONFIG_MEMSTICK_REALTEK_PCI=m
-CONFIG_MEMSTICK_REALTEK_USB=m
-CONFIG_NEW_LEDS=y
-CONFIG_LEDS_CLASS=y
-CONFIG_LEDS_CLASS_FLASH=m
-# CONFIG_LEDS_BRIGHTNESS_HW_CHANGED is not set
-
-#
-# LED drivers
-#
-CONFIG_LEDS_AS3645A=m
-CONFIG_LEDS_LM3530=m
-CONFIG_LEDS_LM3533=m
-CONFIG_LEDS_LM3642=m
-CONFIG_LEDS_MT6323=m
-CONFIG_LEDS_PCA9532=m
-CONFIG_LEDS_PCA9532_GPIO=y
-CONFIG_LEDS_GPIO=m
-CONFIG_LEDS_LP3944=m
-CONFIG_LEDS_LP3952=m
-CONFIG_LEDS_LP55XX_COMMON=m
-CONFIG_LEDS_LP5521=m
-CONFIG_LEDS_LP5523=m
-CONFIG_LEDS_LP5562=m
-CONFIG_LEDS_LP8501=m
-CONFIG_LEDS_LP8860=m
-CONFIG_LEDS_CLEVO_MAIL=m
-CONFIG_LEDS_PCA955X=m
-# CONFIG_LEDS_PCA955X_GPIO is not set
-CONFIG_LEDS_PCA963X=m
-CONFIG_LEDS_WM831X_STATUS=m
-CONFIG_LEDS_DA9052=m
-CONFIG_LEDS_DAC124S085=m
-CONFIG_LEDS_PWM=m
-CONFIG_LEDS_REGULATOR=m
-CONFIG_LEDS_BD2802=m
-CONFIG_LEDS_INTEL_SS4200=m
-CONFIG_LEDS_LT3593=m
-CONFIG_LEDS_MC13783=m
-CONFIG_LEDS_TCA6507=m
-CONFIG_LEDS_TLC591XX=m
-CONFIG_LEDS_LM355x=m
-CONFIG_LEDS_MENF21BMC=m
-
-#
-# LED driver for blink(1) USB RGB LED is under Special HID drivers (HID_THINGM)
-#
-CONFIG_LEDS_BLINKM=m
-CONFIG_LEDS_MLXCPLD=m
-CONFIG_LEDS_USER=m
-CONFIG_LEDS_NIC78BX=m
-
-#
-# LED Triggers
-#
-CONFIG_LEDS_TRIGGERS=y
-CONFIG_LEDS_TRIGGER_TIMER=m
-CONFIG_LEDS_TRIGGER_ONESHOT=m
-CONFIG_LEDS_TRIGGER_DISK=y
-# CONFIG_LEDS_TRIGGER_MTD is not set
-CONFIG_LEDS_TRIGGER_HEARTBEAT=m
-CONFIG_LEDS_TRIGGER_BACKLIGHT=m
-CONFIG_LEDS_TRIGGER_CPU=y
-CONFIG_LEDS_TRIGGER_GPIO=m
-CONFIG_LEDS_TRIGGER_DEFAULT_ON=m
-
-#
-# iptables trigger is under Netfilter config (LED target)
-#
-CONFIG_LEDS_TRIGGER_TRANSIENT=m
-CONFIG_LEDS_TRIGGER_CAMERA=m
-CONFIG_LEDS_TRIGGER_PANIC=y
-CONFIG_ACCESSIBILITY=y
-CONFIG_A11Y_BRAILLE_CONSOLE=y
-CONFIG_INFINIBAND=m
-CONFIG_INFINIBAND_USER_MAD=m
-CONFIG_INFINIBAND_USER_ACCESS=m
-# CONFIG_INFINIBAND_EXP_USER_ACCESS is not set
-CONFIG_INFINIBAND_USER_MEM=y
-CONFIG_INFINIBAND_ON_DEMAND_PAGING=y
-CONFIG_INFINIBAND_ADDR_TRANS=y
-CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS=y
-CONFIG_INFINIBAND_MTHCA=m
-CONFIG_INFINIBAND_MTHCA_DEBUG=y
-CONFIG_INFINIBAND_QIB=m
-CONFIG_INFINIBAND_QIB_DCA=y
-CONFIG_INFINIBAND_CXGB3=m
-# CONFIG_INFINIBAND_CXGB3_DEBUG is not set
-CONFIG_INFINIBAND_CXGB4=m
-CONFIG_INFINIBAND_I40IW=m
-CONFIG_MLX4_INFINIBAND=m
-CONFIG_MLX5_INFINIBAND=m
-CONFIG_INFINIBAND_NES=m
-# CONFIG_INFINIBAND_NES_DEBUG is not set
-CONFIG_INFINIBAND_OCRDMA=m
-CONFIG_INFINIBAND_VMWARE_PVRDMA=m
-CONFIG_INFINIBAND_USNIC=m
-CONFIG_INFINIBAND_IPOIB=m
-CONFIG_INFINIBAND_IPOIB_CM=y
-CONFIG_INFINIBAND_IPOIB_DEBUG=y
-# CONFIG_INFINIBAND_IPOIB_DEBUG_DATA is not set
-CONFIG_INFINIBAND_SRP=m
-CONFIG_INFINIBAND_SRPT=m
-CONFIG_INFINIBAND_ISER=m
-CONFIG_INFINIBAND_ISERT=m
-CONFIG_INFINIBAND_OPA_VNIC=m
-CONFIG_INFINIBAND_RDMAVT=m
-CONFIG_RDMA_RXE=m
-CONFIG_INFINIBAND_HFI1=m
-# CONFIG_HFI1_DEBUG_SDMA_ORDER is not set
-# CONFIG_SDMA_VERBOSITY is not set
-CONFIG_INFINIBAND_QEDR=m
-CONFIG_INFINIBAND_BNXT_RE=m
-CONFIG_EDAC_ATOMIC_SCRUB=y
-CONFIG_EDAC_SUPPORT=y
-CONFIG_EDAC=y
-CONFIG_EDAC_LEGACY_SYSFS=y
-# CONFIG_EDAC_DEBUG is not set
-CONFIG_EDAC_DECODE_MCE=m
-CONFIG_EDAC_GHES=y
-CONFIG_EDAC_AMD64=m
-CONFIG_EDAC_AMD64_ERROR_INJECTION=y
-CONFIG_EDAC_E752X=m
-CONFIG_EDAC_I82975X=m
-CONFIG_EDAC_I3000=m
-CONFIG_EDAC_I3200=m
-CONFIG_EDAC_IE31200=m
-CONFIG_EDAC_X38=m
-CONFIG_EDAC_I5400=m
-CONFIG_EDAC_I7CORE=m
-CONFIG_EDAC_I5000=m
-CONFIG_EDAC_I5100=m
-CONFIG_EDAC_I7300=m
-CONFIG_EDAC_SBRIDGE=m
-CONFIG_EDAC_SKX=m
-CONFIG_EDAC_PND2=m
-CONFIG_RTC_LIB=y
-CONFIG_RTC_MC146818_LIB=y
-CONFIG_RTC_CLASS=y
-CONFIG_RTC_HCTOSYS=y
-CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
-CONFIG_RTC_SYSTOHC=y
-CONFIG_RTC_SYSTOHC_DEVICE="rtc0"
-# CONFIG_RTC_DEBUG is not set
-CONFIG_RTC_NVMEM=y
-
-#
-# RTC interfaces
-#
-CONFIG_RTC_INTF_SYSFS=y
-CONFIG_RTC_INTF_PROC=y
-CONFIG_RTC_INTF_DEV=y
-CONFIG_RTC_INTF_DEV_UIE_EMUL=y
-# CONFIG_RTC_DRV_TEST is not set
-
-#
-# I2C RTC drivers
-#
-CONFIG_RTC_DRV_88PM80X=m
-CONFIG_RTC_DRV_ABB5ZES3=m
-CONFIG_RTC_DRV_ABX80X=m
-CONFIG_RTC_DRV_DS1307=m
-CONFIG_RTC_DRV_DS1307_HWMON=y
-# CONFIG_RTC_DRV_DS1307_CENTURY is not set
-CONFIG_RTC_DRV_DS1374=m
-CONFIG_RTC_DRV_DS1374_WDT=y
-CONFIG_RTC_DRV_DS1672=m
-CONFIG_RTC_DRV_MAX6900=m
-CONFIG_RTC_DRV_MAX8907=m
-CONFIG_RTC_DRV_RS5C372=m
-CONFIG_RTC_DRV_ISL1208=m
-CONFIG_RTC_DRV_ISL12022=m
-CONFIG_RTC_DRV_X1205=m
-CONFIG_RTC_DRV_PCF8523=m
-CONFIG_RTC_DRV_PCF85063=m
-CONFIG_RTC_DRV_PCF8563=m
-CONFIG_RTC_DRV_PCF8583=m
-CONFIG_RTC_DRV_M41T80=m
-CONFIG_RTC_DRV_M41T80_WDT=y
-CONFIG_RTC_DRV_BQ32K=m
-CONFIG_RTC_DRV_S35390A=m
-CONFIG_RTC_DRV_FM3130=m
-CONFIG_RTC_DRV_RX8010=m
-CONFIG_RTC_DRV_RX8581=m
-CONFIG_RTC_DRV_RX8025=m
-CONFIG_RTC_DRV_EM3027=m
-CONFIG_RTC_DRV_RV8803=m
-
-#
-# SPI RTC drivers
-#
-CONFIG_RTC_DRV_M41T93=m
-CONFIG_RTC_DRV_M41T94=m
-CONFIG_RTC_DRV_DS1302=m
-CONFIG_RTC_DRV_DS1305=m
-CONFIG_RTC_DRV_DS1343=m
-CONFIG_RTC_DRV_DS1347=m
-CONFIG_RTC_DRV_DS1390=m
-CONFIG_RTC_DRV_MAX6916=m
-CONFIG_RTC_DRV_R9701=m
-CONFIG_RTC_DRV_RX4581=m
-CONFIG_RTC_DRV_RX6110=m
-CONFIG_RTC_DRV_RS5C348=m
-CONFIG_RTC_DRV_MAX6902=m
-CONFIG_RTC_DRV_PCF2123=m
-CONFIG_RTC_DRV_MCP795=m
-CONFIG_RTC_I2C_AND_SPI=m
-
-#
-# SPI and I2C RTC drivers
-#
-CONFIG_RTC_DRV_DS3232=m
-CONFIG_RTC_DRV_DS3232_HWMON=y
-CONFIG_RTC_DRV_PCF2127=m
-CONFIG_RTC_DRV_RV3029C2=m
-CONFIG_RTC_DRV_RV3029_HWMON=y
-
-#
-# Platform RTC drivers
-#
-CONFIG_RTC_DRV_CMOS=y
-CONFIG_RTC_DRV_DS1286=m
-CONFIG_RTC_DRV_DS1511=m
-CONFIG_RTC_DRV_DS1553=m
-CONFIG_RTC_DRV_DS1685_FAMILY=m
-CONFIG_RTC_DRV_DS1685=y
-# CONFIG_RTC_DRV_DS1689 is not set
-# CONFIG_RTC_DRV_DS17285 is not set
-# CONFIG_RTC_DRV_DS17485 is not set
-# CONFIG_RTC_DRV_DS17885 is not set
-CONFIG_RTC_DS1685_PROC_REGS=y
-CONFIG_RTC_DS1685_SYSFS_REGS=y
-CONFIG_RTC_DRV_DS1742=m
-CONFIG_RTC_DRV_DS2404=m
-CONFIG_RTC_DRV_DA9052=m
-CONFIG_RTC_DRV_DA9063=m
-CONFIG_RTC_DRV_STK17TA8=m
-CONFIG_RTC_DRV_M48T86=m
-CONFIG_RTC_DRV_M48T35=m
-CONFIG_RTC_DRV_M48T59=m
-CONFIG_RTC_DRV_MSM6242=m
-CONFIG_RTC_DRV_BQ4802=m
-CONFIG_RTC_DRV_RP5C01=m
-CONFIG_RTC_DRV_V3020=m
-CONFIG_RTC_DRV_WM831X=m
-CONFIG_RTC_DRV_PCF50633=m
-
-#
-# on-CPU RTC drivers
-#
-CONFIG_RTC_DRV_FTRTC010=m
-CONFIG_RTC_DRV_PCAP=m
-CONFIG_RTC_DRV_MC13XXX=m
-CONFIG_RTC_DRV_MT6397=m
-
-#
-# HID Sensor RTC drivers
-#
-CONFIG_RTC_DRV_HID_SENSOR_TIME=m
-CONFIG_DMADEVICES=y
-# CONFIG_DMADEVICES_DEBUG is not set
-
-#
-# DMA Devices
-#
-CONFIG_DMA_ENGINE=y
-CONFIG_DMA_VIRTUAL_CHANNELS=y
-CONFIG_DMA_ACPI=y
-CONFIG_ALTERA_MSGDMA=m
-CONFIG_INTEL_IDMA64=m
-CONFIG_INTEL_IOATDMA=m
-CONFIG_INTEL_MIC_X100_DMA=m
-CONFIG_QCOM_HIDMA_MGMT=m
-CONFIG_QCOM_HIDMA=m
-CONFIG_DW_DMAC_CORE=y
-CONFIG_DW_DMAC=m
-CONFIG_DW_DMAC_PCI=y
-CONFIG_HSU_DMA=y
-
-#
-# DMA Clients
-#
-CONFIG_ASYNC_TX_DMA=y
-# CONFIG_DMATEST is not set
-CONFIG_DMA_ENGINE_RAID=y
-
-#
-# DMABUF options
-#
-CONFIG_SYNC_FILE=y
-# CONFIG_SW_SYNC is not set
-CONFIG_DCA=m
-CONFIG_AUXDISPLAY=y
-CONFIG_HD44780=m
-CONFIG_KS0108=m
-CONFIG_KS0108_PORT=0x378
-CONFIG_KS0108_DELAY=2
-CONFIG_CFAG12864B=m
-CONFIG_CFAG12864B_RATE=20
-CONFIG_IMG_ASCII_LCD=m
-CONFIG_PANEL=m
-CONFIG_PANEL_PARPORT=0
-CONFIG_PANEL_PROFILE=5
-# CONFIG_PANEL_CHANGE_MESSAGE is not set
-CONFIG_CHARLCD=m
-CONFIG_UIO=m
-CONFIG_UIO_CIF=m
-CONFIG_UIO_PDRV_GENIRQ=m
-CONFIG_UIO_DMEM_GENIRQ=m
-CONFIG_UIO_AEC=m
-CONFIG_UIO_SERCOS3=m
-CONFIG_UIO_PCI_GENERIC=m
-CONFIG_UIO_NETX=m
-CONFIG_UIO_PRUSS=m
-CONFIG_UIO_MF624=m
-CONFIG_UIO_HV_GENERIC=m
-CONFIG_VFIO_IOMMU_TYPE1=m
-CONFIG_VFIO_VIRQFD=m
-CONFIG_VFIO=m
-# CONFIG_VFIO_NOIOMMU is not set
-CONFIG_VFIO_PCI=m
-CONFIG_VFIO_PCI_VGA=y
-CONFIG_VFIO_PCI_MMAP=y
-CONFIG_VFIO_PCI_INTX=y
-CONFIG_VFIO_PCI_IGD=y
-CONFIG_VFIO_MDEV=m
-CONFIG_VFIO_MDEV_DEVICE=m
-CONFIG_IRQ_BYPASS_MANAGER=m
-CONFIG_VIRT_DRIVERS=y
-CONFIG_VIRTIO=m
-
-#
-# Virtio drivers
-#
-CONFIG_VIRTIO_PCI=m
-CONFIG_VIRTIO_PCI_LEGACY=y
-CONFIG_VIRTIO_BALLOON=m
-CONFIG_VIRTIO_INPUT=m
-CONFIG_VIRTIO_MMIO=m
-CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y
-
-#
-# Microsoft Hyper-V guest support
-#
-CONFIG_HYPERV=m
-CONFIG_HYPERV_TSCPAGE=y
-CONFIG_HYPERV_UTILS=m
-CONFIG_HYPERV_BALLOON=m
-CONFIG_STAGING=y
-CONFIG_IRDA=m
-
-#
-# IrDA protocols
-#
-CONFIG_IRLAN=m
-CONFIG_IRNET=m
-CONFIG_IRCOMM=m
-CONFIG_IRDA_ULTRA=y
-
-#
-# IrDA options
-#
-CONFIG_IRDA_CACHE_LAST_LSAP=y
-CONFIG_IRDA_FAST_RR=y
-# CONFIG_IRDA_DEBUG is not set
-
-#
-# Infrared-port device drivers
-#
-
-#
-# SIR device drivers
-#
-CONFIG_IRTTY_SIR=m
-
-#
-# Dongle support
-#
-CONFIG_DONGLE=y
-CONFIG_ESI_DONGLE=m
-CONFIG_ACTISYS_DONGLE=m
-CONFIG_TEKRAM_DONGLE=m
-CONFIG_TOIM3232_DONGLE=m
-CONFIG_LITELINK_DONGLE=m
-CONFIG_MA600_DONGLE=m
-CONFIG_GIRBIL_DONGLE=m
-CONFIG_MCP2120_DONGLE=m
-CONFIG_OLD_BELKIN_DONGLE=m
-CONFIG_ACT200L_DONGLE=m
-CONFIG_KINGSUN_DONGLE=m
-CONFIG_KSDAZZLE_DONGLE=m
-CONFIG_KS959_DONGLE=m
-
-#
-# FIR device drivers
-#
-CONFIG_USB_IRDA=m
-CONFIG_SIGMATEL_FIR=m
-CONFIG_NSC_FIR=m
-CONFIG_WINBOND_FIR=m
-CONFIG_SMC_IRCC_FIR=m
-CONFIG_ALI_FIR=m
-CONFIG_VLSI_FIR=m
-CONFIG_VIA_FIR=m
-CONFIG_MCS_FIR=m
-CONFIG_PRISM2_USB=m
-CONFIG_COMEDI=m
-# CONFIG_COMEDI_DEBUG is not set
-CONFIG_COMEDI_DEFAULT_BUF_SIZE_KB=2048
-CONFIG_COMEDI_DEFAULT_BUF_MAXSIZE_KB=20480
-CONFIG_COMEDI_MISC_DRIVERS=y
-CONFIG_COMEDI_BOND=m
-CONFIG_COMEDI_TEST=m
-CONFIG_COMEDI_PARPORT=m
-CONFIG_COMEDI_SERIAL2002=m
-CONFIG_COMEDI_ISA_DRIVERS=y
-CONFIG_COMEDI_PCL711=m
-CONFIG_COMEDI_PCL724=m
-CONFIG_COMEDI_PCL726=m
-CONFIG_COMEDI_PCL730=m
-CONFIG_COMEDI_PCL812=m
-CONFIG_COMEDI_PCL816=m
-CONFIG_COMEDI_PCL818=m
-CONFIG_COMEDI_PCM3724=m
-CONFIG_COMEDI_AMPLC_DIO200_ISA=m
-CONFIG_COMEDI_AMPLC_PC236_ISA=m
-CONFIG_COMEDI_AMPLC_PC263_ISA=m
-CONFIG_COMEDI_RTI800=m
-CONFIG_COMEDI_RTI802=m
-CONFIG_COMEDI_DAC02=m
-CONFIG_COMEDI_DAS16M1=m
-CONFIG_COMEDI_DAS08_ISA=m
-CONFIG_COMEDI_DAS16=m
-CONFIG_COMEDI_DAS800=m
-CONFIG_COMEDI_DAS1800=m
-CONFIG_COMEDI_DAS6402=m
-CONFIG_COMEDI_DT2801=m
-CONFIG_COMEDI_DT2811=m
-CONFIG_COMEDI_DT2814=m
-CONFIG_COMEDI_DT2815=m
-CONFIG_COMEDI_DT2817=m
-CONFIG_COMEDI_DT282X=m
-CONFIG_COMEDI_DMM32AT=m
-CONFIG_COMEDI_FL512=m
-CONFIG_COMEDI_AIO_AIO12_8=m
-CONFIG_COMEDI_AIO_IIRO_16=m
-CONFIG_COMEDI_II_PCI20KC=m
-CONFIG_COMEDI_C6XDIGIO=m
-CONFIG_COMEDI_MPC624=m
-CONFIG_COMEDI_ADQ12B=m
-CONFIG_COMEDI_NI_AT_A2150=m
-CONFIG_COMEDI_NI_AT_AO=m
-CONFIG_COMEDI_NI_ATMIO=m
-CONFIG_COMEDI_NI_ATMIO16D=m
-CONFIG_COMEDI_NI_LABPC_ISA=m
-CONFIG_COMEDI_PCMAD=m
-CONFIG_COMEDI_PCMDA12=m
-CONFIG_COMEDI_PCMMIO=m
-CONFIG_COMEDI_PCMUIO=m
-CONFIG_COMEDI_MULTIQ3=m
-CONFIG_COMEDI_S526=m
-CONFIG_COMEDI_PCI_DRIVERS=m
-CONFIG_COMEDI_8255_PCI=m
-CONFIG_COMEDI_ADDI_WATCHDOG=m
-CONFIG_COMEDI_ADDI_APCI_1032=m
-CONFIG_COMEDI_ADDI_APCI_1500=m
-CONFIG_COMEDI_ADDI_APCI_1516=m
-CONFIG_COMEDI_ADDI_APCI_1564=m
-CONFIG_COMEDI_ADDI_APCI_16XX=m
-CONFIG_COMEDI_ADDI_APCI_2032=m
-CONFIG_COMEDI_ADDI_APCI_2200=m
-CONFIG_COMEDI_ADDI_APCI_3120=m
-CONFIG_COMEDI_ADDI_APCI_3501=m
-CONFIG_COMEDI_ADDI_APCI_3XXX=m
-CONFIG_COMEDI_ADL_PCI6208=m
-CONFIG_COMEDI_ADL_PCI7X3X=m
-CONFIG_COMEDI_ADL_PCI8164=m
-CONFIG_COMEDI_ADL_PCI9111=m
-CONFIG_COMEDI_ADL_PCI9118=m
-CONFIG_COMEDI_ADV_PCI1710=m
-CONFIG_COMEDI_ADV_PCI1720=m
-CONFIG_COMEDI_ADV_PCI1723=m
-CONFIG_COMEDI_ADV_PCI1724=m
-CONFIG_COMEDI_ADV_PCI1760=m
-CONFIG_COMEDI_ADV_PCI_DIO=m
-CONFIG_COMEDI_AMPLC_DIO200_PCI=m
-CONFIG_COMEDI_AMPLC_PC236_PCI=m
-CONFIG_COMEDI_AMPLC_PC263_PCI=m
-CONFIG_COMEDI_AMPLC_PCI224=m
-CONFIG_COMEDI_AMPLC_PCI230=m
-CONFIG_COMEDI_CONTEC_PCI_DIO=m
-CONFIG_COMEDI_DAS08_PCI=m
-CONFIG_COMEDI_DT3000=m
-CONFIG_COMEDI_DYNA_PCI10XX=m
-CONFIG_COMEDI_GSC_HPDI=m
-CONFIG_COMEDI_MF6X4=m
-CONFIG_COMEDI_ICP_MULTI=m
-CONFIG_COMEDI_DAQBOARD2000=m
-CONFIG_COMEDI_JR3_PCI=m
-CONFIG_COMEDI_KE_COUNTER=m
-CONFIG_COMEDI_CB_PCIDAS64=m
-CONFIG_COMEDI_CB_PCIDAS=m
-CONFIG_COMEDI_CB_PCIDDA=m
-CONFIG_COMEDI_CB_PCIMDAS=m
-CONFIG_COMEDI_CB_PCIMDDA=m
-CONFIG_COMEDI_ME4000=m
-CONFIG_COMEDI_ME_DAQ=m
-CONFIG_COMEDI_NI_6527=m
-CONFIG_COMEDI_NI_65XX=m
-CONFIG_COMEDI_NI_660X=m
-CONFIG_COMEDI_NI_670X=m
-CONFIG_COMEDI_NI_LABPC_PCI=m
-CONFIG_COMEDI_NI_PCIDIO=m
-CONFIG_COMEDI_NI_PCIMIO=m
-CONFIG_COMEDI_RTD520=m
-CONFIG_COMEDI_S626=m
-CONFIG_COMEDI_MITE=m
-CONFIG_COMEDI_NI_TIOCMD=m
-CONFIG_COMEDI_PCMCIA_DRIVERS=m
-CONFIG_COMEDI_CB_DAS16_CS=m
-CONFIG_COMEDI_DAS08_CS=m
-CONFIG_COMEDI_NI_DAQ_700_CS=m
-CONFIG_COMEDI_NI_DAQ_DIO24_CS=m
-CONFIG_COMEDI_NI_LABPC_CS=m
-CONFIG_COMEDI_NI_MIO_CS=m
-CONFIG_COMEDI_QUATECH_DAQP_CS=m
-CONFIG_COMEDI_USB_DRIVERS=m
-CONFIG_COMEDI_DT9812=m
-CONFIG_COMEDI_NI_USB6501=m
-CONFIG_COMEDI_USBDUX=m
-CONFIG_COMEDI_USBDUXFAST=m
-CONFIG_COMEDI_USBDUXSIGMA=m
-CONFIG_COMEDI_VMK80XX=m
-CONFIG_COMEDI_8254=m
-CONFIG_COMEDI_8255=m
-CONFIG_COMEDI_8255_SA=m
-CONFIG_COMEDI_KCOMEDILIB=m
-CONFIG_COMEDI_AMPLC_DIO200=m
-CONFIG_COMEDI_AMPLC_PC236=m
-CONFIG_COMEDI_DAS08=m
-CONFIG_COMEDI_ISADMA=m
-CONFIG_COMEDI_NI_LABPC=m
-CONFIG_COMEDI_NI_LABPC_ISADMA=m
-CONFIG_COMEDI_NI_TIO=m
-CONFIG_RTL8192U=m
-CONFIG_RTLLIB=m
-CONFIG_RTLLIB_CRYPTO_CCMP=m
-CONFIG_RTLLIB_CRYPTO_TKIP=m
-CONFIG_RTLLIB_CRYPTO_WEP=m
-CONFIG_RTL8192E=m
-CONFIG_RTL8723BS=m
-CONFIG_R8712U=m
-CONFIG_R8188EU=m
-CONFIG_88EU_AP_MODE=y
-CONFIG_R8822BE=m
-CONFIG_RTLHALMAC_ST=m
-CONFIG_RTLPHYDM_ST=m
-CONFIG_RTLWIFI_DEBUG_ST=y
-CONFIG_RTS5208=m
-CONFIG_VT6655=m
-CONFIG_VT6656=m
-
-#
-# IIO staging drivers
-#
-
-#
-# Accelerometers
-#
-CONFIG_ADIS16201=m
-CONFIG_ADIS16203=m
-CONFIG_ADIS16209=m
-CONFIG_ADIS16240=m
-
-#
-# Analog to digital converters
-#
-CONFIG_AD7606=m
-CONFIG_AD7606_IFACE_PARALLEL=m
-CONFIG_AD7606_IFACE_SPI=m
-CONFIG_AD7780=m
-CONFIG_AD7816=m
-CONFIG_AD7192=m
-CONFIG_AD7280=m
-
-#
-# Analog digital bi-direction converters
-#
-CONFIG_ADT7316=m
-CONFIG_ADT7316_SPI=m
-CONFIG_ADT7316_I2C=m
-
-#
-# Capacitance to digital converters
-#
-CONFIG_AD7150=m
-CONFIG_AD7152=m
-CONFIG_AD7746=m
-
-#
-# Direct Digital Synthesis
-#
-CONFIG_AD9832=m
-CONFIG_AD9834=m
-
-#
-# Digital gyroscope sensors
-#
-CONFIG_ADIS16060=m
-
-#
-# Network Analyzer, Impedance Converters
-#
-CONFIG_AD5933=m
-
-#
-# Light sensors
-#
-CONFIG_TSL2x7x=m
-
-#
-# Active energy metering IC
-#
-CONFIG_ADE7753=m
-CONFIG_ADE7754=m
-CONFIG_ADE7758=m
-CONFIG_ADE7759=m
-CONFIG_ADE7854=m
-CONFIG_ADE7854_I2C=m
-CONFIG_ADE7854_SPI=m
-
-#
-# Resolver to digital converters
-#
-CONFIG_AD2S90=m
-CONFIG_AD2S1200=m
-CONFIG_AD2S1210=m
-
-#
-# Triggers - standalone
-#
-CONFIG_FB_SM750=m
-CONFIG_FB_XGI=m
-
-#
-# Speakup console speech
-#
-CONFIG_SPEAKUP=m
-CONFIG_SPEAKUP_SYNTH_ACNTSA=m
-CONFIG_SPEAKUP_SYNTH_APOLLO=m
-CONFIG_SPEAKUP_SYNTH_AUDPTR=m
-CONFIG_SPEAKUP_SYNTH_BNS=m
-CONFIG_SPEAKUP_SYNTH_DECTLK=m
-CONFIG_SPEAKUP_SYNTH_DECEXT=m
-CONFIG_SPEAKUP_SYNTH_LTLK=m
-CONFIG_SPEAKUP_SYNTH_SOFT=m
-CONFIG_SPEAKUP_SYNTH_SPKOUT=m
-CONFIG_SPEAKUP_SYNTH_TXPRT=m
-# CONFIG_SPEAKUP_SYNTH_DUMMY is not set
-CONFIG_STAGING_MEDIA=y
-CONFIG_INTEL_ATOMISP=y
-CONFIG_VIDEO_ATOMISP=m
-CONFIG_VIDEO_OV5693=m
-CONFIG_VIDEO_IMX=m
-CONFIG_VIDEO_OV2722=m
-CONFIG_VIDEO_GC2235=m
-CONFIG_VIDEO_OV8858=m
-CONFIG_VIDEO_MSRLIST_HELPER=m
-CONFIG_VIDEO_MT9M114=m
-CONFIG_VIDEO_AP1302=m
-CONFIG_VIDEO_GC0310=m
-CONFIG_VIDEO_OV2680=m
-CONFIG_VIDEO_LM3554=m
-CONFIG_I2C_BCM2048=m
-CONFIG_DVB_CXD2099=m
-CONFIG_LIRC_STAGING=y
-CONFIG_LIRC_ZILOG=m
-
-#
-# Android
-#
-CONFIG_LTE_GDM724X=m
-CONFIG_FIREWIRE_SERIAL=m
-CONFIG_FWTTY_MAX_TOTAL_PORTS=64
-CONFIG_FWTTY_MAX_CARD_PORTS=32
-CONFIG_MTD_SPINAND_MT29F=m
-CONFIG_MTD_SPINAND_ONDIEECC=y
-CONFIG_LNET=m
-CONFIG_LNET_MAX_PAYLOAD=1048576
-# CONFIG_LNET_SELFTEST is not set
-CONFIG_LNET_XPRT_IB=m
-CONFIG_LUSTRE_FS=m
-# CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK is not set
-CONFIG_DGNC=m
-CONFIG_GS_FPGABOOT=m
-CONFIG_CRYPTO_SKEIN=m
-CONFIG_UNISYSSPAR=y
-# CONFIG_UNISYS_VISORBUS is not set
-CONFIG_FB_TFT=m
-CONFIG_FB_TFT_AGM1264K_FL=m
-CONFIG_FB_TFT_BD663474=m
-CONFIG_FB_TFT_HX8340BN=m
-CONFIG_FB_TFT_HX8347D=m
-CONFIG_FB_TFT_HX8353D=m
-# CONFIG_FB_TFT_HX8357D is not set
-# CONFIG_FB_TFT_ILI9163 is not set
-CONFIG_FB_TFT_ILI9320=m
-CONFIG_FB_TFT_ILI9325=m
-CONFIG_FB_TFT_ILI9340=m
-CONFIG_FB_TFT_ILI9341=m
-CONFIG_FB_TFT_ILI9481=m
-CONFIG_FB_TFT_ILI9486=m
-CONFIG_FB_TFT_PCD8544=m
-CONFIG_FB_TFT_RA8875=m
-CONFIG_FB_TFT_S6D02A1=m
-CONFIG_FB_TFT_S6D1121=m
-CONFIG_FB_TFT_SH1106=m
-CONFIG_FB_TFT_SSD1289=m
-CONFIG_FB_TFT_SSD1305=m
-CONFIG_FB_TFT_SSD1306=m
-CONFIG_FB_TFT_SSD1325=m
-CONFIG_FB_TFT_SSD1331=m
-CONFIG_FB_TFT_SSD1351=m
-CONFIG_FB_TFT_ST7735R=m
-CONFIG_FB_TFT_ST7789V=m
-CONFIG_FB_TFT_TINYLCD=m
-CONFIG_FB_TFT_TLS8204=m
-CONFIG_FB_TFT_UC1611=m
-CONFIG_FB_TFT_UC1701=m
-CONFIG_FB_TFT_UPD161704=m
-CONFIG_FB_TFT_WATTEROTT=m
-CONFIG_FB_FLEX=m
-CONFIG_FB_TFT_FBTFT_DEVICE=m
-CONFIG_WILC1000=m
-CONFIG_WILC1000_SDIO=m
-CONFIG_WILC1000_SPI=m
-# CONFIG_WILC1000_HW_OOB_INTR is not set
-CONFIG_MOST=m
-CONFIG_MOSTCORE=m
-CONFIG_AIM_CDEV=m
-CONFIG_AIM_NETWORK=m
-CONFIG_AIM_SOUND=m
-CONFIG_AIM_V4L2=m
-CONFIG_HDM_DIM2=m
-CONFIG_HDM_I2C=m
-CONFIG_HDM_USB=m
-CONFIG_KS7010=m
-CONFIG_GREYBUS=m
-CONFIG_GREYBUS_ES2=m
-CONFIG_GREYBUS_AUDIO=m
-CONFIG_GREYBUS_BOOTROM=m
-CONFIG_GREYBUS_FIRMWARE=m
-CONFIG_GREYBUS_HID=m
-CONFIG_GREYBUS_LIGHT=m
-CONFIG_GREYBUS_LOG=m
-CONFIG_GREYBUS_LOOPBACK=m
-CONFIG_GREYBUS_POWER=m
-CONFIG_GREYBUS_RAW=m
-CONFIG_GREYBUS_VIBRATOR=m
-CONFIG_GREYBUS_BRIDGED_PHY=m
-CONFIG_GREYBUS_GPIO=m
-CONFIG_GREYBUS_I2C=m
-CONFIG_GREYBUS_PWM=m
-CONFIG_GREYBUS_SDIO=m
-CONFIG_GREYBUS_SPI=m
-CONFIG_GREYBUS_UART=m
-CONFIG_GREYBUS_USB=m
-
-#
-# USB Power Delivery and Type-C drivers
-#
-CONFIG_TYPEC_TCPM=m
-CONFIG_TYPEC_TCPCI=m
-CONFIG_TYPEC_FUSB302=m
-CONFIG_DRM_VBOXVIDEO=m
-CONFIG_PI433=m
-CONFIG_X86_PLATFORM_DEVICES=y
-CONFIG_ACER_WMI=m
-CONFIG_ACERHDF=m
-CONFIG_ALIENWARE_WMI=m
-CONFIG_ASUS_LAPTOP=m
-CONFIG_DELL_SMBIOS=m
-CONFIG_DELL_LAPTOP=m
-CONFIG_DELL_WMI=m
-CONFIG_DELL_WMI_AIO=m
-CONFIG_DELL_WMI_LED=m
-CONFIG_DELL_SMO8800=m
-CONFIG_DELL_RBTN=m
-CONFIG_FUJITSU_LAPTOP=m
-CONFIG_FUJITSU_TABLET=m
-CONFIG_AMILO_RFKILL=m
-CONFIG_HP_ACCEL=m
-CONFIG_HP_WIRELESS=m
-CONFIG_HP_WMI=m
-CONFIG_MSI_LAPTOP=m
-CONFIG_PANASONIC_LAPTOP=m
-CONFIG_COMPAL_LAPTOP=m
-CONFIG_SONY_LAPTOP=m
-CONFIG_SONYPI_COMPAT=y
-CONFIG_IDEAPAD_LAPTOP=m
-CONFIG_SURFACE3_WMI=m
-CONFIG_THINKPAD_ACPI=m
-CONFIG_THINKPAD_ACPI_ALSA_SUPPORT=y
-# CONFIG_THINKPAD_ACPI_DEBUGFACILITIES is not set
-# CONFIG_THINKPAD_ACPI_DEBUG is not set
-# CONFIG_THINKPAD_ACPI_UNSAFE_LEDS is not set
-CONFIG_THINKPAD_ACPI_VIDEO=y
-CONFIG_THINKPAD_ACPI_HOTKEY_POLL=y
-CONFIG_SENSORS_HDAPS=m
-CONFIG_INTEL_MENLOW=m
-CONFIG_EEEPC_LAPTOP=m
-CONFIG_ASUS_WMI=m
-CONFIG_ASUS_NB_WMI=m
-CONFIG_EEEPC_WMI=m
-CONFIG_ASUS_WIRELESS=m
-CONFIG_ACPI_WMI=m
-CONFIG_WMI_BMOF=m
-CONFIG_MSI_WMI=m
-CONFIG_PEAQ_WMI=m
-CONFIG_TOPSTAR_LAPTOP=m
-CONFIG_ACPI_TOSHIBA=m
-CONFIG_TOSHIBA_BT_RFKILL=m
-CONFIG_TOSHIBA_HAPS=m
-CONFIG_TOSHIBA_WMI=m
-CONFIG_ACPI_CMPC=m
-CONFIG_INTEL_CHT_INT33FE=m
-CONFIG_INTEL_INT0002_VGPIO=m
-CONFIG_INTEL_HID_EVENT=m
-CONFIG_INTEL_VBTN=m
-CONFIG_INTEL_IPS=m
-CONFIG_INTEL_PMC_CORE=y
-CONFIG_IBM_RTL=m
-CONFIG_SAMSUNG_LAPTOP=m
-CONFIG_MXM_WMI=m
-CONFIG_INTEL_OAKTRAIL=m
-CONFIG_SAMSUNG_Q10=m
-CONFIG_APPLE_GMUX=m
-CONFIG_INTEL_RST=m
-CONFIG_INTEL_SMARTCONNECT=m
-CONFIG_PVPANIC=m
-CONFIG_INTEL_PMC_IPC=m
-CONFIG_INTEL_BXTWC_PMIC_TMU=m
-CONFIG_SURFACE_PRO3_BUTTON=m
-CONFIG_SURFACE_3_BUTTON=m
-CONFIG_INTEL_PUNIT_IPC=m
-CONFIG_INTEL_TELEMETRY=m
-CONFIG_MLX_PLATFORM=m
-CONFIG_MLX_CPLD_PLATFORM=m
-# CONFIG_INTEL_TURBO_MAX_3 is not set
-CONFIG_PMC_ATOM=y
-CONFIG_CHROME_PLATFORMS=y
-CONFIG_CHROMEOS_LAPTOP=m
-CONFIG_CHROMEOS_PSTORE=m
-CONFIG_CROS_EC_CHARDEV=m
-CONFIG_CROS_EC_LPC=m
-CONFIG_CROS_EC_LPC_MEC=y
-CONFIG_CROS_EC_PROTO=y
-CONFIG_CROS_KBD_LED_BACKLIGHT=m
-CONFIG_CLKDEV_LOOKUP=y
-CONFIG_HAVE_CLK_PREPARE=y
-CONFIG_COMMON_CLK=y
-
-#
-# Common Clock Framework
-#
-CONFIG_COMMON_CLK_WM831X=m
-CONFIG_COMMON_CLK_SI5351=m
-CONFIG_COMMON_CLK_CDCE706=m
-CONFIG_COMMON_CLK_CS2000_CP=m
-# CONFIG_COMMON_CLK_NXP is not set
-CONFIG_COMMON_CLK_PWM=m
-# CONFIG_COMMON_CLK_PXA is not set
-# CONFIG_COMMON_CLK_PIC32 is not set
-CONFIG_HWSPINLOCK=m
-
-#
-# Clock Source drivers
-#
-CONFIG_CLKEVT_I8253=y
-CONFIG_I8253_LOCK=y
-CONFIG_CLKBLD_I8253=y
-# CONFIG_ATMEL_PIT is not set
-# CONFIG_SH_TIMER_CMT is not set
-# CONFIG_SH_TIMER_MTU2 is not set
-# CONFIG_SH_TIMER_TMU is not set
-# CONFIG_EM_TIMER_STI is not set
-CONFIG_MAILBOX=y
-CONFIG_PCC=y
-CONFIG_ALTERA_MBOX=m
-CONFIG_IOMMU_API=y
-CONFIG_IOMMU_SUPPORT=y
-
-#
-# Generic IOMMU Pagetable Support
-#
-CONFIG_IOMMU_IOVA=y
-CONFIG_AMD_IOMMU=y
-CONFIG_AMD_IOMMU_V2=m
-CONFIG_DMAR_TABLE=y
-CONFIG_INTEL_IOMMU=y
-CONFIG_INTEL_IOMMU_SVM=y
-# CONFIG_INTEL_IOMMU_DEFAULT_ON is not set
-CONFIG_INTEL_IOMMU_FLOPPY_WA=y
-CONFIG_IRQ_REMAP=y
-
-#
-# Remoteproc drivers
-#
-CONFIG_REMOTEPROC=m
-
-#
-# Rpmsg drivers
-#
-CONFIG_RPMSG=m
-CONFIG_RPMSG_CHAR=m
-CONFIG_RPMSG_QCOM_GLINK_NATIVE=m
-CONFIG_RPMSG_QCOM_GLINK_RPM=m
-
-#
-# SOC (System On Chip) specific Drivers
-#
-
-#
-# Amlogic SoC drivers
-#
-
-#
-# Broadcom SoC drivers
-#
-
-#
-# i.MX SoC drivers
-#
-
-#
-# Qualcomm SoC drivers
-#
-# CONFIG_SUNXI_SRAM is not set
-CONFIG_SOC_TI=y
-CONFIG_PM_DEVFREQ=y
-
-#
-# DEVFREQ Governors
-#
-CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y
-CONFIG_DEVFREQ_GOV_PERFORMANCE=y
-CONFIG_DEVFREQ_GOV_POWERSAVE=y
-CONFIG_DEVFREQ_GOV_USERSPACE=y
-CONFIG_DEVFREQ_GOV_PASSIVE=m
-
-#
-# DEVFREQ Drivers
-#
-CONFIG_PM_DEVFREQ_EVENT=y
-CONFIG_EXTCON=y
-
-#
-# Extcon Device Drivers
-#
-CONFIG_EXTCON_ADC_JACK=m
-CONFIG_EXTCON_ARIZONA=m
-CONFIG_EXTCON_AXP288=m
-CONFIG_EXTCON_GPIO=m
-# CONFIG_EXTCON_INTEL_INT3496 is not set
-CONFIG_EXTCON_MAX14577=m
-CONFIG_EXTCON_MAX3355=m
-CONFIG_EXTCON_MAX77693=m
-CONFIG_EXTCON_RT8973A=m
-CONFIG_EXTCON_SM5502=m
-# CONFIG_EXTCON_USB_GPIO is not set
-CONFIG_EXTCON_USBC_CROS_EC=m
-CONFIG_MEMORY=y
-CONFIG_IIO=m
-CONFIG_IIO_BUFFER=y
-CONFIG_IIO_BUFFER_CB=m
-CONFIG_IIO_KFIFO_BUF=m
-CONFIG_IIO_TRIGGERED_BUFFER=m
-CONFIG_IIO_CONFIGFS=m
-CONFIG_IIO_TRIGGER=y
-CONFIG_IIO_CONSUMERS_PER_TRIGGER=2
-CONFIG_IIO_SW_DEVICE=m
-CONFIG_IIO_SW_TRIGGER=m
-CONFIG_IIO_TRIGGERED_EVENT=m
-
-#
-# Accelerometers
-#
-CONFIG_BMA180=m
-CONFIG_BMA220=m
-CONFIG_BMC150_ACCEL=m
-CONFIG_BMC150_ACCEL_I2C=m
-CONFIG_BMC150_ACCEL_SPI=m
-CONFIG_DA280=m
-CONFIG_DA311=m
-CONFIG_DMARD09=m
-CONFIG_DMARD10=m
-CONFIG_HID_SENSOR_ACCEL_3D=m
-CONFIG_IIO_ST_ACCEL_3AXIS=m
-CONFIG_IIO_ST_ACCEL_I2C_3AXIS=m
-CONFIG_IIO_ST_ACCEL_SPI_3AXIS=m
-CONFIG_KXSD9=m
-CONFIG_KXSD9_SPI=m
-CONFIG_KXSD9_I2C=m
-CONFIG_KXCJK1013=m
-CONFIG_MC3230=m
-CONFIG_MMA7455=m
-CONFIG_MMA7455_I2C=m
-CONFIG_MMA7455_SPI=m
-CONFIG_MMA7660=m
-CONFIG_MMA8452=m
-CONFIG_MMA9551_CORE=m
-CONFIG_MMA9551=m
-CONFIG_MMA9553=m
-CONFIG_MXC4005=m
-CONFIG_MXC6255=m
-CONFIG_SCA3000=m
-CONFIG_STK8312=m
-CONFIG_STK8BA50=m
-
-#
-# Analog to digital converters
-#
-CONFIG_AD_SIGMA_DELTA=m
-CONFIG_AD7266=m
-CONFIG_AD7291=m
-CONFIG_AD7298=m
-CONFIG_AD7476=m
-CONFIG_AD7766=m
-CONFIG_AD7791=m
-CONFIG_AD7793=m
-CONFIG_AD7887=m
-CONFIG_AD7923=m
-CONFIG_AD799X=m
-CONFIG_AXP20X_ADC=m
-CONFIG_AXP288_ADC=m
-CONFIG_CC10001_ADC=m
-CONFIG_DA9150_GPADC=m
-CONFIG_DLN2_ADC=m
-CONFIG_HI8435=m
-CONFIG_HX711=m
-CONFIG_INA2XX_ADC=m
-CONFIG_LTC2471=m
-CONFIG_LTC2485=m
-CONFIG_LTC2497=m
-CONFIG_MAX1027=m
-CONFIG_MAX11100=m
-CONFIG_MAX1118=m
-CONFIG_MAX1363=m
-CONFIG_MAX9611=m
-CONFIG_MCP320X=m
-CONFIG_MCP3422=m
-CONFIG_MEN_Z188_ADC=m
-CONFIG_NAU7802=m
-CONFIG_QCOM_VADC_COMMON=m
-CONFIG_QCOM_SPMI_IADC=m
-CONFIG_QCOM_SPMI_VADC=m
-CONFIG_TI_ADC081C=m
-CONFIG_TI_ADC0832=m
-CONFIG_TI_ADC084S021=m
-CONFIG_TI_ADC12138=m
-CONFIG_TI_ADC108S102=m
-CONFIG_TI_ADC128S052=m
-CONFIG_TI_ADC161S626=m
-CONFIG_TI_ADS1015=m
-CONFIG_TI_ADS7950=m
-CONFIG_TI_AM335X_ADC=m
-CONFIG_TI_TLC4541=m
-CONFIG_VIPERBOARD_ADC=m
-
-#
-# Amplifiers
-#
-CONFIG_AD8366=m
-
-#
-# Chemical Sensors
-#
-CONFIG_ATLAS_PH_SENSOR=m
-CONFIG_CCS811=m
-CONFIG_IAQCORE=m
-CONFIG_VZ89X=m
-CONFIG_IIO_CROS_EC_SENSORS_CORE=m
-CONFIG_IIO_CROS_EC_SENSORS=m
-
-#
-# Hid Sensor IIO Common
-#
-CONFIG_HID_SENSOR_IIO_COMMON=m
-CONFIG_HID_SENSOR_IIO_TRIGGER=m
-CONFIG_IIO_MS_SENSORS_I2C=m
-
-#
-# SSP Sensor Common
-#
-CONFIG_IIO_SSP_SENSORS_COMMONS=m
-CONFIG_IIO_SSP_SENSORHUB=m
-CONFIG_IIO_ST_SENSORS_I2C=m
-CONFIG_IIO_ST_SENSORS_SPI=m
-CONFIG_IIO_ST_SENSORS_CORE=m
-
-#
-# Counters
-#
-
-#
-# Digital to analog converters
-#
-CONFIG_AD5064=m
-CONFIG_AD5360=m
-CONFIG_AD5380=m
-CONFIG_AD5421=m
-CONFIG_AD5446=m
-CONFIG_AD5449=m
-CONFIG_AD5592R_BASE=m
-CONFIG_AD5592R=m
-CONFIG_AD5593R=m
-CONFIG_AD5504=m
-CONFIG_AD5624R_SPI=m
-CONFIG_LTC2632=m
-CONFIG_AD5686=m
-CONFIG_AD5755=m
-CONFIG_AD5761=m
-CONFIG_AD5764=m
-CONFIG_AD5791=m
-CONFIG_AD7303=m
-CONFIG_AD8801=m
-CONFIG_M62332=m
-CONFIG_MAX517=m
-CONFIG_MCP4725=m
-CONFIG_MCP4922=m
-
-#
-# IIO dummy driver
-#
-# CONFIG_IIO_SIMPLE_DUMMY is not set
-
-#
-# Frequency Synthesizers DDS/PLL
-#
-
-#
-# Clock Generator/Distribution
-#
-CONFIG_AD9523=m
-
-#
-# Phase-Locked Loop (PLL) frequency synthesizers
-#
-CONFIG_ADF4350=m
-
-#
-# Digital gyroscope sensors
-#
-CONFIG_ADIS16080=m
-CONFIG_ADIS16130=m
-CONFIG_ADIS16136=m
-CONFIG_ADIS16260=m
-CONFIG_ADXRS450=m
-CONFIG_BMG160=m
-CONFIG_BMG160_I2C=m
-CONFIG_BMG160_SPI=m
-CONFIG_HID_SENSOR_GYRO_3D=m
-CONFIG_MPU3050=m
-CONFIG_MPU3050_I2C=m
-CONFIG_IIO_ST_GYRO_3AXIS=m
-CONFIG_IIO_ST_GYRO_I2C_3AXIS=m
-CONFIG_IIO_ST_GYRO_SPI_3AXIS=m
-CONFIG_ITG3200=m
-
-#
-# Health Sensors
-#
-
-#
-# Heart Rate Monitors
-#
-CONFIG_AFE4403=m
-CONFIG_AFE4404=m
-CONFIG_MAX30100=m
-CONFIG_MAX30102=m
-
-#
-# Humidity sensors
-#
-CONFIG_AM2315=m
-CONFIG_DHT11=m
-CONFIG_HDC100X=m
-CONFIG_HID_SENSOR_HUMIDITY=m
-CONFIG_HTS221=m
-CONFIG_HTS221_I2C=m
-CONFIG_HTS221_SPI=m
-CONFIG_HTU21=m
-CONFIG_SI7005=m
-CONFIG_SI7020=m
-
-#
-# Inertial measurement units
-#
-CONFIG_ADIS16400=m
-CONFIG_ADIS16480=m
-CONFIG_BMI160=m
-CONFIG_BMI160_I2C=m
-CONFIG_BMI160_SPI=m
-CONFIG_KMX61=m
-CONFIG_INV_MPU6050_IIO=m
-CONFIG_INV_MPU6050_I2C=m
-CONFIG_INV_MPU6050_SPI=m
-CONFIG_IIO_ST_LSM6DSX=m
-CONFIG_IIO_ST_LSM6DSX_I2C=m
-CONFIG_IIO_ST_LSM6DSX_SPI=m
-CONFIG_IIO_ADIS_LIB=m
-CONFIG_IIO_ADIS_LIB_BUFFER=y
-
-#
-# Light sensors
-#
-# CONFIG_ACPI_ALS is not set
-CONFIG_ADJD_S311=m
-CONFIG_AL3320A=m
-CONFIG_APDS9300=m
-CONFIG_APDS9960=m
-CONFIG_BH1750=m
-CONFIG_BH1780=m
-CONFIG_CM32181=m
-CONFIG_CM3232=m
-CONFIG_CM3323=m
-CONFIG_CM36651=m
-CONFIG_IIO_CROS_EC_LIGHT_PROX=m
-CONFIG_GP2AP020A00F=m
-CONFIG_SENSORS_ISL29018=m
-CONFIG_SENSORS_ISL29028=m
-CONFIG_ISL29125=m
-CONFIG_HID_SENSOR_ALS=m
-CONFIG_HID_SENSOR_PROX=m
-CONFIG_JSA1212=m
-CONFIG_RPR0521=m
-CONFIG_SENSORS_LM3533=m
-CONFIG_LTR501=m
-CONFIG_MAX44000=m
-CONFIG_OPT3001=m
-CONFIG_PA12203001=m
-CONFIG_SI1145=m
-CONFIG_STK3310=m
-CONFIG_TCS3414=m
-CONFIG_TCS3472=m
-CONFIG_SENSORS_TSL2563=m
-CONFIG_TSL2583=m
-CONFIG_TSL4531=m
-CONFIG_US5182D=m
-CONFIG_VCNL4000=m
-CONFIG_VEML6070=m
-CONFIG_VL6180=m
-
-#
-# Magnetometer sensors
-#
-CONFIG_AK8975=m
-CONFIG_AK09911=m
-CONFIG_BMC150_MAGN=m
-CONFIG_BMC150_MAGN_I2C=m
-CONFIG_BMC150_MAGN_SPI=m
-CONFIG_MAG3110=m
-CONFIG_HID_SENSOR_MAGNETOMETER_3D=m
-CONFIG_MMC35240=m
-CONFIG_IIO_ST_MAGN_3AXIS=m
-CONFIG_IIO_ST_MAGN_I2C_3AXIS=m
-CONFIG_IIO_ST_MAGN_SPI_3AXIS=m
-CONFIG_SENSORS_HMC5843=m
-CONFIG_SENSORS_HMC5843_I2C=m
-CONFIG_SENSORS_HMC5843_SPI=m
-
-#
-# Multiplexers
-#
-
-#
-# Inclinometer sensors
-#
-CONFIG_HID_SENSOR_INCLINOMETER_3D=m
-CONFIG_HID_SENSOR_DEVICE_ROTATION=m
-
-#
-# Triggers - standalone
-#
-CONFIG_IIO_HRTIMER_TRIGGER=m
-CONFIG_IIO_INTERRUPT_TRIGGER=m
-CONFIG_IIO_TIGHTLOOP_TRIGGER=m
-CONFIG_IIO_SYSFS_TRIGGER=m
-
-#
-# Digital potentiometers
-#
-CONFIG_DS1803=m
-CONFIG_MAX5481=m
-CONFIG_MAX5487=m
-CONFIG_MCP4131=m
-CONFIG_MCP4531=m
-CONFIG_TPL0102=m
-
-#
-# Digital potentiostats
-#
-CONFIG_LMP91000=m
-
-#
-# Pressure sensors
-#
-CONFIG_ABP060MG=m
-CONFIG_BMP280=m
-CONFIG_BMP280_I2C=m
-CONFIG_BMP280_SPI=m
-CONFIG_IIO_CROS_EC_BARO=m
-CONFIG_HID_SENSOR_PRESS=m
-CONFIG_HP03=m
-CONFIG_MPL115=m
-CONFIG_MPL115_I2C=m
-CONFIG_MPL115_SPI=m
-CONFIG_MPL3115=m
-CONFIG_MS5611=m
-CONFIG_MS5611_I2C=m
-CONFIG_MS5611_SPI=m
-CONFIG_MS5637=m
-CONFIG_IIO_ST_PRESS=m
-CONFIG_IIO_ST_PRESS_I2C=m
-CONFIG_IIO_ST_PRESS_SPI=m
-CONFIG_T5403=m
-CONFIG_HP206C=m
-CONFIG_ZPA2326=m
-CONFIG_ZPA2326_I2C=m
-CONFIG_ZPA2326_SPI=m
-
-#
-# Lightning sensors
-#
-CONFIG_AS3935=m
-
-#
-# Proximity and distance sensors
-#
-CONFIG_LIDAR_LITE_V2=m
-CONFIG_SRF04=m
-CONFIG_SX9500=m
-CONFIG_SRF08=m
-
-#
-# Temperature sensors
-#
-CONFIG_MAXIM_THERMOCOUPLE=m
-CONFIG_HID_SENSOR_TEMP=m
-CONFIG_MLX90614=m
-CONFIG_TMP006=m
-CONFIG_TMP007=m
-CONFIG_TSYS01=m
-CONFIG_TSYS02D=m
-CONFIG_NTB=m
-CONFIG_NTB_AMD=m
-CONFIG_NTB_IDT=m
-CONFIG_NTB_INTEL=m
-CONFIG_NTB_PINGPONG=m
-CONFIG_NTB_TOOL=m
-CONFIG_NTB_PERF=m
-CONFIG_NTB_TRANSPORT=m
-CONFIG_VME_BUS=y
-
-#
-# VME Bridge Drivers
-#
-CONFIG_VME_CA91CX42=m
-CONFIG_VME_TSI148=m
-CONFIG_VME_FAKE=m
-
-#
-# VME Board Drivers
-#
-CONFIG_VMIVME_7805=m
-
-#
-# VME Device Drivers
-#
-CONFIG_VME_USER=m
-CONFIG_VME_PIO2=m
-CONFIG_PWM=y
-CONFIG_PWM_SYSFS=y
-CONFIG_PWM_CROS_EC=m
-CONFIG_PWM_LP3943=m
-CONFIG_PWM_LPSS=m
-CONFIG_PWM_LPSS_PCI=m
-CONFIG_PWM_LPSS_PLATFORM=m
-CONFIG_PWM_PCA9685=m
-CONFIG_ARM_GIC_MAX_NR=1
-CONFIG_IPACK_BUS=m
-CONFIG_BOARD_TPCI200=m
-CONFIG_SERIAL_IPOCTAL=m
-CONFIG_RESET_CONTROLLER=y
-# CONFIG_RESET_ATH79 is not set
-# CONFIG_RESET_BERLIN is not set
-# CONFIG_RESET_IMX7 is not set
-# CONFIG_RESET_LANTIQ is not set
-# CONFIG_RESET_LPC18XX is not set
-# CONFIG_RESET_MESON is not set
-# CONFIG_RESET_PISTACHIO is not set
-# CONFIG_RESET_SOCFPGA is not set
-# CONFIG_RESET_STM32 is not set
-# CONFIG_RESET_SUNXI is not set
-CONFIG_RESET_TI_SYSCON=m
-# CONFIG_RESET_ZYNQ is not set
-# CONFIG_RESET_TEGRA_BPMP is not set
-CONFIG_FMC=m
-CONFIG_FMC_FAKEDEV=m
-CONFIG_FMC_TRIVIAL=m
-CONFIG_FMC_WRITE_EEPROM=m
-CONFIG_FMC_CHARDEV=m
-
-#
-# PHY Subsystem
-#
-CONFIG_GENERIC_PHY=y
-CONFIG_BCM_KONA_USB2_PHY=m
-CONFIG_PHY_PXA_28NM_HSIC=m
-CONFIG_PHY_PXA_28NM_USB2=m
-CONFIG_PHY_CPCAP_USB=m
-CONFIG_PHY_QCOM_USB_HS=m
-CONFIG_PHY_QCOM_USB_HSIC=m
-CONFIG_PHY_SAMSUNG_USB2=m
-# CONFIG_PHY_EXYNOS4210_USB2 is not set
-# CONFIG_PHY_EXYNOS4X12_USB2 is not set
-# CONFIG_PHY_EXYNOS5250_USB2 is not set
-CONFIG_PHY_TUSB1210=m
-CONFIG_POWERCAP=y
-CONFIG_INTEL_RAPL=m
-CONFIG_MCB=m
-CONFIG_MCB_PCI=m
-CONFIG_MCB_LPC=m
-
-#
-# Performance monitor support
-#
-CONFIG_RAS=y
-CONFIG_RAS_CEC=y
-CONFIG_THUNDERBOLT=m
-
-#
-# Android
-#
-# CONFIG_ANDROID is not set
-CONFIG_LIBNVDIMM=m
-CONFIG_BLK_DEV_PMEM=m
-CONFIG_ND_BLK=m
-CONFIG_ND_CLAIM=y
-CONFIG_ND_BTT=m
-CONFIG_BTT=y
-CONFIG_DAX=y
-CONFIG_DEV_DAX=m
-CONFIG_NVMEM=y
-CONFIG_STM=m
-# CONFIG_STM_DUMMY is not set
-CONFIG_STM_SOURCE_CONSOLE=m
-CONFIG_STM_SOURCE_HEARTBEAT=m
-CONFIG_INTEL_TH=m
-CONFIG_INTEL_TH_PCI=m
-CONFIG_INTEL_TH_GTH=m
-CONFIG_INTEL_TH_STH=m
-CONFIG_INTEL_TH_MSU=m
-CONFIG_INTEL_TH_PTI=m
-# CONFIG_INTEL_TH_DEBUG is not set
-CONFIG_FPGA=m
-CONFIG_FPGA_MGR_ALTERA_CVP=m
-CONFIG_FPGA_MGR_ALTERA_PS_SPI=m
-CONFIG_FPGA_MGR_XILINX_SPI=m
-CONFIG_ALTERA_PR_IP_CORE=m
-
-#
-# FSI support
-#
-CONFIG_FSI=m
-CONFIG_FSI_MASTER_GPIO=m
-CONFIG_FSI_MASTER_HUB=m
-CONFIG_FSI_SCOM=m
-
-#
-# Firmware Drivers
-#
-CONFIG_EDD=m
-# CONFIG_EDD_OFF is not set
-CONFIG_FIRMWARE_MEMMAP=y
-CONFIG_DELL_RBU=m
-CONFIG_DCDBAS=m
-CONFIG_DMIID=y
-CONFIG_DMI_SYSFS=m
-CONFIG_DMI_SCAN_MACHINE_NON_EFI_FALLBACK=y
-CONFIG_ISCSI_IBFT_FIND=y
-CONFIG_ISCSI_IBFT=m
-CONFIG_FW_CFG_SYSFS=m
-# CONFIG_FW_CFG_SYSFS_CMDLINE is not set
-CONFIG_GOOGLE_FIRMWARE=y
-CONFIG_GOOGLE_SMI=m
-CONFIG_GOOGLE_COREBOOT_TABLE=m
-CONFIG_GOOGLE_COREBOOT_TABLE_ACPI=m
-CONFIG_GOOGLE_MEMCONSOLE=m
-CONFIG_GOOGLE_MEMCONSOLE_X86_LEGACY=m
-CONFIG_GOOGLE_MEMCONSOLE_COREBOOT=m
-CONFIG_GOOGLE_VPD=m
-
-#
-# EFI (Extensible Firmware Interface) Support
-#
-CONFIG_EFI_VARS=m
-CONFIG_EFI_ESRT=y
-CONFIG_EFI_VARS_PSTORE=m
-CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE=y
-CONFIG_EFI_RUNTIME_MAP=y
-# CONFIG_EFI_FAKE_MEMMAP is not set
-CONFIG_EFI_RUNTIME_WRAPPERS=y
-CONFIG_EFI_BOOTLOADER_CONTROL=m
-CONFIG_EFI_CAPSULE_LOADER=m
-CONFIG_EFI_TEST=m
-CONFIG_APPLE_PROPERTIES=y
-CONFIG_RESET_ATTACK_MITIGATION=y
-CONFIG_UEFI_CPER=y
-CONFIG_EFI_DEV_PATH_PARSER=y
-
-#
-# Tegra firmware driver
-#
-
-#
-# File systems
-#
-CONFIG_DCACHE_WORD_ACCESS=y
-CONFIG_FS_IOMAP=y
-CONFIG_EXT2_FS=m
-CONFIG_EXT2_FS_XATTR=y
-CONFIG_EXT2_FS_POSIX_ACL=y
-CONFIG_EXT2_FS_SECURITY=y
-CONFIG_EXT3_FS=m
-CONFIG_EXT3_FS_POSIX_ACL=y
-CONFIG_EXT3_FS_SECURITY=y
-CONFIG_EXT4_FS=m
-CONFIG_EXT4_FS_POSIX_ACL=y
-CONFIG_EXT4_FS_SECURITY=y
-CONFIG_EXT4_ENCRYPTION=y
-CONFIG_EXT4_FS_ENCRYPTION=y
-# CONFIG_EXT4_DEBUG is not set
-CONFIG_JBD2=m
-# CONFIG_JBD2_DEBUG is not set
-CONFIG_FS_MBCACHE=m
-# CONFIG_REISERFS_FS is not set
-# CONFIG_JFS_FS is not set
-CONFIG_XFS_FS=m
-CONFIG_XFS_QUOTA=y
-CONFIG_XFS_POSIX_ACL=y
-CONFIG_XFS_RT=y
-# CONFIG_XFS_WARN is not set
-# CONFIG_XFS_DEBUG is not set
-# CONFIG_GFS2_FS is not set
-# CONFIG_OCFS2_FS is not set
-CONFIG_BTRFS_FS=m
-CONFIG_BTRFS_FS_POSIX_ACL=y
-# CONFIG_BTRFS_FS_CHECK_INTEGRITY is not set
-# CONFIG_BTRFS_FS_RUN_SANITY_TESTS is not set
-# CONFIG_BTRFS_DEBUG is not set
-# CONFIG_BTRFS_ASSERT is not set
-CONFIG_NILFS2_FS=m
-CONFIG_F2FS_FS=m
-CONFIG_F2FS_STAT_FS=y
-CONFIG_F2FS_FS_XATTR=y
-CONFIG_F2FS_FS_POSIX_ACL=y
-CONFIG_F2FS_FS_SECURITY=y
-CONFIG_F2FS_CHECK_FS=y
-CONFIG_F2FS_FS_ENCRYPTION=y
-# CONFIG_F2FS_FAULT_INJECTION is not set
-CONFIG_FS_DAX=y
-CONFIG_FS_POSIX_ACL=y
-CONFIG_EXPORTFS=y
-CONFIG_EXPORTFS_BLOCK_OPS=y
-CONFIG_FILE_LOCKING=y
-CONFIG_MANDATORY_FILE_LOCKING=y
-CONFIG_FS_ENCRYPTION=m
-CONFIG_FSNOTIFY=y
-CONFIG_DNOTIFY=y
-CONFIG_INOTIFY_USER=y
-CONFIG_FANOTIFY=y
-# CONFIG_FANOTIFY_ACCESS_PERMISSIONS is not set
-CONFIG_QUOTA=y
-CONFIG_QUOTA_NETLINK_INTERFACE=y
-# CONFIG_PRINT_QUOTA_WARNING is not set
-# CONFIG_QUOTA_DEBUG is not set
-CONFIG_QUOTA_TREE=m
-CONFIG_QFMT_V1=m
-CONFIG_QFMT_V2=m
-CONFIG_QUOTACTL=y
-CONFIG_QUOTACTL_COMPAT=y
-CONFIG_AUTOFS4_FS=m
-CONFIG_FUSE_FS=m
-CONFIG_CUSE=m
-CONFIG_OVERLAY_FS=m
-# CONFIG_OVERLAY_FS_REDIRECT_DIR is not set
-CONFIG_OVERLAY_FS_INDEX=y
-
-#
-# Caches
-#
-CONFIG_FSCACHE=m
-CONFIG_FSCACHE_STATS=y
-# CONFIG_FSCACHE_HISTOGRAM is not set
-# CONFIG_FSCACHE_DEBUG is not set
-# CONFIG_FSCACHE_OBJECT_LIST is not set
-CONFIG_CACHEFILES=m
-# CONFIG_CACHEFILES_DEBUG is not set
-# CONFIG_CACHEFILES_HISTOGRAM is not set
-
-#
-# CD-ROM/DVD Filesystems
-#
-CONFIG_ISO9660_FS=m
-CONFIG_JOLIET=y
-CONFIG_ZISOFS=y
-CONFIG_UDF_FS=m
-CONFIG_UDF_NLS=y
-
-#
-# DOS/FAT/NT Filesystems
-#
-CONFIG_FAT_FS=m
-CONFIG_MSDOS_FS=m
-CONFIG_VFAT_FS=m
-CONFIG_FAT_DEFAULT_CODEPAGE=437
-CONFIG_FAT_DEFAULT_IOCHARSET="utf8"
-# CONFIG_FAT_DEFAULT_UTF8 is not set
-CONFIG_NTFS_FS=m
-# CONFIG_NTFS_DEBUG is not set
-CONFIG_NTFS_RW=y
-
-#
-# Pseudo filesystems
-#
-CONFIG_PROC_FS=y
-CONFIG_PROC_KCORE=y
-CONFIG_PROC_SYSCTL=y
-CONFIG_PROC_PAGE_MONITOR=y
-# CONFIG_PROC_CHILDREN is not set
-CONFIG_KERNFS=y
-CONFIG_SYSFS=y
-CONFIG_TMPFS=y
-CONFIG_TMPFS_POSIX_ACL=y
-CONFIG_TMPFS_XATTR=y
-CONFIG_HUGETLBFS=y
-CONFIG_HUGETLB_PAGE=y
-CONFIG_ARCH_HAS_GIGANTIC_PAGE=y
-CONFIG_CONFIGFS_FS=m
-CONFIG_EFIVAR_FS=m
-CONFIG_MISC_FILESYSTEMS=y
-CONFIG_ORANGEFS_FS=m
-CONFIG_ADFS_FS=m
-# CONFIG_ADFS_FS_RW is not set
-CONFIG_AFFS_FS=m
-CONFIG_ECRYPT_FS=m
-CONFIG_ECRYPT_FS_MESSAGING=y
-CONFIG_HFS_FS=m
-CONFIG_HFSPLUS_FS=m
-CONFIG_HFSPLUS_FS_POSIX_ACL=y
-CONFIG_BEFS_FS=m
-# CONFIG_BEFS_DEBUG is not set
-CONFIG_BFS_FS=m
-CONFIG_EFS_FS=m
-CONFIG_JFFS2_FS=m
-CONFIG_JFFS2_FS_DEBUG=0
-CONFIG_JFFS2_FS_WRITEBUFFER=y
-CONFIG_JFFS2_FS_WBUF_VERIFY=y
-CONFIG_JFFS2_SUMMARY=y
-CONFIG_JFFS2_FS_XATTR=y
-CONFIG_JFFS2_FS_POSIX_ACL=y
-CONFIG_JFFS2_FS_SECURITY=y
-CONFIG_JFFS2_COMPRESSION_OPTIONS=y
-CONFIG_JFFS2_ZLIB=y
-CONFIG_JFFS2_LZO=y
-CONFIG_JFFS2_RTIME=y
-CONFIG_JFFS2_RUBIN=y
-# CONFIG_JFFS2_CMODE_NONE is not set
-CONFIG_JFFS2_CMODE_PRIORITY=y
-# CONFIG_JFFS2_CMODE_SIZE is not set
-# CONFIG_JFFS2_CMODE_FAVOURLZO is not set
-CONFIG_UBIFS_FS=m
-CONFIG_UBIFS_FS_ADVANCED_COMPR=y
-CONFIG_UBIFS_FS_LZO=y
-CONFIG_UBIFS_FS_ZLIB=y
-CONFIG_UBIFS_ATIME_SUPPORT=y
-CONFIG_UBIFS_FS_ENCRYPTION=y
-CONFIG_UBIFS_FS_SECURITY=y
-CONFIG_CRAMFS=m
-CONFIG_SQUASHFS=m
-# CONFIG_SQUASHFS_FILE_CACHE is not set
-CONFIG_SQUASHFS_FILE_DIRECT=y
-# CONFIG_SQUASHFS_DECOMP_SINGLE is not set
-# CONFIG_SQUASHFS_DECOMP_MULTI is not set
-CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU=y
-CONFIG_SQUASHFS_XATTR=y
-CONFIG_SQUASHFS_ZLIB=y
-CONFIG_SQUASHFS_LZ4=y
-CONFIG_SQUASHFS_LZO=y
-CONFIG_SQUASHFS_XZ=y
-CONFIG_SQUASHFS_ZSTD=y
-CONFIG_SQUASHFS_4K_DEVBLK_SIZE=y
-CONFIG_SQUASHFS_EMBEDDED=y
-CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3
-CONFIG_VXFS_FS=m
-CONFIG_MINIX_FS=m
-CONFIG_OMFS_FS=m
-CONFIG_HPFS_FS=m
-CONFIG_QNX4FS_FS=m
-CONFIG_QNX6FS_FS=m
-# CONFIG_QNX6FS_DEBUG is not set
-CONFIG_ROMFS_FS=m
-# CONFIG_ROMFS_BACKED_BY_BLOCK is not set
-# CONFIG_ROMFS_BACKED_BY_MTD is not set
-CONFIG_ROMFS_BACKED_BY_BOTH=y
-CONFIG_ROMFS_ON_BLOCK=y
-CONFIG_ROMFS_ON_MTD=y
-CONFIG_PSTORE=y
-# CONFIG_PSTORE_ZLIB_COMPRESS is not set
-# CONFIG_PSTORE_LZO_COMPRESS is not set
-CONFIG_PSTORE_LZ4_COMPRESS=y
-# CONFIG_PSTORE_CONSOLE is not set
-CONFIG_PSTORE_PMSG=y
-CONFIG_PSTORE_RAM=m
-CONFIG_SYSV_FS=m
-CONFIG_UFS_FS=m
-# CONFIG_UFS_FS_WRITE is not set
-# CONFIG_UFS_DEBUG is not set
-CONFIG_EXOFS_FS=m
-# CONFIG_EXOFS_DEBUG is not set
-CONFIG_ORE=m
-CONFIG_NETWORK_FILESYSTEMS=y
-CONFIG_NFS_FS=m
-CONFIG_NFS_V2=m
-CONFIG_NFS_V3=m
-CONFIG_NFS_V3_ACL=y
-CONFIG_NFS_V4=m
-CONFIG_NFS_SWAP=y
-CONFIG_NFS_V4_1=y
-CONFIG_NFS_V4_2=y
-CONFIG_PNFS_FILE_LAYOUT=m
-CONFIG_PNFS_BLOCK=m
-CONFIG_PNFS_FLEXFILE_LAYOUT=m
-CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN="kernel.org"
-CONFIG_NFS_V4_1_MIGRATION=y
-CONFIG_NFS_V4_SECURITY_LABEL=y
-CONFIG_NFS_FSCACHE=y
-# CONFIG_NFS_USE_LEGACY_DNS is not set
-CONFIG_NFS_USE_KERNEL_DNS=y
-CONFIG_NFSD=m
-CONFIG_NFSD_V2_ACL=y
-CONFIG_NFSD_V3=y
-CONFIG_NFSD_V3_ACL=y
-CONFIG_NFSD_V4=y
-CONFIG_NFSD_PNFS=y
-CONFIG_NFSD_BLOCKLAYOUT=y
-CONFIG_NFSD_SCSILAYOUT=y
-CONFIG_NFSD_FLEXFILELAYOUT=y
-# CONFIG_NFSD_V4_SECURITY_LABEL is not set
-# CONFIG_NFSD_FAULT_INJECTION is not set
-CONFIG_GRACE_PERIOD=m
-CONFIG_LOCKD=m
-CONFIG_LOCKD_V4=y
-CONFIG_NFS_ACL_SUPPORT=m
-CONFIG_NFS_COMMON=y
-CONFIG_SUNRPC=m
-CONFIG_SUNRPC_GSS=m
-CONFIG_SUNRPC_BACKCHANNEL=y
-CONFIG_SUNRPC_SWAP=y
-CONFIG_RPCSEC_GSS_KRB5=m
-# CONFIG_SUNRPC_DEBUG is not set
-CONFIG_SUNRPC_XPRT_RDMA=m
-CONFIG_CEPH_FS=m
-CONFIG_CEPH_FSCACHE=y
-CONFIG_CEPH_FS_POSIX_ACL=y
-CONFIG_CIFS=m
-CONFIG_CIFS_STATS=y
-CONFIG_CIFS_STATS2=y
-CONFIG_CIFS_WEAK_PW_HASH=y
-CONFIG_CIFS_UPCALL=y
-CONFIG_CIFS_XATTR=y
-CONFIG_CIFS_ACL=y
-# CONFIG_CIFS_DEBUG is not set
-CONFIG_CIFS_DFS_UPCALL=y
-CONFIG_CIFS_SMB311=y
-CONFIG_CIFS_FSCACHE=y
-CONFIG_NCP_FS=m
-CONFIG_NCPFS_PACKET_SIGNING=y
-CONFIG_NCPFS_IOCTL_LOCKING=y
-CONFIG_NCPFS_STRONG=y
-CONFIG_NCPFS_NFS_NS=y
-CONFIG_NCPFS_OS2_NS=y
-CONFIG_NCPFS_SMALLDOS=y
-CONFIG_NCPFS_NLS=y
-CONFIG_NCPFS_EXTRAS=y
-CONFIG_CODA_FS=m
-CONFIG_AFS_FS=m
-# CONFIG_AFS_DEBUG is not set
-CONFIG_AFS_FSCACHE=y
-CONFIG_9P_FS=m
-CONFIG_9P_FSCACHE=y
-CONFIG_9P_FS_POSIX_ACL=y
-CONFIG_9P_FS_SECURITY=y
-CONFIG_NLS=y
-CONFIG_NLS_DEFAULT="utf8"
-CONFIG_NLS_CODEPAGE_437=m
-CONFIG_NLS_CODEPAGE_737=m
-CONFIG_NLS_CODEPAGE_775=m
-CONFIG_NLS_CODEPAGE_850=m
-CONFIG_NLS_CODEPAGE_852=m
-CONFIG_NLS_CODEPAGE_855=m
-CONFIG_NLS_CODEPAGE_857=m
-CONFIG_NLS_CODEPAGE_860=m
-CONFIG_NLS_CODEPAGE_861=m
-CONFIG_NLS_CODEPAGE_862=m
-CONFIG_NLS_CODEPAGE_863=m
-CONFIG_NLS_CODEPAGE_864=m
-CONFIG_NLS_CODEPAGE_865=m
-CONFIG_NLS_CODEPAGE_866=m
-CONFIG_NLS_CODEPAGE_869=m
-CONFIG_NLS_CODEPAGE_936=m
-CONFIG_NLS_CODEPAGE_950=m
-CONFIG_NLS_CODEPAGE_932=m
-CONFIG_NLS_CODEPAGE_949=m
-CONFIG_NLS_CODEPAGE_874=m
-CONFIG_NLS_ISO8859_8=m
-CONFIG_NLS_CODEPAGE_1250=m
-CONFIG_NLS_CODEPAGE_1251=m
-CONFIG_NLS_ASCII=m
-CONFIG_NLS_ISO8859_1=m
-CONFIG_NLS_ISO8859_2=m
-CONFIG_NLS_ISO8859_3=m
-CONFIG_NLS_ISO8859_4=m
-CONFIG_NLS_ISO8859_5=m
-CONFIG_NLS_ISO8859_6=m
-CONFIG_NLS_ISO8859_7=m
-CONFIG_NLS_ISO8859_9=m
-CONFIG_NLS_ISO8859_13=m
-CONFIG_NLS_ISO8859_14=m
-CONFIG_NLS_ISO8859_15=m
-CONFIG_NLS_KOI8_R=m
-CONFIG_NLS_KOI8_U=m
-CONFIG_NLS_MAC_ROMAN=m
-CONFIG_NLS_MAC_CELTIC=m
-CONFIG_NLS_MAC_CENTEURO=m
-CONFIG_NLS_MAC_CROATIAN=m
-CONFIG_NLS_MAC_CYRILLIC=m
-CONFIG_NLS_MAC_GAELIC=m
-CONFIG_NLS_MAC_GREEK=m
-CONFIG_NLS_MAC_ICELAND=m
-CONFIG_NLS_MAC_INUIT=m
-CONFIG_NLS_MAC_ROMANIAN=m
-CONFIG_NLS_MAC_TURKISH=m
-CONFIG_NLS_UTF8=m
-CONFIG_DLM=m
-# CONFIG_DLM_DEBUG is not set
-
-#
-# Kernel hacking
-#
-CONFIG_TRACE_IRQFLAGS_SUPPORT=y
-
-#
-# printk and dmesg options
-#
-# CONFIG_PRINTK_TIME is not set
-CONFIG_CONSOLE_LOGLEVEL_DEFAULT=1
-CONFIG_MESSAGE_LOGLEVEL_DEFAULT=1
-# CONFIG_BOOT_PRINTK_DELAY is not set
-# CONFIG_DYNAMIC_DEBUG is not set
-
-#
-# Compile-time checks and compiler options
-#
-# CONFIG_DEBUG_INFO is not set
-# CONFIG_ENABLE_WARN_DEPRECATED is not set
-# CONFIG_ENABLE_MUST_CHECK is not set
-CONFIG_FRAME_WARN=0
-CONFIG_STRIP_ASM_SYMS=y
-# CONFIG_READABLE_ASM is not set
-# CONFIG_UNUSED_SYMBOLS is not set
-# CONFIG_PAGE_OWNER is not set
-CONFIG_DEBUG_FS=y
-# CONFIG_HEADERS_CHECK is not set
-# CONFIG_DEBUG_SECTION_MISMATCH is not set
-CONFIG_SECTION_MISMATCH_WARN_ONLY=y
-CONFIG_STACK_VALIDATION=y
-# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x1
-CONFIG_MAGIC_SYSRQ_SERIAL=y
-CONFIG_DEBUG_KERNEL=y
-
-#
-# Memory Debugging
-#
-# CONFIG_PAGE_EXTENSION is not set
-# CONFIG_DEBUG_PAGEALLOC is not set
-# CONFIG_PAGE_POISONING is not set
-# CONFIG_DEBUG_PAGE_REF is not set
-# CONFIG_DEBUG_RODATA_TEST is not set
-# CONFIG_DEBUG_OBJECTS is not set
-CONFIG_SLUB_DEBUG_ON=y
-# CONFIG_SLUB_STATS is not set
-CONFIG_HAVE_DEBUG_KMEMLEAK=y
-# CONFIG_DEBUG_KMEMLEAK is not set
-# CONFIG_DEBUG_STACK_USAGE is not set
-# CONFIG_DEBUG_VM is not set
-CONFIG_ARCH_HAS_DEBUG_VIRTUAL=y
-# CONFIG_DEBUG_VIRTUAL is not set
-CONFIG_DEBUG_MEMORY_INIT=y
-# CONFIG_DEBUG_PER_CPU_MAPS is not set
-CONFIG_HAVE_DEBUG_STACKOVERFLOW=y
-# CONFIG_DEBUG_STACKOVERFLOW is not set
-CONFIG_HAVE_ARCH_KASAN=y
-# CONFIG_KASAN is not set
-CONFIG_ARCH_HAS_KCOV=y
-# CONFIG_KCOV is not set
-# CONFIG_DEBUG_SHIRQ is not set
-
-#
-# Debug Lockups and Hangs
-#
-# CONFIG_SOFTLOCKUP_DETECTOR is not set
-CONFIG_HARDLOCKUP_CHECK_TIMESTAMP=y
-# CONFIG_HARDLOCKUP_DETECTOR is not set
-# CONFIG_DETECT_HUNG_TASK is not set
-# CONFIG_WQ_WATCHDOG is not set
-# CONFIG_PANIC_ON_OOPS is not set
-CONFIG_PANIC_ON_OOPS_VALUE=0
-CONFIG_PANIC_TIMEOUT=0
-CONFIG_SCHED_DEBUG=y
-CONFIG_SCHED_INFO=y
-CONFIG_SCHEDSTATS=y
-CONFIG_SCHED_STACK_END_CHECK=y
-# CONFIG_DEBUG_TIMEKEEPING is not set
-# CONFIG_DEBUG_PREEMPT is not set
-
-#
-# Lock Debugging (spinlocks, mutexes, etc...)
-#
-# CONFIG_DEBUG_RT_MUTEXES is not set
-# CONFIG_DEBUG_SPINLOCK is not set
-# CONFIG_DEBUG_MUTEXES is not set
-# CONFIG_DEBUG_WW_MUTEX_SLOWPATH is not set
-# CONFIG_DEBUG_LOCK_ALLOC is not set
-# CONFIG_PROVE_LOCKING is not set
-# CONFIG_LOCK_STAT is not set
-# CONFIG_DEBUG_ATOMIC_SLEEP is not set
-# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
-# CONFIG_LOCK_TORTURE_TEST is not set
-# CONFIG_WW_MUTEX_SELFTEST is not set
-CONFIG_STACKTRACE=y
-# CONFIG_WARN_ALL_UNSEEDED_RANDOM is not set
-# CONFIG_DEBUG_KOBJECT is not set
-CONFIG_DEBUG_BUGVERBOSE=y
-CONFIG_DEBUG_LIST=y
-CONFIG_DEBUG_PI_LIST=y
-CONFIG_DEBUG_SG=y
-CONFIG_DEBUG_NOTIFIERS=y
-CONFIG_DEBUG_CREDENTIALS=y
-
-#
-# RCU Debugging
-#
-# CONFIG_PROVE_RCU is not set
-CONFIG_TORTURE_TEST=m
-CONFIG_RCU_PERF_TEST=m
-# CONFIG_RCU_TORTURE_TEST is not set
-CONFIG_RCU_CPU_STALL_TIMEOUT=60
-# CONFIG_RCU_TRACE is not set
-# CONFIG_RCU_EQS_DEBUG is not set
-# CONFIG_DEBUG_WQ_FORCE_RR_CPU is not set
-# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
-# CONFIG_CPU_HOTPLUG_STATE_CONTROL is not set
-# CONFIG_NOTIFIER_ERROR_INJECTION is not set
-# CONFIG_FAULT_INJECTION is not set
-# CONFIG_LATENCYTOP is not set
-CONFIG_USER_STACKTRACE_SUPPORT=y
-CONFIG_NOP_TRACER=y
-CONFIG_HAVE_FUNCTION_TRACER=y
-CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
-CONFIG_HAVE_DYNAMIC_FTRACE=y
-CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS=y
-CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
-CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
-CONFIG_HAVE_FENTRY=y
-CONFIG_HAVE_C_RECORDMCOUNT=y
-CONFIG_TRACE_CLOCK=y
-CONFIG_RING_BUFFER=y
-CONFIG_EVENT_TRACING=y
-CONFIG_CONTEXT_SWITCH_TRACER=y
-CONFIG_TRACING=y
-CONFIG_TRACING_SUPPORT=y
-CONFIG_FTRACE=y
-# CONFIG_FUNCTION_TRACER is not set
-# CONFIG_IRQSOFF_TRACER is not set
-# CONFIG_PREEMPT_TRACER is not set
-# CONFIG_SCHED_TRACER is not set
-# CONFIG_HWLAT_TRACER is not set
-# CONFIG_ENABLE_DEFAULT_TRACERS is not set
-# CONFIG_FTRACE_SYSCALLS is not set
-# CONFIG_TRACER_SNAPSHOT is not set
-CONFIG_BRANCH_PROFILE_NONE=y
-# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
-# CONFIG_STACK_TRACER is not set
-# CONFIG_BLK_DEV_IO_TRACE is not set
-CONFIG_KPROBE_EVENTS=y
-CONFIG_UPROBE_EVENTS=y
-CONFIG_BPF_EVENTS=y
-CONFIG_PROBE_EVENTS=y
-# CONFIG_MMIOTRACE is not set
-# CONFIG_HIST_TRIGGERS is not set
-# CONFIG_TRACEPOINT_BENCHMARK is not set
-# CONFIG_RING_BUFFER_BENCHMARK is not set
-# CONFIG_RING_BUFFER_STARTUP_TEST is not set
-# CONFIG_TRACE_EVAL_MAP_FILE is not set
-# CONFIG_TRACING_EVENTS_GPIO is not set
-# CONFIG_PROVIDE_OHCI1394_DMA_INIT is not set
-# CONFIG_DMA_API_DEBUG is not set
-
-#
-# Runtime Testing
-#
-CONFIG_LKDTM=m
-# CONFIG_TEST_LIST_SORT is not set
-# CONFIG_TEST_SORT is not set
-# CONFIG_KPROBES_SANITY_TEST is not set
-# CONFIG_BACKTRACE_SELF_TEST is not set
-# CONFIG_RBTREE_TEST is not set
-# CONFIG_INTERVAL_TREE_TEST is not set
-# CONFIG_PERCPU_TEST is not set
-# CONFIG_ATOMIC64_SELFTEST is not set
-# CONFIG_ASYNC_RAID6_TEST is not set
-# CONFIG_TEST_HEXDUMP is not set
-# CONFIG_TEST_STRING_HELPERS is not set
-# CONFIG_TEST_KSTRTOX is not set
-CONFIG_TEST_PRINTF=m
-CONFIG_TEST_BITMAP=m
-CONFIG_TEST_UUID=m
-# CONFIG_TEST_RHASHTABLE is not set
-CONFIG_TEST_HASH=m
-# CONFIG_TEST_PARMAN is not set
-CONFIG_TEST_LKM=m
-# CONFIG_TEST_USER_COPY is not set
-# CONFIG_TEST_BPF is not set
-# CONFIG_TEST_FIRMWARE is not set
-CONFIG_TEST_SYSCTL=m
-# CONFIG_TEST_UDELAY is not set
-CONFIG_TEST_STATIC_KEYS=m
-CONFIG_TEST_KMOD=m
-CONFIG_MEMTEST=y
-# CONFIG_BUG_ON_DATA_CORRUPTION is not set
-# CONFIG_SAMPLES is not set
-CONFIG_HAVE_ARCH_KGDB=y
-# CONFIG_KGDB is not set
-CONFIG_ARCH_HAS_UBSAN_SANITIZE_ALL=y
-# CONFIG_ARCH_WANTS_UBSAN_NO_NULL is not set
-# CONFIG_UBSAN is not set
-CONFIG_ARCH_HAS_DEVMEM_IS_ALLOWED=y
-CONFIG_STRICT_DEVMEM=y
-CONFIG_IO_STRICT_DEVMEM=y
-CONFIG_EARLY_PRINTK_USB=y
-CONFIG_X86_VERBOSE_BOOTUP=y
-CONFIG_EARLY_PRINTK=y
-# CONFIG_EARLY_PRINTK_DBGP is not set
-# CONFIG_EARLY_PRINTK_EFI is not set
-CONFIG_EARLY_PRINTK_USB_XDBC=y
-CONFIG_X86_PTDUMP_CORE=y
-# CONFIG_X86_PTDUMP is not set
-# CONFIG_EFI_PGT_DUMP is not set
-CONFIG_DEBUG_WX=y
-CONFIG_DOUBLEFAULT=y
-# CONFIG_DEBUG_TLBFLUSH is not set
-# CONFIG_IOMMU_DEBUG is not set
-# CONFIG_IOMMU_STRESS is not set
-CONFIG_HAVE_MMIOTRACE_SUPPORT=y
-# CONFIG_X86_DECODER_SELFTEST is not set
-CONFIG_IO_DELAY_TYPE_0X80=0
-CONFIG_IO_DELAY_TYPE_0XED=1
-CONFIG_IO_DELAY_TYPE_UDELAY=2
-CONFIG_IO_DELAY_TYPE_NONE=3
-CONFIG_IO_DELAY_0X80=y
-# CONFIG_IO_DELAY_0XED is not set
-# CONFIG_IO_DELAY_UDELAY is not set
-# CONFIG_IO_DELAY_NONE is not set
-CONFIG_DEFAULT_IO_DELAY_TYPE=0
-# CONFIG_DEBUG_BOOT_PARAMS is not set
-# CONFIG_CPA_DEBUG is not set
-# CONFIG_OPTIMIZE_INLINING is not set
-# CONFIG_DEBUG_ENTRY is not set
-# CONFIG_DEBUG_NMI_SELFTEST is not set
-CONFIG_X86_DEBUG_FPU=y
-# CONFIG_PUNIT_ATOM_DEBUG is not set
-CONFIG_UNWINDER_ORC=y
-# CONFIG_UNWINDER_FRAME_POINTER is not set
-
-#
-# Security options
-#
-CONFIG_KEYS=y
-CONFIG_KEYS_COMPAT=y
-CONFIG_PERSISTENT_KEYRINGS=y
-# CONFIG_BIG_KEYS is not set
-CONFIG_TRUSTED_KEYS=m
-CONFIG_ENCRYPTED_KEYS=m
-# CONFIG_KEY_DH_OPERATIONS is not set
-CONFIG_SECURITY_DMESG_RESTRICT=y
-CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
-CONFIG_SECURITY_TIOCSTI_RESTRICT=y
-CONFIG_SECURITY=y
-# CONFIG_SECURITY_WRITABLE_HOOKS is not set
-CONFIG_SECURITYFS=y
-CONFIG_SECURITY_NETWORK=y
-CONFIG_PAGE_TABLE_ISOLATION=y
-# CONFIG_SECURITY_INFINIBAND is not set
-# CONFIG_SECURITY_NETWORK_XFRM is not set
-CONFIG_SECURITY_PATH=y
-CONFIG_INTEL_TXT=y
-CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR=y
-CONFIG_HARDENED_USERCOPY=y
-CONFIG_FORTIFY_SOURCE=y
-CONFIG_PAGE_SANITIZE=y
-CONFIG_PAGE_SANITIZE_VERIFY=y
-# CONFIG_STATIC_USERMODEHELPER is not set
-# CONFIG_SECURITY_SELINUX is not set
-# CONFIG_SECURITY_SMACK is not set
-# CONFIG_SECURITY_TOMOYO is not set
-CONFIG_SECURITY_APPARMOR=y
-CONFIG_SECURITY_APPARMOR_BOOTPARAM_VALUE=1
-CONFIG_SECURITY_APPARMOR_HASH=y
-CONFIG_SECURITY_APPARMOR_HASH_DEFAULT=y
-# CONFIG_SECURITY_APPARMOR_DEBUG is not set
-# CONFIG_SECURITY_LOADPIN is not set
-CONFIG_SECURITY_YAMA=y
-# CONFIG_INTEGRITY is not set
-CONFIG_DEFAULT_SECURITY_APPARMOR=y
-# CONFIG_DEFAULT_SECURITY_DAC is not set
-CONFIG_DEFAULT_SECURITY="apparmor"
-CONFIG_XOR_BLOCKS=m
-CONFIG_ASYNC_CORE=m
-CONFIG_ASYNC_MEMCPY=m
-CONFIG_ASYNC_XOR=m
-CONFIG_ASYNC_PQ=m
-CONFIG_ASYNC_RAID6_RECOV=m
-CONFIG_CRYPTO=y
-
-#
-# Crypto core or helper
-#
-CONFIG_CRYPTO_ALGAPI=y
-CONFIG_CRYPTO_ALGAPI2=y
-CONFIG_CRYPTO_AEAD=m
-CONFIG_CRYPTO_AEAD2=y
-CONFIG_CRYPTO_BLKCIPHER=y
-CONFIG_CRYPTO_BLKCIPHER2=y
-CONFIG_CRYPTO_HASH=y
-CONFIG_CRYPTO_HASH2=y
-CONFIG_CRYPTO_RNG=m
-CONFIG_CRYPTO_RNG2=y
-CONFIG_CRYPTO_RNG_DEFAULT=m
-CONFIG_CRYPTO_AKCIPHER2=y
-CONFIG_CRYPTO_AKCIPHER=y
-CONFIG_CRYPTO_KPP2=y
-CONFIG_CRYPTO_KPP=m
-CONFIG_CRYPTO_ACOMP2=y
-CONFIG_CRYPTO_RSA=y
-CONFIG_CRYPTO_DH=m
-CONFIG_CRYPTO_ECDH=m
-CONFIG_CRYPTO_MANAGER=y
-CONFIG_CRYPTO_MANAGER2=y
-CONFIG_CRYPTO_USER=m
-CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y
-CONFIG_CRYPTO_GF128MUL=m
-CONFIG_CRYPTO_NULL=m
-CONFIG_CRYPTO_NULL2=y
-CONFIG_CRYPTO_PCRYPT=m
-CONFIG_CRYPTO_WORKQUEUE=y
-CONFIG_CRYPTO_CRYPTD=m
-CONFIG_CRYPTO_MCRYPTD=m
-CONFIG_CRYPTO_AUTHENC=m
-CONFIG_CRYPTO_TEST=m
-CONFIG_CRYPTO_ABLK_HELPER=m
-CONFIG_CRYPTO_SIMD=m
-CONFIG_CRYPTO_GLUE_HELPER_X86=m
-CONFIG_CRYPTO_ENGINE=m
-
-#
-# Authenticated Encryption with Associated Data
-#
-CONFIG_CRYPTO_CCM=m
-CONFIG_CRYPTO_GCM=m
-CONFIG_CRYPTO_CHACHA20POLY1305=m
-CONFIG_CRYPTO_SEQIV=m
-CONFIG_CRYPTO_ECHAINIV=m
-
-#
-# Block modes
-#
-CONFIG_CRYPTO_CBC=m
-CONFIG_CRYPTO_CTR=m
-CONFIG_CRYPTO_CTS=m
-CONFIG_CRYPTO_ECB=y
-CONFIG_CRYPTO_LRW=m
-CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_XTS=m
-CONFIG_CRYPTO_KEYWRAP=m
-
-#
-# Hash modes
-#
-CONFIG_CRYPTO_CMAC=m
-CONFIG_CRYPTO_HMAC=m
-CONFIG_CRYPTO_XCBC=m
-CONFIG_CRYPTO_VMAC=m
-
-#
-# Digest
-#
-CONFIG_CRYPTO_CRC32C=m
-CONFIG_CRYPTO_CRC32C_INTEL=m
-CONFIG_CRYPTO_CRC32=m
-CONFIG_CRYPTO_CRC32_PCLMUL=m
-CONFIG_CRYPTO_CRCT10DIF=y
-CONFIG_CRYPTO_CRCT10DIF_PCLMUL=m
-CONFIG_CRYPTO_GHASH=m
-CONFIG_CRYPTO_POLY1305=m
-CONFIG_CRYPTO_POLY1305_X86_64=m
-CONFIG_CRYPTO_MD4=m
-CONFIG_CRYPTO_MD5=y
-CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_RMD128=m
-CONFIG_CRYPTO_RMD160=m
-CONFIG_CRYPTO_RMD256=m
-CONFIG_CRYPTO_RMD320=m
-CONFIG_CRYPTO_SHA1=y
-CONFIG_CRYPTO_SHA1_SSSE3=m
-CONFIG_CRYPTO_SHA256_SSSE3=m
-CONFIG_CRYPTO_SHA512_SSSE3=m
-CONFIG_CRYPTO_SHA1_MB=m
-CONFIG_CRYPTO_SHA256_MB=m
-CONFIG_CRYPTO_SHA512_MB=m
-CONFIG_CRYPTO_SHA256=m
-CONFIG_CRYPTO_SHA512=y
-CONFIG_CRYPTO_SHA3=m
-CONFIG_CRYPTO_TGR192=m
-CONFIG_CRYPTO_WP512=m
-CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL=m
-
-#
-# Ciphers
-#
-CONFIG_CRYPTO_AES=y
-CONFIG_CRYPTO_AES_TI=m
-CONFIG_CRYPTO_AES_X86_64=m
-CONFIG_CRYPTO_AES_NI_INTEL=m
-CONFIG_CRYPTO_ANUBIS=m
-CONFIG_CRYPTO_ARC4=m
-CONFIG_CRYPTO_BLOWFISH=m
-CONFIG_CRYPTO_BLOWFISH_COMMON=m
-CONFIG_CRYPTO_BLOWFISH_X86_64=m
-CONFIG_CRYPTO_CAMELLIA=m
-CONFIG_CRYPTO_CAMELLIA_X86_64=m
-CONFIG_CRYPTO_CAMELLIA_AESNI_AVX_X86_64=m
-CONFIG_CRYPTO_CAMELLIA_AESNI_AVX2_X86_64=m
-CONFIG_CRYPTO_CAST_COMMON=m
-CONFIG_CRYPTO_CAST5=m
-CONFIG_CRYPTO_CAST5_AVX_X86_64=m
-CONFIG_CRYPTO_CAST6=m
-CONFIG_CRYPTO_CAST6_AVX_X86_64=m
-CONFIG_CRYPTO_DES=m
-CONFIG_CRYPTO_DES3_EDE_X86_64=m
-CONFIG_CRYPTO_FCRYPT=m
-CONFIG_CRYPTO_KHAZAD=m
-CONFIG_CRYPTO_SALSA20=m
-CONFIG_CRYPTO_CHACHA20=m
-CONFIG_CRYPTO_CHACHA20_X86_64=m
-CONFIG_CRYPTO_SEED=m
-CONFIG_CRYPTO_SERPENT=m
-CONFIG_CRYPTO_SERPENT_SSE2_X86_64=m
-CONFIG_CRYPTO_SERPENT_AVX_X86_64=m
-CONFIG_CRYPTO_SERPENT_AVX2_X86_64=m
-CONFIG_CRYPTO_TEA=m
-CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_TWOFISH_COMMON=m
-CONFIG_CRYPTO_TWOFISH_X86_64=m
-CONFIG_CRYPTO_TWOFISH_X86_64_3WAY=m
-CONFIG_CRYPTO_TWOFISH_AVX_X86_64=m
-
-#
-# Compression
-#
-CONFIG_CRYPTO_DEFLATE=m
-CONFIG_CRYPTO_LZO=y
-CONFIG_CRYPTO_842=m
-CONFIG_CRYPTO_LZ4=m
-CONFIG_CRYPTO_LZ4HC=m
-
-#
-# Random Number Generation
-#
-CONFIG_CRYPTO_ANSI_CPRNG=m
-CONFIG_CRYPTO_DRBG_MENU=m
-CONFIG_CRYPTO_DRBG_HMAC=y
-CONFIG_CRYPTO_DRBG_HASH=y
-CONFIG_CRYPTO_DRBG_CTR=y
-CONFIG_CRYPTO_DRBG=m
-CONFIG_CRYPTO_JITTERENTROPY=m
-CONFIG_CRYPTO_USER_API=m
-CONFIG_CRYPTO_USER_API_HASH=m
-CONFIG_CRYPTO_USER_API_SKCIPHER=m
-CONFIG_CRYPTO_USER_API_RNG=m
-CONFIG_CRYPTO_USER_API_AEAD=m
-CONFIG_CRYPTO_HASH_INFO=y
-CONFIG_CRYPTO_HW=y
-CONFIG_CRYPTO_DEV_PADLOCK=m
-CONFIG_CRYPTO_DEV_PADLOCK_AES=m
-CONFIG_CRYPTO_DEV_PADLOCK_SHA=m
-# CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC is not set
-CONFIG_CRYPTO_DEV_CCP=y
-CONFIG_CRYPTO_DEV_CCP_DD=m
-CONFIG_CRYPTO_DEV_SP_CCP=y
-CONFIG_CRYPTO_DEV_CCP_CRYPTO=m
-CONFIG_CRYPTO_DEV_QAT=m
-CONFIG_CRYPTO_DEV_QAT_DH895xCC=m
-CONFIG_CRYPTO_DEV_QAT_C3XXX=m
-CONFIG_CRYPTO_DEV_QAT_C62X=m
-CONFIG_CRYPTO_DEV_QAT_DH895xCCVF=m
-CONFIG_CRYPTO_DEV_QAT_C3XXXVF=m
-CONFIG_CRYPTO_DEV_QAT_C62XVF=m
-CONFIG_CRYPTO_DEV_NITROX=m
-CONFIG_CRYPTO_DEV_NITROX_CNN55XX=m
-CONFIG_CRYPTO_DEV_CHELSIO=m
-CONFIG_CRYPTO_DEV_VIRTIO=m
-CONFIG_ASYMMETRIC_KEY_TYPE=y
-CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y
-CONFIG_X509_CERTIFICATE_PARSER=y
-CONFIG_PKCS7_MESSAGE_PARSER=y
-# CONFIG_PKCS7_TEST_KEY is not set
-# CONFIG_SIGNED_PE_FILE_VERIFICATION is not set
-
-#
-# Certificates for signature checking
-#
-CONFIG_MODULE_SIG_KEY="certs/signing_key.pem"
-CONFIG_SYSTEM_TRUSTED_KEYRING=y
-CONFIG_SYSTEM_TRUSTED_KEYS=""
-# CONFIG_SYSTEM_EXTRA_CERTIFICATE is not set
-CONFIG_SECONDARY_TRUSTED_KEYRING=y
-CONFIG_SYSTEM_BLACKLIST_KEYRING=y
-CONFIG_SYSTEM_BLACKLIST_HASH_LIST=""
-CONFIG_HAVE_KVM=y
-CONFIG_HAVE_KVM_IRQCHIP=y
-CONFIG_HAVE_KVM_IRQFD=y
-CONFIG_HAVE_KVM_IRQ_ROUTING=y
-CONFIG_HAVE_KVM_EVENTFD=y
-CONFIG_KVM_MMIO=y
-CONFIG_KVM_ASYNC_PF=y
-CONFIG_HAVE_KVM_MSI=y
-CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT=y
-CONFIG_KVM_VFIO=y
-CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT=y
-CONFIG_KVM_COMPAT=y
-CONFIG_HAVE_KVM_IRQ_BYPASS=y
-CONFIG_VIRTUALIZATION=y
-CONFIG_KVM=m
-CONFIG_KVM_INTEL=m
-CONFIG_KVM_AMD=m
-# CONFIG_KVM_MMU_AUDIT is not set
-CONFIG_VHOST_NET=m
-CONFIG_VHOST_SCSI=m
-CONFIG_VHOST_VSOCK=m
-CONFIG_VHOST=m
-# CONFIG_VHOST_CROSS_ENDIAN_LEGACY is not set
-CONFIG_BINARY_PRINTF=y
-
-#
-# Library routines
-#
-CONFIG_RAID6_PQ=m
-CONFIG_BITREVERSE=y
-# CONFIG_HAVE_ARCH_BITREVERSE is not set
-CONFIG_RATIONAL=y
-CONFIG_GENERIC_STRNCPY_FROM_USER=y
-CONFIG_GENERIC_STRNLEN_USER=y
-CONFIG_GENERIC_NET_UTILS=y
-CONFIG_GENERIC_FIND_FIRST_BIT=y
-CONFIG_GENERIC_PCI_IOMAP=y
-CONFIG_GENERIC_IOMAP=y
-CONFIG_GENERIC_IO=y
-CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y
-CONFIG_ARCH_HAS_FAST_MULTIPLIER=y
-CONFIG_CRC_CCITT=m
-CONFIG_CRC16=m
-CONFIG_CRC_T10DIF=y
-CONFIG_CRC_ITU_T=m
-CONFIG_CRC32=y
-# CONFIG_CRC32_SELFTEST is not set
-CONFIG_CRC32_SLICEBY8=y
-# CONFIG_CRC32_SLICEBY4 is not set
-# CONFIG_CRC32_SARWATE is not set
-# CONFIG_CRC32_BIT is not set
-CONFIG_CRC4=m
-CONFIG_CRC7=m
-CONFIG_LIBCRC32C=m
-CONFIG_CRC8=m
-CONFIG_XXHASH=m
-# CONFIG_AUDIT_ARCH_COMPAT_GENERIC is not set
-# CONFIG_RANDOM32_SELFTEST is not set
-CONFIG_842_COMPRESS=m
-CONFIG_842_DECOMPRESS=m
-CONFIG_ZLIB_INFLATE=y
-CONFIG_ZLIB_DEFLATE=y
-CONFIG_LZO_COMPRESS=y
-CONFIG_LZO_DECOMPRESS=y
-CONFIG_LZ4_COMPRESS=y
-CONFIG_LZ4HC_COMPRESS=m
-CONFIG_LZ4_DECOMPRESS=y
-CONFIG_ZSTD_COMPRESS=m
-CONFIG_ZSTD_DECOMPRESS=m
-CONFIG_XZ_DEC=y
-CONFIG_XZ_DEC_X86=y
-CONFIG_XZ_DEC_POWERPC=y
-CONFIG_XZ_DEC_IA64=y
-CONFIG_XZ_DEC_ARM=y
-CONFIG_XZ_DEC_ARMTHUMB=y
-CONFIG_XZ_DEC_SPARC=y
-CONFIG_XZ_DEC_BCJ=y
-# CONFIG_XZ_DEC_TEST is not set
-CONFIG_DECOMPRESS_GZIP=y
-CONFIG_DECOMPRESS_BZIP2=y
-CONFIG_DECOMPRESS_LZMA=y
-CONFIG_DECOMPRESS_XZ=y
-CONFIG_DECOMPRESS_LZO=y
-CONFIG_DECOMPRESS_LZ4=y
-CONFIG_GENERIC_ALLOCATOR=y
-CONFIG_REED_SOLOMON=m
-CONFIG_REED_SOLOMON_ENC8=y
-CONFIG_REED_SOLOMON_DEC8=y
-CONFIG_REED_SOLOMON_DEC16=y
-CONFIG_BCH=m
-CONFIG_BCH_CONST_PARAMS=y
-CONFIG_TEXTSEARCH=y
-CONFIG_TEXTSEARCH_KMP=m
-CONFIG_TEXTSEARCH_BM=m
-CONFIG_TEXTSEARCH_FSM=m
-CONFIG_BTREE=y
-CONFIG_INTERVAL_TREE=y
-CONFIG_RADIX_TREE_MULTIORDER=y
-CONFIG_ASSOCIATIVE_ARRAY=y
-CONFIG_HAS_IOMEM=y
-CONFIG_HAS_IOPORT_MAP=y
-CONFIG_HAS_DMA=y
-# CONFIG_DMA_NOOP_OPS is not set
-CONFIG_DMA_VIRT_OPS=y
-CONFIG_CHECK_SIGNATURE=y
-CONFIG_CPUMASK_OFFSTACK=y
-CONFIG_CPU_RMAP=y
-CONFIG_DQL=y
-CONFIG_GLOB=y
-# CONFIG_GLOB_SELFTEST is not set
-CONFIG_NLATTR=y
-CONFIG_LRU_CACHE=m
-CONFIG_CLZ_TAB=y
-CONFIG_CORDIC=m
-CONFIG_DDR=y
-CONFIG_IRQ_POLL=y
-CONFIG_MPILIB=y
-CONFIG_OID_REGISTRY=y
-CONFIG_UCS2_STRING=y
-CONFIG_FONT_SUPPORT=y
-CONFIG_FONTS=y
-CONFIG_FONT_8x8=y
-CONFIG_FONT_8x16=y
-# CONFIG_FONT_6x11 is not set
-# CONFIG_FONT_7x14 is not set
-# CONFIG_FONT_PEARL_8x8 is not set
-# CONFIG_FONT_ACORN_8x8 is not set
-# CONFIG_FONT_MINI_4x6 is not set
-# CONFIG_FONT_6x10 is not set
-# CONFIG_FONT_10x18 is not set
-# CONFIG_FONT_SUN8x16 is not set
-# CONFIG_FONT_SUN12x22 is not set
-# CONFIG_SG_SPLIT is not set
-CONFIG_SG_POOL=y
-CONFIG_ARCH_HAS_SG_CHAIN=y
-CONFIG_ARCH_HAS_PMEM_API=y
-CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE=y
-CONFIG_SBITMAP=y
-CONFIG_PARMAN=m
-# CONFIG_STRING_SELFTEST is not set
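[editor's note] The dropped config above pins the CRC32 implementation to CONFIG_CRYPTO_CRC32C / CONFIG_CRC32_SLICEBY8 and leaves the simpler variants (CONFIG_CRC32_SARWATE, CONFIG_CRC32_BIT) unset. For reference, the Sarwate variant named there is the classic byte-at-a-time table lookup. A minimal userspace sketch follows -- it is an illustration of that algorithm, not the kernel's lib/crc32.c code; the function names are made up:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t crc32_table[256];

/* Build the 256-entry table for the reflected IEEE polynomial. */
static void crc32_init(void)
{
    for (uint32_t i = 0; i < 256; i++) {
        uint32_t c = i;
        for (int k = 0; k < 8; k++)
            c = (c >> 1) ^ ((c & 1) ? 0xEDB88320u : 0);
        crc32_table[i] = c;
    }
}

/* Sarwate: one table lookup per input byte. */
static uint32_t crc32(uint32_t crc, const uint8_t *p, size_t len)
{
    crc = ~crc;
    while (len--)
        crc = (crc >> 8) ^ crc32_table[(crc ^ *p++) & 0xff];
    return ~crc;
}

int main(void)
{
    crc32_init();
    /* Standard check value for CRC-32/ISO-HDLC: cbf43926. */
    printf("%08x\n", crc32(0, (const uint8_t *)"123456789", 9));
    return 0;
}

The slice-by-8 option selected in the config computes the same CRC but processes eight bytes per iteration from eight tables, trading table size for throughput.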
diff --git a/sys-kernel/linux-sources-redcore-lts/files/4.14-introduce-NUMA-identity-node-sched-domain.patch b/sys-kernel/linux-sources-redcore-lts/files/4.14-introduce-NUMA-identity-node-sched-domain.patch
deleted file mode 100644
index 2376edae..00000000
--- a/sys-kernel/linux-sources-redcore-lts/files/4.14-introduce-NUMA-identity-node-sched-domain.patch
+++ /dev/null
@@ -1,46 +0,0 @@
-diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
-index 808998fe1..18d3321ef 100644
---- a/kernel/sched/topology.c
-+++ b/kernel/sched/topology.c
-@@ -1339,6 +1339,10 @@ void sched_init_numa(void)
- if (!sched_domains_numa_distance)
- return;
-
-+ /* Includes NUMA identity node at level 0. */
-+ sched_domains_numa_distance[level++] = curr_distance;
-+ sched_domains_numa_levels = level;
-+
- /*
- * O(nr_nodes^2) deduplicating selection sort -- in order to find the
- * unique distances in the node_distance() table.
-@@ -1386,8 +1390,7 @@ void sched_init_numa(void)
- return;
-
- /*
-- * 'level' contains the number of unique distances, excluding the
-- * identity distance node_distance(i,i).
-+ * 'level' contains the number of unique distances
- *
- * The sched_domains_numa_distance[] array includes the actual distance
- * numbers.
-@@ -1448,10 +1451,19 @@ void sched_init_numa(void)
- for (i = 0; sched_domain_topology[i].mask; i++)
- tl[i] = sched_domain_topology[i];
-
-+ /*
-+ * Add the NUMA identity distance, aka single NODE.
-+ */
-+ tl[i++] = (struct sched_domain_topology_level){
-+ .mask = sd_numa_mask,
-+ .numa_level = 0,
-+ SD_INIT_NAME(NODE)
-+ };
-+
- /*
- * .. and append 'j' levels of NUMA goodness.
- */
-- for (j = 0; j < level; i++, j++) {
-+ for (j = 1; j < level; i++, j++) {
- tl[i] = (struct sched_domain_topology_level){
- .mask = sd_numa_mask,
- .sd_flags = cpu_numa_flags,
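[editor's note] The hunk above changes sched_init_numa() so that the identity distance node_distance(i,i) (the ACPI SLIT local distance, conventionally 10) is counted as level 0, giving each node its own single-node NODE domain before the cross-node NUMA levels are appended. A minimal userspace sketch of the resulting unique-distance scan follows -- the 3-node table and names are invented for illustration, and the kernel additionally sorts the levels (a selection sort), which is omitted here:

#include <stdbool.h>
#include <stdio.h>

#define NR_NODES 3

/* Hypothetical SLIT-style distance table; 10 is the local distance. */
static const int node_distance[NR_NODES][NR_NODES] = {
    { 10, 20, 30 },
    { 20, 10, 30 },
    { 30, 30, 10 },
};

int main(void)
{
    int levels[NR_NODES * NR_NODES];
    int nlevels = 0;

    /* Level 0: the identity distance, now added unconditionally. */
    levels[nlevels++] = node_distance[0][0];

    /* O(nr_nodes^2) deduplicating scan for the remaining distances. */
    for (int i = 0; i < NR_NODES; i++) {
        for (int j = 0; j < NR_NODES; j++) {
            bool seen = false;
            for (int k = 0; k < nlevels; k++)
                if (levels[k] == node_distance[i][j])
                    seen = true;
            if (!seen)
                levels[nlevels++] = node_distance[i][j];
        }
    }

    for (int k = 0; k < nlevels; k++)
        printf("level %d: distance %d\n", k, levels[k]);
    return 0;
}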
diff --git a/sys-kernel/linux-sources-redcore-lts/files/4.14-k10temp-add-ZEN-support.patch b/sys-kernel/linux-sources-redcore-lts/files/4.14-k10temp-add-ZEN-support.patch
deleted file mode 100644
index b1e8a9b0..00000000
--- a/sys-kernel/linux-sources-redcore-lts/files/4.14-k10temp-add-ZEN-support.patch
+++ /dev/null
@@ -1,177 +0,0 @@
-diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
-index ce3b91f22..0721e1756 100644
---- a/drivers/hwmon/k10temp.c
-+++ b/drivers/hwmon/k10temp.c
-@@ -36,6 +36,10 @@ MODULE_PARM_DESC(force, "force loading on processors with erratum 319");
- /* Provide lock for writing to NB_SMU_IND_ADDR */
- static DEFINE_MUTEX(nb_smu_ind_mutex);
-
-+#ifndef PCI_DEVICE_ID_AMD_17H_DF_F3
-+#define PCI_DEVICE_ID_AMD_17H_DF_F3 0x1463
-+#endif
-+
- /* CPUID function 0x80000001, ebx */
- #define CPUID_PKGTYPE_MASK 0xf0000000
- #define CPUID_PKGTYPE_F 0x00000000
-@@ -61,31 +65,72 @@ static DEFINE_MUTEX(nb_smu_ind_mutex);
- */
- #define F15H_M60H_REPORTED_TEMP_CTRL_OFFSET 0xd8200ca4
-
--static void amd_nb_smu_index_read(struct pci_dev *pdev, unsigned int devfn,
-- int offset, u32 *val)
-+/* F17h M01h Access through SMN */
-+#define F17H_M01H_REPORTED_TEMP_CTRL_OFFSET 0x00059800
-+
-+struct k10temp_data {
-+ struct pci_dev *pdev;
-+ void (*read_tempreg)(struct pci_dev *pdev, u32 *regval);
-+ int temp_offset;
-+};
-+
-+struct tctl_offset {
-+ u8 model;
-+ char const *id;
-+ int offset;
-+};
-+
-+static const struct tctl_offset tctl_offset_table[] = {
-+ { 0x17, "AMD Ryzen 5 1600X", 20000 },
-+ { 0x17, "AMD Ryzen 7 1700X", 20000 },
-+ { 0x17, "AMD Ryzen 7 1800X", 20000 },
-+ { 0x17, "AMD Ryzen Threadripper 1950X", 27000 },
-+ { 0x17, "AMD Ryzen Threadripper 1920X", 27000 },
-+ { 0x17, "AMD Ryzen Threadripper 1950", 10000 },
-+ { 0x17, "AMD Ryzen Threadripper 1920", 10000 },
-+ { 0x17, "AMD Ryzen Threadripper 1910", 10000 },
-+};
-+
-+static void read_tempreg_pci(struct pci_dev *pdev, u32 *regval)
-+{
-+ pci_read_config_dword(pdev, REG_REPORTED_TEMPERATURE, regval);
-+}
-+
-+static void amd_nb_index_read(struct pci_dev *pdev, unsigned int devfn,
-+ unsigned int base, int offset, u32 *val)
- {
- mutex_lock(&nb_smu_ind_mutex);
- pci_bus_write_config_dword(pdev->bus, devfn,
-- 0xb8, offset);
-+ base, offset);
- pci_bus_read_config_dword(pdev->bus, devfn,
-- 0xbc, val);
-+ base + 4, val);
- mutex_unlock(&nb_smu_ind_mutex);
- }
-
-+static void read_tempreg_nb_f15(struct pci_dev *pdev, u32 *regval)
-+{
-+ amd_nb_index_read(pdev, PCI_DEVFN(0, 0), 0xb8,
-+ F15H_M60H_REPORTED_TEMP_CTRL_OFFSET, regval);
-+}
-+
-+static void read_tempreg_nb_f17(struct pci_dev *pdev, u32 *regval)
-+{
-+ amd_nb_index_read(pdev, PCI_DEVFN(0, 0), 0x60,
-+ F17H_M01H_REPORTED_TEMP_CTRL_OFFSET, regval);
-+}
-+
- static ssize_t temp1_input_show(struct device *dev,
- struct device_attribute *attr, char *buf)
- {
-+ struct k10temp_data *data = dev_get_drvdata(dev);
- u32 regval;
-- struct pci_dev *pdev = dev_get_drvdata(dev);
--
-- if (boot_cpu_data.x86 == 0x15 && boot_cpu_data.x86_model == 0x60) {
-- amd_nb_smu_index_read(pdev, PCI_DEVFN(0, 0),
-- F15H_M60H_REPORTED_TEMP_CTRL_OFFSET,
-- &regval);
-- } else {
-- pci_read_config_dword(pdev, REG_REPORTED_TEMPERATURE, &regval);
-- }
-- return sprintf(buf, "%u\n", (regval >> 21) * 125);
-+ unsigned int temp;
-+
-+ data->read_tempreg(data->pdev, &regval);
-+ temp = (regval >> 21) * 125;
-+ temp -= data->temp_offset;
-+
-+ return sprintf(buf, "%u\n", temp);
- }
-
- static ssize_t temp1_max_show(struct device *dev,
-@@ -98,11 +143,12 @@ static ssize_t show_temp_crit(struct device *dev,
- struct device_attribute *devattr, char *buf)
- {
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
-+ struct k10temp_data *data = dev_get_drvdata(dev);
- int show_hyst = attr->index;
- u32 regval;
- int value;
-
-- pci_read_config_dword(dev_get_drvdata(dev),
-+ pci_read_config_dword(data->pdev,
- REG_HARDWARE_THERMAL_CONTROL, &regval);
- value = ((regval >> 16) & 0x7f) * 500 + 52000;
- if (show_hyst)
-@@ -119,7 +165,8 @@ static umode_t k10temp_is_visible(struct kobject *kobj,
- struct attribute *attr, int index)
- {
- struct device *dev = container_of(kobj, struct device, kobj);
-- struct pci_dev *pdev = dev_get_drvdata(dev);
-+ struct k10temp_data *data = dev_get_drvdata(dev);
-+ struct pci_dev *pdev = data->pdev;
-
- if (index >= 2) {
- u32 reg_caps, reg_htc;
-@@ -187,7 +234,9 @@ static int k10temp_probe(struct pci_dev *pdev,
- {
- int unreliable = has_erratum_319(pdev);
- struct device *dev = &pdev->dev;
-+ struct k10temp_data *data;
- struct device *hwmon_dev;
-+ int i;
-
- if (unreliable) {
- if (!force) {
-@@ -199,7 +248,31 @@ static int k10temp_probe(struct pci_dev *pdev,
- "unreliable CPU thermal sensor; check erratum 319\n");
- }
-
-- hwmon_dev = devm_hwmon_device_register_with_groups(dev, "k10temp", pdev,
-+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
-+ if (!data)
-+ return -ENOMEM;
-+
-+ data->pdev = pdev;
-+
-+ if (boot_cpu_data.x86 == 0x15 && (boot_cpu_data.x86_model == 0x60 ||
-+ boot_cpu_data.x86_model == 0x70))
-+ data->read_tempreg = read_tempreg_nb_f15;
-+ else if (boot_cpu_data.x86 == 0x17)
-+ data->read_tempreg = read_tempreg_nb_f17;
-+ else
-+ data->read_tempreg = read_tempreg_pci;
-+
-+ for (i = 0; i < ARRAY_SIZE(tctl_offset_table); i++) {
-+ const struct tctl_offset *entry = &tctl_offset_table[i];
-+
-+ if (boot_cpu_data.x86 == entry->model &&
-+ strstr(boot_cpu_data.x86_model_id, entry->id)) {
-+ data->temp_offset = entry->offset;
-+ break;
-+ }
-+ }
-+
-+ hwmon_dev = devm_hwmon_device_register_with_groups(dev, "k10temp", data,
- k10temp_groups);
- return PTR_ERR_OR_ZERO(hwmon_dev);
- }
-@@ -214,6 +287,7 @@ static const struct pci_device_id k10temp_id_table[] = {
- { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F3) },
- { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
- { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
-+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) },
- {}
- };
- MODULE_DEVICE_TABLE(pci, k10temp_id_table);
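[editor's note] The k10temp patch above reads Tctl from bits 31:21 of the reported-temperature register (0.125 degC units) and, for the Ryzen/Threadripper parts in tctl_offset_table, subtracts a fixed offset to recover Tdie. A small userspace sketch of that conversion follows -- the raw register value is made up for illustration; only the shift, scale, and offset come from the patch:

#include <stdint.h>
#include <stdio.h>

/* Tctl in bits 31:21, 0.125 degC steps -> milli-degC, minus Tctl offset. */
static unsigned int k10temp_millicelsius(uint32_t regval, int temp_offset)
{
    unsigned int temp = (regval >> 21) * 125;
    return temp - temp_offset;
}

int main(void)
{
    uint32_t regval = 600u << 21; /* hypothetical raw reading: Tctl = 75 degC */

    /* 27000 is the table's offset for a Threadripper 1950X: prints 48000. */
    printf("%u milli-degC\n", k10temp_millicelsius(regval, 27000));
    return 0;
}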
diff --git a/sys-kernel/linux-sources-redcore-lts/files/4.14-linux-hardened.patch b/sys-kernel/linux-sources-redcore-lts/files/4.14-linux-hardened.patch
deleted file mode 100644
index 9280791e..00000000
--- a/sys-kernel/linux-sources-redcore-lts/files/4.14-linux-hardened.patch
+++ /dev/null
@@ -1,2868 +0,0 @@
-diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
-index 7d8b17ce8804..7e4f071c3bf2 100644
---- a/Documentation/admin-guide/kernel-parameters.txt
-+++ b/Documentation/admin-guide/kernel-parameters.txt
-@@ -490,16 +490,6 @@
- nosocket -- Disable socket memory accounting.
- nokmem -- Disable kernel memory accounting.
-
-- checkreqprot [SELINUX] Set initial checkreqprot flag value.
-- Format: { "0" | "1" }
-- See security/selinux/Kconfig help text.
-- 0 -- check protection applied by kernel (includes
-- any implied execute protection).
-- 1 -- check protection requested by application.
-- Default value is set via a kernel config option.
-- Value can be changed at runtime via
-- /selinux/checkreqprot.
--
- cio_ignore= [S390]
- See Documentation/s390/CommonIO for details.
- clk_ignore_unused
-@@ -2984,6 +2974,11 @@
- the specified number of seconds. This is to be used if
- your oopses keep scrolling off the screen.
-
-+ extra_latent_entropy
-+ Enable a very simple form of latent entropy extraction
-+ from the first 4GB of memory as the bootmem allocator
-+ passes the memory pages to the buddy allocator.
-+
- pcbit= [HW,ISDN]
-
- pcd. [PARIDE]
-diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt
-index 694968c7523c..002d86416ef8 100644
---- a/Documentation/sysctl/kernel.txt
-+++ b/Documentation/sysctl/kernel.txt
-@@ -91,6 +91,7 @@ show up in /proc/sys/kernel:
- - sysctl_writes_strict
- - tainted
- - threads-max
-+- tiocsti_restrict
- - unknown_nmi_panic
- - watchdog
- - watchdog_thresh
-@@ -999,6 +1000,26 @@ available RAM pages threads-max is reduced accordingly.
-
- ==============================================================
-
-+tiocsti_restrict:
-+
-+This toggle indicates whether unprivileged users are prevented
-+from using the TIOCSTI ioctl to inject commands into other processes
-+which share a tty session.
-+
-+When tiocsti_restrict is set to (0) there are no restrictions (except
-+the default restriction of only being able to inject commands into
-+one's own tty). When tiocsti_restrict is set to (1), users must
-+have CAP_SYS_ADMIN to use the TIOCSTI ioctl.
-+
-+When user namespaces are in use, the check for the capability
-+CAP_SYS_ADMIN is done against the user namespace that originally
-+opened the tty.
-+
-+The kernel config option CONFIG_SECURITY_TIOCSTI_RESTRICT sets the
-+default value of tiocsti_restrict.
-+
-+==============================================================
-+
- unknown_nmi_panic:
-
- The value in this file affects behavior of handling NMI. When the
-diff --git a/Makefile b/Makefile
-index 70cc37cb3e99..edc3de99b3cd 100644
---- a/Makefile
-+++ b/Makefile
-@@ -714,6 +714,9 @@ endif
- KBUILD_CFLAGS += $(stackp-flag)
-
- ifeq ($(cc-name),clang)
-+ifdef CONFIG_LOCAL_INIT
-+KBUILD_CFLAGS += -fsanitize=local-init
-+endif
- KBUILD_CPPFLAGS += $(call cc-option,-Qunused-arguments,)
- KBUILD_CFLAGS += $(call cc-disable-warning, format-invalid-specifier)
- KBUILD_CFLAGS += $(call cc-disable-warning, gnu)
-diff --git a/arch/Kconfig b/arch/Kconfig
-index 77b3e21c4844..3dff252446ac 100644
---- a/arch/Kconfig
-+++ b/arch/Kconfig
-@@ -446,6 +446,11 @@ config GCC_PLUGIN_LATENT_ENTROPY
- is some slowdown of the boot process (about 0.5%) and fork and
- irq processing.
-
-+ When extra_latent_entropy is passed on the kernel command line,
-+ entropy will be extracted from up to the first 4GB of RAM while the
-+ runtime memory allocator is being initialized. This costs even more
-+ slowdown of the boot process.
-+
- Note that entropy extracted this way is not cryptographically
- secure!
-
-@@ -539,7 +544,7 @@ config CC_STACKPROTECTOR
- choice
- prompt "Stack Protector buffer overflow detection"
- depends on HAVE_CC_STACKPROTECTOR
-- default CC_STACKPROTECTOR_NONE
-+ default CC_STACKPROTECTOR_STRONG
- help
- This option turns on the "stack-protector" GCC feature. This
- feature puts, at the beginning of functions, a canary value on
-@@ -741,7 +746,7 @@ config ARCH_MMAP_RND_BITS
- int "Number of bits to use for ASLR of mmap base address" if EXPERT
- range ARCH_MMAP_RND_BITS_MIN ARCH_MMAP_RND_BITS_MAX
- default ARCH_MMAP_RND_BITS_DEFAULT if ARCH_MMAP_RND_BITS_DEFAULT
-- default ARCH_MMAP_RND_BITS_MIN
-+ default ARCH_MMAP_RND_BITS_MAX
- depends on HAVE_ARCH_MMAP_RND_BITS
- help
- This value can be used to select the number of bits to use to
-@@ -775,7 +780,7 @@ config ARCH_MMAP_RND_COMPAT_BITS
- int "Number of bits to use for ASLR of mmap base address for compatible applications" if EXPERT
- range ARCH_MMAP_RND_COMPAT_BITS_MIN ARCH_MMAP_RND_COMPAT_BITS_MAX
- default ARCH_MMAP_RND_COMPAT_BITS_DEFAULT if ARCH_MMAP_RND_COMPAT_BITS_DEFAULT
-- default ARCH_MMAP_RND_COMPAT_BITS_MIN
-+ default ARCH_MMAP_RND_COMPAT_BITS_MAX
- depends on HAVE_ARCH_MMAP_RND_COMPAT_BITS
- help
- This value can be used to select the number of bits to use to
-@@ -958,6 +963,7 @@ config ARCH_HAS_REFCOUNT
-
- config REFCOUNT_FULL
- bool "Perform full reference count validation at the expense of speed"
-+ default y
- help
- Enabling this switches the refcounting infrastructure from a fast
- unchecked atomic_t implementation to a fully state checked
-diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
-index c30cd78b6918..ba32a283f027 100644
---- a/arch/arm64/Kconfig
-+++ b/arch/arm64/Kconfig
-@@ -926,6 +926,7 @@ endif
-
- config ARM64_SW_TTBR0_PAN
- bool "Emulate Privileged Access Never using TTBR0_EL1 switching"
-+ default y
- help
- Enabling this option prevents the kernel from accessing
- user-space memory directly by pointing TTBR0_EL1 to a reserved
-@@ -1052,6 +1053,7 @@ config RANDOMIZE_BASE
- bool "Randomize the address of the kernel image"
- select ARM64_MODULE_PLTS if MODULES
- select RELOCATABLE
-+ default y
- help
- Randomizes the virtual address at which the kernel image is
- loaded, as a security feature that deters exploit attempts
-diff --git a/arch/arm64/Kconfig.debug b/arch/arm64/Kconfig.debug
-index cc6bd559af85..01d5442d4722 100644
---- a/arch/arm64/Kconfig.debug
-+++ b/arch/arm64/Kconfig.debug
-@@ -45,6 +45,7 @@ config ARM64_RANDOMIZE_TEXT_OFFSET
- config DEBUG_WX
- bool "Warn on W+X mappings at boot"
- select ARM64_PTDUMP_CORE
-+ default y
- ---help---
- Generate a warning if any W+X mappings are found at boot.
-
-diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
-index b05796578e7a..8f6e2099717d 100644
---- a/arch/arm64/configs/defconfig
-+++ b/arch/arm64/configs/defconfig
-@@ -1,4 +1,3 @@
--CONFIG_SYSVIPC=y
- CONFIG_POSIX_MQUEUE=y
- CONFIG_AUDIT=y
- CONFIG_NO_HZ_IDLE=y
-diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h
-index 33be513ef24c..6f0c0e3ef0dd 100644
---- a/arch/arm64/include/asm/elf.h
-+++ b/arch/arm64/include/asm/elf.h
-@@ -114,10 +114,10 @@
-
- /*
- * This is the base location for PIE (ET_DYN with INTERP) loads. On
-- * 64-bit, this is above 4GB to leave the entire 32-bit address
-+ * 64-bit, this is raised to 4GB to leave the entire 32-bit address
- * space open for things that want to use the area for 32-bit pointers.
- */
--#define ELF_ET_DYN_BASE (2 * TASK_SIZE_64 / 3)
-+#define ELF_ET_DYN_BASE 0x100000000UL
-
- #ifndef __ASSEMBLY__
-
-@@ -158,10 +158,10 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
- /* 1GB of VA */
- #ifdef CONFIG_COMPAT
- #define STACK_RND_MASK (test_thread_flag(TIF_32BIT) ? \
-- 0x7ff >> (PAGE_SHIFT - 12) : \
-- 0x3ffff >> (PAGE_SHIFT - 12))
-+ ((1UL << mmap_rnd_compat_bits) - 1) >> (PAGE_SHIFT - 12) : \
-+ ((1UL << mmap_rnd_bits) - 1) >> (PAGE_SHIFT - 12))
- #else
--#define STACK_RND_MASK (0x3ffff >> (PAGE_SHIFT - 12))
-+#define STACK_RND_MASK (((1UL << mmap_rnd_bits) - 1) >> (PAGE_SHIFT - 12))
- #endif
-
- #ifdef __AARCH64EB__
-diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
-index 9e773732520c..91359f45b5fc 100644
---- a/arch/arm64/kernel/process.c
-+++ b/arch/arm64/kernel/process.c
-@@ -419,9 +419,9 @@ unsigned long arch_align_stack(unsigned long sp)
- unsigned long arch_randomize_brk(struct mm_struct *mm)
- {
- if (is_compat_task())
-- return randomize_page(mm->brk, SZ_32M);
-+ return mm->brk + get_random_long() % SZ_32M + PAGE_SIZE;
- else
-- return randomize_page(mm->brk, SZ_1G);
-+ return mm->brk + get_random_long() % SZ_1G + PAGE_SIZE;
- }
-
- /*
-diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
-index 4f393eb9745f..1a31f8fc82ed 100644
---- a/arch/x86/Kconfig
-+++ b/arch/x86/Kconfig
-@@ -1145,8 +1145,7 @@ config VM86
- default X86_LEGACY_VM86
-
- config X86_16BIT
-- bool "Enable support for 16-bit segments" if EXPERT
-- default y
-+ bool "Enable support for 16-bit segments"
- depends on MODIFY_LDT_SYSCALL
- ---help---
- This option is required by programs like Wine to run 16-bit
-@@ -2220,7 +2219,7 @@ config COMPAT_VDSO
- choice
- prompt "vsyscall table for legacy applications"
- depends on X86_64
-- default LEGACY_VSYSCALL_EMULATE
-+ default LEGACY_VSYSCALL_NONE
- help
- Legacy user code that does not know how to find the vDSO expects
- to be able to issue three syscalls by calling fixed addresses in
-@@ -2310,8 +2309,7 @@ config CMDLINE_OVERRIDE
- be set to 'N' under normal conditions.
-
- config MODIFY_LDT_SYSCALL
-- bool "Enable the LDT (local descriptor table)" if EXPERT
-- default y
-+ bool "Enable the LDT (local descriptor table)"
- ---help---
- Linux can allow user programs to install a per-process x86
- Local Descriptor Table (LDT) using the modify_ldt(2) system
-diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
-index 6293a8768a91..add82e0f1df3 100644
---- a/arch/x86/Kconfig.debug
-+++ b/arch/x86/Kconfig.debug
-@@ -101,6 +101,7 @@ config EFI_PGT_DUMP
- config DEBUG_WX
- bool "Warn on W+X mappings at boot"
- select X86_PTDUMP_CORE
-+ default y
- ---help---
- Generate a warning if any W+X mappings are found at boot.
-
-diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig
-index e32fc1f274d8..d08acc76502a 100644
---- a/arch/x86/configs/x86_64_defconfig
-+++ b/arch/x86/configs/x86_64_defconfig
-@@ -1,5 +1,4 @@
- # CONFIG_LOCALVERSION_AUTO is not set
--CONFIG_SYSVIPC=y
- CONFIG_POSIX_MQUEUE=y
- CONFIG_BSD_PROCESS_ACCT=y
- CONFIG_TASKSTATS=y
-diff --git a/arch/x86/entry/vdso/vma.c b/arch/x86/entry/vdso/vma.c
-index 1911310959f8..bba8dbbc07a8 100644
---- a/arch/x86/entry/vdso/vma.c
-+++ b/arch/x86/entry/vdso/vma.c
-@@ -203,55 +203,9 @@ static int map_vdso(const struct vdso_image *image, unsigned long addr)
- }
-
- #ifdef CONFIG_X86_64
--/*
-- * Put the vdso above the (randomized) stack with another randomized
-- * offset. This way there is no hole in the middle of address space.
-- * To save memory make sure it is still in the same PTE as the stack
-- * top. This doesn't give that many random bits.
-- *
-- * Note that this algorithm is imperfect: the distribution of the vdso
-- * start address within a PMD is biased toward the end.
-- *
-- * Only used for the 64-bit and x32 vdsos.
-- */
--static unsigned long vdso_addr(unsigned long start, unsigned len)
--{
-- unsigned long addr, end;
-- unsigned offset;
--
-- /*
-- * Round up the start address. It can start out unaligned as a result
-- * of stack start randomization.
-- */
-- start = PAGE_ALIGN(start);
--
-- /* Round the lowest possible end address up to a PMD boundary. */
-- end = (start + len + PMD_SIZE - 1) & PMD_MASK;
-- if (end >= TASK_SIZE_MAX)
-- end = TASK_SIZE_MAX;
-- end -= len;
--
-- if (end > start) {
-- offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1);
-- addr = start + (offset << PAGE_SHIFT);
-- } else {
-- addr = start;
-- }
--
-- /*
-- * Forcibly align the final address in case we have a hardware
-- * issue that requires alignment for performance reasons.
-- */
-- addr = align_vdso_addr(addr);
--
-- return addr;
--}
--
- static int map_vdso_randomized(const struct vdso_image *image)
- {
-- unsigned long addr = vdso_addr(current->mm->start_stack, image->size-image->sym_vvar_start);
--
-- return map_vdso(image, addr);
-+ return map_vdso(image, 0);
- }
- #endif
-
-diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
-index 3a091cea36c5..0931c05a3348 100644
---- a/arch/x86/include/asm/elf.h
-+++ b/arch/x86/include/asm/elf.h
-@@ -249,11 +249,11 @@ extern int force_personality32;
-
- /*
- * This is the base location for PIE (ET_DYN with INTERP) loads. On
-- * 64-bit, this is above 4GB to leave the entire 32-bit address
-+ * 64-bit, this is raised to 4GB to leave the entire 32-bit address
- * space open for things that want to use the area for 32-bit pointers.
- */
- #define ELF_ET_DYN_BASE (mmap_is_ia32() ? 0x000400000UL : \
-- (DEFAULT_MAP_WINDOW / 3 * 2))
-+ 0x100000000UL)
-
- /* This yields a mask that user programs can use to figure out what
- instruction set this CPU supports. This could be done in user space,
-@@ -312,8 +312,8 @@ extern unsigned long get_mmap_base(int is_legacy);
-
- #ifdef CONFIG_X86_32
-
--#define __STACK_RND_MASK(is32bit) (0x7ff)
--#define STACK_RND_MASK (0x7ff)
-+#define __STACK_RND_MASK(is32bit) ((1UL << mmap_rnd_bits) - 1)
-+#define STACK_RND_MASK ((1UL << mmap_rnd_bits) - 1)
-
- #define ARCH_DLINFO ARCH_DLINFO_IA32
-
-@@ -322,7 +322,11 @@ extern unsigned long get_mmap_base(int is_legacy);
- #else /* CONFIG_X86_32 */
-
- /* 1GB for 64bit, 8MB for 32bit */
--#define __STACK_RND_MASK(is32bit) ((is32bit) ? 0x7ff : 0x3fffff)
-+#ifdef CONFIG_COMPAT
-+#define __STACK_RND_MASK(is32bit) ((is32bit) ? (1UL << mmap_rnd_compat_bits) - 1 : (1UL << mmap_rnd_bits) - 1)
-+#else
-+#define __STACK_RND_MASK(is32bit) ((1UL << mmap_rnd_bits) - 1)
-+#endif
- #define STACK_RND_MASK __STACK_RND_MASK(mmap_is_ia32())
-
- #define ARCH_DLINFO \
-@@ -380,5 +384,4 @@ struct va_alignment {
- } ____cacheline_aligned;
-
- extern struct va_alignment va_align;
--extern unsigned long align_vdso_addr(unsigned long);
- #endif /* _ASM_X86_ELF_H */
-diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
-index e31040333f0c..14f3f214c9d1 100644
---- a/arch/x86/include/asm/tlbflush.h
-+++ b/arch/x86/include/asm/tlbflush.h
-@@ -302,6 +302,7 @@ static inline void cr4_set_bits(unsigned long mask)
- unsigned long cr4;
-
- cr4 = this_cpu_read(cpu_tlbstate.cr4);
-+ BUG_ON(cr4 != __read_cr4());
- if ((cr4 | mask) != cr4) {
- cr4 |= mask;
- this_cpu_write(cpu_tlbstate.cr4, cr4);
-@@ -315,6 +316,7 @@ static inline void cr4_clear_bits(unsigned long mask)
- unsigned long cr4;
-
- cr4 = this_cpu_read(cpu_tlbstate.cr4);
-+ BUG_ON(cr4 != __read_cr4());
- if ((cr4 & ~mask) != cr4) {
- cr4 &= ~mask;
- this_cpu_write(cpu_tlbstate.cr4, cr4);
-@@ -327,6 +329,7 @@ static inline void cr4_toggle_bits(unsigned long mask)
- unsigned long cr4;
-
- cr4 = this_cpu_read(cpu_tlbstate.cr4);
-+ BUG_ON(cr4 != __read_cr4());
- cr4 ^= mask;
- this_cpu_write(cpu_tlbstate.cr4, cr4);
- __write_cr4(cr4);
-@@ -435,6 +438,7 @@ static inline void __native_flush_tlb_global(void)
- raw_local_irq_save(flags);
-
- cr4 = this_cpu_read(cpu_tlbstate.cr4);
-+ BUG_ON(cr4 != __read_cr4());
- /* toggle PGE */
- native_write_cr4(cr4 ^ X86_CR4_PGE);
- /* write old PGE again and flush TLBs */
-diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
-index 51e49f6fe8e1..7ee813033624 100644
---- a/arch/x86/kernel/cpu/common.c
-+++ b/arch/x86/kernel/cpu/common.c
-@@ -1669,7 +1669,6 @@ void cpu_init(void)
- wrmsrl(MSR_KERNEL_GS_BASE, 0);
- barrier();
-
-- x86_configure_nx();
- x2apic_setup();
-
- /*
-diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
-index a98d1cdd6299..7426eb5d1c03 100644
---- a/arch/x86/kernel/process.c
-+++ b/arch/x86/kernel/process.c
-@@ -40,6 +40,8 @@
- #include <asm/desc.h>
- #include <asm/prctl.h>
- #include <asm/spec-ctrl.h>
-+#include <asm/elf.h>
-+#include <linux/sizes.h>
-
- #include "process.h"
-
-@@ -782,7 +784,10 @@ unsigned long arch_align_stack(unsigned long sp)
-
- unsigned long arch_randomize_brk(struct mm_struct *mm)
- {
-- return randomize_page(mm->brk, 0x02000000);
-+ if (mmap_is_ia32())
-+ return mm->brk + get_random_long() % SZ_32M + PAGE_SIZE;
-+ else
-+ return mm->brk + get_random_long() % SZ_1G + PAGE_SIZE;
- }
-
- /*
-diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
-index a63fe77b3217..e1085e76043e 100644
---- a/arch/x86/kernel/sys_x86_64.c
-+++ b/arch/x86/kernel/sys_x86_64.c
-@@ -54,13 +54,6 @@ static unsigned long get_align_bits(void)
- return va_align.bits & get_align_mask();
- }
-
--unsigned long align_vdso_addr(unsigned long addr)
--{
-- unsigned long align_mask = get_align_mask();
-- addr = (addr + align_mask) & ~align_mask;
-- return addr | get_align_bits();
--}
--
- static int __init control_va_addr_alignment(char *str)
- {
- /* guard against enabling this on other CPU families */
-@@ -122,10 +115,7 @@ static void find_start_end(unsigned long addr, unsigned long flags,
- }
-
- *begin = get_mmap_base(1);
-- if (in_compat_syscall())
-- *end = task_size_32bit();
-- else
-- *end = task_size_64bit(addr > DEFAULT_MAP_WINDOW);
-+ *end = get_mmap_base(0);
- }
-
- unsigned long
-@@ -206,7 +196,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
-
- info.flags = VM_UNMAPPED_AREA_TOPDOWN;
- info.length = len;
-- info.low_limit = PAGE_SIZE;
-+ info.low_limit = get_mmap_base(1);
- info.high_limit = get_mmap_base(0);
-
- /*
-diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
-index 3141e67ec24c..e93173193f60 100644
---- a/arch/x86/mm/init_32.c
-+++ b/arch/x86/mm/init_32.c
-@@ -558,7 +558,7 @@ static void __init pagetable_init(void)
- permanent_kmaps_init(pgd_base);
- }
-
--pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL);
-+pteval_t __supported_pte_mask __ro_after_init = ~(_PAGE_NX | _PAGE_GLOBAL);
- EXPORT_SYMBOL_GPL(__supported_pte_mask);
-
- /* user-defined highmem size */
-@@ -865,7 +865,7 @@ int arch_remove_memory(u64 start, u64 size)
- #endif
- #endif
-
--int kernel_set_to_readonly __read_mostly;
-+int kernel_set_to_readonly __ro_after_init;
-
- void set_kernel_text_rw(void)
- {
-@@ -917,12 +917,11 @@ void mark_rodata_ro(void)
- unsigned long start = PFN_ALIGN(_text);
- unsigned long size = PFN_ALIGN(_etext) - start;
-
-+ kernel_set_to_readonly = 1;
- set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
- printk(KERN_INFO "Write protecting the kernel text: %luk\n",
- size >> 10);
-
-- kernel_set_to_readonly = 1;
--
- #ifdef CONFIG_CPA_DEBUG
- printk(KERN_INFO "Testing CPA: Reverting %lx-%lx\n",
- start, start+size);
-diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
-index 624edfbff02d..54bb0705dd53 100644
---- a/arch/x86/mm/init_64.c
-+++ b/arch/x86/mm/init_64.c
-@@ -65,7 +65,7 @@
- * around without checking the pgd every time.
- */
-
--pteval_t __supported_pte_mask __read_mostly = ~0;
-+pteval_t __supported_pte_mask __ro_after_init = ~0;
- EXPORT_SYMBOL_GPL(__supported_pte_mask);
-
- int force_personality32;
-@@ -1179,7 +1179,7 @@ void __init mem_init(void)
- mem_init_print_info(NULL);
- }
-
--int kernel_set_to_readonly;
-+int kernel_set_to_readonly __ro_after_init;
-
- void set_kernel_text_rw(void)
- {
-@@ -1228,9 +1228,8 @@ void mark_rodata_ro(void)
-
- printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
- (end - start) >> 10);
-- set_memory_ro(start, (end - start) >> PAGE_SHIFT);
--
- kernel_set_to_readonly = 1;
-+ set_memory_ro(start, (end - start) >> PAGE_SHIFT);
-
- /*
- * The rodata/data/bss/brk section (but not the kernel text!)
-diff --git a/block/blk-softirq.c b/block/blk-softirq.c
-index 01e2b353a2b9..9aeddca4a29f 100644
---- a/block/blk-softirq.c
-+++ b/block/blk-softirq.c
-@@ -20,7 +20,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
- * Softirq action handler - move entries to local list and loop over them
- * while passing them to the queue registered handler.
- */
--static __latent_entropy void blk_done_softirq(struct softirq_action *h)
-+static __latent_entropy void blk_done_softirq(void)
- {
- struct list_head *cpu_list, local_list;
-
-diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
-index 04f406d7e973..60d8c59fa824 100644
---- a/drivers/ata/libata-core.c
-+++ b/drivers/ata/libata-core.c
-@@ -5148,7 +5148,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
- struct ata_port *ap;
- unsigned int tag;
-
-- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
-+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
- ap = qc->ap;
-
- qc->flags = 0;
-@@ -5165,7 +5165,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
- struct ata_port *ap;
- struct ata_link *link;
-
-- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
-+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
- WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
- ap = qc->ap;
- link = qc->dev->link;
-diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
-index c28dca0c613d..d4813f0d25ca 100644
---- a/drivers/char/Kconfig
-+++ b/drivers/char/Kconfig
-@@ -9,7 +9,6 @@ source "drivers/tty/Kconfig"
-
- config DEVMEM
- bool "/dev/mem virtual device support"
-- default y
- help
- Say Y here if you want to support the /dev/mem device.
- The /dev/mem device is used to access areas of physical
-@@ -568,7 +567,6 @@ config TELCLOCK
- config DEVPORT
- bool "/dev/port character device"
- depends on ISA || PCI
-- default y
- help
- Say Y here if you want to support the /dev/port device. The /dev/port
- device is similar to /dev/mem, but for I/O ports.
-diff --git a/drivers/media/dvb-frontends/cx24116.c b/drivers/media/dvb-frontends/cx24116.c
-index e105532bfba8..e07d52bb9b62 100644
---- a/drivers/media/dvb-frontends/cx24116.c
-+++ b/drivers/media/dvb-frontends/cx24116.c
-@@ -1462,7 +1462,7 @@ static int cx24116_tune(struct dvb_frontend *fe, bool re_tune,
- return cx24116_read_status(fe, status);
- }
-
--static int cx24116_get_algo(struct dvb_frontend *fe)
-+static enum dvbfe_algo cx24116_get_algo(struct dvb_frontend *fe)
- {
- return DVBFE_ALGO_HW;
- }
-diff --git a/drivers/media/dvb-frontends/cx24117.c b/drivers/media/dvb-frontends/cx24117.c
-index d37cb7762bd6..97e0feff0ede 100644
---- a/drivers/media/dvb-frontends/cx24117.c
-+++ b/drivers/media/dvb-frontends/cx24117.c
-@@ -1555,7 +1555,7 @@ static int cx24117_tune(struct dvb_frontend *fe, bool re_tune,
- return cx24117_read_status(fe, status);
- }
-
--static int cx24117_get_algo(struct dvb_frontend *fe)
-+static enum dvbfe_algo cx24117_get_algo(struct dvb_frontend *fe)
- {
- return DVBFE_ALGO_HW;
- }
-diff --git a/drivers/media/dvb-frontends/cx24120.c b/drivers/media/dvb-frontends/cx24120.c
-index 7f11dcc94d85..01da670760ba 100644
---- a/drivers/media/dvb-frontends/cx24120.c
-+++ b/drivers/media/dvb-frontends/cx24120.c
-@@ -1491,7 +1491,7 @@ static int cx24120_tune(struct dvb_frontend *fe, bool re_tune,
- return cx24120_read_status(fe, status);
- }
-
--static int cx24120_get_algo(struct dvb_frontend *fe)
-+static enum dvbfe_algo cx24120_get_algo(struct dvb_frontend *fe)
- {
- return DVBFE_ALGO_HW;
- }
-diff --git a/drivers/media/dvb-frontends/cx24123.c b/drivers/media/dvb-frontends/cx24123.c
-index 1d59d1d3bd82..41cd0e9ea199 100644
---- a/drivers/media/dvb-frontends/cx24123.c
-+++ b/drivers/media/dvb-frontends/cx24123.c
-@@ -1005,7 +1005,7 @@ static int cx24123_tune(struct dvb_frontend *fe,
- return retval;
- }
-
--static int cx24123_get_algo(struct dvb_frontend *fe)
-+static enum dvbfe_algo cx24123_get_algo(struct dvb_frontend *fe)
- {
- return DVBFE_ALGO_HW;
- }
-diff --git a/drivers/media/dvb-frontends/cxd2820r_core.c b/drivers/media/dvb-frontends/cxd2820r_core.c
-index f6ebbb47b9b2..3e0d8cbd76da 100644
---- a/drivers/media/dvb-frontends/cxd2820r_core.c
-+++ b/drivers/media/dvb-frontends/cxd2820r_core.c
-@@ -403,7 +403,7 @@ static enum dvbfe_search cxd2820r_search(struct dvb_frontend *fe)
- return DVBFE_ALGO_SEARCH_ERROR;
- }
-
--static int cxd2820r_get_frontend_algo(struct dvb_frontend *fe)
-+static enum dvbfe_algo cxd2820r_get_frontend_algo(struct dvb_frontend *fe)
- {
- return DVBFE_ALGO_CUSTOM;
- }
-diff --git a/drivers/media/dvb-frontends/mb86a20s.c b/drivers/media/dvb-frontends/mb86a20s.c
-index e8ac8c3e2ec0..e0f4ba8302d1 100644
---- a/drivers/media/dvb-frontends/mb86a20s.c
-+++ b/drivers/media/dvb-frontends/mb86a20s.c
-@@ -2055,7 +2055,7 @@ static void mb86a20s_release(struct dvb_frontend *fe)
- kfree(state);
- }
-
--static int mb86a20s_get_frontend_algo(struct dvb_frontend *fe)
-+static enum dvbfe_algo mb86a20s_get_frontend_algo(struct dvb_frontend *fe)
- {
- return DVBFE_ALGO_HW;
- }
-diff --git a/drivers/media/dvb-frontends/s921.c b/drivers/media/dvb-frontends/s921.c
-index 274544a3ae0e..9ef9b9bc1bd2 100644
---- a/drivers/media/dvb-frontends/s921.c
-+++ b/drivers/media/dvb-frontends/s921.c
-@@ -464,7 +464,7 @@ static int s921_tune(struct dvb_frontend *fe,
- return rc;
- }
-
--static int s921_get_algo(struct dvb_frontend *fe)
-+static enum dvbfe_algo s921_get_algo(struct dvb_frontend *fe)
- {
- return DVBFE_ALGO_HW;
- }
-diff --git a/drivers/media/pci/bt8xx/dst.c b/drivers/media/pci/bt8xx/dst.c
-index 7166d2279465..fa682f9fdc4b 100644
---- a/drivers/media/pci/bt8xx/dst.c
-+++ b/drivers/media/pci/bt8xx/dst.c
-@@ -1657,7 +1657,7 @@ static int dst_tune_frontend(struct dvb_frontend* fe,
- return 0;
- }
-
--static int dst_get_tuning_algo(struct dvb_frontend *fe)
-+static enum dvbfe_algo dst_get_tuning_algo(struct dvb_frontend *fe)
- {
- return dst_algo ? DVBFE_ALGO_HW : DVBFE_ALGO_SW;
- }
-diff --git a/drivers/media/pci/pt1/va1j5jf8007s.c b/drivers/media/pci/pt1/va1j5jf8007s.c
-index f75f69556be7..d913a6050e8c 100644
---- a/drivers/media/pci/pt1/va1j5jf8007s.c
-+++ b/drivers/media/pci/pt1/va1j5jf8007s.c
-@@ -98,7 +98,7 @@ static int va1j5jf8007s_read_snr(struct dvb_frontend *fe, u16 *snr)
- return 0;
- }
-
--static int va1j5jf8007s_get_frontend_algo(struct dvb_frontend *fe)
-+static enum dvbfe_algo va1j5jf8007s_get_frontend_algo(struct dvb_frontend *fe)
- {
- return DVBFE_ALGO_HW;
- }
-diff --git a/drivers/media/pci/pt1/va1j5jf8007t.c b/drivers/media/pci/pt1/va1j5jf8007t.c
-index 63fda79a75c0..4115c3ccd4a8 100644
---- a/drivers/media/pci/pt1/va1j5jf8007t.c
-+++ b/drivers/media/pci/pt1/va1j5jf8007t.c
-@@ -88,7 +88,7 @@ static int va1j5jf8007t_read_snr(struct dvb_frontend *fe, u16 *snr)
- return 0;
- }
-
--static int va1j5jf8007t_get_frontend_algo(struct dvb_frontend *fe)
-+static enum dvbfe_algo va1j5jf8007t_get_frontend_algo(struct dvb_frontend *fe)
- {
- return DVBFE_ALGO_HW;
- }
-diff --git a/drivers/misc/lkdtm_core.c b/drivers/misc/lkdtm_core.c
-index 981b3ef71e47..9883da1da383 100644
---- a/drivers/misc/lkdtm_core.c
-+++ b/drivers/misc/lkdtm_core.c
-@@ -78,7 +78,7 @@ static irqreturn_t jp_handle_irq_event(unsigned int irq,
- return 0;
- }
-
--static void jp_tasklet_action(struct softirq_action *a)
-+static void jp_tasklet_action(void)
- {
- lkdtm_handler();
- jprobe_return();
-diff --git a/drivers/tty/Kconfig b/drivers/tty/Kconfig
-index b811442c5ce6..4f62a63cbcb1 100644
---- a/drivers/tty/Kconfig
-+++ b/drivers/tty/Kconfig
-@@ -122,7 +122,6 @@ config UNIX98_PTYS
-
- config LEGACY_PTYS
- bool "Legacy (BSD) PTY support"
-- default y
- ---help---
- A pseudo terminal (PTY) is a software device consisting of two
- halves: a master and a slave. The slave device behaves identical to
-diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
-index 417b81c67fe9..4e9bb7851ab1 100644
---- a/drivers/tty/tty_io.c
-+++ b/drivers/tty/tty_io.c
-@@ -171,6 +171,7 @@ static void free_tty_struct(struct tty_struct *tty)
- put_device(tty->dev);
- kfree(tty->write_buf);
- tty->magic = 0xDEADDEAD;
-+ put_user_ns(tty->owner_user_ns);
- kfree(tty);
- }
-
-@@ -2167,11 +2168,19 @@ static int tty_fasync(int fd, struct file *filp, int on)
- * FIXME: may race normal receive processing
- */
-
-+int tiocsti_restrict = IS_ENABLED(CONFIG_SECURITY_TIOCSTI_RESTRICT);
-+
- static int tiocsti(struct tty_struct *tty, char __user *p)
- {
- char ch, mbz = 0;
- struct tty_ldisc *ld;
-
-+ if (tiocsti_restrict &&
-+ !ns_capable(tty->owner_user_ns, CAP_SYS_ADMIN)) {
-+ dev_warn_ratelimited(tty->dev,
-+ "Denied TIOCSTI ioctl for non-privileged process\n");
-+ return -EPERM;
-+ }
- if ((current->signal->tty != tty) && !capable(CAP_SYS_ADMIN))
- return -EPERM;
- if (get_user(ch, p))
-@@ -2854,6 +2863,7 @@ struct tty_struct *alloc_tty_struct(struct tty_driver *driver, int idx)
- tty->index = idx;
- tty_line_name(driver, idx, tty->name);
- tty->dev = tty_get_device(tty);
-+ tty->owner_user_ns = get_user_ns(current_user_ns());
-
- return tty;
- }
-diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
-index a073cb5be013..e9dfece7b7ce 100644
---- a/drivers/usb/core/hub.c
-+++ b/drivers/usb/core/hub.c
-@@ -38,6 +38,8 @@
- #define USB_VENDOR_GENESYS_LOGIC 0x05e3
- #define HUB_QUIRK_CHECK_PORT_AUTOSUSPEND 0x01
-
-+extern int deny_new_usb;
-+
- /* Protect struct usb_device->state and ->children members
- * Note: Both are also protected by ->dev.sem, except that ->state can
- * change to USB_STATE_NOTATTACHED even when the semaphore isn't held. */
-@@ -4818,6 +4820,12 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
- goto done;
- return;
- }
-+
-+ if (deny_new_usb) {
-+ dev_err(&port_dev->dev, "denied insert of USB device on port %d\n", port1);
-+ goto done;
-+ }
-+
- if (hub_is_superspeed(hub->hdev))
- unit_load = 150;
- else
-diff --git a/fs/exec.c b/fs/exec.c
-index 0da4d748b4e6..69fcee853363 100644
---- a/fs/exec.c
-+++ b/fs/exec.c
-@@ -62,6 +62,7 @@
- #include <linux/oom.h>
- #include <linux/compat.h>
- #include <linux/vmalloc.h>
-+#include <linux/random.h>
-
- #include <linux/uaccess.h>
- #include <asm/mmu_context.h>
-@@ -321,6 +322,8 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
- arch_bprm_mm_init(mm, vma);
- up_write(&mm->mmap_sem);
- bprm->p = vma->vm_end - sizeof(void *);
-+ if (randomize_va_space)
-+ bprm->p ^= get_random_int() & ~PAGE_MASK;
- return 0;
- err:
- up_write(&mm->mmap_sem);
-diff --git a/fs/namei.c b/fs/namei.c
-index d1e467b7b9de..0d96ad71b700 100644
---- a/fs/namei.c
-+++ b/fs/namei.c
-@@ -902,10 +902,10 @@ static inline void put_link(struct nameidata *nd)
- path_put(&last->link);
- }
-
--int sysctl_protected_symlinks __read_mostly = 0;
--int sysctl_protected_hardlinks __read_mostly = 0;
--int sysctl_protected_fifos __read_mostly;
--int sysctl_protected_regular __read_mostly;
-+int sysctl_protected_symlinks __read_mostly = 1;
-+int sysctl_protected_hardlinks __read_mostly = 1;
-+int sysctl_protected_fifos __read_mostly = 2;
-+int sysctl_protected_regular __read_mostly = 2;
-
- /**
- * may_follow_link - Check symlink following for unsafe situations
-diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig
-index 5f93cfacb3d1..cea0d7d3b23e 100644
---- a/fs/nfs/Kconfig
-+++ b/fs/nfs/Kconfig
-@@ -195,4 +195,3 @@ config NFS_DEBUG
- bool
- depends on NFS_FS && SUNRPC_DEBUG
- select CRC32
-- default y
-diff --git a/fs/pipe.c b/fs/pipe.c
-index 8ef7d7bef775..b82f305ec13d 100644
---- a/fs/pipe.c
-+++ b/fs/pipe.c
-@@ -38,7 +38,7 @@ unsigned int pipe_max_size = 1048576;
- /*
- * Minimum pipe size, as required by POSIX
- */
--unsigned int pipe_min_size = PAGE_SIZE;
-+unsigned int pipe_min_size __read_only = PAGE_SIZE;
-
- /* Maximum allocatable pages per user. Hard limit is unset by default, soft
- * matches default values.
-diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
-index 1ade1206bb89..60b0f76dec47 100644
---- a/fs/proc/Kconfig
-+++ b/fs/proc/Kconfig
-@@ -39,7 +39,6 @@ config PROC_KCORE
- config PROC_VMCORE
- bool "/proc/vmcore support"
- depends on PROC_FS && CRASH_DUMP
-- default y
- help
- Exports the dump image of crashed kernel in ELF format.
-
-diff --git a/fs/stat.c b/fs/stat.c
-index 873785dae022..d3c2ada8b9c7 100644
---- a/fs/stat.c
-+++ b/fs/stat.c
-@@ -40,8 +40,13 @@ void generic_fillattr(struct inode *inode, struct kstat *stat)
- stat->gid = inode->i_gid;
- stat->rdev = inode->i_rdev;
- stat->size = i_size_read(inode);
-- stat->atime = inode->i_atime;
-- stat->mtime = inode->i_mtime;
-+ if (is_sidechannel_device(inode) && !capable_noaudit(CAP_MKNOD)) {
-+ stat->atime = inode->i_ctime;
-+ stat->mtime = inode->i_ctime;
-+ } else {
-+ stat->atime = inode->i_atime;
-+ stat->mtime = inode->i_mtime;
-+ }
- stat->ctime = inode->i_ctime;
- stat->blksize = i_blocksize(inode);
- stat->blocks = inode->i_blocks;
-@@ -75,9 +80,14 @@ int vfs_getattr_nosec(const struct path *path, struct kstat *stat,
- stat->result_mask |= STATX_BASIC_STATS;
- request_mask &= STATX_ALL;
- query_flags &= KSTAT_QUERY_FLAGS;
-- if (inode->i_op->getattr)
-- return inode->i_op->getattr(path, stat, request_mask,
-- query_flags);
-+ if (inode->i_op->getattr) {
-+ int retval = inode->i_op->getattr(path, stat, request_mask, query_flags);
-+ if (!retval && is_sidechannel_device(inode) && !capable_noaudit(CAP_MKNOD)) {
-+ stat->atime = stat->ctime;
-+ stat->mtime = stat->ctime;
-+ }
-+ return retval;
-+ }
-
- generic_fillattr(inode, stat);
- return 0;
-diff --git a/include/linux/cache.h b/include/linux/cache.h
-index 750621e41d1c..e7157c18c62c 100644
---- a/include/linux/cache.h
-+++ b/include/linux/cache.h
-@@ -31,6 +31,8 @@
- #define __ro_after_init __attribute__((__section__(".data..ro_after_init")))
- #endif
-
-+#define __read_only __ro_after_init
-+
- #ifndef ____cacheline_aligned
- #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
- #endif
-diff --git a/include/linux/capability.h b/include/linux/capability.h
-index f640dcbc880c..2b4f5d651f19 100644
---- a/include/linux/capability.h
-+++ b/include/linux/capability.h
-@@ -207,6 +207,7 @@ extern bool has_capability_noaudit(struct task_struct *t, int cap);
- extern bool has_ns_capability_noaudit(struct task_struct *t,
- struct user_namespace *ns, int cap);
- extern bool capable(int cap);
-+extern bool capable_noaudit(int cap);
- extern bool ns_capable(struct user_namespace *ns, int cap);
- extern bool ns_capable_noaudit(struct user_namespace *ns, int cap);
- #else
-@@ -232,6 +233,10 @@ static inline bool capable(int cap)
- {
- return true;
- }
-+static inline bool capable_noaudit(int cap)
-+{
-+ return true;
-+}
- static inline bool ns_capable(struct user_namespace *ns, int cap)
- {
- return true;
-diff --git a/include/linux/fs.h b/include/linux/fs.h
-index f6a577edec67..fa3a6caeca6c 100644
---- a/include/linux/fs.h
-+++ b/include/linux/fs.h
-@@ -3383,4 +3383,15 @@ static inline bool dir_relax_shared(struct inode *inode)
- extern bool path_noexec(const struct path *path);
- extern void inode_nohighmem(struct inode *inode);
-
-+extern int device_sidechannel_restrict;
-+
-+static inline bool is_sidechannel_device(const struct inode *inode)
-+{
-+ umode_t mode;
-+ if (!device_sidechannel_restrict)
-+ return false;
-+ mode = inode->i_mode;
-+ return ((S_ISCHR(mode) || S_ISBLK(mode)) && (mode & (S_IROTH | S_IWOTH)));
-+}
-+
- #endif /* _LINUX_FS_H */
-diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
-index bdaf22582f6e..326ff15d4637 100644
---- a/include/linux/fsnotify.h
-+++ b/include/linux/fsnotify.h
-@@ -181,6 +181,9 @@ static inline void fsnotify_access(struct file *file)
- struct inode *inode = path->dentry->d_inode;
- __u32 mask = FS_ACCESS;
-
-+ if (is_sidechannel_device(inode))
-+ return;
-+
- if (S_ISDIR(inode->i_mode))
- mask |= FS_ISDIR;
-
-@@ -199,6 +202,9 @@ static inline void fsnotify_modify(struct file *file)
- struct inode *inode = path->dentry->d_inode;
- __u32 mask = FS_MODIFY;
-
-+ if (is_sidechannel_device(inode))
-+ return;
-+
- if (S_ISDIR(inode->i_mode))
- mask |= FS_ISDIR;
-
-diff --git a/include/linux/gfp.h b/include/linux/gfp.h
-index b041f94678de..fd8bb5a78b75 100644
---- a/include/linux/gfp.h
-+++ b/include/linux/gfp.h
-@@ -518,9 +518,9 @@ extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
- extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order);
- extern unsigned long get_zeroed_page(gfp_t gfp_mask);
-
--void *alloc_pages_exact(size_t size, gfp_t gfp_mask);
-+void *alloc_pages_exact(size_t size, gfp_t gfp_mask) __attribute__((alloc_size(1)));
- void free_pages_exact(void *virt, size_t size);
--void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask);
-+void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask) __attribute__((alloc_size(2)));
-
- #define __get_free_page(gfp_mask) \
- __get_free_pages((gfp_mask), 0)
-diff --git a/include/linux/highmem.h b/include/linux/highmem.h
-index 776f90f3a1cd..3f5c47000059 100644
---- a/include/linux/highmem.h
-+++ b/include/linux/highmem.h
-@@ -191,6 +191,13 @@ static inline void clear_highpage(struct page *page)
- kunmap_atomic(kaddr);
- }
-
-+static inline void verify_zero_highpage(struct page *page)
-+{
-+ void *kaddr = kmap_atomic(page);
-+ BUG_ON(memchr_inv(kaddr, 0, PAGE_SIZE));
-+ kunmap_atomic(kaddr);
-+}
-+
- static inline void zero_user_segments(struct page *page,
- unsigned start1, unsigned end1,
- unsigned start2, unsigned end2)
-diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
-index 69c238210325..ee487ea4f48f 100644
---- a/include/linux/interrupt.h
-+++ b/include/linux/interrupt.h
-@@ -485,7 +485,7 @@ extern const char * const softirq_to_name[NR_SOFTIRQS];
-
- struct softirq_action
- {
-- void (*action)(struct softirq_action *);
-+ void (*action)(void);
- };
-
- asmlinkage void do_softirq(void);
-@@ -500,7 +500,7 @@ static inline void do_softirq_own_stack(void)
- }
- #endif
-
--extern void open_softirq(int nr, void (*action)(struct softirq_action *));
-+extern void __init open_softirq(int nr, void (*action)(void));
- extern void softirq_init(void);
- extern void __raise_softirq_irqoff(unsigned int nr);
-
-diff --git a/include/linux/kobject_ns.h b/include/linux/kobject_ns.h
-index df32d2508290..c992d130b94d 100644
---- a/include/linux/kobject_ns.h
-+++ b/include/linux/kobject_ns.h
-@@ -46,7 +46,7 @@ struct kobj_ns_type_operations {
- void (*drop_ns)(void *);
- };
-
--int kobj_ns_type_register(const struct kobj_ns_type_operations *ops);
-+int __init kobj_ns_type_register(const struct kobj_ns_type_operations *ops);
- int kobj_ns_type_registered(enum kobj_ns_type type);
- const struct kobj_ns_type_operations *kobj_child_ns_ops(struct kobject *parent);
- const struct kobj_ns_type_operations *kobj_ns_ops(struct kobject *kobj);
-diff --git a/include/linux/mm.h b/include/linux/mm.h
-index 58f2263de4de..e90dc5d98c7f 100644
---- a/include/linux/mm.h
-+++ b/include/linux/mm.h
-@@ -525,7 +525,7 @@ static inline int is_vmalloc_or_module_addr(const void *x)
- }
- #endif
-
--extern void *kvmalloc_node(size_t size, gfp_t flags, int node);
-+extern void *kvmalloc_node(size_t size, gfp_t flags, int node) __attribute__((alloc_size(1)));
- static inline void *kvmalloc(size_t size, gfp_t flags)
- {
- return kvmalloc_node(size, flags, NUMA_NO_NODE);
-diff --git a/include/linux/percpu.h b/include/linux/percpu.h
-index 296bbe49d5d1..b26652c9a98d 100644
---- a/include/linux/percpu.h
-+++ b/include/linux/percpu.h
-@@ -129,7 +129,7 @@ extern int __init pcpu_page_first_chunk(size_t reserved_size,
- pcpu_fc_populate_pte_fn_t populate_pte_fn);
- #endif
-
--extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align);
-+extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align) __attribute__((alloc_size(1)));
- extern bool __is_kernel_percpu_address(unsigned long addr, unsigned long *can_addr);
- extern bool is_kernel_percpu_address(unsigned long addr);
-
-@@ -137,8 +137,8 @@ extern bool is_kernel_percpu_address(unsigned long addr);
- extern void __init setup_per_cpu_areas(void);
- #endif
-
--extern void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp);
--extern void __percpu *__alloc_percpu(size_t size, size_t align);
-+extern void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp) __attribute__((alloc_size(1)));
-+extern void __percpu *__alloc_percpu(size_t size, size_t align) __attribute__((alloc_size(1)));
- extern void free_percpu(void __percpu *__pdata);
- extern phys_addr_t per_cpu_ptr_to_phys(void *addr);
-
-diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
-index 8e22f24ded6a..b7fecdfa6de5 100644
---- a/include/linux/perf_event.h
-+++ b/include/linux/perf_event.h
-@@ -1165,6 +1165,11 @@ extern int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
- int perf_event_max_stack_handler(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos);
-
-+static inline bool perf_paranoid_any(void)
-+{
-+ return sysctl_perf_event_paranoid > 2;
-+}
-+
- static inline bool perf_paranoid_tracepoint_raw(void)
- {
- return sysctl_perf_event_paranoid > -1;
-diff --git a/include/linux/slab.h b/include/linux/slab.h
-index ae5ed6492d54..fd0786124504 100644
---- a/include/linux/slab.h
-+++ b/include/linux/slab.h
-@@ -146,8 +146,8 @@ void memcg_destroy_kmem_caches(struct mem_cgroup *);
- /*
- * Common kmalloc functions provided by all allocators
- */
--void * __must_check __krealloc(const void *, size_t, gfp_t);
--void * __must_check krealloc(const void *, size_t, gfp_t);
-+void * __must_check __krealloc(const void *, size_t, gfp_t) __attribute__((alloc_size(2)));
-+void * __must_check krealloc(const void *, size_t, gfp_t) __attribute__((alloc_size(2)));
- void kfree(const void *);
- void kzfree(const void *);
- size_t ksize(const void *);
-@@ -324,7 +324,7 @@ static __always_inline int kmalloc_index(size_t size)
- }
- #endif /* !CONFIG_SLOB */
-
--void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __malloc;
-+void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __malloc __attribute__((alloc_size(1)));
- void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags) __assume_slab_alignment __malloc;
- void kmem_cache_free(struct kmem_cache *, void *);
-
-@@ -348,7 +348,7 @@ static __always_inline void kfree_bulk(size_t size, void **p)
- }
-
- #ifdef CONFIG_NUMA
--void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment __malloc;
-+void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment __malloc __attribute__((alloc_size(1)));
- void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node) __assume_slab_alignment __malloc;
- #else
- static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
-@@ -473,7 +473,7 @@ static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
- * for general use, and so are not documented here. For a full list of
- * potential flags, always refer to linux/gfp.h.
- */
--static __always_inline void *kmalloc(size_t size, gfp_t flags)
-+static __always_inline __attribute__((alloc_size(1))) void *kmalloc(size_t size, gfp_t flags)
- {
- if (__builtin_constant_p(size)) {
- if (size > KMALLOC_MAX_CACHE_SIZE)
-@@ -513,7 +513,7 @@ static __always_inline int kmalloc_size(int n)
- return 0;
- }
-
--static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
-+static __always_inline __attribute__((alloc_size(1))) void *kmalloc_node(size_t size, gfp_t flags, int node)
- {
- #ifndef CONFIG_SLOB
- if (__builtin_constant_p(size) &&
-diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
-index f8ced87a2efe..cd61c8d2aa6e 100644
---- a/include/linux/slub_def.h
-+++ b/include/linux/slub_def.h
-@@ -121,6 +121,11 @@ struct kmem_cache {
- unsigned long random;
- #endif
-
-+#ifdef CONFIG_SLAB_CANARY
-+ unsigned long random_active;
-+ unsigned long random_inactive;
-+#endif
-+
- #ifdef CONFIG_NUMA
- /*
- * Defragmentation by allocating from a remote node.
-diff --git a/include/linux/string.h b/include/linux/string.h
-index 96115bf561b4..f93d908c5bbc 100644
---- a/include/linux/string.h
-+++ b/include/linux/string.h
-@@ -234,10 +234,16 @@ void __read_overflow2(void) __compiletime_error("detected read beyond size of ob
- void __read_overflow3(void) __compiletime_error("detected read beyond size of object passed as 3rd parameter");
- void __write_overflow(void) __compiletime_error("detected write beyond size of object passed as 1st parameter");
-
-+#ifdef CONFIG_FORTIFY_SOURCE_STRICT_STRING
-+#define __string_size(p) __builtin_object_size(p, 1)
-+#else
-+#define __string_size(p) __builtin_object_size(p, 0)
-+#endif
-+
- #if !defined(__NO_FORTIFY) && defined(__OPTIMIZE__) && defined(CONFIG_FORTIFY_SOURCE)
- __FORTIFY_INLINE char *strncpy(char *p, const char *q, __kernel_size_t size)
- {
-- size_t p_size = __builtin_object_size(p, 0);
-+ size_t p_size = __string_size(p);
- if (__builtin_constant_p(size) && p_size < size)
- __write_overflow();
- if (p_size < size)
-@@ -247,7 +253,7 @@ __FORTIFY_INLINE char *strncpy(char *p, const char *q, __kernel_size_t size)
-
- __FORTIFY_INLINE char *strcat(char *p, const char *q)
- {
-- size_t p_size = __builtin_object_size(p, 0);
-+ size_t p_size = __string_size(p);
- if (p_size == (size_t)-1)
- return __builtin_strcat(p, q);
- if (strlcat(p, q, p_size) >= p_size)
-@@ -258,7 +264,7 @@ __FORTIFY_INLINE char *strcat(char *p, const char *q)
- __FORTIFY_INLINE __kernel_size_t strlen(const char *p)
- {
- __kernel_size_t ret;
-- size_t p_size = __builtin_object_size(p, 0);
-+ size_t p_size = __string_size(p);
-
- /* Work around gcc excess stack consumption issue */
- if (p_size == (size_t)-1 ||
-@@ -273,7 +279,7 @@ __FORTIFY_INLINE __kernel_size_t strlen(const char *p)
- extern __kernel_size_t __real_strnlen(const char *, __kernel_size_t) __RENAME(strnlen);
- __FORTIFY_INLINE __kernel_size_t strnlen(const char *p, __kernel_size_t maxlen)
- {
-- size_t p_size = __builtin_object_size(p, 0);
-+ size_t p_size = __string_size(p);
- __kernel_size_t ret = __real_strnlen(p, maxlen < p_size ? maxlen : p_size);
- if (p_size <= ret && maxlen != ret)
- fortify_panic(__func__);
-@@ -285,8 +291,8 @@ extern size_t __real_strlcpy(char *, const char *, size_t) __RENAME(strlcpy);
- __FORTIFY_INLINE size_t strlcpy(char *p, const char *q, size_t size)
- {
- size_t ret;
-- size_t p_size = __builtin_object_size(p, 0);
-- size_t q_size = __builtin_object_size(q, 0);
-+ size_t p_size = __string_size(p);
-+ size_t q_size = __string_size(q);
- if (p_size == (size_t)-1 && q_size == (size_t)-1)
- return __real_strlcpy(p, q, size);
- ret = strlen(q);
-@@ -306,8 +312,8 @@ __FORTIFY_INLINE size_t strlcpy(char *p, const char *q, size_t size)
- __FORTIFY_INLINE char *strncat(char *p, const char *q, __kernel_size_t count)
- {
- size_t p_len, copy_len;
-- size_t p_size = __builtin_object_size(p, 0);
-- size_t q_size = __builtin_object_size(q, 0);
-+ size_t p_size = __string_size(p);
-+ size_t q_size = __string_size(q);
- if (p_size == (size_t)-1 && q_size == (size_t)-1)
- return __builtin_strncat(p, q, count);
- p_len = strlen(p);
-@@ -420,8 +426,8 @@ __FORTIFY_INLINE void *kmemdup(const void *p, size_t size, gfp_t gfp)
- /* defined after fortified strlen and memcpy to reuse them */
- __FORTIFY_INLINE char *strcpy(char *p, const char *q)
- {
-- size_t p_size = __builtin_object_size(p, 0);
-- size_t q_size = __builtin_object_size(q, 0);
-+ size_t p_size = __string_size(p);
-+ size_t q_size = __string_size(q);
- if (p_size == (size_t)-1 && q_size == (size_t)-1)
- return __builtin_strcpy(p, q);
- memcpy(p, q, strlen(q) + 1);
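The __string_size() switch above is the whole point of CONFIG_FORTIFY_SOURCE_STRICT_STRING: __builtin_object_size(p, 1) stops at the enclosing struct member, while mode 0 measures to the end of the whole object, so only mode 1 sees intra-object overflows. A sketch (the struct and function are invented for illustration):

    struct item {
            char name[8];
            int  id;
    };

    void fill(struct item *it, const char *src)
    {
            /* Mode 0 sizes p as the remaining 12 bytes of the struct
             * and stays silent; mode 1 sizes it as the 8-byte field,
             * so the fortified strncpy() flags the overflow into id. */
            strncpy(it->name, src, 12);
    }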
-diff --git a/include/linux/tty.h b/include/linux/tty.h
-index 1dd587ba6d88..9a9a04fb641d 100644
---- a/include/linux/tty.h
-+++ b/include/linux/tty.h
-@@ -13,6 +13,7 @@
- #include <uapi/linux/tty.h>
- #include <linux/rwsem.h>
- #include <linux/llist.h>
-+#include <linux/user_namespace.h>
-
-
- /*
-@@ -335,6 +336,7 @@ struct tty_struct {
- /* If the tty has a pending do_SAK, queue it here - akpm */
- struct work_struct SAK_work;
- struct tty_port *port;
-+ struct user_namespace *owner_user_ns;
- } __randomize_layout;
-
- /* Each of a tty's open files has private_data pointing to tty_file_private */
-@@ -344,6 +346,8 @@ struct tty_file_private {
- struct list_head list;
- };
-
-+extern int tiocsti_restrict;
-+
- /* tty magic number */
- #define TTY_MAGIC 0x5401
-
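The tiocsti_restrict knob declared above gates TIOCSTI, the ioctl that pushes characters into a tty input queue as if they had been typed, a classic command-injection vector between processes sharing a terminal. A user-space sketch of the restricted call (error handling trimmed):

    #include <sys/ioctl.h>
    #include <unistd.h>

    /* Inject one character into a tty's input queue.  With
     * tiocsti_restrict=1 this fails with EPERM unless the caller has
     * CAP_SYS_ADMIN in the tty owner's user namespace. */
    int inject(int ttyfd, char c)
    {
            return ioctl(ttyfd, TIOCSTI, &c);
    }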
-diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
-index 1e5d8c392f15..66d0e49c9987 100644
---- a/include/linux/vmalloc.h
-+++ b/include/linux/vmalloc.h
-@@ -68,19 +68,19 @@ static inline void vmalloc_init(void)
- }
- #endif
-
--extern void *vmalloc(unsigned long size);
--extern void *vzalloc(unsigned long size);
--extern void *vmalloc_user(unsigned long size);
--extern void *vmalloc_node(unsigned long size, int node);
--extern void *vzalloc_node(unsigned long size, int node);
--extern void *vmalloc_exec(unsigned long size);
--extern void *vmalloc_32(unsigned long size);
--extern void *vmalloc_32_user(unsigned long size);
--extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
-+extern void *vmalloc(unsigned long size) __attribute__((alloc_size(1)));
-+extern void *vzalloc(unsigned long size) __attribute__((alloc_size(1)));
-+extern void *vmalloc_user(unsigned long size) __attribute__((alloc_size(1)));
-+extern void *vmalloc_node(unsigned long size, int node) __attribute__((alloc_size(1)));
-+extern void *vzalloc_node(unsigned long size, int node) __attribute__((alloc_size(1)));
-+extern void *vmalloc_exec(unsigned long size) __attribute__((alloc_size(1)));
-+extern void *vmalloc_32(unsigned long size) __attribute__((alloc_size(1)));
-+extern void *vmalloc_32_user(unsigned long size) __attribute__((alloc_size(1)));
-+extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot) __attribute__((alloc_size(1)));
- extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
- unsigned long start, unsigned long end, gfp_t gfp_mask,
- pgprot_t prot, unsigned long vm_flags, int node,
-- const void *caller);
-+ const void *caller) __attribute__((alloc_size(1)));
- #ifndef CONFIG_MMU
- extern void *__vmalloc_node_flags(unsigned long size, int node, gfp_t flags);
- static inline void *__vmalloc_node_flags_caller(unsigned long size, int node,
-diff --git a/init/Kconfig b/init/Kconfig
-index 46075327c165..0c78750bc76d 100644
---- a/init/Kconfig
-+++ b/init/Kconfig
-@@ -309,6 +309,7 @@ config USELIB
- config AUDIT
- bool "Auditing support"
- depends on NET
-+ default y
- help
- Enable auditing infrastructure that can be used with another
- kernel subsystem, such as SELinux (which requires this for
-@@ -1052,6 +1053,12 @@ config CC_OPTIMIZE_FOR_SIZE
-
- endchoice
-
-+config LOCAL_INIT
-+ bool "Zero uninitialized locals"
-+ help
-+ Zero-fill uninitialized local variables, other than variable-length
-+ arrays. Requires compiler support.
-+
- config SYSCTL
- bool
-
-@@ -1361,8 +1368,7 @@ config SHMEM
- which may be appropriate on small systems without swap.
-
- config AIO
-- bool "Enable AIO support" if EXPERT
-- default y
-+ bool "Enable AIO support"
- help
- This option enables POSIX asynchronous I/O which may be used
- by some high performance threaded applications. Disabling
-@@ -1491,7 +1497,7 @@ config VM_EVENT_COUNTERS
-
- config SLUB_DEBUG
- default y
-- bool "Enable SLUB debugging support" if EXPERT
-+ bool "Enable SLUB debugging support"
- depends on SLUB && SYSFS
- help
- SLUB has extensive debug support features. Disabling these can
-@@ -1515,7 +1521,6 @@ config SLUB_MEMCG_SYSFS_ON
-
- config COMPAT_BRK
- bool "Disable heap randomization"
-- default y
- help
- Randomizing heap placement makes heap exploits harder, but it
- also breaks ancient binaries (including anything libc5 based).
-@@ -1562,7 +1567,6 @@ endchoice
-
- config SLAB_MERGE_DEFAULT
- bool "Allow slab caches to be merged"
-- default y
- help
- For reduced kernel memory fragmentation, slab caches can be
- merged when they share the same size and other characteristics.
-@@ -1575,9 +1579,9 @@ config SLAB_MERGE_DEFAULT
- command line.
-
- config SLAB_FREELIST_RANDOM
-- default n
- depends on SLAB || SLUB
- bool "SLAB freelist randomization"
-+ default y
- help
- Randomizes the freelist order used on creating new pages. This
- security feature reduces the predictability of the kernel slab
-@@ -1586,12 +1590,56 @@ config SLAB_FREELIST_RANDOM
- config SLAB_FREELIST_HARDENED
- bool "Harden slab freelist metadata"
- depends on SLUB
-+ default y
- help
- Many kernel heap attacks try to target slab cache metadata and
- other infrastructure. This option makes minor performance
- sacrifices to harden the kernel slab allocator against common
- freelist exploit methods.
-
-+config SLAB_HARDENED
-+ default y
-+ depends on SLUB
-+ bool "Hardened SLAB infrastructure"
-+ help
-+ Make minor performance sacrifices to harden the kernel slab
-+ allocator.
-+
-+config SLAB_CANARY
-+ depends on SLUB
-+ depends on !SLAB_MERGE_DEFAULT
-+ bool "SLAB canaries"
-+ default y
-+ help
-+ Place canaries at the end of kernel slab allocations, sacrificing
-+ some performance and memory usage for security.
-+
-+ Canaries can detect some forms of heap corruption when allocations
-+ are freed and, as part of the HARDENED_USERCOPY feature, provide
-+ basic use-after-free detection for HARDENED_USERCOPY.
-+
-+ Canaries absorb small overflows (rendering them harmless), mitigate
-+ non-NUL terminated C string overflows on 64-bit via a guaranteed zero
-+ byte, and provide basic double-free detection.
-+
-+config SLAB_SANITIZE
-+ bool "Sanitize SLAB allocations"
-+ depends on SLUB
-+ default y
-+ help
-+ Zero fill slab allocations on free, reducing the lifetime of
-+ sensitive data and helping to mitigate use-after-free bugs.
-+
-+ For slabs with debug poisoning enabled, this has no impact.
-+
-+config SLAB_SANITIZE_VERIFY
-+ depends on SLAB_SANITIZE && PAGE_SANITIZE
-+ default y
-+ bool "Verify sanitized SLAB allocations"
-+ help
-+ Verify that newly allocated slab objects are zeroed to detect
-+ write-after-free bugs.
-+
- config SLUB_CPU_PARTIAL
- default y
- depends on SLUB && SMP
-diff --git a/kernel/audit.c b/kernel/audit.c
-index d301276bca58..d55a1e290cea 100644
---- a/kernel/audit.c
-+++ b/kernel/audit.c
-@@ -1575,6 +1575,9 @@ static int __init audit_enable(char *str)
- audit_default = !!simple_strtol(str, NULL, 0);
- if (!audit_default)
- audit_initialized = AUDIT_DISABLED;
-+ else
-+ audit_initialized = AUDIT_UNINITIALIZED;
-+
- audit_enabled = audit_default;
- audit_ever_enabled = !!audit_enabled;
-
-diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
-index d203a5d6b726..2a6c3e2c57a6 100644
---- a/kernel/bpf/core.c
-+++ b/kernel/bpf/core.c
-@@ -539,7 +539,7 @@ void __weak bpf_jit_free(struct bpf_prog *fp)
- bpf_prog_unlock_free(fp);
- }
-
--int bpf_jit_harden __read_mostly;
-+int bpf_jit_harden __read_mostly = 2;
-
- static int bpf_jit_blind_insn(const struct bpf_insn *from,
- const struct bpf_insn *aux,
-diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
-index 5c9deed4524e..6d90aabecfc7 100644
---- a/kernel/bpf/syscall.c
-+++ b/kernel/bpf/syscall.c
-@@ -37,7 +37,7 @@ static DEFINE_SPINLOCK(prog_idr_lock);
- static DEFINE_IDR(map_idr);
- static DEFINE_SPINLOCK(map_idr_lock);
-
--int sysctl_unprivileged_bpf_disabled __read_mostly;
-+int sysctl_unprivileged_bpf_disabled __read_mostly = 1;
-
- static const struct bpf_map_ops * const bpf_map_types[] = {
- #define BPF_PROG_TYPE(_id, _ops)
-diff --git a/kernel/capability.c b/kernel/capability.c
-index 1e1c0236f55b..452062fe45ce 100644
---- a/kernel/capability.c
-+++ b/kernel/capability.c
-@@ -431,6 +431,12 @@ bool capable(int cap)
- return ns_capable(&init_user_ns, cap);
- }
- EXPORT_SYMBOL(capable);
-+
-+bool capable_noaudit(int cap)
-+{
-+ return ns_capable_noaudit(&init_user_ns, cap);
-+}
-+EXPORT_SYMBOL(capable_noaudit);
- #endif /* CONFIG_MULTIUSER */
-
- /**
-diff --git a/kernel/events/core.c b/kernel/events/core.c
-index 991af683ef9e..66f66b648707 100644
---- a/kernel/events/core.c
-+++ b/kernel/events/core.c
-@@ -397,8 +397,13 @@ static cpumask_var_t perf_online_mask;
- * 0 - disallow raw tracepoint access for unpriv
- * 1 - disallow cpu events for unpriv
- * 2 - disallow kernel profiling for unpriv
-+ * 3 - disallow all unpriv perf event use
- */
-+#ifdef CONFIG_SECURITY_PERF_EVENTS_RESTRICT
-+int sysctl_perf_event_paranoid __read_mostly = 3;
-+#else
- int sysctl_perf_event_paranoid __read_mostly = 2;
-+#endif
-
- /* Minimum for 512 kiB + 1 user control page */
- int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */
-@@ -9984,6 +9989,9 @@ SYSCALL_DEFINE5(perf_event_open,
- if (flags & ~PERF_FLAG_ALL)
- return -EINVAL;
-
-+ if (perf_paranoid_any() && !capable(CAP_SYS_ADMIN))
-+ return -EACCES;
-+
- err = perf_copy_attr(attr_uptr, &attr);
- if (err)
- return err;
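With SECURITY_PERF_EVENTS_RESTRICT the default paranoia level becomes 3 and the hunk above rejects every unprivileged perf_event_open() call up front. A user-space sketch of what a denied caller sees (expect -1 with errno == EACCES without CAP_SYS_ADMIN):

    #include <linux/perf_event.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    /* Returns a counter fd, or -1/EACCES when
     * kernel.perf_event_paranoid == 3 and we lack CAP_SYS_ADMIN. */
    int open_cycles_counter(void)
    {
            struct perf_event_attr attr;

            memset(&attr, 0, sizeof(attr));
            attr.size   = sizeof(attr);
            attr.type   = PERF_TYPE_HARDWARE;
            attr.config = PERF_COUNT_HW_CPU_CYCLES;

            return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
    }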
-diff --git a/kernel/fork.c b/kernel/fork.c
-index 6d6ce2c3a364..951a76b3dc32 100644
---- a/kernel/fork.c
-+++ b/kernel/fork.c
-@@ -102,6 +102,11 @@
-
- #define CREATE_TRACE_POINTS
- #include <trace/events/task.h>
-+#ifdef CONFIG_USER_NS
-+extern int unprivileged_userns_clone;
-+#else
-+#define unprivileged_userns_clone 0
-+#endif
-
- /*
- * Minimum number of threads to boot the kernel
-@@ -1555,6 +1560,10 @@ static __latent_entropy struct task_struct *copy_process(
- if ((clone_flags & (CLONE_NEWUSER|CLONE_FS)) == (CLONE_NEWUSER|CLONE_FS))
- return ERR_PTR(-EINVAL);
-
-+ if ((clone_flags & CLONE_NEWUSER) && !unprivileged_userns_clone)
-+ if (!capable(CAP_SYS_ADMIN))
-+ return ERR_PTR(-EPERM);
-+
- /*
- * Thread groups must share signals as well, and detached threads
- * can only be started up within the thread group.
-@@ -2357,6 +2366,12 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
- if (unshare_flags & CLONE_NEWNS)
- unshare_flags |= CLONE_FS;
-
-+ if ((unshare_flags & CLONE_NEWUSER) && !unprivileged_userns_clone) {
-+ err = -EPERM;
-+ if (!capable(CAP_SYS_ADMIN))
-+ goto bad_unshare_out;
-+ }
-+
- err = check_unshare_flags(unshare_flags);
- if (err)
- goto bad_unshare_out;
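The unprivileged_userns_clone gate covers both entry points: clone() with CLONE_NEWUSER in copy_process() above and unshare() here. From user space the failure mode looks like this (sketch; the sysctl defaults to 0 in this patch):

    #define _GNU_SOURCE
    #include <sched.h>

    /* With kernel.unprivileged_userns_clone=0 this returns -1 with
     * errno == EPERM for callers lacking CAP_SYS_ADMIN. */
    int try_user_namespace(void)
    {
            return unshare(CLONE_NEWUSER);
    }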
-diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
-index 0972a8e09d08..00dde7aad47a 100644
---- a/kernel/power/snapshot.c
-+++ b/kernel/power/snapshot.c
-@@ -1136,7 +1136,7 @@ void free_basic_memory_bitmaps(void)
-
- void clear_free_pages(void)
- {
--#ifdef CONFIG_PAGE_POISONING_ZERO
-+#if defined(CONFIG_PAGE_POISONING_ZERO) || defined(CONFIG_PAGE_SANITIZE)
- struct memory_bitmap *bm = free_pages_map;
- unsigned long pfn;
-
-@@ -1153,7 +1153,7 @@ void clear_free_pages(void)
- }
- memory_bm_position_reset(bm);
- pr_info("PM: free pages cleared after restore\n");
--#endif /* PAGE_POISONING_ZERO */
-+#endif /* PAGE_POISONING_ZERO || PAGE_SANITIZE */
- }
-
- /**
-diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c
-index a64eee0db39e..4d7de378fe4c 100644
---- a/kernel/rcu/tiny.c
-+++ b/kernel/rcu/tiny.c
-@@ -164,7 +164,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
- }
- }
-
--static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused)
-+static __latent_entropy void rcu_process_callbacks(void)
- {
- __rcu_process_callbacks(&rcu_sched_ctrlblk);
- __rcu_process_callbacks(&rcu_bh_ctrlblk);
-diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
-index 710ce1d6b982..4013b634e820 100644
---- a/kernel/rcu/tree.c
-+++ b/kernel/rcu/tree.c
-@@ -2927,7 +2927,7 @@ __rcu_process_callbacks(struct rcu_state *rsp)
- /*
- * Do RCU core processing for the current CPU.
- */
--static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused)
-+static __latent_entropy void rcu_process_callbacks(void)
- {
- struct rcu_state *rsp;
-
-diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
-index f33b24080b1c..99c5e423906f 100644
---- a/kernel/sched/fair.c
-+++ b/kernel/sched/fair.c
-@@ -8982,7 +8982,7 @@ static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) { }
- * run_rebalance_domains is triggered when needed from the scheduler tick.
- * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
- */
--static __latent_entropy void run_rebalance_domains(struct softirq_action *h)
-+static __latent_entropy void run_rebalance_domains(void)
- {
- struct rq *this_rq = this_rq();
- enum cpu_idle_type idle = this_rq->idle_balance ?
-diff --git a/kernel/softirq.c b/kernel/softirq.c
-index a4c87cf27f9d..efb97a8dc568 100644
---- a/kernel/softirq.c
-+++ b/kernel/softirq.c
-@@ -53,7 +53,7 @@ irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
- EXPORT_SYMBOL(irq_stat);
- #endif
-
--static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
-+static struct softirq_action softirq_vec[NR_SOFTIRQS] __ro_after_init __aligned(PAGE_SIZE);
-
- DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
-
-@@ -285,7 +285,7 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)
- kstat_incr_softirqs_this_cpu(vec_nr);
-
- trace_softirq_entry(vec_nr);
-- h->action(h);
-+ h->action();
- trace_softirq_exit(vec_nr);
- if (unlikely(prev_count != preempt_count())) {
- pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
-@@ -448,7 +448,7 @@ void __raise_softirq_irqoff(unsigned int nr)
- or_softirq_pending(1UL << nr);
- }
-
--void open_softirq(int nr, void (*action)(struct softirq_action *))
-+void __init open_softirq(int nr, void (*action)(void))
- {
- softirq_vec[nr].action = action;
- }
-@@ -490,7 +490,7 @@ void __tasklet_hi_schedule(struct tasklet_struct *t)
- }
- EXPORT_SYMBOL(__tasklet_hi_schedule);
-
--static __latent_entropy void tasklet_action(struct softirq_action *a)
-+static __latent_entropy void tasklet_action(void)
- {
- struct tasklet_struct *list;
-
-@@ -526,7 +526,7 @@ static __latent_entropy void tasklet_action(struct softirq_action *a)
- }
- }
-
--static __latent_entropy void tasklet_hi_action(struct softirq_action *a)
-+static __latent_entropy void tasklet_hi_action(void)
- {
- struct tasklet_struct *list;
-
-diff --git a/kernel/sysctl.c b/kernel/sysctl.c
-index d330b1ce3b94..050278b12928 100644
---- a/kernel/sysctl.c
-+++ b/kernel/sysctl.c
-@@ -66,6 +66,7 @@
- #include <linux/kexec.h>
- #include <linux/bpf.h>
- #include <linux/mount.h>
-+#include <linux/tty.h>
-
- #include <linux/uaccess.h>
- #include <asm/processor.h>
-@@ -98,12 +99,19 @@
- #if defined(CONFIG_SYSCTL)
-
- /* External variables not in a header file. */
-+#if IS_ENABLED(CONFIG_USB)
-+int deny_new_usb __read_mostly = 0;
-+EXPORT_SYMBOL(deny_new_usb);
-+#endif
- extern int suid_dumpable;
- #ifdef CONFIG_COREDUMP
- extern int core_uses_pid;
- extern char core_pattern[];
- extern unsigned int core_pipe_limit;
- #endif
-+#ifdef CONFIG_USER_NS
-+extern int unprivileged_userns_clone;
-+#endif
- extern int pid_max;
- extern int pid_max_min, pid_max_max;
- extern int percpu_pagelist_fraction;
-@@ -115,40 +123,43 @@ extern int sysctl_nr_trim_pages;
-
- /* Constants used for minimum and maximum */
- #ifdef CONFIG_LOCKUP_DETECTOR
--static int sixty = 60;
-+static int sixty __read_only = 60;
- #endif
-
--static int __maybe_unused neg_one = -1;
-+static int __maybe_unused neg_one __read_only = -1;
-
- static int zero;
--static int __maybe_unused one = 1;
--static int __maybe_unused two = 2;
--static int __maybe_unused four = 4;
--static unsigned long one_ul = 1;
--static int one_hundred = 100;
--static int one_thousand = 1000;
-+static int __maybe_unused one __read_only = 1;
-+static int __maybe_unused two __read_only = 2;
-+static int __maybe_unused four __read_only = 4;
-+static unsigned long one_ul __read_only = 1;
-+static int one_hundred __read_only = 100;
-+static int one_thousand __read_only = 1000;
- #ifdef CONFIG_PRINTK
--static int ten_thousand = 10000;
-+static int ten_thousand __read_only = 10000;
- #endif
- #ifdef CONFIG_PERF_EVENTS
--static int six_hundred_forty_kb = 640 * 1024;
-+static int six_hundred_forty_kb __read_only = 640 * 1024;
- #endif
-
- /* this is needed for the proc_doulongvec_minmax of vm_dirty_bytes */
--static unsigned long dirty_bytes_min = 2 * PAGE_SIZE;
-+static unsigned long dirty_bytes_min __read_only = 2 * PAGE_SIZE;
-
- /* this is needed for the proc_dointvec_minmax for [fs_]overflow UID and GID */
--static int maxolduid = 65535;
--static int minolduid;
-+static int maxolduid __read_only = 65535;
-+static int minolduid __read_only;
-
--static int ngroups_max = NGROUPS_MAX;
-+static int ngroups_max __read_only = NGROUPS_MAX;
- static const int cap_last_cap = CAP_LAST_CAP;
-
- /*this is needed for proc_doulongvec_minmax of sysctl_hung_task_timeout_secs */
- #ifdef CONFIG_DETECT_HUNG_TASK
--static unsigned long hung_task_timeout_max = (LONG_MAX/HZ);
-+static unsigned long hung_task_timeout_max __read_only = (LONG_MAX/HZ);
- #endif
-
-+int device_sidechannel_restrict __read_mostly = 1;
-+EXPORT_SYMBOL(device_sidechannel_restrict);
-+
- #ifdef CONFIG_INOTIFY_USER
- #include <linux/inotify.h>
- #endif
-@@ -286,19 +297,19 @@ static struct ctl_table sysctl_base_table[] = {
- };
-
- #ifdef CONFIG_SCHED_DEBUG
--static int min_sched_granularity_ns = 100000; /* 100 usecs */
--static int max_sched_granularity_ns = NSEC_PER_SEC; /* 1 second */
--static int min_wakeup_granularity_ns; /* 0 usecs */
--static int max_wakeup_granularity_ns = NSEC_PER_SEC; /* 1 second */
-+static int min_sched_granularity_ns __read_only = 100000; /* 100 usecs */
-+static int max_sched_granularity_ns __read_only = NSEC_PER_SEC; /* 1 second */
-+static int min_wakeup_granularity_ns __read_only; /* 0 usecs */
-+static int max_wakeup_granularity_ns __read_only = NSEC_PER_SEC; /* 1 second */
- #ifdef CONFIG_SMP
--static int min_sched_tunable_scaling = SCHED_TUNABLESCALING_NONE;
--static int max_sched_tunable_scaling = SCHED_TUNABLESCALING_END-1;
-+static int min_sched_tunable_scaling __read_only = SCHED_TUNABLESCALING_NONE;
-+static int max_sched_tunable_scaling __read_only = SCHED_TUNABLESCALING_END-1;
- #endif /* CONFIG_SMP */
- #endif /* CONFIG_SCHED_DEBUG */
-
- #ifdef CONFIG_COMPACTION
--static int min_extfrag_threshold;
--static int max_extfrag_threshold = 1000;
-+static int min_extfrag_threshold __read_only;
-+static int max_extfrag_threshold __read_only = 1000;
- #endif
-
- static struct ctl_table kern_table[] = {
-@@ -512,6 +523,15 @@ static struct ctl_table kern_table[] = {
- .proc_handler = proc_dointvec,
- },
- #endif
-+#ifdef CONFIG_USER_NS
-+ {
-+ .procname = "unprivileged_userns_clone",
-+ .data = &unprivileged_userns_clone,
-+ .maxlen = sizeof(int),
-+ .mode = 0644,
-+ .proc_handler = proc_dointvec,
-+ },
-+#endif
- #ifdef CONFIG_PROC_SYSCTL
- {
- .procname = "tainted",
-@@ -853,6 +873,37 @@ static struct ctl_table kern_table[] = {
- .extra1 = &zero,
- .extra2 = &two,
- },
-+#endif
-+#if defined CONFIG_TTY
-+ {
-+ .procname = "tiocsti_restrict",
-+ .data = &tiocsti_restrict,
-+ .maxlen = sizeof(int),
-+ .mode = 0644,
-+ .proc_handler = proc_dointvec_minmax_sysadmin,
-+ .extra1 = &zero,
-+ .extra2 = &one,
-+ },
-+#endif
-+ {
-+ .procname = "device_sidechannel_restrict",
-+ .data = &device_sidechannel_restrict,
-+ .maxlen = sizeof(int),
-+ .mode = 0644,
-+ .proc_handler = proc_dointvec_minmax_sysadmin,
-+ .extra1 = &zero,
-+ .extra2 = &one,
-+ },
-+#if IS_ENABLED(CONFIG_USB)
-+ {
-+ .procname = "deny_new_usb",
-+ .data = &deny_new_usb,
-+ .maxlen = sizeof(int),
-+ .mode = 0644,
-+ .proc_handler = proc_dointvec_minmax_sysadmin,
-+ .extra1 = &zero,
-+ .extra2 = &one,
-+ },
- #endif
- {
- .procname = "ngroups_max",
-diff --git a/kernel/time/timer.c b/kernel/time/timer.c
-index f17c76a1a05f..50f079d11488 100644
---- a/kernel/time/timer.c
-+++ b/kernel/time/timer.c
-@@ -1640,7 +1640,7 @@ static inline void __run_timers(struct timer_base *base)
- /*
- * This function runs timers and the timer-tq in bottom half context.
- */
--static __latent_entropy void run_timer_softirq(struct softirq_action *h)
-+static __latent_entropy void run_timer_softirq(void)
- {
- struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
-
-diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
-index ed80a88980f0..ff6d27d06af0 100644
---- a/kernel/user_namespace.c
-+++ b/kernel/user_namespace.c
-@@ -24,6 +24,9 @@
- #include <linux/projid.h>
- #include <linux/fs_struct.h>
-
-+/* sysctl */
-+int unprivileged_userns_clone;
-+
- static struct kmem_cache *user_ns_cachep __read_mostly;
- static DEFINE_MUTEX(userns_state_mutex);
-
-diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
-index 62d0e25c054c..3953072277eb 100644
---- a/lib/Kconfig.debug
-+++ b/lib/Kconfig.debug
-@@ -937,6 +937,7 @@ endmenu # "Debug lockups and hangs"
-
- config PANIC_ON_OOPS
- bool "Panic on Oops"
-+ default y
- help
- Say Y here to enable the kernel to panic when it oopses. This
- has the same effect as setting oops=panic on the kernel command
-@@ -946,7 +947,7 @@ config PANIC_ON_OOPS
- anything erroneous after an oops which could result in data
- corruption or other issues.
-
-- Say N if unsure.
-+ Say Y if unsure.
-
- config PANIC_ON_OOPS_VALUE
- int
-@@ -1319,6 +1320,7 @@ config DEBUG_BUGVERBOSE
- config DEBUG_LIST
- bool "Debug linked list manipulation"
- depends on DEBUG_KERNEL || BUG_ON_DATA_CORRUPTION
-+ default y
- help
- Enable this to turn on extended checks in the linked-list
- walking routines.
-@@ -1932,6 +1934,7 @@ config MEMTEST
- config BUG_ON_DATA_CORRUPTION
- bool "Trigger a BUG when data corruption is detected"
- select DEBUG_LIST
-+ default y
- help
- Select this option if the kernel should BUG when it encounters
- data corruption in kernel memory structures when they get checked
-@@ -1952,7 +1955,7 @@ config STRICT_DEVMEM
- bool "Filter access to /dev/mem"
- depends on MMU && DEVMEM
- depends on ARCH_HAS_DEVMEM_IS_ALLOWED
-- default y if TILE || PPC
-+ default y
- ---help---
- If this option is disabled, you allow userspace (root) access to all
- of memory, including kernel and userspace memory. Accidental
-@@ -1971,6 +1974,7 @@ config STRICT_DEVMEM
- config IO_STRICT_DEVMEM
- bool "Filter I/O access to /dev/mem"
- depends on STRICT_DEVMEM
-+ default y
- ---help---
- If this option is disabled, you allow userspace (root) access to all
- io-memory regardless of whether a driver is actively using that
-diff --git a/lib/irq_poll.c b/lib/irq_poll.c
-index 86a709954f5a..6f15787fcb1b 100644
---- a/lib/irq_poll.c
-+++ b/lib/irq_poll.c
-@@ -75,7 +75,7 @@ void irq_poll_complete(struct irq_poll *iop)
- }
- EXPORT_SYMBOL(irq_poll_complete);
-
--static void __latent_entropy irq_poll_softirq(struct softirq_action *h)
-+static void __latent_entropy irq_poll_softirq(void)
- {
- struct list_head *list = this_cpu_ptr(&blk_cpu_iopoll);
- int rearm = 0, budget = irq_poll_budget;
-diff --git a/lib/kobject.c b/lib/kobject.c
-index bbbb067de8ec..fec2f780cf9b 100644
---- a/lib/kobject.c
-+++ b/lib/kobject.c
-@@ -956,9 +956,9 @@ EXPORT_SYMBOL_GPL(kset_create_and_add);
-
-
- static DEFINE_SPINLOCK(kobj_ns_type_lock);
--static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES];
-+static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES] __ro_after_init;
-
--int kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
-+int __init kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
- {
- enum kobj_ns_type type = ops->type;
- int error;
-diff --git a/lib/nlattr.c b/lib/nlattr.c
-index 3d8295c85505..3fa3b3409d69 100644
---- a/lib/nlattr.c
-+++ b/lib/nlattr.c
-@@ -341,6 +341,8 @@ int nla_memcpy(void *dest, const struct nlattr *src, int count)
- {
- int minlen = min_t(int, count, nla_len(src));
-
-+ BUG_ON(minlen < 0);
-+
- memcpy(dest, nla_data(src), minlen);
- if (count > minlen)
- memset(dest + minlen, 0, count - minlen);
-diff --git a/lib/vsprintf.c b/lib/vsprintf.c
-index 4a990f3fd345..3df8db5af0ba 100644
---- a/lib/vsprintf.c
-+++ b/lib/vsprintf.c
-@@ -1588,7 +1588,7 @@ char *device_node_string(char *buf, char *end, struct device_node *dn,
- return widen_string(buf, buf - buf_start, end, spec);
- }
-
--int kptr_restrict __read_mostly;
-+int kptr_restrict __read_mostly = 2;
-
- /*
- * Show a '%p' thing. A kernel extension is that the '%p' is followed
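Raising the kptr_restrict default to 2 affects every %pK consumer, /proc/kallsyms included: pointers are blanked for all readers, not just unprivileged ones. A kernel-side sketch (the function name is invented):

    static void show(const void *obj)
    {
            /* kptr_restrict == 2: %pK prints a zeroed value for every
             * reader; 1 zeroes it only without sufficient privilege;
             * 0 prints the raw address. */
            pr_info("object at %pK\n", obj);
    }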
-diff --git a/mm/Kconfig b/mm/Kconfig
-index 59efbd3337e0..c070e14ec83d 100644
---- a/mm/Kconfig
-+++ b/mm/Kconfig
-@@ -319,7 +319,8 @@ config KSM
- config DEFAULT_MMAP_MIN_ADDR
- int "Low address space to protect from user allocation"
- depends on MMU
-- default 4096
-+ default 32768 if ARM || (ARM64 && COMPAT)
-+ default 65536
- help
- This is the portion of low virtual memory which should be protected
- from userspace allocation. Keeping a user from writing to low pages
-diff --git a/mm/mmap.c b/mm/mmap.c
-index 2398776195d2..a8ffa2223ad1 100644
---- a/mm/mmap.c
-+++ b/mm/mmap.c
-@@ -220,6 +220,13 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
-
- newbrk = PAGE_ALIGN(brk);
- oldbrk = PAGE_ALIGN(mm->brk);
-+ /* properly handle unaligned min_brk as an empty heap */
-+ if (min_brk & ~PAGE_MASK) {
-+ if (brk == min_brk)
-+ newbrk -= PAGE_SIZE;
-+ if (mm->brk == min_brk)
-+ oldbrk -= PAGE_SIZE;
-+ }
- if (oldbrk == newbrk)
- goto set_brk;
-
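The point of the unaligned-min_brk fix above: PAGE_ALIGN() rounds up, so an empty heap whose randomized base is not page-aligned would otherwise appear to own the page its base sits in. A worked example with hypothetical addresses:

    unsigned long min_brk = 0x1234;              /* unaligned brk base */
    unsigned long brk     = min_brk;             /* empty heap         */
    unsigned long newbrk  = PAGE_ALIGN(brk);     /* 0x2000             */

    /* 0x2000 would claim the whole 0x1000..0x2000 page; backing off one
     * page (0x2000 - PAGE_SIZE == 0x1000) makes old/new brk describe a
     * genuinely empty range, matching what an unaligned base means. */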
-diff --git a/mm/page_alloc.c b/mm/page_alloc.c
-index a2f365f40433..5e726e59de20 100644
---- a/mm/page_alloc.c
-+++ b/mm/page_alloc.c
-@@ -67,6 +67,7 @@
- #include <linux/ftrace.h>
- #include <linux/lockdep.h>
- #include <linux/nmi.h>
-+#include <linux/random.h>
-
- #include <asm/sections.h>
- #include <asm/tlbflush.h>
-@@ -98,6 +99,15 @@ int _node_numa_mem_[MAX_NUMNODES];
- DEFINE_MUTEX(pcpu_drain_mutex);
- DEFINE_PER_CPU(struct work_struct, pcpu_drain);
-
-+bool __meminitdata extra_latent_entropy;
-+
-+static int __init setup_extra_latent_entropy(char *str)
-+{
-+ extra_latent_entropy = true;
-+ return 0;
-+}
-+early_param("extra_latent_entropy", setup_extra_latent_entropy);
-+
- #ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY
- volatile unsigned long latent_entropy __latent_entropy;
- EXPORT_SYMBOL(latent_entropy);
-@@ -1063,6 +1073,13 @@ static __always_inline bool free_pages_prepare(struct page *page,
- debug_check_no_obj_freed(page_address(page),
- PAGE_SIZE << order);
- }
-+
-+ if (IS_ENABLED(CONFIG_PAGE_SANITIZE)) {
-+ int i;
-+ for (i = 0; i < (1 << order); i++)
-+ clear_highpage(page + i);
-+ }
-+
- arch_free_page(page, order);
- kernel_poison_pages(page, 1 << order, 0);
- kernel_map_pages(page, 1 << order, 0);
-@@ -1278,6 +1295,21 @@ static void __init __free_pages_boot_core(struct page *page, unsigned int order)
- __ClearPageReserved(p);
- set_page_count(p, 0);
-
-+ if (extra_latent_entropy && !PageHighMem(page) && page_to_pfn(page) < 0x100000) {
-+ unsigned long hash = 0;
-+ size_t index, end = PAGE_SIZE * nr_pages / sizeof hash;
-+ const unsigned long *data = lowmem_page_address(page);
-+
-+ for (index = 0; index < end; index++)
-+ hash ^= hash + data[index];
-+#ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY
-+ latent_entropy ^= hash;
-+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
-+#else
-+ add_device_randomness((const void *)&hash, sizeof(hash));
-+#endif
-+ }
-+
- page_zone(page)->managed_pages += nr_pages;
- set_page_refcounted(page);
- __free_pages(page, order);
-@@ -1718,8 +1750,8 @@ static inline int check_new_page(struct page *page)
-
- static inline bool free_pages_prezeroed(void)
- {
-- return IS_ENABLED(CONFIG_PAGE_POISONING_ZERO) &&
-- page_poisoning_enabled();
-+ return IS_ENABLED(CONFIG_PAGE_SANITIZE) ||
-+ (IS_ENABLED(CONFIG_PAGE_POISONING_ZERO) && page_poisoning_enabled());
- }
-
- #ifdef CONFIG_DEBUG_VM
-@@ -1776,6 +1808,11 @@ static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags
-
- post_alloc_hook(page, order, gfp_flags);
-
-+ if (IS_ENABLED(CONFIG_PAGE_SANITIZE_VERIFY)) {
-+ for (i = 0; i < (1 << order); i++)
-+ verify_zero_highpage(page + i);
-+ }
-+
- if (!free_pages_prezeroed() && (gfp_flags & __GFP_ZERO))
- for (i = 0; i < (1 << order); i++)
- clear_highpage(page + i);
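Taken together, the page_alloc.c hunks implement sanitize-on-free with an optional allocation-time audit: free_pages_prepare() zeroes every subpage, free_pages_prezeroed() then lets __GFP_ZERO skip its memset, and prep_new_page() re-checks the zeroing to catch writes into freed pages. A condensed view of the lifecycle (comments only, summarizing the hunks above):

    /*
     * free:   free_pages_prepare()
     *             clear_highpage() on each subpage     <- PAGE_SANITIZE
     *
     * alloc:  prep_new_page()
     *             verify_zero_highpage() per subpage   <- PAGE_SANITIZE_VERIFY
     *             __GFP_ZERO memset skipped, since
     *             free_pages_prezeroed() is now true
     */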
-diff --git a/mm/slab.h b/mm/slab.h
-index 485d9fbb8802..436461588804 100644
---- a/mm/slab.h
-+++ b/mm/slab.h
-@@ -311,7 +311,11 @@ static inline bool is_root_cache(struct kmem_cache *s)
- static inline bool slab_equal_or_root(struct kmem_cache *s,
- struct kmem_cache *p)
- {
-+#ifdef CONFIG_SLAB_HARDENED
-+ return p == s;
-+#else
- return true;
-+#endif
- }
-
- static inline const char *cache_name(struct kmem_cache *s)
-@@ -363,18 +367,26 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
- * to not do even the assignment. In that case, slab_equal_or_root
- * will also be a constant.
- */
-- if (!memcg_kmem_enabled() &&
-+ if (!IS_ENABLED(CONFIG_SLAB_HARDENED) &&
-+ !memcg_kmem_enabled() &&
- !unlikely(s->flags & SLAB_CONSISTENCY_CHECKS))
- return s;
-
- page = virt_to_head_page(x);
-+#ifdef CONFIG_SLAB_HARDENED
-+ BUG_ON(!PageSlab(page));
-+#endif
- cachep = page->slab_cache;
- if (slab_equal_or_root(cachep, s))
- return cachep;
-
- pr_err("%s: Wrong slab cache. %s but object is from %s\n",
- __func__, s->name, cachep->name);
-+#ifdef CONFIG_BUG_ON_DATA_CORRUPTION
-+ BUG_ON(1);
-+#else
- WARN_ON_ONCE(1);
-+#endif
- return s;
- }
-
-@@ -399,7 +411,7 @@ static inline size_t slab_ksize(const struct kmem_cache *s)
- * back there or track user information then we can
- * only use the space before that information.
- */
-- if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
-+ if ((s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER)) || IS_ENABLED(CONFIG_SLAB_CANARY))
- return s->inuse;
- /*
- * Else we can use all the padding etc for the allocation
-diff --git a/mm/slab_common.c b/mm/slab_common.c
-index f6764cf162b8..015c8e4df318 100644
---- a/mm/slab_common.c
-+++ b/mm/slab_common.c
-@@ -26,10 +26,10 @@
-
- #include "slab.h"
-
--enum slab_state slab_state;
-+enum slab_state slab_state __ro_after_init;
- LIST_HEAD(slab_caches);
- DEFINE_MUTEX(slab_mutex);
--struct kmem_cache *kmem_cache;
-+struct kmem_cache *kmem_cache __ro_after_init;
-
- static LIST_HEAD(slab_caches_to_rcu_destroy);
- static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work);
-@@ -49,7 +49,7 @@ static DECLARE_WORK(slab_caches_to_rcu_destroy_work,
- /*
- * Merge control. If this is set then no merging of slab caches will occur.
- */
--static bool slab_nomerge = !IS_ENABLED(CONFIG_SLAB_MERGE_DEFAULT);
-+static bool slab_nomerge __ro_after_init = !IS_ENABLED(CONFIG_SLAB_MERGE_DEFAULT);
-
- static int __init setup_slab_nomerge(char *str)
- {
-@@ -931,7 +931,7 @@ EXPORT_SYMBOL(kmalloc_dma_caches);
- * of two cache sizes there. The size of larger slabs can be determined using
- * fls.
- */
--static s8 size_index[24] = {
-+static s8 size_index[24] __ro_after_init = {
- 3, /* 8 */
- 4, /* 16 */
- 5, /* 24 */
-diff --git a/mm/slub.c b/mm/slub.c
-index 220d42e592ef..3decf87b1cf2 100644
---- a/mm/slub.c
-+++ b/mm/slub.c
-@@ -125,6 +125,16 @@ static inline int kmem_cache_debug(struct kmem_cache *s)
- #endif
- }
-
-+static inline bool has_sanitize(struct kmem_cache *s)
-+{
-+ return IS_ENABLED(CONFIG_SLAB_SANITIZE) && !(s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON));
-+}
-+
-+static inline bool has_sanitize_verify(struct kmem_cache *s)
-+{
-+ return IS_ENABLED(CONFIG_SLAB_SANITIZE_VERIFY) && has_sanitize(s);
-+}
-+
- void *fixup_red_left(struct kmem_cache *s, void *p)
- {
- if (kmem_cache_debug(s) && s->flags & SLAB_RED_ZONE)
-@@ -297,6 +307,35 @@ static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
- *(void **)freeptr_addr = freelist_ptr(s, fp, freeptr_addr);
- }
-
-+#ifdef CONFIG_SLAB_CANARY
-+static inline unsigned long *get_canary(struct kmem_cache *s, void *object)
-+{
-+ if (s->offset)
-+ return object + s->offset + sizeof(void *);
-+ return object + s->inuse;
-+}
-+
-+static inline unsigned long get_canary_value(const void *canary, unsigned long value)
-+{
-+ return (value ^ (unsigned long)canary) & CANARY_MASK;
-+}
-+
-+static inline void set_canary(struct kmem_cache *s, void *object, unsigned long value)
-+{
-+ unsigned long *canary = get_canary(s, object);
-+ *canary = get_canary_value(canary, value);
-+}
-+
-+static inline void check_canary(struct kmem_cache *s, void *object, unsigned long value)
-+{
-+ unsigned long *canary = get_canary(s, object);
-+ BUG_ON(*canary != get_canary_value(canary, value));
-+}
-+#else
-+#define set_canary(s, object, value)
-+#define check_canary(s, object, value)
-+#endif
-+
- /* Loop over all objects in a slab */
- #define for_each_object(__p, __s, __addr, __objects) \
- for (__p = fixup_red_left(__s, __addr); \
-@@ -484,13 +523,13 @@ static inline void *restore_red_left(struct kmem_cache *s, void *p)
- * Debug settings:
- */
- #if defined(CONFIG_SLUB_DEBUG_ON)
--static int slub_debug = DEBUG_DEFAULT_FLAGS;
-+static int slub_debug __ro_after_init = DEBUG_DEFAULT_FLAGS;
- #else
--static int slub_debug;
-+static int slub_debug __ro_after_init;
- #endif
-
--static char *slub_debug_slabs;
--static int disable_higher_order_debug;
-+static char *slub_debug_slabs __ro_after_init;
-+static int disable_higher_order_debug __ro_after_init;
-
- /*
- * slub is about to manipulate internal object metadata. This memory lies
-@@ -550,6 +589,9 @@ static struct track *get_track(struct kmem_cache *s, void *object,
- else
- p = object + s->inuse;
-
-+ if (IS_ENABLED(CONFIG_SLAB_CANARY))
-+ p = (void *)p + sizeof(void *);
-+
- return p + alloc;
- }
-
-@@ -688,6 +730,9 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
- else
- off = s->inuse;
-
-+ if (IS_ENABLED(CONFIG_SLAB_CANARY))
-+ off += sizeof(void *);
-+
- if (s->flags & SLAB_STORE_USER)
- off += 2 * sizeof(struct track);
-
-@@ -817,6 +862,9 @@ static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p)
- /* Freepointer is placed after the object. */
- off += sizeof(void *);
-
-+ if (IS_ENABLED(CONFIG_SLAB_CANARY))
-+ off += sizeof(void *);
-+
- if (s->flags & SLAB_STORE_USER)
- /* We also have user information there */
- off += 2 * sizeof(struct track);
-@@ -1416,8 +1464,9 @@ static void setup_object(struct kmem_cache *s, struct page *page,
- void *object)
- {
- setup_object_debug(s, page, object);
-+ set_canary(s, object, s->random_inactive);
- kasan_init_slab_obj(s, object);
-- if (unlikely(s->ctor)) {
-+ if (unlikely(s->ctor) && !has_sanitize_verify(s)) {
- kasan_unpoison_object_data(s, object);
- s->ctor(object);
- kasan_poison_object_data(s, object);
-@@ -2717,9 +2766,21 @@ static __always_inline void *slab_alloc_node(struct kmem_cache *s,
- stat(s, ALLOC_FASTPATH);
- }
-
-- if (unlikely(gfpflags & __GFP_ZERO) && object)
-+ if (has_sanitize_verify(s) && object) {
-+ size_t offset = s->offset ? 0 : sizeof(void *);
-+ BUG_ON(memchr_inv(object + offset, 0, s->object_size - offset));
-+ if (s->ctor)
-+ s->ctor(object);
-+ if (unlikely(gfpflags & __GFP_ZERO) && offset)
-+ memset(object, 0, sizeof(void *));
-+ } else if (unlikely(gfpflags & __GFP_ZERO) && object)
- memset(object, 0, s->object_size);
-
-+ if (object) {
-+ check_canary(s, object, s->random_inactive);
-+ set_canary(s, object, s->random_active);
-+ }
-+
- slab_post_alloc_hook(s, gfpflags, 1, &object);
-
- return object;
-@@ -2926,6 +2987,27 @@ static __always_inline void do_slab_free(struct kmem_cache *s,
- void *tail_obj = tail ? : head;
- struct kmem_cache_cpu *c;
- unsigned long tid;
-+ bool sanitize = has_sanitize(s);
-+
-+ if (IS_ENABLED(CONFIG_SLAB_CANARY) || sanitize) {
-+ __maybe_unused int offset = s->offset ? 0 : sizeof(void *);
-+ void *x = head;
-+
-+ while (1) {
-+ check_canary(s, x, s->random_active);
-+ set_canary(s, x, s->random_inactive);
-+
-+ if (sanitize) {
-+ memset(x + offset, 0, s->object_size - offset);
-+ if (!IS_ENABLED(CONFIG_SLAB_SANITIZE_VERIFY) && s->ctor)
-+ s->ctor(x);
-+ }
-+ if (x == tail_obj)
-+ break;
-+ x = get_freepointer(s, x);
-+ }
-+ }
-+
- redo:
- /*
- * Determine the currently cpus per cpu slab.
-@@ -3104,7 +3186,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
- void **p)
- {
- struct kmem_cache_cpu *c;
-- int i;
-+ int i, k;
-
- /* memcg and kmem_cache debug support */
- s = slab_pre_alloc_hook(s, flags);
-@@ -3141,13 +3223,29 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
- local_irq_enable();
-
- /* Clear memory outside IRQ disabled fastpath loop */
-- if (unlikely(flags & __GFP_ZERO)) {
-+ if (has_sanitize_verify(s)) {
-+ int j;
-+
-+ for (j = 0; j < i; j++) {
-+ size_t offset = s->offset ? 0 : sizeof(void *);
-+ BUG_ON(memchr_inv(p[j] + offset, 0, s->object_size - offset));
-+ if (s->ctor)
-+ s->ctor(p[j]);
-+ if (unlikely(flags & __GFP_ZERO) && offset)
-+ memset(p[j], 0, sizeof(void *));
-+ }
-+ } else if (unlikely(flags & __GFP_ZERO)) {
- int j;
-
- for (j = 0; j < i; j++)
- memset(p[j], 0, s->object_size);
- }
-
-+ for (k = 0; k < i; k++) {
-+ check_canary(s, p[k], s->random_inactive);
-+ set_canary(s, p[k], s->random_active);
-+ }
-+
- /* memcg and kmem_cache debug support */
- slab_post_alloc_hook(s, flags, size, p);
- return i;
-@@ -3179,9 +3277,9 @@ EXPORT_SYMBOL(kmem_cache_alloc_bulk);
- * and increases the number of allocations possible without having to
- * take the list_lock.
- */
--static int slub_min_order;
--static int slub_max_order = PAGE_ALLOC_COSTLY_ORDER;
--static int slub_min_objects;
-+static int slub_min_order __ro_after_init;
-+static int slub_max_order __ro_after_init = PAGE_ALLOC_COSTLY_ORDER;
-+static int slub_min_objects __ro_after_init;
-
- /*
- * Calculate the order of allocation given an slab object size.
-@@ -3351,6 +3449,7 @@ static void early_kmem_cache_node_alloc(int node)
- init_object(kmem_cache_node, n, SLUB_RED_ACTIVE);
- init_tracking(kmem_cache_node, n);
- #endif
-+ set_canary(kmem_cache_node, n, kmem_cache_node->random_active);
- kasan_kmalloc(kmem_cache_node, n, sizeof(struct kmem_cache_node),
- GFP_KERNEL);
- init_kmem_cache_node(n);
-@@ -3507,6 +3606,9 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
- size += sizeof(void *);
- }
-
-+ if (IS_ENABLED(CONFIG_SLAB_CANARY))
-+ size += sizeof(void *);
-+
- #ifdef CONFIG_SLUB_DEBUG
- if (flags & SLAB_STORE_USER)
- /*
-@@ -3577,6 +3679,10 @@ static int kmem_cache_open(struct kmem_cache *s, unsigned long flags)
- #ifdef CONFIG_SLAB_FREELIST_HARDENED
- s->random = get_random_long();
- #endif
-+#ifdef CONFIG_SLAB_CANARY
-+ s->random_active = get_random_long();
-+ s->random_inactive = get_random_long();
-+#endif
-
- if (need_reserve_slab_rcu && (s->flags & SLAB_TYPESAFE_BY_RCU))
- s->reserved = sizeof(struct rcu_head);
-@@ -3841,6 +3947,8 @@ const char *__check_heap_object(const void *ptr, unsigned long n,
- offset -= s->red_left_pad;
- }
-
-+ check_canary(s, (void *)ptr - offset, s->random_active);
-+
- /* Allow address range falling entirely within object size. */
- if (offset <= object_size && n <= object_size - offset)
- return NULL;
-@@ -3859,7 +3967,11 @@ static size_t __ksize(const void *object)
- page = virt_to_head_page(object);
-
- if (unlikely(!PageSlab(page))) {
-+#ifdef CONFIG_BUG_ON_DATA_CORRUPTION
-+ BUG_ON(!PageCompound(page));
-+#else
- WARN_ON(!PageCompound(page));
-+#endif
- return PAGE_SIZE << compound_order(page);
- }
-
-@@ -4724,7 +4836,7 @@ enum slab_stat_type {
- #define SO_TOTAL (1 << SL_TOTAL)
-
- #ifdef CONFIG_MEMCG
--static bool memcg_sysfs_enabled = IS_ENABLED(CONFIG_SLUB_MEMCG_SYSFS_ON);
-+static bool memcg_sysfs_enabled __ro_after_init = IS_ENABLED(CONFIG_SLUB_MEMCG_SYSFS_ON);
-
- static int __init setup_slub_memcg_sysfs(char *str)
- {
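Pulling the slub.c canary pieces together: calculate_sizes() reserves one word after the free pointer, get_canary() locates it, and the stored value is keyed to both a per-cache secret and the slot's own address, so a canary copied from one object is invalid in another. A rough layout sketch for the common case where the free pointer sits past the payload (s->offset != 0; details vary with debug options):

    /*
     *   [ payload ... ][ free ptr ][ canary ][ track/user data ]
     *
     *   stored = (secret ^ (unsigned long)&slot) & CANARY_MASK;
     *
     * secret is random_active while the object is live and
     * random_inactive while it is free; check_canary() recomputes the
     * value and BUG()s on mismatch, catching linear overflows into the
     * metadata as well as double frees.
     */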
-diff --git a/mm/swap.c b/mm/swap.c
-index a77d68f2c1b6..d1f1d75f4d1f 100644
---- a/mm/swap.c
-+++ b/mm/swap.c
-@@ -92,6 +92,13 @@ static void __put_compound_page(struct page *page)
- if (!PageHuge(page))
- __page_cache_release(page);
- dtor = get_compound_page_dtor(page);
-+ if (!PageHuge(page))
-+ BUG_ON(dtor != free_compound_page
-+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-+ && dtor != free_transhuge_page
-+#endif
-+ );
-+
- (*dtor)(page);
- }
-
-diff --git a/net/core/dev.c b/net/core/dev.c
-index 4337450a5fdb..5a3c7d217719 100644
---- a/net/core/dev.c
-+++ b/net/core/dev.c
-@@ -4117,7 +4117,7 @@ int netif_rx_ni(struct sk_buff *skb)
- }
- EXPORT_SYMBOL(netif_rx_ni);
-
--static __latent_entropy void net_tx_action(struct softirq_action *h)
-+static __latent_entropy void net_tx_action(void)
- {
- struct softnet_data *sd = this_cpu_ptr(&softnet_data);
-
-@@ -5635,7 +5635,7 @@ static int napi_poll(struct napi_struct *n, struct list_head *repoll)
- return work;
- }
-
--static __latent_entropy void net_rx_action(struct softirq_action *h)
-+static __latent_entropy void net_rx_action(void)
- {
- struct softnet_data *sd = this_cpu_ptr(&softnet_data);
- unsigned long time_limit = jiffies +
-diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
-index f48fe6fc7e8c..d78c52835c08 100644
---- a/net/ipv4/Kconfig
-+++ b/net/ipv4/Kconfig
-@@ -261,6 +261,7 @@ config IP_PIMSM_V2
-
- config SYN_COOKIES
- bool "IP: TCP syncookie support"
-+ default y
- ---help---
- Normal TCP/IP networking is open to an attack known as "SYN
- flooding". This denial-of-service attack prevents legitimate remote
-diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
-index 18bc8738e989..d2866f6dd736 100644
---- a/scripts/mod/modpost.c
-+++ b/scripts/mod/modpost.c
-@@ -37,6 +37,7 @@ static int vmlinux_section_warnings = 1;
- static int warn_unresolved = 0;
- /* How a symbol is exported */
- static int sec_mismatch_count = 0;
-+static int writable_fptr_count = 0;
- static int sec_mismatch_verbose = 1;
- static int sec_mismatch_fatal = 0;
- /* ignore missing files */
-@@ -965,6 +966,7 @@ enum mismatch {
- ANY_EXIT_TO_ANY_INIT,
- EXPORT_TO_INIT_EXIT,
- EXTABLE_TO_NON_TEXT,
-+ DATA_TO_TEXT
- };
-
- /**
-@@ -1091,6 +1093,12 @@ static const struct sectioncheck sectioncheck[] = {
- .good_tosec = {ALL_TEXT_SECTIONS , NULL},
- .mismatch = EXTABLE_TO_NON_TEXT,
- .handler = extable_mismatch_handler,
-+},
-+/* Do not reference code from writable data */
-+{
-+ .fromsec = { DATA_SECTIONS, NULL },
-+ .bad_tosec = { ALL_TEXT_SECTIONS, NULL },
-+ .mismatch = DATA_TO_TEXT
- }
- };
-
-@@ -1240,10 +1248,10 @@ static Elf_Sym *find_elf_symbol(struct elf_info *elf, Elf64_Sword addr,
- continue;
- if (ELF_ST_TYPE(sym->st_info) == STT_SECTION)
- continue;
-- if (sym->st_value == addr)
-- return sym;
- /* Find a symbol nearby - addr are maybe negative */
- d = sym->st_value - addr;
-+ if (d == 0)
-+ return sym;
- if (d < 0)
- d = addr - sym->st_value;
- if (d < distance) {
-@@ -1402,7 +1410,11 @@ static void report_sec_mismatch(const char *modname,
- char *prl_from;
- char *prl_to;
-
-- sec_mismatch_count++;
-+ if (mismatch->mismatch == DATA_TO_TEXT)
-+ writable_fptr_count++;
-+ else
-+ sec_mismatch_count++;
-+
- if (!sec_mismatch_verbose)
- return;
-
-@@ -1526,6 +1538,14 @@ static void report_sec_mismatch(const char *modname,
- fatal("There's a special handler for this mismatch type, "
- "we should never get here.");
- break;
-+ case DATA_TO_TEXT:
-+#if 0
-+ fprintf(stderr,
-+ "The %s %s:%s references\n"
-+ "the %s %s:%s%s\n",
-+ from, fromsec, fromsym, to, tosec, tosym, to_p);
-+#endif
-+ break;
- }
- fprintf(stderr, "\n");
- }
-@@ -2539,6 +2559,14 @@ int main(int argc, char **argv)
- }
- }
- free(buf.p);
-+ if (writable_fptr_count) {
-+ if (!sec_mismatch_verbose) {
-+ warn("modpost: Found %d writable function pointer(s).\n"
-+ "To see full details build your kernel with:\n"
-+ "'make CONFIG_DEBUG_SECTION_MISMATCH=y'\n",
-+ writable_fptr_count);
-+ }
-+ }
-
- return err;
- }
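The new DATA_TO_TEXT class counts function pointers reachable through writable data, pointers that should normally be const so they land in .rodata and cannot be retargeted at runtime. A sketch of the pattern being flagged (my_read is a stand-in):

    static ssize_t my_read(struct file *f, char __user *buf,
                           size_t len, loff_t *off);

    /* Flagged: lives in .data, so .read is writable at runtime. */
    static struct file_operations my_fops = {
            .read = my_read,
    };

    /* Clean: const moves the whole table into .rodata. */
    static const struct file_operations my_fops_ro = {
            .read = my_read,
    };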
-diff --git a/security/Kconfig b/security/Kconfig
-index 87f2a6f842fd..7bdbb7edf5bf 100644
---- a/security/Kconfig
-+++ b/security/Kconfig
-@@ -8,7 +8,7 @@ source security/keys/Kconfig
-
- config SECURITY_DMESG_RESTRICT
- bool "Restrict unprivileged access to the kernel syslog"
-- default n
-+ default y
- help
- This enforces restrictions on unprivileged users reading the kernel
- syslog via dmesg(8).
-@@ -18,10 +18,34 @@ config SECURITY_DMESG_RESTRICT
-
- If you are unsure how to answer this question, answer N.
-
-+config SECURITY_PERF_EVENTS_RESTRICT
-+ bool "Restrict unprivileged use of performance events"
-+ depends on PERF_EVENTS
-+ default y
-+ help
-+ If you say Y here, the kernel.perf_event_paranoid sysctl
-+ will be set to 3 by default, and no unprivileged use of the
-+ perf_event_open syscall will be permitted unless it is
-+ changed.
-+
-+config SECURITY_TIOCSTI_RESTRICT
-+ bool "Restrict unprivileged use of tiocsti command injection"
-+ default y
-+ help
-+ This enforces restrictions on unprivileged users injecting commands
-+ into other processes which share a tty session using the TIOCSTI
-+ ioctl. This option makes TIOCSTI use require CAP_SYS_ADMIN.
-+
-+ If this option is not selected, no restrictions will be enforced
-+ unless the tiocsti_restrict sysctl is explicitly set to 1.
-+
-+ If you are unsure how to answer this question, answer N.
-+
- config SECURITY
- bool "Enable different security models"
- depends on SYSFS
- depends on MULTIUSER
-+ default y
- help
- This allows you to choose different security modules to be
- configured into your kernel.
-@@ -48,6 +72,7 @@ config SECURITYFS
- config SECURITY_NETWORK
- bool "Socket and Networking Security Hooks"
- depends on SECURITY
-+ default y
- help
- This enables the socket and networking security hooks.
- If enabled, a security module can use these hooks to
-@@ -155,6 +180,7 @@ config HARDENED_USERCOPY
- depends on HAVE_HARDENED_USERCOPY_ALLOCATOR
- select BUG
- imply STRICT_DEVMEM
-+ default y
- help
- This option checks for obviously wrong memory regions when
- copying memory to/from the kernel (via copy_to_user() and
-@@ -178,10 +204,36 @@ config HARDENED_USERCOPY_PAGESPAN
- config FORTIFY_SOURCE
- bool "Harden common str/mem functions against buffer overflows"
- depends on ARCH_HAS_FORTIFY_SOURCE
-+ default y
- help
- Detect overflows of buffers in common string and memory functions
- where the compiler can determine and validate the buffer sizes.
-
-+config FORTIFY_SOURCE_STRICT_STRING
-+ bool "Harden common functions against buffer overflows"
-+ depends on FORTIFY_SOURCE
-+ depends on EXPERT
-+ help
-+ Perform stricter overflow checks, catching overflows within objects
-+ for common C string functions rather than only between objects.
-+
-+ This is not yet intended for production use, only bug finding.
-+
-+config PAGE_SANITIZE
-+ bool "Sanitize pages"
-+ default y
-+ help
-+ Zero fill page allocations on free, reducing the lifetime of
-+ sensitive data and helping to mitigate use-after-free bugs.
-+
-+config PAGE_SANITIZE_VERIFY
-+ bool "Verify sanitized pages"
-+ depends on PAGE_SANITIZE
-+ default y
-+ help
-+ Verify that newly allocated pages are zeroed to detect
-+ write-after-free bugs.
-+
- config STATIC_USERMODEHELPER
- bool "Force all usermode helper calls through a single binary"
- help
-diff --git a/security/selinux/Kconfig b/security/selinux/Kconfig
-index 8af7a690eb40..6539694b0fd3 100644
---- a/security/selinux/Kconfig
-+++ b/security/selinux/Kconfig
-@@ -2,7 +2,7 @@ config SECURITY_SELINUX
- bool "NSA SELinux Support"
- depends on SECURITY_NETWORK && AUDIT && NET && INET
- select NETWORK_SECMARK
-- default n
-+ default y
- help
- This selects NSA Security-Enhanced Linux (SELinux).
- You will also need a policy configuration and a labeled filesystem.
-@@ -79,23 +79,3 @@ config SECURITY_SELINUX_AVC_STATS
- This option collects access vector cache statistics to
- /selinux/avc/cache_stats, which may be monitored via
- tools such as avcstat.
--
--config SECURITY_SELINUX_CHECKREQPROT_VALUE
-- int "NSA SELinux checkreqprot default value"
-- depends on SECURITY_SELINUX
-- range 0 1
-- default 0
-- help
-- This option sets the default value for the 'checkreqprot' flag
-- that determines whether SELinux checks the protection requested
-- by the application or the protection that will be applied by the
-- kernel (including any implied execute for read-implies-exec) for
-- mmap and mprotect calls. If this option is set to 0 (zero),
-- SELinux will default to checking the protection that will be applied
-- by the kernel. If this option is set to 1 (one), SELinux will
-- default to checking the protection requested by the application.
-- The checkreqprot flag may be changed from the default via the
-- 'checkreqprot=' boot parameter. It may also be changed at runtime
-- via /selinux/checkreqprot if authorized by policy.
--
-- If you are unsure how to answer this question, answer 0.
-diff --git a/security/selinux/include/objsec.h b/security/selinux/include/objsec.h
-index 1649cd18eb0b..067f35559aa7 100644
---- a/security/selinux/include/objsec.h
-+++ b/security/selinux/include/objsec.h
-@@ -150,6 +150,6 @@ struct pkey_security_struct {
- u32 sid; /* SID of pkey */
- };
-
--extern unsigned int selinux_checkreqprot;
-+extern const unsigned int selinux_checkreqprot;
-
- #endif /* _SELINUX_OBJSEC_H_ */
-diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
-index 00eed842c491..8f7b8d7e6f91 100644
---- a/security/selinux/selinuxfs.c
-+++ b/security/selinux/selinuxfs.c
-@@ -41,16 +41,7 @@
- #include "objsec.h"
- #include "conditional.h"
-
--unsigned int selinux_checkreqprot = CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE;
--
--static int __init checkreqprot_setup(char *str)
--{
-- unsigned long checkreqprot;
-- if (!kstrtoul(str, 0, &checkreqprot))
-- selinux_checkreqprot = checkreqprot ? 1 : 0;
-- return 1;
--}
--__setup("checkreqprot=", checkreqprot_setup);
-+const unsigned int selinux_checkreqprot;
-
- static DEFINE_MUTEX(sel_mutex);
-
-@@ -610,10 +601,9 @@ static ssize_t sel_write_checkreqprot(struct file *file, const char __user *buf,
- return PTR_ERR(page);
-
- length = -EINVAL;
-- if (sscanf(page, "%u", &new_value) != 1)
-+ if (sscanf(page, "%u", &new_value) != 1 || new_value)
- goto out;
-
-- selinux_checkreqprot = new_value ? 1 : 0;
- length = count;
- out:
- kfree(page);
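The net effect of the three SELinux changes above is that checkreqprot is pinned to 0: the Kconfig default is gone, the variable becomes const, the boot parameter is removed, and sel_write_checkreqprot() now rejects any nonzero value. A small sketch of what user space sees under this patch (the selinuxfs mount point, /sys/fs/selinux here, is an assumption and varies by setup):

    /* Sketch: with this patch, writing "0" to checkreqprot still succeeds,
     * while any nonzero value is rejected with EINVAL. */
    #include <errno.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
            int fd = open("/sys/fs/selinux/checkreqprot", O_WRONLY);

            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            if (write(fd, "1", 1) < 0)  /* expected: EINVAL under this patch */
                    printf("write \"1\" rejected: %s\n", strerror(errno));
            close(fd);
            return 0;
    }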
-diff --git a/security/yama/Kconfig b/security/yama/Kconfig
-index 96b27405558a..485c1b85c325 100644
---- a/security/yama/Kconfig
-+++ b/security/yama/Kconfig
-@@ -1,7 +1,7 @@
- config SECURITY_YAMA
- bool "Yama support"
- depends on SECURITY
-- default n
-+ default y
- help
- This selects Yama, which extends DAC support with additional
- system-wide security settings beyond regular Linux discretionary
diff --git a/sys-kernel/linux-sources-redcore-lts/files/4.14-mute-pps_state_mismatch.patch b/sys-kernel/linux-sources-redcore-lts/files/4.14-mute-pps_state_mismatch.patch
deleted file mode 100644
index dc1d254b..00000000
--- a/sys-kernel/linux-sources-redcore-lts/files/4.14-mute-pps_state_mismatch.patch
+++ /dev/null
@@ -1,16 +0,0 @@
-diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
-index 09f274419..595bc5844 100644
---- a/drivers/gpu/drm/i915/intel_dp.c
-+++ b/drivers/gpu/drm/i915/intel_dp.c
-@@ -5249,7 +5249,10 @@ intel_pps_verify_state(struct drm_i915_private *dev_priv,
-
- if (hw.t1_t3 != sw->t1_t3 || hw.t8 != sw->t8 || hw.t9 != sw->t9 ||
- hw.t10 != sw->t10 || hw.t11_t12 != sw->t11_t12) {
-- DRM_ERROR("PPS state mismatch\n");
-+		/* seems buggy on 4.14.x; mute it for now,
-+		 * even though this is not a real solution.
-+ * DRM_ERROR("PPS state mismatch\n");
-+ */
- intel_pps_dump_state("sw", sw);
- intel_pps_dump_state("hw", &hw);
- }
diff --git a/sys-kernel/linux-sources-redcore-lts/files/4.14-restore-SD_PREFER_SIBLING-on-MC-domains.patch b/sys-kernel/linux-sources-redcore-lts/files/4.14-restore-SD_PREFER_SIBLING-on-MC-domains.patch
deleted file mode 100644
index b6be46cc..00000000
--- a/sys-kernel/linux-sources-redcore-lts/files/4.14-restore-SD_PREFER_SIBLING-on-MC-domains.patch
+++ /dev/null
@@ -1,12 +0,0 @@
-diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
-index 093f2ceba..808998fe1 100644
---- a/kernel/sched/topology.c
-+++ b/kernel/sched/topology.c
-@@ -1164,6 +1164,7 @@ sd_init(struct sched_domain_topology_level *tl,
- sd->smt_gain = 1178; /* ~15% */
-
- } else if (sd->flags & SD_SHARE_PKG_RESOURCES) {
-+ sd->flags |= SD_PREFER_SIBLING;
- sd->imbalance_pct = 117;
- sd->cache_nice_tries = 1;
- sd->busy_idx = 2;
diff --git a/sys-kernel/linux-sources-redcore-lts/files/4.14-uksm-linux-hardened.patch b/sys-kernel/linux-sources-redcore-lts/files/4.14-uksm-linux-hardened.patch
deleted file mode 100644
index f0596117..00000000
--- a/sys-kernel/linux-sources-redcore-lts/files/4.14-uksm-linux-hardened.patch
+++ /dev/null
@@ -1,6919 +0,0 @@
-diff -Nur a/Documentation/vm/00-INDEX b/Documentation/vm/00-INDEX
---- a/Documentation/vm/00-INDEX 2018-05-25 15:18:02.000000000 +0100
-+++ b/Documentation/vm/00-INDEX 2018-05-26 19:30:55.783140311 +0100
-@@ -20,6 +20,8 @@
- - description of the idle page tracking feature.
- ksm.txt
- - how to use the Kernel Samepage Merging feature.
-+uksm.txt
-+ - Introduction to Ultra KSM
- numa
- - information about NUMA specific code in the Linux vm.
- numa_memory_policy.txt
-diff -Nur a/Documentation/vm/uksm.txt b/Documentation/vm/uksm.txt
---- a/Documentation/vm/uksm.txt 1970-01-01 01:00:00.000000000 +0100
-+++ b/Documentation/vm/uksm.txt 2018-05-26 19:30:55.783140311 +0100
-@@ -0,0 +1,61 @@
-+The Ultra Kernel Samepage Merging feature
-+----------------------------------------------
-+/*
-+ * Ultra KSM. Copyright (C) 2011-2012 Nai Xia
-+ *
-+ * This is an improvement upon KSM. Some basic data structures and routines
-+ * are borrowed from ksm.c .
-+ *
-+ * Its new features:
-+ * 1. Full system scan:
-+ * It automatically scans all user processes' anonymous VMAs. Kernel-user
-+ * interaction to submit a memory area to KSM is no longer needed.
-+ *
-+ * 2. Rich area detection:
-+ * It automatically detects rich areas containing abundant duplicated
-+ * pages. Rich areas are given a full scan speed. Poor areas are
-+ * sampled at a reasonable speed with very low CPU consumption.
-+ *
-+ * 3. Ultra Per-page scan speed improvement:
-+ * A new hash algorithm is proposed. As a result, on a machine with
-+ * Core(TM)2 Quad Q9300 CPU in 32-bit mode and 800MHZ DDR2 main memory, it
-+ * can scan memory areas that do not contain duplicated pages at a speed of
-+ * 627MB/sec ~ 2445MB/sec and can merge duplicated areas at a speed of
-+ * 477MB/sec ~ 923MB/sec.
-+ *
-+ * 4. Thrashing area avoidance:
-+ * A thrashing area (a VMA with frequent KSM page break-outs) can be
-+ * filtered out. My benchmark shows this is more efficient than KSM's
-+ * per-page, hash-value-based volatile page detection.
-+ *
-+ *
-+ * 5. Misc changes upon KSM:
-+ * * It has a fully x86-optimized memcmp dedicated to 4-byte-aligned page
-+ * comparison. It's much faster than the default C version on x86.
-+ * * rmap_item now has a struct page * member to loosely cache an
-+ * address->page mapping, which avoids many time-costly calls to
-+ * follow_page().
-+ * * The VMA creation/exit procedures are hooked to let the Ultra KSM know.
-+ * * try_to_merge_two_pages() can now revert a pte if it fails, so no
-+ * break_ksm is needed in this case.
-+ *
-+ * 6. Full zero page consideration (contributed by Figo Zhang)
-+ * uksmd now considers full zero pages as special pages and merges them
-+ * into a special unswappable uksm zero page.
-+ */
-+
-+ChangeLog:
-+
-+2012-05-05 The creation of this Doc
-+2012-05-08 UKSM 0.1.1.1 libc crash bug fix, api clean up, doc clean up.
-+2012-05-28 UKSM 0.1.1.2 bug fix release
-+2012-06-26 UKSM 0.1.2-beta1 first beta release for 0.1.2
-+2012-07-2 UKSM 0.1.2-beta2
-+2012-07-10 UKSM 0.1.2-beta3
-+2012-07-26 UKSM 0.1.2 Fine grained speed control, more scan optimization.
-+2012-10-13 UKSM 0.1.2.1 Bug fixes.
-+2012-12-31 UKSM 0.1.2.2 Minor bug fixes.
-+2014-07-02 UKSM 0.1.2.3 Fix a " __this_cpu_read() in preemptible bug".
-+2015-04-22 UKSM 0.1.2.4 Fix a race condition that can sometimes trigger annoying warnings.
-+2016-09-10 UKSM 0.1.2.5 Fix a bug in dedup ratio calculation.
-+2017-02-26 UKSM 0.1.2.6 Fix a bug in hugetlbpage handling and a race bug with page migration.
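The "full system scan" point above is the key behavioral difference from stock KSM: no madvise(MADV_MERGEABLE) call is required. A minimal sketch of a process whose duplicate pages uksmd would find on its own (the sleep just gives the scanner time; actual merge latency depends on the governor settings):

    /* Two anonymous VMAs with identical non-zero content; under UKSM they
     * are candidates for merging without any opt-in from the process. */
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
            size_t len = 64UL * 1024 * 1024;
            char *a = mmap(NULL, len, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            char *b = mmap(NULL, len, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

            if (a == MAP_FAILED || b == MAP_FAILED)
                    return 1;

            memset(a, 0x5a, len);   /* identical, non-zero content ... */
            memset(b, 0x5a, len);   /* ... in two separate anonymous VMAs */

            /* No madvise(MADV_MERGEABLE): uksmd scans these on its own. */
            sleep(30);              /* give uksmd time to scan and merge */
            return 0;
    }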
-diff -Nur a/fs/exec.c b/fs/exec.c
---- a/fs/exec.c 2018-05-26 19:24:34.831782903 +0100
-+++ b/fs/exec.c 2018-05-26 19:31:18.404873956 +0100
-@@ -63,6 +63,7 @@
- #include <linux/compat.h>
- #include <linux/vmalloc.h>
- #include <linux/random.h>
-+#include <linux/ksm.h>
-
- #include <linux/uaccess.h>
- #include <asm/mmu_context.h>
-@@ -1377,6 +1378,7 @@
- /* An exec changes our domain. We are no longer part of the thread
- group */
- current->self_exec_id++;
-+
- flush_signal_handlers(current, 0);
- }
- EXPORT_SYMBOL(setup_new_exec);
-diff -Nur a/fs/proc/meminfo.c b/fs/proc/meminfo.c
---- a/fs/proc/meminfo.c 2018-05-25 15:18:02.000000000 +0100
-+++ b/fs/proc/meminfo.c 2018-05-26 19:30:55.784140344 +0100
-@@ -118,6 +118,10 @@
- global_zone_page_state(NR_KERNEL_STACK_KB));
- show_val_kb(m, "PageTables: ",
- global_zone_page_state(NR_PAGETABLE));
-+#ifdef CONFIG_UKSM
-+ show_val_kb(m, "KsmZeroPages: ",
-+ global_zone_page_state(NR_UKSM_ZERO_PAGES));
-+#endif
- #ifdef CONFIG_QUICKLIST
- show_val_kb(m, "Quicklists: ", quicklist_total_size());
- #endif
-diff -Nur a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
---- a/include/asm-generic/pgtable.h 2018-05-25 15:18:02.000000000 +0100
-+++ b/include/asm-generic/pgtable.h 2018-05-26 19:30:55.784140344 +0100
-@@ -789,12 +789,25 @@
- extern void untrack_pfn_moved(struct vm_area_struct *vma);
- #endif
-
-+#ifdef CONFIG_UKSM
-+static inline int is_uksm_zero_pfn(unsigned long pfn)
-+{
-+ extern unsigned long uksm_zero_pfn;
-+ return pfn == uksm_zero_pfn;
-+}
-+#else
-+static inline int is_uksm_zero_pfn(unsigned long pfn)
-+{
-+ return 0;
-+}
-+#endif
-+
- #ifdef __HAVE_COLOR_ZERO_PAGE
- static inline int is_zero_pfn(unsigned long pfn)
- {
- extern unsigned long zero_pfn;
- unsigned long offset_from_zero_pfn = pfn - zero_pfn;
-- return offset_from_zero_pfn <= (zero_page_mask >> PAGE_SHIFT);
-+ return offset_from_zero_pfn <= (zero_page_mask >> PAGE_SHIFT) || is_uksm_zero_pfn(pfn);
- }
-
- #define my_zero_pfn(addr) page_to_pfn(ZERO_PAGE(addr))
-@@ -803,7 +816,7 @@
- static inline int is_zero_pfn(unsigned long pfn)
- {
- extern unsigned long zero_pfn;
-- return pfn == zero_pfn;
-+ return (pfn == zero_pfn) || (is_uksm_zero_pfn(pfn));
- }
-
- static inline unsigned long my_zero_pfn(unsigned long addr)
-diff -Nur a/include/linux/ksm.h b/include/linux/ksm.h
---- a/include/linux/ksm.h 2018-05-25 15:18:02.000000000 +0100
-+++ b/include/linux/ksm.h 2018-05-26 19:30:55.784140344 +0100
-@@ -21,21 +21,6 @@
- #ifdef CONFIG_KSM
- int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
- unsigned long end, int advice, unsigned long *vm_flags);
--int __ksm_enter(struct mm_struct *mm);
--void __ksm_exit(struct mm_struct *mm);
--
--static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
--{
-- if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags))
-- return __ksm_enter(mm);
-- return 0;
--}
--
--static inline void ksm_exit(struct mm_struct *mm)
--{
-- if (test_bit(MMF_VM_MERGEABLE, &mm->flags))
-- __ksm_exit(mm);
--}
-
- static inline struct stable_node *page_stable_node(struct page *page)
- {
-@@ -65,6 +50,33 @@
- void rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc);
- void ksm_migrate_page(struct page *newpage, struct page *oldpage);
-
-+#ifdef CONFIG_KSM_LEGACY
-+int __ksm_enter(struct mm_struct *mm);
-+void __ksm_exit(struct mm_struct *mm);
-+static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
-+{
-+ if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags))
-+ return __ksm_enter(mm);
-+ return 0;
-+}
-+
-+static inline void ksm_exit(struct mm_struct *mm)
-+{
-+ if (test_bit(MMF_VM_MERGEABLE, &mm->flags))
-+ __ksm_exit(mm);
-+}
-+
-+#elif defined(CONFIG_UKSM)
-+static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
-+{
-+ return 0;
-+}
-+
-+static inline void ksm_exit(struct mm_struct *mm)
-+{
-+}
-+#endif /* !CONFIG_UKSM */
-+
- #else /* !CONFIG_KSM */
-
- static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
-@@ -106,4 +118,6 @@
- #endif /* CONFIG_MMU */
- #endif /* !CONFIG_KSM */
-
-+#include <linux/uksm.h>
-+
- #endif /* __LINUX_KSM_H */
-diff -Nur a/include/linux/mm_types.h b/include/linux/mm_types.h
---- a/include/linux/mm_types.h 2018-05-25 15:18:02.000000000 +0100
-+++ b/include/linux/mm_types.h 2018-05-26 19:30:55.784140344 +0100
-@@ -337,6 +337,9 @@
- struct mempolicy *vm_policy; /* NUMA policy for the VMA */
- #endif
- struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
-+#ifdef CONFIG_UKSM
-+ struct vma_slot *uksm_vma_slot;
-+#endif
- } __randomize_layout;
-
- struct core_thread {
-diff -Nur a/include/linux/mmzone.h b/include/linux/mmzone.h
---- a/include/linux/mmzone.h 2018-05-25 15:18:02.000000000 +0100
-+++ b/include/linux/mmzone.h 2018-05-26 19:30:55.785140376 +0100
-@@ -148,6 +148,9 @@
- NR_ZSPAGES, /* allocated in zsmalloc */
- #endif
- NR_FREE_CMA_PAGES,
-+#ifdef CONFIG_UKSM
-+ NR_UKSM_ZERO_PAGES,
-+#endif
- NR_VM_ZONE_STAT_ITEMS };
-
- enum node_stat_item {
-@@ -872,7 +875,7 @@
- }
-
- /**
-- * is_highmem - helper function to quickly check if a struct zone is a
-+ * is_highmem - helper function to quickly check if a struct zone is a
- * highmem zone or not. This is an attempt to keep references
- * to ZONE_{DMA/NORMAL/HIGHMEM/etc} in general code to a minimum.
- * @zone - pointer to struct zone variable
-diff -Nur a/include/linux/sradix-tree.h b/include/linux/sradix-tree.h
---- a/include/linux/sradix-tree.h 1970-01-01 01:00:00.000000000 +0100
-+++ b/include/linux/sradix-tree.h 2018-05-26 19:30:55.785140376 +0100
-@@ -0,0 +1,77 @@
-+#ifndef _LINUX_SRADIX_TREE_H
-+#define _LINUX_SRADIX_TREE_H
-+
-+
-+#define INIT_SRADIX_TREE(root, mask) \
-+do { \
-+ (root)->height = 0; \
-+ (root)->gfp_mask = (mask); \
-+ (root)->rnode = NULL; \
-+} while (0)
-+
-+#define ULONG_BITS (sizeof(unsigned long) * 8)
-+#define SRADIX_TREE_INDEX_BITS (8 /* CHAR_BIT */ * sizeof(unsigned long))
-+//#define SRADIX_TREE_MAP_SHIFT 6
-+//#define SRADIX_TREE_MAP_SIZE (1UL << SRADIX_TREE_MAP_SHIFT)
-+//#define SRADIX_TREE_MAP_MASK (SRADIX_TREE_MAP_SIZE-1)
-+
-+struct sradix_tree_node {
-+ unsigned int height; /* Height from the bottom */
-+ unsigned int count;
-+ unsigned int fulls; /* Number of full sublevel trees */
-+ struct sradix_tree_node *parent;
-+ void *stores[0];
-+};
-+
-+/* A simple radix tree implementation */
-+struct sradix_tree_root {
-+ unsigned int height;
-+ struct sradix_tree_node *rnode;
-+
-+	/* Node last found to have available empty stores in its sublevels */
-+ struct sradix_tree_node *enter_node;
-+ unsigned int shift;
-+ unsigned int stores_size;
-+ unsigned int mask;
-+ unsigned long min; /* The first hole index */
-+ unsigned long num;
-+ //unsigned long *height_to_maxindex;
-+
-+ /* How the node is allocated and freed. */
-+ struct sradix_tree_node *(*alloc)(void);
-+ void (*free)(struct sradix_tree_node *node);
-+
-+ /* When a new node is added and removed */
-+ void (*extend)(struct sradix_tree_node *parent, struct sradix_tree_node *child);
-+ void (*assign)(struct sradix_tree_node *node, unsigned int index, void *item);
-+ void (*rm)(struct sradix_tree_node *node, unsigned int offset);
-+};
-+
-+struct sradix_tree_path {
-+ struct sradix_tree_node *node;
-+ int offset;
-+};
-+
-+static inline
-+void init_sradix_tree_root(struct sradix_tree_root *root, unsigned long shift)
-+{
-+ root->height = 0;
-+ root->rnode = NULL;
-+ root->shift = shift;
-+ root->stores_size = 1UL << shift;
-+ root->mask = root->stores_size - 1;
-+}
-+
-+
-+extern void *sradix_tree_next(struct sradix_tree_root *root,
-+ struct sradix_tree_node *node, unsigned long index,
-+ int (*iter)(void *, unsigned long));
-+
-+extern int sradix_tree_enter(struct sradix_tree_root *root, void **item, int num);
-+
-+extern void sradix_tree_delete_from_leaf(struct sradix_tree_root *root,
-+ struct sradix_tree_node *node, unsigned long index);
-+
-+extern void *sradix_tree_lookup(struct sradix_tree_root *root, unsigned long index);
-+
-+#endif /* _LINUX_SRADIX_TREE_H */
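The header above is callback-driven: a client embeds struct sradix_tree_node inside its own node type and supplies alloc/free hooks (extend/assign/rm are checked for NULL before being called, so they are optional). A kernel-style sketch of the instantiation pattern, modeled on the slot_tree_* code in mm/uksm.c later in this patch:

    #include <linux/slab.h>
    #include <linux/sradix-tree.h>

    #define MY_SHIFT 8                      /* 256 stores per node */
    #define MY_STORES (1UL << MY_SHIFT)

    struct my_tree_node {
            struct sradix_tree_node snode;
            void *stores[MY_STORES];        /* backs snode.stores[] */
    };

    static struct sradix_tree_node *my_node_alloc(void)
    {
            struct my_tree_node *p = kzalloc(sizeof(*p), GFP_KERNEL);

            return p ? &p->snode : NULL;
    }

    static void my_node_free(struct sradix_tree_node *node)
    {
            kfree(container_of(node, struct my_tree_node, snode));
    }

    static void my_tree_init(struct sradix_tree_root *root)
    {
            init_sradix_tree_root(root, MY_SHIFT);
            root->alloc = my_node_alloc;
            root->free = my_node_free;
            /* extend/assign/rm hooks are optional and may stay NULL */
    }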
-diff -Nur a/include/linux/uksm.h b/include/linux/uksm.h
---- a/include/linux/uksm.h 1970-01-01 01:00:00.000000000 +0100
-+++ b/include/linux/uksm.h 2018-05-26 19:30:55.785140376 +0100
-@@ -0,0 +1,149 @@
-+#ifndef __LINUX_UKSM_H
-+#define __LINUX_UKSM_H
-+/*
-+ * Memory merging support.
-+ *
-+ * This code enables dynamic sharing of identical pages found in different
-+ * memory areas, even if they are not shared by fork().
-+ */
-+
-+/* if !CONFIG_UKSM this file should not be compiled at all. */
-+#ifdef CONFIG_UKSM
-+
-+#include <linux/bitops.h>
-+#include <linux/mm.h>
-+#include <linux/pagemap.h>
-+#include <linux/rmap.h>
-+#include <linux/sched.h>
-+
-+extern unsigned long zero_pfn __read_mostly;
-+extern unsigned long uksm_zero_pfn __read_mostly;
-+extern struct page *empty_uksm_zero_page;
-+
-+/* must be done before linked to mm */
-+extern void uksm_vma_add_new(struct vm_area_struct *vma);
-+extern void uksm_remove_vma(struct vm_area_struct *vma);
-+
-+#define UKSM_SLOT_NEED_SORT (1 << 0)
-+#define UKSM_SLOT_NEED_RERAND (1 << 1)
-+#define UKSM_SLOT_SCANNED (1 << 2) /* It's scanned in this round */
-+#define UKSM_SLOT_FUL_SCANNED (1 << 3)
-+#define UKSM_SLOT_IN_UKSM (1 << 4)
-+
-+struct vma_slot {
-+ struct sradix_tree_node *snode;
-+ unsigned long sindex;
-+
-+ struct list_head slot_list;
-+ unsigned long fully_scanned_round;
-+ unsigned long dedup_num;
-+ unsigned long pages_scanned;
-+ unsigned long this_sampled;
-+ unsigned long last_scanned;
-+ unsigned long pages_to_scan;
-+ struct scan_rung *rung;
-+ struct page **rmap_list_pool;
-+ unsigned int *pool_counts;
-+ unsigned long pool_size;
-+ struct vm_area_struct *vma;
-+ struct mm_struct *mm;
-+ unsigned long ctime_j;
-+ unsigned long pages;
-+ unsigned long flags;
-+ unsigned long pages_cowed; /* pages cowed this round */
-+ unsigned long pages_merged; /* pages merged this round */
-+ unsigned long pages_bemerged;
-+
-+ /* when it has page merged in this eval round */
-+ struct list_head dedup_list;
-+};
-+
-+static inline void uksm_unmap_zero_page(pte_t pte)
-+{
-+ if (pte_pfn(pte) == uksm_zero_pfn)
-+ __dec_zone_page_state(empty_uksm_zero_page, NR_UKSM_ZERO_PAGES);
-+}
-+
-+static inline void uksm_map_zero_page(pte_t pte)
-+{
-+ if (pte_pfn(pte) == uksm_zero_pfn)
-+ __inc_zone_page_state(empty_uksm_zero_page, NR_UKSM_ZERO_PAGES);
-+}
-+
-+static inline void uksm_cow_page(struct vm_area_struct *vma, struct page *page)
-+{
-+ if (vma->uksm_vma_slot && PageKsm(page))
-+ vma->uksm_vma_slot->pages_cowed++;
-+}
-+
-+static inline void uksm_cow_pte(struct vm_area_struct *vma, pte_t pte)
-+{
-+ if (vma->uksm_vma_slot && pte_pfn(pte) == uksm_zero_pfn)
-+ vma->uksm_vma_slot->pages_cowed++;
-+}
-+
-+static inline int uksm_flags_can_scan(unsigned long vm_flags)
-+{
-+#ifdef VM_SAO
-+ if (vm_flags & VM_SAO)
-+ return 0;
-+#endif
-+
-+ return !(vm_flags & (VM_PFNMAP | VM_IO | VM_DONTEXPAND |
-+ VM_HUGETLB | VM_MIXEDMAP | VM_SHARED
-+ | VM_MAYSHARE | VM_GROWSUP | VM_GROWSDOWN));
-+}
-+
-+static inline void uksm_vm_flags_mod(unsigned long *vm_flags_p)
-+{
-+ if (uksm_flags_can_scan(*vm_flags_p))
-+ *vm_flags_p |= VM_MERGEABLE;
-+}
-+
-+/*
-+ * Just a wrapper around BUG_ON for places where the uksm zero page must not
-+ * appear. TODO: it will be removed when the uksm zero page patch is stable enough.
-+ */
-+static inline void uksm_bugon_zeropage(pte_t pte)
-+{
-+ BUG_ON(pte_pfn(pte) == uksm_zero_pfn);
-+}
-+#else
-+static inline void uksm_vma_add_new(struct vm_area_struct *vma)
-+{
-+}
-+
-+static inline void uksm_remove_vma(struct vm_area_struct *vma)
-+{
-+}
-+
-+static inline void uksm_unmap_zero_page(pte_t pte)
-+{
-+}
-+
-+static inline void uksm_map_zero_page(pte_t pte)
-+{
-+}
-+
-+static inline void uksm_cow_page(struct vm_area_struct *vma, struct page *page)
-+{
-+}
-+
-+static inline void uksm_cow_pte(struct vm_area_struct *vma, pte_t pte)
-+{
-+}
-+
-+static inline int uksm_flags_can_scan(unsigned long vm_flags)
-+{
-+ return 0;
-+}
-+
-+static inline void uksm_vm_flags_mod(unsigned long *vm_flags_p)
-+{
-+}
-+
-+static inline void uksm_bugon_zeropage(pte_t pte)
-+{
-+}
-+#endif /* !CONFIG_UKSM */
-+#endif /* __LINUX_UKSM_H */
-diff -Nur a/kernel/fork.c b/kernel/fork.c
---- a/kernel/fork.c 2018-05-26 19:24:34.840783196 +0100
-+++ b/kernel/fork.c 2018-05-26 19:30:55.785140376 +0100
-@@ -655,7 +655,7 @@
- goto fail_nomem;
- charge = len;
- }
-- tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
-+ tmp = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
- if (!tmp)
- goto fail_nomem;
- *tmp = *mpnt;
-@@ -714,7 +714,7 @@
- __vma_link_rb(mm, tmp, rb_link, rb_parent);
- rb_link = &tmp->vm_rb.rb_right;
- rb_parent = &tmp->vm_rb;
--
-+ uksm_vma_add_new(tmp);
- mm->map_count++;
- if (!(tmp->vm_flags & VM_WIPEONFORK))
- retval = copy_page_range(mm, oldmm, mpnt);
-diff -Nur a/lib/Makefile b/lib/Makefile
---- a/lib/Makefile 2018-05-25 15:18:02.000000000 +0100
-+++ b/lib/Makefile 2018-05-26 19:30:55.786140408 +0100
-@@ -18,7 +18,7 @@
- KCOV_INSTRUMENT_dynamic_debug.o := n
-
- lib-y := ctype.o string.o vsprintf.o cmdline.o \
-- rbtree.o radix-tree.o dump_stack.o timerqueue.o\
-+ rbtree.o radix-tree.o sradix-tree.o dump_stack.o timerqueue.o\
- idr.o int_sqrt.o extable.o \
- sha1.o chacha20.o irq_regs.o argv_split.o \
- flex_proportions.o ratelimit.o show_mem.o \
-diff -Nur a/lib/sradix-tree.c b/lib/sradix-tree.c
---- a/lib/sradix-tree.c 1970-01-01 01:00:00.000000000 +0100
-+++ b/lib/sradix-tree.c 2018-05-26 19:30:55.786140408 +0100
-@@ -0,0 +1,476 @@
-+#include <linux/errno.h>
-+#include <linux/mm.h>
-+#include <linux/mman.h>
-+#include <linux/spinlock.h>
-+#include <linux/slab.h>
-+#include <linux/gcd.h>
-+#include <linux/sradix-tree.h>
-+
-+static inline int sradix_node_full(struct sradix_tree_root *root, struct sradix_tree_node *node)
-+{
-+ return node->fulls == root->stores_size ||
-+ (node->height == 1 && node->count == root->stores_size);
-+}
-+
-+/*
-+ * Extend a sradix tree so it can store key @index.
-+ */
-+static int sradix_tree_extend(struct sradix_tree_root *root, unsigned long index)
-+{
-+ struct sradix_tree_node *node;
-+ unsigned int height;
-+
-+ if (unlikely(root->rnode == NULL)) {
-+ if (!(node = root->alloc()))
-+ return -ENOMEM;
-+
-+ node->height = 1;
-+ root->rnode = node;
-+ root->height = 1;
-+ }
-+
-+ /* Figure out what the height should be. */
-+ height = root->height;
-+ index >>= root->shift * height;
-+
-+ while (index) {
-+ index >>= root->shift;
-+ height++;
-+ }
-+
-+ while (height > root->height) {
-+ unsigned int newheight;
-+
-+ if (!(node = root->alloc()))
-+ return -ENOMEM;
-+
-+ /* Increase the height. */
-+ node->stores[0] = root->rnode;
-+ root->rnode->parent = node;
-+ if (root->extend)
-+ root->extend(node, root->rnode);
-+
-+ newheight = root->height + 1;
-+ node->height = newheight;
-+ node->count = 1;
-+ if (sradix_node_full(root, root->rnode))
-+ node->fulls = 1;
-+
-+ root->rnode = node;
-+ root->height = newheight;
-+ }
-+
-+ return 0;
-+}
-+
-+/*
-+ * Search for the next item from the current node that is not NULL
-+ * and can satisfy root->iter().
-+ */
-+void *sradix_tree_next(struct sradix_tree_root *root,
-+ struct sradix_tree_node *node, unsigned long index,
-+ int (*iter)(void *item, unsigned long height))
-+{
-+ unsigned long offset;
-+ void *item;
-+
-+ if (unlikely(node == NULL)) {
-+ node = root->rnode;
-+ for (offset = 0; offset < root->stores_size; offset++) {
-+ item = node->stores[offset];
-+ if (item && (!iter || iter(item, node->height)))
-+ break;
-+ }
-+
-+ if (unlikely(offset >= root->stores_size))
-+ return NULL;
-+
-+ if (node->height == 1)
-+ return item;
-+ else
-+ goto go_down;
-+ }
-+
-+ while (node) {
-+ offset = (index & root->mask) + 1;
-+ for (; offset < root->stores_size; offset++) {
-+ item = node->stores[offset];
-+ if (item && (!iter || iter(item, node->height)))
-+ break;
-+ }
-+
-+ if (offset < root->stores_size)
-+ break;
-+
-+ node = node->parent;
-+ index >>= root->shift;
-+ }
-+
-+ if (!node)
-+ return NULL;
-+
-+ while (node->height > 1) {
-+go_down:
-+ node = item;
-+ for (offset = 0; offset < root->stores_size; offset++) {
-+ item = node->stores[offset];
-+ if (item && (!iter || iter(item, node->height)))
-+ break;
-+ }
-+
-+ if (unlikely(offset >= root->stores_size))
-+ return NULL;
-+ }
-+
-+ BUG_ON(offset > root->stores_size);
-+
-+ return item;
-+}
-+
-+/*
-+ * Blindly insert the item into the tree. Typically, we reuse the
-+ * first empty store item.
-+ */
-+int sradix_tree_enter(struct sradix_tree_root *root, void **item, int num)
-+{
-+ unsigned long index;
-+ unsigned int height;
-+ struct sradix_tree_node *node, *tmp = NULL;
-+ int offset, offset_saved;
-+ void **store = NULL;
-+ int error, i, j, shift;
-+
-+go_on:
-+ index = root->min;
-+
-+ if (root->enter_node && !sradix_node_full(root, root->enter_node)) {
-+ node = root->enter_node;
-+ BUG_ON((index >> (root->shift * root->height)));
-+ } else {
-+ node = root->rnode;
-+ if (node == NULL || (index >> (root->shift * root->height))
-+ || sradix_node_full(root, node)) {
-+ error = sradix_tree_extend(root, index);
-+ if (error)
-+ return error;
-+
-+ node = root->rnode;
-+ }
-+ }
-+
-+
-+ height = node->height;
-+ shift = (height - 1) * root->shift;
-+ offset = (index >> shift) & root->mask;
-+ while (shift > 0) {
-+ offset_saved = offset;
-+ for (; offset < root->stores_size; offset++) {
-+ store = &node->stores[offset];
-+ tmp = *store;
-+
-+ if (!tmp || !sradix_node_full(root, tmp))
-+ break;
-+ }
-+ BUG_ON(offset >= root->stores_size);
-+
-+ if (offset != offset_saved) {
-+ index += (offset - offset_saved) << shift;
-+ index &= ~((1UL << shift) - 1);
-+ }
-+
-+ if (!tmp) {
-+ if (!(tmp = root->alloc()))
-+ return -ENOMEM;
-+
-+ tmp->height = shift / root->shift;
-+ *store = tmp;
-+ tmp->parent = node;
-+ node->count++;
-+// if (root->extend)
-+// root->extend(node, tmp);
-+ }
-+
-+ node = tmp;
-+ shift -= root->shift;
-+ offset = (index >> shift) & root->mask;
-+ }
-+
-+ BUG_ON(node->height != 1);
-+
-+
-+ store = &node->stores[offset];
-+ for (i = 0, j = 0;
-+ j < root->stores_size - node->count &&
-+ i < root->stores_size - offset && j < num; i++) {
-+ if (!store[i]) {
-+ store[i] = item[j];
-+ if (root->assign)
-+ root->assign(node, index + i, item[j]);
-+ j++;
-+ }
-+ }
-+
-+ node->count += j;
-+ root->num += j;
-+ num -= j;
-+
-+ while (sradix_node_full(root, node)) {
-+ node = node->parent;
-+ if (!node)
-+ break;
-+
-+ node->fulls++;
-+ }
-+
-+ if (unlikely(!node)) {
-+ /* All nodes are full */
-+ root->min = 1 << (root->height * root->shift);
-+ root->enter_node = NULL;
-+ } else {
-+ root->min = index + i - 1;
-+ root->min |= (1UL << (node->height - 1)) - 1;
-+ root->min++;
-+ root->enter_node = node;
-+ }
-+
-+ if (num) {
-+ item += j;
-+ goto go_on;
-+ }
-+
-+ return 0;
-+}
-+
-+
-+/**
-+ * sradix_tree_shrink - shrink the height of a sradix tree to the minimum
-+ * @root: sradix tree root
-+ *
-+ */
-+static inline void sradix_tree_shrink(struct sradix_tree_root *root)
-+{
-+ /* try to shrink tree height */
-+ while (root->height > 1) {
-+ struct sradix_tree_node *to_free = root->rnode;
-+
-+ /*
-+ * The candidate node has more than one child, or its child
-+ * is not at the leftmost store, we cannot shrink.
-+ */
-+ if (to_free->count != 1 || !to_free->stores[0])
-+ break;
-+
-+ root->rnode = to_free->stores[0];
-+ root->rnode->parent = NULL;
-+ root->height--;
-+ if (unlikely(root->enter_node == to_free))
-+ root->enter_node = NULL;
-+ root->free(to_free);
-+ }
-+}
-+
-+/*
-+ * Del the item on the known leaf node and index
-+ */
-+void sradix_tree_delete_from_leaf(struct sradix_tree_root *root,
-+ struct sradix_tree_node *node, unsigned long index)
-+{
-+ unsigned int offset;
-+ struct sradix_tree_node *start, *end;
-+
-+ BUG_ON(node->height != 1);
-+
-+ start = node;
-+ while (node && !(--node->count))
-+ node = node->parent;
-+
-+ end = node;
-+ if (!node) {
-+ root->rnode = NULL;
-+ root->height = 0;
-+ root->min = 0;
-+ root->num = 0;
-+ root->enter_node = NULL;
-+ } else {
-+ offset = (index >> (root->shift * (node->height - 1))) & root->mask;
-+ if (root->rm)
-+ root->rm(node, offset);
-+ node->stores[offset] = NULL;
-+ root->num--;
-+ if (root->min > index) {
-+ root->min = index;
-+ root->enter_node = node;
-+ }
-+ }
-+
-+ if (start != end) {
-+ do {
-+ node = start;
-+ start = start->parent;
-+ if (unlikely(root->enter_node == node))
-+ root->enter_node = end;
-+ root->free(node);
-+ } while (start != end);
-+
-+ /*
-+		 * Note that shrink may free "end", so enter_node still needs to
-+		 * be checked inside.
-+ */
-+ sradix_tree_shrink(root);
-+ } else if (node->count == root->stores_size - 1) {
-+ /* It WAS a full leaf node. Update the ancestors */
-+ node = node->parent;
-+ while (node) {
-+ node->fulls--;
-+ if (node->fulls != root->stores_size - 1)
-+ break;
-+
-+ node = node->parent;
-+ }
-+ }
-+}
-+
-+void *sradix_tree_lookup(struct sradix_tree_root *root, unsigned long index)
-+{
-+ unsigned int height, offset;
-+ struct sradix_tree_node *node;
-+ int shift;
-+
-+ node = root->rnode;
-+ if (node == NULL || (index >> (root->shift * root->height)))
-+ return NULL;
-+
-+ height = root->height;
-+ shift = (height - 1) * root->shift;
-+
-+ do {
-+ offset = (index >> shift) & root->mask;
-+ node = node->stores[offset];
-+ if (!node)
-+ return NULL;
-+
-+ shift -= root->shift;
-+ } while (shift >= 0);
-+
-+ return node;
-+}
-+
-+/*
-+ * Return the item if it exists, otherwise create it in place
-+ * and return the created item.
-+ */
-+void *sradix_tree_lookup_create(struct sradix_tree_root *root,
-+ unsigned long index, void *(*item_alloc)(void))
-+{
-+ unsigned int height, offset;
-+ struct sradix_tree_node *node, *tmp;
-+ void *item;
-+ int shift, error;
-+
-+ if (root->rnode == NULL || (index >> (root->shift * root->height))) {
-+ if (item_alloc) {
-+ error = sradix_tree_extend(root, index);
-+ if (error)
-+ return NULL;
-+ } else {
-+ return NULL;
-+ }
-+ }
-+
-+ node = root->rnode;
-+ height = root->height;
-+ shift = (height - 1) * root->shift;
-+
-+ do {
-+ offset = (index >> shift) & root->mask;
-+ if (!node->stores[offset]) {
-+ if (!(tmp = root->alloc()))
-+ return NULL;
-+
-+ tmp->height = shift / root->shift;
-+ node->stores[offset] = tmp;
-+ tmp->parent = node;
-+ node->count++;
-+ node = tmp;
-+ } else {
-+ node = node->stores[offset];
-+ }
-+
-+ shift -= root->shift;
-+ } while (shift > 0);
-+
-+ BUG_ON(node->height != 1);
-+ offset = index & root->mask;
-+ if (node->stores[offset]) {
-+ return node->stores[offset];
-+ } else if (item_alloc) {
-+ if (!(item = item_alloc()))
-+ return NULL;
-+
-+ node->stores[offset] = item;
-+
-+ /*
-+		 * NOTE: we do NOT call root->assign here, since this item was
-+		 * just created by us and carries no meaning yet. The caller can
-+		 * do so afterwards if necessary.
-+ */
-+
-+ node->count++;
-+ root->num++;
-+
-+ while (sradix_node_full(root, node)) {
-+ node = node->parent;
-+ if (!node)
-+ break;
-+
-+ node->fulls++;
-+ }
-+
-+ if (unlikely(!node)) {
-+ /* All nodes are full */
-+ root->min = 1 << (root->height * root->shift);
-+ } else {
-+ if (root->min == index) {
-+ root->min |= (1UL << (node->height - 1)) - 1;
-+ root->min++;
-+ root->enter_node = node;
-+ }
-+ }
-+
-+ return item;
-+ } else {
-+ return NULL;
-+ }
-+
-+}
-+
-+int sradix_tree_delete(struct sradix_tree_root *root, unsigned long index)
-+{
-+ unsigned int height, offset;
-+ struct sradix_tree_node *node;
-+ int shift;
-+
-+ node = root->rnode;
-+ if (node == NULL || (index >> (root->shift * root->height)))
-+ return -ENOENT;
-+
-+ height = root->height;
-+ shift = (height - 1) * root->shift;
-+
-+ do {
-+ offset = (index >> shift) & root->mask;
-+ node = node->stores[offset];
-+ if (!node)
-+ return -ENOENT;
-+
-+ shift -= root->shift;
-+ } while (shift > 0);
-+
-+ offset = index & root->mask;
-+ if (!node->stores[offset])
-+ return -ENOENT;
-+
-+ sradix_tree_delete_from_leaf(root, node, index);
-+
-+ return 0;
-+}
-diff -Nur a/mm/Kconfig b/mm/Kconfig
---- a/mm/Kconfig 2018-05-26 19:24:34.846783391 +0100
-+++ b/mm/Kconfig 2018-05-26 19:30:55.786140408 +0100
-@@ -315,6 +315,32 @@
- See Documentation/vm/ksm.txt for more information: KSM is inactive
- until a program has madvised that an area is MADV_MERGEABLE, and
- root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
-+choice
-+ prompt "Choose UKSM/KSM strategy"
-+ default UKSM
-+ depends on KSM
-+ help
-+	  This option allows selecting a UKSM/KSM strategy.
-+
-+config UKSM
-+ bool "Ultra-KSM for page merging"
-+ depends on KSM
-+ help
-+	  UKSM is inspired by the Linux kernel's KSM (Kernel Samepage
-+	  Merging), but with a fundamentally rewritten core algorithm. With
-+	  this advanced algorithm, UKSM can transparently scan all anonymously
-+	  mapped user space applications with significantly improved scan speed
-+	  and CPU efficiency. Since KVM is friendly to KSM, KVM can also
-+	  benefit from UKSM. UKSM has had its first stable release and its
-+	  first real-world enterprise user. For more information, please
-+	  visit its project page (www.kerneldedup.org).
-+
-+config KSM_LEGACY
-+ bool "Legacy KSM implementation"
-+ depends on KSM
-+ help
-+ The legacy KSM implementation from Red Hat.
-+endchoice
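With CONFIG_UKSM selected, the uksmd daemon starts in merging mode (uksm_run defaults to UKSM_RUN_MERGE further down in this patch) and can be inspected or toggled at run time through sysfs. A sketch of checking the run state; the path /sys/kernel/mm/uksm/run is an assumption based on this patch's sysfs naming:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            char buf[8] = "";
            int fd = open("/sys/kernel/mm/uksm/run", O_RDONLY);

            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            if (read(fd, buf, sizeof(buf) - 1) > 0)
                    printf("uksm run state: %s", buf);  /* "1" == merging */
            close(fd);
            return 0;
    }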
-
- config DEFAULT_MMAP_MIN_ADDR
- int "Low address space to protect from user allocation"
-diff -Nur a/mm/Makefile b/mm/Makefile
---- a/mm/Makefile 2018-05-25 15:18:02.000000000 +0100
-+++ b/mm/Makefile 2018-05-26 19:30:55.786140408 +0100
-@@ -65,7 +65,8 @@
- obj-$(CONFIG_SPARSEMEM_VMEMMAP) += sparse-vmemmap.o
- obj-$(CONFIG_SLOB) += slob.o
- obj-$(CONFIG_MMU_NOTIFIER) += mmu_notifier.o
--obj-$(CONFIG_KSM) += ksm.o
-+obj-$(CONFIG_KSM_LEGACY) += ksm.o
-+obj-$(CONFIG_UKSM) += uksm.o
- obj-$(CONFIG_PAGE_POISONING) += page_poison.o
- obj-$(CONFIG_SLAB) += slab.o
- obj-$(CONFIG_SLUB) += slub.o
-diff -Nur a/mm/memory.c b/mm/memory.c
---- a/mm/memory.c 2018-05-25 15:18:02.000000000 +0100
-+++ b/mm/memory.c 2018-05-26 19:30:55.787140441 +0100
-@@ -129,6 +129,25 @@
-
- unsigned long highest_memmap_pfn __read_mostly;
-
-+#ifdef CONFIG_UKSM
-+unsigned long uksm_zero_pfn __read_mostly;
-+EXPORT_SYMBOL_GPL(uksm_zero_pfn);
-+struct page *empty_uksm_zero_page;
-+
-+static int __init setup_uksm_zero_page(void)
-+{
-+ empty_uksm_zero_page = alloc_pages(__GFP_ZERO & ~__GFP_MOVABLE, 0);
-+ if (!empty_uksm_zero_page)
-+ panic("Oh boy, that early out of memory?");
-+
-+ SetPageReserved(empty_uksm_zero_page);
-+ uksm_zero_pfn = page_to_pfn(empty_uksm_zero_page);
-+
-+ return 0;
-+}
-+core_initcall(setup_uksm_zero_page);
-+#endif
-+
- /*
- * CONFIG_MMU architectures set up ZERO_PAGE in their paging_init()
- */
-@@ -140,6 +159,7 @@
- core_initcall(init_zero_pfn);
-
-
-+
- #if defined(SPLIT_RSS_COUNTING)
-
- void sync_mm_rss(struct mm_struct *mm)
-@@ -1035,6 +1055,9 @@
- get_page(page);
- page_dup_rmap(page, false);
- rss[mm_counter(page)]++;
-+
-+ /* Should return NULL in vm_normal_page() */
-+ uksm_bugon_zeropage(pte);
- } else if (pte_devmap(pte)) {
- page = pte_page(pte);
-
-@@ -1048,6 +1071,8 @@
- page_dup_rmap(page, false);
- rss[mm_counter(page)]++;
- }
-+ } else {
-+ uksm_map_zero_page(pte);
- }
-
- out_set_pte:
-@@ -1317,8 +1342,10 @@
- ptent = ptep_get_and_clear_full(mm, addr, pte,
- tlb->fullmm);
- tlb_remove_tlb_entry(tlb, pte, addr);
-- if (unlikely(!page))
-+ if (unlikely(!page)) {
-+ uksm_unmap_zero_page(ptent);
- continue;
-+ }
-
- if (!PageAnon(page)) {
- if (pte_dirty(ptent)) {
-@@ -2318,8 +2345,10 @@
- clear_page(kaddr);
- kunmap_atomic(kaddr);
- flush_dcache_page(dst);
-- } else
-+ } else {
- copy_user_highpage(dst, src, va, vma);
-+ uksm_cow_page(vma, src);
-+ }
- }
-
- static gfp_t __get_fault_gfp_mask(struct vm_area_struct *vma)
-@@ -2468,6 +2497,7 @@
- vmf->address);
- if (!new_page)
- goto oom;
-+ uksm_cow_pte(vma, vmf->orig_pte);
- } else {
- new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma,
- vmf->address);
-@@ -2494,7 +2524,9 @@
- mm_counter_file(old_page));
- inc_mm_counter_fast(mm, MM_ANONPAGES);
- }
-+ uksm_bugon_zeropage(vmf->orig_pte);
- } else {
-+ uksm_unmap_zero_page(vmf->orig_pte);
- inc_mm_counter_fast(mm, MM_ANONPAGES);
- }
- flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
-diff -Nur a/mm/mmap.c b/mm/mmap.c
---- a/mm/mmap.c 2018-05-26 19:24:34.847783423 +0100
-+++ b/mm/mmap.c 2018-05-26 19:30:55.788140473 +0100
-@@ -45,6 +45,7 @@
- #include <linux/moduleparam.h>
- #include <linux/pkeys.h>
- #include <linux/oom.h>
-+#include <linux/ksm.h>
-
- #include <linux/uaccess.h>
- #include <asm/cacheflush.h>
-@@ -173,6 +174,7 @@
- if (vma->vm_file)
- fput(vma->vm_file);
- mpol_put(vma_policy(vma));
-+ uksm_remove_vma(vma);
- kmem_cache_free(vm_area_cachep, vma);
- return next;
- }
-@@ -699,9 +701,16 @@
- long adjust_next = 0;
- int remove_next = 0;
-
-+/*
-+ * To avoid deadlock, uksm_remove_vma() must be called before any
-+ * spinlock is acquired.
-+ */
-+ uksm_remove_vma(vma);
-+
- if (next && !insert) {
- struct vm_area_struct *exporter = NULL, *importer = NULL;
-
-+ uksm_remove_vma(next);
- if (end >= next->vm_end) {
- /*
- * vma expands, overlapping all the next, and
-@@ -834,6 +843,7 @@
- end_changed = true;
- }
- vma->vm_pgoff = pgoff;
-+
- if (adjust_next) {
- next->vm_start += adjust_next << PAGE_SHIFT;
- next->vm_pgoff += adjust_next;
-@@ -939,6 +949,7 @@
- if (remove_next == 2) {
- remove_next = 1;
- end = next->vm_end;
-+ uksm_remove_vma(next);
- goto again;
- }
- else if (next)
-@@ -965,10 +976,14 @@
- */
- VM_WARN_ON(mm->highest_vm_end != vm_end_gap(vma));
- }
-+ } else {
-+ if (next && !insert)
-+ uksm_vma_add_new(next);
- }
- if (insert && file)
- uprobe_mmap(insert);
-
-+ uksm_vma_add_new(vma);
- validate_mm(mm);
-
- return 0;
-@@ -1385,6 +1400,9 @@
- vm_flags |= calc_vm_prot_bits(prot, pkey) | calc_vm_flag_bits(flags) |
- mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
-
-+ /* If uksm is enabled, we add VM_MERGEABLE to new VMAs. */
-+ uksm_vm_flags_mod(&vm_flags);
-+
- if (flags & MAP_LOCKED)
- if (!can_do_mlock())
- return -EPERM;
-@@ -1724,6 +1742,7 @@
- allow_write_access(file);
- }
- file = vma->vm_file;
-+ uksm_vma_add_new(vma);
- out:
- perf_event_mmap(vma);
-
-@@ -1765,6 +1784,7 @@
- if (vm_flags & VM_DENYWRITE)
- allow_write_access(file);
- free_vma:
-+ uksm_remove_vma(vma);
- kmem_cache_free(vm_area_cachep, vma);
- unacct_error:
- if (charged)
-@@ -2589,6 +2609,8 @@
- else
- err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
-
-+ uksm_vma_add_new(new);
-+
- /* Success. */
- if (!err)
- return 0;
-@@ -2881,6 +2903,7 @@
- if ((flags & (~VM_EXEC)) != 0)
- return -EINVAL;
- flags |= VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
-+ uksm_vm_flags_mod(&flags);
-
- error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
- if (offset_in_page(error))
-@@ -2938,6 +2961,7 @@
- vma->vm_flags = flags;
- vma->vm_page_prot = vm_get_page_prot(flags);
- vma_link(mm, vma, prev, rb_link, rb_parent);
-+ uksm_vma_add_new(vma);
- out:
- perf_event_mmap(vma);
- mm->total_vm += len >> PAGE_SHIFT;
-@@ -3015,6 +3039,12 @@
- up_write(&mm->mmap_sem);
- }
-
-+ /*
-+ * Taking write lock on mmap_sem does not harm others,
-+ * but it's crucial for uksm to avoid races.
-+ */
-+ down_write(&mm->mmap_sem);
-+
- if (mm->locked_vm) {
- vma = mm->mmap;
- while (vma) {
-@@ -3049,6 +3079,11 @@
- vma = remove_vma(vma);
- }
- vm_unacct_memory(nr_accounted);
-+
-+ mm->mmap = NULL;
-+ mm->mm_rb = RB_ROOT;
-+ vmacache_invalidate(mm);
-+ up_write(&mm->mmap_sem);
- }
-
- /* Insert vm structure into process list sorted by address
-@@ -3158,6 +3193,7 @@
- new_vma->vm_ops->open(new_vma);
- vma_link(mm, new_vma, prev, rb_link, rb_parent);
- *need_rmap_locks = false;
-+ uksm_vma_add_new(new_vma);
- }
- return new_vma;
-
-@@ -3308,6 +3344,7 @@
- vm_stat_account(mm, vma->vm_flags, len >> PAGE_SHIFT);
-
- perf_event_mmap(vma);
-+ uksm_vma_add_new(vma);
-
- return vma;
-
-diff -Nur a/mm/rmap.c b/mm/rmap.c
---- a/mm/rmap.c 2018-05-25 15:18:02.000000000 +0100
-+++ b/mm/rmap.c 2018-05-26 19:30:55.788140473 +0100
-@@ -1013,9 +1013,9 @@
-
- /**
- * __page_set_anon_rmap - set up new anonymous rmap
-- * @page: Page to add to rmap
-+ * @page: Page to add to rmap
- * @vma: VM area to add page to.
-- * @address: User virtual address of the mapping
-+ * @address: User virtual address of the mapping
- * @exclusive: the page is exclusively owned by the current process
- */
- static void __page_set_anon_rmap(struct page *page,
-diff -Nur a/mm/uksm.c b/mm/uksm.c
---- a/mm/uksm.c 1970-01-01 01:00:00.000000000 +0100
-+++ b/mm/uksm.c 2018-05-26 19:30:55.791140570 +0100
-@@ -0,0 +1,5584 @@
-+/*
-+ * Ultra KSM. Copyright (C) 2011-2012 Nai Xia
-+ *
-+ * This is an improvement upon KSM. Some basic data structures and routines
-+ * are borrowed from ksm.c .
-+ *
-+ * Its new features:
-+ * 1. Full system scan:
-+ * It automatically scans all user processes' anonymous VMAs. Kernel-user
-+ * interaction to submit a memory area to KSM is no longer needed.
-+ *
-+ * 2. Rich area detection:
-+ * It automatically detects rich areas containing abundant duplicated
-+ * pages based. Rich areas are given a full scan speed. Poor areas are
-+ * sampled at a reasonable speed with very low CPU consumption.
-+ *
-+ * 3. Ultra Per-page scan speed improvement:
-+ * A new hash algorithm is proposed. As a result, on a machine with
-+ * Core(TM)2 Quad Q9300 CPU in 32-bit mode and 800MHZ DDR2 main memory, it
-+ * can scan memory areas that does not contain duplicated pages at speed of
-+ * 627MB/sec ~ 2445MB/sec and can merge duplicated areas at speed of
-+ * 477MB/sec ~ 923MB/sec.
-+ *
-+ * 4. Thrashing area avoidance:
-+ * Thrashing area(an VMA that has frequent Ksm page break-out) can be
-+ * filtered out. My benchmark shows it's more efficient than KSM's per-page
-+ * hash value based volatile page detection.
-+ *
-+ *
-+ * 5. Misc changes upon KSM:
-+ * * It has a fully x86-opitmized memcmp dedicated for 4-byte-aligned page
-+ * comparison. It's much faster than default C version on x86.
-+ * * rmap_item now has an struct *page member to loosely cache a
-+ * address-->page mapping, which reduces too much time-costly
-+ * follow_page().
-+ * * The VMA creation/exit procedures are hooked to let the Ultra KSM know.
-+ * * try_to_merge_two_pages() now can revert a pte if it fails. No break_
-+ * ksm is needed for this case.
-+ *
-+ * 6. Full Zero Page consideration(contributed by Figo Zhang)
-+ * Now uksmd consider full zero pages as special pages and merge them to an
-+ * special unswappable uksm zero page.
-+ */
-+
-+#include <linux/errno.h>
-+#include <linux/mm.h>
-+#include <linux/fs.h>
-+#include <linux/mman.h>
-+#include <linux/sched.h>
-+#include <linux/sched/mm.h>
-+#include <linux/sched/coredump.h>
-+#include <linux/sched/cputime.h>
-+#include <linux/rwsem.h>
-+#include <linux/pagemap.h>
-+#include <linux/rmap.h>
-+#include <linux/spinlock.h>
-+#include <linux/jhash.h>
-+#include <linux/delay.h>
-+#include <linux/kthread.h>
-+#include <linux/wait.h>
-+#include <linux/slab.h>
-+#include <linux/rbtree.h>
-+#include <linux/memory.h>
-+#include <linux/mmu_notifier.h>
-+#include <linux/swap.h>
-+#include <linux/ksm.h>
-+#include <linux/crypto.h>
-+#include <linux/scatterlist.h>
-+#include <crypto/hash.h>
-+#include <linux/random.h>
-+#include <linux/math64.h>
-+#include <linux/gcd.h>
-+#include <linux/freezer.h>
-+#include <linux/oom.h>
-+#include <linux/numa.h>
-+#include <linux/sradix-tree.h>
-+
-+#include <asm/tlbflush.h>
-+#include "internal.h"
-+
-+#ifdef CONFIG_X86
-+#undef memcmp
-+
-+#ifdef CONFIG_X86_32
-+#define memcmp memcmpx86_32
-+/*
-+ * Compare 4-byte-aligned address s1 and s2, with length n
-+ */
-+int memcmpx86_32(void *s1, void *s2, size_t n)
-+{
-+ size_t num = n / 4;
-+ register int res;
-+
-+ __asm__ __volatile__
-+ (
-+ "testl %3,%3\n\t"
-+ "repe; cmpsd\n\t"
-+ "je 1f\n\t"
-+ "sbbl %0,%0\n\t"
-+ "orl $1,%0\n"
-+ "1:"
-+ : "=&a" (res), "+&S" (s1), "+&D" (s2), "+&c" (num)
-+ : "0" (0)
-+ : "cc");
-+
-+ return res;
-+}
-+
-+/*
-+ * Check whether the page is all zero.
-+ */
-+static int is_full_zero(const void *s1, size_t len)
-+{
-+ unsigned char same;
-+
-+ len /= 4;
-+
-+ __asm__ __volatile__
-+ ("repe; scasl;"
-+ "sete %0"
-+ : "=qm" (same), "+D" (s1), "+c" (len)
-+ : "a" (0)
-+ : "cc");
-+
-+ return same;
-+}
-+
-+
-+#elif defined(CONFIG_X86_64)
-+#define memcmp memcmpx86_64
-+/*
-+ * Compare 8-byte-aligned address s1 and s2, with length n
-+ */
-+int memcmpx86_64(void *s1, void *s2, size_t n)
-+{
-+ size_t num = n / 8;
-+ register int res;
-+
-+ __asm__ __volatile__
-+ (
-+ "testq %q3,%q3\n\t"
-+ "repe; cmpsq\n\t"
-+ "je 1f\n\t"
-+ "sbbq %q0,%q0\n\t"
-+ "orq $1,%q0\n"
-+ "1:"
-+ : "=&a" (res), "+&S" (s1), "+&D" (s2), "+&c" (num)
-+ : "0" (0)
-+ : "cc");
-+
-+ return res;
-+}
-+
-+static int is_full_zero(const void *s1, size_t len)
-+{
-+ unsigned char same;
-+
-+ len /= 8;
-+
-+ __asm__ __volatile__
-+ ("repe; scasq;"
-+ "sete %0"
-+ : "=qm" (same), "+D" (s1), "+c" (len)
-+ : "a" (0)
-+ : "cc");
-+
-+ return same;
-+}
-+
-+#endif
-+#else
-+static int is_full_zero(const void *s1, size_t len)
-+{
-+ unsigned long *src = s1;
-+ int i;
-+
-+ len /= sizeof(*src);
-+
-+ for (i = 0; i < len; i++) {
-+ if (src[i])
-+ return 0;
-+ }
-+
-+ return 1;
-+}
-+#endif
-+
-+#define UKSM_RUNG_ROUND_FINISHED (1 << 0)
-+#define TIME_RATIO_SCALE 10000
-+
-+#define SLOT_TREE_NODE_SHIFT 8
-+#define SLOT_TREE_NODE_STORE_SIZE (1UL << SLOT_TREE_NODE_SHIFT)
-+struct slot_tree_node {
-+ unsigned long size;
-+ struct sradix_tree_node snode;
-+ void *stores[SLOT_TREE_NODE_STORE_SIZE];
-+};
-+
-+static struct kmem_cache *slot_tree_node_cachep;
-+
-+static struct sradix_tree_node *slot_tree_node_alloc(void)
-+{
-+ struct slot_tree_node *p;
-+
-+ p = kmem_cache_zalloc(slot_tree_node_cachep, GFP_KERNEL |
-+ __GFP_NORETRY | __GFP_NOWARN);
-+ if (!p)
-+ return NULL;
-+
-+ return &p->snode;
-+}
-+
-+static void slot_tree_node_free(struct sradix_tree_node *node)
-+{
-+ struct slot_tree_node *p;
-+
-+ p = container_of(node, struct slot_tree_node, snode);
-+ kmem_cache_free(slot_tree_node_cachep, p);
-+}
-+
-+static void slot_tree_node_extend(struct sradix_tree_node *parent,
-+ struct sradix_tree_node *child)
-+{
-+ struct slot_tree_node *p, *c;
-+
-+ p = container_of(parent, struct slot_tree_node, snode);
-+ c = container_of(child, struct slot_tree_node, snode);
-+
-+ p->size += c->size;
-+}
-+
-+void slot_tree_node_assign(struct sradix_tree_node *node,
-+ unsigned int index, void *item)
-+{
-+ struct vma_slot *slot = item;
-+ struct slot_tree_node *cur;
-+
-+ slot->snode = node;
-+ slot->sindex = index;
-+
-+ while (node) {
-+ cur = container_of(node, struct slot_tree_node, snode);
-+ cur->size += slot->pages;
-+ node = node->parent;
-+ }
-+}
-+
-+void slot_tree_node_rm(struct sradix_tree_node *node, unsigned int offset)
-+{
-+ struct vma_slot *slot;
-+ struct slot_tree_node *cur;
-+ unsigned long pages;
-+
-+ if (node->height == 1) {
-+ slot = node->stores[offset];
-+ pages = slot->pages;
-+ } else {
-+ cur = container_of(node->stores[offset],
-+ struct slot_tree_node, snode);
-+ pages = cur->size;
-+ }
-+
-+ while (node) {
-+ cur = container_of(node, struct slot_tree_node, snode);
-+ cur->size -= pages;
-+ node = node->parent;
-+ }
-+}
-+
-+unsigned long slot_iter_index;
-+int slot_iter(void *item, unsigned long height)
-+{
-+ struct slot_tree_node *node;
-+ struct vma_slot *slot;
-+
-+ if (height == 1) {
-+ slot = item;
-+ if (slot_iter_index < slot->pages) {
-+ /*in this one*/
-+ return 1;
-+ } else {
-+ slot_iter_index -= slot->pages;
-+ return 0;
-+ }
-+
-+ } else {
-+ node = container_of(item, struct slot_tree_node, snode);
-+ if (slot_iter_index < node->size) {
-+ /*in this one*/
-+ return 1;
-+ } else {
-+ slot_iter_index -= node->size;
-+ return 0;
-+ }
-+ }
-+}
-+
-+
-+static inline void slot_tree_init_root(struct sradix_tree_root *root)
-+{
-+ init_sradix_tree_root(root, SLOT_TREE_NODE_SHIFT);
-+ root->alloc = slot_tree_node_alloc;
-+ root->free = slot_tree_node_free;
-+ root->extend = slot_tree_node_extend;
-+ root->assign = slot_tree_node_assign;
-+ root->rm = slot_tree_node_rm;
-+}
-+
-+void slot_tree_init(void)
-+{
-+ slot_tree_node_cachep = kmem_cache_create("slot_tree_node",
-+ sizeof(struct slot_tree_node), 0,
-+ SLAB_PANIC | SLAB_RECLAIM_ACCOUNT,
-+ NULL);
-+}
-+
-+
-+/* Each rung of this ladder is a list of VMAs having a same scan ratio */
-+struct scan_rung {
-+ //struct list_head scanned_list;
-+ struct sradix_tree_root vma_root;
-+ struct sradix_tree_root vma_root2;
-+
-+ struct vma_slot *current_scan;
-+ unsigned long current_offset;
-+
-+ /*
-+	 * The initial value for current_offset; it should loop over
-+	 * [0 ~ step - 1] to let every slot have a chance to be scanned.
-+ */
-+ unsigned long offset_init;
-+ unsigned long step; /* dynamic step for current_offset */
-+ unsigned int flags;
-+ unsigned long pages_to_scan;
-+ //unsigned long fully_scanned_slots;
-+ /*
-+	 * A little bit tricky: if cpu_time_ratio > 0, then the value is
-+	 * the cpu time ratio it can spend in rung_i for every scan
-+	 * period. If < 0, then it is the cpu time ratio relative to the
-+	 * max cpu percentage the user specified. Both are in units of
-+	 * 1/TIME_RATIO_SCALE.
-+ */
-+ int cpu_ratio;
-+
-+ /*
-+	 * How long will it take for all slots in this rung to be fully
-+	 * scanned? If it's zero, we don't care about the cover time:
-+ * it's fully scanned.
-+ */
-+ unsigned int cover_msecs;
-+ //unsigned long vma_num;
-+ //unsigned long pages; /* Sum of all slot's pages in rung */
-+};
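To make the cpu_ratio convention concrete, here is a small worked example using numbers from the uksm_cpu_preset table further down in this file. The helper reflects my reading of the comment above, so treat it as illustrative rather than quoted from the scheduler code:

    #include <stdio.h>

    #define TIME_RATIO_SCALE 10000

    /* Effective cpu share of a rung, as a percentage of one cpu. */
    static double rung_cpu_percent(int cpu_ratio, unsigned int max_cpu)
    {
            if (cpu_ratio >= 0)
                    return 100.0 * cpu_ratio / TIME_RATIO_SCALE;
            /* negative: relative to the user's max cpu percentage */
            return (double)(-cpu_ratio) * max_cpu / TIME_RATIO_SCALE;
    }

    int main(void)
    {
            /* "full" preset below: {20, 40, -2500, -10000}, max_cpu 95 */
            printf("%.1f%%\n", rung_cpu_percent(20, 95));      /* 0.2%  */
            printf("%.1f%%\n", rung_cpu_percent(-2500, 95));   /* 23.8% */
            printf("%.1f%%\n", rung_cpu_percent(-10000, 95));  /* 95.0% */
            return 0;
    }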
-+
-+/**
-+ * node of either the stable or unstable rbtree
-+ *
-+ */
-+struct tree_node {
-+ struct rb_node node; /* link in the main (un)stable rbtree */
-+ struct rb_root sub_root; /* rb_root for sublevel collision rbtree */
-+ u32 hash;
-+ unsigned long count; /* TODO: merged with sub_root */
-+ struct list_head all_list; /* all tree nodes in stable/unstable tree */
-+};
-+
-+/**
-+ * struct stable_node - node of the stable rbtree
-+ * @node: rb node of this ksm page in the stable tree
-+ * @hlist: hlist head of rmap_items using this ksm page
-+ * @kpfn: page frame number of this ksm page
-+ */
-+struct stable_node {
-+ struct rb_node node; /* link in sub-rbtree */
-+	struct tree_node *tree_node;	/* its tree node root in the stable tree, NULL if it's in the hell list */
-+ struct hlist_head hlist;
-+ unsigned long kpfn;
-+ u32 hash_max; /* if ==0 then it's not been calculated yet */
-+ struct list_head all_list; /* in a list for all stable nodes */
-+};
-+
-+/**
-+ * struct node_vma - group rmap_items linked in a same stable
-+ * node together.
-+ */
-+struct node_vma {
-+ union {
-+ struct vma_slot *slot;
-+ unsigned long key; /* slot is used as key sorted on hlist */
-+ };
-+ struct hlist_node hlist;
-+ struct hlist_head rmap_hlist;
-+ struct stable_node *head;
-+};
-+
-+/**
-+ * struct rmap_item - reverse mapping item for virtual addresses
-+ * @rmap_list: next rmap_item in mm_slot's singly-linked rmap_list
-+ * @anon_vma: pointer to anon_vma for this mm,address, when in stable tree
-+ * @mm: the memory structure this rmap_item is pointing into
-+ * @address: the virtual address this rmap_item tracks (+ flags in low bits)
-+ * @node: rb node of this rmap_item in the unstable tree
-+ * @head: pointer to stable_node heading this list in the stable tree
-+ * @hlist: link into hlist of rmap_items hanging off that stable_node
-+ */
-+struct rmap_item {
-+ struct vma_slot *slot;
-+ struct page *page;
-+ unsigned long address; /* + low bits used for flags below */
-+ unsigned long hash_round;
-+ unsigned long entry_index;
-+ union {
-+ struct {/* when in unstable tree */
-+ struct rb_node node;
-+ struct tree_node *tree_node;
-+ u32 hash_max;
-+ };
-+ struct { /* when in stable tree */
-+ struct node_vma *head;
-+ struct hlist_node hlist;
-+ struct anon_vma *anon_vma;
-+ };
-+ };
-+} __aligned(4);
-+
-+struct rmap_list_entry {
-+ union {
-+ struct rmap_item *item;
-+ unsigned long addr;
-+ };
-+ /* lowest bit is used for is_addr tag */
-+} __aligned(4); /* 4-aligned to fit into pages */
-+
-+
-+/* Basic data structure definition ends */
-+
-+
-+/*
-+ * Flags for rmap_item to judge if it's listed in the stable/unstable tree.
-+ * The flags use the low bits of rmap_item.address
-+ */
-+#define UNSTABLE_FLAG 0x1
-+#define STABLE_FLAG 0x2
-+#define get_rmap_addr(x) ((x)->address & PAGE_MASK)
-+
-+/*
-+ * rmap_list_entry helpers
-+ */
-+#define IS_ADDR_FLAG 1
-+#define is_addr(ptr) ((unsigned long)(ptr) & IS_ADDR_FLAG)
-+#define set_is_addr(ptr) ((ptr) |= IS_ADDR_FLAG)
-+#define get_clean_addr(ptr) (((ptr) & ~(__typeof__(ptr))IS_ADDR_FLAG))
-+
-+
-+/*
-+ * High speed caches for frequently allocated and freed structs
-+ */
-+static struct kmem_cache *rmap_item_cache;
-+static struct kmem_cache *stable_node_cache;
-+static struct kmem_cache *node_vma_cache;
-+static struct kmem_cache *vma_slot_cache;
-+static struct kmem_cache *tree_node_cache;
-+#define UKSM_KMEM_CACHE(__struct, __flags) kmem_cache_create("uksm_"#__struct,\
-+ sizeof(struct __struct), __alignof__(struct __struct),\
-+ (__flags), NULL)
-+
-+/* Array of all scan_rung, uksm_scan_ladder[0] having the minimum scan ratio */
-+#define SCAN_LADDER_SIZE 4
-+static struct scan_rung uksm_scan_ladder[SCAN_LADDER_SIZE];
-+
-+/* The evaluation rounds uksmd has finished */
-+static unsigned long long uksm_eval_round = 1;
-+
-+/*
-+ * we add 1 to this var when we consider we should rebuild the whole
-+ * unstable tree.
-+ */
-+static unsigned long uksm_hash_round = 1;
-+
-+/*
-+ * How many times the whole memory is scanned.
-+ */
-+static unsigned long long fully_scanned_round = 1;
-+
-+/* The total number of virtual pages of all vma slots */
-+static u64 uksm_pages_total;
-+
-+/* The number of pages that have been scanned since startup */
-+static u64 uksm_pages_scanned;
-+
-+static u64 scanned_virtual_pages;
-+
-+/* The number of pages that have been scanned since the last encode_benefit call */
-+static u64 uksm_pages_scanned_last;
-+
-+/* If the scanned number is too large, we encode it here */
-+static u64 pages_scanned_stored;
-+
-+static unsigned long pages_scanned_base;
-+
-+/* The number of nodes in the stable tree */
-+static unsigned long uksm_pages_shared;
-+
-+/* The number of page slots additionally sharing those nodes */
-+static unsigned long uksm_pages_sharing;
-+
-+/* The number of nodes in the unstable tree */
-+static unsigned long uksm_pages_unshared;
-+
-+/*
-+ * Milliseconds ksmd should sleep between scans,
-+ * >= 100ms to be consistent with
-+ * scan_time_to_sleep_msec()
-+ */
-+static unsigned int uksm_sleep_jiffies;
-+
-+/* The real value for the uksmd next sleep */
-+static unsigned int uksm_sleep_real;
-+
-+/* Saved value for user input uksm_sleep_jiffies when it's enlarged */
-+static unsigned int uksm_sleep_saved;
-+
-+/* Max percentage of cpu utilization ksmd can take to scan in one batch */
-+static unsigned int uksm_max_cpu_percentage;
-+
-+static int uksm_cpu_governor;
-+
-+static char *uksm_cpu_governor_str[4] = { "full", "medium", "low", "quiet" };
-+
-+struct uksm_cpu_preset_s {
-+ int cpu_ratio[SCAN_LADDER_SIZE];
-+ unsigned int cover_msecs[SCAN_LADDER_SIZE];
-+ unsigned int max_cpu; /* percentage */
-+};
-+
-+struct uksm_cpu_preset_s uksm_cpu_preset[4] = {
-+ { {20, 40, -2500, -10000}, {1000, 500, 200, 50}, 95},
-+ { {20, 30, -2500, -10000}, {1000, 500, 400, 100}, 50},
-+ { {10, 20, -5000, -10000}, {1500, 1000, 1000, 250}, 20},
-+ { {10, 20, 40, 75}, {2000, 1000, 1000, 1000}, 1},
-+};
-+
-+/* The default value for uksm_ema_page_time if it's not initialized */
-+#define UKSM_PAGE_TIME_DEFAULT 500
-+
-+/* cost to scan one page, as an exponential moving average, in nsecs */
-+static unsigned long uksm_ema_page_time = UKSM_PAGE_TIME_DEFAULT;
-+
-+/* The exponential moving average alpha weight, in percentage. */
-+#define EMA_ALPHA 20
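These two constants imply the usual exponential moving average update, in which each new sample contributes EMA_ALPHA percent and the history contributes the rest. A sketch of that update, assumed from the names rather than quoted from uksm.c:

    #include <stdio.h>

    #define EMA_ALPHA 20
    #define UKSM_PAGE_TIME_DEFAULT 500

    /* new_ema = (alpha * sample + (100 - alpha) * old_ema) / 100 */
    static unsigned long ema(unsigned long cur, unsigned long last_ema)
    {
            return (EMA_ALPHA * cur + (100 - EMA_ALPHA) * last_ema) / 100;
    }

    int main(void)
    {
            unsigned long page_time = UKSM_PAGE_TIME_DEFAULT; /* nsecs */

            page_time = ema(900, page_time); /* (20*900 + 80*500)/100 = 580 */
            page_time = ema(900, page_time); /* converges toward 900 ns     */
            printf("ema page time: %lu ns\n", page_time);
            return 0;
    }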
-+
-+/*
-+ * The threshold used to filter out thrashing areas.
-+ * If it == 0, filtering is disabled; otherwise it's the percentage upper
-+ * bound of the thrashing ratio of all areas. Any area with a bigger thrashing ratio
-+ * will be considered as having a zero duplication ratio.
-+ */
-+static unsigned int uksm_thrash_threshold = 50;
-+
-+/* How much dedup ratio is considered to be abundant */
-+static unsigned int uksm_abundant_threshold = 10;
-+
-+/* All slots having merged pages in this eval round. */
-+struct list_head vma_slot_dedup = LIST_HEAD_INIT(vma_slot_dedup);
-+
-+/* How many times the ksmd has slept since startup */
-+static unsigned long long uksm_sleep_times;
-+
-+#define UKSM_RUN_STOP 0
-+#define UKSM_RUN_MERGE 1
-+static unsigned int uksm_run = 1;
-+
-+static DECLARE_WAIT_QUEUE_HEAD(uksm_thread_wait);
-+static DEFINE_MUTEX(uksm_thread_mutex);
-+
-+/*
-+ * List vma_slot_new is for newly created vma_slot waiting to be added by
-+ * ksmd. If one cannot be added (e.g. because it's too small), it's moved to
-+ * vma_slot_noadd. vma_slot_del is the list for vma_slot whose corresponding
-+ * VMA has been removed/freed.
-+ */
-+struct list_head vma_slot_new = LIST_HEAD_INIT(vma_slot_new);
-+struct list_head vma_slot_noadd = LIST_HEAD_INIT(vma_slot_noadd);
-+struct list_head vma_slot_del = LIST_HEAD_INIT(vma_slot_del);
-+static DEFINE_SPINLOCK(vma_slot_list_lock);
-+
-+/* The unstable tree heads */
-+static struct rb_root root_unstable_tree = RB_ROOT;
-+
-+/*
-+ * All tree_nodes are in a list to be freed at once when unstable tree is
-+ * freed after each scan round.
-+ */
-+static struct list_head unstable_tree_node_list =
-+ LIST_HEAD_INIT(unstable_tree_node_list);
-+
-+/* List contains all stable nodes */
-+static struct list_head stable_node_list = LIST_HEAD_INIT(stable_node_list);
-+
-+/*
-+ * When the hash strength is changed, the stable tree must be delta_hashed and
-+ * re-structured. We use two sets of the structs below to speed up the
-+ * re-structuring of the stable tree.
-+ */
-+static struct list_head
-+stable_tree_node_list[2] = {LIST_HEAD_INIT(stable_tree_node_list[0]),
-+ LIST_HEAD_INIT(stable_tree_node_list[1])};
-+
-+static struct list_head *stable_tree_node_listp = &stable_tree_node_list[0];
-+static struct rb_root root_stable_tree[2] = {RB_ROOT, RB_ROOT};
-+static struct rb_root *root_stable_treep = &root_stable_tree[0];
-+static unsigned long stable_tree_index;
-+
-+/* The hash strength needed to hash a full page */
-+#define HASH_STRENGTH_FULL (PAGE_SIZE / sizeof(u32))
-+
-+/* The hash strength needed for loop-back hashing */
-+#define HASH_STRENGTH_MAX (HASH_STRENGTH_FULL + 10)
-+
-+/* The random offsets in a page */
-+static u32 *random_nums;
-+
-+/* The hash strength */
-+static unsigned long hash_strength = HASH_STRENGTH_FULL >> 4;
-+
-+/* The delta value each time the hash strength increases or decreases */
-+static unsigned long hash_strength_delta;
-+#define HASH_STRENGTH_DELTA_MAX 5
-+
-+/* The time we have saved due to random_sample_hash */
-+static u64 rshash_pos;
-+
-+/* The time we have wasted due to hash collision */
-+static u64 rshash_neg;
-+
-+struct uksm_benefit {
-+ u64 pos;
-+ u64 neg;
-+ u64 scanned;
-+ unsigned long base;
-+} benefit;
-+
-+/*
-+ * The relative cost of memcmp, compared to 1 time unit of random sample
-+ * hash; this value is measured when the ksm module is initialized
-+ */
-+static unsigned long memcmp_cost;
-+
-+static unsigned long rshash_neg_cont_zero;
-+static unsigned long rshash_cont_obscure;
-+
-+/* The possible states of hash strength adjustment heuristic */
-+enum rshash_states {
-+ RSHASH_STILL,
-+ RSHASH_TRYUP,
-+ RSHASH_TRYDOWN,
-+ RSHASH_NEW,
-+ RSHASH_PRE_STILL,
-+};
-+
-+/* The possible direction we are about to adjust hash strength */
-+enum rshash_direct {
-+ GO_UP,
-+ GO_DOWN,
-+ OBSCURE,
-+ STILL,
-+};
-+
-+/* random sampling hash state machine */
-+static struct {
-+ enum rshash_states state;
-+ enum rshash_direct pre_direct;
-+ u8 below_count;
-+	/* Keep a lookup window of size 5; if above_count/below_count > 3
-+	 * within this window, we stop trying.
-+ */
-+ u8 lookup_window_index;
-+ u64 stable_benefit;
-+ unsigned long turn_point_down;
-+ unsigned long turn_benefit_down;
-+ unsigned long turn_point_up;
-+ unsigned long turn_benefit_up;
-+ unsigned long stable_point;
-+} rshash_state;
-+
-+/* Zero page hash table, indexed by hash_strength in [0, HASH_STRENGTH_MAX] */
-+static u32 *zero_hash_table;
-+
-+static inline struct node_vma *alloc_node_vma(void)
-+{
-+ struct node_vma *node_vma;
-+
-+ node_vma = kmem_cache_zalloc(node_vma_cache, GFP_KERNEL |
-+ __GFP_NORETRY | __GFP_NOWARN);
-+ if (node_vma) {
-+ INIT_HLIST_HEAD(&node_vma->rmap_hlist);
-+ INIT_HLIST_NODE(&node_vma->hlist);
-+ }
-+ return node_vma;
-+}
-+
-+static inline void free_node_vma(struct node_vma *node_vma)
-+{
-+ kmem_cache_free(node_vma_cache, node_vma);
-+}
-+
-+
-+static inline struct vma_slot *alloc_vma_slot(void)
-+{
-+ struct vma_slot *slot;
-+
-+ /*
-+	 * In case uksm is not initialized by now.
-+	 * The call site of uksm_init() may need to be reconsidered in future.
-+ */
-+ if (!vma_slot_cache)
-+ return NULL;
-+
-+ slot = kmem_cache_zalloc(vma_slot_cache, GFP_KERNEL |
-+ __GFP_NORETRY | __GFP_NOWARN);
-+ if (slot) {
-+ INIT_LIST_HEAD(&slot->slot_list);
-+ INIT_LIST_HEAD(&slot->dedup_list);
-+ slot->flags |= UKSM_SLOT_NEED_RERAND;
-+ }
-+ return slot;
-+}
-+
-+static inline void free_vma_slot(struct vma_slot *vma_slot)
-+{
-+ kmem_cache_free(vma_slot_cache, vma_slot);
-+}
-+
-+static inline struct rmap_item *alloc_rmap_item(void)
-+{
-+ struct rmap_item *rmap_item;
-+
-+ rmap_item = kmem_cache_zalloc(rmap_item_cache, GFP_KERNEL |
-+ __GFP_NORETRY | __GFP_NOWARN);
-+ if (rmap_item) {
-+		/* BUG if the lowest bit is not clear; it is reserved for flag use */
-+ BUG_ON(is_addr(rmap_item));
-+ }
-+ return rmap_item;
-+}
-+
-+static inline void free_rmap_item(struct rmap_item *rmap_item)
-+{
-+ rmap_item->slot = NULL; /* debug safety */
-+ kmem_cache_free(rmap_item_cache, rmap_item);
-+}
-+
-+static inline struct stable_node *alloc_stable_node(void)
-+{
-+ struct stable_node *node;
-+
-+ node = kmem_cache_alloc(stable_node_cache, GFP_KERNEL |
-+ __GFP_NORETRY | __GFP_NOWARN);
-+ if (!node)
-+ return NULL;
-+
-+ INIT_HLIST_HEAD(&node->hlist);
-+ list_add(&node->all_list, &stable_node_list);
-+ return node;
-+}
-+
-+static inline void free_stable_node(struct stable_node *stable_node)
-+{
-+ list_del(&stable_node->all_list);
-+ kmem_cache_free(stable_node_cache, stable_node);
-+}
-+
-+static inline struct tree_node *alloc_tree_node(struct list_head *list)
-+{
-+ struct tree_node *node;
-+
-+ node = kmem_cache_zalloc(tree_node_cache, GFP_KERNEL |
-+ __GFP_NORETRY | __GFP_NOWARN);
-+ if (!node)
-+ return NULL;
-+
-+ list_add(&node->all_list, list);
-+ return node;
-+}
-+
-+static inline void free_tree_node(struct tree_node *node)
-+{
-+ list_del(&node->all_list);
-+ kmem_cache_free(tree_node_cache, node);
-+}
-+
-+static void uksm_drop_anon_vma(struct rmap_item *rmap_item)
-+{
-+ struct anon_vma *anon_vma = rmap_item->anon_vma;
-+
-+ put_anon_vma(anon_vma);
-+}
-+
-+
-+/**
-+ * Remove a stable node from the stable_tree; may unlink it from its
-+ * tree_node, and may remove its parent tree_node if no other stable node
-+ * is pending.
-+ *
-+ * @stable_node The node to be removed
-+ * @unlink_rb Will this node be unlinked from the rbtree?
-+ * @remove_tree_node Will its tree_node be removed if empty?
-+ */
-+static void remove_node_from_stable_tree(struct stable_node *stable_node,
-+ int unlink_rb, int remove_tree_node)
-+{
-+ struct node_vma *node_vma;
-+ struct rmap_item *rmap_item;
-+ struct hlist_node *n;
-+
-+ if (!hlist_empty(&stable_node->hlist)) {
-+ hlist_for_each_entry_safe(node_vma, n,
-+ &stable_node->hlist, hlist) {
-+ hlist_for_each_entry(rmap_item, &node_vma->rmap_hlist, hlist) {
-+ uksm_pages_sharing--;
-+
-+ uksm_drop_anon_vma(rmap_item);
-+ rmap_item->address &= PAGE_MASK;
-+ }
-+ free_node_vma(node_vma);
-+ cond_resched();
-+ }
-+
-+ /* the last one is counted as shared */
-+ uksm_pages_shared--;
-+ uksm_pages_sharing++;
-+ }
-+
-+ if (stable_node->tree_node && unlink_rb) {
-+ rb_erase(&stable_node->node,
-+ &stable_node->tree_node->sub_root);
-+
-+ if (RB_EMPTY_ROOT(&stable_node->tree_node->sub_root) &&
-+ remove_tree_node) {
-+ rb_erase(&stable_node->tree_node->node,
-+ root_stable_treep);
-+ free_tree_node(stable_node->tree_node);
-+ } else {
-+ stable_node->tree_node->count--;
-+ }
-+ }
-+
-+ free_stable_node(stable_node);
-+}
-+
-+
-+/*
-+ * get_uksm_page: checks if the page indicated by the stable node
-+ * is still its ksm page, despite our having held no reference to it:
-+ * in which case we can trust the content of the page, and the gotten
-+ * page is returned; but if the page has now been zapped, the stale node
-+ * is removed from the stable tree and NULL is returned.
-+ *
-+ * You would expect the stable_node to hold a reference to the ksm page.
-+ * But if it increments the page's count, swapping out has to wait for
-+ * ksmd to come around again before it can free the page, which may take
-+ * seconds or even minutes: much too unresponsive. So instead we use a
-+ * "keyhole reference": access to the ksm page from the stable node peeps
-+ * out through its keyhole to see if that page still holds the right key,
-+ * pointing back to this stable node. This relies on freeing a PageAnon
-+ * page to reset its page->mapping to NULL, and relies on no other use of
-+ * a page to put something that might look like our key in page->mapping.
-+ *
-+ * include/linux/pagemap.h page_cache_get_speculative() is a good reference,
-+ * but this is different - made simpler by uksm_thread_mutex being held, but
-+ * interesting for assuming that no other use of the struct page could ever
-+ * put our expected_mapping into page->mapping (or a field of the union which
-+ * coincides with page->mapping). The RCU calls are not for KSM at all, but
-+ * to keep the page_count protocol described with page_cache_get_speculative.
-+ *
-+ * Note: it is possible that get_uksm_page() will return NULL one moment,
-+ * then page the next, if the page is in between page_freeze_refs() and
-+ * page_unfreeze_refs(): this shouldn't be a problem anywhere, the page
-+ * is on its way to being freed; but it is an anomaly to bear in mind.
-+ *
-+ * @unlink_rb: if the removal of this node will firstly unlink from
-+ * its rbtree. stable_node_reinsert will prevent this when restructuring the
-+ * node from its old tree.
-+ *
-+ * @remove_tree_node: if this is the last one of its tree_node, will the
-+ * tree_node be freed? If we are inserting a stable node, this tree_node
-+ * may be reused, so don't free it.
-+ */
-+static struct page *get_uksm_page(struct stable_node *stable_node,
-+ int unlink_rb, int remove_tree_node)
-+{
-+ struct page *page;
-+ void *expected_mapping;
-+ unsigned long kpfn;
-+
-+ expected_mapping = (void *)((unsigned long)stable_node |
-+ PAGE_MAPPING_KSM);
-+again:
-+ kpfn = READ_ONCE(stable_node->kpfn);
-+ page = pfn_to_page(kpfn);
-+
-+ /*
-+ * page is computed from kpfn, so on most architectures reading
-+ * page->mapping is naturally ordered after reading node->kpfn,
-+ * but on Alpha we need to be more careful.
-+ */
-+ smp_read_barrier_depends();
-+
-+ if (READ_ONCE(page->mapping) != expected_mapping)
-+ goto stale;
-+
-+ /*
-+ * We cannot do anything with the page while its refcount is 0.
-+ * Usually 0 means free, or tail of a higher-order page: in which
-+ * case this node is no longer referenced, and should be freed;
-+ * however, it might mean that the page is under page_freeze_refs().
-+ * The __remove_mapping() case is easy, again the node is now stale;
-+ * but if page is swapcache in migrate_page_move_mapping(), it might
-+ * still be our page, in which case it's essential to keep the node.
-+ */
-+ while (!get_page_unless_zero(page)) {
-+ /*
-+ * Another check for page->mapping != expected_mapping would
-+ * work here too. We have chosen the !PageSwapCache test to
-+ * optimize the common case, when the page is or is about to
-+ * be freed: PageSwapCache is cleared (under spin_lock_irq)
-+ * in the freeze_refs section of __remove_mapping(); but Anon
-+ * page->mapping reset to NULL later, in free_pages_prepare().
-+ */
-+ if (!PageSwapCache(page))
-+ goto stale;
-+ cpu_relax();
-+ }
-+
-+ if (READ_ONCE(page->mapping) != expected_mapping) {
-+ put_page(page);
-+ goto stale;
-+ }
-+
-+ lock_page(page);
-+ if (READ_ONCE(page->mapping) != expected_mapping) {
-+ unlock_page(page);
-+ put_page(page);
-+ goto stale;
-+ }
-+ unlock_page(page);
-+ return page;
-+stale:
-+ /*
-+ * We come here from above when page->mapping or !PageSwapCache
-+ * suggests that the node is stale; but it might be under migration.
-+ * We need smp_rmb(), matching the smp_wmb() in ksm_migrate_page(),
-+ * before checking whether node->kpfn has been changed.
-+ */
-+ smp_rmb();
-+ if (stable_node->kpfn != kpfn)
-+ goto again;
-+
-+ remove_node_from_stable_tree(stable_node, unlink_rb, remove_tree_node);
-+
-+ return NULL;
-+}
-+
-+/*
-+ * Remove rmap_item from the stable or unstable tree.
-+ * This function cleans the item's information out of the stable/unstable
-+ * tree.
-+ */
-+static inline void remove_rmap_item_from_tree(struct rmap_item *rmap_item)
-+{
-+ if (rmap_item->address & STABLE_FLAG) {
-+ struct stable_node *stable_node;
-+ struct node_vma *node_vma;
-+ struct page *page;
-+
-+ node_vma = rmap_item->head;
-+ stable_node = node_vma->head;
-+ page = get_uksm_page(stable_node, 1, 1);
-+ if (!page)
-+ goto out;
-+
-+ /*
-+ * page lock is needed because it's racing with
-+ * try_to_unmap_ksm(), etc.
-+ */
-+ lock_page(page);
-+ hlist_del(&rmap_item->hlist);
-+
-+ if (hlist_empty(&node_vma->rmap_hlist)) {
-+ hlist_del(&node_vma->hlist);
-+ free_node_vma(node_vma);
-+ }
-+ unlock_page(page);
-+
-+ put_page(page);
-+ if (hlist_empty(&stable_node->hlist)) {
-+			/* Do NOT call remove_node_from_stable_tree() here:
-+			 * it is possible for a forked rmap_item to be absent
-+			 * from the stable tree while the in-tree rmap_items
-+			 * have already been deleted.
-+			 */
-+ uksm_pages_shared--;
-+ } else
-+ uksm_pages_sharing--;
-+
-+
-+ uksm_drop_anon_vma(rmap_item);
-+ } else if (rmap_item->address & UNSTABLE_FLAG) {
-+ if (rmap_item->hash_round == uksm_hash_round) {
-+
-+ rb_erase(&rmap_item->node,
-+ &rmap_item->tree_node->sub_root);
-+ if (RB_EMPTY_ROOT(&rmap_item->tree_node->sub_root)) {
-+ rb_erase(&rmap_item->tree_node->node,
-+ &root_unstable_tree);
-+
-+ free_tree_node(rmap_item->tree_node);
-+ } else
-+ rmap_item->tree_node->count--;
-+ }
-+ uksm_pages_unshared--;
-+ }
-+
-+ rmap_item->address &= PAGE_MASK;
-+ rmap_item->hash_max = 0;
-+
-+out:
-+ cond_resched(); /* we're called from many long loops */
-+}
-+
-+static inline int slot_in_uksm(struct vma_slot *slot)
-+{
-+ return list_empty(&slot->slot_list);
-+}
-+
-+/*
-+ * Test if the mm is exiting
-+ */
-+static inline bool uksm_test_exit(struct mm_struct *mm)
-+{
-+ return atomic_read(&mm->mm_users) == 0;
-+}
-+
-+static inline unsigned long vma_pool_size(struct vma_slot *slot)
-+{
-+ return round_up(sizeof(struct rmap_list_entry) * slot->pages,
-+ PAGE_SIZE) >> PAGE_SHIFT;
-+}
-+
-+#define CAN_OVERFLOW_U64(x, delta) (U64_MAX - (x) < (delta))
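-+
-+/*
-+ * CAN_OVERFLOW_U64(x, delta) is true iff x + delta would wrap a u64.
-+ * Typical guard, as used by the rshash accumulators below:
-+ *
-+ *	if (CAN_OVERFLOW_U64(rshash_pos, delta))
-+ *		encode_benefit();
-+ */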
-+
-+/* must be done with sem locked */
-+static int slot_pool_alloc(struct vma_slot *slot)
-+{
-+ unsigned long pool_size;
-+
-+ if (slot->rmap_list_pool)
-+ return 0;
-+
-+ pool_size = vma_pool_size(slot);
-+ slot->rmap_list_pool = kcalloc(pool_size, sizeof(struct page *),
-+ GFP_KERNEL);
-+ if (!slot->rmap_list_pool)
-+ return -ENOMEM;
-+
-+ slot->pool_counts = kcalloc(pool_size, sizeof(unsigned int),
-+ GFP_KERNEL);
-+ if (!slot->pool_counts) {
-+ kfree(slot->rmap_list_pool);
-+ return -ENOMEM;
-+ }
-+
-+ slot->pool_size = pool_size;
-+ BUG_ON(CAN_OVERFLOW_U64(uksm_pages_total, slot->pages));
-+ slot->flags |= UKSM_SLOT_IN_UKSM;
-+ uksm_pages_total += slot->pages;
-+
-+ return 0;
-+}
-+
-+/*
-+ * Called after vma is unlinked from its mm
-+ */
-+void uksm_remove_vma(struct vm_area_struct *vma)
-+{
-+ struct vma_slot *slot;
-+
-+ if (!vma->uksm_vma_slot)
-+ return;
-+
-+ spin_lock(&vma_slot_list_lock);
-+ slot = vma->uksm_vma_slot;
-+ if (!slot)
-+ goto out;
-+
-+ if (slot_in_uksm(slot)) {
-+ /**
-+		 * This slot has been added by ksmd, so move it to the del
-+		 * list to wait for ksmd to free it.
-+ */
-+ list_add_tail(&slot->slot_list, &vma_slot_del);
-+ } else {
-+ /**
-+		 * It is still on the new list; it is OK to free the slot directly.
-+ */
-+ list_del(&slot->slot_list);
-+ free_vma_slot(slot);
-+ }
-+out:
-+ vma->uksm_vma_slot = NULL;
-+ spin_unlock(&vma_slot_list_lock);
-+}
-+
-+/**
-+ * Need to do two things:
-+ * 1. check if the slot was moved to the del list
-+ * 2. make sure the mmap_sem is manipulated under a valid vma.
-+ *
-+ * One concern here is that, in some cases, this may cause
-+ * vma_slot_list_lock waiters to be serialized further by some
-+ * sem->wait_lock; can this really be expensive?
-+ *
-+ * @return
-+ * 0: if successfully locked mmap_sem
-+ * -ENOENT: this slot was moved to del list
-+ * -EBUSY: vma lock failed
-+ */
-+static int try_down_read_slot_mmap_sem(struct vma_slot *slot)
-+{
-+ struct vm_area_struct *vma;
-+ struct mm_struct *mm;
-+ struct rw_semaphore *sem;
-+
-+ spin_lock(&vma_slot_list_lock);
-+
-+	/* The slot_list entry was removed and re-initialized from the new
-+	 * list when it entered uksm. If it is non-empty now, it must have
-+	 * been moved to the del list.
-+ */
-+ if (!slot_in_uksm(slot)) {
-+ spin_unlock(&vma_slot_list_lock);
-+ return -ENOENT;
-+ }
-+
-+ BUG_ON(slot->pages != vma_pages(slot->vma));
-+ /* Ok, vma still valid */
-+ vma = slot->vma;
-+ mm = vma->vm_mm;
-+ sem = &mm->mmap_sem;
-+
-+ if (uksm_test_exit(mm)) {
-+ spin_unlock(&vma_slot_list_lock);
-+ return -ENOENT;
-+ }
-+
-+ if (down_read_trylock(sem)) {
-+ spin_unlock(&vma_slot_list_lock);
-+ if (slot_pool_alloc(slot)) {
-+ uksm_remove_vma(vma);
-+ up_read(sem);
-+ return -ENOENT;
-+ }
-+ return 0;
-+ }
-+
-+ spin_unlock(&vma_slot_list_lock);
-+ return -EBUSY;
-+}
-+
-+static inline unsigned long
-+vma_page_address(struct page *page, struct vm_area_struct *vma)
-+{
-+ pgoff_t pgoff = page->index;
-+ unsigned long address;
-+
-+ address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
-+ if (unlikely(address < vma->vm_start || address >= vma->vm_end)) {
-+ /* page should be within @vma mapping range */
-+ return -EFAULT;
-+ }
-+ return address;
-+}
-+
-+
-+/* return 0 on success with the item's mmap_sem locked */
-+static inline int get_mergeable_page_lock_mmap(struct rmap_item *item)
-+{
-+ struct mm_struct *mm;
-+ struct vma_slot *slot = item->slot;
-+ int err = -EINVAL;
-+
-+ struct page *page;
-+
-+ /*
-+	 * try_down_read_slot_mmap_sem() returns non-zero if the slot has
-+	 * been removed by uksm_remove_vma() or if the mmap_sem could not
-+	 * be taken without blocking.
-+ */
-+ if (try_down_read_slot_mmap_sem(slot))
-+ return -EBUSY;
-+
-+ mm = slot->vma->vm_mm;
-+
-+ if (uksm_test_exit(mm))
-+ goto failout_up;
-+
-+ page = item->page;
-+ rcu_read_lock();
-+ if (!get_page_unless_zero(page)) {
-+ rcu_read_unlock();
-+ goto failout_up;
-+ }
-+
-+ /* No need to consider huge page here. */
-+ if (item->slot->vma->anon_vma != page_anon_vma(page) ||
-+ vma_page_address(page, item->slot->vma) != get_rmap_addr(item)) {
-+ /*
-+ * TODO:
-+		 * should we release this item because of its stale page
-+ * mapping?
-+ */
-+ put_page(page);
-+ rcu_read_unlock();
-+ goto failout_up;
-+ }
-+ rcu_read_unlock();
-+ return 0;
-+
-+failout_up:
-+ up_read(&mm->mmap_sem);
-+ return err;
-+}
-+
-+/*
-+ * What kind of VMA is considered?
-+ */
-+static inline int vma_can_enter(struct vm_area_struct *vma)
-+{
-+ return uksm_flags_can_scan(vma->vm_flags);
-+}
-+
-+/*
-+ * Called whenever a fresh new vma is created. A new vma_slot
-+ * is created and inserted into a global list. Must be called
-+ * after the vma is inserted into its mm.
-+ */
-+void uksm_vma_add_new(struct vm_area_struct *vma)
-+{
-+ struct vma_slot *slot;
-+
-+ if (!vma_can_enter(vma)) {
-+ vma->uksm_vma_slot = NULL;
-+ return;
-+ }
-+
-+ slot = alloc_vma_slot();
-+ if (!slot) {
-+ vma->uksm_vma_slot = NULL;
-+ return;
-+ }
-+
-+ vma->uksm_vma_slot = slot;
-+ vma->vm_flags |= VM_MERGEABLE;
-+ slot->vma = vma;
-+ slot->mm = vma->vm_mm;
-+ slot->ctime_j = jiffies;
-+ slot->pages = vma_pages(vma);
-+ spin_lock(&vma_slot_list_lock);
-+ list_add_tail(&slot->slot_list, &vma_slot_new);
-+ spin_unlock(&vma_slot_list_lock);
-+}
-+
-+/* 32/3 < they < 32/2 */
-+#define shiftl 8
-+#define shiftr 12
-+
-+#define HASH_FROM_TO(from, to) \
-+for (index = from; index < to; index++) { \
-+ pos = random_nums[index]; \
-+ hash += key[pos]; \
-+ hash += (hash << shiftl); \
-+ hash ^= (hash >> shiftr); \
-+}
-+
-+
-+#define HASH_FROM_DOWN_TO(from, to) \
-+for (index = from - 1; index >= to; index--) { \
-+ hash ^= (hash >> shiftr); \
-+ hash ^= (hash >> (shiftr*2)); \
-+ hash -= (hash << shiftl); \
-+ hash += (hash << (shiftl*2)); \
-+ pos = random_nums[index]; \
-+ hash -= key[pos]; \
-+}
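-+
-+/*
-+ * Why HASH_FROM_DOWN_TO exactly undoes HASH_FROM_TO (mod 2^32):
-+ *
-+ *	hash ^= hash >> shiftr	is inverted by
-+ *	hash ^= hash >> shiftr; hash ^= hash >> (shiftr * 2);
-+ *
-+ * which is exact because 3 * shiftr >= 32, so further shifted copies
-+ * vanish. Likewise, hash += hash << shiftl multiplies by (1 + 2^shiftl),
-+ * and (1 + 2^shiftl)(1 - 2^shiftl)(1 + 2^(2*shiftl)) = 1 - 2^(4*shiftl),
-+ * which is 1 mod 2^32 since 4 * shiftl >= 32. This invertibility is what
-+ * makes the incremental delta_hash() below possible.
-+ */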
-+
-+/*
-+ * The main random sample hash function.
-+ */
-+static u32 random_sample_hash(void *addr, u32 hash_strength)
-+{
-+ u32 hash = 0xdeadbeef;
-+ int index, pos, loop = hash_strength;
-+ u32 *key = (u32 *)addr;
-+
-+ if (loop > HASH_STRENGTH_FULL)
-+ loop = HASH_STRENGTH_FULL;
-+
-+ HASH_FROM_TO(0, loop);
-+
-+ if (hash_strength > HASH_STRENGTH_FULL) {
-+ loop = hash_strength - HASH_STRENGTH_FULL;
-+ HASH_FROM_TO(0, loop);
-+ }
-+
-+ return hash;
-+}
-+
-+
-+/**
-+ * Used when the hash strength is adjusted.
-+ *
-+ * @addr The page's virtual address
-+ * @from The original hash strength
-+ * @to The hash strength changed to
-+ * @hash The hash value generated with the "from" hash strength
-+ *
-+ * return the hash value
-+ */
-+static u32 delta_hash(void *addr, int from, int to, u32 hash)
-+{
-+ u32 *key = (u32 *)addr;
-+ int index, pos; /* make sure they are int type */
-+
-+ if (to > from) {
-+ if (from >= HASH_STRENGTH_FULL) {
-+ from -= HASH_STRENGTH_FULL;
-+ to -= HASH_STRENGTH_FULL;
-+ HASH_FROM_TO(from, to);
-+ } else if (to <= HASH_STRENGTH_FULL) {
-+ HASH_FROM_TO(from, to);
-+ } else {
-+ HASH_FROM_TO(from, HASH_STRENGTH_FULL);
-+ HASH_FROM_TO(0, to - HASH_STRENGTH_FULL);
-+ }
-+ } else {
-+ if (from <= HASH_STRENGTH_FULL) {
-+ HASH_FROM_DOWN_TO(from, to);
-+ } else if (to >= HASH_STRENGTH_FULL) {
-+ from -= HASH_STRENGTH_FULL;
-+ to -= HASH_STRENGTH_FULL;
-+ HASH_FROM_DOWN_TO(from, to);
-+ } else {
-+ HASH_FROM_DOWN_TO(from - HASH_STRENGTH_FULL, 0);
-+ HASH_FROM_DOWN_TO(HASH_STRENGTH_FULL, to);
-+ }
-+ }
-+
-+ return hash;
-+}
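-+
-+/*
-+ * Note that delta_hash() re-hashes a page at a cost proportional to the
-+ * number of sample positions between "from" and "to", rather than a full
-+ * re-hash; page_hash_max() below uses it to upgrade a hash to
-+ * HASH_STRENGTH_MAX cheaply.
-+ */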
-+
-+/**
-+ * Called when rshash_pos or rshash_neg is about to overflow, or a scan
-+ * round has finished.
-+ *
-+ * return 0 if no page has been scanned since the last call, 1 otherwise.
-+ */
-+static inline int encode_benefit(void)
-+{
-+ u64 scanned_delta, pos_delta, neg_delta;
-+ unsigned long base = benefit.base;
-+
-+ scanned_delta = uksm_pages_scanned - uksm_pages_scanned_last;
-+
-+ if (!scanned_delta)
-+ return 0;
-+
-+ scanned_delta >>= base;
-+ pos_delta = rshash_pos >> base;
-+ neg_delta = rshash_neg >> base;
-+
-+ if (CAN_OVERFLOW_U64(benefit.pos, pos_delta) ||
-+ CAN_OVERFLOW_U64(benefit.neg, neg_delta) ||
-+ CAN_OVERFLOW_U64(benefit.scanned, scanned_delta)) {
-+ benefit.scanned >>= 1;
-+ benefit.neg >>= 1;
-+ benefit.pos >>= 1;
-+ benefit.base++;
-+ scanned_delta >>= 1;
-+ pos_delta >>= 1;
-+ neg_delta >>= 1;
-+ }
-+
-+ benefit.pos += pos_delta;
-+ benefit.neg += neg_delta;
-+ benefit.scanned += scanned_delta;
-+
-+ BUG_ON(!benefit.scanned);
-+
-+ rshash_pos = rshash_neg = 0;
-+ uksm_pages_scanned_last = uksm_pages_scanned;
-+
-+ return 1;
-+}
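-+
-+/*
-+ * The benefit counters are thus kept right-shifted by benefit.base bits,
-+ * so the effective totals are roughly the stored values << base. When a
-+ * delta would overflow, everything is halved and base is bumped, which
-+ * preserves the pos/neg/scanned ratios that the hash strength heuristic
-+ * consumes.
-+ */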
-+
-+static inline void reset_benefit(void)
-+{
-+ benefit.pos = 0;
-+ benefit.neg = 0;
-+ benefit.base = 0;
-+ benefit.scanned = 0;
-+}
-+
-+static inline void inc_rshash_pos(unsigned long delta)
-+{
-+ if (CAN_OVERFLOW_U64(rshash_pos, delta))
-+ encode_benefit();
-+
-+ rshash_pos += delta;
-+}
-+
-+static inline void inc_rshash_neg(unsigned long delta)
-+{
-+ if (CAN_OVERFLOW_U64(rshash_neg, delta))
-+ encode_benefit();
-+
-+ rshash_neg += delta;
-+}
-+
-+
-+static inline u32 page_hash(struct page *page, unsigned long hash_strength,
-+ int cost_accounting)
-+{
-+ u32 val;
-+ unsigned long delta;
-+
-+ void *addr = kmap_atomic(page);
-+
-+ val = random_sample_hash(addr, hash_strength);
-+ kunmap_atomic(addr);
-+
-+ if (cost_accounting) {
-+ if (hash_strength < HASH_STRENGTH_FULL)
-+ delta = HASH_STRENGTH_FULL - hash_strength;
-+ else
-+ delta = 0;
-+
-+ inc_rshash_pos(delta);
-+ }
-+
-+ return val;
-+}
-+
-+static int memcmp_pages(struct page *page1, struct page *page2,
-+ int cost_accounting)
-+{
-+ char *addr1, *addr2;
-+ int ret;
-+
-+ addr1 = kmap_atomic(page1);
-+ addr2 = kmap_atomic(page2);
-+ ret = memcmp(addr1, addr2, PAGE_SIZE);
-+ kunmap_atomic(addr2);
-+ kunmap_atomic(addr1);
-+
-+ if (cost_accounting)
-+ inc_rshash_neg(memcmp_cost);
-+
-+ return ret;
-+}
-+
-+static inline int pages_identical(struct page *page1, struct page *page2)
-+{
-+ return !memcmp_pages(page1, page2, 0);
-+}
-+
-+static inline int is_page_full_zero(struct page *page)
-+{
-+ char *addr;
-+ int ret;
-+
-+ addr = kmap_atomic(page);
-+ ret = is_full_zero(addr, PAGE_SIZE);
-+ kunmap_atomic(addr);
-+
-+ return ret;
-+}
-+
-+static int write_protect_page(struct vm_area_struct *vma, struct page *page,
-+ pte_t *orig_pte, pte_t *old_pte)
-+{
-+ struct mm_struct *mm = vma->vm_mm;
-+ struct page_vma_mapped_walk pvmw = {
-+ .page = page,
-+ .vma = vma,
-+ };
-+ int swapped;
-+ int err = -EFAULT;
-+ unsigned long mmun_start; /* For mmu_notifiers */
-+ unsigned long mmun_end; /* For mmu_notifiers */
-+
-+ pvmw.address = page_address_in_vma(page, vma);
-+ if (pvmw.address == -EFAULT)
-+ goto out;
-+
-+ BUG_ON(PageTransCompound(page));
-+
-+ mmun_start = pvmw.address;
-+ mmun_end = pvmw.address + PAGE_SIZE;
-+ mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
-+
-+ if (!page_vma_mapped_walk(&pvmw))
-+ goto out_mn;
-+ if (WARN_ONCE(!pvmw.pte, "Unexpected PMD mapping?"))
-+ goto out_unlock;
-+
-+ if (old_pte)
-+ *old_pte = *pvmw.pte;
-+
-+ if (pte_write(*pvmw.pte) || pte_dirty(*pvmw.pte) ||
-+ (pte_protnone(*pvmw.pte) && pte_savedwrite(*pvmw.pte)) || mm_tlb_flush_pending(mm)) {
-+ pte_t entry;
-+
-+ swapped = PageSwapCache(page);
-+ flush_cache_page(vma, pvmw.address, page_to_pfn(page));
-+ /*
-+		 * Ok this is tricky, when get_user_pages_fast() runs it doesn't
-+		 * take any lock, therefore the check that we are going to make
-+		 * with the pagecount against the mapcount is racy and
-+		 * O_DIRECT can happen right after the check.
-+		 * So we clear the pte and flush the tlb before the check;
-+		 * this assures us that no O_DIRECT can happen after the check
-+		 * or in the middle of the check.
-+ */
-+ entry = ptep_clear_flush_notify(vma, pvmw.address, pvmw.pte);
-+ /*
-+ * Check that no O_DIRECT or similar I/O is in progress on the
-+ * page
-+ */
-+ if (page_mapcount(page) + 1 + swapped != page_count(page)) {
-+ set_pte_at(mm, pvmw.address, pvmw.pte, entry);
-+ goto out_unlock;
-+ }
-+ if (pte_dirty(entry))
-+ set_page_dirty(page);
-+
-+ if (pte_protnone(entry))
-+ entry = pte_mkclean(pte_clear_savedwrite(entry));
-+ else
-+ entry = pte_mkclean(pte_wrprotect(entry));
-+
-+ set_pte_at_notify(mm, pvmw.address, pvmw.pte, entry);
-+ }
-+ *orig_pte = *pvmw.pte;
-+ err = 0;
-+
-+out_unlock:
-+ page_vma_mapped_walk_done(&pvmw);
-+out_mn:
-+ mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
-+out:
-+ return err;
-+}
-+
-+#define MERGE_ERR_PGERR 1 /* the page is invalid, cannot continue */
-+#define MERGE_ERR_COLLI 2 /* there is a collision */
-+#define MERGE_ERR_COLLI_MAX 3 /* collision at the max hash strength */
-+#define MERGE_ERR_CHANGED 4 /* the page has changed since last hash */
-+
-+
-+/**
-+ * replace_page - replace page in vma by new ksm page
-+ * @vma: vma that holds the pte pointing to page
-+ * @page: the page we are replacing by kpage
-+ * @kpage: the ksm page we replace page by
-+ * @orig_pte: the original value of the pte
-+ *
-+ * Returns 0 on success, MERGE_ERR_PGERR on failure.
-+ */
-+static int replace_page(struct vm_area_struct *vma, struct page *page,
-+ struct page *kpage, pte_t orig_pte)
-+{
-+ struct mm_struct *mm = vma->vm_mm;
-+ pgd_t *pgd;
-+ p4d_t *p4d;
-+ pud_t *pud;
-+ pmd_t *pmd;
-+ pte_t *ptep;
-+ spinlock_t *ptl;
-+ pte_t entry;
-+
-+ unsigned long addr;
-+ int err = MERGE_ERR_PGERR;
-+ unsigned long mmun_start; /* For mmu_notifiers */
-+ unsigned long mmun_end; /* For mmu_notifiers */
-+
-+ addr = page_address_in_vma(page, vma);
-+ if (addr == -EFAULT)
-+ goto out;
-+
-+ pgd = pgd_offset(mm, addr);
-+ if (!pgd_present(*pgd))
-+ goto out;
-+
-+ p4d = p4d_offset(pgd, addr);
-+ pud = pud_offset(p4d, addr);
-+ if (!pud_present(*pud))
-+ goto out;
-+
-+ pmd = pmd_offset(pud, addr);
-+ BUG_ON(pmd_trans_huge(*pmd));
-+ if (!pmd_present(*pmd))
-+ goto out;
-+
-+ mmun_start = addr;
-+ mmun_end = addr + PAGE_SIZE;
-+ mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
-+
-+ ptep = pte_offset_map_lock(mm, pmd, addr, &ptl);
-+ if (!pte_same(*ptep, orig_pte)) {
-+ pte_unmap_unlock(ptep, ptl);
-+ goto out_mn;
-+ }
-+
-+ flush_cache_page(vma, addr, pte_pfn(*ptep));
-+ ptep_clear_flush_notify(vma, addr, ptep);
-+ entry = mk_pte(kpage, vma->vm_page_prot);
-+
-+ /* special treatment is needed for zero_page */
-+ if ((page_to_pfn(kpage) == uksm_zero_pfn) ||
-+ (page_to_pfn(kpage) == zero_pfn)) {
-+ entry = pte_mkspecial(entry);
-+ dec_mm_counter(mm, MM_ANONPAGES);
-+ inc_zone_page_state(page, NR_UKSM_ZERO_PAGES);
-+ } else {
-+ get_page(kpage);
-+ page_add_anon_rmap(kpage, vma, addr, false);
-+ }
-+
-+ set_pte_at_notify(mm, addr, ptep, entry);
-+
-+ page_remove_rmap(page, false);
-+ if (!page_mapped(page))
-+ try_to_free_swap(page);
-+ put_page(page);
-+
-+ pte_unmap_unlock(ptep, ptl);
-+ err = 0;
-+out_mn:
-+ mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
-+out:
-+ return err;
-+}
-+
-+
-+/**
-+ * Fully hash a page with HASH_STRENGTH_MAX and return a non-zero hash
-+ * value. A zero hash value at HASH_STRENGTH_MAX is used to indicate that
-+ * the hash_max member has not been calculated.
-+ *
-+ * @page The page to be hashed
-+ * @hash_old The hash value calculated with the current hash strength
-+ *
-+ * return the new hash value calculated at HASH_STRENGTH_MAX
-+ */
-+static inline u32 page_hash_max(struct page *page, u32 hash_old)
-+{
-+ u32 hash_max = 0;
-+ void *addr;
-+
-+ addr = kmap_atomic(page);
-+ hash_max = delta_hash(addr, hash_strength,
-+ HASH_STRENGTH_MAX, hash_old);
-+
-+ kunmap_atomic(addr);
-+
-+ if (!hash_max)
-+ hash_max = 1;
-+
-+ inc_rshash_neg(HASH_STRENGTH_MAX - hash_strength);
-+ return hash_max;
-+}
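-+
-+/*
-+ * The clamp to 1 above preserves the convention that hash_max == 0 means
-+ * "not yet computed": rmap_item_hash_max() and stable_node_hash_max()
-+ * below both rely on a zero hash_max as the lazily-uncomputed marker.
-+ */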
-+
-+/*
-+ * We compare the hash again, to ensure that it is really a hash collision
-+ * instead of being caused by page write.
-+ */
-+static inline int check_collision(struct rmap_item *rmap_item,
-+ u32 hash)
-+{
-+ int err;
-+ struct page *page = rmap_item->page;
-+
-+	/* If this rmap_item has already been hash_maxed, then the collision
-+	 * must appear in the second-level rbtree search. In this case we check
-+	 * whether its hash_max value has been changed. Otherwise, the collision
-+	 * happened in the first-level rbtree search, so we check against its
-+	 * current hash value.
-+ */
-+ if (rmap_item->hash_max) {
-+ inc_rshash_neg(memcmp_cost);
-+ inc_rshash_neg(HASH_STRENGTH_MAX - hash_strength);
-+
-+ if (rmap_item->hash_max == page_hash_max(page, hash))
-+ err = MERGE_ERR_COLLI;
-+ else
-+ err = MERGE_ERR_CHANGED;
-+ } else {
-+ inc_rshash_neg(memcmp_cost + hash_strength);
-+
-+ if (page_hash(page, hash_strength, 0) == hash)
-+ err = MERGE_ERR_COLLI;
-+ else
-+ err = MERGE_ERR_CHANGED;
-+ }
-+
-+ return err;
-+}
-+
-+/**
-+ * Try to merge a rmap_item.page with a kpage in stable node. kpage must
-+ * already be a ksm page.
-+ *
-+ * @return 0 if the pages were merged, a MERGE_ERR_* code otherwise.
-+ */
-+static int try_to_merge_with_uksm_page(struct rmap_item *rmap_item,
-+ struct page *kpage, u32 hash)
-+{
-+ struct vm_area_struct *vma = rmap_item->slot->vma;
-+ struct mm_struct *mm = vma->vm_mm;
-+ pte_t orig_pte = __pte(0);
-+ int err = MERGE_ERR_PGERR;
-+ struct page *page;
-+
-+ if (uksm_test_exit(mm))
-+ goto out;
-+
-+ page = rmap_item->page;
-+
-+ if (page == kpage) { /* ksm page forked */
-+ err = 0;
-+ goto out;
-+ }
-+
-+ /*
-+ * We need the page lock to read a stable PageSwapCache in
-+ * write_protect_page(). We use trylock_page() instead of
-+ * lock_page() because we don't want to wait here - we
-+ * prefer to continue scanning and merging different pages,
-+ * then come back to this page when it is unlocked.
-+ */
-+ if (!trylock_page(page))
-+ goto out;
-+
-+ if (!PageAnon(page) || !PageKsm(kpage))
-+ goto out_unlock;
-+
-+ if (PageTransCompound(page)) {
-+ err = split_huge_page(page);
-+ if (err)
-+ goto out_unlock;
-+ }
-+
-+ /*
-+ * If this anonymous page is mapped only here, its pte may need
-+ * to be write-protected. If it's mapped elsewhere, all of its
-+ * ptes are necessarily already write-protected. But in either
-+ * case, we need to lock and check page_count is not raised.
-+ */
-+ if (write_protect_page(vma, page, &orig_pte, NULL) == 0) {
-+ if (pages_identical(page, kpage))
-+ err = replace_page(vma, page, kpage, orig_pte);
-+ else
-+ err = check_collision(rmap_item, hash);
-+ }
-+
-+ if ((vma->vm_flags & VM_LOCKED) && kpage && !err) {
-+ munlock_vma_page(page);
-+ if (!PageMlocked(kpage)) {
-+ unlock_page(page);
-+ lock_page(kpage);
-+ mlock_vma_page(kpage);
-+ page = kpage; /* for final unlock */
-+ }
-+ }
-+
-+out_unlock:
-+ unlock_page(page);
-+out:
-+ return err;
-+}
-+
-+
-+
-+/**
-+ * If two pages fail to merge in try_to_merge_two_pages, then we have a chance
-+ * to restore a page mapping that has been changed in try_to_merge_two_pages.
-+ *
-+ * @return 0 on success.
-+ */
-+static int restore_uksm_page_pte(struct vm_area_struct *vma, unsigned long addr,
-+ pte_t orig_pte, pte_t wprt_pte)
-+{
-+ struct mm_struct *mm = vma->vm_mm;
-+ pgd_t *pgd;
-+ p4d_t *p4d;
-+ pud_t *pud;
-+ pmd_t *pmd;
-+ pte_t *ptep;
-+ spinlock_t *ptl;
-+
-+ int err = -EFAULT;
-+
-+ pgd = pgd_offset(mm, addr);
-+ if (!pgd_present(*pgd))
-+ goto out;
-+
-+ p4d = p4d_offset(pgd, addr);
-+ pud = pud_offset(p4d, addr);
-+ if (!pud_present(*pud))
-+ goto out;
-+
-+ pmd = pmd_offset(pud, addr);
-+ if (!pmd_present(*pmd))
-+ goto out;
-+
-+ ptep = pte_offset_map_lock(mm, pmd, addr, &ptl);
-+ if (!pte_same(*ptep, wprt_pte)) {
-+ /* already copied, let it be */
-+ pte_unmap_unlock(ptep, ptl);
-+ goto out;
-+ }
-+
-+ /*
-+	 * Still here: while we still hold the ksm page it cannot return to
-+	 * the free page pool, so there is no way a pte could have been
-+	 * changed to another page and then back to this one. Remember also
-+	 * that ksm pages are not reused in do_wp_page(). So it is safe to
-+	 * restore the original pte.
-+ */
-+ flush_cache_page(vma, addr, pte_pfn(*ptep));
-+ ptep_clear_flush_notify(vma, addr, ptep);
-+ set_pte_at_notify(mm, addr, ptep, orig_pte);
-+
-+ pte_unmap_unlock(ptep, ptl);
-+ err = 0;
-+out:
-+ return err;
-+}
-+
-+/**
-+ * try_to_merge_two_pages() - take two identical pages and prepare
-+ * them to be merged into one page (rmap_item->page)
-+ *
-+ * @return 0 if we successfully merged two identical pages into
-+ * one ksm page. MERGE_ERR_COLLI if it was merely a hash collision
-+ * found in the rbtree search. MERGE_ERR_CHANGED if rmap_item has
-+ * been changed since it was hashed. MERGE_ERR_PGERR otherwise.
-+ *
-+ */
-+static int try_to_merge_two_pages(struct rmap_item *rmap_item,
-+ struct rmap_item *tree_rmap_item,
-+ u32 hash)
-+{
-+ pte_t orig_pte1 = __pte(0), orig_pte2 = __pte(0);
-+ pte_t wprt_pte1 = __pte(0), wprt_pte2 = __pte(0);
-+ struct vm_area_struct *vma1 = rmap_item->slot->vma;
-+ struct vm_area_struct *vma2 = tree_rmap_item->slot->vma;
-+ struct page *page = rmap_item->page;
-+ struct page *tree_page = tree_rmap_item->page;
-+ int err = MERGE_ERR_PGERR;
-+ struct address_space *saved_mapping;
-+
-+
-+ if (rmap_item->page == tree_rmap_item->page)
-+ goto out;
-+
-+ if (!trylock_page(page))
-+ goto out;
-+
-+ if (!PageAnon(page))
-+ goto out_unlock;
-+
-+ if (PageTransCompound(page)) {
-+ err = split_huge_page(page);
-+ if (err)
-+ goto out_unlock;
-+ }
-+
-+ if (write_protect_page(vma1, page, &wprt_pte1, &orig_pte1) != 0) {
-+ unlock_page(page);
-+ goto out;
-+ }
-+
-+ /*
-+ * While we hold page lock, upgrade page from
-+ * PageAnon+anon_vma to PageKsm+NULL stable_node:
-+ * stable_tree_insert() will update stable_node.
-+ */
-+ saved_mapping = page->mapping;
-+ set_page_stable_node(page, NULL);
-+ mark_page_accessed(page);
-+ if (!PageDirty(page))
-+ SetPageDirty(page);
-+
-+ unlock_page(page);
-+
-+ if (!trylock_page(tree_page))
-+ goto restore_out;
-+
-+ if (!PageAnon(tree_page)) {
-+ unlock_page(tree_page);
-+ goto restore_out;
-+ }
-+
-+ if (PageTransCompound(tree_page)) {
-+ err = split_huge_page(tree_page);
-+ if (err) {
-+ unlock_page(tree_page);
-+ goto restore_out;
-+ }
-+ }
-+
-+ if (write_protect_page(vma2, tree_page, &wprt_pte2, &orig_pte2) != 0) {
-+ unlock_page(tree_page);
-+ goto restore_out;
-+ }
-+
-+ if (pages_identical(page, tree_page)) {
-+ err = replace_page(vma2, tree_page, page, wprt_pte2);
-+ if (err) {
-+ unlock_page(tree_page);
-+ goto restore_out;
-+ }
-+
-+ if ((vma2->vm_flags & VM_LOCKED)) {
-+ munlock_vma_page(tree_page);
-+ if (!PageMlocked(page)) {
-+ unlock_page(tree_page);
-+ lock_page(page);
-+ mlock_vma_page(page);
-+ tree_page = page; /* for final unlock */
-+ }
-+ }
-+
-+ unlock_page(tree_page);
-+
-+ goto out; /* success */
-+
-+ } else {
-+ if (tree_rmap_item->hash_max &&
-+ tree_rmap_item->hash_max == rmap_item->hash_max) {
-+ err = MERGE_ERR_COLLI_MAX;
-+ } else if (page_hash(page, hash_strength, 0) ==
-+ page_hash(tree_page, hash_strength, 0)) {
-+ inc_rshash_neg(memcmp_cost + hash_strength * 2);
-+ err = MERGE_ERR_COLLI;
-+ } else {
-+ err = MERGE_ERR_CHANGED;
-+ }
-+
-+ unlock_page(tree_page);
-+ }
-+
-+restore_out:
-+ lock_page(page);
-+ if (!restore_uksm_page_pte(vma1, get_rmap_addr(rmap_item),
-+ orig_pte1, wprt_pte1))
-+ page->mapping = saved_mapping;
-+
-+out_unlock:
-+ unlock_page(page);
-+out:
-+ return err;
-+}
-+
-+static inline int hash_cmp(u32 new_val, u32 node_val)
-+{
-+ if (new_val > node_val)
-+ return 1;
-+ else if (new_val < node_val)
-+ return -1;
-+ else
-+ return 0;
-+}
-+
-+static inline u32 rmap_item_hash_max(struct rmap_item *item, u32 hash)
-+{
-+ u32 hash_max = item->hash_max;
-+
-+ if (!hash_max) {
-+ hash_max = page_hash_max(item->page, hash);
-+
-+ item->hash_max = hash_max;
-+ }
-+
-+ return hash_max;
-+}
-+
-+
-+
-+/**
-+ * stable_tree_search() - search the stable tree for a page
-+ *
-+ * @item: the rmap_item we are comparing with
-+ * @hash: the hash value of this item->page already calculated
-+ *
-+ * @return the page we have found, NULL otherwise. The returned page has
-+ * been gotten (its refcount raised).
-+ */
-+static struct page *stable_tree_search(struct rmap_item *item, u32 hash)
-+{
-+ struct rb_node *node = root_stable_treep->rb_node;
-+ struct tree_node *tree_node;
-+ unsigned long hash_max;
-+ struct page *page = item->page;
-+ struct stable_node *stable_node;
-+
-+ stable_node = page_stable_node(page);
-+ if (stable_node) {
-+ /* ksm page forked, that is
-+ * if (PageKsm(page) && !in_stable_tree(rmap_item))
-+		 * it has actually been gotten once outside.
-+ */
-+ get_page(page);
-+ return page;
-+ }
-+
-+ while (node) {
-+ int cmp;
-+
-+ tree_node = rb_entry(node, struct tree_node, node);
-+
-+ cmp = hash_cmp(hash, tree_node->hash);
-+
-+ if (cmp < 0)
-+ node = node->rb_left;
-+ else if (cmp > 0)
-+ node = node->rb_right;
-+ else
-+ break;
-+ }
-+
-+ if (!node)
-+ return NULL;
-+
-+ if (tree_node->count == 1) {
-+ stable_node = rb_entry(tree_node->sub_root.rb_node,
-+ struct stable_node, node);
-+ BUG_ON(!stable_node);
-+
-+ goto get_page_out;
-+ }
-+
-+ /*
-+	 * Ok, we have to search the second-level
-+	 * subtree; hash the page to full strength.
-+ */
-+ node = tree_node->sub_root.rb_node;
-+ BUG_ON(!node);
-+ hash_max = rmap_item_hash_max(item, hash);
-+
-+ while (node) {
-+ int cmp;
-+
-+ stable_node = rb_entry(node, struct stable_node, node);
-+
-+ cmp = hash_cmp(hash_max, stable_node->hash_max);
-+
-+ if (cmp < 0)
-+ node = node->rb_left;
-+ else if (cmp > 0)
-+ node = node->rb_right;
-+ else
-+ goto get_page_out;
-+ }
-+
-+ return NULL;
-+
-+get_page_out:
-+ page = get_uksm_page(stable_node, 1, 1);
-+ return page;
-+}
-+
-+static int try_merge_rmap_item(struct rmap_item *item,
-+ struct page *kpage,
-+ struct page *tree_page)
-+{
-+ struct vm_area_struct *vma = item->slot->vma;
-+ struct page_vma_mapped_walk pvmw = {
-+ .page = kpage,
-+ .vma = vma,
-+ };
-+
-+ pvmw.address = get_rmap_addr(item);
-+ if (!page_vma_mapped_walk(&pvmw))
-+ return 0;
-+
-+ if (pte_write(*pvmw.pte)) {
-+ /* has changed, abort! */
-+ page_vma_mapped_walk_done(&pvmw);
-+ return 0;
-+ }
-+
-+ get_page(tree_page);
-+ page_add_anon_rmap(tree_page, vma, pvmw.address, false);
-+
-+ flush_cache_page(vma, pvmw.address, page_to_pfn(kpage));
-+ ptep_clear_flush_notify(vma, pvmw.address, pvmw.pte);
-+ set_pte_at_notify(vma->vm_mm, pvmw.address, pvmw.pte,
-+ mk_pte(tree_page, vma->vm_page_prot));
-+
-+ page_remove_rmap(kpage, false);
-+ put_page(kpage);
-+
-+ page_vma_mapped_walk_done(&pvmw);
-+
-+ return 1;
-+}
-+
-+/**
-+ * try_merge_with_stable() - when two rmap_items need to be inserted
-+ * into the stable tree and the page was found to be identical to a stable
-+ * ksm page, this is the last chance to merge them into one.
-+ *
-+ * @item1: the rmap_item holding the page which we want to insert
-+ * into the stable tree.
-+ * @item2: the other rmap_item found during the unstable tree search
-+ * @kpage: the page currently mapped by the two rmap_items
-+ * @tree_page: the page we found identical in stable tree node
-+ * @success1: return if item1 is successfully merged
-+ * @success2: return if item2 is successfully merged
-+ */
-+static void try_merge_with_stable(struct rmap_item *item1,
-+ struct rmap_item *item2,
-+ struct page **kpage,
-+ struct page *tree_page,
-+ int *success1, int *success2)
-+{
-+ struct vm_area_struct *vma1 = item1->slot->vma;
-+ struct vm_area_struct *vma2 = item2->slot->vma;
-+ *success1 = 0;
-+ *success2 = 0;
-+
-+ if (unlikely(*kpage == tree_page)) {
-+ /* I don't think this can really happen */
-+ pr_warn("UKSM: unexpected condition detected in "
-+ "%s -- *kpage == tree_page !\n", __func__);
-+ *success1 = 1;
-+ *success2 = 1;
-+ return;
-+ }
-+
-+ if (!PageAnon(*kpage) || !PageKsm(*kpage))
-+ goto failed;
-+
-+ if (!trylock_page(tree_page))
-+ goto failed;
-+
-+ /* If the oldpage is still ksm and still pointed
-+ * to in the right place, and still write protected,
-+	 * we are confident it has not changed; no need to
-+	 * memcmp anymore.
-+	 * Beware: we cannot take nested pte locks,
-+	 * deadlock risk.
-+ */
-+ if (!try_merge_rmap_item(item1, *kpage, tree_page))
-+ goto unlock_failed;
-+
-+	/* Ok, then vma2; remember that pte1 is already set */
-+ if (!try_merge_rmap_item(item2, *kpage, tree_page))
-+ goto success_1;
-+
-+ *success2 = 1;
-+success_1:
-+ *success1 = 1;
-+
-+
-+ if ((*success1 && vma1->vm_flags & VM_LOCKED) ||
-+ (*success2 && vma2->vm_flags & VM_LOCKED)) {
-+ munlock_vma_page(*kpage);
-+ if (!PageMlocked(tree_page))
-+ mlock_vma_page(tree_page);
-+ }
-+
-+ /*
-+	 * The caller does not need oldpage any more, so we can release the
-+	 * lock now.
-+ */
-+ unlock_page(*kpage);
-+ *kpage = tree_page; /* Get unlocked outside. */
-+ return;
-+
-+unlock_failed:
-+ unlock_page(tree_page);
-+failed:
-+ return;
-+}
-+
-+static inline void stable_node_hash_max(struct stable_node *node,
-+ struct page *page, u32 hash)
-+{
-+ u32 hash_max = node->hash_max;
-+
-+ if (!hash_max) {
-+ hash_max = page_hash_max(page, hash);
-+ node->hash_max = hash_max;
-+ }
-+}
-+
-+static inline
-+struct stable_node *new_stable_node(struct tree_node *tree_node,
-+ struct page *kpage, u32 hash_max)
-+{
-+ struct stable_node *new_stable_node;
-+
-+ new_stable_node = alloc_stable_node();
-+ if (!new_stable_node)
-+ return NULL;
-+
-+ new_stable_node->kpfn = page_to_pfn(kpage);
-+ new_stable_node->hash_max = hash_max;
-+ new_stable_node->tree_node = tree_node;
-+ set_page_stable_node(kpage, new_stable_node);
-+
-+ return new_stable_node;
-+}
-+
-+static inline
-+struct stable_node *first_level_insert(struct tree_node *tree_node,
-+ struct rmap_item *rmap_item,
-+ struct rmap_item *tree_rmap_item,
-+ struct page **kpage, u32 hash,
-+ int *success1, int *success2)
-+{
-+ int cmp;
-+ struct page *tree_page;
-+ u32 hash_max = 0;
-+ struct stable_node *stable_node, *new_snode;
-+ struct rb_node *parent = NULL, **new;
-+
-+ /* this tree node contains no sub-tree yet */
-+ stable_node = rb_entry(tree_node->sub_root.rb_node,
-+ struct stable_node, node);
-+
-+ tree_page = get_uksm_page(stable_node, 1, 0);
-+ if (tree_page) {
-+ cmp = memcmp_pages(*kpage, tree_page, 1);
-+ if (!cmp) {
-+ try_merge_with_stable(rmap_item, tree_rmap_item, kpage,
-+ tree_page, success1, success2);
-+ put_page(tree_page);
-+ if (!*success1 && !*success2)
-+ goto failed;
-+
-+ return stable_node;
-+
-+ } else {
-+ /*
-+			 * Collision at the first level; try to create a
-+			 * subtree. A new node needs to be created.
-+ */
-+ put_page(tree_page);
-+
-+ stable_node_hash_max(stable_node, tree_page,
-+ tree_node->hash);
-+ hash_max = rmap_item_hash_max(rmap_item, hash);
-+ cmp = hash_cmp(hash_max, stable_node->hash_max);
-+
-+ parent = &stable_node->node;
-+ if (cmp < 0)
-+ new = &parent->rb_left;
-+ else if (cmp > 0)
-+ new = &parent->rb_right;
-+ else
-+ goto failed;
-+ }
-+
-+ } else {
-+		/* The only stable_node was deleted; we reuse its tree_node. */
-+ parent = NULL;
-+ new = &tree_node->sub_root.rb_node;
-+ }
-+
-+ new_snode = new_stable_node(tree_node, *kpage, hash_max);
-+ if (!new_snode)
-+ goto failed;
-+
-+ rb_link_node(&new_snode->node, parent, new);
-+ rb_insert_color(&new_snode->node, &tree_node->sub_root);
-+ tree_node->count++;
-+ *success1 = *success2 = 1;
-+
-+ return new_snode;
-+
-+failed:
-+ return NULL;
-+}
-+
-+static inline
-+struct stable_node *stable_subtree_insert(struct tree_node *tree_node,
-+ struct rmap_item *rmap_item,
-+ struct rmap_item *tree_rmap_item,
-+ struct page **kpage, u32 hash,
-+ int *success1, int *success2)
-+{
-+ struct page *tree_page;
-+ u32 hash_max;
-+ struct stable_node *stable_node, *new_snode;
-+ struct rb_node *parent, **new;
-+
-+research:
-+ parent = NULL;
-+ new = &tree_node->sub_root.rb_node;
-+ BUG_ON(!*new);
-+ hash_max = rmap_item_hash_max(rmap_item, hash);
-+ while (*new) {
-+ int cmp;
-+
-+ stable_node = rb_entry(*new, struct stable_node, node);
-+
-+ cmp = hash_cmp(hash_max, stable_node->hash_max);
-+
-+ if (cmp < 0) {
-+ parent = *new;
-+ new = &parent->rb_left;
-+ } else if (cmp > 0) {
-+ parent = *new;
-+ new = &parent->rb_right;
-+ } else {
-+ tree_page = get_uksm_page(stable_node, 1, 0);
-+ if (tree_page) {
-+ cmp = memcmp_pages(*kpage, tree_page, 1);
-+ if (!cmp) {
-+ try_merge_with_stable(rmap_item,
-+ tree_rmap_item, kpage,
-+ tree_page, success1, success2);
-+
-+ put_page(tree_page);
-+ if (!*success1 && !*success2)
-+ goto failed;
-+ /*
-+ * successfully merged with a stable
-+ * node
-+ */
-+ return stable_node;
-+ } else {
-+ put_page(tree_page);
-+ goto failed;
-+ }
-+ } else {
-+ /*
-+				 * The stable node may have been
-+				 * deleted, and the subtree may have
-+				 * been restructured; we cannot
-+				 * continue, so re-search it.
-+ */
-+ if (tree_node->count) {
-+ goto research;
-+ } else {
-+ /* reuse the tree node*/
-+ parent = NULL;
-+ new = &tree_node->sub_root.rb_node;
-+ }
-+ }
-+ }
-+ }
-+
-+ new_snode = new_stable_node(tree_node, *kpage, hash_max);
-+ if (!new_snode)
-+ goto failed;
-+
-+ rb_link_node(&new_snode->node, parent, new);
-+ rb_insert_color(&new_snode->node, &tree_node->sub_root);
-+ tree_node->count++;
-+ *success1 = *success2 = 1;
-+
-+ return new_snode;
-+
-+failed:
-+ return NULL;
-+}
-+
-+
-+/**
-+ * stable_tree_insert() - try to insert a page merged in the unstable tree
-+ * into the stable tree
-+ *
-+ * @kpage: the page to be inserted
-+ * @hash: the current hash of this page
-+ * @rmap_item: the rmap_item being scanned
-+ * @tree_rmap_item: the rmap_item found on unstable tree
-+ * @success1: return if rmap_item is merged
-+ * @success2: return if tree_rmap_item is merged
-+ *
-+ * @return the stable_node on stable tree if at least one
-+ * rmap_item is inserted into stable tree, NULL
-+ * otherwise.
-+ */
-+static struct stable_node *
-+stable_tree_insert(struct page **kpage, u32 hash,
-+ struct rmap_item *rmap_item,
-+ struct rmap_item *tree_rmap_item,
-+ int *success1, int *success2)
-+{
-+ struct rb_node **new = &root_stable_treep->rb_node;
-+ struct rb_node *parent = NULL;
-+ struct stable_node *stable_node;
-+ struct tree_node *tree_node;
-+ u32 hash_max = 0;
-+
-+ *success1 = *success2 = 0;
-+
-+ while (*new) {
-+ int cmp;
-+
-+ tree_node = rb_entry(*new, struct tree_node, node);
-+
-+ cmp = hash_cmp(hash, tree_node->hash);
-+
-+ if (cmp < 0) {
-+ parent = *new;
-+ new = &parent->rb_left;
-+ } else if (cmp > 0) {
-+ parent = *new;
-+ new = &parent->rb_right;
-+ } else
-+ break;
-+ }
-+
-+ if (*new) {
-+ if (tree_node->count == 1) {
-+ stable_node = first_level_insert(tree_node, rmap_item,
-+ tree_rmap_item, kpage,
-+ hash, success1, success2);
-+ } else {
-+ stable_node = stable_subtree_insert(tree_node,
-+ rmap_item, tree_rmap_item, kpage,
-+ hash, success1, success2);
-+ }
-+ } else {
-+
-+ /* no tree node found */
-+ tree_node = alloc_tree_node(stable_tree_node_listp);
-+ if (!tree_node) {
-+ stable_node = NULL;
-+ goto out;
-+ }
-+
-+ stable_node = new_stable_node(tree_node, *kpage, hash_max);
-+ if (!stable_node) {
-+ free_tree_node(tree_node);
-+ goto out;
-+ }
-+
-+ tree_node->hash = hash;
-+ rb_link_node(&tree_node->node, parent, new);
-+ rb_insert_color(&tree_node->node, root_stable_treep);
-+ parent = NULL;
-+ new = &tree_node->sub_root.rb_node;
-+
-+ rb_link_node(&stable_node->node, parent, new);
-+ rb_insert_color(&stable_node->node, &tree_node->sub_root);
-+ tree_node->count++;
-+ *success1 = *success2 = 1;
-+ }
-+
-+out:
-+ return stable_node;
-+}
-+
-+
-+/**
-+ * get_tree_rmap_item_page() - try to get the page and lock the mmap_sem
-+ *
-+ * @return 0 on success, -EBUSY if unable to lock the mmap_sem,
-+ * -EINVAL if the page mapping has been changed.
-+ */
-+static inline int get_tree_rmap_item_page(struct rmap_item *tree_rmap_item)
-+{
-+ int err;
-+
-+ err = get_mergeable_page_lock_mmap(tree_rmap_item);
-+
-+ if (err == -EINVAL) {
-+ /* its page map has been changed, remove it */
-+ remove_rmap_item_from_tree(tree_rmap_item);
-+ }
-+
-+	/* On success, the page is gotten and the mmap_sem is locked. */
-+ return err;
-+}
-+
-+
-+/**
-+ * unstable_tree_search_insert() - search the unstable tree for an rmap_item
-+ * with the same hash value; get its page and trylock the mmap_sem
-+ */
-+static inline
-+struct rmap_item *unstable_tree_search_insert(struct rmap_item *rmap_item,
-+ u32 hash)
-+
-+{
-+ struct rb_node **new = &root_unstable_tree.rb_node;
-+ struct rb_node *parent = NULL;
-+ struct tree_node *tree_node;
-+ u32 hash_max;
-+ struct rmap_item *tree_rmap_item;
-+
-+ while (*new) {
-+ int cmp;
-+
-+ tree_node = rb_entry(*new, struct tree_node, node);
-+
-+ cmp = hash_cmp(hash, tree_node->hash);
-+
-+ if (cmp < 0) {
-+ parent = *new;
-+ new = &parent->rb_left;
-+ } else if (cmp > 0) {
-+ parent = *new;
-+ new = &parent->rb_right;
-+ } else
-+ break;
-+ }
-+
-+ if (*new) {
-+ /* got the tree_node */
-+ if (tree_node->count == 1) {
-+ tree_rmap_item = rb_entry(tree_node->sub_root.rb_node,
-+ struct rmap_item, node);
-+ BUG_ON(!tree_rmap_item);
-+
-+ goto get_page_out;
-+ }
-+
-+ /* well, search the collision subtree */
-+ new = &tree_node->sub_root.rb_node;
-+ BUG_ON(!*new);
-+ hash_max = rmap_item_hash_max(rmap_item, hash);
-+
-+ while (*new) {
-+ int cmp;
-+
-+ tree_rmap_item = rb_entry(*new, struct rmap_item,
-+ node);
-+
-+ cmp = hash_cmp(hash_max, tree_rmap_item->hash_max);
-+ parent = *new;
-+ if (cmp < 0)
-+ new = &parent->rb_left;
-+ else if (cmp > 0)
-+ new = &parent->rb_right;
-+ else
-+ goto get_page_out;
-+ }
-+ } else {
-+ /* alloc a new tree_node */
-+ tree_node = alloc_tree_node(&unstable_tree_node_list);
-+ if (!tree_node)
-+ return NULL;
-+
-+ tree_node->hash = hash;
-+ rb_link_node(&tree_node->node, parent, new);
-+ rb_insert_color(&tree_node->node, &root_unstable_tree);
-+ parent = NULL;
-+ new = &tree_node->sub_root.rb_node;
-+ }
-+
-+	/* not found even in the sub-tree */
-+ rmap_item->tree_node = tree_node;
-+ rmap_item->address |= UNSTABLE_FLAG;
-+ rmap_item->hash_round = uksm_hash_round;
-+ rb_link_node(&rmap_item->node, parent, new);
-+ rb_insert_color(&rmap_item->node, &tree_node->sub_root);
-+
-+ uksm_pages_unshared++;
-+ return NULL;
-+
-+get_page_out:
-+ if (tree_rmap_item->page == rmap_item->page)
-+ return NULL;
-+
-+ if (get_tree_rmap_item_page(tree_rmap_item))
-+ return NULL;
-+
-+ return tree_rmap_item;
-+}
-+
-+static void hold_anon_vma(struct rmap_item *rmap_item,
-+ struct anon_vma *anon_vma)
-+{
-+ rmap_item->anon_vma = anon_vma;
-+ get_anon_vma(anon_vma);
-+}
-+
-+
-+/**
-+ * stable_tree_append() - append a rmap_item to a stable node. Deduplication
-+ * ratio statistics are gathered in this function.
-+ *
-+ */
-+static void stable_tree_append(struct rmap_item *rmap_item,
-+ struct stable_node *stable_node, int logdedup)
-+{
-+ struct node_vma *node_vma = NULL, *new_node_vma, *node_vma_cont = NULL;
-+ unsigned long key = (unsigned long)rmap_item->slot;
-+ unsigned long factor = rmap_item->slot->rung->step;
-+
-+ BUG_ON(!stable_node);
-+ rmap_item->address |= STABLE_FLAG;
-+
-+ if (hlist_empty(&stable_node->hlist)) {
-+ uksm_pages_shared++;
-+ goto node_vma_new;
-+ } else {
-+ uksm_pages_sharing++;
-+ }
-+
-+ hlist_for_each_entry(node_vma, &stable_node->hlist, hlist) {
-+ if (node_vma->key >= key)
-+ break;
-+
-+ if (logdedup) {
-+ node_vma->slot->pages_bemerged += factor;
-+ if (list_empty(&node_vma->slot->dedup_list))
-+ list_add(&node_vma->slot->dedup_list,
-+ &vma_slot_dedup);
-+ }
-+ }
-+
-+ if (node_vma) {
-+ if (node_vma->key == key) {
-+ node_vma_cont = hlist_entry_safe(node_vma->hlist.next, struct node_vma, hlist);
-+ goto node_vma_ok;
-+ } else if (node_vma->key > key) {
-+ node_vma_cont = node_vma;
-+ }
-+ }
-+
-+	/* no matching vma in the node yet; allocate a new node_vma */
-+ /* no same vma already in node, alloc a new node_vma */
-+ new_node_vma = alloc_node_vma();
-+ BUG_ON(!new_node_vma);
-+ new_node_vma->head = stable_node;
-+ new_node_vma->slot = rmap_item->slot;
-+
-+ if (!node_vma) {
-+ hlist_add_head(&new_node_vma->hlist, &stable_node->hlist);
-+ } else if (node_vma->key != key) {
-+ if (node_vma->key < key)
-+ hlist_add_behind(&new_node_vma->hlist, &node_vma->hlist);
-+ else {
-+ hlist_add_before(&new_node_vma->hlist,
-+ &node_vma->hlist);
-+ }
-+
-+ }
-+ node_vma = new_node_vma;
-+
-+node_vma_ok: /* ok, ready to add to the list */
-+ rmap_item->head = node_vma;
-+ hlist_add_head(&rmap_item->hlist, &node_vma->rmap_hlist);
-+ hold_anon_vma(rmap_item, rmap_item->slot->vma->anon_vma);
-+ if (logdedup) {
-+ rmap_item->slot->pages_merged++;
-+ if (node_vma_cont) {
-+ node_vma = node_vma_cont;
-+ hlist_for_each_entry_continue(node_vma, hlist) {
-+ node_vma->slot->pages_bemerged += factor;
-+ if (list_empty(&node_vma->slot->dedup_list))
-+ list_add(&node_vma->slot->dedup_list,
-+ &vma_slot_dedup);
-+ }
-+ }
-+ }
-+}
-+
-+/*
-+ * We use break_ksm to break COW on a ksm page: it's a stripped down
-+ *
-+ * if (get_user_pages(addr, 1, 1, 1, &page, NULL) == 1)
-+ * put_page(page);
-+ *
-+ * but taking great care only to touch a ksm page, in a VM_MERGEABLE vma,
-+ * in case the application has unmapped and remapped mm,addr meanwhile.
-+ * Could a ksm page appear anywhere else? Actually yes, in a VM_PFNMAP
-+ * mmap of /dev/mem or /dev/kmem, where we would not want to touch it.
-+ */
-+static int break_ksm(struct vm_area_struct *vma, unsigned long addr)
-+{
-+ struct page *page;
-+ int ret = 0;
-+
-+ do {
-+ cond_resched();
-+ page = follow_page(vma, addr, FOLL_GET | FOLL_MIGRATION | FOLL_REMOTE);
-+ if (IS_ERR_OR_NULL(page))
-+ break;
-+ if (PageKsm(page)) {
-+ ret = handle_mm_fault(vma, addr,
-+ FAULT_FLAG_WRITE | FAULT_FLAG_REMOTE);
-+ } else
-+ ret = VM_FAULT_WRITE;
-+ put_page(page);
-+ } while (!(ret & (VM_FAULT_WRITE | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | VM_FAULT_OOM)));
-+ /*
-+ * We must loop because handle_mm_fault() may back out if there's
-+ * any difficulty e.g. if pte accessed bit gets updated concurrently.
-+ *
-+ * VM_FAULT_WRITE is what we have been hoping for: it indicates that
-+ * COW has been broken, even if the vma does not permit VM_WRITE;
-+ * but note that a concurrent fault might break PageKsm for us.
-+ *
-+ * VM_FAULT_SIGBUS could occur if we race with truncation of the
-+ * backing file, which also invalidates anonymous pages: that's
-+ * okay, that truncation will have unmapped the PageKsm for us.
-+ *
-+ * VM_FAULT_OOM: at the time of writing (late July 2009), setting
-+ * aside mem_cgroup limits, VM_FAULT_OOM would only be set if the
-+ * current task has TIF_MEMDIE set, and will be OOM killed on return
-+ * to user; and ksmd, having no mm, would never be chosen for that.
-+ *
-+ * But if the mm is in a limited mem_cgroup, then the fault may fail
-+ * with VM_FAULT_OOM even if the current task is not TIF_MEMDIE; and
-+ * even ksmd can fail in this way - though it's usually breaking ksm
-+ * just to undo a merge it made a moment before, so unlikely to oom.
-+ *
-+ * That's a pity: we might therefore have more kernel pages allocated
-+ * than we're counting as nodes in the stable tree; but uksm_do_scan
-+ * will retry to break_cow on each pass, so should recover the page
-+ * in due course. The important thing is to not let VM_MERGEABLE
-+ * be cleared while any such pages might remain in the area.
-+ */
-+ return (ret & VM_FAULT_OOM) ? -ENOMEM : 0;
-+}
-+
-+static void break_cow(struct rmap_item *rmap_item)
-+{
-+ struct vm_area_struct *vma = rmap_item->slot->vma;
-+ struct mm_struct *mm = vma->vm_mm;
-+ unsigned long addr = get_rmap_addr(rmap_item);
-+
-+ if (uksm_test_exit(mm))
-+ goto out;
-+
-+ break_ksm(vma, addr);
-+out:
-+ return;
-+}
-+
-+/*
-+ * Though it's very tempting to unmerge in_stable_tree(rmap_item)s rather
-+ * than check every pte of a given vma, the locking doesn't quite work for
-+ * that - an rmap_item is assigned to the stable tree after inserting ksm
-+ * page and upping mmap_sem. Nor does it fit with the way we skip dup'ing
-+ * rmap_items from parent to child at fork time (so as not to waste time
-+ * if exit comes before the next scan reaches it).
-+ *
-+ * Similarly, although we'd like to remove rmap_items (so updating counts
-+ * and freeing memory) when unmerging an area, it's easier to leave that
-+ * to the next pass of ksmd - consider, for example, how ksmd might be
-+ * in cmp_and_merge_page on one of the rmap_items we would be removing.
-+ */
-+inline int unmerge_uksm_pages(struct vm_area_struct *vma,
-+ unsigned long start, unsigned long end)
-+{
-+ unsigned long addr;
-+ int err = 0;
-+
-+ for (addr = start; addr < end && !err; addr += PAGE_SIZE) {
-+ if (uksm_test_exit(vma->vm_mm))
-+ break;
-+ if (signal_pending(current))
-+ err = -ERESTARTSYS;
-+ else
-+ err = break_ksm(vma, addr);
-+ }
-+ return err;
-+}
-+
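-+/*
-+ * Scanned-page accounting: the running total is kept as roughly
-+ * (pages_scanned_stored << pages_scanned_base) + uksm_pages_scanned.
-+ * When the live counter saturates at U64_MAX, its contribution is folded
-+ * into pages_scanned_stored (halving both and bumping the base if the
-+ * addition would overflow) and the live counter restarts from zero.
-+ */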
-+static inline void inc_uksm_pages_scanned(void)
-+{
-+ u64 delta;
-+
-+ if (uksm_pages_scanned == U64_MAX) {
-+ encode_benefit();
-+
-+ delta = uksm_pages_scanned >> pages_scanned_base;
-+
-+ if (CAN_OVERFLOW_U64(pages_scanned_stored, delta)) {
-+ pages_scanned_stored >>= 1;
-+ delta >>= 1;
-+ pages_scanned_base++;
-+ }
-+
-+ pages_scanned_stored += delta;
-+
-+ uksm_pages_scanned = uksm_pages_scanned_last = 0;
-+ }
-+
-+ uksm_pages_scanned++;
-+}
-+
-+static inline int find_zero_page_hash(int strength, u32 hash)
-+{
-+ return (zero_hash_table[strength] == hash);
-+}
-+
-+static
-+int cmp_and_merge_zero_page(struct vm_area_struct *vma, struct page *page)
-+{
-+ struct page *zero_page = empty_uksm_zero_page;
-+ struct mm_struct *mm = vma->vm_mm;
-+ pte_t orig_pte = __pte(0);
-+ int err = -EFAULT;
-+
-+ if (uksm_test_exit(mm))
-+ goto out;
-+
-+ if (!trylock_page(page))
-+ goto out;
-+
-+ if (!PageAnon(page))
-+ goto out_unlock;
-+
-+ if (PageTransCompound(page)) {
-+ err = split_huge_page(page);
-+ if (err)
-+ goto out_unlock;
-+ }
-+
-+ if (write_protect_page(vma, page, &orig_pte, 0) == 0) {
-+ if (is_page_full_zero(page))
-+ err = replace_page(vma, page, zero_page, orig_pte);
-+ }
-+
-+out_unlock:
-+ unlock_page(page);
-+out:
-+ return err;
-+}
-+
-+/*
-+ * cmp_and_merge_page() - first see if page can be merged into the stable
-+ * tree; if not, compare hash to previous and if it's the same, see if page
-+ * can be inserted into the unstable tree, or merged with a page already there
-+ * and both transferred to the stable tree.
-+ *
-+ * @rmap_item: the reverse mapping item of the page we are searching an
-+ * identical page for.
-+ * @hash: the hash value of this page's content.
-+ */
-+static void cmp_and_merge_page(struct rmap_item *rmap_item, u32 hash)
-+{
-+ struct rmap_item *tree_rmap_item;
-+ struct page *page;
-+ struct page *kpage = NULL;
-+ u32 hash_max;
-+ int err;
-+ unsigned int success1, success2;
-+ struct stable_node *snode;
-+ int cmp;
-+ struct rb_node *parent = NULL, **new;
-+
-+ remove_rmap_item_from_tree(rmap_item);
-+ page = rmap_item->page;
-+
-+ /* We first start with searching the page inside the stable tree */
-+ kpage = stable_tree_search(rmap_item, hash);
-+ if (kpage) {
-+ err = try_to_merge_with_uksm_page(rmap_item, kpage,
-+ hash);
-+ if (!err) {
-+ /*
-+ * The page was successfully merged, add
-+ * its rmap_item to the stable tree.
-+ * page lock is needed because it's
-+ * racing with try_to_unmap_ksm(), etc.
-+ */
-+ lock_page(kpage);
-+ snode = page_stable_node(kpage);
-+ stable_tree_append(rmap_item, snode, 1);
-+ unlock_page(kpage);
-+ put_page(kpage);
-+ return; /* success */
-+ }
-+ put_page(kpage);
-+
-+ /*
-+ * If it's a collision and the sub-rbtree has already been
-+ * searched (hash_max != 0), we want to abort: even if it were
-+ * merged successfully in the unstable tree, the collision
-+ * would tend to happen again.
-+ */
-+ if (err == MERGE_ERR_COLLI && rmap_item->hash_max)
-+ return;
-+ }
-+
-+ tree_rmap_item =
-+ unstable_tree_search_insert(rmap_item, hash);
-+ if (tree_rmap_item) {
-+ err = try_to_merge_two_pages(rmap_item, tree_rmap_item, hash);
-+ /*
-+ * As soon as we merge this page, we want to remove the
-+ * rmap_item of the page we have merged with from the unstable
-+ * tree, and insert it instead as new node in the stable tree.
-+ */
-+ if (!err) {
-+ kpage = page;
-+ remove_rmap_item_from_tree(tree_rmap_item);
-+ lock_page(kpage);
-+ snode = stable_tree_insert(&kpage, hash,
-+ rmap_item, tree_rmap_item,
-+ &success1, &success2);
-+
-+ /*
-+ * Do not log dedup for tree item, it's not counted as
-+ * scanned in this round.
-+ */
-+ if (success2)
-+ stable_tree_append(tree_rmap_item, snode, 0);
-+
-+ /*
-+ * The order of these two stable_tree_append() calls is
-+ * important: rmap_item is the one being scanned in this round.
-+ */
-+ if (success1)
-+ stable_tree_append(rmap_item, snode, 1);
-+
-+ /*
-+ * The original kpage may be unlocked inside
-+ * stable_tree_insert() already. This page
-+ * should be unlocked before doing
-+ * break_cow().
-+ */
-+ unlock_page(kpage);
-+
-+ if (!success1)
-+ break_cow(rmap_item);
-+
-+ if (!success2)
-+ break_cow(tree_rmap_item);
-+
-+ } else if (err == MERGE_ERR_COLLI) {
-+ BUG_ON(tree_rmap_item->tree_node->count > 1);
-+
-+ rmap_item_hash_max(tree_rmap_item,
-+ tree_rmap_item->tree_node->hash);
-+
-+ hash_max = rmap_item_hash_max(rmap_item, hash);
-+ cmp = hash_cmp(hash_max, tree_rmap_item->hash_max);
-+ parent = &tree_rmap_item->node;
-+ if (cmp < 0)
-+ new = &parent->rb_left;
-+ else if (cmp > 0)
-+ new = &parent->rb_right;
-+ else
-+ goto put_up_out;
-+
-+ rmap_item->tree_node = tree_rmap_item->tree_node;
-+ rmap_item->address |= UNSTABLE_FLAG;
-+ rmap_item->hash_round = uksm_hash_round;
-+ rb_link_node(&rmap_item->node, parent, new);
-+ rb_insert_color(&rmap_item->node,
-+ &tree_rmap_item->tree_node->sub_root);
-+ rmap_item->tree_node->count++;
-+ } else {
-+ /*
-+ * Either one of the pages has changed, or they collide
-+ * at the max hash; consider them ill items.
-+ */
-+ remove_rmap_item_from_tree(tree_rmap_item);
-+ }
-+put_up_out:
-+ put_page(tree_rmap_item->page);
-+ up_read(&tree_rmap_item->slot->vma->vm_mm->mmap_sem);
-+ }
-+}
-+
-+static inline unsigned long get_pool_index(struct vma_slot *slot,
-+ unsigned long index)
-+{
-+ unsigned long pool_index;
-+
-+ pool_index = (sizeof(struct rmap_list_entry *) * index) >> PAGE_SHIFT;
-+ if (pool_index >= slot->pool_size)
-+ BUG();
-+ return pool_index;
-+}
-+
-+static inline unsigned long index_page_offset(unsigned long index)
-+{
-+ return offset_in_page(sizeof(struct rmap_list_entry *) * index);
-+}
-+
-+static inline
-+struct rmap_list_entry *get_rmap_list_entry(struct vma_slot *slot,
-+ unsigned long index, int need_alloc)
-+{
-+ unsigned long pool_index;
-+ struct page *page;
-+ void *addr;
-+
-+ pool_index = get_pool_index(slot, index);
-+ if (!slot->rmap_list_pool[pool_index]) {
-+ if (!need_alloc)
-+ return NULL;
-+
-+ page = alloc_page(GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN);
-+ if (!page)
-+ return NULL;
-+
-+ slot->rmap_list_pool[pool_index] = page;
-+ }
-+
-+ addr = kmap(slot->rmap_list_pool[pool_index]);
-+ addr += index_page_offset(index);
-+
-+ return addr;
-+}
-+
-+static inline void put_rmap_list_entry(struct vma_slot *slot,
-+ unsigned long index)
-+{
-+ unsigned long pool_index;
-+
-+ pool_index = get_pool_index(slot, index);
-+ BUG_ON(!slot->rmap_list_pool[pool_index]);
-+ kunmap(slot->rmap_list_pool[pool_index]);
-+}
-+
-+static inline int entry_is_new(struct rmap_list_entry *entry)
-+{
-+ return !entry->item;
-+}
-+
-+static inline unsigned long get_index_orig_addr(struct vma_slot *slot,
-+ unsigned long index)
-+{
-+ return slot->vma->vm_start + (index << PAGE_SHIFT);
-+}
-+
-+static inline unsigned long get_entry_address(struct rmap_list_entry *entry)
-+{
-+ unsigned long addr;
-+
-+ if (is_addr(entry->addr))
-+ addr = get_clean_addr(entry->addr);
-+ else if (entry->item)
-+ addr = get_rmap_addr(entry->item);
-+ else
-+ BUG();
-+
-+ return addr;
-+}
-+
-+static inline struct rmap_item *get_entry_item(struct rmap_list_entry *entry)
-+{
-+ if (is_addr(entry->addr))
-+ return NULL;
-+
-+ return entry->item;
-+}
-+
-+static inline void inc_rmap_list_pool_count(struct vma_slot *slot,
-+ unsigned long index)
-+{
-+ unsigned long pool_index;
-+
-+ pool_index = get_pool_index(slot, index);
-+ BUG_ON(!slot->rmap_list_pool[pool_index]);
-+ slot->pool_counts[pool_index]++;
-+}
-+
-+static inline void dec_rmap_list_pool_count(struct vma_slot *slot,
-+ unsigned long index)
-+{
-+ unsigned long pool_index;
-+
-+ pool_index = get_pool_index(slot, index);
-+ BUG_ON(!slot->rmap_list_pool[pool_index]);
-+ BUG_ON(!slot->pool_counts[pool_index]);
-+ slot->pool_counts[pool_index]--;
-+}
-+
-+static inline int entry_has_rmap(struct rmap_list_entry *entry)
-+{
-+ return !is_addr(entry->addr) && entry->item;
-+}
-+
-+static inline void swap_entries(struct rmap_list_entry *entry1,
-+ unsigned long index1,
-+ struct rmap_list_entry *entry2,
-+ unsigned long index2)
-+{
-+ struct rmap_list_entry tmp;
-+
-+ /* swapping two new entries is meaningless */
-+ BUG_ON(entry_is_new(entry1) && entry_is_new(entry2));
-+
-+ tmp = *entry1;
-+ *entry1 = *entry2;
-+ *entry2 = tmp;
-+
-+ if (entry_has_rmap(entry1))
-+ entry1->item->entry_index = index1;
-+
-+ if (entry_has_rmap(entry2))
-+ entry2->item->entry_index = index2;
-+
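-+ /*
-+ * If an rmap-bearing entry moved across pool pages, credit the pool
-+ * page it landed on and debit the one it left; when both or neither
-+ * entry carries an rmap_item, the counts are unchanged.
-+ */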
-+ if (entry_has_rmap(entry1) && !entry_has_rmap(entry2)) {
-+ inc_rmap_list_pool_count(entry1->item->slot, index1);
-+ dec_rmap_list_pool_count(entry1->item->slot, index2);
-+ } else if (!entry_has_rmap(entry1) && entry_has_rmap(entry2)) {
-+ inc_rmap_list_pool_count(entry2->item->slot, index2);
-+ dec_rmap_list_pool_count(entry2->item->slot, index1);
-+ }
-+}
-+
-+static inline void free_entry_item(struct rmap_list_entry *entry)
-+{
-+ unsigned long index;
-+ struct rmap_item *item;
-+
-+ if (!is_addr(entry->addr)) {
-+ BUG_ON(!entry->item);
-+ item = entry->item;
-+ entry->addr = get_rmap_addr(item);
-+ set_is_addr(entry->addr);
-+ index = item->entry_index;
-+ remove_rmap_item_from_tree(item);
-+ dec_rmap_list_pool_count(item->slot, index);
-+ free_rmap_item(item);
-+ }
-+}
-+
-+static inline int pool_entry_boundary(unsigned long index)
-+{
-+ unsigned long linear_addr;
-+
-+ linear_addr = sizeof(struct rmap_list_entry *) * index;
-+ return index && !offset_in_page(linear_addr);
-+}
-+
-+static inline void try_free_last_pool(struct vma_slot *slot,
-+ unsigned long index)
-+{
-+ unsigned long pool_index;
-+
-+ pool_index = get_pool_index(slot, index);
-+ if (slot->rmap_list_pool[pool_index] &&
-+ !slot->pool_counts[pool_index]) {
-+ __free_page(slot->rmap_list_pool[pool_index]);
-+ slot->rmap_list_pool[pool_index] = NULL;
-+ slot->flags |= UKSM_SLOT_NEED_SORT;
-+ }
-+
-+}
-+
-+static inline unsigned long vma_item_index(struct vm_area_struct *vma,
-+ struct rmap_item *item)
-+{
-+ return (get_rmap_addr(item) - vma->vm_start) >> PAGE_SHIFT;
-+}
-+
-+static int within_same_pool(struct vma_slot *slot,
-+ unsigned long i, unsigned long j)
-+{
-+ unsigned long pool_i, pool_j;
-+
-+ pool_i = get_pool_index(slot, i);
-+ pool_j = get_pool_index(slot, j);
-+
-+ return (pool_i == pool_j);
-+}
-+
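-+/*
-+ * In-place, cycle-following sort of the rmap list: the entry at position
-+ * i is repeatedly swapped with the entry that belongs at i's target index
-+ * j (its page index within the vma) until the entry at i is new, an
-+ * address-only entry, or already in place; only then does i advance.
-+ * Each swap puts one entry into its final slot, bounding the total work.
-+ */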
-+static void sort_rmap_entry_list(struct vma_slot *slot)
-+{
-+ unsigned long i, j;
-+ struct rmap_list_entry *entry, *swap_entry;
-+
-+ entry = get_rmap_list_entry(slot, 0, 0);
-+ for (i = 0; i < slot->pages; ) {
-+
-+ if (!entry)
-+ goto skip_whole_pool;
-+
-+ if (entry_is_new(entry))
-+ goto next_entry;
-+
-+ if (is_addr(entry->addr)) {
-+ entry->addr = 0;
-+ goto next_entry;
-+ }
-+
-+ j = vma_item_index(slot->vma, entry->item);
-+ if (j == i)
-+ goto next_entry;
-+
-+ if (within_same_pool(slot, i, j))
-+ swap_entry = entry + j - i;
-+ else
-+ swap_entry = get_rmap_list_entry(slot, j, 1);
-+
-+ swap_entries(entry, i, swap_entry, j);
-+ if (!within_same_pool(slot, i, j))
-+ put_rmap_list_entry(slot, j);
-+ continue;
-+
-+skip_whole_pool:
-+ i += PAGE_SIZE / sizeof(*entry);
-+ if (i < slot->pages)
-+ entry = get_rmap_list_entry(slot, i, 0);
-+ continue;
-+
-+next_entry:
-+ if (i >= slot->pages - 1 ||
-+ !within_same_pool(slot, i, i + 1)) {
-+ put_rmap_list_entry(slot, i);
-+ if (i + 1 < slot->pages)
-+ entry = get_rmap_list_entry(slot, i + 1, 0);
-+ } else
-+ entry++;
-+ i++;
-+ continue;
-+ }
-+
-+ /*
-+ * Free pool pages that contain no rmap_item. This could be
-+ * simplified to rely on pool_counts alone once the code is known
-+ * to be bug-free.
-+ */
-+ for (i = 0; i < slot->pool_size; i++) {
-+ unsigned char has_rmap;
-+ void *addr;
-+
-+ if (!slot->rmap_list_pool[i])
-+ continue;
-+
-+ has_rmap = 0;
-+ addr = kmap(slot->rmap_list_pool[i]);
-+ BUG_ON(!addr);
-+ for (j = 0; j < PAGE_SIZE / sizeof(*entry); j++) {
-+ entry = (struct rmap_list_entry *)addr + j;
-+ if (is_addr(entry->addr))
-+ continue;
-+ if (!entry->item)
-+ continue;
-+ has_rmap = 1;
-+ }
-+ kunmap(slot->rmap_list_pool[i]);
-+ if (!has_rmap) {
-+ BUG_ON(slot->pool_counts[i]);
-+ __free_page(slot->rmap_list_pool[i]);
-+ slot->rmap_list_pool[i] = NULL;
-+ }
-+ }
-+
-+ slot->flags &= ~UKSM_SLOT_NEED_SORT;
-+}
-+
-+/*
-+ * vma_fully_scanned() - return whether all pages in this slot have been scanned.
-+ */
-+static inline int vma_fully_scanned(struct vma_slot *slot)
-+{
-+ return slot->pages_scanned == slot->pages;
-+}
-+
-+/**
-+ * get_next_rmap_item() - Get the next rmap_item in a vma_slot according to
-+ * its random permutation. The management of the random-permutation index
-+ * is embedded in this function as well.
-+ */
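-+/*
-+ * The permutation is generated lazily, Fisher-Yates style: when the rung
-+ * needs re-randomization, the entry at scan_index is swapped with a
-+ * uniformly chosen entry in [scan_index, slot->pages).
-+ */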
-+static struct rmap_item *get_next_rmap_item(struct vma_slot *slot, u32 *hash)
-+{
-+ unsigned long rand_range, addr, swap_index, scan_index;
-+ struct rmap_item *item = NULL;
-+ struct rmap_list_entry *scan_entry, *swap_entry = NULL;
-+ struct page *page;
-+
-+ scan_index = swap_index = slot->pages_scanned % slot->pages;
-+
-+ if (pool_entry_boundary(scan_index))
-+ try_free_last_pool(slot, scan_index - 1);
-+
-+ if (vma_fully_scanned(slot)) {
-+ if (slot->flags & UKSM_SLOT_NEED_SORT)
-+ slot->flags |= UKSM_SLOT_NEED_RERAND;
-+ else
-+ slot->flags &= ~UKSM_SLOT_NEED_RERAND;
-+ if (slot->flags & UKSM_SLOT_NEED_SORT)
-+ sort_rmap_entry_list(slot);
-+ }
-+
-+ scan_entry = get_rmap_list_entry(slot, scan_index, 1);
-+ if (!scan_entry)
-+ return NULL;
-+
-+ if (entry_is_new(scan_entry)) {
-+ scan_entry->addr = get_index_orig_addr(slot, scan_index);
-+ set_is_addr(scan_entry->addr);
-+ }
-+
-+ if (slot->flags & UKSM_SLOT_NEED_RERAND) {
-+ rand_range = slot->pages - scan_index;
-+ BUG_ON(!rand_range);
-+ swap_index = scan_index + (prandom_u32() % rand_range);
-+ }
-+
-+ if (swap_index != scan_index) {
-+ swap_entry = get_rmap_list_entry(slot, swap_index, 1);
-+ if (entry_is_new(swap_entry)) {
-+ swap_entry->addr = get_index_orig_addr(slot,
-+ swap_index);
-+ set_is_addr(swap_entry->addr);
-+ }
-+ swap_entries(scan_entry, scan_index, swap_entry, swap_index);
-+ }
-+
-+ addr = get_entry_address(scan_entry);
-+ item = get_entry_item(scan_entry);
-+ BUG_ON(addr > slot->vma->vm_end || addr < slot->vma->vm_start);
-+
-+ page = follow_page(slot->vma, addr, FOLL_GET);
-+ if (IS_ERR_OR_NULL(page))
-+ goto nopage;
-+
-+ if (!PageAnon(page))
-+ goto putpage;
-+
-+ /* check whether this is the zero page's pfn or uksm_zero_pfn */
-+ if ((page_to_pfn(page) == zero_pfn)
-+ || (page_to_pfn(page) == uksm_zero_pfn))
-+ goto putpage;
-+
-+ flush_anon_page(slot->vma, page, addr);
-+ flush_dcache_page(page);
-+
-+ *hash = page_hash(page, hash_strength, 1);
-+ inc_uksm_pages_scanned();
-+ /* if the page content is all zero, re-map it to the zero page */
-+ if (find_zero_page_hash(hash_strength, *hash)) {
-+ if (!cmp_and_merge_zero_page(slot->vma, page)) {
-+ slot->pages_merged++;
-+
-+ /* For full-zero pages, no need to create rmap item */
-+ goto putpage;
-+ } else {
-+ inc_rshash_neg(memcmp_cost / 2);
-+ }
-+ }
-+
-+ if (!item) {
-+ item = alloc_rmap_item();
-+ if (item) {
-+ /* It has already been zeroed */
-+ item->slot = slot;
-+ item->address = addr;
-+ item->entry_index = scan_index;
-+ scan_entry->item = item;
-+ inc_rmap_list_pool_count(slot, scan_index);
-+ } else
-+ goto putpage;
-+ }
-+
-+ BUG_ON(item->slot != slot);
-+ /* the page may have changed */
-+ item->page = page;
-+ put_rmap_list_entry(slot, scan_index);
-+ if (swap_entry)
-+ put_rmap_list_entry(slot, swap_index);
-+ return item;
-+
-+putpage:
-+ put_page(page);
-+ page = NULL;
-+nopage:
-+ /* no page, store addr back and free rmap_item if possible */
-+ free_entry_item(scan_entry);
-+ put_rmap_list_entry(slot, scan_index);
-+ if (swap_entry)
-+ put_rmap_list_entry(slot, swap_index);
-+ return NULL;
-+}
-+
-+static inline int in_stable_tree(struct rmap_item *rmap_item)
-+{
-+ return rmap_item->address & STABLE_FLAG;
-+}
-+
-+/**
-+ * scan_vma_one_page() - scan the next page in a vma_slot. Called with
-+ * mmap_sem locked.
-+ */
-+static noinline void scan_vma_one_page(struct vma_slot *slot)
-+{
-+ u32 hash;
-+ struct mm_struct *mm;
-+ struct rmap_item *rmap_item = NULL;
-+ struct vm_area_struct *vma = slot->vma;
-+
-+ mm = vma->vm_mm;
-+ BUG_ON(!mm);
-+ BUG_ON(!slot);
-+
-+ rmap_item = get_next_rmap_item(slot, &hash);
-+ if (!rmap_item)
-+ goto out1;
-+
-+ if (PageKsm(rmap_item->page) && in_stable_tree(rmap_item))
-+ goto out2;
-+
-+ cmp_and_merge_page(rmap_item, hash);
-+out2:
-+ put_page(rmap_item->page);
-+out1:
-+ slot->pages_scanned++;
-+ slot->this_sampled++;
-+ if (slot->fully_scanned_round != fully_scanned_round)
-+ scanned_virtual_pages++;
-+
-+ if (vma_fully_scanned(slot))
-+ slot->fully_scanned_round = fully_scanned_round;
-+}
-+
-+static inline unsigned long rung_get_pages(struct scan_rung *rung)
-+{
-+ struct slot_tree_node *node;
-+
-+ if (!rung->vma_root.rnode)
-+ return 0;
-+
-+ node = container_of(rung->vma_root.rnode, struct slot_tree_node, snode);
-+
-+ return node->size;
-+}
-+
-+#define RUNG_SAMPLED_MIN 3
-+
-+static inline
-+void uksm_calc_rung_step(struct scan_rung *rung,
-+ unsigned long page_time, unsigned long ratio)
-+{
-+ unsigned long sampled, pages;
-+
-+ /* cover_msecs == 0 means the rung will be fully scanned, page by page */
-+ if (!rung->cover_msecs) {
-+ rung->step = 1;
-+ return;
-+ }
-+
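-+ /*
-+ * Pages sampled per coverage period: cover_msecs worth of CPU time,
-+ * scaled by this rung's ratio (in 1/TIME_RATIO_SCALE units), divided
-+ * by the estimated per-page scan time in nanoseconds.
-+ */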
-+ sampled = rung->cover_msecs * (NSEC_PER_MSEC / TIME_RATIO_SCALE)
-+ * ratio / page_time;
-+
-+ /*
-+ * Before we finish a scan round and its expensive per-round jobs,
-+ * we need a chance to estimate the per-page time, so the sampled
-+ * number cannot be too small.
-+ */
-+ if (sampled < RUNG_SAMPLED_MIN)
-+ sampled = RUNG_SAMPLED_MIN;
-+
-+ pages = rung_get_pages(rung);
-+ if (likely(pages > sampled))
-+ rung->step = pages / sampled;
-+ else
-+ rung->step = 1;
-+}
-+
-+static inline int step_need_recalc(struct scan_rung *rung)
-+{
-+ unsigned long pages, stepmax;
-+
-+ pages = rung_get_pages(rung);
-+ stepmax = pages / RUNG_SAMPLED_MIN;
-+
-+ return pages && (rung->step > pages ||
-+ (stepmax && rung->step > stepmax));
-+}
-+
-+static inline
-+void reset_current_scan(struct scan_rung *rung, int finished, int step_recalc)
-+{
-+ struct vma_slot *slot;
-+
-+ if (finished)
-+ rung->flags |= UKSM_RUNG_ROUND_FINISHED;
-+
-+ if (step_recalc || step_need_recalc(rung)) {
-+ uksm_calc_rung_step(rung, uksm_ema_page_time, rung->cpu_ratio);
-+ BUG_ON(step_need_recalc(rung));
-+ }
-+
-+ slot_iter_index = prandom_u32() % rung->step;
-+ BUG_ON(!rung->vma_root.rnode);
-+ slot = sradix_tree_next(&rung->vma_root, NULL, 0, slot_iter);
-+ BUG_ON(!slot);
-+
-+ rung->current_scan = slot;
-+ rung->current_offset = slot_iter_index;
-+}
-+
-+static inline struct sradix_tree_root *slot_get_root(struct vma_slot *slot)
-+{
-+ return &slot->rung->vma_root;
-+}
-+
-+/*
-+ * Return 1 if the scan position was reset, 0 otherwise.
-+ */
-+static int advance_current_scan(struct scan_rung *rung)
-+{
-+ unsigned short n;
-+ struct vma_slot *slot, *next = NULL;
-+
-+ BUG_ON(!rung->vma_root.num);
-+
-+ slot = rung->current_scan;
-+ n = (slot->pages - rung->current_offset) % rung->step;
-+ slot_iter_index = rung->step - n;
-+ next = sradix_tree_next(&rung->vma_root, slot->snode,
-+ slot->sindex, slot_iter);
-+
-+ if (next) {
-+ rung->current_offset = slot_iter_index;
-+ rung->current_scan = next;
-+ return 0;
-+ } else {
-+ reset_current_scan(rung, 1, 0);
-+ return 1;
-+ }
-+}
-+
-+static inline void rung_rm_slot(struct vma_slot *slot)
-+{
-+ struct scan_rung *rung = slot->rung;
-+ struct sradix_tree_root *root;
-+
-+ if (rung->current_scan == slot)
-+ advance_current_scan(rung);
-+
-+ root = slot_get_root(slot);
-+ sradix_tree_delete_from_leaf(root, slot->snode, slot->sindex);
-+ slot->snode = NULL;
-+ if (step_need_recalc(rung)) {
-+ uksm_calc_rung_step(rung, uksm_ema_page_time, rung->cpu_ratio);
-+ BUG_ON(step_need_recalc(rung));
-+ }
-+
-+ /* In case advance_current_scan loops back to this slot again */
-+ if (rung->vma_root.num && rung->current_scan == slot)
-+ reset_current_scan(slot->rung, 1, 0);
-+}
-+
-+static inline void rung_add_new_slots(struct scan_rung *rung,
-+ struct vma_slot **slots, unsigned long num)
-+{
-+ int err;
-+ struct vma_slot *slot;
-+ unsigned long i;
-+ struct sradix_tree_root *root = &rung->vma_root;
-+
-+ err = sradix_tree_enter(root, (void **)slots, num);
-+ BUG_ON(err);
-+
-+ for (i = 0; i < num; i++) {
-+ slot = slots[i];
-+ slot->rung = rung;
-+ BUG_ON(vma_fully_scanned(slot));
-+ }
-+
-+ if (rung->vma_root.num == num)
-+ reset_current_scan(rung, 0, 1);
-+}
-+
-+static inline int rung_add_one_slot(struct scan_rung *rung,
-+ struct vma_slot *slot)
-+{
-+ int err;
-+
-+ err = sradix_tree_enter(&rung->vma_root, (void **)&slot, 1);
-+ if (err)
-+ return err;
-+
-+ slot->rung = rung;
-+ if (rung->vma_root.num == 1)
-+ reset_current_scan(rung, 0, 1);
-+
-+ return 0;
-+}
-+
-+/*
-+ * Return true if the slot is deleted from its rung.
-+ */
-+static inline int vma_rung_enter(struct vma_slot *slot, struct scan_rung *rung)
-+{
-+ struct scan_rung *old_rung = slot->rung;
-+ int err;
-+
-+ if (old_rung == rung)
-+ return 0;
-+
-+ rung_rm_slot(slot);
-+ err = rung_add_one_slot(rung, slot);
-+ if (err) {
-+ err = rung_add_one_slot(old_rung, slot);
-+ WARN_ON(err); /* OOPS, badly OOM, we lost this slot */
-+ }
-+
-+ return 1;
-+}
-+
-+static inline int vma_rung_up(struct vma_slot *slot)
-+{
-+ struct scan_rung *rung;
-+
-+ rung = slot->rung;
-+ if (slot->rung != &uksm_scan_ladder[SCAN_LADDER_SIZE-1])
-+ rung++;
-+
-+ return vma_rung_enter(slot, rung);
-+}
-+
-+static inline int vma_rung_down(struct vma_slot *slot)
-+{
-+ struct scan_rung *rung;
-+
-+ rung = slot->rung;
-+ if (slot->rung != &uksm_scan_ladder[0])
-+ rung--;
-+
-+ return vma_rung_enter(slot, rung);
-+}
-+
-+/**
-+ * cal_dedup_ratio() - Calculate the deduplication ratio for this slot.
-+ */
-+static unsigned long cal_dedup_ratio(struct vma_slot *slot)
-+{
-+ unsigned long ret;
-+ unsigned long pages;
-+
-+ pages = slot->this_sampled;
-+ if (!pages)
-+ return 0;
-+
-+ BUG_ON(slot->pages_scanned == slot->last_scanned);
-+
-+ ret = slot->pages_merged;
-+
-+ /* Thrashing area filtering */
-+ if (ret && uksm_thrash_threshold) {
-+ if (slot->pages_cowed * 100 / slot->pages_merged
-+ > uksm_thrash_threshold) {
-+ ret = 0;
-+ } else {
-+ ret = slot->pages_merged - slot->pages_cowed;
-+ }
-+ }
-+
-+ return ret * 100 / pages;
-+}
-+
-+/**
-+ * cal_dedup_ratio_old() - Calculate the deduplication ratio for this slot,
-+ * based on pages_bemerged over the slot's total page count.
-+ */
-+static unsigned long cal_dedup_ratio_old(struct vma_slot *slot)
-+{
-+ unsigned long ret;
-+ unsigned long pages;
-+
-+ pages = slot->pages;
-+ if (!pages)
-+ return 0;
-+
-+ ret = slot->pages_bemerged;
-+
-+ /* Thrashing area filtering */
-+ if (ret && uksm_thrash_threshold) {
-+ if (slot->pages_cowed * 100 / slot->pages_bemerged
-+ > uksm_thrash_threshold) {
-+ ret = 0;
-+ } else {
-+ ret = slot->pages_bemerged - slot->pages_cowed;
-+ }
-+ }
-+
-+ return ret * 100 / pages;
-+}
-+
-+/**
-+ * stable_node_reinsert() - When the hash_strength has been adjusted, the
-+ * stable tree needs to be restructured; this is the function that
-+ * re-inserts a stable node.
-+ */
-+static inline void stable_node_reinsert(struct stable_node *new_node,
-+ struct page *page,
-+ struct rb_root *root_treep,
-+ struct list_head *tree_node_listp,
-+ u32 hash)
-+{
-+ struct rb_node **new = &root_treep->rb_node;
-+ struct rb_node *parent = NULL;
-+ struct stable_node *stable_node;
-+ struct tree_node *tree_node;
-+ struct page *tree_page;
-+ int cmp;
-+
-+ while (*new) {
-+ int cmp;
-+
-+ tree_node = rb_entry(*new, struct tree_node, node);
-+
-+ cmp = hash_cmp(hash, tree_node->hash);
-+
-+ if (cmp < 0) {
-+ parent = *new;
-+ new = &parent->rb_left;
-+ } else if (cmp > 0) {
-+ parent = *new;
-+ new = &parent->rb_right;
-+ } else
-+ break;
-+ }
-+
-+ if (*new) {
-+ /* found a stable tree node with the same first-level hash value */
-+ stable_node_hash_max(new_node, page, hash);
-+ if (tree_node->count == 1) {
-+ stable_node = rb_entry(tree_node->sub_root.rb_node,
-+ struct stable_node, node);
-+ tree_page = get_uksm_page(stable_node, 1, 0);
-+ if (tree_page) {
-+ stable_node_hash_max(stable_node,
-+ tree_page, hash);
-+ put_page(tree_page);
-+
-+ /* prepare for stable node insertion */
-+
-+ cmp = hash_cmp(new_node->hash_max,
-+ stable_node->hash_max);
-+ parent = &stable_node->node;
-+ if (cmp < 0)
-+ new = &parent->rb_left;
-+ else if (cmp > 0)
-+ new = &parent->rb_right;
-+ else
-+ goto failed;
-+
-+ goto add_node;
-+ } else {
-+ /* the only stable_node was deleted, but the tree node
-+ * was not.
-+ */
-+ goto tree_node_reuse;
-+ }
-+ }
-+
-+ /* well, search the collision subtree */
-+ new = &tree_node->sub_root.rb_node;
-+ parent = NULL;
-+ BUG_ON(!*new);
-+ while (*new) {
-+ int cmp;
-+
-+ stable_node = rb_entry(*new, struct stable_node, node);
-+
-+ cmp = hash_cmp(new_node->hash_max,
-+ stable_node->hash_max);
-+
-+ if (cmp < 0) {
-+ parent = *new;
-+ new = &parent->rb_left;
-+ } else if (cmp > 0) {
-+ parent = *new;
-+ new = &parent->rb_right;
-+ } else {
-+ /* oh, no, still a collision */
-+ goto failed;
-+ }
-+ }
-+
-+ goto add_node;
-+ }
-+
-+ /* no tree node found */
-+ tree_node = alloc_tree_node(tree_node_listp);
-+ if (!tree_node) {
-+ pr_err("UKSM: memory allocation error!\n");
-+ goto failed;
-+ } else {
-+ tree_node->hash = hash;
-+ rb_link_node(&tree_node->node, parent, new);
-+ rb_insert_color(&tree_node->node, root_treep);
-+
-+tree_node_reuse:
-+ /* prepare for stable node insertion */
-+ parent = NULL;
-+ new = &tree_node->sub_root.rb_node;
-+ }
-+
-+add_node:
-+ rb_link_node(&new_node->node, parent, new);
-+ rb_insert_color(&new_node->node, &tree_node->sub_root);
-+ new_node->tree_node = tree_node;
-+ tree_node->count++;
-+ return;
-+
-+failed:
-+ /* This can only happen when two nodes have collided
-+ * at both hash levels.
-+ */
-+ new_node->tree_node = NULL;
-+ return;
-+}
-+
-+static inline void free_all_tree_nodes(struct list_head *list)
-+{
-+ struct tree_node *node, *tmp;
-+
-+ list_for_each_entry_safe(node, tmp, list, all_list) {
-+ free_tree_node(node);
-+ }
-+}
-+
-+/**
-+ * stable_tree_delta_hash() - Delta hash the stable tree from previous hash
-+ * strength to the current hash_strength. It restructures the whole tree.
-+ */
-+static inline void stable_tree_delta_hash(u32 prev_hash_strength)
-+{
-+ struct stable_node *node, *tmp;
-+ struct rb_root *root_new_treep;
-+ struct list_head *new_tree_node_listp;
-+
-+ stable_tree_index = (stable_tree_index + 1) % 2;
-+ root_new_treep = &root_stable_tree[stable_tree_index];
-+ new_tree_node_listp = &stable_tree_node_list[stable_tree_index];
-+ *root_new_treep = RB_ROOT;
-+ BUG_ON(!list_empty(new_tree_node_listp));
-+
-+ /*
-+ * We must iterate safely: a node could be removed by get_uksm_page().
-+ */
-+ list_for_each_entry_safe(node, tmp, &stable_node_list, all_list) {
-+ void *addr;
-+ struct page *node_page;
-+ u32 hash;
-+
-+ /*
-+ * We are completely re-structuring the stable nodes to a new
-+ * stable tree. We don't bother unlinking from the old tree or touching
-+ * its old tree_nodes; the old tree_nodes will be freed all at once.
-+ */
-+ node_page = get_uksm_page(node, 0, 0);
-+ if (!node_page)
-+ continue;
-+
-+ if (node->tree_node) {
-+ hash = node->tree_node->hash;
-+
-+ addr = kmap_atomic(node_page);
-+
-+ hash = delta_hash(addr, prev_hash_strength,
-+ hash_strength, hash);
-+ kunmap_atomic(addr);
-+ } else {
-+ /*
-+ * It was not inserted into the rbtree due to a collision
-+ * in the last scan round.
-+ */
-+ hash = page_hash(node_page, hash_strength, 0);
-+ }
-+
-+ stable_node_reinsert(node, node_page, root_new_treep,
-+ new_tree_node_listp, hash);
-+ put_page(node_page);
-+ }
-+
-+ root_stable_treep = root_new_treep;
-+ free_all_tree_nodes(stable_tree_node_listp);
-+ BUG_ON(!list_empty(stable_tree_node_listp));
-+ stable_tree_node_listp = new_tree_node_listp;
-+}
-+
-+static inline void inc_hash_strength(unsigned long delta)
-+{
-+ hash_strength += 1 << delta;
-+ if (hash_strength > HASH_STRENGTH_MAX)
-+ hash_strength = HASH_STRENGTH_MAX;
-+}
-+
-+static inline void dec_hash_strength(unsigned long delta)
-+{
-+ unsigned long change = 1 << delta;
-+
-+ if (hash_strength <= change + 1)
-+ hash_strength = 1;
-+ else
-+ hash_strength -= change;
-+}
-+
-+static inline void inc_hash_strength_delta(void)
-+{
-+ hash_strength_delta++;
-+ if (hash_strength_delta > HASH_STRENGTH_DELTA_MAX)
-+ hash_strength_delta = HASH_STRENGTH_DELTA_MAX;
-+}
-+
-+static inline unsigned long get_current_neg_ratio(void)
-+{
-+ u64 pos = benefit.pos;
-+ u64 neg = benefit.neg;
-+
-+ if (!neg)
-+ return 0;
-+
-+ if (!pos || neg > pos)
-+ return 100;
-+
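-+ /* compute neg * 100 / pos without overflowing 64 bits */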
-+ if (neg > div64_u64(U64_MAX, 100))
-+ pos = div64_u64(pos, 100);
-+ else
-+ neg *= 100;
-+
-+ return div64_u64(neg, pos);
-+}
-+
-+static inline unsigned long get_current_benefit(void)
-+{
-+ u64 pos = benefit.pos;
-+ u64 neg = benefit.neg;
-+ u64 scanned = benefit.scanned;
-+
-+ if (neg > pos)
-+ return 0;
-+
-+ return div64_u64((pos - neg), scanned);
-+}
-+
-+static inline int judge_rshash_direction(void)
-+{
-+ u64 current_neg_ratio, stable_benefit;
-+ u64 current_benefit, delta = 0;
-+ int ret = STILL;
-+
-+ /*
-+ * Try to probe a value right after boot, and again whenever the
-+ * system has stayed still for a long time.
-+ */
-+ if ((fully_scanned_round & 0xFFULL) == 10) {
-+ ret = OBSCURE;
-+ goto out;
-+ }
-+
-+ current_neg_ratio = get_current_neg_ratio();
-+
-+ if (current_neg_ratio == 0) {
-+ rshash_neg_cont_zero++;
-+ if (rshash_neg_cont_zero > 2)
-+ return GO_DOWN;
-+ else
-+ return STILL;
-+ }
-+ rshash_neg_cont_zero = 0;
-+
-+ if (current_neg_ratio > 90) {
-+ ret = GO_UP;
-+ goto out;
-+ }
-+
-+ current_benefit = get_current_benefit();
-+ stable_benefit = rshash_state.stable_benefit;
-+
-+ if (!stable_benefit) {
-+ ret = OBSCURE;
-+ goto out;
-+ }
-+
-+ if (current_benefit > stable_benefit)
-+ delta = current_benefit - stable_benefit;
-+ else if (current_benefit < stable_benefit)
-+ delta = stable_benefit - current_benefit;
-+
-+ delta = div64_u64(100 * delta, stable_benefit);
-+
-+ if (delta > 50) {
-+ rshash_cont_obscure++;
-+ if (rshash_cont_obscure > 2)
-+ return OBSCURE;
-+ else
-+ return STILL;
-+ }
-+
-+out:
-+ rshash_cont_obscure = 0;
-+ return ret;
-+}
-+
-+/**
-+ * rshash_adjust() - The main function to control the random sampling state
-+ * machine for hash strength adapting.
-+ *
-+ * return true if hash_strength has changed.
-+ */
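-+/*
-+ * State flow, as implemented below: RSHASH_STILL probes a direction; an
-+ * OBSCURE judgement records the current point and enters RSHASH_TRYDOWN,
-+ * which lowers the strength while tracking the best downward turn point;
-+ * RSHASH_TRYUP then raises it symmetrically; finally the better of the
-+ * two turn points is adopted and the machine settles back to RSHASH_STILL
-+ * via RSHASH_PRE_STILL.
-+ */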
-+static inline int rshash_adjust(void)
-+{
-+ unsigned long prev_hash_strength = hash_strength;
-+
-+ if (!encode_benefit())
-+ return 0;
-+
-+ switch (rshash_state.state) {
-+ case RSHASH_STILL:
-+ switch (judge_rshash_direction()) {
-+ case GO_UP:
-+ if (rshash_state.pre_direct == GO_DOWN)
-+ hash_strength_delta = 0;
-+
-+ inc_hash_strength(hash_strength_delta);
-+ inc_hash_strength_delta();
-+ rshash_state.stable_benefit = get_current_benefit();
-+ rshash_state.pre_direct = GO_UP;
-+ break;
-+
-+ case GO_DOWN:
-+ if (rshash_state.pre_direct == GO_UP)
-+ hash_strength_delta = 0;
-+
-+ dec_hash_strength(hash_strength_delta);
-+ inc_hash_strength_delta();
-+ rshash_state.stable_benefit = get_current_benefit();
-+ rshash_state.pre_direct = GO_DOWN;
-+ break;
-+
-+ case OBSCURE:
-+ rshash_state.stable_point = hash_strength;
-+ rshash_state.turn_point_down = hash_strength;
-+ rshash_state.turn_point_up = hash_strength;
-+ rshash_state.turn_benefit_down = get_current_benefit();
-+ rshash_state.turn_benefit_up = get_current_benefit();
-+ rshash_state.lookup_window_index = 0;
-+ rshash_state.state = RSHASH_TRYDOWN;
-+ dec_hash_strength(hash_strength_delta);
-+ inc_hash_strength_delta();
-+ break;
-+
-+ case STILL:
-+ break;
-+ default:
-+ BUG();
-+ }
-+ break;
-+
-+ case RSHASH_TRYDOWN:
-+ if (rshash_state.lookup_window_index++ % 5 == 0)
-+ rshash_state.below_count = 0;
-+
-+ if (get_current_benefit() < rshash_state.stable_benefit)
-+ rshash_state.below_count++;
-+ else if (get_current_benefit() >
-+ rshash_state.turn_benefit_down) {
-+ rshash_state.turn_point_down = hash_strength;
-+ rshash_state.turn_benefit_down = get_current_benefit();
-+ }
-+
-+ if (rshash_state.below_count >= 3 ||
-+ judge_rshash_direction() == GO_UP ||
-+ hash_strength == 1) {
-+ hash_strength = rshash_state.stable_point;
-+ hash_strength_delta = 0;
-+ inc_hash_strength(hash_strength_delta);
-+ inc_hash_strength_delta();
-+ rshash_state.lookup_window_index = 0;
-+ rshash_state.state = RSHASH_TRYUP;
-+ hash_strength_delta = 0;
-+ } else {
-+ dec_hash_strength(hash_strength_delta);
-+ inc_hash_strength_delta();
-+ }
-+ break;
-+
-+ case RSHASH_TRYUP:
-+ if (rshash_state.lookup_window_index++ % 5 == 0)
-+ rshash_state.below_count = 0;
-+
-+ if (get_current_benefit() < rshash_state.turn_benefit_down)
-+ rshash_state.below_count++;
-+ else if (get_current_benefit() > rshash_state.turn_benefit_up) {
-+ rshash_state.turn_point_up = hash_strength;
-+ rshash_state.turn_benefit_up = get_current_benefit();
-+ }
-+
-+ if (rshash_state.below_count >= 3 ||
-+ judge_rshash_direction() == GO_DOWN ||
-+ hash_strength == HASH_STRENGTH_MAX) {
-+ hash_strength = rshash_state.turn_benefit_up >
-+ rshash_state.turn_benefit_down ?
-+ rshash_state.turn_point_up :
-+ rshash_state.turn_point_down;
-+
-+ rshash_state.state = RSHASH_PRE_STILL;
-+ } else {
-+ inc_hash_strength(hash_strength_delta);
-+ inc_hash_strength_delta();
-+ }
-+
-+ break;
-+
-+ case RSHASH_NEW:
-+ case RSHASH_PRE_STILL:
-+ rshash_state.stable_benefit = get_current_benefit();
-+ rshash_state.state = RSHASH_STILL;
-+ hash_strength_delta = 0;
-+ break;
-+ default:
-+ BUG();
-+ }
-+
-+ /* rshash_neg = rshash_pos = 0; */
-+ reset_benefit();
-+
-+ if (prev_hash_strength != hash_strength)
-+ stable_tree_delta_hash(prev_hash_strength);
-+
-+ return prev_hash_strength != hash_strength;
-+}
-+
-+/**
-+ * round_update_ladder() - The main function to do update of all the
-+ * adjustments whenever a scan round is finished.
-+ */
-+static noinline void round_update_ladder(void)
-+{
-+ int i;
-+ unsigned long dedup;
-+ struct vma_slot *slot, *tmp_slot;
-+
-+ for (i = 0; i < SCAN_LADDER_SIZE; i++)
-+ uksm_scan_ladder[i].flags &= ~UKSM_RUNG_ROUND_FINISHED;
-+
-+ list_for_each_entry_safe(slot, tmp_slot, &vma_slot_dedup, dedup_list) {
-+
-+ /* slot may be rung_rm_slot() when mm exits */
-+ if (slot->snode) {
-+ dedup = cal_dedup_ratio_old(slot);
-+ if (dedup && dedup >= uksm_abundant_threshold)
-+ vma_rung_up(slot);
-+ }
-+
-+ slot->pages_bemerged = 0;
-+ slot->pages_cowed = 0;
-+
-+ list_del_init(&slot->dedup_list);
-+ }
-+}
-+
-+static void uksm_del_vma_slot(struct vma_slot *slot)
-+{
-+ int i, j;
-+ struct rmap_list_entry *entry;
-+
-+ if (slot->snode) {
-+ /*
-+ * slot->snode is NULL if entering the rung failed, in which
-+ * case removal is unnecessary.
-+ */
-+ rung_rm_slot(slot);
-+ }
-+
-+ if (!list_empty(&slot->dedup_list))
-+ list_del(&slot->dedup_list);
-+
-+ if (!slot->rmap_list_pool || !slot->pool_counts) {
-+ /* In case it OOMed in uksm_vma_enter() */
-+ goto out;
-+ }
-+
-+ for (i = 0; i < slot->pool_size; i++) {
-+ void *addr;
-+
-+ if (!slot->rmap_list_pool[i])
-+ continue;
-+
-+ addr = kmap(slot->rmap_list_pool[i]);
-+ for (j = 0; j < PAGE_SIZE / sizeof(*entry); j++) {
-+ entry = (struct rmap_list_entry *)addr + j;
-+ if (is_addr(entry->addr))
-+ continue;
-+ if (!entry->item)
-+ continue;
-+
-+ remove_rmap_item_from_tree(entry->item);
-+ free_rmap_item(entry->item);
-+ slot->pool_counts[i]--;
-+ }
-+ BUG_ON(slot->pool_counts[i]);
-+ kunmap(slot->rmap_list_pool[i]);
-+ __free_page(slot->rmap_list_pool[i]);
-+ }
-+ kfree(slot->rmap_list_pool);
-+ kfree(slot->pool_counts);
-+
-+out:
-+ slot->rung = NULL;
-+ if (slot->flags & UKSM_SLOT_IN_UKSM) {
-+ BUG_ON(uksm_pages_total < slot->pages);
-+ uksm_pages_total -= slot->pages;
-+ }
-+
-+ if (slot->fully_scanned_round == fully_scanned_round)
-+ scanned_virtual_pages -= slot->pages;
-+ else
-+ scanned_virtual_pages -= slot->pages_scanned;
-+ free_vma_slot(slot);
-+}
-+
-+#define SPIN_LOCK_PERIOD 32
-+static struct vma_slot *cleanup_slots[SPIN_LOCK_PERIOD];
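-+/*
-+ * Drain vma_slot_del in batches of SPIN_LOCK_PERIOD, dropping the
-+ * spinlock between batches so the relatively expensive per-slot
-+ * teardown in uksm_del_vma_slot() runs without the lock held.
-+ */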
-+static inline void cleanup_vma_slots(void)
-+{
-+ struct vma_slot *slot;
-+ int i;
-+
-+ i = 0;
-+ spin_lock(&vma_slot_list_lock);
-+ while (!list_empty(&vma_slot_del)) {
-+ slot = list_entry(vma_slot_del.next,
-+ struct vma_slot, slot_list);
-+ list_del(&slot->slot_list);
-+ cleanup_slots[i++] = slot;
-+ if (i == SPIN_LOCK_PERIOD) {
-+ spin_unlock(&vma_slot_list_lock);
-+ while (--i >= 0)
-+ uksm_del_vma_slot(cleanup_slots[i]);
-+ i = 0;
-+ spin_lock(&vma_slot_list_lock);
-+ }
-+ }
-+ spin_unlock(&vma_slot_list_lock);
-+
-+ while (--i >= 0)
-+ uksm_del_vma_slot(cleanup_slots[i]);
-+}
-+
-+/*
-+ * Exponential moving average formula
-+ */
-+static inline unsigned long ema(unsigned long curr, unsigned long last_ema)
-+{
-+ /*
-+ * For a very high burst even the EMA cannot work well: a falsely
-+ * high per-page time estimate feeds back as very high context-switch
-+ * and rung-update overhead, which drives the per-page time estimate
-+ * higher still, so it may never converge.
-+ *
-+ * Instead, we try to approach this value in a binary manner.
-+ */
-+ if (curr > last_ema * 10)
-+ return last_ema * 2;
-+
-+ return (EMA_ALPHA * curr + (100 - EMA_ALPHA) * last_ema) / 100;
-+}
-+
-+/*
-+ * convert cpu ratio in 1/TIME_RATIO_SCALE configured by user to
-+ * nanoseconds based on current uksm_sleep_jiffies.
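-+ *
-+ * For example, at ratio = TIME_RATIO_SCALE / 2 (a 50% CPU share), the
-+ * returned scan budget equals one full sleep period.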
-+ */
-+static inline unsigned long cpu_ratio_to_nsec(unsigned int ratio)
-+{
-+ return NSEC_PER_USEC * jiffies_to_usecs(uksm_sleep_jiffies) /
-+ (TIME_RATIO_SCALE - ratio) * ratio;
-+}
-+
-+static inline unsigned long rung_real_ratio(int cpu_time_ratio)
-+{
-+ unsigned long ret;
-+
-+ BUG_ON(!cpu_time_ratio);
-+
-+ if (cpu_time_ratio > 0)
-+ ret = cpu_time_ratio;
-+ else
-+ ret = (unsigned long)(-cpu_time_ratio) *
-+ uksm_max_cpu_percentage / 100UL;
-+
-+ return ret ? ret : 1;
-+}
-+
-+static noinline void uksm_calc_scan_pages(void)
-+{
-+ struct scan_rung *ladder = uksm_scan_ladder;
-+ unsigned long sleep_usecs, nsecs;
-+ unsigned long ratio;
-+ int i;
-+ unsigned long per_page;
-+
-+ if (uksm_ema_page_time > 100000 ||
-+ (((unsigned long) uksm_eval_round & (256UL - 1)) == 0UL))
-+ uksm_ema_page_time = UKSM_PAGE_TIME_DEFAULT;
-+
-+ per_page = uksm_ema_page_time;
-+ BUG_ON(!per_page);
-+
-+ /*
-+ * For every 8 eval round, we try to probe a uksm_sleep_jiffies value
-+ * based on saved user input.
-+ */
-+ if (((unsigned long) uksm_eval_round & (8UL - 1)) == 0UL)
-+ uksm_sleep_jiffies = uksm_sleep_saved;
-+
-+ /* We require that a rung scans at least one page per period. */
-+ nsecs = per_page;
-+ ratio = rung_real_ratio(ladder[0].cpu_ratio);
-+ if (cpu_ratio_to_nsec(ratio) < nsecs) {
-+ sleep_usecs = nsecs * (TIME_RATIO_SCALE - ratio) / ratio
-+ / NSEC_PER_USEC;
-+ uksm_sleep_jiffies = usecs_to_jiffies(sleep_usecs) + 1;
-+ }
-+
-+ for (i = 0; i < SCAN_LADDER_SIZE; i++) {
-+ ratio = rung_real_ratio(ladder[i].cpu_ratio);
-+ ladder[i].pages_to_scan = cpu_ratio_to_nsec(ratio) /
-+ per_page;
-+ BUG_ON(!ladder[i].pages_to_scan);
-+ uksm_calc_rung_step(&ladder[i], per_page, ratio);
-+ }
-+}
-+
-+/*
-+ * Convert the scan time of this round (ns) to the next expected minimum
-+ * sleep time (ms); be careful of possible overflows. ratio is taken from
-+ * rung_real_ratio().
-+ */
-+static inline
-+unsigned int scan_time_to_sleep(unsigned long long scan_time, unsigned long ratio)
-+{
-+ scan_time >>= 20; /* ns -> ~ms: >> 20 divides by 2^20, close to 10^6 */
-+ BUG_ON(scan_time > (ULONG_MAX / TIME_RATIO_SCALE));
-+
-+ return (unsigned int) ((unsigned long) scan_time *
-+ (TIME_RATIO_SCALE - ratio) / ratio);
-+}
-+
-+#define __round_mask(x, y) ((__typeof__(x))((y)-1))
-+#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1)
-+
-+static void uksm_vma_enter(struct vma_slot **slots, unsigned long num)
-+{
-+ struct scan_rung *rung;
-+
-+ rung = &uksm_scan_ladder[0];
-+ rung_add_new_slots(rung, slots, num);
-+}
-+
-+static struct vma_slot *batch_slots[SLOT_TREE_NODE_STORE_SIZE];
-+
-+static void uksm_enter_all_slots(void)
-+{
-+ struct vma_slot *slot;
-+ unsigned long index;
-+ struct list_head empty_vma_list;
-+ int i;
-+
-+ i = 0;
-+ index = 0;
-+ INIT_LIST_HEAD(&empty_vma_list);
-+
-+ spin_lock(&vma_slot_list_lock);
-+ while (!list_empty(&vma_slot_new)) {
-+ slot = list_entry(vma_slot_new.next,
-+ struct vma_slot, slot_list);
-+
-+ if (!slot->vma->anon_vma) {
-+ list_move(&slot->slot_list, &empty_vma_list);
-+ } else if (vma_can_enter(slot->vma)) {
-+ batch_slots[index++] = slot;
-+ list_del_init(&slot->slot_list);
-+ } else {
-+ list_move(&slot->slot_list, &vma_slot_noadd);
-+ }
-+
-+ if (++i == SPIN_LOCK_PERIOD ||
-+ (index && !(index % SLOT_TREE_NODE_STORE_SIZE))) {
-+ spin_unlock(&vma_slot_list_lock);
-+
-+ if (index && !(index % SLOT_TREE_NODE_STORE_SIZE)) {
-+ uksm_vma_enter(batch_slots, index);
-+ index = 0;
-+ }
-+ i = 0;
-+ cond_resched();
-+ spin_lock(&vma_slot_list_lock);
-+ }
-+ }
-+
-+ list_splice(&empty_vma_list, &vma_slot_new);
-+
-+ spin_unlock(&vma_slot_list_lock);
-+
-+ if (index)
-+ uksm_vma_enter(batch_slots, index);
-+}
-+
-+static inline int rung_round_finished(struct scan_rung *rung)
-+{
-+ return rung->flags & UKSM_RUNG_ROUND_FINISHED;
-+}
-+
-+static inline void judge_slot(struct vma_slot *slot)
-+{
-+ struct scan_rung *rung = slot->rung;
-+ unsigned long dedup;
-+ int deleted;
-+
-+ dedup = cal_dedup_ratio(slot);
-+ if (vma_fully_scanned(slot) && uksm_thrash_threshold)
-+ deleted = vma_rung_enter(slot, &uksm_scan_ladder[0]);
-+ else if (dedup && dedup >= uksm_abundant_threshold)
-+ deleted = vma_rung_up(slot);
-+ else
-+ deleted = vma_rung_down(slot);
-+
-+ slot->pages_merged = 0;
-+ slot->pages_cowed = 0;
-+ slot->this_sampled = 0;
-+
-+ if (vma_fully_scanned(slot))
-+ slot->pages_scanned = 0;
-+
-+ slot->last_scanned = slot->pages_scanned;
-+
-+ /* If it was deleted above, the rung has already been advanced. */
-+ if (!deleted)
-+ advance_current_scan(rung);
-+}
-+
-+static inline int hash_round_finished(void)
-+{
-+ if (scanned_virtual_pages > (uksm_pages_total >> 2)) {
-+ scanned_virtual_pages = 0;
-+ if (uksm_pages_scanned)
-+ fully_scanned_round++;
-+
-+ return 1;
-+ } else {
-+ return 0;
-+ }
-+}
-+
-+#define UKSM_MMSEM_BATCH 5
-+#define BUSY_RETRY 100
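-+/*
-+ * mmsem_batch keeps a slot's mmap_sem held across up to UKSM_MMSEM_BATCH
-+ * consecutive page scans, presumably to cut lock acquire/release traffic;
-+ * BUSY_RETRY bounds how many slots are skipped past while an mm's
-+ * mmap_sem is contended.
-+ */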
-+
-+/**
-+ * uksm_do_scan() - the main worker function.
-+ */
-+static noinline void uksm_do_scan(void)
-+{
-+ struct vma_slot *slot, *iter;
-+ struct mm_struct *busy_mm;
-+ unsigned char round_finished, all_rungs_empty;
-+ int i, err, mmsem_batch;
-+ unsigned long pcost;
-+ long long delta_exec;
-+ unsigned long vpages, max_cpu_ratio;
-+ unsigned long long start_time, end_time, scan_time;
-+ unsigned int expected_jiffies;
-+
-+ might_sleep();
-+
-+ vpages = 0;
-+
-+ start_time = task_sched_runtime(current);
-+ max_cpu_ratio = 0;
-+ mmsem_batch = 0;
-+
-+ for (i = 0; i < SCAN_LADDER_SIZE;) {
-+ struct scan_rung *rung = &uksm_scan_ladder[i];
-+ unsigned long ratio;
-+ int busy_retry;
-+
-+ if (!rung->pages_to_scan) {
-+ i++;
-+ continue;
-+ }
-+
-+ if (!rung->vma_root.num) {
-+ rung->pages_to_scan = 0;
-+ i++;
-+ continue;
-+ }
-+
-+ ratio = rung_real_ratio(rung->cpu_ratio);
-+ if (ratio > max_cpu_ratio)
-+ max_cpu_ratio = ratio;
-+
-+ busy_retry = BUSY_RETRY;
-+ /*
-+ * Do not consider rung_round_finished() here, just use up the
-+ * rung->pages_to_scan quota.
-+ */
-+ while (rung->pages_to_scan && rung->vma_root.num &&
-+ likely(!freezing(current))) {
-+ int reset = 0;
-+
-+ slot = rung->current_scan;
-+
-+ BUG_ON(vma_fully_scanned(slot));
-+
-+ if (mmsem_batch)
-+ err = 0;
-+ else
-+ err = try_down_read_slot_mmap_sem(slot);
-+
-+ if (err == -ENOENT) {
-+rm_slot:
-+ rung_rm_slot(slot);
-+ continue;
-+ }
-+
-+ busy_mm = slot->mm;
-+
-+ if (err == -EBUSY) {
-+ /* skip other vmas on the same mm */
-+ do {
-+ reset = advance_current_scan(rung);
-+ iter = rung->current_scan;
-+ busy_retry--;
-+ if (iter->vma->vm_mm != busy_mm ||
-+ !busy_retry || reset)
-+ break;
-+ } while (1);
-+
-+ if (iter->vma->vm_mm != busy_mm) {
-+ continue;
-+ } else {
-+ /* scan round finished */
-+ break;
-+ }
-+ }
-+
-+ BUG_ON(!vma_can_enter(slot->vma));
-+ if (uksm_test_exit(slot->vma->vm_mm)) {
-+ mmsem_batch = 0;
-+ up_read(&slot->vma->vm_mm->mmap_sem);
-+ goto rm_slot;
-+ }
-+
-+ if (mmsem_batch)
-+ mmsem_batch--;
-+ else
-+ mmsem_batch = UKSM_MMSEM_BATCH;
-+
-+ /* OK, we have taken the mmap_sem, ready to scan */
-+ scan_vma_one_page(slot);
-+ rung->pages_to_scan--;
-+ vpages++;
-+
-+ if (rung->current_offset + rung->step > slot->pages - 1
-+ || vma_fully_scanned(slot)) {
-+ up_read(&slot->vma->vm_mm->mmap_sem);
-+ judge_slot(slot);
-+ mmsem_batch = 0;
-+ } else {
-+ rung->current_offset += rung->step;
-+ if (!mmsem_batch)
-+ up_read(&slot->vma->vm_mm->mmap_sem);
-+ }
-+
-+ busy_retry = BUSY_RETRY;
-+ cond_resched();
-+ }
-+
-+ if (mmsem_batch) {
-+ up_read(&slot->vma->vm_mm->mmap_sem);
-+ mmsem_batch = 0;
-+ }
-+
-+ if (freezing(current))
-+ break;
-+
-+ cond_resched();
-+ }
-+ end_time = task_sched_runtime(current);
-+ delta_exec = end_time - start_time;
-+
-+ if (freezing(current))
-+ return;
-+
-+ cleanup_vma_slots();
-+ uksm_enter_all_slots();
-+
-+ round_finished = 1;
-+ all_rungs_empty = 1;
-+ for (i = 0; i < SCAN_LADDER_SIZE; i++) {
-+ struct scan_rung *rung = &uksm_scan_ladder[i];
-+
-+ if (rung->vma_root.num) {
-+ all_rungs_empty = 0;
-+ if (!rung_round_finished(rung))
-+ round_finished = 0;
-+ }
-+ }
-+
-+ if (all_rungs_empty)
-+ round_finished = 0;
-+
-+ if (round_finished) {
-+ round_update_ladder();
-+ uksm_eval_round++;
-+
-+ if (hash_round_finished() && rshash_adjust()) {
-+ /* Reset the unstable root iff hash strength changed */
-+ uksm_hash_round++;
-+ root_unstable_tree = RB_ROOT;
-+ free_all_tree_nodes(&unstable_tree_node_list);
-+ }
-+
-+ /*
-+ * A number of pages can hang around indefinitely on per-cpu
-+ * pagevecs, raised page count preventing write_protect_page
-+ * from merging them. Though it doesn't really matter much,
-+ * it is puzzling to see some stuck in pages_volatile until
-+ * other activity jostles them out, and they also prevented
-+ * LTP's KSM test from succeeding deterministically; so drain
-+ * them here (here rather than on entry to uksm_do_scan(),
-+ * so we don't IPI too often when pages_to_scan is set low).
-+ */
-+ lru_add_drain_all();
-+ }
-+
-+ if (vpages && delta_exec > 0) {
-+ pcost = (unsigned long) delta_exec / vpages;
-+ if (likely(uksm_ema_page_time))
-+ uksm_ema_page_time = ema(pcost, uksm_ema_page_time);
-+ else
-+ uksm_ema_page_time = pcost;
-+ }
-+
-+ uksm_calc_scan_pages();
-+ uksm_sleep_real = uksm_sleep_jiffies;
-+ /* in case of radical cpu bursts, apply the upper bound */
-+ end_time = task_sched_runtime(current);
-+ if (max_cpu_ratio && end_time > start_time) {
-+ scan_time = end_time - start_time;
-+ expected_jiffies = msecs_to_jiffies(
-+ scan_time_to_sleep(scan_time, max_cpu_ratio));
-+
-+ if (expected_jiffies > uksm_sleep_real)
-+ uksm_sleep_real = expected_jiffies;
-+
-+ /* We have a 1 second upper bound for responsiveness. */
-+ if (jiffies_to_msecs(uksm_sleep_real) > MSEC_PER_SEC)
-+ uksm_sleep_real = msecs_to_jiffies(1000);
-+ }
-+
-+ return;
-+}
-+
-+static int ksmd_should_run(void)
-+{
-+ return uksm_run & UKSM_RUN_MERGE;
-+}
-+
-+static int uksm_scan_thread(void *nothing)
-+{
-+ set_freezable();
-+ set_user_nice(current, 5);
-+
-+ while (!kthread_should_stop()) {
-+ mutex_lock(&uksm_thread_mutex);
-+ if (ksmd_should_run())
-+ uksm_do_scan();
-+ mutex_unlock(&uksm_thread_mutex);
-+
-+ try_to_freeze();
-+
-+ if (ksmd_should_run()) {
-+ schedule_timeout_interruptible(uksm_sleep_real);
-+ uksm_sleep_times++;
-+ } else {
-+ wait_event_freezable(uksm_thread_wait,
-+ ksmd_should_run() || kthread_should_stop());
-+ }
-+ }
-+ return 0;
-+}
-+
-+void rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc)
-+{
-+ struct stable_node *stable_node;
-+ struct node_vma *node_vma;
-+ struct rmap_item *rmap_item;
-+ int search_new_forks = 0;
-+ unsigned long address;
-+
-+ VM_BUG_ON_PAGE(!PageKsm(page), page);
-+ VM_BUG_ON_PAGE(!PageLocked(page), page);
-+
-+ stable_node = page_stable_node(page);
-+ if (!stable_node)
-+ return;
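-+ /*
-+ * Walk in two passes: pass 0 visits the vma each rmap_item was
-+ * scanned from, pass 1 (search_new_forks) visits the other vmas
-+ * sharing the same anon_vma, e.g. in forked children.
-+ */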
-+again:
-+ hlist_for_each_entry(node_vma, &stable_node->hlist, hlist) {
-+ hlist_for_each_entry(rmap_item, &node_vma->rmap_hlist, hlist) {
-+ struct anon_vma *anon_vma = rmap_item->anon_vma;
-+ struct anon_vma_chain *vmac;
-+ struct vm_area_struct *vma;
-+
-+ cond_resched();
-+ anon_vma_lock_read(anon_vma);
-+ anon_vma_interval_tree_foreach(vmac, &anon_vma->rb_root,
-+ 0, ULONG_MAX) {
-+ cond_resched();
-+ vma = vmac->vma;
-+ address = get_rmap_addr(rmap_item);
-+
-+ if (address < vma->vm_start ||
-+ address >= vma->vm_end)
-+ continue;
-+
-+ if ((rmap_item->slot->vma == vma) ==
-+ search_new_forks)
-+ continue;
-+
-+ if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
-+ continue;
-+
-+ if (!rwc->rmap_one(page, vma, address, rwc->arg)) {
-+ anon_vma_unlock_read(anon_vma);
-+ return;
-+ }
-+
-+ if (rwc->done && rwc->done(page)) {
-+ anon_vma_unlock_read(anon_vma);
-+ return;
-+ }
-+ }
-+ anon_vma_unlock_read(anon_vma);
-+ }
-+ }
-+ if (!search_new_forks++)
-+ goto again;
-+}
-+
-+#ifdef CONFIG_MIGRATION
-+/* Common ksm interface but may be specific to uksm */
-+void ksm_migrate_page(struct page *newpage, struct page *oldpage)
-+{
-+ struct stable_node *stable_node;
-+
-+ VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage);
-+ VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
-+ VM_BUG_ON(newpage->mapping != oldpage->mapping);
-+
-+ stable_node = page_stable_node(newpage);
-+ if (stable_node) {
-+ VM_BUG_ON(stable_node->kpfn != page_to_pfn(oldpage));
-+ stable_node->kpfn = page_to_pfn(newpage);
-+ /*
-+ * newpage->mapping was set in advance; now we need smp_wmb()
-+ * to make sure that the new stable_node->kpfn is visible
-+ * to get_ksm_page() before it can see that oldpage->mapping
-+ * has gone stale (or that PageSwapCache has been cleared).
-+ */
-+ smp_wmb();
-+ set_page_stable_node(oldpage, NULL);
-+ }
-+}
-+#endif /* CONFIG_MIGRATION */
-+
-+#ifdef CONFIG_MEMORY_HOTREMOVE
-+static struct stable_node *uksm_check_stable_tree(unsigned long start_pfn,
-+ unsigned long end_pfn)
-+{
-+ struct rb_node *node;
-+
-+ for (node = rb_first(root_stable_treep); node; node = rb_next(node)) {
-+ struct stable_node *stable_node;
-+
-+ stable_node = rb_entry(node, struct stable_node, node);
-+ if (stable_node->kpfn >= start_pfn &&
-+ stable_node->kpfn < end_pfn)
-+ return stable_node;
-+ }
-+ return NULL;
-+}
-+
-+static int uksm_memory_callback(struct notifier_block *self,
-+ unsigned long action, void *arg)
-+{
-+ struct memory_notify *mn = arg;
-+ struct stable_node *stable_node;
-+
-+ switch (action) {
-+ case MEM_GOING_OFFLINE:
-+ /*
-+ * Keep it very simple for now: just lock out ksmd and
-+ * MADV_UNMERGEABLE while any memory is going offline.
-+ * mutex_lock_nested() is necessary because lockdep was alarmed
-+ * that here we take uksm_thread_mutex inside notifier chain
-+ * mutex, and later take notifier chain mutex inside
-+ * uksm_thread_mutex to unlock it. But that's safe because both
-+ * are inside mem_hotplug_mutex.
-+ */
-+ mutex_lock_nested(&uksm_thread_mutex, SINGLE_DEPTH_NESTING);
-+ break;
-+
-+ case MEM_OFFLINE:
-+ /*
-+ * Most of the work is done by page migration; but there might
-+ * be a few stable_nodes left over, still pointing to struct
-+ * pages which have been offlined: prune those from the tree.
-+ */
-+ while ((stable_node = uksm_check_stable_tree(mn->start_pfn,
-+ mn->start_pfn + mn->nr_pages)) != NULL)
-+ remove_node_from_stable_tree(stable_node, 1, 1);
-+ /* fallthrough */
-+
-+ case MEM_CANCEL_OFFLINE:
-+ mutex_unlock(&uksm_thread_mutex);
-+ break;
-+ }
-+ return NOTIFY_OK;
-+}
-+#endif /* CONFIG_MEMORY_HOTREMOVE */
-+
-+#ifdef CONFIG_SYSFS
-+/*
-+ * This all compiles without CONFIG_SYSFS, but is a waste of space.
-+ */
-+
-+#define UKSM_ATTR_RO(_name) \
-+ static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
-+#define UKSM_ATTR(_name) \
-+ static struct kobj_attribute _name##_attr = \
-+ __ATTR(_name, 0644, _name##_show, _name##_store)
-+
-+static ssize_t max_cpu_percentage_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ return sprintf(buf, "%u\n", uksm_max_cpu_percentage);
-+}
-+
-+static ssize_t max_cpu_percentage_store(struct kobject *kobj,
-+ struct kobj_attribute *attr,
-+ const char *buf, size_t count)
-+{
-+ unsigned long max_cpu_percentage;
-+ int err;
-+
-+ err = kstrtoul(buf, 10, &max_cpu_percentage);
-+ if (err || max_cpu_percentage > 100)
-+ return -EINVAL;
-+
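-+ /* clamp the accepted value into the effective range [10, 99] */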
-+ if (max_cpu_percentage == 100)
-+ max_cpu_percentage = 99;
-+ else if (max_cpu_percentage < 10)
-+ max_cpu_percentage = 10;
-+
-+ uksm_max_cpu_percentage = max_cpu_percentage;
-+
-+ return count;
-+}
-+UKSM_ATTR(max_cpu_percentage);
-+
-+static ssize_t sleep_millisecs_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ return sprintf(buf, "%u\n", jiffies_to_msecs(uksm_sleep_jiffies));
-+}
-+
-+static ssize_t sleep_millisecs_store(struct kobject *kobj,
-+ struct kobj_attribute *attr,
-+ const char *buf, size_t count)
-+{
-+ unsigned long msecs;
-+ int err;
-+
-+ err = kstrtoul(buf, 10, &msecs);
-+ if (err || msecs > MSEC_PER_SEC)
-+ return -EINVAL;
-+
-+ uksm_sleep_jiffies = msecs_to_jiffies(msecs);
-+ uksm_sleep_saved = uksm_sleep_jiffies;
-+
-+ return count;
-+}
-+UKSM_ATTR(sleep_millisecs);
-+
-+static ssize_t cpu_governor_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ int n = sizeof(uksm_cpu_governor_str) / sizeof(char *);
-+ int i;
-+
-+ buf[0] = '\0';
-+ for (i = 0; i < n ; i++) {
-+ if (uksm_cpu_governor == i)
-+ strcat(buf, "[");
-+
-+ strcat(buf, uksm_cpu_governor_str[i]);
-+
-+ if (uksm_cpu_governor == i)
-+ strcat(buf, "]");
-+
-+ strcat(buf, " ");
-+ }
-+ strcat(buf, "\n");
-+
-+ return strlen(buf);
-+}
-+
-+static inline void init_performance_values(void)
-+{
-+ int i;
-+ struct scan_rung *rung;
-+ struct uksm_cpu_preset_s *preset = uksm_cpu_preset + uksm_cpu_governor;
-+
-+ for (i = 0; i < SCAN_LADDER_SIZE; i++) {
-+ rung = uksm_scan_ladder + i;
-+ rung->cpu_ratio = preset->cpu_ratio[i];
-+ rung->cover_msecs = preset->cover_msecs[i];
-+ }
-+
-+ uksm_max_cpu_percentage = preset->max_cpu;
-+}
-+
-+static ssize_t cpu_governor_store(struct kobject *kobj,
-+ struct kobj_attribute *attr,
-+ const char *buf, size_t count)
-+{
-+ int n = sizeof(uksm_cpu_governor_str) / sizeof(char *);
-+
-+ for (n--; n >= 0 ; n--) {
-+ if (!strncmp(buf, uksm_cpu_governor_str[n],
-+ strlen(uksm_cpu_governor_str[n])))
-+ break;
-+ }
-+
-+ if (n < 0)
-+ return -EINVAL;
-+ else
-+ uksm_cpu_governor = n;
-+
-+ init_performance_values();
-+
-+ return count;
-+}
-+UKSM_ATTR(cpu_governor);
-+
-+static ssize_t run_show(struct kobject *kobj, struct kobj_attribute *attr,
-+ char *buf)
-+{
-+ return sprintf(buf, "%u\n", uksm_run);
-+}
-+
-+static ssize_t run_store(struct kobject *kobj, struct kobj_attribute *attr,
-+ const char *buf, size_t count)
-+{
-+ int err;
-+ unsigned long flags;
-+
-+ err = kstrtoul(buf, 10, &flags);
-+ if (err || flags > UINT_MAX)
-+ return -EINVAL;
-+ if (flags > UKSM_RUN_MERGE)
-+ return -EINVAL;
-+
-+ mutex_lock(&uksm_thread_mutex);
-+ if (uksm_run != flags)
-+ uksm_run = flags;
-+ mutex_unlock(&uksm_thread_mutex);
-+
-+ if (flags & UKSM_RUN_MERGE)
-+ wake_up_interruptible(&uksm_thread_wait);
-+
-+ return count;
-+}
-+UKSM_ATTR(run);
-+
-+static ssize_t abundant_threshold_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ return sprintf(buf, "%u\n", uksm_abundant_threshold);
-+}
-+
-+static ssize_t abundant_threshold_store(struct kobject *kobj,
-+ struct kobj_attribute *attr,
-+ const char *buf, size_t count)
-+{
-+ int err;
-+ unsigned long flags;
-+
-+ err = kstrtoul(buf, 10, &flags);
-+ if (err || flags > 99)
-+ return -EINVAL;
-+
-+ uksm_abundant_threshold = flags;
-+
-+ return count;
-+}
-+UKSM_ATTR(abundant_threshold);
-+
-+static ssize_t thrash_threshold_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ return sprintf(buf, "%u\n", uksm_thrash_threshold);
-+}
-+
-+static ssize_t thrash_threshold_store(struct kobject *kobj,
-+ struct kobj_attribute *attr,
-+ const char *buf, size_t count)
-+{
-+ int err;
-+ unsigned long flags;
-+
-+ err = kstrtoul(buf, 10, &flags);
-+ if (err || flags > 99)
-+ return -EINVAL;
-+
-+ uksm_thrash_threshold = flags;
-+
-+ return count;
-+}
-+UKSM_ATTR(thrash_threshold);
-+
-+static ssize_t cpu_ratios_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ int i, size;
-+ struct scan_rung *rung;
-+ char *p = buf;
-+
-+ for (i = 0; i < SCAN_LADDER_SIZE; i++) {
-+ rung = &uksm_scan_ladder[i];
-+
-+ if (rung->cpu_ratio > 0)
-+ size = sprintf(p, "%d ", rung->cpu_ratio);
-+ else
-+ size = sprintf(p, "MAX/%d ",
-+ TIME_RATIO_SCALE / -rung->cpu_ratio);
-+
-+ p += size;
-+ }
-+
-+ *p++ = '\n';
-+ *p = '\0';
-+
-+ return p - buf;
-+}
-+
-+static ssize_t cpu_ratios_store(struct kobject *kobj,
-+ struct kobj_attribute *attr,
-+ const char *buf, size_t count)
-+{
-+ int i, cpuratios[SCAN_LADDER_SIZE], err;
-+ unsigned long value;
-+ struct scan_rung *rung;
-+ char *p, *orig, *end = NULL;
-+
-+ p = kzalloc(count, GFP_KERNEL);
-+ if (!p)
-+ return -ENOMEM;
-+
-+ memcpy(p, buf, count);
-+
-+ for (i = 0; i < SCAN_LADDER_SIZE; i++) {
-+ if (i != SCAN_LADDER_SIZE - 1) {
-+ end = strchr(p, ' ');
-+ if (!end)
-+ return -EINVAL;
-+
-+ *end = '\0';
-+ }
-+
-+ if (strstr(p, "MAX/")) {
-+ p = strchr(p, '/') + 1;
-+ err = kstrtoul(p, 10, &value);
-+ if (err || value > TIME_RATIO_SCALE || !value)
-+ return -EINVAL;
-+
-+ cpuratios[i] = -(int) (TIME_RATIO_SCALE / value);
-+ } else {
-+ err = kstrtoul(p, 10, &value);
-+ if (err || value > TIME_RATIO_SCALE || !value)
-+ return -EINVAL;
-+
-+ cpuratios[i] = value;
-+ }
-+
-+ p = end + 1;
-+ }
-+
-+ for (i = 0; i < SCAN_LADDER_SIZE; i++) {
-+ rung = &uksm_scan_ladder[i];
-+
-+ rung->cpu_ratio = cpuratios[i];
-+ }
-+
-+ return count;
-+}
-+UKSM_ATTR(cpu_ratios);
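-+/*
-+ * Worked example of the format parsed above, assuming SCAN_LADDER_SIZE
-+ * is 4: writing "20 40 MAX/4 MAX/1" sets the first two rungs to fixed
-+ * ratios 20 and 40; "MAX/4" is stored as -(TIME_RATIO_SCALE / 4), one
-+ * quarter of the maximum CPU ratio, and "MAX/1" as -TIME_RATIO_SCALE,
-+ * the maximum itself. cpu_ratios_show() prints them back the same way.
-+ */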
-+
-+static ssize_t eval_intervals_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ int i, size;
-+ struct scan_rung *rung;
-+ char *p = buf;
-+
-+ for (i = 0; i < SCAN_LADDER_SIZE; i++) {
-+ rung = &uksm_scan_ladder[i];
-+ size = sprintf(p, "%u ", rung->cover_msecs);
-+ p += size;
-+ }
-+
-+ *p++ = '\n';
-+ *p = '\0';
-+
-+ return p - buf;
-+}
-+
-+static ssize_t eval_intervals_store(struct kobject *kobj,
-+ struct kobj_attribute *attr,
-+ const char *buf, size_t count)
-+{
-+ int i, err;
-+ unsigned long values[SCAN_LADDER_SIZE];
-+ struct scan_rung *rung;
-+ char *p, *base, *end = NULL;
-+ ssize_t ret = count;
-+
-+ base = p = kzalloc(count + 2, GFP_KERNEL);
-+ if (!p)
-+ return -ENOMEM;
-+
-+ memcpy(p, buf, count);
-+
-+ for (i = 0; i < SCAN_LADDER_SIZE; i++) {
-+ if (i != SCAN_LADDER_SIZE - 1) {
-+ end = strchr(p, ' ');
-+ if (!end) {
-+ ret = -EINVAL;
-+ goto out;
-+ }
-+
-+ *end = '\0';
-+ }
-+
-+ err = kstrtoul(p, 10, &values[i]);
-+ if (err) {
-+ ret = -EINVAL;
-+ goto out;
-+ }
-+
-+ p = end + 1;
-+ }
-+
-+ for (i = 0; i < SCAN_LADDER_SIZE; i++) {
-+ rung = &uksm_scan_ladder[i];
-+
-+ rung->cover_msecs = values[i];
-+ }
-+
-+out:
-+ /* p was advanced while parsing; free the original allocation */
-+ kfree(base);
-+ return ret;
-+}
-+UKSM_ATTR(eval_intervals);
-+
-+static ssize_t ema_per_page_time_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ return sprintf(buf, "%lu\n", uksm_ema_page_time);
-+}
-+UKSM_ATTR_RO(ema_per_page_time);
-+
-+static ssize_t pages_shared_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ return sprintf(buf, "%lu\n", uksm_pages_shared);
-+}
-+UKSM_ATTR_RO(pages_shared);
-+
-+static ssize_t pages_sharing_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ return sprintf(buf, "%lu\n", uksm_pages_sharing);
-+}
-+UKSM_ATTR_RO(pages_sharing);
-+
-+static ssize_t pages_unshared_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ return sprintf(buf, "%lu\n", uksm_pages_unshared);
-+}
-+UKSM_ATTR_RO(pages_unshared);
-+
-+static ssize_t full_scans_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ return sprintf(buf, "%llu\n", fully_scanned_round);
-+}
-+UKSM_ATTR_RO(full_scans);
-+
-+static ssize_t pages_scanned_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ unsigned long base = 0;
-+ u64 delta, ret;
-+
-+ if (pages_scanned_stored) {
-+ base = pages_scanned_base;
-+ ret = pages_scanned_stored;
-+ delta = uksm_pages_scanned >> base;
-+ if (CAN_OVERFLOW_U64(ret, delta)) {
-+ ret >>= 1;
-+ delta >>= 1;
-+ base++;
-+ ret += delta;
-+ }
-+ } else {
-+ ret = uksm_pages_scanned;
-+ }
-+
-+ while (ret > ULONG_MAX) {
-+ ret >>= 1;
-+ base++;
-+ }
-+
-+ if (base)
-+ return sprintf(buf, "%lu * 2^%lu\n", (unsigned long)ret, base);
-+ else
-+ return sprintf(buf, "%lu\n", (unsigned long)ret);
-+}
-+UKSM_ATTR_RO(pages_scanned);
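-+/*
-+ * pages_scanned_show() reports with a power-of-two scale once the
-+ * 64-bit count no longer fits in an unsigned long: an output such as
-+ * "1234 * 2^3" reads as about 1234 << 3 = 9872 pages scanned.
-+ */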
-+
-+static ssize_t hash_strength_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ return sprintf(buf, "%lu\n", hash_strength);
-+}
-+UKSM_ATTR_RO(hash_strength);
-+
-+static ssize_t sleep_times_show(struct kobject *kobj,
-+ struct kobj_attribute *attr, char *buf)
-+{
-+ return sprintf(buf, "%llu\n", uksm_sleep_times);
-+}
-+UKSM_ATTR_RO(sleep_times);
-+
-+static struct attribute *uksm_attrs[] = {
-+ &max_cpu_percentage_attr.attr,
-+ &sleep_millisecs_attr.attr,
-+ &cpu_governor_attr.attr,
-+ &run_attr.attr,
-+ &ema_per_page_time_attr.attr,
-+ &pages_shared_attr.attr,
-+ &pages_sharing_attr.attr,
-+ &pages_unshared_attr.attr,
-+ &full_scans_attr.attr,
-+ &pages_scanned_attr.attr,
-+ &hash_strength_attr.attr,
-+ &sleep_times_attr.attr,
-+ &thrash_threshold_attr.attr,
-+ &abundant_threshold_attr.attr,
-+ &cpu_ratios_attr.attr,
-+ &eval_intervals_attr.attr,
-+ NULL,
-+};
-+
-+static struct attribute_group uksm_attr_group = {
-+ .attrs = uksm_attrs,
-+ .name = "uksm",
-+};
-+#endif /* CONFIG_SYSFS */
-+
-+static inline void init_scan_ladder(void)
-+{
-+ int i;
-+ struct scan_rung *rung;
-+
-+ for (i = 0; i < SCAN_LADDER_SIZE; i++) {
-+ rung = uksm_scan_ladder + i;
-+ slot_tree_init_root(&rung->vma_root);
-+ }
-+
-+ init_performance_values();
-+ uksm_calc_scan_pages();
-+}
-+
-+static inline int cal_positive_negative_costs(void)
-+{
-+ struct page *p1, *p2;
-+ unsigned char *addr1, *addr2;
-+ unsigned long i, time_start, hash_cost;
-+ unsigned long loopnum = 0;
-+
-+ /* IMPORTANT: volatile is needed to prevent over-optimization by gcc. */
-+ volatile u32 hash;
-+ volatile int ret;
-+
-+ p1 = alloc_page(GFP_KERNEL);
-+ if (!p1)
-+ return -ENOMEM;
-+
-+ p2 = alloc_page(GFP_KERNEL);
-+ if (!p2) {
-+ __free_page(p1);
-+ return -ENOMEM;
-+ }
-+
-+ addr1 = kmap_atomic(p1);
-+ addr2 = kmap_atomic(p2);
-+ memset(addr1, prandom_u32(), PAGE_SIZE);
-+ memcpy(addr2, addr1, PAGE_SIZE);
-+
-+ /* make sure that the two pages differ in last byte */
-+ addr2[PAGE_SIZE-1] = ~addr2[PAGE_SIZE-1];
-+ kunmap_atomic(addr2);
-+ kunmap_atomic(addr1);
-+
-+ time_start = jiffies;
-+ while (jiffies - time_start < 100) {
-+ for (i = 0; i < 100; i++)
-+ hash = page_hash(p1, HASH_STRENGTH_FULL, 0);
-+ loopnum += 100;
-+ }
-+ hash_cost = (jiffies - time_start);
-+
-+ time_start = jiffies;
-+ for (i = 0; i < loopnum; i++)
-+ ret = pages_identical(p1, p2);
-+ memcmp_cost = HASH_STRENGTH_FULL * (jiffies - time_start);
-+ memcmp_cost /= hash_cost;
-+ pr_info("UKSM: relative memcmp_cost = %lu hash=%u cmp_ret=%d.\n",
-+ memcmp_cost, hash, ret);
-+
-+ __free_page(p1);
-+ __free_page(p2);
-+ return 0;
-+}
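-+/*
-+ * Worked example for the calibration above: if loopnum full-strength
-+ * hashes take hash_cost = 50 jiffies and loopnum pages_identical()
-+ * calls take 25 jiffies, then memcmp_cost = HASH_STRENGTH_FULL * 25 / 50,
-+ * i.e. one full-strength hash costs about as much as two memcmp runs.
-+ */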
-+
-+static int init_zeropage_hash_table(void)
-+{
-+ struct page *page;
-+ char *addr;
-+ int i;
-+
-+ page = alloc_page(GFP_KERNEL);
-+ if (!page)
-+ return -ENOMEM;
-+
-+ addr = kmap_atomic(page);
-+ memset(addr, 0, PAGE_SIZE);
-+ kunmap_atomic(addr);
-+
-+ zero_hash_table = kmalloc_array(HASH_STRENGTH_MAX, sizeof(u32),
-+ GFP_KERNEL);
-+ if (!zero_hash_table) {
-+ __free_page(page);
-+ return -ENOMEM;
-+ }
-+
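-+ /* precompute the zero page's hash at every sampling strength, so zero
-+ * pages can be recognised cheaply whatever strength the scanner uses */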
-+ for (i = 0; i < HASH_STRENGTH_MAX; i++)
-+ zero_hash_table[i] = page_hash(page, i, 0);
-+
-+ __free_page(page);
-+
-+ return 0;
-+}
-+
-+static inline int init_random_sampling(void)
-+{
-+ unsigned long i;
-+
-+ random_nums = kmalloc(PAGE_SIZE, GFP_KERNEL);
-+ if (!random_nums)
-+ return -ENOMEM;
-+
-+ for (i = 0; i < HASH_STRENGTH_FULL; i++)
-+ random_nums[i] = i;
-+
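-+ /* Fisher-Yates shuffle: puts the sampling offsets in uniformly
-+ * random order */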
-+ for (i = 0; i < HASH_STRENGTH_FULL; i++) {
-+ unsigned long rand_range, swap_index, tmp;
-+
-+ rand_range = HASH_STRENGTH_FULL - i;
-+ swap_index = i + prandom_u32() % rand_range;
-+ tmp = random_nums[i];
-+ random_nums[i] = random_nums[swap_index];
-+ random_nums[swap_index] = tmp;
-+ }
-+
-+ rshash_state.state = RSHASH_NEW;
-+ rshash_state.below_count = 0;
-+ rshash_state.lookup_window_index = 0;
-+
-+ return cal_positive_negative_costs();
-+}
-+
-+static int __init uksm_slab_init(void)
-+{
-+ rmap_item_cache = UKSM_KMEM_CACHE(rmap_item, 0);
-+ if (!rmap_item_cache)
-+ goto out;
-+
-+ stable_node_cache = UKSM_KMEM_CACHE(stable_node, 0);
-+ if (!stable_node_cache)
-+ goto out_free1;
-+
-+ node_vma_cache = UKSM_KMEM_CACHE(node_vma, 0);
-+ if (!node_vma_cache)
-+ goto out_free2;
-+
-+ vma_slot_cache = UKSM_KMEM_CACHE(vma_slot, 0);
-+ if (!vma_slot_cache)
-+ goto out_free3;
-+
-+ tree_node_cache = UKSM_KMEM_CACHE(tree_node, 0);
-+ if (!tree_node_cache)
-+ goto out_free4;
-+
-+ return 0;
-+
-+out_free4:
-+ kmem_cache_destroy(vma_slot_cache);
-+out_free3:
-+ kmem_cache_destroy(node_vma_cache);
-+out_free2:
-+ kmem_cache_destroy(stable_node_cache);
-+out_free1:
-+ kmem_cache_destroy(rmap_item_cache);
-+out:
-+ return -ENOMEM;
-+}
-+
-+static void __init uksm_slab_free(void)
-+{
-+ kmem_cache_destroy(stable_node_cache);
-+ kmem_cache_destroy(rmap_item_cache);
-+ kmem_cache_destroy(node_vma_cache);
-+ kmem_cache_destroy(vma_slot_cache);
-+ kmem_cache_destroy(tree_node_cache);
-+}
-+
-+/* Same interface as KSM's ksm_madvise(), but the behaviour differs. */
-+int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
-+ unsigned long end, int advice, unsigned long *vm_flags)
-+{
-+ int err;
-+
-+ switch (advice) {
-+ case MADV_MERGEABLE:
-+ return 0; /* just ignore the advice */
-+
-+ case MADV_UNMERGEABLE:
-+ if (!(*vm_flags & VM_MERGEABLE) || !uksm_flags_can_scan(*vm_flags))
-+ return 0; /* just ignore the advice */
-+
-+ if (vma->anon_vma) {
-+ err = unmerge_uksm_pages(vma, start, end);
-+ if (err)
-+ return err;
-+ }
-+
-+ uksm_remove_vma(vma);
-+ *vm_flags &= ~VM_MERGEABLE;
-+ break;
-+ }
-+
-+ return 0;
-+}
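-+/*
-+ * Unlike mainline KSM, UKSM scans eligible VMAs without opt-in, so
-+ * MADV_MERGEABLE above is accepted as a no-op and only MADV_UNMERGEABLE
-+ * changes anything. Illustrative userspace sketch (not part of this
-+ * patch):
-+ *
-+ *	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
-+ *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
-+ *	madvise(p, len, MADV_UNMERGEABLE);	(opt this range out)
-+ */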
-+
-+/* Same interface as KSM's ksm_might_need_to_copy(), with the same behaviour. */
-+struct page *ksm_might_need_to_copy(struct page *page,
-+ struct vm_area_struct *vma, unsigned long address)
-+{
-+ struct anon_vma *anon_vma = page_anon_vma(page);
-+ struct page *new_page;
-+
-+ if (PageKsm(page)) {
-+ if (page_stable_node(page))
-+ return page; /* no need to copy it */
-+ } else if (!anon_vma) {
-+ return page; /* no need to copy it */
-+ } else if (anon_vma->root == vma->anon_vma->root &&
-+ page->index == linear_page_index(vma, address)) {
-+ return page; /* still no need to copy it */
-+ }
-+ if (!PageUptodate(page))
-+ return page; /* let do_swap_page report the error */
-+
-+ new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
-+ if (new_page) {
-+ copy_user_highpage(new_page, page, address, vma);
-+
-+ SetPageDirty(new_page);
-+ __SetPageUptodate(new_page);
-+ __SetPageLocked(new_page);
-+ }
-+
-+ return new_page;
-+}
-+
-+static int __init uksm_init(void)
-+{
-+ struct task_struct *uksm_thread;
-+ int err;
-+
-+ uksm_sleep_jiffies = msecs_to_jiffies(100);
-+ uksm_sleep_saved = uksm_sleep_jiffies;
-+
-+ slot_tree_init();
-+ init_scan_ladder();
-+
-+ err = init_random_sampling();
-+ if (err)
-+ goto out_free2;
-+
-+ err = uksm_slab_init();
-+ if (err)
-+ goto out_free1;
-+
-+ err = init_zeropage_hash_table();
-+ if (err)
-+ goto out_free0;
-+
-+ uksm_thread = kthread_run(uksm_scan_thread, NULL, "uksmd");
-+ if (IS_ERR(uksm_thread)) {
-+ pr_err("uksm: creating kthread failed\n");
-+ err = PTR_ERR(uksm_thread);
-+ goto out_free;
-+ }
-+
-+#ifdef CONFIG_SYSFS
-+ err = sysfs_create_group(mm_kobj, &uksm_attr_group);
-+ if (err) {
-+ pr_err("uksm: register sysfs failed\n");
-+ kthread_stop(uksm_thread);
-+ goto out_free;
-+ }
-+#else
-+ uksm_run = UKSM_RUN_MERGE; /* no way for user to start it */
-+#endif /* CONFIG_SYSFS */
-+
-+#ifdef CONFIG_MEMORY_HOTREMOVE
-+ /*
-+ * Choose a high priority since the callback takes uksm_thread_mutex:
-+ * later callbacks could only be taking locks which nest within that.
-+ */
-+ hotplug_memory_notifier(uksm_memory_callback, 100);
-+#endif
-+ return 0;
-+
-+out_free:
-+ kfree(zero_hash_table);
-+out_free0:
-+ uksm_slab_free();
-+out_free1:
-+ kfree(random_nums);
-+out_free2:
-+ kfree(uksm_scan_ladder);
-+ return err;
-+}
-+
-+#ifdef MODULE
-+subsys_initcall(uksm_init);
-+#else
-+late_initcall(uksm_init);
-+#endif
-+
-diff -Nur a/mm/vmstat.c b/mm/vmstat.c
---- a/mm/vmstat.c 2018-05-25 15:18:02.000000000 +0100
-+++ b/mm/vmstat.c 2018-05-26 19:30:55.791140570 +0100
-@@ -1091,6 +1091,9 @@
- "nr_dirtied",
- "nr_written",
-
-+#ifdef CONFIG_UKSM
-+ "nr_uksm_zero_pages",
-+#endif
- /* enum writeback_stat_item counters */
- "nr_dirty_threshold",
- "nr_dirty_background_threshold",
diff --git a/sys-kernel/linux-sources-redcore-lts/linux-sources-redcore-lts-4.14.95-r1.ebuild b/sys-kernel/linux-sources-redcore-lts/linux-sources-redcore-lts-4.14.95-r1.ebuild
deleted file mode 100644
index 27a6fcaa..00000000
--- a/sys-kernel/linux-sources-redcore-lts/linux-sources-redcore-lts-4.14.95-r1.ebuild
+++ /dev/null
@@ -1,90 +0,0 @@
-# Copyright 1999-2017 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-EAPI=6
-
-inherit eutils
-
-EXTRAVERSION="redcore-lts-r1"
-KV_FULL="${PV}-${EXTRAVERSION}"
-KV_MAJOR="4.14"
-
-DESCRIPTION="Official Redcore Linux Kernel Sources"
-HOMEPAGE="https://redcorelinux.org"
-SRC_URI="https://cdn.kernel.org/pub/linux/kernel/v4.x/linux-${PV}.tar.xz"
-
-KEYWORDS="amd64"
-LICENSE="GPL-2"
-SLOT="${PVR}"
-IUSE=""
-
-RESTRICT="strip mirror"
-DEPEND="
- app-arch/lz4
- app-arch/xz-utils
- sys-devel/autoconf
- sys-devel/bc
- sys-devel/make"
-RDEPEND="${DEPEND}"
-
-PATCHES=(
- "${FILESDIR}"/"${KV_MAJOR}"-introduce-NUMA-identity-node-sched-domain.patch
- "${FILESDIR}"/"${KV_MAJOR}"-k10temp-add-ZEN-support.patch
- "${FILESDIR}"/"${KV_MAJOR}"-mute-pps_state_mismatch.patch
- "${FILESDIR}"/"${KV_MAJOR}"-restore-SD_PREFER_SIBLING-on-MC-domains.patch
- "${FILESDIR}"/"${KV_MAJOR}"-Revert-ath10k-activate-user-space-firmware-loading.patch
- "${FILESDIR}"/"${KV_MAJOR}"-linux-hardened.patch
- "${FILESDIR}"/"${KV_MAJOR}"-uksm-linux-hardened.patch
- "${FILESDIR}"/"${KV_MAJOR}"-0001-MuQSS-version-0.162-CPU-scheduler-linux-hardened.patch
- "${FILESDIR}"/"${KV_MAJOR}"-0002-Make-preemptible-kernel-default.patch
- "${FILESDIR}"/"${KV_MAJOR}"-0003-Expose-vmsplit-for-our-poor-32-bit-users.patch
- "${FILESDIR}"/"${KV_MAJOR}"-0004-Create-highres-timeout-variants-of-schedule_timeout-.patch
- "${FILESDIR}"/"${KV_MAJOR}"-0005-Special-case-calls-of-schedule_timeout-1-to-use-the-.patch
- "${FILESDIR}"/"${KV_MAJOR}"-0006-Convert-msleep-to-use-hrtimers-when-active.patch
- "${FILESDIR}"/"${KV_MAJOR}"-0007-Replace-all-schedule-timeout-1-with-schedule_min_hrt.patch
- "${FILESDIR}"/"${KV_MAJOR}"-0008-Replace-all-calls-to-schedule_timeout_interruptible-.patch
- "${FILESDIR}"/"${KV_MAJOR}"-0009-Replace-all-calls-to-schedule_timeout_uninterruptibl.patch
- "${FILESDIR}"/"${KV_MAJOR}"-0010-Don-t-use-hrtimer-overlay-when-pm_freezing-since-som.patch
- "${FILESDIR}"/"${KV_MAJOR}"-0011-Make-hrtimer-granularity-and-minimum-hrtimeout-confi.patch
- "${FILESDIR}"/"${KV_MAJOR}"-0012-Reinstate-default-Hz-of-100-in-combination-with-MuQS.patch
- "${FILESDIR}"/"${KV_MAJOR}"-0013-Make-threaded-IRQs-optionally-the-default-which-can-.patch
- "${FILESDIR}"/"${KV_MAJOR}"-0014-Swap-sucks.patch
- "${FILESDIR}"/"${KV_MAJOR}"-0015-MuQSS.c-needs-irq_regs.h-to-use-get_irq_regs.patch
- "${FILESDIR}"/"${KV_MAJOR}"-0016-unfuck-MuQSS-on-linux-4_14_15+.patch
- "${FILESDIR}"/"${KV_MAJOR}"-0017-unfuck-MuQSS-on-linux-4_14_75+.patch
-)
-
-S="${WORKDIR}"/linux-"${PV}"
-
-pkg_setup() {
- export KBUILD_BUILD_USER="nexus"
- export KBUILD_BUILD_HOST="nexus.redcorelinux.org"
-
- export REAL_ARCH="$ARCH"
- unset ARCH ; unset LDFLAGS #will interfere with Makefile if set
-}
-
-src_prepare() {
- default
- emake mrproper
- sed -ri "s|^(EXTRAVERSION =).*|\1 -${EXTRAVERSION}|" Makefile
- cp "${FILESDIR}"/"${KV_MAJOR}"-amd64.config .config
- rm -rf $(find . -type f|grep -F \.orig)
-}
-
-src_compile() {
- emake prepare modules_prepare
-}
-
-src_install() {
- dodir usr/src/linux-"${KV_FULL}"
- cp -ax "${S}"/* "${D}"usr/src/linux-"${KV_FULL}"
-}
-
-_kernel_sources_delete() {
- rm -rf "${ROOT}"usr/src/linux-"${KV_FULL}"
-}
-
-pkg_postrm() {
- _kernel_sources_delete
-}
diff --git a/sys-kernel/linux-sources-redcore-lts/linux-sources-redcore-lts-4.19.20-r1.ebuild b/sys-kernel/linux-sources-redcore-lts/linux-sources-redcore-lts-4.19.20-r2.ebuild
index d2326b96..bbab8f6d 100644
--- a/sys-kernel/linux-sources-redcore-lts/linux-sources-redcore-lts-4.19.20-r1.ebuild
+++ b/sys-kernel/linux-sources-redcore-lts/linux-sources-redcore-lts-4.19.20-r2.ebuild
@@ -5,7 +5,7 @@ EAPI=6
inherit eutils
-EXTRAVERSION="redcore-lts-r1"
+EXTRAVERSION="redcore-lts-r2"
KV_FULL="${PV}-${EXTRAVERSION}"
KV_MAJOR="4.19"