Diffstat (limited to 'sys-kernel')
-rw-r--r--  sys-kernel/linux-image-redcore-lts-legacy/Manifest | 2
-rw-r--r--  sys-kernel/linux-image-redcore-lts-legacy/files/4.19-amd64.config | 13
-rw-r--r--  sys-kernel/linux-image-redcore-lts-legacy/files/4.19-linux-hardened.patch | 1312
-rw-r--r--  sys-kernel/linux-image-redcore-lts-legacy/linux-image-redcore-lts-legacy-4.19.108.ebuild (renamed from sys-kernel/linux-image-redcore-lts-legacy/linux-image-redcore-lts-legacy-4.19.104.ebuild) | 0
-rw-r--r--  sys-kernel/linux-sources-redcore-lts-legacy/Manifest | 2
-rw-r--r--  sys-kernel/linux-sources-redcore-lts-legacy/files/4.19-amd64.config | 13
-rw-r--r--  sys-kernel/linux-sources-redcore-lts-legacy/files/4.19-linux-hardened.patch | 1312
-rw-r--r--  sys-kernel/linux-sources-redcore-lts-legacy/linux-sources-redcore-lts-legacy-4.19.108.ebuild (renamed from sys-kernel/linux-sources-redcore-lts-legacy/linux-sources-redcore-lts-legacy-4.19.104.ebuild) | 0
8 files changed, 2444 insertions, 210 deletions
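Note: besides tracking the 4.19.104 -> 4.19.108 version bump, the refreshed 4.19-linux-hardened.patch below wires in several runtime-tunable hardening knobs (unprivileged_userns_clone, sysfs_restricted, fifo_restrictions, ip_blackhole). A minimal sketch of inspecting and toggling them on a booted kernel, assuming the entries land under the usual kernel.* and net.core.* sysctl prefixes implied by the kern_table and net_core_table hunks further down:

    # check whether unprivileged user namespace creation is allowed (0 = denied;
    # the shipped 4.19-amd64.config leaves CONFIG_USER_NS_UNPRIVILEGED unset)
    sysctl kernel.unprivileged_userns_clone
    # keep sysfs/debugfs directories root-only (1 is the patch default)
    sysctl -w kernel.sysfs_restricted=1
    # relax the sticky-directory FIFO restriction if it breaks a legitimate workload
    sysctl -w kernel.fifo_restrictions=0
    # disable the stealth-networking blackhole, e.g. to answer ICMP echo again
    sysctl -w net.core.ip_blackhole=0
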
diff --git a/sys-kernel/linux-image-redcore-lts-legacy/Manifest b/sys-kernel/linux-image-redcore-lts-legacy/Manifest
index e9c22b9a..2847081b 100644
--- a/sys-kernel/linux-image-redcore-lts-legacy/Manifest
+++ b/sys-kernel/linux-image-redcore-lts-legacy/Manifest
@@ -1 +1 @@
-DIST linux-4.19.104.tar.xz 103438092 BLAKE2B 10324dae2d3e29048f4ae9474eb3b9dc6c210e7d0fad017b07620ca600cf76bf144b026a47ef7ff11e1dbb9ed7e60bba7d51e8ef6b278b32cae2205ac478a48c SHA512 08bd31c37e413a5f1f785a0fd53d7f044255fc30bf6af15876c15a88ad533c1895d905f8708d93c7fa769b4f6b13af46b442ff213b8ac8a936fee433c8fcd012
+DIST linux-4.19.108.tar.xz 103408396 BLAKE2B d8130982ef7b1bbcdc6b1e4099b6bf5bd707f722b74b2e8cf8bcd45e510f3e39c8418851c923013a6919c4c45c43c65fae8f39ba92563f4645c1c3a3fcfd2937 SHA512 c39099f314fe22e902f561f0dd17fad021a9b8d4ad3c154709a7680e710980a44bcadf46092da7f1a0a9c6910a9b2a8116cc874f8991e197ff05fafe4312da2d
diff --git a/sys-kernel/linux-image-redcore-lts-legacy/files/4.19-amd64.config b/sys-kernel/linux-image-redcore-lts-legacy/files/4.19-amd64.config
index 274f6f27..7dbc8f7a 100644
--- a/sys-kernel/linux-image-redcore-lts-legacy/files/4.19-amd64.config
+++ b/sys-kernel/linux-image-redcore-lts-legacy/files/4.19-amd64.config
@@ -1,6 +1,6 @@
#
# Automatically generated file; DO NOT EDIT.
-# Linux/x86 4.19.95-redcore-lts Kernel Configuration
+# Linux/x86 4.19.108-redcore-lts-legacy Kernel Configuration
#
#
@@ -159,6 +159,7 @@ CONFIG_NAMESPACES=y
CONFIG_UTS_NS=y
CONFIG_IPC_NS=y
CONFIG_USER_NS=y
+# CONFIG_USER_NS_UNPRIVILEGED is not set
CONFIG_PID_NS=y
CONFIG_NET_NS=y
# CONFIG_CHECKPOINT_RESTORE is not set
@@ -8743,6 +8744,16 @@ CONFIG_SECURITY_YAMA=y
CONFIG_DEFAULT_SECURITY_APPARMOR=y
# CONFIG_DEFAULT_SECURITY_DAC is not set
CONFIG_DEFAULT_SECURITY="apparmor"
+
+#
+# Hardened Enhancements
+#
+CONFIG_HARDENED_RANDOM=y
+CONFIG_HARDENED_STEALTH_NETWORKING=y
+CONFIG_HARDENED_NO_SIMULT_CONNECT=y
+CONFIG_HARDENED_SYSFS_RESTRICT=y
+CONFIG_HARDENED_FIFO=y
+# CONFIG_HARDENED_MODULE_LOAD is not set
CONFIG_XOR_BLOCKS=m
CONFIG_ASYNC_CORE=m
CONFIG_ASYNC_MEMCPY=m
diff --git a/sys-kernel/linux-image-redcore-lts-legacy/files/4.19-linux-hardened.patch b/sys-kernel/linux-image-redcore-lts-legacy/files/4.19-linux-hardened.patch
index 3cba43ca..2f477148 100644
--- a/sys-kernel/linux-image-redcore-lts-legacy/files/4.19-linux-hardened.patch
+++ b/sys-kernel/linux-image-redcore-lts-legacy/files/4.19-linux-hardened.patch
@@ -1,5 +1,5 @@
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
-index a29301d6e6c6..d3a259e762fa 100644
+index 8bf0c0532046..c81c652ecf44 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -496,16 +496,6 @@
@@ -19,7 +19,7 @@ index a29301d6e6c6..d3a259e762fa 100644
cio_ignore= [S390]
See Documentation/s390/CommonIO for details.
clk_ignore_unused
-@@ -3165,6 +3155,11 @@
+@@ -3207,6 +3197,11 @@
the specified number of seconds. This is to be used if
your oopses keep scrolling off the screen.
@@ -71,10 +71,10 @@ index 37a679501ddc..59b747920f4d 100644
The value in this file affects behavior of handling NMI. When the
diff --git a/Makefile b/Makefile
-index f7e7e365e2ff..7c69091b65a0 100644
+index 313f0c8dd66f..8e66fca67fd0 100644
--- a/Makefile
+++ b/Makefile
-@@ -693,6 +693,9 @@ stackp-flags-$(CONFIG_STACKPROTECTOR_STRONG) := -fstack-protector-strong
+@@ -696,6 +696,9 @@ stackp-flags-$(CONFIG_STACKPROTECTOR_STRONG) := -fstack-protector-strong
KBUILD_CFLAGS += $(stackp-flags-y)
ifeq ($(cc-name),clang)
@@ -115,10 +115,10 @@ index a336548487e6..bbe821420e7a 100644
Enabling this switches the refcounting infrastructure from a fast
unchecked atomic_t implementation to a fully state checked
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
-index 8790a29d0af4..265c6edd859f 100644
+index 51fe21f5d078..f345755446d9 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
-@@ -1031,6 +1031,7 @@ endif
+@@ -1033,6 +1033,7 @@ endif
config ARM64_SW_TTBR0_PAN
bool "Emulate Privileged Access Never using TTBR0_EL1 switching"
@@ -126,7 +126,7 @@ index 8790a29d0af4..265c6edd859f 100644
help
Enabling this option prevents the kernel from accessing
user-space memory directly by pointing TTBR0_EL1 to a reserved
-@@ -1206,6 +1207,7 @@ config RANDOMIZE_BASE
+@@ -1208,6 +1209,7 @@ config RANDOMIZE_BASE
bool "Randomize the address of the kernel image"
select ARM64_MODULE_PLTS if MODULES
select RELOCATABLE
@@ -147,7 +147,7 @@ index 69c9170bdd24..a786227db0e3 100644
Generate a warning if any W+X mappings are found at boot.
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
-index db8d364f8476..67441db36c07 100644
+index 1a4f8b67bbe8..85273063eb56 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -1,4 +1,3 @@
@@ -187,10 +187,10 @@ index 433b9554c6a1..1f4b06317c9f 100644
#ifdef __AARCH64EB__
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
-index 7f1628effe6d..38bd2f95a961 100644
+index d6a49bb07a5f..16e4214c2305 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
-@@ -481,9 +481,9 @@ unsigned long arch_align_stack(unsigned long sp)
+@@ -517,9 +517,9 @@ unsigned long arch_align_stack(unsigned long sp)
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
if (is_compat_task())
@@ -203,7 +203,7 @@ index 7f1628effe6d..38bd2f95a961 100644
/*
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
-index e76d16ac2776..07dcedbb271e 100644
+index af35f5caadbe..34e88af114bd 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1189,8 +1189,7 @@ config VM86
@@ -216,7 +216,7 @@ index e76d16ac2776..07dcedbb271e 100644
depends on MODIFY_LDT_SYSCALL
---help---
This option is required by programs like Wine to run 16-bit
-@@ -2274,7 +2273,7 @@ config COMPAT_VDSO
+@@ -2319,7 +2318,7 @@ config COMPAT_VDSO
choice
prompt "vsyscall table for legacy applications"
depends on X86_64
@@ -225,7 +225,7 @@ index e76d16ac2776..07dcedbb271e 100644
help
Legacy user code that does not know how to find the vDSO expects
to be able to issue three syscalls by calling fixed addresses in
-@@ -2355,8 +2354,7 @@ config CMDLINE_OVERRIDE
+@@ -2400,8 +2399,7 @@ config CMDLINE_OVERRIDE
be set to 'N' under normal conditions.
config MODIFY_LDT_SYSCALL
@@ -236,7 +236,7 @@ index e76d16ac2776..07dcedbb271e 100644
Linux can allow user programs to install a per-process x86
Local Descriptor Table (LDT) using the modify_ldt(2) system
diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
-index 7d68f0c7cfb1..85f04bbeadd8 100644
+index 687cd1a213d5..29075c2bc51f 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -101,6 +101,7 @@ config EFI_PGT_DUMP
@@ -403,10 +403,10 @@ index 79ec7add5f98..2950448e00ac 100644
native_write_cr4(cr4 ^ X86_CR4_PGE);
/* write old PGE again and flush TLBs */
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
-index 1073118b9bf0..2e34aede5c36 100644
+index a6458ab499c2..0be5291ec42e 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
-@@ -1748,7 +1748,6 @@ void cpu_init(void)
+@@ -1790,7 +1790,6 @@ void cpu_init(void)
wrmsrl(MSR_KERNEL_GS_BASE, 0);
barrier();
@@ -479,7 +479,7 @@ index 6a78d4b36a79..715009f7a96c 100644
/*
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
-index 979e0a02cbe1..d6ab882a0091 100644
+index 79b95910fd9f..fcda13aa03d0 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -560,9 +560,9 @@ static void __init pagetable_init(void)
@@ -494,8 +494,8 @@ index 979e0a02cbe1..d6ab882a0091 100644
EXPORT_SYMBOL_GPL(__supported_pte_mask);
/* Used in PAGE_KERNEL_* macros which are reasonably used out-of-tree: */
EXPORT_SYMBOL(__default_kernel_pte_mask);
-@@ -873,7 +873,7 @@ int arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
- #endif
+@@ -870,7 +870,7 @@ void arch_remove_memory(int nid, u64 start, u64 size,
+ }
#endif
-int kernel_set_to_readonly __read_mostly;
@@ -503,7 +503,7 @@ index 979e0a02cbe1..d6ab882a0091 100644
void set_kernel_text_rw(void)
{
-@@ -925,12 +925,11 @@ void mark_rodata_ro(void)
+@@ -922,12 +922,11 @@ void mark_rodata_ro(void)
unsigned long start = PFN_ALIGN(_text);
unsigned long size = PFN_ALIGN(_etext) - start;
@@ -518,7 +518,7 @@ index 979e0a02cbe1..d6ab882a0091 100644
printk(KERN_INFO "Testing CPA: Reverting %lx-%lx\n",
start, start+size);
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
-index a3e9c6ee3cf2..40bbcd978b0a 100644
+index 81e85a8dd300..f0403d1ba1b0 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -66,9 +66,9 @@
@@ -533,7 +533,7 @@ index a3e9c6ee3cf2..40bbcd978b0a 100644
EXPORT_SYMBOL_GPL(__supported_pte_mask);
/* Used in PAGE_KERNEL_* macros which are reasonably used out-of-tree: */
EXPORT_SYMBOL(__default_kernel_pte_mask);
-@@ -1201,7 +1201,7 @@ void __init mem_init(void)
+@@ -1190,7 +1190,7 @@ void __init mem_init(void)
mem_init_print_info(NULL);
}
@@ -542,7 +542,7 @@ index a3e9c6ee3cf2..40bbcd978b0a 100644
void set_kernel_text_rw(void)
{
-@@ -1250,9 +1250,8 @@ void mark_rodata_ro(void)
+@@ -1239,9 +1239,8 @@ void mark_rodata_ro(void)
printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
(end - start) >> 10);
@@ -567,10 +567,10 @@ index 15c1f5e12eb8..ff72cccec5b8 100644
struct list_head *cpu_list, local_list;
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
-index adf28788cab5..cd4b3501eda9 100644
+index 75d582ca917f..38ba030b8e27 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
-@@ -5158,7 +5158,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
+@@ -5161,7 +5161,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
struct ata_port *ap;
unsigned int tag;
@@ -579,7 +579,7 @@ index adf28788cab5..cd4b3501eda9 100644
ap = qc->ap;
qc->flags = 0;
-@@ -5175,7 +5175,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
+@@ -5178,7 +5178,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
struct ata_port *ap;
struct ata_link *link;
@@ -608,6 +608,306 @@ index 1df9cb8e659e..eb71148a4a69 100644
help
Say Y here if you want to support the /dev/port device. The /dev/port
device is similar to /dev/mem, but for I/O ports.
+diff --git a/drivers/char/random.c b/drivers/char/random.c
+index 53e822793d46..c97b295338ce 100644
+--- a/drivers/char/random.c
++++ b/drivers/char/random.c
+@@ -281,11 +281,20 @@
+ /*
+ * Configuration information
+ */
++#ifdef CONFIG_HARDENED_RANDOM
++#define INPUT_POOL_SHIFT 18
++#define OUTPUT_POOL_SHIFT 16
++#else
+ #define INPUT_POOL_SHIFT 12
+-#define INPUT_POOL_WORDS (1 << (INPUT_POOL_SHIFT-5))
+ #define OUTPUT_POOL_SHIFT 10
++#endif
++#define INPUT_POOL_WORDS (1 << (INPUT_POOL_SHIFT-5))
+ #define OUTPUT_POOL_WORDS (1 << (OUTPUT_POOL_SHIFT-5))
++#ifdef CONFIG_HARDENED_RANDOM
++#define SEC_XFER_SIZE 32768
++#else
+ #define SEC_XFER_SIZE 512
++#endif
+ #define EXTRACT_SIZE 10
+
+
+@@ -294,9 +303,6 @@
+ /*
+ * To allow fractional bits to be tracked, the entropy_count field is
+ * denominated in units of 1/8th bits.
+- *
+- * 2*(ENTROPY_SHIFT + log2(poolbits)) must <= 31, or the multiply in
+- * credit_entropy_bits() needs to be 64 bits wide.
+ */
+ #define ENTROPY_SHIFT 3
+ #define ENTROPY_BITS(r) ((r)->entropy_count >> ENTROPY_SHIFT)
+@@ -361,15 +367,27 @@ static int random_write_wakeup_bits = 28 * OUTPUT_POOL_WORDS;
+ */
+ static struct poolinfo {
+ int poolbitshift, poolwords, poolbytes, poolbits, poolfracbits;
+-#define S(x) ilog2(x)+5, (x), (x)*4, (x)*32, (x) << (ENTROPY_SHIFT+5)
+- int tap1, tap2, tap3, tap4, tap5;
+-} poolinfo_table[] = {
++#define S(x) \
++ .poolbitshift = ilog2(x)+5, \
++ .poolwords = (x), \
++ .poolbytes = (x)*4, \
++ .poolbits = (x)*32, \
++ .poolfracbits = (x) << (ENTROPY_SHIFT+5)
++ int tap[5];
++} __randomize_layout poolinfo_table[] = {
++#ifdef CONFIG_HARDENED_RANDOM
++ /* x^8192 + x^104 + x^76 + x^51 +x^25 + x + 1 */
++ { S(8192), .tap = { 104, 76, 51, 25, 1 } },
++ /* x^2048 + x^26 + x^19 + x^14 + x^7 + x + 1 */
++ { S(2048), .tap = { 26, 19, 14, 7, 1 } }
++#else
+ /* was: x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 */
+ /* x^128 + x^104 + x^76 + x^51 +x^25 + x + 1 */
+- { S(128), 104, 76, 51, 25, 1 },
++ { S(128), .tap = { 104, 76, 51, 25, 1 } },
+ /* was: x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 */
+ /* x^32 + x^26 + x^19 + x^14 + x^7 + x + 1 */
+- { S(32), 26, 19, 14, 7, 1 },
++ { S(32), .tap = { 26, 19, 14, 7, 1 } },
++#endif
+ #if 0
+ /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
+ { S(2048), 1638, 1231, 819, 411, 1 },
+@@ -413,7 +431,7 @@ struct crng_state {
+ __u32 state[16];
+ unsigned long init_time;
+ spinlock_t lock;
+-};
++} __randomize_layout;
+
+ struct crng_state primary_crng = {
+ .lock = __SPIN_LOCK_UNLOCKED(primary_crng.lock),
+@@ -475,7 +493,7 @@ struct entropy_store {
+ unsigned int initialized:1;
+ unsigned int last_data_init:1;
+ __u8 last_data[EXTRACT_SIZE];
+-};
++} __randomize_layout;
+
+ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
+ size_t nbytes, int min, int rsvd);
+@@ -486,6 +504,8 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r);
+ static void push_to_pool(struct work_struct *work);
+ static __u32 input_pool_data[INPUT_POOL_WORDS] __latent_entropy;
+ static __u32 blocking_pool_data[OUTPUT_POOL_WORDS] __latent_entropy;
++/* this actually doesn't need latent entropy */
++static __u32 secondary_xfer_buffer[OUTPUT_POOL_WORDS];
+
+ static struct entropy_store input_pool = {
+ .poolinfo = &poolinfo_table[0],
+@@ -504,9 +524,78 @@ static struct entropy_store blocking_pool = {
+ push_to_pool),
+ };
+
++#ifdef CONFIG_HARDENED_RANDOM
++static __u32 const twist_table[64][4] = {
++ { 0x6a09e668, 0xbb67ae86, 0x3c6ef373, 0xa54ff53a },
++ { 0x510e5280, 0x9b05688c, 0x1f83d9ac, 0x5be0cd19 },
++ { 0xcbbb9d5e, 0x629a292a, 0x9159015a, 0x152fecd9 },
++ { 0x67332668, 0x8eb44a87, 0xdb0c2e0d, 0x47b5481e },
++ { 0xae5f9157, 0xcf6c85d4, 0x2f73477d, 0x6d1826cb },
++ { 0x8b43d457, 0xe360b597, 0x1c456003, 0x6f196331 },
++ { 0xd94ebeb2, 0x0cc4a612, 0x261dc1f3, 0x5815a7be },
++ { 0x70b7ed68, 0xa1513c69, 0x44f93636, 0x720dcdfe },
++ { 0xb467369e, 0xca320b76, 0x34e0d42e, 0x49c7d9be },
++ { 0x87abb9f2, 0xc463a2fc, 0xec3fc3f4, 0x27277f6d },
++ { 0x610bebf3, 0x7420b49f, 0xd1fd8a34, 0xe4773594 },
++ { 0x092197f6, 0x1b530c96, 0x869d6343, 0xeee52e50 },
++ { 0x1107668a, 0x21fba37c, 0x43ab9fb6, 0x75a9f91d },
++ { 0x8630501a, 0xd7cd8174, 0x007fe010, 0x0379f514 },
++ { 0x066b651b, 0x0764ab84, 0x0a4b06be, 0x0c3578c1 },
++ { 0x0d2962a5, 0x11e039f4, 0x1857b7bf, 0x1a29bf2e },
++ { 0x1b11a32f, 0x1cdf34e8, 0x23183042, 0x25b89093 },
++ { 0x2a0c06a1, 0x2ae79843, 0x2c9cda69, 0x2f281f24 },
++ { 0x32841259, 0x3502e64e, 0x377c9c21, 0x39204cda },
++ { 0x3b91bf66, 0x3ecc38ca, 0x40665609, 0x43947938 },
++ { 0x47830769, 0x484ae4b8, 0x4c2b2b75, 0x4cf03d21 },
++ { 0x4f3cbb11, 0x50c2d3b5, 0x5308af16, 0x560a7a9a },
++ { 0x5788d981, 0x584769b4, 0x59c34f06, 0x5e2d564c },
++ { 0x6116d760, 0x62894c10, 0x6569b58c, 0x66d7b394 },
++ { 0x68f9f8dc, 0x6d34f03d, 0x6de8372f, 0x742687a4 },
++ { 0x76356021, 0x799d1235, 0x7ba455f4, 0x7da8d73b },
++ { 0x7e546743, 0x80554bdc, 0x83a63a3c, 0x85a01e39 },
++ { 0x879774ac, 0x883eac9f, 0x8a32aae0, 0x8c243210 },
++ { 0x8d6e8781, 0x8e134b6f, 0x91ea5892, 0x95166fe4 },
++ { 0x95b817e6, 0x96faa747, 0x98dca135, 0x9abc6593 },
++ { 0x9b5bd55a, 0x9f136df7, 0xa04ebd79, 0xa225f6ed },
++ { 0xa4970e49, 0xa79f5a6b, 0xaa0869af, 0xad06dcbd },
++ { 0xaf68312e, 0xb12efe0b, 0xb2f3ef5b, 0xb420e03a },
++ { 0xb6785656, 0xb837d738, 0xb9613115, 0xbbb18efb },
++ { 0xbcd89621, 0xc0db3814, 0xc3b2f2a3, 0xc71638d9 },
++ { 0xc7a6240f, 0xca73166e, 0xcb01f3ba, 0xcc1f293d },
++ { 0xccad81c8, 0xcf72acaf, 0xd34c7258, 0xd4649b7a },
++ { 0xd4f07147, 0xd607a013, 0xd9d3b47b, 0xdae803b5 },
++ { 0xdb71ef1a, 0xdc854e24, 0xe1dcf0ea, 0xe2eca719 },
++ { 0xe50a4ad8, 0xe7ac0990, 0xe9c46d3a, 0xeacfc33c },
++ { 0xec5fb417, 0xedee611c, 0xf18bc533, 0xf292ef77 },
++ { 0xf41cab36, 0xf5a531ec, 0xf7aeb45d, 0xf93474e9 },
++ { 0xfc3c7559, 0xfd3e1962, 0xfebf9bc1, 0xff3fdbf2 },
++ { 0x01bf3cab, 0x023ebd6b, 0x03bc8288, 0x06365a0f },
++ { 0x06b4c1d2, 0x092afcc1, 0x09a8ad2c, 0x0b21093c },
++ { 0x0f83d25e, 0x107c1074, 0x10f803d0, 0x11ef938d },
++ { 0x136212e8, 0x14d390a4, 0x16beab25, 0x182dd7d5 },
++ { 0x199c09bf, 0x1ed27f46, 0x1f4b2d3e, 0x21a502bc },
++ { 0x23849e06, 0x25d9d3da, 0x273ef0ca, 0x28a326f6 },
++ { 0x2a7cb5e4, 0x2d4019ba, 0x2e2b1e73, 0x2f8aec73 },
++ { 0x30e9ddcc, 0x315ea828, 0x32bc75cf, 0x357587f0 },
++ { 0x37b7de93, 0x3bc31ec6, 0x3c35b24a, 0x3d1a949b },
++ { 0x3e713d15, 0x3ee347da, 0x4038e0bf, 0x411c2bae },
++ { 0x418daf9a, 0x4270749e, 0x4516b0b0, 0x45876dcb },
++ { 0x46d92246, 0x4e448a56, 0x4f9141c0, 0x50dd3e71 },
++ { 0x5296c45b, 0x56738aac, 0x58961d02, 0x5b9010c1 },
++ { 0x5c6913ae, 0x5cd577f2, 0x5dae0649, 0x5ef24aeb },
++ { 0x60a199af, 0x6178ce9b, 0x61e44c97, 0x6326551c },
++ { 0x65a86b29, 0x67bd7e12, 0x6827e41c, 0x68fc7925 },
++ { 0x6966a836, 0x6a3acfa3, 0x6b78828a, 0x6df2017d },
++ { 0x7068fdbb, 0x720c4495, 0x747f226b, 0x75b7a753 },
++ { 0x7687a9e0, 0x77bf2d48, 0x795d98d4, 0x7a2c690b },
++ { 0x7bc93fa8, 0x7c974690, 0x7f6653f3, 0x80333127 },
++ { 0x81660244, 0x81cc2760, 0x829840e3, 0x83c9edd4 }
++};
++#else
+ static __u32 const twist_table[8] = {
+ 0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
+ 0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };
++#endif
+
+ /*
+ * This function adds bytes into the entropy "pool". It does not
+@@ -521,17 +610,14 @@ static __u32 const twist_table[8] = {
+ static void _mix_pool_bytes(struct entropy_store *r, const void *in,
+ int nbytes)
+ {
+- unsigned long i, tap1, tap2, tap3, tap4, tap5;
++ unsigned long i, n, t1, t2, tap[5];
+ int input_rotate;
+ int wordmask = r->poolinfo->poolwords - 1;
+ const char *bytes = in;
+ __u32 w;
+
+- tap1 = r->poolinfo->tap1;
+- tap2 = r->poolinfo->tap2;
+- tap3 = r->poolinfo->tap3;
+- tap4 = r->poolinfo->tap4;
+- tap5 = r->poolinfo->tap5;
++ for (n = 0; n < 5; n++)
++ tap[n] = r->poolinfo->tap[n];
+
+ input_rotate = r->input_rotate;
+ i = r->add_ptr;
+@@ -543,14 +629,17 @@ static void _mix_pool_bytes(struct entropy_store *r, const void *in,
+
+ /* XOR in the various taps */
+ w ^= r->pool[i];
+- w ^= r->pool[(i + tap1) & wordmask];
+- w ^= r->pool[(i + tap2) & wordmask];
+- w ^= r->pool[(i + tap3) & wordmask];
+- w ^= r->pool[(i + tap4) & wordmask];
+- w ^= r->pool[(i + tap5) & wordmask];
++ for (n = 0; n < 5; n++)
++ w ^= r->pool[(i + tap[n]) & wordmask];
+
+ /* Mix the result back in with a twist */
++#ifdef CONFIG_HARDENED_RANDOM
++ t1 = rol32(w, 14) & 0x1FFF; // 0-63, 1111111111111
++ t2 = rol32(w, t1) & 0x3; // 0-3, 11
++ r->pool[i] = (w >> 3) ^ twist_table[t1][t2];
++#else
+ r->pool[i] = (w >> 3) ^ twist_table[w & 7];
++#endif
+
+ /*
+ * Normally, we add 7 bits of rotation to the pool.
+@@ -588,7 +677,7 @@ struct fast_pool {
+ unsigned long last;
+ unsigned short reg_idx;
+ unsigned char count;
+-};
++} __randomize_layout;
+
+ /*
+ * This is a fast mixing routine used by the interrupt randomness
+@@ -683,7 +772,7 @@ static void credit_entropy_bits(struct entropy_store *r, int nbits)
+ /* The +2 corresponds to the /4 in the denominator */
+
+ do {
+- unsigned int anfrac = min(pnfrac, pool_size/2);
++ __u64 anfrac = min(pnfrac, pool_size/2);
+ unsigned int add =
+ ((pool_size - entropy_count)*anfrac*3) >> s;
+
+@@ -1061,7 +1150,7 @@ static ssize_t extract_crng_user(void __user *buf, size_t nbytes)
+
+ extract_crng(tmp);
+ i = min_t(int, nbytes, CHACHA20_BLOCK_SIZE);
+- if (copy_to_user(buf, tmp, i)) {
++ if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
+ ret = -EFAULT;
+ break;
+ }
+@@ -1089,9 +1178,9 @@ static ssize_t extract_crng_user(void __user *buf, size_t nbytes)
+ struct timer_rand_state {
+ cycles_t last_time;
+ long last_delta, last_delta2;
+-};
++} __randomize_layout;
+
+-#define INIT_TIMER_RAND_STATE { INITIAL_JIFFIES, };
++#define INIT_TIMER_RAND_STATE { .last_time = INITIAL_JIFFIES };
+
+ /*
+ * Add device- or boot-specific data to the input pool to help
+@@ -1334,20 +1423,18 @@ static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
+
+ static void _xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
+ {
+- __u32 tmp[OUTPUT_POOL_WORDS];
+-
+ int bytes = nbytes;
+
+ /* pull at least as much as a wakeup */
+ bytes = max_t(int, bytes, random_read_wakeup_bits / 8);
+ /* but never more than the buffer size */
+- bytes = min_t(int, bytes, sizeof(tmp));
++ bytes = min_t(int, bytes, sizeof(secondary_xfer_buffer));
+
+ trace_xfer_secondary_pool(r->name, bytes * 8, nbytes * 8,
+ ENTROPY_BITS(r), ENTROPY_BITS(r->pull));
+- bytes = extract_entropy(r->pull, tmp, bytes,
++ bytes = extract_entropy(r->pull, secondary_xfer_buffer, bytes,
+ random_read_wakeup_bits / 8, 0);
+- mix_pool_bytes(r, tmp, bytes);
++ mix_pool_bytes(r, secondary_xfer_buffer, bytes);
+ credit_entropy_bits(r, bytes*8);
+ }
+
+@@ -1572,7 +1659,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
+
+ extract_buf(r, tmp);
+ i = min_t(int, nbytes, EXTRACT_SIZE);
+- if (copy_to_user(buf, tmp, i)) {
++ if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
+ ret = -EFAULT;
+ break;
+ }
+@@ -2276,7 +2363,7 @@ struct batched_entropy {
+ };
+ unsigned int position;
+ spinlock_t batch_lock;
+-};
++} __randomize_layout;
+
+ /*
+ * Get a random word for internal kernel use only. The quality of the random
diff --git a/drivers/tty/Kconfig b/drivers/tty/Kconfig
index e0a04bfc873e..ec93f827c599 100644
--- a/drivers/tty/Kconfig
@@ -661,10 +961,10 @@ index ac8025cd4a1f..a89e48f53fba 100644
return tty;
}
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
-index eb24ec0e160d..68c93697cae9 100644
+index 27486b0a027a..82689c97b660 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
-@@ -41,6 +41,8 @@
+@@ -43,6 +43,8 @@
#define USB_TP_TRANSMISSION_DELAY 40 /* ns */
#define USB_TP_TRANSMISSION_DELAY_MAX 65535 /* ns */
@@ -673,7 +973,7 @@ index eb24ec0e160d..68c93697cae9 100644
/* Protect struct usb_device->state and ->children members
* Note: Both are also protected by ->dev.sem, except that ->state can
* change to USB_STATE_NOTATTACHED even when the semaphore isn't held. */
-@@ -4940,6 +4942,12 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
+@@ -4961,6 +4963,12 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
goto done;
return;
}
@@ -686,8 +986,35 @@ index eb24ec0e160d..68c93697cae9 100644
if (hub_is_superspeed(hub->hdev))
unit_load = 150;
else
+diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
+index e5126fad57c5..2a59499ba24d 100644
+--- a/fs/debugfs/inode.c
++++ b/fs/debugfs/inode.c
+@@ -32,6 +32,10 @@ static struct vfsmount *debugfs_mount;
+ static int debugfs_mount_count;
+ static bool debugfs_registered;
+
++#ifdef CONFIG_HARDENED_SYSFS_RESTRICT
++extern int sysfs_restricted;
++#endif
++
+ static struct inode *debugfs_get_inode(struct super_block *sb)
+ {
+ struct inode *inode = new_inode(sb);
+@@ -517,6 +521,11 @@ struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
+ if (unlikely(!inode))
+ return failed_creating(dentry);
+
++#ifdef CONFIG_HARDENED_SYSFS_RESTRICT
++ if (sysfs_restricted)
++ inode->i_mode = S_IFDIR | S_IRWXU;
++ else
++#endif
+ inode->i_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
+ inode->i_op = &simple_dir_inode_operations;
+ inode->i_fop = &simple_dir_operations;
diff --git a/fs/exec.c b/fs/exec.c
-index 433b1257694a..f86201f25a4c 100644
+index 561ea64829ec..5d40794103eb 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -62,6 +62,7 @@
@@ -708,10 +1035,21 @@ index 433b1257694a..f86201f25a4c 100644
err:
up_write(&mm->mmap_sem);
diff --git a/fs/namei.c b/fs/namei.c
-index 914178cdbe94..7422b5ce077a 100644
+index 327844fedf3d..c1b2eafa3b30 100644
--- a/fs/namei.c
+++ b/fs/namei.c
-@@ -885,10 +885,10 @@ static inline void put_link(struct nameidata *nd)
+@@ -125,6 +125,10 @@
+
+ #define EMBEDDED_NAME_MAX (PATH_MAX - offsetof(struct filename, iname))
+
++#ifdef CONFIG_HARDENED_FIFO
++extern int fifo_restrictions;
++#endif
++
+ struct filename *
+ getname_flags(const char __user *filename, int flags, int *empty)
+ {
+@@ -885,10 +889,10 @@ static inline void put_link(struct nameidata *nd)
path_put(&last->link);
}
@@ -726,8 +1064,57 @@ index 914178cdbe94..7422b5ce077a 100644
/**
* may_follow_link - Check symlink following for unsafe situations
+@@ -3252,6 +3256,32 @@ static int lookup_open(struct nameidata *nd, struct path *path,
+ return error;
+ }
+
++/*
++ * Handles possibly restricted FIFO operations
++ * if the user doesn't own this directory.
++ */
++static int fifo_restricted(const struct dentry *dentry,
++ const struct vfsmount *mnt,
++ const struct dentry *dir,
++ const int flag,
++ const int acc_mode) {
++#ifdef CONFIG_HARDENED_FIFO
++ const struct cred *cred;
++ struct inode *inode, *dir_inode;
++
++ cred = current_cred();
++ inode = d_backing_inode(dentry);
++ dir_inode = d_backing_inode(dir);
++
++ if (fifo_restrictions && S_ISFIFO(inode->i_mode) &&
++ !(flag & O_EXCL) && (dir_inode->i_mode & S_ISVTX) &&
++ !uid_eq(inode->i_uid, dir_inode->i_uid) &&
++ !uid_eq(cred->fsuid, inode->i_uid))
++ return -EACCES;
++#endif
++ return 0;
++}
++
+ /*
+ * Handle the last step of open()
+ */
+@@ -3370,6 +3400,15 @@ static int do_last(struct nameidata *nd,
+ return -ENOENT;
+ }
+
++ /*
++ * Only check if O_CREAT is specified, all other checks need to go
++ * into may_open().
++ */
++ if (fifo_restricted(path.dentry, path.mnt, dir, open_flag, acc_mode)) {
++ path_to_nameidata(&path, nd);
++ return -EACCES;
++ }
++
+ /*
+ * create/update audit record if it already exists.
+ */
diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig
-index 5f93cfacb3d1..cea0d7d3b23e 100644
+index ac3e06367cb6..06a2e4cf4cc1 100644
--- a/fs/nfs/Kconfig
+++ b/fs/nfs/Kconfig
@@ -195,4 +195,3 @@ config NFS_DEBUG
@@ -785,6 +1172,76 @@ index f8e6fb2c3657..240c1432e18f 100644
generic_fillattr(inode, stat);
return 0;
+diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
+index feeae8081c22..a507113bbd3b 100644
+--- a/fs/sysfs/dir.c
++++ b/fs/sysfs/dir.c
+@@ -18,6 +18,10 @@
+
+ DEFINE_SPINLOCK(sysfs_symlink_target_lock);
+
++#ifdef CONFIG_HARDENED_SYSFS_RESTRICT
++extern int sysfs_restricted;
++#endif
++
+ void sysfs_warn_dup(struct kernfs_node *parent, const char *name)
+ {
+ char *buf;
+@@ -40,11 +44,19 @@ void sysfs_warn_dup(struct kernfs_node *parent, const char *name)
+ int sysfs_create_dir_ns(struct kobject *kobj, const void *ns)
+ {
+ struct kernfs_node *parent, *kn;
++ const char* name;
++ umode_t mode;
+ kuid_t uid;
+ kgid_t gid;
+
++#ifdef CONFIG_HARDENED_SYSFS_RESTRICT
++ const char *parent_name;
++#endif
++
+ BUG_ON(!kobj);
+
++ name = kobject_name(kobj);
++
+ if (kobj->parent)
+ parent = kobj->parent->sd;
+ else
+@@ -55,12 +67,30 @@ int sysfs_create_dir_ns(struct kobject *kobj, const void *ns)
+
+ kobject_get_ownership(kobj, &uid, &gid);
+
+- kn = kernfs_create_dir_ns(parent, kobject_name(kobj),
+- S_IRWXU | S_IRUGO | S_IXUGO, uid, gid,
+- kobj, ns);
++#ifdef CONFIG_HARDENED_SYSFS_RESTRICT
++ if (sysfs_restricted) {
++ parent_name = parent->name;
++ mode = S_IRWXU;
++
++ if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") ||
++ !strcmp(name, "fs"))) ||
++ (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
++ (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") ||
++ !strcmp(name, "fuse") || !strcmp(name, "ecryptfs"))) ||
++ (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
++ mode |= S_IRUGO | S_IXUGO;
++ }
++ else
++ mode = S_IRWXU | S_IRUGO | S_IXUGO;
++#else
++ mode = S_IRWXU | S_IRUGO | S_IXUGO;
++#endif
++
++ kn = kernfs_create_dir_ns(parent, name, mode, uid, gid, kobj, ns);
++
+ if (IS_ERR(kn)) {
+ if (PTR_ERR(kn) == -EEXIST)
+- sysfs_warn_dup(parent, kobject_name(kobj));
++ sysfs_warn_dup(parent, name);
+ return PTR_ERR(kn);
+ }
+
diff --git a/include/linux/cache.h b/include/linux/cache.h
index 750621e41d1c..e7157c18c62c 100644
--- a/include/linux/cache.h
@@ -822,10 +1279,10 @@ index f640dcbc880c..2b4f5d651f19 100644
{
return true;
diff --git a/include/linux/fs.h b/include/linux/fs.h
-index d4e1b43a53c3..c925cbdd1d95 100644
+index 92420009b9bc..9238f04686ce 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
-@@ -3466,4 +3466,15 @@ extern void inode_nohighmem(struct inode *inode);
+@@ -3468,4 +3468,15 @@ extern void inode_nohighmem(struct inode *inode);
extern int vfs_fadvise(struct file *file, loff_t offset, loff_t len,
int advice);
@@ -866,10 +1323,10 @@ index fd1ce10553bf..1905d2476d32 100644
mask |= FS_ISDIR;
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
-index 24bcc5eec6b4..b1cdfc350596 100644
+index f78d1e89593f..ff139ff8d3d2 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
-@@ -530,9 +530,9 @@ extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
+@@ -553,9 +553,9 @@ extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order);
extern unsigned long get_zeroed_page(gfp_t gfp_mask);
@@ -935,7 +1392,7 @@ index 069aa2ebef90..cb9e3637a620 100644
const struct kobj_ns_type_operations *kobj_child_ns_ops(struct kobject *parent);
const struct kobj_ns_type_operations *kobj_ns_ops(struct kobject *kobj);
diff --git a/include/linux/mm.h b/include/linux/mm.h
-index bdec425c8e14..58409dee149a 100644
+index 45f10f5896b7..7f251a536293 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -571,7 +571,7 @@ static inline int is_vmalloc_or_module_addr(const void *x)
@@ -972,10 +1429,10 @@ index 70b7123f38c7..09f3019489b2 100644
extern phys_addr_t per_cpu_ptr_to_phys(void *addr);
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
-index 42fc852bf512..a6c5cacee3b5 100644
+index d8b4d31acd18..205c79491de3 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
-@@ -1184,6 +1184,11 @@ extern int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
+@@ -1194,6 +1194,11 @@ extern int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
int perf_event_max_stack_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos);
@@ -1137,7 +1594,7 @@ index f58e1ef76572..516caa40676e 100644
return __builtin_strcpy(p, q);
memcpy(p, q, strlen(q) + 1);
diff --git a/include/linux/tty.h b/include/linux/tty.h
-index 808fbfe86f85..e4429b7d6e8e 100644
+index 248a137112e8..02a63bec41ca 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -14,6 +14,7 @@
@@ -1148,7 +1605,7 @@ index 808fbfe86f85..e4429b7d6e8e 100644
/*
-@@ -336,6 +337,7 @@ struct tty_struct {
+@@ -338,6 +339,7 @@ struct tty_struct {
/* If the tty has a pending do_SAK, queue it here - akpm */
struct work_struct SAK_work;
struct tty_port *port;
@@ -1156,7 +1613,7 @@ index 808fbfe86f85..e4429b7d6e8e 100644
} __randomize_layout;
/* Each of a tty's open files has private_data pointing to tty_file_private */
-@@ -345,6 +347,8 @@ struct tty_file_private {
+@@ -347,6 +349,8 @@ struct tty_file_private {
struct list_head list;
};
@@ -1199,8 +1656,24 @@ index 398e9c95cd61..baab7195306a 100644
#ifndef CONFIG_MMU
extern void *__vmalloc_node_flags(unsigned long size, int node, gfp_t flags);
static inline void *__vmalloc_node_flags_caller(unsigned long size, int node,
+diff --git a/include/uapi/linux/ip.h b/include/uapi/linux/ip.h
+index e42d13b55cf3..3228bcfe7599 100644
+--- a/include/uapi/linux/ip.h
++++ b/include/uapi/linux/ip.h
+@@ -66,7 +66,11 @@
+
+ #define IPVERSION 4
+ #define MAXTTL 255
++#ifdef CONFIG_HARDENED_STEALTH_NETWORKING
++#define IPDEFTTL 128
++#else
+ #define IPDEFTTL 64
++#endif
+
+ #define IPOPT_OPTVAL 0
+ #define IPOPT_OLEN 1
diff --git a/init/Kconfig b/init/Kconfig
-index 47035b5a46f6..63b30636fdc0 100644
+index 47035b5a46f6..efbd3ac00d92 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -326,6 +326,7 @@ config USELIB
@@ -1211,7 +1684,30 @@ index 47035b5a46f6..63b30636fdc0 100644
help
Enable auditing infrastructure that can be used with another
kernel subsystem, such as SELinux (which requires this for
-@@ -1091,6 +1092,12 @@ config CC_OPTIMIZE_FOR_SIZE
+@@ -957,6 +958,22 @@ config USER_NS
+
+ If unsure, say N.
+
++config USER_NS_UNPRIVILEGED
++ bool "Allow unprivileged users to create namespaces"
++ depends on USER_NS
++ default n
++ help
++ When disabled, unprivileged users will not be able to create
++ new namespaces. Allowing users to create their own namespaces
++ has been part of several recent local privilege escalation
++ exploits, so if you need user namespaces but are
++ paranoid^Wsecurity-conscious you want to disable this.
++
++ This setting can be overridden at runtime via the
++ kernel.unprivileged_userns_clone sysctl.
++
++ If unsure, say N.
++
+ config PID_NS
+ bool "PID Namespaces"
+ default y
+@@ -1091,6 +1108,12 @@ config CC_OPTIMIZE_FOR_SIZE
endchoice
@@ -1224,7 +1720,7 @@ index 47035b5a46f6..63b30636fdc0 100644
config HAVE_LD_DEAD_CODE_DATA_ELIMINATION
bool
help
-@@ -1377,8 +1384,7 @@ config SHMEM
+@@ -1377,8 +1400,7 @@ config SHMEM
which may be appropriate on small systems without swap.
config AIO
@@ -1234,7 +1730,7 @@ index 47035b5a46f6..63b30636fdc0 100644
help
This option enables POSIX asynchronous I/O which may by used
by some high performance threaded applications. Disabling
-@@ -1595,7 +1601,7 @@ config VM_EVENT_COUNTERS
+@@ -1595,7 +1617,7 @@ config VM_EVENT_COUNTERS
config SLUB_DEBUG
default y
@@ -1243,7 +1739,7 @@ index 47035b5a46f6..63b30636fdc0 100644
depends on SLUB && SYSFS
help
SLUB has extensive debug support features. Disabling these can
-@@ -1619,7 +1625,6 @@ config SLUB_MEMCG_SYSFS_ON
+@@ -1619,7 +1641,6 @@ config SLUB_MEMCG_SYSFS_ON
config COMPAT_BRK
bool "Disable heap randomization"
@@ -1251,7 +1747,7 @@ index 47035b5a46f6..63b30636fdc0 100644
help
Randomizing heap placement makes heap exploits harder, but it
also breaks ancient binaries (including anything libc5 based).
-@@ -1666,7 +1671,6 @@ endchoice
+@@ -1666,7 +1687,6 @@ endchoice
config SLAB_MERGE_DEFAULT
bool "Allow slab caches to be merged"
@@ -1259,7 +1755,7 @@ index 47035b5a46f6..63b30636fdc0 100644
help
For reduced kernel memory fragmentation, slab caches can be
merged when they share the same size and other characteristics.
-@@ -1679,9 +1683,9 @@ config SLAB_MERGE_DEFAULT
+@@ -1679,9 +1699,9 @@ config SLAB_MERGE_DEFAULT
command line.
config SLAB_FREELIST_RANDOM
@@ -1270,7 +1766,7 @@ index 47035b5a46f6..63b30636fdc0 100644
help
Randomizes the freelist order used on creating new pages. This
security feature reduces the predictability of the kernel slab
-@@ -1690,12 +1694,56 @@ config SLAB_FREELIST_RANDOM
+@@ -1690,12 +1710,56 @@ config SLAB_FREELIST_RANDOM
config SLAB_FREELIST_HARDENED
bool "Harden slab freelist metadata"
depends on SLUB
@@ -1328,10 +1824,10 @@ index 47035b5a46f6..63b30636fdc0 100644
default y
depends on SLUB && SMP
diff --git a/kernel/audit.c b/kernel/audit.c
-index 2a8058764aa6..14e7a763db43 100644
+index 1f08c38e604a..2c4f577a4317 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
-@@ -1628,6 +1628,9 @@ static int __init audit_enable(char *str)
+@@ -1630,6 +1630,9 @@ static int __init audit_enable(char *str)
if (audit_default == AUDIT_OFF)
audit_initialized = AUDIT_DISABLED;
@@ -1342,20 +1838,20 @@ index 2a8058764aa6..14e7a763db43 100644
pr_err("audit: error setting audit state (%d)\n",
audit_default);
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
-index bad9985b8a08..453be8764a8c 100644
+index 36be400c3e65..50fa38718408 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
-@@ -370,7 +370,7 @@ void bpf_prog_kallsyms_del_all(struct bpf_prog *fp)
-
+@@ -368,7 +368,7 @@ void bpf_prog_kallsyms_del_all(struct bpf_prog *fp)
+ #ifdef CONFIG_BPF_JIT
/* All BPF JIT sysctl knobs here. */
int bpf_jit_enable __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_ALWAYS_ON);
-int bpf_jit_harden __read_mostly;
+int bpf_jit_harden __read_mostly = 2;
int bpf_jit_kallsyms __read_mostly;
- int bpf_jit_limit __read_mostly = BPF_JIT_LIMIT_DEFAULT;
+ long bpf_jit_limit __read_mostly;
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
-index ede82382dd32..98591574df41 100644
+index 596959288eb9..fb76f46d61ce 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -48,7 +48,7 @@ static DEFINE_SPINLOCK(prog_idr_lock);
@@ -1368,10 +1864,10 @@ index ede82382dd32..98591574df41 100644
static const struct bpf_map_ops * const bpf_map_types[] = {
#define BPF_PROG_TYPE(_id, _ops)
diff --git a/kernel/capability.c b/kernel/capability.c
-index 1e1c0236f55b..452062fe45ce 100644
+index 7718d7dcadc7..8a4ce459da0a 100644
--- a/kernel/capability.c
+++ b/kernel/capability.c
-@@ -431,6 +431,12 @@ bool capable(int cap)
+@@ -432,6 +432,12 @@ bool capable(int cap)
return ns_capable(&init_user_ns, cap);
}
EXPORT_SYMBOL(capable);
@@ -1385,7 +1881,7 @@ index 1e1c0236f55b..452062fe45ce 100644
/**
diff --git a/kernel/events/core.c b/kernel/events/core.c
-index 171b83ebed4a..c38d8bf68b6b 100644
+index 8c70ee23fbe9..7c45b1e38bc0 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -397,8 +397,13 @@ static cpumask_var_t perf_online_mask;
@@ -1402,7 +1898,7 @@ index 171b83ebed4a..c38d8bf68b6b 100644
/* Minimum for 512 kiB + 1 user control page */
int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */
-@@ -10462,6 +10467,9 @@ SYSCALL_DEFINE5(perf_event_open,
+@@ -10529,6 +10534,9 @@ SYSCALL_DEFINE5(perf_event_open,
if (flags & ~PERF_FLAG_ALL)
return -EINVAL;
@@ -1413,7 +1909,7 @@ index 171b83ebed4a..c38d8bf68b6b 100644
if (err)
return err;
diff --git a/kernel/fork.c b/kernel/fork.c
-index 69874db3fba8..44f666f2b7b7 100644
+index 1a2d18e98bf9..f3a8e3df2e12 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -103,6 +103,11 @@
@@ -1428,7 +1924,7 @@ index 69874db3fba8..44f666f2b7b7 100644
/*
* Minimum number of threads to boot the kernel
-@@ -1674,6 +1679,10 @@ static __latent_entropy struct task_struct *copy_process(
+@@ -1679,6 +1684,10 @@ static __latent_entropy struct task_struct *copy_process(
if ((clone_flags & (CLONE_NEWUSER|CLONE_FS)) == (CLONE_NEWUSER|CLONE_FS))
return ERR_PTR(-EINVAL);
@@ -1439,7 +1935,7 @@ index 69874db3fba8..44f666f2b7b7 100644
/*
* Thread groups must share signals as well, and detached threads
* can only be started up within the thread group.
-@@ -2503,6 +2512,12 @@ int ksys_unshare(unsigned long unshare_flags)
+@@ -2508,6 +2517,12 @@ int ksys_unshare(unsigned long unshare_flags)
if (unshare_flags & CLONE_NEWNS)
unshare_flags |= CLONE_FS;
@@ -1452,11 +1948,29 @@ index 69874db3fba8..44f666f2b7b7 100644
err = check_unshare_flags(unshare_flags);
if (err)
goto bad_unshare_out;
+diff --git a/kernel/kmod.c b/kernel/kmod.c
+index bc6addd9152b..008be43f6cdd 100644
+--- a/kernel/kmod.c
++++ b/kernel/kmod.c
+@@ -149,6 +149,13 @@ int __request_module(bool wait, const char *fmt, ...)
+ if (ret)
+ return ret;
+
++#ifdef CONFIG_HARDENED_MODULE_LOAD
++ if (uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
++ printk(KERN_ALERT "denied attempt to auto-load module %.64s\n", module_name);
++ return -EPERM;
++ }
++#endif
++
+ if (atomic_dec_if_positive(&kmod_concurrent_max) < 0) {
+ pr_warn_ratelimited("request_module: kmod_concurrent_max (%u) close to 0 (max_modprobes: %u), for module %s, throttling...",
+ atomic_read(&kmod_concurrent_max),
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
-index 3d37c279c090..0789ca413f09 100644
+index f2635fc751d9..a4c445bf7f24 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
-@@ -1138,7 +1138,7 @@ void free_basic_memory_bitmaps(void)
+@@ -1145,7 +1145,7 @@ void free_basic_memory_bitmaps(void)
void clear_free_pages(void)
{
@@ -1465,7 +1979,7 @@ index 3d37c279c090..0789ca413f09 100644
struct memory_bitmap *bm = free_pages_map;
unsigned long pfn;
-@@ -1155,7 +1155,7 @@ void clear_free_pages(void)
+@@ -1162,7 +1162,7 @@ void clear_free_pages(void)
}
memory_bm_position_reset(bm);
pr_info("free pages cleared after restore\n");
@@ -1501,10 +2015,10 @@ index f7e89c989df7..527c170810fc 100644
struct rcu_state *rsp;
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
-index 4a433608ba74..41d9b1656818 100644
+index 86ccaaf0c1bf..f3103b3f1bb4 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
-@@ -9627,7 +9627,7 @@ static int idle_balance(struct rq *this_rq, struct rq_flags *rf)
+@@ -9719,7 +9719,7 @@ static int idle_balance(struct rq *this_rq, struct rq_flags *rf)
* run_rebalance_domains is triggered when needed from the scheduler tick.
* Also triggered for nohz idle balancing (with nohz_balancing_kick set).
*/
@@ -1574,7 +2088,7 @@ index 6f584861d329..1943fe60f3b9 100644
void tasklet_init(struct tasklet_struct *t,
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
-index 9a85c7ae7362..20221265a603 100644
+index 4c4fd4339d33..10f5b0ecf5a4 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -67,6 +67,7 @@
@@ -1585,7 +2099,7 @@ index 9a85c7ae7362..20221265a603 100644
#include <linux/uaccess.h>
#include <asm/processor.h>
-@@ -99,12 +100,19 @@
+@@ -99,12 +100,25 @@
#if defined(CONFIG_SYSCTL)
/* External variables not in a header file. */
@@ -1593,6 +2107,12 @@ index 9a85c7ae7362..20221265a603 100644
+int deny_new_usb __read_mostly = 0;
+EXPORT_SYMBOL(deny_new_usb);
+#endif
++#ifdef CONFIG_HARDENED_SYSFS_RESTRICT
++int __read_mostly sysfs_restricted = 1;
++#endif
++#ifdef CONFIG_HARDENED_FIFO
++int __read_mostly fifo_restrictions = 1;
++#endif
extern int suid_dumpable;
#ifdef CONFIG_COREDUMP
extern int core_uses_pid;
@@ -1605,7 +2125,7 @@ index 9a85c7ae7362..20221265a603 100644
extern int pid_max;
extern int pid_max_min, pid_max_max;
extern int percpu_pagelist_fraction;
-@@ -116,35 +124,35 @@ extern int sysctl_nr_trim_pages;
+@@ -116,35 +130,35 @@ extern int sysctl_nr_trim_pages;
/* Constants used for minimum and maximum */
#ifdef CONFIG_LOCKUP_DETECTOR
@@ -1657,7 +2177,7 @@ index 9a85c7ae7362..20221265a603 100644
static const int cap_last_cap = CAP_LAST_CAP;
/*
-@@ -152,9 +160,12 @@ static const int cap_last_cap = CAP_LAST_CAP;
+@@ -152,9 +166,12 @@ static const int cap_last_cap = CAP_LAST_CAP;
* and hung_task_check_interval_secs
*/
#ifdef CONFIG_DETECT_HUNG_TASK
@@ -1671,7 +2191,7 @@ index 9a85c7ae7362..20221265a603 100644
#ifdef CONFIG_INOTIFY_USER
#include <linux/inotify.h>
#endif
-@@ -298,19 +309,19 @@ static struct ctl_table sysctl_base_table[] = {
+@@ -298,19 +315,19 @@ static struct ctl_table sysctl_base_table[] = {
};
#ifdef CONFIG_SCHED_DEBUG
@@ -1699,7 +2219,7 @@ index 9a85c7ae7362..20221265a603 100644
#endif
static struct ctl_table kern_table[] = {
-@@ -516,6 +527,15 @@ static struct ctl_table kern_table[] = {
+@@ -516,6 +533,15 @@ static struct ctl_table kern_table[] = {
.proc_handler = proc_dointvec,
},
#endif
@@ -1715,7 +2235,7 @@ index 9a85c7ae7362..20221265a603 100644
#ifdef CONFIG_PROC_SYSCTL
{
.procname = "tainted",
-@@ -864,6 +884,37 @@ static struct ctl_table kern_table[] = {
+@@ -864,6 +890,59 @@ static struct ctl_table kern_table[] = {
.extra1 = &zero,
.extra2 = &two,
},
@@ -1750,14 +2270,36 @@ index 9a85c7ae7362..20221265a603 100644
+ .extra1 = &zero,
+ .extra2 = &one,
+ },
++#endif
++#ifdef CONFIG_HARDENED_SYSFS_RESTRICT
++ {
++ .procname = "sysfs_restricted",
++ .data = &sysfs_restricted,
++ .maxlen = sizeof(int),
++ .mode = 0644,
++ .proc_handler = proc_dointvec_minmax_sysadmin,
++ .extra1 = &zero,
++ .extra2 = &one,
++ },
++#endif
++#ifdef CONFIG_HARDENED_FIFO
++ {
++ .procname = "fifo_restrictions",
++ .data = &fifo_restrictions,
++ .maxlen = sizeof(int),
++ .mode = 0644,
++ .proc_handler = proc_dointvec_minmax_sysadmin,
++ .extra1 = &zero,
++ .extra2 = &one,
++ },
#endif
{
.procname = "ngroups_max",
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
-index e1a549c9e399..c560063e3a8c 100644
+index 7362554416fd..fb8902236deb 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
-@@ -1462,7 +1462,7 @@ static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now,
+@@ -1465,7 +1465,7 @@ static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now,
}
}
@@ -1767,10 +2309,10 @@ index e1a549c9e399..c560063e3a8c 100644
struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
unsigned long flags;
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
-index fa49cd753dea..a16f8613282e 100644
+index ae64cb819a9a..52f5a5dcd09d 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
-@@ -1688,7 +1688,7 @@ static inline void __run_timers(struct timer_base *base)
+@@ -1690,7 +1690,7 @@ static inline void __run_timers(struct timer_base *base)
/*
* This function runs timers and the timer-tq in bottom half context.
*/
@@ -1780,21 +2322,25 @@ index fa49cd753dea..a16f8613282e 100644
struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
-index 923414a246e9..6b9dbc257e34 100644
+index 923414a246e9..107b17f0d528 100644
--- a/kernel/user_namespace.c
+++ b/kernel/user_namespace.c
-@@ -26,6 +26,9 @@
+@@ -26,6 +26,13 @@
#include <linux/bsearch.h>
#include <linux/sort.h>
+/* sysctl */
++#ifdef CONFIG_USER_NS_UNPRIVILEGED
++int unprivileged_userns_clone = 1;
++#else
+int unprivileged_userns_clone;
++#endif
+
static struct kmem_cache *user_ns_cachep __read_mostly;
static DEFINE_MUTEX(userns_state_mutex);
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
-index 3dea52f7be9c..9074878fe25b 100644
+index 46a910acce3f..5b60c663ac69 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -950,6 +950,7 @@ endmenu # "Debug lockups and hangs"
@@ -1894,7 +2440,7 @@ index 812e59e13fe6..2c2104884c81 100644
static noinline_for_stack
char *restricted_pointer(char *buf, char *end, const void *ptr,
diff --git a/mm/Kconfig b/mm/Kconfig
-index de64ea658716..8bff017856eb 100644
+index b457e94ae618..ec2440e66c72 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -311,7 +311,8 @@ config KSM
@@ -1908,10 +2454,10 @@ index de64ea658716..8bff017856eb 100644
This is the portion of low virtual memory which should be protected
from userspace allocation. Keeping a user from writing to low pages
diff --git a/mm/mmap.c b/mm/mmap.c
-index 1480880ff814..0145114c44b4 100644
+index a98f09b83019..d1695e475b37 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
-@@ -230,6 +230,13 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
+@@ -224,6 +224,13 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
newbrk = PAGE_ALIGN(brk);
oldbrk = PAGE_ALIGN(mm->brk);
@@ -1926,7 +2472,7 @@ index 1480880ff814..0145114c44b4 100644
goto set_brk;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
-index 8e6932a140b8..1f9c55809c56 100644
+index e5c610d711f3..12f96ff5ef0e 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -66,6 +66,7 @@
@@ -1989,7 +2535,7 @@ index 8e6932a140b8..1f9c55809c56 100644
page_zone(page)->managed_pages += nr_pages;
set_page_refcounted(page);
__free_pages(page, order);
-@@ -1876,8 +1908,8 @@ static inline int check_new_page(struct page *page)
+@@ -1884,8 +1916,8 @@ static inline int check_new_page(struct page *page)
static inline bool free_pages_prezeroed(void)
{
@@ -2000,7 +2546,7 @@ index 8e6932a140b8..1f9c55809c56 100644
}
#ifdef CONFIG_DEBUG_VM
-@@ -1934,6 +1966,11 @@ static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags
+@@ -1942,6 +1974,11 @@ static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags
post_alloc_hook(page, order, gfp_flags);
@@ -2066,7 +2612,7 @@ index 9632772e14be..802ff9ee8172 100644
/*
* Else we can use all the padding etc for the allocation
diff --git a/mm/slab_common.c b/mm/slab_common.c
-index 4d3c2e76d1ba..7e943cb1eccd 100644
+index 39e382acb0b8..a87a9a2fdf4e 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -27,10 +27,10 @@
@@ -2092,7 +2638,7 @@ index 4d3c2e76d1ba..7e943cb1eccd 100644
static int __init setup_slab_nomerge(char *str)
{
diff --git a/mm/slub.c b/mm/slub.c
-index 09c0e24a06d8..26e8c45a889a 100644
+index 9c3937c5ce38..6b063a76c419 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -124,6 +124,16 @@ static inline int kmem_cache_debug(struct kmem_cache *s)
@@ -2371,7 +2917,7 @@ index 09c0e24a06d8..26e8c45a889a 100644
static int __init setup_slub_memcg_sysfs(char *str)
{
diff --git a/mm/swap.c b/mm/swap.c
-index a3fc028e338e..4a1a899e430c 100644
+index 45fdbfb6b2a6..55ec851eb819 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -93,6 +93,13 @@ static void __put_compound_page(struct page *page)
@@ -2389,10 +2935,10 @@ index a3fc028e338e..4a1a899e430c 100644
}
diff --git a/net/core/dev.c b/net/core/dev.c
-index 138951d28643..efc5c650c9d7 100644
+index c1a3baf16957..a6b3bba36e8a 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -4533,7 +4533,7 @@ int netif_rx_ni(struct sk_buff *skb)
+@@ -4536,7 +4536,7 @@ int netif_rx_ni(struct sk_buff *skb)
}
EXPORT_SYMBOL(netif_rx_ni);
@@ -2401,7 +2947,7 @@ index 138951d28643..efc5c650c9d7 100644
{
struct softnet_data *sd = this_cpu_ptr(&softnet_data);
-@@ -6318,7 +6318,7 @@ static int napi_poll(struct napi_struct *n, struct list_head *repoll)
+@@ -6304,7 +6304,7 @@ static int napi_poll(struct napi_struct *n, struct list_head *repoll)
return work;
}
@@ -2410,6 +2956,39 @@ index 138951d28643..efc5c650c9d7 100644
{
struct softnet_data *sd = this_cpu_ptr(&softnet_data);
unsigned long time_limit = jiffies +
+diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
+index 6cec08cd0bb9..ffc74355a94d 100644
+--- a/net/core/sysctl_net_core.c
++++ b/net/core/sysctl_net_core.c
+@@ -36,6 +36,10 @@ static int net_msg_warn; /* Unused, but still a sysctl */
+ int sysctl_fb_tunnels_only_for_init_net __read_mostly = 0;
+ EXPORT_SYMBOL(sysctl_fb_tunnels_only_for_init_net);
+
++#ifdef CONFIG_HARDENED_STEALTH_NETWORKING
++int sysctl_stealth_blackhole __read_mostly = 1;
++#endif
++
+ #ifdef CONFIG_RPS
+ static int rps_sock_flow_sysctl(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
+@@ -505,6 +509,17 @@ static struct ctl_table net_core_table[] = {
+ .proc_handler = set_default_qdisc
+ },
+ #endif
++#ifdef CONFIG_HARDENED_STEALTH_NETWORKING
++ {
++ .procname = "ip_blackhole",
++ .data = &sysctl_stealth_blackhole,
++ .maxlen = sizeof(int),
++ .mode = 0644,
++ .proc_handler = proc_dointvec_minmax,
++ .extra1 = &zero,
++ .extra2 = &one,
++ },
++#endif
+ #endif /* CONFIG_NET */
+ {
+ .procname = "netdev_budget",
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index 32cae39cdff6..9141d7ae99b2 100644
--- a/net/ipv4/Kconfig
@@ -2422,6 +3001,389 @@ index 32cae39cdff6..9141d7ae99b2 100644
---help---
Normal TCP/IP networking is open to an attack known as "SYN
flooding". This denial-of-service attack prevents legitimate remote
+diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
+index 4efa5e33513e..ae82ff1ea5e7 100644
+--- a/net/ipv4/icmp.c
++++ b/net/ipv4/icmp.c
+@@ -195,6 +195,10 @@ struct icmp_control {
+ short error; /* This ICMP is classed as an error message */
+ };
+
++#ifdef CONFIG_HARDENED_STEALTH_NETWORKING
++extern int sysctl_stealth_blackhole;
++#endif
++
+ static const struct icmp_control icmp_pointers[NR_ICMP_TYPES+1];
+
+ /*
+@@ -934,6 +938,11 @@ static bool icmp_echo(struct sk_buff *skb)
+ {
+ struct net *net;
+
++#ifdef CONFIG_HARDENED_STEALTH_NETWORKING
++ if (likely(sysctl_stealth_blackhole) && !(skb->dev->flags & IFF_LOOPBACK))
++ return true;
++#endif
++
+ net = dev_net(skb_dst(skb)->dev);
+ if (!net->ipv4.sysctl_icmp_echo_ignore_all) {
+ struct icmp_bxm icmp_param;
+@@ -960,6 +969,12 @@ static bool icmp_echo(struct sk_buff *skb)
+ static bool icmp_timestamp(struct sk_buff *skb)
+ {
+ struct icmp_bxm icmp_param;
++
++#ifdef CONFIG_HARDENED_STEALTH_NETWORKING
++ if (likely(sysctl_stealth_blackhole) && !(skb->dev->flags & IFF_LOOPBACK))
++ return true;
++#endif
++
+ /*
+ * Too short.
+ */
+diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
+index 523d26f5e22e..10070b040661 100644
+--- a/net/ipv4/igmp.c
++++ b/net/ipv4/igmp.c
+@@ -136,6 +136,10 @@
+ ((in_dev)->mr_v2_seen && \
+ time_before(jiffies, (in_dev)->mr_v2_seen)))
+
++#ifdef CONFIG_HARDENED_STEALTH_NETWORKING
++extern int sysctl_stealth_blackhole;
++#endif
++
+ static int unsolicited_report_interval(struct in_device *in_dev)
+ {
+ int interval_ms, interval_jiffies;
+@@ -737,6 +741,11 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
+ __be32 dst;
+ int hlen, tlen;
+
++#ifdef CONFIG_HARDENED_STEALTH_NETWORKING
++ if (likely(sysctl_stealth_blackhole))
++ return -1;
++#endif
++
+ if (type == IGMPV3_HOST_MEMBERSHIP_REPORT)
+ return igmpv3_send_report(in_dev, pmc);
+
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index 38b6d8f90a44..9c67d386df71 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -307,11 +307,13 @@ static void tcp_ecn_rcv_synack(struct tcp_sock *tp, const struct tcphdr *th)
+ tp->ecn_flags &= ~TCP_ECN_OK;
+ }
+
++#ifndef CONFIG_HARDENED_NO_SIMULT_CONNECT
+ static void tcp_ecn_rcv_syn(struct tcp_sock *tp, const struct tcphdr *th)
+ {
+ if ((tp->ecn_flags & TCP_ECN_OK) && (!th->ece || !th->cwr))
+ tp->ecn_flags &= ~TCP_ECN_OK;
+ }
++#endif
+
+ static bool tcp_ecn_rcv_ecn_echo(const struct tcp_sock *tp, const struct tcphdr *th)
+ {
+@@ -5921,6 +5923,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
+ tcp_paws_reject(&tp->rx_opt, 0))
+ goto discard_and_undo;
+
++#ifndef CONFIG_HARDENED_NO_SIMULT_CONNECT
+ if (th->syn) {
+ /* We see SYN without ACK. It is attempt of
+ * simultaneous connect with crossed SYNs.
+@@ -5972,6 +5975,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
+ goto discard;
+ #endif
+ }
++#endif
+ /* "fifth, if neither of the SYN or RST bits is set then
+ * drop the segment and return."
+ */
+diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
+index 6da393016c11..e6171d1ea7c9 100644
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -95,6 +95,10 @@ static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
+ struct inet_hashinfo tcp_hashinfo;
+ EXPORT_SYMBOL(tcp_hashinfo);
+
++#ifdef CONFIG_HARDENED_STEALTH_NETWORKING
++extern int sysctl_stealth_blackhole;
++#endif
++
+ static u32 tcp_v4_init_seq(const struct sk_buff *skb)
+ {
+ return secure_tcp_seq(ip_hdr(skb)->daddr,
+@@ -1561,6 +1565,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
+ return 0;
+
+ reset:
++#ifdef CONFIG_HARDENED_STEALTH_NETWORKING
++ if (!likely(sysctl_stealth_blackhole))
++#endif
+ tcp_v4_send_reset(rsk, skb);
+ discard:
+ kfree_skb(skb);
+@@ -1709,6 +1716,27 @@ int tcp_v4_rcv(struct sk_buff *skb)
+ if (!pskb_may_pull(skb, th->doff * 4))
+ goto discard_it;
+
++#ifdef CONFIG_HARDENED_STEALTH_NETWORKING
++ if (likely(sysctl_stealth_blackhole) &&
++ (
++ th->res1 || !tcp_flag_word(th) ||
++ tcp_flag_word(th) == TCP_FLAG_PSH ||
++ tcp_flag_word(th) & (TCP_FLAG_CWR | TCP_FLAG_ECE) ||
++ (
++ tcp_flag_word(th) &
++ (TCP_FLAG_SYN | TCP_FLAG_FIN | TCP_FLAG_RST) &&
++ tcp_flag_word(th) & TCP_FLAG_URG
++ ) ||
++ (
++ tcp_flag_word(th) &
++ (TCP_FLAG_FIN | TCP_FLAG_RST) &&
++ tcp_flag_word(th) & TCP_FLAG_SYN
++ )
++ )
++ )
++ goto discard_it;
++#endif
++
+ /* An explanation is required here, I think.
+ * Packet length and doff are validated by header prediction,
+ * provided case of th->doff==0 is eliminated.
+@@ -1722,12 +1750,22 @@ int tcp_v4_rcv(struct sk_buff *skb)
+ lookup:
+ sk = __inet_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th), th->source,
+ th->dest, sdif, &refcounted);
+- if (!sk)
++ if (!sk) {
++#ifdef CONFIG_HARDENED_STEALTH_NETWORKING
++ ret = 1;
++#endif
++
+ goto no_tcp_socket;
++ }
+
+ process:
+- if (sk->sk_state == TCP_TIME_WAIT)
++ if (sk->sk_state == TCP_TIME_WAIT) {
++#ifdef CONFIG_HARDENED_STEALTH_NETWORKING
++ ret = 2;
++#endif
++
+ goto do_time_wait;
++ }
+
+ if (sk->sk_state == TCP_NEW_SYN_RECV) {
+ struct request_sock *req = inet_reqsk(sk);
+@@ -1841,6 +1879,11 @@ int tcp_v4_rcv(struct sk_buff *skb)
+ bad_packet:
+ __TCP_INC_STATS(net, TCP_MIB_INERRS);
+ } else {
++#ifdef CONFIG_HARDENED_STEALTH_NETWORKING
++ if (!sysctl_stealth_blackhole || (ret == 1 &&
++ (skb->dev->flags & IFF_LOOPBACK)))
++#endif
++
+ tcp_v4_send_reset(NULL, skb);
+ }
+
+diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
+index 7ba8a90772b0..8c359b649bcd 100644
+--- a/net/ipv4/tcp_minisocks.c
++++ b/net/ipv4/tcp_minisocks.c
+@@ -29,6 +29,10 @@
+ #include <net/xfrm.h>
+ #include <net/busy_poll.h>
+
++#ifdef CONFIG_HARDENED_STEALTH_NETWORKING
++extern int sysctl_stealth_blackhole;
++#endif
++
+ static bool tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
+ {
+ if (seq == s_win)
+@@ -809,6 +813,10 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
+ * avoid becoming vulnerable to outside attack aiming at
+ * resetting legit local connections.
+ */
++#ifdef CONFIG_HARDENED_STEALTH_NETWORKING
++ if (!sysctl_stealth_blackhole || skb->dev->flags & IFF_LOOPBACK)
++#endif
++
+ req->rsk_ops->send_reset(sk, skb);
+ } else if (fastopen) { /* received a valid RST pkt */
+ reqsk_fastopen_remove(sk, req, true);
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index 0ef04cda1b27..6cb1efa826dc 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -128,6 +128,10 @@ EXPORT_SYMBOL(udp_memory_allocated);
+ #define MAX_UDP_PORTS 65536
+ #define PORTS_PER_CHAIN (MAX_UDP_PORTS / UDP_HTABLE_SIZE_MIN)
+
++#ifdef CONFIG_HARDENED_STEALTH_NETWORKING
++extern int sysctl_stealth_blackhole;
++#endif
++
+ /* IPCB reference means this can not be used from early demux */
+ static bool udp_lib_exact_dif_match(struct net *net, struct sk_buff *skb)
+ {
+@@ -2262,6 +2266,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
+ goto csum_error;
+
+ __UDP_INC_STATS(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
++#ifdef CONFIG_HARDENED_STEALTH_NETWORKING
++ if (!likely(sysctl_stealth_blackhole) || (skb->dev->flags & IFF_LOOPBACK))
++#endif
+ icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
+
+ /*
+diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
+index 6d14cbe443f8..4eeebbdc452e 100644
+--- a/net/ipv6/icmp.c
++++ b/net/ipv6/icmp.c
+@@ -72,6 +72,10 @@
+
+ #include <linux/uaccess.h>
+
++#ifdef CONFIG_HARDENED_STEALTH_NETWORKING
++extern int sysctl_stealth_blackhole;
++#endif
++
+ /*
+ * The ICMP socket(s). This is the most convenient way to flow control
+ * our ICMP output as well as maintain a clean interface throughout
+@@ -848,6 +852,9 @@ static int icmpv6_rcv(struct sk_buff *skb)
+
+ switch (type) {
+ case ICMPV6_ECHO_REQUEST:
++#ifdef CONFIG_HARDENED_STEALTH_NETWORKING
++ if (!sysctl_stealth_blackhole || skb->dev->flags & IFF_LOOPBACK)
++#endif
+ if (!net->ipv6.sysctl.icmpv6_echo_ignore_all)
+ icmpv6_echo_reply(skb);
+ break;
+diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
+index c5f4e89b6ff3..86375c9966ef 100644
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -71,6 +71,10 @@
+
+ #include <trace/events/tcp.h>
+
++#ifdef CONFIG_HARDENED_STEALTH_NETWORKING
++extern int sysctl_stealth_blackhole;
++#endif
++
+ static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb);
+ static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
+ struct request_sock *req);
+@@ -1356,6 +1360,10 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
+ return 0;
+
+ reset:
++#ifdef CONFIG_HARDENED_STEALTH_NETWORKING
++ if (!likely(sysctl_stealth_blackhole))
++#endif
++
+ tcp_v6_send_reset(sk, skb);
+ discard:
+ if (opt_skb)
+@@ -1453,6 +1461,27 @@ static int tcp_v6_rcv(struct sk_buff *skb)
+ if (!pskb_may_pull(skb, th->doff*4))
+ goto discard_it;
+
++#ifdef CONFIG_HARDENED_STEALTH_NETWORKING
++ if (likely(sysctl_stealth_blackhole) &&
++ (
++ th->res1 || !tcp_flag_word(th) ||
++ tcp_flag_word(th) == TCP_FLAG_PSH ||
++ tcp_flag_word(th) & (TCP_FLAG_CWR | TCP_FLAG_ECE) ||
++ (
++ tcp_flag_word(th) &
++ (TCP_FLAG_SYN | TCP_FLAG_FIN | TCP_FLAG_RST) &&
++ tcp_flag_word(th) & TCP_FLAG_URG
++ ) ||
++ (
++ tcp_flag_word(th) &
++ (TCP_FLAG_FIN | TCP_FLAG_RST) &&
++ tcp_flag_word(th) & TCP_FLAG_SYN
++ )
++ )
++ )
++ goto discard_it;
++#endif
++
+ if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
+ goto csum_error;
+
+@@ -1463,12 +1492,22 @@ static int tcp_v6_rcv(struct sk_buff *skb)
+ sk = __inet6_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th),
+ th->source, th->dest, inet6_iif(skb), sdif,
+ &refcounted);
+- if (!sk)
++ if (!sk) {
++#ifdef CONFIG_HARDENED_STEALTH_NETWORKING
++ ret = 1;
++#endif
++
+ goto no_tcp_socket;
++ }
+
+ process:
+- if (sk->sk_state == TCP_TIME_WAIT)
++ if (sk->sk_state == TCP_TIME_WAIT) {
++#ifdef CONFIG_HARDENED_STEALTH_NETWORKING
++ ret = 2;
++#endif
++
+ goto do_time_wait;
++ }
+
+ if (sk->sk_state == TCP_NEW_SYN_RECV) {
+ struct request_sock *req = inet_reqsk(sk);
+@@ -1576,6 +1615,11 @@ static int tcp_v6_rcv(struct sk_buff *skb)
+ bad_packet:
+ __TCP_INC_STATS(net, TCP_MIB_INERRS);
+ } else {
++#ifdef CONFIG_HARDENED_STEALTH_NETWORKING
++ if (!sysctl_stealth_blackhole || (ret == 1 &&
++ (skb->dev->flags & IFF_LOOPBACK)))
++#endif
++
+ tcp_v6_send_reset(NULL, skb);
+ }
+
+diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
+index 1979922bcf67..6a8a3666648f 100644
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -56,6 +56,10 @@
+ #include <trace/events/skb.h>
+ #include "udp_impl.h"
+
++#ifdef CONFIG_HARDENED_STEALTH_NETWORKING
++extern int sysctl_stealth_blackhole;
++#endif
++
+ static bool udp6_lib_exact_dif_match(struct net *net, struct sk_buff *skb)
+ {
+ #if defined(CONFIG_NET_L3_MASTER_DEV)
+@@ -863,6 +867,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
+ goto csum_error;
+
+ __UDP6_INC_STATS(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
++#ifdef CONFIG_HARDENED_STEALTH_NETWORKING
++ if (!likely(sysctl_stealth_blackhole) || skb->dev->flags & IFF_LOOPBACK)
++#endif
+ icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
+
+ kfree_skb(skb);
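The screening condition repeated in the tcp_v4_rcv() and tcp_v6_rcv() hunks above is easier to follow outside the nested parentheses. The following is a minimal userspace sketch, not kernel code: it restates the same rule on plain TCP flag bits, whereas the patch tests th->res1 and tcp_flag_word(th) directly.

/* Simplified restatement of the CONFIG_HARDENED_STEALTH_NETWORKING
 * flag screening above; illustration only. */
#include <stdbool.h>
#include <stdio.h>

#define F_FIN 0x01
#define F_SYN 0x02
#define F_RST 0x04
#define F_PSH 0x08
#define F_URG 0x20
#define F_ECE 0x40
#define F_CWR 0x80

/* true when a segment with these bits would be silently discarded */
static bool blackhole_drops(unsigned res1, unsigned flags)
{
	if (res1)                                    /* reserved bits set    */
		return true;
	if (flags == 0 || flags == F_PSH)            /* NULL scan, lone PSH  */
		return true;
	if (flags & (F_CWR | F_ECE))                 /* CWR/ECE set          */
		return true;
	if ((flags & (F_SYN | F_FIN | F_RST)) && (flags & F_URG))
		return true;                             /* URG with SYN/FIN/RST */
	if ((flags & (F_FIN | F_RST)) && (flags & F_SYN))
		return true;                             /* SYN+FIN, SYN+RST     */
	return false;
}

int main(void)
{
	printf("plain SYN    -> %s\n", blackhole_drops(0, F_SYN) ? "drop" : "pass");
	printf("SYN+FIN scan -> %s\n", blackhole_drops(0, F_SYN | F_FIN) ? "drop" : "pass");
	printf("NULL scan    -> %s\n", blackhole_drops(0, 0) ? "drop" : "pass");
	return 0;
}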
diff --git a/scripts/gcc-plugins/Kconfig b/scripts/gcc-plugins/Kconfig
index cb0c889e13aa..305f52f58c1a 100644
--- a/scripts/gcc-plugins/Kconfig
@@ -2439,7 +3401,7 @@ index cb0c889e13aa..305f52f58c1a 100644
secure!
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
-index 858cbe56b100..61ade07a967a 100644
+index 91a80036c05d..41692ca62c98 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -35,6 +35,7 @@ static int vmlinux_section_warnings = 1;
@@ -2471,7 +3433,7 @@ index 858cbe56b100..61ade07a967a 100644
}
};
-@@ -1255,10 +1263,10 @@ static Elf_Sym *find_elf_symbol(struct elf_info *elf, Elf64_Sword addr,
+@@ -1267,10 +1275,10 @@ static Elf_Sym *find_elf_symbol(struct elf_info *elf, Elf64_Sword addr,
continue;
if (!is_valid_name(elf, sym))
continue;
@@ -2484,7 +3446,7 @@ index 858cbe56b100..61ade07a967a 100644
if (d < 0)
d = addr - sym->st_value;
if (d < distance) {
-@@ -1393,7 +1401,11 @@ static void report_sec_mismatch(const char *modname,
+@@ -1405,7 +1413,11 @@ static void report_sec_mismatch(const char *modname,
char *prl_from;
char *prl_to;
@@ -2497,7 +3459,7 @@ index 858cbe56b100..61ade07a967a 100644
if (!sec_mismatch_verbose)
return;
-@@ -1517,6 +1529,14 @@ static void report_sec_mismatch(const char *modname,
+@@ -1529,6 +1541,14 @@ static void report_sec_mismatch(const char *modname,
fatal("There's a special handler for this mismatch type, "
"we should never get here.");
break;
@@ -2512,7 +3474,7 @@ index 858cbe56b100..61ade07a967a 100644
}
fprintf(stderr, "\n");
}
-@@ -2528,6 +2548,14 @@ int main(int argc, char **argv)
+@@ -2540,6 +2560,14 @@ int main(int argc, char **argv)
}
}
free(buf.p);
@@ -2528,7 +3490,7 @@ index 858cbe56b100..61ade07a967a 100644
return err;
}
diff --git a/security/Kconfig b/security/Kconfig
-index d9aa521b5206..a921713b76ec 100644
+index d9aa521b5206..438acc17532e 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -8,7 +8,7 @@ source security/keys/Kconfig
@@ -2636,6 +3598,135 @@ index d9aa521b5206..a921713b76ec 100644
config STATIC_USERMODEHELPER
bool "Force all usermode helper calls through a single binary"
help
+@@ -278,3 +329,128 @@ config DEFAULT_SECURITY
+
+ endmenu
+
++menu "Hardened Enhancements"
++
++config HARDENED_RANDOM
++ bool "Enhance the random number generator"
++ default n
++ help
++ Enabling this option enhances the Linux kernel random number generator.
++ This is done by:
++	  - Increasing the pool size from 4096 bits to 262144 bits (512 B -> 32 KB).
++	  - Increasing the diffusion via the linear feedback shift register.
++	  - Defining newer 64-bit polynomial fields for the input and output pools.
++
++	  Overall, this increases the total entropy available to the system and
++	  strengthens the random number generator.
++
++
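The numbers above follow from the formula drivers/char/random.c uses, POOL_WORDS = 1 << (SHIFT - 5), with the input/output shifts raised from 12/10 to 18/16 later in this patch. A small arithmetic check, plain userspace C for illustration only:

#include <stdio.h>

/* POOL_WORDS = 1 << (SHIFT - 5), as in drivers/char/random.c */
static void show(const char *name, int shift)
{
	int words = 1 << (shift - 5);

	printf("%-22s shift %2d -> %5d words, %6d bytes, %7d bits\n",
	       name, shift, words, words * 4, words * 32);
}

int main(void)
{
	show("stock input pool", 12);      /*  128 words,   512 B,   4096 bits */
	show("hardened input pool", 18);   /* 8192 words, 32768 B, 262144 bits */
	show("stock output pool", 10);     /*   32 words,   128 B,   1024 bits */
	show("hardened output pool", 16);  /* 2048 words,  8192 B,  65536 bits */
	return 0;
}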
++config HARDENED_STEALTH_NETWORKING
++ bool "Enable stealth networking [GRSECURITY]"
++ default n
++ depends on NET
++ help
++ If you say Y here, neither TCP resets nor ICMP
++ destination-unreachable packets will be sent in response to packets
++ sent to ports for which no associated listening process exists.
++ This feature supports both IPV4 and IPV6 and exempts the
++ loopback interface from blackholing. Enabling this feature
++ makes a host more resilient to DoS attacks and reduces network
++ visibility against scanners.
++
++ The blackhole feature as-implemented is equivalent to the FreeBSD
++ blackhole feature, as it prevents RST responses to all packets, not
++ just SYNs. Under most application behavior this causes no
++ problems, but applications (like haproxy) may not close certain
++ connections in a way that cleanly terminates them on the remote
++ end, leaving the remote host in LAST_ACK state. Because of this
++ side-effect and to prevent intentional LAST_ACK DoSes, this
++ feature also adds automatic mitigation against such attacks.
++ The mitigation drastically reduces the amount of time a socket
++ can spend in LAST_ACK state. If you're using haproxy and not
++ all servers it connects to have this option enabled, consider
++ disabling this feature on the haproxy host.
++
++	  If the sysctl option is enabled, a sysctl option with the name
++	  "ip_blackhole" will be created.
++	  This sysctl, "ip_blackhole", takes the standard zero/non-zero
++	  on/off toggle to enable or disable this feature.
++
++
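Assuming the knob is registered in kern_table so that it appears as /proc/sys/kernel/ip_blackhole (the table entry itself is not part of this excerpt), toggling it programmatically is a single file write; a minimal sketch, normally run as root:

#include <stdio.h>
#include <stdlib.h>

/* Path assumed from the help text above; adjust it if the sysctl is
 * registered elsewhere.  Writing under /proc/sys typically requires root. */
static int set_blackhole(int on)
{
	FILE *f = fopen("/proc/sys/kernel/ip_blackhole", "w");

	if (!f) {
		perror("ip_blackhole");
		return -1;
	}
	fprintf(f, "%d\n", on ? 1 : 0);
	return fclose(f);
}

int main(int argc, char **argv)
{
	int on = argc > 1 ? atoi(argv[1]) : 1;

	return set_blackhole(on) ? EXIT_FAILURE : EXIT_SUCCESS;
}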
++config HARDENED_NO_SIMULT_CONNECT
++ bool "Disable simultaneous TCP connections [GRSECURITY]"
++ default n
++ depends on NET
++ help
++ If you say Y here, a feature by Willy Tarreau will be enabled that
++ removes a weakness in Linux's strict implementation of TCP that
++ allows two clients to connect to each other without either entering
++ a listening state. The weakness allows an attacker to easily prevent
++ a client from connecting to a known server provided the source port
++ for the connection is guessed correctly.
++
++ As the weakness could be used to prevent an antivirus or IPS from
++ fetching updates, or prevent an SSL gateway from fetching a CRL,
++ it should be eliminated by enabling this option. Though Linux is
++ one of few operating systems supporting simultaneous connect, it
++ has no legitimate use in practice and is rarely supported by firewalls.
++
++
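The removed behaviour can be pictured with a loopback simultaneous open: two sockets, no listener, each connecting to the other. The sketch below is an illustration only; ports 42001/42002 are arbitrary, and whether the crossed SYNs actually line up on loopback is timing-dependent, but with this option enabled they can never complete.

#include <fcntl.h>
#include <netinet/in.h>
#include <poll.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

/* Bind to 'own', then fire a non-blocking connect at 'peer'. */
static int crossed_sock(uint16_t own, uint16_t peer)
{
	struct sockaddr_in a = { .sin_family = AF_INET };
	int s = socket(AF_INET, SOCK_STREAM, 0), one = 1;

	setsockopt(s, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one));
	a.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
	a.sin_port = htons(own);
	bind(s, (struct sockaddr *)&a, sizeof(a));
	fcntl(s, F_SETFL, O_NONBLOCK);
	a.sin_port = htons(peer);
	connect(s, (struct sockaddr *)&a, sizeof(a));   /* SYN sent, EINPROGRESS */
	return s;
}

static int established(struct pollfd *p)
{
	int err = 1;
	socklen_t len = sizeof(err);

	getsockopt(p->fd, SOL_SOCKET, SO_ERROR, &err, &len);
	return (p->revents & POLLOUT) && err == 0;
}

int main(void)
{
	int a = crossed_sock(42001, 42002);
	int b = crossed_sock(42002, 42001);
	struct pollfd p[2] = { { a, POLLOUT, 0 }, { b, POLLOUT, 0 } };

	poll(p, 2, 5000);
	printf("a: %s, b: %s\n",
	       established(&p[0]) ? "connected" : "not connected",
	       established(&p[1]) ? "connected" : "not connected");
	close(a);
	close(b);
	return 0;
}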
++config HARDENED_SYSFS_RESTRICT
++ bool "Restrict SysFS & DebugFS [GRSECURITY]"
++ default y
++ depends on SYSFS
++ help
++ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
++ any filesystem normally mounted under it (e.g. debugfs) will be
++ mostly accessible only by root. These filesystems generally provide access
++ to hardware and debug information that isn't appropriate for unprivileged
++ users of the system. Sysfs and debugfs have also become a large source
++ of new vulnerabilities, ranging from infoleaks to local compromise.
++ There has been very little oversight with an eye toward security involved
++ in adding new exporters of information to these filesystems, so their
++ use is discouraged.
++ To enable or disable this feature at runtime, use the sysctl
++ kernel.sysfs_restricted.
++ For reasons of compatibility, a few directories have been whitelisted
++ for access by non-root users:
++ /sys/fs/selinux
++ /sys/fs/fuse
++ /sys/devices/system/cpu
++
++
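A quick unprivileged check of the restriction: the first two paths come from the whitelist in the help text, while /sys/kernel and /sys/class are simply expected to fall under the root-only default. Illustration only.

#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *paths[] = {
		"/sys/devices/system/cpu",   /* whitelisted above        */
		"/sys/fs/fuse",              /* whitelisted above        */
		"/sys/kernel",               /* expected to be root-only */
		"/sys/class",                /* expected to be root-only */
	};
	unsigned i;

	for (i = 0; i < sizeof(paths) / sizeof(paths[0]); i++)
		printf("%-26s %s\n", paths[i],
		       access(paths[i], R_OK | X_OK) == 0 ? "accessible" : "restricted");
	return 0;
}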
++config HARDENED_FIFO
++ bool "Restrict FIFO [GRSECURITY]"
++ default y
++ help
++	  If you say Y here, users will not be able to write to FIFOs they don't
++	  own in world-writable +t directories (e.g. /tmp), unless the owner of
++	  the FIFO is the same as the owner of the directory it's held in. If the
++	  sysctl option is enabled, a sysctl option with the name
++	  "fifo_restrictions" is created.
++
++
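A sketch of how the denial would surface. It assumes another user has already created /tmp/otheruser.fifo in the sticky, world-writable /tmp, so both the path and the second user are assumptions made purely for the demo.

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* FIFO assumed to be owned by a user other than us and other than /tmp's owner */
	const char *path = "/tmp/otheruser.fifo";
	int fd = open(path, O_RDONLY | O_NONBLOCK);

	if (fd < 0)
		printf("open(%s): %s%s\n", path, strerror(errno),
		       errno == EACCES ? " (fifo_restrictions in effect)" : "");
	else {
		printf("open(%s) succeeded\n", path);
		close(fd);
	}
	return 0;
}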
++config HARDENED_MODULE_LOAD
++ bool "Harden module auto-loading [GRSECURITY]"
++ default y
++ depends on MODULES
++ help
++ If you say Y here, module auto-loading in response to use of some
++ feature implemented by an unloaded module will be restricted to
++ root users. Enabling this option helps defend against attacks
++ by unprivileged users who abuse the auto-loading behavior to
++	  cause a vulnerable module to be loaded and then exploited.
++
++ If this option prevents a legitimate use of auto-loading for a
++ non-root user, the administrator can execute modprobe manually
++ with the exact name of the module mentioned in the alert log.
++ Alternatively, the administrator can add the module to the list
++ of modules loaded at boot by modifying init scripts.
++
++ Modification of init scripts will most likely be needed on
++ Ubuntu servers with encrypted home directory support enabled,
++ as the first non-root user logging in will cause the ecb(aes),
++ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
++
++
++endmenu
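Per the help text, the restriction shows up to an unprivileged process as a failed request for a module-backed feature rather than as an explicit error of its own. The sketch below uses AF_AX25 only as an example of a protocol family that is usually built as a module; any other modular feature would do.

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	int fd = socket(AF_AX25, SOCK_DGRAM, 0);

	if (fd < 0)
		/* with auto-loading denied this typically reports EAFNOSUPPORT */
		printf("socket(AF_AX25): %s\n", strerror(errno));
	else {
		printf("AF_AX25 available (module already loaded)\n");
		close(fd);
	}
	return 0;
}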
diff --git a/security/selinux/Kconfig b/security/selinux/Kconfig
index 8af7a690eb40..6539694b0fd3 100644
--- a/security/selinux/Kconfig
@@ -2674,7 +3765,7 @@ index 8af7a690eb40..6539694b0fd3 100644
-
- If you are unsure how to answer this question, answer 0.
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
-index 70bad15ed7a0..a157a3d57cdd 100644
+index c574285966f9..667cca9fcd78 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -135,18 +135,7 @@ __setup("selinux=", selinux_enabled_setup);
@@ -2734,3 +3825,18 @@ index 96b27405558a..485c1b85c325 100644
help
This selects Yama, which extends DAC support with additional
system-wide security settings beyond regular Linux discretionary
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index 4e499b78569b..55bba37f9517 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -618,6 +618,10 @@ static int kvm_create_vm_debugfs(struct kvm *kvm, int fd)
+ struct kvm_stat_data *stat_data;
+ struct kvm_stats_debugfs_item *p;
+
++#ifdef CONFIG_HARDENED_SYSFS_RESTRICT
++ return 0;
++#endif
++
+ if (!debugfs_initialized())
+ return 0;
+
diff --git a/sys-kernel/linux-image-redcore-lts-legacy/linux-image-redcore-lts-legacy-4.19.104.ebuild b/sys-kernel/linux-image-redcore-lts-legacy/linux-image-redcore-lts-legacy-4.19.108.ebuild
index f7975ac8..f7975ac8 100644
--- a/sys-kernel/linux-image-redcore-lts-legacy/linux-image-redcore-lts-legacy-4.19.104.ebuild
+++ b/sys-kernel/linux-image-redcore-lts-legacy/linux-image-redcore-lts-legacy-4.19.108.ebuild
diff --git a/sys-kernel/linux-sources-redcore-lts-legacy/Manifest b/sys-kernel/linux-sources-redcore-lts-legacy/Manifest
index e9c22b9a..2847081b 100644
--- a/sys-kernel/linux-sources-redcore-lts-legacy/Manifest
+++ b/sys-kernel/linux-sources-redcore-lts-legacy/Manifest
@@ -1 +1 @@
-DIST linux-4.19.104.tar.xz 103438092 BLAKE2B 10324dae2d3e29048f4ae9474eb3b9dc6c210e7d0fad017b07620ca600cf76bf144b026a47ef7ff11e1dbb9ed7e60bba7d51e8ef6b278b32cae2205ac478a48c SHA512 08bd31c37e413a5f1f785a0fd53d7f044255fc30bf6af15876c15a88ad533c1895d905f8708d93c7fa769b4f6b13af46b442ff213b8ac8a936fee433c8fcd012
+DIST linux-4.19.108.tar.xz 103408396 BLAKE2B d8130982ef7b1bbcdc6b1e4099b6bf5bd707f722b74b2e8cf8bcd45e510f3e39c8418851c923013a6919c4c45c43c65fae8f39ba92563f4645c1c3a3fcfd2937 SHA512 c39099f314fe22e902f561f0dd17fad021a9b8d4ad3c154709a7680e710980a44bcadf46092da7f1a0a9c6910a9b2a8116cc874f8991e197ff05fafe4312da2d
diff --git a/sys-kernel/linux-sources-redcore-lts-legacy/files/4.19-amd64.config b/sys-kernel/linux-sources-redcore-lts-legacy/files/4.19-amd64.config
index 274f6f27..7dbc8f7a 100644
--- a/sys-kernel/linux-sources-redcore-lts-legacy/files/4.19-amd64.config
+++ b/sys-kernel/linux-sources-redcore-lts-legacy/files/4.19-amd64.config
@@ -1,6 +1,6 @@
#
# Automatically generated file; DO NOT EDIT.
-# Linux/x86 4.19.95-redcore-lts Kernel Configuration
+# Linux/x86 4.19.108-redcore-lts-legacy Kernel Configuration
#
#
@@ -159,6 +159,7 @@ CONFIG_NAMESPACES=y
CONFIG_UTS_NS=y
CONFIG_IPC_NS=y
CONFIG_USER_NS=y
+# CONFIG_USER_NS_UNPRIVILEGED is not set
CONFIG_PID_NS=y
CONFIG_NET_NS=y
# CONFIG_CHECKPOINT_RESTORE is not set
@@ -8743,6 +8744,16 @@ CONFIG_SECURITY_YAMA=y
CONFIG_DEFAULT_SECURITY_APPARMOR=y
# CONFIG_DEFAULT_SECURITY_DAC is not set
CONFIG_DEFAULT_SECURITY="apparmor"
+
+#
+# Hardened Enhancements
+#
+CONFIG_HARDENED_RANDOM=y
+CONFIG_HARDENED_STEALTH_NETWORKING=y
+CONFIG_HARDENED_NO_SIMULT_CONNECT=y
+CONFIG_HARDENED_SYSFS_RESTRICT=y
+CONFIG_HARDENED_FIFO=y
+# CONFIG_HARDENED_MODULE_LOAD is not set
CONFIG_XOR_BLOCKS=m
CONFIG_ASYNC_CORE=m
CONFIG_ASYNC_MEMCPY=m
diff --git a/sys-kernel/linux-sources-redcore-lts-legacy/files/4.19-linux-hardened.patch b/sys-kernel/linux-sources-redcore-lts-legacy/files/4.19-linux-hardened.patch
index 3cba43ca..2f477148 100644
--- a/sys-kernel/linux-sources-redcore-lts-legacy/files/4.19-linux-hardened.patch
+++ b/sys-kernel/linux-sources-redcore-lts-legacy/files/4.19-linux-hardened.patch
@@ -1,5 +1,5 @@
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
-index a29301d6e6c6..d3a259e762fa 100644
+index 8bf0c0532046..c81c652ecf44 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -496,16 +496,6 @@
@@ -19,7 +19,7 @@ index a29301d6e6c6..d3a259e762fa 100644
cio_ignore= [S390]
See Documentation/s390/CommonIO for details.
clk_ignore_unused
-@@ -3165,6 +3155,11 @@
+@@ -3207,6 +3197,11 @@
the specified number of seconds. This is to be used if
your oopses keep scrolling off the screen.
@@ -71,10 +71,10 @@ index 37a679501ddc..59b747920f4d 100644
The value in this file affects behavior of handling NMI. When the
diff --git a/Makefile b/Makefile
-index f7e7e365e2ff..7c69091b65a0 100644
+index 313f0c8dd66f..8e66fca67fd0 100644
--- a/Makefile
+++ b/Makefile
-@@ -693,6 +693,9 @@ stackp-flags-$(CONFIG_STACKPROTECTOR_STRONG) := -fstack-protector-strong
+@@ -696,6 +696,9 @@ stackp-flags-$(CONFIG_STACKPROTECTOR_STRONG) := -fstack-protector-strong
KBUILD_CFLAGS += $(stackp-flags-y)
ifeq ($(cc-name),clang)
@@ -115,10 +115,10 @@ index a336548487e6..bbe821420e7a 100644
Enabling this switches the refcounting infrastructure from a fast
unchecked atomic_t implementation to a fully state checked
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
-index 8790a29d0af4..265c6edd859f 100644
+index 51fe21f5d078..f345755446d9 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
-@@ -1031,6 +1031,7 @@ endif
+@@ -1033,6 +1033,7 @@ endif
config ARM64_SW_TTBR0_PAN
bool "Emulate Privileged Access Never using TTBR0_EL1 switching"
@@ -126,7 +126,7 @@ index 8790a29d0af4..265c6edd859f 100644
help
Enabling this option prevents the kernel from accessing
user-space memory directly by pointing TTBR0_EL1 to a reserved
-@@ -1206,6 +1207,7 @@ config RANDOMIZE_BASE
+@@ -1208,6 +1209,7 @@ config RANDOMIZE_BASE
bool "Randomize the address of the kernel image"
select ARM64_MODULE_PLTS if MODULES
select RELOCATABLE
@@ -147,7 +147,7 @@ index 69c9170bdd24..a786227db0e3 100644
Generate a warning if any W+X mappings are found at boot.
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
-index db8d364f8476..67441db36c07 100644
+index 1a4f8b67bbe8..85273063eb56 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -1,4 +1,3 @@
@@ -187,10 +187,10 @@ index 433b9554c6a1..1f4b06317c9f 100644
#ifdef __AARCH64EB__
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
-index 7f1628effe6d..38bd2f95a961 100644
+index d6a49bb07a5f..16e4214c2305 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
-@@ -481,9 +481,9 @@ unsigned long arch_align_stack(unsigned long sp)
+@@ -517,9 +517,9 @@ unsigned long arch_align_stack(unsigned long sp)
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
if (is_compat_task())
@@ -203,7 +203,7 @@ index 7f1628effe6d..38bd2f95a961 100644
/*
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
-index e76d16ac2776..07dcedbb271e 100644
+index af35f5caadbe..34e88af114bd 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1189,8 +1189,7 @@ config VM86
@@ -216,7 +216,7 @@ index e76d16ac2776..07dcedbb271e 100644
depends on MODIFY_LDT_SYSCALL
---help---
This option is required by programs like Wine to run 16-bit
-@@ -2274,7 +2273,7 @@ config COMPAT_VDSO
+@@ -2319,7 +2318,7 @@ config COMPAT_VDSO
choice
prompt "vsyscall table for legacy applications"
depends on X86_64
@@ -225,7 +225,7 @@ index e76d16ac2776..07dcedbb271e 100644
help
Legacy user code that does not know how to find the vDSO expects
to be able to issue three syscalls by calling fixed addresses in
-@@ -2355,8 +2354,7 @@ config CMDLINE_OVERRIDE
+@@ -2400,8 +2399,7 @@ config CMDLINE_OVERRIDE
be set to 'N' under normal conditions.
config MODIFY_LDT_SYSCALL
@@ -236,7 +236,7 @@ index e76d16ac2776..07dcedbb271e 100644
Linux can allow user programs to install a per-process x86
Local Descriptor Table (LDT) using the modify_ldt(2) system
diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
-index 7d68f0c7cfb1..85f04bbeadd8 100644
+index 687cd1a213d5..29075c2bc51f 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -101,6 +101,7 @@ config EFI_PGT_DUMP
@@ -403,10 +403,10 @@ index 79ec7add5f98..2950448e00ac 100644
native_write_cr4(cr4 ^ X86_CR4_PGE);
/* write old PGE again and flush TLBs */
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
-index 1073118b9bf0..2e34aede5c36 100644
+index a6458ab499c2..0be5291ec42e 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
-@@ -1748,7 +1748,6 @@ void cpu_init(void)
+@@ -1790,7 +1790,6 @@ void cpu_init(void)
wrmsrl(MSR_KERNEL_GS_BASE, 0);
barrier();
@@ -479,7 +479,7 @@ index 6a78d4b36a79..715009f7a96c 100644
/*
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
-index 979e0a02cbe1..d6ab882a0091 100644
+index 79b95910fd9f..fcda13aa03d0 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -560,9 +560,9 @@ static void __init pagetable_init(void)
@@ -494,8 +494,8 @@ index 979e0a02cbe1..d6ab882a0091 100644
EXPORT_SYMBOL_GPL(__supported_pte_mask);
/* Used in PAGE_KERNEL_* macros which are reasonably used out-of-tree: */
EXPORT_SYMBOL(__default_kernel_pte_mask);
-@@ -873,7 +873,7 @@ int arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
- #endif
+@@ -870,7 +870,7 @@ void arch_remove_memory(int nid, u64 start, u64 size,
+ }
#endif
-int kernel_set_to_readonly __read_mostly;
@@ -503,7 +503,7 @@ index 979e0a02cbe1..d6ab882a0091 100644
void set_kernel_text_rw(void)
{
-@@ -925,12 +925,11 @@ void mark_rodata_ro(void)
+@@ -922,12 +922,11 @@ void mark_rodata_ro(void)
unsigned long start = PFN_ALIGN(_text);
unsigned long size = PFN_ALIGN(_etext) - start;
@@ -518,7 +518,7 @@ index 979e0a02cbe1..d6ab882a0091 100644
printk(KERN_INFO "Testing CPA: Reverting %lx-%lx\n",
start, start+size);
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
-index a3e9c6ee3cf2..40bbcd978b0a 100644
+index 81e85a8dd300..f0403d1ba1b0 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -66,9 +66,9 @@
@@ -533,7 +533,7 @@ index a3e9c6ee3cf2..40bbcd978b0a 100644
EXPORT_SYMBOL_GPL(__supported_pte_mask);
/* Used in PAGE_KERNEL_* macros which are reasonably used out-of-tree: */
EXPORT_SYMBOL(__default_kernel_pte_mask);
-@@ -1201,7 +1201,7 @@ void __init mem_init(void)
+@@ -1190,7 +1190,7 @@ void __init mem_init(void)
mem_init_print_info(NULL);
}
@@ -542,7 +542,7 @@ index a3e9c6ee3cf2..40bbcd978b0a 100644
void set_kernel_text_rw(void)
{
-@@ -1250,9 +1250,8 @@ void mark_rodata_ro(void)
+@@ -1239,9 +1239,8 @@ void mark_rodata_ro(void)
printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
(end - start) >> 10);
@@ -567,10 +567,10 @@ index 15c1f5e12eb8..ff72cccec5b8 100644
struct list_head *cpu_list, local_list;
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
-index adf28788cab5..cd4b3501eda9 100644
+index 75d582ca917f..38ba030b8e27 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
-@@ -5158,7 +5158,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
+@@ -5161,7 +5161,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
struct ata_port *ap;
unsigned int tag;
@@ -579,7 +579,7 @@ index adf28788cab5..cd4b3501eda9 100644
ap = qc->ap;
qc->flags = 0;
-@@ -5175,7 +5175,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
+@@ -5178,7 +5178,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
struct ata_port *ap;
struct ata_link *link;
@@ -608,6 +608,306 @@ index 1df9cb8e659e..eb71148a4a69 100644
help
Say Y here if you want to support the /dev/port device. The /dev/port
device is similar to /dev/mem, but for I/O ports.
+diff --git a/drivers/char/random.c b/drivers/char/random.c
+index 53e822793d46..c97b295338ce 100644
+--- a/drivers/char/random.c
++++ b/drivers/char/random.c
+@@ -281,11 +281,20 @@
+ /*
+ * Configuration information
+ */
++#ifdef CONFIG_HARDENED_RANDOM
++#define INPUT_POOL_SHIFT 18
++#define OUTPUT_POOL_SHIFT 16
++#else
+ #define INPUT_POOL_SHIFT 12
+-#define INPUT_POOL_WORDS (1 << (INPUT_POOL_SHIFT-5))
+ #define OUTPUT_POOL_SHIFT 10
++#endif
++#define INPUT_POOL_WORDS (1 << (INPUT_POOL_SHIFT-5))
+ #define OUTPUT_POOL_WORDS (1 << (OUTPUT_POOL_SHIFT-5))
++#ifdef CONFIG_HARDENED_RANDOM
++#define SEC_XFER_SIZE 32768
++#else
+ #define SEC_XFER_SIZE 512
++#endif
+ #define EXTRACT_SIZE 10
+
+
+@@ -294,9 +303,6 @@
+ /*
+ * To allow fractional bits to be tracked, the entropy_count field is
+ * denominated in units of 1/8th bits.
+- *
+- * 2*(ENTROPY_SHIFT + log2(poolbits)) must <= 31, or the multiply in
+- * credit_entropy_bits() needs to be 64 bits wide.
+ */
+ #define ENTROPY_SHIFT 3
+ #define ENTROPY_BITS(r) ((r)->entropy_count >> ENTROPY_SHIFT)
+@@ -361,15 +367,27 @@ static int random_write_wakeup_bits = 28 * OUTPUT_POOL_WORDS;
+ */
+ static struct poolinfo {
+ int poolbitshift, poolwords, poolbytes, poolbits, poolfracbits;
+-#define S(x) ilog2(x)+5, (x), (x)*4, (x)*32, (x) << (ENTROPY_SHIFT+5)
+- int tap1, tap2, tap3, tap4, tap5;
+-} poolinfo_table[] = {
++#define S(x) \
++ .poolbitshift = ilog2(x)+5, \
++ .poolwords = (x), \
++ .poolbytes = (x)*4, \
++ .poolbits = (x)*32, \
++ .poolfracbits = (x) << (ENTROPY_SHIFT+5)
++ int tap[5];
++} __randomize_layout poolinfo_table[] = {
++#ifdef CONFIG_HARDENED_RANDOM
++ /* x^8192 + x^104 + x^76 + x^51 +x^25 + x + 1 */
++ { S(8192), .tap = { 104, 76, 51, 25, 1 } },
++ /* x^2048 + x^26 + x^19 + x^14 + x^7 + x + 1 */
++ { S(2048), .tap = { 26, 19, 14, 7, 1 } }
++#else
+ /* was: x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 */
+ /* x^128 + x^104 + x^76 + x^51 +x^25 + x + 1 */
+- { S(128), 104, 76, 51, 25, 1 },
++ { S(128), .tap = { 104, 76, 51, 25, 1 } },
+ /* was: x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 */
+ /* x^32 + x^26 + x^19 + x^14 + x^7 + x + 1 */
+- { S(32), 26, 19, 14, 7, 1 },
++ { S(32), .tap = { 26, 19, 14, 7, 1 } },
++#endif
+ #if 0
+ /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
+ { S(2048), 1638, 1231, 819, 411, 1 },
+@@ -413,7 +431,7 @@ struct crng_state {
+ __u32 state[16];
+ unsigned long init_time;
+ spinlock_t lock;
+-};
++} __randomize_layout;
+
+ struct crng_state primary_crng = {
+ .lock = __SPIN_LOCK_UNLOCKED(primary_crng.lock),
+@@ -475,7 +493,7 @@ struct entropy_store {
+ unsigned int initialized:1;
+ unsigned int last_data_init:1;
+ __u8 last_data[EXTRACT_SIZE];
+-};
++} __randomize_layout;
+
+ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
+ size_t nbytes, int min, int rsvd);
+@@ -486,6 +504,8 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r);
+ static void push_to_pool(struct work_struct *work);
+ static __u32 input_pool_data[INPUT_POOL_WORDS] __latent_entropy;
+ static __u32 blocking_pool_data[OUTPUT_POOL_WORDS] __latent_entropy;
++/* this actually doesn't need latent entropy */
++static __u32 secondary_xfer_buffer[OUTPUT_POOL_WORDS];
+
+ static struct entropy_store input_pool = {
+ .poolinfo = &poolinfo_table[0],
+@@ -504,9 +524,78 @@ static struct entropy_store blocking_pool = {
+ push_to_pool),
+ };
+
++#ifdef CONFIG_HARDENED_RANDOM
++static __u32 const twist_table[64][4] = {
++ { 0x6a09e668, 0xbb67ae86, 0x3c6ef373, 0xa54ff53a },
++ { 0x510e5280, 0x9b05688c, 0x1f83d9ac, 0x5be0cd19 },
++ { 0xcbbb9d5e, 0x629a292a, 0x9159015a, 0x152fecd9 },
++ { 0x67332668, 0x8eb44a87, 0xdb0c2e0d, 0x47b5481e },
++ { 0xae5f9157, 0xcf6c85d4, 0x2f73477d, 0x6d1826cb },
++ { 0x8b43d457, 0xe360b597, 0x1c456003, 0x6f196331 },
++ { 0xd94ebeb2, 0x0cc4a612, 0x261dc1f3, 0x5815a7be },
++ { 0x70b7ed68, 0xa1513c69, 0x44f93636, 0x720dcdfe },
++ { 0xb467369e, 0xca320b76, 0x34e0d42e, 0x49c7d9be },
++ { 0x87abb9f2, 0xc463a2fc, 0xec3fc3f4, 0x27277f6d },
++ { 0x610bebf3, 0x7420b49f, 0xd1fd8a34, 0xe4773594 },
++ { 0x092197f6, 0x1b530c96, 0x869d6343, 0xeee52e50 },
++ { 0x1107668a, 0x21fba37c, 0x43ab9fb6, 0x75a9f91d },
++ { 0x8630501a, 0xd7cd8174, 0x007fe010, 0x0379f514 },
++ { 0x066b651b, 0x0764ab84, 0x0a4b06be, 0x0c3578c1 },
++ { 0x0d2962a5, 0x11e039f4, 0x1857b7bf, 0x1a29bf2e },
++ { 0x1b11a32f, 0x1cdf34e8, 0x23183042, 0x25b89093 },
++ { 0x2a0c06a1, 0x2ae79843, 0x2c9cda69, 0x2f281f24 },
++ { 0x32841259, 0x3502e64e, 0x377c9c21, 0x39204cda },
++ { 0x3b91bf66, 0x3ecc38ca, 0x40665609, 0x43947938 },
++ { 0x47830769, 0x484ae4b8, 0x4c2b2b75, 0x4cf03d21 },
++ { 0x4f3cbb11, 0x50c2d3b5, 0x5308af16, 0x560a7a9a },
++ { 0x5788d981, 0x584769b4, 0x59c34f06, 0x5e2d564c },
++ { 0x6116d760, 0x62894c10, 0x6569b58c, 0x66d7b394 },
++ { 0x68f9f8dc, 0x6d34f03d, 0x6de8372f, 0x742687a4 },
++ { 0x76356021, 0x799d1235, 0x7ba455f4, 0x7da8d73b },
++ { 0x7e546743, 0x80554bdc, 0x83a63a3c, 0x85a01e39 },
++ { 0x879774ac, 0x883eac9f, 0x8a32aae0, 0x8c243210 },
++ { 0x8d6e8781, 0x8e134b6f, 0x91ea5892, 0x95166fe4 },
++ { 0x95b817e6, 0x96faa747, 0x98dca135, 0x9abc6593 },
++ { 0x9b5bd55a, 0x9f136df7, 0xa04ebd79, 0xa225f6ed },
++ { 0xa4970e49, 0xa79f5a6b, 0xaa0869af, 0xad06dcbd },
++ { 0xaf68312e, 0xb12efe0b, 0xb2f3ef5b, 0xb420e03a },
++ { 0xb6785656, 0xb837d738, 0xb9613115, 0xbbb18efb },
++ { 0xbcd89621, 0xc0db3814, 0xc3b2f2a3, 0xc71638d9 },
++ { 0xc7a6240f, 0xca73166e, 0xcb01f3ba, 0xcc1f293d },
++ { 0xccad81c8, 0xcf72acaf, 0xd34c7258, 0xd4649b7a },
++ { 0xd4f07147, 0xd607a013, 0xd9d3b47b, 0xdae803b5 },
++ { 0xdb71ef1a, 0xdc854e24, 0xe1dcf0ea, 0xe2eca719 },
++ { 0xe50a4ad8, 0xe7ac0990, 0xe9c46d3a, 0xeacfc33c },
++ { 0xec5fb417, 0xedee611c, 0xf18bc533, 0xf292ef77 },
++ { 0xf41cab36, 0xf5a531ec, 0xf7aeb45d, 0xf93474e9 },
++ { 0xfc3c7559, 0xfd3e1962, 0xfebf9bc1, 0xff3fdbf2 },
++ { 0x01bf3cab, 0x023ebd6b, 0x03bc8288, 0x06365a0f },
++ { 0x06b4c1d2, 0x092afcc1, 0x09a8ad2c, 0x0b21093c },
++ { 0x0f83d25e, 0x107c1074, 0x10f803d0, 0x11ef938d },
++ { 0x136212e8, 0x14d390a4, 0x16beab25, 0x182dd7d5 },
++ { 0x199c09bf, 0x1ed27f46, 0x1f4b2d3e, 0x21a502bc },
++ { 0x23849e06, 0x25d9d3da, 0x273ef0ca, 0x28a326f6 },
++ { 0x2a7cb5e4, 0x2d4019ba, 0x2e2b1e73, 0x2f8aec73 },
++ { 0x30e9ddcc, 0x315ea828, 0x32bc75cf, 0x357587f0 },
++ { 0x37b7de93, 0x3bc31ec6, 0x3c35b24a, 0x3d1a949b },
++ { 0x3e713d15, 0x3ee347da, 0x4038e0bf, 0x411c2bae },
++ { 0x418daf9a, 0x4270749e, 0x4516b0b0, 0x45876dcb },
++ { 0x46d92246, 0x4e448a56, 0x4f9141c0, 0x50dd3e71 },
++ { 0x5296c45b, 0x56738aac, 0x58961d02, 0x5b9010c1 },
++ { 0x5c6913ae, 0x5cd577f2, 0x5dae0649, 0x5ef24aeb },
++ { 0x60a199af, 0x6178ce9b, 0x61e44c97, 0x6326551c },
++ { 0x65a86b29, 0x67bd7e12, 0x6827e41c, 0x68fc7925 },
++ { 0x6966a836, 0x6a3acfa3, 0x6b78828a, 0x6df2017d },
++ { 0x7068fdbb, 0x720c4495, 0x747f226b, 0x75b7a753 },
++ { 0x7687a9e0, 0x77bf2d48, 0x795d98d4, 0x7a2c690b },
++ { 0x7bc93fa8, 0x7c974690, 0x7f6653f3, 0x80333127 },
++ { 0x81660244, 0x81cc2760, 0x829840e3, 0x83c9edd4 }
++};
++#else
+ static __u32 const twist_table[8] = {
+ 0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
+ 0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };
++#endif
+
+ /*
+ * This function adds bytes into the entropy "pool". It does not
+@@ -521,17 +610,14 @@ static __u32 const twist_table[8] = {
+ static void _mix_pool_bytes(struct entropy_store *r, const void *in,
+ int nbytes)
+ {
+- unsigned long i, tap1, tap2, tap3, tap4, tap5;
++ unsigned long i, n, t1, t2, tap[5];
+ int input_rotate;
+ int wordmask = r->poolinfo->poolwords - 1;
+ const char *bytes = in;
+ __u32 w;
+
+- tap1 = r->poolinfo->tap1;
+- tap2 = r->poolinfo->tap2;
+- tap3 = r->poolinfo->tap3;
+- tap4 = r->poolinfo->tap4;
+- tap5 = r->poolinfo->tap5;
++ for (n = 0; n < 5; n++)
++ tap[n] = r->poolinfo->tap[n];
+
+ input_rotate = r->input_rotate;
+ i = r->add_ptr;
+@@ -543,14 +629,17 @@ static void _mix_pool_bytes(struct entropy_store *r, const void *in,
+
+ /* XOR in the various taps */
+ w ^= r->pool[i];
+- w ^= r->pool[(i + tap1) & wordmask];
+- w ^= r->pool[(i + tap2) & wordmask];
+- w ^= r->pool[(i + tap3) & wordmask];
+- w ^= r->pool[(i + tap4) & wordmask];
+- w ^= r->pool[(i + tap5) & wordmask];
++ for (n = 0; n < 5; n++)
++ w ^= r->pool[(i + tap[n]) & wordmask];
+
+ /* Mix the result back in with a twist */
++#ifdef CONFIG_HARDENED_RANDOM
++	t1 = rol32(w, 14) & 0x3F;	// 0-63, 111111
++ t2 = rol32(w, t1) & 0x3; // 0-3, 11
++ r->pool[i] = (w >> 3) ^ twist_table[t1][t2];
++#else
+ r->pool[i] = (w >> 3) ^ twist_table[w & 7];
++#endif
+
+ /*
+ * Normally, we add 7 bits of rotation to the pool.
+@@ -588,7 +677,7 @@ struct fast_pool {
+ unsigned long last;
+ unsigned short reg_idx;
+ unsigned char count;
+-};
++} __randomize_layout;
+
+ /*
+ * This is a fast mixing routine used by the interrupt randomness
+@@ -683,7 +772,7 @@ static void credit_entropy_bits(struct entropy_store *r, int nbits)
+ /* The +2 corresponds to the /4 in the denominator */
+
+ do {
+- unsigned int anfrac = min(pnfrac, pool_size/2);
++ __u64 anfrac = min(pnfrac, pool_size/2);
+ unsigned int add =
+ ((pool_size - entropy_count)*anfrac*3) >> s;
+
+@@ -1061,7 +1150,7 @@ static ssize_t extract_crng_user(void __user *buf, size_t nbytes)
+
+ extract_crng(tmp);
+ i = min_t(int, nbytes, CHACHA20_BLOCK_SIZE);
+- if (copy_to_user(buf, tmp, i)) {
++ if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
+ ret = -EFAULT;
+ break;
+ }
+@@ -1089,9 +1178,9 @@ static ssize_t extract_crng_user(void __user *buf, size_t nbytes)
+ struct timer_rand_state {
+ cycles_t last_time;
+ long last_delta, last_delta2;
+-};
++} __randomize_layout;
+
+-#define INIT_TIMER_RAND_STATE { INITIAL_JIFFIES, };
++#define INIT_TIMER_RAND_STATE { .last_time = INITIAL_JIFFIES };
+
+ /*
+ * Add device- or boot-specific data to the input pool to help
+@@ -1334,20 +1423,18 @@ static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
+
+ static void _xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
+ {
+- __u32 tmp[OUTPUT_POOL_WORDS];
+-
+ int bytes = nbytes;
+
+ /* pull at least as much as a wakeup */
+ bytes = max_t(int, bytes, random_read_wakeup_bits / 8);
+ /* but never more than the buffer size */
+- bytes = min_t(int, bytes, sizeof(tmp));
++ bytes = min_t(int, bytes, sizeof(secondary_xfer_buffer));
+
+ trace_xfer_secondary_pool(r->name, bytes * 8, nbytes * 8,
+ ENTROPY_BITS(r), ENTROPY_BITS(r->pull));
+- bytes = extract_entropy(r->pull, tmp, bytes,
++ bytes = extract_entropy(r->pull, secondary_xfer_buffer, bytes,
+ random_read_wakeup_bits / 8, 0);
+- mix_pool_bytes(r, tmp, bytes);
++ mix_pool_bytes(r, secondary_xfer_buffer, bytes);
+ credit_entropy_bits(r, bytes*8);
+ }
+
+@@ -1572,7 +1659,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
+
+ extract_buf(r, tmp);
+ i = min_t(int, nbytes, EXTRACT_SIZE);
+- if (copy_to_user(buf, tmp, i)) {
++ if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
+ ret = -EFAULT;
+ break;
+ }
+@@ -2276,7 +2363,7 @@ struct batched_entropy {
+ };
+ unsigned int position;
+ spinlock_t batch_lock;
+-};
++} __randomize_layout;
+
+ /*
+ * Get a random word for internal kernel use only. The quality of the random
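The main change to _mix_pool_bytes() above is mechanical: the five fixed tap variables become an array walked in a loop. The standalone sketch below restates that data flow with the stock 8-entry twist table and a 128-word pool; it is an illustration of the mixing step, not the kernel code.

#include <stdint.h>
#include <stdio.h>

#define POOL_WORDS 128                    /* stock input pool size */
static uint32_t pool[POOL_WORDS];
static const int tap[5] = { 104, 76, 51, 25, 1 };
static const uint32_t twist_table[8] = {
	0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
	0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278,
};

static uint32_t rol32(uint32_t w, unsigned s)
{
	return (w << (s & 31)) | (w >> ((32 - s) & 31));
}

static void mix_byte(unsigned *pos, unsigned *rot, unsigned char b)
{
	const unsigned wordmask = POOL_WORDS - 1;
	uint32_t w = rol32(b, *rot);
	int n;

	*pos = (*pos - 1) & wordmask;         /* walk the pool backwards     */
	w ^= pool[*pos];                      /* XOR in the current word ... */
	for (n = 0; n < 5; n++)               /* ... and the five taps       */
		w ^= pool[(*pos + tap[n]) & wordmask];
	pool[*pos] = (w >> 3) ^ twist_table[w & 7];   /* twist and store     */

	/* rotation advances by 7, or by 14 when the pool wraps */
	*rot = (*rot + (*pos ? 7 : 14)) & 31;
}

int main(void)
{
	unsigned pos = 0, rot = 0;
	const char *in = "example entropy bytes";

	while (*in)
		mix_byte(&pos, &rot, (unsigned char)*in++);
	printf("pool[127]=%08x pool[126]=%08x\n",
	       (unsigned)pool[127], (unsigned)pool[126]);
	return 0;
}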
diff --git a/drivers/tty/Kconfig b/drivers/tty/Kconfig
index e0a04bfc873e..ec93f827c599 100644
--- a/drivers/tty/Kconfig
@@ -661,10 +961,10 @@ index ac8025cd4a1f..a89e48f53fba 100644
return tty;
}
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
-index eb24ec0e160d..68c93697cae9 100644
+index 27486b0a027a..82689c97b660 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
-@@ -41,6 +41,8 @@
+@@ -43,6 +43,8 @@
#define USB_TP_TRANSMISSION_DELAY 40 /* ns */
#define USB_TP_TRANSMISSION_DELAY_MAX 65535 /* ns */
@@ -673,7 +973,7 @@ index eb24ec0e160d..68c93697cae9 100644
/* Protect struct usb_device->state and ->children members
* Note: Both are also protected by ->dev.sem, except that ->state can
* change to USB_STATE_NOTATTACHED even when the semaphore isn't held. */
-@@ -4940,6 +4942,12 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
+@@ -4961,6 +4963,12 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
goto done;
return;
}
@@ -686,8 +986,35 @@ index eb24ec0e160d..68c93697cae9 100644
if (hub_is_superspeed(hub->hdev))
unit_load = 150;
else
+diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
+index e5126fad57c5..2a59499ba24d 100644
+--- a/fs/debugfs/inode.c
++++ b/fs/debugfs/inode.c
+@@ -32,6 +32,10 @@ static struct vfsmount *debugfs_mount;
+ static int debugfs_mount_count;
+ static bool debugfs_registered;
+
++#ifdef CONFIG_HARDENED_SYSFS_RESTRICT
++extern int sysfs_restricted;
++#endif
++
+ static struct inode *debugfs_get_inode(struct super_block *sb)
+ {
+ struct inode *inode = new_inode(sb);
+@@ -517,6 +521,11 @@ struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
+ if (unlikely(!inode))
+ return failed_creating(dentry);
+
++#ifdef CONFIG_HARDENED_SYSFS_RESTRICT
++ if (sysfs_restricted)
++ inode->i_mode = S_IFDIR | S_IRWXU;
++ else
++#endif
+ inode->i_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
+ inode->i_op = &simple_dir_inode_operations;
+ inode->i_fop = &simple_dir_operations;
diff --git a/fs/exec.c b/fs/exec.c
-index 433b1257694a..f86201f25a4c 100644
+index 561ea64829ec..5d40794103eb 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -62,6 +62,7 @@
@@ -708,10 +1035,21 @@ index 433b1257694a..f86201f25a4c 100644
err:
up_write(&mm->mmap_sem);
diff --git a/fs/namei.c b/fs/namei.c
-index 914178cdbe94..7422b5ce077a 100644
+index 327844fedf3d..c1b2eafa3b30 100644
--- a/fs/namei.c
+++ b/fs/namei.c
-@@ -885,10 +885,10 @@ static inline void put_link(struct nameidata *nd)
+@@ -125,6 +125,10 @@
+
+ #define EMBEDDED_NAME_MAX (PATH_MAX - offsetof(struct filename, iname))
+
++#ifdef CONFIG_HARDENED_FIFO
++extern int fifo_restrictions;
++#endif
++
+ struct filename *
+ getname_flags(const char __user *filename, int flags, int *empty)
+ {
+@@ -885,10 +889,10 @@ static inline void put_link(struct nameidata *nd)
path_put(&last->link);
}
@@ -726,8 +1064,57 @@ index 914178cdbe94..7422b5ce077a 100644
/**
* may_follow_link - Check symlink following for unsafe situations
+@@ -3252,6 +3256,32 @@ static int lookup_open(struct nameidata *nd, struct path *path,
+ return error;
+ }
+
++/*
++ * Handles possibly restricted FIFO operations
++ * if the user doesn't own this directory.
++ */
++static int fifo_restricted(const struct dentry *dentry,
++ const struct vfsmount *mnt,
++ const struct dentry *dir,
++ const int flag,
++ const int acc_mode) {
++#ifdef CONFIG_HARDENED_FIFO
++ const struct cred *cred;
++ struct inode *inode, *dir_inode;
++
++ cred = current_cred();
++ inode = d_backing_inode(dentry);
++ dir_inode = d_backing_inode(dir);
++
++ if (fifo_restrictions && S_ISFIFO(inode->i_mode) &&
++ !(flag & O_EXCL) && (dir_inode->i_mode & S_ISVTX) &&
++ !uid_eq(inode->i_uid, dir_inode->i_uid) &&
++ !uid_eq(cred->fsuid, inode->i_uid))
++ return -EACCES;
++#endif
++ return 0;
++}
++
+ /*
+ * Handle the last step of open()
+ */
+@@ -3370,6 +3400,15 @@ static int do_last(struct nameidata *nd,
+ return -ENOENT;
+ }
+
++ /*
++ * Only check if O_CREAT is specified, all other checks need to go
++ * into may_open().
++ */
++ if (fifo_restricted(path.dentry, path.mnt, dir, open_flag, acc_mode)) {
++ path_to_nameidata(&path, nd);
++ return -EACCES;
++ }
++
+ /*
+ * create/update audit record if it already exists.
+ */
diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig
-index 5f93cfacb3d1..cea0d7d3b23e 100644
+index ac3e06367cb6..06a2e4cf4cc1 100644
--- a/fs/nfs/Kconfig
+++ b/fs/nfs/Kconfig
@@ -195,4 +195,3 @@ config NFS_DEBUG
@@ -785,6 +1172,76 @@ index f8e6fb2c3657..240c1432e18f 100644
generic_fillattr(inode, stat);
return 0;
+diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
+index feeae8081c22..a507113bbd3b 100644
+--- a/fs/sysfs/dir.c
++++ b/fs/sysfs/dir.c
+@@ -18,6 +18,10 @@
+
+ DEFINE_SPINLOCK(sysfs_symlink_target_lock);
+
++#ifdef CONFIG_HARDENED_SYSFS_RESTRICT
++extern int sysfs_restricted;
++#endif
++
+ void sysfs_warn_dup(struct kernfs_node *parent, const char *name)
+ {
+ char *buf;
+@@ -40,11 +44,19 @@ void sysfs_warn_dup(struct kernfs_node *parent, const char *name)
+ int sysfs_create_dir_ns(struct kobject *kobj, const void *ns)
+ {
+ struct kernfs_node *parent, *kn;
++ const char* name;
++ umode_t mode;
+ kuid_t uid;
+ kgid_t gid;
+
++#ifdef CONFIG_HARDENED_SYSFS_RESTRICT
++ const char *parent_name;
++#endif
++
+ BUG_ON(!kobj);
+
++ name = kobject_name(kobj);
++
+ if (kobj->parent)
+ parent = kobj->parent->sd;
+ else
+@@ -55,12 +67,30 @@ int sysfs_create_dir_ns(struct kobject *kobj, const void *ns)
+
+ kobject_get_ownership(kobj, &uid, &gid);
+
+- kn = kernfs_create_dir_ns(parent, kobject_name(kobj),
+- S_IRWXU | S_IRUGO | S_IXUGO, uid, gid,
+- kobj, ns);
++#ifdef CONFIG_HARDENED_SYSFS_RESTRICT
++ if (sysfs_restricted) {
++ parent_name = parent->name;
++ mode = S_IRWXU;
++
++ if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") ||
++ !strcmp(name, "fs"))) ||
++ (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
++ (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") ||
++ !strcmp(name, "fuse") || !strcmp(name, "ecryptfs"))) ||
++ (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
++ mode |= S_IRUGO | S_IXUGO;
++ }
++ else
++ mode = S_IRWXU | S_IRUGO | S_IXUGO;
++#else
++ mode = S_IRWXU | S_IRUGO | S_IXUGO;
++#endif
++
++ kn = kernfs_create_dir_ns(parent, name, mode, uid, gid, kobj, ns);
++
+ if (IS_ERR(kn)) {
+ if (PTR_ERR(kn) == -EEXIST)
+- sysfs_warn_dup(parent, kobject_name(kobj));
++ sysfs_warn_dup(parent, name);
+ return PTR_ERR(kn);
+ }
+
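The parent/name comparisons above encode a small whitelist. Restated as a table-driven check (same pairs as the patch, illustration only):

#include <stdio.h>
#include <string.h>

static const struct { const char *parent, *name; } sysfs_whitelist[] = {
	{ "",        "devices"  },
	{ "",        "fs"       },
	{ "devices", "system"   },
	{ "fs",      "selinux"  },
	{ "fs",      "fuse"     },
	{ "fs",      "ecryptfs" },
	{ "system",  "cpu"      },
};

/* returns 1 when the directory keeps its world read/execute bits */
static int sysfs_dir_whitelisted(const char *parent, const char *name)
{
	unsigned i;

	for (i = 0; i < sizeof(sysfs_whitelist) / sizeof(sysfs_whitelist[0]); i++)
		if (!strcmp(parent, sysfs_whitelist[i].parent) &&
		    !strcmp(name, sysfs_whitelist[i].name))
			return 1;
	return 0;
}

int main(void)
{
	printf("system/cpu -> %d, class/net -> %d\n",
	       sysfs_dir_whitelisted("system", "cpu"),
	       sysfs_dir_whitelisted("class", "net"));
	return 0;
}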
diff --git a/include/linux/cache.h b/include/linux/cache.h
index 750621e41d1c..e7157c18c62c 100644
--- a/include/linux/cache.h
@@ -822,10 +1279,10 @@ index f640dcbc880c..2b4f5d651f19 100644
{
return true;
diff --git a/include/linux/fs.h b/include/linux/fs.h
-index d4e1b43a53c3..c925cbdd1d95 100644
+index 92420009b9bc..9238f04686ce 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
-@@ -3466,4 +3466,15 @@ extern void inode_nohighmem(struct inode *inode);
+@@ -3468,4 +3468,15 @@ extern void inode_nohighmem(struct inode *inode);
extern int vfs_fadvise(struct file *file, loff_t offset, loff_t len,
int advice);
@@ -866,10 +1323,10 @@ index fd1ce10553bf..1905d2476d32 100644
mask |= FS_ISDIR;
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
-index 24bcc5eec6b4..b1cdfc350596 100644
+index f78d1e89593f..ff139ff8d3d2 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
-@@ -530,9 +530,9 @@ extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
+@@ -553,9 +553,9 @@ extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order);
extern unsigned long get_zeroed_page(gfp_t gfp_mask);
@@ -935,7 +1392,7 @@ index 069aa2ebef90..cb9e3637a620 100644
const struct kobj_ns_type_operations *kobj_child_ns_ops(struct kobject *parent);
const struct kobj_ns_type_operations *kobj_ns_ops(struct kobject *kobj);
diff --git a/include/linux/mm.h b/include/linux/mm.h
-index bdec425c8e14..58409dee149a 100644
+index 45f10f5896b7..7f251a536293 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -571,7 +571,7 @@ static inline int is_vmalloc_or_module_addr(const void *x)
@@ -972,10 +1429,10 @@ index 70b7123f38c7..09f3019489b2 100644
extern phys_addr_t per_cpu_ptr_to_phys(void *addr);
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
-index 42fc852bf512..a6c5cacee3b5 100644
+index d8b4d31acd18..205c79491de3 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
-@@ -1184,6 +1184,11 @@ extern int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
+@@ -1194,6 +1194,11 @@ extern int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
int perf_event_max_stack_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos);
@@ -1137,7 +1594,7 @@ index f58e1ef76572..516caa40676e 100644
return __builtin_strcpy(p, q);
memcpy(p, q, strlen(q) + 1);
diff --git a/include/linux/tty.h b/include/linux/tty.h
-index 808fbfe86f85..e4429b7d6e8e 100644
+index 248a137112e8..02a63bec41ca 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -14,6 +14,7 @@
@@ -1148,7 +1605,7 @@ index 808fbfe86f85..e4429b7d6e8e 100644
/*
-@@ -336,6 +337,7 @@ struct tty_struct {
+@@ -338,6 +339,7 @@ struct tty_struct {
/* If the tty has a pending do_SAK, queue it here - akpm */
struct work_struct SAK_work;
struct tty_port *port;
@@ -1156,7 +1613,7 @@ index 808fbfe86f85..e4429b7d6e8e 100644
} __randomize_layout;
/* Each of a tty's open files has private_data pointing to tty_file_private */
-@@ -345,6 +347,8 @@ struct tty_file_private {
+@@ -347,6 +349,8 @@ struct tty_file_private {
struct list_head list;
};
@@ -1199,8 +1656,24 @@ index 398e9c95cd61..baab7195306a 100644
#ifndef CONFIG_MMU
extern void *__vmalloc_node_flags(unsigned long size, int node, gfp_t flags);
static inline void *__vmalloc_node_flags_caller(unsigned long size, int node,
+diff --git a/include/uapi/linux/ip.h b/include/uapi/linux/ip.h
+index e42d13b55cf3..3228bcfe7599 100644
+--- a/include/uapi/linux/ip.h
++++ b/include/uapi/linux/ip.h
+@@ -66,7 +66,11 @@
+
+ #define IPVERSION 4
+ #define MAXTTL 255
++#ifdef CONFIG_HARDENED_STEALTH_NETWORKING
++#define IPDEFTTL 128
++#else
+ #define IPDEFTTL 64
++#endif
+
+ #define IPOPT_OPTVAL 0
+ #define IPOPT_OLEN 1
diff --git a/init/Kconfig b/init/Kconfig
-index 47035b5a46f6..63b30636fdc0 100644
+index 47035b5a46f6..efbd3ac00d92 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -326,6 +326,7 @@ config USELIB
@@ -1211,7 +1684,30 @@ index 47035b5a46f6..63b30636fdc0 100644
help
Enable auditing infrastructure that can be used with another
kernel subsystem, such as SELinux (which requires this for
-@@ -1091,6 +1092,12 @@ config CC_OPTIMIZE_FOR_SIZE
+@@ -957,6 +958,22 @@ config USER_NS
+
+ If unsure, say N.
+
++config USER_NS_UNPRIVILEGED
++ bool "Allow unprivileged users to create namespaces"
++ depends on USER_NS
++ default n
++ help
++ When disabled, unprivileged users will not be able to create
++ new namespaces. Allowing users to create their own namespaces
++ has been part of several recent local privilege escalation
++ exploits, so if you need user namespaces but are
++ paranoid^Wsecurity-conscious you want to disable this.
++
++ This setting can be overridden at runtime via the
++ kernel.unprivileged_userns_clone sysctl.
++
++ If unsure, say N.
++
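What the new option (and the kernel.unprivileged_userns_clone sysctl it refers to) controls is easy to observe from an ordinary user with a single unshare() call; a minimal sketch:

#define _GNU_SOURCE
#include <errno.h>
#include <sched.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* run as a normal user, not root */
	if (unshare(CLONE_NEWUSER) == 0)
		printf("unprivileged user namespace created\n");
	else
		printf("unshare(CLONE_NEWUSER): %s%s\n", strerror(errno),
		       errno == EPERM ? " (unprivileged_userns_clone=0?)" : "");
	return 0;
}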
+ config PID_NS
+ bool "PID Namespaces"
+ default y
+@@ -1091,6 +1108,12 @@ config CC_OPTIMIZE_FOR_SIZE
endchoice
@@ -1224,7 +1720,7 @@ index 47035b5a46f6..63b30636fdc0 100644
config HAVE_LD_DEAD_CODE_DATA_ELIMINATION
bool
help
-@@ -1377,8 +1384,7 @@ config SHMEM
+@@ -1377,8 +1400,7 @@ config SHMEM
which may be appropriate on small systems without swap.
config AIO
@@ -1234,7 +1730,7 @@ index 47035b5a46f6..63b30636fdc0 100644
help
This option enables POSIX asynchronous I/O which may by used
by some high performance threaded applications. Disabling
-@@ -1595,7 +1601,7 @@ config VM_EVENT_COUNTERS
+@@ -1595,7 +1617,7 @@ config VM_EVENT_COUNTERS
config SLUB_DEBUG
default y
@@ -1243,7 +1739,7 @@ index 47035b5a46f6..63b30636fdc0 100644
depends on SLUB && SYSFS
help
SLUB has extensive debug support features. Disabling these can
-@@ -1619,7 +1625,6 @@ config SLUB_MEMCG_SYSFS_ON
+@@ -1619,7 +1641,6 @@ config SLUB_MEMCG_SYSFS_ON
config COMPAT_BRK
bool "Disable heap randomization"
@@ -1251,7 +1747,7 @@ index 47035b5a46f6..63b30636fdc0 100644
help
Randomizing heap placement makes heap exploits harder, but it
also breaks ancient binaries (including anything libc5 based).
-@@ -1666,7 +1671,6 @@ endchoice
+@@ -1666,7 +1687,6 @@ endchoice
config SLAB_MERGE_DEFAULT
bool "Allow slab caches to be merged"
@@ -1259,7 +1755,7 @@ index 47035b5a46f6..63b30636fdc0 100644
help
For reduced kernel memory fragmentation, slab caches can be
merged when they share the same size and other characteristics.
-@@ -1679,9 +1683,9 @@ config SLAB_MERGE_DEFAULT
+@@ -1679,9 +1699,9 @@ config SLAB_MERGE_DEFAULT
command line.
config SLAB_FREELIST_RANDOM
@@ -1270,7 +1766,7 @@ index 47035b5a46f6..63b30636fdc0 100644
help
Randomizes the freelist order used on creating new pages. This
security feature reduces the predictability of the kernel slab
-@@ -1690,12 +1694,56 @@ config SLAB_FREELIST_RANDOM
+@@ -1690,12 +1710,56 @@ config SLAB_FREELIST_RANDOM
config SLAB_FREELIST_HARDENED
bool "Harden slab freelist metadata"
depends on SLUB
@@ -1328,10 +1824,10 @@ index 47035b5a46f6..63b30636fdc0 100644
default y
depends on SLUB && SMP
diff --git a/kernel/audit.c b/kernel/audit.c
-index 2a8058764aa6..14e7a763db43 100644
+index 1f08c38e604a..2c4f577a4317 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
-@@ -1628,6 +1628,9 @@ static int __init audit_enable(char *str)
+@@ -1630,6 +1630,9 @@ static int __init audit_enable(char *str)
if (audit_default == AUDIT_OFF)
audit_initialized = AUDIT_DISABLED;
@@ -1342,20 +1838,20 @@ index 2a8058764aa6..14e7a763db43 100644
pr_err("audit: error setting audit state (%d)\n",
audit_default);
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
-index bad9985b8a08..453be8764a8c 100644
+index 36be400c3e65..50fa38718408 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
-@@ -370,7 +370,7 @@ void bpf_prog_kallsyms_del_all(struct bpf_prog *fp)
-
+@@ -368,7 +368,7 @@ void bpf_prog_kallsyms_del_all(struct bpf_prog *fp)
+ #ifdef CONFIG_BPF_JIT
/* All BPF JIT sysctl knobs here. */
int bpf_jit_enable __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_ALWAYS_ON);
-int bpf_jit_harden __read_mostly;
+int bpf_jit_harden __read_mostly = 2;
int bpf_jit_kallsyms __read_mostly;
- int bpf_jit_limit __read_mostly = BPF_JIT_LIMIT_DEFAULT;
+ long bpf_jit_limit __read_mostly;
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
-index ede82382dd32..98591574df41 100644
+index 596959288eb9..fb76f46d61ce 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -48,7 +48,7 @@ static DEFINE_SPINLOCK(prog_idr_lock);
@@ -1368,10 +1864,10 @@ index ede82382dd32..98591574df41 100644
static const struct bpf_map_ops * const bpf_map_types[] = {
#define BPF_PROG_TYPE(_id, _ops)
diff --git a/kernel/capability.c b/kernel/capability.c
-index 1e1c0236f55b..452062fe45ce 100644
+index 7718d7dcadc7..8a4ce459da0a 100644
--- a/kernel/capability.c
+++ b/kernel/capability.c
-@@ -431,6 +431,12 @@ bool capable(int cap)
+@@ -432,6 +432,12 @@ bool capable(int cap)
return ns_capable(&init_user_ns, cap);
}
EXPORT_SYMBOL(capable);
@@ -1385,7 +1881,7 @@ index 1e1c0236f55b..452062fe45ce 100644
/**
diff --git a/kernel/events/core.c b/kernel/events/core.c
-index 171b83ebed4a..c38d8bf68b6b 100644
+index 8c70ee23fbe9..7c45b1e38bc0 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -397,8 +397,13 @@ static cpumask_var_t perf_online_mask;
@@ -1402,7 +1898,7 @@ index 171b83ebed4a..c38d8bf68b6b 100644
/* Minimum for 512 kiB + 1 user control page */
int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */
-@@ -10462,6 +10467,9 @@ SYSCALL_DEFINE5(perf_event_open,
+@@ -10529,6 +10534,9 @@ SYSCALL_DEFINE5(perf_event_open,
if (flags & ~PERF_FLAG_ALL)
return -EINVAL;
@@ -1413,7 +1909,7 @@ index 171b83ebed4a..c38d8bf68b6b 100644
if (err)
return err;
diff --git a/kernel/fork.c b/kernel/fork.c
-index 69874db3fba8..44f666f2b7b7 100644
+index 1a2d18e98bf9..f3a8e3df2e12 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -103,6 +103,11 @@
@@ -1428,7 +1924,7 @@ index 69874db3fba8..44f666f2b7b7 100644
/*
* Minimum number of threads to boot the kernel
-@@ -1674,6 +1679,10 @@ static __latent_entropy struct task_struct *copy_process(
+@@ -1679,6 +1684,10 @@ static __latent_entropy struct task_struct *copy_process(
if ((clone_flags & (CLONE_NEWUSER|CLONE_FS)) == (CLONE_NEWUSER|CLONE_FS))
return ERR_PTR(-EINVAL);
@@ -1439,7 +1935,7 @@ index 69874db3fba8..44f666f2b7b7 100644
/*
* Thread groups must share signals as well, and detached threads
* can only be started up within the thread group.
-@@ -2503,6 +2512,12 @@ int ksys_unshare(unsigned long unshare_flags)
+@@ -2508,6 +2517,12 @@ int ksys_unshare(unsigned long unshare_flags)
if (unshare_flags & CLONE_NEWNS)
unshare_flags |= CLONE_FS;
@@ -1452,11 +1948,29 @@ index 69874db3fba8..44f666f2b7b7 100644
err = check_unshare_flags(unshare_flags);
if (err)
goto bad_unshare_out;
+diff --git a/kernel/kmod.c b/kernel/kmod.c
+index bc6addd9152b..008be43f6cdd 100644
+--- a/kernel/kmod.c
++++ b/kernel/kmod.c
+@@ -149,6 +149,13 @@ int __request_module(bool wait, const char *fmt, ...)
+ if (ret)
+ return ret;
+
++#ifdef CONFIG_HARDENED_MODULE_LOAD
++ if (uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
++ printk(KERN_ALERT "denied attempt to auto-load module %.64s\n", module_name);
++ return -EPERM;
++ }
++#endif
++
+ if (atomic_dec_if_positive(&kmod_concurrent_max) < 0) {
+ pr_warn_ratelimited("request_module: kmod_concurrent_max (%u) close to 0 (max_modprobes: %u), for module %s, throttling...",
+ atomic_read(&kmod_concurrent_max),
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
-index 3d37c279c090..0789ca413f09 100644
+index f2635fc751d9..a4c445bf7f24 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
-@@ -1138,7 +1138,7 @@ void free_basic_memory_bitmaps(void)
+@@ -1145,7 +1145,7 @@ void free_basic_memory_bitmaps(void)
void clear_free_pages(void)
{
@@ -1465,7 +1979,7 @@ index 3d37c279c090..0789ca413f09 100644
struct memory_bitmap *bm = free_pages_map;
unsigned long pfn;
-@@ -1155,7 +1155,7 @@ void clear_free_pages(void)
+@@ -1162,7 +1162,7 @@ void clear_free_pages(void)
}
memory_bm_position_reset(bm);
pr_info("free pages cleared after restore\n");
@@ -1501,10 +2015,10 @@ index f7e89c989df7..527c170810fc 100644
struct rcu_state *rsp;
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
-index 4a433608ba74..41d9b1656818 100644
+index 86ccaaf0c1bf..f3103b3f1bb4 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
-@@ -9627,7 +9627,7 @@ static int idle_balance(struct rq *this_rq, struct rq_flags *rf)
+@@ -9719,7 +9719,7 @@ static int idle_balance(struct rq *this_rq, struct rq_flags *rf)
* run_rebalance_domains is triggered when needed from the scheduler tick.
* Also triggered for nohz idle balancing (with nohz_balancing_kick set).
*/
@@ -1574,7 +2088,7 @@ index 6f584861d329..1943fe60f3b9 100644
void tasklet_init(struct tasklet_struct *t,
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
-index 9a85c7ae7362..20221265a603 100644
+index 4c4fd4339d33..10f5b0ecf5a4 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -67,6 +67,7 @@
@@ -1585,7 +2099,7 @@ index 9a85c7ae7362..20221265a603 100644
#include <linux/uaccess.h>
#include <asm/processor.h>
-@@ -99,12 +100,19 @@
+@@ -99,12 +100,25 @@
#if defined(CONFIG_SYSCTL)
/* External variables not in a header file. */
@@ -1593,6 +2107,12 @@ index 9a85c7ae7362..20221265a603 100644
+int deny_new_usb __read_mostly = 0;
+EXPORT_SYMBOL(deny_new_usb);
+#endif
++#ifdef CONFIG_HARDENED_SYSFS_RESTRICT
++int __read_mostly sysfs_restricted = 1;
++#endif
++#ifdef CONFIG_HARDENED_FIFO
++int __read_mostly fifo_restrictions = 1;
++#endif
extern int suid_dumpable;
#ifdef CONFIG_COREDUMP
extern int core_uses_pid;
@@ -1605,7 +2125,7 @@ index 9a85c7ae7362..20221265a603 100644
extern int pid_max;
extern int pid_max_min, pid_max_max;
extern int percpu_pagelist_fraction;
-@@ -116,35 +124,35 @@ extern int sysctl_nr_trim_pages;
+@@ -116,35 +130,35 @@ extern int sysctl_nr_trim_pages;
/* Constants used for minimum and maximum */
#ifdef CONFIG_LOCKUP_DETECTOR
@@ -1657,7 +2177,7 @@ index 9a85c7ae7362..20221265a603 100644
static const int cap_last_cap = CAP_LAST_CAP;
/*
-@@ -152,9 +160,12 @@ static const int cap_last_cap = CAP_LAST_CAP;
+@@ -152,9 +166,12 @@ static const int cap_last_cap = CAP_LAST_CAP;
* and hung_task_check_interval_secs
*/
#ifdef CONFIG_DETECT_HUNG_TASK
@@ -1671,7 +2191,7 @@ index 9a85c7ae7362..20221265a603 100644
#ifdef CONFIG_INOTIFY_USER
#include <linux/inotify.h>
#endif
-@@ -298,19 +309,19 @@ static struct ctl_table sysctl_base_table[] = {
+@@ -298,19 +315,19 @@ static struct ctl_table sysctl_base_table[] = {
};
#ifdef CONFIG_SCHED_DEBUG
@@ -1699,7 +2219,7 @@ index 9a85c7ae7362..20221265a603 100644
#endif
static struct ctl_table kern_table[] = {
-@@ -516,6 +527,15 @@ static struct ctl_table kern_table[] = {
+@@ -516,6 +533,15 @@ static struct ctl_table kern_table[] = {
.proc_handler = proc_dointvec,
},
#endif
@@ -1715,7 +2235,7 @@ index 9a85c7ae7362..20221265a603 100644
#ifdef CONFIG_PROC_SYSCTL
{
.procname = "tainted",
-@@ -864,6 +884,37 @@ static struct ctl_table kern_table[] = {
+@@ -864,6 +890,59 @@ static struct ctl_table kern_table[] = {
.extra1 = &zero,
.extra2 = &two,
},
@@ -1750,14 +2270,36 @@ index 9a85c7ae7362..20221265a603 100644
+ .extra1 = &zero,
+ .extra2 = &one,
+ },
++#endif
++#ifdef CONFIG_HARDENED_SYSFS_RESTRICT
++ {
++ .procname = "sysfs_restricted",
++ .data = &sysfs_restricted,
++ .maxlen = sizeof(int),
++ .mode = 0644,
++ .proc_handler = proc_dointvec_minmax_sysadmin,
++ .extra1 = &zero,
++ .extra2 = &one,
++ },
++#endif
++#ifdef CONFIG_HARDENED_FIFO
++ {
++ .procname = "fifo_restrictions",
++ .data = &fifo_restrictions,
++ .maxlen = sizeof(int),
++ .mode = 0644,
++ .proc_handler = proc_dointvec_minmax_sysadmin,
++ .extra1 = &zero,
++ .extra2 = &one,
++ },
#endif
{
.procname = "ngroups_max",
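
For reference, the kern_table entries added above use proc_dointvec_minmax_sysadmin with extra1/extra2 set to zero/one, so the knobs appear under /proc/sys/kernel, are clamped to 0/1, and can only be written by a CAP_SYS_ADMIN caller. A small sketch (paths inferred from the .procname fields, so treat them as assumptions) that merely reports their current state:

/* Minimal status dump; paths inferred from the kern_table entries above. */
#include <stdio.h>

static void show(const char *path)
{
	char buf[16];
	FILE *f = fopen(path, "r");

	if (!f) {
		printf("%-40s <not present>\n", path);
		return;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("%-40s %s", path, buf);
	fclose(f);
}

int main(void)
{
	show("/proc/sys/kernel/deny_new_usb");
	show("/proc/sys/kernel/sysfs_restricted");
	show("/proc/sys/kernel/fifo_restrictions");
	return 0;
}
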
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
-index e1a549c9e399..c560063e3a8c 100644
+index 7362554416fd..fb8902236deb 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
-@@ -1462,7 +1462,7 @@ static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now,
+@@ -1465,7 +1465,7 @@ static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now,
}
}
@@ -1767,10 +2309,10 @@ index e1a549c9e399..c560063e3a8c 100644
struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
unsigned long flags;
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
-index fa49cd753dea..a16f8613282e 100644
+index ae64cb819a9a..52f5a5dcd09d 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
-@@ -1688,7 +1688,7 @@ static inline void __run_timers(struct timer_base *base)
+@@ -1690,7 +1690,7 @@ static inline void __run_timers(struct timer_base *base)
/*
* This function runs timers and the timer-tq in bottom half context.
*/
@@ -1780,21 +2322,25 @@ index fa49cd753dea..a16f8613282e 100644
struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
-index 923414a246e9..6b9dbc257e34 100644
+index 923414a246e9..107b17f0d528 100644
--- a/kernel/user_namespace.c
+++ b/kernel/user_namespace.c
-@@ -26,6 +26,9 @@
+@@ -26,6 +26,13 @@
#include <linux/bsearch.h>
#include <linux/sort.h>
+/* sysctl */
++#ifdef CONFIG_USER_NS_UNPRIVILEGED
++int unprivileged_userns_clone = 1;
++#else
+int unprivileged_userns_clone;
++#endif
+
static struct kmem_cache *user_ns_cachep __read_mostly;
static DEFINE_MUTEX(userns_state_mutex);
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
-index 3dea52f7be9c..9074878fe25b 100644
+index 46a910acce3f..5b60c663ac69 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -950,6 +950,7 @@ endmenu # "Debug lockups and hangs"
@@ -1894,7 +2440,7 @@ index 812e59e13fe6..2c2104884c81 100644
static noinline_for_stack
char *restricted_pointer(char *buf, char *end, const void *ptr,
diff --git a/mm/Kconfig b/mm/Kconfig
-index de64ea658716..8bff017856eb 100644
+index b457e94ae618..ec2440e66c72 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -311,7 +311,8 @@ config KSM
@@ -1908,10 +2454,10 @@ index de64ea658716..8bff017856eb 100644
This is the portion of low virtual memory which should be protected
from userspace allocation. Keeping a user from writing to low pages
diff --git a/mm/mmap.c b/mm/mmap.c
-index 1480880ff814..0145114c44b4 100644
+index a98f09b83019..d1695e475b37 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
-@@ -230,6 +230,13 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
+@@ -224,6 +224,13 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
newbrk = PAGE_ALIGN(brk);
oldbrk = PAGE_ALIGN(mm->brk);
@@ -1926,7 +2472,7 @@ index 1480880ff814..0145114c44b4 100644
goto set_brk;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
-index 8e6932a140b8..1f9c55809c56 100644
+index e5c610d711f3..12f96ff5ef0e 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -66,6 +66,7 @@
@@ -1989,7 +2535,7 @@ index 8e6932a140b8..1f9c55809c56 100644
page_zone(page)->managed_pages += nr_pages;
set_page_refcounted(page);
__free_pages(page, order);
-@@ -1876,8 +1908,8 @@ static inline int check_new_page(struct page *page)
+@@ -1884,8 +1916,8 @@ static inline int check_new_page(struct page *page)
static inline bool free_pages_prezeroed(void)
{
@@ -2000,7 +2546,7 @@ index 8e6932a140b8..1f9c55809c56 100644
}
#ifdef CONFIG_DEBUG_VM
-@@ -1934,6 +1966,11 @@ static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags
+@@ -1942,6 +1974,11 @@ static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags
post_alloc_hook(page, order, gfp_flags);
@@ -2066,7 +2612,7 @@ index 9632772e14be..802ff9ee8172 100644
/*
* Else we can use all the padding etc for the allocation
diff --git a/mm/slab_common.c b/mm/slab_common.c
-index 4d3c2e76d1ba..7e943cb1eccd 100644
+index 39e382acb0b8..a87a9a2fdf4e 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -27,10 +27,10 @@
@@ -2092,7 +2638,7 @@ index 4d3c2e76d1ba..7e943cb1eccd 100644
static int __init setup_slab_nomerge(char *str)
{
diff --git a/mm/slub.c b/mm/slub.c
-index 09c0e24a06d8..26e8c45a889a 100644
+index 9c3937c5ce38..6b063a76c419 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -124,6 +124,16 @@ static inline int kmem_cache_debug(struct kmem_cache *s)
@@ -2371,7 +2917,7 @@ index 09c0e24a06d8..26e8c45a889a 100644
static int __init setup_slub_memcg_sysfs(char *str)
{
diff --git a/mm/swap.c b/mm/swap.c
-index a3fc028e338e..4a1a899e430c 100644
+index 45fdbfb6b2a6..55ec851eb819 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -93,6 +93,13 @@ static void __put_compound_page(struct page *page)
@@ -2389,10 +2935,10 @@ index a3fc028e338e..4a1a899e430c 100644
}
diff --git a/net/core/dev.c b/net/core/dev.c
-index 138951d28643..efc5c650c9d7 100644
+index c1a3baf16957..a6b3bba36e8a 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -4533,7 +4533,7 @@ int netif_rx_ni(struct sk_buff *skb)
+@@ -4536,7 +4536,7 @@ int netif_rx_ni(struct sk_buff *skb)
}
EXPORT_SYMBOL(netif_rx_ni);
@@ -2401,7 +2947,7 @@ index 138951d28643..efc5c650c9d7 100644
{
struct softnet_data *sd = this_cpu_ptr(&softnet_data);
-@@ -6318,7 +6318,7 @@ static int napi_poll(struct napi_struct *n, struct list_head *repoll)
+@@ -6304,7 +6304,7 @@ static int napi_poll(struct napi_struct *n, struct list_head *repoll)
return work;
}
@@ -2410,6 +2956,39 @@ index 138951d28643..efc5c650c9d7 100644
{
struct softnet_data *sd = this_cpu_ptr(&softnet_data);
unsigned long time_limit = jiffies +
+diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
+index 6cec08cd0bb9..ffc74355a94d 100644
+--- a/net/core/sysctl_net_core.c
++++ b/net/core/sysctl_net_core.c
+@@ -36,6 +36,10 @@ static int net_msg_warn; /* Unused, but still a sysctl */
+ int sysctl_fb_tunnels_only_for_init_net __read_mostly = 0;
+ EXPORT_SYMBOL(sysctl_fb_tunnels_only_for_init_net);
+
++#ifdef CONFIG_HARDENED_STEALTH_NETWORKING
++int sysctl_stealth_blackhole __read_mostly = 1;
++#endif
++
+ #ifdef CONFIG_RPS
+ static int rps_sock_flow_sysctl(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
+@@ -505,6 +509,17 @@ static struct ctl_table net_core_table[] = {
+ .proc_handler = set_default_qdisc
+ },
+ #endif
++#ifdef CONFIG_HARDENED_STEALTH_NETWORKING
++ {
++ .procname = "ip_blackhole",
++ .data = &sysctl_stealth_blackhole,
++ .maxlen = sizeof(int),
++ .mode = 0644,
++ .proc_handler = proc_dointvec_minmax,
++ .extra1 = &zero,
++ .extra2 = &one,
++ },
++#endif
+ #endif /* CONFIG_NET */
+ {
+ .procname = "netdev_budget",
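
The sysctl_net_core.c hunk registers the stealth toggle in net_core_table, so it should surface as net.core.ip_blackhole (default 1, clamped to 0/1 by proc_dointvec_minmax). A rough sketch for flipping it at runtime, assuming that path; it needs root to obtain write access:

/* Toggle the stealth-networking knob added above; run as root. */
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
	const char *val = (argc > 1) ? argv[1] : "1";   /* "0" or "1" */
	FILE *f = fopen("/proc/sys/net/core/ip_blackhole", "w");

	if (!f) {
		perror("open ip_blackhole");
		return EXIT_FAILURE;
	}
	fprintf(f, "%s\n", val);
	fclose(f);
	return EXIT_SUCCESS;
}
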
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index 32cae39cdff6..9141d7ae99b2 100644
--- a/net/ipv4/Kconfig
@@ -2422,6 +3001,389 @@ index 32cae39cdff6..9141d7ae99b2 100644
---help---
Normal TCP/IP networking is open to an attack known as "SYN
flooding". This denial-of-service attack prevents legitimate remote
+diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
+index 4efa5e33513e..ae82ff1ea5e7 100644
+--- a/net/ipv4/icmp.c
++++ b/net/ipv4/icmp.c
+@@ -195,6 +195,10 @@ struct icmp_control {
+ short error; /* This ICMP is classed as an error message */
+ };
+
++#ifdef CONFIG_HARDENED_STEALTH_NETWORKING
++extern int sysctl_stealth_blackhole;
++#endif
++
+ static const struct icmp_control icmp_pointers[NR_ICMP_TYPES+1];
+
+ /*
+@@ -934,6 +938,11 @@ static bool icmp_echo(struct sk_buff *skb)
+ {
+ struct net *net;
+
++#ifdef CONFIG_HARDENED_STEALTH_NETWORKING
++ if (likely(sysctl_stealth_blackhole) && !(skb->dev->flags & IFF_LOOPBACK))
++ return true;
++#endif
++
+ net = dev_net(skb_dst(skb)->dev);
+ if (!net->ipv4.sysctl_icmp_echo_ignore_all) {
+ struct icmp_bxm icmp_param;
+@@ -960,6 +969,12 @@ static bool icmp_echo(struct sk_buff *skb)
+ static bool icmp_timestamp(struct sk_buff *skb)
+ {
+ struct icmp_bxm icmp_param;
++
++#ifdef CONFIG_HARDENED_STEALTH_NETWORKING
++ if (likely(sysctl_stealth_blackhole) && !(skb->dev->flags & IFF_LOOPBACK))
++ return true;
++#endif
++
+ /*
+ * Too short.
+ */
+diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
+index 523d26f5e22e..10070b040661 100644
+--- a/net/ipv4/igmp.c
++++ b/net/ipv4/igmp.c
+@@ -136,6 +136,10 @@
+ ((in_dev)->mr_v2_seen && \
+ time_before(jiffies, (in_dev)->mr_v2_seen)))
+
++#ifdef CONFIG_HARDENED_STEALTH_NETWORKING
++extern int sysctl_stealth_blackhole;
++#endif
++
+ static int unsolicited_report_interval(struct in_device *in_dev)
+ {
+ int interval_ms, interval_jiffies;
+@@ -737,6 +741,11 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
+ __be32 dst;
+ int hlen, tlen;
+
++#ifdef CONFIG_HARDENED_STEALTH_NETWORKING
++ if (likely(sysctl_stealth_blackhole))
++ return -1;
++#endif
++
+ if (type == IGMPV3_HOST_MEMBERSHIP_REPORT)
+ return igmpv3_send_report(in_dev, pmc);
+
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index 38b6d8f90a44..9c67d386df71 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -307,11 +307,13 @@ static void tcp_ecn_rcv_synack(struct tcp_sock *tp, const struct tcphdr *th)
+ tp->ecn_flags &= ~TCP_ECN_OK;
+ }
+
++#ifndef CONFIG_HARDENED_NO_SIMULT_CONNECT
+ static void tcp_ecn_rcv_syn(struct tcp_sock *tp, const struct tcphdr *th)
+ {
+ if ((tp->ecn_flags & TCP_ECN_OK) && (!th->ece || !th->cwr))
+ tp->ecn_flags &= ~TCP_ECN_OK;
+ }
++#endif
+
+ static bool tcp_ecn_rcv_ecn_echo(const struct tcp_sock *tp, const struct tcphdr *th)
+ {
+@@ -5921,6 +5923,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
+ tcp_paws_reject(&tp->rx_opt, 0))
+ goto discard_and_undo;
+
++#ifndef CONFIG_HARDENED_NO_SIMULT_CONNECT
+ if (th->syn) {
+ /* We see SYN without ACK. It is attempt of
+ * simultaneous connect with crossed SYNs.
+@@ -5972,6 +5975,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
+ goto discard;
+ #endif
+ }
++#endif
+ /* "fifth, if neither of the SYN or RST bits is set then
+ * drop the segment and return."
+ */
+diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
+index 6da393016c11..e6171d1ea7c9 100644
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -95,6 +95,10 @@ static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
+ struct inet_hashinfo tcp_hashinfo;
+ EXPORT_SYMBOL(tcp_hashinfo);
+
++#ifdef CONFIG_HARDENED_STEALTH_NETWORKING
++extern int sysctl_stealth_blackhole;
++#endif
++
+ static u32 tcp_v4_init_seq(const struct sk_buff *skb)
+ {
+ return secure_tcp_seq(ip_hdr(skb)->daddr,
+@@ -1561,6 +1565,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
+ return 0;
+
+ reset:
++#ifdef CONFIG_HARDENED_STEALTH_NETWORKING
++ if (!likely(sysctl_stealth_blackhole))
++#endif
+ tcp_v4_send_reset(rsk, skb);
+ discard:
+ kfree_skb(skb);
+@@ -1709,6 +1716,27 @@ int tcp_v4_rcv(struct sk_buff *skb)
+ if (!pskb_may_pull(skb, th->doff * 4))
+ goto discard_it;
+
++#ifdef CONFIG_HARDENED_STEALTH_NETWORKING
++ if (likely(sysctl_stealth_blackhole) &&
++ (
++ th->res1 || !tcp_flag_word(th) ||
++ tcp_flag_word(th) == TCP_FLAG_PSH ||
++ tcp_flag_word(th) & (TCP_FLAG_CWR | TCP_FLAG_ECE) ||
++ (
++ tcp_flag_word(th) &
++ (TCP_FLAG_SYN | TCP_FLAG_FIN | TCP_FLAG_RST) &&
++ tcp_flag_word(th) & TCP_FLAG_URG
++ ) ||
++ (
++ tcp_flag_word(th) &
++ (TCP_FLAG_FIN | TCP_FLAG_RST) &&
++ tcp_flag_word(th) & TCP_FLAG_SYN
++ )
++ )
++ )
++ goto discard_it;
++#endif
++
+ /* An explanation is required here, I think.
+ * Packet length and doff are validated by header prediction,
+ * provided case of th->doff==0 is eliminated.
+@@ -1722,12 +1750,22 @@ int tcp_v4_rcv(struct sk_buff *skb)
+ lookup:
+ sk = __inet_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th), th->source,
+ th->dest, sdif, &refcounted);
+- if (!sk)
++ if (!sk) {
++#ifdef CONFIG_HARDENED_STEALTH_NETWORKING
++ ret = 1;
++#endif
++
+ goto no_tcp_socket;
++ }
+
+ process:
+- if (sk->sk_state == TCP_TIME_WAIT)
++ if (sk->sk_state == TCP_TIME_WAIT) {
++#ifdef CONFIG_HARDENED_STEALTH_NETWORKING
++ ret = 2;
++#endif
++
+ goto do_time_wait;
++ }
+
+ if (sk->sk_state == TCP_NEW_SYN_RECV) {
+ struct request_sock *req = inet_reqsk(sk);
+@@ -1841,6 +1879,11 @@ int tcp_v4_rcv(struct sk_buff *skb)
+ bad_packet:
+ __TCP_INC_STATS(net, TCP_MIB_INERRS);
+ } else {
++#ifdef CONFIG_HARDENED_STEALTH_NETWORKING
++ if (!sysctl_stealth_blackhole || (ret == 1 &&
++ (skb->dev->flags & IFF_LOOPBACK)))
++#endif
++
+ tcp_v4_send_reset(NULL, skb);
+ }
+
+diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
+index 7ba8a90772b0..8c359b649bcd 100644
+--- a/net/ipv4/tcp_minisocks.c
++++ b/net/ipv4/tcp_minisocks.c
+@@ -29,6 +29,10 @@
+ #include <net/xfrm.h>
+ #include <net/busy_poll.h>
+
++#ifdef CONFIG_HARDENED_STEALTH_NETWORKING
++extern int sysctl_stealth_blackhole;
++#endif
++
+ static bool tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
+ {
+ if (seq == s_win)
+@@ -809,6 +813,10 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
+ * avoid becoming vulnerable to outside attack aiming at
+ * resetting legit local connections.
+ */
++#ifdef CONFIG_HARDENED_STEALTH_NETWORKING
++ if (!sysctl_stealth_blackhole || skb->dev->flags & IFF_LOOPBACK)
++#endif
++
+ req->rsk_ops->send_reset(sk, skb);
+ } else if (fastopen) { /* received a valid RST pkt */
+ reqsk_fastopen_remove(sk, req, true);
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index 0ef04cda1b27..6cb1efa826dc 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -128,6 +128,10 @@ EXPORT_SYMBOL(udp_memory_allocated);
+ #define MAX_UDP_PORTS 65536
+ #define PORTS_PER_CHAIN (MAX_UDP_PORTS / UDP_HTABLE_SIZE_MIN)
+
++#ifdef CONFIG_HARDENED_STEALTH_NETWORKING
++extern int sysctl_stealth_blackhole;
++#endif
++
+ /* IPCB reference means this can not be used from early demux */
+ static bool udp_lib_exact_dif_match(struct net *net, struct sk_buff *skb)
+ {
+@@ -2262,6 +2266,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
+ goto csum_error;
+
+ __UDP_INC_STATS(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
++#ifdef CONFIG_HARDENED_STEALTH_NETWORKING
++ if (!likely(sysctl_stealth_blackhole) || (skb->dev->flags & IFF_LOOPBACK))
++#endif
+ icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
+
+ /*
+diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
+index 6d14cbe443f8..4eeebbdc452e 100644
+--- a/net/ipv6/icmp.c
++++ b/net/ipv6/icmp.c
+@@ -72,6 +72,10 @@
+
+ #include <linux/uaccess.h>
+
++#ifdef CONFIG_HARDENED_STEALTH_NETWORKING
++extern int sysctl_stealth_blackhole;
++#endif
++
+ /*
+ * The ICMP socket(s). This is the most convenient way to flow control
+ * our ICMP output as well as maintain a clean interface throughout
+@@ -848,6 +852,9 @@ static int icmpv6_rcv(struct sk_buff *skb)
+
+ switch (type) {
+ case ICMPV6_ECHO_REQUEST:
++#ifdef CONFIG_HARDENED_STEALTH_NETWORKING
++ if (!sysctl_stealth_blackhole || skb->dev->flags & IFF_LOOPBACK)
++#endif
+ if (!net->ipv6.sysctl.icmpv6_echo_ignore_all)
+ icmpv6_echo_reply(skb);
+ break;
+diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
+index c5f4e89b6ff3..86375c9966ef 100644
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -71,6 +71,10 @@
+
+ #include <trace/events/tcp.h>
+
++#ifdef CONFIG_HARDENED_STEALTH_NETWORKING
++extern int sysctl_stealth_blackhole;
++#endif
++
+ static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb);
+ static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
+ struct request_sock *req);
+@@ -1356,6 +1360,10 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
+ return 0;
+
+ reset:
++#ifdef CONFIG_HARDENED_STEALTH_NETWORKING
++ if (!likely(sysctl_stealth_blackhole))
++#endif
++
+ tcp_v6_send_reset(sk, skb);
+ discard:
+ if (opt_skb)
+@@ -1453,6 +1461,27 @@ static int tcp_v6_rcv(struct sk_buff *skb)
+ if (!pskb_may_pull(skb, th->doff*4))
+ goto discard_it;
+
++#ifdef CONFIG_HARDENED_STEALTH_NETWORKING
++ if (likely(sysctl_stealth_blackhole) &&
++ (
++ th->res1 || !tcp_flag_word(th) ||
++ tcp_flag_word(th) == TCP_FLAG_PSH ||
++ tcp_flag_word(th) & (TCP_FLAG_CWR | TCP_FLAG_ECE) ||
++ (
++ tcp_flag_word(th) &
++ (TCP_FLAG_SYN | TCP_FLAG_FIN | TCP_FLAG_RST) &&
++ tcp_flag_word(th) & TCP_FLAG_URG
++ ) ||
++ (
++ tcp_flag_word(th) &
++ (TCP_FLAG_FIN | TCP_FLAG_RST) &&
++ tcp_flag_word(th) & TCP_FLAG_SYN
++ )
++ )
++ )
++ goto discard_it;
++#endif
++
+ if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
+ goto csum_error;
+
+@@ -1463,12 +1492,22 @@ static int tcp_v6_rcv(struct sk_buff *skb)
+ sk = __inet6_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th),
+ th->source, th->dest, inet6_iif(skb), sdif,
+ &refcounted);
+- if (!sk)
++ if (!sk) {
++#ifdef CONFIG_HARDENED_STEALTH_NETWORKING
++ ret = 1;
++#endif
++
+ goto no_tcp_socket;
++ }
+
+ process:
+- if (sk->sk_state == TCP_TIME_WAIT)
++ if (sk->sk_state == TCP_TIME_WAIT) {
++#ifdef CONFIG_HARDENED_STEALTH_NETWORKING
++ ret = 2;
++#endif
++
+ goto do_time_wait;
++ }
+
+ if (sk->sk_state == TCP_NEW_SYN_RECV) {
+ struct request_sock *req = inet_reqsk(sk);
+@@ -1576,6 +1615,11 @@ static int tcp_v6_rcv(struct sk_buff *skb)
+ bad_packet:
+ __TCP_INC_STATS(net, TCP_MIB_INERRS);
+ } else {
++#ifdef CONFIG_HARDENED_STEALTH_NETWORKING
++ if (!sysctl_stealth_blackhole || (ret == 1 &&
++ (skb->dev->flags & IFF_LOOPBACK)))
++#endif
++
+ tcp_v6_send_reset(NULL, skb);
+ }
+
+diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
+index 1979922bcf67..6a8a3666648f 100644
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -56,6 +56,10 @@
+ #include <trace/events/skb.h>
+ #include "udp_impl.h"
+
++#ifdef CONFIG_HARDENED_STEALTH_NETWORKING
++extern int sysctl_stealth_blackhole;
++#endif
++
+ static bool udp6_lib_exact_dif_match(struct net *net, struct sk_buff *skb)
+ {
+ #if defined(CONFIG_NET_L3_MASTER_DEV)
+@@ -863,6 +867,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
+ goto csum_error;
+
+ __UDP6_INC_STATS(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
++#ifdef CONFIG_HARDENED_STEALTH_NETWORKING
++ if (!likely(sysctl_stealth_blackhole) || skb->dev->flags & IFF_LOOPBACK)
++#endif
+ icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
+
+ kfree_skb(skb);
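
The tcp_v4_rcv()/tcp_v6_rcv() hunks above drop packets whose flag word looks like a scan probe before any socket lookup happens. The standalone sketch below re-implements that predicate with plain host-order masks standing in for the kernel's TCP_FLAG_* constants (a simplification: the real test runs on the raw header word, which also covers the data-offset and window fields), just to show which combinations get discarded.

/*
 * Illustration only: a userspace re-statement of the flag-word test the
 * stealth-networking hunks add to tcp_v4_rcv()/tcp_v6_rcv().
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FL_FIN 0x01u
#define FL_SYN 0x02u
#define FL_RST 0x04u
#define FL_PSH 0x08u
#define FL_ACK 0x10u
#define FL_URG 0x20u
#define FL_ECE 0x40u
#define FL_CWR 0x80u

static bool stealth_drop(uint32_t fl)
{
	return !fl ||                                        /* NULL scan       */
	       fl == FL_PSH ||                               /* lone PSH        */
	       (fl & (FL_CWR | FL_ECE)) ||                   /* ECN bits set    */
	       ((fl & (FL_SYN | FL_FIN | FL_RST)) && (fl & FL_URG)) ||
	       ((fl & (FL_FIN | FL_RST)) && (fl & FL_SYN));  /* SYN with FIN/RST */
}

int main(void)
{
	struct { const char *name; uint32_t fl; } probes[] = {
		{ "NULL (no flags)",      0 },
		{ "Xmas (FIN|PSH|URG)",   FL_FIN | FL_PSH | FL_URG },
		{ "SYN|FIN",              FL_SYN | FL_FIN },
		{ "plain SYN",            FL_SYN },
		{ "SYN|ACK",              FL_SYN | FL_ACK },
		{ "ACK",                  FL_ACK },
	};

	for (unsigned i = 0; i < sizeof(probes) / sizeof(probes[0]); i++)
		printf("%-22s -> %s\n", probes[i].name,
		       stealth_drop(probes[i].fl) ? "discard" : "process");
	return 0;
}
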
diff --git a/scripts/gcc-plugins/Kconfig b/scripts/gcc-plugins/Kconfig
index cb0c889e13aa..305f52f58c1a 100644
--- a/scripts/gcc-plugins/Kconfig
@@ -2439,7 +3401,7 @@ index cb0c889e13aa..305f52f58c1a 100644
secure!
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
-index 858cbe56b100..61ade07a967a 100644
+index 91a80036c05d..41692ca62c98 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -35,6 +35,7 @@ static int vmlinux_section_warnings = 1;
@@ -2471,7 +3433,7 @@ index 858cbe56b100..61ade07a967a 100644
}
};
-@@ -1255,10 +1263,10 @@ static Elf_Sym *find_elf_symbol(struct elf_info *elf, Elf64_Sword addr,
+@@ -1267,10 +1275,10 @@ static Elf_Sym *find_elf_symbol(struct elf_info *elf, Elf64_Sword addr,
continue;
if (!is_valid_name(elf, sym))
continue;
@@ -2484,7 +3446,7 @@ index 858cbe56b100..61ade07a967a 100644
if (d < 0)
d = addr - sym->st_value;
if (d < distance) {
-@@ -1393,7 +1401,11 @@ static void report_sec_mismatch(const char *modname,
+@@ -1405,7 +1413,11 @@ static void report_sec_mismatch(const char *modname,
char *prl_from;
char *prl_to;
@@ -2497,7 +3459,7 @@ index 858cbe56b100..61ade07a967a 100644
if (!sec_mismatch_verbose)
return;
-@@ -1517,6 +1529,14 @@ static void report_sec_mismatch(const char *modname,
+@@ -1529,6 +1541,14 @@ static void report_sec_mismatch(const char *modname,
fatal("There's a special handler for this mismatch type, "
"we should never get here.");
break;
@@ -2512,7 +3474,7 @@ index 858cbe56b100..61ade07a967a 100644
}
fprintf(stderr, "\n");
}
-@@ -2528,6 +2548,14 @@ int main(int argc, char **argv)
+@@ -2540,6 +2560,14 @@ int main(int argc, char **argv)
}
}
free(buf.p);
@@ -2528,7 +3490,7 @@ index 858cbe56b100..61ade07a967a 100644
return err;
}
diff --git a/security/Kconfig b/security/Kconfig
-index d9aa521b5206..a921713b76ec 100644
+index d9aa521b5206..438acc17532e 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -8,7 +8,7 @@ source security/keys/Kconfig
@@ -2636,6 +3598,135 @@ index d9aa521b5206..a921713b76ec 100644
config STATIC_USERMODEHELPER
bool "Force all usermode helper calls through a single binary"
help
+@@ -278,3 +329,128 @@ config DEFAULT_SECURITY
+
+ endmenu
+
++menu "Hardened Enhancements"
++
++config HARDENED_RANDOM
++ bool "Enhance the random number generator"
++ default n
++ help
++ Enabling this option enhances the Linux kernel random number generator.
++ This is done by:
++	  - Increasing the pool size from 4096 bits to 262144 bits (512 B -> 32 KB).
++	  - Increasing the diffusion via the linear feedback shift register.
++	  - Defining newer 64-bit polynomial fields for the input and output pools.
++
++	  Overall, this increases the total entropy available to the system and
++	  strengthens the random number generator.
++
++
++config HARDENED_STEALTH_NETWORKING
++ bool "Enable stealth networking [GRSECURITY]"
++ default n
++ depends on NET
++ help
++ If you say Y here, neither TCP resets nor ICMP
++ destination-unreachable packets will be sent in response to packets
++ sent to ports for which no associated listening process exists.
++	  This feature supports both IPv4 and IPv6 and exempts the
++ loopback interface from blackholing. Enabling this feature
++ makes a host more resilient to DoS attacks and reduces network
++ visibility against scanners.
++
++ The blackhole feature as-implemented is equivalent to the FreeBSD
++ blackhole feature, as it prevents RST responses to all packets, not
++ just SYNs. Under most application behavior this causes no
++ problems, but applications (like haproxy) may not close certain
++ connections in a way that cleanly terminates them on the remote
++ end, leaving the remote host in LAST_ACK state. Because of this
++ side-effect and to prevent intentional LAST_ACK DoSes, this
++ feature also adds automatic mitigation against such attacks.
++ The mitigation drastically reduces the amount of time a socket
++ can spend in LAST_ACK state. If you're using haproxy and not
++ all servers it connects to have this option enabled, consider
++ disabling this feature on the haproxy host.
++
++	  If the sysctl option is enabled, a sysctl entry named
++	  "ip_blackhole" will be created under net.core.
++	  It takes the standard zero/non-zero on/off toggle to
++	  enable or disable this feature.
++
++
++config HARDENED_NO_SIMULT_CONNECT
++ bool "Disable simultaneous TCP connections [GRSECURITY]"
++ default n
++ depends on NET
++ help
++ If you say Y here, a feature by Willy Tarreau will be enabled that
++ removes a weakness in Linux's strict implementation of TCP that
++ allows two clients to connect to each other without either entering
++ a listening state. The weakness allows an attacker to easily prevent
++ a client from connecting to a known server provided the source port
++ for the connection is guessed correctly.
++
++ As the weakness could be used to prevent an antivirus or IPS from
++ fetching updates, or prevent an SSL gateway from fetching a CRL,
++ it should be eliminated by enabling this option. Though Linux is
++ one of few operating systems supporting simultaneous connect, it
++ has no legitimate use in practice and is rarely supported by firewalls.
++
++
++config HARDENED_SYSFS_RESTRICT
++ bool "Restrict SysFS & DebugFS [GRSECURITY]"
++ default y
++ depends on SYSFS
++ help
++ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
++ any filesystem normally mounted under it (e.g. debugfs) will be
++ mostly accessible only by root. These filesystems generally provide access
++ to hardware and debug information that isn't appropriate for unprivileged
++ users of the system. Sysfs and debugfs have also become a large source
++ of new vulnerabilities, ranging from infoleaks to local compromise.
++ There has been very little oversight with an eye toward security involved
++ in adding new exporters of information to these filesystems, so their
++ use is discouraged.
++ To enable or disable this feature at runtime, use the sysctl
++ kernel.sysfs_restricted.
++ For reasons of compatibility, a few directories have been whitelisted
++ for access by non-root users:
++ /sys/fs/selinux
++ /sys/fs/fuse
++ /sys/devices/system/cpu
++
++
++config HARDENED_FIFO
++ bool "Restrict FIFO [GRSECURITY]"
++ default y
++ help
++	  If you say Y here, users will not be able to write to FIFOs they don't
++	  own in world-writable +t directories (e.g. /tmp), unless the owner of
++	  the FIFO is the same as the owner of the directory it's held in. If the
++	  sysctl option is enabled, a sysctl option named "fifo_restrictions" is
++	  created.
++
++
++config HARDENED_MODULE_LOAD
++ bool "Harden module auto-loading [GRSECURITY]"
++ default y
++ depends on MODULES
++ help
++ If you say Y here, module auto-loading in response to use of some
++ feature implemented by an unloaded module will be restricted to
++ root users. Enabling this option helps defend against attacks
++ by unprivileged users who abuse the auto-loading behavior to
++ cause a vulnerable module to load that is then exploited.
++
++ If this option prevents a legitimate use of auto-loading for a
++ non-root user, the administrator can execute modprobe manually
++ with the exact name of the module mentioned in the alert log.
++ Alternatively, the administrator can add the module to the list
++ of modules loaded at boot by modifying init scripts.
++
++ Modification of init scripts will most likely be needed on
++ Ubuntu servers with encrypted home directory support enabled,
++ as the first non-root user logging in will cause the ecb(aes),
++ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
++
++
++endmenu
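
To see the HARDENED_FIFO behaviour described in the help text above, a hypothetical check could try to open, for writing, a FIFO owned by another user inside a world-writable sticky directory such as /tmp; with kernel.fifo_restrictions enabled the open is expected to be refused. The path below is only a placeholder.

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	/* Placeholder path: point it at a FIFO you do not own. */
	const char *path = (argc > 1) ? argv[1] : "/tmp/example.fifo";
	int fd = open(path, O_WRONLY | O_NONBLOCK);

	if (fd < 0) {
		/* ENXIO only means no reader is attached; a permission
		 * error is what the restriction is expected to produce. */
		printf("open(%s, O_WRONLY): %s\n", path, strerror(errno));
		return 1;
	}
	printf("open(%s, O_WRONLY) succeeded\n", path);
	close(fd);
	return 0;
}
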
diff --git a/security/selinux/Kconfig b/security/selinux/Kconfig
index 8af7a690eb40..6539694b0fd3 100644
--- a/security/selinux/Kconfig
@@ -2674,7 +3765,7 @@ index 8af7a690eb40..6539694b0fd3 100644
-
- If you are unsure how to answer this question, answer 0.
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
-index 70bad15ed7a0..a157a3d57cdd 100644
+index c574285966f9..667cca9fcd78 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -135,18 +135,7 @@ __setup("selinux=", selinux_enabled_setup);
@@ -2734,3 +3825,18 @@ index 96b27405558a..485c1b85c325 100644
help
This selects Yama, which extends DAC support with additional
system-wide security settings beyond regular Linux discretionary
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index 4e499b78569b..55bba37f9517 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -618,6 +618,10 @@ static int kvm_create_vm_debugfs(struct kvm *kvm, int fd)
+ struct kvm_stat_data *stat_data;
+ struct kvm_stats_debugfs_item *p;
+
++#ifdef CONFIG_HARDENED_SYSFS_RESTRICT
++ return 0;
++#endif
++
+ if (!debugfs_initialized())
+ return 0;
+
diff --git a/sys-kernel/linux-sources-redcore-lts-legacy/linux-sources-redcore-lts-legacy-4.19.104.ebuild b/sys-kernel/linux-sources-redcore-lts-legacy/linux-sources-redcore-lts-legacy-4.19.108.ebuild
index 2f7dc850..2f7dc850 100644
--- a/sys-kernel/linux-sources-redcore-lts-legacy/linux-sources-redcore-lts-legacy-4.19.104.ebuild
+++ b/sys-kernel/linux-sources-redcore-lts-legacy/linux-sources-redcore-lts-legacy-4.19.108.ebuild