From 7224c1253228e5c29c78cb3f0f26ce34770f2356 Mon Sep 17 00:00:00 2001
From: BlackNoxis
Date: Sat, 15 Feb 2014 23:24:26 +0200
Subject: Added ebuilds for kogaion desktop

---
 sys-kernel/compat-drivers-alx/Manifest | 27 +
 .../compat-drivers-alx-3.8_rc5.ebuild | 178 +
 .../compat-drivers-alx-3.8_rc7.ebuild | 186 +
 .../files/3.8-grsec/00-read-only.patch | 23 +
 .../files/3.8-grsec/01-read-only.patch | 12 +
 .../files/3.8-grsec/02-read-only-ath.patch | 240 +
 .../files/3.8-grsec/03-read-only-memory.patch | 11 +
 .../files/3.8-grsec/04-read-only-brcm80211.patch | 11 +
 .../files/3.8-grsec/05-read-only-i915.patch | 17 +
 .../files/3.8-grsec/06-read-only-radeon.patch | 58 +
 .../files/3.8-grsec/07-read-only-wl1251.patch | 11 +
 .../files/3.8-grsec/08-read-only-ti.patch | 11 +
 .../files/3.8-grsec/09-read-only-nouveau.patch | 33 +
 .../3.8-grsec/20-version-disagrement-iwlwifi.patch | 139 +
 ...0211-2.6.29-fix-tx-ctl-no-ack-retry-count.patch | 17 +
 .../files/4004_zd1211rw-2.6.28.patch | 37 +
 .../files/ath_regd_optional.patch | 39 +
 .../compat-drivers-3.7_rc1_p6-grsec-warnings.patch | 34 +
 .../files/compat-drivers-3.7_rc1_p6-grsec.patch | 8365 +++++++++++++++++++
 .../files/compat-drivers-3.8-ath6kl.patch | 37 +
 .../files/compat-drivers-3.8-bt_tty.patch | 37 +
 .../files/compat-drivers-3.8-driver-select | 845 ++
 .../files/ipw2200-inject.3.4.6.patch | 120 +
 .../files/leds-disable-strict-3.6.6.patch | 30 +
 .../files/leds-disable-strict-3.7_rc1_p6.patch | 30 +
 .../files/leds-disable-strict-3.8.patch | 30 +
 sys-kernel/compat-drivers/Manifest | 33 +
 .../compat-drivers-3.7_rc1_p6-r1.ebuild | 177 +
 .../compat-drivers-3.7_rc1_p6.ebuild | 185 +
 .../compat-drivers/compat-drivers-3.8.ebuild | 174 +
 .../compat-drivers/compat-drivers-3.8_rc5.ebuild | 176 +
 .../compat-drivers/compat-drivers-3.8_rc7.ebuild | 184 +
 .../files/3.8-grsec/00-read-only.patch | 23 +
 .../files/3.8-grsec/01-read-only.patch | 12 +
 .../files/3.8-grsec/02-read-only-ath.patch | 240 +
 .../files/3.8-grsec/03-read-only-memory.patch | 11 +
 .../files/3.8-grsec/04-read-only-brcm80211.patch | 11 +
 .../files/3.8-grsec/05-read-only-i915.patch | 17 +
 .../files/3.8-grsec/06-read-only-radeon.patch | 58 +
 .../files/3.8-grsec/07-read-only-wl1251.patch | 11 +
 .../files/3.8-grsec/08-read-only-ti.patch | 11 +
 .../files/3.8-grsec/09-read-only-nouveau.patch | 33 +
 .../3.8-grsec/20-version-disagrement-iwlwifi.patch | 139 +
 ...0211-2.6.29-fix-tx-ctl-no-ack-retry-count.patch | 17 +
 .../files/4004_zd1211rw-2.6.28.patch | 37 +
 .../compat-drivers/files/ath_regd_optional.patch | 39 +
 .../compat-drivers-3.7_rc1_p6-grsec-warnings.patch | 34 +
 .../files/compat-drivers-3.7_rc1_p6-grsec.patch | 8365 +++++++++++++++++++
 .../files/compat-drivers-3.8-ath6kl.patch | 37 +
 .../files/compat-drivers-3.8-bt_tty.patch | 37 +
 .../files/compat-drivers-3.8-driver-select | 845 ++
 .../files/ipw2200-inject.3.4.6.patch | 120 +
 .../files/leds-disable-strict-3.6.6.patch | 30 +
 .../files/leds-disable-strict-3.7_rc1_p6.patch | 30 +
 .../files/leds-disable-strict-3.8.patch | 30 +
 sys-kernel/compat-drivers/metadata.xml | 35 +
 sys-kernel/debian-sources-lts/Manifest | 4 +
 .../debian-sources-lts-2.6.32.41-r1.ebuild | 161 +
 .../debian-sources-lts-2.6.32.41.ebuild | 156 +
 .../debian-sources-lts-2.6.32.43-r1.ebuild | 161 +
 .../debian-sources-lts-2.6.32.43.ebuild | 156 +
 .../debian-sources-lts-2.6.32.46.ebuild | 162 +
 sys-kernel/debian-sources-lts/files/config-extract | 216 +
 .../files/debian-sources-2.6.32.30-bridgemac.patch | 15 +
 .../files/debian-sources-2.6.38.3-bridgemac.patch | 15 +
 sys-kernel/debian-sources-lts/metadata.xml | 6 +
 sys-kernel/debian-sources/Manifest | 16 +
 .../debian-sources/debian-sources-3.10.11.ebuild | 145 +
 .../debian-sources/debian-sources-3.12.3.ebuild | 145 +
 sys-kernel/debian-sources/files/config-extract | 219 +
 .../debian-sources-3.10.11-xfs-libcrc32c-fix.patch | 21 +
 .../debian-sources-3.12.3-xfs-libcrc32c-fix.patch | 21 +
 sys-kernel/debian-sources/metadata.xml | 10 +
 sys-kernel/kogaion-sources/Manifest | 1 +
 ...groups-kconfig-build-bits-for-BFQ-v7-3.10.patch | 103 +
 ...k-introduce-the-BFQ-v7-I-O-sched-for-3.10.patch | 5969 +++++++++++++
 ...arly-Queue-Merge-EQM-to-BFQ-v7-for-3.10.0.patch | 1034 +++
 .../kogaion-sources/files/desktop/3.10-ck1.patch | 8732 ++++++++++++++++++++
 .../desktop/change-default-console-loglevel.patch | 13 +
 .../files/desktop/criu-no-expert.patch | 23 +
 .../desktop/enable_haswell_pstate_driver.patch | 33 +
 .../set_kogaion_extraversion_in_makefile.patch | 12 +
 .../files/desktop/uksm-0.1.2.2-for-v3.10.patch | 7064 ++++++++++++++++
 ...rect-invalid-use-of-user-timespec-in-the-.patch | 80 +
 .../kogaion-sources/kogaion-sources-3.10.25.ebuild | 55 +
 sys-kernel/linux-sabayon/Manifest | 2 +
 .../linux-sabayon/linux-sabayon-3.2-r2.ebuild | 12 +
 .../rogentos-sources-3.10.26.ebuild | 22 +
 .../rogentos-sources-3.10.27.ebuild | 22 +
 .../rogentos-sources-3.11.10.ebuild | 22 +
 .../rogentos-sources-3.12.10.ebuild | 22 +
 .../rogentos-sources-3.12.7.ebuild | 22 +
 .../rogentos-sources-3.12.8.ebuild | 22 +
 .../rogentos-sources-3.2-r2.ebuild | 19 +
 .../rogentos-sources-3.4.76.ebuild | 22 +
 .../rogentos-sources-3.4.77.ebuild | 22 +
 .../rogentos-sources-3.8.13.ebuild | 21 +
 .../rogentos-sources-3.9.11.ebuild | 22 +
 sys-kernel/rogentos-sources/rogentos-sources.skel | 22 +
 99 files changed, 46751 insertions(+)
 create mode 100644 sys-kernel/compat-drivers-alx/Manifest
 create mode 100644 sys-kernel/compat-drivers-alx/compat-drivers-alx-3.8_rc5.ebuild
 create mode 100644 sys-kernel/compat-drivers-alx/compat-drivers-alx-3.8_rc7.ebuild
 create mode 100644 sys-kernel/compat-drivers-alx/files/3.8-grsec/00-read-only.patch
 create mode 100644 sys-kernel/compat-drivers-alx/files/3.8-grsec/01-read-only.patch
 create mode 100644 sys-kernel/compat-drivers-alx/files/3.8-grsec/02-read-only-ath.patch
 create mode 100644 sys-kernel/compat-drivers-alx/files/3.8-grsec/03-read-only-memory.patch
 create mode 100644 sys-kernel/compat-drivers-alx/files/3.8-grsec/04-read-only-brcm80211.patch
 create mode 100644 sys-kernel/compat-drivers-alx/files/3.8-grsec/05-read-only-i915.patch
 create mode 100644 sys-kernel/compat-drivers-alx/files/3.8-grsec/06-read-only-radeon.patch
 create mode 100644 sys-kernel/compat-drivers-alx/files/3.8-grsec/07-read-only-wl1251.patch
 create mode 100644 sys-kernel/compat-drivers-alx/files/3.8-grsec/08-read-only-ti.patch
 create mode 100644 sys-kernel/compat-drivers-alx/files/3.8-grsec/09-read-only-nouveau.patch
 create mode 100644 sys-kernel/compat-drivers-alx/files/3.8-grsec/20-version-disagrement-iwlwifi.patch
 create mode 100644 sys-kernel/compat-drivers-alx/files/4002_mac80211-2.6.29-fix-tx-ctl-no-ack-retry-count.patch
 create mode 100644 sys-kernel/compat-drivers-alx/files/4004_zd1211rw-2.6.28.patch
 create mode 100644 sys-kernel/compat-drivers-alx/files/ath_regd_optional.patch
 create mode 100644 sys-kernel/compat-drivers-alx/files/compat-drivers-3.7_rc1_p6-grsec-warnings.patch
 create mode 100644 sys-kernel/compat-drivers-alx/files/compat-drivers-3.7_rc1_p6-grsec.patch
 create mode 100644 sys-kernel/compat-drivers-alx/files/compat-drivers-3.8-ath6kl.patch
 create mode 100644 sys-kernel/compat-drivers-alx/files/compat-drivers-3.8-bt_tty.patch
 create mode 100755 sys-kernel/compat-drivers-alx/files/compat-drivers-3.8-driver-select
 create mode 100644 sys-kernel/compat-drivers-alx/files/ipw2200-inject.3.4.6.patch
 create mode 100644 sys-kernel/compat-drivers-alx/files/leds-disable-strict-3.6.6.patch
 create mode 100644 sys-kernel/compat-drivers-alx/files/leds-disable-strict-3.7_rc1_p6.patch
 create mode 100644 sys-kernel/compat-drivers-alx/files/leds-disable-strict-3.8.patch
 create mode 100644 sys-kernel/compat-drivers/Manifest
 create mode 100644 sys-kernel/compat-drivers/compat-drivers-3.7_rc1_p6-r1.ebuild
 create mode 100644 sys-kernel/compat-drivers/compat-drivers-3.7_rc1_p6.ebuild
 create mode 100644 sys-kernel/compat-drivers/compat-drivers-3.8.ebuild
 create mode 100644 sys-kernel/compat-drivers/compat-drivers-3.8_rc5.ebuild
 create mode 100644 sys-kernel/compat-drivers/compat-drivers-3.8_rc7.ebuild
 create mode 100644 sys-kernel/compat-drivers/files/3.8-grsec/00-read-only.patch
 create mode 100644 sys-kernel/compat-drivers/files/3.8-grsec/01-read-only.patch
 create mode 100644 sys-kernel/compat-drivers/files/3.8-grsec/02-read-only-ath.patch
 create mode 100644 sys-kernel/compat-drivers/files/3.8-grsec/03-read-only-memory.patch
 create mode 100644 sys-kernel/compat-drivers/files/3.8-grsec/04-read-only-brcm80211.patch
 create mode 100644 sys-kernel/compat-drivers/files/3.8-grsec/05-read-only-i915.patch
 create mode 100644 sys-kernel/compat-drivers/files/3.8-grsec/06-read-only-radeon.patch
 create mode 100644 sys-kernel/compat-drivers/files/3.8-grsec/07-read-only-wl1251.patch
 create mode 100644 sys-kernel/compat-drivers/files/3.8-grsec/08-read-only-ti.patch
 create mode 100644 sys-kernel/compat-drivers/files/3.8-grsec/09-read-only-nouveau.patch
 create mode 100644 sys-kernel/compat-drivers/files/3.8-grsec/20-version-disagrement-iwlwifi.patch
 create mode 100644 sys-kernel/compat-drivers/files/4002_mac80211-2.6.29-fix-tx-ctl-no-ack-retry-count.patch
 create mode 100644 sys-kernel/compat-drivers/files/4004_zd1211rw-2.6.28.patch
 create mode 100644 sys-kernel/compat-drivers/files/ath_regd_optional.patch
 create mode 100644 sys-kernel/compat-drivers/files/compat-drivers-3.7_rc1_p6-grsec-warnings.patch
 create mode 100644 sys-kernel/compat-drivers/files/compat-drivers-3.7_rc1_p6-grsec.patch
 create mode 100644 sys-kernel/compat-drivers/files/compat-drivers-3.8-ath6kl.patch
 create mode 100644 sys-kernel/compat-drivers/files/compat-drivers-3.8-bt_tty.patch
 create mode 100755 sys-kernel/compat-drivers/files/compat-drivers-3.8-driver-select
 create mode 100644 sys-kernel/compat-drivers/files/ipw2200-inject.3.4.6.patch
 create mode 100644 sys-kernel/compat-drivers/files/leds-disable-strict-3.6.6.patch
 create mode 100644 sys-kernel/compat-drivers/files/leds-disable-strict-3.7_rc1_p6.patch
 create mode 100644 sys-kernel/compat-drivers/files/leds-disable-strict-3.8.patch
 create mode 100644 sys-kernel/compat-drivers/metadata.xml
 create mode 100644 sys-kernel/debian-sources-lts/Manifest
 create mode 100644 sys-kernel/debian-sources-lts/debian-sources-lts-2.6.32.41-r1.ebuild
 create mode 100644 sys-kernel/debian-sources-lts/debian-sources-lts-2.6.32.41.ebuild
 create mode 100644 sys-kernel/debian-sources-lts/debian-sources-lts-2.6.32.43-r1.ebuild
 create mode 100644 sys-kernel/debian-sources-lts/debian-sources-lts-2.6.32.43.ebuild
 create mode 100644 sys-kernel/debian-sources-lts/debian-sources-lts-2.6.32.46.ebuild
 create mode 100755 sys-kernel/debian-sources-lts/files/config-extract
 create mode 100644 sys-kernel/debian-sources-lts/files/debian-sources-2.6.32.30-bridgemac.patch
 create mode 100644 sys-kernel/debian-sources-lts/files/debian-sources-2.6.38.3-bridgemac.patch
 create mode 100644 sys-kernel/debian-sources-lts/metadata.xml
 create mode 100644 sys-kernel/debian-sources/Manifest
 create mode 100644 sys-kernel/debian-sources/debian-sources-3.10.11.ebuild
 create mode 100644 sys-kernel/debian-sources/debian-sources-3.12.3.ebuild
 create mode 100755 sys-kernel/debian-sources/files/config-extract
 create mode 100644 sys-kernel/debian-sources/files/debian-sources-3.10.11-xfs-libcrc32c-fix.patch
 create mode 100644 sys-kernel/debian-sources/files/debian-sources-3.12.3-xfs-libcrc32c-fix.patch
 create mode 100644 sys-kernel/debian-sources/metadata.xml
 create mode 100644 sys-kernel/kogaion-sources/Manifest
 create mode 100644 sys-kernel/kogaion-sources/files/desktop/0001-block-cgroups-kconfig-build-bits-for-BFQ-v7-3.10.patch
 create mode 100644 sys-kernel/kogaion-sources/files/desktop/0002-block-introduce-the-BFQ-v7-I-O-sched-for-3.10.patch
 create mode 100644 sys-kernel/kogaion-sources/files/desktop/0003-block-bfq-add-Early-Queue-Merge-EQM-to-BFQ-v7-for-3.10.0.patch
 create mode 100644 sys-kernel/kogaion-sources/files/desktop/3.10-ck1.patch
 create mode 100644 sys-kernel/kogaion-sources/files/desktop/change-default-console-loglevel.patch
 create mode 100644 sys-kernel/kogaion-sources/files/desktop/criu-no-expert.patch
 create mode 100644 sys-kernel/kogaion-sources/files/desktop/enable_haswell_pstate_driver.patch
 create mode 100644 sys-kernel/kogaion-sources/files/desktop/set_kogaion_extraversion_in_makefile.patch
 create mode 100644 sys-kernel/kogaion-sources/files/desktop/uksm-0.1.2.2-for-v3.10.patch
 create mode 100644 sys-kernel/kogaion-sources/files/security/0001-x86-x32-Correct-invalid-use-of-user-timespec-in-the-.patch
 create mode 100644 sys-kernel/kogaion-sources/kogaion-sources-3.10.25.ebuild
 create mode 100644 sys-kernel/linux-sabayon/Manifest
 create mode 100644 sys-kernel/linux-sabayon/linux-sabayon-3.2-r2.ebuild
 create mode 100644 sys-kernel/rogentos-sources/rogentos-sources-3.10.26.ebuild
 create mode 100644 sys-kernel/rogentos-sources/rogentos-sources-3.10.27.ebuild
 create mode 100644 sys-kernel/rogentos-sources/rogentos-sources-3.11.10.ebuild
 create mode 100644 sys-kernel/rogentos-sources/rogentos-sources-3.12.10.ebuild
 create mode 100644 sys-kernel/rogentos-sources/rogentos-sources-3.12.7.ebuild
 create mode 100644 sys-kernel/rogentos-sources/rogentos-sources-3.12.8.ebuild
 create mode 100644 sys-kernel/rogentos-sources/rogentos-sources-3.2-r2.ebuild
 create mode 100644 sys-kernel/rogentos-sources/rogentos-sources-3.4.76.ebuild
 create mode 100644 sys-kernel/rogentos-sources/rogentos-sources-3.4.77.ebuild
 create mode 100644 sys-kernel/rogentos-sources/rogentos-sources-3.8.13.ebuild
 create mode 100644 sys-kernel/rogentos-sources/rogentos-sources-3.9.11.ebuild
 create mode 100644 sys-kernel/rogentos-sources/rogentos-sources.skel

(limited to 'sys-kernel')

diff --git a/sys-kernel/compat-drivers-alx/Manifest b/sys-kernel/compat-drivers-alx/Manifest
new file mode 100644
index 00000000..1b4c3379
--- /dev/null
+++ b/sys-kernel/compat-drivers-alx/Manifest
@@ -0,0 +1,27 @@
+AUX 3.8-grsec/00-read-only.patch 604 SHA256 1b4109d2cb389e622252738390bd56a665f968c428accd905e6debcdf9c4e679 SHA512 34bd0312909701251317dfbc2b2ebd080f8d5e3dff18e3e0c80a48dbd6697d2877fe7e8a0fa52151f76a97a05368ebb4091166e4432dcaa3ce63469c2b830441 WHIRLPOOL 590b93713ffc2e59ce04b1c7b3a34f502eec32b25f01c45f8adab91cc1621c701a38ed0136ac5d052fe93d9d5738c0facf969189721b332a058db118835908bf
+AUX 3.8-grsec/01-read-only.patch 271 SHA256 f389b6d40b0c5512af8b3c024c69aed69830ca22d53403f19be0a8834ee4573c SHA512 9d2f878aeb6ebe8c3d74aa038e3fec14829ac709c2f65b9c7fe6482dd1c15432cf73a40638059ccee9350d6305989b60883903fcd0c7545ec38c1d22e006c9c2 WHIRLPOOL 111a64490618524af32c56c9631d0ff7f29ae4ebc692422df63cf6eaae0a42c06d4be7186c3049b3f3827de9535e8c0b14f6c5fecf9d956dc1e92236a053e545
+AUX 3.8-grsec/02-read-only-ath.patch 8178 SHA256 2a779d9a72fdf6c26620733f2b7fd12e65cd50969e9689cf1cf48a26f6bbfad9 SHA512 288d359778430d9b84fb9c66afce8473142270646fea3dd53471b16b8b7a2ecf78c28d4dfdc27a914b2c7d2561cb5d2ac267e71f50ec315b2b984ad3b59dfec7 WHIRLPOOL 8e50052ad0dd875e086795fd787c1ab83c434ccb1c6bd2b0ff8eaef4392b9a26681a02a52c343a05fa655198d51d643ed4dd9cdd0f4f1415504219598d0fa95d
+AUX 3.8-grsec/03-read-only-memory.patch 251 SHA256 927886f135b9e122a51d607d99837eef047b9fa50a4f2dfea9ee4ce6076192b1 SHA512 9f1efc869370655ffba01463d71c72f7a7e3b677f455fb4893f1e9898dd2896008da1f87b66d2803542a80fc073fcb3d94abcbf626bf7240d3235293b13e1c35 WHIRLPOOL e5dbf45e9552824a3ebb3bd96e55ec77d559b5eed8aba383b907902b7115031c7fcbdf57b94d920cb66230eb492d151f957c09a883c255c83139ca63438b2848
+AUX 3.8-grsec/04-read-only-brcm80211.patch 337 SHA256 752991b5f486c0dea194be247e473e9d9d2626f3e392246d3d1da44d4864478f SHA512 494f37fc79dd1c11ab6c51d95a7521d8276f2c4d41dfcabb491c3a7d0b79ff9b2512feeef9d166da9e6297ebf2778a3f773cb52d72a325cf151f78b2e28cfb97 WHIRLPOOL 4f96dced0f32328769ddb74e74ffb6e003c68d8054331c635971658909009079ebb810839a857474dae3b5ce12e2c6d5641646462027dafad974791e0b9e5944
+AUX 3.8-grsec/05-read-only-i915.patch 473 SHA256 bfb3cdf6d8793ce328b16f6808c062d72863ea40be9f3cd942a831548e3bdb22 SHA512 2ef16324332c257e326d48fce34a65f095236f518990b6fff0aa3022893d40668acce156085bf77b669629a40696db9098bb8504614bf60d2720bf78364f9a33 WHIRLPOOL 23d44a4f3b2b0937e5b0769354a8a01ab7f4c19b0c38f85bcd2715a9fdf10b5755a1a54dfcb048dcd0badca7c99321ce91bf9c7cf23a7e3bcba54b28ac472612
+AUX 3.8-grsec/06-read-only-radeon.patch 1974 SHA256 7227c0ed3bdc8ce4c130b263a4ef3ed8b44db33a3961bb9ef9110a94dc8cf549 SHA512 c58cce6f8996ef3ac25d93c680db0f0ef3a555b9e9ad97a39d1e339135a7f449c039582d21d50fbe13cb0b9cd23c2a9fc1ff80143e4f681ebf3161377383f5dc WHIRLPOOL 9a6ab2740ca49b814c06dcd0a4533c1f33d4913470df7ac2884ed6f1a747b2c29b3899ad0b53190c832a307fea0cfb990e2068685b9205fd37a9d6ca3e4ae5a8
+AUX 3.8-grsec/07-read-only-wl1251.patch 296 SHA256 7d89a83529b7ae522a06c0a38959515b6de72fcc9412d9fa83ce744d803c438b SHA512 54fe4cc8b8f5088aa0b27fd0cf38d8795507ead26e7c8ad96b54e2d2b9d65ed6c76f29ee3359f7dee50883138511c34a4c2e2f22384aadc034f7dd46061b3307 WHIRLPOOL 4d4b53e6520c34284f6794b7afecdab36b2e5d97e12a9237e83c52fb23eee1facee829ed140e8d9b13e98e201e9e8a2cf2aa32853e9728504bd16cd3a01927e7
+AUX 3.8-grsec/08-read-only-ti.patch 327 SHA256 804cfe5f58fe4927933e64a45aa2c0b407675c22d8d80f75fe89904f7fa9a595 SHA512 387ffd8a7636d7055b65a3e2c2cb799bc514e270d24a44982da5c1bdae7160e96e7f829fc2de7323e2228f1cfd6e4517327d2c18b6df42f062a570aa5b5fcb21 WHIRLPOOL 546e75ed0970d82a6b343a82421c9368b1dc5410c4795f82cf3812845bd8d7c9f909b7955213144cd0727d62ea606af216c8dc552739bba818513ceded0ae339
+AUX 3.8-grsec/09-read-only-nouveau.patch 1039 SHA256 f797b74b8bdf86e5cd6f0c87ce1955c706f9ba82a8ee2e54f098bfc1d6952aac SHA512 27065f1c9e3fc427253ddd58fb6c11027f5cec9f356204cfa5635ae6d81d8e40b571c83ebc457600f4ce0c8d9e7401f1118523bdac6ca4e2c661dd843bb1e411 WHIRLPOOL 315756a38d0d99e696818b80adbf78ced2ca70c25a75fff3598de1f7716c036292b6cc0df9650b20d3f9981dda9d3f02ddb873033262f592869d817b665e8ba8
+AUX 3.8-grsec/20-version-disagrement-iwlwifi.patch 2832 SHA256 7f89d1c6c61735874a7c3efec1c51f7aaed05b7a59cb97e3e25e4929bbbcfc42 SHA512 2f156ba0626404234d77fe0309e8f9ef7bdaa67f2efd6c46fe3ebc7616b6b9ab27c9bb6fe0a57b97e63c3a1c31994731c569f9eb0489897e7120810325a1bd98 WHIRLPOOL e8aab7f6137b8660a6c8b78fa1900dfb600a2ca9ba14c3e1f5f75c8c24ed2d3aad5b5936c6f01a76d990871abafaab1e21d9e8522611fa2f57c79a45927bdaa6
+AUX 4002_mac80211-2.6.29-fix-tx-ctl-no-ack-retry-count.patch 559 SHA256 2c9222e0c6aaafabac091766c7e0a71442f0e9521ec1c65fc7024fbca60a3354 SHA512 9bf0b852a4ffc66afc12fc60c3fc683d689a45ff5e18470db68f25f001e14fc09c813aa01fec2b4583af1c19141e748a10c7dfd6022d727cb8cf245f6c33a2ff WHIRLPOOL 2efd4c3f2d58a833be803df6034e01d3601cb1891921fe2031c50a241a3f5e8a9e8d54aa0b29566fcc6b2bac71d48e504a3f88585a1049884e0a5986724048a1
+AUX 4004_zd1211rw-2.6.28.patch 1412 SHA256 6696295acb2a8d12a33208525cba9ce8bd2971c9c0adaabb31debfa9ec15c7be SHA512 2aa73fa8e87cb06220b589f528ba25fcec818a14a98a8d5f1173b12c287ee85e48981a48bbce772aa7fc24a33a833df2eb5bc7af0d569ea8c839090c481566ec WHIRLPOOL 4245c75995d7579b9fcc0aa1412b4f9bc4a3a924076aca24b466658740a99b0592c293420c39a616a6241ec4dce213496bf4317b73fd4b94ab1dcefbe5130382
+AUX ath_regd_optional.patch 906 SHA256 ebf117d071363e854505c18aef8c9af2652fd4a7b9cc5efe961cf5efb410dbd7 SHA512 dddc5e428f7d2a372ac17cce913c397ac408e20eb17af55633ea8984d6e69e8f7fe134436a4176606740d64fd66579c6a30a358cac393cb22f9a09d10f66e9d3 WHIRLPOOL b46164a1444d74f22d0e4f1e02ed556b52f87d5132871698ab93abd2076656e4ecadb7dc914d571df15a9bd18169ebaef67d4539a7919f1e2ffdd3df3b60cd90
+AUX compat-drivers-3.7_rc1_p6-grsec-warnings.patch 1261 SHA256 d960b976fe4e82beb2ceba3cc3eccdd064f54d9cc269f2b08d351c5a7fb528f7 SHA512 044020bacf79790f5ce10c0373f05568dfca2456f0087b3a04e5162b7602fb97371923acc6987d8732a64373ad77a4df65dae90f55e2bd7b20757f0b32d6af8c WHIRLPOOL d4b61222a97d496516575ff1bee4b9bec0d86fe9115a4ea4789b994fb1890444c568f866b4289f2ecaf9a399aec7453588aa6a842e7c48bce61320d48cd2eb86
+AUX compat-drivers-3.7_rc1_p6-grsec.patch 356575 SHA256 aefac7264cd36791af4194a4c9ed5a5a90a68d51c3ef1ba1af280c263ad394e2 SHA512 6aa48f042efac4c9a94ca7592f548e6dc5b8796c56a1827239b9d2e357ba23d14373b986e4cd789aedf07ce0a02eca2bdd3304a0bef751b8914367c9dc1b7fff WHIRLPOOL 986b202f601b4efb894ed8b0ab39f099af14b61deb687f670c83c3a587947e83b1180910d79bb701a6c5c92607ab529f6be229f840844cb08efaef165a83123f
+AUX compat-drivers-3.8-ath6kl.patch 1415 SHA256 53510c3ee47144a77cf4514e016d62e43f8c92be1480f930b5ddbcc311be53fd SHA512 e88c0e7a0ebf4740643ab7a44b67e452a5ecea990ebca33ef608bec3072b0888f0d27e8bac1f452dd78efe889a7eff0a3ccd000b7d78ea50148e97297bf3590c WHIRLPOOL 47ed0e6e8364f48f7f2aa3dae9effba16a00dab43adbef110dd56e042455bcab112c064fd222b802faa45d24a15d18e92e52bf99b91f719c925dc39d90a6a483
+AUX compat-drivers-3.8-bt_tty.patch 1295 SHA256 e1307946a4ed6185b1850d95f3920ff747e584e6b23dad95220c8b0809fe6de1 SHA512 a4f5e68cb2e7e8972c9271d9519c688fe26cf0ba35b925ecd2270668d4ac8d22a25137596a07cc106cca53afe31e7771d77151b4e4361c22d3332ae5920062ee WHIRLPOOL ff3fe54d241411d5885e002571226247933d8e5d85e5786c2ffeb0ab6637b5c692c362beae5f85b0454ff01454f3277b11f339aeef0d9da4cb71d4356cc9101e
+AUX compat-drivers-3.8-driver-select 22310 SHA256 31d7fc0eaa6f7a9528b1dbf948af9e9b4d5990f9a7fe3e060030a6108e0ce457 SHA512 67650594fe29a277bb1a2c1e0beebb11610b001a205abd14c42b996a49a47bd94eecf5ad265a3b53ecef3db7732624b37ecf9ef3dda62278c266d7cdb84a743b WHIRLPOOL 32e4e11eab76ea22555f481d9e15256dc09b1760b0e63a78d0f18a9a9dcfcb1b726ee90b7ea47ddf9a6f60e07c94e73115f6f2c6d35030ff9df79cdee69d6339
+AUX ipw2200-inject.3.4.6.patch 4173 SHA256 0b649bd7b6d2bf22667edc96949b5ab92cc7fb5c543b4385c17c5e0f47fe4109 SHA512 ebee3efda7b94898ea18a89f57c515d5237ef3c2a1eaf0bd13949ec4663a600eadede4655178355ac3f5b8ddc2eccc2cceb88eba0281ed3f614ada186a041463 WHIRLPOOL 15d94b3176719d006363f4d42a11c505643fdead8d521ccfb149cf5eee8851488aa006d4f8c750ffb5e81b23ff03d275e5fde781505e508467ff76303e612570
+AUX leds-disable-strict-3.6.6.patch 799 SHA256 e1b5947608d9c53263efd76aed404eb7e4107d81669faeb484f5d680e4cfb570 SHA512 6373efc6697b4589c15c3cd2cfd8f3faa06fe90eeeb4fa9acaf187ecc682fac394444949dd26c2dde71c0f45aedb1198374764b580b824ddc6bc1e937273b5f1 WHIRLPOOL 40bdef714aa0ff4069bb50abf434dff88610d97f570b80f36622d29a43301bf013a90e90cb7f3f8044c2bf53a09154b1033a6f53b1edf186513452fdb681d55f
+AUX leds-disable-strict-3.7_rc1_p6.patch 799 SHA256 e1b5947608d9c53263efd76aed404eb7e4107d81669faeb484f5d680e4cfb570 SHA512 6373efc6697b4589c15c3cd2cfd8f3faa06fe90eeeb4fa9acaf187ecc682fac394444949dd26c2dde71c0f45aedb1198374764b580b824ddc6bc1e937273b5f1 WHIRLPOOL 40bdef714aa0ff4069bb50abf434dff88610d97f570b80f36622d29a43301bf013a90e90cb7f3f8044c2bf53a09154b1033a6f53b1edf186513452fdb681d55f
+AUX leds-disable-strict-3.8.patch 799 SHA256 e1b5947608d9c53263efd76aed404eb7e4107d81669faeb484f5d680e4cfb570 SHA512 6373efc6697b4589c15c3cd2cfd8f3faa06fe90eeeb4fa9acaf187ecc682fac394444949dd26c2dde71c0f45aedb1198374764b580b824ddc6bc1e937273b5f1 WHIRLPOOL 40bdef714aa0ff4069bb50abf434dff88610d97f570b80f36622d29a43301bf013a90e90cb7f3f8044c2bf53a09154b1033a6f53b1edf186513452fdb681d55f
+DIST compat-drivers-3.8-rc5-1-u.tar.xz 5299080 SHA256 0952e211e7352bbb4b236853c751ab4ffffebf4923517316f9579e644e0eabc8 SHA512 bc80b129d709aaf7eb76d6ec388d4b76ce60eb9a61101eae8ad6a43c09097d927651f462810735f56e53b035f3b7ce3c422a2894ca4ccca234bba9c61850877c WHIRLPOOL f31192ad24b81f55d7bcf1b03ddc70b6e9d20acd8f106ab7edcfd8a461d44270d1deb1d87451f540e050c04bef2ec20524d75777357a7ebc56ef76baa7257d21
+DIST compat-drivers-3.8-rc7-1-u.tar.xz 5301132 SHA256 b53ddac5b0423d72d3945235637cbbb4559b5b527bd74d4cc9dd9e098efad4c6 SHA512 ee8f72d2acfb0aa41e90268fc68f1c42d87a7e14597f7866dd9c96a00dd8ea318b92a5b25e499d65e00b617600baa09cfc639379465e306f54f73689c3fc93c1 WHIRLPOOL 98a0b602e67f6f305f4795b8a53ad87bcabd282aa45d5e74c5c1add07084fb97fbac1d651e59e6f6a4838a4084f5d5418d59a7d49441613f91df33172b480582
+EBUILD compat-drivers-alx-3.8_rc5.ebuild 6958 SHA256 acb8407d6578d328c06e969314807736c9b52c6eeba7c2db3ec4a86a18c32c35 SHA512 a0349518e8de6683b8d63d3ecd695442887d681c34c12d7a38095aa6f924917c138544598f13a835dd0f05da72c9b3c34a1824b1d27449477c846ccdb02cf28b WHIRLPOOL ec2dd2dc2f0d56b143d345d6e7075367aaa690ad857c0976383963e8f5dc27a6d7219bd8cf1bbb5d275d147bf551faf11cb85e37114c82c18e0f2e19648a73f8
+EBUILD compat-drivers-alx-3.8_rc7.ebuild 7253 SHA256 d763f2519fadbc9b43e68561070b53d8fcf06bb0eab87c3d20bcad7bfec85766 SHA512 69c5acd4d67ac79d5a64717db2d9ab727dd5f6e865282e04302eda8b2547307a6459b902f2de8bc105a75b4e6164546b97fbe94a7d2c01855f36145e95035f12 WHIRLPOOL 0a4ad03a00636a138a2be5c70bcfbcccfc25d326cbdebc5d126a5b458da718990b042adc6187eee91b0c71b57e7fc3f8ba6e8dba08320c9acb54206a95c66fdf
diff --git a/sys-kernel/compat-drivers-alx/compat-drivers-alx-3.8_rc5.ebuild b/sys-kernel/compat-drivers-alx/compat-drivers-alx-3.8_rc5.ebuild
new file mode 100644
index 00000000..874a38cc
--- /dev/null
+++ b/sys-kernel/compat-drivers-alx/compat-drivers-alx-3.8_rc5.ebuild
@@ -0,0 +1,178 @@
+# Copyright 1999-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Header: $
+
+EAPI="5"
+
+# USE_EXPAND categories
+CPD_USE_EXPAND="wifi ethernet various"
+# These are officially supported
+CPD_USE_EXPAND_wifi="ath5k ath9k ath9k_ap ath9k_htc ath6kl b43 brcmsmac brcmfmac carl9170 rt2x00 wl1251 wl12xx zd1211rw"
+# This might work (not officially supported)
+CPD_USE_EXPAND_wifi+=" wl18xx"
+# This might work (added by pentoo)
+CPD_USE_EXPAND_wifi+=" b44"
+
+# These are officially supported
+CPD_USE_EXPAND_ethernet="atl1 atl1c atl1e atl2"
+# This might work (not officially supported)
+CPD_USE_EXPAND_ethernet+=" atlxx"
+
+# These are officially supported
+CPD_USE_EXPAND_various="i915"
+# This might work (not officially supported)
+CPD_USE_EXPAND_various+=" bt drm"
+# This might work (added by pentoo)
+CPD_USE_EXPAND_various+=" staging usbnet"
+
+inherit linux-mod linux-info versionator eutils compat-drivers-3.7
+
+# upstream versioning, ex.: 3.7-rc1-6
+UPSTREAM_PVR="${PV//_/-}" && UPSTREAM_PVR="${UPSTREAM_PVR/-p/-}"
+# ex.: 3.7-rc1
+UPSTREAM_PV=${UPSTREAM_PVR%-*}
+
+MY_PN="compat-drivers"
+
+DESCRIPTION="Stable kernel pre-release wifi subsystem backport"
+HOMEPAGE="http://backports.wiki.kernel.org"
+SRC_URI="mirror://kernel/linux/kernel/projects/backports/stable/v${UPSTREAM_PVR}/${MY_PN}-${UPSTREAM_PVR}-1-u.tar.xz"
+
+LICENSE="GPL-2"
+SLOT="0"
+KEYWORDS=""
+
+IUSE="atheros_obey_crda debugfs debug-driver full-debug injection livecd loadmodules noleds pax_kernel"
+
+DEPEND="!net-wireless/compat-wireless-builder
+ !net-wireless/compat-wireless"
+RDEPEND="${DEPEND}
+ >=sys-kernel/linux-firmware-20110219
+ virtual/udev"
+
+S="${WORKDIR}/${MY_PN}-${UPSTREAM_PVR}-1-u"
+
+RESTRICT="strip"
+
+CONFIG_CHECK="!DYNAMIC_FTRACE"
+
+pkg_setup() {
+ CONFIG_CHECK="~NET_SCHED"
+ CONFIG_CHECK="~IPW2200_PROMISCUOUS"
+ linux-mod_pkg_setup
+ kernel_is -lt 2 6 27 && die "kernel 2.6.27 or higher is required for compat drivers to be installed"
+ kernel_is -gt $(get_version_component_range 1) $(get_version_component_range 2) $(get_version_component_range 3) && die "The version of compat drivers you are trying to install contains older modules than your kernel. Failing before downgrading your system."
+
+ #these things are not optional
+ linux_chkconfig_module MAC80211 || die "CONFIG_MAC80211 must be built as a _module_ !"
+ linux_chkconfig_module CFG80211 || die "CONFIG_CFG80211 must be built as a _module_ !"
+ linux_chkconfig_module LIBIPW || ewarn "CONFIG_LIBIPW really should be set or there will be no WEXT compat"
+
+ if use compat_drivers_wifi_b43; then
+ linux_chkconfig_module SSB || die "You need to enable CONFIG_SSB or USE=-b43"
+ fi
+ if use compat_drivers_wifi_b44; then
+ linux_chkconfig_module SSB || die "You need to enable CONFIG_SSB or USE=-b44"
+ fi
+}
+
+src_prepare() {
+ use pax_kernel && epatch "${FILESDIR}"/${MY_PN}-3.7_rc1_p6-grsec.patch
+ use pax_kernel && epatch "${FILESDIR}"/${MY_PN}-3.7_rc1_p6-grsec-warnings.patch
+
+ #mcgrof said prep for inclusion in compat-wireless.git but this causes issues
+ #find "${S}" -name Makefile | xargs sed -i -e 's/export CONFIG_/export CONFIG_COMPAT_/' -e 's/COMPAT_COMPAT_/COMPAT_/' -e 's/CONFIG_COMPAT_CHECK/CONFIG_CHECK/'
+ #sed -i -e 's/export CONFIG_/export CONFIG_COMPAT_/' -e 's/COMPAT_COMPAT_/COMPAT_/' "${S}"/config.mk
+
+ # CONFIG_CFG80211_REG_DEBUG=y
+ sed -i '/CFG80211_REG_DEBUG/s/^# *//' "${S}"/config.mk
+
+ #this patch ignores the regulatory settings of an atheros card and uses what CRDA thinks is right
+ if use atheros_obey_crda; then
+ ewarn "You have enabled atheros_obey_crda which doesn't do what you think."
+ ewarn "This use flag will cause the eeprom of the card to be ignored and force"
+ ewarn "world roaming on the device until crda provides a valid regdomain."
+ ewarn "Short version, this is not a way to break the law, this will automatically"
+ ewarn "make your card less functional unless you set a proper regdomain with iw/crda."
+ epatch "${FILESDIR}"/ath_regd_optional.patch
+ fi
+
+ if use injection; then
+ epatch "${FILESDIR}"/4002_mac80211-2.6.29-fix-tx-ctl-no-ack-retry-count.patch
+ epatch "${FILESDIR}"/4004_zd1211rw-2.6.28.patch
+# epatch "${FILESDIR}"/mac80211.compat08082009.wl_frag+ack_v1.patch
+# epatch "${FILESDIR}"/4013-runtime-enable-disable-of-mac80211-packet-injection.patch
+ epatch "${FILESDIR}"/ipw2200-inject.3.4.6.patch
+ fi
+ if use noleds; then
+ sed -ir 's/^\(export CONFIG_.*_LEDS=\)y$/\1n/' config.mk
+ epatch "${FILESDIR}/leds-disable-strict-${PV}.patch"
+ fi
+ use debug-driver && sed -i '/DEBUG=y/s/^# *//' "${S}"/config.mk
+ use debugfs && sed -i '/DEBUGFS/s/^# *//' "${S}"/config.mk
+ if use full-debug; then
+ if use debug-driver ; then
+ sed -i '/CONFIG=/s/^# *//' "${S}"/config.mk
+ else
+ ewarn "Enabling full-debug includes debug-driver."
+ sed -i '/DEBUG=/s/^# *//' "${S}"/config.mk
+ fi
+ fi
+
+ #avoid annoying ACCESS DENIED sandbox errors
+ sed -i "s/\${MAKE} -C \${KLIB_BUILD} kernelversion/echo ${KV_FULL}/g" compat/scripts/gen-compat-config.sh || die "sed failed"
+ sed -i "s/shell \$(MAKE) -C \$(KLIB_BUILD) kernelversion/echo ${KV_FULL}/g" config.mk || die "sed failed"
+ sed -i "s/make -C \$KLIB_BUILD kernelversion/echo ${KV_FULL}/g" scripts/gen-compat-autoconf.sh || die "sed failed"
+}
+
+src_compile() {
+ addpredict "${KERNEL_DIR}"
+ set_arch_to_kernel
+ emake KLIB_BUILD="${DESTDIR}"/lib/modules/"${KV_FULL}"/build || die "emake failed"
+}
+
+src_install() {
+ insinto "${DESTDIR}"/lib/modules/"${KV_FULL}"
+ insinto "/lib/modules/${KV_FULL}/updates/drivers/net/ethernet/"
+ doins -r "./drivers/net/ethernet/atheros/"
+
+ dosbin scripts/athenable scripts/b43load scripts/iwl-enable \
+ scripts/madwifi-unload scripts/athload scripts/iwl-load \
+ scripts/b43enable scripts/unload.sh
+
+ dodir /usr/lib/compat-wireless
+ exeinto /usr/lib/compat-wireless
+ doexe scripts/modlib.sh
+
+ dodoc README.md
+ dodir /$(get_libdir)/udev/rules.d/
+ insinto /$(get_libdir)/udev/rules.d/
+ doins udev/50-compat_firmware.rules
+ exeinto /$(get_libdir)/udev/
+ doexe udev/compat_firmware.sh
+}
+
+pkg_postinst() {
+ update_depmod
+ update_moduledb
+
+ if use !livecd; then
+ if use loadmodules; then
+ einfo "Attempting to unload modules..."
+ /usr/sbin/unload.sh 2>&1 | grep -E FATAL && ewarn "Unable to remove running modules, system may be unhappy, reboot HIGHLY recommended!"
+ einfo "Triggering automatic reload of needed modules..."
+ /sbin/udevadm trigger
+ einfo "We have attempted to load your new modules for you, this may fail horribly, or may just cause a network hiccup."
+ einfo "If you experience any issues reboot is the simplest course of action."
+ fi
+ fi
+ if use !loadmodules; then
+ einfo "You didn't USE=loadmodules but you can still attempt to switch to the new drivers without reboot."
+ einfo "Run 'unload.sh' then 'udevadm trigger' to cause udev to load the needed drivers."
+ einfo "If unload.sh fails for some reason you should be able to simply reboot to fix everything and load the new modules."
+ fi
+}
+
+pkg_postrm() {
+ remove_moduledb
+}
diff --git a/sys-kernel/compat-drivers-alx/compat-drivers-alx-3.8_rc7.ebuild b/sys-kernel/compat-drivers-alx/compat-drivers-alx-3.8_rc7.ebuild
new file mode 100644
index 00000000..e80c50be
--- /dev/null
+++ b/sys-kernel/compat-drivers-alx/compat-drivers-alx-3.8_rc7.ebuild
@@ -0,0 +1,186 @@
+# Copyright 1999-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Header: $
+
+EAPI="5"
+
+# USE_EXPAND categories
+CPD_USE_EXPAND="wifi ethernet various"
+# These are officially supported
+CPD_USE_EXPAND_wifi="ath5k ath9k ath9k_ap ath9k_htc ath6kl b43 brcmsmac brcmfmac carl9170 rt2x00 wil6210 wl1251 wl12xx zd1211rw"
+# This might work (not officially supported)
+CPD_USE_EXPAND_wifi+=" wl18xx"
+# This might work (added by pentoo)
+CPD_USE_EXPAND_wifi+=" b44"
+
+# These are officially supported
+CPD_USE_EXPAND_ethernet="alx atl1 atl1c atl1e atl2"
+# This might work (not officially supported)
+CPD_USE_EXPAND_ethernet+=" atlxx"
+
+# These are officially supported
+CPD_USE_EXPAND_various="i915"
+# This might work (not officially supported)
+CPD_USE_EXPAND_various+=" bt drm"
+# This might work (added by pentoo)
+CPD_USE_EXPAND_various+=" staging usbnet"
+
+inherit linux-mod linux-info versionator eutils compat-drivers-3.8
+
+# upstream versioning, ex.: 3.7-rc1-6
+UPSTREAM_PVR="${PV//_/-}" && UPSTREAM_PVR="${UPSTREAM_PVR/-p/-}"
+# ex.: 3.7-rc1
+UPSTREAM_PV=${UPSTREAM_PVR%-*}
+
+MY_PN="compat-drivers"
+
+DESCRIPTION="Stable kernel pre-release wifi subsystem backport"
+HOMEPAGE="http://backports.wiki.kernel.org"
+SRC_URI="mirror://kernel/linux/kernel/projects/backports/stable/v${UPSTREAM_PVR}/${MY_PN}-${UPSTREAM_PVR}-1-u.tar.xz"
+
+LICENSE="GPL-2"
+SLOT="0"
+KEYWORDS=""
+
+IUSE="atheros_obey_crda debugfs debug-driver full-debug injection livecd loadmodules noleds pax_kernel"
+
+DEPEND="!net-wireless/compat-wireless-builder
+ !net-wireless/compat-wireless"
+RDEPEND="${DEPEND}
+ >=sys-kernel/linux-firmware-20110219
+ virtual/udev"
+
+S="${WORKDIR}/${MY_PN}-${UPSTREAM_PVR}-1-u"
+
+RESTRICT="strip"
+
+CONFIG_CHECK="!DYNAMIC_FTRACE"
+
+pkg_setup() {
+ CONFIG_CHECK="~NET_SCHED"
+ CONFIG_CHECK="~IPW2200_PROMISCUOUS"
+ linux-mod_pkg_setup
+ kernel_is -lt 2 6 27 && die "kernel 2.6.27 or higher is required for compat drivers to be installed"
+ kernel_is -gt $(get_version_component_range 1) $(get_version_component_range 2) $(get_version_component_range 3) && die "The version of compat drivers you are trying to install contains older modules than your kernel. Failing before downgrading your system."
+
+ #these things are not optional
+ linux_chkconfig_module MAC80211 || die "CONFIG_MAC80211 must be built as a _module_ !"
+ linux_chkconfig_module CFG80211 || die "CONFIG_CFG80211 must be built as a _module_ !"
+ linux_chkconfig_module LIBIPW || ewarn "CONFIG_LIBIPW really should be set or there will be no WEXT compat"
+
+ if use compat_drivers_wifi_b43; then
+ linux_chkconfig_module SSB || die "You need to enable CONFIG_SSB or USE=-b43"
+ fi
+ if use compat_drivers_wifi_b44; then
+ linux_chkconfig_module SSB || die "You need to enable CONFIG_SSB or USE=-b44"
+ fi
+}
+
+src_prepare() {
+# use pax_kernel && epatch "${FILESDIR}"/${MY_PN}-3.7_rc1_p6-grsec.patch
+# use pax_kernel && epatch "${FILESDIR}"/${MY_PN}-3.8-grsec-readonly.patch
+ if use pax_kernel; then
+ for gpatch in "${FILESDIR}"/3.8-grsec/*; do
+ epatch "${gpatch}"
+ done
+ fi
+ # upstream might want to see this
+ use pax_kernel && epatch "${FILESDIR}"/${MY_PN}-3.8-bt_tty.patch
+# use pax_kernel && epatch "${FILESDIR}"/${MY_PN}-3.7_rc1_p6-grsec-warnings.patch
+
+ #mcgrof said prep for inclusion in compat-wireless.git but this causes issues
+ #find "${S}" -name Makefile | xargs sed -i -e 's/export CONFIG_/export CONFIG_COMPAT_/' -e 's/COMPAT_COMPAT_/COMPAT_/' -e 's/CONFIG_COMPAT_CHECK/CONFIG_CHECK/'
+ #sed -i -e 's/export CONFIG_/export CONFIG_COMPAT_/' -e 's/COMPAT_COMPAT_/COMPAT_/' "${S}"/config.mk
+
+ # CONFIG_CFG80211_REG_DEBUG=y
+ sed -i '/CFG80211_REG_DEBUG/s/^# *//' "${S}"/config.mk
+
+ #this patch ignores the regulatory settings of an atheros card and uses what CRDA thinks is right
+ if use atheros_obey_crda; then
+ ewarn "You have enabled atheros_obey_crda which doesn't do what you think."
+ ewarn "This use flag will cause the eeprom of the card to be ignored and force"
+ ewarn "world roaming on the device until crda provides a valid regdomain."
+ ewarn "Short version, this is not a way to break the law, this will automatically"
+ ewarn "make your card less functional unless you set a proper regdomain with iw/crda."
+ epatch "${FILESDIR}"/ath_regd_optional.patch
+ fi
+
+ if use injection; then
+ epatch "${FILESDIR}"/4002_mac80211-2.6.29-fix-tx-ctl-no-ack-retry-count.patch
+ epatch "${FILESDIR}"/4004_zd1211rw-2.6.28.patch
+# epatch "${FILESDIR}"/mac80211.compat08082009.wl_frag+ack_v1.patch
+# epatch "${FILESDIR}"/4013-runtime-enable-disable-of-mac80211-packet-injection.patch
+ epatch "${FILESDIR}"/ipw2200-inject.3.4.6.patch
+ fi
+ if use noleds; then
+ sed -ir 's/^\(export CONFIG_.*_LEDS=\)y$/\1n/' config.mk
+ epatch "${FILESDIR}/leds-disable-strict-${PV}.patch"
+ fi
+ use debug-driver && sed -i '/DEBUG=y/s/^# *//' "${S}"/config.mk
+ use debugfs && sed -i '/DEBUGFS/s/^# *//' "${S}"/config.mk
+ if use full-debug; then
+ if use debug-driver ; then
+ sed -i '/CONFIG=/s/^# *//' "${S}"/config.mk
+ else
+ ewarn "Enabling full-debug includes debug-driver."
+ sed -i '/DEBUG=/s/^# *//' "${S}"/config.mk
+ fi
+ fi
+
+ #avoid annoying ACCESS DENIED sandbox errors
+ sed -i "s/\${MAKE} -C \${KLIB_BUILD} kernelversion/echo ${KV_FULL}/g" compat/scripts/gen-compat-config.sh || die "sed failed"
+ sed -i "s/shell \$(MAKE) -C \$(KLIB_BUILD) kernelversion/echo ${KV_FULL}/g" config.mk || die "sed failed"
+ sed -i "s/make -C \$KLIB_BUILD kernelversion/echo ${KV_FULL}/g" scripts/gen-compat-autoconf.sh || die "sed failed"
+}
+
+src_compile() {
+ addpredict "${KERNEL_DIR}"
+ set_arch_to_kernel
+ emake KLIB_BUILD="${DESTDIR}"/lib/modules/"${KV_FULL}"/build || die "emake failed"
+}
+
+src_install() {
+ insinto "${DESTDIR}"/lib/modules/"${KV_FULL}"
+ insinto "/lib/modules/${KV_FULL}/updates/drivers/net/ethernet/atheros/alx/"
+ doins "./drivers/net/ethernet/atheros/alx/alx.ko"
+
+ dosbin scripts/athenable scripts/b43load scripts/iwl-enable \
+ scripts/madwifi-unload scripts/athload scripts/iwl-load \
+ scripts/b43enable scripts/unload.sh
+
+ dodir /usr/lib/compat-wireless
+ exeinto /usr/lib/compat-wireless
+ doexe scripts/modlib.sh
+
+ dodoc README.md
+ dodir /$(get_libdir)/udev/rules.d/
+ insinto /$(get_libdir)/udev/rules.d/
+ doins udev/50-compat_firmware.rules
+ exeinto /$(get_libdir)/udev/
+ doexe udev/compat_firmware.sh
+}
+
+pkg_postinst() {
+ update_depmod
+ update_moduledb
+
+ if use !livecd; then
+ if use loadmodules; then
+ einfo "Attempting to unload modules..."
+ /usr/sbin/unload.sh 2>&1 | grep -E FATAL && ewarn "Unable to remove running modules, system may be unhappy, reboot HIGHLY recommended!"
+ einfo "Triggering automatic reload of needed modules..."
+ /sbin/udevadm trigger
+ einfo "We have attempted to load your new modules for you, this may fail horribly, or may just cause a network hiccup."
+ einfo "If you experience any issues reboot is the simplest course of action."
+ fi
+ fi
+ if use !loadmodules; then
+ einfo "You didn't USE=loadmodules but you can still attempt to switch to the new drivers without reboot."
+ einfo "Run 'unload.sh' then 'udevadm trigger' to cause udev to load the needed drivers."
+ einfo "If unload.sh fails for some reason you should be able to simply reboot to fix everything and load the new modules."
+ fi
+}
+
+pkg_postrm() {
+ remove_moduledb
+}
diff --git a/sys-kernel/compat-drivers-alx/files/3.8-grsec/00-read-only.patch b/sys-kernel/compat-drivers-alx/files/3.8-grsec/00-read-only.patch
new file mode 100644
index 00000000..a7066c36
--- /dev/null
+++ b/sys-kernel/compat-drivers-alx/files/3.8-grsec/00-read-only.patch
@@ -0,0 +1,23 @@
+--- ./include/net/bluetooth/bluetooth.h
++++ ./include/net/bluetooth/bluetooth.h
+@@ -206,7 +206,7 @@
+ struct file_operations fops;
+ int (* custom_seq_show)(struct seq_file *, void *);
+ #endif
+-};
++} __no_const;
+
+ int bt_sock_register(int proto, const struct net_proto_family *ops);
+ int bt_sock_unregister(int proto);
+--- ./include/net/mac80211.h
++++ ./include/net/mac80211.h
+@@ -2652,7 +2652,7 @@
+ struct ieee80211_chanctx_conf *ctx);
+
+ void (*restart_complete)(struct ieee80211_hw *hw);
+-};
++} __no_const;
+
+ /**
+ * ieee80211_alloc_hw - Allocate a new hardware device
+
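[Editorial note, not part of the commit: the 01-read-only.patch below opens with the author's question "How/why does this even work?". The short answer is grsecurity/PaX's "constify" GCC plugin: any struct made up solely of function pointers is implicitly treated as const and placed in read-only memory after init, __no_const is the plugin's opt-out attribute, and typedefs such as rfkill_ops_no_const give a writable variant for the one embedded copy that is filled in at runtime. A minimal C sketch of that mechanism follows; the macro guard and struct members are illustrative assumptions based on grsecurity conventions, not taken from this commit.]

    /* Sketch only: assumes a grsecurity-style constify plugin providing
     * the no_const attribute; without the plugin both cases are no-ops. */
    #ifdef CONSTIFY_PLUGIN
    #define __no_const __attribute__((no_const))
    #else
    #define __no_const
    #endif

    /* All-function-pointer ops struct: the plugin implicitly constifies
     * it, so a plain embedded copy could not be assigned to at runtime. */
    struct rfkill_ops_sketch {
    	void (*poll)(void *data);
    	void (*query)(void *data);
    };

    /* Writable variant for a copy that must be filled in per device. */
    typedef struct rfkill_ops_sketch __no_const rfkill_ops_no_const;

    struct registered_device_sketch {
    	rfkill_ops_no_const rfkill_ops;	/* written at runtime: opt out */
    };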
diff --git a/sys-kernel/compat-drivers-alx/files/3.8-grsec/01-read-only.patch b/sys-kernel/compat-drivers-alx/files/3.8-grsec/01-read-only.patch
new file mode 100644
index 00000000..88ecf46f
--- /dev/null
+++ b/sys-kernel/compat-drivers-alx/files/3.8-grsec/01-read-only.patch
@@ -0,0 +1,12 @@
+How/why does this even work?
+--- ./net/wireless/core.h
++++ ./net/wireless/core.h
+@@ -28,7 +28,7 @@
+ struct mutex mtx;
+
+ /* rfkill support */
+- struct rfkill_ops rfkill_ops;
++ rfkill_ops_no_const rfkill_ops;
+ struct rfkill *rfkill;
+ struct work_struct rfkill_sync;
+
diff --git a/sys-kernel/compat-drivers-alx/files/3.8-grsec/02-read-only-ath.patch b/sys-kernel/compat-drivers-alx/files/3.8-grsec/02-read-only-ath.patch
new file mode 100644
index 00000000..ddb69f6b
--- /dev/null
+++ b/sys-kernel/compat-drivers-alx/files/3.8-grsec/02-read-only-ath.patch
@@ -0,0 +1,240 @@
+--- ./drivers/net/wireless/ath/ath.h
++++ ./drivers/net/wireless/ath/ath.h
+@@ -119,6 +119,7 @@ struct ath_ops {
+ void (*write_flush) (void *);
+ u32 (*rmw)(void *, u32 reg_offset, u32 set, u32 clr);
+ };
++typedef struct ath_ops __no_const ath_ops_no_const;
+
+ struct ath_common;
+ struct ath_bus_ops;
+--- ./drivers/net/wireless/ath/ath9k/ar9002_mac.c
++++ ./drivers/net/wireless/ath/ath9k/ar9002_mac.c
+@@ -184,8 +184,8 @@ ar9002_set_txdesc(struct ath_hw *ah, voi
+ ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
+ ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
+
+- ACCESS_ONCE(ads->ds_link) = i->link;
+- ACCESS_ONCE(ads->ds_data) = i->buf_addr[0];
++ ACCESS_ONCE_RW(ads->ds_link) = i->link;
++ ACCESS_ONCE_RW(ads->ds_data) = i->buf_addr[0];
+
+ ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore);
+ ctl6 = SM(i->keytype, AR_EncrType);
+@@ -199,26 +199,26 @@ ar9002_set_txdesc(struct ath_hw *ah, voi
+
+ if ((i->is_first || i->is_last) &&
+ i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) {
+- ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0)
++ ACCESS_ONCE_RW(ads->ds_ctl2) = set11nTries(i->rates, 0)
+ | set11nTries(i->rates, 1)
+ | set11nTries(i->rates, 2)
+ | set11nTries(i->rates, 3)
+ | (i->dur_update ? AR_DurUpdateEna : 0)
+ | SM(0, AR_BurstDur);
+
+- ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0)
++ ACCESS_ONCE_RW(ads->ds_ctl3) = set11nRate(i->rates, 0)
+ | set11nRate(i->rates, 1)
+ | set11nRate(i->rates, 2)
+ | set11nRate(i->rates, 3);
+ } else {
+- ACCESS_ONCE(ads->ds_ctl2) = 0;
+- ACCESS_ONCE(ads->ds_ctl3) = 0;
++ ACCESS_ONCE_RW(ads->ds_ctl2) = 0;
++ ACCESS_ONCE_RW(ads->ds_ctl3) = 0;
+ }
+
+ if (!i->is_first) {
+- ACCESS_ONCE(ads->ds_ctl0) = 0;
+- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
+- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
++ ACCESS_ONCE_RW(ads->ds_ctl0) = 0;
++ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
++ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
+ return;
+ }
+
+@@ -243,7 +243,7 @@ ar9002_set_txdesc(struct ath_hw *ah, voi
+ break;
+ }
+
+- ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
++ ACCESS_ONCE_RW(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
+ | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
+ | SM(i->txpower, AR_XmitPower)
+ | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
+@@ -253,19 +253,19 @@ ar9002_set_txdesc(struct ath_hw *ah, voi
+ | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
+ (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));
+
+- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
+- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
++ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
++ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
+
+ if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST)
+ return;
+
+- ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
++ ACCESS_ONCE_RW(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
+ | set11nPktDurRTSCTS(i->rates, 1);
+
+- ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
++ ACCESS_ONCE_RW(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
+ | set11nPktDurRTSCTS(i->rates, 3);
+
+- ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
++ ACCESS_ONCE_RW(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
+ | set11nRateFlags(i->rates, 1)
+ | set11nRateFlags(i->rates, 2)
+ | set11nRateFlags(i->rates, 3)
+--- ./drivers/net/wireless/ath/ath9k/ar9003_mac.c
++++ ./drivers/net/wireless/ath/ath9k/ar9003_mac.c
+@@ -39,47 +39,47 @@ ar9003_set_txdesc(struct ath_hw *ah, voi
+ (i->qcu << AR_TxQcuNum_S) | desc_len;
+
+ checksum += val;
+- ACCESS_ONCE(ads->info) = val;
++ ACCESS_ONCE_RW(ads->info) = val;
+
+ checksum += i->link;
+- ACCESS_ONCE(ads->link) = i->link;
++ ACCESS_ONCE_RW(ads->link) = i->link;
+
+ checksum += i->buf_addr[0];
+- ACCESS_ONCE(ads->data0) = i->buf_addr[0];
++ ACCESS_ONCE_RW(ads->data0) = i->buf_addr[0];
+ checksum += i->buf_addr[1];
+- ACCESS_ONCE(ads->data1) = i->buf_addr[1];
++ ACCESS_ONCE_RW(ads->data1) = i->buf_addr[1];
+ checksum += i->buf_addr[2];
+- ACCESS_ONCE(ads->data2) = i->buf_addr[2];
++ ACCESS_ONCE_RW(ads->data2) = i->buf_addr[2];
+ checksum += i->buf_addr[3];
+- ACCESS_ONCE(ads->data3) = i->buf_addr[3];
++ ACCESS_ONCE_RW(ads->data3) = i->buf_addr[3];
+
+ checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen);
+- ACCESS_ONCE(ads->ctl3) = val;
++ ACCESS_ONCE_RW(ads->ctl3) = val;
+ checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen);
+- ACCESS_ONCE(ads->ctl5) = val;
++ ACCESS_ONCE_RW(ads->ctl5) = val;
+ checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen);
+- ACCESS_ONCE(ads->ctl7) = val;
++ ACCESS_ONCE_RW(ads->ctl7) = val;
+ checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen);
+- ACCESS_ONCE(ads->ctl9) = val;
++ ACCESS_ONCE_RW(ads->ctl9) = val;
+
+ checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff);
+- ACCESS_ONCE(ads->ctl10) = checksum;
++ ACCESS_ONCE_RW(ads->ctl10) = checksum;
+
+ if (i->is_first || i->is_last) {
+- ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0)
++ ACCESS_ONCE_RW(ads->ctl13) = set11nTries(i->rates, 0)
+ | set11nTries(i->rates, 1)
+ | set11nTries(i->rates, 2)
+ | set11nTries(i->rates, 3)
+ | (i->dur_update ? AR_DurUpdateEna : 0)
+ | SM(0, AR_BurstDur);
+
+- ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0)
++ ACCESS_ONCE_RW(ads->ctl14) = set11nRate(i->rates, 0)
+ | set11nRate(i->rates, 1)
+ | set11nRate(i->rates, 2)
+ | set11nRate(i->rates, 3);
+ } else {
+- ACCESS_ONCE(ads->ctl13) = 0;
+- ACCESS_ONCE(ads->ctl14) = 0;
++ ACCESS_ONCE_RW(ads->ctl13) = 0;
++ ACCESS_ONCE_RW(ads->ctl14) = 0;
+ }
+
+ ads->ctl20 = 0;
+@@ -89,17 +89,17 @@ ar9003_set_txdesc(struct ath_hw *ah, voi
+
+ ctl17 = SM(i->keytype, AR_EncrType);
+ if (!i->is_first) {
+- ACCESS_ONCE(ads->ctl11) = 0;
+- ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
+- ACCESS_ONCE(ads->ctl15) = 0;
+- ACCESS_ONCE(ads->ctl16) = 0;
+- ACCESS_ONCE(ads->ctl17) = ctl17;
+- ACCESS_ONCE(ads->ctl18) = 0;
+- ACCESS_ONCE(ads->ctl19) = 0;
++ ACCESS_ONCE_RW(ads->ctl11) = 0;
++ ACCESS_ONCE_RW(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
++ ACCESS_ONCE_RW(ads->ctl15) = 0;
++ ACCESS_ONCE_RW(ads->ctl16) = 0;
++ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
++ ACCESS_ONCE_RW(ads->ctl18) = 0;
++ ACCESS_ONCE_RW(ads->ctl19) = 0;
+ return;
+ }
+
+- ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen)
++ ACCESS_ONCE_RW(ads->ctl11) = (i->pkt_len & AR_FrameLen)
+ | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
+ | SM(i->txpower, AR_XmitPower)
+ | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
+@@ -135,22 +135,22 @@ ar9003_set_txdesc(struct ath_hw *ah, voi
+ val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S;
+ ctl12 |= SM(val, AR_PAPRDChainMask);
+
+- ACCESS_ONCE(ads->ctl12) = ctl12;
+- ACCESS_ONCE(ads->ctl17) = ctl17;
++ ACCESS_ONCE_RW(ads->ctl12) = ctl12;
++ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
+
+- ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
++ ACCESS_ONCE_RW(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
+ | set11nPktDurRTSCTS(i->rates, 1);
+
+- ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
++ ACCESS_ONCE_RW(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
+ | set11nPktDurRTSCTS(i->rates, 3);
+
+- ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0)
++ ACCESS_ONCE_RW(ads->ctl18) = set11nRateFlags(i->rates, 0)
+ | set11nRateFlags(i->rates, 1)
+ | set11nRateFlags(i->rates, 2)
+ | set11nRateFlags(i->rates, 3)
+ | SM(i->rtscts_rate, AR_RTSCTSRate);
+
+- ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding;
++ ACCESS_ONCE_RW(ads->ctl19) = AR_Not_Sounding;
+ }
+
+ static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
+--- ./drivers/net/wireless/ath/ath9k/hw.h
++++ ./drivers/net/wireless/ath/ath9k/hw.h
+@@ -657,7 +657,7 @@ struct ath_hw_private_ops {
+
+ /* ANI */
+ void (*ani_cache_ini_regs)(struct ath_hw *ah);
+-};
++} __no_const;
+
+ /**
+ * struct ath_hw_ops - callbacks used by hardware code and driver code
+@@ -687,7 +687,7 @@ struct ath_hw_ops {
+ void (*antdiv_comb_conf_set)(struct ath_hw *ah,
+ struct ath_hw_antcomb_conf *antconf);
+ void (*antctrl_shared_chain_lnadiv)(struct ath_hw *hw, bool enable);
+-};
++} __no_const;
+
+ struct ath_nf_limits {
+ s16 max;
+@@ -707,7 +707,7 @@ enum ath_cal_list {
+ #define AH_FASTCC 0x4
+
+ struct ath_hw {
+- struct ath_ops reg_ops;
++ ath_ops_no_const reg_ops;
+
+ struct ieee80211_hw *hw;
+ struct ath_common common;
diff --git a/sys-kernel/compat-drivers-alx/files/3.8-grsec/03-read-only-memory.patch b/sys-kernel/compat-drivers-alx/files/3.8-grsec/03-read-only-memory.patch
new file mode 100644
index 00000000..41c87b9a
--- /dev/null
+++ b/sys-kernel/compat-drivers-alx/files/3.8-grsec/03-read-only-memory.patch
@@ -0,0 +1,11 @@
+--- ./include/drm/ttm/ttm_memory.h
++++ ./include/drm/ttm/ttm_memory.h
+@@ -48,7 +48,7 @@
+
+ struct ttm_mem_shrink {
+ int (*do_shrink) (struct ttm_mem_shrink *);
+-};
++} __no_const;
+
+ /**
+ * struct ttm_mem_global - Global memory accounting structure.
diff --git a/sys-kernel/compat-drivers-alx/files/3.8-grsec/04-read-only-brcm80211.patch b/sys-kernel/compat-drivers-alx/files/3.8-grsec/04-read-only-brcm80211.patch
new file mode 100644
index 00000000..865a62d5
--- /dev/null
+++ b/sys-kernel/compat-drivers-alx/files/3.8-grsec/04-read-only-brcm80211.patch
@@ -0,0 +1,11 @@
+--- ./drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
++++ ./drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
+@@ -545,7 +545,7 @@
+ void (*carrsuppr)(struct brcms_phy *);
+ s32 (*rxsigpwr)(struct brcms_phy *, s32);
+ void (*detach)(struct brcms_phy *);
+-};
++} __no_const;
+
+ struct brcms_phy {
+ struct brcms_phy_pub pubpi_ro;
diff --git a/sys-kernel/compat-drivers-alx/files/3.8-grsec/05-read-only-i915.patch b/sys-kernel/compat-drivers-alx/files/3.8-grsec/05-read-only-i915.patch
new file mode 100644
index 00000000..15a62f5e
--- /dev/null
+++ b/sys-kernel/compat-drivers-alx/files/3.8-grsec/05-read-only-i915.patch
@@ -0,0 +1,17 @@
+--- ./drivers/gpu/drm/i915/i915_drv.h
++++ ./drivers/gpu/drm/i915/i915_drv.h
+@@ -284,12 +284,12 @@
+ /* render clock increase/decrease */
+ /* display clock increase/decrease */
+ /* pll clock increase/decrease */
+-};
++} __no_const;
+
+ struct drm_i915_gt_funcs {
+ void (*force_wake_get)(struct drm_i915_private *dev_priv);
+ void (*force_wake_put)(struct drm_i915_private *dev_priv);
+-};
++} __no_const;
+
+ #define DEV_INFO_FLAGS \
+ DEV_INFO_FLAG(is_mobile) DEV_INFO_SEP \
diff --git a/sys-kernel/compat-drivers-alx/files/3.8-grsec/06-read-only-radeon.patch b/sys-kernel/compat-drivers-alx/files/3.8-grsec/06-read-only-radeon.patch
new file mode 100644
index 00000000..f9180c45
--- /dev/null
+++ b/sys-kernel/compat-drivers-alx/files/3.8-grsec/06-read-only-radeon.patch
@@ -0,0 +1,58 @@
+--- ./drivers/gpu/drm/radeon/radeon.h
++++ ./drivers/gpu/drm/radeon/radeon.h
+@@ -741,7 +741,7 @@
+ int x2, int y2);
+ void (*draw_auto)(struct radeon_device *rdev);
+ void (*set_default_state)(struct radeon_device *rdev);
+-};
++} __no_const;
+
+ struct r600_blit {
+ struct radeon_bo *shader_obj;
+@@ -1173,7 +1173,7 @@
+ struct {
+ void (*tlb_flush)(struct radeon_device *rdev);
+ int (*set_page)(struct radeon_device *rdev, int i, uint64_t addr);
+- } gart;
++ } __no_const gart;
+ struct {
+ int (*init)(struct radeon_device *rdev);
+ void (*fini)(struct radeon_device *rdev);
+@@ -1214,7 +1214,7 @@
+ void (*set_backlight_level)(struct radeon_encoder *radeon_encoder, u8 level);
+ /* get backlight level */
+ u8 (*get_backlight_level)(struct radeon_encoder *radeon_encoder);
+- } display;
++ } __no_const display;
+ /* copy functions for bo handling */
+ struct {
+ int (*blit)(struct radeon_device *rdev,
+@@ -1266,7 +1266,7 @@
+ int (*get_pcie_lanes)(struct radeon_device *rdev);
+ void (*set_pcie_lanes)(struct radeon_device *rdev, int lanes);
+ void (*set_clock_gating)(struct radeon_device *rdev, int enable);
+- } pm;
++ } __no_const pm;
+ /* pageflipping */
+ struct {
+ void (*pre_page_flip)(struct radeon_device *rdev, int crtc);
+@@ -1542,6 +1542,8 @@
+ typedef uint32_t (*radeon_rreg_t)(struct radeon_device*, uint32_t);
+ typedef void (*radeon_wreg_t)(struct radeon_device*, uint32_t, uint32_t);
+
++typedef struct vm_operations_struct __no_const vm_operations_struct_no_const;
++
+ struct radeon_device {
+ struct device *dev;
+ struct drm_device *ddev;
+--- ./drivers/gpu/drm/radeon/radeon_ttm.c
++++ ./drivers/gpu/drm/radeon/radeon_ttm.c
+@@ -791,7 +791,7 @@
+ man->size = size >> PAGE_SHIFT;
+ }
+
+-static struct vm_operations_struct radeon_ttm_vm_ops;
++static vm_operations_struct_no_const radeon_ttm_vm_ops;
+ static const struct vm_operations_struct *ttm_vm_ops = NULL;
+
+ static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
diff --git a/sys-kernel/compat-drivers-alx/files/3.8-grsec/07-read-only-wl1251.patch b/sys-kernel/compat-drivers-alx/files/3.8-grsec/07-read-only-wl1251.patch
new file mode 100644
index 00000000..158f59ab
--- /dev/null
+++ b/sys-kernel/compat-drivers-alx/files/3.8-grsec/07-read-only-wl1251.patch
@@ -0,0 +1,11 @@
+--- ./drivers/net/wireless/ti/wl1251/wl1251.h
++++ ./drivers/net/wireless/ti/wl1251/wl1251.h
+@@ -266,7 +266,7 @@
+ void (*reset)(struct wl1251 *wl);
+ void (*enable_irq)(struct wl1251 *wl);
+ void (*disable_irq)(struct wl1251 *wl);
+-};
++} __no_const;
+
+ struct wl1251 {
+ struct ieee80211_hw *hw;
diff --git a/sys-kernel/compat-drivers-alx/files/3.8-grsec/08-read-only-ti.patch b/sys-kernel/compat-drivers-alx/files/3.8-grsec/08-read-only-ti.patch
new file mode 100644
index 00000000..8236b601
--- /dev/null
+++ b/sys-kernel/compat-drivers-alx/files/3.8-grsec/08-read-only-ti.patch
@@ -0,0 +1,11 @@
+--- ./drivers/net/wireless/ti/wlcore/wlcore.h
++++ ./drivers/net/wireless/ti/wlcore/wlcore.h
+@@ -88,7 +88,7 @@
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key_conf);
+ u32 (*pre_pkt_send)(struct wl1271 *wl, u32 buf_offset, u32 last_len);
+-};
++} __no_const;
+
+ enum wlcore_partitions {
+ PART_DOWN,
diff --git a/sys-kernel/compat-drivers-alx/files/3.8-grsec/09-read-only-nouveau.patch b/sys-kernel/compat-drivers-alx/files/3.8-grsec/09-read-only-nouveau.patch
new file mode 100644
index 00000000..d4ac0e52
--- /dev/null
+++ b/sys-kernel/compat-drivers-alx/files/3.8-grsec/09-read-only-nouveau.patch
@@ -0,0 +1,33 @@
+--- ./drivers/gpu/drm/nouveau/nouveau_fence.h
++++ ./drivers/gpu/drm/nouveau/nouveau_fence.h
+@@ -43,7 +43,7 @@
+ int (*sync)(struct nouveau_fence *, struct nouveau_channel *,
+ struct nouveau_channel *);
+ u32 (*read)(struct nouveau_channel *);
+-};
++} __no_const;
+
+ #define nouveau_fence(drm) ((struct nouveau_fence_priv *)(drm)->fence)
+
+--- ./drivers/gpu/drm/nouveau/nouveau_bios.c
++++ ./drivers/gpu/drm/nouveau/nouveau_bios.c
+@@ -1015,7 +1015,7 @@
+ struct bit_table {
+ const char id;
+ int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
+-};
++} __no_const;
+
+ #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
+
+--- ./include/drm/drm_crtc_helper.h
++++ ./include/drm/drm_crtc_helper.h
+@@ -109,7 +109,7 @@
+ struct drm_connector *connector);
+ /* disable encoder when not in use - more explicit than dpms off */
+ void (*disable)(struct drm_encoder *encoder);
+-};
++} __no_const;
+
+ /**
+ * drm_connector_helper_funcs - helper operations for connectors
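[Editorial note, not part of the commit: the iwlwifi patch that follows changes every debugfs write handler's local from "int buf_size" to "size_t buf_size". These handlers compute buf_size = min(count, sizeof(buf) - 1), where count is the size_t length from write(2); storing that into an int truncates on 64-bit and mixes signedness, which grsecurity's size-checking plugins reject. A standalone C sketch of the pattern, with illustrative function and buffer names that are not taken from iwlwifi:]

    #include <stddef.h>
    #include <string.h>

    /* Mirrors the debugfs write-handler pattern: count is what userspace
     * passed to write(2). Keeping buf_size a size_t avoids the 64-bit
     * truncation an int local would introduce. */
    static size_t parse_flag(const char *ubuf, size_t count)
    {
    	char buf[8];
    	size_t buf_size = count < sizeof(buf) - 1 ? count : sizeof(buf) - 1;

    	memset(buf, 0, sizeof(buf));
    	memcpy(buf, ubuf, buf_size);	/* stand-in for copy_from_user() */
    	return buf_size;
    }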
reset_flag; + + memset(buf, 0, sizeof(buf)); +@@ -554,7 +554,7 @@ + { + struct iwl_priv *priv = file->private_data; + char buf[8]; +- int buf_size; ++ size_t buf_size; + int ht40; + + memset(buf, 0, sizeof(buf)); +@@ -606,7 +606,7 @@ + { + struct iwl_priv *priv = file->private_data; + char buf[8]; +- int buf_size; ++ size_t buf_size; + int value; + + memset(buf, 0, sizeof(buf)); +@@ -1871,7 +1871,7 @@ + { + struct iwl_priv *priv = file->private_data; + char buf[8]; +- int buf_size; ++ size_t buf_size; + int clear; + + memset(buf, 0, sizeof(buf)); +@@ -1916,7 +1916,7 @@ + { + struct iwl_priv *priv = file->private_data; + char buf[8]; +- int buf_size; ++ size_t buf_size; + int trace; + + memset(buf, 0, sizeof(buf)); +@@ -1987,7 +1987,7 @@ + { + struct iwl_priv *priv = file->private_data; + char buf[8]; +- int buf_size; ++ size_t buf_size; + int missed; + + memset(buf, 0, sizeof(buf)); +@@ -2028,7 +2028,7 @@ + + struct iwl_priv *priv = file->private_data; + char buf[8]; +- int buf_size; ++ size_t buf_size; + int plcp; + + memset(buf, 0, sizeof(buf)); +@@ -2088,7 +2088,7 @@ + + struct iwl_priv *priv = file->private_data; + char buf[8]; +- int buf_size; ++ size_t buf_size; + int flush; + + memset(buf, 0, sizeof(buf)); +@@ -2178,7 +2178,7 @@ + + struct iwl_priv *priv = file->private_data; + char buf[8]; +- int buf_size; ++ size_t buf_size; + int rts; + + if (!priv->cfg->ht_params) +@@ -2220,7 +2220,7 @@ + { + struct iwl_priv *priv = file->private_data; + char buf[8]; +- int buf_size; ++ size_t buf_size; + + memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); +@@ -2256,7 +2256,7 @@ + struct iwl_priv *priv = file->private_data; + u32 event_log_flag; + char buf[8]; +- int buf_size; ++ size_t buf_size; + + /* check that the interface is up */ + if (!iwl_is_ready(priv)) +@@ -2310,7 +2310,7 @@ + struct iwl_priv *priv = file->private_data; + char buf[8]; + u32 calib_disabled; +- int buf_size; ++ size_t buf_size; + + memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); +--- ./drivers/net/wireless/iwlwifi/pcie/trans.c ++++ ./drivers/net/wireless/iwlwifi/pcie/trans.c +@@ -1100,7 +1100,7 @@ + struct isr_statistics *isr_stats = &trans_pcie->isr_stats; + + char buf[8]; +- int buf_size; ++ size_t buf_size; + u32 reset_flag; + + memset(buf, 0, sizeof(buf)); +@@ -1121,7 +1121,7 @@ + { + struct iwl_trans *trans = file->private_data; + char buf[8]; +- int buf_size; ++ size_t buf_size; + int csr; + + memset(buf, 0, sizeof(buf)); diff --git a/sys-kernel/compat-drivers-alx/files/4002_mac80211-2.6.29-fix-tx-ctl-no-ack-retry-count.patch b/sys-kernel/compat-drivers-alx/files/4002_mac80211-2.6.29-fix-tx-ctl-no-ack-retry-count.patch new file mode 100644 index 00000000..a2b080f5 --- /dev/null +++ b/sys-kernel/compat-drivers-alx/files/4002_mac80211-2.6.29-fix-tx-ctl-no-ack-retry-count.patch @@ -0,0 +1,17 @@ + tx.c | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c +index b47435d..751934b 100644 +--- a/net/mac80211/tx.c ++++ b/net/mac80211/tx.c +@@ -539,7 +539,8 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx) + if (tx->sta) + tx->sta->last_tx_rate = txrc.reported_rate; + +- if (unlikely(!info->control.rates[0].count)) ++ if (unlikely(!info->control.rates[0].count) || ++ info->flags & IEEE80211_TX_CTL_NO_ACK) + info->control.rates[0].count = 1; + + if (is_multicast_ether_addr(hdr->addr1)) { diff --git a/sys-kernel/compat-drivers-alx/files/4004_zd1211rw-2.6.28.patch 
b/sys-kernel/compat-drivers-alx/files/4004_zd1211rw-2.6.28.patch new file mode 100644 index 00000000..c0697dee --- /dev/null +++ b/sys-kernel/compat-drivers-alx/files/4004_zd1211rw-2.6.28.patch @@ -0,0 +1,37 @@ +diff -Naur linux-2.6.28-pentoo-r1-orig/drivers/net/wireless/zd1211rw/zd_mac.c linux-2.6.28-pentoo-r1-improved/drivers/net/wireless/zd1211rw/zd_mac.c +--- linux-2.6.28-pentoo-r1-orig/drivers/net/wireless/zd1211rw/zd_mac.c 2009-01-18 17:49:00.000000000 -0500 ++++ linux-2.6.28-pentoo-r1-improved/drivers/net/wireless/zd1211rw/zd_mac.c 2009-01-18 18:46:44.000000000 -0500 +@@ -191,14 +191,19 @@ + static int set_rx_filter(struct zd_mac *mac) + { + unsigned long flags; +- u32 filter = STA_RX_FILTER; ++ struct zd_ioreq32 ioreqs[] = { ++ {CR_RX_FILTER, STA_RX_FILTER}, ++ { CR_SNIFFER_ON, 0U }, ++ }; + + spin_lock_irqsave(&mac->lock, flags); +- if (mac->pass_ctrl) +- filter |= RX_FILTER_CTRL; ++ if (mac->pass_ctrl) { ++ ioreqs[0].value |= 0xFFFFFFFF; ++ ioreqs[1].value = 0x1; ++ } + spin_unlock_irqrestore(&mac->lock, flags); + +- return zd_iowrite32(&mac->chip, CR_RX_FILTER, filter); ++ return zd_iowrite32a(&mac->chip, ioreqs, ARRAY_SIZE(ioreqs)); + } + + static int set_mc_hash(struct zd_mac *mac) +@@ -657,7 +662,8 @@ + /* Caller has to ensure that length >= sizeof(struct rx_status). */ + status = (struct rx_status *) + (buffer + (length - sizeof(struct rx_status))); +- if (status->frame_status & ZD_RX_ERROR) { ++ if ((status->frame_status & ZD_RX_ERROR) || ++ (status->frame_status & ~0x21)) { + if (mac->pass_failed_fcs && + (status->frame_status & ZD_RX_CRC32_ERROR)) { + stats.flag |= RX_FLAG_FAILED_FCS_CRC; diff --git a/sys-kernel/compat-drivers-alx/files/ath_regd_optional.patch b/sys-kernel/compat-drivers-alx/files/ath_regd_optional.patch new file mode 100644 index 00000000..415fc896 --- /dev/null +++ b/sys-kernel/compat-drivers-alx/files/ath_regd_optional.patch @@ -0,0 +1,39 @@ +diff -Naur compat-wireless-3.0-rc4-1-orig/drivers/net/wireless/ath/regd.c compat-wireless-3.0-rc4-1/drivers/net/wireless/ath/regd.c +--- compat-wireless-3.0-rc4-1-orig/drivers/net/wireless/ath/regd.c 2011-06-23 19:02:22.000000000 -0400 ++++ compat-wireless-3.0-rc4-1/drivers/net/wireless/ath/regd.c 2011-06-26 01:52:35.000000000 -0400 +@@ -193,6 +193,8 @@ + u32 bandwidth = 0; + int r; + ++ return; ++ + for (band = 0; band < IEEE80211_NUM_BANDS; band++) { + + if (!wiphy->bands[band]) +@@ -252,6 +254,8 @@ + u32 bandwidth = 0; + int r; + ++ return; ++ + sband = wiphy->bands[IEEE80211_BAND_2GHZ]; + + /* +@@ -299,6 +303,8 @@ + struct ieee80211_channel *ch; + unsigned int i; + ++ return; ++ + if (!wiphy->bands[IEEE80211_BAND_5GHZ]) + return; + +@@ -466,6 +472,8 @@ + { + const struct ieee80211_regdomain *regd; + ++ return 0; ++ + wiphy->reg_notifier = reg_notifier; + wiphy->flags |= WIPHY_FLAG_STRICT_REGULATORY; + diff --git a/sys-kernel/compat-drivers-alx/files/compat-drivers-3.7_rc1_p6-grsec-warnings.patch b/sys-kernel/compat-drivers-alx/files/compat-drivers-3.7_rc1_p6-grsec-warnings.patch new file mode 100644 index 00000000..4ff7a73b --- /dev/null +++ b/sys-kernel/compat-drivers-alx/files/compat-drivers-3.7_rc1_p6-grsec-warnings.patch @@ -0,0 +1,34 @@ +Fixes for: +drivers/net/wireless/ath/ath6kl/sdio.c: In function ‘ath6kl_sdio_alloc_prep_scat_req’: +drivers/net/wireless/ath/ath6kl/sdio.c:1478:1: warning: find_arg_number: cannot find the buf_sz argument in ath6kl_sdio_alloc_prep_scat_req [enabled by default] +drivers/net/wireless/ath/ath6kl/sdio.c:1478:1: warning: find_arg_number: cannot find the sg_sz argument in 
ath6kl_sdio_alloc_prep_scat_req [enabled by default] +--- drivers/net/wireless/ath/ath6kl/sdio.c ++++ drivers/net/wireless/ath/ath6kl/sdio.c +@@ -341,11 +341,14 @@ + scat_list_sz = (n_scat_entry - 1) * sizeof(struct hif_scatter_item); + scat_req_sz = sizeof(*s_req) + scat_list_sz; + +- if (!virt_scat) ++ if (!virt_scat) { + sg_sz = sizeof(struct scatterlist) * n_scat_entry; +- else ++ buf_sz = 0; ++ } else { ++ sg_sz = 0; + buf_sz = 2 * L1_CACHE_BYTES + + ATH6KL_MAX_TRANSFER_SIZE_PER_SCATTER; ++ } + + for (i = 0; i < n_scat_req; i++) { + /* allocate the scatter request */ +--- drivers/gpu/drm/i915/intel_display.c ++++ drivers/gpu/drm/i915/intel_display.c +@@ -6216,7 +6216,7 @@ + obj = work->old_fb_obj; + + atomic_clear_mask(1 << intel_crtc->plane, +- &obj->pending_flip.counter); ++ &obj->pending_flip); + + wake_up(&dev_priv->pending_flip_queue); + schedule_work(&work->work); diff --git a/sys-kernel/compat-drivers-alx/files/compat-drivers-3.7_rc1_p6-grsec.patch b/sys-kernel/compat-drivers-alx/files/compat-drivers-3.7_rc1_p6-grsec.patch new file mode 100644 index 00000000..9dfc5adf --- /dev/null +++ b/sys-kernel/compat-drivers-alx/files/compat-drivers-3.7_rc1_p6-grsec.patch @@ -0,0 +1,8365 @@ +--- drivers/net/wireless/ath/ath.h 2012-09-27 23:19:12.000000000 +0000 ++++ drivers/net/wireless/ath/ath.h 2012-10-15 17:30:59.818924529 +0000 +@@ -119,6 +119,7 @@ struct ath_ops { + void (*write_flush) (void *); + u32 (*rmw)(void *, u32 reg_offset, u32 set, u32 clr); + }; ++typedef struct ath_ops __no_const ath_ops_no_const; + + struct ath_common; + struct ath_bus_ops; +--- drivers/net/wireless/ath/ath9k/ar9002_mac.c 2012-09-27 23:19:11.000000000 +0000 ++++ drivers/net/wireless/ath/ath9k/ar9002_mac.c 2012-10-15 17:30:59.816924531 +0000 +@@ -184,8 +184,8 @@ ar9002_set_txdesc(struct ath_hw *ah, voi + ads->ds_txstatus6 = ads->ds_txstatus7 = 0; + ads->ds_txstatus8 = ads->ds_txstatus9 = 0; + +- ACCESS_ONCE(ads->ds_link) = i->link; +- ACCESS_ONCE(ads->ds_data) = i->buf_addr[0]; ++ ACCESS_ONCE_RW(ads->ds_link) = i->link; ++ ACCESS_ONCE_RW(ads->ds_data) = i->buf_addr[0]; + + ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore); + ctl6 = SM(i->keytype, AR_EncrType); +@@ -199,26 +199,26 @@ ar9002_set_txdesc(struct ath_hw *ah, voi + + if ((i->is_first || i->is_last) && + i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) { +- ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0) ++ ACCESS_ONCE_RW(ads->ds_ctl2) = set11nTries(i->rates, 0) + | set11nTries(i->rates, 1) + | set11nTries(i->rates, 2) + | set11nTries(i->rates, 3) + | (i->dur_update ? AR_DurUpdateEna : 0) + | SM(0, AR_BurstDur); + +- ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0) ++ ACCESS_ONCE_RW(ads->ds_ctl3) = set11nRate(i->rates, 0) + | set11nRate(i->rates, 1) + | set11nRate(i->rates, 2) + | set11nRate(i->rates, 3); + } else { +- ACCESS_ONCE(ads->ds_ctl2) = 0; +- ACCESS_ONCE(ads->ds_ctl3) = 0; ++ ACCESS_ONCE_RW(ads->ds_ctl2) = 0; ++ ACCESS_ONCE_RW(ads->ds_ctl3) = 0; + } + + if (!i->is_first) { +- ACCESS_ONCE(ads->ds_ctl0) = 0; +- ACCESS_ONCE(ads->ds_ctl1) = ctl1; +- ACCESS_ONCE(ads->ds_ctl6) = ctl6; ++ ACCESS_ONCE_RW(ads->ds_ctl0) = 0; ++ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1; ++ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6; + return; + } + +@@ -243,7 +243,7 @@ ar9002_set_txdesc(struct ath_hw *ah, voi + break; + } + +- ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen) ++ ACCESS_ONCE_RW(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen) + | (i->flags & ATH9K_TXDESC_VMF ? 
AR_VirtMoreFrag : 0) + | SM(i->txpower, AR_XmitPower) + | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0) +@@ -253,19 +253,19 @@ ar9002_set_txdesc(struct ath_hw *ah, voi + | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable : + (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0)); + +- ACCESS_ONCE(ads->ds_ctl1) = ctl1; +- ACCESS_ONCE(ads->ds_ctl6) = ctl6; ++ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1; ++ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6; + + if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST) + return; + +- ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0) ++ ACCESS_ONCE_RW(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0) + | set11nPktDurRTSCTS(i->rates, 1); + +- ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2) ++ ACCESS_ONCE_RW(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2) + | set11nPktDurRTSCTS(i->rates, 3); + +- ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0) ++ ACCESS_ONCE_RW(ads->ds_ctl7) = set11nRateFlags(i->rates, 0) + | set11nRateFlags(i->rates, 1) + | set11nRateFlags(i->rates, 2) + | set11nRateFlags(i->rates, 3) +--- drivers/net/wireless/ath/ath9k/ar9003_mac.c 2012-09-27 23:19:12.000000000 +0000 ++++ drivers/net/wireless/ath/ath9k/ar9003_mac.c 2012-10-15 17:30:59.817924530 +0000 +@@ -39,47 +39,47 @@ ar9003_set_txdesc(struct ath_hw *ah, voi + (i->qcu << AR_TxQcuNum_S) | desc_len; + + checksum += val; +- ACCESS_ONCE(ads->info) = val; ++ ACCESS_ONCE_RW(ads->info) = val; + + checksum += i->link; +- ACCESS_ONCE(ads->link) = i->link; ++ ACCESS_ONCE_RW(ads->link) = i->link; + + checksum += i->buf_addr[0]; +- ACCESS_ONCE(ads->data0) = i->buf_addr[0]; ++ ACCESS_ONCE_RW(ads->data0) = i->buf_addr[0]; + checksum += i->buf_addr[1]; +- ACCESS_ONCE(ads->data1) = i->buf_addr[1]; ++ ACCESS_ONCE_RW(ads->data1) = i->buf_addr[1]; + checksum += i->buf_addr[2]; +- ACCESS_ONCE(ads->data2) = i->buf_addr[2]; ++ ACCESS_ONCE_RW(ads->data2) = i->buf_addr[2]; + checksum += i->buf_addr[3]; +- ACCESS_ONCE(ads->data3) = i->buf_addr[3]; ++ ACCESS_ONCE_RW(ads->data3) = i->buf_addr[3]; + + checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen); +- ACCESS_ONCE(ads->ctl3) = val; ++ ACCESS_ONCE_RW(ads->ctl3) = val; + checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen); +- ACCESS_ONCE(ads->ctl5) = val; ++ ACCESS_ONCE_RW(ads->ctl5) = val; + checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen); +- ACCESS_ONCE(ads->ctl7) = val; ++ ACCESS_ONCE_RW(ads->ctl7) = val; + checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen); +- ACCESS_ONCE(ads->ctl9) = val; ++ ACCESS_ONCE_RW(ads->ctl9) = val; + + checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff); +- ACCESS_ONCE(ads->ctl10) = checksum; ++ ACCESS_ONCE_RW(ads->ctl10) = checksum; + + if (i->is_first || i->is_last) { +- ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0) ++ ACCESS_ONCE_RW(ads->ctl13) = set11nTries(i->rates, 0) + | set11nTries(i->rates, 1) + | set11nTries(i->rates, 2) + | set11nTries(i->rates, 3) + | (i->dur_update ? 
AR_DurUpdateEna : 0) + | SM(0, AR_BurstDur); + +- ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0) ++ ACCESS_ONCE_RW(ads->ctl14) = set11nRate(i->rates, 0) + | set11nRate(i->rates, 1) + | set11nRate(i->rates, 2) + | set11nRate(i->rates, 3); + } else { +- ACCESS_ONCE(ads->ctl13) = 0; +- ACCESS_ONCE(ads->ctl14) = 0; ++ ACCESS_ONCE_RW(ads->ctl13) = 0; ++ ACCESS_ONCE_RW(ads->ctl14) = 0; + } + + ads->ctl20 = 0; +@@ -89,17 +89,17 @@ ar9003_set_txdesc(struct ath_hw *ah, voi + + ctl17 = SM(i->keytype, AR_EncrType); + if (!i->is_first) { +- ACCESS_ONCE(ads->ctl11) = 0; +- ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore; +- ACCESS_ONCE(ads->ctl15) = 0; +- ACCESS_ONCE(ads->ctl16) = 0; +- ACCESS_ONCE(ads->ctl17) = ctl17; +- ACCESS_ONCE(ads->ctl18) = 0; +- ACCESS_ONCE(ads->ctl19) = 0; ++ ACCESS_ONCE_RW(ads->ctl11) = 0; ++ ACCESS_ONCE_RW(ads->ctl12) = i->is_last ? 0 : AR_TxMore; ++ ACCESS_ONCE_RW(ads->ctl15) = 0; ++ ACCESS_ONCE_RW(ads->ctl16) = 0; ++ ACCESS_ONCE_RW(ads->ctl17) = ctl17; ++ ACCESS_ONCE_RW(ads->ctl18) = 0; ++ ACCESS_ONCE_RW(ads->ctl19) = 0; + return; + } + +- ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen) ++ ACCESS_ONCE_RW(ads->ctl11) = (i->pkt_len & AR_FrameLen) + | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0) + | SM(i->txpower, AR_XmitPower) + | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0) +@@ -135,22 +135,22 @@ ar9003_set_txdesc(struct ath_hw *ah, voi + val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S; + ctl12 |= SM(val, AR_PAPRDChainMask); + +- ACCESS_ONCE(ads->ctl12) = ctl12; +- ACCESS_ONCE(ads->ctl17) = ctl17; ++ ACCESS_ONCE_RW(ads->ctl12) = ctl12; ++ ACCESS_ONCE_RW(ads->ctl17) = ctl17; + +- ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0) ++ ACCESS_ONCE_RW(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0) + | set11nPktDurRTSCTS(i->rates, 1); + +- ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2) ++ ACCESS_ONCE_RW(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2) + | set11nPktDurRTSCTS(i->rates, 3); + +- ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0) ++ ACCESS_ONCE_RW(ads->ctl18) = set11nRateFlags(i->rates, 0) + | set11nRateFlags(i->rates, 1) + | set11nRateFlags(i->rates, 2) + | set11nRateFlags(i->rates, 3) + | SM(i->rtscts_rate, AR_RTSCTSRate); + +- ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding; ++ ACCESS_ONCE_RW(ads->ctl19) = AR_Not_Sounding; + } + + static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads) +--- drivers/net/wireless/ath/ath9k/hw.h 2012-09-27 23:19:12.000000000 +0000 ++++ drivers/net/wireless/ath/ath9k/hw.h 2012-10-15 17:30:59.817924530 +0000 +@@ -657,7 +657,7 @@ struct ath_hw_private_ops { + + /* ANI */ + void (*ani_cache_ini_regs)(struct ath_hw *ah); +-}; ++} __no_const; + + /** + * struct ath_hw_ops - callbacks used by hardware code and driver code +@@ -687,7 +687,7 @@ struct ath_hw_ops { + void (*antdiv_comb_conf_set)(struct ath_hw *ah, + struct ath_hw_antcomb_conf *antconf); + void (*antctrl_shared_chain_lnadiv)(struct ath_hw *hw, bool enable); +-}; ++} __no_const; + + struct ath_nf_limits { + s16 max; +@@ -707,7 +707,7 @@ enum ath_cal_list { + #define AH_FASTCC 0x4 + + struct ath_hw { +- struct ath_ops reg_ops; ++ ath_ops_no_const reg_ops; + + struct ieee80211_hw *hw; + struct ath_common common; +--- drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h 2012-09-27 23:19:11.000000000 +0000 ++++ drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h 2012-10-15 17:30:59.818924529 +0000 +@@ -545,7 +545,7 @@ struct phy_func_ptr { + void (*carrsuppr)(struct brcms_phy *); + s32 (*rxsigpwr)(struct brcms_phy *, s32); + void 
(*detach)(struct brcms_phy *); +-}; ++} __no_const; + + struct brcms_phy { + struct brcms_phy_pub pubpi_ro; +--- drivers/net/wireless/iwlegacy/3945-mac.c 2012-09-27 23:19:12.000000000 +0000 ++++ drivers/net/wireless/iwlegacy/3945-mac.c 2012-10-15 17:30:59.819924529 +0000 +@@ -3613,7 +3613,9 @@ il3945_pci_probe(struct pci_dev *pdev, c + */ + if (il3945_mod_params.disable_hw_scan) { + D_INFO("Disabling hw_scan\n"); +- il3945_mac_ops.hw_scan = NULL; ++ pax_open_kernel(); ++ *(void **)&il3945_mac_ops.hw_scan = NULL; ++ pax_close_kernel(); + } + + D_INFO("*** LOAD DRIVER ***\n"); +--- drivers/net/wireless/iwlwifi/dvm/debugfs.c 2012-09-27 23:19:11.000000000 +0000 ++++ drivers/net/wireless/iwlwifi/dvm/debugfs.c 2012-10-15 17:30:59.819924529 +0000 +@@ -203,7 +203,7 @@ static ssize_t iwl_dbgfs_sram_write(stru + { + struct iwl_priv *priv = file->private_data; + char buf[64]; +- int buf_size; ++ size_t buf_size; + u32 offset, len; + + memset(buf, 0, sizeof(buf)); +@@ -473,7 +473,7 @@ static ssize_t iwl_dbgfs_rx_handlers_wri + struct iwl_priv *priv = file->private_data; + + char buf[8]; +- int buf_size; ++ size_t buf_size; + u32 reset_flag; + + memset(buf, 0, sizeof(buf)); +@@ -554,7 +554,7 @@ static ssize_t iwl_dbgfs_disable_ht40_wr + { + struct iwl_priv *priv = file->private_data; + char buf[8]; +- int buf_size; ++ size_t buf_size; + int ht40; + + memset(buf, 0, sizeof(buf)); +@@ -606,7 +606,7 @@ static ssize_t iwl_dbgfs_sleep_level_ove + { + struct iwl_priv *priv = file->private_data; + char buf[8]; +- int buf_size; ++ size_t buf_size; + int value; + + memset(buf, 0, sizeof(buf)); +@@ -1871,7 +1871,7 @@ static ssize_t iwl_dbgfs_clear_ucode_sta + { + struct iwl_priv *priv = file->private_data; + char buf[8]; +- int buf_size; ++ size_t buf_size; + int clear; + + memset(buf, 0, sizeof(buf)); +@@ -1916,7 +1916,7 @@ static ssize_t iwl_dbgfs_ucode_tracing_w + { + struct iwl_priv *priv = file->private_data; + char buf[8]; +- int buf_size; ++ size_t buf_size; + int trace; + + memset(buf, 0, sizeof(buf)); +@@ -1987,7 +1987,7 @@ static ssize_t iwl_dbgfs_missed_beacon_w + { + struct iwl_priv *priv = file->private_data; + char buf[8]; +- int buf_size; ++ size_t buf_size; + int missed; + + memset(buf, 0, sizeof(buf)); +@@ -2028,7 +2028,7 @@ static ssize_t iwl_dbgfs_plcp_delta_writ + + struct iwl_priv *priv = file->private_data; + char buf[8]; +- int buf_size; ++ size_t buf_size; + int plcp; + + memset(buf, 0, sizeof(buf)); +@@ -2088,7 +2088,7 @@ static ssize_t iwl_dbgfs_txfifo_flush_wr + + struct iwl_priv *priv = file->private_data; + char buf[8]; +- int buf_size; ++ size_t buf_size; + int flush; + + memset(buf, 0, sizeof(buf)); +@@ -2178,7 +2178,7 @@ static ssize_t iwl_dbgfs_protection_mode + + struct iwl_priv *priv = file->private_data; + char buf[8]; +- int buf_size; ++ size_t buf_size; + int rts; + + if (!priv->cfg->ht_params) +@@ -2220,7 +2220,7 @@ static ssize_t iwl_dbgfs_echo_test_write + { + struct iwl_priv *priv = file->private_data; + char buf[8]; +- int buf_size; ++ size_t buf_size; + + memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); +@@ -2256,7 +2256,7 @@ static ssize_t iwl_dbgfs_log_event_write + struct iwl_priv *priv = file->private_data; + u32 event_log_flag; + char buf[8]; +- int buf_size; ++ size_t buf_size; + + /* check that the interface is up */ + if (!iwl_is_ready(priv)) +@@ -2310,7 +2310,7 @@ static ssize_t iwl_dbgfs_calib_disabled_ + struct iwl_priv *priv = file->private_data; + char buf[8]; + u32 calib_disabled; +- int buf_size; ++ size_t buf_size; + + memset(buf, 
0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); +--- drivers/net/wireless/iwlwifi/pcie/trans.c 2012-09-27 23:19:12.000000000 +0000 ++++ drivers/net/wireless/iwlwifi/pcie/trans.c 2012-10-15 17:30:59.820924530 +0000 +@@ -1944,7 +1944,7 @@ static ssize_t iwl_dbgfs_interrupt_write + struct isr_statistics *isr_stats = &trans_pcie->isr_stats; + + char buf[8]; +- int buf_size; ++ size_t buf_size; + u32 reset_flag; + + memset(buf, 0, sizeof(buf)); +@@ -1965,7 +1965,7 @@ static ssize_t iwl_dbgfs_csr_write(struc + { + struct iwl_trans *trans = file->private_data; + char buf[8]; +- int buf_size; ++ size_t buf_size; + int csr; + + memset(buf, 0, sizeof(buf)); +--- drivers/net/wireless/mac80211_hwsim.c 2012-09-27 23:19:12.000000000 +0000 ++++ drivers/net/wireless/mac80211_hwsim.c 2012-10-15 17:30:59.820924530 +0000 +@@ -1748,9 +1748,11 @@ static int __init init_mac80211_hwsim(vo + return -EINVAL; + + if (fake_hw_scan) { +- mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan; +- mac80211_hwsim_ops.sw_scan_start = NULL; +- mac80211_hwsim_ops.sw_scan_complete = NULL; ++ pax_open_kernel(); ++ *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan; ++ *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL; ++ *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL; ++ pax_close_kernel(); + } + + spin_lock_init(&hwsim_radio_lock); +--- drivers/net/wireless/mwifiex/main.h 2012-09-27 23:19:11.000000000 +0000 ++++ drivers/net/wireless/mwifiex/main.h 2012-10-15 17:30:59.820924530 +0000 +@@ -571,7 +571,7 @@ struct mwifiex_if_ops { + int (*event_complete) (struct mwifiex_adapter *, struct sk_buff *); + int (*data_complete) (struct mwifiex_adapter *, struct sk_buff *); + int (*dnld_fw) (struct mwifiex_adapter *, struct mwifiex_fw_image *); +-}; ++} __no_const; + + struct mwifiex_adapter { + u8 iface_type; +--- drivers/net/wireless/rndis_wlan.c 2012-09-27 23:19:12.000000000 +0000 ++++ drivers/net/wireless/rndis_wlan.c 2012-10-15 17:30:59.821924531 +0000 +@@ -1235,7 +1235,7 @@ static int set_rts_threshold(struct usbn + + netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold); + +- if (rts_threshold < 0 || rts_threshold > 2347) ++ if (rts_threshold > 2347) + rts_threshold = 2347; + + tmp = cpu_to_le32(rts_threshold); +--- drivers/net/wireless/rt2x00/rt2x00.h 2012-09-27 23:19:11.000000000 +0000 ++++ drivers/net/wireless/rt2x00/rt2x00.h 2012-10-15 17:30:59.821924531 +0000 +@@ -397,7 +397,7 @@ struct rt2x00_intf { + * for hardware which doesn't support hardware + * sequence counting. + */ +- atomic_t seqno; ++ atomic_unchecked_t seqno; + }; + + static inline struct rt2x00_intf* vif_to_intf(struct ieee80211_vif *vif) +--- drivers/net/wireless/rt2x00/rt2x00queue.c 2012-09-27 23:19:11.000000000 +0000 ++++ drivers/net/wireless/rt2x00/rt2x00queue.c 2012-10-15 17:30:59.822924531 +0000 +@@ -240,9 +240,9 @@ static void rt2x00queue_create_tx_descri + * sequence counter given by mac80211. 
+ */ + if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags)) +- seqno = atomic_add_return(0x10, &intf->seqno); ++ seqno = atomic_add_return_unchecked(0x10, &intf->seqno); + else +- seqno = atomic_read(&intf->seqno); ++ seqno = atomic_read_unchecked(&intf->seqno); + + hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG); + hdr->seq_ctrl |= cpu_to_le16(seqno); +--- drivers/net/wireless/ti/wl1251/wl1251.h 2012-09-27 23:19:11.000000000 +0000 ++++ drivers/net/wireless/ti/wl1251/wl1251.h 2012-10-15 17:30:59.822924531 +0000 +@@ -266,7 +266,7 @@ struct wl1251_if_operations { + void (*reset)(struct wl1251 *wl); + void (*enable_irq)(struct wl1251 *wl); + void (*disable_irq)(struct wl1251 *wl); +-}; ++} __no_const; + + struct wl1251 { + struct ieee80211_hw *hw; +--- drivers/net/wireless/ti/wlcore/wlcore.h 2012-09-27 23:19:12.000000000 +0000 ++++ drivers/net/wireless/ti/wlcore/wlcore.h 2012-10-15 17:30:59.822924531 +0000 +@@ -81,7 +81,7 @@ struct wlcore_ops { + struct ieee80211_sta *sta, + struct ieee80211_key_conf *key_conf); + u32 (*pre_pkt_send)(struct wl1271 *wl, u32 buf_offset, u32 last_len); +-}; ++} __no_const; + + enum wlcore_partitions { + PART_DOWN, +--- include/linux/unaligned/access_ok.h 2012-09-17 19:15:56.000000000 +0000 ++++ include/linux/unaligned/access_ok.h 2012-10-15 17:30:59.823924531 +0000 +@@ -6,32 +6,32 @@ + + static inline u16 get_unaligned_le16(const void *p) + { +- return le16_to_cpup((__le16 *)p); ++ return le16_to_cpup((const __le16 *)p); + } + + static inline u32 get_unaligned_le32(const void *p) + { +- return le32_to_cpup((__le32 *)p); ++ return le32_to_cpup((const __le32 *)p); + } + + static inline u64 get_unaligned_le64(const void *p) + { +- return le64_to_cpup((__le64 *)p); ++ return le64_to_cpup((const __le64 *)p); + } + + static inline u16 get_unaligned_be16(const void *p) + { +- return be16_to_cpup((__be16 *)p); ++ return be16_to_cpup((const __be16 *)p); + } + + static inline u32 get_unaligned_be32(const void *p) + { +- return be32_to_cpup((__be32 *)p); ++ return be32_to_cpup((const __be32 *)p); + } + + static inline u64 get_unaligned_be64(const void *p) + { +- return be64_to_cpup((__be64 *)p); ++ return be64_to_cpup((const __be64 *)p); + } + + static inline void put_unaligned_le16(u16 val, void *p) +--- net/bluetooth/hci_sock.c 2012-09-27 23:19:12.000000000 +0000 ++++ net/bluetooth/hci_sock.c 2012-10-15 17:30:59.825924531 +0000 +@@ -940,7 +940,7 @@ static int hci_sock_setsockopt(struct so + uf.event_mask[1] = *((u32 *) f->event_mask + 1); + } + +- len = min_t(unsigned int, len, sizeof(uf)); ++ len = min((size_t)len, sizeof(uf)); + if (copy_from_user(&uf, optval, len)) { + err = -EFAULT; + break; +--- net/bluetooth/l2cap_core.c 2012-09-27 23:19:11.000000000 +0000 ++++ net/bluetooth/l2cap_core.c 2012-10-15 17:30:59.825924531 +0000 +@@ -3165,8 +3165,10 @@ static int l2cap_parse_conf_rsp(struct l + break; + + case L2CAP_CONF_RFC: +- if (olen == sizeof(rfc)) +- memcpy(&rfc, (void *)val, olen); ++ if (olen != sizeof(rfc)) ++ break; ++ ++ memcpy(&rfc, (void *)val, olen); + + if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) && + rfc.mode != chan->mode) +--- net/bluetooth/l2cap_sock.c 2012-09-27 23:19:12.000000000 +0000 ++++ net/bluetooth/l2cap_sock.c 2012-10-15 17:30:59.826924531 +0000 +@@ -467,7 +467,8 @@ static int l2cap_sock_setsockopt_old(str + struct sock *sk = sock->sk; + struct l2cap_chan *chan = l2cap_pi(sk)->chan; + struct l2cap_options opts; +- int len, err = 0; ++ int err = 0; ++ size_t len = optlen; + u32 opt; + + BT_DBG("sk %p", sk); +@@ -489,7 +490,7 @@ 
static int l2cap_sock_setsockopt_old(str + opts.max_tx = chan->max_tx; + opts.txwin_size = chan->tx_win; + +- len = min_t(unsigned int, sizeof(opts), optlen); ++ len = min(sizeof(opts), len); + if (copy_from_user((char *) &opts, optval, len)) { + err = -EFAULT; + break; +@@ -574,7 +575,8 @@ static int l2cap_sock_setsockopt(struct + struct bt_security sec; + struct bt_power pwr; + struct l2cap_conn *conn; +- int len, err = 0; ++ int err = 0; ++ size_t len = optlen; + u32 opt; + + BT_DBG("sk %p", sk); +@@ -597,7 +599,7 @@ static int l2cap_sock_setsockopt(struct + + sec.level = BT_SECURITY_LOW; + +- len = min_t(unsigned int, sizeof(sec), optlen); ++ len = min(sizeof(sec), len); + if (copy_from_user((char *) &sec, optval, len)) { + err = -EFAULT; + break; +@@ -694,7 +696,7 @@ static int l2cap_sock_setsockopt(struct + + pwr.force_active = BT_POWER_FORCE_ACTIVE_ON; + +- len = min_t(unsigned int, sizeof(pwr), optlen); ++ len = min(sizeof(pwr), len); + if (copy_from_user((char *) &pwr, optval, len)) { + err = -EFAULT; + break; +--- net/bluetooth/rfcomm/sock.c 2012-09-27 23:19:12.000000000 +0000 ++++ net/bluetooth/rfcomm/sock.c 2012-10-15 17:30:59.826924531 +0000 +@@ -676,7 +676,7 @@ static int rfcomm_sock_setsockopt(struct + struct sock *sk = sock->sk; + struct bt_security sec; + int err = 0; +- size_t len; ++ size_t len = optlen; + u32 opt; + + BT_DBG("sk %p", sk); +@@ -698,7 +698,7 @@ static int rfcomm_sock_setsockopt(struct + + sec.level = BT_SECURITY_LOW; + +- len = min_t(unsigned int, sizeof(sec), optlen); ++ len = min(sizeof(sec), len); + if (copy_from_user((char *) &sec, optval, len)) { + err = -EFAULT; + break; +--- net/bluetooth/rfcomm/tty.c 2012-09-27 23:19:12.000000000 +0000 ++++ net/bluetooth/rfcomm/tty.c 2012-10-15 17:30:59.826924531 +0000 +@@ -309,7 +309,7 @@ static void rfcomm_dev_del(struct rfcomm + BUG_ON(test_and_set_bit(RFCOMM_TTY_RELEASED, &dev->flags)); + + spin_lock_irqsave(&dev->port.lock, flags); +- if (dev->port.count > 0) { ++ if (atomic_read(&dev->port.count) > 0) { + spin_unlock_irqrestore(&dev->port.lock, flags); + return; + } +@@ -664,10 +664,10 @@ static int rfcomm_tty_open(struct tty_st + return -ENODEV; + + BT_DBG("dev %p dst %s channel %d opened %d", dev, batostr(&dev->dst), +- dev->channel, dev->port.count); ++ dev->channel, atomic_read(&dev->port.count)); + + spin_lock_irqsave(&dev->port.lock, flags); +- if (++dev->port.count > 1) { ++ if (atomic_inc_return(&dev->port.count) > 1) { + spin_unlock_irqrestore(&dev->port.lock, flags); + return 0; + } +@@ -736,10 +736,10 @@ static void rfcomm_tty_close(struct tty_ + return; + + BT_DBG("tty %p dev %p dlc %p opened %d", tty, dev, dev->dlc, +- dev->port.count); ++ atomic_read(&dev->port.count)); + + spin_lock_irqsave(&dev->port.lock, flags); +- if (!--dev->port.count) { ++ if (!atomic_dec_return(&dev->port.count)) { + spin_unlock_irqrestore(&dev->port.lock, flags); + if (dev->tty_dev->parent) + #if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,29)) +--- net/mac80211/ieee80211_i.h 2012-09-27 23:19:12.000000000 +0000 ++++ net/mac80211/ieee80211_i.h 2012-10-15 17:30:59.827924531 +0000 +@@ -28,6 +28,7 @@ + #include + #include + #include ++#include + #include "key.h" + #include "sta_info.h" + #include "debug.h" +@@ -840,7 +841,7 @@ struct ieee80211_local { + /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */ + spinlock_t queue_stop_reason_lock; + +- int open_count; ++ local_t open_count; + int monitors, cooked_mntrs; + /* number of interfaces with corresponding FIF_ flags */ + int fif_fcsfail, fif_plcpfail, 
fif_control, fif_other_bss, fif_pspoll, +--- net/mac80211/iface.c 2012-09-27 23:19:12.000000000 +0000 ++++ net/mac80211/iface.c 2012-10-15 17:30:59.827924531 +0000 +@@ -454,7 +454,7 @@ static int ieee80211_do_open(struct net_ + break; + } + +- if (local->open_count == 0) { ++ if (local_read(&local->open_count) == 0) { + res = drv_start(local); + if (res) + goto err_del_bss; +@@ -497,7 +497,7 @@ static int ieee80211_do_open(struct net_ + break; + } + +- if (local->monitors == 0 && local->open_count == 0) { ++ if (local->monitors == 0 && local_read(&local->open_count) == 0) { + res = ieee80211_add_virtual_monitor(local); + if (res) + goto err_stop; +@@ -594,7 +594,7 @@ static int ieee80211_do_open(struct net_ + mutex_unlock(&local->mtx); + + if (coming_up) +- local->open_count++; ++ local_inc(&local->open_count); + + if (hw_reconf_flags) + ieee80211_hw_config(local, hw_reconf_flags); +@@ -607,7 +607,7 @@ static int ieee80211_do_open(struct net_ + err_del_interface: + drv_remove_interface(local, sdata); + err_stop: +- if (!local->open_count) ++ if (!local_read(&local->open_count)) + drv_stop(local); + err_del_bss: + sdata->bss = NULL; +@@ -741,7 +741,7 @@ static void ieee80211_do_stop(struct iee + } + + if (going_down) +- local->open_count--; ++ local_dec(&local->open_count); + + switch (sdata->vif.type) { + case NL80211_IFTYPE_AP_VLAN: +@@ -801,7 +801,7 @@ static void ieee80211_do_stop(struct iee + + ieee80211_recalc_ps(local, -1); + +- if (local->open_count == 0) { ++ if (local_read(&local->open_count) == 0) { + if (local->ops->napi_poll) + napi_disable(&local->napi); + ieee80211_clear_tx_pending(local); +@@ -833,7 +833,7 @@ static void ieee80211_do_stop(struct iee + } + spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); + +- if (local->monitors == local->open_count && local->monitors > 0) ++ if (local->monitors == local_read(&local->open_count) && local->monitors > 0) + ieee80211_add_virtual_monitor(local); + } + +--- net/mac80211/main.c 2012-09-27 23:19:12.000000000 +0000 ++++ net/mac80211/main.c 2012-10-15 17:30:59.827924531 +0000 +@@ -170,7 +170,7 @@ int ieee80211_hw_config(struct ieee80211 + local->hw.conf.power_level = power; + } + +- if (changed && local->open_count) { ++ if (changed && local_read(&local->open_count)) { + ret = drv_config(local, changed); + /* + * Goal: +--- net/mac80211/pm.c 2012-09-27 23:19:11.000000000 +0000 ++++ net/mac80211/pm.c 2012-10-15 17:30:59.828924531 +0000 +@@ -34,7 +34,7 @@ int __ieee80211_suspend(struct ieee80211 + struct ieee80211_sub_if_data *sdata; + struct sta_info *sta; + +- if (!local->open_count) ++ if (!local_read(&local->open_count)) + goto suspend; + + ieee80211_scan_cancel(local); +@@ -72,7 +72,7 @@ int __ieee80211_suspend(struct ieee80211 + cancel_work_sync(&local->dynamic_ps_enable_work); + del_timer_sync(&local->dynamic_ps_timer); + +- local->wowlan = wowlan && local->open_count; ++ local->wowlan = wowlan && local_read(&local->open_count); + if (local->wowlan) { + int err = drv_suspend(local, wowlan); + if (err < 0) { +@@ -143,7 +143,7 @@ int __ieee80211_suspend(struct ieee80211 + drv_remove_interface(local, sdata); + + /* stop hardware - this must stop RX */ +- if (local->open_count) ++ if (local_read(&local->open_count)) + ieee80211_stop_device(local); + + suspend: +--- net/mac80211/rate.c 2012-09-27 23:19:12.000000000 +0000 ++++ net/mac80211/rate.c 2012-10-15 17:30:59.828924531 +0000 +@@ -494,7 +494,7 @@ int ieee80211_init_rate_ctrl_alg(struct + + ASSERT_RTNL(); + +- if (local->open_count) ++ if 
(local_read(&local->open_count)) + return -EBUSY; + + if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) { +--- net/mac80211/rc80211_pid_debugfs.c 2012-09-27 23:19:11.000000000 +0000 ++++ net/mac80211/rc80211_pid_debugfs.c 2012-10-15 17:30:59.828924531 +0000 +@@ -193,7 +193,7 @@ static ssize_t rate_control_pid_events_r + + spin_unlock_irqrestore(&events->lock, status); + +- if (copy_to_user(buf, pb, p)) ++ if (p > sizeof(pb) || copy_to_user(buf, pb, p)) + return -EFAULT; + + return p; +--- net/mac80211/util.c 2012-09-27 23:19:11.000000000 +0000 ++++ net/mac80211/util.c 2012-10-15 17:30:59.828924531 +0000 +@@ -1251,7 +1251,7 @@ int ieee80211_reconfig(struct ieee80211_ + } + #endif + /* everything else happens only if HW was up & running */ +- if (!local->open_count) ++ if (!local_read(&local->open_count)) + goto wake_up; + + /* +--- net/wireless/core.h 2012-09-27 23:19:11.000000000 +0000 ++++ net/wireless/core.h 2012-10-15 17:30:59.829924531 +0000 +@@ -28,7 +28,7 @@ struct cfg80211_registered_device { + struct mutex mtx; + + /* rfkill support */ +- struct rfkill_ops rfkill_ops; ++ rfkill_ops_no_const rfkill_ops; + struct rfkill *rfkill; + struct work_struct rfkill_sync; + +--- net/wireless/wext-core.c 2012-09-27 23:19:12.000000000 +0000 ++++ net/wireless/wext-core.c 2012-10-15 17:30:59.829924531 +0000 +@@ -792,8 +792,7 @@ static int ioctl_standard_iw_point(struc + */ + + /* Support for very large requests */ +- if ((descr->flags & IW_DESCR_FLAG_NOMAX) && +- (user_length > descr->max_tokens)) { ++ if (user_length > descr->max_tokens) { + /* Allow userspace to GET more than max so + * we can support any size GET requests. + * There is still a limit : -ENOMEM. +@@ -832,22 +831,6 @@ static int ioctl_standard_iw_point(struc + } + } + +- if (IW_IS_GET(cmd) && !(descr->flags & IW_DESCR_FLAG_NOMAX)) { +- /* +- * If this is a GET, but not NOMAX, it means that the extra +- * data is not bounded by userspace, but by max_tokens. Thus +- * set the length to max_tokens. This matches the extra data +- * allocation. +- * The driver should fill it with the number of tokens it +- * provided, and it may check iwp->length rather than having +- * knowledge of max_tokens. If the driver doesn't change the +- * iwp->length, this ioctl just copies back max_token tokens +- * filled with zeroes. Hopefully the driver isn't claiming +- * them to be valid data. +- */ +- iwp->length = descr->max_tokens; +- } +- + err = handler(dev, info, (union iwreq_data *) iwp, extra); + + iwp->length += essid_compat; +--- scripts/gcc-plugin.sh 1970-01-01 00:00:00.000000000 +0000 ++++ scripts/gcc-plugin.sh 2012-10-15 17:30:59.829924531 +0000 +@@ -0,0 +1,17 @@ ++#!/bin/bash ++plugincc=`$1 -x c -shared - -o /dev/null -I\`$3 -print-file-name=plugin\`/include 2>&1 < ++ * Licensed under the GPL v2 ++ * ++ * Note: the choice of the license means that the compilation process is ++ * NOT 'eligible' as defined by gcc's library exception to the GPL v3, ++ * but for the kernel it doesn't matter since it doesn't link against ++ * any of the gcc libraries ++ * ++ * gcc plugin to implement various sparse (source code checker) features ++ * ++ * TODO: ++ * - define separate __iomem, __percpu and __rcu address spaces (lots of code to patch) ++ * ++ * BUGS: ++ * - none known ++ */ ++#include "gcc-plugin.h" ++#include "config.h" ++#include "system.h" ++#include "coretypes.h" ++#include "tree.h" ++#include "tree-pass.h" ++#include "flags.h" ++#include "intl.h" ++#include "toplev.h" ++#include "plugin.h" ++//#include "expr.h" where are you... 
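The file being added at this point, tools/gcc/checker_plugin.c, is a gcc plugin: gcc loads it at compile time, calls its plugin_init(), and the plugin then registers callbacks (here, sparse-style __user/__kernel address spaces wired into the targetm.addr_space hooks). For orientation, the minimal contract every plugin in this patch follows, reduced to a skeleton (illustrative only):

	#include "gcc-plugin.h"
	#include "plugin-version.h"

	int plugin_is_GPL_compatible;	/* gcc refuses to load a plugin without this */

	int plugin_init(struct plugin_name_args *plugin_info,
			struct plugin_gcc_version *version)
	{
		/* a plugin only works with the exact gcc it was built against */
		if (!plugin_default_version_check(version, &gcc_version))
			return 1;
		/* real plugins hook events and passes via register_callback() here */
		return 0;
	}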
++#include "diagnostic.h" ++#include "plugin-version.h" ++#include "tm.h" ++#include "function.h" ++#include "basic-block.h" ++#include "gimple.h" ++#include "rtl.h" ++#include "emit-rtl.h" ++#include "tree-flow.h" ++#include "target.h" ++ ++extern void c_register_addr_space (const char *str, addr_space_t as); ++extern enum machine_mode default_addr_space_pointer_mode (addr_space_t); ++extern enum machine_mode default_addr_space_address_mode (addr_space_t); ++extern bool default_addr_space_valid_pointer_mode(enum machine_mode mode, addr_space_t as); ++extern bool default_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as); ++extern rtx default_addr_space_legitimize_address(rtx x, rtx oldx, enum machine_mode mode, addr_space_t as); ++ ++extern void print_gimple_stmt(FILE *, gimple, int, int); ++extern rtx emit_move_insn(rtx x, rtx y); ++ ++int plugin_is_GPL_compatible; ++ ++static struct plugin_info checker_plugin_info = { ++ .version = "201111150100", ++}; ++ ++#define ADDR_SPACE_KERNEL 0 ++#define ADDR_SPACE_FORCE_KERNEL 1 ++#define ADDR_SPACE_USER 2 ++#define ADDR_SPACE_FORCE_USER 3 ++#define ADDR_SPACE_IOMEM 0 ++#define ADDR_SPACE_FORCE_IOMEM 0 ++#define ADDR_SPACE_PERCPU 0 ++#define ADDR_SPACE_FORCE_PERCPU 0 ++#define ADDR_SPACE_RCU 0 ++#define ADDR_SPACE_FORCE_RCU 0 ++ ++static enum machine_mode checker_addr_space_pointer_mode(addr_space_t addrspace) ++{ ++ return default_addr_space_pointer_mode(ADDR_SPACE_GENERIC); ++} ++ ++static enum machine_mode checker_addr_space_address_mode(addr_space_t addrspace) ++{ ++ return default_addr_space_address_mode(ADDR_SPACE_GENERIC); ++} ++ ++static bool checker_addr_space_valid_pointer_mode(enum machine_mode mode, addr_space_t as) ++{ ++ return default_addr_space_valid_pointer_mode(mode, as); ++} ++ ++static bool checker_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as) ++{ ++ return default_addr_space_legitimate_address_p(mode, mem, strict, ADDR_SPACE_GENERIC); ++} ++ ++static rtx checker_addr_space_legitimize_address(rtx x, rtx oldx, enum machine_mode mode, addr_space_t as) ++{ ++ return default_addr_space_legitimize_address(x, oldx, mode, as); ++} ++ ++static bool checker_addr_space_subset_p(addr_space_t subset, addr_space_t superset) ++{ ++ if (subset == ADDR_SPACE_FORCE_KERNEL && superset == ADDR_SPACE_KERNEL) ++ return true; ++ ++ if (subset == ADDR_SPACE_FORCE_USER && superset == ADDR_SPACE_USER) ++ return true; ++ ++ if (subset == ADDR_SPACE_FORCE_IOMEM && superset == ADDR_SPACE_IOMEM) ++ return true; ++ ++ if (subset == ADDR_SPACE_KERNEL && superset == ADDR_SPACE_FORCE_USER) ++ return true; ++ ++ if (subset == ADDR_SPACE_KERNEL && superset == ADDR_SPACE_FORCE_IOMEM) ++ return true; ++ ++ if (subset == ADDR_SPACE_USER && superset == ADDR_SPACE_FORCE_KERNEL) ++ return true; ++ ++ if (subset == ADDR_SPACE_IOMEM && superset == ADDR_SPACE_FORCE_KERNEL) ++ return true; ++ ++ return subset == superset; ++} ++ ++static rtx checker_addr_space_convert(rtx op, tree from_type, tree to_type) ++{ ++// addr_space_t from_as = TYPE_ADDR_SPACE(TREE_TYPE(from_type)); ++// addr_space_t to_as = TYPE_ADDR_SPACE(TREE_TYPE(to_type)); ++ ++ return op; ++} ++ ++static void register_checker_address_spaces(void *event_data, void *data) ++{ ++ c_register_addr_space("__kernel", ADDR_SPACE_KERNEL); ++ c_register_addr_space("__force_kernel", ADDR_SPACE_FORCE_KERNEL); ++ c_register_addr_space("__user", ADDR_SPACE_USER); ++ c_register_addr_space("__force_user", ADDR_SPACE_FORCE_USER); 
++// c_register_addr_space("__iomem", ADDR_SPACE_IOMEM); ++// c_register_addr_space("__force_iomem", ADDR_SPACE_FORCE_IOMEM); ++// c_register_addr_space("__percpu", ADDR_SPACE_PERCPU); ++// c_register_addr_space("__force_percpu", ADDR_SPACE_FORCE_PERCPU); ++// c_register_addr_space("__rcu", ADDR_SPACE_RCU); ++// c_register_addr_space("__force_rcu", ADDR_SPACE_FORCE_RCU); ++ ++ targetm.addr_space.pointer_mode = checker_addr_space_pointer_mode; ++ targetm.addr_space.address_mode = checker_addr_space_address_mode; ++ targetm.addr_space.valid_pointer_mode = checker_addr_space_valid_pointer_mode; ++ targetm.addr_space.legitimate_address_p = checker_addr_space_legitimate_address_p; ++// targetm.addr_space.legitimize_address = checker_addr_space_legitimize_address; ++ targetm.addr_space.subset_p = checker_addr_space_subset_p; ++ targetm.addr_space.convert = checker_addr_space_convert; ++} ++ ++int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version) ++{ ++ const char * const plugin_name = plugin_info->base_name; ++ const int argc = plugin_info->argc; ++ const struct plugin_argument * const argv = plugin_info->argv; ++ int i; ++ ++ if (!plugin_default_version_check(version, &gcc_version)) { ++ error(G_("incompatible gcc/plugin versions")); ++ return 1; ++ } ++ ++ register_callback(plugin_name, PLUGIN_INFO, NULL, &checker_plugin_info); ++ ++ for (i = 0; i < argc; ++i) ++ error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key); ++ ++ if (TARGET_64BIT == 0) ++ return 0; ++ ++ register_callback(plugin_name, PLUGIN_PRAGMAS, register_checker_address_spaces, NULL); ++ ++ return 0; ++} +--- tools/gcc/colorize_plugin.c 1970-01-01 00:00:00.000000000 +0000 ++++ tools/gcc/colorize_plugin.c 2012-10-15 17:30:59.830924531 +0000 +@@ -0,0 +1,148 @@ ++/* ++ * Copyright 2012 by PaX Team ++ * Licensed under the GPL v2 ++ * ++ * Note: the choice of the license means that the compilation process is ++ * NOT 'eligible' as defined by gcc's library exception to the GPL v3, ++ * but for the kernel it doesn't matter since it doesn't link against ++ * any of the gcc libraries ++ * ++ * gcc plugin to colorize diagnostic output ++ * ++ */ ++ ++#include "gcc-plugin.h" ++#include "config.h" ++#include "system.h" ++#include "coretypes.h" ++#include "tree.h" ++#include "tree-pass.h" ++#include "flags.h" ++#include "intl.h" ++#include "toplev.h" ++#include "plugin.h" ++#include "diagnostic.h" ++#include "plugin-version.h" ++#include "tm.h" ++ ++int plugin_is_GPL_compatible; ++ ++static struct plugin_info colorize_plugin_info = { ++ .version = "201203092200", ++ .help = NULL, ++}; ++ ++#define GREEN "\033[32m\033[2m" ++#define LIGHTGREEN "\033[32m\033[1m" ++#define YELLOW "\033[33m\033[2m" ++#define LIGHTYELLOW "\033[33m\033[1m" ++#define RED "\033[31m\033[2m" ++#define LIGHTRED "\033[31m\033[1m" ++#define BLUE "\033[34m\033[2m" ++#define LIGHTBLUE "\033[34m\033[1m" ++#define BRIGHT "\033[m\033[1m" ++#define NORMAL "\033[m" ++ ++static diagnostic_starter_fn old_starter; ++static diagnostic_finalizer_fn old_finalizer; ++ ++static void start_colorize(diagnostic_context *context, diagnostic_info *diagnostic) ++{ ++ const char *color; ++ char *newprefix; ++ ++ switch (diagnostic->kind) { ++ case DK_NOTE: ++ color = LIGHTBLUE; ++ break; ++ ++ case DK_PEDWARN: ++ case DK_WARNING: ++ color = LIGHTYELLOW; ++ break; ++ ++ case DK_ERROR: ++ case DK_FATAL: ++ case DK_ICE: ++ case DK_PERMERROR: ++ case DK_SORRY: ++ color = LIGHTRED; ++ break; ++ ++ default: ++ color = NORMAL; ++ } ++ ++ 
old_starter(context, diagnostic); ++ if (-1 == asprintf(&newprefix, "%s%s" NORMAL, color, context->printer->prefix)) ++ return; ++ pp_destroy_prefix(context->printer); ++ pp_set_prefix(context->printer, newprefix); ++} ++ ++static void finalize_colorize(diagnostic_context *context, diagnostic_info *diagnostic) ++{ ++ old_finalizer(context, diagnostic); ++} ++ ++static void colorize_arm(void) ++{ ++ old_starter = diagnostic_starter(global_dc); ++ old_finalizer = diagnostic_finalizer(global_dc); ++ ++ diagnostic_starter(global_dc) = start_colorize; ++ diagnostic_finalizer(global_dc) = finalize_colorize; ++} ++ ++static unsigned int execute_colorize_rearm(void) ++{ ++ if (diagnostic_starter(global_dc) == start_colorize) ++ return 0; ++ ++ colorize_arm(); ++ return 0; ++} ++ ++struct simple_ipa_opt_pass pass_ipa_colorize_rearm = { ++ .pass = { ++ .type = SIMPLE_IPA_PASS, ++ .name = "colorize_rearm", ++ .gate = NULL, ++ .execute = execute_colorize_rearm, ++ .sub = NULL, ++ .next = NULL, ++ .static_pass_number = 0, ++ .tv_id = TV_NONE, ++ .properties_required = 0, ++ .properties_provided = 0, ++ .properties_destroyed = 0, ++ .todo_flags_start = 0, ++ .todo_flags_finish = 0 ++ } ++}; ++ ++static void colorize_start_unit(void *gcc_data, void *user_data) ++{ ++ colorize_arm(); ++} ++ ++int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version) ++{ ++ const char * const plugin_name = plugin_info->base_name; ++ struct register_pass_info colorize_rearm_pass_info = { ++ .pass = &pass_ipa_colorize_rearm.pass, ++ .reference_pass_name = "*free_lang_data", ++ .ref_pass_instance_number = 1, ++ .pos_op = PASS_POS_INSERT_AFTER ++ }; ++ ++ if (!plugin_default_version_check(version, &gcc_version)) { ++ error(G_("incompatible gcc/plugin versions")); ++ return 1; ++ } ++ ++ register_callback(plugin_name, PLUGIN_INFO, NULL, &colorize_plugin_info); ++ register_callback(plugin_name, PLUGIN_START_UNIT, &colorize_start_unit, NULL); ++ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &colorize_rearm_pass_info); ++ return 0; ++} +--- tools/gcc/constify_plugin.c 1970-01-01 00:00:00.000000000 +0000 ++++ tools/gcc/constify_plugin.c 2012-10-15 17:30:59.830924531 +0000 +@@ -0,0 +1,331 @@ ++/* ++ * Copyright 2011 by Emese Revfy ++ * Copyright 2011 by PaX Team ++ * Licensed under the GPL v2, or (at your option) v3 ++ * ++ * This gcc plugin constifies all structures which contain only function pointers or are explicitly marked for constification. 
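In concrete terms, the transformation this plugin automates looks like the following hand-written sketch (illustrative names, not part of the plugin): a type containing nothing but function pointers gains an implicit const, so every post-initialisation store through it becomes a compile error, and the no_const attribute handled below is the per-type escape hatch:

	struct net_ops {
		int  (*open)(void *priv);
		void (*close)(void *priv);
	};

	/* effectively what constification turns instances into: */
	static const struct net_ops eth_ops = {
		.open  = 0,	/* a real driver points these at its handlers */
		.close = 0,
	};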
++ * ++ * Homepage: ++ * http://www.grsecurity.net/~ephox/const_plugin/ ++ * ++ * Usage: ++ * $ gcc -I`gcc -print-file-name=plugin`/include -fPIC -shared -O2 -o constify_plugin.so constify_plugin.c ++ * $ gcc -fplugin=constify_plugin.so test.c -O2 ++ */ ++ ++#include "gcc-plugin.h" ++#include "config.h" ++#include "system.h" ++#include "coretypes.h" ++#include "tree.h" ++#include "tree-pass.h" ++#include "flags.h" ++#include "intl.h" ++#include "toplev.h" ++#include "plugin.h" ++#include "diagnostic.h" ++#include "plugin-version.h" ++#include "tm.h" ++#include "function.h" ++#include "basic-block.h" ++#include "gimple.h" ++#include "rtl.h" ++#include "emit-rtl.h" ++#include "tree-flow.h" ++ ++#define C_TYPE_FIELDS_READONLY(TYPE) TREE_LANG_FLAG_1(TYPE) ++ ++int plugin_is_GPL_compatible; ++ ++static struct plugin_info const_plugin_info = { ++ .version = "201205300030", ++ .help = "no-constify\tturn off constification\n", ++}; ++ ++static void deconstify_tree(tree node); ++ ++static void deconstify_type(tree type) ++{ ++ tree field; ++ ++ for (field = TYPE_FIELDS(type); field; field = TREE_CHAIN(field)) { ++ tree type = TREE_TYPE(field); ++ ++ if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE) ++ continue; ++ if (!TYPE_READONLY(type)) ++ continue; ++ ++ deconstify_tree(field); ++ } ++ TYPE_READONLY(type) = 0; ++ C_TYPE_FIELDS_READONLY(type) = 0; ++} ++ ++static void deconstify_tree(tree node) ++{ ++ tree old_type, new_type, field; ++ ++ old_type = TREE_TYPE(node); ++ ++ gcc_assert(TYPE_READONLY(old_type) && (TYPE_QUALS(old_type) & TYPE_QUAL_CONST)); ++ ++ new_type = build_qualified_type(old_type, TYPE_QUALS(old_type) & ~TYPE_QUAL_CONST); ++ TYPE_FIELDS(new_type) = copy_list(TYPE_FIELDS(new_type)); ++ for (field = TYPE_FIELDS(new_type); field; field = TREE_CHAIN(field)) ++ DECL_FIELD_CONTEXT(field) = new_type; ++ ++ deconstify_type(new_type); ++ ++ TREE_READONLY(node) = 0; ++ TREE_TYPE(node) = new_type; ++} ++ ++static tree handle_no_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs) ++{ ++ tree type; ++ ++ *no_add_attrs = true; ++ if (TREE_CODE(*node) == FUNCTION_DECL) { ++ error("%qE attribute does not apply to functions", name); ++ return NULL_TREE; ++ } ++ ++ if (TREE_CODE(*node) == VAR_DECL) { ++ error("%qE attribute does not apply to variables", name); ++ return NULL_TREE; ++ } ++ ++ if (TYPE_P(*node)) { ++ if (TREE_CODE(*node) == RECORD_TYPE || TREE_CODE(*node) == UNION_TYPE) ++ *no_add_attrs = false; ++ else ++ error("%qE attribute applies to struct and union types only", name); ++ return NULL_TREE; ++ } ++ ++ type = TREE_TYPE(*node); ++ ++ if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE) { ++ error("%qE attribute applies to struct and union types only", name); ++ return NULL_TREE; ++ } ++ ++ if (lookup_attribute(IDENTIFIER_POINTER(name), TYPE_ATTRIBUTES(type))) { ++ error("%qE attribute is already applied to the type", name); ++ return NULL_TREE; ++ } ++ ++ if (TREE_CODE(*node) == TYPE_DECL && !TYPE_READONLY(type)) { ++ error("%qE attribute used on type that is not constified", name); ++ return NULL_TREE; ++ } ++ ++ if (TREE_CODE(*node) == TYPE_DECL) { ++ deconstify_tree(*node); ++ return NULL_TREE; ++ } ++ ++ return NULL_TREE; ++} ++ ++static void constify_type(tree type) ++{ ++ TYPE_READONLY(type) = 1; ++ C_TYPE_FIELDS_READONLY(type) = 1; ++} ++ ++static tree handle_do_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs) ++{ ++ *no_add_attrs = true; ++ if (!TYPE_P(*node)) { ++ error("%qE 
attribute applies to types only", name); ++ return NULL_TREE; ++ } ++ ++ if (TREE_CODE(*node) != RECORD_TYPE && TREE_CODE(*node) != UNION_TYPE) { ++ error("%qE attribute applies to struct and union types only", name); ++ return NULL_TREE; ++ } ++ ++ *no_add_attrs = false; ++ constify_type(*node); ++ return NULL_TREE; ++} ++ ++static struct attribute_spec no_const_attr = { ++ .name = "no_const", ++ .min_length = 0, ++ .max_length = 0, ++ .decl_required = false, ++ .type_required = false, ++ .function_type_required = false, ++ .handler = handle_no_const_attribute, ++#if BUILDING_GCC_VERSION >= 4007 ++ .affects_type_identity = true ++#endif ++}; ++ ++static struct attribute_spec do_const_attr = { ++ .name = "do_const", ++ .min_length = 0, ++ .max_length = 0, ++ .decl_required = false, ++ .type_required = false, ++ .function_type_required = false, ++ .handler = handle_do_const_attribute, ++#if BUILDING_GCC_VERSION >= 4007 ++ .affects_type_identity = true ++#endif ++}; ++ ++static void register_attributes(void *event_data, void *data) ++{ ++ register_attribute(&no_const_attr); ++ register_attribute(&do_const_attr); ++} ++ ++static bool is_fptr(tree field) ++{ ++ tree ptr = TREE_TYPE(field); ++ ++ if (TREE_CODE(ptr) != POINTER_TYPE) ++ return false; ++ ++ return TREE_CODE(TREE_TYPE(ptr)) == FUNCTION_TYPE; ++} ++ ++static bool walk_struct(tree node) ++{ ++ tree field; ++ ++ if (TYPE_FIELDS(node) == NULL_TREE) ++ return false; ++ ++ if (lookup_attribute("no_const", TYPE_ATTRIBUTES(node))) { ++ gcc_assert(!TYPE_READONLY(node)); ++ deconstify_type(node); ++ return false; ++ } ++ ++ for (field = TYPE_FIELDS(node); field; field = TREE_CHAIN(field)) { ++ tree type = TREE_TYPE(field); ++ enum tree_code code = TREE_CODE(type); ++ ++ if (node == type) ++ return false; ++ if (code == RECORD_TYPE || code == UNION_TYPE) { ++ if (!(walk_struct(type))) ++ return false; ++ } else if (!is_fptr(field) && !TREE_READONLY(field)) ++ return false; ++ } ++ return true; ++} ++ ++static void finish_type(void *event_data, void *data) ++{ ++ tree type = (tree)event_data; ++ ++ if (type == NULL_TREE || type == error_mark_node) ++ return; ++ ++ if (TYPE_READONLY(type)) ++ return; ++ ++ if (walk_struct(type)) ++ constify_type(type); ++} ++ ++static unsigned int check_local_variables(void); ++ ++struct gimple_opt_pass pass_local_variable = { ++ { ++ .type = GIMPLE_PASS, ++ .name = "check_local_variables", ++ .gate = NULL, ++ .execute = check_local_variables, ++ .sub = NULL, ++ .next = NULL, ++ .static_pass_number = 0, ++ .tv_id = TV_NONE, ++ .properties_required = 0, ++ .properties_provided = 0, ++ .properties_destroyed = 0, ++ .todo_flags_start = 0, ++ .todo_flags_finish = 0 ++ } ++}; ++ ++static unsigned int check_local_variables(void) ++{ ++ tree var; ++ referenced_var_iterator rvi; ++ ++#if BUILDING_GCC_VERSION == 4005 ++ FOR_EACH_REFERENCED_VAR(var, rvi) { ++#else ++ FOR_EACH_REFERENCED_VAR(cfun, var, rvi) { ++#endif ++ tree type = TREE_TYPE(var); ++ ++ if (!DECL_P(var) || TREE_STATIC(var) || DECL_EXTERNAL(var)) ++ continue; ++ ++ if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE) ++ continue; ++ ++ if (!TYPE_READONLY(type)) ++ continue; ++ ++// if (lookup_attribute("no_const", DECL_ATTRIBUTES(var))) ++// continue; ++ ++// if (lookup_attribute("no_const", TYPE_ATTRIBUTES(type))) ++// continue; ++ ++ if (walk_struct(type)) { ++ error_at(DECL_SOURCE_LOCATION(var), "constified variable %qE cannot be local", var); ++ return 1; ++ } ++ } ++ return 0; ++} ++ ++int plugin_init(struct plugin_name_args 
*plugin_info, struct plugin_gcc_version *version) ++{ ++ const char * const plugin_name = plugin_info->base_name; ++ const int argc = plugin_info->argc; ++ const struct plugin_argument * const argv = plugin_info->argv; ++ int i; ++ bool constify = true; ++ ++ struct register_pass_info local_variable_pass_info = { ++ .pass = &pass_local_variable.pass, ++ .reference_pass_name = "*referenced_vars", ++ .ref_pass_instance_number = 1, ++ .pos_op = PASS_POS_INSERT_AFTER ++ }; ++ ++ if (!plugin_default_version_check(version, &gcc_version)) { ++ error(G_("incompatible gcc/plugin versions")); ++ return 1; ++ } ++ ++ for (i = 0; i < argc; ++i) { ++ if (!(strcmp(argv[i].key, "no-constify"))) { ++ constify = false; ++ continue; ++ } ++ error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key); ++ } ++ ++ register_callback(plugin_name, PLUGIN_INFO, NULL, &const_plugin_info); ++ if (constify) { ++ register_callback(plugin_name, PLUGIN_FINISH_TYPE, finish_type, NULL); ++ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &local_variable_pass_info); ++ } ++ register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL); ++ ++ return 0; ++} +--- tools/gcc/generate_size_overflow_hash.sh 1970-01-01 00:00:00.000000000 +0000 ++++ tools/gcc/generate_size_overflow_hash.sh 2012-10-15 17:30:59.830924531 +0000 +@@ -0,0 +1,94 @@ ++#!/bin/bash ++ ++# This script generates the hash table (size_overflow_hash.h) for the size_overflow gcc plugin (size_overflow_plugin.c). ++ ++header1="size_overflow_hash.h" ++database="size_overflow_hash.data" ++n=65536 ++ ++usage() { ++cat <> "$header1" ++ done ++ echo >> "$header1" ++} ++ ++create_structs () { ++ rm -f "$header1" ++ ++ create_defines ++ ++ cat "$database" | while read data ++ do ++ data_array=($data) ++ struct_hash_name="${data_array[0]}" ++ funcn="${data_array[1]}" ++ params="${data_array[2]}" ++ next="${data_array[5]}" ++ ++ echo "const struct size_overflow_hash $struct_hash_name = {" >> "$header1" ++ ++ echo -e "\t.next\t= $next,\n\t.name\t= \"$funcn\"," >> "$header1" ++ echo -en "\t.param\t= " >> "$header1" ++ line= ++ for param_num in ${params//-/ }; ++ do ++ line="${line}PARAM"$param_num"|" ++ done ++ ++ echo -e "${line%?},\n};\n" >> "$header1" ++ done ++} ++ ++create_headers () { ++ echo "const struct size_overflow_hash * const size_overflow_hash[$n] = {" >> "$header1" ++} ++ ++create_array_elements () { ++ index=0 ++ grep -v "nohasharray" $database | sort -n -k 4 | while read data ++ do ++ data_array=($data) ++ i="${data_array[3]}" ++ hash="${data_array[4]}" ++ while [[ $index -lt $i ]] ++ do ++ echo -e "\t["$index"]\t= NULL," >> "$header1" ++ index=$(($index + 1)) ++ done ++ index=$(($index + 1)) ++ echo -e "\t["$i"]\t= &"$hash"," >> "$header1" ++ done ++ echo '};' >> $header1 ++} ++ ++create_structs ++create_headers ++create_array_elements ++ ++exit 0 +--- tools/gcc/kallocstat_plugin.c 1970-01-01 00:00:00.000000000 +0000 ++++ tools/gcc/kallocstat_plugin.c 2012-10-15 17:30:59.831924531 +0000 +@@ -0,0 +1,167 @@ ++/* ++ * Copyright 2011 by the PaX Team ++ * Licensed under the GPL v2 ++ * ++ * Note: the choice of the license means that the compilation process is ++ * NOT 'eligible' as defined by gcc's library exception to the GPL v3, ++ * but for the kernel it doesn't matter since it doesn't link against ++ * any of the gcc libraries ++ * ++ * gcc plugin to find the distribution of k*alloc sizes ++ * ++ * TODO: ++ * ++ * BUGS: ++ * - none known ++ */ ++#include "gcc-plugin.h" ++#include "config.h" ++#include "system.h" 
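kallocstat, whose source continues below, loops over every basic block, keeps only GIMPLE calls whose callee is in the k*alloc family, and, when the size argument traces back to a simple constant assignment, prints one "kallocsize:" line per call site. A user-space sketch of the kind of call it would flag; kmalloc_like is a hypothetical stand-in for kmalloc so the snippet builds outside the kernel:

	#include <stdio.h>
	#include <stdlib.h>

	static void *kmalloc_like(size_t n)
	{
		return malloc(n);	/* stand-in for the real allocator */
	}

	int main(void)
	{
		size_t sz = 128;		/* constant assignment feeding... */
		void *buf = kmalloc_like(sz);	/* ...an alloc call: reported as 128 */

		if (!buf)
			return 1;
		printf("allocated %zu bytes\n", sz);
		free(buf);
		return 0;
	}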
++#include "coretypes.h" ++#include "tree.h" ++#include "tree-pass.h" ++#include "flags.h" ++#include "intl.h" ++#include "toplev.h" ++#include "plugin.h" ++//#include "expr.h" where are you... ++#include "diagnostic.h" ++#include "plugin-version.h" ++#include "tm.h" ++#include "function.h" ++#include "basic-block.h" ++#include "gimple.h" ++#include "rtl.h" ++#include "emit-rtl.h" ++ ++extern void print_gimple_stmt(FILE *, gimple, int, int); ++ ++int plugin_is_GPL_compatible; ++ ++static const char * const kalloc_functions[] = { ++ "__kmalloc", ++ "kmalloc", ++ "kmalloc_large", ++ "kmalloc_node", ++ "kmalloc_order", ++ "kmalloc_order_trace", ++ "kmalloc_slab", ++ "kzalloc", ++ "kzalloc_node", ++}; ++ ++static struct plugin_info kallocstat_plugin_info = { ++ .version = "201111150100", ++}; ++ ++static unsigned int execute_kallocstat(void); ++ ++static struct gimple_opt_pass kallocstat_pass = { ++ .pass = { ++ .type = GIMPLE_PASS, ++ .name = "kallocstat", ++ .gate = NULL, ++ .execute = execute_kallocstat, ++ .sub = NULL, ++ .next = NULL, ++ .static_pass_number = 0, ++ .tv_id = TV_NONE, ++ .properties_required = 0, ++ .properties_provided = 0, ++ .properties_destroyed = 0, ++ .todo_flags_start = 0, ++ .todo_flags_finish = 0 ++ } ++}; ++ ++static bool is_kalloc(const char *fnname) ++{ ++ size_t i; ++ ++ for (i = 0; i < ARRAY_SIZE(kalloc_functions); i++) ++ if (!strcmp(fnname, kalloc_functions[i])) ++ return true; ++ return false; ++} ++ ++static unsigned int execute_kallocstat(void) ++{ ++ basic_block bb; ++ ++ // 1. loop through BBs and GIMPLE statements ++ FOR_EACH_BB(bb) { ++ gimple_stmt_iterator gsi; ++ for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) { ++ // gimple match: ++ tree fndecl, size; ++ gimple call_stmt; ++ const char *fnname; ++ ++ // is it a call ++ call_stmt = gsi_stmt(gsi); ++ if (!is_gimple_call(call_stmt)) ++ continue; ++ fndecl = gimple_call_fndecl(call_stmt); ++ if (fndecl == NULL_TREE) ++ continue; ++ if (TREE_CODE(fndecl) != FUNCTION_DECL) ++ continue; ++ ++ // is it a call to k*alloc ++ fnname = IDENTIFIER_POINTER(DECL_NAME(fndecl)); ++ if (!is_kalloc(fnname)) ++ continue; ++ ++ // is the size arg the result of a simple const assignment ++ size = gimple_call_arg(call_stmt, 0); ++ while (true) { ++ gimple def_stmt; ++ expanded_location xloc; ++ size_t size_val; ++ ++ if (TREE_CODE(size) != SSA_NAME) ++ break; ++ def_stmt = SSA_NAME_DEF_STMT(size); ++ if (!def_stmt || !is_gimple_assign(def_stmt)) ++ break; ++ if (gimple_num_ops(def_stmt) != 2) ++ break; ++ size = gimple_assign_rhs1(def_stmt); ++ if (!TREE_CONSTANT(size)) ++ continue; ++ xloc = expand_location(gimple_location(def_stmt)); ++ if (!xloc.file) ++ xloc = expand_location(DECL_SOURCE_LOCATION(current_function_decl)); ++ size_val = TREE_INT_CST_LOW(size); ++ fprintf(stderr, "kallocsize: %8zu %8zx %s %s:%u\n", size_val, size_val, fnname, xloc.file, xloc.line); ++ break; ++ } ++//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO); ++//debug_tree(gimple_call_fn(call_stmt)); ++//print_node(stderr, "pax", fndecl, 4); ++ } ++ } ++ ++ return 0; ++} ++ ++int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version) ++{ ++ const char * const plugin_name = plugin_info->base_name; ++ struct register_pass_info kallocstat_pass_info = { ++ .pass = &kallocstat_pass.pass, ++ .reference_pass_name = "ssa", ++ .ref_pass_instance_number = 1, ++ .pos_op = PASS_POS_INSERT_AFTER ++ }; ++ ++ if (!plugin_default_version_check(version, &gcc_version)) { ++ error(G_("incompatible gcc/plugin 
versions")); ++ return 1; ++ } ++ ++ register_callback(plugin_name, PLUGIN_INFO, NULL, &kallocstat_plugin_info); ++ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kallocstat_pass_info); ++ ++ return 0; ++} +--- tools/gcc/kernexec_plugin.c 1970-01-01 00:00:00.000000000 +0000 ++++ tools/gcc/kernexec_plugin.c 2012-10-15 17:30:59.831924531 +0000 +@@ -0,0 +1,427 @@ ++/* ++ * Copyright 2011 by the PaX Team ++ * Licensed under the GPL v2 ++ * ++ * Note: the choice of the license means that the compilation process is ++ * NOT 'eligible' as defined by gcc's library exception to the GPL v3, ++ * but for the kernel it doesn't matter since it doesn't link against ++ * any of the gcc libraries ++ * ++ * gcc plugin to make KERNEXEC/amd64 almost as good as it is on i386 ++ * ++ * TODO: ++ * ++ * BUGS: ++ * - none known ++ */ ++#include "gcc-plugin.h" ++#include "config.h" ++#include "system.h" ++#include "coretypes.h" ++#include "tree.h" ++#include "tree-pass.h" ++#include "flags.h" ++#include "intl.h" ++#include "toplev.h" ++#include "plugin.h" ++//#include "expr.h" where are you... ++#include "diagnostic.h" ++#include "plugin-version.h" ++#include "tm.h" ++#include "function.h" ++#include "basic-block.h" ++#include "gimple.h" ++#include "rtl.h" ++#include "emit-rtl.h" ++#include "tree-flow.h" ++ ++extern void print_gimple_stmt(FILE *, gimple, int, int); ++extern rtx emit_move_insn(rtx x, rtx y); ++ ++int plugin_is_GPL_compatible; ++ ++static struct plugin_info kernexec_plugin_info = { ++ .version = "201111291120", ++ .help = "method=[bts|or]\tinstrumentation method\n" ++}; ++ ++static unsigned int execute_kernexec_reload(void); ++static unsigned int execute_kernexec_fptr(void); ++static unsigned int execute_kernexec_retaddr(void); ++static bool kernexec_cmodel_check(void); ++ ++static void (*kernexec_instrument_fptr)(gimple_stmt_iterator *); ++static void (*kernexec_instrument_retaddr)(rtx); ++ ++static struct gimple_opt_pass kernexec_reload_pass = { ++ .pass = { ++ .type = GIMPLE_PASS, ++ .name = "kernexec_reload", ++ .gate = kernexec_cmodel_check, ++ .execute = execute_kernexec_reload, ++ .sub = NULL, ++ .next = NULL, ++ .static_pass_number = 0, ++ .tv_id = TV_NONE, ++ .properties_required = 0, ++ .properties_provided = 0, ++ .properties_destroyed = 0, ++ .todo_flags_start = 0, ++ .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi ++ } ++}; ++ ++static struct gimple_opt_pass kernexec_fptr_pass = { ++ .pass = { ++ .type = GIMPLE_PASS, ++ .name = "kernexec_fptr", ++ .gate = kernexec_cmodel_check, ++ .execute = execute_kernexec_fptr, ++ .sub = NULL, ++ .next = NULL, ++ .static_pass_number = 0, ++ .tv_id = TV_NONE, ++ .properties_required = 0, ++ .properties_provided = 0, ++ .properties_destroyed = 0, ++ .todo_flags_start = 0, ++ .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi ++ } ++}; ++ ++static struct rtl_opt_pass kernexec_retaddr_pass = { ++ .pass = { ++ .type = RTL_PASS, ++ .name = "kernexec_retaddr", ++ .gate = kernexec_cmodel_check, ++ .execute = execute_kernexec_retaddr, ++ .sub = NULL, ++ .next = NULL, ++ .static_pass_number = 0, ++ .tv_id = TV_NONE, ++ .properties_required = 0, ++ .properties_provided = 0, ++ .properties_destroyed = 0, ++ .todo_flags_start = 0, ++ .todo_flags_finish = TODO_dump_func | TODO_ggc_collect ++ } ++}; ++ ++static bool kernexec_cmodel_check(void) ++{ ++ tree section; ++ ++ if (ix86_cmodel != 
CM_KERNEL) ++ return false; ++ ++ section = lookup_attribute("section", DECL_ATTRIBUTES(current_function_decl)); ++ if (!section || !TREE_VALUE(section)) ++ return true; ++ ++ section = TREE_VALUE(TREE_VALUE(section)); ++ if (strncmp(TREE_STRING_POINTER(section), ".vsyscall_", 10)) ++ return true; ++ ++ return false; ++} ++ ++/* ++ * add special KERNEXEC instrumentation: reload %r10 after it has been clobbered ++ */ ++static void kernexec_reload_fptr_mask(gimple_stmt_iterator *gsi) ++{ ++ gimple asm_movabs_stmt; ++ ++ // build asm volatile("movabs $0x8000000000000000, %%r10\n\t" : : : ); ++ asm_movabs_stmt = gimple_build_asm_vec("movabs $0x8000000000000000, %%r10\n\t", NULL, NULL, NULL, NULL); ++ gimple_asm_set_volatile(asm_movabs_stmt, true); ++ gsi_insert_after(gsi, asm_movabs_stmt, GSI_CONTINUE_LINKING); ++ update_stmt(asm_movabs_stmt); ++} ++ ++/* ++ * find all asm() stmts that clobber r10 and add a reload of r10 ++ */ ++static unsigned int execute_kernexec_reload(void) ++{ ++ basic_block bb; ++ ++ // 1. loop through BBs and GIMPLE statements ++ FOR_EACH_BB(bb) { ++ gimple_stmt_iterator gsi; ++ ++ for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) { ++ // gimple match: __asm__ ("" : : : "r10"); ++ gimple asm_stmt; ++ size_t nclobbers; ++ ++ // is it an asm ... ++ asm_stmt = gsi_stmt(gsi); ++ if (gimple_code(asm_stmt) != GIMPLE_ASM) ++ continue; ++ ++ // ... clobbering r10 ++ nclobbers = gimple_asm_nclobbers(asm_stmt); ++ while (nclobbers--) { ++ tree op = gimple_asm_clobber_op(asm_stmt, nclobbers); ++ if (strcmp(TREE_STRING_POINTER(TREE_VALUE(op)), "r10")) ++ continue; ++ kernexec_reload_fptr_mask(&gsi); ++//print_gimple_stmt(stderr, asm_stmt, 0, TDF_LINENO); ++ break; ++ } ++ } ++ } ++ ++ return 0; ++} ++ ++/* ++ * add special KERNEXEC instrumentation: force MSB of fptr to 1, which will produce ++ * a non-canonical address from a userland ptr and will just trigger a GPF on dereference ++ */ ++static void kernexec_instrument_fptr_bts(gimple_stmt_iterator *gsi) ++{ ++ gimple assign_intptr, assign_new_fptr, call_stmt; ++ tree intptr, old_fptr, new_fptr, kernexec_mask; ++ ++ call_stmt = gsi_stmt(*gsi); ++ old_fptr = gimple_call_fn(call_stmt); ++ ++ // create temporary unsigned long variable used for bitops and cast fptr to it ++ intptr = create_tmp_var(long_unsigned_type_node, "kernexec_bts"); ++ add_referenced_var(intptr); ++ mark_sym_for_renaming(intptr); ++ assign_intptr = gimple_build_assign(intptr, fold_convert(long_unsigned_type_node, old_fptr)); ++ gsi_insert_before(gsi, assign_intptr, GSI_SAME_STMT); ++ update_stmt(assign_intptr); ++ ++ // apply logical or to temporary unsigned long and bitmask ++ kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0x8000000000000000LL); ++// kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0xffffffff80000000LL); ++ assign_intptr = gimple_build_assign(intptr, fold_build2(BIT_IOR_EXPR, long_long_unsigned_type_node, intptr, kernexec_mask)); ++ gsi_insert_before(gsi, assign_intptr, GSI_SAME_STMT); ++ update_stmt(assign_intptr); ++ ++ // cast temporary unsigned long back to a temporary fptr variable ++ new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec_fptr"); ++ add_referenced_var(new_fptr); ++ mark_sym_for_renaming(new_fptr); ++ assign_new_fptr = gimple_build_assign(new_fptr, fold_convert(TREE_TYPE(old_fptr), intptr)); ++ gsi_insert_before(gsi, assign_new_fptr, GSI_SAME_STMT); ++ update_stmt(assign_new_fptr); ++ ++ // replace call stmt fn with the new fptr ++ gimple_call_set_fn(call_stmt, new_fptr); ++ 
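++	/*
++	 * Summary of the transformation built above (a sketch; names are
++	 * illustrative): at the C level the instrumented call site computes
++	 *
++	 *	intptr = (unsigned long)old_fptr;
++	 *	intptr |= 0x8000000000000000UL;		// force the MSB on
++	 *	new_fptr = (fptr_type)intptr;
++	 *	(*new_fptr)(args);
++	 *
++	 * A kernel pointer already has the bit set and is left unchanged; a
++	 * smuggled userland pointer becomes a non-canonical amd64 address
++	 * and faults on dereference instead of executing.
++	 */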
update_stmt(call_stmt); ++} ++ ++static void kernexec_instrument_fptr_or(gimple_stmt_iterator *gsi) ++{ ++ gimple asm_or_stmt, call_stmt; ++ tree old_fptr, new_fptr, input, output; ++ VEC(tree, gc) *inputs = NULL; ++ VEC(tree, gc) *outputs = NULL; ++ ++ call_stmt = gsi_stmt(*gsi); ++ old_fptr = gimple_call_fn(call_stmt); ++ ++ // create temporary fptr variable ++ new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec_or"); ++ add_referenced_var(new_fptr); ++ mark_sym_for_renaming(new_fptr); ++ ++ // build asm volatile("orq %%r10, %0\n\t" : "=r"(new_fptr) : "0"(old_fptr)); ++ input = build_tree_list(NULL_TREE, build_string(2, "0")); ++ input = chainon(NULL_TREE, build_tree_list(input, old_fptr)); ++ output = build_tree_list(NULL_TREE, build_string(3, "=r")); ++ output = chainon(NULL_TREE, build_tree_list(output, new_fptr)); ++ VEC_safe_push(tree, gc, inputs, input); ++ VEC_safe_push(tree, gc, outputs, output); ++ asm_or_stmt = gimple_build_asm_vec("orq %%r10, %0\n\t", inputs, outputs, NULL, NULL); ++ gimple_asm_set_volatile(asm_or_stmt, true); ++ gsi_insert_before(gsi, asm_or_stmt, GSI_SAME_STMT); ++ update_stmt(asm_or_stmt); ++ ++ // replace call stmt fn with the new fptr ++ gimple_call_set_fn(call_stmt, new_fptr); ++ update_stmt(call_stmt); ++} ++ ++/* ++ * find all C level function pointer dereferences and forcibly set the highest bit of the pointer ++ */ ++static unsigned int execute_kernexec_fptr(void) ++{ ++ basic_block bb; ++ ++ // 1. loop through BBs and GIMPLE statements ++ FOR_EACH_BB(bb) { ++ gimple_stmt_iterator gsi; ++ ++ for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) { ++ // gimple match: h_1 = get_fptr (); D.2709_3 = h_1 (x_2(D)); ++ tree fn; ++ gimple call_stmt; ++ ++ // is it a call ... ++ call_stmt = gsi_stmt(gsi); ++ if (!is_gimple_call(call_stmt)) ++ continue; ++ fn = gimple_call_fn(call_stmt); ++ if (TREE_CODE(fn) == ADDR_EXPR) ++ continue; ++ if (TREE_CODE(fn) != SSA_NAME) ++ gcc_unreachable(); ++ ++ // ... 
through a function pointer ++ fn = SSA_NAME_VAR(fn); ++ if (TREE_CODE(fn) != VAR_DECL && TREE_CODE(fn) != PARM_DECL) ++ continue; ++ fn = TREE_TYPE(fn); ++ if (TREE_CODE(fn) != POINTER_TYPE) ++ continue; ++ fn = TREE_TYPE(fn); ++ if (TREE_CODE(fn) != FUNCTION_TYPE) ++ continue; ++ ++ kernexec_instrument_fptr(&gsi); ++ ++//debug_tree(gimple_call_fn(call_stmt)); ++//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO); ++ } ++ } ++ ++ return 0; ++} ++ ++// add special KERNEXEC instrumentation: btsq $63,(%rsp) just before retn ++static void kernexec_instrument_retaddr_bts(rtx insn) ++{ ++ rtx btsq; ++ rtvec argvec, constraintvec, labelvec; ++ int line; ++ ++ // create asm volatile("btsq $63,(%%rsp)":::) ++ argvec = rtvec_alloc(0); ++ constraintvec = rtvec_alloc(0); ++ labelvec = rtvec_alloc(0); ++ line = expand_location(RTL_LOCATION(insn)).line; ++ btsq = gen_rtx_ASM_OPERANDS(VOIDmode, "btsq $63,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line); ++ MEM_VOLATILE_P(btsq) = 1; ++// RTX_FRAME_RELATED_P(btsq) = 1; // not for ASM_OPERANDS ++ emit_insn_before(btsq, insn); ++} ++ ++// add special KERNEXEC instrumentation: orq %r10,(%rsp) just before retn ++static void kernexec_instrument_retaddr_or(rtx insn) ++{ ++ rtx orq; ++ rtvec argvec, constraintvec, labelvec; ++ int line; ++ ++ // create asm volatile("orq %%r10,(%%rsp)":::) ++ argvec = rtvec_alloc(0); ++ constraintvec = rtvec_alloc(0); ++ labelvec = rtvec_alloc(0); ++ line = expand_location(RTL_LOCATION(insn)).line; ++ orq = gen_rtx_ASM_OPERANDS(VOIDmode, "orq %%r10,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line); ++ MEM_VOLATILE_P(orq) = 1; ++// RTX_FRAME_RELATED_P(orq) = 1; // not for ASM_OPERANDS ++ emit_insn_before(orq, insn); ++} ++ ++/* ++ * find all asm level function returns and forcibly set the highest bit of the return address ++ */ ++static unsigned int execute_kernexec_retaddr(void) ++{ ++ rtx insn; ++ ++ // 1. 
find function returns
++	for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
++		// rtl match: (jump_insn 41 40 42 2 (return) fptr.c:42 634 {return_internal} (nil))
++		//            (jump_insn 12 9 11 2 (parallel [ (return) (unspec [ (0) ] UNSPEC_REP) ]) fptr.c:46 635 {return_internal_long} (nil))
++		rtx body;
++
++		// is it a retn
++		if (!JUMP_P(insn))
++			continue;
++		body = PATTERN(insn);
++		if (GET_CODE(body) == PARALLEL)
++			body = XVECEXP(body, 0, 0);
++		if (GET_CODE(body) != RETURN)
++			continue;
++		kernexec_instrument_retaddr(insn);
++	}
++
++//	print_simple_rtl(stderr, get_insns());
++//	print_rtl(stderr, get_insns());
++
++	return 0;
++}
++
++int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
++{
++	const char * const plugin_name = plugin_info->base_name;
++	const int argc = plugin_info->argc;
++	const struct plugin_argument * const argv = plugin_info->argv;
++	int i;
++	struct register_pass_info kernexec_reload_pass_info = {
++		.pass = &kernexec_reload_pass.pass,
++		.reference_pass_name = "ssa",
++		.ref_pass_instance_number = 1,
++		.pos_op = PASS_POS_INSERT_AFTER
++	};
++	struct register_pass_info kernexec_fptr_pass_info = {
++		.pass = &kernexec_fptr_pass.pass,
++		.reference_pass_name = "ssa",
++		.ref_pass_instance_number = 1,
++		.pos_op = PASS_POS_INSERT_AFTER
++	};
++	struct register_pass_info kernexec_retaddr_pass_info = {
++		.pass = &kernexec_retaddr_pass.pass,
++		.reference_pass_name = "pro_and_epilogue",
++		.ref_pass_instance_number = 1,
++		.pos_op = PASS_POS_INSERT_AFTER
++	};
++
++	if (!plugin_default_version_check(version, &gcc_version)) {
++		error(G_("incompatible gcc/plugin versions"));
++		return 1;
++	}
++
++	register_callback(plugin_name, PLUGIN_INFO, NULL, &kernexec_plugin_info);
++
++	if (TARGET_64BIT == 0)
++		return 0;
++
++	for (i = 0; i < argc; ++i) {
++		if (!strcmp(argv[i].key, "method")) {
++			if (!argv[i].value) {
++				error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
++				continue;
++			}
++			if (!strcmp(argv[i].value, "bts")) {
++				kernexec_instrument_fptr = kernexec_instrument_fptr_bts;
++				kernexec_instrument_retaddr = kernexec_instrument_retaddr_bts;
++			} else if (!strcmp(argv[i].value, "or")) {
++				kernexec_instrument_fptr = kernexec_instrument_fptr_or;
++				kernexec_instrument_retaddr = kernexec_instrument_retaddr_or;
++				fix_register("r10", 1, 1);
++			} else
++				error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
++			continue;
++		}
++		error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
++	}
++	if (!kernexec_instrument_fptr || !kernexec_instrument_retaddr)
++		error(G_("no instrumentation method was selected via '-fplugin-arg-%s-method'"), plugin_name);
++
++	if (kernexec_instrument_fptr == kernexec_instrument_fptr_or)
++		register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_reload_pass_info);
++	register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_fptr_pass_info);
++	register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_retaddr_pass_info);
++
++	return 0;
++}
+--- tools/gcc/latent_entropy_plugin.c	1970-01-01 00:00:00.000000000 +0000
++++ tools/gcc/latent_entropy_plugin.c	2012-10-15 17:30:59.831924531 +0000
+@@ -0,0 +1,295 @@
++/*
++ * Copyright 2012 by the PaX Team
++ * Licensed under the GPL v2
++ *
++ * Note: the choice of the license means that the compilation process is
++ * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
++ * but for the
kernel it doesn't matter since it doesn't link against ++ * any of the gcc libraries ++ * ++ * gcc plugin to help generate a little bit of entropy from program state, ++ * used during boot in the kernel ++ * ++ * TODO: ++ * - add ipa pass to identify not explicitly marked candidate functions ++ * - mix in more program state (function arguments/return values, loop variables, etc) ++ * - more instrumentation control via attribute parameters ++ * ++ * BUGS: ++ * - LTO needs -flto-partition=none for now ++ */ ++#include "gcc-plugin.h" ++#include "config.h" ++#include "system.h" ++#include "coretypes.h" ++#include "tree.h" ++#include "tree-pass.h" ++#include "flags.h" ++#include "intl.h" ++#include "toplev.h" ++#include "plugin.h" ++//#include "expr.h" where are you... ++#include "diagnostic.h" ++#include "plugin-version.h" ++#include "tm.h" ++#include "function.h" ++#include "basic-block.h" ++#include "gimple.h" ++#include "rtl.h" ++#include "emit-rtl.h" ++#include "tree-flow.h" ++ ++int plugin_is_GPL_compatible; ++ ++static tree latent_entropy_decl; ++ ++static struct plugin_info latent_entropy_plugin_info = { ++ .version = "201207271820", ++ .help = NULL ++}; ++ ++static unsigned int execute_latent_entropy(void); ++static bool gate_latent_entropy(void); ++ ++static struct gimple_opt_pass latent_entropy_pass = { ++ .pass = { ++ .type = GIMPLE_PASS, ++ .name = "latent_entropy", ++ .gate = gate_latent_entropy, ++ .execute = execute_latent_entropy, ++ .sub = NULL, ++ .next = NULL, ++ .static_pass_number = 0, ++ .tv_id = TV_NONE, ++ .properties_required = PROP_gimple_leh | PROP_cfg, ++ .properties_provided = 0, ++ .properties_destroyed = 0, ++ .todo_flags_start = 0, //TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts, ++ .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_update_ssa ++ } ++}; ++ ++static tree handle_latent_entropy_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs) ++{ ++ if (TREE_CODE(*node) != FUNCTION_DECL) { ++ *no_add_attrs = true; ++ error("%qE attribute only applies to functions", name); ++ } ++ return NULL_TREE; ++} ++ ++static struct attribute_spec latent_entropy_attr = { ++ .name = "latent_entropy", ++ .min_length = 0, ++ .max_length = 0, ++ .decl_required = true, ++ .type_required = false, ++ .function_type_required = false, ++ .handler = handle_latent_entropy_attribute, ++#if BUILDING_GCC_VERSION >= 4007 ++ .affects_type_identity = false ++#endif ++}; ++ ++static void register_attributes(void *event_data, void *data) ++{ ++ register_attribute(&latent_entropy_attr); ++} ++ ++static bool gate_latent_entropy(void) ++{ ++ tree latent_entropy_attr; ++ ++ latent_entropy_attr = lookup_attribute("latent_entropy", DECL_ATTRIBUTES(current_function_decl)); ++ return latent_entropy_attr != NULL_TREE; ++} ++ ++static unsigned HOST_WIDE_INT seed; ++static unsigned HOST_WIDE_INT get_random_const(void) ++{ ++ seed = (seed >> 1U) ^ (-(seed & 1ULL) & 0xD800000000000000ULL); ++ return seed; ++} ++ ++static enum tree_code get_op(tree *rhs) ++{ ++ static enum tree_code op; ++ unsigned HOST_WIDE_INT random_const; ++ ++ random_const = get_random_const(); ++ ++ switch (op) { ++ case BIT_XOR_EXPR: ++ op = PLUS_EXPR; ++ break; ++ ++ case PLUS_EXPR: ++ if (rhs) { ++ op = LROTATE_EXPR; ++ random_const &= HOST_BITS_PER_WIDE_INT - 1; ++ break; ++ } ++ ++ case LROTATE_EXPR: ++ default: ++ op = BIT_XOR_EXPR; ++ break; ++ } ++ if (rhs) ++ *rhs = build_int_cstu(unsigned_intDI_type_node, random_const); ++ return op; ++} ++ ++static void 
perturb_local_entropy(basic_block bb, tree local_entropy) ++{ ++ gimple_stmt_iterator gsi; ++ gimple assign; ++ tree addxorrol, rhs; ++ enum tree_code op; ++ ++ op = get_op(&rhs); ++ addxorrol = fold_build2_loc(UNKNOWN_LOCATION, op, unsigned_intDI_type_node, local_entropy, rhs); ++ assign = gimple_build_assign(local_entropy, addxorrol); ++ find_referenced_vars_in(assign); ++//debug_bb(bb); ++ gsi = gsi_after_labels(bb); ++ gsi_insert_before(&gsi, assign, GSI_NEW_STMT); ++ update_stmt(assign); ++} ++ ++static void perturb_latent_entropy(basic_block bb, tree rhs) ++{ ++ gimple_stmt_iterator gsi; ++ gimple assign; ++ tree addxorrol, temp; ++ ++ // 1. create temporary copy of latent_entropy ++ temp = create_tmp_var(unsigned_intDI_type_node, "temp_latent_entropy"); ++ add_referenced_var(temp); ++ mark_sym_for_renaming(temp); ++ ++ // 2. read... ++ assign = gimple_build_assign(temp, latent_entropy_decl); ++ find_referenced_vars_in(assign); ++ gsi = gsi_after_labels(bb); ++ gsi_insert_after(&gsi, assign, GSI_NEW_STMT); ++ update_stmt(assign); ++ ++ // 3. ...modify... ++ addxorrol = fold_build2_loc(UNKNOWN_LOCATION, get_op(NULL), unsigned_intDI_type_node, temp, rhs); ++ assign = gimple_build_assign(temp, addxorrol); ++ find_referenced_vars_in(assign); ++ gsi_insert_after(&gsi, assign, GSI_NEW_STMT); ++ update_stmt(assign); ++ ++ // 4. ...write latent_entropy ++ assign = gimple_build_assign(latent_entropy_decl, temp); ++ find_referenced_vars_in(assign); ++ gsi_insert_after(&gsi, assign, GSI_NEW_STMT); ++ update_stmt(assign); ++} ++ ++static unsigned int execute_latent_entropy(void) ++{ ++ basic_block bb; ++ gimple assign; ++ gimple_stmt_iterator gsi; ++ tree local_entropy; ++ ++ if (!latent_entropy_decl) { ++ struct varpool_node *node; ++ ++ for (node = varpool_nodes; node; node = node->next) { ++ tree var = node->decl; ++ if (strcmp(IDENTIFIER_POINTER(DECL_NAME(var)), "latent_entropy")) ++ continue; ++ latent_entropy_decl = var; ++// debug_tree(var); ++ break; ++ } ++ if (!latent_entropy_decl) { ++// debug_tree(current_function_decl); ++ return 0; ++ } ++ } ++ ++//fprintf(stderr, "latent_entropy: %s\n", IDENTIFIER_POINTER(DECL_NAME(current_function_decl))); ++ ++ // 1. create local entropy variable ++ local_entropy = create_tmp_var(unsigned_intDI_type_node, "local_entropy"); ++ add_referenced_var(local_entropy); ++ mark_sym_for_renaming(local_entropy); ++ ++ // 2. initialize local entropy variable ++ bb = split_block_after_labels(ENTRY_BLOCK_PTR)->dest; ++ if (dom_info_available_p(CDI_DOMINATORS)) ++ set_immediate_dominator(CDI_DOMINATORS, bb, ENTRY_BLOCK_PTR); ++ gsi = gsi_start_bb(bb); ++ ++ assign = gimple_build_assign(local_entropy, build_int_cstu(unsigned_intDI_type_node, get_random_const())); ++// gimple_set_location(assign, loc); ++ find_referenced_vars_in(assign); ++ gsi_insert_after(&gsi, assign, GSI_NEW_STMT); ++ update_stmt(assign); ++ bb = bb->next_bb; ++ ++ // 3. instrument each BB with an operation on the local entropy variable ++ while (bb != EXIT_BLOCK_PTR) { ++ perturb_local_entropy(bb, local_entropy); ++ bb = bb->next_bb; ++ }; ++ ++ // 4. 
mix local entropy into the global entropy variable ++ perturb_latent_entropy(EXIT_BLOCK_PTR->prev_bb, local_entropy); ++ return 0; ++} ++ ++static void start_unit_callback(void *gcc_data, void *user_data) ++{ ++#if BUILDING_GCC_VERSION >= 4007 ++ seed = get_random_seed(false); ++#else ++ sscanf(get_random_seed(false), "%" HOST_WIDE_INT_PRINT "x", &seed); ++ seed *= seed; ++#endif ++ ++ if (in_lto_p) ++ return; ++ ++ // extern u64 latent_entropy ++ latent_entropy_decl = build_decl(UNKNOWN_LOCATION, VAR_DECL, get_identifier("latent_entropy"), unsigned_intDI_type_node); ++ ++ TREE_STATIC(latent_entropy_decl) = 1; ++ TREE_PUBLIC(latent_entropy_decl) = 1; ++ TREE_USED(latent_entropy_decl) = 1; ++ TREE_THIS_VOLATILE(latent_entropy_decl) = 1; ++ DECL_EXTERNAL(latent_entropy_decl) = 1; ++ DECL_ARTIFICIAL(latent_entropy_decl) = 0; ++ DECL_INITIAL(latent_entropy_decl) = NULL; ++// DECL_ASSEMBLER_NAME(latent_entropy_decl); ++// varpool_finalize_decl(latent_entropy_decl); ++// varpool_mark_needed_node(latent_entropy_decl); ++} ++ ++int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version) ++{ ++ const char * const plugin_name = plugin_info->base_name; ++ struct register_pass_info latent_entropy_pass_info = { ++ .pass = &latent_entropy_pass.pass, ++ .reference_pass_name = "optimized", ++ .ref_pass_instance_number = 1, ++ .pos_op = PASS_POS_INSERT_BEFORE ++ }; ++ ++ if (!plugin_default_version_check(version, &gcc_version)) { ++ error(G_("incompatible gcc/plugin versions")); ++ return 1; ++ } ++ ++ register_callback(plugin_name, PLUGIN_INFO, NULL, &latent_entropy_plugin_info); ++ register_callback ("start_unit", PLUGIN_START_UNIT, &start_unit_callback, NULL); ++ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &latent_entropy_pass_info); ++ register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL); ++ ++ return 0; ++} +--- tools/gcc/size_overflow_hash.data 1970-01-01 00:00:00.000000000 +0000 ++++ tools/gcc/size_overflow_hash.data 2012-10-15 17:30:59.834924531 +0000 +@@ -0,0 +1,3597 @@ ++_000001_hash alloc_dr 2 65495 _000001_hash NULL ++_000002_hash __copy_from_user 3 10918 _000002_hash NULL ++_000003_hash copy_from_user 3 17559 _000003_hash NULL ++_000004_hash __copy_from_user_inatomic 3 4365 _000004_hash NULL ++_000005_hash __copy_from_user_nocache 3 39351 _000005_hash NULL ++_000006_hash __copy_to_user_inatomic 3 19214 _000006_hash NULL ++_000007_hash do_xip_mapping_read 5 60297 _000007_hash NULL ++_000008_hash hugetlbfs_read 3 11268 _000008_hash NULL ++_000009_hash kmalloc 1 60432 _003302_hash NULL nohasharray ++_000010_hash kmalloc_array 1-2 9444 _000010_hash NULL ++_000012_hash __kmalloc_reserve 1 17080 _000012_hash NULL ++_000013_hash kmalloc_slab 1 11917 _000013_hash NULL ++_000014_hash kmemdup 2 64015 _000014_hash NULL ++_000015_hash __krealloc 2 14857 _000340_hash NULL nohasharray ++_000016_hash memdup_user 2 59590 _000016_hash NULL ++_000017_hash module_alloc 1 63630 _000017_hash NULL ++_000018_hash read_default_ldt 2 14302 _000018_hash NULL ++_000019_hash read_kcore 3 63488 _000019_hash NULL ++_000020_hash read_ldt 2 47570 _000020_hash NULL ++_000021_hash read_zero 3 19366 _000021_hash NULL ++_000022_hash __vmalloc_node 1 39308 _000022_hash NULL ++_000023_hash aac_convert_sgraw2 4 51598 _000023_hash NULL ++_000024_hash aa_simple_write_to_buffer 4-3 49683 _000024_hash NULL ++_000025_hash ablkcipher_copy_iv 3 64140 _000025_hash NULL ++_000026_hash ablkcipher_next_slow 3-4 47274 _000026_hash NULL ++_000028_hash 
acpi_battery_write_alarm 3 1240 _000028_hash NULL ++_000029_hash acpi_os_allocate 1 14892 _000029_hash NULL ++_000030_hash acpi_system_write_wakeup_device 3 34853 _000030_hash NULL ++_000031_hash adu_write 3 30487 _000031_hash NULL ++_000032_hash aer_inject_write 3 52399 _000032_hash NULL ++_000033_hash afs_alloc_flat_call 2-3 36399 _000033_hash NULL ++_000035_hash afs_proc_cells_write 3 61139 _000035_hash NULL ++_000036_hash afs_proc_rootcell_write 3 15822 _000036_hash NULL ++_000037_hash agp_3_5_isochronous_node_enable 3 49465 _000037_hash NULL ++_000038_hash agp_alloc_page_array 1 22554 _000038_hash NULL ++_000039_hash ah_alloc_tmp 2-3 54378 _000039_hash NULL ++_000041_hash ahash_setkey_unaligned 3 33521 _000041_hash NULL ++_000042_hash alg_setkey 3 31485 _000042_hash NULL ++_000043_hash aligned_kmalloc 1 3628 _000043_hash NULL ++_000044_hash alloc_context 1 3194 _000044_hash NULL ++_000045_hash alloc_ep_req 2 54860 _000045_hash NULL ++_000046_hash alloc_fdmem 1 27083 _000046_hash NULL ++_000047_hash alloc_flex_gd 1 57259 _000047_hash NULL ++_000048_hash alloc_sglist 1-3-2 22960 _000048_hash NULL ++_000049_hash __alloc_skb 1 23940 _000049_hash NULL ++_000050_hash aoedev_flush 2 44398 _000050_hash NULL ++_000051_hash append_to_buffer 3 63550 _000051_hash NULL ++_000052_hash asix_read_cmd 5 13245 _000052_hash NULL ++_000053_hash asix_write_cmd 5 58192 _000053_hash NULL ++_000054_hash at76_set_card_command 4 4471 _000054_hash NULL ++_000055_hash ath6kl_add_bss_if_needed 6 24317 _000055_hash NULL ++_000056_hash ath6kl_debug_roam_tbl_event 3 5224 _000056_hash NULL ++_000057_hash ath6kl_mgmt_powersave_ap 6 13791 _000057_hash NULL ++_000058_hash ath6kl_send_go_probe_resp 3 21113 _000058_hash NULL ++_000059_hash ath6kl_set_ap_probe_resp_ies 3 50539 _000059_hash NULL ++_000060_hash ath6kl_set_assoc_req_ies 3 43185 _000060_hash NULL ++_000061_hash ath6kl_wmi_bssinfo_event_rx 3 2275 _000061_hash NULL ++_000062_hash ath6kl_wmi_send_action_cmd 7 58860 _000062_hash NULL ++_000063_hash __ath6kl_wmi_send_mgmt_cmd 7 38971 _000063_hash NULL ++_000064_hash attach_hdlc_protocol 3 19986 _000064_hash NULL ++_000065_hash audio_write 4 54261 _001782_hash NULL nohasharray ++_000066_hash audit_unpack_string 3 13748 _000066_hash NULL ++_000067_hash ax25_setsockopt 5 42740 _000067_hash NULL ++_000068_hash b43_debugfs_write 3 34838 _000068_hash NULL ++_000069_hash b43legacy_debugfs_write 3 28556 _000069_hash NULL ++_000070_hash batadv_hash_new 1 40491 _000070_hash NULL ++_000071_hash batadv_orig_node_add_if 2 18433 _000071_hash NULL ++_000072_hash batadv_orig_node_del_if 2 4 _000072_hash NULL ++_000073_hash batadv_tt_changes_fill_buff 4 40323 _000073_hash NULL ++_000074_hash batadv_tt_realloc_packet_buff 4 49960 _000074_hash NULL ++_000075_hash bch_alloc 1 4593 _000075_hash NULL ++_000076_hash befs_nls2utf 3 17163 _000076_hash NULL ++_000077_hash befs_utf2nls 3 25628 _000077_hash NULL ++_000078_hash bfad_debugfs_write_regrd 3 15218 _000078_hash NULL ++_000079_hash bfad_debugfs_write_regwr 3 61841 _000079_hash NULL ++_000080_hash bio_alloc_map_data 1-2 50782 _000080_hash NULL ++_000082_hash bio_kmalloc 2 54672 _000082_hash NULL ++_000083_hash bitmap_storage_alloc 2 55077 _000083_hash NULL ++_000084_hash blkcipher_copy_iv 3 24075 _000084_hash NULL ++_000085_hash blkcipher_next_slow 3-4 52733 _000085_hash NULL ++_000087_hash bl_pipe_downcall 3 34264 _000087_hash NULL ++_000088_hash bnad_debugfs_write_regrd 3 6706 _000088_hash NULL ++_000089_hash bnad_debugfs_write_regwr 3 57500 _000089_hash NULL ++_000090_hash 
bnx2fc_cmd_mgr_alloc 2-3 24873 _000090_hash NULL ++_000092_hash bnx2fc_process_unsol_compl 2 15576 _000092_hash NULL ++_000093_hash bnx2_nvram_write 2-4 7790 _000093_hash NULL ++_000095_hash btmrvl_gpiogap_write 3 35053 _000095_hash NULL ++_000096_hash btmrvl_hscfgcmd_write 3 27143 _000096_hash NULL ++_000097_hash btmrvl_hscmd_write 3 27089 _000097_hash NULL ++_000098_hash btmrvl_hsmode_write 3 42252 _000098_hash NULL ++_000099_hash btmrvl_pscmd_write 3 29504 _000099_hash NULL ++_000100_hash btmrvl_psmode_write 3 3703 _000100_hash NULL ++_000101_hash btrfs_alloc_delayed_item 1 11678 _000101_hash NULL ++_000102_hash c4iw_id_table_alloc 3 48163 _000102_hash NULL ++_000103_hash cache_do_downcall 3 6926 _000103_hash NULL ++_000104_hash cachefiles_cook_key 2 33274 _000104_hash NULL ++_000105_hash cachefiles_daemon_write 3 43535 _000105_hash NULL ++_000106_hash capi_write 3 35104 _003607_hash NULL nohasharray ++_000107_hash carl9170_debugfs_write 3 50857 _000107_hash NULL ++_000108_hash cciss_allocate_sg_chain_blocks 2-3 5368 _000108_hash NULL ++_000110_hash cciss_proc_write 3 10259 _000110_hash NULL ++_000111_hash cdrom_read_cdda_old 4 27664 _000111_hash NULL ++_000112_hash ceph_alloc_page_vector 1 18710 _000112_hash NULL ++_000113_hash ceph_buffer_new 1 35974 _000113_hash NULL ++_000114_hash ceph_copy_user_to_page_vector 3-4 656 _000114_hash NULL ++_000116_hash ceph_get_direct_page_vector 2 41917 _000116_hash NULL ++_000117_hash ceph_msg_new 2 5846 _000117_hash NULL ++_000118_hash ceph_setxattr 4 18913 _000118_hash NULL ++_000119_hash cfi_read_pri 3 24366 _000119_hash NULL ++_000120_hash cgroup_write_string 5 10900 _000120_hash NULL ++_000121_hash cgroup_write_X64 5 54514 _000121_hash NULL ++_000122_hash change_xattr 5 61390 _000122_hash NULL ++_000123_hash check_load_and_stores 2 2143 _000123_hash NULL ++_000124_hash cifs_idmap_key_instantiate 3 54503 _000124_hash NULL ++_000125_hash cifs_security_flags_proc_write 3 5484 _000125_hash NULL ++_000126_hash cifs_setxattr 4 23957 _000126_hash NULL ++_000127_hash cifs_spnego_key_instantiate 3 23588 _000127_hash NULL ++_000128_hash cld_pipe_downcall 3 15058 _000128_hash NULL ++_000129_hash clear_refs_write 3 61904 _000129_hash NULL ++_000130_hash clusterip_proc_write 3 44729 _000130_hash NULL ++_000131_hash cm4040_write 3 58079 _000131_hash NULL ++_000132_hash cm_copy_private_data 2 3649 _000132_hash NULL ++_000133_hash cmm_write 3 2896 _000133_hash NULL ++_000134_hash cm_write 3 36858 _000134_hash NULL ++_000135_hash coda_psdev_write 3 1711 _000135_hash NULL ++_000136_hash codec_reg_read_file 3 36280 _000136_hash NULL ++_000137_hash command_file_write 3 31318 _000137_hash NULL ++_000138_hash command_write 3 58841 _000138_hash NULL ++_000139_hash comm_write 3 44537 _001714_hash NULL nohasharray ++_000140_hash concat_writev 3 21451 _000140_hash NULL ++_000141_hash copy_and_check 3 19089 _000141_hash NULL ++_000142_hash copy_from_user_toio 3 31966 _000142_hash NULL ++_000143_hash copy_items 6 50140 _000143_hash NULL ++_000144_hash copy_macs 4 45534 _000144_hash NULL ++_000145_hash __copy_to_user 3 17551 _000145_hash NULL ++_000146_hash copy_vm86_regs_from_user 3 45340 _000146_hash NULL ++_000147_hash core_sys_select 1 47494 _000147_hash NULL ++_000148_hash cosa_write 3 1774 _000148_hash NULL ++_000149_hash cp210x_set_config 4 46447 _000149_hash NULL ++_000150_hash create_entry 2 33479 _000150_hash NULL ++_000151_hash create_queues 2-3 9088 _000151_hash NULL ++_000153_hash create_xattr 5 54106 _000153_hash NULL ++_000154_hash create_xattr_datum 5 33356 
_003443_hash NULL nohasharray ++_000155_hash csum_partial_copy_fromiovecend 3-4 9957 _000155_hash NULL ++_000157_hash ctrl_out 3-5 8712 _000157_hash NULL ++_000159_hash cxacru_cm_get_array 4 4412 _000159_hash NULL ++_000160_hash cxgbi_alloc_big_mem 1 4707 _000160_hash NULL ++_000161_hash dac960_user_command_proc_write 3 3071 _000161_hash NULL ++_000162_hash datablob_format 2 39571 _002490_hash NULL nohasharray ++_000163_hash dccp_feat_clone_sp_val 3 11942 _000163_hash NULL ++_000164_hash dccp_setsockopt_ccid 4 30701 _000164_hash NULL ++_000165_hash dccp_setsockopt_cscov 2 37766 _000165_hash NULL ++_000166_hash dccp_setsockopt_service 4 65336 _000166_hash NULL ++_000167_hash ddebug_proc_write 3 18055 _000167_hash NULL ++_000168_hash dev_config 3 8506 _000168_hash NULL ++_000169_hash device_write 3 45156 _000169_hash NULL ++_000170_hash devm_kzalloc 2 4966 _000170_hash NULL ++_000171_hash devres_alloc 2 551 _000171_hash NULL ++_000172_hash dfs_file_write 3 41196 _000172_hash NULL ++_000173_hash direct_entry 3 38836 _000173_hash NULL ++_000174_hash dispatch_ioctl 2 32357 _000174_hash NULL ++_000175_hash dispatch_proc_write 3 44320 _000175_hash NULL ++_000176_hash diva_os_copy_from_user 4 7792 _000176_hash NULL ++_000177_hash dlm_alloc_pagevec 1 54296 _000177_hash NULL ++_000178_hash dlmfs_file_read 3 28385 _000178_hash NULL ++_000179_hash dlmfs_file_write 3 6892 _000179_hash NULL ++_000180_hash dm_read 3 15674 _000180_hash NULL ++_000181_hash dm_write 3 2513 _000181_hash NULL ++_000182_hash __dn_setsockopt 5 13060 _000182_hash NULL ++_000183_hash dns_query 3 9676 _000183_hash NULL ++_000184_hash dns_resolver_instantiate 3 63314 _000184_hash NULL ++_000185_hash do_add_counters 3 3992 _000185_hash NULL ++_000186_hash __do_config_autodelink 3 58763 _000186_hash NULL ++_000187_hash do_ip_setsockopt 5 41852 _000187_hash NULL ++_000188_hash do_ipv6_setsockopt 5 18215 _000188_hash NULL ++_000189_hash do_ip_vs_set_ctl 4 48641 _000189_hash NULL ++_000190_hash do_kimage_alloc 3 64827 _000190_hash NULL ++_000191_hash do_register_entry 4 29478 _000191_hash NULL ++_000192_hash do_tty_write 5 44896 _000192_hash NULL ++_000193_hash do_update_counters 4 2259 _000193_hash NULL ++_000194_hash dsp_write 2 46218 _000194_hash NULL ++_000195_hash dup_to_netobj 3 26363 _000195_hash NULL ++_000196_hash dwc3_link_state_write 3 12641 _000196_hash NULL ++_000197_hash dwc3_mode_write 3 51997 _000197_hash NULL ++_000198_hash dwc3_testmode_write 3 30516 _000198_hash NULL ++_000199_hash ecryptfs_copy_filename 4 11868 _000199_hash NULL ++_000200_hash ecryptfs_miscdev_write 3 26847 _000200_hash NULL ++_000201_hash ecryptfs_send_miscdev 2 64816 _000201_hash NULL ++_000202_hash efx_tsoh_heap_alloc 2 58545 _000202_hash NULL ++_000203_hash emi26_writememory 4 57908 _000203_hash NULL ++_000204_hash emi62_writememory 4 29731 _000204_hash NULL ++_000205_hash encrypted_instantiate 3 3168 _000205_hash NULL ++_000206_hash encrypted_update 3 13414 _000206_hash NULL ++_000207_hash ep0_write 3 14536 _001422_hash NULL nohasharray ++_000208_hash ep_read 3 58813 _000208_hash NULL ++_000209_hash ep_write 3 59008 _000209_hash NULL ++_000210_hash erst_dbg_write 3 46715 _000210_hash NULL ++_000211_hash esp_alloc_tmp 2-3 40558 _000211_hash NULL ++_000213_hash evdev_do_ioctl 2 24459 _000213_hash NULL ++_000214_hash exofs_read_lookup_dev_table 3 17733 _000214_hash NULL ++_000215_hash ext4_kvmalloc 1 14796 _000215_hash NULL ++_000216_hash ezusb_writememory 4 45976 _000216_hash NULL ++_000217_hash fanotify_write 3 64623 _000217_hash NULL 
++_000218_hash fd_copyin 3 56247 _000218_hash NULL ++_000219_hash ffs_epfile_io 3 64886 _000219_hash NULL ++_000220_hash ffs_prepare_buffer 2 59892 _000220_hash NULL ++_000221_hash f_hidg_write 3 7932 _000221_hash NULL ++_000222_hash file_read_actor 4 1401 _000222_hash NULL ++_000223_hash fill_write_buffer 3 3142 _000223_hash NULL ++_000224_hash __find_xattr 6 2117 _002425_hash NULL nohasharray ++_000225_hash fl_create 5 56435 _000225_hash NULL ++_000226_hash fs_path_ensure_buf 2 59445 _000226_hash NULL ++_000227_hash ftdi_elan_write 3 57309 _000227_hash NULL ++_000228_hash fw_iso_buffer_alloc 2 13704 _000228_hash NULL ++_000229_hash garmin_write_bulk 3 58191 _000229_hash NULL ++_000230_hash garp_attr_create 3 3883 _000230_hash NULL ++_000231_hash get_arg 3 5694 _000231_hash NULL ++_000232_hash getdqbuf 1 62908 _000232_hash NULL ++_000233_hash get_fdb_entries 3 41916 _000233_hash NULL ++_000234_hash get_fd_set 1 3866 _000234_hash NULL ++_000235_hash get_indirect_ea 4 51869 _000235_hash NULL ++_000236_hash get_registers 3 26187 _000236_hash NULL ++_000237_hash get_scq 2 10897 _000237_hash NULL ++_000238_hash get_server_iovec 2 16804 _000238_hash NULL ++_000239_hash get_ucode_user 3 38202 _000239_hash NULL ++_000240_hash get_user_cpu_mask 2 14861 _000240_hash NULL ++_000241_hash gfs2_alloc_sort_buffer 1 18275 _000241_hash NULL ++_000242_hash gfs2_glock_nq_m 1 20347 _000242_hash NULL ++_000243_hash gigaset_initcs 2 43753 _000243_hash NULL ++_000244_hash gigaset_initdriver 2 1060 _000244_hash NULL ++_000245_hash groups_alloc 1 7614 _000245_hash NULL ++_000246_hash gs_alloc_req 2 58883 _000246_hash NULL ++_000247_hash gs_buf_alloc 2 25067 _000247_hash NULL ++_000248_hash gsm_data_alloc 3 42437 _000248_hash NULL ++_000249_hash gss_pipe_downcall 3 23182 _000249_hash NULL ++_000250_hash handle_request 9 10024 _000250_hash NULL ++_000251_hash hashtab_create 3 33769 _000251_hash NULL ++_000252_hash hcd_buffer_alloc 2 27495 _000252_hash NULL ++_000253_hash hci_sock_setsockopt 5 28993 _000253_hash NULL ++_000254_hash heap_init 2 49617 _000254_hash NULL ++_000255_hash hest_ghes_dev_register 1 46766 _000255_hash NULL ++_000256_hash hidg_alloc_ep_req 2 10159 _000256_hash NULL ++_000257_hash hid_parse_report 3 51737 _000257_hash NULL ++_000258_hash hidraw_get_report 3 45609 _000258_hash NULL ++_000259_hash hidraw_report_event 3 20503 _000259_hash NULL ++_000260_hash hidraw_send_report 3 23449 _000260_hash NULL ++_000261_hash hpfs_translate_name 3 41497 _000261_hash NULL ++_000262_hash hysdn_conf_write 3 52145 _000262_hash NULL ++_000263_hash __i2400mu_send_barker 3 23652 _000263_hash NULL ++_000264_hash i2cdev_read 3 1206 _000264_hash NULL ++_000265_hash i2cdev_write 3 23310 _000265_hash NULL ++_000266_hash i2o_parm_field_get 5 34477 _000266_hash NULL ++_000267_hash i2o_parm_table_get 6 61635 _000267_hash NULL ++_000268_hash ib_copy_from_udata 3 59502 _000268_hash NULL ++_000269_hash ib_ucm_alloc_data 3 36885 _000269_hash NULL ++_000270_hash ib_umad_write 3 47993 _000270_hash NULL ++_000271_hash ib_uverbs_unmarshall_recv 5 12251 _000271_hash NULL ++_000272_hash icn_writecmd 2 38629 _000272_hash NULL ++_000273_hash ide_driver_proc_write 3 32493 _000273_hash NULL ++_000274_hash ide_settings_proc_write 3 35110 _000274_hash NULL ++_000275_hash idetape_chrdev_write 3 53976 _000275_hash NULL ++_000276_hash idmap_pipe_downcall 3 14591 _000276_hash NULL ++_000277_hash ieee80211_build_probe_req 7-5 27660 _000277_hash NULL ++_000278_hash ieee80211_if_write 3 34894 _000278_hash NULL ++_000279_hash if_write 3 51756 
_000279_hash NULL ++_000280_hash ilo_write 3 64378 _000280_hash NULL ++_000281_hash ima_write_policy 3 40548 _000281_hash NULL ++_000282_hash init_data_container 1 60709 _000282_hash NULL ++_000283_hash init_send_hfcd 1 34586 _000283_hash NULL ++_000284_hash insert_dent 7 65034 _000284_hash NULL ++_000285_hash interpret_user_input 2 19393 _000285_hash NULL ++_000286_hash int_proc_write 3 39542 _000286_hash NULL ++_000287_hash ioctl_private_iw_point 7 1273 _000287_hash NULL ++_000288_hash iov_iter_copy_from_user 4 31942 _000288_hash NULL ++_000289_hash iov_iter_copy_from_user_atomic 4 56368 _000289_hash NULL ++_000290_hash iowarrior_write 3 18604 _000290_hash NULL ++_000291_hash ipc_alloc 1 1192 _000291_hash NULL ++_000292_hash ipc_rcu_alloc 1 21208 _000292_hash NULL ++_000293_hash ip_options_get_from_user 4 64958 _000293_hash NULL ++_000294_hash ipv6_renew_option 3 38813 _000294_hash NULL ++_000295_hash ip_vs_conn_fill_param_sync 6 29771 _001898_hash NULL nohasharray ++_000296_hash ip_vs_create_timeout_table 2 64478 _000296_hash NULL ++_000297_hash ipw_queue_tx_init 3 49161 _000297_hash NULL ++_000298_hash irda_setsockopt 5 19824 _000298_hash NULL ++_000299_hash irias_new_octseq_value 2 13596 _003821_hash NULL nohasharray ++_000300_hash irnet_ctrl_write 3 24139 _000300_hash NULL ++_000301_hash isdn_add_channels 3 40905 _000301_hash NULL ++_000302_hash isdn_ppp_fill_rq 2 41428 _000302_hash NULL ++_000303_hash isdn_ppp_write 4 29109 _000303_hash NULL ++_000304_hash isdn_read 3 50021 _000304_hash NULL ++_000305_hash isdn_v110_open 3 2418 _000305_hash NULL ++_000306_hash isdn_writebuf_stub 4 52383 _000306_hash NULL ++_000307_hash islpci_mgt_transmit 5 34133 _000307_hash NULL ++_000308_hash iso_callback 3 43208 _000308_hash NULL ++_000309_hash iso_packets_buffer_init 3-4 29061 _000309_hash NULL ++_000310_hash it821x_firmware_command 3 8628 _000310_hash NULL ++_000311_hash iwch_alloc_fastreg_pbl 2 40153 _000311_hash NULL ++_000312_hash iwl_calib_set 3 34400 _003754_hash NULL nohasharray ++_000313_hash jbd2_journal_init_revoke_table 1 36336 _000313_hash NULL ++_000314_hash jffs2_alloc_full_dirent 1 60179 _001158_hash NULL nohasharray ++_000315_hash journal_init_revoke_table 1 56331 _000315_hash NULL ++_000316_hash kcalloc 1-2 27770 _000316_hash NULL ++_000318_hash keyctl_instantiate_key_common 4 47889 _000318_hash NULL ++_000319_hash keyctl_update_key 3 26061 _000319_hash NULL ++_000320_hash __kfifo_alloc 2-3 22173 _000320_hash NULL ++_000322_hash kfifo_copy_from_user 3 5091 _000322_hash NULL ++_000323_hash kmalloc_node 1 50163 _003818_hash NULL nohasharray ++_000324_hash kmalloc_parameter 1 65279 _000324_hash NULL ++_000325_hash kmem_alloc 1 31920 _000325_hash NULL ++_000326_hash kobj_map 2-3 9566 _000326_hash NULL ++_000328_hash kone_receive 4 4690 _000328_hash NULL ++_000329_hash kone_send 4 63435 _000329_hash NULL ++_000330_hash krealloc 2 14908 _000330_hash NULL ++_000331_hash kvmalloc 1 32646 _000331_hash NULL ++_000332_hash kvm_read_guest_atomic 4 10765 _000332_hash NULL ++_000333_hash kvm_read_guest_cached 4 39666 _000333_hash NULL ++_000334_hash kvm_read_guest_page 5 18074 _000334_hash NULL ++_000335_hash kzalloc 1 54740 _000335_hash NULL ++_000336_hash l2cap_sock_setsockopt 5 50207 _000336_hash NULL ++_000337_hash l2cap_sock_setsockopt_old 4 29346 _000337_hash NULL ++_000338_hash lane2_associate_req 4 45398 _000338_hash NULL ++_000339_hash lbs_debugfs_write 3 48413 _000339_hash NULL ++_000340_hash lcd_write 3 14857 _000340_hash &_000015_hash ++_000341_hash ldm_frag_add 2 5611 
_000341_hash NULL ++_000342_hash __lgread 4 31668 _000342_hash NULL ++_000343_hash libipw_alloc_txb 1-3-2 27579 _000343_hash NULL ++_000344_hash link_send_sections_long 4 46556 _000344_hash NULL ++_000345_hash listxattr 3 12769 _000345_hash NULL ++_000346_hash load_msg 2 95 _000346_hash NULL ++_000347_hash lpfc_debugfs_dif_err_write 3 17424 _000347_hash NULL ++_000348_hash lp_write 3 9511 _000348_hash NULL ++_000349_hash mb_cache_create 2 17307 _000349_hash NULL ++_000350_hash mce_write 3 26201 _000350_hash NULL ++_000351_hash mcs7830_get_reg 3 33308 _000351_hash NULL ++_000352_hash mcs7830_set_reg 3 31413 _000352_hash NULL ++_000353_hash memcpy_fromiovec 3 55247 _000353_hash NULL ++_000354_hash memcpy_fromiovecend 3-4 2707 _000354_hash NULL ++_000356_hash mempool_resize 2 47983 _002039_hash NULL nohasharray ++_000357_hash mem_rw 3 22085 _000357_hash NULL ++_000358_hash mgmt_control 3 7349 _000358_hash NULL ++_000359_hash mgmt_pending_add 5 46976 _000359_hash NULL ++_000360_hash mlx4_ib_alloc_fast_reg_page_list 2 46119 _000360_hash NULL ++_000361_hash mmc_alloc_sg 1 21504 _000361_hash NULL ++_000362_hash mmc_send_bus_test 4 18285 _000362_hash NULL ++_000363_hash mmc_send_cxd_data 5 38655 _000363_hash NULL ++_000364_hash module_alloc_update_bounds 1 47205 _000364_hash NULL ++_000365_hash move_addr_to_kernel 2 32673 _000365_hash NULL ++_000366_hash mpi_alloc_limb_space 1 23190 _000366_hash NULL ++_000367_hash mpi_resize 2 44674 _000367_hash NULL ++_000368_hash mptctl_getiocinfo 2 28545 _000368_hash NULL ++_000369_hash mtdchar_readoob 4 31200 _000369_hash NULL ++_000370_hash mtdchar_write 3 56831 _002122_hash NULL nohasharray ++_000371_hash mtdchar_writeoob 4 3393 _000371_hash NULL ++_000372_hash mtd_device_parse_register 5 5024 _000372_hash NULL ++_000373_hash mtf_test_write 3 18844 _000373_hash NULL ++_000374_hash mthca_alloc_icm_table 3-4 38268 _002459_hash NULL nohasharray ++_000376_hash mthca_alloc_init 2 21754 _000376_hash NULL ++_000377_hash mthca_array_init 2 39987 _000377_hash NULL ++_000378_hash mthca_buf_alloc 2 35861 _000378_hash NULL ++_000379_hash mtrr_write 3 59622 _000379_hash NULL ++_000380_hash musb_test_mode_write 3 33518 _000380_hash NULL ++_000381_hash mwifiex_get_common_rates 3 17131 _000381_hash NULL ++_000382_hash __mxt_write_reg 3 57326 _000382_hash NULL ++_000383_hash nand_bch_init 2-3 16280 _001439_hash NULL nohasharray ++_000385_hash ncp_file_write 3 3813 _000385_hash NULL ++_000386_hash ncp__vol2io 5 4804 _000386_hash NULL ++_000387_hash nes_alloc_fast_reg_page_list 2 33523 _000387_hash NULL ++_000388_hash nfc_targets_found 3 29886 _000388_hash NULL ++_000389_hash __nf_ct_ext_add_length 3 12364 _000389_hash NULL ++_000390_hash nfs4_acl_new 1 49806 _000390_hash NULL ++_000391_hash nfs4_write_cached_acl 4 15070 _000391_hash NULL ++_000392_hash nfsd_symlink 6 63442 _000392_hash NULL ++_000393_hash nfs_idmap_get_desc 2-4 42990 _000393_hash NULL ++_000395_hash nfs_readdir_make_qstr 3 12509 _000395_hash NULL ++_000396_hash note_last_dentry 3 12285 _000396_hash NULL ++_000397_hash ntfs_copy_from_user 3-5 15072 _000397_hash NULL ++_000399_hash __ntfs_copy_from_user_iovec_inatomic 3-4 38153 _000399_hash NULL ++_000401_hash ntfs_ucstonls 3-5 23097 _000401_hash NULL ++_000403_hash nvme_alloc_iod 1 56027 _000403_hash NULL ++_000404_hash nvram_write 3 3894 _000404_hash NULL ++_000405_hash o2hb_debug_create 4 18744 _000405_hash NULL ++_000406_hash o2net_send_message_vec 4 879 _002013_hash NULL nohasharray ++_000407_hash ocfs2_control_cfu 2 37750 _000407_hash NULL ++_000408_hash 
oom_adjust_write 3 41116 _000408_hash NULL ++_000409_hash oom_score_adj_write 3 42594 _000409_hash NULL ++_000410_hash oprofilefs_ulong_from_user 3 57251 _000410_hash NULL ++_000411_hash opticon_write 4 60775 _000411_hash NULL ++_000412_hash p9_check_zc_errors 4 15534 _000412_hash NULL ++_000413_hash packet_buffer_init 2 1607 _000413_hash NULL ++_000414_hash packet_setsockopt 5 17662 _000414_hash NULL ++_000415_hash parse_command 2 37079 _000415_hash NULL ++_000416_hash pcbit_writecmd 2 12332 _000416_hash NULL ++_000417_hash pcmcia_replace_cis 3 57066 _000417_hash NULL ++_000418_hash pgctrl_write 3 50453 _000418_hash NULL ++_000419_hash pg_write 3 40766 _000419_hash NULL ++_000420_hash pidlist_allocate 1 64404 _000420_hash NULL ++_000421_hash pipe_iov_copy_from_user 3 23102 _000421_hash NULL ++_000422_hash pipe_iov_copy_to_user 3 3447 _000422_hash NULL ++_000423_hash pkt_add 3 39897 _000423_hash NULL ++_000424_hash pktgen_if_write 3 55628 _000424_hash NULL ++_000425_hash platform_device_add_data 3 310 _000425_hash NULL ++_000426_hash platform_device_add_resources 3 13289 _000426_hash NULL ++_000427_hash pmcraid_copy_sglist 3 38431 _000427_hash NULL ++_000428_hash pm_qos_power_write 3 52513 _000428_hash NULL ++_000429_hash pnpbios_proc_write 3 19758 _000429_hash NULL ++_000430_hash pool_allocate 3 42012 _000430_hash NULL ++_000431_hash posix_acl_alloc 1 48063 _000431_hash NULL ++_000432_hash ppp_cp_parse_cr 4 5214 _000432_hash NULL ++_000433_hash ppp_write 3 34034 _000433_hash NULL ++_000434_hash pp_read 3 33210 _000434_hash NULL ++_000435_hash pp_write 3 39554 _000435_hash NULL ++_000436_hash printer_req_alloc 2 62687 _000436_hash NULL ++_000437_hash printer_write 3 60276 _000437_hash NULL ++_000438_hash prism2_info_scanresults 3 59729 _000438_hash NULL ++_000439_hash prism2_set_genericelement 3 29277 _000439_hash NULL ++_000440_hash __probe_kernel_read 3 61119 _000440_hash NULL ++_000441_hash __probe_kernel_write 3 29842 _000441_hash NULL ++_000442_hash proc_coredump_filter_write 3 25625 _000442_hash NULL ++_000443_hash _proc_do_string 2 6376 _000443_hash NULL ++_000444_hash process_vm_rw_pages 5-6 15954 _000444_hash NULL ++_000446_hash proc_loginuid_write 3 63648 _000446_hash NULL ++_000447_hash proc_pid_attr_write 3 63845 _000447_hash NULL ++_000448_hash proc_scsi_devinfo_write 3 32064 _000448_hash NULL ++_000449_hash proc_scsi_write 3 29142 _000449_hash NULL ++_000450_hash proc_scsi_write_proc 3 267 _000450_hash NULL ++_000451_hash pskb_expand_head 2-3 42881 _000451_hash NULL ++_000453_hash pstore_mkfile 5 50830 _000453_hash NULL ++_000454_hash pti_char_write 3 60960 _000454_hash NULL ++_000455_hash ptrace_writedata 4 45021 _000455_hash NULL ++_000456_hash pt_write 3 40159 _000456_hash NULL ++_000457_hash qdisc_class_hash_alloc 1 18262 _000457_hash NULL ++_000458_hash r3964_write 4 57662 _000458_hash NULL ++_000459_hash raw_seticmpfilter 3 6888 _000459_hash NULL ++_000460_hash raw_setsockopt 5 45800 _000460_hash NULL ++_000461_hash rawv6_seticmpfilter 5 12137 _000461_hash NULL ++_000462_hash ray_cs_essid_proc_write 3 17875 _000462_hash NULL ++_000463_hash rbd_add 3 16366 _000463_hash NULL ++_000464_hash rbd_snap_add 4 19678 _000464_hash NULL ++_000465_hash rdma_set_ib_paths 3 45592 _000465_hash NULL ++_000466_hash rds_page_copy_user 4 35691 _000466_hash NULL ++_000467_hash read 3 9397 _000467_hash NULL ++_000468_hash read_buf 2 20469 _000468_hash NULL ++_000469_hash read_cis_cache 4 29735 _000469_hash NULL ++_000470_hash realloc_buffer 2 25816 _000470_hash NULL ++_000471_hash 
receive_DataRequest 3 9904 _000471_hash NULL ++_000472_hash recent_mt_proc_write 3 8206 _000472_hash NULL ++_000473_hash regmap_access_read_file 3 37223 _000473_hash NULL ++_000474_hash regmap_bulk_write 4 59049 _000474_hash NULL ++_000475_hash regmap_map_read_file 3 37685 _000475_hash NULL ++_000476_hash regset_tls_set 4 18459 _000476_hash NULL ++_000477_hash reiserfs_add_entry 4 23062 _002792_hash NULL nohasharray ++_000478_hash remote_settings_file_write 3 22987 _000478_hash NULL ++_000479_hash request_key_auth_new 3 38092 _000479_hash NULL ++_000480_hash restore_i387_fxsave 2 17528 _000480_hash NULL ++_000481_hash revalidate 2 19043 _000481_hash NULL ++_000482_hash rfcomm_sock_setsockopt 5 18254 _000482_hash NULL ++_000483_hash rndis_add_response 2 58544 _000483_hash NULL ++_000484_hash rndis_set_oid 4 6547 _000484_hash NULL ++_000485_hash rngapi_reset 3 34366 _002137_hash NULL nohasharray ++_000486_hash roccat_common2_receive 4 50369 _000486_hash NULL ++_000487_hash roccat_common2_send 4 2422 _000487_hash NULL ++_000488_hash rpc_malloc 2 43573 _000488_hash NULL ++_000489_hash rt2x00debug_write_bbp 3 8212 _000489_hash NULL ++_000490_hash rt2x00debug_write_csr 3 64753 _000490_hash NULL ++_000491_hash rt2x00debug_write_eeprom 3 23091 _000491_hash NULL ++_000492_hash rt2x00debug_write_rf 3 38195 _000492_hash NULL ++_000493_hash rt2x00debug_write_rfcsr 3 41473 _000493_hash NULL ++_000494_hash rts51x_read_mem 4 26577 _002730_hash NULL nohasharray ++_000495_hash rts51x_read_status 4 11830 _000495_hash NULL ++_000496_hash rts51x_write_mem 4 17598 _000496_hash NULL ++_000497_hash rw_copy_check_uvector 3 45748 _003716_hash NULL nohasharray ++_000498_hash rxrpc_request_key 3 27235 _000498_hash NULL ++_000499_hash rxrpc_server_keyring 3 16431 _000499_hash NULL ++_000500_hash savemem 3 58129 _000500_hash NULL ++_000501_hash sb16_copy_from_user 10-7-6 55836 _000501_hash NULL ++_000504_hash sched_autogroup_write 3 10984 _000504_hash NULL ++_000505_hash scsi_mode_select 6 37330 _000505_hash NULL ++_000506_hash scsi_tgt_copy_sense 3 26933 _000506_hash NULL ++_000507_hash sctp_auth_create_key 1 51641 _000507_hash NULL ++_000508_hash sctp_getsockopt_delayed_ack 2 9232 _000508_hash NULL ++_000509_hash sctp_getsockopt_local_addrs 2 25178 _000509_hash NULL ++_000510_hash sctp_make_abort_user 3 29654 _000510_hash NULL ++_000511_hash sctp_setsockopt_active_key 3 43755 _000511_hash NULL ++_000512_hash sctp_setsockopt_adaptation_layer 3 26935 _003246_hash NULL nohasharray ++_000513_hash sctp_setsockopt_associnfo 3 51684 _000513_hash NULL ++_000514_hash sctp_setsockopt_auth_chunk 3 30843 _000514_hash NULL ++_000515_hash sctp_setsockopt_auth_key 3 3793 _000515_hash NULL ++_000516_hash sctp_setsockopt_autoclose 3 5775 _000516_hash NULL ++_000517_hash sctp_setsockopt_bindx 3 49870 _000517_hash NULL ++_000518_hash __sctp_setsockopt_connectx 3 46949 _000518_hash NULL ++_000519_hash sctp_setsockopt_context 3 31091 _000519_hash NULL ++_000520_hash sctp_setsockopt_default_send_param 3 49578 _000520_hash NULL ++_000521_hash sctp_setsockopt_delayed_ack 3 40129 _000521_hash NULL ++_000522_hash sctp_setsockopt_del_key 3 42304 _002709_hash NULL nohasharray ++_000523_hash sctp_setsockopt_events 3 18862 _000523_hash NULL ++_000524_hash sctp_setsockopt_hmac_ident 3 11687 _000524_hash NULL ++_000525_hash sctp_setsockopt_initmsg 3 1383 _000525_hash NULL ++_000526_hash sctp_setsockopt_maxburst 3 28041 _000526_hash NULL ++_000527_hash sctp_setsockopt_maxseg 3 11829 _000527_hash NULL ++_000528_hash sctp_setsockopt_peer_addr_params 
3 734 _000528_hash NULL ++_000529_hash sctp_setsockopt_peer_primary_addr 3 13440 _000529_hash NULL ++_000530_hash sctp_setsockopt_rtoinfo 3 30941 _000530_hash NULL ++_000531_hash security_context_to_sid_core 2 29248 _000531_hash NULL ++_000532_hash sel_commit_bools_write 3 46077 _000532_hash NULL ++_000533_hash sel_write_avc_cache_threshold 3 2256 _000533_hash NULL ++_000534_hash sel_write_bool 3 46996 _000534_hash NULL ++_000535_hash sel_write_checkreqprot 3 60774 _000535_hash NULL ++_000536_hash sel_write_disable 3 10511 _000536_hash NULL ++_000537_hash sel_write_enforce 3 48998 _000537_hash NULL ++_000538_hash sel_write_load 3 63830 _000538_hash NULL ++_000539_hash send_bulk_static_data 3 61932 _000539_hash NULL ++_000540_hash set_aoe_iflist 2 42737 _000540_hash NULL ++_000541_hash setkey_unaligned 3 39474 _000541_hash NULL ++_000542_hash set_registers 3 53582 _000542_hash NULL ++_000543_hash setsockopt 5 54539 _000543_hash NULL ++_000544_hash setup_req 3 5848 _000544_hash NULL ++_000545_hash setxattr 4 37006 _000545_hash NULL ++_000546_hash sfq_alloc 1 2861 _000546_hash NULL ++_000547_hash sg_kmalloc 1 50240 _000547_hash NULL ++_000548_hash sgl_map_user_pages 2 30610 _000548_hash NULL ++_000549_hash shash_setkey_unaligned 3 8620 _000549_hash NULL ++_000550_hash shmem_xattr_alloc 2 61190 _000550_hash NULL ++_000551_hash sierra_setup_urb 5 46029 _000551_hash NULL ++_000552_hash simple_transaction_get 3 50633 _000552_hash NULL ++_000553_hash simple_write_to_buffer 2-5 3122 _000553_hash NULL ++_000555_hash sisusb_send_bulk_msg 3 17864 _000555_hash NULL ++_000556_hash skb_add_data 3 48363 _000556_hash NULL ++_000557_hash skb_do_copy_data_nocache 5 12465 _000557_hash NULL ++_000558_hash sl_alloc_bufs 2 50380 _000558_hash NULL ++_000559_hash sl_realloc_bufs 2 64086 _000559_hash NULL ++_000560_hash smk_set_cipso 3 20379 _000560_hash NULL ++_000561_hash smk_write_ambient 3 45691 _000561_hash NULL ++_000562_hash smk_write_direct 3 46363 _000562_hash NULL ++_000563_hash smk_write_doi 3 49621 _000563_hash NULL ++_000564_hash smk_write_logging 3 2618 _000564_hash NULL ++_000565_hash smk_write_mapped 3 13519 _000565_hash NULL ++_000566_hash smk_write_netlbladdr 3 42525 _000566_hash NULL ++_000567_hash smk_write_onlycap 3 14400 _000567_hash NULL ++_000568_hash smk_write_rules_list 3 18565 _000568_hash NULL ++_000569_hash snd_ctl_elem_user_tlv 3 11695 _000569_hash NULL ++_000570_hash snd_emu10k1_fx8010_read 5 9605 _000570_hash NULL ++_000571_hash snd_emu10k1_synth_copy_from_user 3-5 9061 _000571_hash NULL ++_000573_hash snd_gus_dram_poke 4 18525 _000573_hash NULL ++_000574_hash snd_hdsp_playback_copy 5 20676 _000574_hash NULL ++_000575_hash snd_info_entry_write 3 63474 _000575_hash NULL ++_000576_hash snd_korg1212_copy_from 6 36169 _000576_hash NULL ++_000577_hash snd_mem_proc_write 3 9786 _000577_hash NULL ++_000578_hash snd_midi_channel_init_set 1 30092 _000578_hash NULL ++_000579_hash snd_midi_event_new 1 9893 _000764_hash NULL nohasharray ++_000580_hash snd_opl4_mem_proc_write 5 9670 _000580_hash NULL ++_000581_hash snd_pcm_aio_read 3 13900 _000581_hash NULL ++_000582_hash snd_pcm_aio_write 3 28738 _000582_hash NULL ++_000583_hash snd_pcm_oss_write1 3 10872 _000583_hash NULL ++_000584_hash snd_pcm_oss_write2 3 27332 _000584_hash NULL ++_000585_hash snd_rawmidi_kernel_write1 4 56847 _000585_hash NULL ++_000586_hash snd_rme9652_playback_copy 5 20970 _000586_hash NULL ++_000587_hash snd_sb_csp_load_user 3 45190 _000587_hash NULL ++_000588_hash snd_usb_ctl_msg 8 8436 _000588_hash NULL ++_000589_hash 
sock_bindtodevice 3 50942 _000589_hash NULL ++_000590_hash sock_kmalloc 2 62205 _000590_hash NULL ++_000591_hash spidev_ioctl 2 12846 _000591_hash NULL ++_000592_hash spidev_write 3 44510 _000592_hash NULL ++_000593_hash squashfs_read_table 3 16945 _000593_hash NULL ++_000594_hash srpt_alloc_ioctx 2-3 51042 _000594_hash NULL ++_000596_hash srpt_alloc_ioctx_ring 2-4-3 49330 _000596_hash NULL ++_000597_hash st5481_setup_isocpipes 6-4 61340 _000597_hash NULL ++_000598_hash sta_agg_status_write 3 45164 _000598_hash NULL ++_000599_hash svc_setsockopt 5 36876 _000599_hash NULL ++_000600_hash sys_add_key 4 61288 _000600_hash NULL ++_000601_hash sys_modify_ldt 3 18824 _000601_hash NULL ++_000602_hash sys_semtimedop 3 4486 _000602_hash NULL ++_000603_hash sys_setdomainname 2 4373 _000603_hash NULL ++_000604_hash sys_sethostname 2 42962 _000604_hash NULL ++_000605_hash tomoyo_write_self 3 45161 _000605_hash NULL ++_000606_hash tower_write 3 8580 _000606_hash NULL ++_000607_hash tpm_write 3 50798 _000607_hash NULL ++_000608_hash trusted_instantiate 3 4710 _000608_hash NULL ++_000609_hash trusted_update 3 12664 _000609_hash NULL ++_000610_hash tty_buffer_alloc 2 45437 _000610_hash NULL ++_000611_hash __tun_chr_ioctl 4 22300 _000611_hash NULL ++_000612_hash ubi_more_leb_change_data 4 63534 _000612_hash NULL ++_000613_hash ubi_more_update_data 4 39189 _000613_hash NULL ++_000614_hash ubi_resize_volume 2 50172 _000614_hash NULL ++_000615_hash udf_alloc_i_data 2 35786 _000615_hash NULL ++_000616_hash uea_idma_write 3 64139 _000616_hash NULL ++_000617_hash uea_request 4 47613 _000617_hash NULL ++_000618_hash uea_send_modem_cmd 3 3888 _000618_hash NULL ++_000619_hash uio_write 3 43202 _000619_hash NULL ++_000620_hash um_idi_write 3 18293 _000620_hash NULL ++_000621_hash us122l_ctl_msg 8 13330 _000621_hash NULL ++_000622_hash usb_alloc_urb 1 43436 _000622_hash NULL ++_000623_hash usblp_new_writeurb 2 22894 _000623_hash NULL ++_000624_hash usblp_write 3 23178 _000624_hash NULL ++_000625_hash usbtest_alloc_urb 3-5 34446 _000625_hash NULL ++_000627_hash usbtmc_write 3 64340 _000627_hash NULL ++_000628_hash user_instantiate 3 26131 _000628_hash NULL ++_000629_hash user_update 3 41332 _000629_hash NULL ++_000630_hash uwb_rc_cmd_done 4 35892 _000630_hash NULL ++_000631_hash uwb_rc_neh_grok_event 3 55799 _000631_hash NULL ++_000632_hash v9fs_alloc_rdir_buf 2 42150 _000632_hash NULL ++_000633_hash vc_do_resize 3-4 48842 _000633_hash NULL ++_000635_hash vcs_write 3 3910 _000635_hash NULL ++_000636_hash vga_arb_write 3 36112 _000636_hash NULL ++_000637_hash vga_switcheroo_debugfs_write 3 33984 _000637_hash NULL ++_000638_hash vhci_get_user 3 45039 _000638_hash NULL ++_000639_hash video_proc_write 3 6724 _000639_hash NULL ++_000640_hash vlsi_alloc_ring 3-4 57003 _000640_hash NULL ++_000642_hash __vmalloc 1 61168 _000642_hash NULL ++_000643_hash vmalloc_32 1 1135 _000643_hash NULL ++_000644_hash vmalloc_32_user 1 37519 _000644_hash NULL ++_000645_hash vmalloc_exec 1 36132 _000645_hash NULL ++_000646_hash vmalloc_node 1 58700 _000646_hash NULL ++_000647_hash __vmalloc_node_flags 1 30352 _000647_hash NULL ++_000648_hash vmalloc_user 1 32308 _000648_hash NULL ++_000649_hash vol_cdev_direct_write 3 20751 _000649_hash NULL ++_000650_hash vp_request_msix_vectors 2 28849 _000650_hash NULL ++_000651_hash vring_add_indirect 3-4 20737 _000651_hash NULL ++_000653_hash vring_new_virtqueue 1 9671 _000653_hash NULL ++_000654_hash vxge_os_dma_malloc 2 46184 _000654_hash NULL ++_000655_hash vxge_os_dma_malloc_async 3 56348 _000655_hash 
NULL ++_000656_hash wdm_write 3 53735 _000656_hash NULL ++_000657_hash wiimote_hid_send 3 48528 _000657_hash NULL ++_000658_hash wlc_phy_loadsampletable_nphy 3 64367 _000658_hash NULL ++_000659_hash write 3 62671 _000659_hash NULL ++_000660_hash write_flush 3 50803 _000660_hash NULL ++_000661_hash write_rio 3 54837 _000661_hash NULL ++_000662_hash x25_asy_change_mtu 2 26928 _000662_hash NULL ++_000663_hash xdi_copy_from_user 4 8395 _000663_hash NULL ++_000664_hash xfrm_dst_alloc_copy 3 3034 _000664_hash NULL ++_000665_hash xfrm_user_policy 4 62573 _000665_hash NULL ++_000666_hash xfs_attrmulti_attr_set 4 59346 _000666_hash NULL ++_000667_hash xfs_handle_to_dentry 3 12135 _000667_hash NULL ++_000668_hash xip_file_read 3 58592 _000668_hash NULL ++_000669_hash __xip_file_write 3-4 2733 _000669_hash NULL ++_000671_hash xprt_rdma_allocate 2 31372 _000671_hash NULL ++_000672_hash zd_usb_iowrite16v_async 3 23984 _000672_hash NULL ++_000673_hash zd_usb_read_fw 4 22049 _000673_hash NULL ++_000674_hash zerocopy_sg_from_iovec 3 11828 _000674_hash NULL ++_000675_hash __a2mp_build 3 60987 _000675_hash NULL ++_000677_hash acpi_ex_allocate_name_string 2-1 7685 _001169_hash NULL nohasharray ++_000678_hash acpi_os_allocate_zeroed 1 37422 _000678_hash NULL ++_000679_hash acpi_ut_initialize_buffer 2 47143 _002830_hash NULL nohasharray ++_000680_hash ad7879_spi_xfer 3 36311 _000680_hash NULL ++_000681_hash add_new_gdb 3 27643 _000681_hash NULL ++_000682_hash add_numbered_child 5 14273 _000682_hash NULL ++_000683_hash add_res_range 4 21310 _000683_hash NULL ++_000684_hash addtgt 3 54703 _000684_hash NULL ++_000685_hash add_uuid 4 49831 _000685_hash NULL ++_000686_hash afs_cell_alloc 2 24052 _000686_hash NULL ++_000687_hash aggr_recv_addba_req_evt 4 38037 _000687_hash NULL ++_000688_hash agp_create_memory 1 1075 _000688_hash NULL ++_000689_hash agp_create_user_memory 1 62955 _000689_hash NULL ++_000690_hash alg_setsockopt 5 20985 _000690_hash NULL ++_000691_hash alloc_async 1 14208 _000691_hash NULL ++_000692_hash ___alloc_bootmem_nopanic 1 53626 _000692_hash NULL ++_000693_hash alloc_buf 1 34532 _000693_hash NULL ++_000694_hash alloc_chunk 1 49575 _000694_hash NULL ++_000695_hash alloc_context 1 41283 _000695_hash NULL ++_000696_hash alloc_ctrl_packet 1 44667 _000696_hash NULL ++_000697_hash alloc_data_packet 1 46698 _000697_hash NULL ++_000698_hash alloc_dca_provider 2 59670 _000698_hash NULL ++_000699_hash __alloc_dev_table 2 54343 _000699_hash NULL ++_000700_hash alloc_ep 1 17269 _000700_hash NULL ++_000701_hash __alloc_extent_buffer 3 15093 _000701_hash NULL ++_000702_hash alloc_group_attrs 2 9194 _000727_hash NULL nohasharray ++_000703_hash alloc_large_system_hash 2 22391 _000703_hash NULL ++_000704_hash alloc_netdev_mqs 1 30030 _000704_hash NULL ++_000705_hash __alloc_objio_seg 1 7203 _000705_hash NULL ++_000706_hash alloc_ring 2-4 15345 _000706_hash NULL ++_000707_hash alloc_ring 2-4 39151 _000707_hash NULL ++_000710_hash alloc_session 1-2 64171 _000710_hash NULL ++_000714_hash alloc_skb 1 55439 _000714_hash NULL ++_000715_hash alloc_skb_fclone 1 3467 _000715_hash NULL ++_000716_hash alloc_smp_req 1 51337 _000716_hash NULL ++_000717_hash alloc_smp_resp 1 3566 _000717_hash NULL ++_000718_hash alloc_ts_config 1 45775 _000718_hash NULL ++_000719_hash alloc_upcall 2 62186 _000719_hash NULL ++_000720_hash altera_drscan 2 48698 _000720_hash NULL ++_000721_hash altera_irscan 2 62396 _000721_hash NULL ++_000722_hash altera_set_dr_post 2 54291 _000722_hash NULL ++_000723_hash altera_set_dr_pre 2 64862 
_000723_hash NULL ++_000724_hash altera_set_ir_post 2 20948 _000724_hash NULL ++_000725_hash altera_set_ir_pre 2 54103 _000725_hash NULL ++_000726_hash altera_swap_dr 2 50090 _000726_hash NULL ++_000727_hash altera_swap_ir 2 9194 _000727_hash &_000702_hash ++_000728_hash amd_create_gatt_pages 1 20537 _000728_hash NULL ++_000729_hash aoechr_write 3 62883 _003674_hash NULL nohasharray ++_000730_hash applesmc_create_nodes 2 49392 _000730_hash NULL ++_000731_hash array_zalloc 1-2 7519 _000731_hash NULL ++_000733_hash arvo_sysfs_read 6 31617 _000733_hash NULL ++_000734_hash arvo_sysfs_write 6 3311 _000734_hash NULL ++_000735_hash asd_store_update_bios 4 10165 _000735_hash NULL ++_000736_hash ata_host_alloc 2 46094 _000736_hash NULL ++_000737_hash atalk_sendmsg 4 21677 _000737_hash NULL ++_000738_hash ath6kl_cfg80211_connect_event 7-9-8 13443 _000738_hash NULL ++_000739_hash ath6kl_mgmt_tx 9 21153 _000739_hash NULL ++_000740_hash ath6kl_wmi_proc_events_vif 5 42549 _003190_hash NULL nohasharray ++_000741_hash ath6kl_wmi_roam_tbl_event_rx 3 43440 _000741_hash NULL ++_000742_hash ath6kl_wmi_send_mgmt_cmd 7 17347 _000742_hash NULL ++_000743_hash ath_descdma_setup 5 12257 _000743_hash NULL ++_000744_hash ath_rx_edma_init 2 65483 _000744_hash NULL ++_000745_hash ati_create_gatt_pages 1 4722 _003275_hash NULL nohasharray ++_000746_hash audit_expand 2 2098 _000746_hash NULL ++_000747_hash audit_init_entry 1 38644 _000747_hash NULL ++_000748_hash ax25_sendmsg 4 62770 _000748_hash NULL ++_000749_hash b1_alloc_card 1 36155 _000749_hash NULL ++_000750_hash b43_nphy_load_samples 3 36481 _000750_hash NULL ++_000751_hash batadv_orig_hash_add_if 2 10033 _000751_hash NULL ++_000752_hash batadv_orig_hash_del_if 2 48972 _000752_hash NULL ++_000753_hash batadv_tt_append_diff 4 20588 _000753_hash NULL ++_000754_hash batadv_tt_commit_changes 4 2008 _000754_hash NULL ++_000755_hash batadv_tt_prepare_packet_buff 4 1280 _000755_hash NULL ++_000756_hash bio_copy_user_iov 4 37660 _000756_hash NULL ++_000757_hash __bio_map_kern 3 47379 _000757_hash NULL ++_000758_hash bitmap_resize 2 33054 _000758_hash NULL ++_000759_hash blk_check_plugged 3 50736 _000759_hash NULL ++_000760_hash blk_register_region 1-2 51424 _000760_hash NULL ++_000762_hash bm_entry_write 3 28338 _000762_hash NULL ++_000763_hash bm_realloc_pages 2 9431 _000763_hash NULL ++_000764_hash bm_register_write 3 9893 _000764_hash &_000579_hash ++_000765_hash bm_status_write 3 12964 _000765_hash NULL ++_000766_hash br_mdb_rehash 2 42643 _000766_hash NULL ++_000767_hash btmrvl_sdio_host_to_card 3 12152 _000767_hash NULL ++_000768_hash btrfs_copy_from_user 1-3 43806 _000768_hash NULL ++_000770_hash btrfs_insert_delayed_dir_index 4 63720 _000770_hash NULL ++_000771_hash __btrfs_map_block 3 49839 _000771_hash NULL ++_000772_hash c4iw_init_resource 2-3 30393 _000772_hash NULL ++_000774_hash cache_downcall 3 13666 _000774_hash NULL ++_000775_hash cache_slow_downcall 2 8570 _000775_hash NULL ++_000776_hash caif_seqpkt_sendmsg 4 22961 _000776_hash NULL ++_000777_hash caif_stream_sendmsg 4 9110 _000777_hash NULL ++_000778_hash carl9170_cmd_buf 3 950 _000778_hash NULL ++_000779_hash cdev_add 2-3 38176 _000779_hash NULL ++_000781_hash cdrom_read_cdda 4 50478 _000781_hash NULL ++_000782_hash ceph_dns_resolve_name 2 62488 _000782_hash NULL ++_000783_hash ceph_msgpool_get 2 54258 _000783_hash NULL ++_000784_hash cfg80211_connect_result 4-6 56515 _000784_hash NULL ++_000786_hash cfg80211_disconnected 4 57 _000786_hash NULL ++_000787_hash cfg80211_inform_bss 8 19332 _000787_hash 
NULL ++_000788_hash cfg80211_inform_bss_frame 4 41078 _000788_hash NULL ++_000789_hash cfg80211_mlme_register_mgmt 5 19852 _000789_hash NULL ++_000790_hash cfg80211_roamed_bss 4-6 50198 _000790_hash NULL ++_000792_hash cgroup_file_write 3 52417 _000792_hash NULL ++_000793_hash cifs_readdata_alloc 1 26360 _000793_hash NULL ++_000794_hash cifs_readv_from_socket 3 19109 _000794_hash NULL ++_000795_hash cifs_writedata_alloc 1 32880 _003097_hash NULL nohasharray ++_000796_hash cnic_alloc_dma 3 34641 _000796_hash NULL ++_000797_hash cnic_init_id_tbl 2 41354 _000797_hash NULL ++_000798_hash configfs_write_file 3 61621 _000798_hash NULL ++_000799_hash construct_key 3 11329 _000799_hash NULL ++_000800_hash context_alloc 3 24645 _000800_hash NULL ++_000801_hash copy_to_user 3 57835 _000801_hash NULL ++_000802_hash cp210x_get_config 4 56229 _000802_hash NULL ++_000803_hash create_attr_set 1 22861 _000803_hash NULL ++_000804_hash create_bounce_buffer 3 39155 _000804_hash NULL ++_000805_hash create_gpadl_header 2 19064 _000805_hash NULL ++_000806_hash _create_sg_bios 4 31244 _000806_hash NULL ++_000807_hash cryptd_alloc_instance 2-3 18048 _000807_hash NULL ++_000809_hash crypto_ahash_setkey 3 55134 _000809_hash NULL ++_000810_hash crypto_alloc_instance2 3 25277 _000810_hash NULL ++_000811_hash crypto_shash_setkey 3 60483 _000811_hash NULL ++_000812_hash cxgb_alloc_mem 1 24007 _000812_hash NULL ++_000813_hash cxgbi_device_portmap_create 3 25747 _000813_hash NULL ++_000814_hash cxgbi_device_register 1-2 36746 _000814_hash NULL ++_000816_hash __cxio_init_resource_fifo 3 23447 _000816_hash NULL ++_000817_hash dccp_sendmsg 4 56058 _000817_hash NULL ++_000818_hash ddp_make_gl 1 12179 _000818_hash NULL ++_000819_hash depth_write 3 3021 _000819_hash NULL ++_000820_hash dev_irnet_write 3 11398 _000820_hash NULL ++_000821_hash dev_set_alias 3 50084 _000821_hash NULL ++_000822_hash dev_write 3 7708 _000822_hash NULL ++_000823_hash dfs_global_file_write 3 6112 _000823_hash NULL ++_000824_hash dgram_sendmsg 4 45679 _000824_hash NULL ++_000825_hash disconnect 4 32521 _000825_hash NULL ++_000826_hash dma_attach 6-7 50831 _000826_hash NULL ++_000828_hash dma_declare_coherent_memory 4-2 14244 _000828_hash NULL ++_000829_hash dn_sendmsg 4 38390 _000829_hash NULL ++_000830_hash dn_setsockopt 5 314 _000830_hash NULL ++_000831_hash do_arpt_set_ctl 4 51053 _000831_hash NULL ++_000832_hash do_dccp_setsockopt 5 54377 _003195_hash NULL nohasharray ++_000833_hash do_ip6t_set_ctl 4 60040 _000833_hash NULL ++_000834_hash do_ipt_set_ctl 4 56238 _000834_hash NULL ++_000835_hash do_jffs2_setxattr 5 25910 _000835_hash NULL ++_000836_hash do_msgsnd 4 1387 _000836_hash NULL ++_000837_hash do_pselect 1 62061 _000837_hash NULL ++_000838_hash do_raw_setsockopt 5 55215 _000838_hash NULL ++_000839_hash do_readv_writev 4 51849 _000839_hash NULL ++_000840_hash do_sync 1 9604 _000840_hash NULL ++_000841_hash dup_array 3 33551 _000841_hash NULL ++_000842_hash ecryptfs_decode_and_decrypt_filename 5 10379 _000842_hash NULL ++_000843_hash ecryptfs_encrypt_and_encode_filename 6 2109 _000843_hash NULL ++_000844_hash ecryptfs_send_message_locked 2 31801 _000844_hash NULL ++_000845_hash edac_device_alloc_ctl_info 1 5941 _000845_hash NULL ++_000846_hash edac_mc_alloc 4 3611 _000846_hash NULL ++_000847_hash edac_pci_alloc_ctl_info 1 63388 _000847_hash NULL ++_000848_hash efivar_create_sysfs_entry 2 19485 _000848_hash NULL ++_000849_hash enable_write 3 30456 _000849_hash NULL ++_000850_hash enclosure_register 3 57412 _000850_hash NULL ++_000851_hash 
enlarge_skb 2 44248 _002839_hash NULL nohasharray ++_000852_hash evdev_ioctl_handler 2 21705 _000852_hash NULL ++_000853_hash ext4_kvzalloc 1 47605 _000853_hash NULL ++_000854_hash extend_netdev_table 2 21453 _000854_hash NULL ++_000855_hash fcoe_ctlr_device_add 3 1793 _000855_hash NULL ++_000856_hash fd_do_readv 3 51297 _000856_hash NULL ++_000857_hash fd_do_writev 3 29329 _000857_hash NULL ++_000858_hash __feat_register_sp 6 64712 _000858_hash NULL ++_000859_hash __ffs_ep0_read_events 3 48868 _000859_hash NULL ++_000860_hash ffs_ep0_write 3 9438 _000860_hash NULL ++_000861_hash ffs_epfile_read 3 18775 _000861_hash NULL ++_000862_hash ffs_epfile_write 3 48014 _000862_hash NULL ++_000863_hash fib_info_hash_alloc 1 9075 _000863_hash NULL ++_000864_hash fillonedir 3 41746 _000864_hash NULL ++_000865_hash fs_devrw_entry 3 11924 _000865_hash NULL ++_000866_hash fs_path_prepare_for_add 2 61854 _000866_hash NULL ++_000867_hash fuse_fill_write_pages 4 53682 _000867_hash NULL ++_000868_hash fw_device_op_ioctl 2 11595 _000868_hash NULL ++_000869_hash fw_iso_buffer_init 3 54582 _000869_hash NULL ++_000870_hash fw_node_create 2 9559 _000870_hash NULL ++_000871_hash garmin_read_process 3 27509 _000871_hash NULL ++_000872_hash garp_request_join 4 7471 _000872_hash NULL ++_000873_hash generic_perform_write 3 54832 _000873_hash NULL ++_000874_hash gen_pool_add_virt 4 39913 _000874_hash NULL ++_000875_hash get_derived_key 4 61100 _000875_hash NULL ++_000876_hash get_new_cssid 2 51665 _000876_hash NULL ++_000877_hash getxattr 4 24398 _003758_hash NULL nohasharray ++_000878_hash gsm_control_reply 4 53333 _000878_hash NULL ++_000879_hash hcd_alloc_coherent 5 55862 _000879_hash NULL ++_000880_hash hci_sock_sendmsg 4 37420 _000880_hash NULL ++_000881_hash hidraw_ioctl 2 63658 _000881_hash NULL ++_000882_hash hidraw_write 3 31536 _000882_hash NULL ++_000883_hash hid_register_field 2-3 4874 _000883_hash NULL ++_000885_hash hid_report_raw_event 4 2762 _000885_hash NULL ++_000886_hash hpi_alloc_control_cache 1 35351 _000886_hash NULL ++_000887_hash hugetlbfs_read_actor 2-5-4 34547 _000887_hash NULL ++_000890_hash hvc_alloc 4 12579 _000890_hash NULL ++_000891_hash __hwahc_dev_set_key 5 46328 _000891_hash NULL ++_000892_hash i2400m_zrealloc_2x 3 54166 _001549_hash NULL nohasharray ++_000893_hash ib_alloc_device 1 26483 _000893_hash NULL ++_000894_hash ib_create_send_mad 5 1196 _000894_hash NULL ++_000895_hash ibmasm_new_command 2 25714 _000895_hash NULL ++_000896_hash ib_send_cm_drep 3 50186 _000896_hash NULL ++_000897_hash ib_send_cm_mra 4 60202 _003063_hash NULL nohasharray ++_000898_hash ib_send_cm_rtu 3 63138 _000898_hash NULL ++_000899_hash ide_core_cp_entry 3 22636 _000899_hash NULL ++_000900_hash ieee80211_if_write_smps 3 35550 _000900_hash NULL ++_000901_hash ieee80211_if_write_tkip_mic_test 3 58748 _000901_hash NULL ++_000902_hash ieee80211_if_write_tsf 3 36077 _000902_hash NULL ++_000903_hash ieee80211_if_write_uapsd_max_sp_len 3 14233 _000903_hash NULL ++_000904_hash ieee80211_if_write_uapsd_queues 3 51526 _000904_hash NULL ++_000905_hash ieee80211_key_alloc 3 19065 _000905_hash NULL ++_000906_hash ieee80211_send_probe_req 6-4 6924 _000906_hash NULL ++_000907_hash ieee80211_skb_resize 3 50211 _000907_hash NULL ++_000908_hash if_spi_host_to_card 4 62890 _000908_hash NULL ++_000909_hash if_writecmd 2 815 _000909_hash NULL ++_000910_hash init_bch 1-2 64130 _000910_hash NULL ++_000912_hash init_ipath 1 48187 _000912_hash NULL ++_000913_hash init_list_set 2-3 39188 _000913_hash NULL ++_000915_hash init_q 4 132 
_000915_hash NULL ++_000916_hash init_state 2 60165 _000916_hash NULL ++_000917_hash init_tag_map 3 57515 _000917_hash NULL ++_000918_hash input_ff_create 2 21240 _000918_hash NULL ++_000919_hash input_mt_init_slots 2 31183 _000919_hash NULL ++_000920_hash interfaces 2 38859 _000920_hash NULL ++_000921_hash int_hardware_entry 3 36833 _000921_hash NULL ++_000922_hash int_hw_irq_en 3 46776 _000922_hash NULL ++_000923_hash int_tasklet_entry 3 52500 _000923_hash NULL ++_000924_hash ioat2_alloc_ring 2 11172 _000924_hash NULL ++_000925_hash ip_generic_getfrag 3-4 12187 _000925_hash NULL ++_000927_hash ip_options_get_alloc 1 7448 _000927_hash NULL ++_000928_hash ipr_alloc_ucode_buffer 1 40199 _000928_hash NULL ++_000929_hash ip_set_alloc 1 57953 _000929_hash NULL ++_000930_hash ip_setsockopt 5 33487 _000930_hash NULL ++_000931_hash ipv6_flowlabel_opt 3 58135 _001179_hash NULL nohasharray ++_000932_hash ipv6_renew_options 5 28867 _000932_hash NULL ++_000933_hash ipv6_setsockopt 5 29871 _000933_hash NULL ++_000934_hash ipxrtr_route_packet 4 54036 _000934_hash NULL ++_000935_hash irda_sendmsg 4 4388 _000935_hash NULL ++_000936_hash irda_sendmsg_dgram 4 38563 _000936_hash NULL ++_000937_hash irda_sendmsg_ultra 4 42047 _000937_hash NULL ++_000938_hash irias_add_octseq_attrib 4 29983 _000938_hash NULL ++_000939_hash irq_alloc_generic_chip 2 26650 _000939_hash NULL ++_000940_hash iscsi_alloc_session 3 49390 _000940_hash NULL ++_000941_hash iscsi_create_conn 2 50425 _000941_hash NULL ++_000942_hash iscsi_create_endpoint 1 15193 _000942_hash NULL ++_000943_hash iscsi_create_iface 5 38510 _000943_hash NULL ++_000944_hash iscsi_decode_text_input 4 58292 _000944_hash NULL ++_000945_hash iscsi_pool_init 2-4 54913 _000945_hash NULL ++_000947_hash iscsit_dump_data_payload 2 38683 _000947_hash NULL ++_000948_hash isdn_write 3 45863 _000948_hash NULL ++_000949_hash isku_receive 4 54130 _000949_hash NULL ++_000950_hash islpci_mgt_transaction 5 23610 _000950_hash NULL ++_000951_hash iso_alloc_urb 4-5 45206 _000951_hash NULL ++_000952_hash iso_sched_alloc 1 13377 _003325_hash NULL nohasharray ++_000953_hash iwl_trans_txq_alloc 3 36147 _000953_hash NULL ++_000954_hash ixgbe_alloc_q_vector 4-6 24439 _000954_hash NULL ++_000956_hash jbd2_journal_init_revoke 2 51088 _000956_hash NULL ++_000957_hash jffs2_write_dirent 5 37311 _000957_hash NULL ++_000958_hash journal_init_revoke 2 56933 _000958_hash NULL ++_000959_hash keyctl_instantiate_key 3 41855 _000959_hash NULL ++_000960_hash keyctl_instantiate_key_iov 3 16969 _000960_hash NULL ++_000961_hash __kfifo_from_user 3 20399 _000961_hash NULL ++_000962_hash kimage_crash_alloc 3 3233 _000962_hash NULL ++_000963_hash kimage_normal_alloc 3 31140 _000963_hash NULL ++_000964_hash kmem_realloc 2 37489 _000964_hash NULL ++_000965_hash kmem_zalloc 1 11510 _000965_hash NULL ++_000966_hash koneplus_sysfs_read 6 42792 _000966_hash NULL ++_000967_hash kvm_kvzalloc 1 52894 _000967_hash NULL ++_000968_hash kvm_read_guest_page_mmu 6 37611 _000968_hash NULL ++_000969_hash kvm_set_irq_routing 3 48704 _000969_hash NULL ++_000970_hash kvm_write_guest_cached 4 11106 _000970_hash NULL ++_000971_hash kvm_write_guest_page 5 63555 _002812_hash NULL nohasharray ++_000972_hash kzalloc_node 1 24352 _000972_hash NULL ++_000973_hash l2cap_skbuff_fromiovec 3-4 35003 _000973_hash NULL ++_000975_hash l2tp_ip_sendmsg 4 50411 _000975_hash NULL ++_000976_hash l2tp_session_create 1 25286 _000976_hash NULL ++_000977_hash lc_create 3 48662 _000977_hash NULL ++_000978_hash leaf_dealloc 3 29566 _000978_hash NULL 
++_000979_hash linear_conf 2 23485 _003837_hash NULL nohasharray ++_000980_hash llc_ui_sendmsg 4 24987 _000980_hash NULL ++_000981_hash load_module 2 60056 _003010_hash NULL nohasharray ++_000982_hash lpfc_sli4_queue_alloc 3 62646 _000982_hash NULL ++_000983_hash mdiobus_alloc_size 1 52259 _000983_hash NULL ++_000984_hash mempool_create_node 1 3191 _000984_hash NULL ++_000985_hash mem_read 3 57631 _000985_hash NULL ++_000986_hash memstick_alloc_host 1 142 _000986_hash NULL ++_000987_hash mem_swapout_entry 3 32586 _000987_hash NULL ++_000988_hash mem_write 3 22232 _000988_hash NULL ++_000989_hash mesh_table_alloc 1 22305 _000989_hash NULL ++_000990_hash mfd_add_devices 4 16668 _000990_hash NULL ++_000991_hash mISDN_sock_sendmsg 4 41035 _000991_hash NULL ++_000992_hash mlx4_init_icm_table 4-5 2151 _000992_hash NULL ++_000994_hash mmc_alloc_host 1 48097 _000994_hash NULL ++_000995_hash mmc_test_alloc_mem 2-3 28102 _000995_hash NULL ++_000997_hash mon_bin_ioctl 3 2771 _000997_hash NULL ++_000998_hash mpi_alloc 1 18094 _000998_hash NULL ++_000999_hash mpihelp_mul_karatsuba_case 5-3 23918 _003061_hash NULL nohasharray ++_001000_hash __mptctl_ioctl 2 15875 _001000_hash NULL ++_001001_hash mtd_concat_create 2 14416 _001001_hash NULL ++_001002_hash mthca_alloc_cq_buf 3 46512 _001002_hash NULL ++_001003_hash mvumi_alloc_mem_resource 3 47750 _001003_hash NULL ++_001004_hash mwifiex_11n_create_rx_reorder_tbl 4 63806 _001004_hash NULL ++_001005_hash mwifiex_alloc_sdio_mpa_buffers 2-3 60961 _001005_hash NULL ++_001007_hash mwl8k_cmd_set_beacon 4 23110 _001007_hash NULL ++_001008_hash neigh_hash_alloc 1 17595 _001008_hash NULL ++_001009_hash __netdev_alloc_skb 2 18595 _001009_hash NULL ++_001010_hash __netlink_change_ngroups 2 46156 _001010_hash NULL ++_001011_hash netlink_sendmsg 4 33708 _001236_hash NULL nohasharray ++_001012_hash netxen_alloc_sds_rings 2 13417 _001012_hash NULL ++_001013_hash new_bind_ctl 2 35324 _001013_hash NULL ++_001014_hash new_dir 3 31919 _001014_hash NULL ++_001015_hash new_tape_buffer 2 32866 _001015_hash NULL ++_001016_hash nfc_llcp_build_tlv 3 19536 _001016_hash NULL ++_001017_hash nfc_llcp_send_i_frame 3 59130 _001017_hash NULL ++_001018_hash nf_ct_ext_create 3 51232 _001018_hash NULL ++_001019_hash nfs4_alloc_pages 1 48426 _001019_hash NULL ++_001020_hash nfs4_alloc_slots 1 2454 _003345_hash NULL nohasharray ++_001021_hash nfsctl_transaction_write 3 64800 _001021_hash NULL ++_001022_hash nfs_fscache_get_super_cookie 3 44355 _001850_hash NULL nohasharray ++_001023_hash nfs_idmap_request_key 3 30208 _001023_hash NULL ++_001024_hash nfs_pgarray_set 2 1085 _001024_hash NULL ++_001025_hash nl_pid_hash_zalloc 1 23314 _001025_hash NULL ++_001026_hash nr_sendmsg 4 53656 _001026_hash NULL ++_001027_hash nsm_create_handle 4 38060 _001027_hash NULL ++_001028_hash ntfs_copy_from_user_iovec 3-6 49829 _001028_hash NULL ++_001030_hash ntfs_file_buffered_write 4-6 41442 _001030_hash NULL ++_001032_hash __ntfs_malloc 1 34022 _001032_hash NULL ++_001033_hash nvme_alloc_queue 3 46865 _001033_hash NULL ++_001034_hash nvme_map_user_pages 3-4 41093 _001639_hash NULL nohasharray ++_001036_hash ocfs2_acl_from_xattr 2 21604 _001036_hash NULL ++_001037_hash ocfs2_control_message 3 19564 _001037_hash NULL ++_001038_hash _ore_get_io_state 3-5-4 2166 _001038_hash NULL ++_001041_hash orinoco_set_key 5-7 17878 _001041_hash NULL ++_001043_hash osdmap_set_max_osd 2 57630 _002267_hash NULL nohasharray ++_001044_hash _osd_realloc_seg 3 54352 _001044_hash NULL ++_001045_hash osst_execute 7-6 17607 
_001045_hash NULL ++_001046_hash osst_write 3 31581 _001046_hash NULL ++_001047_hash otp_read 2-5-4 10594 _001047_hash NULL ++_001050_hash ovs_vport_alloc 1 33475 _001050_hash NULL ++_001051_hash p54_parse_rssical 3 64493 _001051_hash NULL ++_001052_hash p9_client_zc_rpc 7 14345 _001052_hash NULL ++_001053_hash packet_sendmsg_spkt 4 28885 _001053_hash NULL ++_001054_hash pair_device 4 61175 _003161_hash NULL nohasharray ++_001055_hash pccard_store_cis 6 18176 _001055_hash NULL ++_001056_hash pci_add_cap_save_buffer 3 3426 _001056_hash NULL ++_001057_hash pcnet32_realloc_rx_ring 3 36598 _001057_hash NULL ++_001058_hash pcnet32_realloc_tx_ring 3 38428 _001058_hash NULL ++_001059_hash pcpu_mem_zalloc 1 22948 _001059_hash NULL ++_001060_hash pep_sendmsg 4 62524 _001060_hash NULL ++_001061_hash pfkey_sendmsg 4 47394 _001061_hash NULL ++_001062_hash pidlist_resize 2 496 _001062_hash NULL ++_001063_hash pin_code_reply 4 46510 _001063_hash NULL ++_001064_hash ping_getfrag 3-4 8360 _001064_hash NULL ++_001066_hash pipe_set_size 2 5204 _001066_hash NULL ++_001067_hash pkt_bio_alloc 1 48284 _001067_hash NULL ++_001068_hash platform_create_bundle 4-6 12785 _001068_hash NULL ++_001070_hash pm8001_store_update_fw 4 55716 _001070_hash NULL ++_001071_hash pmcraid_alloc_sglist 1 9864 _001071_hash NULL ++_001072_hash pn533_dep_link_up 5 22154 _001072_hash NULL ++_001073_hash pn533_init_target_frame 3 65438 _001073_hash NULL ++_001074_hash pnp_alloc 1 24869 _001538_hash NULL nohasharray ++_001075_hash pn_sendmsg 4 12640 _001075_hash NULL ++_001076_hash pppoe_sendmsg 4 48039 _001076_hash NULL ++_001077_hash pppol2tp_sendmsg 4 56420 _001077_hash NULL ++_001078_hash prism2_info_hostscanresults 3 39657 _001078_hash NULL ++_001079_hash process_vm_rw 3-5 47533 _001079_hash NULL ++_001081_hash process_vm_rw_single_vec 1-2 26213 _001081_hash NULL ++_001083_hash proc_write 3 51003 _001083_hash NULL ++_001084_hash profile_load 3 58267 _001084_hash NULL ++_001085_hash profile_remove 3 8556 _001085_hash NULL ++_001086_hash profile_replace 3 14652 _001086_hash NULL ++_001087_hash pscsi_get_bio 1 56103 _001087_hash NULL ++_001088_hash __pskb_copy 2 9038 _001088_hash NULL ++_001089_hash __pskb_pull_tail 2 60287 _001089_hash NULL ++_001090_hash qla4xxx_alloc_work 2 44813 _001090_hash NULL ++_001091_hash qlcnic_alloc_msix_entries 2 46160 _001091_hash NULL ++_001092_hash qlcnic_alloc_sds_rings 2 26795 _001092_hash NULL ++_001093_hash queue_received_packet 5 9657 _001093_hash NULL ++_001094_hash raw_send_hdrinc 4 58803 _001094_hash NULL ++_001095_hash raw_sendmsg 4 23078 _003316_hash NULL nohasharray ++_001096_hash rawsock_sendmsg 4 60010 _001096_hash NULL ++_001097_hash rawv6_send_hdrinc 3 35425 _001097_hash NULL ++_001098_hash rawv6_setsockopt 5 56165 _001098_hash NULL ++_001099_hash rb_alloc 1 3102 _001099_hash NULL ++_001100_hash rbd_alloc_coll 1 33678 _001100_hash NULL ++_001101_hash rbd_create_rw_ops 1 55297 _001101_hash NULL ++_001102_hash rds_ib_inc_copy_to_user 3 55007 _001102_hash NULL ++_001103_hash rds_iw_inc_copy_to_user 3 29214 _001103_hash NULL ++_001104_hash rds_message_alloc 1 10517 _001104_hash NULL ++_001105_hash rds_message_copy_from_user 3 45510 _001105_hash NULL ++_001106_hash rds_message_inc_copy_to_user 3 26540 _001106_hash NULL ++_001107_hash regcache_rbtree_insert_to_block 5 58009 _001107_hash NULL ++_001108_hash _regmap_raw_write 4 42652 _001108_hash NULL ++_001109_hash regmap_register_patch 3 21681 _001109_hash NULL ++_001110_hash relay_alloc_page_array 1 52735 _001110_hash NULL ++_001111_hash 
remove_uuid 4 64505 _001111_hash NULL ++_001112_hash reshape_ring 2 29147 _001112_hash NULL ++_001113_hash RESIZE_IF_NEEDED 2 56286 _001113_hash NULL ++_001114_hash resize_info_buffer 2 62889 _001114_hash NULL ++_001115_hash resize_stripes 2 61650 _001115_hash NULL ++_001116_hash rfcomm_sock_sendmsg 4 37661 _003661_hash NULL nohasharray ++_001117_hash roccat_common2_send_with_status 4 50343 _001117_hash NULL ++_001118_hash rose_sendmsg 4 20249 _001118_hash NULL ++_001119_hash rsc_mgr_init 3 16299 _001119_hash NULL ++_001120_hash rxrpc_send_data 5 21553 _001120_hash NULL ++_001121_hash rxrpc_setsockopt 5 50286 _001121_hash NULL ++_001122_hash savu_sysfs_read 6 49473 _001122_hash NULL ++_001124_hash sco_send_frame 3 41815 _001124_hash NULL ++_001125_hash scsi_dispatch_cmd_entry 3 49848 _001125_hash NULL ++_001126_hash scsi_host_alloc 2 63041 _001126_hash NULL ++_001127_hash scsi_tgt_kspace_exec 8 9522 _001127_hash NULL ++_001128_hash sctp_sendmsg 4 61919 _001128_hash NULL ++_001129_hash sctp_setsockopt 5 44788 _001129_hash NULL ++_001130_hash sctp_setsockopt_connectx 3 6073 _001130_hash NULL ++_001131_hash sctp_setsockopt_connectx_old 3 22631 _001131_hash NULL ++_001132_hash sctp_tsnmap_grow 2 32784 _001132_hash NULL ++_001133_hash sctp_tsnmap_init 2 36446 _001133_hash NULL ++_001134_hash sctp_user_addto_chunk 2-3 62047 _001134_hash NULL ++_001136_hash security_context_to_sid 2 19839 _001136_hash NULL ++_001137_hash security_context_to_sid_default 2 3492 _003841_hash NULL nohasharray ++_001138_hash security_context_to_sid_force 2 20724 _001138_hash NULL ++_001139_hash self_check_write 5 50856 _001139_hash NULL ++_001140_hash selinux_transaction_write 3 59038 _001140_hash NULL ++_001141_hash sel_write_access 3 51704 _001141_hash NULL ++_001142_hash sel_write_create 3 11353 _001142_hash NULL ++_001143_hash sel_write_member 3 28800 _001143_hash NULL ++_001144_hash sel_write_relabel 3 55195 _001144_hash NULL ++_001145_hash sel_write_user 3 45060 _001145_hash NULL ++_001146_hash __seq_open_private 3 40715 _001146_hash NULL ++_001147_hash serverworks_create_gatt_pages 1 46582 _001147_hash NULL ++_001148_hash set_connectable 4 56458 _001148_hash NULL ++_001149_hash set_dev_class 4 39645 _001921_hash NULL nohasharray ++_001150_hash set_discoverable 4 48141 _001150_hash NULL ++_001151_hash set_fd_set 1 35249 _001151_hash NULL ++_001152_hash setkey 3 14987 _001152_hash NULL ++_001153_hash set_le 4 30581 _001153_hash NULL ++_001154_hash set_link_security 4 4502 _001154_hash NULL ++_001155_hash set_local_name 4 55757 _001155_hash NULL ++_001156_hash set_powered 4 12129 _001156_hash NULL ++_001157_hash set_ssp 4 62411 _001157_hash NULL ++_001158_hash sg_build_sgat 3 60179 _001158_hash &_000314_hash ++_001159_hash sg_read_oxfer 3 51724 _001159_hash NULL ++_001160_hash shmem_xattr_set 4 11843 _001160_hash NULL ++_001161_hash simple_alloc_urb 3 60420 _001161_hash NULL ++_001162_hash sisusb_send_bridge_packet 2 11649 _001162_hash NULL ++_001163_hash sisusb_send_packet 2 20891 _001163_hash NULL ++_001164_hash sisusb_write_mem_bulk 4 29678 _001164_hash NULL ++_001165_hash skb_add_data_nocache 4 4682 _001165_hash NULL ++_001166_hash skb_copy_datagram_from_iovec 2-5-4 52014 _001166_hash NULL ++_001169_hash skb_copy_expand 2-3 7685 _001169_hash &_000677_hash ++_001171_hash skb_copy_to_page_nocache 6 58624 _001171_hash NULL ++_001172_hash __skb_cow 2 39254 _001172_hash NULL ++_001173_hash skb_cow_data 2 11565 _001173_hash NULL ++_001174_hash skb_pad 2 17302 _001174_hash NULL ++_001175_hash skb_realloc_headroom 2 
19516 _001175_hash NULL ++_001176_hash sk_chk_filter 2 42095 _001176_hash NULL ++_001177_hash skcipher_sendmsg 4 30290 _001177_hash NULL ++_001178_hash sl_change_mtu 2 7396 _001178_hash NULL ++_001179_hash slhc_init 1-2 58135 _001179_hash &_000931_hash ++_001181_hash sm501_create_subdev 3-4 48668 _001245_hash NULL nohasharray ++_001183_hash smk_user_access 3 24440 _001183_hash NULL ++_001184_hash smk_write_cipso2 3 1021 _001184_hash NULL ++_001185_hash smk_write_cipso 3 17989 _001185_hash NULL ++_001186_hash smk_write_load2 3 52155 _001186_hash NULL ++_001187_hash smk_write_load 3 26829 _001187_hash NULL ++_001188_hash smk_write_load_self2 3 591 _001188_hash NULL ++_001189_hash smk_write_load_self 3 7958 _001189_hash NULL ++_001190_hash snapshot_write 3 28351 _001190_hash NULL ++_001191_hash snd_ac97_pcm_assign 2 30218 _001191_hash NULL ++_001192_hash snd_card_create 4 64418 _001529_hash NULL nohasharray ++_001193_hash snd_emux_create_port 3 42533 _001193_hash NULL ++_001194_hash snd_gus_dram_write 4 38784 _001194_hash NULL ++_001195_hash snd_midi_channel_alloc_set 1 28153 _001195_hash NULL ++_001196_hash _snd_pcm_lib_alloc_vmalloc_buffer 2 17820 _001196_hash NULL ++_001197_hash snd_pcm_oss_sync1 2 45298 _001197_hash NULL ++_001198_hash snd_pcm_oss_write 3 38108 _001198_hash NULL ++_001199_hash snd_pcm_plugin_build 5 25505 _001199_hash NULL ++_001200_hash snd_rawmidi_kernel_write 3 25106 _001200_hash NULL ++_001201_hash snd_rawmidi_write 3 28008 _001201_hash NULL ++_001202_hash snd_rme32_playback_copy 5 43732 _001202_hash NULL ++_001203_hash snd_rme96_playback_copy 5 13111 _001203_hash NULL ++_001204_hash snd_seq_device_new 4 31753 _001204_hash NULL ++_001205_hash snd_seq_oss_readq_new 2 14283 _001205_hash NULL ++_001206_hash snd_vx_create 4 40948 _001206_hash NULL ++_001207_hash sock_setsockopt 5 50088 _001207_hash NULL ++_001208_hash sound_write 3 5102 _001208_hash NULL ++_001209_hash _sp2d_alloc 1-3-2 16944 _001209_hash NULL ++_001212_hash spi_alloc_master 2 45223 _001212_hash NULL ++_001213_hash spidev_message 3 5518 _001213_hash NULL ++_001214_hash spi_register_board_info 2 35651 _001214_hash NULL ++_001215_hash squashfs_cache_init 2 41656 _001215_hash NULL ++_001216_hash squashfs_read_data 6 59440 _001216_hash NULL ++_001217_hash squashfs_read_fragment_index_table 4 2506 _001217_hash NULL ++_001218_hash squashfs_read_id_index_table 4 61961 _001218_hash NULL ++_001219_hash squashfs_read_inode_lookup_table 4 64739 _001219_hash NULL ++_001220_hash srp_alloc_iu 2 44227 _001220_hash NULL ++_001221_hash srp_iu_pool_alloc 2 17920 _001221_hash NULL ++_001222_hash srp_ring_alloc 2 26760 _001222_hash NULL ++_001226_hash start_isoc_chain 2 565 _001226_hash NULL ++_001227_hash st_write 3 16874 _001227_hash NULL ++_001228_hash svc_pool_map_alloc_arrays 2 47181 _001228_hash NULL ++_001229_hash symtab_init 2 61050 _001229_hash NULL ++_001230_hash sys_bind 3 10799 _001230_hash NULL ++_001231_hash sys_connect 3 15291 _003816_hash NULL nohasharray ++_001232_hash sys_flistxattr 3 41407 _001232_hash NULL ++_001233_hash sys_fsetxattr 4 49736 _001233_hash NULL ++_001234_hash sysfs_write_file 3 57116 _001234_hash NULL ++_001235_hash sys_ipc 3 4889 _001235_hash NULL ++_001236_hash sys_keyctl 4 33708 _001236_hash &_001011_hash ++_001237_hash sys_listxattr 3 27833 _001237_hash NULL ++_001238_hash sys_llistxattr 3 4532 _001238_hash NULL ++_001239_hash sys_lsetxattr 4 61177 _001239_hash NULL ++_001240_hash sys_mq_timedsend 3 57661 _001240_hash NULL ++_001241_hash sys_sched_setaffinity 2 32046 _001241_hash NULL 
++_001242_hash sys_select 1 38827 _001242_hash NULL ++_001243_hash sys_semop 3 39457 _001243_hash NULL ++_001244_hash sys_sendto 6 20809 _001244_hash NULL ++_001245_hash sys_setgroups 1 48668 _001245_hash &_001181_hash ++_001246_hash sys_setgroups16 1 48882 _001246_hash NULL ++_001247_hash sys_setxattr 4 37880 _001247_hash NULL ++_001248_hash t4_alloc_mem 1 32342 _001248_hash NULL ++_001249_hash tcf_hash_create 4 54360 _001249_hash NULL ++_001250_hash tcp_send_rcvq 3 11316 _001250_hash NULL ++_001251_hash __team_options_register 3 63941 _001251_hash NULL ++_001252_hash test_unaligned_bulk 3 52333 _001252_hash NULL ++_001253_hash tifm_alloc_adapter 1 10903 _001253_hash NULL ++_001254_hash timeout_write 3 50991 _001254_hash NULL ++_001255_hash timeradd_entry 3 49850 _001255_hash NULL ++_001256_hash tipc_link_send_sections_fast 4 37920 _001256_hash NULL ++_001257_hash tipc_subseq_alloc 1 5957 _001257_hash NULL ++_001258_hash tnode_alloc 1 49407 _001258_hash NULL ++_001259_hash tomoyo_commit_ok 2 20167 _001259_hash NULL ++_001260_hash tomoyo_scan_bprm 2-4 15642 _003488_hash NULL nohasharray ++_001262_hash tps6586x_writes 3 58689 _001262_hash NULL ++_001263_hash tty_buffer_find 2 2443 _001263_hash NULL ++_001264_hash tty_write 3 5494 _001264_hash NULL ++_001265_hash ubifs_setxattr 4 59650 _001477_hash NULL nohasharray ++_001266_hash ubi_self_check_all_ff 4 41959 _001266_hash NULL ++_001267_hash udf_sb_alloc_partition_maps 2 62313 _001267_hash NULL ++_001268_hash udplite_getfrag 3-4 14479 _001268_hash NULL ++_001270_hash ulong_write_file 3 26485 _001270_hash NULL ++_001271_hash unix_stream_sendmsg 4 61455 _001271_hash NULL ++_001272_hash unlink_queued 3-4 645 _001272_hash NULL ++_001273_hash update_pmkid 4 2481 _001273_hash NULL ++_001274_hash usb_alloc_coherent 2 65444 _001274_hash NULL ++_001275_hash vc_resize 2-3 3585 _001275_hash NULL ++_001277_hash vhci_write 3 2224 _001277_hash NULL ++_001278_hash __vhost_add_used_n 3 26554 _001278_hash NULL ++_001279_hash virtqueue_add_buf 3-4 59470 _001279_hash NULL ++_001281_hash vmalloc 1 15464 _001281_hash NULL ++_001282_hash vol_cdev_write 3 40915 _001282_hash NULL ++_001283_hash vxge_device_register 4 7752 _001283_hash NULL ++_001284_hash __vxge_hw_blockpool_malloc 2 5786 _001284_hash NULL ++_001285_hash __vxge_hw_channel_allocate 3 55462 _001285_hash NULL ++_001286_hash vzalloc 1 47421 _001286_hash NULL ++_001287_hash vzalloc_node 1 23424 _001287_hash NULL ++_001288_hash wa_nep_queue 2 8858 _001288_hash NULL ++_001289_hash __wa_xfer_setup_segs 2 56725 _001289_hash NULL ++_001290_hash wiphy_new 2 2482 _001290_hash NULL ++_001291_hash wm8350_block_write 3 19727 _001291_hash NULL ++_001292_hash wpan_phy_alloc 1 48056 _001292_hash NULL ++_001293_hash write_flush_pipefs 3 2021 _001293_hash NULL ++_001294_hash write_flush_procfs 3 44011 _001294_hash NULL ++_001295_hash wusb_ccm_mac 7 32199 _001295_hash NULL ++_001296_hash x25_sendmsg 4 12487 _001296_hash NULL ++_001297_hash xfrm_hash_alloc 1 10997 _001297_hash NULL ++_001298_hash _xfs_buf_get_pages 2 46811 _001298_hash NULL ++_001299_hash xfs_da_grow_inode_int 3 21785 _001299_hash NULL ++_001300_hash xfs_dir_cilookup_result 3 64288 _003160_hash NULL nohasharray ++_001301_hash xfs_idata_realloc 2 26199 _001301_hash NULL ++_001302_hash xfs_iext_add_indirect_multi 3 32400 _001302_hash NULL ++_001303_hash xfs_iext_inline_to_direct 2 12384 _001303_hash NULL ++_001304_hash xfs_iformat_local 4 49472 _001304_hash NULL ++_001305_hash xfs_iroot_realloc 2 46826 _001305_hash NULL ++_001306_hash 
xhci_alloc_stream_info 3 63902 _001306_hash NULL ++_001307_hash xlog_recover_add_to_trans 4 62839 _001307_hash NULL ++_001308_hash xprt_alloc 2 1475 _001308_hash NULL ++_001309_hash xt_alloc_table_info 1 57903 _001309_hash NULL ++_001310_hash _zd_iowrite32v_async_locked 3 39034 _001310_hash NULL ++_001311_hash zd_usb_iowrite16v 3 49744 _001311_hash NULL ++_001312_hash a2mp_send 4 41615 _001312_hash NULL ++_001313_hash acpi_ds_build_internal_package_obj 3 58271 _001313_hash NULL ++_001314_hash acpi_system_read_event 3 55362 _001314_hash NULL ++_001315_hash acpi_ut_create_buffer_object 1 42030 _001315_hash NULL ++_001316_hash acpi_ut_create_package_object 1 17594 _001316_hash NULL ++_001317_hash acpi_ut_create_string_object 1 15360 _001317_hash NULL ++_001318_hash ad7879_spi_multi_read 3 8218 _001318_hash NULL ++_001319_hash add_child 4 45201 _001319_hash NULL ++_001320_hash add_port 2 54941 _001320_hash NULL ++_001321_hash adu_read 3 24177 _001321_hash NULL ++_001322_hash afs_cell_create 2 27346 _001322_hash NULL ++_001323_hash agp_allocate_memory 2 58761 _001323_hash NULL ++_001324_hash agp_generic_alloc_user 1 9470 _001324_hash NULL ++_001325_hash alc_auto_create_extra_outs 2 18975 _001325_hash NULL ++_001326_hash alloc_agpphysmem_i8xx 1 39427 _001326_hash NULL ++_001327_hash allocate_cnodes 1 5329 _001327_hash NULL ++_001328_hash ___alloc_bootmem 1 11410 _001328_hash NULL ++_001329_hash __alloc_bootmem_low_node 2 25726 _001662_hash NULL nohasharray ++_001330_hash __alloc_bootmem_node 2 1992 _001330_hash NULL ++_001331_hash __alloc_bootmem_node_nopanic 2 6432 _001331_hash NULL ++_001332_hash __alloc_bootmem_nopanic 1 65397 _001332_hash NULL ++_001333_hash alloc_candev 1-2 7776 _001333_hash NULL ++_001335_hash _alloc_cdb_cont 2 23609 _001335_hash NULL ++_001336_hash alloc_dummy_extent_buffer 2 56374 _001336_hash NULL ++_001337_hash ____alloc_ei_netdev 1 51475 _001337_hash NULL ++_001338_hash alloc_etherdev_mqs 1 36450 _001338_hash NULL ++_001339_hash alloc_extent_buffer 3 52824 _001339_hash NULL ++_001340_hash alloc_fcdev 1 18780 _001340_hash NULL ++_001341_hash alloc_fddidev 1 15382 _001341_hash NULL ++_001342_hash _alloc_get_attr_desc 2 470 _001342_hash NULL ++_001343_hash alloc_hippi_dev 1 51320 _001343_hash NULL ++_001344_hash alloc_irdadev 1 19140 _001344_hash NULL ++_001345_hash alloc_ldt 2 21972 _001345_hash NULL ++_001346_hash alloc_ltalkdev 1 38071 _001346_hash NULL ++_001347_hash alloc_one_pg_vec_page 1 10747 _001347_hash NULL ++_001348_hash alloc_orinocodev 1 21371 _001348_hash NULL ++_001349_hash alloc_ring 2-4 18278 _001349_hash NULL ++_001351_hash _alloc_set_attr_list 4 48991 _001351_hash NULL ++_001353_hash alloc_tx 2 32143 _001353_hash NULL ++_001354_hash alloc_wr 1-2 24635 _001354_hash NULL ++_001356_hash async_setkey 3 35521 _001356_hash NULL ++_001357_hash ata_host_alloc_pinfo 3 17325 _001357_hash NULL ++_001360_hash ath6kl_connect_event 7-9-8 14267 _001360_hash NULL ++_001361_hash ath6kl_fwlog_block_read 3 49836 _001361_hash NULL ++_001362_hash ath6kl_fwlog_read 3 32101 _001362_hash NULL ++_001363_hash ath9k_wmi_cmd 4 327 _001363_hash NULL ++_001364_hash ath_rx_init 2 43564 _001364_hash NULL ++_001365_hash ath_tx_init 2 60515 _001365_hash NULL ++_001366_hash atm_alloc_charge 2 19517 _001914_hash NULL nohasharray ++_001367_hash atm_get_addr 3 31221 _001367_hash NULL ++_001368_hash audit_log_n_hex 3 45617 _001368_hash NULL ++_001369_hash audit_log_n_string 3 31705 _001369_hash NULL ++_001370_hash ax25_output 2 22736 _001370_hash NULL ++_001371_hash bcsp_prepare_pkt 3 
12961 _001371_hash NULL ++_001372_hash bdx_rxdb_create 1 46525 _001372_hash NULL ++_001373_hash bdx_tx_db_init 2 41719 _001373_hash NULL ++_001374_hash bio_map_kern 3 64751 _001374_hash NULL ++_001375_hash bits_to_user 2-3 47733 _001375_hash NULL ++_001377_hash __blk_queue_init_tags 2 9778 _001377_hash NULL ++_001378_hash blk_queue_resize_tags 2 28670 _001378_hash NULL ++_001379_hash blk_rq_map_user_iov 5 16772 _001379_hash NULL ++_001380_hash bm_init 2 13529 _001380_hash NULL ++_001381_hash brcmf_alloc_wdev 1 60347 _001381_hash NULL ++_001382_hash __btrfs_buffered_write 3 35311 _002735_hash NULL nohasharray ++_001383_hash btrfs_insert_dir_item 4 59304 _001383_hash NULL ++_001384_hash btrfs_map_block 3 64379 _001384_hash NULL ++_001385_hash bt_skb_alloc 1 6404 _001385_hash NULL ++_001386_hash c4_add_card 3 54968 _001386_hash NULL ++_001387_hash cache_read 3 24790 _001387_hash NULL ++_001388_hash cache_write 3 13589 _001388_hash NULL ++_001389_hash calc_hmac 3 32010 _001389_hash NULL ++_001390_hash capinc_tty_write 3 28539 _001390_hash NULL ++_001391_hash ccid_getsockopt_builtin_ccids 2 53634 _001391_hash NULL ++_001392_hash ceph_copy_page_vector_to_user 3-4 31270 _001392_hash NULL ++_001394_hash ceph_parse_server_name 2 60318 _001394_hash NULL ++_001395_hash ceph_read_dir 3 17005 _001395_hash NULL ++_001396_hash cfg80211_roamed 5-7 32632 _001396_hash NULL ++_001398_hash cfpkt_add_body 3 44630 _001398_hash NULL ++_001399_hash cfpkt_create_pfx 1-2 23594 _001399_hash NULL ++_001401_hash cmd_complete 6 51629 _001401_hash NULL ++_001402_hash cmtp_add_msgpart 4 9252 _001402_hash NULL ++_001403_hash cmtp_send_interopmsg 7 376 _001403_hash NULL ++_001404_hash coda_psdev_read 3 35029 _001404_hash NULL ++_001405_hash construct_key_and_link 4 8321 _001405_hash NULL ++_001406_hash copy_counters_to_user 5 17027 _001406_hash NULL ++_001407_hash copy_entries_to_user 1 52367 _001407_hash NULL ++_001408_hash copy_from_buf 2-4 27308 _001408_hash NULL ++_001410_hash copy_oldmem_page 3-1 26164 _001410_hash NULL ++_001411_hash copy_to_user_fromio 3 57432 _001411_hash NULL ++_001412_hash cryptd_hash_setkey 3 42781 _001412_hash NULL ++_001413_hash crypto_authenc_esn_setkey 3 6985 _001413_hash NULL ++_001414_hash crypto_authenc_setkey 3 80 _001414_hash NULL ++_001415_hash cxgb3_get_cpl_reply_skb 2 10620 _001415_hash NULL ++_001416_hash cxgbi_ddp_reserve 4 30091 _001416_hash NULL ++_001417_hash cxio_init_resource_fifo 3 28764 _001417_hash NULL ++_001418_hash cxio_init_resource_fifo_random 3 47151 _001418_hash NULL ++_001419_hash datablob_hmac_append 3 40038 _001419_hash NULL ++_001420_hash datablob_hmac_verify 4 24786 _001420_hash NULL ++_001421_hash dataflash_read_fact_otp 3-2 33204 _001421_hash NULL ++_001422_hash dataflash_read_user_otp 3-2 14536 _001422_hash &_000207_hash ++_001423_hash dccp_feat_register_sp 5 17914 _001423_hash NULL ++_001424_hash dccp_setsockopt 5 60367 _001424_hash NULL ++_001425_hash __dev_alloc_skb 1 28681 _001425_hash NULL ++_001426_hash disk_expand_part_tbl 2 30561 _001426_hash NULL ++_001427_hash diva_os_alloc_message_buffer 1 64568 _001427_hash NULL ++_001428_hash diva_os_copy_to_user 4 48508 _001428_hash NULL ++_001429_hash diva_os_malloc 2 16406 _001429_hash NULL ++_001430_hash dmam_declare_coherent_memory 4-2 43679 _001430_hash NULL ++_001431_hash dm_vcalloc 1-2 16814 _001431_hash NULL ++_001433_hash dn_alloc_skb 2 6631 _001433_hash NULL ++_001434_hash do_proc_readlink 3 14096 _001434_hash NULL ++_001435_hash do_readlink 2 43518 _001435_hash NULL ++_001436_hash __do_replace 5 37227 
_001436_hash NULL ++_001437_hash do_sigpending 2 9766 _001437_hash NULL ++_001438_hash drbd_bm_resize 2 20522 _001438_hash NULL ++_001439_hash drbd_setsockopt 5 16280 _001439_hash &_000383_hash ++_001440_hash dump_midi 3 51040 _001440_hash NULL ++_001441_hash ecryptfs_filldir 3 6622 _001441_hash NULL ++_001442_hash ecryptfs_send_message 2 18322 _001442_hash NULL ++_001443_hash ep0_read 3 38095 _001443_hash NULL ++_001444_hash evdev_ioctl 2 22371 _001444_hash NULL ++_001445_hash ext4_add_new_descs 3 19509 _001445_hash NULL ++_001446_hash fat_ioctl_filldir 3 36621 _001446_hash NULL ++_001447_hash _fc_frame_alloc 1 43568 _001447_hash NULL ++_001448_hash fc_host_post_vendor_event 3 30903 _001448_hash NULL ++_001449_hash fd_copyout 3 59323 _001449_hash NULL ++_001450_hash f_hidg_read 3 6238 _001450_hash NULL ++_001451_hash filldir 3 55137 _001451_hash NULL ++_001452_hash filldir64 3 46469 _001452_hash NULL ++_001453_hash find_skb 2 20431 _001453_hash NULL ++_001454_hash from_buffer 3 18625 _001454_hash NULL ++_001455_hash fsm_init 2 16134 _001455_hash NULL ++_001456_hash fs_path_add 3 15648 _001456_hash NULL ++_001457_hash fs_path_add_from_extent_buffer 4 27702 _001457_hash NULL ++_001458_hash fuse_perform_write 4 18457 _001458_hash NULL ++_001459_hash gem_alloc_skb 2 51715 _001459_hash NULL ++_001460_hash generic_file_buffered_write 4 25464 _001460_hash NULL ++_001461_hash gen_pool_add 3 21776 _001461_hash NULL ++_001462_hash get_packet 3 41914 _001462_hash NULL ++_001463_hash get_packet 3 5747 _001463_hash NULL ++_001464_hash get_packet_pg 4 28023 _001464_hash NULL ++_001465_hash get_skb 2 63008 _001465_hash NULL ++_001466_hash get_subdir 3 62581 _001466_hash NULL ++_001467_hash gsm_control_message 4 18209 _001467_hash NULL ++_001468_hash gsm_control_modem 3 55303 _001468_hash NULL ++_001469_hash gsm_control_rls 3 3353 _001469_hash NULL ++_001470_hash handle_received_packet 3 22457 _001470_hash NULL ++_001471_hash hash_setkey 3 48310 _001471_hash NULL ++_001472_hash hdlcdrv_register 2 6792 _001472_hash NULL ++_001473_hash hiddev_ioctl 2 36816 _001473_hash NULL ++_001474_hash hid_input_report 4 32458 _001474_hash NULL ++_001475_hash hidp_queue_report 3 1881 _001475_hash NULL ++_001476_hash __hidp_send_ctrl_message 4 28303 _001476_hash NULL ++_001477_hash hidraw_read 3 59650 _001477_hash &_001265_hash ++_001478_hash HiSax_readstatus 2 15752 _001478_hash NULL ++_001480_hash __hwahc_op_set_gtk 4 42038 _001480_hash NULL ++_001481_hash __hwahc_op_set_ptk 5 36510 _001481_hash NULL ++_001482_hash hycapi_rx_capipkt 3 11602 _001482_hash NULL ++_001483_hash i2400m_net_rx 5 27170 _001483_hash NULL ++_001484_hash ib_copy_to_udata 3 27525 _001484_hash NULL ++_001485_hash idetape_chrdev_read 3 2097 _001485_hash NULL ++_001486_hash ieee80211_alloc_hw 1 43829 _001486_hash NULL ++_001487_hash ieee80211_bss_info_update 4 13991 _001487_hash NULL ++_001488_hash igmpv3_newpack 2 35912 _001488_hash NULL ++_001489_hash ilo_read 3 32531 _001489_hash NULL ++_001490_hash init_map_ipmac 3-4 63896 _001490_hash NULL ++_001492_hash init_tid_tabs 2-4-3 13252 _001492_hash NULL ++_001495_hash iowarrior_read 3 53483 _001495_hash NULL ++_001496_hash ip_options_get 4 56538 _001496_hash NULL ++_001497_hash ipv6_getsockopt_sticky 5 56711 _001497_hash NULL ++_001498_hash ipwireless_send_packet 4 8328 _001498_hash NULL ++_001499_hash ipx_sendmsg 4 1362 _001499_hash NULL ++_001500_hash irq_domain_add_linear 2 29236 _001500_hash NULL ++_001501_hash iscsi_conn_setup 2 35159 _001501_hash NULL ++_001502_hash iscsi_create_session 3 51647 
_001502_hash NULL ++_001503_hash iscsi_host_alloc 2 36671 _001503_hash NULL ++_001504_hash iscsi_if_send_reply 7 52219 _001504_hash NULL ++_001505_hash iscsi_offload_mesg 5 58425 _001505_hash NULL ++_001506_hash iscsi_ping_comp_event 5 38263 _001506_hash NULL ++_001507_hash iscsi_post_host_event 4 13473 _001507_hash NULL ++_001508_hash iscsi_recv_pdu 4 16755 _001508_hash NULL ++_001509_hash iscsi_session_setup 4-5 196 _001509_hash NULL ++_001511_hash iscsit_find_cmd_from_itt_or_dump 3 17194 _003122_hash NULL nohasharray ++_001512_hash isdn_net_ciscohdlck_alloc_skb 2 55209 _001951_hash NULL nohasharray ++_001513_hash isdn_ppp_ccp_xmit_reset 6 63297 _001513_hash NULL ++_001514_hash isdn_ppp_read 4 50356 _001514_hash NULL ++_001515_hash isdn_ppp_skb_push 2 5236 _001515_hash NULL ++_001516_hash isku_sysfs_read 6 58806 _001516_hash NULL ++_001517_hash isku_sysfs_write 6 49767 _001517_hash NULL ++_001520_hash jbd2_alloc 1 41359 _001520_hash NULL ++_001521_hash jffs2_do_link 6 42048 _001521_hash NULL ++_001522_hash jffs2_do_unlink 4 62020 _001522_hash NULL ++_001523_hash jffs2_security_setxattr 4 62107 _001523_hash NULL ++_001524_hash jffs2_trusted_setxattr 4 17048 _001524_hash NULL ++_001525_hash jffs2_user_setxattr 4 10182 _001525_hash NULL ++_001526_hash joydev_ioctl_common 2 49359 _001526_hash NULL ++_001527_hash kernel_setsockopt 5 35913 _001527_hash NULL ++_001528_hash keyctl_describe_key 3 36853 _001528_hash NULL ++_001529_hash keyctl_get_security 3 64418 _001529_hash &_001192_hash ++_001530_hash keyring_read 3 13438 _001530_hash NULL ++_001531_hash kfifo_copy_to_user 3 20646 _001531_hash NULL ++_001532_hash kmem_zalloc_large 1 56128 _001532_hash NULL ++_001533_hash kmp_init 2 41373 _001533_hash NULL ++_001534_hash koneplus_sysfs_write 6 35993 _001534_hash NULL ++_001535_hash kvm_clear_guest_page 4 2308 _001535_hash NULL ++_001536_hash kvm_read_nested_guest_page 5 13337 _001536_hash NULL ++_001537_hash _l2_alloc_skb 1 11883 _001537_hash NULL ++_001538_hash l2cap_create_basic_pdu 3 24869 _001538_hash &_001074_hash ++_001539_hash l2cap_create_connless_pdu 3 37327 _001539_hash NULL ++_001540_hash l2cap_create_iframe_pdu 3 40055 _001540_hash NULL ++_001541_hash l3_alloc_skb 1 32289 _001541_hash NULL ++_001542_hash __lgwrite 4 57669 _001542_hash NULL ++_001543_hash libfc_host_alloc 2 7917 _001543_hash NULL ++_001544_hash llc_alloc_frame 4 64366 _001544_hash NULL ++_001545_hash llcp_sock_sendmsg 4 1092 _001545_hash NULL ++_001546_hash mac_drv_rx_init 2 48898 _001546_hash NULL ++_001547_hash macvtap_get_user 4 28185 _001547_hash NULL ++_001548_hash mdc800_device_read 3 22896 _001548_hash NULL ++_001549_hash memcpy_toiovec 3 54166 _001549_hash &_000892_hash ++_001550_hash memcpy_toiovecend 3-4 19736 _001550_hash NULL ++_001552_hash mempool_create 1 29437 _001552_hash NULL ++_001553_hash mgmt_event 4 12810 _001553_hash NULL ++_001554_hash mgt_set_varlen 4 60916 _001554_hash NULL ++_001555_hash mI_alloc_skb 1 24770 _001555_hash NULL ++_001556_hash mlx4_en_create_rx_ring 3 62498 _001556_hash NULL ++_001557_hash mlx4_en_create_tx_ring 4 48501 _001557_hash NULL ++_001558_hash mlx4_init_cmpt_table 3 11569 _001558_hash NULL ++_001559_hash mon_bin_get_event 4 52863 _001559_hash NULL ++_001560_hash mousedev_read 3 47123 _001560_hash NULL ++_001561_hash move_addr_to_user 2 2868 _001561_hash NULL ++_001562_hash mpihelp_mul 5-3 27805 _001562_hash NULL ++_001564_hash mpi_set_buffer 3 65294 _001564_hash NULL ++_001565_hash mptctl_ioctl 2 12355 _001565_hash NULL ++_001566_hash msnd_fifo_alloc 2 23179 
_001566_hash NULL
++_001567_hash mtdswap_init 2 55719 _001567_hash NULL
++_001568_hash mthca_alloc_resize_buf 3 60394 _001568_hash NULL
++_001569_hash mthca_init_cq 2 60011 _001569_hash NULL
++_001570_hash nci_skb_alloc 2 49757 _001570_hash NULL
++_001571_hash neigh_hash_grow 2 17283 _001571_hash NULL
++_001572_hash netdev_alloc_skb 2 62437 _001572_hash NULL
++_001573_hash __netdev_alloc_skb_ip_align 2 55067 _001573_hash NULL
++_001574_hash netlink_change_ngroups 2 16457 _001574_hash NULL
++_001575_hash new_skb 1 21148 _001575_hash NULL
++_001576_hash nfc_alloc_recv_skb 1 10244 _001576_hash NULL
++_001577_hash nfcwilink_skb_alloc 1 16167 _001577_hash NULL
++_001578_hash __nf_nat_mangle_tcp_packet 5-7 8190 _001578_hash NULL
++_001580_hash nf_nat_mangle_udp_packet 5-7 13321 _001580_hash NULL
++_001582_hash nfqnl_mangle 4-2 36226 _001582_hash NULL
++_001583_hash nfs4_realloc_slot_table 2 22859 _001583_hash NULL
++_001584_hash nfs_idmap_get_key 2 39616 _001584_hash NULL
++_001585_hash nfs_readdata_alloc 2 65015 _001585_hash NULL
++_001586_hash nfs_writedata_alloc 2 12133 _001586_hash NULL
++_001587_hash nfulnl_alloc_skb 2 65207 _001587_hash NULL
++_001588_hash ni65_alloc_mem 3 10664 _001588_hash NULL
++_001589_hash nsm_get_handle 4 52089 _001589_hash NULL
++_001590_hash ntfs_malloc_nofs 1 49572 _001590_hash NULL
++_001591_hash ntfs_malloc_nofs_nofail 1 63631 _001591_hash NULL
++_001592_hash nvme_create_queue 3 170 _001592_hash NULL
++_001593_hash ocfs2_control_write 3 54737 _001593_hash NULL
++_001595_hash orinoco_add_extscan_result 3 18207 _001595_hash NULL
++_001596_hash osd_req_read_sg_kern 5 6378 _001596_hash NULL
++_001597_hash osd_req_write_sg_kern 5 10514 _001597_hash NULL
++_001599_hash override_release 2 52032 _001599_hash NULL
++_001600_hash p9_client_read 5 19750 _001600_hash NULL
++_001601_hash packet_snd 3 13634 _001601_hash NULL
++_001602_hash pcbit_stat 2 27364 _001602_hash NULL
++_001603_hash pcpu_extend_area_map 2 12589 _001603_hash NULL
++_001604_hash pep_alloc_skb 3 46303 _001604_hash NULL
++_001605_hash pg_read 3 17276 _001605_hash NULL
++_001606_hash picolcd_debug_eeprom_read 3 14549 _001606_hash NULL
++_001607_hash pkt_alloc_packet_data 1 37928 _001607_hash NULL
++_001608_hash pmcraid_build_passthrough_ioadls 2 62034 _001608_hash NULL
++_001609_hash pn_raw_send 2 54330 _001609_hash NULL
++_001610_hash posix_clock_register 2 5662 _001610_hash NULL
++_001611_hash printer_read 3 54851 _001611_hash NULL
++_001612_hash __proc_file_read 3 54978 _001612_hash NULL
++_001613_hash pskb_may_pull 2 22546 _001613_hash NULL
++_001614_hash __pskb_pull 2 42602 _001614_hash NULL
++_001615_hash ptp_read 4 63251 _001615_hash NULL
++_001616_hash pt_read 3 49136 _001616_hash NULL
++_001617_hash put_cmsg 4 36589 _001617_hash NULL
++_001618_hash px_raw_event 4 49371 _001618_hash NULL
++_001619_hash qla4xxx_post_aen_work 3 46953 _001619_hash NULL
++_001620_hash qla4xxx_post_ping_evt_work 4 8074 _001819_hash NULL nohasharray
++_001621_hash raid5_resize 2 63306 _001621_hash NULL
++_001622_hash rawv6_sendmsg 4 20080 _001622_hash NULL
++_001623_hash rds_message_map_pages 2 31487 _001623_hash NULL
++_001624_hash rds_sendmsg 4 40976 _001624_hash NULL
++_001625_hash read_flush 3 43851 _001625_hash NULL
++_001626_hash read_profile 3 27859 _001626_hash NULL
++_001627_hash read_vmcore 3 26501 _001627_hash NULL
++_001628_hash redirected_tty_write 3 65297 _001628_hash NULL
++_001629_hash refill_pool 2 19477 _001629_hash NULL
++_001630_hash __register_chrdev 2-3 54223 _001630_hash NULL
++_001632_hash regmap_raw_write 4 53803 _001632_hash NULL
++_001633_hash reiserfs_allocate_list_bitmaps 3 21732 _001633_hash NULL
++_001634_hash reiserfs_resize 2 34377 _001634_hash NULL
++_001635_hash request_key_auth_read 3 24109 _001635_hash NULL
++_001636_hash rfcomm_wmalloc 2 58090 _001636_hash NULL
++_001637_hash rfkill_fop_read 3 54711 _001637_hash NULL
++_001638_hash rng_dev_read 3 41581 _001638_hash NULL
++_001639_hash roccat_read 3 41093 _001639_hash &_001034_hash
++_001640_hash rx 4 57944 _001640_hash NULL
++_001641_hash rxrpc_client_sendmsg 5 23236 _001641_hash NULL
++_001642_hash rxrpc_kernel_send_data 3 60083 _001642_hash NULL
++_001643_hash rxrpc_server_sendmsg 4 37331 _001643_hash NULL
++_001644_hash savu_sysfs_write 6 42273 _001644_hash NULL
++_001645_hash sco_sock_sendmsg 4 62542 _001645_hash NULL
++_001646_hash scsi_nl_send_vendor_msg 5 16394 _001646_hash NULL
++_001647_hash scsi_register 2 49094 _001647_hash NULL
++_001648_hash sctp_datamsg_from_user 4 55342 _001648_hash NULL
++_001649_hash sctp_getsockopt_events 2 3607 _001649_hash NULL
++_001650_hash sctp_getsockopt_maxburst 2 42941 _001650_hash NULL
++_001651_hash sctp_getsockopt_maxseg 2 10737 _001651_hash NULL
++_001652_hash sctp_make_chunk 4 12986 _001652_hash NULL
++_001653_hash sctpprobe_read 3 17741 _001653_hash NULL
++_001654_hash sctp_tsnmap_mark 2 35929 _001654_hash NULL
++_001655_hash sctp_ulpevent_new 1 33377 _001655_hash NULL
++_001656_hash sdhci_alloc_host 2 7509 _001656_hash NULL
++_001657_hash selinux_inode_post_setxattr 4 26037 _001657_hash NULL
++_001658_hash selinux_inode_setsecurity 4 18148 _001658_hash NULL
++_001659_hash selinux_inode_setxattr 4 10708 _001659_hash NULL
++_001660_hash selinux_secctx_to_secid 2 63744 _001660_hash NULL
++_001661_hash selinux_setprocattr 4 55611 _001661_hash NULL
++_001662_hash sel_write_context 3 25726 _001662_hash &_001329_hash
++_001663_hash send_command 4 10832 _001663_hash NULL
++_001664_hash seq_copy_in_user 3 18543 _001664_hash NULL
++_001665_hash seq_open_net 4 8968 _001779_hash NULL nohasharray
++_001666_hash seq_open_private 3 61589 _001666_hash NULL
++_001667_hash set_arg 3 42824 _001667_hash NULL
++_001668_hash sg_read 3 25799 _001668_hash NULL
++_001669_hash shash_async_setkey 3 10720 _003506_hash NULL nohasharray
++_001670_hash shash_compat_setkey 3 12267 _001670_hash NULL
++_001671_hash shmem_setxattr 4 55867 _001671_hash NULL
++_001672_hash simple_read_from_buffer 2-5 55957 _001672_hash NULL
++_001674_hash sisusb_clear_vram 2-3 57466 _001674_hash NULL
++_001676_hash sisusbcon_do_font_op 9 52271 _001676_hash NULL
++_001677_hash sisusb_copy_memory 4 35016 _001677_hash NULL
++_001678_hash sisusb_write 3 44834 _001678_hash NULL
++_001680_hash skb_cow 2 26138 _001680_hash NULL
++_001681_hash skb_cow_head 2 52495 _001681_hash NULL
++_001682_hash skb_make_writable 2 24783 _001682_hash NULL
++_001683_hash skb_padto 2 50759 _001683_hash NULL
++_001684_hash sk_stream_alloc_skb 2 57622 _001684_hash NULL
++_001685_hash smk_write_access2 3 19170 _001685_hash NULL
++_001686_hash smk_write_access 3 49561 _001686_hash NULL
++_001687_hash snd_es1938_capture_copy 5 25930 _001687_hash NULL
++_001688_hash snd_gus_dram_peek 4 9062 _001688_hash NULL
++_001689_hash snd_hdsp_capture_copy 5 4011 _001689_hash NULL
++_001690_hash snd_korg1212_copy_to 6 92 _001690_hash NULL
++_001691_hash snd_opl4_mem_proc_read 5 63774 _001691_hash NULL
++_001692_hash snd_pcm_oss_read1 3 63771 _001692_hash NULL
++_001693_hash snd_pcm_plugin_alloc 2 12580 _001693_hash NULL
++_001694_hash snd_rawmidi_kernel_read1 4 36740 _001694_hash NULL
++_001695_hash snd_rme9652_capture_copy 5 10287 _001695_hash NULL
++_001696_hash sock_alloc_send_pskb 2 21246 _001696_hash NULL
++_001697_hash sock_rmalloc 2 59740 _002491_hash NULL nohasharray
++_001698_hash sock_wmalloc 2 16472 _001698_hash NULL
++_001699_hash solos_param_store 4 34755 _001699_hash NULL
++_001702_hash srp_target_alloc 3 37288 _001702_hash NULL
++_001703_hash store_ifalias 4 35088 _001703_hash NULL
++_001704_hash store_msg 3 56417 _001704_hash NULL
++_001705_hash str_to_user 2 11411 _001705_hash NULL
++_001706_hash subbuf_read_actor 3 2071 _001706_hash NULL
++_001707_hash sys_fgetxattr 4 25166 _001707_hash NULL
++_001708_hash sys_gethostname 2 49698 _001708_hash NULL
++_001709_hash sys_getxattr 4 37418 _001709_hash NULL
++_001710_hash sys_init_module 2 36047 _001710_hash NULL
++_001711_hash sys_kexec_load 2 14222 _001711_hash NULL
++_001712_hash sys_lgetxattr 4 45531 _001712_hash NULL
++_001713_hash syslog_print 2 307 _001713_hash NULL
++_001714_hash sys_msgsnd 3 44537 _001714_hash &_000139_hash
++_001715_hash sys_process_vm_readv 3-5 19090 _003104_hash NULL nohasharray
++_001717_hash sys_process_vm_writev 3-5 4928 _001717_hash NULL
++_001719_hash sys_pselect6 1 57449 _001719_hash NULL
++_001720_hash sys_sched_getaffinity 2 60033 _001720_hash NULL
++_001721_hash sys_setsockopt 5 35320 _001721_hash NULL
++_001722_hash t3_init_l2t 1 8261 _001722_hash NULL
++_001723_hash t4vf_pktgl_to_skb 2 39005 _001723_hash NULL
++_001724_hash tcp_collapse 5-6 63294 _001724_hash NULL
++_001726_hash tcp_sendmsg 4 30296 _001726_hash NULL
++_001727_hash team_options_register 3 20091 _001727_hash NULL
++_001728_hash tipc_buf_acquire 1 60437 _001728_hash NULL
++_001729_hash tipc_cfg_reply_alloc 1 27606 _001729_hash NULL
++_001730_hash tipc_send2name 6 16809 _001730_hash NULL
++_001731_hash tipc_send2port 5 63935 _001731_hash NULL
++_001732_hash tipc_send 4 51238 _001732_hash NULL
++_001733_hash tnode_new 3 44757 _002769_hash NULL nohasharray
++_001734_hash tomoyo_read_self 3 33539 _001734_hash NULL
++_001735_hash tomoyo_update_domain 2 5498 _001735_hash NULL
++_001736_hash tomoyo_update_policy 2 40458 _001736_hash NULL
++_001737_hash tpm_read 3 50344 _001737_hash NULL
++_001738_hash TSS_rawhmac 3 17486 _001738_hash NULL
++_001739_hash __tty_buffer_request_room 2 27700 _001739_hash NULL
++_001740_hash tun_get_user 4 39099 _001740_hash NULL
++_001741_hash ubi_dump_flash 4 46381 _001741_hash NULL
++_001742_hash ubi_io_write 4-5 15870 _003453_hash NULL nohasharray
++_001744_hash udp_setsockopt 5 25985 _001744_hash NULL
++_001745_hash udpv6_setsockopt 5 18487 _001745_hash NULL
++_001746_hash uio_read 3 49300 _001746_hash NULL
++_001747_hash ulog_alloc_skb 1 23427 _001747_hash NULL
++_001748_hash unix_dgram_sendmsg 4 45699 _001748_hash NULL
++_001749_hash unlink1 3 63059 _001749_hash NULL
++_001751_hash usbdev_read 3 45114 _001751_hash NULL
++_001752_hash usblp_ioctl 2 30203 _001752_hash NULL
++_001753_hash usblp_read 3 57342 _003832_hash NULL nohasharray
++_001754_hash usbtmc_read 3 32377 _001754_hash NULL
++_001755_hash _usb_writeN_sync 4 31682 _001755_hash NULL
++_001756_hash user_read 3 51881 _001756_hash NULL
++_001757_hash vcs_read 3 8017 _001757_hash NULL
++_001758_hash vdma_mem_alloc 1 6171 _001758_hash NULL
++_001759_hash venus_create 4 20555 _001759_hash NULL
++_001760_hash venus_link 5 32165 _001760_hash NULL
++_001761_hash venus_lookup 4 8121 _001761_hash NULL
++_001762_hash venus_mkdir 4 8967 _001762_hash NULL
++_001763_hash venus_remove 4 59781 _001763_hash NULL
++_001764_hash venus_rename 4-5 17707 _003784_hash NULL nohasharray
++_001766_hash venus_rmdir 4 45564 _001766_hash NULL
++_001767_hash venus_symlink 4-6 23570 _001767_hash NULL
++_001769_hash vfs_readlink 3 54368 _001769_hash NULL
++_001770_hash vfs_readv 3 38011 _001770_hash NULL
++_001771_hash vfs_writev 3 25278 _001771_hash NULL
++_001772_hash vga_arb_read 3 4886 _001772_hash NULL
++_001773_hash vgacon_adjust_height 2 28124 _001773_hash NULL
++_001774_hash vhci_put_user 4 12604 _001774_hash NULL
++_001775_hash vhost_add_used_n 3 10760 _001775_hash NULL
++_001776_hash virtnet_send_command 5-6 61993 _001776_hash NULL
++_001778_hash vmbus_establish_gpadl 3 4495 _001778_hash NULL
++_001779_hash vol_cdev_read 3 8968 _001779_hash &_001665_hash
++_001780_hash wdm_read 3 6549 _001780_hash NULL
++_001781_hash write_adapter_mem 3 3234 _001781_hash NULL
++_001782_hash wusb_prf 7 54261 _001782_hash &_000065_hash
++_001783_hash xdi_copy_to_user 4 48900 _001783_hash NULL
++_001784_hash xfs_buf_associate_memory 3 17915 _001784_hash NULL
++_001785_hash xfs_buf_get_maps 2 4581 _001785_hash NULL
++_001786_hash xfs_buf_get_uncached 2 51477 _001786_hash NULL
++_001787_hash xfs_buf_item_get_format 2 189 _001787_hash NULL
++_001788_hash xfs_buf_map_from_irec 5 2368 _002641_hash NULL nohasharray
++_001789_hash xfs_dir2_block_to_sf 3 37868 _001789_hash NULL
++_001790_hash xfs_dir2_leaf_getdents 3 23841 _001790_hash NULL
++_001791_hash xfs_dir2_sf_addname_hard 3 54254 _001791_hash NULL
++_001792_hash xfs_efd_init 3 5463 _001792_hash NULL
++_001793_hash xfs_efi_init 2 5476 _001793_hash NULL
++_001794_hash xfs_iext_realloc_direct 2 20521 _001794_hash NULL
++_001795_hash xfs_iext_realloc_indirect 2 59211 _001795_hash NULL
++_001796_hash xfs_inumbers_fmt 3 12817 _001796_hash NULL
++_001797_hash xhci_alloc_streams 5 37586 _001797_hash NULL
++_001798_hash xlog_recover_add_to_cont_trans 4 44102 _001798_hash NULL
++_001799_hash xz_dec_lzma2_create 2 36353 _002713_hash NULL nohasharray
++_001800_hash _zd_iowrite32v_locked 3 44725 _001800_hash NULL
++_001801_hash a2mp_chan_alloc_skb_cb 2 27159 _001801_hash NULL
++_001802_hash aat2870_reg_read_file 3 12221 _001802_hash NULL
++_001803_hash add_partition 2 55588 _001803_hash NULL
++_001804_hash add_sctp_bind_addr 3 12269 _001804_hash NULL
++_001805_hash _add_sg_continuation_descriptor 3 54721 _001805_hash NULL
++_001806_hash afs_cell_lookup 2 8482 _001806_hash NULL
++_001807_hash afs_send_simple_reply 3 63940 _001807_hash NULL
++_001808_hash agp_allocate_memory_wrap 1 16576 _001808_hash NULL
++_001809_hash __alloc_bootmem 1 31498 _001809_hash NULL
++_001810_hash __alloc_bootmem_low 1 43423 _003425_hash NULL nohasharray
++_001811_hash __alloc_bootmem_node_high 2 65076 _001811_hash NULL
++_001812_hash alloc_cc770dev 1 48186 _001812_hash NULL
++_001813_hash __alloc_ei_netdev 1 29338 _001813_hash NULL
++_001814_hash __alloc_eip_netdev 1 51549 _001814_hash NULL
++_001815_hash alloc_libipw 1 22708 _001815_hash NULL
++_001816_hash _alloc_mISDN_skb 3 52232 _001816_hash NULL
++_001817_hash alloc_pg_vec 2 8533 _001817_hash NULL
++_001818_hash alloc_sja1000dev 1 17868 _001818_hash NULL
++_001819_hash alloc_targets 2 8074 _001819_hash &_001620_hash
++_001822_hash ath6kl_disconnect_timeout_read 3 3650 _001822_hash NULL
++_001823_hash ath6kl_endpoint_stats_read 3 41554 _001823_hash NULL
++_001824_hash ath6kl_fwlog_mask_read 3 2050 _001824_hash NULL
++_001825_hash ath6kl_keepalive_read 3 44303 _001825_hash NULL
++_001826_hash ath6kl_listen_int_read 3 10355 _001826_hash NULL
++_001827_hash ath6kl_lrssi_roam_read 3 61022 _001827_hash NULL
++_001828_hash ath6kl_regdump_read 3 14393 _001828_hash NULL
++_001829_hash ath6kl_regread_read 3 25884 _001829_hash NULL
++_001830_hash ath6kl_regwrite_read 3 48747 _001830_hash NULL
++_001831_hash ath6kl_roam_table_read 3 26166 _001831_hash NULL
++_001832_hash ath9k_debugfs_read_buf 3 25316 _001832_hash NULL
++_001833_hash ath9k_multi_regread 4 65056 _001833_hash NULL
++_001834_hash ath_rxbuf_alloc 2 24745 _001834_hash NULL
++_001835_hash atk_debugfs_ggrp_read 3 29522 _001835_hash NULL
++_001836_hash audit_log_n_untrustedstring 3 9548 _001836_hash NULL
++_001837_hash ax25_send_frame 2 19964 _001837_hash NULL
++_001838_hash b43_debugfs_read 3 24425 _001838_hash NULL
++_001839_hash b43legacy_debugfs_read 3 2473 _001839_hash NULL
++_001840_hash batadv_bla_is_backbone_gw 3 58488 _001840_hash NULL
++_001841_hash batadv_check_management_packet 3 52993 _001841_hash NULL
++_001842_hash batadv_check_unicast_packet 2 10866 _001842_hash NULL
++_001843_hash batadv_interface_rx 4 8568 _001843_hash NULL
++_001844_hash batadv_skb_head_push 2 11360 _001844_hash NULL
++_001845_hash bchannel_get_rxbuf 2 37213 _001845_hash NULL
++_001846_hash bcm_recvmsg 4 43992 _001846_hash NULL
++_001847_hash bfad_debugfs_read 3 13119 _001847_hash NULL
++_001848_hash bfad_debugfs_read_regrd 3 57830 _001848_hash NULL
++_001849_hash blk_init_tags 1 30592 _001849_hash NULL
++_001850_hash blk_queue_init_tags 2 44355 _001850_hash &_001022_hash
++_001851_hash blk_rq_map_kern 4 47004 _001851_hash NULL
++_001852_hash bm_entry_read 3 10976 _001852_hash NULL
++_001853_hash bm_status_read 3 19583 _001853_hash NULL
++_001854_hash bnad_debugfs_read 3 50665 _001854_hash NULL
++_001855_hash bnad_debugfs_read_regrd 3 51308 _001855_hash NULL
++_001856_hash bnx2i_send_nl_mesg 4 53353 _001856_hash NULL
++_001857_hash brcmf_debugfs_sdio_counter_read 3 58369 _001857_hash NULL
++_001858_hash brcmf_sdio_assert_info 4 52653 _001858_hash NULL
++_001859_hash brcmf_sdio_dump_console 4 37455 _001859_hash NULL
++_001860_hash brcmf_sdio_trap_info 4 48510 _001860_hash NULL
++_001861_hash btmrvl_curpsmode_read 3 46939 _001861_hash NULL
++_001862_hash btmrvl_gpiogap_read 3 4718 _001862_hash NULL
++_001863_hash btmrvl_hscfgcmd_read 3 56303 _001863_hash NULL
++_001864_hash btmrvl_hscmd_read 3 1614 _001864_hash NULL
++_001865_hash btmrvl_hsmode_read 3 1647 _001865_hash NULL
++_001866_hash btmrvl_hsstate_read 3 920 _001866_hash NULL
++_001867_hash btmrvl_pscmd_read 3 24308 _001867_hash NULL
++_001868_hash btmrvl_psmode_read 3 22395 _001868_hash NULL
++_001869_hash btmrvl_psstate_read 3 50683 _001869_hash NULL
++_001870_hash btmrvl_txdnldready_read 3 413 _001870_hash NULL
++_001871_hash btrfs_add_link 5 9973 _001871_hash NULL
++_001872_hash __btrfs_direct_write 4 22273 _001872_hash NULL
++_001873_hash btrfs_discard_extent 2 38547 _001873_hash NULL
++_001874_hash btrfs_file_aio_write 4 21520 _001874_hash NULL
++_001875_hash btrfs_find_create_tree_block 3 55812 _001875_hash NULL
++_001876_hash btrfsic_map_block 2 56751 _001876_hash NULL
++_001877_hash cache_read_pipefs 3 47615 _001877_hash NULL
++_001878_hash cache_read_procfs 3 52882 _001878_hash NULL
++_001879_hash cache_write_pipefs 3 48270 _001879_hash NULL
++_001880_hash cache_write_procfs 3 22491 _001880_hash NULL
++_001881_hash caif_stream_recvmsg 4 13173 _001881_hash NULL
++_001882_hash carl9170_alloc 1 27 _001882_hash NULL
++_001883_hash carl9170_debugfs_read 3 47738 _001883_hash NULL
++_001884_hash ceph_msgpool_init 4 34599 _001884_hash NULL
++_001885_hash cfpkt_add_trail 3 27260 _001885_hash NULL
++_001886_hash cfpkt_create 1 18197 _001886_hash NULL
++_001887_hash cfpkt_pad_trail 2 55511 _003606_hash NULL nohasharray
++_001888_hash cfpkt_split 2 47541 _001888_hash NULL
++_001889_hash cgroup_read_s64 5 19570 _001889_hash NULL
++_001890_hash cgroup_read_u64 5 45532 _001890_hash NULL
++_001891_hash channel_type_read 3 47308 _001891_hash NULL
++_001892_hash check_header 2 56930 _001892_hash NULL
++_001893_hash codec_list_read_file 3 24910 _001893_hash NULL
++_001894_hash configfs_read_file 3 1683 _001894_hash NULL
++_001895_hash console_store 4 36007 _001895_hash NULL
++_001896_hash cpuset_common_file_read 5 8800 _001896_hash NULL
++_001897_hash create_subvol 4 30836 _001897_hash NULL
++_001898_hash cxio_hal_init_resource 2-7-6 29771 _001898_hash &_000295_hash
++_001901_hash cxio_hal_init_rhdl_resource 1 25104 _001901_hash NULL
++_001902_hash dai_list_read_file 3 25421 _001902_hash NULL
++_001903_hash dapm_bias_read_file 3 64715 _001903_hash NULL
++_001904_hash dapm_widget_power_read_file 3 59950 _001983_hash NULL nohasharray
++_001907_hash dbgfs_frame 3 45917 _001907_hash NULL
++_001908_hash dbgfs_state 3 38894 _001908_hash NULL
++_001909_hash dccp_manip_pkt 2 30229 _001909_hash NULL
++_001910_hash ddp_ppod_write_idata 5 25610 _001910_hash NULL
++_001911_hash debugfs_read 3 62535 _001911_hash NULL
++_001912_hash debug_output 3 18575 _001912_hash NULL
++_001913_hash debug_read 3 19322 _001913_hash NULL
++_001914_hash dev_alloc_skb 1 19517 _001914_hash &_001366_hash
++_001915_hash dfs_file_read 3 18116 _001915_hash NULL
++_001916_hash diva_alloc_dma_map 2 23798 _001916_hash NULL
++_001917_hash diva_xdi_write 4 63975 _001917_hash NULL
++_001918_hash dma_memcpy_pg_to_iovec 6 1725 _001918_hash NULL
++_001919_hash dma_memcpy_to_iovec 5 12173 _001919_hash NULL
++_001920_hash dma_show_regs 3 35266 _001920_hash NULL
++_001921_hash dm_exception_table_init 2 39645 _001921_hash &_001149_hash
++_001922_hash dn_nsp_do_disc 2-6 49474 _001922_hash NULL
++_001924_hash dn_recvmsg 4 17213 _001924_hash NULL
++_001925_hash dns_resolver_read 3 54658 _001925_hash NULL
++_001926_hash do_msgrcv 4 5590 _001926_hash NULL
++_001927_hash do_syslog 3 56807 _001927_hash NULL
++_001928_hash dpcm_state_read_file 3 65489 _001928_hash NULL
++_001929_hash dsp_cmx_send_member 2 15625 _001929_hash NULL
++_001930_hash fallback_on_nodma_alloc 2 35332 _001930_hash NULL
++_001931_hash fc_frame_alloc 2 1596 _001931_hash NULL
++_001932_hash fc_frame_alloc_fill 2 59394 _001932_hash NULL
++_001933_hash filter_read 3 61692 _001933_hash NULL
++_001934_hash __finish_unordered_dir 4 33198 _001934_hash NULL
++_001935_hash format_devstat_counter 3 32550 _001935_hash NULL
++_001936_hash fragmentation_threshold_read 3 61718 _001936_hash NULL
++_001937_hash fuse_conn_limit_read 3 20084 _001937_hash NULL
++_001938_hash fuse_conn_waiting_read 3 49762 _001938_hash NULL
++_001939_hash fuse_file_aio_write 4 46399 _001939_hash NULL
++_001940_hash generic_readlink 3 32654 _001940_hash NULL
++_001941_hash gre_manip_pkt 2 38785 _001941_hash NULL
++_001942_hash handle_eviocgbit 3 44193 _001942_hash NULL
++_001943_hash handle_response 5 55951 _001943_hash NULL
++_001944_hash handle_response_icmp 7 39574 _001944_hash NULL
++_001945_hash hash_recvmsg 4 50924 _001945_hash NULL
++_001946_hash hci_send_cmd 3 43810 _001946_hash NULL
++_001947_hash hci_si_event 3 1404 _001947_hash NULL
++_001948_hash help 4 14971 _001948_hash NULL
++_001949_hash hfcpci_empty_bfifo 4 62323 _001949_hash NULL
++_001950_hash hidp_send_ctrl_message 4 43702 _001950_hash NULL
++_001951_hash ht40allow_map_read 3 55209 _001951_hash &_001512_hash
++_001952_hash hwflags_read 3 52318 _001952_hash NULL
++_001953_hash hysdn_conf_read 3 42324 _001953_hash NULL
++_001954_hash hysdn_sched_rx 3 60533 _001954_hash NULL
++_001955_hash i2400m_rx_stats_read 3 57706 _001955_hash NULL
++_001956_hash i2400m_tx_stats_read 3 28527 _001956_hash NULL
++_001957_hash icmp_manip_pkt 2 48801 _001957_hash NULL
++_001958_hash idmouse_read 3 63374 _001958_hash NULL
++_001959_hash ieee80211_if_read 3 6785 _001959_hash NULL
++_001960_hash ieee80211_rx_bss_info 3 61630 _001960_hash NULL
++_001961_hash ikconfig_read_current 3 1658 _001961_hash NULL
++_001962_hash il3945_sta_dbgfs_stats_table_read 3 48802 _001962_hash NULL
++_001963_hash il3945_ucode_general_stats_read 3 46111 _001963_hash NULL
++_001964_hash il3945_ucode_rx_stats_read 3 3048 _001964_hash NULL
++_001965_hash il3945_ucode_tx_stats_read 3 36016 _001965_hash NULL
++_001966_hash il4965_rs_sta_dbgfs_rate_scale_data_read 3 37792 _001966_hash NULL
++_001967_hash il4965_rs_sta_dbgfs_scale_table_read 3 38564 _001967_hash NULL
++_001968_hash il4965_rs_sta_dbgfs_stats_table_read 3 49206 _001968_hash NULL
++_001969_hash il4965_ucode_general_stats_read 3 56277 _001969_hash NULL
++_001970_hash il4965_ucode_rx_stats_read 3 61948 _001970_hash NULL
++_001971_hash il4965_ucode_tx_stats_read 3 12064 _001971_hash NULL
++_001972_hash il_dbgfs_chain_noise_read 3 38044 _001972_hash NULL
++_001973_hash il_dbgfs_channels_read 3 25005 _001973_hash NULL
++_001974_hash il_dbgfs_disable_ht40_read 3 42386 _001974_hash NULL
++_001975_hash il_dbgfs_fh_reg_read 3 40993 _001975_hash NULL
++_001976_hash il_dbgfs_force_reset_read 3 57517 _001976_hash NULL
++_001977_hash il_dbgfs_interrupt_read 3 3351 _001977_hash NULL
++_001978_hash il_dbgfs_missed_beacon_read 3 59956 _001978_hash NULL
++_001979_hash il_dbgfs_nvm_read 3 12288 _001979_hash NULL
++_001980_hash il_dbgfs_power_save_status_read 3 43165 _001980_hash NULL
++_001981_hash il_dbgfs_qos_read 3 33615 _001981_hash NULL
++_001982_hash il_dbgfs_rxon_filter_flags_read 3 19281 _001982_hash NULL
++_001983_hash il_dbgfs_rxon_flags_read 3 59950 _001983_hash &_001904_hash
++_001984_hash il_dbgfs_rx_queue_read 3 11221 _001984_hash NULL
++_001985_hash il_dbgfs_rx_stats_read 3 15243 _001985_hash NULL
++_001986_hash il_dbgfs_sensitivity_read 3 2370 _001986_hash NULL
++_001987_hash il_dbgfs_sram_read 3 62296 _001987_hash NULL
++_001988_hash il_dbgfs_stations_read 3 21532 _001988_hash NULL
++_001989_hash il_dbgfs_status_read 3 58388 _001989_hash NULL
++_001990_hash il_dbgfs_tx_queue_read 3 55668 _001990_hash NULL
++_001991_hash il_dbgfs_tx_stats_read 3 32913 _001991_hash NULL
++_001992_hash ima_show_htable_value 2 57136 _001992_hash NULL
++_001994_hash intel_fake_agp_alloc_by_type 1 1 _001994_hash NULL
++_001995_hash ip4ip6_err 5 36772 _001995_hash NULL
++_001996_hash ip6_append_data 4-5 36490 _003601_hash NULL nohasharray
++_001997_hash ip6ip6_err 5 18308 _001997_hash NULL
++_001998_hash __ip_append_data 7-8 36191 _001998_hash NULL
++_001999_hash ip_vs_icmp_xmit 4 59624 _001999_hash NULL
++_002000_hash ip_vs_icmp_xmit_v6 4 20464 _002000_hash NULL
++_002001_hash ipw_write 3 59807 _002001_hash NULL
++_002002_hash irda_recvmsg_stream 4 35280 _002002_hash NULL
++_002003_hash irq_domain_add_simple 2 46734 _002003_hash NULL
++_002004_hash __iscsi_complete_pdu 4 10726 _002004_hash NULL
++_002005_hash iscsi_nop_out_rsp 4 51117 _002005_hash NULL
++_002006_hash iscsi_tcp_conn_setup 2 16376 _002006_hash NULL
++_002007_hash iwl_dbgfs_bt_traffic_read 3 35534 _002007_hash NULL
++_002008_hash iwl_dbgfs_calib_disabled_read 3 22649 _002008_hash NULL
++_002009_hash iwl_dbgfs_chain_noise_read 3 46355 _002009_hash NULL
++_002010_hash iwl_dbgfs_channels_read 3 6784 _002010_hash NULL
++_002011_hash iwl_dbgfs_current_sleep_command_read 3 2081 _002011_hash NULL
++_002012_hash iwl_dbgfs_disable_ht40_read 3 35761 _002012_hash NULL
++_002013_hash iwl_dbgfs_fh_reg_read 3 879 _002013_hash &_000406_hash
++_002014_hash iwl_dbgfs_interrupt_read 3 23574 _002014_hash NULL
++_002015_hash iwl_dbgfs_log_event_read 3 2107 _002015_hash NULL
++_002016_hash iwl_dbgfs_missed_beacon_read 3 50584 _002016_hash NULL
++_002017_hash iwl_dbgfs_nvm_read 3 23845 _002017_hash NULL
++_002018_hash iwl_dbgfs_plcp_delta_read 3 55407 _002018_hash NULL
++_002019_hash iwl_dbgfs_power_save_status_read 3 54392 _002019_hash NULL
++_002020_hash iwl_dbgfs_protection_mode_read 3 13943 _002020_hash NULL
++_002021_hash iwl_dbgfs_qos_read 3 11753 _002021_hash NULL
++_002022_hash iwl_dbgfs_reply_tx_error_read 3 19205 _002022_hash NULL
++_002023_hash iwl_dbgfs_rf_reset_read 3 26512 _002023_hash NULL
++_002024_hash iwl_dbgfs_rx_handlers_read 3 18708 _002024_hash NULL
++_002025_hash iwl_dbgfs_rxon_filter_flags_read 3 28832 _002025_hash NULL
++_002026_hash iwl_dbgfs_rxon_flags_read 3 20795 _002026_hash NULL
++_002027_hash iwl_dbgfs_rx_queue_read 3 19943 _002027_hash NULL
++_002028_hash iwl_dbgfs_sensitivity_read 3 63116 _002731_hash NULL nohasharray
++_002029_hash iwl_dbgfs_sleep_level_override_read 3 3038 _002029_hash NULL
++_002030_hash iwl_dbgfs_sram_read 3 44505 _002030_hash NULL
++_002031_hash iwl_dbgfs_stations_read 3 9309 _002031_hash NULL
++_002032_hash iwl_dbgfs_status_read 3 5171 _002032_hash NULL
++_002033_hash iwl_dbgfs_temperature_read 3 29224 _002033_hash NULL
++_002034_hash iwl_dbgfs_thermal_throttling_read 3 38779 _002034_hash NULL
++_002035_hash iwl_dbgfs_tx_queue_read 3 4635 _002035_hash NULL
++_002036_hash iwl_dbgfs_ucode_bt_stats_read 3 42820 _002036_hash NULL
++_002037_hash iwl_dbgfs_ucode_general_stats_read 3 49199 _002037_hash NULL
++_002038_hash iwl_dbgfs_ucode_rx_stats_read 3 58023 _002038_hash NULL
++_002039_hash iwl_dbgfs_ucode_tracing_read 3 47983 _002039_hash &_000356_hash
++_002040_hash iwl_dbgfs_ucode_tx_stats_read 3 31611 _002040_hash NULL
++_002041_hash iwl_dbgfs_wowlan_sram_read 3 540 _002041_hash NULL
++_002042_hash joydev_ioctl 2 33343 _002042_hash NULL
++_002043_hash kernel_readv 3 35617 _002043_hash NULL
++_002044_hash key_algorithm_read 3 57946 _002044_hash NULL
++_002045_hash key_icverrors_read 3 20895 _002045_hash NULL
++_002046_hash key_key_read 3 3241 _002046_hash NULL
++_002047_hash key_replays_read 3 62746 _002047_hash NULL
++_002048_hash key_rx_spec_read 3 12736 _002048_hash NULL
++_002049_hash key_tx_spec_read 3 4862 _002049_hash NULL
++_002050_hash __kfifo_to_user 3 36555 _002568_hash NULL nohasharray
++_002051_hash __kfifo_to_user_r 3 39123 _002051_hash NULL
++_002052_hash kmem_zalloc_greedy 2-3 65268 _002052_hash NULL
++_002054_hash l1oip_socket_recv 6 56537 _002054_hash NULL
++_002055_hash l2cap_build_cmd 4 48676 _002055_hash NULL
++_002056_hash l2cap_chan_send 3 49995 _002056_hash NULL
++_002057_hash l2cap_segment_sdu 4 48772 _002057_hash NULL
++_002058_hash l2down_create 4 21755 _002058_hash NULL
++_002059_hash l2tp_xmit_skb 3 42672 _002059_hash NULL
++_002060_hash l2up_create 3 6430 _002060_hash NULL
++_002061_hash lbs_debugfs_read 3 30721 _002061_hash NULL
++_002062_hash lbs_dev_info 3 51023 _002062_hash NULL
++_002063_hash lbs_host_sleep_read 3 31013 _002063_hash NULL
++_002064_hash lbs_rdbbp_read 3 45805 _002064_hash NULL
++_002065_hash lbs_rdmac_read 3 418 _002065_hash NULL
++_002066_hash lbs_rdrf_read 3 41431 _002066_hash NULL
++_002067_hash lbs_sleepparams_read 3 10840 _002067_hash NULL
++_002068_hash lbs_threshold_read 5 21046 _002068_hash NULL
++_002069_hash ldisc_receive 4 41516 _002069_hash NULL
++_002070_hash libfc_vport_create 2 4415 _002070_hash NULL
++_002073_hash lkdtm_debugfs_read 3 45752 _002073_hash NULL
++_002074_hash llcp_sock_recvmsg 4 13556 _002074_hash NULL
++_002075_hash long_retry_limit_read 3 59766 _002075_hash NULL
++_002076_hash lpfc_debugfs_dif_err_read 3 36303 _002076_hash NULL
++_002077_hash lpfc_debugfs_read 3 16566 _002077_hash NULL
++_002078_hash lpfc_idiag_baracc_read 3 58466 _002972_hash NULL nohasharray
++_002079_hash lpfc_idiag_ctlacc_read 3 33943 _002079_hash NULL
++_002080_hash lpfc_idiag_drbacc_read 3 15948 _002080_hash NULL
++_002081_hash lpfc_idiag_extacc_read 3 48301 _002081_hash NULL
++_002082_hash lpfc_idiag_mbxacc_read 3 28061 _002082_hash NULL
++_002083_hash lpfc_idiag_pcicfg_read 3 50334 _002083_hash NULL
++_002084_hash lpfc_idiag_queacc_read 3 13950 _002084_hash NULL
++_002085_hash lpfc_idiag_queinfo_read 3 55662 _002085_hash NULL
++_002086_hash lro_gen_skb 6 2644 _002086_hash NULL
++_002087_hash mac80211_format_buffer 2 41010 _002087_hash NULL
++_002088_hash macvtap_alloc_skb 2-4-3 50629 _002088_hash NULL
++_002091_hash macvtap_put_user 4 55609 _002091_hash NULL
++_002092_hash macvtap_sendmsg 4 30629 _002092_hash NULL
++_002093_hash mangle_packet 6-8 27864 _002093_hash NULL
++_002095_hash manip_pkt 3 7741 _002095_hash NULL
++_002096_hash mempool_create_kmalloc_pool 1 41650 _002096_hash NULL
++_002097_hash mempool_create_page_pool 1 30189 _002097_hash NULL
++_002098_hash mempool_create_slab_pool 1 62907 _002098_hash NULL
++_002099_hash mgmt_device_found 10 14146 _002099_hash NULL
++_002100_hash minstrel_stats_read 3 17290 _002100_hash NULL
++_002101_hash mmc_ext_csd_read 3 13205 _002101_hash NULL
++_002102_hash mon_bin_read 3 6841 _002102_hash NULL
++_002103_hash mon_stat_read 3 25238 _002103_hash NULL
++_002105_hash mqueue_read_file 3 6228 _002105_hash NULL
++_002106_hash mwifiex_debug_read 3 53074 _002106_hash NULL
++_002107_hash mwifiex_getlog_read 3 54269 _002107_hash NULL
++_002108_hash mwifiex_info_read 3 53447 _002108_hash NULL
++_002109_hash mwifiex_rdeeprom_read 3 51429 _002109_hash NULL
++_002110_hash mwifiex_regrdwr_read 3 34472 _002110_hash NULL
++_002111_hash named_prepare_buf 2 24532 _002111_hash NULL
++_002112_hash nci_send_cmd 3 58206 _002112_hash NULL
++_002113_hash netdev_alloc_skb_ip_align 2 40811 _002113_hash NULL
++_002114_hash netpoll_send_udp 3 58955 _002114_hash NULL
++_002115_hash nfcwilink_send_bts_cmd 3 10802 _002115_hash NULL
++_002116_hash nf_nat_mangle_tcp_packet 5-7 8643 _002116_hash NULL
++_002119_hash nfsd_vfs_read 6 62605 _002616_hash NULL nohasharray
++_002120_hash nfsd_vfs_write 6 54577 _002120_hash NULL
++_002121_hash nfs_idmap_lookup_id 2 10660 _002121_hash NULL
++_002122_hash ntfs_rl_realloc 3 56831 _002122_hash &_000370_hash
++_002123_hash ntfs_rl_realloc_nofail 3 32173 _002123_hash NULL
++_002124_hash o2hb_debug_read 3 37851 _002124_hash NULL
++_002125_hash o2net_debug_read 3 52105 _002125_hash NULL
++_002126_hash ocfs2_control_read 3 56405 _002126_hash NULL
++_002127_hash ocfs2_debug_read 3 14507 _002127_hash NULL
++_002128_hash oom_adjust_read 3 25127 _002128_hash NULL
++_002129_hash oom_score_adj_read 3 39921 _002426_hash NULL nohasharray
++_002130_hash oprofilefs_str_to_user 3 42182 _002130_hash NULL
++_002131_hash oprofilefs_ulong_to_user 3 11582 _002131_hash NULL
++_002132_hash osd_req_add_get_attr_list 3 49278 _002132_hash NULL
++_002133_hash _osd_req_list_objects 6 4204 _002133_hash NULL
++_002134_hash osd_req_read_kern 5 59990 _002134_hash NULL
++_002135_hash osd_req_write_kern 5 53486 _002135_hash NULL
++_002136_hash osst_read 3 40237 _002136_hash NULL
++_002137_hash p54_alloc_skb 3 34366 _002137_hash &_000485_hash
++_002138_hash p54_init_common 1 23850 _002138_hash NULL
++_002139_hash packet_alloc_skb 2-5-4 62602 _002139_hash NULL
++_002142_hash packet_sendmsg 4 24954 _002142_hash NULL
++_002143_hash page_readlink 3 23346 _002143_hash NULL
++_002144_hash pcf50633_write_block 3 2124 _002144_hash NULL
++_002145_hash pcpu_alloc_alloc_info 1-2 45813 _002145_hash NULL
++_002147_hash pep_indicate 5 38611 _002147_hash NULL
++_002148_hash pep_reply 5 50582 _002148_hash NULL
++_002149_hash pipe_handler_request 5 50774 _003582_hash NULL nohasharray
++_002150_hash platform_list_read_file 3 34734 _002150_hash NULL
++_002151_hash pm860x_bulk_write 3 43875 _002151_hash NULL
++_002152_hash pm_qos_power_read 3 55891 _002152_hash NULL
++_002153_hash port_show_regs 3 5904 _002153_hash NULL
++_002154_hash proc_coredump_filter_read 3 39153 _002154_hash NULL
++_002155_hash proc_fdinfo_read 3 62043 _002155_hash NULL
++_002156_hash proc_file_read 3 53905 _002156_hash NULL
++_002157_hash proc_info_read 3 63344 _002157_hash NULL
++_002158_hash proc_loginuid_read 3 15631 _002158_hash NULL
++_002159_hash proc_pid_attr_read 3 10173 _002159_hash NULL
++_002160_hash proc_pid_readlink 3 52186 _002160_hash NULL
++_002161_hash proc_read 3 43614 _002161_hash NULL
++_002162_hash proc_self_readlink 3 38094 _002162_hash NULL
++_002163_hash proc_sessionid_read 3 6911 _002299_hash NULL nohasharray
++_002164_hash provide_user_output 3 41105 _002164_hash NULL
++_002165_hash pskb_network_may_pull 2 35336 _002165_hash NULL
++_002166_hash pskb_pull 2 65005 _002166_hash NULL
++_002167_hash pstore_file_read 3 57288 _002167_hash NULL
++_002168_hash ql_process_mac_rx_page 4 15543 _002168_hash NULL
++_002169_hash ql_process_mac_rx_skb 4 6689 _002169_hash NULL
++_002170_hash queues_read 3 24877 _002170_hash NULL
++_002171_hash raw_recvmsg 4 17277 _002171_hash NULL
++_002172_hash rcname_read 3 25919 _002172_hash NULL
++_002173_hash read_4k_modal_eeprom 3 30212 _002173_hash NULL
++_002174_hash read_9287_modal_eeprom 3 59327 _002174_hash NULL
++_002175_hash reada_find_extent 2 63486 _002175_hash NULL
++_002176_hash read_def_modal_eeprom 3 14041 _002176_hash NULL
++_002177_hash read_enabled_file_bool 3 37744 _002177_hash NULL
++_002178_hash read_file_ani 3 23161 _002178_hash NULL
++_002179_hash read_file_antenna 3 13574 _002179_hash NULL
++_002180_hash read_file_base_eeprom 3 42168 _002180_hash NULL
++_002181_hash read_file_beacon 3 32595 _002181_hash NULL
++_002182_hash read_file_blob 3 57406 _002182_hash NULL
++_002183_hash read_file_bool 3 4180 _002183_hash NULL
++_002184_hash read_file_credit_dist_stats 3 54367 _002184_hash NULL
++_002185_hash read_file_debug 3 58256 _002185_hash NULL
++_002186_hash read_file_disable_ani 3 6536 _002186_hash NULL
++_002187_hash read_file_dma 3 9530 _002187_hash NULL
++_002188_hash read_file_dump_nfcal 3 18766 _002188_hash NULL
++_002189_hash read_file_frameerrors 3 64001 _002189_hash NULL
++_002190_hash read_file_interrupt 3 61742 _002197_hash NULL nohasharray
++_002191_hash read_file_misc 3 9948 _002191_hash NULL
++_002192_hash read_file_modal_eeprom 3 39909 _002192_hash NULL
++_002193_hash read_file_queue 3 40895 _002193_hash NULL
++_002194_hash read_file_rcstat 3 22854 _002194_hash NULL
++_002195_hash read_file_recv 3 48232 _002195_hash NULL
++_002196_hash read_file_regidx 3 33370 _002196_hash NULL
++_002197_hash read_file_regval 3 61742 _002197_hash &_002190_hash
++_002198_hash read_file_reset 3 52310 _002198_hash NULL
++_002199_hash read_file_rx_chainmask 3 41605 _002199_hash NULL
++_002200_hash read_file_slot 3 50111 _002200_hash NULL
++_002201_hash read_file_stations 3 35795 _002201_hash NULL
++_002202_hash read_file_tgt_int_stats 3 20697 _002202_hash NULL
++_002203_hash read_file_tgt_rx_stats 3 33944 _002203_hash NULL
++_002204_hash read_file_tgt_stats 3 8959 _002204_hash NULL
++_002205_hash read_file_tgt_tx_stats 3 51847 _002205_hash NULL
++_002206_hash read_file_tx_chainmask 3 3829 _002206_hash NULL
++_002207_hash read_file_war_stats 3 292 _002207_hash NULL
++_002208_hash read_file_xmit 3 21487 _002208_hash NULL
++_002209_hash read_flush_pipefs 3 20171 _002209_hash NULL
++_002210_hash read_flush_procfs 3 27642 _002210_hash NULL
++_002211_hash read_from_oldmem 2 3337 _002211_hash NULL
++_002212_hash read_oldmem 3 55658 _002212_hash NULL
++_002213_hash receive_packet 2 12367 _002213_hash NULL
++_002214_hash regmap_name_read_file 3 39379 _002214_hash NULL
++_002215_hash repair_io_failure 4 4815 _002215_hash NULL
++_002216_hash request_key_and_link 4 42693 _002216_hash NULL
++_002217_hash res_counter_read 4 33499 _002217_hash NULL
++_002218_hash rfcomm_tty_write 3 51603 _002218_hash NULL
++_002219_hash rs_sta_dbgfs_rate_scale_data_read 3 47165 _002219_hash NULL
++_002220_hash rs_sta_dbgfs_scale_table_read 3 40262 _002220_hash NULL
++_002221_hash rs_sta_dbgfs_stats_table_read 3 56573 _002221_hash NULL
++_002222_hash rts_threshold_read 3 44384 _002222_hash NULL
++_002223_hash rxrpc_sendmsg 4 29049 _002223_hash NULL
++_002224_hash scrub_setup_recheck_block 3-4 56245 _002224_hash NULL
++_002226_hash scsi_adjust_queue_depth 3 12802 _002226_hash NULL
++_002227_hash sctp_make_abort 3 34459 _002227_hash NULL
++_002228_hash sctp_make_asconf 3 4078 _002228_hash NULL
++_002229_hash sctp_make_asconf_ack 3 31726 _002229_hash NULL
++_002230_hash sctp_make_datafrag_empty 3 34737 _002230_hash NULL
++_002231_hash sctp_make_fwdtsn 3 53265 _002231_hash NULL
++_002232_hash sctp_make_heartbeat_ack 4 34411 _002232_hash NULL
++_002233_hash sctp_make_init 4 58401 _002233_hash NULL
++_002234_hash sctp_make_init_ack 4 3335 _002234_hash NULL
++_002235_hash sctp_make_op_error_space 3 5528 _002235_hash NULL
++_002236_hash sctp_manip_pkt 2 40620 _002236_hash NULL
++_002237_hash selinux_inode_notifysecctx 3 36896 _002237_hash NULL
++_002238_hash sel_read_avc_cache_threshold 3 33942 _002238_hash NULL
++_002239_hash sel_read_avc_hash_stats 3 1984 _002239_hash NULL
++_002240_hash sel_read_bool 3 24236 _002240_hash NULL
++_002241_hash sel_read_checkreqprot 3 33068 _002241_hash NULL
++_002242_hash sel_read_class 3 12669 _002960_hash NULL nohasharray
++_002243_hash sel_read_enforce 3 2828 _002243_hash NULL
++_002244_hash sel_read_handle_status 3 56139 _002244_hash NULL
++_002245_hash sel_read_handle_unknown 3 57933 _002245_hash NULL
++_002246_hash sel_read_initcon 3 32362 _002246_hash NULL
++_002247_hash sel_read_mls 3 25369 _002247_hash NULL
++_002248_hash sel_read_perm 3 42302 _002248_hash NULL
++_002249_hash sel_read_policy 3 55947 _002249_hash NULL
++_002250_hash sel_read_policycap 3 28544 _002250_hash NULL
++_002251_hash sel_read_policyvers 3 55 _002827_hash NULL nohasharray
++_002252_hash send_mpa_reject 3 7135 _002252_hash NULL
++_002253_hash send_mpa_reply 3 32372 _002253_hash NULL
++_002254_hash send_msg 4 37323 _002254_hash NULL
++_002255_hash send_packet 4 52960 _002255_hash NULL
++_002256_hash set_rxd_buffer_pointer 8 9950 _002256_hash NULL
++_002257_hash sge_rx 3 50594 _002257_hash NULL
++_002258_hash short_retry_limit_read 3 4687 _002258_hash NULL
++_002259_hash simple_attr_read 3 24738 _002259_hash NULL
++_002260_hash simple_transaction_read 3 17076 _002260_hash NULL
++_002261_hash sisusbcon_bmove 5-7-6 21873 _002261_hash NULL
++_002264_hash sisusbcon_clear 3-5-4 64329 _002264_hash NULL
++_002267_hash sisusbcon_putcs 3 57630 _002267_hash &_001043_hash
++_002268_hash sisusbcon_scroll 5-3-2 31315 _002268_hash NULL
++_002269_hash sisusbcon_scroll_area 3-4 25899 _002269_hash NULL
++_002271_hash skb_copy_datagram_const_iovec 2-5-4 48102 _002271_hash NULL
++_002274_hash skb_copy_datagram_iovec 2-4 5806 _002274_hash NULL
++_002276_hash skb_gro_header_slow 2 34958 _002276_hash NULL
++_002277_hash smk_read_ambient 3 61220 _002277_hash NULL
++_002278_hash smk_read_direct 3 15803 _002278_hash NULL
++_002279_hash smk_read_doi 3 30813 _002279_hash NULL
++_002280_hash smk_read_logging 3 37804 _002280_hash NULL
++_002281_hash smk_read_mapped 3 7562 _002281_hash NULL
++_002282_hash smk_read_onlycap 3 3855 _002282_hash NULL
++_002283_hash smp_build_cmd 3 45853 _002283_hash NULL
++_002284_hash snapshot_read 3 22601 _002284_hash NULL
++_002285_hash snd_cs4281_BA0_read 5 6847 _002285_hash NULL
++_002286_hash snd_cs4281_BA1_read 5 20323 _002286_hash NULL
++_002287_hash snd_cs46xx_io_read 5 45734 _002287_hash NULL
++_002288_hash snd_gus_dram_read 4 56686 _002288_hash NULL
++_002289_hash snd_mixart_BA0_read 5 45069 _002289_hash NULL
++_002290_hash snd_mixart_BA1_read 5 5082 _002290_hash NULL
++_002291_hash snd_pcm_oss_read 3 28317 _002291_hash NULL
++_002292_hash snd_pcm_plug_alloc 2 42339 _002292_hash NULL
++_002293_hash snd_rawmidi_kernel_read 3 4328 _002293_hash NULL
++_002294_hash snd_rawmidi_read 3 56337 _002294_hash NULL
++_002295_hash snd_rme32_capture_copy 5 39653 _002295_hash NULL
++_002296_hash snd_rme96_capture_copy 5 58484 _002296_hash NULL
++_002297_hash snd_soc_hw_bulk_write_raw 4 14245 _002297_hash NULL
++_002298_hash sock_alloc_send_skb 2 23720 _002298_hash NULL
++_002299_hash spi_show_regs 3 6911 _002299_hash &_002163_hash
++_002300_hash sta_agg_status_read 3 14058 _002300_hash NULL
++_002301_hash sta_connected_time_read 3 17435 _002301_hash NULL
++_002302_hash sta_flags_read 3 56710 _002302_hash NULL
++_002303_hash sta_ht_capa_read 3 10366 _002303_hash NULL
++_002304_hash sta_last_seq_ctrl_read 3 19106 _002304_hash NULL
++_002305_hash sta_num_ps_buf_frames_read 3 1488 _002305_hash NULL
++_002306_hash st_read 3 51251 _002306_hash NULL
++_002307_hash supply_map_read_file 3 10608 _002307_hash NULL
++_002308_hash sysfs_read_file 3 42113 _002308_hash NULL
++_002309_hash sys_preadv 3 17100 _002309_hash NULL
++_002310_hash sys_pwritev 3 41722 _002310_hash NULL
++_002311_hash sys_readv 3 50664 _002311_hash NULL
++_002312_hash sys_rt_sigpending 2 24961 _002312_hash NULL
++_002313_hash sys_writev 3 28384 _002313_hash NULL
++_002314_hash tcf_csum_skb_nextlayer 3 64025 _002314_hash NULL
++_002315_hash tcp_fragment 3 20436 _002315_hash NULL
++_002316_hash tcp_manip_pkt 2 14202 _002316_hash NULL
++_002317_hash teiup_create 3 43201 _002317_hash NULL
++_002318_hash test_iso_queue 5 62534 _002318_hash NULL
++_002319_hash tg3_run_loopback 2 30093 _002319_hash NULL
++_002320_hash tipc_msg_build 4 12326 _002320_hash NULL
++_002321_hash TSS_authhmac 3 12839 _002321_hash NULL
++_002322_hash TSS_checkhmac1 5 31429 _002322_hash NULL
++_002323_hash TSS_checkhmac2 5-7 40520 _002323_hash NULL
++_002325_hash tty_audit_log 8 47280 _002325_hash NULL
++_002326_hash tty_buffer_request_room 2 23228 _002326_hash NULL
++_002327_hash tty_insert_flip_string_fixed_flag 4 37428 _002327_hash NULL
++_002328_hash tty_insert_flip_string_flags 4 30969 _002328_hash NULL
++_002329_hash tty_prepare_flip_string 3 39955 _002329_hash NULL
++_002330_hash tty_prepare_flip_string_flags 4 59240 _002330_hash NULL
++_002331_hash tun_alloc_skb 2-4-3 41216 _002331_hash NULL
++_002334_hash tun_sendmsg 4 10337 _002334_hash NULL
++_002335_hash u32_array_read 3 2219 _002335_hash NULL
++_002336_hash ubi_io_write_data 4-5 40305 _002336_hash NULL
++_002338_hash udplite_manip_pkt 2 62433 _002338_hash NULL
++_002339_hash udp_manip_pkt 2 50770 _002339_hash NULL
++_002340_hash uhci_debug_read 3 5911 _002340_hash NULL
++_002341_hash um_idi_read 3 850 _002341_hash NULL
++_002342_hash unix_seqpacket_sendmsg 4 27893 _002342_hash NULL
++_002343_hash unix_stream_recvmsg 4 35210 _002343_hash NULL
++_002344_hash unlink_simple 3 47506 _002344_hash NULL
++_002345_hash use_pool 2 64607 _002345_hash NULL
++_002346_hash v9fs_fid_readn 4 60544 _002346_hash NULL
++_002347_hash v9fs_file_read 3 40858 _002347_hash NULL
++_002348_hash vhci_read 3 47878 _002348_hash NULL
++_002349_hash vhost_add_used_and_signal_n 4 8038 _002349_hash NULL
++_002350_hash vmbus_open 2-3 12154 _002350_hash NULL
++_002352_hash vxge_rx_alloc 3 52024 _002352_hash NULL
++_002353_hash waiters_read 3 40902 _002353_hash NULL
++_002354_hash wm8994_bulk_write 3 13615 _002354_hash NULL
++_002355_hash write_pbl 4 59583 _002355_hash NULL
++_002356_hash wusb_prf_256 7 29203 _002356_hash NULL
++_002357_hash wusb_prf_64 7 51065 _002357_hash NULL
++_002358_hash _xfs_buf_alloc 3 38058 _002358_hash NULL
++_002359_hash xfs_buf_read_uncached 3 42844 _002359_hash NULL
++_002360_hash xfs_file_buffered_aio_write 4 11492 _002360_hash NULL
++_002361_hash xfs_iext_add 3 41422 _002361_hash NULL
++_002362_hash xfs_iext_remove_direct 3 40744 _002362_hash NULL
++_002363_hash xfs_readdir 3 41200 _002363_hash NULL
++_002364_hash xfs_trans_get_efd 3 51148 _002364_hash NULL
++_002365_hash xfs_trans_get_efi 2 7898 _002365_hash NULL
++_002366_hash xlog_bread_offset 3 60030 _002366_hash NULL
++_002367_hash xlog_get_bp 2 23229 _002367_hash NULL
++_002368_hash xz_dec_init 2 29029 _002368_hash NULL
++_002369_hash aac_change_queue_depth 2 825 _002369_hash NULL
++_002370_hash add_rx_skb 3 8257 _002370_hash NULL
++_002371_hash afs_extract_data 5 50261 _002371_hash NULL
++_002372_hash arcmsr_adjust_disk_queue_depth 2 16756 _002372_hash NULL
++_002373_hash atalk_recvmsg 4 22053 _002373_hash NULL
++_002374_hash ath6kl_buf_alloc 1 57304 _002374_hash NULL
++_002376_hash atomic_read_file 3 16227 _002376_hash NULL
++_002377_hash ax25_recvmsg 4 64441 _002377_hash NULL
++_002378_hash batadv_add_packet 3 12136 _002378_hash NULL
++_002379_hash batadv_iv_ogm_aggregate_new 2 54761 _002379_hash NULL
++_002380_hash batadv_tt_response_fill_table 1 39236 _002380_hash NULL
++_002381_hash beiscsi_process_async_pdu 7 39834 _002381_hash NULL
++_002382_hash bioset_create 1 5580 _002382_hash NULL
++_002383_hash bioset_integrity_create 2 62708 _002383_hash NULL
++_002384_hash biovec_create_pools 2 9575 _002384_hash NULL
++_002385_hash bnx2fc_process_l2_frame_compl 3 65072 _002385_hash NULL
++_002386_hash brcmf_sdbrcm_died_dump 3 15841 _002386_hash NULL
++_002387_hash brcmu_pkt_buf_get_skb 1 5556 _002387_hash NULL
++_002388_hash br_send_bpdu 3 29669 _002388_hash NULL
++_002389_hash btrfs_error_discard_extent 2 50444 _002389_hash NULL
++_002390_hash __btrfs_free_reserved_extent 2 31207 _002390_hash NULL
++_002391_hash btrfsic_cmp_log_and_dev_bytenr 2 49628 _002391_hash NULL
++_002392_hash btrfsic_create_link_to_next_block 4 58246 _002392_hash NULL
++_002393_hash btrfs_init_new_buffer 4 55761 _002393_hash NULL
++_002394_hash btrfs_mksubvol 3 58240 _002394_hash NULL
++_002395_hash bt_skb_send_alloc 2 6581 _002395_hash NULL
++_002396_hash bt_sock_recvmsg 4 12316 _002396_hash NULL
++_002397_hash bt_sock_stream_recvmsg 4 52518 _002397_hash NULL
++_002398_hash c4iw_reject_cr 3 28174 _002398_hash NULL
++_002399_hash caif_seqpkt_recvmsg 4 32241 _002399_hash NULL
++_002400_hash carl9170_rx_copy_data 2 21656 _002400_hash NULL
++_002401_hash cfpkt_append 3 61206 _002401_hash NULL
++_002402_hash cfpkt_setlen 2 49343 _002402_hash NULL
++_002403_hash cgroup_file_read 3 28804 _002403_hash NULL
++_002404_hash cosa_net_setup_rx 2 38594 _002404_hash NULL
++_002405_hash cpu_type_read 3 36540 _002405_hash NULL
++_002406_hash cxgb4_pktgl_to_skb 2 61899 _002406_hash NULL
++_002408_hash dccp_recvmsg 4 16056 _002408_hash NULL
++_002409_hash ddp_clear_map 4 46152 _002409_hash NULL
++_002410_hash ddp_set_map 4 751 _002410_hash NULL
++_002411_hash depth_read 3 31112 _002411_hash NULL
++_002412_hash dfs_global_file_read 3 7787 _002412_hash NULL
++_002413_hash dgram_recvmsg 4 23104 _002413_hash NULL
++_002414_hash diva_init_dma_map 3 58336 _002414_hash NULL
++_002415_hash divas_write 3 63901 _002415_hash NULL
++_002416_hash dma_push_rx 2 39973 _002416_hash NULL
++_002417_hash dma_skb_copy_datagram_iovec 3-5 21516 _002417_hash NULL
++_002419_hash dm_table_create 3 35687 _002419_hash NULL
++_002420_hash dn_alloc_send_pskb 2 4465 _002420_hash NULL
++_002421_hash dn_nsp_return_disc 2 60296 _002421_hash NULL
++_002422_hash dn_nsp_send_disc 2 23469 _002422_hash NULL
++_002423_hash dsp_tone_hw_message 3 17678 _002423_hash NULL
++_002424_hash e1000_check_copybreak 3 62448 _002424_hash NULL
++_002425_hash enable_read 3 2117 _002425_hash &_000224_hash
++_002426_hash exofs_read_kern 6 39921 _002426_hash &_002129_hash
++_002427_hash fast_rx_path 3 59214 _002427_hash NULL
++_002428_hash fc_change_queue_depth 2 36841 _002428_hash NULL
++_002429_hash fc_fcp_frame_alloc 2 12624 _002429_hash NULL
++_002430_hash fcoe_ctlr_send_keep_alive 3 15308 _002430_hash NULL
++_002431_hash frequency_read 3 64031 _003698_hash NULL nohasharray
++_002432_hash ftdi_process_packet 5 45005 _002432_hash NULL
++_002433_hash fuse_conn_congestion_threshold_read 3 51028 _002433_hash NULL
++_002434_hash fuse_conn_max_background_read 3 10855 _002434_hash NULL
++_002435_hash fwnet_incoming_packet 3 40380 _002435_hash NULL
++_002436_hash fwnet_pd_new 4 39947 _003402_hash NULL nohasharray
++_002437_hash get_alua_req 3 4166 _002437_hash NULL
++_002438_hash get_rdac_req 3 45882 _002438_hash NULL
++_002439_hash got_frame 2 16028 _002439_hash NULL
++_002440_hash gsm_mux_rx_netchar 3 33336 _002440_hash NULL
++_002441_hash hci_sock_recvmsg 4 7072 _002441_hash NULL
++_002442_hash hdlcdev_rx 3 997 _002442_hash NULL
++_002443_hash hdlc_empty_fifo 2 18397 _002443_hash NULL
++_002444_hash hfc_empty_fifo 2 57972 _002444_hash NULL
++_002445_hash hfcpci_empty_fifo 4 2427 _002445_hash NULL
++_002446_hash hfcsusb_rx_frame 3 52745 _002446_hash NULL
++_002447_hash hidp_output_raw_report 3 5629 _002447_hash NULL
++_002448_hash hpsa_change_queue_depth 2 15449 _002448_hash NULL
++_002449_hash hptiop_adjust_disk_queue_depth 2 20122 _002449_hash NULL
++_002450_hash hscx_empty_fifo 2 13360 _002450_hash NULL
++_002451_hash hysdn_rx_netpkt 3 16136 _002451_hash NULL
++_002452_hash i2o_pool_alloc 4 55485 _002452_hash NULL
++_002453_hash ide_queue_pc_tail 5 11673 _002453_hash NULL
++_002454_hash ide_raw_taskfile 4 42355 _002454_hash NULL
++_002455_hash idetape_queue_rw_tail 3 29562 _002455_hash NULL
++_002456_hash ieee80211_amsdu_to_8023s 5 15561 _002456_hash NULL
++_002457_hash ieee80211_fragment 4 33112 _002457_hash NULL
++_002458_hash ieee80211_if_read_aid 3 9705 _002458_hash NULL
++_002459_hash ieee80211_if_read_auto_open_plinks 3 38268 _002459_hash &_000374_hash
++_002460_hash ieee80211_if_read_ave_beacon 3 64924 _002460_hash NULL
++_002461_hash ieee80211_if_read_bssid 3 35161 _002461_hash NULL
++_002462_hash ieee80211_if_read_channel_type 3 23884 _002462_hash NULL
++_002463_hash ieee80211_if_read_dot11MeshConfirmTimeout 3 60670 _002463_hash NULL
++_002464_hash ieee80211_if_read_dot11MeshForwarding 3 13940 _002464_hash NULL
++_002465_hash ieee80211_if_read_dot11MeshGateAnnouncementProtocol 3 14486 _002465_hash NULL
++_002466_hash ieee80211_if_read_dot11MeshHoldingTimeout 3 47356 _002466_hash NULL
++_002467_hash ieee80211_if_read_dot11MeshHWMPactivePathTimeout 3 7368 _002467_hash NULL
++_002468_hash ieee80211_if_read_dot11MeshHWMPactivePathToRootTimeout 3 17618 _002468_hash NULL
++_002469_hash ieee80211_if_read_dot11MeshHWMPconfirmationInterval 3 57722 _002469_hash NULL
++_002470_hash ieee80211_if_read_dot11MeshHWMPmaxPREQretries 3 59829 _002470_hash NULL
++_002471_hash ieee80211_if_read_dot11MeshHWMPnetDiameterTraversalTime 3 1589 _002471_hash NULL
++_002472_hash ieee80211_if_read_dot11MeshHWMPperrMinInterval 3 17346 _002472_hash NULL
++_002473_hash ieee80211_if_read_dot11MeshHWMPpreqMinInterval 3 24208 _002473_hash NULL
++_002474_hash ieee80211_if_read_dot11MeshHWMPRannInterval 3 2249 _002474_hash NULL
++_002475_hash ieee80211_if_read_dot11MeshHWMProotInterval 3 27873 _002475_hash NULL
++_002476_hash ieee80211_if_read_dot11MeshHWMPRootMode 3 51441 _002476_hash NULL
++_002477_hash ieee80211_if_read_dot11MeshMaxPeerLinks 3 23878 _002477_hash NULL
++_002478_hash ieee80211_if_read_dot11MeshMaxRetries 3 12756 _002478_hash NULL
++_002479_hash ieee80211_if_read_dot11MeshRetryTimeout 3 52168 _002479_hash NULL
++_002480_hash ieee80211_if_read_dot11MeshTTL 3 58307 _002480_hash NULL
++_002481_hash ieee80211_if_read_dropped_frames_congestion 3 32603 _002481_hash NULL
++_002482_hash ieee80211_if_read_dropped_frames_no_route 3 33383 _002482_hash NULL
++_002483_hash ieee80211_if_read_dropped_frames_ttl 3 44500 _002483_hash NULL
++_002484_hash ieee80211_if_read_drop_unencrypted 3 37053 _002484_hash NULL
++_002485_hash ieee80211_if_read_dtim_count 3 38419 _002485_hash NULL
++_002486_hash ieee80211_if_read_element_ttl 3 18869 _002486_hash NULL
++_002487_hash ieee80211_if_read_estab_plinks 3 32533 _002487_hash NULL
++_002488_hash ieee80211_if_read_flags 3 57470 _002919_hash NULL nohasharray
++_002489_hash ieee80211_if_read_fwded_frames 3 36520 _002489_hash NULL
++_002490_hash ieee80211_if_read_fwded_mcast 3 39571 _002490_hash &_000162_hash
++_002491_hash ieee80211_if_read_fwded_unicast 3 59740 _002491_hash &_001697_hash
++_002492_hash ieee80211_if_read_ht_opmode 3 29044 _002492_hash NULL
++_002493_hash ieee80211_if_read_last_beacon 3 31257 _002493_hash NULL
++_002494_hash ieee80211_if_read_min_discovery_timeout 3 13946 _002494_hash NULL
++_002495_hash ieee80211_if_read_num_buffered_multicast 3 12716 _002495_hash NULL
++_002496_hash ieee80211_if_read_num_mcast_sta 3 12419 _002496_hash NULL
++_002497_hash ieee80211_if_read_num_sta_ps 3 34722 _002497_hash NULL
++_002498_hash ieee80211_if_read_path_refresh_time 3 25545 _002498_hash NULL
++_002499_hash ieee80211_if_read_peer 3 45233 _002499_hash NULL
++_002500_hash ieee80211_if_read_rc_rateidx_mask_2ghz 3 61570 _002500_hash NULL
++_002501_hash ieee80211_if_read_rc_rateidx_mask_5ghz 3 27183 _002501_hash NULL
++_002502_hash ieee80211_if_read_rc_rateidx_mcs_mask_2ghz 3 37675 _002502_hash NULL
++_002503_hash ieee80211_if_read_rc_rateidx_mcs_mask_5ghz 3 44423 _002503_hash NULL
++_002504_hash ieee80211_if_read_rssi_threshold 3 49260 _002504_hash NULL
++_002505_hash ieee80211_if_read_smps 3 27416 _002505_hash NULL
++_002506_hash ieee80211_if_read_state 3 9813 _002707_hash NULL nohasharray
++_002507_hash ieee80211_if_read_tkip_mic_test 3 19565 _002507_hash NULL
++_002508_hash ieee80211_if_read_tsf 3 16420 _002508_hash NULL
++_002509_hash ieee80211_if_read_uapsd_max_sp_len 3 15067 _002509_hash NULL
++_002510_hash ieee80211_if_read_uapsd_queues 3 55150 _002510_hash NULL
++_002511_hash ieee80211_mgmt_tx 9 46860 _002511_hash NULL
++_002512_hash ieee80211_probereq_get 4-6 29069 _002512_hash NULL
++_002514_hash ieee80211_rx_mgmt_beacon 3 24430 _002514_hash NULL
++_002515_hash ieee80211_rx_mgmt_probe_resp 3 6918 _002515_hash NULL
++_002516_hash ieee80211_send_auth 5 24121 _002516_hash NULL
++_002517_hash ieee80211_set_probe_resp 3 10077 _002517_hash NULL
++_002518_hash ieee80211_tdls_mgmt 8 9581 _002518_hash NULL
++_002519_hash ima_show_htable_violations 3 10619 _002519_hash NULL
++_002520_hash ima_show_measurements_count 3 23536 _002520_hash NULL
++_002521_hash insert_one_name 7 61668 _002521_hash NULL
++_002522_hash ip6_ufo_append_data 5-7-6 4780 _002522_hash NULL
++_002525_hash ip_append_data 5-6 16942 _002525_hash NULL
++_002526_hash ip_make_skb 5-6 13129 _002526_hash NULL
++_002527_hash ip_nat_sdp_port 6 52938 _002527_hash NULL
++_002528_hash ip_nat_sip_expect 7 45693 _002528_hash NULL
++_002529_hash ipr_change_queue_depth 2 6431 _002529_hash NULL
++_002530_hash ip_recv_error 3 23109 _002530_hash NULL
++_002531_hash ip_ufo_append_data 6-8-7 12775 _002531_hash NULL
++_002534_hash ipv6_recv_error 3 56347 _002534_hash NULL
++_002535_hash ipv6_recv_rxpmtu 3 7142 _002535_hash NULL
++_002536_hash ipw_packet_received_skb 2 1230 _002536_hash NULL
++_002537_hash ipx_recvmsg 4 44366 _002537_hash NULL
++_002538_hash irda_recvmsg_dgram 4 32631 _002538_hash NULL
++_002539_hash iscsi_change_queue_depth 2 23416 _002539_hash NULL
++_002540_hash iscsi_complete_pdu 4 48372 _002540_hash NULL
++_002541_hash iwch_reject_cr 3 23901 _002541_hash NULL
++_002542_hash ixgb_check_copybreak 3 5847 _002542_hash NULL
++_002543_hash key_conf_hw_key_idx_read 3 25003 _002543_hash NULL
++_002544_hash key_conf_keyidx_read 3 42443 _002544_hash NULL
++_002545_hash key_conf_keylen_read 3 49758 _002545_hash NULL
++_002546_hash key_flags_read 3 25931 _002546_hash NULL
++_002547_hash key_ifindex_read 3 31411 _002547_hash NULL
++_002548_hash key_tx_rx_count_read 3 44742 _002548_hash NULL
++_002549_hash kmsg_read 3 46514 _002549_hash NULL
++_002550_hash l1oip_socket_parse 4 4507 _002550_hash NULL
++_002551_hash l2cap_send_cmd 4 14548 _002551_hash NULL
++_002552_hash l2cap_sock_sendmsg 4 63427 _002552_hash NULL
++_002553_hash l2tp_ip6_recvmsg 4 62874 _002553_hash NULL
++_002554_hash l2tp_ip6_sendmsg 4 7461 _002554_hash NULL
++_002555_hash l2tp_ip_recvmsg 4 22681 _002555_hash NULL
++_002556_hash lbs_bcnmiss_read 3 8678 _002556_hash NULL
++_002557_hash lbs_failcount_read 3 31063 _002557_hash NULL
++_002558_hash lbs_highrssi_read 3 64089 _002558_hash NULL
++_002559_hash lbs_highsnr_read 3 5931 _002559_hash NULL
++_002560_hash lbs_lowrssi_read 3 32242 _002560_hash NULL
++_002561_hash lbs_lowsnr_read 3 29571 _002561_hash NULL
++_002563_hash llc_ui_recvmsg 4 3826 _002563_hash NULL
++_002564_hash lowpan_fragment_xmit 3-4 22095 _002564_hash NULL
++_002566_hash lpfc_change_queue_depth 2 25905 _002566_hash NULL
++_002568_hash macvtap_do_read 4 36555 _002568_hash &_002050_hash
++_002569_hash mangle_sdp_packet 9 36279 _002569_hash NULL
++_002570_hash map_addr 6 4666 _002570_hash NULL
++_002571_hash mcs_unwrap_fir 3 25733 _002571_hash NULL
++_002572_hash mcs_unwrap_mir 3 9455 _002572_hash NULL
++_002573_hash megaraid_change_queue_depth 2 64815 _002573_hash NULL
++_002574_hash megasas_change_queue_depth 2 32747 _002574_hash NULL
++_002575_hash mld_newpack 2 50950 _002575_hash NULL
++_002576_hash mptscsih_change_queue_depth 2 26036 _002576_hash NULL
++_002577_hash named_distribute 4 48544 _002577_hash NULL
++_002578_hash NCR_700_change_queue_depth 2 31742 _002578_hash NULL
++_002579_hash netlink_recvmsg 4 61600 _002579_hash NULL
++_002580_hash nfc_alloc_send_skb 4 3167 _002580_hash NULL
++_002581_hash nf_nat_ftp 5 47948 _002581_hash NULL
++_002582_hash nfsctl_transaction_read 3 48250 _002582_hash NULL
++_002583_hash nfsd_read 5 19568 _002583_hash NULL
++_002584_hash nfsd_read_file 6 62241 _002584_hash NULL
++_002585_hash nfsd_write 6 54809 _002585_hash NULL
++_002586_hash nfs_map_group_to_gid 3 15892 _002586_hash NULL
++_002587_hash nfs_map_name_to_uid 3 51132 _002587_hash NULL
++_002588_hash nr_recvmsg 4 12649 _002588_hash NULL
++_002589_hash ntfs_rl_append 2-4 6037 _002589_hash NULL
++_002591_hash ntfs_rl_insert 2-4 4931 _002591_hash NULL
++_002593_hash ntfs_rl_replace 2-4 14136 _002593_hash NULL
++_002595_hash ntfs_rl_split 2-4 52328 _002595_hash NULL
++_002597_hash osd_req_list_collection_objects 5 36664 _002597_hash NULL
++_002598_hash osd_req_list_partition_objects 5 56464 _002598_hash NULL
++_002599_hash osd_req_read_sg 5 47905 _002599_hash NULL
++_002600_hash osd_req_write_sg 5 50908 _002600_hash NULL
++_002602_hash p54_download_eeprom 4 43842 _002602_hash NULL
++_002604_hash packet_recv_error 3 16669 _002604_hash NULL
++_002605_hash packet_recvmsg 4 47700 _002605_hash NULL
++_002606_hash pep_recvmsg 4 19402 _002606_hash NULL
++_002607_hash pfkey_recvmsg 4 53604 _002607_hash NULL
++_002608_hash ping_recvmsg 4 25597 _002608_hash NULL
++_002609_hash pmcraid_change_queue_depth 2 9116 _002609_hash NULL
++_002610_hash pn_recvmsg 4 30887 _002610_hash NULL
++_002611_hash pointer_size_read 3 51863 _002611_hash NULL
++_002612_hash power_read 3 15939 _002612_hash NULL
++_002613_hash pppoe_recvmsg 4 15073 _002613_hash NULL
++_002614_hash pppol2tp_recvmsg 4 57742 _002993_hash NULL nohasharray
++_002615_hash ppp_tx_cp 5 62044 _002615_hash NULL
++_002616_hash prism2_send_mgmt 4 62605 _002616_hash &_002119_hash
++_002617_hash prism2_sta_send_mgmt 5 43916 _002617_hash NULL
++_002618_hash prison_create 1 43623 _002618_hash NULL
++_002619_hash qla2x00_adjust_sdev_qdepth_up 2 20097 _002619_hash NULL
++_002620_hash qla2x00_change_queue_depth 2 24742 _002620_hash NULL
++_002621_hash _queue_data 4 54983 _002621_hash NULL
++_002622_hash raw_recvmsg 4 52529 _002622_hash NULL
++_002623_hash rawsock_recvmsg 4 12144 _002623_hash NULL
++_002624_hash rawv6_recvmsg 4 30265 _002624_hash NULL
++_002625_hash rds_tcp_data_recv 3 53476 _002625_hash NULL
++_002626_hash reada_add_block 2 54247 _002626_hash NULL
++_002627_hash readahead_tree_block 3 36285 _002627_hash NULL
++_002628_hash reada_tree_block_flagged 3 18402 _002628_hash NULL
++_002629_hash read_dma 3 55086 _002629_hash NULL
++_002630_hash read_fifo 3 826 _002630_hash NULL
++_002631_hash read_tree_block 3 841 _002631_hash NULL
++_002632_hash receive_copy 3 12216 _002632_hash NULL
++_002633_hash recover_peb 6-7 29238 _002633_hash NULL
++_002635_hash recv_msg 4 48709 _002635_hash NULL
++_002636_hash recv_stream 4 30138 _002636_hash NULL
++_002637_hash _req_append_segment 2 41031 _002637_hash NULL
++_002638_hash request_key_async 4 6990 _002638_hash NULL
++_002639_hash request_key_async_with_auxdata 4 46624 _002639_hash NULL
++_002640_hash request_key_with_auxdata 4 24515 _002640_hash NULL
++_002641_hash rose_recvmsg 4 2368 _002641_hash &_001788_hash
++_002642_hash rtl8169_try_rx_copy 3 705 _002642_hash NULL
++_002643_hash _rtl92s_firmware_downloadcode 3 14021 _002643_hash NULL
++_002644_hash rx_data 4 60442 _002644_hash NULL
++_002645_hash rxrpc_recvmsg 4 26233 _002645_hash NULL
++_002646_hash sas_change_queue_depth 2 18555 _002646_hash NULL
++_002647_hash scsi_activate_tcq 2 42640 _002647_hash NULL
++_002648_hash scsi_deactivate_tcq 2 47086 _002648_hash NULL
++_002649_hash scsi_execute 5 33596 _002649_hash NULL
++_002650_hash _scsih_adjust_queue_depth 2 1083 _002650_hash NULL
++_002651_hash scsi_init_shared_tag_map 2 59812 _002651_hash NULL
++_002652_hash scsi_track_queue_full 2 44239 _002652_hash NULL
++_002653_hash sctp_abort_pkt_new 5 55218 _002653_hash NULL
++_002654_hash sctp_make_abort_violation 4 27959 _002654_hash NULL
++_002655_hash sctp_make_op_error 5-6 7057 _002655_hash NULL
++_002657_hash sctp_recvmsg 4 23265 _002657_hash NULL
++_002658_hash send_stream 4 3397 _002658_hash NULL
++_002659_hash sis190_try_rx_copy 3 57069 _002659_hash NULL
++_002664_hash skb_copy_and_csum_datagram_iovec 2 24466 _002664_hash NULL
++_002666_hash skge_rx_get 3 40598 _002666_hash NULL
++_002667_hash smp_send_cmd 3 512 _002667_hash NULL
++_002668_hash snd_gf1_mem_proc_dump 5 16926 _003499_hash NULL nohasharray
++_002669_hash sta_dev_read 3 14782 _002669_hash NULL
++_002670_hash sta_inactive_ms_read 3 25690 _002670_hash NULL
++_002671_hash sta_last_signal_read 3 31818 _002671_hash NULL
++_002672_hash stats_dot11ACKFailureCount_read 3 45558 _002672_hash NULL
++_002673_hash stats_dot11FCSErrorCount_read 3 28154 _002673_hash NULL
++_002674_hash stats_dot11RTSFailureCount_read 3 43948 _002674_hash NULL
++_002675_hash stats_dot11RTSSuccessCount_read 3 33065 _002675_hash NULL
++_002676_hash storvsc_connect_to_vsp 2 22 _002676_hash NULL
++_002677_hash sys_msgrcv 3 959 _002677_hash NULL
++_002678_hash sys_syslog 3 10746 _002678_hash NULL
++_002679_hash tcf_csum_ipv4_icmp 3 9258 _002679_hash NULL
++_002680_hash tcf_csum_ipv4_igmp 3 60446 _002680_hash NULL
++_002681_hash tcf_csum_ipv4_tcp 4 39713 _002681_hash NULL
++_002682_hash tcf_csum_ipv4_udp 4 30777 _002682_hash NULL
++_002683_hash tcf_csum_ipv6_icmp 4 11738 _002683_hash NULL
++_002684_hash tcf_csum_ipv6_tcp 4 54877 _002684_hash NULL
++_002685_hash tcf_csum_ipv6_udp 4 25241 _002685_hash NULL
++_002686_hash tcm_loop_change_queue_depth 2 42454 _002686_hash NULL
++_002687_hash tcp_copy_to_iovec 3 28344 _002687_hash NULL
++_002688_hash tcp_mark_head_lost 2 35895 _002688_hash NULL
++_002689_hash tcp_match_skb_to_sack 4 23568 _002689_hash NULL
++_002690_hash timeout_read 3 47915 _002690_hash NULL
++_002691_hash tipc_multicast 5 49144 _002691_hash NULL
++_002692_hash tipc_port_recv_sections 4 42890 _002692_hash NULL
++_002693_hash tipc_port_reject_sections 5 55229 _002693_hash NULL
++_002694_hash total_ps_buffered_read 3 16365 _002694_hash NULL
++_002695_hash tso_fragment 3 29050 _002695_hash NULL
++_002696_hash tty_insert_flip_string 3 34042 _002696_hash NULL
++_002698_hash tun_put_user 4 59849 _002698_hash NULL
++_002699_hash twa_change_queue_depth 2 48808 _002699_hash NULL
++_002700_hash tw_change_queue_depth 2 11116 _002700_hash NULL
++_002701_hash twl_change_queue_depth 2 41342 _002701_hash NULL
++_002702_hash ubi_eba_atomic_leb_change 5 60379 _002702_hash NULL
++_002703_hash ubi_eba_write_leb 5-6 36029 _002703_hash NULL
++_002705_hash ubi_eba_write_leb_st 5 44343 _002705_hash NULL
++_002706_hash udp_recvmsg 4 42558 _002706_hash NULL
++_002707_hash udpv6_recvmsg 4 9813 _002707_hash &_002506_hash
++_002708_hash udpv6_sendmsg 4 22316 _002708_hash NULL
++_002709_hash ulong_read_file 3 42304 _002709_hash &_000522_hash
++_002710_hash unix_dgram_recvmsg 4 14952 _002710_hash NULL
++_002711_hash user_power_read 3 39414 _002711_hash NULL
++_002712_hash v9fs_direct_read 3 45546 _002712_hash NULL
++_002713_hash v9fs_file_readn 4 36353 _002713_hash &_001799_hash
++_002714_hash vcc_recvmsg 4 37198 _002714_hash NULL
++_002715_hash velocity_rx_copy 2 34583 _002715_hash NULL
++_002716_hash W6692_empty_Bfifo 2 47804 _002716_hash NULL
++_002717_hash wep_iv_read 3 54744 _002717_hash NULL
++_002718_hash x25_recvmsg 4 42777 _002718_hash NULL
++_002719_hash xfs_buf_get_map 3 24522 _002719_hash NULL
++_002720_hash xfs_file_aio_write 4 33234 _002720_hash NULL
++_002721_hash xfs_iext_insert 3 18667 _002741_hash NULL nohasharray
++_002722_hash xfs_iext_remove 3 50909 _002722_hash NULL
++_002723_hash xlog_do_recovery_pass 3 21618 _002723_hash NULL
++_002724_hash xlog_find_verify_log_record 2 18870 _002724_hash NULL
++_002725_hash zd_mac_rx 3 38296 _002725_hash NULL
++_002726_hash aircable_process_packet 5 46639 _002726_hash NULL
++_002727_hash ath6kl_wmi_get_new_buf 1 52304 _002727_hash NULL
++_002728_hash batadv_iv_ogm_queue_add 3 46319 _002728_hash NULL
++_002729_hash batadv_receive_client_update_packet 3 41578 _002729_hash NULL
++_002730_hash batadv_receive_server_sync_packet 3 26577 _002730_hash &_000494_hash
++_002731_hash brcmf_alloc_pkt_and_read 2 63116 _002731_hash &_002028_hash
++_002732_hash brcmf_sdcard_recv_buf 6 38179 _002732_hash NULL
++_002733_hash brcmf_sdcard_rwdata 5 65041 _002733_hash NULL
++_002734_hash brcmf_sdcard_send_buf 6 7713 _002734_hash NULL
++_002735_hash brcmf_sdio_forensic_read 3 35311 _002735_hash &_001382_hash
++_002736_hash btrfs_alloc_free_block 3 8986 _002736_hash NULL
++_002737_hash btrfs_free_and_pin_reserved_extent 2 53016 _002737_hash NULL
++_002738_hash btrfs_free_reserved_extent 2 9867 _002738_hash NULL
++_002739_hash carl9170_handle_mpdu 3 11056 _002739_hash NULL
++_002740_hash do_trimming 3 26952 _002740_hash NULL
++_002741_hash edge_tty_recv 4 18667 _002741_hash &_002721_hash
++_002742_hash fwnet_receive_packet 9 50537 _002742_hash NULL
++_002743_hash gigaset_if_receive 3 4861 _002743_hash NULL
++_002744_hash gsm_dlci_data 3 14155 _002744_hash NULL
++_002745_hash handle_rx_packet 3 58993 _002745_hash NULL
++_002746_hash HDLC_irq 2 8709 _002746_hash NULL
++_002747_hash hdlc_rpr_irq 2 10240 _002747_hash NULL
++_002749_hash ifx_spi_insert_flip_string 3 51752 _002749_hash NULL
++_002753_hash ip_nat_sdp_media 8 23386 _002753_hash NULL
++_002754_hash ip_send_unicast_reply 6 38714 _002754_hash NULL
++_002756_hash ipwireless_network_packet_received 4 51277 _002756_hash NULL
++_002757_hash ipwireless_tty_received 3 49154 _002757_hash NULL
++_002758_hash iscsi_iser_recv 4 41948 _002758_hash NULL
++_002759_hash l2cap_bredr_sig_cmd 3 49065 _002759_hash NULL
++_002760_hash l2cap_sock_alloc_skb_cb 2 33532 _002760_hash NULL
++_002761_hash l2cap_sock_recvmsg 4 59886 _002761_hash NULL
++_002762_hash llcp_allocate_pdu 3 19866 _002762_hash NULL
++_002763_hash macvtap_recvmsg 4 63949 _002763_hash NULL
++_002764_hash osd_req_list_dev_partitions 4 60027 _002764_hash NULL
++_002765_hash osd_req_list_partition_collections 5 38223 _002765_hash NULL
++_002766_hash osst_do_scsi 4 44410 _002766_hash NULL
++_002767_hash ping_sendmsg 4 3782 _002767_hash NULL
++_002768_hash ppp_cp_event 6 2965 _002768_hash NULL
++_002769_hash pty_write 3 44757 _002769_hash &_001733_hash
++_002770_hash push_rx 3 28939 _002770_hash NULL
++_002772_hash qla2x00_handle_queue_full 2 24365 _002772_hash NULL
++_002773_hash qla4xxx_change_queue_depth 2 1268 _002773_hash NULL
++_002774_hash rfcomm_sock_recvmsg 4 22227 _002774_hash NULL
++_002775_hash scsi_execute_req 5 42088 _002775_hash NULL
++_002776_hash _scsih_change_queue_depth 2 26230 _002776_hash NULL
++_002777_hash sctp_sf_abort_violation 6 38380 _002777_hash NULL
++_002778_hash send_to_tty 3 45141 _002778_hash NULL
++_002780_hash sky2_receive 2 13407 _002780_hash NULL
++_002781_hash spi_execute 5 28736 _002781_hash NULL
++_002782_hash submit_inquiry 3 42108 _002782_hash NULL
++_002783_hash tcp_dma_try_early_copy 3 4457 _002783_hash NULL
++_002784_hash tcp_sacktag_walk 6 49703 _002784_hash NULL
++_002785_hash tcp_write_xmit 2 64602 _002785_hash NULL
++_002786_hash ti_recv 4 22027 _002786_hash NULL
++_002787_hash tun_do_read 4 50800 _002787_hash NULL
++_002788_hash ubi_leb_change 4 10289 _002788_hash NULL
++_002789_hash ubi_leb_write 4-5 5478 _002789_hash NULL
++_002791_hash udp_sendmsg 4 4492 _002791_hash NULL
++_002792_hash unix_seqpacket_recvmsg 4 23062 _002792_hash &_000477_hash
++_002793_hash v9fs_cached_file_read 3 2514 _002793_hash NULL
++_002794_hash write_leb 5 36957 _002794_hash NULL
++_002795_hash xfs_buf_read_map 3 40226 _002795_hash NULL
++_002796_hash xfs_trans_get_buf_map 4 2927 _002796_hash NULL
++_002797_hash xlog_do_log_recovery 3 17550 _002797_hash NULL
++_002798_hash ath6kl_wmi_add_wow_pattern_cmd 4 12842 _002798_hash NULL
++_002799_hash ath6kl_wmi_beginscan_cmd 8 25462 _002799_hash NULL
++_002800_hash ath6kl_wmi_send_probe_response_cmd 6 31728 _002800_hash NULL
++_002801_hash ath6kl_wmi_set_appie_cmd 5 39266 _002801_hash NULL
++_002802_hash ath6kl_wmi_set_ie_cmd 6 37260 _002802_hash NULL
++_002803_hash ath6kl_wmi_startscan_cmd 8 33674 _002803_hash NULL
++_002804_hash ath6kl_wmi_test_cmd 3 27312 _002804_hash NULL
++_002805_hash brcmf_sdbrcm_membytes 3-5 37324 _002805_hash NULL
++_002807_hash brcmf_sdbrcm_read_control 3 22721 _002807_hash NULL
++_002808_hash brcmf_tx_frame 3 20978 _002808_hash NULL
++_002809_hash __carl9170_rx 3 56784 _002809_hash NULL
++_002810_hash ch_do_scsi 4 31171 _002810_hash NULL
++_002811_hash dbg_leb_change 4 23555 _002811_hash NULL
++_002812_hash dbg_leb_write 4-5 63555 _002812_hash &_000971_hash
++_002814_hash gluebi_write 3 27905 _002814_hash NULL
++_002815_hash hdlc_irq_one 2 3944 _002815_hash NULL
++_002819_hash iser_rcv_completion 2 8048 _002819_hash NULL
++_002820_hash lock_loop 1 61681 _002820_hash NULL
++_002821_hash process_rcvd_data 3 6679 _002821_hash NULL
++_002822_hash brcmf_sdbrcm_bus_txctl 3 42492 _002822_hash NULL
++_002823_hash carl9170_rx 3 13272 _002823_hash NULL
++_002824_hash carl9170_rx_stream 3 1334 _002824_hash NULL
++_002826_hash mpt_lan_receive_post_turbo 2 13592 _002826_hash NULL
++_002827_hash padzero 1 55 _002827_hash &_002251_hash
++_002828_hash scsi_mode_sense 5 16835 _002828_hash NULL
++_002829_hash scsi_vpd_inquiry 4 30040 _002829_hash NULL
++_002830_hash ses_recv_diag 4 47143 _002830_hash &_000679_hash
++_002831_hash ses_send_diag 4 64527 _002831_hash NULL
++_002832_hash tcp_push_one 2 48816 _002832_hash NULL
++_002833_hash __tcp_push_pending_frames 2 48148 _002833_hash NULL
++_002834_hash trim_bitmaps 3 24158 _002834_hash NULL
++_002835_hash tun_recvmsg 4 48463 _002835_hash NULL
++_002836_hash ubifs_leb_change 4 17789 _002836_hash NULL
++_002837_hash ubifs_leb_write 4-5 22679 _002837_hash NULL
++_002839_hash xfs_buf_readahead_map 3 44248 _002839_hash &_000851_hash
++_002840_hash xfs_trans_read_buf_map 5 37487 _002840_hash NULL
++_002841_hash xlog_do_recover 3 59789 _002841_hash NULL
++_002842_hash btrfs_trim_block_group 3 28963 _002842_hash NULL
++_002843_hash do_write_orph_node 2 64343 _002843_hash NULL
++_002844_hash fix_unclean_leb 3 23188 _002844_hash NULL
++_002845_hash fixup_leb 3 43256 _002845_hash NULL
++_002846_hash recover_head 3 17904 _002846_hash NULL
++_002847_hash scsi_get_vpd_page 4 51951 _002847_hash NULL
++_002848_hash sd_do_mode_sense 5 11507 _002848_hash NULL
++_002849_hash tcp_push 3 10680 _002849_hash NULL
++_002850_hash ubifs_wbuf_write_nolock 3 64946 _002850_hash NULL
++_002851_hash ubifs_write_node 3-5 11258 _002851_hash NULL
++_002852_hash ubifs_recover_leb 3 60639 _002852_hash NULL
++_002853_hash write_head 4 30481 _002853_hash NULL
++_002854_hash write_node 4 33121 _002854_hash NULL
++_002855_hash ubifs_recover_log_leb 3 12079 _002855_hash NULL
++_002856_hash replay_log_leb 3 18704 _002856_hash NULL
++_002857_hash alloc_cpu_rmap 1 65363 _002857_hash NULL
++_002858_hash alloc_ebda_hpc 1-2 50046 _002858_hash NULL
++_002860_hash alloc_sched_domains 1 28972 _002860_hash NULL
++_002861_hash amthi_read 4 45831 _002861_hash NULL
++_002862_hash bcm_char_read 3 31750 _002862_hash NULL
++_002863_hash BcmCopySection 5 2035 _002863_hash NULL
++_002864_hash buffer_from_user 3 51826 _002864_hash NULL
++_002865_hash buffer_to_user 3 35439 _002865_hash NULL
++_002866_hash card_send_command 3 40757 _002866_hash NULL
++_002867_hash chd_dec_fetch_cdata 3 50926 _002867_hash NULL
++_002868_hash copy_nodes_to_user 2 63807 _002868_hash NULL
++_002869_hash create_log 2 8225 _002869_hash NULL
++_002870_hash crystalhd_create_dio_pool 2 3427 _002870_hash NULL
++_002871_hash crystalhd_user_data 3 18407 _002871_hash NULL
++_002872_hash do_pages_stat 2 4437 _002872_hash NULL
++_002873_hash do_read_log_to_user 4 3236 _002873_hash NULL
++_002874_hash do_write_log_from_user 3 39362 _002874_hash NULL
++_002875_hash evm_read_key 3 54674 _002875_hash NULL
++_002876_hash evm_write_key 3 27715 _002876_hash NULL
++_002877_hash fir16_create 3 5574 _002877_hash NULL
++_002878_hash get_nodes 3 39012 _002878_hash NULL
++_002879_hash __iio_allocate_kfifo 2-3 55738 _002879_hash NULL
++_002881_hash __iio_allocate_sw_ring_buffer 3 4843 _002881_hash NULL
++_002882_hash iio_debugfs_read_reg 3 60908 _002882_hash NULL ++_002883_hash iio_debugfs_write_reg 3 22742 _002883_hash NULL ++_002884_hash iio_device_alloc 1 41440 _002884_hash NULL ++_002885_hash iio_event_chrdev_read 3 54757 _002885_hash NULL ++_002886_hash iio_read_first_n_kfifo 2 57910 _002886_hash NULL ++_002887_hash iio_read_first_n_sw_rb 2 51911 _002887_hash NULL ++_002888_hash ioapic_setup_resources 1 35255 _002888_hash NULL ++_002889_hash keymap_store 4 45406 _002889_hash NULL ++_002890_hash line6_alloc_sysex_buffer 4 28225 _002890_hash NULL ++_002891_hash line6_dumpreq_initbuf 3 53123 _002891_hash NULL ++_002892_hash line6_midibuf_init 2 52425 _002892_hash NULL ++_002893_hash _malloc 1 54077 _002893_hash NULL ++_002894_hash mei_read 3 6507 _002894_hash NULL ++_002895_hash mei_write 3 4005 _002895_hash NULL ++_002896_hash msg_set 3 51725 _002896_hash NULL ++_002897_hash newpart 6 47485 _002897_hash NULL ++_002898_hash OS_kmalloc 1 36909 _002898_hash NULL ++_002899_hash OS_mem_token_alloc 1 14276 _002899_hash NULL ++_002900_hash packet_came 3 18072 _002900_hash NULL ++_002901_hash pcpu_alloc_bootmem 2 62074 _002901_hash NULL ++_002902_hash pcpu_build_alloc_info 1-3-2 41443 _002902_hash NULL ++_002905_hash pcpu_get_vm_areas 3 50085 _002905_hash NULL ++_002906_hash resource_from_user 3 30341 _002906_hash NULL ++_002907_hash rtsx_read_cfg_seq 3-5 48139 _002907_hash NULL ++_002909_hash rtsx_write_cfg_seq 3-5 27485 _002909_hash NULL ++_002911_hash sca3000_read_data 4 57064 _002911_hash NULL ++_002912_hash sca3000_read_first_n_hw_rb 2 11479 _002912_hash NULL ++_002913_hash send_midi_async 3 57463 _002913_hash NULL ++_002914_hash sep_create_dcb_dmatables_context 6 37551 _002914_hash NULL ++_002915_hash sep_create_dcb_dmatables_context_kernel 6 49728 _002915_hash NULL ++_002916_hash sep_create_msgarea_context 4 33829 _002916_hash NULL ++_002917_hash sep_lli_table_secure_dma 2-3 64042 _002917_hash NULL ++_002919_hash sep_lock_user_pages 2-3 57470 _002919_hash &_002488_hash ++_002921_hash sep_prepare_input_output_dma_table_in_dcb 4-5-2-3 63087 _002921_hash NULL ++_002923_hash sep_read 3 17161 _002923_hash NULL ++_002924_hash TransmitTcb 4 12989 _002924_hash NULL ++_002925_hash ValidateDSDParamsChecksum 3 63654 _002925_hash NULL ++_002926_hash Wb35Reg_BurstWrite 4 62327 _002926_hash NULL ++_002927_hash alloc_irq_cpu_rmap 1 28459 _002927_hash NULL ++_002928_hash InterfaceTransmitPacket 3 42058 _002928_hash NULL ++_002929_hash line6_dumpreq_init 3 34473 _002929_hash NULL ++_002931_hash pcpu_embed_first_chunk 1-3-2 24224 _002931_hash NULL ++_002933_hash pcpu_fc_alloc 2 11818 _002933_hash NULL ++_002934_hash pcpu_page_first_chunk 1 20712 _002934_hash NULL ++_002935_hash pod_alloc_sysex_buffer 3 31651 _002935_hash NULL ++_002936_hash r8712_usbctrl_vendorreq 6 48489 _002936_hash NULL ++_002937_hash r871x_set_wpa_ie 3 7000 _002937_hash NULL ++_002938_hash sep_prepare_input_dma_table 2-3 2009 _002938_hash NULL ++_002940_hash sep_prepare_input_output_dma_table 2-4-3 63429 _002940_hash NULL ++_002943_hash sys_get_mempolicy 3 30379 _002943_hash NULL ++_002944_hash sys_mbind 5 7990 _002944_hash NULL ++_002945_hash sys_migrate_pages 2 39825 _002945_hash NULL ++_002946_hash sys_move_pages 2 42626 _002946_hash NULL ++_002947_hash sys_set_mempolicy 3 32608 _002947_hash NULL ++_002948_hash variax_alloc_sysex_buffer 3 15237 _002948_hash NULL ++_002949_hash vme_user_read 3 55338 _002949_hash NULL ++_002950_hash vme_user_write 3 15587 _002950_hash NULL ++_002954_hash variax_set_raw2 4 32374 _002954_hash NULL 
++_002955_hash copy_in_user 3 57502 _002955_hash NULL ++_002956_hash __earlyonly_bootmem_alloc 2 23824 _002956_hash NULL ++_002957_hash rfc4106_set_key 3 54519 _002957_hash NULL ++_002958_hash sparse_early_usemaps_alloc_pgdat_section 2 62304 _002958_hash NULL ++_002959_hash sparse_early_usemaps_alloc_node 4 9269 _002959_hash NULL ++_002960_hash sparse_mem_maps_populate_node 4 12669 _002960_hash &_002242_hash ++_002961_hash vmemmap_alloc_block 1 43245 _002961_hash NULL ++_002962_hash sparse_early_mem_maps_alloc_node 4 36971 _002962_hash NULL ++_002963_hash vmemmap_alloc_block_buf 1 61126 _002963_hash NULL ++_002964_hash alloc_mr 1 45935 _002964_hash NULL ++_002965_hash atomic_counters_read 3 48827 _002965_hash NULL ++_002966_hash atomic_stats_read 3 36228 _002966_hash NULL ++_002967_hash capabilities_read 3 58457 _002967_hash NULL ++_002968_hash compat_core_sys_select 1 65285 _002968_hash NULL ++_002969_hash compat_dccp_setsockopt 5 51263 _002969_hash NULL ++_002970_hash compat_do_arpt_set_ctl 4 12184 _002970_hash NULL ++_002971_hash compat_do_ip6t_set_ctl 4 3184 _002971_hash NULL ++_002972_hash compat_do_ipt_set_ctl 4 58466 _002972_hash &_002078_hash ++_002973_hash compat_filldir 3 32999 _002973_hash NULL ++_002974_hash compat_filldir64 3 35354 _002974_hash NULL ++_002975_hash compat_fillonedir 3 15620 _002975_hash NULL ++_002976_hash compat_ip_setsockopt 5 13870 _003094_hash NULL nohasharray ++_002977_hash compat_ipv6_setsockopt 5 20468 _002977_hash NULL ++_002978_hash compat_mpctl_ioctl 2 45671 _002978_hash NULL ++_002979_hash compat_raw_setsockopt 5 30634 _002979_hash NULL ++_002980_hash compat_rawv6_setsockopt 5 4967 _002980_hash NULL ++_002981_hash compat_rw_copy_check_uvector 3 22001 _003263_hash NULL nohasharray ++_002982_hash compat_sock_setsockopt 5 23 _002982_hash NULL ++_002983_hash compat_sys_get_mempolicy 3 31109 _002983_hash NULL ++_002984_hash compat_sys_kexec_load 2 35674 _002984_hash NULL ++_002985_hash compat_sys_keyctl 4 9639 _002985_hash NULL ++_002986_hash compat_sys_mbind 5 36256 _002986_hash NULL ++_002987_hash compat_sys_migrate_pages 2 3157 _002987_hash NULL ++_002988_hash compat_sys_move_pages 2 5861 _002988_hash NULL ++_002989_hash compat_sys_mq_timedsend 3 31060 _002989_hash NULL ++_002990_hash compat_sys_msgrcv 2 7482 _002990_hash NULL ++_002991_hash compat_sys_msgsnd 2 10738 _002991_hash NULL ++_002992_hash compat_sys_semtimedop 3 3606 _002992_hash NULL ++_002993_hash compat_sys_set_mempolicy 3 57742 _002993_hash &_002614_hash ++_002994_hash __copy_in_user 3 34790 _002994_hash NULL ++_002995_hash dev_counters_read 3 19216 _002995_hash NULL ++_002996_hash dev_names_read 3 38509 _002996_hash NULL ++_002997_hash driver_names_read 3 60399 _002997_hash NULL ++_002998_hash driver_stats_read 3 8944 _002998_hash NULL ++_002999_hash evdev_ioctl_compat 2 13851 _002999_hash NULL ++_003000_hash evtchn_read 3 3569 _003000_hash NULL ++_003001_hash evtchn_write 3 43278 _003001_hash NULL ++_003002_hash fat_compat_ioctl_filldir 3 36328 _003002_hash NULL ++_003003_hash flash_read 3 57843 _003003_hash NULL ++_003004_hash flash_write 3 62354 _003004_hash NULL ++_003005_hash fw_device_op_compat_ioctl 2 42804 _003005_hash NULL ++_003006_hash gather_array 3 56641 _003006_hash NULL ++_003007_hash ghash_async_setkey 3 60001 _003007_hash NULL ++_003008_hash gntdev_alloc_map 2 35145 _003008_hash NULL ++_003009_hash gnttab_map 2 56439 _003009_hash NULL ++_003010_hash gru_alloc_gts 2-3 60056 _003010_hash &_000981_hash ++_003012_hash hiddev_compat_ioctl 2 41255 _003012_hash NULL 
++_003013_hash init_cdev 1 8274 _003013_hash NULL ++_003014_hash init_per_cpu 1 17880 _003014_hash NULL ++_003015_hash ipath_create_cq 2 45586 _003015_hash NULL ++_003016_hash ipath_get_base_info 3 7043 _003016_hash NULL ++_003017_hash ipath_init_qp_table 2 25167 _003017_hash NULL ++_003018_hash ipath_resize_cq 2 712 _003018_hash NULL ++_003019_hash joydev_compat_ioctl 2 8765 _003019_hash NULL ++_003020_hash mon_bin_compat_ioctl 3 50234 _003020_hash NULL ++_003021_hash options_write 3 47243 _003021_hash NULL ++_003022_hash portcntrs_1_read 3 47253 _003022_hash NULL ++_003023_hash portcntrs_2_read 3 56586 _003023_hash NULL ++_003024_hash portnames_read 3 41958 _003024_hash NULL ++_003025_hash ptc_proc_write 3 12076 _003025_hash NULL ++_003026_hash put_cmsg_compat 4 35937 _003026_hash NULL ++_003027_hash qib_alloc_devdata 2 51819 _003027_hash NULL ++_003028_hash qib_alloc_fast_reg_page_list 2 10507 _003028_hash NULL ++_003029_hash qib_cdev_init 1 34778 _003029_hash NULL ++_003030_hash qib_create_cq 2 27497 _003030_hash NULL ++_003031_hash qib_diag_write 3 62133 _003031_hash NULL ++_003032_hash qib_get_base_info 3 11369 _003032_hash NULL ++_003033_hash qib_resize_cq 2 53090 _003033_hash NULL ++_003034_hash qsfp_1_read 3 21915 _003034_hash NULL ++_003035_hash qsfp_2_read 3 31491 _003035_hash NULL ++_003036_hash queue_reply 3 22416 _003036_hash NULL ++_003037_hash spidev_compat_ioctl 2 63778 _003037_hash NULL ++_003038_hash split 2 11691 _003038_hash NULL ++_003039_hash stats_read_ul 3 32751 _003039_hash NULL ++_003040_hash sys32_ipc 3 7238 _003040_hash NULL ++_003041_hash sys32_rt_sigpending 2 25814 _003041_hash NULL ++_003042_hash tunables_read 3 36385 _003042_hash NULL ++_003043_hash tunables_write 3 59563 _003043_hash NULL ++_003044_hash xenbus_file_write 3 6282 _003044_hash NULL ++_003045_hash xlbd_reserve_minors 1-2 18365 _003045_hash NULL ++_003047_hash xpc_kmalloc_cacheline_aligned 1 42895 _003047_hash NULL ++_003048_hash xpc_kzalloc_cacheline_aligned 1 65433 _003048_hash NULL ++_003049_hash xsd_read 3 15653 _003049_hash NULL ++_003050_hash compat_do_readv_writev 4 49102 _003050_hash NULL ++_003051_hash compat_keyctl_instantiate_key_iov 3 57431 _003088_hash NULL nohasharray ++_003052_hash compat_process_vm_rw 3-5 22254 _003052_hash NULL ++_003054_hash compat_sys_select 1 16131 _003054_hash NULL ++_003055_hash compat_sys_setsockopt 5 3326 _003055_hash NULL ++_003056_hash compat_udp_setsockopt 5 38840 _003056_hash NULL ++_003057_hash compat_udpv6_setsockopt 5 42981 _003057_hash NULL ++_003058_hash do_compat_pselect 1 10398 _003058_hash NULL ++_003059_hash gnttab_expand 1 15817 _003059_hash NULL ++_003060_hash ipath_cdev_init 1 37752 _003060_hash NULL ++_003061_hash ipath_reg_phys_mr 3 23918 _003061_hash &_000999_hash ++_003062_hash qib_alloc_fast_reg_mr 2 12526 _003062_hash NULL ++_003063_hash qib_reg_phys_mr 3 60202 _003063_hash &_000897_hash ++_003064_hash compat_readv 3 30273 _003064_hash NULL ++_003065_hash compat_sys_process_vm_readv 3-5 15374 _003065_hash NULL ++_003067_hash compat_sys_process_vm_writev 3-5 41194 _003067_hash NULL ++_003069_hash compat_sys_pselect6 1 14105 _003069_hash NULL ++_003070_hash compat_writev 3 60063 _003070_hash NULL ++_003071_hash get_free_entries 1 46030 _003071_hash NULL ++_003072_hash compat_sys_preadv64 3 24283 _003072_hash NULL ++_003073_hash compat_sys_pwritev64 3 51151 _003073_hash NULL ++_003074_hash compat_sys_readv 3 20911 _003074_hash NULL ++_003075_hash compat_sys_writev 3 5784 _003075_hash NULL ++_003076_hash gnttab_alloc_grant_references 1 
18240 _003076_hash NULL ++_003077_hash compat_sys_preadv 3 583 _003077_hash NULL ++_003078_hash compat_sys_pwritev 3 17886 _003078_hash NULL ++_003079_hash aes_decrypt_fail_read 3 54815 _003079_hash NULL ++_003080_hash aes_decrypt_interrupt_read 3 19910 _003080_hash NULL ++_003081_hash aes_decrypt_packets_read 3 10155 _003081_hash NULL ++_003082_hash aes_encrypt_fail_read 3 32562 _003082_hash NULL ++_003083_hash aes_encrypt_interrupt_read 3 39919 _003083_hash NULL ++_003084_hash aes_encrypt_packets_read 3 48666 _003084_hash NULL ++_003085_hash agp_remap 2 30665 _003085_hash NULL ++_003086_hash alloc_apertures 1 56561 _003086_hash NULL ++_003087_hash allocate_probes 1 40204 _003087_hash NULL ++_003088_hash alloc_ftrace_hash 1 57431 _003088_hash &_003051_hash ++_003089_hash alloc_page_cgroup 1 2919 _003089_hash NULL ++_003090_hash __alloc_preds 2 9492 _003090_hash NULL ++_003091_hash __alloc_pred_stack 2 26687 _003091_hash NULL ++_003092_hash alloc_sched_domains 1 47756 _003092_hash NULL ++_003093_hash alloc_trace_probe 6 38720 _003093_hash NULL ++_003094_hash alloc_trace_uprobe 3 13870 _003094_hash &_002976_hash ++_003095_hash ath6kl_sdio_alloc_prep_scat_req 2 51986 _003095_hash NULL ++_003096_hash ath6kl_usb_post_recv_transfers 2 32892 _003096_hash NULL ++_003097_hash ath6kl_usb_submit_ctrl_in 6 32880 _003097_hash &_000795_hash ++_003098_hash ath6kl_usb_submit_ctrl_out 6 9978 _003098_hash NULL ++_003099_hash av7110_ipack_init 2 46655 _003099_hash NULL ++_003100_hash av7110_vbi_write 3 34384 _003100_hash NULL ++_003101_hash bin_uuid 3 28999 _003101_hash NULL ++_003102_hash blk_dropped_read 3 4168 _003102_hash NULL ++_003103_hash blk_msg_write 3 13655 _003103_hash NULL ++_003104_hash brcmf_usbdev_qinit 2 19090 _003104_hash &_001715_hash ++_003105_hash brcmf_usb_dl_cmd 4 53130 _003105_hash NULL ++_003106_hash ci_ll_init 3 12930 _003106_hash NULL ++_003107_hash ci_ll_write 4 3740 _003107_hash NULL ++_003108_hash conf_read 3 55786 _003108_hash NULL ++_003109_hash __copy_from_user_inatomic_nocache 3 49921 _003109_hash NULL ++_003110_hash cx24116_writeregN 4 41975 _003110_hash NULL ++_003111_hash cyttsp_probe 4 1940 _003111_hash NULL ++_003112_hash dccpprobe_read 3 52549 _003112_hash NULL ++_003113_hash ddb_input_read 3 9743 _003113_hash NULL ++_003114_hash ddb_output_write 3 31902 _003114_hash NULL ++_003115_hash __devres_alloc 2 25598 _003115_hash NULL ++_003116_hash dma_rx_errors_read 3 52045 _003116_hash NULL ++_003117_hash dma_rx_requested_read 3 65354 _003117_hash NULL ++_003118_hash dma_tx_errors_read 3 46060 _003118_hash NULL ++_003119_hash dma_tx_requested_read 3 16110 _003203_hash NULL nohasharray ++_003120_hash do_dmabuf_dirty_sou 7 3017 _003120_hash NULL ++_003121_hash do_surface_dirty_sou 7 39678 _003121_hash NULL ++_003122_hash driver_state_read 3 17194 _003122_hash &_001511_hash ++_003123_hash drm_agp_bind_pages 3 56748 _003123_hash NULL ++_003124_hash drm_buffer_alloc 2 44405 _003124_hash NULL ++_003125_hash drm_calloc_large 1-2 65421 _003125_hash NULL ++_003127_hash drm_fb_helper_init 3-4 19044 _003127_hash NULL ++_003129_hash drm_ht_create 2 18853 _003129_hash NULL ++_003130_hash drm_ioctl 2 42813 _003130_hash NULL ++_003131_hash drm_malloc_ab 1-2 16831 _003131_hash NULL ++_003133_hash drm_mode_crtc_set_gamma_size 2 31881 _003133_hash NULL ++_003134_hash drm_plane_init 6 28731 _003134_hash NULL ++_003135_hash drm_property_create 4 51239 _003135_hash NULL ++_003136_hash drm_property_create_blob 2 7414 _003136_hash NULL ++_003137_hash drm_vblank_init 2 11362 _003137_hash NULL 
++_003138_hash drm_vmalloc_dma 1 14550 _003138_hash NULL ++_003139_hash dvb_aplay 3 56296 _003139_hash NULL ++_003140_hash dvb_ca_en50221_init 4 45718 _003140_hash NULL ++_003141_hash dvb_ca_en50221_io_write 3 43533 _003141_hash NULL ++_003142_hash dvb_dmxdev_set_buffer_size 2 55643 _003142_hash NULL ++_003143_hash dvbdmx_write 3 19423 _003143_hash NULL ++_003144_hash dvb_dvr_set_buffer_size 2 9840 _003144_hash NULL ++_003145_hash dvb_net_sec 3 37884 _003145_hash NULL ++_003146_hash dvb_play 3 50814 _003146_hash NULL ++_003147_hash dvb_ringbuffer_pkt_read_user 2-5-3 4303 _003147_hash NULL ++_003150_hash dvb_ringbuffer_read_user 3 56702 _003150_hash NULL ++_003151_hash dvb_usercopy 2 14036 _003151_hash NULL ++_003152_hash dw210x_op_rw 6 39915 _003152_hash NULL ++_003153_hash edt_ft5x06_debugfs_raw_data_read 3 28002 _003153_hash NULL ++_003154_hash em_canid_change 3 14150 _003154_hash NULL ++_003155_hash event_calibration_read 3 21083 _003155_hash NULL ++_003156_hash event_enable_read 3 7074 _003156_hash NULL ++_003157_hash event_filter_read 3 23494 _003157_hash NULL ++_003158_hash event_filter_write 3 56609 _003158_hash NULL ++_003159_hash event_heart_beat_read 3 48961 _003159_hash NULL ++_003160_hash event_id_read 3 64288 _003160_hash &_001300_hash ++_003161_hash event_oom_late_read 3 61175 _003161_hash &_001054_hash ++_003162_hash event_phy_transmit_error_read 3 10471 _003162_hash NULL ++_003163_hash event_rx_mem_empty_read 3 40363 _003163_hash NULL ++_003164_hash event_rx_mismatch_read 3 38518 _003164_hash NULL ++_003165_hash event_rx_pool_read 3 25792 _003165_hash NULL ++_003166_hash event_tx_stuck_read 3 19305 _003166_hash NULL ++_003167_hash excessive_retries_read 3 60425 _003167_hash NULL ++_003168_hash flexcop_device_kmalloc 1 54793 _003168_hash NULL ++_003169_hash fm_send_cmd 5 39639 _003169_hash NULL ++_003170_hash __fprog_create 2 41263 _003170_hash NULL ++_003171_hash fq_codel_zalloc 1 15378 _003171_hash NULL ++_003172_hash ftrace_pid_write 3 39710 _003172_hash NULL ++_003173_hash ftrace_profile_read 3 21327 _003173_hash NULL ++_003174_hash fw_stats_raw_read 3 1369 _003174_hash NULL ++_003175_hash get_info 3 55681 _003175_hash NULL ++_003176_hash __get_vm_area_node 1 55305 _003176_hash NULL ++_003177_hash gpio_power_read 3 36059 _003177_hash NULL ++_003178_hash h5_prepare_pkt 4 12085 _003178_hash NULL ++_003179_hash hsc_msg_alloc 1 60990 _003179_hash NULL ++_003180_hash hsc_write 3 55875 _003180_hash NULL ++_003181_hash hsi_alloc_controller 1 41802 _003181_hash NULL ++_003182_hash hsi_register_board_info 2 13820 _003182_hash NULL ++_003183_hash hugetlb_cgroup_read 5 49259 _003183_hash NULL ++_003184_hash i915_cache_sharing_read 3 24775 _003184_hash NULL ++_003185_hash i915_cache_sharing_write 3 57961 _003185_hash NULL ++_003186_hash i915_max_freq_read 3 20581 _003186_hash NULL ++_003187_hash i915_max_freq_write 3 11350 _003187_hash NULL ++_003188_hash i915_min_freq_read 3 38470 _003188_hash NULL ++_003189_hash i915_min_freq_write 3 10981 _003189_hash NULL ++_003190_hash i915_ring_stop_read 3 42549 _003190_hash &_000740_hash ++_003191_hash i915_ring_stop_write 3 59010 _003191_hash NULL ++_003192_hash i915_wedged_read 3 35474 _003192_hash NULL ++_003193_hash i915_wedged_write 3 47771 _003193_hash NULL ++_003194_hash ieee802154_alloc_device 1 13767 _003194_hash NULL ++_003195_hash intel_sdvo_write_cmd 4 54377 _003195_hash &_000832_hash ++_003196_hash isr_cmd_cmplt_read 3 53439 _003196_hash NULL ++_003197_hash isr_commands_read 3 41398 _003197_hash NULL ++_003198_hash 
isr_decrypt_done_read 3 49490 _003198_hash NULL ++_003199_hash isr_dma0_done_read 3 8574 _003199_hash NULL ++_003200_hash isr_dma1_done_read 3 48159 _003200_hash NULL ++_003201_hash isr_fiqs_read 3 34687 _003201_hash NULL ++_003202_hash isr_host_acknowledges_read 3 54136 _003202_hash NULL ++_003203_hash isr_hw_pm_mode_changes_read 3 16110 _003203_hash &_003119_hash ++_003204_hash isr_irqs_read 3 9181 _003204_hash NULL ++_003205_hash isr_low_rssi_read 3 64789 _003205_hash NULL ++_003206_hash isr_pci_pm_read 3 30271 _003206_hash NULL ++_003207_hash isr_rx_headers_read 3 38325 _003207_hash NULL ++_003208_hash isr_rx_mem_overflow_read 3 43025 _003208_hash NULL ++_003209_hash isr_rx_procs_read 3 31804 _003209_hash NULL ++_003210_hash isr_rx_rdys_read 3 35283 _003210_hash NULL ++_003211_hash isr_tx_exch_complete_read 3 16103 _003211_hash NULL ++_003212_hash isr_tx_procs_read 3 23084 _003212_hash NULL ++_003213_hash isr_wakeups_read 3 49607 _003213_hash NULL ++_003214_hash LoadBitmap 2 19658 _003214_hash NULL ++_003215_hash mem_cgroup_read 5 22461 _003215_hash NULL ++_003216_hash mic_calc_failure_read 3 59700 _003216_hash NULL ++_003217_hash mic_rx_pkts_read 3 27972 _003217_hash NULL ++_003218_hash __module_alloc 1 50004 _003218_hash NULL ++_003219_hash module_alloc_update_bounds_rw 1 63233 _003219_hash NULL ++_003220_hash module_alloc_update_bounds_rx 1 58634 _003220_hash NULL ++_003221_hash mwifiex_usb_submit_rx_urb 2 54558 _003221_hash NULL ++_003222_hash nfc_hci_hcp_message_tx 6 14534 _003222_hash NULL ++_003223_hash nfc_hci_set_param 5 40697 _003223_hash NULL ++_003224_hash nfc_shdlc_alloc_skb 2 12741 _003224_hash NULL ++_003225_hash opera1_xilinx_rw 5 31453 _003225_hash NULL ++_003226_hash persistent_ram_vmap 1-2 709 _003226_hash NULL ++_003228_hash prctl_set_mm 3 64538 _003228_hash NULL ++_003229_hash probe_kernel_write 3 17481 _003229_hash NULL ++_003230_hash proc_fault_inject_read 3 36802 _003230_hash NULL ++_003231_hash proc_fault_inject_write 3 21058 _003231_hash NULL ++_003232_hash ps_pspoll_max_apturn_read 3 6699 _003232_hash NULL ++_003233_hash ps_pspoll_timeouts_read 3 11776 _003233_hash NULL ++_003234_hash ps_pspoll_utilization_read 3 5361 _003234_hash NULL ++_003235_hash ps_upsd_max_apturn_read 3 19918 _003235_hash NULL ++_003236_hash ps_upsd_max_sptime_read 3 63362 _003236_hash NULL ++_003237_hash ps_upsd_timeouts_read 3 28924 _003237_hash NULL ++_003238_hash ps_upsd_utilization_read 3 51669 _003238_hash NULL ++_003239_hash ptp_filter_init 2 36780 _003239_hash NULL ++_003240_hash pwr_disable_ps_read 3 13176 _003240_hash NULL ++_003241_hash pwr_elp_enter_read 3 5324 _003241_hash NULL ++_003242_hash pwr_enable_ps_read 3 17686 _003242_hash NULL ++_003243_hash pwr_fix_tsf_ps_read 3 26627 _003243_hash NULL ++_003244_hash pwr_missing_bcns_read 3 25824 _003244_hash NULL ++_003245_hash pwr_power_save_off_read 3 18355 _003245_hash NULL ++_003246_hash pwr_ps_enter_read 3 26935 _003246_hash &_000512_hash ++_003247_hash pwr_rcvd_awake_beacons_read 3 50505 _003247_hash NULL ++_003248_hash pwr_rcvd_beacons_read 3 52836 _003248_hash NULL ++_003249_hash pwr_tx_without_ps_read 3 48423 _003249_hash NULL ++_003250_hash pwr_tx_with_ps_read 3 60851 _003250_hash NULL ++_003251_hash pwr_wake_on_host_read 3 26321 _003251_hash NULL ++_003252_hash pwr_wake_on_timer_exp_read 3 22640 _003252_hash NULL ++_003253_hash rb_simple_read 3 45972 _003253_hash NULL ++_003254_hash read_file_dfs 3 43145 _003254_hash NULL ++_003255_hash retry_count_read 3 52129 _003255_hash NULL ++_003256_hash rx_dropped_read 3 44799 
_003256_hash NULL ++_003257_hash rx_fcs_err_read 3 62844 _003257_hash NULL ++_003258_hash rx_hdr_overflow_read 3 64407 _003258_hash NULL ++_003259_hash rx_hw_stuck_read 3 57179 _003259_hash NULL ++_003260_hash rx_out_of_mem_read 3 10157 _003260_hash NULL ++_003261_hash rx_path_reset_read 3 23801 _003261_hash NULL ++_003262_hash rxpipe_beacon_buffer_thres_host_int_trig_rx_data_read 3 55106 _003262_hash NULL ++_003263_hash rxpipe_descr_host_int_trig_rx_data_read 3 22001 _003263_hash &_002981_hash ++_003264_hash rxpipe_missed_beacon_host_int_trig_rx_data_read 3 63405 _003264_hash NULL ++_003265_hash rxpipe_rx_prep_beacon_drop_read 3 2403 _003265_hash NULL ++_003266_hash rxpipe_tx_xfr_host_int_trig_rx_data_read 3 35538 _003266_hash NULL ++_003267_hash rx_reset_counter_read 3 58001 _003267_hash NULL ++_003268_hash rx_xfr_hint_trig_read 3 40283 _003268_hash NULL ++_003269_hash saa7146_vmalloc_build_pgtable 2 19780 _003269_hash NULL ++_003270_hash sched_feat_write 3 55202 _003270_hash NULL ++_003271_hash sd_alloc_ctl_entry 1 29708 _003271_hash NULL ++_003272_hash shmem_pread_fast 3 34147 _003272_hash NULL ++_003273_hash shmem_pread_slow 3 3198 _003273_hash NULL ++_003274_hash shmem_pwrite_slow 3 31741 _003274_hash NULL ++_003275_hash show_header 3 4722 _003275_hash &_000745_hash ++_003276_hash stack_max_size_read 3 1445 _003276_hash NULL ++_003277_hash subsystem_filter_read 3 62310 _003277_hash NULL ++_003278_hash subsystem_filter_write 3 13022 _003278_hash NULL ++_003279_hash swap_cgroup_swapon 2 13614 _003279_hash NULL ++_003280_hash system_enable_read 3 25815 _003280_hash NULL ++_003281_hash tda10048_writeregbulk 4 11050 _003281_hash NULL ++_003282_hash tlbflush_read_file 3 64661 _003282_hash NULL ++_003283_hash trace_options_core_read 3 47390 _003283_hash NULL ++_003284_hash trace_options_read 3 11419 _003284_hash NULL ++_003285_hash trace_parser_get_init 2 31379 _003285_hash NULL ++_003286_hash traceprobe_probes_write 3 64969 _003286_hash NULL ++_003287_hash trace_seq_to_user 3 65398 _003287_hash NULL ++_003288_hash tracing_buffers_read 3 11124 _003288_hash NULL ++_003289_hash tracing_clock_write 3 27961 _003289_hash NULL ++_003290_hash tracing_cpumask_read 3 7010 _003290_hash NULL ++_003291_hash tracing_ctrl_read 3 46922 _003291_hash NULL ++_003292_hash tracing_entries_read 3 8345 _003292_hash NULL ++_003293_hash tracing_max_lat_read 3 8890 _003293_hash NULL ++_003294_hash tracing_read_dyn_info 3 45468 _003294_hash NULL ++_003295_hash tracing_readme_read 3 16493 _003295_hash NULL ++_003296_hash tracing_saved_cmdlines_read 3 21434 _003296_hash NULL ++_003297_hash tracing_set_trace_read 3 44122 _003297_hash NULL ++_003298_hash tracing_set_trace_write 3 57096 _003298_hash NULL ++_003299_hash tracing_stats_read 3 34537 _003299_hash NULL ++_003300_hash tracing_total_entries_read 3 62817 _003300_hash NULL ++_003301_hash tracing_trace_options_write 3 153 _003301_hash NULL ++_003302_hash tstats_write 3 60432 _003302_hash &_000009_hash ++_003303_hash ttm_bo_fbdev_io 4 9805 _003303_hash NULL ++_003304_hash ttm_bo_io 5 47000 _003304_hash NULL ++_003305_hash ttm_dma_page_pool_free 2 34135 _003305_hash NULL ++_003306_hash ttm_page_pool_free 2 61661 _003306_hash NULL ++_003307_hash ttusb2_msg 4 3100 _003307_hash NULL ++_003308_hash tx_internal_desc_overflow_read 3 47300 _003308_hash NULL ++_003309_hash tx_queue_len_read 3 1463 _003309_hash NULL ++_003310_hash tx_queue_status_read 3 44978 _003310_hash NULL ++_003311_hash u_memcpya 2-3 30139 _003311_hash NULL ++_003313_hash usb_allocate_stream_buffers 3 
8964 _003313_hash NULL ++_003314_hash vifs_state_read 3 33762 _003314_hash NULL ++_003315_hash vmalloc_to_sg 2 58354 _003315_hash NULL ++_003316_hash vm_map_ram 2 23078 _003316_hash &_001095_hash ++_003317_hash vmw_execbuf_process 5 22885 _003317_hash NULL ++_003318_hash vmw_fifo_reserve 2 12141 _003318_hash NULL ++_003319_hash vmw_kms_present 9 38130 _003319_hash NULL ++_003320_hash vmw_kms_readback 6 5727 _003320_hash NULL ++_003321_hash wep_addr_key_count_read 3 20174 _003321_hash NULL ++_003322_hash wep_decrypt_fail_read 3 58567 _003322_hash NULL ++_003323_hash wep_default_key_count_read 3 43035 _003323_hash NULL ++_003324_hash wep_interrupt_read 3 41492 _003324_hash NULL ++_003325_hash wep_key_not_found_read 3 13377 _003325_hash &_000952_hash ++_003326_hash wep_packets_read 3 18751 _003326_hash NULL ++_003327_hash wl1251_cmd_template_set 4 6172 _003327_hash NULL ++_003328_hash wl1271_format_buffer 2 20834 _003328_hash NULL ++_003329_hash wl1271_rx_filter_alloc_field 5 46721 _003329_hash NULL ++_003330_hash wl12xx_cmd_build_probe_req 6-8 54946 _003330_hash NULL ++_003332_hash wlcore_alloc_hw 1 7785 _003332_hash NULL ++_003333_hash aggr_size_rx_size_read 3 33526 _003333_hash NULL ++_003334_hash aggr_size_tx_agg_vs_rate_read 3 21438 _003334_hash NULL ++_003335_hash alloc_and_copy_ftrace_hash 1 29368 _003335_hash NULL ++_003336_hash alloc_bulk_urbs_generic 5 12127 _003336_hash NULL ++_003337_hash alloc_ieee80211 1 20063 _003337_hash NULL ++_003338_hash alloc_ieee80211_rsl 1 34564 _003338_hash NULL ++_003339_hash alloc_perm_bits 2 1532 _003339_hash NULL ++_003340_hash alloc_private 2 22399 _003340_hash NULL ++_003341_hash alloc_rtllib 1 51136 _003341_hash NULL ++_003342_hash alloc_rx_desc_ring 2 18016 _003342_hash NULL ++_003343_hash arcfb_write 3 8702 _003343_hash NULL ++_003344_hash ath6kl_usb_bmi_read 3 48745 _003344_hash NULL ++_003345_hash ath6kl_usb_bmi_write 3 2454 _003345_hash &_001020_hash ++_003346_hash ath6kl_usb_ctrl_msg_exchange 4 33327 _003346_hash NULL ++_003347_hash au0828_init_isoc 2-3 61917 _003347_hash NULL ++_003349_hash auok190xfb_write 3 37001 _003349_hash NULL ++_003350_hash beacon_interval_read 3 7091 _003350_hash NULL ++_003351_hash brcmf_usb_attach 1-2 44656 _003351_hash NULL ++_003353_hash broadsheetfb_write 3 39976 _003353_hash NULL ++_003354_hash broadsheet_spiflash_rewrite_sector 2 54864 _003354_hash NULL ++_003355_hash ci13xxx_add_device 3 14456 _003355_hash NULL ++_003356_hash cmpk_message_handle_tx 4 54024 _003356_hash NULL ++_003357_hash comedi_alloc_subdevices 2 29207 _003357_hash NULL ++_003358_hash comedi_buf_alloc 3 24822 _003358_hash NULL ++_003359_hash comedi_read 3 13199 _003359_hash NULL ++_003360_hash comedi_write 3 47926 _003360_hash NULL ++_003361_hash create_trace_probe 1 20175 _003361_hash NULL ++_003362_hash create_trace_uprobe 1 13184 _003362_hash NULL ++_003363_hash cx18_copy_buf_to_user 4 22735 _003363_hash NULL ++_003364_hash cx231xx_init_bulk 2-3 47024 _003364_hash NULL ++_003366_hash cx231xx_init_isoc 2-3 56453 _003366_hash NULL ++_003368_hash cx231xx_init_vbi_isoc 2-3 28053 _003368_hash NULL ++_003370_hash da9052_group_write 3 4534 _003370_hash NULL ++_003371_hash debug_debug1_read 3 8856 _003371_hash NULL ++_003372_hash debug_debug2_read 3 30526 _003372_hash NULL ++_003373_hash debug_debug3_read 3 56894 _003373_hash NULL ++_003374_hash debug_debug4_read 3 61367 _003374_hash NULL ++_003375_hash debug_debug5_read 3 2291 _003375_hash NULL ++_003376_hash debug_debug6_read 3 33168 _003376_hash NULL ++_003377_hash dev_read 3 56369 
_003377_hash NULL ++_003378_hash do_dmabuf_dirty_ldu 6 52241 _003378_hash NULL ++_003379_hash drm_compat_ioctl 2 51717 _003379_hash NULL ++_003380_hash drm_mode_create_tv_properties 2 23122 _003380_hash NULL ++_003381_hash drm_property_create_bitmask 5 30195 _003381_hash NULL ++_003382_hash drm_property_create_enum 5 29201 _003382_hash NULL ++_003383_hash dsp_buffer_alloc 2 11684 _003383_hash NULL ++_003384_hash dt3155_alloc_coherent 2 58073 _003384_hash NULL ++_003385_hash dtim_interval_read 3 654 _003385_hash NULL ++_003386_hash dvb_audio_write 3 51275 _003386_hash NULL ++_003387_hash dvb_ca_en50221_io_ioctl 2 26490 _003387_hash NULL ++_003388_hash dvb_ca_write 3 41171 _003388_hash NULL ++_003389_hash dvb_demux_ioctl 2 42733 _003389_hash NULL ++_003390_hash dvb_dmxdev_buffer_read 4 20682 _003390_hash NULL ++_003391_hash dvb_dvr_ioctl 2 49182 _003391_hash NULL ++_003392_hash dvb_generic_ioctl 2 21810 _003392_hash NULL ++_003393_hash dvb_net_ioctl 2 61559 _003393_hash NULL ++_003394_hash dvb_net_sec_callback 2 28786 _003394_hash NULL ++_003396_hash dvb_video_write 3 754 _003396_hash NULL ++_003397_hash dynamic_ps_timeout_read 3 10110 _003397_hash NULL ++_003398_hash easycap_alsa_vmalloc 2 14426 _003398_hash NULL ++_003399_hash em28xx_alloc_isoc 4 46892 _003399_hash NULL ++_003400_hash error_error_bar_retry_read 3 64305 _003400_hash NULL ++_003401_hash error_error_frame_cts_nul_flid_read 3 17262 _003401_hash NULL ++_003402_hash error_error_frame_read 3 39947 _003402_hash &_002436_hash ++_003403_hash error_error_null_Frame_tx_start_read 3 55024 _003403_hash NULL ++_003404_hash error_error_numll_frame_cts_start_read 3 47781 _003404_hash NULL ++_003405_hash ext_sd_execute_read_data 9 48589 _003405_hash NULL ++_003406_hash ext_sd_execute_write_data 9 8175 _003406_hash NULL ++_003407_hash fast_user_write 5 20494 _003407_hash NULL ++_003408_hash f_audio_buffer_alloc 1 41110 _003408_hash NULL ++_003409_hash fb_alloc_cmap_gfp 2 20792 _003409_hash NULL ++_003410_hash fbcon_do_set_font 2-3 4079 _003410_hash NULL ++_003412_hash fb_read 3 33506 _003412_hash NULL ++_003413_hash fb_sys_read 3 13778 _003413_hash NULL ++_003414_hash fb_sys_write 3 33130 _003414_hash NULL ++_003415_hash fb_write 3 46924 _003415_hash NULL ++_003416_hash firmwareUpload 3 32794 _003416_hash NULL ++_003417_hash fmc_send_cmd 5 20435 _003417_hash NULL ++_003418_hash fops_read 3 40672 _003418_hash NULL ++_003419_hash forced_ps_read 3 31685 _003419_hash NULL ++_003420_hash frame_alloc 4 15981 _003420_hash NULL ++_003421_hash framebuffer_alloc 1 59145 _003421_hash NULL ++_003422_hash ftrace_write 3 29551 _003422_hash NULL ++_003423_hash fw_download_code 3 13249 _003423_hash NULL ++_003424_hash fwSendNullPacket 2 54618 _003424_hash NULL ++_003425_hash gdm_wimax_netif_rx 3 43423 _003425_hash &_001810_hash ++_003426_hash get_vm_area 1 18080 _003426_hash NULL ++_003427_hash __get_vm_area 1 61599 _003427_hash NULL ++_003428_hash get_vm_area_caller 1 10527 _003428_hash NULL ++_003429_hash __get_vm_area_caller 1 56416 _003828_hash NULL nohasharray ++_003430_hash gspca_dev_probe2 4 59833 _003430_hash NULL ++_003431_hash hdpvr_read 3 9273 _003431_hash NULL ++_003432_hash hecubafb_write 3 26942 _003432_hash NULL ++_003433_hash i915_compat_ioctl 2 3656 _003433_hash NULL ++_003434_hash i915_gem_execbuffer_relocate_slow 7 25355 _003434_hash NULL ++_003435_hash ieee80211_alloc_txb 1-2 52477 _003435_hash NULL ++_003437_hash ieee80211_authentication_req 3 63973 _003437_hash NULL ++_003438_hash ieee80211_wx_set_gen_ie 3 51399 _003438_hash NULL 
++_003439_hash ieee80211_wx_set_gen_ie_rsl 3 3521 _003458_hash NULL nohasharray ++_003440_hash intel_sdvo_set_value 4 2311 _003440_hash NULL ++_003441_hash ir_lirc_transmit_ir 3 64403 _003441_hash NULL ++_003442_hash irq_blk_threshold_read 3 33666 _003442_hash NULL ++_003443_hash irq_pkt_threshold_read 3 33356 _003443_hash &_000154_hash ++_003444_hash irq_timeout_read 3 54653 _003444_hash NULL ++_003445_hash ivtv_buf_copy_from_user 4 25502 _003445_hash NULL ++_003446_hash ivtv_copy_buf_to_user 4 6159 _003446_hash NULL ++_003447_hash ivtvfb_write 3 40023 _003447_hash NULL ++_003448_hash kgdb_hex2mem 3 24755 _003448_hash NULL ++_003449_hash lirc_buffer_init 2-3 53282 _003449_hash NULL ++_003451_hash lirc_write 3 20604 _003451_hash NULL ++_003452_hash mce_request_packet 3 1073 _003452_hash NULL ++_003453_hash media_entity_init 2-4 15870 _003453_hash &_001742_hash ++_003455_hash mem_fw_gen_free_mem_blks_read 3 11413 _003455_hash NULL ++_003456_hash mem_fwlog_free_mem_blks_read 3 59616 _003456_hash NULL ++_003457_hash mem_rx_free_mem_blks_read 3 675 _003457_hash NULL ++_003458_hash mem_tx_free_mem_blks_read 3 3521 _003458_hash &_003439_hash ++_003459_hash metronomefb_write 3 8823 _003459_hash NULL ++_003460_hash mga_compat_ioctl 2 52170 _003460_hash NULL ++_003461_hash mmio_read 4 40348 _003461_hash NULL ++_003462_hash netlink_send 5 38434 _003462_hash NULL ++_003463_hash nfc_hci_execute_cmd 5 43882 _003463_hash NULL ++_003464_hash nfc_hci_send_event 5 21452 _003464_hash NULL ++_003465_hash nfc_hci_send_response 5 56462 _003465_hash NULL ++_003466_hash ni_gpct_device_construct 5 610 _003466_hash NULL ++_003467_hash nouveau_compat_ioctl 2 28305 _003467_hash NULL ++_003468_hash odev_update 2 50169 _003468_hash NULL ++_003469_hash opera1_usb_i2c_msgxfer 4 64521 _003469_hash NULL ++_003470_hash OSDSetBlock 2-4 38986 _003470_hash NULL ++_003472_hash oz_add_farewell 5 20652 _003472_hash NULL ++_003473_hash oz_cdev_read 3 20659 _003473_hash NULL ++_003474_hash oz_cdev_write 3 33852 _003474_hash NULL ++_003475_hash oz_ep_alloc 2 5587 _003475_hash NULL ++_003476_hash oz_events_read 3 47535 _003476_hash NULL ++_003477_hash persistent_ram_buffer_map 1-2 11332 _003477_hash NULL ++_003479_hash pipeline_cs_rx_packet_in_read 3 37089 _003479_hash NULL ++_003480_hash pipeline_cs_rx_packet_out_read 3 58926 _003480_hash NULL ++_003481_hash pipeline_csum_to_rx_xfer_swi_read 3 15403 _003481_hash NULL ++_003482_hash pipeline_dec_packet_in_fifo_full_read 3 33052 _003482_hash NULL ++_003483_hash pipeline_dec_packet_in_read 3 47076 _003483_hash NULL ++_003484_hash pipeline_dec_packet_out_read 3 54052 _003484_hash NULL ++_003485_hash pipeline_defrag_to_csum_swi_read 3 63037 _003485_hash NULL ++_003486_hash pipeline_enc_rx_stat_fifo_int_read 3 7107 _003486_hash NULL ++_003487_hash pipeline_enc_tx_stat_fifo_int_read 3 14680 _003487_hash NULL ++_003488_hash pipeline_hs_tx_stat_fifo_int_read 3 15642 _003488_hash &_001260_hash ++_003489_hash pipeline_pipeline_fifo_full_read 3 34095 _003489_hash NULL ++_003490_hash pipeline_post_proc_swi_read 3 24108 _003490_hash NULL ++_003491_hash pipeline_pre_proc_swi_read 3 3898 _003491_hash NULL ++_003492_hash pipeline_pre_to_defrag_swi_read 3 56321 _003492_hash NULL ++_003493_hash pipeline_rx_complete_stat_fifo_int_read 3 40671 _003493_hash NULL ++_003494_hash pipeline_sec_frag_swi_read 3 30294 _003494_hash NULL ++_003495_hash pipeline_tcp_rx_stat_fifo_int_read 3 26745 _003495_hash NULL ++_003496_hash pipeline_tcp_tx_stat_fifo_int_read 3 32589 _003496_hash NULL ++_003497_hash 
play_iframe 3 8219 _003497_hash NULL ++_003498_hash probes_write 3 29711 _003498_hash NULL ++_003499_hash psb_unlocked_ioctl 2 16926 _003499_hash &_002668_hash ++_003500_hash ps_poll_ps_poll_max_ap_turn_read 3 53140 _003500_hash NULL ++_003501_hash ps_poll_ps_poll_timeouts_read 3 5934 _003501_hash NULL ++_003502_hash ps_poll_ps_poll_utilization_read 3 39383 _003502_hash NULL ++_003503_hash ps_poll_upsd_max_ap_turn_read 3 42050 _003503_hash NULL ++_003504_hash ps_poll_upsd_timeouts_read 3 36755 _003504_hash NULL ++_003505_hash ps_poll_upsd_utilization_read 3 28519 _003505_hash NULL ++_003506_hash pvr2_ioread_read 3 10720 _003506_hash &_001669_hash ++_003507_hash pvr2_ioread_set_sync_key 3 59882 _003507_hash NULL ++_003508_hash pvr2_stream_buffer_count 2 33719 _003508_hash NULL ++_003509_hash pwr_connection_out_of_sync_read 3 35061 _003509_hash NULL ++_003510_hash pwr_cont_miss_bcns_spread_read 3 39250 _003515_hash NULL nohasharray ++_003511_hash pwr_missing_bcns_cnt_read 3 45113 _003511_hash NULL ++_003512_hash pwr_rcvd_awake_bcns_cnt_read 3 12632 _003512_hash NULL ++_003513_hash pwr_rcvd_bcns_cnt_read 3 4774 _003513_hash NULL ++_003514_hash qc_capture 3 19298 _003514_hash NULL ++_003515_hash r128_compat_ioctl 2 39250 _003515_hash &_003510_hash ++_003516_hash radeon_compat_ioctl 2 59150 _003516_hash NULL ++_003517_hash radeon_kms_compat_ioctl 2 51371 _003517_hash NULL ++_003518_hash Realloc 2 34961 _003518_hash NULL ++_003519_hash redrat3_transmit_ir 3 64244 _003519_hash NULL ++_003520_hash reg_w_buf 3 27724 _003520_hash NULL ++_003521_hash reg_w_ixbuf 4 34736 _003521_hash NULL ++_003522_hash rtllib_alloc_txb 1-2 21687 _003522_hash NULL ++_003524_hash rtllib_authentication_req 3 26713 _003524_hash NULL ++_003525_hash rtllib_wx_set_gen_ie 3 59808 _003525_hash NULL ++_003526_hash rts51x_transfer_data_partial 6 5735 _003526_hash NULL ++_003527_hash rvmalloc 1 46873 _003527_hash NULL ++_003528_hash rx_decrypt_key_not_found_read 3 37820 _003528_hash NULL ++_003529_hash rx_defrag_called_read 3 1897 _003529_hash NULL ++_003530_hash rx_defrag_decrypt_failed_read 3 41411 _003530_hash NULL ++_003531_hash rx_defrag_init_called_read 3 35935 _003531_hash NULL ++_003532_hash rx_defrag_in_process_called_read 3 59338 _003532_hash NULL ++_003533_hash rx_defrag_need_decrypt_read 3 42253 _003533_hash NULL ++_003534_hash rx_defrag_need_defrag_read 3 28117 _003534_hash NULL ++_003535_hash rx_defrag_tkip_called_read 3 21031 _003535_hash NULL ++_003536_hash rx_filter_accum_arp_pend_requests_read 3 11003 _003536_hash NULL ++_003537_hash rx_filter_arp_filter_read 3 61914 _003537_hash NULL ++_003538_hash rx_filter_beacon_filter_read 3 49279 _003538_hash NULL ++_003539_hash rx_filter_data_filter_read 3 30098 _003539_hash NULL ++_003540_hash rx_filter_dup_filter_read 3 37238 _003540_hash NULL ++_003541_hash rx_filter_ibss_filter_read 3 50167 _003541_hash NULL ++_003542_hash rx_filter_max_arp_queue_dep_read 3 5851 _003542_hash NULL ++_003543_hash rx_filter_mc_filter_read 3 25712 _003543_hash NULL ++_003544_hash rx_filter_protection_filter_read 3 39282 _003544_hash NULL ++_003545_hash rx_rate_rx_frames_per_rates_read 3 7282 _003545_hash NULL ++_003546_hash rx_rx_beacon_early_term_read 3 21559 _003546_hash NULL ++_003547_hash rx_rx_checksum_result_read 3 50617 _003547_hash NULL ++_003548_hash rx_rx_cmplt_read 3 14753 _003548_hash NULL ++_003549_hash rx_rx_cmplt_task_read 3 35226 _003549_hash NULL ++_003550_hash rx_rx_defrag_end_read 3 505 _003550_hash NULL ++_003551_hash rx_rx_defrag_read 3 2010 _003551_hash NULL 
++_003552_hash rx_rx_done_read 3 65217 _003552_hash NULL ++_003553_hash rx_rx_dropped_frame_read 3 23748 _003553_hash NULL ++_003554_hash rx_rx_frame_checksum_read 3 40140 _003554_hash NULL ++_003555_hash rx_rx_hdr_overflow_read 3 35002 _003555_hash NULL ++_003556_hash rx_rx_out_of_mpdu_nodes_read 3 64668 _003556_hash NULL ++_003557_hash rx_rx_phy_hdr_read 3 20950 _003557_hash NULL ++_003558_hash rx_rx_pre_complt_read 3 41653 _003558_hash NULL ++_003559_hash rx_rx_timeout_read 3 62389 _003559_hash NULL ++_003560_hash rx_rx_timeout_wa_read 3 50204 _003560_hash NULL ++_003561_hash rx_rx_tkip_replays_read 3 60193 _003561_hash NULL ++_003562_hash rx_rx_wa_ba_not_expected_read 3 61341 _003562_hash NULL ++_003563_hash rx_rx_wa_density_dropped_frame_read 3 26095 _003563_hash NULL ++_003564_hash rx_streaming_always_read 3 49401 _003564_hash NULL ++_003565_hash rx_streaming_interval_read 3 55291 _003565_hash NULL ++_003566_hash saa7164_buffer_alloc_user 2 9627 _003566_hash NULL ++_003567_hash send_control_msg 6 48498 _003567_hash NULL ++_003568_hash SendTxCommandPacket 3 42901 _003568_hash NULL ++_003569_hash setup_window 2-7-5-4 59178 _003569_hash NULL ++_003573_hash shmem_pwrite_fast 3 46842 _003573_hash NULL ++_003574_hash sleep_auth_read 3 19159 _003574_hash NULL ++_003575_hash sn9c102_read 3 29305 _003575_hash NULL ++_003576_hash snd_pcm_alloc_vmalloc_buffer 2 44595 _003576_hash NULL ++_003577_hash split_scan_timeout_read 3 20029 _003577_hash NULL ++_003578_hash stk_prepare_sio_buffers 2 57168 _003578_hash NULL ++_003579_hash store_debug_level 3 35652 _003579_hash NULL ++_003580_hash suspend_dtim_interval_read 3 64971 _003580_hash NULL ++_003581_hash sys_prctl 4 8766 _003581_hash NULL ++_003582_hash tm6000_read_write_usb 7 50774 _003582_hash &_002149_hash ++_003583_hash tracing_read_pipe 3 35312 _003583_hash NULL ++_003584_hash ts_read 3 44687 _003584_hash NULL ++_003585_hash ts_write 3 64336 _003585_hash NULL ++_003586_hash tt3650_ci_msg 4 57219 _003586_hash NULL ++_003587_hash ttm_object_device_init 2 10321 _003587_hash NULL ++_003588_hash ttm_object_file_init 2 27804 _003588_hash NULL ++_003589_hash tx_frag_bad_mblk_num_read 3 28064 _003589_hash NULL ++_003590_hash tx_frag_cache_hit_read 3 29639 _003590_hash NULL ++_003591_hash tx_frag_cache_miss_read 3 28394 _003591_hash NULL ++_003592_hash tx_frag_called_read 3 1748 _003592_hash NULL ++_003593_hash tx_frag_failed_read 3 43540 _003593_hash NULL ++_003594_hash tx_frag_init_called_read 3 48377 _003594_hash NULL ++_003595_hash tx_frag_in_process_called_read 3 1290 _003595_hash NULL ++_003596_hash tx_frag_key_not_found_read 3 22971 _003596_hash NULL ++_003597_hash tx_frag_mpdu_alloc_failed_read 3 41167 _003597_hash NULL ++_003598_hash tx_frag_need_fragmentation_read 3 50153 _003598_hash NULL ++_003599_hash tx_frag_tkip_called_read 3 31575 _003599_hash NULL ++_003600_hash tx_tx_burst_programmed_read 3 20320 _003600_hash NULL ++_003601_hash tx_tx_checksum_result_read 3 36490 _003601_hash &_001996_hash ++_003602_hash tx_tx_cmplt_read 3 35854 _003602_hash NULL ++_003603_hash tx_tx_data_prepared_read 3 43497 _003603_hash NULL ++_003604_hash tx_tx_data_programmed_read 3 36871 _003604_hash NULL ++_003605_hash tx_tx_done_data_read 3 6799 _003605_hash NULL ++_003606_hash tx_tx_done_int_template_read 3 55511 _003606_hash &_001887_hash ++_003607_hash tx_tx_done_template_read 3 35104 _003607_hash &_000106_hash ++_003608_hash tx_tx_exch_expiry_read 3 8749 _003608_hash NULL ++_003609_hash tx_tx_exch_pending_read 3 53018 _003609_hash NULL ++_003610_hash 
tx_tx_exch_read 3 52986 _003610_hash NULL ++_003611_hash tx_tx_frame_checksum_read 3 41553 _003611_hash NULL ++_003612_hash tx_tx_imm_resp_read 3 55964 _003612_hash NULL ++_003613_hash tx_tx_prepared_descs_read 3 9221 _003613_hash NULL ++_003614_hash tx_tx_retry_data_read 3 1926 _003614_hash NULL ++_003615_hash tx_tx_retry_template_read 3 57623 _003615_hash NULL ++_003616_hash tx_tx_start_data_read 3 53219 _003616_hash NULL ++_003617_hash tx_tx_start_fw_gen_read 3 58648 _003617_hash NULL ++_003618_hash tx_tx_start_int_templates_read 3 58324 _003618_hash NULL ++_003619_hash tx_tx_start_null_frame_read 3 6281 _003619_hash NULL ++_003620_hash tx_tx_starts_read 3 3617 _003620_hash NULL ++_003621_hash tx_tx_start_templates_read 3 17164 _003621_hash NULL ++_003622_hash tx_tx_template_prepared_read 3 30424 _003622_hash NULL ++_003623_hash tx_tx_template_programmed_read 3 30461 _003623_hash NULL ++_003624_hash udi_log_event 3 58105 _003624_hash NULL ++_003625_hash udl_prime_create 2 57159 _003625_hash NULL ++_003626_hash uf_create_device_nodes 2 24948 _003626_hash NULL ++_003627_hash uf_sme_queue_message 3 15697 _003627_hash NULL ++_003628_hash ufx_alloc_urb_list 3 10349 _003628_hash NULL ++_003629_hash unifi_net_data_malloc 3 24716 _003629_hash NULL ++_003630_hash unifi_read 3 14899 _003630_hash NULL ++_003631_hash unifi_write 3 65012 _003631_hash NULL ++_003632_hash usb_buffer_alloc 2 36276 _003632_hash NULL ++_003633_hash usbvision_rvmalloc 1 19655 _003633_hash NULL ++_003634_hash usbvision_v4l2_read 3 34386 _003634_hash NULL ++_003635_hash uvc_alloc_buffers 2-3 9656 _003635_hash NULL ++_003637_hash uvc_alloc_entity 3-4 20836 _003637_hash NULL ++_003639_hash uvc_debugfs_stats_read 3 56651 _003639_hash NULL ++_003640_hash uvc_simplify_fraction 3 31303 _003640_hash NULL ++_003641_hash v4l2_ctrl_new 7 24927 _003641_hash NULL ++_003642_hash v4l2_event_subscribe 3 53687 _003642_hash NULL ++_003643_hash v4l_stk_read 3 39672 _003643_hash NULL ++_003644_hash __vb2_perform_fileio 3 63033 _003644_hash NULL ++_003645_hash vfd_write 3 14717 _003645_hash NULL ++_003646_hash vfio_config_do_rw 3 46091 _003646_hash NULL ++_003647_hash vfio_msi_enable 2 20906 _003647_hash NULL ++_003648_hash viafb_dvp0_proc_write 3 23023 _003648_hash NULL ++_003649_hash viafb_dvp1_proc_write 3 48864 _003649_hash NULL ++_003650_hash viafb_vt1636_proc_write 3 16018 _003650_hash NULL ++_003651_hash __videobuf_alloc_vb 1 27062 _003651_hash NULL ++_003652_hash __videobuf_alloc_vb 1 5665 _003652_hash NULL ++_003653_hash __videobuf_copy_to_user 4 15423 _003653_hash NULL ++_003654_hash videobuf_dma_init_kernel 3 6963 _003654_hash NULL ++_003655_hash videobuf_pages_to_sg 2 3708 _003655_hash NULL ++_003656_hash videobuf_vmalloc_to_sg 2 4548 _003656_hash NULL ++_003657_hash video_usercopy 2 62151 _003657_hash NULL ++_003658_hash virtscsi_alloc_tgt 2 6643 _003658_hash NULL ++_003659_hash vmw_cursor_update_image 3-4 16332 _003659_hash NULL ++_003661_hash vmw_framebuffer_dmabuf_dirty 6 37661 _003661_hash &_001116_hash ++_003662_hash vmw_framebuffer_surface_dirty 6 48132 _003662_hash NULL ++_003663_hash vmw_gmr2_bind 3 21305 _003663_hash NULL ++_003664_hash vmw_unlocked_ioctl 2 19212 _003664_hash NULL ++_003665_hash w9966_v4l_read 3 31148 _003665_hash NULL ++_003666_hash wl1273_fm_fops_write 3 60621 _003666_hash NULL ++_003667_hash zoran_write 3 22404 _003667_hash NULL ++_003668_hash alloc_vm_area 1 15989 _003668_hash NULL ++_003669_hash cx18_copy_mdl_to_user 4 45549 _003669_hash NULL ++_003670_hash dlfb_ops_write 3 64150 _003670_hash NULL 
++_003671_hash dvb_demux_read 3 13981 _003671_hash NULL ++_003672_hash dvb_dmxdev_read_sec 4 7892 _003672_hash NULL ++_003673_hash dvb_dvr_read 3 17073 _003673_hash NULL ++_003674_hash em28xx_init_isoc 4 62883 _003674_hash &_000729_hash ++_003675_hash fb_alloc_cmap 2 6554 _003675_hash NULL ++_003676_hash gspca_dev_probe 4 2570 _003676_hash NULL ++_003677_hash ieee80211_auth_challenge 3 18810 _003677_hash NULL ++_003678_hash ieee80211_rtl_auth_challenge 3 61897 _003678_hash NULL ++_003679_hash init_pci_cap_msi_perm 2 59033 _003679_hash NULL ++_003680_hash __ioremap_caller 1-2 21800 _003680_hash NULL ++_003682_hash ivtv_read 3 57796 _003682_hash NULL ++_003683_hash ivtv_v4l2_write 3 39226 _003683_hash NULL ++_003684_hash mce_async_out 3 58056 _003684_hash NULL ++_003685_hash mce_flush_rx_buffer 2 14976 _003685_hash NULL ++_003686_hash ms_read_multiple_pages 4-5 8052 _003686_hash NULL ++_003688_hash ms_write_multiple_pages 5-6 10362 _003688_hash NULL ++_003690_hash nfc_hci_send_cmd 5 55714 _003690_hash NULL ++_003691_hash persistent_ram_new 1-2 40501 _003691_hash NULL ++_003693_hash picolcd_fb_write 3 2318 _003693_hash NULL ++_003694_hash process_bulk_data_command 4 38906 _003694_hash NULL ++_003695_hash pvr2_v4l2_read 3 18006 _003695_hash NULL ++_003696_hash qcam_read 3 13977 _003696_hash NULL ++_003697_hash register_unifi_sdio 2 55239 _003697_hash NULL ++_003698_hash resize_async_buffer 4 64031 _003698_hash &_002431_hash ++_003699_hash rtllib_auth_challenge 3 12493 _003699_hash NULL ++_003702_hash stk_allocate_buffers 2 16291 _003702_hash NULL ++_003703_hash subdev_ioctl 2 28417 _003703_hash NULL ++_003704_hash _sys_packet_req 4 46793 _003704_hash NULL ++_003705_hash tm6000_i2c_recv_regs16 5 2949 _003705_hash NULL ++_003706_hash tm6000_i2c_recv_regs 5 46215 _003706_hash NULL ++_003707_hash tm6000_i2c_send_regs 5 20250 _003707_hash NULL ++_003708_hash tt3650_ci_msg_locked 4 8013 _003708_hash NULL ++_003709_hash ufx_ops_write 3 54848 _003709_hash NULL ++_003710_hash update_macheader 7 1775 _003710_hash NULL ++_003711_hash usbdux_attach_common 4 51764 _003750_hash NULL nohasharray ++_003712_hash usbduxfast_attach_common 4 52538 _003712_hash NULL ++_003713_hash usbduxsigma_attach_common 4 40847 _003713_hash NULL ++_003714_hash uvc_v4l2_ioctl 2 8411 _003714_hash NULL ++_003715_hash v4l2_ctrl_new_int_menu 4 41151 _003715_hash NULL ++_003716_hash v4l2_ctrl_new_std 5 45748 _003716_hash &_000497_hash ++_003717_hash v4l2_ctrl_new_std_menu 4 6221 _003717_hash NULL ++_003718_hash vb2_read 3 42703 _003718_hash NULL ++_003719_hash vb2_write 3 31948 _003719_hash NULL ++_003720_hash vfio_pci_set_msi_trigger 3-4 26507 _003720_hash NULL ++_003722_hash viafb_iga1_odev_proc_write 3 36241 _003722_hash NULL ++_003723_hash viafb_iga2_odev_proc_write 3 2363 _003723_hash NULL ++_003724_hash __videobuf_alloc_cached 1 12740 _003724_hash NULL ++_003725_hash __videobuf_alloc_uncached 1 55711 _003725_hash NULL ++_003726_hash __videobuf_copy_stream 4 44769 _003726_hash NULL ++_003727_hash videobuf_read_one 3 31637 _003727_hash NULL ++_003728_hash video_ioctl2 2 21380 _003728_hash NULL ++_003729_hash vmap 2 15025 _003729_hash NULL ++_003730_hash vmw_cursor_update_dmabuf 3-4 32045 _003730_hash NULL ++_003732_hash vmw_gmr_bind 3 44130 _003732_hash NULL ++_003733_hash xd_read_multiple_pages 4-5 11422 _003733_hash NULL ++_003735_hash xd_write_multiple_pages 5-6 53633 _003735_hash NULL ++_003737_hash xenfb_write 3 43412 _003737_hash NULL ++_003738_hash arch_gnttab_map_shared 3 41306 _003738_hash NULL ++_003739_hash 
arch_gnttab_map_status 3 49812 _003739_hash NULL ++_003740_hash bttv_read 3 11432 _003740_hash NULL ++_003741_hash cx18_read 3 23699 _003741_hash NULL ++_003742_hash cx2341x_ctrl_new_menu 3 49700 _003742_hash NULL ++_003743_hash cx2341x_ctrl_new_std 4 57061 _003743_hash NULL ++_003744_hash cx25821_video_ioctl 2 30188 _003744_hash NULL ++_003745_hash dt3155_read 3 59226 _003745_hash NULL ++_003746_hash ioremap_cache 1-2 47189 _003746_hash NULL ++_003748_hash ioremap_nocache 1-2 2439 _003748_hash NULL ++_003750_hash ioremap_prot 1-2 51764 _003750_hash &_003711_hash ++_003752_hash ioremap_wc 1-2 62695 _003752_hash NULL ++_003754_hash ivtv_read_pos 3 34400 _003754_hash &_000312_hash ++_003755_hash mcam_v4l_read 3 36513 _003755_hash NULL ++_003756_hash ms_rw_multi_sector 3-4 7459 _003756_hash NULL ++_003758_hash pvr2_v4l2_ioctl 2 24398 _003758_hash &_000877_hash ++_003759_hash ramoops_init_prz 5 12134 _003759_hash NULL ++_003761_hash ttm_bo_kmap_ttm 3 5922 _003761_hash NULL ++_003762_hash uf_ap_process_data_pdu 7 25860 _003762_hash NULL ++_003763_hash vb2_fop_read 3 24080 _003763_hash NULL ++_003764_hash vb2_fop_write 3 30420 _003764_hash NULL ++_003765_hash videobuf_read_stream 3 14956 _003765_hash NULL ++_003766_hash video_read 3 28148 _003766_hash NULL ++_003767_hash vmw_du_crtc_cursor_set 4-5 28479 _003767_hash NULL ++_003769_hash xd_rw 3-4 49020 _003769_hash NULL ++_003771_hash zoran_ioctl 2 30465 _003771_hash NULL ++_003772_hash zr364xx_read 3 2354 _003772_hash NULL ++_003773_hash acpi_os_ioremap 1-2 49523 _003773_hash NULL ++_003775_hash au0828_v4l2_read 3 40220 _003775_hash NULL ++_003776_hash ca91cx42_alloc_resource 2 10502 _003776_hash NULL ++_003778_hash cx18_read_pos 3 4683 _003778_hash NULL ++_003779_hash cx18_v4l2_read 3 21196 _003779_hash NULL ++_003780_hash cx231xx_v4l2_read 3 55014 _003780_hash NULL ++_003781_hash devm_ioremap_nocache 2-3 2036 _003781_hash NULL ++_003783_hash do_test 1 15766 _003783_hash NULL ++_003784_hash __einj_error_trigger 1 17707 _003784_hash &_001764_hash ++_003785_hash em28xx_v4l2_read 3 16701 _003785_hash NULL ++_003786_hash init_chip_wc_pat 2 62768 _003786_hash NULL ++_003787_hash intel_render_ring_init_dri 2-3 45446 _003787_hash NULL ++_003789_hash io_mapping_create_wc 1-2 1354 _003789_hash NULL ++_003791_hash iommu_map_mmio_space 1 30919 _003791_hash NULL ++_003792_hash ioremap 1-2 23172 _003792_hash NULL ++_003794_hash ivtv_v4l2_read 3 1964 _003794_hash NULL ++_003795_hash mga_ioremap 1-2 8571 _003795_hash NULL ++_003797_hash mpeg_read 3 6708 _003797_hash NULL ++_003798_hash msix_map_region 3 3411 _003798_hash NULL ++_003799_hash ms_rw 3-4 17220 _003799_hash NULL ++_003801_hash pci_iomap 3 47575 _003801_hash NULL ++_003802_hash pd_video_read 3 24510 _003802_hash NULL ++_003803_hash sfi_map_memory 1-2 5183 _003803_hash NULL ++_003805_hash solo_enc_read 3 33553 _003805_hash NULL ++_003806_hash solo_v4l2_read 3 59247 _003806_hash NULL ++_003807_hash timblogiw_read 3 48305 _003807_hash NULL ++_003808_hash tm6000_read 3 4151 _003808_hash NULL ++_003809_hash tsi148_alloc_resource 2 24563 _003809_hash NULL ++_003810_hash ttm_bo_ioremap 2-3 31082 _003810_hash NULL ++_003812_hash ttm_bo_kmap 3-2 60118 _003812_hash NULL ++_003813_hash vb2_vmalloc_get_userptr 3 31374 _003813_hash NULL ++_003814_hash vbi_read 3 63673 _003814_hash NULL ++_003815_hash viacam_read 3 54526 _003815_hash NULL ++_003816_hash xlate_dev_mem_ptr 1 15291 _003816_hash &_001231_hash ++_003817_hash a4t_cs_init 3 27734 _003817_hash NULL ++_003818_hash aac_nark_ioremap 2 50163 _003818_hash 
&_000323_hash ++_003819_hash aac_rkt_ioremap 2 3333 _003819_hash NULL ++_003820_hash aac_rx_ioremap 2 52410 _003820_hash NULL ++_003821_hash aac_sa_ioremap 2 13596 _003821_hash &_000299_hash ++_003822_hash aac_src_ioremap 2 41688 _003822_hash NULL ++_003823_hash aac_srcv_ioremap 2 6659 _003823_hash NULL ++_003824_hash acpi_map 1-2 58725 _003824_hash NULL ++_003826_hash acpi_os_read_memory 1-3 54186 _003826_hash NULL ++_003828_hash acpi_os_write_memory 1-3 56416 _003828_hash &_003429_hash ++_003830_hash atyfb_setup_generic 3 49151 _003830_hash NULL ++_003831_hash ca91cx42_master_set 4 23146 _003831_hash NULL ++_003832_hash check_mirror 1-2 57342 _003832_hash &_001753_hash ++_003834_hash cycx_setup 4 47562 _003834_hash NULL ++_003835_hash devm_ioremap 2-3 29235 _003835_hash NULL ++_003837_hash divasa_remap_pci_bar 3-4 23485 _003837_hash &_000979_hash ++_003839_hash doc_probe 1 23285 _003839_hash NULL ++_003840_hash DoC_Probe 1 57534 _003840_hash NULL ++_003841_hash efi_ioremap 1-2 3492 _003841_hash &_001137_hash ++_003843_hash ems_pcmcia_add_card 2 62627 _003843_hash NULL ++_003844_hash isp1760_register 1-2 628 _003844_hash NULL ++_003846_hash mid_get_vbt_data_r0 2 10876 _003846_hash NULL ++_003847_hash mid_get_vbt_data_r10 2 6308 _003847_hash NULL ++_003848_hash mid_get_vbt_data_r1 2 26170 _003848_hash NULL ++_003849_hash mthca_map_reg 2-3 5664 _003849_hash NULL ++_003851_hash mthca_setup_cmd_doorbells 2 53954 _003851_hash NULL ++_003852_hash netxen_nic_map_indirect_address_128M 2 42257 _003852_hash NULL ++_003853_hash pcim_iomap 3 58334 _003853_hash NULL ++_003854_hash persistent_ram_iomap 1-2 47156 _003854_hash NULL ++_003856_hash read_vbt_r0 1 503 _003856_hash NULL ++_003857_hash read_vbt_r10 1 60679 _003857_hash NULL ++_003858_hash register_device 2-3 60015 _003858_hash NULL ++_003860_hash remap_pci_mem 1-2 15966 _003860_hash NULL ++_003862_hash rtl_port_map 1-2 2385 _003862_hash NULL ++_003864_hash sfi_map_table 1 5462 _003864_hash NULL ++_003865_hash sriov_enable_migration 2 14889 _003865_hash NULL ++_003866_hash ssb_bus_scan 2 36578 _003866_hash NULL ++_003867_hash ssb_ioremap 2 5228 _003867_hash NULL ++_003868_hash tpci200_slot_map_space 2 3848 _003868_hash NULL ++_003869_hash tpm_tis_init 2-3 15304 _003869_hash NULL ++_003871_hash tsi148_master_set 4 14685 _003871_hash NULL ++_003872_hash acpi_os_map_memory 1-2 11161 _003872_hash NULL ++_003874_hash com90xx_found 3 13974 _003874_hash NULL ++_003875_hash netxen_nic_hw_read_wx_128M 2 26858 _003875_hash NULL ++_003876_hash netxen_nic_hw_write_wx_128M 2 33488 _003876_hash NULL ++_003877_hash sfi_check_table 1 6772 _003877_hash NULL ++_003878_hash sfi_sysfs_install_table 1 51688 _003878_hash NULL ++_003879_hash sriov_enable 2 59689 _003879_hash NULL ++_003880_hash ssb_bus_register 3 65183 _003880_hash NULL ++_003881_hash acpi_ex_system_memory_space_handler 2 31192 _003881_hash NULL ++_003882_hash acpi_tb_check_xsdt 1 21862 _003882_hash NULL ++_003883_hash acpi_tb_install_table 1 12988 _003883_hash NULL ++_003884_hash acpi_tb_parse_root_table 1 53455 _003884_hash NULL ++_003885_hash check_vendor_extension 1 3254 _003885_hash NULL ++_003886_hash pci_enable_sriov 2 35745 _003886_hash NULL ++_003887_hash ssb_bus_pcmciabus_register 3 56020 _003887_hash NULL ++_003888_hash ssb_bus_ssbbus_register 2 2217 _003888_hash NULL ++_003889_hash lpfc_sli_probe_sriov_nr_virtfn 2 26004 _003889_hash NULL ++_003890_hash alloc_vm_area 1 36149 _003890_hash NULL ++_003891_hash cma_create_area 2 38642 _003891_hash NULL ++_003893_hash fbcon_prepare_logo 5 6246 
_003893_hash NULL
++_003894_hash io_mapping_map_wc 2 19284 _003894_hash NULL
++_003895_hash nfs_dns_resolve_name 3 25036 _003895_hash NULL
++_003896_hash nfs_parse_server_name 2 1899 _003896_hash NULL
+--- tools/gcc/size_overflow_plugin.c	1970-01-01 00:00:00.000000000 +0000
++++ tools/gcc/size_overflow_plugin.c	2012-10-15 17:30:59.835924531 +0000
+@@ -0,0 +1,1879 @@
++/*
++ * Copyright 2011, 2012 by Emese Revfy
++ * Licensed under the GPL v2, or (at your option) v3
++ *
++ * Homepage:
++ * http://www.grsecurity.net/~ephox/overflow_plugin/
++ *
++ * This plugin recomputes expressions of function arguments marked by a size_overflow attribute
++ * with double integer precision (DImode/TImode for 32/64 bit integer types).
++ * The recomputed argument is checked against TYPE_MAX and an event is logged on overflow and the triggering process is killed.
++ *
++ * Usage:
++ * $ gcc -I`gcc -print-file-name=plugin`/include/c-family -I`gcc -print-file-name=plugin`/include -fPIC -shared -O2 -ggdb -Wall -W -Wno-missing-field-initializers -o size_overflow_plugin.so size_overflow_plugin.c
++ * $ gcc -fplugin=size_overflow_plugin.so test.c -O2
++ */
++
++#include "gcc-plugin.h"
++#include "config.h"
++#include "system.h"
++#include "coretypes.h"
++#include "tree.h"
++#include "tree-pass.h"
++#include "intl.h"
++#include "plugin-version.h"
++#include "tm.h"
++#include "toplev.h"
++#include "function.h"
++#include "tree-flow.h"
++#include "plugin.h"
++#include "gimple.h"
++#include "c-common.h"
++#include "diagnostic.h"
++#include "cfgloop.h"
++
++#if BUILDING_GCC_VERSION >= 4007
++#include "c-tree.h"
++#else
++#define C_DECL_IMPLICIT(EXP) DECL_LANG_FLAG_2 (EXP)
++#endif
++
++struct size_overflow_hash {
++	const struct size_overflow_hash * const next;
++	const char * const name;
++	const unsigned int param;
++};
++
++#include "size_overflow_hash.h"
++
++enum marked {
++	MARKED_NO, MARKED_YES, MARKED_NOT_INTENTIONAL
++};
++
++#define __unused __attribute__((__unused__))
++#define NAME(node) IDENTIFIER_POINTER(DECL_NAME(node))
++#define NAME_LEN(node) IDENTIFIER_LENGTH(DECL_NAME(node))
++#define BEFORE_STMT true
++#define AFTER_STMT false
++#define CREATE_NEW_VAR NULL_TREE
++#define CODES_LIMIT 32
++#define MAX_PARAM 32
++#define MY_STMT GF_PLF_1
++#define NO_CAST_CHECK GF_PLF_2
++
++#if BUILDING_GCC_VERSION == 4005
++#define DECL_CHAIN(NODE) (TREE_CHAIN(DECL_MINIMAL_CHECK(NODE)))
++#endif
++
++int plugin_is_GPL_compatible;
++void debug_gimple_stmt(gimple gs);
++
++static tree expand(struct pointer_set_t *visited, tree lhs);
++static bool pre_expand(struct pointer_set_t *visited, const_tree lhs);
++static tree report_size_overflow_decl;
++static const_tree const_char_ptr_type_node;
++static unsigned int handle_function(void);
++static void check_size_overflow(gimple stmt, tree size_overflow_type, tree cast_rhs, tree rhs, bool before);
++static tree get_size_overflow_type(gimple stmt, const_tree node);
++static tree dup_assign(struct pointer_set_t *visited, gimple oldstmt, tree size_overflow_type, tree rhs1, tree rhs2, tree __unused rhs3);
++
++static struct plugin_info size_overflow_plugin_info = {
++	.version = "20120930beta",
++	.help = "no-size-overflow\tturn off size overflow checking\n",
++};
++
++static tree handle_size_overflow_attribute(tree *node, tree __unused name, tree args, int __unused flags, bool *no_add_attrs)
++{
++	unsigned int arg_count;
++	enum tree_code code = TREE_CODE(*node);
++
++	switch (code) {
++	case FUNCTION_DECL:
++		arg_count = type_num_arguments(TREE_TYPE(*node));
++		break;
++
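++	/*
++	 * A sketch of what a declaration marked for this handler might look
++	 * like (illustrative only; alloc_buf is a hypothetical function, not
++	 * part of this patch).  The attribute arguments are 1-based parameter
++	 * positions, validated against arg_count in the loop below:
++	 *
++	 *   void *alloc_buf(unsigned long size)
++	 *   	__attribute__((size_overflow(1)));
++	 */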
case FUNCTION_TYPE: ++ case METHOD_TYPE: ++ arg_count = type_num_arguments(*node); ++ break; ++ default: ++ *no_add_attrs = true; ++ error("%s: %qE attribute only applies to functions", __func__, name); ++ return NULL_TREE; ++ } ++ ++ for (; args; args = TREE_CHAIN(args)) { ++ tree position = TREE_VALUE(args); ++ if (TREE_CODE(position) != INTEGER_CST || TREE_INT_CST_HIGH(position) || TREE_INT_CST_LOW(position) < 1 || TREE_INT_CST_LOW(position) > arg_count ) { ++ error("%s: parameter %u is outside range.", __func__, (unsigned int)TREE_INT_CST_LOW(position)); ++ *no_add_attrs = true; ++ } ++ } ++ return NULL_TREE; ++} ++ ++static const char* get_asm_name(tree node) ++{ ++ return IDENTIFIER_POINTER(DECL_ASSEMBLER_NAME(node)); ++} ++ ++static tree handle_intentional_overflow_attribute(tree *node, tree __unused name, tree args, int __unused flags, bool *no_add_attrs) ++{ ++ unsigned int arg_count, arg_num; ++ enum tree_code code = TREE_CODE(*node); ++ ++ switch (code) { ++ case FUNCTION_DECL: ++ arg_count = type_num_arguments(TREE_TYPE(*node)); ++ break; ++ case FUNCTION_TYPE: ++ case METHOD_TYPE: ++ arg_count = type_num_arguments(*node); ++ break; ++ case FIELD_DECL: ++ arg_num = TREE_INT_CST_LOW(TREE_VALUE(args)); ++ if (arg_num != 0) { ++ *no_add_attrs = true; ++ error("%s: %qE attribute parameter can only be 0 in structure fields", __func__, name); ++ } ++ return NULL_TREE; ++ default: ++ *no_add_attrs = true; ++ error("%qE attribute only applies to functions", name); ++ return NULL_TREE; ++ } ++ ++ for (; args; args = TREE_CHAIN(args)) { ++ tree position = TREE_VALUE(args); ++ if (TREE_CODE(position) != INTEGER_CST || TREE_INT_CST_HIGH(position) || TREE_INT_CST_LOW(position) > arg_count ) { ++ error("%s: parameter %u is outside range.", __func__, (unsigned int)TREE_INT_CST_LOW(position)); ++ *no_add_attrs = true; ++ } ++ } ++ return NULL_TREE; ++} ++ ++static struct attribute_spec size_overflow_attr = { ++ .name = "size_overflow", ++ .min_length = 1, ++ .max_length = -1, ++ .decl_required = true, ++ .type_required = false, ++ .function_type_required = false, ++ .handler = handle_size_overflow_attribute, ++#if BUILDING_GCC_VERSION >= 4007 ++ .affects_type_identity = false ++#endif ++}; ++ ++static struct attribute_spec intentional_overflow_attr = { ++ .name = "intentional_overflow", ++ .min_length = 1, ++ .max_length = -1, ++ .decl_required = true, ++ .type_required = false, ++ .function_type_required = false, ++ .handler = handle_intentional_overflow_attribute, ++#if BUILDING_GCC_VERSION >= 4007 ++ .affects_type_identity = false ++#endif ++}; ++ ++static void register_attributes(void __unused *event_data, void __unused *data) ++{ ++ register_attribute(&size_overflow_attr); ++ register_attribute(&intentional_overflow_attr); ++} ++ ++// http://www.team5150.com/~andrew/noncryptohashzoo2~/CrapWow.html ++static unsigned int CrapWow(const char *key, unsigned int len, unsigned int seed) ++{ ++#define cwfold( a, b, lo, hi ) { p = (unsigned int)(a) * (unsigned long long)(b); lo ^= (unsigned int)p; hi ^= (unsigned int)(p >> 32); } ++#define cwmixa( in ) { cwfold( in, m, k, h ); } ++#define cwmixb( in ) { cwfold( in, n, h, k ); } ++ ++ unsigned int m = 0x57559429; ++ unsigned int n = 0x5052acdb; ++ const unsigned int *key4 = (const unsigned int *)key; ++ unsigned int h = len; ++ unsigned int k = len + seed + n; ++ unsigned long long p; ++ ++ while (len >= 8) { ++ cwmixb(key4[0]) cwmixa(key4[1]) key4 += 2; ++ len -= 8; ++ } ++ if (len >= 4) { ++ cwmixb(key4[0]) key4 += 1; ++ len -= 4; ++ } ++ if 
(len) ++ cwmixa(key4[0] & ((1 << (len * 8)) - 1 )); ++ cwmixb(h ^ (k + n)); ++ return k ^ h; ++ ++#undef cwfold ++#undef cwmixa ++#undef cwmixb ++} ++ ++static inline unsigned int get_hash_num(const char *fndecl, const char *tree_codes, unsigned int len, unsigned int seed) ++{ ++ unsigned int fn = CrapWow(fndecl, strlen(fndecl), seed) & 0xffff; ++ unsigned int codes = CrapWow(tree_codes, len, seed) & 0xffff; ++ return fn ^ codes; ++} ++ ++static inline tree get_original_function_decl(tree fndecl) ++{ ++ if (DECL_ABSTRACT_ORIGIN(fndecl)) ++ return DECL_ABSTRACT_ORIGIN(fndecl); ++ return fndecl; ++} ++ ++static inline gimple get_def_stmt(const_tree node) ++{ ++ gcc_assert(node != NULL_TREE); ++ gcc_assert(TREE_CODE(node) == SSA_NAME); ++ return SSA_NAME_DEF_STMT(node); ++} ++ ++static unsigned char get_tree_code(const_tree type) ++{ ++ switch (TREE_CODE(type)) { ++ case ARRAY_TYPE: ++ return 0; ++ case BOOLEAN_TYPE: ++ return 1; ++ case ENUMERAL_TYPE: ++ return 2; ++ case FUNCTION_TYPE: ++ return 3; ++ case INTEGER_TYPE: ++ return 4; ++ case POINTER_TYPE: ++ return 5; ++ case RECORD_TYPE: ++ return 6; ++ case UNION_TYPE: ++ return 7; ++ case VOID_TYPE: ++ return 8; ++ case REAL_TYPE: ++ return 9; ++ case VECTOR_TYPE: ++ return 10; ++ case REFERENCE_TYPE: ++ return 11; ++ case OFFSET_TYPE: ++ return 12; ++ case COMPLEX_TYPE: ++ return 13; ++ default: ++ debug_tree((tree)type); ++ gcc_unreachable(); ++ } ++} ++ ++static size_t add_type_codes(const_tree type, unsigned char *tree_codes, size_t len) ++{ ++ gcc_assert(type != NULL_TREE); ++ ++ while (type && len < CODES_LIMIT) { ++ tree_codes[len] = get_tree_code(type); ++ len++; ++ type = TREE_TYPE(type); ++ } ++ return len; ++} ++ ++static unsigned int get_function_decl(const_tree fndecl, unsigned char *tree_codes) ++{ ++ const_tree arg, result, arg_field, type = TREE_TYPE(fndecl); ++ enum tree_code code = TREE_CODE(type); ++ size_t len = 0; ++ ++ gcc_assert(code == FUNCTION_TYPE || code == METHOD_TYPE); ++ ++ arg = TYPE_ARG_TYPES(type); ++ // skip builtins __builtin_constant_p ++ if (!arg && DECL_BUILT_IN(fndecl)) ++ return 0; ++ ++ if (TREE_CODE_CLASS(code) == tcc_type) ++ result = type; ++ else ++ result = DECL_RESULT(fndecl); ++ ++ gcc_assert(result != NULL_TREE); ++ len = add_type_codes(TREE_TYPE(result), tree_codes, len); ++ ++ if (arg == NULL_TREE) { ++ gcc_assert(CODE_CONTAINS_STRUCT(TREE_CODE(fndecl), TS_DECL_NON_COMMON)); ++ arg_field = DECL_ARGUMENT_FLD(fndecl); ++ if (arg_field == NULL_TREE) ++ return 0; ++ arg = TREE_TYPE(arg_field); ++ len = add_type_codes(arg, tree_codes, len); ++ gcc_assert(len != 0); ++ return len; ++ } ++ ++ gcc_assert(arg != NULL_TREE && TREE_CODE(arg) == TREE_LIST); ++ while (arg && len < CODES_LIMIT) { ++ len = add_type_codes(TREE_VALUE(arg), tree_codes, len); ++ arg = TREE_CHAIN(arg); ++ } ++ ++ gcc_assert(len != 0); ++ return len; ++} ++ ++static const struct size_overflow_hash *get_function_hash(tree fndecl) ++{ ++ unsigned int hash; ++ const struct size_overflow_hash *entry; ++ unsigned char tree_codes[CODES_LIMIT]; ++ size_t len; ++ const char *func_name = get_asm_name(fndecl); ++ ++ len = get_function_decl(fndecl, tree_codes); ++ if (len == 0) ++ return NULL; ++ ++ hash = get_hash_num(func_name, (const char*) tree_codes, len, 0); ++ ++ entry = size_overflow_hash[hash]; ++ while (entry) { ++ if (!strcmp(entry->name, func_name)) ++ return entry; ++ entry = entry->next; ++ } ++ ++ return NULL; ++} ++ ++static void check_arg_type(const_tree arg) ++{ ++ const_tree type = TREE_TYPE(arg); ++ enum tree_code 
code = TREE_CODE(type); ++ ++ gcc_assert(code == INTEGER_TYPE || code == ENUMERAL_TYPE || ++ (code == POINTER_TYPE && TREE_CODE(TREE_TYPE(type)) == VOID_TYPE) || ++ (code == POINTER_TYPE && TREE_CODE(TREE_TYPE(type)) == INTEGER_TYPE)); ++} ++ ++static int find_arg_number(const_tree arg, tree func) ++{ ++ tree var; ++ unsigned int argnum = 1; ++ ++ if (TREE_CODE(arg) == SSA_NAME) ++ arg = SSA_NAME_VAR(arg); ++ ++ for (var = DECL_ARGUMENTS(func); var; var = TREE_CHAIN(var)) { ++ if (strcmp(NAME(arg), NAME(var))) { ++ argnum++; ++ continue; ++ } ++ check_arg_type(var); ++ return argnum; ++ } ++ gcc_unreachable(); ++} ++ ++static tree create_new_var(tree type) ++{ ++ tree new_var = create_tmp_var(type, "cicus"); ++ ++ add_referenced_var(new_var); ++ mark_sym_for_renaming(new_var); ++ return new_var; ++} ++ ++static gimple create_binary_assign(enum tree_code code, gimple stmt, tree rhs1, tree rhs2) ++{ ++ gimple assign; ++ gimple_stmt_iterator gsi = gsi_for_stmt(stmt); ++ tree type = TREE_TYPE(rhs1); ++ tree lhs = create_new_var(type); ++ ++ assign = gimple_build_assign_with_ops(code, lhs, rhs1, rhs2); ++ gimple_set_lhs(assign, make_ssa_name(lhs, assign)); ++ ++ gsi_insert_before(&gsi, assign, GSI_NEW_STMT); ++ update_stmt(assign); ++ gimple_set_plf(assign, MY_STMT, true); ++ return assign; ++} ++ ++static bool is_bool(const_tree node) ++{ ++ const_tree type; ++ ++ if (node == NULL_TREE) ++ return false; ++ ++ type = TREE_TYPE(node); ++ if (!INTEGRAL_TYPE_P(type)) ++ return false; ++ if (TREE_CODE(type) == BOOLEAN_TYPE) ++ return true; ++ if (TYPE_PRECISION(type) == 1) ++ return true; ++ return false; ++} ++ ++static tree cast_a_tree(tree type, tree var) ++{ ++ gcc_assert(type != NULL_TREE); ++ gcc_assert(var != NULL_TREE); ++ gcc_assert(fold_convertible_p(type, var)); ++ ++ return fold_convert(type, var); ++} ++ ++static gimple build_cast_stmt(tree dst_type, tree rhs, tree lhs, gimple_stmt_iterator *gsi, bool before) ++{ ++ gimple assign; ++ ++ gcc_assert(dst_type != NULL_TREE && rhs != NULL_TREE); ++ if (gsi_end_p(*gsi) && before == AFTER_STMT) ++ gcc_unreachable(); ++ ++ if (lhs == CREATE_NEW_VAR) ++ lhs = create_new_var(dst_type); ++ ++ assign = gimple_build_assign(lhs, cast_a_tree(dst_type, rhs)); ++ ++ if (!gsi_end_p(*gsi)) { ++ location_t loc = gimple_location(gsi_stmt(*gsi)); ++ gimple_set_location(assign, loc); ++ } ++ ++ gimple_set_lhs(assign, make_ssa_name(lhs, assign)); ++ ++ if (before) ++ gsi_insert_before(gsi, assign, GSI_NEW_STMT); ++ else ++ gsi_insert_after(gsi, assign, GSI_NEW_STMT); ++ update_stmt(assign); ++ gimple_set_plf(assign, MY_STMT, true); ++ ++ return assign; ++} ++ ++static tree cast_to_new_size_overflow_type(gimple stmt, tree new_rhs1, tree size_overflow_type, bool before) ++{ ++ const_gimple assign; ++ gimple_stmt_iterator gsi; ++ ++ if (new_rhs1 == NULL_TREE) ++ return NULL_TREE; ++ ++ if (!useless_type_conversion_p(TREE_TYPE(new_rhs1), size_overflow_type)) { ++ gsi = gsi_for_stmt(stmt); ++ assign = build_cast_stmt(size_overflow_type, new_rhs1, CREATE_NEW_VAR, &gsi, before); ++ return gimple_get_lhs(assign); ++ } ++ return new_rhs1; ++} ++ ++static tree follow_overflow_type_and_dup(struct pointer_set_t *visited, gimple stmt, const_tree node, tree new_rhs1, tree new_rhs2, tree new_rhs3) ++{ ++ tree size_overflow_type = get_size_overflow_type(stmt, node); ++ ++ new_rhs1 = cast_to_new_size_overflow_type(stmt, new_rhs1, size_overflow_type, BEFORE_STMT); ++ ++ if (new_rhs2 != NULL_TREE) ++ new_rhs2 = cast_to_new_size_overflow_type(stmt, new_rhs2, size_overflow_type, 
BEFORE_STMT); ++ ++ if (new_rhs3 != NULL_TREE) ++ new_rhs3 = cast_to_new_size_overflow_type(stmt, new_rhs3, size_overflow_type, BEFORE_STMT); ++ ++ return dup_assign(visited, stmt, size_overflow_type, new_rhs1, new_rhs2, new_rhs3); ++} ++ ++ ++static tree create_assign(struct pointer_set_t *visited, gimple oldstmt, tree rhs1, bool before) ++{ ++ tree size_overflow_type, lhs; ++ gimple stmt; ++ gimple_stmt_iterator gsi; ++ ++ if (rhs1 == NULL_TREE) { ++ debug_gimple_stmt(oldstmt); ++ error("%s: rhs1 is NULL_TREE", __func__); ++ gcc_unreachable(); ++ } ++ ++ if (gimple_code(oldstmt) == GIMPLE_ASM) ++ lhs = rhs1; ++ else ++ lhs = gimple_get_lhs(oldstmt); ++ ++ gsi = gsi_for_stmt(oldstmt); ++ pointer_set_insert(visited, oldstmt); ++ if (lookup_stmt_eh_lp(oldstmt) != 0) { ++ basic_block next_bb, cur_bb; ++ const_edge e; ++ ++ gcc_assert(before == false); ++ gcc_assert(stmt_can_throw_internal(oldstmt)); ++ gcc_assert(gimple_code(oldstmt) == GIMPLE_CALL); ++ gcc_assert(!gsi_end_p(gsi)); ++ ++ cur_bb = gimple_bb(oldstmt); ++ next_bb = cur_bb->next_bb; ++ e = find_edge(cur_bb, next_bb); ++ gcc_assert(e != NULL); ++ gcc_assert(e->flags & EDGE_FALLTHRU); ++ ++ gsi = gsi_after_labels(next_bb); ++ gcc_assert(!gsi_end_p(gsi)); ++ ++ before = true; ++ oldstmt = gsi_stmt(gsi); ++ } ++ ++ size_overflow_type = get_size_overflow_type(oldstmt, lhs); ++ ++ stmt = build_cast_stmt(size_overflow_type, rhs1, CREATE_NEW_VAR, &gsi, before); ++ gimple_set_plf(stmt, MY_STMT, true); ++ return gimple_get_lhs(stmt); ++} ++ ++static tree dup_assign(struct pointer_set_t *visited, gimple oldstmt, tree size_overflow_type, tree rhs1, tree rhs2, tree __unused rhs3) ++{ ++ gimple stmt; ++ gimple_stmt_iterator gsi; ++ tree new_var, lhs = gimple_get_lhs(oldstmt); ++ ++ if (gimple_plf(oldstmt, MY_STMT)) ++ return lhs; ++ ++ if (gimple_num_ops(oldstmt) != 4 && rhs1 == NULL_TREE) { ++ rhs1 = gimple_assign_rhs1(oldstmt); ++ rhs1 = create_assign(visited, oldstmt, rhs1, BEFORE_STMT); ++ } ++ if (gimple_num_ops(oldstmt) == 3 && rhs2 == NULL_TREE) { ++ rhs2 = gimple_assign_rhs2(oldstmt); ++ rhs2 = create_assign(visited, oldstmt, rhs2, BEFORE_STMT); ++ } ++ ++ stmt = gimple_copy(oldstmt); ++ gimple_set_location(stmt, gimple_location(oldstmt)); ++ gimple_set_plf(stmt, MY_STMT, true); ++ ++ if (gimple_assign_rhs_code(oldstmt) == WIDEN_MULT_EXPR) ++ gimple_assign_set_rhs_code(stmt, MULT_EXPR); ++ ++ if (is_bool(lhs)) ++ new_var = SSA_NAME_VAR(lhs); ++ else ++ new_var = create_new_var(size_overflow_type); ++ new_var = make_ssa_name(new_var, stmt); ++ gimple_set_lhs(stmt, new_var); ++ ++ if (rhs1 != NULL_TREE) { ++ if (!gimple_assign_cast_p(oldstmt)) ++ rhs1 = cast_a_tree(size_overflow_type, rhs1); ++ gimple_assign_set_rhs1(stmt, rhs1); ++ } ++ ++ if (rhs2 != NULL_TREE) ++ gimple_assign_set_rhs2(stmt, rhs2); ++#if BUILDING_GCC_VERSION >= 4007 ++ if (rhs3 != NULL_TREE) ++ gimple_assign_set_rhs3(stmt, rhs3); ++#endif ++ gimple_set_vuse(stmt, gimple_vuse(oldstmt)); ++ gimple_set_vdef(stmt, gimple_vdef(oldstmt)); ++ ++ gsi = gsi_for_stmt(oldstmt); ++ gsi_insert_after(&gsi, stmt, GSI_SAME_STMT); ++ update_stmt(stmt); ++ pointer_set_insert(visited, oldstmt); ++ return gimple_get_lhs(stmt); ++} ++ ++static gimple overflow_create_phi_node(gimple oldstmt, tree result) ++{ ++ basic_block bb; ++ gimple phi; ++ gimple_stmt_iterator gsi = gsi_for_stmt(oldstmt); ++ ++ bb = gsi_bb(gsi); ++ ++ phi = create_phi_node(result, bb); ++ gsi = gsi_last(phi_nodes(bb)); ++ gsi_remove(&gsi, false); ++ ++ gsi = gsi_for_stmt(oldstmt); ++ gsi_insert_after(&gsi, phi, 
GSI_NEW_STMT); ++ gimple_set_bb(phi, bb); ++ gimple_set_plf(phi, MY_STMT, true); ++ return phi; ++} ++ ++static basic_block create_a_first_bb(void) ++{ ++ basic_block first_bb; ++ ++ first_bb = split_block_after_labels(ENTRY_BLOCK_PTR)->dest; ++ if (dom_info_available_p(CDI_DOMINATORS)) ++ set_immediate_dominator(CDI_DOMINATORS, first_bb, ENTRY_BLOCK_PTR); ++ return first_bb; ++} ++ ++static tree cast_old_phi_arg(gimple oldstmt, tree size_overflow_type, tree arg, tree new_var, unsigned int i) ++{ ++ basic_block bb; ++ const_gimple newstmt; ++ gimple_stmt_iterator gsi; ++ bool before = BEFORE_STMT; ++ ++ if (TREE_CODE(arg) == SSA_NAME && gimple_code(get_def_stmt(arg)) != GIMPLE_NOP) { ++ gsi = gsi_for_stmt(get_def_stmt(arg)); ++ newstmt = build_cast_stmt(size_overflow_type, arg, new_var, &gsi, AFTER_STMT); ++ return gimple_get_lhs(newstmt); ++ } ++ ++ bb = gimple_phi_arg_edge(oldstmt, i)->src; ++ gsi = gsi_after_labels(bb); ++ if (bb->index == 0) { ++ bb = create_a_first_bb(); ++ gsi = gsi_start_bb(bb); ++ } ++ newstmt = build_cast_stmt(size_overflow_type, arg, new_var, &gsi, before); ++ return gimple_get_lhs(newstmt); ++} ++ ++static const_gimple handle_new_phi_arg(const_tree arg, tree new_var, tree new_rhs) ++{ ++ gimple newstmt; ++ gimple_stmt_iterator gsi; ++ void (*gsi_insert)(gimple_stmt_iterator *, gimple, enum gsi_iterator_update); ++ gimple def_newstmt = get_def_stmt(new_rhs); ++ ++ gsi_insert = gsi_insert_after; ++ gsi = gsi_for_stmt(def_newstmt); ++ ++ switch (gimple_code(get_def_stmt(arg))) { ++ case GIMPLE_PHI: ++ newstmt = gimple_build_assign(new_var, new_rhs); ++ gsi = gsi_after_labels(gimple_bb(def_newstmt)); ++ gsi_insert = gsi_insert_before; ++ break; ++ case GIMPLE_ASM: ++ case GIMPLE_CALL: ++ newstmt = gimple_build_assign(new_var, new_rhs); ++ break; ++ case GIMPLE_ASSIGN: ++ newstmt = gimple_build_assign(new_var, gimple_get_lhs(def_newstmt)); ++ break; ++ default: ++ /* unknown gimple_code (handle_build_new_phi_arg) */ ++ gcc_unreachable(); ++ } ++ ++ gimple_set_lhs(newstmt, make_ssa_name(new_var, newstmt)); ++ gsi_insert(&gsi, newstmt, GSI_NEW_STMT); ++ gimple_set_plf(newstmt, MY_STMT, true); ++ update_stmt(newstmt); ++ return newstmt; ++} ++ ++static tree build_new_phi_arg(struct pointer_set_t *visited, tree size_overflow_type, tree arg, tree new_var) ++{ ++ const_gimple newstmt; ++ gimple def_stmt; ++ tree new_rhs; ++ ++ new_rhs = expand(visited, arg); ++ if (new_rhs == NULL_TREE) ++ return NULL_TREE; ++ ++ def_stmt = get_def_stmt(new_rhs); ++ if (gimple_code(def_stmt) == GIMPLE_NOP) ++ return NULL_TREE; ++ new_rhs = cast_to_new_size_overflow_type(def_stmt, new_rhs, size_overflow_type, AFTER_STMT); ++ ++ newstmt = handle_new_phi_arg(arg, new_var, new_rhs); ++ return gimple_get_lhs(newstmt); ++} ++ ++static tree build_new_phi(struct pointer_set_t *visited, tree orig_result) ++{ ++ gimple phi, oldstmt = get_def_stmt(orig_result); ++ tree new_result, size_overflow_type; ++ unsigned int i; ++ unsigned int n = gimple_phi_num_args(oldstmt); ++ ++ size_overflow_type = get_size_overflow_type(oldstmt, orig_result); ++ ++ new_result = create_new_var(size_overflow_type); ++ ++ pointer_set_insert(visited, oldstmt); ++ phi = overflow_create_phi_node(oldstmt, new_result); ++ for (i = 0; i < n; i++) { ++ tree arg, lhs; ++ ++ arg = gimple_phi_arg_def(oldstmt, i); ++ if (is_gimple_constant(arg)) ++ arg = cast_a_tree(size_overflow_type, arg); ++ lhs = build_new_phi_arg(visited, size_overflow_type, arg, new_result); ++ if (lhs == NULL_TREE) ++ lhs = cast_old_phi_arg(oldstmt, 
size_overflow_type, arg, new_result, i); ++ add_phi_arg(phi, lhs, gimple_phi_arg_edge(oldstmt, i), gimple_location(oldstmt)); ++ } ++ ++ update_stmt(phi); ++ return gimple_phi_result(phi); ++} ++ ++static tree change_assign_rhs(gimple stmt, const_tree orig_rhs, tree new_rhs) ++{ ++ const_gimple assign; ++ gimple_stmt_iterator gsi = gsi_for_stmt(stmt); ++ tree origtype = TREE_TYPE(orig_rhs); ++ ++ gcc_assert(gimple_code(stmt) == GIMPLE_ASSIGN); ++ ++ assign = build_cast_stmt(origtype, new_rhs, CREATE_NEW_VAR, &gsi, BEFORE_STMT); ++ return gimple_get_lhs(assign); ++} ++ ++static void change_rhs1(gimple stmt, tree new_rhs1) ++{ ++ tree assign_rhs; ++ const_tree rhs = gimple_assign_rhs1(stmt); ++ ++ assign_rhs = change_assign_rhs(stmt, rhs, new_rhs1); ++ gimple_assign_set_rhs1(stmt, assign_rhs); ++ update_stmt(stmt); ++} ++ ++static bool check_mode_type(const_gimple stmt) ++{ ++ const_tree lhs = gimple_get_lhs(stmt); ++ const_tree lhs_type = TREE_TYPE(lhs); ++ const_tree rhs_type = TREE_TYPE(gimple_assign_rhs1(stmt)); ++ enum machine_mode lhs_mode = TYPE_MODE(lhs_type); ++ enum machine_mode rhs_mode = TYPE_MODE(rhs_type); ++ ++ if (rhs_mode == lhs_mode && TYPE_UNSIGNED(rhs_type) == TYPE_UNSIGNED(lhs_type)) ++ return false; ++ ++ if (rhs_mode == SImode && lhs_mode == DImode && (TYPE_UNSIGNED(rhs_type) || !TYPE_UNSIGNED(lhs_type))) ++ return false; ++ ++ return true; ++} ++ ++static bool check_undefined_integer_operation(const_gimple stmt) ++{ ++ const_gimple def_stmt; ++ const_tree lhs = gimple_get_lhs(stmt); ++ const_tree rhs1 = gimple_assign_rhs1(stmt); ++ const_tree rhs1_type = TREE_TYPE(rhs1); ++ const_tree lhs_type = TREE_TYPE(lhs); ++ ++ if (TYPE_MODE(rhs1_type) != TYPE_MODE(lhs_type) || TYPE_UNSIGNED(rhs1_type) == TYPE_UNSIGNED(lhs_type)) ++ return false; ++ ++ def_stmt = get_def_stmt(rhs1); ++ if (gimple_code(def_stmt) != GIMPLE_ASSIGN) ++ return false; ++ ++ if (gimple_assign_rhs_code(def_stmt) != MINUS_EXPR) ++ return false; ++ return true; ++} ++ ++static bool is_a_cast_and_const_overflow(const_tree no_const_rhs) ++{ ++ const_tree rhs1, lhs, rhs1_type, lhs_type; ++ enum machine_mode lhs_mode, rhs_mode; ++ gimple def_stmt = get_def_stmt(no_const_rhs); ++ ++ if (!gimple_assign_cast_p(def_stmt)) ++ return false; ++ ++ rhs1 = gimple_assign_rhs1(def_stmt); ++ lhs = gimple_get_lhs(def_stmt); ++ rhs1_type = TREE_TYPE(rhs1); ++ lhs_type = TREE_TYPE(lhs); ++ rhs_mode = TYPE_MODE(rhs1_type); ++ lhs_mode = TYPE_MODE(lhs_type); ++ if (TYPE_UNSIGNED(lhs_type) == TYPE_UNSIGNED(rhs1_type) || lhs_mode != rhs_mode) ++ return false; ++ ++ return true; ++} ++ ++static tree handle_unary_rhs(struct pointer_set_t *visited, gimple stmt) ++{ ++ tree size_overflow_type, lhs = gimple_get_lhs(stmt); ++ tree new_rhs1, rhs1 = gimple_assign_rhs1(stmt); ++ const_tree rhs1_type = TREE_TYPE(rhs1); ++ const_tree lhs_type = TREE_TYPE(lhs); ++ ++ new_rhs1 = expand(visited, rhs1); ++ ++ if (new_rhs1 == NULL_TREE || TREE_CODE(rhs1_type) == POINTER_TYPE) ++ return create_assign(visited, stmt, lhs, AFTER_STMT); ++ ++ if (gimple_plf(stmt, MY_STMT)) ++ return lhs; ++ ++ if (gimple_plf(stmt, NO_CAST_CHECK)) ++ return follow_overflow_type_and_dup(visited, stmt, rhs1, new_rhs1, NULL_TREE, NULL_TREE); ++ ++ if (gimple_assign_rhs_code(stmt) == BIT_NOT_EXPR) { ++ size_overflow_type = get_size_overflow_type(stmt, rhs1); ++ new_rhs1 = cast_to_new_size_overflow_type(stmt, new_rhs1, size_overflow_type, BEFORE_STMT); ++ check_size_overflow(stmt, size_overflow_type, new_rhs1, rhs1, BEFORE_STMT); ++ return create_assign(visited, stmt, 
lhs, AFTER_STMT); ++ } ++ ++ if (!gimple_assign_cast_p(stmt) || check_undefined_integer_operation(stmt)) ++ return follow_overflow_type_and_dup(visited, stmt, lhs, new_rhs1, NULL_TREE, NULL_TREE); ++ ++ size_overflow_type = get_size_overflow_type(stmt, rhs1); ++ new_rhs1 = cast_to_new_size_overflow_type(stmt, new_rhs1, size_overflow_type, BEFORE_STMT); ++ ++ change_rhs1(stmt, new_rhs1); ++ check_size_overflow(stmt, size_overflow_type, new_rhs1, rhs1, BEFORE_STMT); ++ ++ rhs1 = gimple_assign_rhs1(stmt); ++ rhs1_type = TREE_TYPE(rhs1); ++ if (TYPE_UNSIGNED(rhs1_type) != TYPE_UNSIGNED(lhs_type)) ++ return create_assign(visited, stmt, rhs1, AFTER_STMT); ++ ++ if (!check_mode_type(stmt)) ++ return create_assign(visited, stmt, lhs, AFTER_STMT); ++ ++ size_overflow_type = get_size_overflow_type(stmt, lhs); ++ new_rhs1 = cast_to_new_size_overflow_type(stmt, new_rhs1, size_overflow_type, BEFORE_STMT); ++ ++ check_size_overflow(stmt, size_overflow_type, new_rhs1, lhs, BEFORE_STMT); ++ ++ return create_assign(visited, stmt, lhs, AFTER_STMT); ++} ++ ++static tree handle_unary_ops(struct pointer_set_t *visited, tree lhs) ++{ ++ gimple def_stmt = get_def_stmt(lhs); ++ tree rhs1 = gimple_assign_rhs1(def_stmt); ++ ++ if (is_gimple_constant(rhs1)) ++ return create_assign(visited, def_stmt, lhs, AFTER_STMT); ++ ++ gcc_assert(TREE_CODE(rhs1) != COND_EXPR); ++ switch (TREE_CODE(rhs1)) { ++ case SSA_NAME: ++ return handle_unary_rhs(visited, def_stmt); ++ case ARRAY_REF: ++ case BIT_FIELD_REF: ++ case ADDR_EXPR: ++ case COMPONENT_REF: ++ case INDIRECT_REF: ++#if BUILDING_GCC_VERSION >= 4006 ++ case MEM_REF: ++#endif ++ case PARM_DECL: ++ case TARGET_MEM_REF: ++ case VAR_DECL: ++ return create_assign(visited, def_stmt, lhs, AFTER_STMT); ++ ++ default: ++ debug_gimple_stmt(def_stmt); ++ debug_tree(rhs1); ++ gcc_unreachable(); ++ } ++} ++ ++static void insert_cond(basic_block cond_bb, tree arg, enum tree_code cond_code, tree type_value) ++{ ++ gimple cond_stmt; ++ gimple_stmt_iterator gsi = gsi_last_bb(cond_bb); ++ ++ cond_stmt = gimple_build_cond(cond_code, arg, type_value, NULL_TREE, NULL_TREE); ++ gsi_insert_after(&gsi, cond_stmt, GSI_CONTINUE_LINKING); ++ update_stmt(cond_stmt); ++} ++ ++static tree create_string_param(tree string) ++{ ++ tree i_type, a_type; ++ const int length = TREE_STRING_LENGTH(string); ++ ++ gcc_assert(length > 0); ++ ++ i_type = build_index_type(build_int_cst(NULL_TREE, length - 1)); ++ a_type = build_array_type(char_type_node, i_type); ++ ++ TREE_TYPE(string) = a_type; ++ TREE_CONSTANT(string) = 1; ++ TREE_READONLY(string) = 1; ++ ++ return build1(ADDR_EXPR, ptr_type_node, string); ++} ++ ++static void insert_cond_result(basic_block bb_true, const_gimple stmt, const_tree arg, bool min) ++{ ++ gimple func_stmt; ++ const_gimple def_stmt; ++ const_tree loc_line; ++ tree loc_file, ssa_name, current_func; ++ expanded_location xloc; ++ char ssa_name_buf[256]; ++ gimple_stmt_iterator gsi = gsi_start_bb(bb_true); ++ ++ def_stmt = get_def_stmt(arg); ++ xloc = expand_location(gimple_location(def_stmt)); ++ ++ if (!gimple_has_location(def_stmt)) { ++ xloc = expand_location(gimple_location(stmt)); ++ if (!gimple_has_location(stmt)) ++ xloc = expand_location(DECL_SOURCE_LOCATION(current_function_decl)); ++ } ++ ++ loc_line = build_int_cstu(unsigned_type_node, xloc.line); ++ ++ loc_file = build_string(strlen(xloc.file) + 1, xloc.file); ++ loc_file = create_string_param(loc_file); ++ ++ current_func = build_string(NAME_LEN(current_function_decl) + 1, NAME(current_function_decl)); ++ current_func = 
create_string_param(current_func); ++ ++ snprintf(ssa_name_buf, 256, "%s_%u (%s)\n", NAME(SSA_NAME_VAR(arg)), SSA_NAME_VERSION(arg), min ? "min" : "max"); ++ ssa_name = build_string(256, ssa_name_buf); ++ ssa_name = create_string_param(ssa_name); ++ ++ // void report_size_overflow(const char *file, unsigned int line, const char *func, const char *ssa_name) ++ func_stmt = gimple_build_call(report_size_overflow_decl, 4, loc_file, loc_line, current_func, ssa_name); ++ ++ gsi_insert_after(&gsi, func_stmt, GSI_CONTINUE_LINKING); ++} ++ ++static void __unused print_the_code_insertions(const_gimple stmt) ++{ ++ location_t loc = gimple_location(stmt); ++ ++ inform(loc, "Integer size_overflow check applied here."); ++} ++ ++static void insert_check_size_overflow(gimple stmt, enum tree_code cond_code, tree arg, tree type_value, bool before, bool min) ++{ ++ basic_block cond_bb, join_bb, bb_true; ++ edge e; ++ gimple_stmt_iterator gsi = gsi_for_stmt(stmt); ++ ++ cond_bb = gimple_bb(stmt); ++ if (before) ++ gsi_prev(&gsi); ++ if (gsi_end_p(gsi)) ++ e = split_block_after_labels(cond_bb); ++ else ++ e = split_block(cond_bb, gsi_stmt(gsi)); ++ cond_bb = e->src; ++ join_bb = e->dest; ++ e->flags = EDGE_FALSE_VALUE; ++ e->probability = REG_BR_PROB_BASE; ++ ++ bb_true = create_empty_bb(cond_bb); ++ make_edge(cond_bb, bb_true, EDGE_TRUE_VALUE); ++ make_edge(cond_bb, join_bb, EDGE_FALSE_VALUE); ++ make_edge(bb_true, join_bb, EDGE_FALLTHRU); ++ ++ if (dom_info_available_p(CDI_DOMINATORS)) { ++ set_immediate_dominator(CDI_DOMINATORS, bb_true, cond_bb); ++ set_immediate_dominator(CDI_DOMINATORS, join_bb, cond_bb); ++ } ++ ++ if (current_loops != NULL) { ++ gcc_assert(cond_bb->loop_father == join_bb->loop_father); ++ add_bb_to_loop(bb_true, cond_bb->loop_father); ++ } ++ ++ insert_cond(cond_bb, arg, cond_code, type_value); ++ insert_cond_result(bb_true, stmt, arg, min); ++ ++// print_the_code_insertions(stmt); ++} ++ ++static void check_size_overflow(gimple stmt, tree size_overflow_type, tree cast_rhs, tree rhs, bool before) ++{ ++ const_tree rhs_type = TREE_TYPE(rhs); ++ tree cast_rhs_type, type_max_type, type_min_type, type_max, type_min; ++ ++ gcc_assert(rhs_type != NULL_TREE); ++ if (TREE_CODE(rhs_type) == POINTER_TYPE) ++ return; ++ ++ gcc_assert(TREE_CODE(rhs_type) == INTEGER_TYPE || TREE_CODE(rhs_type) == BOOLEAN_TYPE || TREE_CODE(rhs_type) == ENUMERAL_TYPE); ++ ++ type_max = cast_a_tree(size_overflow_type, TYPE_MAX_VALUE(rhs_type)); ++ type_min = cast_a_tree(size_overflow_type, TYPE_MIN_VALUE(rhs_type)); ++ ++ gcc_assert(!TREE_OVERFLOW(type_max)); ++ ++ cast_rhs_type = TREE_TYPE(cast_rhs); ++ type_max_type = TREE_TYPE(type_max); ++ type_min_type = TREE_TYPE(type_min); ++ gcc_assert(useless_type_conversion_p(cast_rhs_type, type_max_type)); ++ gcc_assert(useless_type_conversion_p(type_max_type, type_min_type)); ++ ++ insert_check_size_overflow(stmt, GT_EXPR, cast_rhs, type_max, before, false); ++ insert_check_size_overflow(stmt, LT_EXPR, cast_rhs, type_min, before, true); ++} ++ ++static tree get_size_overflow_type_for_intentional_overflow(gimple def_stmt, tree change_rhs) ++{ ++ gimple change_rhs_def_stmt; ++ tree lhs = gimple_get_lhs(def_stmt); ++ tree lhs_type = TREE_TYPE(lhs); ++ tree rhs1_type = TREE_TYPE(gimple_assign_rhs1(def_stmt)); ++ tree rhs2_type = TREE_TYPE(gimple_assign_rhs2(def_stmt)); ++ ++ if (change_rhs == NULL_TREE) ++ return get_size_overflow_type(def_stmt, lhs); ++ ++ change_rhs_def_stmt = get_def_stmt(change_rhs); ++ ++ if (TREE_CODE_CLASS(gimple_assign_rhs_code(def_stmt)) == 
tcc_comparison) ++ return get_size_overflow_type(change_rhs_def_stmt, change_rhs); ++ ++ if (gimple_assign_rhs_code(def_stmt) == LSHIFT_EXPR) ++ return get_size_overflow_type(change_rhs_def_stmt, change_rhs); ++ ++ if (gimple_assign_rhs_code(def_stmt) == RSHIFT_EXPR) ++ return get_size_overflow_type(change_rhs_def_stmt, change_rhs); ++ ++ if (!useless_type_conversion_p(lhs_type, rhs1_type) || !useless_type_conversion_p(rhs1_type, rhs2_type)) { ++ debug_gimple_stmt(def_stmt); ++ gcc_unreachable(); ++ } ++ ++ return get_size_overflow_type(def_stmt, lhs); ++} ++ ++static bool is_a_constant_overflow(const_gimple stmt, const_tree rhs) ++{ ++ if (gimple_assign_rhs_code(stmt) == MIN_EXPR) ++ return false; ++ if (!is_gimple_constant(rhs)) ++ return false; ++ return true; ++} ++ ++static tree get_cast_def_stmt_rhs(const_tree new_rhs) ++{ ++ gimple def_stmt; ++ ++ def_stmt = get_def_stmt(new_rhs); ++ // get_size_overflow_type ++ if (LONG_TYPE_SIZE != GET_MODE_BITSIZE(SImode)) ++ gcc_assert(gimple_assign_cast_p(def_stmt)); ++ return gimple_assign_rhs1(def_stmt); ++} ++ ++static tree cast_to_int_TI_type_and_check(gimple stmt, tree new_rhs) ++{ ++ gimple_stmt_iterator gsi; ++ const_gimple cast_stmt; ++ gimple def_stmt; ++ enum machine_mode mode = TYPE_MODE(TREE_TYPE(new_rhs)); ++ ++ if (mode != TImode && mode != DImode) { ++ def_stmt = get_def_stmt(new_rhs); ++ gcc_assert(gimple_assign_cast_p(def_stmt)); ++ new_rhs = gimple_assign_rhs1(def_stmt); ++ mode = TYPE_MODE(TREE_TYPE(new_rhs)); ++ } ++ ++ gcc_assert(mode == TImode || mode == DImode); ++ ++ if (mode == TYPE_MODE(intTI_type_node) && useless_type_conversion_p(TREE_TYPE(new_rhs), intTI_type_node)) ++ return new_rhs; ++ ++ gsi = gsi_for_stmt(stmt); ++ cast_stmt = build_cast_stmt(intTI_type_node, new_rhs, CREATE_NEW_VAR, &gsi, BEFORE_STMT); ++ new_rhs = gimple_get_lhs(cast_stmt); ++ ++ if (mode == DImode) ++ return new_rhs; ++ ++ check_size_overflow(stmt, intTI_type_node, new_rhs, new_rhs, BEFORE_STMT); ++ ++ return new_rhs; ++} ++ ++static bool is_an_integer_trunction(const_gimple stmt) ++{ ++ gimple rhs1_def_stmt, rhs2_def_stmt; ++ const_tree rhs1_def_stmt_rhs1, rhs2_def_stmt_rhs1; ++ enum machine_mode rhs1_def_stmt_rhs1_mode, rhs2_def_stmt_rhs1_mode; ++ const_tree rhs1 = gimple_assign_rhs1(stmt); ++ const_tree rhs2 = gimple_assign_rhs2(stmt); ++ enum machine_mode rhs1_mode = TYPE_MODE(TREE_TYPE(rhs1)); ++ enum machine_mode rhs2_mode = TYPE_MODE(TREE_TYPE(rhs2)); ++ ++ if (is_gimple_constant(rhs1) || is_gimple_constant(rhs2)) ++ return false; ++ ++ gcc_assert(TREE_CODE(rhs1) == SSA_NAME && TREE_CODE(rhs2) == SSA_NAME); ++ ++ if (gimple_assign_rhs_code(stmt) != MINUS_EXPR || rhs1_mode != SImode || rhs2_mode != SImode) ++ return false; ++ ++ rhs1_def_stmt = get_def_stmt(rhs1); ++ rhs2_def_stmt = get_def_stmt(rhs2); ++ if (!gimple_assign_cast_p(rhs1_def_stmt) || !gimple_assign_cast_p(rhs2_def_stmt)) ++ return false; ++ ++ rhs1_def_stmt_rhs1 = gimple_assign_rhs1(rhs1_def_stmt); ++ rhs2_def_stmt_rhs1 = gimple_assign_rhs1(rhs2_def_stmt); ++ rhs1_def_stmt_rhs1_mode = TYPE_MODE(TREE_TYPE(rhs1_def_stmt_rhs1)); ++ rhs2_def_stmt_rhs1_mode = TYPE_MODE(TREE_TYPE(rhs2_def_stmt_rhs1)); ++ if (rhs1_def_stmt_rhs1_mode != DImode || rhs2_def_stmt_rhs1_mode != DImode) ++ return false; ++ ++ gimple_set_plf(rhs1_def_stmt, NO_CAST_CHECK, true); ++ gimple_set_plf(rhs2_def_stmt, NO_CAST_CHECK, true); ++ return true; ++} ++ ++static tree handle_integer_truncation(struct pointer_set_t *visited, const_tree lhs) ++{ ++ tree new_rhs1, new_rhs2; ++ tree new_rhs1_def_stmt_rhs1, 
new_rhs2_def_stmt_rhs1, new_lhs; ++ tree new_rhs1_def_stmt_rhs1_type, new_rhs2_def_stmt_rhs1_type; ++ gimple assign, stmt = get_def_stmt(lhs); ++ tree rhs1 = gimple_assign_rhs1(stmt); ++ tree rhs2 = gimple_assign_rhs2(stmt); ++ ++ if (!is_an_integer_trunction(stmt)) ++ return NULL_TREE; ++ ++ new_rhs1 = expand(visited, rhs1); ++ new_rhs2 = expand(visited, rhs2); ++ ++ new_rhs1_def_stmt_rhs1 = get_cast_def_stmt_rhs(new_rhs1); ++ new_rhs2_def_stmt_rhs1 = get_cast_def_stmt_rhs(new_rhs2); ++ ++ new_rhs1_def_stmt_rhs1_type = TREE_TYPE(new_rhs1_def_stmt_rhs1); ++ new_rhs2_def_stmt_rhs1_type = TREE_TYPE(new_rhs2_def_stmt_rhs1); ++ ++ if (!useless_type_conversion_p(new_rhs1_def_stmt_rhs1_type, new_rhs2_def_stmt_rhs1_type)) { ++ new_rhs1_def_stmt_rhs1 = cast_to_int_TI_type_and_check(stmt, new_rhs1_def_stmt_rhs1); ++ new_rhs2_def_stmt_rhs1 = cast_to_int_TI_type_and_check(stmt, new_rhs2_def_stmt_rhs1); ++ } ++ ++ assign = create_binary_assign(MINUS_EXPR, stmt, new_rhs1_def_stmt_rhs1, new_rhs2_def_stmt_rhs1); ++ new_lhs = gimple_get_lhs(assign); ++ check_size_overflow(assign, TREE_TYPE(new_lhs), new_lhs, rhs1, AFTER_STMT); ++ ++ return follow_overflow_type_and_dup(visited, stmt, lhs, new_rhs1, new_rhs2, NULL_TREE); ++} ++ ++static bool is_a_neg_overflow(const_gimple stmt, const_tree rhs) ++{ ++ const_gimple def_stmt; ++ ++ if (TREE_CODE(rhs) != SSA_NAME) ++ return false; ++ ++ if (gimple_assign_rhs_code(stmt) != PLUS_EXPR) ++ return false; ++ ++ def_stmt = get_def_stmt(rhs); ++ if (gimple_code(def_stmt) != GIMPLE_ASSIGN || gimple_assign_rhs_code(def_stmt) != BIT_NOT_EXPR) ++ return false; ++ ++ return true; ++} ++ ++static tree handle_intentional_overflow(struct pointer_set_t *visited, bool check_overflow, gimple stmt, tree change_rhs, tree new_rhs1, tree new_rhs2) ++{ ++ tree new_rhs, size_overflow_type, orig_rhs; ++ void (*gimple_assign_set_rhs)(gimple, tree); ++ tree rhs1 = gimple_assign_rhs1(stmt); ++ tree rhs2 = gimple_assign_rhs2(stmt); ++ tree lhs = gimple_get_lhs(stmt); ++ ++ if (change_rhs == NULL_TREE) ++ return create_assign(visited, stmt, lhs, AFTER_STMT); ++ ++ if (new_rhs2 == NULL_TREE) { ++ size_overflow_type = get_size_overflow_type_for_intentional_overflow(stmt, new_rhs1); ++ new_rhs2 = cast_a_tree(size_overflow_type, rhs2); ++ orig_rhs = rhs1; ++ gimple_assign_set_rhs = &gimple_assign_set_rhs1; ++ } else { ++ size_overflow_type = get_size_overflow_type_for_intentional_overflow(stmt, new_rhs2); ++ new_rhs1 = cast_a_tree(size_overflow_type, rhs1); ++ orig_rhs = rhs2; ++ gimple_assign_set_rhs = &gimple_assign_set_rhs2; ++ } ++ ++ change_rhs = cast_to_new_size_overflow_type(stmt, change_rhs, size_overflow_type, BEFORE_STMT); ++ ++ if (check_overflow) ++ check_size_overflow(stmt, size_overflow_type, change_rhs, orig_rhs, BEFORE_STMT); ++ ++ new_rhs = change_assign_rhs(stmt, orig_rhs, change_rhs); ++ gimple_assign_set_rhs(stmt, new_rhs); ++ update_stmt(stmt); ++ ++ return create_assign(visited, stmt, lhs, AFTER_STMT); ++} ++ ++static tree handle_binary_ops(struct pointer_set_t *visited, tree lhs) ++{ ++ tree rhs1, rhs2, new_lhs; ++ gimple def_stmt = get_def_stmt(lhs); ++ tree new_rhs1 = NULL_TREE; ++ tree new_rhs2 = NULL_TREE; ++ ++ rhs1 = gimple_assign_rhs1(def_stmt); ++ rhs2 = gimple_assign_rhs2(def_stmt); ++ ++ /* no DImode/TImode division in the 32/64 bit kernel */ ++ switch (gimple_assign_rhs_code(def_stmt)) { ++ case RDIV_EXPR: ++ case TRUNC_DIV_EXPR: ++ case CEIL_DIV_EXPR: ++ case FLOOR_DIV_EXPR: ++ case ROUND_DIV_EXPR: ++ case TRUNC_MOD_EXPR: ++ case CEIL_MOD_EXPR: ++ case 
FLOOR_MOD_EXPR: ++ case ROUND_MOD_EXPR: ++ case EXACT_DIV_EXPR: ++ case POINTER_PLUS_EXPR: ++ case BIT_AND_EXPR: ++ return create_assign(visited, def_stmt, lhs, AFTER_STMT); ++ default: ++ break; ++ } ++ ++ new_lhs = handle_integer_truncation(visited, lhs); ++ if (new_lhs != NULL_TREE) ++ return new_lhs; ++ ++ if (TREE_CODE(rhs1) == SSA_NAME) ++ new_rhs1 = expand(visited, rhs1); ++ if (TREE_CODE(rhs2) == SSA_NAME) ++ new_rhs2 = expand(visited, rhs2); ++ ++ if (is_a_neg_overflow(def_stmt, rhs2)) ++ return handle_intentional_overflow(visited, true, def_stmt, new_rhs1, new_rhs1, NULL_TREE); ++ if (is_a_neg_overflow(def_stmt, rhs1)) ++ return handle_intentional_overflow(visited, true, def_stmt, new_rhs2, NULL_TREE, new_rhs2); ++ ++ if (is_a_constant_overflow(def_stmt, rhs2)) ++ return handle_intentional_overflow(visited, !is_a_cast_and_const_overflow(rhs1), def_stmt, new_rhs1, new_rhs1, NULL_TREE); ++ if (is_a_constant_overflow(def_stmt, rhs1)) ++ return handle_intentional_overflow(visited, !is_a_cast_and_const_overflow(rhs2), def_stmt, new_rhs2, NULL_TREE, new_rhs2); ++ ++ return follow_overflow_type_and_dup(visited, def_stmt, lhs, new_rhs1, new_rhs2, NULL_TREE); ++} ++ ++#if BUILDING_GCC_VERSION >= 4007 ++static tree get_new_rhs(struct pointer_set_t *visited, tree size_overflow_type, tree rhs) ++{ ++ if (is_gimple_constant(rhs)) ++ return cast_a_tree(size_overflow_type, rhs); ++ if (TREE_CODE(rhs) != SSA_NAME) ++ return NULL_TREE; ++ return expand(visited, rhs); ++} ++ ++static tree handle_ternary_ops(struct pointer_set_t *visited, tree lhs) ++{ ++ tree rhs1, rhs2, rhs3, new_rhs1, new_rhs2, new_rhs3, size_overflow_type; ++ gimple def_stmt = get_def_stmt(lhs); ++ ++ size_overflow_type = get_size_overflow_type(def_stmt, lhs); ++ ++ rhs1 = gimple_assign_rhs1(def_stmt); ++ rhs2 = gimple_assign_rhs2(def_stmt); ++ rhs3 = gimple_assign_rhs3(def_stmt); ++ new_rhs1 = get_new_rhs(visited, size_overflow_type, rhs1); ++ new_rhs2 = get_new_rhs(visited, size_overflow_type, rhs2); ++ new_rhs3 = get_new_rhs(visited, size_overflow_type, rhs3); ++ ++ return follow_overflow_type_and_dup(visited, def_stmt, lhs, new_rhs1, new_rhs2, new_rhs3); ++} ++#endif ++ ++static tree get_size_overflow_type(gimple stmt, const_tree node) ++{ ++ const_tree type; ++ ++ gcc_assert(node != NULL_TREE); ++ ++ type = TREE_TYPE(node); ++ ++ if (gimple_plf(stmt, MY_STMT)) ++ return TREE_TYPE(node); ++ ++ switch (TYPE_MODE(type)) { ++ case QImode: ++ return (TYPE_UNSIGNED(type)) ? unsigned_intHI_type_node : intHI_type_node; ++ case HImode: ++ return (TYPE_UNSIGNED(type)) ? unsigned_intSI_type_node : intSI_type_node; ++ case SImode: ++ return (TYPE_UNSIGNED(type)) ? unsigned_intDI_type_node : intDI_type_node; ++ case DImode: ++ if (LONG_TYPE_SIZE == GET_MODE_BITSIZE(SImode)) ++ return (TYPE_UNSIGNED(type)) ? unsigned_intDI_type_node : intDI_type_node; ++ return (TYPE_UNSIGNED(type)) ? 
unsigned_intTI_type_node : intTI_type_node; ++ default: ++ debug_tree((tree)node); ++ error("%s: unsupported gcc configuration.", __func__); ++ gcc_unreachable(); ++ } ++} ++ ++static tree expand_visited(gimple def_stmt) ++{ ++ const_gimple next_stmt; ++ gimple_stmt_iterator gsi = gsi_for_stmt(def_stmt); ++ ++ gsi_next(&gsi); ++ next_stmt = gsi_stmt(gsi); ++ ++ gcc_assert(gimple_plf((gimple)next_stmt, MY_STMT)); ++ ++ switch (gimple_code(next_stmt)) { ++ case GIMPLE_ASSIGN: ++ return gimple_get_lhs(next_stmt); ++ case GIMPLE_PHI: ++ return gimple_phi_result(next_stmt); ++ case GIMPLE_CALL: ++ return gimple_call_lhs(next_stmt); ++ default: ++ return NULL_TREE; ++ } ++} ++ ++static tree expand(struct pointer_set_t *visited, tree lhs) ++{ ++ gimple def_stmt; ++ enum tree_code code = TREE_CODE(TREE_TYPE(lhs)); ++ ++ if (is_gimple_constant(lhs)) ++ return NULL_TREE; ++ ++ if (TREE_CODE(lhs) == ADDR_EXPR) ++ return NULL_TREE; ++ ++ if (code == REAL_TYPE) ++ return NULL_TREE; ++ ++ gcc_assert(code == INTEGER_TYPE || code == POINTER_TYPE || code == BOOLEAN_TYPE || code == ENUMERAL_TYPE); ++ ++ ++ def_stmt = get_def_stmt(lhs); ++ ++ if (!def_stmt) ++ return NULL_TREE; ++ ++ if (gimple_plf(def_stmt, MY_STMT)) ++ return lhs; ++ ++ if (pointer_set_contains(visited, def_stmt)) ++ return expand_visited(def_stmt); ++ ++ switch (gimple_code(def_stmt)) { ++ case GIMPLE_NOP: ++ return NULL_TREE; ++ case GIMPLE_PHI: ++ return build_new_phi(visited, lhs); ++ case GIMPLE_CALL: ++ case GIMPLE_ASM: ++ return create_assign(visited, def_stmt, lhs, AFTER_STMT); ++ case GIMPLE_ASSIGN: ++ switch (gimple_num_ops(def_stmt)) { ++ case 2: ++ return handle_unary_ops(visited, lhs); ++ case 3: ++ return handle_binary_ops(visited, lhs); ++#if BUILDING_GCC_VERSION >= 4007 ++ case 4: ++ return handle_ternary_ops(visited, lhs); ++#endif ++ } ++ default: ++ debug_gimple_stmt(def_stmt); ++ error("%s: unknown gimple code", __func__); ++ gcc_unreachable(); ++ } ++} ++ ++static void change_function_arg(gimple stmt, const_tree origarg, unsigned int argnum, tree newarg) ++{ ++ const_gimple assign; ++ gimple_stmt_iterator gsi = gsi_for_stmt(stmt); ++ tree origtype = TREE_TYPE(origarg); ++ ++ gcc_assert(gimple_code(stmt) == GIMPLE_CALL); ++ ++ assign = build_cast_stmt(origtype, newarg, CREATE_NEW_VAR, &gsi, BEFORE_STMT); ++ ++ gimple_call_set_arg(stmt, argnum, gimple_get_lhs(assign)); ++ update_stmt(stmt); ++} ++ ++static bool get_function_arg(unsigned int* argnum, const_tree fndecl) ++{ ++ const char *origid; ++ tree arg; ++ const_tree origarg; ++ ++ if (!DECL_ABSTRACT_ORIGIN(fndecl)) ++ return true; ++ ++ origarg = DECL_ARGUMENTS(DECL_ABSTRACT_ORIGIN(fndecl)); ++ while (origarg && *argnum) { ++ (*argnum)--; ++ origarg = TREE_CHAIN(origarg); ++ } ++ ++ gcc_assert(*argnum == 0); ++ ++ gcc_assert(origarg != NULL_TREE); ++ origid = NAME(origarg); ++ *argnum = 0; ++ for (arg = DECL_ARGUMENTS(fndecl); arg; arg = TREE_CHAIN(arg)) { ++ if (!strcmp(origid, NAME(arg))) ++ return true; ++ (*argnum)++; ++ } ++ return false; ++} ++ ++static bool skip_types(const_tree var) ++{ ++ switch (TREE_CODE(var)) { ++ case ADDR_EXPR: ++#if BUILDING_GCC_VERSION >= 4006 ++ case MEM_REF: ++#endif ++ case ARRAY_REF: ++ case BIT_FIELD_REF: ++ case INDIRECT_REF: ++ case TARGET_MEM_REF: ++ case VAR_DECL: ++ return true; ++ default: ++ break; ++ } ++ return false; ++} ++ ++static bool walk_phi(struct pointer_set_t *visited, const_tree result) ++{ ++ gimple phi = get_def_stmt(result); ++ unsigned int i, n = gimple_phi_num_args(phi); ++ ++ if (!phi) ++ return false; 
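++	/* Note: gimple_phi_num_args(phi) in the declaration above is
++	 * evaluated before this NULL check, so the check cannot guard that
++	 * call; in practice get_def_stmt() asserts on its argument and is
++	 * not expected to return NULL here.
++	 */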
++ ++ pointer_set_insert(visited, phi); ++ for (i = 0; i < n; i++) { ++ const_tree arg = gimple_phi_arg_def(phi, i); ++ if (pre_expand(visited, arg)) ++ return true; ++ } ++ return false; ++} ++ ++static bool walk_unary_ops(struct pointer_set_t *visited, const_tree lhs) ++{ ++ gimple def_stmt = get_def_stmt(lhs); ++ const_tree rhs; ++ ++ if (!def_stmt) ++ return false; ++ ++ rhs = gimple_assign_rhs1(def_stmt); ++ if (pre_expand(visited, rhs)) ++ return true; ++ return false; ++} ++ ++static bool walk_binary_ops(struct pointer_set_t *visited, const_tree lhs) ++{ ++ bool rhs1_found, rhs2_found; ++ gimple def_stmt = get_def_stmt(lhs); ++ const_tree rhs1, rhs2; ++ ++ if (!def_stmt) ++ return false; ++ ++ rhs1 = gimple_assign_rhs1(def_stmt); ++ rhs2 = gimple_assign_rhs2(def_stmt); ++ rhs1_found = pre_expand(visited, rhs1); ++ rhs2_found = pre_expand(visited, rhs2); ++ ++ return rhs1_found || rhs2_found; ++} ++ ++static const_tree search_field_decl(const_tree comp_ref) ++{ ++ const_tree field = NULL_TREE; ++ unsigned int i, len = TREE_OPERAND_LENGTH(comp_ref); ++ ++ for (i = 0; i < len; i++) { ++ field = TREE_OPERAND(comp_ref, i); ++ if (TREE_CODE(field) == FIELD_DECL) ++ break; ++ } ++ gcc_assert(TREE_CODE(field) == FIELD_DECL); ++ return field; ++} ++ ++static enum marked mark_status(const_tree fndecl, unsigned int argnum) ++{ ++ const_tree attr, p; ++ ++ attr = lookup_attribute("intentional_overflow", DECL_ATTRIBUTES(fndecl)); ++ if (!attr || !TREE_VALUE(attr)) ++ return MARKED_NO; ++ ++ p = TREE_VALUE(attr); ++ if (!TREE_INT_CST_LOW(TREE_VALUE(p))) ++ return MARKED_NOT_INTENTIONAL; ++ ++ do { ++ if (argnum == TREE_INT_CST_LOW(TREE_VALUE(p))) ++ return MARKED_YES; ++ p = TREE_CHAIN(p); ++ } while (p); ++ ++ return MARKED_NO; ++} ++ ++static void print_missing_msg(tree func, unsigned int argnum) ++{ ++ unsigned int new_hash; ++ size_t len; ++ unsigned char tree_codes[CODES_LIMIT]; ++ location_t loc = DECL_SOURCE_LOCATION(func); ++ const char *curfunc = get_asm_name(func); ++ ++ len = get_function_decl(func, tree_codes); ++ new_hash = get_hash_num(curfunc, (const char *) tree_codes, len, 0); ++ inform(loc, "Function %s is missing from the size_overflow hash table +%s+%u+%u+", curfunc, curfunc, argnum, new_hash); ++} ++ ++static unsigned int search_missing_attribute(const_tree arg) ++{ ++ const_tree type = TREE_TYPE(arg); ++ tree func = get_original_function_decl(current_function_decl); ++ unsigned int argnum; ++ const struct size_overflow_hash *hash; ++ ++ gcc_assert(TREE_CODE(arg) != COMPONENT_REF); ++ ++ if (TREE_CODE(type) == POINTER_TYPE) ++ return 0; ++ ++ argnum = find_arg_number(arg, func); ++ if (argnum == 0) ++ return 0; ++ ++ if (lookup_attribute("size_overflow", DECL_ATTRIBUTES(func))) ++ return argnum; ++ ++ hash = get_function_hash(func); ++ if (!hash || !(hash->param & (1U << argnum))) { ++ print_missing_msg(func, argnum); ++ return 0; ++ } ++ return argnum; ++} ++ ++static bool is_already_marked(const_tree lhs) ++{ ++ unsigned int argnum; ++ const_tree fndecl; ++ ++ argnum = search_missing_attribute(lhs); ++ fndecl = get_original_function_decl(current_function_decl); ++ if (argnum && mark_status(fndecl, argnum) == MARKED_YES) ++ return true; ++ return false; ++} ++ ++static bool pre_expand(struct pointer_set_t *visited, const_tree lhs) ++{ ++ const_gimple def_stmt; ++ ++ if (is_gimple_constant(lhs)) ++ return false; ++ ++ if (skip_types(lhs)) ++ return false; ++ ++ if (TREE_CODE(lhs) == PARM_DECL) ++ return is_already_marked(lhs); ++ ++ if (TREE_CODE(lhs) == COMPONENT_REF) { ++ 
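++		/* For a structure member access the intentional_overflow
++		 * marking sits on the FIELD_DECL itself, using the
++		 * parameter-0 form accepted by
++		 * handle_intentional_overflow_attribute() above.
++		 */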
const_tree field, attr; ++ ++ field = search_field_decl(lhs); ++ attr = lookup_attribute("intentional_overflow", DECL_ATTRIBUTES(field)); ++ if (!attr || !TREE_VALUE(attr)) ++ return false; ++ return true; ++ } ++ ++ def_stmt = get_def_stmt(lhs); ++ ++ if (!def_stmt) ++ return false; ++ ++ if (pointer_set_contains(visited, def_stmt)) ++ return false; ++ ++ switch (gimple_code(def_stmt)) { ++ case GIMPLE_NOP: ++ if (TREE_CODE(SSA_NAME_VAR(lhs)) == PARM_DECL) ++ return is_already_marked(lhs); ++ return false; ++ case GIMPLE_PHI: ++ return walk_phi(visited, lhs); ++ case GIMPLE_CALL: ++ case GIMPLE_ASM: ++ return false; ++ case GIMPLE_ASSIGN: ++ switch (gimple_num_ops(def_stmt)) { ++ case 2: ++ return walk_unary_ops(visited, lhs); ++ case 3: ++ return walk_binary_ops(visited, lhs); ++ } ++ default: ++ debug_gimple_stmt((gimple)def_stmt); ++ error("%s: unknown gimple code", __func__); ++ gcc_unreachable(); ++ } ++} ++ ++static bool search_attributes(tree fndecl, const_tree arg, unsigned int argnum) ++{ ++ struct pointer_set_t *visited; ++ bool is_found; ++ enum marked is_marked; ++ location_t loc; ++ ++ visited = pointer_set_create(); ++ is_found = pre_expand(visited, arg); ++ pointer_set_destroy(visited); ++ ++ is_marked = mark_status(fndecl, argnum + 1); ++ if ((is_found && is_marked == MARKED_YES) || is_marked == MARKED_NOT_INTENTIONAL) ++ return true; ++ ++ if (is_found) { ++ loc = DECL_SOURCE_LOCATION(fndecl); ++ inform(loc, "The intentional_overflow attribute is missing from +%s+%u+", get_asm_name(fndecl), argnum + 1); ++ return true; ++ } ++ return false; ++} ++ ++static void handle_function_arg(gimple stmt, tree fndecl, unsigned int argnum) ++{ ++ struct pointer_set_t *visited; ++ tree arg, newarg; ++ bool match; ++ ++ match = get_function_arg(&argnum, fndecl); ++ if (!match) ++ return; ++ gcc_assert(gimple_call_num_args(stmt) > argnum); ++ arg = gimple_call_arg(stmt, argnum); ++ if (arg == NULL_TREE) ++ return; ++ ++ if (is_gimple_constant(arg)) ++ return; ++ ++ if (search_attributes(fndecl, arg, argnum)) ++ return; ++ ++ if (TREE_CODE(arg) != SSA_NAME) ++ return; ++ ++ check_arg_type(arg); ++ ++ visited = pointer_set_create(); ++ newarg = expand(visited, arg); ++ pointer_set_destroy(visited); ++ ++ if (newarg == NULL_TREE) ++ return; ++ ++ change_function_arg(stmt, arg, argnum, newarg); ++ ++ check_size_overflow(stmt, TREE_TYPE(newarg), newarg, arg, BEFORE_STMT); ++} ++ ++static void handle_function_by_attribute(gimple stmt, const_tree attr, tree fndecl) ++{ ++ tree p = TREE_VALUE(attr); ++ do { ++ handle_function_arg(stmt, fndecl, TREE_INT_CST_LOW(TREE_VALUE(p))-1); ++ p = TREE_CHAIN(p); ++ } while (p); ++} ++ ++static void handle_function_by_hash(gimple stmt, tree fndecl) ++{ ++ tree orig_fndecl; ++ unsigned int num; ++ const struct size_overflow_hash *hash; ++ ++ orig_fndecl = get_original_function_decl(fndecl); ++ if (C_DECL_IMPLICIT(orig_fndecl)) ++ return; ++ hash = get_function_hash(orig_fndecl); ++ if (!hash) ++ return; ++ ++ for (num = 1; num <= MAX_PARAM; num++) ++ if (hash->param & (1U << num)) ++ handle_function_arg(stmt, fndecl, num - 1); ++} ++ ++static void set_plf_false(void) ++{ ++ basic_block bb; ++ ++ FOR_ALL_BB(bb) { ++ gimple_stmt_iterator si; ++ ++ for (si = gsi_start_bb(bb); !gsi_end_p(si); gsi_next(&si)) ++ gimple_set_plf(gsi_stmt(si), MY_STMT, false); ++ for (si = gsi_start_phis(bb); !gsi_end_p(si); gsi_next(&si)) ++ gimple_set_plf(gsi_stmt(si), MY_STMT, false); ++ } ++} ++ ++static unsigned int handle_function(void) ++{ ++ basic_block next, bb = 
ENTRY_BLOCK_PTR->next_bb;
++
++	set_plf_false();
++
++	do {
++		gimple_stmt_iterator gsi;
++		next = bb->next_bb;
++
++		for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
++			tree fndecl, attr;
++			gimple stmt = gsi_stmt(gsi);
++
++			if (!(is_gimple_call(stmt)))
++				continue;
++			fndecl = gimple_call_fndecl(stmt);
++			if (fndecl == NULL_TREE)
++				continue;
++			if (gimple_call_num_args(stmt) == 0)
++				continue;
++			attr = lookup_attribute("size_overflow", DECL_ATTRIBUTES(fndecl));
++			if (!attr || !TREE_VALUE(attr))
++				handle_function_by_hash(stmt, fndecl);
++			else
++				handle_function_by_attribute(stmt, attr, fndecl);
++			gsi = gsi_for_stmt(stmt);
++			next = gimple_bb(stmt)->next_bb;
++		}
++		bb = next;
++	} while (bb);
++	return 0;
++}
++
++static struct gimple_opt_pass size_overflow_pass = {
++	.pass = {
++		.type = GIMPLE_PASS,
++		.name = "size_overflow",
++		.gate = NULL,
++		.execute = handle_function,
++		.sub = NULL,
++		.next = NULL,
++		.static_pass_number = 0,
++		.tv_id = TV_NONE,
++		.properties_required = PROP_cfg | PROP_referenced_vars,
++		.properties_provided = 0,
++		.properties_destroyed = 0,
++		.todo_flags_start = 0,
++		.todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi | TODO_cleanup_cfg | TODO_ggc_collect | TODO_verify_flow
++	}
++};
++
++static void start_unit_callback(void __unused *gcc_data, void __unused *user_data)
++{
++	tree fntype;
++
++	const_char_ptr_type_node = build_pointer_type(build_type_variant(char_type_node, 1, 0));
++
++	// void report_size_overflow(const char *loc_file, unsigned int loc_line, const char *current_func, const char *ssa_var)
++	fntype = build_function_type_list(void_type_node,
++					  const_char_ptr_type_node,
++					  unsigned_type_node,
++					  const_char_ptr_type_node,
++					  const_char_ptr_type_node,
++					  NULL_TREE);
++	report_size_overflow_decl = build_fn_decl("report_size_overflow", fntype);
++
++	DECL_ASSEMBLER_NAME(report_size_overflow_decl);
++	TREE_PUBLIC(report_size_overflow_decl) = 1;
++	DECL_EXTERNAL(report_size_overflow_decl) = 1;
++	DECL_ARTIFICIAL(report_size_overflow_decl) = 1;
++	TREE_THIS_VOLATILE(report_size_overflow_decl) = 1;
++}
++
++int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
++{
++	int i;
++	const char * const plugin_name = plugin_info->base_name;
++	const int argc = plugin_info->argc;
++	const struct plugin_argument * const argv = plugin_info->argv;
++	bool enable = true;
++
++	struct register_pass_info size_overflow_pass_info = {
++		.pass = &size_overflow_pass.pass,
++		.reference_pass_name = "ssa",
++		.ref_pass_instance_number = 1,
++		.pos_op = PASS_POS_INSERT_AFTER
++	};
++
++	if (!plugin_default_version_check(version, &gcc_version)) {
++		error(G_("incompatible gcc/plugin versions"));
++		return 1;
++	}
++
++	for (i = 0; i < argc; ++i) {
++		if (!strcmp(argv[i].key, "no-size-overflow")) {
++			enable = false;
++			continue;
++		}
++		error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
++	}
++
++	register_callback(plugin_name, PLUGIN_INFO, NULL, &size_overflow_plugin_info);
++	if (enable) {
++		register_callback("start_unit", PLUGIN_START_UNIT, &start_unit_callback, NULL);
++		register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &size_overflow_pass_info);
++	}
++	register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
++
++	return 0;
++}
+--- tools/gcc/stackleak_plugin.c	1970-01-01 00:00:00.000000000 +0000
++++ tools/gcc/stackleak_plugin.c	2012-10-15 17:30:59.835924531 +0000
+@@
-0,0 +1,313 @@ ++/* ++ * Copyright 2011 by the PaX Team ++ * Licensed under the GPL v2 ++ * ++ * Note: the choice of the license means that the compilation process is ++ * NOT 'eligible' as defined by gcc's library exception to the GPL v3, ++ * but for the kernel it doesn't matter since it doesn't link against ++ * any of the gcc libraries ++ * ++ * gcc plugin to help implement various PaX features ++ * ++ * - track lowest stack pointer ++ * ++ * TODO: ++ * - initialize all local variables ++ * ++ * BUGS: ++ * - none known ++ */ ++#include "gcc-plugin.h" ++#include "config.h" ++#include "system.h" ++#include "coretypes.h" ++#include "tree.h" ++#include "tree-pass.h" ++#include "flags.h" ++#include "intl.h" ++#include "toplev.h" ++#include "plugin.h" ++//#include "expr.h" where are you... ++#include "diagnostic.h" ++#include "plugin-version.h" ++#include "tm.h" ++#include "function.h" ++#include "basic-block.h" ++#include "gimple.h" ++#include "rtl.h" ++#include "emit-rtl.h" ++ ++extern void print_gimple_stmt(FILE *, gimple, int, int); ++ ++int plugin_is_GPL_compatible; ++ ++static int track_frame_size = -1; ++static const char track_function[] = "pax_track_stack"; ++static const char check_function[] = "pax_check_alloca"; ++static bool init_locals; ++ ++static struct plugin_info stackleak_plugin_info = { ++ .version = "201203140940", ++ .help = "track-lowest-sp=nn\ttrack sp in functions whose frame size is at least nn bytes\n" ++// "initialize-locals\t\tforcibly initialize all stack frames\n" ++}; ++ ++static bool gate_stackleak_track_stack(void); ++static unsigned int execute_stackleak_tree_instrument(void); ++static unsigned int execute_stackleak_final(void); ++ ++static struct gimple_opt_pass stackleak_tree_instrument_pass = { ++ .pass = { ++ .type = GIMPLE_PASS, ++ .name = "stackleak_tree_instrument", ++ .gate = gate_stackleak_track_stack, ++ .execute = execute_stackleak_tree_instrument, ++ .sub = NULL, ++ .next = NULL, ++ .static_pass_number = 0, ++ .tv_id = TV_NONE, ++ .properties_required = PROP_gimple_leh | PROP_cfg, ++ .properties_provided = 0, ++ .properties_destroyed = 0, ++ .todo_flags_start = 0, //TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts, ++ .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_update_ssa ++ } ++}; ++ ++static struct rtl_opt_pass stackleak_final_rtl_opt_pass = { ++ .pass = { ++ .type = RTL_PASS, ++ .name = "stackleak_final", ++ .gate = gate_stackleak_track_stack, ++ .execute = execute_stackleak_final, ++ .sub = NULL, ++ .next = NULL, ++ .static_pass_number = 0, ++ .tv_id = TV_NONE, ++ .properties_required = 0, ++ .properties_provided = 0, ++ .properties_destroyed = 0, ++ .todo_flags_start = 0, ++ .todo_flags_finish = TODO_dump_func ++ } ++}; ++ ++static bool gate_stackleak_track_stack(void) ++{ ++ return track_frame_size >= 0; ++} ++ ++static void stackleak_check_alloca(gimple_stmt_iterator *gsi) ++{ ++ gimple check_alloca; ++ tree fntype, fndecl, alloca_size; ++ ++ fntype = build_function_type_list(void_type_node, long_unsigned_type_node, NULL_TREE); ++ fndecl = build_fn_decl(check_function, fntype); ++ DECL_ASSEMBLER_NAME(fndecl); // for LTO ++ ++ // insert call to void pax_check_alloca(unsigned long size) ++ alloca_size = gimple_call_arg(gsi_stmt(*gsi), 0); ++ check_alloca = gimple_build_call(fndecl, 1, alloca_size); ++ gsi_insert_before(gsi, check_alloca, GSI_SAME_STMT); ++} ++ ++static void stackleak_add_instrumentation(gimple_stmt_iterator *gsi) ++{ ++ gimple track_stack; ++ tree fntype, fndecl; ++ ++ fntype = 
build_function_type_list(void_type_node, NULL_TREE); ++ fndecl = build_fn_decl(track_function, fntype); ++ DECL_ASSEMBLER_NAME(fndecl); // for LTO ++ ++ // insert call to void pax_track_stack(void) ++ track_stack = gimple_build_call(fndecl, 0); ++ gsi_insert_after(gsi, track_stack, GSI_CONTINUE_LINKING); ++} ++ ++#if BUILDING_GCC_VERSION == 4005 ++static bool gimple_call_builtin_p(gimple stmt, enum built_in_function code) ++{ ++ tree fndecl; ++ ++ if (!is_gimple_call(stmt)) ++ return false; ++ fndecl = gimple_call_fndecl(stmt); ++ if (!fndecl) ++ return false; ++ if (DECL_BUILT_IN_CLASS(fndecl) != BUILT_IN_NORMAL) ++ return false; ++// print_node(stderr, "pax", fndecl, 4); ++ return DECL_FUNCTION_CODE(fndecl) == code; ++} ++#endif ++ ++static bool is_alloca(gimple stmt) ++{ ++ if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA)) ++ return true; ++ ++#if BUILDING_GCC_VERSION >= 4007 ++ if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA_WITH_ALIGN)) ++ return true; ++#endif ++ ++ return false; ++} ++ ++static unsigned int execute_stackleak_tree_instrument(void) ++{ ++ basic_block bb, entry_bb; ++ bool prologue_instrumented = false, is_leaf = true; ++ ++ entry_bb = ENTRY_BLOCK_PTR_FOR_FUNCTION(cfun)->next_bb; ++ ++ // 1. loop through BBs and GIMPLE statements ++ FOR_EACH_BB(bb) { ++ gimple_stmt_iterator gsi; ++ ++ for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) { ++ gimple stmt; ++ ++ stmt = gsi_stmt(gsi); ++ ++ if (is_gimple_call(stmt)) ++ is_leaf = false; ++ ++ // gimple match: align 8 built-in BUILT_IN_NORMAL:BUILT_IN_ALLOCA attributes ++ if (!is_alloca(stmt)) ++ continue; ++ ++ // 2. insert stack overflow check before each __builtin_alloca call ++ stackleak_check_alloca(&gsi); ++ ++ // 3. insert track call after each __builtin_alloca call ++ stackleak_add_instrumentation(&gsi); ++ if (bb == entry_bb) ++ prologue_instrumented = true; ++ } ++ } ++ ++ // special cases for some bad linux code: taking the address of static inline functions will materialize them ++ // but we mustn't instrument some of them as the resulting stack alignment required by the function call ABI ++ // will break other assumptions regarding the expected (but not otherwise enforced) register clobbering ABI. ++ // case in point: native_save_fl on amd64 when optimized for size clobbers rdx if it were instrumented here. ++ if (is_leaf && !TREE_PUBLIC(current_function_decl) && DECL_DECLARED_INLINE_P(current_function_decl)) ++ return 0; ++ if (is_leaf && !strncmp(IDENTIFIER_POINTER(DECL_NAME(current_function_decl)), "_paravirt_", 10)) ++ return 0; ++ ++ // 4. insert track call at the beginning ++ if (!prologue_instrumented) { ++ gimple_stmt_iterator gsi; ++ ++ bb = split_block_after_labels(ENTRY_BLOCK_PTR)->dest; ++ if (dom_info_available_p(CDI_DOMINATORS)) ++ set_immediate_dominator(CDI_DOMINATORS, bb, ENTRY_BLOCK_PTR); ++ gsi = gsi_start_bb(bb); ++ stackleak_add_instrumentation(&gsi); ++ } ++ ++ return 0; ++} ++ ++static unsigned int execute_stackleak_final(void) ++{ ++ rtx insn; ++ ++ if (cfun->calls_alloca) ++ return 0; ++ ++ // keep calls only if function frame is big enough ++ if (get_frame_size() >= track_frame_size) ++ return 0; ++ ++ // 1. 
find pax_track_stack calls ++ for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) { ++ // rtl match: (call_insn 8 7 9 3 (call (mem (symbol_ref ("pax_track_stack") [flags 0x41] ) [0 S1 A8]) (4)) -1 (nil) (nil)) ++ rtx body; ++ ++ if (!CALL_P(insn)) ++ continue; ++ body = PATTERN(insn); ++ if (GET_CODE(body) != CALL) ++ continue; ++ body = XEXP(body, 0); ++ if (GET_CODE(body) != MEM) ++ continue; ++ body = XEXP(body, 0); ++ if (GET_CODE(body) != SYMBOL_REF) ++ continue; ++ if (strcmp(XSTR(body, 0), track_function)) ++ continue; ++// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size); ++ // 2. delete call ++ insn = delete_insn_and_edges(insn); ++#if BUILDING_GCC_VERSION >= 4007 ++ if (GET_CODE(insn) == NOTE && NOTE_KIND(insn) == NOTE_INSN_CALL_ARG_LOCATION) ++ insn = delete_insn_and_edges(insn); ++#endif ++ } ++ ++// print_simple_rtl(stderr, get_insns()); ++// print_rtl(stderr, get_insns()); ++// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size); ++ ++ return 0; ++} ++ ++int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version) ++{ ++ const char * const plugin_name = plugin_info->base_name; ++ const int argc = plugin_info->argc; ++ const struct plugin_argument * const argv = plugin_info->argv; ++ int i; ++ struct register_pass_info stackleak_tree_instrument_pass_info = { ++ .pass = &stackleak_tree_instrument_pass.pass, ++// .reference_pass_name = "tree_profile", ++ .reference_pass_name = "optimized", ++ .ref_pass_instance_number = 1, ++ .pos_op = PASS_POS_INSERT_BEFORE ++ }; ++ struct register_pass_info stackleak_final_pass_info = { ++ .pass = &stackleak_final_rtl_opt_pass.pass, ++ .reference_pass_name = "final", ++ .ref_pass_instance_number = 1, ++ .pos_op = PASS_POS_INSERT_BEFORE ++ }; ++ ++ if (!plugin_default_version_check(version, &gcc_version)) { ++ error(G_("incompatible gcc/plugin versions")); ++ return 1; ++ } ++ ++ register_callback(plugin_name, PLUGIN_INFO, NULL, &stackleak_plugin_info); ++ ++ for (i = 0; i < argc; ++i) { ++ if (!strcmp(argv[i].key, "track-lowest-sp")) { ++ if (!argv[i].value) { ++ error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key); ++ continue; ++ } ++ track_frame_size = atoi(argv[i].value); ++ if (argv[i].value[0] < '0' || argv[i].value[0] > '9' || track_frame_size < 0) ++ error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value); ++ continue; ++ } ++ if (!strcmp(argv[i].key, "initialize-locals")) { ++ if (argv[i].value) { ++ error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value); ++ continue; ++ } ++ init_locals = true; ++ continue; ++ } ++ error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key); ++ } ++ ++ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_tree_instrument_pass_info); ++ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_final_pass_info); ++ ++ return 0; ++} +--- include/net/bluetooth/bluetooth.h ++++ include/net/bluetooth/bluetooth.h +@@ -207,7 +207,7 @@ + struct file_operations fops; + int (* custom_seq_show)(struct seq_file *, void *); + #endif +-}; ++} __no_const; + + int bt_sock_register(int proto, const struct net_proto_family *ops); + int bt_sock_unregister(int proto); +--- drivers/gpu/drm/i915/i915_drv.h ++++ drivers/gpu/drm/i915/i915_drv.h +@@ -274,12 +274,12 @@ + /* render clock increase/decrease */ + /* display 
clock increase/decrease */ + /* pll clock increase/decrease */ +-}; ++} __no_const; + + struct drm_i915_gt_funcs { + void (*force_wake_get)(struct drm_i915_private *dev_priv); + void (*force_wake_put)(struct drm_i915_private *dev_priv); +-}; ++} __no_const; + + #define DEV_INFO_FLAGS \ + DEV_INFO_FLAG(is_mobile) DEV_INFO_SEP \ diff --git a/sys-kernel/compat-drivers-alx/files/compat-drivers-3.8-ath6kl.patch b/sys-kernel/compat-drivers-alx/files/compat-drivers-3.8-ath6kl.patch new file mode 100644 index 00000000..8bec2867 --- /dev/null +++ b/sys-kernel/compat-drivers-alx/files/compat-drivers-3.8-ath6kl.patch @@ -0,0 +1,37 @@ +Fixes for: +drivers/net/wireless/ath/ath6kl/sdio.c: In function ‘ath6kl_sdio_alloc_prep_scat_req’: +drivers/net/wireless/ath/ath6kl/sdio.c:1478:1: warning: find_arg_number: cannot find the buf_sz argument in ath6kl_sdio_alloc_prep_scat_req [enabled by default] +drivers/net/wireless/ath/ath6kl/sdio.c:1478:1: warning: find_arg_number: cannot find the sg_sz argument in ath6kl_sdio_alloc_prep_scat_req [enabled by default] +--- ./drivers/net/wireless/ath/ath6kl/sdio.c ++++ ./drivers/net/wireless/ath/ath6kl/sdio.c +@@ -341,11 +341,14 @@ + scat_list_sz = (n_scat_entry - 1) * sizeof(struct hif_scatter_item); + scat_req_sz = sizeof(*s_req) + scat_list_sz; + +- if (!virt_scat) +- sg_sz = sizeof(struct scatterlist) * n_scat_entry; +- else +- buf_sz = 2 * L1_CACHE_BYTES + +- ATH6KL_MAX_TRANSFER_SIZE_PER_SCATTER; ++ if (!virt_scat) { ++ sg_sz = sizeof(struct scatterlist) * n_scat_entry; ++ buf_sz = 0; ++ } else { ++ sg_sz = 0; ++ buf_sz = 2 * L1_CACHE_BYTES + ++ ATH6KL_MAX_TRANSFER_SIZE_PER_SCATTER; ++ } + + for (i = 0; i < n_scat_req; i++) { + /* allocate the scatter request */ +--- ./drivers/gpu/drm/i915/intel_display.c ++++ ./drivers/gpu/drm/i915/intel_display.c +@@ -7110,7 +7110,7 @@ + obj = work->old_fb_obj; + + atomic_clear_mask(1 << intel_crtc->plane, +- &obj->pending_flip.counter); ++ &obj->pending_flip); + wake_up(&dev_priv->pending_flip_queue); + + queue_work(dev_priv->wq, &work->work); diff --git a/sys-kernel/compat-drivers-alx/files/compat-drivers-3.8-bt_tty.patch b/sys-kernel/compat-drivers-alx/files/compat-drivers-3.8-bt_tty.patch new file mode 100644 index 00000000..dd299121 --- /dev/null +++ b/sys-kernel/compat-drivers-alx/files/compat-drivers-3.8-bt_tty.patch @@ -0,0 +1,37 @@ +--- compat-drivers-3.8-rc7-1-u.orig/net/bluetooth/rfcomm/tty.c 2013-02-11 00:31:59.000000000 +0100 ++++ compat-drivers-3.8-rc7-1-u/net/bluetooth/rfcomm/tty.c 2013-02-13 12:39:58.983001215 +0100 +@@ -309,7 +309,7 @@ + BUG_ON(test_and_set_bit(RFCOMM_TTY_RELEASED, &dev->flags)); + + spin_lock_irqsave(&dev->port.lock, flags); +- if (dev->port.count > 0) { ++ if (atomic_read(&dev->port.count) > 0) { + spin_unlock_irqrestore(&dev->port.lock, flags); + return; + } +@@ -664,10 +664,10 @@ + return -ENODEV; + + BT_DBG("dev %p dst %pMR channel %d opened %d", dev, &dev->dst, +- dev->channel, dev->port.count); ++ dev->channel, atomic_read(&dev->port.count)); + + spin_lock_irqsave(&dev->port.lock, flags); +- if (++dev->port.count > 1) { ++ if (atomic_inc_return(&dev->port.count) > 1) { + spin_unlock_irqrestore(&dev->port.lock, flags); + return 0; + } +@@ -736,10 +736,10 @@ + return; + + BT_DBG("tty %p dev %p dlc %p opened %d", tty, dev, dev->dlc, +- dev->port.count); ++ atomic_read(&dev->port.count)); + + spin_lock_irqsave(&dev->port.lock, flags); +- if (!--dev->port.count) { ++ if (!atomic_dec_return(&dev->port.count)) { + spin_unlock_irqrestore(&dev->port.lock, flags); + if (dev->tty_dev->parent) + #if 
(LINUX_VERSION_CODE > KERNEL_VERSION(2,6,29)) diff --git a/sys-kernel/compat-drivers-alx/files/compat-drivers-3.8-driver-select b/sys-kernel/compat-drivers-alx/files/compat-drivers-3.8-driver-select new file mode 100755 index 00000000..bafaf352 --- /dev/null +++ b/sys-kernel/compat-drivers-alx/files/compat-drivers-3.8-driver-select @@ -0,0 +1,845 @@ +#!/usr/bin/env bash +# Copyright 2009 Luis R. Rodriguez +# +# This script allows you to select your compat-drivers driver and +# reduce compilation time. + +# Heavily modified by Stefan Kuhn +# Configures compat-drivers for multiple drivers at once +# Suited for package managers + +# This internal variable contains a list of all 'Makefile's +CPD_MAKEFILES=" + MAKEFILE + COMPAT_CONFIG_CW + DRIVERS_MAKEFILE + ATH_MAKEFILE + ATH9K_MAKEFILE + BRCM80211_MAKEFILE + RT2X00_MAKEFILE + TI_MAKEFILE + NET_WIRELESS_MAKEFILE + EEPROM_MAKEFILE + DRIVERS_NET_ATHEROS + DRIVERS_NET_BROADCOM + DRIVERS_NET_USB_MAKEFILE + SSB_MAKEFILE + BCMA_MAKEFILE" + +# This internal variable contains an array with paths to all files +CPD_MAKEFILES_ARRAY=( + MAKEFILE=Makefile + COMPAT_CONFIG_CW=config.mk + DRIVERS_MAKEFILE=drivers/net/wireless/Makefile + ATH_MAKEFILE=drivers/net/wireless/ath/Makefile + ATH9K_MAKEFILE=drivers/net/wireless/ath/ath9k/Makefile + BRCM80211_MAKEFILE=drivers/net/wireless/brcm80211/Makefile + RT2X00_MAKEFILE=drivers/net/wireless/rt2x00/Makefile + TI_MAKEFILE=drivers/net/wireless/ti/Makefile + NET_WIRELESS_MAKEFILE=net/wireless/Makefile + EEPROM_MAKEFILE=drivers/misc/eeprom/Makefile + DRIVERS_NET_ATHEROS=drivers/net/ethernet/atheros/Makefile + DRIVERS_NET_BROADCOM=drivers/net/ethernet/broadcom/Makefile + DRIVERS_NET_USB_MAKEFILE=drivers/net/usb/Makefile + SSB_MAKEFILE=drivers/ssb/Makefile + BCMA_MAKEFILE=drivers/bcma/Makefile +) + +# This internal variable holds modules to be added to the atheros Makefile +CPD_ADD_ATHEROS="" + +# This internal variable controls the execution phase (and write protection) +# phases: 0=start, 1=configure, 2=write, 3=restore +# no file should be touched below phase 2 +CPD_PHASE=0 + +# CPD_MODULE +# This internal variable contains a temporary value, the currently processed +# argument + +# CPD_DISABLE_${CPD_MODULE} +# These internal variables contain the 'disable-actions' of the # currently +# processed argument + +# This internal variable stores selected drivers (and groups) +CPD_SELECTED_DRIVERS='' + +function die { + echo "$1" 1>&2 + exit 1 +} + +function check_phase { + [ ${CPD_PHASE} -lt ${1} ] && \ + die "Current phase ${CPD_PHASE} lower than ${1}. Check failed" +} + +# This internal function returns the path to a file from CPD_MAKEFILES_ARRAY +function get_makefile { + local file + for file in "${CPD_MAKEFILES_ARRAY[@]}"; do + if [ "${file%%=*}" = "${1}" ]; then + echo "${file#*=}" + return 0 + fi + done + die "File ${1} not found" +} + + +# used to backup files from foo to foo.${BACKUP_EXT} +BACKUP_EXT="bk" + +# Pretty colors +GREEN="\033[01;32m" +YELLOW="\033[01;33m" +NORMAL="\033[00m" +BLUE="\033[34m" +RED="\033[31m" +PURPLE="\033[35m" +CYAN="\033[36m" +UNDERLINE="\033[02m" + +# this internal function disables colors +function unset_colors { + GREEN= + YELLOW= + NORMAL= + BLUE= + RED= + PURPLE= + CYAN= + UNDERLINE= +} + +SUPPORTED_80211_DRIVERS="ath5k ath9k ath9k_ap ath9k_htc carl9170 ath6kl wil6210 b43 zd1211rw rt2x00 wl1251 wl12xx brcmsmac brcmfmac" + +# b43 needs some more work for driver-select, the SSB stuff, plus +# what if you update b44 but not b43? It will bust. 
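+# Illustrative sketch (not part of the upstream script): how the helpers
+# above fit together. Keys from CPD_MAKEFILES resolve to paths through
+# CPD_MAKEFILES_ARRAY, and check_phase guards every write:
+#   CPD_PHASE=2
+#   mk="$(get_makefile ATH_MAKEFILE)"  # -> drivers/net/wireless/ath/Makefile
+#   backup_file "${mk}"                # permitted, phase is already 2
+# Phases 0 and 1 only collect configuration, phase 2 writes the Makefiles,
+# and phase 3 restores them from the *.bk backups.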
+SUPPORTED_ETH_DRIVERS="atl1 atl2 atl1e atl1c alx" + +SUPPORTED_DRM_DRIVERS="i915" + +SUPPORTED_DRIVERS="${SUPPORTED_80211_DRIVERS} ${SUPPORTED_ETH_DRIVERS} ${SUPPORTED_DRM_DRIVERS}" + +function usage { + echo -e "${GREEN}Usage${NORMAL}: ${BOLD}$0${NORMAL} [${PURPLE}-q${NORMAL}] [ ${PURPLE}${NORMAL} | ${CYAN}${NORMAL} | ${GREEN}restore${NORMAL} ]" + + # These should match the switch below. + echo -e "Supported 802.11 drivers:" + local i + for i in $SUPPORTED_80211_DRIVERS; do + echo -e "\t${PURPLE}${i}${NORMAL}" + done + + echo + echo -e "Supported Ethernet drivers:" + for i in $SUPPORTED_ETH_DRIVERS; do + echo -e "\t${PURPLE}${i}${NORMAL}" + done + + echo -e "Supported DRM drivers:" + for i in $SUPPORTED_DRM_DRIVERS; do + echo -e "\t${PURPLE}${i}${NORMAL}" + done + + # These should match the switch below. + echo -e "\nSupported group drivers:" + echo -e "\t${CYAN}atheros${NORMAL} < ${PURPLE} ath5k ath9k carl9170 zd1211rw ath6kl wil6210${NORMAL}>" + echo -e "\t${CYAN}ath${NORMAL} < ${PURPLE} ath5k ath9k carl9170 ath6kl wil6210${NORMAL}>" + echo -e "\t${CYAN}brcm80211${NORMAL} < ${PURPLE} brcmsmac brcmfmac ${NORMAL}>" + echo -e "\t${CYAN}intel${NORMAL} < ${PURPLE} iwlwifi, iwlegacy ${NORMAL}>" + echo -e "\t${CYAN}rtl818x${NORMAL} < ${PURPLE} rtl8180 rtl8187 ${NORMAL}>" + echo -e "\t${CYAN}rtlwifi${NORMAL} < ${PURPLE} rtl8192ce ${NORMAL}>" + echo -e "\t${CYAN}ti${NORMAL} < ${PURPLE} wl1251 wl12xx (SPI and SDIO)${NORMAL}>" + + echo -e "\nSupported group drivers: Bluetooth & Ethernet:" + echo -e "\t${CYAN}atlxx${NORMAL} < ${PURPLE} atl1 atl2 atl1e alx${NORMAL}>" + echo -e "\t${CYAN}bt${NORMAL} < ${PURPLE} Linux bluetooth drivers ${NORMAL}>" + + echo -e "\nSupported group drivers: DRM:" + echo -e "\t${CYAN}drm${NORMAL} < ${PURPLE} i915${NORMAL}>" + + echo + echo -e "Restoring compat-drivers:" + echo -e "\t${GREEN}restore${NORMAL}: you can use this option to restore compat-drivers to the original state" + + echo + echo -e "Options:" + echo -e "\t${PURPLE}-q${NORMAL}:\tDisables colored output" +} + +function backup_file { + check_phase 2 + if [ -f $1.${BACKUP_EXT} ]; then + echo -e "Backup exists: ${CYAN}${1}.${BACKUP_EXT}${NORMAL}" + return + fi + echo -e "Backing up makefile: ${CYAN}${1}.${BACKUP_EXT}${NORMAL}" + cp "${1}" "${1}.${BACKUP_EXT}" || die +} + +# This internal function registers a 'disable' action for a module. +# It writes to a variable CPD_DISABLE_${CPD_MODULE} +function disable { + check_phase 1 + eval "CPD_DISABLE_${CPD_MODULE}+=\" ${*}\"" || die +} + +# This internal function clears a Makefile completely. +function disable_makefile +{ + check_phase 2 + backup_file $1 + echo > $1 +} + +function select_drivers_from_makefile +{ + check_phase 2 + local MAKEFILE=$(get_makefile "$1") + shift + backup_file $MAKEFILE + local CONFIGS="" + local i + for i in $@; do + if [[ "$CONFIGS" = "" ]]; then + CONFIGS="$i" + else + CONFIGS="${CONFIGS}|$i" + fi + done + egrep "$CONFIGS" $MAKEFILE > ${MAKEFILE}.tmp + mv ${MAKEFILE}.tmp ${MAKEFILE} +} + +# This internal function registers filters for the drivers Makefile +function select_drivers { + check_phase 1 + eval "CPD_DRIVERS_MAKEFILE+=\" ${*}\"" || die +} + +# This internal function disables "lib80211" +function disable_lib80211 +{ + check_phase 2 + backup_file "$(get_makefile NET_WIRELESS_MAKEFILE)" + # perl -i -ne 'print if ! 
/LIB80211/ ' $NET_WIRELESS_MAKEFILE + sed -i '/LIB80211/d' "$(get_makefile NET_WIRELESS_MAKEFILE)" || die +} + +# This internal function disables "b44" +function disable_b44 { + check_phase 2 + backup_file "$(get_makefile DRIVERS_NET_BROADCOM)" + # perl -i -ne 'print if ! /CONFIG_B44/ ' $DRIVERS_NET_BROADCOM + sed -i '/CONFIG_B44/d' "$(get_makefile DRIVERS_NET_BROADCOM)" || die +} + +# This internal function disables "ssb" +function disable_ssb +{ + check_phase 2 + disable_makefile "$(get_makefile SSB_MAKEFILE)" + # perl -i -ne 'print if ! /drivers\/ssb\//' Makefile + sed -i '/drivers\/ssb\//d' "$(get_makefile MAKEFILE)" || die +} + +# This internal function disables "bcma" +function disable_bcma +{ + check_phase 2 + disable_makefile "$(get_makefile BCMA_MAKEFILE)" + # perl -i -ne 'print if ! /drivers\/bcma\//' Makefile + sed -i '/drivers\/bcma\//d' "$(get_makefile MAKEFILE)" || die +} + +# This internal function disables "rfkill" +function disable_rfkill +{ + check_phase 2 + backup_file "$(get_makefile MAKEFILE)" + # perl -i -ne 'print if ! /CONFIG_COMPAT_RFKILL/' Makefile + sed -i '/CONFIG_COMPAT_RFKILL/d' "$(get_makefile MAKEFILE)" || die +} + +# This internal function disables "eeprom" +function disable_eeprom +{ + check_phase 2 + disable_makefile "$(get_makefile EEPROM_MAKEFILE)" || die + # perl -i -ne 'print if ! /drivers\/misc\/eeprom\//' Makefile + sed -i '/drivers\/misc\/eeprom\//d' "$(get_makefile MAKEFILE)" || die +} + +# This internal function disables "usbnet" +# TODO: this function is defined twice in this script; in bash the second +# definition wins, so this variant is never executed. Why? +function disable_usbnet +{ + check_phase 2 + disable_makefile "$(get_makefile DRIVERS_NET_USB_MAKEFILE)" || die + # perl -i -ne 'print if ! /drivers\/net\/usb\//' Makefile + sed -i '/drivers\/net\/usb\//d' "$(get_makefile MAKEFILE)" || die +} + +# This internal function disables "usbnet" +# TODO: this function is defined twice in this script; this definition +# overrides the one above. Why? +function disable_usbnet { + check_phase 2 + # perl -i -ne 'print if ! /CONFIG_COMPAT_NET_USB_MODULES/' Makefile + sed -i '/CONFIG_COMPAT_NET_USB_MODULES/d' "$(get_makefile MAKEFILE)" || die +} + +# This internal function disables "ethernet" +function disable_ethernet { + check_phase 2 + # perl -i -ne 'print if ! /CONFIG_COMPAT_NETWORK_MODULES/' Makefile + sed -i '/CONFIG_COMPAT_NETWORK_MODULES/d' "$(get_makefile MAKEFILE)" || die +} + +# This internal function disables "var_03" +function disable_var_03 { + check_phase 2 + # perl -i -ne 'print if ! /CONFIG_COMPAT_VAR_MODULES/' Makefile + sed -i '/CONFIG_COMPAT_VAR_MODULES/d' "$(get_makefile MAKEFILE)" || die +} + +# This internal function disables "bt" +function disable_bt { + check_phase 2 + # perl -i -ne 'print if ! /CONFIG_COMPAT_BLUETOOTH/' Makefile + sed -i '/CONFIG_COMPAT_BLUETOOTH/d' "$(get_makefile MAKEFILE)" || die +} + +# This internal function disables "80211" +function disable_80211 { + check_phase 2 + # perl -i -ne 'print if ! /CONFIG_COMPAT_WIRELESS/' Makefile + sed -i '/CONFIG_COMPAT_WIRELESS/d' "$(get_makefile MAKEFILE)" || die +} + +# This internal function disables "drm" +function disable_drm { + check_phase 2 + # perl -i -ne 'print if ! 
/CONFIG_COMPAT_VIDEO_MODULES/' Makefile + sed -i '/CONFIG_COMPAT_VIDEO_MODULES/d' "$(get_makefile MAKEFILE)" || die +} + +function disable_bt_usb_ethernet { + check_phase 1 + # backup_file Makefile + disable usbnet + disable ethernet + disable bt + disable update-initramfs + disable drm +} + +function disable_bt_usb_ethernet_var { + check_phase 1 + # backup_file Makefile + disable bt_usb_ethernet + disable var_03 +} + +function enable_only_ethernet { + check_phase 1 + # backup_file Makefile + # backup_file $DRIVERS_NET_BROADCOM + # backup_file $DRIVERS_NET_ATHEROS + disable staging + disable usbnet + disable var_03 + disable bt + disable drm + # rfkill may be needed if you enable b44 as you may have b43 + disable rfkill + disable 80211 +} + +function disable_var { + check_phase 1 + disable ssb + disable bcma + disable usbnet + disable eeprom + disable update-initramfs +} + +function disable_var_01 { + check_phase 1 + disable lib80211 + disable var +} + +function disable_var_02 { + check_phase 1 + #var_01 with eeprom not disabled + disable lib80211 + disable ssb + disable bcma + disable usbnet + disable update-initramfs +} + +# This internal function disables "staging" +function disable_staging { + check_phase 2 + backup_file "$(get_makefile MAKEFILE)" + # perl -i -ne 'print if ! /CONFIG_COMPAT_STAGING/ ' Makefile + sed -i '/CONFIG_COMPAT_STAGING/d' "$(get_makefile MAKEFILE)" || die +} + +# This internal function disables "update-initramfs" +function disable_update-initramfs +{ + check_phase 2 + backup_file "$(get_makefile MAKEFILE)" + # perl -i -ne 'print if ! /update-initramfs/' Makefile + sed -i '/update-initramfs/d' "$(get_makefile MAKEFILE)" || die +} + +function enable_only_drm { + check_phase 1 + # backup_file Makefile + disable ethernet + disable staging + disable usbnet + disable var_03 + disable bt + # rfkill may be needed if you enable b44 as you may have b43 + disable rfkill + disable 80211 +} + +# This internal function registers filters for the ath Makefile +function select_ath_driver +{ + check_phase 1 + # backup_file $ATH_MAKEFILE + # perl -i -ne 'print if /'$1'/ || /CONFIG_ATH_/ || /ath-objs/ || /regd.o/ || /hw.o/ || /key.o/' $ATH_MAKEFILE + eval "CPD_ATH_MAKEFILE+=\" ${*} CONFIG_ATH_ ath-objs regd.o hw.o key.o\"" || die + disable var_01 +} + +# This internal function registers no-common filters for the ath Makefile +function select_ath_no_common +{ + check_phase 1 + # backup_file $ATH_MAKEFILE + # perl -i -ne 'print if /'$1'/' $ATH_MAKEFILE + eval "CPD_ATH_MAKEFILE+=\" ${*}\"" || die + disable var_01 +} + +function select_ath9k_driver +{ + check_phase 1 + select_ath_driver CONFIG_ATH9K_HW + # In the future here we'll add stuff to disable ath9k_htc +} + +function select_ath9k_driver_ap +{ + check_phase 1 + select_ath9k_driver + # backup_file $COMPAT_CONFIG_CW + # perl -i -ne 'print if ! 
/CONFIG_COMPAT_ATH9K_RATE_CONTROL/ ' $COMPAT_CONFIG_CW + # this does not work with multiple drivers, since it's the only filter to that file + # It is applied only when the ath9k_ap driver is selected and nothing else + # eval "CPD_COMPAT_CONFIG_CW+=\" CONFIG_COMPAT_ATH9K_RATE_CONTROL\"" || die +} + +# This internal function registers filters for the ti Makefile +function select_ti_drivers +{ + check_phase 1 + select_drivers CONFIG_WL_TI + # select_drivers_from_makefile $TI_MAKEFILE $@ + eval "CPD_TI_MAKEFILE+=\" ${*}\"" || die +} + +# This internal function registers filters for the brcm80211 Makefile +function select_brcm80211_driver +{ + check_phase 1 + # backup_file $BRCM80211_MAKEFILE + # perl -i -ne 'print if /'$1'/ || /CONFIG_BRCMUTIL/ ' $BRCM80211_MAKEFILE + eval "CPD_BRCM80211_MAKEFILE+=\" ${*} CONFIG_BRCMUTIL\"" || die +} + +function restore_file { + check_phase 3 + local ORIG="${1%%.${BACKUP_EXT}}" || die + cp $1 $ORIG || die + rm -f $1 || die + echo -e "Restored makefile: ${CYAN}${ORIG}${NORMAL} (and removed backup)" +} + +function restore_compat { + check_phase 3 + local FILES=$(find ./ -type f -name *\."${BACKUP_EXT}") || die + local i + for i in $FILES; do + restore_file $i + done +} + +if [ ! -f .compat_version ]; then + die "Must run $0 from the compat-drivers top level directory" +fi + +# set phase to configure +CPD_PHASE=1 + +# loop over all arguments +# This sets the configuration for each flag/module +for arg in "$@"; do + # clear/set global vars + CPD_MODULE="$arg" + CPD_SELECTED_DRIVERS+=" $arg" + case "$arg" in + restore) + CPD_PHASE=3 + restore_compat + exit 0 + ;; + usage) + usage + exit 0 + ;; + -q) + unset_colors || die + CPD_SELECTED_DRIVERS="${CPD_SELECTED_DRIVERS% -q}" + ;; + # Group drivers + atheros) + select_drivers CONFIG_ATH_CARDS \ + CONFIG_COMPAT_ZD1211RW + disable staging + disable_bt_usb_ethernet_var + disable var_01 + ;; + ath) + disable_bt_usb_ethernet_var + select_drivers CONFIG_ATH_CARDS + disable var_01 + ;; + intel) + select_drivers CONFIG_IWLWIFI \ + CONFIG_IWLEGACY \ + CONFIG_IPW + disable staging + disable var + disable bt + disable ethernet + disable usbnet + ;; + iwlwifi) + select_drivers CONFIG_IWLWIFI + disable staging + disable var_01 + disable bt + disable ethernet + disable usbnet + ;; + iwlegacy) + select_drivers CONFIG_IWLEGACY + disable staging + disable var_01 + disable bt + disable ethernet + disable usbnet + ;; + rtl818x) + select_drivers CONFIG_RTL8180 CONFIG_RTL8187 + disable staging + disable bt_usb_ethernet + disable ssb + disable bcma + disable lib80211 + ;; + rtlwifi) + select_drivers CONFIG_RTL8192CE CONFIG_RTLWIFI + disable staging + disable_bt_usb_ethernet_var + disable lib80211 + ;; + ti) + select_drivers CONFIG_WL_TI + disable_bt_usb_ethernet_var + disable staging + disable var_01 + ;; + brcm80211) + disable staging + disable_bt_usb_ethernet_var + select_drivers CONFIG_BRCMUTIL \ + CONFIG_BRCMFMAC \ + CONFIG_BRCMSMAC + ;; + # Singular modules + ath5k) + disable staging + disable_bt_usb_ethernet_var + select_drivers CONFIG_ATH_CARDS + select_ath_driver CONFIG_ATH5K + #patch -p1 < enable-older-kernels/enable-2.6.23.patch + ;; + ath9k) + disable staging + disable_bt_usb_ethernet_var + select_drivers CONFIG_ATH_CARDS + select_ath9k_driver + ;; + ath9k_ap) + disable staging + disable_bt_usb_ethernet_var + select_drivers CONFIG_ATH_CARDS + select_ath9k_driver_ap + ;; + carl9170) + disable staging + disable_bt_usb_ethernet_var + select_drivers CONFIG_ATH_CARDS + select_ath_driver CONFIG_CARL9170 + ;; + ath9k_htc) + 
disable staging + disable_bt_usb_ethernet_var + select_drivers CONFIG_ATH_CARDS + select_ath9k_driver + ;; + ath6kl) + disable staging + disable_bt_usb_ethernet_var + select_drivers CONFIG_ATH_CARDS + select_ath_driver CONFIG_ATH6KL + ;; + wil6210) + disable staging + disable_bt_usb_ethernet_var + select_drivers CONFIG_ATH_CARDS + select_ath_no_common CONFIG_WIL6210 + ;; + brcmsmac) + disable staging + disable_bt_usb_ethernet_var + select_drivers CONFIG_BRCMSMAC + select_brcm80211_driver CONFIG_BRCMSMAC CONFIG_BRCMUTIL + ;; + brcmfmac) + disable staging + disable_bt_usb_ethernet_var + select_drivers CONFIG_BRCMFMAC + select_brcm80211_driver CONFIG_BRCMFMAC CONFIG_BRCMUTIL + ;; + zd1211rw) + select_drivers CONFIG_COMPAT_ZD1211RW + disable staging + disable var_01 + ;; + b43) + disable staging + disable bt_usb_ethernet + disable eeprom + disable lib80211 + select_drivers CONFIG_B43 + ;; + rt2x00) + select_drivers CONFIG_RT2X00 + disable staging + disable_bt_usb_ethernet + disable var_02 + ;; + wl1251) + select_ti_drivers CONFIG_WL1251 + disable staging + disable var_01 + ;; + wl12xx) + select_ti_drivers CONFIG_WL12XX + disable staging + disable var_01 + ;; + wl18xx) + select_ti_drivers CONFIG_WL18XX + disable staging + disable var_01 + ;; + # Ethernet and Bluetooth drivers + atl1) + enable_only_ethernet + disable b44 + # echo -e "obj-\$(CONFIG_ATL1) += atlx/" > "$(get_makefile DRIVERS_NET_ATHEROS)" || die + CPD_ADD_ATHEROS+="obj-\$(CONFIG_ATL1) += atlx/\n" + ;; + atl2) + enable_only_ethernet + disable b44 + # echo -e "obj-\$(CONFIG_ATL2) += atlx/" > "$(get_makefile DRIVERS_NET_ATHEROS)" || die + CPD_ADD_ATHEROS+="obj-\$(CONFIG_ATL2) += atlx/\n" + ;; + atl1e) + enable_only_ethernet + disable b44 + # echo -e "obj-\$(CONFIG_ATL1E) += atl1e/" > "$(get_makefile DRIVERS_NET_ATHEROS)" || die + CPD_ADD_ATHEROS+="obj-\$(CONFIG_ATL1E) += atl1e/\n" + ;; + atl1c) + enable_only_ethernet + disable b44 + # echo -e "obj-\$(CONFIG_ATL1C) += atl1c/" > "$(get_makefile DRIVERS_NET_ATHEROS)" || die + CPD_ADD_ATHEROS+="obj-\$(CONFIG_ATL1C) += atl1c/\n" + ;; + alx) + enable_only_ethernet + disable b44 + # echo -e "obj-\$(CONFIG_ALX) += alx/" > "$(get_makefile DRIVERS_NET_ATHEROS)" || die + CPD_ADD_ATHEROS+="obj-\$(CONFIG_ALX) += alx/\n" + ;; + atlxx) + select_drivers CONFIG_ATL1 CONFIG_ATL2 CONFIG_ATL1E CONFIG_ALX + enable_only_ethernet + disable b44 + disable update-initramfs + ;; + bt) + select_drivers CONFIG_BT + disable var + disable ethernet + disable staging + disable 80211 + ;; + i915) + enable_only_drm + ;; + drm) + enable_only_drm + ;; + *) + ./$0 usage + die "Unsupported driver: ${arg}" + exit 1 + ;; + esac +done + +# special for ath9k_ap +# this filter is only applied when no other driver is selected +if [ "${CPD_SELECTED_DRIVERS}" == " ath9k_ap" ]; then + eval "CPD_COMPAT_CONFIG_CW+=\" CONFIG_COMPAT_ATH9K_RATE_CONTROL\"" \ + || die "Failed to apply special filter for ath9k_ap" +fi + +if [[ ! 
-f built-in.o ]]; then + if [[ "$1" != "restore" ]]; then + echo -e "${PURPLE}Processing new driver-select request...${NORMAL}" + fi +fi + +# This internal function checks if the first argument is contained in the rest +# of the arguments +function has { + local x=$1 + shift + local y + for y in "$@"; do + [ "${y}" = "${x}" ] && return 0 + done + return 1 +} + +# this internal function checks if both groups and single modules were selected +# this is not supported +function check_groups { + local mods= + local grps= + for CPD_MODULE in ${CPD_SELECTED_DRIVERS}; do + if has "${CPD_MODULE}" ${SUPPORTED_DRIVERS}; then + mods+=" ${CPD_MODULE}" + else + grps+=" ${CPD_MODULE}" + fi + done + [ ! "${mods}" == '' ] && [ ! "${grps}" == '' ] && \ + die "Mixing group and single drivers is not supported by this script! Groups: <${grps}> Drivers: <${mods}>" +} +check_groups + +# set phase to write +CPD_PHASE=2 + +# Always backup the top level Makefile, unless restoring +if [[ "$1" != "restore" ]]; then + backup_file Makefile +fi + +# If a user selects a new driver, clean up the previous build first and +# restore the backed-up Makefiles. Otherwise we would keep filtering +# Makefiles that still carry the edits of a previous run. +if [ -f built-in.o ]; then + echo -e "${PURPLE}Old build found, going to clean this up first...${NORMAL}" + make clean + echo -e "${PURPLE}Restoring Makefiles...${NORMAL}" + ./$0 restore +fi + +# This function reads the configuration (disable-actions and filters) for each +# single active flag, then constructs and applies the common configuration set. +function src_configure { + local use_enabled_list="${CPD_SELECTED_DRIVERS}" + # compose common disable list for all flags + # 1st module/flag + local iuse1="$(echo $use_enabled_list | cut -d ' ' -f 1)" || die + eval "local disable_list=\$CPD_DISABLE_${iuse1}" || die + local iuse + local dis + for iuse in ${use_enabled_list}; do + if [ "${iuse}" != "${iuse1}" ]; then + local disable_list_new='' + eval "local disable_list_other=\$CPD_DISABLE_${iuse}" || die + for dis in ${disable_list}; do + has "${dis}" ${disable_list_other} && \ + disable_list_new+=" ${dis}" + done + disable_list="${disable_list_new}" + fi + done + # sort and remove duplicates + disable_list=$(printf '%s\n' ${disable_list} | sort -u | tr '\n' ' ') || die + + # prepend to atheros Makefile + if [ "${CPD_ADD_ATHEROS}" != '' ]; then + # ensure that backup file exists + backup_file "$(get_makefile DRIVERS_NET_ATHEROS)" + # prepend using backup + echo -e "${CPD_ADD_ATHEROS}"|cat - \ + "$(get_makefile DRIVERS_NET_ATHEROS).${BACKUP_EXT}" > \ + "$(get_makefile DRIVERS_NET_ATHEROS)" \ + || die "Failed to prepend to atheros Makefile" + echo -e "Prepended to atheros Makefile ...${NORMAL}" + fi + # execute all filters for the Makefiles + local file + for file in ${CPD_MAKEFILES}; do + eval "local filter_list=\$CPD_${file}" || die + if [ -n "${filter_list}" ]; then + # sort and remove duplicates + filter_list=$(printf '%s\n' ${filter_list} | sort -u | tr '\n' ' ')\ + || die + echo -e "Filtering ${CYAN}$(get_makefile ${file})${NORMAL} for: ${CYAN}${filter_list}${NORMAL}" + select_drivers_from_makefile "${file}" "${filter_list}" || die + fi + done + # execute common disable list + echo -e "Common disable list: ${CYAN}${disable_list}${NORMAL}" + for dis in ${disable_list}; do + echo -e "Running disable function: ${CYAN}disable_${dis}${NORMAL}" + eval "disable_${dis}" || die + done +} + +# call src_configure ... 
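+# Worked example for the intersection above (hypothetical invocation with
+# "ath5k wl1251"): the configure phase registers
+#   CPD_DISABLE_ath5k="staging bt_usb_ethernet var_03 var_01"
+#   CPD_DISABLE_wl1251="staging var_01"
+# Only the shared entries survive, so after sort -u src_configure runs just
+# disable_staging and disable_var_01; a disable step requested by one driver
+# but not by the other is skipped, keeping every selected driver buildable.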
+src_configure || die "Failed on src_configure ..." diff --git a/sys-kernel/compat-drivers-alx/files/ipw2200-inject.3.4.6.patch b/sys-kernel/compat-drivers-alx/files/ipw2200-inject.3.4.6.patch new file mode 100644 index 00000000..941bbc50 --- /dev/null +++ b/sys-kernel/compat-drivers-alx/files/ipw2200-inject.3.4.6.patch @@ -0,0 +1,120 @@ +diff -urN linux-2.6.36-gentoo.orig/drivers/net/wireless/ipw2x00/ipw2200.c linux-2.6.36-gentoo/drivers/net/wireless/ipw2x00/ipw2200.c +--- linux-2.6.36-gentoo.orig/drivers/net/wireless/ipw2x00/ipw2200.c 2010-10-21 04:30:22.000000000 +0800 ++++ linux-2.6.36-gentoo/drivers/net/wireless/ipw2x00/ipw2200.c 2010-12-08 22:22:41.937999976 +0800 +@@ -216,6 +216,7 @@ + static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf, + int len, int sync); + ++static int ipw_tx_skb(struct ipw_priv *priv, struct libipw_txb *txb, int pri); + static void ipw_tx_queue_free(struct ipw_priv *); + + static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *); +@@ -1911,6 +1912,63 @@ + static DEVICE_ATTR(net_stats, S_IWUSR | S_IRUGO, + show_net_stats, store_net_stats); + ++/* SYSFS INJECT */ ++static ssize_t store_inject(struct device *d, ++ struct device_attribute *attr, ++ const char *buf, size_t count) ++{ ++ struct ipw_priv *priv = dev_get_drvdata(d); ++ struct libipw_device *ieee = priv->ieee; ++ struct libipw_txb *txb; ++ struct sk_buff *skb_frag; ++ unsigned char *newbuf; ++ unsigned long flags; ++ ++ // should test (ieee->is_queue_full) ++ ++ // Fw only accepts data, so avoid accidental fw errors. ++ if ( (buf[0]&0x0c) != '\x08') { ++ //printk("ipw2200: inject: discarding non-data frame (type=%02X)\n",(int)(unsigned char)buf[0]); ++ return count; ++ } ++ ++ if (count>1500) { ++ count=1500; ++ printk("ipw2200: inject: cutting down frame to 1500 bytes\n"); ++ } ++ ++ spin_lock_irqsave(&priv->lock, flags); ++ ++ // Create a txb with one skb ++ txb = kmalloc(sizeof(struct libipw_txb) + sizeof(u8 *), GFP_ATOMIC); ++ if (!txb) ++ goto nosepuede; ++ txb->nr_frags=1; ++ txb->frag_size = ieee->tx_headroom; ++ txb->fragments[0]=__dev_alloc_skb(count + ieee->tx_headroom, GFP_ATOMIC); ++ if (!txb->fragments[0]) { ++ kfree(txb); ++ goto nosepuede; ++ } ++ skb_reserve(txb->fragments[0], ieee->tx_headroom); ++ txb->encrypted=0; ++ txb->payload_size=count; ++ skb_frag = txb->fragments[0]; ++ newbuf=skb_put(skb_frag, count); ++ ++ // copy data into txb->skb and send it ++ memcpy(newbuf, buf, count); ++ ++ ipw_tx_skb(priv, txb, 0); ++ ++nosepuede: ++ spin_unlock_irqrestore(&priv->lock, flags); ++ return count; ++} ++ ++ ++static DEVICE_ATTR(inject, S_IWUSR, NULL, store_inject); ++ + static ssize_t show_channels(struct device *d, + struct device_attribute *attr, + char *buf) +@@ -10214,7 +10272,6 @@ + modify to send one tfd per fragment instead of using chunking. otherwise + we need to heavily modify the libipw_skb_to_txb. 
+ */ +- + static int ipw_tx_skb(struct ipw_priv *priv, struct libipw_txb *txb, + int pri) + { +@@ -10544,6 +10601,12 @@ + mutex_lock(&priv->mutex); + priv->config |= CFG_CUSTOM_MAC; + memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN); ++ ++#ifdef CONFIG_IPW2200_PROMISCUOUS ++ if (rtap_iface) ++ memcpy(priv->prom_net_dev->dev_addr, addr->sa_data, ETH_ALEN); ++#endif ++ + printk(KERN_INFO "%s: Setting MAC to %pM\n", + priv->net_dev->name, priv->mac_addr); + schedule_work(&priv->adapter_restart); +@@ -11597,6 +11660,7 @@ + #ifdef CONFIG_IPW2200_PROMISCUOUS + &dev_attr_rtap_iface.attr, + &dev_attr_rtap_filter.attr, ++ &dev_attr_inject.attr, + #endif + NULL + }; +diff -urN linux-2.6.36-gentoo.orig/drivers/net/wireless/ipw2x00/ipw2200.h linux-2.6.36-gentoo/drivers/net/wireless/ipw2x00/ipw2200.h +--- linux-2.6.36-gentoo.orig/drivers/net/wireless/ipw2x00/ipw2200.h 2010-10-21 04:30:22.000000000 +0800 ++++ linux-2.6.36-gentoo/drivers/net/wireless/ipw2x00/ipw2200.h 2010-12-08 22:20:01.561000000 +0800 +@@ -2014,4 +2014,12 @@ + + #define IPW_MAX_CONFIG_RETRIES 10 + ++/* ++ * Hack to get code compiling on new kernels, the define below ++ * seems to be removed from the linux headers. ++ */ ++#ifndef MAC_ARG ++#define MAC_ARG(x) ((u8*)(x))[0],((u8*)(x))[1],((u8*)(x))[2],((u8*)(x))[3],((u8*)(x))[4],((u8*)(x))[5] ++#endif ++ + #endif /* __ipw2200_h__ */ diff --git a/sys-kernel/compat-drivers-alx/files/leds-disable-strict-3.6.6.patch b/sys-kernel/compat-drivers-alx/files/leds-disable-strict-3.6.6.patch new file mode 100644 index 00000000..fbc8d159 --- /dev/null +++ b/sys-kernel/compat-drivers-alx/files/leds-disable-strict-3.6.6.patch @@ -0,0 +1,30 @@ +--- drivers/net/wireless/rt2x00/rt2x00leds.c ++++ drivers/net/wireless/rt2x00/rt2x00leds.c +@@ -29,6 +29,7 @@ + #include "rt2x00.h" + #include "rt2x00lib.h" + ++#ifdef CONFIG_RT2X00_LIB_LEDS + void rt2x00leds_led_quality(struct rt2x00_dev *rt2x00dev, int rssi) + { + struct rt2x00_led *led = &rt2x00dev->led_qual; +@@ -244,3 +245,4 @@ + if (rt2x00dev->led_qual.flags & LED_REGISTERED) + rt2x00leds_resume_led(&rt2x00dev->led_qual); + } ++#endif /* CONFIG_RT2X00_LIB_LEDS */ +--- net/mac80211/led.c ++++ net/mac80211/led.c +@@ -12,6 +12,7 @@ + #include + #include "led.h" + ++#ifdef CONFIG_MAC80211_LEDS + void ieee80211_led_rx(struct ieee80211_local *local) + { + if (unlikely(!local->rx_led)) +@@ -307,3 +308,4 @@ + else + ieee80211_start_tpt_led_trig(local); + } ++#endif /* CONFIG_MAC80211_LEDS */ diff --git a/sys-kernel/compat-drivers-alx/files/leds-disable-strict-3.7_rc1_p6.patch b/sys-kernel/compat-drivers-alx/files/leds-disable-strict-3.7_rc1_p6.patch new file mode 100644 index 00000000..fbc8d159 --- /dev/null +++ b/sys-kernel/compat-drivers-alx/files/leds-disable-strict-3.7_rc1_p6.patch @@ -0,0 +1,30 @@ +--- drivers/net/wireless/rt2x00/rt2x00leds.c ++++ drivers/net/wireless/rt2x00/rt2x00leds.c +@@ -29,6 +29,7 @@ + #include "rt2x00.h" + #include "rt2x00lib.h" + ++#ifdef CONFIG_RT2X00_LIB_LEDS + void rt2x00leds_led_quality(struct rt2x00_dev *rt2x00dev, int rssi) + { + struct rt2x00_led *led = &rt2x00dev->led_qual; +@@ -244,3 +245,4 @@ + if (rt2x00dev->led_qual.flags & LED_REGISTERED) + rt2x00leds_resume_led(&rt2x00dev->led_qual); + } ++#endif /* CONFIG_RT2X00_LIB_LEDS */ +--- net/mac80211/led.c ++++ net/mac80211/led.c +@@ -12,6 +12,7 @@ + #include + #include "led.h" + ++#ifdef CONFIG_MAC80211_LEDS + void ieee80211_led_rx(struct ieee80211_local *local) + { + if (unlikely(!local->rx_led)) +@@ -307,3 +308,4 @@ + else + ieee80211_start_tpt_led_trig(local); + } ++#endif /* 
CONFIG_MAC80211_LEDS */ diff --git a/sys-kernel/compat-drivers-alx/files/leds-disable-strict-3.8.patch b/sys-kernel/compat-drivers-alx/files/leds-disable-strict-3.8.patch new file mode 100644 index 00000000..fbc8d159 --- /dev/null +++ b/sys-kernel/compat-drivers-alx/files/leds-disable-strict-3.8.patch @@ -0,0 +1,30 @@ +--- drivers/net/wireless/rt2x00/rt2x00leds.c ++++ drivers/net/wireless/rt2x00/rt2x00leds.c +@@ -29,6 +29,7 @@ + #include "rt2x00.h" + #include "rt2x00lib.h" + ++#ifdef CONFIG_RT2X00_LIB_LEDS + void rt2x00leds_led_quality(struct rt2x00_dev *rt2x00dev, int rssi) + { + struct rt2x00_led *led = &rt2x00dev->led_qual; +@@ -244,3 +245,4 @@ + if (rt2x00dev->led_qual.flags & LED_REGISTERED) + rt2x00leds_resume_led(&rt2x00dev->led_qual); + } ++#endif /* CONFIG_RT2X00_LIB_LEDS */ +--- net/mac80211/led.c ++++ net/mac80211/led.c +@@ -12,6 +12,7 @@ + #include + #include "led.h" + ++#ifdef CONFIG_MAC80211_LEDS + void ieee80211_led_rx(struct ieee80211_local *local) + { + if (unlikely(!local->rx_led)) +@@ -307,3 +308,4 @@ + else + ieee80211_start_tpt_led_trig(local); + } ++#endif /* CONFIG_MAC80211_LEDS */ diff --git a/sys-kernel/compat-drivers/Manifest b/sys-kernel/compat-drivers/Manifest new file mode 100644 index 00000000..bcd4268d --- /dev/null +++ b/sys-kernel/compat-drivers/Manifest @@ -0,0 +1,33 @@ +AUX 3.8-grsec/00-read-only.patch 604 SHA256 1b4109d2cb389e622252738390bd56a665f968c428accd905e6debcdf9c4e679 SHA512 34bd0312909701251317dfbc2b2ebd080f8d5e3dff18e3e0c80a48dbd6697d2877fe7e8a0fa52151f76a97a05368ebb4091166e4432dcaa3ce63469c2b830441 WHIRLPOOL 590b93713ffc2e59ce04b1c7b3a34f502eec32b25f01c45f8adab91cc1621c701a38ed0136ac5d052fe93d9d5738c0facf969189721b332a058db118835908bf +AUX 3.8-grsec/01-read-only.patch 271 SHA256 f389b6d40b0c5512af8b3c024c69aed69830ca22d53403f19be0a8834ee4573c SHA512 9d2f878aeb6ebe8c3d74aa038e3fec14829ac709c2f65b9c7fe6482dd1c15432cf73a40638059ccee9350d6305989b60883903fcd0c7545ec38c1d22e006c9c2 WHIRLPOOL 111a64490618524af32c56c9631d0ff7f29ae4ebc692422df63cf6eaae0a42c06d4be7186c3049b3f3827de9535e8c0b14f6c5fecf9d956dc1e92236a053e545 +AUX 3.8-grsec/02-read-only-ath.patch 8178 SHA256 2a779d9a72fdf6c26620733f2b7fd12e65cd50969e9689cf1cf48a26f6bbfad9 SHA512 288d359778430d9b84fb9c66afce8473142270646fea3dd53471b16b8b7a2ecf78c28d4dfdc27a914b2c7d2561cb5d2ac267e71f50ec315b2b984ad3b59dfec7 WHIRLPOOL 8e50052ad0dd875e086795fd787c1ab83c434ccb1c6bd2b0ff8eaef4392b9a26681a02a52c343a05fa655198d51d643ed4dd9cdd0f4f1415504219598d0fa95d +AUX 3.8-grsec/03-read-only-memory.patch 251 SHA256 927886f135b9e122a51d607d99837eef047b9fa50a4f2dfea9ee4ce6076192b1 SHA512 9f1efc869370655ffba01463d71c72f7a7e3b677f455fb4893f1e9898dd2896008da1f87b66d2803542a80fc073fcb3d94abcbf626bf7240d3235293b13e1c35 WHIRLPOOL e5dbf45e9552824a3ebb3bd96e55ec77d559b5eed8aba383b907902b7115031c7fcbdf57b94d920cb66230eb492d151f957c09a883c255c83139ca63438b2848 +AUX 3.8-grsec/04-read-only-brcm80211.patch 337 SHA256 752991b5f486c0dea194be247e473e9d9d2626f3e392246d3d1da44d4864478f SHA512 494f37fc79dd1c11ab6c51d95a7521d8276f2c4d41dfcabb491c3a7d0b79ff9b2512feeef9d166da9e6297ebf2778a3f773cb52d72a325cf151f78b2e28cfb97 WHIRLPOOL 4f96dced0f32328769ddb74e74ffb6e003c68d8054331c635971658909009079ebb810839a857474dae3b5ce12e2c6d5641646462027dafad974791e0b9e5944 +AUX 3.8-grsec/05-read-only-i915.patch 473 SHA256 bfb3cdf6d8793ce328b16f6808c062d72863ea40be9f3cd942a831548e3bdb22 SHA512 2ef16324332c257e326d48fce34a65f095236f518990b6fff0aa3022893d40668acce156085bf77b669629a40696db9098bb8504614bf60d2720bf78364f9a33 WHIRLPOOL 
23d44a4f3b2b0937e5b0769354a8a01ab7f4c19b0c38f85bcd2715a9fdf10b5755a1a54dfcb048dcd0badca7c99321ce91bf9c7cf23a7e3bcba54b28ac472612 +AUX 3.8-grsec/06-read-only-radeon.patch 1974 SHA256 7227c0ed3bdc8ce4c130b263a4ef3ed8b44db33a3961bb9ef9110a94dc8cf549 SHA512 c58cce6f8996ef3ac25d93c680db0f0ef3a555b9e9ad97a39d1e339135a7f449c039582d21d50fbe13cb0b9cd23c2a9fc1ff80143e4f681ebf3161377383f5dc WHIRLPOOL 9a6ab2740ca49b814c06dcd0a4533c1f33d4913470df7ac2884ed6f1a747b2c29b3899ad0b53190c832a307fea0cfb990e2068685b9205fd37a9d6ca3e4ae5a8 +AUX 3.8-grsec/07-read-only-wl1251.patch 296 SHA256 7d89a83529b7ae522a06c0a38959515b6de72fcc9412d9fa83ce744d803c438b SHA512 54fe4cc8b8f5088aa0b27fd0cf38d8795507ead26e7c8ad96b54e2d2b9d65ed6c76f29ee3359f7dee50883138511c34a4c2e2f22384aadc034f7dd46061b3307 WHIRLPOOL 4d4b53e6520c34284f6794b7afecdab36b2e5d97e12a9237e83c52fb23eee1facee829ed140e8d9b13e98e201e9e8a2cf2aa32853e9728504bd16cd3a01927e7 +AUX 3.8-grsec/08-read-only-ti.patch 327 SHA256 804cfe5f58fe4927933e64a45aa2c0b407675c22d8d80f75fe89904f7fa9a595 SHA512 387ffd8a7636d7055b65a3e2c2cb799bc514e270d24a44982da5c1bdae7160e96e7f829fc2de7323e2228f1cfd6e4517327d2c18b6df42f062a570aa5b5fcb21 WHIRLPOOL 546e75ed0970d82a6b343a82421c9368b1dc5410c4795f82cf3812845bd8d7c9f909b7955213144cd0727d62ea606af216c8dc552739bba818513ceded0ae339 +AUX 3.8-grsec/09-read-only-nouveau.patch 1039 SHA256 f797b74b8bdf86e5cd6f0c87ce1955c706f9ba82a8ee2e54f098bfc1d6952aac SHA512 27065f1c9e3fc427253ddd58fb6c11027f5cec9f356204cfa5635ae6d81d8e40b571c83ebc457600f4ce0c8d9e7401f1118523bdac6ca4e2c661dd843bb1e411 WHIRLPOOL 315756a38d0d99e696818b80adbf78ced2ca70c25a75fff3598de1f7716c036292b6cc0df9650b20d3f9981dda9d3f02ddb873033262f592869d817b665e8ba8 +AUX 3.8-grsec/20-version-disagrement-iwlwifi.patch 2832 SHA256 7f89d1c6c61735874a7c3efec1c51f7aaed05b7a59cb97e3e25e4929bbbcfc42 SHA512 2f156ba0626404234d77fe0309e8f9ef7bdaa67f2efd6c46fe3ebc7616b6b9ab27c9bb6fe0a57b97e63c3a1c31994731c569f9eb0489897e7120810325a1bd98 WHIRLPOOL e8aab7f6137b8660a6c8b78fa1900dfb600a2ca9ba14c3e1f5f75c8c24ed2d3aad5b5936c6f01a76d990871abafaab1e21d9e8522611fa2f57c79a45927bdaa6 +AUX 4002_mac80211-2.6.29-fix-tx-ctl-no-ack-retry-count.patch 559 SHA256 2c9222e0c6aaafabac091766c7e0a71442f0e9521ec1c65fc7024fbca60a3354 SHA512 9bf0b852a4ffc66afc12fc60c3fc683d689a45ff5e18470db68f25f001e14fc09c813aa01fec2b4583af1c19141e748a10c7dfd6022d727cb8cf245f6c33a2ff WHIRLPOOL 2efd4c3f2d58a833be803df6034e01d3601cb1891921fe2031c50a241a3f5e8a9e8d54aa0b29566fcc6b2bac71d48e504a3f88585a1049884e0a5986724048a1 +AUX 4004_zd1211rw-2.6.28.patch 1412 SHA256 6696295acb2a8d12a33208525cba9ce8bd2971c9c0adaabb31debfa9ec15c7be SHA512 2aa73fa8e87cb06220b589f528ba25fcec818a14a98a8d5f1173b12c287ee85e48981a48bbce772aa7fc24a33a833df2eb5bc7af0d569ea8c839090c481566ec WHIRLPOOL 4245c75995d7579b9fcc0aa1412b4f9bc4a3a924076aca24b466658740a99b0592c293420c39a616a6241ec4dce213496bf4317b73fd4b94ab1dcefbe5130382 +AUX ath_regd_optional.patch 906 SHA256 ebf117d071363e854505c18aef8c9af2652fd4a7b9cc5efe961cf5efb410dbd7 SHA512 dddc5e428f7d2a372ac17cce913c397ac408e20eb17af55633ea8984d6e69e8f7fe134436a4176606740d64fd66579c6a30a358cac393cb22f9a09d10f66e9d3 WHIRLPOOL b46164a1444d74f22d0e4f1e02ed556b52f87d5132871698ab93abd2076656e4ecadb7dc914d571df15a9bd18169ebaef67d4539a7919f1e2ffdd3df3b60cd90 +AUX compat-drivers-3.7_rc1_p6-grsec-warnings.patch 1261 SHA256 d960b976fe4e82beb2ceba3cc3eccdd064f54d9cc269f2b08d351c5a7fb528f7 SHA512 044020bacf79790f5ce10c0373f05568dfca2456f0087b3a04e5162b7602fb97371923acc6987d8732a64373ad77a4df65dae90f55e2bd7b20757f0b32d6af8c WHIRLPOOL 
d4b61222a97d496516575ff1bee4b9bec0d86fe9115a4ea4789b994fb1890444c568f866b4289f2ecaf9a399aec7453588aa6a842e7c48bce61320d48cd2eb86 +AUX compat-drivers-3.7_rc1_p6-grsec.patch 356575 SHA256 aefac7264cd36791af4194a4c9ed5a5a90a68d51c3ef1ba1af280c263ad394e2 SHA512 6aa48f042efac4c9a94ca7592f548e6dc5b8796c56a1827239b9d2e357ba23d14373b986e4cd789aedf07ce0a02eca2bdd3304a0bef751b8914367c9dc1b7fff WHIRLPOOL 986b202f601b4efb894ed8b0ab39f099af14b61deb687f670c83c3a587947e83b1180910d79bb701a6c5c92607ab529f6be229f840844cb08efaef165a83123f +AUX compat-drivers-3.8-ath6kl.patch 1415 SHA256 53510c3ee47144a77cf4514e016d62e43f8c92be1480f930b5ddbcc311be53fd SHA512 e88c0e7a0ebf4740643ab7a44b67e452a5ecea990ebca33ef608bec3072b0888f0d27e8bac1f452dd78efe889a7eff0a3ccd000b7d78ea50148e97297bf3590c WHIRLPOOL 47ed0e6e8364f48f7f2aa3dae9effba16a00dab43adbef110dd56e042455bcab112c064fd222b802faa45d24a15d18e92e52bf99b91f719c925dc39d90a6a483 +AUX compat-drivers-3.8-bt_tty.patch 1295 SHA256 e1307946a4ed6185b1850d95f3920ff747e584e6b23dad95220c8b0809fe6de1 SHA512 a4f5e68cb2e7e8972c9271d9519c688fe26cf0ba35b925ecd2270668d4ac8d22a25137596a07cc106cca53afe31e7771d77151b4e4361c22d3332ae5920062ee WHIRLPOOL ff3fe54d241411d5885e002571226247933d8e5d85e5786c2ffeb0ab6637b5c692c362beae5f85b0454ff01454f3277b11f339aeef0d9da4cb71d4356cc9101e +AUX compat-drivers-3.8-driver-select 22310 SHA256 31d7fc0eaa6f7a9528b1dbf948af9e9b4d5990f9a7fe3e060030a6108e0ce457 SHA512 67650594fe29a277bb1a2c1e0beebb11610b001a205abd14c42b996a49a47bd94eecf5ad265a3b53ecef3db7732624b37ecf9ef3dda62278c266d7cdb84a743b WHIRLPOOL 32e4e11eab76ea22555f481d9e15256dc09b1760b0e63a78d0f18a9a9dcfcb1b726ee90b7ea47ddf9a6f60e07c94e73115f6f2c6d35030ff9df79cdee69d6339 +AUX ipw2200-inject.3.4.6.patch 4173 SHA256 0b649bd7b6d2bf22667edc96949b5ab92cc7fb5c543b4385c17c5e0f47fe4109 SHA512 ebee3efda7b94898ea18a89f57c515d5237ef3c2a1eaf0bd13949ec4663a600eadede4655178355ac3f5b8ddc2eccc2cceb88eba0281ed3f614ada186a041463 WHIRLPOOL 15d94b3176719d006363f4d42a11c505643fdead8d521ccfb149cf5eee8851488aa006d4f8c750ffb5e81b23ff03d275e5fde781505e508467ff76303e612570 +AUX leds-disable-strict-3.6.6.patch 799 SHA256 e1b5947608d9c53263efd76aed404eb7e4107d81669faeb484f5d680e4cfb570 SHA512 6373efc6697b4589c15c3cd2cfd8f3faa06fe90eeeb4fa9acaf187ecc682fac394444949dd26c2dde71c0f45aedb1198374764b580b824ddc6bc1e937273b5f1 WHIRLPOOL 40bdef714aa0ff4069bb50abf434dff88610d97f570b80f36622d29a43301bf013a90e90cb7f3f8044c2bf53a09154b1033a6f53b1edf186513452fdb681d55f +AUX leds-disable-strict-3.7_rc1_p6.patch 799 SHA256 e1b5947608d9c53263efd76aed404eb7e4107d81669faeb484f5d680e4cfb570 SHA512 6373efc6697b4589c15c3cd2cfd8f3faa06fe90eeeb4fa9acaf187ecc682fac394444949dd26c2dde71c0f45aedb1198374764b580b824ddc6bc1e937273b5f1 WHIRLPOOL 40bdef714aa0ff4069bb50abf434dff88610d97f570b80f36622d29a43301bf013a90e90cb7f3f8044c2bf53a09154b1033a6f53b1edf186513452fdb681d55f +AUX leds-disable-strict-3.8.patch 799 SHA256 e1b5947608d9c53263efd76aed404eb7e4107d81669faeb484f5d680e4cfb570 SHA512 6373efc6697b4589c15c3cd2cfd8f3faa06fe90eeeb4fa9acaf187ecc682fac394444949dd26c2dde71c0f45aedb1198374764b580b824ddc6bc1e937273b5f1 WHIRLPOOL 40bdef714aa0ff4069bb50abf434dff88610d97f570b80f36622d29a43301bf013a90e90cb7f3f8044c2bf53a09154b1033a6f53b1edf186513452fdb681d55f +DIST compat-drivers-3.7-rc1-6.tar.gz 7877572 SHA256 688406f3d9a84246dc0d57b93b5335c9b0f276cc216e3dc7032ad6152bfff873 SHA512 1a6a747a325b19f03cfb28036232ee9186010a0cb78e753d2358d8932d81322516878df97c13e56cec6472515d30c5b99816f44c6331fc58a261cae4ab33ad43 WHIRLPOOL 
c33025583834726f1f2988d8eaab8c716b117d74b427b035000ae5d5b35310be486ac76fcdb0589effae6036b5a3058ae0859384e4e6c9466b659f9848036ac1 +DIST compat-drivers-3.8-1-u.tar.xz 5301964 SHA256 5208d0606ea0ebfa9ff80c7f690dc7c75d92074553b413a984c56ce51b820176 SHA512 78733504c5acc1f3c00a75e8a651ee558dc63f5ad98d8b12942c7e1b93a4451b1fca7b90cb09108b9445f79b8851e83c5f75d5ca7bbef9e1a041f1c3a6d03b2b WHIRLPOOL b9ae6ae9594a71f44e9ca25ca9036ca7ba82c69115e407e8f907cd41d1b61c4e41f2be4a4f6679be9b6b15c9e4fe4c8e6076561d525e91a25c357fccd0d05cf2 +DIST compat-drivers-3.8-rc5-1-u.tar.xz 5299080 SHA256 0952e211e7352bbb4b236853c751ab4ffffebf4923517316f9579e644e0eabc8 SHA512 bc80b129d709aaf7eb76d6ec388d4b76ce60eb9a61101eae8ad6a43c09097d927651f462810735f56e53b035f3b7ce3c422a2894ca4ccca234bba9c61850877c WHIRLPOOL f31192ad24b81f55d7bcf1b03ddc70b6e9d20acd8f106ab7edcfd8a461d44270d1deb1d87451f540e050c04bef2ec20524d75777357a7ebc56ef76baa7257d21 +DIST compat-drivers-3.8-rc7-1-u.tar.xz 5301132 SHA256 b53ddac5b0423d72d3945235637cbbb4559b5b527bd74d4cc9dd9e098efad4c6 SHA512 ee8f72d2acfb0aa41e90268fc68f1c42d87a7e14597f7866dd9c96a00dd8ea318b92a5b25e499d65e00b617600baa09cfc639379465e306f54f73689c3fc93c1 WHIRLPOOL 98a0b602e67f6f305f4795b8a53ad87bcabd282aa45d5e74c5c1add07084fb97fbac1d651e59e6f6a4838a4084f5d5418d59a7d49441613f91df33172b480582 +EBUILD compat-drivers-3.7_rc1_p6-r1.ebuild 6976 SHA256 82a760ef9a1448e865dd2936ab3a9824da2c2f6195bce0dec091a9c569a36f18 SHA512 dc907bea77600697458d478418fc719b046ec4e3d09443891ab7235ba40e256b5ac57f7a9355ac67ab25eca4c01e4502a4cda0667a961165549cc4c6758b4249 WHIRLPOOL 7d2361bbd4f6417f934fe7763a5d33a49da18d3a17be53d85fe422c6ce0888b889a136407edae05d10e855ac61d711800bf763a7ebe15e92f2871299b1bc51ed +EBUILD compat-drivers-3.7_rc1_p6.ebuild 7446 SHA256 1b80374769f13fb010ab0d288b5b03ff8a1eb3edde602f09cb8e242debda222d SHA512 0020e15c77d0b3692d3ffb38cd4268b245acb8f9995612f978c41df30b98eecb7d2dc989b7ea82fb5f589eb9abfb7c4b694217fafdad1db818c9b6b521edd9d1 WHIRLPOOL 967238ad9352b14ae10c6c47d71e4fd6aa44a2ec205f161b9c74f8a1112306b2ecad6c0bff2b11c5eec54c526629639e64073204c742555731ab2e94eb047130 +EBUILD compat-drivers-3.8.ebuild 6653 SHA256 4c1be0527b999f3f9af58d603314b4cca513c6d897aea2da878066b6a14f942b SHA512 b2e717baab520f1c2ec6dda8d27ad406508bf90dd1edecb961e711e8973cd4a627f0614ea69a5f05bed53af9735a568949096740d81a802f34cec707f87ada03 WHIRLPOOL 61d7fafaaad23da136b6fb853cab9e5f1b1740b8431edec8d3020b25f9b95cd6f1e39c0fb894fc5fd9675fad6f9f16ee5666424f883bdc32e83872dab356b05b +EBUILD compat-drivers-3.8_rc5.ebuild 6868 SHA256 5c9d8912bed6a553e855b305e6cc39dc15f983bade5b7763441b05999b241cfa SHA512 0c41ee5aa982194775f1a837cdb168564e9ec094676b2bbfff54465bbe3f296210f79eae2b8330d1c1fcbf7249d002d9a2b20c171e6b89f305768e8bf5dd54e5 WHIRLPOOL 2d38c9bc1491377d1f80d580292548d787c5370af29114e4845016a589649a30fdfee1d10fb7b4d2f1f3d37c7581f7a3ef4bfa51e623b7e784f33e62c41bdda2 +EBUILD compat-drivers-3.8_rc7.ebuild 7159 SHA256 4570a3d28d1787eb7e7ccdba41629ec311430d3c0801f53b643ba438cc19216d SHA512 919925a704f9e25ee59140c2e52b24af889ac85eae7f63b923c68faca90ecea860180e91687b26553aba8b0d5457cc7e506ddb9413fcf0841dcb7838448d53c8 WHIRLPOOL 5ca17d0e915d1c478ca9036e2d87240a43a7dbffa8e4a228305af0a6a36afbbb3b75ba4a931b2981b2035acc8467b365133e7c9c51e6a6ebeacd8f5a6484d32a +MISC metadata.xml 3542 SHA256 644b2f777a31515bdf4f60dc8a73b1b4f713b6568793095a7cded840385cb356 SHA512 8a6b6a8f13ad183d5dc17991a0b9f4919ec955b009190ef7b2cbfb0d718c4e61e99b23a782b80ee4441516b0bf4025341c578330302c1a1c5486aa4ed1c61078 WHIRLPOOL 
911eb253abf84ab62d36afcec8c1c74213d8295a13b71811ae5aed06df48e7efaea4dc2252b967c62cb26e659a9f8209b4e45b41777c52097c4d34a0b1eb25ff diff --git a/sys-kernel/compat-drivers/compat-drivers-3.7_rc1_p6-r1.ebuild b/sys-kernel/compat-drivers/compat-drivers-3.7_rc1_p6-r1.ebuild new file mode 100644 index 00000000..1495c198 --- /dev/null +++ b/sys-kernel/compat-drivers/compat-drivers-3.7_rc1_p6-r1.ebuild @@ -0,0 +1,177 @@ +# Copyright 1999-2013 Gentoo Foundation +# Distributed under the terms of the GNU General Public License v2 +# $Header: $ + +EAPI="5" + +# USE_EXPAND categories +CPD_USE_EXPAND="wifi ethernet various" +# These are officially supported +CPD_USE_EXPAND_wifi="ath5k ath9k ath9k_ap +ath9k_htc ath6kl b43 brcmsmac brcmfmac carl9170 rt2x00 wl1251 wl12xx zd1211rw" +# This might work (not officially supported) +CPD_USE_EXPAND_wifi+=" wl18xx" +# This might work (added by pentoo) +CPD_USE_EXPAND_wifi+=" b44" + +# These are officially supported +CPD_USE_EXPAND_ethernet="atl1 atl1c atl1e atl2" +# This might work (not officially supported) +CPD_USE_EXPAND_ethernet+=" atlxx" + +# These are officially supported +CPD_USE_EXPAND_various="i915" +# This might work (not officially supported) +CPD_USE_EXPAND_various+=" bt drm" +# This might work (added by pentoo) +CPD_USE_EXPAND_various+=" staging usbnet" + +inherit linux-mod linux-info versionator eutils compat-drivers-3.7 + +# upstream versioning, ex.: 3.7-rc1-6 +UPSTREAM_PVR="${PV//_/-}" && UPSTREAM_PVR="${UPSTREAM_PVR/-p/-}" +# ex.: 3.7-rc1 +UPSTREAM_PV=${UPSTREAM_PVR%-*} + +DESCRIPTION="Stable kernel pre-release wifi subsystem backport" +HOMEPAGE="http://backports.wiki.kernel.org" +# SRC_URI="http://www.kernel.org/pub/linux/kernel/projects/backports/stable/v${UPSTREAM_PV}/${PN}-${UPSTREAM_PVR}.tar.gz" +SRC_URI="mirror://kernel/linux/kernel/projects/backports/stable/v${UPSTREAM_PV}/${PN}-${UPSTREAM_PVR}.tar.gz" + +LICENSE="GPL-2" +SLOT="0" +KEYWORDS="~amd64 ~arm ~x86" + +IUSE="atheros_obey_crda debugfs debug-driver full-debug injection livecd loadmodules noleds pax_kernel" + +DEPEND="!net-wireless/compat-wireless-builder + !net-wireless/compat-wireless" +RDEPEND="${DEPEND} + >=sys-kernel/linux-firmware-20110219 + virtual/udev" + +S="${WORKDIR}/${PN}-${UPSTREAM_PVR}" + +RESTRICT="strip" + +CONFIG_CHECK="!DYNAMIC_FTRACE" + +pkg_setup() { + CONFIG_CHECK="~NET_SCHED" + CONFIG_CHECK="~IPW2200_PROMISCUOUS" + linux-mod_pkg_setup + kernel_is -lt 2 6 27 && die "kernel 2.6.27 or higher is required for compat wireless to be installed" + kernel_is -gt $(get_version_component_range 1) $(get_version_component_range 2) $(get_version_component_range 3) && die "The version of compat-wireless you are trying to install contains older modules than your kernel. Failing before downgrading your system." + + #these things are not optional + linux_chkconfig_module MAC80211 || die "CONFIG_MAC80211 must be built as a _module_ !" + linux_chkconfig_module CFG80211 || die "CONFIG_CFG80211 must be built as a _module_ !" 
+ linux_chkconfig_module LIBIPW || ewarn "CONFIG_LIBIPW really should be set or there will be no WEXT compat" + + if use compat_drivers_wifi_b43; then + linux_chkconfig_module SSB || die "You need to enable CONFIG_SSB or USE=-b43" + fi + if use compat_drivers_wifi_b44; then + linux_chkconfig_module SSB || die "You need to enable CONFIG_SSB or USE=-b44" + fi +} + +src_prepare() { + use pax_kernel && epatch "${FILESDIR}"/${P}-grsec.patch + use pax_kernel && epatch "${FILESDIR}"/${P}-grsec-warnings.patch + + #mcgrof said prep for inclusion in compat-wireless.git but this causes issues + #find "${S}" -name Makefile | xargs sed -i -e 's/export CONFIG_/export CONFIG_COMPAT_/' -e 's/COMPAT_COMPAT_/COMPAT_/' -e 's/CONFIG_COMPAT_CHECK/CONFIG_CHECK/' + #sed -i -e 's/export CONFIG_/export CONFIG_COMPAT_/' -e 's/COMPAT_COMPAT_/COMPAT_/' "${S}"/config.mk + + # CONFIG_CFG80211_REG_DEBUG=y + sed -i '/CFG80211_REG_DEBUG/s/^# *//' "${S}"/config.mk + + #this patch ignores the regulatory settings of an atheros card and uses what CRDA thinks is right + if use atheros_obey_crda; then + ewarn "You have enabled atheros_obey_crda which doesn't do what you think." + ewarn "This use flag will cause the eeprom of the card to be ignored and force" + ewarn "world roaming on the device until crda provides a valid regdomain." + ewarn "Short version, this is not a way to break the law, this will automatically" + ewarn "make your card less functional unless you set a proper regdomain with iw/crda." + epatch "${FILESDIR}"/ath_regd_optional.patch + fi + + if use injection; then + epatch "${FILESDIR}"/4002_mac80211-2.6.29-fix-tx-ctl-no-ack-retry-count.patch + epatch "${FILESDIR}"/4004_zd1211rw-2.6.28.patch + # epatch "${FILESDIR}"/mac80211.compat08082009.wl_frag+ack_v1.patch + # epatch "${FILESDIR}"/4013-runtime-enable-disable-of-mac80211-packet-injection.patch + epatch "${FILESDIR}"/ipw2200-inject.3.4.6.patch + fi + if use noleds; then + sed -ir 's/^\(export CONFIG_.*_LEDS=\)y$/\1n/' config.mk + epatch "${FILESDIR}/leds-disable-strict-${PV}.patch" + fi + use debug-driver && sed -i '/DEBUG=y/s/^# *//' "${S}"/config.mk + use debugfs && sed -i '/DEBUGFS/s/^# *//' "${S}"/config.mk + if use full-debug; then + if use debug-driver ; then + sed -i '/CONFIG=/s/^# *//' "${S}"/config.mk + else + ewarn "Enabling full-debug includes debug-driver." 
+ sed -i '/DEBUG=/s/^# *//' "${S}"/config.mk + fi + fi + + #avoid annoying ACCESS DENIED sandbox errors + sed -i "s/\${MAKE} -C \${KLIB_BUILD} kernelversion/echo ${KV_FULL}/g" compat/scripts/gen-compat-config.sh || die "sed failed" + sed -i "s/shell \$(MAKE) -C \$(KLIB_BUILD) kernelversion/echo ${KV_FULL}/g" config.mk || die "sed failed" + sed -i "s/make -C \$KLIB_BUILD kernelversion/echo ${KV_FULL}/g" scripts/gen-compat-autoconf.sh || die "sed failed" +} + +src_compile() { + addpredict "${KERNEL_DIR}" + set_arch_to_kernel + emake KLIB_BUILD="${DESTDIR}"/lib/modules/"${KV_FULL}"/build || die "emake failed" +} + +src_install() { + for file in $(find -name \*.ko); do + insinto "/lib/modules/${KV_FULL}/updates/$(dirname ${file})" + doins "${file}" + done + dosbin scripts/athenable scripts/b43load scripts/iwl-enable \ + scripts/madwifi-unload scripts/athload scripts/iwl-load \ + scripts/b43enable scripts/unload.sh + + dodir /usr/lib/compat-wireless + exeinto /usr/lib/compat-wireless + doexe scripts/modlib.sh + + dodoc README.md + dodir /$(get_libdir)/udev/rules.d/ + insinto /$(get_libdir)/udev/rules.d/ + doins udev/50-compat_firmware.rules + exeinto /$(get_libdir)/udev/ + doexe udev/compat_firmware.sh +} + +pkg_postinst() { + update_depmod + update_moduledb + + if use !livecd; then + if use loadmodules; then + einfo "Attempting to unload modules..." + /usr/sbin/unload.sh 2>&1 | grep -E FATAL && ewarn "Unable to remove running modules, system may be unhappy, reboot HIGHLY recommended!" + einfo "Triggering automatic reload of needed modules..." + /sbin/udevadm trigger + einfo "We have attempted to load your new modules for you, this may fail horribly, or may just cause a network hiccup." + einfo "If you experience any issues reboot is the simplest course of action." + fi + fi + if use !loadmodules; then + einfo "You didn't USE=loadmodules but you can still attempt to switch to the new drivers without reboot." + einfo "Run 'unload.sh' then 'udevadm trigger' to cause udev to load the needed drivers." + einfo "If unload.sh fails for some reason you should be able to simply reboot to fix everything and load the new modules." 
+ fi +} + +pkg_postrm() { + remove_moduledb +} diff --git a/sys-kernel/compat-drivers/compat-drivers-3.7_rc1_p6.ebuild b/sys-kernel/compat-drivers/compat-drivers-3.7_rc1_p6.ebuild new file mode 100644 index 00000000..1b693791 --- /dev/null +++ b/sys-kernel/compat-drivers/compat-drivers-3.7_rc1_p6.ebuild @@ -0,0 +1,185 @@ +# Copyright 1999-2013 Gentoo Foundation +# Distributed under the terms of the GNU General Public License v2 +# $Header: $ + +EAPI="5" +inherit linux-mod linux-info versionator eutils + +# upstream versioning, ex.: 3.7-rc1-6 +UPSTREAM_PVR="${PV//_/-}" && UPSTREAM_PVR="${UPSTREAM_PVR/-p/-}" +# ex.: 3.7-rc1 +UPSTREAM_PV=${UPSTREAM_PVR%-*} + +DESCRIPTION="Stable kernel pre-release wifi subsystem backport" +HOMEPAGE="http://backports.wiki.kernel.org" +# SRC_URI="http://www.kernel.org/pub/linux/kernel/projects/backports/stable/v${UPSTREAM_PV}/${PN}-${UPSTREAM_PVR}.tar.gz" +SRC_URI="mirror://kernel/linux/kernel/projects/backports/stable/v${UPSTREAM_PV}/${PN}-${UPSTREAM_PVR}.tar.gz" + +LICENSE="GPL-2" +SLOT="0" +KEYWORDS="~amd64 ~arm ~x86" +IUSE="+alx +ath9k_htc atheros_obey_crda bluetooth b43 b44 debugfs debug-driver full-debug injection livecd loadmodules noleds pax_kernel" + +DEPEND="!net-wireless/compat-wireless-builder + !net-wireless/compat-wireless" +RDEPEND="${DEPEND} + >=sys-kernel/linux-firmware-20110219 + virtual/udev" + +S="${WORKDIR}/${PN}-${UPSTREAM_PVR}" + +RESTRICT="strip" + +CONFIG_CHECK="!DYNAMIC_FTRACE" + +pkg_setup() { + CONFIG_CHECK="~NET_SCHED" + CONFIG_CHECK="~IPW2200_PROMISCUOUS" + linux-mod_pkg_setup + kernel_is -lt 2 6 27 && die "kernel 2.6.27 or higher is required for compat wireless to be installed" + kernel_is -gt $(get_version_component_range 1) $(get_version_component_range 2) $(get_version_component_range 3) && die "The version of compat-wireless you are trying to install contains older modules than your kernel. Failing before downgrading your system." + + #these things are not optional + linux_chkconfig_module MAC80211 || die "CONFIG_MAC80211 must be built as a _module_ !" + linux_chkconfig_module CFG80211 || die "CONFIG_CFG80211 must be built as a _module_ !" + linux_chkconfig_module LIBIPW || ewarn "CONFIG_LIBIPW really should be set or there will be no WEXT compat" + + if use b43; then + linux_chkconfig_module SSB || die "You need to enable CONFIG_SSB or USE=-b43" + fi + if use b44; then + linux_chkconfig_module SSB || die "You need to enable CONFIG_SSB or USE=-b44" + fi +} + +src_prepare() { + use pax_kernel && epatch "${FILESDIR}"/${P}-grsec.patch + use pax_kernel && epatch "${FILESDIR}"/${P}-grsec-warnings.patch + + #mcgrof said prep for inclusion in compat-wireless.git but this causes issues + #find "${S}" -name Makefile | xargs sed -i -e 's/export CONFIG_/export CONFIG_COMPAT_/' -e 's/COMPAT_COMPAT_/COMPAT_/' -e 's/CONFIG_COMPAT_CHECK/CONFIG_CHECK/' + #sed -i -e 's/export CONFIG_/export CONFIG_COMPAT_/' -e 's/COMPAT_COMPAT_/COMPAT_/' "${S}"/config.mk + + # CONFIG_CFG80211_REG_DEBUG=y + sed -i '/CFG80211_REG_DEBUG/s/^# *//' "${S}"/config.mk + + #this patch ignores the regulatory settings of an atheros card and uses what CRDA thinks is right + if use atheros_obey_crda; then + ewarn "You have enabled atheros_obey_crda which doesn't do what you think." + ewarn "This use flag will cause the eeprom of the card to be ignored and force" + ewarn "world roaming on the device until crda provides a valid regdomain." 
+ ewarn "Short version, this is not a way to break the law, this will automatically" + ewarn "make your card less functional unless you set a proper regdomain with iw/crda." + epatch "${FILESDIR}"/ath_regd_optional.patch + fi + + if use injection; then + epatch "${FILESDIR}"/4002_mac80211-2.6.29-fix-tx-ctl-no-ack-retry-count.patch + epatch "${FILESDIR}"/4004_zd1211rw-2.6.28.patch + # epatch "${FILESDIR}"/mac80211.compat08082009.wl_frag+ack_v1.patch + # epatch "${FILESDIR}"/4013-runtime-enable-disable-of-mac80211-packet-injection.patch + epatch "${FILESDIR}"/ipw2200-inject.3.4.6.patch + fi + if use noleds; then + sed -ir 's/^\(export CONFIG_.*_LEDS=\)y$/\1n/' config.mk + epatch "${FILESDIR}/leds-disable-strict-${PV}.patch" + fi + use debug-driver && sed -i '/DEBUG=y/s/^# *//' "${S}"/config.mk + use debugfs && sed -i '/DEBUGFS/s/^# *//' "${S}"/config.mk + if use full-debug; then + if use debug-driver ; then + sed -i '/CONFIG=/s/^# *//' "${S}"/config.mk + else + ewarn "Enabling full-debug includes debug-driver." + sed -i '/DEBUG=/s/^# *//' "${S}"/config.mk + fi + fi +# Disable B44 ethernet driver + if ! use b44; then + sed -i '/B44=/s/ */#/' "${S}"/config.mk || die "unable to disable B44 driver" + sed -i '/B44_PCI=/s/ */#/' "${S}"/config.mk || die "unable to disable B44 driver" + fi + +# Disable B43 driver + if ! use b43; then + sed -i '/B43=/s/ */#/' "${S}"/config.mk || die "unable to disable B43 driver" + sed -i '/B43_PCI_AUTOSELECT=/s/ */#/' "${S}"/config.mk || die "unable to disable B43 driver" + #CONFIG_B43LEGACY= + fi + +# fixme: there are more bluethooth settings in the config.mk + if ! use bluetooth; then + sed -i '/COMPAT_BLUETOOTH=/s/ */#/' "${S}"/config.mk || die "unable to disable bluetooth driver" + sed -i '/COMPAT_BLUETOOTH_MODULES=/s/ */#/' "${S}"/config.mk || die "unable to bluetooth B44 driver" + fi + + #enable alx atheros ethernet driver + if use alx; then + sed -i 's/ALX=n/ALX=m/' "${S}"/config.mk || die "Failed to enable Atheros ALX driver" + else + sed -i 's/ALX=m/ALX=n/' "${S}"/config.mk || die "Failed to disable Atheros ALX driver" + fi + + if use ath9k_htc; then + sed -i 's/ATH9K_HTC=n/ATH9K_HTC=m/' "${S}"/config.mk || die "Failed to enable Atheros 9k htc driver" + else + sed -i 's/ATH9K_HTC=m/ATH9K_HTC=n/' "${S}"/config.mk || die "Failed to disable Atheros 9k htc driver" + fi + + #avoid annoying ACCESS DENIED sandbox errors + sed -i "s/\${MAKE} -C \${KLIB_BUILD} kernelversion/echo ${KV_FULL}/g" compat/scripts/gen-compat-config.sh || die "sed failed" + sed -i "s/shell \$(MAKE) -C \$(KLIB_BUILD) kernelversion/echo ${KV_FULL}/g" config.mk || die "sed failed" + sed -i "s/make -C \$KLIB_BUILD kernelversion/echo ${KV_FULL}/g" scripts/gen-compat-autoconf.sh || die "sed failed" +} + +src_compile() { + addpredict "${KERNEL_DIR}" + set_arch_to_kernel + emake KLIB_BUILD="${DESTDIR}"/lib/modules/"${KV_FULL}"/build || die "emake failed" +} + +src_install() { + for file in $(find -name \*.ko); do + insinto "/lib/modules/${KV_FULL}/updates/$(dirname ${file})" + doins "${file}" + done + dosbin scripts/athenable scripts/b43load scripts/iwl-enable \ + scripts/madwifi-unload scripts/athload scripts/iwl-load \ + scripts/b43enable scripts/unload.sh + + dodir /usr/lib/compat-wireless + exeinto /usr/lib/compat-wireless + doexe scripts/modlib.sh + + dodoc README.md + dodir /$(get_libdir)/udev/rules.d/ + insinto /$(get_libdir)/udev/rules.d/ + doins udev/50-compat_firmware.rules + exeinto /$(get_libdir)/udev/ + doexe udev/compat_firmware.sh +} + +pkg_postinst() { + update_depmod + 
update_moduledb + + if use !livecd; then + if use loadmodules; then + einfo "Attempting to unload modules..." + /usr/sbin/unload.sh 2>&1 | grep -E FATAL && ewarn "Unable to remove running modules, system may be unhappy, reboot HIGHLY recommended!" + einfo "Triggering automatic reload of needed modules..." + /sbin/udevadm trigger + einfo "We have attempted to load your new modules for you, this may fail horribly, or may just cause a network hiccup." + einfo "If you experience any issues reboot is the simplest course of action." + fi + fi + if use !loadmodules; then + einfo "You didn't USE=loadmodules but you can still attempt to switch to the new drivers without reboot." + einfo "Run 'unload.sh' then 'udevadm trigger' to cause udev to load the needed drivers." + einfo "If unload.sh fails for some reason you should be able to simply reboot to fix everything and load the new modules." + fi +} + +pkg_postrm() { + remove_moduledb +} diff --git a/sys-kernel/compat-drivers/compat-drivers-3.8.ebuild b/sys-kernel/compat-drivers/compat-drivers-3.8.ebuild new file mode 100644 index 00000000..ab7c1a96 --- /dev/null +++ b/sys-kernel/compat-drivers/compat-drivers-3.8.ebuild @@ -0,0 +1,174 @@ +# Copyright 1999-2013 Gentoo Foundation +# Distributed under the terms of the GNU General Public License v2 +# $Header: $ + +EAPI="5" + +# USE_EXPAND categories +CPD_USE_EXPAND="wifi ethernet various" +# These are officially supported +CPD_USE_EXPAND_wifi="ath5k ath9k ath9k_ap ath9k_htc ath6kl b43 brcmsmac brcmfmac carl9170 rt2x00 wil6210 wl1251 wl12xx zd1211rw" + +# These are officially supported +CPD_USE_EXPAND_ethernet="alx atl1 atl1c atl1e atl2" + +# These are officially supported +CPD_USE_EXPAND_various="i915" + +inherit linux-mod linux-info versionator eutils compat-drivers-3.8-r1 + +# upstream versioning, ex.: 3.7-rc1-6 +UPSTREAM_PVR="${PV//_/-}" && UPSTREAM_PVR="${UPSTREAM_PVR/-p/-}" +# ex.: 3.7-rc1 +UPSTREAM_PV=${UPSTREAM_PVR%-*} + +DESCRIPTION="Stable kernel pre-release wifi subsystem backport" +HOMEPAGE="http://backports.wiki.kernel.org" +SRC_URI="mirror://kernel/linux/kernel/projects/backports/stable/v${UPSTREAM_PVR}/${PN}-${UPSTREAM_PVR}-1-u.tar.xz" + +LICENSE="GPL-2" +SLOT="0" +KEYWORDS="" + +IUSE="atheros_obey_crda debugfs debug-driver full-debug injection livecd loadmodules noleds pax_kernel" + +DEPEND="!net-wireless/compat-wireless-builder + !net-wireless/compat-wireless" +RDEPEND="${DEPEND} + >=sys-kernel/linux-firmware-20110219 + virtual/udev" + +S="${WORKDIR}/${PN}-${UPSTREAM_PVR}-1-u" + +RESTRICT="strip" + +CONFIG_CHECK="!DYNAMIC_FTRACE" + +pkg_setup() { + CONFIG_CHECK="~NET_SCHED" + CONFIG_CHECK="~IPW2200_PROMISCUOUS" + linux-mod_pkg_setup + kernel_is -lt 2 6 27 && die "kernel 2.6.27 or higher is required for compat drivers to be installed" + kernel_is -gt $(get_version_component_range 1) $(get_version_component_range 2) $(get_version_component_range 3) && die "The version of compat drivers you are trying to install contains older modules than your kernel. Failing before downgrading your system." + + #these things are not optional + linux_chkconfig_module MAC80211 || die "CONFIG_MAC80211 must be built as a _module_ !" + linux_chkconfig_module CFG80211 || die "CONFIG_CFG80211 must be built as a _module_ !" 
+ linux_chkconfig_module LIBIPW || ewarn "CONFIG_LIBIPW really should be set or there will be no WEXT compat" + + if use compat_drivers_wifi_b43; then + linux_chkconfig_module SSB || die "You need to enable CONFIG_SSB or USE=-b43" + fi +} + +src_prepare() { + if use pax_kernel; then + for gpatch in "${FILESDIR}"/3.8-grsec/*; do + epatch "${gpatch}" + done + fi + # upstream might want to see this + epatch "${FILESDIR}"/${PN}-3.8-bt_tty.patch + epatch "${FILESDIR}"/${PN}-3.8-ath6kl.patch + + #mcgrof said prep for inclusion in compat-wireless.git but this causes issues + #find "${S}" -name Makefile | xargs sed -i -e 's/export CONFIG_/export CONFIG_COMPAT_/' -e 's/COMPAT_COMPAT_/COMPAT_/' -e 's/CONFIG_COMPAT_CHECK/CONFIG_CHECK/' + #sed -i -e 's/export CONFIG_/export CONFIG_COMPAT_/' -e 's/COMPAT_COMPAT_/COMPAT_/' "${S}"/config.mk + + # CONFIG_CFG80211_REG_DEBUG=y + sed -i '/CFG80211_REG_DEBUG/s/^# *//' "${S}"/config.mk + + #this patch ignores the regulatory settings of an atheros card and uses what CRDA thinks is right + if use atheros_obey_crda; then + ewarn "You have enabled atheros_obey_crda which doesn't do what you think." + ewarn "This use flag will cause the eeprom of the card to be ignored and force" + ewarn "world roaming on the device until crda provides a valid regdomain." + ewarn "Short version, this is not a way to break the law, this will automatically" + ewarn "make your card less functional unless you set a proper regdomain with iw/crda." + epatch "${FILESDIR}"/ath_regd_optional.patch + fi + + if use injection; then + epatch "${FILESDIR}"/4002_mac80211-2.6.29-fix-tx-ctl-no-ack-retry-count.patch + epatch "${FILESDIR}"/4004_zd1211rw-2.6.28.patch + # epatch "${FILESDIR}"/mac80211.compat08082009.wl_frag+ack_v1.patch + # epatch "${FILESDIR}"/4013-runtime-enable-disable-of-mac80211-packet-injection.patch + epatch "${FILESDIR}"/ipw2200-inject.3.4.6.patch + fi + if use noleds; then + sed -ir 's/^\(export CONFIG_.*_LEDS=\)y$/\1n/' config.mk + epatch "${FILESDIR}/leds-disable-strict-${PV}.patch" + fi + use debug-driver && sed -i '/DEBUG=y/s/^# *//' "${S}"/config.mk + use debugfs && sed -i '/DEBUGFS/s/^# *//' "${S}"/config.mk + if use full-debug; then + if use debug-driver ; then + sed -i '/CONFIG=/s/^# *//' "${S}"/config.mk + else + ewarn "Enabling full-debug includes debug-driver." 
+ sed -i '/DEBUG=/s/^# *//' "${S}"/config.mk + fi + fi + + #avoid annoying ACCESS DENIED sandbox errors + sed -i "s/\${MAKE} -C \${KLIB_BUILD} kernelversion/echo ${KV_FULL}/g" compat/scripts/gen-compat-config.sh || die "sed failed" + sed -i "s/shell \$(MAKE) -C \$(KLIB_BUILD) kernelversion/echo ${KV_FULL}/g" config.mk || die "sed failed" + sed -i "s/make -C \$KLIB_BUILD kernelversion/echo ${KV_FULL}/g" scripts/gen-compat-autoconf.sh || die "sed failed" + + # replace scripts/driver-select + # TODO: convince upstream to adopt this script + cp "${FILESDIR}/${PF}-driver-select" scripts/driver-select || \ + die "Replacing driver-select failed" +} + +src_compile() { + addpredict "${KERNEL_DIR}" + set_arch_to_kernel + emake KLIB_BUILD="${DESTDIR}"/lib/modules/"${KV_FULL}"/build || die "emake failed" +} + +src_install() { + for file in $(find -name \*.ko); do + insinto "/lib/modules/${KV_FULL}/updates/$(dirname ${file})" + doins "${file}" + done + dosbin scripts/athenable scripts/b43load scripts/iwl-enable \ + scripts/madwifi-unload scripts/athload scripts/iwl-load \ + scripts/b43enable scripts/unload.sh + + dodir /usr/lib/compat-wireless + exeinto /usr/lib/compat-wireless + doexe scripts/modlib.sh + + dodoc README.md + dodir /$(get_libdir)/udev/rules.d/ + insinto /$(get_libdir)/udev/rules.d/ + doins udev/50-compat_firmware.rules + exeinto /$(get_libdir)/udev/ + doexe udev/compat_firmware.sh +} + +pkg_postinst() { + update_depmod + update_moduledb + + if use !livecd; then + if use loadmodules; then + einfo "Attempting to unload modules..." + /usr/sbin/unload.sh 2>&1 | grep -E FATAL && ewarn "Unable to remove running modules, system may be unhappy, reboot HIGHLY recommended!" + einfo "Triggering automatic reload of needed modules..." + /sbin/udevadm trigger + einfo "We have attempted to load your new modules for you, this may fail horribly, or may just cause a network hiccup." + einfo "If you experience any issues reboot is the simplest course of action." + fi + fi + if use !loadmodules; then + einfo "You didn't USE=loadmodules but you can still attempt to switch to the new drivers without reboot." + einfo "Run 'unload.sh' then 'udevadm trigger' to cause udev to load the needed drivers." + einfo "If unload.sh fails for some reason you should be able to simply reboot to fix everything and load the new modules." 
+ fi +} + +pkg_postrm() { + remove_moduledb +} diff --git a/sys-kernel/compat-drivers/compat-drivers-3.8_rc5.ebuild b/sys-kernel/compat-drivers/compat-drivers-3.8_rc5.ebuild new file mode 100644 index 00000000..710ad2b8 --- /dev/null +++ b/sys-kernel/compat-drivers/compat-drivers-3.8_rc5.ebuild @@ -0,0 +1,176 @@ +# Copyright 1999-2013 Gentoo Foundation +# Distributed under the terms of the GNU General Public License v2 +# $Header: $ + +EAPI="5" + +# USE_EXPAND categories +CPD_USE_EXPAND="wifi ethernet various" +# These are officially supported +CPD_USE_EXPAND_wifi="ath5k ath9k ath9k_ap ath9k_htc ath6kl b43 brcmsmac brcmfmac carl9170 rt2x00 wl1251 wl12xx zd1211rw" +# This might work (not officially supported) +CPD_USE_EXPAND_wifi+=" wl18xx" +# This might work (added by pentoo) +CPD_USE_EXPAND_wifi+=" b44" + +# These are officially supported +CPD_USE_EXPAND_ethernet="atl1 atl1c atl1e atl2" +# This might work (not officially supported) +CPD_USE_EXPAND_ethernet+=" atlxx" + +# These are officially supported +CPD_USE_EXPAND_various="i915" +# This might work (not officially supported) +CPD_USE_EXPAND_various+=" bt drm" +# This might work (added by pentoo) +CPD_USE_EXPAND_various+=" staging usbnet" + +inherit linux-mod linux-info versionator eutils compat-drivers-3.7 + +# upstream versioning, ex.: 3.7-rc1-6 +UPSTREAM_PVR="${PV//_/-}" && UPSTREAM_PVR="${UPSTREAM_PVR/-p/-}" +# ex.: 3.7-rc1 +UPSTREAM_PV=${UPSTREAM_PVR%-*} + +DESCRIPTION="Stable kernel pre-release wifi subsystem backport" +HOMEPAGE="http://backports.wiki.kernel.org" +SRC_URI="mirror://kernel/linux/kernel/projects/backports/stable/v${UPSTREAM_PVR}/${PN}-${UPSTREAM_PVR}-1-u.tar.xz" + +LICENSE="GPL-2" +SLOT="0" +KEYWORDS="" + +IUSE="atheros_obey_crda debugfs debug-driver full-debug injection livecd loadmodules noleds pax_kernel" + +DEPEND="!net-wireless/compat-wireless-builder + !net-wireless/compat-wireless" +RDEPEND="${DEPEND} + >=sys-kernel/linux-firmware-20110219 + virtual/udev" + +S="${WORKDIR}/${PN}-${UPSTREAM_PVR}-1-u" + +RESTRICT="strip" + +CONFIG_CHECK="!DYNAMIC_FTRACE" + +pkg_setup() { + CONFIG_CHECK="~NET_SCHED" + CONFIG_CHECK="~IPW2200_PROMISCUOUS" + linux-mod_pkg_setup + kernel_is -lt 2 6 27 && die "kernel 2.6.27 or higher is required for compat drivers to be installed" + kernel_is -gt $(get_version_component_range 1) $(get_version_component_range 2) $(get_version_component_range 3) && die "The version of compat drivers you are trying to install contains older modules than your kernel. Failing before downgrading your system." + + #these things are not optional + linux_chkconfig_module MAC80211 || die "CONFIG_MAC80211 must be built as a _module_ !" + linux_chkconfig_module CFG80211 || die "CONFIG_CFG80211 must be built as a _module_ !" 
+ linux_chkconfig_module LIBIPW || ewarn "CONFIG_LIBIPW really should be set or there will be no WEXT compat" + + if use compat_drivers_wifi_b43; then + linux_chkconfig_module SSB || die "You need to enable CONFIG_SSB or USE=-b43" + fi + if use compat_drivers_wifi_b44; then + linux_chkconfig_module SSB || die "You need to enable CONFIG_SSB or USE=-b44" + fi +} + +src_prepare() { + use pax_kernel && epatch "${FILESDIR}"/${PN}-3.7_rc1_p6-grsec.patch + use pax_kernel && epatch "${FILESDIR}"/${PN}-3.7_rc1_p6-grsec-warnings.patch + + #mcgrof said prep for inclusion in compat-wireless.git but this causes issues + #find "${S}" -name Makefile | xargs sed -i -e 's/export CONFIG_/export CONFIG_COMPAT_/' -e 's/COMPAT_COMPAT_/COMPAT_/' -e 's/CONFIG_COMPAT_CHECK/CONFIG_CHECK/' + #sed -i -e 's/export CONFIG_/export CONFIG_COMPAT_/' -e 's/COMPAT_COMPAT_/COMPAT_/' "${S}"/config.mk + + # CONFIG_CFG80211_REG_DEBUG=y + sed -i '/CFG80211_REG_DEBUG/s/^# *//' "${S}"/config.mk + + #this patch ignores the regulatory settings of an atheros card and uses what CRDA thinks is right + if use atheros_obey_crda; then + ewarn "You have enabled atheros_obey_crda which doesn't do what you think." + ewarn "This use flag will cause the eeprom of the card to be ignored and force" + ewarn "world roaming on the device until crda provides a valid regdomain." + ewarn "Short version, this is not a way to break the law, this will automatically" + ewarn "make your card less functional unless you set a proper regdomain with iw/crda." + epatch "${FILESDIR}"/ath_regd_optional.patch + fi + + if use injection; then + epatch "${FILESDIR}"/4002_mac80211-2.6.29-fix-tx-ctl-no-ack-retry-count.patch + epatch "${FILESDIR}"/4004_zd1211rw-2.6.28.patch + # epatch "${FILESDIR}"/mac80211.compat08082009.wl_frag+ack_v1.patch + # epatch "${FILESDIR}"/4013-runtime-enable-disable-of-mac80211-packet-injection.patch + epatch "${FILESDIR}"/ipw2200-inject.3.4.6.patch + fi + if use noleds; then + sed -ir 's/^\(export CONFIG_.*_LEDS=\)y$/\1n/' config.mk + epatch "${FILESDIR}/leds-disable-strict-${PV}.patch" + fi + use debug-driver && sed -i '/DEBUG=y/s/^# *//' "${S}"/config.mk + use debugfs && sed -i '/DEBUGFS/s/^# *//' "${S}"/config.mk + if use full-debug; then + if use debug-driver ; then + sed -i '/CONFIG=/s/^# *//' "${S}"/config.mk + else + ewarn "Enabling full-debug includes debug-driver." 
+ sed -i '/DEBUG=/s/^# *//' "${S}"/config.mk + fi + fi + + #avoid annoying ACCESS DENIED sandbox errors + sed -i "s/\${MAKE} -C \${KLIB_BUILD} kernelversion/echo ${KV_FULL}/g" compat/scripts/gen-compat-config.sh || die "sed failed" + sed -i "s/shell \$(MAKE) -C \$(KLIB_BUILD) kernelversion/echo ${KV_FULL}/g" config.mk || die "sed failed" + sed -i "s/make -C \$KLIB_BUILD kernelversion/echo ${KV_FULL}/g" scripts/gen-compat-autoconf.sh || die "sed failed" +} + +src_compile() { + addpredict "${KERNEL_DIR}" + set_arch_to_kernel + emake KLIB_BUILD="${DESTDIR}"/lib/modules/"${KV_FULL}"/build || die "emake failed" +} + +src_install() { + for file in $(find -name \*.ko); do + insinto "/lib/modules/${KV_FULL}/updates/$(dirname ${file})" + doins "${file}" + done + dosbin scripts/athenable scripts/b43load scripts/iwl-enable \ + scripts/madwifi-unload scripts/athload scripts/iwl-load \ + scripts/b43enable scripts/unload.sh + + dodir /usr/lib/compat-wireless + exeinto /usr/lib/compat-wireless + doexe scripts/modlib.sh + + dodoc README.md + dodir /$(get_libdir)/udev/rules.d/ + insinto /$(get_libdir)/udev/rules.d/ + doins udev/50-compat_firmware.rules + exeinto /$(get_libdir)/udev/ + doexe udev/compat_firmware.sh +} + +pkg_postinst() { + update_depmod + update_moduledb + + if use !livecd; then + if use loadmodules; then + einfo "Attempting to unload modules..." + /usr/sbin/unload.sh 2>&1 | grep -E FATAL && ewarn "Unable to remove running modules, system may be unhappy, reboot HIGHLY recommended!" + einfo "Triggering automatic reload of needed modules..." + /sbin/udevadm trigger + einfo "We have attempted to load your new modules for you, this may fail horribly, or may just cause a network hiccup." + einfo "If you experience any issues reboot is the simplest course of action." + fi + fi + if use !loadmodules; then + einfo "You didn't USE=loadmodules but you can still attempt to switch to the new drivers without reboot." + einfo "Run 'unload.sh' then 'udevadm trigger' to cause udev to load the needed drivers." + einfo "If unload.sh fails for some reason you should be able to simply reboot to fix everything and load the new modules." 
+ fi +} + +pkg_postrm() { + remove_moduledb +} diff --git a/sys-kernel/compat-drivers/compat-drivers-3.8_rc7.ebuild b/sys-kernel/compat-drivers/compat-drivers-3.8_rc7.ebuild new file mode 100644 index 00000000..48059958 --- /dev/null +++ b/sys-kernel/compat-drivers/compat-drivers-3.8_rc7.ebuild @@ -0,0 +1,184 @@ +# Copyright 1999-2013 Gentoo Foundation +# Distributed under the terms of the GNU General Public License v2 +# $Header: $ + +EAPI="5" + +# USE_EXPAND categories +CPD_USE_EXPAND="wifi ethernet various" +# These are officially supported +CPD_USE_EXPAND_wifi="ath5k ath9k ath9k_ap ath9k_htc ath6kl b43 brcmsmac brcmfmac carl9170 rt2x00 wil6210 wl1251 wl12xx zd1211rw" +# This might work (not officially supported) +CPD_USE_EXPAND_wifi+=" wl18xx" +# This might work (added by pentoo) +CPD_USE_EXPAND_wifi+=" b44" + +# These are officially supported +CPD_USE_EXPAND_ethernet="alx atl1 atl1c atl1e atl2" +# This might work (not officially supported) +CPD_USE_EXPAND_ethernet+=" atlxx" + +# These are officially supported +CPD_USE_EXPAND_various="i915" +# This might work (not officially supported) +CPD_USE_EXPAND_various+=" bt drm" +# This might work (added by pentoo) +CPD_USE_EXPAND_various+=" staging usbnet" + +inherit linux-mod linux-info versionator eutils compat-drivers-3.8 + +# upstream versioning, ex.: 3.7-rc1-6 +UPSTREAM_PVR="${PV//_/-}" && UPSTREAM_PVR="${UPSTREAM_PVR/-p/-}" +# ex.: 3.7-rc1 +UPSTREAM_PV=${UPSTREAM_PVR%-*} + +DESCRIPTION="Stable kernel pre-release wifi subsystem backport" +HOMEPAGE="http://backports.wiki.kernel.org" +SRC_URI="mirror://kernel/linux/kernel/projects/backports/stable/v${UPSTREAM_PVR}/${PN}-${UPSTREAM_PVR}-1-u.tar.xz" + +LICENSE="GPL-2" +SLOT="0" +KEYWORDS="" + +IUSE="atheros_obey_crda debugfs debug-driver full-debug injection livecd loadmodules noleds pax_kernel" + +DEPEND="!net-wireless/compat-wireless-builder + !net-wireless/compat-wireless" +RDEPEND="${DEPEND} + >=sys-kernel/linux-firmware-20110219 + virtual/udev" + +S="${WORKDIR}/${PN}-${UPSTREAM_PVR}-1-u" + +RESTRICT="strip" + +CONFIG_CHECK="!DYNAMIC_FTRACE" + +pkg_setup() { + CONFIG_CHECK="~NET_SCHED" + CONFIG_CHECK="~IPW2200_PROMISCUOUS" + linux-mod_pkg_setup + kernel_is -lt 2 6 27 && die "kernel 2.6.27 or higher is required for compat drivers to be installed" + kernel_is -gt $(get_version_component_range 1) $(get_version_component_range 2) $(get_version_component_range 3) && die "The version of compat drivers you are trying to install contains older modules than your kernel. Failing before downgrading your system." + + #these things are not optional + linux_chkconfig_module MAC80211 || die "CONFIG_MAC80211 must be built as a _module_ !" + linux_chkconfig_module CFG80211 || die "CONFIG_CFG80211 must be built as a _module_ !" 
+ linux_chkconfig_module LIBIPW || ewarn "CONFIG_LIBIPW really should be set or there will be no WEXT compat" + + if use compat_drivers_wifi_b43; then + linux_chkconfig_module SSB || die "You need to enable CONFIG_SSB or USE=-b43" + fi + if use compat_drivers_wifi_b44; then + linux_chkconfig_module SSB || die "You need to enable CONFIG_SSB or USE=-b44" + fi +} + +src_prepare() { + # use pax_kernel && epatch "${FILESDIR}"/${PN}-3.7_rc1_p6-grsec.patch + # use pax_kernel && epatch "${FILESDIR}"/${PN}-3.8-grsec-readonly.patch + if use pax_kernel; then + for gpatch in "${FILESDIR}"/3.8-grsec/*; do + epatch "${gpatch}" + done + fi + # upstream might want to see this + use pax_kernel && epatch "${FILESDIR}"/${PN}-3.8-bt_tty.patch + # use pax_kernel && epatch "${FILESDIR}"/${PN}-3.7_rc1_p6-grsec-warnings.patch + + #mcgrof said prep for inclusion in compat-wireless.git but this causes issues + #find "${S}" -name Makefile | xargs sed -i -e 's/export CONFIG_/export CONFIG_COMPAT_/' -e 's/COMPAT_COMPAT_/COMPAT_/' -e 's/CONFIG_COMPAT_CHECK/CONFIG_CHECK/' + #sed -i -e 's/export CONFIG_/export CONFIG_COMPAT_/' -e 's/COMPAT_COMPAT_/COMPAT_/' "${S}"/config.mk + + # CONFIG_CFG80211_REG_DEBUG=y + sed -i '/CFG80211_REG_DEBUG/s/^# *//' "${S}"/config.mk + + #this patch ignores the regulatory settings of an atheros card and uses what CRDA thinks is right + if use atheros_obey_crda; then + ewarn "You have enabled atheros_obey_crda which doesn't do what you think." + ewarn "This use flag will cause the eeprom of the card to be ignored and force" + ewarn "world roaming on the device until crda provides a valid regdomain." + ewarn "Short version, this is not a way to break the law, this will automatically" + ewarn "make your card less functional unless you set a proper regdomain with iw/crda." + epatch "${FILESDIR}"/ath_regd_optional.patch + fi + + if use injection; then + epatch "${FILESDIR}"/4002_mac80211-2.6.29-fix-tx-ctl-no-ack-retry-count.patch + epatch "${FILESDIR}"/4004_zd1211rw-2.6.28.patch + # epatch "${FILESDIR}"/mac80211.compat08082009.wl_frag+ack_v1.patch + # epatch "${FILESDIR}"/4013-runtime-enable-disable-of-mac80211-packet-injection.patch + epatch "${FILESDIR}"/ipw2200-inject.3.4.6.patch + fi + if use noleds; then + sed -ir 's/^\(export CONFIG_.*_LEDS=\)y$/\1n/' config.mk + epatch "${FILESDIR}/leds-disable-strict-${PV}.patch" + fi + use debug-driver && sed -i '/DEBUG=y/s/^# *//' "${S}"/config.mk + use debugfs && sed -i '/DEBUGFS/s/^# *//' "${S}"/config.mk + if use full-debug; then + if use debug-driver ; then + sed -i '/CONFIG=/s/^# *//' "${S}"/config.mk + else + ewarn "Enabling full-debug includes debug-driver." 
+ sed -i '/DEBUG=/s/^# *//' "${S}"/config.mk + fi + fi + + #avoid annoying ACCESS DENIED sandbox errors + sed -i "s/\${MAKE} -C \${KLIB_BUILD} kernelversion/echo ${KV_FULL}/g" compat/scripts/gen-compat-config.sh || die "sed failed" + sed -i "s/shell \$(MAKE) -C \$(KLIB_BUILD) kernelversion/echo ${KV_FULL}/g" config.mk || die "sed failed" + sed -i "s/make -C \$KLIB_BUILD kernelversion/echo ${KV_FULL}/g" scripts/gen-compat-autoconf.sh || die "sed failed" +} + +src_compile() { + addpredict "${KERNEL_DIR}" + set_arch_to_kernel + emake KLIB_BUILD="${DESTDIR}"/lib/modules/"${KV_FULL}"/build || die "emake failed" +} + +src_install() { + for file in $(find -name \*.ko); do + insinto "/lib/modules/${KV_FULL}/updates/$(dirname ${file})" + doins "${file}" + done + dosbin scripts/athenable scripts/b43load scripts/iwl-enable \ + scripts/madwifi-unload scripts/athload scripts/iwl-load \ + scripts/b43enable scripts/unload.sh + + dodir /usr/lib/compat-wireless + exeinto /usr/lib/compat-wireless + doexe scripts/modlib.sh + + dodoc README.md + dodir /$(get_libdir)/udev/rules.d/ + insinto /$(get_libdir)/udev/rules.d/ + doins udev/50-compat_firmware.rules + exeinto /$(get_libdir)/udev/ + doexe udev/compat_firmware.sh +} + +pkg_postinst() { + update_depmod + update_moduledb + + if use !livecd; then + if use loadmodules; then + einfo "Attempting to unload modules..." + /usr/sbin/unload.sh 2>&1 | grep -E FATAL && ewarn "Unable to remove running modules, system may be unhappy, reboot HIGHLY recommended!" + einfo "Triggering automatic reload of needed modules..." + /sbin/udevadm trigger + einfo "We have attempted to load your new modules for you, this may fail horribly, or may just cause a network hiccup." + einfo "If you experience any issues reboot is the simplest course of action." + fi + fi + if use !loadmodules; then + einfo "You didn't USE=loadmodules but you can still attempt to switch to the new drivers without reboot." + einfo "Run 'unload.sh' then 'udevadm trigger' to cause udev to load the needed drivers." + einfo "If unload.sh fails for some reason you should be able to simply reboot to fix everything and load the new modules." + fi +} + +pkg_postrm() { + remove_moduledb +} diff --git a/sys-kernel/compat-drivers/files/3.8-grsec/00-read-only.patch b/sys-kernel/compat-drivers/files/3.8-grsec/00-read-only.patch new file mode 100644 index 00000000..a7066c36 --- /dev/null +++ b/sys-kernel/compat-drivers/files/3.8-grsec/00-read-only.patch @@ -0,0 +1,23 @@ +--- ./include/net/bluetooth/bluetooth.h ++++ ./include/net/bluetooth/bluetooth.h +@@ -206,7 +206,7 @@ + struct file_operations fops; + int (* custom_seq_show)(struct seq_file *, void *); + #endif +-}; ++} __no_const; + + int bt_sock_register(int proto, const struct net_proto_family *ops); + int bt_sock_unregister(int proto); +--- ./include/net/mac80211.h ++++ ./include/net/mac80211.h +@@ -2652,7 +2652,7 @@ + struct ieee80211_chanctx_conf *ctx); + + void (*restart_complete)(struct ieee80211_hw *hw); +-}; ++} __no_const; + + /** + * ieee80211_alloc_hw - Allocate a new hardware device + diff --git a/sys-kernel/compat-drivers/files/3.8-grsec/01-read-only.patch b/sys-kernel/compat-drivers/files/3.8-grsec/01-read-only.patch new file mode 100644 index 00000000..88ecf46f --- /dev/null +++ b/sys-kernel/compat-drivers/files/3.8-grsec/01-read-only.patch @@ -0,0 +1,12 @@ +How/why does this even work? 
+--- ./net/wireless/core.h ++++ ./net/wireless/core.h +@@ -28,7 +28,7 @@ + struct mutex mtx; + + /* rfkill support */ +- struct rfkill_ops rfkill_ops; ++ rfkill_ops_no_const rfkill_ops; + struct rfkill *rfkill; + struct work_struct rfkill_sync; + diff --git a/sys-kernel/compat-drivers/files/3.8-grsec/02-read-only-ath.patch b/sys-kernel/compat-drivers/files/3.8-grsec/02-read-only-ath.patch new file mode 100644 index 00000000..ddb69f6b --- /dev/null +++ b/sys-kernel/compat-drivers/files/3.8-grsec/02-read-only-ath.patch @@ -0,0 +1,240 @@ +--- ./drivers/net/wireless/ath/ath.h ++++ ./drivers/net/wireless/ath/ath.h +@@ -119,6 +119,7 @@ struct ath_ops { + void (*write_flush) (void *); + u32 (*rmw)(void *, u32 reg_offset, u32 set, u32 clr); + }; ++typedef struct ath_ops __no_const ath_ops_no_const; + + struct ath_common; + struct ath_bus_ops; +--- ./drivers/net/wireless/ath/ath9k/ar9002_mac.c ++++ ./drivers/net/wireless/ath/ath9k/ar9002_mac.c +@@ -184,8 +184,8 @@ ar9002_set_txdesc(struct ath_hw *ah, voi + ads->ds_txstatus6 = ads->ds_txstatus7 = 0; + ads->ds_txstatus8 = ads->ds_txstatus9 = 0; + +- ACCESS_ONCE(ads->ds_link) = i->link; +- ACCESS_ONCE(ads->ds_data) = i->buf_addr[0]; ++ ACCESS_ONCE_RW(ads->ds_link) = i->link; ++ ACCESS_ONCE_RW(ads->ds_data) = i->buf_addr[0]; + + ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore); + ctl6 = SM(i->keytype, AR_EncrType); +@@ -199,26 +199,26 @@ ar9002_set_txdesc(struct ath_hw *ah, voi + + if ((i->is_first || i->is_last) && + i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) { +- ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0) ++ ACCESS_ONCE_RW(ads->ds_ctl2) = set11nTries(i->rates, 0) + | set11nTries(i->rates, 1) + | set11nTries(i->rates, 2) + | set11nTries(i->rates, 3) + | (i->dur_update ? AR_DurUpdateEna : 0) + | SM(0, AR_BurstDur); + +- ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0) ++ ACCESS_ONCE_RW(ads->ds_ctl3) = set11nRate(i->rates, 0) + | set11nRate(i->rates, 1) + | set11nRate(i->rates, 2) + | set11nRate(i->rates, 3); + } else { +- ACCESS_ONCE(ads->ds_ctl2) = 0; +- ACCESS_ONCE(ads->ds_ctl3) = 0; ++ ACCESS_ONCE_RW(ads->ds_ctl2) = 0; ++ ACCESS_ONCE_RW(ads->ds_ctl3) = 0; + } + + if (!i->is_first) { +- ACCESS_ONCE(ads->ds_ctl0) = 0; +- ACCESS_ONCE(ads->ds_ctl1) = ctl1; +- ACCESS_ONCE(ads->ds_ctl6) = ctl6; ++ ACCESS_ONCE_RW(ads->ds_ctl0) = 0; ++ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1; ++ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6; + return; + } + +@@ -243,7 +243,7 @@ ar9002_set_txdesc(struct ath_hw *ah, voi + break; + } + +- ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen) ++ ACCESS_ONCE_RW(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen) + | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0) + | SM(i->txpower, AR_XmitPower) + | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0) +@@ -253,19 +253,19 @@ ar9002_set_txdesc(struct ath_hw *ah, voi + | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable : + (i->flags & ATH9K_TXDESC_CTSENA ? 
AR_CTSEnable : 0)); + +- ACCESS_ONCE(ads->ds_ctl1) = ctl1; +- ACCESS_ONCE(ads->ds_ctl6) = ctl6; ++ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1; ++ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6; + + if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST) + return; + +- ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0) ++ ACCESS_ONCE_RW(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0) + | set11nPktDurRTSCTS(i->rates, 1); + +- ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2) ++ ACCESS_ONCE_RW(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2) + | set11nPktDurRTSCTS(i->rates, 3); + +- ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0) ++ ACCESS_ONCE_RW(ads->ds_ctl7) = set11nRateFlags(i->rates, 0) + | set11nRateFlags(i->rates, 1) + | set11nRateFlags(i->rates, 2) + | set11nRateFlags(i->rates, 3) +--- ./drivers/net/wireless/ath/ath9k/ar9003_mac.c ++++ ./drivers/net/wireless/ath/ath9k/ar9003_mac.c +@@ -39,47 +39,47 @@ ar9003_set_txdesc(struct ath_hw *ah, voi + (i->qcu << AR_TxQcuNum_S) | desc_len; + + checksum += val; +- ACCESS_ONCE(ads->info) = val; ++ ACCESS_ONCE_RW(ads->info) = val; + + checksum += i->link; +- ACCESS_ONCE(ads->link) = i->link; ++ ACCESS_ONCE_RW(ads->link) = i->link; + + checksum += i->buf_addr[0]; +- ACCESS_ONCE(ads->data0) = i->buf_addr[0]; ++ ACCESS_ONCE_RW(ads->data0) = i->buf_addr[0]; + checksum += i->buf_addr[1]; +- ACCESS_ONCE(ads->data1) = i->buf_addr[1]; ++ ACCESS_ONCE_RW(ads->data1) = i->buf_addr[1]; + checksum += i->buf_addr[2]; +- ACCESS_ONCE(ads->data2) = i->buf_addr[2]; ++ ACCESS_ONCE_RW(ads->data2) = i->buf_addr[2]; + checksum += i->buf_addr[3]; +- ACCESS_ONCE(ads->data3) = i->buf_addr[3]; ++ ACCESS_ONCE_RW(ads->data3) = i->buf_addr[3]; + + checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen); +- ACCESS_ONCE(ads->ctl3) = val; ++ ACCESS_ONCE_RW(ads->ctl3) = val; + checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen); +- ACCESS_ONCE(ads->ctl5) = val; ++ ACCESS_ONCE_RW(ads->ctl5) = val; + checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen); +- ACCESS_ONCE(ads->ctl7) = val; ++ ACCESS_ONCE_RW(ads->ctl7) = val; + checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen); +- ACCESS_ONCE(ads->ctl9) = val; ++ ACCESS_ONCE_RW(ads->ctl9) = val; + + checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff); +- ACCESS_ONCE(ads->ctl10) = checksum; ++ ACCESS_ONCE_RW(ads->ctl10) = checksum; + + if (i->is_first || i->is_last) { +- ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0) ++ ACCESS_ONCE_RW(ads->ctl13) = set11nTries(i->rates, 0) + | set11nTries(i->rates, 1) + | set11nTries(i->rates, 2) + | set11nTries(i->rates, 3) + | (i->dur_update ? AR_DurUpdateEna : 0) + | SM(0, AR_BurstDur); + +- ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0) ++ ACCESS_ONCE_RW(ads->ctl14) = set11nRate(i->rates, 0) + | set11nRate(i->rates, 1) + | set11nRate(i->rates, 2) + | set11nRate(i->rates, 3); + } else { +- ACCESS_ONCE(ads->ctl13) = 0; +- ACCESS_ONCE(ads->ctl14) = 0; ++ ACCESS_ONCE_RW(ads->ctl13) = 0; ++ ACCESS_ONCE_RW(ads->ctl14) = 0; + } + + ads->ctl20 = 0; +@@ -89,17 +89,17 @@ ar9003_set_txdesc(struct ath_hw *ah, voi + + ctl17 = SM(i->keytype, AR_EncrType); + if (!i->is_first) { +- ACCESS_ONCE(ads->ctl11) = 0; +- ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore; +- ACCESS_ONCE(ads->ctl15) = 0; +- ACCESS_ONCE(ads->ctl16) = 0; +- ACCESS_ONCE(ads->ctl17) = ctl17; +- ACCESS_ONCE(ads->ctl18) = 0; +- ACCESS_ONCE(ads->ctl19) = 0; ++ ACCESS_ONCE_RW(ads->ctl11) = 0; ++ ACCESS_ONCE_RW(ads->ctl12) = i->is_last ? 
0 : AR_TxMore; ++ ACCESS_ONCE_RW(ads->ctl15) = 0; ++ ACCESS_ONCE_RW(ads->ctl16) = 0; ++ ACCESS_ONCE_RW(ads->ctl17) = ctl17; ++ ACCESS_ONCE_RW(ads->ctl18) = 0; ++ ACCESS_ONCE_RW(ads->ctl19) = 0; + return; + } + +- ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen) ++ ACCESS_ONCE_RW(ads->ctl11) = (i->pkt_len & AR_FrameLen) + | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0) + | SM(i->txpower, AR_XmitPower) + | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0) +@@ -135,22 +135,22 @@ ar9003_set_txdesc(struct ath_hw *ah, voi + val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S; + ctl12 |= SM(val, AR_PAPRDChainMask); + +- ACCESS_ONCE(ads->ctl12) = ctl12; +- ACCESS_ONCE(ads->ctl17) = ctl17; ++ ACCESS_ONCE_RW(ads->ctl12) = ctl12; ++ ACCESS_ONCE_RW(ads->ctl17) = ctl17; + +- ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0) ++ ACCESS_ONCE_RW(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0) + | set11nPktDurRTSCTS(i->rates, 1); + +- ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2) ++ ACCESS_ONCE_RW(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2) + | set11nPktDurRTSCTS(i->rates, 3); + +- ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0) ++ ACCESS_ONCE_RW(ads->ctl18) = set11nRateFlags(i->rates, 0) + | set11nRateFlags(i->rates, 1) + | set11nRateFlags(i->rates, 2) + | set11nRateFlags(i->rates, 3) + | SM(i->rtscts_rate, AR_RTSCTSRate); + +- ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding; ++ ACCESS_ONCE_RW(ads->ctl19) = AR_Not_Sounding; + } + + static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads) +--- ./drivers/net/wireless/ath/ath9k/hw.h ++++ ./drivers/net/wireless/ath/ath9k/hw.h +@@ -657,7 +657,7 @@ struct ath_hw_private_ops { + + /* ANI */ + void (*ani_cache_ini_regs)(struct ath_hw *ah); +-}; ++} __no_const; + + /** + * struct ath_hw_ops - callbacks used by hardware code and driver code +@@ -687,7 +687,7 @@ struct ath_hw_ops { + void (*antdiv_comb_conf_set)(struct ath_hw *ah, + struct ath_hw_antcomb_conf *antconf); + void (*antctrl_shared_chain_lnadiv)(struct ath_hw *hw, bool enable); +-}; ++} __no_const; + + struct ath_nf_limits { + s16 max; +@@ -707,7 +707,7 @@ enum ath_cal_list { + #define AH_FASTCC 0x4 + + struct ath_hw { +- struct ath_ops reg_ops; ++ ath_ops_no_const reg_ops; + + struct ieee80211_hw *hw; + struct ath_common common; diff --git a/sys-kernel/compat-drivers/files/3.8-grsec/03-read-only-memory.patch b/sys-kernel/compat-drivers/files/3.8-grsec/03-read-only-memory.patch new file mode 100644 index 00000000..41c87b9a --- /dev/null +++ b/sys-kernel/compat-drivers/files/3.8-grsec/03-read-only-memory.patch @@ -0,0 +1,11 @@ +--- ./include/drm/ttm/ttm_memory.h ++++ ./include/drm/ttm/ttm_memory.h +@@ -48,7 +48,7 @@ + + struct ttm_mem_shrink { + int (*do_shrink) (struct ttm_mem_shrink *); +-}; ++} __no_const; + + /** + * struct ttm_mem_global - Global memory accounting structure. 
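Note on the pattern in the 3.8-grsec patches above: each one either tags a function-pointer-only ops structure with __no_const or converts descriptor writes from ACCESS_ONCE to ACCESS_ONCE_RW. The motivation is the PaX/grsecurity constify GCC plugin, which (when the kernel is built with it, as the pax_kernel USE flag assumes) treats structures containing only function pointers as implicitly const and places them in read-only memory; a type the driver must still fill in at runtime therefore has to opt out. The stand-alone sketch below illustrates the idea only; it is not part of the patch set, demo_ops and demo_read are invented names, and outside a plugin-enabled kernel build __no_const simply expands to nothing.

/* Minimal user-space sketch of the constify opt-out used above.
 * Under the PaX constify plugin, a struct holding only function
 * pointers is forced const; tagging the type __no_const keeps
 * instances writable at runtime. */
#include <stdio.h>

#ifndef __no_const
#define __no_const /* no plugin present: the attribute is a no-op */
#endif

/* Hypothetical ops table, analogous to rfkill_ops or ath_ops above:
 * function pointers only, so the plugin would constify it by default. */
struct demo_ops {
	int (*read)(void *priv);
	void (*write)(void *priv, int val);
} __no_const;

static int demo_read(void *priv)
{
	(void)priv;
	return 42;
}

int main(void)
{
	struct demo_ops ops = { 0 };

	/* Runtime assignment into the ops table: the kind of store that
	 * would fault on a constified struct, and the reason the patches
	 * above mark such types __no_const. */
	ops.read = demo_read;
	printf("ops.read() = %d\n", ops.read(NULL));
	return 0;
}

ACCESS_ONCE_RW plays the same role at field level: in grsecurity kernels of this era it performs the volatile access of ACCESS_ONCE through a non-const lvalue, so intentional stores to otherwise constified descriptor fields (the ads->ds_ctl* and ads->ctl* writes above) still compile.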
diff --git a/sys-kernel/compat-drivers/files/3.8-grsec/04-read-only-brcm80211.patch b/sys-kernel/compat-drivers/files/3.8-grsec/04-read-only-brcm80211.patch new file mode 100644 index 00000000..865a62d5 --- /dev/null +++ b/sys-kernel/compat-drivers/files/3.8-grsec/04-read-only-brcm80211.patch @@ -0,0 +1,11 @@ +--- ./drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h ++++ ./drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h +@@ -545,7 +545,7 @@ + void (*carrsuppr)(struct brcms_phy *); + s32 (*rxsigpwr)(struct brcms_phy *, s32); + void (*detach)(struct brcms_phy *); +-}; ++} __no_const; + + struct brcms_phy { + struct brcms_phy_pub pubpi_ro; diff --git a/sys-kernel/compat-drivers/files/3.8-grsec/05-read-only-i915.patch b/sys-kernel/compat-drivers/files/3.8-grsec/05-read-only-i915.patch new file mode 100644 index 00000000..15a62f5e --- /dev/null +++ b/sys-kernel/compat-drivers/files/3.8-grsec/05-read-only-i915.patch @@ -0,0 +1,17 @@ +--- ./drivers/gpu/drm/i915/i915_drv.h ++++ ./drivers/gpu/drm/i915/i915_drv.h +@@ -284,12 +284,12 @@ + /* render clock increase/decrease */ + /* display clock increase/decrease */ + /* pll clock increase/decrease */ +-}; ++} __no_const; + + struct drm_i915_gt_funcs { + void (*force_wake_get)(struct drm_i915_private *dev_priv); + void (*force_wake_put)(struct drm_i915_private *dev_priv); +-}; ++} __no_const; + + #define DEV_INFO_FLAGS \ + DEV_INFO_FLAG(is_mobile) DEV_INFO_SEP \ diff --git a/sys-kernel/compat-drivers/files/3.8-grsec/06-read-only-radeon.patch b/sys-kernel/compat-drivers/files/3.8-grsec/06-read-only-radeon.patch new file mode 100644 index 00000000..f9180c45 --- /dev/null +++ b/sys-kernel/compat-drivers/files/3.8-grsec/06-read-only-radeon.patch @@ -0,0 +1,58 @@ +--- ./drivers/gpu/drm/radeon/radeon.h ++++ ./drivers/gpu/drm/radeon/radeon.h +@@ -741,7 +741,7 @@ + int x2, int y2); + void (*draw_auto)(struct radeon_device *rdev); + void (*set_default_state)(struct radeon_device *rdev); +-}; ++} __no_const; + + struct r600_blit { + struct radeon_bo *shader_obj; +@@ -1173,7 +1173,7 @@ + struct { + void (*tlb_flush)(struct radeon_device *rdev); + int (*set_page)(struct radeon_device *rdev, int i, uint64_t addr); +- } gart; ++ } __no_const gart; + struct { + int (*init)(struct radeon_device *rdev); + void (*fini)(struct radeon_device *rdev); +@@ -1214,7 +1214,7 @@ + void (*set_backlight_level)(struct radeon_encoder *radeon_encoder, u8 level); + /* get backlight level */ + u8 (*get_backlight_level)(struct radeon_encoder *radeon_encoder); +- } display; ++ } __no_const display; + /* copy functions for bo handling */ + struct { + int (*blit)(struct radeon_device *rdev, +@@ -1266,7 +1266,7 @@ + int (*get_pcie_lanes)(struct radeon_device *rdev); + void (*set_pcie_lanes)(struct radeon_device *rdev, int lanes); + void (*set_clock_gating)(struct radeon_device *rdev, int enable); +- } pm; ++ } __no_const pm; + /* pageflipping */ + struct { + void (*pre_page_flip)(struct radeon_device *rdev, int crtc); +@@ -1542,6 +1542,8 @@ + typedef uint32_t (*radeon_rreg_t)(struct radeon_device*, uint32_t); + typedef void (*radeon_wreg_t)(struct radeon_device*, uint32_t, uint32_t); + ++typedef struct vm_operations_struct __no_const vm_operations_struct_no_const; ++ + struct radeon_device { + struct device *dev; + struct drm_device *ddev; +--- ./drivers/gpu/drm/radeon/radeon_ttm.c ++++ ./drivers/gpu/drm/radeon/radeon_ttm.c +@@ -791,7 +791,7 @@ + man->size = size >> PAGE_SHIFT; + } + +-static struct vm_operations_struct radeon_ttm_vm_ops; ++static vm_operations_struct_no_const 
radeon_ttm_vm_ops; + static const struct vm_operations_struct *ttm_vm_ops = NULL; + + static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) diff --git a/sys-kernel/compat-drivers/files/3.8-grsec/07-read-only-wl1251.patch b/sys-kernel/compat-drivers/files/3.8-grsec/07-read-only-wl1251.patch new file mode 100644 index 00000000..158f59ab --- /dev/null +++ b/sys-kernel/compat-drivers/files/3.8-grsec/07-read-only-wl1251.patch @@ -0,0 +1,11 @@ +--- ./drivers/net/wireless/ti/wl1251/wl1251.h ++++ ./drivers/net/wireless/ti/wl1251/wl1251.h +@@ -266,7 +266,7 @@ + void (*reset)(struct wl1251 *wl); + void (*enable_irq)(struct wl1251 *wl); + void (*disable_irq)(struct wl1251 *wl); +-}; ++} __no_const; + + struct wl1251 { + struct ieee80211_hw *hw; diff --git a/sys-kernel/compat-drivers/files/3.8-grsec/08-read-only-ti.patch b/sys-kernel/compat-drivers/files/3.8-grsec/08-read-only-ti.patch new file mode 100644 index 00000000..8236b601 --- /dev/null +++ b/sys-kernel/compat-drivers/files/3.8-grsec/08-read-only-ti.patch @@ -0,0 +1,11 @@ +--- ./drivers/net/wireless/ti/wlcore/wlcore.h ++++ ./drivers/net/wireless/ti/wlcore/wlcore.h +@@ -88,7 +88,7 @@ + struct ieee80211_sta *sta, + struct ieee80211_key_conf *key_conf); + u32 (*pre_pkt_send)(struct wl1271 *wl, u32 buf_offset, u32 last_len); +-}; ++} __no_const; + + enum wlcore_partitions { + PART_DOWN, diff --git a/sys-kernel/compat-drivers/files/3.8-grsec/09-read-only-nouveau.patch b/sys-kernel/compat-drivers/files/3.8-grsec/09-read-only-nouveau.patch new file mode 100644 index 00000000..d4ac0e52 --- /dev/null +++ b/sys-kernel/compat-drivers/files/3.8-grsec/09-read-only-nouveau.patch @@ -0,0 +1,33 @@ +--- ./drivers/gpu/drm/nouveau/nouveau_fence.h ++++ ./drivers/gpu/drm/nouveau/nouveau_fence.h +@@ -43,7 +43,7 @@ + int (*sync)(struct nouveau_fence *, struct nouveau_channel *, + struct nouveau_channel *); + u32 (*read)(struct nouveau_channel *); +-}; ++} __no_const; + + #define nouveau_fence(drm) ((struct nouveau_fence_priv *)(drm)->fence) + +--- ./drivers/gpu/drm/nouveau/nouveau_bios.c ++++ ./drivers/gpu/drm/nouveau/nouveau_bios.c +@@ -1015,7 +1015,7 @@ + struct bit_table { + const char id; + int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *); +-}; ++} __no_const; + + #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry }) + +--- ./include/drm/drm_crtc_helper.h ++++ ./include/drm/drm_crtc_helper.h +@@ -109,7 +109,7 @@ + struct drm_connector *connector); + /* disable encoder when not in use - more explicit than dpms off */ + void (*disable)(struct drm_encoder *encoder); +-}; ++} __no_const; + + /** + * drm_connector_helper_funcs - helper operations for connectors diff --git a/sys-kernel/compat-drivers/files/3.8-grsec/20-version-disagrement-iwlwifi.patch b/sys-kernel/compat-drivers/files/3.8-grsec/20-version-disagrement-iwlwifi.patch new file mode 100644 index 00000000..37791f2d --- /dev/null +++ b/sys-kernel/compat-drivers/files/3.8-grsec/20-version-disagrement-iwlwifi.patch @@ -0,0 +1,139 @@ +--- ./drivers/net/wireless/iwlwifi/dvm/debugfs.c ++++ ./drivers/net/wireless/iwlwifi/dvm/debugfs.c +@@ -203,7 +203,7 @@ + { + struct iwl_priv *priv = file->private_data; + char buf[64]; +- int buf_size; ++ size_t buf_size; + u32 offset, len; + + memset(buf, 0, sizeof(buf)); +@@ -473,7 +473,7 @@ + struct iwl_priv *priv = file->private_data; + + char buf[8]; +- int buf_size; ++ size_t buf_size; + u32 reset_flag; + + memset(buf, 0, sizeof(buf)); +@@ -554,7 +554,7 @@ + { + struct iwl_priv *priv 
= file->private_data; + char buf[8]; +- int buf_size; ++ size_t buf_size; + int ht40; + + memset(buf, 0, sizeof(buf)); +@@ -606,7 +606,7 @@ + { + struct iwl_priv *priv = file->private_data; + char buf[8]; +- int buf_size; ++ size_t buf_size; + int value; + + memset(buf, 0, sizeof(buf)); +@@ -1871,7 +1871,7 @@ + { + struct iwl_priv *priv = file->private_data; + char buf[8]; +- int buf_size; ++ size_t buf_size; + int clear; + + memset(buf, 0, sizeof(buf)); +@@ -1916,7 +1916,7 @@ + { + struct iwl_priv *priv = file->private_data; + char buf[8]; +- int buf_size; ++ size_t buf_size; + int trace; + + memset(buf, 0, sizeof(buf)); +@@ -1987,7 +1987,7 @@ + { + struct iwl_priv *priv = file->private_data; + char buf[8]; +- int buf_size; ++ size_t buf_size; + int missed; + + memset(buf, 0, sizeof(buf)); +@@ -2028,7 +2028,7 @@ + + struct iwl_priv *priv = file->private_data; + char buf[8]; +- int buf_size; ++ size_t buf_size; + int plcp; + + memset(buf, 0, sizeof(buf)); +@@ -2088,7 +2088,7 @@ + + struct iwl_priv *priv = file->private_data; + char buf[8]; +- int buf_size; ++ size_t buf_size; + int flush; + + memset(buf, 0, sizeof(buf)); +@@ -2178,7 +2178,7 @@ + + struct iwl_priv *priv = file->private_data; + char buf[8]; +- int buf_size; ++ size_t buf_size; + int rts; + + if (!priv->cfg->ht_params) +@@ -2220,7 +2220,7 @@ + { + struct iwl_priv *priv = file->private_data; + char buf[8]; +- int buf_size; ++ size_t buf_size; + + memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); +@@ -2256,7 +2256,7 @@ + struct iwl_priv *priv = file->private_data; + u32 event_log_flag; + char buf[8]; +- int buf_size; ++ size_t buf_size; + + /* check that the interface is up */ + if (!iwl_is_ready(priv)) +@@ -2310,7 +2310,7 @@ + struct iwl_priv *priv = file->private_data; + char buf[8]; + u32 calib_disabled; +- int buf_size; ++ size_t buf_size; + + memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); +--- ./drivers/net/wireless/iwlwifi/pcie/trans.c ++++ ./drivers/net/wireless/iwlwifi/pcie/trans.c +@@ -1100,7 +1100,7 @@ + struct isr_statistics *isr_stats = &trans_pcie->isr_stats; + + char buf[8]; +- int buf_size; ++ size_t buf_size; + u32 reset_flag; + + memset(buf, 0, sizeof(buf)); +@@ -1121,7 +1121,7 @@ + { + struct iwl_trans *trans = file->private_data; + char buf[8]; +- int buf_size; ++ size_t buf_size; + int csr; + + memset(buf, 0, sizeof(buf)); diff --git a/sys-kernel/compat-drivers/files/4002_mac80211-2.6.29-fix-tx-ctl-no-ack-retry-count.patch b/sys-kernel/compat-drivers/files/4002_mac80211-2.6.29-fix-tx-ctl-no-ack-retry-count.patch new file mode 100644 index 00000000..a2b080f5 --- /dev/null +++ b/sys-kernel/compat-drivers/files/4002_mac80211-2.6.29-fix-tx-ctl-no-ack-retry-count.patch @@ -0,0 +1,17 @@ + tx.c | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c +index b47435d..751934b 100644 +--- a/net/mac80211/tx.c ++++ b/net/mac80211/tx.c +@@ -539,7 +539,8 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx) + if (tx->sta) + tx->sta->last_tx_rate = txrc.reported_rate; + +- if (unlikely(!info->control.rates[0].count)) ++ if (unlikely(!info->control.rates[0].count) || ++ info->flags & IEEE80211_TX_CTL_NO_ACK) + info->control.rates[0].count = 1; + + if (is_multicast_ether_addr(hdr->addr1)) { diff --git a/sys-kernel/compat-drivers/files/4004_zd1211rw-2.6.28.patch b/sys-kernel/compat-drivers/files/4004_zd1211rw-2.6.28.patch new file mode 100644 index 00000000..c0697dee --- /dev/null +++ 
b/sys-kernel/compat-drivers/files/4004_zd1211rw-2.6.28.patch @@ -0,0 +1,37 @@ +diff -Naur linux-2.6.28-pentoo-r1-orig/drivers/net/wireless/zd1211rw/zd_mac.c linux-2.6.28-pentoo-r1-improved/drivers/net/wireless/zd1211rw/zd_mac.c +--- linux-2.6.28-pentoo-r1-orig/drivers/net/wireless/zd1211rw/zd_mac.c 2009-01-18 17:49:00.000000000 -0500 ++++ linux-2.6.28-pentoo-r1-improved/drivers/net/wireless/zd1211rw/zd_mac.c 2009-01-18 18:46:44.000000000 -0500 +@@ -191,14 +191,19 @@ + static int set_rx_filter(struct zd_mac *mac) + { + unsigned long flags; +- u32 filter = STA_RX_FILTER; ++ struct zd_ioreq32 ioreqs[] = { ++ {CR_RX_FILTER, STA_RX_FILTER}, ++ { CR_SNIFFER_ON, 0U }, ++ }; + + spin_lock_irqsave(&mac->lock, flags); +- if (mac->pass_ctrl) +- filter |= RX_FILTER_CTRL; ++ if (mac->pass_ctrl) { ++ ioreqs[0].value |= 0xFFFFFFFF; ++ ioreqs[1].value = 0x1; ++ } + spin_unlock_irqrestore(&mac->lock, flags); + +- return zd_iowrite32(&mac->chip, CR_RX_FILTER, filter); ++ return zd_iowrite32a(&mac->chip, ioreqs, ARRAY_SIZE(ioreqs)); + } + + static int set_mc_hash(struct zd_mac *mac) +@@ -657,7 +662,8 @@ + /* Caller has to ensure that length >= sizeof(struct rx_status). */ + status = (struct rx_status *) + (buffer + (length - sizeof(struct rx_status))); +- if (status->frame_status & ZD_RX_ERROR) { ++ if ((status->frame_status & ZD_RX_ERROR) || ++ (status->frame_status & ~0x21)) { + if (mac->pass_failed_fcs && + (status->frame_status & ZD_RX_CRC32_ERROR)) { + stats.flag |= RX_FLAG_FAILED_FCS_CRC; diff --git a/sys-kernel/compat-drivers/files/ath_regd_optional.patch b/sys-kernel/compat-drivers/files/ath_regd_optional.patch new file mode 100644 index 00000000..415fc896 --- /dev/null +++ b/sys-kernel/compat-drivers/files/ath_regd_optional.patch @@ -0,0 +1,39 @@ +diff -Naur compat-wireless-3.0-rc4-1-orig/drivers/net/wireless/ath/regd.c compat-wireless-3.0-rc4-1/drivers/net/wireless/ath/regd.c +--- compat-wireless-3.0-rc4-1-orig/drivers/net/wireless/ath/regd.c 2011-06-23 19:02:22.000000000 -0400 ++++ compat-wireless-3.0-rc4-1/drivers/net/wireless/ath/regd.c 2011-06-26 01:52:35.000000000 -0400 +@@ -193,6 +193,8 @@ + u32 bandwidth = 0; + int r; + ++ return; ++ + for (band = 0; band < IEEE80211_NUM_BANDS; band++) { + + if (!wiphy->bands[band]) +@@ -252,6 +254,8 @@ + u32 bandwidth = 0; + int r; + ++ return; ++ + sband = wiphy->bands[IEEE80211_BAND_2GHZ]; + + /* +@@ -299,6 +303,8 @@ + struct ieee80211_channel *ch; + unsigned int i; + ++ return; ++ + if (!wiphy->bands[IEEE80211_BAND_5GHZ]) + return; + +@@ -466,6 +472,8 @@ + { + const struct ieee80211_regdomain *regd; + ++ return 0; ++ + wiphy->reg_notifier = reg_notifier; + wiphy->flags |= WIPHY_FLAG_STRICT_REGULATORY; + diff --git a/sys-kernel/compat-drivers/files/compat-drivers-3.7_rc1_p6-grsec-warnings.patch b/sys-kernel/compat-drivers/files/compat-drivers-3.7_rc1_p6-grsec-warnings.patch new file mode 100644 index 00000000..4ff7a73b --- /dev/null +++ b/sys-kernel/compat-drivers/files/compat-drivers-3.7_rc1_p6-grsec-warnings.patch @@ -0,0 +1,34 @@ +Fixes for: +drivers/net/wireless/ath/ath6kl/sdio.c: In function ‘ath6kl_sdio_alloc_prep_scat_req’: +drivers/net/wireless/ath/ath6kl/sdio.c:1478:1: warning: find_arg_number: cannot find the buf_sz argument in ath6kl_sdio_alloc_prep_scat_req [enabled by default] +drivers/net/wireless/ath/ath6kl/sdio.c:1478:1: warning: find_arg_number: cannot find the sg_sz argument in ath6kl_sdio_alloc_prep_scat_req [enabled by default] +--- drivers/net/wireless/ath/ath6kl/sdio.c ++++ drivers/net/wireless/ath/ath6kl/sdio.c +@@ -341,11 +341,14 
@@ + scat_list_sz = (n_scat_entry - 1) * sizeof(struct hif_scatter_item); + scat_req_sz = sizeof(*s_req) + scat_list_sz; + +- if (!virt_scat) ++ if (!virt_scat) { + sg_sz = sizeof(struct scatterlist) * n_scat_entry; +- else ++ buf_sz = 0; ++ } else { ++ sg_sz = 0; + buf_sz = 2 * L1_CACHE_BYTES + + ATH6KL_MAX_TRANSFER_SIZE_PER_SCATTER; ++ } + + for (i = 0; i < n_scat_req; i++) { + /* allocate the scatter request */ +--- drivers/gpu/drm/i915/intel_display.c ++++ drivers/gpu/drm/i915/intel_display.c +@@ -6216,7 +6216,7 @@ + obj = work->old_fb_obj; + + atomic_clear_mask(1 << intel_crtc->plane, +- &obj->pending_flip.counter); ++ &obj->pending_flip); + + wake_up(&dev_priv->pending_flip_queue); + schedule_work(&work->work); diff --git a/sys-kernel/compat-drivers/files/compat-drivers-3.7_rc1_p6-grsec.patch b/sys-kernel/compat-drivers/files/compat-drivers-3.7_rc1_p6-grsec.patch new file mode 100644 index 00000000..9dfc5adf --- /dev/null +++ b/sys-kernel/compat-drivers/files/compat-drivers-3.7_rc1_p6-grsec.patch @@ -0,0 +1,8365 @@ +--- drivers/net/wireless/ath/ath.h 2012-09-27 23:19:12.000000000 +0000 ++++ drivers/net/wireless/ath/ath.h 2012-10-15 17:30:59.818924529 +0000 +@@ -119,6 +119,7 @@ struct ath_ops { + void (*write_flush) (void *); + u32 (*rmw)(void *, u32 reg_offset, u32 set, u32 clr); + }; ++typedef struct ath_ops __no_const ath_ops_no_const; + + struct ath_common; + struct ath_bus_ops; +--- drivers/net/wireless/ath/ath9k/ar9002_mac.c 2012-09-27 23:19:11.000000000 +0000 ++++ drivers/net/wireless/ath/ath9k/ar9002_mac.c 2012-10-15 17:30:59.816924531 +0000 +@@ -184,8 +184,8 @@ ar9002_set_txdesc(struct ath_hw *ah, voi + ads->ds_txstatus6 = ads->ds_txstatus7 = 0; + ads->ds_txstatus8 = ads->ds_txstatus9 = 0; + +- ACCESS_ONCE(ads->ds_link) = i->link; +- ACCESS_ONCE(ads->ds_data) = i->buf_addr[0]; ++ ACCESS_ONCE_RW(ads->ds_link) = i->link; ++ ACCESS_ONCE_RW(ads->ds_data) = i->buf_addr[0]; + + ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore); + ctl6 = SM(i->keytype, AR_EncrType); +@@ -199,26 +199,26 @@ ar9002_set_txdesc(struct ath_hw *ah, voi + + if ((i->is_first || i->is_last) && + i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) { +- ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0) ++ ACCESS_ONCE_RW(ads->ds_ctl2) = set11nTries(i->rates, 0) + | set11nTries(i->rates, 1) + | set11nTries(i->rates, 2) + | set11nTries(i->rates, 3) + | (i->dur_update ? AR_DurUpdateEna : 0) + | SM(0, AR_BurstDur); + +- ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0) ++ ACCESS_ONCE_RW(ads->ds_ctl3) = set11nRate(i->rates, 0) + | set11nRate(i->rates, 1) + | set11nRate(i->rates, 2) + | set11nRate(i->rates, 3); + } else { +- ACCESS_ONCE(ads->ds_ctl2) = 0; +- ACCESS_ONCE(ads->ds_ctl3) = 0; ++ ACCESS_ONCE_RW(ads->ds_ctl2) = 0; ++ ACCESS_ONCE_RW(ads->ds_ctl3) = 0; + } + + if (!i->is_first) { +- ACCESS_ONCE(ads->ds_ctl0) = 0; +- ACCESS_ONCE(ads->ds_ctl1) = ctl1; +- ACCESS_ONCE(ads->ds_ctl6) = ctl6; ++ ACCESS_ONCE_RW(ads->ds_ctl0) = 0; ++ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1; ++ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6; + return; + } + +@@ -243,7 +243,7 @@ ar9002_set_txdesc(struct ath_hw *ah, voi + break; + } + +- ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen) ++ ACCESS_ONCE_RW(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen) + | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0) + | SM(i->txpower, AR_XmitPower) + | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0) +@@ -253,19 +253,19 @@ ar9002_set_txdesc(struct ath_hw *ah, voi + | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable : + (i->flags & ATH9K_TXDESC_CTSENA ? 
AR_CTSEnable : 0)); + +- ACCESS_ONCE(ads->ds_ctl1) = ctl1; +- ACCESS_ONCE(ads->ds_ctl6) = ctl6; ++ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1; ++ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6; + + if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST) + return; + +- ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0) ++ ACCESS_ONCE_RW(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0) + | set11nPktDurRTSCTS(i->rates, 1); + +- ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2) ++ ACCESS_ONCE_RW(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2) + | set11nPktDurRTSCTS(i->rates, 3); + +- ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0) ++ ACCESS_ONCE_RW(ads->ds_ctl7) = set11nRateFlags(i->rates, 0) + | set11nRateFlags(i->rates, 1) + | set11nRateFlags(i->rates, 2) + | set11nRateFlags(i->rates, 3) +--- drivers/net/wireless/ath/ath9k/ar9003_mac.c 2012-09-27 23:19:12.000000000 +0000 ++++ drivers/net/wireless/ath/ath9k/ar9003_mac.c 2012-10-15 17:30:59.817924530 +0000 +@@ -39,47 +39,47 @@ ar9003_set_txdesc(struct ath_hw *ah, voi + (i->qcu << AR_TxQcuNum_S) | desc_len; + + checksum += val; +- ACCESS_ONCE(ads->info) = val; ++ ACCESS_ONCE_RW(ads->info) = val; + + checksum += i->link; +- ACCESS_ONCE(ads->link) = i->link; ++ ACCESS_ONCE_RW(ads->link) = i->link; + + checksum += i->buf_addr[0]; +- ACCESS_ONCE(ads->data0) = i->buf_addr[0]; ++ ACCESS_ONCE_RW(ads->data0) = i->buf_addr[0]; + checksum += i->buf_addr[1]; +- ACCESS_ONCE(ads->data1) = i->buf_addr[1]; ++ ACCESS_ONCE_RW(ads->data1) = i->buf_addr[1]; + checksum += i->buf_addr[2]; +- ACCESS_ONCE(ads->data2) = i->buf_addr[2]; ++ ACCESS_ONCE_RW(ads->data2) = i->buf_addr[2]; + checksum += i->buf_addr[3]; +- ACCESS_ONCE(ads->data3) = i->buf_addr[3]; ++ ACCESS_ONCE_RW(ads->data3) = i->buf_addr[3]; + + checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen); +- ACCESS_ONCE(ads->ctl3) = val; ++ ACCESS_ONCE_RW(ads->ctl3) = val; + checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen); +- ACCESS_ONCE(ads->ctl5) = val; ++ ACCESS_ONCE_RW(ads->ctl5) = val; + checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen); +- ACCESS_ONCE(ads->ctl7) = val; ++ ACCESS_ONCE_RW(ads->ctl7) = val; + checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen); +- ACCESS_ONCE(ads->ctl9) = val; ++ ACCESS_ONCE_RW(ads->ctl9) = val; + + checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff); +- ACCESS_ONCE(ads->ctl10) = checksum; ++ ACCESS_ONCE_RW(ads->ctl10) = checksum; + + if (i->is_first || i->is_last) { +- ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0) ++ ACCESS_ONCE_RW(ads->ctl13) = set11nTries(i->rates, 0) + | set11nTries(i->rates, 1) + | set11nTries(i->rates, 2) + | set11nTries(i->rates, 3) + | (i->dur_update ? AR_DurUpdateEna : 0) + | SM(0, AR_BurstDur); + +- ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0) ++ ACCESS_ONCE_RW(ads->ctl14) = set11nRate(i->rates, 0) + | set11nRate(i->rates, 1) + | set11nRate(i->rates, 2) + | set11nRate(i->rates, 3); + } else { +- ACCESS_ONCE(ads->ctl13) = 0; +- ACCESS_ONCE(ads->ctl14) = 0; ++ ACCESS_ONCE_RW(ads->ctl13) = 0; ++ ACCESS_ONCE_RW(ads->ctl14) = 0; + } + + ads->ctl20 = 0; +@@ -89,17 +89,17 @@ ar9003_set_txdesc(struct ath_hw *ah, voi + + ctl17 = SM(i->keytype, AR_EncrType); + if (!i->is_first) { +- ACCESS_ONCE(ads->ctl11) = 0; +- ACCESS_ONCE(ads->ctl12) = i->is_last ? 
0 : AR_TxMore; +- ACCESS_ONCE(ads->ctl15) = 0; +- ACCESS_ONCE(ads->ctl16) = 0; +- ACCESS_ONCE(ads->ctl17) = ctl17; +- ACCESS_ONCE(ads->ctl18) = 0; +- ACCESS_ONCE(ads->ctl19) = 0; ++ ACCESS_ONCE_RW(ads->ctl11) = 0; ++ ACCESS_ONCE_RW(ads->ctl12) = i->is_last ? 0 : AR_TxMore; ++ ACCESS_ONCE_RW(ads->ctl15) = 0; ++ ACCESS_ONCE_RW(ads->ctl16) = 0; ++ ACCESS_ONCE_RW(ads->ctl17) = ctl17; ++ ACCESS_ONCE_RW(ads->ctl18) = 0; ++ ACCESS_ONCE_RW(ads->ctl19) = 0; + return; + } + +- ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen) ++ ACCESS_ONCE_RW(ads->ctl11) = (i->pkt_len & AR_FrameLen) + | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0) + | SM(i->txpower, AR_XmitPower) + | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0) +@@ -135,22 +135,22 @@ ar9003_set_txdesc(struct ath_hw *ah, voi + val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S; + ctl12 |= SM(val, AR_PAPRDChainMask); + +- ACCESS_ONCE(ads->ctl12) = ctl12; +- ACCESS_ONCE(ads->ctl17) = ctl17; ++ ACCESS_ONCE_RW(ads->ctl12) = ctl12; ++ ACCESS_ONCE_RW(ads->ctl17) = ctl17; + +- ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0) ++ ACCESS_ONCE_RW(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0) + | set11nPktDurRTSCTS(i->rates, 1); + +- ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2) ++ ACCESS_ONCE_RW(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2) + | set11nPktDurRTSCTS(i->rates, 3); + +- ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0) ++ ACCESS_ONCE_RW(ads->ctl18) = set11nRateFlags(i->rates, 0) + | set11nRateFlags(i->rates, 1) + | set11nRateFlags(i->rates, 2) + | set11nRateFlags(i->rates, 3) + | SM(i->rtscts_rate, AR_RTSCTSRate); + +- ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding; ++ ACCESS_ONCE_RW(ads->ctl19) = AR_Not_Sounding; + } + + static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads) +--- drivers/net/wireless/ath/ath9k/hw.h 2012-09-27 23:19:12.000000000 +0000 ++++ drivers/net/wireless/ath/ath9k/hw.h 2012-10-15 17:30:59.817924530 +0000 +@@ -657,7 +657,7 @@ struct ath_hw_private_ops { + + /* ANI */ + void (*ani_cache_ini_regs)(struct ath_hw *ah); +-}; ++} __no_const; + + /** + * struct ath_hw_ops - callbacks used by hardware code and driver code +@@ -687,7 +687,7 @@ struct ath_hw_ops { + void (*antdiv_comb_conf_set)(struct ath_hw *ah, + struct ath_hw_antcomb_conf *antconf); + void (*antctrl_shared_chain_lnadiv)(struct ath_hw *hw, bool enable); +-}; ++} __no_const; + + struct ath_nf_limits { + s16 max; +@@ -707,7 +707,7 @@ enum ath_cal_list { + #define AH_FASTCC 0x4 + + struct ath_hw { +- struct ath_ops reg_ops; ++ ath_ops_no_const reg_ops; + + struct ieee80211_hw *hw; + struct ath_common common; +--- drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h 2012-09-27 23:19:11.000000000 +0000 ++++ drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h 2012-10-15 17:30:59.818924529 +0000 +@@ -545,7 +545,7 @@ struct phy_func_ptr { + void (*carrsuppr)(struct brcms_phy *); + s32 (*rxsigpwr)(struct brcms_phy *, s32); + void (*detach)(struct brcms_phy *); +-}; ++} __no_const; + + struct brcms_phy { + struct brcms_phy_pub pubpi_ro; +--- drivers/net/wireless/iwlegacy/3945-mac.c 2012-09-27 23:19:12.000000000 +0000 ++++ drivers/net/wireless/iwlegacy/3945-mac.c 2012-10-15 17:30:59.819924529 +0000 +@@ -3613,7 +3613,9 @@ il3945_pci_probe(struct pci_dev *pdev, c + */ + if (il3945_mod_params.disable_hw_scan) { + D_INFO("Disabling hw_scan\n"); +- il3945_mac_ops.hw_scan = NULL; ++ pax_open_kernel(); ++ *(void **)&il3945_mac_ops.hw_scan = NULL; ++ pax_close_kernel(); + } + + D_INFO("*** LOAD DRIVER ***\n"); +--- 
drivers/net/wireless/iwlwifi/dvm/debugfs.c 2012-09-27 23:19:11.000000000 +0000 ++++ drivers/net/wireless/iwlwifi/dvm/debugfs.c 2012-10-15 17:30:59.819924529 +0000 +@@ -203,7 +203,7 @@ static ssize_t iwl_dbgfs_sram_write(stru + { + struct iwl_priv *priv = file->private_data; + char buf[64]; +- int buf_size; ++ size_t buf_size; + u32 offset, len; + + memset(buf, 0, sizeof(buf)); +@@ -473,7 +473,7 @@ static ssize_t iwl_dbgfs_rx_handlers_wri + struct iwl_priv *priv = file->private_data; + + char buf[8]; +- int buf_size; ++ size_t buf_size; + u32 reset_flag; + + memset(buf, 0, sizeof(buf)); +@@ -554,7 +554,7 @@ static ssize_t iwl_dbgfs_disable_ht40_wr + { + struct iwl_priv *priv = file->private_data; + char buf[8]; +- int buf_size; ++ size_t buf_size; + int ht40; + + memset(buf, 0, sizeof(buf)); +@@ -606,7 +606,7 @@ static ssize_t iwl_dbgfs_sleep_level_ove + { + struct iwl_priv *priv = file->private_data; + char buf[8]; +- int buf_size; ++ size_t buf_size; + int value; + + memset(buf, 0, sizeof(buf)); +@@ -1871,7 +1871,7 @@ static ssize_t iwl_dbgfs_clear_ucode_sta + { + struct iwl_priv *priv = file->private_data; + char buf[8]; +- int buf_size; ++ size_t buf_size; + int clear; + + memset(buf, 0, sizeof(buf)); +@@ -1916,7 +1916,7 @@ static ssize_t iwl_dbgfs_ucode_tracing_w + { + struct iwl_priv *priv = file->private_data; + char buf[8]; +- int buf_size; ++ size_t buf_size; + int trace; + + memset(buf, 0, sizeof(buf)); +@@ -1987,7 +1987,7 @@ static ssize_t iwl_dbgfs_missed_beacon_w + { + struct iwl_priv *priv = file->private_data; + char buf[8]; +- int buf_size; ++ size_t buf_size; + int missed; + + memset(buf, 0, sizeof(buf)); +@@ -2028,7 +2028,7 @@ static ssize_t iwl_dbgfs_plcp_delta_writ + + struct iwl_priv *priv = file->private_data; + char buf[8]; +- int buf_size; ++ size_t buf_size; + int plcp; + + memset(buf, 0, sizeof(buf)); +@@ -2088,7 +2088,7 @@ static ssize_t iwl_dbgfs_txfifo_flush_wr + + struct iwl_priv *priv = file->private_data; + char buf[8]; +- int buf_size; ++ size_t buf_size; + int flush; + + memset(buf, 0, sizeof(buf)); +@@ -2178,7 +2178,7 @@ static ssize_t iwl_dbgfs_protection_mode + + struct iwl_priv *priv = file->private_data; + char buf[8]; +- int buf_size; ++ size_t buf_size; + int rts; + + if (!priv->cfg->ht_params) +@@ -2220,7 +2220,7 @@ static ssize_t iwl_dbgfs_echo_test_write + { + struct iwl_priv *priv = file->private_data; + char buf[8]; +- int buf_size; ++ size_t buf_size; + + memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); +@@ -2256,7 +2256,7 @@ static ssize_t iwl_dbgfs_log_event_write + struct iwl_priv *priv = file->private_data; + u32 event_log_flag; + char buf[8]; +- int buf_size; ++ size_t buf_size; + + /* check that the interface is up */ + if (!iwl_is_ready(priv)) +@@ -2310,7 +2310,7 @@ static ssize_t iwl_dbgfs_calib_disabled_ + struct iwl_priv *priv = file->private_data; + char buf[8]; + u32 calib_disabled; +- int buf_size; ++ size_t buf_size; + + memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); +--- drivers/net/wireless/iwlwifi/pcie/trans.c 2012-09-27 23:19:12.000000000 +0000 ++++ drivers/net/wireless/iwlwifi/pcie/trans.c 2012-10-15 17:30:59.820924530 +0000 +@@ -1944,7 +1944,7 @@ static ssize_t iwl_dbgfs_interrupt_write + struct isr_statistics *isr_stats = &trans_pcie->isr_stats; + + char buf[8]; +- int buf_size; ++ size_t buf_size; + u32 reset_flag; + + memset(buf, 0, sizeof(buf)); +@@ -1965,7 +1965,7 @@ static ssize_t iwl_dbgfs_csr_write(struc + { + struct iwl_trans *trans = file->private_data; + char buf[8]; 
+- int buf_size; ++ size_t buf_size; + int csr; + + memset(buf, 0, sizeof(buf)); +--- drivers/net/wireless/mac80211_hwsim.c 2012-09-27 23:19:12.000000000 +0000 ++++ drivers/net/wireless/mac80211_hwsim.c 2012-10-15 17:30:59.820924530 +0000 +@@ -1748,9 +1748,11 @@ static int __init init_mac80211_hwsim(vo + return -EINVAL; + + if (fake_hw_scan) { +- mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan; +- mac80211_hwsim_ops.sw_scan_start = NULL; +- mac80211_hwsim_ops.sw_scan_complete = NULL; ++ pax_open_kernel(); ++ *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan; ++ *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL; ++ *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL; ++ pax_close_kernel(); + } + + spin_lock_init(&hwsim_radio_lock); +--- drivers/net/wireless/mwifiex/main.h 2012-09-27 23:19:11.000000000 +0000 ++++ drivers/net/wireless/mwifiex/main.h 2012-10-15 17:30:59.820924530 +0000 +@@ -571,7 +571,7 @@ struct mwifiex_if_ops { + int (*event_complete) (struct mwifiex_adapter *, struct sk_buff *); + int (*data_complete) (struct mwifiex_adapter *, struct sk_buff *); + int (*dnld_fw) (struct mwifiex_adapter *, struct mwifiex_fw_image *); +-}; ++} __no_const; + + struct mwifiex_adapter { + u8 iface_type; +--- drivers/net/wireless/rndis_wlan.c 2012-09-27 23:19:12.000000000 +0000 ++++ drivers/net/wireless/rndis_wlan.c 2012-10-15 17:30:59.821924531 +0000 +@@ -1235,7 +1235,7 @@ static int set_rts_threshold(struct usbn + + netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold); + +- if (rts_threshold < 0 || rts_threshold > 2347) ++ if (rts_threshold > 2347) + rts_threshold = 2347; + + tmp = cpu_to_le32(rts_threshold); +--- drivers/net/wireless/rt2x00/rt2x00.h 2012-09-27 23:19:11.000000000 +0000 ++++ drivers/net/wireless/rt2x00/rt2x00.h 2012-10-15 17:30:59.821924531 +0000 +@@ -397,7 +397,7 @@ struct rt2x00_intf { + * for hardware which doesn't support hardware + * sequence counting. + */ +- atomic_t seqno; ++ atomic_unchecked_t seqno; + }; + + static inline struct rt2x00_intf* vif_to_intf(struct ieee80211_vif *vif) +--- drivers/net/wireless/rt2x00/rt2x00queue.c 2012-09-27 23:19:11.000000000 +0000 ++++ drivers/net/wireless/rt2x00/rt2x00queue.c 2012-10-15 17:30:59.822924531 +0000 +@@ -240,9 +240,9 @@ static void rt2x00queue_create_tx_descri + * sequence counter given by mac80211. 
+ */ + if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags)) +- seqno = atomic_add_return(0x10, &intf->seqno); ++ seqno = atomic_add_return_unchecked(0x10, &intf->seqno); + else +- seqno = atomic_read(&intf->seqno); ++ seqno = atomic_read_unchecked(&intf->seqno); + + hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG); + hdr->seq_ctrl |= cpu_to_le16(seqno); +--- drivers/net/wireless/ti/wl1251/wl1251.h 2012-09-27 23:19:11.000000000 +0000 ++++ drivers/net/wireless/ti/wl1251/wl1251.h 2012-10-15 17:30:59.822924531 +0000 +@@ -266,7 +266,7 @@ struct wl1251_if_operations { + void (*reset)(struct wl1251 *wl); + void (*enable_irq)(struct wl1251 *wl); + void (*disable_irq)(struct wl1251 *wl); +-}; ++} __no_const; + + struct wl1251 { + struct ieee80211_hw *hw; +--- drivers/net/wireless/ti/wlcore/wlcore.h 2012-09-27 23:19:12.000000000 +0000 ++++ drivers/net/wireless/ti/wlcore/wlcore.h 2012-10-15 17:30:59.822924531 +0000 +@@ -81,7 +81,7 @@ struct wlcore_ops { + struct ieee80211_sta *sta, + struct ieee80211_key_conf *key_conf); + u32 (*pre_pkt_send)(struct wl1271 *wl, u32 buf_offset, u32 last_len); +-}; ++} __no_const; + + enum wlcore_partitions { + PART_DOWN, +--- include/linux/unaligned/access_ok.h 2012-09-17 19:15:56.000000000 +0000 ++++ include/linux/unaligned/access_ok.h 2012-10-15 17:30:59.823924531 +0000 +@@ -6,32 +6,32 @@ + + static inline u16 get_unaligned_le16(const void *p) + { +- return le16_to_cpup((__le16 *)p); ++ return le16_to_cpup((const __le16 *)p); + } + + static inline u32 get_unaligned_le32(const void *p) + { +- return le32_to_cpup((__le32 *)p); ++ return le32_to_cpup((const __le32 *)p); + } + + static inline u64 get_unaligned_le64(const void *p) + { +- return le64_to_cpup((__le64 *)p); ++ return le64_to_cpup((const __le64 *)p); + } + + static inline u16 get_unaligned_be16(const void *p) + { +- return be16_to_cpup((__be16 *)p); ++ return be16_to_cpup((const __be16 *)p); + } + + static inline u32 get_unaligned_be32(const void *p) + { +- return be32_to_cpup((__be32 *)p); ++ return be32_to_cpup((const __be32 *)p); + } + + static inline u64 get_unaligned_be64(const void *p) + { +- return be64_to_cpup((__be64 *)p); ++ return be64_to_cpup((const __be64 *)p); + } + + static inline void put_unaligned_le16(u16 val, void *p) +--- net/bluetooth/hci_sock.c 2012-09-27 23:19:12.000000000 +0000 ++++ net/bluetooth/hci_sock.c 2012-10-15 17:30:59.825924531 +0000 +@@ -940,7 +940,7 @@ static int hci_sock_setsockopt(struct so + uf.event_mask[1] = *((u32 *) f->event_mask + 1); + } + +- len = min_t(unsigned int, len, sizeof(uf)); ++ len = min((size_t)len, sizeof(uf)); + if (copy_from_user(&uf, optval, len)) { + err = -EFAULT; + break; +--- net/bluetooth/l2cap_core.c 2012-09-27 23:19:11.000000000 +0000 ++++ net/bluetooth/l2cap_core.c 2012-10-15 17:30:59.825924531 +0000 +@@ -3165,8 +3165,10 @@ static int l2cap_parse_conf_rsp(struct l + break; + + case L2CAP_CONF_RFC: +- if (olen == sizeof(rfc)) +- memcpy(&rfc, (void *)val, olen); ++ if (olen != sizeof(rfc)) ++ break; ++ ++ memcpy(&rfc, (void *)val, olen); + + if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) && + rfc.mode != chan->mode) +--- net/bluetooth/l2cap_sock.c 2012-09-27 23:19:12.000000000 +0000 ++++ net/bluetooth/l2cap_sock.c 2012-10-15 17:30:59.826924531 +0000 +@@ -467,7 +467,8 @@ static int l2cap_sock_setsockopt_old(str + struct sock *sk = sock->sk; + struct l2cap_chan *chan = l2cap_pi(sk)->chan; + struct l2cap_options opts; +- int len, err = 0; ++ int err = 0; ++ size_t len = optlen; + u32 opt; + + BT_DBG("sk %p", sk); +@@ -489,7 +490,7 @@ 
static int l2cap_sock_setsockopt_old(str + opts.max_tx = chan->max_tx; + opts.txwin_size = chan->tx_win; + +- len = min_t(unsigned int, sizeof(opts), optlen); ++ len = min(sizeof(opts), len); + if (copy_from_user((char *) &opts, optval, len)) { + err = -EFAULT; + break; +@@ -574,7 +575,8 @@ static int l2cap_sock_setsockopt(struct + struct bt_security sec; + struct bt_power pwr; + struct l2cap_conn *conn; +- int len, err = 0; ++ int err = 0; ++ size_t len = optlen; + u32 opt; + + BT_DBG("sk %p", sk); +@@ -597,7 +599,7 @@ static int l2cap_sock_setsockopt(struct + + sec.level = BT_SECURITY_LOW; + +- len = min_t(unsigned int, sizeof(sec), optlen); ++ len = min(sizeof(sec), len); + if (copy_from_user((char *) &sec, optval, len)) { + err = -EFAULT; + break; +@@ -694,7 +696,7 @@ static int l2cap_sock_setsockopt(struct + + pwr.force_active = BT_POWER_FORCE_ACTIVE_ON; + +- len = min_t(unsigned int, sizeof(pwr), optlen); ++ len = min(sizeof(pwr), len); + if (copy_from_user((char *) &pwr, optval, len)) { + err = -EFAULT; + break; +--- net/bluetooth/rfcomm/sock.c 2012-09-27 23:19:12.000000000 +0000 ++++ net/bluetooth/rfcomm/sock.c 2012-10-15 17:30:59.826924531 +0000 +@@ -676,7 +676,7 @@ static int rfcomm_sock_setsockopt(struct + struct sock *sk = sock->sk; + struct bt_security sec; + int err = 0; +- size_t len; ++ size_t len = optlen; + u32 opt; + + BT_DBG("sk %p", sk); +@@ -698,7 +698,7 @@ static int rfcomm_sock_setsockopt(struct + + sec.level = BT_SECURITY_LOW; + +- len = min_t(unsigned int, sizeof(sec), optlen); ++ len = min(sizeof(sec), len); + if (copy_from_user((char *) &sec, optval, len)) { + err = -EFAULT; + break; +--- net/bluetooth/rfcomm/tty.c 2012-09-27 23:19:12.000000000 +0000 ++++ net/bluetooth/rfcomm/tty.c 2012-10-15 17:30:59.826924531 +0000 +@@ -309,7 +309,7 @@ static void rfcomm_dev_del(struct rfcomm + BUG_ON(test_and_set_bit(RFCOMM_TTY_RELEASED, &dev->flags)); + + spin_lock_irqsave(&dev->port.lock, flags); +- if (dev->port.count > 0) { ++ if (atomic_read(&dev->port.count) > 0) { + spin_unlock_irqrestore(&dev->port.lock, flags); + return; + } +@@ -664,10 +664,10 @@ static int rfcomm_tty_open(struct tty_st + return -ENODEV; + + BT_DBG("dev %p dst %s channel %d opened %d", dev, batostr(&dev->dst), +- dev->channel, dev->port.count); ++ dev->channel, atomic_read(&dev->port.count)); + + spin_lock_irqsave(&dev->port.lock, flags); +- if (++dev->port.count > 1) { ++ if (atomic_inc_return(&dev->port.count) > 1) { + spin_unlock_irqrestore(&dev->port.lock, flags); + return 0; + } +@@ -736,10 +736,10 @@ static void rfcomm_tty_close(struct tty_ + return; + + BT_DBG("tty %p dev %p dlc %p opened %d", tty, dev, dev->dlc, +- dev->port.count); ++ atomic_read(&dev->port.count)); + + spin_lock_irqsave(&dev->port.lock, flags); +- if (!--dev->port.count) { ++ if (!atomic_dec_return(&dev->port.count)) { + spin_unlock_irqrestore(&dev->port.lock, flags); + if (dev->tty_dev->parent) + #if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,29)) +--- net/mac80211/ieee80211_i.h 2012-09-27 23:19:12.000000000 +0000 ++++ net/mac80211/ieee80211_i.h 2012-10-15 17:30:59.827924531 +0000 +@@ -28,6 +28,7 @@ + #include + #include + #include ++#include + #include "key.h" + #include "sta_info.h" + #include "debug.h" +@@ -840,7 +841,7 @@ struct ieee80211_local { + /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */ + spinlock_t queue_stop_reason_lock; + +- int open_count; ++ local_t open_count; + int monitors, cooked_mntrs; + /* number of interfaces with corresponding FIF_ flags */ + int fif_fcsfail, fif_plcpfail, 
fif_control, fif_other_bss, fif_pspoll, +--- net/mac80211/iface.c 2012-09-27 23:19:12.000000000 +0000 ++++ net/mac80211/iface.c 2012-10-15 17:30:59.827924531 +0000 +@@ -454,7 +454,7 @@ static int ieee80211_do_open(struct net_ + break; + } + +- if (local->open_count == 0) { ++ if (local_read(&local->open_count) == 0) { + res = drv_start(local); + if (res) + goto err_del_bss; +@@ -497,7 +497,7 @@ static int ieee80211_do_open(struct net_ + break; + } + +- if (local->monitors == 0 && local->open_count == 0) { ++ if (local->monitors == 0 && local_read(&local->open_count) == 0) { + res = ieee80211_add_virtual_monitor(local); + if (res) + goto err_stop; +@@ -594,7 +594,7 @@ static int ieee80211_do_open(struct net_ + mutex_unlock(&local->mtx); + + if (coming_up) +- local->open_count++; ++ local_inc(&local->open_count); + + if (hw_reconf_flags) + ieee80211_hw_config(local, hw_reconf_flags); +@@ -607,7 +607,7 @@ static int ieee80211_do_open(struct net_ + err_del_interface: + drv_remove_interface(local, sdata); + err_stop: +- if (!local->open_count) ++ if (!local_read(&local->open_count)) + drv_stop(local); + err_del_bss: + sdata->bss = NULL; +@@ -741,7 +741,7 @@ static void ieee80211_do_stop(struct iee + } + + if (going_down) +- local->open_count--; ++ local_dec(&local->open_count); + + switch (sdata->vif.type) { + case NL80211_IFTYPE_AP_VLAN: +@@ -801,7 +801,7 @@ static void ieee80211_do_stop(struct iee + + ieee80211_recalc_ps(local, -1); + +- if (local->open_count == 0) { ++ if (local_read(&local->open_count) == 0) { + if (local->ops->napi_poll) + napi_disable(&local->napi); + ieee80211_clear_tx_pending(local); +@@ -833,7 +833,7 @@ static void ieee80211_do_stop(struct iee + } + spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); + +- if (local->monitors == local->open_count && local->monitors > 0) ++ if (local->monitors == local_read(&local->open_count) && local->monitors > 0) + ieee80211_add_virtual_monitor(local); + } + +--- net/mac80211/main.c 2012-09-27 23:19:12.000000000 +0000 ++++ net/mac80211/main.c 2012-10-15 17:30:59.827924531 +0000 +@@ -170,7 +170,7 @@ int ieee80211_hw_config(struct ieee80211 + local->hw.conf.power_level = power; + } + +- if (changed && local->open_count) { ++ if (changed && local_read(&local->open_count)) { + ret = drv_config(local, changed); + /* + * Goal: +--- net/mac80211/pm.c 2012-09-27 23:19:11.000000000 +0000 ++++ net/mac80211/pm.c 2012-10-15 17:30:59.828924531 +0000 +@@ -34,7 +34,7 @@ int __ieee80211_suspend(struct ieee80211 + struct ieee80211_sub_if_data *sdata; + struct sta_info *sta; + +- if (!local->open_count) ++ if (!local_read(&local->open_count)) + goto suspend; + + ieee80211_scan_cancel(local); +@@ -72,7 +72,7 @@ int __ieee80211_suspend(struct ieee80211 + cancel_work_sync(&local->dynamic_ps_enable_work); + del_timer_sync(&local->dynamic_ps_timer); + +- local->wowlan = wowlan && local->open_count; ++ local->wowlan = wowlan && local_read(&local->open_count); + if (local->wowlan) { + int err = drv_suspend(local, wowlan); + if (err < 0) { +@@ -143,7 +143,7 @@ int __ieee80211_suspend(struct ieee80211 + drv_remove_interface(local, sdata); + + /* stop hardware - this must stop RX */ +- if (local->open_count) ++ if (local_read(&local->open_count)) + ieee80211_stop_device(local); + + suspend: +--- net/mac80211/rate.c 2012-09-27 23:19:12.000000000 +0000 ++++ net/mac80211/rate.c 2012-10-15 17:30:59.828924531 +0000 +@@ -494,7 +494,7 @@ int ieee80211_init_rate_ctrl_alg(struct + + ASSERT_RTNL(); + +- if (local->open_count) ++ if 
(local_read(&local->open_count)) + return -EBUSY; + + if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) { +--- net/mac80211/rc80211_pid_debugfs.c 2012-09-27 23:19:11.000000000 +0000 ++++ net/mac80211/rc80211_pid_debugfs.c 2012-10-15 17:30:59.828924531 +0000 +@@ -193,7 +193,7 @@ static ssize_t rate_control_pid_events_r + + spin_unlock_irqrestore(&events->lock, status); + +- if (copy_to_user(buf, pb, p)) ++ if (p > sizeof(pb) || copy_to_user(buf, pb, p)) + return -EFAULT; + + return p; +--- net/mac80211/util.c 2012-09-27 23:19:11.000000000 +0000 ++++ net/mac80211/util.c 2012-10-15 17:30:59.828924531 +0000 +@@ -1251,7 +1251,7 @@ int ieee80211_reconfig(struct ieee80211_ + } + #endif + /* everything else happens only if HW was up & running */ +- if (!local->open_count) ++ if (!local_read(&local->open_count)) + goto wake_up; + + /* +--- net/wireless/core.h 2012-09-27 23:19:11.000000000 +0000 ++++ net/wireless/core.h 2012-10-15 17:30:59.829924531 +0000 +@@ -28,7 +28,7 @@ struct cfg80211_registered_device { + struct mutex mtx; + + /* rfkill support */ +- struct rfkill_ops rfkill_ops; ++ rfkill_ops_no_const rfkill_ops; + struct rfkill *rfkill; + struct work_struct rfkill_sync; + +--- net/wireless/wext-core.c 2012-09-27 23:19:12.000000000 +0000 ++++ net/wireless/wext-core.c 2012-10-15 17:30:59.829924531 +0000 +@@ -792,8 +792,7 @@ static int ioctl_standard_iw_point(struc + */ + + /* Support for very large requests */ +- if ((descr->flags & IW_DESCR_FLAG_NOMAX) && +- (user_length > descr->max_tokens)) { ++ if (user_length > descr->max_tokens) { + /* Allow userspace to GET more than max so + * we can support any size GET requests. + * There is still a limit : -ENOMEM. +@@ -832,22 +831,6 @@ static int ioctl_standard_iw_point(struc + } + } + +- if (IW_IS_GET(cmd) && !(descr->flags & IW_DESCR_FLAG_NOMAX)) { +- /* +- * If this is a GET, but not NOMAX, it means that the extra +- * data is not bounded by userspace, but by max_tokens. Thus +- * set the length to max_tokens. This matches the extra data +- * allocation. +- * The driver should fill it with the number of tokens it +- * provided, and it may check iwp->length rather than having +- * knowledge of max_tokens. If the driver doesn't change the +- * iwp->length, this ioctl just copies back max_token tokens +- * filled with zeroes. Hopefully the driver isn't claiming +- * them to be valid data. +- */ +- iwp->length = descr->max_tokens; +- } +- + err = handler(dev, info, (union iwreq_data *) iwp, extra); + + iwp->length += essid_compat; +--- scripts/gcc-plugin.sh 1970-01-01 00:00:00.000000000 +0000 ++++ scripts/gcc-plugin.sh 2012-10-15 17:30:59.829924531 +0000 +@@ -0,0 +1,17 @@ ++#!/bin/bash ++plugincc=`$1 -x c -shared - -o /dev/null -I\`$3 -print-file-name=plugin\`/include 2>&1 < ++ * Licensed under the GPL v2 ++ * ++ * Note: the choice of the license means that the compilation process is ++ * NOT 'eligible' as defined by gcc's library exception to the GPL v3, ++ * but for the kernel it doesn't matter since it doesn't link against ++ * any of the gcc libraries ++ * ++ * gcc plugin to implement various sparse (source code checker) features ++ * ++ * TODO: ++ * - define separate __iomem, __percpu and __rcu address spaces (lots of code to patch) ++ * ++ * BUGS: ++ * - none known ++ */ ++#include "gcc-plugin.h" ++#include "config.h" ++#include "system.h" ++#include "coretypes.h" ++#include "tree.h" ++#include "tree-pass.h" ++#include "flags.h" ++#include "intl.h" ++#include "toplev.h" ++#include "plugin.h" ++//#include "expr.h" where are you... 
++#include "diagnostic.h" ++#include "plugin-version.h" ++#include "tm.h" ++#include "function.h" ++#include "basic-block.h" ++#include "gimple.h" ++#include "rtl.h" ++#include "emit-rtl.h" ++#include "tree-flow.h" ++#include "target.h" ++ ++extern void c_register_addr_space (const char *str, addr_space_t as); ++extern enum machine_mode default_addr_space_pointer_mode (addr_space_t); ++extern enum machine_mode default_addr_space_address_mode (addr_space_t); ++extern bool default_addr_space_valid_pointer_mode(enum machine_mode mode, addr_space_t as); ++extern bool default_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as); ++extern rtx default_addr_space_legitimize_address(rtx x, rtx oldx, enum machine_mode mode, addr_space_t as); ++ ++extern void print_gimple_stmt(FILE *, gimple, int, int); ++extern rtx emit_move_insn(rtx x, rtx y); ++ ++int plugin_is_GPL_compatible; ++ ++static struct plugin_info checker_plugin_info = { ++ .version = "201111150100", ++}; ++ ++#define ADDR_SPACE_KERNEL 0 ++#define ADDR_SPACE_FORCE_KERNEL 1 ++#define ADDR_SPACE_USER 2 ++#define ADDR_SPACE_FORCE_USER 3 ++#define ADDR_SPACE_IOMEM 0 ++#define ADDR_SPACE_FORCE_IOMEM 0 ++#define ADDR_SPACE_PERCPU 0 ++#define ADDR_SPACE_FORCE_PERCPU 0 ++#define ADDR_SPACE_RCU 0 ++#define ADDR_SPACE_FORCE_RCU 0 ++ ++static enum machine_mode checker_addr_space_pointer_mode(addr_space_t addrspace) ++{ ++ return default_addr_space_pointer_mode(ADDR_SPACE_GENERIC); ++} ++ ++static enum machine_mode checker_addr_space_address_mode(addr_space_t addrspace) ++{ ++ return default_addr_space_address_mode(ADDR_SPACE_GENERIC); ++} ++ ++static bool checker_addr_space_valid_pointer_mode(enum machine_mode mode, addr_space_t as) ++{ ++ return default_addr_space_valid_pointer_mode(mode, as); ++} ++ ++static bool checker_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as) ++{ ++ return default_addr_space_legitimate_address_p(mode, mem, strict, ADDR_SPACE_GENERIC); ++} ++ ++static rtx checker_addr_space_legitimize_address(rtx x, rtx oldx, enum machine_mode mode, addr_space_t as) ++{ ++ return default_addr_space_legitimize_address(x, oldx, mode, as); ++} ++ ++static bool checker_addr_space_subset_p(addr_space_t subset, addr_space_t superset) ++{ ++ if (subset == ADDR_SPACE_FORCE_KERNEL && superset == ADDR_SPACE_KERNEL) ++ return true; ++ ++ if (subset == ADDR_SPACE_FORCE_USER && superset == ADDR_SPACE_USER) ++ return true; ++ ++ if (subset == ADDR_SPACE_FORCE_IOMEM && superset == ADDR_SPACE_IOMEM) ++ return true; ++ ++ if (subset == ADDR_SPACE_KERNEL && superset == ADDR_SPACE_FORCE_USER) ++ return true; ++ ++ if (subset == ADDR_SPACE_KERNEL && superset == ADDR_SPACE_FORCE_IOMEM) ++ return true; ++ ++ if (subset == ADDR_SPACE_USER && superset == ADDR_SPACE_FORCE_KERNEL) ++ return true; ++ ++ if (subset == ADDR_SPACE_IOMEM && superset == ADDR_SPACE_FORCE_KERNEL) ++ return true; ++ ++ return subset == superset; ++} ++ ++static rtx checker_addr_space_convert(rtx op, tree from_type, tree to_type) ++{ ++// addr_space_t from_as = TYPE_ADDR_SPACE(TREE_TYPE(from_type)); ++// addr_space_t to_as = TYPE_ADDR_SPACE(TREE_TYPE(to_type)); ++ ++ return op; ++} ++ ++static void register_checker_address_spaces(void *event_data, void *data) ++{ ++ c_register_addr_space("__kernel", ADDR_SPACE_KERNEL); ++ c_register_addr_space("__force_kernel", ADDR_SPACE_FORCE_KERNEL); ++ c_register_addr_space("__user", ADDR_SPACE_USER); ++ c_register_addr_space("__force_user", ADDR_SPACE_FORCE_USER); 
++// c_register_addr_space("__iomem", ADDR_SPACE_IOMEM); ++// c_register_addr_space("__force_iomem", ADDR_SPACE_FORCE_IOMEM); ++// c_register_addr_space("__percpu", ADDR_SPACE_PERCPU); ++// c_register_addr_space("__force_percpu", ADDR_SPACE_FORCE_PERCPU); ++// c_register_addr_space("__rcu", ADDR_SPACE_RCU); ++// c_register_addr_space("__force_rcu", ADDR_SPACE_FORCE_RCU); ++ ++ targetm.addr_space.pointer_mode = checker_addr_space_pointer_mode; ++ targetm.addr_space.address_mode = checker_addr_space_address_mode; ++ targetm.addr_space.valid_pointer_mode = checker_addr_space_valid_pointer_mode; ++ targetm.addr_space.legitimate_address_p = checker_addr_space_legitimate_address_p; ++// targetm.addr_space.legitimize_address = checker_addr_space_legitimize_address; ++ targetm.addr_space.subset_p = checker_addr_space_subset_p; ++ targetm.addr_space.convert = checker_addr_space_convert; ++} ++ ++int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version) ++{ ++ const char * const plugin_name = plugin_info->base_name; ++ const int argc = plugin_info->argc; ++ const struct plugin_argument * const argv = plugin_info->argv; ++ int i; ++ ++ if (!plugin_default_version_check(version, &gcc_version)) { ++ error(G_("incompatible gcc/plugin versions")); ++ return 1; ++ } ++ ++ register_callback(plugin_name, PLUGIN_INFO, NULL, &checker_plugin_info); ++ ++ for (i = 0; i < argc; ++i) ++ error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key); ++ ++ if (TARGET_64BIT == 0) ++ return 0; ++ ++ register_callback(plugin_name, PLUGIN_PRAGMAS, register_checker_address_spaces, NULL); ++ ++ return 0; ++} +--- tools/gcc/colorize_plugin.c 1970-01-01 00:00:00.000000000 +0000 ++++ tools/gcc/colorize_plugin.c 2012-10-15 17:30:59.830924531 +0000 +@@ -0,0 +1,148 @@ ++/* ++ * Copyright 2012 by PaX Team ++ * Licensed under the GPL v2 ++ * ++ * Note: the choice of the license means that the compilation process is ++ * NOT 'eligible' as defined by gcc's library exception to the GPL v3, ++ * but for the kernel it doesn't matter since it doesn't link against ++ * any of the gcc libraries ++ * ++ * gcc plugin to colorize diagnostic output ++ * ++ */ ++ ++#include "gcc-plugin.h" ++#include "config.h" ++#include "system.h" ++#include "coretypes.h" ++#include "tree.h" ++#include "tree-pass.h" ++#include "flags.h" ++#include "intl.h" ++#include "toplev.h" ++#include "plugin.h" ++#include "diagnostic.h" ++#include "plugin-version.h" ++#include "tm.h" ++ ++int plugin_is_GPL_compatible; ++ ++static struct plugin_info colorize_plugin_info = { ++ .version = "201203092200", ++ .help = NULL, ++}; ++ ++#define GREEN "\033[32m\033[2m" ++#define LIGHTGREEN "\033[32m\033[1m" ++#define YELLOW "\033[33m\033[2m" ++#define LIGHTYELLOW "\033[33m\033[1m" ++#define RED "\033[31m\033[2m" ++#define LIGHTRED "\033[31m\033[1m" ++#define BLUE "\033[34m\033[2m" ++#define LIGHTBLUE "\033[34m\033[1m" ++#define BRIGHT "\033[m\033[1m" ++#define NORMAL "\033[m" ++ ++static diagnostic_starter_fn old_starter; ++static diagnostic_finalizer_fn old_finalizer; ++ ++static void start_colorize(diagnostic_context *context, diagnostic_info *diagnostic) ++{ ++ const char *color; ++ char *newprefix; ++ ++ switch (diagnostic->kind) { ++ case DK_NOTE: ++ color = LIGHTBLUE; ++ break; ++ ++ case DK_PEDWARN: ++ case DK_WARNING: ++ color = LIGHTYELLOW; ++ break; ++ ++ case DK_ERROR: ++ case DK_FATAL: ++ case DK_ICE: ++ case DK_PERMERROR: ++ case DK_SORRY: ++ color = LIGHTRED; ++ break; ++ ++ default: ++ color = NORMAL; ++ } ++ ++ 
old_starter(context, diagnostic); ++ if (-1 == asprintf(&newprefix, "%s%s" NORMAL, color, context->printer->prefix)) ++ return; ++ pp_destroy_prefix(context->printer); ++ pp_set_prefix(context->printer, newprefix); ++} ++ ++static void finalize_colorize(diagnostic_context *context, diagnostic_info *diagnostic) ++{ ++ old_finalizer(context, diagnostic); ++} ++ ++static void colorize_arm(void) ++{ ++ old_starter = diagnostic_starter(global_dc); ++ old_finalizer = diagnostic_finalizer(global_dc); ++ ++ diagnostic_starter(global_dc) = start_colorize; ++ diagnostic_finalizer(global_dc) = finalize_colorize; ++} ++ ++static unsigned int execute_colorize_rearm(void) ++{ ++ if (diagnostic_starter(global_dc) == start_colorize) ++ return 0; ++ ++ colorize_arm(); ++ return 0; ++} ++ ++struct simple_ipa_opt_pass pass_ipa_colorize_rearm = { ++ .pass = { ++ .type = SIMPLE_IPA_PASS, ++ .name = "colorize_rearm", ++ .gate = NULL, ++ .execute = execute_colorize_rearm, ++ .sub = NULL, ++ .next = NULL, ++ .static_pass_number = 0, ++ .tv_id = TV_NONE, ++ .properties_required = 0, ++ .properties_provided = 0, ++ .properties_destroyed = 0, ++ .todo_flags_start = 0, ++ .todo_flags_finish = 0 ++ } ++}; ++ ++static void colorize_start_unit(void *gcc_data, void *user_data) ++{ ++ colorize_arm(); ++} ++ ++int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version) ++{ ++ const char * const plugin_name = plugin_info->base_name; ++ struct register_pass_info colorize_rearm_pass_info = { ++ .pass = &pass_ipa_colorize_rearm.pass, ++ .reference_pass_name = "*free_lang_data", ++ .ref_pass_instance_number = 1, ++ .pos_op = PASS_POS_INSERT_AFTER ++ }; ++ ++ if (!plugin_default_version_check(version, &gcc_version)) { ++ error(G_("incompatible gcc/plugin versions")); ++ return 1; ++ } ++ ++ register_callback(plugin_name, PLUGIN_INFO, NULL, &colorize_plugin_info); ++ register_callback(plugin_name, PLUGIN_START_UNIT, &colorize_start_unit, NULL); ++ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &colorize_rearm_pass_info); ++ return 0; ++} +--- tools/gcc/constify_plugin.c 1970-01-01 00:00:00.000000000 +0000 ++++ tools/gcc/constify_plugin.c 2012-10-15 17:30:59.830924531 +0000 +@@ -0,0 +1,331 @@ ++/* ++ * Copyright 2011 by Emese Revfy ++ * Copyright 2011 by PaX Team ++ * Licensed under the GPL v2, or (at your option) v3 ++ * ++ * This gcc plugin constifies all structures which contain only function pointers or are explicitly marked for constification. 
++ * ++ * Homepage: ++ * http://www.grsecurity.net/~ephox/const_plugin/ ++ * ++ * Usage: ++ * $ gcc -I`gcc -print-file-name=plugin`/include -fPIC -shared -O2 -o constify_plugin.so constify_plugin.c ++ * $ gcc -fplugin=constify_plugin.so test.c -O2 ++ */ ++ ++#include "gcc-plugin.h" ++#include "config.h" ++#include "system.h" ++#include "coretypes.h" ++#include "tree.h" ++#include "tree-pass.h" ++#include "flags.h" ++#include "intl.h" ++#include "toplev.h" ++#include "plugin.h" ++#include "diagnostic.h" ++#include "plugin-version.h" ++#include "tm.h" ++#include "function.h" ++#include "basic-block.h" ++#include "gimple.h" ++#include "rtl.h" ++#include "emit-rtl.h" ++#include "tree-flow.h" ++ ++#define C_TYPE_FIELDS_READONLY(TYPE) TREE_LANG_FLAG_1(TYPE) ++ ++int plugin_is_GPL_compatible; ++ ++static struct plugin_info const_plugin_info = { ++ .version = "201205300030", ++ .help = "no-constify\tturn off constification\n", ++}; ++ ++static void deconstify_tree(tree node); ++ ++static void deconstify_type(tree type) ++{ ++ tree field; ++ ++ for (field = TYPE_FIELDS(type); field; field = TREE_CHAIN(field)) { ++ tree type = TREE_TYPE(field); ++ ++ if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE) ++ continue; ++ if (!TYPE_READONLY(type)) ++ continue; ++ ++ deconstify_tree(field); ++ } ++ TYPE_READONLY(type) = 0; ++ C_TYPE_FIELDS_READONLY(type) = 0; ++} ++ ++static void deconstify_tree(tree node) ++{ ++ tree old_type, new_type, field; ++ ++ old_type = TREE_TYPE(node); ++ ++ gcc_assert(TYPE_READONLY(old_type) && (TYPE_QUALS(old_type) & TYPE_QUAL_CONST)); ++ ++ new_type = build_qualified_type(old_type, TYPE_QUALS(old_type) & ~TYPE_QUAL_CONST); ++ TYPE_FIELDS(new_type) = copy_list(TYPE_FIELDS(new_type)); ++ for (field = TYPE_FIELDS(new_type); field; field = TREE_CHAIN(field)) ++ DECL_FIELD_CONTEXT(field) = new_type; ++ ++ deconstify_type(new_type); ++ ++ TREE_READONLY(node) = 0; ++ TREE_TYPE(node) = new_type; ++} ++ ++static tree handle_no_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs) ++{ ++ tree type; ++ ++ *no_add_attrs = true; ++ if (TREE_CODE(*node) == FUNCTION_DECL) { ++ error("%qE attribute does not apply to functions", name); ++ return NULL_TREE; ++ } ++ ++ if (TREE_CODE(*node) == VAR_DECL) { ++ error("%qE attribute does not apply to variables", name); ++ return NULL_TREE; ++ } ++ ++ if (TYPE_P(*node)) { ++ if (TREE_CODE(*node) == RECORD_TYPE || TREE_CODE(*node) == UNION_TYPE) ++ *no_add_attrs = false; ++ else ++ error("%qE attribute applies to struct and union types only", name); ++ return NULL_TREE; ++ } ++ ++ type = TREE_TYPE(*node); ++ ++ if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE) { ++ error("%qE attribute applies to struct and union types only", name); ++ return NULL_TREE; ++ } ++ ++ if (lookup_attribute(IDENTIFIER_POINTER(name), TYPE_ATTRIBUTES(type))) { ++ error("%qE attribute is already applied to the type", name); ++ return NULL_TREE; ++ } ++ ++ if (TREE_CODE(*node) == TYPE_DECL && !TYPE_READONLY(type)) { ++ error("%qE attribute used on type that is not constified", name); ++ return NULL_TREE; ++ } ++ ++ if (TREE_CODE(*node) == TYPE_DECL) { ++ deconstify_tree(*node); ++ return NULL_TREE; ++ } ++ ++ return NULL_TREE; ++} ++ ++static void constify_type(tree type) ++{ ++ TYPE_READONLY(type) = 1; ++ C_TYPE_FIELDS_READONLY(type) = 1; ++} ++ ++static tree handle_do_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs) ++{ ++ *no_add_attrs = true; ++ if (!TYPE_P(*node)) { ++ error("%qE 
attribute applies to types only", name); ++ return NULL_TREE; ++ } ++ ++ if (TREE_CODE(*node) != RECORD_TYPE && TREE_CODE(*node) != UNION_TYPE) { ++ error("%qE attribute applies to struct and union types only", name); ++ return NULL_TREE; ++ } ++ ++ *no_add_attrs = false; ++ constify_type(*node); ++ return NULL_TREE; ++} ++ ++static struct attribute_spec no_const_attr = { ++ .name = "no_const", ++ .min_length = 0, ++ .max_length = 0, ++ .decl_required = false, ++ .type_required = false, ++ .function_type_required = false, ++ .handler = handle_no_const_attribute, ++#if BUILDING_GCC_VERSION >= 4007 ++ .affects_type_identity = true ++#endif ++}; ++ ++static struct attribute_spec do_const_attr = { ++ .name = "do_const", ++ .min_length = 0, ++ .max_length = 0, ++ .decl_required = false, ++ .type_required = false, ++ .function_type_required = false, ++ .handler = handle_do_const_attribute, ++#if BUILDING_GCC_VERSION >= 4007 ++ .affects_type_identity = true ++#endif ++}; ++ ++static void register_attributes(void *event_data, void *data) ++{ ++ register_attribute(&no_const_attr); ++ register_attribute(&do_const_attr); ++} ++ ++static bool is_fptr(tree field) ++{ ++ tree ptr = TREE_TYPE(field); ++ ++ if (TREE_CODE(ptr) != POINTER_TYPE) ++ return false; ++ ++ return TREE_CODE(TREE_TYPE(ptr)) == FUNCTION_TYPE; ++} ++ ++static bool walk_struct(tree node) ++{ ++ tree field; ++ ++ if (TYPE_FIELDS(node) == NULL_TREE) ++ return false; ++ ++ if (lookup_attribute("no_const", TYPE_ATTRIBUTES(node))) { ++ gcc_assert(!TYPE_READONLY(node)); ++ deconstify_type(node); ++ return false; ++ } ++ ++ for (field = TYPE_FIELDS(node); field; field = TREE_CHAIN(field)) { ++ tree type = TREE_TYPE(field); ++ enum tree_code code = TREE_CODE(type); ++ ++ if (node == type) ++ return false; ++ if (code == RECORD_TYPE || code == UNION_TYPE) { ++ if (!(walk_struct(type))) ++ return false; ++ } else if (!is_fptr(field) && !TREE_READONLY(field)) ++ return false; ++ } ++ return true; ++} ++ ++static void finish_type(void *event_data, void *data) ++{ ++ tree type = (tree)event_data; ++ ++ if (type == NULL_TREE || type == error_mark_node) ++ return; ++ ++ if (TYPE_READONLY(type)) ++ return; ++ ++ if (walk_struct(type)) ++ constify_type(type); ++} ++ ++static unsigned int check_local_variables(void); ++ ++struct gimple_opt_pass pass_local_variable = { ++ { ++ .type = GIMPLE_PASS, ++ .name = "check_local_variables", ++ .gate = NULL, ++ .execute = check_local_variables, ++ .sub = NULL, ++ .next = NULL, ++ .static_pass_number = 0, ++ .tv_id = TV_NONE, ++ .properties_required = 0, ++ .properties_provided = 0, ++ .properties_destroyed = 0, ++ .todo_flags_start = 0, ++ .todo_flags_finish = 0 ++ } ++}; ++ ++static unsigned int check_local_variables(void) ++{ ++ tree var; ++ referenced_var_iterator rvi; ++ ++#if BUILDING_GCC_VERSION == 4005 ++ FOR_EACH_REFERENCED_VAR(var, rvi) { ++#else ++ FOR_EACH_REFERENCED_VAR(cfun, var, rvi) { ++#endif ++ tree type = TREE_TYPE(var); ++ ++ if (!DECL_P(var) || TREE_STATIC(var) || DECL_EXTERNAL(var)) ++ continue; ++ ++ if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE) ++ continue; ++ ++ if (!TYPE_READONLY(type)) ++ continue; ++ ++// if (lookup_attribute("no_const", DECL_ATTRIBUTES(var))) ++// continue; ++ ++// if (lookup_attribute("no_const", TYPE_ATTRIBUTES(type))) ++// continue; ++ ++ if (walk_struct(type)) { ++ error_at(DECL_SOURCE_LOCATION(var), "constified variable %qE cannot be local", var); ++ return 1; ++ } ++ } ++ return 0; ++} ++ ++int plugin_init(struct plugin_name_args 
*plugin_info, struct plugin_gcc_version *version) ++{ ++ const char * const plugin_name = plugin_info->base_name; ++ const int argc = plugin_info->argc; ++ const struct plugin_argument * const argv = plugin_info->argv; ++ int i; ++ bool constify = true; ++ ++ struct register_pass_info local_variable_pass_info = { ++ .pass = &pass_local_variable.pass, ++ .reference_pass_name = "*referenced_vars", ++ .ref_pass_instance_number = 1, ++ .pos_op = PASS_POS_INSERT_AFTER ++ }; ++ ++ if (!plugin_default_version_check(version, &gcc_version)) { ++ error(G_("incompatible gcc/plugin versions")); ++ return 1; ++ } ++ ++ for (i = 0; i < argc; ++i) { ++ if (!(strcmp(argv[i].key, "no-constify"))) { ++ constify = false; ++ continue; ++ } ++ error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key); ++ } ++ ++ register_callback(plugin_name, PLUGIN_INFO, NULL, &const_plugin_info); ++ if (constify) { ++ register_callback(plugin_name, PLUGIN_FINISH_TYPE, finish_type, NULL); ++ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &local_variable_pass_info); ++ } ++ register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL); ++ ++ return 0; ++} +--- tools/gcc/generate_size_overflow_hash.sh 1970-01-01 00:00:00.000000000 +0000 ++++ tools/gcc/generate_size_overflow_hash.sh 2012-10-15 17:30:59.830924531 +0000 +@@ -0,0 +1,94 @@ ++#!/bin/bash ++ ++# This script generates the hash table (size_overflow_hash.h) for the size_overflow gcc plugin (size_overflow_plugin.c). ++ ++header1="size_overflow_hash.h" ++database="size_overflow_hash.data" ++n=65536 ++ ++usage() { ++cat <> "$header1" ++ done ++ echo >> "$header1" ++} ++ ++create_structs () { ++ rm -f "$header1" ++ ++ create_defines ++ ++ cat "$database" | while read data ++ do ++ data_array=($data) ++ struct_hash_name="${data_array[0]}" ++ funcn="${data_array[1]}" ++ params="${data_array[2]}" ++ next="${data_array[5]}" ++ ++ echo "const struct size_overflow_hash $struct_hash_name = {" >> "$header1" ++ ++ echo -e "\t.next\t= $next,\n\t.name\t= \"$funcn\"," >> "$header1" ++ echo -en "\t.param\t= " >> "$header1" ++ line= ++ for param_num in ${params//-/ }; ++ do ++ line="${line}PARAM"$param_num"|" ++ done ++ ++ echo -e "${line%?},\n};\n" >> "$header1" ++ done ++} ++ ++create_headers () { ++ echo "const struct size_overflow_hash * const size_overflow_hash[$n] = {" >> "$header1" ++} ++ ++create_array_elements () { ++ index=0 ++ grep -v "nohasharray" $database | sort -n -k 4 | while read data ++ do ++ data_array=($data) ++ i="${data_array[3]}" ++ hash="${data_array[4]}" ++ while [[ $index -lt $i ]] ++ do ++ echo -e "\t["$index"]\t= NULL," >> "$header1" ++ index=$(($index + 1)) ++ done ++ index=$(($index + 1)) ++ echo -e "\t["$i"]\t= &"$hash"," >> "$header1" ++ done ++ echo '};' >> $header1 ++} ++ ++create_structs ++create_headers ++create_array_elements ++ ++exit 0 +--- tools/gcc/kallocstat_plugin.c 1970-01-01 00:00:00.000000000 +0000 ++++ tools/gcc/kallocstat_plugin.c 2012-10-15 17:30:59.831924531 +0000 +@@ -0,0 +1,167 @@ ++/* ++ * Copyright 2011 by the PaX Team ++ * Licensed under the GPL v2 ++ * ++ * Note: the choice of the license means that the compilation process is ++ * NOT 'eligible' as defined by gcc's library exception to the GPL v3, ++ * but for the kernel it doesn't matter since it doesn't link against ++ * any of the gcc libraries ++ * ++ * gcc plugin to find the distribution of k*alloc sizes ++ * ++ * TODO: ++ * ++ * BUGS: ++ * - none known ++ */ ++#include "gcc-plugin.h" ++#include "config.h" ++#include "system.h" 
++#include "coretypes.h" ++#include "tree.h" ++#include "tree-pass.h" ++#include "flags.h" ++#include "intl.h" ++#include "toplev.h" ++#include "plugin.h" ++//#include "expr.h" where are you... ++#include "diagnostic.h" ++#include "plugin-version.h" ++#include "tm.h" ++#include "function.h" ++#include "basic-block.h" ++#include "gimple.h" ++#include "rtl.h" ++#include "emit-rtl.h" ++ ++extern void print_gimple_stmt(FILE *, gimple, int, int); ++ ++int plugin_is_GPL_compatible; ++ ++static const char * const kalloc_functions[] = { ++ "__kmalloc", ++ "kmalloc", ++ "kmalloc_large", ++ "kmalloc_node", ++ "kmalloc_order", ++ "kmalloc_order_trace", ++ "kmalloc_slab", ++ "kzalloc", ++ "kzalloc_node", ++}; ++ ++static struct plugin_info kallocstat_plugin_info = { ++ .version = "201111150100", ++}; ++ ++static unsigned int execute_kallocstat(void); ++ ++static struct gimple_opt_pass kallocstat_pass = { ++ .pass = { ++ .type = GIMPLE_PASS, ++ .name = "kallocstat", ++ .gate = NULL, ++ .execute = execute_kallocstat, ++ .sub = NULL, ++ .next = NULL, ++ .static_pass_number = 0, ++ .tv_id = TV_NONE, ++ .properties_required = 0, ++ .properties_provided = 0, ++ .properties_destroyed = 0, ++ .todo_flags_start = 0, ++ .todo_flags_finish = 0 ++ } ++}; ++ ++static bool is_kalloc(const char *fnname) ++{ ++ size_t i; ++ ++ for (i = 0; i < ARRAY_SIZE(kalloc_functions); i++) ++ if (!strcmp(fnname, kalloc_functions[i])) ++ return true; ++ return false; ++} ++ ++static unsigned int execute_kallocstat(void) ++{ ++ basic_block bb; ++ ++ // 1. loop through BBs and GIMPLE statements ++ FOR_EACH_BB(bb) { ++ gimple_stmt_iterator gsi; ++ for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) { ++ // gimple match: ++ tree fndecl, size; ++ gimple call_stmt; ++ const char *fnname; ++ ++ // is it a call ++ call_stmt = gsi_stmt(gsi); ++ if (!is_gimple_call(call_stmt)) ++ continue; ++ fndecl = gimple_call_fndecl(call_stmt); ++ if (fndecl == NULL_TREE) ++ continue; ++ if (TREE_CODE(fndecl) != FUNCTION_DECL) ++ continue; ++ ++ // is it a call to k*alloc ++ fnname = IDENTIFIER_POINTER(DECL_NAME(fndecl)); ++ if (!is_kalloc(fnname)) ++ continue; ++ ++ // is the size arg the result of a simple const assignment ++ size = gimple_call_arg(call_stmt, 0); ++ while (true) { ++ gimple def_stmt; ++ expanded_location xloc; ++ size_t size_val; ++ ++ if (TREE_CODE(size) != SSA_NAME) ++ break; ++ def_stmt = SSA_NAME_DEF_STMT(size); ++ if (!def_stmt || !is_gimple_assign(def_stmt)) ++ break; ++ if (gimple_num_ops(def_stmt) != 2) ++ break; ++ size = gimple_assign_rhs1(def_stmt); ++ if (!TREE_CONSTANT(size)) ++ continue; ++ xloc = expand_location(gimple_location(def_stmt)); ++ if (!xloc.file) ++ xloc = expand_location(DECL_SOURCE_LOCATION(current_function_decl)); ++ size_val = TREE_INT_CST_LOW(size); ++ fprintf(stderr, "kallocsize: %8zu %8zx %s %s:%u\n", size_val, size_val, fnname, xloc.file, xloc.line); ++ break; ++ } ++//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO); ++//debug_tree(gimple_call_fn(call_stmt)); ++//print_node(stderr, "pax", fndecl, 4); ++ } ++ } ++ ++ return 0; ++} ++ ++int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version) ++{ ++ const char * const plugin_name = plugin_info->base_name; ++ struct register_pass_info kallocstat_pass_info = { ++ .pass = &kallocstat_pass.pass, ++ .reference_pass_name = "ssa", ++ .ref_pass_instance_number = 1, ++ .pos_op = PASS_POS_INSERT_AFTER ++ }; ++ ++ if (!plugin_default_version_check(version, &gcc_version)) { ++ error(G_("incompatible gcc/plugin 
versions")); ++ return 1; ++ } ++ ++ register_callback(plugin_name, PLUGIN_INFO, NULL, &kallocstat_plugin_info); ++ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kallocstat_pass_info); ++ ++ return 0; ++} +--- tools/gcc/kernexec_plugin.c 1970-01-01 00:00:00.000000000 +0000 ++++ tools/gcc/kernexec_plugin.c 2012-10-15 17:30:59.831924531 +0000 +@@ -0,0 +1,427 @@ ++/* ++ * Copyright 2011 by the PaX Team ++ * Licensed under the GPL v2 ++ * ++ * Note: the choice of the license means that the compilation process is ++ * NOT 'eligible' as defined by gcc's library exception to the GPL v3, ++ * but for the kernel it doesn't matter since it doesn't link against ++ * any of the gcc libraries ++ * ++ * gcc plugin to make KERNEXEC/amd64 almost as good as it is on i386 ++ * ++ * TODO: ++ * ++ * BUGS: ++ * - none known ++ */ ++#include "gcc-plugin.h" ++#include "config.h" ++#include "system.h" ++#include "coretypes.h" ++#include "tree.h" ++#include "tree-pass.h" ++#include "flags.h" ++#include "intl.h" ++#include "toplev.h" ++#include "plugin.h" ++//#include "expr.h" where are you... ++#include "diagnostic.h" ++#include "plugin-version.h" ++#include "tm.h" ++#include "function.h" ++#include "basic-block.h" ++#include "gimple.h" ++#include "rtl.h" ++#include "emit-rtl.h" ++#include "tree-flow.h" ++ ++extern void print_gimple_stmt(FILE *, gimple, int, int); ++extern rtx emit_move_insn(rtx x, rtx y); ++ ++int plugin_is_GPL_compatible; ++ ++static struct plugin_info kernexec_plugin_info = { ++ .version = "201111291120", ++ .help = "method=[bts|or]\tinstrumentation method\n" ++}; ++ ++static unsigned int execute_kernexec_reload(void); ++static unsigned int execute_kernexec_fptr(void); ++static unsigned int execute_kernexec_retaddr(void); ++static bool kernexec_cmodel_check(void); ++ ++static void (*kernexec_instrument_fptr)(gimple_stmt_iterator *); ++static void (*kernexec_instrument_retaddr)(rtx); ++ ++static struct gimple_opt_pass kernexec_reload_pass = { ++ .pass = { ++ .type = GIMPLE_PASS, ++ .name = "kernexec_reload", ++ .gate = kernexec_cmodel_check, ++ .execute = execute_kernexec_reload, ++ .sub = NULL, ++ .next = NULL, ++ .static_pass_number = 0, ++ .tv_id = TV_NONE, ++ .properties_required = 0, ++ .properties_provided = 0, ++ .properties_destroyed = 0, ++ .todo_flags_start = 0, ++ .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi ++ } ++}; ++ ++static struct gimple_opt_pass kernexec_fptr_pass = { ++ .pass = { ++ .type = GIMPLE_PASS, ++ .name = "kernexec_fptr", ++ .gate = kernexec_cmodel_check, ++ .execute = execute_kernexec_fptr, ++ .sub = NULL, ++ .next = NULL, ++ .static_pass_number = 0, ++ .tv_id = TV_NONE, ++ .properties_required = 0, ++ .properties_provided = 0, ++ .properties_destroyed = 0, ++ .todo_flags_start = 0, ++ .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi ++ } ++}; ++ ++static struct rtl_opt_pass kernexec_retaddr_pass = { ++ .pass = { ++ .type = RTL_PASS, ++ .name = "kernexec_retaddr", ++ .gate = kernexec_cmodel_check, ++ .execute = execute_kernexec_retaddr, ++ .sub = NULL, ++ .next = NULL, ++ .static_pass_number = 0, ++ .tv_id = TV_NONE, ++ .properties_required = 0, ++ .properties_provided = 0, ++ .properties_destroyed = 0, ++ .todo_flags_start = 0, ++ .todo_flags_finish = TODO_dump_func | TODO_ggc_collect ++ } ++}; ++ ++static bool kernexec_cmodel_check(void) ++{ ++ tree section; ++ ++ if (ix86_cmodel != 
CM_KERNEL) ++ return false; ++ ++ section = lookup_attribute("section", DECL_ATTRIBUTES(current_function_decl)); ++ if (!section || !TREE_VALUE(section)) ++ return true; ++ ++ section = TREE_VALUE(TREE_VALUE(section)); ++ if (strncmp(TREE_STRING_POINTER(section), ".vsyscall_", 10)) ++ return true; ++ ++ return false; ++} ++ ++/* ++ * add special KERNEXEC instrumentation: reload %r10 after it has been clobbered ++ */ ++static void kernexec_reload_fptr_mask(gimple_stmt_iterator *gsi) ++{ ++ gimple asm_movabs_stmt; ++ ++ // build asm volatile("movabs $0x8000000000000000, %%r10\n\t" : : : ); ++ asm_movabs_stmt = gimple_build_asm_vec("movabs $0x8000000000000000, %%r10\n\t", NULL, NULL, NULL, NULL); ++ gimple_asm_set_volatile(asm_movabs_stmt, true); ++ gsi_insert_after(gsi, asm_movabs_stmt, GSI_CONTINUE_LINKING); ++ update_stmt(asm_movabs_stmt); ++} ++ ++/* ++ * find all asm() stmts that clobber r10 and add a reload of r10 ++ */ ++static unsigned int execute_kernexec_reload(void) ++{ ++ basic_block bb; ++ ++ // 1. loop through BBs and GIMPLE statements ++ FOR_EACH_BB(bb) { ++ gimple_stmt_iterator gsi; ++ ++ for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) { ++ // gimple match: __asm__ ("" : : : "r10"); ++ gimple asm_stmt; ++ size_t nclobbers; ++ ++ // is it an asm ... ++ asm_stmt = gsi_stmt(gsi); ++ if (gimple_code(asm_stmt) != GIMPLE_ASM) ++ continue; ++ ++ // ... clobbering r10 ++ nclobbers = gimple_asm_nclobbers(asm_stmt); ++ while (nclobbers--) { ++ tree op = gimple_asm_clobber_op(asm_stmt, nclobbers); ++ if (strcmp(TREE_STRING_POINTER(TREE_VALUE(op)), "r10")) ++ continue; ++ kernexec_reload_fptr_mask(&gsi); ++//print_gimple_stmt(stderr, asm_stmt, 0, TDF_LINENO); ++ break; ++ } ++ } ++ } ++ ++ return 0; ++} ++ ++/* ++ * add special KERNEXEC instrumentation: force MSB of fptr to 1, which will produce ++ * a non-canonical address from a userland ptr and will just trigger a GPF on dereference ++ */ ++static void kernexec_instrument_fptr_bts(gimple_stmt_iterator *gsi) ++{ ++ gimple assign_intptr, assign_new_fptr, call_stmt; ++ tree intptr, old_fptr, new_fptr, kernexec_mask; ++ ++ call_stmt = gsi_stmt(*gsi); ++ old_fptr = gimple_call_fn(call_stmt); ++ ++ // create temporary unsigned long variable used for bitops and cast fptr to it ++ intptr = create_tmp_var(long_unsigned_type_node, "kernexec_bts"); ++ add_referenced_var(intptr); ++ mark_sym_for_renaming(intptr); ++ assign_intptr = gimple_build_assign(intptr, fold_convert(long_unsigned_type_node, old_fptr)); ++ gsi_insert_before(gsi, assign_intptr, GSI_SAME_STMT); ++ update_stmt(assign_intptr); ++ ++ // apply logical or to temporary unsigned long and bitmask ++ kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0x8000000000000000LL); ++// kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0xffffffff80000000LL); ++ assign_intptr = gimple_build_assign(intptr, fold_build2(BIT_IOR_EXPR, long_long_unsigned_type_node, intptr, kernexec_mask)); ++ gsi_insert_before(gsi, assign_intptr, GSI_SAME_STMT); ++ update_stmt(assign_intptr); ++ ++ // cast temporary unsigned long back to a temporary fptr variable ++ new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec_fptr"); ++ add_referenced_var(new_fptr); ++ mark_sym_for_renaming(new_fptr); ++ assign_new_fptr = gimple_build_assign(new_fptr, fold_convert(TREE_TYPE(old_fptr), intptr)); ++ gsi_insert_before(gsi, assign_new_fptr, GSI_SAME_STMT); ++ update_stmt(assign_new_fptr); ++ ++ // replace call stmt fn with the new fptr ++ gimple_call_set_fn(call_stmt, new_fptr); ++ 
update_stmt(call_stmt); ++} ++ ++static void kernexec_instrument_fptr_or(gimple_stmt_iterator *gsi) ++{ ++ gimple asm_or_stmt, call_stmt; ++ tree old_fptr, new_fptr, input, output; ++ VEC(tree, gc) *inputs = NULL; ++ VEC(tree, gc) *outputs = NULL; ++ ++ call_stmt = gsi_stmt(*gsi); ++ old_fptr = gimple_call_fn(call_stmt); ++ ++ // create temporary fptr variable ++ new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec_or"); ++ add_referenced_var(new_fptr); ++ mark_sym_for_renaming(new_fptr); ++ ++ // build asm volatile("orq %%r10, %0\n\t" : "=r"(new_fptr) : "0"(old_fptr)); ++ input = build_tree_list(NULL_TREE, build_string(2, "0")); ++ input = chainon(NULL_TREE, build_tree_list(input, old_fptr)); ++ output = build_tree_list(NULL_TREE, build_string(3, "=r")); ++ output = chainon(NULL_TREE, build_tree_list(output, new_fptr)); ++ VEC_safe_push(tree, gc, inputs, input); ++ VEC_safe_push(tree, gc, outputs, output); ++ asm_or_stmt = gimple_build_asm_vec("orq %%r10, %0\n\t", inputs, outputs, NULL, NULL); ++ gimple_asm_set_volatile(asm_or_stmt, true); ++ gsi_insert_before(gsi, asm_or_stmt, GSI_SAME_STMT); ++ update_stmt(asm_or_stmt); ++ ++ // replace call stmt fn with the new fptr ++ gimple_call_set_fn(call_stmt, new_fptr); ++ update_stmt(call_stmt); ++} ++ ++/* ++ * find all C level function pointer dereferences and forcibly set the highest bit of the pointer ++ */ ++static unsigned int execute_kernexec_fptr(void) ++{ ++ basic_block bb; ++ ++ // 1. loop through BBs and GIMPLE statements ++ FOR_EACH_BB(bb) { ++ gimple_stmt_iterator gsi; ++ ++ for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) { ++ // gimple match: h_1 = get_fptr (); D.2709_3 = h_1 (x_2(D)); ++ tree fn; ++ gimple call_stmt; ++ ++ // is it a call ... ++ call_stmt = gsi_stmt(gsi); ++ if (!is_gimple_call(call_stmt)) ++ continue; ++ fn = gimple_call_fn(call_stmt); ++ if (TREE_CODE(fn) == ADDR_EXPR) ++ continue; ++ if (TREE_CODE(fn) != SSA_NAME) ++ gcc_unreachable(); ++ ++ // ... 
through a function pointer ++ fn = SSA_NAME_VAR(fn); ++ if (TREE_CODE(fn) != VAR_DECL && TREE_CODE(fn) != PARM_DECL) ++ continue; ++ fn = TREE_TYPE(fn); ++ if (TREE_CODE(fn) != POINTER_TYPE) ++ continue; ++ fn = TREE_TYPE(fn); ++ if (TREE_CODE(fn) != FUNCTION_TYPE) ++ continue; ++ ++ kernexec_instrument_fptr(&gsi); ++ ++//debug_tree(gimple_call_fn(call_stmt)); ++//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO); ++ } ++ } ++ ++ return 0; ++} ++ ++// add special KERNEXEC instrumentation: btsq $63,(%rsp) just before retn ++static void kernexec_instrument_retaddr_bts(rtx insn) ++{ ++ rtx btsq; ++ rtvec argvec, constraintvec, labelvec; ++ int line; ++ ++ // create asm volatile("btsq $63,(%%rsp)":::) ++ argvec = rtvec_alloc(0); ++ constraintvec = rtvec_alloc(0); ++ labelvec = rtvec_alloc(0); ++ line = expand_location(RTL_LOCATION(insn)).line; ++ btsq = gen_rtx_ASM_OPERANDS(VOIDmode, "btsq $63,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line); ++ MEM_VOLATILE_P(btsq) = 1; ++// RTX_FRAME_RELATED_P(btsq) = 1; // not for ASM_OPERANDS ++ emit_insn_before(btsq, insn); ++} ++ ++// add special KERNEXEC instrumentation: orq %r10,(%rsp) just before retn ++static void kernexec_instrument_retaddr_or(rtx insn) ++{ ++ rtx orq; ++ rtvec argvec, constraintvec, labelvec; ++ int line; ++ ++ // create asm volatile("orq %%r10,(%%rsp)":::) ++ argvec = rtvec_alloc(0); ++ constraintvec = rtvec_alloc(0); ++ labelvec = rtvec_alloc(0); ++ line = expand_location(RTL_LOCATION(insn)).line; ++ orq = gen_rtx_ASM_OPERANDS(VOIDmode, "orq %%r10,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line); ++ MEM_VOLATILE_P(orq) = 1; ++// RTX_FRAME_RELATED_P(orq) = 1; // not for ASM_OPERANDS ++ emit_insn_before(orq, insn); ++} ++ ++/* ++ * find all asm level function returns and forcibly set the highest bit of the return address ++ */ ++static unsigned int execute_kernexec_retaddr(void) ++{ ++ rtx insn; ++ ++ // 1. 
find function returns ++ for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) { ++ // rtl match: (jump_insn 41 40 42 2 (return) fptr.c:42 634 {return_internal} (nil)) ++ // (jump_insn 12 9 11 2 (parallel [ (return) (unspec [ (0) ] UNSPEC_REP) ]) fptr.c:46 635 {return_internal_long} (nil)) ++ rtx body; ++ ++ // is it a retn ++ if (!JUMP_P(insn)) ++ continue; ++ body = PATTERN(insn); ++ if (GET_CODE(body) == PARALLEL) ++ body = XVECEXP(body, 0, 0); ++ if (GET_CODE(body) != RETURN) ++ continue; ++ kernexec_instrument_retaddr(insn); ++ } ++ ++// print_simple_rtl(stderr, get_insns()); ++// print_rtl(stderr, get_insns()); ++ ++ return 0; ++} ++ ++int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version) ++{ ++ const char * const plugin_name = plugin_info->base_name; ++ const int argc = plugin_info->argc; ++ const struct plugin_argument * const argv = plugin_info->argv; ++ int i; ++ struct register_pass_info kernexec_reload_pass_info = { ++ .pass = &kernexec_reload_pass.pass, ++ .reference_pass_name = "ssa", ++ .ref_pass_instance_number = 1, ++ .pos_op = PASS_POS_INSERT_AFTER ++ }; ++ struct register_pass_info kernexec_fptr_pass_info = { ++ .pass = &kernexec_fptr_pass.pass, ++ .reference_pass_name = "ssa", ++ .ref_pass_instance_number = 1, ++ .pos_op = PASS_POS_INSERT_AFTER ++ }; ++ struct register_pass_info kernexec_retaddr_pass_info = { ++ .pass = &kernexec_retaddr_pass.pass, ++ .reference_pass_name = "pro_and_epilogue", ++ .ref_pass_instance_number = 1, ++ .pos_op = PASS_POS_INSERT_AFTER ++ }; ++ ++ if (!plugin_default_version_check(version, &gcc_version)) { ++ error(G_("incompatible gcc/plugin versions")); ++ return 1; ++ } ++ ++ register_callback(plugin_name, PLUGIN_INFO, NULL, &kernexec_plugin_info); ++ ++ if (TARGET_64BIT == 0) ++ return 0; ++ ++ for (i = 0; i < argc; ++i) { ++ if (!strcmp(argv[i].key, "method")) { ++ if (!argv[i].value) { ++ error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key); ++ continue; ++ } ++ if (!strcmp(argv[i].value, "bts")) { ++ kernexec_instrument_fptr = kernexec_instrument_fptr_bts; ++ kernexec_instrument_retaddr = kernexec_instrument_retaddr_bts; ++ } else if (!strcmp(argv[i].value, "or")) { ++ kernexec_instrument_fptr = kernexec_instrument_fptr_or; ++ kernexec_instrument_retaddr = kernexec_instrument_retaddr_or; ++ fix_register("r10", 1, 1); ++ } else ++ error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value); ++ continue; ++ } ++ error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key); ++ } ++ if (!kernexec_instrument_fptr || !kernexec_instrument_retaddr) ++ error(G_("no instrumentation method was selected via '-fplugin-arg-%s-method'"), plugin_name); ++ ++ if (kernexec_instrument_fptr == kernexec_instrument_fptr_or) ++ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_reload_pass_info); ++ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_fptr_pass_info); ++ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_retaddr_pass_info); ++ ++ return 0; ++} +--- tools/gcc/latent_entropy_plugin.c 1970-01-01 00:00:00.000000000 +0000 ++++ tools/gcc/latent_entropy_plugin.c 2012-10-15 17:30:59.831924531 +0000 +@@ -0,0 +1,295 @@ ++/* ++ * Copyright 2012 by the PaX Team ++ * Licensed under the GPL v2 ++ * ++ * Note: the choice of the license means that the compilation process is ++ * NOT 'eligible' as defined by gcc's library exception to the GPL v3, ++ * but for the
kernel it doesn't matter since it doesn't link against ++ * any of the gcc libraries ++ * ++ * gcc plugin to help generate a little bit of entropy from program state, ++ * used during boot in the kernel ++ * ++ * TODO: ++ * - add ipa pass to identify not explicitly marked candidate functions ++ * - mix in more program state (function arguments/return values, loop variables, etc) ++ * - more instrumentation control via attribute parameters ++ * ++ * BUGS: ++ * - LTO needs -flto-partition=none for now ++ */ ++#include "gcc-plugin.h" ++#include "config.h" ++#include "system.h" ++#include "coretypes.h" ++#include "tree.h" ++#include "tree-pass.h" ++#include "flags.h" ++#include "intl.h" ++#include "toplev.h" ++#include "plugin.h" ++//#include "expr.h" where are you... ++#include "diagnostic.h" ++#include "plugin-version.h" ++#include "tm.h" ++#include "function.h" ++#include "basic-block.h" ++#include "gimple.h" ++#include "rtl.h" ++#include "emit-rtl.h" ++#include "tree-flow.h" ++ ++int plugin_is_GPL_compatible; ++ ++static tree latent_entropy_decl; ++ ++static struct plugin_info latent_entropy_plugin_info = { ++ .version = "201207271820", ++ .help = NULL ++}; ++ ++static unsigned int execute_latent_entropy(void); ++static bool gate_latent_entropy(void); ++ ++static struct gimple_opt_pass latent_entropy_pass = { ++ .pass = { ++ .type = GIMPLE_PASS, ++ .name = "latent_entropy", ++ .gate = gate_latent_entropy, ++ .execute = execute_latent_entropy, ++ .sub = NULL, ++ .next = NULL, ++ .static_pass_number = 0, ++ .tv_id = TV_NONE, ++ .properties_required = PROP_gimple_leh | PROP_cfg, ++ .properties_provided = 0, ++ .properties_destroyed = 0, ++ .todo_flags_start = 0, //TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts, ++ .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_update_ssa ++ } ++}; ++ ++static tree handle_latent_entropy_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs) ++{ ++ if (TREE_CODE(*node) != FUNCTION_DECL) { ++ *no_add_attrs = true; ++ error("%qE attribute only applies to functions", name); ++ } ++ return NULL_TREE; ++} ++ ++static struct attribute_spec latent_entropy_attr = { ++ .name = "latent_entropy", ++ .min_length = 0, ++ .max_length = 0, ++ .decl_required = true, ++ .type_required = false, ++ .function_type_required = false, ++ .handler = handle_latent_entropy_attribute, ++#if BUILDING_GCC_VERSION >= 4007 ++ .affects_type_identity = false ++#endif ++}; ++ ++static void register_attributes(void *event_data, void *data) ++{ ++ register_attribute(&latent_entropy_attr); ++} ++ ++static bool gate_latent_entropy(void) ++{ ++ tree latent_entropy_attr; ++ ++ latent_entropy_attr = lookup_attribute("latent_entropy", DECL_ATTRIBUTES(current_function_decl)); ++ return latent_entropy_attr != NULL_TREE; ++} ++ ++static unsigned HOST_WIDE_INT seed; ++static unsigned HOST_WIDE_INT get_random_const(void) ++{ ++ seed = (seed >> 1U) ^ (-(seed & 1ULL) & 0xD800000000000000ULL); ++ return seed; ++} ++ ++static enum tree_code get_op(tree *rhs) ++{ ++ static enum tree_code op; ++ unsigned HOST_WIDE_INT random_const; ++ ++ random_const = get_random_const(); ++ ++ switch (op) { ++ case BIT_XOR_EXPR: ++ op = PLUS_EXPR; ++ break; ++ ++ case PLUS_EXPR: ++ if (rhs) { ++ op = LROTATE_EXPR; ++ random_const &= HOST_BITS_PER_WIDE_INT - 1; ++ break; ++ } ++ ++ case LROTATE_EXPR: ++ default: ++ op = BIT_XOR_EXPR; ++ break; ++ } ++ if (rhs) ++ *rhs = build_int_cstu(unsigned_intDI_type_node, random_const); ++ return op; ++} ++ ++static void 
perturb_local_entropy(basic_block bb, tree local_entropy) ++{ ++ gimple_stmt_iterator gsi; ++ gimple assign; ++ tree addxorrol, rhs; ++ enum tree_code op; ++ ++ op = get_op(&rhs); ++ addxorrol = fold_build2_loc(UNKNOWN_LOCATION, op, unsigned_intDI_type_node, local_entropy, rhs); ++ assign = gimple_build_assign(local_entropy, addxorrol); ++ find_referenced_vars_in(assign); ++//debug_bb(bb); ++ gsi = gsi_after_labels(bb); ++ gsi_insert_before(&gsi, assign, GSI_NEW_STMT); ++ update_stmt(assign); ++} ++ ++static void perturb_latent_entropy(basic_block bb, tree rhs) ++{ ++ gimple_stmt_iterator gsi; ++ gimple assign; ++ tree addxorrol, temp; ++ ++ // 1. create temporary copy of latent_entropy ++ temp = create_tmp_var(unsigned_intDI_type_node, "temp_latent_entropy"); ++ add_referenced_var(temp); ++ mark_sym_for_renaming(temp); ++ ++ // 2. read... ++ assign = gimple_build_assign(temp, latent_entropy_decl); ++ find_referenced_vars_in(assign); ++ gsi = gsi_after_labels(bb); ++ gsi_insert_after(&gsi, assign, GSI_NEW_STMT); ++ update_stmt(assign); ++ ++ // 3. ...modify... ++ addxorrol = fold_build2_loc(UNKNOWN_LOCATION, get_op(NULL), unsigned_intDI_type_node, temp, rhs); ++ assign = gimple_build_assign(temp, addxorrol); ++ find_referenced_vars_in(assign); ++ gsi_insert_after(&gsi, assign, GSI_NEW_STMT); ++ update_stmt(assign); ++ ++ // 4. ...write latent_entropy ++ assign = gimple_build_assign(latent_entropy_decl, temp); ++ find_referenced_vars_in(assign); ++ gsi_insert_after(&gsi, assign, GSI_NEW_STMT); ++ update_stmt(assign); ++} ++ ++static unsigned int execute_latent_entropy(void) ++{ ++ basic_block bb; ++ gimple assign; ++ gimple_stmt_iterator gsi; ++ tree local_entropy; ++ ++ if (!latent_entropy_decl) { ++ struct varpool_node *node; ++ ++ for (node = varpool_nodes; node; node = node->next) { ++ tree var = node->decl; ++ if (strcmp(IDENTIFIER_POINTER(DECL_NAME(var)), "latent_entropy")) ++ continue; ++ latent_entropy_decl = var; ++// debug_tree(var); ++ break; ++ } ++ if (!latent_entropy_decl) { ++// debug_tree(current_function_decl); ++ return 0; ++ } ++ } ++ ++//fprintf(stderr, "latent_entropy: %s\n", IDENTIFIER_POINTER(DECL_NAME(current_function_decl))); ++ ++ // 1. create local entropy variable ++ local_entropy = create_tmp_var(unsigned_intDI_type_node, "local_entropy"); ++ add_referenced_var(local_entropy); ++ mark_sym_for_renaming(local_entropy); ++ ++ // 2. initialize local entropy variable ++ bb = split_block_after_labels(ENTRY_BLOCK_PTR)->dest; ++ if (dom_info_available_p(CDI_DOMINATORS)) ++ set_immediate_dominator(CDI_DOMINATORS, bb, ENTRY_BLOCK_PTR); ++ gsi = gsi_start_bb(bb); ++ ++ assign = gimple_build_assign(local_entropy, build_int_cstu(unsigned_intDI_type_node, get_random_const())); ++// gimple_set_location(assign, loc); ++ find_referenced_vars_in(assign); ++ gsi_insert_after(&gsi, assign, GSI_NEW_STMT); ++ update_stmt(assign); ++ bb = bb->next_bb; ++ ++ // 3. instrument each BB with an operation on the local entropy variable ++ while (bb != EXIT_BLOCK_PTR) { ++ perturb_local_entropy(bb, local_entropy); ++ bb = bb->next_bb; ++ }; ++ ++ // 4. 
mix local entropy into the global entropy variable ++ perturb_latent_entropy(EXIT_BLOCK_PTR->prev_bb, local_entropy); ++ return 0; ++} ++ ++static void start_unit_callback(void *gcc_data, void *user_data) ++{ ++#if BUILDING_GCC_VERSION >= 4007 ++ seed = get_random_seed(false); ++#else ++ sscanf(get_random_seed(false), "%" HOST_WIDE_INT_PRINT "x", &seed); ++ seed *= seed; ++#endif ++ ++ if (in_lto_p) ++ return; ++ ++ // extern u64 latent_entropy ++ latent_entropy_decl = build_decl(UNKNOWN_LOCATION, VAR_DECL, get_identifier("latent_entropy"), unsigned_intDI_type_node); ++ ++ TREE_STATIC(latent_entropy_decl) = 1; ++ TREE_PUBLIC(latent_entropy_decl) = 1; ++ TREE_USED(latent_entropy_decl) = 1; ++ TREE_THIS_VOLATILE(latent_entropy_decl) = 1; ++ DECL_EXTERNAL(latent_entropy_decl) = 1; ++ DECL_ARTIFICIAL(latent_entropy_decl) = 0; ++ DECL_INITIAL(latent_entropy_decl) = NULL; ++// DECL_ASSEMBLER_NAME(latent_entropy_decl); ++// varpool_finalize_decl(latent_entropy_decl); ++// varpool_mark_needed_node(latent_entropy_decl); ++} ++ ++int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version) ++{ ++ const char * const plugin_name = plugin_info->base_name; ++ struct register_pass_info latent_entropy_pass_info = { ++ .pass = &latent_entropy_pass.pass, ++ .reference_pass_name = "optimized", ++ .ref_pass_instance_number = 1, ++ .pos_op = PASS_POS_INSERT_BEFORE ++ }; ++ ++ if (!plugin_default_version_check(version, &gcc_version)) { ++ error(G_("incompatible gcc/plugin versions")); ++ return 1; ++ } ++ ++ register_callback(plugin_name, PLUGIN_INFO, NULL, &latent_entropy_plugin_info); ++ register_callback ("start_unit", PLUGIN_START_UNIT, &start_unit_callback, NULL); ++ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &latent_entropy_pass_info); ++ register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL); ++ ++ return 0; ++} +--- tools/gcc/size_overflow_hash.data 1970-01-01 00:00:00.000000000 +0000 ++++ tools/gcc/size_overflow_hash.data 2012-10-15 17:30:59.834924531 +0000 +@@ -0,0 +1,3597 @@ ++_000001_hash alloc_dr 2 65495 _000001_hash NULL ++_000002_hash __copy_from_user 3 10918 _000002_hash NULL ++_000003_hash copy_from_user 3 17559 _000003_hash NULL ++_000004_hash __copy_from_user_inatomic 3 4365 _000004_hash NULL ++_000005_hash __copy_from_user_nocache 3 39351 _000005_hash NULL ++_000006_hash __copy_to_user_inatomic 3 19214 _000006_hash NULL ++_000007_hash do_xip_mapping_read 5 60297 _000007_hash NULL ++_000008_hash hugetlbfs_read 3 11268 _000008_hash NULL ++_000009_hash kmalloc 1 60432 _003302_hash NULL nohasharray ++_000010_hash kmalloc_array 1-2 9444 _000010_hash NULL ++_000012_hash __kmalloc_reserve 1 17080 _000012_hash NULL ++_000013_hash kmalloc_slab 1 11917 _000013_hash NULL ++_000014_hash kmemdup 2 64015 _000014_hash NULL ++_000015_hash __krealloc 2 14857 _000340_hash NULL nohasharray ++_000016_hash memdup_user 2 59590 _000016_hash NULL ++_000017_hash module_alloc 1 63630 _000017_hash NULL ++_000018_hash read_default_ldt 2 14302 _000018_hash NULL ++_000019_hash read_kcore 3 63488 _000019_hash NULL ++_000020_hash read_ldt 2 47570 _000020_hash NULL ++_000021_hash read_zero 3 19366 _000021_hash NULL ++_000022_hash __vmalloc_node 1 39308 _000022_hash NULL ++_000023_hash aac_convert_sgraw2 4 51598 _000023_hash NULL ++_000024_hash aa_simple_write_to_buffer 4-3 49683 _000024_hash NULL ++_000025_hash ablkcipher_copy_iv 3 64140 _000025_hash NULL ++_000026_hash ablkcipher_next_slow 3-4 47274 _000026_hash NULL ++_000028_hash 
acpi_battery_write_alarm 3 1240 _000028_hash NULL ++_000029_hash acpi_os_allocate 1 14892 _000029_hash NULL ++_000030_hash acpi_system_write_wakeup_device 3 34853 _000030_hash NULL ++_000031_hash adu_write 3 30487 _000031_hash NULL ++_000032_hash aer_inject_write 3 52399 _000032_hash NULL ++_000033_hash afs_alloc_flat_call 2-3 36399 _000033_hash NULL ++_000035_hash afs_proc_cells_write 3 61139 _000035_hash NULL ++_000036_hash afs_proc_rootcell_write 3 15822 _000036_hash NULL ++_000037_hash agp_3_5_isochronous_node_enable 3 49465 _000037_hash NULL ++_000038_hash agp_alloc_page_array 1 22554 _000038_hash NULL ++_000039_hash ah_alloc_tmp 2-3 54378 _000039_hash NULL ++_000041_hash ahash_setkey_unaligned 3 33521 _000041_hash NULL ++_000042_hash alg_setkey 3 31485 _000042_hash NULL ++_000043_hash aligned_kmalloc 1 3628 _000043_hash NULL ++_000044_hash alloc_context 1 3194 _000044_hash NULL ++_000045_hash alloc_ep_req 2 54860 _000045_hash NULL ++_000046_hash alloc_fdmem 1 27083 _000046_hash NULL ++_000047_hash alloc_flex_gd 1 57259 _000047_hash NULL ++_000048_hash alloc_sglist 1-3-2 22960 _000048_hash NULL ++_000049_hash __alloc_skb 1 23940 _000049_hash NULL ++_000050_hash aoedev_flush 2 44398 _000050_hash NULL ++_000051_hash append_to_buffer 3 63550 _000051_hash NULL ++_000052_hash asix_read_cmd 5 13245 _000052_hash NULL ++_000053_hash asix_write_cmd 5 58192 _000053_hash NULL ++_000054_hash at76_set_card_command 4 4471 _000054_hash NULL ++_000055_hash ath6kl_add_bss_if_needed 6 24317 _000055_hash NULL ++_000056_hash ath6kl_debug_roam_tbl_event 3 5224 _000056_hash NULL ++_000057_hash ath6kl_mgmt_powersave_ap 6 13791 _000057_hash NULL ++_000058_hash ath6kl_send_go_probe_resp 3 21113 _000058_hash NULL ++_000059_hash ath6kl_set_ap_probe_resp_ies 3 50539 _000059_hash NULL ++_000060_hash ath6kl_set_assoc_req_ies 3 43185 _000060_hash NULL ++_000061_hash ath6kl_wmi_bssinfo_event_rx 3 2275 _000061_hash NULL ++_000062_hash ath6kl_wmi_send_action_cmd 7 58860 _000062_hash NULL ++_000063_hash __ath6kl_wmi_send_mgmt_cmd 7 38971 _000063_hash NULL ++_000064_hash attach_hdlc_protocol 3 19986 _000064_hash NULL ++_000065_hash audio_write 4 54261 _001782_hash NULL nohasharray ++_000066_hash audit_unpack_string 3 13748 _000066_hash NULL ++_000067_hash ax25_setsockopt 5 42740 _000067_hash NULL ++_000068_hash b43_debugfs_write 3 34838 _000068_hash NULL ++_000069_hash b43legacy_debugfs_write 3 28556 _000069_hash NULL ++_000070_hash batadv_hash_new 1 40491 _000070_hash NULL ++_000071_hash batadv_orig_node_add_if 2 18433 _000071_hash NULL ++_000072_hash batadv_orig_node_del_if 2 4 _000072_hash NULL ++_000073_hash batadv_tt_changes_fill_buff 4 40323 _000073_hash NULL ++_000074_hash batadv_tt_realloc_packet_buff 4 49960 _000074_hash NULL ++_000075_hash bch_alloc 1 4593 _000075_hash NULL ++_000076_hash befs_nls2utf 3 17163 _000076_hash NULL ++_000077_hash befs_utf2nls 3 25628 _000077_hash NULL ++_000078_hash bfad_debugfs_write_regrd 3 15218 _000078_hash NULL ++_000079_hash bfad_debugfs_write_regwr 3 61841 _000079_hash NULL ++_000080_hash bio_alloc_map_data 1-2 50782 _000080_hash NULL ++_000082_hash bio_kmalloc 2 54672 _000082_hash NULL ++_000083_hash bitmap_storage_alloc 2 55077 _000083_hash NULL ++_000084_hash blkcipher_copy_iv 3 24075 _000084_hash NULL ++_000085_hash blkcipher_next_slow 3-4 52733 _000085_hash NULL ++_000087_hash bl_pipe_downcall 3 34264 _000087_hash NULL ++_000088_hash bnad_debugfs_write_regrd 3 6706 _000088_hash NULL ++_000089_hash bnad_debugfs_write_regwr 3 57500 _000089_hash NULL ++_000090_hash 
bnx2fc_cmd_mgr_alloc 2-3 24873 _000090_hash NULL ++_000092_hash bnx2fc_process_unsol_compl 2 15576 _000092_hash NULL ++_000093_hash bnx2_nvram_write 2-4 7790 _000093_hash NULL ++_000095_hash btmrvl_gpiogap_write 3 35053 _000095_hash NULL ++_000096_hash btmrvl_hscfgcmd_write 3 27143 _000096_hash NULL ++_000097_hash btmrvl_hscmd_write 3 27089 _000097_hash NULL ++_000098_hash btmrvl_hsmode_write 3 42252 _000098_hash NULL ++_000099_hash btmrvl_pscmd_write 3 29504 _000099_hash NULL ++_000100_hash btmrvl_psmode_write 3 3703 _000100_hash NULL ++_000101_hash btrfs_alloc_delayed_item 1 11678 _000101_hash NULL ++_000102_hash c4iw_id_table_alloc 3 48163 _000102_hash NULL ++_000103_hash cache_do_downcall 3 6926 _000103_hash NULL ++_000104_hash cachefiles_cook_key 2 33274 _000104_hash NULL ++_000105_hash cachefiles_daemon_write 3 43535 _000105_hash NULL ++_000106_hash capi_write 3 35104 _003607_hash NULL nohasharray ++_000107_hash carl9170_debugfs_write 3 50857 _000107_hash NULL ++_000108_hash cciss_allocate_sg_chain_blocks 2-3 5368 _000108_hash NULL ++_000110_hash cciss_proc_write 3 10259 _000110_hash NULL ++_000111_hash cdrom_read_cdda_old 4 27664 _000111_hash NULL ++_000112_hash ceph_alloc_page_vector 1 18710 _000112_hash NULL ++_000113_hash ceph_buffer_new 1 35974 _000113_hash NULL ++_000114_hash ceph_copy_user_to_page_vector 3-4 656 _000114_hash NULL ++_000116_hash ceph_get_direct_page_vector 2 41917 _000116_hash NULL ++_000117_hash ceph_msg_new 2 5846 _000117_hash NULL ++_000118_hash ceph_setxattr 4 18913 _000118_hash NULL ++_000119_hash cfi_read_pri 3 24366 _000119_hash NULL ++_000120_hash cgroup_write_string 5 10900 _000120_hash NULL ++_000121_hash cgroup_write_X64 5 54514 _000121_hash NULL ++_000122_hash change_xattr 5 61390 _000122_hash NULL ++_000123_hash check_load_and_stores 2 2143 _000123_hash NULL ++_000124_hash cifs_idmap_key_instantiate 3 54503 _000124_hash NULL ++_000125_hash cifs_security_flags_proc_write 3 5484 _000125_hash NULL ++_000126_hash cifs_setxattr 4 23957 _000126_hash NULL ++_000127_hash cifs_spnego_key_instantiate 3 23588 _000127_hash NULL ++_000128_hash cld_pipe_downcall 3 15058 _000128_hash NULL ++_000129_hash clear_refs_write 3 61904 _000129_hash NULL ++_000130_hash clusterip_proc_write 3 44729 _000130_hash NULL ++_000131_hash cm4040_write 3 58079 _000131_hash NULL ++_000132_hash cm_copy_private_data 2 3649 _000132_hash NULL ++_000133_hash cmm_write 3 2896 _000133_hash NULL ++_000134_hash cm_write 3 36858 _000134_hash NULL ++_000135_hash coda_psdev_write 3 1711 _000135_hash NULL ++_000136_hash codec_reg_read_file 3 36280 _000136_hash NULL ++_000137_hash command_file_write 3 31318 _000137_hash NULL ++_000138_hash command_write 3 58841 _000138_hash NULL ++_000139_hash comm_write 3 44537 _001714_hash NULL nohasharray ++_000140_hash concat_writev 3 21451 _000140_hash NULL ++_000141_hash copy_and_check 3 19089 _000141_hash NULL ++_000142_hash copy_from_user_toio 3 31966 _000142_hash NULL ++_000143_hash copy_items 6 50140 _000143_hash NULL ++_000144_hash copy_macs 4 45534 _000144_hash NULL ++_000145_hash __copy_to_user 3 17551 _000145_hash NULL ++_000146_hash copy_vm86_regs_from_user 3 45340 _000146_hash NULL ++_000147_hash core_sys_select 1 47494 _000147_hash NULL ++_000148_hash cosa_write 3 1774 _000148_hash NULL ++_000149_hash cp210x_set_config 4 46447 _000149_hash NULL ++_000150_hash create_entry 2 33479 _000150_hash NULL ++_000151_hash create_queues 2-3 9088 _000151_hash NULL ++_000153_hash create_xattr 5 54106 _000153_hash NULL ++_000154_hash create_xattr_datum 5 33356 
_003443_hash NULL nohasharray ++_000155_hash csum_partial_copy_fromiovecend 3-4 9957 _000155_hash NULL ++_000157_hash ctrl_out 3-5 8712 _000157_hash NULL ++_000159_hash cxacru_cm_get_array 4 4412 _000159_hash NULL ++_000160_hash cxgbi_alloc_big_mem 1 4707 _000160_hash NULL ++_000161_hash dac960_user_command_proc_write 3 3071 _000161_hash NULL ++_000162_hash datablob_format 2 39571 _002490_hash NULL nohasharray ++_000163_hash dccp_feat_clone_sp_val 3 11942 _000163_hash NULL ++_000164_hash dccp_setsockopt_ccid 4 30701 _000164_hash NULL ++_000165_hash dccp_setsockopt_cscov 2 37766 _000165_hash NULL ++_000166_hash dccp_setsockopt_service 4 65336 _000166_hash NULL ++_000167_hash ddebug_proc_write 3 18055 _000167_hash NULL ++_000168_hash dev_config 3 8506 _000168_hash NULL ++_000169_hash device_write 3 45156 _000169_hash NULL ++_000170_hash devm_kzalloc 2 4966 _000170_hash NULL ++_000171_hash devres_alloc 2 551 _000171_hash NULL ++_000172_hash dfs_file_write 3 41196 _000172_hash NULL ++_000173_hash direct_entry 3 38836 _000173_hash NULL ++_000174_hash dispatch_ioctl 2 32357 _000174_hash NULL ++_000175_hash dispatch_proc_write 3 44320 _000175_hash NULL ++_000176_hash diva_os_copy_from_user 4 7792 _000176_hash NULL ++_000177_hash dlm_alloc_pagevec 1 54296 _000177_hash NULL ++_000178_hash dlmfs_file_read 3 28385 _000178_hash NULL ++_000179_hash dlmfs_file_write 3 6892 _000179_hash NULL ++_000180_hash dm_read 3 15674 _000180_hash NULL ++_000181_hash dm_write 3 2513 _000181_hash NULL ++_000182_hash __dn_setsockopt 5 13060 _000182_hash NULL ++_000183_hash dns_query 3 9676 _000183_hash NULL ++_000184_hash dns_resolver_instantiate 3 63314 _000184_hash NULL ++_000185_hash do_add_counters 3 3992 _000185_hash NULL ++_000186_hash __do_config_autodelink 3 58763 _000186_hash NULL ++_000187_hash do_ip_setsockopt 5 41852 _000187_hash NULL ++_000188_hash do_ipv6_setsockopt 5 18215 _000188_hash NULL ++_000189_hash do_ip_vs_set_ctl 4 48641 _000189_hash NULL ++_000190_hash do_kimage_alloc 3 64827 _000190_hash NULL ++_000191_hash do_register_entry 4 29478 _000191_hash NULL ++_000192_hash do_tty_write 5 44896 _000192_hash NULL ++_000193_hash do_update_counters 4 2259 _000193_hash NULL ++_000194_hash dsp_write 2 46218 _000194_hash NULL ++_000195_hash dup_to_netobj 3 26363 _000195_hash NULL ++_000196_hash dwc3_link_state_write 3 12641 _000196_hash NULL ++_000197_hash dwc3_mode_write 3 51997 _000197_hash NULL ++_000198_hash dwc3_testmode_write 3 30516 _000198_hash NULL ++_000199_hash ecryptfs_copy_filename 4 11868 _000199_hash NULL ++_000200_hash ecryptfs_miscdev_write 3 26847 _000200_hash NULL ++_000201_hash ecryptfs_send_miscdev 2 64816 _000201_hash NULL ++_000202_hash efx_tsoh_heap_alloc 2 58545 _000202_hash NULL ++_000203_hash emi26_writememory 4 57908 _000203_hash NULL ++_000204_hash emi62_writememory 4 29731 _000204_hash NULL ++_000205_hash encrypted_instantiate 3 3168 _000205_hash NULL ++_000206_hash encrypted_update 3 13414 _000206_hash NULL ++_000207_hash ep0_write 3 14536 _001422_hash NULL nohasharray ++_000208_hash ep_read 3 58813 _000208_hash NULL ++_000209_hash ep_write 3 59008 _000209_hash NULL ++_000210_hash erst_dbg_write 3 46715 _000210_hash NULL ++_000211_hash esp_alloc_tmp 2-3 40558 _000211_hash NULL ++_000213_hash evdev_do_ioctl 2 24459 _000213_hash NULL ++_000214_hash exofs_read_lookup_dev_table 3 17733 _000214_hash NULL ++_000215_hash ext4_kvmalloc 1 14796 _000215_hash NULL ++_000216_hash ezusb_writememory 4 45976 _000216_hash NULL ++_000217_hash fanotify_write 3 64623 _000217_hash NULL 
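Mid-table orientation, since size_overflow_hash.data runs for a few thousand more rows: each whitespace-separated row is (1) the entry's own identifier, (2) the kernel function name, (3) the dash-separated argument position(s) that carry a size, (4) the hash-table index, (5) the entry that occupies that index's array slot, and (6) a next link, NULL except for collisions such as __krealloc above, which is tagged nohasharray and shares index 14857 with lcd_write further down. A hedged sketch of the lookup a consumer of the generated header could perform (lookup_size_overflow is an illustrative name, not a function from this patch; it assumes the struct and array sketched after generate_size_overflow_hash.sh above):

	#include <string.h>

	/* walk the bucket at the hashed index until the function name matches */
	static const struct size_overflow_hash *lookup_size_overflow(const char *fnname, unsigned int hash)
	{
		const struct size_overflow_hash *entry;

		for (entry = size_overflow_hash[hash % 65536]; entry; entry = entry->next)
			if (!strcmp(entry->name, fnname))
				return entry;	/* entry->param flags the arguments to instrument */
		return NULL;
	}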
++_000218_hash fd_copyin 3 56247 _000218_hash NULL ++_000219_hash ffs_epfile_io 3 64886 _000219_hash NULL ++_000220_hash ffs_prepare_buffer 2 59892 _000220_hash NULL ++_000221_hash f_hidg_write 3 7932 _000221_hash NULL ++_000222_hash file_read_actor 4 1401 _000222_hash NULL ++_000223_hash fill_write_buffer 3 3142 _000223_hash NULL ++_000224_hash __find_xattr 6 2117 _002425_hash NULL nohasharray ++_000225_hash fl_create 5 56435 _000225_hash NULL ++_000226_hash fs_path_ensure_buf 2 59445 _000226_hash NULL ++_000227_hash ftdi_elan_write 3 57309 _000227_hash NULL ++_000228_hash fw_iso_buffer_alloc 2 13704 _000228_hash NULL ++_000229_hash garmin_write_bulk 3 58191 _000229_hash NULL ++_000230_hash garp_attr_create 3 3883 _000230_hash NULL ++_000231_hash get_arg 3 5694 _000231_hash NULL ++_000232_hash getdqbuf 1 62908 _000232_hash NULL ++_000233_hash get_fdb_entries 3 41916 _000233_hash NULL ++_000234_hash get_fd_set 1 3866 _000234_hash NULL ++_000235_hash get_indirect_ea 4 51869 _000235_hash NULL ++_000236_hash get_registers 3 26187 _000236_hash NULL ++_000237_hash get_scq 2 10897 _000237_hash NULL ++_000238_hash get_server_iovec 2 16804 _000238_hash NULL ++_000239_hash get_ucode_user 3 38202 _000239_hash NULL ++_000240_hash get_user_cpu_mask 2 14861 _000240_hash NULL ++_000241_hash gfs2_alloc_sort_buffer 1 18275 _000241_hash NULL ++_000242_hash gfs2_glock_nq_m 1 20347 _000242_hash NULL ++_000243_hash gigaset_initcs 2 43753 _000243_hash NULL ++_000244_hash gigaset_initdriver 2 1060 _000244_hash NULL ++_000245_hash groups_alloc 1 7614 _000245_hash NULL ++_000246_hash gs_alloc_req 2 58883 _000246_hash NULL ++_000247_hash gs_buf_alloc 2 25067 _000247_hash NULL ++_000248_hash gsm_data_alloc 3 42437 _000248_hash NULL ++_000249_hash gss_pipe_downcall 3 23182 _000249_hash NULL ++_000250_hash handle_request 9 10024 _000250_hash NULL ++_000251_hash hashtab_create 3 33769 _000251_hash NULL ++_000252_hash hcd_buffer_alloc 2 27495 _000252_hash NULL ++_000253_hash hci_sock_setsockopt 5 28993 _000253_hash NULL ++_000254_hash heap_init 2 49617 _000254_hash NULL ++_000255_hash hest_ghes_dev_register 1 46766 _000255_hash NULL ++_000256_hash hidg_alloc_ep_req 2 10159 _000256_hash NULL ++_000257_hash hid_parse_report 3 51737 _000257_hash NULL ++_000258_hash hidraw_get_report 3 45609 _000258_hash NULL ++_000259_hash hidraw_report_event 3 20503 _000259_hash NULL ++_000260_hash hidraw_send_report 3 23449 _000260_hash NULL ++_000261_hash hpfs_translate_name 3 41497 _000261_hash NULL ++_000262_hash hysdn_conf_write 3 52145 _000262_hash NULL ++_000263_hash __i2400mu_send_barker 3 23652 _000263_hash NULL ++_000264_hash i2cdev_read 3 1206 _000264_hash NULL ++_000265_hash i2cdev_write 3 23310 _000265_hash NULL ++_000266_hash i2o_parm_field_get 5 34477 _000266_hash NULL ++_000267_hash i2o_parm_table_get 6 61635 _000267_hash NULL ++_000268_hash ib_copy_from_udata 3 59502 _000268_hash NULL ++_000269_hash ib_ucm_alloc_data 3 36885 _000269_hash NULL ++_000270_hash ib_umad_write 3 47993 _000270_hash NULL ++_000271_hash ib_uverbs_unmarshall_recv 5 12251 _000271_hash NULL ++_000272_hash icn_writecmd 2 38629 _000272_hash NULL ++_000273_hash ide_driver_proc_write 3 32493 _000273_hash NULL ++_000274_hash ide_settings_proc_write 3 35110 _000274_hash NULL ++_000275_hash idetape_chrdev_write 3 53976 _000275_hash NULL ++_000276_hash idmap_pipe_downcall 3 14591 _000276_hash NULL ++_000277_hash ieee80211_build_probe_req 7-5 27660 _000277_hash NULL ++_000278_hash ieee80211_if_write 3 34894 _000278_hash NULL ++_000279_hash if_write 3 51756 
_000279_hash NULL ++_000280_hash ilo_write 3 64378 _000280_hash NULL ++_000281_hash ima_write_policy 3 40548 _000281_hash NULL ++_000282_hash init_data_container 1 60709 _000282_hash NULL ++_000283_hash init_send_hfcd 1 34586 _000283_hash NULL ++_000284_hash insert_dent 7 65034 _000284_hash NULL ++_000285_hash interpret_user_input 2 19393 _000285_hash NULL ++_000286_hash int_proc_write 3 39542 _000286_hash NULL ++_000287_hash ioctl_private_iw_point 7 1273 _000287_hash NULL ++_000288_hash iov_iter_copy_from_user 4 31942 _000288_hash NULL ++_000289_hash iov_iter_copy_from_user_atomic 4 56368 _000289_hash NULL ++_000290_hash iowarrior_write 3 18604 _000290_hash NULL ++_000291_hash ipc_alloc 1 1192 _000291_hash NULL ++_000292_hash ipc_rcu_alloc 1 21208 _000292_hash NULL ++_000293_hash ip_options_get_from_user 4 64958 _000293_hash NULL ++_000294_hash ipv6_renew_option 3 38813 _000294_hash NULL ++_000295_hash ip_vs_conn_fill_param_sync 6 29771 _001898_hash NULL nohasharray ++_000296_hash ip_vs_create_timeout_table 2 64478 _000296_hash NULL ++_000297_hash ipw_queue_tx_init 3 49161 _000297_hash NULL ++_000298_hash irda_setsockopt 5 19824 _000298_hash NULL ++_000299_hash irias_new_octseq_value 2 13596 _003821_hash NULL nohasharray ++_000300_hash irnet_ctrl_write 3 24139 _000300_hash NULL ++_000301_hash isdn_add_channels 3 40905 _000301_hash NULL ++_000302_hash isdn_ppp_fill_rq 2 41428 _000302_hash NULL ++_000303_hash isdn_ppp_write 4 29109 _000303_hash NULL ++_000304_hash isdn_read 3 50021 _000304_hash NULL ++_000305_hash isdn_v110_open 3 2418 _000305_hash NULL ++_000306_hash isdn_writebuf_stub 4 52383 _000306_hash NULL ++_000307_hash islpci_mgt_transmit 5 34133 _000307_hash NULL ++_000308_hash iso_callback 3 43208 _000308_hash NULL ++_000309_hash iso_packets_buffer_init 3-4 29061 _000309_hash NULL ++_000310_hash it821x_firmware_command 3 8628 _000310_hash NULL ++_000311_hash iwch_alloc_fastreg_pbl 2 40153 _000311_hash NULL ++_000312_hash iwl_calib_set 3 34400 _003754_hash NULL nohasharray ++_000313_hash jbd2_journal_init_revoke_table 1 36336 _000313_hash NULL ++_000314_hash jffs2_alloc_full_dirent 1 60179 _001158_hash NULL nohasharray ++_000315_hash journal_init_revoke_table 1 56331 _000315_hash NULL ++_000316_hash kcalloc 1-2 27770 _000316_hash NULL ++_000318_hash keyctl_instantiate_key_common 4 47889 _000318_hash NULL ++_000319_hash keyctl_update_key 3 26061 _000319_hash NULL ++_000320_hash __kfifo_alloc 2-3 22173 _000320_hash NULL ++_000322_hash kfifo_copy_from_user 3 5091 _000322_hash NULL ++_000323_hash kmalloc_node 1 50163 _003818_hash NULL nohasharray ++_000324_hash kmalloc_parameter 1 65279 _000324_hash NULL ++_000325_hash kmem_alloc 1 31920 _000325_hash NULL ++_000326_hash kobj_map 2-3 9566 _000326_hash NULL ++_000328_hash kone_receive 4 4690 _000328_hash NULL ++_000329_hash kone_send 4 63435 _000329_hash NULL ++_000330_hash krealloc 2 14908 _000330_hash NULL ++_000331_hash kvmalloc 1 32646 _000331_hash NULL ++_000332_hash kvm_read_guest_atomic 4 10765 _000332_hash NULL ++_000333_hash kvm_read_guest_cached 4 39666 _000333_hash NULL ++_000334_hash kvm_read_guest_page 5 18074 _000334_hash NULL ++_000335_hash kzalloc 1 54740 _000335_hash NULL ++_000336_hash l2cap_sock_setsockopt 5 50207 _000336_hash NULL ++_000337_hash l2cap_sock_setsockopt_old 4 29346 _000337_hash NULL ++_000338_hash lane2_associate_req 4 45398 _000338_hash NULL ++_000339_hash lbs_debugfs_write 3 48413 _000339_hash NULL ++_000340_hash lcd_write 3 14857 _000340_hash &_000015_hash ++_000341_hash ldm_frag_add 2 5611 
_000341_hash NULL ++_000342_hash __lgread 4 31668 _000342_hash NULL ++_000343_hash libipw_alloc_txb 1-3-2 27579 _000343_hash NULL ++_000344_hash link_send_sections_long 4 46556 _000344_hash NULL ++_000345_hash listxattr 3 12769 _000345_hash NULL ++_000346_hash load_msg 2 95 _000346_hash NULL ++_000347_hash lpfc_debugfs_dif_err_write 3 17424 _000347_hash NULL ++_000348_hash lp_write 3 9511 _000348_hash NULL ++_000349_hash mb_cache_create 2 17307 _000349_hash NULL ++_000350_hash mce_write 3 26201 _000350_hash NULL ++_000351_hash mcs7830_get_reg 3 33308 _000351_hash NULL ++_000352_hash mcs7830_set_reg 3 31413 _000352_hash NULL ++_000353_hash memcpy_fromiovec 3 55247 _000353_hash NULL ++_000354_hash memcpy_fromiovecend 3-4 2707 _000354_hash NULL ++_000356_hash mempool_resize 2 47983 _002039_hash NULL nohasharray ++_000357_hash mem_rw 3 22085 _000357_hash NULL ++_000358_hash mgmt_control 3 7349 _000358_hash NULL ++_000359_hash mgmt_pending_add 5 46976 _000359_hash NULL ++_000360_hash mlx4_ib_alloc_fast_reg_page_list 2 46119 _000360_hash NULL ++_000361_hash mmc_alloc_sg 1 21504 _000361_hash NULL ++_000362_hash mmc_send_bus_test 4 18285 _000362_hash NULL ++_000363_hash mmc_send_cxd_data 5 38655 _000363_hash NULL ++_000364_hash module_alloc_update_bounds 1 47205 _000364_hash NULL ++_000365_hash move_addr_to_kernel 2 32673 _000365_hash NULL ++_000366_hash mpi_alloc_limb_space 1 23190 _000366_hash NULL ++_000367_hash mpi_resize 2 44674 _000367_hash NULL ++_000368_hash mptctl_getiocinfo 2 28545 _000368_hash NULL ++_000369_hash mtdchar_readoob 4 31200 _000369_hash NULL ++_000370_hash mtdchar_write 3 56831 _002122_hash NULL nohasharray ++_000371_hash mtdchar_writeoob 4 3393 _000371_hash NULL ++_000372_hash mtd_device_parse_register 5 5024 _000372_hash NULL ++_000373_hash mtf_test_write 3 18844 _000373_hash NULL ++_000374_hash mthca_alloc_icm_table 3-4 38268 _002459_hash NULL nohasharray ++_000376_hash mthca_alloc_init 2 21754 _000376_hash NULL ++_000377_hash mthca_array_init 2 39987 _000377_hash NULL ++_000378_hash mthca_buf_alloc 2 35861 _000378_hash NULL ++_000379_hash mtrr_write 3 59622 _000379_hash NULL ++_000380_hash musb_test_mode_write 3 33518 _000380_hash NULL ++_000381_hash mwifiex_get_common_rates 3 17131 _000381_hash NULL ++_000382_hash __mxt_write_reg 3 57326 _000382_hash NULL ++_000383_hash nand_bch_init 2-3 16280 _001439_hash NULL nohasharray ++_000385_hash ncp_file_write 3 3813 _000385_hash NULL ++_000386_hash ncp__vol2io 5 4804 _000386_hash NULL ++_000387_hash nes_alloc_fast_reg_page_list 2 33523 _000387_hash NULL ++_000388_hash nfc_targets_found 3 29886 _000388_hash NULL ++_000389_hash __nf_ct_ext_add_length 3 12364 _000389_hash NULL ++_000390_hash nfs4_acl_new 1 49806 _000390_hash NULL ++_000391_hash nfs4_write_cached_acl 4 15070 _000391_hash NULL ++_000392_hash nfsd_symlink 6 63442 _000392_hash NULL ++_000393_hash nfs_idmap_get_desc 2-4 42990 _000393_hash NULL ++_000395_hash nfs_readdir_make_qstr 3 12509 _000395_hash NULL ++_000396_hash note_last_dentry 3 12285 _000396_hash NULL ++_000397_hash ntfs_copy_from_user 3-5 15072 _000397_hash NULL ++_000399_hash __ntfs_copy_from_user_iovec_inatomic 3-4 38153 _000399_hash NULL ++_000401_hash ntfs_ucstonls 3-5 23097 _000401_hash NULL ++_000403_hash nvme_alloc_iod 1 56027 _000403_hash NULL ++_000404_hash nvram_write 3 3894 _000404_hash NULL ++_000405_hash o2hb_debug_create 4 18744 _000405_hash NULL ++_000406_hash o2net_send_message_vec 4 879 _002013_hash NULL nohasharray ++_000407_hash ocfs2_control_cfu 2 37750 _000407_hash NULL ++_000408_hash 
oom_adjust_write 3 41116 _000408_hash NULL ++_000409_hash oom_score_adj_write 3 42594 _000409_hash NULL ++_000410_hash oprofilefs_ulong_from_user 3 57251 _000410_hash NULL ++_000411_hash opticon_write 4 60775 _000411_hash NULL ++_000412_hash p9_check_zc_errors 4 15534 _000412_hash NULL ++_000413_hash packet_buffer_init 2 1607 _000413_hash NULL ++_000414_hash packet_setsockopt 5 17662 _000414_hash NULL ++_000415_hash parse_command 2 37079 _000415_hash NULL ++_000416_hash pcbit_writecmd 2 12332 _000416_hash NULL ++_000417_hash pcmcia_replace_cis 3 57066 _000417_hash NULL ++_000418_hash pgctrl_write 3 50453 _000418_hash NULL ++_000419_hash pg_write 3 40766 _000419_hash NULL ++_000420_hash pidlist_allocate 1 64404 _000420_hash NULL ++_000421_hash pipe_iov_copy_from_user 3 23102 _000421_hash NULL ++_000422_hash pipe_iov_copy_to_user 3 3447 _000422_hash NULL ++_000423_hash pkt_add 3 39897 _000423_hash NULL ++_000424_hash pktgen_if_write 3 55628 _000424_hash NULL ++_000425_hash platform_device_add_data 3 310 _000425_hash NULL ++_000426_hash platform_device_add_resources 3 13289 _000426_hash NULL ++_000427_hash pmcraid_copy_sglist 3 38431 _000427_hash NULL ++_000428_hash pm_qos_power_write 3 52513 _000428_hash NULL ++_000429_hash pnpbios_proc_write 3 19758 _000429_hash NULL ++_000430_hash pool_allocate 3 42012 _000430_hash NULL ++_000431_hash posix_acl_alloc 1 48063 _000431_hash NULL ++_000432_hash ppp_cp_parse_cr 4 5214 _000432_hash NULL ++_000433_hash ppp_write 3 34034 _000433_hash NULL ++_000434_hash pp_read 3 33210 _000434_hash NULL ++_000435_hash pp_write 3 39554 _000435_hash NULL ++_000436_hash printer_req_alloc 2 62687 _000436_hash NULL ++_000437_hash printer_write 3 60276 _000437_hash NULL ++_000438_hash prism2_info_scanresults 3 59729 _000438_hash NULL ++_000439_hash prism2_set_genericelement 3 29277 _000439_hash NULL ++_000440_hash __probe_kernel_read 3 61119 _000440_hash NULL ++_000441_hash __probe_kernel_write 3 29842 _000441_hash NULL ++_000442_hash proc_coredump_filter_write 3 25625 _000442_hash NULL ++_000443_hash _proc_do_string 2 6376 _000443_hash NULL ++_000444_hash process_vm_rw_pages 5-6 15954 _000444_hash NULL ++_000446_hash proc_loginuid_write 3 63648 _000446_hash NULL ++_000447_hash proc_pid_attr_write 3 63845 _000447_hash NULL ++_000448_hash proc_scsi_devinfo_write 3 32064 _000448_hash NULL ++_000449_hash proc_scsi_write 3 29142 _000449_hash NULL ++_000450_hash proc_scsi_write_proc 3 267 _000450_hash NULL ++_000451_hash pskb_expand_head 2-3 42881 _000451_hash NULL ++_000453_hash pstore_mkfile 5 50830 _000453_hash NULL ++_000454_hash pti_char_write 3 60960 _000454_hash NULL ++_000455_hash ptrace_writedata 4 45021 _000455_hash NULL ++_000456_hash pt_write 3 40159 _000456_hash NULL ++_000457_hash qdisc_class_hash_alloc 1 18262 _000457_hash NULL ++_000458_hash r3964_write 4 57662 _000458_hash NULL ++_000459_hash raw_seticmpfilter 3 6888 _000459_hash NULL ++_000460_hash raw_setsockopt 5 45800 _000460_hash NULL ++_000461_hash rawv6_seticmpfilter 5 12137 _000461_hash NULL ++_000462_hash ray_cs_essid_proc_write 3 17875 _000462_hash NULL ++_000463_hash rbd_add 3 16366 _000463_hash NULL ++_000464_hash rbd_snap_add 4 19678 _000464_hash NULL ++_000465_hash rdma_set_ib_paths 3 45592 _000465_hash NULL ++_000466_hash rds_page_copy_user 4 35691 _000466_hash NULL ++_000467_hash read 3 9397 _000467_hash NULL ++_000468_hash read_buf 2 20469 _000468_hash NULL ++_000469_hash read_cis_cache 4 29735 _000469_hash NULL ++_000470_hash realloc_buffer 2 25816 _000470_hash NULL ++_000471_hash 
receive_DataRequest 3 9904 _000471_hash NULL ++_000472_hash recent_mt_proc_write 3 8206 _000472_hash NULL ++_000473_hash regmap_access_read_file 3 37223 _000473_hash NULL ++_000474_hash regmap_bulk_write 4 59049 _000474_hash NULL ++_000475_hash regmap_map_read_file 3 37685 _000475_hash NULL ++_000476_hash regset_tls_set 4 18459 _000476_hash NULL ++_000477_hash reiserfs_add_entry 4 23062 _002792_hash NULL nohasharray ++_000478_hash remote_settings_file_write 3 22987 _000478_hash NULL ++_000479_hash request_key_auth_new 3 38092 _000479_hash NULL ++_000480_hash restore_i387_fxsave 2 17528 _000480_hash NULL ++_000481_hash revalidate 2 19043 _000481_hash NULL ++_000482_hash rfcomm_sock_setsockopt 5 18254 _000482_hash NULL ++_000483_hash rndis_add_response 2 58544 _000483_hash NULL ++_000484_hash rndis_set_oid 4 6547 _000484_hash NULL ++_000485_hash rngapi_reset 3 34366 _002137_hash NULL nohasharray ++_000486_hash roccat_common2_receive 4 50369 _000486_hash NULL ++_000487_hash roccat_common2_send 4 2422 _000487_hash NULL ++_000488_hash rpc_malloc 2 43573 _000488_hash NULL ++_000489_hash rt2x00debug_write_bbp 3 8212 _000489_hash NULL ++_000490_hash rt2x00debug_write_csr 3 64753 _000490_hash NULL ++_000491_hash rt2x00debug_write_eeprom 3 23091 _000491_hash NULL ++_000492_hash rt2x00debug_write_rf 3 38195 _000492_hash NULL ++_000493_hash rt2x00debug_write_rfcsr 3 41473 _000493_hash NULL ++_000494_hash rts51x_read_mem 4 26577 _002730_hash NULL nohasharray ++_000495_hash rts51x_read_status 4 11830 _000495_hash NULL ++_000496_hash rts51x_write_mem 4 17598 _000496_hash NULL ++_000497_hash rw_copy_check_uvector 3 45748 _003716_hash NULL nohasharray ++_000498_hash rxrpc_request_key 3 27235 _000498_hash NULL ++_000499_hash rxrpc_server_keyring 3 16431 _000499_hash NULL ++_000500_hash savemem 3 58129 _000500_hash NULL ++_000501_hash sb16_copy_from_user 10-7-6 55836 _000501_hash NULL ++_000504_hash sched_autogroup_write 3 10984 _000504_hash NULL ++_000505_hash scsi_mode_select 6 37330 _000505_hash NULL ++_000506_hash scsi_tgt_copy_sense 3 26933 _000506_hash NULL ++_000507_hash sctp_auth_create_key 1 51641 _000507_hash NULL ++_000508_hash sctp_getsockopt_delayed_ack 2 9232 _000508_hash NULL ++_000509_hash sctp_getsockopt_local_addrs 2 25178 _000509_hash NULL ++_000510_hash sctp_make_abort_user 3 29654 _000510_hash NULL ++_000511_hash sctp_setsockopt_active_key 3 43755 _000511_hash NULL ++_000512_hash sctp_setsockopt_adaptation_layer 3 26935 _003246_hash NULL nohasharray ++_000513_hash sctp_setsockopt_associnfo 3 51684 _000513_hash NULL ++_000514_hash sctp_setsockopt_auth_chunk 3 30843 _000514_hash NULL ++_000515_hash sctp_setsockopt_auth_key 3 3793 _000515_hash NULL ++_000516_hash sctp_setsockopt_autoclose 3 5775 _000516_hash NULL ++_000517_hash sctp_setsockopt_bindx 3 49870 _000517_hash NULL ++_000518_hash __sctp_setsockopt_connectx 3 46949 _000518_hash NULL ++_000519_hash sctp_setsockopt_context 3 31091 _000519_hash NULL ++_000520_hash sctp_setsockopt_default_send_param 3 49578 _000520_hash NULL ++_000521_hash sctp_setsockopt_delayed_ack 3 40129 _000521_hash NULL ++_000522_hash sctp_setsockopt_del_key 3 42304 _002709_hash NULL nohasharray ++_000523_hash sctp_setsockopt_events 3 18862 _000523_hash NULL ++_000524_hash sctp_setsockopt_hmac_ident 3 11687 _000524_hash NULL ++_000525_hash sctp_setsockopt_initmsg 3 1383 _000525_hash NULL ++_000526_hash sctp_setsockopt_maxburst 3 28041 _000526_hash NULL ++_000527_hash sctp_setsockopt_maxseg 3 11829 _000527_hash NULL ++_000528_hash sctp_setsockopt_peer_addr_params 
3 734 _000528_hash NULL ++_000529_hash sctp_setsockopt_peer_primary_addr 3 13440 _000529_hash NULL ++_000530_hash sctp_setsockopt_rtoinfo 3 30941 _000530_hash NULL ++_000531_hash security_context_to_sid_core 2 29248 _000531_hash NULL ++_000532_hash sel_commit_bools_write 3 46077 _000532_hash NULL ++_000533_hash sel_write_avc_cache_threshold 3 2256 _000533_hash NULL ++_000534_hash sel_write_bool 3 46996 _000534_hash NULL ++_000535_hash sel_write_checkreqprot 3 60774 _000535_hash NULL ++_000536_hash sel_write_disable 3 10511 _000536_hash NULL ++_000537_hash sel_write_enforce 3 48998 _000537_hash NULL ++_000538_hash sel_write_load 3 63830 _000538_hash NULL ++_000539_hash send_bulk_static_data 3 61932 _000539_hash NULL ++_000540_hash set_aoe_iflist 2 42737 _000540_hash NULL ++_000541_hash setkey_unaligned 3 39474 _000541_hash NULL ++_000542_hash set_registers 3 53582 _000542_hash NULL ++_000543_hash setsockopt 5 54539 _000543_hash NULL ++_000544_hash setup_req 3 5848 _000544_hash NULL ++_000545_hash setxattr 4 37006 _000545_hash NULL ++_000546_hash sfq_alloc 1 2861 _000546_hash NULL ++_000547_hash sg_kmalloc 1 50240 _000547_hash NULL ++_000548_hash sgl_map_user_pages 2 30610 _000548_hash NULL ++_000549_hash shash_setkey_unaligned 3 8620 _000549_hash NULL ++_000550_hash shmem_xattr_alloc 2 61190 _000550_hash NULL ++_000551_hash sierra_setup_urb 5 46029 _000551_hash NULL ++_000552_hash simple_transaction_get 3 50633 _000552_hash NULL ++_000553_hash simple_write_to_buffer 2-5 3122 _000553_hash NULL ++_000555_hash sisusb_send_bulk_msg 3 17864 _000555_hash NULL ++_000556_hash skb_add_data 3 48363 _000556_hash NULL ++_000557_hash skb_do_copy_data_nocache 5 12465 _000557_hash NULL ++_000558_hash sl_alloc_bufs 2 50380 _000558_hash NULL ++_000559_hash sl_realloc_bufs 2 64086 _000559_hash NULL ++_000560_hash smk_set_cipso 3 20379 _000560_hash NULL ++_000561_hash smk_write_ambient 3 45691 _000561_hash NULL ++_000562_hash smk_write_direct 3 46363 _000562_hash NULL ++_000563_hash smk_write_doi 3 49621 _000563_hash NULL ++_000564_hash smk_write_logging 3 2618 _000564_hash NULL ++_000565_hash smk_write_mapped 3 13519 _000565_hash NULL ++_000566_hash smk_write_netlbladdr 3 42525 _000566_hash NULL ++_000567_hash smk_write_onlycap 3 14400 _000567_hash NULL ++_000568_hash smk_write_rules_list 3 18565 _000568_hash NULL ++_000569_hash snd_ctl_elem_user_tlv 3 11695 _000569_hash NULL ++_000570_hash snd_emu10k1_fx8010_read 5 9605 _000570_hash NULL ++_000571_hash snd_emu10k1_synth_copy_from_user 3-5 9061 _000571_hash NULL ++_000573_hash snd_gus_dram_poke 4 18525 _000573_hash NULL ++_000574_hash snd_hdsp_playback_copy 5 20676 _000574_hash NULL ++_000575_hash snd_info_entry_write 3 63474 _000575_hash NULL ++_000576_hash snd_korg1212_copy_from 6 36169 _000576_hash NULL ++_000577_hash snd_mem_proc_write 3 9786 _000577_hash NULL ++_000578_hash snd_midi_channel_init_set 1 30092 _000578_hash NULL ++_000579_hash snd_midi_event_new 1 9893 _000764_hash NULL nohasharray ++_000580_hash snd_opl4_mem_proc_write 5 9670 _000580_hash NULL ++_000581_hash snd_pcm_aio_read 3 13900 _000581_hash NULL ++_000582_hash snd_pcm_aio_write 3 28738 _000582_hash NULL ++_000583_hash snd_pcm_oss_write1 3 10872 _000583_hash NULL ++_000584_hash snd_pcm_oss_write2 3 27332 _000584_hash NULL ++_000585_hash snd_rawmidi_kernel_write1 4 56847 _000585_hash NULL ++_000586_hash snd_rme9652_playback_copy 5 20970 _000586_hash NULL ++_000587_hash snd_sb_csp_load_user 3 45190 _000587_hash NULL ++_000588_hash snd_usb_ctl_msg 8 8436 _000588_hash NULL ++_000589_hash 
sock_bindtodevice 3 50942 _000589_hash NULL
++_000590_hash sock_kmalloc 2 62205 _000590_hash NULL
++_000591_hash spidev_ioctl 2 12846 _000591_hash NULL
++_000592_hash spidev_write 3 44510 _000592_hash NULL
++_000593_hash squashfs_read_table 3 16945 _000593_hash NULL
++_000594_hash srpt_alloc_ioctx 2-3 51042 _000594_hash NULL
++_000596_hash srpt_alloc_ioctx_ring 2-4-3 49330 _000596_hash NULL
++_000597_hash st5481_setup_isocpipes 6-4 61340 _000597_hash NULL
++_000598_hash sta_agg_status_write 3 45164 _000598_hash NULL
++_000599_hash svc_setsockopt 5 36876 _000599_hash NULL
++_000600_hash sys_add_key 4 61288 _000600_hash NULL
++_000601_hash sys_modify_ldt 3 18824 _000601_hash NULL
++_000602_hash sys_semtimedop 3 4486 _000602_hash NULL
++_000603_hash sys_setdomainname 2 4373 _000603_hash NULL
++_000604_hash sys_sethostname 2 42962 _000604_hash NULL
++_000605_hash tomoyo_write_self 3 45161 _000605_hash NULL
++_000606_hash tower_write 3 8580 _000606_hash NULL
++_000607_hash tpm_write 3 50798 _000607_hash NULL
++_000608_hash trusted_instantiate 3 4710 _000608_hash NULL
++_000609_hash trusted_update 3 12664 _000609_hash NULL
++_000610_hash tty_buffer_alloc 2 45437 _000610_hash NULL
++_000611_hash __tun_chr_ioctl 4 22300 _000611_hash NULL
++_000612_hash ubi_more_leb_change_data 4 63534 _000612_hash NULL
++_000613_hash ubi_more_update_data 4 39189 _000613_hash NULL
++_000614_hash ubi_resize_volume 2 50172 _000614_hash NULL
++_000615_hash udf_alloc_i_data 2 35786 _000615_hash NULL
++_000616_hash uea_idma_write 3 64139 _000616_hash NULL
++_000617_hash uea_request 4 47613 _000617_hash NULL
++_000618_hash uea_send_modem_cmd 3 3888 _000618_hash NULL
++_000619_hash uio_write 3 43202 _000619_hash NULL
++_000620_hash um_idi_write 3 18293 _000620_hash NULL
++_000621_hash us122l_ctl_msg 8 13330 _000621_hash NULL
++_000622_hash usb_alloc_urb 1 43436 _000622_hash NULL
++_000623_hash usblp_new_writeurb 2 22894 _000623_hash NULL
++_000624_hash usblp_write 3 23178 _000624_hash NULL
++_000625_hash usbtest_alloc_urb 3-5 34446 _000625_hash NULL
++_000627_hash usbtmc_write 3 64340 _000627_hash NULL
++_000628_hash user_instantiate 3 26131 _000628_hash NULL
++_000629_hash user_update 3 41332 _000629_hash NULL
++_000630_hash uwb_rc_cmd_done 4 35892 _000630_hash NULL
++_000631_hash uwb_rc_neh_grok_event 3 55799 _000631_hash NULL
++_000632_hash v9fs_alloc_rdir_buf 2 42150 _000632_hash NULL
++_000633_hash vc_do_resize 3-4 48842 _000633_hash NULL
++_000635_hash vcs_write 3 3910 _000635_hash NULL
++_000636_hash vga_arb_write 3 36112 _000636_hash NULL
++_000637_hash vga_switcheroo_debugfs_write 3 33984 _000637_hash NULL
++_000638_hash vhci_get_user 3 45039 _000638_hash NULL
++_000639_hash video_proc_write 3 6724 _000639_hash NULL
++_000640_hash vlsi_alloc_ring 3-4 57003 _000640_hash NULL
++_000642_hash __vmalloc 1 61168 _000642_hash NULL
++_000643_hash vmalloc_32 1 1135 _000643_hash NULL
++_000644_hash vmalloc_32_user 1 37519 _000644_hash NULL
++_000645_hash vmalloc_exec 1 36132 _000645_hash NULL
++_000646_hash vmalloc_node 1 58700 _000646_hash NULL
++_000647_hash __vmalloc_node_flags 1 30352 _000647_hash NULL
++_000648_hash vmalloc_user 1 32308 _000648_hash NULL
++_000649_hash vol_cdev_direct_write 3 20751 _000649_hash NULL
++_000650_hash vp_request_msix_vectors 2 28849 _000650_hash NULL
++_000651_hash vring_add_indirect 3-4 20737 _000651_hash NULL
++_000653_hash vring_new_virtqueue 1 9671 _000653_hash NULL
++_000654_hash vxge_os_dma_malloc 2 46184 _000654_hash NULL
++_000655_hash vxge_os_dma_malloc_async 3 56348 _000655_hash NULL
++_000656_hash wdm_write 3 53735 _000656_hash NULL
++_000657_hash wiimote_hid_send 3 48528 _000657_hash NULL
++_000658_hash wlc_phy_loadsampletable_nphy 3 64367 _000658_hash NULL
++_000659_hash write 3 62671 _000659_hash NULL
++_000660_hash write_flush 3 50803 _000660_hash NULL
++_000661_hash write_rio 3 54837 _000661_hash NULL
++_000662_hash x25_asy_change_mtu 2 26928 _000662_hash NULL
++_000663_hash xdi_copy_from_user 4 8395 _000663_hash NULL
++_000664_hash xfrm_dst_alloc_copy 3 3034 _000664_hash NULL
++_000665_hash xfrm_user_policy 4 62573 _000665_hash NULL
++_000666_hash xfs_attrmulti_attr_set 4 59346 _000666_hash NULL
++_000667_hash xfs_handle_to_dentry 3 12135 _000667_hash NULL
++_000668_hash xip_file_read 3 58592 _000668_hash NULL
++_000669_hash __xip_file_write 3-4 2733 _000669_hash NULL
++_000671_hash xprt_rdma_allocate 2 31372 _000671_hash NULL
++_000672_hash zd_usb_iowrite16v_async 3 23984 _000672_hash NULL
++_000673_hash zd_usb_read_fw 4 22049 _000673_hash NULL
++_000674_hash zerocopy_sg_from_iovec 3 11828 _000674_hash NULL
++_000675_hash __a2mp_build 3 60987 _000675_hash NULL
++_000677_hash acpi_ex_allocate_name_string 2-1 7685 _001169_hash NULL nohasharray
++_000678_hash acpi_os_allocate_zeroed 1 37422 _000678_hash NULL
++_000679_hash acpi_ut_initialize_buffer 2 47143 _002830_hash NULL nohasharray
++_000680_hash ad7879_spi_xfer 3 36311 _000680_hash NULL
++_000681_hash add_new_gdb 3 27643 _000681_hash NULL
++_000682_hash add_numbered_child 5 14273 _000682_hash NULL
++_000683_hash add_res_range 4 21310 _000683_hash NULL
++_000684_hash addtgt 3 54703 _000684_hash NULL
++_000685_hash add_uuid 4 49831 _000685_hash NULL
++_000686_hash afs_cell_alloc 2 24052 _000686_hash NULL
++_000687_hash aggr_recv_addba_req_evt 4 38037 _000687_hash NULL
++_000688_hash agp_create_memory 1 1075 _000688_hash NULL
++_000689_hash agp_create_user_memory 1 62955 _000689_hash NULL
++_000690_hash alg_setsockopt 5 20985 _000690_hash NULL
++_000691_hash alloc_async 1 14208 _000691_hash NULL
++_000692_hash ___alloc_bootmem_nopanic 1 53626 _000692_hash NULL
++_000693_hash alloc_buf 1 34532 _000693_hash NULL
++_000694_hash alloc_chunk 1 49575 _000694_hash NULL
++_000695_hash alloc_context 1 41283 _000695_hash NULL
++_000696_hash alloc_ctrl_packet 1 44667 _000696_hash NULL
++_000697_hash alloc_data_packet 1 46698 _000697_hash NULL
++_000698_hash alloc_dca_provider 2 59670 _000698_hash NULL
++_000699_hash __alloc_dev_table 2 54343 _000699_hash NULL
++_000700_hash alloc_ep 1 17269 _000700_hash NULL
++_000701_hash __alloc_extent_buffer 3 15093 _000701_hash NULL
++_000702_hash alloc_group_attrs 2 9194 _000727_hash NULL nohasharray
++_000703_hash alloc_large_system_hash 2 22391 _000703_hash NULL
++_000704_hash alloc_netdev_mqs 1 30030 _000704_hash NULL
++_000705_hash __alloc_objio_seg 1 7203 _000705_hash NULL
++_000706_hash alloc_ring 2-4 15345 _000706_hash NULL
++_000707_hash alloc_ring 2-4 39151 _000707_hash NULL
++_000710_hash alloc_session 1-2 64171 _000710_hash NULL
++_000714_hash alloc_skb 1 55439 _000714_hash NULL
++_000715_hash alloc_skb_fclone 1 3467 _000715_hash NULL
++_000716_hash alloc_smp_req 1 51337 _000716_hash NULL
++_000717_hash alloc_smp_resp 1 3566 _000717_hash NULL
++_000718_hash alloc_ts_config 1 45775 _000718_hash NULL
++_000719_hash alloc_upcall 2 62186 _000719_hash NULL
++_000720_hash altera_drscan 2 48698 _000720_hash NULL
++_000721_hash altera_irscan 2 62396 _000721_hash NULL
++_000722_hash altera_set_dr_post 2 54291 _000722_hash NULL
++_000723_hash altera_set_dr_pre 2 64862 _000723_hash NULL
++_000724_hash altera_set_ir_post 2 20948 _000724_hash NULL
++_000725_hash altera_set_ir_pre 2 54103 _000725_hash NULL
++_000726_hash altera_swap_dr 2 50090 _000726_hash NULL
++_000727_hash altera_swap_ir 2 9194 _000727_hash &_000702_hash
++_000728_hash amd_create_gatt_pages 1 20537 _000728_hash NULL
++_000729_hash aoechr_write 3 62883 _003674_hash NULL nohasharray
++_000730_hash applesmc_create_nodes 2 49392 _000730_hash NULL
++_000731_hash array_zalloc 1-2 7519 _000731_hash NULL
++_000733_hash arvo_sysfs_read 6 31617 _000733_hash NULL
++_000734_hash arvo_sysfs_write 6 3311 _000734_hash NULL
++_000735_hash asd_store_update_bios 4 10165 _000735_hash NULL
++_000736_hash ata_host_alloc 2 46094 _000736_hash NULL
++_000737_hash atalk_sendmsg 4 21677 _000737_hash NULL
++_000738_hash ath6kl_cfg80211_connect_event 7-9-8 13443 _000738_hash NULL
++_000739_hash ath6kl_mgmt_tx 9 21153 _000739_hash NULL
++_000740_hash ath6kl_wmi_proc_events_vif 5 42549 _003190_hash NULL nohasharray
++_000741_hash ath6kl_wmi_roam_tbl_event_rx 3 43440 _000741_hash NULL
++_000742_hash ath6kl_wmi_send_mgmt_cmd 7 17347 _000742_hash NULL
++_000743_hash ath_descdma_setup 5 12257 _000743_hash NULL
++_000744_hash ath_rx_edma_init 2 65483 _000744_hash NULL
++_000745_hash ati_create_gatt_pages 1 4722 _003275_hash NULL nohasharray
++_000746_hash audit_expand 2 2098 _000746_hash NULL
++_000747_hash audit_init_entry 1 38644 _000747_hash NULL
++_000748_hash ax25_sendmsg 4 62770 _000748_hash NULL
++_000749_hash b1_alloc_card 1 36155 _000749_hash NULL
++_000750_hash b43_nphy_load_samples 3 36481 _000750_hash NULL
++_000751_hash batadv_orig_hash_add_if 2 10033 _000751_hash NULL
++_000752_hash batadv_orig_hash_del_if 2 48972 _000752_hash NULL
++_000753_hash batadv_tt_append_diff 4 20588 _000753_hash NULL
++_000754_hash batadv_tt_commit_changes 4 2008 _000754_hash NULL
++_000755_hash batadv_tt_prepare_packet_buff 4 1280 _000755_hash NULL
++_000756_hash bio_copy_user_iov 4 37660 _000756_hash NULL
++_000757_hash __bio_map_kern 3 47379 _000757_hash NULL
++_000758_hash bitmap_resize 2 33054 _000758_hash NULL
++_000759_hash blk_check_plugged 3 50736 _000759_hash NULL
++_000760_hash blk_register_region 1-2 51424 _000760_hash NULL
++_000762_hash bm_entry_write 3 28338 _000762_hash NULL
++_000763_hash bm_realloc_pages 2 9431 _000763_hash NULL
++_000764_hash bm_register_write 3 9893 _000764_hash &_000579_hash
++_000765_hash bm_status_write 3 12964 _000765_hash NULL
++_000766_hash br_mdb_rehash 2 42643 _000766_hash NULL
++_000767_hash btmrvl_sdio_host_to_card 3 12152 _000767_hash NULL
++_000768_hash btrfs_copy_from_user 1-3 43806 _000768_hash NULL
++_000770_hash btrfs_insert_delayed_dir_index 4 63720 _000770_hash NULL
++_000771_hash __btrfs_map_block 3 49839 _000771_hash NULL
++_000772_hash c4iw_init_resource 2-3 30393 _000772_hash NULL
++_000774_hash cache_downcall 3 13666 _000774_hash NULL
++_000775_hash cache_slow_downcall 2 8570 _000775_hash NULL
++_000776_hash caif_seqpkt_sendmsg 4 22961 _000776_hash NULL
++_000777_hash caif_stream_sendmsg 4 9110 _000777_hash NULL
++_000778_hash carl9170_cmd_buf 3 950 _000778_hash NULL
++_000779_hash cdev_add 2-3 38176 _000779_hash NULL
++_000781_hash cdrom_read_cdda 4 50478 _000781_hash NULL
++_000782_hash ceph_dns_resolve_name 2 62488 _000782_hash NULL
++_000783_hash ceph_msgpool_get 2 54258 _000783_hash NULL
++_000784_hash cfg80211_connect_result 4-6 56515 _000784_hash NULL
++_000786_hash cfg80211_disconnected 4 57 _000786_hash NULL
++_000787_hash cfg80211_inform_bss 8 19332 _000787_hash NULL
++_000788_hash cfg80211_inform_bss_frame 4 41078 _000788_hash NULL
++_000789_hash cfg80211_mlme_register_mgmt 5 19852 _000789_hash NULL
++_000790_hash cfg80211_roamed_bss 4-6 50198 _000790_hash NULL
++_000792_hash cgroup_file_write 3 52417 _000792_hash NULL
++_000793_hash cifs_readdata_alloc 1 26360 _000793_hash NULL
++_000794_hash cifs_readv_from_socket 3 19109 _000794_hash NULL
++_000795_hash cifs_writedata_alloc 1 32880 _003097_hash NULL nohasharray
++_000796_hash cnic_alloc_dma 3 34641 _000796_hash NULL
++_000797_hash cnic_init_id_tbl 2 41354 _000797_hash NULL
++_000798_hash configfs_write_file 3 61621 _000798_hash NULL
++_000799_hash construct_key 3 11329 _000799_hash NULL
++_000800_hash context_alloc 3 24645 _000800_hash NULL
++_000801_hash copy_to_user 3 57835 _000801_hash NULL
++_000802_hash cp210x_get_config 4 56229 _000802_hash NULL
++_000803_hash create_attr_set 1 22861 _000803_hash NULL
++_000804_hash create_bounce_buffer 3 39155 _000804_hash NULL
++_000805_hash create_gpadl_header 2 19064 _000805_hash NULL
++_000806_hash _create_sg_bios 4 31244 _000806_hash NULL
++_000807_hash cryptd_alloc_instance 2-3 18048 _000807_hash NULL
++_000809_hash crypto_ahash_setkey 3 55134 _000809_hash NULL
++_000810_hash crypto_alloc_instance2 3 25277 _000810_hash NULL
++_000811_hash crypto_shash_setkey 3 60483 _000811_hash NULL
++_000812_hash cxgb_alloc_mem 1 24007 _000812_hash NULL
++_000813_hash cxgbi_device_portmap_create 3 25747 _000813_hash NULL
++_000814_hash cxgbi_device_register 1-2 36746 _000814_hash NULL
++_000816_hash __cxio_init_resource_fifo 3 23447 _000816_hash NULL
++_000817_hash dccp_sendmsg 4 56058 _000817_hash NULL
++_000818_hash ddp_make_gl 1 12179 _000818_hash NULL
++_000819_hash depth_write 3 3021 _000819_hash NULL
++_000820_hash dev_irnet_write 3 11398 _000820_hash NULL
++_000821_hash dev_set_alias 3 50084 _000821_hash NULL
++_000822_hash dev_write 3 7708 _000822_hash NULL
++_000823_hash dfs_global_file_write 3 6112 _000823_hash NULL
++_000824_hash dgram_sendmsg 4 45679 _000824_hash NULL
++_000825_hash disconnect 4 32521 _000825_hash NULL
++_000826_hash dma_attach 6-7 50831 _000826_hash NULL
++_000828_hash dma_declare_coherent_memory 4-2 14244 _000828_hash NULL
++_000829_hash dn_sendmsg 4 38390 _000829_hash NULL
++_000830_hash dn_setsockopt 5 314 _000830_hash NULL
++_000831_hash do_arpt_set_ctl 4 51053 _000831_hash NULL
++_000832_hash do_dccp_setsockopt 5 54377 _003195_hash NULL nohasharray
++_000833_hash do_ip6t_set_ctl 4 60040 _000833_hash NULL
++_000834_hash do_ipt_set_ctl 4 56238 _000834_hash NULL
++_000835_hash do_jffs2_setxattr 5 25910 _000835_hash NULL
++_000836_hash do_msgsnd 4 1387 _000836_hash NULL
++_000837_hash do_pselect 1 62061 _000837_hash NULL
++_000838_hash do_raw_setsockopt 5 55215 _000838_hash NULL
++_000839_hash do_readv_writev 4 51849 _000839_hash NULL
++_000840_hash do_sync 1 9604 _000840_hash NULL
++_000841_hash dup_array 3 33551 _000841_hash NULL
++_000842_hash ecryptfs_decode_and_decrypt_filename 5 10379 _000842_hash NULL
++_000843_hash ecryptfs_encrypt_and_encode_filename 6 2109 _000843_hash NULL
++_000844_hash ecryptfs_send_message_locked 2 31801 _000844_hash NULL
++_000845_hash edac_device_alloc_ctl_info 1 5941 _000845_hash NULL
++_000846_hash edac_mc_alloc 4 3611 _000846_hash NULL
++_000847_hash edac_pci_alloc_ctl_info 1 63388 _000847_hash NULL
++_000848_hash efivar_create_sysfs_entry 2 19485 _000848_hash NULL
++_000849_hash enable_write 3 30456 _000849_hash NULL
++_000850_hash enclosure_register 3 57412 _000850_hash NULL
++_000851_hash enlarge_skb 2 44248 _002839_hash NULL nohasharray
++_000852_hash evdev_ioctl_handler 2 21705 _000852_hash NULL
++_000853_hash ext4_kvzalloc 1 47605 _000853_hash NULL
++_000854_hash extend_netdev_table 2 21453 _000854_hash NULL
++_000855_hash fcoe_ctlr_device_add 3 1793 _000855_hash NULL
++_000856_hash fd_do_readv 3 51297 _000856_hash NULL
++_000857_hash fd_do_writev 3 29329 _000857_hash NULL
++_000858_hash __feat_register_sp 6 64712 _000858_hash NULL
++_000859_hash __ffs_ep0_read_events 3 48868 _000859_hash NULL
++_000860_hash ffs_ep0_write 3 9438 _000860_hash NULL
++_000861_hash ffs_epfile_read 3 18775 _000861_hash NULL
++_000862_hash ffs_epfile_write 3 48014 _000862_hash NULL
++_000863_hash fib_info_hash_alloc 1 9075 _000863_hash NULL
++_000864_hash fillonedir 3 41746 _000864_hash NULL
++_000865_hash fs_devrw_entry 3 11924 _000865_hash NULL
++_000866_hash fs_path_prepare_for_add 2 61854 _000866_hash NULL
++_000867_hash fuse_fill_write_pages 4 53682 _000867_hash NULL
++_000868_hash fw_device_op_ioctl 2 11595 _000868_hash NULL
++_000869_hash fw_iso_buffer_init 3 54582 _000869_hash NULL
++_000870_hash fw_node_create 2 9559 _000870_hash NULL
++_000871_hash garmin_read_process 3 27509 _000871_hash NULL
++_000872_hash garp_request_join 4 7471 _000872_hash NULL
++_000873_hash generic_perform_write 3 54832 _000873_hash NULL
++_000874_hash gen_pool_add_virt 4 39913 _000874_hash NULL
++_000875_hash get_derived_key 4 61100 _000875_hash NULL
++_000876_hash get_new_cssid 2 51665 _000876_hash NULL
++_000877_hash getxattr 4 24398 _003758_hash NULL nohasharray
++_000878_hash gsm_control_reply 4 53333 _000878_hash NULL
++_000879_hash hcd_alloc_coherent 5 55862 _000879_hash NULL
++_000880_hash hci_sock_sendmsg 4 37420 _000880_hash NULL
++_000881_hash hidraw_ioctl 2 63658 _000881_hash NULL
++_000882_hash hidraw_write 3 31536 _000882_hash NULL
++_000883_hash hid_register_field 2-3 4874 _000883_hash NULL
++_000885_hash hid_report_raw_event 4 2762 _000885_hash NULL
++_000886_hash hpi_alloc_control_cache 1 35351 _000886_hash NULL
++_000887_hash hugetlbfs_read_actor 2-5-4 34547 _000887_hash NULL
++_000890_hash hvc_alloc 4 12579 _000890_hash NULL
++_000891_hash __hwahc_dev_set_key 5 46328 _000891_hash NULL
++_000892_hash i2400m_zrealloc_2x 3 54166 _001549_hash NULL nohasharray
++_000893_hash ib_alloc_device 1 26483 _000893_hash NULL
++_000894_hash ib_create_send_mad 5 1196 _000894_hash NULL
++_000895_hash ibmasm_new_command 2 25714 _000895_hash NULL
++_000896_hash ib_send_cm_drep 3 50186 _000896_hash NULL
++_000897_hash ib_send_cm_mra 4 60202 _003063_hash NULL nohasharray
++_000898_hash ib_send_cm_rtu 3 63138 _000898_hash NULL
++_000899_hash ide_core_cp_entry 3 22636 _000899_hash NULL
++_000900_hash ieee80211_if_write_smps 3 35550 _000900_hash NULL
++_000901_hash ieee80211_if_write_tkip_mic_test 3 58748 _000901_hash NULL
++_000902_hash ieee80211_if_write_tsf 3 36077 _000902_hash NULL
++_000903_hash ieee80211_if_write_uapsd_max_sp_len 3 14233 _000903_hash NULL
++_000904_hash ieee80211_if_write_uapsd_queues 3 51526 _000904_hash NULL
++_000905_hash ieee80211_key_alloc 3 19065 _000905_hash NULL
++_000906_hash ieee80211_send_probe_req 6-4 6924 _000906_hash NULL
++_000907_hash ieee80211_skb_resize 3 50211 _000907_hash NULL
++_000908_hash if_spi_host_to_card 4 62890 _000908_hash NULL
++_000909_hash if_writecmd 2 815 _000909_hash NULL
++_000910_hash init_bch 1-2 64130 _000910_hash NULL
++_000912_hash init_ipath 1 48187 _000912_hash NULL
++_000913_hash init_list_set 2-3 39188 _000913_hash NULL
++_000915_hash init_q 4 132 _000915_hash NULL
++_000916_hash init_state 2 60165 _000916_hash NULL
++_000917_hash init_tag_map 3 57515 _000917_hash NULL
++_000918_hash input_ff_create 2 21240 _000918_hash NULL
++_000919_hash input_mt_init_slots 2 31183 _000919_hash NULL
++_000920_hash interfaces 2 38859 _000920_hash NULL
++_000921_hash int_hardware_entry 3 36833 _000921_hash NULL
++_000922_hash int_hw_irq_en 3 46776 _000922_hash NULL
++_000923_hash int_tasklet_entry 3 52500 _000923_hash NULL
++_000924_hash ioat2_alloc_ring 2 11172 _000924_hash NULL
++_000925_hash ip_generic_getfrag 3-4 12187 _000925_hash NULL
++_000927_hash ip_options_get_alloc 1 7448 _000927_hash NULL
++_000928_hash ipr_alloc_ucode_buffer 1 40199 _000928_hash NULL
++_000929_hash ip_set_alloc 1 57953 _000929_hash NULL
++_000930_hash ip_setsockopt 5 33487 _000930_hash NULL
++_000931_hash ipv6_flowlabel_opt 3 58135 _001179_hash NULL nohasharray
++_000932_hash ipv6_renew_options 5 28867 _000932_hash NULL
++_000933_hash ipv6_setsockopt 5 29871 _000933_hash NULL
++_000934_hash ipxrtr_route_packet 4 54036 _000934_hash NULL
++_000935_hash irda_sendmsg 4 4388 _000935_hash NULL
++_000936_hash irda_sendmsg_dgram 4 38563 _000936_hash NULL
++_000937_hash irda_sendmsg_ultra 4 42047 _000937_hash NULL
++_000938_hash irias_add_octseq_attrib 4 29983 _000938_hash NULL
++_000939_hash irq_alloc_generic_chip 2 26650 _000939_hash NULL
++_000940_hash iscsi_alloc_session 3 49390 _000940_hash NULL
++_000941_hash iscsi_create_conn 2 50425 _000941_hash NULL
++_000942_hash iscsi_create_endpoint 1 15193 _000942_hash NULL
++_000943_hash iscsi_create_iface 5 38510 _000943_hash NULL
++_000944_hash iscsi_decode_text_input 4 58292 _000944_hash NULL
++_000945_hash iscsi_pool_init 2-4 54913 _000945_hash NULL
++_000947_hash iscsit_dump_data_payload 2 38683 _000947_hash NULL
++_000948_hash isdn_write 3 45863 _000948_hash NULL
++_000949_hash isku_receive 4 54130 _000949_hash NULL
++_000950_hash islpci_mgt_transaction 5 23610 _000950_hash NULL
++_000951_hash iso_alloc_urb 4-5 45206 _000951_hash NULL
++_000952_hash iso_sched_alloc 1 13377 _003325_hash NULL nohasharray
++_000953_hash iwl_trans_txq_alloc 3 36147 _000953_hash NULL
++_000954_hash ixgbe_alloc_q_vector 4-6 24439 _000954_hash NULL
++_000956_hash jbd2_journal_init_revoke 2 51088 _000956_hash NULL
++_000957_hash jffs2_write_dirent 5 37311 _000957_hash NULL
++_000958_hash journal_init_revoke 2 56933 _000958_hash NULL
++_000959_hash keyctl_instantiate_key 3 41855 _000959_hash NULL
++_000960_hash keyctl_instantiate_key_iov 3 16969 _000960_hash NULL
++_000961_hash __kfifo_from_user 3 20399 _000961_hash NULL
++_000962_hash kimage_crash_alloc 3 3233 _000962_hash NULL
++_000963_hash kimage_normal_alloc 3 31140 _000963_hash NULL
++_000964_hash kmem_realloc 2 37489 _000964_hash NULL
++_000965_hash kmem_zalloc 1 11510 _000965_hash NULL
++_000966_hash koneplus_sysfs_read 6 42792 _000966_hash NULL
++_000967_hash kvm_kvzalloc 1 52894 _000967_hash NULL
++_000968_hash kvm_read_guest_page_mmu 6 37611 _000968_hash NULL
++_000969_hash kvm_set_irq_routing 3 48704 _000969_hash NULL
++_000970_hash kvm_write_guest_cached 4 11106 _000970_hash NULL
++_000971_hash kvm_write_guest_page 5 63555 _002812_hash NULL nohasharray
++_000972_hash kzalloc_node 1 24352 _000972_hash NULL
++_000973_hash l2cap_skbuff_fromiovec 3-4 35003 _000973_hash NULL
++_000975_hash l2tp_ip_sendmsg 4 50411 _000975_hash NULL
++_000976_hash l2tp_session_create 1 25286 _000976_hash NULL
++_000977_hash lc_create 3 48662 _000977_hash NULL
++_000978_hash leaf_dealloc 3 29566 _000978_hash NULL
++_000979_hash linear_conf 2 23485 _003837_hash NULL nohasharray
++_000980_hash llc_ui_sendmsg 4 24987 _000980_hash NULL
++_000981_hash load_module 2 60056 _003010_hash NULL nohasharray
++_000982_hash lpfc_sli4_queue_alloc 3 62646 _000982_hash NULL
++_000983_hash mdiobus_alloc_size 1 52259 _000983_hash NULL
++_000984_hash mempool_create_node 1 3191 _000984_hash NULL
++_000985_hash mem_read 3 57631 _000985_hash NULL
++_000986_hash memstick_alloc_host 1 142 _000986_hash NULL
++_000987_hash mem_swapout_entry 3 32586 _000987_hash NULL
++_000988_hash mem_write 3 22232 _000988_hash NULL
++_000989_hash mesh_table_alloc 1 22305 _000989_hash NULL
++_000990_hash mfd_add_devices 4 16668 _000990_hash NULL
++_000991_hash mISDN_sock_sendmsg 4 41035 _000991_hash NULL
++_000992_hash mlx4_init_icm_table 4-5 2151 _000992_hash NULL
++_000994_hash mmc_alloc_host 1 48097 _000994_hash NULL
++_000995_hash mmc_test_alloc_mem 2-3 28102 _000995_hash NULL
++_000997_hash mon_bin_ioctl 3 2771 _000997_hash NULL
++_000998_hash mpi_alloc 1 18094 _000998_hash NULL
++_000999_hash mpihelp_mul_karatsuba_case 5-3 23918 _003061_hash NULL nohasharray
++_001000_hash __mptctl_ioctl 2 15875 _001000_hash NULL
++_001001_hash mtd_concat_create 2 14416 _001001_hash NULL
++_001002_hash mthca_alloc_cq_buf 3 46512 _001002_hash NULL
++_001003_hash mvumi_alloc_mem_resource 3 47750 _001003_hash NULL
++_001004_hash mwifiex_11n_create_rx_reorder_tbl 4 63806 _001004_hash NULL
++_001005_hash mwifiex_alloc_sdio_mpa_buffers 2-3 60961 _001005_hash NULL
++_001007_hash mwl8k_cmd_set_beacon 4 23110 _001007_hash NULL
++_001008_hash neigh_hash_alloc 1 17595 _001008_hash NULL
++_001009_hash __netdev_alloc_skb 2 18595 _001009_hash NULL
++_001010_hash __netlink_change_ngroups 2 46156 _001010_hash NULL
++_001011_hash netlink_sendmsg 4 33708 _001236_hash NULL nohasharray
++_001012_hash netxen_alloc_sds_rings 2 13417 _001012_hash NULL
++_001013_hash new_bind_ctl 2 35324 _001013_hash NULL
++_001014_hash new_dir 3 31919 _001014_hash NULL
++_001015_hash new_tape_buffer 2 32866 _001015_hash NULL
++_001016_hash nfc_llcp_build_tlv 3 19536 _001016_hash NULL
++_001017_hash nfc_llcp_send_i_frame 3 59130 _001017_hash NULL
++_001018_hash nf_ct_ext_create 3 51232 _001018_hash NULL
++_001019_hash nfs4_alloc_pages 1 48426 _001019_hash NULL
++_001020_hash nfs4_alloc_slots 1 2454 _003345_hash NULL nohasharray
++_001021_hash nfsctl_transaction_write 3 64800 _001021_hash NULL
++_001022_hash nfs_fscache_get_super_cookie 3 44355 _001850_hash NULL nohasharray
++_001023_hash nfs_idmap_request_key 3 30208 _001023_hash NULL
++_001024_hash nfs_pgarray_set 2 1085 _001024_hash NULL
++_001025_hash nl_pid_hash_zalloc 1 23314 _001025_hash NULL
++_001026_hash nr_sendmsg 4 53656 _001026_hash NULL
++_001027_hash nsm_create_handle 4 38060 _001027_hash NULL
++_001028_hash ntfs_copy_from_user_iovec 3-6 49829 _001028_hash NULL
++_001030_hash ntfs_file_buffered_write 4-6 41442 _001030_hash NULL
++_001032_hash __ntfs_malloc 1 34022 _001032_hash NULL
++_001033_hash nvme_alloc_queue 3 46865 _001033_hash NULL
++_001034_hash nvme_map_user_pages 3-4 41093 _001639_hash NULL nohasharray
++_001036_hash ocfs2_acl_from_xattr 2 21604 _001036_hash NULL
++_001037_hash ocfs2_control_message 3 19564 _001037_hash NULL
++_001038_hash _ore_get_io_state 3-5-4 2166 _001038_hash NULL
++_001041_hash orinoco_set_key 5-7 17878 _001041_hash NULL
++_001043_hash osdmap_set_max_osd 2 57630 _002267_hash NULL nohasharray
++_001044_hash _osd_realloc_seg 3 54352 _001044_hash NULL
++_001045_hash osst_execute 7-6 17607 _001045_hash NULL
++_001046_hash osst_write 3 31581 _001046_hash NULL
++_001047_hash otp_read 2-5-4 10594 _001047_hash NULL
++_001050_hash ovs_vport_alloc 1 33475 _001050_hash NULL
++_001051_hash p54_parse_rssical 3 64493 _001051_hash NULL
++_001052_hash p9_client_zc_rpc 7 14345 _001052_hash NULL
++_001053_hash packet_sendmsg_spkt 4 28885 _001053_hash NULL
++_001054_hash pair_device 4 61175 _003161_hash NULL nohasharray
++_001055_hash pccard_store_cis 6 18176 _001055_hash NULL
++_001056_hash pci_add_cap_save_buffer 3 3426 _001056_hash NULL
++_001057_hash pcnet32_realloc_rx_ring 3 36598 _001057_hash NULL
++_001058_hash pcnet32_realloc_tx_ring 3 38428 _001058_hash NULL
++_001059_hash pcpu_mem_zalloc 1 22948 _001059_hash NULL
++_001060_hash pep_sendmsg 4 62524 _001060_hash NULL
++_001061_hash pfkey_sendmsg 4 47394 _001061_hash NULL
++_001062_hash pidlist_resize 2 496 _001062_hash NULL
++_001063_hash pin_code_reply 4 46510 _001063_hash NULL
++_001064_hash ping_getfrag 3-4 8360 _001064_hash NULL
++_001066_hash pipe_set_size 2 5204 _001066_hash NULL
++_001067_hash pkt_bio_alloc 1 48284 _001067_hash NULL
++_001068_hash platform_create_bundle 4-6 12785 _001068_hash NULL
++_001070_hash pm8001_store_update_fw 4 55716 _001070_hash NULL
++_001071_hash pmcraid_alloc_sglist 1 9864 _001071_hash NULL
++_001072_hash pn533_dep_link_up 5 22154 _001072_hash NULL
++_001073_hash pn533_init_target_frame 3 65438 _001073_hash NULL
++_001074_hash pnp_alloc 1 24869 _001538_hash NULL nohasharray
++_001075_hash pn_sendmsg 4 12640 _001075_hash NULL
++_001076_hash pppoe_sendmsg 4 48039 _001076_hash NULL
++_001077_hash pppol2tp_sendmsg 4 56420 _001077_hash NULL
++_001078_hash prism2_info_hostscanresults 3 39657 _001078_hash NULL
++_001079_hash process_vm_rw 3-5 47533 _001079_hash NULL
++_001081_hash process_vm_rw_single_vec 1-2 26213 _001081_hash NULL
++_001083_hash proc_write 3 51003 _001083_hash NULL
++_001084_hash profile_load 3 58267 _001084_hash NULL
++_001085_hash profile_remove 3 8556 _001085_hash NULL
++_001086_hash profile_replace 3 14652 _001086_hash NULL
++_001087_hash pscsi_get_bio 1 56103 _001087_hash NULL
++_001088_hash __pskb_copy 2 9038 _001088_hash NULL
++_001089_hash __pskb_pull_tail 2 60287 _001089_hash NULL
++_001090_hash qla4xxx_alloc_work 2 44813 _001090_hash NULL
++_001091_hash qlcnic_alloc_msix_entries 2 46160 _001091_hash NULL
++_001092_hash qlcnic_alloc_sds_rings 2 26795 _001092_hash NULL
++_001093_hash queue_received_packet 5 9657 _001093_hash NULL
++_001094_hash raw_send_hdrinc 4 58803 _001094_hash NULL
++_001095_hash raw_sendmsg 4 23078 _003316_hash NULL nohasharray
++_001096_hash rawsock_sendmsg 4 60010 _001096_hash NULL
++_001097_hash rawv6_send_hdrinc 3 35425 _001097_hash NULL
++_001098_hash rawv6_setsockopt 5 56165 _001098_hash NULL
++_001099_hash rb_alloc 1 3102 _001099_hash NULL
++_001100_hash rbd_alloc_coll 1 33678 _001100_hash NULL
++_001101_hash rbd_create_rw_ops 1 55297 _001101_hash NULL
++_001102_hash rds_ib_inc_copy_to_user 3 55007 _001102_hash NULL
++_001103_hash rds_iw_inc_copy_to_user 3 29214 _001103_hash NULL
++_001104_hash rds_message_alloc 1 10517 _001104_hash NULL
++_001105_hash rds_message_copy_from_user 3 45510 _001105_hash NULL
++_001106_hash rds_message_inc_copy_to_user 3 26540 _001106_hash NULL
++_001107_hash regcache_rbtree_insert_to_block 5 58009 _001107_hash NULL
++_001108_hash _regmap_raw_write 4 42652 _001108_hash NULL
++_001109_hash regmap_register_patch 3 21681 _001109_hash NULL
++_001110_hash relay_alloc_page_array 1 52735 _001110_hash NULL
++_001111_hash remove_uuid 4 64505 _001111_hash NULL
++_001112_hash reshape_ring 2 29147 _001112_hash NULL
++_001113_hash RESIZE_IF_NEEDED 2 56286 _001113_hash NULL
++_001114_hash resize_info_buffer 2 62889 _001114_hash NULL
++_001115_hash resize_stripes 2 61650 _001115_hash NULL
++_001116_hash rfcomm_sock_sendmsg 4 37661 _003661_hash NULL nohasharray
++_001117_hash roccat_common2_send_with_status 4 50343 _001117_hash NULL
++_001118_hash rose_sendmsg 4 20249 _001118_hash NULL
++_001119_hash rsc_mgr_init 3 16299 _001119_hash NULL
++_001120_hash rxrpc_send_data 5 21553 _001120_hash NULL
++_001121_hash rxrpc_setsockopt 5 50286 _001121_hash NULL
++_001122_hash savu_sysfs_read 6 49473 _001122_hash NULL
++_001124_hash sco_send_frame 3 41815 _001124_hash NULL
++_001125_hash scsi_dispatch_cmd_entry 3 49848 _001125_hash NULL
++_001126_hash scsi_host_alloc 2 63041 _001126_hash NULL
++_001127_hash scsi_tgt_kspace_exec 8 9522 _001127_hash NULL
++_001128_hash sctp_sendmsg 4 61919 _001128_hash NULL
++_001129_hash sctp_setsockopt 5 44788 _001129_hash NULL
++_001130_hash sctp_setsockopt_connectx 3 6073 _001130_hash NULL
++_001131_hash sctp_setsockopt_connectx_old 3 22631 _001131_hash NULL
++_001132_hash sctp_tsnmap_grow 2 32784 _001132_hash NULL
++_001133_hash sctp_tsnmap_init 2 36446 _001133_hash NULL
++_001134_hash sctp_user_addto_chunk 2-3 62047 _001134_hash NULL
++_001136_hash security_context_to_sid 2 19839 _001136_hash NULL
++_001137_hash security_context_to_sid_default 2 3492 _003841_hash NULL nohasharray
++_001138_hash security_context_to_sid_force 2 20724 _001138_hash NULL
++_001139_hash self_check_write 5 50856 _001139_hash NULL
++_001140_hash selinux_transaction_write 3 59038 _001140_hash NULL
++_001141_hash sel_write_access 3 51704 _001141_hash NULL
++_001142_hash sel_write_create 3 11353 _001142_hash NULL
++_001143_hash sel_write_member 3 28800 _001143_hash NULL
++_001144_hash sel_write_relabel 3 55195 _001144_hash NULL
++_001145_hash sel_write_user 3 45060 _001145_hash NULL
++_001146_hash __seq_open_private 3 40715 _001146_hash NULL
++_001147_hash serverworks_create_gatt_pages 1 46582 _001147_hash NULL
++_001148_hash set_connectable 4 56458 _001148_hash NULL
++_001149_hash set_dev_class 4 39645 _001921_hash NULL nohasharray
++_001150_hash set_discoverable 4 48141 _001150_hash NULL
++_001151_hash set_fd_set 1 35249 _001151_hash NULL
++_001152_hash setkey 3 14987 _001152_hash NULL
++_001153_hash set_le 4 30581 _001153_hash NULL
++_001154_hash set_link_security 4 4502 _001154_hash NULL
++_001155_hash set_local_name 4 55757 _001155_hash NULL
++_001156_hash set_powered 4 12129 _001156_hash NULL
++_001157_hash set_ssp 4 62411 _001157_hash NULL
++_001158_hash sg_build_sgat 3 60179 _001158_hash &_000314_hash
++_001159_hash sg_read_oxfer 3 51724 _001159_hash NULL
++_001160_hash shmem_xattr_set 4 11843 _001160_hash NULL
++_001161_hash simple_alloc_urb 3 60420 _001161_hash NULL
++_001162_hash sisusb_send_bridge_packet 2 11649 _001162_hash NULL
++_001163_hash sisusb_send_packet 2 20891 _001163_hash NULL
++_001164_hash sisusb_write_mem_bulk 4 29678 _001164_hash NULL
++_001165_hash skb_add_data_nocache 4 4682 _001165_hash NULL
++_001166_hash skb_copy_datagram_from_iovec 2-5-4 52014 _001166_hash NULL
++_001169_hash skb_copy_expand 2-3 7685 _001169_hash &_000677_hash
++_001171_hash skb_copy_to_page_nocache 6 58624 _001171_hash NULL
++_001172_hash __skb_cow 2 39254 _001172_hash NULL
++_001173_hash skb_cow_data 2 11565 _001173_hash NULL
++_001174_hash skb_pad 2 17302 _001174_hash NULL
++_001175_hash skb_realloc_headroom 2 19516 _001175_hash NULL
++_001176_hash sk_chk_filter 2 42095 _001176_hash NULL
++_001177_hash skcipher_sendmsg 4 30290 _001177_hash NULL
++_001178_hash sl_change_mtu 2 7396 _001178_hash NULL
++_001179_hash slhc_init 1-2 58135 _001179_hash &_000931_hash
++_001181_hash sm501_create_subdev 3-4 48668 _001245_hash NULL nohasharray
++_001183_hash smk_user_access 3 24440 _001183_hash NULL
++_001184_hash smk_write_cipso2 3 1021 _001184_hash NULL
++_001185_hash smk_write_cipso 3 17989 _001185_hash NULL
++_001186_hash smk_write_load2 3 52155 _001186_hash NULL
++_001187_hash smk_write_load 3 26829 _001187_hash NULL
++_001188_hash smk_write_load_self2 3 591 _001188_hash NULL
++_001189_hash smk_write_load_self 3 7958 _001189_hash NULL
++_001190_hash snapshot_write 3 28351 _001190_hash NULL
++_001191_hash snd_ac97_pcm_assign 2 30218 _001191_hash NULL
++_001192_hash snd_card_create 4 64418 _001529_hash NULL nohasharray
++_001193_hash snd_emux_create_port 3 42533 _001193_hash NULL
++_001194_hash snd_gus_dram_write 4 38784 _001194_hash NULL
++_001195_hash snd_midi_channel_alloc_set 1 28153 _001195_hash NULL
++_001196_hash _snd_pcm_lib_alloc_vmalloc_buffer 2 17820 _001196_hash NULL
++_001197_hash snd_pcm_oss_sync1 2 45298 _001197_hash NULL
++_001198_hash snd_pcm_oss_write 3 38108 _001198_hash NULL
++_001199_hash snd_pcm_plugin_build 5 25505 _001199_hash NULL
++_001200_hash snd_rawmidi_kernel_write 3 25106 _001200_hash NULL
++_001201_hash snd_rawmidi_write 3 28008 _001201_hash NULL
++_001202_hash snd_rme32_playback_copy 5 43732 _001202_hash NULL
++_001203_hash snd_rme96_playback_copy 5 13111 _001203_hash NULL
++_001204_hash snd_seq_device_new 4 31753 _001204_hash NULL
++_001205_hash snd_seq_oss_readq_new 2 14283 _001205_hash NULL
++_001206_hash snd_vx_create 4 40948 _001206_hash NULL
++_001207_hash sock_setsockopt 5 50088 _001207_hash NULL
++_001208_hash sound_write 3 5102 _001208_hash NULL
++_001209_hash _sp2d_alloc 1-3-2 16944 _001209_hash NULL
++_001212_hash spi_alloc_master 2 45223 _001212_hash NULL
++_001213_hash spidev_message 3 5518 _001213_hash NULL
++_001214_hash spi_register_board_info 2 35651 _001214_hash NULL
++_001215_hash squashfs_cache_init 2 41656 _001215_hash NULL
++_001216_hash squashfs_read_data 6 59440 _001216_hash NULL
++_001217_hash squashfs_read_fragment_index_table 4 2506 _001217_hash NULL
++_001218_hash squashfs_read_id_index_table 4 61961 _001218_hash NULL
++_001219_hash squashfs_read_inode_lookup_table 4 64739 _001219_hash NULL
++_001220_hash srp_alloc_iu 2 44227 _001220_hash NULL
++_001221_hash srp_iu_pool_alloc 2 17920 _001221_hash NULL
++_001222_hash srp_ring_alloc 2 26760 _001222_hash NULL
++_001226_hash start_isoc_chain 2 565 _001226_hash NULL
++_001227_hash st_write 3 16874 _001227_hash NULL
++_001228_hash svc_pool_map_alloc_arrays 2 47181 _001228_hash NULL
++_001229_hash symtab_init 2 61050 _001229_hash NULL
++_001230_hash sys_bind 3 10799 _001230_hash NULL
++_001231_hash sys_connect 3 15291 _003816_hash NULL nohasharray
++_001232_hash sys_flistxattr 3 41407 _001232_hash NULL
++_001233_hash sys_fsetxattr 4 49736 _001233_hash NULL
++_001234_hash sysfs_write_file 3 57116 _001234_hash NULL
++_001235_hash sys_ipc 3 4889 _001235_hash NULL
++_001236_hash sys_keyctl 4 33708 _001236_hash &_001011_hash
++_001237_hash sys_listxattr 3 27833 _001237_hash NULL
++_001238_hash sys_llistxattr 3 4532 _001238_hash NULL
++_001239_hash sys_lsetxattr 4 61177 _001239_hash NULL
++_001240_hash sys_mq_timedsend 3 57661 _001240_hash NULL
++_001241_hash sys_sched_setaffinity 2 32046 _001241_hash NULL
++_001242_hash sys_select 1 38827 _001242_hash NULL
++_001243_hash sys_semop 3 39457 _001243_hash NULL
++_001244_hash sys_sendto 6 20809 _001244_hash NULL
++_001245_hash sys_setgroups 1 48668 _001245_hash &_001181_hash
++_001246_hash sys_setgroups16 1 48882 _001246_hash NULL
++_001247_hash sys_setxattr 4 37880 _001247_hash NULL
++_001248_hash t4_alloc_mem 1 32342 _001248_hash NULL
++_001249_hash tcf_hash_create 4 54360 _001249_hash NULL
++_001250_hash tcp_send_rcvq 3 11316 _001250_hash NULL
++_001251_hash __team_options_register 3 63941 _001251_hash NULL
++_001252_hash test_unaligned_bulk 3 52333 _001252_hash NULL
++_001253_hash tifm_alloc_adapter 1 10903 _001253_hash NULL
++_001254_hash timeout_write 3 50991 _001254_hash NULL
++_001255_hash timeradd_entry 3 49850 _001255_hash NULL
++_001256_hash tipc_link_send_sections_fast 4 37920 _001256_hash NULL
++_001257_hash tipc_subseq_alloc 1 5957 _001257_hash NULL
++_001258_hash tnode_alloc 1 49407 _001258_hash NULL
++_001259_hash tomoyo_commit_ok 2 20167 _001259_hash NULL
++_001260_hash tomoyo_scan_bprm 2-4 15642 _003488_hash NULL nohasharray
++_001262_hash tps6586x_writes 3 58689 _001262_hash NULL
++_001263_hash tty_buffer_find 2 2443 _001263_hash NULL
++_001264_hash tty_write 3 5494 _001264_hash NULL
++_001265_hash ubifs_setxattr 4 59650 _001477_hash NULL nohasharray
++_001266_hash ubi_self_check_all_ff 4 41959 _001266_hash NULL
++_001267_hash udf_sb_alloc_partition_maps 2 62313 _001267_hash NULL
++_001268_hash udplite_getfrag 3-4 14479 _001268_hash NULL
++_001270_hash ulong_write_file 3 26485 _001270_hash NULL
++_001271_hash unix_stream_sendmsg 4 61455 _001271_hash NULL
++_001272_hash unlink_queued 3-4 645 _001272_hash NULL
++_001273_hash update_pmkid 4 2481 _001273_hash NULL
++_001274_hash usb_alloc_coherent 2 65444 _001274_hash NULL
++_001275_hash vc_resize 2-3 3585 _001275_hash NULL
++_001277_hash vhci_write 3 2224 _001277_hash NULL
++_001278_hash __vhost_add_used_n 3 26554 _001278_hash NULL
++_001279_hash virtqueue_add_buf 3-4 59470 _001279_hash NULL
++_001281_hash vmalloc 1 15464 _001281_hash NULL
++_001282_hash vol_cdev_write 3 40915 _001282_hash NULL
++_001283_hash vxge_device_register 4 7752 _001283_hash NULL
++_001284_hash __vxge_hw_blockpool_malloc 2 5786 _001284_hash NULL
++_001285_hash __vxge_hw_channel_allocate 3 55462 _001285_hash NULL
++_001286_hash vzalloc 1 47421 _001286_hash NULL
++_001287_hash vzalloc_node 1 23424 _001287_hash NULL
++_001288_hash wa_nep_queue 2 8858 _001288_hash NULL
++_001289_hash __wa_xfer_setup_segs 2 56725 _001289_hash NULL
++_001290_hash wiphy_new 2 2482 _001290_hash NULL
++_001291_hash wm8350_block_write 3 19727 _001291_hash NULL
++_001292_hash wpan_phy_alloc 1 48056 _001292_hash NULL
++_001293_hash write_flush_pipefs 3 2021 _001293_hash NULL
++_001294_hash write_flush_procfs 3 44011 _001294_hash NULL
++_001295_hash wusb_ccm_mac 7 32199 _001295_hash NULL
++_001296_hash x25_sendmsg 4 12487 _001296_hash NULL
++_001297_hash xfrm_hash_alloc 1 10997 _001297_hash NULL
++_001298_hash _xfs_buf_get_pages 2 46811 _001298_hash NULL
++_001299_hash xfs_da_grow_inode_int 3 21785 _001299_hash NULL
++_001300_hash xfs_dir_cilookup_result 3 64288 _003160_hash NULL nohasharray
++_001301_hash xfs_idata_realloc 2 26199 _001301_hash NULL
++_001302_hash xfs_iext_add_indirect_multi 3 32400 _001302_hash NULL
++_001303_hash xfs_iext_inline_to_direct 2 12384 _001303_hash NULL
++_001304_hash xfs_iformat_local 4 49472 _001304_hash NULL
++_001305_hash xfs_iroot_realloc 2 46826 _001305_hash NULL
++_001306_hash xhci_alloc_stream_info 3 63902 _001306_hash NULL
++_001307_hash xlog_recover_add_to_trans 4 62839 _001307_hash NULL
++_001308_hash xprt_alloc 2 1475 _001308_hash NULL
++_001309_hash xt_alloc_table_info 1 57903 _001309_hash NULL
++_001310_hash _zd_iowrite32v_async_locked 3 39034 _001310_hash NULL
++_001311_hash zd_usb_iowrite16v 3 49744 _001311_hash NULL
++_001312_hash a2mp_send 4 41615 _001312_hash NULL
++_001313_hash acpi_ds_build_internal_package_obj 3 58271 _001313_hash NULL
++_001314_hash acpi_system_read_event 3 55362 _001314_hash NULL
++_001315_hash acpi_ut_create_buffer_object 1 42030 _001315_hash NULL
++_001316_hash acpi_ut_create_package_object 1 17594 _001316_hash NULL
++_001317_hash acpi_ut_create_string_object 1 15360 _001317_hash NULL
++_001318_hash ad7879_spi_multi_read 3 8218 _001318_hash NULL
++_001319_hash add_child 4 45201 _001319_hash NULL
++_001320_hash add_port 2 54941 _001320_hash NULL
++_001321_hash adu_read 3 24177 _001321_hash NULL
++_001322_hash afs_cell_create 2 27346 _001322_hash NULL
++_001323_hash agp_allocate_memory 2 58761 _001323_hash NULL
++_001324_hash agp_generic_alloc_user 1 9470 _001324_hash NULL
++_001325_hash alc_auto_create_extra_outs 2 18975 _001325_hash NULL
++_001326_hash alloc_agpphysmem_i8xx 1 39427 _001326_hash NULL
++_001327_hash allocate_cnodes 1 5329 _001327_hash NULL
++_001328_hash ___alloc_bootmem 1 11410 _001328_hash NULL
++_001329_hash __alloc_bootmem_low_node 2 25726 _001662_hash NULL nohasharray
++_001330_hash __alloc_bootmem_node 2 1992 _001330_hash NULL
++_001331_hash __alloc_bootmem_node_nopanic 2 6432 _001331_hash NULL
++_001332_hash __alloc_bootmem_nopanic 1 65397 _001332_hash NULL
++_001333_hash alloc_candev 1-2 7776 _001333_hash NULL
++_001335_hash _alloc_cdb_cont 2 23609 _001335_hash NULL
++_001336_hash alloc_dummy_extent_buffer 2 56374 _001336_hash NULL
++_001337_hash ____alloc_ei_netdev 1 51475 _001337_hash NULL
++_001338_hash alloc_etherdev_mqs 1 36450 _001338_hash NULL
++_001339_hash alloc_extent_buffer 3 52824 _001339_hash NULL
++_001340_hash alloc_fcdev 1 18780 _001340_hash NULL
++_001341_hash alloc_fddidev 1 15382 _001341_hash NULL
++_001342_hash _alloc_get_attr_desc 2 470 _001342_hash NULL
++_001343_hash alloc_hippi_dev 1 51320 _001343_hash NULL
++_001344_hash alloc_irdadev 1 19140 _001344_hash NULL
++_001345_hash alloc_ldt 2 21972 _001345_hash NULL
++_001346_hash alloc_ltalkdev 1 38071 _001346_hash NULL
++_001347_hash alloc_one_pg_vec_page 1 10747 _001347_hash NULL
++_001348_hash alloc_orinocodev 1 21371 _001348_hash NULL
++_001349_hash alloc_ring 2-4 18278 _001349_hash NULL
++_001351_hash _alloc_set_attr_list 4 48991 _001351_hash NULL
++_001353_hash alloc_tx 2 32143 _001353_hash NULL
++_001354_hash alloc_wr 1-2 24635 _001354_hash NULL
++_001356_hash async_setkey 3 35521 _001356_hash NULL
++_001357_hash ata_host_alloc_pinfo 3 17325 _001357_hash NULL
++_001360_hash ath6kl_connect_event 7-9-8 14267 _001360_hash NULL
++_001361_hash ath6kl_fwlog_block_read 3 49836 _001361_hash NULL
++_001362_hash ath6kl_fwlog_read 3 32101 _001362_hash NULL
++_001363_hash ath9k_wmi_cmd 4 327 _001363_hash NULL
++_001364_hash ath_rx_init 2 43564 _001364_hash NULL
++_001365_hash ath_tx_init 2 60515 _001365_hash NULL
++_001366_hash atm_alloc_charge 2 19517 _001914_hash NULL nohasharray
++_001367_hash atm_get_addr 3 31221 _001367_hash NULL
++_001368_hash audit_log_n_hex 3 45617 _001368_hash NULL
++_001369_hash audit_log_n_string 3 31705 _001369_hash NULL
++_001370_hash ax25_output 2 22736 _001370_hash NULL
++_001371_hash bcsp_prepare_pkt 3 12961 _001371_hash NULL
++_001372_hash bdx_rxdb_create 1 46525 _001372_hash NULL
++_001373_hash bdx_tx_db_init 2 41719 _001373_hash NULL
++_001374_hash bio_map_kern 3 64751 _001374_hash NULL
++_001375_hash bits_to_user 2-3 47733 _001375_hash NULL
++_001377_hash __blk_queue_init_tags 2 9778 _001377_hash NULL
++_001378_hash blk_queue_resize_tags 2 28670 _001378_hash NULL
++_001379_hash blk_rq_map_user_iov 5 16772 _001379_hash NULL
++_001380_hash bm_init 2 13529 _001380_hash NULL
++_001381_hash brcmf_alloc_wdev 1 60347 _001381_hash NULL
++_001382_hash __btrfs_buffered_write 3 35311 _002735_hash NULL nohasharray
++_001383_hash btrfs_insert_dir_item 4 59304 _001383_hash NULL
++_001384_hash btrfs_map_block 3 64379 _001384_hash NULL
++_001385_hash bt_skb_alloc 1 6404 _001385_hash NULL
++_001386_hash c4_add_card 3 54968 _001386_hash NULL
++_001387_hash cache_read 3 24790 _001387_hash NULL
++_001388_hash cache_write 3 13589 _001388_hash NULL
++_001389_hash calc_hmac 3 32010 _001389_hash NULL
++_001390_hash capinc_tty_write 3 28539 _001390_hash NULL
++_001391_hash ccid_getsockopt_builtin_ccids 2 53634 _001391_hash NULL
++_001392_hash ceph_copy_page_vector_to_user 3-4 31270 _001392_hash NULL
++_001394_hash ceph_parse_server_name 2 60318 _001394_hash NULL
++_001395_hash ceph_read_dir 3 17005 _001395_hash NULL
++_001396_hash cfg80211_roamed 5-7 32632 _001396_hash NULL
++_001398_hash cfpkt_add_body 3 44630 _001398_hash NULL
++_001399_hash cfpkt_create_pfx 1-2 23594 _001399_hash NULL
++_001401_hash cmd_complete 6 51629 _001401_hash NULL
++_001402_hash cmtp_add_msgpart 4 9252 _001402_hash NULL
++_001403_hash cmtp_send_interopmsg 7 376 _001403_hash NULL
++_001404_hash coda_psdev_read 3 35029 _001404_hash NULL
++_001405_hash construct_key_and_link 4 8321 _001405_hash NULL
++_001406_hash copy_counters_to_user 5 17027 _001406_hash NULL
++_001407_hash copy_entries_to_user 1 52367 _001407_hash NULL
++_001408_hash copy_from_buf 2-4 27308 _001408_hash NULL
++_001410_hash copy_oldmem_page 3-1 26164 _001410_hash NULL
++_001411_hash copy_to_user_fromio 3 57432 _001411_hash NULL
++_001412_hash cryptd_hash_setkey 3 42781 _001412_hash NULL
++_001413_hash crypto_authenc_esn_setkey 3 6985 _001413_hash NULL
++_001414_hash crypto_authenc_setkey 3 80 _001414_hash NULL
++_001415_hash cxgb3_get_cpl_reply_skb 2 10620 _001415_hash NULL
++_001416_hash cxgbi_ddp_reserve 4 30091 _001416_hash NULL
++_001417_hash cxio_init_resource_fifo 3 28764 _001417_hash NULL
++_001418_hash cxio_init_resource_fifo_random 3 47151 _001418_hash NULL
++_001419_hash datablob_hmac_append 3 40038 _001419_hash NULL
++_001420_hash datablob_hmac_verify 4 24786 _001420_hash NULL
++_001421_hash dataflash_read_fact_otp 3-2 33204 _001421_hash NULL
++_001422_hash dataflash_read_user_otp 3-2 14536 _001422_hash &_000207_hash
++_001423_hash dccp_feat_register_sp 5 17914 _001423_hash NULL
++_001424_hash dccp_setsockopt 5 60367 _001424_hash NULL
++_001425_hash __dev_alloc_skb 1 28681 _001425_hash NULL
++_001426_hash disk_expand_part_tbl 2 30561 _001426_hash NULL
++_001427_hash diva_os_alloc_message_buffer 1 64568 _001427_hash NULL
++_001428_hash diva_os_copy_to_user 4 48508 _001428_hash NULL
++_001429_hash diva_os_malloc 2 16406 _001429_hash NULL
++_001430_hash dmam_declare_coherent_memory 4-2 43679 _001430_hash NULL
++_001431_hash dm_vcalloc 1-2 16814 _001431_hash NULL
++_001433_hash dn_alloc_skb 2 6631 _001433_hash NULL
++_001434_hash do_proc_readlink 3 14096 _001434_hash NULL
++_001435_hash do_readlink 2 43518 _001435_hash NULL
++_001436_hash __do_replace 5 37227 _001436_hash NULL
++_001437_hash do_sigpending 2 9766 _001437_hash NULL
++_001438_hash drbd_bm_resize 2 20522 _001438_hash NULL
++_001439_hash drbd_setsockopt 5 16280 _001439_hash &_000383_hash
++_001440_hash dump_midi 3 51040 _001440_hash NULL
++_001441_hash ecryptfs_filldir 3 6622 _001441_hash NULL
++_001442_hash ecryptfs_send_message 2 18322 _001442_hash NULL
++_001443_hash ep0_read 3 38095 _001443_hash NULL
++_001444_hash evdev_ioctl 2 22371 _001444_hash NULL
++_001445_hash ext4_add_new_descs 3 19509 _001445_hash NULL
++_001446_hash fat_ioctl_filldir 3 36621 _001446_hash NULL
++_001447_hash _fc_frame_alloc 1 43568 _001447_hash NULL
++_001448_hash fc_host_post_vendor_event 3 30903 _001448_hash NULL
++_001449_hash fd_copyout 3 59323 _001449_hash NULL
++_001450_hash f_hidg_read 3 6238 _001450_hash NULL
++_001451_hash filldir 3 55137 _001451_hash NULL
++_001452_hash filldir64 3 46469 _001452_hash NULL
++_001453_hash find_skb 2 20431 _001453_hash NULL
++_001454_hash from_buffer 3 18625 _001454_hash NULL
++_001455_hash fsm_init 2 16134 _001455_hash NULL
++_001456_hash fs_path_add 3 15648 _001456_hash NULL
++_001457_hash fs_path_add_from_extent_buffer 4 27702 _001457_hash NULL
++_001458_hash fuse_perform_write 4 18457 _001458_hash NULL
++_001459_hash gem_alloc_skb 2 51715 _001459_hash NULL
++_001460_hash generic_file_buffered_write 4 25464 _001460_hash NULL
++_001461_hash gen_pool_add 3 21776 _001461_hash NULL
++_001462_hash get_packet 3 41914 _001462_hash NULL
++_001463_hash get_packet 3 5747 _001463_hash NULL
++_001464_hash get_packet_pg 4 28023 _001464_hash NULL
++_001465_hash get_skb 2 63008 _001465_hash NULL
++_001466_hash get_subdir 3 62581 _001466_hash NULL
++_001467_hash gsm_control_message 4 18209 _001467_hash NULL
++_001468_hash gsm_control_modem 3 55303 _001468_hash NULL
++_001469_hash gsm_control_rls 3 3353 _001469_hash NULL
++_001470_hash handle_received_packet 3 22457 _001470_hash NULL
++_001471_hash hash_setkey 3 48310 _001471_hash NULL
++_001472_hash hdlcdrv_register 2 6792 _001472_hash NULL
++_001473_hash hiddev_ioctl 2 36816 _001473_hash NULL
++_001474_hash hid_input_report 4 32458 _001474_hash NULL
++_001475_hash hidp_queue_report 3 1881 _001475_hash NULL
++_001476_hash __hidp_send_ctrl_message 4 28303 _001476_hash NULL
++_001477_hash hidraw_read 3 59650 _001477_hash &_001265_hash
++_001478_hash HiSax_readstatus 2 15752 _001478_hash NULL
++_001480_hash __hwahc_op_set_gtk 4 42038 _001480_hash NULL
++_001481_hash __hwahc_op_set_ptk 5 36510 _001481_hash NULL
++_001482_hash hycapi_rx_capipkt 3 11602 _001482_hash NULL
++_001483_hash i2400m_net_rx 5 27170 _001483_hash NULL
++_001484_hash ib_copy_to_udata 3 27525 _001484_hash NULL
++_001485_hash idetape_chrdev_read 3 2097 _001485_hash NULL
++_001486_hash ieee80211_alloc_hw 1 43829 _001486_hash NULL
++_001487_hash ieee80211_bss_info_update 4 13991 _001487_hash NULL
++_001488_hash igmpv3_newpack 2 35912 _001488_hash NULL
++_001489_hash ilo_read 3 32531 _001489_hash NULL
++_001490_hash init_map_ipmac 3-4 63896 _001490_hash NULL
++_001492_hash init_tid_tabs 2-4-3 13252 _001492_hash NULL
++_001495_hash iowarrior_read 3 53483 _001495_hash NULL
++_001496_hash ip_options_get 4 56538 _001496_hash NULL
++_001497_hash ipv6_getsockopt_sticky 5 56711 _001497_hash NULL
++_001498_hash ipwireless_send_packet 4 8328 _001498_hash NULL
++_001499_hash ipx_sendmsg 4 1362 _001499_hash NULL
++_001500_hash irq_domain_add_linear 2 29236 _001500_hash NULL
++_001501_hash iscsi_conn_setup 2 35159 _001501_hash NULL
++_001502_hash iscsi_create_session 3 51647 _001502_hash NULL
++_001503_hash iscsi_host_alloc 2 36671 _001503_hash NULL
++_001504_hash iscsi_if_send_reply 7 52219 _001504_hash NULL
++_001505_hash iscsi_offload_mesg 5 58425 _001505_hash NULL
++_001506_hash iscsi_ping_comp_event 5 38263 _001506_hash NULL
++_001507_hash iscsi_post_host_event 4 13473 _001507_hash NULL
++_001508_hash iscsi_recv_pdu 4 16755 _001508_hash NULL
++_001509_hash iscsi_session_setup 4-5 196 _001509_hash NULL
++_001511_hash iscsit_find_cmd_from_itt_or_dump 3 17194 _003122_hash NULL nohasharray
++_001512_hash isdn_net_ciscohdlck_alloc_skb 2 55209 _001951_hash NULL nohasharray
++_001513_hash isdn_ppp_ccp_xmit_reset 6 63297 _001513_hash NULL
++_001514_hash isdn_ppp_read 4 50356 _001514_hash NULL
++_001515_hash isdn_ppp_skb_push 2 5236 _001515_hash NULL
++_001516_hash isku_sysfs_read 6 58806 _001516_hash NULL
++_001517_hash isku_sysfs_write 6 49767 _001517_hash NULL
++_001520_hash jbd2_alloc 1 41359 _001520_hash NULL
++_001521_hash jffs2_do_link 6 42048 _001521_hash NULL
++_001522_hash jffs2_do_unlink 4 62020 _001522_hash NULL
++_001523_hash jffs2_security_setxattr 4 62107 _001523_hash NULL
++_001524_hash jffs2_trusted_setxattr 4 17048 _001524_hash NULL
++_001525_hash jffs2_user_setxattr 4 10182 _001525_hash NULL
++_001526_hash joydev_ioctl_common 2 49359 _001526_hash NULL
++_001527_hash kernel_setsockopt 5 35913 _001527_hash NULL
++_001528_hash keyctl_describe_key 3 36853 _001528_hash NULL
++_001529_hash keyctl_get_security 3 64418 _001529_hash &_001192_hash
++_001530_hash keyring_read 3 13438 _001530_hash NULL
++_001531_hash kfifo_copy_to_user 3 20646 _001531_hash NULL
++_001532_hash kmem_zalloc_large 1 56128 _001532_hash NULL
++_001533_hash kmp_init 2 41373 _001533_hash NULL
++_001534_hash koneplus_sysfs_write 6 35993 _001534_hash NULL
++_001535_hash kvm_clear_guest_page 4 2308 _001535_hash NULL
++_001536_hash kvm_read_nested_guest_page 5 13337 _001536_hash NULL
++_001537_hash _l2_alloc_skb 1 11883 _001537_hash NULL
++_001538_hash l2cap_create_basic_pdu 3 24869 _001538_hash &_001074_hash
++_001539_hash l2cap_create_connless_pdu 3 37327 _001539_hash NULL
++_001540_hash l2cap_create_iframe_pdu 3 40055 _001540_hash NULL
++_001541_hash l3_alloc_skb 1 32289 _001541_hash NULL
++_001542_hash __lgwrite 4 57669 _001542_hash NULL
++_001543_hash libfc_host_alloc 2 7917 _001543_hash NULL
++_001544_hash llc_alloc_frame 4 64366 _001544_hash NULL
++_001545_hash llcp_sock_sendmsg 4 1092 _001545_hash NULL
++_001546_hash mac_drv_rx_init 2 48898 _001546_hash NULL
++_001547_hash macvtap_get_user 4 28185 _001547_hash NULL
++_001548_hash mdc800_device_read 3 22896 _001548_hash NULL
++_001549_hash memcpy_toiovec 3 54166 _001549_hash &_000892_hash
++_001550_hash memcpy_toiovecend 3-4 19736 _001550_hash NULL
++_001552_hash mempool_create 1 29437 _001552_hash NULL
++_001553_hash mgmt_event 4 12810 _001553_hash NULL
++_001554_hash mgt_set_varlen 4 60916 _001554_hash NULL
++_001555_hash mI_alloc_skb 1 24770 _001555_hash NULL
++_001556_hash mlx4_en_create_rx_ring 3 62498 _001556_hash NULL
++_001557_hash mlx4_en_create_tx_ring 4 48501 _001557_hash NULL
++_001558_hash mlx4_init_cmpt_table 3 11569 _001558_hash NULL
++_001559_hash mon_bin_get_event 4 52863 _001559_hash NULL
++_001560_hash mousedev_read 3 47123 _001560_hash NULL
++_001561_hash move_addr_to_user 2 2868 _001561_hash NULL
++_001562_hash mpihelp_mul 5-3 27805 _001562_hash NULL
++_001564_hash mpi_set_buffer 3 65294 _001564_hash NULL
++_001565_hash mptctl_ioctl 2 12355 _001565_hash NULL
++_001566_hash msnd_fifo_alloc 2 23179 _001566_hash NULL
++_001567_hash mtdswap_init 2 55719 _001567_hash NULL
++_001568_hash mthca_alloc_resize_buf 3 60394 _001568_hash NULL
++_001569_hash mthca_init_cq 2 60011 _001569_hash NULL
++_001570_hash nci_skb_alloc 2 49757 _001570_hash NULL
++_001571_hash neigh_hash_grow 2 17283 _001571_hash NULL
++_001572_hash netdev_alloc_skb 2 62437 _001572_hash NULL
++_001573_hash __netdev_alloc_skb_ip_align 2 55067 _001573_hash NULL
++_001574_hash netlink_change_ngroups 2 16457 _001574_hash NULL
++_001575_hash new_skb 1 21148 _001575_hash NULL
++_001576_hash nfc_alloc_recv_skb 1 10244 _001576_hash NULL
++_001577_hash nfcwilink_skb_alloc 1 16167 _001577_hash NULL
++_001578_hash __nf_nat_mangle_tcp_packet 5-7 8190 _001578_hash NULL
++_001580_hash nf_nat_mangle_udp_packet 5-7 13321 _001580_hash NULL
++_001582_hash nfqnl_mangle 4-2 36226 _001582_hash NULL
++_001583_hash nfs4_realloc_slot_table 2 22859 _001583_hash NULL
++_001584_hash nfs_idmap_get_key 2 39616 _001584_hash NULL
++_001585_hash nfs_readdata_alloc 2 65015 _001585_hash NULL
++_001586_hash nfs_writedata_alloc 2 12133 _001586_hash NULL
++_001587_hash nfulnl_alloc_skb 2 65207 _001587_hash NULL
++_001588_hash ni65_alloc_mem 3 10664 _001588_hash NULL
++_001589_hash nsm_get_handle 4 52089 _001589_hash NULL
++_001590_hash ntfs_malloc_nofs 1 49572 _001590_hash NULL
++_001591_hash ntfs_malloc_nofs_nofail 1 63631 _001591_hash NULL
++_001592_hash nvme_create_queue 3 170 _001592_hash NULL
++_001593_hash ocfs2_control_write 3 54737 _001593_hash NULL
++_001595_hash orinoco_add_extscan_result 3 18207 _001595_hash NULL
++_001596_hash osd_req_read_sg_kern 5 6378 _001596_hash NULL
++_001597_hash osd_req_write_sg_kern 5 10514 _001597_hash NULL
++_001599_hash override_release 2 52032 _001599_hash NULL
++_001600_hash p9_client_read 5 19750 _001600_hash NULL
++_001601_hash packet_snd 3 13634 _001601_hash NULL
++_001602_hash pcbit_stat 2 27364 _001602_hash NULL
++_001603_hash pcpu_extend_area_map 2 12589 _001603_hash NULL
++_001604_hash pep_alloc_skb 3 46303 _001604_hash NULL
++_001605_hash pg_read 3 17276 _001605_hash NULL
++_001606_hash picolcd_debug_eeprom_read 3 14549 _001606_hash NULL
++_001607_hash pkt_alloc_packet_data 1 37928 _001607_hash NULL
++_001608_hash pmcraid_build_passthrough_ioadls 2 62034 _001608_hash NULL
++_001609_hash pn_raw_send 2 54330 _001609_hash NULL
++_001610_hash posix_clock_register 2 5662 _001610_hash NULL
++_001611_hash printer_read 3 54851 _001611_hash NULL
++_001612_hash __proc_file_read 3 54978 _001612_hash NULL
++_001613_hash pskb_may_pull 2 22546 _001613_hash NULL
++_001614_hash __pskb_pull 2 42602 _001614_hash NULL
++_001615_hash ptp_read 4 63251 _001615_hash NULL
++_001616_hash pt_read 3 49136 _001616_hash NULL
++_001617_hash put_cmsg 4 36589 _001617_hash NULL
++_001618_hash px_raw_event 4 49371 _001618_hash NULL
++_001619_hash qla4xxx_post_aen_work 3 46953 _001619_hash NULL
++_001620_hash qla4xxx_post_ping_evt_work 4 8074 _001819_hash NULL nohasharray
++_001621_hash raid5_resize 2 63306 _001621_hash NULL
++_001622_hash rawv6_sendmsg 4 20080 _001622_hash NULL
++_001623_hash rds_message_map_pages 2 31487 _001623_hash NULL
++_001624_hash rds_sendmsg 4 40976 _001624_hash NULL
++_001625_hash read_flush 3 43851 _001625_hash NULL
++_001626_hash read_profile 3 27859 _001626_hash NULL
++_001627_hash read_vmcore 3 26501 _001627_hash NULL
++_001628_hash redirected_tty_write 3 65297 _001628_hash NULL
++_001629_hash refill_pool 2 19477 _001629_hash NULL
++_001630_hash __register_chrdev 2-3 54223 _001630_hash NULL
++_001632_hash regmap_raw_write 4 53803 _001632_hash NULL
++_001633_hash reiserfs_allocate_list_bitmaps 3 21732 _001633_hash NULL
++_001634_hash reiserfs_resize 2 34377 _001634_hash NULL
++_001635_hash request_key_auth_read 3 24109 _001635_hash NULL
++_001636_hash rfcomm_wmalloc 2 58090 _001636_hash NULL
++_001637_hash rfkill_fop_read 3 54711 _001637_hash NULL
++_001638_hash rng_dev_read 3 41581 _001638_hash NULL
++_001639_hash roccat_read 3 41093 _001639_hash &_001034_hash
++_001640_hash rx 4 57944 _001640_hash NULL
++_001641_hash rxrpc_client_sendmsg 5 23236 _001641_hash NULL
++_001642_hash rxrpc_kernel_send_data 3 60083 _001642_hash NULL
++_001643_hash rxrpc_server_sendmsg 4 37331 _001643_hash NULL
++_001644_hash savu_sysfs_write 6 42273 _001644_hash NULL
++_001645_hash sco_sock_sendmsg 4 62542 _001645_hash NULL
++_001646_hash scsi_nl_send_vendor_msg 5 16394 _001646_hash NULL
++_001647_hash scsi_register 2 49094 _001647_hash NULL
++_001648_hash sctp_datamsg_from_user 4 55342 _001648_hash NULL
++_001649_hash sctp_getsockopt_events 2 3607 _001649_hash NULL
++_001650_hash sctp_getsockopt_maxburst 2 42941 _001650_hash NULL
++_001651_hash sctp_getsockopt_maxseg 2 10737 _001651_hash NULL
++_001652_hash sctp_make_chunk 4 12986 _001652_hash NULL
++_001653_hash sctpprobe_read 3 17741 _001653_hash NULL
++_001654_hash sctp_tsnmap_mark 2 35929 _001654_hash NULL
++_001655_hash sctp_ulpevent_new 1 33377 _001655_hash NULL
++_001656_hash sdhci_alloc_host 2 7509 _001656_hash NULL
++_001657_hash selinux_inode_post_setxattr 4 26037 _001657_hash NULL
++_001658_hash selinux_inode_setsecurity 4 18148 _001658_hash NULL
++_001659_hash selinux_inode_setxattr 4 10708 _001659_hash NULL
++_001660_hash selinux_secctx_to_secid 2 63744 _001660_hash NULL
++_001661_hash selinux_setprocattr 4 55611 _001661_hash NULL
++_001662_hash sel_write_context 3 25726 _001662_hash &_001329_hash
++_001663_hash send_command 4 10832 _001663_hash NULL
++_001664_hash seq_copy_in_user 3 18543 _001664_hash NULL
++_001665_hash seq_open_net 4 8968 _001779_hash NULL nohasharray
++_001666_hash seq_open_private 3 61589 _001666_hash NULL
++_001667_hash set_arg 3 42824 _001667_hash NULL
++_001668_hash sg_read 3 25799 _001668_hash NULL
++_001669_hash shash_async_setkey 3 10720 _003506_hash NULL nohasharray
++_001670_hash shash_compat_setkey 3 12267 _001670_hash NULL
++_001671_hash shmem_setxattr 4 55867 _001671_hash NULL
++_001672_hash simple_read_from_buffer 2-5 55957 _001672_hash NULL
++_001674_hash sisusb_clear_vram 2-3 57466 _001674_hash NULL
++_001676_hash sisusbcon_do_font_op 9 52271 _001676_hash NULL
++_001677_hash sisusb_copy_memory 4 35016 _001677_hash NULL
++_001678_hash sisusb_write 3 44834 _001678_hash NULL
++_001680_hash skb_cow 2 26138 _001680_hash NULL
++_001681_hash skb_cow_head 2 52495 _001681_hash NULL
++_001682_hash skb_make_writable 2 24783 _001682_hash NULL
++_001683_hash skb_padto 2 50759 _001683_hash NULL
++_001684_hash sk_stream_alloc_skb 2 57622 _001684_hash NULL
++_001685_hash smk_write_access2 3 19170 _001685_hash NULL
++_001686_hash smk_write_access 3 49561 _001686_hash NULL
++_001687_hash snd_es1938_capture_copy 5 25930 _001687_hash NULL
++_001688_hash snd_gus_dram_peek 4 9062 _001688_hash NULL
++_001689_hash snd_hdsp_capture_copy 5 4011 _001689_hash NULL
++_001690_hash snd_korg1212_copy_to 6 92 _001690_hash NULL
++_001691_hash snd_opl4_mem_proc_read 5 63774 _001691_hash NULL
++_001692_hash snd_pcm_oss_read1 3 63771 _001692_hash NULL
++_001693_hash snd_pcm_plugin_alloc 2 12580 _001693_hash NULL
++_001694_hash snd_rawmidi_kernel_read1 4 36740 _001694_hash NULL
++_001695_hash snd_rme9652_capture_copy 5 10287 _001695_hash NULL
++_001696_hash sock_alloc_send_pskb 2 21246 _001696_hash NULL
++_001697_hash sock_rmalloc 2 59740 _002491_hash NULL nohasharray
++_001698_hash sock_wmalloc 2 16472 _001698_hash NULL
++_001699_hash solos_param_store 4 34755 _001699_hash NULL
++_001702_hash srp_target_alloc 3 37288 _001702_hash NULL
++_001703_hash store_ifalias 4 35088 _001703_hash NULL
++_001704_hash store_msg 3 56417 _001704_hash NULL
++_001705_hash str_to_user 2 11411 _001705_hash NULL
++_001706_hash subbuf_read_actor 3 2071 _001706_hash NULL
++_001707_hash sys_fgetxattr 4 25166 _001707_hash NULL
++_001708_hash sys_gethostname 2 49698 _001708_hash NULL
++_001709_hash sys_getxattr 4 37418 _001709_hash NULL
++_001710_hash sys_init_module 2 36047 _001710_hash NULL
++_001711_hash sys_kexec_load 2 14222 _001711_hash NULL
++_001712_hash sys_lgetxattr 4 45531 _001712_hash NULL
++_001713_hash syslog_print 2 307 _001713_hash NULL
++_001714_hash sys_msgsnd 3 44537 _001714_hash &_000139_hash
++_001715_hash sys_process_vm_readv 3-5 19090 _003104_hash NULL nohasharray
++_001717_hash sys_process_vm_writev 3-5 4928 _001717_hash NULL
++_001719_hash sys_pselect6 1 57449 _001719_hash NULL
++_001720_hash sys_sched_getaffinity 2 60033 _001720_hash NULL
++_001721_hash sys_setsockopt 5 35320 _001721_hash NULL
++_001722_hash t3_init_l2t 1 8261 _001722_hash NULL
++_001723_hash t4vf_pktgl_to_skb 2 39005 _001723_hash NULL
++_001724_hash tcp_collapse 5-6 63294 _001724_hash NULL
++_001726_hash tcp_sendmsg 4 30296 _001726_hash NULL
++_001727_hash team_options_register 3 20091 _001727_hash NULL
++_001728_hash tipc_buf_acquire 1 60437 _001728_hash NULL
++_001729_hash tipc_cfg_reply_alloc 1 27606 _001729_hash NULL
++_001730_hash tipc_send2name 6 16809 _001730_hash NULL
++_001731_hash tipc_send2port 5 63935 _001731_hash NULL
++_001732_hash tipc_send 4 51238 _001732_hash NULL
++_001733_hash tnode_new 3 44757 _002769_hash NULL nohasharray
++_001734_hash tomoyo_read_self 3 33539 _001734_hash NULL
++_001735_hash tomoyo_update_domain 2 5498 _001735_hash NULL
++_001736_hash tomoyo_update_policy 2 40458 _001736_hash NULL
++_001737_hash tpm_read 3 50344 _001737_hash NULL
++_001738_hash TSS_rawhmac 3 17486 _001738_hash NULL
++_001739_hash __tty_buffer_request_room 2 27700 _001739_hash NULL
++_001740_hash tun_get_user 4 39099 _001740_hash NULL
++_001741_hash ubi_dump_flash 4 46381 _001741_hash NULL
++_001742_hash ubi_io_write 4-5 15870 _003453_hash NULL nohasharray
++_001744_hash udp_setsockopt 5 25985 _001744_hash NULL
++_001745_hash udpv6_setsockopt 5 18487 _001745_hash NULL
++_001746_hash uio_read 3 49300 _001746_hash NULL
++_001747_hash ulog_alloc_skb 1 23427 _001747_hash NULL
++_001748_hash unix_dgram_sendmsg 4 45699 _001748_hash NULL
++_001749_hash unlink1 3 63059 _001749_hash NULL
++_001751_hash usbdev_read 3 45114 _001751_hash NULL
++_001752_hash usblp_ioctl 2 30203 _001752_hash NULL
++_001753_hash usblp_read 3 57342 _003832_hash NULL nohasharray
++_001754_hash usbtmc_read 3 32377 _001754_hash NULL
++_001755_hash _usb_writeN_sync 4 31682 _001755_hash NULL
++_001756_hash user_read 3 51881 _001756_hash NULL
++_001757_hash vcs_read 3 8017 _001757_hash NULL
++_001758_hash vdma_mem_alloc 1 6171 _001758_hash NULL
++_001759_hash venus_create 4 20555 _001759_hash NULL
++_001760_hash venus_link 5 32165 _001760_hash NULL
++_001761_hash venus_lookup 4 8121 _001761_hash NULL
++_001762_hash venus_mkdir 4 8967 _001762_hash NULL
++_001763_hash venus_remove 4 59781 _001763_hash NULL
++_001764_hash venus_rename 4-5 17707 _003784_hash NULL nohasharray
++_001766_hash venus_rmdir 4 45564 _001766_hash NULL
++_001767_hash venus_symlink 4-6 23570 _001767_hash NULL
++_001769_hash vfs_readlink 3 54368 _001769_hash NULL
++_001770_hash vfs_readv 3 38011 _001770_hash NULL
++_001771_hash vfs_writev 3 25278 _001771_hash NULL
++_001772_hash vga_arb_read 3 4886 _001772_hash NULL
++_001773_hash vgacon_adjust_height 2 28124 _001773_hash NULL
++_001774_hash vhci_put_user 4 12604 _001774_hash NULL
++_001775_hash vhost_add_used_n 3 10760 _001775_hash NULL
++_001776_hash virtnet_send_command 5-6 61993 _001776_hash NULL
++_001778_hash vmbus_establish_gpadl 3 4495 _001778_hash NULL
++_001779_hash vol_cdev_read 3 8968 _001779_hash &_001665_hash
++_001780_hash wdm_read 3 6549 _001780_hash NULL
++_001781_hash write_adapter_mem 3 3234 _001781_hash NULL
++_001782_hash wusb_prf 7 54261 _001782_hash &_000065_hash
++_001783_hash xdi_copy_to_user 4 48900 _001783_hash NULL
++_001784_hash xfs_buf_associate_memory 3 17915 _001784_hash NULL
++_001785_hash xfs_buf_get_maps 2 4581 _001785_hash NULL
++_001786_hash xfs_buf_get_uncached 2 51477 _001786_hash NULL
++_001787_hash xfs_buf_item_get_format 2 189 _001787_hash NULL
++_001788_hash xfs_buf_map_from_irec 5 2368 _002641_hash NULL nohasharray
++_001789_hash xfs_dir2_block_to_sf 3 37868 _001789_hash NULL
++_001790_hash xfs_dir2_leaf_getdents 3 23841 _001790_hash NULL
++_001791_hash xfs_dir2_sf_addname_hard 3 54254 _001791_hash NULL
++_001792_hash xfs_efd_init 3 5463 _001792_hash NULL
++_001793_hash xfs_efi_init 2 5476 _001793_hash NULL
++_001794_hash xfs_iext_realloc_direct 2 20521 _001794_hash NULL
++_001795_hash xfs_iext_realloc_indirect 2 59211 _001795_hash NULL
++_001796_hash xfs_inumbers_fmt 3 12817 _001796_hash NULL
++_001797_hash xhci_alloc_streams 5 37586 _001797_hash NULL
++_001798_hash xlog_recover_add_to_cont_trans 4 44102 _001798_hash NULL
++_001799_hash xz_dec_lzma2_create 2 36353 _002713_hash NULL nohasharray
++_001800_hash _zd_iowrite32v_locked 3 44725 _001800_hash NULL
++_001801_hash a2mp_chan_alloc_skb_cb 2 27159 _001801_hash NULL
++_001802_hash aat2870_reg_read_file 3 12221 _001802_hash NULL
++_001803_hash add_partition 2 55588 _001803_hash NULL
++_001804_hash add_sctp_bind_addr 3 12269 _001804_hash NULL
++_001805_hash _add_sg_continuation_descriptor 3 54721 _001805_hash NULL
++_001806_hash afs_cell_lookup 2 8482 _001806_hash NULL
++_001807_hash afs_send_simple_reply 3 63940 _001807_hash NULL
++_001808_hash agp_allocate_memory_wrap 1 16576 _001808_hash NULL
++_001809_hash __alloc_bootmem 1 31498 _001809_hash NULL
++_001810_hash __alloc_bootmem_low 1 43423 _003425_hash NULL nohasharray
++_001811_hash __alloc_bootmem_node_high 2 65076 _001811_hash NULL
++_001812_hash alloc_cc770dev 1 48186 _001812_hash NULL
++_001813_hash __alloc_ei_netdev 1 29338 _001813_hash NULL
++_001814_hash __alloc_eip_netdev 1 51549 _001814_hash NULL
++_001815_hash alloc_libipw 1 22708 _001815_hash NULL
++_001816_hash _alloc_mISDN_skb 3 52232 _001816_hash NULL
++_001817_hash alloc_pg_vec 2 8533 _001817_hash NULL
++_001818_hash alloc_sja1000dev 1 17868 _001818_hash NULL
++_001819_hash alloc_targets 2 8074 _001819_hash &_001620_hash
++_001822_hash ath6kl_disconnect_timeout_read 3 3650 _001822_hash NULL
++_001823_hash ath6kl_endpoint_stats_read 3 41554 _001823_hash NULL
++_001824_hash ath6kl_fwlog_mask_read 3 2050 _001824_hash NULL
++_001825_hash ath6kl_keepalive_read 3 44303 _001825_hash NULL
++_001826_hash ath6kl_listen_int_read 3 10355 _001826_hash NULL
++_001827_hash ath6kl_lrssi_roam_read 3 61022 _001827_hash NULL
++_001828_hash ath6kl_regdump_read 3 14393 _001828_hash NULL
++_001829_hash ath6kl_regread_read 3 25884 _001829_hash NULL
++_001830_hash ath6kl_regwrite_read 3 48747 _001830_hash NULL
++_001831_hash ath6kl_roam_table_read 3 26166 _001831_hash NULL
++_001832_hash ath9k_debugfs_read_buf 3 25316 _001832_hash NULL
++_001833_hash ath9k_multi_regread 4 65056 _001833_hash NULL
++_001834_hash ath_rxbuf_alloc 2 24745 _001834_hash NULL
++_001835_hash atk_debugfs_ggrp_read 3 29522 _001835_hash NULL
++_001836_hash audit_log_n_untrustedstring 3 9548 _001836_hash NULL
++_001837_hash ax25_send_frame 2 19964 _001837_hash NULL
++_001838_hash b43_debugfs_read 3 24425 _001838_hash NULL
++_001839_hash b43legacy_debugfs_read 3 2473 _001839_hash NULL
++_001840_hash batadv_bla_is_backbone_gw 3 58488 _001840_hash NULL
++_001841_hash batadv_check_management_packet 3 52993 _001841_hash NULL
++_001842_hash batadv_check_unicast_packet 2 10866 _001842_hash NULL
++_001843_hash batadv_interface_rx 4 8568 _001843_hash NULL
++_001844_hash batadv_skb_head_push 2 11360 _001844_hash NULL
++_001845_hash bchannel_get_rxbuf 2 37213 _001845_hash NULL
++_001846_hash bcm_recvmsg 4 43992 _001846_hash NULL
++_001847_hash bfad_debugfs_read 3 13119 _001847_hash NULL
++_001848_hash bfad_debugfs_read_regrd 3 57830 _001848_hash NULL
++_001849_hash blk_init_tags 1 30592 _001849_hash NULL
++_001850_hash blk_queue_init_tags 2 44355 _001850_hash &_001022_hash
++_001851_hash blk_rq_map_kern 4 47004 _001851_hash NULL
++_001852_hash bm_entry_read 3 10976 _001852_hash NULL
++_001853_hash bm_status_read 3 19583 _001853_hash NULL
++_001854_hash bnad_debugfs_read 3 50665 _001854_hash NULL
++_001855_hash bnad_debugfs_read_regrd 3 51308 _001855_hash NULL
++_001856_hash bnx2i_send_nl_mesg 4 53353 _001856_hash NULL
++_001857_hash brcmf_debugfs_sdio_counter_read 3 58369 _001857_hash NULL
++_001858_hash brcmf_sdio_assert_info 4 52653 _001858_hash NULL
++_001859_hash brcmf_sdio_dump_console 4 37455 _001859_hash NULL
++_001860_hash brcmf_sdio_trap_info 4 48510 _001860_hash NULL
++_001861_hash btmrvl_curpsmode_read 3 46939 _001861_hash NULL
++_001862_hash btmrvl_gpiogap_read 3 4718 _001862_hash NULL
++_001863_hash btmrvl_hscfgcmd_read 3 56303 _001863_hash NULL
++_001864_hash btmrvl_hscmd_read 3 1614 _001864_hash NULL
++_001865_hash btmrvl_hsmode_read 3 1647 _001865_hash NULL
++_001866_hash btmrvl_hsstate_read 3 920 _001866_hash NULL
++_001867_hash btmrvl_pscmd_read 3 24308 _001867_hash NULL
++_001868_hash btmrvl_psmode_read 3 22395 _001868_hash NULL
++_001869_hash btmrvl_psstate_read 3 50683 _001869_hash NULL
++_001870_hash btmrvl_txdnldready_read 3 413 _001870_hash NULL
++_001871_hash btrfs_add_link 5 9973 _001871_hash NULL
++_001872_hash __btrfs_direct_write 4 22273 _001872_hash NULL
++_001873_hash btrfs_discard_extent 2 38547 _001873_hash NULL
++_001874_hash btrfs_file_aio_write 4 21520 _001874_hash NULL
++_001875_hash btrfs_find_create_tree_block 3 55812 _001875_hash NULL
++_001876_hash btrfsic_map_block 2 56751 _001876_hash NULL
++_001877_hash cache_read_pipefs 3 47615 _001877_hash NULL
++_001878_hash cache_read_procfs 3 52882 _001878_hash NULL
++_001879_hash cache_write_pipefs 3 48270 _001879_hash NULL
++_001880_hash cache_write_procfs 3 22491 _001880_hash NULL
++_001881_hash caif_stream_recvmsg 4 13173 _001881_hash NULL
++_001882_hash carl9170_alloc 1 27 _001882_hash NULL
++_001883_hash carl9170_debugfs_read 3 47738 _001883_hash NULL
++_001884_hash ceph_msgpool_init 4
34599 _001884_hash NULL ++_001885_hash cfpkt_add_trail 3 27260 _001885_hash NULL ++_001886_hash cfpkt_create 1 18197 _001886_hash NULL ++_001887_hash cfpkt_pad_trail 2 55511 _003606_hash NULL nohasharray ++_001888_hash cfpkt_split 2 47541 _001888_hash NULL ++_001889_hash cgroup_read_s64 5 19570 _001889_hash NULL ++_001890_hash cgroup_read_u64 5 45532 _001890_hash NULL ++_001891_hash channel_type_read 3 47308 _001891_hash NULL ++_001892_hash check_header 2 56930 _001892_hash NULL ++_001893_hash codec_list_read_file 3 24910 _001893_hash NULL ++_001894_hash configfs_read_file 3 1683 _001894_hash NULL ++_001895_hash console_store 4 36007 _001895_hash NULL ++_001896_hash cpuset_common_file_read 5 8800 _001896_hash NULL ++_001897_hash create_subvol 4 30836 _001897_hash NULL ++_001898_hash cxio_hal_init_resource 2-7-6 29771 _001898_hash &_000295_hash ++_001901_hash cxio_hal_init_rhdl_resource 1 25104 _001901_hash NULL ++_001902_hash dai_list_read_file 3 25421 _001902_hash NULL ++_001903_hash dapm_bias_read_file 3 64715 _001903_hash NULL ++_001904_hash dapm_widget_power_read_file 3 59950 _001983_hash NULL nohasharray ++_001907_hash dbgfs_frame 3 45917 _001907_hash NULL ++_001908_hash dbgfs_state 3 38894 _001908_hash NULL ++_001909_hash dccp_manip_pkt 2 30229 _001909_hash NULL ++_001910_hash ddp_ppod_write_idata 5 25610 _001910_hash NULL ++_001911_hash debugfs_read 3 62535 _001911_hash NULL ++_001912_hash debug_output 3 18575 _001912_hash NULL ++_001913_hash debug_read 3 19322 _001913_hash NULL ++_001914_hash dev_alloc_skb 1 19517 _001914_hash &_001366_hash ++_001915_hash dfs_file_read 3 18116 _001915_hash NULL ++_001916_hash diva_alloc_dma_map 2 23798 _001916_hash NULL ++_001917_hash diva_xdi_write 4 63975 _001917_hash NULL ++_001918_hash dma_memcpy_pg_to_iovec 6 1725 _001918_hash NULL ++_001919_hash dma_memcpy_to_iovec 5 12173 _001919_hash NULL ++_001920_hash dma_show_regs 3 35266 _001920_hash NULL ++_001921_hash dm_exception_table_init 2 39645 _001921_hash &_001149_hash ++_001922_hash dn_nsp_do_disc 2-6 49474 _001922_hash NULL ++_001924_hash dn_recvmsg 4 17213 _001924_hash NULL ++_001925_hash dns_resolver_read 3 54658 _001925_hash NULL ++_001926_hash do_msgrcv 4 5590 _001926_hash NULL ++_001927_hash do_syslog 3 56807 _001927_hash NULL ++_001928_hash dpcm_state_read_file 3 65489 _001928_hash NULL ++_001929_hash dsp_cmx_send_member 2 15625 _001929_hash NULL ++_001930_hash fallback_on_nodma_alloc 2 35332 _001930_hash NULL ++_001931_hash fc_frame_alloc 2 1596 _001931_hash NULL ++_001932_hash fc_frame_alloc_fill 2 59394 _001932_hash NULL ++_001933_hash filter_read 3 61692 _001933_hash NULL ++_001934_hash __finish_unordered_dir 4 33198 _001934_hash NULL ++_001935_hash format_devstat_counter 3 32550 _001935_hash NULL ++_001936_hash fragmentation_threshold_read 3 61718 _001936_hash NULL ++_001937_hash fuse_conn_limit_read 3 20084 _001937_hash NULL ++_001938_hash fuse_conn_waiting_read 3 49762 _001938_hash NULL ++_001939_hash fuse_file_aio_write 4 46399 _001939_hash NULL ++_001940_hash generic_readlink 3 32654 _001940_hash NULL ++_001941_hash gre_manip_pkt 2 38785 _001941_hash NULL ++_001942_hash handle_eviocgbit 3 44193 _001942_hash NULL ++_001943_hash handle_response 5 55951 _001943_hash NULL ++_001944_hash handle_response_icmp 7 39574 _001944_hash NULL ++_001945_hash hash_recvmsg 4 50924 _001945_hash NULL ++_001946_hash hci_send_cmd 3 43810 _001946_hash NULL ++_001947_hash hci_si_event 3 1404 _001947_hash NULL ++_001948_hash help 4 14971 _001948_hash NULL ++_001949_hash hfcpci_empty_bfifo 4 62323 
_001949_hash NULL ++_001950_hash hidp_send_ctrl_message 4 43702 _001950_hash NULL ++_001951_hash ht40allow_map_read 3 55209 _001951_hash &_001512_hash ++_001952_hash hwflags_read 3 52318 _001952_hash NULL ++_001953_hash hysdn_conf_read 3 42324 _001953_hash NULL ++_001954_hash hysdn_sched_rx 3 60533 _001954_hash NULL ++_001955_hash i2400m_rx_stats_read 3 57706 _001955_hash NULL ++_001956_hash i2400m_tx_stats_read 3 28527 _001956_hash NULL ++_001957_hash icmp_manip_pkt 2 48801 _001957_hash NULL ++_001958_hash idmouse_read 3 63374 _001958_hash NULL ++_001959_hash ieee80211_if_read 3 6785 _001959_hash NULL ++_001960_hash ieee80211_rx_bss_info 3 61630 _001960_hash NULL ++_001961_hash ikconfig_read_current 3 1658 _001961_hash NULL ++_001962_hash il3945_sta_dbgfs_stats_table_read 3 48802 _001962_hash NULL ++_001963_hash il3945_ucode_general_stats_read 3 46111 _001963_hash NULL ++_001964_hash il3945_ucode_rx_stats_read 3 3048 _001964_hash NULL ++_001965_hash il3945_ucode_tx_stats_read 3 36016 _001965_hash NULL ++_001966_hash il4965_rs_sta_dbgfs_rate_scale_data_read 3 37792 _001966_hash NULL ++_001967_hash il4965_rs_sta_dbgfs_scale_table_read 3 38564 _001967_hash NULL ++_001968_hash il4965_rs_sta_dbgfs_stats_table_read 3 49206 _001968_hash NULL ++_001969_hash il4965_ucode_general_stats_read 3 56277 _001969_hash NULL ++_001970_hash il4965_ucode_rx_stats_read 3 61948 _001970_hash NULL ++_001971_hash il4965_ucode_tx_stats_read 3 12064 _001971_hash NULL ++_001972_hash il_dbgfs_chain_noise_read 3 38044 _001972_hash NULL ++_001973_hash il_dbgfs_channels_read 3 25005 _001973_hash NULL ++_001974_hash il_dbgfs_disable_ht40_read 3 42386 _001974_hash NULL ++_001975_hash il_dbgfs_fh_reg_read 3 40993 _001975_hash NULL ++_001976_hash il_dbgfs_force_reset_read 3 57517 _001976_hash NULL ++_001977_hash il_dbgfs_interrupt_read 3 3351 _001977_hash NULL ++_001978_hash il_dbgfs_missed_beacon_read 3 59956 _001978_hash NULL ++_001979_hash il_dbgfs_nvm_read 3 12288 _001979_hash NULL ++_001980_hash il_dbgfs_power_save_status_read 3 43165 _001980_hash NULL ++_001981_hash il_dbgfs_qos_read 3 33615 _001981_hash NULL ++_001982_hash il_dbgfs_rxon_filter_flags_read 3 19281 _001982_hash NULL ++_001983_hash il_dbgfs_rxon_flags_read 3 59950 _001983_hash &_001904_hash ++_001984_hash il_dbgfs_rx_queue_read 3 11221 _001984_hash NULL ++_001985_hash il_dbgfs_rx_stats_read 3 15243 _001985_hash NULL ++_001986_hash il_dbgfs_sensitivity_read 3 2370 _001986_hash NULL ++_001987_hash il_dbgfs_sram_read 3 62296 _001987_hash NULL ++_001988_hash il_dbgfs_stations_read 3 21532 _001988_hash NULL ++_001989_hash il_dbgfs_status_read 3 58388 _001989_hash NULL ++_001990_hash il_dbgfs_tx_queue_read 3 55668 _001990_hash NULL ++_001991_hash il_dbgfs_tx_stats_read 3 32913 _001991_hash NULL ++_001992_hash ima_show_htable_value 2 57136 _001992_hash NULL ++_001994_hash intel_fake_agp_alloc_by_type 1 1 _001994_hash NULL ++_001995_hash ip4ip6_err 5 36772 _001995_hash NULL ++_001996_hash ip6_append_data 4-5 36490 _003601_hash NULL nohasharray ++_001997_hash ip6ip6_err 5 18308 _001997_hash NULL ++_001998_hash __ip_append_data 7-8 36191 _001998_hash NULL ++_001999_hash ip_vs_icmp_xmit 4 59624 _001999_hash NULL ++_002000_hash ip_vs_icmp_xmit_v6 4 20464 _002000_hash NULL ++_002001_hash ipw_write 3 59807 _002001_hash NULL ++_002002_hash irda_recvmsg_stream 4 35280 _002002_hash NULL ++_002003_hash irq_domain_add_simple 2 46734 _002003_hash NULL ++_002004_hash __iscsi_complete_pdu 4 10726 _002004_hash NULL ++_002005_hash iscsi_nop_out_rsp 4 51117 _002005_hash NULL 
++_002006_hash iscsi_tcp_conn_setup 2 16376 _002006_hash NULL
++_002007_hash iwl_dbgfs_bt_traffic_read 3 35534 _002007_hash NULL
++_002008_hash iwl_dbgfs_calib_disabled_read 3 22649 _002008_hash NULL
++_002009_hash iwl_dbgfs_chain_noise_read 3 46355 _002009_hash NULL
++_002010_hash iwl_dbgfs_channels_read 3 6784 _002010_hash NULL
++_002011_hash iwl_dbgfs_current_sleep_command_read 3 2081 _002011_hash NULL
++_002012_hash iwl_dbgfs_disable_ht40_read 3 35761 _002012_hash NULL
++_002013_hash iwl_dbgfs_fh_reg_read 3 879 _002013_hash &_000406_hash
++_002014_hash iwl_dbgfs_interrupt_read 3 23574 _002014_hash NULL
++_002015_hash iwl_dbgfs_log_event_read 3 2107 _002015_hash NULL
++_002016_hash iwl_dbgfs_missed_beacon_read 3 50584 _002016_hash NULL
++_002017_hash iwl_dbgfs_nvm_read 3 23845 _002017_hash NULL
++_002018_hash iwl_dbgfs_plcp_delta_read 3 55407 _002018_hash NULL
++_002019_hash iwl_dbgfs_power_save_status_read 3 54392 _002019_hash NULL
++_002020_hash iwl_dbgfs_protection_mode_read 3 13943 _002020_hash NULL
++_002021_hash iwl_dbgfs_qos_read 3 11753 _002021_hash NULL
++_002022_hash iwl_dbgfs_reply_tx_error_read 3 19205 _002022_hash NULL
++_002023_hash iwl_dbgfs_rf_reset_read 3 26512 _002023_hash NULL
++_002024_hash iwl_dbgfs_rx_handlers_read 3 18708 _002024_hash NULL
++_002025_hash iwl_dbgfs_rxon_filter_flags_read 3 28832 _002025_hash NULL
++_002026_hash iwl_dbgfs_rxon_flags_read 3 20795 _002026_hash NULL
++_002027_hash iwl_dbgfs_rx_queue_read 3 19943 _002027_hash NULL
++_002028_hash iwl_dbgfs_sensitivity_read 3 63116 _002731_hash NULL nohasharray
++_002029_hash iwl_dbgfs_sleep_level_override_read 3 3038 _002029_hash NULL
++_002030_hash iwl_dbgfs_sram_read 3 44505 _002030_hash NULL
++_002031_hash iwl_dbgfs_stations_read 3 9309 _002031_hash NULL
++_002032_hash iwl_dbgfs_status_read 3 5171 _002032_hash NULL
++_002033_hash iwl_dbgfs_temperature_read 3 29224 _002033_hash NULL
++_002034_hash iwl_dbgfs_thermal_throttling_read 3 38779 _002034_hash NULL
++_002035_hash iwl_dbgfs_tx_queue_read 3 4635 _002035_hash NULL
++_002036_hash iwl_dbgfs_ucode_bt_stats_read 3 42820 _002036_hash NULL
++_002037_hash iwl_dbgfs_ucode_general_stats_read 3 49199 _002037_hash NULL
++_002038_hash iwl_dbgfs_ucode_rx_stats_read 3 58023 _002038_hash NULL
++_002039_hash iwl_dbgfs_ucode_tracing_read 3 47983 _002039_hash &_000356_hash
++_002040_hash iwl_dbgfs_ucode_tx_stats_read 3 31611 _002040_hash NULL
++_002041_hash iwl_dbgfs_wowlan_sram_read 3 540 _002041_hash NULL
++_002042_hash joydev_ioctl 2 33343 _002042_hash NULL
++_002043_hash kernel_readv 3 35617 _002043_hash NULL
++_002044_hash key_algorithm_read 3 57946 _002044_hash NULL
++_002045_hash key_icverrors_read 3 20895 _002045_hash NULL
++_002046_hash key_key_read 3 3241 _002046_hash NULL
++_002047_hash key_replays_read 3 62746 _002047_hash NULL
++_002048_hash key_rx_spec_read 3 12736 _002048_hash NULL
++_002049_hash key_tx_spec_read 3 4862 _002049_hash NULL
++_002050_hash __kfifo_to_user 3 36555 _002568_hash NULL nohasharray
++_002051_hash __kfifo_to_user_r 3 39123 _002051_hash NULL
++_002052_hash kmem_zalloc_greedy 2-3 65268 _002052_hash NULL
++_002054_hash l1oip_socket_recv 6 56537 _002054_hash NULL
++_002055_hash l2cap_build_cmd 4 48676 _002055_hash NULL
++_002056_hash l2cap_chan_send 3 49995 _002056_hash NULL
++_002057_hash l2cap_segment_sdu 4 48772 _002057_hash NULL
++_002058_hash l2down_create 4 21755 _002058_hash NULL
++_002059_hash l2tp_xmit_skb 3 42672 _002059_hash NULL
++_002060_hash l2up_create 3 6430 _002060_hash NULL
++_002061_hash lbs_debugfs_read 3 30721 _002061_hash NULL
++_002062_hash lbs_dev_info 3 51023 _002062_hash NULL
++_002063_hash lbs_host_sleep_read 3 31013 _002063_hash NULL
++_002064_hash lbs_rdbbp_read 3 45805 _002064_hash NULL
++_002065_hash lbs_rdmac_read 3 418 _002065_hash NULL
++_002066_hash lbs_rdrf_read 3 41431 _002066_hash NULL
++_002067_hash lbs_sleepparams_read 3 10840 _002067_hash NULL
++_002068_hash lbs_threshold_read 5 21046 _002068_hash NULL
++_002069_hash ldisc_receive 4 41516 _002069_hash NULL
++_002070_hash libfc_vport_create 2 4415 _002070_hash NULL
++_002073_hash lkdtm_debugfs_read 3 45752 _002073_hash NULL
++_002074_hash llcp_sock_recvmsg 4 13556 _002074_hash NULL
++_002075_hash long_retry_limit_read 3 59766 _002075_hash NULL
++_002076_hash lpfc_debugfs_dif_err_read 3 36303 _002076_hash NULL
++_002077_hash lpfc_debugfs_read 3 16566 _002077_hash NULL
++_002078_hash lpfc_idiag_baracc_read 3 58466 _002972_hash NULL nohasharray
++_002079_hash lpfc_idiag_ctlacc_read 3 33943 _002079_hash NULL
++_002080_hash lpfc_idiag_drbacc_read 3 15948 _002080_hash NULL
++_002081_hash lpfc_idiag_extacc_read 3 48301 _002081_hash NULL
++_002082_hash lpfc_idiag_mbxacc_read 3 28061 _002082_hash NULL
++_002083_hash lpfc_idiag_pcicfg_read 3 50334 _002083_hash NULL
++_002084_hash lpfc_idiag_queacc_read 3 13950 _002084_hash NULL
++_002085_hash lpfc_idiag_queinfo_read 3 55662 _002085_hash NULL
++_002086_hash lro_gen_skb 6 2644 _002086_hash NULL
++_002087_hash mac80211_format_buffer 2 41010 _002087_hash NULL
++_002088_hash macvtap_alloc_skb 2-4-3 50629 _002088_hash NULL
++_002091_hash macvtap_put_user 4 55609 _002091_hash NULL
++_002092_hash macvtap_sendmsg 4 30629 _002092_hash NULL
++_002093_hash mangle_packet 6-8 27864 _002093_hash NULL
++_002095_hash manip_pkt 3 7741 _002095_hash NULL
++_002096_hash mempool_create_kmalloc_pool 1 41650 _002096_hash NULL
++_002097_hash mempool_create_page_pool 1 30189 _002097_hash NULL
++_002098_hash mempool_create_slab_pool 1 62907 _002098_hash NULL
++_002099_hash mgmt_device_found 10 14146 _002099_hash NULL
++_002100_hash minstrel_stats_read 3 17290 _002100_hash NULL
++_002101_hash mmc_ext_csd_read 3 13205 _002101_hash NULL
++_002102_hash mon_bin_read 3 6841 _002102_hash NULL
++_002103_hash mon_stat_read 3 25238 _002103_hash NULL
++_002105_hash mqueue_read_file 3 6228 _002105_hash NULL
++_002106_hash mwifiex_debug_read 3 53074 _002106_hash NULL
++_002107_hash mwifiex_getlog_read 3 54269 _002107_hash NULL
++_002108_hash mwifiex_info_read 3 53447 _002108_hash NULL
++_002109_hash mwifiex_rdeeprom_read 3 51429 _002109_hash NULL
++_002110_hash mwifiex_regrdwr_read 3 34472 _002110_hash NULL
++_002111_hash named_prepare_buf 2 24532 _002111_hash NULL
++_002112_hash nci_send_cmd 3 58206 _002112_hash NULL
++_002113_hash netdev_alloc_skb_ip_align 2 40811 _002113_hash NULL
++_002114_hash netpoll_send_udp 3 58955 _002114_hash NULL
++_002115_hash nfcwilink_send_bts_cmd 3 10802 _002115_hash NULL
++_002116_hash nf_nat_mangle_tcp_packet 5-7 8643 _002116_hash NULL
++_002119_hash nfsd_vfs_read 6 62605 _002616_hash NULL nohasharray
++_002120_hash nfsd_vfs_write 6 54577 _002120_hash NULL
++_002121_hash nfs_idmap_lookup_id 2 10660 _002121_hash NULL
++_002122_hash ntfs_rl_realloc 3 56831 _002122_hash &_000370_hash
++_002123_hash ntfs_rl_realloc_nofail 3 32173 _002123_hash NULL
++_002124_hash o2hb_debug_read 3 37851 _002124_hash NULL
++_002125_hash o2net_debug_read 3 52105 _002125_hash NULL
++_002126_hash ocfs2_control_read 3 56405 _002126_hash NULL
++_002127_hash ocfs2_debug_read 3 14507 _002127_hash NULL
++_002128_hash oom_adjust_read 3 25127 _002128_hash NULL
++_002129_hash oom_score_adj_read 3 39921 _002426_hash NULL nohasharray
++_002130_hash oprofilefs_str_to_user 3 42182 _002130_hash NULL
++_002131_hash oprofilefs_ulong_to_user 3 11582 _002131_hash NULL
++_002132_hash osd_req_add_get_attr_list 3 49278 _002132_hash NULL
++_002133_hash _osd_req_list_objects 6 4204 _002133_hash NULL
++_002134_hash osd_req_read_kern 5 59990 _002134_hash NULL
++_002135_hash osd_req_write_kern 5 53486 _002135_hash NULL
++_002136_hash osst_read 3 40237 _002136_hash NULL
++_002137_hash p54_alloc_skb 3 34366 _002137_hash &_000485_hash
++_002138_hash p54_init_common 1 23850 _002138_hash NULL
++_002139_hash packet_alloc_skb 2-5-4 62602 _002139_hash NULL
++_002142_hash packet_sendmsg 4 24954 _002142_hash NULL
++_002143_hash page_readlink 3 23346 _002143_hash NULL
++_002144_hash pcf50633_write_block 3 2124 _002144_hash NULL
++_002145_hash pcpu_alloc_alloc_info 1-2 45813 _002145_hash NULL
++_002147_hash pep_indicate 5 38611 _002147_hash NULL
++_002148_hash pep_reply 5 50582 _002148_hash NULL
++_002149_hash pipe_handler_request 5 50774 _003582_hash NULL nohasharray
++_002150_hash platform_list_read_file 3 34734 _002150_hash NULL
++_002151_hash pm860x_bulk_write 3 43875 _002151_hash NULL
++_002152_hash pm_qos_power_read 3 55891 _002152_hash NULL
++_002153_hash port_show_regs 3 5904 _002153_hash NULL
++_002154_hash proc_coredump_filter_read 3 39153 _002154_hash NULL
++_002155_hash proc_fdinfo_read 3 62043 _002155_hash NULL
++_002156_hash proc_file_read 3 53905 _002156_hash NULL
++_002157_hash proc_info_read 3 63344 _002157_hash NULL
++_002158_hash proc_loginuid_read 3 15631 _002158_hash NULL
++_002159_hash proc_pid_attr_read 3 10173 _002159_hash NULL
++_002160_hash proc_pid_readlink 3 52186 _002160_hash NULL
++_002161_hash proc_read 3 43614 _002161_hash NULL
++_002162_hash proc_self_readlink 3 38094 _002162_hash NULL
++_002163_hash proc_sessionid_read 3 6911 _002299_hash NULL nohasharray
++_002164_hash provide_user_output 3 41105 _002164_hash NULL
++_002165_hash pskb_network_may_pull 2 35336 _002165_hash NULL
++_002166_hash pskb_pull 2 65005 _002166_hash NULL
++_002167_hash pstore_file_read 3 57288 _002167_hash NULL
++_002168_hash ql_process_mac_rx_page 4 15543 _002168_hash NULL
++_002169_hash ql_process_mac_rx_skb 4 6689 _002169_hash NULL
++_002170_hash queues_read 3 24877 _002170_hash NULL
++_002171_hash raw_recvmsg 4 17277 _002171_hash NULL
++_002172_hash rcname_read 3 25919 _002172_hash NULL
++_002173_hash read_4k_modal_eeprom 3 30212 _002173_hash NULL
++_002174_hash read_9287_modal_eeprom 3 59327 _002174_hash NULL
++_002175_hash reada_find_extent 2 63486 _002175_hash NULL
++_002176_hash read_def_modal_eeprom 3 14041 _002176_hash NULL
++_002177_hash read_enabled_file_bool 3 37744 _002177_hash NULL
++_002178_hash read_file_ani 3 23161 _002178_hash NULL
++_002179_hash read_file_antenna 3 13574 _002179_hash NULL
++_002180_hash read_file_base_eeprom 3 42168 _002180_hash NULL
++_002181_hash read_file_beacon 3 32595 _002181_hash NULL
++_002182_hash read_file_blob 3 57406 _002182_hash NULL
++_002183_hash read_file_bool 3 4180 _002183_hash NULL
++_002184_hash read_file_credit_dist_stats 3 54367 _002184_hash NULL
++_002185_hash read_file_debug 3 58256 _002185_hash NULL
++_002186_hash read_file_disable_ani 3 6536 _002186_hash NULL
++_002187_hash read_file_dma 3 9530 _002187_hash NULL
++_002188_hash read_file_dump_nfcal 3 18766 _002188_hash NULL
++_002189_hash read_file_frameerrors 3 64001 _002189_hash NULL
++_002190_hash read_file_interrupt 3 61742 _002197_hash NULL nohasharray
++_002191_hash read_file_misc 3 9948 _002191_hash NULL
++_002192_hash read_file_modal_eeprom 3 39909 _002192_hash NULL
++_002193_hash read_file_queue 3 40895 _002193_hash NULL
++_002194_hash read_file_rcstat 3 22854 _002194_hash NULL
++_002195_hash read_file_recv 3 48232 _002195_hash NULL
++_002196_hash read_file_regidx 3 33370 _002196_hash NULL
++_002197_hash read_file_regval 3 61742 _002197_hash &_002190_hash
++_002198_hash read_file_reset 3 52310 _002198_hash NULL
++_002199_hash read_file_rx_chainmask 3 41605 _002199_hash NULL
++_002200_hash read_file_slot 3 50111 _002200_hash NULL
++_002201_hash read_file_stations 3 35795 _002201_hash NULL
++_002202_hash read_file_tgt_int_stats 3 20697 _002202_hash NULL
++_002203_hash read_file_tgt_rx_stats 3 33944 _002203_hash NULL
++_002204_hash read_file_tgt_stats 3 8959 _002204_hash NULL
++_002205_hash read_file_tgt_tx_stats 3 51847 _002205_hash NULL
++_002206_hash read_file_tx_chainmask 3 3829 _002206_hash NULL
++_002207_hash read_file_war_stats 3 292 _002207_hash NULL
++_002208_hash read_file_xmit 3 21487 _002208_hash NULL
++_002209_hash read_flush_pipefs 3 20171 _002209_hash NULL
++_002210_hash read_flush_procfs 3 27642 _002210_hash NULL
++_002211_hash read_from_oldmem 2 3337 _002211_hash NULL
++_002212_hash read_oldmem 3 55658 _002212_hash NULL
++_002213_hash receive_packet 2 12367 _002213_hash NULL
++_002214_hash regmap_name_read_file 3 39379 _002214_hash NULL
++_002215_hash repair_io_failure 4 4815 _002215_hash NULL
++_002216_hash request_key_and_link 4 42693 _002216_hash NULL
++_002217_hash res_counter_read 4 33499 _002217_hash NULL
++_002218_hash rfcomm_tty_write 3 51603 _002218_hash NULL
++_002219_hash rs_sta_dbgfs_rate_scale_data_read 3 47165 _002219_hash NULL
++_002220_hash rs_sta_dbgfs_scale_table_read 3 40262 _002220_hash NULL
++_002221_hash rs_sta_dbgfs_stats_table_read 3 56573 _002221_hash NULL
++_002222_hash rts_threshold_read 3 44384 _002222_hash NULL
++_002223_hash rxrpc_sendmsg 4 29049 _002223_hash NULL
++_002224_hash scrub_setup_recheck_block 3-4 56245 _002224_hash NULL
++_002226_hash scsi_adjust_queue_depth 3 12802 _002226_hash NULL
++_002227_hash sctp_make_abort 3 34459 _002227_hash NULL
++_002228_hash sctp_make_asconf 3 4078 _002228_hash NULL
++_002229_hash sctp_make_asconf_ack 3 31726 _002229_hash NULL
++_002230_hash sctp_make_datafrag_empty 3 34737 _002230_hash NULL
++_002231_hash sctp_make_fwdtsn 3 53265 _002231_hash NULL
++_002232_hash sctp_make_heartbeat_ack 4 34411 _002232_hash NULL
++_002233_hash sctp_make_init 4 58401 _002233_hash NULL
++_002234_hash sctp_make_init_ack 4 3335 _002234_hash NULL
++_002235_hash sctp_make_op_error_space 3 5528 _002235_hash NULL
++_002236_hash sctp_manip_pkt 2 40620 _002236_hash NULL
++_002237_hash selinux_inode_notifysecctx 3 36896 _002237_hash NULL
++_002238_hash sel_read_avc_cache_threshold 3 33942 _002238_hash NULL
++_002239_hash sel_read_avc_hash_stats 3 1984 _002239_hash NULL
++_002240_hash sel_read_bool 3 24236 _002240_hash NULL
++_002241_hash sel_read_checkreqprot 3 33068 _002241_hash NULL
++_002242_hash sel_read_class 3 12669 _002960_hash NULL nohasharray
++_002243_hash sel_read_enforce 3 2828 _002243_hash NULL
++_002244_hash sel_read_handle_status 3 56139 _002244_hash NULL
++_002245_hash sel_read_handle_unknown 3 57933 _002245_hash NULL
++_002246_hash sel_read_initcon 3 32362 _002246_hash NULL
++_002247_hash sel_read_mls 3 25369 _002247_hash NULL
++_002248_hash sel_read_perm 3 42302 _002248_hash NULL
++_002249_hash sel_read_policy 3 55947 _002249_hash NULL
++_002250_hash sel_read_policycap 3 28544 _002250_hash NULL
++_002251_hash sel_read_policyvers 3 55 _002827_hash NULL nohasharray
++_002252_hash send_mpa_reject 3 7135 _002252_hash NULL
++_002253_hash send_mpa_reply 3 32372 _002253_hash NULL
++_002254_hash send_msg 4 37323 _002254_hash NULL
++_002255_hash send_packet 4 52960 _002255_hash NULL
++_002256_hash set_rxd_buffer_pointer 8 9950 _002256_hash NULL
++_002257_hash sge_rx 3 50594 _002257_hash NULL
++_002258_hash short_retry_limit_read 3 4687 _002258_hash NULL
++_002259_hash simple_attr_read 3 24738 _002259_hash NULL
++_002260_hash simple_transaction_read 3 17076 _002260_hash NULL
++_002261_hash sisusbcon_bmove 5-7-6 21873 _002261_hash NULL
++_002264_hash sisusbcon_clear 3-5-4 64329 _002264_hash NULL
++_002267_hash sisusbcon_putcs 3 57630 _002267_hash &_001043_hash
++_002268_hash sisusbcon_scroll 5-3-2 31315 _002268_hash NULL
++_002269_hash sisusbcon_scroll_area 3-4 25899 _002269_hash NULL
++_002271_hash skb_copy_datagram_const_iovec 2-5-4 48102 _002271_hash NULL
++_002274_hash skb_copy_datagram_iovec 2-4 5806 _002274_hash NULL
++_002276_hash skb_gro_header_slow 2 34958 _002276_hash NULL
++_002277_hash smk_read_ambient 3 61220 _002277_hash NULL
++_002278_hash smk_read_direct 3 15803 _002278_hash NULL
++_002279_hash smk_read_doi 3 30813 _002279_hash NULL
++_002280_hash smk_read_logging 3 37804 _002280_hash NULL
++_002281_hash smk_read_mapped 3 7562 _002281_hash NULL
++_002282_hash smk_read_onlycap 3 3855 _002282_hash NULL
++_002283_hash smp_build_cmd 3 45853 _002283_hash NULL
++_002284_hash snapshot_read 3 22601 _002284_hash NULL
++_002285_hash snd_cs4281_BA0_read 5 6847 _002285_hash NULL
++_002286_hash snd_cs4281_BA1_read 5 20323 _002286_hash NULL
++_002287_hash snd_cs46xx_io_read 5 45734 _002287_hash NULL
++_002288_hash snd_gus_dram_read 4 56686 _002288_hash NULL
++_002289_hash snd_mixart_BA0_read 5 45069 _002289_hash NULL
++_002290_hash snd_mixart_BA1_read 5 5082 _002290_hash NULL
++_002291_hash snd_pcm_oss_read 3 28317 _002291_hash NULL
++_002292_hash snd_pcm_plug_alloc 2 42339 _002292_hash NULL
++_002293_hash snd_rawmidi_kernel_read 3 4328 _002293_hash NULL
++_002294_hash snd_rawmidi_read 3 56337 _002294_hash NULL
++_002295_hash snd_rme32_capture_copy 5 39653 _002295_hash NULL
++_002296_hash snd_rme96_capture_copy 5 58484 _002296_hash NULL
++_002297_hash snd_soc_hw_bulk_write_raw 4 14245 _002297_hash NULL
++_002298_hash sock_alloc_send_skb 2 23720 _002298_hash NULL
++_002299_hash spi_show_regs 3 6911 _002299_hash &_002163_hash
++_002300_hash sta_agg_status_read 3 14058 _002300_hash NULL
++_002301_hash sta_connected_time_read 3 17435 _002301_hash NULL
++_002302_hash sta_flags_read 3 56710 _002302_hash NULL
++_002303_hash sta_ht_capa_read 3 10366 _002303_hash NULL
++_002304_hash sta_last_seq_ctrl_read 3 19106 _002304_hash NULL
++_002305_hash sta_num_ps_buf_frames_read 3 1488 _002305_hash NULL
++_002306_hash st_read 3 51251 _002306_hash NULL
++_002307_hash supply_map_read_file 3 10608 _002307_hash NULL
++_002308_hash sysfs_read_file 3 42113 _002308_hash NULL
++_002309_hash sys_preadv 3 17100 _002309_hash NULL
++_002310_hash sys_pwritev 3 41722 _002310_hash NULL
++_002311_hash sys_readv 3 50664 _002311_hash NULL
++_002312_hash sys_rt_sigpending 2 24961 _002312_hash NULL
++_002313_hash sys_writev 3 28384 _002313_hash NULL
++_002314_hash tcf_csum_skb_nextlayer 3 64025 _002314_hash NULL
++_002315_hash tcp_fragment 3 20436 _002315_hash NULL
++_002316_hash tcp_manip_pkt 2 14202 _002316_hash NULL
++_002317_hash teiup_create 3 43201 _002317_hash NULL
++_002318_hash test_iso_queue 5 62534 _002318_hash NULL
++_002319_hash tg3_run_loopback 2 30093 _002319_hash NULL
++_002320_hash tipc_msg_build 4 12326 _002320_hash NULL
++_002321_hash TSS_authhmac 3 12839 _002321_hash NULL
++_002322_hash TSS_checkhmac1 5 31429 _002322_hash NULL
++_002323_hash TSS_checkhmac2 5-7 40520 _002323_hash NULL
++_002325_hash tty_audit_log 8 47280 _002325_hash NULL
++_002326_hash tty_buffer_request_room 2 23228 _002326_hash NULL
++_002327_hash tty_insert_flip_string_fixed_flag 4 37428 _002327_hash NULL
++_002328_hash tty_insert_flip_string_flags 4 30969 _002328_hash NULL
++_002329_hash tty_prepare_flip_string 3 39955 _002329_hash NULL
++_002330_hash tty_prepare_flip_string_flags 4 59240 _002330_hash NULL
++_002331_hash tun_alloc_skb 2-4-3 41216 _002331_hash NULL
++_002334_hash tun_sendmsg 4 10337 _002334_hash NULL
++_002335_hash u32_array_read 3 2219 _002335_hash NULL
++_002336_hash ubi_io_write_data 4-5 40305 _002336_hash NULL
++_002338_hash udplite_manip_pkt 2 62433 _002338_hash NULL
++_002339_hash udp_manip_pkt 2 50770 _002339_hash NULL
++_002340_hash uhci_debug_read 3 5911 _002340_hash NULL
++_002341_hash um_idi_read 3 850 _002341_hash NULL
++_002342_hash unix_seqpacket_sendmsg 4 27893 _002342_hash NULL
++_002343_hash unix_stream_recvmsg 4 35210 _002343_hash NULL
++_002344_hash unlink_simple 3 47506 _002344_hash NULL
++_002345_hash use_pool 2 64607 _002345_hash NULL
++_002346_hash v9fs_fid_readn 4 60544 _002346_hash NULL
++_002347_hash v9fs_file_read 3 40858 _002347_hash NULL
++_002348_hash vhci_read 3 47878 _002348_hash NULL
++_002349_hash vhost_add_used_and_signal_n 4 8038 _002349_hash NULL
++_002350_hash vmbus_open 2-3 12154 _002350_hash NULL
++_002352_hash vxge_rx_alloc 3 52024 _002352_hash NULL
++_002353_hash waiters_read 3 40902 _002353_hash NULL
++_002354_hash wm8994_bulk_write 3 13615 _002354_hash NULL
++_002355_hash write_pbl 4 59583 _002355_hash NULL
++_002356_hash wusb_prf_256 7 29203 _002356_hash NULL
++_002357_hash wusb_prf_64 7 51065 _002357_hash NULL
++_002358_hash _xfs_buf_alloc 3 38058 _002358_hash NULL
++_002359_hash xfs_buf_read_uncached 3 42844 _002359_hash NULL
++_002360_hash xfs_file_buffered_aio_write 4 11492 _002360_hash NULL
++_002361_hash xfs_iext_add 3 41422 _002361_hash NULL
++_002362_hash xfs_iext_remove_direct 3 40744 _002362_hash NULL
++_002363_hash xfs_readdir 3 41200 _002363_hash NULL
++_002364_hash xfs_trans_get_efd 3 51148 _002364_hash NULL
++_002365_hash xfs_trans_get_efi 2 7898 _002365_hash NULL
++_002366_hash xlog_bread_offset 3 60030 _002366_hash NULL
++_002367_hash xlog_get_bp 2 23229 _002367_hash NULL
++_002368_hash xz_dec_init 2 29029 _002368_hash NULL
++_002369_hash aac_change_queue_depth 2 825 _002369_hash NULL
++_002370_hash add_rx_skb 3 8257 _002370_hash NULL
++_002371_hash afs_extract_data 5 50261 _002371_hash NULL
++_002372_hash arcmsr_adjust_disk_queue_depth 2 16756 _002372_hash NULL
++_002373_hash atalk_recvmsg 4 22053 _002373_hash NULL
++_002374_hash ath6kl_buf_alloc 1 57304 _002374_hash NULL
++_002376_hash atomic_read_file 3 16227 _002376_hash NULL
++_002377_hash ax25_recvmsg 4 64441 _002377_hash NULL
++_002378_hash batadv_add_packet 3 12136 _002378_hash NULL
++_002379_hash batadv_iv_ogm_aggregate_new 2 54761 _002379_hash NULL
++_002380_hash batadv_tt_response_fill_table 1 39236 _002380_hash NULL
++_002381_hash beiscsi_process_async_pdu 7 39834 _002381_hash NULL
++_002382_hash bioset_create 1 5580 _002382_hash NULL
++_002383_hash bioset_integrity_create 2 62708 _002383_hash NULL
++_002384_hash biovec_create_pools 2 9575 _002384_hash NULL
++_002385_hash bnx2fc_process_l2_frame_compl 3 65072 _002385_hash NULL
++_002386_hash brcmf_sdbrcm_died_dump 3 15841 _002386_hash NULL
++_002387_hash brcmu_pkt_buf_get_skb 1 5556 _002387_hash NULL
++_002388_hash br_send_bpdu 3 29669 _002388_hash NULL
++_002389_hash btrfs_error_discard_extent 2 50444 _002389_hash NULL
++_002390_hash __btrfs_free_reserved_extent 2 31207 _002390_hash NULL
++_002391_hash btrfsic_cmp_log_and_dev_bytenr 2 49628 _002391_hash NULL
++_002392_hash btrfsic_create_link_to_next_block 4 58246 _002392_hash NULL
++_002393_hash btrfs_init_new_buffer 4 55761 _002393_hash NULL
++_002394_hash btrfs_mksubvol 3 58240 _002394_hash NULL
++_002395_hash bt_skb_send_alloc 2 6581 _002395_hash NULL
++_002396_hash bt_sock_recvmsg 4 12316 _002396_hash NULL
++_002397_hash bt_sock_stream_recvmsg 4 52518 _002397_hash NULL
++_002398_hash c4iw_reject_cr 3 28174 _002398_hash NULL
++_002399_hash caif_seqpkt_recvmsg 4 32241 _002399_hash NULL
++_002400_hash carl9170_rx_copy_data 2 21656 _002400_hash NULL
++_002401_hash cfpkt_append 3 61206 _002401_hash NULL
++_002402_hash cfpkt_setlen 2 49343 _002402_hash NULL
++_002403_hash cgroup_file_read 3 28804 _002403_hash NULL
++_002404_hash cosa_net_setup_rx 2 38594 _002404_hash NULL
++_002405_hash cpu_type_read 3 36540 _002405_hash NULL
++_002406_hash cxgb4_pktgl_to_skb 2 61899 _002406_hash NULL
++_002408_hash dccp_recvmsg 4 16056 _002408_hash NULL
++_002409_hash ddp_clear_map 4 46152 _002409_hash NULL
++_002410_hash ddp_set_map 4 751 _002410_hash NULL
++_002411_hash depth_read 3 31112 _002411_hash NULL
++_002412_hash dfs_global_file_read 3 7787 _002412_hash NULL
++_002413_hash dgram_recvmsg 4 23104 _002413_hash NULL
++_002414_hash diva_init_dma_map 3 58336 _002414_hash NULL
++_002415_hash divas_write 3 63901 _002415_hash NULL
++_002416_hash dma_push_rx 2 39973 _002416_hash NULL
++_002417_hash dma_skb_copy_datagram_iovec 3-5 21516 _002417_hash NULL
++_002419_hash dm_table_create 3 35687 _002419_hash NULL
++_002420_hash dn_alloc_send_pskb 2 4465 _002420_hash NULL
++_002421_hash dn_nsp_return_disc 2 60296 _002421_hash NULL
++_002422_hash dn_nsp_send_disc 2 23469 _002422_hash NULL
++_002423_hash dsp_tone_hw_message 3 17678 _002423_hash NULL
++_002424_hash e1000_check_copybreak 3 62448 _002424_hash NULL
++_002425_hash enable_read 3 2117 _002425_hash &_000224_hash
++_002426_hash exofs_read_kern 6 39921 _002426_hash &_002129_hash
++_002427_hash fast_rx_path 3 59214 _002427_hash NULL
++_002428_hash fc_change_queue_depth 2 36841 _002428_hash NULL
++_002429_hash fc_fcp_frame_alloc 2 12624 _002429_hash NULL
++_002430_hash fcoe_ctlr_send_keep_alive 3 15308 _002430_hash NULL
++_002431_hash frequency_read 3 64031 _003698_hash NULL nohasharray
++_002432_hash ftdi_process_packet 5 45005 _002432_hash NULL
++_002433_hash fuse_conn_congestion_threshold_read 3 51028 _002433_hash NULL
++_002434_hash fuse_conn_max_background_read 3 10855 _002434_hash NULL
++_002435_hash fwnet_incoming_packet 3 40380 _002435_hash NULL
++_002436_hash fwnet_pd_new 4 39947 _003402_hash NULL nohasharray
++_002437_hash get_alua_req 3 4166 _002437_hash NULL
++_002438_hash get_rdac_req 3 45882 _002438_hash NULL
++_002439_hash got_frame 2 16028 _002439_hash NULL
++_002440_hash gsm_mux_rx_netchar 3 33336 _002440_hash NULL
++_002441_hash hci_sock_recvmsg 4 7072 _002441_hash NULL
++_002442_hash hdlcdev_rx 3 997 _002442_hash NULL
++_002443_hash hdlc_empty_fifo 2 18397 _002443_hash NULL
++_002444_hash hfc_empty_fifo 2 57972 _002444_hash NULL
++_002445_hash hfcpci_empty_fifo 4 2427 _002445_hash NULL
++_002446_hash hfcsusb_rx_frame 3 52745 _002446_hash NULL
++_002447_hash hidp_output_raw_report 3 5629 _002447_hash NULL
++_002448_hash hpsa_change_queue_depth 2 15449 _002448_hash NULL
++_002449_hash hptiop_adjust_disk_queue_depth 2 20122 _002449_hash NULL
++_002450_hash hscx_empty_fifo 2 13360 _002450_hash NULL
++_002451_hash hysdn_rx_netpkt 3 16136 _002451_hash NULL
++_002452_hash i2o_pool_alloc 4 55485 _002452_hash NULL
++_002453_hash ide_queue_pc_tail 5 11673 _002453_hash NULL
++_002454_hash ide_raw_taskfile 4 42355 _002454_hash NULL
++_002455_hash idetape_queue_rw_tail 3 29562 _002455_hash NULL
++_002456_hash ieee80211_amsdu_to_8023s 5 15561 _002456_hash NULL
++_002457_hash ieee80211_fragment 4 33112 _002457_hash NULL
++_002458_hash ieee80211_if_read_aid 3 9705 _002458_hash NULL
++_002459_hash ieee80211_if_read_auto_open_plinks 3 38268 _002459_hash &_000374_hash
++_002460_hash ieee80211_if_read_ave_beacon 3 64924 _002460_hash NULL
++_002461_hash ieee80211_if_read_bssid 3 35161 _002461_hash NULL
++_002462_hash ieee80211_if_read_channel_type 3 23884 _002462_hash NULL
++_002463_hash ieee80211_if_read_dot11MeshConfirmTimeout 3 60670 _002463_hash NULL
++_002464_hash ieee80211_if_read_dot11MeshForwarding 3 13940 _002464_hash NULL
++_002465_hash ieee80211_if_read_dot11MeshGateAnnouncementProtocol 3 14486 _002465_hash NULL
++_002466_hash ieee80211_if_read_dot11MeshHoldingTimeout 3 47356 _002466_hash NULL
++_002467_hash ieee80211_if_read_dot11MeshHWMPactivePathTimeout 3 7368 _002467_hash NULL
++_002468_hash ieee80211_if_read_dot11MeshHWMPactivePathToRootTimeout 3 17618 _002468_hash NULL
++_002469_hash ieee80211_if_read_dot11MeshHWMPconfirmationInterval 3 57722 _002469_hash NULL
++_002470_hash ieee80211_if_read_dot11MeshHWMPmaxPREQretries 3 59829 _002470_hash NULL
++_002471_hash ieee80211_if_read_dot11MeshHWMPnetDiameterTraversalTime 3 1589 _002471_hash NULL
++_002472_hash ieee80211_if_read_dot11MeshHWMPperrMinInterval 3 17346 _002472_hash NULL
++_002473_hash ieee80211_if_read_dot11MeshHWMPpreqMinInterval 3 24208 _002473_hash NULL
++_002474_hash ieee80211_if_read_dot11MeshHWMPRannInterval 3 2249 _002474_hash NULL
++_002475_hash ieee80211_if_read_dot11MeshHWMProotInterval 3 27873 _002475_hash NULL
++_002476_hash ieee80211_if_read_dot11MeshHWMPRootMode 3 51441 _002476_hash NULL
++_002477_hash ieee80211_if_read_dot11MeshMaxPeerLinks 3 23878 _002477_hash NULL
++_002478_hash ieee80211_if_read_dot11MeshMaxRetries 3 12756 _002478_hash NULL
++_002479_hash ieee80211_if_read_dot11MeshRetryTimeout 3 52168 _002479_hash NULL
++_002480_hash ieee80211_if_read_dot11MeshTTL 3 58307 _002480_hash NULL
++_002481_hash ieee80211_if_read_dropped_frames_congestion 3 32603 _002481_hash NULL
++_002482_hash ieee80211_if_read_dropped_frames_no_route 3 33383 _002482_hash NULL
++_002483_hash ieee80211_if_read_dropped_frames_ttl 3 44500 _002483_hash NULL
++_002484_hash ieee80211_if_read_drop_unencrypted 3 37053 _002484_hash NULL
++_002485_hash ieee80211_if_read_dtim_count 3 38419 _002485_hash NULL
++_002486_hash ieee80211_if_read_element_ttl 3 18869 _002486_hash NULL
++_002487_hash ieee80211_if_read_estab_plinks 3 32533 _002487_hash NULL
++_002488_hash ieee80211_if_read_flags 3 57470 _002919_hash NULL nohasharray
++_002489_hash ieee80211_if_read_fwded_frames 3 36520 _002489_hash NULL
++_002490_hash ieee80211_if_read_fwded_mcast 3 39571 _002490_hash &_000162_hash
++_002491_hash ieee80211_if_read_fwded_unicast 3 59740 _002491_hash &_001697_hash
++_002492_hash ieee80211_if_read_ht_opmode 3 29044 _002492_hash NULL
++_002493_hash ieee80211_if_read_last_beacon 3 31257 _002493_hash NULL
++_002494_hash ieee80211_if_read_min_discovery_timeout 3 13946 _002494_hash NULL
++_002495_hash ieee80211_if_read_num_buffered_multicast 3 12716 _002495_hash NULL
++_002496_hash ieee80211_if_read_num_mcast_sta 3 12419 _002496_hash NULL
++_002497_hash ieee80211_if_read_num_sta_ps 3 34722 _002497_hash NULL
++_002498_hash ieee80211_if_read_path_refresh_time 3 25545 _002498_hash NULL
++_002499_hash ieee80211_if_read_peer 3 45233 _002499_hash NULL
++_002500_hash ieee80211_if_read_rc_rateidx_mask_2ghz 3 61570 _002500_hash NULL
++_002501_hash ieee80211_if_read_rc_rateidx_mask_5ghz 3 27183 _002501_hash NULL
++_002502_hash ieee80211_if_read_rc_rateidx_mcs_mask_2ghz 3 37675 _002502_hash NULL
++_002503_hash ieee80211_if_read_rc_rateidx_mcs_mask_5ghz 3 44423 _002503_hash NULL
++_002504_hash ieee80211_if_read_rssi_threshold 3 49260 _002504_hash NULL
++_002505_hash ieee80211_if_read_smps 3 27416 _002505_hash NULL
++_002506_hash ieee80211_if_read_state 3 9813 _002707_hash NULL nohasharray
++_002507_hash ieee80211_if_read_tkip_mic_test 3 19565 _002507_hash NULL
++_002508_hash ieee80211_if_read_tsf 3 16420 _002508_hash NULL
++_002509_hash ieee80211_if_read_uapsd_max_sp_len 3 15067 _002509_hash NULL
++_002510_hash ieee80211_if_read_uapsd_queues 3 55150 _002510_hash NULL
++_002511_hash ieee80211_mgmt_tx 9 46860 _002511_hash NULL
++_002512_hash ieee80211_probereq_get 4-6 29069 _002512_hash NULL
++_002514_hash ieee80211_rx_mgmt_beacon 3 24430 _002514_hash NULL
++_002515_hash ieee80211_rx_mgmt_probe_resp 3 6918 _002515_hash NULL
++_002516_hash ieee80211_send_auth 5 24121 _002516_hash NULL
++_002517_hash ieee80211_set_probe_resp 3 10077 _002517_hash NULL
++_002518_hash ieee80211_tdls_mgmt 8 9581 _002518_hash NULL
++_002519_hash ima_show_htable_violations 3 10619 _002519_hash NULL
++_002520_hash ima_show_measurements_count 3 23536 _002520_hash NULL
++_002521_hash insert_one_name 7 61668 _002521_hash NULL
++_002522_hash ip6_ufo_append_data 5-7-6 4780 _002522_hash NULL
++_002525_hash ip_append_data 5-6 16942 _002525_hash NULL
++_002526_hash ip_make_skb 5-6 13129 _002526_hash NULL
++_002527_hash ip_nat_sdp_port 6 52938 _002527_hash NULL
++_002528_hash ip_nat_sip_expect 7 45693 _002528_hash NULL
++_002529_hash ipr_change_queue_depth 2 6431 _002529_hash NULL
++_002530_hash ip_recv_error 3 23109 _002530_hash NULL
++_002531_hash ip_ufo_append_data 6-8-7 12775 _002531_hash NULL
++_002534_hash ipv6_recv_error 3 56347 _002534_hash NULL
++_002535_hash ipv6_recv_rxpmtu 3 7142 _002535_hash NULL
++_002536_hash ipw_packet_received_skb 2 1230 _002536_hash NULL
++_002537_hash ipx_recvmsg 4 44366 _002537_hash NULL
++_002538_hash irda_recvmsg_dgram 4 32631 _002538_hash NULL
++_002539_hash iscsi_change_queue_depth 2 23416 _002539_hash NULL
++_002540_hash iscsi_complete_pdu 4 48372 _002540_hash NULL
++_002541_hash iwch_reject_cr 3 23901 _002541_hash NULL
++_002542_hash ixgb_check_copybreak 3 5847 _002542_hash NULL
++_002543_hash key_conf_hw_key_idx_read 3 25003 _002543_hash NULL
++_002544_hash key_conf_keyidx_read 3 42443 _002544_hash NULL
++_002545_hash key_conf_keylen_read 3 49758 _002545_hash NULL
++_002546_hash key_flags_read 3 25931 _002546_hash NULL
++_002547_hash key_ifindex_read 3 31411 _002547_hash NULL
++_002548_hash key_tx_rx_count_read 3 44742 _002548_hash NULL
++_002549_hash kmsg_read 3 46514 _002549_hash NULL
++_002550_hash l1oip_socket_parse 4 4507 _002550_hash NULL
++_002551_hash l2cap_send_cmd 4 14548 _002551_hash NULL
++_002552_hash l2cap_sock_sendmsg 4 63427 _002552_hash NULL
++_002553_hash l2tp_ip6_recvmsg 4 62874 _002553_hash NULL
++_002554_hash l2tp_ip6_sendmsg 4 7461 _002554_hash NULL
++_002555_hash l2tp_ip_recvmsg 4 22681 _002555_hash NULL
++_002556_hash lbs_bcnmiss_read 3 8678 _002556_hash NULL
++_002557_hash lbs_failcount_read 3 31063 _002557_hash NULL
++_002558_hash lbs_highrssi_read 3 64089 _002558_hash NULL
++_002559_hash lbs_highsnr_read 3 5931 _002559_hash NULL
++_002560_hash lbs_lowrssi_read 3 32242 _002560_hash NULL
++_002561_hash lbs_lowsnr_read 3 29571 _002561_hash NULL
++_002563_hash llc_ui_recvmsg 4 3826 _002563_hash NULL
++_002564_hash lowpan_fragment_xmit 3-4 22095 _002564_hash NULL
++_002566_hash lpfc_change_queue_depth 2 25905 _002566_hash NULL
++_002568_hash macvtap_do_read 4 36555 _002568_hash &_002050_hash
++_002569_hash mangle_sdp_packet 9 36279 _002569_hash NULL
++_002570_hash map_addr 6 4666 _002570_hash NULL
++_002571_hash mcs_unwrap_fir 3 25733 _002571_hash NULL
++_002572_hash mcs_unwrap_mir 3 9455 _002572_hash NULL
++_002573_hash megaraid_change_queue_depth 2 64815 _002573_hash NULL
++_002574_hash megasas_change_queue_depth 2 32747 _002574_hash NULL
++_002575_hash mld_newpack 2 50950 _002575_hash NULL
++_002576_hash mptscsih_change_queue_depth 2 26036 _002576_hash NULL
++_002577_hash named_distribute 4 48544 _002577_hash NULL
++_002578_hash NCR_700_change_queue_depth 2 31742 _002578_hash NULL
++_002579_hash netlink_recvmsg 4 61600 _002579_hash NULL
++_002580_hash nfc_alloc_send_skb 4 3167 _002580_hash NULL
++_002581_hash nf_nat_ftp 5 47948 _002581_hash NULL
++_002582_hash nfsctl_transaction_read 3 48250 _002582_hash NULL
++_002583_hash nfsd_read 5 19568 _002583_hash NULL
++_002584_hash nfsd_read_file 6 62241 _002584_hash NULL
++_002585_hash nfsd_write 6 54809 _002585_hash NULL
++_002586_hash nfs_map_group_to_gid 3 15892 _002586_hash NULL
++_002587_hash nfs_map_name_to_uid 3 51132 _002587_hash NULL
++_002588_hash nr_recvmsg 4 12649 _002588_hash NULL
++_002589_hash ntfs_rl_append 2-4 6037 _002589_hash NULL
++_002591_hash ntfs_rl_insert 2-4 4931 _002591_hash NULL
++_002593_hash ntfs_rl_replace 2-4 14136 _002593_hash NULL
++_002595_hash ntfs_rl_split 2-4 52328 _002595_hash NULL
++_002597_hash osd_req_list_collection_objects 5 36664 _002597_hash NULL
++_002598_hash osd_req_list_partition_objects 5 56464 _002598_hash NULL
++_002599_hash osd_req_read_sg 5 47905 _002599_hash NULL
++_002600_hash osd_req_write_sg 5 50908 _002600_hash NULL
++_002602_hash p54_download_eeprom 4 43842 _002602_hash NULL
++_002604_hash packet_recv_error 3 16669 _002604_hash NULL
++_002605_hash packet_recvmsg 4 47700 _002605_hash NULL
++_002606_hash pep_recvmsg 4 19402 _002606_hash NULL
++_002607_hash pfkey_recvmsg 4 53604 _002607_hash NULL
++_002608_hash ping_recvmsg 4 25597 _002608_hash NULL
++_002609_hash pmcraid_change_queue_depth 2 9116 _002609_hash NULL
++_002610_hash pn_recvmsg 4 30887 _002610_hash NULL
++_002611_hash pointer_size_read 3 51863 _002611_hash NULL
++_002612_hash power_read 3 15939 _002612_hash NULL
++_002613_hash pppoe_recvmsg 4 15073 _002613_hash NULL
++_002614_hash pppol2tp_recvmsg 4 57742 _002993_hash NULL nohasharray
++_002615_hash ppp_tx_cp 5 62044 _002615_hash NULL
++_002616_hash prism2_send_mgmt 4 62605 _002616_hash &_002119_hash
++_002617_hash prism2_sta_send_mgmt 5 43916 _002617_hash NULL
++_002618_hash prison_create 1 43623 _002618_hash NULL
++_002619_hash qla2x00_adjust_sdev_qdepth_up 2 20097 _002619_hash NULL
++_002620_hash qla2x00_change_queue_depth 2 24742 _002620_hash NULL
++_002621_hash _queue_data 4 54983 _002621_hash NULL
++_002622_hash raw_recvmsg 4 52529 _002622_hash NULL
++_002623_hash rawsock_recvmsg 4 12144 _002623_hash NULL
++_002624_hash rawv6_recvmsg 4 30265 _002624_hash NULL
++_002625_hash rds_tcp_data_recv 3 53476 _002625_hash NULL
++_002626_hash reada_add_block 2 54247 _002626_hash NULL
++_002627_hash readahead_tree_block 3 36285 _002627_hash NULL
++_002628_hash reada_tree_block_flagged 3 18402 _002628_hash NULL
++_002629_hash read_dma 3 55086 _002629_hash NULL
++_002630_hash read_fifo 3 826 _002630_hash NULL
++_002631_hash read_tree_block 3 841 _002631_hash NULL
++_002632_hash receive_copy 3 12216 _002632_hash NULL
++_002633_hash recover_peb 6-7 29238 _002633_hash NULL
++_002635_hash recv_msg 4 48709 _002635_hash NULL
++_002636_hash recv_stream 4 30138 _002636_hash NULL
++_002637_hash _req_append_segment 2 41031 _002637_hash NULL
++_002638_hash request_key_async 4 6990 _002638_hash NULL
++_002639_hash request_key_async_with_auxdata 4 46624 _002639_hash NULL
++_002640_hash request_key_with_auxdata 4 24515 _002640_hash NULL
++_002641_hash rose_recvmsg 4 2368 _002641_hash &_001788_hash
++_002642_hash rtl8169_try_rx_copy 3 705 _002642_hash NULL
++_002643_hash _rtl92s_firmware_downloadcode 3 14021 _002643_hash NULL
++_002644_hash rx_data 4 60442 _002644_hash NULL
++_002645_hash rxrpc_recvmsg 4 26233 _002645_hash NULL
++_002646_hash sas_change_queue_depth 2 18555 _002646_hash NULL
++_002647_hash scsi_activate_tcq 2 42640 _002647_hash NULL
++_002648_hash scsi_deactivate_tcq 2 47086 _002648_hash NULL
++_002649_hash scsi_execute 5 33596 _002649_hash NULL
++_002650_hash _scsih_adjust_queue_depth 2 1083 _002650_hash NULL
++_002651_hash scsi_init_shared_tag_map 2 59812 _002651_hash NULL
++_002652_hash scsi_track_queue_full 2 44239 _002652_hash NULL
++_002653_hash sctp_abort_pkt_new 5 55218 _002653_hash NULL
++_002654_hash sctp_make_abort_violation 4 27959 _002654_hash NULL
++_002655_hash sctp_make_op_error 5-6 7057 _002655_hash NULL
++_002657_hash sctp_recvmsg 4 23265 _002657_hash NULL
++_002658_hash send_stream 4 3397 _002658_hash NULL
++_002659_hash sis190_try_rx_copy 3 57069 _002659_hash NULL
++_002664_hash skb_copy_and_csum_datagram_iovec 2 24466 _002664_hash NULL
++_002666_hash skge_rx_get 3 40598 _002666_hash NULL
++_002667_hash smp_send_cmd 3 512 _002667_hash NULL
++_002668_hash snd_gf1_mem_proc_dump 5 16926 _003499_hash NULL nohasharray
++_002669_hash sta_dev_read 3 14782 _002669_hash NULL
++_002670_hash sta_inactive_ms_read 3 25690 _002670_hash NULL
++_002671_hash sta_last_signal_read 3 31818 _002671_hash NULL
++_002672_hash stats_dot11ACKFailureCount_read 3 45558 _002672_hash NULL
++_002673_hash stats_dot11FCSErrorCount_read 3 28154 _002673_hash NULL
++_002674_hash stats_dot11RTSFailureCount_read 3 43948 _002674_hash NULL
++_002675_hash stats_dot11RTSSuccessCount_read 3 33065 _002675_hash NULL
++_002676_hash storvsc_connect_to_vsp 2 22 _002676_hash NULL
++_002677_hash sys_msgrcv 3 959 _002677_hash NULL
++_002678_hash sys_syslog 3 10746 _002678_hash NULL
++_002679_hash tcf_csum_ipv4_icmp 3 9258 _002679_hash NULL
++_002680_hash tcf_csum_ipv4_igmp 3 60446 _002680_hash NULL
++_002681_hash tcf_csum_ipv4_tcp 4 39713 _002681_hash NULL
++_002682_hash tcf_csum_ipv4_udp 4 30777 _002682_hash NULL
++_002683_hash tcf_csum_ipv6_icmp 4 11738 _002683_hash NULL
++_002684_hash tcf_csum_ipv6_tcp 4 54877 _002684_hash NULL
++_002685_hash tcf_csum_ipv6_udp 4 25241 _002685_hash NULL
++_002686_hash tcm_loop_change_queue_depth 2 42454 _002686_hash NULL
++_002687_hash tcp_copy_to_iovec 3 28344 _002687_hash NULL
++_002688_hash tcp_mark_head_lost 2 35895 _002688_hash NULL
++_002689_hash tcp_match_skb_to_sack 4 23568 _002689_hash NULL
++_002690_hash timeout_read 3 47915 _002690_hash NULL
++_002691_hash tipc_multicast 5 49144 _002691_hash NULL
++_002692_hash tipc_port_recv_sections 4 42890 _002692_hash NULL
++_002693_hash tipc_port_reject_sections 5 55229 _002693_hash NULL
++_002694_hash total_ps_buffered_read 3 16365 _002694_hash NULL
++_002695_hash tso_fragment 3 29050 _002695_hash NULL
++_002696_hash tty_insert_flip_string 3 34042 _002696_hash NULL
++_002698_hash tun_put_user 4 59849 _002698_hash NULL
++_002699_hash twa_change_queue_depth 2 48808 _002699_hash NULL
++_002700_hash tw_change_queue_depth 2 11116 _002700_hash NULL
++_002701_hash twl_change_queue_depth 2 41342 _002701_hash NULL
++_002702_hash ubi_eba_atomic_leb_change 5 60379 _002702_hash NULL
++_002703_hash ubi_eba_write_leb 5-6 36029 _002703_hash NULL
++_002705_hash ubi_eba_write_leb_st 5 44343 _002705_hash NULL
++_002706_hash udp_recvmsg 4 42558 _002706_hash NULL
++_002707_hash udpv6_recvmsg 4 9813 _002707_hash &_002506_hash
++_002708_hash udpv6_sendmsg 4 22316 _002708_hash NULL
++_002709_hash ulong_read_file 3 42304 _002709_hash &_000522_hash
++_002710_hash unix_dgram_recvmsg 4 14952 _002710_hash NULL
++_002711_hash user_power_read 3 39414 _002711_hash NULL
++_002712_hash v9fs_direct_read 3 45546 _002712_hash NULL
++_002713_hash v9fs_file_readn 4 36353 _002713_hash &_001799_hash
++_002714_hash vcc_recvmsg 4 37198 _002714_hash NULL
++_002715_hash velocity_rx_copy 2 34583 _002715_hash NULL
++_002716_hash W6692_empty_Bfifo 2 47804 _002716_hash NULL
++_002717_hash wep_iv_read 3 54744 _002717_hash NULL
++_002718_hash x25_recvmsg 4 42777 _002718_hash NULL
++_002719_hash xfs_buf_get_map 3 24522 _002719_hash NULL
++_002720_hash xfs_file_aio_write 4 33234 _002720_hash NULL
++_002721_hash xfs_iext_insert 3 18667 _002741_hash NULL nohasharray
++_002722_hash xfs_iext_remove 3 50909 _002722_hash NULL
++_002723_hash xlog_do_recovery_pass 3 21618 _002723_hash NULL
++_002724_hash xlog_find_verify_log_record 2 18870 _002724_hash NULL
++_002725_hash zd_mac_rx 3 38296 _002725_hash NULL
++_002726_hash aircable_process_packet 5 46639 _002726_hash NULL
++_002727_hash ath6kl_wmi_get_new_buf 1 52304 _002727_hash NULL
++_002728_hash batadv_iv_ogm_queue_add 3 46319 _002728_hash NULL
++_002729_hash batadv_receive_client_update_packet 3 41578 _002729_hash NULL
++_002730_hash batadv_receive_server_sync_packet 3 26577 _002730_hash &_000494_hash
++_002731_hash brcmf_alloc_pkt_and_read 2 63116 _002731_hash &_002028_hash
++_002732_hash brcmf_sdcard_recv_buf 6 38179 _002732_hash NULL
++_002733_hash brcmf_sdcard_rwdata 5 65041 _002733_hash NULL
++_002734_hash brcmf_sdcard_send_buf 6 7713 _002734_hash NULL
++_002735_hash brcmf_sdio_forensic_read 3 35311 _002735_hash &_001382_hash
++_002736_hash btrfs_alloc_free_block 3 8986 _002736_hash NULL
++_002737_hash btrfs_free_and_pin_reserved_extent 2 53016 _002737_hash NULL
++_002738_hash btrfs_free_reserved_extent 2 9867 _002738_hash NULL
++_002739_hash carl9170_handle_mpdu 3 11056 _002739_hash NULL
++_002740_hash do_trimming 3 26952 _002740_hash NULL
++_002741_hash edge_tty_recv 4 18667 _002741_hash &_002721_hash
++_002742_hash fwnet_receive_packet 9 50537 _002742_hash NULL
++_002743_hash gigaset_if_receive 3 4861 _002743_hash NULL
++_002744_hash gsm_dlci_data 3 14155 _002744_hash NULL
++_002745_hash handle_rx_packet 3 58993 _002745_hash NULL
++_002746_hash HDLC_irq 2 8709 _002746_hash NULL
++_002747_hash hdlc_rpr_irq 2 10240 _002747_hash NULL
++_002749_hash ifx_spi_insert_flip_string 3 51752 _002749_hash NULL
++_002753_hash ip_nat_sdp_media 8 23386 _002753_hash NULL
++_002754_hash ip_send_unicast_reply 6 38714 _002754_hash NULL
++_002756_hash ipwireless_network_packet_received 4 51277 _002756_hash NULL
++_002757_hash ipwireless_tty_received 3 49154 _002757_hash NULL
++_002758_hash iscsi_iser_recv 4 41948 _002758_hash NULL
++_002759_hash l2cap_bredr_sig_cmd 3 49065 _002759_hash NULL
++_002760_hash l2cap_sock_alloc_skb_cb 2 33532 _002760_hash NULL
++_002761_hash l2cap_sock_recvmsg 4 59886 _002761_hash NULL
++_002762_hash llcp_allocate_pdu 3 19866 _002762_hash NULL
++_002763_hash macvtap_recvmsg 4 63949 _002763_hash NULL
++_002764_hash osd_req_list_dev_partitions 4 60027 _002764_hash NULL
++_002765_hash osd_req_list_partition_collections 5 38223 _002765_hash NULL
++_002766_hash osst_do_scsi 4 44410 _002766_hash NULL
++_002767_hash ping_sendmsg 4 3782 _002767_hash NULL
++_002768_hash ppp_cp_event 6 2965 _002768_hash NULL
++_002769_hash pty_write 3 44757 _002769_hash &_001733_hash
++_002770_hash push_rx 3 28939 _002770_hash NULL
++_002772_hash qla2x00_handle_queue_full 2 24365 _002772_hash NULL
++_002773_hash qla4xxx_change_queue_depth 2 1268 _002773_hash NULL
++_002774_hash rfcomm_sock_recvmsg 4 22227 _002774_hash NULL
++_002775_hash scsi_execute_req 5 42088 _002775_hash NULL
++_002776_hash _scsih_change_queue_depth 2 26230 _002776_hash NULL
++_002777_hash sctp_sf_abort_violation 6 38380 _002777_hash NULL
++_002778_hash send_to_tty 3 45141 _002778_hash NULL
++_002780_hash sky2_receive 2 13407 _002780_hash NULL
++_002781_hash spi_execute 5 28736 _002781_hash NULL
++_002782_hash submit_inquiry 3 42108 _002782_hash NULL
++_002783_hash tcp_dma_try_early_copy 3 4457 _002783_hash NULL
++_002784_hash tcp_sacktag_walk 6 49703 _002784_hash NULL
++_002785_hash tcp_write_xmit 2 64602 _002785_hash NULL
++_002786_hash ti_recv 4 22027 _002786_hash NULL
++_002787_hash tun_do_read 4 50800 _002787_hash NULL
++_002788_hash ubi_leb_change 4 10289 _002788_hash NULL
++_002789_hash ubi_leb_write 4-5 5478 _002789_hash NULL
++_002791_hash udp_sendmsg 4 4492 _002791_hash NULL
++_002792_hash unix_seqpacket_recvmsg 4 23062 _002792_hash &_000477_hash
++_002793_hash v9fs_cached_file_read 3 2514 _002793_hash NULL
++_002794_hash write_leb 5 36957 _002794_hash NULL
++_002795_hash xfs_buf_read_map 3 40226 _002795_hash NULL
++_002796_hash xfs_trans_get_buf_map 4 2927 _002796_hash NULL
++_002797_hash xlog_do_log_recovery 3 17550 _002797_hash NULL
++_002798_hash ath6kl_wmi_add_wow_pattern_cmd 4 12842 _002798_hash NULL
++_002799_hash ath6kl_wmi_beginscan_cmd 8 25462 _002799_hash NULL
++_002800_hash ath6kl_wmi_send_probe_response_cmd 6 31728 _002800_hash NULL
++_002801_hash ath6kl_wmi_set_appie_cmd 5 39266 _002801_hash NULL
++_002802_hash ath6kl_wmi_set_ie_cmd 6 37260 _002802_hash NULL
++_002803_hash ath6kl_wmi_startscan_cmd 8 33674 _002803_hash NULL
++_002804_hash ath6kl_wmi_test_cmd 3 27312 _002804_hash NULL
++_002805_hash brcmf_sdbrcm_membytes 3-5 37324 _002805_hash NULL
++_002807_hash brcmf_sdbrcm_read_control 3 22721 _002807_hash NULL
++_002808_hash brcmf_tx_frame 3 20978 _002808_hash NULL
++_002809_hash __carl9170_rx 3 56784 _002809_hash NULL
++_002810_hash ch_do_scsi 4 31171 _002810_hash NULL
++_002811_hash dbg_leb_change 4 23555 _002811_hash NULL
++_002812_hash dbg_leb_write 4-5 63555 _002812_hash &_000971_hash
++_002814_hash gluebi_write 3 27905 _002814_hash NULL ++_002815_hash hdlc_irq_one 2 3944 _002815_hash NULL ++_002819_hash iser_rcv_completion 2 8048 _002819_hash NULL ++_002820_hash lock_loop 1 61681 _002820_hash NULL ++_002821_hash process_rcvd_data 3 6679 _002821_hash NULL ++_002822_hash brcmf_sdbrcm_bus_txctl 3 42492 _002822_hash NULL ++_002823_hash carl9170_rx 3 13272 _002823_hash NULL ++_002824_hash carl9170_rx_stream 3 1334 _002824_hash NULL ++_002826_hash mpt_lan_receive_post_turbo 2 13592 _002826_hash NULL ++_002827_hash padzero 1 55 _002827_hash &_002251_hash ++_002828_hash scsi_mode_sense 5 16835 _002828_hash NULL ++_002829_hash scsi_vpd_inquiry 4 30040 _002829_hash NULL ++_002830_hash ses_recv_diag 4 47143 _002830_hash &_000679_hash ++_002831_hash ses_send_diag 4 64527 _002831_hash NULL ++_002832_hash tcp_push_one 2 48816 _002832_hash NULL ++_002833_hash __tcp_push_pending_frames 2 48148 _002833_hash NULL ++_002834_hash trim_bitmaps 3 24158 _002834_hash NULL ++_002835_hash tun_recvmsg 4 48463 _002835_hash NULL ++_002836_hash ubifs_leb_change 4 17789 _002836_hash NULL ++_002837_hash ubifs_leb_write 4-5 22679 _002837_hash NULL ++_002839_hash xfs_buf_readahead_map 3 44248 _002839_hash &_000851_hash ++_002840_hash xfs_trans_read_buf_map 5 37487 _002840_hash NULL ++_002841_hash xlog_do_recover 3 59789 _002841_hash NULL ++_002842_hash btrfs_trim_block_group 3 28963 _002842_hash NULL ++_002843_hash do_write_orph_node 2 64343 _002843_hash NULL ++_002844_hash fix_unclean_leb 3 23188 _002844_hash NULL ++_002845_hash fixup_leb 3 43256 _002845_hash NULL ++_002846_hash recover_head 3 17904 _002846_hash NULL ++_002847_hash scsi_get_vpd_page 4 51951 _002847_hash NULL ++_002848_hash sd_do_mode_sense 5 11507 _002848_hash NULL ++_002849_hash tcp_push 3 10680 _002849_hash NULL ++_002850_hash ubifs_wbuf_write_nolock 3 64946 _002850_hash NULL ++_002851_hash ubifs_write_node 3-5 11258 _002851_hash NULL ++_002852_hash ubifs_recover_leb 3 60639 _002852_hash NULL ++_002853_hash write_head 4 30481 _002853_hash NULL ++_002854_hash write_node 4 33121 _002854_hash NULL ++_002855_hash ubifs_recover_log_leb 3 12079 _002855_hash NULL ++_002856_hash replay_log_leb 3 18704 _002856_hash NULL ++_002857_hash alloc_cpu_rmap 1 65363 _002857_hash NULL ++_002858_hash alloc_ebda_hpc 1-2 50046 _002858_hash NULL ++_002860_hash alloc_sched_domains 1 28972 _002860_hash NULL ++_002861_hash amthi_read 4 45831 _002861_hash NULL ++_002862_hash bcm_char_read 3 31750 _002862_hash NULL ++_002863_hash BcmCopySection 5 2035 _002863_hash NULL ++_002864_hash buffer_from_user 3 51826 _002864_hash NULL ++_002865_hash buffer_to_user 3 35439 _002865_hash NULL ++_002866_hash card_send_command 3 40757 _002866_hash NULL ++_002867_hash chd_dec_fetch_cdata 3 50926 _002867_hash NULL ++_002868_hash copy_nodes_to_user 2 63807 _002868_hash NULL ++_002869_hash create_log 2 8225 _002869_hash NULL ++_002870_hash crystalhd_create_dio_pool 2 3427 _002870_hash NULL ++_002871_hash crystalhd_user_data 3 18407 _002871_hash NULL ++_002872_hash do_pages_stat 2 4437 _002872_hash NULL ++_002873_hash do_read_log_to_user 4 3236 _002873_hash NULL ++_002874_hash do_write_log_from_user 3 39362 _002874_hash NULL ++_002875_hash evm_read_key 3 54674 _002875_hash NULL ++_002876_hash evm_write_key 3 27715 _002876_hash NULL ++_002877_hash fir16_create 3 5574 _002877_hash NULL ++_002878_hash get_nodes 3 39012 _002878_hash NULL ++_002879_hash __iio_allocate_kfifo 2-3 55738 _002879_hash NULL ++_002881_hash __iio_allocate_sw_ring_buffer 3 4843 _002881_hash NULL 
++_002882_hash iio_debugfs_read_reg 3 60908 _002882_hash NULL ++_002883_hash iio_debugfs_write_reg 3 22742 _002883_hash NULL ++_002884_hash iio_device_alloc 1 41440 _002884_hash NULL ++_002885_hash iio_event_chrdev_read 3 54757 _002885_hash NULL ++_002886_hash iio_read_first_n_kfifo 2 57910 _002886_hash NULL ++_002887_hash iio_read_first_n_sw_rb 2 51911 _002887_hash NULL ++_002888_hash ioapic_setup_resources 1 35255 _002888_hash NULL ++_002889_hash keymap_store 4 45406 _002889_hash NULL ++_002890_hash line6_alloc_sysex_buffer 4 28225 _002890_hash NULL ++_002891_hash line6_dumpreq_initbuf 3 53123 _002891_hash NULL ++_002892_hash line6_midibuf_init 2 52425 _002892_hash NULL ++_002893_hash _malloc 1 54077 _002893_hash NULL ++_002894_hash mei_read 3 6507 _002894_hash NULL ++_002895_hash mei_write 3 4005 _002895_hash NULL ++_002896_hash msg_set 3 51725 _002896_hash NULL ++_002897_hash newpart 6 47485 _002897_hash NULL ++_002898_hash OS_kmalloc 1 36909 _002898_hash NULL ++_002899_hash OS_mem_token_alloc 1 14276 _002899_hash NULL ++_002900_hash packet_came 3 18072 _002900_hash NULL ++_002901_hash pcpu_alloc_bootmem 2 62074 _002901_hash NULL ++_002902_hash pcpu_build_alloc_info 1-3-2 41443 _002902_hash NULL ++_002905_hash pcpu_get_vm_areas 3 50085 _002905_hash NULL ++_002906_hash resource_from_user 3 30341 _002906_hash NULL ++_002907_hash rtsx_read_cfg_seq 3-5 48139 _002907_hash NULL ++_002909_hash rtsx_write_cfg_seq 3-5 27485 _002909_hash NULL ++_002911_hash sca3000_read_data 4 57064 _002911_hash NULL ++_002912_hash sca3000_read_first_n_hw_rb 2 11479 _002912_hash NULL ++_002913_hash send_midi_async 3 57463 _002913_hash NULL ++_002914_hash sep_create_dcb_dmatables_context 6 37551 _002914_hash NULL ++_002915_hash sep_create_dcb_dmatables_context_kernel 6 49728 _002915_hash NULL ++_002916_hash sep_create_msgarea_context 4 33829 _002916_hash NULL ++_002917_hash sep_lli_table_secure_dma 2-3 64042 _002917_hash NULL ++_002919_hash sep_lock_user_pages 2-3 57470 _002919_hash &_002488_hash ++_002921_hash sep_prepare_input_output_dma_table_in_dcb 4-5-2-3 63087 _002921_hash NULL ++_002923_hash sep_read 3 17161 _002923_hash NULL ++_002924_hash TransmitTcb 4 12989 _002924_hash NULL ++_002925_hash ValidateDSDParamsChecksum 3 63654 _002925_hash NULL ++_002926_hash Wb35Reg_BurstWrite 4 62327 _002926_hash NULL ++_002927_hash alloc_irq_cpu_rmap 1 28459 _002927_hash NULL ++_002928_hash InterfaceTransmitPacket 3 42058 _002928_hash NULL ++_002929_hash line6_dumpreq_init 3 34473 _002929_hash NULL ++_002931_hash pcpu_embed_first_chunk 1-3-2 24224 _002931_hash NULL ++_002933_hash pcpu_fc_alloc 2 11818 _002933_hash NULL ++_002934_hash pcpu_page_first_chunk 1 20712 _002934_hash NULL ++_002935_hash pod_alloc_sysex_buffer 3 31651 _002935_hash NULL ++_002936_hash r8712_usbctrl_vendorreq 6 48489 _002936_hash NULL ++_002937_hash r871x_set_wpa_ie 3 7000 _002937_hash NULL ++_002938_hash sep_prepare_input_dma_table 2-3 2009 _002938_hash NULL ++_002940_hash sep_prepare_input_output_dma_table 2-4-3 63429 _002940_hash NULL ++_002943_hash sys_get_mempolicy 3 30379 _002943_hash NULL ++_002944_hash sys_mbind 5 7990 _002944_hash NULL ++_002945_hash sys_migrate_pages 2 39825 _002945_hash NULL ++_002946_hash sys_move_pages 2 42626 _002946_hash NULL ++_002947_hash sys_set_mempolicy 3 32608 _002947_hash NULL ++_002948_hash variax_alloc_sysex_buffer 3 15237 _002948_hash NULL ++_002949_hash vme_user_read 3 55338 _002949_hash NULL ++_002950_hash vme_user_write 3 15587 _002950_hash NULL ++_002954_hash variax_set_raw2 4 32374 _002954_hash NULL 
++_002955_hash copy_in_user 3 57502 _002955_hash NULL ++_002956_hash __earlyonly_bootmem_alloc 2 23824 _002956_hash NULL ++_002957_hash rfc4106_set_key 3 54519 _002957_hash NULL ++_002958_hash sparse_early_usemaps_alloc_pgdat_section 2 62304 _002958_hash NULL ++_002959_hash sparse_early_usemaps_alloc_node 4 9269 _002959_hash NULL ++_002960_hash sparse_mem_maps_populate_node 4 12669 _002960_hash &_002242_hash ++_002961_hash vmemmap_alloc_block 1 43245 _002961_hash NULL ++_002962_hash sparse_early_mem_maps_alloc_node 4 36971 _002962_hash NULL ++_002963_hash vmemmap_alloc_block_buf 1 61126 _002963_hash NULL ++_002964_hash alloc_mr 1 45935 _002964_hash NULL ++_002965_hash atomic_counters_read 3 48827 _002965_hash NULL ++_002966_hash atomic_stats_read 3 36228 _002966_hash NULL ++_002967_hash capabilities_read 3 58457 _002967_hash NULL ++_002968_hash compat_core_sys_select 1 65285 _002968_hash NULL ++_002969_hash compat_dccp_setsockopt 5 51263 _002969_hash NULL ++_002970_hash compat_do_arpt_set_ctl 4 12184 _002970_hash NULL ++_002971_hash compat_do_ip6t_set_ctl 4 3184 _002971_hash NULL ++_002972_hash compat_do_ipt_set_ctl 4 58466 _002972_hash &_002078_hash ++_002973_hash compat_filldir 3 32999 _002973_hash NULL ++_002974_hash compat_filldir64 3 35354 _002974_hash NULL ++_002975_hash compat_fillonedir 3 15620 _002975_hash NULL ++_002976_hash compat_ip_setsockopt 5 13870 _003094_hash NULL nohasharray ++_002977_hash compat_ipv6_setsockopt 5 20468 _002977_hash NULL ++_002978_hash compat_mpctl_ioctl 2 45671 _002978_hash NULL ++_002979_hash compat_raw_setsockopt 5 30634 _002979_hash NULL ++_002980_hash compat_rawv6_setsockopt 5 4967 _002980_hash NULL ++_002981_hash compat_rw_copy_check_uvector 3 22001 _003263_hash NULL nohasharray ++_002982_hash compat_sock_setsockopt 5 23 _002982_hash NULL ++_002983_hash compat_sys_get_mempolicy 3 31109 _002983_hash NULL ++_002984_hash compat_sys_kexec_load 2 35674 _002984_hash NULL ++_002985_hash compat_sys_keyctl 4 9639 _002985_hash NULL ++_002986_hash compat_sys_mbind 5 36256 _002986_hash NULL ++_002987_hash compat_sys_migrate_pages 2 3157 _002987_hash NULL ++_002988_hash compat_sys_move_pages 2 5861 _002988_hash NULL ++_002989_hash compat_sys_mq_timedsend 3 31060 _002989_hash NULL ++_002990_hash compat_sys_msgrcv 2 7482 _002990_hash NULL ++_002991_hash compat_sys_msgsnd 2 10738 _002991_hash NULL ++_002992_hash compat_sys_semtimedop 3 3606 _002992_hash NULL ++_002993_hash compat_sys_set_mempolicy 3 57742 _002993_hash &_002614_hash ++_002994_hash __copy_in_user 3 34790 _002994_hash NULL ++_002995_hash dev_counters_read 3 19216 _002995_hash NULL ++_002996_hash dev_names_read 3 38509 _002996_hash NULL ++_002997_hash driver_names_read 3 60399 _002997_hash NULL ++_002998_hash driver_stats_read 3 8944 _002998_hash NULL ++_002999_hash evdev_ioctl_compat 2 13851 _002999_hash NULL ++_003000_hash evtchn_read 3 3569 _003000_hash NULL ++_003001_hash evtchn_write 3 43278 _003001_hash NULL ++_003002_hash fat_compat_ioctl_filldir 3 36328 _003002_hash NULL ++_003003_hash flash_read 3 57843 _003003_hash NULL ++_003004_hash flash_write 3 62354 _003004_hash NULL ++_003005_hash fw_device_op_compat_ioctl 2 42804 _003005_hash NULL ++_003006_hash gather_array 3 56641 _003006_hash NULL ++_003007_hash ghash_async_setkey 3 60001 _003007_hash NULL ++_003008_hash gntdev_alloc_map 2 35145 _003008_hash NULL ++_003009_hash gnttab_map 2 56439 _003009_hash NULL ++_003010_hash gru_alloc_gts 2-3 60056 _003010_hash &_000981_hash ++_003012_hash hiddev_compat_ioctl 2 41255 _003012_hash NULL 
++_003013_hash init_cdev 1 8274 _003013_hash NULL ++_003014_hash init_per_cpu 1 17880 _003014_hash NULL ++_003015_hash ipath_create_cq 2 45586 _003015_hash NULL ++_003016_hash ipath_get_base_info 3 7043 _003016_hash NULL ++_003017_hash ipath_init_qp_table 2 25167 _003017_hash NULL ++_003018_hash ipath_resize_cq 2 712 _003018_hash NULL ++_003019_hash joydev_compat_ioctl 2 8765 _003019_hash NULL ++_003020_hash mon_bin_compat_ioctl 3 50234 _003020_hash NULL ++_003021_hash options_write 3 47243 _003021_hash NULL ++_003022_hash portcntrs_1_read 3 47253 _003022_hash NULL ++_003023_hash portcntrs_2_read 3 56586 _003023_hash NULL ++_003024_hash portnames_read 3 41958 _003024_hash NULL ++_003025_hash ptc_proc_write 3 12076 _003025_hash NULL ++_003026_hash put_cmsg_compat 4 35937 _003026_hash NULL ++_003027_hash qib_alloc_devdata 2 51819 _003027_hash NULL ++_003028_hash qib_alloc_fast_reg_page_list 2 10507 _003028_hash NULL ++_003029_hash qib_cdev_init 1 34778 _003029_hash NULL ++_003030_hash qib_create_cq 2 27497 _003030_hash NULL ++_003031_hash qib_diag_write 3 62133 _003031_hash NULL ++_003032_hash qib_get_base_info 3 11369 _003032_hash NULL ++_003033_hash qib_resize_cq 2 53090 _003033_hash NULL ++_003034_hash qsfp_1_read 3 21915 _003034_hash NULL ++_003035_hash qsfp_2_read 3 31491 _003035_hash NULL ++_003036_hash queue_reply 3 22416 _003036_hash NULL ++_003037_hash spidev_compat_ioctl 2 63778 _003037_hash NULL ++_003038_hash split 2 11691 _003038_hash NULL ++_003039_hash stats_read_ul 3 32751 _003039_hash NULL ++_003040_hash sys32_ipc 3 7238 _003040_hash NULL ++_003041_hash sys32_rt_sigpending 2 25814 _003041_hash NULL ++_003042_hash tunables_read 3 36385 _003042_hash NULL ++_003043_hash tunables_write 3 59563 _003043_hash NULL ++_003044_hash xenbus_file_write 3 6282 _003044_hash NULL ++_003045_hash xlbd_reserve_minors 1-2 18365 _003045_hash NULL ++_003047_hash xpc_kmalloc_cacheline_aligned 1 42895 _003047_hash NULL ++_003048_hash xpc_kzalloc_cacheline_aligned 1 65433 _003048_hash NULL ++_003049_hash xsd_read 3 15653 _003049_hash NULL ++_003050_hash compat_do_readv_writev 4 49102 _003050_hash NULL ++_003051_hash compat_keyctl_instantiate_key_iov 3 57431 _003088_hash NULL nohasharray ++_003052_hash compat_process_vm_rw 3-5 22254 _003052_hash NULL ++_003054_hash compat_sys_select 1 16131 _003054_hash NULL ++_003055_hash compat_sys_setsockopt 5 3326 _003055_hash NULL ++_003056_hash compat_udp_setsockopt 5 38840 _003056_hash NULL ++_003057_hash compat_udpv6_setsockopt 5 42981 _003057_hash NULL ++_003058_hash do_compat_pselect 1 10398 _003058_hash NULL ++_003059_hash gnttab_expand 1 15817 _003059_hash NULL ++_003060_hash ipath_cdev_init 1 37752 _003060_hash NULL ++_003061_hash ipath_reg_phys_mr 3 23918 _003061_hash &_000999_hash ++_003062_hash qib_alloc_fast_reg_mr 2 12526 _003062_hash NULL ++_003063_hash qib_reg_phys_mr 3 60202 _003063_hash &_000897_hash ++_003064_hash compat_readv 3 30273 _003064_hash NULL ++_003065_hash compat_sys_process_vm_readv 3-5 15374 _003065_hash NULL ++_003067_hash compat_sys_process_vm_writev 3-5 41194 _003067_hash NULL ++_003069_hash compat_sys_pselect6 1 14105 _003069_hash NULL ++_003070_hash compat_writev 3 60063 _003070_hash NULL ++_003071_hash get_free_entries 1 46030 _003071_hash NULL ++_003072_hash compat_sys_preadv64 3 24283 _003072_hash NULL ++_003073_hash compat_sys_pwritev64 3 51151 _003073_hash NULL ++_003074_hash compat_sys_readv 3 20911 _003074_hash NULL ++_003075_hash compat_sys_writev 3 5784 _003075_hash NULL ++_003076_hash gnttab_alloc_grant_references 1 
18240 _003076_hash NULL ++_003077_hash compat_sys_preadv 3 583 _003077_hash NULL ++_003078_hash compat_sys_pwritev 3 17886 _003078_hash NULL ++_003079_hash aes_decrypt_fail_read 3 54815 _003079_hash NULL ++_003080_hash aes_decrypt_interrupt_read 3 19910 _003080_hash NULL ++_003081_hash aes_decrypt_packets_read 3 10155 _003081_hash NULL ++_003082_hash aes_encrypt_fail_read 3 32562 _003082_hash NULL ++_003083_hash aes_encrypt_interrupt_read 3 39919 _003083_hash NULL ++_003084_hash aes_encrypt_packets_read 3 48666 _003084_hash NULL ++_003085_hash agp_remap 2 30665 _003085_hash NULL ++_003086_hash alloc_apertures 1 56561 _003086_hash NULL ++_003087_hash allocate_probes 1 40204 _003087_hash NULL ++_003088_hash alloc_ftrace_hash 1 57431 _003088_hash &_003051_hash ++_003089_hash alloc_page_cgroup 1 2919 _003089_hash NULL ++_003090_hash __alloc_preds 2 9492 _003090_hash NULL ++_003091_hash __alloc_pred_stack 2 26687 _003091_hash NULL ++_003092_hash alloc_sched_domains 1 47756 _003092_hash NULL ++_003093_hash alloc_trace_probe 6 38720 _003093_hash NULL ++_003094_hash alloc_trace_uprobe 3 13870 _003094_hash &_002976_hash ++_003095_hash ath6kl_sdio_alloc_prep_scat_req 2 51986 _003095_hash NULL ++_003096_hash ath6kl_usb_post_recv_transfers 2 32892 _003096_hash NULL ++_003097_hash ath6kl_usb_submit_ctrl_in 6 32880 _003097_hash &_000795_hash ++_003098_hash ath6kl_usb_submit_ctrl_out 6 9978 _003098_hash NULL ++_003099_hash av7110_ipack_init 2 46655 _003099_hash NULL ++_003100_hash av7110_vbi_write 3 34384 _003100_hash NULL ++_003101_hash bin_uuid 3 28999 _003101_hash NULL ++_003102_hash blk_dropped_read 3 4168 _003102_hash NULL ++_003103_hash blk_msg_write 3 13655 _003103_hash NULL ++_003104_hash brcmf_usbdev_qinit 2 19090 _003104_hash &_001715_hash ++_003105_hash brcmf_usb_dl_cmd 4 53130 _003105_hash NULL ++_003106_hash ci_ll_init 3 12930 _003106_hash NULL ++_003107_hash ci_ll_write 4 3740 _003107_hash NULL ++_003108_hash conf_read 3 55786 _003108_hash NULL ++_003109_hash __copy_from_user_inatomic_nocache 3 49921 _003109_hash NULL ++_003110_hash cx24116_writeregN 4 41975 _003110_hash NULL ++_003111_hash cyttsp_probe 4 1940 _003111_hash NULL ++_003112_hash dccpprobe_read 3 52549 _003112_hash NULL ++_003113_hash ddb_input_read 3 9743 _003113_hash NULL ++_003114_hash ddb_output_write 3 31902 _003114_hash NULL ++_003115_hash __devres_alloc 2 25598 _003115_hash NULL ++_003116_hash dma_rx_errors_read 3 52045 _003116_hash NULL ++_003117_hash dma_rx_requested_read 3 65354 _003117_hash NULL ++_003118_hash dma_tx_errors_read 3 46060 _003118_hash NULL ++_003119_hash dma_tx_requested_read 3 16110 _003203_hash NULL nohasharray ++_003120_hash do_dmabuf_dirty_sou 7 3017 _003120_hash NULL ++_003121_hash do_surface_dirty_sou 7 39678 _003121_hash NULL ++_003122_hash driver_state_read 3 17194 _003122_hash &_001511_hash ++_003123_hash drm_agp_bind_pages 3 56748 _003123_hash NULL ++_003124_hash drm_buffer_alloc 2 44405 _003124_hash NULL ++_003125_hash drm_calloc_large 1-2 65421 _003125_hash NULL ++_003127_hash drm_fb_helper_init 3-4 19044 _003127_hash NULL ++_003129_hash drm_ht_create 2 18853 _003129_hash NULL ++_003130_hash drm_ioctl 2 42813 _003130_hash NULL ++_003131_hash drm_malloc_ab 1-2 16831 _003131_hash NULL ++_003133_hash drm_mode_crtc_set_gamma_size 2 31881 _003133_hash NULL ++_003134_hash drm_plane_init 6 28731 _003134_hash NULL ++_003135_hash drm_property_create 4 51239 _003135_hash NULL ++_003136_hash drm_property_create_blob 2 7414 _003136_hash NULL ++_003137_hash drm_vblank_init 2 11362 _003137_hash NULL 
++_003138_hash drm_vmalloc_dma 1 14550 _003138_hash NULL ++_003139_hash dvb_aplay 3 56296 _003139_hash NULL ++_003140_hash dvb_ca_en50221_init 4 45718 _003140_hash NULL ++_003141_hash dvb_ca_en50221_io_write 3 43533 _003141_hash NULL ++_003142_hash dvb_dmxdev_set_buffer_size 2 55643 _003142_hash NULL ++_003143_hash dvbdmx_write 3 19423 _003143_hash NULL ++_003144_hash dvb_dvr_set_buffer_size 2 9840 _003144_hash NULL ++_003145_hash dvb_net_sec 3 37884 _003145_hash NULL ++_003146_hash dvb_play 3 50814 _003146_hash NULL ++_003147_hash dvb_ringbuffer_pkt_read_user 2-5-3 4303 _003147_hash NULL ++_003150_hash dvb_ringbuffer_read_user 3 56702 _003150_hash NULL ++_003151_hash dvb_usercopy 2 14036 _003151_hash NULL ++_003152_hash dw210x_op_rw 6 39915 _003152_hash NULL ++_003153_hash edt_ft5x06_debugfs_raw_data_read 3 28002 _003153_hash NULL ++_003154_hash em_canid_change 3 14150 _003154_hash NULL ++_003155_hash event_calibration_read 3 21083 _003155_hash NULL ++_003156_hash event_enable_read 3 7074 _003156_hash NULL ++_003157_hash event_filter_read 3 23494 _003157_hash NULL ++_003158_hash event_filter_write 3 56609 _003158_hash NULL ++_003159_hash event_heart_beat_read 3 48961 _003159_hash NULL ++_003160_hash event_id_read 3 64288 _003160_hash &_001300_hash ++_003161_hash event_oom_late_read 3 61175 _003161_hash &_001054_hash ++_003162_hash event_phy_transmit_error_read 3 10471 _003162_hash NULL ++_003163_hash event_rx_mem_empty_read 3 40363 _003163_hash NULL ++_003164_hash event_rx_mismatch_read 3 38518 _003164_hash NULL ++_003165_hash event_rx_pool_read 3 25792 _003165_hash NULL ++_003166_hash event_tx_stuck_read 3 19305 _003166_hash NULL ++_003167_hash excessive_retries_read 3 60425 _003167_hash NULL ++_003168_hash flexcop_device_kmalloc 1 54793 _003168_hash NULL ++_003169_hash fm_send_cmd 5 39639 _003169_hash NULL ++_003170_hash __fprog_create 2 41263 _003170_hash NULL ++_003171_hash fq_codel_zalloc 1 15378 _003171_hash NULL ++_003172_hash ftrace_pid_write 3 39710 _003172_hash NULL ++_003173_hash ftrace_profile_read 3 21327 _003173_hash NULL ++_003174_hash fw_stats_raw_read 3 1369 _003174_hash NULL ++_003175_hash get_info 3 55681 _003175_hash NULL ++_003176_hash __get_vm_area_node 1 55305 _003176_hash NULL ++_003177_hash gpio_power_read 3 36059 _003177_hash NULL ++_003178_hash h5_prepare_pkt 4 12085 _003178_hash NULL ++_003179_hash hsc_msg_alloc 1 60990 _003179_hash NULL ++_003180_hash hsc_write 3 55875 _003180_hash NULL ++_003181_hash hsi_alloc_controller 1 41802 _003181_hash NULL ++_003182_hash hsi_register_board_info 2 13820 _003182_hash NULL ++_003183_hash hugetlb_cgroup_read 5 49259 _003183_hash NULL ++_003184_hash i915_cache_sharing_read 3 24775 _003184_hash NULL ++_003185_hash i915_cache_sharing_write 3 57961 _003185_hash NULL ++_003186_hash i915_max_freq_read 3 20581 _003186_hash NULL ++_003187_hash i915_max_freq_write 3 11350 _003187_hash NULL ++_003188_hash i915_min_freq_read 3 38470 _003188_hash NULL ++_003189_hash i915_min_freq_write 3 10981 _003189_hash NULL ++_003190_hash i915_ring_stop_read 3 42549 _003190_hash &_000740_hash ++_003191_hash i915_ring_stop_write 3 59010 _003191_hash NULL ++_003192_hash i915_wedged_read 3 35474 _003192_hash NULL ++_003193_hash i915_wedged_write 3 47771 _003193_hash NULL ++_003194_hash ieee802154_alloc_device 1 13767 _003194_hash NULL ++_003195_hash intel_sdvo_write_cmd 4 54377 _003195_hash &_000832_hash ++_003196_hash isr_cmd_cmplt_read 3 53439 _003196_hash NULL ++_003197_hash isr_commands_read 3 41398 _003197_hash NULL ++_003198_hash 
isr_decrypt_done_read 3 49490 _003198_hash NULL ++_003199_hash isr_dma0_done_read 3 8574 _003199_hash NULL ++_003200_hash isr_dma1_done_read 3 48159 _003200_hash NULL ++_003201_hash isr_fiqs_read 3 34687 _003201_hash NULL ++_003202_hash isr_host_acknowledges_read 3 54136 _003202_hash NULL ++_003203_hash isr_hw_pm_mode_changes_read 3 16110 _003203_hash &_003119_hash ++_003204_hash isr_irqs_read 3 9181 _003204_hash NULL ++_003205_hash isr_low_rssi_read 3 64789 _003205_hash NULL ++_003206_hash isr_pci_pm_read 3 30271 _003206_hash NULL ++_003207_hash isr_rx_headers_read 3 38325 _003207_hash NULL ++_003208_hash isr_rx_mem_overflow_read 3 43025 _003208_hash NULL ++_003209_hash isr_rx_procs_read 3 31804 _003209_hash NULL ++_003210_hash isr_rx_rdys_read 3 35283 _003210_hash NULL ++_003211_hash isr_tx_exch_complete_read 3 16103 _003211_hash NULL ++_003212_hash isr_tx_procs_read 3 23084 _003212_hash NULL ++_003213_hash isr_wakeups_read 3 49607 _003213_hash NULL ++_003214_hash LoadBitmap 2 19658 _003214_hash NULL ++_003215_hash mem_cgroup_read 5 22461 _003215_hash NULL ++_003216_hash mic_calc_failure_read 3 59700 _003216_hash NULL ++_003217_hash mic_rx_pkts_read 3 27972 _003217_hash NULL ++_003218_hash __module_alloc 1 50004 _003218_hash NULL ++_003219_hash module_alloc_update_bounds_rw 1 63233 _003219_hash NULL ++_003220_hash module_alloc_update_bounds_rx 1 58634 _003220_hash NULL ++_003221_hash mwifiex_usb_submit_rx_urb 2 54558 _003221_hash NULL ++_003222_hash nfc_hci_hcp_message_tx 6 14534 _003222_hash NULL ++_003223_hash nfc_hci_set_param 5 40697 _003223_hash NULL ++_003224_hash nfc_shdlc_alloc_skb 2 12741 _003224_hash NULL ++_003225_hash opera1_xilinx_rw 5 31453 _003225_hash NULL ++_003226_hash persistent_ram_vmap 1-2 709 _003226_hash NULL ++_003228_hash prctl_set_mm 3 64538 _003228_hash NULL ++_003229_hash probe_kernel_write 3 17481 _003229_hash NULL ++_003230_hash proc_fault_inject_read 3 36802 _003230_hash NULL ++_003231_hash proc_fault_inject_write 3 21058 _003231_hash NULL ++_003232_hash ps_pspoll_max_apturn_read 3 6699 _003232_hash NULL ++_003233_hash ps_pspoll_timeouts_read 3 11776 _003233_hash NULL ++_003234_hash ps_pspoll_utilization_read 3 5361 _003234_hash NULL ++_003235_hash ps_upsd_max_apturn_read 3 19918 _003235_hash NULL ++_003236_hash ps_upsd_max_sptime_read 3 63362 _003236_hash NULL ++_003237_hash ps_upsd_timeouts_read 3 28924 _003237_hash NULL ++_003238_hash ps_upsd_utilization_read 3 51669 _003238_hash NULL ++_003239_hash ptp_filter_init 2 36780 _003239_hash NULL ++_003240_hash pwr_disable_ps_read 3 13176 _003240_hash NULL ++_003241_hash pwr_elp_enter_read 3 5324 _003241_hash NULL ++_003242_hash pwr_enable_ps_read 3 17686 _003242_hash NULL ++_003243_hash pwr_fix_tsf_ps_read 3 26627 _003243_hash NULL ++_003244_hash pwr_missing_bcns_read 3 25824 _003244_hash NULL ++_003245_hash pwr_power_save_off_read 3 18355 _003245_hash NULL ++_003246_hash pwr_ps_enter_read 3 26935 _003246_hash &_000512_hash ++_003247_hash pwr_rcvd_awake_beacons_read 3 50505 _003247_hash NULL ++_003248_hash pwr_rcvd_beacons_read 3 52836 _003248_hash NULL ++_003249_hash pwr_tx_without_ps_read 3 48423 _003249_hash NULL ++_003250_hash pwr_tx_with_ps_read 3 60851 _003250_hash NULL ++_003251_hash pwr_wake_on_host_read 3 26321 _003251_hash NULL ++_003252_hash pwr_wake_on_timer_exp_read 3 22640 _003252_hash NULL ++_003253_hash rb_simple_read 3 45972 _003253_hash NULL ++_003254_hash read_file_dfs 3 43145 _003254_hash NULL ++_003255_hash retry_count_read 3 52129 _003255_hash NULL ++_003256_hash rx_dropped_read 3 44799 
_003256_hash NULL ++_003257_hash rx_fcs_err_read 3 62844 _003257_hash NULL ++_003258_hash rx_hdr_overflow_read 3 64407 _003258_hash NULL ++_003259_hash rx_hw_stuck_read 3 57179 _003259_hash NULL ++_003260_hash rx_out_of_mem_read 3 10157 _003260_hash NULL ++_003261_hash rx_path_reset_read 3 23801 _003261_hash NULL ++_003262_hash rxpipe_beacon_buffer_thres_host_int_trig_rx_data_read 3 55106 _003262_hash NULL ++_003263_hash rxpipe_descr_host_int_trig_rx_data_read 3 22001 _003263_hash &_002981_hash ++_003264_hash rxpipe_missed_beacon_host_int_trig_rx_data_read 3 63405 _003264_hash NULL ++_003265_hash rxpipe_rx_prep_beacon_drop_read 3 2403 _003265_hash NULL ++_003266_hash rxpipe_tx_xfr_host_int_trig_rx_data_read 3 35538 _003266_hash NULL ++_003267_hash rx_reset_counter_read 3 58001 _003267_hash NULL ++_003268_hash rx_xfr_hint_trig_read 3 40283 _003268_hash NULL ++_003269_hash saa7146_vmalloc_build_pgtable 2 19780 _003269_hash NULL ++_003270_hash sched_feat_write 3 55202 _003270_hash NULL ++_003271_hash sd_alloc_ctl_entry 1 29708 _003271_hash NULL ++_003272_hash shmem_pread_fast 3 34147 _003272_hash NULL ++_003273_hash shmem_pread_slow 3 3198 _003273_hash NULL ++_003274_hash shmem_pwrite_slow 3 31741 _003274_hash NULL ++_003275_hash show_header 3 4722 _003275_hash &_000745_hash ++_003276_hash stack_max_size_read 3 1445 _003276_hash NULL ++_003277_hash subsystem_filter_read 3 62310 _003277_hash NULL ++_003278_hash subsystem_filter_write 3 13022 _003278_hash NULL ++_003279_hash swap_cgroup_swapon 2 13614 _003279_hash NULL ++_003280_hash system_enable_read 3 25815 _003280_hash NULL ++_003281_hash tda10048_writeregbulk 4 11050 _003281_hash NULL ++_003282_hash tlbflush_read_file 3 64661 _003282_hash NULL ++_003283_hash trace_options_core_read 3 47390 _003283_hash NULL ++_003284_hash trace_options_read 3 11419 _003284_hash NULL ++_003285_hash trace_parser_get_init 2 31379 _003285_hash NULL ++_003286_hash traceprobe_probes_write 3 64969 _003286_hash NULL ++_003287_hash trace_seq_to_user 3 65398 _003287_hash NULL ++_003288_hash tracing_buffers_read 3 11124 _003288_hash NULL ++_003289_hash tracing_clock_write 3 27961 _003289_hash NULL ++_003290_hash tracing_cpumask_read 3 7010 _003290_hash NULL ++_003291_hash tracing_ctrl_read 3 46922 _003291_hash NULL ++_003292_hash tracing_entries_read 3 8345 _003292_hash NULL ++_003293_hash tracing_max_lat_read 3 8890 _003293_hash NULL ++_003294_hash tracing_read_dyn_info 3 45468 _003294_hash NULL ++_003295_hash tracing_readme_read 3 16493 _003295_hash NULL ++_003296_hash tracing_saved_cmdlines_read 3 21434 _003296_hash NULL ++_003297_hash tracing_set_trace_read 3 44122 _003297_hash NULL ++_003298_hash tracing_set_trace_write 3 57096 _003298_hash NULL ++_003299_hash tracing_stats_read 3 34537 _003299_hash NULL ++_003300_hash tracing_total_entries_read 3 62817 _003300_hash NULL ++_003301_hash tracing_trace_options_write 3 153 _003301_hash NULL ++_003302_hash tstats_write 3 60432 _003302_hash &_000009_hash ++_003303_hash ttm_bo_fbdev_io 4 9805 _003303_hash NULL ++_003304_hash ttm_bo_io 5 47000 _003304_hash NULL ++_003305_hash ttm_dma_page_pool_free 2 34135 _003305_hash NULL ++_003306_hash ttm_page_pool_free 2 61661 _003306_hash NULL ++_003307_hash ttusb2_msg 4 3100 _003307_hash NULL ++_003308_hash tx_internal_desc_overflow_read 3 47300 _003308_hash NULL ++_003309_hash tx_queue_len_read 3 1463 _003309_hash NULL ++_003310_hash tx_queue_status_read 3 44978 _003310_hash NULL ++_003311_hash u_memcpya 2-3 30139 _003311_hash NULL ++_003313_hash usb_allocate_stream_buffers 3 
8964 _003313_hash NULL ++_003314_hash vifs_state_read 3 33762 _003314_hash NULL ++_003315_hash vmalloc_to_sg 2 58354 _003315_hash NULL ++_003316_hash vm_map_ram 2 23078 _003316_hash &_001095_hash ++_003317_hash vmw_execbuf_process 5 22885 _003317_hash NULL ++_003318_hash vmw_fifo_reserve 2 12141 _003318_hash NULL ++_003319_hash vmw_kms_present 9 38130 _003319_hash NULL ++_003320_hash vmw_kms_readback 6 5727 _003320_hash NULL ++_003321_hash wep_addr_key_count_read 3 20174 _003321_hash NULL ++_003322_hash wep_decrypt_fail_read 3 58567 _003322_hash NULL ++_003323_hash wep_default_key_count_read 3 43035 _003323_hash NULL ++_003324_hash wep_interrupt_read 3 41492 _003324_hash NULL ++_003325_hash wep_key_not_found_read 3 13377 _003325_hash &_000952_hash ++_003326_hash wep_packets_read 3 18751 _003326_hash NULL ++_003327_hash wl1251_cmd_template_set 4 6172 _003327_hash NULL ++_003328_hash wl1271_format_buffer 2 20834 _003328_hash NULL ++_003329_hash wl1271_rx_filter_alloc_field 5 46721 _003329_hash NULL ++_003330_hash wl12xx_cmd_build_probe_req 6-8 54946 _003330_hash NULL ++_003332_hash wlcore_alloc_hw 1 7785 _003332_hash NULL ++_003333_hash aggr_size_rx_size_read 3 33526 _003333_hash NULL ++_003334_hash aggr_size_tx_agg_vs_rate_read 3 21438 _003334_hash NULL ++_003335_hash alloc_and_copy_ftrace_hash 1 29368 _003335_hash NULL ++_003336_hash alloc_bulk_urbs_generic 5 12127 _003336_hash NULL ++_003337_hash alloc_ieee80211 1 20063 _003337_hash NULL ++_003338_hash alloc_ieee80211_rsl 1 34564 _003338_hash NULL ++_003339_hash alloc_perm_bits 2 1532 _003339_hash NULL ++_003340_hash alloc_private 2 22399 _003340_hash NULL ++_003341_hash alloc_rtllib 1 51136 _003341_hash NULL ++_003342_hash alloc_rx_desc_ring 2 18016 _003342_hash NULL ++_003343_hash arcfb_write 3 8702 _003343_hash NULL ++_003344_hash ath6kl_usb_bmi_read 3 48745 _003344_hash NULL ++_003345_hash ath6kl_usb_bmi_write 3 2454 _003345_hash &_001020_hash ++_003346_hash ath6kl_usb_ctrl_msg_exchange 4 33327 _003346_hash NULL ++_003347_hash au0828_init_isoc 2-3 61917 _003347_hash NULL ++_003349_hash auok190xfb_write 3 37001 _003349_hash NULL ++_003350_hash beacon_interval_read 3 7091 _003350_hash NULL ++_003351_hash brcmf_usb_attach 1-2 44656 _003351_hash NULL ++_003353_hash broadsheetfb_write 3 39976 _003353_hash NULL ++_003354_hash broadsheet_spiflash_rewrite_sector 2 54864 _003354_hash NULL ++_003355_hash ci13xxx_add_device 3 14456 _003355_hash NULL ++_003356_hash cmpk_message_handle_tx 4 54024 _003356_hash NULL ++_003357_hash comedi_alloc_subdevices 2 29207 _003357_hash NULL ++_003358_hash comedi_buf_alloc 3 24822 _003358_hash NULL ++_003359_hash comedi_read 3 13199 _003359_hash NULL ++_003360_hash comedi_write 3 47926 _003360_hash NULL ++_003361_hash create_trace_probe 1 20175 _003361_hash NULL ++_003362_hash create_trace_uprobe 1 13184 _003362_hash NULL ++_003363_hash cx18_copy_buf_to_user 4 22735 _003363_hash NULL ++_003364_hash cx231xx_init_bulk 2-3 47024 _003364_hash NULL ++_003366_hash cx231xx_init_isoc 2-3 56453 _003366_hash NULL ++_003368_hash cx231xx_init_vbi_isoc 2-3 28053 _003368_hash NULL ++_003370_hash da9052_group_write 3 4534 _003370_hash NULL ++_003371_hash debug_debug1_read 3 8856 _003371_hash NULL ++_003372_hash debug_debug2_read 3 30526 _003372_hash NULL ++_003373_hash debug_debug3_read 3 56894 _003373_hash NULL ++_003374_hash debug_debug4_read 3 61367 _003374_hash NULL ++_003375_hash debug_debug5_read 3 2291 _003375_hash NULL ++_003376_hash debug_debug6_read 3 33168 _003376_hash NULL ++_003377_hash dev_read 3 56369 
_003377_hash NULL ++_003378_hash do_dmabuf_dirty_ldu 6 52241 _003378_hash NULL ++_003379_hash drm_compat_ioctl 2 51717 _003379_hash NULL ++_003380_hash drm_mode_create_tv_properties 2 23122 _003380_hash NULL ++_003381_hash drm_property_create_bitmask 5 30195 _003381_hash NULL ++_003382_hash drm_property_create_enum 5 29201 _003382_hash NULL ++_003383_hash dsp_buffer_alloc 2 11684 _003383_hash NULL ++_003384_hash dt3155_alloc_coherent 2 58073 _003384_hash NULL ++_003385_hash dtim_interval_read 3 654 _003385_hash NULL ++_003386_hash dvb_audio_write 3 51275 _003386_hash NULL ++_003387_hash dvb_ca_en50221_io_ioctl 2 26490 _003387_hash NULL ++_003388_hash dvb_ca_write 3 41171 _003388_hash NULL ++_003389_hash dvb_demux_ioctl 2 42733 _003389_hash NULL ++_003390_hash dvb_dmxdev_buffer_read 4 20682 _003390_hash NULL ++_003391_hash dvb_dvr_ioctl 2 49182 _003391_hash NULL ++_003392_hash dvb_generic_ioctl 2 21810 _003392_hash NULL ++_003393_hash dvb_net_ioctl 2 61559 _003393_hash NULL ++_003394_hash dvb_net_sec_callback 2 28786 _003394_hash NULL ++_003396_hash dvb_video_write 3 754 _003396_hash NULL ++_003397_hash dynamic_ps_timeout_read 3 10110 _003397_hash NULL ++_003398_hash easycap_alsa_vmalloc 2 14426 _003398_hash NULL ++_003399_hash em28xx_alloc_isoc 4 46892 _003399_hash NULL ++_003400_hash error_error_bar_retry_read 3 64305 _003400_hash NULL ++_003401_hash error_error_frame_cts_nul_flid_read 3 17262 _003401_hash NULL ++_003402_hash error_error_frame_read 3 39947 _003402_hash &_002436_hash ++_003403_hash error_error_null_Frame_tx_start_read 3 55024 _003403_hash NULL ++_003404_hash error_error_numll_frame_cts_start_read 3 47781 _003404_hash NULL ++_003405_hash ext_sd_execute_read_data 9 48589 _003405_hash NULL ++_003406_hash ext_sd_execute_write_data 9 8175 _003406_hash NULL ++_003407_hash fast_user_write 5 20494 _003407_hash NULL ++_003408_hash f_audio_buffer_alloc 1 41110 _003408_hash NULL ++_003409_hash fb_alloc_cmap_gfp 2 20792 _003409_hash NULL ++_003410_hash fbcon_do_set_font 2-3 4079 _003410_hash NULL ++_003412_hash fb_read 3 33506 _003412_hash NULL ++_003413_hash fb_sys_read 3 13778 _003413_hash NULL ++_003414_hash fb_sys_write 3 33130 _003414_hash NULL ++_003415_hash fb_write 3 46924 _003415_hash NULL ++_003416_hash firmwareUpload 3 32794 _003416_hash NULL ++_003417_hash fmc_send_cmd 5 20435 _003417_hash NULL ++_003418_hash fops_read 3 40672 _003418_hash NULL ++_003419_hash forced_ps_read 3 31685 _003419_hash NULL ++_003420_hash frame_alloc 4 15981 _003420_hash NULL ++_003421_hash framebuffer_alloc 1 59145 _003421_hash NULL ++_003422_hash ftrace_write 3 29551 _003422_hash NULL ++_003423_hash fw_download_code 3 13249 _003423_hash NULL ++_003424_hash fwSendNullPacket 2 54618 _003424_hash NULL ++_003425_hash gdm_wimax_netif_rx 3 43423 _003425_hash &_001810_hash ++_003426_hash get_vm_area 1 18080 _003426_hash NULL ++_003427_hash __get_vm_area 1 61599 _003427_hash NULL ++_003428_hash get_vm_area_caller 1 10527 _003428_hash NULL ++_003429_hash __get_vm_area_caller 1 56416 _003828_hash NULL nohasharray ++_003430_hash gspca_dev_probe2 4 59833 _003430_hash NULL ++_003431_hash hdpvr_read 3 9273 _003431_hash NULL ++_003432_hash hecubafb_write 3 26942 _003432_hash NULL ++_003433_hash i915_compat_ioctl 2 3656 _003433_hash NULL ++_003434_hash i915_gem_execbuffer_relocate_slow 7 25355 _003434_hash NULL ++_003435_hash ieee80211_alloc_txb 1-2 52477 _003435_hash NULL ++_003437_hash ieee80211_authentication_req 3 63973 _003437_hash NULL ++_003438_hash ieee80211_wx_set_gen_ie 3 51399 _003438_hash NULL 
++_003439_hash ieee80211_wx_set_gen_ie_rsl 3 3521 _003458_hash NULL nohasharray ++_003440_hash intel_sdvo_set_value 4 2311 _003440_hash NULL ++_003441_hash ir_lirc_transmit_ir 3 64403 _003441_hash NULL ++_003442_hash irq_blk_threshold_read 3 33666 _003442_hash NULL ++_003443_hash irq_pkt_threshold_read 3 33356 _003443_hash &_000154_hash ++_003444_hash irq_timeout_read 3 54653 _003444_hash NULL ++_003445_hash ivtv_buf_copy_from_user 4 25502 _003445_hash NULL ++_003446_hash ivtv_copy_buf_to_user 4 6159 _003446_hash NULL ++_003447_hash ivtvfb_write 3 40023 _003447_hash NULL ++_003448_hash kgdb_hex2mem 3 24755 _003448_hash NULL ++_003449_hash lirc_buffer_init 2-3 53282 _003449_hash NULL ++_003451_hash lirc_write 3 20604 _003451_hash NULL ++_003452_hash mce_request_packet 3 1073 _003452_hash NULL ++_003453_hash media_entity_init 2-4 15870 _003453_hash &_001742_hash ++_003455_hash mem_fw_gen_free_mem_blks_read 3 11413 _003455_hash NULL ++_003456_hash mem_fwlog_free_mem_blks_read 3 59616 _003456_hash NULL ++_003457_hash mem_rx_free_mem_blks_read 3 675 _003457_hash NULL ++_003458_hash mem_tx_free_mem_blks_read 3 3521 _003458_hash &_003439_hash ++_003459_hash metronomefb_write 3 8823 _003459_hash NULL ++_003460_hash mga_compat_ioctl 2 52170 _003460_hash NULL ++_003461_hash mmio_read 4 40348 _003461_hash NULL ++_003462_hash netlink_send 5 38434 _003462_hash NULL ++_003463_hash nfc_hci_execute_cmd 5 43882 _003463_hash NULL ++_003464_hash nfc_hci_send_event 5 21452 _003464_hash NULL ++_003465_hash nfc_hci_send_response 5 56462 _003465_hash NULL ++_003466_hash ni_gpct_device_construct 5 610 _003466_hash NULL ++_003467_hash nouveau_compat_ioctl 2 28305 _003467_hash NULL ++_003468_hash odev_update 2 50169 _003468_hash NULL ++_003469_hash opera1_usb_i2c_msgxfer 4 64521 _003469_hash NULL ++_003470_hash OSDSetBlock 2-4 38986 _003470_hash NULL ++_003472_hash oz_add_farewell 5 20652 _003472_hash NULL ++_003473_hash oz_cdev_read 3 20659 _003473_hash NULL ++_003474_hash oz_cdev_write 3 33852 _003474_hash NULL ++_003475_hash oz_ep_alloc 2 5587 _003475_hash NULL ++_003476_hash oz_events_read 3 47535 _003476_hash NULL ++_003477_hash persistent_ram_buffer_map 1-2 11332 _003477_hash NULL ++_003479_hash pipeline_cs_rx_packet_in_read 3 37089 _003479_hash NULL ++_003480_hash pipeline_cs_rx_packet_out_read 3 58926 _003480_hash NULL ++_003481_hash pipeline_csum_to_rx_xfer_swi_read 3 15403 _003481_hash NULL ++_003482_hash pipeline_dec_packet_in_fifo_full_read 3 33052 _003482_hash NULL ++_003483_hash pipeline_dec_packet_in_read 3 47076 _003483_hash NULL ++_003484_hash pipeline_dec_packet_out_read 3 54052 _003484_hash NULL ++_003485_hash pipeline_defrag_to_csum_swi_read 3 63037 _003485_hash NULL ++_003486_hash pipeline_enc_rx_stat_fifo_int_read 3 7107 _003486_hash NULL ++_003487_hash pipeline_enc_tx_stat_fifo_int_read 3 14680 _003487_hash NULL ++_003488_hash pipeline_hs_tx_stat_fifo_int_read 3 15642 _003488_hash &_001260_hash ++_003489_hash pipeline_pipeline_fifo_full_read 3 34095 _003489_hash NULL ++_003490_hash pipeline_post_proc_swi_read 3 24108 _003490_hash NULL ++_003491_hash pipeline_pre_proc_swi_read 3 3898 _003491_hash NULL ++_003492_hash pipeline_pre_to_defrag_swi_read 3 56321 _003492_hash NULL ++_003493_hash pipeline_rx_complete_stat_fifo_int_read 3 40671 _003493_hash NULL ++_003494_hash pipeline_sec_frag_swi_read 3 30294 _003494_hash NULL ++_003495_hash pipeline_tcp_rx_stat_fifo_int_read 3 26745 _003495_hash NULL ++_003496_hash pipeline_tcp_tx_stat_fifo_int_read 3 32589 _003496_hash NULL ++_003497_hash 
play_iframe 3 8219 _003497_hash NULL ++_003498_hash probes_write 3 29711 _003498_hash NULL ++_003499_hash psb_unlocked_ioctl 2 16926 _003499_hash &_002668_hash ++_003500_hash ps_poll_ps_poll_max_ap_turn_read 3 53140 _003500_hash NULL ++_003501_hash ps_poll_ps_poll_timeouts_read 3 5934 _003501_hash NULL ++_003502_hash ps_poll_ps_poll_utilization_read 3 39383 _003502_hash NULL ++_003503_hash ps_poll_upsd_max_ap_turn_read 3 42050 _003503_hash NULL ++_003504_hash ps_poll_upsd_timeouts_read 3 36755 _003504_hash NULL ++_003505_hash ps_poll_upsd_utilization_read 3 28519 _003505_hash NULL ++_003506_hash pvr2_ioread_read 3 10720 _003506_hash &_001669_hash ++_003507_hash pvr2_ioread_set_sync_key 3 59882 _003507_hash NULL ++_003508_hash pvr2_stream_buffer_count 2 33719 _003508_hash NULL ++_003509_hash pwr_connection_out_of_sync_read 3 35061 _003509_hash NULL ++_003510_hash pwr_cont_miss_bcns_spread_read 3 39250 _003515_hash NULL nohasharray ++_003511_hash pwr_missing_bcns_cnt_read 3 45113 _003511_hash NULL ++_003512_hash pwr_rcvd_awake_bcns_cnt_read 3 12632 _003512_hash NULL ++_003513_hash pwr_rcvd_bcns_cnt_read 3 4774 _003513_hash NULL ++_003514_hash qc_capture 3 19298 _003514_hash NULL ++_003515_hash r128_compat_ioctl 2 39250 _003515_hash &_003510_hash ++_003516_hash radeon_compat_ioctl 2 59150 _003516_hash NULL ++_003517_hash radeon_kms_compat_ioctl 2 51371 _003517_hash NULL ++_003518_hash Realloc 2 34961 _003518_hash NULL ++_003519_hash redrat3_transmit_ir 3 64244 _003519_hash NULL ++_003520_hash reg_w_buf 3 27724 _003520_hash NULL ++_003521_hash reg_w_ixbuf 4 34736 _003521_hash NULL ++_003522_hash rtllib_alloc_txb 1-2 21687 _003522_hash NULL ++_003524_hash rtllib_authentication_req 3 26713 _003524_hash NULL ++_003525_hash rtllib_wx_set_gen_ie 3 59808 _003525_hash NULL ++_003526_hash rts51x_transfer_data_partial 6 5735 _003526_hash NULL ++_003527_hash rvmalloc 1 46873 _003527_hash NULL ++_003528_hash rx_decrypt_key_not_found_read 3 37820 _003528_hash NULL ++_003529_hash rx_defrag_called_read 3 1897 _003529_hash NULL ++_003530_hash rx_defrag_decrypt_failed_read 3 41411 _003530_hash NULL ++_003531_hash rx_defrag_init_called_read 3 35935 _003531_hash NULL ++_003532_hash rx_defrag_in_process_called_read 3 59338 _003532_hash NULL ++_003533_hash rx_defrag_need_decrypt_read 3 42253 _003533_hash NULL ++_003534_hash rx_defrag_need_defrag_read 3 28117 _003534_hash NULL ++_003535_hash rx_defrag_tkip_called_read 3 21031 _003535_hash NULL ++_003536_hash rx_filter_accum_arp_pend_requests_read 3 11003 _003536_hash NULL ++_003537_hash rx_filter_arp_filter_read 3 61914 _003537_hash NULL ++_003538_hash rx_filter_beacon_filter_read 3 49279 _003538_hash NULL ++_003539_hash rx_filter_data_filter_read 3 30098 _003539_hash NULL ++_003540_hash rx_filter_dup_filter_read 3 37238 _003540_hash NULL ++_003541_hash rx_filter_ibss_filter_read 3 50167 _003541_hash NULL ++_003542_hash rx_filter_max_arp_queue_dep_read 3 5851 _003542_hash NULL ++_003543_hash rx_filter_mc_filter_read 3 25712 _003543_hash NULL ++_003544_hash rx_filter_protection_filter_read 3 39282 _003544_hash NULL ++_003545_hash rx_rate_rx_frames_per_rates_read 3 7282 _003545_hash NULL ++_003546_hash rx_rx_beacon_early_term_read 3 21559 _003546_hash NULL ++_003547_hash rx_rx_checksum_result_read 3 50617 _003547_hash NULL ++_003548_hash rx_rx_cmplt_read 3 14753 _003548_hash NULL ++_003549_hash rx_rx_cmplt_task_read 3 35226 _003549_hash NULL ++_003550_hash rx_rx_defrag_end_read 3 505 _003550_hash NULL ++_003551_hash rx_rx_defrag_read 3 2010 _003551_hash NULL 
++_003552_hash rx_rx_done_read 3 65217 _003552_hash NULL ++_003553_hash rx_rx_dropped_frame_read 3 23748 _003553_hash NULL ++_003554_hash rx_rx_frame_checksum_read 3 40140 _003554_hash NULL ++_003555_hash rx_rx_hdr_overflow_read 3 35002 _003555_hash NULL ++_003556_hash rx_rx_out_of_mpdu_nodes_read 3 64668 _003556_hash NULL ++_003557_hash rx_rx_phy_hdr_read 3 20950 _003557_hash NULL ++_003558_hash rx_rx_pre_complt_read 3 41653 _003558_hash NULL ++_003559_hash rx_rx_timeout_read 3 62389 _003559_hash NULL ++_003560_hash rx_rx_timeout_wa_read 3 50204 _003560_hash NULL ++_003561_hash rx_rx_tkip_replays_read 3 60193 _003561_hash NULL ++_003562_hash rx_rx_wa_ba_not_expected_read 3 61341 _003562_hash NULL ++_003563_hash rx_rx_wa_density_dropped_frame_read 3 26095 _003563_hash NULL ++_003564_hash rx_streaming_always_read 3 49401 _003564_hash NULL ++_003565_hash rx_streaming_interval_read 3 55291 _003565_hash NULL ++_003566_hash saa7164_buffer_alloc_user 2 9627 _003566_hash NULL ++_003567_hash send_control_msg 6 48498 _003567_hash NULL ++_003568_hash SendTxCommandPacket 3 42901 _003568_hash NULL ++_003569_hash setup_window 2-7-5-4 59178 _003569_hash NULL ++_003573_hash shmem_pwrite_fast 3 46842 _003573_hash NULL ++_003574_hash sleep_auth_read 3 19159 _003574_hash NULL ++_003575_hash sn9c102_read 3 29305 _003575_hash NULL ++_003576_hash snd_pcm_alloc_vmalloc_buffer 2 44595 _003576_hash NULL ++_003577_hash split_scan_timeout_read 3 20029 _003577_hash NULL ++_003578_hash stk_prepare_sio_buffers 2 57168 _003578_hash NULL ++_003579_hash store_debug_level 3 35652 _003579_hash NULL ++_003580_hash suspend_dtim_interval_read 3 64971 _003580_hash NULL ++_003581_hash sys_prctl 4 8766 _003581_hash NULL ++_003582_hash tm6000_read_write_usb 7 50774 _003582_hash &_002149_hash ++_003583_hash tracing_read_pipe 3 35312 _003583_hash NULL ++_003584_hash ts_read 3 44687 _003584_hash NULL ++_003585_hash ts_write 3 64336 _003585_hash NULL ++_003586_hash tt3650_ci_msg 4 57219 _003586_hash NULL ++_003587_hash ttm_object_device_init 2 10321 _003587_hash NULL ++_003588_hash ttm_object_file_init 2 27804 _003588_hash NULL ++_003589_hash tx_frag_bad_mblk_num_read 3 28064 _003589_hash NULL ++_003590_hash tx_frag_cache_hit_read 3 29639 _003590_hash NULL ++_003591_hash tx_frag_cache_miss_read 3 28394 _003591_hash NULL ++_003592_hash tx_frag_called_read 3 1748 _003592_hash NULL ++_003593_hash tx_frag_failed_read 3 43540 _003593_hash NULL ++_003594_hash tx_frag_init_called_read 3 48377 _003594_hash NULL ++_003595_hash tx_frag_in_process_called_read 3 1290 _003595_hash NULL ++_003596_hash tx_frag_key_not_found_read 3 22971 _003596_hash NULL ++_003597_hash tx_frag_mpdu_alloc_failed_read 3 41167 _003597_hash NULL ++_003598_hash tx_frag_need_fragmentation_read 3 50153 _003598_hash NULL ++_003599_hash tx_frag_tkip_called_read 3 31575 _003599_hash NULL ++_003600_hash tx_tx_burst_programmed_read 3 20320 _003600_hash NULL ++_003601_hash tx_tx_checksum_result_read 3 36490 _003601_hash &_001996_hash ++_003602_hash tx_tx_cmplt_read 3 35854 _003602_hash NULL ++_003603_hash tx_tx_data_prepared_read 3 43497 _003603_hash NULL ++_003604_hash tx_tx_data_programmed_read 3 36871 _003604_hash NULL ++_003605_hash tx_tx_done_data_read 3 6799 _003605_hash NULL ++_003606_hash tx_tx_done_int_template_read 3 55511 _003606_hash &_001887_hash ++_003607_hash tx_tx_done_template_read 3 35104 _003607_hash &_000106_hash ++_003608_hash tx_tx_exch_expiry_read 3 8749 _003608_hash NULL ++_003609_hash tx_tx_exch_pending_read 3 53018 _003609_hash NULL ++_003610_hash 
tx_tx_exch_read 3 52986 _003610_hash NULL ++_003611_hash tx_tx_frame_checksum_read 3 41553 _003611_hash NULL ++_003612_hash tx_tx_imm_resp_read 3 55964 _003612_hash NULL ++_003613_hash tx_tx_prepared_descs_read 3 9221 _003613_hash NULL ++_003614_hash tx_tx_retry_data_read 3 1926 _003614_hash NULL ++_003615_hash tx_tx_retry_template_read 3 57623 _003615_hash NULL ++_003616_hash tx_tx_start_data_read 3 53219 _003616_hash NULL ++_003617_hash tx_tx_start_fw_gen_read 3 58648 _003617_hash NULL ++_003618_hash tx_tx_start_int_templates_read 3 58324 _003618_hash NULL ++_003619_hash tx_tx_start_null_frame_read 3 6281 _003619_hash NULL ++_003620_hash tx_tx_starts_read 3 3617 _003620_hash NULL ++_003621_hash tx_tx_start_templates_read 3 17164 _003621_hash NULL ++_003622_hash tx_tx_template_prepared_read 3 30424 _003622_hash NULL ++_003623_hash tx_tx_template_programmed_read 3 30461 _003623_hash NULL ++_003624_hash udi_log_event 3 58105 _003624_hash NULL ++_003625_hash udl_prime_create 2 57159 _003625_hash NULL ++_003626_hash uf_create_device_nodes 2 24948 _003626_hash NULL ++_003627_hash uf_sme_queue_message 3 15697 _003627_hash NULL ++_003628_hash ufx_alloc_urb_list 3 10349 _003628_hash NULL ++_003629_hash unifi_net_data_malloc 3 24716 _003629_hash NULL ++_003630_hash unifi_read 3 14899 _003630_hash NULL ++_003631_hash unifi_write 3 65012 _003631_hash NULL ++_003632_hash usb_buffer_alloc 2 36276 _003632_hash NULL ++_003633_hash usbvision_rvmalloc 1 19655 _003633_hash NULL ++_003634_hash usbvision_v4l2_read 3 34386 _003634_hash NULL ++_003635_hash uvc_alloc_buffers 2-3 9656 _003635_hash NULL ++_003637_hash uvc_alloc_entity 3-4 20836 _003637_hash NULL ++_003639_hash uvc_debugfs_stats_read 3 56651 _003639_hash NULL ++_003640_hash uvc_simplify_fraction 3 31303 _003640_hash NULL ++_003641_hash v4l2_ctrl_new 7 24927 _003641_hash NULL ++_003642_hash v4l2_event_subscribe 3 53687 _003642_hash NULL ++_003643_hash v4l_stk_read 3 39672 _003643_hash NULL ++_003644_hash __vb2_perform_fileio 3 63033 _003644_hash NULL ++_003645_hash vfd_write 3 14717 _003645_hash NULL ++_003646_hash vfio_config_do_rw 3 46091 _003646_hash NULL ++_003647_hash vfio_msi_enable 2 20906 _003647_hash NULL ++_003648_hash viafb_dvp0_proc_write 3 23023 _003648_hash NULL ++_003649_hash viafb_dvp1_proc_write 3 48864 _003649_hash NULL ++_003650_hash viafb_vt1636_proc_write 3 16018 _003650_hash NULL ++_003651_hash __videobuf_alloc_vb 1 27062 _003651_hash NULL ++_003652_hash __videobuf_alloc_vb 1 5665 _003652_hash NULL ++_003653_hash __videobuf_copy_to_user 4 15423 _003653_hash NULL ++_003654_hash videobuf_dma_init_kernel 3 6963 _003654_hash NULL ++_003655_hash videobuf_pages_to_sg 2 3708 _003655_hash NULL ++_003656_hash videobuf_vmalloc_to_sg 2 4548 _003656_hash NULL ++_003657_hash video_usercopy 2 62151 _003657_hash NULL ++_003658_hash virtscsi_alloc_tgt 2 6643 _003658_hash NULL ++_003659_hash vmw_cursor_update_image 3-4 16332 _003659_hash NULL ++_003661_hash vmw_framebuffer_dmabuf_dirty 6 37661 _003661_hash &_001116_hash ++_003662_hash vmw_framebuffer_surface_dirty 6 48132 _003662_hash NULL ++_003663_hash vmw_gmr2_bind 3 21305 _003663_hash NULL ++_003664_hash vmw_unlocked_ioctl 2 19212 _003664_hash NULL ++_003665_hash w9966_v4l_read 3 31148 _003665_hash NULL ++_003666_hash wl1273_fm_fops_write 3 60621 _003666_hash NULL ++_003667_hash zoran_write 3 22404 _003667_hash NULL ++_003668_hash alloc_vm_area 1 15989 _003668_hash NULL ++_003669_hash cx18_copy_mdl_to_user 4 45549 _003669_hash NULL ++_003670_hash dlfb_ops_write 3 64150 _003670_hash NULL 
++_003671_hash dvb_demux_read 3 13981 _003671_hash NULL ++_003672_hash dvb_dmxdev_read_sec 4 7892 _003672_hash NULL ++_003673_hash dvb_dvr_read 3 17073 _003673_hash NULL ++_003674_hash em28xx_init_isoc 4 62883 _003674_hash &_000729_hash ++_003675_hash fb_alloc_cmap 2 6554 _003675_hash NULL ++_003676_hash gspca_dev_probe 4 2570 _003676_hash NULL ++_003677_hash ieee80211_auth_challenge 3 18810 _003677_hash NULL ++_003678_hash ieee80211_rtl_auth_challenge 3 61897 _003678_hash NULL ++_003679_hash init_pci_cap_msi_perm 2 59033 _003679_hash NULL ++_003680_hash __ioremap_caller 1-2 21800 _003680_hash NULL ++_003682_hash ivtv_read 3 57796 _003682_hash NULL ++_003683_hash ivtv_v4l2_write 3 39226 _003683_hash NULL ++_003684_hash mce_async_out 3 58056 _003684_hash NULL ++_003685_hash mce_flush_rx_buffer 2 14976 _003685_hash NULL ++_003686_hash ms_read_multiple_pages 4-5 8052 _003686_hash NULL ++_003688_hash ms_write_multiple_pages 5-6 10362 _003688_hash NULL ++_003690_hash nfc_hci_send_cmd 5 55714 _003690_hash NULL ++_003691_hash persistent_ram_new 1-2 40501 _003691_hash NULL ++_003693_hash picolcd_fb_write 3 2318 _003693_hash NULL ++_003694_hash process_bulk_data_command 4 38906 _003694_hash NULL ++_003695_hash pvr2_v4l2_read 3 18006 _003695_hash NULL ++_003696_hash qcam_read 3 13977 _003696_hash NULL ++_003697_hash register_unifi_sdio 2 55239 _003697_hash NULL ++_003698_hash resize_async_buffer 4 64031 _003698_hash &_002431_hash ++_003699_hash rtllib_auth_challenge 3 12493 _003699_hash NULL ++_003702_hash stk_allocate_buffers 2 16291 _003702_hash NULL ++_003703_hash subdev_ioctl 2 28417 _003703_hash NULL ++_003704_hash _sys_packet_req 4 46793 _003704_hash NULL ++_003705_hash tm6000_i2c_recv_regs16 5 2949 _003705_hash NULL ++_003706_hash tm6000_i2c_recv_regs 5 46215 _003706_hash NULL ++_003707_hash tm6000_i2c_send_regs 5 20250 _003707_hash NULL ++_003708_hash tt3650_ci_msg_locked 4 8013 _003708_hash NULL ++_003709_hash ufx_ops_write 3 54848 _003709_hash NULL ++_003710_hash update_macheader 7 1775 _003710_hash NULL ++_003711_hash usbdux_attach_common 4 51764 _003750_hash NULL nohasharray ++_003712_hash usbduxfast_attach_common 4 52538 _003712_hash NULL ++_003713_hash usbduxsigma_attach_common 4 40847 _003713_hash NULL ++_003714_hash uvc_v4l2_ioctl 2 8411 _003714_hash NULL ++_003715_hash v4l2_ctrl_new_int_menu 4 41151 _003715_hash NULL ++_003716_hash v4l2_ctrl_new_std 5 45748 _003716_hash &_000497_hash ++_003717_hash v4l2_ctrl_new_std_menu 4 6221 _003717_hash NULL ++_003718_hash vb2_read 3 42703 _003718_hash NULL ++_003719_hash vb2_write 3 31948 _003719_hash NULL ++_003720_hash vfio_pci_set_msi_trigger 3-4 26507 _003720_hash NULL ++_003722_hash viafb_iga1_odev_proc_write 3 36241 _003722_hash NULL ++_003723_hash viafb_iga2_odev_proc_write 3 2363 _003723_hash NULL ++_003724_hash __videobuf_alloc_cached 1 12740 _003724_hash NULL ++_003725_hash __videobuf_alloc_uncached 1 55711 _003725_hash NULL ++_003726_hash __videobuf_copy_stream 4 44769 _003726_hash NULL ++_003727_hash videobuf_read_one 3 31637 _003727_hash NULL ++_003728_hash video_ioctl2 2 21380 _003728_hash NULL ++_003729_hash vmap 2 15025 _003729_hash NULL ++_003730_hash vmw_cursor_update_dmabuf 3-4 32045 _003730_hash NULL ++_003732_hash vmw_gmr_bind 3 44130 _003732_hash NULL ++_003733_hash xd_read_multiple_pages 4-5 11422 _003733_hash NULL ++_003735_hash xd_write_multiple_pages 5-6 53633 _003735_hash NULL ++_003737_hash xenfb_write 3 43412 _003737_hash NULL ++_003738_hash arch_gnttab_map_shared 3 41306 _003738_hash NULL ++_003739_hash 
arch_gnttab_map_status 3 49812 _003739_hash NULL ++_003740_hash bttv_read 3 11432 _003740_hash NULL ++_003741_hash cx18_read 3 23699 _003741_hash NULL ++_003742_hash cx2341x_ctrl_new_menu 3 49700 _003742_hash NULL ++_003743_hash cx2341x_ctrl_new_std 4 57061 _003743_hash NULL ++_003744_hash cx25821_video_ioctl 2 30188 _003744_hash NULL ++_003745_hash dt3155_read 3 59226 _003745_hash NULL ++_003746_hash ioremap_cache 1-2 47189 _003746_hash NULL ++_003748_hash ioremap_nocache 1-2 2439 _003748_hash NULL ++_003750_hash ioremap_prot 1-2 51764 _003750_hash &_003711_hash ++_003752_hash ioremap_wc 1-2 62695 _003752_hash NULL ++_003754_hash ivtv_read_pos 3 34400 _003754_hash &_000312_hash ++_003755_hash mcam_v4l_read 3 36513 _003755_hash NULL ++_003756_hash ms_rw_multi_sector 3-4 7459 _003756_hash NULL ++_003758_hash pvr2_v4l2_ioctl 2 24398 _003758_hash &_000877_hash ++_003759_hash ramoops_init_prz 5 12134 _003759_hash NULL ++_003761_hash ttm_bo_kmap_ttm 3 5922 _003761_hash NULL ++_003762_hash uf_ap_process_data_pdu 7 25860 _003762_hash NULL ++_003763_hash vb2_fop_read 3 24080 _003763_hash NULL ++_003764_hash vb2_fop_write 3 30420 _003764_hash NULL ++_003765_hash videobuf_read_stream 3 14956 _003765_hash NULL ++_003766_hash video_read 3 28148 _003766_hash NULL ++_003767_hash vmw_du_crtc_cursor_set 4-5 28479 _003767_hash NULL ++_003769_hash xd_rw 3-4 49020 _003769_hash NULL ++_003771_hash zoran_ioctl 2 30465 _003771_hash NULL ++_003772_hash zr364xx_read 3 2354 _003772_hash NULL ++_003773_hash acpi_os_ioremap 1-2 49523 _003773_hash NULL ++_003775_hash au0828_v4l2_read 3 40220 _003775_hash NULL ++_003776_hash ca91cx42_alloc_resource 2 10502 _003776_hash NULL ++_003778_hash cx18_read_pos 3 4683 _003778_hash NULL ++_003779_hash cx18_v4l2_read 3 21196 _003779_hash NULL ++_003780_hash cx231xx_v4l2_read 3 55014 _003780_hash NULL ++_003781_hash devm_ioremap_nocache 2-3 2036 _003781_hash NULL ++_003783_hash do_test 1 15766 _003783_hash NULL ++_003784_hash __einj_error_trigger 1 17707 _003784_hash &_001764_hash ++_003785_hash em28xx_v4l2_read 3 16701 _003785_hash NULL ++_003786_hash init_chip_wc_pat 2 62768 _003786_hash NULL ++_003787_hash intel_render_ring_init_dri 2-3 45446 _003787_hash NULL ++_003789_hash io_mapping_create_wc 1-2 1354 _003789_hash NULL ++_003791_hash iommu_map_mmio_space 1 30919 _003791_hash NULL ++_003792_hash ioremap 1-2 23172 _003792_hash NULL ++_003794_hash ivtv_v4l2_read 3 1964 _003794_hash NULL ++_003795_hash mga_ioremap 1-2 8571 _003795_hash NULL ++_003797_hash mpeg_read 3 6708 _003797_hash NULL ++_003798_hash msix_map_region 3 3411 _003798_hash NULL ++_003799_hash ms_rw 3-4 17220 _003799_hash NULL ++_003801_hash pci_iomap 3 47575 _003801_hash NULL ++_003802_hash pd_video_read 3 24510 _003802_hash NULL ++_003803_hash sfi_map_memory 1-2 5183 _003803_hash NULL ++_003805_hash solo_enc_read 3 33553 _003805_hash NULL ++_003806_hash solo_v4l2_read 3 59247 _003806_hash NULL ++_003807_hash timblogiw_read 3 48305 _003807_hash NULL ++_003808_hash tm6000_read 3 4151 _003808_hash NULL ++_003809_hash tsi148_alloc_resource 2 24563 _003809_hash NULL ++_003810_hash ttm_bo_ioremap 2-3 31082 _003810_hash NULL ++_003812_hash ttm_bo_kmap 3-2 60118 _003812_hash NULL ++_003813_hash vb2_vmalloc_get_userptr 3 31374 _003813_hash NULL ++_003814_hash vbi_read 3 63673 _003814_hash NULL ++_003815_hash viacam_read 3 54526 _003815_hash NULL ++_003816_hash xlate_dev_mem_ptr 1 15291 _003816_hash &_001231_hash ++_003817_hash a4t_cs_init 3 27734 _003817_hash NULL ++_003818_hash aac_nark_ioremap 2 50163 _003818_hash 
&_000323_hash ++_003819_hash aac_rkt_ioremap 2 3333 _003819_hash NULL ++_003820_hash aac_rx_ioremap 2 52410 _003820_hash NULL ++_003821_hash aac_sa_ioremap 2 13596 _003821_hash &_000299_hash ++_003822_hash aac_src_ioremap 2 41688 _003822_hash NULL ++_003823_hash aac_srcv_ioremap 2 6659 _003823_hash NULL ++_003824_hash acpi_map 1-2 58725 _003824_hash NULL ++_003826_hash acpi_os_read_memory 1-3 54186 _003826_hash NULL ++_003828_hash acpi_os_write_memory 1-3 56416 _003828_hash &_003429_hash ++_003830_hash atyfb_setup_generic 3 49151 _003830_hash NULL ++_003831_hash ca91cx42_master_set 4 23146 _003831_hash NULL ++_003832_hash check_mirror 1-2 57342 _003832_hash &_001753_hash ++_003834_hash cycx_setup 4 47562 _003834_hash NULL ++_003835_hash devm_ioremap 2-3 29235 _003835_hash NULL ++_003837_hash divasa_remap_pci_bar 3-4 23485 _003837_hash &_000979_hash ++_003839_hash doc_probe 1 23285 _003839_hash NULL ++_003840_hash DoC_Probe 1 57534 _003840_hash NULL ++_003841_hash efi_ioremap 1-2 3492 _003841_hash &_001137_hash ++_003843_hash ems_pcmcia_add_card 2 62627 _003843_hash NULL ++_003844_hash isp1760_register 1-2 628 _003844_hash NULL ++_003846_hash mid_get_vbt_data_r0 2 10876 _003846_hash NULL ++_003847_hash mid_get_vbt_data_r10 2 6308 _003847_hash NULL ++_003848_hash mid_get_vbt_data_r1 2 26170 _003848_hash NULL ++_003849_hash mthca_map_reg 2-3 5664 _003849_hash NULL ++_003851_hash mthca_setup_cmd_doorbells 2 53954 _003851_hash NULL ++_003852_hash netxen_nic_map_indirect_address_128M 2 42257 _003852_hash NULL ++_003853_hash pcim_iomap 3 58334 _003853_hash NULL ++_003854_hash persistent_ram_iomap 1-2 47156 _003854_hash NULL ++_003856_hash read_vbt_r0 1 503 _003856_hash NULL ++_003857_hash read_vbt_r10 1 60679 _003857_hash NULL ++_003858_hash register_device 2-3 60015 _003858_hash NULL ++_003860_hash remap_pci_mem 1-2 15966 _003860_hash NULL ++_003862_hash rtl_port_map 1-2 2385 _003862_hash NULL ++_003864_hash sfi_map_table 1 5462 _003864_hash NULL ++_003865_hash sriov_enable_migration 2 14889 _003865_hash NULL ++_003866_hash ssb_bus_scan 2 36578 _003866_hash NULL ++_003867_hash ssb_ioremap 2 5228 _003867_hash NULL ++_003868_hash tpci200_slot_map_space 2 3848 _003868_hash NULL ++_003869_hash tpm_tis_init 2-3 15304 _003869_hash NULL ++_003871_hash tsi148_master_set 4 14685 _003871_hash NULL ++_003872_hash acpi_os_map_memory 1-2 11161 _003872_hash NULL ++_003874_hash com90xx_found 3 13974 _003874_hash NULL ++_003875_hash netxen_nic_hw_read_wx_128M 2 26858 _003875_hash NULL ++_003876_hash netxen_nic_hw_write_wx_128M 2 33488 _003876_hash NULL ++_003877_hash sfi_check_table 1 6772 _003877_hash NULL ++_003878_hash sfi_sysfs_install_table 1 51688 _003878_hash NULL ++_003879_hash sriov_enable 2 59689 _003879_hash NULL ++_003880_hash ssb_bus_register 3 65183 _003880_hash NULL ++_003881_hash acpi_ex_system_memory_space_handler 2 31192 _003881_hash NULL ++_003882_hash acpi_tb_check_xsdt 1 21862 _003882_hash NULL ++_003883_hash acpi_tb_install_table 1 12988 _003883_hash NULL ++_003884_hash acpi_tb_parse_root_table 1 53455 _003884_hash NULL ++_003885_hash check_vendor_extension 1 3254 _003885_hash NULL ++_003886_hash pci_enable_sriov 2 35745 _003886_hash NULL ++_003887_hash ssb_bus_pcmciabus_register 3 56020 _003887_hash NULL ++_003888_hash ssb_bus_ssbbus_register 2 2217 _003888_hash NULL ++_003889_hash lpfc_sli_probe_sriov_nr_virtfn 2 26004 _003889_hash NULL ++_003890_hash alloc_vm_area 1 36149 _003890_hash NULL ++_003891_hash cma_create_area 2 38642 _003891_hash NULL ++_003893_hash fbcon_prepare_logo 5 6246 
_003893_hash NULL ++_003894_hash io_mapping_map_wc 2 19284 _003894_hash NULL ++_003895_hash nfs_dns_resolve_name 3 25036 _003895_hash NULL ++_003896_hash nfs_parse_server_name 2 1899 _003896_hash NULL +--- tools/gcc/size_overflow_plugin.c 1970-01-01 00:00:00.000000000 +0000 ++++ tools/gcc/size_overflow_plugin.c 2012-10-15 17:30:59.835924531 +0000 +@@ -0,0 +1,1879 @@ ++/* ++ * Copyright 2011, 2012 by Emese Revfy ++ * Licensed under the GPL v2, or (at your option) v3 ++ * ++ * Homepage: ++ * http://www.grsecurity.net/~ephox/overflow_plugin/ ++ * ++ * This plugin recomputes expressions of function arguments marked by a size_overflow attribute ++ * with double integer precision (DImode/TImode for 32/64 bit integer types). ++ * The recomputed argument is checked against TYPE_MAX and an event is logged on overflow and the triggering process is killed. ++ * ++ * Usage: ++ * $ gcc -I`gcc -print-file-name=plugin`/include/c-family -I`gcc -print-file-name=plugin`/include -fPIC -shared -O2 -ggdb -Wall -W -Wno-missing-field-initializers -o size_overflow_plugin.so size_overflow_plugin.c ++ * $ gcc -fplugin=size_overflow_plugin.so test.c -O2 ++ */ ++ ++#include "gcc-plugin.h" ++#include "config.h" ++#include "system.h" ++#include "coretypes.h" ++#include "tree.h" ++#include "tree-pass.h" ++#include "intl.h" ++#include "plugin-version.h" ++#include "tm.h" ++#include "toplev.h" ++#include "function.h" ++#include "tree-flow.h" ++#include "plugin.h" ++#include "gimple.h" ++#include "c-common.h" ++#include "diagnostic.h" ++#include "cfgloop.h" ++ ++#if BUILDING_GCC_VERSION >= 4007 ++#include "c-tree.h" ++#else ++#define C_DECL_IMPLICIT(EXP) DECL_LANG_FLAG_2 (EXP) ++#endif ++ ++struct size_overflow_hash { ++ const struct size_overflow_hash * const next; ++ const char * const name; ++ const unsigned int param; ++}; ++ ++#include "size_overflow_hash.h" ++ ++enum marked { ++ MARKED_NO, MARKED_YES, MARKED_NOT_INTENTIONAL ++}; ++ ++#define __unused __attribute__((__unused__)) ++#define NAME(node) IDENTIFIER_POINTER(DECL_NAME(node)) ++#define NAME_LEN(node) IDENTIFIER_LENGTH(DECL_NAME(node)) ++#define BEFORE_STMT true ++#define AFTER_STMT false ++#define CREATE_NEW_VAR NULL_TREE ++#define CODES_LIMIT 32 ++#define MAX_PARAM 32 ++#define MY_STMT GF_PLF_1 ++#define NO_CAST_CHECK GF_PLF_2 ++ ++#if BUILDING_GCC_VERSION == 4005 ++#define DECL_CHAIN(NODE) (TREE_CHAIN(DECL_MINIMAL_CHECK(NODE))) ++#endif ++ ++int plugin_is_GPL_compatible; ++void debug_gimple_stmt(gimple gs); ++ ++static tree expand(struct pointer_set_t *visited, tree lhs); ++static bool pre_expand(struct pointer_set_t *visited, const_tree lhs); ++static tree report_size_overflow_decl; ++static const_tree const_char_ptr_type_node; ++static unsigned int handle_function(void); ++static void check_size_overflow(gimple stmt, tree size_overflow_type, tree cast_rhs, tree rhs, bool before); ++static tree get_size_overflow_type(gimple stmt, const_tree node); ++static tree dup_assign(struct pointer_set_t *visited, gimple oldstmt, tree size_overflow_type, tree rhs1, tree rhs2, tree __unused rhs3); ++ ++static struct plugin_info size_overflow_plugin_info = { ++ .version = "20120930beta", ++ .help = "no-size-overflow\tturn off size overflow checking\n", ++}; ++ ++static tree handle_size_overflow_attribute(tree *node, tree __unused name, tree args, int __unused flags, bool *no_add_attrs) ++{ ++ unsigned int arg_count; ++ enum tree_code code = TREE_CODE(*node); ++ ++ switch (code) { ++ case FUNCTION_DECL: ++ arg_count = type_num_arguments(TREE_TYPE(*node)); ++ break; ++ 
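/* the attribute may also be attached to a function or method type
++ * itself (e.g. through a typedef), in which case the node is the
++ * type and the argument count is queried from it directly; a
++ * typical (illustrative) annotation on a declaration would be:
++ *   void *kmalloc(size_t size, gfp_t flags) __attribute__((size_overflow(1)));
++ * marking parameter 1 for recomputation in double precision */ ++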
case FUNCTION_TYPE: ++ case METHOD_TYPE: ++ arg_count = type_num_arguments(*node); ++ break; ++ default: ++ *no_add_attrs = true; ++ error("%s: %qE attribute only applies to functions", __func__, name); ++ return NULL_TREE; ++ } ++ ++ for (; args; args = TREE_CHAIN(args)) { ++ tree position = TREE_VALUE(args); ++ if (TREE_CODE(position) != INTEGER_CST || TREE_INT_CST_HIGH(position) || TREE_INT_CST_LOW(position) < 1 || TREE_INT_CST_LOW(position) > arg_count ) { ++ error("%s: parameter %u is outside range.", __func__, (unsigned int)TREE_INT_CST_LOW(position)); ++ *no_add_attrs = true; ++ } ++ } ++ return NULL_TREE; ++} ++ ++static const char* get_asm_name(tree node) ++{ ++ return IDENTIFIER_POINTER(DECL_ASSEMBLER_NAME(node)); ++} ++ ++static tree handle_intentional_overflow_attribute(tree *node, tree __unused name, tree args, int __unused flags, bool *no_add_attrs) ++{ ++ unsigned int arg_count, arg_num; ++ enum tree_code code = TREE_CODE(*node); ++ ++ switch (code) { ++ case FUNCTION_DECL: ++ arg_count = type_num_arguments(TREE_TYPE(*node)); ++ break; ++ case FUNCTION_TYPE: ++ case METHOD_TYPE: ++ arg_count = type_num_arguments(*node); ++ break; ++ case FIELD_DECL: ++ arg_num = TREE_INT_CST_LOW(TREE_VALUE(args)); ++ if (arg_num != 0) { ++ *no_add_attrs = true; ++ error("%s: %qE attribute parameter can only be 0 in structure fields", __func__, name); ++ } ++ return NULL_TREE; ++ default: ++ *no_add_attrs = true; ++ error("%qE attribute only applies to functions", name); ++ return NULL_TREE; ++ } ++ ++ for (; args; args = TREE_CHAIN(args)) { ++ tree position = TREE_VALUE(args); ++ if (TREE_CODE(position) != INTEGER_CST || TREE_INT_CST_HIGH(position) || TREE_INT_CST_LOW(position) > arg_count ) { ++ error("%s: parameter %u is outside range.", __func__, (unsigned int)TREE_INT_CST_LOW(position)); ++ *no_add_attrs = true; ++ } ++ } ++ return NULL_TREE; ++} ++ ++static struct attribute_spec size_overflow_attr = { ++ .name = "size_overflow", ++ .min_length = 1, ++ .max_length = -1, ++ .decl_required = true, ++ .type_required = false, ++ .function_type_required = false, ++ .handler = handle_size_overflow_attribute, ++#if BUILDING_GCC_VERSION >= 4007 ++ .affects_type_identity = false ++#endif ++}; ++ ++static struct attribute_spec intentional_overflow_attr = { ++ .name = "intentional_overflow", ++ .min_length = 1, ++ .max_length = -1, ++ .decl_required = true, ++ .type_required = false, ++ .function_type_required = false, ++ .handler = handle_intentional_overflow_attribute, ++#if BUILDING_GCC_VERSION >= 4007 ++ .affects_type_identity = false ++#endif ++}; ++ ++static void register_attributes(void __unused *event_data, void __unused *data) ++{ ++ register_attribute(&size_overflow_attr); ++ register_attribute(&intentional_overflow_attr); ++} ++ ++// http://www.team5150.com/~andrew/noncryptohashzoo2~/CrapWow.html ++static unsigned int CrapWow(const char *key, unsigned int len, unsigned int seed) ++{ ++#define cwfold( a, b, lo, hi ) { p = (unsigned int)(a) * (unsigned long long)(b); lo ^= (unsigned int)p; hi ^= (unsigned int)(p >> 32); } ++#define cwmixa( in ) { cwfold( in, m, k, h ); } ++#define cwmixb( in ) { cwfold( in, n, h, k ); } ++ ++ unsigned int m = 0x57559429; ++ unsigned int n = 0x5052acdb; ++ const unsigned int *key4 = (const unsigned int *)key; ++ unsigned int h = len; ++ unsigned int k = len + seed + n; ++ unsigned long long p; ++ ++ while (len >= 8) { ++ cwmixb(key4[0]) cwmixa(key4[1]) key4 += 2; ++ len -= 8; ++ } ++ if (len >= 4) { ++ cwmixb(key4[0]) key4 += 1; ++ len -= 4; ++ } ++ if 
(len) ++ cwmixa(key4[0] & ((1 << (len * 8)) - 1 )); ++ cwmixb(h ^ (k + n)); ++ return k ^ h; ++ ++#undef cwfold ++#undef cwmixa ++#undef cwmixb ++} ++ ++static inline unsigned int get_hash_num(const char *fndecl, const char *tree_codes, unsigned int len, unsigned int seed) ++{ ++ unsigned int fn = CrapWow(fndecl, strlen(fndecl), seed) & 0xffff; ++ unsigned int codes = CrapWow(tree_codes, len, seed) & 0xffff; ++ return fn ^ codes; ++} ++ ++static inline tree get_original_function_decl(tree fndecl) ++{ ++ if (DECL_ABSTRACT_ORIGIN(fndecl)) ++ return DECL_ABSTRACT_ORIGIN(fndecl); ++ return fndecl; ++} ++ ++static inline gimple get_def_stmt(const_tree node) ++{ ++ gcc_assert(node != NULL_TREE); ++ gcc_assert(TREE_CODE(node) == SSA_NAME); ++ return SSA_NAME_DEF_STMT(node); ++} ++ ++static unsigned char get_tree_code(const_tree type) ++{ ++ switch (TREE_CODE(type)) { ++ case ARRAY_TYPE: ++ return 0; ++ case BOOLEAN_TYPE: ++ return 1; ++ case ENUMERAL_TYPE: ++ return 2; ++ case FUNCTION_TYPE: ++ return 3; ++ case INTEGER_TYPE: ++ return 4; ++ case POINTER_TYPE: ++ return 5; ++ case RECORD_TYPE: ++ return 6; ++ case UNION_TYPE: ++ return 7; ++ case VOID_TYPE: ++ return 8; ++ case REAL_TYPE: ++ return 9; ++ case VECTOR_TYPE: ++ return 10; ++ case REFERENCE_TYPE: ++ return 11; ++ case OFFSET_TYPE: ++ return 12; ++ case COMPLEX_TYPE: ++ return 13; ++ default: ++ debug_tree((tree)type); ++ gcc_unreachable(); ++ } ++} ++ ++static size_t add_type_codes(const_tree type, unsigned char *tree_codes, size_t len) ++{ ++ gcc_assert(type != NULL_TREE); ++ ++ while (type && len < CODES_LIMIT) { ++ tree_codes[len] = get_tree_code(type); ++ len++; ++ type = TREE_TYPE(type); ++ } ++ return len; ++} ++ ++static unsigned int get_function_decl(const_tree fndecl, unsigned char *tree_codes) ++{ ++ const_tree arg, result, arg_field, type = TREE_TYPE(fndecl); ++ enum tree_code code = TREE_CODE(type); ++ size_t len = 0; ++ ++ gcc_assert(code == FUNCTION_TYPE || code == METHOD_TYPE); ++ ++ arg = TYPE_ARG_TYPES(type); ++ // skip builtins __builtin_constant_p ++ if (!arg && DECL_BUILT_IN(fndecl)) ++ return 0; ++ ++ if (TREE_CODE_CLASS(code) == tcc_type) ++ result = type; ++ else ++ result = DECL_RESULT(fndecl); ++ ++ gcc_assert(result != NULL_TREE); ++ len = add_type_codes(TREE_TYPE(result), tree_codes, len); ++ ++ if (arg == NULL_TREE) { ++ gcc_assert(CODE_CONTAINS_STRUCT(TREE_CODE(fndecl), TS_DECL_NON_COMMON)); ++ arg_field = DECL_ARGUMENT_FLD(fndecl); ++ if (arg_field == NULL_TREE) ++ return 0; ++ arg = TREE_TYPE(arg_field); ++ len = add_type_codes(arg, tree_codes, len); ++ gcc_assert(len != 0); ++ return len; ++ } ++ ++ gcc_assert(arg != NULL_TREE && TREE_CODE(arg) == TREE_LIST); ++ while (arg && len < CODES_LIMIT) { ++ len = add_type_codes(TREE_VALUE(arg), tree_codes, len); ++ arg = TREE_CHAIN(arg); ++ } ++ ++ gcc_assert(len != 0); ++ return len; ++} ++ ++static const struct size_overflow_hash *get_function_hash(tree fndecl) ++{ ++ unsigned int hash; ++ const struct size_overflow_hash *entry; ++ unsigned char tree_codes[CODES_LIMIT]; ++ size_t len; ++ const char *func_name = get_asm_name(fndecl); ++ ++ len = get_function_decl(fndecl, tree_codes); ++ if (len == 0) ++ return NULL; ++ ++ hash = get_hash_num(func_name, (const char*) tree_codes, len, 0); ++ ++ entry = size_overflow_hash[hash]; ++ while (entry) { ++ if (!strcmp(entry->name, func_name)) ++ return entry; ++ entry = entry->next; ++ } ++ ++ return NULL; ++} ++ ++static void check_arg_type(const_tree arg) ++{ ++ const_tree type = TREE_TYPE(arg); ++ enum tree_code 
code = TREE_CODE(type); ++ ++ gcc_assert(code == INTEGER_TYPE || code == ENUMERAL_TYPE || ++ (code == POINTER_TYPE && TREE_CODE(TREE_TYPE(type)) == VOID_TYPE) || ++ (code == POINTER_TYPE && TREE_CODE(TREE_TYPE(type)) == INTEGER_TYPE)); ++} ++ ++static int find_arg_number(const_tree arg, tree func) ++{ ++ tree var; ++ unsigned int argnum = 1; ++ ++ if (TREE_CODE(arg) == SSA_NAME) ++ arg = SSA_NAME_VAR(arg); ++ ++ for (var = DECL_ARGUMENTS(func); var; var = TREE_CHAIN(var)) { ++ if (strcmp(NAME(arg), NAME(var))) { ++ argnum++; ++ continue; ++ } ++ check_arg_type(var); ++ return argnum; ++ } ++ gcc_unreachable(); ++} ++ ++static tree create_new_var(tree type) ++{ ++ tree new_var = create_tmp_var(type, "cicus"); ++ ++ add_referenced_var(new_var); ++ mark_sym_for_renaming(new_var); ++ return new_var; ++} ++ ++static gimple create_binary_assign(enum tree_code code, gimple stmt, tree rhs1, tree rhs2) ++{ ++ gimple assign; ++ gimple_stmt_iterator gsi = gsi_for_stmt(stmt); ++ tree type = TREE_TYPE(rhs1); ++ tree lhs = create_new_var(type); ++ ++ assign = gimple_build_assign_with_ops(code, lhs, rhs1, rhs2); ++ gimple_set_lhs(assign, make_ssa_name(lhs, assign)); ++ ++ gsi_insert_before(&gsi, assign, GSI_NEW_STMT); ++ update_stmt(assign); ++ gimple_set_plf(assign, MY_STMT, true); ++ return assign; ++} ++ ++static bool is_bool(const_tree node) ++{ ++ const_tree type; ++ ++ if (node == NULL_TREE) ++ return false; ++ ++ type = TREE_TYPE(node); ++ if (!INTEGRAL_TYPE_P(type)) ++ return false; ++ if (TREE_CODE(type) == BOOLEAN_TYPE) ++ return true; ++ if (TYPE_PRECISION(type) == 1) ++ return true; ++ return false; ++} ++ ++static tree cast_a_tree(tree type, tree var) ++{ ++ gcc_assert(type != NULL_TREE); ++ gcc_assert(var != NULL_TREE); ++ gcc_assert(fold_convertible_p(type, var)); ++ ++ return fold_convert(type, var); ++} ++ ++static gimple build_cast_stmt(tree dst_type, tree rhs, tree lhs, gimple_stmt_iterator *gsi, bool before) ++{ ++ gimple assign; ++ ++ gcc_assert(dst_type != NULL_TREE && rhs != NULL_TREE); ++ if (gsi_end_p(*gsi) && before == AFTER_STMT) ++ gcc_unreachable(); ++ ++ if (lhs == CREATE_NEW_VAR) ++ lhs = create_new_var(dst_type); ++ ++ assign = gimple_build_assign(lhs, cast_a_tree(dst_type, rhs)); ++ ++ if (!gsi_end_p(*gsi)) { ++ location_t loc = gimple_location(gsi_stmt(*gsi)); ++ gimple_set_location(assign, loc); ++ } ++ ++ gimple_set_lhs(assign, make_ssa_name(lhs, assign)); ++ ++ if (before) ++ gsi_insert_before(gsi, assign, GSI_NEW_STMT); ++ else ++ gsi_insert_after(gsi, assign, GSI_NEW_STMT); ++ update_stmt(assign); ++ gimple_set_plf(assign, MY_STMT, true); ++ ++ return assign; ++} ++ ++static tree cast_to_new_size_overflow_type(gimple stmt, tree new_rhs1, tree size_overflow_type, bool before) ++{ ++ const_gimple assign; ++ gimple_stmt_iterator gsi; ++ ++ if (new_rhs1 == NULL_TREE) ++ return NULL_TREE; ++ ++ if (!useless_type_conversion_p(TREE_TYPE(new_rhs1), size_overflow_type)) { ++ gsi = gsi_for_stmt(stmt); ++ assign = build_cast_stmt(size_overflow_type, new_rhs1, CREATE_NEW_VAR, &gsi, before); ++ return gimple_get_lhs(assign); ++ } ++ return new_rhs1; ++} ++ ++static tree follow_overflow_type_and_dup(struct pointer_set_t *visited, gimple stmt, const_tree node, tree new_rhs1, tree new_rhs2, tree new_rhs3) ++{ ++ tree size_overflow_type = get_size_overflow_type(stmt, node); ++ ++ new_rhs1 = cast_to_new_size_overflow_type(stmt, new_rhs1, size_overflow_type, BEFORE_STMT); ++ ++ if (new_rhs2 != NULL_TREE) ++ new_rhs2 = cast_to_new_size_overflow_type(stmt, new_rhs2, size_overflow_type, 
BEFORE_STMT); ++ ++ if (new_rhs3 != NULL_TREE) ++ new_rhs3 = cast_to_new_size_overflow_type(stmt, new_rhs3, size_overflow_type, BEFORE_STMT); ++ ++ return dup_assign(visited, stmt, size_overflow_type, new_rhs1, new_rhs2, new_rhs3); ++} ++ ++ ++static tree create_assign(struct pointer_set_t *visited, gimple oldstmt, tree rhs1, bool before) ++{ ++ tree size_overflow_type, lhs; ++ gimple stmt; ++ gimple_stmt_iterator gsi; ++ ++ if (rhs1 == NULL_TREE) { ++ debug_gimple_stmt(oldstmt); ++ error("%s: rhs1 is NULL_TREE", __func__); ++ gcc_unreachable(); ++ } ++ ++ if (gimple_code(oldstmt) == GIMPLE_ASM) ++ lhs = rhs1; ++ else ++ lhs = gimple_get_lhs(oldstmt); ++ ++ gsi = gsi_for_stmt(oldstmt); ++ pointer_set_insert(visited, oldstmt); ++ if (lookup_stmt_eh_lp(oldstmt) != 0) { ++ basic_block next_bb, cur_bb; ++ const_edge e; ++ ++ gcc_assert(before == false); ++ gcc_assert(stmt_can_throw_internal(oldstmt)); ++ gcc_assert(gimple_code(oldstmt) == GIMPLE_CALL); ++ gcc_assert(!gsi_end_p(gsi)); ++ ++ cur_bb = gimple_bb(oldstmt); ++ next_bb = cur_bb->next_bb; ++ e = find_edge(cur_bb, next_bb); ++ gcc_assert(e != NULL); ++ gcc_assert(e->flags & EDGE_FALLTHRU); ++ ++ gsi = gsi_after_labels(next_bb); ++ gcc_assert(!gsi_end_p(gsi)); ++ ++ before = true; ++ oldstmt = gsi_stmt(gsi); ++ } ++ ++ size_overflow_type = get_size_overflow_type(oldstmt, lhs); ++ ++ stmt = build_cast_stmt(size_overflow_type, rhs1, CREATE_NEW_VAR, &gsi, before); ++ gimple_set_plf(stmt, MY_STMT, true); ++ return gimple_get_lhs(stmt); ++} ++ ++static tree dup_assign(struct pointer_set_t *visited, gimple oldstmt, tree size_overflow_type, tree rhs1, tree rhs2, tree __unused rhs3) ++{ ++ gimple stmt; ++ gimple_stmt_iterator gsi; ++ tree new_var, lhs = gimple_get_lhs(oldstmt); ++ ++ if (gimple_plf(oldstmt, MY_STMT)) ++ return lhs; ++ ++ if (gimple_num_ops(oldstmt) != 4 && rhs1 == NULL_TREE) { ++ rhs1 = gimple_assign_rhs1(oldstmt); ++ rhs1 = create_assign(visited, oldstmt, rhs1, BEFORE_STMT); ++ } ++ if (gimple_num_ops(oldstmt) == 3 && rhs2 == NULL_TREE) { ++ rhs2 = gimple_assign_rhs2(oldstmt); ++ rhs2 = create_assign(visited, oldstmt, rhs2, BEFORE_STMT); ++ } ++ ++ stmt = gimple_copy(oldstmt); ++ gimple_set_location(stmt, gimple_location(oldstmt)); ++ gimple_set_plf(stmt, MY_STMT, true); ++ ++ if (gimple_assign_rhs_code(oldstmt) == WIDEN_MULT_EXPR) ++ gimple_assign_set_rhs_code(stmt, MULT_EXPR); ++ ++ if (is_bool(lhs)) ++ new_var = SSA_NAME_VAR(lhs); ++ else ++ new_var = create_new_var(size_overflow_type); ++ new_var = make_ssa_name(new_var, stmt); ++ gimple_set_lhs(stmt, new_var); ++ ++ if (rhs1 != NULL_TREE) { ++ if (!gimple_assign_cast_p(oldstmt)) ++ rhs1 = cast_a_tree(size_overflow_type, rhs1); ++ gimple_assign_set_rhs1(stmt, rhs1); ++ } ++ ++ if (rhs2 != NULL_TREE) ++ gimple_assign_set_rhs2(stmt, rhs2); ++#if BUILDING_GCC_VERSION >= 4007 ++ if (rhs3 != NULL_TREE) ++ gimple_assign_set_rhs3(stmt, rhs3); ++#endif ++ gimple_set_vuse(stmt, gimple_vuse(oldstmt)); ++ gimple_set_vdef(stmt, gimple_vdef(oldstmt)); ++ ++ gsi = gsi_for_stmt(oldstmt); ++ gsi_insert_after(&gsi, stmt, GSI_SAME_STMT); ++ update_stmt(stmt); ++ pointer_set_insert(visited, oldstmt); ++ return gimple_get_lhs(stmt); ++} ++ ++static gimple overflow_create_phi_node(gimple oldstmt, tree result) ++{ ++ basic_block bb; ++ gimple phi; ++ gimple_stmt_iterator gsi = gsi_for_stmt(oldstmt); ++ ++ bb = gsi_bb(gsi); ++ ++ phi = create_phi_node(result, bb); ++ gsi = gsi_last(phi_nodes(bb)); ++ gsi_remove(&gsi, false); ++ ++ gsi = gsi_for_stmt(oldstmt); ++ gsi_insert_after(&gsi, phi, 
GSI_NEW_STMT); ++ gimple_set_bb(phi, bb); ++ gimple_set_plf(phi, MY_STMT, true); ++ return phi; ++} ++ ++static basic_block create_a_first_bb(void) ++{ ++ basic_block first_bb; ++ ++ first_bb = split_block_after_labels(ENTRY_BLOCK_PTR)->dest; ++ if (dom_info_available_p(CDI_DOMINATORS)) ++ set_immediate_dominator(CDI_DOMINATORS, first_bb, ENTRY_BLOCK_PTR); ++ return first_bb; ++} ++ ++static tree cast_old_phi_arg(gimple oldstmt, tree size_overflow_type, tree arg, tree new_var, unsigned int i) ++{ ++ basic_block bb; ++ const_gimple newstmt; ++ gimple_stmt_iterator gsi; ++ bool before = BEFORE_STMT; ++ ++ if (TREE_CODE(arg) == SSA_NAME && gimple_code(get_def_stmt(arg)) != GIMPLE_NOP) { ++ gsi = gsi_for_stmt(get_def_stmt(arg)); ++ newstmt = build_cast_stmt(size_overflow_type, arg, new_var, &gsi, AFTER_STMT); ++ return gimple_get_lhs(newstmt); ++ } ++ ++ bb = gimple_phi_arg_edge(oldstmt, i)->src; ++ gsi = gsi_after_labels(bb); ++ if (bb->index == 0) { ++ bb = create_a_first_bb(); ++ gsi = gsi_start_bb(bb); ++ } ++ newstmt = build_cast_stmt(size_overflow_type, arg, new_var, &gsi, before); ++ return gimple_get_lhs(newstmt); ++} ++ ++static const_gimple handle_new_phi_arg(const_tree arg, tree new_var, tree new_rhs) ++{ ++ gimple newstmt; ++ gimple_stmt_iterator gsi; ++ void (*gsi_insert)(gimple_stmt_iterator *, gimple, enum gsi_iterator_update); ++ gimple def_newstmt = get_def_stmt(new_rhs); ++ ++ gsi_insert = gsi_insert_after; ++ gsi = gsi_for_stmt(def_newstmt); ++ ++ switch (gimple_code(get_def_stmt(arg))) { ++ case GIMPLE_PHI: ++ newstmt = gimple_build_assign(new_var, new_rhs); ++ gsi = gsi_after_labels(gimple_bb(def_newstmt)); ++ gsi_insert = gsi_insert_before; ++ break; ++ case GIMPLE_ASM: ++ case GIMPLE_CALL: ++ newstmt = gimple_build_assign(new_var, new_rhs); ++ break; ++ case GIMPLE_ASSIGN: ++ newstmt = gimple_build_assign(new_var, gimple_get_lhs(def_newstmt)); ++ break; ++ default: ++ /* unknown gimple_code (handle_build_new_phi_arg) */ ++ gcc_unreachable(); ++ } ++ ++ gimple_set_lhs(newstmt, make_ssa_name(new_var, newstmt)); ++ gsi_insert(&gsi, newstmt, GSI_NEW_STMT); ++ gimple_set_plf(newstmt, MY_STMT, true); ++ update_stmt(newstmt); ++ return newstmt; ++} ++ ++static tree build_new_phi_arg(struct pointer_set_t *visited, tree size_overflow_type, tree arg, tree new_var) ++{ ++ const_gimple newstmt; ++ gimple def_stmt; ++ tree new_rhs; ++ ++ new_rhs = expand(visited, arg); ++ if (new_rhs == NULL_TREE) ++ return NULL_TREE; ++ ++ def_stmt = get_def_stmt(new_rhs); ++ if (gimple_code(def_stmt) == GIMPLE_NOP) ++ return NULL_TREE; ++ new_rhs = cast_to_new_size_overflow_type(def_stmt, new_rhs, size_overflow_type, AFTER_STMT); ++ ++ newstmt = handle_new_phi_arg(arg, new_var, new_rhs); ++ return gimple_get_lhs(newstmt); ++} ++ ++static tree build_new_phi(struct pointer_set_t *visited, tree orig_result) ++{ ++ gimple phi, oldstmt = get_def_stmt(orig_result); ++ tree new_result, size_overflow_type; ++ unsigned int i; ++ unsigned int n = gimple_phi_num_args(oldstmt); ++ ++ size_overflow_type = get_size_overflow_type(oldstmt, orig_result); ++ ++ new_result = create_new_var(size_overflow_type); ++ ++ pointer_set_insert(visited, oldstmt); ++ phi = overflow_create_phi_node(oldstmt, new_result); ++ for (i = 0; i < n; i++) { ++ tree arg, lhs; ++ ++ arg = gimple_phi_arg_def(oldstmt, i); ++ if (is_gimple_constant(arg)) ++ arg = cast_a_tree(size_overflow_type, arg); ++ lhs = build_new_phi_arg(visited, size_overflow_type, arg, new_result); ++ if (lhs == NULL_TREE) ++ lhs = cast_old_phi_arg(oldstmt, 
size_overflow_type, arg, new_result, i); ++ add_phi_arg(phi, lhs, gimple_phi_arg_edge(oldstmt, i), gimple_location(oldstmt)); ++ } ++ ++ update_stmt(phi); ++ return gimple_phi_result(phi); ++} ++ ++static tree change_assign_rhs(gimple stmt, const_tree orig_rhs, tree new_rhs) ++{ ++ const_gimple assign; ++ gimple_stmt_iterator gsi = gsi_for_stmt(stmt); ++ tree origtype = TREE_TYPE(orig_rhs); ++ ++ gcc_assert(gimple_code(stmt) == GIMPLE_ASSIGN); ++ ++ assign = build_cast_stmt(origtype, new_rhs, CREATE_NEW_VAR, &gsi, BEFORE_STMT); ++ return gimple_get_lhs(assign); ++} ++ ++static void change_rhs1(gimple stmt, tree new_rhs1) ++{ ++ tree assign_rhs; ++ const_tree rhs = gimple_assign_rhs1(stmt); ++ ++ assign_rhs = change_assign_rhs(stmt, rhs, new_rhs1); ++ gimple_assign_set_rhs1(stmt, assign_rhs); ++ update_stmt(stmt); ++} ++ ++static bool check_mode_type(const_gimple stmt) ++{ ++ const_tree lhs = gimple_get_lhs(stmt); ++ const_tree lhs_type = TREE_TYPE(lhs); ++ const_tree rhs_type = TREE_TYPE(gimple_assign_rhs1(stmt)); ++ enum machine_mode lhs_mode = TYPE_MODE(lhs_type); ++ enum machine_mode rhs_mode = TYPE_MODE(rhs_type); ++ ++ if (rhs_mode == lhs_mode && TYPE_UNSIGNED(rhs_type) == TYPE_UNSIGNED(lhs_type)) ++ return false; ++ ++ if (rhs_mode == SImode && lhs_mode == DImode && (TYPE_UNSIGNED(rhs_type) || !TYPE_UNSIGNED(lhs_type))) ++ return false; ++ ++ return true; ++} ++ ++static bool check_undefined_integer_operation(const_gimple stmt) ++{ ++ const_gimple def_stmt; ++ const_tree lhs = gimple_get_lhs(stmt); ++ const_tree rhs1 = gimple_assign_rhs1(stmt); ++ const_tree rhs1_type = TREE_TYPE(rhs1); ++ const_tree lhs_type = TREE_TYPE(lhs); ++ ++ if (TYPE_MODE(rhs1_type) != TYPE_MODE(lhs_type) || TYPE_UNSIGNED(rhs1_type) == TYPE_UNSIGNED(lhs_type)) ++ return false; ++ ++ def_stmt = get_def_stmt(rhs1); ++ if (gimple_code(def_stmt) != GIMPLE_ASSIGN) ++ return false; ++ ++ if (gimple_assign_rhs_code(def_stmt) != MINUS_EXPR) ++ return false; ++ return true; ++} ++ ++static bool is_a_cast_and_const_overflow(const_tree no_const_rhs) ++{ ++ const_tree rhs1, lhs, rhs1_type, lhs_type; ++ enum machine_mode lhs_mode, rhs_mode; ++ gimple def_stmt = get_def_stmt(no_const_rhs); ++ ++ if (!gimple_assign_cast_p(def_stmt)) ++ return false; ++ ++ rhs1 = gimple_assign_rhs1(def_stmt); ++ lhs = gimple_get_lhs(def_stmt); ++ rhs1_type = TREE_TYPE(rhs1); ++ lhs_type = TREE_TYPE(lhs); ++ rhs_mode = TYPE_MODE(rhs1_type); ++ lhs_mode = TYPE_MODE(lhs_type); ++ if (TYPE_UNSIGNED(lhs_type) == TYPE_UNSIGNED(rhs1_type) || lhs_mode != rhs_mode) ++ return false; ++ ++ return true; ++} ++ ++static tree handle_unary_rhs(struct pointer_set_t *visited, gimple stmt) ++{ ++ tree size_overflow_type, lhs = gimple_get_lhs(stmt); ++ tree new_rhs1, rhs1 = gimple_assign_rhs1(stmt); ++ const_tree rhs1_type = TREE_TYPE(rhs1); ++ const_tree lhs_type = TREE_TYPE(lhs); ++ ++ new_rhs1 = expand(visited, rhs1); ++ ++ if (new_rhs1 == NULL_TREE || TREE_CODE(rhs1_type) == POINTER_TYPE) ++ return create_assign(visited, stmt, lhs, AFTER_STMT); ++ ++ if (gimple_plf(stmt, MY_STMT)) ++ return lhs; ++ ++ if (gimple_plf(stmt, NO_CAST_CHECK)) ++ return follow_overflow_type_and_dup(visited, stmt, rhs1, new_rhs1, NULL_TREE, NULL_TREE); ++ ++ if (gimple_assign_rhs_code(stmt) == BIT_NOT_EXPR) { ++ size_overflow_type = get_size_overflow_type(stmt, rhs1); ++ new_rhs1 = cast_to_new_size_overflow_type(stmt, new_rhs1, size_overflow_type, BEFORE_STMT); ++ check_size_overflow(stmt, size_overflow_type, new_rhs1, rhs1, BEFORE_STMT); ++ return create_assign(visited, stmt, 
lhs, AFTER_STMT); ++ } ++ ++ if (!gimple_assign_cast_p(stmt) || check_undefined_integer_operation(stmt)) ++ return follow_overflow_type_and_dup(visited, stmt, lhs, new_rhs1, NULL_TREE, NULL_TREE); ++ ++ size_overflow_type = get_size_overflow_type(stmt, rhs1); ++ new_rhs1 = cast_to_new_size_overflow_type(stmt, new_rhs1, size_overflow_type, BEFORE_STMT); ++ ++ change_rhs1(stmt, new_rhs1); ++ check_size_overflow(stmt, size_overflow_type, new_rhs1, rhs1, BEFORE_STMT); ++ ++ rhs1 = gimple_assign_rhs1(stmt); ++ rhs1_type = TREE_TYPE(rhs1); ++ if (TYPE_UNSIGNED(rhs1_type) != TYPE_UNSIGNED(lhs_type)) ++ return create_assign(visited, stmt, rhs1, AFTER_STMT); ++ ++ if (!check_mode_type(stmt)) ++ return create_assign(visited, stmt, lhs, AFTER_STMT); ++ ++ size_overflow_type = get_size_overflow_type(stmt, lhs); ++ new_rhs1 = cast_to_new_size_overflow_type(stmt, new_rhs1, size_overflow_type, BEFORE_STMT); ++ ++ check_size_overflow(stmt, size_overflow_type, new_rhs1, lhs, BEFORE_STMT); ++ ++ return create_assign(visited, stmt, lhs, AFTER_STMT); ++} ++ ++static tree handle_unary_ops(struct pointer_set_t *visited, tree lhs) ++{ ++ gimple def_stmt = get_def_stmt(lhs); ++ tree rhs1 = gimple_assign_rhs1(def_stmt); ++ ++ if (is_gimple_constant(rhs1)) ++ return create_assign(visited, def_stmt, lhs, AFTER_STMT); ++ ++ gcc_assert(TREE_CODE(rhs1) != COND_EXPR); ++ switch (TREE_CODE(rhs1)) { ++ case SSA_NAME: ++ return handle_unary_rhs(visited, def_stmt); ++ case ARRAY_REF: ++ case BIT_FIELD_REF: ++ case ADDR_EXPR: ++ case COMPONENT_REF: ++ case INDIRECT_REF: ++#if BUILDING_GCC_VERSION >= 4006 ++ case MEM_REF: ++#endif ++ case PARM_DECL: ++ case TARGET_MEM_REF: ++ case VAR_DECL: ++ return create_assign(visited, def_stmt, lhs, AFTER_STMT); ++ ++ default: ++ debug_gimple_stmt(def_stmt); ++ debug_tree(rhs1); ++ gcc_unreachable(); ++ } ++} ++ ++static void insert_cond(basic_block cond_bb, tree arg, enum tree_code cond_code, tree type_value) ++{ ++ gimple cond_stmt; ++ gimple_stmt_iterator gsi = gsi_last_bb(cond_bb); ++ ++ cond_stmt = gimple_build_cond(cond_code, arg, type_value, NULL_TREE, NULL_TREE); ++ gsi_insert_after(&gsi, cond_stmt, GSI_CONTINUE_LINKING); ++ update_stmt(cond_stmt); ++} ++ ++static tree create_string_param(tree string) ++{ ++ tree i_type, a_type; ++ const int length = TREE_STRING_LENGTH(string); ++ ++ gcc_assert(length > 0); ++ ++ i_type = build_index_type(build_int_cst(NULL_TREE, length - 1)); ++ a_type = build_array_type(char_type_node, i_type); ++ ++ TREE_TYPE(string) = a_type; ++ TREE_CONSTANT(string) = 1; ++ TREE_READONLY(string) = 1; ++ ++ return build1(ADDR_EXPR, ptr_type_node, string); ++} ++ ++static void insert_cond_result(basic_block bb_true, const_gimple stmt, const_tree arg, bool min) ++{ ++ gimple func_stmt; ++ const_gimple def_stmt; ++ const_tree loc_line; ++ tree loc_file, ssa_name, current_func; ++ expanded_location xloc; ++ char ssa_name_buf[256]; ++ gimple_stmt_iterator gsi = gsi_start_bb(bb_true); ++ ++ def_stmt = get_def_stmt(arg); ++ xloc = expand_location(gimple_location(def_stmt)); ++ ++ if (!gimple_has_location(def_stmt)) { ++ xloc = expand_location(gimple_location(stmt)); ++ if (!gimple_has_location(stmt)) ++ xloc = expand_location(DECL_SOURCE_LOCATION(current_function_decl)); ++ } ++ ++ loc_line = build_int_cstu(unsigned_type_node, xloc.line); ++ ++ loc_file = build_string(strlen(xloc.file) + 1, xloc.file); ++ loc_file = create_string_param(loc_file); ++ ++ current_func = build_string(NAME_LEN(current_function_decl) + 1, NAME(current_function_decl)); ++ current_func = 
create_string_param(current_func); ++ ++ snprintf(ssa_name_buf, 256, "%s_%u (%s)\n", NAME(SSA_NAME_VAR(arg)), SSA_NAME_VERSION(arg), min ? "min" : "max"); ++ ssa_name = build_string(256, ssa_name_buf); ++ ssa_name = create_string_param(ssa_name); ++ ++ // void report_size_overflow(const char *file, unsigned int line, const char *func, const char *ssa_name) ++ func_stmt = gimple_build_call(report_size_overflow_decl, 4, loc_file, loc_line, current_func, ssa_name); ++ ++ gsi_insert_after(&gsi, func_stmt, GSI_CONTINUE_LINKING); ++} ++ ++static void __unused print_the_code_insertions(const_gimple stmt) ++{ ++ location_t loc = gimple_location(stmt); ++ ++ inform(loc, "Integer size_overflow check applied here."); ++} ++ ++static void insert_check_size_overflow(gimple stmt, enum tree_code cond_code, tree arg, tree type_value, bool before, bool min) ++{ ++ basic_block cond_bb, join_bb, bb_true; ++ edge e; ++ gimple_stmt_iterator gsi = gsi_for_stmt(stmt); ++ ++ cond_bb = gimple_bb(stmt); ++ if (before) ++ gsi_prev(&gsi); ++ if (gsi_end_p(gsi)) ++ e = split_block_after_labels(cond_bb); ++ else ++ e = split_block(cond_bb, gsi_stmt(gsi)); ++ cond_bb = e->src; ++ join_bb = e->dest; ++ e->flags = EDGE_FALSE_VALUE; ++ e->probability = REG_BR_PROB_BASE; ++ ++ bb_true = create_empty_bb(cond_bb); ++ make_edge(cond_bb, bb_true, EDGE_TRUE_VALUE); ++ make_edge(cond_bb, join_bb, EDGE_FALSE_VALUE); ++ make_edge(bb_true, join_bb, EDGE_FALLTHRU); ++ ++ if (dom_info_available_p(CDI_DOMINATORS)) { ++ set_immediate_dominator(CDI_DOMINATORS, bb_true, cond_bb); ++ set_immediate_dominator(CDI_DOMINATORS, join_bb, cond_bb); ++ } ++ ++ if (current_loops != NULL) { ++ gcc_assert(cond_bb->loop_father == join_bb->loop_father); ++ add_bb_to_loop(bb_true, cond_bb->loop_father); ++ } ++ ++ insert_cond(cond_bb, arg, cond_code, type_value); ++ insert_cond_result(bb_true, stmt, arg, min); ++ ++// print_the_code_insertions(stmt); ++} ++ ++static void check_size_overflow(gimple stmt, tree size_overflow_type, tree cast_rhs, tree rhs, bool before) ++{ ++ const_tree rhs_type = TREE_TYPE(rhs); ++ tree cast_rhs_type, type_max_type, type_min_type, type_max, type_min; ++ ++ gcc_assert(rhs_type != NULL_TREE); ++ if (TREE_CODE(rhs_type) == POINTER_TYPE) ++ return; ++ ++ gcc_assert(TREE_CODE(rhs_type) == INTEGER_TYPE || TREE_CODE(rhs_type) == BOOLEAN_TYPE || TREE_CODE(rhs_type) == ENUMERAL_TYPE); ++ ++ type_max = cast_a_tree(size_overflow_type, TYPE_MAX_VALUE(rhs_type)); ++ type_min = cast_a_tree(size_overflow_type, TYPE_MIN_VALUE(rhs_type)); ++ ++ gcc_assert(!TREE_OVERFLOW(type_max)); ++ ++ cast_rhs_type = TREE_TYPE(cast_rhs); ++ type_max_type = TREE_TYPE(type_max); ++ type_min_type = TREE_TYPE(type_min); ++ gcc_assert(useless_type_conversion_p(cast_rhs_type, type_max_type)); ++ gcc_assert(useless_type_conversion_p(type_max_type, type_min_type)); ++ ++ insert_check_size_overflow(stmt, GT_EXPR, cast_rhs, type_max, before, false); ++ insert_check_size_overflow(stmt, LT_EXPR, cast_rhs, type_min, before, true); ++} ++ ++static tree get_size_overflow_type_for_intentional_overflow(gimple def_stmt, tree change_rhs) ++{ ++ gimple change_rhs_def_stmt; ++ tree lhs = gimple_get_lhs(def_stmt); ++ tree lhs_type = TREE_TYPE(lhs); ++ tree rhs1_type = TREE_TYPE(gimple_assign_rhs1(def_stmt)); ++ tree rhs2_type = TREE_TYPE(gimple_assign_rhs2(def_stmt)); ++ ++ if (change_rhs == NULL_TREE) ++ return get_size_overflow_type(def_stmt, lhs); ++ ++ change_rhs_def_stmt = get_def_stmt(change_rhs); ++ ++ if (TREE_CODE_CLASS(gimple_assign_rhs_code(def_stmt)) == 
tcc_comparison) ++ return get_size_overflow_type(change_rhs_def_stmt, change_rhs); ++ ++ if (gimple_assign_rhs_code(def_stmt) == LSHIFT_EXPR) ++ return get_size_overflow_type(change_rhs_def_stmt, change_rhs); ++ ++ if (gimple_assign_rhs_code(def_stmt) == RSHIFT_EXPR) ++ return get_size_overflow_type(change_rhs_def_stmt, change_rhs); ++ ++ if (!useless_type_conversion_p(lhs_type, rhs1_type) || !useless_type_conversion_p(rhs1_type, rhs2_type)) { ++ debug_gimple_stmt(def_stmt); ++ gcc_unreachable(); ++ } ++ ++ return get_size_overflow_type(def_stmt, lhs); ++} ++ ++static bool is_a_constant_overflow(const_gimple stmt, const_tree rhs) ++{ ++ if (gimple_assign_rhs_code(stmt) == MIN_EXPR) ++ return false; ++ if (!is_gimple_constant(rhs)) ++ return false; ++ return true; ++} ++ ++static tree get_cast_def_stmt_rhs(const_tree new_rhs) ++{ ++ gimple def_stmt; ++ ++ def_stmt = get_def_stmt(new_rhs); ++ // get_size_overflow_type ++ if (LONG_TYPE_SIZE != GET_MODE_BITSIZE(SImode)) ++ gcc_assert(gimple_assign_cast_p(def_stmt)); ++ return gimple_assign_rhs1(def_stmt); ++} ++ ++static tree cast_to_int_TI_type_and_check(gimple stmt, tree new_rhs) ++{ ++ gimple_stmt_iterator gsi; ++ const_gimple cast_stmt; ++ gimple def_stmt; ++ enum machine_mode mode = TYPE_MODE(TREE_TYPE(new_rhs)); ++ ++ if (mode != TImode && mode != DImode) { ++ def_stmt = get_def_stmt(new_rhs); ++ gcc_assert(gimple_assign_cast_p(def_stmt)); ++ new_rhs = gimple_assign_rhs1(def_stmt); ++ mode = TYPE_MODE(TREE_TYPE(new_rhs)); ++ } ++ ++ gcc_assert(mode == TImode || mode == DImode); ++ ++ if (mode == TYPE_MODE(intTI_type_node) && useless_type_conversion_p(TREE_TYPE(new_rhs), intTI_type_node)) ++ return new_rhs; ++ ++ gsi = gsi_for_stmt(stmt); ++ cast_stmt = build_cast_stmt(intTI_type_node, new_rhs, CREATE_NEW_VAR, &gsi, BEFORE_STMT); ++ new_rhs = gimple_get_lhs(cast_stmt); ++ ++ if (mode == DImode) ++ return new_rhs; ++ ++ check_size_overflow(stmt, intTI_type_node, new_rhs, new_rhs, BEFORE_STMT); ++ ++ return new_rhs; ++} ++ ++static bool is_an_integer_trunction(const_gimple stmt) ++{ ++ gimple rhs1_def_stmt, rhs2_def_stmt; ++ const_tree rhs1_def_stmt_rhs1, rhs2_def_stmt_rhs1; ++ enum machine_mode rhs1_def_stmt_rhs1_mode, rhs2_def_stmt_rhs1_mode; ++ const_tree rhs1 = gimple_assign_rhs1(stmt); ++ const_tree rhs2 = gimple_assign_rhs2(stmt); ++ enum machine_mode rhs1_mode = TYPE_MODE(TREE_TYPE(rhs1)); ++ enum machine_mode rhs2_mode = TYPE_MODE(TREE_TYPE(rhs2)); ++ ++ if (is_gimple_constant(rhs1) || is_gimple_constant(rhs2)) ++ return false; ++ ++ gcc_assert(TREE_CODE(rhs1) == SSA_NAME && TREE_CODE(rhs2) == SSA_NAME); ++ ++ if (gimple_assign_rhs_code(stmt) != MINUS_EXPR || rhs1_mode != SImode || rhs2_mode != SImode) ++ return false; ++ ++ rhs1_def_stmt = get_def_stmt(rhs1); ++ rhs2_def_stmt = get_def_stmt(rhs2); ++ if (!gimple_assign_cast_p(rhs1_def_stmt) || !gimple_assign_cast_p(rhs2_def_stmt)) ++ return false; ++ ++ rhs1_def_stmt_rhs1 = gimple_assign_rhs1(rhs1_def_stmt); ++ rhs2_def_stmt_rhs1 = gimple_assign_rhs1(rhs2_def_stmt); ++ rhs1_def_stmt_rhs1_mode = TYPE_MODE(TREE_TYPE(rhs1_def_stmt_rhs1)); ++ rhs2_def_stmt_rhs1_mode = TYPE_MODE(TREE_TYPE(rhs2_def_stmt_rhs1)); ++ if (rhs1_def_stmt_rhs1_mode != DImode || rhs2_def_stmt_rhs1_mode != DImode) ++ return false; ++ ++ gimple_set_plf(rhs1_def_stmt, NO_CAST_CHECK, true); ++ gimple_set_plf(rhs2_def_stmt, NO_CAST_CHECK, true); ++ return true; ++} ++ ++static tree handle_integer_truncation(struct pointer_set_t *visited, const_tree lhs) ++{ ++ tree new_rhs1, new_rhs2; ++ tree new_rhs1_def_stmt_rhs1, 
new_rhs2_def_stmt_rhs1, new_lhs; ++ tree new_rhs1_def_stmt_rhs1_type, new_rhs2_def_stmt_rhs1_type; ++ gimple assign, stmt = get_def_stmt(lhs); ++ tree rhs1 = gimple_assign_rhs1(stmt); ++ tree rhs2 = gimple_assign_rhs2(stmt); ++ ++ if (!is_an_integer_trunction(stmt)) ++ return NULL_TREE; ++ ++ new_rhs1 = expand(visited, rhs1); ++ new_rhs2 = expand(visited, rhs2); ++ ++ new_rhs1_def_stmt_rhs1 = get_cast_def_stmt_rhs(new_rhs1); ++ new_rhs2_def_stmt_rhs1 = get_cast_def_stmt_rhs(new_rhs2); ++ ++ new_rhs1_def_stmt_rhs1_type = TREE_TYPE(new_rhs1_def_stmt_rhs1); ++ new_rhs2_def_stmt_rhs1_type = TREE_TYPE(new_rhs2_def_stmt_rhs1); ++ ++ if (!useless_type_conversion_p(new_rhs1_def_stmt_rhs1_type, new_rhs2_def_stmt_rhs1_type)) { ++ new_rhs1_def_stmt_rhs1 = cast_to_int_TI_type_and_check(stmt, new_rhs1_def_stmt_rhs1); ++ new_rhs2_def_stmt_rhs1 = cast_to_int_TI_type_and_check(stmt, new_rhs2_def_stmt_rhs1); ++ } ++ ++ assign = create_binary_assign(MINUS_EXPR, stmt, new_rhs1_def_stmt_rhs1, new_rhs2_def_stmt_rhs1); ++ new_lhs = gimple_get_lhs(assign); ++ check_size_overflow(assign, TREE_TYPE(new_lhs), new_lhs, rhs1, AFTER_STMT); ++ ++ return follow_overflow_type_and_dup(visited, stmt, lhs, new_rhs1, new_rhs2, NULL_TREE); ++} ++ ++static bool is_a_neg_overflow(const_gimple stmt, const_tree rhs) ++{ ++ const_gimple def_stmt; ++ ++ if (TREE_CODE(rhs) != SSA_NAME) ++ return false; ++ ++ if (gimple_assign_rhs_code(stmt) != PLUS_EXPR) ++ return false; ++ ++ def_stmt = get_def_stmt(rhs); ++ if (gimple_code(def_stmt) != GIMPLE_ASSIGN || gimple_assign_rhs_code(def_stmt) != BIT_NOT_EXPR) ++ return false; ++ ++ return true; ++} ++ ++static tree handle_intentional_overflow(struct pointer_set_t *visited, bool check_overflow, gimple stmt, tree change_rhs, tree new_rhs1, tree new_rhs2) ++{ ++ tree new_rhs, size_overflow_type, orig_rhs; ++ void (*gimple_assign_set_rhs)(gimple, tree); ++ tree rhs1 = gimple_assign_rhs1(stmt); ++ tree rhs2 = gimple_assign_rhs2(stmt); ++ tree lhs = gimple_get_lhs(stmt); ++ ++ if (change_rhs == NULL_TREE) ++ return create_assign(visited, stmt, lhs, AFTER_STMT); ++ ++ if (new_rhs2 == NULL_TREE) { ++ size_overflow_type = get_size_overflow_type_for_intentional_overflow(stmt, new_rhs1); ++ new_rhs2 = cast_a_tree(size_overflow_type, rhs2); ++ orig_rhs = rhs1; ++ gimple_assign_set_rhs = &gimple_assign_set_rhs1; ++ } else { ++ size_overflow_type = get_size_overflow_type_for_intentional_overflow(stmt, new_rhs2); ++ new_rhs1 = cast_a_tree(size_overflow_type, rhs1); ++ orig_rhs = rhs2; ++ gimple_assign_set_rhs = &gimple_assign_set_rhs2; ++ } ++ ++ change_rhs = cast_to_new_size_overflow_type(stmt, change_rhs, size_overflow_type, BEFORE_STMT); ++ ++ if (check_overflow) ++ check_size_overflow(stmt, size_overflow_type, change_rhs, orig_rhs, BEFORE_STMT); ++ ++ new_rhs = change_assign_rhs(stmt, orig_rhs, change_rhs); ++ gimple_assign_set_rhs(stmt, new_rhs); ++ update_stmt(stmt); ++ ++ return create_assign(visited, stmt, lhs, AFTER_STMT); ++} ++ ++static tree handle_binary_ops(struct pointer_set_t *visited, tree lhs) ++{ ++ tree rhs1, rhs2, new_lhs; ++ gimple def_stmt = get_def_stmt(lhs); ++ tree new_rhs1 = NULL_TREE; ++ tree new_rhs2 = NULL_TREE; ++ ++ rhs1 = gimple_assign_rhs1(def_stmt); ++ rhs2 = gimple_assign_rhs2(def_stmt); ++ ++ /* no DImode/TImode division in the 32/64 bit kernel */ ++ switch (gimple_assign_rhs_code(def_stmt)) { ++ case RDIV_EXPR: ++ case TRUNC_DIV_EXPR: ++ case CEIL_DIV_EXPR: ++ case FLOOR_DIV_EXPR: ++ case ROUND_DIV_EXPR: ++ case TRUNC_MOD_EXPR: ++ case CEIL_MOD_EXPR: ++ case 
FLOOR_MOD_EXPR: ++ case ROUND_MOD_EXPR: ++ case EXACT_DIV_EXPR: ++ case POINTER_PLUS_EXPR: ++ case BIT_AND_EXPR: ++ return create_assign(visited, def_stmt, lhs, AFTER_STMT); ++ default: ++ break; ++ } ++ ++ new_lhs = handle_integer_truncation(visited, lhs); ++ if (new_lhs != NULL_TREE) ++ return new_lhs; ++ ++ if (TREE_CODE(rhs1) == SSA_NAME) ++ new_rhs1 = expand(visited, rhs1); ++ if (TREE_CODE(rhs2) == SSA_NAME) ++ new_rhs2 = expand(visited, rhs2); ++ ++ if (is_a_neg_overflow(def_stmt, rhs2)) ++ return handle_intentional_overflow(visited, true, def_stmt, new_rhs1, new_rhs1, NULL_TREE); ++ if (is_a_neg_overflow(def_stmt, rhs1)) ++ return handle_intentional_overflow(visited, true, def_stmt, new_rhs2, NULL_TREE, new_rhs2); ++ ++ if (is_a_constant_overflow(def_stmt, rhs2)) ++ return handle_intentional_overflow(visited, !is_a_cast_and_const_overflow(rhs1), def_stmt, new_rhs1, new_rhs1, NULL_TREE); ++ if (is_a_constant_overflow(def_stmt, rhs1)) ++ return handle_intentional_overflow(visited, !is_a_cast_and_const_overflow(rhs2), def_stmt, new_rhs2, NULL_TREE, new_rhs2); ++ ++ return follow_overflow_type_and_dup(visited, def_stmt, lhs, new_rhs1, new_rhs2, NULL_TREE); ++} ++ ++#if BUILDING_GCC_VERSION >= 4007 ++static tree get_new_rhs(struct pointer_set_t *visited, tree size_overflow_type, tree rhs) ++{ ++ if (is_gimple_constant(rhs)) ++ return cast_a_tree(size_overflow_type, rhs); ++ if (TREE_CODE(rhs) != SSA_NAME) ++ return NULL_TREE; ++ return expand(visited, rhs); ++} ++ ++static tree handle_ternary_ops(struct pointer_set_t *visited, tree lhs) ++{ ++ tree rhs1, rhs2, rhs3, new_rhs1, new_rhs2, new_rhs3, size_overflow_type; ++ gimple def_stmt = get_def_stmt(lhs); ++ ++ size_overflow_type = get_size_overflow_type(def_stmt, lhs); ++ ++ rhs1 = gimple_assign_rhs1(def_stmt); ++ rhs2 = gimple_assign_rhs2(def_stmt); ++ rhs3 = gimple_assign_rhs3(def_stmt); ++ new_rhs1 = get_new_rhs(visited, size_overflow_type, rhs1); ++ new_rhs2 = get_new_rhs(visited, size_overflow_type, rhs2); ++ new_rhs3 = get_new_rhs(visited, size_overflow_type, rhs3); ++ ++ return follow_overflow_type_and_dup(visited, def_stmt, lhs, new_rhs1, new_rhs2, new_rhs3); ++} ++#endif ++ ++static tree get_size_overflow_type(gimple stmt, const_tree node) ++{ ++ const_tree type; ++ ++ gcc_assert(node != NULL_TREE); ++ ++ type = TREE_TYPE(node); ++ ++ if (gimple_plf(stmt, MY_STMT)) ++ return TREE_TYPE(node); ++ ++ switch (TYPE_MODE(type)) { ++ case QImode: ++ return (TYPE_UNSIGNED(type)) ? unsigned_intHI_type_node : intHI_type_node; ++ case HImode: ++ return (TYPE_UNSIGNED(type)) ? unsigned_intSI_type_node : intSI_type_node; ++ case SImode: ++ return (TYPE_UNSIGNED(type)) ? unsigned_intDI_type_node : intDI_type_node; ++ case DImode: ++ if (LONG_TYPE_SIZE == GET_MODE_BITSIZE(SImode)) ++ return (TYPE_UNSIGNED(type)) ? unsigned_intDI_type_node : intDI_type_node; ++ return (TYPE_UNSIGNED(type)) ? 
unsigned_intTI_type_node : intTI_type_node; ++ default: ++ debug_tree((tree)node); ++ error("%s: unsupported gcc configuration.", __func__); ++ gcc_unreachable(); ++ } ++} ++ ++static tree expand_visited(gimple def_stmt) ++{ ++ const_gimple next_stmt; ++ gimple_stmt_iterator gsi = gsi_for_stmt(def_stmt); ++ ++ gsi_next(&gsi); ++ next_stmt = gsi_stmt(gsi); ++ ++ gcc_assert(gimple_plf((gimple)next_stmt, MY_STMT)); ++ ++ switch (gimple_code(next_stmt)) { ++ case GIMPLE_ASSIGN: ++ return gimple_get_lhs(next_stmt); ++ case GIMPLE_PHI: ++ return gimple_phi_result(next_stmt); ++ case GIMPLE_CALL: ++ return gimple_call_lhs(next_stmt); ++ default: ++ return NULL_TREE; ++ } ++} ++ ++static tree expand(struct pointer_set_t *visited, tree lhs) ++{ ++ gimple def_stmt; ++ enum tree_code code = TREE_CODE(TREE_TYPE(lhs)); ++ ++ if (is_gimple_constant(lhs)) ++ return NULL_TREE; ++ ++ if (TREE_CODE(lhs) == ADDR_EXPR) ++ return NULL_TREE; ++ ++ if (code == REAL_TYPE) ++ return NULL_TREE; ++ ++ gcc_assert(code == INTEGER_TYPE || code == POINTER_TYPE || code == BOOLEAN_TYPE || code == ENUMERAL_TYPE); ++ ++ ++ def_stmt = get_def_stmt(lhs); ++ ++ if (!def_stmt) ++ return NULL_TREE; ++ ++ if (gimple_plf(def_stmt, MY_STMT)) ++ return lhs; ++ ++ if (pointer_set_contains(visited, def_stmt)) ++ return expand_visited(def_stmt); ++ ++ switch (gimple_code(def_stmt)) { ++ case GIMPLE_NOP: ++ return NULL_TREE; ++ case GIMPLE_PHI: ++ return build_new_phi(visited, lhs); ++ case GIMPLE_CALL: ++ case GIMPLE_ASM: ++ return create_assign(visited, def_stmt, lhs, AFTER_STMT); ++ case GIMPLE_ASSIGN: ++ switch (gimple_num_ops(def_stmt)) { ++ case 2: ++ return handle_unary_ops(visited, lhs); ++ case 3: ++ return handle_binary_ops(visited, lhs); ++#if BUILDING_GCC_VERSION >= 4007 ++ case 4: ++ return handle_ternary_ops(visited, lhs); ++#endif ++ } ++ default: ++ debug_gimple_stmt(def_stmt); ++ error("%s: unknown gimple code", __func__); ++ gcc_unreachable(); ++ } ++} ++ ++static void change_function_arg(gimple stmt, const_tree origarg, unsigned int argnum, tree newarg) ++{ ++ const_gimple assign; ++ gimple_stmt_iterator gsi = gsi_for_stmt(stmt); ++ tree origtype = TREE_TYPE(origarg); ++ ++ gcc_assert(gimple_code(stmt) == GIMPLE_CALL); ++ ++ assign = build_cast_stmt(origtype, newarg, CREATE_NEW_VAR, &gsi, BEFORE_STMT); ++ ++ gimple_call_set_arg(stmt, argnum, gimple_get_lhs(assign)); ++ update_stmt(stmt); ++} ++ ++static bool get_function_arg(unsigned int* argnum, const_tree fndecl) ++{ ++ const char *origid; ++ tree arg; ++ const_tree origarg; ++ ++ if (!DECL_ABSTRACT_ORIGIN(fndecl)) ++ return true; ++ ++ origarg = DECL_ARGUMENTS(DECL_ABSTRACT_ORIGIN(fndecl)); ++ while (origarg && *argnum) { ++ (*argnum)--; ++ origarg = TREE_CHAIN(origarg); ++ } ++ ++ gcc_assert(*argnum == 0); ++ ++ gcc_assert(origarg != NULL_TREE); ++ origid = NAME(origarg); ++ *argnum = 0; ++ for (arg = DECL_ARGUMENTS(fndecl); arg; arg = TREE_CHAIN(arg)) { ++ if (!strcmp(origid, NAME(arg))) ++ return true; ++ (*argnum)++; ++ } ++ return false; ++} ++ ++static bool skip_types(const_tree var) ++{ ++ switch (TREE_CODE(var)) { ++ case ADDR_EXPR: ++#if BUILDING_GCC_VERSION >= 4006 ++ case MEM_REF: ++#endif ++ case ARRAY_REF: ++ case BIT_FIELD_REF: ++ case INDIRECT_REF: ++ case TARGET_MEM_REF: ++ case VAR_DECL: ++ return true; ++ default: ++ break; ++ } ++ return false; ++} ++ ++static bool walk_phi(struct pointer_set_t *visited, const_tree result) ++{ ++ gimple phi = get_def_stmt(result); ++ unsigned int i, n = gimple_phi_num_args(phi); ++ ++ if (!phi) ++ return false; 
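++ /* mark the phi node as visited before walking its arguments so
++  * that cyclic phi chains terminate, then recurse into every
++  * incoming value looking for an already marked parameter */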
++ ++ pointer_set_insert(visited, phi); ++ for (i = 0; i < n; i++) { ++ const_tree arg = gimple_phi_arg_def(phi, i); ++ if (pre_expand(visited, arg)) ++ return true; ++ } ++ return false; ++} ++ ++static bool walk_unary_ops(struct pointer_set_t *visited, const_tree lhs) ++{ ++ gimple def_stmt = get_def_stmt(lhs); ++ const_tree rhs; ++ ++ if (!def_stmt) ++ return false; ++ ++ rhs = gimple_assign_rhs1(def_stmt); ++ if (pre_expand(visited, rhs)) ++ return true; ++ return false; ++} ++ ++static bool walk_binary_ops(struct pointer_set_t *visited, const_tree lhs) ++{ ++ bool rhs1_found, rhs2_found; ++ gimple def_stmt = get_def_stmt(lhs); ++ const_tree rhs1, rhs2; ++ ++ if (!def_stmt) ++ return false; ++ ++ rhs1 = gimple_assign_rhs1(def_stmt); ++ rhs2 = gimple_assign_rhs2(def_stmt); ++ rhs1_found = pre_expand(visited, rhs1); ++ rhs2_found = pre_expand(visited, rhs2); ++ ++ return rhs1_found || rhs2_found; ++} ++ ++static const_tree search_field_decl(const_tree comp_ref) ++{ ++ const_tree field = NULL_TREE; ++ unsigned int i, len = TREE_OPERAND_LENGTH(comp_ref); ++ ++ for (i = 0; i < len; i++) { ++ field = TREE_OPERAND(comp_ref, i); ++ if (TREE_CODE(field) == FIELD_DECL) ++ break; ++ } ++ gcc_assert(TREE_CODE(field) == FIELD_DECL); ++ return field; ++} ++ ++static enum marked mark_status(const_tree fndecl, unsigned int argnum) ++{ ++ const_tree attr, p; ++ ++ attr = lookup_attribute("intentional_overflow", DECL_ATTRIBUTES(fndecl)); ++ if (!attr || !TREE_VALUE(attr)) ++ return MARKED_NO; ++ ++ p = TREE_VALUE(attr); ++ if (!TREE_INT_CST_LOW(TREE_VALUE(p))) ++ return MARKED_NOT_INTENTIONAL; ++ ++ do { ++ if (argnum == TREE_INT_CST_LOW(TREE_VALUE(p))) ++ return MARKED_YES; ++ p = TREE_CHAIN(p); ++ } while (p); ++ ++ return MARKED_NO; ++} ++ ++static void print_missing_msg(tree func, unsigned int argnum) ++{ ++ unsigned int new_hash; ++ size_t len; ++ unsigned char tree_codes[CODES_LIMIT]; ++ location_t loc = DECL_SOURCE_LOCATION(func); ++ const char *curfunc = get_asm_name(func); ++ ++ len = get_function_decl(func, tree_codes); ++ new_hash = get_hash_num(curfunc, (const char *) tree_codes, len, 0); ++ inform(loc, "Function %s is missing from the size_overflow hash table +%s+%u+%u+", curfunc, curfunc, argnum, new_hash); ++} ++ ++static unsigned int search_missing_attribute(const_tree arg) ++{ ++ const_tree type = TREE_TYPE(arg); ++ tree func = get_original_function_decl(current_function_decl); ++ unsigned int argnum; ++ const struct size_overflow_hash *hash; ++ ++ gcc_assert(TREE_CODE(arg) != COMPONENT_REF); ++ ++ if (TREE_CODE(type) == POINTER_TYPE) ++ return 0; ++ ++ argnum = find_arg_number(arg, func); ++ if (argnum == 0) ++ return 0; ++ ++ if (lookup_attribute("size_overflow", DECL_ATTRIBUTES(func))) ++ return argnum; ++ ++ hash = get_function_hash(func); ++ if (!hash || !(hash->param & (1U << argnum))) { ++ print_missing_msg(func, argnum); ++ return 0; ++ } ++ return argnum; ++} ++ ++static bool is_already_marked(const_tree lhs) ++{ ++ unsigned int argnum; ++ const_tree fndecl; ++ ++ argnum = search_missing_attribute(lhs); ++ fndecl = get_original_function_decl(current_function_decl); ++ if (argnum && mark_status(fndecl, argnum) == MARKED_YES) ++ return true; ++ return false; ++} ++ ++static bool pre_expand(struct pointer_set_t *visited, const_tree lhs) ++{ ++ const_gimple def_stmt; ++ ++ if (is_gimple_constant(lhs)) ++ return false; ++ ++ if (skip_types(lhs)) ++ return false; ++ ++ if (TREE_CODE(lhs) == PARM_DECL) ++ return is_already_marked(lhs); ++ ++ if (TREE_CODE(lhs) == COMPONENT_REF) { ++ 
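/* a structure field access is treated as marked when the field
++ * declaration itself carries a non-empty intentional_overflow
++ * attribute */ ++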
const_tree field, attr; ++ ++ field = search_field_decl(lhs); ++ attr = lookup_attribute("intentional_overflow", DECL_ATTRIBUTES(field)); ++ if (!attr || !TREE_VALUE(attr)) ++ return false; ++ return true; ++ } ++ ++ def_stmt = get_def_stmt(lhs); ++ ++ if (!def_stmt) ++ return false; ++ ++ if (pointer_set_contains(visited, def_stmt)) ++ return false; ++ ++ switch (gimple_code(def_stmt)) { ++ case GIMPLE_NOP: ++ if (TREE_CODE(SSA_NAME_VAR(lhs)) == PARM_DECL) ++ return is_already_marked(lhs); ++ return false; ++ case GIMPLE_PHI: ++ return walk_phi(visited, lhs); ++ case GIMPLE_CALL: ++ case GIMPLE_ASM: ++ return false; ++ case GIMPLE_ASSIGN: ++ switch (gimple_num_ops(def_stmt)) { ++ case 2: ++ return walk_unary_ops(visited, lhs); ++ case 3: ++ return walk_binary_ops(visited, lhs); ++ } ++ default: ++ debug_gimple_stmt((gimple)def_stmt); ++ error("%s: unknown gimple code", __func__); ++ gcc_unreachable(); ++ } ++} ++ ++static bool search_attributes(tree fndecl, const_tree arg, unsigned int argnum) ++{ ++ struct pointer_set_t *visited; ++ bool is_found; ++ enum marked is_marked; ++ location_t loc; ++ ++ visited = pointer_set_create(); ++ is_found = pre_expand(visited, arg); ++ pointer_set_destroy(visited); ++ ++ is_marked = mark_status(fndecl, argnum + 1); ++ if ((is_found && is_marked == MARKED_YES) || is_marked == MARKED_NOT_INTENTIONAL) ++ return true; ++ ++ if (is_found) { ++ loc = DECL_SOURCE_LOCATION(fndecl); ++ inform(loc, "The intentional_overflow attribute is missing from +%s+%u+", get_asm_name(fndecl), argnum + 1); ++ return true; ++ } ++ return false; ++} ++ ++static void handle_function_arg(gimple stmt, tree fndecl, unsigned int argnum) ++{ ++ struct pointer_set_t *visited; ++ tree arg, newarg; ++ bool match; ++ ++ match = get_function_arg(&argnum, fndecl); ++ if (!match) ++ return; ++ gcc_assert(gimple_call_num_args(stmt) > argnum); ++ arg = gimple_call_arg(stmt, argnum); ++ if (arg == NULL_TREE) ++ return; ++ ++ if (is_gimple_constant(arg)) ++ return; ++ ++ if (search_attributes(fndecl, arg, argnum)) ++ return; ++ ++ if (TREE_CODE(arg) != SSA_NAME) ++ return; ++ ++ check_arg_type(arg); ++ ++ visited = pointer_set_create(); ++ newarg = expand(visited, arg); ++ pointer_set_destroy(visited); ++ ++ if (newarg == NULL_TREE) ++ return; ++ ++ change_function_arg(stmt, arg, argnum, newarg); ++ ++ check_size_overflow(stmt, TREE_TYPE(newarg), newarg, arg, BEFORE_STMT); ++} ++ ++static void handle_function_by_attribute(gimple stmt, const_tree attr, tree fndecl) ++{ ++ tree p = TREE_VALUE(attr); ++ do { ++ handle_function_arg(stmt, fndecl, TREE_INT_CST_LOW(TREE_VALUE(p))-1); ++ p = TREE_CHAIN(p); ++ } while (p); ++} ++ ++static void handle_function_by_hash(gimple stmt, tree fndecl) ++{ ++ tree orig_fndecl; ++ unsigned int num; ++ const struct size_overflow_hash *hash; ++ ++ orig_fndecl = get_original_function_decl(fndecl); ++ if (C_DECL_IMPLICIT(orig_fndecl)) ++ return; ++ hash = get_function_hash(orig_fndecl); ++ if (!hash) ++ return; ++ ++ for (num = 1; num <= MAX_PARAM; num++) ++ if (hash->param & (1U << num)) ++ handle_function_arg(stmt, fndecl, num - 1); ++} ++ ++static void set_plf_false(void) ++{ ++ basic_block bb; ++ ++ FOR_ALL_BB(bb) { ++ gimple_stmt_iterator si; ++ ++ for (si = gsi_start_bb(bb); !gsi_end_p(si); gsi_next(&si)) ++ gimple_set_plf(gsi_stmt(si), MY_STMT, false); ++ for (si = gsi_start_phis(bb); !gsi_end_p(si); gsi_next(&si)) ++ gimple_set_plf(gsi_stmt(si), MY_STMT, false); ++ } ++} ++ ++static unsigned int handle_function(void) ++{ ++ basic_block next, bb = 
ENTRY_BLOCK_PTR->next_bb; ++ ++ set_plf_false(); ++ ++ do { ++ gimple_stmt_iterator gsi; ++ next = bb->next_bb; ++ ++ for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) { ++ tree fndecl, attr; ++ gimple stmt = gsi_stmt(gsi); ++ ++ if (!(is_gimple_call(stmt))) ++ continue; ++ fndecl = gimple_call_fndecl(stmt); ++ if (fndecl == NULL_TREE) ++ continue; ++ if (gimple_call_num_args(stmt) == 0) ++ continue; ++ attr = lookup_attribute("size_overflow", DECL_ATTRIBUTES(fndecl)); ++ if (!attr || !TREE_VALUE(attr)) ++ handle_function_by_hash(stmt, fndecl); ++ else ++ handle_function_by_attribute(stmt, attr, fndecl); ++ gsi = gsi_for_stmt(stmt); ++ next = gimple_bb(stmt)->next_bb; ++ } ++ bb = next; ++ } while (bb); ++ return 0; ++} ++ ++static struct gimple_opt_pass size_overflow_pass = { ++ .pass = { ++ .type = GIMPLE_PASS, ++ .name = "size_overflow", ++ .gate = NULL, ++ .execute = handle_function, ++ .sub = NULL, ++ .next = NULL, ++ .static_pass_number = 0, ++ .tv_id = TV_NONE, ++ .properties_required = PROP_cfg | PROP_referenced_vars, ++ .properties_provided = 0, ++ .properties_destroyed = 0, ++ .todo_flags_start = 0, ++ .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi | TODO_cleanup_cfg | TODO_ggc_collect | TODO_verify_flow ++ } ++}; ++ ++static void start_unit_callback(void __unused *gcc_data, void __unused *user_data) ++{ ++ tree fntype; ++ ++ const_char_ptr_type_node = build_pointer_type(build_type_variant(char_type_node, 1, 0)); ++ ++ // void report_size_overflow(const char *loc_file, unsigned int loc_line, const char *current_func, const char *ssa_var) ++ fntype = build_function_type_list(void_type_node, ++ const_char_ptr_type_node, ++ unsigned_type_node, ++ const_char_ptr_type_node, ++ const_char_ptr_type_node, ++ NULL_TREE); ++ report_size_overflow_decl = build_fn_decl("report_size_overflow", fntype); ++ ++ DECL_ASSEMBLER_NAME(report_size_overflow_decl); ++ TREE_PUBLIC(report_size_overflow_decl) = 1; ++ DECL_EXTERNAL(report_size_overflow_decl) = 1; ++ DECL_ARTIFICIAL(report_size_overflow_decl) = 1; ++ TREE_THIS_VOLATILE(report_size_overflow_decl) = 1; ++} ++ ++int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version) ++{ ++ int i; ++ const char * const plugin_name = plugin_info->base_name; ++ const int argc = plugin_info->argc; ++ const struct plugin_argument * const argv = plugin_info->argv; ++ bool enable = true; ++ ++ struct register_pass_info size_overflow_pass_info = { ++ .pass = &size_overflow_pass.pass, ++ .reference_pass_name = "ssa", ++ .ref_pass_instance_number = 1, ++ .pos_op = PASS_POS_INSERT_AFTER ++ }; ++ ++ if (!plugin_default_version_check(version, &gcc_version)) { ++ error(G_("incompatible gcc/plugin versions")); ++ return 1; ++ } ++ ++ for (i = 0; i < argc; ++i) { ++ if (!strcmp(argv[i].key, "no-size-overflow")) { ++ enable = false; ++ continue; ++ } ++ error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key); ++ } ++ ++ register_callback(plugin_name, PLUGIN_INFO, NULL, &size_overflow_plugin_info); ++ if (enable) { ++ register_callback("start_unit", PLUGIN_START_UNIT, &start_unit_callback, NULL); ++ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &size_overflow_pass_info); ++ } ++ register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL); ++ ++ return 0; ++} +--- tools/gcc/stackleak_plugin.c 1970-01-01 00:00:00.000000000 +0000 ++++ tools/gcc/stackleak_plugin.c 2012-10-15 17:30:59.835924531 +0000 +@@ 
-0,0 +1,313 @@ ++/* ++ * Copyright 2011 by the PaX Team ++ * Licensed under the GPL v2 ++ * ++ * Note: the choice of the license means that the compilation process is ++ * NOT 'eligible' as defined by gcc's library exception to the GPL v3, ++ * but for the kernel it doesn't matter since it doesn't link against ++ * any of the gcc libraries ++ * ++ * gcc plugin to help implement various PaX features ++ * ++ * - track lowest stack pointer ++ * ++ * TODO: ++ * - initialize all local variables ++ * ++ * BUGS: ++ * - none known ++ */ ++#include "gcc-plugin.h" ++#include "config.h" ++#include "system.h" ++#include "coretypes.h" ++#include "tree.h" ++#include "tree-pass.h" ++#include "flags.h" ++#include "intl.h" ++#include "toplev.h" ++#include "plugin.h" ++//#include "expr.h" where are you... ++#include "diagnostic.h" ++#include "plugin-version.h" ++#include "tm.h" ++#include "function.h" ++#include "basic-block.h" ++#include "gimple.h" ++#include "rtl.h" ++#include "emit-rtl.h" ++ ++extern void print_gimple_stmt(FILE *, gimple, int, int); ++ ++int plugin_is_GPL_compatible; ++ ++static int track_frame_size = -1; ++static const char track_function[] = "pax_track_stack"; ++static const char check_function[] = "pax_check_alloca"; ++static bool init_locals; ++ ++static struct plugin_info stackleak_plugin_info = { ++ .version = "201203140940", ++ .help = "track-lowest-sp=nn\ttrack sp in functions whose frame size is at least nn bytes\n" ++// "initialize-locals\t\tforcibly initialize all stack frames\n" ++}; ++ ++static bool gate_stackleak_track_stack(void); ++static unsigned int execute_stackleak_tree_instrument(void); ++static unsigned int execute_stackleak_final(void); ++ ++static struct gimple_opt_pass stackleak_tree_instrument_pass = { ++ .pass = { ++ .type = GIMPLE_PASS, ++ .name = "stackleak_tree_instrument", ++ .gate = gate_stackleak_track_stack, ++ .execute = execute_stackleak_tree_instrument, ++ .sub = NULL, ++ .next = NULL, ++ .static_pass_number = 0, ++ .tv_id = TV_NONE, ++ .properties_required = PROP_gimple_leh | PROP_cfg, ++ .properties_provided = 0, ++ .properties_destroyed = 0, ++ .todo_flags_start = 0, //TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts, ++ .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_update_ssa ++ } ++}; ++ ++static struct rtl_opt_pass stackleak_final_rtl_opt_pass = { ++ .pass = { ++ .type = RTL_PASS, ++ .name = "stackleak_final", ++ .gate = gate_stackleak_track_stack, ++ .execute = execute_stackleak_final, ++ .sub = NULL, ++ .next = NULL, ++ .static_pass_number = 0, ++ .tv_id = TV_NONE, ++ .properties_required = 0, ++ .properties_provided = 0, ++ .properties_destroyed = 0, ++ .todo_flags_start = 0, ++ .todo_flags_finish = TODO_dump_func ++ } ++}; ++ ++static bool gate_stackleak_track_stack(void) ++{ ++ return track_frame_size >= 0; ++} ++ ++static void stackleak_check_alloca(gimple_stmt_iterator *gsi) ++{ ++ gimple check_alloca; ++ tree fntype, fndecl, alloca_size; ++ ++ fntype = build_function_type_list(void_type_node, long_unsigned_type_node, NULL_TREE); ++ fndecl = build_fn_decl(check_function, fntype); ++ DECL_ASSEMBLER_NAME(fndecl); // for LTO ++ ++ // insert call to void pax_check_alloca(unsigned long size) ++ alloca_size = gimple_call_arg(gsi_stmt(*gsi), 0); ++ check_alloca = gimple_build_call(fndecl, 1, alloca_size); ++ gsi_insert_before(gsi, check_alloca, GSI_SAME_STMT); ++} ++ ++static void stackleak_add_instrumentation(gimple_stmt_iterator *gsi) ++{ ++ gimple track_stack; ++ tree fntype, fndecl; ++ ++ fntype = 
build_function_type_list(void_type_node, NULL_TREE); ++ fndecl = build_fn_decl(track_function, fntype); ++ DECL_ASSEMBLER_NAME(fndecl); // for LTO ++ ++ // insert call to void pax_track_stack(void) ++ track_stack = gimple_build_call(fndecl, 0); ++ gsi_insert_after(gsi, track_stack, GSI_CONTINUE_LINKING); ++} ++ ++#if BUILDING_GCC_VERSION == 4005 ++static bool gimple_call_builtin_p(gimple stmt, enum built_in_function code) ++{ ++ tree fndecl; ++ ++ if (!is_gimple_call(stmt)) ++ return false; ++ fndecl = gimple_call_fndecl(stmt); ++ if (!fndecl) ++ return false; ++ if (DECL_BUILT_IN_CLASS(fndecl) != BUILT_IN_NORMAL) ++ return false; ++// print_node(stderr, "pax", fndecl, 4); ++ return DECL_FUNCTION_CODE(fndecl) == code; ++} ++#endif ++ ++static bool is_alloca(gimple stmt) ++{ ++ if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA)) ++ return true; ++ ++#if BUILDING_GCC_VERSION >= 4007 ++ if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA_WITH_ALIGN)) ++ return true; ++#endif ++ ++ return false; ++} ++ ++static unsigned int execute_stackleak_tree_instrument(void) ++{ ++ basic_block bb, entry_bb; ++ bool prologue_instrumented = false, is_leaf = true; ++ ++ entry_bb = ENTRY_BLOCK_PTR_FOR_FUNCTION(cfun)->next_bb; ++ ++ // 1. loop through BBs and GIMPLE statements ++ FOR_EACH_BB(bb) { ++ gimple_stmt_iterator gsi; ++ ++ for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) { ++ gimple stmt; ++ ++ stmt = gsi_stmt(gsi); ++ ++ if (is_gimple_call(stmt)) ++ is_leaf = false; ++ ++ // gimple match: align 8 built-in BUILT_IN_NORMAL:BUILT_IN_ALLOCA attributes ++ if (!is_alloca(stmt)) ++ continue; ++ ++ // 2. insert stack overflow check before each __builtin_alloca call ++ stackleak_check_alloca(&gsi); ++ ++ // 3. insert track call after each __builtin_alloca call ++ stackleak_add_instrumentation(&gsi); ++ if (bb == entry_bb) ++ prologue_instrumented = true; ++ } ++ } ++ ++ // special cases for some bad linux code: taking the address of static inline functions will materialize them ++ // but we mustn't instrument some of them as the resulting stack alignment required by the function call ABI ++ // will break other assumptions regarding the expected (but not otherwise enforced) register clobbering ABI. ++ // case in point: native_save_fl on amd64 when optimized for size clobbers rdx if it were instrumented here. ++ if (is_leaf && !TREE_PUBLIC(current_function_decl) && DECL_DECLARED_INLINE_P(current_function_decl)) ++ return 0; ++ if (is_leaf && !strncmp(IDENTIFIER_POINTER(DECL_NAME(current_function_decl)), "_paravirt_", 10)) ++ return 0; ++ ++ // 4. insert track call at the beginning ++ if (!prologue_instrumented) { ++ gimple_stmt_iterator gsi; ++ ++ bb = split_block_after_labels(ENTRY_BLOCK_PTR)->dest; ++ if (dom_info_available_p(CDI_DOMINATORS)) ++ set_immediate_dominator(CDI_DOMINATORS, bb, ENTRY_BLOCK_PTR); ++ gsi = gsi_start_bb(bb); ++ stackleak_add_instrumentation(&gsi); ++ } ++ ++ return 0; ++} ++ ++static unsigned int execute_stackleak_final(void) ++{ ++ rtx insn; ++ ++ if (cfun->calls_alloca) ++ return 0; ++ ++ // keep calls only if function frame is big enough ++ if (get_frame_size() >= track_frame_size) ++ return 0; ++ ++ // 1. 
find pax_track_stack calls ++ for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) { ++ // rtl match: (call_insn 8 7 9 3 (call (mem (symbol_ref ("pax_track_stack") [flags 0x41] ) [0 S1 A8]) (4)) -1 (nil) (nil)) ++ rtx body; ++ ++ if (!CALL_P(insn)) ++ continue; ++ body = PATTERN(insn); ++ if (GET_CODE(body) != CALL) ++ continue; ++ body = XEXP(body, 0); ++ if (GET_CODE(body) != MEM) ++ continue; ++ body = XEXP(body, 0); ++ if (GET_CODE(body) != SYMBOL_REF) ++ continue; ++ if (strcmp(XSTR(body, 0), track_function)) ++ continue; ++// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size); ++ // 2. delete call ++ insn = delete_insn_and_edges(insn); ++#if BUILDING_GCC_VERSION >= 4007 ++ if (GET_CODE(insn) == NOTE && NOTE_KIND(insn) == NOTE_INSN_CALL_ARG_LOCATION) ++ insn = delete_insn_and_edges(insn); ++#endif ++ } ++ ++// print_simple_rtl(stderr, get_insns()); ++// print_rtl(stderr, get_insns()); ++// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size); ++ ++ return 0; ++} ++ ++int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version) ++{ ++ const char * const plugin_name = plugin_info->base_name; ++ const int argc = plugin_info->argc; ++ const struct plugin_argument * const argv = plugin_info->argv; ++ int i; ++ struct register_pass_info stackleak_tree_instrument_pass_info = { ++ .pass = &stackleak_tree_instrument_pass.pass, ++// .reference_pass_name = "tree_profile", ++ .reference_pass_name = "optimized", ++ .ref_pass_instance_number = 1, ++ .pos_op = PASS_POS_INSERT_BEFORE ++ }; ++ struct register_pass_info stackleak_final_pass_info = { ++ .pass = &stackleak_final_rtl_opt_pass.pass, ++ .reference_pass_name = "final", ++ .ref_pass_instance_number = 1, ++ .pos_op = PASS_POS_INSERT_BEFORE ++ }; ++ ++ if (!plugin_default_version_check(version, &gcc_version)) { ++ error(G_("incompatible gcc/plugin versions")); ++ return 1; ++ } ++ ++ register_callback(plugin_name, PLUGIN_INFO, NULL, &stackleak_plugin_info); ++ ++ for (i = 0; i < argc; ++i) { ++ if (!strcmp(argv[i].key, "track-lowest-sp")) { ++ if (!argv[i].value) { ++ error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key); ++ continue; ++ } ++ track_frame_size = atoi(argv[i].value); ++ if (argv[i].value[0] < '0' || argv[i].value[0] > '9' || track_frame_size < 0) ++ error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value); ++ continue; ++ } ++ if (!strcmp(argv[i].key, "initialize-locals")) { ++ if (argv[i].value) { ++ error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value); ++ continue; ++ } ++ init_locals = true; ++ continue; ++ } ++ error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key); ++ } ++ ++ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_tree_instrument_pass_info); ++ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_final_pass_info); ++ ++ return 0; ++} +--- include/net/bluetooth/bluetooth.h ++++ include/net/bluetooth/bluetooth.h +@@ -207,7 +207,7 @@ + struct file_operations fops; + int (* custom_seq_show)(struct seq_file *, void *); + #endif +-}; ++} __no_const; + + int bt_sock_register(int proto, const struct net_proto_family *ops); + int bt_sock_unregister(int proto); +--- drivers/gpu/drm/i915/i915_drv.h ++++ drivers/gpu/drm/i915/i915_drv.h +@@ -274,12 +274,12 @@ + /* render clock increase/decrease */ + /* display
clock increase/decrease */ + /* pll clock increase/decrease */ +-}; ++} __no_const; + + struct drm_i915_gt_funcs { + void (*force_wake_get)(struct drm_i915_private *dev_priv); + void (*force_wake_put)(struct drm_i915_private *dev_priv); +-}; ++} __no_const; + + #define DEV_INFO_FLAGS \ + DEV_INFO_FLAG(is_mobile) DEV_INFO_SEP \ diff --git a/sys-kernel/compat-drivers/files/compat-drivers-3.8-ath6kl.patch b/sys-kernel/compat-drivers/files/compat-drivers-3.8-ath6kl.patch new file mode 100644 index 00000000..8bec2867 --- /dev/null +++ b/sys-kernel/compat-drivers/files/compat-drivers-3.8-ath6kl.patch @@ -0,0 +1,37 @@ +Fixes for: +drivers/net/wireless/ath/ath6kl/sdio.c: In function ‘ath6kl_sdio_alloc_prep_scat_req’: +drivers/net/wireless/ath/ath6kl/sdio.c:1478:1: warning: find_arg_number: cannot find the buf_sz argument in ath6kl_sdio_alloc_prep_scat_req [enabled by default] +drivers/net/wireless/ath/ath6kl/sdio.c:1478:1: warning: find_arg_number: cannot find the sg_sz argument in ath6kl_sdio_alloc_prep_scat_req [enabled by default] +--- ./drivers/net/wireless/ath/ath6kl/sdio.c ++++ ./drivers/net/wireless/ath/ath6kl/sdio.c +@@ -341,11 +341,14 @@ + scat_list_sz = (n_scat_entry - 1) * sizeof(struct hif_scatter_item); + scat_req_sz = sizeof(*s_req) + scat_list_sz; + +- if (!virt_scat) +- sg_sz = sizeof(struct scatterlist) * n_scat_entry; +- else +- buf_sz = 2 * L1_CACHE_BYTES + +- ATH6KL_MAX_TRANSFER_SIZE_PER_SCATTER; ++ if (!virt_scat) { ++ sg_sz = sizeof(struct scatterlist) * n_scat_entry; ++ buf_sz = 0; ++ } else { ++ sg_sz = 0; ++ buf_sz = 2 * L1_CACHE_BYTES + ++ ATH6KL_MAX_TRANSFER_SIZE_PER_SCATTER; ++ } + + for (i = 0; i < n_scat_req; i++) { + /* allocate the scatter request */ +--- ./drivers/gpu/drm/i915/intel_display.c ++++ ./drivers/gpu/drm/i915/intel_display.c +@@ -7110,7 +7110,7 @@ + obj = work->old_fb_obj; + + atomic_clear_mask(1 << intel_crtc->plane, +- &obj->pending_flip.counter); ++ &obj->pending_flip); + wake_up(&dev_priv->pending_flip_queue); + + queue_work(dev_priv->wq, &work->work); diff --git a/sys-kernel/compat-drivers/files/compat-drivers-3.8-bt_tty.patch b/sys-kernel/compat-drivers/files/compat-drivers-3.8-bt_tty.patch new file mode 100644 index 00000000..dd299121 --- /dev/null +++ b/sys-kernel/compat-drivers/files/compat-drivers-3.8-bt_tty.patch @@ -0,0 +1,37 @@ +--- compat-drivers-3.8-rc7-1-u.orig/net/bluetooth/rfcomm/tty.c 2013-02-11 00:31:59.000000000 +0100 ++++ compat-drivers-3.8-rc7-1-u/net/bluetooth/rfcomm/tty.c 2013-02-13 12:39:58.983001215 +0100 +@@ -309,7 +309,7 @@ + BUG_ON(test_and_set_bit(RFCOMM_TTY_RELEASED, &dev->flags)); + + spin_lock_irqsave(&dev->port.lock, flags); +- if (dev->port.count > 0) { ++ if (atomic_read(&dev->port.count) > 0) { + spin_unlock_irqrestore(&dev->port.lock, flags); + return; + } +@@ -664,10 +664,10 @@ + return -ENODEV; + + BT_DBG("dev %p dst %pMR channel %d opened %d", dev, &dev->dst, +- dev->channel, dev->port.count); ++ dev->channel, atomic_read(&dev->port.count)); + + spin_lock_irqsave(&dev->port.lock, flags); +- if (++dev->port.count > 1) { ++ if (atomic_inc_return(&dev->port.count) > 1) { + spin_unlock_irqrestore(&dev->port.lock, flags); + return 0; + } +@@ -736,10 +736,10 @@ + return; + + BT_DBG("tty %p dev %p dlc %p opened %d", tty, dev, dev->dlc, +- dev->port.count); ++ atomic_read(&dev->port.count)); + + spin_lock_irqsave(&dev->port.lock, flags); +- if (!--dev->port.count) { ++ if (!atomic_dec_return(&dev->port.count)) { + spin_unlock_irqrestore(&dev->port.lock, flags); + if (dev->tty_dev->parent) + #if (LINUX_VERSION_CODE > 
KERNEL_VERSION(2,6,29)) diff --git a/sys-kernel/compat-drivers/files/compat-drivers-3.8-driver-select b/sys-kernel/compat-drivers/files/compat-drivers-3.8-driver-select new file mode 100755 index 00000000..bafaf352 --- /dev/null +++ b/sys-kernel/compat-drivers/files/compat-drivers-3.8-driver-select @@ -0,0 +1,845 @@ +#!/usr/bin/env bash +# Copyright 2009 Luis R. Rodriguez +# +# This script allows you to select your compat-drivers driver and +# reduce compilation time. + +# Heavily modified by Stefan Kuhn +# Configures compat-drivers for multiple drivers at once +# Suited for package managers + +# This internal variable contains a list of all 'Makefile's +CPD_MAKEFILES=" + MAKEFILE + COMPAT_CONFIG_CW + DRIVERS_MAKEFILE + ATH_MAKEFILE + ATH9K_MAKEFILE + BRCM80211_MAKEFILE + RT2X00_MAKEFILE + TI_MAKEFILE + NET_WIRELESS_MAKEFILE + EEPROM_MAKEFILE + DRIVERS_NET_ATHEROS + DRIVERS_NET_BROADCOM + DRIVERS_NET_USB_MAKEFILE + SSB_MAKEFILE + BCMA_MAKEFILE" + +# This internal variable contains an array with paths to all files +CPD_MAKEFILES_ARRAY=( + MAKEFILE=Makefile + COMPAT_CONFIG_CW=config.mk + DRIVERS_MAKEFILE=drivers/net/wireless/Makefile + ATH_MAKEFILE=drivers/net/wireless/ath/Makefile + ATH9K_MAKEFILE=drivers/net/wireless/ath/ath9k/Makefile + BRCM80211_MAKEFILE=drivers/net/wireless/brcm80211/Makefile + RT2X00_MAKEFILE=drivers/net/wireless/rt2x00/Makefile + TI_MAKEFILE=drivers/net/wireless/ti/Makefile + NET_WIRELESS_MAKEFILE=net/wireless/Makefile + EEPROM_MAKEFILE=drivers/misc/eeprom/Makefile + DRIVERS_NET_ATHEROS=drivers/net/ethernet/atheros/Makefile + DRIVERS_NET_BROADCOM=drivers/net/ethernet/broadcom/Makefile + DRIVERS_NET_USB_MAKEFILE=drivers/net/usb/Makefile + SSB_MAKEFILE=drivers/ssb/Makefile + BCMA_MAKEFILE=drivers/bcma/Makefile +) + +# This internal variable holds modules to be added to the atheros Makefile +CPD_ADD_ATHEROS="" + +# This internal variable controls the execution phase (and write protection) +# phases: 0=start, 1=configure, 2=write, 3=restore +# no file should be touched below phase 2 +CPD_PHASE=0 + +# CPD_MODULE +# This internal variable contains a temporary value, the currently processed +# argument + +# CPD_DISABLE_${CPD_MODULE} +# These internal variables contain the 'disable-actions' of the +# currently processed argument + +# This internal variable stores selected drivers (and groups) +CPD_SELECTED_DRIVERS='' + +function die { + echo "$1" 1>&2 + exit 1 +} + +function check_phase { + [ ${CPD_PHASE} -lt ${1} ] && \ + die "Current phase ${CPD_PHASE} lower than ${1}. Check failed" +} + +# This internal function returns the path to a file from CPD_MAKEFILES_ARRAY +function get_makefile { + local file + for file in "${CPD_MAKEFILES_ARRAY[@]}"; do + if [ "${file%%=*}" = "${1}" ]; then + echo "${file#*=}" + return 0 + fi + done + die "File ${1} not found" +} + + +# used to back up files from foo to foo.${BACKUP_EXT} +BACKUP_EXT="bk" + +# Pretty colors +GREEN="\033[01;32m" +YELLOW="\033[01;33m" +NORMAL="\033[00m" +BLUE="\033[34m" +RED="\033[31m" +PURPLE="\033[35m" +CYAN="\033[36m" +UNDERLINE="\033[02m" + +# this internal function disables colors +function unset_colors { + GREEN= + YELLOW= + NORMAL= + BLUE= + RED= + PURPLE= + CYAN= + UNDERLINE= +} + +SUPPORTED_80211_DRIVERS="ath5k ath9k ath9k_ap ath9k_htc carl9170 ath6kl wil6210 b43 zd1211rw rt2x00 wl1251 wl12xx brcmsmac brcmfmac" + +# b43 needs some more work for driver-select, the SSB stuff, plus +# what if you update b44 but not b43? It will bust.
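+# Example invocations (an illustrative sketch, assuming the script is
+# invoked as ./driver-select from the compat-drivers top-level directory,
+# which is checked further down):
+#
+#   ./driver-select ath9k          # configure the tree for a single driver
+#   ./driver-select ath5k b43      # several single drivers at once
+#   ./driver-select -q atheros     # a driver group, with colors disabled
+#   ./driver-select restore        # undo all Makefile changes
+#
+# Mixing driver groups and single drivers in one call is not supported and
+# is rejected by check_groups below.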
+SUPPORTED_ETH_DRIVERS="atl1 atl2 atl1e atl1c alx" + +SUPPORTED_DRM_DRIVERS="i915" + +SUPPORTED_DRIVERS="${SUPPORTED_80211_DRIVERS} ${SUPPORTED_ETH_DRIVERS} ${SUPPORTED_DRM_DRIVERS}" + +function usage { + echo -e "${GREEN}Usage${NORMAL}: ${BOLD}$0${NORMAL} [${PURPLE}-q${NORMAL}] [ ${PURPLE}${NORMAL} | ${CYAN}${NORMAL} | ${GREEN}restore${NORMAL} ]" + + # These should match the switch below. + echo -e "Supported 802.11 drivers:" + local i + for i in $SUPPORTED_80211_DRIVERS; do + echo -e "\t${PURPLE}${i}${NORMAL}" + done + + echo + echo -e "Supported Ethernet drivers:" + for i in $SUPPORTED_ETH_DRIVERS; do + echo -e "\t${PURPLE}${i}${NORMAL}" + done + + echo -e "Supported DRM drivers:" + for i in $SUPPORTED_DRM_DRIVERS; do + echo -e "\t${PURPLE}${i}${NORMAL}" + done + + # These should match the switch below. + echo -e "\nSupported group drivers:" + echo -e "\t${CYAN}atheros${NORMAL} < ${PURPLE} ath5k ath9k carl9170 zd1211rw ath6kl wil6210${NORMAL}>" + echo -e "\t${CYAN}ath${NORMAL} < ${PURPLE} ath5k ath9k carl9170 ath6kl wil6210${NORMAL}>" + echo -e "\t${CYAN}brcm80211${NORMAL} < ${PURPLE} brcmsmac brcmfmac ${NORMAL}>" + echo -e "\t${CYAN}intel${NORMAL} < ${PURPLE} iwlwifi, iwlegacy ${NORMAL}>" + echo -e "\t${CYAN}rtl818x${NORMAL} < ${PURPLE} rtl8180 rtl8187 ${NORMAL}>" + echo -e "\t${CYAN}rtlwifi${NORMAL} < ${PURPLE} rtl8192ce ${NORMAL}>" + echo -e "\t${CYAN}ti${NORMAL} < ${PURPLE} wl1251 wl12xx (SPI and SDIO)${NORMAL}>" + + echo -e "\nSupported group drivers: Bluetooth & Ethernet:" + echo -e "\t${CYAN}atlxx${NORMAL} < ${PURPLE} atl1 atl2 atl1e alx${NORMAL}>" + echo -e "\t${CYAN}bt${NORMAL} < ${PURPLE} Linux bluetooth drivers ${NORMAL}>" + + echo -e "\nSupported group drivers: DRM:" + echo -e "\t${CYAN}drm${NORMAL} < ${PURPLE} i915${NORMAL}>" + + echo + echo -e "Restoring compat-drivers:" + echo -e "\t${GREEN}restore${NORMAL}: you can use this option to restore compat-drivers to the original state" + + echo + echo -e "Options:" + echo -e "\t${PURPLE}-q${NORMAL}:\tDisables colored output" +} + +function backup_file { + check_phase 2 + if [ -f $1.${BACKUP_EXT} ]; then + echo -e "Backup exists: ${CYAN}${1}.${BACKUP_EXT}${NORMAL}" + return + fi + echo -e "Backing up makefile: ${CYAN}${1}.${BACKUP_EXT}${NORMAL}" + cp "${1}" "${1}.${BACKUP_EXT}" || die +} + +# This internal function registers a 'disable' action for a module. +# It writes to a variable CPD_DISABLE_${CPD_MODULE} +function disable { + check_phase 1 + eval "CPD_DISABLE_${CPD_MODULE}+=\" ${*}\"" || die +} + +# This internal function clears a Makefile completely. +function disable_makefile +{ + check_phase 2 + backup_file $1 + echo > $1 +} + +function select_drivers_from_makefile +{ + check_phase 2 + local MAKEFILE=$(get_makefile "$1") + shift + backup_file $MAKEFILE + local CONFIGS="" + local i + for i in $@; do + if [[ "$CONFIGS" = "" ]]; then + CONFIGS="$i" + else + CONFIGS="${CONFIGS}|$i" + fi + done + egrep "$CONFIGS" $MAKEFILE > ${MAKEFILE}.tmp + mv ${MAKEFILE}.tmp ${MAKEFILE} +} + +# This internal function registers filters for the drivers Makefile +function select_drivers { + check_phase 1 + eval "CPD_DRIVERS_MAKEFILE+=\" ${*}\"" || die +} + +# This internal function disables "lib80211" +function disable_lib80211 +{ + check_phase 2 + backup_file "$(get_makefile NET_WIRELESS_MAKEFILE)" + # perl -i -ne 'print if ! 
/LIB80211/ ' $NET_WIRELESS_MAKEFILE + sed -i '/LIB80211/d' "$(get_makefile NET_WIRELESS_MAKEFILE)" || die +} + +# This internal function disables "b44" +function disable_b44 { + check_phase 2 + backup_file "$(get_makefile DRIVERS_NET_BROADCOM)" + # perl -i -ne 'print if ! /CONFIG_B44/ ' $DRIVERS_NET_BROADCOM + sed -i '/CONFIG_B44/d' "$(get_makefile DRIVERS_NET_BROADCOM)" || die +} + +# This internal function disables "ssb" +function disable_ssb +{ + check_phase 2 + disable_makefile "$(get_makefile ${SSB_MAKEFILE})" + # perl -i -ne 'print if ! /drivers\/ssb\//' Makefile + sed -i '/drivers\/ssb\//d' "$(get_makefile MAKEFILE)" || die +} + +# This internal function disables "bcma" +function disable_bcma +{ + check_phase 2 + disable_makefile "$(get_makefile ${BCMA_MAKEFILE})" + # perl -i -ne 'print if ! /drivers\/bcma\//' Makefile + sed -i '/drivers\/bcma\//d' "$(get_makefile MAKEFILE)" || die +} + +# This internal function disables "rfkill" +function disable_rfkill +{ + check_phase 2 + backup_file "$(get_makefile MAKEFILE)" + # perl -i -ne 'print if ! /CONFIG_COMPAT_RFKILL/' Makefile + sed -i '/CONFIG_COMPAT_RFKILL/d' "$(get_makefile MAKEFILE)" || die +} + +# This internal function disables "eeprom" +function disable_eeprom +{ + check_phase 2 + disable_makefile "$(get_makefile ${EEPROM_MAKEFILE})" || die + # perl -i -ne 'print if ! /drivers\/misc\/eeprom\//' Makefile + sed -i '/drivers\/misc\/eeprom\//d' "$(get_makefile MAKEFILE)" || die +} + +# This internal function disables "usbnet" +# TODO: this function appears twice in the driver-select script!?! Why? +function disable_usbnet +{ + check_phase 2 + disable_makefile ${DRIVERS_NET_USB_MAKEFILE} || die + # perl -i -ne 'print if ! /drivers\/net\/usb\//' Makefile + sed -i '/drivers\/net\/usb\//d' "$(get_makefile MAKEFILE)" || die +} + +# This internal function disables "usbnet" +# TODO: this function appears twice in the driver-select script!?! Why? +function disable_usbnet { + check_phase 2 + # perl -i -ne 'print if ! /CONFIG_COMPAT_NET_USB_MODULES/' Makefile + sed -i '/CONFIG_COMPAT_NET_USB_MODULES/d' "$(get_makefile MAKEFILE)" || die +} + +# This internal function disables "ethernet" +function disable_ethernet { + check_phase 2 + # perl -i -ne 'print if ! /CONFIG_COMPAT_NETWORK_MODULES/' Makefile + sed -i '/CONFIG_COMPAT_NETWORK_MODULES/d' "$(get_makefile MAKEFILE)" || die +} + +# This internal function disables "var_03" +function disable_var_03 { + check_phase 2 + # perl -i -ne 'print if ! /CONFIG_COMPAT_VAR_MODULES/' Makefile + sed -i '/CONFIG_COMPAT_VAR_MODULES/d' "$(get_makefile MAKEFILE)" || die +} + +# This internal function disables "bt" +function disable_bt { + check_phase 2 + # perl -i -ne 'print if ! /CONFIG_COMPAT_BLUETOOTH/' Makefile + sed -i '/CONFIG_COMPAT_BLUETOOTH/d' "$(get_makefile MAKEFILE)" || die +} + +# This internal function disables "80211" +function disable_80211 { + check_phase 2 + # perl -i -ne 'print if ! /CONFIG_COMPAT_WIRELESS/' Makefile + sed -i '/CONFIG_COMPAT_WIRELESS/d' "$(get_makefile MAKEFILE)" || die +} + +# This internal function disables "drm" +function disable_drm { + check_phase 2 + # perl -i -ne 'print if !
/CONFIG_COMPAT_VIDEO_MODULES/' Makefile + sed -i '/CONFIG_COMPAT_VIDEO_MODULES/d' "$(get_makefile MAKEFILE)" || die +} + +function disable_bt_usb_ethernet { + check_phase 1 + # backup_file Makefile + disable usbnet + disable ethernet + disable bt + disable update-initramfs + disable drm +} + +function disable_bt_usb_ethernet_var { + check_phase 1 + # backup_file Makefile + disable bt_usb_ethernet + disable var_03 +} + +function enable_only_ethernet { + check_phase 1 + # backup_file Makefile + # backup_file $DRIVERS_NET_BROADCOM + # backup_file $DRIVERS_NET_ATHEROS + disable staging + disable usbnet + disable var_03 + disable bt + disable drm + # rfkill may be needed if you enable b44 as you may have b43 + disable rfkill + disable 80211 +} + +function disable_var { + check_phase 1 + disable ssb + disable bcma + disable usbnet + disable eeprom + disable update-initramfs +} + +function disable_var_01 { + check_phase 1 + disable lib80211 + disable var +} + +function disable_var_02 { + check_phase 1 + #var_01 with eeprom not disabled + disable lib80211 + disable ssb + disable bcma + disable usbnet + disable update-initramfs +} + +# This internal function disables "staging" +function disable_staging { + check_phase 2 + backup_file "$(get_makefile MAKEFILE)" + # perl -i -ne 'print if ! /CONFIG_COMPAT_STAGING/ ' Makefile + sed -i '/CONFIG_COMPAT_STAGING/d' "$(get_makefile MAKEFILE)" || die +} + +# This internal function disables "update-initramfs" +function disable_update-initramfs +{ + check_phase 2 + backup_file "$(get_makefile MAKEFILE)" + # perl -i -ne 'print if ! /update-initramfs/' Makefile + sed -i '/update-initramfs/d' "$(get_makefile MAKEFILE)" || die +} + +function enable_only_drm { + check_phase 1 + # backup_file Makefile + disable ethernet + disable staging + disable usbnet + disable var_03 + disable bt + # rfkill may be needed if you enable b44 as you may have b43 + disable rfkill + disable 80211 +} + +# This internal function registers filters for the ath Makefile +function select_ath_driver +{ + check_phase 1 + # backup_file $ATH_MAKEFILE + # perl -i -ne 'print if /'$1'/ || /CONFIG_ATH_/ || /ath-objs/ || /regd.o/ || /hw.o/ || /key.o/' $ATH_MAKEFILE + eval "CPD_ATH_MAKEFILE+=\" ${*} CONFIG_ATH_ ath-objs regd.o hw.o key.o\"" || die + disable var_01 +} + +# This internal function registers no-common filters for the ath Makefile +function select_ath_no_common +{ + check_phase 1 + # backup_file $ATH_MAKEFILE + # perl -i -ne 'print if /'$1'/' $ATH_MAKEFILE + eval "CPD_ATH_MAKEFILE+=\" ${*}\"" || die + disable var_01 +} + +function select_ath9k_driver +{ + check_phase 1 + select_ath_driver CONFIG_ATH9K_HW + # In the future here we'll add stuff to disable ath9k_htc +} + +function select_ath9k_driver_ap +{ + check_phase 1 + select_ath9k_driver + # backup_file $COMPAT_CONFIG_CW + # perl -i -ne 'print if ! 
/CONFIG_COMPAT_ATH9K_RATE_CONTROL/ ' $COMPAT_CONFIG_CW + # this does not work with multiple drivers, since it's the only filter for that file + # It is only applied when only the ath9k_ap driver is selected and nothing else + # eval "CPD_COMPAT_CONFIG_CW+=\" CONFIG_COMPAT_ATH9K_RATE_CONTROL\"" || die +} + +# This internal function registers filters for the ti Makefile +function select_ti_drivers +{ + check_phase 1 + select_drivers CONFIG_WL_TI + # select_drivers_from_makefile $TI_MAKEFILE $@ + eval "CPD_TI_MAKEFILE+=\" ${*}\"" || die +} + +# This internal function registers filters for the brcm80211 Makefile +function select_brcm80211_driver +{ + check_phase 1 + # backup_file $BRCM80211_MAKEFILE + # perl -i -ne 'print if /'$1'/ || /CONFIG_BRCMUTIL/ ' $BRCM80211_MAKEFILE + eval "CPD_BRCM80211_MAKEFILE+=\" ${*} CONFIG_BRCMUTIL\"" || die +} + +function restore_file { + check_phase 3 + local ORIG="${1%%.${BACKUP_EXT}}" || die + cp $1 $ORIG || die + rm -f $1 || die + echo -e "Restored makefile: ${CYAN}${ORIG}${NORMAL} (and removed backup)" +} + +function restore_compat { + check_phase 3 + local FILES=$(find ./ -type f -name *\."${BACKUP_EXT}") || die + local i + for i in $FILES; do + restore_file $i + done +} + +if [ ! -f .compat_version ]; then + die "Must run $0 from the compat-drivers top level directory" +fi + +# set phase to configure +CPD_PHASE=1 + +# loop over all arguments +# This sets the configuration for each flag/module +for arg in "$@"; do + # clear/set global vars + CPD_MODULE="$arg" + CPD_SELECTED_DRIVERS+=" $arg" + case "$arg" in + restore) + CPD_PHASE=3 + restore_compat + exit 0 + ;; + usage) + usage + exit 0 + ;; + -q) + unset_colors || die + CPD_SELECTED_DRIVERS="${CPD_SELECTED_DRIVERS% -q}" + ;; + # Group drivers + atheros) + select_drivers CONFIG_ATH_CARDS \ + CONFIG_COMPAT_ZD1211RW + disable staging + disable_bt_usb_ethernet_var + disable var_01 + ;; + ath) + disable_bt_usb_ethernet_var + select_drivers CONFIG_ATH_CARDS + disable var_01 + ;; + intel) + select_drivers CONFIG_IWLWIFI \ + CONFIG_IWLEGACY \ + CONFIG_IPW + disable staging + disable var + disable bt + disable ethernet + disable usbnet + ;; + iwlwifi) + select_drivers CONFIG_IWLWIFI + disable staging + disable var_01 + disable bt + disable ethernet + disable usbnet + ;; + iwlegacy) + select_drivers CONFIG_IWLEGACY + disable staging + disable var_01 + disable bt + disable ethernet + disable usbnet + ;; + rtl818x) + select_drivers CONFIG_RTL8180 CONFIG_RTL8187 + disable staging + disable bt_usb_ethernet + disable ssb + disable bcma + disable lib80211 + ;; + rtlwifi) + select_drivers CONFIG_RTL8192CE CONFIG_RTLWIFI + disable staging + disable_bt_usb_ethernet_var + disable lib80211 + ;; + ti) + select_drivers CONFIG_WL_TI + disable_bt_usb_ethernet_var + disable staging + disable var_01 + ;; + brcm80211) + disable staging + disable_bt_usb_ethernet_var + select_drivers CONFIG_BRCMUTIL \ + CONFIG_BRCMFMAC \ + CONFIG_BRCMSMAC + ;; + # Singular modules + ath5k) + disable staging + disable_bt_usb_ethernet_var + select_drivers CONFIG_ATH_CARDS + select_ath_driver CONFIG_ATH5K + #patch -p1 < enable-older-kernels/enable-2.6.23.patch + ;; + ath9k) + disable staging + disable_bt_usb_ethernet_var + select_drivers CONFIG_ATH_CARDS + select_ath9k_driver + ;; + ath9k_ap) + disable staging + disable_bt_usb_ethernet_var + select_drivers CONFIG_ATH_CARDS + select_ath9k_driver_ap + ;; + carl9170) + disable staging + disable_bt_usb_ethernet_var + select_drivers CONFIG_ATH_CARDS + select_ath_driver CONFIG_CARL9170 + ;; + ath9k_htc) +
disable staging + disable_bt_usb_ethernet_var + select_drivers CONFIG_ATH_CARDS + select_ath9k_driver + ;; + ath6kl) + disable staging + disable_bt_usb_ethernet_var + select_drivers CONFIG_ATH_CARDS + select_ath_driver CONFIG_ATH6KL + ;; + wil6210) + disable staging + disable_bt_usb_ethernet_var + select_drivers CONFIG_ATH_CARDS + select_ath_no_common CONFIG_WIL6210 + ;; + brcmsmac) + disable staging + disable_bt_usb_ethernet_var + select_drivers CONFIG_BRCMSMAC + select_brcm80211_driver CONFIG_BRCMSMAC CONFIG_BRCMUTIL + ;; + brcmfmac) + disable staging + disable_bt_usb_ethernet_var + select_drivers CONFIG_BRCMFMAC + select_brcm80211_driver CONFIG_BRCMFMAC CONFIG_BRCMUTIL + ;; + zd1211rw) + select_drivers CONFIG_COMPAT_ZD1211RW + disable staging + disable var_01 + ;; + b43) + disable staging + disable bt_usb_ethernet + disable eeprom + disable lib80211 + select_drivers CONFIG_B43 + ;; + rt2x00) + select_drivers CONFIG_RT2X00 + disable staging + disable_bt_usb_ethernet + disable var_02 + ;; + wl1251) + select_ti_drivers CONFIG_WL1251 + disable staging + disable var_01 + ;; + wl12xx) + select_ti_drivers CONFIG_WL12XX + disable staging + disable var_01 + ;; + wl18xx) + select_ti_drivers CONFIG_WL18XX + disable staging + disable var_01 + ;; + # Ethernet and Bluetooth drivers + atl1) + enable_only_ethernet + disable b44 + # echo -e "obj-\$(CONFIG_ATL1) += atlx/" > "$(get_makefile DRIVERS_NET_ATHEROS)" || die + CPD_ADD_ATHEROS+="obj-\$(CONFIG_ATL1) += atlx/\n" + ;; + atl2) + enable_only_ethernet + disable b44 + # echo -e "obj-\$(CONFIG_ATL2) += atlx/" > "$(get_makefile DRIVERS_NET_ATHEROS)" || die + CPD_ADD_ATHEROS+="obj-\$(CONFIG_ATL2) += atlx/\n" + ;; + atl1e) + enable_only_ethernet + disable b44 + # echo -e "obj-\$(CONFIG_ATL1E) += atl1e/" > "$(get_makefile DRIVERS_NET_ATHEROS)" || die + CPD_ADD_ATHEROS+="obj-\$(CONFIG_ATL1E) += atl1e/\n" + ;; + atl1c) + enable_only_ethernet + disable b44 + # echo -e "obj-\$(CONFIG_ATL1C) += atl1c/" > "$(get_makefile DRIVERS_NET_ATHEROS)" || die + CPD_ADD_ATHEROS+="obj-\$(CONFIG_ATL1C) += atl1c/\n" + ;; + alx) + enable_only_ethernet + disable b44 + # echo -e "obj-\$(CONFIG_ALX) += alx/" > "$(get_makefile DRIVERS_NET_ATHEROS)" || die + CPD_ADD_ATHEROS+="obj-\$(CONFIG_ALX) += alx/\n" + ;; + atlxx) + select_drivers CONFIG_ATL1 CONFIG_ATL2 CONFIG_ATL1E CONFIG_ALX + enable_only_ethernet + disable b44 + disable update-initramfs + ;; + bt) + select_drivers CONFIG_BT + disable var + disable ethernet + disable staging + disable 80211 + ;; + i915) + enable_only_drm + ;; + drm) + enable_only_drm + ;; + *) + ./$0 usage + die "Unsupported driver: ${arg}" + exit 1 + ;; + esac +done + +# special for ath9k_ap +# this filter is only applied when no other driver is selected +if [ "${CPD_SELECTED_DRIVERS}" == " ath9k_ap" ]; then + eval "CPD_COMPAT_CONFIG_CW+=\" CONFIG_COMPAT_ATH9K_RATE_CONTROL\"" \ + || die "Failed to apply special filter for ath9k_ap" +fi + +if [[ ! 
-f built-in.o ]]; then + if [[ "$1" != "restore" ]]; then + echo -e "${PURPLE}Processing new driver-select request...${NORMAL}" + fi +fi + +# This internal function checks if the first argument is contained in the rest +# of the arguments +function has { + local x=$1 + shift + local y + for y in "$@"; do + [ "${y}" = "${x}" ] && return 0 + done + return 1 +} + +# this internal function checks if both groups and single modules were selected +# this is not supported +function check_groups { + local mods= + local grps= + for CPD_MODULE in ${CPD_SELECTED_DRIVERS}; do + if has "${CPD_MODULE}" ${SUPPORTED_DRIVERS}; then + mods+=" ${CPD_MODULE}" + else + grps+=" ${CPD_MODULE}" + fi + done + [ ! "${mods}" == '' ] && [ ! "${grps}" == '' ] && \ + die "Mixing group and single drivers is not supported by this script! Groups: <${grps}> Drivers: <${mods}>" +} +check_groups + +# set phase to write +CPD_PHASE=2 + +# Always backup the top level Makefile, unless restoring +if [[ "$1" != "restore" ]]; then + backup_file Makefile +fi + +# If a user selects a new driver make sure we clean up for them +# first and also restore the backup makefiles then. Otherwise +# we'll be trying to leave drivers on Makefiles which are not +# already there from a previous run. +if [ -f built-in.o ]; then + echo -e "${PURPLE}Old build found, going to clean this up first...${NORMAL}" + make clean + echo -e "${PURPLE}Restoring Makefiles...${NORMAL}" + ./$0 restore +fi + +# This function reads the configuration (disable-actions and filters) for each +# single active flag, then constructs and applies the common configuration set. +function src_configure { + local use_enabled_list="${CPD_SELECTED_DRIVERS}" + # compose common disable list for all flags + # 1st module/flag + local iuse1="$(echo $use_enabled_list | cut -d ' ' -f 1)" || die + eval "local disable_list=\$CPD_DISABLE_${iuse1}" || die + local iuse + local dis + for iuse in ${use_enabled_list}; do + if [ "${iuse}" != "${iuse1}" ]; then + local disable_list_new='' + eval "local disable_list_other=\$CPD_DISABLE_${iuse}" || die + for dis in ${disable_list}; do + has "${dis}" ${disable_list_other} && \ + disable_list_new+=" ${dis}" + done + disable_list="${disable_list_new}" + fi + done + # sort and remove duplicates + disable_list=$(printf '%s\n' ${disable_list} | sort -u | tr '\n' ' ') || die + + # prepend to atheros Makefile + if [ "${CPD_ADD_ATHEROS}" != '' ]; then + # ensure that backup file exists + backup_file "$(get_makefile DRIVERS_NET_ATHEROS)" + # prepend using backup + echo -e "${CPD_ADD_ATHEROS}"|cat - \ + "$(get_makefile DRIVERS_NET_ATHEROS).${BACKUP_EXT}" > \ + "$(get_makefile DRIVERS_NET_ATHEROS)" \ + || die "Failed to prepend to atheros Makefile" + echo -e "Prepended to atheros Makefile ...${NORMAL}" + fi + # execute all filters for the Makefiles + local file + for file in ${CPD_MAKEFILES}; do + eval "local filter_list=\$CPD_${file}" || die + if [ -n "${filter_list}" ]; then + # sort and remove duplicates + filter_list=$(printf '%s\n' ${filter_list} | sort -u | tr '\n' ' ')\ + || die + echo -e "Filtering ${CYAN}$(get_makefile ${file})${NORMAL} for: ${CYAN}${filter_list}${NORMAL}" + select_drivers_from_makefile "${file}" "${filter_list}" || die + fi + done + # execute common disable list + echo -e "Common disable list: ${CYAN}${disable_list}${NORMAL}" + for dis in ${disable_list}; do + echo -e "Running disable function: ${CYAN}disable_${dis}${NORMAL}" + eval "disable_${dis}" || die + done +} + +# call src_configure ... 
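+# An illustration of the intersection logic with made-up flag names: if
+# driver A registered the disable actions "staging lib80211" and driver B
+# registered "staging eeprom", only the common subset -- "staging" -- is
+# executed, so nothing still needed by one of the selected drivers gets
+# disabled.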
+src_configure || die "Failed on src_configure ..." diff --git a/sys-kernel/compat-drivers/files/ipw2200-inject.3.4.6.patch b/sys-kernel/compat-drivers/files/ipw2200-inject.3.4.6.patch new file mode 100644 index 00000000..941bbc50 --- /dev/null +++ b/sys-kernel/compat-drivers/files/ipw2200-inject.3.4.6.patch @@ -0,0 +1,120 @@ +diff -urN linux-2.6.36-gentoo.orig/drivers/net/wireless/ipw2x00/ipw2200.c linux-2.6.36-gentoo/drivers/net/wireless/ipw2x00/ipw2200.c +--- linux-2.6.36-gentoo.orig/drivers/net/wireless/ipw2x00/ipw2200.c 2010-10-21 04:30:22.000000000 +0800 ++++ linux-2.6.36-gentoo/drivers/net/wireless/ipw2x00/ipw2200.c 2010-12-08 22:22:41.937999976 +0800 +@@ -216,6 +216,7 @@ + static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf, + int len, int sync); + ++static int ipw_tx_skb(struct ipw_priv *priv, struct libipw_txb *txb, int pri); + static void ipw_tx_queue_free(struct ipw_priv *); + + static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *); +@@ -1911,6 +1912,63 @@ + static DEVICE_ATTR(net_stats, S_IWUSR | S_IRUGO, + show_net_stats, store_net_stats); + ++/* SYSFS INJECT */ ++static ssize_t store_inject(struct device *d, ++ struct device_attribute *attr, ++ const char *buf, size_t count) ++{ ++ struct ipw_priv *priv = dev_get_drvdata(d); ++ struct libipw_device *ieee = priv->ieee; ++ struct libipw_txb *txb; ++ struct sk_buff *skb_frag; ++ unsigned char *newbuf; ++ unsigned long flags; ++ ++ // should test (ieee->is_queue_full) ++ ++ // Fw only accepts data, so avoid accidental fw errors. ++ if ( (buf[0]&0x0c) != '\x08') { ++ //printk("ipw2200: inject: discarding non-data frame (type=%02X)\n",(int)(unsigned char)buf[0]); ++ return count; ++ } ++ ++ if (count>1500) { ++ count=1500; ++ printk("ipw2200: inject: cutting down frame to 1500 bytes\n"); ++ } ++ ++ spin_lock_irqsave(&priv->lock, flags); ++ ++ // Create a txb with one skb ++ txb = kmalloc(sizeof(struct libipw_txb) + sizeof(u8 *), GFP_ATOMIC); ++ if (!txb) ++ goto nosepuede; ++ txb->nr_frags=1; ++ txb->frag_size = ieee->tx_headroom; ++ txb->fragments[0]=__dev_alloc_skb(count + ieee->tx_headroom, GFP_ATOMIC); ++ if (!txb->fragments[0]) { ++ kfree(txb); ++ goto nosepuede; ++ } ++ skb_reserve(txb->fragments[0], ieee->tx_headroom); ++ txb->encrypted=0; ++ txb->payload_size=count; ++ skb_frag = txb->fragments[0]; ++ newbuf=skb_put(skb_frag, count); ++ ++ // copy data into txb->skb and send it ++ memcpy(newbuf, buf, count); ++ ++ ipw_tx_skb(priv, txb, 0); ++ ++nosepuede: ++ spin_unlock_irqrestore(&priv->lock, flags); ++ return count; ++} ++ ++ ++static DEVICE_ATTR(inject, S_IWUSR, NULL, store_inject); ++ + static ssize_t show_channels(struct device *d, + struct device_attribute *attr, + char *buf) +@@ -10214,7 +10272,6 @@ + modify to send one tfd per fragment instead of using chunking. otherwise + we need to heavily modify the libipw_skb_to_txb. 
+ */ +- static int ipw_tx_skb(struct ipw_priv *priv, struct libipw_txb *txb, + int pri) { +@@ -10544,6 +10601,12 @@ + mutex_lock(&priv->mutex); + priv->config |= CFG_CUSTOM_MAC; + memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN); ++ ++#ifdef CONFIG_IPW2200_PROMISCUOUS ++ if (rtap_iface) ++ memcpy(priv->prom_net_dev->dev_addr, addr->sa_data, ETH_ALEN); ++#endif ++ + printk(KERN_INFO "%s: Setting MAC to %pM\n", + priv->net_dev->name, priv->mac_addr); + schedule_work(&priv->adapter_restart); +@@ -11597,6 +11660,7 @@ + #ifdef CONFIG_IPW2200_PROMISCUOUS + &dev_attr_rtap_iface.attr, + &dev_attr_rtap_filter.attr, ++ &dev_attr_inject.attr, + #endif + NULL + }; +diff -urN linux-2.6.36-gentoo.orig/drivers/net/wireless/ipw2x00/ipw2200.h linux-2.6.36-gentoo/drivers/net/wireless/ipw2x00/ipw2200.h +--- linux-2.6.36-gentoo.orig/drivers/net/wireless/ipw2x00/ipw2200.h 2010-10-21 04:30:22.000000000 +0800 ++++ linux-2.6.36-gentoo/drivers/net/wireless/ipw2x00/ipw2200.h 2010-12-08 22:20:01.561000000 +0800 +@@ -2014,4 +2014,12 @@ + + #define IPW_MAX_CONFIG_RETRIES 10 + ++/* ++ * Hack to get code compiling on new kernels; the define below ++ * seems to have been removed from the Linux headers. ++ */ ++#ifndef MAC_ARG ++#define MAC_ARG(x) ((u8*)(x))[0],((u8*)(x))[1],((u8*)(x))[2],((u8*)(x))[3],((u8*)(x))[4],((u8*)(x))[5] ++#endif ++ + #endif /* __ipw2200_h__ */ diff --git a/sys-kernel/compat-drivers/files/leds-disable-strict-3.6.6.patch b/sys-kernel/compat-drivers/files/leds-disable-strict-3.6.6.patch new file mode 100644 index 00000000..fbc8d159 --- /dev/null +++ b/sys-kernel/compat-drivers/files/leds-disable-strict-3.6.6.patch @@ -0,0 +1,30 @@ +--- drivers/net/wireless/rt2x00/rt2x00leds.c ++++ drivers/net/wireless/rt2x00/rt2x00leds.c +@@ -29,6 +29,7 @@ + #include "rt2x00.h" + #include "rt2x00lib.h" + ++#ifdef CONFIG_RT2X00_LIB_LEDS + void rt2x00leds_led_quality(struct rt2x00_dev *rt2x00dev, int rssi) + { + struct rt2x00_led *led = &rt2x00dev->led_qual; +@@ -244,3 +245,4 @@ + if (rt2x00dev->led_qual.flags & LED_REGISTERED) + rt2x00leds_resume_led(&rt2x00dev->led_qual); + } ++#endif /* CONFIG_RT2X00_LIB_LEDS */ +--- net/mac80211/led.c ++++ net/mac80211/led.c +@@ -12,6 +12,7 @@ + #include + #include "led.h" + ++#ifdef CONFIG_MAC80211_LEDS + void ieee80211_led_rx(struct ieee80211_local *local) + { + if (unlikely(!local->rx_led)) +@@ -307,3 +308,4 @@ + else + ieee80211_start_tpt_led_trig(local); + } ++#endif /* CONFIG_MAC80211_LEDS */ diff --git a/sys-kernel/compat-drivers/files/leds-disable-strict-3.7_rc1_p6.patch b/sys-kernel/compat-drivers/files/leds-disable-strict-3.7_rc1_p6.patch new file mode 100644 index 00000000..fbc8d159 --- /dev/null +++ b/sys-kernel/compat-drivers/files/leds-disable-strict-3.7_rc1_p6.patch @@ -0,0 +1,30 @@ +--- drivers/net/wireless/rt2x00/rt2x00leds.c ++++ drivers/net/wireless/rt2x00/rt2x00leds.c +@@ -29,6 +29,7 @@ + #include "rt2x00.h" + #include "rt2x00lib.h" + ++#ifdef CONFIG_RT2X00_LIB_LEDS + void rt2x00leds_led_quality(struct rt2x00_dev *rt2x00dev, int rssi) + { + struct rt2x00_led *led = &rt2x00dev->led_qual; +@@ -244,3 +245,4 @@ + if (rt2x00dev->led_qual.flags & LED_REGISTERED) + rt2x00leds_resume_led(&rt2x00dev->led_qual); + } ++#endif /* CONFIG_RT2X00_LIB_LEDS */ +--- net/mac80211/led.c ++++ net/mac80211/led.c +@@ -12,6 +12,7 @@ + #include + #include "led.h" + ++#ifdef CONFIG_MAC80211_LEDS + void ieee80211_led_rx(struct ieee80211_local *local) + { + if (unlikely(!local->rx_led)) +@@ -307,3 +308,4 @@ + else + ieee80211_start_tpt_led_trig(local); + } ++#endif /* CONFIG_MAC80211_LEDS */
diff --git a/sys-kernel/compat-drivers/files/leds-disable-strict-3.8.patch b/sys-kernel/compat-drivers/files/leds-disable-strict-3.8.patch new file mode 100644 index 00000000..fbc8d159 --- /dev/null +++ b/sys-kernel/compat-drivers/files/leds-disable-strict-3.8.patch @@ -0,0 +1,30 @@ +--- drivers/net/wireless/rt2x00/rt2x00leds.c ++++ drivers/net/wireless/rt2x00/rt2x00leds.c +@@ -29,6 +29,7 @@ + #include "rt2x00.h" + #include "rt2x00lib.h" + ++#ifdef CONFIG_RT2X00_LIB_LEDS + void rt2x00leds_led_quality(struct rt2x00_dev *rt2x00dev, int rssi) + { + struct rt2x00_led *led = &rt2x00dev->led_qual; +@@ -244,3 +245,4 @@ + if (rt2x00dev->led_qual.flags & LED_REGISTERED) + rt2x00leds_resume_led(&rt2x00dev->led_qual); + } ++#endif /* CONFIG_RT2X00_LIB_LEDS */ +--- net/mac80211/led.c ++++ net/mac80211/led.c +@@ -12,6 +12,7 @@ + #include + #include "led.h" + ++#ifdef CONFIG_MAC80211_LEDS + void ieee80211_led_rx(struct ieee80211_local *local) + { + if (unlikely(!local->rx_led)) +@@ -307,3 +308,4 @@ + else + ieee80211_start_tpt_led_trig(local); + } ++#endif /* CONFIG_MAC80211_LEDS */ diff --git a/sys-kernel/compat-drivers/metadata.xml b/sys-kernel/compat-drivers/metadata.xml new file mode 100644 index 00000000..7a88dd2a --- /dev/null +++ b/sys-kernel/compat-drivers/metadata.xml @@ -0,0 +1,35 @@ + + + + +Add patches for better wifi injection support +Bypass any filtering of the modules and build them all. Use only when no other flag works! +COMPAT_DRIVERS_ETHERNET setting to build driver for alx ethernet cards +COMPAT_DRIVERS_ETHERNET setting to build driver for atl1 ethernet cards +COMPAT_DRIVERS_ETHERNET setting to build driver for atl1c ethernet cards +COMPAT_DRIVERS_ETHERNET setting to build driver for atl1e ethernet cards +COMPAT_DRIVERS_ETHERNET setting to build driver for atl2 ethernet cards +COMPAT_DRIVERS_ETHERNET setting to build driver for atlxx ethernet cards +COMPAT_DRIVERS_VARIOUS setting to build driver for bluetooth devices +COMPAT_DRIVERS_VARIOUS setting to build driver for drm devices +COMPAT_DRIVERS_VARIOUS setting to build driver for i915 devices +COMPAT_DRIVERS_VARIOUS setting to build driver for staging devices +COMPAT_DRIVERS_VARIOUS setting to build driver for usbnet devices +COMPAT_DRIVERS_WIFI setting to build driver for ath5k wireless cards +COMPAT_DRIVERS_WIFI setting to build driver for ath6kl wireless cards +COMPAT_DRIVERS_WIFI setting to build driver for ath9k wireless cards +COMPAT_DRIVERS_WIFI setting to build driver for ath9k_ap wireless cards +COMPAT_DRIVERS_WIFI setting to build driver for ath9k_htc wireless cards +COMPAT_DRIVERS_WIFI setting to build driver for b43 wireless cards +COMPAT_DRIVERS_WIFI setting to build driver for b44 wireless cards +COMPAT_DRIVERS_WIFI setting to build driver for brcmfmac wireless cards +COMPAT_DRIVERS_WIFI setting to build driver for brcmsmac wireless cards +COMPAT_DRIVERS_WIFI setting to build driver for carl9170 wireless cards +COMPAT_DRIVERS_WIFI setting to build driver for rt2x00 wireless cards +COMPAT_DRIVERS_WIFI setting to build driver for wil6210 wireless cards +COMPAT_DRIVERS_WIFI setting to build driver for wl1251 wireless cards +COMPAT_DRIVERS_WIFI setting to build driver for wl12xx wireless cards +COMPAT_DRIVERS_WIFI setting to build driver for wl18xx wireless cards +COMPAT_DRIVERS_WIFI setting to build driver for zd1211rw wireless cards + + diff --git a/sys-kernel/debian-sources-lts/Manifest b/sys-kernel/debian-sources-lts/Manifest new file mode 100644 index 00000000..c5e32f78 --- /dev/null +++ 
b/sys-kernel/debian-sources-lts/Manifest @@ -0,0 +1,4 @@ +DIST linux-2.6_2.6.32-41.diff.gz 15924342 SHA256 4c22fc57902393b12b12fcc3c8ed04d7a99eb4fe311131fb1f647d48a9d85c19 +DIST linux-2.6_2.6.32-43.diff.gz 16261810 SHA256 57f8a8021e590c1c0cb65afccd3f6e78716314db3ac78fae8ddaebb2ebf36801 +DIST linux-2.6_2.6.32-46.diff.gz 16321966 SHA256 8e6220b01f30ee9acb3ae8a6a7825f054ddbe447914cd0a6d60cfb29b7553467 SHA512 047407593c281c2b9920acc6fa173c414d582823e66601894648050828d14c6fa3a07b99c48b53fcd16d6a1fa128f705246f1fa0c3d6715fa3e2a5fa7a7d083a WHIRLPOOL 68e480843569cd802d9c6d3390136f1aad149142527b1cedff8c9cd56ca764a8a83e2f8b88569ef9dbfc8569bbf18062abc6490a31a7ff352f567a2345cabc0c +DIST linux-2.6_2.6.32.orig.tar.gz 82167227 SHA256 e9858964b9d836293e1fe3736658ab1ba20c5897b504ddb09dd4b64ec05a043d diff --git a/sys-kernel/debian-sources-lts/debian-sources-lts-2.6.32.41-r1.ebuild b/sys-kernel/debian-sources-lts/debian-sources-lts-2.6.32.41-r1.ebuild new file mode 100644 index 00000000..db111499 --- /dev/null +++ b/sys-kernel/debian-sources-lts/debian-sources-lts-2.6.32.41-r1.ebuild @@ -0,0 +1,161 @@ +# Distributed under the terms of the GNU General Public License v2 + +EAPI=2 + +inherit mount-boot + +SLOT=$PVR +CKV=2.6.32 +KV_FULL=${PN}-${PVR} +KERNEL_ARCHIVE="linux-2.6_2.6.32.orig.tar.gz" +RESTRICT="binchecks strip" +# based on : http://packages.ubuntu.com/maverick/linux-image-2.6.35-22-server +LICENSE="GPL-2" +KEYWORDS="*" +IUSE="openvz binary" +DEPEND="binary? ( >=sys-kernel/genkernel-3.4.12.6-r4 )" +RDEPEND="binary? ( || ( >=sys-fs/udev-160 >=virtual/udev-171 ) )" +DESCRIPTION="Debian Sources (and optional binary kernel)" +HOMEPAGE="http://www.debian.org" +MAINPATCH="linux-2.6_2.6.32-41.diff.gz" +SRC_URI="http://ftp.bg.debian.org/debian/pool/main/l/linux-2.6/${KERNEL_ARCHIVE} + http://ftp.bg.debian.org/debian/pool/main/l/linux-2.6/${MAINPATCH}" +S="$WORKDIR/linux-${CKV}" + +apply() { + p=$1; shift + case "${p##*.}" in + gz) + ca="gzip -dc" + ;; + bz2) + ca="bzip2 -dc" + ;; + xz) + ca="xz -dc" + ;; + *) + ca="cat" + ;; + esac + [ ! -e $p ] && die "patch $p not found" + echo "Applying patch $p"; $ca $p | patch $* || die "patch $p failed" +} + +pkg_setup() { + unset ARCH; unset LDFLAGS #will interfere with Makefile if set +} + +src_unpack() { + cd ${WORKDIR} + unpack ${KERNEL_ARCHIVE} +} + +src_prepare() { + cd ${WORKDIR} + apply $DISTDIR/$MAINPATCH -p1 + + # debian-specific stuff.... + + mv linux-* ${S##*/} || die + mv debian ${S##*/}/ || die + cd ${S} + sed -i \ + -e 's/^sys.path.append.*$/sys.path.append(".\/debian\/lib\/python")/' \ + -e 's/^_default_home =.*$/_default_home = ".\/debian\/patches"/' \ + debian/bin/patch.apply || die + python2 debian/bin/patch.apply $KV_DEB || die + if use openvz + then + python2 debian/bin/patch.apply -a $ARCH -f openvz || die + fi + + # end of debian-specific stuff... + + sed -i -e "s:^\(EXTRAVERSION =\).*:\1 ${EXTRAVERSION}:" Makefile || die + sed -i -e 's:#export\tINSTALL_PATH:export\tINSTALL_PATH:' Makefile || die + rm -f .config >/dev/null + cp -a debian ${T} || die "couldn't back up debian dir (will be wiped by mrproper)" + make -s mrproper || die "make mrproper failed" + cp -a ${T}/debian . || die "couldn't restore debian directory" + make -s include/linux/version.h || die "make include/linux/version.h failed" + #mv "${TEMP}/configs" "${S}" || die + cd ${S} + local opts + use openvz && opts="openvz" + local myarch="amd64" + [ "$ARCH" = "x86" ] && myarch="i386" + cp ${FILESDIR}/config-extract . 
|| die + chmod +x config-extract || die + ./config-extract ${myarch} ${opts} || die + cp .config ${T}/config || die + make -s mrproper || die "make mrproper failed" + make -s include/linux/version.h || die "make include/linux/version.h failed" +} + +src_compile() { + ! use binary && return + install -d ${WORKDIR}/out/{lib,boot} + install -d ${T}/{cache,twork} + install -d $WORKDIR/build $WORKDIR/out/lib/firmware + genkernel \ + --no-save-config \ + --kernel-config="$T/config" \ + --kernname="${PN}" \ + --build-src="$S" \ + --build-dst=${WORKDIR}/build \ + --makeopts="${MAKEOPTS}" \ + --firmware-dst=${WORKDIR}/out/lib/firmware \ + --cachedir="${T}/cache" \ + --tempdir="${T}/twork" \ + --logfile="${WORKDIR}/genkernel.log" \ + --bootdir="${WORKDIR}/out/boot" \ + --lvm \ + --luks \ + --iscsi \ + --module-prefix="${WORKDIR}/out" \ + all || die "genkernel failed" +} + +src_install() { + # copy sources into place: + dodir /usr/src + cp -a ${S} ${D}/usr/src/linux-${P} || die + cd ${D}/usr/src/linux-${P} + # prepare for real-world use and 3rd-party module building: + make mrproper || die + cp ${T}/config .config || die + cp -a ${T}/debian debian || die + yes "" | make oldconfig || die + # if we didn't use genkernel, we're done. The kernel source tree is left in + # an unconfigured state - you can't compile 3rd-party modules against it yet. + use binary || return + make prepare || die + make scripts || die + # OK, now the source tree is configured to allow 3rd-party modules to be + # built against it, since we want that to work since we have a binary kernel + # built. + cp -a ${WORKDIR}/out/* ${D}/ || die "couldn't copy output files into place" + # module symlink fixup: + rm -f ${D}/lib/modules/*/source || die + rm -f ${D}/lib/modules/*/build || die + cd ${D}/lib/modules + # module strip: + find -iname *.ko -exec strip --strip-debug {} \; + # back to the symlink fixup: + local moddir="$(ls -d [23]*)" + ln -s /usr/src/linux-${P} ${D}/lib/modules/${moddir}/source || die + ln -s /usr/src/linux-${P} ${D}/lib/modules/${moddir}/build || die + + # Fixes FL-14 + cp "${WORKDIR}/build/System.map" "${D}/usr/src/linux-${P}/" || die + cp "${WORKDIR}/build/Module.symvers" "${D}/usr/src/linux-${P}/" || die + +} + +pkg_postinst() { + if [ ! -e ${ROOT}usr/src/linux ] + then + ln -s linux-${P} ${ROOT}usr/src/linux + fi +} diff --git a/sys-kernel/debian-sources-lts/debian-sources-lts-2.6.32.41.ebuild b/sys-kernel/debian-sources-lts/debian-sources-lts-2.6.32.41.ebuild new file mode 100644 index 00000000..2b9f5974 --- /dev/null +++ b/sys-kernel/debian-sources-lts/debian-sources-lts-2.6.32.41.ebuild @@ -0,0 +1,156 @@ +# Distributed under the terms of the GNU General Public License v2 + +EAPI=2 + +inherit mount-boot + +SLOT=$PVR +CKV=2.6.32 +KV_FULL=${PN}-${PVR} +KERNEL_ARCHIVE="linux-2.6_2.6.32.orig.tar.gz" +RESTRICT="binchecks strip" +# based on : http://packages.ubuntu.com/maverick/linux-image-2.6.35-22-server +LICENSE="GPL-2" +KEYWORDS="*" +IUSE="openvz binary" +DEPEND="binary? ( >=sys-kernel/genkernel-3.4.12.6-r4 )" +RDEPEND="binary? 
( || ( >=sys-fs/udev-160 >=virtual/udev-171 ) )" +DESCRIPTION="Debian Sources (and optional binary kernel)" +HOMEPAGE="http://www.debian.org" +MAINPATCH="linux-2.6_2.6.32-41.diff.gz" +SRC_URI="http://ftp.bg.debian.org/debian/pool/main/l/linux-2.6/${KERNEL_ARCHIVE} + http://ftp.bg.debian.org/debian/pool/main/l/linux-2.6/${MAINPATCH}" +S="$WORKDIR/linux-${CKV}" + +apply() { + p=$1; shift + case "${p##*.}" in + gz) + ca="gzip -dc" + ;; + bz2) + ca="bzip2 -dc" + ;; + xz) + ca="xz -dc" + ;; + *) + ca="cat" + ;; + esac + [ ! -e $p ] && die "patch $p not found" + echo "Applying patch $p"; $ca $p | patch $* || die "patch $p failed" +} + +pkg_setup() { + unset ARCH; unset LDFLAGS #will interfere with Makefile if set +} + +src_unpack() { + cd ${WORKDIR} + unpack ${KERNEL_ARCHIVE} +} + +src_prepare() { + cd ${WORKDIR} + apply $DISTDIR/$MAINPATCH -p1 + + # debian-specific stuff.... + + mv linux-* ${S##*/} || die + mv debian ${S##*/}/ || die + cd ${S} + sed -i \ + -e 's/^sys.path.append.*$/sys.path.append(".\/debian\/lib\/python")/' \ + -e 's/^_default_home =.*$/_default_home = ".\/debian\/patches"/' \ + debian/bin/patch.apply || die + python2 debian/bin/patch.apply $KV_DEB || die + if use openvz + then + python2 debian/bin/patch.apply -a $ARCH -f openvz || die + fi + + # end of debian-specific stuff... + + sed -i -e "s:^\(EXTRAVERSION =\).*:\1 ${EXTRAVERSION}:" Makefile || die + sed -i -e 's:#export\tINSTALL_PATH:export\tINSTALL_PATH:' Makefile || die + rm -f .config >/dev/null + cp -a debian ${T} || die "couldn't back up debian dir (will be wiped by mrproper)" + make -s mrproper || die "make mrproper failed" + cp -a ${T}/debian . || die "couldn't restore debian directory" + make -s include/linux/version.h || die "make include/linux/version.h failed" + #mv "${TEMP}/configs" "${S}" || die + cd ${S} + local opts + use openvz && opts="openvz" + local myarch="amd64" + [ "$ARCH" = "x86" ] && myarch="i386" + cp ${FILESDIR}/config-extract . || die + chmod +x config-extract || die + ./config-extract ${myarch} ${opts} || die + cp .config ${T}/config || die + make -s mrproper || die "make mrproper failed" + make -s include/linux/version.h || die "make include/linux/version.h failed" +} + +src_compile() { + ! use binary && return + install -d ${WORKDIR}/out/{lib,boot} + install -d ${T}/{cache,twork} + install -d $WORKDIR/build $WORKDIR/out/lib/firmware + genkernel \ + --no-save-config \ + --kernel-config="$T/config" \ + --kernname="${PN}" \ + --build-src="$S" \ + --build-dst=${WORKDIR}/build \ + --makeopts="${MAKEOPTS}" \ + --firmware-dst=${WORKDIR}/out/lib/firmware \ + --cachedir="${T}/cache" \ + --tempdir="${T}/twork" \ + --logfile="${WORKDIR}/genkernel.log" \ + --bootdir="${WORKDIR}/out/boot" \ + --lvm \ + --luks \ + --iscsi \ + --module-prefix="${WORKDIR}/out" \ + all || die "genkernel failed" +} + +src_install() { + # copy sources into place: + dodir /usr/src + cp -a ${S} ${D}/usr/src/linux-${P} || die + cd ${D}/usr/src/linux-${P} + # prepare for real-world use and 3rd-party module building: + make mrproper || die + cp ${T}/config .config || die + cp -a ${T}/debian debian || die + yes "" | make oldconfig || die + # if we didn't use genkernel, we're done. The kernel source tree is left in + # an unconfigured state - you can't compile 3rd-party modules against it yet. + use binary || return + make prepare || die + make scripts || die + # OK, now the source tree is configured to allow 3rd-party modules to be + # built against it, since we want that to work since we have a binary kernel + # built. 
+ cp -a ${WORKDIR}/out/* ${D}/ || die "couldn't copy output files into place" + # module symlink fixup: + rm -f ${D}/lib/modules/*/source || die + rm -f ${D}/lib/modules/*/build || die + cd ${D}/lib/modules + # module strip: + find -iname *.ko -exec strip --strip-debug {} \; + # back to the symlink fixup: + local moddir="$(ls -d [23]*)" + ln -s /usr/src/linux-${P} ${D}/lib/modules/${moddir}/source || die + ln -s /usr/src/linux-${P} ${D}/lib/modules/${moddir}/build || die +} + +pkg_postinst() { + if [ ! -e ${ROOT}usr/src/linux ] + then + ln -s linux-${P} ${ROOT}usr/src/linux + fi +} diff --git a/sys-kernel/debian-sources-lts/debian-sources-lts-2.6.32.43-r1.ebuild b/sys-kernel/debian-sources-lts/debian-sources-lts-2.6.32.43-r1.ebuild new file mode 100644 index 00000000..36225eef --- /dev/null +++ b/sys-kernel/debian-sources-lts/debian-sources-lts-2.6.32.43-r1.ebuild @@ -0,0 +1,161 @@ +# Distributed under the terms of the GNU General Public License v2 + +EAPI=2 + +inherit mount-boot + +SLOT=$PVR +CKV=2.6.32 +KV_FULL=${PN}-${PVR} +KERNEL_ARCHIVE="linux-2.6_2.6.32.orig.tar.gz" +RESTRICT="binchecks strip" +# based on : http://packages.ubuntu.com/maverick/linux-image-2.6.35-22-server +LICENSE="GPL-2" +KEYWORDS="*" +IUSE="openvz binary" +DEPEND="binary? ( >=sys-kernel/genkernel-3.4.12.6-r4 )" +RDEPEND="binary? ( || ( >=sys-fs/udev-160 >=virtual/udev-171 ) )" +DESCRIPTION="Debian Sources (and optional binary kernel)" +HOMEPAGE="http://www.debian.org" +MAINPATCH="linux-2.6_2.6.32-43.diff.gz" +SRC_URI="http://ftp.bg.debian.org/debian/pool/main/l/linux-2.6/${KERNEL_ARCHIVE} + http://ftp.bg.debian.org/debian/pool/main/l/linux-2.6/${MAINPATCH}" +S="$WORKDIR/linux-${CKV}" + +apply() { + p=$1; shift + case "${p##*.}" in + gz) + ca="gzip -dc" + ;; + bz2) + ca="bzip2 -dc" + ;; + xz) + ca="xz -dc" + ;; + *) + ca="cat" + ;; + esac + [ ! -e $p ] && die "patch $p not found" + echo "Applying patch $p"; $ca $p | patch $* || die "patch $p failed" +} + +pkg_setup() { + unset ARCH; unset LDFLAGS #will interfere with Makefile if set +} + +src_unpack() { + cd ${WORKDIR} + unpack ${KERNEL_ARCHIVE} +} + +src_prepare() { + cd ${WORKDIR} + apply $DISTDIR/$MAINPATCH -p1 + + # debian-specific stuff.... + + mv linux-* ${S##*/} || die + mv debian ${S##*/}/ || die + cd ${S} + sed -i \ + -e 's/^sys.path.append.*$/sys.path.append(".\/debian\/lib\/python")/' \ + -e 's/^_default_home =.*$/_default_home = ".\/debian\/patches"/' \ + debian/bin/patch.apply || die + python2 debian/bin/patch.apply $KV_DEB || die + if use openvz + then + python2 debian/bin/patch.apply -a $ARCH -f openvz || die + fi + + # end of debian-specific stuff... + + sed -i -e "s:^\(EXTRAVERSION =\).*:\1 ${EXTRAVERSION}:" Makefile || die + sed -i -e 's:#export\tINSTALL_PATH:export\tINSTALL_PATH:' Makefile || die + rm -f .config >/dev/null + cp -a debian ${T} || die "couldn't back up debian dir (will be wiped by mrproper)" + make -s mrproper || die "make mrproper failed" + cp -a ${T}/debian . || die "couldn't restore debian directory" + make -s include/linux/version.h || die "make include/linux/version.h failed" + #mv "${TEMP}/configs" "${S}" || die + cd ${S} + local opts + use openvz && opts="openvz" + local myarch="amd64" + [ "$ARCH" = "x86" ] && myarch="i386" + cp ${FILESDIR}/config-extract . || die + chmod +x config-extract || die + ./config-extract ${myarch} ${opts} || die + cp .config ${T}/config || die + make -s mrproper || die "make mrproper failed" + make -s include/linux/version.h || die "make include/linux/version.h failed" +} + +src_compile() { + ! 
use binary && return + install -d ${WORKDIR}/out/{lib,boot} + install -d ${T}/{cache,twork} + install -d $WORKDIR/build $WORKDIR/out/lib/firmware + genkernel \ + --no-save-config \ + --kernel-config="$T/config" \ + --kernname="${PN}" \ + --build-src="$S" \ + --build-dst=${WORKDIR}/build \ + --makeopts="${MAKEOPTS}" \ + --firmware-dst=${WORKDIR}/out/lib/firmware \ + --cachedir="${T}/cache" \ + --tempdir="${T}/twork" \ + --logfile="${WORKDIR}/genkernel.log" \ + --bootdir="${WORKDIR}/out/boot" \ + --lvm \ + --luks \ + --iscsi \ + --module-prefix="${WORKDIR}/out" \ + all || die "genkernel failed" +} + +src_install() { + # copy sources into place: + dodir /usr/src + cp -a ${S} ${D}/usr/src/linux-${P} || die + cd ${D}/usr/src/linux-${P} + # prepare for real-world use and 3rd-party module building: + make mrproper || die + cp ${T}/config .config || die + cp -a ${T}/debian debian || die + yes "" | make oldconfig || die + # if we didn't use genkernel, we're done. The kernel source tree is left in + # an unconfigured state - you can't compile 3rd-party modules against it yet. + use binary || return + make prepare || die + make scripts || die + # OK, now the source tree is configured to allow 3rd-party modules to be + # built against it, since we want that to work since we have a binary kernel + # built. + cp -a ${WORKDIR}/out/* ${D}/ || die "couldn't copy output files into place" + # module symlink fixup: + rm -f ${D}/lib/modules/*/source || die + rm -f ${D}/lib/modules/*/build || die + cd ${D}/lib/modules + # module strip: + find -iname *.ko -exec strip --strip-debug {} \; + # back to the symlink fixup: + local moddir="$(ls -d [23]*)" + ln -s /usr/src/linux-${P} ${D}/lib/modules/${moddir}/source || die + ln -s /usr/src/linux-${P} ${D}/lib/modules/${moddir}/build || die + + # Fixes FL-14 + cp "${WORKDIR}/build/System.map" "${D}/usr/src/linux-${P}/" || die + cp "${WORKDIR}/build/Module.symvers" "${D}/usr/src/linux-${P}/" || die + +} + +pkg_postinst() { + if [ ! -e ${ROOT}usr/src/linux ] + then + ln -s linux-${P} ${ROOT}usr/src/linux + fi +} diff --git a/sys-kernel/debian-sources-lts/debian-sources-lts-2.6.32.43.ebuild b/sys-kernel/debian-sources-lts/debian-sources-lts-2.6.32.43.ebuild new file mode 100644 index 00000000..ee437a14 --- /dev/null +++ b/sys-kernel/debian-sources-lts/debian-sources-lts-2.6.32.43.ebuild @@ -0,0 +1,156 @@ +# Distributed under the terms of the GNU General Public License v2 + +EAPI=2 + +inherit mount-boot + +SLOT=$PVR +CKV=2.6.32 +KV_FULL=${PN}-${PVR} +KERNEL_ARCHIVE="linux-2.6_2.6.32.orig.tar.gz" +RESTRICT="binchecks strip" +# based on : http://packages.ubuntu.com/maverick/linux-image-2.6.35-22-server +LICENSE="GPL-2" +KEYWORDS="*" +IUSE="openvz binary" +DEPEND="binary? ( >=sys-kernel/genkernel-3.4.12.6-r4 )" +RDEPEND="binary? ( || ( >=sys-fs/udev-160 >=virtual/udev-171 ) )" +DESCRIPTION="Debian Sources (and optional binary kernel)" +HOMEPAGE="http://www.debian.org" +MAINPATCH="linux-2.6_2.6.32-43.diff.gz" +SRC_URI="http://ftp.bg.debian.org/debian/pool/main/l/linux-2.6/${KERNEL_ARCHIVE} + http://ftp.bg.debian.org/debian/pool/main/l/linux-2.6/${MAINPATCH}" +S="$WORKDIR/linux-${CKV}" + +apply() { + p=$1; shift + case "${p##*.}" in + gz) + ca="gzip -dc" + ;; + bz2) + ca="bzip2 -dc" + ;; + xz) + ca="xz -dc" + ;; + *) + ca="cat" + ;; + esac + [ ! 
-e $p ] && die "patch $p not found" + echo "Applying patch $p"; $ca $p | patch $* || die "patch $p failed" +} + +pkg_setup() { + unset ARCH; unset LDFLAGS #will interfere with Makefile if set +} + +src_unpack() { + cd ${WORKDIR} + unpack ${KERNEL_ARCHIVE} +} + +src_prepare() { + cd ${WORKDIR} + apply $DISTDIR/$MAINPATCH -p1 + + # debian-specific stuff.... + + mv linux-* ${S##*/} || die + mv debian ${S##*/}/ || die + cd ${S} + sed -i \ + -e 's/^sys.path.append.*$/sys.path.append(".\/debian\/lib\/python")/' \ + -e 's/^_default_home =.*$/_default_home = ".\/debian\/patches"/' \ + debian/bin/patch.apply || die + python2 debian/bin/patch.apply $KV_DEB || die + if use openvz + then + python2 debian/bin/patch.apply -a $ARCH -f openvz || die + fi + + # end of debian-specific stuff... + + sed -i -e "s:^\(EXTRAVERSION =\).*:\1 ${EXTRAVERSION}:" Makefile || die + sed -i -e 's:#export\tINSTALL_PATH:export\tINSTALL_PATH:' Makefile || die + rm -f .config >/dev/null + cp -a debian ${T} || die "couldn't back up debian dir (will be wiped by mrproper)" + make -s mrproper || die "make mrproper failed" + cp -a ${T}/debian . || die "couldn't restore debian directory" + make -s include/linux/version.h || die "make include/linux/version.h failed" + #mv "${TEMP}/configs" "${S}" || die + cd ${S} + local opts + use openvz && opts="openvz" + local myarch="amd64" + [ "$ARCH" = "x86" ] && myarch="i386" + cp ${FILESDIR}/config-extract . || die + chmod +x config-extract || die + ./config-extract ${myarch} ${opts} || die + cp .config ${T}/config || die + make -s mrproper || die "make mrproper failed" + make -s include/linux/version.h || die "make include/linux/version.h failed" +} + +src_compile() { + ! use binary && return + install -d ${WORKDIR}/out/{lib,boot} + install -d ${T}/{cache,twork} + install -d $WORKDIR/build $WORKDIR/out/lib/firmware + genkernel \ + --no-save-config \ + --kernel-config="$T/config" \ + --kernname="${PN}" \ + --build-src="$S" \ + --build-dst=${WORKDIR}/build \ + --makeopts="${MAKEOPTS}" \ + --firmware-dst=${WORKDIR}/out/lib/firmware \ + --cachedir="${T}/cache" \ + --tempdir="${T}/twork" \ + --logfile="${WORKDIR}/genkernel.log" \ + --bootdir="${WORKDIR}/out/boot" \ + --lvm \ + --luks \ + --iscsi \ + --module-prefix="${WORKDIR}/out" \ + all || die "genkernel failed" +} + +src_install() { + # copy sources into place: + dodir /usr/src + cp -a ${S} ${D}/usr/src/linux-${P} || die + cd ${D}/usr/src/linux-${P} + # prepare for real-world use and 3rd-party module building: + make mrproper || die + cp ${T}/config .config || die + cp -a ${T}/debian debian || die + yes "" | make oldconfig || die + # if we didn't use genkernel, we're done. The kernel source tree is left in + # an unconfigured state - you can't compile 3rd-party modules against it yet. + use binary || return + make prepare || die + make scripts || die + # OK, now the source tree is configured to allow 3rd-party modules to be + # built against it, since we want that to work since we have a binary kernel + # built. + cp -a ${WORKDIR}/out/* ${D}/ || die "couldn't copy output files into place" + # module symlink fixup: + rm -f ${D}/lib/modules/*/source || die + rm -f ${D}/lib/modules/*/build || die + cd ${D}/lib/modules + # module strip: + find -iname *.ko -exec strip --strip-debug {} \; + # back to the symlink fixup: + local moddir="$(ls -d [23]*)" + ln -s /usr/src/linux-${P} ${D}/lib/modules/${moddir}/source || die + ln -s /usr/src/linux-${P} ${D}/lib/modules/${moddir}/build || die +} + +pkg_postinst() { + if [ ! 
-e ${ROOT}usr/src/linux ] + then + ln -s linux-${P} ${ROOT}usr/src/linux + fi +} diff --git a/sys-kernel/debian-sources-lts/debian-sources-lts-2.6.32.46.ebuild b/sys-kernel/debian-sources-lts/debian-sources-lts-2.6.32.46.ebuild new file mode 100644 index 00000000..94e1100a --- /dev/null +++ b/sys-kernel/debian-sources-lts/debian-sources-lts-2.6.32.46.ebuild @@ -0,0 +1,162 @@ +# Distributed under the terms of the GNU General Public License v2 + +EAPI=2 + +inherit mount-boot + +SLOT=$PVR +CKV=2.6.32 +KV_FULL=${PN}-${PVR} +KERNEL_ARCHIVE="linux-2.6_2.6.32.orig.tar.gz" +RESTRICT="binchecks strip" +# based on : http://packages.ubuntu.com/maverick/linux-image-2.6.35-22-server +LICENSE="GPL-2" +KEYWORDS="*" +IUSE="openvz binary" +DEPEND="binary? ( >=sys-kernel/genkernel-3.4.12.6-r4 )" +RDEPEND="binary? ( || ( >=sys-fs/udev-160 >=virtual/udev-171 ) )" +DESCRIPTION="Debian Sources (and optional binary kernel)" +HOMEPAGE="http://www.debian.org" +MAINPATCH="linux-2.6_2.6.32-46.diff.gz" +SRC_URI="http://ftp.osuosl.org/pub/funtoo/distfiles/${KERNEL_ARCHIVE} + http://ftp.osuosl.org/pub/funtoo/distfiles/${MAINPATCH}" +RESTRICT="mirror" +S="$WORKDIR/linux-${CKV}" + +apply() { + p=$1; shift + case "${p##*.}" in + gz) + ca="gzip -dc" + ;; + bz2) + ca="bzip2 -dc" + ;; + xz) + ca="xz -dc" + ;; + *) + ca="cat" + ;; + esac + [ ! -e $p ] && die "patch $p not found" + echo "Applying patch $p"; $ca $p | patch $* || die "patch $p failed" +} + +pkg_setup() { + unset ARCH; unset LDFLAGS #will interfere with Makefile if set +} + +src_unpack() { + cd ${WORKDIR} + unpack ${KERNEL_ARCHIVE} +} + +src_prepare() { + cd ${WORKDIR} + apply $DISTDIR/$MAINPATCH -p1 + + # debian-specific stuff.... + + mv linux-* ${S##*/} || die + mv debian ${S##*/}/ || die + cd ${S} + sed -i \ + -e 's/^sys.path.append.*$/sys.path.append(".\/debian\/lib\/python")/' \ + -e 's/^_default_home =.*$/_default_home = ".\/debian\/patches"/' \ + debian/bin/patch.apply || die + python2 debian/bin/patch.apply $KV_DEB || die + if use openvz + then + python2 debian/bin/patch.apply -a $ARCH -f openvz || die + fi + + # end of debian-specific stuff... + + sed -i -e "s:^\(EXTRAVERSION =\).*:\1 ${EXTRAVERSION}:" Makefile || die + sed -i -e 's:#export\tINSTALL_PATH:export\tINSTALL_PATH:' Makefile || die + rm -f .config >/dev/null + cp -a debian ${T} || die "couldn't back up debian dir (will be wiped by mrproper)" + make -s mrproper || die "make mrproper failed" + cp -a ${T}/debian . || die "couldn't restore debian directory" + make -s include/linux/version.h || die "make include/linux/version.h failed" + #mv "${TEMP}/configs" "${S}" || die + cd ${S} + local opts + use openvz && opts="openvz" + local myarch="amd64" + [ "$ARCH" = "x86" ] && myarch="i386" + cp ${FILESDIR}/config-extract . || die + chmod +x config-extract || die + ./config-extract ${myarch} ${opts} || die + cp .config ${T}/config || die + make -s mrproper || die "make mrproper failed" + make -s include/linux/version.h || die "make include/linux/version.h failed" +} + +src_compile() { + ! 
use binary && return + install -d ${WORKDIR}/out/{lib,boot} + install -d ${T}/{cache,twork} + install -d $WORKDIR/build $WORKDIR/out/lib/firmware + genkernel \ + --no-save-config \ + --kernel-config="$T/config" \ + --kernname="${PN}" \ + --build-src="$S" \ + --build-dst=${WORKDIR}/build \ + --makeopts="${MAKEOPTS}" \ + --firmware-dst=${WORKDIR}/out/lib/firmware \ + --cachedir="${T}/cache" \ + --tempdir="${T}/twork" \ + --logfile="${WORKDIR}/genkernel.log" \ + --bootdir="${WORKDIR}/out/boot" \ + --lvm \ + --luks \ + --iscsi \ + --module-prefix="${WORKDIR}/out" \ + all || die "genkernel failed" +} + +src_install() { + # copy sources into place: + dodir /usr/src + cp -a ${S} ${D}/usr/src/linux-${P} || die + cd ${D}/usr/src/linux-${P} + # prepare for real-world use and 3rd-party module building: + make mrproper || die + cp ${T}/config .config || die + cp -a ${T}/debian debian || die + yes "" | make oldconfig || die + # if we didn't use genkernel, we're done. The kernel source tree is left in + # an unconfigured state - you can't compile 3rd-party modules against it yet. + use binary || return + make prepare || die + make scripts || die + # OK, now the source tree is configured to allow 3rd-party modules to be + # built against it, since we want that to work since we have a binary kernel + # built. + cp -a ${WORKDIR}/out/* ${D}/ || die "couldn't copy output files into place" + # module symlink fixup: + rm -f ${D}/lib/modules/*/source || die + rm -f ${D}/lib/modules/*/build || die + cd ${D}/lib/modules + # module strip: + find -iname *.ko -exec strip --strip-debug {} \; + # back to the symlink fixup: + local moddir="$(ls -d [23]*)" + ln -s /usr/src/linux-${P} ${D}/lib/modules/${moddir}/source || die + ln -s /usr/src/linux-${P} ${D}/lib/modules/${moddir}/build || die + + # Fixes FL-14 + cp "${WORKDIR}/build/System.map" "${D}/usr/src/linux-${P}/" || die + cp "${WORKDIR}/build/Module.symvers" "${D}/usr/src/linux-${P}/" || die + +} + +pkg_postinst() { + if [ ! -e ${ROOT}usr/src/linux ] + then + ln -s linux-${P} ${ROOT}usr/src/linux + fi +} diff --git a/sys-kernel/debian-sources-lts/files/config-extract b/sys-kernel/debian-sources-lts/files/config-extract new file mode 100755 index 00000000..fe15f548 --- /dev/null +++ b/sys-kernel/debian-sources-lts/files/config-extract @@ -0,0 +1,216 @@ +#!/usr/bin/python2 + +import os,sys,re +import getopt + +re_head = re.compile('^binary-arch_(.*)_real::') +re_flav = re.compile('binary-arch-flavour') +re_item = re.compile("[A-Z_]*='[^']*'") + +try: + f=open("debian/rules.gen","r") +except: + print "Unable to open debian/rules.gen; can't continue." 
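+ # debian/rules.gen only exists inside an unpacked Debian kernel source tree;
+ # the ebuilds copy this script into ${S} and run it from there.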
+ sys.exit(1) +lines=f.readlines() +f.close() + +line=0 + +configlist = [] +configdict = {} + +# scan Debian rules.gen file and gather all variable data into a more useable format: + +while line < len(lines): + head_match = re_head.match(lines[line]) + if not head_match: + line += 1 + continue + config_name = head_match.group(1) + line += 1 + if not re_flav.findall(lines[line]): + continue + lsplit = re_item.findall(lines[line]) + groovydict = {} + for item in lsplit: + kv = item.split("=",1) + if len(kv) < 2: + continue + groovydict[kv[0]] = kv[1][1:-1] + configlist.append(config_name) + configdict[config_name] = groovydict + line += 1 + +# We will organize the arch, featureset and flavors into cascading lists so +# that we can present a nice clean chart of what's available to the user: + +archdict = {} + +for config in configlist: + cs = config.split("_") + if not cs[0] in archdict: + archdict[cs[0]] = { } + if cs[1] == "none": + cs[1] = None + if cs[1] not in archdict[cs[0]]: + archdict[cs[0]][cs[1]] = [] + archdict[cs[0]][cs[1]].append(cs[2]) + +arches = archdict.keys() +arches.sort() + +features = [ None ] +for arch in arches: + for flav in archdict[arch]: + if flav not in features: + features.append(flav) + +PROG="config-extract" +def usage(): + print """This work is free software. + +Copyright 2011 Funtoo Technologies. You can redistribute and/or modify it under +the terms of the GNU General Public License version 3 as published by the Free +Software Foundation. Alternatively you may (at your option) use any other +license that has been publicly approved for use with this program by Funtoo +Technologies (or its successors, if any.) + +usage: %s [options] arch [featureset] [subarch] + + -h --help print this usage and exit + -l --list list all available kernel configurations + -o --outfile specify kernel config outfile -- + defaults to .config in current directory + [featureset] defaults to "none" if not specified + [subarch] defaults to the only one available; otherwise required + +This program was written by Daniel Robbins for Funtoo Linux, for the purpose of +easily and conveniently extracting Debian kernel configurations. To see a nice +list of all available kernel configurations, use the --list option. + +Debian's kernel configs are specified internally in arch_featureset_flavor +format, such as: "amd64_openvz_amd64". The featureset typically describes an +optional kernel configuration such as "xen" or "openvz", while the flavor in +Debian terminology typically refers to the sub-architecture of the CPU. + +When using this command, you must specify an arch. A featureset of "none" is +assumed unless you specify one, and by default this program will pick the only +available subarch if there is only one to choose from. If not, you will need to +pick one (and the program will remind you to do this.) + +The kernel configuration will be written to ".config" in the current directory, +or the location you specified using the -o/--outfile option. 
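+
+For example, "config-extract amd64 openvz" extracts the amd64 OpenVZ kernel
+configuration, and "config-extract --list" prints every available combination.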
+""" % PROG + sys.exit(2) + +try: + opts, args = getopt.getopt(sys.argv[1:], "o:hl", ["help", "list","outfile="]) +except getopt.GetoptError, err: + print str(err) + usage() + +mode="run" +outfile=None +for o,a in opts: + if o in ("-h", "--help"): + usage() + elif o in ("-l", "--list"): + mode="list" + elif o in ("-o", "--outfile"): + outfile = a + else: + assert False, "Unhandled option" +if mode == "run": + if len(args) < 1 or len(args) > 3: + if len(args) == 0: + print "Please specify an arch - one of: "+", ".join(arches) + sys.exit(2) + else: + print "Too many arguments." + usage() + arch = args[0] + if outfile == None: + outfile = os.path.join(os.getcwd(),".config") + featureset = None + subarch = None + if len(args) == 3: + featureset = args[1] + subarch = args[2] + elif len(args) == 2: + featureset = args[1] + +# print out optimized list of available kernel configurations: + +if mode=="list": + print + for flav in features: + label = flav + if label == None: + label = "standard" + print "====== %s featureset ======" % label + print + for arch in arches: + if flav in archdict[arch]: + if len(archdict[arch][flav]) == 1: + print arch.rjust(12) + else: + flavlist = archdict[arch][flav] + flavlist.sort() + variants = ", ".join(flavlist) + print arch.rjust(12) + ":", variants + print + sys.exit(0) + +# featureset defaults to None. + +if featureset not in archdict[arch]: + print "Error: There is no '%s' featureset kernel config for arch '%s'. Exiting." % ( featureset, arch ) + sys.exit(2) + +# If a subarch is not specified (None), then we will auto-pick the subarch if only one is available. +# Debian often has an "amd64" subarch for the "amd64" arch, rather than "none" as I might expect: + +if subarch == None: + if len(archdict[arch][featureset]) == 1: + subarch = archdict[arch][featureset][0] + else: + print "Error: there is more than one 'sub-architecture' for this arch." + print "Please specify one of the following subarches as a secondary argument:" + print ", ".join(archdict[arch][featureset]) + sys.exit(2) +else: + if subarch not in archdict[arch][featureset]: + print "Error: specified sub-architecture '%s' is not available for this arch. Exiting." % subarch + sys.exit(2) + +# We've done all our arg processing, now let's construct the master_key that we will use to look up the +# proper settings to pass to Debian's debian/bin/kconfig.py command: + +master_key=arch +if featureset == None: + master_key += "_none" +else: + master_key += "_%s" % featureset +if subarch == None: + master_key += "_none" +else: + master_key += "_%s" % subarch +if master_key not in configdict: + print "Master key lookup failed; can't continue. Please report this bug." + sys.exit(1) +if "KCONFIG" not in configdict[master_key]: + print "Unable to find KCONFIG option; can't continue. Please report this bug." + sys.exit(1) +cmd = "python2 debian/bin/kconfig.py '%s' %s" % ( outfile, configdict[master_key]["KCONFIG"] ) +if "KCONFIG_OPTIONS" in configdict[master_key]: + cmd += " %s" % configdict[master_key]["KCONFIG_OPTIONS"] +os.environ["PYTHONPATH"] = "debian/lib/python" +retval = os.system(cmd) +if retval == 0: + print "Wrote %s kernel configuration to %s." % ( master_key, outfile ) + sys.exit(0) +else: + print "There was an error extracting the Debian kernel config." 
+ sys.exit(1) + diff --git a/sys-kernel/debian-sources-lts/files/debian-sources-2.6.32.30-bridgemac.patch b/sys-kernel/debian-sources-lts/files/debian-sources-2.6.32.30-bridgemac.patch new file mode 100644 index 00000000..b092c15a --- /dev/null +++ b/sys-kernel/debian-sources-lts/files/debian-sources-2.6.32.30-bridgemac.patch @@ -0,0 +1,15 @@ +diff -urN linux/net/bridge/br_stp_if.c drobtmp/net/bridge/br_stp_if.c +--- linux/net/bridge/br_stp_if.c 2009-06-20 01:46:25.000000000 -0600 ++++ drobtmp/net/bridge/br_stp_if.c 2009-06-27 16:47:57.000000000 -0600 +@@ -163,10 +163,7 @@ + struct net_bridge_port *p; + + list_for_each_entry(p, &br->port_list, list) { +- if (addr == br_mac_zero || +- memcmp(p->dev->dev_addr, addr, ETH_ALEN) < 0) +- addr = p->dev->dev_addr; +- ++ addr = p->dev->dev_addr; + } + + if (compare_ether_addr(br->bridge_id.addr, addr)) diff --git a/sys-kernel/debian-sources-lts/files/debian-sources-2.6.38.3-bridgemac.patch b/sys-kernel/debian-sources-lts/files/debian-sources-2.6.38.3-bridgemac.patch new file mode 100644 index 00000000..b092c15a --- /dev/null +++ b/sys-kernel/debian-sources-lts/files/debian-sources-2.6.38.3-bridgemac.patch @@ -0,0 +1,15 @@ +diff -urN linux/net/bridge/br_stp_if.c drobtmp/net/bridge/br_stp_if.c +--- linux/net/bridge/br_stp_if.c 2009-06-20 01:46:25.000000000 -0600 ++++ drobtmp/net/bridge/br_stp_if.c 2009-06-27 16:47:57.000000000 -0600 +@@ -163,10 +163,7 @@ + struct net_bridge_port *p; + + list_for_each_entry(p, &br->port_list, list) { +- if (addr == br_mac_zero || +- memcmp(p->dev->dev_addr, addr, ETH_ALEN) < 0) +- addr = p->dev->dev_addr; +- ++ addr = p->dev->dev_addr; + } + + if (compare_ether_addr(br->bridge_id.addr, addr)) diff --git a/sys-kernel/debian-sources-lts/metadata.xml b/sys-kernel/debian-sources-lts/metadata.xml new file mode 100644 index 00000000..bd7d6111 --- /dev/null +++ b/sys-kernel/debian-sources-lts/metadata.xml @@ -0,0 +1,6 @@ + +funtoo + + funtoo-dev@googlegroups.com + + diff --git a/sys-kernel/debian-sources/Manifest b/sys-kernel/debian-sources/Manifest new file mode 100644 index 00000000..97775aec --- /dev/null +++ b/sys-kernel/debian-sources/Manifest @@ -0,0 +1,16 @@ +DIST linux_3.10.11-1.debian.tar.xz 750708 SHA256 5597033846bd3f992712f20e261062e1ac2320897594578bfab731702495ee52 SHA512 14afb60757457e3070766cbe97200987a9b9dc9d7a5d32077886753599cdeaf6a35d8ae3a6f9a39371633c11e8cc20942cbf4be6ac5f40ea49241df9cc3060a7 WHIRLPOOL d571f4ee852af76b79b7db2349227522e86f59ba4562f43c00a5a1cfe5067eca9e857a441cc695095968ebf74cb612ef9da9db4ee3f2bd54a5c6a08a70da123b +DIST linux_3.10.11.orig.tar.xz 73944292 SHA256 c6337250fb9eaa258eefc37ee9f66d3c4fcf8db1e3b29e3c557e33f5df6a05c7 SHA512 e214a14f9ca70bdece4abb6ff82ee9b5e66694b8415d43d74455e6c31dbe4f39583ae0c7e14fc569bfb777131e34b5e2e35f819314b500f72cae80b67f3e6451 WHIRLPOOL 2bca020fade4ef2ebac5b96de957fbe5e69463dfc33c87b9f96259843125a9e2b9d1a3d2483e5450680ce400b1d2bea094dd7ac8ccf3964e31714bb2cfbb06e2 +DIST linux_3.11.10-1.debian.tar.xz 2688056 SHA256 9669805bb50cef3f89854b81f74de371391db798d700c3ea461b63132bc3e586 SHA512 42092c21d73e8f8d82688ec0ae1029324ebdb5e7814de23393986f3d917d9134bf043ee0ab96bb5d517ae1fc63165b80179f1afd14eecdab1ce4fb8e86ad0d51 WHIRLPOOL 23ddf48f55cb3dcef82bcae6ca35cd4bf4f1edf17bc9e31fdfb6730bce49b77d716c138973ec427455f2f2094d8011d383165d3a737052bbde1d1f43f5682843 +DIST linux_3.11.10.orig.tar.xz 75892436 SHA256 23f3392aa9d97e514e892b91691dba67d2cef9a064d49c0e4f90a12b9e3a8331 SHA512 
5cb32460ab1d331c267723c2693659c66c030aac218bcefdcefde54a8af4eaa24b795fac067b9bf7fd17409fcbb7783787ef5389054ce44d0c327017ee7cfb87 WHIRLPOOL 2fe53bdeaae503b3d40ea4bb7b9f18f9ade47f99d72b6d4cedc74dc6431d649b999a7ae388daf88b6de8d8816d60d9f9de8a81460779aadd16463e47c511c426 +DIST linux_3.12.3-1~exp1.debian.tar.xz 770364 SHA256 34fdb9d6d368665a3fe468058c794058624d14db545ceefc8bf092bd11191609 SHA512 52c1a5563ef89b90249b27692b2a6c84fa2e4fd711c3563376a830bbafba280a22fef237b5eaedc01ccd87625a3a40450cd355497df5f946e9564d657a400077 WHIRLPOOL 624f91fe66f25090ab2976a075f9f26edc692fede685a07bf4d01c76b5134c29862602940b6062b22cc27f61a76b0372aa6b5a535607395da05f635596ea3c37 +DIST linux_3.12.3.orig.tar.xz 77289676 SHA256 52991a7699ace36517e9bca4442db0b1ad736f169d24186deaf4b133cb17816d SHA512 760c8de39431b1b34e76d887bd8747807fdbff0cdf65fb28847625151be2abc632ee3a0b30caefce1f3723ea4756fa51dbffb93590f5a47865e4cc5e93139e8e WHIRLPOOL a3aadb0210b72749c7a4d23d47eb62fc371d173812eb043b638c43af26c81da9ee6d0afa6a76678f892cc5376cbf007b5794afafa9560b2fb85890763146a13d +DIST linux_3.2.29-1.debian.tar.xz 955952 SHA256 b421f738632fd0b85c6686313daea7e8cebd4d218af4f92a4e6327449d51f4d6 +DIST linux_3.2.29.orig.tar.xz 65725120 SHA256 e9fdd2a7f774188974f5e74a68a112e563763e0c83db7a8411d5d3bb4bee1213 +DIST linux_3.2.35-2.debian.tar.xz 2483540 SHA256 8ae153d652cb0a870128d202fe6cd93d89f2c70996217355ca5681b62cce37fb SHA512 b2e6e0fcbac4e2b4a0cfc958535c13f22a20387b557995e7e036b79f6d29061fbe79eddfc5de1e3abe200b4c765637b19773ab8e5c61f3842c449a3dc3a49f85 WHIRLPOOL 8b30cadfcdaad17fc73d9fb567c3f68abac0f9b4ae778ed500bd601edda654c3ba6dd498f6b7df77ab4ae55f5c2b8475134b39cef7ee818872dd19b915f6dd0d +DIST linux_3.2.35.orig.tar.xz 65831540 SHA256 ea4c76c3820f9d7a07c7beb9f631a07d04613908c5f670afa200d677fd5f8538 SHA512 f2f931dc1169af7b19d4b9e88935297ee72b173b4fc946a8932f8599bb43164c23f7de4da2468319fa5991de7347b93c1b4a5a727c5d0b98db68e535cc1cedbe WHIRLPOOL 9c4e901c93e89bdbc4da45391a830f90839164cd3a565a7e836d121a5fe879b1611e7993a7061037caa5639c00fd0de98963934615cefb897895171668aa0641 +DIST linux_3.2.39-1.debian.tar.xz 3188844 SHA256 5332b4729956b341230cbbaff06fda0c51347e68a054c791e35a10898143ff13 SHA512 6c4aaba734da7193b33e5c2e49d4a771296f870e40570cbad79e253c6f85f551d29cb556f05851e18f8e474af23165de4bc921ef56037872597283d703e467e3 WHIRLPOOL 330d65b6bccf0de85bc9bc38c98a94bb5c41d9266cbc5abc48c23d9eb5e077dac25a480fc51c1e41187a7e5382d9194a167bcb8a7accc5f232d7ba5b1b16daf7 +DIST linux_3.2.39.orig.tar.xz 65857136 SHA256 663eb090fc2d7d5a3d0dc613a662baf44c5e0057a9e99b404299ee25546e7a91 SHA512 fae785b28689f1fac6741806136939a1d00a23fda07e7106259db14755a89bb35979c5f9792d48a969db547b9a0058bf824a9d1012059fbe57b128d5d2179fcf WHIRLPOOL 22fa44fc16b537d2140e7ef7bdff60b9d828129d8c3370b1260b576e1b40eb8052d41867a0bc229e8116a7fdb09c54bdcdbf49d07300cad9f793d4ede3377a33 +DIST linux_3.2.41-2+deb7u2.debian.tar.xz 3190452 SHA256 29400c7b48e78c18dcc05c19bbbbf42d229d6eaed6efd4baeb1eb4f168f17f30 SHA512 dc3b61a0bd843f53362532cef9aba4f50ee65141f77569d30838baf9cbf4ab154acb5dea018355c759dc0ec88db41c4cc03ae0831b28e7d8e0399f71f997a5bd WHIRLPOOL 22acf212881f4df1338f7b1bf4f5e3f4411ad19370b21cd6ea172b0ef25e84e632ad954992bb35da568ee384b4d3a9dfa3c56afdb7e4e2c8926a002d023cbcc6 +DIST linux_3.2.41.orig.tar.xz 65857856 SHA256 a74577ed87d53af9d880c1a11c0be50d44edbcaeb8b0db54f3d1dafbd3dcc386 SHA512 2c5c78d6ad5080e587ed03dcab328b7c7ae3b9b0fd2776196b4ba1519e445a3996cb70a0293ecedacc5713b61ca76935d19af37fc3d5a956cd21bc0d08e877e4 WHIRLPOOL 
5a9b2e67d87e8b662a093de1148953b2818047112a9d9588286aa508393538dbc018314c2302069f06a8b3cf2046abd93b67db6d3cf7de339ca77ffbbb330abe +DIST linux_3.2.51-1.debian.tar.xz 3260368 SHA256 6a2d6f80b886fce43df8092e3ba0f423190872a763c75b058f93133d9cbad8a0 SHA512 6966728af759fd86a4613ec1b8c33771243ab2d83aa66848137d91971820012f83a3b7084c2eaaee85ed790bb8b384794d283fe00d4ae54efb65cb0204f2386f WHIRLPOOL 4731a63f02d4895edc51c4c693772ffd3361ec5236d3d3a1b3c8453e3708ace33e0ca0a3bd424705f264458c0c1ff6f0ed4ddf57d12b6d75c1d9ca3fc0a32d8c +DIST linux_3.2.51.orig.tar.xz 65876468 SHA256 a6f1aa8a0d5addc805e35d94457280975c5162990fb2f2a82de061d55932a3a3 SHA512 1141957380ecdf21358fee45f34e161be1b5822cdd625cab82413e588d0adaedb58bcbbec914b6ee2624a63cb470cc88d3f0cb6ee78f21178ad6c6fce49a0ada WHIRLPOOL e376767227757235954e238a94261482b4daa52823166ebf32c43504ad24d62ce9430b50418ff1f70bac1c0c247786d5f6f8c79f0b283c3a81369a918a78d0f7 diff --git a/sys-kernel/debian-sources/debian-sources-3.10.11.ebuild b/sys-kernel/debian-sources/debian-sources-3.10.11.ebuild new file mode 100644 index 00000000..02039505 --- /dev/null +++ b/sys-kernel/debian-sources/debian-sources-3.10.11.ebuild @@ -0,0 +1,145 @@ +# Distributed under the terms of the GNU General Public License v2 + +EAPI=3 + +inherit eutils mount-boot + +SLOT=$PVR +CKV=3.10.11 +KV_FULL=${PN}-${PVR} +EXTRAVERSION=-1 +KERNEL_ARCHIVE="linux_${PV}.orig.tar.xz" +PATCH_ARCHIVE="linux_${PV}${EXTRAVERSION}.debian.tar.xz" +RESTRICT="binchecks strip mirror" +# based on : http://packages.ubuntu.com/maverick/linux-image-2.6.35-22-server +LICENSE="GPL-2" +KEYWORDS="*" +IUSE="binary rt" +DEPEND="binary? ( >=sys-kernel/genkernel-3.4.40.7 )" +RDEPEND="binary? ( || ( >=sys-fs/udev-160 >=virtual/udev-171 ) )" +DESCRIPTION="Debian Sources (and optional binary kernel)" +HOMEPAGE="http://www.debian.org" +SRC_URI="http://ftp.osuosl.org/pub/funtoo/distfiles/${KERNEL_ARCHIVE} http://ftp.osuosl.org/pub/funtoo/distfiles/${PATCH_ARCHIVE}" +S="$WORKDIR/linux-${CKV}" + +get_patch_list() { + [[ -z "${1}" ]] && die "No patch series file specified" + local patch_series="${1}" + while read line ; do + if [[ "${line:0:1}" != "#" ]] ; then + echo "${line}" + fi + done < "${patch_series}" +} + +pkg_setup() { + export REAL_ARCH="$ARCH" + unset ARCH; unset LDFLAGS #will interfere with Makefile if set +} + +src_prepare() { + + cd ${S} + for debpatch in $( get_patch_list "${WORKDIR}/debian/patches/series" ); do + epatch -p1 "${WORKDIR}/debian/patches/${debpatch}" + done + + if use rt ; then + for rtpatch in $( get_patch_list "${WORKDIR}/debian/patches/series-rt" ) ; do + epatch -p1 "${WORKDIR}/debian/patches/${rtpatch}" + done + fi + + # end of debian-specific stuff... + + sed -i -e "s:^\(EXTRAVERSION =\).*:\1 ${EXTRAVERSION}:" Makefile || die + sed -i -e 's:#export\tINSTALL_PATH:export\tINSTALL_PATH:' Makefile || die + rm -f .config >/dev/null + cp -a "${WORKDIR}"/debian "${T}" + make -s mrproper || die "make mrproper failed" + #make -s include/linux/version.h || die "make include/linux/version.h failed" + cd ${S} + + cp -aR "${WORKDIR}"/debian "${S}"/debian + # xfs fix for 3.10.11, see FL-823: + epatch ${FILESDIR}/debian-sources-3.10.11-xfs-libcrc32c-fix.patch + + local opts + use rt && opts="rt" || opts="standard" + local myarch="amd64" + [ "$REAL_ARCH" = "x86" ] && myarch="i386" && opts="$opts 686-pae" + cp ${FILESDIR}/config-extract . 
|| die + chmod +x config-extract || die + ./config-extract ${myarch} ${opts} || die + cp .config ${T}/config || die + make -s mrproper || die "make mrproper failed" + #make -s include/linux/version.h || die "make include/linux/version.h failed" +} + +src_compile() { + ! use binary && return + install -d ${WORKDIR}/out/{lib,boot} + install -d ${T}/{cache,twork} + install -d $WORKDIR/build $WORKDIR/out/lib/firmware + genkernel \ + --no-save-config \ + --kernel-config="$T/config" \ + --kernname="${PN}" \ + --build-src="$S" \ + --build-dst=${WORKDIR}/build \ + --makeopts="${MAKEOPTS}" \ + --firmware-dst=${WORKDIR}/out/lib/firmware \ + --cachedir="${T}/cache" \ + --tempdir="${T}/twork" \ + --logfile="${WORKDIR}/genkernel.log" \ + --bootdir="${WORKDIR}/out/boot" \ + --lvm \ + --luks \ + --mdadm \ + --iscsi \ + --module-prefix="${WORKDIR}/out" \ + all || die "genkernel failed" +} + +src_install() { + # copy sources into place: + dodir /usr/src + cp -a ${S} ${D}/usr/src/linux-${P} || die + cd ${D}/usr/src/linux-${P} + # prepare for real-world use and 3rd-party module building: + make mrproper || die + cp ${T}/config .config || die + cp -a ${T}/debian debian || die + yes "" | make oldconfig || die + # if we didn't use genkernel, we're done. The kernel source tree is left in + # an unconfigured state - you can't compile 3rd-party modules against it yet. + use binary || return + make prepare || die + make scripts || die + # OK, now the source tree is configured to allow 3rd-party modules to be + # built against it, since we want that to work since we have a binary kernel + # built. + cp -a ${WORKDIR}/out/* ${D}/ || die "couldn't copy output files into place" + # module symlink fixup: + rm -f ${D}/lib/modules/*/source || die + rm -f ${D}/lib/modules/*/build || die + cd ${D}/lib/modules + # module strip: + find -iname *.ko -exec strip --strip-debug {} \; + # back to the symlink fixup: + local moddir="$(ls -d [23]*)" + ln -s /usr/src/linux-${P} ${D}/lib/modules/${moddir}/source || die + ln -s /usr/src/linux-${P} ${D}/lib/modules/${moddir}/build || die + + # Fixes FL-14 + cp "${WORKDIR}/build/System.map" "${D}/usr/src/linux-${P}/" || die + cp "${WORKDIR}/build/Module.symvers" "${D}/usr/src/linux-${P}/" || die + +} + +pkg_postinst() { + if [ ! -e ${ROOT}usr/src/linux ] + then + ln -s linux-${P} ${ROOT}usr/src/linux + fi +} diff --git a/sys-kernel/debian-sources/debian-sources-3.12.3.ebuild b/sys-kernel/debian-sources/debian-sources-3.12.3.ebuild new file mode 100644 index 00000000..86dae42e --- /dev/null +++ b/sys-kernel/debian-sources/debian-sources-3.12.3.ebuild @@ -0,0 +1,145 @@ +# Distributed under the terms of the GNU General Public License v2 + +EAPI=3 + +inherit eutils mount-boot + +SLOT=$PVR +CKV=3.12.3 +KV_FULL=${PN}-${PVR} +EXTRAVERSION=-1~exp1 +KERNEL_ARCHIVE="linux_${PV}.orig.tar.xz" +PATCH_ARCHIVE="linux_${PV}${EXTRAVERSION}.debian.tar.xz" +RESTRICT="binchecks strip mirror" +# based on : http://packages.ubuntu.com/maverick/linux-image-2.6.35-22-server +LICENSE="GPL-2" +KEYWORDS="*" +IUSE="binary rt" +DEPEND="binary? ( >=sys-kernel/genkernel-3.4.40.7 )" +RDEPEND="binary? 
( || ( >=sys-fs/udev-160 >=virtual/udev-171 ) )" +DESCRIPTION="Debian Sources (and optional binary kernel)" +HOMEPAGE="http://www.debian.org" +SRC_URI="http://ftp.osuosl.org/pub/funtoo/distfiles/${KERNEL_ARCHIVE} http://ftp.osuosl.org/pub/funtoo/distfiles/${PATCH_ARCHIVE}" +S="$WORKDIR/linux-${CKV}" + +get_patch_list() { + [[ -z "${1}" ]] && die "No patch series file specified" + local patch_series="${1}" + while read line ; do + if [[ "${line:0:1}" != "#" ]] ; then + echo "${line}" + fi + done < "${patch_series}" +} + +pkg_setup() { + export REAL_ARCH="$ARCH" + unset ARCH; unset LDFLAGS #will interfere with Makefile if set +} + +src_prepare() { + + cd ${S} + for debpatch in $( get_patch_list "${WORKDIR}/debian/patches/series" ); do + epatch -p1 "${WORKDIR}/debian/patches/${debpatch}" + done + + if use rt ; then + for rtpatch in $( get_patch_list "${WORKDIR}/debian/patches/series-rt" ) ; do + epatch -p1 "${WORKDIR}/debian/patches/${rtpatch}" + done + fi + + # end of debian-specific stuff... + + sed -i -e "s:^\(EXTRAVERSION =\).*:\1 ${EXTRAVERSION}:" Makefile || die + sed -i -e 's:#export\tINSTALL_PATH:export\tINSTALL_PATH:' Makefile || die + rm -f .config >/dev/null + cp -a "${WORKDIR}"/debian "${T}" + make -s mrproper || die "make mrproper failed" + #make -s include/linux/version.h || die "make include/linux/version.h failed" + cd ${S} + cp -aR "${WORKDIR}"/debian "${S}"/debian + + ## XFS LIBCRC kernel config fixes, FL-823 + epatch ${FILESDIR}/debian-sources-3.12.3-xfs-libcrc32c-fix.patch + + local opts + use rt && opts="rt" || opts="standard" + local myarch="amd64" + [ "$REAL_ARCH" = "x86" ] && myarch="i386" && opts="$opts 686-pae" + cp ${FILESDIR}/config-extract . || die + chmod +x config-extract || die + ./config-extract ${myarch} ${opts} || die + cp .config ${T}/config || die + make -s mrproper || die "make mrproper failed" + #make -s include/linux/version.h || die "make include/linux/version.h failed" +} + +src_compile() { + ! use binary && return + install -d ${WORKDIR}/out/{lib,boot} + install -d ${T}/{cache,twork} + install -d $WORKDIR/build $WORKDIR/out/lib/firmware + genkernel \ + --no-save-config \ + --kernel-config="$T/config" \ + --kernname="${PN}" \ + --build-src="$S" \ + --build-dst=${WORKDIR}/build \ + --makeopts="${MAKEOPTS}" \ + --firmware-dst=${WORKDIR}/out/lib/firmware \ + --cachedir="${T}/cache" \ + --tempdir="${T}/twork" \ + --logfile="${WORKDIR}/genkernel.log" \ + --bootdir="${WORKDIR}/out/boot" \ + --lvm \ + --luks \ + --mdadm \ + --iscsi \ + --module-prefix="${WORKDIR}/out" \ + all || die "genkernel failed" +} + +src_install() { + # copy sources into place: + dodir /usr/src + cp -a ${S} ${D}/usr/src/linux-${P} || die + cd ${D}/usr/src/linux-${P} + # prepare for real-world use and 3rd-party module building: + make mrproper || die + cp ${T}/config .config || die + cp -a ${T}/debian debian || die + yes "" | make oldconfig || die + # if we didn't use genkernel, we're done. The kernel source tree is left in + # an unconfigured state - you can't compile 3rd-party modules against it yet. + use binary || return + make prepare || die + make scripts || die + # OK, now the source tree is configured to allow 3rd-party modules to be + # built against it, since we want that to work since we have a binary kernel + # built. 
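+ # ${WORKDIR}/out holds the genkernel output: the kernel image under boot/ and
+ # the modules and firmware under lib/ (see the --bootdir, --module-prefix and
+ # --firmware-dst options passed in src_compile above).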
+ cp -a ${WORKDIR}/out/* ${D}/ || die "couldn't copy output files into place" + # module symlink fixup: + rm -f ${D}/lib/modules/*/source || die + rm -f ${D}/lib/modules/*/build || die + cd ${D}/lib/modules + # module strip: + find -iname *.ko -exec strip --strip-debug {} \; + # back to the symlink fixup: + local moddir="$(ls -d [23]*)" + ln -s /usr/src/linux-${P} ${D}/lib/modules/${moddir}/source || die + ln -s /usr/src/linux-${P} ${D}/lib/modules/${moddir}/build || die + + # Fixes FL-14 + cp "${WORKDIR}/build/System.map" "${D}/usr/src/linux-${P}/" || die + cp "${WORKDIR}/build/Module.symvers" "${D}/usr/src/linux-${P}/" || die + +} + +pkg_postinst() { + if [ ! -e ${ROOT}usr/src/linux ] + then + ln -s linux-${P} ${ROOT}usr/src/linux + fi +} diff --git a/sys-kernel/debian-sources/files/config-extract b/sys-kernel/debian-sources/files/config-extract new file mode 100755 index 00000000..4860a645 --- /dev/null +++ b/sys-kernel/debian-sources/files/config-extract @@ -0,0 +1,219 @@ +#!/usr/bin/python2 + +import os,sys,re +import getopt + +re_head = re.compile('^binary-arch_(.*)_real::') +re_flav = re.compile('binary-arch-flavour') +re_item = re.compile("[A-Z_]*='[^']*'") + +try: + f=open("debian/rules.gen","r") +except: + print "Unable to open debian/rules.gen; can't continue." + sys.exit(1) +lines=f.readlines() +f.close() + +line=0 + +configlist = [] +configdict = {} + +# scan Debian rules.gen file and gather all variable data into a more useable format: + +while line < len(lines): + head_match = re_head.match(lines[line]) + if not head_match: + line += 1 + continue + config_name = head_match.group(1) + line += 1 + if not re_flav.findall(lines[line]): + continue + lsplit = re_item.findall(lines[line]) + groovydict = {} + for item in lsplit: + kv = item.split("=",1) + if len(kv) < 2: + continue + groovydict[kv[0]] = kv[1][1:-1] + configlist.append(config_name) + configdict[config_name] = groovydict + line += 1 + +# We will organize the arch, featureset and flavors into cascading lists so +# that we can present a nice clean chart of what's available to the user: + +archdict = {} + +for config in configlist: + cs = config.split("_") + if not cs[0] in archdict: + archdict[cs[0]] = { } + if cs[1] == "none": + cs[1] = None + if cs[1] not in archdict[cs[0]]: + archdict[cs[0]][cs[1]] = [] + archdict[cs[0]][cs[1]].append(cs[2]) + +arches = archdict.keys() +arches.sort() + +features = [ None ] +for arch in arches: + for flav in archdict[arch]: + if flav not in features: + features.append(flav) + +PROG="config-extract" +def usage(): + print """This work is free software. + +Copyright 2011 Funtoo Technologies. You can redistribute and/or modify it under +the terms of the GNU General Public License version 3 as published by the Free +Software Foundation. Alternatively you may (at your option) use any other +license that has been publicly approved for use with this program by Funtoo +Technologies (or its successors, if any.) + +usage: %s [options] arch [featureset] [subarch] + + -h --help print this usage and exit + -l --list list all available kernel configurations + -o --outfile specify kernel config outfile -- + defaults to .config in current directory + [featureset] defaults to "standard" if not specified + [subarch] defaults to the only one available; otherwise required + +This program was written by Daniel Robbins for Funtoo Linux, for the purpose of +easily and conveniently extracting Debian kernel configurations. To see a nice +list of all available kernel configurations, use the --list option. 
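+
+For example, "config-extract amd64" extracts the standard amd64 configuration;
+add a featureset (and, where needed, a subarch) to select one of the variants
+shown by --list.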
+ +Debian's kernel configs are specified internally in arch_featureset_flavor +format, such as: "amd64_openvz_amd64". The featureset typically describes an +optional kernel configuration such as "xen" or "openvz", while the flavor in +Debian terminology typically refers to the sub-architecture of the CPU. + +When using this command, you must specify an arch. A featureset of "standard" is +assumed unless you specify one, and by default this program will pick the only +available subarch if there is only one to choose from. If not, you will need to +pick one (and the program will remind you to do this.) + +The kernel configuration will be written to ".config" in the current directory, +or the location you specified using the -o/--outfile option. +""" % PROG + sys.exit(2) + +try: + opts, args = getopt.getopt(sys.argv[1:], "o:hl", ["help", "list","outfile="]) +except getopt.GetoptError, err: + print str(err) + usage() + +mode="run" +outfile=None +for o,a in opts: + if o in ("-h", "--help"): + usage() + elif o in ("-l", "--list"): + mode="list" + elif o in ("-o", "--outfile"): + outfile = a + else: + assert False, "Unhandled option" +if mode == "run": + if len(args) < 1 or len(args) > 3: + if len(args) == 0: + print "Please specify an arch - one of: "+", ".join(arches) + sys.exit(2) + else: + print "Too many arguments." + usage() + arch = args[0] + if outfile == None: + outfile = os.path.join(os.getcwd(),".config") + featureset = None + subarch = None + if len(args) == 3: + featureset = args[1] + subarch = args[2] + elif len(args) == 2: + featureset = args[1] + if featureset == "standard": + featureset = None + +# print out optimized list of available kernel configurations: + +if mode=="list": + print + for flav in features: + label = flav + if label == None: + label = "standard" + print "====== %s featureset ======" % label + print + for arch in arches: + if flav in archdict[arch]: + if len(archdict[arch][flav]) == 1: + print arch.rjust(12) + else: + flavlist = archdict[arch][flav] + flavlist.sort() + variants = ", ".join(flavlist) + print arch.rjust(12) + ":", variants + print + sys.exit(0) + +# featureset defaults to None. + +if featureset not in archdict[arch]: + print "Error: There is no '%s' featureset kernel config for arch '%s'. Exiting." % ( featureset, arch ) + print archdict[arch] + sys.exit(2) + +# If a subarch is not specified (None), then we will auto-pick the subarch if only one is available. +# Debian often has an "amd64" subarch for the "amd64" arch, rather than "none" as I might expect: + +if subarch == None: + if len(archdict[arch][featureset]) == 1: + subarch = archdict[arch][featureset][0] + else: + print "Error: there is more than one 'sub-architecture' for this arch." + print "Please specify [arch] [featureset] [subarch], with one of these subarches:" + print ", ".join(archdict[arch][featureset]) + sys.exit(2) +else: + if subarch not in archdict[arch][featureset]: + print "Error: specified sub-architecture '%s' is not available for this arch. Exiting." % subarch + sys.exit(2) + +# We've done all our arg processing, now let's construct the master_key that we will use to look up the +# proper settings to pass to Debian's debian/bin/kconfig.py command: + +master_key=arch +if featureset == None: + master_key += "_none" +else: + master_key += "_%s" % featureset +if subarch == None: + master_key += "_none" +else: + master_key += "_%s" % subarch +if master_key not in configdict: + print "Master key lookup failed; can't continue. Please report this bug." 
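+ # master_key is assembled from the arch/featureset/subarch arguments, so a
+ # failed lookup means debian/rules.gen defines no matching binary-arch rule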
+ sys.exit(1) +if "KCONFIG" not in configdict[master_key]: + print "Unable to find KCONFIG option; can't continue. Please report this bug." + sys.exit(1) +cmd = "python2 debian/bin/kconfig.py '%s' %s" % ( outfile, configdict[master_key]["KCONFIG"] ) +if "KCONFIG_OPTIONS" in configdict[master_key]: + cmd += " %s" % configdict[master_key]["KCONFIG_OPTIONS"] +os.environ["PYTHONPATH"] = "debian/lib/python" +retval = os.system(cmd) +if retval == 0: + print "Wrote %s kernel configuration to %s." % ( master_key, outfile ) + sys.exit(0) +else: + print "There was an error extracting the Debian kernel config." + sys.exit(1) + diff --git a/sys-kernel/debian-sources/files/debian-sources-3.10.11-xfs-libcrc32c-fix.patch b/sys-kernel/debian-sources/files/debian-sources-3.10.11-xfs-libcrc32c-fix.patch new file mode 100644 index 00000000..a20fecdc --- /dev/null +++ b/sys-kernel/debian-sources/files/debian-sources-3.10.11-xfs-libcrc32c-fix.patch @@ -0,0 +1,21 @@ +diff -urN linux-debian-sources-3.10.11/debian/config/config linux-debian-sources-3.10.11-fixed/debian/config/config +--- linux-debian-sources-3.10.11/debian/config/config 2013-08-17 13:57:25.000000000 -0600 ++++ linux-debian-sources-3.10.11-fixed/debian/config/config 2013-12-10 15:58:46.512296124 -0700 +@@ -4183,7 +4183,7 @@ + ## + ## file: fs/xfs/Kconfig + ## +-CONFIG_XFS_FS=m ++CONFIG_XFS_FS=y + CONFIG_XFS_QUOTA=y + CONFIG_XFS_POSIX_ACL=y + CONFIG_XFS_RT=y +@@ -4360,7 +4360,7 @@ + CONFIG_CRC_ITU_T=m + CONFIG_CRC32=y + CONFIG_CRC7=m +-CONFIG_LIBCRC32C=m ++CONFIG_LIBCRC32C=y + + ## + ## file: lib/Kconfig.debug diff --git a/sys-kernel/debian-sources/files/debian-sources-3.12.3-xfs-libcrc32c-fix.patch b/sys-kernel/debian-sources/files/debian-sources-3.12.3-xfs-libcrc32c-fix.patch new file mode 100644 index 00000000..ca0ee89c --- /dev/null +++ b/sys-kernel/debian-sources/files/debian-sources-3.12.3-xfs-libcrc32c-fix.patch @@ -0,0 +1,21 @@ +diff -Nuar -Nuar debian/config/config debian-fixed/config/config +--- debian/config/config 2013-12-05 01:48:42.000000000 +0000 ++++ debian-fixed/config/config 2013-12-14 07:39:56.017237225 +0000 +@@ -4425,7 +4425,7 @@ + ## + ## file: fs/xfs/Kconfig + ## +-CONFIG_XFS_FS=m ++CONFIG_XFS_FS=y + CONFIG_XFS_QUOTA=y + CONFIG_XFS_POSIX_ACL=y + CONFIG_XFS_RT=y +@@ -4614,7 +4614,7 @@ + CONFIG_CRC32=y + # CONFIG_CRC32_SELFTEST is not set + CONFIG_CRC7=m +-CONFIG_LIBCRC32C=m ++CONFIG_LIBCRC32C=y + # CONFIG_DDR is not set + + ## diff --git a/sys-kernel/debian-sources/metadata.xml b/sys-kernel/debian-sources/metadata.xml new file mode 100644 index 00000000..403aaa88 --- /dev/null +++ b/sys-kernel/debian-sources/metadata.xml @@ -0,0 +1,10 @@ + + funtoo + + funtoo-dev@googlegroups.com + + + Builds and installs kernel automatically + Applies the CONFIG_PREEMPT_RT patch series + + diff --git a/sys-kernel/kogaion-sources/Manifest b/sys-kernel/kogaion-sources/Manifest new file mode 100644 index 00000000..58c1ec81 --- /dev/null +++ b/sys-kernel/kogaion-sources/Manifest @@ -0,0 +1 @@ +DIST linux-3.10.25.tar.xz 73225500 SHA256 a6c46d88deaf4f0af9038d2d23b4ed617f901c5de38f1886b865c432f22ac7cd SHA512 843b117c44ce1622d07b7e566ab80a931bfc7ed9cf6f2837e62e0dc3e1d87ef504d8566ceed9cd8d68d6616b9495de7e8dfc59c34af20a61b99260694ae31b93 WHIRLPOOL cedfd73a3bb5ea680b59e18f5d8c714bf8a71e0441a29809c0db1bf79d16e167fc524f0387d3cd6044ff07132d1b7d168690f62aca1241e1a4149658e33ca5e6 diff --git a/sys-kernel/kogaion-sources/files/desktop/0001-block-cgroups-kconfig-build-bits-for-BFQ-v7-3.10.patch 
b/sys-kernel/kogaion-sources/files/desktop/0001-block-cgroups-kconfig-build-bits-for-BFQ-v7-3.10.patch new file mode 100644 index 00000000..f92978cf --- /dev/null +++ b/sys-kernel/kogaion-sources/files/desktop/0001-block-cgroups-kconfig-build-bits-for-BFQ-v7-3.10.patch @@ -0,0 +1,103 @@ +From 3ded69bee018e94b1cf5e13af9ff557f0f61ab30 Mon Sep 17 00:00:00 2001 +From: Arianna Avanzini +Date: Mon, 27 Jan 2014 23:50:08 +0100 +Subject: [PATCH 1/3] block: cgroups, kconfig, build bits for BFQ-v7-3.10 + +Update Kconfig.iosched and do the related Makefile changes to include +kernel configuration options for BFQ. Also add the bfqio controller +to the cgroups subsystem. + +Signed-off-by: Paolo Valente +Signed-off-by: Arianna Avanzini +--- + block/Kconfig.iosched | 32 ++++++++++++++++++++++++++++++++ + block/Makefile | 1 + + include/linux/cgroup_subsys.h | 6 ++++++ + 3 files changed, 39 insertions(+) + +diff --git a/block/Kconfig.iosched b/block/Kconfig.iosched +index 421bef9..8f552ba 100644 +--- a/block/Kconfig.iosched ++++ b/block/Kconfig.iosched +@@ -39,6 +39,27 @@ config CFQ_GROUP_IOSCHED + ---help--- + Enable group IO scheduling in CFQ. + ++config IOSCHED_BFQ ++ tristate "BFQ I/O scheduler" ++ default n ++ ---help--- ++ The BFQ I/O scheduler tries to distribute bandwidth among ++ all processes according to their weights. ++ It aims at distributing the bandwidth as desired, independently of ++ the disk parameters and with any workload. It also tries to ++ guarantee low latency to interactive and soft real-time ++ applications. If compiled built-in (saying Y here), BFQ can ++ be configured to support hierarchical scheduling. ++ ++config CGROUP_BFQIO ++ bool "BFQ hierarchical scheduling support" ++ depends on CGROUPS && IOSCHED_BFQ=y ++ default n ++ ---help--- ++ Enable hierarchical scheduling in BFQ, using the cgroups ++ filesystem interface. The name of the subsystem will be ++ bfqio. ++ + choice + prompt "Default I/O scheduler" + default DEFAULT_CFQ +@@ -52,6 +73,16 @@ choice + config DEFAULT_CFQ + bool "CFQ" if IOSCHED_CFQ=y + ++ config DEFAULT_BFQ ++ bool "BFQ" if IOSCHED_BFQ=y ++ help ++ Selects BFQ as the default I/O scheduler which will be ++ used by default for all block devices. ++ The BFQ I/O scheduler aims at distributing the bandwidth ++ as desired, independently of the disk parameters and with ++ any workload. It also tries to guarantee low latency to ++ interactive and soft real-time applications. 
++ + config DEFAULT_NOOP + bool "No-op" + +@@ -61,6 +92,7 @@ config DEFAULT_IOSCHED + string + default "deadline" if DEFAULT_DEADLINE + default "cfq" if DEFAULT_CFQ ++ default "bfq" if DEFAULT_BFQ + default "noop" if DEFAULT_NOOP + + endmenu +diff --git a/block/Makefile b/block/Makefile +index 39b76ba..c0d20fa 100644 +--- a/block/Makefile ++++ b/block/Makefile +@@ -15,6 +15,7 @@ obj-$(CONFIG_BLK_DEV_THROTTLING) += blk-throttle.o + obj-$(CONFIG_IOSCHED_NOOP) += noop-iosched.o + obj-$(CONFIG_IOSCHED_DEADLINE) += deadline-iosched.o + obj-$(CONFIG_IOSCHED_CFQ) += cfq-iosched.o ++obj-$(CONFIG_IOSCHED_BFQ) += bfq-iosched.o + + obj-$(CONFIG_BLOCK_COMPAT) += compat_ioctl.o + obj-$(CONFIG_BLK_DEV_INTEGRITY) += blk-integrity.o +diff --git a/include/linux/cgroup_subsys.h b/include/linux/cgroup_subsys.h +index 6e7ec64..e5e6b0d 100644 +--- a/include/linux/cgroup_subsys.h ++++ b/include/linux/cgroup_subsys.h +@@ -84,3 +84,9 @@ SUBSYS(bcache) + #endif + + /* */ ++ ++#if IS_SUBSYS_ENABLED(CONFIG_CGROUP_BFQIO) ++SUBSYS(bfqio) ++#endif ++ ++/* */ +-- +1.8.5.2 + diff --git a/sys-kernel/kogaion-sources/files/desktop/0002-block-introduce-the-BFQ-v7-I-O-sched-for-3.10.patch b/sys-kernel/kogaion-sources/files/desktop/0002-block-introduce-the-BFQ-v7-I-O-sched-for-3.10.patch new file mode 100644 index 00000000..0a2c5079 --- /dev/null +++ b/sys-kernel/kogaion-sources/files/desktop/0002-block-introduce-the-BFQ-v7-I-O-sched-for-3.10.patch @@ -0,0 +1,5969 @@ +From d40506359ff7f890adac4bd75541de73044a121e Mon Sep 17 00:00:00 2001 +From: Paolo Valente +Date: Thu, 9 May 2013 19:10:02 +0200 +Subject: [PATCH 2/3] block: introduce the BFQ-v7 I/O sched for 3.10 + +Add the BFQ-v7 I/O scheduler to 3.10. +The general structure is borrowed from CFQ, as much of the code for +handling I/O contexts Over time, several useful features have been +ported from CFQ as well (details in the changelog in README.BFQ). A +(bfq_)queue is associated to each task doing I/O on a device, and each +time a scheduling decision has to be made a queue is selected and served +until it expires. + + - Slices are given in the service domain: tasks are assigned + budgets, measured in number of sectors. Once got the disk, a task + must however consume its assigned budget within a configurable + maximum time (by default, the maximum possible value of the + budgets is automatically computed to comply with this timeout). + This allows the desired latency vs "throughput boosting" tradeoff + to be set. + + - Budgets are scheduled according to a variant of WF2Q+, implemented + using an augmented rb-tree to take eligibility into account while + preserving an O(log N) overall complexity. + + - A low-latency tunable is provided; if enabled, both interactive + and soft real-time applications are guaranteed a very low latency. + + - Latency guarantees are preserved also in the presence of NCQ. + + - Also with flash-based devices, a high throughput is achieved + while still preserving latency guarantees. + + - BFQ features Early Queue Merge (EQM), a sort of fusion of the + cooperating-queue-merging and the preemption mechanisms present + in CFQ. EQM is in fact a unified mechanism that tries to get a + sequential read pattern, and hence a high throughput, with any + set of processes performing interleaved I/O over a contiguous + sequence of sectors. + + - BFQ supports full hierarchical scheduling, exporting a cgroups + interface. Since each node has a full scheduler, each group can + be assigned its own weight. 
+ + - If the cgroups interface is not used, only I/O priorities can be + assigned to processes, with ioprio values mapped to weights + with the relation weight = IOPRIO_BE_NR - ioprio. + + - ioprio classes are served in strict priority order, i.e., lower + priority queues are not served as long as there are higher + priority queues. Among queues in the same class the bandwidth is + distributed in proportion to the weight of each queue. A very + thin extra bandwidth is however guaranteed to the Idle class, to + prevent it from starving. + +Signed-off-by: Paolo Valente +Signed-off-by: Arianna Avanzini +--- + block/bfq-cgroup.c | 885 ++++++++++++++ + block/bfq-ioc.c | 36 + + block/bfq-iosched.c | 3256 +++++++++++++++++++++++++++++++++++++++++++++++++++ + block/bfq-sched.c | 1077 +++++++++++++++++ + block/bfq.h | 612 ++++++++++ + 5 files changed, 5866 insertions(+) + create mode 100644 block/bfq-cgroup.c + create mode 100644 block/bfq-ioc.c + create mode 100644 block/bfq-iosched.c + create mode 100644 block/bfq-sched.c + create mode 100644 block/bfq.h + +diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c +new file mode 100644 +index 0000000..5a117ad +--- /dev/null ++++ b/block/bfq-cgroup.c +@@ -0,0 +1,885 @@ ++/* ++ * BFQ: CGROUPS support. ++ * ++ * Based on ideas and code from CFQ: ++ * Copyright (C) 2003 Jens Axboe ++ * ++ * Copyright (C) 2008 Fabio Checconi ++ * Paolo Valente ++ * ++ * Copyright (C) 2010 Paolo Valente ++ * ++ * Licensed under the GPL-2 as detailed in the accompanying COPYING.BFQ file. ++ */ ++ ++#ifdef CONFIG_CGROUP_BFQIO ++ ++static DEFINE_MUTEX(bfqio_mutex); ++ ++static bool bfqio_is_removed(struct cgroup *cgroup) ++{ ++ return test_bit(CGRP_REMOVED, &cgroup->flags); ++} ++ ++static struct bfqio_cgroup bfqio_root_cgroup = { ++ .weight = BFQ_DEFAULT_GRP_WEIGHT, ++ .ioprio = BFQ_DEFAULT_GRP_IOPRIO, ++ .ioprio_class = BFQ_DEFAULT_GRP_CLASS, ++}; ++ ++static inline void bfq_init_entity(struct bfq_entity *entity, ++ struct bfq_group *bfqg) ++{ ++ entity->weight = entity->new_weight; ++ entity->orig_weight = entity->new_weight; ++ entity->ioprio = entity->new_ioprio; ++ entity->ioprio_class = entity->new_ioprio_class; ++ entity->parent = bfqg->my_entity; ++ entity->sched_data = &bfqg->sched_data; ++} ++ ++static struct bfqio_cgroup *cgroup_to_bfqio(struct cgroup *cgroup) ++{ ++ return container_of(cgroup_subsys_state(cgroup, bfqio_subsys_id), ++ struct bfqio_cgroup, css); ++} ++ ++/* ++ * Search the bfq_group for bfqd into the hash table (by now only a list) ++ * of bgrp. Must be called under rcu_read_lock(). ++ */ ++static struct bfq_group *bfqio_lookup_group(struct bfqio_cgroup *bgrp, ++ struct bfq_data *bfqd) ++{ ++ struct bfq_group *bfqg; ++ void *key; ++ ++ hlist_for_each_entry_rcu(bfqg, &bgrp->group_data, group_node) { ++ key = rcu_dereference(bfqg->bfqd); ++ if (key == bfqd) ++ return bfqg; ++ } ++ ++ return NULL; ++} ++ ++static inline void bfq_group_init_entity(struct bfqio_cgroup *bgrp, ++ struct bfq_group *bfqg) ++{ ++ struct bfq_entity *entity = &bfqg->entity; ++ ++ /* ++ * If the weight of the entity has never been set via the sysfs ++ * interface, then bgrp->weight == 0. In this case we initialize ++ * the weight from the current ioprio value. Otherwise, the group ++ * weight, if set, has priority over the ioprio value. 
++ */ ++ if (bgrp->weight == 0) { ++ entity->new_weight = bfq_ioprio_to_weight(bgrp->ioprio); ++ entity->new_ioprio = bgrp->ioprio; ++ } else { ++ entity->new_weight = bgrp->weight; ++ entity->new_ioprio = bfq_weight_to_ioprio(bgrp->weight); ++ } ++ entity->orig_weight = entity->weight = entity->new_weight; ++ entity->ioprio = entity->new_ioprio; ++ entity->ioprio_class = entity->new_ioprio_class = bgrp->ioprio_class; ++ entity->my_sched_data = &bfqg->sched_data; ++} ++ ++static inline void bfq_group_set_parent(struct bfq_group *bfqg, ++ struct bfq_group *parent) ++{ ++ struct bfq_entity *entity; ++ ++ BUG_ON(parent == NULL); ++ BUG_ON(bfqg == NULL); ++ ++ entity = &bfqg->entity; ++ entity->parent = parent->my_entity; ++ entity->sched_data = &parent->sched_data; ++} ++ ++/** ++ * bfq_group_chain_alloc - allocate a chain of groups. ++ * @bfqd: queue descriptor. ++ * @cgroup: the leaf cgroup this chain starts from. ++ * ++ * Allocate a chain of groups starting from the one belonging to ++ * @cgroup up to the root cgroup. Stop if a cgroup on the chain ++ * to the root has already an allocated group on @bfqd. ++ */ ++static struct bfq_group *bfq_group_chain_alloc(struct bfq_data *bfqd, ++ struct cgroup *cgroup) ++{ ++ struct bfqio_cgroup *bgrp; ++ struct bfq_group *bfqg, *prev = NULL, *leaf = NULL; ++ ++ for (; cgroup != NULL; cgroup = cgroup->parent) { ++ bgrp = cgroup_to_bfqio(cgroup); ++ ++ bfqg = bfqio_lookup_group(bgrp, bfqd); ++ if (bfqg != NULL) { ++ /* ++ * All the cgroups in the path from there to the ++ * root must have a bfq_group for bfqd, so we don't ++ * need any more allocations. ++ */ ++ break; ++ } ++ ++ bfqg = kzalloc(sizeof(*bfqg), GFP_ATOMIC); ++ if (bfqg == NULL) ++ goto cleanup; ++ ++ bfq_group_init_entity(bgrp, bfqg); ++ bfqg->my_entity = &bfqg->entity; ++ ++ if (leaf == NULL) { ++ leaf = bfqg; ++ prev = leaf; ++ } else { ++ bfq_group_set_parent(prev, bfqg); ++ /* ++ * Build a list of allocated nodes using the bfqd ++ * filed, that is still unused and will be initialized ++ * only after the node will be connected. ++ */ ++ prev->bfqd = bfqg; ++ prev = bfqg; ++ } ++ } ++ ++ return leaf; ++ ++cleanup: ++ while (leaf != NULL) { ++ prev = leaf; ++ leaf = leaf->bfqd; ++ kfree(prev); ++ } ++ ++ return NULL; ++} ++ ++/** ++ * bfq_group_chain_link - link an allocatd group chain to a cgroup hierarchy. ++ * @bfqd: the queue descriptor. ++ * @cgroup: the leaf cgroup to start from. ++ * @leaf: the leaf group (to be associated to @cgroup). ++ * ++ * Try to link a chain of groups to a cgroup hierarchy, connecting the ++ * nodes bottom-up, so we can be sure that when we find a cgroup in the ++ * hierarchy that already as a group associated to @bfqd all the nodes ++ * in the path to the root cgroup have one too. ++ * ++ * On locking: the queue lock protects the hierarchy (there is a hierarchy ++ * per device) while the bfqio_cgroup lock protects the list of groups ++ * belonging to the same cgroup. 
++ */ ++static void bfq_group_chain_link(struct bfq_data *bfqd, struct cgroup *cgroup, ++ struct bfq_group *leaf) ++{ ++ struct bfqio_cgroup *bgrp; ++ struct bfq_group *bfqg, *next, *prev = NULL; ++ unsigned long flags; ++ ++ assert_spin_locked(bfqd->queue->queue_lock); ++ ++ for (; cgroup != NULL && leaf != NULL; cgroup = cgroup->parent) { ++ bgrp = cgroup_to_bfqio(cgroup); ++ next = leaf->bfqd; ++ ++ bfqg = bfqio_lookup_group(bgrp, bfqd); ++ BUG_ON(bfqg != NULL); ++ ++ spin_lock_irqsave(&bgrp->lock, flags); ++ ++ rcu_assign_pointer(leaf->bfqd, bfqd); ++ hlist_add_head_rcu(&leaf->group_node, &bgrp->group_data); ++ hlist_add_head(&leaf->bfqd_node, &bfqd->group_list); ++ ++ spin_unlock_irqrestore(&bgrp->lock, flags); ++ ++ prev = leaf; ++ leaf = next; ++ } ++ ++ BUG_ON(cgroup == NULL && leaf != NULL); ++ if (cgroup != NULL && prev != NULL) { ++ bgrp = cgroup_to_bfqio(cgroup); ++ bfqg = bfqio_lookup_group(bgrp, bfqd); ++ bfq_group_set_parent(prev, bfqg); ++ } ++} ++ ++/** ++ * bfq_find_alloc_group - return the group associated to @bfqd in @cgroup. ++ * @bfqd: queue descriptor. ++ * @cgroup: cgroup being searched for. ++ * ++ * Return a group associated to @bfqd in @cgroup, allocating one if ++ * necessary. When a group is returned all the cgroups in the path ++ * to the root have a group associated to @bfqd. ++ * ++ * If the allocation fails, return the root group: this breaks guarantees ++ * but is a safe fallbak. If this loss becames a problem it can be ++ * mitigated using the equivalent weight (given by the product of the ++ * weights of the groups in the path from @group to the root) in the ++ * root scheduler. ++ * ++ * We allocate all the missing nodes in the path from the leaf cgroup ++ * to the root and we connect the nodes only after all the allocations ++ * have been successful. ++ */ ++static struct bfq_group *bfq_find_alloc_group(struct bfq_data *bfqd, ++ struct cgroup *cgroup) ++{ ++ struct bfqio_cgroup *bgrp = cgroup_to_bfqio(cgroup); ++ struct bfq_group *bfqg; ++ ++ bfqg = bfqio_lookup_group(bgrp, bfqd); ++ if (bfqg != NULL) ++ return bfqg; ++ ++ bfqg = bfq_group_chain_alloc(bfqd, cgroup); ++ if (bfqg != NULL) ++ bfq_group_chain_link(bfqd, cgroup, bfqg); ++ else ++ bfqg = bfqd->root_group; ++ ++ return bfqg; ++} ++ ++/** ++ * bfq_bfqq_move - migrate @bfqq to @bfqg. ++ * @bfqd: queue descriptor. ++ * @bfqq: the queue to move. ++ * @entity: @bfqq's entity. ++ * @bfqg: the group to move to. ++ * ++ * Move @bfqq to @bfqg, deactivating it from its old group and reactivating ++ * it on the new one. Avoid putting the entity on the old group idle tree. ++ * ++ * Must be called under the queue lock; the cgroup owning @bfqg must ++ * not disappear (by now this just means that we are called under ++ * rcu_read_lock()). ++ */ ++static void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq, ++ struct bfq_entity *entity, struct bfq_group *bfqg) ++{ ++ int busy, resume; ++ ++ busy = bfq_bfqq_busy(bfqq); ++ resume = !RB_EMPTY_ROOT(&bfqq->sort_list); ++ ++ BUG_ON(resume && !entity->on_st); ++ BUG_ON(busy && !resume && entity->on_st && ++ bfqq != bfqd->in_service_queue); ++ ++ if (busy) { ++ BUG_ON(atomic_read(&bfqq->ref) < 2); ++ ++ if (!resume) ++ bfq_del_bfqq_busy(bfqd, bfqq, 0); ++ else ++ bfq_deactivate_bfqq(bfqd, bfqq, 0); ++ } else if (entity->on_st) ++ bfq_put_idle_entity(bfq_entity_service_tree(entity), entity); ++ ++ /* ++ * Here we use a reference to bfqg. 
We don't need a refcounter ++ * as the cgroup reference will not be dropped, so that its ++ * destroy() callback will not be invoked. ++ */ ++ entity->parent = bfqg->my_entity; ++ entity->sched_data = &bfqg->sched_data; ++ ++ if (busy && resume) ++ bfq_activate_bfqq(bfqd, bfqq); ++ ++ if (bfqd->in_service_queue == NULL && !bfqd->rq_in_driver) ++ bfq_schedule_dispatch(bfqd); ++} ++ ++/** ++ * __bfq_bic_change_cgroup - move @bic to @cgroup. ++ * @bfqd: the queue descriptor. ++ * @bic: the bic to move. ++ * @cgroup: the cgroup to move to. ++ * ++ * Move bic to cgroup, assuming that bfqd->queue is locked; the caller ++ * has to make sure that the reference to cgroup is valid across the call. ++ * ++ * NOTE: an alternative approach might have been to store the current ++ * cgroup in bfqq and getting a reference to it, reducing the lookup ++ * time here, at the price of slightly more complex code. ++ */ ++static struct bfq_group *__bfq_bic_change_cgroup(struct bfq_data *bfqd, ++ struct bfq_io_cq *bic, ++ struct cgroup *cgroup) ++{ ++ struct bfq_queue *async_bfqq = bic_to_bfqq(bic, 0); ++ struct bfq_queue *sync_bfqq = bic_to_bfqq(bic, 1); ++ struct bfq_entity *entity; ++ struct bfq_group *bfqg; ++ struct bfqio_cgroup *bgrp; ++ ++ bgrp = cgroup_to_bfqio(cgroup); ++ ++ bfqg = bfq_find_alloc_group(bfqd, cgroup); ++ if (async_bfqq != NULL) { ++ entity = &async_bfqq->entity; ++ ++ if (entity->sched_data != &bfqg->sched_data) { ++ bic_set_bfqq(bic, NULL, 0); ++ bfq_log_bfqq(bfqd, async_bfqq, ++ "bic_change_group: %p %d", ++ async_bfqq, atomic_read(&async_bfqq->ref)); ++ bfq_put_queue(async_bfqq); ++ } ++ } ++ ++ if (sync_bfqq != NULL) { ++ entity = &sync_bfqq->entity; ++ if (entity->sched_data != &bfqg->sched_data) ++ bfq_bfqq_move(bfqd, sync_bfqq, entity, bfqg); ++ } ++ ++ return bfqg; ++} ++ ++/** ++ * bfq_bic_change_cgroup - move @bic to @cgroup. ++ * @bic: the bic being migrated. ++ * @cgroup: the destination cgroup. ++ * ++ * When the task owning @bic is moved to @cgroup, @bic is immediately ++ * moved into its new parent group. ++ */ ++static void bfq_bic_change_cgroup(struct bfq_io_cq *bic, ++ struct cgroup *cgroup) ++{ ++ struct bfq_data *bfqd; ++ unsigned long uninitialized_var(flags); ++ ++ bfqd = bfq_get_bfqd_locked(&(bic->icq.q->elevator->elevator_data), ++ &flags); ++ if (bfqd != NULL) { ++ __bfq_bic_change_cgroup(bfqd, bic, cgroup); ++ bfq_put_bfqd_unlock(bfqd, &flags); ++ } ++} ++ ++/** ++ * bfq_bic_update_cgroup - update the cgroup of @bic. ++ * @bic: the @bic to update. ++ * ++ * Make sure that @bic is enqueued in the cgroup of the current task. ++ * We need this in addition to moving bics during the cgroup attach ++ * phase because the task owning @bic could be at its first disk ++ * access or we may end up in the root cgroup as the result of a ++ * memory allocation failure and here we try to move to the right ++ * group. ++ * ++ * Must be called under the queue lock. It is safe to use the returned ++ * value even after the rcu_read_unlock() as the migration/destruction ++ * paths act under the queue lock too. IOW it is impossible to race with ++ * group migration/destruction and end up with an invalid group as: ++ * a) here cgroup has not yet been destroyed, nor its destroy callback ++ * has started execution, as current holds a reference to it, ++ * b) if it is destroyed after rcu_read_unlock() [after current is ++ * migrated to a different cgroup] its attach() callback will have ++ * taken care of remove all the references to the old cgroup data. 
++ */
++static struct bfq_group *bfq_bic_update_cgroup(struct bfq_io_cq *bic)
++{
++	struct bfq_data *bfqd = bic_to_bfqd(bic);
++	struct bfq_group *bfqg;
++	struct cgroup *cgroup;
++
++	BUG_ON(bfqd == NULL);
++
++	rcu_read_lock();
++	cgroup = task_cgroup(current, bfqio_subsys_id);
++	bfqg = __bfq_bic_change_cgroup(bfqd, bic, cgroup);
++	rcu_read_unlock();
++
++	return bfqg;
++}
++
++/**
++ * bfq_flush_idle_tree - deactivate any entity on the idle tree of @st.
++ * @st: the service tree being flushed.
++ */
++static inline void bfq_flush_idle_tree(struct bfq_service_tree *st)
++{
++	struct bfq_entity *entity = st->first_idle;
++
++	for (; entity != NULL; entity = st->first_idle)
++		__bfq_deactivate_entity(entity, 0);
++}
++
++/**
++ * bfq_reparent_leaf_entity - move leaf entity to the root_group.
++ * @bfqd: the device data structure with the root group.
++ * @entity: the entity to move.
++ */
++static inline void bfq_reparent_leaf_entity(struct bfq_data *bfqd,
++					    struct bfq_entity *entity)
++{
++	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
++
++	BUG_ON(bfqq == NULL);
++	bfq_bfqq_move(bfqd, bfqq, entity, bfqd->root_group);
++	return;
++}
++
++/**
++ * bfq_reparent_active_entities - move to the root group all active entities.
++ * @bfqd: the device data structure with the root group.
++ * @bfqg: the group to move from.
++ * @st: the service tree with the entities.
++ *
++ * Needs the queue lock to be held, and the reference to @bfqg to remain
++ * valid over the call.
++ */
++static inline void bfq_reparent_active_entities(struct bfq_data *bfqd,
++						struct bfq_group *bfqg,
++						struct bfq_service_tree *st)
++{
++	struct rb_root *active = &st->active;
++	struct bfq_entity *entity = NULL;
++
++	if (!RB_EMPTY_ROOT(&st->active))
++		entity = bfq_entity_of(rb_first(active));
++
++	for (; entity != NULL; entity = bfq_entity_of(rb_first(active)))
++		bfq_reparent_leaf_entity(bfqd, entity);
++
++	if (bfqg->sched_data.active_entity != NULL)
++		bfq_reparent_leaf_entity(bfqd, bfqg->sched_data.active_entity);
++
++	return;
++}
++
++/**
++ * bfq_destroy_group - destroy @bfqg.
++ * @bgrp: the bfqio_cgroup containing @bfqg.
++ * @bfqg: the group being destroyed.
++ *
++ * Destroy @bfqg, making sure that it is not referenced from its parent.
++ */
++static void bfq_destroy_group(struct bfqio_cgroup *bgrp, struct bfq_group *bfqg)
++{
++	struct bfq_data *bfqd;
++	struct bfq_service_tree *st;
++	struct bfq_entity *entity = bfqg->my_entity;
++	unsigned long uninitialized_var(flags);
++	int i;
++
++	hlist_del(&bfqg->group_node);
++
++	/*
++	 * Empty all service_trees belonging to this group before deactivating
++	 * the group itself.
++	 */
++	for (i = 0; i < BFQ_IOPRIO_CLASSES; i++) {
++		st = bfqg->sched_data.service_tree + i;
++
++		/*
++		 * The idle tree may still contain bfq_queues belonging
++		 * to exited tasks because they never migrated to a different
++		 * cgroup from the one being destroyed now. No one else
++		 * can access them so it's safe to act without any lock.
++		 */
++		bfq_flush_idle_tree(st);
++
++		/*
++		 * It may happen that some queues are still active
++		 * (busy) upon group destruction (if the corresponding
++		 * processes have been forced to terminate). We move
++		 * all the leaf entities corresponding to these queues
++		 * to the root_group.
++		 * Also, it may happen that the group has an entity
++		 * in service, which is disconnected from the active
++		 * tree: it must be moved, too.
++		 * There is no need to put the sync queues, as the
++		 * scheduler has taken no reference.
++		 */
++		bfqd = bfq_get_bfqd_locked(&bfqg->bfqd, &flags);
++		if (bfqd != NULL) {
++			bfq_reparent_active_entities(bfqd, bfqg, st);
++			bfq_put_bfqd_unlock(bfqd, &flags);
++		}
++		BUG_ON(!RB_EMPTY_ROOT(&st->active));
++		BUG_ON(!RB_EMPTY_ROOT(&st->idle));
++	}
++	BUG_ON(bfqg->sched_data.next_active != NULL);
++	BUG_ON(bfqg->sched_data.active_entity != NULL);
++
++	/*
++	 * We may race with device destruction, take extra care when
++	 * dereferencing bfqg->bfqd.
++	 */
++	bfqd = bfq_get_bfqd_locked(&bfqg->bfqd, &flags);
++	if (bfqd != NULL) {
++		hlist_del(&bfqg->bfqd_node);
++		__bfq_deactivate_entity(entity, 0);
++		bfq_put_async_queues(bfqd, bfqg);
++		bfq_put_bfqd_unlock(bfqd, &flags);
++	}
++	BUG_ON(entity->tree != NULL);
++
++	/*
++	 * No need to defer the kfree() to the end of the RCU grace
++	 * period: we are called from the destroy() callback of our
++	 * cgroup, so we can be sure that no one is a) still using
++	 * this cgroup or b) doing lookups in it.
++	 */
++	kfree(bfqg);
++}
++
++static void bfq_end_raising_async(struct bfq_data *bfqd)
++{
++	struct hlist_node *tmp;
++	struct bfq_group *bfqg;
++
++	hlist_for_each_entry_safe(bfqg, tmp, &bfqd->group_list, bfqd_node)
++		bfq_end_raising_async_queues(bfqd, bfqg);
++	bfq_end_raising_async_queues(bfqd, bfqd->root_group);
++}
++
++/**
++ * bfq_disconnect_groups - disconnect @bfqd from all its groups.
++ * @bfqd: the device descriptor being exited.
++ *
++ * When the device exits we just make sure that no lookup can return
++ * the now unused group structures. They will be deallocated on cgroup
++ * destruction.
++ */
++static void bfq_disconnect_groups(struct bfq_data *bfqd)
++{
++	struct hlist_node *tmp;
++	struct bfq_group *bfqg;
++
++	bfq_log(bfqd, "disconnect_groups beginning");
++	hlist_for_each_entry_safe(bfqg, tmp, &bfqd->group_list, bfqd_node) {
++		hlist_del(&bfqg->bfqd_node);
++
++		__bfq_deactivate_entity(bfqg->my_entity, 0);
++
++		/*
++		 * Don't remove from the group hash, just set an
++		 * invalid key. No lookups can race with the
++		 * assignment as bfqd is being destroyed; this
++		 * implies also that new elements cannot be added
++		 * to the list.
++		 */
++		rcu_assign_pointer(bfqg->bfqd, NULL);
++
++		bfq_log(bfqd, "disconnect_groups: put async for group %p",
++			bfqg);
++		bfq_put_async_queues(bfqd, bfqg);
++	}
++}
++
++static inline void bfq_free_root_group(struct bfq_data *bfqd)
++{
++	struct bfqio_cgroup *bgrp = &bfqio_root_cgroup;
++	struct bfq_group *bfqg = bfqd->root_group;
++
++	bfq_put_async_queues(bfqd, bfqg);
++
++	spin_lock_irq(&bgrp->lock);
++	hlist_del_rcu(&bfqg->group_node);
++	spin_unlock_irq(&bgrp->lock);
++
++	/*
++	 * No need to synchronize_rcu() here: since the device is gone
++	 * there cannot be any read-side access to its root_group.
++ */ ++ kfree(bfqg); ++} ++ ++static struct bfq_group *bfq_alloc_root_group(struct bfq_data *bfqd, int node) ++{ ++ struct bfq_group *bfqg; ++ struct bfqio_cgroup *bgrp; ++ int i; ++ ++ bfqg = kzalloc_node(sizeof(*bfqg), GFP_KERNEL, node); ++ if (bfqg == NULL) ++ return NULL; ++ ++ bfqg->entity.parent = NULL; ++ for (i = 0; i < BFQ_IOPRIO_CLASSES; i++) ++ bfqg->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT; ++ ++ bgrp = &bfqio_root_cgroup; ++ spin_lock_irq(&bgrp->lock); ++ rcu_assign_pointer(bfqg->bfqd, bfqd); ++ hlist_add_head_rcu(&bfqg->group_node, &bgrp->group_data); ++ spin_unlock_irq(&bgrp->lock); ++ ++ return bfqg; ++} ++ ++#define SHOW_FUNCTION(__VAR) \ ++static u64 bfqio_cgroup_##__VAR##_read(struct cgroup *cgroup, \ ++ struct cftype *cftype) \ ++{ \ ++ struct bfqio_cgroup *bgrp; \ ++ u64 ret = -ENODEV; \ ++ \ ++ mutex_lock(&bfqio_mutex); \ ++ if (bfqio_is_removed(cgroup)) \ ++ goto out_unlock; \ ++ \ ++ bgrp = cgroup_to_bfqio(cgroup); \ ++ spin_lock_irq(&bgrp->lock); \ ++ ret = bgrp->__VAR; \ ++ spin_unlock_irq(&bgrp->lock); \ ++ \ ++out_unlock: \ ++ mutex_unlock(&bfqio_mutex); \ ++ return ret; \ ++} ++ ++SHOW_FUNCTION(weight); ++SHOW_FUNCTION(ioprio); ++SHOW_FUNCTION(ioprio_class); ++#undef SHOW_FUNCTION ++ ++#define STORE_FUNCTION(__VAR, __MIN, __MAX) \ ++static int bfqio_cgroup_##__VAR##_write(struct cgroup *cgroup, \ ++ struct cftype *cftype, \ ++ u64 val) \ ++{ \ ++ struct bfqio_cgroup *bgrp; \ ++ struct bfq_group *bfqg; \ ++ int ret = -EINVAL; \ ++ \ ++ if (val < (__MIN) || val > (__MAX)) \ ++ return ret; \ ++ \ ++ ret = -ENODEV; \ ++ mutex_lock(&bfqio_mutex); \ ++ if (bfqio_is_removed(cgroup)) \ ++ goto out_unlock; \ ++ ret = 0; \ ++ \ ++ bgrp = cgroup_to_bfqio(cgroup); \ ++ \ ++ spin_lock_irq(&bgrp->lock); \ ++ bgrp->__VAR = (unsigned short)val; \ ++ hlist_for_each_entry(bfqg, &bgrp->group_data, group_node) { \ ++ /* \ ++ * Setting the ioprio_changed flag of the entity \ ++ * to 1 with new_##__VAR == ##__VAR would re-set \ ++ * the value of the weight to its ioprio mapping. \ ++ * Set the flag only if necessary. 
\ ++ */ \ ++ if ((unsigned short)val != bfqg->entity.new_##__VAR) { \ ++ bfqg->entity.new_##__VAR = (unsigned short)val; \ ++ smp_wmb(); \ ++ bfqg->entity.ioprio_changed = 1; \ ++ } \ ++ } \ ++ spin_unlock_irq(&bgrp->lock); \ ++ \ ++out_unlock: \ ++ mutex_unlock(&bfqio_mutex); \ ++ return ret; \ ++} ++ ++STORE_FUNCTION(weight, BFQ_MIN_WEIGHT, BFQ_MAX_WEIGHT); ++STORE_FUNCTION(ioprio, 0, IOPRIO_BE_NR - 1); ++STORE_FUNCTION(ioprio_class, IOPRIO_CLASS_RT, IOPRIO_CLASS_IDLE); ++#undef STORE_FUNCTION ++ ++static struct cftype bfqio_files[] = { ++ { ++ .name = "weight", ++ .read_u64 = bfqio_cgroup_weight_read, ++ .write_u64 = bfqio_cgroup_weight_write, ++ }, ++ { ++ .name = "ioprio", ++ .read_u64 = bfqio_cgroup_ioprio_read, ++ .write_u64 = bfqio_cgroup_ioprio_write, ++ }, ++ { ++ .name = "ioprio_class", ++ .read_u64 = bfqio_cgroup_ioprio_class_read, ++ .write_u64 = bfqio_cgroup_ioprio_class_write, ++ }, ++ { }, /* terminate */ ++}; ++ ++static struct cgroup_subsys_state *bfqio_create(struct cgroup *cgroup) ++{ ++ struct bfqio_cgroup *bgrp; ++ ++ if (cgroup->parent != NULL) { ++ bgrp = kzalloc(sizeof(*bgrp), GFP_KERNEL); ++ if (bgrp == NULL) ++ return ERR_PTR(-ENOMEM); ++ } else ++ bgrp = &bfqio_root_cgroup; ++ ++ spin_lock_init(&bgrp->lock); ++ INIT_HLIST_HEAD(&bgrp->group_data); ++ bgrp->ioprio = BFQ_DEFAULT_GRP_IOPRIO; ++ bgrp->ioprio_class = BFQ_DEFAULT_GRP_CLASS; ++ ++ return &bgrp->css; ++} ++ ++/* ++ * We cannot support shared io contexts, as we have no means to support ++ * two tasks with the same ioc in two different groups without major rework ++ * of the main bic/bfqq data structures. By now we allow a task to change ++ * its cgroup only if it's the only owner of its ioc; the drawback of this ++ * behavior is that a group containing a task that forked using CLONE_IO ++ * will not be destroyed until the tasks sharing the ioc die. ++ */ ++static int bfqio_can_attach(struct cgroup *cgroup, struct cgroup_taskset *tset) ++{ ++ struct task_struct *task; ++ struct io_context *ioc; ++ int ret = 0; ++ ++ cgroup_taskset_for_each(task, cgroup, tset) { ++ /* task_lock() is needed to avoid races with exit_io_context() */ ++ task_lock(task); ++ ioc = task->io_context; ++ if (ioc != NULL && atomic_read(&ioc->nr_tasks) > 1) ++ /* ++ * ioc == NULL means that the task is either too young or ++ * exiting: if it has still no ioc the ioc can't be shared, ++ * if the task is exiting the attach will fail anyway, no ++ * matter what we return here. ++ */ ++ ret = -EINVAL; ++ task_unlock(task); ++ if (ret) ++ break; ++ } ++ ++ return ret; ++} ++ ++static void bfqio_attach(struct cgroup *cgroup, struct cgroup_taskset *tset) ++{ ++ struct task_struct *task; ++ struct io_context *ioc; ++ struct io_cq *icq; ++ ++ /* ++ * IMPORTANT NOTE: The move of more than one process at a time to a ++ * new group has not yet been tested. ++ */ ++ cgroup_taskset_for_each(task, cgroup, tset) { ++ ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE); ++ if (ioc) { ++ /* ++ * Handle cgroup change here. 
++ */ ++ rcu_read_lock(); ++ hlist_for_each_entry_rcu(icq, &ioc->icq_list, ioc_node) ++ if (!strncmp( ++ icq->q->elevator->type->elevator_name, ++ "bfq", ELV_NAME_MAX)) ++ bfq_bic_change_cgroup(icq_to_bic(icq), ++ cgroup); ++ rcu_read_unlock(); ++ put_io_context(ioc); ++ } ++ } ++} ++ ++static void bfqio_destroy(struct cgroup *cgroup) ++{ ++ struct bfqio_cgroup *bgrp = cgroup_to_bfqio(cgroup); ++ struct hlist_node *tmp; ++ struct bfq_group *bfqg; ++ ++ /* ++ * Since we are destroying the cgroup, there are no more tasks ++ * referencing it, and all the RCU grace periods that may have ++ * referenced it are ended (as the destruction of the parent ++ * cgroup is RCU-safe); bgrp->group_data will not be accessed by ++ * anything else and we don't need any synchronization. ++ */ ++ hlist_for_each_entry_safe(bfqg, tmp, &bgrp->group_data, group_node) ++ bfq_destroy_group(bgrp, bfqg); ++ ++ BUG_ON(!hlist_empty(&bgrp->group_data)); ++ ++ kfree(bgrp); ++} ++ ++struct cgroup_subsys bfqio_subsys = { ++ .name = "bfqio", ++ .css_alloc = bfqio_create, ++ .can_attach = bfqio_can_attach, ++ .attach = bfqio_attach, ++ .css_free = bfqio_destroy, ++ .subsys_id = bfqio_subsys_id, ++ .base_cftypes = bfqio_files, ++}; ++#else ++static inline void bfq_init_entity(struct bfq_entity *entity, ++ struct bfq_group *bfqg) ++{ ++ entity->weight = entity->new_weight; ++ entity->orig_weight = entity->new_weight; ++ entity->ioprio = entity->new_ioprio; ++ entity->ioprio_class = entity->new_ioprio_class; ++ entity->sched_data = &bfqg->sched_data; ++} ++ ++static inline struct bfq_group * ++bfq_bic_update_cgroup(struct bfq_io_cq *bic) ++{ ++ struct bfq_data *bfqd = bic_to_bfqd(bic); ++ return bfqd->root_group; ++} ++ ++static inline void bfq_bfqq_move(struct bfq_data *bfqd, ++ struct bfq_queue *bfqq, ++ struct bfq_entity *entity, ++ struct bfq_group *bfqg) ++{ ++} ++ ++static void bfq_end_raising_async(struct bfq_data *bfqd) ++{ ++ bfq_end_raising_async_queues(bfqd, bfqd->root_group); ++} ++ ++static inline void bfq_disconnect_groups(struct bfq_data *bfqd) ++{ ++ bfq_put_async_queues(bfqd, bfqd->root_group); ++} ++ ++static inline void bfq_free_root_group(struct bfq_data *bfqd) ++{ ++ kfree(bfqd->root_group); ++} ++ ++static struct bfq_group *bfq_alloc_root_group(struct bfq_data *bfqd, int node) ++{ ++ struct bfq_group *bfqg; ++ int i; ++ ++ bfqg = kmalloc_node(sizeof(*bfqg), GFP_KERNEL | __GFP_ZERO, node); ++ if (bfqg == NULL) ++ return NULL; ++ ++ for (i = 0; i < BFQ_IOPRIO_CLASSES; i++) ++ bfqg->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT; ++ ++ return bfqg; ++} ++#endif +diff --git a/block/bfq-ioc.c b/block/bfq-ioc.c +new file mode 100644 +index 0000000..7f6b000 +--- /dev/null ++++ b/block/bfq-ioc.c +@@ -0,0 +1,36 @@ ++/* ++ * BFQ: I/O context handling. ++ * ++ * Based on ideas and code from CFQ: ++ * Copyright (C) 2003 Jens Axboe ++ * ++ * Copyright (C) 2008 Fabio Checconi ++ * Paolo Valente ++ * ++ * Copyright (C) 2010 Paolo Valente ++ */ ++ ++/** ++ * icq_to_bic - convert iocontext queue structure to bfq_io_cq. ++ * @icq: the iocontext queue. ++ */ ++static inline struct bfq_io_cq *icq_to_bic(struct io_cq *icq) ++{ ++ /* bic->icq is the first member, %NULL will convert to %NULL */ ++ return container_of(icq, struct bfq_io_cq, icq); ++} ++ ++/** ++ * bfq_bic_lookup - search into @ioc a bic associated to @bfqd. ++ * @bfqd: the lookup key. ++ * @ioc: the io_context of the process doing I/O. ++ * ++ * Queue lock must be held. 
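The zero-offset trick in icq_to_bic() above ("bic->icq is the first member, %NULL will convert to %NULL") can be checked in isolation: with the embedded struct placed first, container_of() subtracts an offset of zero, so even a NULL pointer maps back to NULL. A minimal user-space analogue follows; the types are invented, and the NULL arithmetic is only well-behaved here because the offset is zero:

#include <assert.h>
#include <stddef.h>

struct inner { int x; };
struct outer {
	struct inner icq;	/* first member: offset is 0 */
	int extra;
};

/* Hand-rolled container_of(); with offset 0 the subtraction is a
 * no-op, so NULL converts to NULL, as the kernel comment notes. */
#define to_outer(p) \
	((struct outer *)((char *)(p) - offsetof(struct outer, icq)))

int main(void)
{
	struct outer o;

	assert(to_outer(&o.icq) == &o);
	assert(to_outer((struct inner *)0) == (struct outer *)0);
	return 0;
}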
++ */ ++static inline struct bfq_io_cq *bfq_bic_lookup(struct bfq_data *bfqd, ++ struct io_context *ioc) ++{ ++ if (ioc) ++ return icq_to_bic(ioc_lookup_icq(ioc, bfqd->queue)); ++ return NULL; ++} +diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c +new file mode 100644 +index 0000000..96abb81 +--- /dev/null ++++ b/block/bfq-iosched.c +@@ -0,0 +1,3256 @@ ++/* ++ * BFQ, or Budget Fair Queueing, disk scheduler. ++ * ++ * Based on ideas and code from CFQ: ++ * Copyright (C) 2003 Jens Axboe ++ * ++ * Copyright (C) 2008 Fabio Checconi ++ * Paolo Valente ++ * ++ * Copyright (C) 2010 Paolo Valente ++ * ++ * Licensed under the GPL-2 as detailed in the accompanying COPYING.BFQ file. ++ * ++ * BFQ is a proportional share disk scheduling algorithm based on the ++ * slice-by-slice service scheme of CFQ. But BFQ assigns budgets, measured in ++ * number of sectors, to tasks instead of time slices. The disk is not granted ++ * to the in-service task for a given time slice, but until it has exahusted ++ * its assigned budget. This change from the time to the service domain allows ++ * BFQ to distribute the disk bandwidth among tasks as desired, without any ++ * distortion due to ZBR, workload fluctuations or other factors. BFQ uses an ++ * ad hoc internal scheduler, called B-WF2Q+, to schedule tasks according to ++ * their budgets (more precisely BFQ schedules queues associated to tasks). ++ * Thanks to this accurate scheduler, BFQ can afford to assign high budgets to ++ * disk-bound non-seeky tasks (to boost the throughput), and yet guarantee low ++ * latencies to interactive and soft real-time applications. ++ * ++ * BFQ is described in [1], where also a reference to the initial, more ++ * theoretical paper on BFQ can be found. The interested reader can find in ++ * the latter paper full details on the main algorithm as well as formulas of ++ * the guarantees, plus formal proofs of all the properties. With respect to ++ * the version of BFQ presented in these papers, this implementation adds a ++ * few more heuristics, such as the one that guarantees a low latency to soft ++ * real-time applications, and a hierarchical extension based on H-WF2Q+. ++ * ++ * B-WF2Q+ is based on WF2Q+, that is described in [2], together with ++ * H-WF2Q+, while the augmented tree used to implement B-WF2Q+ with O(log N) ++ * complexity derives from the one introduced with EEVDF in [3]. ++ * ++ * [1] P. Valente and M. Andreolini, ``Improving Application Responsiveness ++ * with the BFQ Disk I/O Scheduler'', ++ * Proceedings of the 5th Annual International Systems and Storage ++ * Conference (SYSTOR '12), June 2012. ++ * ++ * http://algogroup.unimo.it/people/paolo/disk_sched/bf1-v1-suite-results.pdf ++ * ++ * [2] Jon C.R. Bennett and H. Zhang, ``Hierarchical Packet Fair Queueing ++ * Algorithms,'' IEEE/ACM Transactions on Networking, 5(5):675-689, ++ * Oct 1997. ++ * ++ * http://www.cs.cmu.edu/~hzhang/papers/TON-97-Oct.ps.gz ++ * ++ * [3] I. Stoica and H. Abdel-Wahab, ``Earliest Eligible Virtual Deadline ++ * First: A Flexible and Accurate Mechanism for Proportional Share ++ * Resource Allocation,'' technical report. ++ * ++ * http://www.cs.berkeley.edu/~istoica/papers/eevdf-tr-95.pdf ++ */ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include "bfq.h" ++#include "blk.h" ++ ++/* Max number of dispatches in one round of service. */ ++static const int bfq_quantum = 4; ++ ++/* Expiration time of sync (0) and async (1) requests, in jiffies. 
*/
++static const int bfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
++
++/* Maximum backwards seek, in KiB. */
++static const int bfq_back_max = 16 * 1024;
++
++/* Penalty of a backwards seek, in number of sectors. */
++static const int bfq_back_penalty = 2;
++
++/* Idling period duration, in jiffies. */
++static int bfq_slice_idle = HZ / 125;
++
++/* Default maximum budget values, in sectors and number of requests. */
++static const int bfq_default_max_budget = 16 * 1024;
++static const int bfq_max_budget_async_rq = 4;
++
++/*
++ * Async to sync throughput distribution is controlled as follows:
++ * when an async request is served, the entity is charged the number
++ * of sectors of the request, multiplied by the factor below.
++ */
++static const int bfq_async_charge_factor = 10;
++
++/* Default timeout values, in jiffies, approximating CFQ defaults. */
++static const int bfq_timeout_sync = HZ / 8;
++static int bfq_timeout_async = HZ / 25;
++
++struct kmem_cache *bfq_pool;
++
++/* Below this threshold (in ms), we consider thinktime immediate. */
++#define BFQ_MIN_TT 2
++
++/* hw_tag detection: parallel requests threshold and min samples needed. */
++#define BFQ_HW_QUEUE_THRESHOLD 4
++#define BFQ_HW_QUEUE_SAMPLES 32
++
++#define BFQQ_SEEK_THR (sector_t)(8 * 1024)
++#define BFQQ_SEEKY(bfqq) ((bfqq)->seek_mean > BFQQ_SEEK_THR)
++
++/* Min samples used for peak rate estimation (for autotuning). */
++#define BFQ_PEAK_RATE_SAMPLES 32
++
++/* Shift used for peak rate fixed precision calculations. */
++#define BFQ_RATE_SHIFT 16
++
++/*
++ * The duration of the weight raising for interactive applications is
++ * computed automatically (as default behaviour), using the following
++ * formula: duration = (R / r) * T, where r is the peak rate of the
++ * disk, and R and T are two reference parameters. In particular, R is
++ * the peak rate of a reference disk, and T is about the maximum time
++ * for starting popular large applications on that disk, under BFQ and
++ * while reading two files in parallel. Finally, BFQ uses two
++ * different pairs (R, T) depending on whether the disk is rotational
++ * or non-rotational.
++ */
++#define T_rot (msecs_to_jiffies(5500))
++#define T_nonrot (msecs_to_jiffies(2000))
++/* Next two quantities are in sectors/usec, left-shifted by BFQ_RATE_SHIFT */
++#define R_rot 17415
++#define R_nonrot 34791
++
++#define BFQ_SERVICE_TREE_INIT ((struct bfq_service_tree) \
++				{ RB_ROOT, RB_ROOT, NULL, NULL, 0, 0 })
++
++#define RQ_BIC(rq) ((struct bfq_io_cq *) (rq)->elv.priv[0])
++#define RQ_BFQQ(rq) ((rq)->elv.priv[1])
++
++static inline void bfq_schedule_dispatch(struct bfq_data *bfqd);
++
++#include "bfq-ioc.c"
++#include "bfq-sched.c"
++#include "bfq-cgroup.c"
++
++#define bfq_class_idle(bfqq) ((bfqq)->entity.ioprio_class ==\
++			      IOPRIO_CLASS_IDLE)
++#define bfq_class_rt(bfqq) ((bfqq)->entity.ioprio_class ==\
++			    IOPRIO_CLASS_RT)
++
++#define bfq_sample_valid(samples) ((samples) > 80)
++
++/*
++ * We regard a request as SYNC, if either it's a read or has the SYNC bit
++ * set (in which case it could also be a direct WRITE).
++ */
++static inline int bfq_bio_sync(struct bio *bio)
++{
++	if (bio_data_dir(bio) == READ || (bio->bi_rw & REQ_SYNC))
++		return 1;
++
++	return 0;
++}
++
++/*
++ * Scheduler run of queue, if there are requests pending and no one in the
++ * driver that will restart queueing.
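To make the duration = (R / r) * T rule above concrete: bfq_wrais_duration(), defined further down in this file, computes dur = RT_prod / peak_rate. Here is a hedged user-space sketch with the rotational constants above; HZ = 1000 and the sample peak rates are assumptions for illustration, not measured values:

#include <stdint.h>
#include <stdio.h>

#define HZ	1000		/* assumed kernel tick rate */
#define T_ROT	5500		/* T_rot in jiffies at HZ=1000 (5.5 s) */
#define R_ROT	17415		/* reference rate, sectors/usec << 16 */

int main(void)
{
	uint64_t rt_prod = (uint64_t)R_ROT * T_ROT;	/* bfqd->RT_prod */
	/* assumed measured peak rates, in the same fixed-point unit */
	uint64_t rates[] = { R_ROT, 2 * R_ROT, R_ROT / 2 };

	for (int i = 0; i < 3; i++)
		printf("peak_rate=%llu -> raising lasts %llu jiffies\n",
		       (unsigned long long)rates[i],
		       (unsigned long long)(rt_prod / rates[i]));
	/* prints 5500, 2750 and 11000 jiffies: the slower the disk
	 * relative to the reference one, the longer the weight of an
	 * interactive queue stays raised. */
	return 0;
}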
++ */ ++static inline void bfq_schedule_dispatch(struct bfq_data *bfqd) ++{ ++ if (bfqd->queued != 0) { ++ bfq_log(bfqd, "schedule dispatch"); ++ kblockd_schedule_work(bfqd->queue, &bfqd->unplug_work); ++ } ++} ++ ++/* ++ * Lifted from AS - choose which of rq1 and rq2 that is best served now. ++ * We choose the request that is closesr to the head right now. Distance ++ * behind the head is penalized and only allowed to a certain extent. ++ */ ++static struct request *bfq_choose_req(struct bfq_data *bfqd, ++ struct request *rq1, ++ struct request *rq2, ++ sector_t last) ++{ ++ sector_t s1, s2, d1 = 0, d2 = 0; ++ unsigned long back_max; ++#define BFQ_RQ1_WRAP 0x01 /* request 1 wraps */ ++#define BFQ_RQ2_WRAP 0x02 /* request 2 wraps */ ++ unsigned wrap = 0; /* bit mask: requests behind the disk head? */ ++ ++ if (rq1 == NULL || rq1 == rq2) ++ return rq2; ++ if (rq2 == NULL) ++ return rq1; ++ ++ if (rq_is_sync(rq1) && !rq_is_sync(rq2)) ++ return rq1; ++ else if (rq_is_sync(rq2) && !rq_is_sync(rq1)) ++ return rq2; ++ if ((rq1->cmd_flags & REQ_META) && !(rq2->cmd_flags & REQ_META)) ++ return rq1; ++ else if ((rq2->cmd_flags & REQ_META) && !(rq1->cmd_flags & REQ_META)) ++ return rq2; ++ ++ s1 = blk_rq_pos(rq1); ++ s2 = blk_rq_pos(rq2); ++ ++ /* ++ * By definition, 1KiB is 2 sectors. ++ */ ++ back_max = bfqd->bfq_back_max * 2; ++ ++ /* ++ * Strict one way elevator _except_ in the case where we allow ++ * short backward seeks which are biased as twice the cost of a ++ * similar forward seek. ++ */ ++ if (s1 >= last) ++ d1 = s1 - last; ++ else if (s1 + back_max >= last) ++ d1 = (last - s1) * bfqd->bfq_back_penalty; ++ else ++ wrap |= BFQ_RQ1_WRAP; ++ ++ if (s2 >= last) ++ d2 = s2 - last; ++ else if (s2 + back_max >= last) ++ d2 = (last - s2) * bfqd->bfq_back_penalty; ++ else ++ wrap |= BFQ_RQ2_WRAP; ++ ++ /* Found required data */ ++ ++ /* ++ * By doing switch() on the bit mask "wrap" we avoid having to ++ * check two variables for all permutations: --> faster! ++ */ ++ switch (wrap) { ++ case 0: /* common case for CFQ: rq1 and rq2 not wrapped */ ++ if (d1 < d2) ++ return rq1; ++ else if (d2 < d1) ++ return rq2; ++ else { ++ if (s1 >= s2) ++ return rq1; ++ else ++ return rq2; ++ } ++ ++ case BFQ_RQ2_WRAP: ++ return rq1; ++ case BFQ_RQ1_WRAP: ++ return rq2; ++ case (BFQ_RQ1_WRAP|BFQ_RQ2_WRAP): /* both rqs wrapped */ ++ default: ++ /* ++ * Since both rqs are wrapped, ++ * start with the one that's further behind head ++ * (--> only *one* back seek required), ++ * since back seek takes more time than forward. ++ */ ++ if (s1 <= s2) ++ return rq1; ++ else ++ return rq2; ++ } ++} ++ ++static struct bfq_queue * ++bfq_rq_pos_tree_lookup(struct bfq_data *bfqd, struct rb_root *root, ++ sector_t sector, struct rb_node **ret_parent, ++ struct rb_node ***rb_link) ++{ ++ struct rb_node **p, *parent; ++ struct bfq_queue *bfqq = NULL; ++ ++ parent = NULL; ++ p = &root->rb_node; ++ while (*p) { ++ struct rb_node **n; ++ ++ parent = *p; ++ bfqq = rb_entry(parent, struct bfq_queue, pos_node); ++ ++ /* ++ * Sort strictly based on sector. Smallest to the left, ++ * largest to the right. ++ */ ++ if (sector > blk_rq_pos(bfqq->next_rq)) ++ n = &(*p)->rb_right; ++ else if (sector < blk_rq_pos(bfqq->next_rq)) ++ n = &(*p)->rb_left; ++ else ++ break; ++ p = n; ++ bfqq = NULL; ++ } ++ ++ *ret_parent = parent; ++ if (rb_link) ++ *rb_link = p; ++ ++ bfq_log(bfqd, "rq_pos_tree_lookup %llu: returning %d", ++ (long long unsigned)sector, ++ bfqq != NULL ? 
bfqq->pid : 0); ++ ++ return bfqq; ++} ++ ++static void bfq_rq_pos_tree_add(struct bfq_data *bfqd, struct bfq_queue *bfqq) ++{ ++ struct rb_node **p, *parent; ++ struct bfq_queue *__bfqq; ++ ++ if (bfqq->pos_root != NULL) { ++ rb_erase(&bfqq->pos_node, bfqq->pos_root); ++ bfqq->pos_root = NULL; ++ } ++ ++ if (bfq_class_idle(bfqq)) ++ return; ++ if (!bfqq->next_rq) ++ return; ++ ++ bfqq->pos_root = &bfqd->rq_pos_tree; ++ __bfqq = bfq_rq_pos_tree_lookup(bfqd, bfqq->pos_root, ++ blk_rq_pos(bfqq->next_rq), &parent, &p); ++ if (__bfqq == NULL) { ++ rb_link_node(&bfqq->pos_node, parent, p); ++ rb_insert_color(&bfqq->pos_node, bfqq->pos_root); ++ } else ++ bfqq->pos_root = NULL; ++} ++ ++static struct request *bfq_find_next_rq(struct bfq_data *bfqd, ++ struct bfq_queue *bfqq, ++ struct request *last) ++{ ++ struct rb_node *rbnext = rb_next(&last->rb_node); ++ struct rb_node *rbprev = rb_prev(&last->rb_node); ++ struct request *next = NULL, *prev = NULL; ++ ++ BUG_ON(RB_EMPTY_NODE(&last->rb_node)); ++ ++ if (rbprev != NULL) ++ prev = rb_entry_rq(rbprev); ++ ++ if (rbnext != NULL) ++ next = rb_entry_rq(rbnext); ++ else { ++ rbnext = rb_first(&bfqq->sort_list); ++ if (rbnext && rbnext != &last->rb_node) ++ next = rb_entry_rq(rbnext); ++ } ++ ++ return bfq_choose_req(bfqd, next, prev, blk_rq_pos(last)); ++} ++ ++static void bfq_del_rq_rb(struct request *rq) ++{ ++ struct bfq_queue *bfqq = RQ_BFQQ(rq); ++ struct bfq_data *bfqd = bfqq->bfqd; ++ const int sync = rq_is_sync(rq); ++ ++ BUG_ON(bfqq->queued[sync] == 0); ++ bfqq->queued[sync]--; ++ bfqd->queued--; ++ ++ elv_rb_del(&bfqq->sort_list, rq); ++ ++ if (RB_EMPTY_ROOT(&bfqq->sort_list)) { ++ if (bfq_bfqq_busy(bfqq) && bfqq != bfqd->in_service_queue) ++ bfq_del_bfqq_busy(bfqd, bfqq, 1); ++ /* ++ * Remove queue from request-position tree as it is empty. ++ */ ++ if (bfqq->pos_root != NULL) { ++ rb_erase(&bfqq->pos_node, bfqq->pos_root); ++ bfqq->pos_root = NULL; ++ } ++ } ++} ++ ++/* see the definition of bfq_async_charge_factor for details */ ++static inline unsigned long bfq_serv_to_charge(struct request *rq, ++ struct bfq_queue *bfqq) ++{ ++ return blk_rq_sectors(rq) * ++ (1 + ((!bfq_bfqq_sync(bfqq)) * (bfqq->raising_coeff == 1) * ++ bfq_async_charge_factor)); ++} ++ ++/** ++ * bfq_updated_next_req - update the queue after a new next_rq selection. ++ * @bfqd: the device data the queue belongs to. ++ * @bfqq: the queue to update. ++ * ++ * If the first request of a queue changes we make sure that the queue ++ * has enough budget to serve at least its first request (if the ++ * request has grown). We do this because if the queue has not enough ++ * budget for its first request, it has to go through two dispatch ++ * rounds to actually get it dispatched. ++ */ ++static void bfq_updated_next_req(struct bfq_data *bfqd, ++ struct bfq_queue *bfqq) ++{ ++ struct bfq_entity *entity = &bfqq->entity; ++ struct bfq_service_tree *st = bfq_entity_service_tree(entity); ++ struct request *next_rq = bfqq->next_rq; ++ unsigned long new_budget; ++ ++ if (next_rq == NULL) ++ return; ++ ++ if (bfqq == bfqd->in_service_queue) ++ /* ++ * In order not to break guarantees, budgets cannot be ++ * changed after an entity has been selected. 
++ */ ++ return; ++ ++ BUG_ON(entity->tree != &st->active); ++ BUG_ON(entity == entity->sched_data->active_entity); ++ ++ new_budget = max_t(unsigned long, bfqq->max_budget, ++ bfq_serv_to_charge(next_rq, bfqq)); ++ entity->budget = new_budget; ++ bfq_log_bfqq(bfqd, bfqq, "updated next rq: new budget %lu", new_budget); ++ bfq_activate_bfqq(bfqd, bfqq); ++} ++ ++static inline unsigned int bfq_wrais_duration(struct bfq_data *bfqd) ++{ ++ u64 dur; ++ ++ if (bfqd->bfq_raising_max_time > 0) ++ return bfqd->bfq_raising_max_time; ++ ++ dur = bfqd->RT_prod; ++ do_div(dur, bfqd->peak_rate); ++ ++ return dur; ++} ++ ++static void bfq_add_rq_rb(struct request *rq) ++{ ++ struct bfq_queue *bfqq = RQ_BFQQ(rq); ++ struct bfq_entity *entity = &bfqq->entity; ++ struct bfq_data *bfqd = bfqq->bfqd; ++ struct request *next_rq, *prev; ++ unsigned long old_raising_coeff = bfqq->raising_coeff; ++ int idle_for_long_time = 0; ++ ++ bfq_log_bfqq(bfqd, bfqq, "add_rq_rb %d", rq_is_sync(rq)); ++ bfqq->queued[rq_is_sync(rq)]++; ++ bfqd->queued++; ++ ++ elv_rb_add(&bfqq->sort_list, rq); ++ ++ /* ++ * Check if this request is a better next-serve candidate. ++ */ ++ prev = bfqq->next_rq; ++ next_rq = bfq_choose_req(bfqd, bfqq->next_rq, rq, bfqd->last_position); ++ BUG_ON(next_rq == NULL); ++ bfqq->next_rq = next_rq; ++ ++ /* ++ * Adjust priority tree position, if next_rq changes. ++ */ ++ if (prev != bfqq->next_rq) ++ bfq_rq_pos_tree_add(bfqd, bfqq); ++ ++ if (!bfq_bfqq_busy(bfqq)) { ++ int soft_rt = bfqd->bfq_raising_max_softrt_rate > 0 && ++ time_is_before_jiffies(bfqq->soft_rt_next_start); ++ idle_for_long_time = time_is_before_jiffies( ++ bfqq->budget_timeout + ++ bfqd->bfq_raising_min_idle_time); ++ entity->budget = max_t(unsigned long, bfqq->max_budget, ++ bfq_serv_to_charge(next_rq, bfqq)); ++ ++ if (!bfqd->low_latency) ++ goto add_bfqq_busy; ++ ++ /* ++ * If the queue is not being boosted and has been idle ++ * for enough time, start a weight-raising period ++ */ ++ if (old_raising_coeff == 1 && ++ (idle_for_long_time || soft_rt)) { ++ bfqq->raising_coeff = bfqd->bfq_raising_coeff; ++ if (idle_for_long_time) ++ bfqq->raising_cur_max_time = ++ bfq_wrais_duration(bfqd); ++ else ++ bfqq->raising_cur_max_time = ++ bfqd->bfq_raising_rt_max_time; ++ bfq_log_bfqq(bfqd, bfqq, ++ "wrais starting at %llu msec," ++ "rais_max_time %u", ++ bfqq->last_rais_start_finish, ++ jiffies_to_msecs(bfqq-> ++ raising_cur_max_time)); ++ } else if (old_raising_coeff > 1) { ++ if (idle_for_long_time) ++ bfqq->raising_cur_max_time = ++ bfq_wrais_duration(bfqd); ++ else if (bfqq->raising_cur_max_time == ++ bfqd->bfq_raising_rt_max_time && ++ !soft_rt) { ++ bfqq->raising_coeff = 1; ++ bfq_log_bfqq(bfqd, bfqq, ++ "wrais ending at %llu msec," ++ "rais_max_time %u", ++ bfqq->last_rais_start_finish, ++ jiffies_to_msecs(bfqq-> ++ raising_cur_max_time)); ++ } else if ((bfqq->last_rais_start_finish + ++ bfqq->raising_cur_max_time < ++ jiffies + bfqd->bfq_raising_rt_max_time) && ++ soft_rt) { ++ /* ++ * ++ * The remaining weight-raising time is lower ++ * than bfqd->bfq_raising_rt_max_time, which ++ * means that the application is enjoying ++ * weight raising either because deemed soft rt ++ * in the near past, or because deemed ++ * interactive a long ago. 
In both cases, ++ * resetting now the current remaining weight- ++ * raising time for the application to the ++ * weight-raising duration for soft rt ++ * applications would not cause any latency ++ * increase for the application (as the new ++ * duration would be higher than the remaining ++ * time). ++ * ++ * In addition, the application is now meeting ++ * the requirements for being deemed soft rt. ++ * In the end we can correctly and safely ++ * (re)charge the weight-raising duration for ++ * the application with the weight-raising ++ * duration for soft rt applications. ++ * ++ * In particular, doing this recharge now, i.e., ++ * before the weight-raising period for the ++ * application finishes, reduces the probability ++ * of the following negative scenario: ++ * 1) the weight of a soft rt application is ++ * raised at startup (as for any newly ++ * created application), ++ * 2) since the application is not interactive, ++ * at a certain time weight-raising is ++ * stopped for the application, ++ * 3) at that time the application happens to ++ * still have pending requests, and hence ++ * is destined to not have a chance to be ++ * deemed soft rt before these requests are ++ * completed (see the comments to the ++ * function bfq_bfqq_softrt_next_start() ++ * for details on soft rt detection), ++ * 4) these pending requests experience a high ++ * latency because the application is not ++ * weight-raised while they are pending. ++ */ ++ bfqq->last_rais_start_finish = jiffies; ++ bfqq->raising_cur_max_time = ++ bfqd->bfq_raising_rt_max_time; ++ } ++ } ++ if (old_raising_coeff != bfqq->raising_coeff) ++ entity->ioprio_changed = 1; ++add_bfqq_busy: ++ bfqq->last_idle_bklogged = jiffies; ++ bfqq->service_from_backlogged = 0; ++ bfq_clear_bfqq_softrt_update(bfqq); ++ bfq_add_bfqq_busy(bfqd, bfqq); ++ } else { ++ if (bfqd->low_latency && old_raising_coeff == 1 && ++ !rq_is_sync(rq) && ++ bfqq->last_rais_start_finish + ++ time_is_before_jiffies( ++ bfqd->bfq_raising_min_inter_arr_async)) { ++ bfqq->raising_coeff = bfqd->bfq_raising_coeff; ++ bfqq->raising_cur_max_time = bfq_wrais_duration(bfqd); ++ ++ bfqd->raised_busy_queues++; ++ entity->ioprio_changed = 1; ++ bfq_log_bfqq(bfqd, bfqq, ++ "non-idle wrais starting at %llu msec," ++ "rais_max_time %u", ++ bfqq->last_rais_start_finish, ++ jiffies_to_msecs(bfqq-> ++ raising_cur_max_time)); ++ } ++ bfq_updated_next_req(bfqd, bfqq); ++ } ++ ++ if (bfqd->low_latency && ++ (old_raising_coeff == 1 || bfqq->raising_coeff == 1 || ++ idle_for_long_time)) ++ bfqq->last_rais_start_finish = jiffies; ++} ++ ++static void bfq_reposition_rq_rb(struct bfq_queue *bfqq, struct request *rq) ++{ ++ elv_rb_del(&bfqq->sort_list, rq); ++ bfqq->queued[rq_is_sync(rq)]--; ++ bfqq->bfqd->queued--; ++ bfq_add_rq_rb(rq); ++} ++ ++static struct request *bfq_find_rq_fmerge(struct bfq_data *bfqd, ++ struct bio *bio) ++{ ++ struct task_struct *tsk = current; ++ struct bfq_io_cq *bic; ++ struct bfq_queue *bfqq; ++ ++ bic = bfq_bic_lookup(bfqd, tsk->io_context); ++ if (bic == NULL) ++ return NULL; ++ ++ bfqq = bic_to_bfqq(bic, bfq_bio_sync(bio)); ++ if (bfqq != NULL) ++ return elv_rb_find(&bfqq->sort_list, bio_end_sector(bio)); ++ ++ return NULL; ++} ++ ++static void bfq_activate_request(struct request_queue *q, struct request *rq) ++{ ++ struct bfq_data *bfqd = q->elevator->elevator_data; ++ ++ bfqd->rq_in_driver++; ++ bfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq); ++ bfq_log(bfqd, "activate_request: new bfqd->last_position %llu", ++ (long long 
unsigned)bfqd->last_position); ++} ++ ++static void bfq_deactivate_request(struct request_queue *q, struct request *rq) ++{ ++ struct bfq_data *bfqd = q->elevator->elevator_data; ++ ++ WARN_ON(bfqd->rq_in_driver == 0); ++ bfqd->rq_in_driver--; ++} ++ ++static void bfq_remove_request(struct request *rq) ++{ ++ struct bfq_queue *bfqq = RQ_BFQQ(rq); ++ struct bfq_data *bfqd = bfqq->bfqd; ++ ++ if (bfqq->next_rq == rq) { ++ bfqq->next_rq = bfq_find_next_rq(bfqd, bfqq, rq); ++ bfq_updated_next_req(bfqd, bfqq); ++ } ++ ++ list_del_init(&rq->queuelist); ++ bfq_del_rq_rb(rq); ++ ++ if (rq->cmd_flags & REQ_META) { ++ WARN_ON(bfqq->meta_pending == 0); ++ bfqq->meta_pending--; ++ } ++} ++ ++static int bfq_merge(struct request_queue *q, struct request **req, ++ struct bio *bio) ++{ ++ struct bfq_data *bfqd = q->elevator->elevator_data; ++ struct request *__rq; ++ ++ __rq = bfq_find_rq_fmerge(bfqd, bio); ++ if (__rq != NULL && elv_rq_merge_ok(__rq, bio)) { ++ *req = __rq; ++ return ELEVATOR_FRONT_MERGE; ++ } ++ ++ return ELEVATOR_NO_MERGE; ++} ++ ++static void bfq_merged_request(struct request_queue *q, struct request *req, ++ int type) ++{ ++ if (type == ELEVATOR_FRONT_MERGE) { ++ struct bfq_queue *bfqq = RQ_BFQQ(req); ++ ++ bfq_reposition_rq_rb(bfqq, req); ++ } ++} ++ ++static void bfq_merged_requests(struct request_queue *q, struct request *rq, ++ struct request *next) ++{ ++ struct bfq_queue *bfqq = RQ_BFQQ(rq); ++ ++ /* ++ * Reposition in fifo if next is older than rq. ++ */ ++ if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) && ++ time_before(rq_fifo_time(next), rq_fifo_time(rq))) { ++ list_move(&rq->queuelist, &next->queuelist); ++ rq_set_fifo_time(rq, rq_fifo_time(next)); ++ } ++ ++ if (bfqq->next_rq == next) ++ bfqq->next_rq = rq; ++ ++ bfq_remove_request(next); ++} ++ ++/* Must be called with bfqq != NULL */ ++static inline void bfq_bfqq_end_raising(struct bfq_queue *bfqq) ++{ ++ BUG_ON(bfqq == NULL); ++ if (bfq_bfqq_busy(bfqq)) ++ bfqq->bfqd->raised_busy_queues--; ++ bfqq->raising_coeff = 1; ++ bfqq->raising_cur_max_time = 0; ++ /* Trigger a weight change on the next activation of the queue */ ++ bfqq->entity.ioprio_changed = 1; ++} ++ ++static void bfq_end_raising_async_queues(struct bfq_data *bfqd, ++ struct bfq_group *bfqg) ++{ ++ int i, j; ++ ++ for (i = 0; i < 2; i++) ++ for (j = 0; j < IOPRIO_BE_NR; j++) ++ if (bfqg->async_bfqq[i][j] != NULL) ++ bfq_bfqq_end_raising(bfqg->async_bfqq[i][j]); ++ if (bfqg->async_idle_bfqq != NULL) ++ bfq_bfqq_end_raising(bfqg->async_idle_bfqq); ++} ++ ++static void bfq_end_raising(struct bfq_data *bfqd) ++{ ++ struct bfq_queue *bfqq; ++ ++ spin_lock_irq(bfqd->queue->queue_lock); ++ ++ list_for_each_entry(bfqq, &bfqd->active_list, bfqq_list) ++ bfq_bfqq_end_raising(bfqq); ++ list_for_each_entry(bfqq, &bfqd->idle_list, bfqq_list) ++ bfq_bfqq_end_raising(bfqq); ++ bfq_end_raising_async(bfqd); ++ ++ spin_unlock_irq(bfqd->queue->queue_lock); ++} ++ ++static int bfq_allow_merge(struct request_queue *q, struct request *rq, ++ struct bio *bio) ++{ ++ struct bfq_data *bfqd = q->elevator->elevator_data; ++ struct bfq_io_cq *bic; ++ struct bfq_queue *bfqq; ++ ++ /* ++ * Disallow merge of a sync bio into an async request. ++ */ ++ if (bfq_bio_sync(bio) && !rq_is_sync(rq)) ++ return 0; ++ ++ /* ++ * Lookup the bfqq that this bio will be queued with. Allow ++ * merge only if rq is queued there. ++ * Queue lock is held here. 
++ */ ++ bic = bfq_bic_lookup(bfqd, current->io_context); ++ if (bic == NULL) ++ return 0; ++ ++ bfqq = bic_to_bfqq(bic, bfq_bio_sync(bio)); ++ return bfqq == RQ_BFQQ(rq); ++} ++ ++static void __bfq_set_in_service_queue(struct bfq_data *bfqd, ++ struct bfq_queue *bfqq) ++{ ++ if (bfqq != NULL) { ++ bfq_mark_bfqq_must_alloc(bfqq); ++ bfq_mark_bfqq_budget_new(bfqq); ++ bfq_clear_bfqq_fifo_expire(bfqq); ++ ++ bfqd->budgets_assigned = (bfqd->budgets_assigned*7 + 256) / 8; ++ ++ bfq_log_bfqq(bfqd, bfqq, ++ "set_in_service_queue, cur-budget = %lu", ++ bfqq->entity.budget); ++ } ++ ++ bfqd->in_service_queue = bfqq; ++} ++ ++/* ++ * Get and set a new queue for service. ++ */ ++static struct bfq_queue *bfq_set_in_service_queue(struct bfq_data *bfqd, ++ struct bfq_queue *bfqq) ++{ ++ if (!bfqq) ++ bfqq = bfq_get_next_queue(bfqd); ++ else ++ bfq_get_next_queue_forced(bfqd, bfqq); ++ ++ __bfq_set_in_service_queue(bfqd, bfqq); ++ return bfqq; ++} ++ ++static inline sector_t bfq_dist_from_last(struct bfq_data *bfqd, ++ struct request *rq) ++{ ++ if (blk_rq_pos(rq) >= bfqd->last_position) ++ return blk_rq_pos(rq) - bfqd->last_position; ++ else ++ return bfqd->last_position - blk_rq_pos(rq); ++} ++ ++/* ++ * Return true if bfqq has no request pending and rq is close enough to ++ * bfqd->last_position, or if rq is closer to bfqd->last_position than ++ * bfqq->next_rq ++ */ ++static inline int bfq_rq_close(struct bfq_data *bfqd, struct request *rq) ++{ ++ return bfq_dist_from_last(bfqd, rq) <= BFQQ_SEEK_THR; ++} ++ ++static struct bfq_queue *bfqq_close(struct bfq_data *bfqd) ++{ ++ struct rb_root *root = &bfqd->rq_pos_tree; ++ struct rb_node *parent, *node; ++ struct bfq_queue *__bfqq; ++ sector_t sector = bfqd->last_position; ++ ++ if (RB_EMPTY_ROOT(root)) ++ return NULL; ++ ++ /* ++ * First, if we find a request starting at the end of the last ++ * request, choose it. ++ */ ++ __bfqq = bfq_rq_pos_tree_lookup(bfqd, root, sector, &parent, NULL); ++ if (__bfqq != NULL) ++ return __bfqq; ++ ++ /* ++ * If the exact sector wasn't found, the parent of the NULL leaf ++ * will contain the closest sector (rq_pos_tree sorted by next_request ++ * position). ++ */ ++ __bfqq = rb_entry(parent, struct bfq_queue, pos_node); ++ if (bfq_rq_close(bfqd, __bfqq->next_rq)) ++ return __bfqq; ++ ++ if (blk_rq_pos(__bfqq->next_rq) < sector) ++ node = rb_next(&__bfqq->pos_node); ++ else ++ node = rb_prev(&__bfqq->pos_node); ++ if (node == NULL) ++ return NULL; ++ ++ __bfqq = rb_entry(node, struct bfq_queue, pos_node); ++ if (bfq_rq_close(bfqd, __bfqq->next_rq)) ++ return __bfqq; ++ ++ return NULL; ++} ++ ++/* ++ * bfqd - obvious ++ * cur_bfqq - passed in so that we don't decide that the current queue ++ * is closely cooperating with itself. ++ * ++ * We are assuming that cur_bfqq has dispatched at least one request, ++ * and that bfqd->last_position reflects a position on the disk associated ++ * with the I/O issued by cur_bfqq. ++ */ ++static struct bfq_queue *bfq_close_cooperator(struct bfq_data *bfqd, ++ struct bfq_queue *cur_bfqq) ++{ ++ struct bfq_queue *bfqq; ++ ++ if (bfq_class_idle(cur_bfqq)) ++ return NULL; ++ if (!bfq_bfqq_sync(cur_bfqq)) ++ return NULL; ++ if (BFQQ_SEEKY(cur_bfqq)) ++ return NULL; ++ ++ /* If device has only one backlogged bfq_queue, don't search. */ ++ if (bfqd->busy_queues == 1) ++ return NULL; ++ ++ /* ++ * We should notice if some of the queues are cooperating, e.g. ++ * working closely on the same area of the disk. In that case, ++ * we can group them together and don't waste time idling. 
++ */ ++ bfqq = bfqq_close(bfqd); ++ if (bfqq == NULL || bfqq == cur_bfqq) ++ return NULL; ++ ++ /* ++ * Do not merge queues from different bfq_groups. ++ */ ++ if (bfqq->entity.parent != cur_bfqq->entity.parent) ++ return NULL; ++ ++ /* ++ * It only makes sense to merge sync queues. ++ */ ++ if (!bfq_bfqq_sync(bfqq)) ++ return NULL; ++ if (BFQQ_SEEKY(bfqq)) ++ return NULL; ++ ++ /* ++ * Do not merge queues of different priority classes. ++ */ ++ if (bfq_class_rt(bfqq) != bfq_class_rt(cur_bfqq)) ++ return NULL; ++ ++ return bfqq; ++} ++ ++/* ++ * If enough samples have been computed, return the current max budget ++ * stored in bfqd, which is dynamically updated according to the ++ * estimated disk peak rate; otherwise return the default max budget ++ */ ++static inline unsigned long bfq_max_budget(struct bfq_data *bfqd) ++{ ++ if (bfqd->budgets_assigned < 194) ++ return bfq_default_max_budget; ++ else ++ return bfqd->bfq_max_budget; ++} ++ ++/* ++ * Return min budget, which is a fraction of the current or default ++ * max budget (trying with 1/32) ++ */ ++static inline unsigned long bfq_min_budget(struct bfq_data *bfqd) ++{ ++ if (bfqd->budgets_assigned < 194) ++ return bfq_default_max_budget / 32; ++ else ++ return bfqd->bfq_max_budget / 32; ++} ++ ++/* ++ * Decides whether idling should be done for given device and ++ * given in-service queue. ++ */ ++static inline bool bfq_queue_nonrot_noidle(struct bfq_data *bfqd, ++ struct bfq_queue *in_service_bfqq) ++{ ++ if (in_service_bfqq == NULL) ++ return false; ++ /* ++ * If device is SSD it has no seek penalty, disable idling; but ++ * do so only if: ++ * - device does not support queuing, otherwise we still have ++ * a problem with sync vs async workloads; ++ * - the queue is not weight-raised, to preserve guarantees. ++ */ ++ return (blk_queue_nonrot(bfqd->queue) && bfqd->hw_tag && ++ in_service_bfqq->raising_coeff == 1); ++} ++ ++static void bfq_arm_slice_timer(struct bfq_data *bfqd) ++{ ++ struct bfq_queue *bfqq = bfqd->in_service_queue; ++ struct bfq_io_cq *bic; ++ unsigned long sl; ++ ++ WARN_ON(!RB_EMPTY_ROOT(&bfqq->sort_list)); ++ ++ /* Tasks have exited, don't wait. */ ++ bic = bfqd->in_service_bic; ++ if (bic == NULL || atomic_read(&bic->icq.ioc->active_ref) == 0) ++ return; ++ ++ bfq_mark_bfqq_wait_request(bfqq); ++ ++ /* ++ * We don't want to idle for seeks, but we do want to allow ++ * fair distribution of slice time for a process doing back-to-back ++ * seeks. So allow a little bit of time for him to submit a new rq. ++ * ++ * To prevent processes with (partly) seeky workloads from ++ * being too ill-treated, grant them a small fraction of the ++ * assigned budget before reducing the waiting time to ++ * BFQ_MIN_TT. This happened to help reduce latency. ++ */ ++ sl = bfqd->bfq_slice_idle; ++ if (bfq_sample_valid(bfqq->seek_samples) && BFQQ_SEEKY(bfqq) && ++ bfqq->entity.service > bfq_max_budget(bfqd) / 8 && ++ bfqq->raising_coeff == 1) ++ sl = min(sl, msecs_to_jiffies(BFQ_MIN_TT)); ++ else if (bfqq->raising_coeff > 1) ++ sl = sl * 3; ++ bfqd->last_idling_start = ktime_get(); ++ mod_timer(&bfqd->idle_slice_timer, jiffies + sl); ++ bfq_log(bfqd, "arm idle: %u/%u ms", ++ jiffies_to_msecs(sl), jiffies_to_msecs(bfqd->bfq_slice_idle)); ++} ++ ++/* ++ * Set the maximum time for the in-service queue to consume its ++ * budget. This prevents seeky processes from lowering the disk ++ * throughput (always guaranteed with a time slice scheme as in CFQ). 
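For the budget timeout set below, bfq_set_budget_timeout() scales bfq_timeout[sync] by entity->weight / entity->orig_weight, except during the soft real-time raising period, where the coefficient stays 1. A small numeric sketch; the weights and the HZ value are invented for illustration:

#include <stdio.h>

#define HZ 1000					/* assumed tick rate */
static const int bfq_timeout_sync = HZ / 8;	/* 125 jiffies */

int main(void)
{
	/* assumed weights: a queue whose weight was raised 20x */
	unsigned int orig_weight = 100, weight = 2000;
	unsigned int timeout_coeff = weight / orig_weight;

	printf("budget timeout: %d -> %d jiffies\n",
	       bfq_timeout_sync, bfq_timeout_sync * timeout_coeff);
	/* a weight-raised queue gets a proportionally longer budget
	 * timeout (here 125 -> 2500 jiffies), so its larger share of
	 * service is not cut short by the plain sync timeout. */
	return 0;
}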
++ */ ++static void bfq_set_budget_timeout(struct bfq_data *bfqd) ++{ ++ struct bfq_queue *bfqq = bfqd->in_service_queue; ++ unsigned int timeout_coeff; ++ if (bfqq->raising_cur_max_time == bfqd->bfq_raising_rt_max_time) ++ timeout_coeff = 1; ++ else ++ timeout_coeff = bfqq->entity.weight / bfqq->entity.orig_weight; ++ ++ bfqd->last_budget_start = ktime_get(); ++ ++ bfq_clear_bfqq_budget_new(bfqq); ++ bfqq->budget_timeout = jiffies + ++ bfqd->bfq_timeout[bfq_bfqq_sync(bfqq)] * timeout_coeff; ++ ++ bfq_log_bfqq(bfqd, bfqq, "set budget_timeout %u", ++ jiffies_to_msecs(bfqd->bfq_timeout[bfq_bfqq_sync(bfqq)] * ++ timeout_coeff)); ++} ++ ++/* ++ * Move request from internal lists to the request queue dispatch list. ++ */ ++static void bfq_dispatch_insert(struct request_queue *q, struct request *rq) ++{ ++ struct bfq_data *bfqd = q->elevator->elevator_data; ++ struct bfq_queue *bfqq = RQ_BFQQ(rq); ++ ++ bfq_remove_request(rq); ++ bfqq->dispatched++; ++ elv_dispatch_sort(q, rq); ++ ++ if (bfq_bfqq_sync(bfqq)) ++ bfqd->sync_flight++; ++} ++ ++/* ++ * Return expired entry, or NULL to just start from scratch in rbtree. ++ */ ++static struct request *bfq_check_fifo(struct bfq_queue *bfqq) ++{ ++ struct request *rq = NULL; ++ ++ if (bfq_bfqq_fifo_expire(bfqq)) ++ return NULL; ++ ++ bfq_mark_bfqq_fifo_expire(bfqq); ++ ++ if (list_empty(&bfqq->fifo)) ++ return NULL; ++ ++ rq = rq_entry_fifo(bfqq->fifo.next); ++ ++ if (time_before(jiffies, rq_fifo_time(rq))) ++ return NULL; ++ ++ return rq; ++} ++ ++/* ++ * Must be called with the queue_lock held. ++ */ ++static int bfqq_process_refs(struct bfq_queue *bfqq) ++{ ++ int process_refs, io_refs; ++ ++ io_refs = bfqq->allocated[READ] + bfqq->allocated[WRITE]; ++ process_refs = atomic_read(&bfqq->ref) - io_refs - bfqq->entity.on_st; ++ BUG_ON(process_refs < 0); ++ return process_refs; ++} ++ ++static void bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq) ++{ ++ int process_refs, new_process_refs; ++ struct bfq_queue *__bfqq; ++ ++ /* ++ * If there are no process references on the new_bfqq, then it is ++ * unsafe to follow the ->new_bfqq chain as other bfqq's in the chain ++ * may have dropped their last reference (not just their last process ++ * reference). ++ */ ++ if (!bfqq_process_refs(new_bfqq)) ++ return; ++ ++ /* Avoid a circular list and skip interim queue merges. */ ++ while ((__bfqq = new_bfqq->new_bfqq)) { ++ if (__bfqq == bfqq) ++ return; ++ new_bfqq = __bfqq; ++ } ++ ++ process_refs = bfqq_process_refs(bfqq); ++ new_process_refs = bfqq_process_refs(new_bfqq); ++ /* ++ * If the process for the bfqq has gone away, there is no ++ * sense in merging the queues. ++ */ ++ if (process_refs == 0 || new_process_refs == 0) ++ return; ++ ++ /* ++ * Merge in the direction of the lesser amount of work. 
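The bookkeeping behind this choice is bfqq_process_refs() above: subtracting the references held by queued I/O and by the service trees from the total leaves the references held by processes. A toy recalculation, with all counts invented:

#include <stdio.h>

int main(void)
{
	int total_refs = 7;		/* atomic_read(&bfqq->ref) */
	int allocated_read = 3, allocated_write = 1;
	int on_service_tree = 1;	/* entity->on_st */

	int io_refs = allocated_read + allocated_write;
	int process_refs = total_refs - io_refs - on_service_tree;

	printf("process refs = %d\n", process_refs);	/* prints 2 */
	/* bfq_setup_merge() redirects the queue with fewer process
	 * refs into the one with more, so the smaller set of tasks is
	 * the one that has to chase the ->new_bfqq link. */
	return 0;
}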
++ */
++	if (new_process_refs >= process_refs) {
++		bfqq->new_bfqq = new_bfqq;
++		atomic_add(process_refs, &new_bfqq->ref);
++	} else {
++		new_bfqq->new_bfqq = bfqq;
++		atomic_add(new_process_refs, &bfqq->ref);
++	}
++	bfq_log_bfqq(bfqq->bfqd, bfqq, "scheduling merge with queue %d",
++		new_bfqq->pid);
++}
++
++static inline unsigned long bfq_bfqq_budget_left(struct bfq_queue *bfqq)
++{
++	struct bfq_entity *entity = &bfqq->entity;
++	return entity->budget - entity->service;
++}
++
++static void __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq)
++{
++	BUG_ON(bfqq != bfqd->in_service_queue);
++
++	__bfq_bfqd_reset_in_service(bfqd);
++
++	/*
++	 * If this bfqq is shared between multiple processes, check
++	 * to make sure that those processes are still issuing I/Os
++	 * within the mean seek distance. If not, it may be time to
++	 * break the queues apart again.
++	 */
++	if (bfq_bfqq_coop(bfqq) && BFQQ_SEEKY(bfqq))
++		bfq_mark_bfqq_split_coop(bfqq);
++
++	if (RB_EMPTY_ROOT(&bfqq->sort_list)) {
++		/*
++		 * Overloading budget_timeout field to store when
++		 * the queue remains with no backlog, used by
++		 * the weight-raising mechanism.
++		 */
++		bfqq->budget_timeout = jiffies;
++		bfq_del_bfqq_busy(bfqd, bfqq, 1);
++	} else {
++		bfq_activate_bfqq(bfqd, bfqq);
++		/*
++		 * Resort priority tree of potential close cooperators.
++		 */
++		bfq_rq_pos_tree_add(bfqd, bfqq);
++	}
++}
++
++/**
++ * __bfq_bfqq_recalc_budget - try to adapt the budget to the @bfqq behavior.
++ * @bfqd: device data.
++ * @bfqq: queue to update.
++ * @reason: reason for expiration.
++ *
++ * Handle the feedback on @bfqq budget. See the body for detailed
++ * comments.
++ */
++static void __bfq_bfqq_recalc_budget(struct bfq_data *bfqd,
++				     struct bfq_queue *bfqq,
++				     enum bfqq_expiration reason)
++{
++	struct request *next_rq;
++	unsigned long budget, min_budget;
++
++	budget = bfqq->max_budget;
++	min_budget = bfq_min_budget(bfqd);
++
++	BUG_ON(bfqq != bfqd->in_service_queue);
++
++	bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last budg %lu, budg left %lu",
++		bfqq->entity.budget, bfq_bfqq_budget_left(bfqq));
++	bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last max_budg %lu, min budg %lu",
++		budget, bfq_min_budget(bfqd));
++	bfq_log_bfqq(bfqd, bfqq, "recalc_budg: sync %d, seeky %d",
++		bfq_bfqq_sync(bfqq), BFQQ_SEEKY(bfqd->in_service_queue));
++
++	if (bfq_bfqq_sync(bfqq)) {
++		switch (reason) {
++		/*
++		 * Caveat: in all the following cases we trade latency
++		 * for throughput.
++		 */
++		case BFQ_BFQQ_TOO_IDLE:
++			/*
++			 * This is the only case where we may reduce
++			 * the budget: if there is no request of the
++			 * process still waiting for completion, then
++			 * we assume (tentatively) that the timer has
++			 * expired because the batch of requests of
++			 * the process could have been served with a
++			 * smaller budget. Hence, betting that the
++			 * process will behave in the same way when it
++			 * becomes backlogged again, we reduce its
++			 * next budget. As long as we guess right,
++			 * this budget cut reduces the latency
++			 * experienced by the process.
++			 *
++			 * However, if there are still outstanding
++			 * requests, then the process may have not yet
++			 * issued its next request just because it is
++			 * still waiting for the completion of some of
++			 * the still outstanding ones. So in this
++			 * subcase we do not reduce its budget, on the
++			 * contrary we increase it to possibly boost
++			 * the throughput, as discussed in the
++			 * comments to the BUDGET_TIMEOUT case.
++			 */
++			if (bfqq->dispatched > 0) /* still outstanding reqs */
++				budget = min(budget * 2, bfqd->bfq_max_budget);
++			else {
++				if (budget > 5 * min_budget)
++					budget -= 4 * min_budget;
++				else
++					budget = min_budget;
++			}
++			break;
++		case BFQ_BFQQ_BUDGET_TIMEOUT:
++			/*
++			 * We double the budget here because: 1) it
++			 * gives the chance to boost the throughput if
++			 * this is not a seeky process (which may have
++			 * bumped into this timeout because of, e.g.,
++			 * ZBR), 2) together with charge_full_budget
++			 * it helps give seeky processes higher
++			 * timestamps, and hence be served less
++			 * frequently.
++			 */
++			budget = min(budget * 2, bfqd->bfq_max_budget);
++			break;
++		case BFQ_BFQQ_BUDGET_EXHAUSTED:
++			/*
++			 * The process still has backlog, and did not
++			 * let either the budget timeout or the disk
++			 * idling timeout expire. Hence it is not
++			 * seeky, has a short thinktime and may be
++			 * happy with a higher budget too. So
++			 * definitely increase the budget of this good
++			 * candidate to boost the disk throughput.
++			 */
++			budget = min(budget * 4, bfqd->bfq_max_budget);
++			break;
++		case BFQ_BFQQ_NO_MORE_REQUESTS:
++			/*
++			 * Leave the budget unchanged.
++			 */
++		default:
++			return;
++		}
++	} else /* async queue */
++		/*
++		 * Async queues always get the maximum possible budget
++		 * (their ability to dispatch is limited by
++		 * @bfqd->bfq_max_budget_async_rq).
++		 */
++		budget = bfqd->bfq_max_budget;
++
++	bfqq->max_budget = budget;
++
++	if (bfqd->budgets_assigned >= 194 && bfqd->bfq_user_max_budget == 0 &&
++	    bfqq->max_budget > bfqd->bfq_max_budget)
++		bfqq->max_budget = bfqd->bfq_max_budget;
++
++	/*
++	 * Make sure that we have enough budget for the next request.
++	 * Since the finish time of the bfqq must be kept in sync with
++	 * the budget, be sure to call __bfq_bfqq_expire() after the
++	 * update.
++	 */
++	next_rq = bfqq->next_rq;
++	if (next_rq != NULL)
++		bfqq->entity.budget = max_t(unsigned long, bfqq->max_budget,
++					    bfq_serv_to_charge(next_rq, bfqq));
++	else
++		bfqq->entity.budget = bfqq->max_budget;
++
++	bfq_log_bfqq(bfqd, bfqq, "head sect: %u, new budget %lu",
++		next_rq != NULL ? blk_rq_sectors(next_rq) : 0,
++		bfqq->entity.budget);
++}
++
++static unsigned long bfq_calc_max_budget(u64 peak_rate, u64 timeout)
++{
++	unsigned long max_budget;
++
++	/*
++	 * The max_budget calculated when autotuning is equal to the
++	 * amount of sectors transferred in timeout_sync at the
++	 * estimated peak rate.
++	 */
++	max_budget = (unsigned long)(peak_rate * 1000 *
++				     timeout >> BFQ_RATE_SHIFT);
++
++	return max_budget;
++}
++
++/*
++ * In addition to updating the peak rate, checks whether the process
++ * is "slow", and returns 1 if so. This slow flag is used, in addition
++ * to the budget timeout, to reduce the amount of service provided to
++ * seeky processes, and hence reduce their chances of lowering the
++ * throughput. See the code for more details.
++ */
++static int bfq_update_peak_rate(struct bfq_data *bfqd, struct bfq_queue *bfqq,
++				int compensate, enum bfqq_expiration reason)
++{
++	u64 bw, usecs, expected, timeout;
++	ktime_t delta;
++	int update = 0;
++
++	if (!bfq_bfqq_sync(bfqq) || bfq_bfqq_budget_new(bfqq))
++		return 0;
++
++	if (compensate)
++		delta = bfqd->last_idling_start;
++	else
++		delta = ktime_get();
++	delta = ktime_sub(delta, bfqd->last_budget_start);
++	usecs = ktime_to_us(delta);
++
++	/* Don't trust short/unrealistic values. */
++	if (usecs < 100 || usecs >= LONG_MAX)
++		return 0;
++
++	/*
++	 * Calculate the bandwidth for the last slice.
++	 * We use a 64 bit value to store the peak rate, in sectors
++	 * per usec in fixed point math. We do so to have enough
++	 * precision in the estimate and to avoid overflows.
++	 */
++	bw = (u64)bfqq->entity.service << BFQ_RATE_SHIFT;
++	do_div(bw, (unsigned long)usecs);
++
++	timeout = jiffies_to_msecs(bfqd->bfq_timeout[BLK_RW_SYNC]);
++
++	/*
++	 * Use only long (> 20ms) intervals to filter out spikes for
++	 * the peak rate estimation.
++	 */
++	if (usecs > 20000) {
++		if (bw > bfqd->peak_rate ||
++		    (!BFQQ_SEEKY(bfqq) &&
++		     reason == BFQ_BFQQ_BUDGET_TIMEOUT)) {
++			bfq_log(bfqd, "measured bw =%llu", bw);
++			/*
++			 * To smooth oscillations use a low-pass filter
++			 * with alpha=7/8, i.e.,
++			 * new_rate = (7/8) * old_rate + (1/8) * bw
++			 */
++			do_div(bw, 8);
++			if (bw == 0)
++				return 0;
++			bfqd->peak_rate *= 7;
++			do_div(bfqd->peak_rate, 8);
++			bfqd->peak_rate += bw;
++			update = 1;
++			bfq_log(bfqd, "new peak_rate=%llu", bfqd->peak_rate);
++		}
++
++		update |= bfqd->peak_rate_samples == BFQ_PEAK_RATE_SAMPLES - 1;
++
++		if (bfqd->peak_rate_samples < BFQ_PEAK_RATE_SAMPLES)
++			bfqd->peak_rate_samples++;
++
++		if (bfqd->peak_rate_samples == BFQ_PEAK_RATE_SAMPLES &&
++		    update && bfqd->bfq_user_max_budget == 0) {
++			bfqd->bfq_max_budget =
++				bfq_calc_max_budget(bfqd->peak_rate, timeout);
++			bfq_log(bfqd, "new max_budget=%lu",
++				bfqd->bfq_max_budget);
++		}
++	}
++
++	/*
++	 * If the process has been served for too short a time
++	 * interval to let its possible sequential accesses prevail
++	 * over the initial seek time needed to move the disk head to
++	 * the first sector it requested, then give the process a
++	 * chance and for the moment return false.
++	 */
++	if (bfqq->entity.budget <= bfq_max_budget(bfqd) / 8)
++		return 0;
++
++	/*
++	 * A process is considered ``slow'' (i.e., seeky, so that we
++	 * cannot treat it fairly in the service domain, as it would
++	 * slow down the other processes too much) if, when a slice
++	 * ends for whatever reason, it has received service at a
++	 * rate that would not be high enough to complete the budget
++	 * before the budget timeout expiration.
++	 */
++	expected = bw * 1000 * timeout >> BFQ_RATE_SHIFT;
++
++	/*
++	 * Caveat: processes doing IO in the slower disk zones will
++	 * tend to be slow(er) even if not seeky. And the estimated
++	 * peak rate will actually be an average over the disk
++	 * surface. Hence, to not be too harsh with unlucky processes,
++	 * we keep a budget/3 margin of safety before declaring a
++	 * process slow.
++	 */
++	return expected > (4 * bfqq->entity.budget) / 3;
++}
++
++/*
++ * To be deemed as soft real-time, an application must meet two
++ * requirements. The first is that the application must not require
++ * an average bandwidth higher than the approximate bandwidth
++ * required to playback or record a compressed high-definition video.
++ * The next function is invoked on the completion of the last request
++ * of a batch, to compute the next-start time instant,
++ * soft_rt_next_start, such that, if the next request of the
++ * application does not arrive before soft_rt_next_start, then the
++ * above requirement on the bandwidth is met.
++ *
++ * The second requirement is that the request pattern of the
++ * application is isochronous, i.e., that, after issuing a request or
++ * a batch of requests, the application stops for a while, then
++ * issues a new batch, and so on.
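++ * (As an illustrative example, a video player that issues a burst
++ * of reads every 40 ms and then sleeps while decoding matches this
++ * on/off pattern.)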
++ * For this reason the next function is invoked to compute
++ * soft_rt_next_start only for applications that meet this
++ * requirement, whereas soft_rt_next_start is set to infinity for
++ * applications that do not.
++ *
++ * Unfortunately, even a greedy application may happen to behave in
++ * an isochronous way if several processes are competing for the
++ * CPUs. In fact, in this scenario the application stops issuing
++ * requests while the CPUs are busy serving other processes, then
++ * restarts, then stops again for a while, and so on. In addition, if
++ * the disk achieves a low enough throughput with the request pattern
++ * issued by the application, then the above bandwidth requirement
++ * may happen to be met too. To prevent such a greedy application
++ * from being deemed as soft real-time, a further rule is used in the
++ * computation of soft_rt_next_start: soft_rt_next_start must be
++ * higher than the current time plus the maximum time for which the
++ * arrival of a request is waited for when a sync queue becomes idle,
++ * namely bfqd->bfq_slice_idle. This filters out greedy applications,
++ * as the latter issue instead their next request as soon as possible
++ * after the last one has been completed (in contrast, when a batch
++ * of requests is completed, a soft real-time application spends some
++ * time processing data).
++ *
++ * Actually, the last filter may easily generate false positives if:
++ * only bfqd->bfq_slice_idle is used as a reference time interval,
++ * and one or both the following two cases occur:
++ * 1) HZ is so low that the duration of a jiffy is comparable to or
++ *    higher than bfqd->bfq_slice_idle. This happens, e.g., on slow
++ *    devices with HZ=100.
++ * 2) jiffies, instead of increasing at a constant rate, may stop
++ *    increasing for a while, then suddenly 'jump' by several units
++ *    to recover the lost increments. This seems to happen, e.g.,
++ *    inside virtual machines.
++ * To address this issue, we do not use as a reference time interval
++ * just bfqd->bfq_slice_idle, but bfqd->bfq_slice_idle plus a few
++ * jiffies. In particular we add the minimum number of jiffies for
++ * which the filter seems to be quite precise even in embedded
++ * systems and KVM/QEMU virtual machines.
++ */
++static inline u64 bfq_bfqq_softrt_next_start(struct bfq_data *bfqd,
++					     struct bfq_queue *bfqq)
++{
++	return max(bfqq->last_idle_bklogged +
++		   HZ * bfqq->service_from_backlogged /
++		   bfqd->bfq_raising_max_softrt_rate,
++		   (u64)jiffies + bfqq->bfqd->bfq_slice_idle + 4);
++}
++
++/**
++ * bfq_bfqq_expire - expire a queue.
++ * @bfqd: device owning the queue.
++ * @bfqq: the queue to expire.
++ * @compensate: if true, compensate for the time spent idling.
++ * @reason: the reason causing the expiration.
++ *
++ * If the process associated with the queue is slow (i.e., seeky), or
++ * in case of budget timeout, or, finally, if it is async, we
++ * artificially charge it an entire budget (independently of the
++ * actual service it received). As a consequence, the queue will get
++ * higher timestamps than the correct ones upon reactivation, and
++ * hence it will be rescheduled as if it had received more service
++ * than what it actually received. In the end, this class of
++ * processes will receive less service in proportion to how slowly
++ * they consume their budgets (and hence how seriously they tend to
++ * lower the throughput).
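++ *
++ * As a sketch of the effect: a queue that consumed only 1/4 of its
++ * budget before expiring for one of the reasons above is still
++ * charged the full budget, so its next finish timestamp advances as
++ * if it had done four times the work, and it is reselected
++ * correspondingly later.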
++ *
++ * In contrast, when a queue expires because it has been idling for
++ * too long or because it exhausted its budget, we do not touch the
++ * amount of service it has received. Hence when the queue will be
++ * reactivated and its timestamps updated, the latter will be in sync
++ * with the actual service received by the queue until expiration.
++ *
++ * Charging a full budget to the first type of queues and the exact
++ * service to the others has the effect of using the WF2Q+ policy to
++ * schedule the former on a timeslice basis, without violating the
++ * service domain guarantees of the latter.
++ */
++static void bfq_bfqq_expire(struct bfq_data *bfqd,
++			    struct bfq_queue *bfqq,
++			    int compensate,
++			    enum bfqq_expiration reason)
++{
++	int slow;
++	BUG_ON(bfqq != bfqd->in_service_queue);
++
++	/* Update disk peak rate for autotuning and check whether the
++	 * process is slow (see bfq_update_peak_rate).
++	 */
++	slow = bfq_update_peak_rate(bfqd, bfqq, compensate, reason);
++
++	/*
++	 * As explained above, 'punish' slow (i.e., seeky), timed-out
++	 * and async queues, to favor sequential sync workloads.
++	 *
++	 * Processes doing IO in the slower disk zones will tend to be
++	 * slow(er) even if not seeky. Hence, since the estimated peak
++	 * rate is actually an average over the disk surface, these
++	 * processes may time out just for bad luck. To avoid punishing
++	 * them we do not charge a full budget to a process that
++	 * succeeded in consuming at least 2/3 of its budget.
++	 */
++	if (slow || (reason == BFQ_BFQQ_BUDGET_TIMEOUT &&
++		     bfq_bfqq_budget_left(bfqq) >= bfqq->entity.budget / 3))
++		bfq_bfqq_charge_full_budget(bfqq);
++
++	bfqq->service_from_backlogged += bfqq->entity.service;
++
++	if (bfqd->low_latency && bfqq->raising_coeff == 1)
++		bfqq->last_rais_start_finish = jiffies;
++
++	if (bfqd->low_latency && bfqd->bfq_raising_max_softrt_rate > 0) {
++		if (reason != BFQ_BFQQ_BUDGET_TIMEOUT &&
++		    reason != BFQ_BFQQ_BUDGET_EXHAUSTED) {
++			/*
++			 * If we get here, then the request pattern is
++			 * isochronous (see the comments to the function
++			 * bfq_bfqq_softrt_next_start()). However, if the
++			 * queue still has in-flight requests, then it is
++			 * better to postpone the computation of next_start
++			 * to the next request completion. In fact, if we
++			 * computed it now, then the application might pass
++			 * the greedy-application filter improperly, because
++			 * the arrival of its next request may happen to be
++			 * later than (jiffies + bfqq->bfqd->bfq_slice_idle),
++			 * not because the application is truly soft
++			 * real-time, but just because the application is
++			 * currently waiting for the completion of some
++			 * request before issuing, as quickly as possible,
++			 * its next request.
++			 */
++			if (bfqq->dispatched > 0) {
++				bfqq->soft_rt_next_start = -1;
++				bfq_mark_bfqq_softrt_update(bfqq);
++			} else
++				bfqq->soft_rt_next_start =
++					bfq_bfqq_softrt_next_start(bfqd, bfqq);
++		} else
++			bfqq->soft_rt_next_start = -1; /* infinity */
++	}
++
++	bfq_log_bfqq(bfqd, bfqq,
++		"expire (%d, slow %d, num_disp %d, idle_win %d)", reason, slow,
++		bfqq->dispatched, bfq_bfqq_idle_window(bfqq));
++
++	/* Increase, decrease or leave budget unchanged according to reason. */
++	__bfq_bfqq_recalc_budget(bfqd, bfqq, reason);
++	__bfq_bfqq_expire(bfqd, bfqq);
++}
++
++/*
++ * Budget timeout is not implemented through a dedicated timer, but
++ * just checked on request arrivals and completions, as well as on
++ * idle timer expirations.
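++ * The check below is therefore lazy: bfqq->budget_timeout may lie
++ * in the past for a while before anyone notices. time_before() is
++ * used so that jiffies wraparound is handled correctly.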
++ */ ++static int bfq_bfqq_budget_timeout(struct bfq_queue *bfqq) ++{ ++ if (bfq_bfqq_budget_new(bfqq)) ++ return 0; ++ ++ if (time_before(jiffies, bfqq->budget_timeout)) ++ return 0; ++ ++ return 1; ++} ++ ++/* ++ * If we expire a queue that is waiting for the arrival of a new ++ * request, we may prevent the fictitious timestamp backshifting that ++ * allows the guarantees of the queue to be preserved (see [1] for ++ * this tricky aspect). Hence we return true only if this condition ++ * does not hold, or if the queue is slow enough to deserve only to be ++ * kicked off for preserving a high throughput. ++*/ ++static inline int bfq_may_expire_for_budg_timeout(struct bfq_queue *bfqq) ++{ ++ bfq_log_bfqq(bfqq->bfqd, bfqq, ++ "may_budget_timeout: wr %d left %d timeout %d", ++ bfq_bfqq_wait_request(bfqq), ++ bfq_bfqq_budget_left(bfqq) >= bfqq->entity.budget / 3, ++ bfq_bfqq_budget_timeout(bfqq)); ++ ++ return (!bfq_bfqq_wait_request(bfqq) || ++ bfq_bfqq_budget_left(bfqq) >= bfqq->entity.budget / 3) ++ && ++ bfq_bfqq_budget_timeout(bfqq); ++} ++ ++/* ++ * For weight-raised queues issuing sync requests, idling is always performed, ++ * as this is instrumental in guaranteeing a high fraction of the throughput ++ * to these queues, and hence in guaranteeing a lower latency for their ++ * requests. See [1] for details. ++ * ++ * For non-weight-raised queues, idling is instead disabled if the device is ++ * NCQ-enabled and non-rotational, as this boosts the throughput on such ++ * devices. ++ */ ++static inline bool bfq_bfqq_must_not_expire(struct bfq_queue *bfqq) ++{ ++ struct bfq_data *bfqd = bfqq->bfqd; ++ ++ return bfq_bfqq_sync(bfqq) && ( ++ bfqq->raising_coeff > 1 || ++ (bfq_bfqq_idle_window(bfqq) && ++ !(bfqd->hw_tag && ++ (blk_queue_nonrot(bfqd->queue) || ++ /* ++ * If there are weight-raised busy queues, then do not idle ++ * the disk for a sync non-weight-raised queue, and hence ++ * expire the queue immediately if empty. Combined with the ++ * timestamping rules of BFQ (see [1] for details), this ++ * causes sync non-weight-raised queues to get a lower ++ * fraction of the disk throughput, and hence reduces the rate ++ * at which the processes associated to these queues ask for ++ * requests from the request pool. ++ * ++ * This is beneficial for weight-raised processes, when the ++ * system operates in request-pool saturation conditions ++ * (e.g., in the presence of write hogs). In fact, if ++ * non-weight-raised processes ask for requests at a lower ++ * rate, then weight-raised processes have a higher ++ * probability to get a request from the pool immediately ++ * (or at least soon) when they need one. Hence they have a ++ * higher probability to actually get a fraction of the disk ++ * throughput proportional to their high weight. This is ++ * especially true with NCQ-enabled drives, which enqueue ++ * several requests in advance and further reorder ++ * internally-queued requests. ++ * ++ * Mistreating non-weight-raised queues in the above-described ++ * way, when there are busy weight-raised queues, seems to ++ * mitigate starvation problems in the presence of heavy write ++ * workloads and NCQ, and hence to guarantee a higher ++ * application and system responsiveness in these hostile ++ * scenarios. 
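++ * In short, the whole expression below evaluates to: sync &&
++ * (weight-raised || (idle_window && !(hw_tag && (nonrot ||
++ * raised_busy_queues > 0)))).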
++ */ ++ bfqd->raised_busy_queues > 0) ++ ) ++ ) ++ ); ++} ++ ++/* ++ * If the in-service queue is empty, but it is sync and either of the following ++ * conditions holds, then: 1) the queue must remain in service and cannot be ++ * expired, and 2) the disk must be idled to wait for the possible arrival ++ * of a new request for the queue. The conditions are: ++ * - the device is rotational and not performing NCQ, and the queue has its ++ * idle window set (in this case, waiting for a new request for the queue ++ * is likely to boost the disk throughput); ++ * - the queue is weight-raised (waiting for the request is necessary to ++ * provide the queue with fairness and latency guarantees, see [1] for ++ * details). ++ */ ++static inline bool bfq_bfqq_must_idle(struct bfq_queue *bfqq) ++{ ++ struct bfq_data *bfqd = bfqq->bfqd; ++ ++ return (RB_EMPTY_ROOT(&bfqq->sort_list) && bfqd->bfq_slice_idle != 0 && ++ bfq_bfqq_must_not_expire(bfqq) && ++ !bfq_queue_nonrot_noidle(bfqd, bfqq)); ++} ++ ++/* ++ * Select a queue for service. If we have a current queue in service, ++ * check whether to continue servicing it, or retrieve and set a new one. ++ */ ++static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd) ++{ ++ struct bfq_queue *bfqq, *new_bfqq = NULL; ++ struct request *next_rq; ++ enum bfqq_expiration reason = BFQ_BFQQ_BUDGET_TIMEOUT; ++ ++ bfqq = bfqd->in_service_queue; ++ if (bfqq == NULL) ++ goto new_queue; ++ ++ bfq_log_bfqq(bfqd, bfqq, "select_queue: already in-service queue"); ++ ++ /* ++ * If another queue has a request waiting within our mean seek ++ * distance, let it run. The expire code will check for close ++ * cooperators and put the close queue at the front of the ++ * service tree. If possible, merge the expiring queue with the ++ * new bfqq. ++ */ ++ new_bfqq = bfq_close_cooperator(bfqd, bfqq); ++ if (new_bfqq != NULL && bfqq->new_bfqq == NULL) ++ bfq_setup_merge(bfqq, new_bfqq); ++ ++ if (bfq_may_expire_for_budg_timeout(bfqq) && ++ !timer_pending(&bfqd->idle_slice_timer) && ++ !bfq_bfqq_must_idle(bfqq)) ++ goto expire; ++ ++ next_rq = bfqq->next_rq; ++ /* ++ * If bfqq has requests queued and it has enough budget left to ++ * serve them, keep the queue, otherwise expire it. ++ */ ++ if (next_rq != NULL) { ++ if (bfq_serv_to_charge(next_rq, bfqq) > ++ bfq_bfqq_budget_left(bfqq)) { ++ reason = BFQ_BFQQ_BUDGET_EXHAUSTED; ++ goto expire; ++ } else { ++ /* ++ * The idle timer may be pending because we may not ++ * disable disk idling even when a new request arrives ++ */ ++ if (timer_pending(&bfqd->idle_slice_timer)) { ++ /* ++ * If we get here: 1) at least a new request ++ * has arrived but we have not disabled the ++ * timer because the request was too small, ++ * 2) then the block layer has unplugged the ++ * device, causing the dispatch to be invoked. ++ * ++ * Since the device is unplugged, now the ++ * requests are probably large enough to ++ * provide a reasonable throughput. ++ * So we disable idling. ++ */ ++ bfq_clear_bfqq_wait_request(bfqq); ++ del_timer(&bfqd->idle_slice_timer); ++ } ++ if (new_bfqq == NULL) ++ goto keep_queue; ++ else ++ goto expire; ++ } ++ } ++ ++ /* ++ * No requests pending. If the in-service queue has no cooperator and ++ * still has requests in flight (possibly waiting for a completion) ++ * or is idling for a new request, then keep it. 
++	 */
++	if (new_bfqq == NULL && (timer_pending(&bfqd->idle_slice_timer) ||
++	    (bfqq->dispatched != 0 && bfq_bfqq_must_not_expire(bfqq)))) {
++		bfqq = NULL;
++		goto keep_queue;
++	} else if (new_bfqq != NULL && timer_pending(&bfqd->idle_slice_timer)) {
++		/*
++		 * The queue is being expired because there is a close
++		 * cooperator; cancel the timer.
++		 */
++		bfq_clear_bfqq_wait_request(bfqq);
++		del_timer(&bfqd->idle_slice_timer);
++	}
++
++	reason = BFQ_BFQQ_NO_MORE_REQUESTS;
++expire:
++	bfq_bfqq_expire(bfqd, bfqq, 0, reason);
++new_queue:
++	bfqq = bfq_set_in_service_queue(bfqd, new_bfqq);
++	bfq_log(bfqd, "select_queue: new queue %d returned",
++		bfqq != NULL ? bfqq->pid : 0);
++keep_queue:
++	return bfqq;
++}
++
++static void bfq_update_raising_data(struct bfq_data *bfqd,
++				    struct bfq_queue *bfqq)
++{
++	if (bfqq->raising_coeff > 1) { /* queue is being boosted */
++		struct bfq_entity *entity = &bfqq->entity;
++
++		bfq_log_bfqq(bfqd, bfqq,
++			"raising period dur %u/%u msec, "
++			"old raising coeff %u, w %d(%d)",
++			jiffies_to_msecs(jiffies -
++				bfqq->last_rais_start_finish),
++			jiffies_to_msecs(bfqq->raising_cur_max_time),
++			bfqq->raising_coeff,
++			bfqq->entity.weight, bfqq->entity.orig_weight);
++
++		BUG_ON(bfqq != bfqd->in_service_queue && entity->weight !=
++		       entity->orig_weight * bfqq->raising_coeff);
++		if (entity->ioprio_changed)
++			bfq_log_bfqq(bfqd, bfqq,
++				     "WARN: pending prio change");
++		/*
++		 * If too much time has elapsed from the beginning
++		 * of this weight-raising, stop it.
++		 */
++		if (jiffies - bfqq->last_rais_start_finish >
++		    bfqq->raising_cur_max_time) {
++			bfqq->last_rais_start_finish = jiffies;
++			bfq_log_bfqq(bfqd, bfqq,
++				     "wrais ending at %llu msec, "
++				     "rais_max_time %u",
++				     bfqq->last_rais_start_finish,
++				     jiffies_to_msecs(bfqq->
++					raising_cur_max_time));
++			bfq_bfqq_end_raising(bfqq);
++			__bfq_entity_update_weight_prio(
++				bfq_entity_service_tree(entity),
++				entity);
++		}
++	}
++}
++
++/*
++ * Dispatch one request from bfqq, moving it to the request queue
++ * dispatch list.
++ */
++static int bfq_dispatch_request(struct bfq_data *bfqd,
++				struct bfq_queue *bfqq)
++{
++	int dispatched = 0;
++	struct request *rq;
++	unsigned long service_to_charge;
++
++	BUG_ON(RB_EMPTY_ROOT(&bfqq->sort_list));
++
++	/* Follow expired path, else get first next available. */
++	rq = bfq_check_fifo(bfqq);
++	if (rq == NULL)
++		rq = bfqq->next_rq;
++	service_to_charge = bfq_serv_to_charge(rq, bfqq);
++
++	if (service_to_charge > bfq_bfqq_budget_left(bfqq)) {
++		/*
++		 * This may happen if the next rq is chosen in fifo order
++		 * instead of sector order. The budget is properly
++		 * dimensioned to always be sufficient to serve the next
++		 * request only if it is chosen in sector order. The
++		 * reason is that it would be quite inefficient and of
++		 * little use to always make sure that the budget is
++		 * large enough to serve even the possible next rq in
++		 * fifo order. In fact, requests are seldom served in
++		 * fifo order.
++		 *
++		 * Expire the queue for budget exhaustion, and make sure
++		 * that the next act_budget is enough to serve the next
++		 * request, even if it comes from the fifo expired path.
++		 */
++		bfqq->next_rq = rq;
++		/*
++		 * Since this dispatch failed, make sure that a new one
++		 * will be performed.
++		 */
++		if (!bfqd->rq_in_driver)
++			bfq_schedule_dispatch(bfqd);
++		goto expire;
++	}
++
++	/* Finally, insert request into driver dispatch list.
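++	 * bfq_bfqq_served() below charges service_to_charge to the
++	 * queue before the request actually leaves the scheduler, so
++	 * the budget accounting stays ahead of the dispatch itself.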
*/ ++ bfq_bfqq_served(bfqq, service_to_charge); ++ bfq_dispatch_insert(bfqd->queue, rq); ++ ++ bfq_update_raising_data(bfqd, bfqq); ++ ++ bfq_log_bfqq(bfqd, bfqq, ++ "dispatched %u sec req (%llu), budg left %lu", ++ blk_rq_sectors(rq), ++ (long long unsigned)blk_rq_pos(rq), ++ bfq_bfqq_budget_left(bfqq)); ++ ++ dispatched++; ++ ++ if (bfqd->in_service_bic == NULL) { ++ atomic_long_inc(&RQ_BIC(rq)->icq.ioc->refcount); ++ bfqd->in_service_bic = RQ_BIC(rq); ++ } ++ ++ if (bfqd->busy_queues > 1 && ((!bfq_bfqq_sync(bfqq) && ++ dispatched >= bfqd->bfq_max_budget_async_rq) || ++ bfq_class_idle(bfqq))) ++ goto expire; ++ ++ return dispatched; ++ ++expire: ++ bfq_bfqq_expire(bfqd, bfqq, 0, BFQ_BFQQ_BUDGET_EXHAUSTED); ++ return dispatched; ++} ++ ++static int __bfq_forced_dispatch_bfqq(struct bfq_queue *bfqq) ++{ ++ int dispatched = 0; ++ ++ while (bfqq->next_rq != NULL) { ++ bfq_dispatch_insert(bfqq->bfqd->queue, bfqq->next_rq); ++ dispatched++; ++ } ++ ++ BUG_ON(!list_empty(&bfqq->fifo)); ++ return dispatched; ++} ++ ++/* ++ * Drain our current requests. Used for barriers and when switching ++ * io schedulers on-the-fly. ++ */ ++static int bfq_forced_dispatch(struct bfq_data *bfqd) ++{ ++ struct bfq_queue *bfqq, *n; ++ struct bfq_service_tree *st; ++ int dispatched = 0; ++ ++ bfqq = bfqd->in_service_queue; ++ if (bfqq != NULL) ++ __bfq_bfqq_expire(bfqd, bfqq); ++ ++ /* ++ * Loop through classes, and be careful to leave the scheduler ++ * in a consistent state, as feedback mechanisms and vtime ++ * updates cannot be disabled during the process. ++ */ ++ list_for_each_entry_safe(bfqq, n, &bfqd->active_list, bfqq_list) { ++ st = bfq_entity_service_tree(&bfqq->entity); ++ ++ dispatched += __bfq_forced_dispatch_bfqq(bfqq); ++ bfqq->max_budget = bfq_max_budget(bfqd); ++ ++ bfq_forget_idle(st); ++ } ++ ++ BUG_ON(bfqd->busy_queues != 0); ++ ++ return dispatched; ++} ++ ++static int bfq_dispatch_requests(struct request_queue *q, int force) ++{ ++ struct bfq_data *bfqd = q->elevator->elevator_data; ++ struct bfq_queue *bfqq; ++ int max_dispatch; ++ ++ bfq_log(bfqd, "dispatch requests: %d busy queues", bfqd->busy_queues); ++ if (bfqd->busy_queues == 0) ++ return 0; ++ ++ if (unlikely(force)) ++ return bfq_forced_dispatch(bfqd); ++ ++ bfqq = bfq_select_queue(bfqd); ++ if (bfqq == NULL) ++ return 0; ++ ++ max_dispatch = bfqd->bfq_quantum; ++ if (bfq_class_idle(bfqq)) ++ max_dispatch = 1; ++ ++ if (!bfq_bfqq_sync(bfqq)) ++ max_dispatch = bfqd->bfq_max_budget_async_rq; ++ ++ if (bfqq->dispatched >= max_dispatch) { ++ if (bfqd->busy_queues > 1) ++ return 0; ++ if (bfqq->dispatched >= 4 * max_dispatch) ++ return 0; ++ } ++ ++ if (bfqd->sync_flight != 0 && !bfq_bfqq_sync(bfqq)) ++ return 0; ++ ++ bfq_clear_bfqq_wait_request(bfqq); ++ BUG_ON(timer_pending(&bfqd->idle_slice_timer)); ++ ++ if (!bfq_dispatch_request(bfqd, bfqq)) ++ return 0; ++ ++ bfq_log_bfqq(bfqd, bfqq, "dispatched one request of %d (max_disp %d)", ++ bfqq->pid, max_dispatch); ++ ++ return 1; ++} ++ ++/* ++ * Task holds one reference to the queue, dropped when task exits. Each rq ++ * in-flight on this queue also holds a reference, dropped when rq is freed. ++ * ++ * Queue lock must be held here. 
++ */ ++static void bfq_put_queue(struct bfq_queue *bfqq) ++{ ++ struct bfq_data *bfqd = bfqq->bfqd; ++ ++ BUG_ON(atomic_read(&bfqq->ref) <= 0); ++ ++ bfq_log_bfqq(bfqd, bfqq, "put_queue: %p %d", bfqq, ++ atomic_read(&bfqq->ref)); ++ if (!atomic_dec_and_test(&bfqq->ref)) ++ return; ++ ++ BUG_ON(rb_first(&bfqq->sort_list) != NULL); ++ BUG_ON(bfqq->allocated[READ] + bfqq->allocated[WRITE] != 0); ++ BUG_ON(bfqq->entity.tree != NULL); ++ BUG_ON(bfq_bfqq_busy(bfqq)); ++ BUG_ON(bfqd->in_service_queue == bfqq); ++ ++ bfq_log_bfqq(bfqd, bfqq, "put_queue: %p freed", bfqq); ++ ++ kmem_cache_free(bfq_pool, bfqq); ++} ++ ++static void bfq_put_cooperator(struct bfq_queue *bfqq) ++{ ++ struct bfq_queue *__bfqq, *next; ++ ++ /* ++ * If this queue was scheduled to merge with another queue, be ++ * sure to drop the reference taken on that queue (and others in ++ * the merge chain). See bfq_setup_merge and bfq_merge_bfqqs. ++ */ ++ __bfqq = bfqq->new_bfqq; ++ while (__bfqq) { ++ if (__bfqq == bfqq) { ++ WARN(1, "bfqq->new_bfqq loop detected.\n"); ++ break; ++ } ++ next = __bfqq->new_bfqq; ++ bfq_put_queue(__bfqq); ++ __bfqq = next; ++ } ++} ++ ++static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq) ++{ ++ if (bfqq == bfqd->in_service_queue) { ++ __bfq_bfqq_expire(bfqd, bfqq); ++ bfq_schedule_dispatch(bfqd); ++ } ++ ++ bfq_log_bfqq(bfqd, bfqq, "exit_bfqq: %p, %d", bfqq, ++ atomic_read(&bfqq->ref)); ++ ++ bfq_put_cooperator(bfqq); ++ ++ bfq_put_queue(bfqq); ++} ++ ++static void bfq_init_icq(struct io_cq *icq) ++{ ++ struct bfq_io_cq *bic = icq_to_bic(icq); ++ ++ bic->ttime.last_end_request = jiffies; ++} ++ ++static void bfq_exit_icq(struct io_cq *icq) ++{ ++ struct bfq_io_cq *bic = icq_to_bic(icq); ++ struct bfq_data *bfqd = bic_to_bfqd(bic); ++ ++ if (bic->bfqq[BLK_RW_ASYNC]) { ++ bfq_exit_bfqq(bfqd, bic->bfqq[BLK_RW_ASYNC]); ++ bic->bfqq[BLK_RW_ASYNC] = NULL; ++ } ++ ++ if (bic->bfqq[BLK_RW_SYNC]) { ++ bfq_exit_bfqq(bfqd, bic->bfqq[BLK_RW_SYNC]); ++ bic->bfqq[BLK_RW_SYNC] = NULL; ++ } ++} ++ ++/* ++ * Update the entity prio values; note that the new values will not ++ * be used until the next (re)activation. ++ */ ++static void bfq_init_prio_data(struct bfq_queue *bfqq, struct bfq_io_cq *bic) ++{ ++ struct task_struct *tsk = current; ++ int ioprio_class; ++ ++ if (!bfq_bfqq_prio_changed(bfqq)) ++ return; ++ ++ ioprio_class = IOPRIO_PRIO_CLASS(bic->ioprio); ++ switch (ioprio_class) { ++ default: ++ dev_err(bfqq->bfqd->queue->backing_dev_info.dev, ++ "bfq: bad prio %x\n", ioprio_class); ++ case IOPRIO_CLASS_NONE: ++ /* ++ * No prio set, inherit CPU scheduling settings. ++ */ ++ bfqq->entity.new_ioprio = task_nice_ioprio(tsk); ++ bfqq->entity.new_ioprio_class = task_nice_ioclass(tsk); ++ break; ++ case IOPRIO_CLASS_RT: ++ bfqq->entity.new_ioprio = IOPRIO_PRIO_DATA(bic->ioprio); ++ bfqq->entity.new_ioprio_class = IOPRIO_CLASS_RT; ++ break; ++ case IOPRIO_CLASS_BE: ++ bfqq->entity.new_ioprio = IOPRIO_PRIO_DATA(bic->ioprio); ++ bfqq->entity.new_ioprio_class = IOPRIO_CLASS_BE; ++ break; ++ case IOPRIO_CLASS_IDLE: ++ bfqq->entity.new_ioprio_class = IOPRIO_CLASS_IDLE; ++ bfqq->entity.new_ioprio = 7; ++ bfq_clear_bfqq_idle_window(bfqq); ++ break; ++ } ++ ++ bfqq->entity.ioprio_changed = 1; ++ ++ /* ++ * Keep track of original prio settings in case we have to temporarily ++ * elevate the priority of this queue. 
++ */ ++ bfqq->org_ioprio = bfqq->entity.new_ioprio; ++ bfq_clear_bfqq_prio_changed(bfqq); ++} ++ ++static void bfq_changed_ioprio(struct bfq_io_cq *bic) ++{ ++ struct bfq_data *bfqd; ++ struct bfq_queue *bfqq, *new_bfqq; ++ struct bfq_group *bfqg; ++ unsigned long uninitialized_var(flags); ++ int ioprio = bic->icq.ioc->ioprio; ++ ++ bfqd = bfq_get_bfqd_locked(&(bic->icq.q->elevator->elevator_data), ++ &flags); ++ /* ++ * This condition may trigger on a newly created bic, be sure to drop ++ * the lock before returning. ++ */ ++ if (unlikely(bfqd == NULL) || likely(bic->ioprio == ioprio)) ++ goto out; ++ ++ bfqq = bic->bfqq[BLK_RW_ASYNC]; ++ if (bfqq != NULL) { ++ bfqg = container_of(bfqq->entity.sched_data, struct bfq_group, ++ sched_data); ++ new_bfqq = bfq_get_queue(bfqd, bfqg, BLK_RW_ASYNC, bic, ++ GFP_ATOMIC); ++ if (new_bfqq != NULL) { ++ bic->bfqq[BLK_RW_ASYNC] = new_bfqq; ++ bfq_log_bfqq(bfqd, bfqq, ++ "changed_ioprio: bfqq %p %d", ++ bfqq, atomic_read(&bfqq->ref)); ++ bfq_put_queue(bfqq); ++ } ++ } ++ ++ bfqq = bic->bfqq[BLK_RW_SYNC]; ++ if (bfqq != NULL) ++ bfq_mark_bfqq_prio_changed(bfqq); ++ ++ bic->ioprio = ioprio; ++ ++out: ++ bfq_put_bfqd_unlock(bfqd, &flags); ++} ++ ++static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq, ++ pid_t pid, int is_sync) ++{ ++ RB_CLEAR_NODE(&bfqq->entity.rb_node); ++ INIT_LIST_HEAD(&bfqq->fifo); ++ ++ atomic_set(&bfqq->ref, 0); ++ bfqq->bfqd = bfqd; ++ ++ bfq_mark_bfqq_prio_changed(bfqq); ++ ++ if (is_sync) { ++ if (!bfq_class_idle(bfqq)) ++ bfq_mark_bfqq_idle_window(bfqq); ++ bfq_mark_bfqq_sync(bfqq); ++ } ++ ++ /* Tentative initial value to trade off between thr and lat */ ++ bfqq->max_budget = (2 * bfq_max_budget(bfqd)) / 3; ++ bfqq->pid = pid; ++ ++ bfqq->raising_coeff = 1; ++ bfqq->last_rais_start_finish = 0; ++ bfqq->soft_rt_next_start = -1; ++} ++ ++static struct bfq_queue *bfq_find_alloc_queue(struct bfq_data *bfqd, ++ struct bfq_group *bfqg, ++ int is_sync, ++ struct bfq_io_cq *bic, ++ gfp_t gfp_mask) ++{ ++ struct bfq_queue *bfqq, *new_bfqq = NULL; ++ ++retry: ++ /* bic always exists here */ ++ bfqq = bic_to_bfqq(bic, is_sync); ++ ++ /* ++ * Always try a new alloc if we fall back to the OOM bfqq ++ * originally, since it should just be a temporary situation. 
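++	 * Note the pattern below: if the caller may sleep
++	 * (__GFP_WAIT), the queue_lock is dropped around the
++	 * allocation and the lookup is retried from scratch, since
++	 * the bic-to-bfqq association may have changed in the
++	 * meantime.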
++ */ ++ if (bfqq == NULL || bfqq == &bfqd->oom_bfqq) { ++ bfqq = NULL; ++ if (new_bfqq != NULL) { ++ bfqq = new_bfqq; ++ new_bfqq = NULL; ++ } else if (gfp_mask & __GFP_WAIT) { ++ spin_unlock_irq(bfqd->queue->queue_lock); ++ new_bfqq = kmem_cache_alloc_node(bfq_pool, ++ gfp_mask | __GFP_ZERO, ++ bfqd->queue->node); ++ spin_lock_irq(bfqd->queue->queue_lock); ++ if (new_bfqq != NULL) ++ goto retry; ++ } else { ++ bfqq = kmem_cache_alloc_node(bfq_pool, ++ gfp_mask | __GFP_ZERO, ++ bfqd->queue->node); ++ } ++ ++ if (bfqq != NULL) { ++ bfq_init_bfqq(bfqd, bfqq, current->pid, is_sync); ++ bfq_log_bfqq(bfqd, bfqq, "allocated"); ++ } else { ++ bfqq = &bfqd->oom_bfqq; ++ bfq_log_bfqq(bfqd, bfqq, "using oom bfqq"); ++ } ++ ++ bfq_init_prio_data(bfqq, bic); ++ bfq_init_entity(&bfqq->entity, bfqg); ++ } ++ ++ if (new_bfqq != NULL) ++ kmem_cache_free(bfq_pool, new_bfqq); ++ ++ return bfqq; ++} ++ ++static struct bfq_queue **bfq_async_queue_prio(struct bfq_data *bfqd, ++ struct bfq_group *bfqg, ++ int ioprio_class, int ioprio) ++{ ++ switch (ioprio_class) { ++ case IOPRIO_CLASS_RT: ++ return &bfqg->async_bfqq[0][ioprio]; ++ case IOPRIO_CLASS_NONE: ++ ioprio = IOPRIO_NORM; ++ /* fall through */ ++ case IOPRIO_CLASS_BE: ++ return &bfqg->async_bfqq[1][ioprio]; ++ case IOPRIO_CLASS_IDLE: ++ return &bfqg->async_idle_bfqq; ++ default: ++ BUG(); ++ } ++} ++ ++static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd, ++ struct bfq_group *bfqg, int is_sync, ++ struct bfq_io_cq *bic, gfp_t gfp_mask) ++{ ++ const int ioprio = IOPRIO_PRIO_DATA(bic->ioprio); ++ const int ioprio_class = IOPRIO_PRIO_CLASS(bic->ioprio); ++ struct bfq_queue **async_bfqq = NULL; ++ struct bfq_queue *bfqq = NULL; ++ ++ if (!is_sync) { ++ async_bfqq = bfq_async_queue_prio(bfqd, bfqg, ioprio_class, ++ ioprio); ++ bfqq = *async_bfqq; ++ } ++ ++ if (bfqq == NULL) ++ bfqq = bfq_find_alloc_queue(bfqd, bfqg, is_sync, bic, gfp_mask); ++ ++ /* ++ * Pin the queue now that it's allocated, scheduler exit will prune it. ++ */ ++ if (!is_sync && *async_bfqq == NULL) { ++ atomic_inc(&bfqq->ref); ++ bfq_log_bfqq(bfqd, bfqq, "get_queue, bfqq not in async: %p, %d", ++ bfqq, atomic_read(&bfqq->ref)); ++ *async_bfqq = bfqq; ++ } ++ ++ atomic_inc(&bfqq->ref); ++ bfq_log_bfqq(bfqd, bfqq, "get_queue, at end: %p, %d", bfqq, ++ atomic_read(&bfqq->ref)); ++ return bfqq; ++} ++ ++static void bfq_update_io_thinktime(struct bfq_data *bfqd, ++ struct bfq_io_cq *bic) ++{ ++ unsigned long elapsed = jiffies - bic->ttime.last_end_request; ++ unsigned long ttime = min(elapsed, 2UL * bfqd->bfq_slice_idle); ++ ++ bic->ttime.ttime_samples = (7*bic->ttime.ttime_samples + 256) / 8; ++ bic->ttime.ttime_total = (7*bic->ttime.ttime_total + 256*ttime) / 8; ++ bic->ttime.ttime_mean = (bic->ttime.ttime_total + 128) / ++ bic->ttime.ttime_samples; ++} ++ ++static void bfq_update_io_seektime(struct bfq_data *bfqd, ++ struct bfq_queue *bfqq, ++ struct request *rq) ++{ ++ sector_t sdist; ++ u64 total; ++ ++ if (bfqq->last_request_pos < blk_rq_pos(rq)) ++ sdist = blk_rq_pos(rq) - bfqq->last_request_pos; ++ else ++ sdist = bfqq->last_request_pos - blk_rq_pos(rq); ++ ++ /* ++ * Don't allow the seek distance to get too large from the ++ * odd fragment, pagein, etc. 
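++	 * The clamp below caps a new sample at 4 * seek_mean plus a
++	 * constant (larger while only a few samples have been
++	 * collected), and the running totals then decay with weight
++	 * 7/8 per new sample.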
++ */ ++ if (bfqq->seek_samples == 0) /* first request, not really a seek */ ++ sdist = 0; ++ else if (bfqq->seek_samples <= 60) /* second & third seek */ ++ sdist = min(sdist, (bfqq->seek_mean * 4) + 2*1024*1024); ++ else ++ sdist = min(sdist, (bfqq->seek_mean * 4) + 2*1024*64); ++ ++ bfqq->seek_samples = (7*bfqq->seek_samples + 256) / 8; ++ bfqq->seek_total = (7*bfqq->seek_total + (u64)256*sdist) / 8; ++ total = bfqq->seek_total + (bfqq->seek_samples/2); ++ do_div(total, bfqq->seek_samples); ++ bfqq->seek_mean = (sector_t)total; ++ ++ bfq_log_bfqq(bfqd, bfqq, "dist=%llu mean=%llu", (u64)sdist, ++ (u64)bfqq->seek_mean); ++} ++ ++/* ++ * Disable idle window if the process thinks too long or seeks so much that ++ * it doesn't matter. ++ */ ++static void bfq_update_idle_window(struct bfq_data *bfqd, ++ struct bfq_queue *bfqq, ++ struct bfq_io_cq *bic) ++{ ++ int enable_idle; ++ ++ /* Don't idle for async or idle io prio class. */ ++ if (!bfq_bfqq_sync(bfqq) || bfq_class_idle(bfqq)) ++ return; ++ ++ enable_idle = bfq_bfqq_idle_window(bfqq); ++ ++ if (atomic_read(&bic->icq.ioc->active_ref) == 0 || ++ bfqd->bfq_slice_idle == 0 || ++ (bfqd->hw_tag && BFQQ_SEEKY(bfqq) && ++ bfqq->raising_coeff == 1)) ++ enable_idle = 0; ++ else if (bfq_sample_valid(bic->ttime.ttime_samples)) { ++ if (bic->ttime.ttime_mean > bfqd->bfq_slice_idle && ++ bfqq->raising_coeff == 1) ++ enable_idle = 0; ++ else ++ enable_idle = 1; ++ } ++ bfq_log_bfqq(bfqd, bfqq, "update_idle_window: enable_idle %d", ++ enable_idle); ++ ++ if (enable_idle) ++ bfq_mark_bfqq_idle_window(bfqq); ++ else ++ bfq_clear_bfqq_idle_window(bfqq); ++} ++ ++/* ++ * Called when a new fs request (rq) is added to bfqq. Check if there's ++ * something we should do about it. ++ */ ++static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq, ++ struct request *rq) ++{ ++ struct bfq_io_cq *bic = RQ_BIC(rq); ++ ++ if (rq->cmd_flags & REQ_META) ++ bfqq->meta_pending++; ++ ++ bfq_update_io_thinktime(bfqd, bic); ++ bfq_update_io_seektime(bfqd, bfqq, rq); ++ if (bfqq->entity.service > bfq_max_budget(bfqd) / 8 || ++ !BFQQ_SEEKY(bfqq)) ++ bfq_update_idle_window(bfqd, bfqq, bic); ++ ++ bfq_log_bfqq(bfqd, bfqq, ++ "rq_enqueued: idle_window=%d (seeky %d, mean %llu)", ++ bfq_bfqq_idle_window(bfqq), BFQQ_SEEKY(bfqq), ++ (long long unsigned)bfqq->seek_mean); ++ ++ bfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq); ++ ++ if (bfqq == bfqd->in_service_queue && bfq_bfqq_wait_request(bfqq)) { ++ int small_req = bfqq->queued[rq_is_sync(rq)] == 1 && ++ blk_rq_sectors(rq) < 32; ++ int budget_timeout = bfq_bfqq_budget_timeout(bfqq); ++ ++ /* ++ * There is just this request queued: if the request ++ * is small and the queue is not to be expired, then ++ * just exit. ++ * ++ * In this way, if the disk is being idled to wait for ++ * a new request from the in-service queue, we avoid ++ * unplugging the device and committing the disk to serve ++ * just a small request. On the contrary, we wait for ++ * the block layer to decide when to unplug the device: ++ * hopefully, new requests will be merged to this one ++ * quickly, then the device will be unplugged and ++ * larger requests will be dispatched. ++ */ ++ if (small_req && !budget_timeout) ++ return; ++ ++ /* ++ * A large enough request arrived, or the queue is to ++ * be expired: in both cases disk idling is to be ++ * stopped, so clear wait_request flag and reset ++ * timer. 
++ */ ++ bfq_clear_bfqq_wait_request(bfqq); ++ del_timer(&bfqd->idle_slice_timer); ++ ++ /* ++ * The queue is not empty, because a new request just ++ * arrived. Hence we can safely expire the queue, in ++ * case of budget timeout, without risking that the ++ * timestamps of the queue are not updated correctly. ++ * See [1] for more details. ++ */ ++ if (budget_timeout) ++ bfq_bfqq_expire(bfqd, bfqq, 0, BFQ_BFQQ_BUDGET_TIMEOUT); ++ ++ /* ++ * Let the request rip immediately, or let a new queue be ++ * selected if bfqq has just been expired. ++ */ ++ __blk_run_queue(bfqd->queue); ++ } ++} ++ ++static void bfq_insert_request(struct request_queue *q, struct request *rq) ++{ ++ struct bfq_data *bfqd = q->elevator->elevator_data; ++ struct bfq_queue *bfqq = RQ_BFQQ(rq); ++ ++ assert_spin_locked(bfqd->queue->queue_lock); ++ bfq_init_prio_data(bfqq, RQ_BIC(rq)); ++ ++ bfq_add_rq_rb(rq); ++ ++ rq_set_fifo_time(rq, jiffies + bfqd->bfq_fifo_expire[rq_is_sync(rq)]); ++ list_add_tail(&rq->queuelist, &bfqq->fifo); ++ ++ bfq_rq_enqueued(bfqd, bfqq, rq); ++} ++ ++static void bfq_update_hw_tag(struct bfq_data *bfqd) ++{ ++ bfqd->max_rq_in_driver = max(bfqd->max_rq_in_driver, ++ bfqd->rq_in_driver); ++ ++ if (bfqd->hw_tag == 1) ++ return; ++ ++ /* ++ * This sample is valid if the number of outstanding requests ++ * is large enough to allow a queueing behavior. Note that the ++ * sum is not exact, as it's not taking into account deactivated ++ * requests. ++ */ ++ if (bfqd->rq_in_driver + bfqd->queued < BFQ_HW_QUEUE_THRESHOLD) ++ return; ++ ++ if (bfqd->hw_tag_samples++ < BFQ_HW_QUEUE_SAMPLES) ++ return; ++ ++ bfqd->hw_tag = bfqd->max_rq_in_driver > BFQ_HW_QUEUE_THRESHOLD; ++ bfqd->max_rq_in_driver = 0; ++ bfqd->hw_tag_samples = 0; ++} ++ ++static void bfq_completed_request(struct request_queue *q, struct request *rq) ++{ ++ struct bfq_queue *bfqq = RQ_BFQQ(rq); ++ struct bfq_data *bfqd = bfqq->bfqd; ++ const int sync = rq_is_sync(rq); ++ ++ bfq_log_bfqq(bfqd, bfqq, "completed %u sects req (%d)", ++ blk_rq_sectors(rq), sync); ++ ++ bfq_update_hw_tag(bfqd); ++ ++ WARN_ON(!bfqd->rq_in_driver); ++ WARN_ON(!bfqq->dispatched); ++ bfqd->rq_in_driver--; ++ bfqq->dispatched--; ++ ++ if (bfq_bfqq_sync(bfqq)) ++ bfqd->sync_flight--; ++ ++ if (sync) ++ RQ_BIC(rq)->ttime.last_end_request = jiffies; ++ ++ /* ++ * The computation of softrt_next_start was scheduled for the next ++ * request completion: it is now time to compute it. ++ */ ++ if (bfq_bfqq_softrt_update(bfqq) && RB_EMPTY_ROOT(&bfqq->sort_list)) ++ bfqq->soft_rt_next_start = ++ bfq_bfqq_softrt_next_start(bfqd, bfqq); ++ ++ /* ++ * If this is the in-service queue, check if it needs to be expired, ++ * or if we want to idle in case it has no pending requests. 
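++	 * Three outcomes are possible below: arm the idle timer and
++	 * wait (bfq_arm_slice_timer), expire for budget timeout, or
++	 * expire because no request is pending and idling is not
++	 * required.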
++ */ ++ if (bfqd->in_service_queue == bfqq) { ++ if (bfq_bfqq_budget_new(bfqq)) ++ bfq_set_budget_timeout(bfqd); ++ ++ if (bfq_bfqq_must_idle(bfqq)) { ++ bfq_arm_slice_timer(bfqd); ++ goto out; ++ } else if (bfq_may_expire_for_budg_timeout(bfqq)) ++ bfq_bfqq_expire(bfqd, bfqq, 0, BFQ_BFQQ_BUDGET_TIMEOUT); ++ else if (RB_EMPTY_ROOT(&bfqq->sort_list) && ++ (bfqq->dispatched == 0 || ++ !bfq_bfqq_must_not_expire(bfqq))) ++ bfq_bfqq_expire(bfqd, bfqq, 0, ++ BFQ_BFQQ_NO_MORE_REQUESTS); ++ } ++ ++ if (!bfqd->rq_in_driver) ++ bfq_schedule_dispatch(bfqd); ++ ++out: ++ return; ++} ++ ++static inline int __bfq_may_queue(struct bfq_queue *bfqq) ++{ ++ if (bfq_bfqq_wait_request(bfqq) && bfq_bfqq_must_alloc(bfqq)) { ++ bfq_clear_bfqq_must_alloc(bfqq); ++ return ELV_MQUEUE_MUST; ++ } ++ ++ return ELV_MQUEUE_MAY; ++} ++ ++static int bfq_may_queue(struct request_queue *q, int rw) ++{ ++ struct bfq_data *bfqd = q->elevator->elevator_data; ++ struct task_struct *tsk = current; ++ struct bfq_io_cq *bic; ++ struct bfq_queue *bfqq; ++ ++ /* ++ * Don't force setup of a queue from here, as a call to may_queue ++ * does not necessarily imply that a request actually will be queued. ++ * So just lookup a possibly existing queue, or return 'may queue' ++ * if that fails. ++ */ ++ bic = bfq_bic_lookup(bfqd, tsk->io_context); ++ if (bic == NULL) ++ return ELV_MQUEUE_MAY; ++ ++ bfqq = bic_to_bfqq(bic, rw_is_sync(rw)); ++ if (bfqq != NULL) { ++ bfq_init_prio_data(bfqq, bic); ++ ++ return __bfq_may_queue(bfqq); ++ } ++ ++ return ELV_MQUEUE_MAY; ++} ++ ++/* ++ * Queue lock held here. ++ */ ++static void bfq_put_request(struct request *rq) ++{ ++ struct bfq_queue *bfqq = RQ_BFQQ(rq); ++ ++ if (bfqq != NULL) { ++ const int rw = rq_data_dir(rq); ++ ++ BUG_ON(!bfqq->allocated[rw]); ++ bfqq->allocated[rw]--; ++ ++ rq->elv.priv[0] = NULL; ++ rq->elv.priv[1] = NULL; ++ ++ bfq_log_bfqq(bfqq->bfqd, bfqq, "put_request %p, %d", ++ bfqq, atomic_read(&bfqq->ref)); ++ bfq_put_queue(bfqq); ++ } ++} ++ ++static struct bfq_queue * ++bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic, ++ struct bfq_queue *bfqq) ++{ ++ bfq_log_bfqq(bfqd, bfqq, "merging with queue %lu", ++ (long unsigned)bfqq->new_bfqq->pid); ++ bic_set_bfqq(bic, bfqq->new_bfqq, 1); ++ bfq_mark_bfqq_coop(bfqq->new_bfqq); ++ bfq_put_queue(bfqq); ++ return bic_to_bfqq(bic, 1); ++} ++ ++/* ++ * Returns NULL if a new bfqq should be allocated, or the old bfqq if this ++ * was the last process referring to said bfqq. ++ */ ++static struct bfq_queue * ++bfq_split_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq) ++{ ++ bfq_log_bfqq(bfqq->bfqd, bfqq, "splitting queue"); ++ if (bfqq_process_refs(bfqq) == 1) { ++ bfqq->pid = current->pid; ++ bfq_clear_bfqq_coop(bfqq); ++ bfq_clear_bfqq_split_coop(bfqq); ++ return bfqq; ++ } ++ ++ bic_set_bfqq(bic, NULL, 1); ++ ++ bfq_put_cooperator(bfqq); ++ ++ bfq_put_queue(bfqq); ++ return NULL; ++} ++ ++/* ++ * Allocate bfq data structures associated with this request. 
++ */ ++static int bfq_set_request(struct request_queue *q, struct request *rq, ++ struct bio *bio, gfp_t gfp_mask) ++{ ++ struct bfq_data *bfqd = q->elevator->elevator_data; ++ struct bfq_io_cq *bic = icq_to_bic(rq->elv.icq); ++ const int rw = rq_data_dir(rq); ++ const int is_sync = rq_is_sync(rq); ++ struct bfq_queue *bfqq; ++ struct bfq_group *bfqg; ++ unsigned long flags; ++ ++ might_sleep_if(gfp_mask & __GFP_WAIT); ++ ++ bfq_changed_ioprio(bic); ++ ++ spin_lock_irqsave(q->queue_lock, flags); ++ ++ if (bic == NULL) ++ goto queue_fail; ++ ++ bfqg = bfq_bic_update_cgroup(bic); ++ ++new_queue: ++ bfqq = bic_to_bfqq(bic, is_sync); ++ if (bfqq == NULL || bfqq == &bfqd->oom_bfqq) { ++ bfqq = bfq_get_queue(bfqd, bfqg, is_sync, bic, gfp_mask); ++ bic_set_bfqq(bic, bfqq, is_sync); ++ } else { ++ /* ++ * If the queue was seeky for too long, break it apart. ++ */ ++ if (bfq_bfqq_coop(bfqq) && bfq_bfqq_split_coop(bfqq)) { ++ bfq_log_bfqq(bfqd, bfqq, "breaking apart bfqq"); ++ bfqq = bfq_split_bfqq(bic, bfqq); ++ if (!bfqq) ++ goto new_queue; ++ } ++ ++ /* ++ * Check to see if this queue is scheduled to merge with ++ * another closely cooperating queue. The merging of queues ++ * happens here as it must be done in process context. ++ * The reference on new_bfqq was taken in merge_bfqqs. ++ */ ++ if (bfqq->new_bfqq != NULL) ++ bfqq = bfq_merge_bfqqs(bfqd, bic, bfqq); ++ } ++ ++ bfqq->allocated[rw]++; ++ atomic_inc(&bfqq->ref); ++ bfq_log_bfqq(bfqd, bfqq, "set_request: bfqq %p, %d", bfqq, ++ atomic_read(&bfqq->ref)); ++ ++ rq->elv.priv[0] = bic; ++ rq->elv.priv[1] = bfqq; ++ ++ spin_unlock_irqrestore(q->queue_lock, flags); ++ ++ return 0; ++ ++queue_fail: ++ bfq_schedule_dispatch(bfqd); ++ spin_unlock_irqrestore(q->queue_lock, flags); ++ ++ return 1; ++} ++ ++static void bfq_kick_queue(struct work_struct *work) ++{ ++ struct bfq_data *bfqd = ++ container_of(work, struct bfq_data, unplug_work); ++ struct request_queue *q = bfqd->queue; ++ ++ spin_lock_irq(q->queue_lock); ++ __blk_run_queue(q); ++ spin_unlock_irq(q->queue_lock); ++} ++ ++/* ++ * Handler of the expiration of the timer running if the in-service queue ++ * is idling inside its time slice. ++ */ ++static void bfq_idle_slice_timer(unsigned long data) ++{ ++ struct bfq_data *bfqd = (struct bfq_data *)data; ++ struct bfq_queue *bfqq; ++ unsigned long flags; ++ enum bfqq_expiration reason; ++ ++ spin_lock_irqsave(bfqd->queue->queue_lock, flags); ++ ++ bfqq = bfqd->in_service_queue; ++ /* ++ * Theoretical race here: the in-service queue can be NULL or different ++ * from the queue that was idling if the timer handler spins on ++ * the queue_lock and a new request arrives for the current ++ * queue and there is a full dispatch cycle that changes the ++ * in-service queue. This can hardly happen, but in the worst case ++ * we just expire a queue too early. 
++	 */
++	if (bfqq != NULL) {
++		bfq_log_bfqq(bfqd, bfqq, "slice_timer expired");
++		if (bfq_bfqq_budget_timeout(bfqq))
++			/*
++			 * Also here the queue can be safely expired
++			 * for budget timeout without wasting
++			 * guarantees
++			 */
++			reason = BFQ_BFQQ_BUDGET_TIMEOUT;
++		else if (bfqq->queued[0] == 0 && bfqq->queued[1] == 0)
++			/*
++			 * The queue may not be empty upon timer expiration,
++			 * because we may not disable the timer when the first
++			 * request of the in-service queue arrives during
++			 * disk idling
++			 */
++			reason = BFQ_BFQQ_TOO_IDLE;
++		else
++			goto schedule_dispatch;
++
++		bfq_bfqq_expire(bfqd, bfqq, 1, reason);
++	}
++
++schedule_dispatch:
++	bfq_schedule_dispatch(bfqd);
++
++	spin_unlock_irqrestore(bfqd->queue->queue_lock, flags);
++}
++
++static void bfq_shutdown_timer_wq(struct bfq_data *bfqd)
++{
++	del_timer_sync(&bfqd->idle_slice_timer);
++	cancel_work_sync(&bfqd->unplug_work);
++}
++
++static inline void __bfq_put_async_bfqq(struct bfq_data *bfqd,
++					struct bfq_queue **bfqq_ptr)
++{
++	struct bfq_group *root_group = bfqd->root_group;
++	struct bfq_queue *bfqq = *bfqq_ptr;
++
++	bfq_log(bfqd, "put_async_bfqq: %p", bfqq);
++	if (bfqq != NULL) {
++		bfq_bfqq_move(bfqd, bfqq, &bfqq->entity, root_group);
++		bfq_log_bfqq(bfqd, bfqq, "put_async_bfqq: putting %p, %d",
++			     bfqq, atomic_read(&bfqq->ref));
++		bfq_put_queue(bfqq);
++		*bfqq_ptr = NULL;
++	}
++}
++
++/*
++ * Release all the bfqg references to its async queues. If we are
++ * deallocating the group these queues may still contain requests, so
++ * we reparent them to the root cgroup (i.e., the only one that will
++ * exist for sure until all the requests on a device are gone).
++ */
++static void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg)
++{
++	int i, j;
++
++	for (i = 0; i < 2; i++)
++		for (j = 0; j < IOPRIO_BE_NR; j++)
++			__bfq_put_async_bfqq(bfqd, &bfqg->async_bfqq[i][j]);
++
++	__bfq_put_async_bfqq(bfqd, &bfqg->async_idle_bfqq);
++}
++
++static void bfq_exit_queue(struct elevator_queue *e)
++{
++	struct bfq_data *bfqd = e->elevator_data;
++	struct request_queue *q = bfqd->queue;
++	struct bfq_queue *bfqq, *n;
++
++	bfq_shutdown_timer_wq(bfqd);
++
++	spin_lock_irq(q->queue_lock);
++
++	BUG_ON(bfqd->in_service_queue != NULL);
++	list_for_each_entry_safe(bfqq, n, &bfqd->idle_list, bfqq_list)
++		bfq_deactivate_bfqq(bfqd, bfqq, 0);
++
++	bfq_disconnect_groups(bfqd);
++	spin_unlock_irq(q->queue_lock);
++
++	bfq_shutdown_timer_wq(bfqd);
++
++	synchronize_rcu();
++
++	BUG_ON(timer_pending(&bfqd->idle_slice_timer));
++
++	bfq_free_root_group(bfqd);
++	kfree(bfqd);
++}
++
++static int bfq_init_queue(struct request_queue *q)
++{
++	struct bfq_group *bfqg;
++	struct bfq_data *bfqd;
++
++	bfqd = kzalloc_node(sizeof(*bfqd), GFP_KERNEL, q->node);
++	if (bfqd == NULL)
++		return -ENOMEM;
++
++	/*
++	 * Our fallback bfqq if bfq_find_alloc_queue() runs into OOM issues.
++	 * Grab a permanent reference to it, so that the normal code flow
++	 * will not attempt to free it.
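++	 * (The atomic_inc() below keeps oom_bfqq's reference count
++	 * above zero for the lifetime of the elevator, so
++	 * bfq_put_queue() never frees it through the normal path.)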
++ */ ++ bfq_init_bfqq(bfqd, &bfqd->oom_bfqq, 1, 0); ++ atomic_inc(&bfqd->oom_bfqq.ref); ++ ++ bfqd->queue = q; ++ q->elevator->elevator_data = bfqd; ++ ++ bfqg = bfq_alloc_root_group(bfqd, q->node); ++ if (bfqg == NULL) { ++ kfree(bfqd); ++ return -ENOMEM; ++ } ++ ++ bfqd->root_group = bfqg; ++ ++ init_timer(&bfqd->idle_slice_timer); ++ bfqd->idle_slice_timer.function = bfq_idle_slice_timer; ++ bfqd->idle_slice_timer.data = (unsigned long)bfqd; ++ ++ bfqd->rq_pos_tree = RB_ROOT; ++ ++ INIT_WORK(&bfqd->unplug_work, bfq_kick_queue); ++ ++ INIT_LIST_HEAD(&bfqd->active_list); ++ INIT_LIST_HEAD(&bfqd->idle_list); ++ ++ bfqd->hw_tag = -1; ++ ++ bfqd->bfq_max_budget = bfq_default_max_budget; ++ ++ bfqd->bfq_quantum = bfq_quantum; ++ bfqd->bfq_fifo_expire[0] = bfq_fifo_expire[0]; ++ bfqd->bfq_fifo_expire[1] = bfq_fifo_expire[1]; ++ bfqd->bfq_back_max = bfq_back_max; ++ bfqd->bfq_back_penalty = bfq_back_penalty; ++ bfqd->bfq_slice_idle = bfq_slice_idle; ++ bfqd->bfq_class_idle_last_service = 0; ++ bfqd->bfq_max_budget_async_rq = bfq_max_budget_async_rq; ++ bfqd->bfq_timeout[BLK_RW_ASYNC] = bfq_timeout_async; ++ bfqd->bfq_timeout[BLK_RW_SYNC] = bfq_timeout_sync; ++ ++ bfqd->low_latency = true; ++ ++ bfqd->bfq_raising_coeff = 20; ++ bfqd->bfq_raising_rt_max_time = msecs_to_jiffies(300); ++ bfqd->bfq_raising_max_time = 0; ++ bfqd->bfq_raising_min_idle_time = msecs_to_jiffies(2000); ++ bfqd->bfq_raising_min_inter_arr_async = msecs_to_jiffies(500); ++ bfqd->bfq_raising_max_softrt_rate = 7000; /* ++ * Approximate rate required ++ * to playback or record a ++ * high-definition compressed ++ * video. ++ */ ++ bfqd->raised_busy_queues = 0; ++ ++ /* Initially estimate the device's peak rate as the reference rate */ ++ if (blk_queue_nonrot(bfqd->queue)) { ++ bfqd->RT_prod = R_nonrot * T_nonrot; ++ bfqd->peak_rate = R_nonrot; ++ } else { ++ bfqd->RT_prod = R_rot * T_rot; ++ bfqd->peak_rate = R_rot; ++ } ++ ++ return 0; ++} ++ ++static void bfq_slab_kill(void) ++{ ++ if (bfq_pool != NULL) ++ kmem_cache_destroy(bfq_pool); ++} ++ ++static int __init bfq_slab_setup(void) ++{ ++ bfq_pool = KMEM_CACHE(bfq_queue, 0); ++ if (bfq_pool == NULL) ++ return -ENOMEM; ++ return 0; ++} ++ ++static ssize_t bfq_var_show(unsigned int var, char *page) ++{ ++ return sprintf(page, "%d\n", var); ++} ++ ++static ssize_t bfq_var_store(unsigned long *var, const char *page, size_t count) ++{ ++ unsigned long new_val; ++ int ret = kstrtoul(page, 10, &new_val); ++ ++ if (ret == 0) ++ *var = new_val; ++ ++ return count; ++} ++ ++static ssize_t bfq_raising_max_time_show(struct elevator_queue *e, char *page) ++{ ++ struct bfq_data *bfqd = e->elevator_data; ++ return sprintf(page, "%d\n", bfqd->bfq_raising_max_time > 0 ? 
++ jiffies_to_msecs(bfqd->bfq_raising_max_time) : ++ jiffies_to_msecs(bfq_wrais_duration(bfqd))); ++} ++ ++static ssize_t bfq_weights_show(struct elevator_queue *e, char *page) ++{ ++ struct bfq_queue *bfqq; ++ struct bfq_data *bfqd = e->elevator_data; ++ ssize_t num_char = 0; ++ ++ num_char += sprintf(page + num_char, "Tot reqs queued %d\n\n", ++ bfqd->queued); ++ ++ spin_lock_irq(bfqd->queue->queue_lock); ++ ++ num_char += sprintf(page + num_char, "Active:\n"); ++ list_for_each_entry(bfqq, &bfqd->active_list, bfqq_list) { ++ num_char += sprintf(page + num_char, ++ "pid%d: weight %hu, nr_queued %d %d," ++ " dur %d/%u\n", ++ bfqq->pid, ++ bfqq->entity.weight, ++ bfqq->queued[0], ++ bfqq->queued[1], ++ jiffies_to_msecs(jiffies - ++ bfqq->last_rais_start_finish), ++ jiffies_to_msecs(bfqq->raising_cur_max_time)); ++ } ++ ++ num_char += sprintf(page + num_char, "Idle:\n"); ++ list_for_each_entry(bfqq, &bfqd->idle_list, bfqq_list) { ++ num_char += sprintf(page + num_char, ++ "pid%d: weight %hu, dur %d/%u\n", ++ bfqq->pid, ++ bfqq->entity.weight, ++ jiffies_to_msecs(jiffies - ++ bfqq->last_rais_start_finish), ++ jiffies_to_msecs(bfqq->raising_cur_max_time)); ++ } ++ ++ spin_unlock_irq(bfqd->queue->queue_lock); ++ ++ return num_char; ++} ++ ++#define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \ ++static ssize_t __FUNC(struct elevator_queue *e, char *page) \ ++{ \ ++ struct bfq_data *bfqd = e->elevator_data; \ ++ unsigned int __data = __VAR; \ ++ if (__CONV) \ ++ __data = jiffies_to_msecs(__data); \ ++ return bfq_var_show(__data, (page)); \ ++} ++SHOW_FUNCTION(bfq_quantum_show, bfqd->bfq_quantum, 0); ++SHOW_FUNCTION(bfq_fifo_expire_sync_show, bfqd->bfq_fifo_expire[1], 1); ++SHOW_FUNCTION(bfq_fifo_expire_async_show, bfqd->bfq_fifo_expire[0], 1); ++SHOW_FUNCTION(bfq_back_seek_max_show, bfqd->bfq_back_max, 0); ++SHOW_FUNCTION(bfq_back_seek_penalty_show, bfqd->bfq_back_penalty, 0); ++SHOW_FUNCTION(bfq_slice_idle_show, bfqd->bfq_slice_idle, 1); ++SHOW_FUNCTION(bfq_max_budget_show, bfqd->bfq_user_max_budget, 0); ++SHOW_FUNCTION(bfq_max_budget_async_rq_show, bfqd->bfq_max_budget_async_rq, 0); ++SHOW_FUNCTION(bfq_timeout_sync_show, bfqd->bfq_timeout[BLK_RW_SYNC], 1); ++SHOW_FUNCTION(bfq_timeout_async_show, bfqd->bfq_timeout[BLK_RW_ASYNC], 1); ++SHOW_FUNCTION(bfq_low_latency_show, bfqd->low_latency, 0); ++SHOW_FUNCTION(bfq_raising_coeff_show, bfqd->bfq_raising_coeff, 0); ++SHOW_FUNCTION(bfq_raising_rt_max_time_show, bfqd->bfq_raising_rt_max_time, 1); ++SHOW_FUNCTION(bfq_raising_min_idle_time_show, bfqd->bfq_raising_min_idle_time, ++ 1); ++SHOW_FUNCTION(bfq_raising_min_inter_arr_async_show, ++ bfqd->bfq_raising_min_inter_arr_async, ++ 1); ++SHOW_FUNCTION(bfq_raising_max_softrt_rate_show, ++ bfqd->bfq_raising_max_softrt_rate, 0); ++#undef SHOW_FUNCTION ++ ++#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \ ++static ssize_t \ ++__FUNC(struct elevator_queue *e, const char *page, size_t count) \ ++{ \ ++ struct bfq_data *bfqd = e->elevator_data; \ ++ unsigned long uninitialized_var(__data); \ ++ int ret = bfq_var_store(&__data, (page), count); \ ++ if (__data < (MIN)) \ ++ __data = (MIN); \ ++ else if (__data > (MAX)) \ ++ __data = (MAX); \ ++ if (__CONV) \ ++ *(__PTR) = msecs_to_jiffies(__data); \ ++ else \ ++ *(__PTR) = __data; \ ++ return ret; \ ++} ++STORE_FUNCTION(bfq_quantum_store, &bfqd->bfq_quantum, 1, INT_MAX, 0); ++STORE_FUNCTION(bfq_fifo_expire_sync_store, &bfqd->bfq_fifo_expire[1], 1, ++ INT_MAX, 1); ++STORE_FUNCTION(bfq_fifo_expire_async_store, &bfqd->bfq_fifo_expire[0], 1, ++ INT_MAX, 1); 
++STORE_FUNCTION(bfq_back_seek_max_store, &bfqd->bfq_back_max, 0, INT_MAX, 0); ++STORE_FUNCTION(bfq_back_seek_penalty_store, &bfqd->bfq_back_penalty, 1, ++ INT_MAX, 0); ++STORE_FUNCTION(bfq_slice_idle_store, &bfqd->bfq_slice_idle, 0, INT_MAX, 1); ++STORE_FUNCTION(bfq_max_budget_async_rq_store, &bfqd->bfq_max_budget_async_rq, ++ 1, INT_MAX, 0); ++STORE_FUNCTION(bfq_timeout_async_store, &bfqd->bfq_timeout[BLK_RW_ASYNC], 0, ++ INT_MAX, 1); ++STORE_FUNCTION(bfq_raising_coeff_store, &bfqd->bfq_raising_coeff, 1, ++ INT_MAX, 0); ++STORE_FUNCTION(bfq_raising_max_time_store, &bfqd->bfq_raising_max_time, 0, ++ INT_MAX, 1); ++STORE_FUNCTION(bfq_raising_rt_max_time_store, &bfqd->bfq_raising_rt_max_time, 0, ++ INT_MAX, 1); ++STORE_FUNCTION(bfq_raising_min_idle_time_store, ++ &bfqd->bfq_raising_min_idle_time, 0, INT_MAX, 1); ++STORE_FUNCTION(bfq_raising_min_inter_arr_async_store, ++ &bfqd->bfq_raising_min_inter_arr_async, 0, INT_MAX, 1); ++STORE_FUNCTION(bfq_raising_max_softrt_rate_store, ++ &bfqd->bfq_raising_max_softrt_rate, 0, INT_MAX, 0); ++#undef STORE_FUNCTION ++ ++/* do nothing for the moment */ ++static ssize_t bfq_weights_store(struct elevator_queue *e, ++ const char *page, size_t count) ++{ ++ return count; ++} ++ ++static inline unsigned long bfq_estimated_max_budget(struct bfq_data *bfqd) ++{ ++ u64 timeout = jiffies_to_msecs(bfqd->bfq_timeout[BLK_RW_SYNC]); ++ ++ if (bfqd->peak_rate_samples >= BFQ_PEAK_RATE_SAMPLES) ++ return bfq_calc_max_budget(bfqd->peak_rate, timeout); ++ else ++ return bfq_default_max_budget; ++} ++ ++static ssize_t bfq_max_budget_store(struct elevator_queue *e, ++ const char *page, size_t count) ++{ ++ struct bfq_data *bfqd = e->elevator_data; ++ unsigned long uninitialized_var(__data); ++ int ret = bfq_var_store(&__data, (page), count); ++ ++ if (__data == 0) ++ bfqd->bfq_max_budget = bfq_estimated_max_budget(bfqd); ++ else { ++ if (__data > INT_MAX) ++ __data = INT_MAX; ++ bfqd->bfq_max_budget = __data; ++ } ++ ++ bfqd->bfq_user_max_budget = __data; ++ ++ return ret; ++} ++ ++static ssize_t bfq_timeout_sync_store(struct elevator_queue *e, ++ const char *page, size_t count) ++{ ++ struct bfq_data *bfqd = e->elevator_data; ++ unsigned long uninitialized_var(__data); ++ int ret = bfq_var_store(&__data, (page), count); ++ ++ if (__data < 1) ++ __data = 1; ++ else if (__data > INT_MAX) ++ __data = INT_MAX; ++ ++ bfqd->bfq_timeout[BLK_RW_SYNC] = msecs_to_jiffies(__data); ++ if (bfqd->bfq_user_max_budget == 0) ++ bfqd->bfq_max_budget = bfq_estimated_max_budget(bfqd); ++ ++ return ret; ++} ++ ++static ssize_t bfq_low_latency_store(struct elevator_queue *e, ++ const char *page, size_t count) ++{ ++ struct bfq_data *bfqd = e->elevator_data; ++ unsigned long uninitialized_var(__data); ++ int ret = bfq_var_store(&__data, (page), count); ++ ++ if (__data > 1) ++ __data = 1; ++ if (__data == 0 && bfqd->low_latency != 0) ++ bfq_end_raising(bfqd); ++ bfqd->low_latency = __data; ++ ++ return ret; ++} ++ ++#define BFQ_ATTR(name) \ ++ __ATTR(name, S_IRUGO|S_IWUSR, bfq_##name##_show, bfq_##name##_store) ++ ++static struct elv_fs_entry bfq_attrs[] = { ++ BFQ_ATTR(quantum), ++ BFQ_ATTR(fifo_expire_sync), ++ BFQ_ATTR(fifo_expire_async), ++ BFQ_ATTR(back_seek_max), ++ BFQ_ATTR(back_seek_penalty), ++ BFQ_ATTR(slice_idle), ++ BFQ_ATTR(max_budget), ++ BFQ_ATTR(max_budget_async_rq), ++ BFQ_ATTR(timeout_sync), ++ BFQ_ATTR(timeout_async), ++ BFQ_ATTR(low_latency), ++ BFQ_ATTR(raising_coeff), ++ BFQ_ATTR(raising_max_time), ++ BFQ_ATTR(raising_rt_max_time), ++ BFQ_ATTR(raising_min_idle_time), ++ 
BFQ_ATTR(raising_min_inter_arr_async), ++ BFQ_ATTR(raising_max_softrt_rate), ++ BFQ_ATTR(weights), ++ __ATTR_NULL ++}; ++ ++static struct elevator_type iosched_bfq = { ++ .ops = { ++ .elevator_merge_fn = bfq_merge, ++ .elevator_merged_fn = bfq_merged_request, ++ .elevator_merge_req_fn = bfq_merged_requests, ++ .elevator_allow_merge_fn = bfq_allow_merge, ++ .elevator_dispatch_fn = bfq_dispatch_requests, ++ .elevator_add_req_fn = bfq_insert_request, ++ .elevator_activate_req_fn = bfq_activate_request, ++ .elevator_deactivate_req_fn = bfq_deactivate_request, ++ .elevator_completed_req_fn = bfq_completed_request, ++ .elevator_former_req_fn = elv_rb_former_request, ++ .elevator_latter_req_fn = elv_rb_latter_request, ++ .elevator_init_icq_fn = bfq_init_icq, ++ .elevator_exit_icq_fn = bfq_exit_icq, ++ .elevator_set_req_fn = bfq_set_request, ++ .elevator_put_req_fn = bfq_put_request, ++ .elevator_may_queue_fn = bfq_may_queue, ++ .elevator_init_fn = bfq_init_queue, ++ .elevator_exit_fn = bfq_exit_queue, ++ }, ++ .icq_size = sizeof(struct bfq_io_cq), ++ .icq_align = __alignof__(struct bfq_io_cq), ++ .elevator_attrs = bfq_attrs, ++ .elevator_name = "bfq", ++ .elevator_owner = THIS_MODULE, ++}; ++ ++static int __init bfq_init(void) ++{ ++ /* ++ * Can be 0 on HZ < 1000 setups. ++ */ ++ if (bfq_slice_idle == 0) ++ bfq_slice_idle = 1; ++ ++ if (bfq_timeout_async == 0) ++ bfq_timeout_async = 1; ++ ++ if (bfq_slab_setup()) ++ return -ENOMEM; ++ ++ elv_register(&iosched_bfq); ++ printk(KERN_INFO "BFQ I/O-scheduler version: v7"); ++ ++ return 0; ++} ++ ++static void __exit bfq_exit(void) ++{ ++ elv_unregister(&iosched_bfq); ++ bfq_slab_kill(); ++} ++ ++module_init(bfq_init); ++module_exit(bfq_exit); ++ ++MODULE_AUTHOR("Fabio Checconi, Paolo Valente"); ++MODULE_LICENSE("GPL"); ++MODULE_DESCRIPTION("Budget Fair Queueing IO scheduler"); +diff --git a/block/bfq-sched.c b/block/bfq-sched.c +new file mode 100644 +index 0000000..30df81c +--- /dev/null ++++ b/block/bfq-sched.c +@@ -0,0 +1,1077 @@ ++/* ++ * BFQ: Hierarchical B-WF2Q+ scheduler. ++ * ++ * Based on ideas and code from CFQ: ++ * Copyright (C) 2003 Jens Axboe ++ * ++ * Copyright (C) 2008 Fabio Checconi ++ * Paolo Valente ++ * ++ * Copyright (C) 2010 Paolo Valente ++ */ ++ ++#ifdef CONFIG_CGROUP_BFQIO ++#define for_each_entity(entity) \ ++ for (; entity != NULL; entity = entity->parent) ++ ++#define for_each_entity_safe(entity, parent) \ ++ for (; entity && ({ parent = entity->parent; 1; }); entity = parent) ++ ++static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd, ++ int extract, ++ struct bfq_data *bfqd); ++ ++static inline void bfq_update_budget(struct bfq_entity *next_active) ++{ ++ struct bfq_entity *bfqg_entity; ++ struct bfq_group *bfqg; ++ struct bfq_sched_data *group_sd; ++ ++ BUG_ON(next_active == NULL); ++ ++ group_sd = next_active->sched_data; ++ ++ bfqg = container_of(group_sd, struct bfq_group, sched_data); ++ /* ++ * bfq_group's my_entity field is not NULL only if the group ++ * is not the root group. We must not touch the root entity ++ * as it must never become an active entity. 
++ */ ++ bfqg_entity = bfqg->my_entity; ++ if (bfqg_entity != NULL) ++ bfqg_entity->budget = next_active->budget; ++} ++ ++static int bfq_update_next_active(struct bfq_sched_data *sd) ++{ ++ struct bfq_entity *next_active; ++ ++ if (sd->active_entity != NULL) ++ /* will update/requeue at the end of service */ ++ return 0; ++ ++ /* ++ * NOTE: this can be improved in many ways, such as returning ++ * 1 (and thus propagating upwards the update) only when the ++ * budget changes, or caching the bfqq that will be scheduled ++ * next from this subtree. By now we worry more about ++ * correctness than about performance... ++ */ ++ next_active = bfq_lookup_next_entity(sd, 0, NULL); ++ sd->next_active = next_active; ++ ++ if (next_active != NULL) ++ bfq_update_budget(next_active); ++ ++ return 1; ++} ++ ++static inline void bfq_check_next_active(struct bfq_sched_data *sd, ++ struct bfq_entity *entity) ++{ ++ BUG_ON(sd->next_active != entity); ++} ++#else ++#define for_each_entity(entity) \ ++ for (; entity != NULL; entity = NULL) ++ ++#define for_each_entity_safe(entity, parent) \ ++ for (parent = NULL; entity != NULL; entity = parent) ++ ++static inline int bfq_update_next_active(struct bfq_sched_data *sd) ++{ ++ return 0; ++} ++ ++static inline void bfq_check_next_active(struct bfq_sched_data *sd, ++ struct bfq_entity *entity) ++{ ++} ++ ++static inline void bfq_update_budget(struct bfq_entity *next_active) ++{ ++} ++#endif ++ ++/* ++ * Shift for timestamp calculations. This actually limits the maximum ++ * service allowed in one timestamp delta (small shift values increase it), ++ * the maximum total weight that can be used for the queues in the system ++ * (big shift values increase it), and the period of virtual time wraparounds. ++ */ ++#define WFQ_SERVICE_SHIFT 22 ++ ++/** ++ * bfq_gt - compare two timestamps. ++ * @a: first ts. ++ * @b: second ts. ++ * ++ * Return @a > @b, dealing with wrapping correctly. ++ */ ++static inline int bfq_gt(u64 a, u64 b) ++{ ++ return (s64)(a - b) > 0; ++} ++ ++static inline struct bfq_queue *bfq_entity_to_bfqq(struct bfq_entity *entity) ++{ ++ struct bfq_queue *bfqq = NULL; ++ ++ BUG_ON(entity == NULL); ++ ++ if (entity->my_sched_data == NULL) ++ bfqq = container_of(entity, struct bfq_queue, entity); ++ ++ return bfqq; ++} ++ ++ ++/** ++ * bfq_delta - map service into the virtual time domain. ++ * @service: amount of service. ++ * @weight: scale factor (weight of an entity or weight sum). ++ */ ++static inline u64 bfq_delta(unsigned long service, ++ unsigned long weight) ++{ ++ u64 d = (u64)service << WFQ_SERVICE_SHIFT; ++ ++ do_div(d, weight); ++ return d; ++} ++ ++/** ++ * bfq_calc_finish - assign the finish time to an entity. ++ * @entity: the entity to act upon. ++ * @service: the service to be charged to the entity. ++ */ ++static inline void bfq_calc_finish(struct bfq_entity *entity, ++ unsigned long service) ++{ ++ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); ++ ++ BUG_ON(entity->weight == 0); ++ ++ entity->finish = entity->start + ++ bfq_delta(service, entity->weight); ++ ++ if (bfqq != NULL) { ++ bfq_log_bfqq(bfqq->bfqd, bfqq, ++ "calc_finish: serv %lu, w %d", ++ service, entity->weight); ++ bfq_log_bfqq(bfqq->bfqd, bfqq, ++ "calc_finish: start %llu, finish %llu, delta %llu", ++ entity->start, entity->finish, ++ bfq_delta(service, entity->weight)); ++ } ++} ++ ++/** ++ * bfq_entity_of - get an entity from a node. ++ * @node: the node field of the entity. ++ * ++ * Convert a node pointer to the relative entity. 
This is used only ++ * to simplify the logic of some functions and not as the generic ++ * conversion mechanism because, e.g., in the tree walking functions, ++ * the check for a %NULL value would be redundant. ++ */ ++static inline struct bfq_entity *bfq_entity_of(struct rb_node *node) ++{ ++ struct bfq_entity *entity = NULL; ++ ++ if (node != NULL) ++ entity = rb_entry(node, struct bfq_entity, rb_node); ++ ++ return entity; ++} ++ ++/** ++ * bfq_extract - remove an entity from a tree. ++ * @root: the tree root. ++ * @entity: the entity to remove. ++ */ ++static inline void bfq_extract(struct rb_root *root, ++ struct bfq_entity *entity) ++{ ++ BUG_ON(entity->tree != root); ++ ++ entity->tree = NULL; ++ rb_erase(&entity->rb_node, root); ++} ++ ++/** ++ * bfq_idle_extract - extract an entity from the idle tree. ++ * @st: the service tree of the owning @entity. ++ * @entity: the entity being removed. ++ */ ++static void bfq_idle_extract(struct bfq_service_tree *st, ++ struct bfq_entity *entity) ++{ ++ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); ++ struct rb_node *next; ++ ++ BUG_ON(entity->tree != &st->idle); ++ ++ if (entity == st->first_idle) { ++ next = rb_next(&entity->rb_node); ++ st->first_idle = bfq_entity_of(next); ++ } ++ ++ if (entity == st->last_idle) { ++ next = rb_prev(&entity->rb_node); ++ st->last_idle = bfq_entity_of(next); ++ } ++ ++ bfq_extract(&st->idle, entity); ++ ++ if (bfqq != NULL) ++ list_del(&bfqq->bfqq_list); ++} ++ ++/** ++ * bfq_insert - generic tree insertion. ++ * @root: tree root. ++ * @entity: entity to insert. ++ * ++ * This is used for the idle and the active tree, since they are both ++ * ordered by finish time. ++ */ ++static void bfq_insert(struct rb_root *root, struct bfq_entity *entity) ++{ ++ struct bfq_entity *entry; ++ struct rb_node **node = &root->rb_node; ++ struct rb_node *parent = NULL; ++ ++ BUG_ON(entity->tree != NULL); ++ ++ while (*node != NULL) { ++ parent = *node; ++ entry = rb_entry(parent, struct bfq_entity, rb_node); ++ ++ if (bfq_gt(entry->finish, entity->finish)) ++ node = &parent->rb_left; ++ else ++ node = &parent->rb_right; ++ } ++ ++ rb_link_node(&entity->rb_node, parent, node); ++ rb_insert_color(&entity->rb_node, root); ++ ++ entity->tree = root; ++} ++ ++/** ++ * bfq_update_min - update the min_start field of an entity. ++ * @entity: the entity to update. ++ * @node: one of its children. ++ * ++ * This function is called when @entity may store an invalid value for ++ * min_start due to updates to the active tree. The function assumes ++ * that the subtree rooted at @node (which may be its left or its right ++ * child) has a valid min_start value. ++ */ ++static inline void bfq_update_min(struct bfq_entity *entity, ++ struct rb_node *node) ++{ ++ struct bfq_entity *child; ++ ++ if (node != NULL) { ++ child = rb_entry(node, struct bfq_entity, rb_node); ++ if (bfq_gt(entity->min_start, child->min_start)) ++ entity->min_start = child->min_start; ++ } ++} ++ ++/** ++ * bfq_update_active_node - recalculate min_start. ++ * @node: the node to update. ++ * ++ * @node may have changed position or one of its children may have moved; ++ * this function updates its min_start value. The left and right subtrees ++ * are assumed to hold a correct min_start value.
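bfq_update_min() and bfq_update_active_node() maintain a classic augmented-search-tree invariant: each node caches the minimum start time over its whole subtree, which is what later makes the eligible-entity lookup O(log N). A stripped-down sketch of the same pull-up step, on a plain binary tree with parent pointers instead of the kernel's rb_node (all names here are illustrative):

#include <stddef.h>

struct node {
	unsigned long long start;	/* own start timestamp */
	unsigned long long min_start;	/* minimum over this subtree */
	struct node *left, *right, *parent;
};

/* Recompute n->min_start from its own start and the cached values of
 * its children, as bfq_update_active_node() does. */
static void update_node(struct node *n)
{
	n->min_start = n->start;
	if (n->left && n->left->min_start < n->min_start)
		n->min_start = n->left->min_start;
	if (n->right && n->right->min_start < n->min_start)
		n->min_start = n->right->min_start;
}

/* After a structural change, refresh the cached values on the path from
 * the deepest modified node up to the root, as bfq_update_active_tree()
 * does. */
static void update_to_root(struct node *n)
{
	for (; n != NULL; n = n->parent)
		update_node(n);
}

Two simplifications versus the real code: the comparisons here are plain < rather than the wraparound-safe bfq_gt(), and rb-tree rebalancing is ignored, which is why bfq_update_active_tree() additionally has to refresh a sibling at each step.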
++ */ ++static inline void bfq_update_active_node(struct rb_node *node) ++{ ++ struct bfq_entity *entity = rb_entry(node, struct bfq_entity, rb_node); ++ ++ entity->min_start = entity->start; ++ bfq_update_min(entity, node->rb_right); ++ bfq_update_min(entity, node->rb_left); ++} ++ ++/** ++ * bfq_update_active_tree - update min_start for the whole active tree. ++ * @node: the starting node. ++ * ++ * @node must be the deepest modified node after an update. This function ++ * updates its min_start using the values held by its children, assuming ++ * that they did not change, and then updates all the nodes that may have ++ * changed in the path to the root. The only nodes that may have changed ++ * are the ones in the path or their siblings. ++ */ ++static void bfq_update_active_tree(struct rb_node *node) ++{ ++ struct rb_node *parent; ++ ++up: ++ bfq_update_active_node(node); ++ ++ parent = rb_parent(node); ++ if (parent == NULL) ++ return; ++ ++ if (node == parent->rb_left && parent->rb_right != NULL) ++ bfq_update_active_node(parent->rb_right); ++ else if (parent->rb_left != NULL) ++ bfq_update_active_node(parent->rb_left); ++ ++ node = parent; ++ goto up; ++} ++ ++/** ++ * bfq_active_insert - insert an entity in the active tree of its group/device. ++ * @st: the service tree of the entity. ++ * @entity: the entity being inserted. ++ * ++ * The active tree is ordered by finish time, but an extra key is kept ++ * for each node, containing the minimum value for the start times of ++ * its children (and the node itself), so it's possible to search for ++ * the eligible node with the lowest finish time in logarithmic time. ++ */ ++static void bfq_active_insert(struct bfq_service_tree *st, ++ struct bfq_entity *entity) ++{ ++ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); ++ struct rb_node *node = &entity->rb_node; ++ ++ bfq_insert(&st->active, entity); ++ ++ if (node->rb_left != NULL) ++ node = node->rb_left; ++ else if (node->rb_right != NULL) ++ node = node->rb_right; ++ ++ bfq_update_active_tree(node); ++ ++ if (bfqq != NULL) ++ list_add(&bfqq->bfqq_list, &bfqq->bfqd->active_list); ++} ++ ++/** ++ * bfq_ioprio_to_weight - calc a weight from an ioprio. ++ * @ioprio: the ioprio value to convert. ++ */ ++static unsigned short bfq_ioprio_to_weight(int ioprio) ++{ ++ WARN_ON(ioprio < 0 || ioprio >= IOPRIO_BE_NR); ++ return IOPRIO_BE_NR - ioprio; ++} ++ ++/** ++ * bfq_weight_to_ioprio - calc an ioprio from a weight. ++ * @weight: the weight value to convert. ++ * ++ * To preserve as much as possible the old ioprio-only user interface, ++ * 0 is used as an escape ioprio value for weights (numerically) equal to or ++ * larger than IOPRIO_BE_NR. ++ */ ++static unsigned short bfq_weight_to_ioprio(int weight) ++{ ++ WARN_ON(weight < BFQ_MIN_WEIGHT || weight > BFQ_MAX_WEIGHT); ++ return IOPRIO_BE_NR - weight < 0 ? 0 : IOPRIO_BE_NR - weight; ++} ++ ++static inline void bfq_get_entity(struct bfq_entity *entity) ++{ ++ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); ++ struct bfq_sched_data *sd; ++ ++ if (bfqq != NULL) { ++ sd = entity->sched_data; ++ atomic_inc(&bfqq->ref); ++ bfq_log_bfqq(bfqq->bfqd, bfqq, "get_entity: %p %d", ++ bfqq, atomic_read(&bfqq->ref)); ++ } ++} ++ ++/** ++ * bfq_find_deepest - find the deepest node that an extraction can modify. ++ * @node: the node being removed. ++ * ++ * Do the first step of an extraction in an rb tree, looking for the ++ * node that will replace @node, and returning the deepest node that ++ * the following modifications to the tree can touch.
If @node is the ++ * last node in the tree return %NULL. ++ */ ++static struct rb_node *bfq_find_deepest(struct rb_node *node) ++{ ++ struct rb_node *deepest; ++ ++ if (node->rb_right == NULL && node->rb_left == NULL) ++ deepest = rb_parent(node); ++ else if (node->rb_right == NULL) ++ deepest = node->rb_left; ++ else if (node->rb_left == NULL) ++ deepest = node->rb_right; ++ else { ++ deepest = rb_next(node); ++ if (deepest->rb_right != NULL) ++ deepest = deepest->rb_right; ++ else if (rb_parent(deepest) != node) ++ deepest = rb_parent(deepest); ++ } ++ ++ return deepest; ++} ++ ++/** ++ * bfq_active_extract - remove an entity from the active tree. ++ * @st: the service_tree containing the tree. ++ * @entity: the entity being removed. ++ */ ++static void bfq_active_extract(struct bfq_service_tree *st, ++ struct bfq_entity *entity) ++{ ++ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); ++ struct rb_node *node; ++ ++ node = bfq_find_deepest(&entity->rb_node); ++ bfq_extract(&st->active, entity); ++ ++ if (node != NULL) ++ bfq_update_active_tree(node); ++ ++ if (bfqq != NULL) ++ list_del(&bfqq->bfqq_list); ++} ++ ++/** ++ * bfq_idle_insert - insert an entity into the idle tree. ++ * @st: the service tree containing the tree. ++ * @entity: the entity to insert. ++ */ ++static void bfq_idle_insert(struct bfq_service_tree *st, ++ struct bfq_entity *entity) ++{ ++ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); ++ struct bfq_entity *first_idle = st->first_idle; ++ struct bfq_entity *last_idle = st->last_idle; ++ ++ if (first_idle == NULL || bfq_gt(first_idle->finish, entity->finish)) ++ st->first_idle = entity; ++ if (last_idle == NULL || bfq_gt(entity->finish, last_idle->finish)) ++ st->last_idle = entity; ++ ++ bfq_insert(&st->idle, entity); ++ ++ if (bfqq != NULL) ++ list_add(&bfqq->bfqq_list, &bfqq->bfqd->idle_list); ++} ++ ++/** ++ * bfq_forget_entity - remove an entity from the wfq trees. ++ * @st: the service tree. ++ * @entity: the entity being removed. ++ * ++ * Update the device status and forget everything about @entity, putting ++ * the device reference to it, if it is a queue. Entities belonging to ++ * groups are not refcounted. ++ */ ++static void bfq_forget_entity(struct bfq_service_tree *st, ++ struct bfq_entity *entity) ++{ ++ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); ++ struct bfq_sched_data *sd; ++ ++ BUG_ON(!entity->on_st); ++ ++ entity->on_st = 0; ++ st->wsum -= entity->weight; ++ if (bfqq != NULL) { ++ sd = entity->sched_data; ++ bfq_log_bfqq(bfqq->bfqd, bfqq, "forget_entity: %p %d", ++ bfqq, atomic_read(&bfqq->ref)); ++ bfq_put_queue(bfqq); ++ } ++} ++ ++/** ++ * bfq_put_idle_entity - release the idle tree ref of an entity. ++ * @st: service tree for the entity. ++ * @entity: the entity being released. ++ */ ++static void bfq_put_idle_entity(struct bfq_service_tree *st, ++ struct bfq_entity *entity) ++{ ++ bfq_idle_extract(st, entity); ++ bfq_forget_entity(st, entity); ++} ++ ++/** ++ * bfq_forget_idle - update the idle tree if necessary. ++ * @st: the service tree to act upon. ++ * ++ * To preserve the global O(log N) complexity we only remove one entry here; ++ * as the idle tree will not grow indefinitely this can be done safely. 
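The first_idle/last_idle updates above, like every timestamp comparison in this scheduler, go through bfq_gt(), which subtracts in unsigned arithmetic and reads the difference back as a signed value, so the ordering survives wraparound of the 64-bit virtual clock. A self-contained demonstration of that trick (it relies on two's-complement conversion, as the kernel does):

#include <stdint.h>
#include <stdio.h>

/* Same idea as bfq_gt(): a is "after" b iff the wrapped difference,
 * reinterpreted as signed, is positive. */
static int ts_gt(uint64_t a, uint64_t b)
{
	return (int64_t)(a - b) > 0;
}

int main(void)
{
	uint64_t near_wrap = UINT64_MAX - 5;

	printf("%d\n", ts_gt(10, 5));		/* 1: ordinary case */
	printf("%d\n", ts_gt(2, near_wrap));	/* 1: 2 is 8 ticks past the wrap */
	printf("%d\n", ts_gt(near_wrap, 2));	/* 0: the converse */
	return 0;
}

A naive "a > b" would misorder the second case the moment the virtual clock wrapped, which is why the idle tree's first_idle/last_idle bookkeeping never compares finish times directly.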
++ */ ++static void bfq_forget_idle(struct bfq_service_tree *st) ++{ ++ struct bfq_entity *first_idle = st->first_idle; ++ struct bfq_entity *last_idle = st->last_idle; ++ ++ if (RB_EMPTY_ROOT(&st->active) && last_idle != NULL && ++ !bfq_gt(last_idle->finish, st->vtime)) { ++ /* ++ * Forget the whole idle tree, increasing the vtime past ++ * the last finish time of idle entities. ++ */ ++ st->vtime = last_idle->finish; ++ } ++ ++ if (first_idle != NULL && !bfq_gt(first_idle->finish, st->vtime)) ++ bfq_put_idle_entity(st, first_idle); ++} ++ ++static struct bfq_service_tree * ++__bfq_entity_update_weight_prio(struct bfq_service_tree *old_st, ++ struct bfq_entity *entity) ++{ ++ struct bfq_service_tree *new_st = old_st; ++ ++ if (entity->ioprio_changed) { ++ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); ++ ++ BUG_ON(old_st->wsum < entity->weight); ++ old_st->wsum -= entity->weight; ++ ++ if (entity->new_weight != entity->orig_weight) { ++ entity->orig_weight = entity->new_weight; ++ entity->ioprio = ++ bfq_weight_to_ioprio(entity->orig_weight); ++ } else if (entity->new_ioprio != entity->ioprio) { ++ entity->ioprio = entity->new_ioprio; ++ entity->orig_weight = ++ bfq_ioprio_to_weight(entity->ioprio); ++ } else ++ entity->new_weight = entity->orig_weight = ++ bfq_ioprio_to_weight(entity->ioprio); ++ ++ entity->ioprio_class = entity->new_ioprio_class; ++ entity->ioprio_changed = 0; ++ ++ /* ++ * NOTE: here we may be changing the weight too early, ++ * this will cause unfairness. The correct approach ++ * would have required additional complexity to defer ++ * weight changes to the proper time instants (i.e., ++ * when entity->finish <= old_st->vtime). ++ */ ++ new_st = bfq_entity_service_tree(entity); ++ entity->weight = entity->orig_weight * ++ (bfqq != NULL ? bfqq->raising_coeff : 1); ++ new_st->wsum += entity->weight; ++ ++ if (new_st != old_st) ++ entity->start = new_st->vtime; ++ } ++ ++ return new_st; ++} ++ ++/** ++ * bfq_bfqq_served - update the scheduler status after selection for service. ++ * @bfqq: the queue being served. ++ * @served: bytes to transfer. ++ * ++ * NOTE: this can be optimized, as the timestamps of upper level entities ++ * are synchronized every time a new bfqq is selected for service. By now, ++ * we keep it to better check consistency. ++ */ ++static void bfq_bfqq_served(struct bfq_queue *bfqq, unsigned long served) ++{ ++ struct bfq_entity *entity = &bfqq->entity; ++ struct bfq_service_tree *st; ++ ++ for_each_entity(entity) { ++ st = bfq_entity_service_tree(entity); ++ ++ entity->service += served; ++ BUG_ON(entity->service > entity->budget); ++ BUG_ON(st->wsum == 0); ++ ++ st->vtime += bfq_delta(served, st->wsum); ++ bfq_forget_idle(st); ++ } ++ bfq_log_bfqq(bfqq->bfqd, bfqq, "bfqq_served %lu secs", served); ++} ++ ++/** ++ * bfq_bfqq_charge_full_budget - set the service to the entity budget. ++ * @bfqq: the queue that needs a service update. ++ * ++ * When it's not possible to be fair in the service domain, because ++ * a queue is not consuming its budget fast enough (the meaning of ++ * fast depends on the timeout parameter), we charge it a full ++ * budget. In this way we should obtain a sort of time-domain ++ * fairness among all the seeky/slow queues. 
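bfq_bfqq_served() above advances each tree's virtual time by served/wsum, and bfq_delta() performs that division in fixed point: the service is shifted left by WFQ_SERVICE_SHIFT (22) before dividing by the weight. A small numeric sketch of how that scaling behaves (the weights and service amounts are made up for illustration):

#include <stdint.h>
#include <stdio.h>

#define SERVICE_SHIFT 22	/* mirrors WFQ_SERVICE_SHIFT */

/* Map service into the virtual time domain, as bfq_delta() does. */
static uint64_t vt_delta(unsigned long service, unsigned long weight)
{
	return ((uint64_t)service << SERVICE_SHIFT) / weight;
}

int main(void)
{
	uint64_t vtime = 0;

	/* Two queues with weights 100 and 300 share a tree: wsum = 400.
	 * Serving 8 sectors advances the tree's vtime by (8 << 22) / 400. */
	vtime += vt_delta(8, 400);
	printf("vtime after 8 sectors: %llu\n", (unsigned long long)vtime);

	/* The same service moves an individual finish time further for a
	 * light (low-weight) entity than for a heavy one. */
	printf("finish delta, weight 100: %llu\n",
	       (unsigned long long)vt_delta(8, 100));
	printf("finish delta, weight 300: %llu\n",
	       (unsigned long long)vt_delta(8, 300));
	return 0;
}

The shift bounds both the maximum service chargeable in one step and the total weight the system can carry, as the WFQ_SERVICE_SHIFT comment earlier in this file notes.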
++ */ ++static inline void bfq_bfqq_charge_full_budget(struct bfq_queue *bfqq) ++{ ++ struct bfq_entity *entity = &bfqq->entity; ++ ++ bfq_log_bfqq(bfqq->bfqd, bfqq, "charge_full_budget"); ++ ++ bfq_bfqq_served(bfqq, entity->budget - entity->service); ++} ++ ++/** ++ * __bfq_activate_entity - activate an entity. ++ * @entity: the entity being activated. ++ * ++ * Called whenever an entity is activated, i.e., it is not active and one ++ * of its children receives a new request, or has to be reactivated due to ++ * budget exhaustion. It uses the current budget of the entity (and the ++ * service received if @entity is active) of the queue to calculate its ++ * timestamps. ++ */ ++static void __bfq_activate_entity(struct bfq_entity *entity) ++{ ++ struct bfq_sched_data *sd = entity->sched_data; ++ struct bfq_service_tree *st = bfq_entity_service_tree(entity); ++ ++ if (entity == sd->active_entity) { ++ BUG_ON(entity->tree != NULL); ++ /* ++ * If we are requeueing the current entity we have ++ * to take care of not charging to it service it has ++ * not received. ++ */ ++ bfq_calc_finish(entity, entity->service); ++ entity->start = entity->finish; ++ sd->active_entity = NULL; ++ } else if (entity->tree == &st->active) { ++ /* ++ * Requeueing an entity due to a change of some ++ * next_active entity below it. We reuse the old ++ * start time. ++ */ ++ bfq_active_extract(st, entity); ++ } else if (entity->tree == &st->idle) { ++ /* ++ * Must be on the idle tree, bfq_idle_extract() will ++ * check for that. ++ */ ++ bfq_idle_extract(st, entity); ++ entity->start = bfq_gt(st->vtime, entity->finish) ? ++ st->vtime : entity->finish; ++ } else { ++ /* ++ * The finish time of the entity may be invalid, and ++ * it is in the past for sure, otherwise the queue ++ * would have been on the idle tree. ++ */ ++ entity->start = st->vtime; ++ st->wsum += entity->weight; ++ bfq_get_entity(entity); ++ ++ BUG_ON(entity->on_st); ++ entity->on_st = 1; ++ } ++ ++ st = __bfq_entity_update_weight_prio(st, entity); ++ bfq_calc_finish(entity, entity->budget); ++ bfq_active_insert(st, entity); ++} ++ ++/** ++ * bfq_activate_entity - activate an entity and its ancestors if necessary. ++ * @entity: the entity to activate. ++ * ++ * Activate @entity and all the entities on the path from it to the root. ++ */ ++static void bfq_activate_entity(struct bfq_entity *entity) ++{ ++ struct bfq_sched_data *sd; ++ ++ for_each_entity(entity) { ++ __bfq_activate_entity(entity); ++ ++ sd = entity->sched_data; ++ if (!bfq_update_next_active(sd)) ++ /* ++ * No need to propagate the activation to the ++ * upper entities, as they will be updated when ++ * the active entity is rescheduled. ++ */ ++ break; ++ } ++} ++ ++/** ++ * __bfq_deactivate_entity - deactivate an entity from its service tree. ++ * @entity: the entity to deactivate. ++ * @requeue: if false, the entity will not be put into the idle tree. ++ * ++ * Deactivate an entity, independently from its previous state. If the ++ * entity was not on a service tree just return, otherwise if it is on ++ * any scheduler tree, extract it from that tree, and if necessary ++ * and if the caller did not specify @requeue, put it on the idle tree. ++ * ++ * Return %1 if the caller should update the entity hierarchy, i.e., ++ * if the entity was under service or if it was the next_active for ++ * its sched_data; return %0 otherwise. 
++ */ ++static int __bfq_deactivate_entity(struct bfq_entity *entity, int requeue) ++{ ++ struct bfq_sched_data *sd = entity->sched_data; ++ struct bfq_service_tree *st = bfq_entity_service_tree(entity); ++ int was_active = entity == sd->active_entity; ++ int ret = 0; ++ ++ if (!entity->on_st) ++ return 0; ++ ++ BUG_ON(was_active && entity->tree != NULL); ++ ++ if (was_active) { ++ bfq_calc_finish(entity, entity->service); ++ sd->active_entity = NULL; ++ } else if (entity->tree == &st->active) ++ bfq_active_extract(st, entity); ++ else if (entity->tree == &st->idle) ++ bfq_idle_extract(st, entity); ++ else if (entity->tree != NULL) ++ BUG(); ++ ++ if (was_active || sd->next_active == entity) ++ ret = bfq_update_next_active(sd); ++ ++ if (!requeue || !bfq_gt(entity->finish, st->vtime)) ++ bfq_forget_entity(st, entity); ++ else ++ bfq_idle_insert(st, entity); ++ ++ BUG_ON(sd->active_entity == entity); ++ BUG_ON(sd->next_active == entity); ++ ++ return ret; ++} ++ ++/** ++ * bfq_deactivate_entity - deactivate an entity. ++ * @entity: the entity to deactivate. ++ * @requeue: true if the entity can be put on the idle tree ++ */ ++static void bfq_deactivate_entity(struct bfq_entity *entity, int requeue) ++{ ++ struct bfq_sched_data *sd; ++ struct bfq_entity *parent; ++ ++ for_each_entity_safe(entity, parent) { ++ sd = entity->sched_data; ++ ++ if (!__bfq_deactivate_entity(entity, requeue)) ++ /* ++ * The parent entity is still backlogged, and ++ * we don't need to update it as it is still ++ * under service. ++ */ ++ break; ++ ++ if (sd->next_active != NULL) ++ /* ++ * The parent entity is still backlogged and ++ * the budgets on the path towards the root ++ * need to be updated. ++ */ ++ goto update; ++ ++ /* ++ * If we get here, the parent is no longer backlogged and ++ * we want to propagate the dequeue upwards. ++ */ ++ requeue = 1; ++ } ++ ++ return; ++ ++update: ++ entity = parent; ++ for_each_entity(entity) { ++ __bfq_activate_entity(entity); ++ ++ sd = entity->sched_data; ++ if (!bfq_update_next_active(sd)) ++ break; ++ } ++} ++ ++/** ++ * bfq_update_vtime - update vtime if necessary. ++ * @st: the service tree to act upon. ++ * ++ * If necessary update the service tree vtime to have at least one ++ * eligible entity, skipping to its start time. Assumes that the ++ * active tree of the device is not empty. ++ * ++ * NOTE: this hierarchical implementation updates vtimes quite often, ++ * so we may end up with reactivated tasks getting timestamps after a ++ * vtime skip done because we needed a ->first_active entity on some ++ * intermediate node. ++ */ ++static void bfq_update_vtime(struct bfq_service_tree *st) ++{ ++ struct bfq_entity *entry; ++ struct rb_node *node = st->active.rb_node; ++ ++ entry = rb_entry(node, struct bfq_entity, rb_node); ++ if (bfq_gt(entry->min_start, st->vtime)) { ++ st->vtime = entry->min_start; ++ bfq_forget_idle(st); ++ } ++} ++ ++/** ++ * bfq_first_active_entity - find the eligible entity with the smallest finish time ++ * @st: the service tree to select from. ++ * ++ * This function searches for the first schedulable entity, starting from the ++ * root of the tree and going on the left every time on this side there is ++ * a subtree with at least one eligible (start <= vtime) entity. The path ++ * on the right is followed only if a) the left subtree contains no eligible ++ * entities and b) no eligible entity has been found yet.
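The lookup just described (implemented next in bfq_first_active_entity()) is the heart of B-WF2Q+: among entities that are already eligible, i.e., whose start time is not after the virtual time, pick the one with the smallest finish time. With the tree walk stripped away, the selection rule itself reduces to the following sketch over a flat array (types and names are illustrative only):

#include <stddef.h>

struct sched_entity {
	unsigned long long start, finish;
};

/* Pick the eligible entity (start <= vtime) with minimum finish time,
 * the core B-WF2Q+ rule; returns NULL if none is eligible. */
static struct sched_entity *
pick_next(struct sched_entity *e, size_t n, unsigned long long vtime)
{
	struct sched_entity *best = NULL;
	size_t i;

	for (i = 0; i < n; i++) {
		if (e[i].start > vtime)
			continue;	/* not eligible yet */
		if (best == NULL || e[i].finish < best->finish)
			best = &e[i];
	}
	return best;
}

The cached min_start field is exactly what lets the real code answer this query in O(log N) on the augmented rb-tree instead of the O(n) scan above.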
++ */ ++static struct bfq_entity *bfq_first_active_entity(struct bfq_service_tree *st) ++{ ++ struct bfq_entity *entry, *first = NULL; ++ struct rb_node *node = st->active.rb_node; ++ ++ while (node != NULL) { ++ entry = rb_entry(node, struct bfq_entity, rb_node); ++left: ++ if (!bfq_gt(entry->start, st->vtime)) ++ first = entry; ++ ++ BUG_ON(bfq_gt(entry->min_start, st->vtime)); ++ ++ if (node->rb_left != NULL) { ++ entry = rb_entry(node->rb_left, ++ struct bfq_entity, rb_node); ++ if (!bfq_gt(entry->min_start, st->vtime)) { ++ node = node->rb_left; ++ goto left; ++ } ++ } ++ if (first != NULL) ++ break; ++ node = node->rb_right; ++ } ++ ++ BUG_ON(first == NULL && !RB_EMPTY_ROOT(&st->active)); ++ return first; ++} ++ ++/** ++ * __bfq_lookup_next_entity - return the first eligible entity in @st. ++ * @st: the service tree. ++ * ++ * Update the virtual time in @st and return the first eligible entity ++ * it contains. ++ */ ++static struct bfq_entity *__bfq_lookup_next_entity(struct bfq_service_tree *st, ++ bool force) ++{ ++ struct bfq_entity *entity, *new_next_active = NULL; ++ ++ if (RB_EMPTY_ROOT(&st->active)) ++ return NULL; ++ ++ bfq_update_vtime(st); ++ entity = bfq_first_active_entity(st); ++ BUG_ON(bfq_gt(entity->start, st->vtime)); ++ ++ /* ++ * If the chosen entity does not match the sched_data's ++ * next_active and we are forcibly serving the IDLE priority ++ * class tree, bubble up the budget update. ++ */ ++ if (unlikely(force && entity != entity->sched_data->next_active)) { ++ new_next_active = entity; ++ for_each_entity(new_next_active) ++ bfq_update_budget(new_next_active); ++ } ++ ++ return entity; ++} ++ ++/** ++ * bfq_lookup_next_entity - return the first eligible entity in @sd. ++ * @sd: the sched_data. ++ * @extract: if true the returned entity will also be extracted from @sd. ++ * ++ * NOTE: since we cache the next_active entity at each level of the ++ * hierarchy, the complexity of the lookup can be decreased with ++ * absolutely no effort by just returning the cached next_active value; ++ * we prefer to do full lookups to test the consistency of ++ * the data structures. ++ */ ++static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd, ++ int extract, ++ struct bfq_data *bfqd) ++{ ++ struct bfq_service_tree *st = sd->service_tree; ++ struct bfq_entity *entity; ++ int i = 0; ++ ++ BUG_ON(sd->active_entity != NULL); ++ ++ if (bfqd != NULL && ++ jiffies - bfqd->bfq_class_idle_last_service > BFQ_CL_IDLE_TIMEOUT) { ++ entity = __bfq_lookup_next_entity(st + BFQ_IOPRIO_CLASSES - 1, ++ true); ++ if (entity != NULL) { ++ i = BFQ_IOPRIO_CLASSES - 1; ++ bfqd->bfq_class_idle_last_service = jiffies; ++ sd->next_active = entity; ++ } ++ } ++ for (; i < BFQ_IOPRIO_CLASSES; i++) { ++ entity = __bfq_lookup_next_entity(st + i, false); ++ if (entity != NULL) { ++ if (extract) { ++ bfq_check_next_active(sd, entity); ++ bfq_active_extract(st + i, entity); ++ sd->active_entity = entity; ++ sd->next_active = NULL; ++ } ++ break; ++ } ++ } ++ ++ return entity; ++} ++ ++/* ++ * Get next queue for service.
++ */ ++static struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd) ++{ ++ struct bfq_entity *entity = NULL; ++ struct bfq_sched_data *sd; ++ struct bfq_queue *bfqq; ++ ++ BUG_ON(bfqd->in_service_queue != NULL); ++ ++ if (bfqd->busy_queues == 0) ++ return NULL; ++ ++ sd = &bfqd->root_group->sched_data; ++ for (; sd != NULL; sd = entity->my_sched_data) { ++ entity = bfq_lookup_next_entity(sd, 1, bfqd); ++ BUG_ON(entity == NULL); ++ entity->service = 0; ++ } ++ ++ bfqq = bfq_entity_to_bfqq(entity); ++ BUG_ON(bfqq == NULL); ++ ++ return bfqq; ++} ++ ++/* ++ * Forced extraction of the given queue. ++ */ ++static void bfq_get_next_queue_forced(struct bfq_data *bfqd, ++ struct bfq_queue *bfqq) ++{ ++ struct bfq_entity *entity; ++ struct bfq_sched_data *sd; ++ ++ BUG_ON(bfqd->in_service_queue != NULL); ++ ++ entity = &bfqq->entity; ++ /* ++ * Bubble up extraction/update from the leaf to the root. ++ */ ++ for_each_entity(entity) { ++ sd = entity->sched_data; ++ bfq_update_budget(entity); ++ bfq_update_vtime(bfq_entity_service_tree(entity)); ++ bfq_active_extract(bfq_entity_service_tree(entity), entity); ++ sd->active_entity = entity; ++ sd->next_active = NULL; ++ entity->service = 0; ++ } ++ ++ return; ++} ++ ++static void __bfq_bfqd_reset_in_service(struct bfq_data *bfqd) ++{ ++ if (bfqd->in_service_bic != NULL) { ++ put_io_context(bfqd->in_service_bic->icq.ioc); ++ bfqd->in_service_bic = NULL; ++ } ++ ++ bfqd->in_service_queue = NULL; ++ del_timer(&bfqd->idle_slice_timer); ++} ++ ++static void bfq_deactivate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq, ++ int requeue) ++{ ++ struct bfq_entity *entity = &bfqq->entity; ++ ++ if (bfqq == bfqd->in_service_queue) ++ __bfq_bfqd_reset_in_service(bfqd); ++ ++ bfq_deactivate_entity(entity, requeue); ++} ++ ++static void bfq_activate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq) ++{ ++ struct bfq_entity *entity = &bfqq->entity; ++ ++ bfq_activate_entity(entity); ++} ++ ++/* ++ * Called when the bfqq no longer has requests pending, remove it from ++ * the service tree. ++ */ ++static void bfq_del_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq, ++ int requeue) ++{ ++ BUG_ON(!bfq_bfqq_busy(bfqq)); ++ BUG_ON(!RB_EMPTY_ROOT(&bfqq->sort_list)); ++ ++ bfq_log_bfqq(bfqd, bfqq, "del from busy"); ++ ++ bfq_clear_bfqq_busy(bfqq); ++ ++ BUG_ON(bfqd->busy_queues == 0); ++ bfqd->busy_queues--; ++ if (bfqq->raising_coeff > 1) ++ bfqd->raised_busy_queues--; ++ ++ bfq_deactivate_bfqq(bfqd, bfqq, requeue); ++} ++ ++/* ++ * Called when an inactive queue receives a new request. ++ */ ++static void bfq_add_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq) ++{ ++ BUG_ON(bfq_bfqq_busy(bfqq)); ++ BUG_ON(bfqq == bfqd->in_service_queue); ++ ++ bfq_log_bfqq(bfqd, bfqq, "add to busy"); ++ ++ bfq_activate_bfqq(bfqd, bfqq); ++ ++ bfq_mark_bfqq_busy(bfqq); ++ bfqd->busy_queues++; ++ if (bfqq->raising_coeff > 1) ++ bfqd->raised_busy_queues++; ++} +diff --git a/block/bfq.h b/block/bfq.h +new file mode 100644 +index 0000000..78da7d2 +--- /dev/null ++++ b/block/bfq.h +@@ -0,0 +1,612 @@ ++/* ++ * BFQ-v7 for 3.10.0: data structures and common functions prototypes. 
++ * ++ * Based on ideas and code from CFQ: ++ * Copyright (C) 2003 Jens Axboe ++ * ++ * Copyright (C) 2008 Fabio Checconi ++ * Paolo Valente ++ * ++ * Copyright (C) 2010 Paolo Valente ++ */ ++ ++#ifndef _BFQ_H ++#define _BFQ_H ++ ++#include <linux/blktrace_api.h> ++#include <linux/hrtimer.h> ++#include <linux/ioprio.h> ++#include <linux/rbtree.h> ++ ++#define BFQ_IOPRIO_CLASSES 3 ++#define BFQ_CL_IDLE_TIMEOUT (HZ/5) ++ ++#define BFQ_MIN_WEIGHT 1 ++#define BFQ_MAX_WEIGHT 1000 ++ ++#define BFQ_DEFAULT_GRP_WEIGHT 10 ++#define BFQ_DEFAULT_GRP_IOPRIO 0 ++#define BFQ_DEFAULT_GRP_CLASS IOPRIO_CLASS_BE ++ ++struct bfq_entity; ++ ++/** ++ * struct bfq_service_tree - per ioprio_class service tree. ++ * @active: tree for active entities (i.e., those backlogged). ++ * @idle: tree for idle entities (i.e., those not backlogged, with V <= F_i). ++ * @first_idle: idle entity with minimum F_i. ++ * @last_idle: idle entity with maximum F_i. ++ * @vtime: scheduler virtual time. ++ * @wsum: scheduler weight sum; active and idle entities contribute to it. ++ * ++ * Each service tree represents a B-WF2Q+ scheduler on its own. Each ++ * ioprio_class has its own independent scheduler, and so its own ++ * bfq_service_tree. All the fields are protected by the queue lock ++ * of the containing bfqd. ++ */ ++struct bfq_service_tree { ++ struct rb_root active; ++ struct rb_root idle; ++ ++ struct bfq_entity *first_idle; ++ struct bfq_entity *last_idle; ++ ++ u64 vtime; ++ unsigned long wsum; ++}; ++ ++/** ++ * struct bfq_sched_data - multi-class scheduler. ++ * @active_entity: entity under service. ++ * @next_active: head-of-the-line entity in the scheduler. ++ * @service_tree: array of service trees, one per ioprio_class. ++ * ++ * bfq_sched_data is the basic scheduler queue. It supports three ++ * ioprio_classes, and can be used either as a toplevel queue or as ++ * an intermediate queue on a hierarchical setup. ++ * @next_active points to the active entity of the sched_data service ++ * trees that will be scheduled next. ++ * ++ * The supported ioprio_classes are the same as in CFQ, in descending ++ * priority order, IOPRIO_CLASS_RT, IOPRIO_CLASS_BE, IOPRIO_CLASS_IDLE. ++ * Requests from higher priority queues are served before all the ++ * requests from lower priority queues; among requests of the same ++ * queue, requests are served according to B-WF2Q+. ++ * All the fields are protected by the queue lock of the containing bfqd. ++ */ ++struct bfq_sched_data { ++ struct bfq_entity *active_entity; ++ struct bfq_entity *next_active; ++ struct bfq_service_tree service_tree[BFQ_IOPRIO_CLASSES]; ++}; ++ ++/** ++ * struct bfq_entity - schedulable entity. ++ * @rb_node: service_tree member. ++ * @on_st: flag, true if the entity is on a tree (either the active or ++ * the idle one of its service_tree). ++ * @finish: B-WF2Q+ finish timestamp (aka F_i). ++ * @start: B-WF2Q+ start timestamp (aka S_i). ++ * @tree: tree the entity is enqueued into; %NULL if not on a tree. ++ * @min_start: minimum start time of the (active) subtree rooted at ++ * this entity; used for O(log N) lookups into active trees. ++ * @service: service received during the last round of service. ++ * @budget: budget used to calculate F_i; F_i = S_i + @budget / @weight. ++ * @weight: weight of the queue ++ * @parent: parent entity, for hierarchical scheduling. ++ * @my_sched_data: for non-leaf nodes in the cgroup hierarchy, the ++ * associated scheduler queue, %NULL on leaf nodes. ++ * @sched_data: the scheduler queue this entity belongs to. ++ * @ioprio: the ioprio in use.
++ * @new_weight: when a weight change is requested, the new weight value. ++ * @orig_weight: original weight, used to implement weight boosting ++ * @new_ioprio: when an ioprio change is requested, the new ioprio value. ++ * @ioprio_class: the ioprio_class in use. ++ * @new_ioprio_class: when an ioprio_class change is requested, the new ++ * ioprio_class value. ++ * @ioprio_changed: flag, true when the user requested a weight, ioprio or ++ * ioprio_class change. ++ * ++ * A bfq_entity is used to represent either a bfq_queue (leaf node in the ++ * cgroup hierarchy) or a bfq_group into the upper level scheduler. Each ++ * entity belongs to the sched_data of the parent group in the cgroup ++ * hierarchy. Non-leaf entities have also their own sched_data, stored ++ * in @my_sched_data. ++ * ++ * Each entity stores independently its priority values; this would ++ * allow different weights on different devices, but this ++ * functionality is not exported to userspace by now. Priorities and ++ * weights are updated lazily, first storing the new values into the ++ * new_* fields, then setting the @ioprio_changed flag. As soon as ++ * there is a transition in the entity state that allows the priority ++ * update to take place the effective and the requested priority ++ * values are synchronized. ++ * ++ * Unless cgroups are used, the weight value is calculated from the ++ * ioprio to export the same interface as CFQ. When dealing with ++ * ``well-behaved'' queues (i.e., queues that do not spend too much ++ * time to consume their budget and have true sequential behavior, and ++ * when there are no external factors breaking anticipation) the ++ * relative weights at each level of the cgroups hierarchy should be ++ * guaranteed. All the fields are protected by the queue lock of the ++ * containing bfqd. ++ */ ++struct bfq_entity { ++ struct rb_node rb_node; ++ ++ int on_st; ++ ++ u64 finish; ++ u64 start; ++ ++ struct rb_root *tree; ++ ++ u64 min_start; ++ ++ unsigned long service, budget; ++ unsigned short weight, new_weight; ++ unsigned short orig_weight; ++ ++ struct bfq_entity *parent; ++ ++ struct bfq_sched_data *my_sched_data; ++ struct bfq_sched_data *sched_data; ++ ++ unsigned short ioprio, new_ioprio; ++ unsigned short ioprio_class, new_ioprio_class; ++ ++ int ioprio_changed; ++}; ++ ++struct bfq_group; ++ ++/** ++ * struct bfq_queue - leaf schedulable entity. ++ * @ref: reference counter. ++ * @bfqd: parent bfq_data. ++ * @new_bfqq: shared bfq_queue if queue is cooperating with ++ * one or more other queues. ++ * @pos_node: request-position tree member (see bfq_data's @rq_pos_tree). ++ * @pos_root: request-position tree root (see bfq_data's @rq_pos_tree). ++ * @sort_list: sorted list of pending requests. ++ * @next_rq: if fifo isn't expired, next request to serve. ++ * @queued: nr of requests queued in @sort_list. ++ * @allocated: currently allocated requests. ++ * @meta_pending: pending metadata requests. ++ * @fifo: fifo list of requests in sort_list. ++ * @entity: entity representing this queue in the scheduler. ++ * @max_budget: maximum budget allowed from the feedback mechanism. ++ * @budget_timeout: budget expiration (in jiffies). ++ * @dispatched: number of requests on the dispatch list or inside driver. ++ * @org_ioprio: saved ioprio during boosted periods. ++ * @flags: status flags. ++ * @bfqq_list: node for active/idle bfqq list inside our bfqd. 
++ * @seek_samples: number of seeks sampled ++ * @seek_total: sum of the distances of the seeks sampled ++ * @seek_mean: mean seek distance ++ * @last_request_pos: position of the last request enqueued ++ * @pid: pid of the process owning the queue, used for logging purposes. ++ * @last_rais_start_finish: last (idle -> weight-raised) transition attempt ++ * @raising_cur_max_time: current max raising time for this queue ++ * @last_idle_bklogged: time of the last transition of the @bfq_queue from ++ * idle to backlogged ++ * @service_from_backlogged: cumulative service received from the @bfq_queue ++ * since the last transition from idle to backlogged ++ * ++ * A bfq_queue is a leaf request queue; it can be associated with an io_context ++ * or more (if it is an async one). @cgroup holds a reference to the ++ * cgroup, to be sure that it does not disappear while a bfqq still ++ * references it (mostly to avoid races between request issuing and task ++ * migration followed by cgroup destruction). ++ * All the fields are protected by the queue lock of the containing bfqd. ++ */ ++struct bfq_queue { ++ atomic_t ref; ++ struct bfq_data *bfqd; ++ ++ /* fields for cooperating queues handling */ ++ struct bfq_queue *new_bfqq; ++ struct rb_node pos_node; ++ struct rb_root *pos_root; ++ ++ struct rb_root sort_list; ++ struct request *next_rq; ++ int queued[2]; ++ int allocated[2]; ++ int meta_pending; ++ struct list_head fifo; ++ ++ struct bfq_entity entity; ++ ++ unsigned long max_budget; ++ unsigned long budget_timeout; ++ ++ int dispatched; ++ ++ unsigned short org_ioprio; ++ ++ unsigned int flags; ++ ++ struct list_head bfqq_list; ++ ++ unsigned int seek_samples; ++ u64 seek_total; ++ sector_t seek_mean; ++ sector_t last_request_pos; ++ ++ pid_t pid; ++ ++ /* weight-raising fields */ ++ unsigned int raising_cur_max_time; ++ unsigned long soft_rt_next_start; ++ u64 last_rais_start_finish; ++ unsigned int raising_coeff; ++ u64 last_idle_bklogged; ++ unsigned long service_from_backlogged; ++}; ++ ++/** ++ * struct bfq_ttime - per process thinktime stats. ++ * @ttime_total: total process thinktime ++ * @ttime_samples: number of thinktime samples ++ * @ttime_mean: average process thinktime ++ */ ++struct bfq_ttime { ++ unsigned long last_end_request; ++ ++ unsigned long ttime_total; ++ unsigned long ttime_samples; ++ unsigned long ttime_mean; ++}; ++ ++/** ++ * struct bfq_io_cq - per (request_queue, io_context) structure. ++ * @icq: associated io_cq structure ++ * @bfqq: array of two process queues, the sync and the async ++ * @ttime: associated @bfq_ttime struct ++ */ ++struct bfq_io_cq { ++ struct io_cq icq; /* must be the first member */ ++ struct bfq_queue *bfqq[2]; ++ struct bfq_ttime ttime; ++ int ioprio; ++}; ++ ++/** ++ * struct bfq_data - per device data structure. ++ * @queue: request queue for the managed device. ++ * @root_group: root bfq_group for the device. ++ * @rq_pos_tree: rbtree sorted by next_request position, ++ * used when determining if two or more queues ++ * have interleaving requests (see bfq_close_cooperator). ++ * @busy_queues: number of bfq_queues containing requests (including the ++ * queue under service, even if it is idling). ++ * @raised_busy_queues: number of weight-raised busy bfq_queues. ++ * @queued: number of queued requests. ++ * @rq_in_driver: number of requests dispatched and waiting for completion. ++ * @sync_flight: number of sync requests in the driver. ++ * @max_rq_in_driver: max number of reqs in driver in the last @hw_tag_samples ++ * completed requests.
++ * @hw_tag_samples: nr of samples used to calculate hw_tag. ++ * @hw_tag: flag set to one if the driver is showing a queueing behavior. ++ * @budgets_assigned: number of budgets assigned. ++ * @idle_slice_timer: timer set when idling for the next sequential request ++ * from the queue under service. ++ * @unplug_work: delayed work to restart dispatching on the request queue. ++ * @in_service_queue: bfq_queue under service. ++ * @in_service_bic: bfq_io_cq (bic) associated with the @in_service_queue. ++ * @last_position: on-disk position of the last served request. ++ * @last_budget_start: beginning of the last budget. ++ * @last_idling_start: beginning of the last idle slice. ++ * @peak_rate: peak transfer rate observed for a budget. ++ * @peak_rate_samples: number of samples used to calculate @peak_rate. ++ * @bfq_max_budget: maximum budget allotted to a bfq_queue before rescheduling. ++ * @group_list: list of all the bfq_groups active on the device. ++ * @active_list: list of all the bfq_queues active on the device. ++ * @idle_list: list of all the bfq_queues idle on the device. ++ * @bfq_quantum: max number of requests dispatched per dispatch round. ++ * @bfq_fifo_expire: timeout for async/sync requests; when it expires ++ * requests are served in fifo order. ++ * @bfq_back_penalty: weight of backward seeks wrt forward ones. ++ * @bfq_back_max: maximum allowed backward seek. ++ * @bfq_slice_idle: maximum idling time. ++ * @bfq_user_max_budget: user-configured max budget value (0 for auto-tuning). ++ * @bfq_max_budget_async_rq: maximum budget (in nr of requests) allotted to ++ * async queues. ++ * @bfq_timeout: timeout for bfq_queues to consume their budget; used ++ * to prevent seeky queues from imposing long latencies on ++ * well-behaved ones (this also implies that seeky queues cannot ++ * receive guarantees in the service domain; after a timeout ++ * they are charged for the whole allocated budget, to try ++ * to preserve a behavior reasonably fair among them, but ++ * without service-domain guarantees). ++ * @bfq_raising_coeff: Maximum factor by which the weight of a boosted ++ * queue is multiplied ++ * @bfq_raising_max_time: maximum duration of a weight-raising period (jiffies) ++ * @bfq_raising_rt_max_time: maximum duration for soft real-time processes ++ * @bfq_raising_min_idle_time: minimum idle period after which weight-raising ++ * may be reactivated for a queue (in jiffies) ++ * @bfq_raising_min_inter_arr_async: minimum period between request arrivals ++ * after which weight-raising may be ++ * reactivated for an already busy queue ++ * (in jiffies) ++ * @bfq_raising_max_softrt_rate: max service rate for a soft real-time queue, ++ * in sectors per second ++ * @RT_prod: cached value of the product R*T used for computing the maximum ++ * duration of the weight raising automatically ++ * @oom_bfqq: fallback dummy bfqq for extreme OOM conditions ++ * ++ * All the fields are protected by the @queue lock.
++ */ ++struct bfq_data { ++ struct request_queue *queue; ++ ++ struct bfq_group *root_group; ++ ++ struct rb_root rq_pos_tree; ++ ++ int busy_queues; ++ int raised_busy_queues; ++ int queued; ++ int rq_in_driver; ++ int sync_flight; ++ ++ int max_rq_in_driver; ++ int hw_tag_samples; ++ int hw_tag; ++ ++ int budgets_assigned; ++ ++ struct timer_list idle_slice_timer; ++ struct work_struct unplug_work; ++ ++ struct bfq_queue *in_service_queue; ++ struct bfq_io_cq *in_service_bic; ++ ++ sector_t last_position; ++ ++ ktime_t last_budget_start; ++ ktime_t last_idling_start; ++ int peak_rate_samples; ++ u64 peak_rate; ++ unsigned long bfq_max_budget; ++ ++ struct hlist_head group_list; ++ struct list_head active_list; ++ struct list_head idle_list; ++ ++ unsigned int bfq_quantum; ++ unsigned int bfq_fifo_expire[2]; ++ unsigned int bfq_back_penalty; ++ unsigned int bfq_back_max; ++ unsigned int bfq_slice_idle; ++ u64 bfq_class_idle_last_service; ++ ++ unsigned int bfq_user_max_budget; ++ unsigned int bfq_max_budget_async_rq; ++ unsigned int bfq_timeout[2]; ++ ++ bool low_latency; ++ ++ /* parameters of the low_latency heuristics */ ++ unsigned int bfq_raising_coeff; ++ unsigned int bfq_raising_max_time; ++ unsigned int bfq_raising_rt_max_time; ++ unsigned int bfq_raising_min_idle_time; ++ unsigned long bfq_raising_min_inter_arr_async; ++ unsigned int bfq_raising_max_softrt_rate; ++ u64 RT_prod; ++ ++ struct bfq_queue oom_bfqq; ++}; ++ ++enum bfqq_state_flags { ++ BFQ_BFQQ_FLAG_busy = 0, /* has requests or is under service */ ++ BFQ_BFQQ_FLAG_wait_request, /* waiting for a request */ ++ BFQ_BFQQ_FLAG_must_alloc, /* must be allowed rq alloc */ ++ BFQ_BFQQ_FLAG_fifo_expire, /* FIFO checked in this slice */ ++ BFQ_BFQQ_FLAG_idle_window, /* slice idling enabled */ ++ BFQ_BFQQ_FLAG_prio_changed, /* task priority has changed */ ++ BFQ_BFQQ_FLAG_sync, /* synchronous queue */ ++ BFQ_BFQQ_FLAG_budget_new, /* no completion with this budget */ ++ BFQ_BFQQ_FLAG_coop, /* bfqq is shared */ ++ BFQ_BFQQ_FLAG_split_coop, /* shared bfqq will be split */ ++ BFQ_BFQQ_FLAG_softrt_update, /* needs softrt-next-start update */ ++}; ++ ++#define BFQ_BFQQ_FNS(name) \ ++static inline void bfq_mark_bfqq_##name(struct bfq_queue *bfqq) \ ++{ \ ++ (bfqq)->flags |= (1 << BFQ_BFQQ_FLAG_##name); \ ++} \ ++static inline void bfq_clear_bfqq_##name(struct bfq_queue *bfqq) \ ++{ \ ++ (bfqq)->flags &= ~(1 << BFQ_BFQQ_FLAG_##name); \ ++} \ ++static inline int bfq_bfqq_##name(const struct bfq_queue *bfqq) \ ++{ \ ++ return ((bfqq)->flags & (1 << BFQ_BFQQ_FLAG_##name)) != 0; \ ++} ++ ++BFQ_BFQQ_FNS(busy); ++BFQ_BFQQ_FNS(wait_request); ++BFQ_BFQQ_FNS(must_alloc); ++BFQ_BFQQ_FNS(fifo_expire); ++BFQ_BFQQ_FNS(idle_window); ++BFQ_BFQQ_FNS(prio_changed); ++BFQ_BFQQ_FNS(sync); ++BFQ_BFQQ_FNS(budget_new); ++BFQ_BFQQ_FNS(coop); ++BFQ_BFQQ_FNS(split_coop); ++BFQ_BFQQ_FNS(softrt_update); ++#undef BFQ_BFQQ_FNS ++ ++/* Logging facilities. */ ++#define bfq_log_bfqq(bfqd, bfqq, fmt, args...) \ ++ blk_add_trace_msg((bfqd)->queue, "bfq%d " fmt, (bfqq)->pid, ##args) ++ ++#define bfq_log(bfqd, fmt, args...) \ ++ blk_add_trace_msg((bfqd)->queue, "bfq " fmt, ##args) ++ ++/* Expiration reasons.
*/ ++enum bfqq_expiration { ++ BFQ_BFQQ_TOO_IDLE = 0, /* queue has been idling for too long */ ++ BFQ_BFQQ_BUDGET_TIMEOUT, /* budget took too long to be used */ ++ BFQ_BFQQ_BUDGET_EXHAUSTED, /* budget consumed */ ++ BFQ_BFQQ_NO_MORE_REQUESTS, /* the queue has no more requests */ ++}; ++ ++#ifdef CONFIG_CGROUP_BFQIO ++/** ++ * struct bfq_group - per (device, cgroup) data structure. ++ * @entity: schedulable entity to insert into the parent group sched_data. ++ * @sched_data: own sched_data, to contain child entities (they may be ++ * both bfq_queues and bfq_groups). ++ * @group_node: node to be inserted into the bfqio_cgroup->group_data ++ * list of the containing cgroup's bfqio_cgroup. ++ * @bfqd_node: node to be inserted into the @bfqd->group_list list ++ * of the groups active on the same device; used for cleanup. ++ * @bfqd: the bfq_data for the device this group acts upon. ++ * @async_bfqq: array of async queues for all the tasks belonging to ++ * the group, one queue per ioprio value per ioprio_class, ++ * except for the idle class that has only one queue. ++ * @async_idle_bfqq: async queue for the idle class (ioprio is ignored). ++ * @my_entity: pointer to @entity, %NULL for the toplevel group; used ++ * to avoid too many special cases during group creation/migration. ++ * ++ * Each (device, cgroup) pair has its own bfq_group, i.e., for each cgroup ++ * there is a set of bfq_groups, each one collecting the lower-level ++ * entities belonging to the group that are acting on the same device. ++ * ++ * Locking works as follows: ++ * o @group_node is protected by the bfqio_cgroup lock, and is accessed ++ * via RCU from its readers. ++ * o @bfqd is protected by the queue lock, RCU is used to access it ++ * from the readers. ++ * o All the other fields are protected by the @bfqd queue lock. ++ */ ++struct bfq_group { ++ struct bfq_entity entity; ++ struct bfq_sched_data sched_data; ++ ++ struct hlist_node group_node; ++ struct hlist_node bfqd_node; ++ ++ void *bfqd; ++ ++ struct bfq_queue *async_bfqq[2][IOPRIO_BE_NR]; ++ struct bfq_queue *async_idle_bfqq; ++ ++ struct bfq_entity *my_entity; ++}; ++ ++/** ++ * struct bfqio_cgroup - bfq cgroup data structure. ++ * @css: subsystem state for bfq in the containing cgroup. ++ * @weight: cgroup weight. ++ * @ioprio: cgroup ioprio. ++ * @ioprio_class: cgroup ioprio_class. ++ * @lock: spinlock that protects @ioprio, @ioprio_class and @group_data. ++ * @group_data: list containing the bfq_group belonging to this cgroup. ++ * ++ * @group_data is accessed using RCU, with @lock protecting the updates, ++ * @ioprio and @ioprio_class are protected by @lock. 
++ */ ++struct bfqio_cgroup { ++ struct cgroup_subsys_state css; ++ ++ unsigned short weight, ioprio, ioprio_class; ++ ++ spinlock_t lock; ++ struct hlist_head group_data; ++}; ++#else ++struct bfq_group { ++ struct bfq_sched_data sched_data; ++ ++ struct bfq_queue *async_bfqq[2][IOPRIO_BE_NR]; ++ struct bfq_queue *async_idle_bfqq; ++}; ++#endif ++ ++static inline struct bfq_service_tree * ++bfq_entity_service_tree(struct bfq_entity *entity) ++{ ++ struct bfq_sched_data *sched_data = entity->sched_data; ++ unsigned int idx = entity->ioprio_class - 1; ++ ++ BUG_ON(idx >= BFQ_IOPRIO_CLASSES); ++ BUG_ON(sched_data == NULL); ++ ++ return sched_data->service_tree + idx; ++} ++ ++static inline struct bfq_queue *bic_to_bfqq(struct bfq_io_cq *bic, ++ int is_sync) ++{ ++ return bic->bfqq[!!is_sync]; ++} ++ ++static inline void bic_set_bfqq(struct bfq_io_cq *bic, ++ struct bfq_queue *bfqq, int is_sync) ++{ ++ bic->bfqq[!!is_sync] = bfqq; ++} ++ ++static inline struct bfq_data *bic_to_bfqd(struct bfq_io_cq *bic) ++{ ++ return bic->icq.q->elevator->elevator_data; ++} ++ ++/** ++ * bfq_get_bfqd_locked - get a lock on a bfqd using an RCU-protected pointer. ++ * @ptr: a pointer to a bfqd. ++ * @flags: storage for the flags to be saved. ++ * ++ * This function allows bfqg->bfqd to be protected by the ++ * queue lock of the bfqd it references; the pointer is dereferenced ++ * under RCU, so the storage for bfqd is guaranteed to be safe as long ++ * as the RCU read side critical section does not end. After the ++ * bfqd->queue->queue_lock is taken the pointer is rechecked, to be ++ * sure that no other writer accessed it. If we raced with a writer, ++ * the function returns NULL, with the queue unlocked, otherwise it ++ * returns the dereferenced pointer, with the queue locked.
++ */ ++static inline struct bfq_data *bfq_get_bfqd_locked(void **ptr, ++ unsigned long *flags) ++{ ++ struct bfq_data *bfqd; ++ ++ rcu_read_lock(); ++ bfqd = rcu_dereference(*(struct bfq_data **)ptr); ++ ++ if (bfqd != NULL) { ++ spin_lock_irqsave(bfqd->queue->queue_lock, *flags); ++ if (*ptr == bfqd) ++ goto out; ++ spin_unlock_irqrestore(bfqd->queue->queue_lock, *flags); ++ } ++ ++ bfqd = NULL; ++out: ++ rcu_read_unlock(); ++ return bfqd; ++} ++ ++static inline void bfq_put_bfqd_unlock(struct bfq_data *bfqd, ++ unsigned long *flags) ++{ ++ spin_unlock_irqrestore(bfqd->queue->queue_lock, *flags); ++} ++ ++static void bfq_changed_ioprio(struct bfq_io_cq *bic); ++static void bfq_put_queue(struct bfq_queue *bfqq); ++static void bfq_dispatch_insert(struct request_queue *q, struct request *rq); ++static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd, ++ struct bfq_group *bfqg, int is_sync, ++ struct bfq_io_cq *bic, gfp_t gfp_mask); ++static void bfq_end_raising_async_queues(struct bfq_data *bfqd, ++ struct bfq_group *bfqg); ++static void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg); ++static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq); ++#endif +-- +1.8.5.2 + diff --git a/sys-kernel/kogaion-sources/files/desktop/0003-block-bfq-add-Early-Queue-Merge-EQM-to-BFQ-v7-for-3.10.0.patch b/sys-kernel/kogaion-sources/files/desktop/0003-block-bfq-add-Early-Queue-Merge-EQM-to-BFQ-v7-for-3.10.0.patch new file mode 100644 index 00000000..ea585f02 --- /dev/null +++ b/sys-kernel/kogaion-sources/files/desktop/0003-block-bfq-add-Early-Queue-Merge-EQM-to-BFQ-v7-for-3.10.0.patch @@ -0,0 +1,1034 @@ +From efc499347ea3827417cf00718616bf61a090afec Mon Sep 17 00:00:00 2001 +From: Mauro Andreolini +Date: Thu, 23 Jan 2014 16:54:44 +0100 +Subject: [PATCH 3/3] block, bfq: add Early Queue Merge (EQM) to BFQ-v7 for + 3.10.0 + +A set of processes may happen to perform interleaved reads, i.e., requests +whose union would give rise to a sequential read pattern. There are two +typical cases: in the first case, processes read fixed-size chunks of +data at a fixed distance from each other, while in the second case processes +may read variable-size chunks at variable distances. The latter case occurs +for example with KVM, which splits the I/O generated by the guest into +multiple chunks, and lets these chunks be served by a pool of cooperating +processes, iteratively assigning the next chunk of I/O to the first +available process. CFQ uses actual queue merging for the first type of +processes, whereas it uses preemption to get a sequential read pattern out +of the read requests performed by the second type of processes. In the end +it uses two different mechanisms to achieve the same goal: boosting the +throughput with interleaved I/O. + +This patch introduces Early Queue Merge (EQM), a unified mechanism to get a +sequential read pattern with both types of processes. The main idea is +checking newly arrived requests against the next request of the active queue +both in case of actual request insert and in case of request merge. By doing +so, both types of processes can be handled by just merging their queues. +EQM is then simpler and more compact than the pair of mechanisms used in +CFQ. + +Finally, EQM also preserves the typical low-latency properties of BFQ, by +properly restoring the weight-raising state of a queue when it gets back to +a non-merged state.
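That last paragraph is about state restoration: when a queue leaves a merge, its pre-merge low-latency boost should resume rather than reset. The patch implements this by snapshotting the remaining boost time and the idle-window flag into the bfq_io_cq (see bfq_bfqq_resume_state() in the first hunk below). A condensed sketch of the save/restore idea, with invented structure and field names that only model, and are not, the kernel data structures:

struct boost_state {
	unsigned int time_left;		/* remaining boost duration */
	int idle_window;		/* was idling enabled? */
};

struct queue_state {
	unsigned int raising_coeff;	/* > 1 while boosted */
	unsigned int raising_time_left;
	int idle_window;
};

/* Snapshot the boost before the queue is merged away. */
static void save_boost(const struct queue_state *q, struct boost_state *s)
{
	s->time_left = q->raising_time_left;
	s->idle_window = q->idle_window;
}

/* On split, restart boosting only for the leftover duration. */
static void resume_boost(struct queue_state *q, const struct boost_state *s,
			 unsigned int boost_coeff)
{
	q->idle_window = s->idle_window;
	if (s->time_left) {
		q->raising_coeff = boost_coeff;
		q->raising_time_left = s->time_left;
	}
}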
+ +Signed-off-by: Mauro Andreolini +Signed-off-by: Arianna Avanzini +Reviewed-by: Paolo Valente +--- + block/bfq-iosched.c | 657 ++++++++++++++++++++++++++++++++++++---------------- + block/bfq-sched.c | 28 --- + block/bfq.h | 16 ++ + 3 files changed, 474 insertions(+), 227 deletions(-) + +diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c +index 96abb81..99083be6 100644 +--- a/block/bfq-iosched.c ++++ b/block/bfq-iosched.c +@@ -445,6 +445,46 @@ static inline unsigned int bfq_wrais_duration(struct bfq_data *bfqd) + return dur; + } + ++static inline void ++bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_io_cq *bic) ++{ ++ if (bic->saved_idle_window) ++ bfq_mark_bfqq_idle_window(bfqq); ++ else ++ bfq_clear_bfqq_idle_window(bfqq); ++ if (bic->raising_time_left && bfqq->bfqd->low_latency) { ++ /* ++ * Start a weight raising period with the duration given by ++ * the raising_time_left snapshot. ++ */ ++ if (bfq_bfqq_busy(bfqq)) ++ bfqq->bfqd->raised_busy_queues++; ++ bfqq->raising_coeff = bfqq->bfqd->bfq_raising_coeff; ++ bfqq->raising_cur_max_time = bic->raising_time_left; ++ bfqq->last_rais_start_finish = jiffies; ++ bfqq->entity.ioprio_changed = 1; ++ } ++ /* ++ * Clear raising_time_left to prevent bfq_bfqq_save_state() from ++ * getting confused about the queue's need of a weight-raising ++ * period. ++ */ ++ bic->raising_time_left = 0; ++} ++ ++/* ++ * Must be called with the queue_lock held. ++ */ ++static int bfqq_process_refs(struct bfq_queue *bfqq) ++{ ++ int process_refs, io_refs; ++ ++ io_refs = bfqq->allocated[READ] + bfqq->allocated[WRITE]; ++ process_refs = atomic_read(&bfqq->ref) - io_refs - bfqq->entity.on_st; ++ BUG_ON(process_refs < 0); ++ return process_refs; ++} ++ + static void bfq_add_rq_rb(struct request *rq) + { + struct bfq_queue *bfqq = RQ_BFQQ(rq); +@@ -486,12 +526,20 @@ static void bfq_add_rq_rb(struct request *rq) + if (!bfqd->low_latency) + goto add_bfqq_busy; + ++ if (bfq_bfqq_just_split(bfqq)) ++ goto set_ioprio_changed; ++ + /* +- * If the queue is not being boosted and has been idle +- * for enough time, start a weight-raising period ++ * If the queue: ++ * - is not being boosted, ++ * - has been idle for enough time, ++ * - is not a sync queue or is linked to a bfq_io_cq (it is ++ * shared "for its nature" or it is not shared and its ++ * requests have not been redirected to a shared queue) ++ * start a weight-raising period. + */ +- if (old_raising_coeff == 1 && +- (idle_for_long_time || soft_rt)) { ++ if (old_raising_coeff == 1 && (idle_for_long_time || soft_rt) && ++ (!bfq_bfqq_sync(bfqq) || bfqq->bic != NULL)) { + bfqq->raising_coeff = bfqd->bfq_raising_coeff; + if (idle_for_long_time) + bfqq->raising_cur_max_time = +@@ -572,6 +620,7 @@ static void bfq_add_rq_rb(struct request *rq) + bfqd->bfq_raising_rt_max_time; + } + } ++set_ioprio_changed: + if (old_raising_coeff != bfqq->raising_coeff) + entity->ioprio_changed = 1; + add_bfqq_busy: +@@ -754,90 +803,35 @@ static void bfq_end_raising(struct bfq_data *bfqd) + spin_unlock_irq(bfqd->queue->queue_lock); + } + +-static int bfq_allow_merge(struct request_queue *q, struct request *rq, +- struct bio *bio) +-{ +- struct bfq_data *bfqd = q->elevator->elevator_data; +- struct bfq_io_cq *bic; +- struct bfq_queue *bfqq; +- +- /* +- * Disallow merge of a sync bio into an async request. +- */ +- if (bfq_bio_sync(bio) && !rq_is_sync(rq)) +- return 0; +- +- /* +- * Lookup the bfqq that this bio will be queued with. Allow +- * merge only if rq is queued there. +- * Queue lock is held here. 
+- */ +- bic = bfq_bic_lookup(bfqd, current->io_context); +- if (bic == NULL) +- return 0; +- +- bfqq = bic_to_bfqq(bic, bfq_bio_sync(bio)); +- return bfqq == RQ_BFQQ(rq); +-} +- +-static void __bfq_set_in_service_queue(struct bfq_data *bfqd, +- struct bfq_queue *bfqq) +-{ +- if (bfqq != NULL) { +- bfq_mark_bfqq_must_alloc(bfqq); +- bfq_mark_bfqq_budget_new(bfqq); +- bfq_clear_bfqq_fifo_expire(bfqq); +- +- bfqd->budgets_assigned = (bfqd->budgets_assigned*7 + 256) / 8; +- +- bfq_log_bfqq(bfqd, bfqq, +- "set_in_service_queue, cur-budget = %lu", +- bfqq->entity.budget); +- } +- +- bfqd->in_service_queue = bfqq; +-} +- +-/* +- * Get and set a new queue for service. +- */ +-static struct bfq_queue *bfq_set_in_service_queue(struct bfq_data *bfqd, +- struct bfq_queue *bfqq) ++static inline sector_t bfq_io_struct_pos(void *io_struct, bool request) + { +- if (!bfqq) +- bfqq = bfq_get_next_queue(bfqd); ++ if (request) ++ return blk_rq_pos(io_struct); + else +- bfq_get_next_queue_forced(bfqd, bfqq); +- +- __bfq_set_in_service_queue(bfqd, bfqq); +- return bfqq; ++ return ((struct bio *)io_struct)->bi_sector; + } + +-static inline sector_t bfq_dist_from_last(struct bfq_data *bfqd, +- struct request *rq) ++static inline sector_t bfq_dist_from(sector_t pos1, ++ sector_t pos2) + { +- if (blk_rq_pos(rq) >= bfqd->last_position) +- return blk_rq_pos(rq) - bfqd->last_position; ++ if (pos1 >= pos2) ++ return pos1 - pos2; + else +- return bfqd->last_position - blk_rq_pos(rq); ++ return pos2 - pos1; + } + +-/* +- * Return true if bfqq has no request pending and rq is close enough to +- * bfqd->last_position, or if rq is closer to bfqd->last_position than +- * bfqq->next_rq +- */ +-static inline int bfq_rq_close(struct bfq_data *bfqd, struct request *rq) ++static inline int bfq_rq_close_to_sector(void *io_struct, bool request, ++ sector_t sector) + { +- return bfq_dist_from_last(bfqd, rq) <= BFQQ_SEEK_THR; ++ return bfq_dist_from(bfq_io_struct_pos(io_struct, request), sector) <= ++ BFQQ_SEEK_THR; + } + +-static struct bfq_queue *bfqq_close(struct bfq_data *bfqd) ++static struct bfq_queue *bfqq_close(struct bfq_data *bfqd, sector_t sector) + { + struct rb_root *root = &bfqd->rq_pos_tree; + struct rb_node *parent, *node; + struct bfq_queue *__bfqq; +- sector_t sector = bfqd->last_position; + + if (RB_EMPTY_ROOT(root)) + return NULL; +@@ -856,7 +850,7 @@ static struct bfq_queue *bfqq_close(struct bfq_data *bfqd) + * position). + */ + __bfqq = rb_entry(parent, struct bfq_queue, pos_node); +- if (bfq_rq_close(bfqd, __bfqq->next_rq)) ++ if (bfq_rq_close_to_sector(__bfqq->next_rq, true, sector)) + return __bfqq; + + if (blk_rq_pos(__bfqq->next_rq) < sector) +@@ -867,7 +861,7 @@ static struct bfq_queue *bfqq_close(struct bfq_data *bfqd) + return NULL; + + __bfqq = rb_entry(node, struct bfq_queue, pos_node); +- if (bfq_rq_close(bfqd, __bfqq->next_rq)) ++ if (bfq_rq_close_to_sector(__bfqq->next_rq, true, sector)) + return __bfqq; + + return NULL; +@@ -876,14 +870,12 @@ static struct bfq_queue *bfqq_close(struct bfq_data *bfqd) + /* + * bfqd - obvious + * cur_bfqq - passed in so that we don't decide that the current queue +- * is closely cooperating with itself. +- * +- * We are assuming that cur_bfqq has dispatched at least one request, +- * and that bfqd->last_position reflects a position on the disk associated +- * with the I/O issued by cur_bfqq. 
++ * is closely cooperating with itself ++ * sector - used as a reference point to search for a close queue + */ + static struct bfq_queue *bfq_close_cooperator(struct bfq_data *bfqd, +- struct bfq_queue *cur_bfqq) ++ struct bfq_queue *cur_bfqq, ++ sector_t sector) + { + struct bfq_queue *bfqq; + +@@ -903,7 +895,7 @@ static struct bfq_queue *bfq_close_cooperator(struct bfq_data *bfqd, + * working closely on the same area of the disk. In that case, + * we can group them together and don't waste time idling. + */ +- bfqq = bfqq_close(bfqd); ++ bfqq = bfqq_close(bfqd, sector); + if (bfqq == NULL || bfqq == cur_bfqq) + return NULL; + +@@ -930,6 +922,282 @@ static struct bfq_queue *bfq_close_cooperator(struct bfq_data *bfqd, + return bfqq; + } + ++static struct bfq_queue * ++bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq) ++{ ++ int process_refs, new_process_refs; ++ struct bfq_queue *__bfqq; ++ ++ /* ++ * If there are no process references on the new_bfqq, then it is ++ * unsafe to follow the ->new_bfqq chain as other bfqq's in the chain ++ * may have dropped their last reference (not just their last process ++ * reference). ++ */ ++ if (!bfqq_process_refs(new_bfqq)) ++ return NULL; ++ ++ /* Avoid a circular list and skip interim queue merges. */ ++ while ((__bfqq = new_bfqq->new_bfqq)) { ++ if (__bfqq == bfqq) ++ return NULL; ++ new_bfqq = __bfqq; ++ } ++ ++ process_refs = bfqq_process_refs(bfqq); ++ new_process_refs = bfqq_process_refs(new_bfqq); ++ /* ++ * If the process for the bfqq has gone away, there is no ++ * sense in merging the queues. ++ */ ++ if (process_refs == 0 || new_process_refs == 0) ++ return NULL; ++ ++ bfq_log_bfqq(bfqq->bfqd, bfqq, "scheduling merge with queue %d", ++ new_bfqq->pid); ++ ++ /* ++ * Merging is just a redirection: the requests of the process owning ++ * one of the two queues are redirected to the other queue. The latter ++ * queue, in its turn, is set as shared if this is the first time that ++ * the requests of some process are redirected to it. ++ * ++ * We redirect bfqq to new_bfqq and not the opposite, because we ++ * are in the context of the process owning bfqq, hence we have the ++ * io_cq of this process. So we can immediately configure this io_cq ++ * to redirect the requests of the process to new_bfqq. ++ * ++ * NOTE, even if new_bfqq coincides with the in-service queue, the ++ * io_cq of new_bfqq is not available, because, if the in-service queue ++ * is shared, bfqd->in_service_bic may not point to the io_cq of the ++ * in-service queue. ++ * Redirecting the requests of the process owning bfqq to the currently ++ * in-service queue is in any case the best option, as we feed the ++ * in-service queue with new requests close to the last request served ++ * and, by doing so, hopefully increase the throughput. ++ */ ++ bfqq->new_bfqq = new_bfqq; ++ atomic_add(process_refs, &new_bfqq->ref); ++ return new_bfqq; ++} ++ ++/* ++ * Attempt to schedule a merge of bfqq with the currently in-service queue or ++ * with a close queue among the scheduled queues. ++ * Return NULL if no merge was scheduled, a pointer to the shared bfq_queue ++ * structure otherwise. 
++ */ ++static struct bfq_queue * ++bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq, ++ void *io_struct, bool request) ++{ ++ struct bfq_queue *in_service_bfqq, *new_bfqq; ++ ++ if (bfqq->new_bfqq) ++ return bfqq->new_bfqq; ++ ++ if (!io_struct) ++ return NULL; ++ ++ in_service_bfqq = bfqd->in_service_queue; ++ ++ if (in_service_bfqq == NULL || in_service_bfqq == bfqq || ++ !bfqd->in_service_bic) ++ goto check_scheduled; ++ ++ if (bfq_class_idle(in_service_bfqq) || bfq_class_idle(bfqq)) ++ goto check_scheduled; ++ ++ if (bfq_class_rt(in_service_bfqq) != bfq_class_rt(bfqq)) ++ goto check_scheduled; ++ ++ if (in_service_bfqq->entity.parent != bfqq->entity.parent) ++ goto check_scheduled; ++ ++ if (bfq_rq_close_to_sector(io_struct, request, bfqd->last_position) && ++ bfq_bfqq_sync(in_service_bfqq) && bfq_bfqq_sync(bfqq)) { ++ new_bfqq = bfq_setup_merge(bfqq, in_service_bfqq); ++ if (new_bfqq != NULL) ++ return new_bfqq; /* Merge with the in-service queue */ ++ } ++ ++ /* ++ * Check whether there is a cooperator among currently scheduled ++ * queues. The only thing we need is that the bio/request is not ++ * NULL, as we need it to establish whether a cooperator exists. ++ */ ++check_scheduled: ++ new_bfqq = bfq_close_cooperator(bfqd, bfqq, ++ bfq_io_struct_pos(io_struct, request)); ++ if (new_bfqq) ++ return bfq_setup_merge(bfqq, new_bfqq); ++ ++ return NULL; ++} ++ ++static inline void ++bfq_bfqq_save_state(struct bfq_queue *bfqq) ++{ ++ /* ++ * If bfqq->bic == NULL, the queue is already shared or its requests ++ * have already been redirected to a shared queue; both idle window ++ * and weight raising state have already been saved. Do nothing. ++ */ ++ if (bfqq->bic == NULL) ++ return; ++ if (bfqq->bic->raising_time_left) ++ /* ++ * This is the queue of a just-started process, and would ++ * deserve weight raising: we set raising_time_left to the full ++ * weight-raising duration to trigger weight-raising when and ++ * if the queue is split and the first request of the queue ++ * is enqueued. ++ */ ++ bfqq->bic->raising_time_left = bfq_wrais_duration(bfqq->bfqd); ++ else if (bfqq->raising_coeff > 1) { ++ unsigned long wrais_duration = ++ jiffies - bfqq->last_rais_start_finish; ++ /* ++ * It may happen that a queue's weight raising period lasts ++ * longer than its raising_cur_max_time, as weight raising is ++ * handled only when a request is enqueued or dispatched (it ++ * does not use any timer). If the weight raising period is ++ * about to end, don't save it. ++ */ ++ if (bfqq->raising_cur_max_time <= wrais_duration) ++ bfqq->bic->raising_time_left = 0; ++ else ++ bfqq->bic->raising_time_left = ++ bfqq->raising_cur_max_time - wrais_duration; ++ /* ++ * The bfq_queue is becoming shared or the requests of the ++ * process owning the queue are being redirected to a shared ++ * queue. Stop the weight raising period of the queue, as in ++ * both cases it should not be owned by an interactive or soft ++ * real-time application. ++ */ ++ bfq_bfqq_end_raising(bfqq); ++ } else ++ bfqq->bic->raising_time_left = 0; ++ bfqq->bic->saved_idle_window = bfq_bfqq_idle_window(bfqq); ++} ++ ++static inline void ++bfq_get_bic_reference(struct bfq_queue *bfqq) ++{ ++ /* ++ * If bfqq->bic has a non-NULL value, the bic to which it belongs ++ * is about to begin using a shared bfq_queue. 
++ */ ++ if (bfqq->bic) ++ atomic_long_inc(&bfqq->bic->icq.ioc->refcount); ++} ++ ++static void ++bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic, ++ struct bfq_queue *bfqq, struct bfq_queue *new_bfqq) ++{ ++ bfq_log_bfqq(bfqd, bfqq, "merging with queue %lu", ++ (long unsigned)new_bfqq->pid); ++ /* Save weight raising and idle window of the merged queues */ ++ bfq_bfqq_save_state(bfqq); ++ bfq_bfqq_save_state(new_bfqq); ++ /* ++ * Grab a reference to the bic, to prevent it from being destroyed ++ * before being possibly touched by a bfq_split_bfqq(). ++ */ ++ bfq_get_bic_reference(bfqq); ++ bfq_get_bic_reference(new_bfqq); ++ /* Merge queues (that is, let bic redirect its requests to new_bfqq) */ ++ bic_set_bfqq(bic, new_bfqq, 1); ++ bfq_mark_bfqq_coop(new_bfqq); ++ /* ++ * new_bfqq now belongs to at least two bics (it is a shared queue): set ++ * new_bfqq->bic to NULL. bfqq either: ++ * - does not belong to any bic any more, and hence bfqq->bic must ++ * be set to NULL, or ++ * - is a queue whose owning bics have already been redirected to a ++ * different queue, hence the queue is destined to not belong to any ++ * bic soon and bfqq->bic is already NULL (therefore the next ++ * assignment causes no harm). ++ */ ++ new_bfqq->bic = NULL; ++ bfqq->bic = NULL; ++ bfq_put_queue(bfqq); ++} ++ ++static int bfq_allow_merge(struct request_queue *q, struct request *rq, ++ struct bio *bio) ++{ ++ struct bfq_data *bfqd = q->elevator->elevator_data; ++ struct bfq_io_cq *bic; ++ struct bfq_queue *bfqq, *new_bfqq; ++ ++ /* ++ * Disallow merge of a sync bio into an async request. ++ */ ++ if (bfq_bio_sync(bio) && !rq_is_sync(rq)) ++ return 0; ++ ++ /* ++ * Lookup the bfqq that this bio will be queued with. Allow ++ * merge only if rq is queued there. ++ * Queue lock is held here. ++ */ ++ bic = bfq_bic_lookup(bfqd, current->io_context); ++ if (bic == NULL) ++ return 0; ++ ++ bfqq = bic_to_bfqq(bic, bfq_bio_sync(bio)); ++ /* ++ * We take advantage of this function to perform an early merge ++ * of the queues of possible cooperating processes. ++ */ ++ if (bfqq != NULL) { ++ new_bfqq = bfq_setup_cooperator(bfqd, bfqq, bio, false); ++ if (new_bfqq != NULL) { ++ bfq_merge_bfqqs(bfqd, bic, bfqq, new_bfqq); ++ /* ++ * If we get here, the bio will be queued in the shared queue, ++ * i.e., new_bfqq, so use new_bfqq to decide whether bio and ++ * rq can be merged. ++ */ ++ bfqq = new_bfqq; ++ } ++ } ++ ++ return bfqq == RQ_BFQQ(rq); ++} ++ ++static void __bfq_set_in_service_queue(struct bfq_data *bfqd, ++ struct bfq_queue *bfqq) ++{ ++ if (bfqq != NULL) { ++ bfq_mark_bfqq_must_alloc(bfqq); ++ bfq_mark_bfqq_budget_new(bfqq); ++ bfq_clear_bfqq_fifo_expire(bfqq); ++ ++ bfqd->budgets_assigned = (bfqd->budgets_assigned*7 + 256) / 8; ++ ++ bfq_log_bfqq(bfqd, bfqq, ++ "set_in_service_queue, cur-budget = %lu", ++ bfqq->entity.budget); ++ } ++ ++ bfqd->in_service_queue = bfqq; ++} ++ ++/* ++ * Get and set a new queue for service. ++ */ ++static struct bfq_queue *bfq_set_in_service_queue(struct bfq_data *bfqd) ++{ ++ struct bfq_queue *bfqq = bfq_get_next_queue(bfqd); ++ ++ __bfq_set_in_service_queue(bfqd, bfqq); ++ return bfqq; ++} ++ + /* + * If enough samples have been computed, return the current max budget + * stored in bfqd, which is dynamically updated according to the +@@ -1077,63 +1345,6 @@ static struct request *bfq_check_fifo(struct bfq_queue *bfqq) + return rq; + } + +-/* +- * Must be called with the queue_lock held. 
+- */ +-static int bfqq_process_refs(struct bfq_queue *bfqq) +-{ +- int process_refs, io_refs; +- +- io_refs = bfqq->allocated[READ] + bfqq->allocated[WRITE]; +- process_refs = atomic_read(&bfqq->ref) - io_refs - bfqq->entity.on_st; +- BUG_ON(process_refs < 0); +- return process_refs; +-} +- +-static void bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq) +-{ +- int process_refs, new_process_refs; +- struct bfq_queue *__bfqq; +- +- /* +- * If there are no process references on the new_bfqq, then it is +- * unsafe to follow the ->new_bfqq chain as other bfqq's in the chain +- * may have dropped their last reference (not just their last process +- * reference). +- */ +- if (!bfqq_process_refs(new_bfqq)) +- return; +- +- /* Avoid a circular list and skip interim queue merges. */ +- while ((__bfqq = new_bfqq->new_bfqq)) { +- if (__bfqq == bfqq) +- return; +- new_bfqq = __bfqq; +- } +- +- process_refs = bfqq_process_refs(bfqq); +- new_process_refs = bfqq_process_refs(new_bfqq); +- /* +- * If the process for the bfqq has gone away, there is no +- * sense in merging the queues. +- */ +- if (process_refs == 0 || new_process_refs == 0) +- return; +- +- /* +- * Merge in the direction of the lesser amount of work. +- */ +- if (new_process_refs >= process_refs) { +- bfqq->new_bfqq = new_bfqq; +- atomic_add(process_refs, &new_bfqq->ref); +- } else { +- new_bfqq->new_bfqq = bfqq; +- atomic_add(new_process_refs, &bfqq->ref); +- } +- bfq_log_bfqq(bfqq->bfqd, bfqq, "scheduling merge with queue %d", +- new_bfqq->pid); +-} +- + static inline unsigned long bfq_bfqq_budget_left(struct bfq_queue *bfqq) + { + struct bfq_entity *entity = &bfqq->entity; +@@ -1703,7 +1914,7 @@ static inline bool bfq_bfqq_must_idle(struct bfq_queue *bfqq) + */ + static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd) + { +- struct bfq_queue *bfqq, *new_bfqq = NULL; ++ struct bfq_queue *bfqq; + struct request *next_rq; + enum bfqq_expiration reason = BFQ_BFQQ_BUDGET_TIMEOUT; + +@@ -1713,17 +1924,6 @@ static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd) + + bfq_log_bfqq(bfqd, bfqq, "select_queue: already in-service queue"); + +- /* +- * If another queue has a request waiting within our mean seek +- * distance, let it run. The expire code will check for close +- * cooperators and put the close queue at the front of the +- * service tree. If possible, merge the expiring queue with the +- * new bfqq. +- */ +- new_bfqq = bfq_close_cooperator(bfqd, bfqq); +- if (new_bfqq != NULL && bfqq->new_bfqq == NULL) +- bfq_setup_merge(bfqq, new_bfqq); +- + if (bfq_may_expire_for_budg_timeout(bfqq) && + !timer_pending(&bfqd->idle_slice_timer) && + !bfq_bfqq_must_idle(bfqq)) +@@ -1760,36 +1960,26 @@ static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd) + bfq_clear_bfqq_wait_request(bfqq); + del_timer(&bfqd->idle_slice_timer); + } +- if (new_bfqq == NULL) +- goto keep_queue; +- else +- goto expire; ++ goto keep_queue; + } + } + + /* +- * No requests pending. If the in-service queue has no cooperator and +- * still has requests in flight (possibly waiting for a completion) +- * or is idling for a new request, then keep it. ++ * No requests pending. If the in-service queue still has requests in ++ * flight (possibly waiting for a completion) or is idling for a new ++ * request, then keep it. 
+ */ +- if (new_bfqq == NULL && (timer_pending(&bfqd->idle_slice_timer) || +- (bfqq->dispatched != 0 && bfq_bfqq_must_not_expire(bfqq)))) { ++ if (timer_pending(&bfqd->idle_slice_timer) || ++ (bfqq->dispatched != 0 && bfq_bfqq_must_not_expire(bfqq))) { + bfqq = NULL; + goto keep_queue; +- } else if (new_bfqq != NULL && timer_pending(&bfqd->idle_slice_timer)) { +- /* +- * Expiring the queue because there is a close cooperator, +- * cancel timer. +- */ +- bfq_clear_bfqq_wait_request(bfqq); +- del_timer(&bfqd->idle_slice_timer); + } + + reason = BFQ_BFQQ_NO_MORE_REQUESTS; + expire: + bfq_bfqq_expire(bfqd, bfqq, 0, reason); + new_queue: +- bfqq = bfq_set_in_service_queue(bfqd, new_bfqq); ++ bfqq = bfq_set_in_service_queue(bfqd); + bfq_log(bfqd, "select_queue: new queue %d returned", + bfqq != NULL ? bfqq->pid : 0); + keep_queue: +@@ -1799,9 +1989,8 @@ keep_queue: + static void bfq_update_raising_data(struct bfq_data *bfqd, + struct bfq_queue *bfqq) + { ++ struct bfq_entity *entity = &bfqq->entity; + if (bfqq->raising_coeff > 1) { /* queue is being boosted */ +- struct bfq_entity *entity = &bfqq->entity; +- + bfq_log_bfqq(bfqd, bfqq, + "raising period dur %u/%u msec, " + "old raising coeff %u, w %d(%d)", +@@ -1818,7 +2007,7 @@ static void bfq_update_raising_data(struct bfq_data *bfqd, + "WARN: pending prio change"); + /* + * If too much time has elapsed from the beginning +- * of this weight-raising, stop it. ++ * of this weight-raising period, stop it. + */ + if (jiffies - bfqq->last_rais_start_finish > + bfqq->raising_cur_max_time) { +@@ -1830,11 +2019,13 @@ static void bfq_update_raising_data(struct bfq_data *bfqd, + jiffies_to_msecs(bfqq-> + raising_cur_max_time)); + bfq_bfqq_end_raising(bfqq); +- __bfq_entity_update_weight_prio( +- bfq_entity_service_tree(entity), +- entity); + } + } ++ /* Update weight both if it must be raised and if it must be lowered */ ++ if ((entity->weight > entity->orig_weight) != (bfqq->raising_coeff > 1)) ++ __bfq_entity_update_weight_prio( ++ bfq_entity_service_tree(entity), ++ entity); + } + + /* +@@ -2075,6 +2266,25 @@ static void bfq_init_icq(struct io_cq *icq) + struct bfq_io_cq *bic = icq_to_bic(icq); + + bic->ttime.last_end_request = jiffies; ++ /* ++ * A newly created bic indicates that the process has just ++ * started doing I/O, and is probably mapping into memory its ++ * executable and libraries: it definitely needs weight raising. ++ * There is however the possibility that the process performs, ++ * for a while, I/O close to some other process. EQM intercepts ++ * this behavior and may merge the queue corresponding to the ++ * process with some other queue, BEFORE the weight of the queue ++ * is raised. Merged queues are not weight-raised (they are assumed ++ * to belong to processes that benefit only from high throughput). ++ * If the merge is basically the consequence of an accident, then ++ * the queue will be split soon and will get back its old weight. ++ * It is then important to write down somewhere that this queue ++ * does need weight raising, even if it did not make it to get its ++ * weight raised before being merged. To this purpose, we overload ++ * the field raising_time_left and assign 1 to it, to mark the queue ++ * as needing weight raising. 
++ */ ++ bic->raising_time_left = 1; + } + + static void bfq_exit_icq(struct io_cq *icq) +@@ -2088,6 +2298,13 @@ static void bfq_exit_icq(struct io_cq *icq) + } + + if (bic->bfqq[BLK_RW_SYNC]) { ++ /* ++ * If the bic is using a shared queue, put the reference ++ * taken on the io_context when the bic started using a ++ * shared bfq_queue. ++ */ ++ if (bfq_bfqq_coop(bic->bfqq[BLK_RW_SYNC])) ++ put_io_context(icq->ioc); + bfq_exit_bfqq(bfqd, bic->bfqq[BLK_RW_SYNC]); + bic->bfqq[BLK_RW_SYNC] = NULL; + } +@@ -2375,6 +2592,10 @@ static void bfq_update_idle_window(struct bfq_data *bfqd, + if (!bfq_bfqq_sync(bfqq) || bfq_class_idle(bfqq)) + return; + ++ /* Idle window just restored, statistics are meaningless. */ ++ if (bfq_bfqq_just_split(bfqq)) ++ return; ++ + enable_idle = bfq_bfqq_idle_window(bfqq); + + if (atomic_read(&bic->icq.ioc->active_ref) == 0 || +@@ -2415,6 +2636,7 @@ static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq, + if (bfqq->entity.service > bfq_max_budget(bfqd) / 8 || + !BFQQ_SEEKY(bfqq)) + bfq_update_idle_window(bfqd, bfqq, bic); ++ bfq_clear_bfqq_just_split(bfqq); + + bfq_log_bfqq(bfqd, bfqq, + "rq_enqueued: idle_window=%d (seeky %d, mean %llu)", +@@ -2475,13 +2697,48 @@ static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq, + static void bfq_insert_request(struct request_queue *q, struct request *rq) + { + struct bfq_data *bfqd = q->elevator->elevator_data; +- struct bfq_queue *bfqq = RQ_BFQQ(rq); ++ struct bfq_queue *bfqq = RQ_BFQQ(rq), *new_bfqq; + + assert_spin_locked(bfqd->queue->queue_lock); ++ ++ /* ++ * An unplug may trigger a requeue of a request from the device ++ * driver: make sure we are in process context while trying to ++ * merge two bfq_queues. ++ */ ++ if (!in_interrupt()) { ++ new_bfqq = bfq_setup_cooperator(bfqd, bfqq, rq, true); ++ if (new_bfqq != NULL) { ++ if (bic_to_bfqq(RQ_BIC(rq), 1) != bfqq) ++ new_bfqq = bic_to_bfqq(RQ_BIC(rq), 1); ++ /* ++ * Release the request's reference to the old bfqq ++ * and make sure one is taken to the shared queue. ++ */ ++ new_bfqq->allocated[rq_data_dir(rq)]++; ++ bfqq->allocated[rq_data_dir(rq)]--; ++ atomic_inc(&new_bfqq->ref); ++ bfq_put_queue(bfqq); ++ if (bic_to_bfqq(RQ_BIC(rq), 1) == bfqq) ++ bfq_merge_bfqqs(bfqd, RQ_BIC(rq), ++ bfqq, new_bfqq); ++ rq->elv.priv[1] = new_bfqq; ++ bfqq = new_bfqq; ++ } ++ } ++ + bfq_init_prio_data(bfqq, RQ_BIC(rq)); + + bfq_add_rq_rb(rq); + ++ /* ++ * Here a newly-created bfq_queue has already started a weight-raising ++ * period: clear raising_time_left to prevent bfq_bfqq_save_state() ++ * from assigning it a full weight-raising period. See the detailed ++ * comments about this field in bfq_init_icq(). ++ */ ++ if (bfqq->bic != NULL) ++ bfqq->bic->raising_time_left = 0; + rq_set_fifo_time(rq, jiffies + bfqd->bfq_fifo_expire[rq_is_sync(rq)]); + list_add_tail(&rq->queuelist, &bfqq->fifo); + +@@ -2629,18 +2886,6 @@ static void bfq_put_request(struct request *rq) + } + } + +-static struct bfq_queue * +-bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic, +- struct bfq_queue *bfqq) +-{ +- bfq_log_bfqq(bfqd, bfqq, "merging with queue %lu", +- (long unsigned)bfqq->new_bfqq->pid); +- bic_set_bfqq(bic, bfqq->new_bfqq, 1); +- bfq_mark_bfqq_coop(bfqq->new_bfqq); +- bfq_put_queue(bfqq); +- return bic_to_bfqq(bic, 1); +-} +- + /* + * Returns NULL if a new bfqq should be allocated, or the old bfqq if this + * was the last process referring to said bfqq. 
+@@ -2649,6 +2894,9 @@ static struct bfq_queue * + bfq_split_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq) + { + bfq_log_bfqq(bfqq->bfqd, bfqq, "splitting queue"); ++ ++ put_io_context(bic->icq.ioc); ++ + if (bfqq_process_refs(bfqq) == 1) { + bfqq->pid = current->pid; + bfq_clear_bfqq_coop(bfqq); +@@ -2677,6 +2925,7 @@ static int bfq_set_request(struct request_queue *q, struct request *rq, + struct bfq_queue *bfqq; + struct bfq_group *bfqg; + unsigned long flags; ++ bool split = false; + + might_sleep_if(gfp_mask & __GFP_WAIT); + +@@ -2695,24 +2944,14 @@ new_queue: + bfqq = bfq_get_queue(bfqd, bfqg, is_sync, bic, gfp_mask); + bic_set_bfqq(bic, bfqq, is_sync); + } else { +- /* +- * If the queue was seeky for too long, break it apart. +- */ ++ /* If the queue was seeky for too long, break it apart. */ + if (bfq_bfqq_coop(bfqq) && bfq_bfqq_split_coop(bfqq)) { + bfq_log_bfqq(bfqd, bfqq, "breaking apart bfqq"); + bfqq = bfq_split_bfqq(bic, bfqq); ++ split = true; + if (!bfqq) + goto new_queue; + } +- +- /* +- * Check to see if this queue is scheduled to merge with +- * another closely cooperating queue. The merging of queues +- * happens here as it must be done in process context. +- * The reference on new_bfqq was taken in merge_bfqqs. +- */ +- if (bfqq->new_bfqq != NULL) +- bfqq = bfq_merge_bfqqs(bfqd, bic, bfqq); + } + + bfqq->allocated[rw]++; +@@ -2723,6 +2962,26 @@ new_queue: + rq->elv.priv[0] = bic; + rq->elv.priv[1] = bfqq; + ++ /* ++ * If a bfq_queue has only one process reference, it is owned ++ * by only one bfq_io_cq: we can set the bic field of the ++ * bfq_queue to the address of that structure. Also, if the ++ * queue has just been split, mark a flag so that the ++ * information is available to the other scheduler hooks. ++ */ ++ if (bfqq_process_refs(bfqq) == 1) { ++ bfqq->bic = bic; ++ if (split) { ++ bfq_mark_bfqq_just_split(bfqq); ++ /* ++ * If the queue has just been split from a shared queue, ++ * restore the idle window and the possible weight ++ * raising period. ++ */ ++ bfq_bfqq_resume_state(bfqq, bic); ++ } ++ } ++ + spin_unlock_irqrestore(q->queue_lock, flags); + + return 0; +diff --git a/block/bfq-sched.c b/block/bfq-sched.c +index 30df81c..47e66a8 100644 +--- a/block/bfq-sched.c ++++ b/block/bfq-sched.c +@@ -979,34 +979,6 @@ static struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd) + return bfqq; + } + +-/* +- * Forced extraction of the given queue. +- */ +-static void bfq_get_next_queue_forced(struct bfq_data *bfqd, +- struct bfq_queue *bfqq) +-{ +- struct bfq_entity *entity; +- struct bfq_sched_data *sd; +- +- BUG_ON(bfqd->in_service_queue != NULL); +- +- entity = &bfqq->entity; +- /* +- * Bubble up extraction/update from the leaf to the root. 
+- */ +- for_each_entity(entity) { +- sd = entity->sched_data; +- bfq_update_budget(entity); +- bfq_update_vtime(bfq_entity_service_tree(entity)); +- bfq_active_extract(bfq_entity_service_tree(entity), entity); +- sd->active_entity = entity; +- sd->next_active = NULL; +- entity->service = 0; +- } +- +- return; +-} +- + static void __bfq_bfqd_reset_in_service(struct bfq_data *bfqd) + { + if (bfqd->in_service_bic != NULL) { +diff --git a/block/bfq.h b/block/bfq.h +index 78da7d2..b6ebc1d 100644 +--- a/block/bfq.h ++++ b/block/bfq.h +@@ -192,6 +192,8 @@ struct bfq_group; + * idle to backlogged + * @service_from_backlogged: cumulative service received from the @bfq_queue + * since the last transition from idle to backlogged ++ * @bic: pointer to the bfq_io_cq owning the bfq_queue, set to %NULL if the ++ * queue is shared + * + * A bfq_queue is a leaf request queue; it can be associated to an io_context + * or more (if it is an async one). @cgroup holds a reference to the +@@ -235,6 +237,7 @@ struct bfq_queue { + sector_t last_request_pos; + + pid_t pid; ++ struct bfq_io_cq *bic; + + /* weight-raising fields */ + unsigned int raising_cur_max_time; +@@ -264,12 +267,23 @@ struct bfq_ttime { + * @icq: associated io_cq structure + * @bfqq: array of two process queues, the sync and the async + * @ttime: associated @bfq_ttime struct ++ * @raising_time_left: snapshot of the time left before weight raising ends ++ * for the sync queue associated to this process; this ++ * snapshot is taken to remember this value while the weight ++ * raising is suspended because the queue is merged with a ++ * shared queue, and is used to set @raising_cur_max_time ++ * when the queue is split from the shared queue and its ++ * weight is raised again ++ * @saved_idle_window: same purpose as the previous field for the idle window + */ + struct bfq_io_cq { + struct io_cq icq; /* must be the first member */ + struct bfq_queue *bfqq[2]; + struct bfq_ttime ttime; + int ioprio; ++ ++ unsigned int raising_time_left; ++ unsigned int saved_idle_window; + }; + + /** +@@ -411,6 +425,7 @@ enum bfqq_state_flags { + BFQ_BFQQ_FLAG_budget_new, /* no completion with this budget */ + BFQ_BFQQ_FLAG_coop, /* bfqq is shared */ + BFQ_BFQQ_FLAG_split_coop, /* shared bfqq will be splitted */ ++ BFQ_BFQQ_FLAG_just_split, /* queue has just been split */ + BFQ_BFQQ_FLAG_softrt_update, /* needs softrt-next-start update */ + }; + +@@ -438,6 +453,7 @@ BFQ_BFQQ_FNS(sync); + BFQ_BFQQ_FNS(budget_new); + BFQ_BFQQ_FNS(coop); + BFQ_BFQQ_FNS(split_coop); ++BFQ_BFQQ_FNS(just_split); + BFQ_BFQQ_FNS(softrt_update); + #undef BFQ_BFQQ_FNS + +-- +1.8.5.2 + diff --git a/sys-kernel/kogaion-sources/files/desktop/3.10-ck1.patch b/sys-kernel/kogaion-sources/files/desktop/3.10-ck1.patch new file mode 100644 index 00000000..1a9feb96 --- /dev/null +++ b/sys-kernel/kogaion-sources/files/desktop/3.10-ck1.patch @@ -0,0 +1,8732 @@ +// patch-3.10-ck1.patch +Index: linux-3.10-ck1/arch/powerpc/platforms/cell/spufs/sched.c +=================================================================== +--- linux-3.10-ck1.orig/arch/powerpc/platforms/cell/spufs/sched.c 2013-07-09 17:28:57.209502080 +1000 ++++ linux-3.10-ck1/arch/powerpc/platforms/cell/spufs/sched.c 2013-07-09 17:29:00.837501924 +1000 +@@ -64,11 +64,6 @@ + static struct timer_list spuloadavg_timer; + + /* +- * Priority of a normal, non-rt, non-niced'd process (aka nice level 0). +- */ +-#define NORMAL_PRIO 120 +- +-/* + * Frequency of the spu scheduler tick. 
By default we do one SPU scheduler + * tick for every 10 CPU scheduler ticks. + */ +Index: linux-3.10-ck1/Documentation/scheduler/sched-BFS.txt +=================================================================== +--- /dev/null 1970-01-01 00:00:00.000000000 +0000 ++++ linux-3.10-ck1/Documentation/scheduler/sched-BFS.txt 2013-07-09 17:29:00.837501924 +1000 +@@ -0,0 +1,347 @@ ++BFS - The Brain Fuck Scheduler by Con Kolivas. ++ ++Goals. ++ ++The goal of the Brain Fuck Scheduler, referred to as BFS from here on, is to ++completely do away with the complex designs of the past for the cpu process ++scheduler and instead implement one that is very simple in basic design. ++The main focus of BFS is to achieve excellent desktop interactivity and ++responsiveness without heuristics and tuning knobs that are difficult to ++understand, impossible to model and predict the effect of, and when tuned to ++one workload cause massive detriment to another. ++ ++ ++Design summary. ++ ++BFS is best described as a single runqueue, O(n) lookup, earliest effective ++virtual deadline first design, loosely based on EEVDF (earliest eligible virtual ++deadline first) and my previous Staircase Deadline scheduler. Each component ++shall be described in order to understand the significance of, and reasoning for ++it. The codebase when the first stable version was released was approximately ++9000 lines less code than the existing mainline linux kernel scheduler (in ++2.6.31). This does not even take into account the removal of documentation and ++the cgroups code that is not used. ++ ++Design reasoning. ++ ++The single runqueue refers to the queued but not running processes for the ++entire system, regardless of the number of CPUs. The reason for going back to ++a single runqueue design is that once multiple runqueues are introduced, ++per-CPU or otherwise, there will be complex interactions as each runqueue will ++be responsible for the scheduling latency and fairness of the tasks only on its ++own runqueue, and to achieve fairness and low latency across multiple CPUs, any ++advantage in throughput of having CPU local tasks causes other disadvantages. ++This is due to requiring a very complex balancing system to at best achieve some ++semblance of fairness across CPUs and can only maintain relatively low latency ++for tasks bound to the same CPUs, not across them. To increase said fairness ++and latency across CPUs, the advantage of local runqueue locking, which makes ++for better scalability, is lost due to having to grab multiple locks. ++ ++A significant feature of BFS is that all accounting is done purely based on CPU ++used and nowhere is sleep time used in any way to determine entitlement or ++interactivity. Interactivity "estimators" that use some kind of sleep/run ++algorithm are doomed to fail to detect all interactive tasks, and to falsely tag ++tasks that aren't interactive as being so. The reason for this is that it is ++close to impossible to determine that when a task is sleeping, whether it is ++doing it voluntarily, as in a userspace application waiting for input in the ++form of a mouse click or otherwise, or involuntarily, because it is waiting for ++another thread, process, I/O, kernel activity or whatever. Thus, such an ++estimator will introduce corner cases, and more heuristics will be required to ++cope with those corner cases, introducing more corner cases and failed ++interactivity detection and so on. 
Interactivity in BFS is built into the design
++by virtue of the fact that tasks that are waking up have not used up their quota
++of CPU time, and have earlier effective deadlines, thereby making it very likely
++they will preempt any CPU bound task of equivalent nice level. See below for
++more information on the virtual deadline mechanism. Even if they do not preempt
++a running task, because the rr interval is guaranteed to have a bound upper
++limit on how long a task will wait for, it will be scheduled within a timeframe
++that will not cause visible interface jitter.
++
++
++Design details.
++
++Task insertion.
++
++BFS inserts tasks into each relevant queue as an O(1) insertion into a doubly
++linked list. On insertion, *every* running queue is checked to see if the newly
++queued task can run on any idle queue, or preempt the lowest running task on the
++system. This is how the cross-CPU scheduling of BFS achieves significantly lower
++latency per extra CPU the system has. In this case the lookup is, in the worst
++case scenario, O(n) where n is the number of CPUs on the system.
++
++Data protection.
++
++BFS has one single lock protecting the process local data of every task in the
++global queue. Thus every insertion, removal and modification of task data in the
++global runqueue needs to grab the global lock. However, once a task is taken by
++a CPU, the CPU has its own local data copy of the running process' accounting
++information which only that CPU accesses and modifies (such as during a
++timer tick) thus allowing the accounting data to be updated lockless. Once a
++CPU has taken a task to run, it removes it from the global queue. Thus the
++global queue only ever has, at most,
++
++	(number of tasks requesting cpu time) - (number of logical CPUs) + 1
++
++tasks in the global queue. This value is relevant for the time taken to look up
++tasks during scheduling. This will increase if many tasks with CPU affinity set
++in their policy to limit which CPUs they're allowed to run on outnumber the
++number of CPUs. The +1 is because when rescheduling a task, the CPU's
++currently running task is put back on the queue. Lookup will be described after
++the virtual deadline mechanism is explained.
++
++Virtual deadline.
++
++The key to achieving low latency, scheduling fairness, and "nice level"
++distribution in BFS is entirely in the virtual deadline mechanism. The one
++tunable in BFS is the rr_interval, or "round robin interval". This is the
++maximum time two SCHED_OTHER (or SCHED_NORMAL, the common scheduling policy)
++tasks of the same nice level will be running for, or looking at it the other
++way around, the longest duration two tasks of the same nice level will be
++delayed for. When a task requests cpu time, it is given a quota (time_slice)
++equal to the rr_interval and a virtual deadline. The virtual deadline is
++offset from the current time in jiffies by this equation:
++
++	jiffies + (prio_ratio * rr_interval)
++
++The prio_ratio is determined as a ratio compared to the baseline of nice -20
++and increases by 10% per nice level. The deadline is a virtual one only in that
++no guarantee is placed that a task will actually be scheduled by this time, but
++it is used to compare which task should go next. There are three components to
++how a task is next chosen. First is time_slice expiration. If a task runs out
++of its time_slice, it is descheduled, the time_slice is refilled, and the
++deadline reset to that formula above.
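++
++As a rough worked example of the formula (numbers illustrative, assuming
++the 10% per-level steps compound): with the default rr_interval of 6ms, a
++nice -20 task has the baseline prio_ratio of 1, so its deadline sits
++roughly 6ms in the future. A nice 0 task is 20 levels up, giving a
++prio_ratio of about 1.1^20, or roughly 6.7, and a deadline roughly 40ms
++out. The nice -20 task therefore consistently earns earlier deadlines
++and, with them, the CPU.
++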
Second is sleep, where a task no longer ++is requesting CPU for whatever reason. The time_slice and deadline are _not_ ++adjusted in this case and are just carried over for when the task is next ++scheduled. Third is preemption, and that is when a newly waking task is deemed ++higher priority than a currently running task on any cpu by virtue of the fact ++that it has an earlier virtual deadline than the currently running task. The ++earlier deadline is the key to which task is next chosen for the first and ++second cases. Once a task is descheduled, it is put back on the queue, and an ++O(n) lookup of all queued-but-not-running tasks is done to determine which has ++the earliest deadline and that task is chosen to receive CPU next. ++ ++The CPU proportion of different nice tasks works out to be approximately the ++ ++ (prio_ratio difference)^2 ++ ++The reason it is squared is that a task's deadline does not change while it is ++running unless it runs out of time_slice. Thus, even if the time actually ++passes the deadline of another task that is queued, it will not get CPU time ++unless the current running task deschedules, and the time "base" (jiffies) is ++constantly moving. ++ ++Task lookup. ++ ++BFS has 103 priority queues. 100 of these are dedicated to the static priority ++of realtime tasks, and the remaining 3 are, in order of best to worst priority, ++SCHED_ISO (isochronous), SCHED_NORMAL, and SCHED_IDLEPRIO (idle priority ++scheduling). When a task of these priorities is queued, a bitmap of running ++priorities is set showing which of these priorities has tasks waiting for CPU ++time. When a CPU is made to reschedule, the lookup for the next task to get ++CPU time is performed in the following way: ++ ++First the bitmap is checked to see what static priority tasks are queued. If ++any realtime priorities are found, the corresponding queue is checked and the ++first task listed there is taken (provided CPU affinity is suitable) and lookup ++is complete. If the priority corresponds to a SCHED_ISO task, they are also ++taken in FIFO order (as they behave like SCHED_RR). If the priority corresponds ++to either SCHED_NORMAL or SCHED_IDLEPRIO, then the lookup becomes O(n). At this ++stage, every task in the runlist that corresponds to that priority is checked ++to see which has the earliest set deadline, and (provided it has suitable CPU ++affinity) it is taken off the runqueue and given the CPU. If a task has an ++expired deadline, it is taken and the rest of the lookup aborted (as they are ++chosen in FIFO order). ++ ++Thus, the lookup is O(n) in the worst case only, where n is as described ++earlier, as tasks may be chosen before the whole task list is looked over. ++ ++ ++Scalability. ++ ++The major limitations of BFS will be that of scalability, as the separate ++runqueue designs will have less lock contention as the number of CPUs rises. ++However they do not scale linearly even with separate runqueues as multiple ++runqueues will need to be locked concurrently on such designs to be able to ++achieve fair CPU balancing, to try and achieve some sort of nice-level fairness ++across CPUs, and to achieve low enough latency for tasks on a busy CPU when ++other CPUs would be more suited. BFS has the advantage that it requires no ++balancing algorithm whatsoever, as balancing occurs by proxy simply because ++all CPUs draw off the global runqueue, in priority and deadline order. 
Despite
++the fact that scalability is _not_ the prime concern of BFS, it both shows very
++good scalability to smaller numbers of CPUs and is likely a more scalable design
++at these numbers of CPUs.
++
++It also has some very low overhead scalability features built into the design
++when it has been deemed their overhead is so marginal that they're worth adding.
++The first is the local copy of the running process' data to the CPU it's running
++on to allow that data to be updated lockless where possible. Then there is
++deference paid to the last CPU a task was running on, by trying that CPU first
++when looking for an idle CPU to use the next time it's scheduled. Finally there
++is the notion of "sticky" tasks that are flagged when they are involuntarily
++descheduled, meaning they still want further CPU time. This sticky flag is
++used to bias heavily against those tasks being scheduled on a different CPU
++unless that CPU would be otherwise idle. When a cpu frequency governor is used
++that scales with CPU load, such as ondemand, sticky tasks are not scheduled
++on a different CPU at all, preferring instead to go idle. This means the CPU
++they were bound to is more likely to increase its speed while the other CPU
++will go idle, thus speeding up total task execution time and likely decreasing
++power usage. This is the only scenario where BFS will allow a CPU to go idle
++in preference to scheduling a task on the earliest available spare CPU.
++
++The real cost of migrating a task from one CPU to another is entirely dependent
++on the cache footprint of the task, how cache intensive the task is, how long
++it's been running on that CPU to take up the bulk of its cache, how big the CPU
++cache is, how fast and how layered the CPU cache is, how fast a context switch
++is... and so on. In other words, it's close to random in the real world where we
++do more than just one sole workload. The only thing we can be sure of is that
++it's not free. So BFS uses the principle that an idle CPU is a wasted CPU and
++utilising idle CPUs is more important than cache locality, and cache locality
++only plays a part after that.
++
++When choosing an idle CPU for a waking task, the cache locality is determined
++according to where the task last ran and then idle CPUs are ranked from best
++to worst to choose the most suitable idle CPU based on cache locality, NUMA
++node locality and hyperthread sibling busyness. They are chosen in the
++following preference (if idle):
++
++* Same core, idle or busy cache, idle threads.
++* Other core, same cache, idle or busy cache, idle threads.
++* Same node, other CPU, idle cache, idle threads.
++* Same node, other CPU, busy cache, idle threads.
++* Same core, busy threads.
++* Other core, same cache, busy threads.
++* Same node, other CPU, busy threads.
++* Other node, other CPU, idle cache, idle threads.
++* Other node, other CPU, busy cache, idle threads.
++* Other node, other CPU, busy threads.
++
++This shows the SMT or "hyperthread" awareness in the design as well which will
++choose a real idle core first before a logical SMT sibling which already has
++tasks on the physical CPU.
++
++Early benchmarking of BFS suggested scalability dropped off at the 16 CPU mark.
++However this benchmarking was performed on an earlier design that was far less
++scalable than the current one so it's hard to know how scalable it is in terms
++of both CPUs (due to the global runqueue) and heavily loaded machines (due to
++O(n) lookup) at this stage.
Note that in terms of scalability, the number of ++_logical_ CPUs matters, not the number of _physical_ CPUs. Thus, a dual (2x) ++quad core (4X) hyperthreaded (2X) machine is effectively a 16X. Newer benchmark ++results are very promising indeed, without needing to tweak any knobs, features ++or options. Benchmark contributions are most welcome. ++ ++ ++Features ++ ++As the initial prime target audience for BFS was the average desktop user, it ++was designed to not need tweaking, tuning or have features set to obtain benefit ++from it. Thus the number of knobs and features has been kept to an absolute ++minimum and should not require extra user input for the vast majority of cases. ++There are precisely 2 tunables, and 2 extra scheduling policies. The rr_interval ++and iso_cpu tunables, and the SCHED_ISO and SCHED_IDLEPRIO policies. In addition ++to this, BFS also uses sub-tick accounting. What BFS does _not_ now feature is ++support for CGROUPS. The average user should neither need to know what these ++are, nor should they need to be using them to have good desktop behaviour. ++ ++rr_interval ++ ++There is only one "scheduler" tunable, the round robin interval. This can be ++accessed in ++ ++ /proc/sys/kernel/rr_interval ++ ++The value is in milliseconds, and the default value is set to 6ms. Valid values ++are from 1 to 1000. Decreasing the value will decrease latencies at the cost of ++decreasing throughput, while increasing it will improve throughput, but at the ++cost of worsening latencies. The accuracy of the rr interval is limited by HZ ++resolution of the kernel configuration. Thus, the worst case latencies are ++usually slightly higher than this actual value. BFS uses "dithering" to try and ++minimise the effect the Hz limitation has. The default value of 6 is not an ++arbitrary one. It is based on the fact that humans can detect jitter at ++approximately 7ms, so aiming for much lower latencies is pointless under most ++circumstances. It is worth noting this fact when comparing the latency ++performance of BFS to other schedulers. Worst case latencies being higher than ++7ms are far worse than average latencies not being in the microsecond range. ++Experimentation has shown that rr intervals being increased up to 300 can ++improve throughput but beyond that, scheduling noise from elsewhere prevents ++further demonstrable throughput. ++ ++Isochronous scheduling. ++ ++Isochronous scheduling is a unique scheduling policy designed to provide ++near-real-time performance to unprivileged (ie non-root) users without the ++ability to starve the machine indefinitely. Isochronous tasks (which means ++"same time") are set using, for example, the schedtool application like so: ++ ++ schedtool -I -e amarok ++ ++This will start the audio application "amarok" as SCHED_ISO. How SCHED_ISO works ++is that it has a priority level between true realtime tasks and SCHED_NORMAL ++which would allow them to preempt all normal tasks, in a SCHED_RR fashion (ie, ++if multiple SCHED_ISO tasks are running, they purely round robin at rr_interval ++rate). However if ISO tasks run for more than a tunable finite amount of time, ++they are then demoted back to SCHED_NORMAL scheduling. This finite amount of ++time is the percentage of _total CPU_ available across the machine, configurable ++as a percentage in the following "resource handling" tunable (as opposed to a ++scheduler tunable): ++ ++ /proc/sys/kernel/iso_cpu ++ ++and is set to 70% by default. 
It is calculated over a rolling 5 second average.
++Because it is the total CPU available, it means that on a multi CPU machine, it
++is possible to have an ISO task running as realtime scheduling indefinitely on
++just one CPU, as the other CPUs will be available. Setting this to 100 is the
++equivalent of giving all users SCHED_RR access and setting it to 0 removes the
++ability to run any pseudo-realtime tasks.
++
++A feature of BFS is that it detects when an application tries to obtain a
++realtime policy (SCHED_RR or SCHED_FIFO) and the caller does not have the
++appropriate privileges to use those policies. When it detects this, it will
++give the task SCHED_ISO policy instead. Thus it is transparent to the user.
++Because some applications constantly set their policy as well as their nice
++level, there is potential for them to undo the override specified by the user
++on the command line of setting the policy to SCHED_ISO. To counter this, once
++a task has been set to SCHED_ISO policy, it needs superuser privileges to set
++it back to SCHED_NORMAL. This will ensure the task remains ISO and all child
++processes and threads will also inherit the ISO policy.
++
++Idleprio scheduling.
++
++Idleprio scheduling is a scheduling policy designed to give out CPU to a task
++_only_ when the CPU would be otherwise idle. The idea behind this is to allow
++ultra low priority tasks to be run in the background that have virtually no
++effect on the foreground tasks. This is ideally suited to distributed computing
++clients (like setiathome, folding, mprime etc) but can also be used to start
++a video encode or so on without any slowdown of other tasks. To keep this
++policy from grabbing shared resources and holding them indefinitely, if it
++detects a state where the task is waiting on I/O, the machine is about to
++suspend to ram and so on, it will transiently schedule the task as SCHED_NORMAL.
++As per the Isochronous task management, once a task has been scheduled as
++IDLEPRIO, it cannot be put back to SCHED_NORMAL without superuser privileges.
++Tasks can be set to start as SCHED_IDLEPRIO with the schedtool command like so:
++
++	schedtool -D -e ./mprime
++
++Subtick accounting.
++
++It is surprisingly difficult to get accurate CPU accounting, and in many cases,
++the accounting is done by simply determining what is happening at the precise
++moment a timer tick fires off. This becomes increasingly inaccurate as the
++timer tick frequency (HZ) is lowered. It is possible to create an application
++which uses almost 100% CPU, yet by being descheduled at the right time, records
++zero CPU usage. While the main problem with this is that there are possible
++security implications, it is also difficult to determine how much CPU a task
++really does use. BFS tries to use the sub-tick accounting from the TSC clock,
++where possible, to determine real CPU usage. This is not entirely reliable, but
++is far more likely to produce accurate CPU usage data than the existing designs
++and will not show tasks as consuming no CPU usage when they actually are. Thus,
++the amount of CPU reported as being used by BFS will more accurately represent
++how much CPU the task itself is using (as is shown for example by the 'time'
++application), so the reported values may be quite different to other schedulers.
++Values reported as the 'load' are more prone to problems with this design, but
++per process values are closer to real usage.
When comparing throughput of BFS ++to other designs, it is important to compare the actual completed work in terms ++of total wall clock time taken and total work done, rather than the reported ++"cpu usage". ++ ++ ++Con Kolivas Tue, 5 Apr 2011 +Index: linux-3.10-ck1/Documentation/sysctl/kernel.txt +=================================================================== +--- linux-3.10-ck1.orig/Documentation/sysctl/kernel.txt 2013-07-09 17:28:57.123502084 +1000 ++++ linux-3.10-ck1/Documentation/sysctl/kernel.txt 2013-07-09 17:29:00.837501924 +1000 +@@ -33,6 +33,7 @@ + - domainname + - hostname + - hotplug ++- iso_cpu + - kptr_restrict + - kstack_depth_to_print [ X86 only ] + - l2cr [ PPC only ] +@@ -60,6 +61,7 @@ + - randomize_va_space + - real-root-dev ==> Documentation/initrd.txt + - reboot-cmd [ SPARC only ] ++- rr_interval + - rtsig-max + - rtsig-nr + - sem +@@ -306,6 +308,16 @@ + + ============================================================== + ++iso_cpu: (BFS CPU scheduler only). ++ ++This sets the percentage cpu that the unprivileged SCHED_ISO tasks can ++run effectively at realtime priority, averaged over a rolling five ++seconds over the -whole- system, meaning all cpus. ++ ++Set to 70 (percent) by default. ++ ++============================================================== ++ + l2cr: (PPC only) + + This flag controls the L2 cache of G3 processor boards. If +@@ -538,6 +550,20 @@ + + ============================================================== + ++rr_interval: (BFS CPU scheduler only) ++ ++This is the smallest duration that any cpu process scheduling unit ++will run for. Increasing this value can increase throughput of cpu ++bound tasks substantially but at the expense of increased latencies ++overall. Conversely decreasing it will decrease average and maximum ++latencies but at the expense of throughput. This value is in ++milliseconds and the default value chosen depends on the number of ++cpus available at scheduler initialisation with a minimum of 6. ++ ++Valid values are from 1-1000. ++ ++============================================================== ++ + rtsig-max & rtsig-nr: + + The file rtsig-max can be used to tune the maximum number +Index: linux-3.10-ck1/fs/proc/base.c +=================================================================== +--- linux-3.10-ck1.orig/fs/proc/base.c 2013-07-09 17:28:57.169502082 +1000 ++++ linux-3.10-ck1/fs/proc/base.c 2013-07-09 17:29:00.838501924 +1000 +@@ -339,7 +339,7 @@ + static int proc_pid_schedstat(struct task_struct *task, char *buffer) + { + return sprintf(buffer, "%llu %llu %lu\n", +- (unsigned long long)task->se.sum_exec_runtime, ++ (unsigned long long)tsk_seruntime(task), + (unsigned long long)task->sched_info.run_delay, + task->sched_info.pcount); + } +Index: linux-3.10-ck1/include/linux/init_task.h +=================================================================== +--- linux-3.10-ck1.orig/include/linux/init_task.h 2013-07-09 17:28:57.154502083 +1000 ++++ linux-3.10-ck1/include/linux/init_task.h 2013-07-09 17:29:00.838501924 +1000 +@@ -152,12 +152,70 @@ + # define INIT_VTIME(tsk) + #endif + +-#define INIT_TASK_COMM "swapper" +- + /* + * INIT_TASK is used to set up the first task table, touch at + * your own risk!. 
Base=0, limit=0x1fffff (=2MB) + */ ++#ifdef CONFIG_SCHED_BFS ++#define INIT_TASK_COMM "BFS" ++#define INIT_TASK(tsk) \ ++{ \ ++ .state = 0, \ ++ .stack = &init_thread_info, \ ++ .usage = ATOMIC_INIT(2), \ ++ .flags = PF_KTHREAD, \ ++ .prio = NORMAL_PRIO, \ ++ .static_prio = MAX_PRIO-20, \ ++ .normal_prio = NORMAL_PRIO, \ ++ .deadline = 0, \ ++ .policy = SCHED_NORMAL, \ ++ .cpus_allowed = CPU_MASK_ALL, \ ++ .mm = NULL, \ ++ .active_mm = &init_mm, \ ++ .run_list = LIST_HEAD_INIT(tsk.run_list), \ ++ .time_slice = HZ, \ ++ .tasks = LIST_HEAD_INIT(tsk.tasks), \ ++ INIT_PUSHABLE_TASKS(tsk) \ ++ .ptraced = LIST_HEAD_INIT(tsk.ptraced), \ ++ .ptrace_entry = LIST_HEAD_INIT(tsk.ptrace_entry), \ ++ .real_parent = &tsk, \ ++ .parent = &tsk, \ ++ .children = LIST_HEAD_INIT(tsk.children), \ ++ .sibling = LIST_HEAD_INIT(tsk.sibling), \ ++ .group_leader = &tsk, \ ++ RCU_POINTER_INITIALIZER(real_cred, &init_cred), \ ++ RCU_POINTER_INITIALIZER(cred, &init_cred), \ ++ .comm = INIT_TASK_COMM, \ ++ .thread = INIT_THREAD, \ ++ .fs = &init_fs, \ ++ .files = &init_files, \ ++ .signal = &init_signals, \ ++ .sighand = &init_sighand, \ ++ .nsproxy = &init_nsproxy, \ ++ .pending = { \ ++ .list = LIST_HEAD_INIT(tsk.pending.list), \ ++ .signal = {{0}}}, \ ++ .blocked = {{0}}, \ ++ .alloc_lock = __SPIN_LOCK_UNLOCKED(tsk.alloc_lock), \ ++ .journal_info = NULL, \ ++ .cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \ ++ .pi_lock = __RAW_SPIN_LOCK_UNLOCKED(tsk.pi_lock), \ ++ .timer_slack_ns = 50000, /* 50 usec default slack */ \ ++ .pids = { \ ++ [PIDTYPE_PID] = INIT_PID_LINK(PIDTYPE_PID), \ ++ [PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID), \ ++ [PIDTYPE_SID] = INIT_PID_LINK(PIDTYPE_SID), \ ++ }, \ ++ INIT_IDS \ ++ INIT_PERF_EVENTS(tsk) \ ++ INIT_TRACE_IRQFLAGS \ ++ INIT_LOCKDEP \ ++ INIT_FTRACE_GRAPH \ ++ INIT_TRACE_RECURSION \ ++ INIT_TASK_RCU_PREEMPT(tsk) \ ++} ++#else /* CONFIG_SCHED_BFS */ ++#define INIT_TASK_COMM "swapper" + #define INIT_TASK(tsk) \ + { \ + .state = 0, \ +@@ -223,7 +281,7 @@ + INIT_CPUSET_SEQ \ + INIT_VTIME(tsk) \ + } +- ++#endif /* CONFIG_SCHED_BFS */ + + #define INIT_CPU_TIMERS(cpu_timers) \ + { \ +Index: linux-3.10-ck1/include/linux/ioprio.h +=================================================================== +--- linux-3.10-ck1.orig/include/linux/ioprio.h 2013-07-09 17:28:57.146502083 +1000 ++++ linux-3.10-ck1/include/linux/ioprio.h 2013-07-09 17:29:00.838501924 +1000 +@@ -52,6 +52,8 @@ + */ + static inline int task_nice_ioprio(struct task_struct *task) + { ++ if (iso_task(task)) ++ return 0; + return (task_nice(task) + 20) / 5; + } + +Index: linux-3.10-ck1/include/linux/sched.h +=================================================================== +--- linux-3.10-ck1.orig/include/linux/sched.h 2013-07-09 17:28:57.163502082 +1000 ++++ linux-3.10-ck1/include/linux/sched.h 2013-07-09 17:29:00.839501924 +1000 +@@ -229,8 +229,6 @@ + extern void init_idle(struct task_struct *idle, int cpu); + extern void init_idle_bootup_task(struct task_struct *idle); + +-extern int runqueue_is_locked(int cpu); +- + #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON) + extern void nohz_balance_enter_idle(int cpu); + extern void set_cpu_sd_state_idle(void); +@@ -1040,18 +1038,35 @@ + + #ifdef CONFIG_SMP + struct llist_node wake_entry; +- int on_cpu; + #endif +- int on_rq; ++#if defined(CONFIG_SMP) || defined(CONFIG_SCHED_BFS) ++ bool on_cpu; ++#endif ++#ifndef CONFIG_SCHED_BFS ++ bool on_rq; ++#endif + + int prio, static_prio, normal_prio; + unsigned int rt_priority; ++#ifdef CONFIG_SCHED_BFS ++ int time_slice; ++ u64 
deadline; ++ struct list_head run_list; ++ u64 last_ran; ++ u64 sched_time; /* sched_clock time spent running */ ++#ifdef CONFIG_SMP ++ bool sticky; /* Soft affined flag */ ++#endif ++ unsigned long rt_timeout; ++#else /* CONFIG_SCHED_BFS */ + const struct sched_class *sched_class; + struct sched_entity se; + struct sched_rt_entity rt; ++ + #ifdef CONFIG_CGROUP_SCHED + struct task_group *sched_task_group; + #endif ++#endif + + #ifdef CONFIG_PREEMPT_NOTIFIERS + /* list of struct preempt_notifier: */ +@@ -1162,6 +1177,9 @@ + int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */ + + cputime_t utime, stime, utimescaled, stimescaled; ++#ifdef CONFIG_SCHED_BFS ++ unsigned long utime_pc, stime_pc; ++#endif + cputime_t gtime; + #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE + struct cputime prev_cputime; +@@ -1418,6 +1436,64 @@ + #endif + }; + ++#ifdef CONFIG_SCHED_BFS ++bool grunqueue_is_locked(void); ++void grq_unlock_wait(void); ++void cpu_scaling(int cpu); ++void cpu_nonscaling(int cpu); ++bool above_background_load(void); ++#define tsk_seruntime(t) ((t)->sched_time) ++#define tsk_rttimeout(t) ((t)->rt_timeout) ++ ++static inline void tsk_cpus_current(struct task_struct *p) ++{ ++} ++ ++static inline int runqueue_is_locked(int cpu) ++{ ++ return grunqueue_is_locked(); ++} ++ ++void print_scheduler_version(void); ++ ++static inline bool iso_task(struct task_struct *p) ++{ ++ return (p->policy == SCHED_ISO); ++} ++#else /* CFS */ ++extern int runqueue_is_locked(int cpu); ++static inline void cpu_scaling(int cpu) ++{ ++} ++ ++static inline void cpu_nonscaling(int cpu) ++{ ++} ++#define tsk_seruntime(t) ((t)->se.sum_exec_runtime) ++#define tsk_rttimeout(t) ((t)->rt.timeout) ++ ++static inline void tsk_cpus_current(struct task_struct *p) ++{ ++ p->nr_cpus_allowed = current->nr_cpus_allowed; ++} ++ ++static inline void print_scheduler_version(void) ++{ ++ printk(KERN_INFO"CFS CPU scheduler.\n"); ++} ++ ++static inline bool iso_task(struct task_struct *p) ++{ ++ return false; ++} ++ ++/* Anyone feel like implementing this? */ ++static inline bool above_background_load(void) ++{ ++ return false; ++} ++#endif /* CONFIG_SCHED_BFS */ ++ + /* Future-safe accessor for struct task_struct's cpus_allowed. */ + #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed) + +@@ -1844,7 +1920,7 @@ + task_sched_runtime(struct task_struct *task); + + /* sched_exec is called by processes performing an exec */ +-#ifdef CONFIG_SMP ++#if defined(CONFIG_SMP) && !defined(CONFIG_SCHED_BFS) + extern void sched_exec(void); + #else + #define sched_exec() {} +@@ -2549,7 +2625,7 @@ + return 0; + } + +-static inline void set_task_cpu(struct task_struct *p, unsigned int cpu) ++static inline void set_task_cpu(struct task_struct *p, int cpu) + { + } + +Index: linux-3.10-ck1/init/Kconfig +=================================================================== +--- linux-3.10-ck1.orig/init/Kconfig 2013-07-09 17:28:57.132502084 +1000 ++++ linux-3.10-ck1/init/Kconfig 2013-07-09 17:29:00.839501924 +1000 +@@ -28,6 +28,20 @@ + + menu "General setup" + ++config SCHED_BFS ++ bool "BFS cpu scheduler" ++ ---help--- ++ The Brain Fuck CPU Scheduler for excellent interactivity and ++ responsiveness on the desktop and solid scalability on normal ++ hardware and commodity servers. Not recommended for 4096 CPUs. ++ ++ Currently incompatible with the Group CPU scheduler, and RCU TORTURE ++ TEST so these options are disabled. ++ ++ Say Y here. 
++ default y ++ ++ + config BROKEN + bool + +@@ -302,7 +316,7 @@ + # Kind of a stub config for the pure tick based cputime accounting + config TICK_CPU_ACCOUNTING + bool "Simple tick based cputime accounting" +- depends on !S390 && !NO_HZ_FULL ++ depends on !S390 && !NO_HZ_FULL && !SCHED_BFS + help + This is the basic tick based cputime accounting that maintains + statistics about user, system and idle time spent on per jiffies +@@ -325,7 +339,7 @@ + + config VIRT_CPU_ACCOUNTING_GEN + bool "Full dynticks CPU time accounting" +- depends on HAVE_CONTEXT_TRACKING && 64BIT ++ depends on HAVE_CONTEXT_TRACKING && 64BIT && !SCHED_BFS + select VIRT_CPU_ACCOUNTING + select CONTEXT_TRACKING + help +@@ -795,6 +809,7 @@ + depends on ARCH_SUPPORTS_NUMA_BALANCING + depends on !ARCH_WANT_NUMA_VARIABLE_LOCALITY + depends on SMP && NUMA && MIGRATION ++ depends on !SCHED_BFS + help + This option adds support for automatic NUMA aware memory/task placement. + The mechanism is quite primitive and is based on migrating memory when +@@ -857,6 +872,7 @@ + + config CGROUP_CPUACCT + bool "Simple CPU accounting cgroup subsystem" ++ depends on !SCHED_BFS + help + Provides a simple Resource Controller for monitoring the + total CPU consumed by the tasks in a cgroup. +@@ -959,6 +975,7 @@ + + menuconfig CGROUP_SCHED + bool "Group CPU scheduler" ++ depends on !SCHED_BFS + default n + help + This feature lets CPU scheduler recognize task groups and control CPU +@@ -1123,6 +1140,7 @@ + + config SCHED_AUTOGROUP + bool "Automatic process group scheduling" ++ depends on !SCHED_BFS + select EVENTFD + select CGROUPS + select CGROUP_SCHED +@@ -1526,38 +1544,8 @@ + + On non-ancient distros (post-2000 ones) N is usually a safe choice. + +-choice +- prompt "Choose SLAB allocator" +- default SLUB +- help +- This option allows to select a slab allocator. +- +-config SLAB +- bool "SLAB" +- help +- The regular slab allocator that is established and known to work +- well in all environments. It organizes cache hot objects in +- per cpu and per node queues. +- + config SLUB +- bool "SLUB (Unqueued Allocator)" +- help +- SLUB is a slab allocator that minimizes cache line usage +- instead of managing queues of cached objects (SLAB approach). +- Per cpu caching is realized using slabs of objects instead +- of queues of objects. SLUB can use memory efficiently +- and has enhanced diagnostics. SLUB is the default choice for +- a slab allocator. +- +-config SLOB +- depends on EXPERT +- bool "SLOB (Simple Allocator)" +- help +- SLOB replaces the stock allocator with a drastically simpler +- allocator. SLOB is generally more space efficient but +- does not perform as well on large systems. 
+- +-endchoice ++ def_bool y + + config MMAP_ALLOW_UNINITIALIZED + bool "Allow mmapped anonymous memory to be uninitialized" +Index: linux-3.10-ck1/init/main.c +=================================================================== +--- linux-3.10-ck1.orig/init/main.c 2013-07-09 17:28:57.127502084 +1000 ++++ linux-3.10-ck1/init/main.c 2013-07-09 17:29:00.839501924 +1000 +@@ -700,7 +700,6 @@ + return ret; + } + +- + extern initcall_t __initcall_start[]; + extern initcall_t __initcall0_start[]; + extern initcall_t __initcall1_start[]; +@@ -820,6 +819,8 @@ + + flush_delayed_fput(); + ++ print_scheduler_version(); ++ + if (ramdisk_execute_command) { + if (!run_init_process(ramdisk_execute_command)) + return 0; +Index: linux-3.10-ck1/kernel/delayacct.c +=================================================================== +--- linux-3.10-ck1.orig/kernel/delayacct.c 2013-07-09 17:28:57.202502081 +1000 ++++ linux-3.10-ck1/kernel/delayacct.c 2013-07-09 17:29:00.839501924 +1000 +@@ -133,7 +133,7 @@ + */ + t1 = tsk->sched_info.pcount; + t2 = tsk->sched_info.run_delay; +- t3 = tsk->se.sum_exec_runtime; ++ t3 = tsk_seruntime(tsk); + + d->cpu_count += t1; + +Index: linux-3.10-ck1/kernel/exit.c +=================================================================== +--- linux-3.10-ck1.orig/kernel/exit.c 2013-07-09 17:28:57.186502081 +1000 ++++ linux-3.10-ck1/kernel/exit.c 2013-07-09 17:29:00.839501924 +1000 +@@ -135,7 +135,7 @@ + sig->inblock += task_io_get_inblock(tsk); + sig->oublock += task_io_get_oublock(tsk); + task_io_accounting_add(&sig->ioac, &tsk->ioac); +- sig->sum_sched_runtime += tsk->se.sum_exec_runtime; ++ sig->sum_sched_runtime += tsk_seruntime(tsk); + } + + sig->nr_threads--; +Index: linux-3.10-ck1/kernel/posix-cpu-timers.c +=================================================================== +--- linux-3.10-ck1.orig/kernel/posix-cpu-timers.c 2013-07-09 17:28:57.182502082 +1000 ++++ linux-3.10-ck1/kernel/posix-cpu-timers.c 2013-07-09 17:29:00.840501924 +1000 +@@ -498,11 +498,11 @@ + { + cputime_t utime, stime; + +- add_device_randomness((const void*) &tsk->se.sum_exec_runtime, ++ add_device_randomness((const void*) &tsk_seruntime(tsk), + sizeof(unsigned long long)); + task_cputime(tsk, &utime, &stime); + cleanup_timers(tsk->cpu_timers, +- utime, stime, tsk->se.sum_exec_runtime); ++ utime, stime, tsk_seruntime(tsk)); + + } + void posix_cpu_timers_exit_group(struct task_struct *tsk) +@@ -513,7 +513,7 @@ + task_cputime(tsk, &utime, &stime); + cleanup_timers(tsk->signal->cpu_timers, + utime + sig->utime, stime + sig->stime, +- tsk->se.sum_exec_runtime + sig->sum_sched_runtime); ++ tsk_seruntime(tsk) + sig->sum_sched_runtime); + } + + static void clear_dead_task(struct k_itimer *timer, union cpu_time_count now) +@@ -976,7 +976,7 @@ + struct cpu_timer_list *t = list_first_entry(timers, + struct cpu_timer_list, + entry); +- if (!--maxfire || tsk->se.sum_exec_runtime < t->expires.sched) { ++ if (!--maxfire || tsk_seruntime(tsk) < t->expires.sched) { + tsk->cputime_expires.sched_exp = t->expires.sched; + break; + } +@@ -993,7 +993,7 @@ + ACCESS_ONCE(sig->rlim[RLIMIT_RTTIME].rlim_max); + + if (hard != RLIM_INFINITY && +- tsk->rt.timeout > DIV_ROUND_UP(hard, USEC_PER_SEC/HZ)) { ++ tsk_rttimeout(tsk) > DIV_ROUND_UP(hard, USEC_PER_SEC/HZ)) { + /* + * At the hard limit, we just die. + * No need to calculate anything else now. 
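All of the substitutions in the hunks above (delayacct, exit.c, the POSIX CPU timers) funnel through a single accessor macro, so the call sites need no knowledge of which scheduler was built in. A minimal compile-time sketch of that pattern, with toy structs standing in for the kernel's real ones:

    #include <stdio.h>

    /* Toy stand-ins for the two per-task runtime layouts. */
    struct sched_entity { unsigned long long sum_exec_runtime; };
    struct task {
        unsigned long long sched_time;  /* BFS-style flat counter */
        struct sched_entity se;         /* CFS-style entity */
    };

    #ifdef CONFIG_SCHED_BFS
    #define tsk_seruntime(t) ((t)->sched_time)
    #else
    #define tsk_seruntime(t) ((t)->se.sum_exec_runtime)
    #endif

    int main(void)
    {
        struct task t = { .sched_time = 100, .se = { 200 } };

        /* Call sites compile unchanged either way; built without
           -DCONFIG_SCHED_BFS this prints 200. */
        printf("%llu\n", tsk_seruntime(&t));
        return 0;
    }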
+@@ -1001,7 +1001,7 @@ + __group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk); + return; + } +- if (tsk->rt.timeout > DIV_ROUND_UP(soft, USEC_PER_SEC/HZ)) { ++ if (tsk_rttimeout(tsk) > DIV_ROUND_UP(soft, USEC_PER_SEC/HZ)) { + /* + * At the soft limit, send a SIGXCPU every second. + */ +@@ -1282,7 +1282,7 @@ + struct task_cputime task_sample = { + .utime = utime, + .stime = stime, +- .sum_exec_runtime = tsk->se.sum_exec_runtime ++ .sum_exec_runtime = tsk_seruntime(tsk) + }; + + if (task_cputime_expired(&task_sample, &tsk->cputime_expires)) +Index: linux-3.10-ck1/kernel/sysctl.c +=================================================================== +--- linux-3.10-ck1.orig/kernel/sysctl.c 2013-07-09 17:28:57.173502082 +1000 ++++ linux-3.10-ck1/kernel/sysctl.c 2013-07-09 17:29:00.840501924 +1000 +@@ -128,7 +128,12 @@ + static int __maybe_unused two = 2; + static int __maybe_unused three = 3; + static unsigned long one_ul = 1; +-static int one_hundred = 100; ++static int __maybe_unused one_hundred = 100; ++#ifdef CONFIG_SCHED_BFS ++extern int rr_interval; ++extern int sched_iso_cpu; ++static int __read_mostly one_thousand = 1000; ++#endif + #ifdef CONFIG_PRINTK + static int ten_thousand = 10000; + #endif +@@ -256,7 +261,7 @@ + { } + }; + +-#ifdef CONFIG_SCHED_DEBUG ++#if defined(CONFIG_SCHED_DEBUG) && !defined(CONFIG_SCHED_BFS) + static int min_sched_granularity_ns = 100000; /* 100 usecs */ + static int max_sched_granularity_ns = NSEC_PER_SEC; /* 1 second */ + static int min_wakeup_granularity_ns; /* 0 usecs */ +@@ -273,6 +278,7 @@ + #endif + + static struct ctl_table kern_table[] = { ++#ifndef CONFIG_SCHED_BFS + { + .procname = "sched_child_runs_first", + .data = &sysctl_sched_child_runs_first, +@@ -436,6 +442,7 @@ + .extra1 = &one, + }, + #endif ++#endif /* !CONFIG_SCHED_BFS */ + #ifdef CONFIG_PROVE_LOCKING + { + .procname = "prove_locking", +@@ -907,6 +914,26 @@ + .proc_handler = proc_dointvec, + }, + #endif ++#ifdef CONFIG_SCHED_BFS ++ { ++ .procname = "rr_interval", ++ .data = &rr_interval, ++ .maxlen = sizeof (int), ++ .mode = 0644, ++ .proc_handler = &proc_dointvec_minmax, ++ .extra1 = &one, ++ .extra2 = &one_thousand, ++ }, ++ { ++ .procname = "iso_cpu", ++ .data = &sched_iso_cpu, ++ .maxlen = sizeof (int), ++ .mode = 0644, ++ .proc_handler = &proc_dointvec_minmax, ++ .extra1 = &zero, ++ .extra2 = &one_hundred, ++ }, ++#endif + #if defined(CONFIG_S390) && defined(CONFIG_SMP) + { + .procname = "spin_retry", +Index: linux-3.10-ck1/lib/Kconfig.debug +=================================================================== +--- linux-3.10-ck1.orig/lib/Kconfig.debug 2013-07-09 17:28:57.137502083 +1000 ++++ linux-3.10-ck1/lib/Kconfig.debug 2013-07-09 17:29:00.840501924 +1000 +@@ -940,7 +940,7 @@ + + config RCU_TORTURE_TEST + tristate "torture tests for RCU" +- depends on DEBUG_KERNEL ++ depends on DEBUG_KERNEL && !SCHED_BFS + default n + help + This option provides a kernel module that runs torture tests +Index: linux-3.10-ck1/include/linux/jiffies.h +=================================================================== +--- linux-3.10-ck1.orig/include/linux/jiffies.h 2013-07-09 17:28:57.150502083 +1000 ++++ linux-3.10-ck1/include/linux/jiffies.h 2013-07-09 17:29:00.840501924 +1000 +@@ -159,7 +159,7 @@ + * Have the 32 bit jiffies value wrap 5 minutes after boot + * so jiffies wrap bugs show up earlier. 
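To make the two knobs registered in the sysctl table above concrete, here is a small sketch (run as root) that drives them through procfs; the values are arbitrary picks inside the documented 1-1000 and 0-100 ranges:

    #include <stdio.h>

    /* Write one integer to a procfs tunable; returns 0 on success. */
    static int write_tunable(const char *path, int val)
    {
        FILE *f = fopen(path, "w");

        if (!f)
            return -1;
        fprintf(f, "%d\n", val);
        return fclose(f);
    }

    int main(void)
    {
        /* Longer slices: more throughput, higher worst-case latency. */
        write_tunable("/proc/sys/kernel/rr_interval", 30);
        /* Let unprivileged ISO tasks use up to 90% cpu over the
           rolling five second average. */
        return write_tunable("/proc/sys/kernel/iso_cpu", 90);
    }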
+ */ +-#define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ)) ++#define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-10*HZ)) + + /* + * Change timeval to jiffies, trying to avoid the +Index: linux-3.10-ck1/drivers/cpufreq/cpufreq.c +=================================================================== +--- linux-3.10-ck1.orig/drivers/cpufreq/cpufreq.c 2013-07-09 17:28:57.224502080 +1000 ++++ linux-3.10-ck1/drivers/cpufreq/cpufreq.c 2013-07-09 17:29:00.841501924 +1000 +@@ -30,6 +30,7 @@ + #include + #include + #include ++#include + #include + + #include +@@ -1473,6 +1474,12 @@ + + if (cpufreq_driver->target) + retval = cpufreq_driver->target(policy, target_freq, relation); ++ if (likely(retval != -EINVAL)) { ++ if (target_freq == policy->max) ++ cpu_nonscaling(policy->cpu); ++ else ++ cpu_scaling(policy->cpu); ++ } + + return retval; + } +Index: linux-3.10-ck1/drivers/cpufreq/cpufreq_ondemand.c +=================================================================== +--- linux-3.10-ck1.orig/drivers/cpufreq/cpufreq_ondemand.c 2013-07-09 17:28:57.214502080 +1000 ++++ linux-3.10-ck1/drivers/cpufreq/cpufreq_ondemand.c 2013-07-09 17:29:00.841501924 +1000 +@@ -29,8 +29,8 @@ + #include "cpufreq_governor.h" + + /* On-demand governor macros */ +-#define DEF_FREQUENCY_DOWN_DIFFERENTIAL (10) +-#define DEF_FREQUENCY_UP_THRESHOLD (80) ++#define DEF_FREQUENCY_DOWN_DIFFERENTIAL (26) ++#define DEF_FREQUENCY_UP_THRESHOLD (63) + #define DEF_SAMPLING_DOWN_FACTOR (1) + #define MAX_SAMPLING_DOWN_FACTOR (100000) + #define MICRO_FREQUENCY_DOWN_DIFFERENTIAL (3) +@@ -160,10 +160,10 @@ + } + + /* +- * Every sampling_rate, we check, if current idle time is less than 20% ++ * Every sampling_rate, we check, if current idle time is less than 37% + * (default), then we try to increase frequency. Every sampling_rate, we look + * for the lowest frequency which can sustain the load while keeping idle time +- * over 30%. If such a frequency exist, we try to decrease to this frequency. ++ * over 63%. If such a frequency exist, we try to decrease to this frequency. + * + * Any frequency increase takes it to the maximum frequency. Frequency reduction + * happens at minimum steps of 5% (default) of current frequency +Index: linux-3.10-ck1/kernel/sched/bfs.c +=================================================================== +--- /dev/null 1970-01-01 00:00:00.000000000 +0000 ++++ linux-3.10-ck1/kernel/sched/bfs.c 2013-07-09 17:29:00.843501924 +1000 +@@ -0,0 +1,7423 @@ ++/* ++ * kernel/sched/bfs.c, was kernel/sched.c ++ * ++ * Kernel scheduler and related syscalls ++ * ++ * Copyright (C) 1991-2002 Linus Torvalds ++ * ++ * 1996-12-23 Modified by Dave Grothe to fix bugs in semaphores and ++ * make semaphores SMP safe ++ * 1998-11-19 Implemented schedule_timeout() and related stuff ++ * by Andrea Arcangeli ++ * 2002-01-04 New ultra-scalable O(1) scheduler by Ingo Molnar: ++ * hybrid priority-list and round-robin design with ++ * an array-switch method of distributing timeslices ++ * and per-CPU runqueues. Cleanups and useful suggestions ++ * by Davide Libenzi, preemptible kernel bits by Robert Love. ++ * 2003-09-03 Interactivity tuning by Con Kolivas. ++ * 2004-04-02 Scheduler domains code by Nick Piggin ++ * 2007-04-15 Work begun on replacing all interactivity tuning with a ++ * fair scheduling design by Con Kolivas. 
++ * 2007-05-05 Load balancing (smp-nice) and other improvements ++ * by Peter Williams ++ * 2007-05-06 Interactivity improvements to CFS by Mike Galbraith ++ * 2007-07-01 Group scheduling enhancements by Srivatsa Vaddagiri ++ * 2007-11-29 RT balancing improvements by Steven Rostedt, Gregory Haskins, ++ * Thomas Gleixner, Mike Kravetz ++ * now Brainfuck deadline scheduling policy by Con Kolivas deletes ++ * a whole lot of those previous things. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++#include ++#include ++#ifdef CONFIG_PARAVIRT ++#include ++#endif ++ ++#include "cpupri.h" ++#include "../workqueue_internal.h" ++#include "../smpboot.h" ++ ++#define CREATE_TRACE_POINTS ++#include ++ ++#define rt_prio(prio) unlikely((prio) < MAX_RT_PRIO) ++#define rt_task(p) rt_prio((p)->prio) ++#define rt_queue(rq) rt_prio((rq)->rq_prio) ++#define batch_task(p) (unlikely((p)->policy == SCHED_BATCH)) ++#define is_rt_policy(policy) ((policy) == SCHED_FIFO || \ ++ (policy) == SCHED_RR) ++#define has_rt_policy(p) unlikely(is_rt_policy((p)->policy)) ++#define idleprio_task(p) unlikely((p)->policy == SCHED_IDLEPRIO) ++#define iso_task(p) unlikely((p)->policy == SCHED_ISO) ++#define iso_queue(rq) unlikely((rq)->rq_policy == SCHED_ISO) ++#define rq_running_iso(rq) ((rq)->rq_prio == ISO_PRIO) ++ ++#define ISO_PERIOD ((5 * HZ * grq.noc) + 1) ++ ++/* ++ * Convert user-nice values [ -20 ... 0 ... 19 ] ++ * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ], ++ * and back. ++ */ ++#define NICE_TO_PRIO(nice) (MAX_RT_PRIO + (nice) + 20) ++#define PRIO_TO_NICE(prio) ((prio) - MAX_RT_PRIO - 20) ++#define TASK_NICE(p) PRIO_TO_NICE((p)->static_prio) ++ ++/* ++ * 'User priority' is the nice value converted to something we ++ * can work with better when scaling various scheduler parameters, ++ * it's a [ 0 ... 39 ] range. ++ */ ++#define USER_PRIO(p) ((p) - MAX_RT_PRIO) ++#define TASK_USER_PRIO(p) USER_PRIO((p)->static_prio) ++#define MAX_USER_PRIO (USER_PRIO(MAX_PRIO)) ++#define SCHED_PRIO(p) ((p) + MAX_RT_PRIO) ++#define STOP_PRIO (MAX_RT_PRIO - 1) ++ ++/* ++ * Some helpers for converting to/from various scales. Use shifts to get ++ * approximate multiples of ten for less overhead. ++ */ ++#define JIFFIES_TO_NS(TIME) ((TIME) * (1000000000 / HZ)) ++#define JIFFY_NS (1000000000 / HZ) ++#define HALF_JIFFY_NS (1000000000 / HZ / 2) ++#define HALF_JIFFY_US (1000000 / HZ / 2) ++#define MS_TO_NS(TIME) ((TIME) << 20) ++#define MS_TO_US(TIME) ((TIME) << 10) ++#define NS_TO_MS(TIME) ((TIME) >> 20) ++#define NS_TO_US(TIME) ((TIME) >> 10) ++ ++#define RESCHED_US (100) /* Reschedule if less than this many μs left */ ++ ++void print_scheduler_version(void) ++{ ++ printk(KERN_INFO "BFS CPU scheduler v0.440 by Con Kolivas.\n"); ++} ++ ++/* ++ * This is the time all tasks within the same priority round robin. ++ * Value is in ms and set to a minimum of 6ms. Scales with number of cpus. ++ * Tunable via /proc interface. 
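The conversion macros above can be exercised as plain C; this standalone check assumes MAX_RT_PRIO is 100 (its stock 3.10 value) and shows the deliberate imprecision of the shift-based MS_TO_NS:

    #include <stdio.h>

    #define MAX_RT_PRIO 100 /* stock 3.10 value, assumed */
    #define NICE_TO_PRIO(nice) (MAX_RT_PRIO + (nice) + 20)
    #define PRIO_TO_NICE(prio) ((prio) - MAX_RT_PRIO - 20)
    #define MS_TO_NS(TIME) ((TIME) << 20)

    int main(void)
    {
        /* nice -20..19 maps onto static priorities 100..139 */
        printf("prio(-20)=%d prio(0)=%d prio(19)=%d\n",
               NICE_TO_PRIO(-20), NICE_TO_PRIO(0), NICE_TO_PRIO(19));
        printf("nice(120)=%d\n", PRIO_TO_NICE(120));
        /* shift approximation: 6 ms -> 6291456 ns, not 6000000 */
        printf("MS_TO_NS(6)=%d\n", MS_TO_NS(6));
        return 0;
    }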
++ */
++int rr_interval __read_mostly = 6;
++
++/*
++ * sched_iso_cpu - sysctl which determines the cpu percentage SCHED_ISO tasks
++ * are allowed to run effectively at realtime priority, averaged over a
++ * rolling five seconds. This is the total over all online cpus.
++ */
++int sched_iso_cpu __read_mostly = 70;
++
++/*
++ * The relative length of deadline for each priority(nice) level.
++ */
++static int prio_ratios[PRIO_RANGE] __read_mostly;
++
++/*
++ * The quota handed out to tasks of all priority levels when refilling their
++ * time_slice.
++ */
++static inline int timeslice(void)
++{
++        return MS_TO_US(rr_interval);
++}
++
++/*
++ * The global runqueue data that all CPUs work off. Data is protected either
++ * by the global grq lock, or the discrete lock that precedes the data in this
++ * struct.
++ */
++struct global_rq {
++        raw_spinlock_t lock;
++        unsigned long nr_running;
++        unsigned long nr_uninterruptible;
++        unsigned long long nr_switches;
++        struct list_head queue[PRIO_LIMIT];
++        DECLARE_BITMAP(prio_bitmap, PRIO_LIMIT + 1);
++#ifdef CONFIG_SMP
++        unsigned long qnr; /* queued not running */
++        cpumask_t cpu_idle_map;
++        bool idle_cpus;
++#endif
++        int noc; /* num_online_cpus stored and updated when it changes */
++        u64 niffies; /* Nanosecond jiffies */
++        unsigned long last_jiffy; /* Last jiffy we updated niffies */
++
++        raw_spinlock_t iso_lock;
++        int iso_ticks;
++        bool iso_refractory;
++};
++
++#ifdef CONFIG_SMP
++
++/*
++ * We add the notion of a root-domain which will be used to define per-domain
++ * variables. Each exclusive cpuset essentially defines an island domain by
++ * fully partitioning the member cpus from any other cpuset. Whenever a new
++ * exclusive cpuset is created, we also create and attach a new root-domain
++ * object.
++ *
++ */
++struct root_domain {
++        atomic_t refcount;
++        atomic_t rto_count;
++        struct rcu_head rcu;
++        cpumask_var_t span;
++        cpumask_var_t online;
++
++        /*
++         * The "RT overload" flag: it gets set if a CPU has more than
++         * one runnable RT task.
++         */
++        cpumask_var_t rto_mask;
++        struct cpupri cpupri;
++};
++
++/*
++ * By default the system creates a single root-domain with all cpus as
++ * members (mimicking the global state we have today).
++ */
++static struct root_domain def_root_domain;
++
++#endif /* CONFIG_SMP */
++
++/* There can be only one */
++static struct global_rq grq;
++
++/*
++ * This is the main, per-CPU runqueue data structure.
++ * This data should only be modified by the local cpu.
++ */ ++struct rq { ++ struct task_struct *curr, *idle, *stop; ++ struct mm_struct *prev_mm; ++ ++ /* Stored data about rq->curr to work outside grq lock */ ++ u64 rq_deadline; ++ unsigned int rq_policy; ++ int rq_time_slice; ++ u64 rq_last_ran; ++ int rq_prio; ++ bool rq_running; /* There is a task running */ ++ ++ /* Accurate timekeeping data */ ++ u64 timekeep_clock; ++ unsigned long user_pc, nice_pc, irq_pc, softirq_pc, system_pc, ++ iowait_pc, idle_pc; ++ atomic_t nr_iowait; ++ ++#ifdef CONFIG_SMP ++ int cpu; /* cpu of this runqueue */ ++ bool online; ++ bool scaling; /* This CPU is managed by a scaling CPU freq governor */ ++ struct task_struct *sticky_task; ++ ++ struct root_domain *rd; ++ struct sched_domain *sd; ++ int *cpu_locality; /* CPU relative cache distance */ ++#ifdef CONFIG_SCHED_SMT ++ bool (*siblings_idle)(int cpu); ++ /* See if all smt siblings are idle */ ++ cpumask_t smt_siblings; ++#endif /* CONFIG_SCHED_SMT */ ++#ifdef CONFIG_SCHED_MC ++ bool (*cache_idle)(int cpu); ++ /* See if all cache siblings are idle */ ++ cpumask_t cache_siblings; ++#endif /* CONFIG_SCHED_MC */ ++ u64 last_niffy; /* Last time this RQ updated grq.niffies */ ++#endif /* CONFIG_SMP */ ++#ifdef CONFIG_IRQ_TIME_ACCOUNTING ++ u64 prev_irq_time; ++#endif /* CONFIG_IRQ_TIME_ACCOUNTING */ ++#ifdef CONFIG_PARAVIRT ++ u64 prev_steal_time; ++#endif /* CONFIG_PARAVIRT */ ++#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING ++ u64 prev_steal_time_rq; ++#endif /* CONFIG_PARAVIRT_TIME_ACCOUNTING */ ++ ++ u64 clock, old_clock, last_tick; ++ u64 clock_task; ++ bool dither; ++ ++#ifdef CONFIG_SCHEDSTATS ++ ++ /* latency stats */ ++ struct sched_info rq_sched_info; ++ unsigned long long rq_cpu_time; ++ /* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */ ++ ++ /* sys_sched_yield() stats */ ++ unsigned int yld_count; ++ ++ /* schedule() stats */ ++ unsigned int sched_switch; ++ unsigned int sched_count; ++ unsigned int sched_goidle; ++ ++ /* try_to_wake_up() stats */ ++ unsigned int ttwu_count; ++ unsigned int ttwu_local; ++#endif /* CONFIG_SCHEDSTATS */ ++}; ++ ++DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues); ++static DEFINE_MUTEX(sched_hotcpu_mutex); ++ ++#ifdef CONFIG_SMP ++#define cpu_rq(cpu) (&per_cpu(runqueues, (cpu))) ++#define this_rq() (&__get_cpu_var(runqueues)) ++#define task_rq(p) cpu_rq(task_cpu(p)) ++#define cpu_curr(cpu) (cpu_rq(cpu)->curr) ++/* ++ * sched_domains_mutex serialises calls to init_sched_domains, ++ * detach_destroy_domains and partition_sched_domains. ++ */ ++static DEFINE_MUTEX(sched_domains_mutex); ++ ++/* ++ * By default the system creates a single root-domain with all cpus as ++ * members (mimicking the global state we have today). ++ */ ++static struct root_domain def_root_domain; ++ ++int __weak arch_sd_sibling_asym_packing(void) ++{ ++ return 0*SD_ASYM_PACKING; ++} ++#endif /* CONFIG_SMP */ ++ ++#define rcu_dereference_check_sched_domain(p) \ ++ rcu_dereference_check((p), \ ++ lockdep_is_held(&sched_domains_mutex)) ++ ++/* ++ * The domain tree (rq->sd) is protected by RCU's quiescent state transition. ++ * See detach_destroy_domains: synchronize_sched for details. ++ * ++ * The domain tree of any CPU may only be accessed from within ++ * preempt-disabled sections. ++ */ ++#define for_each_domain(cpu, __sd) \ ++ for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); __sd; __sd = __sd->parent) ++ ++static inline void update_rq_clock(struct rq *rq); ++ ++/* ++ * Sanity check should sched_clock return bogus values. 
We make sure it does ++ * not appear to go backwards, and use jiffies to determine the maximum and ++ * minimum it could possibly have increased, and round down to the nearest ++ * jiffy when it falls outside this. ++ */ ++static inline void niffy_diff(s64 *niff_diff, int jiff_diff) ++{ ++ unsigned long min_diff, max_diff; ++ ++ if (jiff_diff > 1) ++ min_diff = JIFFIES_TO_NS(jiff_diff - 1); ++ else ++ min_diff = 1; ++ /* Round up to the nearest tick for maximum */ ++ max_diff = JIFFIES_TO_NS(jiff_diff + 1); ++ ++ if (unlikely(*niff_diff < min_diff || *niff_diff > max_diff)) ++ *niff_diff = min_diff; ++} ++ ++#ifdef CONFIG_SMP ++static inline int cpu_of(struct rq *rq) ++{ ++ return rq->cpu; ++} ++ ++/* ++ * Niffies are a globally increasing nanosecond counter. Whenever a runqueue ++ * clock is updated with the grq.lock held, it is an opportunity to update the ++ * niffies value. Any CPU can update it by adding how much its clock has ++ * increased since it last updated niffies, minus any added niffies by other ++ * CPUs. ++ */ ++static inline void update_clocks(struct rq *rq) ++{ ++ s64 ndiff; ++ long jdiff; ++ ++ update_rq_clock(rq); ++ ndiff = rq->clock - rq->old_clock; ++ /* old_clock is only updated when we are updating niffies */ ++ rq->old_clock = rq->clock; ++ ndiff -= grq.niffies - rq->last_niffy; ++ jdiff = jiffies - grq.last_jiffy; ++ niffy_diff(&ndiff, jdiff); ++ grq.last_jiffy += jdiff; ++ grq.niffies += ndiff; ++ rq->last_niffy = grq.niffies; ++} ++#else /* CONFIG_SMP */ ++static struct rq *uprq; ++#define cpu_rq(cpu) (uprq) ++#define this_rq() (uprq) ++#define task_rq(p) (uprq) ++#define cpu_curr(cpu) ((uprq)->curr) ++static inline int cpu_of(struct rq *rq) ++{ ++ return 0; ++} ++ ++static inline void update_clocks(struct rq *rq) ++{ ++ s64 ndiff; ++ long jdiff; ++ ++ update_rq_clock(rq); ++ ndiff = rq->clock - rq->old_clock; ++ rq->old_clock = rq->clock; ++ jdiff = jiffies - grq.last_jiffy; ++ niffy_diff(&ndiff, jdiff); ++ grq.last_jiffy += jdiff; ++ grq.niffies += ndiff; ++} ++#endif ++#define raw_rq() (&__raw_get_cpu_var(runqueues)) ++ ++#include "stats.h" ++ ++#ifndef prepare_arch_switch ++# define prepare_arch_switch(next) do { } while (0) ++#endif ++#ifndef finish_arch_switch ++# define finish_arch_switch(prev) do { } while (0) ++#endif ++#ifndef finish_arch_post_lock_switch ++# define finish_arch_post_lock_switch() do { } while (0) ++#endif ++ ++/* ++ * All common locking functions performed on grq.lock. rq->clock is local to ++ * the CPU accessing it so it can be modified just with interrupts disabled ++ * when we're not updating niffies. ++ * Looking up task_rq must be done under grq.lock to be safe. 
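The clamp is easy to sanity-check in isolation. This sketch lifts niffy_diff nearly verbatim (the unlikely() hint dropped, HZ pinned to 1000 for the demo) and feeds it a clock delta that claims time went backwards:

    #include <stdio.h>

    #define HZ 1000 /* arbitrary demo value */
    #define JIFFIES_TO_NS(TIME) ((TIME) * (1000000000 / HZ))
    typedef long long s64;

    /* Clamp a clock delta to what the elapsed jiffies allow. */
    static void niffy_diff(s64 *niff_diff, int jiff_diff)
    {
        unsigned long min_diff, max_diff;

        if (jiff_diff > 1)
            min_diff = JIFFIES_TO_NS(jiff_diff - 1);
        else
            min_diff = 1;
        /* Round up to the nearest tick for maximum */
        max_diff = JIFFIES_TO_NS(jiff_diff + 1);

        if (*niff_diff < min_diff || *niff_diff > max_diff)
            *niff_diff = min_diff;
    }

    int main(void)
    {
        s64 d = -5000000; /* sched_clock claims time went backwards */

        niffy_diff(&d, 3); /* but about three jiffies elapsed */
        printf("clamped to %lld ns\n", d); /* 2000000 */
        return 0;
    }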
++ */ ++static void update_rq_clock_task(struct rq *rq, s64 delta); ++ ++static inline void update_rq_clock(struct rq *rq) ++{ ++ s64 delta = sched_clock_cpu(cpu_of(rq)) - rq->clock; ++ ++ rq->clock += delta; ++ update_rq_clock_task(rq, delta); ++} ++ ++static inline bool task_running(struct task_struct *p) ++{ ++ return p->on_cpu; ++} ++ ++static inline void grq_lock(void) ++ __acquires(grq.lock) ++{ ++ raw_spin_lock(&grq.lock); ++} ++ ++static inline void grq_unlock(void) ++ __releases(grq.lock) ++{ ++ raw_spin_unlock(&grq.lock); ++} ++ ++static inline void grq_lock_irq(void) ++ __acquires(grq.lock) ++{ ++ raw_spin_lock_irq(&grq.lock); ++} ++ ++static inline void time_lock_grq(struct rq *rq) ++ __acquires(grq.lock) ++{ ++ grq_lock(); ++ update_clocks(rq); ++} ++ ++static inline void grq_unlock_irq(void) ++ __releases(grq.lock) ++{ ++ raw_spin_unlock_irq(&grq.lock); ++} ++ ++static inline void grq_lock_irqsave(unsigned long *flags) ++ __acquires(grq.lock) ++{ ++ raw_spin_lock_irqsave(&grq.lock, *flags); ++} ++ ++static inline void grq_unlock_irqrestore(unsigned long *flags) ++ __releases(grq.lock) ++{ ++ raw_spin_unlock_irqrestore(&grq.lock, *flags); ++} ++ ++static inline struct rq ++*task_grq_lock(struct task_struct *p, unsigned long *flags) ++ __acquires(grq.lock) ++{ ++ grq_lock_irqsave(flags); ++ return task_rq(p); ++} ++ ++static inline struct rq ++*time_task_grq_lock(struct task_struct *p, unsigned long *flags) ++ __acquires(grq.lock) ++{ ++ struct rq *rq = task_grq_lock(p, flags); ++ update_clocks(rq); ++ return rq; ++} ++ ++static inline struct rq *task_grq_lock_irq(struct task_struct *p) ++ __acquires(grq.lock) ++{ ++ grq_lock_irq(); ++ return task_rq(p); ++} ++ ++static inline void time_task_grq_lock_irq(struct task_struct *p) ++ __acquires(grq.lock) ++{ ++ struct rq *rq = task_grq_lock_irq(p); ++ update_clocks(rq); ++} ++ ++static inline void task_grq_unlock_irq(void) ++ __releases(grq.lock) ++{ ++ grq_unlock_irq(); ++} ++ ++static inline void task_grq_unlock(unsigned long *flags) ++ __releases(grq.lock) ++{ ++ grq_unlock_irqrestore(flags); ++} ++ ++/** ++ * grunqueue_is_locked ++ * ++ * Returns true if the global runqueue is locked. ++ * This interface allows printk to be called with the runqueue lock ++ * held and know whether or not it is OK to wake up the klogd. ++ */ ++bool grunqueue_is_locked(void) ++{ ++ return raw_spin_is_locked(&grq.lock); ++} ++ ++void grq_unlock_wait(void) ++ __releases(grq.lock) ++{ ++ smp_mb(); /* spin-unlock-wait is not a full memory barrier */ ++ raw_spin_unlock_wait(&grq.lock); ++} ++ ++static inline void time_grq_lock(struct rq *rq, unsigned long *flags) ++ __acquires(grq.lock) ++{ ++ local_irq_save(*flags); ++ time_lock_grq(rq); ++} ++ ++static inline struct rq *__task_grq_lock(struct task_struct *p) ++ __acquires(grq.lock) ++{ ++ grq_lock(); ++ return task_rq(p); ++} ++ ++static inline void __task_grq_unlock(void) ++ __releases(grq.lock) ++{ ++ grq_unlock(); ++} ++ ++/* ++ * Look for any tasks *anywhere* that are running nice 0 or better. We do ++ * this lockless for overhead reasons since the occasional wrong result ++ * is harmless. 
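All of these helpers wrap the one global lock that defines BFS's central tradeoff: simplicity and low latency on ordinary machines against contention on very large ones (hence the Kconfig note about 4096 CPUs). A loose userspace analogue of the pattern, with a pthread mutex standing in for the raw spinlock (not kernel code; link with -pthread):

    #include <pthread.h>
    #include <stdio.h>

    /* One global lock protects the entire shared queue state. */
    static pthread_mutex_t grq_lock_m = PTHREAD_MUTEX_INITIALIZER;
    static int nr_running;

    static void grq_lock(void)   { pthread_mutex_lock(&grq_lock_m); }
    static void grq_unlock(void) { pthread_mutex_unlock(&grq_lock_m); }

    static void enqueue_one(void)
    {
        /* Every queue operation, from any thread, takes the same
           lock: race-free by construction, at the cost of contention
           as the number of cpus grows. */
        grq_lock();
        nr_running++;
        grq_unlock();
    }

    int main(void)
    {
        enqueue_one();
        printf("%d\n", nr_running);
        return 0;
    }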
++ */ ++bool above_background_load(void) ++{ ++ int cpu; ++ ++ for_each_online_cpu(cpu) { ++ struct task_struct *cpu_curr = cpu_rq(cpu)->curr; ++ ++ if (unlikely(!cpu_curr)) ++ continue; ++ if (PRIO_TO_NICE(cpu_curr->static_prio) < 1) { ++ return true; ++ } ++ } ++ return false; ++} ++ ++#ifndef __ARCH_WANT_UNLOCKED_CTXSW ++static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next) ++{ ++} ++ ++static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev) ++{ ++#ifdef CONFIG_DEBUG_SPINLOCK ++ /* this is a valid case when another task releases the spinlock */ ++ grq.lock.owner = current; ++#endif ++ /* ++ * If we are tracking spinlock dependencies then we have to ++ * fix up the runqueue lock - which gets 'carried over' from ++ * prev into current: ++ */ ++ spin_acquire(&grq.lock.dep_map, 0, 0, _THIS_IP_); ++ ++ grq_unlock_irq(); ++} ++ ++#else /* __ARCH_WANT_UNLOCKED_CTXSW */ ++ ++static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next) ++{ ++#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW ++ grq_unlock_irq(); ++#else ++ grq_unlock(); ++#endif ++} ++ ++static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev) ++{ ++ smp_wmb(); ++#ifndef __ARCH_WANT_INTERRUPTS_ON_CTXSW ++ local_irq_enable(); ++#endif ++} ++#endif /* __ARCH_WANT_UNLOCKED_CTXSW */ ++ ++static inline bool deadline_before(u64 deadline, u64 time) ++{ ++ return (deadline < time); ++} ++ ++static inline bool deadline_after(u64 deadline, u64 time) ++{ ++ return (deadline > time); ++} ++ ++/* ++ * A task that is queued but not running will be on the grq run list. ++ * A task that is not running or queued will not be on the grq run list. ++ * A task that is currently running will have ->on_cpu set but not on the ++ * grq run list. ++ */ ++static inline bool task_queued(struct task_struct *p) ++{ ++ return (!list_empty(&p->run_list)); ++} ++ ++/* ++ * Removing from the global runqueue. Enter with grq locked. ++ */ ++static void dequeue_task(struct task_struct *p) ++{ ++ list_del_init(&p->run_list); ++ if (list_empty(grq.queue + p->prio)) ++ __clear_bit(p->prio, grq.prio_bitmap); ++} ++ ++/* ++ * To determine if it's safe for a task of SCHED_IDLEPRIO to actually run as ++ * an idle task, we ensure none of the following conditions are met. ++ */ ++static bool idleprio_suitable(struct task_struct *p) ++{ ++ return (!freezing(p) && !signal_pending(p) && ++ !(task_contributes_to_load(p)) && !(p->flags & (PF_EXITING))); ++} ++ ++/* ++ * To determine if a task of SCHED_ISO can run in pseudo-realtime, we check ++ * that the iso_refractory flag is not set. ++ */ ++static bool isoprio_suitable(void) ++{ ++ return !grq.iso_refractory; ++} ++ ++/* ++ * Adding to the global runqueue. Enter with grq locked. 
++ */ ++static void enqueue_task(struct task_struct *p) ++{ ++ if (!rt_task(p)) { ++ /* Check it hasn't gotten rt from PI */ ++ if ((idleprio_task(p) && idleprio_suitable(p)) || ++ (iso_task(p) && isoprio_suitable())) ++ p->prio = p->normal_prio; ++ else ++ p->prio = NORMAL_PRIO; ++ } ++ __set_bit(p->prio, grq.prio_bitmap); ++ list_add_tail(&p->run_list, grq.queue + p->prio); ++ sched_info_queued(p); ++} ++ ++/* Only idle task does this as a real time task*/ ++static inline void enqueue_task_head(struct task_struct *p) ++{ ++ __set_bit(p->prio, grq.prio_bitmap); ++ list_add(&p->run_list, grq.queue + p->prio); ++ sched_info_queued(p); ++} ++ ++static inline void requeue_task(struct task_struct *p) ++{ ++ sched_info_queued(p); ++} ++ ++/* ++ * Returns the relative length of deadline all compared to the shortest ++ * deadline which is that of nice -20. ++ */ ++static inline int task_prio_ratio(struct task_struct *p) ++{ ++ return prio_ratios[TASK_USER_PRIO(p)]; ++} ++ ++/* ++ * task_timeslice - all tasks of all priorities get the exact same timeslice ++ * length. CPU distribution is handled by giving different deadlines to ++ * tasks of different priorities. Use 128 as the base value for fast shifts. ++ */ ++static inline int task_timeslice(struct task_struct *p) ++{ ++ return (rr_interval * task_prio_ratio(p) / 128); ++} ++ ++#ifdef CONFIG_SMP ++/* ++ * qnr is the "queued but not running" count which is the total number of ++ * tasks on the global runqueue list waiting for cpu time but not actually ++ * currently running on a cpu. ++ */ ++static inline void inc_qnr(void) ++{ ++ grq.qnr++; ++} ++ ++static inline void dec_qnr(void) ++{ ++ grq.qnr--; ++} ++ ++static inline int queued_notrunning(void) ++{ ++ return grq.qnr; ++} ++ ++/* ++ * The cpu_idle_map stores a bitmap of all the CPUs currently idle to ++ * allow easy lookup of whether any suitable idle CPUs are available. ++ * It's cheaper to maintain a binary yes/no if there are any idle CPUs on the ++ * idle_cpus variable than to do a full bitmask check when we are busy. ++ */ ++static inline void set_cpuidle_map(int cpu) ++{ ++ if (likely(cpu_online(cpu))) { ++ cpu_set(cpu, grq.cpu_idle_map); ++ grq.idle_cpus = true; ++ } ++} ++ ++static inline void clear_cpuidle_map(int cpu) ++{ ++ cpu_clear(cpu, grq.cpu_idle_map); ++ if (cpus_empty(grq.cpu_idle_map)) ++ grq.idle_cpus = false; ++} ++ ++static bool suitable_idle_cpus(struct task_struct *p) ++{ ++ if (!grq.idle_cpus) ++ return false; ++ return (cpus_intersects(p->cpus_allowed, grq.cpu_idle_map)); ++} ++ ++#define CPUIDLE_DIFF_THREAD (1) ++#define CPUIDLE_DIFF_CORE (2) ++#define CPUIDLE_CACHE_BUSY (4) ++#define CPUIDLE_DIFF_CPU (8) ++#define CPUIDLE_THREAD_BUSY (16) ++#define CPUIDLE_DIFF_NODE (32) ++ ++static void resched_task(struct task_struct *p); ++ ++/* ++ * The best idle CPU is chosen according to the CPUIDLE ranking above where the ++ * lowest value would give the most suitable CPU to schedule p onto next. The ++ * order works out to be the following: ++ * ++ * Same core, idle or busy cache, idle or busy threads ++ * Other core, same cache, idle or busy cache, idle threads. ++ * Same node, other CPU, idle cache, idle threads. ++ * Same node, other CPU, busy cache, idle threads. ++ * Other core, same cache, busy threads. ++ * Same node, other CPU, busy threads. ++ * Other node, other CPU, idle cache, idle threads. ++ * Other node, other CPU, busy cache, idle threads. ++ * Other node, other CPU, busy threads. 
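To make the 128-based arithmetic concrete: elsewhere in the patch prio_ratios[] is initialised so each nice level's ratio sits roughly 10% above the previous one; that initialisation is outside this hunk, so the 11/10 rule below is an assumption. With it, task_timeslice()-style values fall out as:

    #include <stdio.h>

    #define PRIO_RANGE 40 /* nice -20..19 */

    int main(void)
    {
        int prio_ratios[PRIO_RANGE], i, rr_interval = 6;

        /* Assumed init rule: each level ~10% above the previous. */
        prio_ratios[0] = 128;
        for (i = 1; i < PRIO_RANGE; i++)
            prio_ratios[i] = prio_ratios[i - 1] * 11 / 10;

        /* rr_interval * ratio / 128, as task_timeslice() computes. */
        for (i = 0; i < PRIO_RANGE; i += 13)
            printf("user prio %2d: ratio %4d -> %d ms\n",
                   i, prio_ratios[i],
                   rr_interval * prio_ratios[i] / 128);
        return 0;
    }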
++ */ ++static void ++resched_best_mask(int best_cpu, struct rq *rq, cpumask_t *tmpmask) ++{ ++ unsigned int best_ranking = CPUIDLE_DIFF_NODE | CPUIDLE_THREAD_BUSY | ++ CPUIDLE_DIFF_CPU | CPUIDLE_CACHE_BUSY | CPUIDLE_DIFF_CORE | ++ CPUIDLE_DIFF_THREAD; ++ int cpu_tmp; ++ ++ if (cpu_isset(best_cpu, *tmpmask)) ++ goto out; ++ ++ for_each_cpu_mask(cpu_tmp, *tmpmask) { ++ unsigned int ranking; ++ struct rq *tmp_rq; ++ ++ ranking = 0; ++ tmp_rq = cpu_rq(cpu_tmp); ++ ++#ifdef CONFIG_NUMA ++ if (rq->cpu_locality[cpu_tmp] > 3) ++ ranking |= CPUIDLE_DIFF_NODE; ++ else ++#endif ++ if (rq->cpu_locality[cpu_tmp] > 2) ++ ranking |= CPUIDLE_DIFF_CPU; ++#ifdef CONFIG_SCHED_MC ++ if (rq->cpu_locality[cpu_tmp] == 2) ++ ranking |= CPUIDLE_DIFF_CORE; ++ if (!(tmp_rq->cache_idle(cpu_tmp))) ++ ranking |= CPUIDLE_CACHE_BUSY; ++#endif ++#ifdef CONFIG_SCHED_SMT ++ if (rq->cpu_locality[cpu_tmp] == 1) ++ ranking |= CPUIDLE_DIFF_THREAD; ++ if (!(tmp_rq->siblings_idle(cpu_tmp))) ++ ranking |= CPUIDLE_THREAD_BUSY; ++#endif ++ if (ranking < best_ranking) { ++ best_cpu = cpu_tmp; ++ best_ranking = ranking; ++ } ++ } ++out: ++ resched_task(cpu_rq(best_cpu)->curr); ++} ++ ++bool cpus_share_cache(int this_cpu, int that_cpu) ++{ ++ struct rq *this_rq = cpu_rq(this_cpu); ++ ++ return (this_rq->cpu_locality[that_cpu] < 3); ++} ++ ++static void resched_best_idle(struct task_struct *p) ++{ ++ cpumask_t tmpmask; ++ ++ cpus_and(tmpmask, p->cpus_allowed, grq.cpu_idle_map); ++ resched_best_mask(task_cpu(p), task_rq(p), &tmpmask); ++} ++ ++static inline void resched_suitable_idle(struct task_struct *p) ++{ ++ if (suitable_idle_cpus(p)) ++ resched_best_idle(p); ++} ++/* ++ * Flags to tell us whether this CPU is running a CPU frequency governor that ++ * has slowed its speed or not. No locking required as the very rare wrongly ++ * read value would be harmless. ++ */ ++void cpu_scaling(int cpu) ++{ ++ cpu_rq(cpu)->scaling = true; ++} ++ ++void cpu_nonscaling(int cpu) ++{ ++ cpu_rq(cpu)->scaling = false; ++} ++ ++static inline bool scaling_rq(struct rq *rq) ++{ ++ return rq->scaling; ++} ++ ++static inline int locality_diff(struct task_struct *p, struct rq *rq) ++{ ++ return rq->cpu_locality[task_cpu(p)]; ++} ++#else /* CONFIG_SMP */ ++static inline void inc_qnr(void) ++{ ++} ++ ++static inline void dec_qnr(void) ++{ ++} ++ ++static inline int queued_notrunning(void) ++{ ++ return grq.nr_running; ++} ++ ++static inline void set_cpuidle_map(int cpu) ++{ ++} ++ ++static inline void clear_cpuidle_map(int cpu) ++{ ++} ++ ++static inline bool suitable_idle_cpus(struct task_struct *p) ++{ ++ return uprq->curr == uprq->idle; ++} ++ ++static inline void resched_suitable_idle(struct task_struct *p) ++{ ++} ++ ++void cpu_scaling(int __unused) ++{ ++} ++ ++void cpu_nonscaling(int __unused) ++{ ++} ++ ++/* ++ * Although CPUs can scale in UP, there is nowhere else for tasks to go so this ++ * always returns 0. ++ */ ++static inline bool scaling_rq(struct rq *rq) ++{ ++ return false; ++} ++ ++static inline int locality_diff(struct task_struct *p, struct rq *rq) ++{ ++ return 0; ++} ++#endif /* CONFIG_SMP */ ++EXPORT_SYMBOL_GPL(cpu_scaling); ++EXPORT_SYMBOL_GPL(cpu_nonscaling); ++ ++/* ++ * activate_idle_task - move idle task to the _front_ of runqueue. 
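Because each flag doubles the previous one, any single "worse" property outranks all "better" ones combined, which is what yields the ordering listed above. A toy evaluation of the ranking with invented locality values:

    #include <stdio.h>

    #define CPUIDLE_DIFF_THREAD  (1)
    #define CPUIDLE_DIFF_CORE    (2)
    #define CPUIDLE_CACHE_BUSY   (4)
    #define CPUIDLE_DIFF_CPU     (8)
    #define CPUIDLE_THREAD_BUSY  (16)
    #define CPUIDLE_DIFF_NODE    (32)

    /* Toy version of the ranking loop for one candidate cpu. */
    static unsigned int rank(int locality, int cache_idle, int sibling_idle)
    {
        unsigned int r = 0;

        if (locality > 3)
            r |= CPUIDLE_DIFF_NODE;
        else if (locality > 2)
            r |= CPUIDLE_DIFF_CPU;
        if (locality == 2)
            r |= CPUIDLE_DIFF_CORE;
        if (!cache_idle)
            r |= CPUIDLE_CACHE_BUSY;
        if (locality == 1)
            r |= CPUIDLE_DIFF_THREAD;
        if (!sibling_idle)
            r |= CPUIDLE_THREAD_BUSY;
        return r;
    }

    int main(void)
    {
        /* An SMT sibling with a busy cache (5) still beats an idle
           cpu on another node (32); lower ranking wins. */
        printf("sibling: %u, other node: %u\n",
               rank(1, 0, 1), rank(4, 1, 1));
        return 0;
    }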
++ */ ++static inline void activate_idle_task(struct task_struct *p) ++{ ++ enqueue_task_head(p); ++ grq.nr_running++; ++ inc_qnr(); ++} ++ ++static inline int normal_prio(struct task_struct *p) ++{ ++ if (has_rt_policy(p)) ++ return MAX_RT_PRIO - 1 - p->rt_priority; ++ if (idleprio_task(p)) ++ return IDLE_PRIO; ++ if (iso_task(p)) ++ return ISO_PRIO; ++ return NORMAL_PRIO; ++} ++ ++/* ++ * Calculate the current priority, i.e. the priority ++ * taken into account by the scheduler. This value might ++ * be boosted by RT tasks as it will be RT if the task got ++ * RT-boosted. If not then it returns p->normal_prio. ++ */ ++static int effective_prio(struct task_struct *p) ++{ ++ p->normal_prio = normal_prio(p); ++ /* ++ * If we are RT tasks or we were boosted to RT priority, ++ * keep the priority unchanged. Otherwise, update priority ++ * to the normal priority: ++ */ ++ if (!rt_prio(p->prio)) ++ return p->normal_prio; ++ return p->prio; ++} ++ ++/* ++ * activate_task - move a task to the runqueue. Enter with grq locked. ++ */ ++static void activate_task(struct task_struct *p, struct rq *rq) ++{ ++ update_clocks(rq); ++ ++ /* ++ * Sleep time is in units of nanosecs, so shift by 20 to get a ++ * milliseconds-range estimation of the amount of time that the task ++ * spent sleeping: ++ */ ++ if (unlikely(prof_on == SLEEP_PROFILING)) { ++ if (p->state == TASK_UNINTERRUPTIBLE) ++ profile_hits(SLEEP_PROFILING, (void *)get_wchan(p), ++ (rq->clock_task - p->last_ran) >> 20); ++ } ++ ++ p->prio = effective_prio(p); ++ if (task_contributes_to_load(p)) ++ grq.nr_uninterruptible--; ++ enqueue_task(p); ++ grq.nr_running++; ++ inc_qnr(); ++} ++ ++static inline void clear_sticky(struct task_struct *p); ++ ++/* ++ * deactivate_task - If it's running, it's not on the grq and we can just ++ * decrement the nr_running. Enter with grq locked. ++ */ ++static inline void deactivate_task(struct task_struct *p) ++{ ++ if (task_contributes_to_load(p)) ++ grq.nr_uninterruptible++; ++ grq.nr_running--; ++ clear_sticky(p); ++} ++ ++static ATOMIC_NOTIFIER_HEAD(task_migration_notifier); ++ ++void register_task_migration_notifier(struct notifier_block *n) ++{ ++ atomic_notifier_chain_register(&task_migration_notifier, n); ++} ++ ++#ifdef CONFIG_SMP ++void set_task_cpu(struct task_struct *p, unsigned int cpu) ++{ ++#ifdef CONFIG_LOCKDEP ++ /* ++ * The caller should hold grq lock. ++ */ ++ WARN_ON_ONCE(debug_locks && !lockdep_is_held(&grq.lock)); ++#endif ++ trace_sched_migrate_task(p, cpu); ++ if (task_cpu(p) != cpu) ++ perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, NULL, 0); ++ ++ /* ++ * After ->cpu is set up to a new value, task_grq_lock(p, ...) can be ++ * successfully executed on another CPU. We must ensure that updates of ++ * per-task data have been completed by this moment. ++ */ ++ smp_wmb(); ++ task_thread_info(p)->cpu = cpu; ++} ++ ++static inline void clear_sticky(struct task_struct *p) ++{ ++ p->sticky = false; ++} ++ ++static inline bool task_sticky(struct task_struct *p) ++{ ++ return p->sticky; ++} ++ ++/* Reschedule the best idle CPU that is not this one. */ ++static void ++resched_closest_idle(struct rq *rq, int cpu, struct task_struct *p) ++{ ++ cpumask_t tmpmask; ++ ++ cpus_and(tmpmask, p->cpus_allowed, grq.cpu_idle_map); ++ cpu_clear(cpu, tmpmask); ++ if (cpus_empty(tmpmask)) ++ return; ++ resched_best_mask(cpu, rq, &tmpmask); ++} ++ ++/* ++ * We set the sticky flag on a task that is descheduled involuntarily meaning ++ * it is awaiting further CPU time. 
If the last sticky task is still sticky ++ * but unlucky enough to not be the next task scheduled, we unstick it and try ++ * to find it an idle CPU. Realtime tasks do not stick to minimise their ++ * latency at all times. ++ */ ++static inline void ++swap_sticky(struct rq *rq, int cpu, struct task_struct *p) ++{ ++ if (rq->sticky_task) { ++ if (rq->sticky_task == p) { ++ p->sticky = true; ++ return; ++ } ++ if (task_sticky(rq->sticky_task)) { ++ clear_sticky(rq->sticky_task); ++ resched_closest_idle(rq, cpu, rq->sticky_task); ++ } ++ } ++ if (!rt_task(p)) { ++ p->sticky = true; ++ rq->sticky_task = p; ++ } else { ++ resched_closest_idle(rq, cpu, p); ++ rq->sticky_task = NULL; ++ } ++} ++ ++static inline void unstick_task(struct rq *rq, struct task_struct *p) ++{ ++ rq->sticky_task = NULL; ++ clear_sticky(p); ++} ++#else ++static inline void clear_sticky(struct task_struct *p) ++{ ++} ++ ++static inline bool task_sticky(struct task_struct *p) ++{ ++ return false; ++} ++ ++static inline void ++swap_sticky(struct rq *rq, int cpu, struct task_struct *p) ++{ ++} ++ ++static inline void unstick_task(struct rq *rq, struct task_struct *p) ++{ ++} ++#endif ++ ++/* ++ * Move a task off the global queue and take it to a cpu for it will ++ * become the running task. ++ */ ++static inline void take_task(int cpu, struct task_struct *p) ++{ ++ set_task_cpu(p, cpu); ++ dequeue_task(p); ++ clear_sticky(p); ++ dec_qnr(); ++} ++ ++/* ++ * Returns a descheduling task to the grq runqueue unless it is being ++ * deactivated. ++ */ ++static inline void return_task(struct task_struct *p, bool deactivate) ++{ ++ if (deactivate) ++ deactivate_task(p); ++ else { ++ inc_qnr(); ++ enqueue_task(p); ++ } ++} ++ ++/* ++ * resched_task - mark a task 'to be rescheduled now'. ++ * ++ * On UP this means the setting of the need_resched flag, on SMP it ++ * might also involve a cross-CPU call to trigger the scheduler on ++ * the target CPU. ++ */ ++#ifdef CONFIG_SMP ++ ++#ifndef tsk_is_polling ++#define tsk_is_polling(t) 0 ++#endif ++ ++static void resched_task(struct task_struct *p) ++{ ++ int cpu; ++ ++ assert_raw_spin_locked(&grq.lock); ++ ++ if (unlikely(test_tsk_thread_flag(p, TIF_NEED_RESCHED))) ++ return; ++ ++ set_tsk_thread_flag(p, TIF_NEED_RESCHED); ++ ++ cpu = task_cpu(p); ++ if (cpu == smp_processor_id()) ++ return; ++ ++ /* NEED_RESCHED must be visible before we test polling */ ++ smp_mb(); ++ if (!tsk_is_polling(p)) ++ smp_send_reschedule(cpu); ++} ++ ++#else ++static inline void resched_task(struct task_struct *p) ++{ ++ assert_raw_spin_locked(&grq.lock); ++ set_tsk_need_resched(p); ++} ++#endif ++ ++/** ++ * task_curr - is this task currently executing on a CPU? ++ * @p: the task in question. ++ */ ++inline int task_curr(const struct task_struct *p) ++{ ++ return cpu_curr(task_cpu(p)) == p; ++} ++ ++#ifdef CONFIG_SMP ++struct migration_req { ++ struct task_struct *task; ++ int dest_cpu; ++}; ++ ++/* ++ * wait_task_inactive - wait for a thread to unschedule. ++ * ++ * If @match_state is nonzero, it's the @p->state value just checked and ++ * not expected to change. If it changes, i.e. @p might have woken up, ++ * then return zero. When we succeed in waiting for @p to be off its CPU, ++ * we return a positive number (its total switch count). If a second call ++ * a short while later returns the same number, the caller can be sure that ++ * @p has remained unscheduled the whole time. 
++ * ++ * The caller must ensure that the task *will* unschedule sometime soon, ++ * else this function might spin for a *long* time. This function can't ++ * be called with interrupts off, or it may introduce deadlock with ++ * smp_call_function() if an IPI is sent by the same process we are ++ * waiting to become inactive. ++ */ ++unsigned long wait_task_inactive(struct task_struct *p, long match_state) ++{ ++ unsigned long flags; ++ bool running, on_rq; ++ unsigned long ncsw; ++ struct rq *rq; ++ ++ for (;;) { ++ /* ++ * We do the initial early heuristics without holding ++ * any task-queue locks at all. We'll only try to get ++ * the runqueue lock when things look like they will ++ * work out! In the unlikely event rq is dereferenced ++ * since we're lockless, grab it again. ++ */ ++#ifdef CONFIG_SMP ++retry_rq: ++ rq = task_rq(p); ++ if (unlikely(!rq)) ++ goto retry_rq; ++#else /* CONFIG_SMP */ ++ rq = task_rq(p); ++#endif ++ /* ++ * If the task is actively running on another CPU ++ * still, just relax and busy-wait without holding ++ * any locks. ++ * ++ * NOTE! Since we don't hold any locks, it's not ++ * even sure that "rq" stays as the right runqueue! ++ * But we don't care, since this will return false ++ * if the runqueue has changed and p is actually now ++ * running somewhere else! ++ */ ++ while (task_running(p) && p == rq->curr) { ++ if (match_state && unlikely(p->state != match_state)) ++ return 0; ++ cpu_relax(); ++ } ++ ++ /* ++ * Ok, time to look more closely! We need the grq ++ * lock now, to be *sure*. If we're wrong, we'll ++ * just go back and repeat. ++ */ ++ rq = task_grq_lock(p, &flags); ++ trace_sched_wait_task(p); ++ running = task_running(p); ++ on_rq = task_queued(p); ++ ncsw = 0; ++ if (!match_state || p->state == match_state) ++ ncsw = p->nvcsw | LONG_MIN; /* sets MSB */ ++ task_grq_unlock(&flags); ++ ++ /* ++ * If it changed from the expected state, bail out now. ++ */ ++ if (unlikely(!ncsw)) ++ break; ++ ++ /* ++ * Was it really running after all now that we ++ * checked with the proper locks actually held? ++ * ++ * Oops. Go back and try again.. ++ */ ++ if (unlikely(running)) { ++ cpu_relax(); ++ continue; ++ } ++ ++ /* ++ * It's not enough that it's not actively running, ++ * it must be off the runqueue _entirely_, and not ++ * preempted! ++ * ++ * So if it was still runnable (but just not actively ++ * running right now), it's preempted, and we should ++ * yield - it could be a while. ++ */ ++ if (unlikely(on_rq)) { ++ ktime_t to = ktime_set(0, NSEC_PER_SEC / HZ); ++ ++ set_current_state(TASK_UNINTERRUPTIBLE); ++ schedule_hrtimeout(&to, HRTIMER_MODE_REL); ++ continue; ++ } ++ ++ /* ++ * Ahh, all good. It wasn't running, and it wasn't ++ * runnable, which means that it will never become ++ * running in the future either. We're all done! ++ */ ++ break; ++ } ++ ++ return ncsw; ++} ++ ++/*** ++ * kick_process - kick a running thread to enter/exit the kernel ++ * @p: the to-be-kicked thread ++ * ++ * Cause a process which is running on another CPU to enter ++ * kernel-mode, without any delay. (to get signals handled.) ++ * ++ * NOTE: this function doesn't have to take the runqueue lock, ++ * because all it wants to ensure is that the remote task enters ++ * the kernel. If the IPI races and the task has been migrated ++ * to another CPU then no harm is done and the purpose has been ++ * achieved as well. 
++ */ ++void kick_process(struct task_struct *p) ++{ ++ int cpu; ++ ++ preempt_disable(); ++ cpu = task_cpu(p); ++ if ((cpu != smp_processor_id()) && task_curr(p)) ++ smp_send_reschedule(cpu); ++ preempt_enable(); ++} ++EXPORT_SYMBOL_GPL(kick_process); ++#endif ++ ++#define rq_idle(rq) ((rq)->rq_prio == PRIO_LIMIT) ++ ++/* ++ * RT tasks preempt purely on priority. SCHED_NORMAL tasks preempt on the ++ * basis of earlier deadlines. SCHED_IDLEPRIO don't preempt anything else or ++ * between themselves, they cooperatively multitask. An idle rq scores as ++ * prio PRIO_LIMIT so it is always preempted. ++ */ ++static inline bool ++can_preempt(struct task_struct *p, int prio, u64 deadline) ++{ ++ /* Better static priority RT task or better policy preemption */ ++ if (p->prio < prio) ++ return true; ++ if (p->prio > prio) ++ return false; ++ /* SCHED_NORMAL, BATCH and ISO will preempt based on deadline */ ++ if (!deadline_before(p->deadline, deadline)) ++ return false; ++ return true; ++} ++ ++#ifdef CONFIG_SMP ++#define cpu_online_map (*(cpumask_t *)cpu_online_mask) ++#ifdef CONFIG_HOTPLUG_CPU ++/* ++ * Check to see if there is a task that is affined only to offline CPUs but ++ * still wants runtime. This happens to kernel threads during suspend/halt and ++ * disabling of CPUs. ++ */ ++static inline bool online_cpus(struct task_struct *p) ++{ ++ return (likely(cpus_intersects(cpu_online_map, p->cpus_allowed))); ++} ++#else /* CONFIG_HOTPLUG_CPU */ ++/* All available CPUs are always online without hotplug. */ ++static inline bool online_cpus(struct task_struct *p) ++{ ++ return true; ++} ++#endif ++ ++/* ++ * Check to see if p can run on cpu, and if not, whether there are any online ++ * CPUs it can run on instead. ++ */ ++static inline bool needs_other_cpu(struct task_struct *p, int cpu) ++{ ++ if (unlikely(!cpu_isset(cpu, p->cpus_allowed))) ++ return true; ++ return false; ++} ++ ++/* ++ * When all else is equal, still prefer this_rq. ++ */ ++static void try_preempt(struct task_struct *p, struct rq *this_rq) ++{ ++ struct rq *highest_prio_rq = NULL; ++ int cpu, highest_prio; ++ u64 latest_deadline; ++ cpumask_t tmp; ++ ++ /* ++ * We clear the sticky flag here because for a task to have called ++ * try_preempt with the sticky flag enabled means some complicated ++ * re-scheduling has occurred and we should ignore the sticky flag. 
++ */ ++ clear_sticky(p); ++ ++ if (suitable_idle_cpus(p)) { ++ resched_best_idle(p); ++ return; ++ } ++ ++ /* IDLEPRIO tasks never preempt anything but idle */ ++ if (p->policy == SCHED_IDLEPRIO) ++ return; ++ ++ if (likely(online_cpus(p))) ++ cpus_and(tmp, cpu_online_map, p->cpus_allowed); ++ else ++ return; ++ ++ highest_prio = latest_deadline = 0; ++ ++ for_each_cpu_mask(cpu, tmp) { ++ struct rq *rq; ++ int rq_prio; ++ ++ rq = cpu_rq(cpu); ++ rq_prio = rq->rq_prio; ++ if (rq_prio < highest_prio) ++ continue; ++ ++ if (rq_prio > highest_prio || ++ deadline_after(rq->rq_deadline, latest_deadline)) { ++ latest_deadline = rq->rq_deadline; ++ highest_prio = rq_prio; ++ highest_prio_rq = rq; ++ } ++ } ++ ++ if (likely(highest_prio_rq)) { ++ if (can_preempt(p, highest_prio, highest_prio_rq->rq_deadline)) ++ resched_task(highest_prio_rq->curr); ++ } ++} ++#else /* CONFIG_SMP */ ++static inline bool needs_other_cpu(struct task_struct *p, int cpu) ++{ ++ return false; ++} ++ ++static void try_preempt(struct task_struct *p, struct rq *this_rq) ++{ ++ if (p->policy == SCHED_IDLEPRIO) ++ return; ++ if (can_preempt(p, uprq->rq_prio, uprq->rq_deadline)) ++ resched_task(uprq->curr); ++} ++#endif /* CONFIG_SMP */ ++ ++static void ++ttwu_stat(struct task_struct *p, int cpu, int wake_flags) ++{ ++#ifdef CONFIG_SCHEDSTATS ++ struct rq *rq = this_rq(); ++ ++#ifdef CONFIG_SMP ++ int this_cpu = smp_processor_id(); ++ ++ if (cpu == this_cpu) ++ schedstat_inc(rq, ttwu_local); ++ else { ++ struct sched_domain *sd; ++ ++ rcu_read_lock(); ++ for_each_domain(this_cpu, sd) { ++ if (cpumask_test_cpu(cpu, sched_domain_span(sd))) { ++ schedstat_inc(sd, ttwu_wake_remote); ++ break; ++ } ++ } ++ rcu_read_unlock(); ++ } ++ ++#endif /* CONFIG_SMP */ ++ ++ schedstat_inc(rq, ttwu_count); ++#endif /* CONFIG_SCHEDSTATS */ ++} ++ ++static inline void ttwu_activate(struct task_struct *p, struct rq *rq, ++ bool is_sync) ++{ ++ activate_task(p, rq); ++ ++ /* ++ * Sync wakeups (i.e. those types of wakeups where the waker ++ * has indicated that it will leave the CPU in short order) ++ * don't trigger a preemption if there are no idle cpus, ++ * instead waiting for current to deschedule. ++ */ ++ if (!is_sync || suitable_idle_cpus(p)) ++ try_preempt(p, rq); ++} ++ ++static inline void ttwu_post_activation(struct task_struct *p, struct rq *rq, ++ bool success) ++{ ++ trace_sched_wakeup(p, success); ++ p->state = TASK_RUNNING; ++ ++ /* ++ * if a worker is waking up, notify workqueue. Note that on BFS, we ++ * don't really know what cpu it will be, so we fake it for ++ * wq_worker_waking_up :/ ++ */ ++ if ((p->flags & PF_WQ_WORKER) && success) ++ wq_worker_waking_up(p, cpu_of(rq)); ++} ++ ++#ifdef CONFIG_SMP ++void scheduler_ipi(void) ++{ ++} ++#endif /* CONFIG_SMP */ ++ ++/* ++ * wake flags ++ */ ++#define WF_SYNC 0x01 /* waker goes to sleep after wakeup */ ++#define WF_FORK 0x02 /* child wakeup after fork */ ++#define WF_MIGRATED 0x4 /* internal use, task got migrated */ ++ ++/*** ++ * try_to_wake_up - wake up a thread ++ * @p: the thread to be awakened ++ * @state: the mask of task states that can be woken ++ * @wake_flags: wake modifier flags (WF_*) ++ * ++ * Put it on the run-queue if it's not already there. The "current" ++ * thread is always on the run-queue (except when the actual ++ * re-schedule is in progress), and as such you're allowed to do ++ * the simpler "current->state = TASK_RUNNING" to mark yourself ++ * runnable without the overhead of this. 
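++ *
++ * For example, code that has done set_current_state(TASK_INTERRUPTIBLE)
++ * and then finds its wakeup condition already satisfied can simply set
++ * current->state back to TASK_RUNNING and skip sleeping, with no wakeup
++ * call needed.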
++ *
++ * Returns %true if @p was woken up, %false if it was already running
++ * or @state didn't match @p's state.
++ */
++static bool try_to_wake_up(struct task_struct *p, unsigned int state,
++ int wake_flags)
++{
++ bool success = false;
++ unsigned long flags;
++ struct rq *rq;
++ int cpu;
++
++ get_cpu();
++
++ /* This barrier is undocumented, probably for p->state? Ugh, no idea. */
++ smp_wmb();
++
++ /*
++ * No need to do time_lock_grq as we only need to update the rq clock
++ * if we activate the task
++ */
++ rq = task_grq_lock(p, &flags);
++ cpu = task_cpu(p);
++
++ /* state is a volatile long, why that is I don't know */
++ if (!((unsigned int)p->state & state))
++ goto out_unlock;
++
++ if (task_queued(p) || task_running(p))
++ goto out_running;
++
++ ttwu_activate(p, rq, wake_flags & WF_SYNC);
++ success = true;
++
++out_running:
++ ttwu_post_activation(p, rq, success);
++out_unlock:
++ task_grq_unlock(&flags);
++
++ ttwu_stat(p, cpu, wake_flags);
++
++ put_cpu();
++
++ return success;
++}
++
++/**
++ * try_to_wake_up_local - try to wake up a local task with grq lock held
++ * @p: the thread to be awakened
++ *
++ * Put @p on the run-queue if it's not already there. The caller must
++ * ensure that grq is locked and @p is not the current task.
++ * grq stays locked over invocation.
++ */
++static void try_to_wake_up_local(struct task_struct *p)
++{
++ struct rq *rq = task_rq(p);
++ bool success = false;
++
++ lockdep_assert_held(&grq.lock);
++
++ if (!(p->state & TASK_NORMAL))
++ return;
++
++ if (!task_queued(p)) {
++ if (likely(!task_running(p))) {
++ schedstat_inc(rq, ttwu_count);
++ schedstat_inc(rq, ttwu_local);
++ }
++ ttwu_activate(p, rq, false);
++ ttwu_stat(p, smp_processor_id(), 0);
++ success = true;
++ }
++ ttwu_post_activation(p, rq, success);
++}
++
++/**
++ * wake_up_process - Wake up a specific process
++ * @p: The process to be woken up.
++ *
++ * Attempt to wake up the nominated process and move it to the set of runnable
++ * processes. Returns 1 if the process was woken up, 0 if it was already
++ * running.
++ *
++ * It may be assumed that this function implies a write memory barrier before
++ * changing the task state if and only if any tasks are woken up.
++ */
++int wake_up_process(struct task_struct *p)
++{
++ WARN_ON(task_is_stopped_or_traced(p));
++ return try_to_wake_up(p, TASK_NORMAL, 0);
++}
++EXPORT_SYMBOL(wake_up_process);
++
++int wake_up_state(struct task_struct *p, unsigned int state)
++{
++ return try_to_wake_up(p, state, 0);
++}
++
++static void time_slice_expired(struct task_struct *p);
++
++/*
++ * Perform scheduler related setup for a newly forked process p.
++ * p is forked by current.
++ */
++void sched_fork(struct task_struct *p)
++{
++#ifdef CONFIG_PREEMPT_NOTIFIERS
++ INIT_HLIST_HEAD(&p->preempt_notifiers);
++#endif
++ /*
++ * The process state is set to the same value as that of the process
++ * executing do_fork(), i.e. running. This guarantees that nobody will
++ * actually run it, and a signal or other external event cannot wake
++ * it up and insert it on the runqueue either.
++ */
++
++ /* Should be reset in fork.c but done here for ease of bfs patching */
++ p->utime =
++ p->stime =
++ p->utimescaled =
++ p->stimescaled =
++ p->sched_time =
++ p->stime_pc =
++ p->utime_pc = 0;
++
++ /*
++ * Revert to default priority/policy on fork if requested.
++ */ ++ if (unlikely(p->sched_reset_on_fork)) { ++ if (p->policy == SCHED_FIFO || p->policy == SCHED_RR) { ++ p->policy = SCHED_NORMAL; ++ p->normal_prio = normal_prio(p); ++ } ++ ++ if (PRIO_TO_NICE(p->static_prio) < 0) { ++ p->static_prio = NICE_TO_PRIO(0); ++ p->normal_prio = p->static_prio; ++ } ++ ++ /* ++ * We don't need the reset flag anymore after the fork. It has ++ * fulfilled its duty: ++ */ ++ p->sched_reset_on_fork = 0; ++ } ++ ++ INIT_LIST_HEAD(&p->run_list); ++#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) ++ if (unlikely(sched_info_on())) ++ memset(&p->sched_info, 0, sizeof(p->sched_info)); ++#endif ++ p->on_cpu = false; ++ clear_sticky(p); ++ ++#ifdef CONFIG_PREEMPT_COUNT ++ /* Want to start with kernel preemption disabled. */ ++ task_thread_info(p)->preempt_count = 1; ++#endif ++} ++ ++/* ++ * wake_up_new_task - wake up a newly created task for the first time. ++ * ++ * This function will do some initial scheduler statistics housekeeping ++ * that must be done for every newly created context, then puts the task ++ * on the runqueue and wakes it. ++ */ ++void wake_up_new_task(struct task_struct *p) ++{ ++ struct task_struct *parent; ++ unsigned long flags; ++ struct rq *rq; ++ ++ parent = p->parent; ++ rq = task_grq_lock(p, &flags); ++ ++ /* ++ * Reinit new task deadline as its creator deadline could have changed ++ * since call to dup_task_struct(). ++ */ ++ p->deadline = rq->rq_deadline; ++ ++ /* ++ * If the task is a new process, current and parent are the same. If ++ * the task is a new thread in the thread group, it will have much more ++ * in common with current than with the parent. ++ */ ++ set_task_cpu(p, task_cpu(rq->curr)); ++ ++ /* ++ * Make sure we do not leak PI boosting priority to the child. ++ */ ++ p->prio = rq->curr->normal_prio; ++ ++ activate_task(p, rq); ++ trace_sched_wakeup_new(p, 1); ++ if (unlikely(p->policy == SCHED_FIFO)) ++ goto after_ts_init; ++ ++ /* ++ * Share the timeslice between parent and child, thus the ++ * total amount of pending timeslices in the system doesn't change, ++ * resulting in more scheduling fairness. If it's negative, it won't ++ * matter since that's the same as being 0. current's time_slice is ++ * actually in rq_time_slice when it's running, as is its last_ran ++ * value. rq->rq_deadline is only modified within schedule() so it ++ * is always equal to current->deadline. ++ */ ++ p->last_ran = rq->rq_last_ran; ++ if (likely(rq->rq_time_slice >= RESCHED_US * 2)) { ++ rq->rq_time_slice /= 2; ++ p->time_slice = rq->rq_time_slice; ++after_ts_init: ++ if (rq->curr == parent && !suitable_idle_cpus(p)) { ++ /* ++ * The VM isn't cloned, so we're in a good position to ++ * do child-runs-first in anticipation of an exec. This ++ * usually avoids a lot of COW overhead. ++ */ ++ set_tsk_need_resched(parent); ++ } else ++ try_preempt(p, rq); ++ } else { ++ if (rq->curr == parent) { ++ /* ++ * Forking task has run out of timeslice. Reschedule it and ++ * start its child with a new time slice and deadline. The ++ * child will end up running first because its deadline will ++ * be slightly earlier. 
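++ *
++ * Both tasks get fresh deadlines here, but the child's is stamped first
++ * via time_slice_expired() at the current niffies, while the parent's is
++ * only reset when it next reschedules, at a later niffies value, so the
++ * child sorts ahead.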
++ */ ++ rq->rq_time_slice = 0; ++ set_tsk_need_resched(parent); ++ } ++ time_slice_expired(p); ++ } ++ task_grq_unlock(&flags); ++} ++ ++#ifdef CONFIG_PREEMPT_NOTIFIERS ++ ++/** ++ * preempt_notifier_register - tell me when current is being preempted & rescheduled ++ * @notifier: notifier struct to register ++ */ ++void preempt_notifier_register(struct preempt_notifier *notifier) ++{ ++ hlist_add_head(¬ifier->link, ¤t->preempt_notifiers); ++} ++EXPORT_SYMBOL_GPL(preempt_notifier_register); ++ ++/** ++ * preempt_notifier_unregister - no longer interested in preemption notifications ++ * @notifier: notifier struct to unregister ++ * ++ * This is safe to call from within a preemption notifier. ++ */ ++void preempt_notifier_unregister(struct preempt_notifier *notifier) ++{ ++ hlist_del(¬ifier->link); ++} ++EXPORT_SYMBOL_GPL(preempt_notifier_unregister); ++ ++static void fire_sched_in_preempt_notifiers(struct task_struct *curr) ++{ ++ struct preempt_notifier *notifier; ++ ++ hlist_for_each_entry(notifier, &curr->preempt_notifiers, link) ++ notifier->ops->sched_in(notifier, raw_smp_processor_id()); ++} ++ ++static void ++fire_sched_out_preempt_notifiers(struct task_struct *curr, ++ struct task_struct *next) ++{ ++ struct preempt_notifier *notifier; ++ ++ hlist_for_each_entry(notifier, &curr->preempt_notifiers, link) ++ notifier->ops->sched_out(notifier, next); ++} ++ ++#else /* !CONFIG_PREEMPT_NOTIFIERS */ ++ ++static void fire_sched_in_preempt_notifiers(struct task_struct *curr) ++{ ++} ++ ++static void ++fire_sched_out_preempt_notifiers(struct task_struct *curr, ++ struct task_struct *next) ++{ ++} ++ ++#endif /* CONFIG_PREEMPT_NOTIFIERS */ ++ ++/** ++ * prepare_task_switch - prepare to switch tasks ++ * @rq: the runqueue preparing to switch ++ * @next: the task we are going to switch to. ++ * ++ * This is called with the rq lock held and interrupts off. It must ++ * be paired with a subsequent finish_task_switch after the context ++ * switch. ++ * ++ * prepare_task_switch sets up locking and calls architecture specific ++ * hooks. ++ */ ++static inline void ++prepare_task_switch(struct rq *rq, struct task_struct *prev, ++ struct task_struct *next) ++{ ++ sched_info_switch(prev, next); ++ perf_event_task_sched_out(prev, next); ++ fire_sched_out_preempt_notifiers(prev, next); ++ prepare_lock_switch(rq, next); ++ prepare_arch_switch(next); ++ trace_sched_switch(prev, next); ++} ++ ++/** ++ * finish_task_switch - clean up after a task-switch ++ * @rq: runqueue associated with task-switch ++ * @prev: the thread we just switched away from. ++ * ++ * finish_task_switch must be called after the context switch, paired ++ * with a prepare_task_switch call before the context switch. ++ * finish_task_switch will reconcile locking set up by prepare_task_switch, ++ * and do any other architecture-specific cleanup actions. ++ * ++ * Note that we may have delayed dropping an mm in context_switch(). If ++ * so, we finish that here outside of the runqueue lock. (Doing it ++ * with the lock held can cause deadlocks; see schedule() for ++ * details.) ++ */ ++static inline void finish_task_switch(struct rq *rq, struct task_struct *prev) ++ __releases(grq.lock) ++{ ++ struct mm_struct *mm = rq->prev_mm; ++ long prev_state; ++ ++ rq->prev_mm = NULL; ++ ++ /* ++ * A task struct has one reference for the use as "current". ++ * If a task dies, then it sets TASK_DEAD in tsk->state and calls ++ * schedule one last time. The schedule call will never return, and ++ * the scheduled task must drop that reference. 
++ * The test for TASK_DEAD must occur while the runqueue locks are ++ * still held, otherwise prev could be scheduled on another cpu, die ++ * there before we look at prev->state, and then the reference would ++ * be dropped twice. ++ * Manfred Spraul ++ */ ++ prev_state = prev->state; ++ vtime_task_switch(prev); ++ finish_arch_switch(prev); ++ perf_event_task_sched_in(prev, current); ++ finish_lock_switch(rq, prev); ++ finish_arch_post_lock_switch(); ++ ++ fire_sched_in_preempt_notifiers(current); ++ if (mm) ++ mmdrop(mm); ++ if (unlikely(prev_state == TASK_DEAD)) { ++ /* ++ * Remove function-return probe instances associated with this ++ * task and put them back on the free list. ++ */ ++ kprobe_flush_task(prev); ++ put_task_struct(prev); ++ } ++} ++ ++/** ++ * schedule_tail - first thing a freshly forked thread must call. ++ * @prev: the thread we just switched away from. ++ */ ++asmlinkage void schedule_tail(struct task_struct *prev) ++ __releases(grq.lock) ++{ ++ struct rq *rq = this_rq(); ++ ++ finish_task_switch(rq, prev); ++#ifdef __ARCH_WANT_UNLOCKED_CTXSW ++ /* In this case, finish_task_switch does not reenable preemption */ ++ preempt_enable(); ++#endif ++ if (current->set_child_tid) ++ put_user(current->pid, current->set_child_tid); ++} ++ ++/* ++ * context_switch - switch to the new MM and the new ++ * thread's register state. ++ */ ++static inline void ++context_switch(struct rq *rq, struct task_struct *prev, ++ struct task_struct *next) ++{ ++ struct mm_struct *mm, *oldmm; ++ ++ prepare_task_switch(rq, prev, next); ++ ++ mm = next->mm; ++ oldmm = prev->active_mm; ++ /* ++ * For paravirt, this is coupled with an exit in switch_to to ++ * combine the page table reload and the switch backend into ++ * one hypercall. ++ */ ++ arch_start_context_switch(prev); ++ ++ if (!mm) { ++ next->active_mm = oldmm; ++ atomic_inc(&oldmm->mm_count); ++ enter_lazy_tlb(oldmm, next); ++ } else ++ switch_mm(oldmm, mm, next); ++ ++ if (!prev->mm) { ++ prev->active_mm = NULL; ++ rq->prev_mm = oldmm; ++ } ++ /* ++ * Since the runqueue lock will be released by the next ++ * task (which is an invalid locking op but in the case ++ * of the scheduler it's an obvious special-case), so we ++ * do an early lockdep release here: ++ */ ++#ifndef __ARCH_WANT_UNLOCKED_CTXSW ++ spin_release(&grq.lock.dep_map, 1, _THIS_IP_); ++#endif ++ ++ /* Here we just switch the register state and the stack. */ ++ context_tracking_task_switch(prev, next); ++ switch_to(prev, next, prev); ++ ++ barrier(); ++ /* ++ * this_rq must be evaluated again because prev may have moved ++ * CPUs since it called schedule(), thus the 'rq' on its stack ++ * frame will be invalid. ++ */ ++ finish_task_switch(this_rq(), prev); ++} ++ ++/* ++ * nr_running, nr_uninterruptible and nr_context_switches: ++ * ++ * externally visible scheduler statistics: current number of runnable ++ * threads, total number of context switches performed since bootup. All are ++ * measured without grabbing the grq lock but the occasional inaccurate result ++ * doesn't matter so long as it's positive. 
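++ *
++ * For example, a lockless read may race with a concurrent decrement and
++ * briefly observe a negative value; the accessors below clamp such
++ * snapshots (to 0, or to 1 for nr_switches) rather than report garbage.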
++ */ ++unsigned long nr_running(void) ++{ ++ long nr = grq.nr_running; ++ ++ if (unlikely(nr < 0)) ++ nr = 0; ++ return (unsigned long)nr; ++} ++ ++static unsigned long nr_uninterruptible(void) ++{ ++ long nu = grq.nr_uninterruptible; ++ ++ if (unlikely(nu < 0)) ++ nu = 0; ++ return nu; ++} ++ ++unsigned long long nr_context_switches(void) ++{ ++ long long ns = grq.nr_switches; ++ ++ /* This is of course impossible */ ++ if (unlikely(ns < 0)) ++ ns = 1; ++ return (unsigned long long)ns; ++} ++ ++unsigned long nr_iowait(void) ++{ ++ unsigned long i, sum = 0; ++ ++ for_each_possible_cpu(i) ++ sum += atomic_read(&cpu_rq(i)->nr_iowait); ++ ++ return sum; ++} ++ ++unsigned long nr_iowait_cpu(int cpu) ++{ ++ struct rq *this = cpu_rq(cpu); ++ return atomic_read(&this->nr_iowait); ++} ++ ++unsigned long nr_active(void) ++{ ++ return nr_running() + nr_uninterruptible(); ++} ++ ++/* Beyond a task running on this CPU, load is equal everywhere on BFS */ ++unsigned long this_cpu_load(void) ++{ ++ return this_rq()->rq_running + ++ ((queued_notrunning() + nr_uninterruptible()) / grq.noc); ++} ++ ++/* Variables and functions for calc_load */ ++static unsigned long calc_load_update; ++unsigned long avenrun[3]; ++EXPORT_SYMBOL(avenrun); ++ ++/** ++ * get_avenrun - get the load average array ++ * @loads: pointer to dest load array ++ * @offset: offset to add ++ * @shift: shift count to shift the result left ++ * ++ * These values are estimates at best, so no need for locking. ++ */ ++void get_avenrun(unsigned long *loads, unsigned long offset, int shift) ++{ ++ loads[0] = (avenrun[0] + offset) << shift; ++ loads[1] = (avenrun[1] + offset) << shift; ++ loads[2] = (avenrun[2] + offset) << shift; ++} ++ ++static unsigned long ++calc_load(unsigned long load, unsigned long exp, unsigned long active) ++{ ++ load *= exp; ++ load += active * (FIXED_1 - exp); ++ return load >> FSHIFT; ++} ++ ++/* ++ * calc_load - update the avenrun load estimates every LOAD_FREQ seconds. ++ */ ++void calc_global_load(unsigned long ticks) ++{ ++ long active; ++ ++ if (time_before(jiffies, calc_load_update)) ++ return; ++ active = nr_active() * FIXED_1; ++ ++ avenrun[0] = calc_load(avenrun[0], EXP_1, active); ++ avenrun[1] = calc_load(avenrun[1], EXP_5, active); ++ avenrun[2] = calc_load(avenrun[2], EXP_15, active); ++ ++ calc_load_update = jiffies + LOAD_FREQ; ++} ++ ++DEFINE_PER_CPU(struct kernel_stat, kstat); ++DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat); ++ ++EXPORT_PER_CPU_SYMBOL(kstat); ++EXPORT_PER_CPU_SYMBOL(kernel_cpustat); ++ ++#ifdef CONFIG_IRQ_TIME_ACCOUNTING ++ ++/* ++ * There are no locks covering percpu hardirq/softirq time. ++ * They are only modified in account_system_vtime, on corresponding CPU ++ * with interrupts disabled. So, writes are safe. ++ * They are read and saved off onto struct rq in update_rq_clock(). ++ * This may result in other CPU reading this CPU's irq time and can ++ * race with irq/account_system_vtime on this CPU. We would either get old ++ * or new value with a side effect of accounting a slice of irq time to wrong ++ * task when irq is in progress while we read rq->clock. That is a worthy ++ * compromise in place of having locks on each irq in account_system_time. 
++ */ ++static DEFINE_PER_CPU(u64, cpu_hardirq_time); ++static DEFINE_PER_CPU(u64, cpu_softirq_time); ++ ++static DEFINE_PER_CPU(u64, irq_start_time); ++static int sched_clock_irqtime; ++ ++void enable_sched_clock_irqtime(void) ++{ ++ sched_clock_irqtime = 1; ++} ++ ++void disable_sched_clock_irqtime(void) ++{ ++ sched_clock_irqtime = 0; ++} ++ ++#ifndef CONFIG_64BIT ++static DEFINE_PER_CPU(seqcount_t, irq_time_seq); ++ ++static inline void irq_time_write_begin(void) ++{ ++ __this_cpu_inc(irq_time_seq.sequence); ++ smp_wmb(); ++} ++ ++static inline void irq_time_write_end(void) ++{ ++ smp_wmb(); ++ __this_cpu_inc(irq_time_seq.sequence); ++} ++ ++static inline u64 irq_time_read(int cpu) ++{ ++ u64 irq_time; ++ unsigned seq; ++ ++ do { ++ seq = read_seqcount_begin(&per_cpu(irq_time_seq, cpu)); ++ irq_time = per_cpu(cpu_softirq_time, cpu) + ++ per_cpu(cpu_hardirq_time, cpu); ++ } while (read_seqcount_retry(&per_cpu(irq_time_seq, cpu), seq)); ++ ++ return irq_time; ++} ++#else /* CONFIG_64BIT */ ++static inline void irq_time_write_begin(void) ++{ ++} ++ ++static inline void irq_time_write_end(void) ++{ ++} ++ ++static inline u64 irq_time_read(int cpu) ++{ ++ return per_cpu(cpu_softirq_time, cpu) + per_cpu(cpu_hardirq_time, cpu); ++} ++#endif /* CONFIG_64BIT */ ++ ++/* ++ * Called before incrementing preempt_count on {soft,}irq_enter ++ * and before decrementing preempt_count on {soft,}irq_exit. ++ */ ++void irqtime_account_irq(struct task_struct *curr) ++{ ++ unsigned long flags; ++ s64 delta; ++ int cpu; ++ ++ if (!sched_clock_irqtime) ++ return; ++ ++ local_irq_save(flags); ++ ++ cpu = smp_processor_id(); ++ delta = sched_clock_cpu(cpu) - __this_cpu_read(irq_start_time); ++ __this_cpu_add(irq_start_time, delta); ++ ++ irq_time_write_begin(); ++ /* ++ * We do not account for softirq time from ksoftirqd here. ++ * We want to continue accounting softirq time to ksoftirqd thread ++ * in that case, so as not to confuse scheduler with a special task ++ * that do not consume any time, but still wants to run. ++ */ ++ if (hardirq_count()) ++ __this_cpu_add(cpu_hardirq_time, delta); ++ else if (in_serving_softirq() && curr != this_cpu_ksoftirqd()) ++ __this_cpu_add(cpu_softirq_time, delta); ++ ++ irq_time_write_end(); ++ local_irq_restore(flags); ++} ++EXPORT_SYMBOL_GPL(irqtime_account_irq); ++ ++#endif /* CONFIG_IRQ_TIME_ACCOUNTING */ ++ ++#ifdef CONFIG_PARAVIRT ++static inline u64 steal_ticks(u64 steal) ++{ ++ if (unlikely(steal > NSEC_PER_SEC)) ++ return div_u64(steal, TICK_NSEC); ++ ++ return __iter_div_u64_rem(steal, TICK_NSEC, &steal); ++} ++#endif ++ ++static void update_rq_clock_task(struct rq *rq, s64 delta) ++{ ++/* ++ * In theory, the compile should just see 0 here, and optimize out the call ++ * to sched_rt_avg_update. But I don't trust it... ++ */ ++#ifdef CONFIG_IRQ_TIME_ACCOUNTING ++ s64 irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time; ++ ++ /* ++ * Since irq_time is only updated on {soft,}irq_exit, we might run into ++ * this case when a previous update_rq_clock() happened inside a ++ * {soft,}irq region. ++ * ++ * When this happens, we stop ->clock_task and only update the ++ * prev_irq_time stamp to account for the part that fit, so that a next ++ * update will consume the rest. This ensures ->clock_task is ++ * monotonic. ++ * ++ * It does however cause some slight miss-attribution of {soft,}irq ++ * time, a more accurate solution would be to update the irq_time using ++ * the current rq->clock timestamp, except that would require using ++ * atomic ops. 
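++ *
++ * Worked example (illustrative numbers): if delta is 2ms but 3ms of irq
++ * time accumulated since the last update, irq_delta is clamped to 2ms,
++ * clock_task does not advance on this update, and the leftover 1ms is
++ * subtracted from the next update's delta instead.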
++ */ ++ if (irq_delta > delta) ++ irq_delta = delta; ++ ++ rq->prev_irq_time += irq_delta; ++ delta -= irq_delta; ++#endif ++#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING ++ if (static_key_false((¶virt_steal_rq_enabled))) { ++ s64 steal = paravirt_steal_clock(cpu_of(rq)); ++ u64 st; ++ ++ steal -= rq->prev_steal_time_rq; ++ ++ if (unlikely(steal > delta)) ++ steal = delta; ++ ++ st = steal_ticks(steal); ++ steal = st * TICK_NSEC; ++ ++ rq->prev_steal_time_rq += steal; ++ ++ delta -= steal; ++ } ++#endif ++ ++ rq->clock_task += delta; ++} ++ ++#ifndef nsecs_to_cputime ++# define nsecs_to_cputime(__nsecs) nsecs_to_jiffies(__nsecs) ++#endif ++ ++#ifdef CONFIG_IRQ_TIME_ACCOUNTING ++static void irqtime_account_hi_si(void) ++{ ++ u64 *cpustat = kcpustat_this_cpu->cpustat; ++ u64 latest_ns; ++ ++ latest_ns = nsecs_to_cputime64(this_cpu_read(cpu_hardirq_time)); ++ if (latest_ns > cpustat[CPUTIME_IRQ]) ++ cpustat[CPUTIME_IRQ] += (__force u64)cputime_one_jiffy; ++ ++ latest_ns = nsecs_to_cputime64(this_cpu_read(cpu_softirq_time)); ++ if (latest_ns > cpustat[CPUTIME_SOFTIRQ]) ++ cpustat[CPUTIME_SOFTIRQ] += (__force u64)cputime_one_jiffy; ++} ++#else /* CONFIG_IRQ_TIME_ACCOUNTING */ ++ ++#define sched_clock_irqtime (0) ++ ++static inline void irqtime_account_hi_si(void) ++{ ++} ++#endif /* CONFIG_IRQ_TIME_ACCOUNTING */ ++ ++static __always_inline bool steal_account_process_tick(void) ++{ ++#ifdef CONFIG_PARAVIRT ++ if (static_key_false(¶virt_steal_enabled)) { ++ u64 steal, st = 0; ++ ++ steal = paravirt_steal_clock(smp_processor_id()); ++ steal -= this_rq()->prev_steal_time; ++ ++ st = steal_ticks(steal); ++ this_rq()->prev_steal_time += st * TICK_NSEC; ++ ++ account_steal_time(st); ++ return st; ++ } ++#endif ++ return false; ++} ++ ++/* ++ * Accumulate raw cputime values of dead tasks (sig->[us]time) and live ++ * tasks (sum on group iteration) belonging to @tsk's group. ++ */ ++void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times) ++{ ++ struct signal_struct *sig = tsk->signal; ++ cputime_t utime, stime; ++ struct task_struct *t; ++ ++ times->utime = sig->utime; ++ times->stime = sig->stime; ++ times->sum_exec_runtime = sig->sum_sched_runtime; ++ ++ rcu_read_lock(); ++ /* make sure we can trust tsk->thread_group list */ ++ if (!likely(pid_alive(tsk))) ++ goto out; ++ ++ t = tsk; ++ do { ++ task_cputime(t, &utime, &stime); ++ times->utime += utime; ++ times->stime += stime; ++ times->sum_exec_runtime += task_sched_runtime(t); ++ } while_each_thread(tsk, t); ++out: ++ rcu_read_unlock(); ++} ++ ++/* ++ * On each tick, see what percentage of that tick was attributed to each ++ * component and add the percentage to the _pc values. Once a _pc value has ++ * accumulated one tick's worth, account for that. This means the total ++ * percentage of load components will always be 128 (pseudo 100) per tick. 
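++ *
++ * Worked example (assuming HZ=1000, i.e. a 1,000,000ns tick): a task that
++ * ran for 250,000ns of a tick is charged 250000 * 128 / 1000000 = 32
++ * pseudo-percent via NS_TO_PC() below; once a _pc counter reaches 128, a
++ * whole jiffy is banked and the remainder carries over via the % 128 steps.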
++ */
++static void pc_idle_time(struct rq *rq, struct task_struct *idle, unsigned long pc)
++{
++ u64 *cpustat = kcpustat_this_cpu->cpustat;
++
++ if (atomic_read(&rq->nr_iowait) > 0) {
++ rq->iowait_pc += pc;
++ if (rq->iowait_pc >= 128) {
++ cpustat[CPUTIME_IOWAIT] += (__force u64)cputime_one_jiffy * rq->iowait_pc / 128;
++ rq->iowait_pc %= 128;
++ }
++ } else {
++ rq->idle_pc += pc;
++ if (rq->idle_pc >= 128) {
++ cpustat[CPUTIME_IDLE] += (__force u64)cputime_one_jiffy * rq->idle_pc / 128;
++ rq->idle_pc %= 128;
++ }
++ }
++ acct_update_integrals(idle);
++}
++
++static void
++pc_system_time(struct rq *rq, struct task_struct *p, int hardirq_offset,
++ unsigned long pc, unsigned long ns)
++{
++ u64 *cpustat = kcpustat_this_cpu->cpustat;
++ cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
++
++ p->stime_pc += pc;
++ if (p->stime_pc >= 128) {
++ int jiffs = p->stime_pc / 128;
++
++ p->stime_pc %= 128;
++ p->stime += (__force u64)cputime_one_jiffy * jiffs;
++ p->stimescaled += one_jiffy_scaled * jiffs;
++ account_group_system_time(p, cputime_one_jiffy * jiffs);
++ }
++ p->sched_time += ns;
++ /*
++ * Do not update the cputimer if the task is already released by
++ * release_task().
++ *
++ * This could be executed if a tick happens when a task is inside
++ * do_exit() between the call to release_task() and its final
++ * schedule() call for autoreaping tasks.
++ */
++ if (likely(p->sighand))
++ account_group_exec_runtime(p, ns);
++
++ if (hardirq_count() - hardirq_offset) {
++ rq->irq_pc += pc;
++ if (rq->irq_pc >= 128) {
++ cpustat[CPUTIME_IRQ] += (__force u64)cputime_one_jiffy * rq->irq_pc / 128;
++ rq->irq_pc %= 128;
++ }
++ } else if (in_serving_softirq()) {
++ rq->softirq_pc += pc;
++ if (rq->softirq_pc >= 128) {
++ cpustat[CPUTIME_SOFTIRQ] += (__force u64)cputime_one_jiffy * rq->softirq_pc / 128;
++ rq->softirq_pc %= 128;
++ }
++ } else {
++ rq->system_pc += pc;
++ if (rq->system_pc >= 128) {
++ cpustat[CPUTIME_SYSTEM] += (__force u64)cputime_one_jiffy * rq->system_pc / 128;
++ rq->system_pc %= 128;
++ }
++ }
++ acct_update_integrals(p);
++}
++
++static void pc_user_time(struct rq *rq, struct task_struct *p,
++ unsigned long pc, unsigned long ns)
++{
++ u64 *cpustat = kcpustat_this_cpu->cpustat;
++ cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
++
++ p->utime_pc += pc;
++ if (p->utime_pc >= 128) {
++ int jiffs = p->utime_pc / 128;
++
++ p->utime_pc %= 128;
++ p->utime += (__force u64)cputime_one_jiffy * jiffs;
++ p->utimescaled += one_jiffy_scaled * jiffs;
++ account_group_user_time(p, cputime_one_jiffy * jiffs);
++ }
++ p->sched_time += ns;
++ /*
++ * Do not update the cputimer if the task is already released by
++ * release_task().
++ *
++ * It would be preferable to defer the autoreap release_task() until
++ * after the last context switch, but that is harder to do.
++ */
++ if (likely(p->sighand))
++ account_group_exec_runtime(p, ns);
++
++ if (this_cpu_ksoftirqd() == p) {
++ /*
++ * ksoftirqd time does not get accounted in cpu_softirq_time.
++ * So, we have to handle it separately here.
++ */ ++ rq->softirq_pc += pc; ++ if (rq->softirq_pc >= 128) { ++ cpustat[CPUTIME_SOFTIRQ] += (__force u64)cputime_one_jiffy * rq->softirq_pc / 128; ++ rq->softirq_pc %= 128; ++ } ++ } ++ ++ if (TASK_NICE(p) > 0 || idleprio_task(p)) { ++ rq->nice_pc += pc; ++ if (rq->nice_pc >= 128) { ++ cpustat[CPUTIME_NICE] += (__force u64)cputime_one_jiffy * rq->nice_pc / 128; ++ rq->nice_pc %= 128; ++ } ++ } else { ++ rq->user_pc += pc; ++ if (rq->user_pc >= 128) { ++ cpustat[CPUTIME_USER] += (__force u64)cputime_one_jiffy * rq->user_pc / 128; ++ rq->user_pc %= 128; ++ } ++ } ++ acct_update_integrals(p); ++} ++ ++/* ++ * Convert nanoseconds to pseudo percentage of one tick. Use 128 for fast ++ * shifts instead of 100 ++ */ ++#define NS_TO_PC(NS) (NS * 128 / JIFFY_NS) ++ ++/* ++ * This is called on clock ticks. ++ * Bank in p->sched_time the ns elapsed since the last tick or switch. ++ * CPU scheduler quota accounting is also performed here in microseconds. ++ */ ++static void ++update_cpu_clock_tick(struct rq *rq, struct task_struct *p) ++{ ++ long account_ns = rq->clock_task - rq->rq_last_ran; ++ struct task_struct *idle = rq->idle; ++ unsigned long account_pc; ++ ++ if (unlikely(account_ns < 0) || steal_account_process_tick()) ++ goto ts_account; ++ ++ account_pc = NS_TO_PC(account_ns); ++ ++ /* Accurate tick timekeeping */ ++ if (user_mode(get_irq_regs())) ++ pc_user_time(rq, p, account_pc, account_ns); ++ else if (p != idle || (irq_count() != HARDIRQ_OFFSET)) ++ pc_system_time(rq, p, HARDIRQ_OFFSET, ++ account_pc, account_ns); ++ else ++ pc_idle_time(rq, idle, account_pc); ++ ++ if (sched_clock_irqtime) ++ irqtime_account_hi_si(); ++ ++ts_account: ++ /* time_slice accounting is done in usecs to avoid overflow on 32bit */ ++ if (rq->rq_policy != SCHED_FIFO && p != idle) { ++ s64 time_diff = rq->clock - rq->timekeep_clock; ++ ++ niffy_diff(&time_diff, 1); ++ rq->rq_time_slice -= NS_TO_US(time_diff); ++ } ++ ++ rq->rq_last_ran = rq->clock_task; ++ rq->timekeep_clock = rq->clock; ++} ++ ++/* ++ * This is called on context switches. ++ * Bank in p->sched_time the ns elapsed since the last tick or switch. ++ * CPU scheduler quota accounting is also performed here in microseconds. ++ */ ++static void ++update_cpu_clock_switch(struct rq *rq, struct task_struct *p) ++{ ++ long account_ns = rq->clock_task - rq->rq_last_ran; ++ struct task_struct *idle = rq->idle; ++ unsigned long account_pc; ++ ++ if (unlikely(account_ns < 0)) ++ goto ts_account; ++ ++ account_pc = NS_TO_PC(account_ns); ++ ++ /* Accurate subtick timekeeping */ ++ if (p != idle) { ++ pc_user_time(rq, p, account_pc, account_ns); ++ } ++ else ++ pc_idle_time(rq, idle, account_pc); ++ ++ts_account: ++ /* time_slice accounting is done in usecs to avoid overflow on 32bit */ ++ if (rq->rq_policy != SCHED_FIFO && p != idle) { ++ s64 time_diff = rq->clock - rq->timekeep_clock; ++ ++ niffy_diff(&time_diff, 1); ++ rq->rq_time_slice -= NS_TO_US(time_diff); ++ } ++ ++ rq->rq_last_ran = rq->clock_task; ++ rq->timekeep_clock = rq->clock; ++} ++ ++/* ++ * Return any ns on the sched_clock that have not yet been accounted in ++ * @p in case that task is currently running. ++ * ++ * Called with task_grq_lock() held. 
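++ *
++ * For any task other than rq->curr this is simply 0, since all of its
++ * runtime was banked when it last switched out; for rq->curr it is the
++ * ns elapsed since rq_last_ran was stamped at the previous tick or
++ * context switch.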
++ */ ++static u64 do_task_delta_exec(struct task_struct *p, struct rq *rq) ++{ ++ u64 ns = 0; ++ ++ if (p == rq->curr) { ++ update_clocks(rq); ++ ns = rq->clock_task - rq->rq_last_ran; ++ if (unlikely((s64)ns < 0)) ++ ns = 0; ++ } ++ ++ return ns; ++} ++ ++unsigned long long task_delta_exec(struct task_struct *p) ++{ ++ unsigned long flags; ++ struct rq *rq; ++ u64 ns; ++ ++ rq = task_grq_lock(p, &flags); ++ ns = do_task_delta_exec(p, rq); ++ task_grq_unlock(&flags); ++ ++ return ns; ++} ++ ++/* ++ * Return accounted runtime for the task. ++ * Return separately the current's pending runtime that have not been ++ * accounted yet. ++ * ++ * grq lock already acquired. ++ */ ++unsigned long long task_sched_runtime(struct task_struct *p) ++{ ++ unsigned long flags; ++ struct rq *rq; ++ u64 ns; ++ ++ rq = task_grq_lock(p, &flags); ++ ns = p->sched_time + do_task_delta_exec(p, rq); ++ task_grq_unlock(&flags); ++ ++ return ns; ++} ++ ++/* ++ * Return accounted runtime for the task. ++ * Return separately the current's pending runtime that have not been ++ * accounted yet. ++ */ ++unsigned long long task_sched_runtime_nodelta(struct task_struct *p, unsigned long long *delta) ++{ ++ unsigned long flags; ++ struct rq *rq; ++ u64 ns; ++ ++ rq = task_grq_lock(p, &flags); ++ ns = p->sched_time; ++ *delta = do_task_delta_exec(p, rq); ++ task_grq_unlock(&flags); ++ ++ return ns; ++} ++ ++/* Compatibility crap */ ++void account_user_time(struct task_struct *p, cputime_t cputime, ++ cputime_t cputime_scaled) ++{ ++} ++ ++void account_idle_time(cputime_t cputime) ++{ ++} ++ ++void update_cpu_load_nohz(void) ++{ ++} ++ ++#ifdef CONFIG_NO_HZ_COMMON ++void calc_load_enter_idle(void) ++{ ++} ++ ++void calc_load_exit_idle(void) ++{ ++} ++#endif /* CONFIG_NO_HZ_COMMON */ ++ ++/* ++ * Account guest cpu time to a process. ++ * @p: the process that the cpu time gets accounted to ++ * @cputime: the cpu time spent in virtual machine since the last update ++ * @cputime_scaled: cputime scaled by cpu frequency ++ */ ++static void account_guest_time(struct task_struct *p, cputime_t cputime, ++ cputime_t cputime_scaled) ++{ ++ u64 *cpustat = kcpustat_this_cpu->cpustat; ++ ++ /* Add guest time to process. */ ++ p->utime += (__force u64)cputime; ++ p->utimescaled += (__force u64)cputime_scaled; ++ account_group_user_time(p, cputime); ++ p->gtime += (__force u64)cputime; ++ ++ /* Add guest time to cpustat. */ ++ if (TASK_NICE(p) > 0) { ++ cpustat[CPUTIME_NICE] += (__force u64)cputime; ++ cpustat[CPUTIME_GUEST_NICE] += (__force u64)cputime; ++ } else { ++ cpustat[CPUTIME_USER] += (__force u64)cputime; ++ cpustat[CPUTIME_GUEST] += (__force u64)cputime; ++ } ++} ++ ++/* ++ * Account system cpu time to a process and desired cpustat field ++ * @p: the process that the cpu time gets accounted to ++ * @cputime: the cpu time spent in kernel space since the last update ++ * @cputime_scaled: cputime scaled by cpu frequency ++ * @target_cputime64: pointer to cpustat field that has to be updated ++ */ ++static inline ++void __account_system_time(struct task_struct *p, cputime_t cputime, ++ cputime_t cputime_scaled, cputime64_t *target_cputime64) ++{ ++ /* Add system time to process. */ ++ p->stime += (__force u64)cputime; ++ p->stimescaled += (__force u64)cputime_scaled; ++ account_group_system_time(p, cputime); ++ ++ /* Add system time to cpustat. */ ++ *target_cputime64 += (__force u64)cputime; ++ ++ /* Account for system time used */ ++ acct_update_integrals(p); ++} ++ ++/* ++ * Account system cpu time to a process. 
++ * @p: the process that the cpu time gets accounted to
++ * @hardirq_offset: the offset to subtract from hardirq_count()
++ * @cputime: the cpu time spent in kernel space since the last update
++ * @cputime_scaled: cputime scaled by cpu frequency
++ * This is for guest only now.
++ */
++void account_system_time(struct task_struct *p, int hardirq_offset,
++ cputime_t cputime, cputime_t cputime_scaled)
++{
++
++ if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0))
++ account_guest_time(p, cputime, cputime_scaled);
++}
++
++/*
++ * Account for involuntary wait time.
++ * @cputime: the cpu time spent in involuntary wait
++ */
++void account_steal_time(cputime_t cputime)
++{
++ u64 *cpustat = kcpustat_this_cpu->cpustat;
++
++ cpustat[CPUTIME_STEAL] += (__force u64)cputime;
++}
++
++/*
++ * Account for idle time.
++ * @cputime: the cpu time spent in idle wait
++ */
++static void account_idle_times(cputime_t cputime)
++{
++ u64 *cpustat = kcpustat_this_cpu->cpustat;
++ struct rq *rq = this_rq();
++
++ if (atomic_read(&rq->nr_iowait) > 0)
++ cpustat[CPUTIME_IOWAIT] += (__force u64)cputime;
++ else
++ cpustat[CPUTIME_IDLE] += (__force u64)cputime;
++}
++
++#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
++
++void account_process_tick(struct task_struct *p, int user_tick)
++{
++}
++
++/*
++ * Account multiple ticks of steal time.
++ * @ticks: number of stolen ticks
++ */
++void account_steal_ticks(unsigned long ticks)
++{
++ account_steal_time(jiffies_to_cputime(ticks));
++}
++
++/*
++ * Account multiple ticks of idle time.
++ * @ticks: number of ticks of idle time
++ */
++void account_idle_ticks(unsigned long ticks)
++{
++ account_idle_times(jiffies_to_cputime(ticks));
++}
++#endif
++
++static inline void grq_iso_lock(void)
++ __acquires(grq.iso_lock)
++{
++ raw_spin_lock(&grq.iso_lock);
++}
++
++static inline void grq_iso_unlock(void)
++ __releases(grq.iso_lock)
++{
++ raw_spin_unlock(&grq.iso_lock);
++}
++
++/*
++ * Functions to test for when SCHED_ISO tasks have used their allocated
++ * quota as real time scheduling and convert them back to SCHED_NORMAL.
++ * Where possible, the data is tested lockless, to avoid grabbing iso_lock
++ * because the occasional inaccurate result won't matter. However the
++ * tick data is only ever modified under lock. iso_refractory is only ever
++ * set to 0 or 1, so it's not worth grabbing the lock yet again for that.
++ */
++static bool set_iso_refractory(void)
++{
++ grq.iso_refractory = true;
++ return grq.iso_refractory;
++}
++
++static bool clear_iso_refractory(void)
++{
++ grq.iso_refractory = false;
++ return grq.iso_refractory;
++}
++
++/*
++ * Test if SCHED_ISO tasks have run longer than their allotted period as RT
++ * tasks and set the refractory flag if necessary. There is 10% hysteresis
++ * for unsetting the flag. 115/128 is ~90/100 as a fast shift instead of a
++ * slow division.
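++ *
++ * Worked example (assuming sched_iso_cpu = 70, believed to be the usual
++ * default): the refractory flag is set once iso_ticks exceeds
++ * ISO_PERIOD * 70, and is cleared again only when iso_ticks drops below
++ * ISO_PERIOD * (70 * 115 / 128) = ISO_PERIOD * 62, roughly 90% of the
++ * trigger threshold.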
++ */ ++static bool test_ret_isorefractory(struct rq *rq) ++{ ++ if (likely(!grq.iso_refractory)) { ++ if (grq.iso_ticks > ISO_PERIOD * sched_iso_cpu) ++ return set_iso_refractory(); ++ } else { ++ if (grq.iso_ticks < ISO_PERIOD * (sched_iso_cpu * 115 / 128)) ++ return clear_iso_refractory(); ++ } ++ return grq.iso_refractory; ++} ++ ++static void iso_tick(void) ++{ ++ grq_iso_lock(); ++ grq.iso_ticks += 100; ++ grq_iso_unlock(); ++} ++ ++/* No SCHED_ISO task was running so decrease rq->iso_ticks */ ++static inline void no_iso_tick(void) ++{ ++ if (grq.iso_ticks) { ++ grq_iso_lock(); ++ grq.iso_ticks -= grq.iso_ticks / ISO_PERIOD + 1; ++ if (unlikely(grq.iso_refractory && grq.iso_ticks < ++ ISO_PERIOD * (sched_iso_cpu * 115 / 128))) ++ clear_iso_refractory(); ++ grq_iso_unlock(); ++ } ++} ++ ++/* This manages tasks that have run out of timeslice during a scheduler_tick */ ++static void task_running_tick(struct rq *rq) ++{ ++ struct task_struct *p; ++ ++ /* ++ * If a SCHED_ISO task is running we increment the iso_ticks. In ++ * order to prevent SCHED_ISO tasks from causing starvation in the ++ * presence of true RT tasks we account those as iso_ticks as well. ++ */ ++ if ((rt_queue(rq) || (iso_queue(rq) && !grq.iso_refractory))) { ++ if (grq.iso_ticks <= (ISO_PERIOD * 128) - 128) ++ iso_tick(); ++ } else ++ no_iso_tick(); ++ ++ if (iso_queue(rq)) { ++ if (unlikely(test_ret_isorefractory(rq))) { ++ if (rq_running_iso(rq)) { ++ /* ++ * SCHED_ISO task is running as RT and limit ++ * has been hit. Force it to reschedule as ++ * SCHED_NORMAL by zeroing its time_slice ++ */ ++ rq->rq_time_slice = 0; ++ } ++ } ++ } ++ ++ /* SCHED_FIFO tasks never run out of timeslice. */ ++ if (rq->rq_policy == SCHED_FIFO) ++ return; ++ /* ++ * Tasks that were scheduled in the first half of a tick are not ++ * allowed to run into the 2nd half of the next tick if they will ++ * run out of time slice in the interim. Otherwise, if they have ++ * less than RESCHED_US μs of time slice left they will be rescheduled. ++ */ ++ if (rq->dither) { ++ if (rq->rq_time_slice > HALF_JIFFY_US) ++ return; ++ else ++ rq->rq_time_slice = 0; ++ } else if (rq->rq_time_slice >= RESCHED_US) ++ return; ++ ++ /* p->time_slice < RESCHED_US. We only modify task_struct under grq lock */ ++ p = rq->curr; ++ grq_lock(); ++ requeue_task(p); ++ set_tsk_need_resched(p); ++ grq_unlock(); ++} ++ ++/* ++ * This function gets called by the timer code, with HZ frequency. ++ * We call it with interrupts disabled. The data modified is all ++ * local to struct rq so we don't need to grab grq lock. ++ */ ++void scheduler_tick(void) ++{ ++ int cpu __maybe_unused = smp_processor_id(); ++ struct rq *rq = cpu_rq(cpu); ++ ++ sched_clock_tick(); ++ /* grq lock not grabbed, so only update rq clock */ ++ update_rq_clock(rq); ++ update_cpu_clock_tick(rq, rq->curr); ++ if (!rq_idle(rq)) ++ task_running_tick(rq); ++ else ++ no_iso_tick(); ++ rq->last_tick = rq->clock; ++ perf_event_task_tick(); ++} ++ ++notrace unsigned long get_parent_ip(unsigned long addr) ++{ ++ if (in_lock_functions(addr)) { ++ addr = CALLER_ADDR2; ++ if (in_lock_functions(addr)) ++ addr = CALLER_ADDR3; ++ } ++ return addr; ++} ++ ++#if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \ ++ defined(CONFIG_PREEMPT_TRACER)) ++void __kprobes add_preempt_count(int val) ++{ ++#ifdef CONFIG_DEBUG_PREEMPT ++ /* ++ * Underflow? 
++ */
++ if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
++ return;
++#endif
++ preempt_count() += val;
++#ifdef CONFIG_DEBUG_PREEMPT
++ /*
++ * Spinlock count overflowing soon?
++ */
++ DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
++ PREEMPT_MASK - 10);
++#endif
++ if (preempt_count() == val)
++ trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
++}
++EXPORT_SYMBOL(add_preempt_count);
++
++void __kprobes sub_preempt_count(int val)
++{
++#ifdef CONFIG_DEBUG_PREEMPT
++ /*
++ * Underflow?
++ */
++ if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
++ return;
++ /*
++ * Is the spinlock portion underflowing?
++ */
++ if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
++ !(preempt_count() & PREEMPT_MASK)))
++ return;
++#endif
++
++ if (preempt_count() == val)
++ trace_preempt_on(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
++ preempt_count() -= val;
++}
++EXPORT_SYMBOL(sub_preempt_count);
++#endif
++
++/*
++ * Deadline is "now" in niffies + (offset by priority). Setting the deadline
++ * is the key to everything. It distributes cpu fairly amongst tasks of the
++ * same nice value, it proportions cpu according to nice level, it means the
++ * task that last woke up the longest ago has the earliest deadline, thus
++ * ensuring that interactive tasks get low latency on wake up. The CPU
++ * proportion works out to the square of the virtual deadline difference, so
++ * this equation gives a nice 19 task roughly 3% CPU relative to nice 0:
++ * the deadline offset grows about 10% per nice level, so nice 19 is ~6
++ * times nice 0, and the square of that (~37) works out to ~3%.
++ */
++static inline u64 prio_deadline_diff(int user_prio)
++{
++ return (prio_ratios[user_prio] * rr_interval * (MS_TO_NS(1) / 128));
++}
++
++static inline u64 task_deadline_diff(struct task_struct *p)
++{
++ return prio_deadline_diff(TASK_USER_PRIO(p));
++}
++
++static inline u64 static_deadline_diff(int static_prio)
++{
++ return prio_deadline_diff(USER_PRIO(static_prio));
++}
++
++static inline int longest_deadline_diff(void)
++{
++ return prio_deadline_diff(39);
++}
++
++static inline int ms_longest_deadline_diff(void)
++{
++ return NS_TO_MS(longest_deadline_diff());
++}
++
++/*
++ * The time_slice is only refilled when it is empty and that is when we set a
++ * new deadline.
++ */
++static void time_slice_expired(struct task_struct *p)
++{
++ p->time_slice = timeslice();
++ p->deadline = grq.niffies + task_deadline_diff(p);
++}
++
++/*
++ * Timeslices below RESCHED_US are considered as good as expired as there's no
++ * point rescheduling when there's so little time left. SCHED_BATCH tasks
++ * have been flagged as not latency sensitive and likely to be fully CPU
++ * bound so every time they're rescheduled they have their time_slice
++ * refilled, but get a new later deadline to have little effect on
++ * SCHED_NORMAL tasks.
++ */
++static inline void check_deadline(struct task_struct *p)
++{
++ if (p->time_slice < RESCHED_US || batch_task(p))
++ time_slice_expired(p);
++}
++
++#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG)
++
++/*
++ * Scheduler queue bitmap specific find next bit.
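++ *
++ * Equivalent to find_next_bit(addr, PRIO_LIMIT, offset): e.g. with only
++ * bits 40 and 100 set, an offset of 0 returns 40, an offset of 41 returns
++ * 100, and if no bit at or above the offset is set, the size (PRIO_LIMIT)
++ * is returned.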
++ */ ++static inline unsigned long ++next_sched_bit(const unsigned long *addr, unsigned long offset) ++{ ++ const unsigned long *p; ++ unsigned long result; ++ unsigned long size; ++ unsigned long tmp; ++ ++ size = PRIO_LIMIT; ++ if (offset >= size) ++ return size; ++ ++ p = addr + BITOP_WORD(offset); ++ result = offset & ~(BITS_PER_LONG-1); ++ size -= result; ++ offset %= BITS_PER_LONG; ++ if (offset) { ++ tmp = *(p++); ++ tmp &= (~0UL << offset); ++ if (size < BITS_PER_LONG) ++ goto found_first; ++ if (tmp) ++ goto found_middle; ++ size -= BITS_PER_LONG; ++ result += BITS_PER_LONG; ++ } ++ while (size & ~(BITS_PER_LONG-1)) { ++ if ((tmp = *(p++))) ++ goto found_middle; ++ result += BITS_PER_LONG; ++ size -= BITS_PER_LONG; ++ } ++ if (!size) ++ return result; ++ tmp = *p; ++ ++found_first: ++ tmp &= (~0UL >> (BITS_PER_LONG - size)); ++ if (tmp == 0UL) /* Are any bits set? */ ++ return result + size; /* Nope. */ ++found_middle: ++ return result + __ffs(tmp); ++} ++ ++/* ++ * O(n) lookup of all tasks in the global runqueue. The real brainfuck ++ * of lock contention and O(n). It's not really O(n) as only the queued, ++ * but not running tasks are scanned, and is O(n) queued in the worst case ++ * scenario only because the right task can be found before scanning all of ++ * them. ++ * Tasks are selected in this order: ++ * Real time tasks are selected purely by their static priority and in the ++ * order they were queued, so the lowest value idx, and the first queued task ++ * of that priority value is chosen. ++ * If no real time tasks are found, the SCHED_ISO priority is checked, and ++ * all SCHED_ISO tasks have the same priority value, so they're selected by ++ * the earliest deadline value. ++ * If no SCHED_ISO tasks are found, SCHED_NORMAL tasks are selected by the ++ * earliest deadline. ++ * Finally if no SCHED_NORMAL tasks are found, SCHED_IDLEPRIO tasks are ++ * selected by the earliest deadline. ++ */ ++static inline struct ++task_struct *earliest_deadline_task(struct rq *rq, int cpu, struct task_struct *idle) ++{ ++ struct task_struct *edt = NULL; ++ unsigned long idx = -1; ++ ++ do { ++ struct list_head *queue; ++ struct task_struct *p; ++ u64 earliest_deadline; ++ ++ idx = next_sched_bit(grq.prio_bitmap, ++idx); ++ if (idx >= PRIO_LIMIT) ++ return idle; ++ queue = grq.queue + idx; ++ ++ if (idx < MAX_RT_PRIO) { ++ /* We found an rt task */ ++ list_for_each_entry(p, queue, run_list) { ++ /* Make sure cpu affinity is ok */ ++ if (needs_other_cpu(p, cpu)) ++ continue; ++ edt = p; ++ goto out_take; ++ } ++ /* ++ * None of the RT tasks at this priority can run on ++ * this cpu ++ */ ++ continue; ++ } ++ ++ /* ++ * No rt tasks. Find the earliest deadline task. Now we're in ++ * O(n) territory. ++ */ ++ earliest_deadline = ~0ULL; ++ list_for_each_entry(p, queue, run_list) { ++ u64 dl; ++ ++ /* Make sure cpu affinity is ok */ ++ if (needs_other_cpu(p, cpu)) ++ continue; ++ ++ /* ++ * Soft affinity happens here by not scheduling a task ++ * with its sticky flag set that ran on a different CPU ++ * last when the CPU is scaling, or by greatly biasing ++ * against its deadline when not, based on cpu cache ++ * locality. 
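++ *
++ * The shift below doubles the effective deadline once per step of
++ * locality_diff(p, rq); e.g. at distance 2 a sticky task is compared at
++ * four times its real deadline, so it must be much earlier than any
++ * cache-local competitor before migrating it pays off.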
++ */ ++ if (task_sticky(p) && task_rq(p) != rq) { ++ if (scaling_rq(rq)) ++ continue; ++ dl = p->deadline << locality_diff(p, rq); ++ } else ++ dl = p->deadline; ++ ++ if (deadline_before(dl, earliest_deadline)) { ++ earliest_deadline = dl; ++ edt = p; ++ } ++ } ++ } while (!edt); ++ ++out_take: ++ take_task(cpu, edt); ++ return edt; ++} ++ ++ ++/* ++ * Print scheduling while atomic bug: ++ */ ++static noinline void __schedule_bug(struct task_struct *prev) ++{ ++ if (oops_in_progress) ++ return; ++ ++ printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n", ++ prev->comm, prev->pid, preempt_count()); ++ ++ debug_show_held_locks(prev); ++ print_modules(); ++ if (irqs_disabled()) ++ print_irqtrace_events(prev); ++ dump_stack(); ++ add_taint(TAINT_WARN, LOCKDEP_STILL_OK); ++} ++ ++/* ++ * Various schedule()-time debugging checks and statistics: ++ */ ++static inline void schedule_debug(struct task_struct *prev) ++{ ++ /* ++ * Test if we are atomic. Since do_exit() needs to call into ++ * schedule() atomically, we ignore that path for now. ++ * Otherwise, whine if we are scheduling when we should not be. ++ */ ++ if (unlikely(in_atomic_preempt_off() && !prev->exit_state)) ++ __schedule_bug(prev); ++ rcu_sleep_check(); ++ ++ profile_hit(SCHED_PROFILING, __builtin_return_address(0)); ++ ++ schedstat_inc(this_rq(), sched_count); ++} ++ ++/* ++ * The currently running task's information is all stored in rq local data ++ * which is only modified by the local CPU, thereby allowing the data to be ++ * changed without grabbing the grq lock. ++ */ ++static inline void set_rq_task(struct rq *rq, struct task_struct *p) ++{ ++ rq->rq_time_slice = p->time_slice; ++ rq->rq_deadline = p->deadline; ++ rq->rq_last_ran = p->last_ran = rq->clock_task; ++ rq->rq_policy = p->policy; ++ rq->rq_prio = p->prio; ++ if (p != rq->idle) ++ rq->rq_running = true; ++ else ++ rq->rq_running = false; ++} ++ ++static void reset_rq_task(struct rq *rq, struct task_struct *p) ++{ ++ rq->rq_policy = p->policy; ++ rq->rq_prio = p->prio; ++} ++ ++/* ++ * schedule() is the main scheduler function. ++ * ++ * The main means of driving the scheduler and thus entering this function are: ++ * ++ * 1. Explicit blocking: mutex, semaphore, waitqueue, etc. ++ * ++ * 2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return ++ * paths. For example, see arch/x86/entry_64.S. ++ * ++ * To drive preemption between tasks, the scheduler sets the flag in timer ++ * interrupt handler scheduler_tick(). ++ * ++ * 3. Wakeups don't really cause entry into schedule(). They add a ++ * task to the run-queue and that's it. ++ * ++ * Now, if the new task added to the run-queue preempts the current ++ * task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets ++ * called on the nearest possible occasion: ++ * ++ * - If the kernel is preemptible (CONFIG_PREEMPT=y): ++ * ++ * - in syscall or exception context, at the next outmost ++ * preempt_enable(). (this might be as soon as the wake_up()'s ++ * spin_unlock()!) 
++ * ++ * - in IRQ context, return from interrupt-handler to ++ * preemptible context ++ * ++ * - If the kernel is not preemptible (CONFIG_PREEMPT is not set) ++ * then at the next: ++ * ++ * - cond_resched() call ++ * - explicit schedule() call ++ * - return from syscall or exception to user-space ++ * - return from interrupt-handler to user-space ++ */ ++asmlinkage void __sched schedule(void) ++{ ++ struct task_struct *prev, *next, *idle; ++ unsigned long *switch_count; ++ bool deactivate; ++ struct rq *rq; ++ int cpu; ++ ++need_resched: ++ preempt_disable(); ++ cpu = smp_processor_id(); ++ rq = cpu_rq(cpu); ++ rcu_note_context_switch(cpu); ++ prev = rq->curr; ++ ++ deactivate = false; ++ schedule_debug(prev); ++ ++ grq_lock_irq(); ++ ++ switch_count = &prev->nivcsw; ++ if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) { ++ if (unlikely(signal_pending_state(prev->state, prev))) { ++ prev->state = TASK_RUNNING; ++ } else { ++ deactivate = true; ++ /* ++ * If a worker is going to sleep, notify and ++ * ask workqueue whether it wants to wake up a ++ * task to maintain concurrency. If so, wake ++ * up the task. ++ */ ++ if (prev->flags & PF_WQ_WORKER) { ++ struct task_struct *to_wakeup; ++ ++ to_wakeup = wq_worker_sleeping(prev, cpu); ++ if (to_wakeup) { ++ /* This shouldn't happen, but does */ ++ if (unlikely(to_wakeup == prev)) ++ deactivate = false; ++ else ++ try_to_wake_up_local(to_wakeup); ++ } ++ } ++ } ++ switch_count = &prev->nvcsw; ++ } ++ ++ /* ++ * If we are going to sleep and we have plugged IO queued, make ++ * sure to submit it to avoid deadlocks. ++ */ ++ if (unlikely(deactivate && blk_needs_flush_plug(prev))) { ++ grq_unlock_irq(); ++ preempt_enable_no_resched(); ++ blk_schedule_flush_plug(prev); ++ goto need_resched; ++ } ++ ++ update_clocks(rq); ++ update_cpu_clock_switch(rq, prev); ++ if (rq->clock - rq->last_tick > HALF_JIFFY_NS) ++ rq->dither = false; ++ else ++ rq->dither = true; ++ ++ clear_tsk_need_resched(prev); ++ ++ idle = rq->idle; ++ if (idle != prev) { ++ /* Update all the information stored on struct rq */ ++ prev->time_slice = rq->rq_time_slice; ++ prev->deadline = rq->rq_deadline; ++ check_deadline(prev); ++ prev->last_ran = rq->clock_task; ++ ++ /* Task changed affinity off this CPU */ ++ if (needs_other_cpu(prev, cpu)) { ++ if (!deactivate) ++ resched_suitable_idle(prev); ++ } else if (!deactivate) { ++ if (!queued_notrunning()) { ++ /* ++ * We now know prev is the only thing that is ++ * awaiting CPU so we can bypass rechecking for ++ * the earliest deadline task and just run it ++ * again. ++ */ ++ set_rq_task(rq, prev); ++ grq_unlock_irq(); ++ goto rerun_prev_unlocked; ++ } else ++ swap_sticky(rq, cpu, prev); ++ } ++ return_task(prev, deactivate); ++ } ++ ++ if (unlikely(!queued_notrunning())) { ++ /* ++ * This CPU is now truly idle as opposed to when idle is ++ * scheduled as a high priority task in its own right. ++ */ ++ next = idle; ++ schedstat_inc(rq, sched_goidle); ++ set_cpuidle_map(cpu); ++ } else { ++ next = earliest_deadline_task(rq, cpu, idle); ++ if (likely(next->prio != PRIO_LIMIT)) ++ clear_cpuidle_map(cpu); ++ else ++ set_cpuidle_map(cpu); ++ } ++ ++ if (likely(prev != next)) { ++ resched_suitable_idle(prev); ++ /* ++ * Don't stick tasks when a real time task is going to run as ++ * they may literally get stuck. 
++ */ ++ if (rt_task(next)) ++ unstick_task(rq, prev); ++ set_rq_task(rq, next); ++ grq.nr_switches++; ++ prev->on_cpu = false; ++ next->on_cpu = true; ++ rq->curr = next; ++ ++*switch_count; ++ ++ context_switch(rq, prev, next); /* unlocks the grq */ ++ /* ++ * The context switch have flipped the stack from under us ++ * and restored the local variables which were saved when ++ * this task called schedule() in the past. prev == current ++ * is still correct, but it can be moved to another cpu/rq. ++ */ ++ cpu = smp_processor_id(); ++ rq = cpu_rq(cpu); ++ idle = rq->idle; ++ } else ++ grq_unlock_irq(); ++ ++rerun_prev_unlocked: ++ sched_preempt_enable_no_resched(); ++ if (unlikely(need_resched())) ++ goto need_resched; ++} ++EXPORT_SYMBOL(schedule); ++ ++#ifdef CONFIG_RCU_USER_QS ++asmlinkage void __sched schedule_user(void) ++{ ++ /* ++ * If we come here after a random call to set_need_resched(), ++ * or we have been woken up remotely but the IPI has not yet arrived, ++ * we haven't yet exited the RCU idle mode. Do it here manually until ++ * we find a better solution. ++ */ ++ user_exit(); ++ schedule(); ++ user_enter(); ++} ++#endif ++ ++/** ++ * schedule_preempt_disabled - called with preemption disabled ++ * ++ * Returns with preemption disabled. Note: preempt_count must be 1 ++ */ ++void __sched schedule_preempt_disabled(void) ++{ ++ sched_preempt_enable_no_resched(); ++ schedule(); ++ preempt_disable(); ++} ++ ++#ifdef CONFIG_PREEMPT ++/* ++ * this is the entry point to schedule() from in-kernel preemption ++ * off of preempt_enable. Kernel preemptions off return from interrupt ++ * occur there and call schedule directly. ++ */ ++asmlinkage void __sched notrace preempt_schedule(void) ++{ ++ struct thread_info *ti = current_thread_info(); ++ ++ /* ++ * If there is a non-zero preempt_count or interrupts are disabled, ++ * we do not want to preempt the current task. Just return.. ++ */ ++ if (likely(ti->preempt_count || irqs_disabled())) ++ return; ++ ++ do { ++ add_preempt_count_notrace(PREEMPT_ACTIVE); ++ schedule(); ++ sub_preempt_count_notrace(PREEMPT_ACTIVE); ++ ++ /* ++ * Check again in case we missed a preemption opportunity ++ * between schedule and now. ++ */ ++ barrier(); ++ } while (need_resched()); ++} ++EXPORT_SYMBOL(preempt_schedule); ++ ++/* ++ * this is the entry point to schedule() from kernel preemption ++ * off of irq context. ++ * Note, that this is called and return with irqs disabled. This will ++ * protect us against recursive calling from irq. ++ */ ++asmlinkage void __sched preempt_schedule_irq(void) ++{ ++ struct thread_info *ti = current_thread_info(); ++ enum ctx_state prev_state; ++ ++ /* Catch callers which need to be fixed */ ++ BUG_ON(ti->preempt_count || !irqs_disabled()); ++ ++ prev_state = exception_enter(); ++ ++ do { ++ add_preempt_count(PREEMPT_ACTIVE); ++ local_irq_enable(); ++ schedule(); ++ local_irq_disable(); ++ sub_preempt_count(PREEMPT_ACTIVE); ++ ++ /* ++ * Check again in case we missed a preemption opportunity ++ * between schedule and now. ++ */ ++ barrier(); ++ } while (need_resched()); ++ ++ exception_exit(prev_state); ++} ++ ++#endif /* CONFIG_PREEMPT */ ++ ++int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags, ++ void *key) ++{ ++ return try_to_wake_up(curr->private, mode, wake_flags); ++} ++EXPORT_SYMBOL(default_wake_function); ++ ++/* ++ * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just ++ * wake everything up. 
If it's an exclusive wakeup (nr_exclusive == small +ve ++ * number) then we wake all the non-exclusive tasks and one exclusive task. ++ * ++ * There are circumstances in which we can try to wake a task which has already ++ * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns ++ * zero in this (rare) case, and we handle it by continuing to scan the queue. ++ */ ++static void __wake_up_common(wait_queue_head_t *q, unsigned int mode, ++ int nr_exclusive, int wake_flags, void *key) ++{ ++ struct list_head *tmp, *next; ++ ++ list_for_each_safe(tmp, next, &q->task_list) { ++ wait_queue_t *curr = list_entry(tmp, wait_queue_t, task_list); ++ unsigned int flags = curr->flags; ++ ++ if (curr->func(curr, mode, wake_flags, key) && ++ (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive) ++ break; ++ } ++} ++ ++/** ++ * __wake_up - wake up threads blocked on a waitqueue. ++ * @q: the waitqueue ++ * @mode: which threads ++ * @nr_exclusive: how many wake-one or wake-many threads to wake up ++ * @key: is directly passed to the wakeup function ++ * ++ * It may be assumed that this function implies a write memory barrier before ++ * changing the task state if and only if any tasks are woken up. ++ */ ++void __wake_up(wait_queue_head_t *q, unsigned int mode, ++ int nr_exclusive, void *key) ++{ ++ unsigned long flags; ++ ++ spin_lock_irqsave(&q->lock, flags); ++ __wake_up_common(q, mode, nr_exclusive, 0, key); ++ spin_unlock_irqrestore(&q->lock, flags); ++} ++EXPORT_SYMBOL(__wake_up); ++ ++/* ++ * Same as __wake_up but called with the spinlock in wait_queue_head_t held. ++ */ ++void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr) ++{ ++ __wake_up_common(q, mode, nr, 0, NULL); ++} ++EXPORT_SYMBOL_GPL(__wake_up_locked); ++ ++void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key) ++{ ++ __wake_up_common(q, mode, 1, 0, key); ++} ++EXPORT_SYMBOL_GPL(__wake_up_locked_key); ++ ++/** ++ * __wake_up_sync_key - wake up threads blocked on a waitqueue. ++ * @q: the waitqueue ++ * @mode: which threads ++ * @nr_exclusive: how many wake-one or wake-many threads to wake up ++ * @key: opaque value to be passed to wakeup targets ++ * ++ * The sync wakeup differs that the waker knows that it will schedule ++ * away soon, so while the target thread will be woken up, it will not ++ * be migrated to another CPU - ie. the two threads are 'synchronised' ++ * with each other. This can prevent needless bouncing between CPUs. ++ * ++ * On UP it can prevent extra preemption. ++ * ++ * It may be assumed that this function implies a write memory barrier before ++ * changing the task state if and only if any tasks are woken up. ++ */ ++void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, ++ int nr_exclusive, void *key) ++{ ++ unsigned long flags; ++ int wake_flags = WF_SYNC; ++ ++ if (unlikely(!q)) ++ return; ++ ++ if (unlikely(!nr_exclusive)) ++ wake_flags = 0; ++ ++ spin_lock_irqsave(&q->lock, flags); ++ __wake_up_common(q, mode, nr_exclusive, wake_flags, key); ++ spin_unlock_irqrestore(&q->lock, flags); ++} ++EXPORT_SYMBOL_GPL(__wake_up_sync_key); ++ ++/** ++ * __wake_up_sync - wake up threads blocked on a waitqueue. ++ * @q: the waitqueue ++ * @mode: which threads ++ * @nr_exclusive: how many wake-one or wake-many threads to wake up ++ * ++ * The sync wakeup differs that the waker knows that it will schedule ++ * away soon, so while the target thread will be woken up, it will not ++ * be migrated to another CPU - ie. 
the two threads are 'synchronised' ++ * with each other. This can prevent needless bouncing between CPUs. ++ * ++ * On UP it can prevent extra preemption. ++ */ ++void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive) ++{ ++ unsigned long flags; ++ int sync = 1; ++ ++ if (unlikely(!q)) ++ return; ++ ++ if (unlikely(!nr_exclusive)) ++ sync = 0; ++ ++ spin_lock_irqsave(&q->lock, flags); ++ __wake_up_common(q, mode, nr_exclusive, sync, NULL); ++ spin_unlock_irqrestore(&q->lock, flags); ++} ++EXPORT_SYMBOL_GPL(__wake_up_sync); /* For internal use only */ ++ ++/** ++ * complete: - signals a single thread waiting on this completion ++ * @x: holds the state of this particular completion ++ * ++ * This will wake up a single thread waiting on this completion. Threads will be ++ * awakened in the same order in which they were queued. ++ * ++ * See also complete_all(), wait_for_completion() and related routines. ++ * ++ * It may be assumed that this function implies a write memory barrier before ++ * changing the task state if and only if any tasks are woken up. ++ */ ++void complete(struct completion *x) ++{ ++ unsigned long flags; ++ ++ spin_lock_irqsave(&x->wait.lock, flags); ++ x->done++; ++ __wake_up_common(&x->wait, TASK_NORMAL, 1, 0, NULL); ++ spin_unlock_irqrestore(&x->wait.lock, flags); ++} ++EXPORT_SYMBOL(complete); ++ ++/** ++ * complete_all: - signals all threads waiting on this completion ++ * @x: holds the state of this particular completion ++ * ++ * This will wake up all threads waiting on this particular completion event. ++ * ++ * It may be assumed that this function implies a write memory barrier before ++ * changing the task state if and only if any tasks are woken up. ++ */ ++void complete_all(struct completion *x) ++{ ++ unsigned long flags; ++ ++ spin_lock_irqsave(&x->wait.lock, flags); ++ x->done += UINT_MAX/2; ++ __wake_up_common(&x->wait, TASK_NORMAL, 0, 0, NULL); ++ spin_unlock_irqrestore(&x->wait.lock, flags); ++} ++EXPORT_SYMBOL(complete_all); ++ ++static inline long __sched ++do_wait_for_common(struct completion *x, ++ long (*action)(long), long timeout, int state) ++{ ++ if (!x->done) { ++ DECLARE_WAITQUEUE(wait, current); ++ ++ __add_wait_queue_tail_exclusive(&x->wait, &wait); ++ do { ++ if (signal_pending_state(state, current)) { ++ timeout = -ERESTARTSYS; ++ break; ++ } ++ __set_current_state(state); ++ spin_unlock_irq(&x->wait.lock); ++ timeout = action(timeout); ++ spin_lock_irq(&x->wait.lock); ++ } while (!x->done && timeout); ++ __remove_wait_queue(&x->wait, &wait); ++ if (!x->done) ++ return timeout; ++ } ++ x->done--; ++ return timeout ?: 1; ++} ++ ++static inline long __sched ++__wait_for_common(struct completion *x, ++ long (*action)(long), long timeout, int state) ++{ ++ might_sleep(); ++ ++ spin_lock_irq(&x->wait.lock); ++ timeout = do_wait_for_common(x, action, timeout, state); ++ spin_unlock_irq(&x->wait.lock); ++ return timeout; ++} ++ ++static long __sched ++wait_for_common(struct completion *x, long timeout, int state) ++{ ++ return __wait_for_common(x, schedule_timeout, timeout, state); ++} ++ ++static long __sched ++wait_for_common_io(struct completion *x, long timeout, int state) ++{ ++ return __wait_for_common(x, io_schedule_timeout, timeout, state); ++} ++ ++/** ++ * wait_for_completion: - waits for completion of a task ++ * @x: holds the state of this particular completion ++ * ++ * This waits to be signaled for completion of a specific task. It is NOT ++ * interruptible and there is no timeout. 
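++ *
++ * A minimal usage sketch (illustrative only, not part of this patch),
++ * with one thread waiting for setup work that another thread finishes:
++ *
++ *	static DECLARE_COMPLETION(setup_done);
++ *
++ *	waiter:	wait_for_completion(&setup_done);
++ *	worker:	do_setup(); complete(&setup_done);
++ *
++ * (do_setup() is a made-up name for the work being waited on.)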
++ * ++ * See also similar routines (i.e. wait_for_completion_timeout()) with timeout ++ * and interrupt capability. Also see complete(). ++ */ ++void __sched wait_for_completion(struct completion *x) ++{ ++ wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE); ++} ++EXPORT_SYMBOL(wait_for_completion); ++ ++/** ++ * wait_for_completion_timeout: - waits for completion of a task (w/timeout) ++ * @x: holds the state of this particular completion ++ * @timeout: timeout value in jiffies ++ * ++ * This waits for either a completion of a specific task to be signaled or for a ++ * specified timeout to expire. The timeout is in jiffies. It is not ++ * interruptible. ++ * ++ * The return value is 0 if timed out, and positive (at least 1, or number of ++ * jiffies left till timeout) if completed. ++ */ ++unsigned long __sched ++wait_for_completion_timeout(struct completion *x, unsigned long timeout) ++{ ++ return wait_for_common(x, timeout, TASK_UNINTERRUPTIBLE); ++} ++EXPORT_SYMBOL(wait_for_completion_timeout); ++ ++ /** ++ * wait_for_completion_io: - waits for completion of a task ++ * @x: holds the state of this particular completion ++ * ++ * This waits to be signaled for completion of a specific task. It is NOT ++ * interruptible and there is no timeout. The caller is accounted as waiting ++ * for IO. ++ */ ++void __sched wait_for_completion_io(struct completion *x) ++{ ++ wait_for_common_io(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE); ++} ++EXPORT_SYMBOL(wait_for_completion_io); ++ ++/** ++ * wait_for_completion_io_timeout: - waits for completion of a task (w/timeout) ++ * @x: holds the state of this particular completion ++ * @timeout: timeout value in jiffies ++ * ++ * This waits for either a completion of a specific task to be signaled or for a ++ * specified timeout to expire. The timeout is in jiffies. It is not ++ * interruptible. The caller is accounted as waiting for IO. ++ * ++ * The return value is 0 if timed out, and positive (at least 1, or number of ++ * jiffies left till timeout) if completed. ++ */ ++unsigned long __sched ++wait_for_completion_io_timeout(struct completion *x, unsigned long timeout) ++{ ++ return wait_for_common_io(x, timeout, TASK_UNINTERRUPTIBLE); ++} ++EXPORT_SYMBOL(wait_for_completion_io_timeout); ++ ++/** ++ * wait_for_completion_interruptible: - waits for completion of a task (w/intr) ++ * @x: holds the state of this particular completion ++ * ++ * This waits for completion of a specific task to be signaled. It is ++ * interruptible. ++ * ++ * The return value is -ERESTARTSYS if interrupted, 0 if completed. ++ */ ++int __sched wait_for_completion_interruptible(struct completion *x) ++{ ++ long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE); ++ if (t == -ERESTARTSYS) ++ return t; ++ return 0; ++} ++EXPORT_SYMBOL(wait_for_completion_interruptible); ++ ++/** ++ * wait_for_completion_interruptible_timeout: - waits for completion (w/(to,intr)) ++ * @x: holds the state of this particular completion ++ * @timeout: timeout value in jiffies ++ * ++ * This waits for either a completion of a specific task to be signaled or for a ++ * specified timeout to expire. It is interruptible. The timeout is in jiffies. ++ * ++ * The return value is -ERESTARTSYS if interrupted, 0 if timed out, ++ * positive (at least 1, or number of jiffies left till timeout) if completed. 
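++ *
++ * Callers usually want to distinguish all three outcomes, e.g. (sketch):
++ *
++ *	long ret = wait_for_completion_interruptible_timeout(&done, HZ);
++ *
++ *	if (ret == -ERESTARTSYS)
++ *		return ret;		(interrupted by a signal)
++ *	if (ret == 0)
++ *		return -ETIMEDOUT;	(a full second elapsed)
++ *	(completed; ret is the number of jiffies that were left)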
++ */ ++long __sched ++wait_for_completion_interruptible_timeout(struct completion *x, ++ unsigned long timeout) ++{ ++ return wait_for_common(x, timeout, TASK_INTERRUPTIBLE); ++} ++EXPORT_SYMBOL(wait_for_completion_interruptible_timeout); ++ ++/** ++ * wait_for_completion_killable: - waits for completion of a task (killable) ++ * @x: holds the state of this particular completion ++ * ++ * This waits to be signaled for completion of a specific task. It can be ++ * interrupted by a kill signal. ++ * ++ * The return value is -ERESTARTSYS if interrupted, 0 if timed out, ++ * positive (at least 1, or number of jiffies left till timeout) if completed. ++ */ ++int __sched wait_for_completion_killable(struct completion *x) ++{ ++ long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE); ++ if (t == -ERESTARTSYS) ++ return t; ++ return 0; ++} ++EXPORT_SYMBOL(wait_for_completion_killable); ++ ++/** ++ * wait_for_completion_killable_timeout: - waits for completion of a task (w/(to,killable)) ++ * @x: holds the state of this particular completion ++ * @timeout: timeout value in jiffies ++ * ++ * This waits for either a completion of a specific task to be ++ * signaled or for a specified timeout to expire. It can be ++ * interrupted by a kill signal. The timeout is in jiffies. ++ */ ++long __sched ++wait_for_completion_killable_timeout(struct completion *x, ++ unsigned long timeout) ++{ ++ return wait_for_common(x, timeout, TASK_KILLABLE); ++} ++EXPORT_SYMBOL(wait_for_completion_killable_timeout); ++ ++/** ++ * try_wait_for_completion - try to decrement a completion without blocking ++ * @x: completion structure ++ * ++ * Returns: 0 if a decrement cannot be done without blocking ++ * 1 if a decrement succeeded. ++ * ++ * If a completion is being used as a counting completion, ++ * attempt to decrement the counter without blocking. This ++ * enables us to avoid waiting if the resource the completion ++ * is protecting is not available. ++ */ ++bool try_wait_for_completion(struct completion *x) ++{ ++ unsigned long flags; ++ int ret = 1; ++ ++ spin_lock_irqsave(&x->wait.lock, flags); ++ if (!x->done) ++ ret = 0; ++ else ++ x->done--; ++ spin_unlock_irqrestore(&x->wait.lock, flags); ++ return ret; ++} ++EXPORT_SYMBOL(try_wait_for_completion); ++ ++/** ++ * completion_done - Test to see if a completion has any waiters ++ * @x: completion structure ++ * ++ * Returns: 0 if there are waiters (wait_for_completion() in progress) ++ * 1 if there are no waiters. 
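++ * (It only inspects x->done under the wait lock; it neither consumes a
++ * completion nor wakes anything up, so the answer may already be stale
++ * by the time the caller acts on it.)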
++ * ++ */ ++bool completion_done(struct completion *x) ++{ ++ unsigned long flags; ++ int ret = 1; ++ ++ spin_lock_irqsave(&x->wait.lock, flags); ++ if (!x->done) ++ ret = 0; ++ spin_unlock_irqrestore(&x->wait.lock, flags); ++ return ret; ++} ++EXPORT_SYMBOL(completion_done); ++ ++static long __sched ++sleep_on_common(wait_queue_head_t *q, int state, long timeout) ++{ ++ unsigned long flags; ++ wait_queue_t wait; ++ ++ init_waitqueue_entry(&wait, current); ++ ++ __set_current_state(state); ++ ++ spin_lock_irqsave(&q->lock, flags); ++ __add_wait_queue(q, &wait); ++ spin_unlock(&q->lock); ++ timeout = schedule_timeout(timeout); ++ spin_lock_irq(&q->lock); ++ __remove_wait_queue(q, &wait); ++ spin_unlock_irqrestore(&q->lock, flags); ++ ++ return timeout; ++} ++ ++void __sched interruptible_sleep_on(wait_queue_head_t *q) ++{ ++ sleep_on_common(q, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); ++} ++EXPORT_SYMBOL(interruptible_sleep_on); ++ ++long __sched ++interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout) ++{ ++ return sleep_on_common(q, TASK_INTERRUPTIBLE, timeout); ++} ++EXPORT_SYMBOL(interruptible_sleep_on_timeout); ++ ++void __sched sleep_on(wait_queue_head_t *q) ++{ ++ sleep_on_common(q, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); ++} ++EXPORT_SYMBOL(sleep_on); ++ ++long __sched sleep_on_timeout(wait_queue_head_t *q, long timeout) ++{ ++ return sleep_on_common(q, TASK_UNINTERRUPTIBLE, timeout); ++} ++EXPORT_SYMBOL(sleep_on_timeout); ++ ++#ifdef CONFIG_RT_MUTEXES ++ ++/* ++ * rt_mutex_setprio - set the current priority of a task ++ * @p: task ++ * @prio: prio value (kernel-internal form) ++ * ++ * This function changes the 'effective' priority of a task. It does ++ * not touch ->normal_prio like __setscheduler(). ++ * ++ * Used by the rt_mutex code to implement priority inheritance logic. ++ */ ++void rt_mutex_setprio(struct task_struct *p, int prio) ++{ ++ unsigned long flags; ++ int queued, oldprio; ++ struct rq *rq; ++ ++ BUG_ON(prio < 0 || prio > MAX_PRIO); ++ ++ rq = task_grq_lock(p, &flags); ++ ++ /* ++ * Idle task boosting is a nono in general. There is one ++ * exception, when PREEMPT_RT and NOHZ is active: ++ * ++ * The idle task calls get_next_timer_interrupt() and holds ++ * the timer wheel base->lock on the CPU and another CPU wants ++ * to access the timer (probably to cancel it). We can safely ++ * ignore the boosting request, as the idle CPU runs this code ++ * with interrupts disabled and will complete the lock ++ * protected section without being interrupted. So there is no ++ * real need to boost. ++ */ ++ if (unlikely(p == rq->idle)) { ++ WARN_ON(p != rq->curr); ++ WARN_ON(p->pi_blocked_on); ++ goto out_unlock; ++ } ++ ++ trace_sched_pi_setprio(p, prio); ++ oldprio = p->prio; ++ queued = task_queued(p); ++ if (queued) ++ dequeue_task(p); ++ p->prio = prio; ++ if (task_running(p) && prio > oldprio) ++ resched_task(p); ++ if (queued) { ++ enqueue_task(p); ++ try_preempt(p, rq); ++ } ++ ++out_unlock: ++ task_grq_unlock(&flags); ++} ++ ++#endif ++ ++/* ++ * Adjust the deadline for when the priority is to change, before it's ++ * changed. 
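++ *
++ * The task keeps its relative position in the queue: only the
++ * prio-dependent offset changes, i.e. conceptually
++ *
++ *	deadline = (deadline - old_prio_diff) + new_prio_diff
++ *
++ * which the single += below computes, with task_deadline_diff() and
++ * static_deadline_diff() supplying the old and new offsets.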
++ */ ++static inline void adjust_deadline(struct task_struct *p, int new_prio) ++{ ++ p->deadline += static_deadline_diff(new_prio) - task_deadline_diff(p); ++} ++ ++void set_user_nice(struct task_struct *p, long nice) ++{ ++ int queued, new_static, old_static; ++ unsigned long flags; ++ struct rq *rq; ++ ++ if (TASK_NICE(p) == nice || nice < -20 || nice > 19) ++ return; ++ new_static = NICE_TO_PRIO(nice); ++ /* ++ * We have to be careful, if called from sys_setpriority(), ++ * the task might be in the middle of scheduling on another CPU. ++ */ ++ rq = time_task_grq_lock(p, &flags); ++ /* ++ * The RT priorities are set via sched_setscheduler(), but we still ++ * allow the 'normal' nice value to be set - but as expected ++ * it wont have any effect on scheduling until the task is ++ * not SCHED_NORMAL/SCHED_BATCH: ++ */ ++ if (has_rt_policy(p)) { ++ p->static_prio = new_static; ++ goto out_unlock; ++ } ++ queued = task_queued(p); ++ if (queued) ++ dequeue_task(p); ++ ++ adjust_deadline(p, new_static); ++ old_static = p->static_prio; ++ p->static_prio = new_static; ++ p->prio = effective_prio(p); ++ ++ if (queued) { ++ enqueue_task(p); ++ if (new_static < old_static) ++ try_preempt(p, rq); ++ } else if (task_running(p)) { ++ reset_rq_task(rq, p); ++ if (old_static < new_static) ++ resched_task(p); ++ } ++out_unlock: ++ task_grq_unlock(&flags); ++} ++EXPORT_SYMBOL(set_user_nice); ++ ++/* ++ * can_nice - check if a task can reduce its nice value ++ * @p: task ++ * @nice: nice value ++ */ ++int can_nice(const struct task_struct *p, const int nice) ++{ ++ /* convert nice value [19,-20] to rlimit style value [1,40] */ ++ int nice_rlim = 20 - nice; ++ ++ return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) || ++ capable(CAP_SYS_NICE)); ++} ++ ++#ifdef __ARCH_WANT_SYS_NICE ++ ++/* ++ * sys_nice - change the priority of the current process. ++ * @increment: priority increment ++ * ++ * sys_setpriority is a more generic, but much slower function that ++ * does similar things. ++ */ ++SYSCALL_DEFINE1(nice, int, increment) ++{ ++ long nice, retval; ++ ++ /* ++ * Setpriority might change our priority at the same moment. ++ * We don't have to worry. Conceptually one call occurs first ++ * and we have a single winner. ++ */ ++ if (increment < -40) ++ increment = -40; ++ if (increment > 40) ++ increment = 40; ++ ++ nice = TASK_NICE(current) + increment; ++ if (nice < -20) ++ nice = -20; ++ if (nice > 19) ++ nice = 19; ++ ++ if (increment < 0 && !can_nice(current, nice)) ++ return -EPERM; ++ ++ retval = security_task_setnice(current, nice); ++ if (retval) ++ return retval; ++ ++ set_user_nice(current, nice); ++ return 0; ++} ++ ++#endif ++ ++/** ++ * task_prio - return the priority value of a given task. ++ * @p: the task in question. ++ * ++ * This is the priority value as seen by users in /proc. ++ * RT tasks are offset by -100. Normal tasks are centered around 1, value goes ++ * from 0 (SCHED_ISO) up to 82 (nice +19 SCHED_IDLEPRIO). ++ */ ++int task_prio(const struct task_struct *p) ++{ ++ int delta, prio = p->prio - MAX_RT_PRIO; ++ ++ /* rt tasks and iso tasks */ ++ if (prio <= 0) ++ goto out; ++ ++ /* Convert to ms to avoid overflows */ ++ delta = NS_TO_MS(p->deadline - grq.niffies); ++ delta = delta * 40 / ms_longest_deadline_diff(); ++ if (delta > 0 && delta <= 80) ++ prio += delta; ++ if (idleprio_task(p)) ++ prio += 40; ++out: ++ return prio; ++} ++ ++/** ++ * task_nice - return the nice value of a given task. ++ * @p: the task in question. 
++ */ ++int task_nice(const struct task_struct *p) ++{ ++ return TASK_NICE(p); ++} ++EXPORT_SYMBOL_GPL(task_nice); ++ ++/** ++ * idle_cpu - is a given cpu idle currently? ++ * @cpu: the processor in question. ++ */ ++int idle_cpu(int cpu) ++{ ++ return cpu_curr(cpu) == cpu_rq(cpu)->idle; ++} ++ ++/** ++ * idle_task - return the idle task for a given cpu. ++ * @cpu: the processor in question. ++ */ ++struct task_struct *idle_task(int cpu) ++{ ++ return cpu_rq(cpu)->idle; ++} ++ ++/** ++ * find_process_by_pid - find a process with a matching PID value. ++ * @pid: the pid in question. ++ */ ++static inline struct task_struct *find_process_by_pid(pid_t pid) ++{ ++ return pid ? find_task_by_vpid(pid) : current; ++} ++ ++/* Actually do priority change: must hold grq lock. */ ++static void ++__setscheduler(struct task_struct *p, struct rq *rq, int policy, int prio) ++{ ++ int oldrtprio, oldprio; ++ ++ p->policy = policy; ++ oldrtprio = p->rt_priority; ++ p->rt_priority = prio; ++ p->normal_prio = normal_prio(p); ++ oldprio = p->prio; ++ /* we are holding p->pi_lock already */ ++ p->prio = rt_mutex_getprio(p); ++ if (task_running(p)) { ++ reset_rq_task(rq, p); ++ /* Resched only if we might now be preempted */ ++ if (p->prio > oldprio || p->rt_priority > oldrtprio) ++ resched_task(p); ++ } ++} ++ ++/* ++ * check the target process has a UID that matches the current process's ++ */ ++static bool check_same_owner(struct task_struct *p) ++{ ++ const struct cred *cred = current_cred(), *pcred; ++ bool match; ++ ++ rcu_read_lock(); ++ pcred = __task_cred(p); ++ match = (uid_eq(cred->euid, pcred->euid) || ++ uid_eq(cred->euid, pcred->uid)); ++ rcu_read_unlock(); ++ return match; ++} ++ ++static int __sched_setscheduler(struct task_struct *p, int policy, ++ const struct sched_param *param, bool user) ++{ ++ struct sched_param zero_param = { .sched_priority = 0 }; ++ int queued, retval, oldpolicy = -1; ++ unsigned long flags, rlim_rtprio = 0; ++ int reset_on_fork; ++ struct rq *rq; ++ ++ /* may grab non-irq protected spin_locks */ ++ BUG_ON(in_interrupt()); ++ ++ if (is_rt_policy(policy) && !capable(CAP_SYS_NICE)) { ++ unsigned long lflags; ++ ++ if (!lock_task_sighand(p, &lflags)) ++ return -ESRCH; ++ rlim_rtprio = task_rlimit(p, RLIMIT_RTPRIO); ++ unlock_task_sighand(p, &lflags); ++ if (rlim_rtprio) ++ goto recheck; ++ /* ++ * If the caller requested an RT policy without having the ++ * necessary rights, we downgrade the policy to SCHED_ISO. ++ * We also set the parameter to zero to pass the checks. ++ */ ++ policy = SCHED_ISO; ++ param = &zero_param; ++ } ++recheck: ++ /* double check policy once rq lock held */ ++ if (policy < 0) { ++ reset_on_fork = p->sched_reset_on_fork; ++ policy = oldpolicy = p->policy; ++ } else { ++ reset_on_fork = !!(policy & SCHED_RESET_ON_FORK); ++ policy &= ~SCHED_RESET_ON_FORK; ++ ++ if (!SCHED_RANGE(policy)) ++ return -EINVAL; ++ } ++ ++ /* ++ * Valid priorities for SCHED_FIFO and SCHED_RR are ++ * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL and ++ * SCHED_BATCH is 0. 
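++ * (Kernel threads have no mm and are allowed the full 1..MAX_RT_PRIO-1
++ * range; the p->mm tests below implement that distinction.)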
++ */ ++ if (param->sched_priority < 0 || ++ (p->mm && param->sched_priority > MAX_USER_RT_PRIO - 1) || ++ (!p->mm && param->sched_priority > MAX_RT_PRIO - 1)) ++ return -EINVAL; ++ if (is_rt_policy(policy) != (param->sched_priority != 0)) ++ return -EINVAL; ++ ++ /* ++ * Allow unprivileged RT tasks to decrease priority: ++ */ ++ if (user && !capable(CAP_SYS_NICE)) { ++ if (is_rt_policy(policy)) { ++ unsigned long rlim_rtprio = ++ task_rlimit(p, RLIMIT_RTPRIO); ++ ++ /* can't set/change the rt policy */ ++ if (policy != p->policy && !rlim_rtprio) ++ return -EPERM; ++ ++ /* can't increase priority */ ++ if (param->sched_priority > p->rt_priority && ++ param->sched_priority > rlim_rtprio) ++ return -EPERM; ++ } else { ++ switch (p->policy) { ++ /* ++ * Can only downgrade policies but not back to ++ * SCHED_NORMAL ++ */ ++ case SCHED_ISO: ++ if (policy == SCHED_ISO) ++ goto out; ++ if (policy == SCHED_NORMAL) ++ return -EPERM; ++ break; ++ case SCHED_BATCH: ++ if (policy == SCHED_BATCH) ++ goto out; ++ if (policy != SCHED_IDLEPRIO) ++ return -EPERM; ++ break; ++ case SCHED_IDLEPRIO: ++ if (policy == SCHED_IDLEPRIO) ++ goto out; ++ return -EPERM; ++ default: ++ break; ++ } ++ } ++ ++ /* can't change other user's priorities */ ++ if (!check_same_owner(p)) ++ return -EPERM; ++ ++ /* Normal users shall not reset the sched_reset_on_fork flag */ ++ if (p->sched_reset_on_fork && !reset_on_fork) ++ return -EPERM; ++ } ++ ++ if (user) { ++ retval = security_task_setscheduler(p); ++ if (retval) ++ return retval; ++ } ++ ++ /* ++ * make sure no PI-waiters arrive (or leave) while we are ++ * changing the priority of the task: ++ */ ++ raw_spin_lock_irqsave(&p->pi_lock, flags); ++ /* ++ * To be able to change p->policy safely, the grunqueue lock must be ++ * held. ++ */ ++ rq = __task_grq_lock(p); ++ ++ /* ++ * Changing the policy of the stop threads its a very bad idea ++ */ ++ if (p == rq->stop) { ++ __task_grq_unlock(); ++ raw_spin_unlock_irqrestore(&p->pi_lock, flags); ++ return -EINVAL; ++ } ++ ++ /* ++ * If not changing anything there's no need to proceed further: ++ */ ++ if (unlikely(policy == p->policy && (!is_rt_policy(policy) || ++ param->sched_priority == p->rt_priority))) { ++ ++ __task_grq_unlock(); ++ raw_spin_unlock_irqrestore(&p->pi_lock, flags); ++ return 0; ++ } ++ ++ /* recheck policy now with rq lock held */ ++ if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) { ++ policy = oldpolicy = -1; ++ __task_grq_unlock(); ++ raw_spin_unlock_irqrestore(&p->pi_lock, flags); ++ goto recheck; ++ } ++ update_clocks(rq); ++ p->sched_reset_on_fork = reset_on_fork; ++ ++ queued = task_queued(p); ++ if (queued) ++ dequeue_task(p); ++ __setscheduler(p, rq, policy, param->sched_priority); ++ if (queued) { ++ enqueue_task(p); ++ try_preempt(p, rq); ++ } ++ __task_grq_unlock(); ++ raw_spin_unlock_irqrestore(&p->pi_lock, flags); ++ ++ rt_mutex_adjust_pi(p); ++out: ++ return 0; ++} ++ ++/** ++ * sched_setscheduler - change the scheduling policy and/or RT priority of a thread. ++ * @p: the task in question. ++ * @policy: new policy. ++ * @param: structure containing the new RT priority. ++ * ++ * NOTE that the task may be already dead. ++ */ ++int sched_setscheduler(struct task_struct *p, int policy, ++ const struct sched_param *param) ++{ ++ return __sched_setscheduler(p, policy, param, true); ++} ++ ++EXPORT_SYMBOL_GPL(sched_setscheduler); ++ ++/** ++ * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace. ++ * @p: the task in question. 
++ * @policy: new policy.
++ * @param: structure containing the new RT priority.
++ *
++ * Just like sched_setscheduler, only don't bother checking if the
++ * current context has permission. For example, this is needed in
++ * stop_machine(): we create temporary high priority worker threads,
++ * but our caller might not have that capability.
++ */
++int sched_setscheduler_nocheck(struct task_struct *p, int policy,
++			       const struct sched_param *param)
++{
++	return __sched_setscheduler(p, policy, param, false);
++}
++
++static int
++do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
++{
++	struct sched_param lparam;
++	struct task_struct *p;
++	int retval;
++
++	if (!param || pid < 0)
++		return -EINVAL;
++	if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
++		return -EFAULT;
++
++	rcu_read_lock();
++	retval = -ESRCH;
++	p = find_process_by_pid(pid);
++	if (p != NULL)
++		retval = sched_setscheduler(p, policy, &lparam);
++	rcu_read_unlock();
++
++	return retval;
++}
++
++/**
++ * sys_sched_setscheduler - set/change the scheduler policy and RT priority
++ * @pid: the pid in question.
++ * @policy: new policy.
++ * @param: structure containing the new RT priority.
++ */
++asmlinkage long sys_sched_setscheduler(pid_t pid, int policy,
++				       struct sched_param __user *param)
++{
++	/* negative values for policy are not valid */
++	if (policy < 0)
++		return -EINVAL;
++
++	return do_sched_setscheduler(pid, policy, param);
++}
++
++/**
++ * sys_sched_setparam - set/change the RT priority of a thread
++ * @pid: the pid in question.
++ * @param: structure containing the new RT priority.
++ */
++SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
++{
++	return do_sched_setscheduler(pid, -1, param);
++}
++
++/**
++ * sys_sched_getscheduler - get the policy (scheduling class) of a thread
++ * @pid: the pid in question.
++ */
++SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
++{
++	struct task_struct *p;
++	int retval = -EINVAL;
++
++	if (pid < 0)
++		goto out_nounlock;
++
++	retval = -ESRCH;
++	rcu_read_lock();
++	p = find_process_by_pid(pid);
++	if (p) {
++		retval = security_task_getscheduler(p);
++		if (!retval)
++			retval = p->policy;
++	}
++	rcu_read_unlock();
++
++out_nounlock:
++	return retval;
++}
++
++/**
++ * sys_sched_getparam - get the RT priority of a thread
++ * @pid: the pid in question.
++ * @param: structure containing the RT priority.
++ */
++SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
++{
++	struct sched_param lp;
++	struct task_struct *p;
++	int retval = -EINVAL;
++
++	if (!param || pid < 0)
++		goto out_nounlock;
++
++	rcu_read_lock();
++	p = find_process_by_pid(pid);
++	retval = -ESRCH;
++	if (!p)
++		goto out_unlock;
++
++	retval = security_task_getscheduler(p);
++	if (retval)
++		goto out_unlock;
++
++	lp.sched_priority = p->rt_priority;
++	rcu_read_unlock();
++
++	/*
++	 * This one might sleep, we cannot do it with a spinlock held ...
++	 */
++	retval = copy_to_user(param, &lp, sizeof(*param)) ?
-EFAULT : 0; ++ ++out_nounlock: ++ return retval; ++ ++out_unlock: ++ rcu_read_unlock(); ++ return retval; ++} ++ ++long sched_setaffinity(pid_t pid, const struct cpumask *in_mask) ++{ ++ cpumask_var_t cpus_allowed, new_mask; ++ struct task_struct *p; ++ int retval; ++ ++ get_online_cpus(); ++ rcu_read_lock(); ++ ++ p = find_process_by_pid(pid); ++ if (!p) { ++ rcu_read_unlock(); ++ put_online_cpus(); ++ return -ESRCH; ++ } ++ ++ /* Prevent p going away */ ++ get_task_struct(p); ++ rcu_read_unlock(); ++ ++ if (p->flags & PF_NO_SETAFFINITY) { ++ retval = -EINVAL; ++ goto out_put_task; ++ } ++ if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) { ++ retval = -ENOMEM; ++ goto out_put_task; ++ } ++ if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) { ++ retval = -ENOMEM; ++ goto out_free_cpus_allowed; ++ } ++ retval = -EPERM; ++ if (!check_same_owner(p)) { ++ rcu_read_lock(); ++ if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) { ++ rcu_read_unlock(); ++ goto out_unlock; ++ } ++ rcu_read_unlock(); ++ } ++ ++ retval = security_task_setscheduler(p); ++ if (retval) ++ goto out_unlock; ++ ++ cpuset_cpus_allowed(p, cpus_allowed); ++ cpumask_and(new_mask, in_mask, cpus_allowed); ++again: ++ retval = set_cpus_allowed_ptr(p, new_mask); ++ ++ if (!retval) { ++ cpuset_cpus_allowed(p, cpus_allowed); ++ if (!cpumask_subset(new_mask, cpus_allowed)) { ++ /* ++ * We must have raced with a concurrent cpuset ++ * update. Just reset the cpus_allowed to the ++ * cpuset's cpus_allowed ++ */ ++ cpumask_copy(new_mask, cpus_allowed); ++ goto again; ++ } ++ } ++out_unlock: ++ free_cpumask_var(new_mask); ++out_free_cpus_allowed: ++ free_cpumask_var(cpus_allowed); ++out_put_task: ++ put_task_struct(p); ++ put_online_cpus(); ++ return retval; ++} ++ ++static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len, ++ cpumask_t *new_mask) ++{ ++ if (len < sizeof(cpumask_t)) { ++ memset(new_mask, 0, sizeof(cpumask_t)); ++ } else if (len > sizeof(cpumask_t)) { ++ len = sizeof(cpumask_t); ++ } ++ return copy_from_user(new_mask, user_mask_ptr, len) ? 
-EFAULT : 0; ++} ++ ++ ++/** ++ * sys_sched_setaffinity - set the cpu affinity of a process ++ * @pid: pid of the process ++ * @len: length in bytes of the bitmask pointed to by user_mask_ptr ++ * @user_mask_ptr: user-space pointer to the new cpu mask ++ */ ++SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len, ++ unsigned long __user *, user_mask_ptr) ++{ ++ cpumask_var_t new_mask; ++ int retval; ++ ++ if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) ++ return -ENOMEM; ++ ++ retval = get_user_cpu_mask(user_mask_ptr, len, new_mask); ++ if (retval == 0) ++ retval = sched_setaffinity(pid, new_mask); ++ free_cpumask_var(new_mask); ++ return retval; ++} ++ ++long sched_getaffinity(pid_t pid, cpumask_t *mask) ++{ ++ struct task_struct *p; ++ unsigned long flags; ++ int retval; ++ ++ get_online_cpus(); ++ rcu_read_lock(); ++ ++ retval = -ESRCH; ++ p = find_process_by_pid(pid); ++ if (!p) ++ goto out_unlock; ++ ++ retval = security_task_getscheduler(p); ++ if (retval) ++ goto out_unlock; ++ ++ grq_lock_irqsave(&flags); ++ cpumask_and(mask, tsk_cpus_allowed(p), cpu_online_mask); ++ grq_unlock_irqrestore(&flags); ++ ++out_unlock: ++ rcu_read_unlock(); ++ put_online_cpus(); ++ ++ return retval; ++} ++ ++/** ++ * sys_sched_getaffinity - get the cpu affinity of a process ++ * @pid: pid of the process ++ * @len: length in bytes of the bitmask pointed to by user_mask_ptr ++ * @user_mask_ptr: user-space pointer to hold the current cpu mask ++ */ ++SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len, ++ unsigned long __user *, user_mask_ptr) ++{ ++ int ret; ++ cpumask_var_t mask; ++ ++ if ((len * BITS_PER_BYTE) < nr_cpu_ids) ++ return -EINVAL; ++ if (len & (sizeof(unsigned long)-1)) ++ return -EINVAL; ++ ++ if (!alloc_cpumask_var(&mask, GFP_KERNEL)) ++ return -ENOMEM; ++ ++ ret = sched_getaffinity(pid, mask); ++ if (ret == 0) { ++ size_t retlen = min_t(size_t, len, cpumask_size()); ++ ++ if (copy_to_user(user_mask_ptr, mask, retlen)) ++ ret = -EFAULT; ++ else ++ ret = retlen; ++ } ++ free_cpumask_var(mask); ++ ++ return ret; ++} ++ ++/** ++ * sys_sched_yield - yield the current processor to other threads. ++ * ++ * This function yields the current CPU to other tasks. It does this by ++ * scheduling away the current task. If it still has the earliest deadline ++ * it will be scheduled again as the next task. ++ */ ++SYSCALL_DEFINE0(sched_yield) ++{ ++ struct task_struct *p; ++ ++ p = current; ++ grq_lock_irq(); ++ schedstat_inc(task_rq(p), yld_count); ++ requeue_task(p); ++ ++ /* ++ * Since we are going to call schedule() anyway, there's ++ * no need to preempt or enable interrupts: ++ */ ++ __release(grq.lock); ++ spin_release(&grq.lock.dep_map, 1, _THIS_IP_); ++ do_raw_spin_unlock(&grq.lock); ++ sched_preempt_enable_no_resched(); ++ ++ schedule(); ++ ++ return 0; ++} ++ ++static inline bool should_resched(void) ++{ ++ return need_resched() && !(preempt_count() & PREEMPT_ACTIVE); ++} ++ ++static void __cond_resched(void) ++{ ++ add_preempt_count(PREEMPT_ACTIVE); ++ schedule(); ++ sub_preempt_count(PREEMPT_ACTIVE); ++} ++ ++int __sched _cond_resched(void) ++{ ++ if (should_resched()) { ++ __cond_resched(); ++ return 1; ++ } ++ return 0; ++} ++EXPORT_SYMBOL(_cond_resched); ++ ++/* ++ * __cond_resched_lock() - if a reschedule is pending, drop the given lock, ++ * call schedule, and on return reacquire the lock. ++ * ++ * This works OK both with and without CONFIG_PREEMPT. 
We do strange low-level ++ * operations here to prevent schedule() from being called twice (once via ++ * spin_unlock(), once by hand). ++ */ ++int __cond_resched_lock(spinlock_t *lock) ++{ ++ int resched = should_resched(); ++ int ret = 0; ++ ++ lockdep_assert_held(lock); ++ ++ if (spin_needbreak(lock) || resched) { ++ spin_unlock(lock); ++ if (resched) ++ __cond_resched(); ++ else ++ cpu_relax(); ++ ret = 1; ++ spin_lock(lock); ++ } ++ return ret; ++} ++EXPORT_SYMBOL(__cond_resched_lock); ++ ++int __sched __cond_resched_softirq(void) ++{ ++ BUG_ON(!in_softirq()); ++ ++ if (should_resched()) { ++ local_bh_enable(); ++ __cond_resched(); ++ local_bh_disable(); ++ return 1; ++ } ++ return 0; ++} ++EXPORT_SYMBOL(__cond_resched_softirq); ++ ++/** ++ * yield - yield the current processor to other threads. ++ * ++ * Do not ever use this function, there's a 99% chance you're doing it wrong. ++ * ++ * The scheduler is at all times free to pick the calling task as the most ++ * eligible task to run, if removing the yield() call from your code breaks ++ * it, its already broken. ++ * ++ * Typical broken usage is: ++ * ++ * while (!event) ++ * yield(); ++ * ++ * where one assumes that yield() will let 'the other' process run that will ++ * make event true. If the current task is a SCHED_FIFO task that will never ++ * happen. Never use yield() as a progress guarantee!! ++ * ++ * If you want to use yield() to wait for something, use wait_event(). ++ * If you want to use yield() to be 'nice' for others, use cond_resched(). ++ * If you still want to use yield(), do not! ++ */ ++void __sched yield(void) ++{ ++ set_current_state(TASK_RUNNING); ++ sys_sched_yield(); ++} ++EXPORT_SYMBOL(yield); ++ ++/** ++ * yield_to - yield the current processor to another thread in ++ * your thread group, or accelerate that thread toward the ++ * processor it's on. ++ * @p: target task ++ * @preempt: whether task preemption is allowed or not ++ * ++ * It's the caller's job to ensure that the target task struct ++ * can't go away on us before we can do any checks. ++ * ++ * Returns: ++ * true (>0) if we indeed boosted the target task. ++ * false (0) if we failed to boost the target. ++ * -ESRCH if there's no task to yield to. ++ */ ++bool __sched yield_to(struct task_struct *p, bool preempt) ++{ ++ unsigned long flags; ++ int yielded = 0; ++ struct rq *rq; ++ ++ rq = this_rq(); ++ grq_lock_irqsave(&flags); ++ if (task_running(p) || p->state) { ++ yielded = -ESRCH; ++ goto out_unlock; ++ } ++ yielded = 1; ++ if (p->deadline > rq->rq_deadline) ++ p->deadline = rq->rq_deadline; ++ p->time_slice += rq->rq_time_slice; ++ rq->rq_time_slice = 0; ++ if (p->time_slice > timeslice()) ++ p->time_slice = timeslice(); ++ set_tsk_need_resched(rq->curr); ++out_unlock: ++ grq_unlock_irqrestore(&flags); ++ ++ if (yielded > 0) ++ schedule(); ++ return yielded; ++} ++EXPORT_SYMBOL_GPL(yield_to); ++ ++/* ++ * This task is about to go to sleep on IO. Increment rq->nr_iowait so ++ * that process accounting knows that this is a task in IO wait state. 
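++ * (nr_iowait is what the idle-time accounting samples to attribute
++ * otherwise-idle time to iowait, e.g. the iowait field of /proc/stat.)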
++ * ++ * But don't do that if it is a deliberate, throttling IO wait (this task ++ * has set its backing_dev_info: the queue against which it should throttle) ++ */ ++void __sched io_schedule(void) ++{ ++ struct rq *rq = raw_rq(); ++ ++ delayacct_blkio_start(); ++ atomic_inc(&rq->nr_iowait); ++ blk_flush_plug(current); ++ current->in_iowait = 1; ++ schedule(); ++ current->in_iowait = 0; ++ atomic_dec(&rq->nr_iowait); ++ delayacct_blkio_end(); ++} ++EXPORT_SYMBOL(io_schedule); ++ ++long __sched io_schedule_timeout(long timeout) ++{ ++ struct rq *rq = raw_rq(); ++ long ret; ++ ++ delayacct_blkio_start(); ++ atomic_inc(&rq->nr_iowait); ++ blk_flush_plug(current); ++ current->in_iowait = 1; ++ ret = schedule_timeout(timeout); ++ current->in_iowait = 0; ++ atomic_dec(&rq->nr_iowait); ++ delayacct_blkio_end(); ++ return ret; ++} ++ ++/** ++ * sys_sched_get_priority_max - return maximum RT priority. ++ * @policy: scheduling class. ++ * ++ * this syscall returns the maximum rt_priority that can be used ++ * by a given scheduling class. ++ */ ++SYSCALL_DEFINE1(sched_get_priority_max, int, policy) ++{ ++ int ret = -EINVAL; ++ ++ switch (policy) { ++ case SCHED_FIFO: ++ case SCHED_RR: ++ ret = MAX_USER_RT_PRIO-1; ++ break; ++ case SCHED_NORMAL: ++ case SCHED_BATCH: ++ case SCHED_ISO: ++ case SCHED_IDLEPRIO: ++ ret = 0; ++ break; ++ } ++ return ret; ++} ++ ++/** ++ * sys_sched_get_priority_min - return minimum RT priority. ++ * @policy: scheduling class. ++ * ++ * this syscall returns the minimum rt_priority that can be used ++ * by a given scheduling class. ++ */ ++SYSCALL_DEFINE1(sched_get_priority_min, int, policy) ++{ ++ int ret = -EINVAL; ++ ++ switch (policy) { ++ case SCHED_FIFO: ++ case SCHED_RR: ++ ret = 1; ++ break; ++ case SCHED_NORMAL: ++ case SCHED_BATCH: ++ case SCHED_ISO: ++ case SCHED_IDLEPRIO: ++ ret = 0; ++ break; ++ } ++ return ret; ++} ++ ++/** ++ * sys_sched_rr_get_interval - return the default timeslice of a process. ++ * @pid: pid of the process. ++ * @interval: userspace pointer to the timeslice value. ++ * ++ * this syscall writes the default timeslice value of a given process ++ * into the user-space timespec buffer. A value of '0' means infinity. ++ */ ++SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid, ++ struct timespec __user *, interval) ++{ ++ struct task_struct *p; ++ unsigned int time_slice; ++ unsigned long flags; ++ int retval; ++ struct timespec t; ++ ++ if (pid < 0) ++ return -EINVAL; ++ ++ retval = -ESRCH; ++ rcu_read_lock(); ++ p = find_process_by_pid(pid); ++ if (!p) ++ goto out_unlock; ++ ++ retval = security_task_getscheduler(p); ++ if (retval) ++ goto out_unlock; ++ ++ grq_lock_irqsave(&flags); ++ time_slice = p->policy == SCHED_FIFO ? 0 : MS_TO_NS(task_timeslice(p)); ++ grq_unlock_irqrestore(&flags); ++ ++ rcu_read_unlock(); ++ t = ns_to_timespec(time_slice); ++ retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0; ++ return retval; ++ ++out_unlock: ++ rcu_read_unlock(); ++ return retval; ++} ++ ++static const char stat_nam[] = TASK_STATE_TO_CHAR_STR; ++ ++void sched_show_task(struct task_struct *p) ++{ ++ unsigned long free = 0; ++ int ppid; ++ unsigned state; ++ ++ state = p->state ? __ffs(p->state) + 1 : 0; ++ printk(KERN_INFO "%-15.15s %c", p->comm, ++ state < sizeof(stat_nam) - 1 ? 
stat_nam[state] : '?'); ++#if BITS_PER_LONG == 32 ++ if (state == TASK_RUNNING) ++ printk(KERN_CONT " running "); ++ else ++ printk(KERN_CONT " %08lx ", thread_saved_pc(p)); ++#else ++ if (state == TASK_RUNNING) ++ printk(KERN_CONT " running task "); ++ else ++ printk(KERN_CONT " %016lx ", thread_saved_pc(p)); ++#endif ++#ifdef CONFIG_DEBUG_STACK_USAGE ++ free = stack_not_used(p); ++#endif ++ rcu_read_lock(); ++ ppid = task_pid_nr(rcu_dereference(p->real_parent)); ++ rcu_read_unlock(); ++ printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free, ++ task_pid_nr(p), ppid, ++ (unsigned long)task_thread_info(p)->flags); ++ ++ print_worker_info(KERN_INFO, p); ++ show_stack(p, NULL); ++} ++ ++void show_state_filter(unsigned long state_filter) ++{ ++ struct task_struct *g, *p; ++ ++#if BITS_PER_LONG == 32 ++ printk(KERN_INFO ++ " task PC stack pid father\n"); ++#else ++ printk(KERN_INFO ++ " task PC stack pid father\n"); ++#endif ++ rcu_read_lock(); ++ do_each_thread(g, p) { ++ /* ++ * reset the NMI-timeout, listing all files on a slow ++ * console might take a lot of time: ++ */ ++ touch_nmi_watchdog(); ++ if (!state_filter || (p->state & state_filter)) ++ sched_show_task(p); ++ } while_each_thread(g, p); ++ ++ touch_all_softlockup_watchdogs(); ++ ++ rcu_read_unlock(); ++ /* ++ * Only show locks if all tasks are dumped: ++ */ ++ if (!state_filter) ++ debug_show_all_locks(); ++} ++ ++void dump_cpu_task(int cpu) ++{ ++ pr_info("Task dump for CPU %d:\n", cpu); ++ sched_show_task(cpu_curr(cpu)); ++} ++ ++#ifdef CONFIG_SMP ++void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) ++{ ++ cpumask_copy(tsk_cpus_allowed(p), new_mask); ++} ++#endif ++ ++/** ++ * init_idle - set up an idle thread for a given CPU ++ * @idle: task in question ++ * @cpu: cpu the idle task belongs to ++ * ++ * NOTE: this function does not set the idle thread's NEED_RESCHED ++ * flag, to make booting more robust. ++ */ ++void init_idle(struct task_struct *idle, int cpu) ++{ ++ struct rq *rq = cpu_rq(cpu); ++ unsigned long flags; ++ ++ time_grq_lock(rq, &flags); ++ idle->last_ran = rq->clock_task; ++ idle->state = TASK_RUNNING; ++ /* Setting prio to illegal value shouldn't matter when never queued */ ++ idle->prio = PRIO_LIMIT; ++ set_rq_task(rq, idle); ++ do_set_cpus_allowed(idle, &cpumask_of_cpu(cpu)); ++ /* Silence PROVE_RCU */ ++ rcu_read_lock(); ++ set_task_cpu(idle, cpu); ++ rcu_read_unlock(); ++ rq->curr = rq->idle = idle; ++ idle->on_cpu = 1; ++ grq_unlock_irqrestore(&flags); ++ ++ /* Set the preempt count _outside_ the spinlocks! */ ++ task_thread_info(idle)->preempt_count = 0; ++ ++ ftrace_graph_init_idle_task(idle, cpu); ++#if defined(CONFIG_SMP) ++ sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu); ++#endif ++} ++ ++#ifdef CONFIG_SMP ++#ifdef CONFIG_NO_HZ_COMMON ++void nohz_balance_enter_idle(int cpu) ++{ ++} ++ ++void select_nohz_load_balancer(int stop_tick) ++{ ++} ++ ++void set_cpu_sd_state_idle(void) {} ++#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT) ++/** ++ * lowest_flag_domain - Return lowest sched_domain containing flag. ++ * @cpu: The cpu whose lowest level of sched domain is to ++ * be returned. ++ * @flag: The flag to check for the lowest sched_domain ++ * for the given cpu. ++ * ++ * Returns the lowest sched_domain of a cpu which contains the given flag. 
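++ *
++ * Meant to seed for_each_flag_domain() below; usage sketch, where
++ * SD_LOAD_BALANCE is just an example flag and visit() a placeholder:
++ *
++ *	for_each_flag_domain(cpu, sd, SD_LOAD_BALANCE)
++ *		visit(sd);	(bottom-up while the flag stays set)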
++ */
++static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)
++{
++	struct sched_domain *sd;
++
++	for_each_domain(cpu, sd)
++		if (sd && (sd->flags & flag))
++			break;
++
++	return sd;
++}
++
++/**
++ * for_each_flag_domain - Iterates over sched_domains containing the flag.
++ * @cpu: The cpu whose domains we're iterating over.
++ * @sd: variable holding the value of the power_savings_sd
++ *	for cpu.
++ * @flag: The flag to filter the sched_domains to be iterated.
++ *
++ * Iterates over all the scheduler domains for a given cpu that has the 'flag'
++ * set, starting from the lowest sched_domain to the highest.
++ */
++#define for_each_flag_domain(cpu, sd, flag) \
++	for (sd = lowest_flag_domain(cpu, flag); \
++	     (sd && (sd->flags & flag)); sd = sd->parent)
++
++#endif /* (CONFIG_SCHED_MC || CONFIG_SCHED_SMT) */
++
++static inline void resched_cpu(int cpu)
++{
++	unsigned long flags;
++
++	grq_lock_irqsave(&flags);
++	resched_task(cpu_curr(cpu));
++	grq_unlock_irqrestore(&flags);
++}
++
++/*
++ * In the semi idle case, use the nearest busy cpu for migrating timers
++ * from an idle cpu. This is good for power-savings.
++ *
++ * We don't do a similar optimization for a completely idle system, as
++ * selecting an idle cpu will add more delays to the timers than intended
++ * (as that cpu's timer base may not be up to date wrt jiffies etc).
++ */
++int get_nohz_timer_target(void)
++{
++	int cpu = smp_processor_id();
++	int i;
++	struct sched_domain *sd;
++
++	rcu_read_lock();
++	for_each_domain(cpu, sd) {
++		for_each_cpu(i, sched_domain_span(sd)) {
++			/* Found a busy cpu in this domain; use it. */
++			if (!idle_cpu(i)) {
++				cpu = i;
++				goto unlock;
++			}
++		}
++	}
++unlock:
++	rcu_read_unlock();
++	return cpu;
++}
++
++/*
++ * When add_timer_on() enqueues a timer into the timer wheel of an
++ * idle CPU then this timer might expire before the next timer event
++ * which is scheduled to wake up that CPU. In case of a completely
++ * idle system the next event might even be infinite time into the
++ * future. wake_up_idle_cpu() ensures that the CPU is woken up and
++ * leaves the inner idle loop so the newly added timer is taken into
++ * account when the CPU goes back to idle and evaluates the timer
++ * wheel for the next timer event.
++ */
++void wake_up_idle_cpu(int cpu)
++{
++	struct task_struct *idle;
++	struct rq *rq;
++
++	if (cpu == smp_processor_id())
++		return;
++
++	rq = cpu_rq(cpu);
++	idle = rq->idle;
++
++	/*
++	 * This is safe, as this function is called with the timer
++	 * wheel base lock of (cpu) held. When the CPU is on the way
++	 * to idle and has not yet set rq->curr to idle then it will
++	 * be serialised on the timer wheel base lock and take the new
++	 * timer into account automatically.
++	 */
++	if (unlikely(rq->curr != idle))
++		return;
++
++	/*
++	 * We can set TIF_RESCHED on the idle task of the other CPU
++	 * lockless. The worst case is that the other CPU runs the
++	 * idle task through an additional NOOP schedule()
++	 */
++	set_tsk_need_resched(idle);
++
++	/* NEED_RESCHED must be visible before we test polling */
++	smp_mb();
++	if (!tsk_is_polling(idle))
++		smp_send_reschedule(cpu);
++}
++
++void wake_up_nohz_cpu(int cpu)
++{
++	wake_up_idle_cpu(cpu);
++}
++#endif /* CONFIG_NO_HZ_COMMON */
++
++/*
++ * Change a given task's CPU affinity. Migrate the thread to a
++ * proper CPU and schedule it away if the CPU it's executing on
++ * is removed from the allowed bitmask.
++ *
++ * NOTE: the caller must have a valid reference to the task, the
++ * task must not exit() & deallocate itself prematurely.
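++ * (Callers normally pin the task with get_task_struct() first, as
++ * sched_setaffinity() above does.)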
The ++ * call is not atomic; no spinlocks may be held. ++ */ ++int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask) ++{ ++ bool running_wrong = false; ++ bool queued = false; ++ unsigned long flags; ++ struct rq *rq; ++ int ret = 0; ++ ++ rq = task_grq_lock(p, &flags); ++ ++ if (cpumask_equal(tsk_cpus_allowed(p), new_mask)) ++ goto out; ++ ++ if (!cpumask_intersects(new_mask, cpu_active_mask)) { ++ ret = -EINVAL; ++ goto out; ++ } ++ ++ queued = task_queued(p); ++ ++ do_set_cpus_allowed(p, new_mask); ++ ++ /* Can the task run on the task's current CPU? If so, we're done */ ++ if (cpumask_test_cpu(task_cpu(p), new_mask)) ++ goto out; ++ ++ if (task_running(p)) { ++ /* Task is running on the wrong cpu now, reschedule it. */ ++ if (rq == this_rq()) { ++ set_tsk_need_resched(p); ++ running_wrong = true; ++ } else ++ resched_task(p); ++ } else ++ set_task_cpu(p, cpumask_any_and(cpu_active_mask, new_mask)); ++ ++out: ++ if (queued) ++ try_preempt(p, rq); ++ task_grq_unlock(&flags); ++ ++ if (running_wrong) ++ _cond_resched(); ++ ++ return ret; ++} ++EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr); ++ ++#ifdef CONFIG_HOTPLUG_CPU ++extern struct task_struct *cpu_stopper_task; ++/* Run through task list and find tasks affined to just the dead cpu, then ++ * allocate a new affinity */ ++static void break_sole_affinity(int src_cpu, struct task_struct *idle) ++{ ++ struct task_struct *p, *t, *stopper; ++ ++ stopper = per_cpu(cpu_stopper_task, src_cpu); ++ do_each_thread(t, p) { ++ if (p != stopper && p != idle && !online_cpus(p)) { ++ cpumask_copy(tsk_cpus_allowed(p), cpu_possible_mask); ++ /* ++ * Don't tell them about moving exiting tasks or ++ * kernel threads (both mm NULL), since they never ++ * leave kernel. ++ */ ++ if (p->mm && printk_ratelimit()) { ++ printk(KERN_INFO "process %d (%s) no " ++ "longer affine to cpu %d\n", ++ task_pid_nr(p), p->comm, src_cpu); ++ } ++ } ++ clear_sticky(p); ++ } while_each_thread(t, p); ++} ++ ++/* ++ * Ensures that the idle task is using init_mm right before its cpu goes ++ * offline. ++ */ ++void idle_task_exit(void) ++{ ++ struct mm_struct *mm = current->active_mm; ++ ++ BUG_ON(cpu_online(smp_processor_id())); ++ ++ if (mm != &init_mm) ++ switch_mm(mm, &init_mm, current); ++ mmdrop(mm); ++} ++#endif /* CONFIG_HOTPLUG_CPU */ ++void sched_set_stop_task(int cpu, struct task_struct *stop) ++{ ++ struct sched_param stop_param = { .sched_priority = STOP_PRIO }; ++ struct sched_param start_param = { .sched_priority = 0 }; ++ struct task_struct *old_stop = cpu_rq(cpu)->stop; ++ ++ if (stop) { ++ /* ++ * Make it appear like a SCHED_FIFO task, its something ++ * userspace knows about and won't get confused about. ++ * ++ * Also, it will make PI more or less work without too ++ * much confusion -- but then, stop work should not ++ * rely on PI working anyway. ++ */ ++ sched_setscheduler_nocheck(stop, SCHED_FIFO, &stop_param); ++ } ++ ++ cpu_rq(cpu)->stop = stop; ++ ++ if (old_stop) { ++ /* ++ * Reset it back to a normal scheduling policy so that ++ * it can die in pieces. 
++ */ ++ sched_setscheduler_nocheck(old_stop, SCHED_NORMAL, &start_param); ++ } ++} ++ ++ ++#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL) ++ ++static struct ctl_table sd_ctl_dir[] = { ++ { ++ .procname = "sched_domain", ++ .mode = 0555, ++ }, ++ {} ++}; ++ ++static struct ctl_table sd_ctl_root[] = { ++ { ++ .procname = "kernel", ++ .mode = 0555, ++ .child = sd_ctl_dir, ++ }, ++ {} ++}; ++ ++static struct ctl_table *sd_alloc_ctl_entry(int n) ++{ ++ struct ctl_table *entry = ++ kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL); ++ ++ return entry; ++} ++ ++static void sd_free_ctl_entry(struct ctl_table **tablep) ++{ ++ struct ctl_table *entry; ++ ++ /* ++ * In the intermediate directories, both the child directory and ++ * procname are dynamically allocated and could fail but the mode ++ * will always be set. In the lowest directory the names are ++ * static strings and all have proc handlers. ++ */ ++ for (entry = *tablep; entry->mode; entry++) { ++ if (entry->child) ++ sd_free_ctl_entry(&entry->child); ++ if (entry->proc_handler == NULL) ++ kfree(entry->procname); ++ } ++ ++ kfree(*tablep); ++ *tablep = NULL; ++} ++ ++static void ++set_table_entry(struct ctl_table *entry, ++ const char *procname, void *data, int maxlen, ++ mode_t mode, proc_handler *proc_handler) ++{ ++ entry->procname = procname; ++ entry->data = data; ++ entry->maxlen = maxlen; ++ entry->mode = mode; ++ entry->proc_handler = proc_handler; ++} ++ ++static struct ctl_table * ++sd_alloc_ctl_domain_table(struct sched_domain *sd) ++{ ++ struct ctl_table *table = sd_alloc_ctl_entry(13); ++ ++ if (table == NULL) ++ return NULL; ++ ++ set_table_entry(&table[0], "min_interval", &sd->min_interval, ++ sizeof(long), 0644, proc_doulongvec_minmax); ++ set_table_entry(&table[1], "max_interval", &sd->max_interval, ++ sizeof(long), 0644, proc_doulongvec_minmax); ++ set_table_entry(&table[2], "busy_idx", &sd->busy_idx, ++ sizeof(int), 0644, proc_dointvec_minmax); ++ set_table_entry(&table[3], "idle_idx", &sd->idle_idx, ++ sizeof(int), 0644, proc_dointvec_minmax); ++ set_table_entry(&table[4], "newidle_idx", &sd->newidle_idx, ++ sizeof(int), 0644, proc_dointvec_minmax); ++ set_table_entry(&table[5], "wake_idx", &sd->wake_idx, ++ sizeof(int), 0644, proc_dointvec_minmax); ++ set_table_entry(&table[6], "forkexec_idx", &sd->forkexec_idx, ++ sizeof(int), 0644, proc_dointvec_minmax); ++ set_table_entry(&table[7], "busy_factor", &sd->busy_factor, ++ sizeof(int), 0644, proc_dointvec_minmax); ++ set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct, ++ sizeof(int), 0644, proc_dointvec_minmax); ++ set_table_entry(&table[9], "cache_nice_tries", ++ &sd->cache_nice_tries, ++ sizeof(int), 0644, proc_dointvec_minmax); ++ set_table_entry(&table[10], "flags", &sd->flags, ++ sizeof(int), 0644, proc_dointvec_minmax); ++ set_table_entry(&table[11], "name", sd->name, ++ CORENAME_MAX_SIZE, 0444, proc_dostring); ++ /* &table[12] is terminator */ ++ ++ return table; ++} ++ ++static ctl_table *sd_alloc_ctl_cpu_table(int cpu) ++{ ++ struct ctl_table *entry, *table; ++ struct sched_domain *sd; ++ int domain_num = 0, i; ++ char buf[32]; ++ ++ for_each_domain(cpu, sd) ++ domain_num++; ++ entry = table = sd_alloc_ctl_entry(domain_num + 1); ++ if (table == NULL) ++ return NULL; ++ ++ i = 0; ++ for_each_domain(cpu, sd) { ++ snprintf(buf, 32, "domain%d", i); ++ entry->procname = kstrdup(buf, GFP_KERNEL); ++ entry->mode = 0555; ++ entry->child = sd_alloc_ctl_domain_table(sd); ++ entry++; ++ i++; ++ } ++ return table; ++} ++ ++static struct 
ctl_table_header *sd_sysctl_header; ++static void register_sched_domain_sysctl(void) ++{ ++ int i, cpu_num = num_possible_cpus(); ++ struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1); ++ char buf[32]; ++ ++ WARN_ON(sd_ctl_dir[0].child); ++ sd_ctl_dir[0].child = entry; ++ ++ if (entry == NULL) ++ return; ++ ++ for_each_possible_cpu(i) { ++ snprintf(buf, 32, "cpu%d", i); ++ entry->procname = kstrdup(buf, GFP_KERNEL); ++ entry->mode = 0555; ++ entry->child = sd_alloc_ctl_cpu_table(i); ++ entry++; ++ } ++ ++ WARN_ON(sd_sysctl_header); ++ sd_sysctl_header = register_sysctl_table(sd_ctl_root); ++} ++ ++/* may be called multiple times per register */ ++static void unregister_sched_domain_sysctl(void) ++{ ++ if (sd_sysctl_header) ++ unregister_sysctl_table(sd_sysctl_header); ++ sd_sysctl_header = NULL; ++ if (sd_ctl_dir[0].child) ++ sd_free_ctl_entry(&sd_ctl_dir[0].child); ++} ++#else ++static void register_sched_domain_sysctl(void) ++{ ++} ++static void unregister_sched_domain_sysctl(void) ++{ ++} ++#endif ++ ++static void set_rq_online(struct rq *rq) ++{ ++ if (!rq->online) { ++ cpumask_set_cpu(cpu_of(rq), rq->rd->online); ++ rq->online = true; ++ } ++} ++ ++static void set_rq_offline(struct rq *rq) ++{ ++ if (rq->online) { ++ cpumask_clear_cpu(cpu_of(rq), rq->rd->online); ++ rq->online = false; ++ } ++} ++ ++/* ++ * migration_call - callback that gets triggered when a CPU is added. ++ */ ++static int __cpuinit ++migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) ++{ ++ int cpu = (long)hcpu; ++ unsigned long flags; ++ struct rq *rq = cpu_rq(cpu); ++#ifdef CONFIG_HOTPLUG_CPU ++ struct task_struct *idle = rq->idle; ++#endif ++ ++ switch (action & ~CPU_TASKS_FROZEN) { ++ ++ case CPU_UP_PREPARE: ++ break; ++ ++ case CPU_ONLINE: ++ /* Update our root-domain */ ++ grq_lock_irqsave(&flags); ++ if (rq->rd) { ++ BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); ++ ++ set_rq_online(rq); ++ } ++ grq.noc = num_online_cpus(); ++ grq_unlock_irqrestore(&flags); ++ break; ++ ++#ifdef CONFIG_HOTPLUG_CPU ++ case CPU_DEAD: ++ /* Idle task back to normal (off runqueue, low prio) */ ++ grq_lock_irq(); ++ return_task(idle, true); ++ idle->static_prio = MAX_PRIO; ++ __setscheduler(idle, rq, SCHED_NORMAL, 0); ++ idle->prio = PRIO_LIMIT; ++ set_rq_task(rq, idle); ++ update_clocks(rq); ++ grq_unlock_irq(); ++ break; ++ ++ case CPU_DYING: ++ /* Update our root-domain */ ++ grq_lock_irqsave(&flags); ++ if (rq->rd) { ++ BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); ++ set_rq_offline(rq); ++ } ++ break_sole_affinity(cpu, idle); ++ grq.noc = num_online_cpus(); ++ grq_unlock_irqrestore(&flags); ++ break; ++#endif ++ } ++ return NOTIFY_OK; ++} ++ ++/* ++ * Register at high priority so that task migration (migrate_all_tasks) ++ * happens before everything else. This has to be lower priority than ++ * the notifier in the perf_counter subsystem, though. 
++ */ ++static struct notifier_block __cpuinitdata migration_notifier = { ++ .notifier_call = migration_call, ++ .priority = CPU_PRI_MIGRATION, ++}; ++ ++static int __cpuinit sched_cpu_active(struct notifier_block *nfb, ++ unsigned long action, void *hcpu) ++{ ++ switch (action & ~CPU_TASKS_FROZEN) { ++ case CPU_STARTING: ++ case CPU_DOWN_FAILED: ++ set_cpu_active((long)hcpu, true); ++ return NOTIFY_OK; ++ default: ++ return NOTIFY_DONE; ++ } ++} ++ ++static int __cpuinit sched_cpu_inactive(struct notifier_block *nfb, ++ unsigned long action, void *hcpu) ++{ ++ switch (action & ~CPU_TASKS_FROZEN) { ++ case CPU_DOWN_PREPARE: ++ set_cpu_active((long)hcpu, false); ++ return NOTIFY_OK; ++ default: ++ return NOTIFY_DONE; ++ } ++} ++ ++int __init migration_init(void) ++{ ++ void *cpu = (void *)(long)smp_processor_id(); ++ int err; ++ ++ /* Initialise migration for the boot CPU */ ++ err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu); ++ BUG_ON(err == NOTIFY_BAD); ++ migration_call(&migration_notifier, CPU_ONLINE, cpu); ++ register_cpu_notifier(&migration_notifier); ++ ++ /* Register cpu active notifiers */ ++ cpu_notifier(sched_cpu_active, CPU_PRI_SCHED_ACTIVE); ++ cpu_notifier(sched_cpu_inactive, CPU_PRI_SCHED_INACTIVE); ++ ++ return 0; ++} ++early_initcall(migration_init); ++#endif ++ ++#ifdef CONFIG_SMP ++ ++static cpumask_var_t sched_domains_tmpmask; /* sched_domains_mutex */ ++ ++#ifdef CONFIG_SCHED_DEBUG ++ ++static __read_mostly int sched_debug_enabled; ++ ++static int __init sched_debug_setup(char *str) ++{ ++ sched_debug_enabled = 1; ++ ++ return 0; ++} ++early_param("sched_debug", sched_debug_setup); ++ ++static inline bool sched_debug(void) ++{ ++ return sched_debug_enabled; ++} ++ ++static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, ++ struct cpumask *groupmask) ++{ ++ char str[256]; ++ ++ cpulist_scnprintf(str, sizeof(str), sched_domain_span(sd)); ++ cpumask_clear(groupmask); ++ ++ printk(KERN_DEBUG "%*s domain %d: ", level, "", level); ++ ++ if (!(sd->flags & SD_LOAD_BALANCE)) { ++ printk("does not load-balance\n"); ++ if (sd->parent) ++ printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain" ++ " has parent"); ++ return -1; ++ } ++ ++ printk(KERN_CONT "span %s level %s\n", str, sd->name); ++ ++ if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) { ++ printk(KERN_ERR "ERROR: domain->span does not contain " ++ "CPU%d\n", cpu); ++ } ++ ++ printk(KERN_CONT "\n"); ++ ++ if (!cpumask_equal(sched_domain_span(sd), groupmask)) ++ printk(KERN_ERR "ERROR: groups don't span domain->span\n"); ++ ++ if (sd->parent && ++ !cpumask_subset(groupmask, sched_domain_span(sd->parent))) ++ printk(KERN_ERR "ERROR: parent span is not a superset " ++ "of domain->span\n"); ++ return 0; ++} ++ ++static void sched_domain_debug(struct sched_domain *sd, int cpu) ++{ ++ int level = 0; ++ ++ if (!sched_debug_enabled) ++ return; ++ ++ if (!sd) { ++ printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu); ++ return; ++ } ++ ++ printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu); ++ ++ for (;;) { ++ if (sched_domain_debug_one(sd, cpu, level, sched_domains_tmpmask)) ++ break; ++ level++; ++ sd = sd->parent; ++ if (!sd) ++ break; ++ } ++} ++#else /* !CONFIG_SCHED_DEBUG */ ++# define sched_domain_debug(sd, cpu) do { } while (0) ++static inline bool sched_debug(void) ++{ ++ return false; ++} ++#endif /* CONFIG_SCHED_DEBUG */ ++ ++static int sd_degenerate(struct sched_domain *sd) ++{ ++ if (cpumask_weight(sched_domain_span(sd)) == 1) ++ return 1; ++ ++ /* Following flags don't 
use groups */ ++ if (sd->flags & (SD_WAKE_AFFINE)) ++ return 0; ++ ++ return 1; ++} ++ ++static int ++sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent) ++{ ++ unsigned long cflags = sd->flags, pflags = parent->flags; ++ ++ if (sd_degenerate(parent)) ++ return 1; ++ ++ if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent))) ++ return 0; ++ ++ if (~cflags & pflags) ++ return 0; ++ ++ return 1; ++} ++ ++static void free_rootdomain(struct rcu_head *rcu) ++{ ++ struct root_domain *rd = container_of(rcu, struct root_domain, rcu); ++ ++ cpupri_cleanup(&rd->cpupri); ++ free_cpumask_var(rd->rto_mask); ++ free_cpumask_var(rd->online); ++ free_cpumask_var(rd->span); ++ kfree(rd); ++} ++ ++static void rq_attach_root(struct rq *rq, struct root_domain *rd) ++{ ++ struct root_domain *old_rd = NULL; ++ unsigned long flags; ++ ++ grq_lock_irqsave(&flags); ++ ++ if (rq->rd) { ++ old_rd = rq->rd; ++ ++ if (cpumask_test_cpu(rq->cpu, old_rd->online)) ++ set_rq_offline(rq); ++ ++ cpumask_clear_cpu(rq->cpu, old_rd->span); ++ ++ /* ++ * If we dont want to free the old_rt yet then ++ * set old_rd to NULL to skip the freeing later ++ * in this function: ++ */ ++ if (!atomic_dec_and_test(&old_rd->refcount)) ++ old_rd = NULL; ++ } ++ ++ atomic_inc(&rd->refcount); ++ rq->rd = rd; ++ ++ cpumask_set_cpu(rq->cpu, rd->span); ++ if (cpumask_test_cpu(rq->cpu, cpu_active_mask)) ++ set_rq_online(rq); ++ ++ grq_unlock_irqrestore(&flags); ++ ++ if (old_rd) ++ call_rcu_sched(&old_rd->rcu, free_rootdomain); ++} ++ ++static int init_rootdomain(struct root_domain *rd) ++{ ++ memset(rd, 0, sizeof(*rd)); ++ ++ if (!alloc_cpumask_var(&rd->span, GFP_KERNEL)) ++ goto out; ++ if (!alloc_cpumask_var(&rd->online, GFP_KERNEL)) ++ goto free_span; ++ if (!alloc_cpumask_var(&rd->rto_mask, GFP_KERNEL)) ++ goto free_online; ++ ++ if (cpupri_init(&rd->cpupri) != 0) ++ goto free_rto_mask; ++ return 0; ++ ++free_rto_mask: ++ free_cpumask_var(rd->rto_mask); ++free_online: ++ free_cpumask_var(rd->online); ++free_span: ++ free_cpumask_var(rd->span); ++out: ++ return -ENOMEM; ++} ++ ++static void init_defrootdomain(void) ++{ ++ init_rootdomain(&def_root_domain); ++ ++ atomic_set(&def_root_domain.refcount, 1); ++} ++ ++static struct root_domain *alloc_rootdomain(void) ++{ ++ struct root_domain *rd; ++ ++ rd = kmalloc(sizeof(*rd), GFP_KERNEL); ++ if (!rd) ++ return NULL; ++ ++ if (init_rootdomain(rd) != 0) { ++ kfree(rd); ++ return NULL; ++ } ++ ++ return rd; ++} ++ ++static void free_sched_domain(struct rcu_head *rcu) ++{ ++ struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu); ++ ++ kfree(sd); ++} ++ ++static void destroy_sched_domain(struct sched_domain *sd, int cpu) ++{ ++ call_rcu(&sd->rcu, free_sched_domain); ++} ++ ++static void destroy_sched_domains(struct sched_domain *sd, int cpu) ++{ ++ for (; sd; sd = sd->parent) ++ destroy_sched_domain(sd, cpu); ++} ++ ++/* ++ * Attach the domain 'sd' to 'cpu' as its base domain. Callers must ++ * hold the hotplug lock. ++ */ ++static void ++cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu) ++{ ++ struct rq *rq = cpu_rq(cpu); ++ struct sched_domain *tmp; ++ ++ /* Remove the sched domains which do not contribute to scheduling. 
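
The root-domain code above combines two standard kernel lifetime idioms: a refcount so that only the last runqueue detaching from a root_domain frees it, and call_rcu_sched() so the actual kfree() is deferred past a grace period for the benefit of lockless readers. Reduced to its skeleton (hypothetical struct, an illustration rather than part of the patch):

    #include <linux/kernel.h>
    #include <linux/rcupdate.h>
    #include <linux/slab.h>
    #include <linux/atomic.h>

    struct shared_obj {
    	atomic_t refcount;
    	struct rcu_head rcu;
    	/* ... payload read under rcu_read_lock() ... */
    };

    static void shared_obj_free(struct rcu_head *rcu)
    {
    	/* runs after a grace period: no reader can still see the object */
    	kfree(container_of(rcu, struct shared_obj, rcu));
    }

    static void shared_obj_put(struct shared_obj *obj)
    {
    	/* only the thread that drops the last reference frees it,
    	 * exactly like the old_rd handling in rq_attach_root() */
    	if (atomic_dec_and_test(&obj->refcount))
    		call_rcu_sched(&obj->rcu, shared_obj_free);
    }
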
*/ ++ for (tmp = sd; tmp; ) { ++ struct sched_domain *parent = tmp->parent; ++ if (!parent) ++ break; ++ ++ if (sd_parent_degenerate(tmp, parent)) { ++ tmp->parent = parent->parent; ++ if (parent->parent) ++ parent->parent->child = tmp; ++ destroy_sched_domain(parent, cpu); ++ } else ++ tmp = tmp->parent; ++ } ++ ++ if (sd && sd_degenerate(sd)) { ++ tmp = sd; ++ sd = sd->parent; ++ destroy_sched_domain(tmp, cpu); ++ if (sd) ++ sd->child = NULL; ++ } ++ ++ sched_domain_debug(sd, cpu); ++ ++ rq_attach_root(rq, rd); ++ tmp = rq->sd; ++ rcu_assign_pointer(rq->sd, sd); ++ destroy_sched_domains(tmp, cpu); ++} ++ ++/* cpus with isolated domains */ ++static cpumask_var_t cpu_isolated_map; ++ ++/* Setup the mask of cpus configured for isolated domains */ ++static int __init isolated_cpu_setup(char *str) ++{ ++ alloc_bootmem_cpumask_var(&cpu_isolated_map); ++ cpulist_parse(str, cpu_isolated_map); ++ return 1; ++} ++ ++__setup("isolcpus=", isolated_cpu_setup); ++ ++static const struct cpumask *cpu_cpu_mask(int cpu) ++{ ++ return cpumask_of_node(cpu_to_node(cpu)); ++} ++ ++struct sd_data { ++ struct sched_domain **__percpu sd; ++}; ++ ++struct s_data { ++ struct sched_domain ** __percpu sd; ++ struct root_domain *rd; ++}; ++ ++enum s_alloc { ++ sa_rootdomain, ++ sa_sd, ++ sa_sd_storage, ++ sa_none, ++}; ++ ++struct sched_domain_topology_level; ++ ++typedef struct sched_domain *(*sched_domain_init_f)(struct sched_domain_topology_level *tl, int cpu); ++typedef const struct cpumask *(*sched_domain_mask_f)(int cpu); ++ ++#define SDTL_OVERLAP 0x01 ++ ++struct sched_domain_topology_level { ++ sched_domain_init_f init; ++ sched_domain_mask_f mask; ++ int flags; ++ int numa_level; ++ struct sd_data data; ++}; ++ ++/* ++ * Initializers for schedule domains ++ * Non-inlined to reduce accumulated stack pressure in build_sched_domains() ++ */ ++ ++#ifdef CONFIG_SCHED_DEBUG ++# define SD_INIT_NAME(sd, type) sd->name = #type ++#else ++# define SD_INIT_NAME(sd, type) do { } while (0) ++#endif ++ ++#define SD_INIT_FUNC(type) \ ++static noinline struct sched_domain * \ ++sd_init_##type(struct sched_domain_topology_level *tl, int cpu) \ ++{ \ ++ struct sched_domain *sd = *per_cpu_ptr(tl->data.sd, cpu); \ ++ *sd = SD_##type##_INIT; \ ++ SD_INIT_NAME(sd, type); \ ++ sd->private = &tl->data; \ ++ return sd; \ ++} ++ ++SD_INIT_FUNC(CPU) ++#ifdef CONFIG_SCHED_SMT ++ SD_INIT_FUNC(SIBLING) ++#endif ++#ifdef CONFIG_SCHED_MC ++ SD_INIT_FUNC(MC) ++#endif ++#ifdef CONFIG_SCHED_BOOK ++ SD_INIT_FUNC(BOOK) ++#endif ++ ++static int default_relax_domain_level = -1; ++int sched_domain_level_max; ++ ++static int __init setup_relax_domain_level(char *str) ++{ ++ if (kstrtoint(str, 0, &default_relax_domain_level)) ++ pr_warn("Unable to set relax_domain_level\n"); ++ ++ return 1; ++} ++__setup("relax_domain_level=", setup_relax_domain_level); ++ ++static void set_domain_attribute(struct sched_domain *sd, ++ struct sched_domain_attr *attr) ++{ ++ int request; ++ ++ if (!attr || attr->relax_domain_level < 0) { ++ if (default_relax_domain_level < 0) ++ return; ++ else ++ request = default_relax_domain_level; ++ } else ++ request = attr->relax_domain_level; ++ if (request < sd->level) { ++ /* turn off idle balance on this domain */ ++ sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE); ++ } else { ++ /* turn on idle balance on this domain */ ++ sd->flags |= (SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE); ++ } ++} ++ ++static void __sdt_free(const struct cpumask *cpu_map); ++static int __sdt_alloc(const struct cpumask *cpu_map); ++ ++static void 
__free_domain_allocs(struct s_data *d, enum s_alloc what, ++ const struct cpumask *cpu_map) ++{ ++ switch (what) { ++ case sa_rootdomain: ++ if (!atomic_read(&d->rd->refcount)) ++ free_rootdomain(&d->rd->rcu); /* fall through */ ++ case sa_sd: ++ free_percpu(d->sd); /* fall through */ ++ case sa_sd_storage: ++ __sdt_free(cpu_map); /* fall through */ ++ case sa_none: ++ break; ++ } ++} ++ ++static enum s_alloc __visit_domain_allocation_hell(struct s_data *d, ++ const struct cpumask *cpu_map) ++{ ++ memset(d, 0, sizeof(*d)); ++ ++ if (__sdt_alloc(cpu_map)) ++ return sa_sd_storage; ++ d->sd = alloc_percpu(struct sched_domain *); ++ if (!d->sd) ++ return sa_sd_storage; ++ d->rd = alloc_rootdomain(); ++ if (!d->rd) ++ return sa_sd; ++ return sa_rootdomain; ++} ++ ++/* ++ * NULL the sd_data elements we've used to build the sched_domain ++ * structure so that the subsequent __free_domain_allocs() ++ * will not free the data we're using. ++ */ ++static void claim_allocations(int cpu, struct sched_domain *sd) ++{ ++ struct sd_data *sdd = sd->private; ++ ++ WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd); ++ *per_cpu_ptr(sdd->sd, cpu) = NULL; ++} ++ ++#ifdef CONFIG_SCHED_SMT ++static const struct cpumask *cpu_smt_mask(int cpu) ++{ ++ return topology_thread_cpumask(cpu); ++} ++#endif ++ ++/* ++ * Topology list, bottom-up. ++ */ ++static struct sched_domain_topology_level default_topology[] = { ++#ifdef CONFIG_SCHED_SMT ++ { sd_init_SIBLING, cpu_smt_mask, }, ++#endif ++#ifdef CONFIG_SCHED_MC ++ { sd_init_MC, cpu_coregroup_mask, }, ++#endif ++#ifdef CONFIG_SCHED_BOOK ++ { sd_init_BOOK, cpu_book_mask, }, ++#endif ++ { sd_init_CPU, cpu_cpu_mask, }, ++ { NULL, }, ++}; ++ ++static struct sched_domain_topology_level *sched_domain_topology = default_topology; ++ ++#ifdef CONFIG_NUMA ++ ++static int sched_domains_numa_levels; ++static int *sched_domains_numa_distance; ++static struct cpumask ***sched_domains_numa_masks; ++static int sched_domains_curr_level; ++ ++static inline int sd_local_flags(int level) ++{ ++ if (sched_domains_numa_distance[level] > RECLAIM_DISTANCE) ++ return 0; ++ ++ return SD_BALANCE_EXEC | SD_BALANCE_FORK | SD_WAKE_AFFINE; ++} ++ ++static struct sched_domain * ++sd_numa_init(struct sched_domain_topology_level *tl, int cpu) ++{ ++ struct sched_domain *sd = *per_cpu_ptr(tl->data.sd, cpu); ++ int level = tl->numa_level; ++ int sd_weight = cpumask_weight( ++ sched_domains_numa_masks[level][cpu_to_node(cpu)]); ++ ++ *sd = (struct sched_domain){ ++ .min_interval = sd_weight, ++ .max_interval = 2*sd_weight, ++ .busy_factor = 32, ++ .imbalance_pct = 125, ++ .cache_nice_tries = 2, ++ .busy_idx = 3, ++ .idle_idx = 2, ++ .newidle_idx = 0, ++ .wake_idx = 0, ++ .forkexec_idx = 0, ++ ++ .flags = 1*SD_LOAD_BALANCE ++ | 1*SD_BALANCE_NEWIDLE ++ | 0*SD_BALANCE_EXEC ++ | 0*SD_BALANCE_FORK ++ | 0*SD_BALANCE_WAKE ++ | 0*SD_WAKE_AFFINE ++ | 0*SD_SHARE_CPUPOWER ++ | 0*SD_SHARE_PKG_RESOURCES ++ | 1*SD_SERIALIZE ++ | 0*SD_PREFER_SIBLING ++ | sd_local_flags(level) ++ , ++ .last_balance = jiffies, ++ .balance_interval = sd_weight, ++ }; ++ SD_INIT_NAME(sd, NUMA); ++ sd->private = &tl->data; ++ ++ /* ++ * Ugly hack to pass state to sd_numa_mask()... 
++ */ ++ sched_domains_curr_level = tl->numa_level; ++ ++ return sd; ++} ++ ++static const struct cpumask *sd_numa_mask(int cpu) ++{ ++ return sched_domains_numa_masks[sched_domains_curr_level][cpu_to_node(cpu)]; ++} ++ ++static void sched_numa_warn(const char *str) ++{ ++ static int done = false; ++ int i,j; ++ ++ if (done) ++ return; ++ ++ done = true; ++ ++ printk(KERN_WARNING "ERROR: %s\n\n", str); ++ ++ for (i = 0; i < nr_node_ids; i++) { ++ printk(KERN_WARNING " "); ++ for (j = 0; j < nr_node_ids; j++) ++ printk(KERN_CONT "%02d ", node_distance(i,j)); ++ printk(KERN_CONT "\n"); ++ } ++ printk(KERN_WARNING "\n"); ++} ++ ++static bool find_numa_distance(int distance) ++{ ++ int i; ++ ++ if (distance == node_distance(0, 0)) ++ return true; ++ ++ for (i = 0; i < sched_domains_numa_levels; i++) { ++ if (sched_domains_numa_distance[i] == distance) ++ return true; ++ } ++ ++ return false; ++} ++ ++static void sched_init_numa(void) ++{ ++ int next_distance, curr_distance = node_distance(0, 0); ++ struct sched_domain_topology_level *tl; ++ int level = 0; ++ int i, j, k; ++ ++ sched_domains_numa_distance = kzalloc(sizeof(int) * nr_node_ids, GFP_KERNEL); ++ if (!sched_domains_numa_distance) ++ return; ++ ++ /* ++ * O(nr_nodes^2) deduplicating selection sort -- in order to find the ++ * unique distances in the node_distance() table. ++ * ++ * Assumes node_distance(0,j) includes all distances in ++ * node_distance(i,j) in order to avoid cubic time. ++ */ ++ next_distance = curr_distance; ++ for (i = 0; i < nr_node_ids; i++) { ++ for (j = 0; j < nr_node_ids; j++) { ++ for (k = 0; k < nr_node_ids; k++) { ++ int distance = node_distance(i, k); ++ ++ if (distance > curr_distance && ++ (distance < next_distance || ++ next_distance == curr_distance)) ++ next_distance = distance; ++ ++ /* ++ * While not a strong assumption it would be nice to know ++ * about cases where if node A is connected to B, B is not ++ * equally connected to A. ++ */ ++ if (sched_debug() && node_distance(k, i) != distance) ++ sched_numa_warn("Node-distance not symmetric"); ++ ++ if (sched_debug() && i && !find_numa_distance(distance)) ++ sched_numa_warn("Node-0 not representative"); ++ } ++ if (next_distance != curr_distance) { ++ sched_domains_numa_distance[level++] = next_distance; ++ sched_domains_numa_levels = level; ++ curr_distance = next_distance; ++ } else break; ++ } ++ ++ /* ++ * In case of sched_debug() we verify the above assumption. ++ */ ++ if (!sched_debug()) ++ break; ++ } ++ /* ++ * 'level' contains the number of unique distances, excluding the ++ * identity distance node_distance(i,i). ++ * ++ * The sched_domains_numa_distance[] array includes the actual distance ++ * numbers. ++ */ ++ ++ /* ++ * Here, we should temporarily reset sched_domains_numa_levels to 0. ++ * If it fails to allocate memory for array sched_domains_numa_masks[][], ++ * the array will contain less then 'level' members. This could be ++ * dangerous when we use it to iterate array sched_domains_numa_masks[][] ++ * in other functions. ++ * ++ * We reset it to 'level' at the end of this function. ++ */ ++ sched_domains_numa_levels = 0; ++ ++ sched_domains_numa_masks = kzalloc(sizeof(void *) * level, GFP_KERNEL); ++ if (!sched_domains_numa_masks) ++ return; ++ ++ /* ++ * Now for each level, construct a mask per node which contains all ++ * cpus of nodes that are that many hops away from us. 
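
The triple loop in sched_init_numa() above is a deduplicating selection sort over node_distance(): each pass finds the smallest distance strictly greater than the last one recorded, and each such distance becomes one NUMA level. The same idea as a standalone, runnable C sketch with a toy distance matrix (not part of the patch):

    #include <stdio.h>

    /* toy stand-in for node_distance(i, j) */
    static const int dist[3][3] = {
    	{ 10, 20, 30 },
    	{ 20, 10, 30 },
    	{ 30, 30, 10 },
    };
    #define NR_NODES 3

    int main(void)
    {
    	int curr = dist[0][0];	/* identity distance, excluded */
    	int levels = 0;

    	for (;;) {
    		int next = curr;
    		int i, j;

    		/* smallest distance strictly greater than 'curr' */
    		for (i = 0; i < NR_NODES; i++)
    			for (j = 0; j < NR_NODES; j++)
    				if (dist[i][j] > curr &&
    				    (dist[i][j] < next || next == curr))
    					next = dist[i][j];

    		if (next == curr)
    			break;	/* no larger distance left */

    		printf("level %d: distance %d\n", levels++, next);
    		curr = next;
    	}
    	return 0;	/* prints 20 then 30: two NUMA levels */
    }
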
++ */ ++ for (i = 0; i < level; i++) { ++ sched_domains_numa_masks[i] = ++ kzalloc(nr_node_ids * sizeof(void *), GFP_KERNEL); ++ if (!sched_domains_numa_masks[i]) ++ return; ++ ++ for (j = 0; j < nr_node_ids; j++) { ++ struct cpumask *mask = kzalloc(cpumask_size(), GFP_KERNEL); ++ if (!mask) ++ return; ++ ++ sched_domains_numa_masks[i][j] = mask; ++ ++ for (k = 0; k < nr_node_ids; k++) { ++ if (node_distance(j, k) > sched_domains_numa_distance[i]) ++ continue; ++ ++ cpumask_or(mask, mask, cpumask_of_node(k)); ++ } ++ } ++ } ++ ++ tl = kzalloc((ARRAY_SIZE(default_topology) + level) * ++ sizeof(struct sched_domain_topology_level), GFP_KERNEL); ++ if (!tl) ++ return; ++ ++ /* ++ * Copy the default topology bits.. ++ */ ++ for (i = 0; default_topology[i].init; i++) ++ tl[i] = default_topology[i]; ++ ++ /* ++ * .. and append 'j' levels of NUMA goodness. ++ */ ++ for (j = 0; j < level; i++, j++) { ++ tl[i] = (struct sched_domain_topology_level){ ++ .init = sd_numa_init, ++ .mask = sd_numa_mask, ++ .flags = SDTL_OVERLAP, ++ .numa_level = j, ++ }; ++ } ++ ++ sched_domain_topology = tl; ++ ++ sched_domains_numa_levels = level; ++} ++ ++static void sched_domains_numa_masks_set(int cpu) ++{ ++ int i, j; ++ int node = cpu_to_node(cpu); ++ ++ for (i = 0; i < sched_domains_numa_levels; i++) { ++ for (j = 0; j < nr_node_ids; j++) { ++ if (node_distance(j, node) <= sched_domains_numa_distance[i]) ++ cpumask_set_cpu(cpu, sched_domains_numa_masks[i][j]); ++ } ++ } ++} ++ ++static void sched_domains_numa_masks_clear(int cpu) ++{ ++ int i, j; ++ for (i = 0; i < sched_domains_numa_levels; i++) { ++ for (j = 0; j < nr_node_ids; j++) ++ cpumask_clear_cpu(cpu, sched_domains_numa_masks[i][j]); ++ } ++} ++ ++/* ++ * Update sched_domains_numa_masks[level][node] array when new cpus ++ * are onlined. 
++ */ ++static int sched_domains_numa_masks_update(struct notifier_block *nfb, ++ unsigned long action, ++ void *hcpu) ++{ ++ int cpu = (long)hcpu; ++ ++ switch (action & ~CPU_TASKS_FROZEN) { ++ case CPU_ONLINE: ++ sched_domains_numa_masks_set(cpu); ++ break; ++ ++ case CPU_DEAD: ++ sched_domains_numa_masks_clear(cpu); ++ break; ++ ++ default: ++ return NOTIFY_DONE; ++ } ++ ++ return NOTIFY_OK; ++} ++#else ++static inline void sched_init_numa(void) ++{ ++} ++ ++static int sched_domains_numa_masks_update(struct notifier_block *nfb, ++ unsigned long action, ++ void *hcpu) ++{ ++ return 0; ++} ++#endif /* CONFIG_NUMA */ ++ ++static int __sdt_alloc(const struct cpumask *cpu_map) ++{ ++ struct sched_domain_topology_level *tl; ++ int j; ++ ++ for (tl = sched_domain_topology; tl->init; tl++) { ++ struct sd_data *sdd = &tl->data; ++ ++ sdd->sd = alloc_percpu(struct sched_domain *); ++ if (!sdd->sd) ++ return -ENOMEM; ++ ++ for_each_cpu(j, cpu_map) { ++ struct sched_domain *sd; ++ ++ sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(), ++ GFP_KERNEL, cpu_to_node(j)); ++ if (!sd) ++ return -ENOMEM; ++ ++ *per_cpu_ptr(sdd->sd, j) = sd; ++ } ++ } ++ ++ return 0; ++} ++ ++static void __sdt_free(const struct cpumask *cpu_map) ++{ ++ struct sched_domain_topology_level *tl; ++ int j; ++ ++ for (tl = sched_domain_topology; tl->init; tl++) { ++ struct sd_data *sdd = &tl->data; ++ ++ for_each_cpu(j, cpu_map) { ++ struct sched_domain *sd; ++ ++ if (sdd->sd) { ++ sd = *per_cpu_ptr(sdd->sd, j); ++ kfree(*per_cpu_ptr(sdd->sd, j)); ++ } ++ } ++ free_percpu(sdd->sd); ++ sdd->sd = NULL; ++ } ++} ++ ++struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl, ++ struct s_data *d, const struct cpumask *cpu_map, ++ struct sched_domain_attr *attr, struct sched_domain *child, ++ int cpu) ++{ ++ struct sched_domain *sd = tl->init(tl, cpu); ++ if (!sd) ++ return child; ++ ++ cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu)); ++ if (child) { ++ sd->level = child->level + 1; ++ sched_domain_level_max = max(sched_domain_level_max, sd->level); ++ child->parent = sd; ++ } ++ sd->child = child; ++ set_domain_attribute(sd, attr); ++ ++ return sd; ++} ++ ++/* ++ * Build sched domains for a given set of cpus and attach the sched domains ++ * to the individual cpus ++ */ ++static int build_sched_domains(const struct cpumask *cpu_map, ++ struct sched_domain_attr *attr) ++{ ++ enum s_alloc alloc_state = sa_none; ++ struct sched_domain *sd; ++ struct s_data d; ++ int i, ret = -ENOMEM; ++ ++ alloc_state = __visit_domain_allocation_hell(&d, cpu_map); ++ if (alloc_state != sa_rootdomain) ++ goto error; ++ ++ /* Set up domains for cpus specified by the cpu_map. 
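
build_sched_domain() above threads each topology level onto the previous one through parent/child pointers, and build_sched_domains() then keeps the bottom of the chain as the per-CPU entry point. The wiring in isolation, as a runnable sketch with invented names (not part of the patch):

    #include <stdio.h>
    #include <stdlib.h>

    struct level {
    	const char *name;
    	struct level *parent, *child;
    };

    /* mirrors the child/parent wiring done in build_sched_domain() */
    static struct level *push_level(struct level *child, const char *name)
    {
    	struct level *l = calloc(1, sizeof(*l));

    	l->name = name;
    	l->child = child;
    	if (child)
    		child->parent = l;
    	return l;
    }

    int main(void)
    {
    	struct level *top = NULL, *l;

    	/* bottom-up, like the SMT -> MC -> CPU topology table */
    	top = push_level(top, "SIBLING");
    	top = push_level(top, "MC");
    	top = push_level(top, "CPU");

    	/* descend to the base domain, as the 'while (sd->child)' loop does */
    	for (l = top; l->child; l = l->child)
    		;
    	for (; l; l = l->parent)
    		printf("%s\n", l->name);	/* SIBLING, MC, CPU */

    	while (top) {	/* tear down the chain */
    		struct level *next = top->child;
    		free(top);
    		top = next;
    	}
    	return 0;
    }
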
*/ ++ for_each_cpu(i, cpu_map) { ++ struct sched_domain_topology_level *tl; ++ ++ sd = NULL; ++ for (tl = sched_domain_topology; tl->init; tl++) { ++ sd = build_sched_domain(tl, &d, cpu_map, attr, sd, i); ++ if (tl->flags & SDTL_OVERLAP) ++ sd->flags |= SD_OVERLAP; ++ if (cpumask_equal(cpu_map, sched_domain_span(sd))) ++ break; ++ } ++ ++ while (sd->child) ++ sd = sd->child; ++ ++ *per_cpu_ptr(d.sd, i) = sd; ++ } ++ ++ /* Calculate CPU power for physical packages and nodes */ ++ for (i = nr_cpumask_bits-1; i >= 0; i--) { ++ if (!cpumask_test_cpu(i, cpu_map)) ++ continue; ++ ++ for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) { ++ claim_allocations(i, sd); ++ } ++ } ++ ++ /* Attach the domains */ ++ rcu_read_lock(); ++ for_each_cpu(i, cpu_map) { ++ sd = *per_cpu_ptr(d.sd, i); ++ cpu_attach_domain(sd, d.rd, i); ++ } ++ rcu_read_unlock(); ++ ++ ret = 0; ++error: ++ __free_domain_allocs(&d, alloc_state, cpu_map); ++ return ret; ++} ++ ++static cpumask_var_t *doms_cur; /* current sched domains */ ++static int ndoms_cur; /* number of sched domains in 'doms_cur' */ ++static struct sched_domain_attr *dattr_cur; ++ /* attribues of custom domains in 'doms_cur' */ ++ ++/* ++ * Special case: If a kmalloc of a doms_cur partition (array of ++ * cpumask) fails, then fallback to a single sched domain, ++ * as determined by the single cpumask fallback_doms. ++ */ ++static cpumask_var_t fallback_doms; ++ ++/* ++ * arch_update_cpu_topology lets virtualized architectures update the ++ * cpu core maps. It is supposed to return 1 if the topology changed ++ * or 0 if it stayed the same. ++ */ ++int __attribute__((weak)) arch_update_cpu_topology(void) ++{ ++ return 0; ++} ++ ++cpumask_var_t *alloc_sched_domains(unsigned int ndoms) ++{ ++ int i; ++ cpumask_var_t *doms; ++ ++ doms = kmalloc(sizeof(*doms) * ndoms, GFP_KERNEL); ++ if (!doms) ++ return NULL; ++ for (i = 0; i < ndoms; i++) { ++ if (!alloc_cpumask_var(&doms[i], GFP_KERNEL)) { ++ free_sched_domains(doms, i); ++ return NULL; ++ } ++ } ++ return doms; ++} ++ ++void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms) ++{ ++ unsigned int i; ++ for (i = 0; i < ndoms; i++) ++ free_cpumask_var(doms[i]); ++ kfree(doms); ++} ++ ++/* ++ * Set up scheduler domains and groups. Callers must hold the hotplug lock. ++ * For now this just excludes isolated cpus, but could be used to ++ * exclude other special cases in the future. ++ */ ++static int init_sched_domains(const struct cpumask *cpu_map) ++{ ++ int err; ++ ++ arch_update_cpu_topology(); ++ ndoms_cur = 1; ++ doms_cur = alloc_sched_domains(ndoms_cur); ++ if (!doms_cur) ++ doms_cur = &fallback_doms; ++ cpumask_andnot(doms_cur[0], cpu_map, cpu_isolated_map); ++ err = build_sched_domains(doms_cur[0], NULL); ++ register_sched_domain_sysctl(); ++ ++ return err; ++} ++ ++/* ++ * Detach sched domains from a group of cpus specified in cpu_map ++ * These cpus will now be attached to the NULL domain ++ */ ++static void detach_destroy_domains(const struct cpumask *cpu_map) ++{ ++ int i; ++ ++ rcu_read_lock(); ++ for_each_cpu(i, cpu_map) ++ cpu_attach_domain(NULL, &def_root_domain, i); ++ rcu_read_unlock(); ++} ++ ++/* handle null as "default" */ ++static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur, ++ struct sched_domain_attr *new, int idx_new) ++{ ++ struct sched_domain_attr tmp; ++ ++ /* fast path */ ++ if (!new && !cur) ++ return 1; ++ ++ tmp = SD_ATTR_INIT; ++ return !memcmp(cur ? (cur + idx_cur) : &tmp, ++ new ? 
(new + idx_new) : &tmp, ++ sizeof(struct sched_domain_attr)); ++} ++ ++/* ++ * Partition sched domains as specified by the 'ndoms_new' ++ * cpumasks in the array doms_new[] of cpumasks. This compares ++ * doms_new[] to the current sched domain partitioning, doms_cur[]. ++ * It destroys each deleted domain and builds each new domain. ++ * ++ * 'doms_new' is an array of cpumask_var_t's of length 'ndoms_new'. ++ * The masks don't intersect (don't overlap.) We should setup one ++ * sched domain for each mask. CPUs not in any of the cpumasks will ++ * not be load balanced. If the same cpumask appears both in the ++ * current 'doms_cur' domains and in the new 'doms_new', we can leave ++ * it as it is. ++ * ++ * The passed in 'doms_new' should be allocated using ++ * alloc_sched_domains. This routine takes ownership of it and will ++ * free_sched_domains it when done with it. If the caller failed the ++ * alloc call, then it can pass in doms_new == NULL && ndoms_new == 1, ++ * and partition_sched_domains() will fallback to the single partition ++ * 'fallback_doms', it also forces the domains to be rebuilt. ++ * ++ * If doms_new == NULL it will be replaced with cpu_online_mask. ++ * ndoms_new == 0 is a special case for destroying existing domains, ++ * and it will not create the default domain. ++ * ++ * Call with hotplug lock held ++ */ ++void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[], ++ struct sched_domain_attr *dattr_new) ++{ ++ int i, j, n; ++ int new_topology; ++ ++ mutex_lock(&sched_domains_mutex); ++ ++ /* always unregister in case we don't destroy any domains */ ++ unregister_sched_domain_sysctl(); ++ ++ /* Let architecture update cpu core mappings. */ ++ new_topology = arch_update_cpu_topology(); ++ ++ n = doms_new ? ndoms_new : 0; ++ ++ /* Destroy deleted domains */ ++ for (i = 0; i < ndoms_cur; i++) { ++ for (j = 0; j < n && !new_topology; j++) { ++ if (cpumask_equal(doms_cur[i], doms_new[j]) ++ && dattrs_equal(dattr_cur, i, dattr_new, j)) ++ goto match1; ++ } ++ /* no match - a current sched domain not in new doms_new[] */ ++ detach_destroy_domains(doms_cur[i]); ++match1: ++ ; ++ } ++ ++ if (doms_new == NULL) { ++ ndoms_cur = 0; ++ doms_new = &fallback_doms; ++ cpumask_andnot(doms_new[0], cpu_active_mask, cpu_isolated_map); ++ WARN_ON_ONCE(dattr_new); ++ } ++ ++ /* Build new domains */ ++ for (i = 0; i < ndoms_new; i++) { ++ for (j = 0; j < ndoms_cur && !new_topology; j++) { ++ if (cpumask_equal(doms_new[i], doms_cur[j]) ++ && dattrs_equal(dattr_new, i, dattr_cur, j)) ++ goto match2; ++ } ++ /* no match - add a new doms_new */ ++ build_sched_domains(doms_new[i], dattr_new ? dattr_new + i : NULL); ++match2: ++ ; ++ } ++ ++ /* Remember the new sched domains */ ++ if (doms_cur != &fallback_doms) ++ free_sched_domains(doms_cur, ndoms_cur); ++ kfree(dattr_cur); /* kfree(NULL) is safe */ ++ doms_cur = doms_new; ++ dattr_cur = dattr_new; ++ ndoms_cur = ndoms_new; ++ ++ register_sched_domain_sysctl(); ++ ++ mutex_unlock(&sched_domains_mutex); ++} ++ ++/* ++ * Update cpusets according to cpu_active mask. If cpusets are ++ * disabled, cpuset_update_active_cpus() becomes a simple wrapper ++ * around partition_sched_domains(). 
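
partition_sched_domains() above is a two-pass reconciliation: the first pass destroys every current partition without an equal counterpart in the new set, the second builds every new partition without a counterpart in the old set, and anything matched by both passes is left untouched. Stripped to that control flow (runnable toy sketch, integers standing in for cpumasks; not part of the patch):

    #include <stdio.h>
    #include <stdbool.h>

    static bool equal(int a, int b) { return a == b; }

    static void reconcile(const int *cur, int ncur, const int *new, int nnew)
    {
    	int i, j;

    	for (i = 0; i < ncur; i++) {	/* destroy deleted partitions */
    		for (j = 0; j < nnew; j++)
    			if (equal(cur[i], new[j]))
    				goto match1;
    		printf("destroy %d\n", cur[i]);
    match1:		;
    	}
    	for (i = 0; i < nnew; i++) {	/* build added partitions */
    		for (j = 0; j < ncur; j++)
    			if (equal(new[i], cur[j]))
    				goto match2;
    		printf("build %d\n", new[i]);
    match2:		;
    	}
    }

    int main(void)
    {
    	int cur[] = { 1, 2, 3 }, new[] = { 2, 3, 4 };

    	reconcile(cur, 3, new, 3);	/* destroy 1, build 4; 2 and 3 untouched */
    	return 0;
    }
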
++ */ ++static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action, ++ void *hcpu) ++{ ++ switch (action & ~CPU_TASKS_FROZEN) { ++ case CPU_ONLINE: ++ case CPU_DOWN_FAILED: ++ cpuset_update_active_cpus(true); ++ return NOTIFY_OK; ++ default: ++ return NOTIFY_DONE; ++ } ++} ++ ++static int cpuset_cpu_inactive(struct notifier_block *nfb, unsigned long action, ++ void *hcpu) ++{ ++ switch (action & ~CPU_TASKS_FROZEN) { ++ case CPU_DOWN_PREPARE: ++ cpuset_update_active_cpus(false); ++ return NOTIFY_OK; ++ default: ++ return NOTIFY_DONE; ++ } ++} ++ ++#if defined(CONFIG_SCHED_SMT) || defined(CONFIG_SCHED_MC) ++/* ++ * Cheaper version of the below functions in case support for SMT and MC is ++ * compiled in but CPUs have no siblings. ++ */ ++static bool sole_cpu_idle(int cpu) ++{ ++ return rq_idle(cpu_rq(cpu)); ++} ++#endif ++#ifdef CONFIG_SCHED_SMT ++/* All this CPU's SMT siblings are idle */ ++static bool siblings_cpu_idle(int cpu) ++{ ++ return cpumask_subset(&(cpu_rq(cpu)->smt_siblings), ++ &grq.cpu_idle_map); ++} ++#endif ++#ifdef CONFIG_SCHED_MC ++/* All this CPU's shared cache siblings are idle */ ++static bool cache_cpu_idle(int cpu) ++{ ++ return cpumask_subset(&(cpu_rq(cpu)->cache_siblings), ++ &grq.cpu_idle_map); ++} ++#endif ++ ++enum sched_domain_level { ++ SD_LV_NONE = 0, ++ SD_LV_SIBLING, ++ SD_LV_MC, ++ SD_LV_BOOK, ++ SD_LV_CPU, ++ SD_LV_NODE, ++ SD_LV_ALLNODES, ++ SD_LV_MAX ++}; ++ ++void __init sched_init_smp(void) ++{ ++ struct sched_domain *sd; ++ int cpu; ++ ++ cpumask_var_t non_isolated_cpus; ++ ++ alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL); ++ alloc_cpumask_var(&fallback_doms, GFP_KERNEL); ++ ++ sched_init_numa(); ++ ++ get_online_cpus(); ++ mutex_lock(&sched_domains_mutex); ++ init_sched_domains(cpu_active_mask); ++ cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map); ++ if (cpumask_empty(non_isolated_cpus)) ++ cpumask_set_cpu(smp_processor_id(), non_isolated_cpus); ++ mutex_unlock(&sched_domains_mutex); ++ put_online_cpus(); ++ ++ hotcpu_notifier(sched_domains_numa_masks_update, CPU_PRI_SCHED_ACTIVE); ++ hotcpu_notifier(cpuset_cpu_active, CPU_PRI_CPUSET_ACTIVE); ++ hotcpu_notifier(cpuset_cpu_inactive, CPU_PRI_CPUSET_INACTIVE); ++ ++ /* Move init over to a non-isolated CPU */ ++ if (set_cpus_allowed_ptr(current, non_isolated_cpus) < 0) ++ BUG(); ++ free_cpumask_var(non_isolated_cpus); ++ ++ grq_lock_irq(); ++ /* ++ * Set up the relative cache distance of each online cpu from each ++ * other in a simple array for quick lookup. Locality is determined ++ * by the closest sched_domain that CPUs are separated by. CPUs with ++ * shared cache in SMT and MC are treated as local. Separate CPUs ++ * (within the same package or physically) within the same node are ++ * treated as not local. CPUs not even in the same domain (different ++ * nodes) are treated as very distant. 
++ */ ++ for_each_online_cpu(cpu) { ++ struct rq *rq = cpu_rq(cpu); ++ ++ mutex_lock(&sched_domains_mutex); ++ for_each_domain(cpu, sd) { ++ int locality, other_cpu; ++ ++#ifdef CONFIG_SCHED_SMT ++ if (sd->level == SD_LV_SIBLING) { ++ for_each_cpu_mask(other_cpu, *sched_domain_span(sd)) ++ cpumask_set_cpu(other_cpu, &rq->smt_siblings); ++ } ++#endif ++#ifdef CONFIG_SCHED_MC ++ if (sd->level == SD_LV_MC) { ++ for_each_cpu_mask(other_cpu, *sched_domain_span(sd)) ++ cpumask_set_cpu(other_cpu, &rq->cache_siblings); ++ } ++#endif ++ if (sd->level <= SD_LV_SIBLING) ++ locality = 1; ++ else if (sd->level <= SD_LV_MC) ++ locality = 2; ++ else if (sd->level <= SD_LV_NODE) ++ locality = 3; ++ else ++ continue; ++ ++ for_each_cpu_mask(other_cpu, *sched_domain_span(sd)) { ++ if (locality < rq->cpu_locality[other_cpu]) ++ rq->cpu_locality[other_cpu] = locality; ++ } ++ } ++ mutex_unlock(&sched_domains_mutex); ++ ++ /* ++ * Each runqueue has its own function in case it doesn't have ++ * siblings of its own allowing mixed topologies. ++ */ ++#ifdef CONFIG_SCHED_SMT ++ if (cpus_weight(rq->smt_siblings) > 1) ++ rq->siblings_idle = siblings_cpu_idle; ++#endif ++#ifdef CONFIG_SCHED_MC ++ if (cpus_weight(rq->cache_siblings) > 1) ++ rq->cache_idle = cache_cpu_idle; ++#endif ++ } ++ grq_unlock_irq(); ++} ++#else ++void __init sched_init_smp(void) ++{ ++} ++#endif /* CONFIG_SMP */ ++ ++unsigned int sysctl_timer_migration = 1; ++ ++int in_sched_functions(unsigned long addr) ++{ ++ return in_lock_functions(addr) || ++ (addr >= (unsigned long)__sched_text_start ++ && addr < (unsigned long)__sched_text_end); ++} ++ ++void __init sched_init(void) ++{ ++ int i; ++ struct rq *rq; ++ ++ prio_ratios[0] = 128; ++ for (i = 1 ; i < PRIO_RANGE ; i++) ++ prio_ratios[i] = prio_ratios[i - 1] * 11 / 10; ++ ++ raw_spin_lock_init(&grq.lock); ++ grq.nr_running = grq.nr_uninterruptible = grq.nr_switches = 0; ++ grq.niffies = 0; ++ grq.last_jiffy = jiffies; ++ raw_spin_lock_init(&grq.iso_lock); ++ grq.iso_ticks = 0; ++ grq.iso_refractory = false; ++ grq.noc = 1; ++#ifdef CONFIG_SMP ++ init_defrootdomain(); ++ grq.qnr = grq.idle_cpus = 0; ++ cpumask_clear(&grq.cpu_idle_map); ++#else ++ uprq = &per_cpu(runqueues, 0); ++#endif ++ for_each_possible_cpu(i) { ++ rq = cpu_rq(i); ++ rq->user_pc = rq->nice_pc = rq->softirq_pc = rq->system_pc = ++ rq->iowait_pc = rq->idle_pc = 0; ++ rq->dither = false; ++#ifdef CONFIG_SMP ++ rq->sticky_task = NULL; ++ rq->last_niffy = 0; ++ rq->sd = NULL; ++ rq->rd = NULL; ++ rq->online = false; ++ rq->cpu = i; ++ rq_attach_root(rq, &def_root_domain); ++#endif ++ atomic_set(&rq->nr_iowait, 0); ++ } ++ ++#ifdef CONFIG_SMP ++ nr_cpu_ids = i; ++ /* ++ * Set the base locality for cpu cache distance calculation to ++ * "distant" (3). Make sure the distance from a CPU to itself is 0. 
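
The locality scan above flattens the sched-domain tree into rq->cpu_locality[]: every CPU pair starts at 4 ("very distant"), the diagonal is 0, and spans found at the SMT/MC/node levels tighten pairs to 1/2/3, always keeping the minimum seen. The same computation over a plain matrix, as a runnable sketch (the 4-CPU topology is made up, not part of the patch):

    #include <stdio.h>

    #define NR 4
    static int locality[NR][NR];

    /* tighten locality for every pair inside one domain span */
    static void apply_span(const int *cpus, int n, int level)
    {
    	int a, b;

    	for (a = 0; a < n; a++)
    		for (b = 0; b < n; b++)
    			if (level < locality[cpus[a]][cpus[b]])
    				locality[cpus[a]][cpus[b]] = level;
    }

    int main(void)
    {
    	int i, j;
    	int smt0[] = { 0, 1 }, smt1[] = { 2, 3 };	/* sibling pairs */
    	int mc[] = { 0, 1, 2, 3 };			/* shared package */

    	for (i = 0; i < NR; i++)
    		for (j = 0; j < NR; j++)
    			locality[i][j] = (i == j) ? 0 : 4;	/* self = 0, default = distant */

    	apply_span(smt0, 2, 1);		/* SD_LV_SIBLING -> 1 */
    	apply_span(smt1, 2, 1);
    	apply_span(mc, 4, 2);		/* SD_LV_MC -> 2 */

    	for (i = 0; i < NR; i++, puts(""))
    		for (j = 0; j < NR; j++)
    			printf("%d ", locality[i][j]);
    	return 0;
    }
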
++ */ ++ for_each_possible_cpu(i) { ++ int j; ++ ++ rq = cpu_rq(i); ++#ifdef CONFIG_SCHED_SMT ++ cpumask_clear(&rq->smt_siblings); ++ cpumask_set_cpu(i, &rq->smt_siblings); ++ rq->siblings_idle = sole_cpu_idle; ++ cpumask_set_cpu(i, &rq->smt_siblings); ++#endif ++#ifdef CONFIG_SCHED_MC ++ cpumask_clear(&rq->cache_siblings); ++ cpumask_set_cpu(i, &rq->cache_siblings); ++ rq->cache_idle = sole_cpu_idle; ++ cpumask_set_cpu(i, &rq->cache_siblings); ++#endif ++ rq->cpu_locality = kmalloc(nr_cpu_ids * sizeof(int *), GFP_ATOMIC); ++ for_each_possible_cpu(j) { ++ if (i == j) ++ rq->cpu_locality[j] = 0; ++ else ++ rq->cpu_locality[j] = 4; ++ } ++ } ++#endif ++ ++ for (i = 0; i < PRIO_LIMIT; i++) ++ INIT_LIST_HEAD(grq.queue + i); ++ /* delimiter for bitsearch */ ++ __set_bit(PRIO_LIMIT, grq.prio_bitmap); ++ ++#ifdef CONFIG_PREEMPT_NOTIFIERS ++ INIT_HLIST_HEAD(&init_task.preempt_notifiers); ++#endif ++ ++#ifdef CONFIG_RT_MUTEXES ++ plist_head_init(&init_task.pi_waiters); ++#endif ++ ++ /* ++ * The boot idle thread does lazy MMU switching as well: ++ */ ++ atomic_inc(&init_mm.mm_count); ++ enter_lazy_tlb(&init_mm, current); ++ ++ /* ++ * Make us the idle thread. Technically, schedule() should not be ++ * called from this thread, however somewhere below it might be, ++ * but because we are the idle thread, we just pick up running again ++ * when this runqueue becomes "idle". ++ */ ++ init_idle(current, smp_processor_id()); ++ ++#ifdef CONFIG_SMP ++ zalloc_cpumask_var(&sched_domains_tmpmask, GFP_NOWAIT); ++ /* May be allocated at isolcpus cmdline parse time */ ++ if (cpu_isolated_map == NULL) ++ zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT); ++ idle_thread_set_boot_cpu(); ++#endif /* SMP */ ++} ++ ++#ifdef CONFIG_DEBUG_ATOMIC_SLEEP ++static inline int preempt_count_equals(int preempt_offset) ++{ ++ int nested = (preempt_count() & ~PREEMPT_ACTIVE) + rcu_preempt_depth(); ++ ++ return (nested == preempt_offset); ++} ++ ++void __might_sleep(const char *file, int line, int preempt_offset) ++{ ++ static unsigned long prev_jiffy; /* ratelimiting */ ++ ++ rcu_sleep_check(); /* WARN_ON_ONCE() by default, no rate limit reqd. 
*/ ++ if ((preempt_count_equals(preempt_offset) && !irqs_disabled()) || ++ system_state != SYSTEM_RUNNING || oops_in_progress) ++ return; ++ if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy) ++ return; ++ prev_jiffy = jiffies; ++ ++ printk(KERN_ERR ++ "BUG: sleeping function called from invalid context at %s:%d\n", ++ file, line); ++ printk(KERN_ERR ++ "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n", ++ in_atomic(), irqs_disabled(), ++ current->pid, current->comm); ++ ++ debug_show_held_locks(current); ++ if (irqs_disabled()) ++ print_irqtrace_events(current); ++ dump_stack(); ++} ++EXPORT_SYMBOL(__might_sleep); ++#endif ++ ++#ifdef CONFIG_MAGIC_SYSRQ ++void normalize_rt_tasks(void) ++{ ++ struct task_struct *g, *p; ++ unsigned long flags; ++ struct rq *rq; ++ int queued; ++ ++ read_lock_irqsave(&tasklist_lock, flags); ++ ++ do_each_thread(g, p) { ++ if (!rt_task(p) && !iso_task(p)) ++ continue; ++ ++ raw_spin_lock(&p->pi_lock); ++ rq = __task_grq_lock(p); ++ ++ queued = task_queued(p); ++ if (queued) ++ dequeue_task(p); ++ __setscheduler(p, rq, SCHED_NORMAL, 0); ++ if (queued) { ++ enqueue_task(p); ++ try_preempt(p, rq); ++ } ++ ++ __task_grq_unlock(); ++ raw_spin_unlock(&p->pi_lock); ++ } while_each_thread(g, p); ++ ++ read_unlock_irqrestore(&tasklist_lock, flags); ++} ++#endif /* CONFIG_MAGIC_SYSRQ */ ++ ++#if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) ++/* ++ * These functions are only useful for the IA64 MCA handling, or kdb. ++ * ++ * They can only be called when the whole system has been ++ * stopped - every CPU needs to be quiescent, and no scheduling ++ * activity can take place. Using them for anything else would ++ * be a serious bug, and as a result, they aren't even visible ++ * under any other configuration. ++ */ ++ ++/** ++ * curr_task - return the current task for a given cpu. ++ * @cpu: the processor in question. ++ * ++ * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED! ++ */ ++struct task_struct *curr_task(int cpu) ++{ ++ return cpu_curr(cpu); ++} ++ ++#endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */ ++ ++#ifdef CONFIG_IA64 ++/** ++ * set_curr_task - set the current task for a given cpu. ++ * @cpu: the processor in question. ++ * @p: the task pointer to set. ++ * ++ * Description: This function must only be used when non-maskable interrupts ++ * are serviced on a separate stack. It allows the architecture to switch the ++ * notion of the current task on a cpu in a non-blocking manner. This function ++ * must be called with all CPU's synchronised, and interrupts disabled, the ++ * and caller must save the original value of the current task (see ++ * curr_task() above) and restore that value before reenabling interrupts and ++ * re-starting the system. ++ * ++ * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED! 
++ */ ++void set_curr_task(int cpu, struct task_struct *p) ++{ ++ cpu_curr(cpu) = p; ++} ++ ++#endif ++ ++/* ++ * Use precise platform statistics if available: ++ */ ++#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE ++void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st) ++{ ++ *ut = p->utime; ++ *st = p->stime; ++} ++ ++void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st) ++{ ++ struct task_cputime cputime; ++ ++ thread_group_cputime(p, &cputime); ++ ++ *ut = cputime.utime; ++ *st = cputime.stime; ++} ++ ++void vtime_account_system_irqsafe(struct task_struct *tsk) ++{ ++ unsigned long flags; ++ ++ local_irq_save(flags); ++ vtime_account_system(tsk); ++ local_irq_restore(flags); ++} ++EXPORT_SYMBOL_GPL(vtime_account_system_irqsafe); ++ ++#ifndef __ARCH_HAS_VTIME_TASK_SWITCH ++void vtime_task_switch(struct task_struct *prev) ++{ ++ if (is_idle_task(prev)) ++ vtime_account_idle(prev); ++ else ++ vtime_account_system(prev); ++ ++ vtime_account_user(prev); ++ arch_vtime_task_switch(prev); ++} ++#endif ++ ++#else ++/* ++ * Perform (stime * rtime) / total, but avoid multiplication overflow by ++ * losing precision when the numbers are big. ++ */ ++static cputime_t scale_stime(u64 stime, u64 rtime, u64 total) ++{ ++ u64 scaled; ++ ++ for (;;) { ++ /* Make sure "rtime" is the bigger of stime/rtime */ ++ if (stime > rtime) { ++ u64 tmp = rtime; rtime = stime; stime = tmp; ++ } ++ ++ /* Make sure 'total' fits in 32 bits */ ++ if (total >> 32) ++ goto drop_precision; ++ ++ /* Does rtime (and thus stime) fit in 32 bits? */ ++ if (!(rtime >> 32)) ++ break; ++ ++ /* Can we just balance rtime/stime rather than dropping bits? */ ++ if (stime >> 31) ++ goto drop_precision; ++ ++ /* We can grow stime and shrink rtime and try to make them both fit */ ++ stime <<= 1; ++ rtime >>= 1; ++ continue; ++ ++drop_precision: ++ /* We drop from rtime, it has more bits than stime */ ++ rtime >>= 1; ++ total >>= 1; ++ } ++ ++ /* ++ * Make sure gcc understands that this is a 32x32->64 multiply, ++ * followed by a 64/32->64 divide. ++ */ ++ scaled = div_u64((u64) (u32) stime * (u64) (u32) rtime, (u32)total); ++ return (__force cputime_t) scaled; ++} ++ ++/* ++ * Adjust tick based cputime random precision against scheduler ++ * runtime accounting. ++ */ ++static void cputime_adjust(struct task_cputime *curr, ++ struct cputime *prev, ++ cputime_t *ut, cputime_t *st) ++{ ++ cputime_t rtime, stime, utime, total; ++ ++ stime = curr->stime; ++ total = stime + curr->utime; ++ ++ /* ++ * Tick based cputime accounting depend on random scheduling ++ * timeslices of a task to be interrupted or not by the timer. ++ * Depending on these circumstances, the number of these interrupts ++ * may be over or under-optimistic, matching the real user and system ++ * cputime with a variable precision. ++ * ++ * Fix this by scaling these tick based values against the total ++ * runtime accounted by the CFS scheduler. ++ */ ++ rtime = nsecs_to_cputime(curr->sum_exec_runtime); ++ ++ /* ++ * Update userspace visible utime/stime values only if actual execution ++ * time is bigger than already exported. Note that can happen, that we ++ * provided bigger values due to scaling inaccuracy on big numbers. 
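
scale_stime() above evaluates stime * rtime / total with only 64-bit arithmetic: it shifts bits out of the operands until the product fits a 32x32->64 multiply and the divisor fits 32 bits, dropping precision only when it must. A userspace rendering of the same loop (runnable; div_u64() replaced by a plain 64-bit divide, not part of the patch):

    #include <stdio.h>
    #include <stdint.h>

    static uint64_t scale(uint64_t stime, uint64_t rtime, uint64_t total)
    {
    	for (;;) {
    		if (stime > rtime) {		/* keep rtime the larger */
    			uint64_t tmp = rtime; rtime = stime; stime = tmp;
    		}
    		if (total >> 32)
    			goto drop_precision;	/* divisor must fit 32 bits */
    		if (!(rtime >> 32))
    			break;			/* both factors fit 32 bits */
    		if (stime >> 31)
    			goto drop_precision;
    		stime <<= 1;			/* rebalance instead of dropping bits */
    		rtime >>= 1;
    		continue;
    drop_precision:
    		rtime >>= 1;			/* rtime has more bits to spare */
    		total >>= 1;
    	}
    	/* 32x32->64 multiply, then 64/32 divide, as in the kernel version */
    	return (uint32_t)stime * (uint64_t)(uint32_t)rtime / (uint32_t)total;
    }

    int main(void)
    {
    	/* prints 1099511627776 (= 2^40), exact despite 64-bit-only arithmetic */
    	printf("%llu\n", (unsigned long long)
    	       scale(1ULL << 40, 3ULL << 40, 3ULL << 40));
    	return 0;
    }
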
++ */ ++ if (prev->stime + prev->utime >= rtime) ++ goto out; ++ ++ if (total) { ++ stime = scale_stime((__force u64)stime, ++ (__force u64)rtime, (__force u64)total); ++ utime = rtime - stime; ++ } else { ++ stime = rtime; ++ utime = 0; ++ } ++ ++ /* ++ * If the tick based count grows faster than the scheduler one, ++ * the result of the scaling may go backward. ++ * Let's enforce monotonicity. ++ */ ++ prev->stime = max(prev->stime, stime); ++ prev->utime = max(prev->utime, utime); ++ ++out: ++ *ut = prev->utime; ++ *st = prev->stime; ++} ++ ++void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st) ++{ ++ struct task_cputime cputime = { ++ .sum_exec_runtime = tsk_seruntime(p), ++ }; ++ ++ task_cputime(p, &cputime.utime, &cputime.stime); ++ cputime_adjust(&cputime, &p->prev_cputime, ut, st); ++} ++ ++/* ++ * Must be called with siglock held. ++ */ ++void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st) ++{ ++ struct task_cputime cputime; ++ ++ thread_group_cputime(p, &cputime); ++ cputime_adjust(&cputime, &p->signal->prev_cputime, ut, st); ++} ++#endif ++ ++void __cpuinit init_idle_bootup_task(struct task_struct *idle) ++{} ++ ++#ifdef CONFIG_SCHED_DEBUG ++void proc_sched_show_task(struct task_struct *p, struct seq_file *m) ++{} ++ ++void proc_sched_set_task(struct task_struct *p) ++{} ++#endif ++ ++#ifdef CONFIG_SMP ++#define SCHED_LOAD_SHIFT (10) ++#define SCHED_LOAD_SCALE (1L << SCHED_LOAD_SHIFT) ++ ++unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu) ++{ ++ return SCHED_LOAD_SCALE; ++} ++ ++unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu) ++{ ++ unsigned long weight = cpumask_weight(sched_domain_span(sd)); ++ unsigned long smt_gain = sd->smt_gain; ++ ++ smt_gain /= weight; ++ ++ return smt_gain; ++} ++#endif +Index: linux-3.10-ck1/include/uapi/linux/sched.h +=================================================================== +--- linux-3.10-ck1.orig/include/uapi/linux/sched.h 2013-07-09 17:28:57.142502083 +1000 ++++ linux-3.10-ck1/include/uapi/linux/sched.h 2013-07-09 17:29:00.843501924 +1000 +@@ -37,8 +37,15 @@ + #define SCHED_FIFO 1 + #define SCHED_RR 2 + #define SCHED_BATCH 3 +-/* SCHED_ISO: reserved but not implemented yet */ ++/* SCHED_ISO: Implemented on BFS only */ + #define SCHED_IDLE 5 ++#ifdef CONFIG_SCHED_BFS ++#define SCHED_ISO 4 ++#define SCHED_IDLEPRIO SCHED_IDLE ++#define SCHED_MAX (SCHED_IDLEPRIO) ++#define SCHED_RANGE(policy) ((policy) <= SCHED_MAX) ++#endif ++ + /* Can be ORed in to make sure the process is reverted back to SCHED_NORMAL on fork */ + #define SCHED_RESET_ON_FORK 0x40000000 + +Index: linux-3.10-ck1/include/linux/sched/rt.h +=================================================================== +--- linux-3.10-ck1.orig/include/linux/sched/rt.h 2013-07-09 17:28:57.158502083 +1000 ++++ linux-3.10-ck1/include/linux/sched/rt.h 2013-07-09 17:29:00.844501924 +1000 +@@ -14,11 +14,24 @@ + * MAX_RT_PRIO must not be smaller than MAX_USER_RT_PRIO. 
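
With the uapi hunk above, SCHED_ISO becomes policy 4 on CONFIG_SCHED_BFS kernels (mainline keeps the slot reserved), giving unprivileged tasks a soft-realtime class, and SCHED_RANGE() bounds the valid policies. Requesting it from userspace is ordinary sched_setscheduler() use; the sketch below hedges on the constant, which only a BFS kernel accepts:

    #include <sched.h>
    #include <stdio.h>

    #ifndef SCHED_ISO
    #define SCHED_ISO 4	/* BFS-only policy; reserved but unimplemented upstream */
    #endif

    int main(void)
    {
    	struct sched_param sp = { .sched_priority = 0 };	/* ISO takes no RT priority */

    	if (sched_setscheduler(0, SCHED_ISO, &sp) == -1)
    		perror("sched_setscheduler");	/* EINVAL on non-BFS kernels */
    	return 0;
    }
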
+ */ + ++#ifdef CONFIG_SCHED_BFS ++#define MAX_USER_RT_PRIO 100 ++#define MAX_RT_PRIO (MAX_USER_RT_PRIO + 1) ++#define DEFAULT_PRIO (MAX_RT_PRIO + 20) ++ ++#define PRIO_RANGE (40) ++#define MAX_PRIO (MAX_RT_PRIO + PRIO_RANGE) ++#define ISO_PRIO (MAX_RT_PRIO) ++#define NORMAL_PRIO (MAX_RT_PRIO + 1) ++#define IDLE_PRIO (MAX_RT_PRIO + 2) ++#define PRIO_LIMIT ((IDLE_PRIO) + 1) ++#else /* CONFIG_SCHED_BFS */ + #define MAX_USER_RT_PRIO 100 + #define MAX_RT_PRIO MAX_USER_RT_PRIO + + #define MAX_PRIO (MAX_RT_PRIO + 40) + #define DEFAULT_PRIO (MAX_RT_PRIO + 20) ++#endif /* CONFIG_SCHED_BFS */ + + static inline int rt_prio(int prio) + { +Index: linux-3.10-ck1/kernel/stop_machine.c +=================================================================== +--- linux-3.10-ck1.orig/kernel/stop_machine.c 2013-07-09 17:28:57.177502082 +1000 ++++ linux-3.10-ck1/kernel/stop_machine.c 2013-07-09 17:29:00.844501924 +1000 +@@ -40,7 +40,8 @@ + }; + + static DEFINE_PER_CPU(struct cpu_stopper, cpu_stopper); +-static DEFINE_PER_CPU(struct task_struct *, cpu_stopper_task); ++DEFINE_PER_CPU(struct task_struct *, cpu_stopper_task); ++ + static bool stop_machine_initialized = false; + + static void cpu_stop_init_done(struct cpu_stop_done *done, unsigned int nr_todo) +Index: linux-3.10-ck1/drivers/cpufreq/cpufreq_conservative.c +=================================================================== +--- linux-3.10-ck1.orig/drivers/cpufreq/cpufreq_conservative.c 2013-07-09 17:28:57.219502080 +1000 ++++ linux-3.10-ck1/drivers/cpufreq/cpufreq_conservative.c 2013-07-09 17:29:00.844501924 +1000 +@@ -27,8 +27,8 @@ + #include "cpufreq_governor.h" + + /* Conservative governor macros */ +-#define DEF_FREQUENCY_UP_THRESHOLD (80) +-#define DEF_FREQUENCY_DOWN_THRESHOLD (20) ++#define DEF_FREQUENCY_UP_THRESHOLD (63) ++#define DEF_FREQUENCY_DOWN_THRESHOLD (26) + #define DEF_FREQUENCY_STEP (5) + #define DEF_SAMPLING_DOWN_FACTOR (1) + #define MAX_SAMPLING_DOWN_FACTOR (10) +Index: linux-3.10-ck1/kernel/sched/Makefile +=================================================================== +--- linux-3.10-ck1.orig/kernel/sched/Makefile 2013-07-09 17:28:57.194502081 +1000 ++++ linux-3.10-ck1/kernel/sched/Makefile 2013-07-09 17:29:00.844501924 +1000 +@@ -11,9 +11,13 @@ + CFLAGS_core.o := $(PROFILING) -fno-omit-frame-pointer + endif + ++ifdef CONFIG_SCHED_BFS ++obj-y += bfs.o clock.o ++else + obj-y += core.o clock.o cputime.o idle_task.o fair.o rt.o stop_task.o +-obj-$(CONFIG_SMP) += cpupri.o + obj-$(CONFIG_SCHED_AUTOGROUP) += auto_group.o +-obj-$(CONFIG_SCHEDSTATS) += stats.o + obj-$(CONFIG_SCHED_DEBUG) += debug.o + obj-$(CONFIG_CGROUP_CPUACCT) += cpuacct.o ++endif ++obj-$(CONFIG_SMP) += cpupri.o ++obj-$(CONFIG_SCHEDSTATS) += stats.o +Index: linux-3.10-ck1/kernel/time/Kconfig +=================================================================== +--- linux-3.10-ck1.orig/kernel/time/Kconfig 2013-07-09 17:28:57.190502081 +1000 ++++ linux-3.10-ck1/kernel/time/Kconfig 2013-07-09 17:29:00.844501924 +1000 +@@ -94,7 +94,7 @@ + config NO_HZ_FULL + bool "Full dynticks system (tickless)" + # NO_HZ_COMMON dependency +- depends on !ARCH_USES_GETTIMEOFFSET && GENERIC_CLOCKEVENTS ++ depends on !ARCH_USES_GETTIMEOFFSET && GENERIC_CLOCKEVENTS && !SCHED_BFS + # We need at least one periodic CPU for timekeeping + depends on SMP + # RCU_USER_QS dependency +Index: linux-3.10-ck1/kernel/Kconfig.preempt +=================================================================== +--- linux-3.10-ck1.orig/kernel/Kconfig.preempt 2013-07-09 17:28:57.103502085 +1000 ++++ 
linux-3.10-ck1/kernel/Kconfig.preempt 2013-07-09 17:29:01.081501914 +1000 +@@ -1,7 +1,7 @@ + + choice + prompt "Preemption Model" +- default PREEMPT_NONE ++ default PREEMPT + + config PREEMPT_NONE + bool "No Forced Preemption (Server)" +@@ -17,7 +17,7 @@ + latencies. + + config PREEMPT_VOLUNTARY +- bool "Voluntary Kernel Preemption (Desktop)" ++ bool "Voluntary Kernel Preemption (Nothing)" + help + This option reduces the latency of the kernel by adding more + "explicit preemption points" to the kernel code. These new +@@ -31,7 +31,8 @@ + applications to run more 'smoothly' even when the system is + under load. + +- Select this if you are building a kernel for a desktop system. ++ Select this for no system in particular (choose Preemptible ++ instead on a desktop if you know what's good for you). + + config PREEMPT + bool "Preemptible Kernel (Low-Latency Desktop)" +Index: linux-3.10-ck1/kernel/Kconfig.hz +=================================================================== +--- linux-3.10-ck1.orig/kernel/Kconfig.hz 2013-07-09 17:28:57.088502086 +1000 ++++ linux-3.10-ck1/kernel/Kconfig.hz 2013-07-09 17:29:01.287501905 +1000 +@@ -4,7 +4,7 @@ + + choice + prompt "Timer frequency" +- default HZ_250 ++ default HZ_1000 + help + Allows the configuration of the timer frequency. It is customary + to have the timer interrupt run at 1000 Hz but 100 Hz may be more +@@ -23,13 +23,14 @@ + with lots of processors that may show reduced performance if + too many timer interrupts are occurring. + +- config HZ_250 ++ config HZ_250_NODEFAULT + bool "250 HZ" + help +- 250 Hz is a good compromise choice allowing server performance +- while also showing good interactive responsiveness even +- on SMP and NUMA systems. If you are going to be using NTSC video +- or multimedia, selected 300Hz instead. ++ 250 HZ is a lousy compromise choice allowing server interactivity ++ while also showing desktop throughput and no extra power saving on ++ laptops. No good for anything. ++ ++ Recommend 100 or 1000 instead. + + config HZ_300 + bool "300 HZ" +@@ -43,14 +44,16 @@ + bool "1000 HZ" + help + 1000 Hz is the preferred choice for desktop systems and other +- systems requiring fast interactive responses to events. ++ systems requiring fast interactive responses to events. Laptops ++ can also benefit from this choice without sacrificing battery life ++ if dynticks is also enabled. + + endchoice + + config HZ + int + default 100 if HZ_100 +- default 250 if HZ_250 ++ default 250 if HZ_250_NODEFAULT + default 300 if HZ_300 + default 1000 if HZ_1000 + +Index: linux-3.10-ck1/arch/x86/Kconfig +=================================================================== +--- linux-3.10-ck1.orig/arch/x86/Kconfig 2013-07-09 17:28:57.044502087 +1000 ++++ linux-3.10-ck1/arch/x86/Kconfig 2013-07-09 17:29:01.392501900 +1000 +@@ -1149,7 +1149,7 @@ + endchoice + + choice +- prompt "Memory split" if EXPERT ++ prompt "Memory split" + default VMSPLIT_3G + depends on X86_32 + ---help--- +@@ -1169,17 +1169,17 @@ + option alone! 
+ + config VMSPLIT_3G +- bool "3G/1G user/kernel split" ++ bool "Default 896MB lowmem (3G/1G user/kernel split)" + config VMSPLIT_3G_OPT + depends on !X86_PAE +- bool "3G/1G user/kernel split (for full 1G low memory)" ++ bool "1GB lowmem (3G/1G user/kernel split)" + config VMSPLIT_2G +- bool "2G/2G user/kernel split" ++ bool "2GB lowmem (2G/2G user/kernel split)" + config VMSPLIT_2G_OPT + depends on !X86_PAE +- bool "2G/2G user/kernel split (for full 2G low memory)" ++ bool "2GB lowmem (2G/2G user/kernel split)" + config VMSPLIT_1G +- bool "1G/3G user/kernel split" ++ bool "3GB lowmem (1G/3G user/kernel split)" + endchoice + + config PAGE_OFFSET +Index: linux-3.10-ck1/Makefile +=================================================================== +--- linux-3.10-ck1.orig/Makefile 2013-07-09 17:28:57.029502088 +1000 ++++ linux-3.10-ck1/Makefile 2013-07-09 17:29:01.490501896 +1000 +@@ -10,6 +10,10 @@ + # Comments in this file are targeted only to the developer, do not + # expect to learn how to build the kernel reading this file. + ++CKVERSION = -ck1 ++CKNAME = BFS Powered ++EXTRAVERSION := $(EXTRAVERSION)$(CKVERSION) ++ + # Do not: + # o use make's built-in rules and variables + # (this increases performance and avoids hard-to-debug behaviour); diff --git a/sys-kernel/kogaion-sources/files/desktop/change-default-console-loglevel.patch b/sys-kernel/kogaion-sources/files/desktop/change-default-console-loglevel.patch new file mode 100644 index 00000000..5f16dee5 --- /dev/null +++ b/sys-kernel/kogaion-sources/files/desktop/change-default-console-loglevel.patch @@ -0,0 +1,13 @@ +// change-default-console-loglevel.patch +diff -upr linux-3.0.orig/kernel/printk.c linux-3.0/kernel/printk.c +--- linux-3.0.orig/kernel/printk.c 2011-07-22 05:17:23.000000000 +0300 ++++ linux-3.0/kernel/printk.c 2011-07-27 14:43:07.000000000 +0300 +@@ -58,7 +58,7 @@ void asmlinkage __attribute__((weak)) ea + + /* We show everything that is MORE important than this.. */ + #define MINIMUM_CONSOLE_LOGLEVEL 1 /* Minimum loglevel we let people use */ +-#define DEFAULT_CONSOLE_LOGLEVEL 7 /* anything MORE serious than KERN_DEBUG */ ++#define DEFAULT_CONSOLE_LOGLEVEL 4 /* anything MORE serious than KERN_WARNING */ + + DECLARE_WAIT_QUEUE_HEAD(log_wait); + diff --git a/sys-kernel/kogaion-sources/files/desktop/criu-no-expert.patch b/sys-kernel/kogaion-sources/files/desktop/criu-no-expert.patch new file mode 100644 index 00000000..b22aa9f5 --- /dev/null +++ b/sys-kernel/kogaion-sources/files/desktop/criu-no-expert.patch @@ -0,0 +1,23 @@ +// criu-no-expert.patch +diff --git a/init/Kconfig b/init/Kconfig +index be8b7f5..7461760 100644 +--- a/init/Kconfig ++++ b/init/Kconfig +@@ -989,7 +989,7 @@ config DEBUG_BLK_CGROUP + endif # CGROUPS + + config CHECKPOINT_RESTORE +- bool "Checkpoint/restore support" if EXPERT ++ bool "Checkpoint/restore support" + default n + help + Enables additional kernel features in a sake of checkpoint/restore. +@@ -1000,7 +1000,7 @@ config CHECKPOINT_RESTORE + If unsure, say N here. 
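
The loglevel patch above only moves the compile-time default; the same threshold is adjustable at runtime through syslog(2)/klogctl(3) or /proc/sys/kernel/printk, which is how the identical effect is usually achieved without carrying a patch. A minimal sketch (action 8 is SYSLOG_ACTION_CONSOLE_LEVEL; requires CAP_SYS_ADMIN; not part of the patch):

    #include <stdio.h>
    #include <sys/klog.h>

    int main(void)
    {
    	/* runtime mirror of the patched DEFAULT_CONSOLE_LOGLEVEL: with level 4,
    	 * only messages more serious than KERN_WARNING reach the console */
    	if (klogctl(8 /* SYSLOG_ACTION_CONSOLE_LEVEL */, NULL, 4) == -1)
    		perror("klogctl");
    	return 0;
    }
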
+ 
+ menuconfig NAMESPACES
+-	bool "Namespaces support" if EXPERT
++	bool "Namespaces support"
+ 	default !EXPERT
+ 	help
+ 	  Provides the way to make tasks work with different objects using
diff --git a/sys-kernel/kogaion-sources/files/desktop/enable_haswell_pstate_driver.patch b/sys-kernel/kogaion-sources/files/desktop/enable_haswell_pstate_driver.patch
new file mode 100644
index 00000000..031f8d2f
--- /dev/null
+++ b/sys-kernel/kogaion-sources/files/desktop/enable_haswell_pstate_driver.patch
@@ -0,0 +1,32 @@
+// enable_haswell_pstate_driver.patch
+--- linux-3.10/drivers/cpufreq/intel_pstate.c	2013-06-30 18:13:29.000000000 -0400
++++ linux-3.10.mod/drivers/cpufreq/intel_pstate.c	2013-07-05 03:10:36.164568840 -0400
+@@ -522,6 +522,10 @@
+ 	ICPU(0x2a, default_policy),
+ 	ICPU(0x2d, default_policy),
+ 	ICPU(0x3a, default_policy),
++	ICPU(0x3c, default_policy),
++	ICPU(0x3f, default_policy),
++	ICPU(0x45, default_policy),
++	ICPU(0x46, default_policy),
+ 	{}
+ };
+ MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
+--- linux-3.10/drivers/cpufreq/Kconfig.x86	2013-06-30 18:13:29.000000000 -0400
++++ linux-3.10.mod/drivers/cpufreq/Kconfig.x86	2013-07-05 03:13:22.823827792 -0400
+@@ -6,12 +6,12 @@
+ 	bool "Intel P state control"
+ 	depends on X86
+ 	help
+-	  This driver provides a P state for Intel core processors.
++	  This driver provides a P state for Intel Core processors.
+ 	  The driver implements an internal governor and will become
+-	  the scaling driver and governor for Sandy bridge processors.
++	  the scaling driver and governor for Sandy/Ivy Bridge and Haswell processors.
+ 
+ 	  When this driver is enabled it will become the perferred
+-	  scaling driver for Sandy bridge processors.
++	  scaling driver for Sandy/Ivy Bridge and Haswell processors.
+ 
+ 	  If in doubt, say N.
+ 
diff --git a/sys-kernel/kogaion-sources/files/desktop/set_kogaion_extraversion_in_makefile.patch b/sys-kernel/kogaion-sources/files/desktop/set_kogaion_extraversion_in_makefile.patch
new file mode 100644
index 00000000..a20090f8
--- /dev/null
+++ b/sys-kernel/kogaion-sources/files/desktop/set_kogaion_extraversion_in_makefile.patch
@@ -0,0 +1,12 @@
+diff -Nur a/Makefile b/Makefile
+--- a/Makefile	2014-01-30 20:52:03.849613917 +0200
++++ b/Makefile	2014-01-30 20:56:12.859500865 +0200
+@@ -10,7 +10,7 @@
+ # Comments in this file are targeted only to the developer, do not
+ # expect to learn how to build the kernel reading this file.
+ 
+-CKVERSION = -ck1
++CKVERSION = -kogaion
+ CKNAME = BFS Powered
+ EXTRAVERSION := $(EXTRAVERSION)$(CKVERSION)
+ 
diff --git a/sys-kernel/kogaion-sources/files/desktop/uksm-0.1.2.2-for-v3.10.patch b/sys-kernel/kogaion-sources/files/desktop/uksm-0.1.2.2-for-v3.10.patch
new file mode 100644
index 00000000..f62addd2
--- /dev/null
+++ b/sys-kernel/kogaion-sources/files/desktop/uksm-0.1.2.2-for-v3.10.patch
@@ -0,0 +1,7064 @@
+diff --git a/Documentation/vm/00-INDEX b/Documentation/vm/00-INDEX
+index 5481c8b..7141876 100644
+--- a/Documentation/vm/00-INDEX
++++ b/Documentation/vm/00-INDEX
+@@ -14,6 +14,8 @@ hwpoison.txt
+ 	- explains what hwpoison is
+ ksm.txt
+ 	- how to use the Kernel Samepage Merging feature.
++uksm.txt
++	- Introduction to Ultra KSM
+ locking
+ 	- info on how locking and synchronization is done in the Linux vm code.
+ map_hugetlb.c +diff --git a/Documentation/vm/uksm.txt b/Documentation/vm/uksm.txt +new file mode 100644 +index 0000000..9b2cb51 +--- /dev/null ++++ b/Documentation/vm/uksm.txt +@@ -0,0 +1,57 @@ ++The Ultra Kernel Samepage Merging feature ++---------------------------------------------- ++/* ++ * Ultra KSM. Copyright (C) 2011-2012 Nai Xia ++ * ++ * This is an improvement upon KSM. Some basic data structures and routines ++ * are borrowed from ksm.c . ++ * ++ * Its new features: ++ * 1. Full system scan: ++ * It automatically scans all user processes' anonymous VMAs. Kernel-user ++ * interaction to submit a memory area to KSM is no longer needed. ++ * ++ * 2. Rich area detection: ++ * It automatically detects rich areas containing abundant duplicated ++ * pages. Rich areas are given full scan speed. Poor areas are ++ * sampled at a reasonable speed with very low CPU consumption. ++ * ++ * 3. Ultra per-page scan speed improvement: ++ * A new hash algorithm is proposed. As a result, on a machine with a ++ * Core(TM)2 Quad Q9300 CPU in 32-bit mode and 800MHZ DDR2 main memory, it ++ * can scan memory areas that do not contain duplicated pages at a speed of ++ * 627MB/sec ~ 2445MB/sec and can merge duplicated areas at a speed of ++ * 477MB/sec ~ 923MB/sec. ++ * ++ * 4. Thrashing area avoidance: ++ * A thrashing area (a VMA that has frequent KSM page break-outs) can be ++ * filtered out. My benchmark shows it's more efficient than KSM's per-page ++ * hash value based volatile page detection. ++ * ++ * ++ * 5. Misc changes upon KSM: ++ * * It has a fully x86-optimized memcmp dedicated for 4-byte-aligned page ++ * comparison. It's much faster than the default C version on x86. ++ * * rmap_item now has a struct page * member to loosely cache an ++ * address-->page mapping, which avoids many time-costly ++ * follow_page() calls. ++ * * The VMA creation/exit procedures are hooked to let the Ultra KSM know. ++ * * try_to_merge_two_pages() can now revert a pte if it fails. No break_ ++ * ksm is needed for this case. ++ * ++ * 6. Full Zero Page consideration (contributed by Figo Zhang) ++ * Now uksmd considers full zero pages as special pages and merges them into ++ * a special unswappable uksm zero page. ++ */ ++ ++ChangeLog: ++ ++2012-05-05 Initial creation of this document ++2012-05-08 UKSM 0.1.1.1 libc crash bug fix, API clean up, doc clean up. ++2012-05-28 UKSM 0.1.1.2 bug fix release ++2012-06-26 UKSM 0.1.2-beta1 first beta release for 0.1.2 ++2012-07-02 UKSM 0.1.2-beta2 ++2012-07-10 UKSM 0.1.2-beta3 ++2012-07-26 UKSM 0.1.2 Fine grained speed control, more scan optimization. ++2012-10-13 UKSM 0.1.2.1 Bug fixes. ++2012-12-31 UKSM 0.1.2.2 Minor bug fixes +diff --git a/fs/exec.c b/fs/exec.c +index ffd7a81..1c4d7d3 100644 +--- a/fs/exec.c ++++ b/fs/exec.c +@@ -19,7 +19,7 @@ + * current->executable is only used by the procfs. This allows a dispatch + * table to check for several different types of binary formats. We keep + * trying until we recognize the file or we run out of supported binary +- * formats. ++ * formats. + */ + + #include +@@ -55,6 +55,7 @@ + #include + #include + #include ++#include + + #include + #include +@@ -1139,7 +1140,7 @@ void setup_new_exec(struct linux_binprm * bprm) + group */ + + current->self_exec_id++; +- ++ + flush_signal_handlers(current, 0); + do_close_on_exec(current->files); + } +@@ -1265,8 +1266,8 @@ static int check_unsafe_exec(struct linux_binprm *bprm) + return res; + } + +-/* +- * Fill the binprm structure from the inode. ++/* ++ * Fill the binprm structure from the inode.
+ * Check permissions, then read the first 128 (BINPRM_BUF_SIZE) bytes + * + * This may be called multiple times for binary chains (scripts for example). +diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c +index 5aa847a..c6c5553 100644 +--- a/fs/proc/meminfo.c ++++ b/fs/proc/meminfo.c +@@ -88,6 +88,9 @@ static int meminfo_proc_show(struct seq_file *m, void *v) + "SUnreclaim: %8lu kB\n" + "KernelStack: %8lu kB\n" + "PageTables: %8lu kB\n" ++#ifdef CONFIG_UKSM ++ "KsmZeroPages: %8lu kB\n" ++#endif + #ifdef CONFIG_QUICKLIST + "Quicklists: %8lu kB\n" + #endif +@@ -147,6 +150,9 @@ static int meminfo_proc_show(struct seq_file *m, void *v) + K(global_page_state(NR_SLAB_UNRECLAIMABLE)), + global_page_state(NR_KERNEL_STACK) * THREAD_SIZE / 1024, + K(global_page_state(NR_PAGETABLE)), ++#ifdef CONFIG_UKSM ++ K(global_page_state(NR_UKSM_ZERO_PAGES)), ++#endif + #ifdef CONFIG_QUICKLIST + K(quicklist_total_size()), + #endif +diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h +index a59ff51..df359cc 100644 +--- a/include/asm-generic/pgtable.h ++++ b/include/asm-generic/pgtable.h +@@ -453,12 +453,25 @@ extern void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn, + unsigned long size); + #endif + ++#ifdef CONFIG_UKSM ++static inline int is_uksm_zero_pfn(unsigned long pfn) ++{ ++ extern unsigned long uksm_zero_pfn; ++ return pfn == uksm_zero_pfn; ++} ++#else ++static inline int is_uksm_zero_pfn(unsigned long pfn) ++{ ++ return 0; ++} ++#endif ++ + #ifdef __HAVE_COLOR_ZERO_PAGE + static inline int is_zero_pfn(unsigned long pfn) + { + extern unsigned long zero_pfn; + unsigned long offset_from_zero_pfn = pfn - zero_pfn; +- return offset_from_zero_pfn <= (zero_page_mask >> PAGE_SHIFT); ++ return offset_from_zero_pfn <= (zero_page_mask >> PAGE_SHIFT) || is_uksm_zero_pfn(pfn); + } + + #define my_zero_pfn(addr) page_to_pfn(ZERO_PAGE(addr)) +@@ -467,7 +480,7 @@ static inline int is_zero_pfn(unsigned long pfn) + static inline int is_zero_pfn(unsigned long pfn) + { + extern unsigned long zero_pfn; +- return pfn == zero_pfn; ++ return (pfn == zero_pfn) || (is_uksm_zero_pfn(pfn)); + } + + static inline unsigned long my_zero_pfn(unsigned long addr) +diff --git a/include/linux/ksm.h b/include/linux/ksm.h +index 45c9b6a..c7de7a7 100644 +--- a/include/linux/ksm.h ++++ b/include/linux/ksm.h +@@ -19,21 +19,6 @@ struct mem_cgroup; + #ifdef CONFIG_KSM + int ksm_madvise(struct vm_area_struct *vma, unsigned long start, + unsigned long end, int advice, unsigned long *vm_flags); +-int __ksm_enter(struct mm_struct *mm); +-void __ksm_exit(struct mm_struct *mm); +- +-static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm) +-{ +- if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags)) +- return __ksm_enter(mm); +- return 0; +-} +- +-static inline void ksm_exit(struct mm_struct *mm) +-{ +- if (test_bit(MMF_VM_MERGEABLE, &mm->flags)) +- __ksm_exit(mm); +-} + + /* + * A KSM page is one of those write-protected "shared pages" or "merged pages" +@@ -80,6 +65,33 @@ int rmap_walk_ksm(struct page *page, int (*rmap_one)(struct page *, + struct vm_area_struct *, unsigned long, void *), void *arg); + void ksm_migrate_page(struct page *newpage, struct page *oldpage); + ++#ifdef CONFIG_KSM_LEGACY ++int __ksm_enter(struct mm_struct *mm); ++void __ksm_exit(struct mm_struct *mm); ++static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm) ++{ ++ if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags)) ++ return __ksm_enter(mm); ++ return 0; ++} ++ ++static inline void ksm_exit(struct mm_struct *mm) 
++{ ++ if (test_bit(MMF_VM_MERGEABLE, &mm->flags)) ++ __ksm_exit(mm); ++} ++ ++#elif defined(CONFIG_UKSM) ++static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm) ++{ ++ return 0; ++} ++ ++static inline void ksm_exit(struct mm_struct *mm) ++{ ++} ++#endif /* !CONFIG_UKSM */ ++ + #else /* !CONFIG_KSM */ + + static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm) +@@ -132,4 +144,6 @@ static inline void ksm_migrate_page(struct page *newpage, struct page *oldpage) + #endif /* CONFIG_MMU */ + #endif /* !CONFIG_KSM */ + ++#include ++ + #endif /* __LINUX_KSM_H */ +diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h +index ace9a5f..6a76d6e 100644 +--- a/include/linux/mm_types.h ++++ b/include/linux/mm_types.h +@@ -289,6 +289,9 @@ struct vm_area_struct { + #ifdef CONFIG_NUMA + struct mempolicy *vm_policy; /* NUMA policy for the VMA */ + #endif ++#ifdef CONFIG_UKSM ++ struct vma_slot *uksm_vma_slot; ++#endif + }; + + struct core_thread { +diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h +index 5c76737..a631b29 100644 +--- a/include/linux/mmzone.h ++++ b/include/linux/mmzone.h +@@ -143,6 +143,9 @@ enum zone_stat_item { + #endif + NR_ANON_TRANSPARENT_HUGEPAGES, + NR_FREE_CMA_PAGES, ++#ifdef CONFIG_UKSM ++ NR_UKSM_ZERO_PAGES, ++#endif + NR_VM_ZONE_STAT_ITEMS }; + + /* +@@ -849,7 +852,7 @@ static inline int is_normal_idx(enum zone_type idx) + } + + /** +- * is_highmem - helper function to quickly check if a struct zone is a ++ * is_highmem - helper function to quickly check if a struct zone is a + * highmem zone or not. This is an attempt to keep references + * to ZONE_{DMA/NORMAL/HIGHMEM/etc} in general code to a minimum. + * @zone - pointer to struct zone variable +diff --git a/include/linux/sradix-tree.h b/include/linux/sradix-tree.h +new file mode 100644 +index 0000000..6780fdb +--- /dev/null ++++ b/include/linux/sradix-tree.h +@@ -0,0 +1,77 @@ ++#ifndef _LINUX_SRADIX_TREE_H ++#define _LINUX_SRADIX_TREE_H ++ ++ ++#define INIT_SRADIX_TREE(root, mask) \ ++do { \ ++ (root)->height = 0; \ ++ (root)->gfp_mask = (mask); \ ++ (root)->rnode = NULL; \ ++} while (0) ++ ++#define ULONG_BITS (sizeof(unsigned long) * 8) ++#define SRADIX_TREE_INDEX_BITS (8 /* CHAR_BIT */ * sizeof(unsigned long)) ++//#define SRADIX_TREE_MAP_SHIFT 6 ++//#define SRADIX_TREE_MAP_SIZE (1UL << SRADIX_TREE_MAP_SHIFT) ++//#define SRADIX_TREE_MAP_MASK (SRADIX_TREE_MAP_SIZE-1) ++ ++struct sradix_tree_node { ++ unsigned int height; /* Height from the bottom */ ++ unsigned int count; ++ unsigned int fulls; /* Number of full sublevel trees */ ++ struct sradix_tree_node *parent; ++ void *stores[0]; ++}; ++ ++/* A simple radix tree implementation */ ++struct sradix_tree_root { ++ unsigned int height; ++ struct sradix_tree_node *rnode; ++ ++ /* Where found to have available empty stores in its sublevels */ ++ struct sradix_tree_node *enter_node; ++ unsigned int shift; ++ unsigned int stores_size; ++ unsigned int mask; ++ unsigned long min; /* The first hole index */ ++ unsigned long num; ++ //unsigned long *height_to_maxindex; ++ ++ /* How the node is allocated and freed. 
*/ ++ struct sradix_tree_node *(*alloc)(void); ++ void (*free)(struct sradix_tree_node *node); ++ ++ /* When a new node is added and removed */ ++ void (*extend)(struct sradix_tree_node *parent, struct sradix_tree_node *child); ++ void (*assign)(struct sradix_tree_node *node, unsigned index, void *item); ++ void (*rm)(struct sradix_tree_node *node, unsigned offset); ++}; ++ ++struct sradix_tree_path { ++ struct sradix_tree_node *node; ++ int offset; ++}; ++ ++static inline ++void init_sradix_tree_root(struct sradix_tree_root *root, unsigned long shift) ++{ ++ root->height = 0; ++ root->rnode = NULL; ++ root->shift = shift; ++ root->stores_size = 1UL << shift; ++ root->mask = root->stores_size - 1; ++} ++ ++ ++extern void *sradix_tree_next(struct sradix_tree_root *root, ++ struct sradix_tree_node *node, unsigned long index, ++ int (*iter)(void *, unsigned long)); ++ ++extern int sradix_tree_enter(struct sradix_tree_root *root, void **item, int num); ++ ++extern void sradix_tree_delete_from_leaf(struct sradix_tree_root *root, ++ struct sradix_tree_node *node, unsigned long index); ++ ++extern void *sradix_tree_lookup(struct sradix_tree_root *root, unsigned long index); ++ ++#endif /* _LINUX_SRADIX_TREE_H */ +diff --git a/include/linux/uksm.h b/include/linux/uksm.h +new file mode 100644 +index 0000000..a644bca +--- /dev/null ++++ b/include/linux/uksm.h +@@ -0,0 +1,146 @@ ++#ifndef __LINUX_UKSM_H ++#define __LINUX_UKSM_H ++/* ++ * Memory merging support. ++ * ++ * This code enables dynamic sharing of identical pages found in different ++ * memory areas, even if they are not shared by fork(). ++ */ ++ ++/* if !CONFIG_UKSM this file should not be compiled at all. */ ++#ifdef CONFIG_UKSM ++ ++#include ++#include ++#include ++#include ++#include ++ ++extern unsigned long zero_pfn __read_mostly; ++extern unsigned long uksm_zero_pfn __read_mostly; ++extern struct page *empty_uksm_zero_page; ++ ++/* must be done before linked to mm */ ++extern void uksm_vma_add_new(struct vm_area_struct *vma); ++extern void uksm_remove_vma(struct vm_area_struct *vma); ++ ++#define UKSM_SLOT_NEED_SORT (1 << 0) ++#define UKSM_SLOT_NEED_RERAND (1 << 1) ++#define UKSM_SLOT_SCANNED (1 << 2) /* It's scanned in this round */ ++#define UKSM_SLOT_FUL_SCANNED (1 << 3) ++#define UKSM_SLOT_IN_UKSM (1 << 4) ++ ++struct vma_slot { ++ struct sradix_tree_node *snode; ++ unsigned long sindex; ++ ++ struct list_head slot_list; ++ unsigned long fully_scanned_round; ++ unsigned long dedup_num; ++ unsigned long pages_scanned; ++ unsigned long last_scanned; ++ unsigned long pages_to_scan; ++ struct scan_rung *rung; ++ struct page **rmap_list_pool; ++ unsigned int *pool_counts; ++ unsigned long pool_size; ++ struct vm_area_struct *vma; ++ struct mm_struct *mm; ++ unsigned long ctime_j; ++ unsigned long pages; ++ unsigned long flags; ++ unsigned long pages_cowed; /* pages cowed this round */ ++ unsigned long pages_merged; /* pages merged this round */ ++ unsigned long pages_bemerged; ++ ++ /* when it has page merged in this eval round */ ++ struct list_head dedup_list; ++}; ++ ++static inline void uksm_unmap_zero_page(pte_t pte) ++{ ++ if (pte_pfn(pte) == uksm_zero_pfn) ++ __dec_zone_page_state(empty_uksm_zero_page, NR_UKSM_ZERO_PAGES); ++} ++ ++static inline void uksm_map_zero_page(pte_t pte) ++{ ++ if (pte_pfn(pte) == uksm_zero_pfn) ++ __inc_zone_page_state(empty_uksm_zero_page, NR_UKSM_ZERO_PAGES); ++} ++ ++static inline void uksm_cow_page(struct vm_area_struct *vma, struct page *page) ++{ ++ if (vma->uksm_vma_slot && PageKsm(page)) ++ 
vma->uksm_vma_slot->pages_cowed++; ++} ++ ++static inline void uksm_cow_pte(struct vm_area_struct *vma, pte_t pte) ++{ ++ if (vma->uksm_vma_slot && pte_pfn(pte) == uksm_zero_pfn) ++ vma->uksm_vma_slot->pages_cowed++; ++} ++ ++static inline int uksm_flags_can_scan(unsigned long vm_flags) ++{ ++#ifndef VM_SAO ++#define VM_SAO 0 ++#endif ++ return !(vm_flags & (VM_PFNMAP | VM_IO | VM_DONTEXPAND | ++ VM_HUGETLB | VM_NONLINEAR | VM_MIXEDMAP | ++ VM_SHARED | VM_MAYSHARE | VM_GROWSUP | VM_GROWSDOWN | VM_SAO)); ++} ++ ++static inline void uksm_vm_flags_mod(unsigned long *vm_flags_p) ++{ ++ if (uksm_flags_can_scan(*vm_flags_p)) ++ *vm_flags_p |= VM_MERGEABLE; ++} ++ ++/* ++ * Just a wrapper for BUG_ON for where ksm_zeropage must not be. TODO: it will ++ * be removed when uksm zero page patch is stable enough. ++ */ ++static inline void uksm_bugon_zeropage(pte_t pte) ++{ ++ BUG_ON(pte_pfn(pte) == uksm_zero_pfn); ++} ++#else ++static inline void uksm_vma_add_new(struct vm_area_struct *vma) ++{ ++} ++ ++static inline void uksm_remove_vma(struct vm_area_struct *vma) ++{ ++} ++ ++static inline void uksm_unmap_zero_page(pte_t pte) ++{ ++} ++ ++static inline void uksm_map_zero_page(pte_t pte) ++{ ++} ++ ++static inline void uksm_cow_page(struct vm_area_struct *vma, struct page *page) ++{ ++} ++ ++static inline void uksm_cow_pte(struct vm_area_struct *vma, pte_t pte) ++{ ++} ++ ++static inline int uksm_flags_can_scan(unsigned long vm_flags) ++{ ++ return 0; ++} ++ ++static inline void uksm_vm_flags_mod(unsigned long *vm_flags_p) ++{ ++} ++ ++static inline void uksm_bugon_zeropage(pte_t pte) ++{ ++} ++#endif /* !CONFIG_UKSM */ ++#endif /* __LINUX_UKSM_H */ +diff --git a/kernel/fork.c b/kernel/fork.c +index 987b28a..3e89974 100644 +--- a/kernel/fork.c ++++ b/kernel/fork.c +@@ -397,7 +397,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) + goto fail_nomem; + charge = len; + } +- tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); ++ tmp = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); + if (!tmp) + goto fail_nomem; + *tmp = *mpnt; +@@ -454,7 +454,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) + __vma_link_rb(mm, tmp, rb_link, rb_parent); + rb_link = &tmp->vm_rb.rb_right; + rb_parent = &tmp->vm_rb; +- ++ uksm_vma_add_new(tmp); + mm->map_count++; + retval = copy_page_range(mm, oldmm, mpnt); + +diff --git a/lib/Makefile b/lib/Makefile +index c55a037..fcf7e6d 100644 +--- a/lib/Makefile ++++ b/lib/Makefile +@@ -8,7 +8,7 @@ KBUILD_CFLAGS = $(subst -pg,,$(ORIG_CFLAGS)) + endif + + lib-y := ctype.o string.o vsprintf.o cmdline.o \ +- rbtree.o radix-tree.o dump_stack.o timerqueue.o\ ++ rbtree.o radix-tree.o sradix-tree.o dump_stack.o timerqueue.o\ + idr.o int_sqrt.o extable.o \ + sha1.o md5.o irq_regs.o reciprocal_div.o argv_split.o \ + proportions.o flex_proportions.o prio_heap.o ratelimit.o show_mem.o \ +diff --git a/lib/sradix-tree.c b/lib/sradix-tree.c +new file mode 100644 +index 0000000..8d06329 +--- /dev/null ++++ b/lib/sradix-tree.c +@@ -0,0 +1,476 @@ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++static inline int sradix_node_full(struct sradix_tree_root *root, struct sradix_tree_node *node) ++{ ++ return node->fulls == root->stores_size || ++ (node->height == 1 && node->count == root->stores_size); ++} ++ ++/* ++ * Extend a sradix tree so it can store key @index. 
++ */ ++static int sradix_tree_extend(struct sradix_tree_root *root, unsigned long index) ++{ ++ struct sradix_tree_node *node; ++ unsigned int height; ++ ++ if (unlikely(root->rnode == NULL)) { ++ if (!(node = root->alloc())) ++ return -ENOMEM; ++ ++ node->height = 1; ++ root->rnode = node; ++ root->height = 1; ++ } ++ ++ /* Figure out what the height should be. */ ++ height = root->height; ++ index >>= root->shift * height; ++ ++ while (index) { ++ index >>= root->shift; ++ height++; ++ } ++ ++ while (height > root->height) { ++ unsigned int newheight; ++ if (!(node = root->alloc())) ++ return -ENOMEM; ++ ++ /* Increase the height. */ ++ node->stores[0] = root->rnode; ++ root->rnode->parent = node; ++ if (root->extend) ++ root->extend(node, root->rnode); ++ ++ newheight = root->height + 1; ++ node->height = newheight; ++ node->count = 1; ++ if (sradix_node_full(root, root->rnode)) ++ node->fulls = 1; ++ ++ root->rnode = node; ++ root->height = newheight; ++ } ++ ++ return 0; ++} ++ ++/* ++ * Search the next item from the current node, that is not NULL ++ * and can satify root->iter(). ++ */ ++void *sradix_tree_next(struct sradix_tree_root *root, ++ struct sradix_tree_node *node, unsigned long index, ++ int (*iter)(void *item, unsigned long height)) ++{ ++ unsigned long offset; ++ void *item; ++ ++ if (unlikely(node == NULL)) { ++ node = root->rnode; ++ for (offset = 0; offset < root->stores_size; offset++) { ++ item = node->stores[offset]; ++ if (item && (!iter || iter(item, node->height))) ++ break; ++ } ++ ++ if (unlikely(offset >= root->stores_size)) ++ return NULL; ++ ++ if (node->height == 1) ++ return item; ++ else ++ goto go_down; ++ } ++ ++ while (node) { ++ offset = (index & root->mask) + 1; ++ for (;offset < root->stores_size; offset++) { ++ item = node->stores[offset]; ++ if (item && (!iter || iter(item, node->height))) ++ break; ++ } ++ ++ if (offset < root->stores_size) ++ break; ++ ++ node = node->parent; ++ index >>= root->shift; ++ } ++ ++ if (!node) ++ return NULL; ++ ++ while (node->height > 1) { ++go_down: ++ node = item; ++ for (offset = 0; offset < root->stores_size; offset++) { ++ item = node->stores[offset]; ++ if (item && (!iter || iter(item, node->height))) ++ break; ++ } ++ ++ if (unlikely(offset >= root->stores_size)) ++ return NULL; ++ } ++ ++ BUG_ON(offset > root->stores_size); ++ ++ return item; ++} ++ ++/* ++ * Blindly insert the item to the tree. Typically, we reuse the ++ * first empty store item. 
++ */ ++int sradix_tree_enter(struct sradix_tree_root *root, void **item, int num) ++{ ++ unsigned long index; ++ unsigned int height; ++ struct sradix_tree_node *node, *tmp = NULL; ++ int offset, offset_saved; ++ void **store = NULL; ++ int error, i, j, shift; ++ ++go_on: ++ index = root->min; ++ ++ if (root->enter_node && !sradix_node_full(root, root->enter_node)) { ++ node = root->enter_node; ++ BUG_ON((index >> (root->shift * root->height))); ++ } else { ++ node = root->rnode; ++ if (node == NULL || (index >> (root->shift * root->height)) ++ || sradix_node_full(root, node)) { ++ error = sradix_tree_extend(root, index); ++ if (error) ++ return error; ++ ++ node = root->rnode; ++ } ++ } ++ ++ ++ height = node->height; ++ shift = (height - 1) * root->shift; ++ offset = (index >> shift) & root->mask; ++ while (shift > 0) { ++ offset_saved = offset; ++ for (; offset < root->stores_size; offset++) { ++ store = &node->stores[offset]; ++ tmp = *store; ++ ++ if (!tmp || !sradix_node_full(root, tmp)) ++ break; ++ } ++ BUG_ON(offset >= root->stores_size); ++ ++ if (offset != offset_saved) { ++ index += (offset - offset_saved) << shift; ++ index &= ~((1UL << shift) - 1); ++ } ++ ++ if (!tmp) { ++ if (!(tmp = root->alloc())) ++ return -ENOMEM; ++ ++ tmp->height = shift / root->shift; ++ *store = tmp; ++ tmp->parent = node; ++ node->count++; ++// if (root->extend) ++// root->extend(node, tmp); ++ } ++ ++ node = tmp; ++ shift -= root->shift; ++ offset = (index >> shift) & root->mask; ++ } ++ ++ BUG_ON(node->height != 1); ++ ++ ++ store = &node->stores[offset]; ++ for (i = 0, j = 0; ++ j < root->stores_size - node->count && ++ i < root->stores_size - offset && j < num; i++) { ++ if (!store[i]) { ++ store[i] = item[j]; ++ if (root->assign) ++ root->assign(node, index + i, item[j]); ++ j++; ++ } ++ } ++ ++ node->count += j; ++ root->num += j; ++ num -= j; ++ ++ while (sradix_node_full(root, node)) { ++ node = node->parent; ++ if (!node) ++ break; ++ ++ node->fulls++; ++ } ++ ++ if (unlikely(!node)) { ++ /* All nodes are full */ ++ root->min = 1 << (root->height * root->shift); ++ root->enter_node = NULL; ++ } else { ++ root->min = index + i - 1; ++ root->min |= (1UL << (node->height - 1)) - 1; ++ root->min++; ++ root->enter_node = node; ++ } ++ ++ if (num) { ++ item += j; ++ goto go_on; ++ } ++ ++ return 0; ++} ++ ++ ++/** ++ * sradix_tree_shrink - shrink height of a sradix tree to minimal ++ * @root sradix tree root ++ * ++ */ ++static inline void sradix_tree_shrink(struct sradix_tree_root *root) ++{ ++ /* try to shrink tree height */ ++ while (root->height > 1) { ++ struct sradix_tree_node *to_free = root->rnode; ++ ++ /* ++ * The candidate node has more than one child, or its child ++ * is not at the leftmost store, we cannot shrink. 
++ */ ++ if (to_free->count != 1 || !to_free->stores[0]) ++ break; ++ ++ root->rnode = to_free->stores[0]; ++ root->rnode->parent = NULL; ++ root->height--; ++ if (unlikely(root->enter_node == to_free)) { ++ root->enter_node = NULL; ++ } ++ root->free(to_free); ++ } ++} ++ ++/* ++ * Del the item on the known leaf node and index ++ */ ++void sradix_tree_delete_from_leaf(struct sradix_tree_root *root, ++ struct sradix_tree_node *node, unsigned long index) ++{ ++ unsigned int offset; ++ struct sradix_tree_node *start, *end; ++ ++ BUG_ON(node->height != 1); ++ ++ start = node; ++ while (node && !(--node->count)) ++ node = node->parent; ++ ++ end = node; ++ if (!node) { ++ root->rnode = NULL; ++ root->height = 0; ++ root->min = 0; ++ root->num = 0; ++ root->enter_node = NULL; ++ } else { ++ offset = (index >> (root->shift * (node->height - 1))) & root->mask; ++ if (root->rm) ++ root->rm(node, offset); ++ node->stores[offset] = NULL; ++ root->num--; ++ if (root->min > index) { ++ root->min = index; ++ root->enter_node = node; ++ } ++ } ++ ++ if (start != end) { ++ do { ++ node = start; ++ start = start->parent; ++ if (unlikely(root->enter_node == node)) ++ root->enter_node = end; ++ root->free(node); ++ } while (start != end); ++ ++ /* ++ * Note that shrink may free "end", so enter_node still need to ++ * be checked inside. ++ */ ++ sradix_tree_shrink(root); ++ } else if (node->count == root->stores_size - 1) { ++ /* It WAS a full leaf node. Update the ancestors */ ++ node = node->parent; ++ while (node) { ++ node->fulls--; ++ if (node->fulls != root->stores_size - 1) ++ break; ++ ++ node = node->parent; ++ } ++ } ++} ++ ++void *sradix_tree_lookup(struct sradix_tree_root *root, unsigned long index) ++{ ++ unsigned int height, offset; ++ struct sradix_tree_node *node; ++ int shift; ++ ++ node = root->rnode; ++ if (node == NULL || (index >> (root->shift * root->height))) ++ return NULL; ++ ++ height = root->height; ++ shift = (height - 1) * root->shift; ++ ++ do { ++ offset = (index >> shift) & root->mask; ++ node = node->stores[offset]; ++ if (!node) ++ return NULL; ++ ++ shift -= root->shift; ++ } while (shift >= 0); ++ ++ return node; ++} ++ ++/* ++ * Return the item if it exists, otherwise create it in place ++ * and return the created item. ++ */ ++void *sradix_tree_lookup_create(struct sradix_tree_root *root, ++ unsigned long index, void *(*item_alloc)(void)) ++{ ++ unsigned int height, offset; ++ struct sradix_tree_node *node, *tmp; ++ void *item; ++ int shift, error; ++ ++ if (root->rnode == NULL || (index >> (root->shift * root->height))) { ++ if (item_alloc) { ++ error = sradix_tree_extend(root, index); ++ if (error) ++ return NULL; ++ } else { ++ return NULL; ++ } ++ } ++ ++ node = root->rnode; ++ height = root->height; ++ shift = (height - 1) * root->shift; ++ ++ do { ++ offset = (index >> shift) & root->mask; ++ if (!node->stores[offset]) { ++ if (!(tmp = root->alloc())) ++ return NULL; ++ ++ tmp->height = shift / root->shift; ++ node->stores[offset] = tmp; ++ tmp->parent = node; ++ node->count++; ++ node = tmp; ++ } else { ++ node = node->stores[offset]; ++ } ++ ++ shift -= root->shift; ++ } while (shift > 0); ++ ++ BUG_ON(node->height != 1); ++ offset = index & root->mask; ++ if (node->stores[offset]) { ++ return node->stores[offset]; ++ } else if (item_alloc) { ++ if (!(item = item_alloc())) ++ return NULL; ++ ++ node->stores[offset] = item; ++ ++ /* ++ * NOTE: we do NOT call root->assign here, since this item is ++ * newly created by us having no meaning. 
Caller can call this ++ * if it's necessary to do so. ++ */ ++ ++ node->count++; ++ root->num++; ++ ++ while (sradix_node_full(root, node)) { ++ node = node->parent; ++ if (!node) ++ break; ++ ++ node->fulls++; ++ } ++ ++ if (unlikely(!node)) { ++ /* All nodes are full */ ++ root->min = 1 << (root->height * root->shift); ++ } else { ++ if (root->min == index) { ++ root->min |= (1UL << (node->height - 1)) - 1; ++ root->min++; ++ root->enter_node = node; ++ } ++ } ++ ++ return item; ++ } else { ++ return NULL; ++ } ++ ++} ++ ++int sradix_tree_delete(struct sradix_tree_root *root, unsigned long index) ++{ ++ unsigned int height, offset; ++ struct sradix_tree_node *node; ++ int shift; ++ ++ node = root->rnode; ++ if (node == NULL || (index >> (root->shift * root->height))) ++ return -ENOENT; ++ ++ height = root->height; ++ shift = (height - 1) * root->shift; ++ ++ do { ++ offset = (index >> shift) & root->mask; ++ node = node->stores[offset]; ++ if (!node) ++ return -ENOENT; ++ ++ shift -= root->shift; ++ } while (shift > 0); ++ ++ offset = index & root->mask; ++ if (!node->stores[offset]) ++ return -ENOENT; ++ ++ sradix_tree_delete_from_leaf(root, node, index); ++ ++ return 0; ++} +diff --git a/mm/Kconfig b/mm/Kconfig +index e742d06..93c2533 100644 +--- a/mm/Kconfig ++++ b/mm/Kconfig +@@ -315,6 +315,32 @@ config KSM + See Documentation/vm/ksm.txt for more information: KSM is inactive + until a program has madvised that an area is MADV_MERGEABLE, and + root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set). ++choice ++ prompt "Choose UKSM/KSM strategy" ++ default UKSM ++ depends on KSM ++ help ++ This option allows you to select a UKSM/KSM strategy. ++ ++config UKSM ++ bool "Ultra-KSM for page merging" ++ depends on KSM ++ help ++ UKSM is inspired by the Linux kernel project KSM (Kernel Samepage ++ Merging), but with a fundamentally rewritten core algorithm. With ++ its advanced algorithm, UKSM can transparently scan all anonymously ++ mapped user space memory with significantly improved scan speed ++ and CPU efficiency. Since KVM is friendly to KSM, KVM can also benefit from ++ UKSM. UKSM now has its first stable release and its first real-world enterprise user. ++ For more information, please visit its project page. ++ (www.kerneldedup.org) ++ ++config KSM_LEGACY ++ bool "Legacy KSM implementation" ++ depends on KSM ++ help ++ The legacy KSM implementation from Red Hat.
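The sradix tree added in lib/sradix-tree.c above is deliberately policy-free: node allocation and bookkeeping are delegated to the callbacks installed on sradix_tree_root, and only alloc/free are mandatory (the extend/assign/rm hooks are guarded by NULL checks in the tree code). A minimal hypothetical client, assuming only what include/linux/sradix-tree.h declares; the demo_* names are illustrative, and mm/uksm.c below does the same thing with a kmem_cache and a larger wrapper struct:

#include <linux/slab.h>
#include <linux/sradix-tree.h>

#define DEMO_SHIFT	4	/* 1 << 4 = 16 stores per node */

static struct sradix_tree_node *demo_alloc(void)
{
	/* Node header plus the flexible stores[] array, zeroed as the
	 * tree code expects. */
	return kzalloc(sizeof(struct sradix_tree_node) +
		       (1UL << DEMO_SHIFT) * sizeof(void *), GFP_KERNEL);
}

static void demo_free(struct sradix_tree_node *node)
{
	kfree(node);
}

/* The root itself must start zeroed (static storage or kzalloc), since
 * init_sradix_tree_root() only fills in the geometry fields. */
static struct sradix_tree_root demo_root;

static int demo_store(void *object)
{
	/* sradix_tree_enter() fills the first hole; root->min tracks
	 * where that search starts. */
	return sradix_tree_enter(&demo_root, &object, 1);
}

static void demo_init(void)
{
	init_sradix_tree_root(&demo_root, DEMO_SHIFT);
	demo_root.alloc = demo_alloc;
	demo_root.free = demo_free;
}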
++endchoice + + config DEFAULT_MMAP_MIN_ADDR + int "Low address space to protect from user allocation" +diff --git a/mm/Makefile b/mm/Makefile +index 72c5acb..77882b7 100644 +--- a/mm/Makefile ++++ b/mm/Makefile +@@ -39,7 +39,8 @@ obj-$(CONFIG_SPARSEMEM) += sparse.o + obj-$(CONFIG_SPARSEMEM_VMEMMAP) += sparse-vmemmap.o + obj-$(CONFIG_SLOB) += slob.o + obj-$(CONFIG_MMU_NOTIFIER) += mmu_notifier.o +-obj-$(CONFIG_KSM) += ksm.o ++obj-$(CONFIG_KSM_LEGACY) += ksm.o ++obj-$(CONFIG_UKSM) += uksm.o + obj-$(CONFIG_PAGE_POISONING) += debug-pagealloc.o + obj-$(CONFIG_SLAB) += slab.o + obj-$(CONFIG_SLUB) += slub.o +diff --git a/mm/memory.c b/mm/memory.c +index 61a262b..a506b9d 100644 +--- a/mm/memory.c ++++ b/mm/memory.c +@@ -118,6 +118,27 @@ __setup("norandmaps", disable_randmaps); + unsigned long zero_pfn __read_mostly; + unsigned long highest_memmap_pfn __read_mostly; + ++#ifdef CONFIG_UKSM ++unsigned long uksm_zero_pfn __read_mostly; ++struct page *empty_uksm_zero_page; ++ ++static int __init setup_uksm_zero_page(void) ++{ ++ unsigned long addr; ++ addr = __get_free_pages(GFP_KERNEL | __GFP_ZERO, 0); ++ if (!addr) ++ panic("Oh boy, that early out of memory?"); ++ ++ empty_uksm_zero_page = virt_to_page((void *) addr); ++ SetPageReserved(empty_uksm_zero_page); ++ ++ uksm_zero_pfn = page_to_pfn(empty_uksm_zero_page); ++ ++ return 0; ++} ++core_initcall(setup_uksm_zero_page); ++#endif ++ + /* + * CONFIG_MMU architectures set up ZERO_PAGE in their paging_init() + */ +@@ -129,6 +150,7 @@ static int __init init_zero_pfn(void) + core_initcall(init_zero_pfn); + + ++ + #if defined(SPLIT_RSS_COUNTING) + + void sync_mm_rss(struct mm_struct *mm) +@@ -896,6 +918,11 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm, + rss[MM_ANONPAGES]++; + else + rss[MM_FILEPAGES]++; ++ ++ /* Should return NULL in vm_normal_page() */ ++ uksm_bugon_zeropage(pte); ++ } else { ++ uksm_map_zero_page(pte); + } + + out_set_pte: +@@ -1138,8 +1165,10 @@ again: + ptent = ptep_get_and_clear_full(mm, addr, pte, + tlb->fullmm); + tlb_remove_tlb_entry(tlb, pte, addr); +- if (unlikely(!page)) ++ if (unlikely(!page)) { ++ uksm_unmap_zero_page(ptent); + continue; ++ } + if (unlikely(details) && details->nonlinear_vma + && linear_page_index(details->nonlinear_vma, + addr) != page->index) +@@ -1704,7 +1733,7 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, + + VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET)); + +- /* ++ /* + * Require read or write permissions. + * If FOLL_FORCE is set, we only require the "MAY" flags. 
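The uksm zero page set up in mm/memory.c above is accounted through the new NR_UKSM_ZERO_PAGES zone counter and shows up as the KsmZeroPages field added to /proc/meminfo earlier in this patch. A user-space sketch, illustrative only and not part of the patch, to read it back:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[128];
	FILE *f = fopen("/proc/meminfo", "r");

	if (!f)
		return 1;
	/* The field only exists on kernels built with CONFIG_UKSM. */
	while (fgets(line, sizeof(line), f))
		if (!strncmp(line, "KsmZeroPages:", 13))
			fputs(line, stdout);
	fclose(f);
	return 0;
}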
+ */ +@@ -1764,7 +1793,7 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, + page = vm_normal_page(vma, start, *pte); + if (!page) { + if (!(gup_flags & FOLL_DUMP) && +- is_zero_pfn(pte_pfn(*pte))) ++ (is_zero_pfn(pte_pfn(*pte)))) + page = pte_page(*pte); + else { + pte_unmap(pte); +@@ -2579,8 +2608,10 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo + clear_page(kaddr); + kunmap_atomic(kaddr); + flush_dcache_page(dst); +- } else ++ } else { + copy_user_highpage(dst, src, va, vma); ++ uksm_cow_page(vma, src); ++ } + } + + /* +@@ -2779,6 +2810,7 @@ gotten: + new_page = alloc_zeroed_user_highpage_movable(vma, address); + if (!new_page) + goto oom; ++ uksm_cow_pte(vma, orig_pte); + } else { + new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address); + if (!new_page) +@@ -2804,8 +2836,11 @@ gotten: + dec_mm_counter_fast(mm, MM_FILEPAGES); + inc_mm_counter_fast(mm, MM_ANONPAGES); + } +- } else ++ uksm_bugon_zeropage(orig_pte); ++ } else { ++ uksm_unmap_zero_page(orig_pte); + inc_mm_counter_fast(mm, MM_ANONPAGES); ++ } + flush_cache_page(vma, address, pte_pfn(orig_pte)); + entry = mk_pte(new_page, vma->vm_page_prot); + entry = maybe_mkwrite(pte_mkdirty(entry), vma); +diff --git a/mm/mmap.c b/mm/mmap.c +index f681e18..31ef952 100644 +--- a/mm/mmap.c ++++ b/mm/mmap.c +@@ -36,6 +36,7 @@ + #include + #include + #include ++#include + + #include + #include +@@ -65,7 +66,7 @@ static void unmap_region(struct mm_struct *mm, + * MAP_SHARED r: (no) no r: (yes) yes r: (no) yes r: (no) yes + * w: (no) no w: (no) no w: (yes) yes w: (no) no + * x: (no) no x: (no) yes x: (no) yes x: (yes) yes +- * ++ * + * MAP_PRIVATE r: (no) no r: (yes) yes r: (no) yes r: (no) yes + * w: (no) no w: (no) no w: (copy) copy w: (no) no + * x: (no) no x: (no) yes x: (no) yes x: (yes) yes +@@ -252,6 +253,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma) + if (vma->vm_file) + fput(vma->vm_file); + mpol_put(vma_policy(vma)); ++ uksm_remove_vma(vma); + kmem_cache_free(vm_area_cachep, vma); + return next; + } +@@ -707,9 +709,16 @@ int vma_adjust(struct vm_area_struct *vma, unsigned long start, + long adjust_next = 0; + int remove_next = 0; + ++/* ++ * to avoid deadlock, ksm_remove_vma must be done before any spin_lock is ++ * acquired ++ */ ++ uksm_remove_vma(vma); ++ + if (next && !insert) { + struct vm_area_struct *exporter = NULL; + ++ uksm_remove_vma(next); + if (end >= next->vm_end) { + /* + * vma expands, overlapping all the next, and +@@ -803,6 +812,7 @@ again: remove_next = 1 + (end > next->vm_end); + end_changed = true; + } + vma->vm_pgoff = pgoff; ++ + if (adjust_next) { + next->vm_start += adjust_next << PAGE_SHIFT; + next->vm_pgoff += adjust_next; +@@ -873,16 +883,22 @@ again: remove_next = 1 + (end > next->vm_end); + * up the code too much to do both in one go. + */ + next = vma->vm_next; +- if (remove_next == 2) ++ if (remove_next == 2) { ++ uksm_remove_vma(next); + goto again; +- else if (next) ++ } else if (next) { + vma_gap_update(next); +- else ++ } else { + mm->highest_vm_end = end; ++ } ++ } else { ++ if (next && !insert) ++ uksm_vma_add_new(next); + } + if (insert && file) + uprobe_mmap(insert); + ++ uksm_vma_add_new(vma); + validate_mm(mm); + + return 0; +@@ -1250,6 +1266,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr, + vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) | + mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC; + ++ /* If uksm is enabled, we add VM_MERGABLE to new VMAs. 
*/ ++ uksm_vm_flags_mod(&vm_flags); ++ + if (flags & MAP_LOCKED) + if (!can_do_mlock()) + return -EPERM; +@@ -1595,6 +1614,7 @@ munmap_back: + + vma_link(mm, vma, prev, rb_link, rb_parent); + file = vma->vm_file; ++ uksm_vma_add_new(vma); + + /* Once vma denies write, undo our temporary denial count */ + if (correct_wcount) +@@ -1626,6 +1646,7 @@ unmap_and_free_vma: + unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end); + charged = 0; + free_vma: ++ uksm_remove_vma(vma); + kmem_cache_free(vm_area_cachep, vma); + unacct_error: + if (charged) +@@ -1874,7 +1895,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, + info.align_mask = 0; + return vm_unmapped_area(&info); + } +-#endif ++#endif + + void arch_unmap_area(struct mm_struct *mm, unsigned long addr) + { +@@ -2452,6 +2473,8 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma, + else + err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new); + ++ uksm_vma_add_new(new); ++ + /* Success. */ + if (!err) + return 0; +@@ -2617,6 +2640,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len) + return addr; + + flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags; ++ uksm_vm_flags_mod(&flags); + + error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED); + if (error & ~PAGE_MASK) +@@ -2684,6 +2708,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len) + vma->vm_flags = flags; + vma->vm_page_prot = vm_get_page_prot(flags); + vma_link(mm, vma, prev, rb_link, rb_parent); ++ uksm_vma_add_new(vma); + out: + perf_event_mmap(vma); + mm->total_vm += len >> PAGE_SHIFT; +@@ -2718,6 +2743,12 @@ void exit_mmap(struct mm_struct *mm) + /* mm's last user has gone, and its about to be pulled down */ + mmu_notifier_release(mm); + ++ /* ++ * Taking write lock on mmap_sem does not harm others, ++ * but it's crucial for uksm to avoid races. ++ */ ++ down_write(&mm->mmap_sem); ++ + if (mm->locked_vm) { + vma = mm->mmap; + while (vma) { +@@ -2754,6 +2785,11 @@ void exit_mmap(struct mm_struct *mm) + } + vm_unacct_memory(nr_accounted); + ++ mm->mmap = NULL; ++ mm->mm_rb = RB_ROOT; ++ mm->mmap_cache = NULL; ++ up_write(&mm->mmap_sem); ++ + WARN_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT); + } + +@@ -2864,6 +2900,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap, + new_vma->vm_ops->open(new_vma); + vma_link(mm, new_vma, prev, rb_link, rb_parent); + *need_rmap_locks = false; ++ uksm_vma_add_new(new_vma); + } + } + return new_vma; +@@ -2965,10 +3002,10 @@ int install_special_mapping(struct mm_struct *mm, + ret = insert_vm_struct(mm, vma); + if (ret) + goto out; +- + mm->total_vm += len >> PAGE_SHIFT; + + perf_event_mmap(vma); ++ uksm_vma_add_new(vma); + + return 0; + +diff --git a/mm/rmap.c b/mm/rmap.c +index 6280da8..645cf22 100644 +--- a/mm/rmap.c ++++ b/mm/rmap.c +@@ -973,9 +973,9 @@ void page_move_anon_rmap(struct page *page, + + /** + * __page_set_anon_rmap - set up new anonymous rmap +- * @page: Page to add to rmap ++ * @page: Page to add to rmap + * @vma: VM area to add page to. +- * @address: User virtual address of the mapping ++ * @address: User virtual address of the mapping + * @exclusive: the page is exclusively owned by the current process + */ + static void __page_set_anon_rmap(struct page *page, +diff --git a/mm/uksm.c b/mm/uksm.c +new file mode 100644 +index 0000000..794867a +--- /dev/null ++++ b/mm/uksm.c +@@ -0,0 +1,5640 @@ ++/* ++ * Ultra KSM. Copyright (C) 2011-2012 Nai Xia ++ * ++ * This is an improvement upon KSM. 
Some basic data structures and routines ++ * are borrowed from ksm.c . ++ * ++ * Its new features: ++ * 1. Full system scan: ++ * It automatically scans all user processes' anonymous VMAs. Kernel-user ++ * interaction to submit a memory area to KSM is no longer needed. ++ * ++ * 2. Rich area detection: ++ * It automatically detects rich areas containing abundant duplicated ++ * pages based. Rich areas are given a full scan speed. Poor areas are ++ * sampled at a reasonable speed with very low CPU consumption. ++ * ++ * 3. Ultra Per-page scan speed improvement: ++ * A new hash algorithm is proposed. As a result, on a machine with ++ * Core(TM)2 Quad Q9300 CPU in 32-bit mode and 800MHZ DDR2 main memory, it ++ * can scan memory areas that does not contain duplicated pages at speed of ++ * 627MB/sec ~ 2445MB/sec and can merge duplicated areas at speed of ++ * 477MB/sec ~ 923MB/sec. ++ * ++ * 4. Thrashing area avoidance: ++ * Thrashing area(an VMA that has frequent Ksm page break-out) can be ++ * filtered out. My benchmark shows it's more efficient than KSM's per-page ++ * hash value based volatile page detection. ++ * ++ * ++ * 5. Misc changes upon KSM: ++ * * It has a fully x86-opitmized memcmp dedicated for 4-byte-aligned page ++ * comparison. It's much faster than default C version on x86. ++ * * rmap_item now has an struct *page member to loosely cache a ++ * address-->page mapping, which reduces too much time-costly ++ * follow_page(). ++ * * The VMA creation/exit procedures are hooked to let the Ultra KSM know. ++ * * try_to_merge_two_pages() now can revert a pte if it fails. No break_ ++ * ksm is needed for this case. ++ * ++ * 6. Full Zero Page consideration(contributed by Figo Zhang) ++ * Now uksmd consider full zero pages as special pages and merge them to an ++ * special unswappable uksm zero page. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include "internal.h" ++ ++#ifdef CONFIG_X86 ++#undef memcmp ++ ++#ifdef CONFIG_X86_32 ++#define memcmp memcmpx86_32 ++/* ++ * Compare 4-byte-aligned address s1 and s2, with length n ++ */ ++int memcmpx86_32(void *s1, void *s2, size_t n) ++{ ++ size_t num = n / 4; ++ register int res; ++ ++ __asm__ __volatile__ ++ ( ++ "testl %3,%3\n\t" ++ "repe; cmpsd\n\t" ++ "je 1f\n\t" ++ "sbbl %0,%0\n\t" ++ "orl $1,%0\n" ++ "1:" ++ : "=&a" (res), "+&S" (s1), "+&D" (s2), "+&c" (num) ++ : "0" (0) ++ : "cc"); ++ ++ return res; ++} ++ ++/* ++ * Check the page is all zero ? 
++ */ ++static int is_full_zero(const void *s1, size_t len) ++{ ++ unsigned char same; ++ ++ len /= 4; ++ ++ __asm__ __volatile__ ++ ("repe; scasl;" ++ "sete %0" ++ : "=qm" (same), "+D" (s1), "+c" (len) ++ : "a" (0) ++ : "cc"); ++ ++ return same; ++} ++ ++ ++#elif defined(CONFIG_X86_64) ++#define memcmp memcmpx86_64 ++/* ++ * Compare 8-byte-aligned address s1 and s2, with length n ++ */ ++int memcmpx86_64(void *s1, void *s2, size_t n) ++{ ++ size_t num = n / 8; ++ register int res; ++ ++ __asm__ __volatile__ ++ ( ++ "testq %q3,%q3\n\t" ++ "repe; cmpsq\n\t" ++ "je 1f\n\t" ++ "sbbq %q0,%q0\n\t" ++ "orq $1,%q0\n" ++ "1:" ++ : "=&a" (res), "+&S" (s1), "+&D" (s2), "+&c" (num) ++ : "0" (0) ++ : "cc"); ++ ++ return res; ++} ++ ++static int is_full_zero(const void *s1, size_t len) ++{ ++ unsigned char same; ++ ++ len /= 8; ++ ++ __asm__ __volatile__ ++ ("repe; scasq;" ++ "sete %0" ++ : "=qm" (same), "+D" (s1), "+c" (len) ++ : "a" (0) ++ : "cc"); ++ ++ return same; ++} ++ ++#endif ++#else ++static int is_full_zero(const void *s1, size_t len) ++{ ++ unsigned long *src = s1; ++ int i; ++ ++ len /= sizeof(*src); ++ ++ for (i = 0; i < len; i++) { ++ if (src[i]) ++ return 0; ++ } ++ ++ return 1; ++} ++#endif ++ ++#define U64_MAX (~((u64)0)) ++#define UKSM_RUNG_ROUND_FINISHED (1 << 0) ++#define TIME_RATIO_SCALE 10000 ++ ++#define SLOT_TREE_NODE_SHIFT 8 ++#define SLOT_TREE_NODE_STORE_SIZE (1UL << SLOT_TREE_NODE_SHIFT) ++struct slot_tree_node { ++ unsigned long size; ++ struct sradix_tree_node snode; ++ void *stores[SLOT_TREE_NODE_STORE_SIZE]; ++}; ++ ++static struct kmem_cache *slot_tree_node_cachep; ++ ++static struct sradix_tree_node *slot_tree_node_alloc(void) ++{ ++ struct slot_tree_node *p; ++ p = kmem_cache_zalloc(slot_tree_node_cachep, GFP_KERNEL); ++ if (!p) ++ return NULL; ++ ++ return &p->snode; ++} ++ ++static void slot_tree_node_free(struct sradix_tree_node *node) ++{ ++ struct slot_tree_node *p; ++ ++ p = container_of(node, struct slot_tree_node, snode); ++ kmem_cache_free(slot_tree_node_cachep, p); ++} ++ ++static void slot_tree_node_extend(struct sradix_tree_node *parent, ++ struct sradix_tree_node *child) ++{ ++ struct slot_tree_node *p, *c; ++ ++ p = container_of(parent, struct slot_tree_node, snode); ++ c = container_of(child, struct slot_tree_node, snode); ++ ++ p->size += c->size; ++} ++ ++void slot_tree_node_assign(struct sradix_tree_node *node, ++ unsigned index, void *item) ++{ ++ struct vma_slot *slot = item; ++ struct slot_tree_node *cur; ++ ++ slot->snode = node; ++ slot->sindex = index; ++ ++ while (node) { ++ cur = container_of(node, struct slot_tree_node, snode); ++ cur->size += slot->pages; ++ node = node->parent; ++ } ++} ++ ++void slot_tree_node_rm(struct sradix_tree_node *node, unsigned offset) ++{ ++ struct vma_slot *slot; ++ struct slot_tree_node *cur; ++ unsigned long pages; ++ ++ if (node->height == 1) { ++ slot = node->stores[offset]; ++ pages = slot->pages; ++ } else { ++ cur = container_of(node->stores[offset], ++ struct slot_tree_node, snode); ++ pages = cur->size; ++ } ++ ++ while (node) { ++ cur = container_of(node, struct slot_tree_node, snode); ++ cur->size -= pages; ++ node = node->parent; ++ } ++} ++ ++unsigned long slot_iter_index; ++int slot_iter(void *item, unsigned long height) ++{ ++ struct slot_tree_node *node; ++ struct vma_slot *slot; ++ ++ if (height == 1) { ++ slot = item; ++ if (slot_iter_index < slot->pages) { ++ /*in this one*/ ++ return 1; ++ } else { ++ slot_iter_index -= slot->pages; ++ return 0; ++ } ++ ++ } else { ++ node = 
container_of(item, struct slot_tree_node, snode); ++ if (slot_iter_index < node->size) { ++ /*in this one*/ ++ return 1; ++ } else { ++ slot_iter_index -= node->size; ++ return 0; ++ } ++ } ++} ++ ++ ++static inline void slot_tree_init_root(struct sradix_tree_root *root) ++{ ++ init_sradix_tree_root(root, SLOT_TREE_NODE_SHIFT); ++ root->alloc = slot_tree_node_alloc; ++ root->free = slot_tree_node_free; ++ root->extend = slot_tree_node_extend; ++ root->assign = slot_tree_node_assign; ++ root->rm = slot_tree_node_rm; ++} ++ ++void slot_tree_init(void) ++{ ++ slot_tree_node_cachep = kmem_cache_create("slot_tree_node", ++ sizeof(struct slot_tree_node), 0, ++ SLAB_PANIC | SLAB_RECLAIM_ACCOUNT, ++ NULL); ++} ++ ++ ++/* Each rung of this ladder is a list of VMAs having a same scan ratio */ ++struct scan_rung { ++ //struct list_head scanned_list; ++ struct sradix_tree_root vma_root; ++ struct sradix_tree_root vma_root2; ++ ++ struct vma_slot *current_scan; ++ unsigned long current_offset; ++ ++ /* ++ * The initial value for current_offset, it should loop over ++ * [0~ step - 1] to let all slot have its chance to be scanned. ++ */ ++ unsigned long offset_init; ++ unsigned long step; /* dynamic step for current_offset */ ++ unsigned int flags; ++ unsigned long pages_to_scan; ++ //unsigned long fully_scanned_slots; ++ /* ++ * a little bit tricky - if cpu_time_ratio > 0, then the value is the ++ * the cpu time ratio it can spend in rung_i for every scan ++ * period. if < 0, then it is the cpu time ratio relative to the ++ * max cpu percentage user specified. Both in unit of ++ * 1/TIME_RATIO_SCALE ++ */ ++ int cpu_ratio; ++ ++ /* ++ * How long it will take for all slots in this rung to be fully ++ * scanned? If it's zero, we don't care about the cover time: ++ * it's fully scanned. ++ */ ++ unsigned int cover_msecs; ++ //unsigned long vma_num; ++ //unsigned long pages; /* Sum of all slot's pages in rung */ ++}; ++ ++/** ++ * node of either the stable or unstale rbtree ++ * ++ */ ++struct tree_node { ++ struct rb_node node; /* link in the main (un)stable rbtree */ ++ struct rb_root sub_root; /* rb_root for sublevel collision rbtree */ ++ u32 hash; ++ unsigned long count; /* TODO: merged with sub_root */ ++ struct list_head all_list; /* all tree nodes in stable/unstable tree */ ++}; ++ ++/** ++ * struct stable_node - node of the stable rbtree ++ * @node: rb node of this ksm page in the stable tree ++ * @hlist: hlist head of rmap_items using this ksm page ++ * @kpfn: page frame number of this ksm page ++ */ ++struct stable_node { ++ struct rb_node node; /* link in sub-rbtree */ ++ struct tree_node *tree_node; /* it's tree node root in stable tree, NULL if it's in hell list */ ++ struct hlist_head hlist; ++ unsigned long kpfn; ++ u32 hash_max; /* if ==0 then it's not been calculated yet */ ++ struct list_head all_list; /* in a list for all stable nodes */ ++}; ++ ++/** ++ * struct node_vma - group rmap_items linked in a same stable ++ * node together. 
++ */ ++struct node_vma { ++ union { ++ struct vma_slot *slot; ++ unsigned long key; /* slot is used as key sorted on hlist */ ++ }; ++ struct hlist_node hlist; ++ struct hlist_head rmap_hlist; ++ struct stable_node *head; ++}; ++ ++/** ++ * struct rmap_item - reverse mapping item for virtual addresses ++ * @rmap_list: next rmap_item in mm_slot's singly-linked rmap_list ++ * @anon_vma: pointer to anon_vma for this mm,address, when in stable tree ++ * @mm: the memory structure this rmap_item is pointing into ++ * @address: the virtual address this rmap_item tracks (+ flags in low bits) ++ * @node: rb node of this rmap_item in the unstable tree ++ * @head: pointer to stable_node heading this list in the stable tree ++ * @hlist: link into hlist of rmap_items hanging off that stable_node ++ */ ++struct rmap_item { ++ struct vma_slot *slot; ++ struct page *page; ++ unsigned long address; /* + low bits used for flags below */ ++ unsigned long hash_round; ++ unsigned long entry_index; ++ union { ++ struct {/* when in unstable tree */ ++ struct rb_node node; ++ struct tree_node *tree_node; ++ u32 hash_max; ++ }; ++ struct { /* when in stable tree */ ++ struct node_vma *head; ++ struct hlist_node hlist; ++ struct anon_vma *anon_vma; ++ }; ++ }; ++} __attribute__((aligned(4))); ++ ++struct rmap_list_entry { ++ union { ++ struct rmap_item *item; ++ unsigned long addr; ++ }; ++ /* lowest bit is used for is_addr tag */ ++} __attribute__((aligned(4))); /* 4 aligned to fit in to pages*/ ++ ++ ++/* Basic data structure definition ends */ ++ ++ ++/* ++ * Flags for rmap_item to judge if it's listed in the stable/unstable tree. ++ * The flags use the low bits of rmap_item.address ++ */ ++#define UNSTABLE_FLAG 0x1 ++#define STABLE_FLAG 0x2 ++#define get_rmap_addr(x) ((x)->address & PAGE_MASK) ++ ++/* ++ * rmap_list_entry helpers ++ */ ++#define IS_ADDR_FLAG 1 ++#define is_addr(ptr) ((unsigned long)(ptr) & IS_ADDR_FLAG) ++#define set_is_addr(ptr) ((ptr) |= IS_ADDR_FLAG) ++#define get_clean_addr(ptr) (((ptr) & ~(__typeof__(ptr))IS_ADDR_FLAG)) ++ ++ ++/* ++ * High speed caches for frequently allocated and freed structs ++ */ ++static struct kmem_cache *rmap_item_cache; ++static struct kmem_cache *stable_node_cache; ++static struct kmem_cache *node_vma_cache; ++static struct kmem_cache *vma_slot_cache; ++static struct kmem_cache *tree_node_cache; ++#define UKSM_KMEM_CACHE(__struct, __flags) kmem_cache_create("uksm_"#__struct,\ ++ sizeof(struct __struct), __alignof__(struct __struct),\ ++ (__flags), NULL) ++ ++/* Array of all scan_rung, uksm_scan_ladder[0] having the minimum scan ratio */ ++#define SCAN_LADDER_SIZE 4 ++static struct scan_rung uksm_scan_ladder[SCAN_LADDER_SIZE]; ++ ++/* The evaluation rounds uksmd has finished */ ++static unsigned long long uksm_eval_round = 1; ++ ++/* ++ * we add 1 to this var when we consider we should rebuild the whole ++ * unstable tree. ++ */ ++static unsigned long uksm_hash_round = 1; ++ ++/* ++ * How many times the whole memory is scanned. 
++ */ ++static unsigned long long fully_scanned_round = 1; ++ ++/* The total number of virtual pages of all vma slots */ ++static u64 uksm_pages_total; ++ ++/* The number of pages has been scanned since the start up */ ++static u64 uksm_pages_scanned; ++ ++static u64 scanned_virtual_pages; ++ ++/* The number of pages has been scanned since last encode_benefit call */ ++static u64 uksm_pages_scanned_last; ++ ++/* If the scanned number is tooo large, we encode it here */ ++static u64 pages_scanned_stored; ++ ++static unsigned long pages_scanned_base; ++ ++/* The number of nodes in the stable tree */ ++static unsigned long uksm_pages_shared; ++ ++/* The number of page slots additionally sharing those nodes */ ++static unsigned long uksm_pages_sharing; ++ ++/* The number of nodes in the unstable tree */ ++static unsigned long uksm_pages_unshared; ++ ++/* ++ * Milliseconds ksmd should sleep between scans, ++ * >= 100ms to be consistent with ++ * scan_time_to_sleep_msec() ++ */ ++static unsigned int uksm_sleep_jiffies; ++ ++/* The real value for the uksmd next sleep */ ++static unsigned int uksm_sleep_real; ++ ++/* Saved value for user input uksm_sleep_jiffies when it's enlarged */ ++static unsigned int uksm_sleep_saved; ++ ++/* Max percentage of cpu utilization ksmd can take to scan in one batch */ ++static unsigned int uksm_max_cpu_percentage; ++ ++static int uksm_cpu_governor; ++ ++static char *uksm_cpu_governor_str[4] = { "full", "medium", "low", "quiet" }; ++ ++struct uksm_cpu_preset_s { ++ int cpu_ratio[SCAN_LADDER_SIZE]; ++ unsigned int cover_msecs[SCAN_LADDER_SIZE]; ++ unsigned int max_cpu; /* percentage */ ++}; ++ ++struct uksm_cpu_preset_s uksm_cpu_preset[4] = { ++ { {20, 40, -2500, -10000}, {1000, 500, 200, 50}, 95}, ++ { {20, 30, -2500, -10000}, {1000, 500, 400, 100}, 50}, ++ { {10, 20, -5000, -10000}, {1500, 1000, 1000, 250}, 20}, ++ { {10, 20, 40, 75}, {2000, 1000, 1000, 1000}, 1}, ++}; ++ ++/* The default value for uksm_ema_page_time if it's not initialized */ ++#define UKSM_PAGE_TIME_DEFAULT 500 ++ ++/*cost to scan one page by expotional moving average in nsecs */ ++static unsigned long uksm_ema_page_time = UKSM_PAGE_TIME_DEFAULT; ++ ++/* The expotional moving average alpha weight, in percentage. */ ++#define EMA_ALPHA 20 ++ ++/* ++ * The threshold used to filter out thrashing areas, ++ * If it == 0, filtering is disabled, otherwise it's the percentage up-bound ++ * of the thrashing ratio of all areas. Any area with a bigger thrashing ratio ++ * will be considered as having a zero duplication ratio. ++ */ ++static unsigned int uksm_thrash_threshold = 50; ++ ++/* How much dedup ratio is considered to be abundant*/ ++static unsigned int uksm_abundant_threshold = 10; ++ ++/* All slots having merged pages in this eval round. */ ++struct list_head vma_slot_dedup = LIST_HEAD_INIT(vma_slot_dedup); ++ ++/* How many times the ksmd has slept since startup */ ++static unsigned long long uksm_sleep_times; ++ ++#define UKSM_RUN_STOP 0 ++#define UKSM_RUN_MERGE 1 ++static unsigned int uksm_run = 1; ++ ++static DECLARE_WAIT_QUEUE_HEAD(uksm_thread_wait); ++static DEFINE_MUTEX(uksm_thread_mutex); ++ ++/* ++ * List vma_slot_new is for newly created vma_slot waiting to be added by ++ * ksmd. If one cannot be added(e.g. due to it's too small), it's moved to ++ * vma_slot_noadd. vma_slot_del is the list for vma_slot whose corresponding ++ * VMA has been removed/freed. 
++ */ ++struct list_head vma_slot_new = LIST_HEAD_INIT(vma_slot_new); ++struct list_head vma_slot_noadd = LIST_HEAD_INIT(vma_slot_noadd); ++struct list_head vma_slot_del = LIST_HEAD_INIT(vma_slot_del); ++static DEFINE_SPINLOCK(vma_slot_list_lock); ++ ++/* The unstable tree heads */ ++static struct rb_root root_unstable_tree = RB_ROOT; ++ ++/* ++ * All tree_nodes are in a list to be freed at once when unstable tree is ++ * freed after each scan round. ++ */ ++static struct list_head unstable_tree_node_list = ++ LIST_HEAD_INIT(unstable_tree_node_list); ++ ++/* List contains all stable nodes */ ++static struct list_head stable_node_list = LIST_HEAD_INIT(stable_node_list); ++ ++/* ++ * When the hash strength is changed, the stable tree must be delta_hashed and ++ * re-structured. We use two set of below structs to speed up the ++ * re-structuring of stable tree. ++ */ ++static struct list_head ++stable_tree_node_list[2] = {LIST_HEAD_INIT(stable_tree_node_list[0]), ++ LIST_HEAD_INIT(stable_tree_node_list[1])}; ++ ++static struct list_head *stable_tree_node_listp = &stable_tree_node_list[0]; ++static struct rb_root root_stable_tree[2] = {RB_ROOT, RB_ROOT}; ++static struct rb_root *root_stable_treep = &root_stable_tree[0]; ++static unsigned long stable_tree_index; ++ ++/* The hash strength needed to hash a full page */ ++#define HASH_STRENGTH_FULL (PAGE_SIZE / sizeof(u32)) ++ ++/* The hash strength needed for loop-back hashing */ ++#define HASH_STRENGTH_MAX (HASH_STRENGTH_FULL + 10) ++ ++/* The random offsets in a page */ ++static u32 *random_nums; ++ ++/* The hash strength */ ++static unsigned long hash_strength = HASH_STRENGTH_FULL >> 4; ++ ++/* The delta value each time the hash strength increases or decreases */ ++static unsigned long hash_strength_delta; ++#define HASH_STRENGTH_DELTA_MAX 5 ++ ++/* The time we have saved due to random_sample_hash */ ++static u64 rshash_pos; ++ ++/* The time we have wasted due to hash collision */ ++static u64 rshash_neg; ++ ++struct uksm_benefit { ++ u64 pos; ++ u64 neg; ++ u64 scanned; ++ unsigned long base; ++} benefit; ++ ++/* ++ * The relative cost of memcmp, compared to 1 time unit of random sample ++ * hash, this value is tested when ksm module is initialized ++ */ ++static unsigned long memcmp_cost; ++ ++static unsigned long rshash_neg_cont_zero; ++static unsigned long rshash_cont_obscure; ++ ++/* The possible states of hash strength adjustment heuristic */ ++enum rshash_states { ++ RSHASH_STILL, ++ RSHASH_TRYUP, ++ RSHASH_TRYDOWN, ++ RSHASH_NEW, ++ RSHASH_PRE_STILL, ++}; ++ ++/* The possible direction we are about to adjust hash strength */ ++enum rshash_direct { ++ GO_UP, ++ GO_DOWN, ++ OBSCURE, ++ STILL, ++}; ++ ++/* random sampling hash state machine */ ++static struct { ++ enum rshash_states state; ++ enum rshash_direct pre_direct; ++ u8 below_count; ++ /* Keep a lookup window of size 5, iff above_count/below_count > 3 ++ * in this window we stop trying. 
++ */ ++ u8 lookup_window_index; ++ u64 stable_benefit; ++ unsigned long turn_point_down; ++ unsigned long turn_benefit_down; ++ unsigned long turn_point_up; ++ unsigned long turn_benefit_up; ++ unsigned long stable_point; ++} rshash_state; ++ ++/*zero page hash table, hash_strength [0 ~ HASH_STRENGTH_MAX]*/ ++static u32 *zero_hash_table; ++ ++static inline struct node_vma *alloc_node_vma(void) ++{ ++ struct node_vma *node_vma; ++ node_vma = kmem_cache_zalloc(node_vma_cache, GFP_KERNEL); ++ if (node_vma) { ++ INIT_HLIST_HEAD(&node_vma->rmap_hlist); ++ INIT_HLIST_NODE(&node_vma->hlist); ++ } ++ return node_vma; ++} ++ ++static inline void free_node_vma(struct node_vma *node_vma) ++{ ++ kmem_cache_free(node_vma_cache, node_vma); ++} ++ ++ ++static inline struct vma_slot *alloc_vma_slot(void) ++{ ++ struct vma_slot *slot; ++ ++ /* ++ * In case ksm is not initialized by now. ++ * Oops, we need to consider the call site of uksm_init() in the future. ++ */ ++ if (!vma_slot_cache) ++ return NULL; ++ ++ slot = kmem_cache_zalloc(vma_slot_cache, GFP_KERNEL); ++ if (slot) { ++ INIT_LIST_HEAD(&slot->slot_list); ++ INIT_LIST_HEAD(&slot->dedup_list); ++ slot->flags |= UKSM_SLOT_NEED_RERAND; ++ } ++ return slot; ++} ++ ++static inline void free_vma_slot(struct vma_slot *vma_slot) ++{ ++ kmem_cache_free(vma_slot_cache, vma_slot); ++} ++ ++ ++ ++static inline struct rmap_item *alloc_rmap_item(void) ++{ ++ struct rmap_item *rmap_item; ++ ++ rmap_item = kmem_cache_zalloc(rmap_item_cache, GFP_KERNEL); ++ if (rmap_item) { ++ /* bug on lowest bit is not clear for flag use */ ++ BUG_ON(is_addr(rmap_item)); ++ } ++ return rmap_item; ++} ++ ++static inline void free_rmap_item(struct rmap_item *rmap_item) ++{ ++ rmap_item->slot = NULL; /* debug safety */ ++ kmem_cache_free(rmap_item_cache, rmap_item); ++} ++ ++static inline struct stable_node *alloc_stable_node(void) ++{ ++ struct stable_node *node; ++ node = kmem_cache_alloc(stable_node_cache, GFP_KERNEL | GFP_ATOMIC); ++ if (!node) ++ return NULL; ++ ++ INIT_HLIST_HEAD(&node->hlist); ++ list_add(&node->all_list, &stable_node_list); ++ return node; ++} ++ ++static inline void free_stable_node(struct stable_node *stable_node) ++{ ++ list_del(&stable_node->all_list); ++ kmem_cache_free(stable_node_cache, stable_node); ++} ++ ++static inline struct tree_node *alloc_tree_node(struct list_head *list) ++{ ++ struct tree_node *node; ++ node = kmem_cache_zalloc(tree_node_cache, GFP_KERNEL | GFP_ATOMIC); ++ if (!node) ++ return NULL; ++ ++ list_add(&node->all_list, list); ++ return node; ++} ++ ++static inline void free_tree_node(struct tree_node *node) ++{ ++ list_del(&node->all_list); ++ kmem_cache_free(tree_node_cache, node); ++} ++ ++static void uksm_drop_anon_vma(struct rmap_item *rmap_item) ++{ ++ struct anon_vma *anon_vma = rmap_item->anon_vma; ++ ++ put_anon_vma(anon_vma); ++} ++ ++ ++/** ++ * Remove a stable node from stable_tree, may unlink from its tree_node and ++ * may remove its parent tree_node if no other stable node is pending. ++ * ++ * @stable_node The node need to be removed ++ * @unlink_rb Will this node be unlinked from the rbtree? ++ * @remove_tree_ node Will its tree_node be removed if empty? 
++ */ ++static void remove_node_from_stable_tree(struct stable_node *stable_node, ++ int unlink_rb, int remove_tree_node) ++{ ++ struct node_vma *node_vma; ++ struct rmap_item *rmap_item; ++ struct hlist_node *n; ++ ++ if (!hlist_empty(&stable_node->hlist)) { ++ hlist_for_each_entry_safe(node_vma, n, ++ &stable_node->hlist, hlist) { ++ hlist_for_each_entry(rmap_item, &node_vma->rmap_hlist, hlist) { ++ uksm_pages_sharing--; ++ ++ uksm_drop_anon_vma(rmap_item); ++ rmap_item->address &= PAGE_MASK; ++ } ++ free_node_vma(node_vma); ++ cond_resched(); ++ } ++ ++ /* the last one is counted as shared */ ++ uksm_pages_shared--; ++ uksm_pages_sharing++; ++ } ++ ++ if (stable_node->tree_node && unlink_rb) { ++ rb_erase(&stable_node->node, ++ &stable_node->tree_node->sub_root); ++ ++ if (RB_EMPTY_ROOT(&stable_node->tree_node->sub_root) && ++ remove_tree_node) { ++ rb_erase(&stable_node->tree_node->node, ++ root_stable_treep); ++ free_tree_node(stable_node->tree_node); ++ } else { ++ stable_node->tree_node->count--; ++ } ++ } ++ ++ free_stable_node(stable_node); ++} ++ ++ ++/* ++ * get_uksm_page: checks if the page indicated by the stable node ++ * is still its ksm page, despite having held no reference to it. ++ * In which case we can trust the content of the page, and it ++ * returns the gotten page; but if the page has now been zapped, ++ * remove the stale node from the stable tree and return NULL. ++ * ++ * You would expect the stable_node to hold a reference to the ksm page. ++ * But if it increments the page's count, swapping out has to wait for ++ * ksmd to come around again before it can free the page, which may take ++ * seconds or even minutes: much too unresponsive. So instead we use a ++ * "keyhole reference": access to the ksm page from the stable node peeps ++ * out through its keyhole to see if that page still holds the right key, ++ * pointing back to this stable node. This relies on freeing a PageAnon ++ * page to reset its page->mapping to NULL, and relies on no other use of ++ * a page to put something that might look like our key in page->mapping. ++ * ++ * include/linux/pagemap.h page_cache_get_speculative() is a good reference, ++ * but this is different - made simpler by uksm_thread_mutex being held, but ++ * interesting for assuming that no other use of the struct page could ever ++ * put our expected_mapping into page->mapping (or a field of the union which ++ * coincides with page->mapping). The RCU calls are not for KSM at all, but ++ * to keep the page_count protocol described with page_cache_get_speculative. ++ * ++ * Note: it is possible that get_uksm_page() will return NULL one moment, ++ * then page the next, if the page is in between page_freeze_refs() and ++ * page_unfreeze_refs(): this shouldn't be a problem anywhere, the page ++ * is on its way to being freed; but it is an anomaly to bear in mind. ++ * ++ * @unlink_rb: if the removal of this node will firstly unlink from ++ * its rbtree. stable_node_reinsert will prevent this when restructuring the ++ * node from its old tree. ++ * ++ * @remove_tree_node: if this is the last one of its tree_node, will the ++ * tree_node be freed ? If we are inserting stable node, this tree_node may ++ * be reused, so don't free it. 
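++ *
++ * Typical caller pattern, as a sketch (see remove_rmap_item_from_tree()
++ * below for a real instance):
++ *
++ *	page = get_uksm_page(stable_node, 1, 1);
++ *	if (!page)
++ *		return;		(stale node, already removed for us)
++ *	...
++ *	put_page(page);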
++ */ ++static struct page *get_uksm_page(struct stable_node *stable_node, ++ int unlink_rb, int remove_tree_node) ++{ ++ struct page *page; ++ void *expected_mapping; ++ ++ page = pfn_to_page(stable_node->kpfn); ++ expected_mapping = (void *)stable_node + ++ (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM); ++ rcu_read_lock(); ++ if (page->mapping != expected_mapping) ++ goto stale; ++ if (!get_page_unless_zero(page)) ++ goto stale; ++ if (page->mapping != expected_mapping) { ++ put_page(page); ++ goto stale; ++ } ++ rcu_read_unlock(); ++ return page; ++stale: ++ rcu_read_unlock(); ++ remove_node_from_stable_tree(stable_node, unlink_rb, remove_tree_node); ++ ++ return NULL; ++} ++ ++/* ++ * Removing rmap_item from stable or unstable tree. ++ * This function will clean the information from the stable/unstable tree. ++ */ ++static inline void remove_rmap_item_from_tree(struct rmap_item *rmap_item) ++{ ++ if (rmap_item->address & STABLE_FLAG) { ++ struct stable_node *stable_node; ++ struct node_vma *node_vma; ++ struct page *page; ++ ++ node_vma = rmap_item->head; ++ stable_node = node_vma->head; ++ page = get_uksm_page(stable_node, 1, 1); ++ if (!page) ++ goto out; ++ ++ /* ++ * page lock is needed because it's racing with ++ * try_to_unmap_ksm(), etc. ++ */ ++ lock_page(page); ++ hlist_del(&rmap_item->hlist); ++ ++ if (hlist_empty(&node_vma->rmap_hlist)) { ++ hlist_del(&node_vma->hlist); ++ free_node_vma(node_vma); ++ } ++ unlock_page(page); ++ ++ put_page(page); ++ if (hlist_empty(&stable_node->hlist)) { ++ /* do NOT call remove_node_from_stable_tree() here, ++ * it's possible for a forked rmap_item not in ++ * stable tree while the in-tree rmap_items were ++ * deleted. ++ */ ++ uksm_pages_shared--; ++ } else ++ uksm_pages_sharing--; ++ ++ ++ uksm_drop_anon_vma(rmap_item); ++ } else if (rmap_item->address & UNSTABLE_FLAG) { ++ if (rmap_item->hash_round == uksm_hash_round) { ++ ++ rb_erase(&rmap_item->node, ++ &rmap_item->tree_node->sub_root); ++ if (RB_EMPTY_ROOT(&rmap_item->tree_node->sub_root)) { ++ rb_erase(&rmap_item->tree_node->node, ++ &root_unstable_tree); ++ ++ free_tree_node(rmap_item->tree_node); ++ } else ++ rmap_item->tree_node->count--; ++ } ++ uksm_pages_unshared--; ++ } ++ ++ rmap_item->address &= PAGE_MASK; ++ rmap_item->hash_max = 0; ++ ++out: ++ cond_resched(); /* we're called from many long loops */ ++} ++ ++static inline int slot_in_uksm(struct vma_slot *slot) ++{ ++ return list_empty(&slot->slot_list); ++} ++ ++/* ++ * Test if the mm is exiting ++ */ ++static inline bool uksm_test_exit(struct mm_struct *mm) ++{ ++ return atomic_read(&mm->mm_users) == 0; ++} ++ ++/** ++ * Need to do two things: ++ * 1. check if slot was moved to del list ++ * 2. make sure the mmap_sem is manipulated under valid vma. ++ * ++ * My concern here is that in some cases, this may make ++ * vma_slot_list_lock() waiters to serialized further by some ++ * sem->wait_lock, can this really be expensive? ++ * ++ * ++ * @return ++ * 0: if successfully locked mmap_sem ++ * -ENOENT: this slot was moved to del list ++ * -EBUSY: vma lock failed ++ */ ++static int try_down_read_slot_mmap_sem(struct vma_slot *slot) ++{ ++ struct vm_area_struct *vma; ++ struct mm_struct *mm; ++ struct rw_semaphore *sem; ++ ++ spin_lock(&vma_slot_list_lock); ++ ++ /* the slot_list was removed and inited from new list, when it enters ++ * uksm_list. 
If it is non-empty now, it must have been moved to the del list
++	 */
++	if (!slot_in_uksm(slot)) {
++		spin_unlock(&vma_slot_list_lock);
++		return -ENOENT;
++	}
++
++	BUG_ON(slot->pages != vma_pages(slot->vma));
++	/* Ok, vma still valid */
++	vma = slot->vma;
++	mm = vma->vm_mm;
++	sem = &mm->mmap_sem;
++
++	if (uksm_test_exit(mm)) {
++		spin_unlock(&vma_slot_list_lock);
++		return -ENOENT;
++	}
++
++	if (down_read_trylock(sem)) {
++		spin_unlock(&vma_slot_list_lock);
++		return 0;
++	}
++
++	spin_unlock(&vma_slot_list_lock);
++	return -EBUSY;
++}
++
++static inline unsigned long
++vma_page_address(struct page *page, struct vm_area_struct *vma)
++{
++	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
++	unsigned long address;
++
++	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
++	if (unlikely(address < vma->vm_start || address >= vma->vm_end)) {
++		/* page should be within @vma mapping range */
++		return -EFAULT;
++	}
++	return address;
++}
++
++
++/* return 0 on success with the item's mmap_sem locked */
++static inline int get_mergeable_page_lock_mmap(struct rmap_item *item)
++{
++	struct mm_struct *mm;
++	struct vma_slot *slot = item->slot;
++	int err = -EINVAL;
++
++	struct page *page;
++
++	/*
++	 * try_down_read_slot_mmap_sem() returns non-zero if the slot
++	 * has been removed by uksm_remove_vma().
++	 */
++	if (try_down_read_slot_mmap_sem(slot))
++		return -EBUSY;
++
++	mm = slot->vma->vm_mm;
++
++	if (uksm_test_exit(mm))
++		goto failout_up;
++
++	page = item->page;
++	rcu_read_lock();
++	if (!get_page_unless_zero(page)) {
++		rcu_read_unlock();
++		goto failout_up;
++	}
++
++	/* No need to consider huge page here. */
++	if (item->slot->vma->anon_vma != page_anon_vma(page) ||
++	    vma_page_address(page, item->slot->vma) != get_rmap_addr(item)) {
++		/*
++		 * TODO:
++		 * should we release this item because of its stale page
++		 * mapping?
++		 */
++		put_page(page);
++		rcu_read_unlock();
++		goto failout_up;
++	}
++	rcu_read_unlock();
++	return 0;
++
++failout_up:
++	up_read(&mm->mmap_sem);
++	return err;
++}
++
++/*
++ * What kind of VMA is eligible to be scanned?
++ */
++static inline int vma_can_enter(struct vm_area_struct *vma)
++{
++	return uksm_flags_can_scan(vma->vm_flags);
++}
++
++/*
++ * Called whenever a fresh new vma is created. A new vma_slot is
++ * created and inserted into a global list. Must be called after the
++ * vma has been inserted into its mm.
++ */
++void uksm_vma_add_new(struct vm_area_struct *vma)
++{
++	struct vma_slot *slot;
++
++	if (!vma_can_enter(vma)) {
++		vma->uksm_vma_slot = NULL;
++		return;
++	}
++
++	slot = alloc_vma_slot();
++	if (!slot) {
++		vma->uksm_vma_slot = NULL;
++		return;
++	}
++
++	vma->uksm_vma_slot = slot;
++	vma->vm_flags |= VM_MERGEABLE;
++	slot->vma = vma;
++	slot->mm = vma->vm_mm;
++	slot->ctime_j = jiffies;
++	slot->pages = vma_pages(vma);
++	spin_lock(&vma_slot_list_lock);
++	list_add_tail(&slot->slot_list, &vma_slot_new);
++	spin_unlock(&vma_slot_list_lock);
++}
++
++/*
++ * Called after vma is unlinked from its mm
++ */
++void uksm_remove_vma(struct vm_area_struct *vma)
++{
++	struct vma_slot *slot;
++
++	if (!vma->uksm_vma_slot)
++		return;
++
++	slot = vma->uksm_vma_slot;
++	spin_lock(&vma_slot_list_lock);
++	if (slot_in_uksm(slot)) {
++		/**
++		 * This slot has been added by ksmd, so move it to the del
++		 * list and wait for ksmd to free it.
++		 */
++		list_add_tail(&slot->slot_list, &vma_slot_del);
++	} else {
++		/**
++		 * It's still on the new list, so it's ok to free the slot
++		 * directly.
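++		 * A slot still on vma_slot_new has never been picked up by
++		 * ksmd, so nothing else can hold a reference to it.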
++ */ ++ list_del(&slot->slot_list); ++ free_vma_slot(slot); ++ } ++ spin_unlock(&vma_slot_list_lock); ++ vma->uksm_vma_slot = NULL; ++} ++ ++/* 32/3 < they < 32/2 */ ++#define shiftl 8 ++#define shiftr 12 ++ ++#define HASH_FROM_TO(from, to) \ ++for (index = from; index < to; index++) { \ ++ pos = random_nums[index]; \ ++ hash += key[pos]; \ ++ hash += (hash << shiftl); \ ++ hash ^= (hash >> shiftr); \ ++} ++ ++ ++#define HASH_FROM_DOWN_TO(from, to) \ ++for (index = from - 1; index >= to; index--) { \ ++ hash ^= (hash >> shiftr); \ ++ hash ^= (hash >> (shiftr*2)); \ ++ hash -= (hash << shiftl); \ ++ hash += (hash << (shiftl*2)); \ ++ pos = random_nums[index]; \ ++ hash -= key[pos]; \ ++} ++ ++/* ++ * The main random sample hash function. ++ */ ++static u32 random_sample_hash(void *addr, u32 hash_strength) ++{ ++ u32 hash = 0xdeadbeef; ++ int index, pos, loop = hash_strength; ++ u32 *key = (u32 *)addr; ++ ++ if (loop > HASH_STRENGTH_FULL) ++ loop = HASH_STRENGTH_FULL; ++ ++ HASH_FROM_TO(0, loop); ++ ++ if (hash_strength > HASH_STRENGTH_FULL) { ++ loop = hash_strength - HASH_STRENGTH_FULL; ++ HASH_FROM_TO(0, loop); ++ } ++ ++ return hash; ++} ++ ++ ++/** ++ * It's used when hash strength is adjusted ++ * ++ * @addr The page's virtual address ++ * @from The original hash strength ++ * @to The hash strength changed to ++ * @hash The hash value generated with "from" hash value ++ * ++ * return the hash value ++ */ ++static u32 delta_hash(void *addr, int from, int to, u32 hash) ++{ ++ u32 *key = (u32 *)addr; ++ int index, pos; /* make sure they are int type */ ++ ++ if (to > from) { ++ if (from >= HASH_STRENGTH_FULL) { ++ from -= HASH_STRENGTH_FULL; ++ to -= HASH_STRENGTH_FULL; ++ HASH_FROM_TO(from, to); ++ } else if (to <= HASH_STRENGTH_FULL) { ++ HASH_FROM_TO(from, to); ++ } else { ++ HASH_FROM_TO(from, HASH_STRENGTH_FULL); ++ HASH_FROM_TO(0, to - HASH_STRENGTH_FULL); ++ } ++ } else { ++ if (from <= HASH_STRENGTH_FULL) { ++ HASH_FROM_DOWN_TO(from, to); ++ } else if (to >= HASH_STRENGTH_FULL) { ++ from -= HASH_STRENGTH_FULL; ++ to -= HASH_STRENGTH_FULL; ++ HASH_FROM_DOWN_TO(from, to); ++ } else { ++ HASH_FROM_DOWN_TO(from - HASH_STRENGTH_FULL, 0); ++ HASH_FROM_DOWN_TO(HASH_STRENGTH_FULL, to); ++ } ++ } ++ ++ return hash; ++} ++ ++ ++ ++ ++#define CAN_OVERFLOW_U64(x, delta) (U64_MAX - (x) < (delta)) ++ ++/** ++ * ++ * Called when: rshash_pos or rshash_neg is about to overflow or a scan round ++ * has finished. ++ * ++ * return 0 if no page has been scanned since last call, 1 otherwise. 
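++ *
++ * The accumulators are fixed-point values scaled down by 2^benefit.base.
++ * Worked example: with base == 1, a delta of 1000 newly scanned pages
++ * is added as 1000 >> 1 == 500; when an addition would overflow u64,
++ * all three accumulators are halved and base is bumped, which keeps the
++ * ratios pos/scanned and neg/scanned intact.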
++ */ ++static inline int encode_benefit(void) ++{ ++ u64 scanned_delta, pos_delta, neg_delta; ++ unsigned long base = benefit.base; ++ ++ scanned_delta = uksm_pages_scanned - uksm_pages_scanned_last; ++ ++ if (!scanned_delta) ++ return 0; ++ ++ scanned_delta >>= base; ++ pos_delta = rshash_pos >> base; ++ neg_delta = rshash_neg >> base; ++ ++ if (CAN_OVERFLOW_U64(benefit.pos, pos_delta) || ++ CAN_OVERFLOW_U64(benefit.neg, neg_delta) || ++ CAN_OVERFLOW_U64(benefit.scanned, scanned_delta)) { ++ benefit.scanned >>= 1; ++ benefit.neg >>= 1; ++ benefit.pos >>= 1; ++ benefit.base++; ++ scanned_delta >>= 1; ++ pos_delta >>= 1; ++ neg_delta >>= 1; ++ } ++ ++ benefit.pos += pos_delta; ++ benefit.neg += neg_delta; ++ benefit.scanned += scanned_delta; ++ ++ BUG_ON(!benefit.scanned); ++ ++ rshash_pos = rshash_neg = 0; ++ uksm_pages_scanned_last = uksm_pages_scanned; ++ ++ return 1; ++} ++ ++static inline void reset_benefit(void) ++{ ++ benefit.pos = 0; ++ benefit.neg = 0; ++ benefit.base = 0; ++ benefit.scanned = 0; ++} ++ ++static inline void inc_rshash_pos(unsigned long delta) ++{ ++ if (CAN_OVERFLOW_U64(rshash_pos, delta)) ++ encode_benefit(); ++ ++ rshash_pos += delta; ++} ++ ++static inline void inc_rshash_neg(unsigned long delta) ++{ ++ if (CAN_OVERFLOW_U64(rshash_neg, delta)) ++ encode_benefit(); ++ ++ rshash_neg += delta; ++} ++ ++ ++static inline u32 page_hash(struct page *page, unsigned long hash_strength, ++ int cost_accounting) ++{ ++ u32 val; ++ unsigned long delta; ++ ++ void *addr = kmap_atomic(page); ++ ++ val = random_sample_hash(addr, hash_strength); ++ kunmap_atomic(addr); ++ ++ if (cost_accounting) { ++ if (HASH_STRENGTH_FULL > hash_strength) ++ delta = HASH_STRENGTH_FULL - hash_strength; ++ else ++ delta = 0; ++ ++ inc_rshash_pos(delta); ++ } ++ ++ return val; ++} ++ ++static int memcmp_pages(struct page *page1, struct page *page2, ++ int cost_accounting) ++{ ++ char *addr1, *addr2; ++ int ret; ++ ++ addr1 = kmap_atomic(page1); ++ addr2 = kmap_atomic(page2); ++ ret = memcmp(addr1, addr2, PAGE_SIZE); ++ kunmap_atomic(addr2); ++ kunmap_atomic(addr1); ++ ++ if (cost_accounting) ++ inc_rshash_neg(memcmp_cost); ++ ++ return ret; ++} ++ ++static inline int pages_identical(struct page *page1, struct page *page2) ++{ ++ return !memcmp_pages(page1, page2, 0); ++} ++ ++static inline int is_page_full_zero(struct page *page) ++{ ++ char *addr; ++ int ret; ++ ++ addr = kmap_atomic(page); ++ ret = is_full_zero(addr, PAGE_SIZE); ++ kunmap_atomic(addr); ++ ++ return ret; ++} ++ ++static int write_protect_page(struct vm_area_struct *vma, struct page *page, ++ pte_t *orig_pte, pte_t *old_pte) ++{ ++ struct mm_struct *mm = vma->vm_mm; ++ unsigned long addr; ++ pte_t *ptep; ++ spinlock_t *ptl; ++ int swapped; ++ int err = -EFAULT; ++ unsigned long mmun_start; /* For mmu_notifiers */ ++ unsigned long mmun_end; /* For mmu_notifiers */ ++ ++ addr = page_address_in_vma(page, vma); ++ if (addr == -EFAULT) ++ goto out; ++ ++ BUG_ON(PageTransCompound(page)); ++ ++ mmun_start = addr; ++ mmun_end = addr + PAGE_SIZE; ++ mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); ++ ++ ptep = page_check_address(page, mm, addr, &ptl, 0); ++ if (!ptep) ++ goto out_mn; ++ ++ if (old_pte) ++ *old_pte = *ptep; ++ ++ if (pte_write(*ptep) || pte_dirty(*ptep)) { ++ pte_t entry; ++ ++ swapped = PageSwapCache(page); ++ flush_cache_page(vma, addr, page_to_pfn(page)); ++ /* ++ * Ok this is tricky, when get_user_pages_fast() run it doesnt ++ * take any lock, therefore the check that we are going to make ++ * with the 
pagecount against the mapcount is racy, and
++		 * O_DIRECT can happen right after the check.
++		 * So we clear the pte and flush the TLB before the check;
++		 * this assures us that no O_DIRECT can happen after the
++		 * check or in the middle of the check.
++		 */
++		entry = ptep_clear_flush(vma, addr, ptep);
++		/*
++		 * Check that no O_DIRECT or similar I/O is in progress on
++		 * the page
++		 */
++		if (page_mapcount(page) + 1 + swapped != page_count(page)) {
++			set_pte_at(mm, addr, ptep, entry);
++			goto out_unlock;
++		}
++		if (pte_dirty(entry))
++			set_page_dirty(page);
++		entry = pte_mkclean(pte_wrprotect(entry));
++		set_pte_at_notify(mm, addr, ptep, entry);
++	}
++	*orig_pte = *ptep;
++	err = 0;
++
++out_unlock:
++	pte_unmap_unlock(ptep, ptl);
++out_mn:
++	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
++out:
++	return err;
++}
++
++#define MERGE_ERR_PGERR		1 /* the page is invalid, cannot continue */
++#define MERGE_ERR_COLLI		2 /* there is a collision */
++#define MERGE_ERR_COLLI_MAX	3 /* collision at the max hash strength */
++#define MERGE_ERR_CHANGED	4 /* the page has changed since last hash */
++
++
++/**
++ * replace_page - replace page in vma by new ksm page
++ * @vma: vma that holds the pte pointing to page
++ * @page: the page we are replacing by kpage
++ * @kpage: the ksm page we replace page by
++ * @orig_pte: the original value of the pte
++ *
++ * Returns 0 on success, MERGE_ERR_PGERR on failure.
++ */
++static int replace_page(struct vm_area_struct *vma, struct page *page,
++			struct page *kpage, pte_t orig_pte)
++{
++	struct mm_struct *mm = vma->vm_mm;
++	pgd_t *pgd;
++	pud_t *pud;
++	pmd_t *pmd;
++	pte_t *ptep;
++	spinlock_t *ptl;
++	pte_t entry;
++
++	unsigned long addr;
++	int err = MERGE_ERR_PGERR;
++	unsigned long mmun_start;	/* For mmu_notifiers */
++	unsigned long mmun_end;		/* For mmu_notifiers */
++
++	addr = page_address_in_vma(page, vma);
++	if (addr == -EFAULT)
++		goto out;
++
++	pgd = pgd_offset(mm, addr);
++	if (!pgd_present(*pgd))
++		goto out;
++
++	pud = pud_offset(pgd, addr);
++	if (!pud_present(*pud))
++		goto out;
++
++	pmd = pmd_offset(pud, addr);
++	BUG_ON(pmd_trans_huge(*pmd));
++	if (!pmd_present(*pmd))
++		goto out;
++
++	mmun_start = addr;
++	mmun_end = addr + PAGE_SIZE;
++	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
++
++	ptep = pte_offset_map_lock(mm, pmd, addr, &ptl);
++	if (!pte_same(*ptep, orig_pte)) {
++		pte_unmap_unlock(ptep, ptl);
++		goto out_mn;
++	}
++
++	flush_cache_page(vma, addr, pte_pfn(*ptep));
++	ptep_clear_flush(vma, addr, ptep);
++	entry = mk_pte(kpage, vma->vm_page_prot);
++
++	/* special treatment is needed for zero_page */
++	if ((page_to_pfn(kpage) == uksm_zero_pfn) ||
++	    (page_to_pfn(kpage) == zero_pfn))
++		entry = pte_mkspecial(entry);
++	else {
++		get_page(kpage);
++		page_add_anon_rmap(kpage, vma, addr);
++	}
++
++	set_pte_at_notify(mm, addr, ptep, entry);
++
++	page_remove_rmap(page);
++	if (!page_mapped(page))
++		try_to_free_swap(page);
++	put_page(page);
++
++	pte_unmap_unlock(ptep, ptl);
++	err = 0;
++out_mn:
++	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
++out:
++	return err;
++}
++
++
++/**
++ * Fully hash a page with HASH_STRENGTH_MAX and return a non-zero hash
++ * value. A zero hash value at HASH_STRENGTH_MAX is used to indicate
++ * that the hash_max member has not been calculated.
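++ *
++ * Since 0 is reserved as the "not yet computed" sentinel for hash_max,
++ * a genuine all-zero result from delta_hash() is nudged to 1 below, and
++ * the extra sampling work is charged to rshash_neg.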
++ * ++ * @page The page needs to be hashed ++ * @hash_old The hash value calculated with current hash strength ++ * ++ * return the new hash value calculated at HASH_STRENGTH_MAX ++ */ ++static inline u32 page_hash_max(struct page *page, u32 hash_old) ++{ ++ u32 hash_max = 0; ++ void *addr; ++ ++ addr = kmap_atomic(page); ++ hash_max = delta_hash(addr, hash_strength, ++ HASH_STRENGTH_MAX, hash_old); ++ ++ kunmap_atomic(addr); ++ ++ if (!hash_max) ++ hash_max = 1; ++ ++ inc_rshash_neg(HASH_STRENGTH_MAX - hash_strength); ++ return hash_max; ++} ++ ++/* ++ * We compare the hash again, to ensure that it is really a hash collision ++ * instead of being caused by page write. ++ */ ++static inline int check_collision(struct rmap_item *rmap_item, ++ u32 hash) ++{ ++ int err; ++ struct page *page = rmap_item->page; ++ ++ /* if this rmap_item has already been hash_maxed, then the collision ++ * must appears in the second-level rbtree search. In this case we check ++ * if its hash_max value has been changed. Otherwise, the collision ++ * happens in the first-level rbtree search, so we check against it's ++ * current hash value. ++ */ ++ if (rmap_item->hash_max) { ++ inc_rshash_neg(memcmp_cost); ++ inc_rshash_neg(HASH_STRENGTH_MAX - hash_strength); ++ ++ if (rmap_item->hash_max == page_hash_max(page, hash)) ++ err = MERGE_ERR_COLLI; ++ else ++ err = MERGE_ERR_CHANGED; ++ } else { ++ inc_rshash_neg(memcmp_cost + hash_strength); ++ ++ if (page_hash(page, hash_strength, 0) == hash) ++ err = MERGE_ERR_COLLI; ++ else ++ err = MERGE_ERR_CHANGED; ++ } ++ ++ return err; ++} ++ ++static struct page *page_trans_compound_anon(struct page *page) ++{ ++ if (PageTransCompound(page)) { ++ struct page *head = compound_trans_head(page); ++ /* ++ * head may actually be splitted and freed from under ++ * us but it's ok here. ++ */ ++ if (PageAnon(head)) ++ return head; ++ } ++ return NULL; ++} ++ ++static int page_trans_compound_anon_split(struct page *page) ++{ ++ int ret = 0; ++ struct page *transhuge_head = page_trans_compound_anon(page); ++ if (transhuge_head) { ++ /* Get the reference on the head to split it. */ ++ if (get_page_unless_zero(transhuge_head)) { ++ /* ++ * Recheck we got the reference while the head ++ * was still anonymous. ++ */ ++ if (PageAnon(transhuge_head)) ++ ret = split_huge_page(transhuge_head); ++ else ++ /* ++ * Retry later if split_huge_page run ++ * from under us. ++ */ ++ ret = 1; ++ put_page(transhuge_head); ++ } else ++ /* Retry later if split_huge_page run from under us. */ ++ ret = 1; ++ } ++ return ret; ++} ++ ++/** ++ * Try to merge a rmap_item.page with a kpage in stable node. kpage must ++ * already be a ksm page. ++ * ++ * @return 0 if the pages were merged, -EFAULT otherwise. ++ */ ++static int try_to_merge_with_uksm_page(struct rmap_item *rmap_item, ++ struct page *kpage, u32 hash) ++{ ++ struct vm_area_struct *vma = rmap_item->slot->vma; ++ struct mm_struct *mm = vma->vm_mm; ++ pte_t orig_pte = __pte(0); ++ int err = MERGE_ERR_PGERR; ++ struct page *page; ++ ++ if (uksm_test_exit(mm)) ++ goto out; ++ ++ page = rmap_item->page; ++ ++ if (page == kpage) { /* ksm page forked */ ++ err = 0; ++ goto out; ++ } ++ ++ if (PageTransCompound(page) && page_trans_compound_anon_split(page)) ++ goto out; ++ BUG_ON(PageTransCompound(page)); ++ ++ if (!PageAnon(page) || !PageKsm(kpage)) ++ goto out; ++ ++ /* ++ * We need the page lock to read a stable PageSwapCache in ++ * write_protect_page(). 
We use trylock_page() instead of ++ * lock_page() because we don't want to wait here - we ++ * prefer to continue scanning and merging different pages, ++ * then come back to this page when it is unlocked. ++ */ ++ if (!trylock_page(page)) ++ goto out; ++ /* ++ * If this anonymous page is mapped only here, its pte may need ++ * to be write-protected. If it's mapped elsewhere, all of its ++ * ptes are necessarily already write-protected. But in either ++ * case, we need to lock and check page_count is not raised. ++ */ ++ if (write_protect_page(vma, page, &orig_pte, NULL) == 0) { ++ if (pages_identical(page, kpage)) ++ err = replace_page(vma, page, kpage, orig_pte); ++ else ++ err = check_collision(rmap_item, hash); ++ } ++ ++ if ((vma->vm_flags & VM_LOCKED) && kpage && !err) { ++ munlock_vma_page(page); ++ if (!PageMlocked(kpage)) { ++ unlock_page(page); ++ lock_page(kpage); ++ mlock_vma_page(kpage); ++ page = kpage; /* for final unlock */ ++ } ++ } ++ ++ unlock_page(page); ++out: ++ return err; ++} ++ ++ ++ ++/** ++ * If two pages fail to merge in try_to_merge_two_pages, then we have a chance ++ * to restore a page mapping that has been changed in try_to_merge_two_pages. ++ * ++ * @return 0 on success. ++ */ ++static int restore_uksm_page_pte(struct vm_area_struct *vma, unsigned long addr, ++ pte_t orig_pte, pte_t wprt_pte) ++{ ++ struct mm_struct *mm = vma->vm_mm; ++ pgd_t *pgd; ++ pud_t *pud; ++ pmd_t *pmd; ++ pte_t *ptep; ++ spinlock_t *ptl; ++ ++ int err = -EFAULT; ++ ++ pgd = pgd_offset(mm, addr); ++ if (!pgd_present(*pgd)) ++ goto out; ++ ++ pud = pud_offset(pgd, addr); ++ if (!pud_present(*pud)) ++ goto out; ++ ++ pmd = pmd_offset(pud, addr); ++ if (!pmd_present(*pmd)) ++ goto out; ++ ++ ptep = pte_offset_map_lock(mm, pmd, addr, &ptl); ++ if (!pte_same(*ptep, wprt_pte)) { ++ /* already copied, let it be */ ++ pte_unmap_unlock(ptep, ptl); ++ goto out; ++ } ++ ++ /* ++ * Good boy, still here. When we still get the ksm page, it does not ++ * return to the free page pool, there is no way that a pte was changed ++ * to other page and gets back to this page. And remind that ksm page ++ * do not reuse in do_wp_page(). So it's safe to restore the original ++ * pte. ++ */ ++ flush_cache_page(vma, addr, pte_pfn(*ptep)); ++ ptep_clear_flush(vma, addr, ptep); ++ set_pte_at_notify(mm, addr, ptep, orig_pte); ++ ++ pte_unmap_unlock(ptep, ptl); ++ err = 0; ++out: ++ return err; ++} ++ ++/** ++ * try_to_merge_two_pages() - take two identical pages and prepare ++ * them to be merged into one page(rmap_item->page) ++ * ++ * @return 0 if we successfully merged two identical pages into ++ * one ksm page. MERGE_ERR_COLLI if it's only a hash collision ++ * search in rbtree. MERGE_ERR_CHANGED if rmap_item has been ++ * changed since it's hashed. MERGE_ERR_PGERR otherwise. 
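++ *
++ * The merge sequence, in outline:
++ *
++ *	write_protect_page(vma1, page)      (orig_pte1/wprt_pte1 saved
++ *	                                     for rollback)
++ *	set_page_stable_node(page, NULL)    (upgrade PageAnon -> PageKsm)
++ *	write_protect_page(vma2, tree_page)
++ *	replace_page(vma2, tree_page, page) (map vma2 to the shared page)
++ *
++ * On failure, the saved pte and mapping of "page" are restored via
++ * restore_uksm_page_pte().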
++ * ++ */ ++static int try_to_merge_two_pages(struct rmap_item *rmap_item, ++ struct rmap_item *tree_rmap_item, ++ u32 hash) ++{ ++ pte_t orig_pte1 = __pte(0), orig_pte2 = __pte(0); ++ pte_t wprt_pte1 = __pte(0), wprt_pte2 = __pte(0); ++ struct vm_area_struct *vma1 = rmap_item->slot->vma; ++ struct vm_area_struct *vma2 = tree_rmap_item->slot->vma; ++ struct page *page = rmap_item->page; ++ struct page *tree_page = tree_rmap_item->page; ++ int err = MERGE_ERR_PGERR; ++ struct address_space *saved_mapping; ++ ++ ++ if (rmap_item->page == tree_rmap_item->page) ++ goto out; ++ ++ if (PageTransCompound(page) && page_trans_compound_anon_split(page)) ++ goto out; ++ BUG_ON(PageTransCompound(page)); ++ ++ if (PageTransCompound(tree_page) && page_trans_compound_anon_split(tree_page)) ++ goto out; ++ BUG_ON(PageTransCompound(tree_page)); ++ ++ if (!PageAnon(page) || !PageAnon(tree_page)) ++ goto out; ++ ++ if (!trylock_page(page)) ++ goto out; ++ ++ ++ if (write_protect_page(vma1, page, &wprt_pte1, &orig_pte1) != 0) { ++ unlock_page(page); ++ goto out; ++ } ++ ++ /* ++ * While we hold page lock, upgrade page from ++ * PageAnon+anon_vma to PageKsm+NULL stable_node: ++ * stable_tree_insert() will update stable_node. ++ */ ++ saved_mapping = page->mapping; ++ set_page_stable_node(page, NULL); ++ mark_page_accessed(page); ++ unlock_page(page); ++ ++ if (!trylock_page(tree_page)) ++ goto restore_out; ++ ++ if (write_protect_page(vma2, tree_page, &wprt_pte2, &orig_pte2) != 0) { ++ unlock_page(tree_page); ++ goto restore_out; ++ } ++ ++ if (pages_identical(page, tree_page)) { ++ err = replace_page(vma2, tree_page, page, wprt_pte2); ++ if (err) { ++ unlock_page(tree_page); ++ goto restore_out; ++ } ++ ++ if ((vma2->vm_flags & VM_LOCKED)) { ++ munlock_vma_page(tree_page); ++ if (!PageMlocked(page)) { ++ unlock_page(tree_page); ++ lock_page(page); ++ mlock_vma_page(page); ++ tree_page = page; /* for final unlock */ ++ } ++ } ++ ++ unlock_page(tree_page); ++ ++ goto out; /* success */ ++ ++ } else { ++ if (tree_rmap_item->hash_max && ++ tree_rmap_item->hash_max == rmap_item->hash_max) { ++ err = MERGE_ERR_COLLI_MAX; ++ } else if (page_hash(page, hash_strength, 0) == ++ page_hash(tree_page, hash_strength, 0)) { ++ inc_rshash_neg(memcmp_cost + hash_strength * 2); ++ err = MERGE_ERR_COLLI; ++ } else { ++ err = MERGE_ERR_CHANGED; ++ } ++ ++ unlock_page(tree_page); ++ } ++ ++restore_out: ++ lock_page(page); ++ if (!restore_uksm_page_pte(vma1, get_rmap_addr(rmap_item), ++ orig_pte1, wprt_pte1)) ++ page->mapping = saved_mapping; ++ ++ unlock_page(page); ++out: ++ return err; ++} ++ ++static inline int hash_cmp(u32 new_val, u32 node_val) ++{ ++ if (new_val > node_val) ++ return 1; ++ else if (new_val < node_val) ++ return -1; ++ else ++ return 0; ++} ++ ++static inline u32 rmap_item_hash_max(struct rmap_item *item, u32 hash) ++{ ++ u32 hash_max = item->hash_max; ++ ++ if (!hash_max) { ++ hash_max = page_hash_max(item->page, hash); ++ ++ item->hash_max = hash_max; ++ } ++ ++ return hash_max; ++} ++ ++ ++ ++/** ++ * stable_tree_search() - search the stable tree for a page ++ * ++ * @item: the rmap_item we are comparing with ++ * @hash: the hash value of this item->page already calculated ++ * ++ * @return the page we have found, NULL otherwise. The page returned has ++ * been gotten. 
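++ *
++ * The lookup is two-level: the outer rbtree is keyed by the regular
++ * hash, and only when a tree_node holds more than one stable_node is
++ * the page re-hashed at full strength (hash_max) to walk the inner
++ * sub-tree.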
++ */ ++static struct page *stable_tree_search(struct rmap_item *item, u32 hash) ++{ ++ struct rb_node *node = root_stable_treep->rb_node; ++ struct tree_node *tree_node; ++ unsigned long hash_max; ++ struct page *page = item->page; ++ struct stable_node *stable_node; ++ ++ stable_node = page_stable_node(page); ++ if (stable_node) { ++ /* ksm page forked, that is ++ * if (PageKsm(page) && !in_stable_tree(rmap_item)) ++ * it's actually gotten once outside. ++ */ ++ get_page(page); ++ return page; ++ } ++ ++ while (node) { ++ int cmp; ++ ++ tree_node = rb_entry(node, struct tree_node, node); ++ ++ cmp = hash_cmp(hash, tree_node->hash); ++ ++ if (cmp < 0) ++ node = node->rb_left; ++ else if (cmp > 0) ++ node = node->rb_right; ++ else ++ break; ++ } ++ ++ if (!node) ++ return NULL; ++ ++ if (tree_node->count == 1) { ++ stable_node = rb_entry(tree_node->sub_root.rb_node, ++ struct stable_node, node); ++ BUG_ON(!stable_node); ++ ++ goto get_page_out; ++ } ++ ++ /* ++ * ok, we have to search the second ++ * level subtree, hash the page to a ++ * full strength. ++ */ ++ node = tree_node->sub_root.rb_node; ++ BUG_ON(!node); ++ hash_max = rmap_item_hash_max(item, hash); ++ ++ while (node) { ++ int cmp; ++ ++ stable_node = rb_entry(node, struct stable_node, node); ++ ++ cmp = hash_cmp(hash_max, stable_node->hash_max); ++ ++ if (cmp < 0) ++ node = node->rb_left; ++ else if (cmp > 0) ++ node = node->rb_right; ++ else ++ goto get_page_out; ++ } ++ ++ return NULL; ++ ++get_page_out: ++ page = get_uksm_page(stable_node, 1, 1); ++ return page; ++} ++ ++static int try_merge_rmap_item(struct rmap_item *item, ++ struct page *kpage, ++ struct page *tree_page) ++{ ++ spinlock_t *ptl; ++ pte_t *ptep; ++ unsigned long addr; ++ struct vm_area_struct *vma = item->slot->vma; ++ ++ addr = get_rmap_addr(item); ++ ptep = page_check_address(kpage, vma->vm_mm, addr, &ptl, 0); ++ if (!ptep) ++ return 0; ++ ++ if (pte_write(*ptep)) { ++ /* has changed, abort! */ ++ pte_unmap_unlock(ptep, ptl); ++ return 0; ++ } ++ ++ get_page(tree_page); ++ page_add_anon_rmap(tree_page, vma, addr); ++ ++ flush_cache_page(vma, addr, pte_pfn(*ptep)); ++ ptep_clear_flush(vma, addr, ptep); ++ set_pte_at_notify(vma->vm_mm, addr, ptep, ++ mk_pte(tree_page, vma->vm_page_prot)); ++ ++ page_remove_rmap(kpage); ++ put_page(kpage); ++ ++ pte_unmap_unlock(ptep, ptl); ++ ++ return 1; ++} ++ ++/** ++ * try_to_merge_with_stable_page() - when two rmap_items need to be inserted ++ * into stable tree, the page was found to be identical to a stable ksm page, ++ * this is the last chance we can merge them into one. ++ * ++ * @item1: the rmap_item holding the page which we wanted to insert ++ * into stable tree. 
++ * @item2: the other rmap_item we found during the unstable tree search
++ * @kpage: in/out, the old page currently mapped by the two rmap_items;
++ *         on success it is replaced with tree_page
++ * @tree_page: the page we found identical in stable tree node
++ * @success1: return if item1 is successfully merged
++ * @success2: return if item2 is successfully merged
++ */
++static void try_merge_with_stable(struct rmap_item *item1,
++				  struct rmap_item *item2,
++				  struct page **kpage,
++				  struct page *tree_page,
++				  int *success1, int *success2)
++{
++	struct vm_area_struct *vma1 = item1->slot->vma;
++	struct vm_area_struct *vma2 = item2->slot->vma;
++	*success1 = 0;
++	*success2 = 0;
++
++	if (unlikely(*kpage == tree_page)) {
++		/* I don't think this can really happen */
++		printk(KERN_WARNING "UKSM: unexpected condition detected in "
++			"try_merge_with_stable() -- *kpage == tree_page !\n");
++		*success1 = 1;
++		*success2 = 1;
++		return;
++	}
++
++	if (!PageAnon(*kpage) || !PageKsm(*kpage))
++		goto failed;
++
++	if (!trylock_page(tree_page))
++		goto failed;
++
++	/* If the oldpage is still ksm and still pointed
++	 * to in the right place, and still write protected,
++	 * we are confident it's not changed, no need to
++	 * memcmp anymore.
++	 * Beware: we cannot take nested pte locks; deadlock risk.
++	 */
++	if (!try_merge_rmap_item(item1, *kpage, tree_page))
++		goto unlock_failed;
++
++	/* ok, now vma2; bear in mind that pte1 is already set */
++	if (!try_merge_rmap_item(item2, *kpage, tree_page))
++		goto success_1;
++
++	*success2 = 1;
++success_1:
++	*success1 = 1;
++
++
++	if ((*success1 && vma1->vm_flags & VM_LOCKED) ||
++	    (*success2 && vma2->vm_flags & VM_LOCKED)) {
++		munlock_vma_page(*kpage);
++		if (!PageMlocked(tree_page))
++			mlock_vma_page(tree_page);
++	}
++
++	/*
++	 * The caller does not need oldpage any more, so we can drop the
++	 * lock now.
++	 */
++	unlock_page(*kpage);
++	*kpage = tree_page; /* Unlocked by the caller.
*/ ++ return; ++ ++unlock_failed: ++ unlock_page(tree_page); ++failed: ++ return; ++} ++ ++static inline void stable_node_hash_max(struct stable_node *node, ++ struct page *page, u32 hash) ++{ ++ u32 hash_max = node->hash_max; ++ ++ if (!hash_max) { ++ hash_max = page_hash_max(page, hash); ++ node->hash_max = hash_max; ++ } ++} ++ ++static inline ++struct stable_node *new_stable_node(struct tree_node *tree_node, ++ struct page *kpage, u32 hash_max) ++{ ++ struct stable_node *new_stable_node; ++ ++ new_stable_node = alloc_stable_node(); ++ if (!new_stable_node) ++ return NULL; ++ ++ new_stable_node->kpfn = page_to_pfn(kpage); ++ new_stable_node->hash_max = hash_max; ++ new_stable_node->tree_node = tree_node; ++ set_page_stable_node(kpage, new_stable_node); ++ ++ return new_stable_node; ++} ++ ++static inline ++struct stable_node *first_level_insert(struct tree_node *tree_node, ++ struct rmap_item *rmap_item, ++ struct rmap_item *tree_rmap_item, ++ struct page **kpage, u32 hash, ++ int *success1, int *success2) ++{ ++ int cmp; ++ struct page *tree_page; ++ u32 hash_max = 0; ++ struct stable_node *stable_node, *new_snode; ++ struct rb_node *parent = NULL, **new; ++ ++ /* this tree node contains no sub-tree yet */ ++ stable_node = rb_entry(tree_node->sub_root.rb_node, ++ struct stable_node, node); ++ ++ tree_page = get_uksm_page(stable_node, 1, 0); ++ if (tree_page) { ++ cmp = memcmp_pages(*kpage, tree_page, 1); ++ if (!cmp) { ++ try_merge_with_stable(rmap_item, tree_rmap_item, kpage, ++ tree_page, success1, success2); ++ put_page(tree_page); ++ if (!*success1 && !*success2) ++ goto failed; ++ ++ return stable_node; ++ ++ } else { ++ /* ++ * collision in first level try to create a subtree. ++ * A new node need to be created. ++ */ ++ put_page(tree_page); ++ ++ stable_node_hash_max(stable_node, tree_page, ++ tree_node->hash); ++ hash_max = rmap_item_hash_max(rmap_item, hash); ++ cmp = hash_cmp(hash_max, stable_node->hash_max); ++ ++ parent = &stable_node->node; ++ if (cmp < 0) { ++ new = &parent->rb_left; ++ } else if (cmp > 0) { ++ new = &parent->rb_right; ++ } else { ++ goto failed; ++ } ++ } ++ ++ } else { ++ /* the only stable_node deleted, we reuse its tree_node. 
++ */ ++ parent = NULL; ++ new = &tree_node->sub_root.rb_node; ++ } ++ ++ new_snode = new_stable_node(tree_node, *kpage, hash_max); ++ if (!new_snode) ++ goto failed; ++ ++ rb_link_node(&new_snode->node, parent, new); ++ rb_insert_color(&new_snode->node, &tree_node->sub_root); ++ tree_node->count++; ++ *success1 = *success2 = 1; ++ ++ return new_snode; ++ ++failed: ++ return NULL; ++} ++ ++static inline ++struct stable_node *stable_subtree_insert(struct tree_node *tree_node, ++ struct rmap_item *rmap_item, ++ struct rmap_item *tree_rmap_item, ++ struct page **kpage, u32 hash, ++ int *success1, int *success2) ++{ ++ struct page *tree_page; ++ u32 hash_max; ++ struct stable_node *stable_node, *new_snode; ++ struct rb_node *parent, **new; ++ ++research: ++ parent = NULL; ++ new = &tree_node->sub_root.rb_node; ++ BUG_ON(!*new); ++ hash_max = rmap_item_hash_max(rmap_item, hash); ++ while (*new) { ++ int cmp; ++ ++ stable_node = rb_entry(*new, struct stable_node, node); ++ ++ cmp = hash_cmp(hash_max, stable_node->hash_max); ++ ++ if (cmp < 0) { ++ parent = *new; ++ new = &parent->rb_left; ++ } else if (cmp > 0) { ++ parent = *new; ++ new = &parent->rb_right; ++ } else { ++ tree_page = get_uksm_page(stable_node, 1, 0); ++ if (tree_page) { ++ cmp = memcmp_pages(*kpage, tree_page, 1); ++ if (!cmp) { ++ try_merge_with_stable(rmap_item, ++ tree_rmap_item, kpage, ++ tree_page, success1, success2); ++ ++ put_page(tree_page); ++ if (!*success1 && !*success2) ++ goto failed; ++ /* ++ * successfully merged with a stable ++ * node ++ */ ++ return stable_node; ++ } else { ++ put_page(tree_page); ++ goto failed; ++ } ++ } else { ++ /* ++ * stable node may be deleted, ++ * and subtree maybe ++ * restructed, cannot ++ * continue, research it. ++ */ ++ if (tree_node->count) { ++ goto research; ++ } else { ++ /* reuse the tree node*/ ++ parent = NULL; ++ new = &tree_node->sub_root.rb_node; ++ } ++ } ++ } ++ } ++ ++ new_snode = new_stable_node(tree_node, *kpage, hash_max); ++ if (!new_snode) ++ goto failed; ++ ++ rb_link_node(&new_snode->node, parent, new); ++ rb_insert_color(&new_snode->node, &tree_node->sub_root); ++ tree_node->count++; ++ *success1 = *success2 = 1; ++ ++ return new_snode; ++ ++failed: ++ return NULL; ++} ++ ++ ++/** ++ * stable_tree_insert() - try to insert a merged page in unstable tree to ++ * the stable tree ++ * ++ * @kpage: the page need to be inserted ++ * @hash: the current hash of this page ++ * @rmap_item: the rmap_item being scanned ++ * @tree_rmap_item: the rmap_item found on unstable tree ++ * @success1: return if rmap_item is merged ++ * @success2: return if tree_rmap_item is merged ++ * ++ * @return the stable_node on stable tree if at least one ++ * rmap_item is inserted into stable tree, NULL ++ * otherwise. 
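++ *
++ * Three cases are handled below: an existing tree_node with a single
++ * stable_node goes through first_level_insert(); one that already has
++ * a sub-tree goes through stable_subtree_insert(); and when no
++ * tree_node matches the hash, a fresh one is allocated with the new
++ * stable_node as its only child.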
++ */ ++static struct stable_node * ++stable_tree_insert(struct page **kpage, u32 hash, ++ struct rmap_item *rmap_item, ++ struct rmap_item *tree_rmap_item, ++ int *success1, int *success2) ++{ ++ struct rb_node **new = &root_stable_treep->rb_node; ++ struct rb_node *parent = NULL; ++ struct stable_node *stable_node; ++ struct tree_node *tree_node; ++ u32 hash_max = 0; ++ ++ *success1 = *success2 = 0; ++ ++ while (*new) { ++ int cmp; ++ ++ tree_node = rb_entry(*new, struct tree_node, node); ++ ++ cmp = hash_cmp(hash, tree_node->hash); ++ ++ if (cmp < 0) { ++ parent = *new; ++ new = &parent->rb_left; ++ } else if (cmp > 0) { ++ parent = *new; ++ new = &parent->rb_right; ++ } else ++ break; ++ } ++ ++ if (*new) { ++ if (tree_node->count == 1) { ++ stable_node = first_level_insert(tree_node, rmap_item, ++ tree_rmap_item, kpage, ++ hash, success1, success2); ++ } else { ++ stable_node = stable_subtree_insert(tree_node, ++ rmap_item, tree_rmap_item, kpage, ++ hash, success1, success2); ++ } ++ } else { ++ ++ /* no tree node found */ ++ tree_node = alloc_tree_node(stable_tree_node_listp); ++ if (!tree_node) { ++ stable_node = NULL; ++ goto out; ++ } ++ ++ stable_node = new_stable_node(tree_node, *kpage, hash_max); ++ if (!stable_node) { ++ free_tree_node(tree_node); ++ goto out; ++ } ++ ++ tree_node->hash = hash; ++ rb_link_node(&tree_node->node, parent, new); ++ rb_insert_color(&tree_node->node, root_stable_treep); ++ parent = NULL; ++ new = &tree_node->sub_root.rb_node; ++ ++ rb_link_node(&stable_node->node, parent, new); ++ rb_insert_color(&stable_node->node, &tree_node->sub_root); ++ tree_node->count++; ++ *success1 = *success2 = 1; ++ } ++ ++out: ++ return stable_node; ++} ++ ++ ++/** ++ * get_tree_rmap_item_page() - try to get the page and lock the mmap_sem ++ * ++ * @return 0 on success, -EBUSY if unable to lock the mmap_sem, ++ * -EINVAL if the page mapping has been changed. ++ */ ++static inline int get_tree_rmap_item_page(struct rmap_item *tree_rmap_item) ++{ ++ int err; ++ ++ err = get_mergeable_page_lock_mmap(tree_rmap_item); ++ ++ if (err == -EINVAL) { ++ /* its page map has been changed, remove it */ ++ remove_rmap_item_from_tree(tree_rmap_item); ++ } ++ ++ /* The page is gotten and mmap_sem is locked now. */ ++ return err; ++} ++ ++ ++/** ++ * unstable_tree_search_insert() - search an unstable tree rmap_item with the ++ * same hash value. 
Get its page and trylock the mmap_sem ++ */ ++static inline ++struct rmap_item *unstable_tree_search_insert(struct rmap_item *rmap_item, ++ u32 hash) ++ ++{ ++ struct rb_node **new = &root_unstable_tree.rb_node; ++ struct rb_node *parent = NULL; ++ struct tree_node *tree_node; ++ u32 hash_max; ++ struct rmap_item *tree_rmap_item; ++ ++ while (*new) { ++ int cmp; ++ ++ tree_node = rb_entry(*new, struct tree_node, node); ++ ++ cmp = hash_cmp(hash, tree_node->hash); ++ ++ if (cmp < 0) { ++ parent = *new; ++ new = &parent->rb_left; ++ } else if (cmp > 0) { ++ parent = *new; ++ new = &parent->rb_right; ++ } else ++ break; ++ } ++ ++ if (*new) { ++ /* got the tree_node */ ++ if (tree_node->count == 1) { ++ tree_rmap_item = rb_entry(tree_node->sub_root.rb_node, ++ struct rmap_item, node); ++ BUG_ON(!tree_rmap_item); ++ ++ goto get_page_out; ++ } ++ ++ /* well, search the collision subtree */ ++ new = &tree_node->sub_root.rb_node; ++ BUG_ON(!*new); ++ hash_max = rmap_item_hash_max(rmap_item, hash); ++ ++ while (*new) { ++ int cmp; ++ ++ tree_rmap_item = rb_entry(*new, struct rmap_item, ++ node); ++ ++ cmp = hash_cmp(hash_max, tree_rmap_item->hash_max); ++ parent = *new; ++ if (cmp < 0) ++ new = &parent->rb_left; ++ else if (cmp > 0) ++ new = &parent->rb_right; ++ else ++ goto get_page_out; ++ } ++ } else { ++ /* alloc a new tree_node */ ++ tree_node = alloc_tree_node(&unstable_tree_node_list); ++ if (!tree_node) ++ return NULL; ++ ++ tree_node->hash = hash; ++ rb_link_node(&tree_node->node, parent, new); ++ rb_insert_color(&tree_node->node, &root_unstable_tree); ++ parent = NULL; ++ new = &tree_node->sub_root.rb_node; ++ } ++ ++ /* did not found even in sub-tree */ ++ rmap_item->tree_node = tree_node; ++ rmap_item->address |= UNSTABLE_FLAG; ++ rmap_item->hash_round = uksm_hash_round; ++ rb_link_node(&rmap_item->node, parent, new); ++ rb_insert_color(&rmap_item->node, &tree_node->sub_root); ++ ++ uksm_pages_unshared++; ++ return NULL; ++ ++get_page_out: ++ if (tree_rmap_item->page == rmap_item->page) ++ return NULL; ++ ++ if (get_tree_rmap_item_page(tree_rmap_item)) ++ return NULL; ++ ++ return tree_rmap_item; ++} ++ ++static void hold_anon_vma(struct rmap_item *rmap_item, ++ struct anon_vma *anon_vma) ++{ ++ rmap_item->anon_vma = anon_vma; ++ get_anon_vma(anon_vma); ++} ++ ++ ++/** ++ * stable_tree_append() - append a rmap_item to a stable node. Deduplication ++ * ratio statistics is done in this function. 
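++ *
++ * node_vmas hang off a stable_node in an hlist kept sorted by ascending
++ * slot key, so all rmap_items of one VMA share a single node_vma; with
++ * logdedup set, the walk below also charges pages_bemerged to every
++ * other VMA already on the node.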
++ * ++ */ ++static void stable_tree_append(struct rmap_item *rmap_item, ++ struct stable_node *stable_node, int logdedup) ++{ ++ struct node_vma *node_vma = NULL, *new_node_vma, *node_vma_cont = NULL; ++ unsigned long key = (unsigned long)rmap_item->slot; ++ unsigned long factor = rmap_item->slot->rung->step; ++ ++ BUG_ON(!stable_node); ++ rmap_item->address |= STABLE_FLAG; ++ ++ if (hlist_empty(&stable_node->hlist)) { ++ uksm_pages_shared++; ++ goto node_vma_new; ++ } else { ++ uksm_pages_sharing++; ++ } ++ ++ hlist_for_each_entry(node_vma, &stable_node->hlist, hlist) { ++ if (node_vma->key >= key) ++ break; ++ ++ if (logdedup) { ++ node_vma->slot->pages_bemerged += factor; ++ if (list_empty(&node_vma->slot->dedup_list)) ++ list_add(&node_vma->slot->dedup_list, ++ &vma_slot_dedup); ++ } ++ } ++ ++ if (node_vma) { ++ if (node_vma->key == key) { ++ node_vma_cont = hlist_entry_safe(node_vma->hlist.next, struct node_vma, hlist); ++ goto node_vma_ok; ++ } else if (node_vma->key > key) { ++ node_vma_cont = node_vma; ++ } ++ } ++ ++node_vma_new: ++ /* no same vma already in node, alloc a new node_vma */ ++ new_node_vma = alloc_node_vma(); ++ BUG_ON(!new_node_vma); ++ new_node_vma->head = stable_node; ++ new_node_vma->slot = rmap_item->slot; ++ ++ if (!node_vma) { ++ hlist_add_head(&new_node_vma->hlist, &stable_node->hlist); ++ } else if (node_vma->key != key) { ++ if (node_vma->key < key) ++ hlist_add_after(&node_vma->hlist, &new_node_vma->hlist); ++ else { ++ hlist_add_before(&new_node_vma->hlist, ++ &node_vma->hlist); ++ } ++ ++ } ++ node_vma = new_node_vma; ++ ++node_vma_ok: /* ok, ready to add to the list */ ++ rmap_item->head = node_vma; ++ hlist_add_head(&rmap_item->hlist, &node_vma->rmap_hlist); ++ hold_anon_vma(rmap_item, rmap_item->slot->vma->anon_vma); ++ if (logdedup) { ++ rmap_item->slot->pages_merged++; ++ if (node_vma_cont) { ++ node_vma = node_vma_cont; ++ hlist_for_each_entry_continue(node_vma, hlist) { ++ node_vma->slot->pages_bemerged += factor; ++ if (list_empty(&node_vma->slot->dedup_list)) ++ list_add(&node_vma->slot->dedup_list, ++ &vma_slot_dedup); ++ } ++ } ++ } ++} ++ ++/* ++ * We use break_ksm to break COW on a ksm page: it's a stripped down ++ * ++ * if (get_user_pages(current, mm, addr, 1, 1, 1, &page, NULL) == 1) ++ * put_page(page); ++ * ++ * but taking great care only to touch a ksm page, in a VM_MERGEABLE vma, ++ * in case the application has unmapped and remapped mm,addr meanwhile. ++ * Could a ksm page appear anywhere else? Actually yes, in a VM_PFNMAP ++ * mmap of /dev/mem or /dev/kmem, where we would not want to touch it. ++ */ ++static int break_ksm(struct vm_area_struct *vma, unsigned long addr) ++{ ++ struct page *page; ++ int ret = 0; ++ ++ do { ++ cond_resched(); ++ page = follow_page(vma, addr, FOLL_GET); ++ if (IS_ERR_OR_NULL(page)) ++ break; ++ if (PageKsm(page)) { ++ ret = handle_mm_fault(vma->vm_mm, vma, addr, ++ FAULT_FLAG_WRITE); ++ } else ++ ret = VM_FAULT_WRITE; ++ put_page(page); ++ } while (!(ret & (VM_FAULT_WRITE | VM_FAULT_SIGBUS | VM_FAULT_OOM))); ++ /* ++ * We must loop because handle_mm_fault() may back out if there's ++ * any difficulty e.g. if pte accessed bit gets updated concurrently. ++ * ++ * VM_FAULT_WRITE is what we have been hoping for: it indicates that ++ * COW has been broken, even if the vma does not permit VM_WRITE; ++ * but note that a concurrent fault might break PageKsm for us. 
++ * ++ * VM_FAULT_SIGBUS could occur if we race with truncation of the ++ * backing file, which also invalidates anonymous pages: that's ++ * okay, that truncation will have unmapped the PageKsm for us. ++ * ++ * VM_FAULT_OOM: at the time of writing (late July 2009), setting ++ * aside mem_cgroup limits, VM_FAULT_OOM would only be set if the ++ * current task has TIF_MEMDIE set, and will be OOM killed on return ++ * to user; and ksmd, having no mm, would never be chosen for that. ++ * ++ * But if the mm is in a limited mem_cgroup, then the fault may fail ++ * with VM_FAULT_OOM even if the current task is not TIF_MEMDIE; and ++ * even ksmd can fail in this way - though it's usually breaking ksm ++ * just to undo a merge it made a moment before, so unlikely to oom. ++ * ++ * That's a pity: we might therefore have more kernel pages allocated ++ * than we're counting as nodes in the stable tree; but uksm_do_scan ++ * will retry to break_cow on each pass, so should recover the page ++ * in due course. The important thing is to not let VM_MERGEABLE ++ * be cleared while any such pages might remain in the area. ++ */ ++ return (ret & VM_FAULT_OOM) ? -ENOMEM : 0; ++} ++ ++static void break_cow(struct rmap_item *rmap_item) ++{ ++ struct vm_area_struct *vma = rmap_item->slot->vma; ++ struct mm_struct *mm = vma->vm_mm; ++ unsigned long addr = get_rmap_addr(rmap_item); ++ ++ if (uksm_test_exit(mm)) ++ goto out; ++ ++ break_ksm(vma, addr); ++out: ++ return; ++} ++ ++/* ++ * Though it's very tempting to unmerge in_stable_tree(rmap_item)s rather ++ * than check every pte of a given vma, the locking doesn't quite work for ++ * that - an rmap_item is assigned to the stable tree after inserting ksm ++ * page and upping mmap_sem. Nor does it fit with the way we skip dup'ing ++ * rmap_items from parent to child at fork time (so as not to waste time ++ * if exit comes before the next scan reaches it). ++ * ++ * Similarly, although we'd like to remove rmap_items (so updating counts ++ * and freeing memory) when unmerging an area, it's easier to leave that ++ * to the next pass of ksmd - consider, for example, how ksmd might be ++ * in cmp_and_merge_page on one of the rmap_items we would be removing. 
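++ *
++ * unmerge_uksm_pages() below therefore only breaks COW page by page via
++ * break_ksm() and leaves the rmap_item bookkeeping to the next scan
++ * pass; it bails out with -ERESTARTSYS if a signal is pending.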
++ */ ++inline int unmerge_uksm_pages(struct vm_area_struct *vma, ++ unsigned long start, unsigned long end) ++{ ++ unsigned long addr; ++ int err = 0; ++ ++ for (addr = start; addr < end && !err; addr += PAGE_SIZE) { ++ if (uksm_test_exit(vma->vm_mm)) ++ break; ++ if (signal_pending(current)) ++ err = -ERESTARTSYS; ++ else ++ err = break_ksm(vma, addr); ++ } ++ return err; ++} ++ ++static inline void inc_uksm_pages_scanned(void) ++{ ++ u64 delta; ++ ++ ++ if (uksm_pages_scanned == U64_MAX) { ++ encode_benefit(); ++ ++ delta = uksm_pages_scanned >> pages_scanned_base; ++ ++ if (CAN_OVERFLOW_U64(pages_scanned_stored, delta)) { ++ pages_scanned_stored >>= 1; ++ delta >>= 1; ++ pages_scanned_base++; ++ } ++ ++ pages_scanned_stored += delta; ++ ++ uksm_pages_scanned = uksm_pages_scanned_last = 0; ++ } ++ ++ uksm_pages_scanned++; ++} ++ ++static inline int find_zero_page_hash(int strength, u32 hash) ++{ ++ return (zero_hash_table[strength] == hash); ++} ++ ++static ++int cmp_and_merge_zero_page(struct vm_area_struct *vma, struct page *page) ++{ ++ struct page *zero_page = empty_uksm_zero_page; ++ struct mm_struct *mm = vma->vm_mm; ++ pte_t orig_pte = __pte(0); ++ int err = -EFAULT; ++ ++ if (uksm_test_exit(mm)) ++ goto out; ++ ++ if (PageTransCompound(page) && page_trans_compound_anon_split(page)) ++ goto out; ++ BUG_ON(PageTransCompound(page)); ++ ++ if (!PageAnon(page)) ++ goto out; ++ ++ if (!trylock_page(page)) ++ goto out; ++ ++ if (write_protect_page(vma, page, &orig_pte, 0) == 0) { ++ if (is_page_full_zero(page)) ++ err = replace_page(vma, page, zero_page, orig_pte); ++ } ++ ++ unlock_page(page); ++out: ++ return err; ++} ++ ++/* ++ * cmp_and_merge_page() - first see if page can be merged into the stable ++ * tree; if not, compare hash to previous and if it's the same, see if page ++ * can be inserted into the unstable tree, or merged with a page already there ++ * and both transferred to the stable tree. ++ * ++ * @page: the page that we are searching identical page to. ++ * @rmap_item: the reverse mapping into the virtual address of this page ++ */ ++static void cmp_and_merge_page(struct rmap_item *rmap_item, u32 hash) ++{ ++ struct rmap_item *tree_rmap_item; ++ struct page *page; ++ struct page *kpage = NULL; ++ u32 hash_max; ++ int err; ++ unsigned int success1, success2; ++ struct stable_node *snode; ++ int cmp; ++ struct rb_node *parent = NULL, **new; ++ ++ remove_rmap_item_from_tree(rmap_item); ++ page = rmap_item->page; ++ ++ /* We first start with searching the page inside the stable tree */ ++ kpage = stable_tree_search(rmap_item, hash); ++ if (kpage) { ++ err = try_to_merge_with_uksm_page(rmap_item, kpage, ++ hash); ++ if (!err) { ++ /* ++ * The page was successfully merged, add ++ * its rmap_item to the stable tree. ++ * page lock is needed because it's ++ * racing with try_to_unmap_ksm(), etc. ++ */ ++ lock_page(kpage); ++ snode = page_stable_node(kpage); ++ stable_tree_append(rmap_item, snode, 1); ++ unlock_page(kpage); ++ put_page(kpage); ++ return; /* success */ ++ } ++ put_page(kpage); ++ ++ /* ++ * if it's a collision and it has been search in sub-rbtree ++ * (hash_max != 0), we want to abort, because if it is ++ * successfully merged in unstable tree, the collision trends to ++ * happen again. 
++ */ ++ if (err == MERGE_ERR_COLLI && rmap_item->hash_max) ++ return; ++ } ++ ++ tree_rmap_item = ++ unstable_tree_search_insert(rmap_item, hash); ++ if (tree_rmap_item) { ++ err = try_to_merge_two_pages(rmap_item, tree_rmap_item, hash); ++ /* ++ * As soon as we merge this page, we want to remove the ++ * rmap_item of the page we have merged with from the unstable ++ * tree, and insert it instead as new node in the stable tree. ++ */ ++ if (!err) { ++ kpage = page; ++ remove_rmap_item_from_tree(tree_rmap_item); ++ lock_page(kpage); ++ snode = stable_tree_insert(&kpage, hash, ++ rmap_item, tree_rmap_item, ++ &success1, &success2); ++ ++ /* ++ * Do not log dedup for tree item, it's not counted as ++ * scanned in this round. ++ */ ++ if (success2) ++ stable_tree_append(tree_rmap_item, snode, 0); ++ ++ /* ++ * The order of these two stable append is important: ++ * we are scanning rmap_item. ++ */ ++ if (success1) ++ stable_tree_append(rmap_item, snode, 1); ++ ++ /* ++ * The original kpage may be unlocked inside ++ * stable_tree_insert() already. This page ++ * should be unlocked before doing ++ * break_cow(). ++ */ ++ unlock_page(kpage); ++ ++ if (!success1) ++ break_cow(rmap_item); ++ ++ if (!success2) ++ break_cow(tree_rmap_item); ++ ++ } else if (err == MERGE_ERR_COLLI) { ++ BUG_ON(tree_rmap_item->tree_node->count > 1); ++ ++ rmap_item_hash_max(tree_rmap_item, ++ tree_rmap_item->tree_node->hash); ++ ++ hash_max = rmap_item_hash_max(rmap_item, hash); ++ cmp = hash_cmp(hash_max, tree_rmap_item->hash_max); ++ parent = &tree_rmap_item->node; ++ if (cmp < 0) ++ new = &parent->rb_left; ++ else if (cmp > 0) ++ new = &parent->rb_right; ++ else ++ goto put_up_out; ++ ++ rmap_item->tree_node = tree_rmap_item->tree_node; ++ rmap_item->address |= UNSTABLE_FLAG; ++ rmap_item->hash_round = uksm_hash_round; ++ rb_link_node(&rmap_item->node, parent, new); ++ rb_insert_color(&rmap_item->node, ++ &tree_rmap_item->tree_node->sub_root); ++ rmap_item->tree_node->count++; ++ } else { ++ /* ++ * either one of the page has changed or they collide ++ * at the max hash, we consider them as ill items. 
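++			 * Dropping tree_rmap_item from the unstable tree
++			 * below means no stale node survives; both pages
++			 * simply get re-hashed on a later scan pass.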
++ */ ++ remove_rmap_item_from_tree(tree_rmap_item); ++ } ++put_up_out: ++ put_page(tree_rmap_item->page); ++ up_read(&tree_rmap_item->slot->vma->vm_mm->mmap_sem); ++ } ++} ++ ++ ++ ++ ++static inline unsigned long get_pool_index(struct vma_slot *slot, ++ unsigned long index) ++{ ++ unsigned long pool_index; ++ ++ pool_index = (sizeof(struct rmap_list_entry *) * index) >> PAGE_SHIFT; ++ if (pool_index >= slot->pool_size) ++ BUG(); ++ return pool_index; ++} ++ ++static inline unsigned long index_page_offset(unsigned long index) ++{ ++ return offset_in_page(sizeof(struct rmap_list_entry *) * index); ++} ++ ++static inline ++struct rmap_list_entry *get_rmap_list_entry(struct vma_slot *slot, ++ unsigned long index, int need_alloc) ++{ ++ unsigned long pool_index; ++ struct page *page; ++ void *addr; ++ ++ ++ pool_index = get_pool_index(slot, index); ++ if (!slot->rmap_list_pool[pool_index]) { ++ if (!need_alloc) ++ return NULL; ++ ++ page = alloc_page(GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN); ++ if (!page) ++ return NULL; ++ ++ slot->rmap_list_pool[pool_index] = page; ++ } ++ ++ addr = kmap(slot->rmap_list_pool[pool_index]); ++ addr += index_page_offset(index); ++ ++ return addr; ++} ++ ++static inline void put_rmap_list_entry(struct vma_slot *slot, ++ unsigned long index) ++{ ++ unsigned long pool_index; ++ ++ pool_index = get_pool_index(slot, index); ++ BUG_ON(!slot->rmap_list_pool[pool_index]); ++ kunmap(slot->rmap_list_pool[pool_index]); ++} ++ ++static inline int entry_is_new(struct rmap_list_entry *entry) ++{ ++ return !entry->item; ++} ++ ++static inline unsigned long get_index_orig_addr(struct vma_slot *slot, ++ unsigned long index) ++{ ++ return slot->vma->vm_start + (index << PAGE_SHIFT); ++} ++ ++static inline unsigned long get_entry_address(struct rmap_list_entry *entry) ++{ ++ unsigned long addr; ++ ++ if (is_addr(entry->addr)) ++ addr = get_clean_addr(entry->addr); ++ else if (entry->item) ++ addr = get_rmap_addr(entry->item); ++ else ++ BUG(); ++ ++ return addr; ++} ++ ++static inline struct rmap_item *get_entry_item(struct rmap_list_entry *entry) ++{ ++ if (is_addr(entry->addr)) ++ return NULL; ++ ++ return entry->item; ++} ++ ++static inline void inc_rmap_list_pool_count(struct vma_slot *slot, ++ unsigned long index) ++{ ++ unsigned long pool_index; ++ ++ pool_index = get_pool_index(slot, index); ++ BUG_ON(!slot->rmap_list_pool[pool_index]); ++ slot->pool_counts[pool_index]++; ++} ++ ++static inline void dec_rmap_list_pool_count(struct vma_slot *slot, ++ unsigned long index) ++{ ++ unsigned long pool_index; ++ ++ pool_index = get_pool_index(slot, index); ++ BUG_ON(!slot->rmap_list_pool[pool_index]); ++ BUG_ON(!slot->pool_counts[pool_index]); ++ slot->pool_counts[pool_index]--; ++} ++ ++static inline int entry_has_rmap(struct rmap_list_entry *entry) ++{ ++ return !is_addr(entry->addr) && entry->item; ++} ++ ++static inline void swap_entries(struct rmap_list_entry *entry1, ++ unsigned long index1, ++ struct rmap_list_entry *entry2, ++ unsigned long index2) ++{ ++ struct rmap_list_entry tmp; ++ ++ /* swapping two new entries is meaningless */ ++ BUG_ON(entry_is_new(entry1) && entry_is_new(entry2)); ++ ++ tmp = *entry1; ++ *entry1 = *entry2; ++ *entry2 = tmp; ++ ++ if (entry_has_rmap(entry1)) ++ entry1->item->entry_index = index1; ++ ++ if (entry_has_rmap(entry2)) ++ entry2->item->entry_index = index2; ++ ++ if (entry_has_rmap(entry1) && !entry_has_rmap(entry2)) { ++ inc_rmap_list_pool_count(entry1->item->slot, index1); ++ dec_rmap_list_pool_count(entry1->item->slot, index2); ++ } 
else if (!entry_has_rmap(entry1) && entry_has_rmap(entry2)) { ++ inc_rmap_list_pool_count(entry2->item->slot, index2); ++ dec_rmap_list_pool_count(entry2->item->slot, index1); ++ } ++} ++ ++static inline void free_entry_item(struct rmap_list_entry *entry) ++{ ++ unsigned long index; ++ struct rmap_item *item; ++ ++ if (!is_addr(entry->addr)) { ++ BUG_ON(!entry->item); ++ item = entry->item; ++ entry->addr = get_rmap_addr(item); ++ set_is_addr(entry->addr); ++ index = item->entry_index; ++ remove_rmap_item_from_tree(item); ++ dec_rmap_list_pool_count(item->slot, index); ++ free_rmap_item(item); ++ } ++} ++ ++static inline int pool_entry_boundary(unsigned long index) ++{ ++ unsigned long linear_addr; ++ ++ linear_addr = sizeof(struct rmap_list_entry *) * index; ++ return index && !offset_in_page(linear_addr); ++} ++ ++static inline void try_free_last_pool(struct vma_slot *slot, ++ unsigned long index) ++{ ++ unsigned long pool_index; ++ ++ pool_index = get_pool_index(slot, index); ++ if (slot->rmap_list_pool[pool_index] && ++ !slot->pool_counts[pool_index]) { ++ __free_page(slot->rmap_list_pool[pool_index]); ++ slot->rmap_list_pool[pool_index] = NULL; ++ slot->flags |= UKSM_SLOT_NEED_SORT; ++ } ++ ++} ++ ++static inline unsigned long vma_item_index(struct vm_area_struct *vma, ++ struct rmap_item *item) ++{ ++ return (get_rmap_addr(item) - vma->vm_start) >> PAGE_SHIFT; ++} ++ ++static int within_same_pool(struct vma_slot *slot, ++ unsigned long i, unsigned long j) ++{ ++ unsigned long pool_i, pool_j; ++ ++ pool_i = get_pool_index(slot, i); ++ pool_j = get_pool_index(slot, j); ++ ++ return (pool_i == pool_j); ++} ++ ++static void sort_rmap_entry_list(struct vma_slot *slot) ++{ ++ unsigned long i, j; ++ struct rmap_list_entry *entry, *swap_entry; ++ ++ entry = get_rmap_list_entry(slot, 0, 0); ++ for (i = 0; i < slot->pages; ) { ++ ++ if (!entry) ++ goto skip_whole_pool; ++ ++ if (entry_is_new(entry)) ++ goto next_entry; ++ ++ if (is_addr(entry->addr)) { ++ entry->addr = 0; ++ goto next_entry; ++ } ++ ++ j = vma_item_index(slot->vma, entry->item); ++ if (j == i) ++ goto next_entry; ++ ++ if (within_same_pool(slot, i, j)) ++ swap_entry = entry + j - i; ++ else ++ swap_entry = get_rmap_list_entry(slot, j, 1); ++ ++ swap_entries(entry, i, swap_entry, j); ++ if (!within_same_pool(slot, i, j)) ++ put_rmap_list_entry(slot, j); ++ continue; ++ ++skip_whole_pool: ++ i += PAGE_SIZE / sizeof(*entry); ++ if (i < slot->pages) ++ entry = get_rmap_list_entry(slot, i, 0); ++ continue; ++ ++next_entry: ++ if (i >= slot->pages - 1 || ++ !within_same_pool(slot, i, i + 1)) { ++ put_rmap_list_entry(slot, i); ++ if (i + 1 < slot->pages) ++ entry = get_rmap_list_entry(slot, i + 1, 0); ++ } else ++ entry++; ++ i++; ++ continue; ++ } ++ ++ /* free empty pool entries which contain no rmap_item */ ++ /* TODO: can be simplified to rely on pool_counts alone once the bug is fixed
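++ * (For now the loop below re-checks each pool page by hand: it kmaps ++ * the page and scans every entry for a live entry->item rather than ++ * trusting a zero pool_counts[i].)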
*/ ++ for (i = 0; i < slot->pool_size; i++) { ++ unsigned char has_rmap; ++ void *addr; ++ ++ if (!slot->rmap_list_pool[i]) ++ continue; ++ ++ has_rmap = 0; ++ addr = kmap(slot->rmap_list_pool[i]); ++ BUG_ON(!addr); ++ for (j = 0; j < PAGE_SIZE / sizeof(*entry); j++) { ++ entry = (struct rmap_list_entry *)addr + j; ++ if (is_addr(entry->addr)) ++ continue; ++ if (!entry->item) ++ continue; ++ has_rmap = 1; ++ } ++ kunmap(slot->rmap_list_pool[i]); ++ if (!has_rmap) { ++ BUG_ON(slot->pool_counts[i]); ++ __free_page(slot->rmap_list_pool[i]); ++ slot->rmap_list_pool[i] = NULL; ++ } ++ } ++ ++ slot->flags &= ~UKSM_SLOT_NEED_SORT; ++} ++ ++/* ++ * vma_fully_scanned() - if all the pages in this slot have been scanned. ++ */ ++static inline int vma_fully_scanned(struct vma_slot *slot) ++{ ++ return slot->pages_scanned == slot->pages; ++} ++ ++/** ++ * get_next_rmap_item() - Get the next rmap_item in a vma_slot according to ++ * its random permutation. This function is embedded with the random ++ * permutation index management code. ++ */ ++static struct rmap_item *get_next_rmap_item(struct vma_slot *slot, u32 *hash) ++{ ++ unsigned long rand_range, addr, swap_index, scan_index; ++ struct rmap_item *item = NULL; ++ struct rmap_list_entry *scan_entry, *swap_entry = NULL; ++ struct page *page; ++ ++ scan_index = swap_index = slot->pages_scanned % slot->pages; ++ ++ if (pool_entry_boundary(scan_index)) ++ try_free_last_pool(slot, scan_index - 1); ++ ++ if (vma_fully_scanned(slot)) { ++ if (slot->flags & UKSM_SLOT_NEED_SORT) ++ slot->flags |= UKSM_SLOT_NEED_RERAND; ++ else ++ slot->flags &= ~UKSM_SLOT_NEED_RERAND; ++ if (slot->flags & UKSM_SLOT_NEED_SORT) ++ sort_rmap_entry_list(slot); ++ } ++ ++ scan_entry = get_rmap_list_entry(slot, scan_index, 1); ++ if (!scan_entry) ++ return NULL; ++ ++ if (entry_is_new(scan_entry)) { ++ scan_entry->addr = get_index_orig_addr(slot, scan_index); ++ set_is_addr(scan_entry->addr); ++ } ++ ++ if (slot->flags & UKSM_SLOT_NEED_RERAND) { ++ rand_range = slot->pages - scan_index; ++ BUG_ON(!rand_range); ++ swap_index = scan_index + (prandom_u32() % rand_range); ++ } ++ ++ if (swap_index != scan_index) { ++ swap_entry = get_rmap_list_entry(slot, swap_index, 1); ++ if (entry_is_new(swap_entry)) { ++ swap_entry->addr = get_index_orig_addr(slot, ++ swap_index); ++ set_is_addr(swap_entry->addr); ++ } ++ swap_entries(scan_entry, scan_index, swap_entry, swap_index); ++ } ++ ++ addr = get_entry_address(scan_entry); ++ item = get_entry_item(scan_entry); ++ BUG_ON(addr > slot->vma->vm_end || addr < slot->vma->vm_start); ++ ++ page = follow_page(slot->vma, addr, FOLL_GET); ++ if (IS_ERR_OR_NULL(page)) ++ goto nopage; ++ ++ if (!PageAnon(page) && !page_trans_compound_anon(page)) ++ goto putpage; ++ ++ /*check is zero_page pfn or uksm_zero_page*/ ++ if ((page_to_pfn(page) == zero_pfn) ++ || (page_to_pfn(page) == uksm_zero_pfn)) ++ goto putpage; ++ ++ flush_anon_page(slot->vma, page, addr); ++ flush_dcache_page(page); ++ ++ ++ *hash = page_hash(page, hash_strength, 1); ++ inc_uksm_pages_scanned(); ++ /*if the page content all zero, re-map to zero-page*/ ++ if (find_zero_page_hash(hash_strength, *hash)) { ++ if (!cmp_and_merge_zero_page(slot->vma, page)) { ++ slot->pages_merged++; ++ __inc_zone_page_state(page, NR_UKSM_ZERO_PAGES); ++ dec_mm_counter(slot->mm, MM_ANONPAGES); ++ ++ /* For full-zero pages, no need to create rmap item */ ++ goto putpage; ++ } else { ++ inc_rshash_neg(memcmp_cost / 2); ++ } ++ } ++ ++ if (!item) { ++ item = alloc_rmap_item(); ++ if (item) { ++ /* It has 
already been zeroed */ ++ item->slot = slot; ++ item->address = addr; ++ item->entry_index = scan_index; ++ scan_entry->item = item; ++ inc_rmap_list_pool_count(slot, scan_index); ++ } else ++ goto putpage; ++ } ++ ++ BUG_ON(item->slot != slot); ++ /* the page may have changed */ ++ item->page = page; ++ put_rmap_list_entry(slot, scan_index); ++ if (swap_entry) ++ put_rmap_list_entry(slot, swap_index); ++ return item; ++ ++putpage: ++ put_page(page); ++ page = NULL; ++nopage: ++ /* no page, store addr back and free rmap_item if possible */ ++ free_entry_item(scan_entry); ++ put_rmap_list_entry(slot, scan_index); ++ if (swap_entry) ++ put_rmap_list_entry(slot, swap_index); ++ return NULL; ++} ++ ++static inline int in_stable_tree(struct rmap_item *rmap_item) ++{ ++ return rmap_item->address & STABLE_FLAG; ++} ++ ++/** ++ * scan_vma_one_page() - scan the next page in a vma_slot. Called with ++ * mmap_sem locked. ++ */ ++static noinline void scan_vma_one_page(struct vma_slot *slot) ++{ ++ u32 hash; ++ struct mm_struct *mm; ++ struct rmap_item *rmap_item = NULL; ++ struct vm_area_struct *vma = slot->vma; ++ ++ mm = vma->vm_mm; ++ BUG_ON(!mm); ++ BUG_ON(!slot); ++ ++ rmap_item = get_next_rmap_item(slot, &hash); ++ if (!rmap_item) ++ goto out1; ++ ++ if (PageKsm(rmap_item->page) && in_stable_tree(rmap_item)) ++ goto out2; ++ ++ cmp_and_merge_page(rmap_item, hash); ++out2: ++ put_page(rmap_item->page); ++out1: ++ slot->pages_scanned++; ++ if (slot->fully_scanned_round != fully_scanned_round) ++ scanned_virtual_pages++; ++ ++ if (vma_fully_scanned(slot)) ++ slot->fully_scanned_round = fully_scanned_round; ++} ++ ++static inline unsigned long rung_get_pages(struct scan_rung *rung) ++{ ++ struct slot_tree_node *node; ++ ++ if (!rung->vma_root.rnode) ++ return 0; ++ ++ node = container_of(rung->vma_root.rnode, struct slot_tree_node, snode); ++ ++ return node->size; ++} ++ ++#define RUNG_SAMPLED_MIN 3 ++ ++static inline ++void uksm_calc_rung_step(struct scan_rung *rung, ++ unsigned long page_time, unsigned long ratio) ++{ ++ unsigned long sampled, pages; ++ ++ /* will be fully scanned ? */ ++ if (!rung->cover_msecs) { ++ rung->step = 1; ++ return; ++ } ++ ++ sampled = rung->cover_msecs * (NSEC_PER_MSEC / TIME_RATIO_SCALE) ++ * ratio / page_time; ++ ++ /* ++ * Before we finish a scan round and expensive per-round jobs, ++ * we need to have a chance to estimate the per-page time. So ++ * the sampled number cannot be too small.
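++ * ++ * Illustrative arithmetic (hypothetical numbers; TIME_RATIO_SCALE == 1000 ++ * is assumed here, not taken from this hunk): cover_msecs == 1000 and ++ * ratio == 500 give sampled = 1000 * (NSEC_PER_MSEC / 1000) * 500 / ++ * page_time; at a per-page cost of 5000 ns that is 100,000 samples, so a ++ * rung holding 1,000,000 pages ends up with step = 10.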
++ */ ++ if (sampled < RUNG_SAMPLED_MIN) ++ sampled = RUNG_SAMPLED_MIN; ++ ++ pages = rung_get_pages(rung); ++ if (likely(pages > sampled)) ++ rung->step = pages / sampled; ++ else ++ rung->step = 1; ++} ++ ++static inline int step_need_recalc(struct scan_rung *rung) ++{ ++ unsigned long pages, stepmax; ++ ++ pages = rung_get_pages(rung); ++ stepmax = pages / RUNG_SAMPLED_MIN; ++ ++ return pages && (rung->step > pages || ++ (stepmax && rung->step > stepmax)); ++} ++ ++static inline ++void reset_current_scan(struct scan_rung *rung, int finished, int step_recalc) ++{ ++ struct vma_slot *slot; ++ ++ if (finished) ++ rung->flags |= UKSM_RUNG_ROUND_FINISHED; ++ ++ if (step_recalc || step_need_recalc(rung)) { ++ uksm_calc_rung_step(rung, uksm_ema_page_time, rung->cpu_ratio); ++ BUG_ON(step_need_recalc(rung)); ++ } ++ ++ slot_iter_index = prandom_u32() % rung->step; ++ BUG_ON(!rung->vma_root.rnode); ++ slot = sradix_tree_next(&rung->vma_root, NULL, 0, slot_iter); ++ BUG_ON(!slot); ++ ++ rung->current_scan = slot; ++ rung->current_offset = slot_iter_index; ++} ++ ++static inline struct sradix_tree_root *slot_get_root(struct vma_slot *slot) ++{ ++ return &slot->rung->vma_root; ++} ++ ++/* ++ * return if resetted. ++ */ ++static int advance_current_scan(struct scan_rung *rung) ++{ ++ unsigned short n; ++ struct vma_slot *slot, *next = NULL; ++ ++ BUG_ON(!rung->vma_root.num); ++ ++ slot = rung->current_scan; ++ n = (slot->pages - rung->current_offset) % rung->step; ++ slot_iter_index = rung->step - n; ++ next = sradix_tree_next(&rung->vma_root, slot->snode, ++ slot->sindex, slot_iter); ++ ++ if (next) { ++ rung->current_offset = slot_iter_index; ++ rung->current_scan = next; ++ return 0; ++ } else { ++ reset_current_scan(rung, 1, 0); ++ return 1; ++ } ++} ++ ++static inline void rung_rm_slot(struct vma_slot *slot) ++{ ++ struct scan_rung *rung = slot->rung; ++ struct sradix_tree_root *root; ++ ++ if (rung->current_scan == slot) ++ advance_current_scan(rung); ++ ++ root = slot_get_root(slot); ++ sradix_tree_delete_from_leaf(root, slot->snode, slot->sindex); ++ slot->snode = NULL; ++ if (step_need_recalc(rung)) { ++ uksm_calc_rung_step(rung, uksm_ema_page_time, rung->cpu_ratio); ++ BUG_ON(step_need_recalc(rung)); ++ } ++ ++ /* In case advance_current_scan loop back to this slot again */ ++ if (rung->vma_root.num && rung->current_scan == slot) ++ reset_current_scan(slot->rung, 1, 0); ++} ++ ++static inline void rung_add_new_slots(struct scan_rung *rung, ++ struct vma_slot **slots, unsigned long num) ++{ ++ int err; ++ struct vma_slot *slot; ++ unsigned long i; ++ struct sradix_tree_root *root = &rung->vma_root; ++ ++ err = sradix_tree_enter(root, (void **)slots, num); ++ BUG_ON(err); ++ ++ for (i = 0; i < num; i++) { ++ slot = slots[i]; ++ slot->rung = rung; ++ BUG_ON(vma_fully_scanned(slot)); ++ } ++ ++ if (rung->vma_root.num == num) ++ reset_current_scan(rung, 0, 1); ++} ++ ++static inline int rung_add_one_slot(struct scan_rung *rung, ++ struct vma_slot *slot) ++{ ++ int err; ++ ++ err = sradix_tree_enter(&rung->vma_root, (void **)&slot, 1); ++ if (err) ++ return err; ++ ++ slot->rung = rung; ++ if (rung->vma_root.num == 1) ++ reset_current_scan(rung, 0, 1); ++ ++ return 0; ++} ++ ++/* ++ * Return true if the slot is deleted from its rung. 
++ */ ++static inline int vma_rung_enter(struct vma_slot *slot, struct scan_rung *rung) ++{ ++ struct scan_rung *old_rung = slot->rung; ++ int err; ++ ++ if (old_rung == rung) ++ return 0; ++ ++ rung_rm_slot(slot); ++ err = rung_add_one_slot(rung, slot); ++ if (err) { ++ err = rung_add_one_slot(old_rung, slot); ++ WARN_ON(err); /* OOPS, badly OOM; we lost this slot */ ++ } ++ ++ return 1; ++} ++ ++static inline int vma_rung_up(struct vma_slot *slot) ++{ ++ struct scan_rung *rung; ++ ++ rung = slot->rung; ++ if (slot->rung != &uksm_scan_ladder[SCAN_LADDER_SIZE-1]) ++ rung++; ++ ++ return vma_rung_enter(slot, rung); ++} ++ ++static inline int vma_rung_down(struct vma_slot *slot) ++{ ++ struct scan_rung *rung; ++ ++ rung = slot->rung; ++ if (slot->rung != &uksm_scan_ladder[0]) ++ rung--; ++ ++ return vma_rung_enter(slot, rung); ++} ++ ++/** ++ * cal_dedup_ratio() - Calculate the deduplication ratio for this slot. ++ */ ++static unsigned long cal_dedup_ratio(struct vma_slot *slot) ++{ ++ unsigned long ret; ++ ++ BUG_ON(slot->pages_scanned == slot->last_scanned); ++ ++ ret = slot->pages_merged; ++ ++ /* Thrashing area filtering */ ++ if (ret && uksm_thrash_threshold) { ++ if (slot->pages_cowed * 100 / slot->pages_merged ++ > uksm_thrash_threshold) { ++ ret = 0; ++ } else { ++ ret = slot->pages_merged - slot->pages_cowed; ++ } ++ } ++ ++ return ret; ++} ++ ++/** ++ * cal_dedup_ratio_old() - Calculate the deduplication ratio for this slot. ++ */ ++static unsigned long cal_dedup_ratio_old(struct vma_slot *slot) ++{ ++ unsigned long ret; ++ unsigned long pages_scanned; ++ ++ pages_scanned = slot->pages_scanned; ++ if (!pages_scanned) { ++ if (uksm_thrash_threshold) ++ return 0; ++ else ++ pages_scanned = slot->pages_scanned; ++ } ++ ++ ret = slot->pages_bemerged * 100 / pages_scanned; ++ ++ /* Thrashing area filtering */ ++ if (ret && uksm_thrash_threshold) { ++ if (slot->pages_cowed * 100 / slot->pages_bemerged ++ > uksm_thrash_threshold) { ++ ret = 0; ++ } else { ++ ret = slot->pages_bemerged - slot->pages_cowed; ++ } ++ } ++ ++ return ret; ++} ++ ++/** ++ * stable_node_reinsert() - When the hash_strength has been adjusted, the ++ * stable tree needs to be restructured; this is the function that re-inserts ++ * a stable node.
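++ * ++ * Layout sketch: the outer rbtree (root_treep) is keyed by the sampled ++ * hash kept in tree_node->hash; all stable_nodes that collide on that ++ * hash hang off tree_node->sub_root, an inner rbtree keyed by the ++ * stronger hash_max. A node that collides at both levels is left out of ++ * the tree (see the 'failed' label).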
++ */ ++static inline void stable_node_reinsert(struct stable_node *new_node, ++ struct page *page, ++ struct rb_root *root_treep, ++ struct list_head *tree_node_listp, ++ u32 hash) ++{ ++ struct rb_node **new = &root_treep->rb_node; ++ struct rb_node *parent = NULL; ++ struct stable_node *stable_node; ++ struct tree_node *tree_node; ++ struct page *tree_page; ++ int cmp; ++ ++ while (*new) { ++ int cmp; ++ ++ tree_node = rb_entry(*new, struct tree_node, node); ++ ++ cmp = hash_cmp(hash, tree_node->hash); ++ ++ if (cmp < 0) { ++ parent = *new; ++ new = &parent->rb_left; ++ } else if (cmp > 0) { ++ parent = *new; ++ new = &parent->rb_right; ++ } else ++ break; ++ } ++ ++ if (*new) { ++ /* find a stable tree node with same first level hash value */ ++ stable_node_hash_max(new_node, page, hash); ++ if (tree_node->count == 1) { ++ stable_node = rb_entry(tree_node->sub_root.rb_node, ++ struct stable_node, node); ++ tree_page = get_uksm_page(stable_node, 1, 0); ++ if (tree_page) { ++ stable_node_hash_max(stable_node, ++ tree_page, hash); ++ put_page(tree_page); ++ ++ /* prepare for stable node insertion */ ++ ++ cmp = hash_cmp(new_node->hash_max, ++ stable_node->hash_max); ++ parent = &stable_node->node; ++ if (cmp < 0) ++ new = &parent->rb_left; ++ else if (cmp > 0) ++ new = &parent->rb_right; ++ else ++ goto failed; ++ ++ goto add_node; ++ } else { ++ /* the only stable_node deleted, the tree node ++ * was not deleted. ++ */ ++ goto tree_node_reuse; ++ } ++ } ++ ++ /* well, search the collision subtree */ ++ new = &tree_node->sub_root.rb_node; ++ parent = NULL; ++ BUG_ON(!*new); ++ while (*new) { ++ int cmp; ++ ++ stable_node = rb_entry(*new, struct stable_node, node); ++ ++ cmp = hash_cmp(new_node->hash_max, ++ stable_node->hash_max); ++ ++ if (cmp < 0) { ++ parent = *new; ++ new = &parent->rb_left; ++ } else if (cmp > 0) { ++ parent = *new; ++ new = &parent->rb_right; ++ } else { ++ /* oh, no, still a collision */ ++ goto failed; ++ } ++ } ++ ++ goto add_node; ++ } ++ ++ /* no tree node found */ ++ tree_node = alloc_tree_node(tree_node_listp); ++ if (!tree_node) { ++ printk(KERN_ERR "UKSM: memory allocation error!\n"); ++ goto failed; ++ } else { ++ tree_node->hash = hash; ++ rb_link_node(&tree_node->node, parent, new); ++ rb_insert_color(&tree_node->node, root_treep); ++ ++tree_node_reuse: ++ /* prepare for stable node insertion */ ++ parent = NULL; ++ new = &tree_node->sub_root.rb_node; ++ } ++ ++add_node: ++ rb_link_node(&new_node->node, parent, new); ++ rb_insert_color(&new_node->node, &tree_node->sub_root); ++ new_node->tree_node = tree_node; ++ tree_node->count++; ++ return; ++ ++failed: ++ /* This can only happen when two nodes have collided ++ * in two levels. ++ */ ++ new_node->tree_node = NULL; ++ return; ++} ++ ++static inline void free_all_tree_nodes(struct list_head *list) ++{ ++ struct tree_node *node, *tmp; ++ ++ list_for_each_entry_safe(node, tmp, list, all_list) { ++ free_tree_node(node); ++ } ++} ++ ++/** ++ * stable_tree_delta_hash() - Delta hash the stable tree from previous hash ++ * strength to the current hash_strength. It re-structures the whole tree.
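++ * ++ * Nodes that were linked into the old tree keep tree_node->hash, and only ++ * the difference between prev_hash_strength and hash_strength is ++ * recomputed via delta_hash(); nodes that missed the rbtree last round ++ * (collisions) pay for a full page_hash().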
++ */ ++static inline void stable_tree_delta_hash(u32 prev_hash_strength) ++{ ++ struct stable_node *node, *tmp; ++ struct rb_root *root_new_treep; ++ struct list_head *new_tree_node_listp; ++ ++ stable_tree_index = (stable_tree_index + 1) % 2; ++ root_new_treep = &root_stable_tree[stable_tree_index]; ++ new_tree_node_listp = &stable_tree_node_list[stable_tree_index]; ++ *root_new_treep = RB_ROOT; ++ BUG_ON(!list_empty(new_tree_node_listp)); ++ ++ /* ++ * We need to be safe: the node could be removed by get_uksm_page(). ++ */ ++ list_for_each_entry_safe(node, tmp, &stable_node_list, all_list) { ++ void *addr; ++ struct page *node_page; ++ u32 hash; ++ ++ /* ++ * We are completely re-structuring the stable nodes to a new ++ * stable tree. We do not bother unlinking from the old tree or ++ * touching the old tree_nodes; they will be freed all at once. ++ */ ++ node_page = get_uksm_page(node, 0, 0); ++ if (!node_page) ++ continue; ++ ++ if (node->tree_node) { ++ hash = node->tree_node->hash; ++ ++ addr = kmap_atomic(node_page); ++ ++ hash = delta_hash(addr, prev_hash_strength, ++ hash_strength, hash); ++ kunmap_atomic(addr); ++ } else { ++ /* ++ * it was not inserted into the rbtree due to a ++ * collision in the last round of scanning. ++ */ ++ hash = page_hash(node_page, hash_strength, 0); ++ } ++ ++ stable_node_reinsert(node, node_page, root_new_treep, ++ new_tree_node_listp, hash); ++ put_page(node_page); ++ } ++ ++ root_stable_treep = root_new_treep; ++ free_all_tree_nodes(stable_tree_node_listp); ++ BUG_ON(!list_empty(stable_tree_node_listp)); ++ stable_tree_node_listp = new_tree_node_listp; ++} ++ ++static inline void inc_hash_strength(unsigned long delta) ++{ ++ hash_strength += 1 << delta; ++ if (hash_strength > HASH_STRENGTH_MAX) ++ hash_strength = HASH_STRENGTH_MAX; ++} ++ ++static inline void dec_hash_strength(unsigned long delta) ++{ ++ unsigned long change = 1 << delta; ++ ++ if (hash_strength <= change + 1) ++ hash_strength = 1; ++ else ++ hash_strength -= change; ++} ++ ++static inline void inc_hash_strength_delta(void) ++{ ++ hash_strength_delta++; ++ if (hash_strength_delta > HASH_STRENGTH_DELTA_MAX) ++ hash_strength_delta = HASH_STRENGTH_DELTA_MAX; ++} ++ ++/* ++static inline unsigned long get_current_neg_ratio(void) ++{ ++ if (!rshash_pos || rshash_neg > rshash_pos) ++ return 100; ++ ++ return div64_u64(100 * rshash_neg , rshash_pos); ++} ++*/ ++ ++static inline unsigned long get_current_neg_ratio(void) ++{ ++ u64 pos = benefit.pos; ++ u64 neg = benefit.neg; ++ ++ if (!neg) ++ return 0; ++ ++ if (!pos || neg > pos) ++ return 100; ++ ++ if (neg > div64_u64(U64_MAX, 100)) ++ pos = div64_u64(pos, 100); ++ else ++ neg *= 100; ++ ++ return div64_u64(neg, pos); ++} ++ ++static inline unsigned long get_current_benefit(void) ++{ ++ u64 pos = benefit.pos; ++ u64 neg = benefit.neg; ++ u64 scanned = benefit.scanned; ++ ++ if (neg > pos) ++ return 0; ++ ++ return div64_u64((pos - neg), scanned); ++} ++ ++static inline int judge_rshash_direction(void) ++{ ++ u64 current_neg_ratio, stable_benefit; ++ u64 current_benefit, delta = 0; ++ int ret = STILL; ++ ++ /* ++ * Try to probe a value after boot, and again in case the system ++ * stays stable for a long time.
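++ * The check below fires when (fully_scanned_round & 0xFF) == 10, i.e. ++ * once every 256 fully-scanned rounds, forcing an OBSCURE re-probe.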
*/ ++ if ((fully_scanned_round & 0xFFULL) == 10) { ++ ret = OBSCURE; ++ goto out; ++ } ++ ++ current_neg_ratio = get_current_neg_ratio(); ++ ++ if (current_neg_ratio == 0) { ++ rshash_neg_cont_zero++; ++ if (rshash_neg_cont_zero > 2) ++ return GO_DOWN; ++ else ++ return STILL; ++ } ++ rshash_neg_cont_zero = 0; ++ ++ if (current_neg_ratio > 90) { ++ ret = GO_UP; ++ goto out; ++ } ++ ++ current_benefit = get_current_benefit(); ++ stable_benefit = rshash_state.stable_benefit; ++ ++ if (!stable_benefit) { ++ ret = OBSCURE; ++ goto out; ++ } ++ ++ if (current_benefit > stable_benefit) ++ delta = current_benefit - stable_benefit; ++ else if (current_benefit < stable_benefit) ++ delta = stable_benefit - current_benefit; ++ ++ delta = div64_u64(100 * delta , stable_benefit); ++ ++ if (delta > 50) { ++ rshash_cont_obscure++; ++ if (rshash_cont_obscure > 2) ++ return OBSCURE; ++ else ++ return STILL; ++ } ++ ++out: ++ rshash_cont_obscure = 0; ++ return ret; ++} ++ ++/** ++ * rshash_adjust() - The main function to control the random sampling state ++ * machine for hash strength adapting. ++ * ++ * return true if hash_strength has changed. ++ */ ++static inline int rshash_adjust(void) ++{ ++ unsigned long prev_hash_strength = hash_strength; ++ ++ if (!encode_benefit()) ++ return 0; ++ ++ switch (rshash_state.state) { ++ case RSHASH_STILL: ++ switch (judge_rshash_direction()) { ++ case GO_UP: ++ if (rshash_state.pre_direct == GO_DOWN) ++ hash_strength_delta = 0; ++ ++ inc_hash_strength(hash_strength_delta); ++ inc_hash_strength_delta(); ++ rshash_state.stable_benefit = get_current_benefit(); ++ rshash_state.pre_direct = GO_UP; ++ break; ++ ++ case GO_DOWN: ++ if (rshash_state.pre_direct == GO_UP) ++ hash_strength_delta = 0; ++ ++ dec_hash_strength(hash_strength_delta); ++ inc_hash_strength_delta(); ++ rshash_state.stable_benefit = get_current_benefit(); ++ rshash_state.pre_direct = GO_DOWN; ++ break; ++ ++ case OBSCURE: ++ rshash_state.stable_point = hash_strength; ++ rshash_state.turn_point_down = hash_strength; ++ rshash_state.turn_point_up = hash_strength; ++ rshash_state.turn_benefit_down = get_current_benefit(); ++ rshash_state.turn_benefit_up = get_current_benefit(); ++ rshash_state.lookup_window_index = 0; ++ rshash_state.state = RSHASH_TRYDOWN; ++ dec_hash_strength(hash_strength_delta); ++ inc_hash_strength_delta(); ++ break; ++ ++ case STILL: ++ break; ++ default: ++ BUG(); ++ } ++ break; ++ ++ case RSHASH_TRYDOWN: ++ if (rshash_state.lookup_window_index++ % 5 == 0) ++ rshash_state.below_count = 0; ++ ++ if (get_current_benefit() < rshash_state.stable_benefit) ++ rshash_state.below_count++; ++ else if (get_current_benefit() > ++ rshash_state.turn_benefit_down) { ++ rshash_state.turn_point_down = hash_strength; ++ rshash_state.turn_benefit_down = get_current_benefit(); ++ } ++ ++ if (rshash_state.below_count >= 3 || ++ judge_rshash_direction() == GO_UP || ++ hash_strength == 1) { ++ hash_strength = rshash_state.stable_point; ++ hash_strength_delta = 0; ++ inc_hash_strength(hash_strength_delta); ++ inc_hash_strength_delta(); ++ rshash_state.lookup_window_index = 0; ++ rshash_state.state = RSHASH_TRYUP; ++ hash_strength_delta = 0; ++ } else { ++ dec_hash_strength(hash_strength_delta); ++ inc_hash_strength_delta(); ++ } ++ break; ++ ++ case RSHASH_TRYUP: ++ if (rshash_state.lookup_window_index++ % 5 == 0) ++ rshash_state.below_count = 0; ++ ++ if (get_current_benefit() < rshash_state.turn_benefit_down) ++ rshash_state.below_count++; ++ else if (get_current_benefit() > rshash_state.turn_benefit_up) 
{ ++ rshash_state.turn_point_up = hash_strength; ++ rshash_state.turn_benefit_up = get_current_benefit(); ++ } ++ ++ if (rshash_state.below_count >= 3 || ++ judge_rshash_direction() == GO_DOWN || ++ hash_strength == HASH_STRENGTH_MAX) { ++ hash_strength = rshash_state.turn_benefit_up > ++ rshash_state.turn_benefit_down ? ++ rshash_state.turn_point_up : ++ rshash_state.turn_point_down; ++ ++ rshash_state.state = RSHASH_PRE_STILL; ++ } else { ++ inc_hash_strength(hash_strength_delta); ++ inc_hash_strength_delta(); ++ } ++ ++ break; ++ ++ case RSHASH_NEW: ++ case RSHASH_PRE_STILL: ++ rshash_state.stable_benefit = get_current_benefit(); ++ rshash_state.state = RSHASH_STILL; ++ hash_strength_delta = 0; ++ break; ++ default: ++ BUG(); ++ } ++ ++ /* rshash_neg = rshash_pos = 0; */ ++ reset_benefit(); ++ ++ if (prev_hash_strength != hash_strength) ++ stable_tree_delta_hash(prev_hash_strength); ++ ++ return prev_hash_strength != hash_strength; ++} ++ ++/** ++ * round_update_ladder() - The main function to do update of all the ++ * adjustments whenever a scan round is finished. ++ */ ++static noinline void round_update_ladder(void) ++{ ++ int i; ++ unsigned long dedup; ++ struct vma_slot *slot, *tmp_slot; ++ ++ for (i = 0; i < SCAN_LADDER_SIZE; i++) { ++ uksm_scan_ladder[i].flags &= ~UKSM_RUNG_ROUND_FINISHED; ++ } ++ ++ list_for_each_entry_safe(slot, tmp_slot, &vma_slot_dedup, dedup_list) { ++ ++ /* slot may be rung_rm_slot() when mm exits */ ++ if (slot->snode) { ++ dedup = cal_dedup_ratio_old(slot); ++ if (dedup && dedup >= uksm_abundant_threshold) ++ vma_rung_up(slot); ++ } ++ ++ slot->pages_bemerged = 0; ++ slot->pages_cowed = 0; ++ ++ list_del_init(&slot->dedup_list); ++ } ++} ++ ++static void uksm_del_vma_slot(struct vma_slot *slot) ++{ ++ int i, j; ++ struct rmap_list_entry *entry; ++ ++ if (slot->snode) { ++ /* ++ * In case it just failed when entering the rung, it's not ++ * necessary. 
++ */ ++ rung_rm_slot(slot); ++ } ++ ++ if (!list_empty(&slot->dedup_list)) ++ list_del(&slot->dedup_list); ++ ++ if (!slot->rmap_list_pool || !slot->pool_counts) { ++ /* In case it OOMed in uksm_vma_enter() */ ++ goto out; ++ } ++ ++ for (i = 0; i < slot->pool_size; i++) { ++ void *addr; ++ ++ if (!slot->rmap_list_pool[i]) ++ continue; ++ ++ addr = kmap(slot->rmap_list_pool[i]); ++ for (j = 0; j < PAGE_SIZE / sizeof(*entry); j++) { ++ entry = (struct rmap_list_entry *)addr + j; ++ if (is_addr(entry->addr)) ++ continue; ++ if (!entry->item) ++ continue; ++ ++ remove_rmap_item_from_tree(entry->item); ++ free_rmap_item(entry->item); ++ slot->pool_counts[i]--; ++ } ++ BUG_ON(slot->pool_counts[i]); ++ kunmap(slot->rmap_list_pool[i]); ++ __free_page(slot->rmap_list_pool[i]); ++ } ++ kfree(slot->rmap_list_pool); ++ kfree(slot->pool_counts); ++ ++out: ++ slot->rung = NULL; ++ BUG_ON(uksm_pages_total < slot->pages); ++ if (slot->flags & UKSM_SLOT_IN_UKSM) ++ uksm_pages_total -= slot->pages; ++ ++ if (slot->fully_scanned_round == fully_scanned_round) ++ scanned_virtual_pages -= slot->pages; ++ else ++ scanned_virtual_pages -= slot->pages_scanned; ++ free_vma_slot(slot); ++} ++ ++ ++#define SPIN_LOCK_PERIOD 32 ++static struct vma_slot *cleanup_slots[SPIN_LOCK_PERIOD]; ++static inline void cleanup_vma_slots(void) ++{ ++ struct vma_slot *slot; ++ int i; ++ ++ i = 0; ++ spin_lock(&vma_slot_list_lock); ++ while (!list_empty(&vma_slot_del)) { ++ slot = list_entry(vma_slot_del.next, ++ struct vma_slot, slot_list); ++ list_del(&slot->slot_list); ++ cleanup_slots[i++] = slot; ++ if (i == SPIN_LOCK_PERIOD) { ++ spin_unlock(&vma_slot_list_lock); ++ while (--i >= 0) ++ uksm_del_vma_slot(cleanup_slots[i]); ++ i = 0; ++ spin_lock(&vma_slot_list_lock); ++ } ++ } ++ spin_unlock(&vma_slot_list_lock); ++ ++ while (--i >= 0) ++ uksm_del_vma_slot(cleanup_slots[i]); ++} ++ ++/* ++ * exponential moving average formula ++ */ ++static inline unsigned long ema(unsigned long curr, unsigned long last_ema) ++{ ++ /* ++ * For a very high burst even the EMA cannot track well: a falsely ++ * high per-page time estimate feeds back as very high context-switch ++ * and rung-update overhead, which in turn raises the per-page time ++ * further, so the estimate may never converge. ++ * ++ * Instead, we try to approach this value in a binary manner. ++ */ ++ if (curr > last_ema * 10) ++ return last_ema * 2; ++ ++ return (EMA_ALPHA * curr + (100 - EMA_ALPHA) * last_ema) / 100; ++} ++ ++/* ++ * convert cpu ratio in 1/TIME_RATIO_SCALE configured by user to ++ * nanoseconds based on current uksm_sleep_jiffies. ++ */ ++static inline unsigned long cpu_ratio_to_nsec(unsigned int ratio) ++{ ++ return NSEC_PER_USEC * jiffies_to_usecs(uksm_sleep_jiffies) / ++ (TIME_RATIO_SCALE - ratio) * ratio; ++} ++ ++ ++static inline unsigned long rung_real_ratio(int cpu_time_ratio) ++{ ++ unsigned long ret; ++ ++ BUG_ON(!cpu_time_ratio); ++ ++ if (cpu_time_ratio > 0) ++ ret = cpu_time_ratio; ++ else ++ ret = (unsigned long)(-cpu_time_ratio) * ++ uksm_max_cpu_percentage / 100UL; ++ ++ return ret ?
ret : 1; ++} ++ ++static noinline void uksm_calc_scan_pages(void) ++{ ++ struct scan_rung *ladder = uksm_scan_ladder; ++ unsigned long sleep_usecs, nsecs; ++ unsigned long ratio; ++ int i; ++ unsigned long per_page; ++ ++ if (uksm_ema_page_time > 100000 || ++ (((unsigned long) uksm_eval_round & (256UL - 1)) == 0UL)) ++ uksm_ema_page_time = UKSM_PAGE_TIME_DEFAULT; ++ ++ per_page = uksm_ema_page_time; ++ BUG_ON(!per_page); ++ ++ /* ++ * For every 8 eval round, we try to probe a uksm_sleep_jiffies value ++ * based on saved user input. ++ */ ++ if (((unsigned long) uksm_eval_round & (8UL - 1)) == 0UL) ++ uksm_sleep_jiffies = uksm_sleep_saved; ++ ++ /* We require a rung scan at least 1 page in a period. */ ++ nsecs = per_page; ++ ratio = rung_real_ratio(ladder[0].cpu_ratio); ++ if (cpu_ratio_to_nsec(ratio) < nsecs) { ++ sleep_usecs = nsecs * (TIME_RATIO_SCALE - ratio) / ratio ++ / NSEC_PER_USEC; ++ uksm_sleep_jiffies = usecs_to_jiffies(sleep_usecs) + 1; ++ } ++ ++ for (i = 0; i < SCAN_LADDER_SIZE; i++) { ++ ratio = rung_real_ratio(ladder[i].cpu_ratio); ++ ladder[i].pages_to_scan = cpu_ratio_to_nsec(ratio) / ++ per_page; ++ BUG_ON(!ladder[i].pages_to_scan); ++ uksm_calc_rung_step(&ladder[i], per_page, ratio); ++ } ++} ++ ++/* ++ * From the scan time of this round (ns) to next expected min sleep time ++ * (ms), be careful of the possible overflows. ratio is taken from ++ * rung_real_ratio() ++ */ ++static inline ++unsigned int scan_time_to_sleep(unsigned long long scan_time, unsigned long ratio) ++{ ++ scan_time >>= 20; /* to msec level now */ ++ BUG_ON(scan_time > (ULONG_MAX / TIME_RATIO_SCALE)); ++ ++ return (unsigned int) ((unsigned long) scan_time * ++ (TIME_RATIO_SCALE - ratio) / ratio); ++} ++ ++#define __round_mask(x, y) ((__typeof__(x))((y)-1)) ++#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1) ++ ++static inline unsigned long vma_pool_size(struct vma_slot *slot) ++{ ++ return round_up(sizeof(struct rmap_list_entry) * slot->pages, ++ PAGE_SIZE) >> PAGE_SHIFT; ++} ++ ++static void uksm_vma_enter(struct vma_slot **slots, unsigned long num) ++{ ++ struct scan_rung *rung; ++ unsigned long pool_size, i; ++ struct vma_slot *slot; ++ int failed; ++ ++ rung = &uksm_scan_ladder[0]; ++ ++ failed = 0; ++ for (i = 0; i < num; i++) { ++ slot = slots[i]; ++ ++ pool_size = vma_pool_size(slot); ++ slot->rmap_list_pool = kzalloc(sizeof(struct page *) * ++ pool_size, GFP_KERNEL); ++ if (!slot->rmap_list_pool) ++ break; ++ ++ slot->pool_counts = kzalloc(sizeof(unsigned int) * pool_size, ++ GFP_KERNEL); ++ if (!slot->pool_counts) { ++ kfree(slot->rmap_list_pool); ++ break; ++ } ++ ++ slot->pool_size = pool_size; ++ BUG_ON(CAN_OVERFLOW_U64(uksm_pages_total, slot->pages)); ++ slot->flags |= UKSM_SLOT_IN_UKSM; ++ uksm_pages_total += slot->pages; ++ } ++ ++ if (i) ++ rung_add_new_slots(rung, slots, i); ++ ++ return; ++} ++ ++static struct vma_slot *batch_slots[SLOT_TREE_NODE_STORE_SIZE]; ++ ++static void uksm_enter_all_slots(void) ++{ ++ struct vma_slot *slot; ++ unsigned long index; ++ struct list_head empty_vma_list; ++ int i; ++ ++ i = 0; ++ index = 0; ++ INIT_LIST_HEAD(&empty_vma_list); ++ ++ spin_lock(&vma_slot_list_lock); ++ while (!list_empty(&vma_slot_new)) { ++ slot = list_entry(vma_slot_new.next, ++ struct vma_slot, slot_list); ++ ++ if (!slot->vma->anon_vma) { ++ list_move(&slot->slot_list, &empty_vma_list); ++ } else if (vma_can_enter(slot->vma)) { ++ batch_slots[index++] = slot; ++ list_del_init(&slot->slot_list); ++ } else { ++ list_move(&slot->slot_list, &vma_slot_noadd); ++ } ++ ++ if 
(++i == SPIN_LOCK_PERIOD || ++ (index && !(index % SLOT_TREE_NODE_STORE_SIZE))) { ++ spin_unlock(&vma_slot_list_lock); ++ ++ if (index && !(index % SLOT_TREE_NODE_STORE_SIZE)) { ++ uksm_vma_enter(batch_slots, index); ++ index = 0; ++ } ++ i = 0; ++ cond_resched(); ++ spin_lock(&vma_slot_list_lock); ++ } ++ } ++ ++ list_splice(&empty_vma_list, &vma_slot_new); ++ ++ spin_unlock(&vma_slot_list_lock); ++ ++ if (index) ++ uksm_vma_enter(batch_slots, index); ++ ++} ++ ++static inline int rung_round_finished(struct scan_rung *rung) ++{ ++ return rung->flags & UKSM_RUNG_ROUND_FINISHED; ++} ++ ++static inline void judge_slot(struct vma_slot *slot) ++{ ++ struct scan_rung *rung = slot->rung; ++ unsigned long dedup; ++ int deleted; ++ ++ dedup = cal_dedup_ratio(slot); ++ if (vma_fully_scanned(slot) && uksm_thrash_threshold) ++ deleted = vma_rung_enter(slot, &uksm_scan_ladder[0]); ++ else if (dedup && dedup >= uksm_abundant_threshold) ++ deleted = vma_rung_up(slot); ++ else ++ deleted = vma_rung_down(slot); ++ ++ slot->pages_merged = 0; ++ slot->pages_cowed = 0; ++ ++ if (vma_fully_scanned(slot)) ++ slot->pages_scanned = 0; ++ ++ slot->last_scanned = slot->pages_scanned; ++ ++ /* If its deleted in above, then rung was already advanced. */ ++ if (!deleted) ++ advance_current_scan(rung); ++} ++ ++ ++static inline int hash_round_finished(void) ++{ ++ if (scanned_virtual_pages > (uksm_pages_total >> 2)) { ++ scanned_virtual_pages = 0; ++ if (uksm_pages_scanned) ++ fully_scanned_round++; ++ ++ return 1; ++ } else { ++ return 0; ++ } ++} ++ ++#define UKSM_MMSEM_BATCH 5 ++#define BUSY_RETRY 100 ++ ++/** ++ * uksm_do_scan() - the main worker function. ++ */ ++static noinline void uksm_do_scan(void) ++{ ++ struct vma_slot *slot, *iter; ++ struct mm_struct *busy_mm; ++ unsigned char round_finished, all_rungs_emtpy; ++ int i, err, mmsem_batch; ++ unsigned long pcost; ++ long long delta_exec; ++ unsigned long vpages, max_cpu_ratio; ++ unsigned long long start_time, end_time, scan_time; ++ unsigned int expected_jiffies; ++ ++ might_sleep(); ++ ++ vpages = 0; ++ ++ start_time = task_sched_runtime(current); ++ max_cpu_ratio = 0; ++ mmsem_batch = 0; ++ ++ for (i = 0; i < SCAN_LADDER_SIZE;) { ++ struct scan_rung *rung = &uksm_scan_ladder[i]; ++ unsigned long ratio; ++ int busy_retry; ++ ++ if (!rung->pages_to_scan) { ++ i++; ++ continue; ++ } ++ ++ if (!rung->vma_root.num) { ++ rung->pages_to_scan = 0; ++ i++; ++ continue; ++ } ++ ++ ratio = rung_real_ratio(rung->cpu_ratio); ++ if (ratio > max_cpu_ratio) ++ max_cpu_ratio = ratio; ++ ++ busy_retry = BUSY_RETRY; ++ /* ++ * Do not consider rung_round_finished() here, just used up the ++ * rung->pages_to_scan quota. 
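++ * So a single uksm_do_scan() pass spends the rung's whole pages_to_scan ++ * budget (computed by uksm_calc_scan_pages()) before moving on, even if ++ * that crosses a scan-round boundary.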
++ */ ++ while (rung->pages_to_scan && rung->vma_root.num && ++ likely(!freezing(current))) { ++ int reset = 0; ++ ++ slot = rung->current_scan; ++ ++ BUG_ON(vma_fully_scanned(slot)); ++ ++ if (mmsem_batch) { ++ err = 0; ++ } else { ++ err = try_down_read_slot_mmap_sem(slot); ++ } ++ ++ if (err == -ENOENT) { ++rm_slot: ++ rung_rm_slot(slot); ++ continue; ++ } ++ ++ busy_mm = slot->mm; ++ ++ if (err == -EBUSY) { ++ /* skip other vmas on the same mm */ ++ do { ++ reset = advance_current_scan(rung); ++ iter = rung->current_scan; ++ busy_retry--; ++ if (iter->vma->vm_mm != busy_mm || ++ !busy_retry || reset) ++ break; ++ } while (1); ++ ++ if (iter->vma->vm_mm != busy_mm) { ++ continue; ++ } else { ++ /* scan round finished */ ++ break; ++ } ++ } ++ ++ BUG_ON(!vma_can_enter(slot->vma)); ++ if (uksm_test_exit(slot->vma->vm_mm)) { ++ mmsem_batch = 0; ++ up_read(&slot->vma->vm_mm->mmap_sem); ++ goto rm_slot; ++ } ++ ++ if (mmsem_batch) ++ mmsem_batch--; ++ else ++ mmsem_batch = UKSM_MMSEM_BATCH; ++ ++ /* OK, we have taken the mmap_sem, ready to scan */ ++ scan_vma_one_page(slot); ++ rung->pages_to_scan--; ++ vpages++; ++ ++ if (rung->current_offset + rung->step > slot->pages - 1 ++ || vma_fully_scanned(slot)) { ++ up_read(&slot->vma->vm_mm->mmap_sem); ++ judge_slot(slot); ++ mmsem_batch = 0; ++ } else { ++ rung->current_offset += rung->step; ++ if (!mmsem_batch) ++ up_read(&slot->vma->vm_mm->mmap_sem); ++ } ++ ++ busy_retry = BUSY_RETRY; ++ cond_resched(); ++ } ++ ++ if (mmsem_batch) { ++ up_read(&slot->vma->vm_mm->mmap_sem); ++ mmsem_batch = 0; ++ } ++ ++ if (freezing(current)) ++ break; ++ ++ cond_resched(); ++ } ++ end_time = task_sched_runtime(current); ++ delta_exec = end_time - start_time; ++ ++ if (freezing(current)) ++ return; ++ ++ cleanup_vma_slots(); ++ uksm_enter_all_slots(); ++ ++ round_finished = 1; ++ all_rungs_emtpy = 1; ++ for (i = 0; i < SCAN_LADDER_SIZE; i++) { ++ struct scan_rung *rung = &uksm_scan_ladder[i]; ++ ++ if (rung->vma_root.num) { ++ all_rungs_emtpy = 0; ++ if (!rung_round_finished(rung)) ++ round_finished = 0; ++ } ++ } ++ ++ if (all_rungs_emtpy) ++ round_finished = 0; ++ ++ if (round_finished) { ++ round_update_ladder(); ++ uksm_eval_round++; ++ ++ if (hash_round_finished() && rshash_adjust()) { ++ /* Reset the unstable root iff hash strength changed */ ++ uksm_hash_round++; ++ root_unstable_tree = RB_ROOT; ++ free_all_tree_nodes(&unstable_tree_node_list); ++ } ++ ++ /* ++ * A number of pages can hang around indefinitely on per-cpu ++ * pagevecs, raised page count preventing write_protect_page ++ * from merging them. Though it doesn't really matter much, ++ * it is puzzling to see some stuck in pages_volatile until ++ * other activity jostles them out, and they also prevented ++ * LTP's KSM test from succeeding deterministically; so drain ++ * them here (here rather than on entry to uksm_do_scan(), ++ * so we don't IPI too often when pages_to_scan is set low).
++ */ ++ lru_add_drain_all(); ++ } ++ ++ ++ if (vpages && delta_exec > 0) { ++ pcost = (unsigned long) delta_exec / vpages; ++ if (likely(uksm_ema_page_time)) ++ uksm_ema_page_time = ema(pcost, uksm_ema_page_time); ++ else ++ uksm_ema_page_time = pcost; ++ } ++ ++ uksm_calc_scan_pages(); ++ uksm_sleep_real = uksm_sleep_jiffies; ++ /* in case of radical cpu bursts, apply the upper bound */ ++ end_time = task_sched_runtime(current); ++ if (max_cpu_ratio && end_time > start_time) { ++ scan_time = end_time - start_time; ++ expected_jiffies = msecs_to_jiffies( ++ scan_time_to_sleep(scan_time, max_cpu_ratio)); ++ ++ if (expected_jiffies > uksm_sleep_real) ++ uksm_sleep_real = expected_jiffies; ++ ++ /* We have a 1 second up bound for responsiveness. */ ++ if (jiffies_to_msecs(uksm_sleep_real) > MSEC_PER_SEC) ++ uksm_sleep_real = msecs_to_jiffies(1000); ++ } ++ ++ return; ++} ++ ++static int ksmd_should_run(void) ++{ ++ return uksm_run & UKSM_RUN_MERGE; ++} ++ ++static int uksm_scan_thread(void *nothing) ++{ ++ set_freezable(); ++ set_user_nice(current, 5); ++ ++ while (!kthread_should_stop()) { ++ mutex_lock(&uksm_thread_mutex); ++ if (ksmd_should_run()) { ++ uksm_do_scan(); ++ } ++ mutex_unlock(&uksm_thread_mutex); ++ ++ try_to_freeze(); ++ ++ if (ksmd_should_run()) { ++ schedule_timeout_interruptible(uksm_sleep_real); ++ uksm_sleep_times++; ++ } else { ++ wait_event_freezable(uksm_thread_wait, ++ ksmd_should_run() || kthread_should_stop()); ++ } ++ } ++ return 0; ++} ++ ++int page_referenced_ksm(struct page *page, struct mem_cgroup *memcg, ++ unsigned long *vm_flags) ++{ ++ struct stable_node *stable_node; ++ struct node_vma *node_vma; ++ struct rmap_item *rmap_item; ++ unsigned int mapcount = page_mapcount(page); ++ int referenced = 0; ++ int search_new_forks = 0; ++ unsigned long address; ++ ++ VM_BUG_ON(!PageKsm(page)); ++ VM_BUG_ON(!PageLocked(page)); ++ ++ stable_node = page_stable_node(page); ++ if (!stable_node) ++ return 0; ++ ++ ++again: ++ hlist_for_each_entry(node_vma, &stable_node->hlist, hlist) { ++ hlist_for_each_entry(rmap_item, &node_vma->rmap_hlist, hlist) { ++ struct anon_vma *anon_vma = rmap_item->anon_vma; ++ struct anon_vma_chain *vmac; ++ struct vm_area_struct *vma; ++ ++ anon_vma_lock_read(anon_vma); ++ anon_vma_interval_tree_foreach(vmac, &anon_vma->rb_root, ++ 0, ULONG_MAX) { ++ ++ vma = vmac->vma; ++ address = get_rmap_addr(rmap_item); ++ ++ if (address < vma->vm_start || ++ address >= vma->vm_end) ++ continue; ++ /* ++ * Initially we examine only the vma which ++ * covers this rmap_item; but later, if there ++ * is still work to do, we examine covering ++ * vmas in other mms: in case they were forked ++ * from the original since ksmd passed. 
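++ * (search_new_forks makes this a two-pass walk: pass 0 visits only the ++ * vma recorded in rmap_item->slot, pass 1 visits the other vmas that ++ * share the same anon_vma.)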
++ */ ++ if ((rmap_item->slot->vma == vma) == ++ search_new_forks) ++ continue; ++ ++ if (memcg && ++ !mm_match_cgroup(vma->vm_mm, memcg)) ++ continue; ++ ++ referenced += ++ page_referenced_one(page, vma, ++ address, &mapcount, vm_flags); ++ if (!search_new_forks || !mapcount) ++ break; ++ } ++ ++ anon_vma_unlock_read(anon_vma); ++ if (!mapcount) ++ goto out; ++ } ++ } ++ if (!search_new_forks++) ++ goto again; ++out: ++ return referenced; ++} ++ ++int try_to_unmap_ksm(struct page *page, enum ttu_flags flags) ++{ ++ struct stable_node *stable_node; ++ struct node_vma *node_vma; ++ struct rmap_item *rmap_item; ++ int ret = SWAP_AGAIN; ++ int search_new_forks = 0; ++ unsigned long address; ++ ++ VM_BUG_ON(!PageKsm(page)); ++ VM_BUG_ON(!PageLocked(page)); ++ ++ stable_node = page_stable_node(page); ++ if (!stable_node) ++ return SWAP_FAIL; ++again: ++ hlist_for_each_entry(node_vma, &stable_node->hlist, hlist) { ++ hlist_for_each_entry(rmap_item, &node_vma->rmap_hlist, hlist) { ++ struct anon_vma *anon_vma = rmap_item->anon_vma; ++ struct anon_vma_chain *vmac; ++ struct vm_area_struct *vma; ++ ++ anon_vma_lock_read(anon_vma); ++ anon_vma_interval_tree_foreach(vmac, &anon_vma->rb_root, ++ 0, ULONG_MAX) { ++ vma = vmac->vma; ++ address = get_rmap_addr(rmap_item); ++ ++ if (address < vma->vm_start || ++ address >= vma->vm_end) ++ continue; ++ /* ++ * Initially we examine only the vma which ++ * covers this rmap_item; but later, if there ++ * is still work to do, we examine covering ++ * vmas in other mms: in case they were forked ++ * from the original since ksmd passed. ++ */ ++ if ((rmap_item->slot->vma == vma) == ++ search_new_forks) ++ continue; ++ ++ ret = try_to_unmap_one(page, vma, ++ address, flags); ++ if (ret != SWAP_AGAIN || !page_mapped(page)) { ++ anon_vma_unlock_read(anon_vma); ++ goto out; ++ } ++ } ++ anon_vma_unlock_read(anon_vma); ++ } ++ } ++ if (!search_new_forks++) ++ goto again; ++out: ++ return ret; ++} ++ ++#ifdef CONFIG_MIGRATION ++int rmap_walk_ksm(struct page *page, int (*rmap_one)(struct page *, ++ struct vm_area_struct *, unsigned long, void *), void *arg) ++{ ++ struct stable_node *stable_node; ++ struct node_vma *node_vma; ++ struct rmap_item *rmap_item; ++ int ret = SWAP_AGAIN; ++ int search_new_forks = 0; ++ unsigned long address; ++ ++ VM_BUG_ON(!PageKsm(page)); ++ VM_BUG_ON(!PageLocked(page)); ++ ++ stable_node = page_stable_node(page); ++ if (!stable_node) ++ return ret; ++again: ++ hlist_for_each_entry(node_vma, &stable_node->hlist, hlist) { ++ hlist_for_each_entry(rmap_item, &node_vma->rmap_hlist, hlist) { ++ struct anon_vma *anon_vma = rmap_item->anon_vma; ++ struct anon_vma_chain *vmac; ++ struct vm_area_struct *vma; ++ ++ anon_vma_lock_read(anon_vma); ++ anon_vma_interval_tree_foreach(vmac, &anon_vma->rb_root, ++ 0, ULONG_MAX) { ++ vma = vmac->vma; ++ address = get_rmap_addr(rmap_item); ++ ++ if (address < vma->vm_start || ++ address >= vma->vm_end) ++ continue; ++ ++ if ((rmap_item->slot->vma == vma) == ++ search_new_forks) ++ continue; ++ ++ ret = rmap_one(page, vma, address, arg); ++ if (ret != SWAP_AGAIN) { ++ anon_vma_unlock_read(anon_vma); ++ goto out; ++ } ++ } ++ anon_vma_unlock_read(anon_vma); ++ } ++ } ++ if (!search_new_forks++) ++ goto again; ++out: ++ return ret; ++} ++ ++/* Common ksm interface but may be specific to uksm */ ++void ksm_migrate_page(struct page *newpage, struct page *oldpage) ++{ ++ struct stable_node *stable_node; ++ ++ VM_BUG_ON(!PageLocked(oldpage)); ++ VM_BUG_ON(!PageLocked(newpage)); ++ VM_BUG_ON(newpage->mapping != 
oldpage->mapping); ++ ++ stable_node = page_stable_node(newpage); ++ if (stable_node) { ++ VM_BUG_ON(stable_node->kpfn != page_to_pfn(oldpage)); ++ stable_node->kpfn = page_to_pfn(newpage); ++ } ++} ++#endif /* CONFIG_MIGRATION */ ++ ++#ifdef CONFIG_MEMORY_HOTREMOVE ++static struct stable_node *uksm_check_stable_tree(unsigned long start_pfn, ++ unsigned long end_pfn) ++{ ++ struct rb_node *node; ++ ++ for (node = rb_first(root_stable_treep); node; node = rb_next(node)) { ++ struct stable_node *stable_node; ++ ++ stable_node = rb_entry(node, struct stable_node, node); ++ if (stable_node->kpfn >= start_pfn && ++ stable_node->kpfn < end_pfn) ++ return stable_node; ++ } ++ return NULL; ++} ++ ++static int uksm_memory_callback(struct notifier_block *self, ++ unsigned long action, void *arg) ++{ ++ struct memory_notify *mn = arg; ++ struct stable_node *stable_node; ++ ++ switch (action) { ++ case MEM_GOING_OFFLINE: ++ /* ++ * Keep it very simple for now: just lock out ksmd and ++ * MADV_UNMERGEABLE while any memory is going offline. ++ * mutex_lock_nested() is necessary because lockdep was alarmed ++ * that here we take uksm_thread_mutex inside notifier chain ++ * mutex, and later take notifier chain mutex inside ++ * uksm_thread_mutex to unlock it. But that's safe because both ++ * are inside mem_hotplug_mutex. ++ */ ++ mutex_lock_nested(&uksm_thread_mutex, SINGLE_DEPTH_NESTING); ++ break; ++ ++ case MEM_OFFLINE: ++ /* ++ * Most of the work is done by page migration; but there might ++ * be a few stable_nodes left over, still pointing to struct ++ * pages which have been offlined: prune those from the tree. ++ */ ++ while ((stable_node = uksm_check_stable_tree(mn->start_pfn, ++ mn->start_pfn + mn->nr_pages)) != NULL) ++ remove_node_from_stable_tree(stable_node, 1, 1); ++ /* fallthrough */ ++ ++ case MEM_CANCEL_OFFLINE: ++ mutex_unlock(&uksm_thread_mutex); ++ break; ++ } ++ return NOTIFY_OK; ++} ++#endif /* CONFIG_MEMORY_HOTREMOVE */ ++ ++#ifdef CONFIG_SYSFS ++/* ++ * This all compiles without CONFIG_SYSFS, but is a waste of space. 
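++ * ++ * Everything below lands in /sys/kernel/mm/uksm/: the attribute group is ++ * registered on mm_kobj with .name = "uksm" at init time.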
++ */ ++ ++#define UKSM_ATTR_RO(_name) \ ++ static struct kobj_attribute _name##_attr = __ATTR_RO(_name) ++#define UKSM_ATTR(_name) \ ++ static struct kobj_attribute _name##_attr = \ ++ __ATTR(_name, 0644, _name##_show, _name##_store) ++ ++static ssize_t max_cpu_percentage_show(struct kobject *kobj, ++ struct kobj_attribute *attr, char *buf) ++{ ++ return sprintf(buf, "%u\n", uksm_max_cpu_percentage); ++} ++ ++static ssize_t max_cpu_percentage_store(struct kobject *kobj, ++ struct kobj_attribute *attr, ++ const char *buf, size_t count) ++{ ++ unsigned long max_cpu_percentage; ++ int err; ++ ++ err = strict_strtoul(buf, 10, &max_cpu_percentage); ++ if (err || max_cpu_percentage > 100) ++ return -EINVAL; ++ ++ if (max_cpu_percentage == 100) ++ max_cpu_percentage = 99; ++ else if (max_cpu_percentage < 10) ++ max_cpu_percentage = 10; ++ ++ uksm_max_cpu_percentage = max_cpu_percentage; ++ ++ return count; ++} ++UKSM_ATTR(max_cpu_percentage); ++ ++static ssize_t sleep_millisecs_show(struct kobject *kobj, ++ struct kobj_attribute *attr, char *buf) ++{ ++ return sprintf(buf, "%u\n", jiffies_to_msecs(uksm_sleep_jiffies)); ++} ++ ++static ssize_t sleep_millisecs_store(struct kobject *kobj, ++ struct kobj_attribute *attr, ++ const char *buf, size_t count) ++{ ++ unsigned long msecs; ++ int err; ++ ++ err = strict_strtoul(buf, 10, &msecs); ++ if (err || msecs > MSEC_PER_SEC) ++ return -EINVAL; ++ ++ uksm_sleep_jiffies = msecs_to_jiffies(msecs); ++ uksm_sleep_saved = uksm_sleep_jiffies; ++ ++ return count; ++} ++UKSM_ATTR(sleep_millisecs); ++ ++ ++static ssize_t cpu_governor_show(struct kobject *kobj, ++ struct kobj_attribute *attr, char *buf) ++{ ++ int n = sizeof(uksm_cpu_governor_str) / sizeof(char *); ++ int i; ++ ++ buf[0] = '\0'; ++ for (i = 0; i < n ; i++) { ++ if (uksm_cpu_governor == i) ++ strcat(buf, "["); ++ ++ strcat(buf, uksm_cpu_governor_str[i]); ++ ++ if (uksm_cpu_governor == i) ++ strcat(buf, "]"); ++ ++ strcat(buf, " "); ++ } ++ strcat(buf, "\n"); ++ ++ return strlen(buf); ++} ++ ++static inline void init_performance_values(void) ++{ ++ int i; ++ struct scan_rung *rung; ++ struct uksm_cpu_preset_s *preset = uksm_cpu_preset + uksm_cpu_governor; ++ ++ ++ for (i = 0; i < SCAN_LADDER_SIZE; i++) { ++ rung = uksm_scan_ladder + i; ++ rung->cpu_ratio = preset->cpu_ratio[i]; ++ rung->cover_msecs = preset->cover_msecs[i]; ++ } ++ ++ uksm_max_cpu_percentage = preset->max_cpu; ++} ++ ++static ssize_t cpu_governor_store(struct kobject *kobj, ++ struct kobj_attribute *attr, ++ const char *buf, size_t count) ++{ ++ int n = sizeof(uksm_cpu_governor_str) / sizeof(char *); ++ ++ for (n--; n >=0 ; n--) { ++ if (!strncmp(buf, uksm_cpu_governor_str[n], ++ strlen(uksm_cpu_governor_str[n]))) ++ break; ++ } ++ ++ if (n < 0) ++ return -EINVAL; ++ else ++ uksm_cpu_governor = n; ++ ++ init_performance_values(); ++ ++ return count; ++} ++UKSM_ATTR(cpu_governor); ++ ++static ssize_t run_show(struct kobject *kobj, struct kobj_attribute *attr, ++ char *buf) ++{ ++ return sprintf(buf, "%u\n", uksm_run); ++} ++ ++static ssize_t run_store(struct kobject *kobj, struct kobj_attribute *attr, ++ const char *buf, size_t count) ++{ ++ int err; ++ unsigned long flags; ++ ++ err = strict_strtoul(buf, 10, &flags); ++ if (err || flags > UINT_MAX) ++ return -EINVAL; ++ if (flags > UKSM_RUN_MERGE) ++ return -EINVAL; ++ ++ mutex_lock(&uksm_thread_mutex); ++ if (uksm_run != flags) { ++ uksm_run = flags; ++ } ++ mutex_unlock(&uksm_thread_mutex); ++ ++ if (flags & UKSM_RUN_MERGE) ++ wake_up_interruptible(&uksm_thread_wait); ++ ++ 
return count; ++} ++UKSM_ATTR(run); ++ ++static ssize_t abundant_threshold_show(struct kobject *kobj, ++ struct kobj_attribute *attr, char *buf) ++{ ++ return sprintf(buf, "%u\n", uksm_abundant_threshold); ++} ++ ++static ssize_t abundant_threshold_store(struct kobject *kobj, ++ struct kobj_attribute *attr, ++ const char *buf, size_t count) ++{ ++ int err; ++ unsigned long flags; ++ ++ err = strict_strtoul(buf, 10, &flags); ++ if (err || flags > 99) ++ return -EINVAL; ++ ++ uksm_abundant_threshold = flags; ++ ++ return count; ++} ++UKSM_ATTR(abundant_threshold); ++ ++static ssize_t thrash_threshold_show(struct kobject *kobj, ++ struct kobj_attribute *attr, char *buf) ++{ ++ return sprintf(buf, "%u\n", uksm_thrash_threshold); ++} ++ ++static ssize_t thrash_threshold_store(struct kobject *kobj, ++ struct kobj_attribute *attr, ++ const char *buf, size_t count) ++{ ++ int err; ++ unsigned long flags; ++ ++ err = strict_strtoul(buf, 10, &flags); ++ if (err || flags > 99) ++ return -EINVAL; ++ ++ uksm_thrash_threshold = flags; ++ ++ return count; ++} ++UKSM_ATTR(thrash_threshold); ++ ++static ssize_t cpu_ratios_show(struct kobject *kobj, ++ struct kobj_attribute *attr, char *buf) ++{ ++ int i, size; ++ struct scan_rung *rung; ++ char *p = buf; ++ ++ for (i = 0; i < SCAN_LADDER_SIZE; i++) { ++ rung = &uksm_scan_ladder[i]; ++ ++ if (rung->cpu_ratio > 0) ++ size = sprintf(p, "%d ", rung->cpu_ratio); ++ else ++ size = sprintf(p, "MAX/%d ", ++ TIME_RATIO_SCALE / -rung->cpu_ratio); ++ ++ p += size; ++ } ++ ++ *p++ = '\n'; ++ *p = '\0'; ++ ++ return p - buf; ++} ++ ++static ssize_t cpu_ratios_store(struct kobject *kobj, ++ struct kobj_attribute *attr, ++ const char *buf, size_t count) ++{ ++ int i, cpuratios[SCAN_LADDER_SIZE], err; ++ unsigned long value; ++ struct scan_rung *rung; ++ char *p, *end = NULL; ++ ++ p = kzalloc(count, GFP_KERNEL); ++ if (!p) ++ return -ENOMEM; ++ ++ memcpy(p, buf, count); ++ ++ for (i = 0; i < SCAN_LADDER_SIZE; i++) { ++ if (i != SCAN_LADDER_SIZE -1) { ++ end = strchr(p, ' '); ++ if (!end) ++ return -EINVAL; ++ ++ *end = '\0'; ++ } ++ ++ if (strstr(p, "MAX/")) { ++ p = strchr(p, '/') + 1; ++ err = strict_strtoul(p, 10, &value); ++ if (err || value > TIME_RATIO_SCALE || !value) ++ return -EINVAL; ++ ++ cpuratios[i] = - (int) (TIME_RATIO_SCALE / value); ++ } else { ++ err = strict_strtoul(p, 10, &value); ++ if (err || value > TIME_RATIO_SCALE || !value) ++ return -EINVAL; ++ ++ cpuratios[i] = value; ++ } ++ ++ p = end + 1; ++ } ++ ++ for (i = 0; i < SCAN_LADDER_SIZE; i++) { ++ rung = &uksm_scan_ladder[i]; ++ ++ rung->cpu_ratio = cpuratios[i]; ++ } ++ ++ return count; ++} ++UKSM_ATTR(cpu_ratios); ++ ++static ssize_t eval_intervals_show(struct kobject *kobj, ++ struct kobj_attribute *attr, char *buf) ++{ ++ int i, size; ++ struct scan_rung *rung; ++ char *p = buf; ++ ++ for (i = 0; i < SCAN_LADDER_SIZE; i++) { ++ rung = &uksm_scan_ladder[i]; ++ size = sprintf(p, "%u ", rung->cover_msecs); ++ p += size; ++ } ++ ++ *p++ = '\n'; ++ *p = '\0'; ++ ++ return p - buf; ++} ++ ++static ssize_t eval_intervals_store(struct kobject *kobj, ++ struct kobj_attribute *attr, ++ const char *buf, size_t count) ++{ ++ int i, err; ++ unsigned long values[SCAN_LADDER_SIZE]; ++ struct scan_rung *rung; ++ char *p, *end = NULL; ++ ++ p = kzalloc(count, GFP_KERNEL); ++ if (!p) ++ return -ENOMEM; ++ ++ memcpy(p, buf, count); ++ ++ for (i = 0; i < SCAN_LADDER_SIZE; i++) { ++ if (i != SCAN_LADDER_SIZE -1) { ++ end = strchr(p, ' '); ++ if (!end) ++ return -EINVAL; ++ ++ *end = '\0'; ++ } ++ ++ err = 
strict_strtoul(p, 10, &values[i]); ++ if (err) ++ return -EINVAL; ++ ++ p = end + 1; ++ } ++ ++ for (i = 0; i < SCAN_LADDER_SIZE; i++) { ++ rung = &uksm_scan_ladder[i]; ++ ++ rung->cover_msecs = values[i]; ++ } ++ ++ return count; ++} ++UKSM_ATTR(eval_intervals); ++ ++static ssize_t ema_per_page_time_show(struct kobject *kobj, ++ struct kobj_attribute *attr, char *buf) ++{ ++ return sprintf(buf, "%lu\n", uksm_ema_page_time); ++} ++UKSM_ATTR_RO(ema_per_page_time); ++ ++static ssize_t pages_shared_show(struct kobject *kobj, ++ struct kobj_attribute *attr, char *buf) ++{ ++ return sprintf(buf, "%lu\n", uksm_pages_shared); ++} ++UKSM_ATTR_RO(pages_shared); ++ ++static ssize_t pages_sharing_show(struct kobject *kobj, ++ struct kobj_attribute *attr, char *buf) ++{ ++ return sprintf(buf, "%lu\n", uksm_pages_sharing); ++} ++UKSM_ATTR_RO(pages_sharing); ++ ++static ssize_t pages_unshared_show(struct kobject *kobj, ++ struct kobj_attribute *attr, char *buf) ++{ ++ return sprintf(buf, "%lu\n", uksm_pages_unshared); ++} ++UKSM_ATTR_RO(pages_unshared); ++ ++static ssize_t full_scans_show(struct kobject *kobj, ++ struct kobj_attribute *attr, char *buf) ++{ ++ return sprintf(buf, "%llu\n", fully_scanned_round); ++} ++UKSM_ATTR_RO(full_scans); ++ ++static ssize_t pages_scanned_show(struct kobject *kobj, ++ struct kobj_attribute *attr, char *buf) ++{ ++ unsigned long base = 0; ++ u64 delta, ret; ++ ++ if (pages_scanned_stored) { ++ base = pages_scanned_base; ++ ret = pages_scanned_stored; ++ delta = uksm_pages_scanned >> base; ++ if (CAN_OVERFLOW_U64(ret, delta)) { ++ ret >>= 1; ++ delta >>= 1; ++ base++; ++ ret += delta; ++ } ++ } else { ++ ret = uksm_pages_scanned; ++ } ++ ++ while (ret > ULONG_MAX) { ++ ret >>= 1; ++ base++; ++ } ++ ++ if (base) ++ return sprintf(buf, "%lu * 2^%lu\n", (unsigned long)ret, base); ++ else ++ return sprintf(buf, "%lu\n", (unsigned long)ret); ++} ++UKSM_ATTR_RO(pages_scanned); ++ ++static ssize_t hash_strength_show(struct kobject *kobj, ++ struct kobj_attribute *attr, char *buf) ++{ ++ return sprintf(buf, "%lu\n", hash_strength); ++} ++UKSM_ATTR_RO(hash_strength); ++ ++static ssize_t sleep_times_show(struct kobject *kobj, ++ struct kobj_attribute *attr, char *buf) ++{ ++ return sprintf(buf, "%llu\n", uksm_sleep_times); ++} ++UKSM_ATTR_RO(sleep_times); ++ ++ ++static struct attribute *uksm_attrs[] = { ++ &max_cpu_percentage_attr.attr, ++ &sleep_millisecs_attr.attr, ++ &cpu_governor_attr.attr, ++ &run_attr.attr, ++ &ema_per_page_time_attr.attr, ++ &pages_shared_attr.attr, ++ &pages_sharing_attr.attr, ++ &pages_unshared_attr.attr, ++ &full_scans_attr.attr, ++ &pages_scanned_attr.attr, ++ &hash_strength_attr.attr, ++ &sleep_times_attr.attr, ++ &thrash_threshold_attr.attr, ++ &abundant_threshold_attr.attr, ++ &cpu_ratios_attr.attr, ++ &eval_intervals_attr.attr, ++ NULL, ++}; ++ ++static struct attribute_group uksm_attr_group = { ++ .attrs = uksm_attrs, ++ .name = "uksm", ++}; ++#endif /* CONFIG_SYSFS */ ++ ++static inline void init_scan_ladder(void) ++{ ++ int i; ++ struct scan_rung *rung; ++ ++ for (i = 0; i < SCAN_LADDER_SIZE; i++) { ++ rung = uksm_scan_ladder + i; ++ slot_tree_init_root(&rung->vma_root); ++ } ++ ++ init_performance_values(); ++ uksm_calc_scan_pages(); ++} ++ ++static inline int cal_positive_negative_costs(void) ++{ ++ struct page *p1, *p2; ++ unsigned char *addr1, *addr2; ++ unsigned long i, time_start, hash_cost; ++ unsigned long loopnum = 0; ++ ++ /*IMPORTANT: volatile is needed to prevent over-optimization by gcc. 
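++ * Without it the timing loops below have no observable side effects, so ++ * the compiler could hoist or drop the page_hash()/pages_identical() ++ * calls and the hash_cost/memcmp_cost calibration would be meaningless.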
++
++static inline void init_scan_ladder(void)
++{
++	int i;
++	struct scan_rung *rung;
++
++	for (i = 0; i < SCAN_LADDER_SIZE; i++) {
++		rung = uksm_scan_ladder + i;
++		slot_tree_init_root(&rung->vma_root);
++	}
++
++	init_performance_values();
++	uksm_calc_scan_pages();
++}
++
++static inline int cal_positive_negative_costs(void)
++{
++	struct page *p1, *p2;
++	unsigned char *addr1, *addr2;
++	unsigned long i, time_start, hash_cost;
++	unsigned long loopnum = 0;
++
++	/* IMPORTANT: volatile is needed to prevent over-optimization by gcc. */
++	volatile u32 hash;
++	volatile int ret;
++
++	p1 = alloc_page(GFP_KERNEL);
++	if (!p1)
++		return -ENOMEM;
++
++	p2 = alloc_page(GFP_KERNEL);
++	if (!p2)
++		return -ENOMEM;
++
++	addr1 = kmap_atomic(p1);
++	addr2 = kmap_atomic(p2);
++	memset(addr1, prandom_u32(), PAGE_SIZE);
++	memcpy(addr2, addr1, PAGE_SIZE);
++
++	/* make sure that the two pages differ in last byte */
++	addr2[PAGE_SIZE-1] = ~addr2[PAGE_SIZE-1];
++	kunmap_atomic(addr2);
++	kunmap_atomic(addr1);
++
++	time_start = jiffies;
++	while (jiffies - time_start < 100) {
++		for (i = 0; i < 100; i++)
++			hash = page_hash(p1, HASH_STRENGTH_FULL, 0);
++		loopnum += 100;
++	}
++	hash_cost = (jiffies - time_start);
++
++	time_start = jiffies;
++	for (i = 0; i < loopnum; i++)
++		ret = pages_identical(p1, p2);
++	memcmp_cost = HASH_STRENGTH_FULL * (jiffies - time_start);
++	memcmp_cost /= hash_cost;
++	printk(KERN_INFO "UKSM: relative memcmp_cost = %lu "
++	       "hash=%u cmp_ret=%d.\n",
++	       memcmp_cost, hash, ret);
++
++	__free_page(p1);
++	__free_page(p2);
++	return 0;
++}
++
++static int init_zeropage_hash_table(void)
++{
++	struct page *page;
++	char *addr;
++	int i;
++
++	page = alloc_page(GFP_KERNEL);
++	if (!page)
++		return -ENOMEM;
++
++	addr = kmap_atomic(page);
++	memset(addr, 0, PAGE_SIZE);
++	kunmap_atomic(addr);
++
++	zero_hash_table = kmalloc(HASH_STRENGTH_MAX * sizeof(u32),
++				  GFP_KERNEL);
++	if (!zero_hash_table)
++		return -ENOMEM;
++
++	for (i = 0; i < HASH_STRENGTH_MAX; i++)
++		zero_hash_table[i] = page_hash(page, i, 0);
++
++	__free_page(page);
++
++	return 0;
++}
++
++static inline int init_random_sampling(void)
++{
++	unsigned long i;
++	random_nums = kmalloc(PAGE_SIZE, GFP_KERNEL);
++	if (!random_nums)
++		return -ENOMEM;
++
++	for (i = 0; i < HASH_STRENGTH_FULL; i++)
++		random_nums[i] = i;
++
++	for (i = 0; i < HASH_STRENGTH_FULL; i++) {
++		unsigned long rand_range, swap_index, tmp;
++
++		rand_range = HASH_STRENGTH_FULL - i;
++		swap_index = i + prandom_u32() % rand_range;
++		tmp = random_nums[i];
++		random_nums[i] = random_nums[swap_index];
++		random_nums[swap_index] = tmp;
++	}
++
++	rshash_state.state = RSHASH_NEW;
++	rshash_state.below_count = 0;
++	rshash_state.lookup_window_index = 0;
++
++	return cal_positive_negative_costs();
++}
++
++static int __init uksm_slab_init(void)
++{
++	rmap_item_cache = UKSM_KMEM_CACHE(rmap_item, 0);
++	if (!rmap_item_cache)
++		goto out;
++
++	stable_node_cache = UKSM_KMEM_CACHE(stable_node, 0);
++	if (!stable_node_cache)
++		goto out_free1;
++
++	node_vma_cache = UKSM_KMEM_CACHE(node_vma, 0);
++	if (!node_vma_cache)
++		goto out_free2;
++
++	vma_slot_cache = UKSM_KMEM_CACHE(vma_slot, 0);
++	if (!vma_slot_cache)
++		goto out_free3;
++
++	tree_node_cache = UKSM_KMEM_CACHE(tree_node, 0);
++	if (!tree_node_cache)
++		goto out_free4;
++
++	return 0;
++
++out_free4:
++	kmem_cache_destroy(vma_slot_cache);
++out_free3:
++	kmem_cache_destroy(node_vma_cache);
++out_free2:
++	kmem_cache_destroy(stable_node_cache);
++out_free1:
++	kmem_cache_destroy(rmap_item_cache);
++out:
++	return -ENOMEM;
++}
++
++static void __init uksm_slab_free(void)
++{
++	kmem_cache_destroy(stable_node_cache);
++	kmem_cache_destroy(rmap_item_cache);
++	kmem_cache_destroy(node_vma_cache);
++	kmem_cache_destroy(vma_slot_cache);
++	kmem_cache_destroy(tree_node_cache);
++}
++
++/* Common interface to ksm, though the behaviour here differs. */
++int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
++		unsigned long end, int advice, unsigned long *vm_flags)
++{
++	int err;
++
++	switch (advice) {
++	case MADV_MERGEABLE:
++		return 0;		/* just ignore the advice */
++
++	case MADV_UNMERGEABLE:
++		if (!(*vm_flags & VM_MERGEABLE))
++			return 0;	/* just ignore the advice */
++
++		if (vma->anon_vma) {
++			err = unmerge_uksm_pages(vma, start, end);
++			if (err)
++				return err;
++		}
++
++		uksm_remove_vma(vma);
++		*vm_flags &= ~VM_MERGEABLE;
++		break;
++	}
++
++	return 0;
++}
++
++/* Common interface to ksm, actually the same. */
++struct page *ksm_might_need_to_copy(struct page *page,
++			struct vm_area_struct *vma, unsigned long address)
++{
++	struct anon_vma *anon_vma = page_anon_vma(page);
++	struct page *new_page;
++
++	if (PageKsm(page)) {
++		if (page_stable_node(page))
++			return page;	/* no need to copy it */
++	} else if (!anon_vma) {
++		return page;		/* no need to copy it */
++	} else if (anon_vma->root == vma->anon_vma->root &&
++		   page->index == linear_page_index(vma, address)) {
++		return page;		/* still no need to copy it */
++	}
++	if (!PageUptodate(page))
++		return page;		/* let do_swap_page report the error */
++
++	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
++	if (new_page) {
++		copy_user_highpage(new_page, page, address, vma);
++
++		SetPageDirty(new_page);
++		__SetPageUptodate(new_page);
++		__set_page_locked(new_page);
++	}
++
++	return new_page;
++}
++
++static int __init uksm_init(void)
++{
++	struct task_struct *uksm_thread;
++	int err;
++
++	uksm_sleep_jiffies = msecs_to_jiffies(100);
++	uksm_sleep_saved = uksm_sleep_jiffies;
++
++	slot_tree_init();
++	init_scan_ladder();
++
++	err = init_random_sampling();
++	if (err)
++		goto out_free2;
++
++	err = uksm_slab_init();
++	if (err)
++		goto out_free1;
++
++	err = init_zeropage_hash_table();
++	if (err)
++		goto out_free0;
++
++	uksm_thread = kthread_run(uksm_scan_thread, NULL, "uksmd");
++	if (IS_ERR(uksm_thread)) {
++		printk(KERN_ERR "uksm: creating kthread failed\n");
++		err = PTR_ERR(uksm_thread);
++		goto out_free;
++	}
++
++#ifdef CONFIG_SYSFS
++	err = sysfs_create_group(mm_kobj, &uksm_attr_group);
++	if (err) {
++		printk(KERN_ERR "uksm: register sysfs failed\n");
++		kthread_stop(uksm_thread);
++		goto out_free;
++	}
++#else
++	uksm_run = UKSM_RUN_MERGE;	/* no way for user to start it */
++
++#endif /* CONFIG_SYSFS */
++
++#ifdef CONFIG_MEMORY_HOTREMOVE
++	/*
++	 * Choose a high priority since the callback takes uksm_thread_mutex:
++	 * later callbacks could only be taking locks which nest within that.
++	 */
++	hotplug_memory_notifier(uksm_memory_callback, 100);
++#endif
++	return 0;
++
++out_free:
++	kfree(zero_hash_table);
++out_free0:
++	uksm_slab_free();
++out_free1:
++	kfree(random_nums);
++out_free2:
++	kfree(uksm_scan_ladder);
++	return err;
++}
++
++#ifdef MODULE
++module_init(uksm_init)
++#else
++late_initcall(uksm_init);
++#endif
++
+diff --git a/mm/vmstat.c b/mm/vmstat.c
+index f42745e..1796e0c 100644
+--- a/mm/vmstat.c
++++ b/mm/vmstat.c
+@@ -739,6 +739,9 @@ const char * const vmstat_text[] = {
+ #endif
+ 	"nr_anon_transparent_hugepages",
+ 	"nr_free_cma",
++#ifdef CONFIG_UKSM
++	"nr_uksm_zero_pages",
++#endif
+ 	"nr_dirty_threshold",
+ 	"nr_dirty_background_threshold",
+
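The vmstat hunk only adds the display name; the counter itself is maintained by the uksm.c code above. On a running kernel built with CONFIG_UKSM the new field is readable like any other vmstat entry:

    grep nr_uksm_zero_pages /proc/vmstat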
diff --git a/sys-kernel/kogaion-sources/files/security/0001-x86-x32-Correct-invalid-use-of-user-timespec-in-the-.patch b/sys-kernel/kogaion-sources/files/security/0001-x86-x32-Correct-invalid-use-of-user-timespec-in-the-.patch
new file mode 100644
index 00000000..3f1bccc8
--- /dev/null
+++ b/sys-kernel/kogaion-sources/files/security/0001-x86-x32-Correct-invalid-use-of-user-timespec-in-the-.patch
@@ -0,0 +1,80 @@
+From 2def2ef2ae5f3990aabdbe8a755911902707d268 Mon Sep 17 00:00:00 2001
+From: PaX Team <pageexec@freemail.hu>
+Date: Thu, 30 Jan 2014 16:59:25 -0800
+Subject: [PATCH] x86, x32: Correct invalid use of user timespec in the kernel
+
+The x32 case for the recvmmsg() timeout handling is broken:
+
+  asmlinkage long compat_sys_recvmmsg(int fd, struct compat_mmsghdr __user *mmsg,
+                                      unsigned int vlen, unsigned int flags,
+                                      struct compat_timespec __user *timeout)
+  {
+          int datagrams;
+          struct timespec ktspec;
+
+          if (flags & MSG_CMSG_COMPAT)
+                  return -EINVAL;
+
+          if (COMPAT_USE_64BIT_TIME)
+                  return __sys_recvmmsg(fd, (struct mmsghdr __user *)mmsg, vlen,
+                                        flags | MSG_CMSG_COMPAT,
+                                        (struct timespec *) timeout);
+  ...
+
+The timeout pointer parameter is provided by userland (hence the __user
+annotation) but for x32 syscalls it's simply cast to a kernel pointer
+and is passed to __sys_recvmmsg, which will eventually directly
+dereference it for both reading and writing. Other callers of
+__sys_recvmmsg properly copy from userland to the kernel first.
+
+The bug was introduced by commit ee4fa23c4bfc ("compat: Use
+COMPAT_USE_64BIT_TIME in net/compat.c") and should affect all kernels
+since 3.4 (and perhaps vendor kernels if they backported x32 support
+along with this code).
+
+Note that CONFIG_X86_X32_ABI gets enabled at build time and only if
+CONFIG_X86_X32 is enabled and ld can build x32 executables.
+
+Other uses of COMPAT_USE_64BIT_TIME seem fine.
+
+This addresses CVE-2014-0038.
+
+Signed-off-by: PaX Team <pageexec@freemail.hu>
+Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
+Cc: <stable@vger.kernel.org> # v3.4+
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+---
+ net/compat.c | 9 ++-------
+ 1 file changed, 2 insertions(+), 7 deletions(-)
+
+diff --git a/net/compat.c b/net/compat.c
+index dd32e34..f50161f 100644
+--- a/net/compat.c
++++ b/net/compat.c
+@@ -780,21 +780,16 @@ asmlinkage long compat_sys_recvmmsg(int fd, struct compat_mmsghdr __user *mmsg,
+ 	if (flags & MSG_CMSG_COMPAT)
+ 		return -EINVAL;
+ 
+-	if (COMPAT_USE_64BIT_TIME)
+-		return __sys_recvmmsg(fd, (struct mmsghdr __user *)mmsg, vlen,
+-				      flags | MSG_CMSG_COMPAT,
+-				      (struct timespec *) timeout);
+-
+ 	if (timeout == NULL)
+ 		return __sys_recvmmsg(fd, (struct mmsghdr __user *)mmsg, vlen,
+ 				      flags | MSG_CMSG_COMPAT, NULL);
+ 
+-	if (get_compat_timespec(&ktspec, timeout))
++	if (compat_get_timespec(&ktspec, timeout))
+ 		return -EFAULT;
+ 
+ 	datagrams = __sys_recvmmsg(fd, (struct mmsghdr __user *)mmsg, vlen,
+ 				   flags | MSG_CMSG_COMPAT, &ktspec);
+-	if (datagrams > 0 && put_compat_timespec(&ktspec, timeout))
++	if (datagrams > 0 && compat_put_timespec(&ktspec, timeout))
+ 		datagrams = -EFAULT;
+ 
+ 	return datagrams;
+-- 
+1.8.5.3
+
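Only builds with CONFIG_X86_X32 enabled (and a linker able to produce x32 objects, which in turn enables CONFIG_X86_X32_ABI) expose the vulnerable path, so a quick exposure check before backporting is to inspect the kernel config. The config locations below are the usual ones but vary by distribution:

    zgrep CONFIG_X86_X32= /proc/config.gz 2>/dev/null \
        || grep CONFIG_X86_X32= "/boot/config-$(uname -r)"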
diff --git a/sys-kernel/kogaion-sources/kogaion-sources-3.10.25.ebuild b/sys-kernel/kogaion-sources/kogaion-sources-3.10.25.ebuild
new file mode 100644
index 00000000..dedc361b
--- /dev/null
+++ b/sys-kernel/kogaion-sources/kogaion-sources-3.10.25.ebuild
@@ -0,0 +1,55 @@
+# Distributed under the terms of the GNU General Public License v2
+
+EAPI=5
+
+inherit eutils
+
+SLOT=$PVR
+CKV=3.10.25
+KV_FULL=${PN}-${PVR}
+EXTRAVERSION=kogaion
+KERNEL_ARCHIVE="linux-${PV}.tar.xz"
+PATCH_ARCHIVE="linux-${PV}-kogaion.tar.gz"
+RESTRICT="binchecks strip mirror"
+LICENSE="GPL-2"
+KEYWORDS="amd64 x86"
+
+IUSE=""
+DEPEND=""
+RDEPEND=""
+DESCRIPTION="Kogaion Linux Kernel Sources"
+HOMEPAGE="http://www.debian.org"
+SRC_URI="https://www.kernel.org/pub/linux/kernel/v3.x/${KERNEL_ARCHIVE}"
+
+S="$WORKDIR/linux-${CKV}"
+
+pkg_setup() {
+	export REAL_ARCH="$ARCH"
+	unset ARCH ; unset LDFLAGS # will interfere with Makefile if set
+}
+
+src_prepare() {
+
+	for p in $(ls ${FILESDIR}/security); do
+		epatch -p1 "${FILESDIR}/security/$p" || die
+	done
+
+	for p in $(ls ${FILESDIR}/desktop) ; do
+		epatch -p1 "${FILESDIR}/desktop/$p" || die
+	done
+
+	rm -f .config || die
+}
+
+src_compile() {
+	einfo "Preparing kernel sources for real-world use"
+	make -s mrproper || die "make mrproper failed"
+}
+
+src_install() {
+	dodir /usr/src/linux-${PV}-kogaion || die
+	insinto /usr/src/linux-${PV}-kogaion || die
+	doins -r "${S}"/* || die
+	cd ${D}/usr/src/linux-${PV}-kogaion || die
+	make mrproper || die
+}
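Because src_prepare() applies every file found in FILESDIR/security and FILESDIR/desktop in ls(1) order, adding a fix is a matter of dropping a numbered patch into the right subdirectory and refreshing the Manifest. A typical test cycle from an overlay checkout with standard Portage tooling (the patch name here is hypothetical):

    cd sys-kernel/kogaion-sources
    cp ~/0002-some-fix.patch files/security/   # picked up by the epatch loop
    ebuild kogaion-sources-3.10.25.ebuild manifest
    ebuild kogaion-sources-3.10.25.ebuild clean install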
diff --git a/sys-kernel/linux-sabayon/Manifest b/sys-kernel/linux-sabayon/Manifest
new file mode 100644
index 00000000..afc97b5e
--- /dev/null
+++ b/sys-kernel/linux-sabayon/Manifest
@@ -0,0 +1,2 @@
+DIST linux-3.2-r2+sabayon.tar.bz2 78655819 RMD160 a198cc183321ef918bf4da890d114f2fd1008924 SHA1 98c84ea498ee37df2fdc148d484660c340f42100 SHA256 2fc73a8afe216476ba18a744cb5d021dc17d1607e305ce6a0de0eae67f86f494
+EBUILD linux-sabayon-3.2-r2.ebuild 373 RMD160 107a74fd914b7a22077a1729f7ff7c1f8e7d328f SHA1 f5060e12322aac857cda3feb0a5c3dd09ab85b5e SHA256 ee2fb4eb10a4dda43eacda6ad5872926557ff2583fad45ef647ea310a967c830
diff --git a/sys-kernel/linux-sabayon/linux-sabayon-3.2-r2.ebuild b/sys-kernel/linux-sabayon/linux-sabayon-3.2-r2.ebuild
new file mode 100644
index 00000000..e6b919b0
--- /dev/null
+++ b/sys-kernel/linux-sabayon/linux-sabayon-3.2-r2.ebuild
@@ -0,0 +1,12 @@
+# Copyright 2004-2010 Sabayon Linux
+# Distributed under the terms of the GNU General Public License v2
+
+ETYPE="sources"
+K_SABKERNEL_SELF_TARBALL_NAME="sabayon"
+K_REQUIRED_LINUX_FIRMWARE_VER="20111025"
+K_SABKERNEL_FORCE_SUBLEVEL="0"
+inherit sabayon-kernel rogentos-artwork
+
+KEYWORDS="~amd64 ~x86"
+DESCRIPTION="Official Sabayon Linux Standard kernel image"
+RESTRICT="mirror"
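The Manifest uses the older multi-hash DIST/EBUILD format (RMD160, SHA1, and SHA256 per entry); an already-fetched distfile can be checked against it by hand, e.g.:

    sha256sum linux-3.2-r2+sabayon.tar.bz2
    # expected: 2fc73a8afe216476ba18a744cb5d021dc17d1607e305ce6a0de0eae67f86f494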
diff --git a/sys-kernel/rogentos-sources/rogentos-sources-3.10.26.ebuild b/sys-kernel/rogentos-sources/rogentos-sources-3.10.26.ebuild
new file mode 100644
index 00000000..2cec6130
--- /dev/null
+++ b/sys-kernel/rogentos-sources/rogentos-sources-3.10.26.ebuild
@@ -0,0 +1,22 @@
+# Copyright 2004-2014 Kogaion, Argent and ArgOS Linux
+# Distributed under the terms of the GNU General Public License v2
+
+EAPI=5
+
+K_ROGKERNEL_NAME="rogentos"
+K_ROGKERNEL_URI_CONFIG="yes"
+K_ROGKERNEL_SELF_TARBALL_NAME="rogentos"
+K_ONLY_SOURCES="1"
+K_ROGKERNEL_FORCE_SUBLEVEL="0"
+K_KERNEL_NEW_VERSIONING="1"
+
+inherit rogentos-kernel
+
+KEYWORDS="~amd64 ~arm ~x86"
+DESCRIPTION="Official Kogaion, Argent and ArgOS Linux Standard kernel sources"
+RESTRICT="mirror"
+IUSE="sources_standalone"
+
+DEPEND="${DEPEND}
+	sources_standalone? ( !=sys-kernel/linux-rogentos-${PVR} )
+	!sources_standalone? ( =sys-kernel/linux-rogentos-${PVR} )"
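The sources_standalone flag drives the conditional DEPEND: with the flag set, the sources block the matching binary kernel package (!=sys-kernel/linux-rogentos-${PVR}); without it, they require that exact version. On a Portage system the flag would be toggled per package before emerging, e.g.:

    echo 'sys-kernel/rogentos-sources sources_standalone' \
        >> /etc/portage/package.use/rogentos-kernel
    emerge --ask sys-kernel/rogentos-sources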
diff --git a/sys-kernel/rogentos-sources/rogentos-sources-3.10.27.ebuild b/sys-kernel/rogentos-sources/rogentos-sources-3.10.27.ebuild
new file mode 100644
index 00000000..2cec6130
--- /dev/null
+++ b/sys-kernel/rogentos-sources/rogentos-sources-3.10.27.ebuild
@@ -0,0 +1,22 @@
+# Copyright 2004-2014 Kogaion, Argent and ArgOS Linux
+# Distributed under the terms of the GNU General Public License v2
+
+EAPI=5
+
+K_ROGKERNEL_NAME="rogentos"
+K_ROGKERNEL_URI_CONFIG="yes"
+K_ROGKERNEL_SELF_TARBALL_NAME="rogentos"
+K_ONLY_SOURCES="1"
+K_ROGKERNEL_FORCE_SUBLEVEL="0"
+K_KERNEL_NEW_VERSIONING="1"
+
+inherit rogentos-kernel
+
+KEYWORDS="~amd64 ~arm ~x86"
+DESCRIPTION="Official Kogaion, Argent and ArgOS Linux Standard kernel sources"
+RESTRICT="mirror"
+IUSE="sources_standalone"
+
+DEPEND="${DEPEND}
+	sources_standalone? ( !=sys-kernel/linux-rogentos-${PVR} )
+	!sources_standalone? ( =sys-kernel/linux-rogentos-${PVR} )"
diff --git a/sys-kernel/rogentos-sources/rogentos-sources-3.11.10.ebuild b/sys-kernel/rogentos-sources/rogentos-sources-3.11.10.ebuild
new file mode 100644
index 00000000..2cec6130
--- /dev/null
+++ b/sys-kernel/rogentos-sources/rogentos-sources-3.11.10.ebuild
@@ -0,0 +1,22 @@
+# Copyright 2004-2014 Kogaion, Argent and ArgOS Linux
+# Distributed under the terms of the GNU General Public License v2
+
+EAPI=5
+
+K_ROGKERNEL_NAME="rogentos"
+K_ROGKERNEL_URI_CONFIG="yes"
+K_ROGKERNEL_SELF_TARBALL_NAME="rogentos"
+K_ONLY_SOURCES="1"
+K_ROGKERNEL_FORCE_SUBLEVEL="0"
+K_KERNEL_NEW_VERSIONING="1"
+
+inherit rogentos-kernel
+
+KEYWORDS="~amd64 ~arm ~x86"
+DESCRIPTION="Official Kogaion, Argent and ArgOS Linux Standard kernel sources"
+RESTRICT="mirror"
+IUSE="sources_standalone"
+
+DEPEND="${DEPEND}
+	sources_standalone? ( !=sys-kernel/linux-rogentos-${PVR} )
+	!sources_standalone? ( =sys-kernel/linux-rogentos-${PVR} )"
diff --git a/sys-kernel/rogentos-sources/rogentos-sources-3.12.10.ebuild b/sys-kernel/rogentos-sources/rogentos-sources-3.12.10.ebuild
new file mode 100644
index 00000000..756f78ae
--- /dev/null
+++ b/sys-kernel/rogentos-sources/rogentos-sources-3.12.10.ebuild
@@ -0,0 +1,22 @@
+# Copyright 2004-2014 RogentOS Linux
+# Distributed under the terms of the GNU General Public License v2
+
+EAPI=5
+
+K_ROGKERNEL_NAME="rogentos"
+K_ROGKERNEL_SELF_TARBALL_NAME="rogentos"
+K_ONLY_SOURCES="1"
+K_ROGKERNEL_FORCE_SUBLEVEL="0"
+K_KERNEL_NEW_VERSIONING="1"
+K_ROGKERNEL_PATCH_UPSTREAM_TARBALL="1"
+
+inherit rogentos-kernel
+
+KEYWORDS="~amd64 ~x86"
+DESCRIPTION="Official Kogaion, Argent and ArgOS Linux kernel sources"
+RESTRICT="mirror"
+IUSE="sources_standalone"
+
+DEPEND="${DEPEND}
+	sources_standalone? ( !=sys-kernel/linux-rogentos-${PVR} )
+	!sources_standalone? ( =sys-kernel/linux-rogentos-${PVR} )"
diff --git a/sys-kernel/rogentos-sources/rogentos-sources-3.12.7.ebuild b/sys-kernel/rogentos-sources/rogentos-sources-3.12.7.ebuild
new file mode 100644
index 00000000..2cec6130
--- /dev/null
+++ b/sys-kernel/rogentos-sources/rogentos-sources-3.12.7.ebuild
@@ -0,0 +1,22 @@
+# Copyright 2004-2014 Kogaion, Argent and ArgOS Linux
+# Distributed under the terms of the GNU General Public License v2
+
+EAPI=5
+
+K_ROGKERNEL_NAME="rogentos"
+K_ROGKERNEL_URI_CONFIG="yes"
+K_ROGKERNEL_SELF_TARBALL_NAME="rogentos"
+K_ONLY_SOURCES="1"
+K_ROGKERNEL_FORCE_SUBLEVEL="0"
+K_KERNEL_NEW_VERSIONING="1"
+
+inherit rogentos-kernel
+
+KEYWORDS="~amd64 ~arm ~x86"
+DESCRIPTION="Official Kogaion, Argent and ArgOS Linux Standard kernel sources"
+RESTRICT="mirror"
+IUSE="sources_standalone"
+
+DEPEND="${DEPEND}
+	sources_standalone? ( !=sys-kernel/linux-rogentos-${PVR} )
+	!sources_standalone? ( =sys-kernel/linux-rogentos-${PVR} )"
diff --git a/sys-kernel/rogentos-sources/rogentos-sources-3.12.8.ebuild b/sys-kernel/rogentos-sources/rogentos-sources-3.12.8.ebuild
new file mode 100644
index 00000000..2cec6130
--- /dev/null
+++ b/sys-kernel/rogentos-sources/rogentos-sources-3.12.8.ebuild
@@ -0,0 +1,22 @@
+# Copyright 2004-2014 Kogaion, Argent and ArgOS Linux
+# Distributed under the terms of the GNU General Public License v2
+
+EAPI=5
+
+K_ROGKERNEL_NAME="rogentos"
+K_ROGKERNEL_URI_CONFIG="yes"
+K_ROGKERNEL_SELF_TARBALL_NAME="rogentos"
+K_ONLY_SOURCES="1"
+K_ROGKERNEL_FORCE_SUBLEVEL="0"
+K_KERNEL_NEW_VERSIONING="1"
+
+inherit rogentos-kernel
+
+KEYWORDS="~amd64 ~arm ~x86"
+DESCRIPTION="Official Kogaion, Argent and ArgOS Linux Standard kernel sources"
+RESTRICT="mirror"
+IUSE="sources_standalone"
+
+DEPEND="${DEPEND}
+	sources_standalone? ( !=sys-kernel/linux-rogentos-${PVR} )
+	!sources_standalone? ( =sys-kernel/linux-rogentos-${PVR} )"
diff --git a/sys-kernel/rogentos-sources/rogentos-sources-3.2-r2.ebuild b/sys-kernel/rogentos-sources/rogentos-sources-3.2-r2.ebuild
new file mode 100644
index 00000000..2dc070d1
--- /dev/null
+++ b/sys-kernel/rogentos-sources/rogentos-sources-3.2-r2.ebuild
@@ -0,0 +1,19 @@
+# Copyright 2004-2010 Kogaion, Argent and ArgOS Linux
+# Distributed under the terms of the GNU General Public License v2
+
+ETYPE="sources"
+K_ROGKERNEL_NAME="rogentos"
+K_ROGKERNEL_URI_CONFIG="yes"
+K_ROGKERNEL_SELF_TARBALL_NAME="rogentos"
+K_ONLY_SOURCES="1"
+K_ROGKERNEL_FORCE_SUBLEVEL="0"
+inherit rogentos-kernel
+KEYWORDS="~amd64 ~x86"
+DESCRIPTION="Official Kogaion, Argent and ArgOS Linux Standard kernel sources"
+RESTRICT="mirror"
+IUSE="sources_standalone"
+
+DEPEND="${DEPEND}
+	sources_standalone? ( !=sys-kernel/linux-rogentos-${PVR} )
+	!sources_standalone? ( =sys-kernel/linux-rogentos-${PVR} )"
+
diff --git a/sys-kernel/rogentos-sources/rogentos-sources-3.4.76.ebuild b/sys-kernel/rogentos-sources/rogentos-sources-3.4.76.ebuild
new file mode 100644
index 00000000..2cec6130
--- /dev/null
+++ b/sys-kernel/rogentos-sources/rogentos-sources-3.4.76.ebuild
@@ -0,0 +1,22 @@
+# Copyright 2004-2014 Kogaion, Argent and ArgOS Linux
+# Distributed under the terms of the GNU General Public License v2
+
+EAPI=5
+
+K_ROGKERNEL_NAME="rogentos"
+K_ROGKERNEL_URI_CONFIG="yes"
+K_ROGKERNEL_SELF_TARBALL_NAME="rogentos"
+K_ONLY_SOURCES="1"
+K_ROGKERNEL_FORCE_SUBLEVEL="0"
+K_KERNEL_NEW_VERSIONING="1"
+
+inherit rogentos-kernel
+
+KEYWORDS="~amd64 ~arm ~x86"
+DESCRIPTION="Official Kogaion, Argent and ArgOS Linux Standard kernel sources"
+RESTRICT="mirror"
+IUSE="sources_standalone"
+
+DEPEND="${DEPEND}
+	sources_standalone? ( !=sys-kernel/linux-rogentos-${PVR} )
+	!sources_standalone? ( =sys-kernel/linux-rogentos-${PVR} )"
diff --git a/sys-kernel/rogentos-sources/rogentos-sources-3.4.77.ebuild b/sys-kernel/rogentos-sources/rogentos-sources-3.4.77.ebuild
new file mode 100644
index 00000000..2cec6130
--- /dev/null
+++ b/sys-kernel/rogentos-sources/rogentos-sources-3.4.77.ebuild
@@ -0,0 +1,22 @@
+# Copyright 2004-2014 Kogaion, Argent and ArgOS Linux
+# Distributed under the terms of the GNU General Public License v2
+
+EAPI=5
+
+K_ROGKERNEL_NAME="rogentos"
+K_ROGKERNEL_URI_CONFIG="yes"
+K_ROGKERNEL_SELF_TARBALL_NAME="rogentos"
+K_ONLY_SOURCES="1"
+K_ROGKERNEL_FORCE_SUBLEVEL="0"
+K_KERNEL_NEW_VERSIONING="1"
+
+inherit rogentos-kernel
+
+KEYWORDS="~amd64 ~arm ~x86"
+DESCRIPTION="Official Kogaion, Argent and ArgOS Linux Standard kernel sources"
+RESTRICT="mirror"
+IUSE="sources_standalone"
+
+DEPEND="${DEPEND}
+	sources_standalone? ( !=sys-kernel/linux-rogentos-${PVR} )
+	!sources_standalone? ( =sys-kernel/linux-rogentos-${PVR} )"
diff --git a/sys-kernel/rogentos-sources/rogentos-sources-3.8.13.ebuild b/sys-kernel/rogentos-sources/rogentos-sources-3.8.13.ebuild
new file mode 100644
index 00000000..98efbe4f
--- /dev/null
+++ b/sys-kernel/rogentos-sources/rogentos-sources-3.8.13.ebuild
@@ -0,0 +1,21 @@
+# Copyright 2004-2014 Kogaion, Argent and ArgOS Linux
+# Distributed under the terms of the GNU General Public License v2
+
+EAPI=5
+
+K_ROGKERNEL_NAME="rogentos"
+K_ROGKERNEL_URI_CONFIG="yes"
+K_ROGKERNEL_SELF_TARBALL_NAME="rogentos"
+K_ONLY_SOURCES="1"
+K_ROGKERNEL_FORCE_SUBLEVEL="0"
+K_KERNEL_NEW_VERSIONING="1"
+K_KERNEL_PATCH_HOTFIXES="${FILESDIR}/0001-rogentos-fix-rogentos-x86-kernel-config.patch"
+inherit rogentos-kernel
+KEYWORDS="~amd64 ~x86"
+DESCRIPTION="Official Kogaion, Argent and ArgOS Linux Standard kernel sources"
+RESTRICT="mirror"
+IUSE="sources_standalone"
+
+DEPEND="${DEPEND}
+	sources_standalone? ( !=sys-kernel/linux-rogentos-${PVR} )
+	!sources_standalone? ( =sys-kernel/linux-rogentos-${PVR} )"
diff --git a/sys-kernel/rogentos-sources/rogentos-sources-3.9.11.ebuild b/sys-kernel/rogentos-sources/rogentos-sources-3.9.11.ebuild
new file mode 100644
index 00000000..2cec6130
--- /dev/null
+++ b/sys-kernel/rogentos-sources/rogentos-sources-3.9.11.ebuild
@@ -0,0 +1,22 @@
+# Copyright 2004-2014 Kogaion, Argent and ArgOS Linux
+# Distributed under the terms of the GNU General Public License v2
+
+EAPI=5
+
+K_ROGKERNEL_NAME="rogentos"
+K_ROGKERNEL_URI_CONFIG="yes"
+K_ROGKERNEL_SELF_TARBALL_NAME="rogentos"
+K_ONLY_SOURCES="1"
+K_ROGKERNEL_FORCE_SUBLEVEL="0"
+K_KERNEL_NEW_VERSIONING="1"
+
+inherit rogentos-kernel
+
+KEYWORDS="~amd64 ~arm ~x86"
+DESCRIPTION="Official Kogaion, Argent and ArgOS Linux Standard kernel sources"
+RESTRICT="mirror"
+IUSE="sources_standalone"
+
+DEPEND="${DEPEND}
+	sources_standalone? ( !=sys-kernel/linux-rogentos-${PVR} )
+	!sources_standalone? ( =sys-kernel/linux-rogentos-${PVR} )"
diff --git a/sys-kernel/rogentos-sources/rogentos-sources.skel b/sys-kernel/rogentos-sources/rogentos-sources.skel
new file mode 100644
index 00000000..257a14ab
--- /dev/null
+++ b/sys-kernel/rogentos-sources/rogentos-sources.skel
@@ -0,0 +1,22 @@
+# Copyright 2004-2014 RogentOS Team
+# Distributed under the terms of the GNU General Public License v2
+
+EAPI=5
+
+K_ROGKERNEL_NAME="rogentos"
+K_ROGKERNEL_URI_CONFIG="yes"
+K_ROGKERNEL_SELF_TARBALL_NAME="rogentos"
+K_ONLY_SOURCES="1"
+K_ROGKERNEL_FORCE_SUBLEVEL="0"
+K_KERNEL_NEW_VERSIONING="1"
+
+inherit rogentos-kernel
+
+KEYWORDS="~amd64 ~arm ~x86"
+DESCRIPTION="Official Kogaion, Argent and ArgOS source ebuild"
+RESTRICT="mirror"
+IUSE="sources_standalone"
+
+DEPEND="${DEPEND}
+	sources_standalone? ( !=sys-kernel/linux-rogentos-${PVR} )
+	!sources_standalone? ( =sys-kernel/linux-rogentos-${PVR} )"
-- 
cgit v1.2.3