From 687f886abf880617198ae618bfd0ec36cac9bb7e Mon Sep 17 00:00:00 2001
From: Tk-Glitch
Date: Mon, 15 Jun 2020 03:32:10 +0200
Subject: [PATCH] Introduce linux58-rc-tkg

---
 linux58-rc-tkg/PKGBUILD                       |  1120 ++
 linux58-rc-tkg/README.md                      |    36 +
 linux58-rc-tkg/customization.cfg              |   172 +
 .../linux58-tkg-config/90-cleanup.hook        |    14 +
 linux58-rc-tkg/linux58-tkg-config/cleanup     |    10 +
 .../linux58-tkg-config/config.x86_64          | 10920 ++++++++++++++++
 .../generic-desktop-profile.cfg               |    55 +
 .../ryzen-desktop-profile.cfg                 |    58 +
 ...sallow-unprivileged-CLONE_NEWUSER-by.patch |   156 +
 .../0002-clear-patches.patch                  |   354 +
 .../0003-glitched-base.patch                  |  1446 ++
 .../0003-glitched-cfs.patch                   |    72 +
 .../0005-glitched-ondemand-pds.patch          |    18 +
 .../0005-glitched-pds.patch                   |   166 +
 .../0005-v5.8_undead-pds099o.patch            |  8542 ++++++++++++
 .../0006-add-acs-overrides_iommu.patch        |   193 +
 .../linux58-tkg-patches/0007-v5.8-fsync.patch |   908 ++
 .../linux58-tkg-patches/0011-ZFS-fix.patch    |    43 +
 18 files changed, 24283 insertions(+)
 create mode 100644 linux58-rc-tkg/PKGBUILD
 create mode 100644 linux58-rc-tkg/README.md
 create mode 100644 linux58-rc-tkg/customization.cfg
 create mode 100644 linux58-rc-tkg/linux58-tkg-config/90-cleanup.hook
 create mode 100755 linux58-rc-tkg/linux58-tkg-config/cleanup
 create mode 100644 linux58-rc-tkg/linux58-tkg-config/config.x86_64
 create mode 100644 linux58-rc-tkg/linux58-tkg-config/generic-desktop-profile.cfg
 create mode 100644 linux58-rc-tkg/linux58-tkg-config/ryzen-desktop-profile.cfg
 create mode 100644 linux58-rc-tkg/linux58-tkg-patches/0001-add-sysctl-to-disallow-unprivileged-CLONE_NEWUSER-by.patch
 create mode 100644 linux58-rc-tkg/linux58-tkg-patches/0002-clear-patches.patch
 create mode 100644 linux58-rc-tkg/linux58-tkg-patches/0003-glitched-base.patch
 create mode 100644 linux58-rc-tkg/linux58-tkg-patches/0003-glitched-cfs.patch
 create mode 100644 linux58-rc-tkg/linux58-tkg-patches/0005-glitched-ondemand-pds.patch
 create mode 100644 linux58-rc-tkg/linux58-tkg-patches/0005-glitched-pds.patch
 create mode 100644 linux58-rc-tkg/linux58-tkg-patches/0005-v5.8_undead-pds099o.patch
 create mode 100644 linux58-rc-tkg/linux58-tkg-patches/0006-add-acs-overrides_iommu.patch
 create mode 100644 linux58-rc-tkg/linux58-tkg-patches/0007-v5.8-fsync.patch
 create mode 100644 linux58-rc-tkg/linux58-tkg-patches/0011-ZFS-fix.patch

diff --git a/linux58-rc-tkg/PKGBUILD b/linux58-rc-tkg/PKGBUILD
new file mode 100644
index 0000000..ed6217c
--- /dev/null
+++ b/linux58-rc-tkg/PKGBUILD
@@ -0,0 +1,1120 @@
+# Based on the file created for Arch Linux by:
+# Tobias Powalowski
+# Thomas Baechler
+
+# Contributor: Tk-Glitch
+
+plain ' .---.` `.---.'
+plain ' `/syhhhyso- -osyhhhys/`'
+plain ' .syNMdhNNhss/``.---.``/sshNNhdMNys.'
+plain ' +sdMh.`+MNsssssssssssssssNM+`.hMds+'
+plain ' :syNNdhNNhssssssssssssssshNNhdNNys:'
+plain ' /ssyhhhysssssssssssssssssyhhhyss/'
+plain ' .ossssssssssssssssssssssssssssso.'
+plain ' :sssssssssssssssssssssssssssssssss:'
+plain ' /sssssssssssssssssssssssssssssssssss/'
+plain ' :sssssssssssssoosssssssoosssssssssssss:'
+plain ' osssssssssssssoosssssssoossssssssssssso'
+plain ' osssssssssssyyyyhhhhhhhyyyyssssssssssso'
+plain ' /yyyyyyhhdmmmmNNNNNNNNNNNmmmmdhhyyyyyy/'
+plain ' smmmNNNNNNNNNNNNNNNNNNNNNNNNNNNNNmmms'
+plain ' /dNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNd/'
+plain ' `:sdNNNNNNNNNNNNNNNNNNNNNNNNNds:`'
+plain ' `-+shdNNNNNNNNNNNNNNNdhs+-`'
+plain ' `.-:///////:-.`'
+
+_where="$PWD" # track basedir as different Arch based distros are moving srcdir around
+
+cp "$_where"/linux58-tkg-patches/* "$_where" # copy patches inside the PKGBUILD's dir to preserve makepkg sourcing and md5sum checking
+cp "$_where"/linux58-tkg-config/* "$_where" # copy config files and hooks inside the PKGBUILD's dir to preserve makepkg sourcing and md5sum checking
+
+source "$_where"/customization.cfg # load default configuration from file
+
+# Load external configuration file if present. Available variable values will overwrite customization.cfg ones.
+if [ -e "$_EXT_CONFIG_PATH" ]; then
+  source "$_EXT_CONFIG_PATH" && msg2 "External configuration file $_EXT_CONFIG_PATH will be used to override customization.cfg values." && msg2 ""
+fi
+
+if [ -z "$_OPTIPROFILE" ] && [ ! -e "$_where"/cpuschedset ]; then
+  # Prompt about optimized configurations. Available variable values will overwrite customization.cfg/external config ones.
+  plain "Do you want to use a predefined optimized profile?"
+  read -rp "`echo $' > 1.Custom\n 2.Ryzen Desktop (Performance)\n 3.Other Desktop (Performance)\nchoice[1-3?]: '`" _OPTIPROFILE;
+fi
+if [ "$_OPTIPROFILE" == "2" ]; then
+  source "$_where"/ryzen-desktop-profile.cfg && msg2 "Ryzen Desktop (Performance) profile will be used." && msg2 ""
+elif [ "$_OPTIPROFILE" == "3" ]; then
+  source "$_where"/generic-desktop-profile.cfg && msg2 "Generic Desktop (Performance) profile will be used." && msg2 ""
+fi
+
+# source cpuschedset early if present
+if [ -e "$_where"/cpuschedset ]; then
+  source "$_where"/cpuschedset
+fi
+
+# CPU SCHED selector
+if [ -z "$_cpusched" ] && [ ! -e "$_where"/cpuschedset ]; then
+  plain "What CPU sched variant do you want to build/install?"
+  read -rp "`echo $' > 1.PDS\n 2.CFS\nchoice[1-2?]: '`" CONDITION;
+  if [ "$CONDITION" == "2" ]; then
+    echo "_cpusched=\"cfs\"" > "$_where"/cpuschedset
+  else
+    echo "_cpusched=\"pds\"" > "$_where"/cpuschedset
+  fi
+  if [ -n "$_custom_pkgbase" ]; then
+    echo "_custom_pkgbase=\"${_custom_pkgbase}\"" >> "$_where"/cpuschedset
+  fi
+elif [ "$_cpusched" == "pds" ]; then
+  echo "_cpusched=\"pds\"" > "$_where"/cpuschedset
+elif [ "$_cpusched" == "cfs" ]; then
+  echo "_cpusched=\"cfs\"" > "$_where"/cpuschedset
+else
+  warning "Invalid selection - Falling back to CFS..."
+ echo "_cpusched=\"cfs\"" > "$_where"/cpuschedset +fi + +source "$_where"/cpuschedset + +_basever=58 +if [ -n "$_custom_pkgbase" ]; then + pkgbase="${_custom_pkgbase}" +else + pkgbase=linux"${_basever}"-tkg-"${_cpusched}" +fi +pkgname=("${pkgbase}" "${pkgbase}-headers") +_basekernel=5.8 +_sub=rc1 +pkgver="${_basekernel}"."${_sub}" +pkgrel=1 +pkgdesc='Linux-tkg' +arch=('x86_64') # no i686 in here +url="http://www.kernel.org/" +license=('GPL2') +makedepends=('xmlto' 'docbook-xsl' 'kmod' 'inetutils' 'bc' 'libelf' 'patchutils' 'flex' 'python-sphinx' 'python-sphinx_rtd_theme' 'graphviz' 'imagemagick' 'git') +optdepends=('schedtool') +options=('!strip') +source=("https://git.kernel.org/torvalds/t/linux-${_basekernel}-${_sub}.tar.gz" + 'config.x86_64' # stock Arch config + #'config_hardened.x86_64' # hardened Arch config + 90-cleanup.hook + cleanup + # ARCH Patches + 0001-add-sysctl-to-disallow-unprivileged-CLONE_NEWUSER-by.patch + # TkG + 0002-clear-patches.patch + 0003-glitched-base.patch + 0003-glitched-cfs.patch + #0004-glitched-ondemand-muqss.patch + #0004-glitched-muqss.patch + #0004-5.8-ck1.patch + 0005-glitched-ondemand-pds.patch + 0005-glitched-pds.patch + 0005-v5.8_undead-pds099o.patch + 0006-add-acs-overrides_iommu.patch + 0007-v5.8-fsync.patch + #0008-5.8-bcachefs.patch + #0009-glitched-ondemand-bmq.patch + #0009-glitched-bmq.patch + #0009-bmq_v5.8-r0.patch + 0011-ZFS-fix.patch + #0012-linux-hardened.patch +) +sha256sums=('cc75371d1193f656bf02b7180908bbba8c0272c8316c1a90f81fbc97b77867bd' + '1bfe5ec855c8774f9cade253c0770d3691295ed03f4707abc76b0f29bb6fd67b' + '1e15fc2ef3fa770217ecc63a220e5df2ddbcf3295eb4a021171e7edd4c6cc898' + '66a03c246037451a77b4d448565b1d7e9368270c7d02872fbd0b5d024ed0a997' + 'f6383abef027fd9a430fd33415355e0df492cdc3c90e9938bf2d98f4f63b32e6' + 'd02bf5ca08fd610394b9d3a0c3b176d74af206f897dee826e5cbaec97bb4a4aa' + '3c30d6645680d4818240dcf61172777791533fea18d00d2208e9863a338f8555' + '7058e57fd68367b029adc77f2a82928f1433daaf02c8c279cb2d13556c8804d7' + '62496f9ca788996181ef145f96ad26291282fcc3fb95cdc04080dcf84365be33' + '7fd8e776209dac98627453fda754bdf9aff4a09f27cb0b3766d7983612eb3c74' + '8b84d006888c9f91c294ebff2741b250126a33dee8aeceea2ae12e6f714b806c' + '19661ec0d39f9663452b34433214c755179894528bf73a42f6ba52ccf572832a' + 'cd225e86d72eaf6c31ef3d7b20df397f4cc44ddd04389850691292cdf292b204' + '49262ce4a8089fa70275aad742fc914baa28d9c384f710c9a62f64796d13e104') + +export KBUILD_BUILD_HOST=archlinux +export KBUILD_BUILD_USER=$pkgbase +export KBUILD_BUILD_TIMESTAMP="$(date -Ru${SOURCE_DATE_EPOCH:+d @$SOURCE_DATE_EPOCH})" + +user_patcher() { + # To patch the user because all your base are belong to us + local _patches=("$_where"/*."${_userpatch_ext}revert") + if [ ${#_patches[@]} -ge 2 ] || [ -e "${_patches}" ]; then + if [ "$_user_patches_no_confirm" != "true" ]; then + msg2 "Found ${#_patches[@]} 'to revert' userpatches for ${_userpatch_target}:" + printf '%s\n' "${_patches[@]}" + read -rp "Do you want to install it/them? 
- Be careful with that ;)"$'\n> N/y : ' _CONDITION; + fi + if [ "$_CONDITION" == "y" ] || [ "$_user_patches_no_confirm" == "true" ]; then + for _f in "${_patches[@]}"; do + if [ -e "${_f}" ]; then + msg2 "######################################################" + msg2 "" + msg2 "Reverting your own ${_userpatch_target} patch ${_f}" + msg2 "" + msg2 "######################################################" + patch -Np1 -R < "${_f}" + echo "Reverted your own patch ${_f}" >> "$_where"/last_build_config.log + fi + done + fi + fi + + _patches=("$_where"/*."${_userpatch_ext}patch") + if [ ${#_patches[@]} -ge 2 ] || [ -e "${_patches}" ]; then + if [ "$_user_patches_no_confirm" != "true" ]; then + msg2 "Found ${#_patches[@]} userpatches for ${_userpatch_target}:" + printf '%s\n' "${_patches[@]}" + read -rp "Do you want to install it/them? - Be careful with that ;)"$'\n> N/y : ' _CONDITION; + fi + if [ "$_CONDITION" == "y" ] || [ "$_user_patches_no_confirm" == "true" ]; then + for _f in "${_patches[@]}"; do + if [ -e "${_f}" ]; then + msg2 "######################################################" + msg2 "" + msg2 "Applying your own ${_userpatch_target} patch ${_f}" + msg2 "" + msg2 "######################################################" + patch -Np1 < "${_f}" + echo "Applied your own patch ${_f}" >> "$_where"/last_build_config.log + fi + done + fi + fi +} + +prepare() { + rm -rf $pkgdir # Nuke the entire pkg folder so it'll get regenerated clean on next build + + ln -s "${_where}/customization.cfg" "${srcdir}" # workaround + + cd "${srcdir}/linux-${_basekernel}-${_sub}" + + msg2 "Setting version..." + scripts/setlocalversion --save-scmversion + echo "-$pkgrel-tkg-${_cpusched}" > localversion.10-pkgrel + echo "" > localversion.20-pkgname + + # add upstream patch + #patch -p1 -i ../patch-"${pkgver}" + + # ARCH Patches + if [ "${_configfile}" == "config_hardened.x86_64" ] && [ "${_cpusched}" == "cfs" ]; then + msg2 "Using linux hardened patchset" + patch -Np1 -i ../0012-linux-hardened.patch + else + patch -Np1 -i ../0001-add-sysctl-to-disallow-unprivileged-CLONE_NEWUSER-by.patch + fi + + # TkG + patch -Np1 -i ../0002-clear-patches.patch + + patch -Np1 -i ../0003-glitched-base.patch + + if [ "${_cpusched}" == "MuQSS" ]; then + # MuQSS + patch -Np1 -i ../0004-5.8-ck1.patch + if [ "${_aggressive_ondemand}" == "true" ]; then + patch -Np1 -i ../0004-glitched-ondemand-muqss.patch + fi + patch -Np1 -i ../0004-glitched-muqss.patch + elif [ "${_cpusched}" == "pds" ]; then + # PDS-mq + patch -Np1 -i ../0005-v5.8_undead-pds099o.patch + if [ "${_aggressive_ondemand}" == "true" ]; then + patch -Np1 -i ../0005-glitched-ondemand-pds.patch + fi + patch -Np1 -i ../0005-glitched-pds.patch + elif [ "${_cpusched}" == "bmq" ]; then + # BMQ + patch -Np1 -i ../0009-bmq_v5.8-r0.patch + if [ "${_aggressive_ondemand}" == "true" ]; then + patch -Np1 -i ../0009-glitched-ondemand-bmq.patch + fi + patch -Np1 -i ../0009-glitched-bmq.patch + else + patch -Np1 -i ../0003-glitched-cfs.patch + fi + + if [ -z "${_configfile}" ]; then + _configfile="config.x86_64" + fi + + cat "${srcdir}/${_configfile}" > ./.config + + # Set some -tkg defaults + echo "# CONFIG_DYNAMIC_FAULT is not set" >> ./.config + sed -i -e 's/CONFIG_DEFAULT_FQ_CODEL=y/# CONFIG_DEFAULT_FQ_CODEL is not set/' ./.config + echo "CONFIG_DEFAULT_CAKE=y" >> ./.config + echo "CONFIG_NR_TTY_DEVICES=63" >> ./.config + echo "# CONFIG_NTP_PPS is not set" >> ./.config + sed -i -e 's/CONFIG_CRYPTO_LZ4=m/CONFIG_CRYPTO_LZ4=y/' ./.config + sed -i -e 
's/CONFIG_CRYPTO_LZ4HC=m/CONFIG_CRYPTO_LZ4HC=y/' ./.config + sed -i -e 's/CONFIG_LZ4_COMPRESS=m/CONFIG_LZ4_COMPRESS=y/' ./.config + sed -i -e 's/CONFIG_LZ4HC_COMPRESS=m/CONFIG_LZ4HC_COMPRESS=y/' ./.config + sed -i -e 's/CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZO=y/# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZO is not set/' ./.config + sed -i -e 's/# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZ4 is not set/CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZ4=y/' ./.config + sed -i -e 's/CONFIG_ZSWAP_COMPRESSOR_DEFAULT="lzo"/CONFIG_ZSWAP_COMPRESSOR_DEFAULT="lz4"/' ./.config + #sed -i -e 's/CONFIG_RCU_BOOST_DELAY=500/CONFIG_RCU_BOOST_DELAY=0/' ./.config + sed -i -e 's/# CONFIG_CMDLINE_BOOL is not set/CONFIG_CMDLINE_BOOL=y/' ./.config + echo "CONFIG_CMDLINE=\"${_custom_commandline}\"" >> ./.config + echo "# CONFIG_CMDLINE_OVERRIDE is not set" >> ./.config + if [ "$_noccache" != "true" ] && pacman -Qq ccache &> /dev/null; then + sed -i -e 's/CONFIG_GCC_PLUGINS=y/# CONFIG_GCC_PLUGINS is not set/' ./.config + fi + + if [ "$_font_autoselect" != "false" ]; then + sed -i -e 's/CONFIG_FONT_TER16x32=y/# CONFIG_FONT_TER16x32 is not set\nCONFIG_FONT_AUTOSELECT=y/' ./.config + fi + + # Inject cpuopts options + echo "# CONFIG_MK8SSE3 is not set" >> ./.config + echo "# CONFIG_MK10 is not set" >> ./.config + echo "# CONFIG_MBARCELONA is not set" >> ./.config + echo "# CONFIG_MBOBCAT is not set" >> ./.config + echo "# CONFIG_MJAGUAR is not set" >> ./.config + echo "# CONFIG_MBULLDOZER is not set" >> ./.config + echo "# CONFIG_MPILEDRIVER is not set" >> ./.config + echo "# CONFIG_MSTEAMROLLER is not set" >> ./.config + echo "# CONFIG_MEXCAVATOR is not set" >> ./.config + echo "# CONFIG_MZEN is not set" >> ./.config + echo "# CONFIG_MZEN2 is not set" >> ./.config + echo "# CONFIG_MATOM is not set" >> ./.config + echo "# CONFIG_MNEHALEM is not set" >> ./.config + echo "# CONFIG_MWESTMERE is not set" >> ./.config + echo "# CONFIG_MSILVERMONT is not set" >> ./.config + echo "# CONFIG_MSANDYBRIDGE is not set" >> ./.config + echo "# CONFIG_MIVYBRIDGE is not set" >> ./.config + echo "# CONFIG_MHASWELL is not set" >> ./.config + echo "# CONFIG_MBROADWELL is not set" >> ./.config + echo "# CONFIG_MSKYLAKE is not set" >> ./.config + echo "# CONFIG_MSKYLAKEX is not set" >> ./.config + echo "# CONFIG_MCANNONLAKE is not set" >> ./.config + echo "# CONFIG_MICELAKE is not set" >> ./.config + echo "# CONFIG_MGOLDMONT is not set" >> ./.config + echo "# CONFIG_MGOLDMONTPLUS is not set" >> ./.config + echo "# CONFIG_MCASCADELAKE is not set" >> ./.config + + # Disable some debugging + if [ "${_debugdisable}" == "true" ]; then + sed -i -e 's/CONFIG_SLUB_DEBUG=y/# CONFIG_SLUB_DEBUG is not set/' ./.config + sed -i -e 's/CONFIG_PM_DEBUG=y/# CONFIG_PM_DEBUG is not set/' ./.config + sed -i -e 's/CONFIG_PM_ADVANCED_DEBUG=y/# CONFIG_PM_ADVANCED_DEBUG is not set/' ./.config + sed -i -e 's/CONFIG_PM_SLEEP_DEBUG=y/# CONFIG_PM_SLEEP_DEBUG is not set/' ./.config + sed -i -e 's/CONFIG_ACPI_DEBUG=y/# CONFIG_ACPI_DEBUG is not set/' ./.config + sed -i -e 's/CONFIG_SCHED_DEBUG=y/# CONFIG_SCHED_DEBUG is not set/' ./.config + sed -i -e 's/CONFIG_LATENCYTOP=y/# CONFIG_LATENCYTOP is not set/' ./.config + sed -i -e 's/CONFIG_DEBUG_PREEMPT=y/# CONFIG_DEBUG_PREEMPT is not set/' ./.config + fi + + if [ "${_cpusched}" == "MuQSS" ]; then + # MuQSS default config + echo "CONFIG_SCHED_MUQSS=y" >> ./.config + elif [ "${_cpusched}" == "pds" ]; then + # PDS default config + echo "CONFIG_SCHED_PDS=y" >> ./.config + elif [ "${_cpusched}" == "bmq" ]; then + # BMQ default config + echo "CONFIG_SCHED_BMQ=y" 
>> ./.config + fi + + if [ "${_cpusched}" == "MuQSS" ] || [ "${_cpusched}" == "pds" ] || [ "${_cpusched}" == "bmq" ]; then + # Disable CFS + sed -i -e 's/CONFIG_FAIR_GROUP_SCHED=y/# CONFIG_FAIR_GROUP_SCHED is not set/' ./.config + sed -i -e 's/CONFIG_CFS_BANDWIDTH=y/# CONFIG_CFS_BANDWIDTH is not set/' ./.config + # sched yield type + if [ -n "$_sched_yield_type" ]; then + CONDITION0="$_sched_yield_type" + else + plain "" + plain "CPU sched_yield_type - Choose what sort of yield sched_yield will perform." + plain "" + plain "For PDS and MuQSS:" + plain "0: No yield." + plain "1: Yield only to better priority/deadline tasks." + plain "2: Expire timeslice and recalculate deadline." + plain "" + plain "For BMQ (experimental) - No recommended value yet, so try for yourself x) :" + plain "0: No yield." + plain "1: Deboost and requeue task. (default)" + plain "2: Set rq skip task." + read -rp "`echo $'\n > 0. Recommended option for gaming on PDS and MuQSS - "tkg" default\n 1. Default, but can lead to stability issues on some platforms\n 2. Can be a good option with low rr_interval on MuQSS\n [0-2?]: '`" CONDITION0; + fi + if [ "$CONDITION0" == "1" ]; then + msg2 "Using default CPU sched yield type (1)" + elif [ "$CONDITION0" == "2" ]; then + sed -i -e 's/int sched_yield_type __read_mostly = 1;/int sched_yield_type __read_mostly = 2;/' ./kernel/sched/"${_cpusched}".c + else + sed -i -e 's/int sched_yield_type __read_mostly = 1;/int sched_yield_type __read_mostly = 0;/' ./kernel/sched/"${_cpusched}".c + fi + fi + + # Round Robin interval + if [ "${_cpusched}" == "MuQSS" ] || [ "${_cpusched}" == "pds" ] || [ "${_cpusched}" == "bmq" ]; then + if [ -n "$_rr_interval" ]; then + CONDITION1="$_rr_interval" + else + plain "" + plain "Round Robin interval is the longest duration two tasks with the same nice level will" + plain "be delayed for. When CPU time is requested by a task, it receives a time slice equal" + plain "to the rr_interval in addition to a virtual deadline. When using yield_type 2, a low" + plain "value can help offset the disadvantages of rescheduling a process that has yielded." 
+ plain "" + plain "MuQSS default: 6ms" + plain "PDS default: 4ms" + plain "BMQ default: 2ms" + read -rp "`echo $'\n > 0.Keep defaults\n 1.2ms\n 2.4ms\n 3.6ms\n 4.8ms\n [0-4?]: '`" CONDITION1; + fi + if [ "$CONDITION1" == "1" ]; then + msg2 "Using 2ms rr_interval" + _rrvalue="2" + elif [ "$CONDITION1" == "2" ]; then + msg2 "Using 4ms rr_interval" + _rrvalue="4" + elif [ "$CONDITION1" == "3" ]; then + msg2 "Using 6ms rr_interval" + _rrvalue="6" + elif [ "$CONDITION1" == "4" ]; then + msg2 "Using 8ms rr_interval" + _rrvalue="8" + else + msg2 "Using default rr_interval" + _rrvalue="default" + fi + if [ "$_rrvalue" != "default" ]; then + if [ "${_cpusched}" == "MuQSS" ]; then + sed -i -e "s/int rr_interval __read_mostly = 6;/int rr_interval __read_mostly = ${_rrvalue};/" ./kernel/sched/"${_cpusched}".c + elif [ "${_cpusched}" == "pds" ]; then + sed -i -e "s/#define SCHED_DEFAULT_RR (4)/#define SCHED_DEFAULT_RR (${_rrvalue})/" ./kernel/sched/"${_cpusched}".c + elif [ "${_cpusched}" == "bmq" ]; then + sed -i -e "s/u64 sched_timeslice_ns __read_mostly = (4 * 1000 * 1000);/u64 sched_timeslice_ns __read_mostly = (${_rrvalue} * 1000 * 1000);/" ./kernel/sched/"${_cpusched}".c + fi + else + if [ "${_cpusched}" == "bmq" ]; then + sed -i -e "s/u64 sched_timeslice_ns __read_mostly = (4 * 1000 * 1000);/u64 sched_timeslice_ns __read_mostly = (2 * 1000 * 1000);/" ./kernel/sched/"${_cpusched}".c + fi + fi + fi + + # zenify + if [ "$_zenify" == "true" ]; then + echo "CONFIG_ZENIFY=y" >> ./.config + elif [ "$_zenify" == "false" ]; then + echo "# CONFIG_ZENIFY is not set" >> ./.config + fi + + # compiler optimization level + if [ "$_compileroptlevel" == "1" ]; then + echo "# CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE_O3 is not set" >> ./.config + elif [ "$_compileroptlevel" == "2" ]; then + sed -i -e 's/CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y/# CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE is not set/' ./.config + echo "CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE_O3=y" >> ./.config + elif [ "$_compileroptlevel" == "3" ]; then + sed -i -e 's/CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y/# CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE is not set/' ./.config + sed -i -e 's/# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set/CONFIG_CC_OPTIMIZE_FOR_SIZE=y/' ./.config + echo "# CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE_O3 is not set" >> ./.config + fi + + # cpu opt + if [ -n "$_processor_opt" ] && [ "$_processor_opt" != "native" ]; then + echo "# CONFIG_MNATIVE is not set" >> ./.config + fi + + if [ -n "$_processor_opt" ] && [ "$_processor_opt" != "generic" ]; then + sed -i -e 's/CONFIG_GENERIC_CPU=y/# CONFIG_GENERIC_CPU is not set/' ./.config + fi + + if [ "$_processor_opt" == "native" ]; then + echo "CONFIG_MNATIVE=y" >> ./.config + elif [ "$_processor_opt" == "k8" ]; then + sed -i -e 's/# CONFIG_MK8 is not set/CONFIG_MK8=y/' ./.config + elif [ "$_processor_opt" == "k8sse3" ]; then + sed -i -e 's/# CONFIG_MK8SSE3 is not set/CONFIG_MK8SSE3=y/' ./.config + elif [ "$_processor_opt" == "k10" ]; then + sed -i -e 's/# CONFIG_MK10 is not set/CONFIG_MK10=y/' ./.config + elif [ "$_processor_opt" == "barcelona" ]; then + sed -i -e 's/# CONFIG_MBARCELONA is not set/CONFIG_MBARCELONA=y/' ./.config + elif [ "$_processor_opt" == "bobcat" ]; then + sed -i -e 's/# CONFIG_MBOBCAT is not set/CONFIG_MBOBCAT=y/' ./.config + elif [ "$_processor_opt" == "jaguar" ]; then + sed -i -e 's/# CONFIG_MJAGUAR is not set/CONFIG_MJAGUAR=y/' ./.config + elif [ "$_processor_opt" == "bulldozer" ]; then + sed -i -e 's/# CONFIG_MBULLDOZER is not set/CONFIG_MBULLDOZER=y/' ./.config + elif [ "$_processor_opt" == 
"piledriver" ]; then + sed -i -e 's/# CONFIG_MPILEDRIVER is not set/CONFIG_MPILEDRIVER=y/' ./.config + elif [ "$_processor_opt" == "steamroller" ]; then + sed -i -e 's/# CONFIG_MSTEAMROLLER is not set/CONFIG_MSTEAMROLLER=y/' ./.config + elif [ "$_processor_opt" == "excavator" ]; then + sed -i -e 's/# CONFIG_MEXCAVATOR is not set/CONFIG_MEXCAVATOR=y/' ./.config + elif [ "$_processor_opt" == "zen" ]; then + sed -i -e 's/# CONFIG_MZEN is not set/CONFIG_MZEN=y/' ./.config + elif [ "$_processor_opt" == "zen2" ]; then + sed -i -e 's/# CONFIG_MZEN2 is not set/CONFIG_MZEN2=y/' ./.config + elif [ "$_processor_opt" == "mpsc" ]; then + sed -i -e 's/# CONFIG_MPSC is not set/CONFIG_MPSC=y/' ./.config + elif [ "$_processor_opt" == "atom" ]; then + sed -i -e 's/# CONFIG_MATOM is not set/CONFIG_MATOM=y/' ./.config + elif [ "$_processor_opt" == "core2" ]; then + sed -i -e 's/# CONFIG_MCORE2 is not set/CONFIG_MCORE2=y/' ./.config + elif [ "$_processor_opt" == "nehalem" ]; then + sed -i -e 's/# CONFIG_MNEHALEM is not set/CONFIG_MNEHALEM=y/' ./.config + elif [ "$_processor_opt" == "westmere" ]; then + sed -i -e 's/# CONFIG_MWESTMERE is not set/CONFIG_MWESTMERE=y/' ./.config + elif [ "$_processor_opt" == "silvermont" ]; then + sed -i -e 's/# CONFIG_MSILVERMONT is not set/CONFIG_MSILVERMONT=y/' ./.config + elif [ "$_processor_opt" == "sandybridge" ]; then + sed -i -e 's/# CONFIG_MSANDYBRIDGE is not set/CONFIG_MSANDYBRIDGE=y/' ./.config + elif [ "$_processor_opt" == "ivybridge" ]; then + sed -i -e 's/# CONFIG_MIVYBRIDGE is not set/CONFIG_MIVYBRIDGE=y/' ./.config + elif [ "$_processor_opt" == "haswell" ]; then + sed -i -e 's/# CONFIG_MHASWELL is not set/CONFIG_MHASWELL=y/' ./.config + elif [ "$_processor_opt" == "broadwell" ]; then + sed -i -e 's/# CONFIG_MBROADWELL is not set/CONFIG_MBROADWELL=y/' ./.config + elif [ "$_processor_opt" == "skylake" ]; then + sed -i -e 's/# CONFIG_MSKYLAKE is not set/CONFIG_MSKYLAKE=y/' ./.config + elif [ "$_processor_opt" == "skylakex" ]; then + sed -i -e 's/# CONFIG_MSKYLAKEX is not set/CONFIG_MSKYLAKEX=y/' ./.config + elif [ "$_processor_opt" == "cannonlake" ]; then + sed -i -e 's/# CONFIG_MCANNONLAKE is not set/CONFIG_MCANNONLAKE=y/' ./.config + elif [ "$_processor_opt" == "icelake" ]; then + sed -i -e 's/# CONFIG_MICELAKE is not set/CONFIG_MICELAKE=y/' ./.config + elif [ "$_processor_opt" == "goldmont" ]; then + sed -i -e 's/# CONFIG_MGOLDMONT is not set/CONFIG_MGOLDMONT=y/' ./.config + elif [ "$_processor_opt" == "goldmontplus" ]; then + sed -i -e 's/# CONFIG_MGOLDMONTPLUS is not set/CONFIG_MGOLDMONTPLUS=y/' ./.config + elif [ "$_processor_opt" == "cascadelake" ]; then + sed -i -e 's/# CONFIG_MCASCADELAKE is not set/CONFIG_MCASCADELAKE=y/' ./.config + fi + + # irq threading + if [ "$_irq_threading" == "true" ]; then + echo "CONFIG_FORCE_IRQ_THREADING=y" >> ./.config + elif [ "$_irq_threading" == "false" ]; then + echo "# CONFIG_FORCE_IRQ_THREADING is not set" >> ./.config + fi + + # smt nice + if [ "$_smt_nice" == "true" ]; then + echo "CONFIG_SMT_NICE=y" >> ./.config + elif [ "$_smt_nice" == "false" ]; then + echo "# CONFIG_SMT_NICE is not set" >> ./.config + fi + + # random trust cpu + if [ "$_random_trust_cpu" == "true" ]; then + sed -i -e 's/# CONFIG_RANDOM_TRUST_CPU is not set/CONFIG_RANDOM_TRUST_CPU=y/' ./.config + fi + + # rq sharing + if [ "$_runqueue_sharing" == "none" ]; then + echo -e "CONFIG_RQ_NONE=y\n# CONFIG_RQ_SMT is not set\n# CONFIG_RQ_MC is not set\n# CONFIG_RQ_MC_LLC is not set\n# CONFIG_RQ_SMP is not set\n# CONFIG_RQ_ALL is not set" >> ./.config + elif [ 
-z "$_runqueue_sharing" ] || [ "$_runqueue_sharing" == "smt" ]; then + echo -e "# CONFIG_RQ_NONE is not set\nCONFIG_RQ_SMT=y\n# CONFIG_RQ_MC is not set\n# CONFIG_RQ_MC_LLC is not set\n# CONFIG_RQ_SMP is not set\n# CONFIG_RQ_ALL is not set" >> ./.config + elif [ "$_runqueue_sharing" == "mc" ]; then + echo -e "# CONFIG_RQ_NONE is not set\n# CONFIG_RQ_SMT is not set\nCONFIG_RQ_MC=y\n# CONFIG_RQ_MC_LLC is not set\n# CONFIG_RQ_SMP is not set\n# CONFIG_RQ_ALL is not set" >> ./.config + elif [ "$_runqueue_sharing" == "smp" ]; then + echo -e "# CONFIG_RQ_NONE is not set\n# CONFIG_RQ_SMT is not set\n# CONFIG_RQ_MC is not set\n# CONFIG_RQ_MC_LLC is not set\nCONFIG_RQ_SMP=y\n# CONFIG_RQ_ALL is not set" >> ./.config + elif [ "$_runqueue_sharing" == "all" ]; then + echo -e "# CONFIG_RQ_NONE is not set\n# CONFIG_RQ_SMT is not set\n# CONFIG_RQ_MC is not set\n# CONFIG_RQ_MC_LLC is not set\n# CONFIG_RQ_SMP is not set\nCONFIG_RQ_ALL=y" >> ./.config + elif [ "$_runqueue_sharing" == "mc-llc" ]; then + echo -e "# CONFIG_RQ_NONE is not set\n# CONFIG_RQ_SMT is not set\n# CONFIG_RQ_MC is not set\nCONFIG_RQ_MC_LLC=y\n# CONFIG_RQ_SMP is not set\n# CONFIG_RQ_ALL is not set" >> ./.config + fi + + # timer freq + if [ -n "$_timer_freq" ] && [ "$_timer_freq" != "300" ]; then + sed -i -e 's/CONFIG_HZ_300=y/# CONFIG_HZ_300 is not set/' ./.config + sed -i -e 's/CONFIG_HZ_300_NODEF=y/# CONFIG_HZ_300_NODEF is not set/' ./.config + if [ "$_timer_freq" == "1000" ]; then + sed -i -e 's/# CONFIG_HZ_1000 is not set/CONFIG_HZ_1000=y/' ./.config + sed -i -e 's/CONFIG_HZ=300/CONFIG_HZ=1000/' ./.config + echo "# CONFIG_HZ_500 is not set" >> ./.config + echo "# CONFIG_HZ_500_NODEF is not set" >> ./.config + echo "# CONFIG_HZ_750 is not set" >> ./.config + echo "# CONFIG_HZ_750_NODEF is not set" >> ./.config + echo "CONFIG_HZ_1000_NODEF=y" >> ./.config + echo "# CONFIG_HZ_250_NODEF is not set" >> ./.config + echo "# CONFIG_HZ_300_NODEF is not set" >> ./.config + elif [ "$_timer_freq" == "750" ]; then + sed -i -e 's/CONFIG_HZ=300/CONFIG_HZ=750/' ./.config + echo "# CONFIG_HZ_500 is not set" >> ./.config + echo "# CONFIG_HZ_500_NODEF is not set" >> ./.config + echo "CONFIG_HZ_750=y" >> ./.config + echo "CONFIG_HZ_750_NODEF=y" >> ./.config + echo "# CONFIG_HZ_1000_NODEF is not set" >> ./.config + echo "# CONFIG_HZ_250_NODEF is not set" >> ./.config + echo "# CONFIG_HZ_300_NODEF is not set" >> ./.config + elif [ "$_timer_freq" == "500" ]; then + sed -i -e 's/CONFIG_HZ=300/CONFIG_HZ=500/' ./.config + echo "CONFIG_HZ_500=y" >> ./.config + echo "CONFIG_HZ_500_NODEF=y" >> ./.config + echo "# CONFIG_HZ_750 is not set" >> ./.config + echo "# CONFIG_HZ_750_NODEF is not set" >> ./.config + echo "# CONFIG_HZ_1000_NODEF is not set" >> ./.config + echo "# CONFIG_HZ_250_NODEF is not set" >> ./.config + echo "# CONFIG_HZ_300_NODEF is not set" >> ./.config + elif [ "$_timer_freq" == "100" ]; then + sed -i -e 's/CONFIG_HZ=300/CONFIG_HZ=100/' ./.config + echo "# CONFIG_HZ_500 is not set" >> ./.config + echo "# CONFIG_HZ_750 is not set" >> ./.config + echo "# CONFIG_HZ_1000_NODEF is not set" >> ./.config + echo "# CONFIG_HZ_750_NODEF is not set" >> ./.config + echo "# CONFIG_HZ_500_NODEF is not set" >> ./.config + echo "# CONFIG_HZ_250_NODEF is not set" >> ./.config + echo "# CONFIG_HZ_300_NODEF is not set" >> ./.config + echo "CONFIG_HZ_100=y" >> ./.config + echo "CONFIG_HZ_100_NODEF=y" >> ./.config + fi + elif [ "${_cpusched}" == "MuQSS" ] && [ -z "$_timer_freq" ]; then + sed -i -e 's/CONFIG_HZ=300/CONFIG_HZ=100/' ./.config + echo "# CONFIG_HZ_500 is not 
set" >> ./.config + echo "# CONFIG_HZ_750 is not set" >> ./.config + echo "# CONFIG_HZ_1000_NODEF is not set" >> ./.config + echo "# CONFIG_HZ_750_NODEF is not set" >> ./.config + echo "# CONFIG_HZ_500_NODEF is not set" >> ./.config + echo "# CONFIG_HZ_250_NODEF is not set" >> ./.config + echo "# CONFIG_HZ_300_NODEF is not set" >> ./.config + echo "CONFIG_HZ_100=y" >> ./.config + echo "CONFIG_HZ_100_NODEF=y" >> ./.config + else + sed -i -e 's/CONFIG_HZ_300=y/# CONFIG_HZ_300 is not set/' ./.config + sed -i -e 's/CONFIG_HZ_300_NODEF=y/# CONFIG_HZ_300_NODEF is not set/' ./.config + sed -i -e 's/CONFIG_HZ=300/CONFIG_HZ=500/' ./.config + echo "CONFIG_HZ_500=y" >> ./.config + echo "CONFIG_HZ_500_NODEF=y" >> ./.config + echo "# CONFIG_HZ_250_NODEF is not set" >> ./.config + echo "# CONFIG_HZ_300_NODEF is not set" >> ./.config + fi + + # default cpu gov + if [ "$_default_cpu_gov" == "performance" ]; then + sed -i -e 's/CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL=y/# CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL is not set/' ./.config + sed -i -e 's/# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set/CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y/' ./.config + elif [ "$_default_cpu_gov" == "ondemand" ]; then + sed -i -e 's/CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL=y/# CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL is not set/' ./.config + sed -i -e 's/# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set/CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y/' ./.config + fi + + # ACPI_CPUFREQ disablement + if [ "$_disable_acpi_cpufreq" == "true" ]; then + sed -i -e 's/CONFIG_X86_ACPI_CPUFREQ=m/# CONFIG_X86_ACPI_CPUFREQ is not set/' ./.config + fi + + # ftrace + if [ -z "$_ftracedisable" ]; then + plain "" + plain "Disable FUNCTION_TRACER/GRAPH_TRACER? Lowers overhead but limits debugging" + plain "and analyzing of kernel functions." + read -rp "`echo $' > N/y : '`" CONDITION2; + fi + if [ "$CONDITION2" == "y" ] || [ "$_ftracedisable" == "true" ]; then + sed -i -e 's/CONFIG_FUNCTION_TRACER=y/# CONFIG_FUNCTION_TRACER is not set/' ./.config + sed -i -e 's/CONFIG_FUNCTION_GRAPH_TRACER=y/# CONFIG_FUNCTION_GRAPH_TRACER is not set/' ./.config + fi + + # disable numa + if [ -z "$_numadisable" ]; then + plain "" + plain "Disable NUMA? Lowers overhead, but breaks CUDA/NvEnc on Nvidia if disabled." + plain "https://bbs.archlinux.org/viewtopic.php?id=239174" + read -rp "`echo $' > N/y : '`" CONDITION3; + fi + if [ "$CONDITION3" == "y" ] || [ "$_numadisable" == "true" ]; then + # disable NUMA since 99.9% of users do not have multiple CPUs but do have multiple cores in one CPU + sed -i -e 's/CONFIG_NUMA=y/# CONFIG_NUMA is not set/' \ + -i -e '/CONFIG_AMD_NUMA=y/d' \ + -i -e '/CONFIG_X86_64_ACPI_NUMA=y/d' \ + -i -e '/CONFIG_NODES_SPAN_OTHER_NODES=y/d' \ + -i -e '/# CONFIG_NUMA_EMU is not set/d' \ + -i -e '/CONFIG_NODES_SHIFT=6/d' \ + -i -e '/CONFIG_NEED_MULTIPLE_NODES=y/d' \ + -i -e '/CONFIG_USE_PERCPU_NUMA_NODE_ID=y/d' \ + -i -e '/CONFIG_ACPI_NUMA=y/d' ./.config + fi + + # tickless + if [ -z "$_tickless" ]; then + plain "" + plain "Use CattaRappa mode (Tickless/Dynticks) ?" + plain "Can give higher performances in many cases but lower consistency on some hardware." + plain "Just tickless idle can perform better with some platforms (mostly AMD) or CPU schedulers (mostly MuQSS)." 
+ if [ "${_cpusched}" == "MuQSS" ]; then + read -rp "`echo $'\n 0.No, use periodic ticks\n 1.Yes, full tickless baby!\n > 2.Just tickless idle plz\n [0-2?]: '`" CONDITION4; + else + read -rp "`echo $'\n 0.No, use periodic ticks\n > 1.Yes, full tickless baby!\n 2.Just tickless idle plz\n [0-2?]: '`" CONDITION4; + fi + fi + if [ "$CONDITION4" == "0" ] || [ "$_tickless" == "0" ]; then + echo "# CONFIG_NO_HZ_FULL_NODEF is not set" >> ./.config + sed -i -e 's/# CONFIG_HZ_PERIODIC is not set/CONFIG_HZ_PERIODIC=y/' ./.config + sed -i -e 's/CONFIG_NO_HZ_IDLE=y/# CONFIG_NO_HZ_IDLE is not set/' ./.config + sed -i -e 's/CONFIG_NO_HZ_FULL=y/# CONFIG_NO_HZ_FULL is not set/' ./.config + sed -i -e 's/CONFIG_NO_HZ=y/# CONFIG_NO_HZ is not set/' ./.config + sed -i -e 's/CONFIG_NO_HZ_COMMON=y/# CONFIG_NO_HZ_COMMON is not set/' ./.config + elif [ "$CONDITION4" == "2" ] || [ "$_tickless" == "2" ]; then + echo "# CONFIG_NO_HZ_FULL_NODEF is not set" >> ./.config + sed -i -e 's/CONFIG_HZ_PERIODIC=y/# CONFIG_HZ_PERIODIC is not set/' ./.config + sed -i -e 's/# CONFIG_NO_HZ_IDLE is not set/CONFIG_NO_HZ_IDLE=y/' ./.config + sed -i -e 's/CONFIG_NO_HZ_FULL=y/# CONFIG_NO_HZ_FULL is not set/' ./.config + sed -i -e 's/# CONFIG_NO_HZ is not set/CONFIG_NO_HZ=y/' ./.config + sed -i -e 's/# CONFIG_NO_HZ_COMMON is not set/CONFIG_NO_HZ_COMMON=y/' ./.config + else + if [ "${_cpusched}" == "MuQSS" ]; then + echo "# CONFIG_NO_HZ_FULL_NODEF is not set" >> ./.config + sed -i -e 's/CONFIG_HZ_PERIODIC=y/# CONFIG_HZ_PERIODIC is not set/' ./.config + sed -i -e 's/# CONFIG_NO_HZ_IDLE is not set/CONFIG_NO_HZ_IDLE=y/' ./.config + sed -i -e 's/CONFIG_NO_HZ_FULL=y/# CONFIG_NO_HZ_FULL is not set/' ./.config + sed -i -e 's/# CONFIG_NO_HZ is not set/CONFIG_NO_HZ=y/' ./.config + sed -i -e 's/# CONFIG_NO_HZ_COMMON is not set/CONFIG_NO_HZ_COMMON=y/' ./.config + else + echo "CONFIG_NO_HZ_FULL_NODEF=y" >> ./.config + sed -i -e 's/CONFIG_HZ_PERIODIC=y/# CONFIG_HZ_PERIODIC is not set/' ./.config + sed -i -e 's/CONFIG_NO_HZ_IDLE=y/# CONFIG_NO_HZ_IDLE is not set/' ./.config + sed -i -e 's/# CONFIG_NO_HZ_FULL is not set/CONFIG_NO_HZ_FULL=y/' ./.config + sed -i -e 's/# CONFIG_NO_HZ is not set/CONFIG_NO_HZ=y/' ./.config + sed -i -e 's/# CONFIG_NO_HZ_COMMON is not set/CONFIG_NO_HZ_COMMON=y/' ./.config + echo "CONFIG_CONTEXT_TRACKING=y" >> ./.config + echo "# CONFIG_CONTEXT_TRACKING_FORCE is not set" >> ./.config + fi + fi + + # voluntary preempt + if [ -z "$_voluntary_preempt" ]; then + plain "" + plain "Use explicit preemption points?" + plain "It can improve latency on PDS (at the cost of throughput)" + plain "and improve throughput on other schedulers (at the cost of latency)" + read -rp "`echo $' > N/y : '`" CONDITION5; + fi + if [ "$CONDITION5" == "y" ] || [ "$_voluntary_preempt" == "true" ]; then + sed -i -e 's/CONFIG_PREEMPT=y/# CONFIG_PREEMPT is not set/' ./.config + sed -i -e 's/CONFIG_PREEMPT_LL=y/# CONFIG_PREEMPT_LL is not set/' ./.config + sed -i -e 's/# CONFIG_PREEMPT_VOLUNTARY is not set/CONFIG_PREEMPT_VOLUNTARY=y/' ./.config + fi + + # Open Firmware support + if [ -z "$_OFenable" ]; then + plain "" + plain "Enable Device Tree and Open Firmware support?" + read -rp "`echo $' > N/y : '`" CONDITION6; + fi + if [ "$CONDITION6" == "y" ] || [ "$_OFenable" == "true" ]; then + sed -i -e 's/# CONFIG_OF is not set/CONFIG_OF=y/' ./.config + fi + + # acs override + if [ -z "$_acs_override" ]; then + plain "" + plain "Use ACS override patch?" 
+ plain "https://wiki.archlinux.org/index.php/PCI_passthrough_via_OVMF#Bypassing_the_IOMMU_groups_.28ACS_override_patch.29" + read -rp "`echo $' > N/y : '`" CONDITION7; + fi + if [ "$CONDITION7" == "y" ] || [ "$_acs_override" == "true" ]; then + patch -Np1 -i ../0006-add-acs-overrides_iommu.patch + fi + + # bcachefs +# if [ -z "$_bcachefs" ]; then +# plain "" +# plain "Add Bcache filesystem support? You'll have to install bcachefs-tools-git from AUR for utilities." +# plain "https://bcachefs.org/" +# read -rp "`echo $' > N/y : '`" CONDITION8; +# fi +# if [ "$CONDITION8" == "y" ] || [ "$_bcachefs" == "true" ]; then +# patch -Np1 -i ../0008-5.8-bcachefs.patch +# echo "CONFIG_BCACHEFS_FS=m" >> ./.config +# echo "CONFIG_BCACHEFS_QUOTA=y" >> ./.config +# echo "CONFIG_BCACHEFS_POSIX_ACL=y" >> ./.config +# echo "# CONFIG_BCACHEFS_DEBUG is not set" >> ./.config +# echo "# CONFIG_BCACHEFS_TESTS is not set" >> ./.config +# echo "# CONFIG_DEBUG_CLOSURES is not set" >> ./.config +# fi + + # fsync support + if [ -z "$_fsync" ]; then + plain "" + plain "Enable support for fsync, an experimental replacement for esync in Valve Proton 4.11+" + plain "https://steamcommunity.com/games/221410/announcements/detail/2957094910196249305" + read -rp "`echo $' > N/y : '`" CONDITION9; + fi + if [ "$CONDITION9" == "y" ] || [ "$_fsync" == "true" ]; then + patch -Np1 -i ../0007-v5.8-fsync.patch + fi + + # ZFS fix + if [ -z "$_zfsfix" ]; then + plain "" + plain "Add back missing symbol for AES-NI/AVX support on ZFS" + plain "https://github.com/NixOS/nixpkgs/blob/master/pkgs/os-specific/linux/kernel/export_kernel_fpu_functions_5_3.patch" + read -rp "`echo $' > N/y : '`" CONDITION11; + fi + if [ "$CONDITION11" == "y" ] || [ "$_zfsfix" == "true" ]; then + patch -Np1 -i ../0011-ZFS-fix.patch + fi + + # Community patches + if [ -n "$_community_patches" ]; then + if [ ! -d "$_where/../../community-patches" ]; then + cd "$_where/../.." && git clone https://github.com/Frogging-Family/community-patches.git && cd "${srcdir}/linux-${_basekernel}-${_sub}" + fi + _community_patches=($_community_patches) + for _p in ${_community_patches[@]}; do + ln -s "$_where"/../../community-patches/linux58-tkg/$_p "$_where"/ + done + fi + + # userpatches + if [ "$_user_patches" == "true" ]; then + _userpatch_target="linux-${_basekernel}" + _userpatch_ext="my" + user_patcher + fi + + # Community patches removal + for _p in ${_community_patches[@]}; do + rm -f "$_where"/$_p + done + + # don't run depmod on 'make install'. We'll do this ourselves in packaging + sed -i '2iexit 0' scripts/depmod.sh + + # get kernel version + make prepare + + # modprobed-db + if [ -z "$_modprobeddb" ]; then + plain "" + plain "Use modprobed db to clean config from unneeded modules?" + plain "Speeds up compilation considerably. Requires root." + plain "https://wiki.archlinux.org/index.php/Modprobed-db" + plain "!!!! Make sure to have a well populated db !!!!" + read -rp "`echo $' > N/y : '`" CONDITIONMPDB; + fi + if [ "$CONDITIONMPDB" == "y" ] || [ "$_modprobeddb" == "true" ]; then + sudo modprobed-db recall + make localmodconfig + fi + + if [ true = "$_config_fragments" ]; then + local fragments=() + mapfile -d '' -t fragments < <(find "$_where" -type f -name "*.myfrag" -print0) + + if [ true = "$_config_fragments_no_confirm" ]; then + printf 'Using config fragment %s\n' "${fragments[@]#$_where/}" + else + for i in "${!fragments[@]}"; do + while true; do + read -r -p 'Found config fragment '"${fragments[$i]#$_where/}"', apply it? 
[y/N] ' CONDITIONMPDB + CONDITIONMPDB="$(printf '%s' "$CONDITIONMPDB" | tr '[:upper:]' '[:lower:]')" + case "$CONDITIONMPDB" in + y|yes) + break;; + n|no|'') + unset fragments[$i] + break;; + *) + echo 'Please answer with yes or no' + esac + done + done + fi + + if [ 0 -lt "${#fragments[@]}" ]; then + scripts/kconfig/merge_config.sh -m .config "${fragments[@]}" + fi + fi + + # menuconfig / nconfig + if [ -z "$_menunconfig" ]; then + plain "" + plain "*Optional* For advanced users - Do you want to use make menuconfig or nconfig" + plain "to configure the kernel before building it?" + plain "If you do, make sure your terminal is currently" + plain "at least 19 lines by 80 columns large or you'll get an error :D" + read -rp "`echo $' > 0. nope\n 1. menuconfig\n 2. nconfig\n choice[0-2?]: '`" CONDITIONMNC; + _menunconfig="$CONDITIONMNC" + fi + if [ 1 = "$_menunconfig" ]; then + cp .config .config.orig + make menuconfig + elif [ 2 = "$_menunconfig" ]; then + cp .config .config.orig + make nconfig + else + # rewrite configuration + yes "" | make config >/dev/null + fi + if [ 1 = "$_menunconfig" ] || [ 2 = "$_menunconfig" ]; then + if [ -z "${_diffconfig}" ]; then + while true; do + read -r -p 'Generate a config fragment from your changes? [y/N] ' CONDITIONF + CONDITIONF="$(printf '%s' "$CONDITIONF" | tr '[:upper:]' '[:lower:]')" + case "$CONDITIONF" in + y|yes) + _diffconfig=true + break;; + n|no|'') + _diffconfig=false + break;; + *) + echo 'Please answer with yes or no' + esac + done + fi + if [ true = "$_diffconfig" ]; then + if [ -z "$_diffconfig_name" ]; then + IFS= read -r -p 'Filename for the config fragment [leave empty to not generate fragment]: ' _diffconfig_name + fi + if [ -z "$_diffconfig_name" ]; then + echo 'No file name given, not generating config fragment.' + else ( + prev_pwd="${PWD:-$(pwd)}" + cd "$_where" + "${prev_pwd}/scripts/diffconfig" -m "${prev_pwd}/.config.orig" "${prev_pwd}/.config" > "$_diffconfig_name" + ) fi + fi + rm .config.orig + fi + + make -s kernelrelease > version + msg2 "Prepared %s version %s" "$pkgbase" "$( /dev/null; then + export PATH="/usr/lib/ccache/bin/:$PATH" + export CCACHE_SLOPPINESS="file_macro,locale,time_macros" + export CCACHE_NOHASHDIR="true" + msg2 'ccache was found and will be used' + fi + + # build! + _runtime=$( time ( schedtool -B -n 1 -e ionice -n 1 make ${_force_all_threads} LOCALVERSION= bzImage modules 2>&1 ) 3>&1 1>&2 2>&3 ) || _runtime=$( time ( make ${_force_all_threads} LOCALVERSION= bzImage modules 2>&1 ) 3>&1 1>&2 2>&3 ) +} + +hackbase() { + pkgdesc="The $pkgdesc kernel and modules" + depends=('coreutils' 'kmod' 'initramfs') + optdepends=('linux-docs: Kernel hackers manual - HTML documentation that comes with the Linux kernel.' + 'crda: to set the correct wireless channels of your country.' + 'linux-firmware: Firmware files for Linux' + 'modprobed-db: Keeps track of EVERY kernel module that has ever been probed. Useful for make localmodconfig.' + 'nvidia-tkg: NVIDIA drivers for all installed kernels - non-dkms version.' + 'nvidia-dkms-tkg: NVIDIA drivers for all installed kernels - dkms version.' 
+ 'update-grub: Simple wrapper around grub-mkconfig.') + provides=("linux=${pkgver}" "${pkgbase}" VIRTUALBOX-GUEST-MODULES WIREGUARD-MODULE) + replaces=(virtualbox-guest-modules-arch wireguard-arch) + + cd "${srcdir}/linux-${_basekernel}-${_sub}" + + # get kernel version + local _kernver="$( +From: Serge Hallyn +Date: Fri, 31 May 2013 19:12:12 +0100 +Subject: [PATCH] add sysctl to disallow unprivileged CLONE_NEWUSER by default + +Signed-off-by: Serge Hallyn +[bwh: Remove unneeded binary sysctl bits] +Signed-off-by: Daniel Micay +--- + kernel/fork.c | 15 +++++++++++++++ + kernel/sysctl.c | 12 ++++++++++++ + kernel/user_namespace.c | 3 +++ + 3 files changed, 30 insertions(+) + +diff --git a/kernel/fork.c b/kernel/fork.c +index 07cc743698d3668e..4011d68a8ff9305c 100644 +--- a/kernel/fork.c ++++ b/kernel/fork.c +@@ -102,6 +102,11 @@ + + #define CREATE_TRACE_POINTS + #include ++#ifdef CONFIG_USER_NS ++extern int unprivileged_userns_clone; ++#else ++#define unprivileged_userns_clone 0 ++#endif + + /* + * Minimum number of threads to boot the kernel +@@ -1555,6 +1560,10 @@ static __latent_entropy struct task_struct *copy_process( + if ((clone_flags & (CLONE_NEWUSER|CLONE_FS)) == (CLONE_NEWUSER|CLONE_FS)) + return ERR_PTR(-EINVAL); + ++ if ((clone_flags & CLONE_NEWUSER) && !unprivileged_userns_clone) ++ if (!capable(CAP_SYS_ADMIN)) ++ return ERR_PTR(-EPERM); ++ + /* + * Thread groups must share signals as well, and detached threads + * can only be started up within the thread group. +@@ -2348,6 +2357,12 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags) + if (unshare_flags & CLONE_NEWNS) + unshare_flags |= CLONE_FS; + ++ if ((unshare_flags & CLONE_NEWUSER) && !unprivileged_userns_clone) { ++ err = -EPERM; ++ if (!capable(CAP_SYS_ADMIN)) ++ goto bad_unshare_out; ++ } ++ + err = check_unshare_flags(unshare_flags); + if (err) + goto bad_unshare_out; +diff --git a/kernel/sysctl.c b/kernel/sysctl.c +index b86520ed3fb60fbf..f7dab3760839f1a1 100644 +--- a/kernel/sysctl.c ++++ b/kernel/sysctl.c +@@ -105,6 +105,9 @@ extern int core_uses_pid; + + #if defined(CONFIG_SYSCTL) + ++#ifdef CONFIG_USER_NS ++extern int unprivileged_userns_clone; ++#endif + /* Constants used for minimum and maximum */ + #ifdef CONFIG_LOCKUP_DETECTOR + static int sixty = 60; +@@ -513,6 +516,15 @@ static struct ctl_table kern_table[] = { + .proc_handler = proc_dointvec, + }, + #endif ++#ifdef CONFIG_USER_NS ++ { ++ .procname = "unprivileged_userns_clone", ++ .data = &unprivileged_userns_clone, ++ .maxlen = sizeof(int), ++ .mode = 0644, ++ .proc_handler = proc_dointvec, ++ }, ++#endif + #ifdef CONFIG_PROC_SYSCTL + { + .procname = "tainted", +diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c +index c490f1e4313b998a..dd03bd39d7bf194d 100644 +--- a/kernel/user_namespace.c ++++ b/kernel/user_namespace.c +@@ -24,6 +24,9 @@ + #include + #include + ++/* sysctl */ ++int unprivileged_userns_clone; ++ + static struct kmem_cache *user_ns_cachep __read_mostly; + static DEFINE_MUTEX(userns_state_mutex); + +-- +2.15.1 + +From b5202296055dd333db4425120d3f93ef4e6a0573 Mon Sep 17 00:00:00 2001 +From: "Jan Alexander Steffens (heftig)" +Date: Thu, 7 Dec 2017 13:50:48 +0100 +Subject: ZEN: Add CONFIG for unprivileged_userns_clone + +This way our default behavior continues to match the vanilla kernel. 
+--- + init/Kconfig | 16 ++++++++++++++++ + kernel/user_namespace.c | 4 ++++ + 2 files changed, 20 insertions(+) + +diff --git a/init/Kconfig b/init/Kconfig +index 4592bf7997c0..f3df02990aff 100644 +--- a/init/Kconfig ++++ b/init/Kconfig +@@ -1004,6 +1004,22 @@ config USER_NS + + If unsure, say N. + ++config USER_NS_UNPRIVILEGED ++ bool "Allow unprivileged users to create namespaces" ++ default y ++ depends on USER_NS ++ help ++ When disabled, unprivileged users will not be able to create ++ new namespaces. Allowing users to create their own namespaces ++ has been part of several recent local privilege escalation ++ exploits, so if you need user namespaces but are ++ paranoid^Wsecurity-conscious you want to disable this. ++ ++ This setting can be overridden at runtime via the ++ kernel.unprivileged_userns_clone sysctl. ++ ++ If unsure, say Y. ++ + config PID_NS + bool "PID Namespaces" + default y +diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c +index 6b9dbc257e34..107b17f0d528 100644 +--- a/kernel/user_namespace.c ++++ b/kernel/user_namespace.c +@@ -27,7 +27,11 @@ + #include + + /* sysctl */ ++#ifdef CONFIG_USER_NS_UNPRIVILEGED ++int unprivileged_userns_clone = 1; ++#else + int unprivileged_userns_clone; ++#endif + + static struct kmem_cache *user_ns_cachep __read_mostly; + static DEFINE_MUTEX(userns_state_mutex); diff --git a/linux58-rc-tkg/linux58-tkg-patches/0002-clear-patches.patch b/linux58-rc-tkg/linux58-tkg-patches/0002-clear-patches.patch new file mode 100644 index 0000000..a7c9d4a --- /dev/null +++ b/linux58-rc-tkg/linux58-tkg-patches/0002-clear-patches.patch @@ -0,0 +1,354 @@ +From 2ac70785613ef4c6b16414986bb18bd7b60d2a13 Mon Sep 17 00:00:00 2001 +From: Arjan van de Ven +Date: Mon, 14 Mar 2016 11:10:58 -0600 +Subject: [PATCH] pci pme wakeups + +Reduce wakeups for PME checks, which are a workaround for miswired +boards (sadly, too many of them) in laptops. 
+--- + drivers/pci/pci.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c +index c25acace7d91..0ddebdad9f5b 100644 +--- a/drivers/pci/pci.c ++++ b/drivers/pci/pci.c +@@ -61,7 +61,7 @@ struct pci_pme_device { + struct pci_dev *dev; + }; + +-#define PME_TIMEOUT 1000 /* How long between PME checks */ ++#define PME_TIMEOUT 4000 /* How long between PME checks */ + + static void pci_dev_d3_sleep(struct pci_dev *dev) + { +-- +2.20.1 + +From 7e7e36c67aa71d6a1ec5676d99d37c1fea389ceb Mon Sep 17 00:00:00 2001 +From: Arjan van de Ven +Date: Sat, 19 Mar 2016 21:32:19 -0400 +Subject: [PATCH] intel_idle: tweak cpuidle cstates + +Increase target_residency in cpuidle cstate + +Tune intel_idle to be a bit less agressive; +Clear linux is cleaner in hygiene (wakupes) than the average linux, +so we can afford changing these in a way that increases +performance while keeping power efficiency +--- + drivers/idle/intel_idle.c | 44 +++++++++++++++++++-------------------- + 1 file changed, 22 insertions(+), 22 deletions(-) + +diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c +index 8b5d85c91e9d..5e2d813a048d 100644 +--- a/drivers/idle/intel_idle.c ++++ b/drivers/idle/intel_idle.c +@@ -466,7 +466,7 @@ static struct cpuidle_state hsw_cstates[] = { + .desc = "MWAIT 0x01", + .flags = MWAIT2flg(0x01), + .exit_latency = 10, +- .target_residency = 20, ++ .target_residency = 120, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { +@@ -474,7 +474,7 @@ static struct cpuidle_state hsw_cstates[] = { + .desc = "MWAIT 0x10", + .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 33, +- .target_residency = 100, ++ .target_residency = 900, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { +@@ -482,7 +482,7 @@ static struct cpuidle_state hsw_cstates[] = { + .desc = "MWAIT 0x20", + .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 133, +- .target_residency = 400, ++ .target_residency = 1000, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { +@@ -490,7 +490,7 @@ static struct cpuidle_state hsw_cstates[] = { + .desc = "MWAIT 0x32", + .flags = MWAIT2flg(0x32) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 166, +- .target_residency = 500, ++ .target_residency = 1500, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { +@@ -498,7 +498,7 @@ static struct cpuidle_state hsw_cstates[] = { + .desc = "MWAIT 0x40", + .flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 300, +- .target_residency = 900, ++ .target_residency = 2000, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { +@@ -506,7 +506,7 @@ static struct cpuidle_state hsw_cstates[] = { + .desc = "MWAIT 0x50", + .flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 600, +- .target_residency = 1800, ++ .target_residency = 5000, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { +@@ -514,7 +514,7 @@ static struct cpuidle_state hsw_cstates[] = { + .desc = "MWAIT 0x60", + .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 2600, +- .target_residency = 7700, ++ .target_residency = 9000, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { +@@ -534,7 +534,7 @@ static struct cpuidle_state bdw_cstates[] = { + .desc = "MWAIT 0x01", + .flags = MWAIT2flg(0x01), + .exit_latency = 10, +- .target_residency = 20, ++ .target_residency = 120, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { +@@ -542,7 
+542,7 @@ static struct cpuidle_state bdw_cstates[] = { + .desc = "MWAIT 0x10", + .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 40, +- .target_residency = 100, ++ .target_residency = 1000, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { +@@ -550,7 +550,7 @@ static struct cpuidle_state bdw_cstates[] = { + .desc = "MWAIT 0x20", + .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 133, +- .target_residency = 400, ++ .target_residency = 1000, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { +@@ -558,7 +558,7 @@ static struct cpuidle_state bdw_cstates[] = { + .desc = "MWAIT 0x32", + .flags = MWAIT2flg(0x32) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 166, +- .target_residency = 500, ++ .target_residency = 2000, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { +@@ -566,7 +566,7 @@ static struct cpuidle_state bdw_cstates[] = { + .desc = "MWAIT 0x40", + .flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 300, +- .target_residency = 900, ++ .target_residency = 4000, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { +@@ -574,7 +574,7 @@ static struct cpuidle_state bdw_cstates[] = { + .desc = "MWAIT 0x50", + .flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 600, +- .target_residency = 1800, ++ .target_residency = 7000, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { +@@ -582,7 +582,7 @@ static struct cpuidle_state bdw_cstates[] = { + .desc = "MWAIT 0x60", + .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 2600, +- .target_residency = 7700, ++ .target_residency = 9000, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { +@@ -603,7 +603,7 @@ static struct cpuidle_state skl_cstates[] = { + .desc = "MWAIT 0x01", + .flags = MWAIT2flg(0x01), + .exit_latency = 10, +- .target_residency = 20, ++ .target_residency = 120, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { +@@ -611,7 +611,7 @@ static struct cpuidle_state skl_cstates[] = { + .desc = "MWAIT 0x10", + .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 70, +- .target_residency = 100, ++ .target_residency = 1000, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { +@@ -619,7 +619,7 @@ static struct cpuidle_state skl_cstates[] = { + .desc = "MWAIT 0x20", + .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 85, +- .target_residency = 200, ++ .target_residency = 600, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { +@@ -627,7 +627,7 @@ static struct cpuidle_state skl_cstates[] = { + .desc = "MWAIT 0x33", + .flags = MWAIT2flg(0x33) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 124, +- .target_residency = 800, ++ .target_residency = 3000, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { +@@ -635,7 +635,7 @@ static struct cpuidle_state skl_cstates[] = { + .desc = "MWAIT 0x40", + .flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 200, +- .target_residency = 800, ++ .target_residency = 3200, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { +@@ -643,7 +643,7 @@ static struct cpuidle_state skl_cstates[] = { + .desc = "MWAIT 0x50", + .flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 480, +- .target_residency = 5000, ++ .target_residency = 9000, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { +@@ -651,7 +651,7 @@ static struct cpuidle_state skl_cstates[] = { 
+ .desc = "MWAIT 0x60", + .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 890, +- .target_residency = 5000, ++ .target_residency = 9000, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { +@@ -672,7 +672,7 @@ static struct cpuidle_state skx_cstates[] = { + .desc = "MWAIT 0x01", + .flags = MWAIT2flg(0x01), + .exit_latency = 10, +- .target_residency = 20, ++ .target_residency = 300, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { +-- +2.20.1 + +From b8211d4f79dd88dfc2d4bd52be46103ea0b70e3e Mon Sep 17 00:00:00 2001 +From: Arjan van de Ven +Date: Fri, 6 Jan 2017 15:34:09 +0000 +Subject: [PATCH] ipv4/tcp: allow the memory tuning for tcp to go a little + bigger than default + +--- + net/ipv4/tcp.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c +index cf3c5095c10e..b30d51837b2d 100644 +--- a/net/ipv4/tcp.c ++++ b/net/ipv4/tcp.c +@@ -3897,8 +3897,8 @@ void __init tcp_init(void) + tcp_init_mem(); + /* Set per-socket limits to no more than 1/128 the pressure threshold */ + limit = nr_free_buffer_pages() << (PAGE_SHIFT - 7); +- max_wshare = min(4UL*1024*1024, limit); +- max_rshare = min(6UL*1024*1024, limit); ++ max_wshare = min(16UL*1024*1024, limit); ++ max_rshare = min(16UL*1024*1024, limit); + + init_net.ipv4.sysctl_tcp_wmem[0] = SK_MEM_QUANTUM; + init_net.ipv4.sysctl_tcp_wmem[1] = 16*1024; +-- +2.20.1 + +From 050223869257b87e22636158a80da38d877248ed Mon Sep 17 00:00:00 2001 +From: Arjan van de Ven +Date: Sun, 18 Feb 2018 23:35:41 +0000 +Subject: [PATCH] locking: rwsem: spin faster + +tweak rwsem owner spinning a bit +--- + kernel/locking/rwsem.c | 4 +++- + 1 file changed, 3 insertions(+), 1 deletion(-) + +diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c +index eef04551eae7..1ec5ab4c8ff7 100644 +--- a/kernel/locking/rwsem.c ++++ b/kernel/locking/rwsem.c +@@ -720,6 +720,7 @@ rwsem_spin_on_owner(struct rw_semaphore *sem, unsigned long nonspinnable) + struct task_struct *new, *owner; + unsigned long flags, new_flags; + enum owner_state state; ++ int i = 0; + + owner = rwsem_owner_flags(sem, &flags); + state = rwsem_owner_state(owner, flags, nonspinnable); +@@ -753,7 +754,8 @@ rwsem_spin_on_owner(struct rw_semaphore *sem, unsigned long nonspinnable) + break; + } + +- cpu_relax(); ++ if (i++ > 1000) ++ cpu_relax(); + } + rcu_read_unlock(); + +From b836ea320114643d4354b43acb6ec8bb06ada487 Mon Sep 17 00:00:00 2001 +From: Arjan van de Ven +Date: Thu, 2 Jun 2016 23:36:32 -0500 +Subject: [PATCH] drivers: Initialize ata before graphics + +ATA init is the long pole in the boot process, and its asynchronous. 
+move the graphics init after it so that ata and graphics initialize +in parallel +--- + drivers/Makefile | 15 ++++++++------- + 1 file changed, 8 insertions(+), 7 deletions(-) + +diff --git a/drivers/Makefile b/drivers/Makefile +index aaef17cc6512..d08f3a394929 100644 +--- a/drivers/Makefile ++++ b/drivers/Makefile +@@ -58,15 +58,8 @@ obj-y += char/ + # iommu/ comes before gpu as gpu are using iommu controllers + obj-y += iommu/ + +-# gpu/ comes after char for AGP vs DRM startup and after iommu +-obj-y += gpu/ +- + obj-$(CONFIG_CONNECTOR) += connector/ + +-# i810fb and intelfb depend on char/agp/ +-obj-$(CONFIG_FB_I810) += video/fbdev/i810/ +-obj-$(CONFIG_FB_INTEL) += video/fbdev/intelfb/ +- + obj-$(CONFIG_PARPORT) += parport/ + obj-$(CONFIG_NVM) += lightnvm/ + obj-y += base/ block/ misc/ mfd/ nfc/ +@@ -79,6 +72,14 @@ obj-$(CONFIG_IDE) += ide/ + obj-y += scsi/ + obj-y += nvme/ + obj-$(CONFIG_ATA) += ata/ ++ ++# gpu/ comes after char for AGP vs DRM startup and after iommu ++obj-y += gpu/ ++ ++# i810fb and intelfb depend on char/agp/ ++obj-$(CONFIG_FB_I810) += video/fbdev/i810/ ++obj-$(CONFIG_FB_INTEL) += video/fbdev/intelfb/ ++ + obj-$(CONFIG_TARGET_CORE) += target/ + obj-$(CONFIG_MTD) += mtd/ + obj-$(CONFIG_SPI) += spi/ diff --git a/linux58-rc-tkg/linux58-tkg-patches/0003-glitched-base.patch b/linux58-rc-tkg/linux58-tkg-patches/0003-glitched-base.patch new file mode 100644 index 0000000..a83ceaf --- /dev/null +++ b/linux58-rc-tkg/linux58-tkg-patches/0003-glitched-base.patch @@ -0,0 +1,1446 @@ +From f7f49141a5dbe9c99d78196b58c44307fb2e6be3 Mon Sep 17 00:00:00 2001 +From: Tk-Glitch +Date: Wed, 4 Jul 2018 04:30:08 +0200 +Subject: glitched + +diff --git a/scripts/mkcompile_h b/scripts/mkcompile_h +index 87f1fc9..b3be470 100755 +--- a/scripts/mkcompile_h ++++ b/scripts/mkcompile_h +@@ -50,8 +50,8 @@ else + fi + + UTS_VERSION="#$VERSION" +-CONFIG_FLAGS="" +-if [ -n "$SMP" ] ; then CONFIG_FLAGS="SMP"; fi ++CONFIG_FLAGS="TKG" ++if [ -n "$SMP" ] ; then CONFIG_FLAGS="$CONFIG_FLAGS SMP"; fi + if [ -n "$PREEMPT" ] ; then CONFIG_FLAGS="$CONFIG_FLAGS PREEMPT"; fi + UTS_VERSION="$UTS_VERSION $CONFIG_FLAGS $TIMESTAMP" + +diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu +index af9c967782f6..bf07a8c0f495 100644 +--- a/arch/x86/Kconfig.cpu ++++ b/arch/x86/Kconfig.cpu +@@ -123,6 +123,7 @@ config MPENTIUMM + config MPENTIUM4 + bool "Pentium-4/Celeron(P4-based)/Pentium-4 M/older Xeon" + depends on X86_32 ++ select X86_P6_NOP + help + Select this for Intel Pentium 4 chips. This includes the + Pentium 4, Pentium D, P4-based Celeron and Xeon, and +@@ -155,9 +156,8 @@ config MPENTIUM4 + -Paxville + -Dempsey + +- + config MK6 +- bool "K6/K6-II/K6-III" ++ bool "AMD K6/K6-II/K6-III" + depends on X86_32 + help + Select this for an AMD K6-family processor. Enables use of +@@ -165,7 +165,7 @@ config MK6 + flags to GCC. + + config MK7 +- bool "Athlon/Duron/K7" ++ bool "AMD Athlon/Duron/K7" + depends on X86_32 + help + Select this for an AMD Athlon K7-family processor. Enables use of +@@ -173,12 +173,90 @@ config MK7 + flags to GCC. + + config MK8 +- bool "Opteron/Athlon64/Hammer/K8" ++ bool "AMD Opteron/Athlon64/Hammer/K8" + help + Select this for an AMD Opteron or Athlon64 Hammer-family processor. + Enables use of some extended instructions, and passes appropriate + optimization flags to GCC. + ++config MK8SSE3 ++ bool "AMD Opteron/Athlon64/Hammer/K8 with SSE3" ++ help ++ Select this for improved AMD Opteron or Athlon64 Hammer-family processors. 
++ Enables use of some extended instructions, and passes appropriate ++ optimization flags to GCC. ++ ++config MK10 ++ bool "AMD 61xx/7x50/PhenomX3/X4/II/K10" ++ help ++ Select this for an AMD 61xx Eight-Core Magny-Cours, Athlon X2 7x50, ++ Phenom X3/X4/II, Athlon II X2/X3/X4, or Turion II-family processor. ++ Enables use of some extended instructions, and passes appropriate ++ optimization flags to GCC. ++ ++config MBARCELONA ++ bool "AMD Barcelona" ++ help ++ Select this for AMD Family 10h Barcelona processors. ++ ++ Enables -march=barcelona ++ ++config MBOBCAT ++ bool "AMD Bobcat" ++ help ++ Select this for AMD Family 14h Bobcat processors. ++ ++ Enables -march=btver1 ++ ++config MJAGUAR ++ bool "AMD Jaguar" ++ help ++ Select this for AMD Family 16h Jaguar processors. ++ ++ Enables -march=btver2 ++ ++config MBULLDOZER ++ bool "AMD Bulldozer" ++ help ++ Select this for AMD Family 15h Bulldozer processors. ++ ++ Enables -march=bdver1 ++ ++config MPILEDRIVER ++ bool "AMD Piledriver" ++ help ++ Select this for AMD Family 15h Piledriver processors. ++ ++ Enables -march=bdver2 ++ ++config MSTEAMROLLER ++ bool "AMD Steamroller" ++ help ++ Select this for AMD Family 15h Steamroller processors. ++ ++ Enables -march=bdver3 ++ ++config MEXCAVATOR ++ bool "AMD Excavator" ++ help ++ Select this for AMD Family 15h Excavator processors. ++ ++ Enables -march=bdver4 ++ ++config MZEN ++ bool "AMD Zen" ++ help ++ Select this for AMD Family 17h Zen processors. ++ ++ Enables -march=znver1 ++ ++config MZEN2 ++ bool "AMD Zen 2" ++ help ++ Select this for AMD Family 17h Zen 2 processors. ++ ++ Enables -march=znver2 ++ + config MCRUSOE + bool "Crusoe" + depends on X86_32 +@@ -260,6 +338,7 @@ config MVIAC7 + + config MPSC + bool "Intel P4 / older Netburst based Xeon" ++ select X86_P6_NOP + depends on X86_64 + help + Optimize for Intel Pentium 4, Pentium D and older Nocona/Dempsey +@@ -269,8 +348,19 @@ config MPSC + using the cpu family field + in /proc/cpuinfo. Family 15 is an older Xeon, Family 6 a newer one. + ++config MATOM ++ bool "Intel Atom" ++ select X86_P6_NOP ++ help ++ ++ Select this for the Intel Atom platform. Intel Atom CPUs have an ++ in-order pipelining architecture and thus can benefit from ++ accordingly optimized code. Use a recent GCC with specific Atom ++ support in order to fully benefit from selecting this option. ++ + config MCORE2 +- bool "Core 2/newer Xeon" ++ bool "Intel Core 2" ++ select X86_P6_NOP + help + + Select this for Intel Core 2 and newer Core 2 Xeons (Xeon 51xx and +@@ -278,14 +368,133 @@ config MCORE2 + family in /proc/cpuinfo. Newer ones have 6 and older ones 15 + (not a typo) + +-config MATOM +- bool "Intel Atom" ++ Enables -march=core2 ++ ++config MNEHALEM ++ bool "Intel Nehalem" ++ select X86_P6_NOP + help + +- Select this for the Intel Atom platform. Intel Atom CPUs have an +- in-order pipelining architecture and thus can benefit from +- accordingly optimized code. Use a recent GCC with specific Atom +- support in order to fully benefit from selecting this option. ++ Select this for 1st Gen Core processors in the Nehalem family. ++ ++ Enables -march=nehalem ++ ++config MWESTMERE ++ bool "Intel Westmere" ++ select X86_P6_NOP ++ help ++ ++ Select this for the Intel Westmere formerly Nehalem-C family. ++ ++ Enables -march=westmere ++ ++config MSILVERMONT ++ bool "Intel Silvermont" ++ select X86_P6_NOP ++ help ++ ++ Select this for the Intel Silvermont platform. 
++ ++ Enables -march=silvermont ++ ++config MGOLDMONT ++ bool "Intel Goldmont" ++ select X86_P6_NOP ++ help ++ ++ Select this for the Intel Goldmont platform including Apollo Lake and Denverton. ++ ++ Enables -march=goldmont ++ ++config MGOLDMONTPLUS ++ bool "Intel Goldmont Plus" ++ select X86_P6_NOP ++ help ++ ++ Select this for the Intel Goldmont Plus platform including Gemini Lake. ++ ++ Enables -march=goldmont-plus ++ ++config MSANDYBRIDGE ++ bool "Intel Sandy Bridge" ++ select X86_P6_NOP ++ help ++ ++ Select this for 2nd Gen Core processors in the Sandy Bridge family. ++ ++ Enables -march=sandybridge ++ ++config MIVYBRIDGE ++ bool "Intel Ivy Bridge" ++ select X86_P6_NOP ++ help ++ ++ Select this for 3rd Gen Core processors in the Ivy Bridge family. ++ ++ Enables -march=ivybridge ++ ++config MHASWELL ++ bool "Intel Haswell" ++ select X86_P6_NOP ++ help ++ ++ Select this for 4th Gen Core processors in the Haswell family. ++ ++ Enables -march=haswell ++ ++config MBROADWELL ++ bool "Intel Broadwell" ++ select X86_P6_NOP ++ help ++ ++ Select this for 5th Gen Core processors in the Broadwell family. ++ ++ Enables -march=broadwell ++ ++config MSKYLAKE ++ bool "Intel Skylake" ++ select X86_P6_NOP ++ help ++ ++ Select this for 6th Gen Core processors in the Skylake family. ++ ++ Enables -march=skylake ++ ++config MSKYLAKEX ++ bool "Intel Skylake X" ++ select X86_P6_NOP ++ help ++ ++ Select this for 6th Gen Core processors in the Skylake X family. ++ ++ Enables -march=skylake-avx512 ++ ++config MCANNONLAKE ++ bool "Intel Cannon Lake" ++ select X86_P6_NOP ++ help ++ ++ Select this for 8th Gen Core processors ++ ++ Enables -march=cannonlake ++ ++config MICELAKE ++ bool "Intel Ice Lake" ++ select X86_P6_NOP ++ help ++ ++ Select this for 10th Gen Core processors in the Ice Lake family. ++ ++ Enables -march=icelake-client ++ ++config MCASCADELAKE ++ bool "Intel Cascade Lake" ++ select X86_P6_NOP ++ help ++ ++ Select this for Xeon processors in the Cascade Lake family. ++ ++ Enables -march=cascadelake + + config GENERIC_CPU + bool "Generic-x86-64" +@@ -294,6 +503,19 @@ config GENERIC_CPU + Generic x86-64 CPU. + Run equally well on all x86-64 CPUs. + ++config MNATIVE ++ bool "Native optimizations autodetected by GCC" ++ help ++ ++ GCC 4.2 and above support -march=native, which automatically detects ++ the optimum settings to use based on your processor. -march=native ++ also detects and applies additional settings beyond -march specific ++ to your CPU, (eg. -msse4). Unless you have a specific reason not to ++ (e.g. distcc cross-compiling), you should probably be using ++ -march=native rather than anything listed below. 
++ ++ Enables -march=native ++ + endchoice + + config X86_GENERIC +@@ -318,7 +540,7 @@ config X86_INTERNODE_CACHE_SHIFT + config X86_L1_CACHE_SHIFT + int + default "7" if MPENTIUM4 || MPSC +- default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MATOM || MVIAC7 || X86_GENERIC || GENERIC_CPU ++ default "6" if MK7 || MK8 || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MJAGUAR || MPENTIUMM || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MNATIVE || MATOM || MVIAC7 || X86_GENERIC || GENERIC_CPU + default "4" if MELAN || M486SX || M486 || MGEODEGX1 + default "5" if MWINCHIP3D || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODE_LX + +@@ -336,35 +558,36 @@ config X86_ALIGNMENT_16 + + config X86_INTEL_USERCOPY + def_bool y +- depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK7 || MEFFICEON || MCORE2 ++ depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK8SSE3 || MK7 || MEFFICEON || MCORE2 || MK10 || MBARCELONA || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MNATIVE + + config X86_USE_PPRO_CHECKSUM + def_bool y +- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX || MCORE2 || MATOM ++ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MK10 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MK8SSE3 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MATOM || MNATIVE + + config X86_USE_3DNOW + def_bool y + depends on (MCYRIXIII || MK7 || MGEODE_LX) && !UML + +-# +-# P6_NOPs are a relatively minor optimization that require a family >= +-# 6 processor, except that it is broken on certain VIA chips. +-# Furthermore, AMD chips prefer a totally different sequence of NOPs +-# (which work on all CPUs). In addition, it looks like Virtual PC +-# does not understand them. +-# +-# As a result, disallow these if we're not compiling for X86_64 (these +-# NOPs do work on all x86-64 capable chips); the list of processors in +-# the right-hand clause are the cores that benefit from this optimization. +-# + config X86_P6_NOP +- def_bool y +- depends on X86_64 +- depends on (MCORE2 || MPENTIUM4 || MPSC) ++ default n ++ bool "Support for P6_NOPs on Intel chips" ++ depends on (MCORE2 || MPENTIUM4 || MPSC || MATOM || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MNATIVE) ++ help ++ P6_NOPs are a relatively minor optimization that require a family >= ++ 6 processor, except that it is broken on certain VIA chips. ++ Furthermore, AMD chips prefer a totally different sequence of NOPs ++ (which work on all CPUs). 
In addition, it looks like Virtual PC ++ does not understand them. ++ ++ As a result, disallow these if we're not compiling for X86_64 (these ++ NOPs do work on all x86-64 capable chips); the list of processors in ++ the right-hand clause are the cores that benefit from this optimization. ++ ++ Say Y if you have Intel CPU newer than Pentium Pro, N otherwise. + + config X86_TSC + def_bool y +- depends on (MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2 || MATOM) || X86_64 ++ depends on (MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MK8SSE3 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MNATIVE || MATOM) || X86_64 + + config X86_CMPXCHG64 + def_bool y +@@ -374,7 +597,7 @@ config X86_CMPXCHG64 + # generates cmov. + config X86_CMOV + def_bool y +- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX) ++ depends on (MK8 || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MJAGUAR || MK7 || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MNATIVE || MATOM || MGEODE_LX) + + config X86_MINIMUM_CPU_FAMILY + int +diff --git a/arch/x86/Makefile b/arch/x86/Makefile +index 94df0868804b..dcbed7e3a070 100644 +--- a/arch/x86/Makefile ++++ b/arch/x86/Makefile +@@ -119,13 +119,53 @@ else + KBUILD_CFLAGS += $(call cc-option,-mskip-rax-setup) + + # FIXME - should be integrated in Makefile.cpu (Makefile_32.cpu) ++ cflags-$(CONFIG_MNATIVE) += $(call cc-option,-march=native) + cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8) ++ cflags-$(CONFIG_MK8SSE3) += $(call cc-option,-march=k8-sse3,-mtune=k8) ++ cflags-$(CONFIG_MK10) += $(call cc-option,-march=amdfam10) ++ cflags-$(CONFIG_MBARCELONA) += $(call cc-option,-march=barcelona) ++ cflags-$(CONFIG_MBOBCAT) += $(call cc-option,-march=btver1) ++ cflags-$(CONFIG_MJAGUAR) += $(call cc-option,-march=btver2) ++ cflags-$(CONFIG_MBULLDOZER) += $(call cc-option,-march=bdver1) ++ cflags-$(CONFIG_MPILEDRIVER) += $(call cc-option,-march=bdver2) ++ cflags-$(CONFIG_MSTEAMROLLER) += $(call cc-option,-march=bdver3) ++ cflags-$(CONFIG_MEXCAVATOR) += $(call cc-option,-march=bdver4) ++ cflags-$(CONFIG_MZEN) += $(call cc-option,-march=znver1) ++ cflags-$(CONFIG_MZEN2) += $(call cc-option,-march=znver2) + cflags-$(CONFIG_MPSC) += $(call cc-option,-march=nocona) + + cflags-$(CONFIG_MCORE2) += \ +- $(call cc-option,-march=core2,$(call cc-option,-mtune=generic)) +- cflags-$(CONFIG_MATOM) += $(call cc-option,-march=atom) \ +- $(call cc-option,-mtune=atom,$(call cc-option,-mtune=generic)) ++ $(call cc-option,-march=core2,$(call cc-option,-mtune=core2)) ++ cflags-$(CONFIG_MNEHALEM) += \ ++ $(call cc-option,-march=nehalem,$(call cc-option,-mtune=nehalem)) ++ 
cflags-$(CONFIG_MWESTMERE) += \ ++ $(call cc-option,-march=westmere,$(call cc-option,-mtune=westmere)) ++ cflags-$(CONFIG_MSILVERMONT) += \ ++ $(call cc-option,-march=silvermont,$(call cc-option,-mtune=silvermont)) ++ cflags-$(CONFIG_MGOLDMONT) += \ ++ $(call cc-option,-march=goldmont,$(call cc-option,-mtune=goldmont)) ++ cflags-$(CONFIG_MGOLDMONTPLUS) += \ ++ $(call cc-option,-march=goldmont-plus,$(call cc-option,-mtune=goldmont-plus)) ++ cflags-$(CONFIG_MSANDYBRIDGE) += \ ++ $(call cc-option,-march=sandybridge,$(call cc-option,-mtune=sandybridge)) ++ cflags-$(CONFIG_MIVYBRIDGE) += \ ++ $(call cc-option,-march=ivybridge,$(call cc-option,-mtune=ivybridge)) ++ cflags-$(CONFIG_MHASWELL) += \ ++ $(call cc-option,-march=haswell,$(call cc-option,-mtune=haswell)) ++ cflags-$(CONFIG_MBROADWELL) += \ ++ $(call cc-option,-march=broadwell,$(call cc-option,-mtune=broadwell)) ++ cflags-$(CONFIG_MSKYLAKE) += \ ++ $(call cc-option,-march=skylake,$(call cc-option,-mtune=skylake)) ++ cflags-$(CONFIG_MSKYLAKEX) += \ ++ $(call cc-option,-march=skylake-avx512,$(call cc-option,-mtune=skylake-avx512)) ++ cflags-$(CONFIG_MCANNONLAKE) += \ ++ $(call cc-option,-march=cannonlake,$(call cc-option,-mtune=cannonlake)) ++ cflags-$(CONFIG_MICELAKE) += \ ++ $(call cc-option,-march=icelake-client,$(call cc-option,-mtune=icelake-client)) ++ cflags-$(CONFIG_MCASCADELAKE) += \ ++ $(call cc-option,-march=cascadelake,$(call cc-option,-mtune=cascadelake)) ++ cflags-$(CONFIG_MATOM) += $(call cc-option,-march=bonnell) \ ++ $(call cc-option,-mtune=bonnell,$(call cc-option,-mtune=generic)) + cflags-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=generic) + KBUILD_CFLAGS += $(cflags-y) + +diff --git a/arch/x86/Makefile_32.cpu b/arch/x86/Makefile_32.cpu +index cd3056759880..2c81838df533 100644 +--- a/arch/x86/Makefile_32.cpu ++++ b/arch/x86/Makefile_32.cpu +@@ -24,7 +24,19 @@ cflags-$(CONFIG_MK6) += -march=k6 + # Please note, that patches that add -march=athlon-xp and friends are pointless. + # They make zero difference whatsosever to performance at this time. 
+ cflags-$(CONFIG_MK7) += -march=athlon ++cflags-$(CONFIG_MNATIVE) += $(call cc-option,-march=native) + cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8,-march=athlon) ++cflags-$(CONFIG_MK8SSE3) += $(call cc-option,-march=k8-sse3,-march=athlon) ++cflags-$(CONFIG_MK10) += $(call cc-option,-march=amdfam10,-march=athlon) ++cflags-$(CONFIG_MBARCELONA) += $(call cc-option,-march=barcelona,-march=athlon) ++cflags-$(CONFIG_MBOBCAT) += $(call cc-option,-march=btver1,-march=athlon) ++cflags-$(CONFIG_MJAGUAR) += $(call cc-option,-march=btver2,-march=athlon) ++cflags-$(CONFIG_MBULLDOZER) += $(call cc-option,-march=bdver1,-march=athlon) ++cflags-$(CONFIG_MPILEDRIVER) += $(call cc-option,-march=bdver2,-march=athlon) ++cflags-$(CONFIG_MSTEAMROLLER) += $(call cc-option,-march=bdver3,-march=athlon) ++cflags-$(CONFIG_MEXCAVATOR) += $(call cc-option,-march=bdver4,-march=athlon) ++cflags-$(CONFIG_MZEN) += $(call cc-option,-march=znver1,-march=athlon) ++cflags-$(CONFIG_MZEN2) += $(call cc-option,-march=znver2,-march=athlon) + cflags-$(CONFIG_MCRUSOE) += -march=i686 -falign-functions=0 -falign-jumps=0 -falign-loops=0 + cflags-$(CONFIG_MEFFICEON) += -march=i686 $(call tune,pentium3) -falign-functions=0 -falign-jumps=0 -falign-loops=0 + cflags-$(CONFIG_MWINCHIPC6) += $(call cc-option,-march=winchip-c6,-march=i586) +@@ -33,8 +45,22 @@ cflags-$(CONFIG_MCYRIXIII) += $(call cc-option,-march=c3,-march=i486) -falign-fu + cflags-$(CONFIG_MVIAC3_2) += $(call cc-option,-march=c3-2,-march=i686) + cflags-$(CONFIG_MVIAC7) += -march=i686 + cflags-$(CONFIG_MCORE2) += -march=i686 $(call tune,core2) +-cflags-$(CONFIG_MATOM) += $(call cc-option,-march=atom,$(call cc-option,-march=core2,-march=i686)) \ +- $(call cc-option,-mtune=atom,$(call cc-option,-mtune=generic)) ++cflags-$(CONFIG_MNEHALEM) += -march=i686 $(call tune,nehalem) ++cflags-$(CONFIG_MWESTMERE) += -march=i686 $(call tune,westmere) ++cflags-$(CONFIG_MSILVERMONT) += -march=i686 $(call tune,silvermont) ++cflags-$(CONFIG_MGOLDMONT) += -march=i686 $(call tune,goldmont) ++cflags-$(CONFIG_MGOLDMONTPLUS) += -march=i686 $(call tune,goldmont-plus) ++cflags-$(CONFIG_MSANDYBRIDGE) += -march=i686 $(call tune,sandybridge) ++cflags-$(CONFIG_MIVYBRIDGE) += -march=i686 $(call tune,ivybridge) ++cflags-$(CONFIG_MHASWELL) += -march=i686 $(call tune,haswell) ++cflags-$(CONFIG_MBROADWELL) += -march=i686 $(call tune,broadwell) ++cflags-$(CONFIG_MSKYLAKE) += -march=i686 $(call tune,skylake) ++cflags-$(CONFIG_MSKYLAKEX) += -march=i686 $(call tune,skylake-avx512) ++cflags-$(CONFIG_MCANNONLAKE) += -march=i686 $(call tune,cannonlake) ++cflags-$(CONFIG_MICELAKE) += -march=i686 $(call tune,icelake-client) ++cflags-$(CONFIG_MCASCADELAKE) += -march=i686 $(call tune,cascadelake) ++cflags-$(CONFIG_MATOM) += $(call cc-option,-march=bonnell,$(call cc-option,-march=core2,-march=i686)) \ ++ $(call cc-option,-mtune=bonnell,$(call cc-option,-mtune=generic)) + + # AMD Elan support + cflags-$(CONFIG_MELAN) += -march=i486 +diff --git a/arch/x86/include/asm/vermagic.h b/arch/x86/include/asm/vermagic.h +index c215d2762488..a4fddfe3d4fb 100644 +--- a/arch/x86/include/asm/vermagic.h ++++ b/arch/x86/include/asm/vermagic.h +@@ -27,6 +27,36 @@ struct mod_arch_specific { + #define MODULE_PROC_FAMILY "586MMX " + #elif defined CONFIG_MCORE2 + #define MODULE_PROC_FAMILY "CORE2 " ++#elif defined CONFIG_MNATIVE ++#define MODULE_PROC_FAMILY "NATIVE " ++#elif defined CONFIG_MNEHALEM ++#define MODULE_PROC_FAMILY "NEHALEM " ++#elif defined CONFIG_MWESTMERE ++#define MODULE_PROC_FAMILY "WESTMERE " ++#elif defined 
CONFIG_MSILVERMONT ++#define MODULE_PROC_FAMILY "SILVERMONT " ++#elif defined CONFIG_MGOLDMONT ++#define MODULE_PROC_FAMILY "GOLDMONT " ++#elif defined CONFIG_MGOLDMONTPLUS ++#define MODULE_PROC_FAMILY "GOLDMONTPLUS " ++#elif defined CONFIG_MSANDYBRIDGE ++#define MODULE_PROC_FAMILY "SANDYBRIDGE " ++#elif defined CONFIG_MIVYBRIDGE ++#define MODULE_PROC_FAMILY "IVYBRIDGE " ++#elif defined CONFIG_MHASWELL ++#define MODULE_PROC_FAMILY "HASWELL " ++#elif defined CONFIG_MBROADWELL ++#define MODULE_PROC_FAMILY "BROADWELL " ++#elif defined CONFIG_MSKYLAKE ++#define MODULE_PROC_FAMILY "SKYLAKE " ++#elif defined CONFIG_MSKYLAKEX ++#define MODULE_PROC_FAMILY "SKYLAKEX " ++#elif defined CONFIG_MCANNONLAKE ++#define MODULE_PROC_FAMILY "CANNONLAKE " ++#elif defined CONFIG_MICELAKE ++#define MODULE_PROC_FAMILY "ICELAKE " ++#elif defined CONFIG_MCASCADELAKE ++#define MODULE_PROC_FAMILY "CASCADELAKE " + #elif defined CONFIG_MATOM + #define MODULE_PROC_FAMILY "ATOM " + #elif defined CONFIG_M686 +@@ -45,6 +75,28 @@ struct mod_arch_specific { + #define MODULE_PROC_FAMILY "K7 " + #elif defined CONFIG_MK8 + #define MODULE_PROC_FAMILY "K8 " ++#elif defined CONFIG_MK8SSE3 ++#define MODULE_PROC_FAMILY "K8SSE3 " ++#elif defined CONFIG_MK10 ++#define MODULE_PROC_FAMILY "K10 " ++#elif defined CONFIG_MBARCELONA ++#define MODULE_PROC_FAMILY "BARCELONA " ++#elif defined CONFIG_MBOBCAT ++#define MODULE_PROC_FAMILY "BOBCAT " ++#elif defined CONFIG_MBULLDOZER ++#define MODULE_PROC_FAMILY "BULLDOZER " ++#elif defined CONFIG_MPILEDRIVER ++#define MODULE_PROC_FAMILY "PILEDRIVER " ++#elif defined CONFIG_MSTEAMROLLER ++#define MODULE_PROC_FAMILY "STEAMROLLER " ++#elif defined CONFIG_MJAGUAR ++#define MODULE_PROC_FAMILY "JAGUAR " ++#elif defined CONFIG_MEXCAVATOR ++#define MODULE_PROC_FAMILY "EXCAVATOR " ++#elif defined CONFIG_MZEN ++#define MODULE_PROC_FAMILY "ZEN " ++#elif defined CONFIG_MZEN2 ++#define MODULE_PROC_FAMILY "ZEN2 " + #elif defined CONFIG_MELAN + #define MODULE_PROC_FAMILY "ELAN " + #elif defined CONFIG_MCRUSOE +diff --git a/fs/dcache.c b/fs/dcache.c +index 2acfc69878f5..3f1131431e06 100644 +--- a/fs/dcache.c ++++ b/fs/dcache.c +@@ -69,7 +69,7 @@ + * If no ancestor relationship: + * arbitrary, since it's serialized on rename_lock + */ +-int sysctl_vfs_cache_pressure __read_mostly = 100; ++int sysctl_vfs_cache_pressure __read_mostly = 50; + EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure); + + __cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock); +diff --git a/kernel/sched/core.c b/kernel/sched/core.c +index 211890edf37e..37121563407d 100644 +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -41,7 +41,7 @@ const_debug unsigned int sysctl_sched_features = + * Number of tasks to iterate in a single balance run. + * Limited because this is done with IRQs disabled. + */ +-const_debug unsigned int sysctl_sched_nr_migrate = 32; ++const_debug unsigned int sysctl_sched_nr_migrate = 128; + + /* + * period over which we average the RT time consumption, measured +@@ -61,9 +61,9 @@ __read_mostly int scheduler_running; + + /* + * part of the period that we allow rt tasks to run in us. +- * default: 0.95s ++ * XanMod default: 0.98s + */ +-int sysctl_sched_rt_runtime = 950000; ++int sysctl_sched_rt_runtime = 980000; + + /* + * __task_rq_lock - lock the rq @p resides on. 
+diff --git a/scripts/setlocalversion b/scripts/setlocalversion +index 71f39410691b..288f9679e883 100755 +--- a/scripts/setlocalversion ++++ b/scripts/setlocalversion +@@ -54,7 +54,7 @@ scm_version() + # If only the short version is requested, don't bother + # running further git commands + if $short; then +- echo "+" ++ # echo "+" + return + fi + # If we are past a tagged commit (like + +From f85ed068b4d0e6c31edce8574a95757a60e58b87 Mon Sep 17 00:00:00 2001 +From: Etienne Juvigny +Date: Mon, 3 Sep 2018 17:36:25 +0200 +Subject: Zenify & stuff + + +diff --git a/Documentation/tp_smapi.txt b/Documentation/tp_smapi.txt +new file mode 100644 +index 000000000000..a249678a8866 +--- /dev/null ++++ b/Documentation/tp_smapi.txt +@@ -0,0 +1,275 @@ ++tp_smapi version 0.42 ++IBM ThinkPad hardware functions driver ++ ++Author: Shem Multinymous ++Project: http://sourceforge.net/projects/tpctl ++Wiki: http://thinkwiki.org/wiki/tp_smapi ++List: linux-thinkpad@linux-thinkpad.org ++ (http://mailman.linux-thinkpad.org/mailman/listinfo/linux-thinkpad) ++ ++Description ++----------- ++ ++ThinkPad laptops include a proprietary interface called SMAPI BIOS ++(System Management Application Program Interface) which provides some ++hardware control functionality that is not accessible by other means. ++ ++This driver exposes some features of the SMAPI BIOS through a sysfs ++interface. It is suitable for newer models, on which SMAPI is invoked ++through IO port writes. Older models use a different SMAPI interface; ++for those, try the "thinkpad" module from the "tpctl" package. ++ ++WARNING: ++This driver uses undocumented features and direct hardware access. ++It thus cannot be guaranteed to work, and may cause arbitrary damage ++(especially on models it wasn't tested on). ++ ++ ++Module parameters ++----------------- ++ ++thinkpad_ec module: ++ force_io=1 lets thinkpad_ec load on some recent ThinkPad models ++ (e.g., T400 and T500) whose BIOS's ACPI DSDT reserves the ports we need. ++tp_smapi module: ++ debug=1 enables verbose dmesg output. ++ ++ ++Usage ++----- ++ ++Control of battery charging thresholds (in percents of current full charge ++capacity): ++ ++# echo 40 > /sys/devices/platform/smapi/BAT0/start_charge_thresh ++# echo 70 > /sys/devices/platform/smapi/BAT0/stop_charge_thresh ++# cat /sys/devices/platform/smapi/BAT0/*_charge_thresh ++ ++ (This is useful since Li-Ion batteries wear out much faster at very ++ high or low charge levels. The driver will also keeps the thresholds ++ across suspend-to-disk with AC disconnected; this isn't done ++ automatically by the hardware.) ++ ++Inhibiting battery charging for 17 minutes (overrides thresholds): ++ ++# echo 17 > /sys/devices/platform/smapi/BAT0/inhibit_charge_minutes ++# echo 0 > /sys/devices/platform/smapi/BAT0/inhibit_charge_minutes # stop ++# cat /sys/devices/platform/smapi/BAT0/inhibit_charge_minutes ++ ++ (This can be used to control which battery is charged when using an ++ Ultrabay battery.) ++ ++Forcing battery discharging even if AC power available: ++ ++# echo 1 > /sys/devices/platform/smapi/BAT0/force_discharge # start discharge ++# echo 0 > /sys/devices/platform/smapi/BAT0/force_discharge # stop discharge ++# cat /sys/devices/platform/smapi/BAT0/force_discharge ++ ++ (When AC is connected, forced discharging will automatically stop ++ when battery is fully depleted -- this is useful for calibration. ++ Also, this attribute can be used to control which battery is discharged ++ when both a system battery and an Ultrabay battery are connected.) 
++ ++Misc read-only battery status attributes (see note about HDAPS below): ++ ++/sys/devices/platform/smapi/BAT0/installed # 0 or 1 ++/sys/devices/platform/smapi/BAT0/state # idle/charging/discharging ++/sys/devices/platform/smapi/BAT0/cycle_count # integer counter ++/sys/devices/platform/smapi/BAT0/current_now # instantaneous current ++/sys/devices/platform/smapi/BAT0/current_avg # last minute average ++/sys/devices/platform/smapi/BAT0/power_now # instantaneous power ++/sys/devices/platform/smapi/BAT0/power_avg # last minute average ++/sys/devices/platform/smapi/BAT0/last_full_capacity # in mWh ++/sys/devices/platform/smapi/BAT0/remaining_percent # remaining percent of energy (set by calibration) ++/sys/devices/platform/smapi/BAT0/remaining_percent_error # error range of remaing_percent (not reset by calibration) ++/sys/devices/platform/smapi/BAT0/remaining_running_time # in minutes, by last minute average power ++/sys/devices/platform/smapi/BAT0/remaining_running_time_now # in minutes, by instantenous power ++/sys/devices/platform/smapi/BAT0/remaining_charging_time # in minutes ++/sys/devices/platform/smapi/BAT0/remaining_capacity # in mWh ++/sys/devices/platform/smapi/BAT0/design_capacity # in mWh ++/sys/devices/platform/smapi/BAT0/voltage # in mV ++/sys/devices/platform/smapi/BAT0/design_voltage # in mV ++/sys/devices/platform/smapi/BAT0/charging_max_current # max charging current ++/sys/devices/platform/smapi/BAT0/charging_max_voltage # max charging voltage ++/sys/devices/platform/smapi/BAT0/group{0,1,2,3}_voltage # see below ++/sys/devices/platform/smapi/BAT0/manufacturer # string ++/sys/devices/platform/smapi/BAT0/model # string ++/sys/devices/platform/smapi/BAT0/barcoding # string ++/sys/devices/platform/smapi/BAT0/chemistry # string ++/sys/devices/platform/smapi/BAT0/serial # integer ++/sys/devices/platform/smapi/BAT0/manufacture_date # YYYY-MM-DD ++/sys/devices/platform/smapi/BAT0/first_use_date # YYYY-MM-DD ++/sys/devices/platform/smapi/BAT0/temperature # in milli-Celsius ++/sys/devices/platform/smapi/BAT0/dump # see below ++/sys/devices/platform/smapi/ac_connected # 0 or 1 ++ ++The BAT0/group{0,1,2,3}_voltage attribute refers to the separate cell groups ++in each battery. For example, on the ThinkPad 600, X3x, T4x and R5x models, ++the battery contains 3 cell groups in series, where each group consisting of 2 ++or 3 cells connected in parallel. The voltage of each group is given by these ++attributes, and their sum (roughly) equals the "voltage" attribute. ++(The effective performance of the battery is determined by the weakest group, ++i.e., the one those voltage changes most rapidly during dis/charging.) ++ ++The "BAT0/dump" attribute gives a a hex dump of the raw status data, which ++contains additional data now in the above (if you can figure it out). Some ++unused values are autodetected and replaced by "--": ++ ++In all of the above, replace BAT0 with BAT1 to address the 2nd battery (e.g. ++in the UltraBay). ++ ++ ++Raw SMAPI calls: ++ ++/sys/devices/platform/smapi/smapi_request ++This performs raw SMAPI calls. It uses a bad interface that cannot handle ++multiple simultaneous access. Don't touch it, it's for development only. ++If you did touch it, you would so something like ++# echo '211a 100 0 0' > /sys/devices/platform/smapi/smapi_request ++# cat /sys/devices/platform/smapi/smapi_request ++and notice that in the output "211a 34b b2 0 0 0 'OK'", the "4b" in the 2nd ++value, converted to decimal is 75: the current charge stop threshold. 
++ ++ ++Model-specific status ++--------------------- ++ ++Works (at least partially) on the following ThinkPad model: ++* A30 ++* G41 ++* R40, R50p, R51, R52 ++* T23, T40, T40p, T41, T41p, T42, T42p, T43, T43p, T60, T61, T400, T410, T420 (partially) ++* X24, X31, X32, X40, X41, X60, X61, X200, X201, X220 (partially) ++* Z60t, Z61m ++ ++Does not work on: ++* X230 and newer ++* T430 and newer ++* Any ThinkPad Edge ++* Any ThinkPad Yoga ++* Any ThinkPad L series ++* Any ThinkPad P series ++ ++Not all functions are available on all models; for detailed status, see: ++ http://thinkwiki.org/wiki/tp_smapi ++ ++Please report success/failure by e-mail or on the Wiki. ++If you get a "not implemented" or "not supported" message, your laptop ++probably just can't do that (at least not via the SMAPI BIOS). ++For negative reports, follow the bug reporting guidelines below. ++If you send me the necessary technical data (i.e., SMAPI function ++interfaces), I will support additional models. ++ ++ ++Additional HDAPS features ++------------------------- ++ ++The modified hdaps driver has several improvements on the one in mainline ++(beyond resolving the conflict with thinkpad_ec and tp_smapi): ++ ++- Fixes reliability and improves support for recent ThinkPad models ++ (especially *60 and newer). Unlike the mainline driver, the modified hdaps ++ correctly follows the Embedded Controller communication protocol. ++ ++- Extends the "invert" parameter to cover all possible axis orientations. ++ The possible values are as follows. ++ Let X,Y denote the hardware readouts. ++ Let R denote the laptop's roll (tilt left/right). ++ Let P denote the laptop's pitch (tilt forward/backward). ++ invert=0: R= X P= Y (same as mainline) ++ invert=1: R=-X P=-Y (same as mainline) ++ invert=2: R=-X P= Y (new) ++ invert=3: R= X P=-Y (new) ++ invert=4: R= Y P= X (new) ++ invert=5: R=-Y P=-X (new) ++ invert=6: R=-Y P= X (new) ++ invert=7: R= Y P=-X (new) ++ It's probably easiest to just try all 8 possibilities and see which yields ++ correct results (e.g., in the hdaps-gl visualisation). ++ ++- Adds a whitelist which automatically sets the correct axis orientation for ++ some models. If the value for your model is wrong or missing, you can override ++ it using the "invert" parameter. Please also update the tables at ++ http://www.thinkwiki.org/wiki/tp_smapi and ++ http://www.thinkwiki.org/wiki/List_of_DMI_IDs ++ and submit a patch for the whitelist in hdaps.c. ++ ++- Provides new attributes: ++ /sys/devices/platform/hdaps/sampling_rate: ++ This determines the frequency at which the host queries the embedded ++ controller for accelerometer data (and informs the hdaps input devices). ++ Default=50. ++ /sys/devices/platform/hdaps/oversampling_ratio: ++ When set to X, the embedded controller is told to do physical accelerometer ++ measurements at a rate that is X times higher than the rate at which ++ the driver reads those measurements (i.e., X*sampling_rate). This ++ makes the readouts from the embedded controller more fresh, and is also ++ useful for the running average filter (see next). Default=5 ++ /sys/devices/platform/hdaps/running_avg_filter_order: ++ When set to X, reported readouts will be the average of the last X physical ++ accelerometer measurements. Current firmware allows 1<=X<=8. Setting to a ++ high value decreases readout fluctuations. The averaging is handled by the ++ embedded controller, so no CPU resources are used. 
Higher values make the ++ readouts smoother, since it averages out both sensor noise (good) and abrupt ++ changes (bad). Default=2. ++ ++- Provides a second input device, which publishes the raw accelerometer ++ measurements (without the fuzzing needed for joystick emulation). This input ++ device can be matched by a udev rule such as the following (all on one line): ++ KERNEL=="event[0-9]*", ATTRS{phys}=="hdaps/input1", ++ ATTRS{modalias}=="input:b0019v1014p5054e4801-*", ++ SYMLINK+="input/hdaps/accelerometer-event ++ ++A new version of the hdapsd userspace daemon, which uses the input device ++interface instead of polling sysfs, is available seprately. Using this reduces ++the total interrupts per second generated by hdaps+hdapsd (on tickless kernels) ++to 50, down from a value that fluctuates between 50 and 100. Set the ++sampling_rate sysfs attribute to a lower value to further reduce interrupts, ++at the expense of response latency. ++ ++Licensing note: all my changes to the HDAPS driver are licensed under the ++GPL version 2 or, at your option and to the extent allowed by derivation from ++prior works, any later version. My version of hdaps is derived work from the ++mainline version, which at the time of writing is available only under ++GPL version 2. ++ ++Bug reporting ++------------- ++ ++Mail . Please include: ++* Details about your model, ++* Relevant "dmesg" output. Make sure thinkpad_ec and tp_smapi are loaded with ++ the "debug=1" parameter (e.g., use "make load HDAPS=1 DEBUG=1"). ++* Output of "dmidecode | grep -C5 Product" ++* Does the failed functionality works under Windows? ++ ++ ++More about SMAPI ++---------------- ++ ++For hints about what may be possible via the SMAPI BIOS and how, see: ++ ++* IBM Technical Reference Manual for the ThinkPad 770 ++ (http://www-307.ibm.com/pc/support/site.wss/document.do?lndocid=PFAN-3TUQQD) ++* Exported symbols in PWRMGRIF.DLL or TPPWRW32.DLL (e.g., use "objdump -x"). ++* drivers/char/mwave/smapi.c in the Linux kernel tree.* ++* The "thinkpad" SMAPI module (http://tpctl.sourceforge.net). ++* The SMAPI_* constants in tp_smapi.c. ++ ++Note that in the above Technical Reference and in the "thinkpad" module, ++SMAPI is invoked through a function call to some physical address. However, ++the interface used by tp_smapi and the above mwave drive, and apparently ++required by newer ThinkPad, is different: you set the parameters up in the ++CPU's registers and write to ports 0xB2 (the APM control port) and 0x4F; this ++triggers an SMI (System Management Interrupt), causing the CPU to enter ++SMM (System Management Mode) and run the BIOS firmware; the results are ++returned in the CPU's registers. It is not clear what is the relation between ++the two variants of SMAPI, though the assignment of error codes seems to be ++similar. ++ ++In addition, the embedded controller on ThinkPad laptops has a non-standard ++interface at IO ports 0x1600-0x161F (mapped to LCP channel 3 of the H8S chip). ++The interface provides various system management services (currently known: ++battery information and accelerometer readouts). 
For more information see the ++thinkpad_ec module and the H8S hardware documentation: ++http://documentation.renesas.com/eng/products/mpumcu/rej09b0300_2140bhm.pdf +diff --git a/init/Kconfig b/init/Kconfig +index b4daad2bac23..c1e59dc04209 100644 +--- a/init/Kconfig ++++ b/init/Kconfig +@@ -1244,7 +1244,6 @@ config CC_OPTIMIZE_FOR_PERFORMANCE + + config CC_OPTIMIZE_FOR_PERFORMANCE_O3 + bool "Optimize more for performance (-O3)" +- depends on ARC + help + Choosing this option will pass "-O3" to your compiler to optimize + the kernel yet more for performance. +diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c +index 4f32c4062fb6..c0bf039e1b40 100644 +--- a/drivers/infiniband/core/addr.c ++++ b/drivers/infiniband/core/addr.c +@@ -721,6 +721,7 @@ int rdma_addr_find_l2_eth_by_grh(const union ib_gid *sgid, + struct sockaddr _sockaddr; + struct sockaddr_in _sockaddr_in; + struct sockaddr_in6 _sockaddr_in6; ++ struct sockaddr_ib _sockaddr_ib; + } sgid_addr, dgid_addr; + int ret; + +diff --git a/drivers/tty/Kconfig b/drivers/tty/Kconfig +index 0840d27381ea..73aba9a31064 100644 +--- a/drivers/tty/Kconfig ++++ b/drivers/tty/Kconfig +@@ -75,6 +75,19 @@ config VT_CONSOLE_SLEEP + def_bool y + depends on VT_CONSOLE && PM_SLEEP + ++config NR_TTY_DEVICES ++ int "Maximum tty device number" ++ depends on VT ++ range 12 63 ++ default 63 ++ help ++ This option is used to change the number of tty devices in /dev. ++ The default value is 63. The lowest number you can set is 12, ++ 63 is also the upper limit so we don't overrun the serial ++ consoles. ++ ++ If unsure, say 63. ++ + config HW_CONSOLE + bool + depends on VT && !UML +diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h +index 79226ca8f80f..2a30060e7e1d 100644 +--- a/include/linux/blkdev.h ++++ b/include/linux/blkdev.h +@@ -47,7 +47,11 @@ struct blk_queue_stats; + struct blk_stat_callback; + + #define BLKDEV_MIN_RQ 4 ++#ifdef CONFIG_ZENIFY ++#define BLKDEV_MAX_RQ 512 ++#else + #define BLKDEV_MAX_RQ 128 /* Default maximum */ ++#endif + + /* Must be consistent with blk_mq_poll_stats_bkt() */ + #define BLK_MQ_POLL_STATS_BKTS 16 +diff --git a/include/uapi/linux/vt.h b/include/uapi/linux/vt.h +index e9d39c48520a..3bceead8da40 100644 +--- a/include/uapi/linux/vt.h ++++ b/include/uapi/linux/vt.h +@@ -3,12 +3,25 @@ + #define _UAPI_LINUX_VT_H + + ++/* ++ * We will make this definition solely for the purpose of making packages ++ * such as splashutils build, because they can not understand that ++ * NR_TTY_DEVICES is defined in the kernel configuration. ++ */ ++#ifndef CONFIG_NR_TTY_DEVICES ++#define CONFIG_NR_TTY_DEVICES 63 ++#endif ++ + /* + * These constants are also useful for user-level apps (e.g., VC + * resizing). 
+ */ + #define MIN_NR_CONSOLES 1 /* must be at least 1 */ +-#define MAX_NR_CONSOLES 63 /* serial lines start at 64 */ ++/* ++ * NR_TTY_DEVICES: ++ * Value MUST be at least 12 and must never be higher then 63 ++ */ ++#define MAX_NR_CONSOLES CONFIG_NR_TTY_DEVICES /* serial lines start above this */ + /* Note: the ioctl VT_GETSTATE does not work for + consoles 16 and higher (since it returns a short) */ + +diff --git a/init/Kconfig b/init/Kconfig +index 041f3a022122..5ed70eb1ad3a 100644 +--- a/init/Kconfig ++++ b/init/Kconfig +@@ -45,6 +45,38 @@ config THREAD_INFO_IN_TASK + + menu "General setup" + ++config ZENIFY ++ bool "A selection of patches from Zen/Liquorix kernel and additional tweaks for a better gaming experience" ++ default y ++ help ++ Tunes the kernel for responsiveness at the cost of throughput and power usage. ++ ++ --- Virtual Memory Subsystem --------------------------- ++ ++ Mem dirty before bg writeback..: 10 % -> 20 % ++ Mem dirty before sync writeback: 20 % -> 50 % ++ ++ --- Block Layer ---------------------------------------- ++ ++ Queue depth...............: 128 -> 512 ++ Default MQ scheduler......: mq-deadline -> bfq ++ ++ --- CFS CPU Scheduler ---------------------------------- ++ ++ Scheduling latency.............: 6 -> 3 ms ++ Minimal granularity............: 0.75 -> 0.3 ms ++ Wakeup granularity.............: 1 -> 0.5 ms ++ CPU migration cost.............: 0.5 -> 0.25 ms ++ Bandwidth slice size...........: 5 -> 3 ms ++ Ondemand fine upscaling limit..: 95 % -> 85 % ++ ++ --- MuQSS CPU Scheduler -------------------------------- ++ ++ Scheduling interval............: 6 -> 3 ms ++ ISO task max realtime use......: 70 % -> 25 % ++ Ondemand coarse upscaling limit: 80 % -> 45 % ++ Ondemand fine upscaling limit..: 95 % -> 45 % ++ + config BROKEN + bool + +diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c +index 2f0a0be4d344..bada807c7e59 100644 +--- a/kernel/sched/fair.c ++++ b/kernel/sched/fair.c +@@ -37,8 +37,13 @@ + * + * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds) + */ ++#ifdef CONFIG_ZENIFY ++unsigned int sysctl_sched_latency = 3000000ULL; ++static unsigned int normalized_sysctl_sched_latency = 3000000ULL; ++#else + unsigned int sysctl_sched_latency = 6000000ULL; + static unsigned int normalized_sysctl_sched_latency = 6000000ULL; ++#endif + + /* + * The initial- and re-scaling of tunables is configurable +@@ -58,13 +63,22 @@ enum sched_tunable_scaling sysctl_sched_tunable_scaling = SCHED_TUNABLESCALING_L + * + * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds) + */ ++#ifdef CONFIG_ZENIFY ++unsigned int sysctl_sched_min_granularity = 300000ULL; ++static unsigned int normalized_sysctl_sched_min_granularity = 300000ULL; ++#else + unsigned int sysctl_sched_min_granularity = 750000ULL; + static unsigned int normalized_sysctl_sched_min_granularity = 750000ULL; ++#endif + + /* + * This value is kept at sysctl_sched_latency/sysctl_sched_min_granularity + */ ++#ifdef CONFIG_ZENIFY ++static unsigned int sched_nr_latency = 10; ++#else + static unsigned int sched_nr_latency = 8; ++#endif + + /* + * After fork, child runs first. 
If set to 0 (default) then +@@ -81,10 +95,17 @@ unsigned int sysctl_sched_child_runs_first __read_mostly; + * + * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds) + */ ++#ifdef CONFIG_ZENIFY ++unsigned int sysctl_sched_wakeup_granularity = 500000UL; ++static unsigned int normalized_sysctl_sched_wakeup_granularity = 500000UL; ++ ++const_debug unsigned int sysctl_sched_migration_cost = 50000UL; ++#else + unsigned int sysctl_sched_wakeup_granularity = 1000000UL; + static unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL; + + const_debug unsigned int sysctl_sched_migration_cost = 500000UL; ++#endif + + #ifdef CONFIG_SMP + /* +@@ -107,8 +128,12 @@ int __weak arch_asym_cpu_priority(int cpu) + * + * (default: 5 msec, units: microseconds) + */ ++#ifdef CONFIG_ZENIFY ++unsigned int sysctl_sched_cfs_bandwidth_slice = 3000UL; ++#else + unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL; + #endif ++#endif + + /* + * The margin used when comparing utilization with CPU capacity: +diff --git a/mm/page-writeback.c b/mm/page-writeback.c +index 337c6afb3345..9315e358f292 100644 +--- a/mm/page-writeback.c ++++ b/mm/page-writeback.c +@@ -71,7 +71,11 @@ static long ratelimit_pages = 32; + /* + * Start background writeback (via writeback threads) at this percentage + */ ++#ifdef CONFIG_ZENIFY ++int dirty_background_ratio = 20; ++#else + int dirty_background_ratio = 10; ++#endif + + /* + * dirty_background_bytes starts at 0 (disabled) so that it is a function of +@@ -88,7 +92,11 @@ int vm_highmem_is_dirtyable; + /* + * The generator of dirty data starts writeback at this percentage + */ ++#ifdef CONFIG_ZENIFY ++int vm_dirty_ratio = 50; ++#else + int vm_dirty_ratio = 20; ++#endif + + /* + * vm_dirty_bytes starts at 0 (disabled) so that it is a function of +diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig +index 80dad301361d..42b7fa7d01f8 100644 +--- a/net/ipv4/Kconfig ++++ b/net/ipv4/Kconfig +@@ -702,6 +702,9 @@ choice + config DEFAULT_VEGAS + bool "Vegas" if TCP_CONG_VEGAS=y + ++ config DEFAULT_YEAH ++ bool "YeAH" if TCP_CONG_YEAH=y ++ + config DEFAULT_VENO + bool "Veno" if TCP_CONG_VENO=y + +@@ -735,6 +738,7 @@ config DEFAULT_TCP_CONG + default "htcp" if DEFAULT_HTCP + default "hybla" if DEFAULT_HYBLA + default "vegas" if DEFAULT_VEGAS ++ default "yeah" if DEFAULT_YEAH + default "westwood" if DEFAULT_WESTWOOD + default "veno" if DEFAULT_VENO + default "reno" if DEFAULT_RENO + +From: Nick Desaulniers +Date: Mon, 24 Dec 2018 13:37:41 +0200 +Subject: include/linux/compiler*.h: define asm_volatile_goto + +asm_volatile_goto should also be defined for other compilers that +support asm goto. + +Fixes commit 815f0dd ("include/linux/compiler*.h: make compiler-*.h +mutually exclusive"). + +Signed-off-by: Nick Desaulniers +Signed-off-by: Miguel Ojeda + +diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h +index ba814f1..e77eeb0 100644 +--- a/include/linux/compiler_types.h ++++ b/include/linux/compiler_types.h +@@ -188,6 +188,10 @@ struct ftrace_likely_data { + #define asm_volatile_goto(x...) asm goto(x) + #endif + ++#ifndef asm_volatile_goto ++#define asm_volatile_goto(x...) asm goto(x) ++#endif ++ + /* Are two types/vars the same type (ignoring qualifiers)? 
*/ + #define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b)) + +From: Andy Lavr +Date: Mon, 24 Dec 2018 14:57:47 +0200 +Subject: avl: Use [defer+madvise] as default khugepaged defrag strategy + +For some reason, the default strategy to respond to THP fault fallbacks +is still just madvise, meaning stall if the program wants transparent +hugepages, but don't trigger a background reclaim / compaction if THP +begins to fail allocations. This creates a snowball affect where we +still use the THP code paths, but we almost always fail once a system +has been active and busy for a while. + +The option "defer" was created for interactive systems where THP can +still improve performance. If we have to fallback to a regular page due +to an allocation failure or anything else, we will trigger a background +reclaim and compaction so future THP attempts succeed and previous +attempts eventually have their smaller pages combined without stalling +running applications. + +We still want madvise to stall applications that explicitely want THP, +so defer+madvise _does_ make a ton of sense. Make it the default for +interactive systems, especially if the kernel maintainer left +transparent hugepages on "always". + +Reasoning and details in the original patch: +https://lwn.net/Articles/711248/ + +Signed-off-by: Andy Lavr + +diff --git a/mm/huge_memory.c b/mm/huge_memory.c +index e84a10b..21d62b7 100644 +--- a/mm/huge_memory.c ++++ b/mm/huge_memory.c +@@ -53,7 +53,11 @@ unsigned long transparent_hugepage_flags __read_mostly = + #ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE + (1< +Date: Mon, 25 Nov 2019 15:13:06 -0300 +Subject: [PATCH] elevator: set default scheduler to bfq for blk-mq + +Signed-off-by: Alexandre Frade +--- + block/elevator.c | 6 +++--- + 1 file changed, 3 insertions(+), 3 deletions(-) + +diff --git a/block/elevator.c b/block/elevator.c +index 076ba7308e65..81f89095aa77 100644 +--- a/block/elevator.c ++++ b/block/elevator.c +@@ -623,15 +623,15 @@ static inline bool elv_support_iosched(struct request_queue *q) + } + + /* +- * For single queue devices, default to using mq-deadline. If we have multiple +- * queues or mq-deadline is not available, default to "none". ++ * For single queue devices, default to using bfq. If we have multiple ++ * queues or bfq is not available, default to "none". 
+ */ + static struct elevator_type *elevator_get_default(struct request_queue *q) + { + if (q->nr_hw_queues != 1) + return NULL; + +- return elevator_get(q, "mq-deadline", false); ++ return elevator_get(q, "bfq", false); + } + + /* +From c3ec05777c46e19a8a26d0fc4ca0c0db8a19de97 Mon Sep 17 00:00:00 2001 +From: Alexandre Frade +Date: Fri, 10 May 2019 16:45:59 -0300 +Subject: [PATCH] block: set rq_affinity = 2 for full multithreading I/O + requests + +Signed-off-by: Alexandre Frade +--- + include/linux/blkdev.h | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h +index f3ea78b0c91c..4dbacc6b073b 100644 +--- a/include/linux/blkdev.h ++++ b/include/linux/blkdev.h +@@ -621,7 +621,8 @@ struct request_queue { + #define QUEUE_FLAG_RQ_ALLOC_TIME 27 /* record rq->alloc_time_ns */ + + #define QUEUE_FLAG_MQ_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ +- (1 << QUEUE_FLAG_SAME_COMP)) ++ (1 << QUEUE_FLAG_SAME_COMP) | \ ++ (1 << QUEUE_FLAG_SAME_FORCE)) + + void blk_queue_flag_set(unsigned int flag, struct request_queue *q); + void blk_queue_flag_clear(unsigned int flag, struct request_queue *q); +From 8171d33d0b84a953649863538fdbe4c26c035e4f Mon Sep 17 00:00:00 2001 +From: Alexandre Frade +Date: Fri, 10 May 2019 14:32:50 -0300 +Subject: [PATCH] mm: set 2 megabytes for address_space-level file read-ahead + pages size + +Signed-off-by: Alexandre Frade +--- + include/linux/pagemap.h | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h +index a2adf95b3f9c..e804d9f7583a 100644 +--- a/include/linux/pagemap.h ++++ b/include/linux/pagemap.h +@@ -654,7 +654,7 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask); + void delete_from_page_cache_batch(struct address_space *mapping, + struct pagevec *pvec); + +-#define VM_READAHEAD_PAGES (SZ_128K / PAGE_SIZE) ++#define VM_READAHEAD_PAGES (SZ_2M / PAGE_SIZE) + + void page_cache_sync_readahead(struct address_space *, struct file_ra_state *, + struct file *, pgoff_t index, unsigned long req_count); +From de7119e3db9fdb4c704355854a02a7e9fad931d4 Mon Sep 17 00:00:00 2001 +From: Steven Barrett +Date: Wed, 15 Jan 2020 20:43:56 -0600 +Subject: [PATCH] ZEN: intel-pstate: Implement "enable" parameter + +If intel-pstate is compiled into the kernel, it will preempt the loading +of acpi-cpufreq so you can take advantage of hardware p-states without +any friction. + +However, intel-pstate is not completely superior to cpufreq's ondemand +for one reason. There's no concept of an up_threshold property. + +In ondemand, up_threshold essentially reduces the maximum utilization to +compare against, allowing you to hit max frequencies and turbo boost +from a much lower core utilization. + +With intel-pstate, you have the concept of minimum and maximum +performance, but no tunable that lets you define, maximum frequency +means 50% core utilization. For just this oversight, there's reasons +you may want ondemand. + +Lets support setting "enable" in kernel boot parameters. This lets +kernel maintainers include "intel_pstate=disable" statically in the +static boot parameters, but let users of the kernel override this +selection. 
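Illustrative usage sketch (an editorial aside, not part of the patch text above; the command-line values are assumed for the example): with "intel_pstate=disable" baked into the built-in command line (CONFIG_CMDLINE) by the kernel maintainer, a user can still bring the driver back from the bootloader. The built-in portion typically ends up ahead of the bootloader-supplied portion on x86 (unless CONFIG_CMDLINE_OVERRIDE is set), and intel_pstate_setup() runs once per occurrence of the parameter, so the later "enable" wins:

    # effective command line seen by the kernel (example):
    #   intel_pstate=disable root=... rw intel_pstate=enable
    # intel_pstate_setup() first sets no_load = 1 for "disable",
    # then the trailing "enable" resets no_load = 0, so the driver loads
    # and hardware P-states are used instead of acpi-cpufreq.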
+--- + Documentation/admin-guide/kernel-parameters.txt | 3 +++ + drivers/cpufreq/intel_pstate.c | 2 ++ + 2 files changed, 5 insertions(+) + +diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt +index ade4e6ec23e03..0b613370d28d8 100644 +--- a/Documentation/admin-guide/kernel-parameters.txt ++++ b/Documentation/admin-guide/kernel-parameters.txt +@@ -1765,6 +1765,9 @@ + disable + Do not enable intel_pstate as the default + scaling driver for the supported processors ++ enable ++ Enable intel_pstate in-case "disable" was passed ++ previously in the kernel boot parameters + passive + Use intel_pstate as a scaling driver, but configure it + to work with generic cpufreq governors (instead of +diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c +index d2fa3e9ccd97c..bd10cb02fc0ff 100644 +--- a/drivers/cpufreq/intel_pstate.c ++++ b/drivers/cpufreq/intel_pstate.c +@@ -2826,6 +2826,8 @@ static int __init intel_pstate_setup(char *str) + pr_info("HWP disabled\n"); + no_hwp = 1; + } ++ if (!strcmp(str, "enable")) ++ no_load = 0; + if (!strcmp(str, "force")) + force_load = 1; + if (!strcmp(str, "hwp_only")) diff --git a/linux58-rc-tkg/linux58-tkg-patches/0003-glitched-cfs.patch b/linux58-rc-tkg/linux58-tkg-patches/0003-glitched-cfs.patch new file mode 100644 index 0000000..06b7f02 --- /dev/null +++ b/linux58-rc-tkg/linux58-tkg-patches/0003-glitched-cfs.patch @@ -0,0 +1,72 @@ +diff --git a/kernel/Kconfig.hz b/kernel/Kconfig.hz +index 2a202a846757..1d9c7ed79b11 100644 +--- a/kernel/Kconfig.hz ++++ b/kernel/Kconfig.hz +@@ -4,7 +4,7 @@ + + choice + prompt "Timer frequency" +- default HZ_250 ++ default HZ_500 + help + Allows the configuration of the timer frequency. It is customary + to have the timer interrupt run at 1000 Hz but 100 Hz may be more +@@ -39,6 +39,13 @@ choice + on SMP and NUMA systems and exactly dividing by both PAL and + NTSC frame rates for video and multimedia work. + ++ config HZ_500 ++ bool "500 HZ" ++ help ++ 500 Hz is a balanced timer frequency. Provides fast interactivity ++ on desktops with great smoothness without increasing CPU power ++ consumption and sacrificing the battery life on laptops. ++ + config HZ_1000 + bool "1000 HZ" + help +@@ -52,6 +59,7 @@ config HZ + default 100 if HZ_100 + default 250 if HZ_250 + default 300 if HZ_300 ++ default 500 if HZ_500 + default 1000 if HZ_1000 + + config SCHED_HRTICK + +diff --git a/kernel/Kconfig.hz b/kernel/Kconfig.hz +index 2a202a846757..1d9c7ed79b11 100644 +--- a/kernel/Kconfig.hz ++++ b/kernel/Kconfig.hz +@@ -4,7 +4,7 @@ + + choice + prompt "Timer frequency" +- default HZ_500 ++ default HZ_750 + help + Allows the configuration of the timer frequency. It is customary + to have the timer interrupt run at 1000 Hz but 100 Hz may be more +@@ -46,6 +46,13 @@ choice + on desktops with great smoothness without increasing CPU power + consumption and sacrificing the battery life on laptops. + ++ config HZ_750 ++ bool "750 HZ" ++ help ++ 750 Hz is a good timer frequency for desktops. Provides fast ++ interactivity with great smoothness without sacrificing too ++ much throughput. 
++ + config HZ_1000 + bool "1000 HZ" + help +@@ -60,6 +67,7 @@ config HZ + default 250 if HZ_250 + default 300 if HZ_300 + default 500 if HZ_500 ++ default 750 if HZ_750 + default 1000 if HZ_1000 + + config SCHED_HRTICK + diff --git a/linux58-rc-tkg/linux58-tkg-patches/0005-glitched-ondemand-pds.patch b/linux58-rc-tkg/linux58-tkg-patches/0005-glitched-ondemand-pds.patch new file mode 100644 index 0000000..c1929e8 --- /dev/null +++ b/linux58-rc-tkg/linux58-tkg-patches/0005-glitched-ondemand-pds.patch @@ -0,0 +1,18 @@ +diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c +index 6b423eebfd5d..61e3271675d6 100644 +--- a/drivers/cpufreq/cpufreq_ondemand.c ++++ b/drivers/cpufreq/cpufreq_ondemand.c +@@ -21,10 +21,10 @@ + #include "cpufreq_ondemand.h" + + /* On-demand governor macros */ +-#define DEF_FREQUENCY_UP_THRESHOLD (63) +-#define DEF_SAMPLING_DOWN_FACTOR (1) ++#define DEF_FREQUENCY_UP_THRESHOLD (55) ++#define DEF_SAMPLING_DOWN_FACTOR (5) + #define MAX_SAMPLING_DOWN_FACTOR (100000) +-#define MICRO_FREQUENCY_UP_THRESHOLD (95) ++#define MICRO_FREQUENCY_UP_THRESHOLD (63) + #define MICRO_FREQUENCY_MIN_SAMPLE_RATE (10000) + #define MIN_FREQUENCY_UP_THRESHOLD (1) + #define MAX_FREQUENCY_UP_THRESHOLD (100) diff --git a/linux58-rc-tkg/linux58-tkg-patches/0005-glitched-pds.patch b/linux58-rc-tkg/linux58-tkg-patches/0005-glitched-pds.patch new file mode 100644 index 0000000..23271f5 --- /dev/null +++ b/linux58-rc-tkg/linux58-tkg-patches/0005-glitched-pds.patch @@ -0,0 +1,166 @@ +From f7f49141a5dbe9c99d78196b58c44307fb2e6be3 Mon Sep 17 00:00:00 2001 +From: Tk-Glitch +Date: Wed, 4 Jul 2018 04:30:08 +0200 +Subject: glitched - PDS + +diff --git a/kernel/Kconfig.hz b/kernel/Kconfig.hz +index 2a202a846757..1d9c7ed79b11 100644 +--- a/kernel/Kconfig.hz ++++ b/kernel/Kconfig.hz +@@ -4,7 +4,7 @@ + + choice + prompt "Timer frequency" +- default HZ_250 ++ default HZ_500 + help + Allows the configuration of the timer frequency. It is customary + to have the timer interrupt run at 1000 Hz but 100 Hz may be more +@@ -39,6 +39,13 @@ choice + on SMP and NUMA systems and exactly dividing by both PAL and + NTSC frame rates for video and multimedia work. + ++ config HZ_500 ++ bool "500 HZ" ++ help ++ 500 Hz is a balanced timer frequency. Provides fast interactivity ++ on desktops with great smoothness without increasing CPU power ++ consumption and sacrificing the battery life on laptops. ++ + config HZ_1000 + bool "1000 HZ" + help +@@ -52,6 +59,7 @@ config HZ + default 100 if HZ_100 + default 250 if HZ_250 + default 300 if HZ_300 ++ default 500 if HZ_500 + default 1000 if HZ_1000 + + config SCHED_HRTICK + +diff --git a/kernel/Kconfig.hz b/kernel/Kconfig.hz +index 2a202a846757..1d9c7ed79b11 100644 +--- a/kernel/Kconfig.hz ++++ b/kernel/Kconfig.hz +@@ -4,7 +4,7 @@ + + choice + prompt "Timer frequency" +- default HZ_500 ++ default HZ_750 + help + Allows the configuration of the timer frequency. It is customary + to have the timer interrupt run at 1000 Hz but 100 Hz may be more +@@ -46,6 +46,13 @@ choice + on desktops with great smoothness without increasing CPU power + consumption and sacrificing the battery life on laptops. + ++ config HZ_750 ++ bool "750 HZ" ++ help ++ 750 Hz is a good timer frequency for desktops. Provides fast ++ interactivity with great smoothness without sacrificing too ++ much throughput. 
++ + config HZ_1000 + bool "1000 HZ" + help +@@ -60,6 +67,7 @@ config HZ + default 250 if HZ_250 + default 300 if HZ_300 + default 500 if HZ_500 ++ default 750 if HZ_750 + default 1000 if HZ_1000 + + config SCHED_HRTICK + +diff --git a/mm/vmscan.c b/mm/vmscan.c +index 9270a4370d54..30d01e647417 100644 +--- a/mm/vmscan.c ++++ b/mm/vmscan.c +@@ -159,7 +159,7 @@ struct scan_control { + /* + * From 0 .. 100. Higher means more swappy. + */ +-int vm_swappiness = 60; ++int vm_swappiness = 20; + /* + * The total number of pages which are beyond the high watermark within all + * zones. + +diff --git a/init/Kconfig b/init/Kconfig +index 11fd9b502d06..e9bc34d3019b 100644 +--- a/init/Kconfig ++++ b/init/Kconfig +@@ -715,6 +715,7 @@ menu "Scheduler features" + config UCLAMP_TASK + bool "Enable utilization clamping for RT/FAIR tasks" + depends on CPU_FREQ_GOV_SCHEDUTIL ++ depends on !SCHED_PDS + help + This feature enables the scheduler to track the clamped utilization + of each CPU based on RUNNABLE tasks scheduled on that CPU. +@@ -948,7 +948,6 @@ config CGROUP_DEVICE + + config CGROUP_CPUACCT + bool "Simple CPU accounting controller" +- depends on !SCHED_PDS + help + Provides a simple controller for monitoring the + total CPU consumed by the tasks in a cgroup. +diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile +index b23231bae996..cab4e5c5b38e 100644 +--- a/kernel/sched/Makefile ++++ b/kernel/sched/Makefile +@@ -24,13 +24,13 @@ obj-y += fair.o rt.o deadline.o + obj-$(CONFIG_SMP) += cpudeadline.o topology.o stop_task.o + obj-$(CONFIG_SCHED_AUTOGROUP) += autogroup.o + obj-$(CONFIG_SCHED_DEBUG) += debug.o +-obj-$(CONFIG_CGROUP_CPUACCT) += cpuacct.o + endif + obj-y += loadavg.o clock.o cputime.o + obj-y += idle.o + obj-y += wait.o wait_bit.o swait.o completion.o + obj-$(CONFIG_SMP) += cpupri.o pelt.o + obj-$(CONFIG_SCHEDSTATS) += stats.o ++obj-$(CONFIG_CGROUP_CPUACCT) += cpuacct.o + obj-$(CONFIG_CPU_FREQ) += cpufreq.o + obj-$(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) += cpufreq_schedutil.o + obj-$(CONFIG_MEMBARRIER) += membarrier.o + +diff --git a/kernel/sched/pds.c b/kernel/sched/pds.c +index 9281ad164..f09a609cf 100644 +--- a/kernel/sched/pds.c ++++ b/kernel/sched/pds.c +@@ -81,6 +81,18 @@ enum { + NR_CPU_AFFINITY_CHK_LEVEL + }; + ++/* ++ * This allows printing both to /proc/sched_debug and ++ * to the console ++ */ ++#define SEQ_printf(m, x...) 
\ ++ do { \ ++ if (m) \ ++ seq_printf(m, x); \ ++ else \ ++ pr_cont(x); \ ++ } while (0) ++ + static inline void print_scheduler_version(void) + { + printk(KERN_INFO "pds: PDS-mq CPU Scheduler 0.99o by Alfred Chen.\n"); +@@ -6353,7 +6365,10 @@ void ia64_set_curr_task(int cpu, struct task_struct *p) + #ifdef CONFIG_SCHED_DEBUG + void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns, + struct seq_file *m) +-{} ++{ ++ SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr_ns(p, ns), ++ get_nr_threads(p)); ++} + + void proc_sched_set_task(struct task_struct *p) + {} diff --git a/linux58-rc-tkg/linux58-tkg-patches/0005-v5.8_undead-pds099o.patch b/linux58-rc-tkg/linux58-tkg-patches/0005-v5.8_undead-pds099o.patch new file mode 100644 index 0000000..8be7282 --- /dev/null +++ b/linux58-rc-tkg/linux58-tkg-patches/0005-v5.8_undead-pds099o.patch @@ -0,0 +1,8542 @@ +From 68f1a9541ef3185b1021e8e54d2712c7039418d7 Mon Sep 17 00:00:00 2001 +From: Tk-Glitch +Date: Mon, 15 Jun 2020 23:58:41 +0200 +Subject: PDS 099o, initial 5.8 rebase + + +diff --git a/Documentation/scheduler/sched-PDS-mq.txt b/Documentation/scheduler/sched-PDS-mq.txt +new file mode 100644 +index 000000000000..709e86f6487e +--- /dev/null ++++ b/Documentation/scheduler/sched-PDS-mq.txt +@@ -0,0 +1,56 @@ ++ Priority and Deadline based Skiplist multiple queue Scheduler ++ ------------------------------------------------------------- ++ ++CONTENT ++======== ++ ++ 0. Development ++ 1. Overview ++ 1.1 Design goal ++ 1.2 Design summary ++ 2. Design Detail ++ 2.1 Skip list implementation ++ 2.2 Task preempt ++ 2.3 Task policy, priority and deadline ++ 2.4 Task selection ++ 2.5 Run queue balance ++ 2.6 Task migration ++ ++ ++0. Development ++============== ++ ++Priority and Deadline based Skiplist multiple queue scheduler, referred to as ++PDS from here on, is developed upon the enhancement patchset VRQ(Variable Run ++Queue) for BFS(Brain Fuck Scheduler by Con Kolivas). PDS inherits the existing ++design from VRQ and inspired by the introduction of skiplist data structure ++to the scheduler by Con Kolivas. However, PDS is different from MuQSS(Multiple ++Queue Skiplist Scheduler, the successor after BFS) in many ways. ++ ++1. Overview ++=========== ++ ++1.1 Design goal ++--------------- ++ ++PDS is designed to make the cpu process scheduler code to be simple, but while ++efficiency and scalable. Be Simple, the scheduler code will be easy to be read ++and the behavious of scheduler will be easy to predict. Be efficiency, the ++scheduler shall be well balance the thoughput performance and task interactivity ++at the same time for different properties the tasks behave. Be scalable, the ++performance of the scheduler should be in good shape with the glowing of ++workload or with the growing of the cpu numbers. ++ ++1.2 Design summary ++------------------ ++ ++PDS is described as a multiple run queues cpu scheduler. Each cpu has its own ++run queue. A heavry customized skiplist is used as the backend data structure ++of the cpu run queue. Tasks in run queue is sorted by priority then virtual ++deadline(simplfy to just deadline from here on). In PDS, balance action among ++run queues are kept as less as possible to reduce the migration cost. Cpumask ++data structure is widely used in cpu affinity checking and cpu preemption/ ++selection to make PDS scalable with increasing cpu number. ++ ++ ++To be continued... 
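The design summary above says each run queue keeps its tasks sorted by priority first and virtual deadline second. As a minimal sketch of how a single comparable key can encode that ordering (illustrative helper names, not part of the patch), note that the scheduler code later in this patch maintains exactly such a key in task_struct::priodl via update_task_priodl():

/*
 * Sketch: priority in the high 8 bits, (right-shifted) deadline in the
 * low bits. A plain unsigned compare then sorts by priority first and,
 * within one priority, by earliest virtual deadline - "smaller key runs
 * first".
 */
static inline unsigned long long make_priodl(unsigned int prio,
                                             unsigned long long deadline)
{
        return ((unsigned long long)prio << 56) | (deadline >> 8);
}

/* Task a is dispatched before task b when its key is not larger. */
static inline int runs_before(unsigned long long a_priodl,
                              unsigned long long b_priodl)
{
        return a_priodl <= b_priodl;
}

Because the run queue's skip list is kept sorted on this key, the "what runs next" lookup is O(1): it is always the first node at level zero of the list.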
+diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c +index f18d5067cd0f..fe489fc01c73 100644 +--- a/arch/powerpc/platforms/cell/spufs/sched.c ++++ b/arch/powerpc/platforms/cell/spufs/sched.c +@@ -51,11 +51,6 @@ static struct task_struct *spusched_task; + static struct timer_list spusched_timer; + static struct timer_list spuloadavg_timer; + +-/* +- * Priority of a normal, non-rt, non-niced'd process (aka nice level 0). +- */ +-#define NORMAL_PRIO 120 +- + /* + * Frequency of the spu scheduler tick. By default we do one SPU scheduler + * tick for every 10 CPU scheduler ticks. +diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig +index 2d3f963fd6f1..5f41ead019b1 100644 +--- a/arch/x86/Kconfig ++++ b/arch/x86/Kconfig +@@ -1006,6 +1006,22 @@ config NR_CPUS + config SCHED_SMT + def_bool y if SMP + ++config SMT_NICE ++ bool "SMT (Hyperthreading) aware nice priority and policy support" ++ depends on SCHED_PDS && SCHED_SMT ++ default y ++ ---help--- ++ Enabling Hyperthreading on Intel CPUs decreases the effectiveness ++ of the use of 'nice' levels and different scheduling policies ++ (e.g. realtime) due to sharing of CPU power between hyperthreads. ++ SMT nice support makes each logical CPU aware of what is running on ++ its hyperthread siblings, maintaining appropriate distribution of ++ CPU according to nice levels and scheduling policies at the expense ++ of slightly increased overhead. ++ ++ If unsure say Y here. ++ ++ + config SCHED_MC + def_bool y + prompt "Multi-core scheduler support" +diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c +index 737ff3b9c2c0..b5bc5a1b6de7 100644 +--- a/drivers/cpufreq/cpufreq_conservative.c ++++ b/drivers/cpufreq/cpufreq_conservative.c +@@ -28,8 +28,8 @@ struct cs_dbs_tuners { + }; + + /* Conservative governor macros */ +-#define DEF_FREQUENCY_UP_THRESHOLD (80) +-#define DEF_FREQUENCY_DOWN_THRESHOLD (20) ++#define DEF_FREQUENCY_UP_THRESHOLD (63) ++#define DEF_FREQUENCY_DOWN_THRESHOLD (26) + #define DEF_FREQUENCY_STEP (5) + #define DEF_SAMPLING_DOWN_FACTOR (1) + #define MAX_SAMPLING_DOWN_FACTOR (10) +diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c +index 82a4d37ddecb..1130e0f5db72 100644 +--- a/drivers/cpufreq/cpufreq_ondemand.c ++++ b/drivers/cpufreq/cpufreq_ondemand.c +@@ -18,7 +18,7 @@ + #include "cpufreq_ondemand.h" + + /* On-demand governor macros */ +-#define DEF_FREQUENCY_UP_THRESHOLD (80) ++#define DEF_FREQUENCY_UP_THRESHOLD (63) + #define DEF_SAMPLING_DOWN_FACTOR (1) + #define MAX_SAMPLING_DOWN_FACTOR (100000) + #define MICRO_FREQUENCY_UP_THRESHOLD (95) +@@ -127,7 +127,7 @@ static void dbs_freq_increase(struct cpufreq_policy *policy, unsigned int freq) + } + + /* +- * Every sampling_rate, we check, if current idle time is less than 20% ++ * Every sampling_rate, we check, if current idle time is less than 37% + * (default), then we try to increase frequency. Else, we adjust the frequency + * proportional to load. 
+ */ +diff --git a/fs/proc/base.c b/fs/proc/base.c +index eb2255e95f62..62b8cedbccb6 100644 +--- a/fs/proc/base.c ++++ b/fs/proc/base.c +@@ -479,7 +479,7 @@ static int proc_pid_schedstat(struct seq_file *m, struct pid_namespace *ns, + seq_puts(m, "0 0 0\n"); + else + seq_printf(m, "%llu %llu %lu\n", +- (unsigned long long)task->se.sum_exec_runtime, ++ (unsigned long long)tsk_seruntime(task), + (unsigned long long)task->sched_info.run_delay, + task->sched_info.pcount); + +diff --git a/include/linux/init_task.h b/include/linux/init_task.h +index 2c620d7ac432..1a7987c40c80 100644 +--- a/include/linux/init_task.h ++++ b/include/linux/init_task.h +@@ -36,7 +36,11 @@ extern struct cred init_cred; + #define INIT_PREV_CPUTIME(x) + #endif + ++#ifdef CONFIG_SCHED_PDS ++#define INIT_TASK_COMM "PDS" ++#else + #define INIT_TASK_COMM "swapper" ++#endif /* !CONFIG_SCHED_PDS */ + + /* Attach to the init_task data structure for proper alignment */ + #ifdef CONFIG_ARCH_TASK_STRUCT_ON_STACK +diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h +index fed6ba96c527..f03a5ee419a1 100644 +--- a/include/linux/jiffies.h ++++ b/include/linux/jiffies.h +@@ -169,7 +169,7 @@ static inline u64 get_jiffies_64(void) + * Have the 32 bit jiffies value wrap 5 minutes after boot + * so jiffies wrap bugs show up earlier. + */ +-#define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ)) ++#define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-10*HZ)) + + /* + * Change timeval to jiffies, trying to avoid the +diff --git a/kernel/smp.c b/kernel/smp.c +index 4418f5cb8324..2b51afac5b06 100644 +--- a/kernel/smp.c ++++ b/kernel/smp.c +@@ -669,12 +669,14 @@ void __init smp_init(void) + BUILD_BUG_ON(offsetof(struct irq_work, flags) != + offsetof(struct __call_single_data, flags)); + ++#ifndef CONFIG_SCHED_PDS + /* + * Assert the CSD_TYPE_TTWU layout is similar enough + * for task_struct to be on the @call_single_queue. + */ + BUILD_BUG_ON(offsetof(struct task_struct, wake_entry_type) - offsetof(struct task_struct, wake_entry) != + offsetof(struct __call_single_data, flags) - offsetof(struct __call_single_data, llist)); ++#endif /* !CONFIG_SCHED_PDS */ + + idle_threads_init(); + cpuhp_threads_init(); +diff --git a/include/linux/sched.h b/include/linux/sched.h +index 4418f5cb8324..2b51afac5b06 100644 +--- a/include/linux/sched.h ++++ b/include/linux/sched.h +@@ -31,6 +31,7 @@ + #include + #include + #include ++#include + + /* task_struct member predeclarations (sorted alphabetically): */ + struct audit_context; +@@ -652,10 +653,14 @@ struct task_struct { + unsigned int flags; + unsigned int ptrace; + +-#ifdef CONFIG_SMP ++#if defined(CONFIG_SMP) && !defined(CONFIG_SCHED_PDS) + struct llist_node wake_entry; + unsigned int wake_entry_type; ++#endif ++#if defined(CONFIG_SMP) || defined(CONFIG_SCHED_PDS) + int on_cpu; ++#endif ++#ifdef CONFIG_SMP + #ifdef CONFIG_THREAD_INFO_IN_TASK + /* Current CPU: */ + unsigned int cpu; +@@ -663,6 +668,7 @@ struct task_struct { + unsigned long wakee_flip_decay_ts; + struct task_struct *last_wakee; + ++#ifndef CONFIG_SCHED_PDS + /* + * recent_used_cpu is initially set as the last CPU used by a task + * that wakes affine another task. Waker/wakee relationships can +@@ -671,6 +677,7 @@ struct task_struct { + * used CPU that may be idle. 
+ */ + int recent_used_cpu; ++#endif /* CONFIG_SCHED_PDS */ + int wake_cpu; + #endif + int on_rq; +@@ -680,13 +687,27 @@ struct task_struct { + int normal_prio; + unsigned int rt_priority; + ++#ifdef CONFIG_SCHED_PDS ++ int time_slice; ++ u64 deadline; ++ /* skip list level */ ++ int sl_level; ++ /* skip list node */ ++ struct skiplist_node sl_node; ++ /* 8bits prio and 56bits deadline for quick processing */ ++ u64 priodl; ++ u64 last_ran; ++ /* sched_clock time spent running */ ++ u64 sched_time; ++#else /* CONFIG_SCHED_PDS */ + const struct sched_class *sched_class; + struct sched_entity se; + struct sched_rt_entity rt; ++ struct sched_dl_entity dl; ++#endif + #ifdef CONFIG_CGROUP_SCHED + struct task_group *sched_task_group; + #endif +- struct sched_dl_entity dl; + + #ifdef CONFIG_UCLAMP_TASK + /* Clamp values requested for a scheduling entity */ +@@ -1306,6 +1327,29 @@ struct task_struct { + */ + }; + ++#ifdef CONFIG_SCHED_PDS ++void cpu_scaling(int cpu); ++void cpu_nonscaling(int cpu); ++#define tsk_seruntime(t) ((t)->sched_time) ++/* replace the uncertian rt_timeout with 0UL */ ++#define tsk_rttimeout(t) (0UL) ++ ++#define task_running_idle(p) ((p)->prio == IDLE_PRIO) ++#else /* CFS */ ++extern int runqueue_is_locked(int cpu); ++static inline void cpu_scaling(int cpu) ++{ ++} ++ ++static inline void cpu_nonscaling(int cpu) ++{ ++} ++#define tsk_seruntime(t) ((t)->se.sum_exec_runtime) ++#define tsk_rttimeout(t) ((t)->rt.timeout) ++ ++#define iso_task(p) (false) ++#endif /* CONFIG_SCHED_PDS */ ++ + static inline struct pid *task_pid(struct task_struct *task) + { + return task->thread_pid; +diff --git a/include/linux/sched/deadline.h b/include/linux/sched/deadline.h +index 1aff00b65f3c..a5e5fc2c9170 100644 +--- a/include/linux/sched/deadline.h ++++ b/include/linux/sched/deadline.h +@@ -1,5 +1,22 @@ + /* SPDX-License-Identifier: GPL-2.0 */ + ++#ifdef CONFIG_SCHED_PDS ++ ++#define __tsk_deadline(p) ((p)->deadline) ++ ++static inline int dl_prio(int prio) ++{ ++ return 1; ++} ++ ++static inline int dl_task(struct task_struct *p) ++{ ++ return 1; ++} ++#else ++ ++#define __tsk_deadline(p) ((p)->dl.deadline) ++ + /* + * SCHED_DEADLINE tasks has negative priorities, reflecting + * the fact that any of them has higher prio than RT and +@@ -19,6 +36,7 @@ static inline int dl_task(struct task_struct *p) + { + return dl_prio(p->prio); + } ++#endif /* CONFIG_SCHED_PDS */ + + static inline bool dl_time_before(u64 a, u64 b) + { +diff --git a/include/linux/sched/prio.h b/include/linux/sched/prio.h +index 7d64feafc408..fba04bb91492 100644 +--- a/include/linux/sched/prio.h ++++ b/include/linux/sched/prio.h +@@ -20,7 +20,18 @@ + */ + + #define MAX_USER_RT_PRIO 100 ++ ++#ifdef CONFIG_SCHED_PDS ++#define ISO_PRIO (MAX_USER_RT_PRIO) ++ ++#define MAX_RT_PRIO ((MAX_USER_RT_PRIO) + 1) ++ ++#define NORMAL_PRIO (MAX_RT_PRIO) ++#define IDLE_PRIO ((MAX_RT_PRIO) + 1) ++#define PRIO_LIMIT ((IDLE_PRIO) + 1) ++#else /* !CONFIG_SCHED_PDS */ + #define MAX_RT_PRIO MAX_USER_RT_PRIO ++#endif /* CONFIG_SCHED_PDS */ + + #define MAX_PRIO (MAX_RT_PRIO + NICE_WIDTH) + #define DEFAULT_PRIO (MAX_RT_PRIO + NICE_WIDTH / 2) +diff --git a/include/linux/sched/rt.h b/include/linux/sched/rt.h +index e5af028c08b4..a96012e6f15e 100644 +--- a/include/linux/sched/rt.h ++++ b/include/linux/sched/rt.h +@@ -24,8 +24,10 @@ static inline bool task_is_realtime(struct task_struct *tsk) + + if (policy == SCHED_FIFO || policy == SCHED_RR) + return true; ++#ifndef CONFIG_SCHED_PDS + if (policy == SCHED_DEADLINE) + return true; ++#endif + return 
false; + } + +diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h +index 38359071236a..90328ccd527f 100644 +--- a/include/linux/sched/task.h ++++ b/include/linux/sched/task.h +@@ -106,7 +106,7 @@ extern long kernel_wait4(pid_t, int __user *, int, struct rusage *); + extern void free_task(struct task_struct *tsk); + + /* sched_exec is called by processes performing an exec */ +-#ifdef CONFIG_SMP ++#if defined(CONFIG_SMP) && !defined(CONFIG_SCHED_PDS) + extern void sched_exec(void); + #else + #define sched_exec() {} +diff --git a/include/linux/skip_list.h b/include/linux/skip_list.h +new file mode 100644 +index 000000000000..713fedd8034f +--- /dev/null ++++ b/include/linux/skip_list.h +@@ -0,0 +1,177 @@ ++/* ++ Copyright (C) 2016 Alfred Chen. ++ ++ Code based on Con Kolivas's skip list implementation for BFS, and ++ which is based on example originally by William Pugh. ++ ++Skip Lists are a probabilistic alternative to balanced trees, as ++described in the June 1990 issue of CACM and were invented by ++William Pugh in 1987. ++ ++A couple of comments about this implementation: ++ ++This file only provides a infrastructure of skip list. ++ ++skiplist_node is embedded into container data structure, to get rid the ++dependency of kmalloc/kfree operation in scheduler code. ++ ++A customized search function should be defined using DEFINE_SKIPLIST_INSERT ++macro and be used for skip list insert operation. ++ ++Random Level is also not defined in this file, instead, it should be customized ++implemented and set to node->level then pass to the customized skiplist_insert ++function. ++ ++Levels start at zero and go up to (NUM_SKIPLIST_LEVEL -1) ++ ++NUM_SKIPLIST_LEVEL in this implementation is 8 instead of origin 16, ++considering that there will be 256 entries to enable the top level when using ++random level p=0.5, and that number is more than enough for a run queue usage ++in a scheduler usage. And it also help to reduce the memory usage of the ++embedded skip list node in task_struct to about 50%. ++ ++The insertion routine has been implemented so as to use the ++dirty hack described in the CACM paper: if a random level is ++generated that is more than the current maximum level, the ++current maximum level plus one is used instead. ++ ++BFS Notes: In this implementation of skiplists, there are bidirectional ++next/prev pointers and the insert function returns a pointer to the actual ++node the value is stored. The key here is chosen by the scheduler so as to ++sort tasks according to the priority list requirements and is no longer used ++by the scheduler after insertion. The scheduler lookup, however, occurs in ++O(1) time because it is always the first item in the level 0 linked list. ++Since the task struct stores a copy of the node pointer upon skiplist_insert, ++it can also remove it much faster than the original implementation with the ++aid of prev<->next pointer manipulation and no searching. 
++*/ ++#ifndef _LINUX_SKIP_LIST_H ++#define _LINUX_SKIP_LIST_H ++ ++#include ++ ++#define NUM_SKIPLIST_LEVEL (8) ++ ++struct skiplist_node { ++ int level; /* Levels in this node */ ++ struct skiplist_node *next[NUM_SKIPLIST_LEVEL]; ++ struct skiplist_node *prev[NUM_SKIPLIST_LEVEL]; ++}; ++ ++#define SKIPLIST_NODE_INIT(name) { 0,\ ++ {&name, &name, &name, &name,\ ++ &name, &name, &name, &name},\ ++ {&name, &name, &name, &name,\ ++ &name, &name, &name, &name},\ ++ } ++ ++static inline void INIT_SKIPLIST_NODE(struct skiplist_node *node) ++{ ++ /* only level 0 ->next matters in skiplist_empty()*/ ++ WRITE_ONCE(node->next[0], node); ++} ++ ++/** ++ * FULL_INIT_SKIPLIST_NODE -- fully init a skiplist_node, expecially for header ++ * @node: the skip list node to be inited. ++ */ ++static inline void FULL_INIT_SKIPLIST_NODE(struct skiplist_node *node) ++{ ++ int i; ++ ++ node->level = 0; ++ for (i = 0; i < NUM_SKIPLIST_LEVEL; i++) { ++ WRITE_ONCE(node->next[i], node); ++ node->prev[i] = node; ++ } ++} ++ ++/** ++ * skiplist_empty - test whether a skip list is empty ++ * @head: the skip list to test. ++ */ ++static inline int skiplist_empty(const struct skiplist_node *head) ++{ ++ return READ_ONCE(head->next[0]) == head; ++} ++ ++/** ++ * skiplist_entry - get the struct for this entry ++ * @ptr: the &struct skiplist_node pointer. ++ * @type: the type of the struct this is embedded in. ++ * @member: the name of the skiplist_node within the struct. ++ */ ++#define skiplist_entry(ptr, type, member) \ ++ container_of(ptr, type, member) ++ ++/** ++ * DEFINE_SKIPLIST_INSERT_FUNC -- macro to define a customized skip list insert ++ * function, which takes two parameters, first one is the header node of the ++ * skip list, second one is the skip list node to be inserted ++ * @func_name: the customized skip list insert function name ++ * @search_func: the search function to be used, which takes two parameters, ++ * 1st one is the itrator of skiplist_node in the list, the 2nd is the skip list ++ * node to be inserted, the function should return true if search should be ++ * continued, otherwise return false. ++ * Returns 1 if @node is inserted as the first item of skip list at level zero, ++ * otherwise 0 ++ */ ++#define DEFINE_SKIPLIST_INSERT_FUNC(func_name, search_func)\ ++static inline int func_name(struct skiplist_node *head, struct skiplist_node *node)\ ++{\ ++ struct skiplist_node *update[NUM_SKIPLIST_LEVEL];\ ++ struct skiplist_node *p, *q;\ ++ int k = head->level;\ ++\ ++ p = head;\ ++ do {\ ++ while (q = p->next[k], q != head && search_func(q, node))\ ++ p = q;\ ++ update[k] = p;\ ++ } while (--k >= 0);\ ++\ ++ k = node->level;\ ++ if (unlikely(k > head->level)) {\ ++ node->level = k = ++head->level;\ ++ update[k] = head;\ ++ }\ ++\ ++ do {\ ++ p = update[k];\ ++ q = p->next[k];\ ++ node->next[k] = q;\ ++ p->next[k] = node;\ ++ node->prev[k] = p;\ ++ q->prev[k] = node;\ ++ } while (--k >= 0);\ ++\ ++ return (p == head);\ ++} ++ ++/** ++ * skiplist_del_init -- delete skip list node from a skip list and reset it's ++ * init state ++ * @head: the header node of the skip list to be deleted from. ++ * @node: the skip list node to be deleted, the caller need to ensure @node is ++ * in skip list which @head represent. 
++ * Returns 1 if @node is the first item of skip level at level zero, otherwise 0 ++ */ ++static inline int ++skiplist_del_init(struct skiplist_node *head, struct skiplist_node *node) ++{ ++ int l, m = node->level; ++ ++ for (l = 0; l <= m; l++) { ++ node->prev[l]->next[l] = node->next[l]; ++ node->next[l]->prev[l] = node->prev[l]; ++ } ++ if (m == head->level && m > 0) { ++ while (head->next[m] == head && m > 0) ++ m--; ++ head->level = m; ++ } ++ INIT_SKIPLIST_NODE(node); ++ ++ return (node->prev[0] == head); ++} ++#endif /* _LINUX_SKIP_LIST_H */ +diff --git a/include/uapi/linux/sched.h b/include/uapi/linux/sched.h +index 3bac0a8ceab2..d6d384ddb57d 100644 +--- a/include/uapi/linux/sched.h ++++ b/include/uapi/linux/sched.h +@@ -115,7 +115,10 @@ struct clone_args { + #define SCHED_FIFO 1 + #define SCHED_RR 2 + #define SCHED_BATCH 3 +-/* SCHED_ISO: reserved but not implemented yet */ ++/* SCHED_ISO: Implemented in BFS/MuQSSPDS only */ ++#ifdef CONFIG_SCHED_PDS ++#define SCHED_ISO 4 ++#endif + #define SCHED_IDLE 5 + #define SCHED_DEADLINE 6 + +diff --git a/init/Kconfig b/init/Kconfig +index 74a5ac65644f..e4fd406b58dd 100644 +--- a/init/Kconfig ++++ b/init/Kconfig +@@ -61,6 +61,21 @@ config THREAD_INFO_IN_TASK + + menu "General setup" + ++config SCHED_PDS ++ bool "PDS-mq cpu scheduler" ++ help ++ The Priority and Deadline based Skip list multiple queue CPU ++ Scheduler for excellent interactivity and responsiveness on the ++ desktop and solid scalability on normal hardware and commodity ++ servers. ++ ++ Currently incompatible with the Group CPU scheduler, and RCU TORTURE ++ TEST so these options are disabled. ++ ++ Say Y here. ++ default y ++ ++ + config BROKEN + bool + +@@ -777,6 +792,7 @@ config NUMA_BALANCING + depends on ARCH_SUPPORTS_NUMA_BALANCING + depends on !ARCH_WANT_NUMA_VARIABLE_LOCALITY + depends on SMP && NUMA && MIGRATION ++ depends on !SCHED_PDS + help + This option adds support for automatic NUMA aware memory/task placement. + The mechanism is quite primitive and is based on migrating memory when +@@ -878,7 +894,7 @@ menuconfig CGROUP_SCHED + bandwidth allocation to such task groups. It uses cgroups to group + tasks. + +-if CGROUP_SCHED ++if CGROUP_SCHED && !SCHED_PDS + config FAIR_GROUP_SCHED + bool "Group scheduling for SCHED_OTHER" + depends on CGROUP_SCHED +@@ -1007,6 +1023,7 @@ config CGROUP_DEVICE + + config CGROUP_CPUACCT + bool "Simple CPU accounting controller" ++ depends on !SCHED_PDS + help + Provides a simple controller for monitoring the + total CPU consumed by the tasks in a cgroup. 
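Stepping back to the include/linux/skip_list.h interface introduced above: it expects the user to embed a skiplist_node in a container structure, define a search callback, and instantiate an insert function with DEFINE_SKIPLIST_INSERT_FUNC. Below is a minimal usage sketch with made-up names (the real consumer is task_struct, wired up in kernel/sched/pds.c later in this patch):

#include <linux/skip_list.h>

/* Toy element ordered by an integer key; the node is embedded, no kmalloc. */
struct demo_item {
        int key;
        struct skiplist_node sl_node;
};

/* Keep walking while the iterator's key is <= the key of the new node. */
static inline bool demo_search(struct skiplist_node *it,
                               struct skiplist_node *node)
{
        return skiplist_entry(it, struct demo_item, sl_node)->key <=
               skiplist_entry(node, struct demo_item, sl_node)->key;
}

/* Expands to: static inline int demo_insert(head, node) */
DEFINE_SKIPLIST_INSERT_FUNC(demo_insert, demo_search);

/* @head is assumed to have been initialised with FULL_INIT_SKIPLIST_NODE(). */
static void demo_add_and_remove(struct skiplist_node *head,
                                struct demo_item *item)
{
        item->sl_node.level = 0;        /* caller picks the level; PDS derives
                                           it pseudo-randomly per task */
        if (demo_insert(head, &item->sl_node)) {
                /* item is now the first entry at level zero, i.e. what an
                   O(1) "front of the queue" lookup would return */
        }

        /* removal is O(k) through the embedded node, no search needed */
        skiplist_del_init(head, &item->sl_node);
}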
+@@ -1134,6 +1151,7 @@ config CHECKPOINT_RESTORE + + config SCHED_AUTOGROUP + bool "Automatic process group scheduling" ++ depends on !SCHED_PDS + select CGROUPS + select CGROUP_SCHED + select FAIR_GROUP_SCHED +diff --git a/init/init_task.c b/init/init_task.c +index bd403ed3e418..162d3deddd45 100644 +--- a/init/init_task.c ++++ b/init/init_task.c +@@ -59,6 +59,126 @@ struct task_struct init_task + __init_task_data + #endif + = { ++#ifdef CONFIG_SCHED_PDS ++#ifdef CONFIG_THREAD_INFO_IN_TASK ++ .thread_info = INIT_THREAD_INFO(init_task), ++ .stack_refcount = ATOMIC_INIT(1), ++#endif ++ .state = 0, ++ .stack = init_stack, ++ .usage = ATOMIC_INIT(2), ++ .flags = PF_KTHREAD, ++ .prio = NORMAL_PRIO, ++ .static_prio = MAX_PRIO - 20, ++ .normal_prio = NORMAL_PRIO, ++ .deadline = 0, /* PDS only */ ++ .policy = SCHED_NORMAL, ++ .cpus_ptr = &init_task.cpus_mask, ++ .cpus_mask = CPU_MASK_ALL, ++ .nr_cpus_allowed= NR_CPUS, ++ .mm = NULL, ++ .active_mm = &init_mm, ++ .restart_block = { ++ .fn = do_no_restart_syscall, ++ }, ++ .sl_level = 0, /* PDS only */ ++ .sl_node = SKIPLIST_NODE_INIT(init_task.sl_node), /* PDS only */ ++ .time_slice = HZ, /* PDS only */ ++ .tasks = LIST_HEAD_INIT(init_task.tasks), ++#ifdef CONFIG_SMP ++ .pushable_tasks = PLIST_NODE_INIT(init_task.pushable_tasks, MAX_PRIO), ++#endif ++#ifdef CONFIG_CGROUP_SCHED ++ .sched_task_group = &root_task_group, ++#endif ++ .ptraced = LIST_HEAD_INIT(init_task.ptraced), ++ .ptrace_entry = LIST_HEAD_INIT(init_task.ptrace_entry), ++ .real_parent = &init_task, ++ .parent = &init_task, ++ .children = LIST_HEAD_INIT(init_task.children), ++ .sibling = LIST_HEAD_INIT(init_task.sibling), ++ .group_leader = &init_task, ++ RCU_POINTER_INITIALIZER(real_cred, &init_cred), ++ RCU_POINTER_INITIALIZER(cred, &init_cred), ++ .comm = INIT_TASK_COMM, ++ .thread = INIT_THREAD, ++ .fs = &init_fs, ++ .files = &init_files, ++ .signal = &init_signals, ++ .sighand = &init_sighand, ++ .nsproxy = &init_nsproxy, ++ .pending = { ++ .list = LIST_HEAD_INIT(init_task.pending.list), ++ .signal = {{0}} ++ }, ++ .blocked = {{0}}, ++ .alloc_lock = __SPIN_LOCK_UNLOCKED(init_task.alloc_lock), ++ .journal_info = NULL, ++ INIT_CPU_TIMERS(init_task) ++ .pi_lock = __RAW_SPIN_LOCK_UNLOCKED(init_task.pi_lock), ++ .timer_slack_ns = 50000, /* 50 usec default slack */ ++ .thread_pid = &init_struct_pid, ++ .thread_group = LIST_HEAD_INIT(init_task.thread_group), ++ .thread_node = LIST_HEAD_INIT(init_signals.thread_head), ++#ifdef CONFIG_AUDITSYSCALL ++ .loginuid = INVALID_UID, ++ .sessionid = AUDIT_SID_UNSET, ++#endif ++#ifdef CONFIG_PERF_EVENTS ++ .perf_event_mutex = __MUTEX_INITIALIZER(init_task.perf_event_mutex), ++ .perf_event_list = LIST_HEAD_INIT(init_task.perf_event_list), ++#endif ++#ifdef CONFIG_PREEMPT_RCU ++ .rcu_read_lock_nesting = 0, ++ .rcu_read_unlock_special.s = 0, ++ .rcu_node_entry = LIST_HEAD_INIT(init_task.rcu_node_entry), ++ .rcu_blocked_node = NULL, ++#endif ++#ifdef CONFIG_TASKS_RCU ++ .rcu_tasks_holdout = false, ++ .rcu_tasks_holdout_list = LIST_HEAD_INIT(init_task.rcu_tasks_holdout_list), ++ .rcu_tasks_idle_cpu = -1, ++#endif ++#ifdef CONFIG_CPUSETS ++ .mems_allowed_seq = SEQCNT_ZERO(init_task.mems_allowed_seq), ++#endif ++#ifdef CONFIG_RT_MUTEXES ++ .pi_waiters = RB_ROOT_CACHED, ++ .pi_top_task = NULL, ++#endif ++ INIT_PREV_CPUTIME(init_task) ++#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN ++ .vtime.seqcount = SEQCNT_ZERO(init_task.vtime_seqcount), ++ .vtime.starttime = 0, ++ .vtime.state = VTIME_SYS, ++#endif ++#ifdef CONFIG_NUMA_BALANCING ++ .numa_preferred_nid = -1, ++ 
.numa_group = NULL, ++ .numa_faults = NULL, ++#endif ++#ifdef CONFIG_KASAN ++ .kasan_depth = 1, ++#endif ++#ifdef CONFIG_TRACE_IRQFLAGS ++ .softirqs_enabled = 1, ++#endif ++#ifdef CONFIG_LOCKDEP ++ .lockdep_recursion = 0, ++#endif ++#ifdef CONFIG_FUNCTION_GRAPH_TRACER ++ .ret_stack = NULL, ++#endif ++#if defined(CONFIG_TRACING) && defined(CONFIG_PREEMPT) ++ .trace_recursion = 0, ++#endif ++#ifdef CONFIG_LIVEPATCH ++ .patch_state = KLP_UNDEFINED, ++#endif ++#ifdef CONFIG_SECURITY ++ .security = NULL, ++#endif ++#else /* CONFIG_SCHED_PDS */ + #ifdef CONFIG_THREAD_INFO_IN_TASK + .thread_info = INIT_THREAD_INFO(init_task), + .stack_refcount = REFCOUNT_INIT(1), +@@ -182,6 +302,7 @@ struct task_struct init_task + #ifdef CONFIG_SECURITY + .security = NULL, + #endif ++#endif /* CONFIG_SCHED_PDS */ + }; + EXPORT_SYMBOL(init_task); + +diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c +index 729d3a5c772e..10a7c52b90d5 100644 +--- a/kernel/cgroup/cpuset.c ++++ b/kernel/cgroup/cpuset.c +@@ -636,7 +636,7 @@ static int validate_change(struct cpuset *cur, struct cpuset *trial) + return ret; + } + +-#ifdef CONFIG_SMP ++#if defined(CONFIG_SMP) && !defined(CONFIG_SCHED_PDS) + /* + * Helper routine for generate_sched_domains(). + * Do cpusets a, b have overlapping effective cpus_allowed masks? +@@ -1009,7 +1009,7 @@ static void rebuild_sched_domains_locked(void) + /* Have scheduler rebuild the domains */ + partition_and_rebuild_sched_domains(ndoms, doms, attr); + } +-#else /* !CONFIG_SMP */ ++#else /* !CONFIG_SMP || CONFIG_SCHED_PDS */ + static void rebuild_sched_domains_locked(void) + { + } +diff --git a/kernel/delayacct.c b/kernel/delayacct.c +index 27725754ac99..769d773c7182 100644 +--- a/kernel/delayacct.c ++++ b/kernel/delayacct.c +@@ -106,7 +106,7 @@ int __delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk) + */ + t1 = tsk->sched_info.pcount; + t2 = tsk->sched_info.run_delay; +- t3 = tsk->se.sum_exec_runtime; ++ t3 = tsk_seruntime(tsk); + + d->cpu_count += t1; + +diff --git a/kernel/exit.c b/kernel/exit.c +index ce2a75bc0ade..f0f864bc1ab9 100644 +--- a/kernel/exit.c ++++ b/kernel/exit.c +@@ -122,7 +122,7 @@ static void __exit_signal(struct task_struct *tsk) + sig->curr_target = next_thread(tsk); + } + +- add_device_randomness((const void*) &tsk->se.sum_exec_runtime, ++ add_device_randomness((const void*) &tsk_seruntime(tsk), + sizeof(unsigned long long)); + + /* +@@ -143,7 +143,7 @@ static void __exit_signal(struct task_struct *tsk) + sig->inblock += task_io_get_inblock(tsk); + sig->oublock += task_io_get_oublock(tsk); + task_io_accounting_add(&sig->ioac, &tsk->ioac); +- sig->sum_sched_runtime += tsk->se.sum_exec_runtime; ++ sig->sum_sched_runtime += tsk_seruntime(tsk); + sig->nr_threads--; + __unhash_process(tsk, group_dead); + write_sequnlock(&sig->stats_lock); +diff --git a/kernel/livepatch/transition.c b/kernel/livepatch/transition.c +index f6310f848f34..b5de980c7d4e 100644 +--- a/kernel/livepatch/transition.c ++++ b/kernel/livepatch/transition.c +@@ -306,7 +306,11 @@ static bool klp_try_switch_task(struct task_struct *task) + */ + rq = task_rq_lock(task, &flags); + ++#ifdef CONFIG_SCHED_PDS ++ if (task_running(task) && task != current) { ++#else + if (task_running(rq, task) && task != current) { ++#endif + snprintf(err_buf, STACK_ERR_BUF_SIZE, + "%s: %s:%d is running\n", __func__, task->comm, + task->pid); +diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c +index c9f090d64f00..063d15a1ab8b 100644 +--- a/kernel/locking/rtmutex.c ++++ b/kernel/locking/rtmutex.c 
+@@ -229,7 +229,7 @@ static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock, + * Only use with rt_mutex_waiter_{less,equal}() + */ + #define task_to_waiter(p) \ +- &(struct rt_mutex_waiter){ .prio = (p)->prio, .deadline = (p)->dl.deadline } ++ &(struct rt_mutex_waiter){ .prio = (p)->prio, .deadline = __tsk_deadline(p) } + + static inline int + rt_mutex_waiter_less(struct rt_mutex_waiter *left, +@@ -680,7 +680,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task, + * the values of the node being removed. + */ + waiter->prio = task->prio; +- waiter->deadline = task->dl.deadline; ++ waiter->deadline = __tsk_deadline(task); + + rt_mutex_enqueue(lock, waiter); + +@@ -953,7 +953,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock, + waiter->task = task; + waiter->lock = lock; + waiter->prio = task->prio; +- waiter->deadline = task->dl.deadline; ++ waiter->deadline = __tsk_deadline(task); + + /* Get the top priority waiter on the lock */ + if (rt_mutex_has_waiters(lock)) +diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile +index 21fb5a5662b5..8ebe4e33fb5f 100644 +--- a/kernel/sched/Makefile ++++ b/kernel/sched/Makefile +@@ -16,15 +16,21 @@ ifneq ($(CONFIG_SCHED_OMIT_FRAME_POINTER),y) + CFLAGS_core.o := $(PROFILING) -fno-omit-frame-pointer + endif + +-obj-y += core.o loadavg.o clock.o cputime.o +-obj-y += idle.o fair.o rt.o deadline.o +-obj-y += wait.o wait_bit.o swait.o completion.o +- +-obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o topology.o stop_task.o pelt.o ++ifdef CONFIG_SCHED_PDS ++obj-y += pds.o ++else ++obj-y += core.o ++obj-y += fair.o rt.o deadline.o ++obj-$(CONFIG_SMP) += cpudeadline.o topology.o stop_task.o + obj-$(CONFIG_SCHED_AUTOGROUP) += autogroup.o +-obj-$(CONFIG_SCHEDSTATS) += stats.o + obj-$(CONFIG_SCHED_DEBUG) += debug.o + obj-$(CONFIG_CGROUP_CPUACCT) += cpuacct.o ++endif ++obj-y += loadavg.o clock.o cputime.o ++obj-y += idle.o ++obj-y += wait.o wait_bit.o swait.o completion.o ++obj-$(CONFIG_SMP) += cpupri.o pelt.o ++obj-$(CONFIG_SCHEDSTATS) += stats.o + obj-$(CONFIG_CPU_FREQ) += cpufreq.o + obj-$(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) += cpufreq_schedutil.o + obj-$(CONFIG_MEMBARRIER) += membarrier.o +diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c +index 7fbaee24c824..28377ad56248 100644 +--- a/kernel/sched/cpufreq_schedutil.c ++++ b/kernel/sched/cpufreq_schedutil.c +@@ -183,6 +183,7 @@ static unsigned int get_next_freq(struct sugov_policy *sg_policy, + return cpufreq_driver_resolve_freq(policy, freq); + } + ++#ifndef CONFIG_SCHED_PDS + /* + * This function computes an effective utilization for the given CPU, to be + * used for frequency selection given the linear relation: f = u * f_max. +@@ -300,6 +301,13 @@ static unsigned long sugov_get_util(struct sugov_cpu *sg_cpu) + + return schedutil_cpu_util(sg_cpu->cpu, util, max, FREQUENCY_UTIL, NULL); + } ++#else /* CONFIG_SCHED_PDS */ ++static unsigned long sugov_get_util(struct sugov_cpu *sg_cpu) ++{ ++ sg_cpu->max = arch_scale_cpu_capacity(sg_cpu->cpu); ++ return sg_cpu->max; ++} ++#endif + + /** + * sugov_iowait_reset() - Reset the IO boost status of a CPU. 
+@@ -443,7 +451,9 @@ static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; } + */ + static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu, struct sugov_policy *sg_policy) + { ++#ifndef CONFIG_SCHED_PDS + if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_dl) ++#endif + sg_policy->limits_changed = true; + } + +@@ -686,6 +696,7 @@ static int sugov_kthread_create(struct sugov_policy *sg_policy) + } + + ret = sched_setattr_nocheck(thread, &attr); ++ + if (ret) { + kthread_stop(thread); + pr_warn("%s: failed to set SCHED_DEADLINE\n", __func__); +@@ -916,6 +927,7 @@ static int __init sugov_register(void) + core_initcall(sugov_register); + + #ifdef CONFIG_ENERGY_MODEL ++#ifndef CONFIG_SCHED_PDS + extern bool sched_energy_update; + extern struct mutex sched_energy_mutex; + +@@ -946,4 +958,10 @@ void sched_cpufreq_governor_change(struct cpufreq_policy *policy, + } + + } ++#else /* CONFIG_SCHED_PDS */ ++void sched_cpufreq_governor_change(struct cpufreq_policy *policy, ++ struct cpufreq_governor *old_gov) ++{ ++} ++#endif + #endif +diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c +index ff9435dee1df..1377ea3d1b76 100644 +--- a/kernel/sched/cputime.c ++++ b/kernel/sched/cputime.c +@@ -122,7 +122,12 @@ void account_user_time(struct task_struct *p, u64 cputime) + p->utime += cputime; + account_group_user_time(p, cputime); + ++#ifdef CONFIG_SCHED_PDS ++ index = (task_nice(p) > 0 || task_running_idle(p)) ? CPUTIME_NICE : ++ CPUTIME_USER; ++#else + index = (task_nice(p) > 0) ? CPUTIME_NICE : CPUTIME_USER; ++#endif + + /* Add user time to cpustat. */ + task_group_account_field(p, index, cputime); +@@ -146,7 +151,11 @@ void account_guest_time(struct task_struct *p, u64 cputime) + p->gtime += cputime; + + /* Add guest time to cpustat. */ ++#ifdef CONFIG_SCHED_PDS ++ if (task_nice(p) > 0 || task_running_idle(p)) { ++#else + if (task_nice(p) > 0) { ++#endif + cpustat[CPUTIME_NICE] += cputime; + cpustat[CPUTIME_GUEST_NICE] += cputime; + } else { +@@ -269,7 +278,7 @@ static inline u64 account_other_time(u64 max) + #ifdef CONFIG_64BIT + static inline u64 read_sum_exec_runtime(struct task_struct *t) + { +- return t->se.sum_exec_runtime; ++ return tsk_seruntime(t); + } + #else + static u64 read_sum_exec_runtime(struct task_struct *t) +@@ -279,7 +288,7 @@ static u64 read_sum_exec_runtime(struct task_struct *t) + struct rq *rq; + + rq = task_rq_lock(t, &rf); +- ns = t->se.sum_exec_runtime; ++ ns = tsk_seruntime(t); + task_rq_unlock(rq, t, &rf); + + return ns; +@@ -658,7 +667,7 @@ void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev, + void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st) + { + struct task_cputime cputime = { +- .sum_exec_runtime = p->se.sum_exec_runtime, ++ .sum_exec_runtime = tsk_seruntime(p), + }; + + task_cputime(p, &cputime.utime, &cputime.stime); +diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c +index b743bf38f08f..16e5754af1cf 100644 +--- a/kernel/sched/idle.c ++++ b/kernel/sched/idle.c +@@ -361,6 +361,7 @@ void cpu_startup_entry(enum cpuhp_state state) + do_idle(); + } + ++#ifndef CONFIG_SCHED_PDS + /* + * idle-task scheduling class. 
+ */ +@@ -481,3 +482,4 @@ const struct sched_class idle_sched_class = { + .switched_to = switched_to_idle, + .update_curr = update_curr_idle, + }; ++#endif +diff --git a/kernel/sched/pds.c b/kernel/sched/pds.c +new file mode 100644 +index 000000000000..02d7d5a67c77 +--- /dev/null ++++ b/kernel/sched/pds.c +@@ -0,0 +1,6619 @@ ++/* ++ * kernel/sched/pds.c, was kernel/sched.c ++ * ++ * PDS-mq Core kernel scheduler code and related syscalls ++ * ++ * Copyright (C) 1991-2002 Linus Torvalds ++ * ++ * 2009-08-13 Brainfuck deadline scheduling policy by Con Kolivas deletes ++ * a whole lot of those previous things. ++ * 2017-09-06 Priority and Deadline based Skip list multiple queue kernel ++ * scheduler by Alfred Chen. ++ */ ++#include "pds_sched.h" ++ ++#include ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++ ++#include ++ ++#include "../workqueue_internal.h" ++#include "../../fs/io-wq.h" ++#include "../smpboot.h" ++ ++#include "pelt.h" ++#include "smp.h" ++ ++#define CREATE_TRACE_POINTS ++#include ++ ++ ++#define rt_prio(prio) ((prio) < MAX_RT_PRIO) ++#define rt_task(p) rt_prio((p)->prio) ++#define rt_policy(policy) ((policy) == SCHED_FIFO || \ ++ (policy) == SCHED_RR || \ ++ (policy) == SCHED_ISO) ++#define task_has_rt_policy(p) (rt_policy((p)->policy)) ++ ++#define idle_policy(policy) ((policy) == SCHED_IDLE) ++#define idleprio_task(p) unlikely(idle_policy((p)->policy)) ++ ++#define STOP_PRIO (MAX_RT_PRIO - 1) ++ ++/* ++ * Some helpers for converting to/from various scales. Use shifts to get ++ * approximate multiples of ten for less overhead. ++ */ ++#define JIFFIES_TO_NS(TIME) ((TIME) * (1000000000 / HZ)) ++#define JIFFY_NS (1000000000 / HZ) ++#define HALF_JIFFY_NS (1000000000 / HZ / 2) ++#define HALF_JIFFY_US (1000000 / HZ / 2) ++#define MS_TO_NS(TIME) ((TIME) << 20) ++#define MS_TO_US(TIME) ((TIME) << 10) ++#define NS_TO_MS(TIME) ((TIME) >> 20) ++#define NS_TO_US(TIME) ((TIME) >> 10) ++#define US_TO_NS(TIME) ((TIME) << 10) ++ ++#define RESCHED_US (100) /* Reschedule if less than this many μs left */ ++ ++enum { ++ BASE_CPU_AFFINITY_CHK_LEVEL = 1, ++#ifdef CONFIG_SCHED_SMT ++ SMT_CPU_AFFINITY_CHK_LEVEL_SPACE_HOLDER, ++#endif ++#ifdef CONFIG_SCHED_MC ++ MC_CPU_AFFINITY_CHK_LEVEL_SPACE_HOLDER, ++#endif ++ NR_CPU_AFFINITY_CHK_LEVEL ++}; ++ ++static inline void print_scheduler_version(void) ++{ ++ printk(KERN_INFO "pds: PDS-mq CPU Scheduler 0.99o by Alfred Chen and kept alive artificially by Tk-Glitch.\n"); ++} ++ ++/* ++ * This is the time all tasks within the same priority round robin. ++ * Value is in ms and set to a minimum of 6ms. Scales with number of cpus. ++ * Tunable via /proc interface. 
++ */ ++#define SCHED_DEFAULT_RR (4) ++int rr_interval __read_mostly = SCHED_DEFAULT_RR; ++ ++static int __init rr_interval_set(char *str) ++{ ++ u32 rr; ++ ++ pr_info("rr_interval: "); ++ if (kstrtouint(str, 0, &rr)) { ++ pr_cont("using default of %u, unable to parse %s\n", ++ rr_interval, str); ++ return 1; ++ } ++ ++ rr_interval = rr; ++ pr_cont("%d\n", rr_interval); ++ ++ return 1; ++} ++__setup("rr_interval=", rr_interval_set); ++ ++ ++static const u64 sched_prio2deadline[NICE_WIDTH] = { ++/* -20 */ 6291456, 6920601, 7612661, 8373927, 9211319, ++/* -15 */ 10132450, 11145695, 12260264, 13486290, 14834919, ++/* -10 */ 16318410, 17950251, 19745276, 21719803, 23891783, ++/* -5 */ 26280961, 28909057, 31799962, 34979958, 38477953, ++/* 0 */ 42325748, 46558322, 51214154, 56335569, 61969125, ++/* 5 */ 68166037, 74982640, 82480904, 90728994, 99801893, ++/* 10 */ 109782082, 120760290, 132836319, 146119950, 160731945, ++/* 15 */ 176805139, 194485652, 213934217, 235327638, 258860401 ++}; ++ ++/** ++ * sched_yield_type - Choose what sort of yield sched_yield will perform. ++ * 0: No yield. ++ * 1: Yield only to better priority/deadline tasks. (default) ++ * 2: Expire timeslice and recalculate deadline. ++ */ ++int sched_yield_type __read_mostly = 1; ++ ++/* ++ * The quota handed out to tasks of all priority levels when refilling their ++ * time_slice. ++ */ ++static inline int timeslice(void) ++{ ++ return MS_TO_US(rr_interval); ++} ++ ++#ifdef CONFIG_SMP ++enum { ++SCHED_RQ_EMPTY = 0, ++SCHED_RQ_IDLE, ++SCHED_RQ_NORMAL_0, ++SCHED_RQ_NORMAL_1, ++SCHED_RQ_NORMAL_2, ++SCHED_RQ_NORMAL_3, ++SCHED_RQ_NORMAL_4, ++SCHED_RQ_NORMAL_5, ++SCHED_RQ_NORMAL_6, ++SCHED_RQ_NORMAL_7, ++SCHED_RQ_ISO, ++SCHED_RQ_RT, ++NR_SCHED_RQ_QUEUED_LEVEL ++}; ++ ++static cpumask_t sched_rq_queued_masks[NR_SCHED_RQ_QUEUED_LEVEL] ++____cacheline_aligned_in_smp; ++ ++static DECLARE_BITMAP(sched_rq_queued_masks_bitmap, NR_SCHED_RQ_QUEUED_LEVEL) ++____cacheline_aligned_in_smp; ++ ++static cpumask_t sched_rq_pending_masks[NR_SCHED_RQ_QUEUED_LEVEL] ++____cacheline_aligned_in_smp; ++ ++static DECLARE_BITMAP(sched_rq_pending_masks_bitmap, NR_SCHED_RQ_QUEUED_LEVEL) ++____cacheline_aligned_in_smp; ++ ++DEFINE_PER_CPU(cpumask_t [NR_CPU_AFFINITY_CHK_LEVEL], sched_cpu_affinity_chk_masks); ++DEFINE_PER_CPU(cpumask_t *, sched_cpu_llc_start_mask); ++DEFINE_PER_CPU(cpumask_t *, sched_cpu_affinity_chk_end_masks); ++ ++#ifdef CONFIG_SCHED_SMT ++DEFINE_PER_CPU(int, sched_sibling_cpu); ++DEFINE_STATIC_KEY_FALSE(sched_smt_present); ++EXPORT_SYMBOL_GPL(sched_smt_present); ++ ++static cpumask_t sched_cpu_sg_idle_mask ____cacheline_aligned_in_smp; ++ ++#ifdef CONFIG_SMT_NICE ++/* ++ * Preemptible sibling group mask ++ * Which all sibling cpus are running at PRIO_LIMIT or IDLE_PRIO ++ */ ++static cpumask_t sched_cpu_psg_mask ____cacheline_aligned_in_smp; ++/* ++ * SMT supressed mask ++ * When a cpu is running task with NORMAL/ISO/RT policy, its sibling cpu ++ * will be supressed to run IDLE priority task. ++ */ ++static cpumask_t sched_smt_supressed_mask ____cacheline_aligned_in_smp; ++#endif /* CONFIG_SMT_NICE */ ++#endif ++ ++static int sched_rq_prio[NR_CPUS] ____cacheline_aligned; ++ ++/* ++ * Keep a unique ID per domain (we use the first CPUs number in the cpumask of ++ * the domain), this allows us to quickly tell if two cpus are in the same cache ++ * domain, see cpus_share_cache(). 
++ */ ++DEFINE_PER_CPU(int, sd_llc_id); ++ ++int __weak arch_sd_sibling_asym_packing(void) ++{ ++ return 0*SD_ASYM_PACKING; ++} ++#else ++struct rq *uprq; ++#endif /* CONFIG_SMP */ ++ ++static DEFINE_MUTEX(sched_hotcpu_mutex); ++ ++DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues); ++ ++#ifndef prepare_arch_switch ++# define prepare_arch_switch(next) do { } while (0) ++#endif ++#ifndef finish_arch_post_lock_switch ++# define finish_arch_post_lock_switch() do { } while (0) ++#endif ++ ++/* ++ * Context: p->pi_lock ++ */ ++static inline struct rq ++*__task_access_lock(struct task_struct *p, raw_spinlock_t **plock) ++{ ++ struct rq *rq; ++ for (;;) { ++ rq = task_rq(p); ++ if (p->on_cpu || task_on_rq_queued(p)) { ++ raw_spin_lock(&rq->lock); ++ if (likely((p->on_cpu || task_on_rq_queued(p)) ++ && rq == task_rq(p))) { ++ *plock = &rq->lock; ++ return rq; ++ } ++ raw_spin_unlock(&rq->lock); ++ } else if (task_on_rq_migrating(p)) { ++ do { ++ cpu_relax(); ++ } while (unlikely(task_on_rq_migrating(p))); ++ } else { ++ *plock = NULL; ++ return rq; ++ } ++ } ++} ++ ++static inline void ++__task_access_unlock(struct task_struct *p, raw_spinlock_t *lock) ++{ ++ if (NULL != lock) ++ raw_spin_unlock(lock); ++} ++ ++static inline struct rq ++*task_access_lock_irqsave(struct task_struct *p, raw_spinlock_t **plock, ++ unsigned long *flags) ++{ ++ struct rq *rq; ++ for (;;) { ++ rq = task_rq(p); ++ if (p->on_cpu || task_on_rq_queued(p)) { ++ raw_spin_lock_irqsave(&rq->lock, *flags); ++ if (likely((p->on_cpu || task_on_rq_queued(p)) ++ && rq == task_rq(p))) { ++ *plock = &rq->lock; ++ return rq; ++ } ++ raw_spin_unlock_irqrestore(&rq->lock, *flags); ++ } else if (task_on_rq_migrating(p)) { ++ do { ++ cpu_relax(); ++ } while (unlikely(task_on_rq_migrating(p))); ++ } else { ++ raw_spin_lock_irqsave(&p->pi_lock, *flags); ++ if (likely(!p->on_cpu && !p->on_rq && ++ rq == task_rq(p))) { ++ *plock = &p->pi_lock; ++ return rq; ++ } ++ raw_spin_unlock_irqrestore(&p->pi_lock, *flags); ++ } ++ } ++} ++ ++static inline void ++task_access_unlock_irqrestore(struct task_struct *p, raw_spinlock_t *lock, ++ unsigned long *flags) ++{ ++ raw_spin_unlock_irqrestore(lock, *flags); ++} ++ ++/* ++ * __task_rq_lock - lock the rq @p resides on. ++ */ ++struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf) ++ __acquires(rq->lock) ++{ ++ struct rq *rq; ++ ++ lockdep_assert_held(&p->pi_lock); ++ ++ for (;;) { ++ rq = task_rq(p); ++ raw_spin_lock(&rq->lock); ++ if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) ++ return rq; ++ raw_spin_unlock(&rq->lock); ++ ++ while (unlikely(task_on_rq_migrating(p))) ++ cpu_relax(); ++ } ++} ++ ++/* ++ * task_rq_lock - lock p->pi_lock and lock the rq @p resides on. ++ */ ++struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf) ++ __acquires(p->pi_lock) ++ __acquires(rq->lock) ++{ ++ struct rq *rq; ++ ++ for (;;) { ++ raw_spin_lock_irqsave(&p->pi_lock, rf->flags); ++ rq = task_rq(p); ++ raw_spin_lock(&rq->lock); ++ /* ++ * move_queued_task() task_rq_lock() ++ * ++ * ACQUIRE (rq->lock) ++ * [S] ->on_rq = MIGRATING [L] rq = task_rq() ++ * WMB (__set_task_cpu()) ACQUIRE (rq->lock); ++ * [S] ->cpu = new_cpu [L] task_rq() ++ * [L] ->on_rq ++ * RELEASE (rq->lock) ++ * ++ * If we observe the old CPU in task_rq_lock(), the acquire of ++ * the old rq->lock will fully serialize against the stores. 
++ * ++ * If we observe the new CPU in task_rq_lock(), the address ++ * dependency headed by '[L] rq = task_rq()' and the acquire ++ * will pair with the WMB to ensure we then also see migrating. ++ */ ++ if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) { ++ return rq; ++ } ++ raw_spin_unlock(&rq->lock); ++ raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags); ++ ++ while (unlikely(task_on_rq_migrating(p))) ++ cpu_relax(); ++ } ++} ++ ++/* ++ * RQ-clock updating methods: ++ */ ++ ++static void update_rq_clock_task(struct rq *rq, s64 delta) ++{ ++/* ++ * In theory, the compile should just see 0 here, and optimize out the call ++ * to sched_rt_avg_update. But I don't trust it... ++ */ ++ s64 __maybe_unused steal = 0, irq_delta = 0; ++ ++#ifdef CONFIG_IRQ_TIME_ACCOUNTING ++ irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time; ++ ++ /* ++ * Since irq_time is only updated on {soft,}irq_exit, we might run into ++ * this case when a previous update_rq_clock() happened inside a ++ * {soft,}irq region. ++ * ++ * When this happens, we stop ->clock_task and only update the ++ * prev_irq_time stamp to account for the part that fit, so that a next ++ * update will consume the rest. This ensures ->clock_task is ++ * monotonic. ++ * ++ * It does however cause some slight miss-attribution of {soft,}irq ++ * time, a more accurate solution would be to update the irq_time using ++ * the current rq->clock timestamp, except that would require using ++ * atomic ops. ++ */ ++ if (irq_delta > delta) ++ irq_delta = delta; ++ ++ rq->prev_irq_time += irq_delta; ++ delta -= irq_delta; ++#endif ++#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING ++ if (static_key_false((¶virt_steal_rq_enabled))) { ++ steal = paravirt_steal_clock(cpu_of(rq)); ++ steal -= rq->prev_steal_time_rq; ++ ++ if (unlikely(steal > delta)) ++ steal = delta; ++ ++ rq->prev_steal_time_rq += steal; ++ ++ delta -= steal; ++ } ++#endif ++ ++ rq->clock_task += delta; ++ ++#ifdef CONFIG_HAVE_SCHED_AVG_IRQ ++ if ((irq_delta + steal)) ++ update_irq_load_avg(rq, irq_delta + steal); ++#endif ++} ++ ++static inline void update_rq_clock(struct rq *rq) ++{ ++ s64 delta = sched_clock_cpu(cpu_of(rq)) - rq->clock; ++ ++ if (unlikely(delta <= 0)) ++ return; ++ rq->clock += delta; ++ update_rq_clock_task(rq, delta); ++} ++ ++static inline void update_task_priodl(struct task_struct *p) ++{ ++ p->priodl = (((u64) (p->prio))<<56) | ((p->deadline)>>8); ++} ++ ++/* ++ * Deadline is "now" in niffies + (offset by priority). Setting the deadline ++ * is the key to everything. It distributes CPU fairly amongst tasks of the ++ * same nice value, it proportions CPU according to nice level, it means the ++ * task that last woke up the longest ago has the earliest deadline, thus ++ * ensuring that interactive tasks get low latency on wake up. The CPU ++ * proportion works out to the square of the virtual deadline difference, so ++ * this equation will give nice 19 3% CPU compared to nice 0. ++ */ ++static inline u64 task_deadline_diff(const struct task_struct *p) ++{ ++ return sched_prio2deadline[TASK_USER_PRIO(p)]; ++} ++ ++static inline u64 static_deadline_diff(int static_prio) ++{ ++ return sched_prio2deadline[USER_PRIO(static_prio)]; ++} ++ ++/* ++ * The time_slice is only refilled when it is empty and that is when we set a ++ * new deadline for non-rt tasks. 
++ */ ++static inline void time_slice_expired(struct task_struct *p, struct rq *rq) ++{ ++ p->time_slice = timeslice(); ++ if (p->prio >= NORMAL_PRIO) ++ p->deadline = rq->clock + task_deadline_diff(p); ++ ++ update_task_priodl(p); ++} ++ ++static inline struct task_struct *rq_first_queued_task(struct rq *rq) ++{ ++ struct skiplist_node *node = rq->sl_header.next[0]; ++ ++ if (node == &rq->sl_header) ++ return rq->idle; ++ ++ return skiplist_entry(node, struct task_struct, sl_node); ++} ++ ++static inline struct task_struct *rq_second_queued_task(struct rq *rq) ++{ ++ struct skiplist_node *node = rq->sl_header.next[0]->next[0]; ++ ++ if (node == &rq->sl_header) ++ return rq->idle; ++ ++ return skiplist_entry(node, struct task_struct, sl_node); ++} ++ ++static inline int is_second_in_rq(struct task_struct *p, struct rq *rq) ++{ ++ return (p->sl_node.prev[0]->prev[0] == &rq->sl_header); ++} ++ ++static const int task_dl_hash_tbl[] = { ++/* 0 4 8 12 */ ++ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, ++/* 16 20 24 28 */ ++ 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 4, 4, 5, 6, 7 ++}; ++ ++static inline int ++task_deadline_level(const struct task_struct *p, const struct rq *rq) ++{ ++ u64 delta = (rq->clock + sched_prio2deadline[39] - p->deadline) >> 23; ++ ++ delta = min((size_t)delta, ARRAY_SIZE(task_dl_hash_tbl) - 1); ++ return task_dl_hash_tbl[delta]; ++} ++ ++/* ++ * cmpxchg based fetch_or, macro so it works for different integer types ++ */ ++#define fetch_or(ptr, mask) \ ++ ({ \ ++ typeof(ptr) _ptr = (ptr); \ ++ typeof(mask) _mask = (mask); \ ++ typeof(*_ptr) _old, _val = *_ptr; \ ++ \ ++ for (;;) { \ ++ _old = cmpxchg(_ptr, _val, _val | _mask); \ ++ if (_old == _val) \ ++ break; \ ++ _val = _old; \ ++ } \ ++ _old; \ ++}) ++ ++#if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG) ++/* ++ * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG, ++ * this avoids any races wrt polling state changes and thereby avoids ++ * spurious IPIs. ++ */ ++static bool set_nr_and_not_polling(struct task_struct *p) ++{ ++ struct thread_info *ti = task_thread_info(p); ++ return !(fetch_or(&ti->flags, _TIF_NEED_RESCHED) & _TIF_POLLING_NRFLAG); ++} ++ ++/* ++ * Atomically set TIF_NEED_RESCHED if TIF_POLLING_NRFLAG is set. ++ * ++ * If this returns true, then the idle task promises to call ++ * flush_smp_call_function_from_idle() and reschedule soon. 
++ */ ++static bool set_nr_if_polling(struct task_struct *p) ++{ ++ struct thread_info *ti = task_thread_info(p); ++ typeof(ti->flags) old, val = READ_ONCE(ti->flags); ++ ++ for (;;) { ++ if (!(val & _TIF_POLLING_NRFLAG)) ++ return false; ++ if (val & _TIF_NEED_RESCHED) ++ return true; ++ old = cmpxchg(&ti->flags, val, val | _TIF_NEED_RESCHED); ++ if (old == val) ++ break; ++ val = old; ++ } ++ return true; ++} ++ ++#else ++static bool set_nr_and_not_polling(struct task_struct *p) ++{ ++ set_tsk_need_resched(p); ++ return true; ++} ++ ++#ifdef CONFIG_SMP ++static bool set_nr_if_polling(struct task_struct *p) ++{ ++ return false; ++} ++#endif ++#endif ++ ++#ifdef CONFIG_SMP ++#ifdef CONFIG_SMT_NICE ++static void resched_cpu_if_curr_is(int cpu, int priority) ++{ ++ struct rq *rq = cpu_rq(cpu); ++ ++ rcu_read_lock(); ++ ++ if (rcu_dereference(rq->curr)->prio != priority) ++ goto out; ++ ++ if (set_nr_if_polling(rq->idle)) { ++ trace_sched_wake_idle_without_ipi(cpu); ++ } else { ++ if (!do_raw_spin_trylock(&rq->lock)) ++ goto out; ++ spin_acquire(&rq->lock.dep_map, SINGLE_DEPTH_NESTING, 1, _RET_IP_); ++ ++ if (priority == rq->curr->prio) ++ smp_send_reschedule(cpu); ++ /* Else CPU is not idle, do nothing here */ ++ ++ spin_release(&rq->lock.dep_map, _RET_IP_); ++ do_raw_spin_unlock(&rq->lock); ++ } ++ ++out: ++ rcu_read_unlock(); ++} ++#endif /* CONFIG_SMT_NICE */ ++ ++static inline bool ++__update_cpumasks_bitmap(int cpu, unsigned long *plevel, unsigned long level, ++ cpumask_t cpumasks[], unsigned long bitmap[]) ++{ ++ if (*plevel == level) ++ return false; ++ ++ cpumask_clear_cpu(cpu, cpumasks + *plevel); ++ if (cpumask_empty(cpumasks + *plevel)) ++ clear_bit(*plevel, bitmap); ++ cpumask_set_cpu(cpu, cpumasks + level); ++ set_bit(level, bitmap); ++ ++ *plevel = level; ++ ++ return true; ++} ++ ++static inline int ++task_running_policy_level(const struct task_struct *p, const struct rq *rq) ++{ ++ int prio = p->prio; ++ ++ if (NORMAL_PRIO == prio) ++ return SCHED_RQ_NORMAL_0 + task_deadline_level(p, rq); ++ ++ if (ISO_PRIO == prio) ++ return SCHED_RQ_ISO; ++ if (prio < MAX_RT_PRIO) ++ return SCHED_RQ_RT; ++ return PRIO_LIMIT - prio; ++} ++ ++static inline void update_sched_rq_queued_masks_normal(struct rq *rq) ++{ ++ struct task_struct *p = rq_first_queued_task(rq); ++ ++ if (p->prio != NORMAL_PRIO) ++ return; ++ ++ __update_cpumasks_bitmap(cpu_of(rq), &rq->queued_level, ++ task_running_policy_level(p, rq), ++ &sched_rq_queued_masks[0], ++ &sched_rq_queued_masks_bitmap[0]); ++} ++ ++#ifdef CONFIG_SMT_NICE ++static inline void update_sched_cpu_psg_mask(const int cpu) ++{ ++ cpumask_t tmp; ++ ++ cpumask_or(&tmp, &sched_rq_queued_masks[SCHED_RQ_EMPTY], ++ &sched_rq_queued_masks[SCHED_RQ_IDLE]); ++ cpumask_and(&tmp, &tmp, cpu_smt_mask(cpu)); ++ if (cpumask_equal(&tmp, cpu_smt_mask(cpu))) ++ cpumask_or(&sched_cpu_psg_mask, &sched_cpu_psg_mask, ++ cpu_smt_mask(cpu)); ++ else ++ cpumask_andnot(&sched_cpu_psg_mask, &sched_cpu_psg_mask, ++ cpu_smt_mask(cpu)); ++} ++#endif ++ ++static inline void update_sched_rq_queued_masks(struct rq *rq) ++{ ++ int cpu = cpu_of(rq); ++ struct task_struct *p = rq_first_queued_task(rq); ++ unsigned long level; ++#ifdef CONFIG_SCHED_SMT ++ unsigned long last_level = rq->queued_level; ++#endif ++ ++ level = task_running_policy_level(p, rq); ++ sched_rq_prio[cpu] = p->prio; ++ ++ if (!__update_cpumasks_bitmap(cpu, &rq->queued_level, level, ++ &sched_rq_queued_masks[0], ++ &sched_rq_queued_masks_bitmap[0])) ++ return; ++ ++#ifdef CONFIG_SCHED_SMT ++ if (cpu == 
per_cpu(sched_sibling_cpu, cpu)) ++ return; ++ ++ if (SCHED_RQ_EMPTY == last_level) { ++ cpumask_andnot(&sched_cpu_sg_idle_mask, &sched_cpu_sg_idle_mask, ++ cpu_smt_mask(cpu)); ++ } else if (SCHED_RQ_EMPTY == level) { ++ cpumask_t tmp; ++ ++ cpumask_and(&tmp, cpu_smt_mask(cpu), ++ &sched_rq_queued_masks[SCHED_RQ_EMPTY]); ++ if (cpumask_equal(&tmp, cpu_smt_mask(cpu))) ++ cpumask_or(&sched_cpu_sg_idle_mask, cpu_smt_mask(cpu), ++ &sched_cpu_sg_idle_mask); ++ } ++ ++#ifdef CONFIG_SMT_NICE ++ if (level <= SCHED_RQ_IDLE && last_level > SCHED_RQ_IDLE) { ++ cpumask_clear_cpu(per_cpu(sched_sibling_cpu, cpu), ++ &sched_smt_supressed_mask); ++ update_sched_cpu_psg_mask(cpu); ++ resched_cpu_if_curr_is(per_cpu(sched_sibling_cpu, cpu), PRIO_LIMIT); ++ } else if (last_level <= SCHED_RQ_IDLE && level > SCHED_RQ_IDLE) { ++ cpumask_set_cpu(per_cpu(sched_sibling_cpu, cpu), ++ &sched_smt_supressed_mask); ++ update_sched_cpu_psg_mask(cpu); ++ resched_cpu_if_curr_is(per_cpu(sched_sibling_cpu, cpu), IDLE_PRIO); ++ } ++#endif /* CONFIG_SMT_NICE */ ++#endif ++} ++ ++static inline void update_sched_rq_pending_masks(struct rq *rq) ++{ ++ unsigned long level; ++ struct task_struct *p = rq_second_queued_task(rq); ++ ++ level = task_running_policy_level(p, rq); ++ ++ __update_cpumasks_bitmap(cpu_of(rq), &rq->pending_level, level, ++ &sched_rq_pending_masks[0], ++ &sched_rq_pending_masks_bitmap[0]); ++} ++ ++#else /* CONFIG_SMP */ ++static inline void update_sched_rq_queued_masks(struct rq *rq) {} ++static inline void update_sched_rq_queued_masks_normal(struct rq *rq) {} ++static inline void update_sched_rq_pending_masks(struct rq *rq) {} ++#endif ++ ++#ifdef CONFIG_NO_HZ_FULL ++/* ++ * Tick may be needed by tasks in the runqueue depending on their policy and ++ * requirements. If tick is needed, lets send the target an IPI to kick it out ++ * of nohz mode if necessary. ++ */ ++static inline void sched_update_tick_dependency(struct rq *rq) ++{ ++ int cpu; ++ ++ if (!tick_nohz_full_enabled()) ++ return; ++ ++ cpu = cpu_of(rq); ++ ++ if (!tick_nohz_full_cpu(cpu)) ++ return; ++ ++ if (rq->nr_running < 2) ++ tick_nohz_dep_clear_cpu(cpu, TICK_DEP_BIT_SCHED); ++ else ++ tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_SCHED); ++} ++#else /* !CONFIG_NO_HZ_FULL */ ++static inline void sched_update_tick_dependency(struct rq *rq) { } ++#endif ++ ++/* ++ * Removing from the runqueue. Deleting a task from the skip list is done ++ * via the stored node reference in the task struct and does not require a full ++ * look up. Thus it occurs in O(k) time where k is the "level" of the list the ++ * task was stored at - usually < 4, max 16. ++ * ++ * Context: rq->lock ++ */ ++static inline void dequeue_task(struct task_struct *p, struct rq *rq, int flags) ++{ ++ lockdep_assert_held(&rq->lock); ++ ++ WARN_ONCE(task_rq(p) != rq, "pds: dequeue task reside on cpu%d from cpu%d\n", ++ task_cpu(p), cpu_of(rq)); ++ if (skiplist_del_init(&rq->sl_header, &p->sl_node)) { ++ update_sched_rq_queued_masks(rq); ++ update_sched_rq_pending_masks(rq); ++ } else if (is_second_in_rq(p, rq)) ++ update_sched_rq_pending_masks(rq); ++ rq->nr_running--; ++ ++ sched_update_tick_dependency(rq); ++ psi_dequeue(p, flags & DEQUEUE_SLEEP); ++ ++ sched_info_dequeued(rq, p); ++} ++ ++/* ++ * To determine if it's safe for a task of SCHED_IDLE to actually run as ++ * an idle task, we ensure none of the following conditions are met. 
++ */ ++static inline bool idleprio_suitable(struct task_struct *p) ++{ ++ return (!freezing(p) && !signal_pending(p) && ++ !(task_contributes_to_load(p)) && !(p->flags & (PF_EXITING))); ++} ++ ++/* ++ * pds_skiplist_random_level -- Returns a pseudo-random level number for skip ++ * list node which is used in PDS run queue. ++ * ++ * In current implementation, based on testing, the first 8 bits in microseconds ++ * of niffies are suitable for random level population. ++ * find_first_bit() is used to satisfy p = 0.5 between each levels, and there ++ * should be platform hardware supported instruction(known as ctz/clz) to speed ++ * up this function. ++ * The skiplist level for a task is populated when task is created and doesn't ++ * change in task's life time. When task is being inserted into run queue, this ++ * skiplist level is set to task's sl_node->level, the skiplist insert function ++ * may change it based on current level of the skip lsit. ++ */ ++static inline int pds_skiplist_random_level(const struct task_struct *p) ++{ ++ long unsigned int randseed; ++ ++ /* ++ * 1. Some architectures don't have better than microsecond resolution ++ * so mask out ~microseconds as a factor of the random seed for skiplist ++ * insertion. ++ * 2. Use address of task structure pointer as another factor of the ++ * random seed for task burst forking scenario. ++ */ ++ randseed = (task_rq(p)->clock ^ (long unsigned int)p) >> 10; ++ ++ return find_first_bit(&randseed, NUM_SKIPLIST_LEVEL - 1); ++} ++ ++/** ++ * pds_skiplist_task_search -- search function used in PDS run queue skip list ++ * node insert operation. ++ * @it: iterator pointer to the node in the skip list ++ * @node: pointer to the skiplist_node to be inserted ++ * ++ * Returns true if key of @it is less or equal to key value of @node, otherwise ++ * false. ++ */ ++static inline bool ++pds_skiplist_task_search(struct skiplist_node *it, struct skiplist_node *node) ++{ ++ return (skiplist_entry(it, struct task_struct, sl_node)->priodl <= ++ skiplist_entry(node, struct task_struct, sl_node)->priodl); ++} ++ ++/* ++ * Define the skip list insert function for PDS ++ */ ++DEFINE_SKIPLIST_INSERT_FUNC(pds_skiplist_insert, pds_skiplist_task_search); ++ ++/* ++ * Adding task to the runqueue. ++ * ++ * Context: rq->lock ++ */ ++static inline void enqueue_task(struct task_struct *p, struct rq *rq, int flags) ++{ ++ lockdep_assert_held(&rq->lock); ++ ++ WARN_ONCE(task_rq(p) != rq, "pds: enqueue task reside on cpu%d to cpu%d\n", ++ task_cpu(p), cpu_of(rq)); ++ ++ p->sl_node.level = p->sl_level; ++ if (pds_skiplist_insert(&rq->sl_header, &p->sl_node)) { ++ update_sched_rq_queued_masks(rq); ++ update_sched_rq_pending_masks(rq); ++ } else if (is_second_in_rq(p, rq)) ++ update_sched_rq_pending_masks(rq); ++ rq->nr_running++; ++ ++ sched_update_tick_dependency(rq); ++ ++ sched_info_queued(rq, p); ++ psi_enqueue(p, flags); ++ ++ /* ++ * If in_iowait is set, the code below may not trigger any cpufreq ++ * utilization updates, so do it here explicitly with the IOWAIT flag ++ * passed. 
++ */ ++ if (p->in_iowait) ++ cpufreq_update_this_cpu(rq, SCHED_CPUFREQ_IOWAIT); ++} ++ ++static inline void requeue_task(struct task_struct *p, struct rq *rq) ++{ ++ bool b_first, b_second; ++ ++ lockdep_assert_held(&rq->lock); ++ ++ WARN_ONCE(task_rq(p) != rq, "pds: cpu[%d] requeue task reside on cpu%d\n", ++ cpu_of(rq), task_cpu(p)); ++ ++ b_first = skiplist_del_init(&rq->sl_header, &p->sl_node); ++ b_second = is_second_in_rq(p, rq); ++ ++ p->sl_node.level = p->sl_level; ++ if (pds_skiplist_insert(&rq->sl_header, &p->sl_node) || b_first) { ++ update_sched_rq_queued_masks(rq); ++ update_sched_rq_pending_masks(rq); ++ } else if (is_second_in_rq(p, rq) || b_second) ++ update_sched_rq_pending_masks(rq); ++} ++ ++/* ++ * resched_curr - mark rq's current task 'to be rescheduled now'. ++ * ++ * On UP this means the setting of the need_resched flag, on SMP it ++ * might also involve a cross-CPU call to trigger the scheduler on ++ * the target CPU. ++ */ ++void resched_curr(struct rq *rq) ++{ ++ struct task_struct *curr = rq->curr; ++ int cpu; ++ ++ lockdep_assert_held(&rq->lock); ++ ++ if (test_tsk_need_resched(curr)) ++ return; ++ ++ cpu = cpu_of(rq); ++ if (cpu == smp_processor_id()) { ++ set_tsk_need_resched(curr); ++ set_preempt_need_resched(); ++ return; ++ } ++ ++ if (set_nr_and_not_polling(curr)) ++ smp_send_reschedule(cpu); ++ else ++ trace_sched_wake_idle_without_ipi(cpu); ++} ++ ++static inline void check_preempt_curr(struct rq *rq, struct task_struct *p) ++{ ++ struct task_struct *curr = rq->curr; ++ ++ if (curr->prio == PRIO_LIMIT) ++ resched_curr(rq); ++ ++ if (task_running_idle(p)) ++ return; ++ ++ if (p->priodl < curr->priodl) ++ resched_curr(rq); ++} ++ ++#ifdef CONFIG_SCHED_HRTICK ++/* ++ * Use HR-timers to deliver accurate preemption points. ++ */ ++ ++static void hrtick_clear(struct rq *rq) ++{ ++ if (hrtimer_active(&rq->hrtick_timer)) ++ hrtimer_cancel(&rq->hrtick_timer); ++} ++ ++/* ++ * High-resolution timer tick. ++ * Runs from hardirq context with interrupts disabled. ++ */ ++static enum hrtimer_restart hrtick(struct hrtimer *timer) ++{ ++ struct rq *rq = container_of(timer, struct rq, hrtick_timer); ++ struct task_struct *p; ++ ++ WARN_ON_ONCE(cpu_of(rq) != smp_processor_id()); ++ ++ raw_spin_lock(&rq->lock); ++ p = rq->curr; ++ p->time_slice = 0; ++ resched_curr(rq); ++ raw_spin_unlock(&rq->lock); ++ ++ return HRTIMER_NORESTART; ++} ++ ++/* ++ * Use hrtick when: ++ * - enabled by features ++ * - hrtimer is actually high res ++ */ ++static inline int hrtick_enabled(struct rq *rq) ++{ ++ /** ++ * PDS doesn't support sched_feat yet ++ if (!sched_feat(HRTICK)) ++ return 0; ++ */ ++ if (!cpu_active(cpu_of(rq))) ++ return 0; ++ return hrtimer_is_hres_active(&rq->hrtick_timer); ++} ++ ++#ifdef CONFIG_SMP ++ ++static void __hrtick_restart(struct rq *rq) ++{ ++ struct hrtimer *timer = &rq->hrtick_timer; ++ ++ hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED_HARD); ++} ++ ++/* ++ * called from hardirq (IPI) context ++ */ ++static void __hrtick_start(void *arg) ++{ ++ struct rq *rq = arg; ++ ++ raw_spin_lock(&rq->lock); ++ __hrtick_restart(rq); ++ raw_spin_unlock(&rq->lock); ++} ++ ++/* ++ * Called to set the hrtick timer state. ++ * ++ * called with rq->lock held and irqs disabled ++ */ ++void hrtick_start(struct rq *rq, u64 delay) ++{ ++ struct hrtimer *timer = &rq->hrtick_timer; ++ ktime_t time; ++ s64 delta; ++ ++ /* ++ * Don't schedule slices shorter than 10000ns, that just ++ * doesn't make sense and can cause timer DoS. 
++ */ ++ delta = max_t(s64, delay, 10000LL); ++ time = ktime_add_ns(timer->base->get_time(), delta); ++ ++ hrtimer_set_expires(timer, time); ++ ++ if (rq == this_rq()) ++ __hrtick_restart(rq); ++ else ++ smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd); ++} ++ ++#else ++/* ++ * Called to set the hrtick timer state. ++ * ++ * called with rq->lock held and irqs disabled ++ */ ++void hrtick_start(struct rq *rq, u64 delay) ++{ ++ /* ++ * Don't schedule slices shorter than 10000ns, that just ++ * doesn't make sense. Rely on vruntime for fairness. ++ */ ++ delay = max_t(u64, delay, 10000LL); ++ hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay), ++ HRTIMER_MODE_REL_PINNED_HARD); ++} ++#endif /* CONFIG_SMP */ ++ ++static void hrtick_rq_init(struct rq *rq) ++{ ++#ifdef CONFIG_SMP ++ rq->hrtick_csd.flags = 0; ++ rq->hrtick_csd.func = __hrtick_start; ++ rq->hrtick_csd.info = rq; ++#endif ++ ++ hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD); ++ rq->hrtick_timer.function = hrtick; ++} ++ ++static inline int rq_dither(struct rq *rq) ++{ ++ if ((rq->clock - rq->last_tick > HALF_JIFFY_NS) || hrtick_enabled(rq)) ++ return 0; ++ ++ return HALF_JIFFY_NS; ++} ++ ++#else /* CONFIG_SCHED_HRTICK */ ++static inline int hrtick_enabled(struct rq *rq) ++{ ++ return 0; ++} ++ ++static inline void hrtick_clear(struct rq *rq) ++{ ++} ++ ++static inline void hrtick_rq_init(struct rq *rq) ++{ ++} ++ ++static inline int rq_dither(struct rq *rq) ++{ ++ return (rq->clock - rq->last_tick > HALF_JIFFY_NS)? 0:HALF_JIFFY_NS; ++} ++#endif /* CONFIG_SCHED_HRTICK */ ++ ++static inline int normal_prio(struct task_struct *p) ++{ ++ static const int policy_to_prio[] = { ++ NORMAL_PRIO, /* SCHED_NORMAL */ ++ 0, /* SCHED_FIFO */ ++ 0, /* SCHED_RR */ ++ IDLE_PRIO, /* SCHED_BATCH */ ++ ISO_PRIO, /* SCHED_ISO */ ++ IDLE_PRIO /* SCHED_IDLE */ ++ }; ++ ++ if (task_has_rt_policy(p)) ++ return MAX_RT_PRIO - 1 - p->rt_priority; ++ return policy_to_prio[p->policy]; ++} ++ ++/* ++ * Calculate the current priority, i.e. the priority ++ * taken into account by the scheduler. This value might ++ * be boosted by RT tasks as it will be RT if the task got ++ * RT-boosted. If not then it returns p->normal_prio. ++ */ ++static int effective_prio(struct task_struct *p) ++{ ++ p->normal_prio = normal_prio(p); ++ /* ++ * If we are RT tasks or we were boosted to RT priority, ++ * keep the priority unchanged. Otherwise, update priority ++ * to the normal priority: ++ */ ++ if (!rt_prio(p->prio)) ++ return p->normal_prio; ++ return p->prio; ++} ++ ++/* ++ * activate_task - move a task to the runqueue. ++ * ++ * Context: rq->lock ++ */ ++static void activate_task(struct task_struct *p, struct rq *rq) ++{ ++ if (task_contributes_to_load(p)) ++ rq->nr_uninterruptible--; ++ enqueue_task(p, rq, ENQUEUE_WAKEUP); ++ p->on_rq = 1; ++ cpufreq_update_this_cpu(rq, 0); ++} ++ ++/* ++ * deactivate_task - remove a task from the runqueue. ++ * ++ * Context: rq->lock ++ */ ++static inline void deactivate_task(struct task_struct *p, struct rq *rq) ++{ ++ if (task_contributes_to_load(p)) ++ rq->nr_uninterruptible++; ++ dequeue_task(p, rq, DEQUEUE_SLEEP); ++ p->on_rq = 0; ++ cpufreq_update_this_cpu(rq, 0); ++} ++ ++static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu) ++{ ++#ifdef CONFIG_SMP ++ /* ++ * After ->cpu is set up to a new value, task_access_lock(p, ...) can be ++ * successfully executed on another CPU. We must ensure that updates of ++ * per-task data have been completed by this moment. 
++ */ ++ smp_wmb(); ++ ++#ifdef CONFIG_THREAD_INFO_IN_TASK ++ WRITE_ONCE(p->cpu, cpu); ++#else ++ WRITE_ONCE(task_thread_info(p)->cpu, cpu); ++#endif ++#endif ++} ++ ++#ifdef CONFIG_SMP ++void set_task_cpu(struct task_struct *p, unsigned int new_cpu) ++{ ++#ifdef CONFIG_SCHED_DEBUG ++ /* ++ * We should never call set_task_cpu() on a blocked task, ++ * ttwu() will sort out the placement. ++ */ ++ WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING && ++ !p->on_rq); ++#ifdef CONFIG_LOCKDEP ++ /* ++ * The caller should hold either p->pi_lock or rq->lock, when changing ++ * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks. ++ * ++ * sched_move_task() holds both and thus holding either pins the cgroup, ++ * see task_group(). ++ */ ++ WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) || ++ lockdep_is_held(&task_rq(p)->lock))); ++#endif ++ /* ++ * Clearly, migrating tasks to offline CPUs is a fairly daft thing. ++ */ ++ WARN_ON_ONCE(!cpu_online(new_cpu)); ++#endif ++ if (task_cpu(p) == new_cpu) ++ return; ++ trace_sched_migrate_task(p, new_cpu); ++ rseq_migrate(p); ++ perf_event_task_migrate(p); ++ ++ __set_task_cpu(p, new_cpu); ++} ++ ++static inline bool is_per_cpu_kthread(struct task_struct *p) ++{ ++ return ((p->flags & PF_KTHREAD) && (1 == p->nr_cpus_allowed)); ++} ++ ++/* ++ * Per-CPU kthreads are allowed to run on !active && online CPUs, see ++ * __set_cpus_allowed_ptr() and select_fallback_rq(). ++ */ ++static inline bool is_cpu_allowed(struct task_struct *p, int cpu) ++{ ++ if (!cpumask_test_cpu(cpu, &p->cpus_mask)) ++ return false; ++ ++ if (is_per_cpu_kthread(p)) ++ return cpu_online(cpu); ++ ++ return cpu_active(cpu); ++} ++ ++/* ++ * This is how migration works: ++ * ++ * 1) we invoke migration_cpu_stop() on the target CPU using ++ * stop_one_cpu(). ++ * 2) stopper starts to run (implicitly forcing the migrated thread ++ * off the CPU) ++ * 3) it checks whether the migrated task is still in the wrong runqueue. ++ * 4) if it's in the wrong runqueue then the migration thread removes ++ * it and puts it into the right queue. ++ * 5) stopper completes and stop_one_cpu() returns and the migration ++ * is done. ++ */ ++ ++/* ++ * move_queued_task - move a queued task to new rq. ++ * ++ * Returns (locked) new rq. Old rq's lock is released. ++ */ ++static struct rq *move_queued_task(struct rq *rq, struct task_struct *p, int ++ new_cpu) ++{ ++ lockdep_assert_held(&rq->lock); ++ ++ p->on_rq = TASK_ON_RQ_MIGRATING; ++ dequeue_task(p, rq, 0); ++ set_task_cpu(p, new_cpu); ++ raw_spin_unlock(&rq->lock); ++ ++ rq = cpu_rq(new_cpu); ++ ++ raw_spin_lock(&rq->lock); ++ BUG_ON(task_cpu(p) != new_cpu); ++ enqueue_task(p, rq, 0); ++ p->on_rq = TASK_ON_RQ_QUEUED; ++ check_preempt_curr(rq, p); ++ ++ return rq; ++} ++ ++struct migration_arg { ++ struct task_struct *task; ++ int dest_cpu; ++}; ++ ++/* ++ * Move (not current) task off this CPU, onto the destination CPU. We're doing ++ * this because either it can't run here any more (set_cpus_allowed() ++ * away from this CPU, or CPU going down), or because we're ++ * attempting to rebalance this task on exec (sched_exec). ++ * ++ * So we race with normal scheduler movements, but that's OK, as long ++ * as the task is no longer on this CPU. ++ */ ++static struct rq *__migrate_task(struct rq *rq, struct task_struct *p, int ++ dest_cpu) ++{ ++ /* Affinity changed (again). 
*/ ++ if (!is_cpu_allowed(p, dest_cpu)) ++ return rq; ++ ++ update_rq_clock(rq); ++ return move_queued_task(rq, p, dest_cpu); ++} ++ ++/* ++ * migration_cpu_stop - this will be executed by a highprio stopper thread ++ * and performs thread migration by bumping thread off CPU then ++ * 'pushing' onto another runqueue. ++ */ ++static int migration_cpu_stop(void *data) ++{ ++ struct migration_arg *arg = data; ++ struct task_struct *p = arg->task; ++ struct rq *rq = this_rq(); ++ ++ /* ++ * The original target CPU might have gone down and we might ++ * be on another CPU but it doesn't matter. ++ */ ++ local_irq_disable(); ++ ++ raw_spin_lock(&p->pi_lock); ++ raw_spin_lock(&rq->lock); ++ /* ++ * If task_rq(p) != rq, it cannot be migrated here, because we're ++ * holding rq->lock, if p->on_rq == 0 it cannot get enqueued because ++ * we're holding p->pi_lock. ++ */ ++ if (task_rq(p) == rq) ++ if (task_on_rq_queued(p)) ++ rq = __migrate_task(rq, p, arg->dest_cpu); ++ raw_spin_unlock(&rq->lock); ++ raw_spin_unlock(&p->pi_lock); ++ ++ local_irq_enable(); ++ return 0; ++} ++ ++static inline void ++set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask) ++{ ++ cpumask_copy(&p->cpus_mask, new_mask); ++ p->nr_cpus_allowed = cpumask_weight(new_mask); ++} ++ ++void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) ++{ ++ set_cpus_allowed_common(p, new_mask); ++} ++#endif ++ ++/* Enter with rq lock held. We know p is on the local CPU */ ++static inline void __set_tsk_resched(struct task_struct *p) ++{ ++ set_tsk_need_resched(p); ++ set_preempt_need_resched(); ++} ++ ++/** ++ * task_curr - is this task currently executing on a CPU? ++ * @p: the task in question. ++ * ++ * Return: 1 if the task is currently executing. 0 otherwise. ++ */ ++inline int task_curr(const struct task_struct *p) ++{ ++ return cpu_curr(task_cpu(p)) == p; ++} ++ ++#ifdef CONFIG_SMP ++/* ++ * wait_task_inactive - wait for a thread to unschedule. ++ * ++ * If @match_state is nonzero, it's the @p->state value just checked and ++ * not expected to change. If it changes, i.e. @p might have woken up, ++ * then return zero. When we succeed in waiting for @p to be off its CPU, ++ * we return a positive number (its total switch count). If a second call ++ * a short while later returns the same number, the caller can be sure that ++ * @p has remained unscheduled the whole time. ++ * ++ * The caller must ensure that the task *will* unschedule sometime soon, ++ * else this function might spin for a *long* time. This function can't ++ * be called with interrupts off, or it may introduce deadlock with ++ * smp_call_function() if an IPI is sent by the same process we are ++ * waiting to become inactive. ++ */ ++unsigned long wait_task_inactive(struct task_struct *p, long match_state) ++{ ++ unsigned long flags; ++ bool running, on_rq; ++ unsigned long ncsw; ++ struct rq *rq; ++ raw_spinlock_t *lock; ++ ++ for (;;) { ++ rq = task_rq(p); ++ ++ /* ++ * If the task is actively running on another CPU ++ * still, just relax and busy-wait without holding ++ * any locks. ++ * ++ * NOTE! Since we don't hold any locks, it's not ++ * even sure that "rq" stays as the right runqueue! ++ * But we don't care, since this will return false ++ * if the runqueue has changed and p is actually now ++ * running somewhere else! ++ */ ++ while (task_running(p) && p == rq->curr) { ++ if (match_state && unlikely(p->state != match_state)) ++ return 0; ++ cpu_relax(); ++ } ++ ++ /* ++ * Ok, time to look more closely! 
We need the rq ++ * lock now, to be *sure*. If we're wrong, we'll ++ * just go back and repeat. ++ */ ++ task_access_lock_irqsave(p, &lock, &flags); ++ trace_sched_wait_task(p); ++ running = task_running(p); ++ on_rq = p->on_rq; ++ ncsw = 0; ++ if (!match_state || p->state == match_state) ++ ncsw = p->nvcsw | LONG_MIN; /* sets MSB */ ++ task_access_unlock_irqrestore(p, lock, &flags); ++ ++ /* ++ * If it changed from the expected state, bail out now. ++ */ ++ if (unlikely(!ncsw)) ++ break; ++ ++ /* ++ * Was it really running after all now that we ++ * checked with the proper locks actually held? ++ * ++ * Oops. Go back and try again.. ++ */ ++ if (unlikely(running)) { ++ cpu_relax(); ++ continue; ++ } ++ ++ /* ++ * It's not enough that it's not actively running, ++ * it must be off the runqueue _entirely_, and not ++ * preempted! ++ * ++ * So if it was still runnable (but just not actively ++ * running right now), it's preempted, and we should ++ * yield - it could be a while. ++ */ ++ if (unlikely(on_rq)) { ++ ktime_t to = NSEC_PER_SEC / HZ; ++ ++ set_current_state(TASK_UNINTERRUPTIBLE); ++ schedule_hrtimeout(&to, HRTIMER_MODE_REL); ++ continue; ++ } ++ ++ /* ++ * Ahh, all good. It wasn't running, and it wasn't ++ * runnable, which means that it will never become ++ * running in the future either. We're all done! ++ */ ++ break; ++ } ++ ++ return ncsw; ++} ++ ++/*** ++ * kick_process - kick a running thread to enter/exit the kernel ++ * @p: the to-be-kicked thread ++ * ++ * Cause a process which is running on another CPU to enter ++ * kernel-mode, without any delay. (to get signals handled.) ++ * ++ * NOTE: this function doesn't have to take the runqueue lock, ++ * because all it wants to ensure is that the remote task enters ++ * the kernel. If the IPI races and the task has been migrated ++ * to another CPU then no harm is done and the purpose has been ++ * achieved as well. ++ */ ++void kick_process(struct task_struct *p) ++{ ++ int cpu; ++ ++ preempt_disable(); ++ cpu = task_cpu(p); ++ if ((cpu != smp_processor_id()) && task_curr(p)) ++ smp_send_reschedule(cpu); ++ preempt_enable(); ++} ++EXPORT_SYMBOL_GPL(kick_process); ++ ++/* ++ * ->cpus_mask is protected by both rq->lock and p->pi_lock ++ * ++ * A few notes on cpu_active vs cpu_online: ++ * ++ * - cpu_active must be a subset of cpu_online ++ * ++ * - on CPU-up we allow per-CPU kthreads on the online && !active CPU, ++ * see __set_cpus_allowed_ptr(). At this point the newly online ++ * CPU isn't yet part of the sched domains, and balancing will not ++ * see it. ++ * ++ * - on cpu-down we clear cpu_active() to mask the sched domains and ++ * avoid the load balancer to place new tasks on the to be removed ++ * CPU. Existing tasks will remain running there and will be taken ++ * off. ++ * ++ * This means that fallback selection must not select !active CPUs. ++ * And can assume that any active CPU must be online. Conversely ++ * select_task_rq() below may allow selection of !active CPUs in order ++ * to satisfy the above rules. ++ */ ++static int select_fallback_rq(int cpu, struct task_struct *p) ++{ ++ int nid = cpu_to_node(cpu); ++ const struct cpumask *nodemask = NULL; ++ enum { cpuset, possible, fail } state = cpuset; ++ int dest_cpu; ++ ++ /* ++ * If the node that the CPU is on has been offlined, cpu_to_node() ++ * will return -1. There is no CPU on the node, and we should ++ * select the CPU on the other node. ++ */ ++ if (nid != -1) { ++ nodemask = cpumask_of_node(nid); ++ ++ /* Look for allowed, online CPU in same node. 
*/ ++ for_each_cpu(dest_cpu, nodemask) { ++ if (!cpu_active(dest_cpu)) ++ continue; ++ if (cpumask_test_cpu(dest_cpu, &p->cpus_mask)) ++ return dest_cpu; ++ } ++ } ++ ++ for (;;) { ++ /* Any allowed, online CPU? */ ++ for_each_cpu(dest_cpu, &p->cpus_mask) { ++ if (!is_cpu_allowed(p, dest_cpu)) ++ continue; ++ goto out; ++ } ++ ++ /* No more Mr. Nice Guy. */ ++ switch (state) { ++ case cpuset: ++ if (IS_ENABLED(CONFIG_CPUSETS)) { ++ cpuset_cpus_allowed_fallback(p); ++ state = possible; ++ break; ++ } ++ /* Fall-through */ ++ case possible: ++ do_set_cpus_allowed(p, cpu_possible_mask); ++ state = fail; ++ break; ++ ++ case fail: ++ BUG(); ++ break; ++ } ++ } ++ ++out: ++ if (state != cpuset) { ++ /* ++ * Don't tell them about moving exiting tasks or ++ * kernel threads (both mm NULL), since they never ++ * leave kernel. ++ */ ++ if (p->mm && printk_ratelimit()) { ++ printk_deferred("process %d (%s) no longer affine to cpu%d\n", ++ task_pid_nr(p), p->comm, cpu); ++ } ++ } ++ ++ return dest_cpu; ++} ++ ++static inline int best_mask_cpu(int cpu, const cpumask_t *cpumask) ++{ ++ cpumask_t *mask; ++ ++ if (cpumask_test_cpu(cpu, cpumask)) ++ return cpu; ++ ++ mask = &(per_cpu(sched_cpu_affinity_chk_masks, cpu)[0]); ++ while ((cpu = cpumask_any_and(cpumask, mask)) >= nr_cpu_ids) ++ mask++; ++ ++ return cpu; ++} ++ ++/* ++ * task_preemptible_rq - return the rq which the given task can preempt on ++ * @p: task wants to preempt CPU ++ * @only_preempt_low_policy: indicate only preempt rq running low policy than @p ++ */ ++static inline int ++task_preemptible_rq_idle(struct task_struct *p, cpumask_t *chk_mask) ++{ ++ cpumask_t tmp; ++ ++#ifdef CONFIG_SCHED_SMT ++ if (cpumask_and(&tmp, chk_mask, &sched_cpu_sg_idle_mask)) ++ return best_mask_cpu(task_cpu(p), &tmp); ++#endif ++ ++#ifdef CONFIG_SMT_NICE ++ /* Only ttwu on cpu which is not smt supressed */ ++ if (cpumask_andnot(&tmp, chk_mask, &sched_smt_supressed_mask)) { ++ cpumask_t t; ++ if (cpumask_and(&t, &tmp, &sched_rq_queued_masks[SCHED_RQ_EMPTY])) ++ return best_mask_cpu(task_cpu(p), &t); ++ return best_mask_cpu(task_cpu(p), &tmp); ++ } ++#endif ++ ++ if (cpumask_and(&tmp, chk_mask, &sched_rq_queued_masks[SCHED_RQ_EMPTY])) ++ return best_mask_cpu(task_cpu(p), &tmp); ++ return best_mask_cpu(task_cpu(p), chk_mask); ++} ++ ++static inline int ++task_preemptible_rq(struct task_struct *p, cpumask_t *chk_mask, ++ int preempt_level) ++{ ++ cpumask_t tmp; ++ int level; ++ ++#ifdef CONFIG_SCHED_SMT ++#ifdef CONFIG_SMT_NICE ++ if (cpumask_and(&tmp, chk_mask, &sched_cpu_psg_mask)) ++ return best_mask_cpu(task_cpu(p), &tmp); ++#else ++ if (cpumask_and(&tmp, chk_mask, &sched_cpu_sg_idle_mask)) ++ return best_mask_cpu(task_cpu(p), &tmp); ++#endif ++#endif ++ ++ level = find_first_bit(sched_rq_queued_masks_bitmap, ++ NR_SCHED_RQ_QUEUED_LEVEL); ++ ++ while (level < preempt_level) { ++ if (cpumask_and(&tmp, chk_mask, &sched_rq_queued_masks[level])) ++ return best_mask_cpu(task_cpu(p), &tmp); ++ ++ level = find_next_bit(sched_rq_queued_masks_bitmap, ++ NR_SCHED_RQ_QUEUED_LEVEL, ++ level + 1); ++ } ++ ++ if (unlikely(SCHED_RQ_RT == level && ++ level == preempt_level && ++ cpumask_and(&tmp, chk_mask, ++ &sched_rq_queued_masks[SCHED_RQ_RT]))) { ++ unsigned int cpu; ++ ++ for_each_cpu (cpu, &tmp) ++ if (p->prio < sched_rq_prio[cpu]) ++ return cpu; ++ } ++ ++ return best_mask_cpu(task_cpu(p), chk_mask); ++} ++ ++static inline int select_task_rq(struct task_struct *p) ++{ ++ cpumask_t chk_mask; ++ ++ if (unlikely(!cpumask_and(&chk_mask, &p->cpus_mask, cpu_online_mask))) 
++ return select_fallback_rq(task_cpu(p), p); ++ ++ /* Check IDLE tasks suitable to run normal priority */ ++ if (idleprio_task(p)) { ++ if (idleprio_suitable(p)) { ++ p->prio = p->normal_prio; ++ update_task_priodl(p); ++ return task_preemptible_rq_idle(p, &chk_mask); ++ } ++ p->prio = NORMAL_PRIO; ++ update_task_priodl(p); ++ } ++ ++ return task_preemptible_rq(p, &chk_mask, ++ task_running_policy_level(p, this_rq())); ++} ++#else /* CONFIG_SMP */ ++static inline int select_task_rq(struct task_struct *p) ++{ ++ return 0; ++} ++#endif /* CONFIG_SMP */ ++ ++static void ++ttwu_stat(struct task_struct *p, int cpu, int wake_flags) ++{ ++ struct rq *rq; ++ ++ if (!schedstat_enabled()) ++ return; ++ ++ rq= this_rq(); ++ ++#ifdef CONFIG_SMP ++ if (cpu == rq->cpu) ++ __schedstat_inc(rq->ttwu_local); ++ else { ++ /** PDS ToDo: ++ * How to do ttwu_wake_remote ++ */ ++ } ++#endif /* CONFIG_SMP */ ++ ++ __schedstat_inc(rq->ttwu_count); ++} ++ ++/* ++ * Mark the task runnable and perform wakeup-preemption. ++ */ ++static inline void ++ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags) ++{ ++ p->state = TASK_RUNNING; ++ trace_sched_wakeup(p); ++} ++ ++static inline void ++ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags) ++{ ++#ifdef CONFIG_SMP ++ if (p->sched_contributes_to_load) ++ rq->nr_uninterruptible--; ++#endif ++ ++ activate_task(p, rq); ++ ttwu_do_wakeup(rq, p, 0); ++} ++ ++static int ttwu_remote(struct task_struct *p, int wake_flags) ++{ ++ struct rq *rq; ++ raw_spinlock_t *lock; ++ int ret = 0; ++ ++ rq = __task_access_lock(p, &lock); ++ if (task_on_rq_queued(p)) { ++ ttwu_do_wakeup(rq, p, wake_flags); ++ ret = 1; ++ } ++ __task_access_unlock(p, lock); ++ ++ return ret; ++} ++ ++/* ++ * Notes on Program-Order guarantees on SMP systems. ++ * ++ * MIGRATION ++ * ++ * The basic program-order guarantee on SMP systems is that when a task [t] ++ * migrates, all its activity on its old CPU [c0] happens-before any subsequent ++ * execution on its new CPU [c1]. ++ * ++ * For migration (of runnable tasks) this is provided by the following means: ++ * ++ * A) UNLOCK of the rq(c0)->lock scheduling out task t ++ * B) migration for t is required to synchronize *both* rq(c0)->lock and ++ * rq(c1)->lock (if not at the same time, then in that order). ++ * C) LOCK of the rq(c1)->lock scheduling in task ++ * ++ * Transitivity guarantees that B happens after A and C after B. ++ * Note: we only require RCpc transitivity. ++ * Note: the CPU doing B need not be c0 or c1 ++ * ++ * Example: ++ * ++ * CPU0 CPU1 CPU2 ++ * ++ * LOCK rq(0)->lock ++ * sched-out X ++ * sched-in Y ++ * UNLOCK rq(0)->lock ++ * ++ * LOCK rq(0)->lock // orders against CPU0 ++ * dequeue X ++ * UNLOCK rq(0)->lock ++ * ++ * LOCK rq(1)->lock ++ * enqueue X ++ * UNLOCK rq(1)->lock ++ * ++ * LOCK rq(1)->lock // orders against CPU2 ++ * sched-out Z ++ * sched-in X ++ * UNLOCK rq(1)->lock ++ * ++ * ++ * BLOCKING -- aka. SLEEP + WAKEUP ++ * ++ * For blocking we (obviously) need to provide the same guarantee as for ++ * migration. However the means are completely different as there is no lock ++ * chain to provide order. 
Instead we do: ++ * ++ * 1) smp_store_release(X->on_cpu, 0) ++ * 2) smp_cond_load_acquire(!X->on_cpu) ++ * ++ * Example: ++ * ++ * CPU0 (schedule) CPU1 (try_to_wake_up) CPU2 (schedule) ++ * ++ * LOCK rq(0)->lock LOCK X->pi_lock ++ * dequeue X ++ * sched-out X ++ * smp_store_release(X->on_cpu, 0); ++ * ++ * smp_cond_load_acquire(&X->on_cpu, !VAL); ++ * X->state = WAKING ++ * set_task_cpu(X,2) ++ * ++ * LOCK rq(2)->lock ++ * enqueue X ++ * X->state = RUNNING ++ * UNLOCK rq(2)->lock ++ * ++ * LOCK rq(2)->lock // orders against CPU1 ++ * sched-out Z ++ * sched-in X ++ * UNLOCK rq(2)->lock ++ * ++ * UNLOCK X->pi_lock ++ * UNLOCK rq(0)->lock ++ * ++ * ++ * However; for wakeups there is a second guarantee we must provide, namely we ++ * must observe the state that lead to our wakeup. That is, not only must our ++ * task observe its own prior state, it must also observe the stores prior to ++ * its wakeup. ++ * ++ * This means that any means of doing remote wakeups must order the CPU doing ++ * the wakeup against the CPU the task is going to end up running on. This, ++ * however, is already required for the regular Program-Order guarantee above, ++ * since the waking CPU is the one issueing the ACQUIRE (smp_cond_load_acquire). ++ * ++ */ ++ ++/*** ++ * try_to_wake_up - wake up a thread ++ * @p: the thread to be awakened ++ * @state: the mask of task states that can be woken ++ * @wake_flags: wake modifier flags (WF_*) ++ * ++ * Put it on the run-queue if it's not already there. The "current" ++ * thread is always on the run-queue (except when the actual ++ * re-schedule is in progress), and as such you're allowed to do ++ * the simpler "current->state = TASK_RUNNING" to mark yourself ++ * runnable without the overhead of this. ++ * ++ * Return: %true if @p was woken up, %false if it was already running. ++ * or @state didn't match @p's state. ++ */ ++static int try_to_wake_up(struct task_struct *p, unsigned int state, ++ int wake_flags) ++{ ++ unsigned long flags; ++ struct rq *rq; ++ int cpu, success = 0; ++ ++ /* ++ * If we are going to wake up a thread waiting for CONDITION we ++ * need to ensure that CONDITION=1 done by the caller can not be ++ * reordered with p->state check below. This pairs with mb() in ++ * set_current_state() the waiting thread does. ++ */ ++ raw_spin_lock_irqsave(&p->pi_lock, flags); ++ smp_mb__after_spinlock(); ++ if (!(p->state & state)) ++ goto out; ++ ++ trace_sched_waking(p); ++ ++ /* We're going to change ->state: */ ++ success = 1; ++ cpu = task_cpu(p); ++ ++ /* ++ * Ensure we load p->on_rq _after_ p->state, otherwise it would ++ * be possible to, falsely, observe p->on_rq == 0 and get stuck ++ * in smp_cond_load_acquire() below. ++ * ++ * flush_smp_call_function_from_idle() try_to_wake_up() ++ * STORE p->on_rq = 1 LOAD p->state ++ * UNLOCK rq->lock ++ * ++ * __schedule() (switch to task 'p') ++ * LOCK rq->lock smp_rmb(); ++ * smp_mb__after_spinlock(); ++ * UNLOCK rq->lock ++ * ++ * [task p] ++ * STORE p->state = UNINTERRUPTIBLE LOAD p->on_rq ++ * ++ * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in ++ * __schedule(). See the comment for smp_mb__after_spinlock(). ++ */ ++ smp_rmb(); ++ if (p->on_rq && ttwu_remote(p, wake_flags)) ++ goto stat; ++ ++#ifdef CONFIG_SMP ++ /* ++ * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be ++ * possible to, falsely, observe p->on_cpu == 0. ++ * ++ * One must be running (->on_cpu == 1) in order to remove oneself ++ * from the runqueue. 
++ * ++ * __schedule() (switch to task 'p') try_to_wake_up() ++ * STORE p->on_cpu = 1 LOAD p->on_rq ++ * UNLOCK rq->lock ++ * ++ * __schedule() (put 'p' to sleep) ++ * LOCK rq->lock smp_rmb(); ++ * smp_mb__after_spinlock(); ++ * STORE p->on_rq = 0 LOAD p->on_cpu ++ * ++ * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in ++ * __schedule(). See the comment for smp_mb__after_spinlock(). ++ */ ++ smp_rmb(); ++ ++ /* ++ * If the owning (remote) CPU is still in the middle of schedule() with ++ * this task as prev, wait until its done referencing the task. ++ * ++ * Pairs with the smp_store_release() in finish_task(). ++ * ++ * This ensures that tasks getting woken will be fully ordered against ++ * their previous state and preserve Program Order. ++ */ ++ smp_cond_load_acquire(&p->on_cpu, !VAL); ++ ++ p->sched_contributes_to_load = !!task_contributes_to_load(p); ++ p->state = TASK_WAKING; ++ ++ if (p->in_iowait) { ++ delayacct_blkio_end(p); ++ atomic_dec(&task_rq(p)->nr_iowait); ++ } ++ ++ if (SCHED_ISO == p->policy && ISO_PRIO != p->prio) { ++ p->prio = ISO_PRIO; ++ p->deadline = 0UL; ++ update_task_priodl(p); ++ } ++ ++ cpu = select_task_rq(p); ++ ++ if (cpu != task_cpu(p)) { ++ wake_flags |= WF_MIGRATED; ++ psi_ttwu_dequeue(p); ++ set_task_cpu(p, cpu); ++ } ++#else /* CONFIG_SMP */ ++ if (p->in_iowait) { ++ delayacct_blkio_end(p); ++ atomic_dec(&task_rq(p)->nr_iowait); ++ } ++#endif ++ ++ rq = cpu_rq(cpu); ++ raw_spin_lock(&rq->lock); ++ ++ update_rq_clock(rq); ++ ttwu_do_activate(rq, p, wake_flags); ++ check_preempt_curr(rq, p); ++ ++ raw_spin_unlock(&rq->lock); ++ ++stat: ++ ttwu_stat(p, cpu, wake_flags); ++out: ++ raw_spin_unlock_irqrestore(&p->pi_lock, flags); ++ ++ return success; ++} ++ ++/** ++ * try_invoke_on_locked_down_task - Invoke a function on task in fixed state ++ * @p: Process for which the function is to be invoked. ++ * @func: Function to invoke. ++ * @arg: Argument to function. ++ * ++ * If the specified task can be quickly locked into a definite state ++ * (either sleeping or on a given runqueue), arrange to keep it in that ++ * state while invoking @func(@arg). This function can use ->on_rq and ++ * task_curr() to work out what the state is, if required. Given that ++ * @func can be invoked with a runqueue lock held, it had better be quite ++ * lightweight. ++ * ++ * Returns: ++ * @false if the task slipped out from under the locks. ++ * @true if the task was locked onto a runqueue or is sleeping. ++ * However, @func can override this by returning @false. ++ */ ++bool try_invoke_on_locked_down_task(struct task_struct *p, bool (*func)(struct task_struct *t, void *arg), void *arg) ++{ ++ bool ret = false; ++ struct rq_flags rf; ++ struct rq *rq; ++ ++ lockdep_assert_irqs_enabled(); ++ raw_spin_lock_irq(&p->pi_lock); ++ if (p->on_rq) { ++ rq = __task_rq_lock(p, &rf); ++ if (task_rq(p) == rq) ++ ret = func(p, arg); ++ rq_unlock(rq, &rf); ++ } else { ++ switch (p->state) { ++ case TASK_RUNNING: ++ case TASK_WAKING: ++ break; ++ default: ++ smp_rmb(); // See smp_rmb() comment in try_to_wake_up(). ++ if (!p->on_rq) ++ ret = func(p, arg); ++ } ++ } ++ raw_spin_unlock_irq(&p->pi_lock); ++ return ret; ++} ++ ++/** ++ * wake_up_process - Wake up a specific process ++ * @p: The process to be woken up. ++ * ++ * Attempt to wake up the nominated process and move it to the set of runnable ++ * processes. ++ * ++ * Return: 1 if the process was woken up, 0 if it was already running. ++ * ++ * This function executes a full memory barrier before accessing the task state. 
++ */ ++int wake_up_process(struct task_struct *p) ++{ ++ return try_to_wake_up(p, TASK_NORMAL, 0); ++} ++EXPORT_SYMBOL(wake_up_process); ++ ++int wake_up_state(struct task_struct *p, unsigned int state) ++{ ++ return try_to_wake_up(p, state, 0); ++} ++ ++/* ++ * Perform scheduler related setup for a newly forked process p. ++ * p is forked by current. ++ */ ++int sched_fork(unsigned long __maybe_unused clone_flags, struct task_struct *p) ++{ ++ unsigned long flags; ++ int cpu = get_cpu(); ++ struct rq *rq = this_rq(); ++ ++#ifdef CONFIG_PREEMPT_NOTIFIERS ++ INIT_HLIST_HEAD(&p->preempt_notifiers); ++#endif ++ /* Should be reset in fork.c but done here for ease of PDS patching */ ++ p->on_cpu = ++ p->on_rq = ++ p->utime = ++ p->stime = ++ p->sched_time = 0; ++ ++ p->sl_level = pds_skiplist_random_level(p); ++ INIT_SKIPLIST_NODE(&p->sl_node); ++ ++#ifdef CONFIG_COMPACTION ++ p->capture_control = NULL; ++#endif ++ ++ /* ++ * We mark the process as NEW here. This guarantees that ++ * nobody will actually run it, and a signal or other external ++ * event cannot wake it up and insert it on the runqueue either. ++ */ ++ p->state = TASK_NEW; ++ ++ /* ++ * Make sure we do not leak PI boosting priority to the child. ++ */ ++ p->prio = current->normal_prio; ++ ++ /* ++ * Revert to default priority/policy on fork if requested. ++ */ ++ if (unlikely(p->sched_reset_on_fork)) { ++ if (task_has_rt_policy(p)) { ++ p->policy = SCHED_NORMAL; ++ p->static_prio = NICE_TO_PRIO(0); ++ p->rt_priority = 0; ++ } else if (PRIO_TO_NICE(p->static_prio) < 0) ++ p->static_prio = NICE_TO_PRIO(0); ++ ++ p->prio = p->normal_prio = normal_prio(p); ++ ++ /* ++ * We don't need the reset flag anymore after the fork. It has ++ * fulfilled its duty: ++ */ ++ p->sched_reset_on_fork = 0; ++ } ++ ++ /* ++ * Share the timeslice between parent and child, thus the ++ * total amount of pending timeslices in the system doesn't change, ++ * resulting in more scheduling fairness. ++ */ ++ raw_spin_lock_irqsave(&rq->lock, flags); ++ rq->curr->time_slice /= 2; ++ p->time_slice = rq->curr->time_slice; ++#ifdef CONFIG_SCHED_HRTICK ++ hrtick_start(rq, US_TO_NS(rq->curr->time_slice)); ++#endif ++ ++ if (p->time_slice < RESCHED_US) { ++ update_rq_clock(rq); ++ time_slice_expired(p, rq); ++ resched_curr(rq); ++ } else ++ update_task_priodl(p); ++ raw_spin_unlock_irqrestore(&rq->lock, flags); ++ ++ /* ++ * The child is not yet in the pid-hash so no cgroup attach races, ++ * and the cgroup is pinned to this child due to cgroup_fork() ++ * is ran before sched_fork(). ++ * ++ * Silence PROVE_RCU. ++ */ ++ raw_spin_lock_irqsave(&p->pi_lock, flags); ++ /* ++ * We're setting the CPU for the first time, we don't migrate, ++ * so use __set_task_cpu(). 
++ */ ++ __set_task_cpu(p, cpu); ++ raw_spin_unlock_irqrestore(&p->pi_lock, flags); ++ ++#ifdef CONFIG_SCHED_INFO ++ if (unlikely(sched_info_on())) ++ memset(&p->sched_info, 0, sizeof(p->sched_info)); ++#endif ++ init_task_preempt_count(p); ++ ++ put_cpu(); ++ return 0; ++} ++ ++#ifdef CONFIG_SCHEDSTATS ++ ++DEFINE_STATIC_KEY_FALSE(sched_schedstats); ++static bool __initdata __sched_schedstats = false; ++ ++static void set_schedstats(bool enabled) ++{ ++ if (enabled) ++ static_branch_enable(&sched_schedstats); ++ else ++ static_branch_disable(&sched_schedstats); ++} ++ ++void force_schedstat_enabled(void) ++{ ++ if (!schedstat_enabled()) { ++ pr_info("kernel profiling enabled schedstats, disable via kernel.sched_schedstats.\n"); ++ static_branch_enable(&sched_schedstats); ++ } ++} ++ ++static int __init setup_schedstats(char *str) ++{ ++ int ret = 0; ++ if (!str) ++ goto out; ++ ++ /* ++ * This code is called before jump labels have been set up, so we can't ++ * change the static branch directly just yet. Instead set a temporary ++ * variable so init_schedstats() can do it later. ++ */ ++ if (!strcmp(str, "enable")) { ++ __sched_schedstats = true; ++ ret = 1; ++ } else if (!strcmp(str, "disable")) { ++ __sched_schedstats = false; ++ ret = 1; ++ } ++out: ++ if (!ret) ++ pr_warn("Unable to parse schedstats=\n"); ++ ++ return ret; ++} ++__setup("schedstats=", setup_schedstats); ++ ++static void __init init_schedstats(void) ++{ ++ set_schedstats(__sched_schedstats); ++} ++ ++#ifdef CONFIG_PROC_SYSCTL ++int sysctl_schedstats(struct ctl_table *table, int write, ++ void __user *buffer, size_t *lenp, loff_t *ppos) ++{ ++ struct ctl_table t; ++ int err; ++ int state = static_branch_likely(&sched_schedstats); ++ ++ if (write && !capable(CAP_SYS_ADMIN)) ++ return -EPERM; ++ ++ t = *table; ++ t.data = &state; ++ err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos); ++ if (err < 0) ++ return err; ++ if (write) ++ set_schedstats(state); ++ return err; ++} ++#endif /* CONFIG_PROC_SYSCTL */ ++#else /* !CONFIG_SCHEDSTATS */ ++static inline void init_schedstats(void) {} ++#endif /* CONFIG_SCHEDSTATS */ ++ ++/* ++ * wake_up_new_task - wake up a newly created task for the first time. ++ * ++ * This function will do some initial scheduler statistics housekeeping ++ * that must be done for every newly created context, then puts the task ++ * on the runqueue and wakes it. ++ */ ++void wake_up_new_task(struct task_struct *p) ++{ ++ unsigned long flags; ++ struct rq *rq; ++ ++ raw_spin_lock_irqsave(&p->pi_lock, flags); ++ ++ p->state = TASK_RUNNING; ++ ++ rq = cpu_rq(select_task_rq(p)); ++#ifdef CONFIG_SMP ++ /* ++ * Fork balancing, do it here and not earlier because: ++ * - cpus_mask can change in the fork path ++ * - any previously selected CPU might disappear through hotplug ++ * Use __set_task_cpu() to avoid calling sched_class::migrate_task_rq, ++ * as we're not fully set-up yet. 
++ */ ++ __set_task_cpu(p, cpu_of(rq)); ++#endif ++ ++ raw_spin_lock(&rq->lock); ++ ++ update_rq_clock(rq); ++ activate_task(p, rq); ++ trace_sched_wakeup_new(p); ++ check_preempt_curr(rq, p); ++ ++ raw_spin_unlock(&rq->lock); ++ raw_spin_unlock_irqrestore(&p->pi_lock, flags); ++} ++ ++#ifdef CONFIG_PREEMPT_NOTIFIERS ++ ++static DEFINE_STATIC_KEY_FALSE(preempt_notifier_key); ++ ++void preempt_notifier_inc(void) ++{ ++ static_branch_inc(&preempt_notifier_key); ++} ++EXPORT_SYMBOL_GPL(preempt_notifier_inc); ++ ++void preempt_notifier_dec(void) ++{ ++ static_branch_dec(&preempt_notifier_key); ++} ++EXPORT_SYMBOL_GPL(preempt_notifier_dec); ++ ++/** ++ * preempt_notifier_register - tell me when current is being preempted & rescheduled ++ * @notifier: notifier struct to register ++ */ ++void preempt_notifier_register(struct preempt_notifier *notifier) ++{ ++ if (!static_branch_unlikely(&preempt_notifier_key)) ++ WARN(1, "registering preempt_notifier while notifiers disabled\n"); ++ ++ hlist_add_head(¬ifier->link, ¤t->preempt_notifiers); ++} ++EXPORT_SYMBOL_GPL(preempt_notifier_register); ++ ++/** ++ * preempt_notifier_unregister - no longer interested in preemption notifications ++ * @notifier: notifier struct to unregister ++ * ++ * This is *not* safe to call from within a preemption notifier. ++ */ ++void preempt_notifier_unregister(struct preempt_notifier *notifier) ++{ ++ hlist_del(¬ifier->link); ++} ++EXPORT_SYMBOL_GPL(preempt_notifier_unregister); ++ ++static void __fire_sched_in_preempt_notifiers(struct task_struct *curr) ++{ ++ struct preempt_notifier *notifier; ++ ++ hlist_for_each_entry(notifier, &curr->preempt_notifiers, link) ++ notifier->ops->sched_in(notifier, raw_smp_processor_id()); ++} ++ ++static __always_inline void fire_sched_in_preempt_notifiers(struct task_struct *curr) ++{ ++ if (static_branch_unlikely(&preempt_notifier_key)) ++ __fire_sched_in_preempt_notifiers(curr); ++} ++ ++static void ++__fire_sched_out_preempt_notifiers(struct task_struct *curr, ++ struct task_struct *next) ++{ ++ struct preempt_notifier *notifier; ++ ++ hlist_for_each_entry(notifier, &curr->preempt_notifiers, link) ++ notifier->ops->sched_out(notifier, next); ++} ++ ++static __always_inline void ++fire_sched_out_preempt_notifiers(struct task_struct *curr, ++ struct task_struct *next) ++{ ++ if (static_branch_unlikely(&preempt_notifier_key)) ++ __fire_sched_out_preempt_notifiers(curr, next); ++} ++ ++#else /* !CONFIG_PREEMPT_NOTIFIERS */ ++ ++static inline void fire_sched_in_preempt_notifiers(struct task_struct *curr) ++{ ++} ++ ++static inline void ++fire_sched_out_preempt_notifiers(struct task_struct *curr, ++ struct task_struct *next) ++{ ++} ++ ++#endif /* CONFIG_PREEMPT_NOTIFIERS */ ++ ++static inline void prepare_task(struct task_struct *next) ++{ ++ /* ++ * Claim the task as running, we do this before switching to it ++ * such that any running task will have this set. ++ */ ++ next->on_cpu = 1; ++} ++ ++static inline void finish_task(struct task_struct *prev) ++{ ++#ifdef CONFIG_SMP ++ /* ++ * After ->on_cpu is cleared, the task can be moved to a different CPU. ++ * We must ensure this doesn't happen until the switch is completely ++ * finished. ++ * ++ * In particular, the load of prev->state in finish_task_switch() must ++ * happen before this. ++ * ++ * Pairs with the smp_cond_load_acquire() in try_to_wake_up(). 
++ */ ++ smp_store_release(&prev->on_cpu, 0); ++#else ++ prev->on_cpu = 0; ++#endif ++} ++ ++static inline void ++prepare_lock_switch(struct rq *rq, struct task_struct *next) ++{ ++ /* ++ * Since the runqueue lock will be released by the next ++ * task (which is an invalid locking op but in the case ++ * of the scheduler it's an obvious special-case), so we ++ * do an early lockdep release here: ++ */ ++ spin_release(&rq->lock.dep_map, _THIS_IP_); ++#ifdef CONFIG_DEBUG_SPINLOCK ++ /* this is a valid case when another task releases the spinlock */ ++ rq->lock.owner = next; ++#endif ++} ++ ++static inline void finish_lock_switch(struct rq *rq) ++{ ++ /* ++ * If we are tracking spinlock dependencies then we have to ++ * fix up the runqueue lock - which gets 'carried over' from ++ * prev into current: ++ */ ++ spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_); ++ raw_spin_unlock_irq(&rq->lock); ++} ++ ++/** ++ * prepare_task_switch - prepare to switch tasks ++ * @rq: the runqueue preparing to switch ++ * @next: the task we are going to switch to. ++ * ++ * This is called with the rq lock held and interrupts off. It must ++ * be paired with a subsequent finish_task_switch after the context ++ * switch. ++ * ++ * prepare_task_switch sets up locking and calls architecture specific ++ * hooks. ++ */ ++static inline void ++prepare_task_switch(struct rq *rq, struct task_struct *prev, ++ struct task_struct *next) ++{ ++ kcov_prepare_switch(prev); ++ sched_info_switch(rq, prev, next); ++ perf_event_task_sched_out(prev, next); ++ rseq_preempt(prev); ++ fire_sched_out_preempt_notifiers(prev, next); ++ prepare_task(next); ++ prepare_arch_switch(next); ++} ++ ++/** ++ * finish_task_switch - clean up after a task-switch ++ * @rq: runqueue associated with task-switch ++ * @prev: the thread we just switched away from. ++ * ++ * finish_task_switch must be called after the context switch, paired ++ * with a prepare_task_switch call before the context switch. ++ * finish_task_switch will reconcile locking set up by prepare_task_switch, ++ * and do any other architecture-specific cleanup actions. ++ * ++ * Note that we may have delayed dropping an mm in context_switch(). If ++ * so, we finish that here outside of the runqueue lock. (Doing it ++ * with the lock held can cause deadlocks; see schedule() for ++ * details.) ++ * ++ * The context switch have flipped the stack from under us and restored the ++ * local variables which were saved when this task called schedule() in the ++ * past. prev == current is still correct but we need to recalculate this_rq ++ * because prev may have moved to another CPU. ++ */ ++static struct rq *finish_task_switch(struct task_struct *prev) ++ __releases(rq->lock) ++{ ++ struct rq *rq = this_rq(); ++ struct mm_struct *mm = rq->prev_mm; ++ long prev_state; ++ ++ /* ++ * The previous task will have left us with a preempt_count of 2 ++ * because it left us after: ++ * ++ * schedule() ++ * preempt_disable(); // 1 ++ * __schedule() ++ * raw_spin_lock_irq(&rq->lock) // 2 ++ * ++ * Also, see FORK_PREEMPT_COUNT. ++ */ ++ if (WARN_ONCE(preempt_count() != 2*PREEMPT_DISABLE_OFFSET, ++ "corrupted preempt_count: %s/%d/0x%x\n", ++ current->comm, current->pid, preempt_count())) ++ preempt_count_set(FORK_PREEMPT_COUNT); ++ ++ rq->prev_mm = NULL; ++ ++ /* ++ * A task struct has one reference for the use as "current". ++ * If a task dies, then it sets TASK_DEAD in tsk->state and calls ++ * schedule one last time. 
The schedule call will never return, and ++ * the scheduled task must drop that reference. ++ * ++ * We must observe prev->state before clearing prev->on_cpu (in ++ * finish_task), otherwise a concurrent wakeup can get prev ++ * running on another CPU and we could rave with its RUNNING -> DEAD ++ * transition, resulting in a double drop. ++ */ ++ prev_state = prev->state; ++ vtime_task_switch(prev); ++ perf_event_task_sched_in(prev, current); ++ finish_task(prev); ++ finish_lock_switch(rq); ++ finish_arch_post_lock_switch(); ++ kcov_finish_switch(current); ++ ++ fire_sched_in_preempt_notifiers(current); ++ /* ++ * When switching through a kernel thread, the loop in ++ * membarrier_{private,global}_expedited() may have observed that ++ * kernel thread and not issued an IPI. It is therefore possible to ++ * schedule between user->kernel->user threads without passing though ++ * switch_mm(). Membarrier requires a barrier after storing to ++ * rq->curr, before returning to userspace, so provide them here: ++ * ++ * - a full memory barrier for {PRIVATE,GLOBAL}_EXPEDITED, implicitly ++ * provided by mmdrop(), ++ * - a sync_core for SYNC_CORE. ++ */ ++ if (mm) { ++ membarrier_mm_sync_core_before_usermode(mm); ++ mmdrop(mm); ++ } ++ if (unlikely(prev_state == TASK_DEAD)) { ++ /* ++ * Remove function-return probe instances associated with this ++ * task and put them back on the free list. ++ */ ++ kprobe_flush_task(prev); ++ ++ /* Task is done with its stack. */ ++ put_task_stack(prev); ++ ++ put_task_struct_rcu_user(prev); ++ } ++ ++ tick_nohz_task_switch(); ++ return rq; ++} ++ ++/** ++ * schedule_tail - first thing a freshly forked thread must call. ++ * @prev: the thread we just switched away from. ++ */ ++asmlinkage __visible void schedule_tail(struct task_struct *prev) ++ __releases(rq->lock) ++{ ++ struct rq *rq; ++ ++ /* ++ * New tasks start with FORK_PREEMPT_COUNT, see there and ++ * finish_task_switch() for details. ++ * ++ * finish_task_switch() will drop rq->lock() and lower preempt_count ++ * and the preempt_enable() will end up enabling preemption (on ++ * PREEMPT_COUNT kernels). ++ */ ++ ++ rq = finish_task_switch(prev); ++ preempt_enable(); ++ ++ if (current->set_child_tid) ++ put_user(task_pid_vnr(current), current->set_child_tid); ++ ++ calculate_sigpending(); ++} ++ ++/* ++ * context_switch - switch to the new MM and the new thread's register state. ++ */ ++static __always_inline struct rq * ++context_switch(struct rq *rq, struct task_struct *prev, ++ struct task_struct *next) ++{ ++ prepare_task_switch(rq, prev, next); ++ ++ /* ++ * For paravirt, this is coupled with an exit in switch_to to ++ * combine the page table reload and the switch backend into ++ * one hypercall. ++ */ ++ arch_start_context_switch(prev); ++ ++ /* ++ * kernel -> kernel lazy + transfer active ++ * user -> kernel lazy + mmgrab() active ++ * ++ * kernel -> user switch + mmdrop() active ++ * user -> user switch ++ */ ++ if (!next->mm) { // to kernel ++ enter_lazy_tlb(prev->active_mm, next); ++ ++ next->active_mm = prev->active_mm; ++ if (prev->mm) // from user ++ mmgrab(prev->active_mm); ++ else ++ prev->active_mm = NULL; ++ } else { // to user ++ membarrier_switch_mm(rq, prev->active_mm, next->mm); ++ /* ++ * sys_membarrier() requires an smp_mb() between setting ++ * rq->curr / membarrier_switch_mm() and returning to userspace. ++ * ++ * The below provides this either through switch_mm(), or in ++ * case 'prev->active_mm == next->mm' through ++ * finish_task_switch()'s mmdrop(). 
++ */ ++ switch_mm_irqs_off(prev->active_mm, next->mm, next); ++ ++ if (!prev->mm) { // from kernel ++ /* will mmdrop() in finish_task_switch(). */ ++ rq->prev_mm = prev->active_mm; ++ prev->active_mm = NULL; ++ } ++ } ++ ++ prepare_lock_switch(rq, next); ++ ++ /* Here we just switch the register state and the stack. */ ++ switch_to(prev, next, prev); ++ barrier(); ++ ++ return finish_task_switch(prev); ++} ++ ++/* ++ * nr_running, nr_uninterruptible and nr_context_switches: ++ * ++ * externally visible scheduler statistics: current number of runnable ++ * threads, total number of context switches performed since bootup. ++ */ ++unsigned long nr_running(void) ++{ ++ unsigned long i, sum = 0; ++ ++ for_each_online_cpu(i) ++ sum += cpu_rq(i)->nr_running; ++ ++ return sum; ++} ++ ++/* ++ * Check if only the current task is running on the CPU. ++ * ++ * Caution: this function does not check that the caller has disabled ++ * preemption, thus the result might have a time-of-check-to-time-of-use ++ * race. The caller is responsible to use it correctly, for example: ++ * ++ * - from a non-preemptible section (of course) ++ * ++ * - from a thread that is bound to a single CPU ++ * ++ * - in a loop with very short iterations (e.g. a polling loop) ++ */ ++bool single_task_running(void) ++{ ++ return raw_rq()->nr_running == 1; ++} ++EXPORT_SYMBOL(single_task_running); ++ ++unsigned long long nr_context_switches(void) ++{ ++ int i; ++ unsigned long long sum = 0; ++ ++ for_each_possible_cpu(i) ++ sum += cpu_rq(i)->nr_switches; ++ ++ return sum; ++} ++ ++/* ++ * Consumers of these two interfaces, like for example the cpuidle menu ++ * governor, are using nonsensical data. Preferring shallow idle state selection ++ * for a CPU that has IO-wait which might not even end up running the task when ++ * it does become runnable. ++ */ ++ ++unsigned long nr_iowait_cpu(int cpu) ++{ ++ return atomic_read(&cpu_rq(cpu)->nr_iowait); ++} ++ ++/* ++ * IO-wait accounting, and how its mostly bollocks (on SMP). ++ * ++ * The idea behind IO-wait account is to account the idle time that we could ++ * have spend running if it were not for IO. That is, if we were to improve the ++ * storage performance, we'd have a proportional reduction in IO-wait time. ++ * ++ * This all works nicely on UP, where, when a task blocks on IO, we account ++ * idle time as IO-wait, because if the storage were faster, it could've been ++ * running and we'd not be idle. ++ * ++ * This has been extended to SMP, by doing the same for each CPU. This however ++ * is broken. ++ * ++ * Imagine for instance the case where two tasks block on one CPU, only the one ++ * CPU will have IO-wait accounted, while the other has regular idle. Even ++ * though, if the storage were faster, both could've ran at the same time, ++ * utilising both CPUs. ++ * ++ * This means, that when looking globally, the current IO-wait accounting on ++ * SMP is a lower bound, by reason of under accounting. ++ * ++ * Worse, since the numbers are provided per CPU, they are sometimes ++ * interpreted per CPU, and that is nonsensical. A blocked task isn't strictly ++ * associated with any one particular CPU, it can wake to another CPU than it ++ * blocked on. This means the per CPU IO-wait number is meaningless. ++ * ++ * Task CPU affinities can make all that even more 'interesting'. 
++ */ ++ ++unsigned long nr_iowait(void) ++{ ++ unsigned long i, sum = 0; ++ ++ for_each_possible_cpu(i) ++ sum += nr_iowait_cpu(i); ++ ++ return sum; ++} ++ ++DEFINE_PER_CPU(struct kernel_stat, kstat); ++DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat); ++ ++EXPORT_PER_CPU_SYMBOL(kstat); ++EXPORT_PER_CPU_SYMBOL(kernel_cpustat); ++ ++static inline void pds_update_curr(struct rq *rq, struct task_struct *p) ++{ ++ s64 ns = rq->clock_task - p->last_ran; ++ ++ p->sched_time += ns; ++ account_group_exec_runtime(p, ns); ++ ++ /* time_slice accounting is done in usecs to avoid overflow on 32bit */ ++ p->time_slice -= NS_TO_US(ns); ++ p->last_ran = rq->clock_task; ++} ++ ++/* ++ * Return accounted runtime for the task. ++ * Return separately the current's pending runtime that have not been ++ * accounted yet. ++ */ ++unsigned long long task_sched_runtime(struct task_struct *p) ++{ ++ unsigned long flags; ++ struct rq *rq; ++ raw_spinlock_t *lock; ++ u64 ns; ++ ++#if defined(CONFIG_64BIT) && defined(CONFIG_SMP) ++ /* ++ * 64-bit doesn't need locks to atomically read a 64-bit value. ++ * So we have a optimization chance when the task's delta_exec is 0. ++ * Reading ->on_cpu is racy, but this is ok. ++ * ++ * If we race with it leaving CPU, we'll take a lock. So we're correct. ++ * If we race with it entering CPU, unaccounted time is 0. This is ++ * indistinguishable from the read occurring a few cycles earlier. ++ * If we see ->on_cpu without ->on_rq, the task is leaving, and has ++ * been accounted, so we're correct here as well. ++ */ ++ if (!p->on_cpu || !task_on_rq_queued(p)) ++ return tsk_seruntime(p); ++#endif ++ ++ rq = task_access_lock_irqsave(p, &lock, &flags); ++ /* ++ * Must be ->curr _and_ ->on_rq. If dequeued, we would ++ * project cycles that may never be accounted to this ++ * thread, breaking clock_gettime(). ++ */ ++ if (p == rq->curr && task_on_rq_queued(p)) { ++ update_rq_clock(rq); ++ pds_update_curr(rq, p); ++ } ++ ns = tsk_seruntime(p); ++ task_access_unlock_irqrestore(p, lock, &flags); ++ ++ return ns; ++} ++ ++/* This manages tasks that have run out of timeslice during a scheduler_tick */ ++static inline void pds_scheduler_task_tick(struct rq *rq) ++{ ++ struct task_struct *p = rq->curr; ++ ++ if (is_idle_task(p)) ++ return; ++ ++ pds_update_curr(rq, p); ++ ++ cpufreq_update_util(rq, 0); ++ ++ /* ++ * Tasks that were scheduled in the first half of a tick are not ++ * allowed to run into the 2nd half of the next tick if they will ++ * run out of time slice in the interim. Otherwise, if they have ++ * less than RESCHED_US μs of time slice left they will be rescheduled. ++ */ ++ if (p->time_slice - rq->dither >= RESCHED_US) ++ return; ++ ++ /** ++ * p->time_slice < RESCHED_US. 
We will modify task_struct under
++ * rq lock as p is rq->curr
++ */
++ __set_tsk_resched(p);
++}
++
++#ifdef CONFIG_SMP
++
++#ifdef CONFIG_SCHED_SMT
++static int active_load_balance_cpu_stop(void *data)
++{
++ struct rq *rq = this_rq();
++ struct task_struct *p = data;
++ int cpu;
++ unsigned long flags;
++
++ local_irq_save(flags);
++
++ raw_spin_lock(&p->pi_lock);
++ raw_spin_lock(&rq->lock);
++
++ rq->active_balance = 0;
++ /*
++ * _something_ may have changed the task, double check again
++ */
++ if (task_on_rq_queued(p) && task_rq(p) == rq &&
++ (cpu = cpumask_any_and(&p->cpus_mask, &sched_cpu_sg_idle_mask)) < nr_cpu_ids)
++ rq = __migrate_task(rq, p, cpu);
++
++ raw_spin_unlock(&rq->lock);
++ raw_spin_unlock(&p->pi_lock);
++
++ local_irq_restore(flags);
++
++ return 0;
++}
++
++/* pds_sg_balance_trigger - trigger sibling group balance for @cpu */
++static void pds_sg_balance_trigger(const int cpu)
++{
++ struct rq *rq = cpu_rq(cpu);
++ unsigned long flags;
++ struct task_struct *curr;
++
++ if (!raw_spin_trylock_irqsave(&rq->lock, flags))
++ return;
++ curr = rq->curr;
++ if (!is_idle_task(curr) &&
++ cpumask_intersects(&curr->cpus_mask, &sched_cpu_sg_idle_mask)) {
++ int active_balance = 0;
++
++ if (likely(!rq->active_balance)) {
++ rq->active_balance = 1;
++ active_balance = 1;
++ }
++
++ raw_spin_unlock_irqrestore(&rq->lock, flags);
++
++ if (likely(active_balance))
++ stop_one_cpu_nowait(cpu, active_load_balance_cpu_stop,
++ curr, &rq->active_balance_work);
++ } else
++ raw_spin_unlock_irqrestore(&rq->lock, flags);
++}
++
++/*
++ * pds_sg_balance_check - sibling group balance check for run queue @rq
++ */
++static inline void pds_sg_balance_check(const struct rq *rq)
++{
++ cpumask_t chk;
++ int i;
++
++ /* Only online cpu will do sg balance checking */
++ if (unlikely(!rq->online))
++ return;
++
++ /* Only cpu in sibling idle group will do the checking */
++ if (!cpumask_test_cpu(cpu_of(rq), &sched_cpu_sg_idle_mask))
++ return;
++
++ /* Find potential cpus which can migrate the currently running task */
++ if (!cpumask_andnot(&chk, &sched_rq_pending_masks[SCHED_RQ_EMPTY],
++ &sched_rq_queued_masks[SCHED_RQ_EMPTY]))
++ return;
++
++ for_each_cpu(i, &chk) {
++ /* skip the cpu which has idle sibling cpu */
++ if (cpumask_test_cpu(per_cpu(sched_sibling_cpu, i),
++ &sched_rq_queued_masks[SCHED_RQ_EMPTY]))
++ continue;
++ pds_sg_balance_trigger(i);
++ }
++}
++DEFINE_PER_CPU(unsigned long, thermal_pressure);
++
++void arch_set_thermal_pressure(struct cpumask *cpus,
++ unsigned long th_pressure)
++{
++ int cpu;
++
++ for_each_cpu(cpu, cpus)
++ WRITE_ONCE(per_cpu(thermal_pressure, cpu), th_pressure);
++}
++#endif /* CONFIG_SCHED_SMT */
++#endif /* CONFIG_SMP */
++
++/*
++ * This function gets called by the timer code, with HZ frequency.
++ * We call it with interrupts disabled.
++ */
++void scheduler_tick(void)
++{
++ int cpu __maybe_unused = smp_processor_id();
++ struct rq *rq = cpu_rq(cpu);
++
++ arch_scale_freq_tick();
++ sched_clock_tick();
++
++ raw_spin_lock(&rq->lock);
++ update_rq_clock(rq);
++
++ pds_scheduler_task_tick(rq);
++ update_sched_rq_queued_masks_normal(rq);
++ calc_global_load_tick(rq);
++ psi_task_tick(rq);
++
++ rq->last_tick = rq->clock;
++ raw_spin_unlock(&rq->lock);
++
++ perf_event_task_tick();
++}
++
++#ifdef CONFIG_NO_HZ_FULL
++struct tick_work {
++ int cpu;
++ atomic_t state;
++ struct delayed_work work;
++};
++/* Values for ->state, see diagram below.
*/ ++#define TICK_SCHED_REMOTE_OFFLINE 0 ++#define TICK_SCHED_REMOTE_OFFLINING 1 ++#define TICK_SCHED_REMOTE_RUNNING 2 ++ ++/* ++ * State diagram for ->state: ++ * ++ * ++ * TICK_SCHED_REMOTE_OFFLINE ++ * | ^ ++ * | | ++ * | | sched_tick_remote() ++ * | | ++ * | | ++ * +--TICK_SCHED_REMOTE_OFFLINING ++ * | ^ ++ * | | ++ * sched_tick_start() | | sched_tick_stop() ++ * | | ++ * V | ++ * TICK_SCHED_REMOTE_RUNNING ++ * ++ * ++ * Other transitions get WARN_ON_ONCE(), except that sched_tick_remote() ++ * and sched_tick_start() are happy to leave the state in RUNNING. ++ */ ++ ++static struct tick_work __percpu *tick_work_cpu; ++ ++static void sched_tick_remote(struct work_struct *work) ++{ ++ struct delayed_work *dwork = to_delayed_work(work); ++ struct tick_work *twork = container_of(dwork, struct tick_work, work); ++ int cpu = twork->cpu; ++ struct rq *rq = cpu_rq(cpu); ++ struct task_struct *curr; ++ unsigned long flags; ++ u64 delta; ++ int os; ++ ++ /* ++ * Handle the tick only if it appears the remote CPU is running in full ++ * dynticks mode. The check is racy by nature, but missing a tick or ++ * having one too much is no big deal because the scheduler tick updates ++ * statistics and checks timeslices in a time-independent way, regardless ++ * of when exactly it is running. ++ */ ++ if (!tick_nohz_tick_stopped_cpu(cpu)) ++ goto out_requeue; ++ ++ raw_spin_lock_irqsave(&rq->lock, flags); ++ curr = rq->curr; ++ if (cpu_is_offline(cpu)) ++ goto out_unlock; ++ ++ update_rq_clock(rq); ++ if (!is_idle_task(curr)) { ++ /* ++ * Make sure the next tick runs within a reasonable ++ * amount of time. ++ */ ++ delta = rq_clock_task(rq) - curr->last_ran; ++ WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3); ++ } ++ pds_scheduler_task_tick(rq); ++ update_sched_rq_queued_masks_normal(rq); ++ calc_load_nohz_remote(rq); ++ ++out_unlock: ++ raw_spin_unlock_irqrestore(&rq->lock, flags); ++ ++out_requeue: ++ /* ++ * Run the remote tick once per second (1Hz). This arbitrary ++ * frequency is large enough to avoid overload but short enough ++ * to keep scheduler internal stats reasonably up to date. But ++ * first update state to reflect hotplug activity if required. 
++ */ ++ os = atomic_fetch_add_unless(&twork->state, -1, TICK_SCHED_REMOTE_RUNNING); ++ WARN_ON_ONCE(os == TICK_SCHED_REMOTE_OFFLINE); ++ if (os == TICK_SCHED_REMOTE_RUNNING) ++ queue_delayed_work(system_unbound_wq, dwork, HZ); ++} ++ ++static void sched_tick_start(int cpu) ++{ ++ int os; ++ struct tick_work *twork; ++ ++ if (housekeeping_cpu(cpu, HK_FLAG_TICK)) ++ return; ++ ++ WARN_ON_ONCE(!tick_work_cpu); ++ ++ twork = per_cpu_ptr(tick_work_cpu, cpu); ++ os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_RUNNING); ++ WARN_ON_ONCE(os == TICK_SCHED_REMOTE_RUNNING); ++ if (os == TICK_SCHED_REMOTE_OFFLINE) { ++ twork->cpu = cpu; ++ INIT_DELAYED_WORK(&twork->work, sched_tick_remote); ++ queue_delayed_work(system_unbound_wq, &twork->work, HZ); ++ } ++} ++ ++#ifdef CONFIG_HOTPLUG_CPU ++static void sched_tick_stop(int cpu) ++{ ++ struct tick_work *twork; ++ ++ if (housekeeping_cpu(cpu, HK_FLAG_TICK)) ++ return; ++ ++ WARN_ON_ONCE(!tick_work_cpu); ++ ++ twork = per_cpu_ptr(tick_work_cpu, cpu); ++ cancel_delayed_work_sync(&twork->work); ++} ++#endif /* CONFIG_HOTPLUG_CPU */ ++ ++int __init sched_tick_offload_init(void) ++{ ++ tick_work_cpu = alloc_percpu(struct tick_work); ++ BUG_ON(!tick_work_cpu); ++ return 0; ++} ++ ++#else /* !CONFIG_NO_HZ_FULL */ ++static inline void sched_tick_start(int cpu) { } ++static inline void sched_tick_stop(int cpu) { } ++#endif ++ ++#if defined(CONFIG_PREEMPTION) && (defined(CONFIG_DEBUG_PREEMPT) || \ ++ defined(CONFIG_PREEMPT_TRACER)) ++/* ++ * If the value passed in is equal to the current preempt count ++ * then we just disabled preemption. Start timing the latency. ++ */ ++static inline void preempt_latency_start(int val) ++{ ++ if (preempt_count() == val) { ++ unsigned long ip = get_lock_parent_ip(); ++#ifdef CONFIG_DEBUG_PREEMPT ++ current->preempt_disable_ip = ip; ++#endif ++ trace_preempt_off(CALLER_ADDR0, ip); ++ } ++} ++ ++void preempt_count_add(int val) ++{ ++#ifdef CONFIG_DEBUG_PREEMPT ++ /* ++ * Underflow? ++ */ ++ if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0))) ++ return; ++#endif ++ __preempt_count_add(val); ++#ifdef CONFIG_DEBUG_PREEMPT ++ /* ++ * Spinlock count overflowing soon? ++ */ ++ DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >= ++ PREEMPT_MASK - 10); ++#endif ++ preempt_latency_start(val); ++} ++EXPORT_SYMBOL(preempt_count_add); ++NOKPROBE_SYMBOL(preempt_count_add); ++ ++/* ++ * If the value passed in equals to the current preempt count ++ * then we just enabled preemption. Stop timing the latency. ++ */ ++static inline void preempt_latency_stop(int val) ++{ ++ if (preempt_count() == val) ++ trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip()); ++} ++ ++void preempt_count_sub(int val) ++{ ++#ifdef CONFIG_DEBUG_PREEMPT ++ /* ++ * Underflow? ++ */ ++ if (DEBUG_LOCKS_WARN_ON(val > preempt_count())) ++ return; ++ /* ++ * Is the spinlock portion underflowing? ++ */ ++ if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) && ++ !(preempt_count() & PREEMPT_MASK))) ++ return; ++#endif ++ ++ preempt_latency_stop(val); ++ __preempt_count_sub(val); ++} ++EXPORT_SYMBOL(preempt_count_sub); ++NOKPROBE_SYMBOL(preempt_count_sub); ++ ++#else ++static inline void preempt_latency_start(int val) { } ++static inline void preempt_latency_stop(int val) { } ++#endif ++ ++/* ++ * Timeslices below RESCHED_US are considered as good as expired as there's no ++ * point rescheduling when there's so little time left. 
SCHED_BATCH tasks
++ * have been flagged as not latency sensitive and likely to be fully CPU
++ * bound so every time they're rescheduled they have their time_slice
++ * refilled, but get a new later deadline to have little effect on
++ * SCHED_NORMAL tasks.
++
++ */
++static inline void check_deadline(struct task_struct *p, struct rq *rq)
++{
++ if (rq->idle == p)
++ return;
++
++ pds_update_curr(rq, p);
++
++ if (p->time_slice < RESCHED_US) {
++ time_slice_expired(p, rq);
++ if (SCHED_ISO == p->policy && ISO_PRIO == p->prio) {
++ p->prio = NORMAL_PRIO;
++ p->deadline = rq->clock + task_deadline_diff(p);
++ update_task_priodl(p);
++ }
++ if (SCHED_FIFO != p->policy && task_on_rq_queued(p))
++ requeue_task(p, rq);
++ }
++}
++
++#ifdef CONFIG_SMP
++
++#define SCHED_RQ_NR_MIGRATION (32UL)
++/*
++ * Migrate pending tasks in @rq to @dest_cpu
++ * Will try to migrate the minimum of half of @rq nr_running tasks and
++ * SCHED_RQ_NR_MIGRATION to @dest_cpu
++ */
++static inline int
++migrate_pending_tasks(struct rq *rq, struct rq *dest_rq, int filter_prio)
++{
++ struct task_struct *p;
++ int dest_cpu = cpu_of(dest_rq);
++ int nr_migrated = 0;
++ int nr_tries = min((rq->nr_running + 1) / 2, SCHED_RQ_NR_MIGRATION);
++ struct skiplist_node *node = rq->sl_header.next[0];
++
++ while (nr_tries && node != &rq->sl_header) {
++ p = skiplist_entry(node, struct task_struct, sl_node);
++ node = node->next[0];
++
++ if (task_running(p))
++ continue;
++ if (p->prio >= filter_prio)
++ break;
++ if (cpumask_test_cpu(dest_cpu, &p->cpus_mask)) {
++ dequeue_task(p, rq, 0);
++ set_task_cpu(p, dest_cpu);
++ enqueue_task(p, dest_rq, 0);
++ nr_migrated++;
++ }
++ nr_tries--;
++ /* make a jump */
++ if (node == &rq->sl_header)
++ break;
++ node = node->next[0];
++ }
++
++ return nr_migrated;
++}
++
++static inline int
++take_queued_task_cpumask(struct rq *rq, cpumask_t *chk_mask, int filter_prio)
++{
++ int src_cpu;
++
++ for_each_cpu(src_cpu, chk_mask) {
++ int nr_migrated;
++ struct rq *src_rq = cpu_rq(src_cpu);
++
++ if (!do_raw_spin_trylock(&src_rq->lock)) {
++ if (PRIO_LIMIT == filter_prio)
++ continue;
++ return 0;
++ }
++ spin_acquire(&src_rq->lock.dep_map, SINGLE_DEPTH_NESTING, 1, _RET_IP_);
++
++ update_rq_clock(src_rq);
++ if ((nr_migrated = migrate_pending_tasks(src_rq, rq, filter_prio)))
++ cpufreq_update_this_cpu(rq, 0);
++
++ spin_release(&src_rq->lock.dep_map, _RET_IP_);
++ do_raw_spin_unlock(&src_rq->lock);
++
++ if (nr_migrated || PRIO_LIMIT != filter_prio)
++ return nr_migrated;
++ }
++ return 0;
++}
++
++static inline int take_other_rq_task(struct rq *rq, int cpu, int filter_prio)
++{
++ struct cpumask *affinity_mask, *end;
++ struct cpumask chk;
++
++ if (PRIO_LIMIT == filter_prio) {
++ cpumask_complement(&chk, &sched_rq_pending_masks[SCHED_RQ_EMPTY]);
++#ifdef CONFIG_SMT_NICE
++ {
++ /* also try to take IDLE priority tasks from smt suppressed cpu */
++ struct cpumask t;
++ if (cpumask_and(&t, &sched_smt_supressed_mask,
++ &sched_rq_queued_masks[SCHED_RQ_IDLE]))
++ cpumask_or(&chk, &chk, &t);
++ }
++#endif
++ } else if (NORMAL_PRIO == filter_prio) {
++ cpumask_or(&chk, &sched_rq_pending_masks[SCHED_RQ_RT],
++ &sched_rq_pending_masks[SCHED_RQ_ISO]);
++ } else if (IDLE_PRIO == filter_prio) {
++ cpumask_complement(&chk, &sched_rq_pending_masks[SCHED_RQ_EMPTY]);
++ cpumask_andnot(&chk, &chk, &sched_rq_pending_masks[SCHED_RQ_IDLE]);
++ } else
++ cpumask_copy(&chk, &sched_rq_pending_masks[SCHED_RQ_RT]);
++
++ if (cpumask_empty(&chk))
++ return 0;
++
++ affinity_mask = per_cpu(sched_cpu_llc_start_mask, cpu);
++
end = per_cpu(sched_cpu_affinity_chk_end_masks, cpu); ++ do { ++ struct cpumask tmp; ++ ++ if (cpumask_and(&tmp, &chk, affinity_mask) && ++ take_queued_task_cpumask(rq, &tmp, filter_prio)) ++ return 1; ++ } while (++affinity_mask < end); ++ ++ return 0; ++} ++#endif ++ ++static inline struct task_struct * ++choose_next_task(struct rq *rq, int cpu, struct task_struct *prev) ++{ ++ struct task_struct *next = rq_first_queued_task(rq); ++ ++#ifdef CONFIG_SMT_NICE ++ if (cpumask_test_cpu(cpu, &sched_smt_supressed_mask)) { ++ if (next->prio >= IDLE_PRIO) { ++ if (rq->online && ++ take_other_rq_task(rq, cpu, IDLE_PRIO)) ++ return rq_first_queued_task(rq); ++ return rq->idle; ++ } ++ } ++#endif ++ ++#ifdef CONFIG_SMP ++ if (likely(rq->online)) ++ if (take_other_rq_task(rq, cpu, next->prio)) { ++ resched_curr(rq); ++ return rq_first_queued_task(rq); ++ } ++#endif ++ return next; ++} ++ ++static inline unsigned long get_preempt_disable_ip(struct task_struct *p) ++{ ++#ifdef CONFIG_DEBUG_PREEMPT ++ return p->preempt_disable_ip; ++#else ++ return 0; ++#endif ++} ++ ++/* ++ * Print scheduling while atomic bug: ++ */ ++static noinline void __schedule_bug(struct task_struct *prev) ++{ ++ /* Save this before calling printk(), since that will clobber it */ ++ unsigned long preempt_disable_ip = get_preempt_disable_ip(current); ++ ++ if (oops_in_progress) ++ return; ++ ++ printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n", ++ prev->comm, prev->pid, preempt_count()); ++ ++ debug_show_held_locks(prev); ++ print_modules(); ++ if (irqs_disabled()) ++ print_irqtrace_events(prev); ++ if (IS_ENABLED(CONFIG_DEBUG_PREEMPT) ++ && in_atomic_preempt_off()) { ++ pr_err("Preemption disabled at:"); ++ print_ip_sym(KERN_ERR, preempt_disable_ip); ++ } ++ if (panic_on_warn) ++ panic("scheduling while atomic\n"); ++ ++ dump_stack(); ++ add_taint(TAINT_WARN, LOCKDEP_STILL_OK); ++} ++ ++/* ++ * Various schedule()-time debugging checks and statistics: ++ */ ++static inline void schedule_debug(struct task_struct *prev, bool preempt) ++{ ++#ifdef CONFIG_SCHED_STACK_END_CHECK ++ if (task_stack_end_corrupted(prev)) ++ panic("corrupted stack end detected inside scheduler\n"); ++#endif ++ ++#ifdef CONFIG_DEBUG_ATOMIC_SLEEP ++ if (!preempt && prev->state && prev->non_block_count) { ++ printk(KERN_ERR "BUG: scheduling in a non-blocking section: %s/%d/%i\n", ++ prev->comm, prev->pid, prev->non_block_count); ++ dump_stack(); ++ add_taint(TAINT_WARN, LOCKDEP_STILL_OK); ++ } ++#endif ++ ++ if (unlikely(in_atomic_preempt_off())) { ++ __schedule_bug(prev); ++ preempt_count_set(PREEMPT_DISABLED); ++ } ++ rcu_sleep_check(); ++ ++ profile_hit(SCHED_PROFILING, __builtin_return_address(0)); ++ ++ schedstat_inc(this_rq()->sched_count); ++} ++ ++static inline void set_rq_task(struct rq *rq, struct task_struct *p) ++{ ++ p->last_ran = rq->clock_task; ++ ++#ifdef CONFIG_HIGH_RES_TIMERS ++ if (p != rq->idle) ++ hrtick_start(rq, US_TO_NS(p->time_slice)); ++#endif ++ /* update rq->dither */ ++ rq->dither = rq_dither(rq); ++} ++ ++/* ++ * schedule() is the main scheduler function. ++ * ++ * The main means of driving the scheduler and thus entering this function are: ++ * ++ * 1. Explicit blocking: mutex, semaphore, waitqueue, etc. ++ * ++ * 2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return ++ * paths. For example, see arch/x86/entry_64.S. ++ * ++ * To drive preemption between tasks, the scheduler sets the flag in timer ++ * interrupt handler scheduler_tick(). ++ * ++ * 3. Wakeups don't really cause entry into schedule(). 
They add a ++ * task to the run-queue and that's it. ++ * ++ * Now, if the new task added to the run-queue preempts the current ++ * task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets ++ * called on the nearest possible occasion: ++ * ++ * - If the kernel is preemptible (CONFIG_PREEMPTION=y): ++ * ++ * - in syscall or exception context, at the next outmost ++ * preempt_enable(). (this might be as soon as the wake_up()'s ++ * spin_unlock()!) ++ * ++ * - in IRQ context, return from interrupt-handler to ++ * preemptible context ++ * ++ * - If the kernel is not preemptible (CONFIG_PREEMPT is not set) ++ * then at the next: ++ * ++ * - cond_resched() call ++ * - explicit schedule() call ++ * - return from syscall or exception to user-space ++ * - return from interrupt-handler to user-space ++ * ++ * WARNING: must be called with preemption disabled! ++ */ ++static void __sched notrace __schedule(bool preempt) ++{ ++ struct task_struct *prev, *next; ++ unsigned long *switch_count; ++ struct rq *rq; ++ int cpu; ++ ++ cpu = smp_processor_id(); ++ rq = cpu_rq(cpu); ++ prev = rq->curr; ++ ++ schedule_debug(prev, preempt); ++ ++ /* by passing sched_feat(HRTICK) checking which PDS doesn't support */ ++ hrtick_clear(rq); ++ ++ local_irq_disable(); ++ rcu_note_context_switch(preempt); ++ ++ /* ++ * Make sure that signal_pending_state()->signal_pending() below ++ * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE) ++ * done by the caller to avoid the race with signal_wake_up(). ++ * ++ * The membarrier system call requires a full memory barrier ++ * after coming from user-space, before storing to rq->curr. ++ */ ++ raw_spin_lock(&rq->lock); ++ smp_mb__after_spinlock(); ++ ++ update_rq_clock(rq); ++ ++ switch_count = &prev->nivcsw; ++ if (!preempt && prev->state) { ++ if (signal_pending_state(prev->state, prev)) { ++ prev->state = TASK_RUNNING; ++ } else { ++ deactivate_task(prev, rq); ++ ++ if (prev->in_iowait) { ++ atomic_inc(&rq->nr_iowait); ++ delayacct_blkio_start(); ++ } ++ } ++ switch_count = &prev->nvcsw; ++ } ++ ++ clear_tsk_need_resched(prev); ++ clear_preempt_need_resched(); ++ ++ check_deadline(prev, rq); ++ ++ next = choose_next_task(rq, cpu, prev); ++ ++ set_rq_task(rq, next); ++ ++ if (prev != next) { ++ if (next->prio == PRIO_LIMIT) ++ schedstat_inc(rq->sched_goidle); ++ ++ /* ++ * RCU users of rcu_dereference(rq->curr) may not see ++ * changes to task_struct made by pick_next_task(). ++ */ ++ RCU_INIT_POINTER(rq->curr, next); ++ /* ++ * The membarrier system call requires each architecture ++ * to have a full memory barrier after updating ++ * rq->curr, before returning to user-space. ++ * ++ * Here are the schemes providing that barrier on the ++ * various architectures: ++ * - mm ? switch_mm() : mmdrop() for x86, s390, sparc, PowerPC. ++ * switch_mm() rely on membarrier_arch_switch_mm() on PowerPC. 
++ * - finish_lock_switch() for weakly-ordered ++ * architectures where spin_unlock is a full barrier, ++ * - switch_to() for arm64 (weakly-ordered, spin_unlock ++ * is a RELEASE barrier), ++ */ ++ ++*switch_count; ++ rq->nr_switches++; ++ ++ psi_sched_switch(prev, next, !task_on_rq_queued(prev)); ++ ++ trace_sched_switch(preempt, prev, next); ++ ++ /* Also unlocks the rq: */ ++ rq = context_switch(rq, prev, next); ++#ifdef CONFIG_SCHED_SMT ++ pds_sg_balance_check(rq); ++#endif ++ } else ++ raw_spin_unlock_irq(&rq->lock); ++} ++ ++void __noreturn do_task_dead(void) ++{ ++ /* Causes final put_task_struct in finish_task_switch(): */ ++ set_special_state(TASK_DEAD); ++ ++ /* Tell freezer to ignore us: */ ++ current->flags |= PF_NOFREEZE; ++ __schedule(false); ++ ++ BUG(); ++ ++ /* Avoid "noreturn function does return" - but don't continue if BUG() is a NOP: */ ++ for (;;) ++ cpu_relax(); ++} ++ ++static inline void sched_submit_work(struct task_struct *tsk) ++{ ++ if (!tsk->state || tsk_is_pi_blocked(tsk) || ++ signal_pending_state(tsk->state, tsk)) ++ return; ++ ++ /* ++ * If a worker went to sleep, notify and ask workqueue whether ++ * it wants to wake up a task to maintain concurrency. ++ * As this function is called inside the schedule() context, ++ * we disable preemption to avoid it calling schedule() again ++ * in the possible wakeup of a kworker. ++ */ ++ if (tsk->flags & (PF_WQ_WORKER | PF_IO_WORKER)) { ++ preempt_disable(); ++ if (tsk->flags & PF_WQ_WORKER) ++ wq_worker_sleeping(tsk); ++ else ++ io_wq_worker_sleeping(tsk); ++ preempt_enable_no_resched(); ++ } ++ ++ /* ++ * If we are going to sleep and we have plugged IO queued, ++ * make sure to submit it to avoid deadlocks. ++ */ ++ if (blk_needs_flush_plug(tsk)) ++ blk_schedule_flush_plug(tsk); ++} ++ ++static void sched_update_worker(struct task_struct *tsk) ++{ ++ if (tsk->flags & (PF_WQ_WORKER | PF_IO_WORKER)) { ++ if (tsk->flags & PF_WQ_WORKER) ++ wq_worker_running(tsk); ++ else ++ io_wq_worker_running(tsk); ++ } ++} ++ ++asmlinkage __visible void __sched schedule(void) ++{ ++ struct task_struct *tsk = current; ++ ++ sched_submit_work(tsk); ++ do { ++ preempt_disable(); ++ __schedule(false); ++ sched_preempt_enable_no_resched(); ++ } while (need_resched()); ++ sched_update_worker(tsk); ++} ++EXPORT_SYMBOL(schedule); ++ ++/* ++ * synchronize_rcu_tasks() makes sure that no task is stuck in preempted ++ * state (have scheduled out non-voluntarily) by making sure that all ++ * tasks have either left the run queue or have gone into user space. ++ * As idle tasks do not do either, they must not ever be preempted ++ * (schedule out non-voluntarily). ++ * ++ * schedule_idle() is similar to schedule_preempt_disable() except that it ++ * never enables preemption because it does not call sched_submit_work(). ++ */ ++void __sched schedule_idle(void) ++{ ++ /* ++ * As this skips calling sched_submit_work(), which the idle task does ++ * regardless because that function is a nop when the task is in a ++ * TASK_RUNNING state, make sure this isn't used someplace that the ++ * current task can be in any other state. Note, idle is always in the ++ * TASK_RUNNING state. 
++ */ ++ WARN_ON_ONCE(current->state); ++ do { ++ __schedule(false); ++ } while (need_resched()); ++} ++ ++#ifdef CONFIG_CONTEXT_TRACKING ++asmlinkage __visible void __sched schedule_user(void) ++{ ++ /* ++ * If we come here after a random call to set_need_resched(), ++ * or we have been woken up remotely but the IPI has not yet arrived, ++ * we haven't yet exited the RCU idle mode. Do it here manually until ++ * we find a better solution. ++ * ++ * NB: There are buggy callers of this function. Ideally we ++ * should warn if prev_state != CONTEXT_USER, but that will trigger ++ * too frequently to make sense yet. ++ */ ++ enum ctx_state prev_state = exception_enter(); ++ schedule(); ++ exception_exit(prev_state); ++} ++#endif ++ ++/** ++ * schedule_preempt_disabled - called with preemption disabled ++ * ++ * Returns with preemption disabled. Note: preempt_count must be 1 ++ */ ++void __sched schedule_preempt_disabled(void) ++{ ++ sched_preempt_enable_no_resched(); ++ schedule(); ++ preempt_disable(); ++} ++ ++static void __sched notrace preempt_schedule_common(void) ++{ ++ do { ++ /* ++ * Because the function tracer can trace preempt_count_sub() ++ * and it also uses preempt_enable/disable_notrace(), if ++ * NEED_RESCHED is set, the preempt_enable_notrace() called ++ * by the function tracer will call this function again and ++ * cause infinite recursion. ++ * ++ * Preemption must be disabled here before the function ++ * tracer can trace. Break up preempt_disable() into two ++ * calls. One to disable preemption without fear of being ++ * traced. The other to still record the preemption latency, ++ * which can also be traced by the function tracer. ++ */ ++ preempt_disable_notrace(); ++ preempt_latency_start(1); ++ __schedule(true); ++ preempt_latency_stop(1); ++ preempt_enable_no_resched_notrace(); ++ ++ /* ++ * Check again in case we missed a preemption opportunity ++ * between schedule and now. ++ */ ++ } while (need_resched()); ++} ++ ++#ifdef CONFIG_PREEMPTION ++/* ++ * This is the entry point to schedule() from in-kernel preemption ++ * off of preempt_enable. ++ */ ++asmlinkage __visible void __sched notrace preempt_schedule(void) ++{ ++ /* ++ * If there is a non-zero preempt_count or interrupts are disabled, ++ * we do not want to preempt the current task. Just return.. ++ */ ++ if (likely(!preemptible())) ++ return; ++ ++ preempt_schedule_common(); ++} ++NOKPROBE_SYMBOL(preempt_schedule); ++EXPORT_SYMBOL(preempt_schedule); ++ ++/** ++ * preempt_schedule_notrace - preempt_schedule called by tracing ++ * ++ * The tracing infrastructure uses preempt_enable_notrace to prevent ++ * recursion and tracing preempt enabling caused by the tracing ++ * infrastructure itself. But as tracing can happen in areas coming ++ * from userspace or just about to enter userspace, a preempt enable ++ * can occur before user_exit() is called. This will cause the scheduler ++ * to be called when the system is still in usermode. ++ * ++ * To prevent this, the preempt_enable_notrace will use this function ++ * instead of preempt_schedule() to exit user context if needed before ++ * calling the scheduler. 
++ */ ++asmlinkage __visible void __sched notrace preempt_schedule_notrace(void) ++{ ++ enum ctx_state prev_ctx; ++ ++ if (likely(!preemptible())) ++ return; ++ ++ do { ++ /* ++ * Because the function tracer can trace preempt_count_sub() ++ * and it also uses preempt_enable/disable_notrace(), if ++ * NEED_RESCHED is set, the preempt_enable_notrace() called ++ * by the function tracer will call this function again and ++ * cause infinite recursion. ++ * ++ * Preemption must be disabled here before the function ++ * tracer can trace. Break up preempt_disable() into two ++ * calls. One to disable preemption without fear of being ++ * traced. The other to still record the preemption latency, ++ * which can also be traced by the function tracer. ++ */ ++ preempt_disable_notrace(); ++ preempt_latency_start(1); ++ /* ++ * Needs preempt disabled in case user_exit() is traced ++ * and the tracer calls preempt_enable_notrace() causing ++ * an infinite recursion. ++ */ ++ prev_ctx = exception_enter(); ++ __schedule(true); ++ exception_exit(prev_ctx); ++ ++ preempt_latency_stop(1); ++ preempt_enable_no_resched_notrace(); ++ } while (need_resched()); ++} ++EXPORT_SYMBOL_GPL(preempt_schedule_notrace); ++ ++#endif /* CONFIG_PREEMPTION */ ++ ++/* ++ * This is the entry point to schedule() from kernel preemption ++ * off of irq context. ++ * Note, that this is called and return with irqs disabled. This will ++ * protect us against recursive calling from irq. ++ */ ++asmlinkage __visible void __sched preempt_schedule_irq(void) ++{ ++ enum ctx_state prev_state; ++ ++ /* Catch callers which need to be fixed */ ++ BUG_ON(preempt_count() || !irqs_disabled()); ++ ++ prev_state = exception_enter(); ++ ++ do { ++ preempt_disable(); ++ local_irq_enable(); ++ __schedule(true); ++ local_irq_disable(); ++ sched_preempt_enable_no_resched(); ++ } while (need_resched()); ++ ++ exception_exit(prev_state); ++} ++ ++int default_wake_function(wait_queue_entry_t *curr, unsigned mode, int wake_flags, ++ void *key) ++{ ++ return try_to_wake_up(curr->private, mode, wake_flags); ++} ++EXPORT_SYMBOL(default_wake_function); ++ ++static inline void ++check_task_changed(struct rq *rq, struct task_struct *p) ++{ ++ /* ++ * Trigger changes when task priority/deadline modified. ++ */ ++ if (task_on_rq_queued(p)) { ++ struct task_struct *first; ++ ++ requeue_task(p, rq); ++ ++ /* Resched if first queued task not running and not IDLE */ ++ if ((first = rq_first_queued_task(rq)) != rq->curr && ++ !task_running_idle(first)) ++ resched_curr(rq); ++ } ++} ++ ++#ifdef CONFIG_RT_MUTEXES ++ ++static inline int __rt_effective_prio(struct task_struct *pi_task, int prio) ++{ ++ if (pi_task) ++ prio = min(prio, pi_task->prio); ++ ++ return prio; ++} ++ ++static inline int rt_effective_prio(struct task_struct *p, int prio) ++{ ++ struct task_struct *pi_task = rt_mutex_get_top_task(p); ++ ++ return __rt_effective_prio(pi_task, prio); ++} ++ ++/* ++ * rt_mutex_setprio - set the current priority of a task ++ * @p: task to boost ++ * @pi_task: donor task ++ * ++ * This function changes the 'effective' priority of a task. It does ++ * not touch ->normal_prio like __setscheduler(). ++ * ++ * Used by the rt_mutex code to implement priority inheritance ++ * logic. Call site only calls if the priority of the task changed. 
++ */
++void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
++{
++ int prio;
++ struct rq *rq;
++ raw_spinlock_t *lock;
++
++ /* XXX used to be waiter->prio, not waiter->task->prio */
++ prio = __rt_effective_prio(pi_task, p->normal_prio);
++
++ /*
++ * If nothing changed; bail early.
++ */
++ if (p->pi_top_task == pi_task && prio == p->prio)
++ return;
++
++ rq = __task_access_lock(p, &lock);
++ /*
++ * Set under pi_lock && rq->lock, such that the value can be used under
++ * either lock.
++ *
++ * Note that there is loads of tricky to make this pointer cache work
++ * right. rt_mutex_slowunlock()+rt_mutex_postunlock() work together to
++ * ensure a task is de-boosted (pi_task is set to NULL) before the
++ * task is allowed to run again (and can exit). This ensures the pointer
++ * points to a blocked task -- which guarantees the task is present.
++ */
++ p->pi_top_task = pi_task;
++
++ /*
++ * For FIFO/RR we only need to set prio, if that matches we're done.
++ */
++ if (prio == p->prio)
++ goto out_unlock;
++
++ /*
++ * Idle task boosting is a nono in general. There is one
++ * exception, when PREEMPT_RT and NOHZ is active:
++ *
++ * The idle task calls get_next_timer_interrupt() and holds
++ * the timer wheel base->lock on the CPU and another CPU wants
++ * to access the timer (probably to cancel it). We can safely
++ * ignore the boosting request, as the idle CPU runs this code
++ * with interrupts disabled and will complete the lock
++ * protected section without being interrupted. So there is no
++ * real need to boost.
++ */
++ if (unlikely(p == rq->idle)) {
++ WARN_ON(p != rq->curr);
++ WARN_ON(p->pi_blocked_on);
++ goto out_unlock;
++ }
++
++ trace_sched_pi_setprio(p, pi_task);
++ p->prio = prio;
++ update_task_priodl(p);
++
++ check_task_changed(rq, p);
++
++out_unlock:
++ __task_access_unlock(p, lock);
++}
++#else
++static inline int rt_effective_prio(struct task_struct *p, int prio)
++{
++ return prio;
++}
++#endif
++
++void set_user_nice(struct task_struct *p, long nice)
++{
++ int new_static;
++ unsigned long flags;
++ struct rq *rq;
++ raw_spinlock_t *lock;
++
++ if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE)
++ return;
++ new_static = NICE_TO_PRIO(nice);
++ /*
++ * We have to be careful, if called from sys_setpriority(),
++ * the task might be in the middle of scheduling on another CPU.
++ */
++ raw_spin_lock_irqsave(&p->pi_lock, flags);
++ rq = __task_access_lock(p, &lock);
++
++ /* rq lock may not be held!!
*/ ++ update_rq_clock(rq); ++ ++ p->static_prio = new_static; ++ /* ++ * The RT priorities are set via sched_setscheduler(), but we still ++ * allow the 'normal' nice value to be set - but as expected ++ * it wont have any effect on scheduling until the task is ++ * not SCHED_NORMAL/SCHED_BATCH: ++ */ ++ if (task_has_rt_policy(p)) ++ goto out_unlock; ++ ++ p->deadline -= task_deadline_diff(p); ++ p->deadline += static_deadline_diff(new_static); ++ p->prio = effective_prio(p); ++ update_task_priodl(p); ++ ++ check_task_changed(rq, p); ++out_unlock: ++ __task_access_unlock(p, lock); ++ raw_spin_unlock_irqrestore(&p->pi_lock, flags); ++} ++EXPORT_SYMBOL(set_user_nice); ++ ++/* ++ * can_nice - check if a task can reduce its nice value ++ * @p: task ++ * @nice: nice value ++ */ ++int can_nice(const struct task_struct *p, const int nice) ++{ ++ /* Convert nice value [19,-20] to rlimit style value [1,40] */ ++ int nice_rlim = nice_to_rlimit(nice); ++ ++ return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) || ++ capable(CAP_SYS_NICE)); ++} ++ ++#ifdef __ARCH_WANT_SYS_NICE ++ ++/* ++ * sys_nice - change the priority of the current process. ++ * @increment: priority increment ++ * ++ * sys_setpriority is a more generic, but much slower function that ++ * does similar things. ++ */ ++SYSCALL_DEFINE1(nice, int, increment) ++{ ++ long nice, retval; ++ ++ /* ++ * Setpriority might change our priority at the same moment. ++ * We don't have to worry. Conceptually one call occurs first ++ * and we have a single winner. ++ */ ++ ++ increment = clamp(increment, -NICE_WIDTH, NICE_WIDTH); ++ nice = task_nice(current) + increment; ++ ++ nice = clamp_val(nice, MIN_NICE, MAX_NICE); ++ if (increment < 0 && !can_nice(current, nice)) ++ return -EPERM; ++ ++ retval = security_task_setnice(current, nice); ++ if (retval) ++ return retval; ++ ++ set_user_nice(current, nice); ++ return 0; ++} ++ ++#endif ++ ++/** ++ * task_prio - return the priority value of a given task. ++ * @p: the task in question. ++ * ++ * Return: The priority value as seen by users in /proc. ++ * RT tasks are offset by -100. Normal tasks are centered around 1, value goes ++ * from 0(SCHED_ISO) up to 82 (nice +19 SCHED_IDLE). ++ */ ++int task_prio(const struct task_struct *p) ++{ ++ int level, prio = p->prio - MAX_RT_PRIO; ++ static const int level_to_nice_prio[] = {39, 33, 26, 20, 14, 7, 0, 0}; ++ ++ /* rt tasks */ ++ if (prio <= 0) ++ goto out; ++ ++ preempt_disable(); ++ level = task_deadline_level(p, this_rq()); ++ preempt_enable(); ++ prio += level_to_nice_prio[level]; ++ if (idleprio_task(p)) ++ prio += NICE_WIDTH; ++out: ++ return prio; ++} ++ ++/** ++ * idle_cpu - is a given CPU idle currently? ++ * @cpu: the processor in question. ++ * ++ * Return: 1 if the CPU is currently idle. 0 otherwise. ++ */ ++int idle_cpu(int cpu) ++{ ++ return cpu_curr(cpu) == cpu_rq(cpu)->idle; ++} ++ ++/** ++ * idle_task - return the idle task for a given CPU. ++ * @cpu: the processor in question. ++ * ++ * Return: The idle task for the cpu @cpu. ++ */ ++struct task_struct *idle_task(int cpu) ++{ ++ return cpu_rq(cpu)->idle; ++} ++ ++/** ++ * find_process_by_pid - find a process with a matching PID value. ++ * @pid: the pid in question. ++ * ++ * The task of @pid, if found. %NULL otherwise. ++ */ ++static inline struct task_struct *find_process_by_pid(pid_t pid) ++{ ++ return pid ? 
find_task_by_vpid(pid) : current; ++} ++ ++#ifdef CONFIG_SMP ++void sched_set_stop_task(int cpu, struct task_struct *stop) ++{ ++ struct sched_param stop_param = { .sched_priority = STOP_PRIO }; ++ struct sched_param start_param = { .sched_priority = 0 }; ++ struct task_struct *old_stop = cpu_rq(cpu)->stop; ++ ++ if (stop) { ++ /* ++ * Make it appear like a SCHED_FIFO task, its something ++ * userspace knows about and won't get confused about. ++ * ++ * Also, it will make PI more or less work without too ++ * much confusion -- but then, stop work should not ++ * rely on PI working anyway. ++ */ ++ sched_setscheduler_nocheck(stop, SCHED_FIFO, &stop_param); ++ } ++ ++ cpu_rq(cpu)->stop = stop; ++ ++ if (old_stop) { ++ /* ++ * Reset it back to a normal scheduling policy so that ++ * it can die in pieces. ++ */ ++ sched_setscheduler_nocheck(old_stop, SCHED_NORMAL, &start_param); ++ } ++} ++ ++/* ++ * Change a given task's CPU affinity. Migrate the thread to a ++ * proper CPU and schedule it away if the CPU it's executing on ++ * is removed from the allowed bitmask. ++ * ++ * NOTE: the caller must have a valid reference to the task, the ++ * task must not exit() & deallocate itself prematurely. The ++ * call is not atomic; no spinlocks may be held. ++ */ ++static int __set_cpus_allowed_ptr(struct task_struct *p, ++ const struct cpumask *new_mask, bool check) ++{ ++ const struct cpumask *cpu_valid_mask = cpu_active_mask; ++ int dest_cpu; ++ unsigned long flags; ++ struct rq *rq; ++ raw_spinlock_t *lock; ++ int ret = 0; ++ ++ raw_spin_lock_irqsave(&p->pi_lock, flags); ++ rq = __task_access_lock(p, &lock); ++ ++ if (p->flags & PF_KTHREAD) { ++ /* ++ * Kernel threads are allowed on online && !active CPUs ++ */ ++ cpu_valid_mask = cpu_online_mask; ++ } ++ ++ /* ++ * Must re-check here, to close a race against __kthread_bind(), ++ * sched_setaffinity() is not guaranteed to observe the flag. ++ */ ++ if (check && (p->flags & PF_NO_SETAFFINITY)) { ++ ret = -EINVAL; ++ goto out; ++ } ++ ++ if (cpumask_equal(&p->cpus_mask, new_mask)) ++ goto out; ++ ++ dest_cpu = cpumask_any_and(cpu_valid_mask, new_mask); ++ if (dest_cpu >= nr_cpu_ids) { ++ ret = -EINVAL; ++ goto out; ++ } ++ ++ do_set_cpus_allowed(p, new_mask); ++ ++ if (p->flags & PF_KTHREAD) { ++ /* ++ * For kernel threads that do indeed end up on online && ++ * !active we want to ensure they are strict per-CPU threads. ++ */ ++ WARN_ON(cpumask_intersects(new_mask, cpu_online_mask) && ++ !cpumask_intersects(new_mask, cpu_active_mask) && ++ p->nr_cpus_allowed != 1); ++ } ++ ++ /* Can the task run on the task's current CPU? If so, we're done */ ++ if (cpumask_test_cpu(task_cpu(p), new_mask)) ++ goto out; ++ ++ if (task_running(p) || p->state == TASK_WAKING) { ++ struct migration_arg arg = { p, dest_cpu }; ++ ++ /* Need help from migration thread: drop lock and wait. */ ++ __task_access_unlock(p, lock); ++ raw_spin_unlock_irqrestore(&p->pi_lock, flags); ++ stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg); ++ return 0; ++ } ++ if (task_on_rq_queued(p)) { ++ /* ++ * OK, since we're going to drop the lock immediately ++ * afterwards anyway. 
++ */ ++ update_rq_clock(rq); ++ rq = move_queued_task(rq, p, dest_cpu); ++ lock = &rq->lock; ++ } ++ ++out: ++ __task_access_unlock(p, lock); ++ raw_spin_unlock_irqrestore(&p->pi_lock, flags); ++ ++ return ret; ++} ++ ++int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask) ++{ ++ return __set_cpus_allowed_ptr(p, new_mask, false); ++} ++EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr); ++ ++#else ++static inline int ++__set_cpus_allowed_ptr(struct task_struct *p, ++ const struct cpumask *new_mask, bool check) ++{ ++ return set_cpus_allowed_ptr(p, new_mask); ++} ++#endif ++ ++static u64 task_init_deadline(const struct task_struct *p) ++{ ++ return task_rq(p)->clock + task_deadline_diff(p); ++} ++ ++u64 (* task_init_deadline_func_tbl[])(const struct task_struct *p) = { ++ task_init_deadline, /* SCHED_NORMAL */ ++ NULL, /* SCHED_FIFO */ ++ NULL, /* SCHED_RR */ ++ task_init_deadline, /* SCHED_BATCH */ ++ NULL, /* SCHED_ISO */ ++ task_init_deadline /* SCHED_IDLE */ ++}; ++ ++/* ++ * sched_setparam() passes in -1 for its policy, to let the functions ++ * it calls know not to change it. ++ */ ++#define SETPARAM_POLICY -1 ++ ++static void __setscheduler_params(struct task_struct *p, ++ const struct sched_attr *attr) ++{ ++ int old_policy = p->policy; ++ int policy = attr->sched_policy; ++ ++ if (policy == SETPARAM_POLICY) ++ policy = p->policy; ++ ++ p->policy = policy; ++ ++ /* ++ * allow normal nice value to be set, but will not have any ++ * effect on scheduling until the task not SCHED_NORMAL/ ++ * SCHED_BATCH ++ */ ++ p->static_prio = NICE_TO_PRIO(attr->sched_nice); ++ ++ /* ++ * __sched_setscheduler() ensures attr->sched_priority == 0 when ++ * !rt_policy. Always setting this ensures that things like ++ * getparam()/getattr() don't report silly values for !rt tasks. ++ */ ++ p->rt_priority = attr->sched_priority; ++ p->normal_prio = normal_prio(p); ++ ++ if (old_policy != policy) ++ p->deadline = (task_init_deadline_func_tbl[p->policy])? ++ task_init_deadline_func_tbl[p->policy](p):0ULL; ++} ++ ++/* Actually do priority change: must hold rq lock. */ ++static void __setscheduler(struct rq *rq, struct task_struct *p, ++ const struct sched_attr *attr, bool keep_boost) ++{ ++ __setscheduler_params(p, attr); ++ ++ /* ++ * Keep a potential priority boosting if called from ++ * sched_setscheduler(). 
++ */
++ p->prio = normal_prio(p);
++ if (keep_boost)
++ p->prio = rt_effective_prio(p, p->prio);
++ update_task_priodl(p);
++}
++
++/*
++ * check the target process has a UID that matches the current process's
++ */
++static bool check_same_owner(struct task_struct *p)
++{
++ const struct cred *cred = current_cred(), *pcred;
++ bool match;
++
++ rcu_read_lock();
++ pcred = __task_cred(p);
++ match = (uid_eq(cred->euid, pcred->euid) ||
++ uid_eq(cred->euid, pcred->uid));
++ rcu_read_unlock();
++ return match;
++}
++
++static int
++__sched_setscheduler(struct task_struct *p,
++ const struct sched_attr *attr, bool user, bool pi)
++{
++ const struct sched_attr dl_squash_attr = {
++ .size = sizeof(struct sched_attr),
++ .sched_policy = SCHED_FIFO,
++ .sched_nice = 0,
++ .sched_priority = 99,
++ };
++ int newprio = MAX_RT_PRIO - 1 - attr->sched_priority;
++ int retval, oldpolicy = -1;
++ int policy = attr->sched_policy;
++ unsigned long flags;
++ struct rq *rq;
++ int reset_on_fork;
++ raw_spinlock_t *lock;
++
++ /* The pi code expects interrupts enabled */
++ BUG_ON(pi && in_interrupt());
++
++ /*
++ * PDS supports SCHED_DEADLINE by squashing it as prio 0 SCHED_FIFO
++ */
++ if (unlikely(SCHED_DEADLINE == policy)) {
++ attr = &dl_squash_attr;
++ policy = attr->sched_policy;
++ newprio = MAX_RT_PRIO - 1 - attr->sched_priority;
++ }
++recheck:
++ /* Double check policy once rq lock held */
++ if (policy < 0) {
++ reset_on_fork = p->sched_reset_on_fork;
++ policy = oldpolicy = p->policy;
++ } else {
++ reset_on_fork = !!(attr->sched_flags & SCHED_RESET_ON_FORK);
++
++ if (policy > SCHED_IDLE)
++ return -EINVAL;
++ }
++
++ if (attr->sched_flags & ~(SCHED_FLAG_ALL))
++ return -EINVAL;
++
++ /*
++ * Valid priorities for SCHED_FIFO and SCHED_RR are
++ * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL and
++ * SCHED_BATCH and SCHED_IDLE is 0.
++ */
++ if (attr->sched_priority < 0 ||
++ (p->mm && attr->sched_priority > MAX_USER_RT_PRIO - 1) ||
++ (!p->mm && attr->sched_priority > MAX_RT_PRIO - 1))
++ return -EINVAL;
++ if ((SCHED_RR == policy || SCHED_FIFO == policy) !=
++ (attr->sched_priority != 0))
++ return -EINVAL;
++
++ /*
++ * Allow unprivileged RT tasks to decrease priority:
++ */
++ if (user && !capable(CAP_SYS_NICE)) {
++ if (SCHED_FIFO == policy || SCHED_RR == policy) {
++ unsigned long rlim_rtprio =
++ task_rlimit(p, RLIMIT_RTPRIO);
++
++ /* Can't set/change the rt policy */
++ if (policy != p->policy && !rlim_rtprio)
++ return -EPERM;
++
++ /* Can't increase priority */
++ if (attr->sched_priority > p->rt_priority &&
++ attr->sched_priority > rlim_rtprio)
++ return -EPERM;
++ }
++
++ /* Can't change other user's priorities */
++ if (!check_same_owner(p))
++ return -EPERM;
++
++ /* Normal users shall not reset the sched_reset_on_fork flag */
++ if (p->sched_reset_on_fork && !reset_on_fork)
++ return -EPERM;
++ }
++
++ if (user) {
++ retval = security_task_setscheduler(p);
++ if (retval)
++ return retval;
++ }
++
++ if (pi)
++ cpuset_read_lock();
++
++ /*
++ * Make sure no PI-waiters arrive (or leave) while we are
++ * changing the priority of the task:
++ */
++ raw_spin_lock_irqsave(&p->pi_lock, flags);
++
++ /*
++ * To be able to change p->policy safely, task_access_lock()
++ * must be called.
++ * If task_access_lock() is used here:
++ * For the task p which is not running, reading rq->stop is
++ * racy but acceptable as ->stop doesn't change much.
++ * An enhancement can be made to read rq->stop safely.
++ */ ++ rq = __task_access_lock(p, &lock); ++ ++ /* ++ * Changing the policy of the stop threads its a very bad idea ++ */ ++ if (p == rq->stop) { ++ retval = -EINVAL; ++ goto unlock; ++ } ++ ++ /* ++ * If not changing anything there's no need to proceed further: ++ */ ++ if (unlikely(policy == p->policy)) { ++ if (rt_policy(policy) && attr->sched_priority != p->rt_priority) ++ goto change; ++ if (!rt_policy(policy) && ++ NICE_TO_PRIO(attr->sched_nice) != p->static_prio) ++ goto change; ++ ++ p->sched_reset_on_fork = reset_on_fork; ++ retval = 0; ++ goto unlock; ++ } ++change: ++ ++ /* Re-check policy now with rq lock held */ ++ if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) { ++ policy = oldpolicy = -1; ++ __task_access_unlock(p, lock); ++ raw_spin_unlock_irqrestore(&p->pi_lock, flags); ++ if (pi) ++ cpuset_read_unlock(); ++ goto recheck; ++ } ++ ++ p->sched_reset_on_fork = reset_on_fork; ++ ++ if (pi) { ++ /* ++ * Take priority boosted tasks into account. If the new ++ * effective priority is unchanged, we just store the new ++ * normal parameters and do not touch the scheduler class and ++ * the runqueue. This will be done when the task deboost ++ * itself. ++ */ ++ if (rt_effective_prio(p, newprio) == p->prio) { ++ __setscheduler_params(p, attr); ++ retval = 0; ++ goto unlock; ++ } ++ } ++ ++ __setscheduler(rq, p, attr, pi); ++ ++ check_task_changed(rq, p); ++ ++ /* Avoid rq from going away on us: */ ++ preempt_disable(); ++ __task_access_unlock(p, lock); ++ raw_spin_unlock_irqrestore(&p->pi_lock, flags); ++ ++ if (pi) { ++ cpuset_read_unlock(); ++ rt_mutex_adjust_pi(p); ++ } ++ ++ preempt_enable(); ++ ++ return 0; ++ ++unlock: ++ __task_access_unlock(p, lock); ++ raw_spin_unlock_irqrestore(&p->pi_lock, flags); ++ if (pi) ++ cpuset_read_unlock(); ++ return retval; ++} ++ ++static int _sched_setscheduler(struct task_struct *p, int policy, ++ const struct sched_param *param, bool check) ++{ ++ struct sched_attr attr = { ++ .sched_policy = policy, ++ .sched_priority = param->sched_priority, ++ .sched_nice = PRIO_TO_NICE(p->static_prio), ++ }; ++ ++ /* Fixup the legacy SCHED_RESET_ON_FORK hack. */ ++ if ((policy != SETPARAM_POLICY) && (policy & SCHED_RESET_ON_FORK)) { ++ attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK; ++ policy &= ~SCHED_RESET_ON_FORK; ++ attr.sched_policy = policy; ++ } ++ ++ return __sched_setscheduler(p, &attr, check, true); ++} ++ ++/** ++ * sched_setscheduler - change the scheduling policy and/or RT priority of a thread. ++ * @p: the task in question. ++ * @policy: new policy. ++ * @param: structure containing the new RT priority. ++ * ++ * Return: 0 on success. An error code otherwise. ++ * ++ * NOTE that the task may be already dead. ++ */ ++int sched_setscheduler(struct task_struct *p, int policy, ++ const struct sched_param *param) ++{ ++ return _sched_setscheduler(p, policy, param, true); ++} ++ ++EXPORT_SYMBOL_GPL(sched_setscheduler); ++ ++int sched_setattr(struct task_struct *p, const struct sched_attr *attr) ++{ ++ return __sched_setscheduler(p, attr, true, true); ++} ++EXPORT_SYMBOL_GPL(sched_setattr); ++ ++int sched_setattr_nocheck(struct task_struct *p, const struct sched_attr *attr) ++{ ++ return __sched_setscheduler(p, attr, false, true); ++} ++ ++/** ++ * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace. ++ * @p: the task in question. ++ * @policy: new policy. ++ * @param: structure containing the new RT priority. 
++ * ++ * Just like sched_setscheduler, only don't bother checking if the ++ * current context has permission. For example, this is needed in ++ * stop_machine(): we create temporary high priority worker threads, ++ * but our caller might not have that capability. ++ * ++ * Return: 0 on success. An error code otherwise. ++ */ ++int sched_setscheduler_nocheck(struct task_struct *p, int policy, ++ const struct sched_param *param) ++{ ++ return _sched_setscheduler(p, policy, param, false); ++} ++EXPORT_SYMBOL_GPL(sched_setscheduler_nocheck); ++ ++static int ++do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param) ++{ ++ struct sched_param lparam; ++ struct task_struct *p; ++ int retval; ++ ++ if (!param || pid < 0) ++ return -EINVAL; ++ if (copy_from_user(&lparam, param, sizeof(struct sched_param))) ++ return -EFAULT; ++ ++ rcu_read_lock(); ++ retval = -ESRCH; ++ p = find_process_by_pid(pid); ++ if (likely(p)) ++ get_task_struct(p); ++ rcu_read_unlock(); ++ ++ if (likely(p)) { ++ retval = sched_setscheduler(p, policy, &lparam); ++ put_task_struct(p); ++ } ++ ++ return retval; ++} ++ ++/* ++ * Mimics kernel/events/core.c perf_copy_attr(). ++ */ ++static int sched_copy_attr(struct sched_attr __user *uattr, struct sched_attr *attr) ++{ ++ u32 size; ++ int ret; ++ ++ /* Zero the full structure, so that a short copy will be nice: */ ++ memset(attr, 0, sizeof(*attr)); ++ ++ ret = get_user(size, &uattr->size); ++ if (ret) ++ return ret; ++ ++ /* ABI compatibility quirk: */ ++ if (!size) ++ size = SCHED_ATTR_SIZE_VER0; ++ ++ if (size < SCHED_ATTR_SIZE_VER0 || size > PAGE_SIZE) ++ goto err_size; ++ ++ ret = copy_struct_from_user(attr, sizeof(*attr), uattr, size); ++ if (ret) { ++ if (ret == -E2BIG) ++ goto err_size; ++ return ret; ++ } ++ ++ /* ++ * XXX: Do we want to be lenient like existing syscalls; or do we want ++ * to be strict and return an error on out-of-bounds values? ++ */ ++ attr->sched_nice = clamp(attr->sched_nice, -20, 19); ++ ++ /* sched/core.c uses zero here but we already know ret is zero */ ++ return 0; ++ ++err_size: ++ put_user(sizeof(*attr), &uattr->size); ++ return -E2BIG; ++} ++ ++/** ++ * sys_sched_setscheduler - set/change the scheduler policy and RT priority ++ * @pid: the pid in question. ++ * @policy: new policy. ++ * ++ * Return: 0 on success. An error code otherwise. ++ * @param: structure containing the new RT priority. ++ */ ++SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy, struct sched_param __user *, param) ++{ ++ if (policy < 0) ++ return -EINVAL; ++ ++ return do_sched_setscheduler(pid, policy, param); ++} ++ ++/** ++ * sys_sched_setparam - set/change the RT priority of a thread ++ * @pid: the pid in question. ++ * @param: structure containing the new RT priority. ++ * ++ * Return: 0 on success. An error code otherwise. ++ */ ++SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param) ++{ ++ return do_sched_setscheduler(pid, SETPARAM_POLICY, param); ++} ++ ++/** ++ * sys_sched_setattr - same as above, but with extended sched_attr ++ * @pid: the pid in question. ++ * @uattr: structure containing the extended parameters. 
++ */ ++SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr, ++ unsigned int, flags) ++{ ++ struct sched_attr attr; ++ struct task_struct *p; ++ int retval; ++ ++ if (!uattr || pid < 0 || flags) ++ return -EINVAL; ++ ++ retval = sched_copy_attr(uattr, &attr); ++ if (retval) ++ return retval; ++ ++ if ((int)attr.sched_policy < 0) ++ return -EINVAL; ++ ++ rcu_read_lock(); ++ retval = -ESRCH; ++ p = find_process_by_pid(pid); ++ if (p != NULL) ++ retval = sched_setattr(p, &attr); ++ rcu_read_unlock(); ++ ++ return retval; ++} ++ ++/** ++ * sys_sched_getscheduler - get the policy (scheduling class) of a thread ++ * @pid: the pid in question. ++ * ++ * Return: On success, the policy of the thread. Otherwise, a negative error ++ * code. ++ */ ++SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid) ++{ ++ struct task_struct *p; ++ int retval = -EINVAL; ++ ++ if (pid < 0) ++ goto out_nounlock; ++ ++ retval = -ESRCH; ++ rcu_read_lock(); ++ p = find_process_by_pid(pid); ++ if (p) { ++ retval = security_task_getscheduler(p); ++ if (!retval) ++ retval = p->policy; ++ } ++ rcu_read_unlock(); ++ ++out_nounlock: ++ return retval; ++} ++ ++/** ++ * sys_sched_getscheduler - get the RT priority of a thread ++ * @pid: the pid in question. ++ * @param: structure containing the RT priority. ++ * ++ * Return: On success, 0 and the RT priority is in @param. Otherwise, an error ++ * code. ++ */ ++SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param) ++{ ++ struct sched_param lp = { .sched_priority = 0 }; ++ struct task_struct *p; ++ int retval = -EINVAL; ++ ++ if (!param || pid < 0) ++ goto out_nounlock; ++ ++ rcu_read_lock(); ++ p = find_process_by_pid(pid); ++ retval = -ESRCH; ++ if (!p) ++ goto out_unlock; ++ ++ retval = security_task_getscheduler(p); ++ if (retval) ++ goto out_unlock; ++ ++ if (task_has_rt_policy(p)) ++ lp.sched_priority = p->rt_priority; ++ rcu_read_unlock(); ++ ++ /* ++ * This one might sleep, we cannot do it with a spinlock held ... ++ */ ++ retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0; ++ ++out_nounlock: ++ return retval; ++ ++out_unlock: ++ rcu_read_unlock(); ++ return retval; ++} ++ ++/* ++ * Copy the kernel size attribute structure (which might be larger ++ * than what user-space knows about) to user-space. ++ * ++ * Note that all cases are valid: user-space buffer can be larger or ++ * smaller than the kernel-space buffer. The usual case is that both ++ * have the same size. ++ */ ++static int ++sched_attr_copy_to_user(struct sched_attr __user *uattr, ++ struct sched_attr *kattr, ++ unsigned int usize) ++{ ++ unsigned int ksize = sizeof(*kattr); ++ ++ if (!access_ok(uattr, usize)) ++ return -EFAULT; ++ ++ /* ++ * sched_getattr() ABI forwards and backwards compatibility: ++ * ++ * If usize == ksize then we just copy everything to user-space and all is good. ++ * ++ * If usize < ksize then we only copy as much as user-space has space for, ++ * this keeps ABI compatibility as well. We skip the rest. ++ * ++ * If usize > ksize then user-space is using a newer version of the ABI, ++ * which part the kernel doesn't know about. Just ignore it - tooling can ++ * detect the kernel's knowledge of attributes from the attr->size value ++ * which is set to ksize in this case. ++ */ ++ kattr->size = min(usize, ksize); ++ ++ if (copy_to_user(uattr, kattr, kattr->size)) ++ return -EFAULT; ++ ++ return 0; ++} ++ ++/** ++ * sys_sched_getattr - similar to sched_getparam, but with sched_attr ++ * @pid: the pid in question. 
++ * @uattr: structure containing the extended parameters. ++ * @usize: sizeof(attr) for fwd/bwd comp. ++ * @flags: for future extension. ++ */ ++SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr, ++ unsigned int, usize, unsigned int, flags) ++{ ++ struct sched_attr kattr = { }; ++ struct task_struct *p; ++ int retval; ++ ++ if (!uattr || pid < 0 || usize > PAGE_SIZE || ++ usize < SCHED_ATTR_SIZE_VER0 || flags) ++ return -EINVAL; ++ ++ rcu_read_lock(); ++ p = find_process_by_pid(pid); ++ retval = -ESRCH; ++ if (!p) ++ goto out_unlock; ++ ++ retval = security_task_getscheduler(p); ++ if (retval) ++ goto out_unlock; ++ ++ kattr.sched_policy = p->policy; ++ if (rt_task(p)) ++ kattr.sched_priority = p->rt_priority; ++ else ++ kattr.sched_nice = task_nice(p); ++ ++#ifdef CONFIG_UCLAMP_TASK ++ kattr.sched_util_min = p->uclamp_req[UCLAMP_MIN].value; ++ kattr.sched_util_max = p->uclamp_req[UCLAMP_MAX].value; ++#endif ++ ++ rcu_read_unlock(); ++ ++ return sched_attr_copy_to_user(uattr, &kattr, usize); ++ ++out_unlock: ++ rcu_read_unlock(); ++ return retval; ++} ++ ++long sched_setaffinity(pid_t pid, const struct cpumask *in_mask) ++{ ++ cpumask_var_t cpus_mask, new_mask; ++ struct task_struct *p; ++ int retval; ++ ++ get_online_cpus(); ++ rcu_read_lock(); ++ ++ p = find_process_by_pid(pid); ++ if (!p) { ++ rcu_read_unlock(); ++ put_online_cpus(); ++ return -ESRCH; ++ } ++ ++ /* Prevent p going away */ ++ get_task_struct(p); ++ rcu_read_unlock(); ++ ++ if (p->flags & PF_NO_SETAFFINITY) { ++ retval = -EINVAL; ++ goto out_put_task; ++ } ++ if (!alloc_cpumask_var(&cpus_mask, GFP_KERNEL)) { ++ retval = -ENOMEM; ++ goto out_put_task; ++ } ++ if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) { ++ retval = -ENOMEM; ++ goto out_free_cpus_allowed; ++ } ++ retval = -EPERM; ++ if (!check_same_owner(p)) { ++ rcu_read_lock(); ++ if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) { ++ rcu_read_unlock(); ++ goto out_unlock; ++ } ++ rcu_read_unlock(); ++ } ++ ++ retval = security_task_setscheduler(p); ++ if (retval) ++ goto out_unlock; ++ ++ cpuset_cpus_allowed(p, cpus_mask); ++ cpumask_and(new_mask, in_mask, cpus_mask); ++again: ++ retval = __set_cpus_allowed_ptr(p, new_mask, true); ++ ++ if (!retval) { ++ cpuset_cpus_allowed(p, cpus_mask); ++ if (!cpumask_subset(new_mask, cpus_mask)) { ++ /* ++ * We must have raced with a concurrent cpuset ++ * update. Just reset the cpus_mask to the ++ * cpuset's cpus_mask ++ */ ++ cpumask_copy(new_mask, cpus_mask); ++ goto again; ++ } ++ } ++out_unlock: ++ free_cpumask_var(new_mask); ++out_free_cpus_allowed: ++ free_cpumask_var(cpus_mask); ++out_put_task: ++ put_task_struct(p); ++ put_online_cpus(); ++ return retval; ++} ++ ++static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len, ++ struct cpumask *new_mask) ++{ ++ if (len < cpumask_size()) ++ cpumask_clear(new_mask); ++ else if (len > cpumask_size()) ++ len = cpumask_size(); ++ ++ return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0; ++} ++ ++/** ++ * sys_sched_setaffinity - set the CPU affinity of a process ++ * @pid: pid of the process ++ * @len: length in bytes of the bitmask pointed to by user_mask_ptr ++ * @user_mask_ptr: user-space pointer to the new CPU mask ++ * ++ * Return: 0 on success. An error code otherwise. 
++ */ ++SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len, ++ unsigned long __user *, user_mask_ptr) ++{ ++ cpumask_var_t new_mask; ++ int retval; ++ ++ if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) ++ return -ENOMEM; ++ ++ retval = get_user_cpu_mask(user_mask_ptr, len, new_mask); ++ if (retval == 0) ++ retval = sched_setaffinity(pid, new_mask); ++ free_cpumask_var(new_mask); ++ return retval; ++} ++ ++long sched_getaffinity(pid_t pid, cpumask_t *mask) ++{ ++ struct task_struct *p; ++ raw_spinlock_t *lock; ++ unsigned long flags; ++ int retval; ++ ++ rcu_read_lock(); ++ ++ retval = -ESRCH; ++ p = find_process_by_pid(pid); ++ if (!p) ++ goto out_unlock; ++ ++ retval = security_task_getscheduler(p); ++ if (retval) ++ goto out_unlock; ++ ++ task_access_lock_irqsave(p, &lock, &flags); ++ cpumask_and(mask, &p->cpus_mask, cpu_active_mask); ++ task_access_unlock_irqrestore(p, lock, &flags); ++ ++out_unlock: ++ rcu_read_unlock(); ++ ++ return retval; ++} ++ ++/** ++ * sys_sched_getaffinity - get the CPU affinity of a process ++ * @pid: pid of the process ++ * @len: length in bytes of the bitmask pointed to by user_mask_ptr ++ * @user_mask_ptr: user-space pointer to hold the current CPU mask ++ * ++ * Return: size of CPU mask copied to user_mask_ptr on success. An ++ * error code otherwise. ++ */ ++SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len, ++ unsigned long __user *, user_mask_ptr) ++{ ++ int ret; ++ cpumask_var_t mask; ++ ++ if ((len * BITS_PER_BYTE) < nr_cpu_ids) ++ return -EINVAL; ++ if (len & (sizeof(unsigned long)-1)) ++ return -EINVAL; ++ ++ if (!alloc_cpumask_var(&mask, GFP_KERNEL)) ++ return -ENOMEM; ++ ++ ret = sched_getaffinity(pid, mask); ++ if (ret == 0) { ++ unsigned int retlen = min_t(size_t, len, cpumask_size()); ++ ++ if (copy_to_user(user_mask_ptr, mask, retlen)) ++ ret = -EFAULT; ++ else ++ ret = retlen; ++ } ++ free_cpumask_var(mask); ++ ++ return ret; ++} ++ ++/** ++ * sys_sched_yield - yield the current processor to other threads. ++ * ++ * This function yields the current CPU to other tasks. It does this by ++ * scheduling away the current task. If it still has the earliest deadline ++ * it will be scheduled again as the next task. ++ * ++ * Return: 0. ++ */ ++static void do_sched_yield(void) ++{ ++ struct rq *rq; ++ struct rq_flags rf; ++ ++ if (!sched_yield_type) ++ return; ++ ++ rq = this_rq_lock_irq(&rf); ++ ++ if (sched_yield_type > 1) { ++ time_slice_expired(current, rq); ++ requeue_task(current, rq); ++ } ++ schedstat_inc(rq->yld_count); ++ ++ /* ++ * Since we are going to call schedule() anyway, there's ++ * no need to preempt or enable interrupts: ++ */ ++ preempt_disable(); ++ raw_spin_unlock(&rq->lock); ++ sched_preempt_enable_no_resched(); ++ ++ schedule(); ++} ++ ++SYSCALL_DEFINE0(sched_yield) ++{ ++ do_sched_yield(); ++ return 0; ++} ++ ++#ifndef CONFIG_PREEMPTION ++int __sched _cond_resched(void) ++{ ++ if (should_resched(0)) { ++ preempt_schedule_common(); ++ return 1; ++ } ++ rcu_all_qs(); ++ return 0; ++} ++EXPORT_SYMBOL(_cond_resched); ++#endif ++ ++/* ++ * __cond_resched_lock() - if a reschedule is pending, drop the given lock, ++ * call schedule, and on return reacquire the lock. ++ * ++ * This works OK both with and without CONFIG_PREEMPTION. We do strange low-level ++ * operations here to prevent schedule() from being called twice (once via ++ * spin_unlock(), once by hand). 
++ */ ++int __cond_resched_lock(spinlock_t *lock) ++{ ++ int resched = should_resched(PREEMPT_LOCK_OFFSET); ++ int ret = 0; ++ ++ lockdep_assert_held(lock); ++ ++ if (spin_needbreak(lock) || resched) { ++ spin_unlock(lock); ++ if (resched) ++ preempt_schedule_common(); ++ else ++ cpu_relax(); ++ ret = 1; ++ spin_lock(lock); ++ } ++ return ret; ++} ++EXPORT_SYMBOL(__cond_resched_lock); ++ ++/** ++ * yield - yield the current processor to other threads. ++ * ++ * Do not ever use this function, there's a 99% chance you're doing it wrong. ++ * ++ * The scheduler is at all times free to pick the calling task as the most ++ * eligible task to run, if removing the yield() call from your code breaks ++ * it, its already broken. ++ * ++ * Typical broken usage is: ++ * ++ * while (!event) ++ * yield(); ++ * ++ * where one assumes that yield() will let 'the other' process run that will ++ * make event true. If the current task is a SCHED_FIFO task that will never ++ * happen. Never use yield() as a progress guarantee!! ++ * ++ * If you want to use yield() to wait for something, use wait_event(). ++ * If you want to use yield() to be 'nice' for others, use cond_resched(). ++ * If you still want to use yield(), do not! ++ */ ++void __sched yield(void) ++{ ++ set_current_state(TASK_RUNNING); ++ do_sched_yield(); ++} ++EXPORT_SYMBOL(yield); ++ ++/** ++ * yield_to - yield the current processor to another thread in ++ * your thread group, or accelerate that thread toward the ++ * processor it's on. ++ * @p: target task ++ * @preempt: whether task preemption is allowed or not ++ * ++ * It's the caller's job to ensure that the target task struct ++ * can't go away on us before we can do any checks. ++ * ++ * In PDS, yield_to is not supported. ++ * ++ * Return: ++ * true (>0) if we indeed boosted the target task. ++ * false (0) if we failed to boost the target. ++ * -ESRCH if there's no task to yield to. ++ */ ++int __sched yield_to(struct task_struct *p, bool preempt) ++{ ++ return 0; ++} ++EXPORT_SYMBOL_GPL(yield_to); ++ ++int io_schedule_prepare(void) ++{ ++ int old_iowait = current->in_iowait; ++ ++ current->in_iowait = 1; ++ blk_schedule_flush_plug(current); ++ ++ return old_iowait; ++} ++ ++void io_schedule_finish(int token) ++{ ++ current->in_iowait = token; ++} ++ ++/* ++ * This task is about to go to sleep on IO. Increment rq->nr_iowait so ++ * that process accounting knows that this is a task in IO wait state. ++ * ++ * But don't do that if it is a deliberate, throttling IO wait (this task ++ * has set its backing_dev_info: the queue against which it should throttle) ++ */ ++ ++long __sched io_schedule_timeout(long timeout) ++{ ++ int token; ++ long ret; ++ ++ token = io_schedule_prepare(); ++ ret = schedule_timeout(timeout); ++ io_schedule_finish(token); ++ ++ return ret; ++} ++EXPORT_SYMBOL(io_schedule_timeout); ++ ++void io_schedule(void) ++{ ++ int token; ++ ++ token = io_schedule_prepare(); ++ schedule(); ++ io_schedule_finish(token); ++} ++EXPORT_SYMBOL(io_schedule); ++ ++/** ++ * sys_sched_get_priority_max - return maximum RT priority. ++ * @policy: scheduling class. ++ * ++ * Return: On success, this syscall returns the maximum ++ * rt_priority that can be used by a given scheduling class. ++ * On failure, a negative error code is returned. 
++ */ ++SYSCALL_DEFINE1(sched_get_priority_max, int, policy) ++{ ++ int ret = -EINVAL; ++ ++ switch (policy) { ++ case SCHED_FIFO: ++ case SCHED_RR: ++ ret = MAX_USER_RT_PRIO-1; ++ break; ++ case SCHED_NORMAL: ++ case SCHED_BATCH: ++ case SCHED_ISO: ++ case SCHED_IDLE: ++ ret = 0; ++ break; ++ } ++ return ret; ++} ++ ++/** ++ * sys_sched_get_priority_min - return minimum RT priority. ++ * @policy: scheduling class. ++ * ++ * Return: On success, this syscall returns the minimum ++ * rt_priority that can be used by a given scheduling class. ++ * On failure, a negative error code is returned. ++ */ ++SYSCALL_DEFINE1(sched_get_priority_min, int, policy) ++{ ++ int ret = -EINVAL; ++ ++ switch (policy) { ++ case SCHED_FIFO: ++ case SCHED_RR: ++ ret = 1; ++ break; ++ case SCHED_NORMAL: ++ case SCHED_BATCH: ++ case SCHED_ISO: ++ case SCHED_IDLE: ++ ret = 0; ++ break; ++ } ++ return ret; ++} ++ ++static int sched_rr_get_interval(pid_t pid, struct timespec64 *t) ++{ ++ struct task_struct *p; ++ int retval; ++ ++ if (pid < 0) ++ return -EINVAL; ++ ++ retval = -ESRCH; ++ rcu_read_lock(); ++ p = find_process_by_pid(pid); ++ if (!p) ++ goto out_unlock; ++ ++ retval = security_task_getscheduler(p); ++ if (retval) ++ goto out_unlock; ++ rcu_read_unlock(); ++ ++ *t = ns_to_timespec64(MS_TO_NS(rr_interval)); ++ return 0; ++ ++out_unlock: ++ rcu_read_unlock(); ++ return retval; ++} ++ ++/** ++ * sys_sched_rr_get_interval - return the default timeslice of a process. ++ * @pid: pid of the process. ++ * @interval: userspace pointer to the timeslice value. ++ * ++ * ++ * Return: On success, 0 and the timeslice is in @interval. Otherwise, ++ * an error code. ++ */ ++SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid, ++ struct __kernel_timespec __user *, interval) ++{ ++ struct timespec64 t; ++ int retval = sched_rr_get_interval(pid, &t); ++ ++ if (retval == 0) ++ retval = put_timespec64(&t, interval); ++ ++ return retval; ++} ++ ++#ifdef CONFIG_COMPAT_32BIT_TIME ++SYSCALL_DEFINE2(sched_rr_get_interval_time32, pid_t, pid, ++ struct old_timespec32 __user *, interval) ++{ ++ struct timespec64 t; ++ int retval = sched_rr_get_interval(pid, &t); ++ ++ if (retval == 0) ++ retval = put_old_timespec32(&t, interval); ++ return retval; ++} ++#endif ++ ++void sched_show_task(struct task_struct *p) ++{ ++ unsigned long free = 0; ++ int ppid; ++ ++ if (!try_get_task_stack(p)) ++ return; ++ ++ printk(KERN_INFO "%-15.15s %c", p->comm, task_state_to_char(p)); ++ ++ if (p->state == TASK_RUNNING) ++ printk(KERN_CONT " running task "); ++#ifdef CONFIG_DEBUG_STACK_USAGE ++ free = stack_not_used(p); ++#endif ++ ppid = 0; ++ rcu_read_lock(); ++ if (pid_alive(p)) ++ ppid = task_pid_nr(rcu_dereference(p->real_parent)); ++ rcu_read_unlock(); ++ printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free, ++ task_pid_nr(p), ppid, ++ (unsigned long)task_thread_info(p)->flags); ++ ++ print_worker_info(KERN_INFO, p); ++ show_stack(p, NULL, KERN_INFO); ++ put_task_stack(p); ++} ++EXPORT_SYMBOL_GPL(sched_show_task); ++ ++static inline bool ++state_filter_match(unsigned long state_filter, struct task_struct *p) ++{ ++ /* no filter, everything matches */ ++ if (!state_filter) ++ return true; ++ ++ /* filter, but doesn't match */ ++ if (!(p->state & state_filter)) ++ return false; ++ ++ /* ++ * When looking for TASK_UNINTERRUPTIBLE skip TASK_IDLE (allows ++ * TASK_KILLABLE). 
++ */ ++ if (state_filter == TASK_UNINTERRUPTIBLE && p->state == TASK_IDLE) ++ return false; ++ ++ return true; ++} ++ ++ ++void show_state_filter(unsigned long state_filter) ++{ ++ struct task_struct *g, *p; ++ ++#if BITS_PER_LONG == 32 ++ printk(KERN_INFO ++ " task PC stack pid father\n"); ++#else ++ printk(KERN_INFO ++ " task PC stack pid father\n"); ++#endif ++ rcu_read_lock(); ++ for_each_process_thread(g, p) { ++ /* ++ * reset the NMI-timeout, listing all files on a slow ++ * console might take a lot of time: ++ * Also, reset softlockup watchdogs on all CPUs, because ++ * another CPU might be blocked waiting for us to process ++ * an IPI. ++ */ ++ touch_nmi_watchdog(); ++ touch_all_softlockup_watchdogs(); ++ if (state_filter_match(state_filter, p)) ++ sched_show_task(p); ++ } ++ ++#ifdef CONFIG_SCHED_DEBUG ++ /* PDS TODO: should support this ++ if (!state_filter) ++ sysrq_sched_debug_show(); ++ */ ++#endif ++ rcu_read_unlock(); ++ /* ++ * Only show locks if all tasks are dumped: ++ */ ++ if (!state_filter) ++ debug_show_all_locks(); ++} ++ ++void dump_cpu_task(int cpu) ++{ ++ pr_info("Task dump for CPU %d:\n", cpu); ++ sched_show_task(cpu_curr(cpu)); ++} ++ ++/** ++ * init_idle - set up an idle thread for a given CPU ++ * @idle: task in question ++ * @cpu: cpu the idle task belongs to ++ * ++ * NOTE: this function does not set the idle thread's NEED_RESCHED ++ * flag, to make booting more robust. ++ */ ++void init_idle(struct task_struct *idle, int cpu) ++{ ++ struct rq *rq = cpu_rq(cpu); ++ unsigned long flags; ++ ++ raw_spin_lock_irqsave(&idle->pi_lock, flags); ++ raw_spin_lock(&rq->lock); ++ update_rq_clock(rq); ++ ++ idle->last_ran = rq->clock_task; ++ idle->state = TASK_RUNNING; ++ idle->flags |= PF_IDLE; ++ /* Setting prio to illegal value shouldn't matter when never queued */ ++ idle->prio = PRIO_LIMIT; ++ idle->deadline = rq_clock(rq) + task_deadline_diff(idle); ++ update_task_priodl(idle); ++ ++ kasan_unpoison_task_stack(idle); ++ ++#ifdef CONFIG_SMP ++ /* ++ * It's possible that init_idle() gets called multiple times on a task, ++ * in that case do_set_cpus_allowed() will not do the right thing. ++ * ++ * And since this is boot we can forgo the serialisation. ++ */ ++ set_cpus_allowed_common(idle, cpumask_of(cpu)); ++#endif ++ ++ /* Silence PROVE_RCU */ ++ rcu_read_lock(); ++ __set_task_cpu(idle, cpu); ++ rcu_read_unlock(); ++ ++ rq->idle = idle; ++ rcu_assign_pointer(rq->curr, idle); ++ idle->on_cpu = 1; ++ ++ raw_spin_unlock(&rq->lock); ++ raw_spin_unlock_irqrestore(&idle->pi_lock, flags); ++ ++ /* Set the preempt count _outside_ the spinlocks! */ ++ init_idle_preempt_count(idle, cpu); ++ ++ ftrace_graph_init_idle_task(idle, cpu); ++ vtime_init_idle(idle, cpu); ++#ifdef CONFIG_SMP ++ sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu); ++#endif ++} ++ ++void resched_cpu(int cpu) ++{ ++ struct rq *rq = cpu_rq(cpu); ++ unsigned long flags; ++ ++ raw_spin_lock_irqsave(&rq->lock, flags); ++ if (cpu_online(cpu) || cpu == smp_processor_id()) ++ resched_curr(cpu_rq(cpu)); ++ raw_spin_unlock_irqrestore(&rq->lock, flags); ++} ++ ++static bool __wake_q_add(struct wake_q_head *head, struct task_struct *task) ++{ ++ struct wake_q_node *node = &task->wake_q; ++ ++ /* ++ * Atomically grab the task, if ->wake_q is !nil already it means ++ * its already queued (either by us or someone else) and will get the ++ * wakeup due to that. ++ * ++ * In order to ensure that a pending wakeup will observe our pending ++ * state, even in the failed case, an explicit smp_mb() must be used. 
++ */ ++ smp_mb__before_atomic(); ++ if (unlikely(cmpxchg_relaxed(&node->next, NULL, WAKE_Q_TAIL))) ++ return false; ++ ++ /* ++ * The head is context local, there can be no concurrency. ++ */ ++ *head->lastp = node; ++ head->lastp = &node->next; ++ return true; ++} ++ ++/** ++ * wake_q_add() - queue a wakeup for 'later' waking. ++ * @head: the wake_q_head to add @task to ++ * @task: the task to queue for 'later' wakeup ++ * ++ * Queue a task for later wakeup, most likely by the wake_up_q() call in the ++ * same context, _HOWEVER_ this is not guaranteed, the wakeup can come ++ * instantly. ++ * ++ * This function must be used as-if it were wake_up_process(); IOW the task ++ * must be ready to be woken at this location. ++ */ ++void wake_q_add(struct wake_q_head *head, struct task_struct *task) ++{ ++ if (__wake_q_add(head, task)) ++ get_task_struct(task); ++} ++ ++/** ++ * wake_q_add_safe() - safely queue a wakeup for 'later' waking. ++ * @head: the wake_q_head to add @task to ++ * @task: the task to queue for 'later' wakeup ++ * ++ * Queue a task for later wakeup, most likely by the wake_up_q() call in the ++ * same context, _HOWEVER_ this is not guaranteed, the wakeup can come ++ * instantly. ++ * ++ * This function must be used as-if it were wake_up_process(); IOW the task ++ * must be ready to be woken at this location. ++ * ++ * This function is essentially a task-safe equivalent to wake_q_add(). Callers ++ * that already hold reference to @task can call the 'safe' version and trust ++ * wake_q to do the right thing depending whether or not the @task is already ++ * queued for wakeup. ++ */ ++void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task) ++{ ++ if (!__wake_q_add(head, task)) ++ put_task_struct(task); ++} ++ ++void wake_up_q(struct wake_q_head *head) ++{ ++ struct wake_q_node *node = head->first; ++ ++ while (node != WAKE_Q_TAIL) { ++ struct task_struct *task; ++ ++ task = container_of(node, struct task_struct, wake_q); ++ BUG_ON(!task); ++ /* task can safely be re-inserted now: */ ++ node = node->next; ++ task->wake_q.next = NULL; ++ ++ /* ++ * wake_up_process() executes a full barrier, which pairs with ++ * the queueing in wake_q_add() so as not to miss wakeups. ++ */ ++ wake_up_process(task); ++ put_task_struct(task); ++ } ++} ++ ++#ifdef CONFIG_SMP ++ ++int cpuset_cpumask_can_shrink(const struct cpumask __maybe_unused *cur, ++ const struct cpumask __maybe_unused *trial) ++{ ++ return 1; ++} ++ ++int task_can_attach(struct task_struct *p, ++ const struct cpumask *cs_cpus_allowed) ++{ ++ int ret = 0; ++ ++ /* ++ * Kthreads which disallow setaffinity shouldn't be moved ++ * to a new cpuset; we don't want to change their CPU ++ * affinity and isolating such threads by their set of ++ * allowed nodes is unnecessary. Thus, cpusets are not ++ * applicable for such threads. This prevents checking for ++ * success of set_cpus_allowed_ptr() on all attached tasks ++ * before cpus_mask may be changed. ++ */ ++ if (p->flags & PF_NO_SETAFFINITY) ++ ret = -EINVAL; ++ ++ return ret; ++} ++ ++static bool sched_smp_initialized __read_mostly; ++ ++#ifdef CONFIG_NO_HZ_COMMON ++void nohz_balance_enter_idle(int cpu) ++{ ++} ++ ++void select_nohz_load_balancer(int stop_tick) ++{ ++} ++ ++void set_cpu_sd_state_idle(void) {} ++ ++/* ++ * In the semi idle case, use the nearest busy CPU for migrating timers ++ * from an idle CPU. This is good for power-savings. 
++ * ++ * We don't do similar optimization for completely idle system, as ++ * selecting an idle CPU will add more delays to the timers than intended ++ * (as that CPU's timer base may not be uptodate wrt jiffies etc). ++ */ ++int get_nohz_timer_target(void) ++{ ++ int i, cpu = smp_processor_id(), default_cpu = -1; ++ struct cpumask *mask; ++ ++ if (housekeeping_cpu(cpu, HK_FLAG_TIMER)) { ++ if (!idle_cpu(cpu)) ++ return cpu; ++ default_cpu = cpu; ++ } ++ ++ for (mask = &(per_cpu(sched_cpu_affinity_chk_masks, cpu)[0]); ++ mask < per_cpu(sched_cpu_affinity_chk_end_masks, cpu); mask++) ++ for_each_cpu_and(i, mask, housekeeping_cpumask(HK_FLAG_TIMER)) ++ if (!idle_cpu(i)) ++ return i; ++ ++ if (default_cpu == -1) ++ default_cpu = housekeeping_any_cpu(HK_FLAG_TIMER); ++ cpu = default_cpu; ++ ++ return cpu; ++} ++ ++/* ++ * When add_timer_on() enqueues a timer into the timer wheel of an ++ * idle CPU then this timer might expire before the next timer event ++ * which is scheduled to wake up that CPU. In case of a completely ++ * idle system the next event might even be infinite time into the ++ * future. wake_up_idle_cpu() ensures that the CPU is woken up and ++ * leaves the inner idle loop so the newly added timer is taken into ++ * account when the CPU goes back to idle and evaluates the timer ++ * wheel for the next timer event. ++ */ ++void wake_up_idle_cpu(int cpu) ++{ ++ if (cpu == smp_processor_id()) ++ return; ++ ++ set_tsk_need_resched(cpu_rq(cpu)->idle); ++ smp_send_reschedule(cpu); ++} ++ ++void wake_up_nohz_cpu(int cpu) ++{ ++ wake_up_idle_cpu(cpu); ++} ++#endif /* CONFIG_NO_HZ_COMMON */ ++ ++#ifdef CONFIG_HOTPLUG_CPU ++/* ++ * Ensures that the idle task is using init_mm right before its CPU goes ++ * offline. ++ */ ++void idle_task_exit(void) ++{ ++ struct mm_struct *mm = current->active_mm; ++ ++ BUG_ON(cpu_online(smp_processor_id())); ++ ++ if (mm != &init_mm) { ++ switch_mm(mm, &init_mm, current); ++ current->active_mm = &init_mm; ++ finish_arch_post_lock_switch(); ++ } ++ mmdrop(mm); ++} ++ ++/* ++ * Migrate all tasks from the rq, sleeping tasks will be migrated by ++ * try_to_wake_up()->select_task_rq(). ++ * ++ * Called with rq->lock held even though we'er in stop_machine() and ++ * there's no concurrency possible, we hold the required locks anyway ++ * because of lock validation efforts. ++ */ ++static void migrate_tasks(struct rq *dead_rq) ++{ ++ struct rq *rq = dead_rq; ++ struct task_struct *p, *stop = rq->stop; ++ struct skiplist_node *node; ++ int count = 0; ++ ++ /* ++ * Fudge the rq selection such that the below task selection loop ++ * doesn't get stuck on the currently eligible stop task. ++ * ++ * We're currently inside stop_machine() and the rq is either stuck ++ * in the stop_machine_cpu_stop() loop, or we're executing this code, ++ * either way we should never end up calling schedule() until we're ++ * done here. ++ */ ++ rq->stop = NULL; ++ ++ node = &rq->sl_header; ++ while ((node = node->next[0]) != &rq->sl_header) { ++ int dest_cpu; ++ ++ p = skiplist_entry(node, struct task_struct, sl_node); ++ ++ /* skip the running task */ ++ if (task_running(p)) ++ continue; ++ ++ /* ++ * Rules for changing task_struct::cpus_mask are holding ++ * both pi_lock and rq->lock, such that holding either ++ * stabilizes the mask. ++ * ++ * Drop rq->lock is not quite as disastrous as it usually is ++ * because !cpu_active at this point, which means load-balance ++ * will not interfere. Also, stop-machine. 
++ */ ++ raw_spin_unlock(&rq->lock); ++ raw_spin_lock(&p->pi_lock); ++ raw_spin_lock(&rq->lock); ++ ++ /* ++ * Since we're inside stop-machine, _nothing_ should have ++ * changed the task, WARN if weird stuff happened, because in ++ * that case the above rq->lock drop is a fail too. ++ */ ++ if (WARN_ON(task_rq(p) != rq || !task_on_rq_queued(p))) { ++ raw_spin_unlock(&p->pi_lock); ++ continue; ++ } ++ ++ count++; ++ /* Find suitable destination for @next, with force if needed. */ ++ dest_cpu = select_fallback_rq(dead_rq->cpu, p); ++ ++ rq = __migrate_task(rq, p, dest_cpu); ++ raw_spin_unlock(&rq->lock); ++ raw_spin_unlock(&p->pi_lock); ++ ++ rq = dead_rq; ++ raw_spin_lock(&rq->lock); ++ /* Check queued task all over from the header again */ ++ node = &rq->sl_header; ++ } ++ ++ rq->stop = stop; ++} ++ ++static void set_rq_offline(struct rq *rq) ++{ ++ if (rq->online) ++ rq->online = false; ++} ++#endif /* CONFIG_HOTPLUG_CPU */ ++ ++static void set_rq_online(struct rq *rq) ++{ ++ if (!rq->online) ++ rq->online = true; ++} ++ ++#ifdef CONFIG_SCHED_DEBUG ++ ++static __read_mostly int sched_debug_enabled; ++ ++static int __init sched_debug_setup(char *str) ++{ ++ sched_debug_enabled = 1; ++ ++ return 0; ++} ++early_param("sched_debug", sched_debug_setup); ++ ++static inline bool sched_debug(void) ++{ ++ return sched_debug_enabled; ++} ++#else /* !CONFIG_SCHED_DEBUG */ ++static inline bool sched_debug(void) ++{ ++ return false; ++} ++#endif /* CONFIG_SCHED_DEBUG */ ++ ++#ifdef CONFIG_SMP ++void send_call_function_single_ipi(int cpu) ++{ ++ struct rq *rq = cpu_rq(cpu); ++ ++ if (!set_nr_if_polling(rq->idle)) ++ arch_send_call_function_single_ipi(cpu); ++ else ++ trace_sched_wake_idle_without_ipi(cpu); ++} ++ ++void sched_ttwu_pending(void *arg) ++{ ++ struct llist_node *llist = arg; ++ struct rq *rq = this_rq(); ++ struct task_struct *p, *t; ++ struct rq_flags rf; ++ ++ if (!llist) ++ return; ++ ++ /* ++ * rq::ttwu_pending racy indication of out-standing wakeups. ++ * Races such that false-negatives are possible, since they ++ * are shorter lived that false-positives would be. ++ */ ++ WRITE_ONCE(rq->ttwu_pending, 0); ++ ++ rq_lock_irqsave(rq, &rf); ++ update_rq_clock(rq); ++ ++ /*llist_for_each_entry_safe(p, t, llist, wake_entry) ++ ttwu_do_activate(rq, p, p->sched_remote_wakeup ? WF_MIGRATED : 0, &rf);*/ ++ ++ rq_unlock_irqrestore(rq, &rf); ++} ++ ++void wake_up_if_idle(int cpu) ++{ ++ struct rq *rq = cpu_rq(cpu); ++ unsigned long flags; ++ ++ rcu_read_lock(); ++ ++ if (!is_idle_task(rcu_dereference(rq->curr))) ++ goto out; ++ ++ if (set_nr_if_polling(rq->idle)) { ++ trace_sched_wake_idle_without_ipi(cpu); ++ } else { ++ raw_spin_lock_irqsave(&rq->lock, flags); ++ if (is_idle_task(rq->curr)) ++ smp_send_reschedule(cpu); ++ /* Else CPU is not idle, do nothing here */ ++ raw_spin_unlock_irqrestore(&rq->lock, flags); ++ } ++ ++out: ++ rcu_read_unlock(); ++} ++ ++bool cpus_share_cache(int this_cpu, int that_cpu) ++{ ++ return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu); ++} ++#endif /* CONFIG_SMP */ ++ ++/* ++ * Topology list, bottom-up. 
++ */ ++static struct sched_domain_topology_level default_topology[] = { ++#ifdef CONFIG_SCHED_SMT ++ { cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) }, ++#endif ++#ifdef CONFIG_SCHED_MC ++ { cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) }, ++#endif ++ { cpu_cpu_mask, SD_INIT_NAME(DIE) }, ++ { NULL, }, ++}; ++ ++static struct sched_domain_topology_level *sched_domain_topology = ++ default_topology; ++ ++#define for_each_sd_topology(tl) \ ++ for (tl = sched_domain_topology; tl->mask; tl++) ++ ++void set_sched_topology(struct sched_domain_topology_level *tl) ++{ ++ if (WARN_ON_ONCE(sched_smp_initialized)) ++ return; ++ ++ sched_domain_topology = tl; ++} ++ ++/* ++ * Initializers for schedule domains ++ * Non-inlined to reduce accumulated stack pressure in build_sched_domains() ++ */ ++ ++int sched_domain_level_max; ++ ++/* ++ * Partition sched domains as specified by the 'ndoms_new' ++ * cpumasks in the array doms_new[] of cpumasks. This compares ++ * doms_new[] to the current sched domain partitioning, doms_cur[]. ++ * It destroys each deleted domain and builds each new domain. ++ * ++ * 'doms_new' is an array of cpumask_var_t's of length 'ndoms_new'. ++ * The masks don't intersect (don't overlap.) We should setup one ++ * sched domain for each mask. CPUs not in any of the cpumasks will ++ * not be load balanced. If the same cpumask appears both in the ++ * current 'doms_cur' domains and in the new 'doms_new', we can leave ++ * it as it is. ++ * ++ * The passed in 'doms_new' should be allocated using ++ * alloc_sched_domains. This routine takes ownership of it and will ++ * free_sched_domains it when done with it. If the caller failed the ++ * alloc call, then it can pass in doms_new == NULL && ndoms_new == 1, ++ * and partition_sched_domains() will fallback to the single partition ++ * 'fallback_doms', it also forces the domains to be rebuilt. ++ * ++ * If doms_new == NULL it will be replaced with cpu_online_mask. ++ * ndoms_new == 0 is a special case for destroying existing domains, ++ * and it will not create the default domain. ++ * ++ * Call with hotplug lock held ++ */ ++void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[], ++ struct sched_domain_attr *dattr_new) ++{ ++ /** ++ * PDS doesn't depend on sched domains, but just keep this api ++ */ ++} ++ ++/* ++ * used to mark begin/end of suspend/resume: ++ */ ++static int num_cpus_frozen; ++ ++#ifdef CONFIG_NUMA ++int __read_mostly node_reclaim_distance = RECLAIM_DISTANCE; ++ ++/* ++ * sched_numa_find_closest() - given the NUMA topology, find the cpu ++ * closest to @cpu from @cpumask. ++ * cpumask: cpumask to find a cpu from ++ * cpu: cpu to be close to ++ * ++ * returns: cpu, or nr_cpu_ids when nothing found. ++ */ ++int sched_numa_find_closest(const struct cpumask *cpus, int cpu) ++{ ++ return best_mask_cpu(cpu, cpus); ++} ++#endif /* CONFIG_NUMA */ ++ ++/* ++ * Update cpusets according to cpu_active mask. If cpusets are ++ * disabled, cpuset_update_active_cpus() becomes a simple wrapper ++ * around partition_sched_domains(). ++ * ++ * If we come here as part of a suspend/resume, don't touch cpusets because we ++ * want to restore it back to its original state upon resume anyway. ++ */ ++static void cpuset_cpu_active(void) ++{ ++ if (cpuhp_tasks_frozen) { ++ /* ++ * num_cpus_frozen tracks how many CPUs are involved in suspend ++ * resume sequence. As long as this is not the last online ++ * operation in the resume sequence, just build a single sched ++ * domain, ignoring cpusets. 
++ */ ++ partition_sched_domains(1, NULL, NULL); ++ if (--num_cpus_frozen) ++ return; ++ /* ++ * This is the last CPU online operation. So fall through and ++ * restore the original sched domains by considering the ++ * cpuset configurations. ++ */ ++ cpuset_force_rebuild(); ++ } ++ ++ cpuset_update_active_cpus(); ++} ++ ++static int cpuset_cpu_inactive(unsigned int cpu) ++{ ++ if (!cpuhp_tasks_frozen) { ++ cpuset_update_active_cpus(); ++ } else { ++ num_cpus_frozen++; ++ partition_sched_domains(1, NULL, NULL); ++ } ++ return 0; ++} ++ ++int sched_cpu_activate(unsigned int cpu) ++{ ++ struct rq *rq = cpu_rq(cpu); ++ unsigned long flags; ++ ++#ifdef CONFIG_SCHED_SMT ++ /* ++ * When going up, increment the number of cores with SMT present. ++ */ ++ if (cpumask_weight(cpu_smt_mask(cpu)) == 2) ++ static_branch_inc_cpuslocked(&sched_smt_present); ++#endif ++ set_cpu_active(cpu, true); ++ ++ if (sched_smp_initialized) ++ cpuset_cpu_active(); ++ ++ /* ++ * Put the rq online, if not already. This happens: ++ * ++ * 1) In the early boot process, because we build the real domains ++ * after all cpus have been brought up. ++ * ++ * 2) At runtime, if cpuset_cpu_active() fails to rebuild the ++ * domains. ++ */ ++ raw_spin_lock_irqsave(&rq->lock, flags); ++ set_rq_online(rq); ++ raw_spin_unlock_irqrestore(&rq->lock, flags); ++ ++ return 0; ++} ++ ++int sched_cpu_deactivate(unsigned int cpu) ++{ ++ int ret; ++ ++ set_cpu_active(cpu, false); ++ /* ++ * We've cleared cpu_active_mask, wait for all preempt-disabled and RCU ++ * users of this state to go away such that all new such users will ++ * observe it. ++ * ++ * Do sync before park smpboot threads to take care the rcu boost case. ++ */ ++ synchronize_rcu(); ++ ++#ifdef CONFIG_SCHED_SMT ++ /* ++ * When going down, decrement the number of cores with SMT present. 
++ */ ++ if (cpumask_weight(cpu_smt_mask(cpu)) == 2) ++ static_branch_dec_cpuslocked(&sched_smt_present); ++#endif ++ ++ if (!sched_smp_initialized) ++ return 0; ++ ++ ret = cpuset_cpu_inactive(cpu); ++ if (ret) { ++ set_cpu_active(cpu, true); ++ return ret; ++ } ++ return 0; ++} ++ ++static void sched_rq_cpu_starting(unsigned int cpu) ++{ ++ struct rq *rq = cpu_rq(cpu); ++ ++ rq->calc_load_update = calc_load_update; ++} ++ ++int sched_cpu_starting(unsigned int cpu) ++{ ++ sched_rq_cpu_starting(cpu); ++ sched_tick_start(cpu); ++ return 0; ++} ++ ++#ifdef CONFIG_HOTPLUG_CPU ++int sched_cpu_dying(unsigned int cpu) ++{ ++ struct rq *rq = cpu_rq(cpu); ++ unsigned long flags; ++ ++ sched_tick_stop(cpu); ++ raw_spin_lock_irqsave(&rq->lock, flags); ++ set_rq_offline(rq); ++ migrate_tasks(rq); ++ raw_spin_unlock_irqrestore(&rq->lock, flags); ++ ++ hrtick_clear(rq); ++ return 0; ++} ++#endif ++ ++#ifdef CONFIG_SMP ++static void sched_init_topology_cpumask_early(void) ++{ ++ int cpu, level; ++ cpumask_t *tmp; ++ ++ for_each_possible_cpu(cpu) { ++ for (level = 0; level < NR_CPU_AFFINITY_CHK_LEVEL; level++) { ++ tmp = &(per_cpu(sched_cpu_affinity_chk_masks, cpu)[level]); ++ cpumask_copy(tmp, cpu_possible_mask); ++ cpumask_clear_cpu(cpu, tmp); ++ } ++ per_cpu(sched_cpu_llc_start_mask, cpu) = ++ &(per_cpu(sched_cpu_affinity_chk_masks, cpu)[0]); ++ per_cpu(sched_cpu_affinity_chk_end_masks, cpu) = ++ &(per_cpu(sched_cpu_affinity_chk_masks, cpu)[1]); ++ } ++} ++ ++static void sched_init_topology_cpumask(void) ++{ ++ int cpu; ++ cpumask_t *chk; ++ ++ for_each_online_cpu(cpu) { ++ chk = &(per_cpu(sched_cpu_affinity_chk_masks, cpu)[0]); ++ ++#ifdef CONFIG_SCHED_SMT ++ cpumask_setall(chk); ++ cpumask_clear_cpu(cpu, chk); ++ if (cpumask_and(chk, chk, topology_sibling_cpumask(cpu))) { ++ per_cpu(sched_sibling_cpu, cpu) = cpumask_first(chk); ++ printk(KERN_INFO "pds: cpu #%d affinity check mask - smt 0x%08lx", ++ cpu, (chk++)->bits[0]); ++ } ++#endif ++#ifdef CONFIG_SCHED_MC ++ cpumask_setall(chk); ++ cpumask_clear_cpu(cpu, chk); ++ if (cpumask_and(chk, chk, cpu_coregroup_mask(cpu))) { ++ per_cpu(sched_cpu_llc_start_mask, cpu) = chk; ++ printk(KERN_INFO "pds: cpu #%d affinity check mask - coregroup 0x%08lx", ++ cpu, (chk++)->bits[0]); ++ } ++ cpumask_complement(chk, cpu_coregroup_mask(cpu)); ++ ++ /** ++ * Set up sd_llc_id per CPU ++ */ ++ per_cpu(sd_llc_id, cpu) = ++ cpumask_first(cpu_coregroup_mask(cpu)); ++#else ++ per_cpu(sd_llc_id, cpu) = ++ cpumask_first(topology_core_cpumask(cpu)); ++ ++ per_cpu(sched_cpu_llc_start_mask, cpu) = chk; ++ ++ cpumask_setall(chk); ++ cpumask_clear_cpu(cpu, chk); ++#endif /* NOT CONFIG_SCHED_MC */ ++ if (cpumask_and(chk, chk, topology_core_cpumask(cpu))) ++ printk(KERN_INFO "pds: cpu #%d affinity check mask - core 0x%08lx", ++ cpu, (chk++)->bits[0]); ++ cpumask_complement(chk, topology_core_cpumask(cpu)); ++ ++ if (cpumask_and(chk, chk, cpu_online_mask)) ++ printk(KERN_INFO "pds: cpu #%d affinity check mask - others 0x%08lx", ++ cpu, (chk++)->bits[0]); ++ ++ per_cpu(sched_cpu_affinity_chk_end_masks, cpu) = chk; ++ } ++} ++#endif ++ ++void __init sched_init_smp(void) ++{ ++ /* Move init over to a non-isolated CPU */ ++ if (set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_FLAG_DOMAIN)) < 0) ++ BUG(); ++ ++ cpumask_copy(&sched_rq_queued_masks[SCHED_RQ_EMPTY], cpu_online_mask); ++ ++ sched_init_topology_cpumask(); ++ ++ sched_smp_initialized = true; ++} ++#else ++void __init sched_init_smp(void) ++{ ++} ++#endif /* CONFIG_SMP */ ++ ++int in_sched_functions(unsigned long addr) 
++{ ++ return in_lock_functions(addr) || ++ (addr >= (unsigned long)__sched_text_start ++ && addr < (unsigned long)__sched_text_end); ++} ++ ++#ifdef CONFIG_CGROUP_SCHED ++/* task group related information */ ++struct task_group { ++ struct cgroup_subsys_state css; ++ ++ struct rcu_head rcu; ++ struct list_head list; ++ ++ struct task_group *parent; ++ struct list_head siblings; ++ struct list_head children; ++}; ++ ++/* ++ * Default task group. ++ * Every task in system belongs to this group at bootup. ++ */ ++struct task_group root_task_group; ++LIST_HEAD(task_groups); ++ ++/* Cacheline aligned slab cache for task_group */ ++static struct kmem_cache *task_group_cache __read_mostly; ++#endif /* CONFIG_CGROUP_SCHED */ ++ ++void __init sched_init(void) ++{ ++ int i; ++ struct rq *rq; ++ ++ print_scheduler_version(); ++ ++ wait_bit_init(); ++ ++#ifdef CONFIG_SMP ++ for (i = 0; i < NR_SCHED_RQ_QUEUED_LEVEL; i++) ++ cpumask_clear(&sched_rq_queued_masks[i]); ++ cpumask_setall(&sched_rq_queued_masks[SCHED_RQ_EMPTY]); ++ set_bit(SCHED_RQ_EMPTY, sched_rq_queued_masks_bitmap); ++ ++ cpumask_setall(&sched_rq_pending_masks[SCHED_RQ_EMPTY]); ++ set_bit(SCHED_RQ_EMPTY, sched_rq_pending_masks_bitmap); ++#else ++ uprq = &per_cpu(runqueues, 0); ++#endif ++ ++#ifdef CONFIG_CGROUP_SCHED ++ task_group_cache = KMEM_CACHE(task_group, 0); ++ ++ list_add(&root_task_group.list, &task_groups); ++ INIT_LIST_HEAD(&root_task_group.children); ++ INIT_LIST_HEAD(&root_task_group.siblings); ++#endif /* CONFIG_CGROUP_SCHED */ ++ for_each_possible_cpu(i) { ++ rq = cpu_rq(i); ++ FULL_INIT_SKIPLIST_NODE(&rq->sl_header); ++ raw_spin_lock_init(&rq->lock); ++ rq->dither = 0; ++ rq->nr_running = rq->nr_uninterruptible = 0; ++ rq->calc_load_active = 0; ++ rq->calc_load_update = jiffies + LOAD_FREQ; ++#ifdef CONFIG_SMP ++ rq->online = false; ++ rq->cpu = i; ++ ++ rq->queued_level = SCHED_RQ_EMPTY; ++ rq->pending_level = SCHED_RQ_EMPTY; ++#ifdef CONFIG_SCHED_SMT ++ per_cpu(sched_sibling_cpu, i) = i; ++ rq->active_balance = 0; ++#endif ++#endif ++ rq->nr_switches = 0; ++ atomic_set(&rq->nr_iowait, 0); ++ hrtick_rq_init(rq); ++ } ++#ifdef CONFIG_SMP ++ /* Set rq->online for cpu 0 */ ++ cpu_rq(0)->online = true; ++#endif ++ ++ /* ++ * The boot idle thread does lazy MMU switching as well: ++ */ ++ mmgrab(&init_mm); ++ enter_lazy_tlb(&init_mm, current); ++ ++ /* ++ * Make us the idle thread. Technically, schedule() should not be ++ * called from this thread, however somewhere below it might be, ++ * but because we are the idle thread, we just pick up running again ++ * when this runqueue becomes "idle". ++ */ ++ init_idle(current, smp_processor_id()); ++ ++ calc_load_update = jiffies + LOAD_FREQ; ++ ++#ifdef CONFIG_SMP ++ idle_thread_set_boot_cpu(); ++ ++ sched_init_topology_cpumask_early(); ++#endif /* SMP */ ++ ++ init_schedstats(); ++ ++ psi_init(); ++} ++ ++#ifdef CONFIG_DEBUG_ATOMIC_SLEEP ++static inline int preempt_count_equals(int preempt_offset) ++{ ++ int nested = preempt_count() + rcu_preempt_depth(); ++ ++ return (nested == preempt_offset); ++} ++ ++void __might_sleep(const char *file, int line, int preempt_offset) ++{ ++ /* ++ * Blocking primitives will set (and therefore destroy) current->state, ++ * since we will exit with TASK_RUNNING make sure we enter with it, ++ * otherwise we will destroy state. 
++ */ ++ WARN_ONCE(current->state != TASK_RUNNING && current->task_state_change, ++ "do not call blocking ops when !TASK_RUNNING; " ++ "state=%lx set at [<%p>] %pS\n", ++ current->state, ++ (void *)current->task_state_change, ++ (void *)current->task_state_change); ++ ++ ___might_sleep(file, line, preempt_offset); ++} ++EXPORT_SYMBOL(__might_sleep); ++ ++void ___might_sleep(const char *file, int line, int preempt_offset) ++{ ++ /* Ratelimiting timestamp: */ ++ static unsigned long prev_jiffy; ++ ++ unsigned long preempt_disable_ip; ++ ++ /* WARN_ON_ONCE() by default, no rate limit required: */ ++ rcu_sleep_check(); ++ ++ if ((preempt_count_equals(preempt_offset) && !irqs_disabled() && ++ !is_idle_task(current) && !current->non_block_count) || ++ system_state == SYSTEM_BOOTING || system_state > SYSTEM_RUNNING || ++ oops_in_progress) ++ return; ++ if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy) ++ return; ++ prev_jiffy = jiffies; ++ ++ /* Save this before calling printk(), since that will clobber it: */ ++ preempt_disable_ip = get_preempt_disable_ip(current); ++ ++ printk(KERN_ERR ++ "BUG: sleeping function called from invalid context at %s:%d\n", ++ file, line); ++ printk(KERN_ERR ++ "in_atomic(): %d, irqs_disabled(): %d, non_block: %d, pid: %d, name: %s\n", ++ in_atomic(), irqs_disabled(), current->non_block_count, ++ current->pid, current->comm); ++ ++ if (task_stack_end_corrupted(current)) ++ printk(KERN_EMERG "Thread overran stack, or stack corrupted\n"); ++ ++ debug_show_held_locks(current); ++ if (irqs_disabled()) ++ print_irqtrace_events(current); ++#ifdef CONFIG_DEBUG_PREEMPT ++ if (!preempt_count_equals(preempt_offset)) { ++ pr_err("Preemption disabled at:"); ++ print_ip_sym(KERN_ERR, preempt_disable_ip); ++ } ++#endif ++ dump_stack(); ++ add_taint(TAINT_WARN, LOCKDEP_STILL_OK); ++} ++EXPORT_SYMBOL(___might_sleep); ++ ++void __cant_sleep(const char *file, int line, int preempt_offset) ++{ ++ static unsigned long prev_jiffy; ++ ++ if (irqs_disabled()) ++ return; ++ ++ if (!IS_ENABLED(CONFIG_PREEMPT_COUNT)) ++ return; ++ ++ if (preempt_count() > preempt_offset) ++ return; ++ ++ if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy) ++ return; ++ prev_jiffy = jiffies; ++ ++ printk(KERN_ERR "BUG: assuming atomic context at %s:%d\n", file, line); ++ printk(KERN_ERR "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n", ++ in_atomic(), irqs_disabled(), ++ current->pid, current->comm); ++ ++ debug_show_held_locks(current); ++ dump_stack(); ++ add_taint(TAINT_WARN, LOCKDEP_STILL_OK); ++} ++EXPORT_SYMBOL_GPL(__cant_sleep); ++#endif ++ ++#ifdef CONFIG_MAGIC_SYSRQ ++void normalize_rt_tasks(void) ++{ ++ struct task_struct *g, *p; ++ struct sched_attr attr = { ++ .sched_policy = SCHED_NORMAL, ++ }; ++ ++ read_lock(&tasklist_lock); ++ for_each_process_thread(g, p) { ++ /* ++ * Only normalize user tasks: ++ */ ++ if (p->flags & PF_KTHREAD) ++ continue; ++ ++ if (!rt_task(p)) { ++ /* ++ * Renice negative nice level userspace ++ * tasks back to 0: ++ */ ++ if (task_nice(p) < 0) ++ set_user_nice(p, 0); ++ continue; ++ } ++ ++ __sched_setscheduler(p, &attr, false, false); ++ } ++ read_unlock(&tasklist_lock); ++} ++#endif /* CONFIG_MAGIC_SYSRQ */ ++ ++#if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) ++/* ++ * These functions are only useful for the IA64 MCA handling, or kdb. ++ * ++ * They can only be called when the whole system has been ++ * stopped - every CPU needs to be quiescent, and no scheduling ++ * activity can take place. 
Using them for anything else would ++ * be a serious bug, and as a result, they aren't even visible ++ * under any other configuration. ++ */ ++ ++/** ++ * curr_task - return the current task for a given CPU. ++ * @cpu: the processor in question. ++ * ++ * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED! ++ * ++ * Return: The current task for @cpu. ++ */ ++struct task_struct *curr_task(int cpu) ++{ ++ return cpu_curr(cpu); ++} ++ ++#endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */ ++ ++#ifdef CONFIG_IA64 ++/** ++ * ia64_set_curr_task - set the current task for a given CPU. ++ * @cpu: the processor in question. ++ * @p: the task pointer to set. ++ * ++ * Description: This function must only be used when non-maskable interrupts ++ * are serviced on a separate stack. It allows the architecture to switch the ++ * notion of the current task on a CPU in a non-blocking manner. This function ++ * must be called with all CPU's synchronised, and interrupts disabled, the ++ * and caller must save the original value of the current task (see ++ * curr_task() above) and restore that value before reenabling interrupts and ++ * re-starting the system. ++ * ++ * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED! ++ */ ++void ia64_set_curr_task(int cpu, struct task_struct *p) ++{ ++ cpu_curr(cpu) = p; ++} ++ ++#endif ++ ++#ifdef CONFIG_SCHED_DEBUG ++void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns, ++ struct seq_file *m) ++{} ++ ++void proc_sched_set_task(struct task_struct *p) ++{} ++#endif ++ ++#ifdef CONFIG_CGROUP_SCHED ++static void sched_free_group(struct task_group *tg) ++{ ++ kmem_cache_free(task_group_cache, tg); ++} ++ ++/* allocate runqueue etc for a new task group */ ++struct task_group *sched_create_group(struct task_group *parent) ++{ ++ struct task_group *tg; ++ ++ tg = kmem_cache_alloc(task_group_cache, GFP_KERNEL | __GFP_ZERO); ++ if (!tg) ++ return ERR_PTR(-ENOMEM); ++ ++ return tg; ++} ++ ++void sched_online_group(struct task_group *tg, struct task_group *parent) ++{ ++} ++ ++/* rcu callback to free various structures associated with a task group */ ++static void sched_free_group_rcu(struct rcu_head *rhp) ++{ ++ /* Now it should be safe to free those cfs_rqs */ ++ sched_free_group(container_of(rhp, struct task_group, rcu)); ++} ++ ++void sched_destroy_group(struct task_group *tg) ++{ ++ /* Wait for possible concurrent references to cfs_rqs complete */ ++ call_rcu(&tg->rcu, sched_free_group_rcu); ++} ++ ++void sched_offline_group(struct task_group *tg) ++{ ++} ++ ++static inline struct task_group *css_tg(struct cgroup_subsys_state *css) ++{ ++ return css ? 
container_of(css, struct task_group, css) : NULL; ++} ++ ++static struct cgroup_subsys_state * ++cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) ++{ ++ struct task_group *parent = css_tg(parent_css); ++ struct task_group *tg; ++ ++ if (!parent) { ++ /* This is early initialization for the top cgroup */ ++ return &root_task_group.css; ++ } ++ ++ tg = sched_create_group(parent); ++ if (IS_ERR(tg)) ++ return ERR_PTR(-ENOMEM); ++ return &tg->css; ++} ++ ++/* Expose task group only after completing cgroup initialization */ ++static int cpu_cgroup_css_online(struct cgroup_subsys_state *css) ++{ ++ struct task_group *tg = css_tg(css); ++ struct task_group *parent = css_tg(css->parent); ++ ++ if (parent) ++ sched_online_group(tg, parent); ++ return 0; ++} ++ ++static void cpu_cgroup_css_released(struct cgroup_subsys_state *css) ++{ ++ struct task_group *tg = css_tg(css); ++ ++ sched_offline_group(tg); ++} ++ ++static void cpu_cgroup_css_free(struct cgroup_subsys_state *css) ++{ ++ struct task_group *tg = css_tg(css); ++ ++ /* ++ * Relies on the RCU grace period between css_released() and this. ++ */ ++ sched_free_group(tg); ++} ++ ++static void cpu_cgroup_fork(struct task_struct *task) ++{ ++} ++ ++static int cpu_cgroup_can_attach(struct cgroup_taskset *tset) ++{ ++ return 0; ++} ++ ++static void cpu_cgroup_attach(struct cgroup_taskset *tset) ++{ ++} ++ ++static struct cftype cpu_legacy_files[] = { ++ { } /* Terminate */ ++}; ++ ++static struct cftype cpu_files[] = { ++ { } /* terminate */ ++}; ++ ++static int cpu_extra_stat_show(struct seq_file *sf, ++ struct cgroup_subsys_state *css) ++{ ++ return 0; ++} ++ ++struct cgroup_subsys cpu_cgrp_subsys = { ++ .css_alloc = cpu_cgroup_css_alloc, ++ .css_online = cpu_cgroup_css_online, ++ .css_released = cpu_cgroup_css_released, ++ .css_free = cpu_cgroup_css_free, ++ .css_extra_stat_show = cpu_extra_stat_show, ++ .fork = cpu_cgroup_fork, ++ .can_attach = cpu_cgroup_can_attach, ++ .attach = cpu_cgroup_attach, ++ .legacy_cftypes = cpu_files, ++ .legacy_cftypes = cpu_legacy_files, ++ .dfl_cftypes = cpu_files, ++ .early_init = true, ++ .threaded = true, ++}; ++#endif /* CONFIG_CGROUP_SCHED */ ++ ++#undef CREATE_TRACE_POINTS +diff --git a/kernel/sched/pds_sched.h b/kernel/sched/pds_sched.h +new file mode 100644 +index 000000000000..6c3361f06087 +--- /dev/null ++++ b/kernel/sched/pds_sched.h +@@ -0,0 +1,573 @@ ++#ifndef PDS_SCHED_H ++#define PDS_SCHED_H ++ ++#include ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++ ++#ifdef CONFIG_PARAVIRT ++# include ++#endif ++ ++#include "cpupri.h" ++ ++/* task_struct::on_rq states: */ ++#define TASK_ON_RQ_QUEUED 1 ++#define TASK_ON_RQ_MIGRATING 2 ++ ++static inline int task_on_rq_queued(struct task_struct *p) ++{ ++ return p->on_rq == TASK_ON_RQ_QUEUED; ++} ++ ++static inline int task_on_rq_migrating(struct task_struct *p) ++{ ++ return READ_ONCE(p->on_rq) == TASK_ON_RQ_MIGRATING; ++} ++ ++/* ++ * wake flags ++ */ ++#define WF_SYNC 0x01 /* waker goes to sleep after wakeup */ ++#define WF_FORK 0x02 /* child wakeup after fork */ ++#define WF_MIGRATED 0x04 /* internal use, task got migrated */ ++ ++/* ++ * rq::clock_update_flags bits ++ */ ++#define RQCF_REQ_SKIP 0x01 ++#define RQCF_ACT_SKIP 
0x02 ++#define RQCF_UPDATED 0x04 ++ ++/* ++ * This is the main, per-CPU runqueue data structure. ++ * This data should only be modified by the local cpu. ++ */ ++struct rq { ++ /* runqueue lock: */ ++ raw_spinlock_t lock; ++ ++ struct task_struct __rcu *curr; ++ struct task_struct *idle, *stop; ++ struct mm_struct *prev_mm; ++ ++ struct skiplist_node sl_header; ++ ++ /* switch count */ ++ u64 nr_switches; ++ ++ atomic_t nr_iowait; ++ ++#ifdef CONFIG_MEMBARRIER ++ int membarrier_state; ++#endif ++ ++#ifdef CONFIG_SMP ++ int cpu; /* cpu of this runqueue */ ++ bool online; ++ unsigned int ttwu_pending; ++ unsigned int clock_update_flags; ++ ++#ifdef CONFIG_HAVE_SCHED_AVG_IRQ ++ struct sched_avg avg_irq; ++#endif ++#ifdef CONFIG_SCHED_THERMAL_PRESSURE ++ struct sched_avg avg_thermal; ++#endif ++ ++ unsigned long queued_level; ++ unsigned long pending_level; ++ ++#ifdef CONFIG_SCHED_SMT ++ int active_balance; ++ struct cpu_stop_work active_balance_work; ++#endif ++#endif /* CONFIG_SMP */ ++#ifdef CONFIG_IRQ_TIME_ACCOUNTING ++ u64 prev_irq_time; ++#endif /* CONFIG_IRQ_TIME_ACCOUNTING */ ++#ifdef CONFIG_PARAVIRT ++ u64 prev_steal_time; ++#endif /* CONFIG_PARAVIRT */ ++#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING ++ u64 prev_steal_time_rq; ++#endif /* CONFIG_PARAVIRT_TIME_ACCOUNTING */ ++ ++ /* calc_load related fields */ ++ unsigned long calc_load_update; ++ long calc_load_active; ++ ++ u64 clock, last_tick; ++ u64 clock_task; ++ int dither; ++ ++ unsigned long nr_running; ++ unsigned long nr_uninterruptible; ++ ++#ifdef CONFIG_SCHED_HRTICK ++#ifdef CONFIG_SMP ++ call_single_data_t hrtick_csd; ++#endif ++ struct hrtimer hrtick_timer; ++#endif ++ ++#ifdef CONFIG_SCHEDSTATS ++ ++ /* latency stats */ ++ struct sched_info rq_sched_info; ++ unsigned long long rq_cpu_time; ++ /* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? 
*/ ++ ++ /* sys_sched_yield() stats */ ++ unsigned int yld_count; ++ ++ /* schedule() stats */ ++ unsigned int sched_switch; ++ unsigned int sched_count; ++ unsigned int sched_goidle; ++ ++ /* try_to_wake_up() stats */ ++ unsigned int ttwu_count; ++ unsigned int ttwu_local; ++#endif /* CONFIG_SCHEDSTATS */ ++#ifdef CONFIG_CPU_IDLE ++ /* Must be inspected within a rcu lock section */ ++ struct cpuidle_state *idle_state; ++#endif ++}; ++ ++extern unsigned long calc_load_update; ++extern atomic_long_t calc_load_tasks; ++ ++extern void calc_global_load_tick(struct rq *this_rq); ++extern long calc_load_fold_active(struct rq *this_rq, long adjust); ++ ++#ifndef CONFIG_SMP ++extern struct rq *uprq; ++#define cpu_rq(cpu) (uprq) ++#define this_rq() (uprq) ++#define raw_rq() (uprq) ++#define task_rq(p) (uprq) ++#define cpu_curr(cpu) ((uprq)->curr) ++#else /* CONFIG_SMP */ ++DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues); ++#define cpu_rq(cpu) (&per_cpu(runqueues, (cpu))) ++#define this_rq() this_cpu_ptr(&runqueues) ++#define raw_rq() raw_cpu_ptr(&runqueues) ++#define task_rq(p) cpu_rq(task_cpu(p)) ++#define cpu_curr(cpu) (cpu_rq(cpu)->curr) ++ ++#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL) ++void register_sched_domain_sysctl(void); ++void unregister_sched_domain_sysctl(void); ++#else ++static inline void register_sched_domain_sysctl(void) ++{ ++} ++static inline void unregister_sched_domain_sysctl(void) ++{ ++} ++#endif ++ ++#endif /* CONFIG_SMP */ ++ ++#ifndef arch_scale_freq_tick ++static __always_inline ++void arch_scale_freq_tick(void) ++{ ++} ++#endif ++ ++#ifndef arch_scale_freq_capacity ++static __always_inline ++unsigned long arch_scale_freq_capacity(int cpu) ++{ ++ return SCHED_CAPACITY_SCALE; ++} ++#endif ++ ++static inline u64 __rq_clock_broken(struct rq *rq) ++{ ++ return READ_ONCE(rq->clock); ++} ++ ++static inline u64 rq_clock(struct rq *rq) ++{ ++ /* ++ * Relax lockdep_assert_held() checking as in VRQ, call to ++ * sched_info_xxxx() may not held rq->lock ++ * lockdep_assert_held(&rq->lock); ++ */ ++ return rq->clock; ++} ++ ++static inline u64 rq_clock_task(struct rq *rq) ++{ ++ /* ++ * Relax lockdep_assert_held() checking as in VRQ, call to ++ * sched_info_xxxx() may not held rq->lock ++ * lockdep_assert_held(&rq->lock); ++ */ ++ return rq->clock_task; ++} ++ ++/** ++ * By default the decay is the default pelt decay period. ++ * The decay shift can change the decay period in ++ * multiples of 32. ++ * Decay shift Decay period(ms) ++ * 0 32 ++ * 1 64 ++ * 2 128 ++ * 3 256 ++ * 4 512 ++ */ ++extern int sched_thermal_decay_shift; ++ ++static inline u64 rq_clock_thermal(struct rq *rq) ++{ ++ return rq_clock_task(rq) >> sched_thermal_decay_shift; ++} ++ ++/* ++ * {de,en}queue flags: ++ * ++ * DEQUEUE_SLEEP - task is no longer runnable ++ * ENQUEUE_WAKEUP - task just became runnable ++ * ++ */ ++ ++#define DEQUEUE_SLEEP 0x01 ++ ++#define ENQUEUE_WAKEUP 0x01 ++ ++ ++/* ++ * Below are scheduler API which using in other kernel code ++ * It use the dummy rq_flags ++ * ToDo : PDS need to support these APIs for compatibility with mainline ++ * scheduler code. 
++ */ ++struct rq_flags { ++ unsigned long flags; ++ struct pin_cookie cookie; ++ unsigned int clock_update_flags; ++}; ++ ++struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf) ++ __acquires(rq->lock); ++ ++struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf) ++ __acquires(p->pi_lock) ++ __acquires(rq->lock); ++ ++static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf) ++ __releases(rq->lock) ++{ ++ raw_spin_unlock(&rq->lock); ++} ++ ++static inline void rq_pin_lock(struct rq *rq, struct rq_flags *rf) ++{ ++ rf->cookie = lockdep_pin_lock(&rq->lock); ++ ++#ifdef CONFIG_SCHED_DEBUG ++ rq->clock_update_flags &= (RQCF_REQ_SKIP|RQCF_ACT_SKIP); ++ rf->clock_update_flags = 0; ++#endif ++} ++ ++static inline void rq_unpin_lock(struct rq *rq, struct rq_flags *rf) ++{ ++#ifdef CONFIG_SCHED_DEBUG ++ if (rq->clock_update_flags > RQCF_ACT_SKIP) ++ rf->clock_update_flags = RQCF_UPDATED; ++#endif ++ ++ lockdep_unpin_lock(&rq->lock, rf->cookie); ++} ++ ++static inline void ++task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf) ++ __releases(rq->lock) ++ __releases(p->pi_lock) ++{ ++ raw_spin_unlock(&rq->lock); ++ raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags); ++} ++ ++static inline void ++rq_lock_irqsave(struct rq *rq, struct rq_flags *rf) ++ __acquires(rq->lock) ++{ ++ raw_spin_lock_irqsave(&rq->lock, rf->flags); ++ rq_pin_lock(rq, rf); ++} ++ ++static inline void ++rq_unlock_irqrestore(struct rq *rq, struct rq_flags *rf) ++ __releases(rq->lock) ++{ ++ rq_unpin_lock(rq, rf); ++ raw_spin_unlock_irqrestore(&rq->lock, rf->flags); ++} ++ ++static inline void ++rq_unlock_irq(struct rq *rq, struct rq_flags *rf) ++ __releases(rq->lock) ++{ ++ raw_spin_unlock_irq(&rq->lock); ++} ++ ++static inline void ++rq_unlock(struct rq *rq, struct rq_flags *rf) ++ __releases(rq->lock) ++{ ++ rq_unpin_lock(rq, rf); ++ raw_spin_unlock(&rq->lock); ++} ++ ++static inline struct rq * ++this_rq_lock_irq(struct rq_flags *rf) ++ __acquires(rq->lock) ++{ ++ struct rq *rq; ++ ++ local_irq_disable(); ++ rq = this_rq(); ++ raw_spin_lock(&rq->lock); ++ ++ return rq; ++} ++ ++static inline int task_current(struct rq *rq, struct task_struct *p) ++{ ++ return rq->curr == p; ++} ++ ++static inline bool task_running(struct task_struct *p) ++{ ++ return p->on_cpu; ++} ++ ++extern struct static_key_false sched_schedstats; ++ ++extern void flush_smp_call_function_from_idle(void); ++ ++#ifdef CONFIG_CPU_IDLE ++static inline void idle_set_state(struct rq *rq, ++ struct cpuidle_state *idle_state) ++{ ++ rq->idle_state = idle_state; ++} ++ ++static inline struct cpuidle_state *idle_get_state(struct rq *rq) ++{ ++ WARN_ON(!rcu_read_lock_held()); ++ return rq->idle_state; ++} ++#else ++static inline void idle_set_state(struct rq *rq, ++ struct cpuidle_state *idle_state) ++{ ++} ++ ++static inline struct cpuidle_state *idle_get_state(struct rq *rq) ++{ ++ return NULL; ++} ++#endif ++ ++static inline int cpu_of(const struct rq *rq) ++{ ++#ifdef CONFIG_SMP ++ return rq->cpu; ++#else ++ return 0; ++#endif ++} ++ ++#include "stats.h" ++ ++#ifdef CONFIG_IRQ_TIME_ACCOUNTING ++struct irqtime { ++ u64 total; ++ u64 tick_delta; ++ u64 irq_start_time; ++ struct u64_stats_sync sync; ++}; ++ ++DECLARE_PER_CPU(struct irqtime, cpu_irqtime); ++ ++/* ++ * Returns the irqtime minus the softirq time computed by ksoftirqd. ++ * Otherwise ksoftirqd's sum_exec_runtime is substracted its own runtime ++ * and never move forward. 
++ */ ++static inline u64 irq_time_read(int cpu) ++{ ++ struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu); ++ unsigned int seq; ++ u64 total; ++ ++ do { ++ seq = __u64_stats_fetch_begin(&irqtime->sync); ++ total = irqtime->total; ++ } while (__u64_stats_fetch_retry(&irqtime->sync, seq)); ++ ++ return total; ++} ++#endif /* CONFIG_IRQ_TIME_ACCOUNTING */ ++ ++#ifdef CONFIG_CPU_FREQ ++DECLARE_PER_CPU(struct update_util_data __rcu *, cpufreq_update_util_data); ++ ++/** ++ * cpufreq_update_util - Take a note about CPU utilization changes. ++ * @rq: Runqueue to carry out the update for. ++ * @flags: Update reason flags. ++ * ++ * This function is called by the scheduler on the CPU whose utilization is ++ * being updated. ++ * ++ * It can only be called from RCU-sched read-side critical sections. ++ * ++ * The way cpufreq is currently arranged requires it to evaluate the CPU ++ * performance state (frequency/voltage) on a regular basis to prevent it from ++ * being stuck in a completely inadequate performance level for too long. ++ * That is not guaranteed to happen if the updates are only triggered from CFS ++ * and DL, though, because they may not be coming in if only RT tasks are ++ * active all the time (or there are RT tasks only). ++ * ++ * As a workaround for that issue, this function is called periodically by the ++ * RT sched class to trigger extra cpufreq updates to prevent it from stalling, ++ * but that really is a band-aid. Going forward it should be replaced with ++ * solutions targeted more specifically at RT tasks. ++ */ ++static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) ++{ ++ struct update_util_data *data; ++ ++ data = rcu_dereference_sched(*this_cpu_ptr(&cpufreq_update_util_data)); ++ if (data) ++ data->func(data, rq_clock(rq), flags); ++} ++ ++static inline void cpufreq_update_this_cpu(struct rq *rq, unsigned int flags) ++{ ++ if (cpu_of(rq) == smp_processor_id()) ++ cpufreq_update_util(rq, flags); ++} ++#else ++static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {} ++static inline void cpufreq_update_this_cpu(struct rq *rq, unsigned int flags) {} ++#endif /* CONFIG_CPU_FREQ */ ++ ++#ifdef CONFIG_NO_HZ_FULL ++extern int __init sched_tick_offload_init(void); ++#else ++static inline int sched_tick_offload_init(void) { return 0; } ++#endif ++ ++#ifdef arch_scale_freq_capacity ++#ifndef arch_scale_freq_invariant ++#define arch_scale_freq_invariant() (true) ++#endif ++#else /* arch_scale_freq_capacity */ ++#define arch_scale_freq_invariant() (false) ++#endif ++ ++extern void schedule_idle(void); ++ ++/* ++ * !! For sched_setattr_nocheck() (kernel) only !! ++ * ++ * This is actually gross. :( ++ * ++ * It is used to make schedutil kworker(s) higher priority than SCHED_DEADLINE ++ * tasks, but still be able to sleep. We need this on platforms that cannot ++ * atomically change clock frequency. Remove once fast switching will be ++ * available on such platforms. ++ * ++ * SUGOV stands for SchedUtil GOVernor. ++ */ ++#define SCHED_FLAG_SUGOV 0x10000000 ++ ++#ifdef CONFIG_MEMBARRIER ++/* ++ * The scheduler provides memory barriers required by membarrier between: ++ * - prior user-space memory accesses and store to rq->membarrier_state, ++ * - store to rq->membarrier_state and following user-space memory accesses. ++ * In the same way it provides those guarantees around store to rq->curr. 
++ */ ++static inline void membarrier_switch_mm(struct rq *rq, ++ struct mm_struct *prev_mm, ++ struct mm_struct *next_mm) ++{ ++ int membarrier_state; ++ ++ if (prev_mm == next_mm) ++ return; ++ ++ membarrier_state = atomic_read(&next_mm->membarrier_state); ++ if (READ_ONCE(rq->membarrier_state) == membarrier_state) ++ return; ++ ++ WRITE_ONCE(rq->membarrier_state, membarrier_state); ++} ++#else ++static inline void membarrier_switch_mm(struct rq *rq, ++ struct mm_struct *prev_mm, ++ struct mm_struct *next_mm) ++{ ++} ++#endif ++ ++#ifdef CONFIG_NUMA ++extern int sched_numa_find_closest(const struct cpumask *cpus, int cpu); ++#else ++static inline int sched_numa_find_closest(const struct cpumask *cpus, int cpu) ++{ ++ return nr_cpu_ids; ++} ++#endif ++ ++void swake_up_all_locked(struct swait_queue_head *q); ++void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait); ++ ++#endif /* PDS_SCHED_H */ +diff --git a/kernel/sched/pelt.c b/kernel/sched/pelt.c +index b647d04d9c8b..05b6cfd91842 100644 +--- a/kernel/sched/pelt.c ++++ b/kernel/sched/pelt.c +@@ -250,6 +250,7 @@ ___update_load_avg(struct sched_avg *sa, unsigned long load) + WRITE_ONCE(sa->util_avg, sa->util_sum / divider); + } + ++#ifndef CONFIG_SCHED_PDS + /* + * sched_entity: + * +@@ -367,6 +368,7 @@ int update_dl_rq_load_avg(u64 now, struct rq *rq, int running) + + return 0; + } ++#endif + + #ifdef CONFIG_SCHED_THERMAL_PRESSURE + /* +diff --git a/kernel/sched/pelt.h b/kernel/sched/pelt.h +index eb034d9f024d..a074572f2976 100644 +--- a/kernel/sched/pelt.h ++++ b/kernel/sched/pelt.h +@@ -1,11 +1,13 @@ + #ifdef CONFIG_SMP + #include "sched-pelt.h" + ++#ifndef CONFIG_SCHED_PDS + int __update_load_avg_blocked_se(u64 now, struct sched_entity *se); + int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se); + int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq); + int update_rt_rq_load_avg(u64 now, struct rq *rq, int running); + int update_dl_rq_load_avg(u64 now, struct rq *rq, int running); ++#endif + + #ifdef CONFIG_SCHED_THERMAL_PRESSURE + int update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity); +@@ -37,6 +39,7 @@ update_irq_load_avg(struct rq *rq, u64 running) + } + #endif + ++#ifndef CONFIG_SCHED_PDS + /* + * When a task is dequeued, its estimated utilization should not be update if + * its util_avg has not been updated at least once. 
+@@ -157,9 +160,11 @@ static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq) + return rq_clock_pelt(rq_of(cfs_rq)); + } + #endif ++#endif /* CONFIG_SCHED_PDS */ + + #else + ++#ifndef CONFIG_SCHED_PDS + static inline int + update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq) + { +@@ -188,6 +193,7 @@ static inline u64 thermal_load_avg(struct rq *rq) + { + return 0; + } ++#endif + + static inline int + update_irq_load_avg(struct rq *rq, u64 running) +diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h +index db3a57675ccf..5a8060bd2343 100644 +--- a/kernel/sched/sched.h ++++ b/kernel/sched/sched.h +@@ -2,6 +2,10 @@ + /* + * Scheduler internal types and methods: + */ ++#ifdef CONFIG_SCHED_PDS ++#include "pds_sched.h" ++#else ++ + #include + + #include +@@ -2546,3 +2550,5 @@ static inline bool is_per_cpu_kthread(struct task_struct *p) + + void swake_up_all_locked(struct swait_queue_head *q); + void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait); ++ ++#endif /* !CONFIG_SCHED_PDS */ +diff --git a/kernel/sched/stats.c b/kernel/sched/stats.c +index 750fb3c67eed..45bd43942575 100644 +--- a/kernel/sched/stats.c ++++ b/kernel/sched/stats.c +@@ -22,8 +22,10 @@ static int show_schedstat(struct seq_file *seq, void *v) + } else { + struct rq *rq; + #ifdef CONFIG_SMP ++#ifndef CONFIG_SCHED_PDS + struct sched_domain *sd; + int dcount = 0; ++#endif + #endif + cpu = (unsigned long)(v - 2); + rq = cpu_rq(cpu); +@@ -40,6 +42,7 @@ static int show_schedstat(struct seq_file *seq, void *v) + seq_printf(seq, "\n"); + + #ifdef CONFIG_SMP ++#ifndef CONFIG_SCHED_PDS + /* domain-specific stats */ + rcu_read_lock(); + for_each_domain(cpu, sd) { +@@ -68,6 +71,7 @@ static int show_schedstat(struct seq_file *seq, void *v) + sd->ttwu_move_balance); + } + rcu_read_unlock(); ++#endif + #endif + } + return 0; +diff --git a/kernel/sysctl.c b/kernel/sysctl.c +index 8a176d8727a3..b9dde576b576 100644 +--- a/kernel/sysctl.c ++++ b/kernel/sysctl.c +@@ -130,9 +130,13 @@ static int __maybe_unused four = 4; + static unsigned long zero_ul; + static unsigned long one_ul = 1; + static unsigned long long_max = LONG_MAX; +-static int one_hundred = 100; +-static int two_hundred = 200; +-static int one_thousand = 1000; ++static int __read_mostly one_hundred = 100; ++static int __read_mostly two_hundred = 200; ++static int __read_mostly one_thousand = 1000; ++#ifdef CONFIG_SCHED_PDS ++extern int rr_interval; ++extern int sched_yield_type; ++#endif + #ifdef CONFIG_PRINTK + static int ten_thousand = 10000; + #endif +@@ -288,7 +292,7 @@ static struct ctl_table sysctl_base_table[] = { + { } + }; + +-#ifdef CONFIG_SCHED_DEBUG ++#if defined(CONFIG_SCHED_DEBUG) && !defined(CONFIG_SCHED_PDS) + static int min_sched_granularity_ns = 100000; /* 100 usecs */ + static int max_sched_granularity_ns = NSEC_PER_SEC; /* 1 second */ + static int min_wakeup_granularity_ns; /* 0 usecs */ +@@ -305,6 +309,7 @@ static int max_extfrag_threshold = 1000; + #endif + + static struct ctl_table kern_table[] = { ++#ifndef CONFIG_SCHED_PDS + { + .procname = "sched_child_runs_first", + .data = &sysctl_sched_child_runs_first, +@@ -486,6 +491,7 @@ static struct ctl_table kern_table[] = { + .extra2 = SYSCTL_ONE, + }, + #endif ++#endif /* !CONFIG_SCHED_PDS */ + #ifdef CONFIG_PROVE_LOCKING + { + .procname = "prove_locking", +@@ -1049,6 +1055,26 @@ static struct ctl_table kern_table[] = { + .proc_handler = proc_dointvec, + }, + #endif ++#ifdef CONFIG_SCHED_PDS ++ { ++ .procname = "rr_interval", ++ .data = &rr_interval, ++ .maxlen = sizeof (int), ++ 
.mode = 0644, ++ .proc_handler = &proc_dointvec_minmax, ++ .extra1 = SYSCTL_ONE, ++ .extra2 = &one_thousand, ++ }, ++ { ++ .procname = "yield_type", ++ .data = &sched_yield_type, ++ .maxlen = sizeof (int), ++ .mode = 0644, ++ .proc_handler = &proc_dointvec_minmax, ++ .extra1 = SYSCTL_ZERO, ++ .extra2 = &two, ++ }, ++#endif + #if defined(CONFIG_S390) && defined(CONFIG_SMP) + { + .procname = "spin_retry", +diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c +index 2fd3b3fa68bf..6f3b08afdd4c 100644 +--- a/kernel/time/posix-cpu-timers.c ++++ b/kernel/time/posix-cpu-timers.c +@@ -236,7 +236,7 @@ static void task_sample_cputime(struct task_struct *p, u64 *samples) + u64 stime, utime; + + task_cputime(p, &utime, &stime); +- store_samples(samples, stime, utime, p->se.sum_exec_runtime); ++ store_samples(samples, stime, utime, tsk_seruntime(p)); + } + + static void proc_sample_cputime_atomic(struct task_cputime_atomic *at, +@@ -806,6 +806,7 @@ static void collect_posix_cputimers(struct posix_cputimers *pct, u64 *samples, + } + } + ++#ifndef CONFIG_SCHED_PDS + static inline void check_dl_overrun(struct task_struct *tsk) + { + if (tsk->dl.dl_overrun) { +@@ -813,6 +814,7 @@ static inline void check_dl_overrun(struct task_struct *tsk) + __group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk); + } + } ++#endif + + static bool check_rlimit(u64 time, u64 limit, int signo, bool rt, bool hard) + { +@@ -840,8 +842,10 @@ static void check_thread_timers(struct task_struct *tsk, + u64 samples[CPUCLOCK_MAX]; + unsigned long soft; + ++#ifndef CONFIG_SCHED_PDS + if (dl_task(tsk)) + check_dl_overrun(tsk); ++#endif + + if (expiry_cache_is_inactive(pct)) + return; +@@ -855,7 +859,7 @@ static void check_thread_timers(struct task_struct *tsk, + soft = task_rlimit(tsk, RLIMIT_RTTIME); + if (soft != RLIM_INFINITY) { + /* Task RT timeout is accounted in jiffies. RTTIME is usec */ +- unsigned long rttime = tsk->rt.timeout * (USEC_PER_SEC / HZ); ++ unsigned long rttime = tsk_rttimeout(tsk) * (USEC_PER_SEC / HZ); + unsigned long hard = task_rlimit_max(tsk, RLIMIT_RTTIME); + + /* At the hard limit, send SIGKILL. No further action. 
*/ +@@ -1091,8 +1095,10 @@ static inline bool fastpath_timer_check(struct task_struct *tsk) + return true; + } + ++#ifndef CONFIG_SCHED_PDS + if (dl_task(tsk) && tsk->dl.dl_overrun) + return true; ++#endif + + return false; + } +diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c +index b5e3496cf803..0816db0b9c16 100644 +--- a/kernel/trace/trace_selftest.c ++++ b/kernel/trace/trace_selftest.c +@@ -1048,10 +1048,15 @@ static int trace_wakeup_test_thread(void *data) + { + /* Make this a -deadline thread */ + static const struct sched_attr attr = { ++#ifdef CONFIG_SCHED_PDS ++ /* No deadline on BFS, use RR */ ++ .sched_policy = SCHED_RR, ++#else + .sched_policy = SCHED_DEADLINE, + .sched_runtime = 100000ULL, + .sched_deadline = 10000000ULL, + .sched_period = 10000000ULL ++#endif + }; + struct wakeup_test_data *x = data; + diff --git a/linux58-rc-tkg/linux58-tkg-patches/0006-add-acs-overrides_iommu.patch b/linux58-rc-tkg/linux58-tkg-patches/0006-add-acs-overrides_iommu.patch new file mode 100644 index 0000000..d1303a5 --- /dev/null +++ b/linux58-rc-tkg/linux58-tkg-patches/0006-add-acs-overrides_iommu.patch @@ -0,0 +1,193 @@ +From cdeab384f48dd9c88e2dff2e9ad8d57dca1a1b1c Mon Sep 17 00:00:00 2001 +From: Mark Weiman +Date: Sun, 12 Aug 2018 11:36:21 -0400 +Subject: [PATCH] pci: Enable overrides for missing ACS capabilities + +This an updated version of Alex Williamson's patch from: +https://lkml.org/lkml/2013/5/30/513 + +Original commit message follows: + +PCIe ACS (Access Control Services) is the PCIe 2.0+ feature that +allows us to control whether transactions are allowed to be redirected +in various subnodes of a PCIe topology. For instance, if two +endpoints are below a root port or downsteam switch port, the +downstream port may optionally redirect transactions between the +devices, bypassing upstream devices. The same can happen internally +on multifunction devices. The transaction may never be visible to the +upstream devices. + +One upstream device that we particularly care about is the IOMMU. If +a redirection occurs in the topology below the IOMMU, then the IOMMU +cannot provide isolation between devices. This is why the PCIe spec +encourages topologies to include ACS support. Without it, we have to +assume peer-to-peer DMA within a hierarchy can bypass IOMMU isolation. + +Unfortunately, far too many topologies do not support ACS to make this +a steadfast requirement. Even the latest chipsets from Intel are only +sporadically supporting ACS. We have trouble getting interconnect +vendors to include the PCIe spec required PCIe capability, let alone +suggested features. + +Therefore, we need to add some flexibility. The pcie_acs_override= +boot option lets users opt-in specific devices or sets of devices to +assume ACS support. The "downstream" option assumes full ACS support +on root ports and downstream switch ports. The "multifunction" +option assumes the subset of ACS features available on multifunction +endpoints and upstream switch ports are supported. The "id:nnnn:nnnn" +option enables ACS support on devices matching the provided vendor +and device IDs, allowing more strategic ACS overrides. These options +may be combined in any order. A maximum of 16 id specific overrides +are available. It's suggested to use the most limited set of options +necessary to avoid completely disabling ACS across the topology. +Note to hardware vendors, we have facilities to permanently quirk +specific devices which enforce isolation but not provide an ACS +capability. 
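As a quick illustration of the syntax described above (a sketch only; the vendor/device IDs here are arbitrary placeholders, not taken from this patch), several overrides can be combined in a single boot parameter, e.g.:

    pcie_acs_override=downstream,multifunction,id:1022:43c6,id:10de:1c82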
Please contact me to have your devices added and save +your customers the hassle of this boot option. + +Signed-off-by: Mark Weiman +--- + .../admin-guide/kernel-parameters.txt | 9 ++ + drivers/pci/quirks.c | 101 ++++++++++++++++++ + 2 files changed, 110 insertions(+) + +diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt +index aefd358a5ca3..173b3596fd9e 100644 +--- a/Documentation/admin-guide/kernel-parameters.txt ++++ b/Documentation/admin-guide/kernel-parameters.txt +@@ -3190,6 +3190,15 @@ + nomsi [MSI] If the PCI_MSI kernel config parameter is + enabled, this kernel boot option can be used to + disable the use of MSI interrupts system-wide. ++ pcie_acs_override = ++ [PCIE] Override missing PCIe ACS support for: ++ downstream ++ All downstream ports - full ACS capabilities ++ multifunction ++ All multifunction devices - multifunction ACS subset ++ id:nnnn:nnnn ++ Specific device - full ACS capabilities ++ Specified as vid:did (vendor/device ID) in hex + noioapicquirk [APIC] Disable all boot interrupt quirks. + Safety option to keep boot IRQs enabled. This + should never be necessary. +diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c +index 4700d24e5d55..8f7a3d7fd9c1 100644 +--- a/drivers/pci/quirks.c ++++ b/drivers/pci/quirks.c +@@ -3372,6 +3372,106 @@ static void quirk_no_bus_reset(struct pci_dev *dev) + dev->dev_flags |= PCI_DEV_FLAGS_NO_BUS_RESET; + } + ++static bool acs_on_downstream; ++static bool acs_on_multifunction; ++ ++#define NUM_ACS_IDS 16 ++struct acs_on_id { ++ unsigned short vendor; ++ unsigned short device; ++}; ++static struct acs_on_id acs_on_ids[NUM_ACS_IDS]; ++static u8 max_acs_id; ++ ++static __init int pcie_acs_override_setup(char *p) ++{ ++ if (!p) ++ return -EINVAL; ++ ++ while (*p) { ++ if (!strncmp(p, "downstream", 10)) ++ acs_on_downstream = true; ++ if (!strncmp(p, "multifunction", 13)) ++ acs_on_multifunction = true; ++ if (!strncmp(p, "id:", 3)) { ++ char opt[5]; ++ int ret; ++ long val; ++ ++ if (max_acs_id >= NUM_ACS_IDS - 1) { ++ pr_warn("Out of PCIe ACS override slots (%d)\n", ++ NUM_ACS_IDS); ++ goto next; ++ } ++ ++ p += 3; ++ snprintf(opt, 5, "%s", p); ++ ret = kstrtol(opt, 16, &val); ++ if (ret) { ++ pr_warn("PCIe ACS ID parse error %d\n", ret); ++ goto next; ++ } ++ acs_on_ids[max_acs_id].vendor = val; ++ ++ p += strcspn(p, ":"); ++ if (*p != ':') { ++ pr_warn("PCIe ACS invalid ID\n"); ++ goto next; ++ } ++ ++ p++; ++ snprintf(opt, 5, "%s", p); ++ ret = kstrtol(opt, 16, &val); ++ if (ret) { ++ pr_warn("PCIe ACS ID parse error %d\n", ret); ++ goto next; ++ } ++ acs_on_ids[max_acs_id].device = val; ++ max_acs_id++; ++ } ++next: ++ p += strcspn(p, ","); ++ if (*p == ',') ++ p++; ++ } ++ ++ if (acs_on_downstream || acs_on_multifunction || max_acs_id) ++ pr_warn("Warning: PCIe ACS overrides enabled; This may allow non-IOMMU protected peer-to-peer DMA\n"); ++ ++ return 0; ++} ++early_param("pcie_acs_override", pcie_acs_override_setup); ++ ++static int pcie_acs_overrides(struct pci_dev *dev, u16 acs_flags) ++{ ++ int i; ++ ++ /* Never override ACS for legacy devices or devices with ACS caps */ ++ if (!pci_is_pcie(dev) || ++ pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS)) ++ return -ENOTTY; ++ ++ for (i = 0; i < max_acs_id; i++) ++ if (acs_on_ids[i].vendor == dev->vendor && ++ acs_on_ids[i].device == dev->device) ++ return 1; ++ ++ switch (pci_pcie_type(dev)) { ++ case PCI_EXP_TYPE_DOWNSTREAM: ++ case PCI_EXP_TYPE_ROOT_PORT: ++ if (acs_on_downstream) ++ return 1; ++ break; ++ case 
PCI_EXP_TYPE_ENDPOINT: ++ case PCI_EXP_TYPE_UPSTREAM: ++ case PCI_EXP_TYPE_LEG_END: ++ case PCI_EXP_TYPE_RC_END: ++ if (acs_on_multifunction && dev->multifunction) ++ return 1; ++ } ++ ++ return -ENOTTY; ++} + /* + * Some Atheros AR9xxx and QCA988x chips do not behave after a bus reset. + * The device will throw a Link Down error on AER-capable systems and +@@ -4513,6 +4613,7 @@ static const struct pci_dev_acs_enabled { + { PCI_VENDOR_ID_ZHAOXIN, 0x9083, pci_quirk_mf_endpoint_acs }, + /* Zhaoxin Root/Downstream Ports */ + { PCI_VENDOR_ID_ZHAOXIN, PCI_ANY_ID, pci_quirk_zhaoxin_pcie_ports_acs }, ++ { PCI_ANY_ID, PCI_ANY_ID, pcie_acs_overrides }, + { 0 } + }; + + diff --git a/linux58-rc-tkg/linux58-tkg-patches/0007-v5.8-fsync.patch b/linux58-rc-tkg/linux58-tkg-patches/0007-v5.8-fsync.patch new file mode 100644 index 0000000..01c86d8 --- /dev/null +++ b/linux58-rc-tkg/linux58-tkg-patches/0007-v5.8-fsync.patch @@ -0,0 +1,908 @@ +From f7f49141a5dbe9c99d78196b58c44307fb2e6be3 Mon Sep 17 00:00:00 2001 +From: Tk-Glitch +Date: Mon, 20 Apr 2020 14:09:11 +0200 +Subject: Import Fsync v3 patchset - Squashed from https://gitlab.collabora.com/tonyk/linux/-/commits/futex-proton-v3 + +diff --git a/include/uapi/linux/futex.h b/include/uapi/linux/futex.h +index a89eb0accd5e2ee527be1e3e11b1117ff5bf94b4..580001e89c6caed57dd8b3cb491d65dce846caff 100644 +--- a/include/uapi/linux/futex.h ++++ b/include/uapi/linux/futex.h +@@ -21,6 +21,7 @@ + #define FUTEX_WAKE_BITSET 10 + #define FUTEX_WAIT_REQUEUE_PI 11 + #define FUTEX_CMP_REQUEUE_PI 12 ++#define FUTEX_WAIT_MULTIPLE 13 + + #define FUTEX_PRIVATE_FLAG 128 + #define FUTEX_CLOCK_REALTIME 256 +@@ -40,6 +41,8 @@ + FUTEX_PRIVATE_FLAG) + #define FUTEX_CMP_REQUEUE_PI_PRIVATE (FUTEX_CMP_REQUEUE_PI | \ + FUTEX_PRIVATE_FLAG) ++#define FUTEX_WAIT_MULTIPLE_PRIVATE (FUTEX_WAIT_MULTIPLE | \ ++ FUTEX_PRIVATE_FLAG) + + /* + * Support for robust futexes: the kernel cleans up held futexes at +@@ -150,4 +153,21 @@ struct robust_list_head { + (((op & 0xf) << 28) | ((cmp & 0xf) << 24) \ + | ((oparg & 0xfff) << 12) | (cmparg & 0xfff)) + ++/* ++ * Maximum number of multiple futexes to wait for ++ */ ++#define FUTEX_MULTIPLE_MAX_COUNT 128 ++ ++/** ++ * struct futex_wait_block - Block of futexes to be waited for ++ * @uaddr: User address of the futex ++ * @val: Futex value expected by userspace ++ * @bitset: Bitset for the optional bitmasked wakeup ++ */ ++struct futex_wait_block { ++ __u32 __user *uaddr; ++ __u32 val; ++ __u32 bitset; ++}; ++ + #endif /* _UAPI_LINUX_FUTEX_H */ +diff --git a/kernel/futex.c b/kernel/futex.c +index 0cf84c8664f207c574325b899ef2e57f01295a94..58cf9eb2b851b4858e29b5ef4114a29a92e676ba 100644 +--- a/kernel/futex.c ++++ b/kernel/futex.c +@@ -215,6 +215,8 @@ struct futex_pi_state { + * @rt_waiter: rt_waiter storage for use with requeue_pi + * @requeue_pi_key: the requeue_pi target futex key + * @bitset: bitset for the optional bitmasked wakeup ++ * @uaddr: userspace address of futex ++ * @uval: expected futex's value + * + * We use this hashed waitqueue, instead of a normal wait_queue_entry_t, so + * we can wake only the relevant ones (hashed queues may be shared). 
+@@ -237,6 +239,8 @@ struct futex_q { + struct rt_mutex_waiter *rt_waiter; + union futex_key *requeue_pi_key; + u32 bitset; ++ u32 __user *uaddr; ++ u32 uval; + } __randomize_layout; + + static const struct futex_q futex_q_init = { +@@ -2420,6 +2424,29 @@ static int unqueue_me(struct futex_q *q) + return ret; + } + ++/** ++ * unqueue_multiple() - Remove several futexes from their futex_hash_bucket ++ * @q: The list of futexes to unqueue ++ * @count: Number of futexes in the list ++ * ++ * Helper to unqueue a list of futexes. This can't fail. ++ * ++ * Return: ++ * - >=0 - Index of the last futex that was awoken; ++ * - -1 - If no futex was awoken ++ */ ++static int unqueue_multiple(struct futex_q *q, int count) ++{ ++ int ret = -1; ++ int i; ++ ++ for (i = 0; i < count; i++) { ++ if (!unqueue_me(&q[i])) ++ ret = i; ++ } ++ return ret; ++} ++ + /* + * PI futexes can not be requeued and must remove themself from the + * hash bucket. The hash bucket lock (i.e. lock_ptr) is held on entry +@@ -2783,6 +2810,211 @@ static int futex_wait_setup(u32 __user *uaddr, u32 val, unsigned int flags, + return ret; + } + ++/** ++ * futex_wait_multiple_setup() - Prepare to wait and enqueue multiple futexes ++ * @qs: The corresponding futex list ++ * @count: The size of the lists ++ * @flags: Futex flags (FLAGS_SHARED, etc.) ++ * @awaken: Index of the last awoken futex ++ * ++ * Prepare multiple futexes in a single step and enqueue them. This may fail if ++ * the futex list is invalid or if any futex was already awoken. On success the ++ * task is ready to interruptible sleep. ++ * ++ * Return: ++ * - 1 - One of the futexes was awaken by another thread ++ * - 0 - Success ++ * - <0 - -EFAULT, -EWOULDBLOCK or -EINVAL ++ */ ++static int futex_wait_multiple_setup(struct futex_q *qs, int count, ++ unsigned int flags, int *awaken) ++{ ++ struct futex_hash_bucket *hb; ++ int ret, i; ++ u32 uval; ++ ++ /* ++ * Enqueuing multiple futexes is tricky, because we need to ++ * enqueue each futex in the list before dealing with the next ++ * one to avoid deadlocking on the hash bucket. But, before ++ * enqueuing, we need to make sure that current->state is ++ * TASK_INTERRUPTIBLE, so we don't absorb any awake events, which ++ * cannot be done before the get_futex_key of the next key, ++ * because it calls get_user_pages, which can sleep. Thus, we ++ * fetch the list of futexes keys in two steps, by first pinning ++ * all the memory keys in the futex key, and only then we read ++ * each key and queue the corresponding futex. ++ */ ++retry: ++ for (i = 0; i < count; i++) { ++ qs[i].key = FUTEX_KEY_INIT; ++ ret = get_futex_key(qs[i].uaddr, flags & FLAGS_SHARED, ++ &qs[i].key, FUTEX_READ); ++ if (unlikely(ret)) { ++ for (--i; i >= 0; i--) ++ put_futex_key(&qs[i].key); ++ return ret; ++ } ++ } ++ ++ set_current_state(TASK_INTERRUPTIBLE); ++ ++ for (i = 0; i < count; i++) { ++ struct futex_q *q = &qs[i]; ++ ++ hb = queue_lock(q); ++ ++ ret = get_futex_value_locked(&uval, q->uaddr); ++ if (ret) { ++ /* ++ * We need to try to handle the fault, which ++ * cannot be done without sleep, so we need to ++ * undo all the work already done, to make sure ++ * we don't miss any wake ups. Therefore, clean ++ * up, handle the fault and retry from the ++ * beginning. ++ */ ++ queue_unlock(hb); ++ ++ /* ++ * Keys 0..(i-1) are implicitly put ++ * on unqueue_multiple. 
++ */ ++ put_futex_key(&q->key); ++ ++ *awaken = unqueue_multiple(qs, i); ++ ++ __set_current_state(TASK_RUNNING); ++ ++ /* ++ * On a real fault, prioritize the error even if ++ * some other futex was awoken. Userspace gave ++ * us a bad address, -EFAULT them. ++ */ ++ ret = get_user(uval, q->uaddr); ++ if (ret) ++ return ret; ++ ++ /* ++ * Even if the page fault was handled, If ++ * something was already awaken, we can safely ++ * give up and succeed to give a hint for userspace to ++ * acquire the right futex faster. ++ */ ++ if (*awaken >= 0) ++ return 1; ++ ++ goto retry; ++ } ++ ++ if (uval != q->uval) { ++ queue_unlock(hb); ++ ++ put_futex_key(&qs[i].key); ++ ++ /* ++ * If something was already awaken, we can ++ * safely ignore the error and succeed. ++ */ ++ *awaken = unqueue_multiple(qs, i); ++ __set_current_state(TASK_RUNNING); ++ if (*awaken >= 0) ++ return 1; ++ ++ return -EWOULDBLOCK; ++ } ++ ++ /* ++ * The bucket lock can't be held while dealing with the ++ * next futex. Queue each futex at this moment so hb can ++ * be unlocked. ++ */ ++ queue_me(&qs[i], hb); ++ } ++ return 0; ++} ++ ++/** ++ * futex_wait_multiple() - Prepare to wait on and enqueue several futexes ++ * @qs: The list of futexes to wait on ++ * @op: Operation code from futex's syscall ++ * @count: The number of objects ++ * @abs_time: Timeout before giving up and returning to userspace ++ * ++ * Entry point for the FUTEX_WAIT_MULTIPLE futex operation, this function ++ * sleeps on a group of futexes and returns on the first futex that ++ * triggered, or after the timeout has elapsed. ++ * ++ * Return: ++ * - >=0 - Hint to the futex that was awoken ++ * - <0 - On error ++ */ ++static int futex_wait_multiple(struct futex_q *qs, int op, ++ u32 count, ktime_t *abs_time) ++{ ++ struct hrtimer_sleeper timeout, *to; ++ int ret, flags = 0, hint = 0; ++ unsigned int i; ++ ++ if (!(op & FUTEX_PRIVATE_FLAG)) ++ flags |= FLAGS_SHARED; ++ ++ if (op & FUTEX_CLOCK_REALTIME) ++ flags |= FLAGS_CLOCKRT; ++ ++ to = futex_setup_timer(abs_time, &timeout, flags, 0); ++ while (1) { ++ ret = futex_wait_multiple_setup(qs, count, flags, &hint); ++ if (ret) { ++ if (ret > 0) { ++ /* A futex was awaken during setup */ ++ ret = hint; ++ } ++ break; ++ } ++ ++ if (to) ++ hrtimer_start_expires(&to->timer, HRTIMER_MODE_ABS); ++ ++ /* ++ * Avoid sleeping if another thread already tried to ++ * wake us. ++ */ ++ for (i = 0; i < count; i++) { ++ if (plist_node_empty(&qs[i].list)) ++ break; ++ } ++ ++ if (i == count && (!to || to->task)) ++ freezable_schedule(); ++ ++ ret = unqueue_multiple(qs, count); ++ ++ __set_current_state(TASK_RUNNING); ++ ++ if (ret >= 0) ++ break; ++ if (to && !to->task) { ++ ret = -ETIMEDOUT; ++ break; ++ } else if (signal_pending(current)) { ++ ret = -ERESTARTSYS; ++ break; ++ } ++ /* ++ * The final case is a spurious wakeup, for ++ * which just retry. 
++ */ ++ } ++ ++ if (to) { ++ hrtimer_cancel(&to->timer); ++ destroy_hrtimer_on_stack(&to->timer); ++ } ++ ++ return ret; ++} ++ + static int futex_wait(u32 __user *uaddr, unsigned int flags, u32 val, + ktime_t *abs_time, u32 bitset) + { +@@ -3907,6 +4139,43 @@ long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout, + return -ENOSYS; + } + ++/** ++ * futex_read_wait_block - Read an array of futex_wait_block from userspace ++ * @uaddr: Userspace address of the block ++ * @count: Number of blocks to be read ++ * ++ * This function creates and allocate an array of futex_q (we zero it to ++ * initialize the fields) and then, for each futex_wait_block element from ++ * userspace, fill a futex_q element with proper values. ++ */ ++inline struct futex_q *futex_read_wait_block(u32 __user *uaddr, u32 count) ++{ ++ unsigned int i; ++ struct futex_q *qs; ++ struct futex_wait_block fwb; ++ struct futex_wait_block __user *entry = ++ (struct futex_wait_block __user *)uaddr; ++ ++ if (!count || count > FUTEX_MULTIPLE_MAX_COUNT) ++ return ERR_PTR(-EINVAL); ++ ++ qs = kcalloc(count, sizeof(*qs), GFP_KERNEL); ++ if (!qs) ++ return ERR_PTR(-ENOMEM); ++ ++ for (i = 0; i < count; i++) { ++ if (copy_from_user(&fwb, &entry[i], sizeof(fwb))) { ++ kfree(qs); ++ return ERR_PTR(-EFAULT); ++ } ++ ++ qs[i].uaddr = fwb.uaddr; ++ qs[i].uval = fwb.val; ++ qs[i].bitset = fwb.bitset; ++ } ++ ++ return qs; ++} + + SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val, + struct __kernel_timespec __user *, utime, u32 __user *, uaddr2, +@@ -3919,7 +4188,8 @@ SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val, + + if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI || + cmd == FUTEX_WAIT_BITSET || +- cmd == FUTEX_WAIT_REQUEUE_PI)) { ++ cmd == FUTEX_WAIT_REQUEUE_PI || ++ cmd == FUTEX_WAIT_MULTIPLE)) { + if (unlikely(should_fail_futex(!(op & FUTEX_PRIVATE_FLAG)))) + return -EFAULT; + if (get_timespec64(&ts, utime)) +@@ -3940,6 +4210,25 @@ SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val, + cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP) + val2 = (u32) (unsigned long) utime; + ++ if (cmd == FUTEX_WAIT_MULTIPLE) { ++ int ret; ++ struct futex_q *qs; ++ ++#ifdef CONFIG_X86_X32 ++ if (unlikely(in_x32_syscall())) ++ return -ENOSYS; ++#endif ++ qs = futex_read_wait_block(uaddr, val); ++ ++ if (IS_ERR(qs)) ++ return PTR_ERR(qs); ++ ++ ret = futex_wait_multiple(qs, op, val, tp); ++ kfree(qs); ++ ++ return ret; ++ } ++ + return do_futex(uaddr, op, val, tp, uaddr2, val2, val3); + } + +@@ -4102,6 +4391,57 @@ COMPAT_SYSCALL_DEFINE3(get_robust_list, int, pid, + #endif /* CONFIG_COMPAT */ + + #ifdef CONFIG_COMPAT_32BIT_TIME ++/** ++ * struct compat_futex_wait_block - Block of futexes to be waited for ++ * @uaddr: User address of the futex (compatible pointer) ++ * @val: Futex value expected by userspace ++ * @bitset: Bitset for the optional bitmasked wakeup ++ */ ++struct compat_futex_wait_block { ++ compat_uptr_t uaddr; ++ __u32 val; ++ __u32 bitset; ++}; ++ ++/** ++ * compat_futex_read_wait_block - Read an array of futex_wait_block from ++ * userspace ++ * @uaddr: Userspace address of the block ++ * @count: Number of blocks to be read ++ * ++ * This function does the same as futex_read_wait_block(), except that it ++ * converts the pointer to the futex from the compat version to the regular one. 
++ */ ++inline struct futex_q *compat_futex_read_wait_block(u32 __user *uaddr, ++ u32 count) ++{ ++ unsigned int i; ++ struct futex_q *qs; ++ struct compat_futex_wait_block fwb; ++ struct compat_futex_wait_block __user *entry = ++ (struct compat_futex_wait_block __user *)uaddr; ++ ++ if (!count || count > FUTEX_MULTIPLE_MAX_COUNT) ++ return ERR_PTR(-EINVAL); ++ ++ qs = kcalloc(count, sizeof(*qs), GFP_KERNEL); ++ if (!qs) ++ return ERR_PTR(-ENOMEM); ++ ++ for (i = 0; i < count; i++) { ++ if (copy_from_user(&fwb, &entry[i], sizeof(fwb))) { ++ kfree(qs); ++ return ERR_PTR(-EFAULT); ++ } ++ ++ qs[i].uaddr = compat_ptr(fwb.uaddr); ++ qs[i].uval = fwb.val; ++ qs[i].bitset = fwb.bitset; ++ } ++ ++ return qs; ++} ++ + SYSCALL_DEFINE6(futex_time32, u32 __user *, uaddr, int, op, u32, val, + struct old_timespec32 __user *, utime, u32 __user *, uaddr2, + u32, val3) +@@ -4113,7 +4453,8 @@ SYSCALL_DEFINE6(futex_time32, u32 __user *, uaddr, int, op, u32, val, + + if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI || + cmd == FUTEX_WAIT_BITSET || +- cmd == FUTEX_WAIT_REQUEUE_PI)) { ++ cmd == FUTEX_WAIT_REQUEUE_PI || ++ cmd == FUTEX_WAIT_MULTIPLE)) { + if (get_old_timespec32(&ts, utime)) + return -EFAULT; + if (!timespec64_valid(&ts)) +@@ -4128,6 +4469,19 @@ SYSCALL_DEFINE6(futex_time32, u32 __user *, uaddr, int, op, u32, val, + cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP) + val2 = (int) (unsigned long) utime; + ++ if (cmd == FUTEX_WAIT_MULTIPLE) { ++ int ret; ++ struct futex_q *qs = compat_futex_read_wait_block(uaddr, val); ++ ++ if (IS_ERR(qs)) ++ return PTR_ERR(qs); ++ ++ ret = futex_wait_multiple(qs, op, val, tp); ++ kfree(qs); ++ ++ return ret; ++ } ++ + return do_futex(uaddr, op, val, tp, uaddr2, val2, val3); + } + #endif /* CONFIG_COMPAT_32BIT_TIME */ +diff --git a/tools/testing/selftests/futex/functional/futex_wait_timeout.c b/tools/testing/selftests/futex/functional/futex_wait_timeout.c +index ee55e6d389a3f053194435342c4e471dc7cf8786..2a63e1c2cfb6407a5988233217cff2e52787bc66 100644 +--- a/tools/testing/selftests/futex/functional/futex_wait_timeout.c ++++ b/tools/testing/selftests/futex/functional/futex_wait_timeout.c +@@ -11,6 +11,7 @@ + * + * HISTORY + * 2009-Nov-6: Initial version by Darren Hart ++ * 2019-Dec-13: Add WAIT_MULTIPLE test by Krisman + * + *****************************************************************************/ + +@@ -41,6 +42,8 @@ int main(int argc, char *argv[]) + { + futex_t f1 = FUTEX_INITIALIZER; + struct timespec to; ++ time_t secs; ++ struct futex_wait_block fwb = {&f1, f1, 0}; + int res, ret = RET_PASS; + int c; + +@@ -65,7 +68,7 @@ int main(int argc, char *argv[]) + } + + ksft_print_header(); +- ksft_set_plan(1); ++ ksft_set_plan(2); + ksft_print_msg("%s: Block on a futex and wait for timeout\n", + basename(argv[0])); + ksft_print_msg("\tArguments: timeout=%ldns\n", timeout_ns); +@@ -79,8 +82,39 @@ int main(int argc, char *argv[]) + if (!res || errno != ETIMEDOUT) { + fail("futex_wait returned %d\n", ret < 0 ? 
errno : ret); + ret = RET_FAIL; ++ } else ++ ksft_test_result_pass("futex_wait timeout succeeds\n"); ++ ++ info("Calling futex_wait_multiple on f1: %u @ %p\n", f1, &f1); ++ ++ /* Setup absolute time */ ++ ret = clock_gettime(CLOCK_REALTIME, &to); ++ secs = (to.tv_nsec + timeout_ns) / 1000000000; ++ to.tv_nsec = ((int64_t)to.tv_nsec + timeout_ns) % 1000000000; ++ to.tv_sec += secs; ++ info("to.tv_sec = %ld\n", to.tv_sec); ++ info("to.tv_nsec = %ld\n", to.tv_nsec); ++ ++ res = futex_wait_multiple(&fwb, 1, &to, ++ FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME); ++ ++#ifdef __ILP32__ ++ if (res == -1 && errno == ENOSYS) { ++ ksft_test_result_skip("futex_wait_multiple not supported at x32\n"); ++ } else { ++ ksft_test_result_fail("futex_wait_multiple returned %d\n", ++ res < 0 ? errno : res); ++ ret = RET_FAIL; + } ++#else ++ if (!res || errno != ETIMEDOUT) { ++ ksft_test_result_fail("futex_wait_multiple returned %d\n", ++ res < 0 ? errno : res); ++ ret = RET_FAIL; ++ } else ++ ksft_test_result_pass("futex_wait_multiple timeout succeeds\n"); ++#endif /* __ILP32__ */ + +- print_result(TEST_NAME, ret); ++ ksft_print_cnts(); + return ret; + } +diff --git a/tools/testing/selftests/futex/include/futextest.h b/tools/testing/selftests/futex/include/futextest.h +index ddbcfc9b7bac4aebb5bac2f249e26ecfd948aa84..bb103bef4557012ef9a389ca74c868e4476a8a31 100644 +--- a/tools/testing/selftests/futex/include/futextest.h ++++ b/tools/testing/selftests/futex/include/futextest.h +@@ -38,6 +38,14 @@ typedef volatile u_int32_t futex_t; + #ifndef FUTEX_CMP_REQUEUE_PI + #define FUTEX_CMP_REQUEUE_PI 12 + #endif ++#ifndef FUTEX_WAIT_MULTIPLE ++#define FUTEX_WAIT_MULTIPLE 13 ++struct futex_wait_block { ++ futex_t *uaddr; ++ futex_t val; ++ __u32 bitset; ++}; ++#endif + #ifndef FUTEX_WAIT_REQUEUE_PI_PRIVATE + #define FUTEX_WAIT_REQUEUE_PI_PRIVATE (FUTEX_WAIT_REQUEUE_PI | \ + FUTEX_PRIVATE_FLAG) +@@ -80,6 +88,20 @@ futex_wait(futex_t *uaddr, futex_t val, struct timespec *timeout, int opflags) + return futex(uaddr, FUTEX_WAIT, val, timeout, NULL, 0, opflags); + } + ++/** ++ * futex_wait_multiple() - block on several futexes with optional timeout ++ * @fwb: wait block user space address ++ * @count: number of entities at fwb ++ * @timeout: absolute timeout ++ */ ++static inline int ++futex_wait_multiple(struct futex_wait_block *fwb, int count, ++ struct timespec *timeout, int opflags) ++{ ++ return futex(fwb, FUTEX_WAIT_MULTIPLE, count, timeout, NULL, 0, ++ opflags); ++} ++ + /** + * futex_wake() - wake one or more tasks blocked on uaddr + * @nr_wake: wake up to this many tasks +diff --git a/tools/testing/selftests/futex/functional/futex_wait_wouldblock.c b/tools/testing/selftests/futex/functional/futex_wait_wouldblock.c +index 0ae390ff816449c88d0bb655a26eb014382c2b4f..bcbac042992d447e0bc9ef5fefe94e875de310f2 100644 +--- a/tools/testing/selftests/futex/functional/futex_wait_wouldblock.c ++++ b/tools/testing/selftests/futex/functional/futex_wait_wouldblock.c +@@ -12,6 +12,7 @@ + * + * HISTORY + * 2009-Nov-14: Initial version by Gowrishankar ++ * 2019-Dec-13: Add WAIT_MULTIPLE test by Krisman + * + *****************************************************************************/ + +@@ -40,6 +41,7 @@ int main(int argc, char *argv[]) + { + struct timespec to = {.tv_sec = 0, .tv_nsec = timeout_ns}; + futex_t f1 = FUTEX_INITIALIZER; ++ struct futex_wait_block fwb = {&f1, f1+1, 0}; + int res, ret = RET_PASS; + int c; + +@@ -61,7 +63,7 @@ int main(int argc, char *argv[]) + } + + ksft_print_header(); +- ksft_set_plan(1); ++ 
ksft_set_plan(2); + ksft_print_msg("%s: Test the unexpected futex value in FUTEX_WAIT\n", + basename(argv[0])); + +@@ -71,8 +73,30 @@ int main(int argc, char *argv[]) + fail("futex_wait returned: %d %s\n", + res ? errno : res, res ? strerror(errno) : ""); + ret = RET_FAIL; ++ } else ++ ksft_test_result_pass("futex_wait wouldblock succeeds\n"); ++ ++ info("Calling futex_wait_multiple on f1: %u @ %p with val=%u\n", ++ f1, &f1, f1+1); ++ res = futex_wait_multiple(&fwb, 1, NULL, FUTEX_PRIVATE_FLAG); ++ ++#ifdef __ILP32__ ++ if (res != -1 || errno != ENOSYS) { ++ ksft_test_result_fail("futex_wait_multiple returned %d\n", ++ res < 0 ? errno : res); ++ ret = RET_FAIL; ++ } else { ++ ksft_test_result_skip("futex_wait_multiple not supported at x32\n"); ++ } ++#else ++ if (!res || errno != EWOULDBLOCK) { ++ ksft_test_result_fail("futex_wait_multiple returned %d\n", ++ res < 0 ? errno : res); ++ ret = RET_FAIL; + } ++ ksft_test_result_pass("futex_wait_multiple wouldblock succeeds\n"); ++#endif /* __ILP32__ */ + +- print_result(TEST_NAME, ret); ++ ksft_print_cnts(); + return ret; + } +diff --git a/tools/testing/selftests/futex/functional/.gitignore b/tools/testing/selftests/futex/functional/.gitignore +index a09f570619023750f558c84004aff166b4337d72..4660128a545edb04a17cc6bd9760931c1386122f 100644 +--- a/tools/testing/selftests/futex/functional/.gitignore ++++ b/tools/testing/selftests/futex/functional/.gitignore +@@ -5,3 +5,4 @@ futex_wait_private_mapped_file + futex_wait_timeout + futex_wait_uninitialized_heap + futex_wait_wouldblock ++futex_wait_multiple +diff --git a/tools/testing/selftests/futex/functional/Makefile b/tools/testing/selftests/futex/functional/Makefile +index 30996306cabcfe89a47977643e529b122893bb7e..75f9fface11fa3c90c1bdb9a49b3ea51291afd58 100644 +--- a/tools/testing/selftests/futex/functional/Makefile ++++ b/tools/testing/selftests/futex/functional/Makefile +@@ -14,7 +14,8 @@ TEST_GEN_FILES := \ + futex_requeue_pi_signal_restart \ + futex_requeue_pi_mismatched_ops \ + futex_wait_uninitialized_heap \ +- futex_wait_private_mapped_file ++ futex_wait_private_mapped_file \ ++ futex_wait_multiple + + TEST_PROGS := run.sh + +diff --git a/tools/testing/selftests/futex/functional/futex_wait_multiple.c b/tools/testing/selftests/futex/functional/futex_wait_multiple.c +new file mode 100644 +index 0000000000000000000000000000000000000000..b48422e79f42edba1653bb0bd2a4c4fd98d2d48d +--- /dev/null ++++ b/tools/testing/selftests/futex/functional/futex_wait_multiple.c +@@ -0,0 +1,173 @@ ++// SPDX-License-Identifier: GPL-2.0-or-later ++/****************************************************************************** ++ * ++ * Copyright © Collabora, Ltd., 2019 ++ * ++ * DESCRIPTION ++ * Test basic semantics of FUTEX_WAIT_MULTIPLE ++ * ++ * AUTHOR ++ * Gabriel Krisman Bertazi ++ * ++ * HISTORY ++ * 2019-Dec-13: Initial version by Krisman ++ * ++ *****************************************************************************/ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include "futextest.h" ++#include "logging.h" ++ ++#define TEST_NAME "futex-wait-multiple" ++#define timeout_ns 100000 ++#define MAX_COUNT 128 ++#define WAKE_WAIT_US 3000000 ++ ++int ret = RET_PASS; ++char *progname; ++futex_t f[MAX_COUNT] = {0}; ++struct futex_wait_block fwb[MAX_COUNT]; ++ ++void usage(char *prog) ++{ ++ printf("Usage: %s\n", prog); ++ printf(" -c Use color\n"); ++ printf(" -h Display this help message\n"); ++ printf(" -v L Verbosity level: %d=QUIET %d=CRITICAL %d=INFO\n", ++ VQUIET, 
VCRITICAL, VINFO); ++} ++ ++void test_count_overflow(void) ++{ ++ futex_t f = FUTEX_INITIALIZER; ++ struct futex_wait_block fwb[MAX_COUNT+1]; ++ int res, i; ++ ++ ksft_print_msg("%s: Test a too big number of futexes\n", progname); ++ ++ for (i = 0; i < MAX_COUNT+1; i++) { ++ fwb[i].uaddr = &f; ++ fwb[i].val = f; ++ fwb[i].bitset = 0; ++ } ++ ++ res = futex_wait_multiple(fwb, MAX_COUNT+1, NULL, FUTEX_PRIVATE_FLAG); ++ ++#ifdef __ILP32__ ++ if (res != -1 || errno != ENOSYS) { ++ ksft_test_result_fail("futex_wait_multiple returned %d\n", ++ res < 0 ? errno : res); ++ ret = RET_FAIL; ++ } else { ++ ksft_test_result_skip("futex_wait_multiple not supported at x32\n"); ++ } ++#else ++ if (res != -1 || errno != EINVAL) { ++ ksft_test_result_fail("futex_wait_multiple returned %d\n", ++ res < 0 ? errno : res); ++ ret = RET_FAIL; ++ } else { ++ ksft_test_result_pass("futex_wait_multiple count overflow succeed\n"); ++ } ++ ++#endif /* __ILP32__ */ ++} ++ ++void *waiterfn(void *arg) ++{ ++ int res; ++ ++ res = futex_wait_multiple(fwb, MAX_COUNT, NULL, FUTEX_PRIVATE_FLAG); ++ ++#ifdef __ILP32__ ++ if (res != -1 || errno != ENOSYS) { ++ ksft_test_result_fail("futex_wait_multiple returned %d\n", ++ res < 0 ? errno : res); ++ ret = RET_FAIL; ++ } else { ++ ksft_test_result_skip("futex_wait_multiple not supported at x32\n"); ++ } ++#else ++ if (res < 0) ++ ksft_print_msg("waiter failed %d\n", res); ++ ++ info("futex_wait_multiple: Got hint futex %d was freed\n", res); ++#endif /* __ILP32__ */ ++ ++ return NULL; ++} ++ ++void test_fwb_wakeup(void) ++{ ++ int res, i; ++ pthread_t waiter; ++ ++ ksft_print_msg("%s: Test wake up in a list of futex\n", progname); ++ ++ for (i = 0; i < MAX_COUNT; i++) { ++ fwb[i].uaddr = &f[i]; ++ fwb[i].val = f[i]; ++ fwb[i].bitset = 0xffffffff; ++ } ++ ++ res = pthread_create(&waiter, NULL, waiterfn, NULL); ++ if (res) { ++ ksft_test_result_fail("Creating waiting thread failed"); ++ ksft_exit_fail(); ++ } ++ ++ usleep(WAKE_WAIT_US); ++ res = futex_wake(&(f[MAX_COUNT-1]), 1, FUTEX_PRIVATE_FLAG); ++ if (res != 1) { ++ ksft_test_result_fail("Failed to wake thread res=%d\n", res); ++ ksft_exit_fail(); ++ } ++ ++ pthread_join(waiter, NULL); ++ ksft_test_result_pass("%s succeed\n", __func__); ++} ++ ++int main(int argc, char *argv[]) ++{ ++ int c; ++ ++ while ((c = getopt(argc, argv, "cht:v:")) != -1) { ++ switch (c) { ++ case 'c': ++ log_color(1); ++ break; ++ case 'h': ++ usage(basename(argv[0])); ++ exit(0); ++ case 'v': ++ log_verbosity(atoi(optarg)); ++ break; ++ default: ++ usage(basename(argv[0])); ++ exit(1); ++ } ++ } ++ ++ progname = basename(argv[0]); ++ ++ ksft_print_header(); ++ ksft_set_plan(2); ++ ++ test_count_overflow(); ++ ++#ifdef __ILP32__ ++ // if it's a 32x binary, there's no futex to wakeup ++ ksft_test_result_skip("futex_wait_multiple not supported at x32\n"); ++#else ++ test_fwb_wakeup(); ++#endif /* __ILP32__ */ ++ ++ ksft_print_cnts(); ++ return ret; ++} +diff --git a/tools/testing/selftests/futex/functional/run.sh b/tools/testing/selftests/futex/functional/run.sh +index 1acb6ace1680e8f3d6b3ee2dc528c19ddfdb018e..a8be94f28ff78b4879d2d19bca5d9b0fcb26c1f8 100755 +--- a/tools/testing/selftests/futex/functional/run.sh ++++ b/tools/testing/selftests/futex/functional/run.sh +@@ -73,3 +73,6 @@ echo + echo + ./futex_wait_uninitialized_heap $COLOR + ./futex_wait_private_mapped_file $COLOR ++ ++echo ++./futex_wait_multiple $COLOR +diff --git a/include/uapi/linux/futex.h b/include/uapi/linux/futex.h +index 
580001e89c6caed57dd8b3cb491d65dce846caff..a3e760886b8e7e74285fdcf2caaaa6f66ad16675 100644 +--- a/include/uapi/linux/futex.h ++++ b/include/uapi/linux/futex.h +@@ -21,7 +21,7 @@ + #define FUTEX_WAKE_BITSET 10 + #define FUTEX_WAIT_REQUEUE_PI 11 + #define FUTEX_CMP_REQUEUE_PI 12 +-#define FUTEX_WAIT_MULTIPLE 13 ++#define FUTEX_WAIT_MULTIPLE 31 + + #define FUTEX_PRIVATE_FLAG 128 + #define FUTEX_CLOCK_REALTIME 256 +diff --git a/kernel/futex.c b/kernel/futex.c +index 58cf9eb2b851b4858e29b5ef4114a29a92e676ba..e0bb628a5e1988dcc9ae5442a4259edc229d578d 100644 +--- a/kernel/futex.c ++++ b/kernel/futex.c +@@ -4198,7 +4198,7 @@ SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val, + return -EINVAL; + + t = timespec64_to_ktime(ts); +- if (cmd == FUTEX_WAIT) ++ if (cmd == FUTEX_WAIT || cmd == FUTEX_WAIT_MULTIPLE) + t = ktime_add_safe(ktime_get(), t); + tp = &t; + } +@@ -4399,6 +4399,7 @@ COMPAT_SYSCALL_DEFINE3(get_robust_list, int, pid, + */ + struct compat_futex_wait_block { + compat_uptr_t uaddr; ++ __u32 pad; + __u32 val; + __u32 bitset; + }; +@@ -4461,7 +4462,7 @@ SYSCALL_DEFINE6(futex_time32, u32 __user *, uaddr, int, op, u32, val, + return -EINVAL; + + t = timespec64_to_ktime(ts); +- if (cmd == FUTEX_WAIT) ++ if (cmd == FUTEX_WAIT || cmd == FUTEX_WAIT_MULTIPLE) + t = ktime_add_safe(ktime_get(), t); + tp = &t; + } diff --git a/linux58-rc-tkg/linux58-tkg-patches/0011-ZFS-fix.patch b/linux58-rc-tkg/linux58-tkg-patches/0011-ZFS-fix.patch new file mode 100644 index 0000000..af71d04 --- /dev/null +++ b/linux58-rc-tkg/linux58-tkg-patches/0011-ZFS-fix.patch @@ -0,0 +1,43 @@ +From 1e010beda2896bdf3082fb37a3e49f8ce20e04d8 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?J=C3=B6rg=20Thalheim?= +Date: Thu, 2 May 2019 05:28:08 +0100 +Subject: [PATCH] x86/fpu: Export kernel_fpu_{begin,end}() with + EXPORT_SYMBOL_GPL +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +We need these symbols in zfs as the fpu implementation breaks userspace: + +https://github.com/zfsonlinux/zfs/issues/9346 +Signed-off-by: Jörg Thalheim +--- + arch/x86/kernel/fpu/core.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c +index 12c70840980e..352538b3bb5d 100644 +--- a/arch/x86/kernel/fpu/core.c ++++ b/arch/x86/kernel/fpu/core.c +@@ -102,7 +102,7 @@ void kernel_fpu_begin(void) + } + __cpu_invalidate_fpregs_state(); + } +-EXPORT_SYMBOL_GPL(kernel_fpu_begin); ++EXPORT_SYMBOL(kernel_fpu_begin); + + void kernel_fpu_end(void) + { +@@ -111,7 +111,7 @@ void kernel_fpu_end(void) + this_cpu_write(in_kernel_fpu, false); + preempt_enable(); + } +-EXPORT_SYMBOL_GPL(kernel_fpu_end); ++EXPORT_SYMBOL(kernel_fpu_end); + + /* + * Save the FPU state (mark it for reload if necessary): +-- +2.23.0 + +
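For context on why the export type matters in the patch above: symbols exported with EXPORT_SYMBOL_GPL can only be resolved by GPL-compatible modules, while the out-of-tree zfs module declares the CDDL license and is therefore limited to plain EXPORT_SYMBOL exports. The following is a minimal, hypothetical sketch (not part of the patchset) of the kind of module code that depends on kernel_fpu_begin()/kernel_fpu_end() being exported this way:

    // Hypothetical out-of-tree module sketch; builds only once the
    // exports above are EXPORT_SYMBOL rather than EXPORT_SYMBOL_GPL.
    #include <linux/init.h>
    #include <linux/module.h>
    #include <asm/fpu/api.h>        /* kernel_fpu_begin()/kernel_fpu_end() */

    static int __init fpu_demo_init(void)
    {
            kernel_fpu_begin();     /* claim the FPU/SIMD registers */
            /* SSE/AVX-accelerated work (e.g. checksums) would go here */
            kernel_fpu_end();       /* hand the FPU state back */
            return 0;
    }

    static void __exit fpu_demo_exit(void)
    {
    }

    module_init(fpu_demo_init);
    module_exit(fpu_demo_exit);
    MODULE_LICENSE("CDDL");         /* non-GPL, as the zfs module declares */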