linux58-tkg: Update Project C patchset to v5.8-r3

commit 41cfeabff5 (parent 8d8ec901c9)
Author: Tk-Glitch
Date: 2020-09-15 13:21:12 +02:00

3 changed files with 53 additions and 93 deletions

linux58-tkg/PKGBUILD

@@ -45,7 +45,7 @@ else
fi
pkgname=("${pkgbase}" "${pkgbase}-headers")
pkgver="${_basekernel}"."${_sub}"
-pkgrel=15
+pkgrel=16
pkgdesc='Linux-tkg'
arch=('x86_64') # no i686 in here
url="http://www.kernel.org/"
@@ -78,7 +78,7 @@ source=("https://cdn.kernel.org/pub/linux/kernel/v5.x/linux-${_basekernel}.tar.x
#0008-5.8-bcachefs.patch
0009-glitched-ondemand-bmq.patch
0009-glitched-bmq.patch
-0009-prjc_v5.8-r2.patch
+0009-prjc_v5.8-r3.patch
0011-ZFS-fix.patch
#0012-linux-hardened.patch
0012-misc-additions.patch
@@ -101,7 +101,7 @@ sha256sums=('e7f75186aa0642114af8f19d99559937300ca27acaf7451b36d4f9b0f85cf1f5'
'cd225e86d72eaf6c31ef3d7b20df397f4cc44ddd04389850691292cdf292b204'
'9fad4a40449e09522899955762c8928ae17f4cdaa16e01239fd12592e9d58177'
'965a517a283f265a012545fbb5cc9e516efc9f6166d2aa1baf7293a32a1086b7'
-'eee99d2a6c681ba22de02c39e60ae7293506142796f19257c219e5d206a56753'
+'f5dbff4833a2e3ca94c202e5197894d5f1006c689ff149355353e77d2e17c943'
'49262ce4a8089fa70275aad742fc914baa28d9c384f710c9a62f64796d13e104'
'98311deeb474b39e821cd1e64198793d5c4d797155b3b8bbcb1938b7f11e8d74')

linux58-tkg/linux58-tkg-config/prepare

@@ -182,7 +182,7 @@ _tkg_srcprep() {
elif [ "${_cpusched}" = "pds" ]; then
# PDS-mq
msg2 "Applying PDS base patch"
-patch -Np1 -i "$srcdir"/0009-prjc_v5.8-r2.patch
+patch -Np1 -i "$srcdir"/0009-prjc_v5.8-r3.patch
if [ "${_aggressive_ondemand}" = "true" ]; then
msg2 "Applying PDS agressive ondemand governor patch"
@@ -209,7 +209,7 @@ _tkg_srcprep() {
# Project C / BMQ
msg2 "Applying Project C / BMQ base patch"
-patch -Np1 -i "$srcdir"/0009-prjc_v5.8-r2.patch
+patch -Np1 -i "$srcdir"/0009-prjc_v5.8-r3.patch
if [ "${_aggressive_ondemand}" = "true" ]; then
msg2 "Applying BMQ agressive ondemand governor patch"

linux58-tkg/linux58-tkg-patches/0009-prjc_v5.8-r3.patch (renamed from 0009-prjc_v5.8-r2.patch)

@@ -267,7 +267,7 @@ index 683372943093..d25f2501daf3 100644
{
return task->thread_pid;
diff --git a/include/linux/sched/deadline.h b/include/linux/sched/deadline.h
-index 1aff00b65f3c..45f0b0f3616c 100644
+index 1aff00b65f3c..179d77c8360e 100644
--- a/include/linux/sched/deadline.h
+++ b/include/linux/sched/deadline.h
@@ -1,5 +1,24 @@
@@ -275,13 +275,13 @@ index 1aff00b65f3c..45f0b0f3616c 100644
+#ifdef CONFIG_SCHED_ALT
+
-+#ifdef CONFIG_SCHED_BMQ
-+#define __tsk_deadline(p) (0UL)
-+
+static inline int dl_task(struct task_struct *p)
+{
+ return 0;
+}
++
++#ifdef CONFIG_SCHED_BMQ
++#define __tsk_deadline(p) (0UL)
+#endif
+
+#ifdef CONFIG_SCHED_PDS
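What this reorder fixes: in r2, the dl_task() stub only existed when CONFIG_SCHED_BMQ was set, so a PDS-only configuration (CONFIG_SCHED_PDS=y, CONFIG_SCHED_BMQ unset) had no dl_task() definition at all and any caller of it failed to build. A minimal sketch of the r2 breakage, using a hypothetical caller that is not part of the patch:

    /* Hypothetical caller, for illustration only. Under the r2 layout a
     * PDS-only build left dl_task() undeclared because the stub sat inside
     * #ifdef CONFIG_SCHED_BMQ; r3 moves the stub outside that guard so
     * only __tsk_deadline() stays scheduler-specific. */
    #include <linux/sched/deadline.h>

    static bool task_uses_deadline_class(struct task_struct *p)
    {
        return dl_task(p); /* always 0 under Project C: no SCHED_DEADLINE */
    }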
@@ -527,7 +527,7 @@ index 000000000000..47ca955a451d
+}
+#endif /* _LINUX_SKIP_LIST_H */
diff --git a/init/Kconfig b/init/Kconfig
-index 0498af567f70..09a302641ba6 100644
+index 0498af567f70..aaa7c434eedf 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -742,9 +742,39 @@ config GENERIC_SCHED_CLOCK
@@ -566,7 +566,7 @@ index 0498af567f70..09a302641ba6 100644
config UCLAMP_TASK
bool "Enable utilization clamping for RT/FAIR tasks"
depends on CPU_FREQ_GOV_SCHEDUTIL
-+ depends on !SCHED_BMQ
++ depends on !SCHED_ALT
help
This feature enables the scheduler to track the clamped utilization
of each CPU based on RUNNABLE tasks scheduled on that CPU.
@@ -574,7 +574,7 @@ index 0498af567f70..09a302641ba6 100644
depends on ARCH_SUPPORTS_NUMA_BALANCING
depends on !ARCH_WANT_NUMA_VARIABLE_LOCALITY
depends on SMP && NUMA && MIGRATION
-+ depends on !SCHED_BMQ
++ depends on !SCHED_ALT
help
This option adds support for automatic NUMA aware memory/task placement.
The mechanism is quite primitive and is based on migrating memory when
@@ -583,7 +583,7 @@ index 0498af567f70..09a302641ba6 100644
tasks.
-if CGROUP_SCHED
-+if CGROUP_SCHED && !SCHED_BMQ
++if CGROUP_SCHED && !SCHED_ALT
config FAIR_GROUP_SCHED
bool "Group scheduling for SCHED_OTHER"
depends on CGROUP_SCHED
@@ -591,7 +591,7 @@ index 0498af567f70..09a302641ba6 100644
config SCHED_AUTOGROUP
bool "Automatic process group scheduling"
-+ depends on !SCHED_BMQ
++ depends on !SCHED_ALT
select CGROUPS
select CGROUP_SCHED
select FAIR_GROUP_SCHED
@@ -827,7 +827,7 @@ index 5fc9c9b70862..eb6d7d87779f 100644
obj-$(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) += cpufreq_schedutil.o
diff --git a/kernel/sched/alt_core.c b/kernel/sched/alt_core.c
new file mode 100644
-index 000000000000..76f72292e28a
+index 000000000000..b469c9488d18
--- /dev/null
+++ b/kernel/sched/alt_core.c
@@ -0,0 +1,6184 @@
@@ -878,7 +878,7 @@ index 000000000000..76f72292e28a
+#define CREATE_TRACE_POINTS
+#include <trace/events/sched.h>
+
-+#define ALT_SCHED_VERSION "v5.8-r2"
++#define ALT_SCHED_VERSION "v5.8-r3"
+
+/* rt_prio(prio) defined in include/linux/sched/rt.h */
+#define rt_task(p) rt_prio((p)->prio)
@@ -2426,6 +2426,7 @@ index 000000000000..76f72292e28a
+static inline void
+ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
+{
++ check_preempt_curr(rq);
+ p->state = TASK_RUNNING;
+ trace_sched_wakeup(p);
+}
@@ -2448,6 +2449,8 @@ index 000000000000..76f72292e28a
+
+ rq = __task_access_lock(p, &lock);
+ if (task_on_rq_queued(p)) {
++ /* check_preempt_curr() may use rq clock */
++ update_rq_clock(rq);
+ ttwu_do_wakeup(rq, p, wake_flags);
+ ret = 1;
+ }
@@ -2487,8 +2490,6 @@ index 000000000000..76f72292e28a
+ ttwu_do_activate(rq, p, p->sched_remote_wakeup ? WF_MIGRATED : 0);
+ }
+
-+ check_preempt_curr(rq);
-+
+ rq_unlock_irqrestore(rq, &rf);
+}
+
@@ -2595,7 +2596,6 @@ index 000000000000..76f72292e28a
+ raw_spin_lock(&rq->lock);
+ update_rq_clock(rq);
+ ttwu_do_activate(rq, p, wake_flags);
-+ check_preempt_curr(rq);
+ raw_spin_unlock(&rq->lock);
+}
+
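Taken together, the three alt_core.c hunks above centralize the preemption check: r2 called check_preempt_curr() separately in each wakeup path, while r3 runs it once inside ttwu_do_wakeup() and has ttwu_remote() refresh the runqueue clock first, since the check may read it. A condensed sketch of the resulting r3 flow (simplified from the hunks, not verbatim alt_core.c):

    /* r3: every wakeup path gets exactly one preemption check, run under
     * the rq lock with a freshly updated rq clock. */
    static inline void
    ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
    {
        check_preempt_curr(rq);
        p->state = TASK_RUNNING;
        trace_sched_wakeup(p);
    }

    static int ttwu_remote(struct task_struct *p, int wake_flags)
    {
        raw_spinlock_t *lock;
        struct rq *rq;
        int ret = 0;

        rq = __task_access_lock(p, &lock);
        if (task_on_rq_queued(p)) {
            /* check_preempt_curr() may use rq clock */
            update_rq_clock(rq);
            ttwu_do_wakeup(rq, p, wake_flags);
            ret = 1;
        }
        __task_access_unlock(p, lock);

        return ret;
    }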
@@ -7981,11 +7981,11 @@ index 000000000000..7fdeace7e8a5
+#endif
diff --git a/kernel/sched/pds_imp.h b/kernel/sched/pds_imp.h
new file mode 100644
-index 000000000000..041827b92910
+index 000000000000..6baee5e961b9
--- /dev/null
+++ b/kernel/sched/pds_imp.h
-@@ -0,0 +1,226 @@
-+#define ALT_SCHED_VERSION_MSG "sched/bmq: PDS CPU Scheduler "ALT_SCHED_VERSION" by Alfred Chen.\n"
+@@ -0,0 +1,257 @@
++#define ALT_SCHED_VERSION_MSG "sched/pds: PDS CPU Scheduler "ALT_SCHED_VERSION" by Alfred Chen.\n"
+
+static const u64 user_prio2deadline[NICE_WIDTH] = {
+/* -20 */ 4194304, 4613734, 5075107, 5582617, 6140878,
@@ -8175,8 +8175,39 @@ index 000000000000..041827b92910
+ return false;
+}
+
++/*
++ * pds_skiplist_random_level -- Returns a pseudo-random level number for skip
++ * list node which is used in PDS run queue.
++ *
++ * In the current implementation, based on testing, the first 8 bits in microseconds
++ * of niffies are suitable for random level population.
++ * find_first_bit() is used to satisfy p = 0.5 between adjacent levels, and there
++ * should be a hardware-supported instruction (known as ctz/clz) to speed
++ * up this function.
++ * The skiplist level for a task is populated when the task is created and doesn't
++ * change during the task's lifetime. When the task is inserted into the run queue, this
++ * skiplist level is set in the task's sl_node->level; the skiplist insert function
++ * may change it based on the current level of the skip list.
++ */
++static inline int pds_skiplist_random_level(const struct task_struct *p)
++{
++ long unsigned int randseed;
++
++ /*
++ * 1. Some architectures don't have better than microsecond resolution
++ * so mask out ~microseconds as a factor of the random seed for skiplist
++ * insertion.
++ * 2. Use the address of the task structure as another factor of the
++ * random seed for the task burst forking scenario.
++ */
++ randseed = (task_rq(p)->clock ^ (long unsigned int)p) >> 10;
++
++ return find_first_bit(&randseed, NUM_SKIPLIST_LEVEL - 1);
++}
++
+static void sched_task_fork(struct task_struct *p, struct rq *rq)
+{
++ p->sl_level = pds_skiplist_random_level(p);
+ if (p->prio >= MAX_RT_PRIO)
+ p->deadline = rq->clock + user_prio2deadline[TASK_USER_PRIO(p)];
+ update_task_priodl(p);
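The p = 0.5 property claimed in the comment is easy to check outside the kernel: the index of the lowest set bit of a uniform random word follows a geometric distribution. A standalone userspace demo (hypothetical, not kernel code; __builtin_ctzl() stands in for find_first_bit(), and NUM_SKIPLIST_LEVEL = 8 is assumed from the patch's skip_list.h):

    #include <stdio.h>
    #include <stdlib.h>

    #define NUM_SKIPLIST_LEVEL 8 /* assumed; see skip_list.h in the patch */

    /* Mirrors the core of pds_skiplist_random_level(): index of the first
     * set bit, with find_first_bit()'s convention of returning the size
     * argument (here NUM_SKIPLIST_LEVEL - 1) when no bit is set in range. */
    static int random_level(unsigned long randseed)
    {
        if ((randseed & ((1UL << (NUM_SKIPLIST_LEVEL - 1)) - 1)) == 0)
            return NUM_SKIPLIST_LEVEL - 1;
        return __builtin_ctzl(randseed);
    }

    int main(void)
    {
        long hist[NUM_SKIPLIST_LEVEL] = { 0 };

        for (int i = 0; i < 1000000; i++)
            hist[random_level((unsigned long)random())]++;
        /* Each level should hold roughly half of the previous one:
         * ~500000, ~250000, ~125000, ... */
        for (int l = 0; l < NUM_SKIPLIST_LEVEL; l++)
            printf("level %d: %ld\n", l, hist[l]);
        return 0;
    }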
@@ -8549,74 +8580,3 @@ index b5e3496cf803..65f60c77bc50 100644
};
struct wakeup_test_data *x = data;
-diff --git a/include/linux/sched/deadline.h b/include/linux/sched/deadline.h
-index 45f0b0f3616c934a3bfa43d0f2ba998c6f006dba..179d77c8360ebdd795a5bb9d2b046232403907a1 100644
---- a/include/linux/sched/deadline.h
-+++ b/include/linux/sched/deadline.h
-@@ -2,13 +2,13 @@
- #ifdef CONFIG_SCHED_ALT
--#ifdef CONFIG_SCHED_BMQ
--#define __tsk_deadline(p) (0UL)
--
- static inline int dl_task(struct task_struct *p)
- {
- return 0;
- }
-+
-+#ifdef CONFIG_SCHED_BMQ
-+#define __tsk_deadline(p) (0UL)
- #endif
- #ifdef CONFIG_SCHED_PDS
-diff --git a/init/Kconfig b/init/Kconfig
-index 09a302641ba67bc0dd6223d240c7172e808abe42..aaa7c434eedfc5bce8c331926d0fab14c58ca007 100644
---- a/init/Kconfig
-+++ b/init/Kconfig
-@@ -774,7 +774,7 @@ endif
- config UCLAMP_TASK
- bool "Enable utilization clamping for RT/FAIR tasks"
- depends on CPU_FREQ_GOV_SCHEDUTIL
-- depends on !SCHED_BMQ
-+ depends on !SCHED_ALT
- help
- This feature enables the scheduler to track the clamped utilization
- of each CPU based on RUNNABLE tasks scheduled on that CPU.
-@@ -860,7 +860,7 @@ config NUMA_BALANCING
- depends on ARCH_SUPPORTS_NUMA_BALANCING
- depends on !ARCH_WANT_NUMA_VARIABLE_LOCALITY
- depends on SMP && NUMA && MIGRATION
-- depends on !SCHED_BMQ
-+ depends on !SCHED_ALT
- help
- This option adds support for automatic NUMA aware memory/task placement.
- The mechanism is quite primitive and is based on migrating memory when
-@@ -947,7 +947,7 @@ menuconfig CGROUP_SCHED
- bandwidth allocation to such task groups. It uses cgroups to group
- tasks.
--if CGROUP_SCHED && !SCHED_BMQ
-+if CGROUP_SCHED && !SCHED_ALT
- config FAIR_GROUP_SCHED
- bool "Group scheduling for SCHED_OTHER"
- depends on CGROUP_SCHED
-@@ -1203,7 +1203,7 @@ config CHECKPOINT_RESTORE
- config SCHED_AUTOGROUP
- bool "Automatic process group scheduling"
-- depends on !SCHED_BMQ
-+ depends on !SCHED_ALT
- select CGROUPS
- select CGROUP_SCHED
- select FAIR_GROUP_SCHED
-diff --git a/kernel/sched/pds_imp.h b/kernel/sched/pds_imp.h
-index 041827b92910d0b2ffd83ca41c149d3802b2ad38..66dc16218444c35831b8a93e37463c98b1c0189e 100644
---- a/kernel/sched/pds_imp.h
-+++ b/kernel/sched/pds_imp.h
-@@ -1,4 +1,4 @@
--#define ALT_SCHED_VERSION_MSG "sched/bmq: PDS CPU Scheduler "ALT_SCHED_VERSION" by Alfred Chen.\n"
-+#define ALT_SCHED_VERSION_MSG "sched/pds: PDS CPU Scheduler "ALT_SCHED_VERSION" by Alfred Chen.\n"
- static const u64 user_prio2deadline[NICE_WIDTH] = {
- /* -20 */ 4194304, 4613734, 5075107, 5582617, 6140878,
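The "sched/bmq:" to "sched/pds:" string fix above is small but user-visible: ALT_SCHED_VERSION_MSG is the banner the kernel logs at boot, presumably via a printk() call in alt_core.c along these lines (assumed call site; the exact line is not shown in this commit), so before r3 a PDS kernel announced itself in dmesg with a BMQ prefix:

    printk(KERN_INFO ALT_SCHED_VERSION_MSG);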