We already had two of the three changes vs r1 since 1e7f731ba9, so this only adds 0761e7e82a.

Fixes https://github.com/Frogging-Family/linux-tkg/issues/2
Tk-Glitch
2020-04-14 00:16:44 +02:00
parent 9b1bb7eb45
commit 498e8c0775
2 changed files with 36 additions and 126 deletions


@@ -118,7 +118,7 @@ source=("https://www.kernel.org/pub/linux/kernel/v5.x/linux-${_basekernel}.tar.x
 #0008-5.6-bcachefs.patch
 0009-glitched-ondemand-bmq.patch
 0009-glitched-bmq.patch
-0009-bmq_v5.6-r1.patch
+0009-bmq_v5.6-r2.patch
 0011-ZFS-fix.patch
 #0012-linux-hardened.patch
 0013-tp_smapi_ec.patch
@@ -140,7 +140,7 @@ sha256sums=('e342b04a2aa63808ea0ef1baab28fc520bd031ef8cf93d9ee4a31d4058fcb622'
 '2d9260b80b43bbd605cf420d6bd53aa7262103dfd77196ba590ece5600b6dc0d'
 '9fad4a40449e09522899955762c8928ae17f4cdaa16e01239fd12592e9d58177'
 '965a517a283f265a012545fbb5cc9e516efc9f6166d2aa1baf7293a32a1086b7'
-'fee8594610e5535296bd57dfa21af281cf161264f0f466f204a1dc1a2aa8e0dc'
+'0e1b569bf16a6c514710715eff06df04447378d1c067b853db0c454ac3c5e463'
 '49262ce4a8089fa70275aad742fc914baa28d9c384f710c9a62f64796d13e104'
 '4a83c17a33779df304ee44ad8e736069b25d917baec429ecdd193fe1a9a63576')
@@ -243,7 +243,7 @@ prepare() {
 patch -Np1 -i ../0005-glitched-pds.patch
 elif [ "${_cpusched}" == "bmq" ]; then
 # BMQ
-patch -Np1 -i ../0009-bmq_v5.6-r1.patch
+patch -Np1 -i ../0009-bmq_v5.6-r2.patch
 if [ "${_aggressive_ondemand}" == "true" ]; then
 patch -Np1 -i ../0009-glitched-ondemand-bmq.patch
 fi


@@ -571,10 +571,10 @@ index 21fb5a5662b5..ac31239aa51a 100644
 obj-$(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) += cpufreq_schedutil.o
 diff --git a/kernel/sched/bmq.c b/kernel/sched/bmq.c
 new file mode 100644
-index 000000000000..e6d6fc98bead
+index 000000000000..b37608bbc23a
 --- /dev/null
 +++ b/kernel/sched/bmq.c
-@@ -0,0 +1,5982 @@
+@@ -0,0 +1,5977 @@
 +/*
 + * kernel/sched/bmq.c
 + *
@@ -647,7 +647,7 @@ index 000000000000..e6d6fc98bead
 +
 +static inline void print_scheduler_version(void)
 +{
-+ printk(KERN_INFO "bmq: BMQ CPU Scheduler 5.6-r1 by Alfred Chen.\n");
++ printk(KERN_INFO "bmq: BMQ CPU Scheduler 5.6-r2 by Alfred Chen.\n");
 +}
 +
 +/**
@@ -3808,6 +3808,9 @@ index 000000000000..e6d6fc98bead
 +{
 + struct cpumask *affinity_mask, *end_mask;
 +
++ if (unlikely(!rq->online))
++ return 0;
++
 + if (cpumask_empty(&sched_rq_pending_mask))
 + return 0;
 +
@@ -3870,9 +3873,8 @@ index 000000000000..e6d6fc98bead
 + if (unlikely(rq->skip)) {
 + next = rq_runnable_task(rq);
 +#ifdef CONFIG_SMP
-+ if (likely(rq->online))
-+ if (next == rq->idle && take_other_rq_tasks(rq, cpu))
-+ next = rq_runnable_task(rq);
++ if (next == rq->idle && take_other_rq_tasks(rq, cpu))
++ next = rq_runnable_task(rq);
 +#endif
 + rq->skip = NULL;
 + return next;
@@ -3880,9 +3882,8 @@ index 000000000000..e6d6fc98bead
 +
 + next = rq_first_bmq_task(rq);
 +#ifdef CONFIG_SMP
-+ if (likely(rq->online))
-+ if (next == rq->idle && take_other_rq_tasks(rq, cpu))
-+ return rq_first_bmq_task(rq);
++ if (next == rq->idle && take_other_rq_tasks(rq, cpu))
++ return rq_first_bmq_task(rq);
 +#endif
 + return next;
 +}
@@ -3893,8 +3894,11 @@ index 000000000000..e6d6fc98bead
 +
 + if (unlikely(sched_timeslice_ns == p->time_slice))
 + rq->last_ts_switch = rq->clock;
++
++ if (p == rq->idle)
++ schedstat_inc(rq->sched_goidle);
 +#ifdef CONFIG_HIGH_RES_TIMERS
-+ if (p != rq->idle)
++ else
 + hrtick_start(rq, p->time_slice);
 +#endif
 +}
@@ -3997,9 +4001,7 @@ index 000000000000..e6d6fc98bead
 + set_rq_task(rq, next);
 +
 + if (prev != next) {
-+ if (MAX_PRIO == next->prio)
-+ schedstat_inc(rq->sched_goidle);
-+
++ rq->nr_switches++;
 + /*
 + * RCU users of rcu_dereference(rq->curr) may not see
 + * changes to task_struct made by pick_next_task().
@@ -4020,7 +4022,6 @@ index 000000000000..e6d6fc98bead
 + * is a RELEASE barrier),
 + */
 + ++*switch_count;
-+ rq->nr_switches++;
 + rq->last_ts_switch = rq->clock;
 +
 + trace_sched_switch(preempt, prev, next);
@@ -5733,8 +5734,6 @@ index 000000000000..e6d6fc98bead
 + idle->last_ran = rq->clock_task;
 + idle->state = TASK_RUNNING;
 + idle->flags |= PF_IDLE;
-+ /* Setting prio to illegal value shouldn't matter as it will never be de/enqueued */
-+ idle->prio = MAX_PRIO;
 + idle->bmq_idx = IDLE_TASK_SCHED_PRIO;
 + bmq_init_idle(&rq->queue, idle);
 +
@@ -6076,14 +6075,12 @@ index 000000000000..e6d6fc98bead
 + }
 +}
 +
-+#define TOPOLOGY_CPUMASK(name, func) \
-+ if (cpumask_and(chk, chk, func(cpu))) { \
-+ per_cpu(sched_cpu_llc_mask, cpu) = chk; \
-+ per_cpu(sd_llc_id, cpu) = cpumask_first(func(cpu)); \
-+ printk(KERN_INFO "bmq: cpu#%d affinity mask - "#name" 0x%08lx", \
++#define TOPOLOGY_CPUMASK(name, mask, last) \
++ if (cpumask_and(chk, chk, mask)) \
++ printk(KERN_INFO "bmq: cpu#%02d affinity mask: 0x%08lx - "#name,\
 + cpu, (chk++)->bits[0]); \
-+ } \
-+ cpumask_complement(chk, func(cpu))
++ if (!last) \
++ cpumask_complement(chk, mask)
 +
 +static void sched_init_topology_cpumask(void)
 +{
@@ -6095,20 +6092,18 @@ index 000000000000..e6d6fc98bead
 +
 + cpumask_complement(chk, cpumask_of(cpu));
 +#ifdef CONFIG_SCHED_SMT
-+ TOPOLOGY_CPUMASK(smt, topology_sibling_cpumask);
-+#endif
-+#ifdef CONFIG_SCHED_MC
-+ TOPOLOGY_CPUMASK(coregroup, cpu_coregroup_mask);
++ TOPOLOGY_CPUMASK(smt, topology_sibling_cpumask(cpu), false);
 +#endif
++ per_cpu(sd_llc_id, cpu) = cpumask_first(cpu_coregroup_mask(cpu));
++ per_cpu(sched_cpu_llc_mask, cpu) = chk;
++ TOPOLOGY_CPUMASK(coregroup, cpu_coregroup_mask(cpu), false);
 +
-+ TOPOLOGY_CPUMASK(core, topology_core_cpumask);
++ TOPOLOGY_CPUMASK(core, topology_core_cpumask(cpu), false);
 +
-+ if (cpumask_and(chk, chk, cpu_online_mask))
-+ printk(KERN_INFO "bmq: cpu#%d affinity mask - others 0x%08lx",
-+ cpu, (chk++)->bits[0]);
++ TOPOLOGY_CPUMASK(others, cpu_online_mask, true);
 +
 + per_cpu(sched_cpu_affinity_end_mask, cpu) = chk;
-+ printk(KERN_INFO "bmq: cpu#%d llc_id = %d, llc_mask idx = %d\n",
++ printk(KERN_INFO "bmq: cpu#%02d llc_id = %d, llc_mask idx = %d\n",
 + cpu, per_cpu(sd_llc_id, cpu),
 + (int) (per_cpu(sched_cpu_llc_mask, cpu) -
 + &(per_cpu(sched_cpu_affinity_masks, cpu)[0])));
@@ -7306,7 +7301,7 @@ index afff644da065..4da52afaeff8 100644
 static inline int
 update_irq_load_avg(struct rq *rq, u64 running)
 diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
-index 9ea647835fd6..911b30506c83 100644
+index 9ea647835fd6..f38d1343e2bf 100644
 --- a/kernel/sched/sched.h
 +++ b/kernel/sched/sched.h
 @@ -2,6 +2,10 @@
@@ -7320,24 +7315,15 @@ index 9ea647835fd6..911b30506c83 100644
 #include <linux/sched.h>
 
 #include <linux/sched/autogroup.h>
-@@ -2480,15 +2484,8 @@ static inline void membarrier_switch_mm(struct rq *rq,
+@@ -2492,3 +2496,9 @@ static inline bool is_per_cpu_kthread(struct task_struct *p)
+return true;
 }
 #endif
-
--#ifdef CONFIG_SMP
--static inline bool is_per_cpu_kthread(struct task_struct *p)
++
 +static inline int task_running_nice(struct task_struct *p)
-{
-- if (!(p->flags & PF_KTHREAD))
-- return false;
--
-- if (p->nr_cpus_allowed != 1)
-- return false;
--
-- return true;
++{
 + return (task_nice(p) > 0);
-}
--#endif
++}
 +#endif /* !CONFIG_SCHED_BMQ */
 diff --git a/kernel/sched/stats.c b/kernel/sched/stats.c
 index 750fb3c67eed..0cc040a28d3f 100644
@@ -7565,79 +7551,3 @@ index b5e3496cf803..545be2c4f07c 100644
 };
 struct wakeup_test_data *x = data;
-diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
-index 911b30506c83336a94a2748a321060ab11e8f9a7..f38d1343e2bfeb728637be4959120a7b37351b1a 100644
---- a/kernel/sched/sched.h
-+++ b/kernel/sched/sched.h
-@@ -2484,6 +2484,19 @@ static inline void membarrier_switch_mm(struct rq *rq,
-}
-#endif
-+#ifdef CONFIG_SMP
-+static inline bool is_per_cpu_kthread(struct task_struct *p)
-+{
-+ if (!(p->flags & PF_KTHREAD))
-+ return false;
-+
-+ if (p->nr_cpus_allowed != 1)
-+ return false;
-+
-+ return true;
-+}
-+#endif
-+
-static inline int task_running_nice(struct task_struct *p)
-{
-return (task_nice(p) > 0);
-diff --git a/kernel/sched/bmq.c b/kernel/sched/bmq.c
-index e6d6fc98bead621f0ea7bfaf885b66af523413c1..fed5e132f2af67007f70851527a401b8f0f79cd1 100644
---- a/kernel/sched/bmq.c
-+++ b/kernel/sched/bmq.c
-@@ -5499,14 +5499,12 @@ static void sched_init_topology_cpumask_early(void)
-}
-}
--#define TOPOLOGY_CPUMASK(name, func) \
-- if (cpumask_and(chk, chk, func(cpu))) { \
-- per_cpu(sched_cpu_llc_mask, cpu) = chk; \
-- per_cpu(sd_llc_id, cpu) = cpumask_first(func(cpu)); \
-- printk(KERN_INFO "bmq: cpu#%d affinity mask - "#name" 0x%08lx", \
-+#define TOPOLOGY_CPUMASK(name, mask, last) \
-+ if (cpumask_and(chk, chk, mask)) \
-+ printk(KERN_INFO "bmq: cpu#%02d affinity mask: 0x%08lx - "#name,\
-cpu, (chk++)->bits[0]); \
-- } \
-- cpumask_complement(chk, func(cpu))
-+ if (!last) \
-+ cpumask_complement(chk, mask)
-static void sched_init_topology_cpumask(void)
-{
-@@ -5518,20 +5516,18 @@ static void sched_init_topology_cpumask(void)
-cpumask_complement(chk, cpumask_of(cpu));
-#ifdef CONFIG_SCHED_SMT
-- TOPOLOGY_CPUMASK(smt, topology_sibling_cpumask);
--#endif
--#ifdef CONFIG_SCHED_MC
-- TOPOLOGY_CPUMASK(coregroup, cpu_coregroup_mask);
-+ TOPOLOGY_CPUMASK(smt, topology_sibling_cpumask(cpu), false);
-#endif
-+ per_cpu(sd_llc_id, cpu) = cpumask_first(cpu_coregroup_mask(cpu));
-+ per_cpu(sched_cpu_llc_mask, cpu) = chk;
-+ TOPOLOGY_CPUMASK(coregroup, cpu_coregroup_mask(cpu), false);
-- TOPOLOGY_CPUMASK(core, topology_core_cpumask);
-+ TOPOLOGY_CPUMASK(core, topology_core_cpumask(cpu), false);
-- if (cpumask_and(chk, chk, cpu_online_mask))
-- printk(KERN_INFO "bmq: cpu#%d affinity mask - others 0x%08lx",
-- cpu, (chk++)->bits[0]);
-+ TOPOLOGY_CPUMASK(others, cpu_online_mask, true);
-per_cpu(sched_cpu_affinity_end_mask, cpu) = chk;
-- printk(KERN_INFO "bmq: cpu#%d llc_id = %d, llc_mask idx = %d\n",
-+ printk(KERN_INFO "bmq: cpu#%02d llc_id = %d, llc_mask idx = %d\n",
-cpu, per_cpu(sd_llc_id, cpu),
-(int) (per_cpu(sched_cpu_llc_mask, cpu) -
-&(per_cpu(sched_cpu_affinity_masks, cpu)[0])));