linux57-rc-tkg: Update our 5.7 BMQ patchset to 5.6-r3 base

Tk-Glitch 2020-04-20 14:50:52 +02:00
parent 9f480a2c03
commit 31f1593918
2 changed files with 60 additions and 157 deletions


@@ -131,7 +131,7 @@ sha256sums=('7a5369e141ec8d6c139a9357bf9a4e668bac7364e4bd96e9fafe11e462a6071a'
 '2d9260b80b43bbd605cf420d6bd53aa7262103dfd77196ba590ece5600b6dc0d'
 '9fad4a40449e09522899955762c8928ae17f4cdaa16e01239fd12592e9d58177'
 '965a517a283f265a012545fbb5cc9e516efc9f6166d2aa1baf7293a32a1086b7'
-'0ae69a3486ab9f24d1f51d7bb46ef90ca597934a05fd2a93bc4affa12025ac9a'
+'5d95eac8dd9a5866f943d47d155f48d52dfd1218ffa19f95548fb20c1a54a90e'
 '49262ce4a8089fa70275aad742fc914baa28d9c384f710c9a62f64796d13e104'
 '7ba451d95d2bc07d983661a7e9602a9b239522c98d42197c706c01905f0efba2')


@@ -563,10 +563,10 @@ index 21fb5a5662b5..ac31239aa51a 100644
 obj-$(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) += cpufreq_schedutil.o
 diff --git a/kernel/sched/bmq.c b/kernel/sched/bmq.c
 new file mode 100644
-index 000000000000..e6d6fc98bead
+index 000000000000..ad0d073666ae
 --- /dev/null
 +++ b/kernel/sched/bmq.c
-@@ -0,0 +1,5993 @@
+@@ -0,0 +1,5980 @@
 +/*
 + * kernel/sched/bmq.c
 + *
@@ -639,7 +639,7 @@ index 000000000000..e6d6fc98bead
 +
 +static inline void print_scheduler_version(void)
 +{
-+ printk(KERN_INFO "bmq: BMQ CPU Scheduler 5.6-r1 by Alfred Chen.\n");
++ printk(KERN_INFO "bmq: BMQ CPU Scheduler 5.6-r3 by Alfred Chen.\n");
 +}
 +
 +/**
@@ -2525,7 +2525,7 @@ index 000000000000..e6d6fc98bead
 + atomic_dec(&task_rq(p)->nr_iowait);
 + }
 +
-+ if(cpu_rq(smp_processor_id())->clock - p->last_ran > sched_timeslice_ns)
++ if(this_rq()->clock_task - p->last_ran > sched_timeslice_ns)
 + boost_task(p);
 +
 + cpu = select_task_rq(p);
@@ -2604,8 +2604,7 @@ index 000000000000..e6d6fc98bead
 +int sched_fork(unsigned long clone_flags, struct task_struct *p)
 +{
 + unsigned long flags;
-+ int cpu = get_cpu();
-+ struct rq *rq = this_rq();
++ struct rq *rq;
 +
 + __sched_fork(clone_flags, p);
 + /*
@@ -2643,11 +2642,20 @@ index 000000000000..e6d6fc98bead
 + p->boost_prio = (p->boost_prio < 0) ?
 + p->boost_prio + MAX_PRIORITY_ADJ : MAX_PRIORITY_ADJ;
 + /*
++ * The child is not yet in the pid-hash so no cgroup attach races,
++ * and the cgroup is pinned to this child due to cgroup_fork()
++ * is ran before sched_fork().
++ *
++ * Silence PROVE_RCU.
++ */
++ raw_spin_lock_irqsave(&p->pi_lock, flags);
++ /*
 + * Share the timeslice between parent and child, thus the
 + * total amount of pending timeslices in the system doesn't change,
 + * resulting in more scheduling fairness.
 + */
-+ raw_spin_lock_irqsave(&rq->lock, flags);
++ rq = this_rq();
++ raw_spin_lock(&rq->lock);
 + rq->curr->time_slice /= 2;
 + p->time_slice = rq->curr->time_slice;
 +#ifdef CONFIG_SCHED_HRTICK
@@ -2658,21 +2666,13 @@ index 000000000000..e6d6fc98bead
 + p->time_slice = sched_timeslice_ns;
 + resched_curr(rq);
 + }
-+ raw_spin_unlock_irqrestore(&rq->lock, flags);
++ raw_spin_unlock(&rq->lock);
 +
 + /*
-+ * The child is not yet in the pid-hash so no cgroup attach races,
-+ * and the cgroup is pinned to this child due to cgroup_fork()
-+ * is ran before sched_fork().
-+ *
-+ * Silence PROVE_RCU.
-+ */
-+ raw_spin_lock_irqsave(&p->pi_lock, flags);
-+ /*
 + * We're setting the CPU for the first time, we don't migrate,
 + * so use __set_task_cpu().
 + */
-+ __set_task_cpu(p, cpu);
++ __set_task_cpu(p, cpu_of(rq));
 + raw_spin_unlock_irqrestore(&p->pi_lock, flags);
 +
 +#ifdef CONFIG_SCHED_INFO
@@ -2681,7 +2681,6 @@ index 000000000000..e6d6fc98bead
 +#endif
 + init_task_preempt_count(p);
 +
-+ put_cpu();
 + return 0;
 +}
 +
@@ -3811,6 +3810,9 @@ index 000000000000..e6d6fc98bead
 +{
 + struct cpumask *affinity_mask, *end_mask;
 +
++ if (unlikely(!rq->online))
++ return 0;
++
 + if (cpumask_empty(&sched_rq_pending_mask))
 + return 0;
 +
@@ -3850,7 +3852,7 @@ index 000000000000..e6d6fc98bead
 + */
 +static inline void check_curr(struct task_struct *p, struct rq *rq)
 +{
-+ if (rq->idle == p)
++ if (unlikely(rq->idle == p))
 + return;
 +
 + update_curr(rq, p);
@@ -3873,9 +3875,8 @@ index 000000000000..e6d6fc98bead
 + if (unlikely(rq->skip)) {
 + next = rq_runnable_task(rq);
 +#ifdef CONFIG_SMP
-+ if (likely(rq->online))
-+ if (next == rq->idle && take_other_rq_tasks(rq, cpu))
-+ next = rq_runnable_task(rq);
++ if (next == rq->idle && take_other_rq_tasks(rq, cpu))
++ next = rq_runnable_task(rq);
 +#endif
 + rq->skip = NULL;
 + return next;
@@ -3883,25 +3884,12 @@ index 000000000000..e6d6fc98bead
 +
 + next = rq_first_bmq_task(rq);
 +#ifdef CONFIG_SMP
-+ if (likely(rq->online))
-+ if (next == rq->idle && take_other_rq_tasks(rq, cpu))
-+ return rq_first_bmq_task(rq);
++ if (next == rq->idle && take_other_rq_tasks(rq, cpu))
++ return rq_first_bmq_task(rq);
 +#endif
 + return next;
 +}
 +
-+static inline void set_rq_task(struct rq *rq, struct task_struct *p)
-+{
-+ p->last_ran = rq->clock_task;
-+
-+ if (unlikely(sched_timeslice_ns == p->time_slice))
-+ rq->last_ts_switch = rq->clock;
-+#ifdef CONFIG_HIGH_RES_TIMERS
-+ if (p != rq->idle)
-+ hrtick_start(rq, p->time_slice);
-+#endif
-+}
-+
 +/*
 + * schedule() is the main scheduler function.
 + *
@@ -3997,12 +3985,18 @@ index 000000000000..e6d6fc98bead
 +
 + next = choose_next_task(rq, cpu, prev);
 +
-+ set_rq_task(rq, next);
++ if (next == rq->idle)
++ schedstat_inc(rq->sched_goidle);
++#ifdef CONFIG_HIGH_RES_TIMERS
++ else
++ hrtick_start(rq, next->time_slice);
++#endif
 +
-+ if (prev != next) {
-+ if (MAX_PRIO == next->prio)
-+ schedstat_inc(rq->sched_goidle);
++ if (likely(prev != next)) {
++ next->last_ran = rq->clock_task;
++ rq->last_ts_switch = rq->clock;
 +
++ rq->nr_switches++;
 + /*
 + * RCU users of rcu_dereference(rq->curr) may not see
 + * changes to task_struct made by pick_next_task().
@@ -4023,18 +4017,17 @@ index 000000000000..e6d6fc98bead
 + * is a RELEASE barrier),
 + */
 + ++*switch_count;
-+ rq->nr_switches++;
-+ rq->last_ts_switch = rq->clock;
 +
 + trace_sched_switch(preempt, prev, next);
 +
 + /* Also unlocks the rq: */
 + rq = context_switch(rq, prev, next);
-+#ifdef CONFIG_SCHED_SMT
-+ sg_balance_check(rq);
-+#endif
 + } else
 + raw_spin_unlock_irq(&rq->lock);
++
++#ifdef CONFIG_SCHED_SMT
++ sg_balance_check(rq);
++#endif
 +}
 +
 +void __noreturn do_task_dead(void)
@@ -5736,8 +5729,6 @@ index 000000000000..e6d6fc98bead
 + idle->last_ran = rq->clock_task;
 + idle->state = TASK_RUNNING;
 + idle->flags |= PF_IDLE;
-+ /* Setting prio to illegal value shouldn't matter as it will never be de/enqueued */
-+ idle->prio = MAX_PRIO;
 + idle->bmq_idx = IDLE_TASK_SCHED_PRIO;
 + bmq_init_idle(&rq->queue, idle);
 +
@@ -6079,14 +6070,12 @@ index 000000000000..e6d6fc98bead
 + }
 +}
 +
-+#define TOPOLOGY_CPUMASK(name, func) \
-+ if (cpumask_and(chk, chk, func(cpu))) { \
-+ per_cpu(sched_cpu_llc_mask, cpu) = chk; \
-+ per_cpu(sd_llc_id, cpu) = cpumask_first(func(cpu)); \
-+ printk(KERN_INFO "bmq: cpu#%d affinity mask - "#name" 0x%08lx", \
++#define TOPOLOGY_CPUMASK(name, mask, last) \
++ if (cpumask_and(chk, chk, mask)) \
++ printk(KERN_INFO "bmq: cpu#%02d affinity mask: 0x%08lx - "#name,\
 + cpu, (chk++)->bits[0]); \
-+ } \
-+ cpumask_complement(chk, func(cpu))
++ if (!last) \
++ cpumask_complement(chk, mask)
 +
 +static void sched_init_topology_cpumask(void)
 +{
@@ -6098,20 +6087,18 @@ index 000000000000..e6d6fc98bead
 +
 + cpumask_complement(chk, cpumask_of(cpu));
 +#ifdef CONFIG_SCHED_SMT
-+ TOPOLOGY_CPUMASK(smt, topology_sibling_cpumask);
-+#endif
-+#ifdef CONFIG_SCHED_MC
-+ TOPOLOGY_CPUMASK(coregroup, cpu_coregroup_mask);
++ TOPOLOGY_CPUMASK(smt, topology_sibling_cpumask(cpu), false);
 +#endif
++ per_cpu(sd_llc_id, cpu) = cpumask_first(cpu_coregroup_mask(cpu));
++ per_cpu(sched_cpu_llc_mask, cpu) = chk;
++ TOPOLOGY_CPUMASK(coregroup, cpu_coregroup_mask(cpu), false);
 +
-+ TOPOLOGY_CPUMASK(core, topology_core_cpumask);
++ TOPOLOGY_CPUMASK(core, topology_core_cpumask(cpu), false);
 +
-+ if (cpumask_and(chk, chk, cpu_online_mask))
-+ printk(KERN_INFO "bmq: cpu#%d affinity mask - others 0x%08lx",
-+ cpu, (chk++)->bits[0]);
++ TOPOLOGY_CPUMASK(others, cpu_online_mask, true);
 +
 + per_cpu(sched_cpu_affinity_end_mask, cpu) = chk;
-+ printk(KERN_INFO "bmq: cpu#%d llc_id = %d, llc_mask idx = %d\n",
++ printk(KERN_INFO "bmq: cpu#%02d llc_id = %d, llc_mask idx = %d\n",
 + cpu, per_cpu(sd_llc_id, cpu),
 + (int) (per_cpu(sched_cpu_llc_mask, cpu) -
 + &(per_cpu(sched_cpu_affinity_masks, cpu)[0])));
@@ -6602,7 +6589,7 @@ new file mode 100644
 index 000000000000..fca42b270620
 --- /dev/null
 +++ b/kernel/sched/bmq_sched.h
-@@ -0,0 +1,536 @@
+@@ -0,0 +1,537 @@
 +#ifndef BMQ_SCHED_H
 +#define BMQ_SCHED_H
 +
@@ -6713,6 +6700,7 @@ index 000000000000..fca42b270620
 +#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
 + struct sched_avg avg_irq;
 +#endif
++
 +#ifdef CONFIG_SCHED_THERMAL_PRESSURE
 + struct sched_avg avg_thermal;
 +#endif
@@ -7335,7 +7323,7 @@ index afff644da065..4da52afaeff8 100644
 static inline int
 update_irq_load_avg(struct rq *rq, u64 running)
 diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
-index 9ea647835fd6..911b30506c83 100644
+index 9ea647835fd6..f38d1343e2bf 100644
 --- a/kernel/sched/sched.h
 +++ b/kernel/sched/sched.h
 @@ -2,6 +2,10 @@
@@ -7349,24 +7337,15 @@
 #include <linux/sched.h>
 #include <linux/sched/autogroup.h>
-@@ -2480,18 +2484,11 @@ static inline void membarrier_switch_mm(struct rq *rq,
+@@ -2492,6 +2496,12 @@ static inline bool is_per_cpu_kthread(struct task_struct *p)
+return true;
 }
 #endif
++
--#ifdef CONFIG_SMP
--static inline bool is_per_cpu_kthread(struct task_struct *p)
 +static inline int task_running_nice(struct task_struct *p)
-{
++{
-- if (!(p->flags & PF_KTHREAD))
-- return false;
--
-- if (p->nr_cpus_allowed != 1)
-- return false;
--
-- return true;
 + return (task_nice(p) > 0);
-}
++}
--#endif
 +#endif /* !CONFIG_SCHED_BMQ */
 void swake_up_all_locked(struct swait_queue_head *q);
@@ -7597,79 +7576,3 @@ index b5e3496cf803..545be2c4f07c 100644
 };
 struct wakeup_test_data *x = data;
-diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
-index 911b30506c83336a94a2748a321060ab11e8f9a7..f38d1343e2bfeb728637be4959120a7b37351b1a 100644
---- a/kernel/sched/sched.h
-+++ b/kernel/sched/sched.h
-@@ -2484,6 +2484,19 @@ static inline void membarrier_switch_mm(struct rq *rq,
-}
-#endif
-+#ifdef CONFIG_SMP
-+static inline bool is_per_cpu_kthread(struct task_struct *p)
-+{
-+ if (!(p->flags & PF_KTHREAD))
-+ return false;
-+
-+ if (p->nr_cpus_allowed != 1)
-+ return false;
-+
-+ return true;
-+}
-+#endif
-+
-static inline int task_running_nice(struct task_struct *p)
-{
-return (task_nice(p) > 0);
-diff --git a/kernel/sched/bmq.c b/kernel/sched/bmq.c
-index e6d6fc98bead621f0ea7bfaf885b66af523413c1..fed5e132f2af67007f70851527a401b8f0f79cd1 100644
---- a/kernel/sched/bmq.c
-+++ b/kernel/sched/bmq.c
-@@ -5499,14 +5499,12 @@ static void sched_init_topology_cpumask_early(void)
-}
-}
--#define TOPOLOGY_CPUMASK(name, func) \
-- if (cpumask_and(chk, chk, func(cpu))) { \
-- per_cpu(sched_cpu_llc_mask, cpu) = chk; \
-- per_cpu(sd_llc_id, cpu) = cpumask_first(func(cpu)); \
-- printk(KERN_INFO "bmq: cpu#%d affinity mask - "#name" 0x%08lx", \
-+#define TOPOLOGY_CPUMASK(name, mask, last) \
-+ if (cpumask_and(chk, chk, mask)) \
-+ printk(KERN_INFO "bmq: cpu#%02d affinity mask: 0x%08lx - "#name,\
-cpu, (chk++)->bits[0]); \
-- } \
-- cpumask_complement(chk, func(cpu))
-+ if (!last) \
-+ cpumask_complement(chk, mask)
-static void sched_init_topology_cpumask(void)
-{
-@@ -5518,20 +5516,18 @@ static void sched_init_topology_cpumask(void)
-cpumask_complement(chk, cpumask_of(cpu));
-#ifdef CONFIG_SCHED_SMT
-- TOPOLOGY_CPUMASK(smt, topology_sibling_cpumask);
--#endif
--#ifdef CONFIG_SCHED_MC
-- TOPOLOGY_CPUMASK(coregroup, cpu_coregroup_mask);
-+ TOPOLOGY_CPUMASK(smt, topology_sibling_cpumask(cpu), false);
-#endif
-+ per_cpu(sd_llc_id, cpu) = cpumask_first(cpu_coregroup_mask(cpu));
-+ per_cpu(sched_cpu_llc_mask, cpu) = chk;
-+ TOPOLOGY_CPUMASK(coregroup, cpu_coregroup_mask(cpu), false);
-- TOPOLOGY_CPUMASK(core, topology_core_cpumask);
-+ TOPOLOGY_CPUMASK(core, topology_core_cpumask(cpu), false);
-- if (cpumask_and(chk, chk, cpu_online_mask))
-- printk(KERN_INFO "bmq: cpu#%d affinity mask - others 0x%08lx",
-- cpu, (chk++)->bits[0]);
-+ TOPOLOGY_CPUMASK(others, cpu_online_mask, true);
-per_cpu(sched_cpu_affinity_end_mask, cpu) = chk;
-- printk(KERN_INFO "bmq: cpu#%d llc_id = %d, llc_mask idx = %d\n",
-+ printk(KERN_INFO "bmq: cpu#%02d llc_id = %d, llc_mask idx = %d\n",
-cpu, per_cpu(sd_llc_id, cpu),
-(int) (per_cpu(sched_cpu_llc_mask, cpu) -
-&(per_cpu(sched_cpu_affinity_masks, cpu)[0])));