Tk-Glitch 2020-04-20 14:51:42 +02:00
parent 31f1593918
commit dcca57bfc1
2 changed files with 36 additions and 44 deletions

PKGBUILD

@@ -89,7 +89,7 @@ pkgname=("${pkgbase}" "${pkgbase}-headers")
 _basekernel=5.6
 _sub=5
 pkgver="${_basekernel}"."${_sub}"
-pkgrel=10
+pkgrel=11
 pkgdesc='Linux-tkg'
 arch=('x86_64') # no i686 in here
 url="http://www.kernel.org/"
@@ -118,7 +118,7 @@ source=("https://www.kernel.org/pub/linux/kernel/v5.x/linux-${_basekernel}.tar.x
         #0008-5.6-bcachefs.patch
         0009-glitched-ondemand-bmq.patch
         0009-glitched-bmq.patch
-        0009-bmq_v5.6-r2.patch
+        0009-bmq_v5.6-r3.patch
         0011-ZFS-fix.patch
         #0012-linux-hardened.patch
         0013-tp_smapi_ec.patch
@@ -140,7 +140,7 @@ sha256sums=('e342b04a2aa63808ea0ef1baab28fc520bd031ef8cf93d9ee4a31d4058fcb622'
             '2d9260b80b43bbd605cf420d6bd53aa7262103dfd77196ba590ece5600b6dc0d'
             '9fad4a40449e09522899955762c8928ae17f4cdaa16e01239fd12592e9d58177'
             '965a517a283f265a012545fbb5cc9e516efc9f6166d2aa1baf7293a32a1086b7'
-            '0e1b569bf16a6c514710715eff06df04447378d1c067b853db0c454ac3c5e463'
+            '2340925904efa3594cc65a7bae4fbff233d5d8bc7db605ce08acaca7450d2471'
             '49262ce4a8089fa70275aad742fc914baa28d9c384f710c9a62f64796d13e104'
             '4a83c17a33779df304ee44ad8e736069b25d917baec429ecdd193fe1a9a63576')
@@ -243,7 +243,7 @@ prepare() {
        patch -Np1 -i ../0005-glitched-pds.patch
      elif [ "${_cpusched}" == "bmq" ]; then
        # BMQ
-       patch -Np1 -i ../0009-bmq_v5.6-r2.patch
+       patch -Np1 -i ../0009-bmq_v5.6-r3.patch
        if [ "${_aggressive_ondemand}" == "true" ]; then
          patch -Np1 -i ../0009-glitched-ondemand-bmq.patch
        fi

0009-bmq_v5.6-r3.patch

@@ -571,10 +571,10 @@ index 21fb5a5662b5..ac31239aa51a 100644
 obj-$(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) += cpufreq_schedutil.o
 diff --git a/kernel/sched/bmq.c b/kernel/sched/bmq.c
 new file mode 100644
-index 000000000000..b37608bbc23a
+index 000000000000..ad0d073666ae
 --- /dev/null
 +++ b/kernel/sched/bmq.c
-@@ -0,0 +1,5977 @@
+@@ -0,0 +1,5969 @@
 +/*
 + * kernel/sched/bmq.c
 + *
@@ -647,7 +647,7 @@ index 000000000000..b37608bbc23a
 +
 +static inline void print_scheduler_version(void)
 +{
-+	printk(KERN_INFO "bmq: BMQ CPU Scheduler 5.6-r2 by Alfred Chen.\n");
++	printk(KERN_INFO "bmq: BMQ CPU Scheduler 5.6-r3 by Alfred Chen.\n");
 +}
 +
 +/**
@@ -2533,7 +2533,7 @@ index 000000000000..b37608bbc23a
 +		atomic_dec(&task_rq(p)->nr_iowait);
 +	}
 +
-+	if(cpu_rq(smp_processor_id())->clock - p->last_ran > sched_timeslice_ns)
++	if(this_rq()->clock_task - p->last_ran > sched_timeslice_ns)
 +		boost_task(p);
 +
 +	cpu = select_task_rq(p);
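
A note on the hunk above: r2 measured sleep time as cpu_rq(smp_processor_id())->clock minus p->last_ran, mixing the raw runqueue clock with a timestamp that is taken from clock_task (which excludes IRQ and steal time), while r3 keeps both operands in the same clock domain. A self-contained model of the check, using illustrative names and a made-up slice length (nothing below is a kernel symbol):

#include <stdbool.h>
#include <stdint.h>

/* Example slice length only; BMQ's sched_timeslice_ns is configurable. */
static const uint64_t slice_ns = 4000000ULL;

/* Boost a waking task only if it slept for more than one full slice.
 * Both timestamps come from the same (task) clock, so interrupt time
 * can no longer inflate the apparent sleep as it could in r2. */
static bool should_boost(uint64_t clock_task_now, uint64_t last_ran)
{
	return clock_task_now - last_ran > slice_ns;
}
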
@@ -2612,8 +2612,7 @@ index 000000000000..b37608bbc23a
 +int sched_fork(unsigned long clone_flags, struct task_struct *p)
 +{
 +	unsigned long flags;
-+	int cpu = get_cpu();
-+	struct rq *rq = this_rq();
++	struct rq *rq;
 +
 +	__sched_fork(clone_flags, p);
 +	/*
@@ -2651,11 +2650,20 @@ index 000000000000..b37608bbc23a
 +	p->boost_prio = (p->boost_prio < 0) ?
 +		p->boost_prio + MAX_PRIORITY_ADJ : MAX_PRIORITY_ADJ;
 +	/*
++	 * The child is not yet in the pid-hash so no cgroup attach races,
++	 * and the cgroup is pinned to this child due to cgroup_fork()
++	 * is ran before sched_fork().
++	 *
++	 * Silence PROVE_RCU.
++	 */
++	raw_spin_lock_irqsave(&p->pi_lock, flags);
++	/*
 +	 * Share the timeslice between parent and child, thus the
 +	 * total amount of pending timeslices in the system doesn't change,
 +	 * resulting in more scheduling fairness.
 +	 */
-+	raw_spin_lock_irqsave(&rq->lock, flags);
++	rq = this_rq();
++	raw_spin_lock(&rq->lock);
 +	rq->curr->time_slice /= 2;
 +	p->time_slice = rq->curr->time_slice;
 +#ifdef CONFIG_SCHED_HRTICK
@@ -2666,21 +2674,13 @@ index 000000000000..b37608bbc23a
 +		p->time_slice = sched_timeslice_ns;
 +		resched_curr(rq);
 +	}
-+	raw_spin_unlock_irqrestore(&rq->lock, flags);
++	raw_spin_unlock(&rq->lock);
 +
 +	/*
-+	 * The child is not yet in the pid-hash so no cgroup attach races,
-+	 * and the cgroup is pinned to this child due to cgroup_fork()
-+	 * is ran before sched_fork().
-+	 *
-+	 * Silence PROVE_RCU.
-+	 */
-+	raw_spin_lock_irqsave(&p->pi_lock, flags);
-+	/*
 +	 * We're setting the CPU for the first time, we don't migrate,
 +	 * so use __set_task_cpu().
 +	 */
-+	__set_task_cpu(p, cpu);
++	__set_task_cpu(p, cpu_of(rq));
 +	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
 +
 +#ifdef CONFIG_SCHED_INFO
@@ -2689,7 +2689,6 @@ index 000000000000..b37608bbc23a
 +#endif
 +	init_task_preempt_count(p);
 +
-+	put_cpu();
 +	return 0;
 +}
 +
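
Taken together, the sched_fork() hunks above drop the get_cpu()/put_cpu() pair, take p->pi_lock before rq->lock instead of after it, and derive the child's CPU from the locked rq via cpu_of(rq). A compilable userspace model of the resulting lock nesting, with stub types and pthread mutexes standing in for raw spinlocks (none of these names are kernel APIs):

#include <pthread.h>

struct rq   { pthread_mutex_t lock; int cpu; int curr_time_slice; };
struct task { pthread_mutex_t pi_lock; int time_slice; int cpu; };

/* Mirrors the r3 ordering: pi_lock (outer) -> rq->lock (inner), and the
 * CPU assignment reads the rq that was actually locked rather than a
 * value captured before the locks were taken, as r2's get_cpu() did. */
static void fork_timeslice(struct task *p, struct rq *rq)
{
	pthread_mutex_lock(&p->pi_lock);
	pthread_mutex_lock(&rq->lock);
	rq->curr_time_slice /= 2;            /* parent donates half its slice */
	p->time_slice = rq->curr_time_slice; /* child inherits the other half */
	pthread_mutex_unlock(&rq->lock);
	p->cpu = rq->cpu;                    /* ~ __set_task_cpu(p, cpu_of(rq)) */
	pthread_mutex_unlock(&p->pi_lock);
}
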
@@ -3850,7 +3849,7 @@ index 000000000000..b37608bbc23a
 + */
 +static inline void check_curr(struct task_struct *p, struct rq *rq)
 +{
-+	if (rq->idle == p)
++	if (unlikely(rq->idle == p))
 +		return;
 +
 +	update_curr(rq, p);
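
The only change in this hunk wraps the idle-task early return in unlikely(). In the kernel these hints expand to __builtin_expect, steering the compiler's block layout toward the hot path; a standalone equivalent for illustration:

#define likely(x)   __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)

/* The compiler lays out the common case (p is not the idle task) as the
 * fall-through, keeping the hot path dense in the instruction cache. */
int account_if_not_idle(int is_idle)
{
	if (unlikely(is_idle))
		return 0;
	return 1; /* update_curr() would run here */
}
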
@@ -3888,21 +3887,6 @@ index 000000000000..b37608bbc23a
 +	return next;
 +}
 +
-+static inline void set_rq_task(struct rq *rq, struct task_struct *p)
-+{
-+	p->last_ran = rq->clock_task;
-+
-+	if (unlikely(sched_timeslice_ns == p->time_slice))
-+		rq->last_ts_switch = rq->clock;
-+
-+	if (p == rq->idle)
-+		schedstat_inc(rq->sched_goidle);
-+#ifdef CONFIG_HIGH_RES_TIMERS
-+	else
-+		hrtick_start(rq, p->time_slice);
-+#endif
-+}
-+
 +/*
 + * schedule() is the main scheduler function.
 + *
@@ -3998,9 +3982,17 @@ index 000000000000..b37608bbc23a
 +
 +	next = choose_next_task(rq, cpu, prev);
 +
-+	set_rq_task(rq, next);
++	if (next == rq->idle)
++		schedstat_inc(rq->sched_goidle);
++#ifdef CONFIG_HIGH_RES_TIMERS
++	else
++		hrtick_start(rq, next->time_slice);
++#endif
++
++	if (likely(prev != next)) {
++		next->last_ran = rq->clock_task;
++		rq->last_ts_switch = rq->clock;
 +
-+	if (prev != next) {
 +		rq->nr_switches++;
 +		/*
 +		 * RCU users of rcu_dereference(rq->curr) may not see
@@ -4022,17 +4014,17 @@ index 000000000000..b37608bbc23a
 +		 * is a RELEASE barrier),
 +		 */
 +		++*switch_count;
-+		rq->last_ts_switch = rq->clock;
 +
 +		trace_sched_switch(preempt, prev, next);
 +
 +		/* Also unlocks the rq: */
 +		rq = context_switch(rq, prev, next);
-+#ifdef CONFIG_SCHED_SMT
-+		sg_balance_check(rq);
-+#endif
 +	} else
 +		raw_spin_unlock_irq(&rq->lock);
++
++#ifdef CONFIG_SCHED_SMT
++	sg_balance_check(rq);
++#endif
 +}
 +
 +void __noreturn do_task_dead(void)
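
Summarizing the last three hunks: set_rq_task() is removed, its schedstat/hrtick half now runs on next right after choose_next_task(), the last_ran/last_ts_switch stamps happen only when a real switch occurs, and sg_balance_check() moves out of the prev != next branch so SMT sibling balancing also runs when the same task keeps the CPU. A compilable sketch of the reworked tail, with stubbed helpers and illustrative types only (not the kernel's definitions):

#include <stdint.h>

struct task { uint64_t last_ran; uint64_t time_slice; };
struct rq {
	uint64_t clock, clock_task, last_ts_switch, nr_switches;
	struct task *idle;
};

/* Stubs standing in for the kernel calls used by the hunks. */
static void hrtick_start(struct rq *rq, uint64_t ns) { (void)rq; (void)ns; }
static void inc_sched_goidle(struct rq *rq) { (void)rq; }
static void context_switch(struct rq *rq, struct task *prev,
			   struct task *next) { (void)rq; (void)prev; (void)next; }
static void sg_balance_check(struct rq *rq) { (void)rq; }

static void schedule_tail_model(struct rq *rq, struct task *prev,
				struct task *next)
{
	if (next == rq->idle)
		inc_sched_goidle(rq);           /* going idle: no hrtick timer */
	else
		hrtick_start(rq, next->time_slice);

	if (prev != next) {                     /* bookkeeping only on a real switch */
		next->last_ran = rq->clock_task;
		rq->last_ts_switch = rq->clock;
		rq->nr_switches++;
		context_switch(rq, prev, next);
	}

	sg_balance_check(rq);                   /* r3: runs on both paths */
}
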