linux57-rc-tkg: Rebase our BMQ patchset against v5.6-r4
parent 3f8139e4a4
commit 360d1238c2
@@ -135,7 +135,7 @@ sha256sums=('6bdbef0aa3e1964d84ec388ca40159753b7377291acc6293549188aea2b4d3d6'
-            'cd225e86d72eaf6c31ef3d7b20df397f4cc44ddd04389850691292cdf292b204'
+            '9fad4a40449e09522899955762c8928ae17f4cdaa16e01239fd12592e9d58177'
             '965a517a283f265a012545fbb5cc9e516efc9f6166d2aa1baf7293a32a1086b7'
             '5d95eac8dd9a5866f943d47d155f48d52dfd1218ffa19f95548fb20c1a54a90e'
             '486445f9190e99d551d154a55d2db40b0b5e4dda202e7bbee17870a123dfbb8c'
             '49262ce4a8089fa70275aad742fc914baa28d9c384f710c9a62f64796d13e104'
             '7ba451d95d2bc07d983661a7e9602a9b239522c98d42197c706c01905f0efba2')
@@ -563,10 +563,10 @@ index 21fb5a5662b5..ac31239aa51a 100644
  obj-$(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) += cpufreq_schedutil.o
 diff --git a/kernel/sched/bmq.c b/kernel/sched/bmq.c
 new file mode 100644
-index 000000000000..ad0d073666ae
+index 000000000000..10560f7720e2
 --- /dev/null
 +++ b/kernel/sched/bmq.c
-@@ -0,0 +1,5980 @@
+@@ -0,0 +1,6026 @@
 +/*
 + * kernel/sched/bmq.c
 + *
@@ -639,7 +639,7 @@ index 000000000000..ad0d073666ae
 +
 +static inline void print_scheduler_version(void)
 +{
-+	printk(KERN_INFO "bmq: BMQ CPU Scheduler 5.6-r3 by Alfred Chen.\n");
++	printk(KERN_INFO "bmq: BMQ CPU Scheduler 5.6-r4 by Alfred Chen.\n");
 +}
 +
 +/**
@@ -1049,6 +1049,16 @@ index 000000000000..ad0d073666ae
 + * Add/Remove/Requeue task to/from the runqueue routines
 + * Context: rq->lock
 + */
++static inline void __dequeue_task(struct task_struct *p, struct rq *rq, int flags)
++{
++	psi_dequeue(p, flags & DEQUEUE_SLEEP);
++	sched_info_dequeued(rq, p);
++
++	list_del(&p->bmq_node);
++	if (list_empty(&rq->queue.heads[p->bmq_idx]))
++		clear_bit(p->bmq_idx, rq->queue.bitmap);
++}
++
 +static inline void dequeue_task(struct task_struct *p, struct rq *rq, int flags)
 +{
 +	lockdep_assert_held(&rq->lock);
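The `__dequeue_task()` helper added above carries only the psi/schedstat accounting and the raw list/bitmap manipulation; the full `dequeue_task()` keeps the watermark, nr_running and tick-dependency bookkeeping. As a rough user-space sketch of the bitmap-indexed queue these helpers operate on (illustrative names and types, not the patch's code):

#include <stdio.h>

#define NR_LEVELS 64

struct node { struct node *prev, *next; int prio; };

struct bmq {
	unsigned long long bitmap;	/* bit i set => heads[i] non-empty */
	struct node heads[NR_LEVELS];	/* sentinel head per priority level */
};

static void bmq_init(struct bmq *q)
{
	q->bitmap = 0;
	for (int i = 0; i < NR_LEVELS; i++)
		q->heads[i].prev = q->heads[i].next = &q->heads[i];
}

/* mirrors __enqueue_task(): tail-insert, then mark the level non-empty */
static void bmq_enqueue(struct bmq *q, struct node *n, int prio)
{
	struct node *head = &q->heads[prio];

	n->prio = prio;
	n->prev = head->prev;
	n->next = head;
	head->prev->next = n;
	head->prev = n;
	q->bitmap |= 1ULL << prio;
}

/* mirrors __dequeue_task(): unlink, clear the bit once the level drains */
static void bmq_dequeue(struct bmq *q, struct node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	if (q->heads[n->prio].next == &q->heads[n->prio])
		q->bitmap &= ~(1ULL << n->prio);
}

/* lowest set bit = highest-priority non-empty level */
static struct node *bmq_first(struct bmq *q)
{
	int bit = __builtin_ffsll(q->bitmap);

	return bit ? q->heads[bit - 1].next : NULL;
}

int main(void)
{
	struct bmq q;
	struct node a, b;

	bmq_init(&q);
	bmq_enqueue(&q, &a, 10);
	bmq_enqueue(&q, &b, 3);
	printf("first: prio %d\n", bmq_first(&q)->prio);	/* 3 */
	bmq_dequeue(&q, &b);
	printf("first: prio %d\n", bmq_first(&q)->prio);	/* 10 */
	return 0;
}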
@@ -1056,6 +1066,9 @@ index 000000000000..ad0d073666ae
 +	WARN_ONCE(task_rq(p) != rq, "bmq: dequeue task reside on cpu%d from cpu%d\n",
 +		  task_cpu(p), cpu_of(rq));
 +
++	psi_dequeue(p, flags & DEQUEUE_SLEEP);
++	sched_info_dequeued(rq, p);
++
 +	list_del(&p->bmq_node);
 +	if (list_empty(&rq->queue.heads[p->bmq_idx])) {
 +		clear_bit(p->bmq_idx, rq->queue.bitmap);
@@ -1068,9 +1081,16 @@ index 000000000000..ad0d073666ae
 +#endif
 +
 +	sched_update_tick_dependency(rq);
-+	psi_dequeue(p, flags & DEQUEUE_SLEEP);
++}
 +
-+	sched_info_dequeued(rq, p);
++static inline void __enqueue_task(struct task_struct *p, struct rq *rq, int flags)
++{
++	sched_info_queued(rq, p);
++	psi_enqueue(p, flags);
++
++	p->bmq_idx = task_sched_prio(p);
++	list_add_tail(&p->bmq_node, &rq->queue.heads[p->bmq_idx]);
++	set_bit(p->bmq_idx, rq->queue.bitmap);
 +}
 +
 +static inline void enqueue_task(struct task_struct *p, struct rq *rq, int flags)
@@ -1080,9 +1100,7 @@ index 000000000000..ad0d073666ae
 +	WARN_ONCE(task_rq(p) != rq, "bmq: enqueue task reside on cpu%d to cpu%d\n",
 +		  task_cpu(p), cpu_of(rq));
 +
-+	p->bmq_idx = task_sched_prio(p);
-+	list_add_tail(&p->bmq_node, &rq->queue.heads[p->bmq_idx]);
-+	set_bit(p->bmq_idx, rq->queue.bitmap);
++	__enqueue_task(p, rq, flags);
 +	update_sched_rq_watermark(rq);
 +	++rq->nr_running;
 +#ifdef CONFIG_SMP
@@ -1092,9 +1110,6 @@ index 000000000000..ad0d073666ae
 +
 +	sched_update_tick_dependency(rq);
 +
-+	sched_info_queued(rq, p);
-+	psi_enqueue(p, flags);
-+
 +	/*
 +	 * If in_iowait is set, the code below may not trigger any cpufreq
 +	 * utilization updates, so do it here explicitly with the IOWAIT flag
@@ -1303,6 +1318,17 @@ index 000000000000..ad0d073666ae
 +		return;
 +	}
 +
++DEFINE_PER_CPU(unsigned long, thermal_pressure);
++
++void arch_set_thermal_pressure(struct cpumask *cpus,
++			       unsigned long th_pressure)
++{
++	int cpu;
++
++	for_each_cpu(cpu, cpus)
++		WRITE_ONCE(per_cpu(thermal_pressure, cpu), th_pressure);
++}
++
 +	if (set_nr_and_not_polling(curr))
 +		smp_send_reschedule(cpu);
 +	else
@@ -3364,17 +3390,6 @@ index 000000000000..ad0d073666ae
 +	set_preempt_need_resched();
 +}
 +
-+DEFINE_PER_CPU(unsigned long, thermal_pressure);
-+
-+void arch_set_thermal_pressure(struct cpumask *cpus,
-+			       unsigned long th_pressure)
-+{
-+	int cpu;
-+
-+	for_each_cpu(cpu, cpus)
-+		WRITE_ONCE(per_cpu(thermal_pressure, cpu), th_pressure);
-+}
-+
 +/*
 + * This function gets called by the timer code, with HZ frequency.
 + * We call it with interrupts disabled.
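The two hunks above merely relocate the thermal-pressure block within the generated bmq.c; the code itself is unchanged. Functionally it broadcasts one pressure value into a per-CPU slot for each CPU in the mask. A rough user-space analogue, with C11 atomics standing in for WRITE_ONCE() (hypothetical, simplified):

#include <stdatomic.h>
#include <stdio.h>

#define NR_CPUS 8

/* one slot per CPU, like DEFINE_PER_CPU(unsigned long, thermal_pressure) */
static _Atomic unsigned long thermal_pressure[NR_CPUS];

static void set_thermal_pressure(unsigned long cpu_mask, unsigned long th_pressure)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++)	/* for_each_cpu() analogue */
		if (cpu_mask & (1UL << cpu))
			atomic_store_explicit(&thermal_pressure[cpu], th_pressure,
					      memory_order_relaxed);
}

int main(void)
{
	set_thermal_pressure(0x0f, 42);		/* CPUs 0-3 */
	printf("cpu0=%lu cpu4=%lu\n",
	       atomic_load_explicit(&thermal_pressure[0], memory_order_relaxed),
	       atomic_load_explicit(&thermal_pressure[4], memory_order_relaxed));
	return 0;
}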
@@ -3795,9 +3810,9 @@ index 000000000000..ad0d073666ae
 +	       (p = rq_next_bmq_task(skip, rq)) != rq->idle) {
 +		skip = rq_next_bmq_task(p, rq);
 +		if (cpumask_test_cpu(dest_cpu, p->cpus_ptr)) {
-+			dequeue_task(p, rq, 0);
++			__dequeue_task(p, rq, 0);
 +			set_task_cpu(p, dest_cpu);
-+			enqueue_task(p, dest_rq, 0);
++			__enqueue_task(p, dest_rq, 0);
 +			nr_migrated++;
 +		}
 +		nr_tries--;
@@ -3830,15 +3845,28 @@ index 000000000000..ad0d073666ae
 +			spin_acquire(&src_rq->lock.dep_map,
 +				     SINGLE_DEPTH_NESTING, 1, _RET_IP_);
 +
-+			nr_migrated = migrate_pending_tasks(src_rq, rq, cpu);
++			if ((nr_migrated = migrate_pending_tasks(src_rq, rq, cpu))) {
++				src_rq->nr_running -= nr_migrated;
++#ifdef CONFIG_SMP
++				if (src_rq->nr_running < 2)
++					cpumask_clear_cpu(i, &sched_rq_pending_mask);
++#endif
++				rq->nr_running += nr_migrated;
++#ifdef CONFIG_SMP
++				if (rq->nr_running > 1)
++					cpumask_set_cpu(cpu, &sched_rq_pending_mask);
++#endif
++				update_sched_rq_watermark(rq);
++				cpufreq_update_util(rq, 0);
++
 +			spin_release(&src_rq->lock.dep_map, _RET_IP_);
 +			do_raw_spin_unlock(&src_rq->lock);
 +
-+			if (nr_migrated) {
-+				cpufreq_update_util(rq, 0);
 +				return 1;
 +			}
 +
++			spin_release(&src_rq->lock.dep_map, _RET_IP_);
++			do_raw_spin_unlock(&src_rq->lock);
 +		}
 +	} while (++affinity_mask < end_mask);
 +
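The rewrite above batches the source/destination `nr_running` accounting around `migrate_pending_tasks()`, which per the earlier hunk now uses the raw `__dequeue_task()`/`__enqueue_task()` helpers: the counters, pending mask and watermark are touched once per batch rather than once per migrated task. A trivial sketch of that caller-side pattern (hypothetical, simplified):

#include <stdio.h>

struct rq { int nr_running; };

/* r4 style: report how many tasks moved; no counter updates in here */
static int migrate_pending_tasks(struct rq *src, int want)
{
	return src->nr_running < want ? src->nr_running : want;
}

int main(void)
{
	struct rq src = { .nr_running = 5 }, dst = { .nr_running = 1 };
	int nr_migrated = migrate_pending_tasks(&src, 3);

	if (nr_migrated) {
		/* batch accounting in the caller, as in the hunk above */
		src.nr_running -= nr_migrated;
		dst.nr_running += nr_migrated;
	}
	printf("src=%d dst=%d\n", src.nr_running, dst.nr_running);	/* src=2 dst=4 */
	return 0;
}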
@@ -3874,18 +3902,39 @@ index 000000000000..ad0d073666ae
 +
 +	if (unlikely(rq->skip)) {
 +		next = rq_runnable_task(rq);
++		if (next == rq->idle) {
 +#ifdef CONFIG_SMP
-+		if (next == rq->idle && take_other_rq_tasks(rq, cpu))
-+			next = rq_runnable_task(rq);
++			if (!take_other_rq_tasks(rq, cpu)) {
 +#endif
++				rq->skip = NULL;
++				schedstat_inc(rq->sched_goidle);
++				return next;
++#ifdef CONFIG_SMP
++			}
++			next = rq_runnable_task(rq);
++#endif
++		}
 +		rq->skip = NULL;
++#ifdef CONFIG_HIGH_RES_TIMERS
++		hrtick_start(rq, next->time_slice);
++#endif
 +		return next;
 +	}
 +
 +	next = rq_first_bmq_task(rq);
++	if (next == rq->idle) {
 +#ifdef CONFIG_SMP
-+	if (next == rq->idle && take_other_rq_tasks(rq, cpu))
-+		return rq_first_bmq_task(rq);
++		if (!take_other_rq_tasks(rq, cpu)) {
 +#endif
++			schedstat_inc(rq->sched_goidle);
++			return next;
++#ifdef CONFIG_SMP
++		}
++		next = rq_first_bmq_task(rq);
++#endif
++	}
++#ifdef CONFIG_HIGH_RES_TIMERS
++	hrtick_start(rq, next->time_slice);
++#endif
 +	return next;
 +}
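The restructured `choose_next_task()` above absorbs the `sched_goidle` accounting and the hrtick arming that the next hunk removes from `__schedule()`. A compilable control-flow sketch with stand-in helpers (hypothetical, not the patch's code):

#include <stdbool.h>
#include <stdio.h>

struct rq { int idle_task; int first; int goidle; };

static int  first_queued(struct rq *rq)       { return rq->first; }
static bool pull_from_other_rq(struct rq *rq) { (void)rq; return false; /* stub */ }
static void start_hrtick(int task)            { printf("hrtick armed for %d\n", task); }

static int pick_next(struct rq *rq)
{
	int next = first_queued(rq);

	if (next == rq->idle_task) {
		if (!pull_from_other_rq(rq)) {
			rq->goidle++;		/* was schedstat_inc() in __schedule() */
			return next;		/* going idle: no hrtick */
		}
		next = first_queued(rq);	/* retry after a successful pull */
	}
	start_hrtick(next);			/* was armed in __schedule() */
	return next;
}

int main(void)
{
	struct rq rq = { .idle_task = 0, .first = 7, .goidle = 0 };

	printf("picked task %d\n", pick_next(&rq));
	return 0;
}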
@@ -3985,13 +4034,6 @@ index 000000000000..ad0d073666ae
 +
 +		next = choose_next_task(rq, cpu, prev);
 +
-+		if (next == rq->idle)
-+			schedstat_inc(rq->sched_goidle);
-+#ifdef CONFIG_HIGH_RES_TIMERS
-+		else
-+			hrtick_start(rq, next->time_slice);
-+#endif
-+
 +		if (likely(prev != next)) {
 +			next->last_ran = rq->clock_task;
 +			rq->last_ts_switch = rq->clock;
@@ -6083,6 +6125,9 @@ index 000000000000..ad0d073666ae
 +	cpumask_t *chk;
 +
 +	for_each_online_cpu(cpu) {
++		/* take chance to reset time slice for idle tasks */
++		cpu_rq(cpu)->idle->time_slice = sched_timeslice_ns;
++
 +		chk = &(per_cpu(sched_cpu_affinity_masks, cpu)[0]);
 +
 +		cpumask_complement(chk, cpumask_of(cpu));
@@ -6119,6 +6164,7 @@ index 000000000000..ad0d073666ae
 +#else
 +void __init sched_init_smp(void)
 +{
++	cpu_rq(0)->idle->time_slice = sched_timeslice_ns;
 +}
 +#endif /* CONFIG_SMP */
 +