linux57-tkg: Update PDS patchset (cleanups + sync)

Tk-Glitch 2020-06-02 19:25:36 +02:00
parent 4e3bd6b828
commit 6317a54ef1
2 changed files with 99 additions and 95 deletions


@ -130,7 +130,7 @@ sha256sums=('de8163bb62f822d84f7a3983574ec460060bf013a78ff79cd7c979ff1ec1d7e0'
'7058e57fd68367b029adc77f2a82928f1433daaf02c8c279cb2d13556c8804d7'
'62496f9ca788996181ef145f96ad26291282fcc3fb95cdc04080dcf84365be33'
'7fd8e776209dac98627453fda754bdf9aff4a09f27cb0b3766d7983612eb3c74'
'3767c745aed00798efcdc6c57dcdc5ca84573863a9b76c1461eda15ff6f62037'
'bd0460d436e3ade46ea20b0087f8ccf62dce00f96de76f04b74620f81e63a2dc'
'19661ec0d39f9663452b34433214c755179894528bf73a42f6ba52ccf572832a'
'cd225e86d72eaf6c31ef3d7b20df397f4cc44ddd04389850691292cdf292b204'
'9fad4a40449e09522899955762c8928ae17f4cdaa16e01239fd12592e9d58177'


@ -1,7 +1,7 @@
From 89067d28ca90681fc6cf108de79b9aedb93dfa9d Mon Sep 17 00:00:00 2001
From 68f1a9541ef3185b1021e8e54d2712c7039418d7 Mon Sep 17 00:00:00 2001
From: Tk-Glitch <ti3nou@gmail.com>
Date: Thu, 16 Apr 2020 5:50:12 +0200
Subject: PDS 099o, 5.7 rebase
Date: Tue, 2 Jun 2020 18:55:09 +0200
Subject: PDS 099o, 5.7 rebase (release/v2)
diff --git a/Documentation/scheduler/sched-PDS-mq.txt b/Documentation/scheduler/sched-PDS-mq.txt
@ -83,10 +83,10 @@ index f18d5067cd0f..fe489fc01c73 100644
* Frequency of the spu scheduler tick. By default we do one SPU scheduler
* tick for every 10 CPU scheduler ticks.
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 8ef85139553f..9d44d8d78259 100644
index 2d3f963fd6f1..5f41ead019b1 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1034,6 +1034,22 @@ config NR_CPUS
@@ -1006,6 +1006,22 @@ config NR_CPUS
config SCHED_SMT
def_bool y if SMP
@ -110,7 +110,7 @@ index 8ef85139553f..9d44d8d78259 100644
def_bool y
prompt "Multi-core scheduler support"
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index b66e81c06a57..a294f8f5fd75 100644
index 737ff3b9c2c0..b5bc5a1b6de7 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -28,8 +28,8 @@ struct cs_dbs_tuners {
@ -125,7 +125,7 @@ index b66e81c06a57..a294f8f5fd75 100644
#define DEF_SAMPLING_DOWN_FACTOR (1)
#define MAX_SAMPLING_DOWN_FACTOR (10)
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index dced033875bf..d2cd03766b09 100644
index 82a4d37ddecb..1130e0f5db72 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -18,7 +18,7 @@
@ -147,10 +147,10 @@ index dced033875bf..d2cd03766b09 100644
* proportional to load.
*/
diff --git a/fs/proc/base.c b/fs/proc/base.c
index ebea9501afb8..51c9346a69fe 100644
index eb2255e95f62..62b8cedbccb6 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -477,7 +477,7 @@ static int proc_pid_schedstat(struct seq_file *m, struct pid_namespace *ns,
@@ -479,7 +479,7 @@ static int proc_pid_schedstat(struct seq_file *m, struct pid_namespace *ns,
seq_puts(m, "0 0 0\n");
else
seq_printf(m, "%llu %llu %lu\n",
@ -176,10 +176,10 @@ index 2c620d7ac432..1a7987c40c80 100644
/* Attach to the init_task data structure for proper alignment */
#ifdef CONFIG_ARCH_TASK_STRUCT_ON_STACK
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
index 1b6d31da7cbc..dea181bdb1dd 100644
index fed6ba96c527..f03a5ee419a1 100644
--- a/include/linux/jiffies.h
+++ b/include/linux/jiffies.h
@@ -171,7 +171,7 @@ static inline u64 get_jiffies_64(void)
@@ -169,7 +169,7 @@ static inline u64 get_jiffies_64(void)
* Have the 32 bit jiffies value wrap 5 minutes after boot
* so jiffies wrap bugs show up earlier.
*/
@ -189,7 +189,7 @@ index 1b6d31da7cbc..dea181bdb1dd 100644
/*
* Change timeval to jiffies, trying to avoid the
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 67a1d86981a9..8268cad4b0a2 100644
index 4418f5cb8324..2b51afac5b06 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -31,6 +31,7 @@
@ -200,7 +200,7 @@ index 67a1d86981a9..8268cad4b0a2 100644
/* task_struct member predeclarations (sorted alphabetically): */
struct audit_context;
@@ -644,9 +645,13 @@ struct task_struct {
@@ -652,9 +653,13 @@ struct task_struct {
unsigned int flags;
unsigned int ptrace;
@ -215,7 +215,7 @@ index 67a1d86981a9..8268cad4b0a2 100644
#ifdef CONFIG_THREAD_INFO_IN_TASK
/* Current CPU: */
unsigned int cpu;
@@ -655,6 +660,7 @@ struct task_struct {
@@ -663,6 +668,7 @@ struct task_struct {
unsigned long wakee_flip_decay_ts;
struct task_struct *last_wakee;
@ -223,7 +223,7 @@ index 67a1d86981a9..8268cad4b0a2 100644
/*
* recent_used_cpu is initially set as the last CPU used by a task
* that wakes affine another task. Waker/wakee relationships can
@@ -663,6 +669,7 @@ struct task_struct {
@@ -671,6 +677,7 @@ struct task_struct {
* used CPU that may be idle.
*/
int recent_used_cpu;
@ -231,7 +231,7 @@ index 67a1d86981a9..8268cad4b0a2 100644
int wake_cpu;
#endif
int on_rq;
@@ -672,13 +679,27 @@ struct task_struct {
@@ -680,13 +687,27 @@ struct task_struct {
int normal_prio;
unsigned int rt_priority;
@ -260,7 +260,7 @@ index 67a1d86981a9..8268cad4b0a2 100644
#ifdef CONFIG_UCLAMP_TASK
/* Clamp values requested for a scheduling entity */
@@ -1283,6 +1304,29 @@ struct task_struct {
@@ -1306,6 +1327,29 @@ struct task_struct {
*/
};
@ -364,10 +364,10 @@ index e5af028c08b4..a96012e6f15e 100644
}
diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h
index 4b1c3b664f51..f186b8119ad6 100644
index 38359071236a..90328ccd527f 100644
--- a/include/linux/sched/task.h
+++ b/include/linux/sched/task.h
@@ -99,7 +99,7 @@ extern long kernel_wait4(pid_t, int __user *, int, struct rusage *);
@@ -106,7 +106,7 @@ extern long kernel_wait4(pid_t, int __user *, int, struct rusage *);
extern void free_task(struct task_struct *tsk);
/* sched_exec is called by processes performing an exec */
@ -560,10 +560,10 @@ index 000000000000..713fedd8034f
+}
+#endif /* _LINUX_SKIP_LIST_H */
diff --git a/include/uapi/linux/sched.h b/include/uapi/linux/sched.h
index 25b4fa00bad1..fc0aabdce15f 100644
index 3bac0a8ceab2..d6d384ddb57d 100644
--- a/include/uapi/linux/sched.h
+++ b/include/uapi/linux/sched.h
@@ -84,7 +84,10 @@ struct clone_args {
@@ -115,7 +115,10 @@ struct clone_args {
#define SCHED_FIFO 1
#define SCHED_RR 2
#define SCHED_BATCH 3
@ -576,10 +576,10 @@ index 25b4fa00bad1..fc0aabdce15f 100644
#define SCHED_DEADLINE 6
diff --git a/init/Kconfig b/init/Kconfig
index b4daad2bac23..ee3b9957cf3b 100644
index 74a5ac65644f..e4fd406b58dd 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -73,6 +73,21 @@ config THREAD_INFO_IN_TASK
@@ -61,6 +61,21 @@ config THREAD_INFO_IN_TASK
menu "General setup"
@ -601,7 +601,7 @@ index b4daad2bac23..ee3b9957cf3b 100644
config BROKEN
bool
@@ -802,6 +817,7 @@ config NUMA_BALANCING
@@ -777,6 +792,7 @@ config NUMA_BALANCING
depends on ARCH_SUPPORTS_NUMA_BALANCING
depends on !ARCH_WANT_NUMA_VARIABLE_LOCALITY
depends on SMP && NUMA && MIGRATION
@ -609,7 +609,7 @@ index b4daad2bac23..ee3b9957cf3b 100644
help
This option adds support for automatic NUMA aware memory/task placement.
The mechanism is quite primitive and is based on migrating memory when
@@ -903,7 +919,7 @@ menuconfig CGROUP_SCHED
@@ -878,7 +894,7 @@ menuconfig CGROUP_SCHED
bandwidth allocation to such task groups. It uses cgroups to group
tasks.
@ -618,7 +618,7 @@ index b4daad2bac23..ee3b9957cf3b 100644
config FAIR_GROUP_SCHED
bool "Group scheduling for SCHED_OTHER"
depends on CGROUP_SCHED
@@ -1032,6 +1048,7 @@ config CGROUP_DEVICE
@@ -1007,6 +1023,7 @@ config CGROUP_DEVICE
config CGROUP_CPUACCT
bool "Simple CPU accounting controller"
@ -626,7 +626,7 @@ index b4daad2bac23..ee3b9957cf3b 100644
help
Provides a simple controller for monitoring the
total CPU consumed by the tasks in a cgroup.
@@ -1150,6 +1167,7 @@ config CHECKPOINT_RESTORE
@@ -1134,6 +1151,7 @@ config CHECKPOINT_RESTORE
config SCHED_AUTOGROUP
bool "Automatic process group scheduling"
@ -635,10 +635,10 @@ index b4daad2bac23..ee3b9957cf3b 100644
select CGROUP_SCHED
select FAIR_GROUP_SCHED
diff --git a/init/init_task.c b/init/init_task.c
index 9e5cbe5eab7b..89787e2feb60 100644
index bd403ed3e418..162d3deddd45 100644
--- a/init/init_task.c
+++ b/init/init_task.c
@@ -58,6 +58,126 @@ struct task_struct init_task
@@ -59,6 +59,126 @@ struct task_struct init_task
__init_task_data
#endif
= {
@ -765,7 +765,7 @@ index 9e5cbe5eab7b..89787e2feb60 100644
#ifdef CONFIG_THREAD_INFO_IN_TASK
.thread_info = INIT_THREAD_INFO(init_task),
.stack_refcount = REFCOUNT_INIT(1),
@@ -181,6 +301,7 @@ struct task_struct init_task
@@ -182,6 +302,7 @@ struct task_struct init_task
#ifdef CONFIG_SECURITY
.security = NULL,
#endif
@ -774,10 +774,10 @@ index 9e5cbe5eab7b..89787e2feb60 100644
EXPORT_SYMBOL(init_task);
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index c87ee6412b36..4045c8532027 100644
index 729d3a5c772e..10a7c52b90d5 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -632,7 +632,7 @@ static int validate_change(struct cpuset *cur, struct cpuset *trial)
@@ -636,7 +636,7 @@ static int validate_change(struct cpuset *cur, struct cpuset *trial)
return ret;
}
@ -786,7 +786,7 @@ index c87ee6412b36..4045c8532027 100644
/*
* Helper routine for generate_sched_domains().
* Do cpusets a, b have overlapping effective cpus_allowed masks?
@@ -1007,7 +1007,7 @@ static void rebuild_sched_domains_locked(void)
@@ -1009,7 +1009,7 @@ static void rebuild_sched_domains_locked(void)
/* Have scheduler rebuild the domains */
partition_and_rebuild_sched_domains(ndoms, doms, attr);
}
@ -809,10 +809,10 @@ index 27725754ac99..769d773c7182 100644
d->cpu_count += t1;
diff --git a/kernel/exit.c b/kernel/exit.c
index a46a50d67002..58043176b285 100644
index ce2a75bc0ade..f0f864bc1ab9 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -131,7 +131,7 @@ static void __exit_signal(struct task_struct *tsk)
@@ -122,7 +122,7 @@ static void __exit_signal(struct task_struct *tsk)
sig->curr_target = next_thread(tsk);
}
@ -821,7 +821,7 @@ index a46a50d67002..58043176b285 100644
sizeof(unsigned long long));
/*
@@ -152,7 +152,7 @@ static void __exit_signal(struct task_struct *tsk)
@@ -143,7 +143,7 @@ static void __exit_signal(struct task_struct *tsk)
sig->inblock += task_io_get_inblock(tsk);
sig->oublock += task_io_get_oublock(tsk);
task_io_accounting_add(&sig->ioac, &tsk->ioac);
@ -831,7 +831,7 @@ index a46a50d67002..58043176b285 100644
__unhash_process(tsk, group_dead);
write_sequnlock(&sig->stats_lock);
diff --git a/kernel/livepatch/transition.c b/kernel/livepatch/transition.c
index cdf318d86dd6..baa525865d5c 100644
index f6310f848f34..b5de980c7d4e 100644
--- a/kernel/livepatch/transition.c
+++ b/kernel/livepatch/transition.c
@@ -306,7 +306,11 @@ static bool klp_try_switch_task(struct task_struct *task)
@ -847,7 +847,7 @@ index cdf318d86dd6..baa525865d5c 100644
"%s: %s:%d is running\n", __func__, task->comm,
task->pid);
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index 2874bf556162..fad8a279fdfa 100644
index c9f090d64f00..063d15a1ab8b 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -229,7 +229,7 @@ static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock,
@ -910,10 +910,10 @@ index 21fb5a5662b5..8ebe4e33fb5f 100644
obj-$(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) += cpufreq_schedutil.o
obj-$(CONFIG_MEMBARRIER) += membarrier.o
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 86800b4d5453..07f278dc3137 100644
index 7fbaee24c824..28377ad56248 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -185,6 +185,7 @@ static unsigned int get_next_freq(struct sugov_policy *sg_policy,
@@ -183,6 +183,7 @@ static unsigned int get_next_freq(struct sugov_policy *sg_policy,
return cpufreq_driver_resolve_freq(policy, freq);
}
@ -921,7 +921,7 @@ index 86800b4d5453..07f278dc3137 100644
/*
* This function computes an effective utilization for the given CPU, to be
* used for frequency selection given the linear relation: f = u * f_max.
@@ -302,6 +303,13 @@ static unsigned long sugov_get_util(struct sugov_cpu *sg_cpu)
@@ -300,6 +301,13 @@ static unsigned long sugov_get_util(struct sugov_cpu *sg_cpu)
return schedutil_cpu_util(sg_cpu->cpu, util, max, FREQUENCY_UTIL, NULL);
}
@ -935,7 +935,7 @@ index 86800b4d5453..07f278dc3137 100644
/**
* sugov_iowait_reset() - Reset the IO boost status of a CPU.
@@ -445,7 +453,9 @@ static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; }
@@ -443,7 +451,9 @@ static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; }
*/
static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu, struct sugov_policy *sg_policy)
{
@ -945,7 +945,7 @@ index 86800b4d5453..07f278dc3137 100644
sg_policy->limits_changed = true;
}
@@ -688,6 +698,7 @@ static int sugov_kthread_create(struct sugov_policy *sg_policy)
@@ -686,6 +696,7 @@ static int sugov_kthread_create(struct sugov_policy *sg_policy)
}
ret = sched_setattr_nocheck(thread, &attr);
@ -953,15 +953,15 @@ index 86800b4d5453..07f278dc3137 100644
if (ret) {
kthread_stop(thread);
pr_warn("%s: failed to set SCHED_DEADLINE\n", __func__);
@@ -918,6 +929,7 @@ static int __init sugov_register(void)
fs_initcall(sugov_register);
@@ -916,6 +927,7 @@ static int __init sugov_register(void)
core_initcall(sugov_register);
#ifdef CONFIG_ENERGY_MODEL
+#ifndef CONFIG_SCHED_PDS
extern bool sched_energy_update;
extern struct mutex sched_energy_mutex;
@@ -948,4 +960,10 @@ void sched_cpufreq_governor_change(struct cpufreq_policy *policy,
@@ -946,4 +958,10 @@ void sched_cpufreq_governor_change(struct cpufreq_policy *policy,
}
}
@ -973,7 +973,7 @@ index 86800b4d5453..07f278dc3137 100644
+#endif
#endif
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index 46ed4e1383e2..0a9548ee995c 100644
index ff9435dee1df..1377ea3d1b76 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -122,7 +122,12 @@ void account_user_time(struct task_struct *p, u64 cputime)
@ -1019,7 +1019,7 @@ index 46ed4e1383e2..0a9548ee995c 100644
task_rq_unlock(rq, t, &rf);
return ns;
@@ -663,7 +672,7 @@ void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev,
@@ -658,7 +667,7 @@ void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev,
void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
{
struct task_cputime cputime = {
@ -1029,10 +1029,10 @@ index 46ed4e1383e2..0a9548ee995c 100644
task_cputime(p, &cputime.utime, &cputime.stime);
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index f65ef1e2f204..454fa7e460e3 100644
index b743bf38f08f..16e5754af1cf 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -355,6 +355,7 @@ void cpu_startup_entry(enum cpuhp_state state)
@@ -361,6 +361,7 @@ void cpu_startup_entry(enum cpuhp_state state)
do_idle();
}
@ -1040,17 +1040,17 @@ index f65ef1e2f204..454fa7e460e3 100644
/*
* idle-task scheduling class.
*/
@@ -479,3 +480,4 @@ const struct sched_class idle_sched_class = {
@@ -481,3 +482,4 @@ const struct sched_class idle_sched_class = {
.switched_to = switched_to_idle,
.update_curr = update_curr_idle,
};
+#endif
diff --git a/kernel/sched/pds.c b/kernel/sched/pds.c
new file mode 100644
index 000000000000..aefbd9cebcfb
index 000000000000..02d7d5a67c77
--- /dev/null
+++ b/kernel/sched/pds.c
@@ -0,0 +1,6558 @@
@@ -0,0 +1,6554 @@
+/*
+ * kernel/sched/pds.c, was kernel/sched.c
+ *
@ -2070,7 +2070,6 @@ index 000000000000..aefbd9cebcfb
+
+ raw_spin_lock(&rq->lock);
+ __hrtick_restart(rq);
+ rq->hrtick_csd_pending = 0;
+ raw_spin_unlock(&rq->lock);
+}
+
@ -2094,12 +2093,10 @@ index 000000000000..aefbd9cebcfb
+
+ hrtimer_set_expires(timer, time);
+
+ if (rq == this_rq()) {
+ if (rq == this_rq())
+ __hrtick_restart(rq);
+ } else if (!rq->hrtick_csd_pending) {
+ else
+ smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd);
+ rq->hrtick_csd_pending = 1;
+ }
+}
+
+#else
@ -2123,8 +2120,6 @@ index 000000000000..aefbd9cebcfb
+static void hrtick_rq_init(struct rq *rq)
+{
+#ifdef CONFIG_SMP
+ rq->hrtick_csd_pending = 0;
+
+ rq->hrtick_csd.flags = 0;
+ rq->hrtick_csd.func = __hrtick_start;
+ rq->hrtick_csd.info = rq;
@ -3946,6 +3941,7 @@ index 000000000000..aefbd9cebcfb
+ int cpu __maybe_unused = smp_processor_id();
+ struct rq *rq = cpu_rq(cpu);
+
+ arch_scale_freq_tick();
+ sched_clock_tick();
+
+ raw_spin_lock(&rq->lock);
@ -4021,11 +4017,9 @@ index 000000000000..aefbd9cebcfb
+
+ raw_spin_lock_irqsave(&rq->lock, flags);
+ curr = rq->curr;
+
+ if (cpu_is_offline(cpu))
+ goto out_unlock;
+
+ curr = rq->curr;
+ update_rq_clock(rq);
+ if (!is_idle_task(curr)) {
+ /*
@ -4547,6 +4541,8 @@ index 000000000000..aefbd9cebcfb
+ ++*switch_count;
+ rq->nr_switches++;
+
+ psi_sched_switch(prev, next, !task_on_rq_queued(prev));
+
+ trace_sched_switch(preempt, prev, next);
+
+ /* Also unlocks the rq: */
@ -7611,10 +7607,10 @@ index 000000000000..aefbd9cebcfb
+#undef CREATE_TRACE_POINTS
diff --git a/kernel/sched/pds_sched.h b/kernel/sched/pds_sched.h
new file mode 100644
index 000000000000..b3926a8425b2
index 000000000000..6c3361f06087
--- /dev/null
+++ b/kernel/sched/pds_sched.h
@@ -0,0 +1,508 @@
@@ -0,0 +1,518 @@
+#ifndef PDS_SCHED_H
+#define PDS_SCHED_H
+
@ -7749,7 +7745,6 @@ index 000000000000..b3926a8425b2
+
+#ifdef CONFIG_SCHED_HRTICK
+#ifdef CONFIG_SMP
+ int hrtick_csd_pending;
+ call_single_data_t hrtick_csd;
+#endif
+ struct hrtimer hrtick_timer;
@ -7815,6 +7810,13 @@ index 000000000000..b3926a8425b2
+
+#endif /* CONFIG_SMP */
+
+#ifndef arch_scale_freq_tick
+static __always_inline
+void arch_scale_freq_tick(void)
+{
+}
+#endif
+
+#ifndef arch_scale_freq_capacity
+static __always_inline
+unsigned long arch_scale_freq_capacity(int cpu)
@ -8122,12 +8124,16 @@ index 000000000000..b3926a8425b2
+ return nr_cpu_ids;
+}
+#endif
+
+void swake_up_all_locked(struct swait_queue_head *q);
+void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait);
+
+#endif /* PDS_SCHED_H */
diff --git a/kernel/sched/pelt.c b/kernel/sched/pelt.c
index a96db50d40e0..d3d12baa9036 100644
index b647d04d9c8b..05b6cfd91842 100644
--- a/kernel/sched/pelt.c
+++ b/kernel/sched/pelt.c
@@ -236,6 +236,7 @@ ___update_load_avg(struct sched_avg *sa, unsigned long load, unsigned long runna
@@ -250,6 +250,7 @@ ___update_load_avg(struct sched_avg *sa, unsigned long load)
WRITE_ONCE(sa->util_avg, sa->util_sum / divider);
}
@ -8135,16 +8141,16 @@ index a96db50d40e0..d3d12baa9036 100644
/*
* sched_entity:
*
@@ -352,6 +353,7 @@ int update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
@@ -367,6 +368,7 @@ int update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
return 0;
}
+#endif
#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
#ifdef CONFIG_SCHED_THERMAL_PRESSURE
/*
diff --git a/kernel/sched/pelt.h b/kernel/sched/pelt.h
index afff644da065..26d6b47fc156 100644
index eb034d9f024d..a074572f2976 100644
--- a/kernel/sched/pelt.h
+++ b/kernel/sched/pelt.h
@@ -1,11 +1,13 @@
@ -8159,9 +8165,9 @@ index afff644da065..26d6b47fc156 100644
int update_dl_rq_load_avg(u64 now, struct rq *rq, int running);
+#endif
#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
int update_irq_load_avg(struct rq *rq, u64 running);
@@ -17,6 +19,7 @@ update_irq_load_avg(struct rq *rq, u64 running)
#ifdef CONFIG_SCHED_THERMAL_PRESSURE
int update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity);
@@ -37,6 +39,7 @@ update_irq_load_avg(struct rq *rq, u64 running)
}
#endif
@ -8169,7 +8175,7 @@ index afff644da065..26d6b47fc156 100644
/*
* When a task is dequeued, its estimated utilization should not be update if
* its util_avg has not been updated at least once.
@@ -137,9 +140,11 @@ static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq)
@@ -157,9 +160,11 @@ static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq)
return rq_clock_pelt(rq_of(cfs_rq));
}
#endif
@ -8181,7 +8187,7 @@ index afff644da065..26d6b47fc156 100644
static inline int
update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
{
@@ -157,6 +162,7 @@ update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
@@ -188,6 +193,7 @@ static inline u64 thermal_load_avg(struct rq *rq)
{
return 0;
}
@ -8190,7 +8196,7 @@ index afff644da065..26d6b47fc156 100644
static inline int
update_irq_load_avg(struct rq *rq, u64 running)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index c8870c5bd7df..4fc9f2ead4d2 100644
index db3a57675ccf..5a8060bd2343 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2,6 +2,10 @@
@ -8204,14 +8210,12 @@ index c8870c5bd7df..4fc9f2ead4d2 100644
#include <linux/sched.h>
#include <linux/sched/autogroup.h>
@@ -2496,6 +2500,7 @@ static inline void membarrier_switch_mm(struct rq *rq,
return true;
}
#endif
+#endif /* !CONFIG_SCHED_PDS */
@@ -2546,3 +2550,5 @@ static inline bool is_per_cpu_kthread(struct task_struct *p)
void swake_up_all_locked(struct swait_queue_head *q);
void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait);
+
+#endif /* !CONFIG_SCHED_PDS */
diff --git a/kernel/sched/stats.c b/kernel/sched/stats.c
index 750fb3c67eed..45bd43942575 100644
--- a/kernel/sched/stats.c
@ -8244,7 +8248,7 @@ index 750fb3c67eed..45bd43942575 100644
}
return 0;
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index b6f2f35d0bcf..204933ebc95a 100644
index 8a176d8727a3..b9dde576b576 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -130,8 +130,12 @@ static int __maybe_unused four = 4;
@ -8262,7 +8266,7 @@ index b6f2f35d0bcf..204933ebc95a 100644
#ifdef CONFIG_PRINTK
static int ten_thousand = 10000;
#endif
@@ -300,7 +304,7 @@ static struct ctl_table sysctl_base_table[] = {
@@ -288,7 +292,7 @@ static struct ctl_table sysctl_base_table[] = {
{ }
};
@ -8271,7 +8275,7 @@ index b6f2f35d0bcf..204933ebc95a 100644
static int min_sched_granularity_ns = 100000; /* 100 usecs */
static int max_sched_granularity_ns = NSEC_PER_SEC; /* 1 second */
static int min_wakeup_granularity_ns; /* 0 usecs */
@@ -317,6 +321,7 @@ static int max_extfrag_threshold = 1000;
@@ -305,6 +309,7 @@ static int max_extfrag_threshold = 1000;
#endif
static struct ctl_table kern_table[] = {
@ -8279,7 +8283,7 @@ index b6f2f35d0bcf..204933ebc95a 100644
{
.procname = "sched_child_runs_first",
.data = &sysctl_sched_child_runs_first,
@@ -498,6 +503,7 @@ static struct ctl_table kern_table[] = {
@@ -486,6 +491,7 @@ static struct ctl_table kern_table[] = {
.extra2 = SYSCTL_ONE,
},
#endif
@ -8287,7 +8291,7 @@ index b6f2f35d0bcf..204933ebc95a 100644
#ifdef CONFIG_PROVE_LOCKING
{
.procname = "prove_locking",
@@ -1070,6 +1076,26 @@ static struct ctl_table kern_table[] = {
@@ -1049,6 +1055,26 @@ static struct ctl_table kern_table[] = {
.proc_handler = proc_dointvec,
},
#endif
@ -8315,10 +8319,10 @@ index b6f2f35d0bcf..204933ebc95a 100644
{
.procname = "spin_retry",
diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
index 42d512fcfda2..71af3cd30ccc 100644
index 2fd3b3fa68bf..6f3b08afdd4c 100644
--- a/kernel/time/posix-cpu-timers.c
+++ b/kernel/time/posix-cpu-timers.c
@@ -226,7 +226,7 @@ static void task_sample_cputime(struct task_struct *p, u64 *samples)
@@ -236,7 +236,7 @@ static void task_sample_cputime(struct task_struct *p, u64 *samples)
u64 stime, utime;
task_cputime(p, &utime, &stime);
@ -8327,7 +8331,7 @@ index 42d512fcfda2..71af3cd30ccc 100644
}
static void proc_sample_cputime_atomic(struct task_cputime_atomic *at,
@@ -796,6 +796,7 @@ static void collect_posix_cputimers(struct posix_cputimers *pct, u64 *samples,
@@ -806,6 +806,7 @@ static void collect_posix_cputimers(struct posix_cputimers *pct, u64 *samples,
}
}
@ -8335,7 +8339,7 @@ index 42d512fcfda2..71af3cd30ccc 100644
static inline void check_dl_overrun(struct task_struct *tsk)
{
if (tsk->dl.dl_overrun) {
@@ -803,6 +804,7 @@ static inline void check_dl_overrun(struct task_struct *tsk)
@@ -813,6 +814,7 @@ static inline void check_dl_overrun(struct task_struct *tsk)
__group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
}
}
@ -8343,7 +8347,7 @@ index 42d512fcfda2..71af3cd30ccc 100644
static bool check_rlimit(u64 time, u64 limit, int signo, bool rt, bool hard)
{
@@ -830,8 +832,10 @@ static void check_thread_timers(struct task_struct *tsk,
@@ -840,8 +842,10 @@ static void check_thread_timers(struct task_struct *tsk,
u64 samples[CPUCLOCK_MAX];
unsigned long soft;
@ -8354,7 +8358,7 @@ index 42d512fcfda2..71af3cd30ccc 100644
if (expiry_cache_is_inactive(pct))
return;
@@ -845,7 +849,7 @@ static void check_thread_timers(struct task_struct *tsk,
@@ -855,7 +859,7 @@ static void check_thread_timers(struct task_struct *tsk,
soft = task_rlimit(tsk, RLIMIT_RTTIME);
if (soft != RLIM_INFINITY) {
/* Task RT timeout is accounted in jiffies. RTTIME is usec */
@ -8363,7 +8367,7 @@ index 42d512fcfda2..71af3cd30ccc 100644
unsigned long hard = task_rlimit_max(tsk, RLIMIT_RTTIME);
/* At the hard limit, send SIGKILL. No further action. */
@@ -1099,8 +1103,10 @@ static inline bool fastpath_timer_check(struct task_struct *tsk)
@@ -1091,8 +1095,10 @@ static inline bool fastpath_timer_check(struct task_struct *tsk)
return true;
}
@ -8375,7 +8379,7 @@ index 42d512fcfda2..71af3cd30ccc 100644
return false;
}
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index 69ee8ef12cee..3eaa2a21caa4 100644
index b5e3496cf803..0816db0b9c16 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -1048,10 +1048,15 @@ static int trace_wakeup_test_thread(void *data)