diff --git a/include/linux/sched/bore.h b/include/linux/sched/bore.h
index fbb73a64d082..9215c13a91a8 100644
--- a/include/linux/sched/bore.h
+++ b/include/linux/sched/bore.h
@@ -11,7 +11,7 @@
 #define SCHED_BORE_AUTHOR "Masahito Suzuki"
 #define SCHED_BORE_PROGNAME "BORE CPU Scheduler modification"
 
-#define SCHED_BORE_VERSION "6.6.2"
+#define SCHED_BORE_VERSION "6.6.3"
 
 extern u8 __read_mostly sched_bore;
 DECLARE_STATIC_KEY_TRUE(sched_bore_key);
diff --git a/kernel/sched/bore.c b/kernel/sched/bore.c
index 759eee843ca3..c27a22cd63d6 100644
--- a/kernel/sched/bore.c
+++ b/kernel/sched/bore.c
@@ -148,8 +148,9 @@ static inline bool task_is_bore_eligible(struct task_struct *p)
 
 static inline u32 count_children_upto2(struct task_struct *p) {
 	struct list_head *head = &p->children;
-	struct list_head *next = head->next;
-	return (next != head) + (next->next != head);
+	struct list_head *first = READ_ONCE(head->next);
+	struct list_head *second = READ_ONCE(first->next);
+	return (first != head) + (second != head);
 }
 
 static inline bool burst_cache_expired(struct bore_bc *bc, u64 now) {
@@ -224,9 +225,13 @@ static u32 inherit_from_ancestor_hub(struct task_struct *parent,
 		if (scan_count++ >= BURST_CACHE_SCAN_LIMIT) break;
 
 		struct task_struct *descendant = direct_child;
-		while (count_children_upto2(descendant) == 1)
-			descendant = list_first_entry(&descendant->children,
-				struct task_struct, sibling);
+		while (count_children_upto2(descendant) == 1) {
+			struct task_struct *next_descendant =
+				list_first_or_null_rcu(&descendant->children,
+					struct task_struct, sibling);
+			if (!next_descendant) break;
+			descendant = next_descendant;
+		}
 
 		if (!task_is_bore_eligible(descendant)) continue;
 		count++;
@@ -247,10 +252,11 @@ static u32 inherit_from_thread_group(struct task_struct *p, u64 now) {
 
 	if (burst_cache_expired(bc, now)) {
 		struct task_struct *sibling;
-		u32 count = 0, total = 0;
+		u32 count = 0, total = 0, scan_count = 0;
 
 		for_each_thread(leader, sibling) {
 			if (count >= BURST_CACHE_SAMPLE_LIMIT) break;
+			if (scan_count++ >= BURST_CACHE_SCAN_LIMIT) break;
 			if (!task_is_bore_eligible(sibling)) continue;
 
 			count++;
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 0fcdadad8d67..2310dd7c2adc 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3880,11 +3880,17 @@ void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
 	bool curr = cfs_rq->curr == se;
 	bool rel_vprot = false;
 	u64 vprot;
+#ifdef CONFIG_SCHED_BORE
+	s64 vlag_unscaled = 0;
+#endif /* !CONFIG_SCHED_BORE */
 
 	if (se->on_rq) {
 		/* commit outstanding execution time */
 		update_curr(cfs_rq);
 		update_entity_lag(cfs_rq, se);
+#ifdef CONFIG_SCHED_BORE
+		vlag_unscaled = se->vlag;
+#endif /* !CONFIG_SCHED_BORE */
 		se->deadline -= se->vruntime;
 		se->rel_deadline = 1;
 		if (curr && protect_slice(se)) {
@@ -3920,6 +3926,16 @@ void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
 
 	enqueue_load_avg(cfs_rq, se);
 	if (se->on_rq) {
+#ifdef CONFIG_SCHED_BORE
+		if (curr) {
+			se->vruntime += vlag_unscaled - se->vlag;
+			if (se->rel_deadline) {
+				se->deadline += se->vruntime;
+				se->rel_deadline = 0;
+			}
+		}
+		else
+#endif /* !CONFIG_SCHED_BORE */
 		place_entity(cfs_rq, se, 0);
 		if (rel_vprot)
 			se->vprot = se->vruntime + vprot;