X-Git-Url: https://gerrit.opnfv.org/gerrit/gitweb?a=blobdiff_plain;f=kernel%2Fkernel%2Fsched%2Fcputime.c;fp=kernel%2Fkernel%2Fsched%2Fcputime.c;h=558b98af241daa78d0f1a2cdf27bb8331c291e31;hb=52f993b8e89487ec9ee15a7fb4979e0f09a45b27;hp=0f75a38cff96b722416871f5b36e684c2da7bbdf;hpb=c189ccac5702322ed843fe17057035b7222a59b6;p=kvmfornfv.git

diff --git a/kernel/kernel/sched/cputime.c b/kernel/kernel/sched/cputime.c
index 0f75a38cf..558b98af2 100644
--- a/kernel/kernel/sched/cputime.c
+++ b/kernel/kernel/sched/cputime.c
@@ -259,21 +259,21 @@ static __always_inline bool steal_account_process_tick(void)
 #ifdef CONFIG_PARAVIRT
 	if (static_key_false(&paravirt_steal_enabled)) {
 		u64 steal;
-		cputime_t steal_ct;
+		unsigned long steal_jiffies;
 
 		steal = paravirt_steal_clock(smp_processor_id());
 		steal -= this_rq()->prev_steal_time;
 
 		/*
-		 * cputime_t may be less precise than nsecs (eg: if it's
-		 * based on jiffies). Lets cast the result to cputime
+		 * steal is in nsecs but our caller is expecting steal
+		 * time in jiffies. Lets cast the result to jiffies
 		 * granularity and account the rest on the next rounds.
 		 */
-		steal_ct = nsecs_to_cputime(steal);
-		this_rq()->prev_steal_time += cputime_to_nsecs(steal_ct);
+		steal_jiffies = nsecs_to_jiffies(steal);
+		this_rq()->prev_steal_time += jiffies_to_nsecs(steal_jiffies);
 
-		account_steal_time(steal_ct);
-		return steal_ct;
+		account_steal_time(jiffies_to_cputime(steal_jiffies));
+		return steal_jiffies;
 	}
 #endif
 	return false;
@@ -600,19 +600,25 @@ static void cputime_adjust(struct task_cputime *curr,
 	stime = curr->stime;
 	utime = curr->utime;
 
-	if (utime == 0) {
-		stime = rtime;
+	/*
+	 * If either stime or both stime and utime are 0, assume all runtime is
+	 * userspace. Once a task gets some ticks, the monotonicity code at
+	 * 'update' will ensure things converge to the observed ratio.
+	 */
+	if (stime == 0) {
+		utime = rtime;
 		goto update;
 	}
 
-	if (stime == 0) {
-		utime = rtime;
+	if (utime == 0) {
+		stime = rtime;
 		goto update;
 	}
 
 	stime = scale_stime((__force u64)stime, (__force u64)rtime,
 			    (__force u64)(stime + utime));
 
+update:
 	/*
 	 * Make sure stime doesn't go backwards; this preserves monotonicity
 	 * for utime because rtime is monotonic.
@@ -635,7 +641,6 @@ static void cputime_adjust(struct task_cputime *curr,
 		stime = rtime - utime;
 	}
 
-update:
 	prev->stime = stime;
 	prev->utime = utime;
 out:
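The cputime_adjust() change above is subtle: moving the 'update:' label in front of the clamping block means the stime == 0 and utime == 0 shortcuts now also pass through the monotonicity checks instead of storing into prev directly. A standalone sketch of the resulting flow, using a u64 typedef and the hypothetical scale_stime_sketch()/cputime_adjust_sketch() names in place of the kernel's cputime_t machinery (the real scale_stime() also avoids 64-bit multiply overflow, which this sketch does not):

#include <stdint.h>
typedef uint64_t u64;

/* Hypothetical stand-in for the kernel's scale_stime(): split rtime in
 * the observed stime:utime ratio. Only called with total > 0 here.
 */
static u64 scale_stime_sketch(u64 stime, u64 rtime, u64 total)
{
	return stime * rtime / total;
}

/* Sketch of cputime_adjust() after this patch: every path, including
 * the early-outs, falls through the clamps that keep the previously
 * reported stime/utime monotonic while preserving stime + utime == rtime.
 */
static void cputime_adjust_sketch(u64 stime, u64 utime, u64 rtime,
				  u64 *prev_stime, u64 *prev_utime)
{
	if (stime == 0) {		/* no system ticks yet: all user */
		utime = rtime;
		goto update;
	}
	if (utime == 0) {		/* no user ticks yet: all system */
		stime = rtime;
		goto update;
	}
	stime = scale_stime_sketch(stime, rtime, stime + utime);

update:
	if (stime < *prev_stime)	/* stime must not go backwards... */
		stime = *prev_stime;
	utime = rtime - stime;		/* ...and rtime is monotonic */
	if (utime < *prev_utime) {	/* analogous clamp for utime */
		utime = *prev_utime;
		stime = rtime - utime;
	}
	*prev_stime = stime;
	*prev_utime = utime;
}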
@@ -680,7 +685,7 @@ static cputime_t get_vtime_delta(struct task_struct *tsk)
 {
 	unsigned long long delta = vtime_delta(tsk);
 
-	WARN_ON_ONCE(tsk->vtime_snap_whence == VTIME_SLEEPING);
+	WARN_ON_ONCE(tsk->vtime_snap_whence == VTIME_INACTIVE);
 	tsk->vtime_snap += delta;
 
 	/* CHECKME: always safe to convert nsecs to cputime? */
@@ -696,45 +701,37 @@ static void __vtime_account_system(struct task_struct *tsk)
 
 void vtime_account_system(struct task_struct *tsk)
 {
-	raw_spin_lock(&tsk->vtime_lock);
-	write_seqcount_begin(&tsk->vtime_seq);
+	write_seqcount_begin(&tsk->vtime_seqcount);
 	__vtime_account_system(tsk);
-	write_seqcount_end(&tsk->vtime_seq);
-	raw_spin_unlock(&tsk->vtime_lock);
+	write_seqcount_end(&tsk->vtime_seqcount);
 }
 
 void vtime_gen_account_irq_exit(struct task_struct *tsk)
 {
-	raw_spin_lock(&tsk->vtime_lock);
-	write_seqcount_begin(&tsk->vtime_seq);
+	write_seqcount_begin(&tsk->vtime_seqcount);
 	__vtime_account_system(tsk);
 	if (context_tracking_in_user())
 		tsk->vtime_snap_whence = VTIME_USER;
-	write_seqcount_end(&tsk->vtime_seq);
-	raw_spin_unlock(&tsk->vtime_lock);
+	write_seqcount_end(&tsk->vtime_seqcount);
 }
 
 void vtime_account_user(struct task_struct *tsk)
 {
 	cputime_t delta_cpu;
 
-	raw_spin_lock(&tsk->vtime_lock);
-	write_seqcount_begin(&tsk->vtime_seq);
+	write_seqcount_begin(&tsk->vtime_seqcount);
 	delta_cpu = get_vtime_delta(tsk);
 	tsk->vtime_snap_whence = VTIME_SYS;
 	account_user_time(tsk, delta_cpu, cputime_to_scaled(delta_cpu));
-	write_seqcount_end(&tsk->vtime_seq);
-	raw_spin_unlock(&tsk->vtime_lock);
+	write_seqcount_end(&tsk->vtime_seqcount);
 }
 
 void vtime_user_enter(struct task_struct *tsk)
 {
-	raw_spin_lock(&tsk->vtime_lock);
-	write_seqcount_begin(&tsk->vtime_seq);
+	write_seqcount_begin(&tsk->vtime_seqcount);
 	__vtime_account_system(tsk);
 	tsk->vtime_snap_whence = VTIME_USER;
-	write_seqcount_end(&tsk->vtime_seq);
-	raw_spin_unlock(&tsk->vtime_lock);
+	write_seqcount_end(&tsk->vtime_seqcount);
 }
 
 void vtime_guest_enter(struct task_struct *tsk)
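Every hunk above is the same mechanical conversion, and it relies on one fact: a task's vtime state is only ever written by that task itself, from contexts that cannot race one another, so writers never need mutual exclusion. Readers (task_gtime() and fetch_task_cputime(), further down) only need a sequence counter to detect a concurrent update and retry, which makes the raw spinlock pure overhead. A small user-space model of the write side, with hypothetical names standing in for task_struct's vtime_seqcount/vtime_snap/vtime_snap_whence fields:

#include <stdatomic.h>
#include <stdint.h>

/* Model of the per-task state the functions above protect. */
struct vtime_model {
	atomic_uint seqcount;	/* even: quiescent, odd: write in progress */
	uint64_t snap;		/* stand-in for tsk->vtime_snap */
	int whence;		/* stand-in for tsk->vtime_snap_whence */
};

/* Writer, shaped like vtime_account_user() after the patch: bump the
 * count to odd, update, bump back to even. No lock is taken because
 * each task writes only its own state, with interrupts disabled.
 */
static void vtime_write_model(struct vtime_model *t, uint64_t now, int whence)
{
	atomic_fetch_add_explicit(&t->seqcount, 1, memory_order_acq_rel);
	t->snap = now;		/* readers seeing an odd count will retry */
	t->whence = whence;
	atomic_fetch_add_explicit(&t->seqcount, 1, memory_order_release);
}

The kernel's write_seqcount_begin()/write_seqcount_end() additionally supply the exact memory barriers and lockdep annotations this model elides.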
@@ -746,23 +743,19 @@ void vtime_guest_enter(struct task_struct *tsk)
 	 * synchronization against the reader (task_gtime())
 	 * that can thus safely catch up with a tickless delta.
 	 */
-	raw_spin_lock(&tsk->vtime_lock);
-	write_seqcount_begin(&tsk->vtime_seq);
+	write_seqcount_begin(&tsk->vtime_seqcount);
 	__vtime_account_system(tsk);
 	current->flags |= PF_VCPU;
-	write_seqcount_end(&tsk->vtime_seq);
-	raw_spin_unlock(&tsk->vtime_lock);
+	write_seqcount_end(&tsk->vtime_seqcount);
 }
 EXPORT_SYMBOL_GPL(vtime_guest_enter);
 
 void vtime_guest_exit(struct task_struct *tsk)
 {
-	raw_spin_lock(&tsk->vtime_lock);
-	write_seqcount_begin(&tsk->vtime_seq);
+	write_seqcount_begin(&tsk->vtime_seqcount);
 	__vtime_account_system(tsk);
 	current->flags &= ~PF_VCPU;
-	write_seqcount_end(&tsk->vtime_seq);
-	raw_spin_unlock(&tsk->vtime_lock);
+	write_seqcount_end(&tsk->vtime_seqcount);
 }
 EXPORT_SYMBOL_GPL(vtime_guest_exit);
 
@@ -775,30 +768,26 @@ void vtime_account_idle(struct task_struct *tsk)
 
 void arch_vtime_task_switch(struct task_struct *prev)
 {
-	raw_spin_lock(&prev->vtime_lock);
-	write_seqcount_begin(&prev->vtime_seq);
-	prev->vtime_snap_whence = VTIME_SLEEPING;
-	write_seqcount_end(&prev->vtime_seq);
-	raw_spin_unlock(&prev->vtime_lock);
+	write_seqcount_begin(&prev->vtime_seqcount);
+	prev->vtime_snap_whence = VTIME_INACTIVE;
+	write_seqcount_end(&prev->vtime_seqcount);
 
-	raw_spin_lock(&current->vtime_lock);
-	write_seqcount_begin(&current->vtime_seq);
+	write_seqcount_begin(&current->vtime_seqcount);
 	current->vtime_snap_whence = VTIME_SYS;
 	current->vtime_snap = sched_clock_cpu(smp_processor_id());
-	write_seqcount_end(&current->vtime_seq);
-	raw_spin_unlock(&current->vtime_lock);
+	write_seqcount_end(&current->vtime_seqcount);
 }
 
 void vtime_init_idle(struct task_struct *t, int cpu)
 {
 	unsigned long flags;
 
-	raw_spin_lock_irqsave(&t->vtime_lock, flags);
-	write_seqcount_begin(&t->vtime_seq);
+	local_irq_save(flags);
+	write_seqcount_begin(&t->vtime_seqcount);
 	t->vtime_snap_whence = VTIME_SYS;
 	t->vtime_snap = sched_clock_cpu(cpu);
-	write_seqcount_end(&t->vtime_seq);
-	raw_spin_unlock_irqrestore(&t->vtime_lock, flags);
+	write_seqcount_end(&t->vtime_seqcount);
+	local_irq_restore(flags);
 }
 
 cputime_t task_gtime(struct task_struct *t)
@@ -810,13 +799,13 @@ cputime_t task_gtime(struct task_struct *t)
 		return t->gtime;
 
 	do {
-		seq = read_seqcount_begin(&t->vtime_seq);
+		seq = read_seqcount_begin(&t->vtime_seqcount);
 
 		gtime = t->gtime;
 		if (t->flags & PF_VCPU)
 			gtime += vtime_delta(t);
 
-	} while (read_seqcount_retry(&t->vtime_seq, seq));
+	} while (read_seqcount_retry(&t->vtime_seqcount, seq));
 
 	return gtime;
 }
@@ -839,7 +828,7 @@ fetch_task_cputime(struct task_struct *t,
 		*udelta = 0;
 		*sdelta = 0;
 
-		seq = read_seqcount_begin(&t->vtime_seq);
+		seq = read_seqcount_begin(&t->vtime_seqcount);
 
 		if (u_dst)
 			*u_dst = *u_src;
@@ -847,7 +836,7 @@ fetch_task_cputime(struct task_struct *t,
 			*s_dst = *s_src;
 
 		/* Task is sleeping, nothing to add */
-		if (t->vtime_snap_whence == VTIME_SLEEPING ||
+		if (t->vtime_snap_whence == VTIME_INACTIVE ||
 		    is_idle_task(t))
 			continue;
 
@@ -863,7 +852,7 @@ fetch_task_cputime(struct task_struct *t,
 			if (t->vtime_snap_whence == VTIME_SYS)
 				*sdelta = delta;
 		}
-	} while (read_seqcount_retry(&t->vtime_seq, seq));
+	} while (read_seqcount_retry(&t->vtime_seqcount, seq));
 }
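The read side that makes the lock removal safe is visible in the last hunks: task_gtime() and fetch_task_cputime() never block the writer, they detect interference and restart. Continuing the user-space model from the earlier sketch (reusing the hypothetical struct vtime_model; the kernel's read_seqcount_begin()/read_seqcount_retry() add the memory barriers this model leaves out):

/* Reader, mirroring the task_gtime() loop above: sample the counter,
 * copy the protected data, and retry if a writer interleaved.
 */
static uint64_t vtime_read_model(struct vtime_model *t)
{
	unsigned int seq;
	uint64_t snap;

	do {
		/* read_seqcount_begin(): wait out any in-flight writer */
		do {
			seq = atomic_load_explicit(&t->seqcount,
						   memory_order_acquire);
		} while (seq & 1);

		snap = t->snap;		/* copy out the snapshot */

		/* read_seqcount_retry(): restart if the count moved */
	} while (atomic_load_explicit(&t->seqcount,
				      memory_order_acquire) != seq);

	return snap;
}

In other words, the old vtime_lock only ever excluded writers from each other, which the per-task, IRQs-off call sites already guarantee; the seqcount alone covers the reader/writer direction.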