pelt.h
#ifdef CONFIG_SMP

int __update_load_avg_blocked_se(u64 now, int cpu, struct sched_entity *se);
int __update_load_avg_se(u64 now, int cpu, struct cfs_rq *cfs_rq, struct sched_entity *se);
int __update_load_avg_cfs_rq(u64 now, int cpu, struct cfs_rq *cfs_rq);
int update_rt_rq_load_avg(u64 now, struct rq *rq, int running);
int update_dl_rq_load_avg(u64 now, struct rq *rq, int running);

#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
int update_irq_load_avg(struct rq *rq, u64 running);
#else
static inline int
update_irq_load_avg(struct rq *rq, u64 running)
{
        return 0;
}
#endif

/*
 * When a task is dequeued, its estimated utilization should not be updated if
 * its util_avg has not been updated at least once.
 * This flag is used to synchronize util_avg updates with util_est updates.
 * We map this information into the LSB bit of the utilization saved at
 * dequeue time (i.e. util_est.dequeued).
 */
#define UTIL_AVG_UNCHANGED 0x1

static inline void cfs_se_util_change(struct sched_avg *avg)
{
        unsigned int enqueued;

        if (!sched_feat(UTIL_EST))
                return;

        /* Avoid store if the flag has already been cleared */
        enqueued = avg->util_est.enqueued;
        if (!(enqueued & UTIL_AVG_UNCHANGED))
                return;

        /* Reset flag to report util_avg has been updated */
        enqueued &= ~UTIL_AVG_UNCHANGED;
        WRITE_ONCE(avg->util_est.enqueued, enqueued);
}

#else

static inline int
update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
{
        return 0;
}

static inline int
update_rt_rq_load_avg(u64 now, struct rq *rq, int running)
{
        return 0;
}

static inline int
update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
{
        return 0;
}

static inline int
update_irq_load_avg(struct rq *rq, u64 running)
{
        return 0;
}

#endif
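
For context, the sketch below shows how the UTIL_AVG_UNCHANGED protocol described in the comment above is meant to be consumed by the util_est update path at dequeue time. The real consumer lives in kernel/sched/fair.c; the helper name, signature, and surrounding details here are hypothetical and only the flag handling follows the comment and cfs_se_util_change() above.

/*
 * Illustrative sketch (not part of pelt.h): the dequeue-side counterpart
 * of cfs_se_util_change(). The helper and its signature are hypothetical.
 */
static inline void example_util_est_dequeue(struct sched_avg *avg,
                                            unsigned int last_util)
{
        /*
         * If util_avg has not been updated since the last snapshot, the
         * LSB is still set and there is nothing new to fold into the
         * estimated utilization: skip the update.
         */
        if (avg->util_est.enqueued & UTIL_AVG_UNCHANGED)
                return;

        /*
         * Save the utilization seen at dequeue time with the LSB set;
         * cfs_se_util_change() clears it once util_avg is updated again,
         * re-enabling util_est updates at the next dequeue.
         */
        WRITE_ONCE(avg->util_est.enqueued, last_util | UTIL_AVG_UNCHANGED);
}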