/* drivers/platform/x86/intel_turbo_max_3.c */
/*
 * Intel Turbo Boost Max Technology 3.0 legacy (non HWP) enumeration driver
 * Copyright (c) 2017, Intel Corporation.
 * All rights reserved.
 *
 * Author: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 */
  18. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  19. #include <linux/kernel.h>
  20. #include <linux/init.h>
  21. #include <linux/topology.h>
  22. #include <linux/workqueue.h>
  23. #include <linux/cpuhotplug.h>
  24. #include <linux/cpufeature.h>
  25. #include <asm/cpu_device_id.h>
  26. #include <asm/intel-family.h>
/* Overclocking (OC) mailbox MSR and the bit layout used by this driver. */
#define MSR_OC_MAILBOX			0x150
#define MSR_OC_MAILBOX_CMD_OFFSET	32	/* command lives in bits 39:32 */
#define MSR_OC_MAILBOX_RSP_OFFSET	32	/* response code shares bits 39:32 */
#define MSR_OC_MAILBOX_BUSY_BIT		63	/* set by OS, cleared by hardware */
/* Mailbox command: read the favored-core (FC) control data. */
#define OC_MAILBOX_FC_CONTROL_CMD	0x1C

/*
 * Typical latency to get mail box response is ~3us, it takes +3 us to
 * process reading mailbox after issuing mailbox write on a Broadwell 3.4 GHz
 * system. So for most of the time, the first mailbox read should have the
 * response, but to avoid some boundary cases retry twice.
 */
#define OC_MAILBOX_RETRY_COUNT	2
  39. static int get_oc_core_priority(unsigned int cpu)
  40. {
  41. u64 value, cmd = OC_MAILBOX_FC_CONTROL_CMD;
  42. int ret, i;
  43. /* Issue favored core read command */
  44. value = cmd << MSR_OC_MAILBOX_CMD_OFFSET;
  45. /* Set the busy bit to indicate OS is trying to issue command */
  46. value |= BIT_ULL(MSR_OC_MAILBOX_BUSY_BIT);
  47. ret = wrmsrl_safe(MSR_OC_MAILBOX, value);
  48. if (ret) {
  49. pr_debug("cpu %d OC mailbox write failed\n", cpu);
  50. return ret;
  51. }
  52. for (i = 0; i < OC_MAILBOX_RETRY_COUNT; ++i) {
  53. ret = rdmsrl_safe(MSR_OC_MAILBOX, &value);
  54. if (ret) {
  55. pr_debug("cpu %d OC mailbox read failed\n", cpu);
  56. break;
  57. }
  58. if (value & BIT_ULL(MSR_OC_MAILBOX_BUSY_BIT)) {
  59. pr_debug("cpu %d OC mailbox still processing\n", cpu);
  60. ret = -EBUSY;
  61. continue;
  62. }
  63. if ((value >> MSR_OC_MAILBOX_RSP_OFFSET) & 0xff) {
  64. pr_debug("cpu %d OC mailbox cmd failed\n", cpu);
  65. ret = -ENXIO;
  66. break;
  67. }
  68. ret = value & 0xff;
  69. pr_debug("cpu %d max_ratio %d\n", cpu, ret);
  70. break;
  71. }
  72. return ret;
  73. }
/*
 * The work item is needed to avoid CPU hotplug locking issues. The function
 * itmt_legacy_cpu_online() is called from the CPU online callback, so it can't
 * call sched_set_itmt_support() from there as that function will acquire
 * hotplug locks in its path.
 */
/*
 * Deferred enabler for ITMT scheduler support: sched_set_itmt_support()
 * takes CPU hotplug locks, so it cannot be called directly from the CPU
 * online callback and is scheduled as a work item instead.
 */
static void itmt_legacy_work_fn(struct work_struct *work)
{
	sched_set_itmt_support();
}

static DECLARE_WORK(sched_itmt_work, itmt_legacy_work_fn);
  85. static int itmt_legacy_cpu_online(unsigned int cpu)
  86. {
  87. static u32 max_highest_perf = 0, min_highest_perf = U32_MAX;
  88. int priority;
  89. priority = get_oc_core_priority(cpu);
  90. if (priority < 0)
  91. return 0;
  92. sched_set_itmt_core_prio(priority, cpu);
  93. /* Enable ITMT feature when a core with different priority is found */
  94. if (max_highest_perf <= min_highest_perf) {
  95. if (priority > max_highest_perf)
  96. max_highest_perf = priority;
  97. if (priority < min_highest_perf)
  98. min_highest_perf = priority;
  99. if (max_highest_perf > min_highest_perf)
  100. schedule_work(&sched_itmt_work);
  101. }
  102. return 0;
  103. }
/* Shorthand for an Intel family-6 model entry, matching any feature set. */
#define ICPU(model) { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, }

/* CPUs that use this legacy OC-mailbox enumeration; only Broadwell-X here. */
static const struct x86_cpu_id itmt_legacy_cpu_ids[] = {
	ICPU(INTEL_FAM6_BROADWELL_X),
	{}	/* terminator */
};
  109. static int __init itmt_legacy_init(void)
  110. {
  111. const struct x86_cpu_id *id;
  112. int ret;
  113. id = x86_match_cpu(itmt_legacy_cpu_ids);
  114. if (!id)
  115. return -ENODEV;
  116. if (boot_cpu_has(X86_FEATURE_HWP))
  117. return -ENODEV;
  118. ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
  119. "platform/x86/turbo_max_3:online",
  120. itmt_legacy_cpu_online, NULL);
  121. if (ret < 0)
  122. return ret;
  123. return 0;
  124. }
  125. late_initcall(itmt_legacy_init)