hwpstate_intel.c

/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2018 Intel Corporation
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted providing that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/sbuf.h>
#include <sys/module.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/smp.h>
#include <sys/proc.h>
#include <sys/sched.h>

#include <machine/cpu.h>
#include <machine/md_var.h>
#include <machine/cputypes.h>
#include <machine/specialreg.h>

#include <contrib/dev/acpica/include/acpi.h>

#include <dev/acpica/acpivar.h>

#include <x86/cpufreq/hwpstate_intel_internal.h>

#include "acpi_if.h"
#include "cpufreq_if.h"

extern uint64_t tsc_freq;

static int      intel_hwpstate_probe(device_t dev);
static int      intel_hwpstate_attach(device_t dev);
static int      intel_hwpstate_detach(device_t dev);
static int      intel_hwpstate_suspend(device_t dev);
static int      intel_hwpstate_resume(device_t dev);

static int      intel_hwpstate_get(device_t dev, struct cf_setting *cf);
static int      intel_hwpstate_type(device_t dev, int *type);

static device_method_t intel_hwpstate_methods[] = {
        /* Device interface */
        DEVMETHOD(device_identify,      intel_hwpstate_identify),
        DEVMETHOD(device_probe,         intel_hwpstate_probe),
        DEVMETHOD(device_attach,        intel_hwpstate_attach),
        DEVMETHOD(device_detach,        intel_hwpstate_detach),
        DEVMETHOD(device_suspend,       intel_hwpstate_suspend),
        DEVMETHOD(device_resume,        intel_hwpstate_resume),

        /* cpufreq interface */
        DEVMETHOD(cpufreq_drv_get,      intel_hwpstate_get),
        DEVMETHOD(cpufreq_drv_type,     intel_hwpstate_type),

        DEVMETHOD_END
};

struct hwp_softc {
        device_t        dev;
        bool            hwp_notifications;
        bool            hwp_activity_window;
        bool            hwp_pref_ctrl;
        bool            hwp_pkg_ctrl;
        bool            hwp_pkg_ctrl_en;
        bool            hwp_perf_bias;
        bool            hwp_perf_bias_cached;

        uint64_t        req;                    /* Cached copy of HWP_REQUEST */
        uint64_t        hwp_energy_perf_bias;   /* Cache PERF_BIAS */

        uint8_t         high;
        uint8_t         guaranteed;
        uint8_t         efficient;
        uint8_t         low;
};

static driver_t hwpstate_intel_driver = {
        "hwpstate_intel",
        intel_hwpstate_methods,
        sizeof(struct hwp_softc),
};

DRIVER_MODULE(hwpstate_intel, cpu, hwpstate_intel_driver, NULL, NULL);
MODULE_VERSION(hwpstate_intel, 1);
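
/*
 * Boot-time knob (CTLFLAG_RDTUN): package-level HWP control can be disabled
 * before the driver attaches, e.g. by setting machdep.hwpstate_pkg_ctrl=0 in
 * loader.conf(5).  It is read-only once the system is up.
 */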
static bool hwpstate_pkg_ctrl_enable = true;
SYSCTL_BOOL(_machdep, OID_AUTO, hwpstate_pkg_ctrl, CTLFLAG_RDTUN,
    &hwpstate_pkg_ctrl_enable, 0,
    "Set 1 (default) to enable package-level control, 0 to disable");
static int
intel_hwp_dump_sysctl_handler(SYSCTL_HANDLER_ARGS)
{
        device_t dev;
        struct pcpu *pc;
        struct sbuf *sb;
        struct hwp_softc *sc;
        uint64_t data, data2;
        int ret;

        sc = (struct hwp_softc *)arg1;
        dev = sc->dev;

        pc = cpu_get_pcpu(dev);
        if (pc == NULL)
                return (ENXIO);

        sb = sbuf_new(NULL, NULL, 1024, SBUF_FIXEDLEN | SBUF_INCLUDENUL);
        sbuf_putc(sb, '\n');
        thread_lock(curthread);
        sched_bind(curthread, pc->pc_cpuid);
        thread_unlock(curthread);
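
        /*
         * The thread is now pinned to the target CPU, so the MSR reads below
         * observe that CPU's registers rather than whichever CPU the sysctl
         * request happened to run on; sched_unbind() at "out" drops the
         * binding again.
         */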
        rdmsr_safe(MSR_IA32_PM_ENABLE, &data);
        sbuf_printf(sb, "CPU%d: HWP %sabled\n", pc->pc_cpuid,
            ((data & 1) ? "En" : "Dis"));

        if (data == 0) {
                ret = 0;
                goto out;
        }

        rdmsr_safe(MSR_IA32_HWP_CAPABILITIES, &data);
        sbuf_printf(sb, "\tHighest Performance: %03ju\n", data & 0xff);
        sbuf_printf(sb, "\tGuaranteed Performance: %03ju\n", (data >> 8) & 0xff);
        sbuf_printf(sb, "\tEfficient Performance: %03ju\n", (data >> 16) & 0xff);
        sbuf_printf(sb, "\tLowest Performance: %03ju\n", (data >> 24) & 0xff);

        rdmsr_safe(MSR_IA32_HWP_REQUEST, &data);
        data2 = 0;
        if (sc->hwp_pkg_ctrl && (data & IA32_HWP_REQUEST_PACKAGE_CONTROL))
                rdmsr_safe(MSR_IA32_HWP_REQUEST_PKG, &data2);

        sbuf_putc(sb, '\n');
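
        /*
         * With package-level control in effect, a field of IA32_HWP_REQUEST
         * overrides the package value only when its "valid" bit is set.
         * pkg_print() therefore reports the per-CPU field when the valid bit
         * is set (or when package control is unsupported) and the
         * IA32_HWP_REQUEST_PKG field otherwise.
         */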
#define pkg_print(x, name, offset) do {                                 \
        if (!sc->hwp_pkg_ctrl || (data & x) != 0)                       \
                sbuf_printf(sb, "\t%s: %03u\n", name,                   \
                    (unsigned)(data >> offset) & 0xff);                 \
        else                                                            \
                sbuf_printf(sb, "\t%s: %03u\n", name,                   \
                    (unsigned)(data2 >> offset) & 0xff);                \
} while (0)

        pkg_print(IA32_HWP_REQUEST_EPP_VALID,
            "Requested Efficiency Performance Preference", 24);
        pkg_print(IA32_HWP_REQUEST_DESIRED_VALID,
            "Requested Desired Performance", 16);
        pkg_print(IA32_HWP_REQUEST_MAXIMUM_VALID,
            "Requested Maximum Performance", 8);
        pkg_print(IA32_HWP_REQUEST_MINIMUM_VALID,
            "Requested Minimum Performance", 0);
#undef pkg_print

        sbuf_putc(sb, '\n');

out:
        thread_lock(curthread);
        sched_unbind(curthread);
        thread_unlock(curthread);

        ret = sbuf_finish(sb);
        if (ret == 0)
                ret = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb));
        sbuf_delete(sb);

        return (ret);
}

static inline int
percent_to_raw(int x)
{
        MPASS(x <= 100 && x >= 0);
        return (0xff * x / 100);
}

/*
 * Given x * 10 in [0, 1000], round to the integer nearest x.
 *
 * This allows round-tripping nice human readable numbers through this
 * interface.  Otherwise, user-provided percentages such as 25, 50, 75 get
 * rounded down to 24, 49, and 74, which is a bit ugly.
 */
static inline int
round10(int xtimes10)
{
        return ((xtimes10 + 5) / 10);
}

static inline int
raw_to_percent(int x)
{
        MPASS(x <= 0xff && x >= 0);
        return (round10(x * 1000 / 0xff));
}
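
/*
 * Example of the round trip described above: percent_to_raw(25) yields
 * 0xff * 25 / 100 = 63, and raw_to_percent(63) yields
 * round10(63 * 1000 / 0xff) = round10(247) = 25, so user-visible percentages
 * survive a write followed by a read.
 */
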
/* Range of MSR_IA32_ENERGY_PERF_BIAS is more limited: 0-0xf. */
static inline int
percent_to_raw_perf_bias(int x)
{
        /*
         * Round up so that raw values present as nice round human numbers and
         * also round-trip to the same raw value.
         */
        MPASS(x <= 100 && x >= 0);
        return (((0xf * x) + 50) / 100);
}

static inline int
raw_to_percent_perf_bias(int x)
{
        /* Rounding to nice human numbers despite a step interval of 6.67%. */
        MPASS(x <= 0xf && x >= 0);
        return (((x * 20) / 0xf) * 5);
}
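
/*
 * For example, percent_to_raw_perf_bias(50) = ((0xf * 50) + 50) / 100 = 8,
 * and raw_to_percent_perf_bias(8) = ((8 * 20) / 0xf) * 5 = 50, so 50% maps
 * onto the 0-0xf hardware range and back without drifting.
 *
 * sysctl_epp_select() below is the handler behind the per-device "epp"
 * sysctl added in intel_hwpstate_attach().  It reads or updates either the
 * EPP field of IA32_HWP_REQUEST or, on parts without EPP support,
 * IA32_ENERGY_PERF_BIAS, always while bound to the CPU the device represents.
 */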
static int
sysctl_epp_select(SYSCTL_HANDLER_ARGS)
{
        struct hwp_softc *sc;
        device_t dev;
        struct pcpu *pc;
        uint64_t epb;
        uint32_t val;
        int ret;

        dev = oidp->oid_arg1;
        sc = device_get_softc(dev);
        if (!sc->hwp_pref_ctrl && !sc->hwp_perf_bias)
                return (ENODEV);

        pc = cpu_get_pcpu(dev);
        if (pc == NULL)
                return (ENXIO);

        thread_lock(curthread);
        sched_bind(curthread, pc->pc_cpuid);
        thread_unlock(curthread);

        if (sc->hwp_pref_ctrl) {
                val = (sc->req & IA32_HWP_REQUEST_ENERGY_PERFORMANCE_PREFERENCE) >> 24;
                val = raw_to_percent(val);
        } else {
                /*
                 * If cpuid indicates EPP is not supported, the HWP controller
                 * uses MSR_IA32_ENERGY_PERF_BIAS instead (Intel SDM §14.4.4).
                 * This register is per-core (but not HT).
                 */
                if (!sc->hwp_perf_bias_cached) {
                        ret = rdmsr_safe(MSR_IA32_ENERGY_PERF_BIAS, &epb);
                        if (ret)
                                goto out;
                        sc->hwp_energy_perf_bias = epb;
                        sc->hwp_perf_bias_cached = true;
                }
                val = sc->hwp_energy_perf_bias &
                    IA32_ENERGY_PERF_BIAS_POLICY_HINT_MASK;
                val = raw_to_percent_perf_bias(val);
        }

        MPASS(val >= 0 && val <= 100);

        ret = sysctl_handle_int(oidp, &val, 0, req);
        if (ret || req->newptr == NULL)
                goto out;

        if (val > 100) {
                ret = EINVAL;
                goto out;
        }

        if (sc->hwp_pref_ctrl) {
                val = percent_to_raw(val);

                sc->req =
                    ((sc->req & ~IA32_HWP_REQUEST_ENERGY_PERFORMANCE_PREFERENCE)
                    | (val << 24u));

                if (sc->hwp_pkg_ctrl_en)
                        ret = wrmsr_safe(MSR_IA32_HWP_REQUEST_PKG, sc->req);
                else
                        ret = wrmsr_safe(MSR_IA32_HWP_REQUEST, sc->req);
        } else {
                val = percent_to_raw_perf_bias(val);
                MPASS((val & ~IA32_ENERGY_PERF_BIAS_POLICY_HINT_MASK) == 0);

                sc->hwp_energy_perf_bias =
                    ((sc->hwp_energy_perf_bias &
                    ~IA32_ENERGY_PERF_BIAS_POLICY_HINT_MASK) | val);
                ret = wrmsr_safe(MSR_IA32_ENERGY_PERF_BIAS,
                    sc->hwp_energy_perf_bias);
        }

out:
        thread_lock(curthread);
        sched_unbind(curthread);
        thread_unlock(curthread);

        return (ret);
}

void
intel_hwpstate_identify(driver_t *driver, device_t parent)
{
        if (device_find_child(parent, "hwpstate_intel", -1) != NULL)
                return;

        if (cpu_vendor_id != CPU_VENDOR_INTEL)
                return;

        if (resource_disabled("hwpstate_intel", 0))
                return;

        /*
         * Intel SDM 14.4.1 (HWP Programming Interfaces):
         * Availability of HWP baseline resource and capability,
         * CPUID.06H:EAX[bit 7]: If this bit is set, HWP provides several new
         * architectural MSRs: IA32_PM_ENABLE, IA32_HWP_CAPABILITIES,
         * IA32_HWP_REQUEST, IA32_HWP_STATUS.
         */
        if ((cpu_power_eax & CPUTPM1_HWP) == 0)
                return;

        if (BUS_ADD_CHILD(parent, 10, "hwpstate_intel", device_get_unit(parent))
            == NULL)
                device_printf(parent, "hwpstate_intel: add child failed\n");
}

static int
intel_hwpstate_probe(device_t dev)
{
        device_set_desc(dev, "Intel Speed Shift");
        return (BUS_PROBE_NOWILDCARD);
}
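
/*
 * Enable HWP on the CPU this device instance represents and program a fully
 * autonomous request: IA32_PM_ENABLE is set, the capability MSR is cached in
 * the softc, and IA32_HWP_REQUEST is written with minimum = lowest,
 * maximum = highest, and desired = 0 so the hardware chooses the operating
 * point.  When package-level control is enabled, IA32_HWP_REQUEST_PKG is
 * written with the same request.
 */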
static int
set_autonomous_hwp(struct hwp_softc *sc)
{
        struct pcpu *pc;
        device_t dev;
        uint64_t caps;
        int ret;

        dev = sc->dev;

        pc = cpu_get_pcpu(dev);
        if (pc == NULL)
                return (ENXIO);

        thread_lock(curthread);
        sched_bind(curthread, pc->pc_cpuid);
        thread_unlock(curthread);

        /* XXX: Many MSRs aren't readable until feature is enabled */
        ret = wrmsr_safe(MSR_IA32_PM_ENABLE, 1);
        if (ret) {
                /*
                 * This is actually a package-level MSR, and only the first
                 * write is not ignored.  So it is harmless to enable it across
                 * all devices, and this allows us not to care especially in
                 * which order cores (and packages) are probed.  This error
                 * condition should not happen given we gate on the HWP CPUID
                 * feature flag, if the Intel SDM is correct.
                 */
                device_printf(dev, "Failed to enable HWP for cpu%d (%d)\n",
                    pc->pc_cpuid, ret);
                goto out;
        }

        ret = rdmsr_safe(MSR_IA32_HWP_REQUEST, &sc->req);
        if (ret) {
                device_printf(dev,
                    "Failed to read HWP request MSR for cpu%d (%d)\n",
                    pc->pc_cpuid, ret);
                goto out;
        }

        ret = rdmsr_safe(MSR_IA32_HWP_CAPABILITIES, &caps);
        if (ret) {
                device_printf(dev,
                    "Failed to read HWP capabilities MSR for cpu%d (%d)\n",
                    pc->pc_cpuid, ret);
                goto out;
        }

        /*
         * High and low are static; "guaranteed" is dynamic; and efficient is
         * also dynamic.
         */
        sc->high = IA32_HWP_CAPABILITIES_HIGHEST_PERFORMANCE(caps);
        sc->guaranteed = IA32_HWP_CAPABILITIES_GUARANTEED_PERFORMANCE(caps);
        sc->efficient = IA32_HWP_CAPABILITIES_EFFICIENT_PERFORMANCE(caps);
        sc->low = IA32_HWP_CAPABILITIES_LOWEST_PERFORMANCE(caps);

        /* hardware autonomous selection determines the performance target */
        sc->req &= ~IA32_HWP_DESIRED_PERFORMANCE;

        /* enable HW dynamic selection of window size */
        sc->req &= ~IA32_HWP_ACTIVITY_WINDOW;

        /* IA32_HWP_REQUEST.Minimum_Performance = IA32_HWP_CAPABILITIES.Lowest_Performance */
        sc->req &= ~IA32_HWP_MINIMUM_PERFORMANCE;
        sc->req |= sc->low;

        /* IA32_HWP_REQUEST.Maximum_Performance = IA32_HWP_CAPABILITIES.Highest_Performance. */
        sc->req &= ~IA32_HWP_REQUEST_MAXIMUM_PERFORMANCE;
        sc->req |= sc->high << 8;

        /* If supported, request package-level control for this CPU. */
        if (sc->hwp_pkg_ctrl_en)
                ret = wrmsr_safe(MSR_IA32_HWP_REQUEST, sc->req |
                    IA32_HWP_REQUEST_PACKAGE_CONTROL);
        else
                ret = wrmsr_safe(MSR_IA32_HWP_REQUEST, sc->req);
        if (ret) {
                device_printf(dev,
                    "Failed to setup%s autonomous HWP for cpu%d\n",
                    sc->hwp_pkg_ctrl_en ? " PKG" : "", pc->pc_cpuid);
                goto out;
        }

        /* If supported, write the PKG-wide control MSR. */
        if (sc->hwp_pkg_ctrl_en) {
                /*
                 * "The structure of the IA32_HWP_REQUEST_PKG MSR
                 * (package-level) is identical to the IA32_HWP_REQUEST MSR
                 * with the exception of the Package Control field, which does
                 * not exist." (Intel SDM §14.4.4)
                 */
                ret = wrmsr_safe(MSR_IA32_HWP_REQUEST_PKG, sc->req);
                if (ret) {
                        device_printf(dev,
                            "Failed to set autonomous HWP for package\n");
                }
        }

out:
        thread_lock(curthread);
        sched_unbind(curthread);
        thread_unlock(curthread);

        return (ret);
}
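
/*
 * Attach: record the HWP-related CPUID feature bits, switch the CPU to
 * autonomous HWP, expose the debug dump and "epp" sysctls, and register the
 * device with cpufreq(4).
 */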
static int
intel_hwpstate_attach(device_t dev)
{
        struct hwp_softc *sc;
        int ret;

        sc = device_get_softc(dev);
        sc->dev = dev;

        /* eax */
        if (cpu_power_eax & CPUTPM1_HWP_NOTIFICATION)
                sc->hwp_notifications = true;
        if (cpu_power_eax & CPUTPM1_HWP_ACTIVITY_WINDOW)
                sc->hwp_activity_window = true;
        if (cpu_power_eax & CPUTPM1_HWP_PERF_PREF)
                sc->hwp_pref_ctrl = true;
        if (cpu_power_eax & CPUTPM1_HWP_PKG)
                sc->hwp_pkg_ctrl = true;

        /* Allow administrators to disable pkg-level control. */
        sc->hwp_pkg_ctrl_en = (sc->hwp_pkg_ctrl && hwpstate_pkg_ctrl_enable);

        /* ecx */
        if (cpu_power_ecx & CPUID_PERF_BIAS)
                sc->hwp_perf_bias = true;

        ret = set_autonomous_hwp(sc);
        if (ret)
                return (ret);

        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
            SYSCTL_STATIC_CHILDREN(_debug), OID_AUTO, device_get_nameunit(dev),
            CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_SKIP | CTLFLAG_MPSAFE,
            sc, 0, intel_hwp_dump_sysctl_handler, "A", "");

        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
            SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
            "epp", CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_MPSAFE, dev, 0,
            sysctl_epp_select, "I",
            "Efficiency/Performance Preference "
            "(range from 0, most performant, through 100, most efficient)");
        return (cpufreq_register(dev));
}

static int
intel_hwpstate_detach(device_t dev)
{
        return (cpufreq_unregister(dev));
}
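
/*
 * cpufreq "get" method: the hardware picks the operating point on its own,
 * so the only thing reported is the current frequency as estimated by
 * cpu_est_clockrate(); voltage, power, and latency are left unknown.
 */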
static int
intel_hwpstate_get(device_t dev, struct cf_setting *set)
{
        struct pcpu *pc;
        uint64_t rate;
        int ret;

        if (set == NULL)
                return (EINVAL);

        pc = cpu_get_pcpu(dev);
        if (pc == NULL)
                return (ENXIO);

        memset(set, CPUFREQ_VAL_UNKNOWN, sizeof(*set));
        set->dev = dev;

        ret = cpu_est_clockrate(pc->pc_cpuid, &rate);
        if (ret == 0)
                set->freq = rate / 1000000;

        set->volts = CPUFREQ_VAL_UNKNOWN;
        set->power = CPUFREQ_VAL_UNKNOWN;
        set->lat = CPUFREQ_VAL_UNKNOWN;

        return (0);
}
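
/*
 * Advertise absolute frequencies, but flag the driver as informational only
 * (cpufreq(4) will not try to set levels through it) and as uncached, since
 * the reported frequency is re-estimated on every query.
 */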
static int
intel_hwpstate_type(device_t dev, int *type)
{
        if (type == NULL)
                return (EINVAL);
        *type = CPUFREQ_TYPE_ABSOLUTE | CPUFREQ_FLAG_INFO_ONLY |
            CPUFREQ_FLAG_UNCACHED;

        return (0);
}

static int
intel_hwpstate_suspend(device_t dev)
{
        return (0);
}

/*
 * Redo a subset of set_autonomous_hwp on resume; untested.  Without this,
 * testers observed that on resume MSR_IA32_HWP_REQUEST was bogus.
 */
static int
intel_hwpstate_resume(device_t dev)
{
        struct hwp_softc *sc;
        struct pcpu *pc;
        int ret;

        sc = device_get_softc(dev);

        pc = cpu_get_pcpu(dev);
        if (pc == NULL)
                return (ENXIO);

        thread_lock(curthread);
        sched_bind(curthread, pc->pc_cpuid);
        thread_unlock(curthread);

        ret = wrmsr_safe(MSR_IA32_PM_ENABLE, 1);
        if (ret) {
                device_printf(dev,
                    "Failed to enable HWP for cpu%d after suspend (%d)\n",
                    pc->pc_cpuid, ret);
                goto out;
        }

        if (sc->hwp_pkg_ctrl_en)
                ret = wrmsr_safe(MSR_IA32_HWP_REQUEST, sc->req |
                    IA32_HWP_REQUEST_PACKAGE_CONTROL);
        else
                ret = wrmsr_safe(MSR_IA32_HWP_REQUEST, sc->req);
        if (ret) {
                device_printf(dev,
                    "Failed to set%s autonomous HWP for cpu%d after suspend\n",
                    sc->hwp_pkg_ctrl_en ? " PKG" : "", pc->pc_cpuid);
                goto out;
        }
        if (sc->hwp_pkg_ctrl_en) {
                ret = wrmsr_safe(MSR_IA32_HWP_REQUEST_PKG, sc->req);
                if (ret) {
                        device_printf(dev,
                            "Failed to set autonomous HWP for package after "
                            "suspend\n");
                        goto out;
                }
        }
        if (!sc->hwp_pref_ctrl && sc->hwp_perf_bias_cached) {
                ret = wrmsr_safe(MSR_IA32_ENERGY_PERF_BIAS,
                    sc->hwp_energy_perf_bias);
                if (ret) {
                        device_printf(dev,
                            "Failed to set energy perf bias for cpu%d after "
                            "suspend\n", pc->pc_cpuid);
                }
        }

out:
        thread_lock(curthread);
        sched_unbind(curthread);
        thread_unlock(curthread);

        return (ret);
}