ec.c

/*
 *  ec.c - ACPI Embedded Controller Driver (v3)
 *
 *  Copyright (C) 2001-2015 Intel Corporation
 *    Author: 2014, 2015 Lv Zheng <lv.zheng@intel.com>
 *            2006, 2007 Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>
 *            2006 Denis Sadykov <denis.m.sadykov@intel.com>
 *            2004 Luming Yu <luming.yu@intel.com>
 *            2001, 2002 Andy Grover <andrew.grover@intel.com>
 *            2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 *  Copyright (C) 2008      Alexey Starikovskiy <astarikovskiy@suse.de>
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or (at
 *  your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful, but
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */

/* Uncomment next line to get verbose printout */
/* #define DEBUG */
#define pr_fmt(fmt) "ACPI : EC: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <asm/io.h>

#include "internal.h"

#define ACPI_EC_CLASS			"embedded_controller"
#define ACPI_EC_DEVICE_NAME		"Embedded Controller"
#define ACPI_EC_FILE_INFO		"info"

/* EC status register */
#define ACPI_EC_FLAG_OBF	0x01	/* Output buffer full */
#define ACPI_EC_FLAG_IBF	0x02	/* Input buffer full */
#define ACPI_EC_FLAG_CMD	0x08	/* Input buffer contains a command */
#define ACPI_EC_FLAG_BURST	0x10	/* burst mode */
#define ACPI_EC_FLAG_SCI	0x20	/* EC-SCI occurred */

/*
 * The SCI_EVT clearing timing is not defined by the ACPI specification.
 * This leads to lots of practical timing issues for the host EC driver.
 * The following variations are defined (from the target EC firmware's
 * perspective):
 * STATUS: After indicating SCI_EVT edge triggered IRQ to the host, the
 *         target can clear SCI_EVT at any time so long as the host can see
 *         the indication by reading the status register (EC_SC). So the
 *         host should re-check SCI_EVT after the first time the SCI_EVT
 *         indication is seen, which is the same time the query request
 *         (QR_EC) is written to the command register (EC_CMD). SCI_EVT set
 *         at any later time could indicate another event. Normally such
 *         kind of EC firmware has implemented an event queue and will
 *         return 0x00 to indicate "no outstanding event".
 * QUERY: After seeing the query request (QR_EC) written to the command
 *        register (EC_CMD) by the host and having prepared the responding
 *        event value in the data register (EC_DATA), the target can safely
 *        clear SCI_EVT because the target can confirm that the current
 *        event is being handled by the host. The host then should check
 *        SCI_EVT right after reading the event response from the data
 *        register (EC_DATA).
 * EVENT: After seeing the event response read from the data register
 *        (EC_DATA) by the host, the target can clear SCI_EVT. As the
 *        target requires time to notice the change in the data register
 *        (EC_DATA), the host may be required to wait additional guarding
 *        time before checking the SCI_EVT again. Such guarding may not be
 *        necessary if the host is notified via another IRQ.
 */
#define ACPI_EC_EVT_TIMING_STATUS	0x00
#define ACPI_EC_EVT_TIMING_QUERY	0x01
#define ACPI_EC_EVT_TIMING_EVENT	0x02

/* EC commands */
enum ec_command {
	ACPI_EC_COMMAND_READ = 0x80,
	ACPI_EC_COMMAND_WRITE = 0x81,
	ACPI_EC_BURST_ENABLE = 0x82,
	ACPI_EC_BURST_DISABLE = 0x83,
	ACPI_EC_COMMAND_QUERY = 0x84,
};
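
/*
 * Typical transaction byte sequences, per the ACPI EC protocol and for
 * illustration only: a read writes RD_EC (0x80) to EC_CMD and then the
 * address byte to EC_DATA, and fetches the result from EC_DATA once OBF
 * is set; a write sends WR_EC (0x81) followed by the address and data
 * bytes, each only while IBF is clear.
 */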
#define ACPI_EC_DELAY		500	/* Wait 500ms max. during EC ops */
#define ACPI_EC_UDELAY_GLK	1000	/* Wait 1ms max. to get global lock */
#define ACPI_EC_UDELAY_POLL	550	/* Wait 550us between EC transaction polls */
#define ACPI_EC_CLEAR_MAX	100	/* Maximum number of events to query
					 * when trying to clear the EC */

enum {
	EC_FLAGS_QUERY_PENDING,		/* Query is pending */
	EC_FLAGS_QUERY_GUARDING,	/* Guard for SCI_EVT check */
	EC_FLAGS_HANDLERS_INSTALLED,	/* Handlers for GPE and
					 * OpReg are installed */
	EC_FLAGS_STARTED,		/* Driver is started */
	EC_FLAGS_STOPPED,		/* Driver is stopped */
	EC_FLAGS_COMMAND_STORM,		/* GPE storms occurred to the
					 * current command processing */
};

#define ACPI_EC_COMMAND_POLL		0x01 /* Available for command byte */
#define ACPI_EC_COMMAND_COMPLETE	0x02 /* Completed last byte */

/* ec.c is compiled in acpi namespace so this shows up as acpi.ec_delay param */
static unsigned int ec_delay __read_mostly = ACPI_EC_DELAY;
module_param(ec_delay, uint, 0644);
MODULE_PARM_DESC(ec_delay, "Timeout(ms) waited until an EC command completes");

static bool ec_busy_polling __read_mostly;
module_param(ec_busy_polling, bool, 0644);
MODULE_PARM_DESC(ec_busy_polling, "Use busy polling to advance EC transaction");

static unsigned int ec_polling_guard __read_mostly = ACPI_EC_UDELAY_POLL;
module_param(ec_polling_guard, uint, 0644);
MODULE_PARM_DESC(ec_polling_guard, "Guard time(us) between EC accesses in polling modes");

static unsigned int ec_event_clearing __read_mostly = ACPI_EC_EVT_TIMING_QUERY;

/*
 * If the number of false interrupts per transaction exceeds this
 * threshold, the driver assumes a GPE storm has occurred and disables
 * the GPE while processing the current transaction.
 */
static unsigned int ec_storm_threshold __read_mostly = 8;
module_param(ec_storm_threshold, uint, 0644);
MODULE_PARM_DESC(ec_storm_threshold, "Maximum number of false GPEs not considered a GPE storm");

struct acpi_ec_query_handler {
	struct list_head node;
	acpi_ec_query_func func;
	acpi_handle handle;
	void *data;
	u8 query_bit;
	struct kref kref;
};
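
/*
 * wi/ri count the write/read bytes transferred so far (compared against
 * wlen/rlen), irq_count tracks suspected false GPEs for storm detection,
 * and flags carries the ACPI_EC_COMMAND_POLL/COMPLETE progress bits.
 */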
struct transaction {
	const u8 *wdata;
	u8 *rdata;
	unsigned short irq_count;
	u8 command;
	u8 wi;
	u8 ri;
	u8 wlen;
	u8 rlen;
	u8 flags;
};

static int acpi_ec_query(struct acpi_ec *ec, u8 *data);
static void advance_transaction(struct acpi_ec *ec);

struct acpi_ec *boot_ec, *first_ec;
EXPORT_SYMBOL(first_ec);

static int EC_FLAGS_VALIDATE_ECDT; /* ASUStec ECDTs need to be validated */
static int EC_FLAGS_SKIP_DSDT_SCAN; /* Not all BIOS survive early DSDT scan */
static int EC_FLAGS_CLEAR_ON_RESUME; /* Needs acpi_ec_clear() on boot/resume */
static int EC_FLAGS_QUERY_HANDSHAKE; /* Needs QR_EC issued when SCI_EVT set */

/* --------------------------------------------------------------------------
 * Logging/Debugging
 * -------------------------------------------------------------------------- */

/*
 * Splitters used by the developers to track the boundary of the EC
 * handling processes.
 */
#ifdef DEBUG
#define EC_DBG_SEP	" "
#define EC_DBG_DRV	"+++++"
#define EC_DBG_STM	"====="
#define EC_DBG_REQ	"*****"
#define EC_DBG_EVT	"#####"
#else
#define EC_DBG_SEP	""
#define EC_DBG_DRV
#define EC_DBG_STM
#define EC_DBG_REQ
#define EC_DBG_EVT
#endif

#define ec_log_raw(fmt, ...) \
	pr_info(fmt "\n", ##__VA_ARGS__)
#define ec_dbg_raw(fmt, ...) \
	pr_debug(fmt "\n", ##__VA_ARGS__)
#define ec_log(filter, fmt, ...) \
	ec_log_raw(filter EC_DBG_SEP fmt EC_DBG_SEP filter, ##__VA_ARGS__)
#define ec_dbg(filter, fmt, ...) \
	ec_dbg_raw(filter EC_DBG_SEP fmt EC_DBG_SEP filter, ##__VA_ARGS__)

#define ec_log_drv(fmt, ...) \
	ec_log(EC_DBG_DRV, fmt, ##__VA_ARGS__)
#define ec_dbg_drv(fmt, ...) \
	ec_dbg(EC_DBG_DRV, fmt, ##__VA_ARGS__)
#define ec_dbg_stm(fmt, ...) \
	ec_dbg(EC_DBG_STM, fmt, ##__VA_ARGS__)
#define ec_dbg_req(fmt, ...) \
	ec_dbg(EC_DBG_REQ, fmt, ##__VA_ARGS__)
#define ec_dbg_evt(fmt, ...) \
	ec_dbg(EC_DBG_EVT, fmt, ##__VA_ARGS__)
#define ec_dbg_ref(ec, fmt, ...) \
	ec_dbg_raw("%lu: " fmt, ec->reference_count, ## __VA_ARGS__)

/* --------------------------------------------------------------------------
 * Device Flags
 * -------------------------------------------------------------------------- */

static bool acpi_ec_started(struct acpi_ec *ec)
{
	return test_bit(EC_FLAGS_STARTED, &ec->flags) &&
	       !test_bit(EC_FLAGS_STOPPED, &ec->flags);
}

static bool acpi_ec_flushed(struct acpi_ec *ec)
{
	return ec->reference_count == 1;
}

/* --------------------------------------------------------------------------
 * EC Registers
 * -------------------------------------------------------------------------- */

static inline u8 acpi_ec_read_status(struct acpi_ec *ec)
{
	u8 x = inb(ec->command_addr);

	ec_dbg_raw("EC_SC(R) = 0x%2.2x "
		   "SCI_EVT=%d BURST=%d CMD=%d IBF=%d OBF=%d",
		   x,
		   !!(x & ACPI_EC_FLAG_SCI),
		   !!(x & ACPI_EC_FLAG_BURST),
		   !!(x & ACPI_EC_FLAG_CMD),
		   !!(x & ACPI_EC_FLAG_IBF),
		   !!(x & ACPI_EC_FLAG_OBF));
	return x;
}

static inline u8 acpi_ec_read_data(struct acpi_ec *ec)
{
	u8 x = inb(ec->data_addr);

	ec->timestamp = jiffies;
	ec_dbg_raw("EC_DATA(R) = 0x%2.2x", x);
	return x;
}

static inline void acpi_ec_write_cmd(struct acpi_ec *ec, u8 command)
{
	ec_dbg_raw("EC_SC(W) = 0x%2.2x", command);
	outb(command, ec->command_addr);
	ec->timestamp = jiffies;
}

static inline void acpi_ec_write_data(struct acpi_ec *ec, u8 data)
{
	ec_dbg_raw("EC_DATA(W) = 0x%2.2x", data);
	outb(data, ec->data_addr);
	ec->timestamp = jiffies;
}

#ifdef DEBUG
static const char *acpi_ec_cmd_string(u8 cmd)
{
	switch (cmd) {
	case 0x80:
		return "RD_EC";
	case 0x81:
		return "WR_EC";
	case 0x82:
		return "BE_EC";
	case 0x83:
		return "BD_EC";
	case 0x84:
		return "QR_EC";
	}
	return "UNKNOWN";
}
#else
#define acpi_ec_cmd_string(cmd)		"UNDEF"
#endif

/* --------------------------------------------------------------------------
 * GPE Registers
 * -------------------------------------------------------------------------- */

static inline bool acpi_ec_is_gpe_raised(struct acpi_ec *ec)
{
	acpi_event_status gpe_status = 0;

	(void)acpi_get_gpe_status(NULL, ec->gpe, &gpe_status);
	return (gpe_status & ACPI_EVENT_FLAG_STATUS_SET) ? true : false;
}

static inline void acpi_ec_enable_gpe(struct acpi_ec *ec, bool open)
{
	if (open)
		acpi_enable_gpe(NULL, ec->gpe);
	else {
		BUG_ON(ec->reference_count < 1);
		acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_ENABLE);
	}
	if (acpi_ec_is_gpe_raised(ec)) {
		/*
		 * On some platforms, EN=1 writes cannot trigger GPE. So
		 * software needs to manually trigger a pseudo GPE event on
		 * EN=1 writes.
		 */
		ec_dbg_raw("Polling quirk");
		advance_transaction(ec);
	}
}

static inline void acpi_ec_disable_gpe(struct acpi_ec *ec, bool close)
{
	if (close)
		acpi_disable_gpe(NULL, ec->gpe);
	else {
		BUG_ON(ec->reference_count < 1);
		acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_DISABLE);
	}
}

static inline void acpi_ec_clear_gpe(struct acpi_ec *ec)
{
	/*
	 * GPE STS is a W1C register, which means:
	 * 1. Software can clear it without worrying about clearing other
	 *    GPEs' STS bits when the hardware sets them in parallel.
	 * 2. As long as software can ensure only clearing it when it is
	 *    set, hardware won't set it in parallel.
	 * So software can clear GPE in any context.
	 * Warning: do not move the check into advance_transaction() as the
	 * EC commands will be sent without GPE raised.
	 */
	if (!acpi_ec_is_gpe_raised(ec))
		return;
	acpi_clear_gpe(NULL, ec->gpe);
}

/* --------------------------------------------------------------------------
 * Transaction Management
 * -------------------------------------------------------------------------- */
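
/*
 * Summary of the reference counting below: the first submitted request
 * enables the GPE, the last completed request disables it again, and a
 * count of 1 (only the driver's own "started" reference left) means all
 * in-flight transactions have been flushed, which wakes up ec->wait.
 */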
static void acpi_ec_submit_request(struct acpi_ec *ec)
{
	ec->reference_count++;
	if (ec->reference_count == 1)
		acpi_ec_enable_gpe(ec, true);
}

static void acpi_ec_complete_request(struct acpi_ec *ec)
{
	bool flushed = false;

	ec->reference_count--;
	if (ec->reference_count == 0)
		acpi_ec_disable_gpe(ec, true);
	flushed = acpi_ec_flushed(ec);
	if (flushed)
		wake_up(&ec->wait);
}

static void acpi_ec_set_storm(struct acpi_ec *ec, u8 flag)
{
	if (!test_bit(flag, &ec->flags)) {
		acpi_ec_disable_gpe(ec, false);
		ec_dbg_drv("Polling enabled");
		set_bit(flag, &ec->flags);
	}
}

static void acpi_ec_clear_storm(struct acpi_ec *ec, u8 flag)
{
	if (test_bit(flag, &ec->flags)) {
		clear_bit(flag, &ec->flags);
		acpi_ec_enable_gpe(ec, false);
		ec_dbg_drv("Polling disabled");
	}
}

/*
 * acpi_ec_submit_flushable_request() - Increase the reference count unless
 *                                      a flush operation is in progress
 * @ec: the EC device
 *
 * This function must be used before taking a new action that should hold
 * the reference count.  If this function returns false, then the action
 * must be discarded or it will prevent the flush operation from being
 * completed.
 */
static bool acpi_ec_submit_flushable_request(struct acpi_ec *ec)
{
	if (!acpi_ec_started(ec))
		return false;
	acpi_ec_submit_request(ec);
	return true;
}

static void acpi_ec_submit_query(struct acpi_ec *ec)
{
	if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) {
		ec_dbg_evt("Command(%s) submitted/blocked",
			   acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY));
		ec->nr_pending_queries++;
		schedule_work(&ec->work);
	}
}

static void acpi_ec_complete_query(struct acpi_ec *ec)
{
	if (test_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) {
		clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags);
		ec_dbg_evt("Command(%s) unblocked",
			   acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY));
	}
}

static bool acpi_ec_guard_event(struct acpi_ec *ec)
{
	if (ec_event_clearing == ACPI_EC_EVT_TIMING_STATUS ||
	    ec_event_clearing == ACPI_EC_EVT_TIMING_QUERY ||
	    !test_bit(EC_FLAGS_QUERY_PENDING, &ec->flags) ||
	    (ec->curr && ec->curr->command == ACPI_EC_COMMAND_QUERY))
		return false;

	/*
	 * Postpone the query submission so that the firmware can proceed;
	 * SCI_EVT should not be checked before the firmware has had a
	 * chance to reflag it.
	 */
	return true;
}

static int ec_transaction_polled(struct acpi_ec *ec)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&ec->lock, flags);
	if (ec->curr && (ec->curr->flags & ACPI_EC_COMMAND_POLL))
		ret = 1;
	spin_unlock_irqrestore(&ec->lock, flags);
	return ret;
}

static int ec_transaction_completed(struct acpi_ec *ec)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&ec->lock, flags);
	if (ec->curr && (ec->curr->flags & ACPI_EC_COMMAND_COMPLETE))
		ret = 1;
	spin_unlock_irqrestore(&ec->lock, flags);
	return ret;
}
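
/*
 * Advance the per-transaction state (POLL, then COMPLETE) and, depending
 * on the assumed SCI_EVT clearing timing, either unblock the next QR_EC
 * submission or arm the guarding flag so that SCI_EVT is only re-checked
 * after the guard interval.
 */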
static inline void ec_transaction_transition(struct acpi_ec *ec, unsigned long flag)
{
	ec->curr->flags |= flag;
	if (ec->curr->command == ACPI_EC_COMMAND_QUERY) {
		if (ec_event_clearing == ACPI_EC_EVT_TIMING_STATUS &&
		    flag == ACPI_EC_COMMAND_POLL)
			acpi_ec_complete_query(ec);
		if (ec_event_clearing == ACPI_EC_EVT_TIMING_QUERY &&
		    flag == ACPI_EC_COMMAND_COMPLETE)
			acpi_ec_complete_query(ec);
		if (ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT &&
		    flag == ACPI_EC_COMMAND_COMPLETE)
			set_bit(EC_FLAGS_QUERY_GUARDING, &ec->flags);
	}
}
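
/*
 * Core transaction state machine, called both from the GPE handler and
 * from the polling/guarding paths with ec->lock held.  It clears the GPE
 * status bit, then writes or reads the next byte according to IBF/OBF,
 * counts suspected false interrupts for storm detection, and submits a
 * query whenever SCI_EVT is seen set.
 */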
static void advance_transaction(struct acpi_ec *ec)
{
	struct transaction *t;
	u8 status;
	bool wakeup = false;

	ec_dbg_stm("%s (%d)", in_interrupt() ? "IRQ" : "TASK",
		   smp_processor_id());
	/*
	 * By always clearing STS before handling all indications, we can
	 * ensure a hardware STS 0->1 change after this clearing can always
	 * trigger a GPE interrupt.
	 */
	acpi_ec_clear_gpe(ec);
	status = acpi_ec_read_status(ec);
	t = ec->curr;
	/*
	 * Another IRQ or a guarded polling mode advancement is detected,
	 * the next QR_EC submission is then allowed.
	 */
	if (!t || !(t->flags & ACPI_EC_COMMAND_POLL)) {
		if (ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT &&
		    (!ec->nr_pending_queries ||
		     test_bit(EC_FLAGS_QUERY_GUARDING, &ec->flags))) {
			clear_bit(EC_FLAGS_QUERY_GUARDING, &ec->flags);
			acpi_ec_complete_query(ec);
		}
	}
	if (!t)
		goto err;
	if (t->flags & ACPI_EC_COMMAND_POLL) {
		if (t->wlen > t->wi) {
			if ((status & ACPI_EC_FLAG_IBF) == 0)
				acpi_ec_write_data(ec, t->wdata[t->wi++]);
			else
				goto err;
		} else if (t->rlen > t->ri) {
			if ((status & ACPI_EC_FLAG_OBF) == 1) {
				t->rdata[t->ri++] = acpi_ec_read_data(ec);
				if (t->rlen == t->ri) {
					ec_transaction_transition(ec, ACPI_EC_COMMAND_COMPLETE);
					if (t->command == ACPI_EC_COMMAND_QUERY)
						ec_dbg_evt("Command(%s) completed by hardware",
							   acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY));
					wakeup = true;
				}
			} else
				goto err;
		} else if (t->wlen == t->wi &&
			   (status & ACPI_EC_FLAG_IBF) == 0) {
			ec_transaction_transition(ec, ACPI_EC_COMMAND_COMPLETE);
			wakeup = true;
		}
		goto out;
	} else {
		if (EC_FLAGS_QUERY_HANDSHAKE &&
		    !(status & ACPI_EC_FLAG_SCI) &&
		    (t->command == ACPI_EC_COMMAND_QUERY)) {
			ec_transaction_transition(ec, ACPI_EC_COMMAND_POLL);
			t->rdata[t->ri++] = 0x00;
			ec_transaction_transition(ec, ACPI_EC_COMMAND_COMPLETE);
			ec_dbg_evt("Command(%s) completed by software",
				   acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY));
			wakeup = true;
		} else if ((status & ACPI_EC_FLAG_IBF) == 0) {
			acpi_ec_write_cmd(ec, t->command);
			ec_transaction_transition(ec, ACPI_EC_COMMAND_POLL);
		} else
			goto err;
		goto out;
	}
err:
	/*
	 * If the SCI bit is set, do not count this as a false IRQ;
	 * otherwise an IRQ that simply was not handled here would be
	 * mistaken for a false one.
	 */
	if (!(status & ACPI_EC_FLAG_SCI)) {
		if (in_interrupt() && t) {
			if (t->irq_count < ec_storm_threshold)
				++t->irq_count;
			/* Allow triggering on 0 threshold */
			if (t->irq_count == ec_storm_threshold)
				acpi_ec_set_storm(ec, EC_FLAGS_COMMAND_STORM);
		}
	}
out:
	if (status & ACPI_EC_FLAG_SCI)
		acpi_ec_submit_query(ec);
	if (wakeup && in_interrupt())
		wake_up(&ec->wait);
}

static void start_transaction(struct acpi_ec *ec)
{
	ec->curr->irq_count = ec->curr->wi = ec->curr->ri = 0;
	ec->curr->flags = 0;
}
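
/*
 * Guard the register accesses for the polling modes: wait (busily or on
 * the wait queue) until either the transaction completes or at least
 * ec_polling_guard microseconds have passed since the last EC register
 * access recorded in ec->timestamp.
 */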
static int ec_guard(struct acpi_ec *ec)
{
	unsigned long guard = usecs_to_jiffies(ec_polling_guard);
	unsigned long timeout = ec->timestamp + guard;

	do {
		if (ec_busy_polling) {
			/* Perform busy polling */
			if (ec_transaction_completed(ec))
				return 0;
			udelay(jiffies_to_usecs(guard));
		} else {
			/*
			 * Perform wait polling
			 *
			 * For the "event" SCI_EVT clearing timing, perform
			 * the guarding before re-checking SCI_EVT.  For the
			 * other timings such guarding is not needed, in
			 * line with the old practices.
			 */
			if (!ec_transaction_polled(ec) &&
			    !acpi_ec_guard_event(ec))
				break;
			if (wait_event_timeout(ec->wait,
					       ec_transaction_completed(ec),
					       guard))
				return 0;
		}
		/* Guard the register accesses for the polling modes */
	} while (time_before(jiffies, timeout));
	return -ETIME;
}
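
/*
 * Poll for completion: within each ec_delay window the transaction is
 * advanced whenever the guard expires, and on timeout the whole command
 * is restarted (up to 5 attempts) in case the controller was reset.
 */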
static int ec_poll(struct acpi_ec *ec)
{
	unsigned long flags;
	int repeat = 5; /* number of command restarts */

	while (repeat--) {
		unsigned long delay = jiffies +
			msecs_to_jiffies(ec_delay);
		do {
			if (!ec_guard(ec))
				return 0;
			spin_lock_irqsave(&ec->lock, flags);
			advance_transaction(ec);
			spin_unlock_irqrestore(&ec->lock, flags);
		} while (time_before(jiffies, delay));
		pr_debug("controller reset, restart transaction\n");
		spin_lock_irqsave(&ec->lock, flags);
		start_transaction(ec);
		spin_unlock_irqrestore(&ec->lock, flags);
	}
	return -ETIME;
}

static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
					struct transaction *t)
{
	unsigned long tmp;
	int ret = 0;

	/* start transaction */
	spin_lock_irqsave(&ec->lock, tmp);
	/* Enable GPE for command processing (IBF=0/OBF=1) */
	if (!acpi_ec_submit_flushable_request(ec)) {
		ret = -EINVAL;
		goto unlock;
	}
	ec_dbg_ref(ec, "Increase command");
	/* following two actions should be kept atomic */
	ec->curr = t;
	ec_dbg_req("Command(%s) started", acpi_ec_cmd_string(t->command));
	start_transaction(ec);
	spin_unlock_irqrestore(&ec->lock, tmp);

	ret = ec_poll(ec);

	spin_lock_irqsave(&ec->lock, tmp);
	if (t->irq_count == ec_storm_threshold)
		acpi_ec_clear_storm(ec, EC_FLAGS_COMMAND_STORM);
	ec_dbg_req("Command(%s) stopped", acpi_ec_cmd_string(t->command));
	ec->curr = NULL;
	/* Disable GPE for command processing (IBF=0/OBF=1) */
	acpi_ec_complete_request(ec);
	ec_dbg_ref(ec, "Decrease command");
unlock:
	spin_unlock_irqrestore(&ec->lock, tmp);
	return ret;
}
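
/*
 * Serialized entry point for a single EC command: ec->mutex prevents
 * concurrent transactions and, when the EC declares _GLK, the ACPI
 * global lock is also held for the duration of the transfer.
 */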
static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
{
	int status;
	u32 glk;

	if (!ec || (!t) || (t->wlen && !t->wdata) || (t->rlen && !t->rdata))
		return -EINVAL;
	if (t->rdata)
		memset(t->rdata, 0, t->rlen);

	mutex_lock(&ec->mutex);
	if (ec->global_lock) {
		status = acpi_acquire_global_lock(ACPI_EC_UDELAY_GLK, &glk);
		if (ACPI_FAILURE(status)) {
			status = -ENODEV;
			goto unlock;
		}
	}

	status = acpi_ec_transaction_unlocked(ec, t);

	if (ec->global_lock)
		acpi_release_global_lock(glk);
unlock:
	mutex_unlock(&ec->mutex);
	return status;
}

static int acpi_ec_burst_enable(struct acpi_ec *ec)
{
	u8 d;
	struct transaction t = {.command = ACPI_EC_BURST_ENABLE,
				.wdata = NULL, .rdata = &d,
				.wlen = 0, .rlen = 1};

	return acpi_ec_transaction(ec, &t);
}

static int acpi_ec_burst_disable(struct acpi_ec *ec)
{
	struct transaction t = {.command = ACPI_EC_BURST_DISABLE,
				.wdata = NULL, .rdata = NULL,
				.wlen = 0, .rlen = 0};

	return (acpi_ec_read_status(ec) & ACPI_EC_FLAG_BURST) ?
				acpi_ec_transaction(ec, &t) : 0;
}

static int acpi_ec_read(struct acpi_ec *ec, u8 address, u8 *data)
{
	int result;
	u8 d;
	struct transaction t = {.command = ACPI_EC_COMMAND_READ,
				.wdata = &address, .rdata = &d,
				.wlen = 1, .rlen = 1};

	result = acpi_ec_transaction(ec, &t);
	*data = d;
	return result;
}

static int acpi_ec_write(struct acpi_ec *ec, u8 address, u8 data)
{
	u8 wdata[2] = { address, data };
	struct transaction t = {.command = ACPI_EC_COMMAND_WRITE,
				.wdata = wdata, .rdata = NULL,
				.wlen = 2, .rlen = 0};

	return acpi_ec_transaction(ec, &t);
}
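
/*
 * The exported helpers below operate on the first EC found.  For
 * illustration only (the offset 0xA0 is made up), another driver could
 * read one byte of EC address space like this:
 *
 *	u8 val;
 *
 *	if (!ec_read(0xA0, &val))
 *		pr_info("EC[0xA0] = 0x%02x\n", val);
 */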
int ec_read(u8 addr, u8 *val)
{
	int err;
	u8 temp_data;

	if (!first_ec)
		return -ENODEV;

	err = acpi_ec_read(first_ec, addr, &temp_data);

	if (!err) {
		*val = temp_data;
		return 0;
	}
	return err;
}
EXPORT_SYMBOL(ec_read);

int ec_write(u8 addr, u8 val)
{
	int err;

	if (!first_ec)
		return -ENODEV;

	err = acpi_ec_write(first_ec, addr, val);

	return err;
}
EXPORT_SYMBOL(ec_write);

int ec_transaction(u8 command,
		   const u8 *wdata, unsigned wdata_len,
		   u8 *rdata, unsigned rdata_len)
{
	struct transaction t = {.command = command,
				.wdata = wdata, .rdata = rdata,
				.wlen = wdata_len, .rlen = rdata_len};

	if (!first_ec)
		return -ENODEV;

	return acpi_ec_transaction(first_ec, &t);
}
EXPORT_SYMBOL(ec_transaction);

/* Get the handle to the EC device */
acpi_handle ec_get_handle(void)
{
	if (!first_ec)
		return NULL;
	return first_ec->handle;
}
EXPORT_SYMBOL(ec_get_handle);

/*
 * Process _Q events that might have accumulated in the EC.
 * Run with locked ec mutex.
 */
static void acpi_ec_clear(struct acpi_ec *ec)
{
	int i, status;
	u8 value = 0;

	for (i = 0; i < ACPI_EC_CLEAR_MAX; i++) {
		status = acpi_ec_query(ec, &value);
		if (status || !value)
			break;
	}

	if (unlikely(i == ACPI_EC_CLEAR_MAX))
		pr_warn("Warning: Maximum of %d stale EC events cleared\n", i);
	else
		pr_info("%d stale EC events cleared\n", i);
}

static void acpi_ec_start(struct acpi_ec *ec, bool resuming)
{
	unsigned long flags;

	spin_lock_irqsave(&ec->lock, flags);
	if (!test_and_set_bit(EC_FLAGS_STARTED, &ec->flags)) {
		ec_dbg_drv("Starting EC");
		/* Enable GPE for event processing (SCI_EVT=1) */
		if (!resuming) {
			acpi_ec_submit_request(ec);
			ec_dbg_ref(ec, "Increase driver");
		}
		ec_log_drv("EC started");
	}
	spin_unlock_irqrestore(&ec->lock, flags);
}

static bool acpi_ec_stopped(struct acpi_ec *ec)
{
	unsigned long flags;
	bool flushed;

	spin_lock_irqsave(&ec->lock, flags);
	flushed = acpi_ec_flushed(ec);
	spin_unlock_irqrestore(&ec->lock, flags);
	return flushed;
}

static void acpi_ec_stop(struct acpi_ec *ec, bool suspending)
{
	unsigned long flags;

	spin_lock_irqsave(&ec->lock, flags);
	if (acpi_ec_started(ec)) {
		ec_dbg_drv("Stopping EC");
		set_bit(EC_FLAGS_STOPPED, &ec->flags);
		spin_unlock_irqrestore(&ec->lock, flags);
		wait_event(ec->wait, acpi_ec_stopped(ec));
		spin_lock_irqsave(&ec->lock, flags);
		/* Disable GPE for event processing (SCI_EVT=1) */
		if (!suspending) {
			acpi_ec_complete_request(ec);
			ec_dbg_ref(ec, "Decrease driver");
		}
		clear_bit(EC_FLAGS_STARTED, &ec->flags);
		clear_bit(EC_FLAGS_STOPPED, &ec->flags);
		ec_log_drv("EC stopped");
	}
	spin_unlock_irqrestore(&ec->lock, flags);
}

void acpi_ec_block_transactions(void)
{
	struct acpi_ec *ec = first_ec;

	if (!ec)
		return;

	mutex_lock(&ec->mutex);
	/* Prevent transactions from being carried out */
	acpi_ec_stop(ec, true);
	mutex_unlock(&ec->mutex);
}

void acpi_ec_unblock_transactions(void)
{
	struct acpi_ec *ec = first_ec;

	if (!ec)
		return;

	/* Allow transactions to be carried out again */
	acpi_ec_start(ec, true);

	if (EC_FLAGS_CLEAR_ON_RESUME)
		acpi_ec_clear(ec);
}

void acpi_ec_unblock_transactions_early(void)
{
	/*
	 * Allow transactions to happen again (this function is called from
	 * atomic context during wakeup, so we don't need to acquire the mutex).
	 */
	if (first_ec)
		acpi_ec_start(first_ec, true);
}

/* --------------------------------------------------------------------------
                                Event Management
   -------------------------------------------------------------------------- */
static struct acpi_ec_query_handler *
acpi_ec_get_query_handler(struct acpi_ec_query_handler *handler)
{
	if (handler)
		kref_get(&handler->kref);
	return handler;
}

static void acpi_ec_query_handler_release(struct kref *kref)
{
	struct acpi_ec_query_handler *handler =
		container_of(kref, struct acpi_ec_query_handler, kref);

	kfree(handler);
}

static void acpi_ec_put_query_handler(struct acpi_ec_query_handler *handler)
{
	kref_put(&handler->kref, acpi_ec_query_handler_release);
}

int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
			      acpi_handle handle, acpi_ec_query_func func,
			      void *data)
{
	struct acpi_ec_query_handler *handler =
	    kzalloc(sizeof(struct acpi_ec_query_handler), GFP_KERNEL);

	if (!handler)
		return -ENOMEM;

	handler->query_bit = query_bit;
	handler->handle = handle;
	handler->func = func;
	handler->data = data;
	mutex_lock(&ec->mutex);
	kref_init(&handler->kref);
	list_add(&handler->node, &ec->list);
	mutex_unlock(&ec->mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(acpi_ec_add_query_handler);

void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit)
{
	struct acpi_ec_query_handler *handler, *tmp;
	LIST_HEAD(free_list);

	mutex_lock(&ec->mutex);
	list_for_each_entry_safe(handler, tmp, &ec->list, node) {
		if (query_bit == handler->query_bit) {
			list_del_init(&handler->node);
			list_add(&handler->node, &free_list);
		}
	}
	mutex_unlock(&ec->mutex);
	list_for_each_entry_safe(handler, tmp, &free_list, node)
		acpi_ec_put_query_handler(handler);
}
EXPORT_SYMBOL_GPL(acpi_ec_remove_query_handler);

static void acpi_ec_run(void *cxt)
{
	struct acpi_ec_query_handler *handler = cxt;

	if (!handler)
		return;
	ec_dbg_evt("Query(0x%02x) started", handler->query_bit);
	if (handler->func)
		handler->func(handler->data);
	else if (handler->handle)
		acpi_evaluate_object(handler->handle, NULL, NULL, NULL);
	ec_dbg_evt("Query(0x%02x) stopped", handler->query_bit);
	acpi_ec_put_query_handler(handler);
}
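
/*
 * Issue QR_EC to obtain the pending event number, then hand it off: a
 * registered handler's callback or the matching _Qxx method is run
 * asynchronously via acpi_os_execute(), with the handler reference held
 * until acpi_ec_run() releases it.
 */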
static int acpi_ec_query(struct acpi_ec *ec, u8 *data)
{
	u8 value = 0;
	int result;
	acpi_status status;
	struct acpi_ec_query_handler *handler;
	struct transaction t = {.command = ACPI_EC_COMMAND_QUERY,
				.wdata = NULL, .rdata = &value,
				.wlen = 0, .rlen = 1};

	/*
	 * Query the EC to find out which _Qxx method we need to evaluate.
	 * Note that successful completion of the query causes the ACPI_EC_SCI
	 * bit to be cleared (and thus clearing the interrupt source).
	 */
	result = acpi_ec_transaction(ec, &t);
	if (result)
		return result;
	if (data)
		*data = value;
	if (!value)
		return -ENODATA;

	mutex_lock(&ec->mutex);
	list_for_each_entry(handler, &ec->list, node) {
		if (value == handler->query_bit) {
			/* have custom handler for this bit */
			handler = acpi_ec_get_query_handler(handler);
			ec_dbg_evt("Query(0x%02x) scheduled",
				   handler->query_bit);
			status = acpi_os_execute((handler->func) ?
				OSL_NOTIFY_HANDLER : OSL_GPE_HANDLER,
				acpi_ec_run, handler);
			if (ACPI_FAILURE(status))
				result = -EBUSY;
			break;
		}
	}
	mutex_unlock(&ec->mutex);
	return result;
}

static void acpi_ec_check_event(struct acpi_ec *ec)
{
	unsigned long flags;

	if (ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT) {
		if (ec_guard(ec)) {
			spin_lock_irqsave(&ec->lock, flags);
			/*
			 * Take care of SCI_EVT here if no one else is
			 * taking care of it (i.e. no transaction is in
			 * flight).
			 */
			if (!ec->curr)
				advance_transaction(ec);
			spin_unlock_irqrestore(&ec->lock, flags);
		}
	}
}
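
/*
 * Workqueue callback scheduled by acpi_ec_submit_query(): it drains
 * nr_pending_queries by issuing one QR_EC per pending event and, for the
 * "status"/"query" clearing timings, re-enables query submission at the
 * end even if a QR_EC failed.
 */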
static void acpi_ec_event_handler(struct work_struct *work)
{
	unsigned long flags;
	struct acpi_ec *ec = container_of(work, struct acpi_ec, work);

	ec_dbg_evt("Event started");

	spin_lock_irqsave(&ec->lock, flags);
	while (ec->nr_pending_queries) {
		spin_unlock_irqrestore(&ec->lock, flags);
		(void)acpi_ec_query(ec, NULL);
		spin_lock_irqsave(&ec->lock, flags);
		ec->nr_pending_queries--;
		/*
		 * Before exit, make sure that this work item can be
		 * scheduled again. There might be QR_EC failures, leaving
		 * EC_FLAGS_QUERY_PENDING uncleared and preventing this work
		 * item from being scheduled again.
		 */
		if (!ec->nr_pending_queries) {
			if (ec_event_clearing == ACPI_EC_EVT_TIMING_STATUS ||
			    ec_event_clearing == ACPI_EC_EVT_TIMING_QUERY)
				acpi_ec_complete_query(ec);
		}
	}
	spin_unlock_irqrestore(&ec->lock, flags);

	ec_dbg_evt("Event stopped");

	acpi_ec_check_event(ec);
}

static u32 acpi_ec_gpe_handler(acpi_handle gpe_device,
			       u32 gpe_number, void *data)
{
	unsigned long flags;
	struct acpi_ec *ec = data;

	spin_lock_irqsave(&ec->lock, flags);
	advance_transaction(ec);
	spin_unlock_irqrestore(&ec->lock, flags);
	return ACPI_INTERRUPT_HANDLED;
}

/* --------------------------------------------------------------------------
 * Address Space Management
 * -------------------------------------------------------------------------- */
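
/*
 * EmbeddedControl operation region handler: AML reads and writes are
 * broken down into byte-wide EC transactions, with burst mode requested
 * around multi-byte or busy-polled accesses to reduce latency.
 */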
static acpi_status
acpi_ec_space_handler(u32 function, acpi_physical_address address,
		      u32 bits, u64 *value64,
		      void *handler_context, void *region_context)
{
	struct acpi_ec *ec = handler_context;
	int result = 0, i, bytes = bits / 8;
	u8 *value = (u8 *)value64;

	if ((address > 0xFF) || !value || !handler_context)
		return AE_BAD_PARAMETER;

	if (function != ACPI_READ && function != ACPI_WRITE)
		return AE_BAD_PARAMETER;

	if (ec_busy_polling || bits > 8)
		acpi_ec_burst_enable(ec);

	for (i = 0; i < bytes; ++i, ++address, ++value)
		result = (function == ACPI_READ) ?
			acpi_ec_read(ec, address, value) :
			acpi_ec_write(ec, address, *value);

	if (ec_busy_polling || bits > 8)
		acpi_ec_burst_disable(ec);

	switch (result) {
	case -EINVAL:
		return AE_BAD_PARAMETER;
	case -ENODEV:
		return AE_NOT_FOUND;
	case -ETIME:
		return AE_TIME;
	default:
		return AE_OK;
	}
}

/* --------------------------------------------------------------------------
 * Driver Interface
 * -------------------------------------------------------------------------- */

static acpi_status
ec_parse_io_ports(struct acpi_resource *resource, void *context);

static struct acpi_ec *make_acpi_ec(void)
{
	struct acpi_ec *ec = kzalloc(sizeof(struct acpi_ec), GFP_KERNEL);

	if (!ec)
		return NULL;
	ec->flags = 1 << EC_FLAGS_QUERY_PENDING;
	mutex_init(&ec->mutex);
	init_waitqueue_head(&ec->wait);
	INIT_LIST_HEAD(&ec->list);
	spin_lock_init(&ec->lock);
	INIT_WORK(&ec->work, acpi_ec_event_handler);
	ec->timestamp = jiffies;
	return ec;
}

static acpi_status
acpi_ec_register_query_methods(acpi_handle handle, u32 level,
			       void *context, void **return_value)
{
	char node_name[5];
	struct acpi_buffer buffer = { sizeof(node_name), node_name };
	struct acpi_ec *ec = context;
	int value = 0;
	acpi_status status;

	status = acpi_get_name(handle, ACPI_SINGLE_NAME, &buffer);

	if (ACPI_SUCCESS(status) && sscanf(node_name, "_Q%x", &value) == 1)
		acpi_ec_add_query_handler(ec, value, handle, NULL, NULL);
	return AE_OK;
}
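
/*
 * Fill in an acpi_ec from the namespace device: the I/O ports come from
 * _CRS (data port first, then command/status port), the GPE number from
 * _GPE, and global-lock usage from an optional _GLK object.
 */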
static acpi_status
ec_parse_device(acpi_handle handle, u32 Level, void *context, void **retval)
{
	acpi_status status;
	unsigned long long tmp = 0;
	struct acpi_ec *ec = context;

	/* clear addr values, ec_parse_io_ports depends on it */
	ec->command_addr = ec->data_addr = 0;

	status = acpi_walk_resources(handle, METHOD_NAME__CRS,
				     ec_parse_io_ports, ec);
	if (ACPI_FAILURE(status))
		return status;

	/* Get GPE bit assignment (EC events). */
	/* TODO: Add support for _GPE returning a package */
	status = acpi_evaluate_integer(handle, "_GPE", NULL, &tmp);
	if (ACPI_FAILURE(status))
		return status;
	ec->gpe = tmp;
	/* Use the global lock for all EC transactions? */
	tmp = 0;
	acpi_evaluate_integer(handle, "_GLK", NULL, &tmp);
	ec->global_lock = tmp;
	ec->handle = handle;
	return AE_CTRL_TERMINATE;
}

static int ec_install_handlers(struct acpi_ec *ec)
{
	acpi_status status;

	if (test_bit(EC_FLAGS_HANDLERS_INSTALLED, &ec->flags))
		return 0;
	status = acpi_install_gpe_raw_handler(NULL, ec->gpe,
					      ACPI_GPE_EDGE_TRIGGERED,
					      &acpi_ec_gpe_handler, ec);
	if (ACPI_FAILURE(status))
		return -ENODEV;

	acpi_ec_start(ec, false);
	status = acpi_install_address_space_handler(ec->handle,
						    ACPI_ADR_SPACE_EC,
						    &acpi_ec_space_handler,
						    NULL, ec);
	if (ACPI_FAILURE(status)) {
		if (status == AE_NOT_FOUND) {
			/*
			 * Maybe the OS failed to evaluate the _REG object.
			 * The AE_NOT_FOUND error will be ignored and the OS
			 * will continue to initialize the EC.
			 */
			pr_err("Failed to evaluate the _REG object"
			       " of the EC device. A broken BIOS is suspected.\n");
		} else {
			acpi_ec_stop(ec, false);
			acpi_remove_gpe_handler(NULL, ec->gpe,
						&acpi_ec_gpe_handler);
			return -ENODEV;
		}
	}

	set_bit(EC_FLAGS_HANDLERS_INSTALLED, &ec->flags);
	return 0;
}

static void ec_remove_handlers(struct acpi_ec *ec)
{
	if (!test_bit(EC_FLAGS_HANDLERS_INSTALLED, &ec->flags))
		return;
	acpi_ec_stop(ec, false);
	if (ACPI_FAILURE(acpi_remove_address_space_handler(ec->handle,
				ACPI_ADR_SPACE_EC, &acpi_ec_space_handler)))
		pr_err("failed to remove space handler\n");
	if (ACPI_FAILURE(acpi_remove_gpe_handler(NULL, ec->gpe,
				&acpi_ec_gpe_handler)))
		pr_err("failed to remove gpe handler\n");
	clear_bit(EC_FLAGS_HANDLERS_INSTALLED, &ec->flags);
}

static int acpi_ec_add(struct acpi_device *device)
{
	struct acpi_ec *ec = NULL;
	int ret;

	strcpy(acpi_device_name(device), ACPI_EC_DEVICE_NAME);
	strcpy(acpi_device_class(device), ACPI_EC_CLASS);

	/* Check for boot EC */
	if (boot_ec &&
	    (boot_ec->handle == device->handle ||
	     boot_ec->handle == ACPI_ROOT_OBJECT)) {
		ec = boot_ec;
		boot_ec = NULL;
	} else {
		ec = make_acpi_ec();
		if (!ec)
			return -ENOMEM;
	}
	if (ec_parse_device(device->handle, 0, ec, NULL) !=
	    AE_CTRL_TERMINATE) {
		kfree(ec);
		return -EINVAL;
	}

	/* Find and register all query methods */
	acpi_walk_namespace(ACPI_TYPE_METHOD, ec->handle, 1,
			    acpi_ec_register_query_methods, NULL, ec, NULL);

	if (!first_ec)
		first_ec = ec;
	device->driver_data = ec;

	ret = !!request_region(ec->data_addr, 1, "EC data");
	WARN(!ret, "Could not request EC data io port 0x%lx", ec->data_addr);
	ret = !!request_region(ec->command_addr, 1, "EC cmd");
	WARN(!ret, "Could not request EC cmd io port 0x%lx", ec->command_addr);

	pr_info("GPE = 0x%lx, I/O: command/status = 0x%lx, data = 0x%lx\n",
		ec->gpe, ec->command_addr, ec->data_addr);

	ret = ec_install_handlers(ec);

	/* Reprobe devices depending on the EC */
	acpi_walk_dep_device_list(ec->handle);

	/* EC is fully operational, allow queries */
	clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags);

	/* Clear stale _Q events if hardware might require that */
	if (EC_FLAGS_CLEAR_ON_RESUME)
		acpi_ec_clear(ec);
	return ret;
}

static int acpi_ec_remove(struct acpi_device *device)
{
	struct acpi_ec *ec;
	struct acpi_ec_query_handler *handler, *tmp;

	if (!device)
		return -EINVAL;

	ec = acpi_driver_data(device);
	ec_remove_handlers(ec);
	mutex_lock(&ec->mutex);
	list_for_each_entry_safe(handler, tmp, &ec->list, node) {
		list_del(&handler->node);
		kfree(handler);
	}
	mutex_unlock(&ec->mutex);
	release_region(ec->data_addr, 1);
	release_region(ec->command_addr, 1);
	device->driver_data = NULL;
	if (ec == first_ec)
		first_ec = NULL;
	kfree(ec);
	return 0;
}

static acpi_status
ec_parse_io_ports(struct acpi_resource *resource, void *context)
{
	struct acpi_ec *ec = context;

	if (resource->type != ACPI_RESOURCE_TYPE_IO)
		return AE_OK;

	/*
	 * The first address region returned is the data port, and
	 * the second address region returned is the status/command
	 * port.
	 */
	if (ec->data_addr == 0)
		ec->data_addr = resource->data.io.minimum;
	else if (ec->command_addr == 0)
		ec->command_addr = resource->data.io.minimum;
	else
		return AE_CTRL_TERMINATE;

	return AE_OK;
}

int __init acpi_boot_ec_enable(void)
{
	if (!boot_ec || test_bit(EC_FLAGS_HANDLERS_INSTALLED, &boot_ec->flags))
		return 0;
	if (!ec_install_handlers(boot_ec)) {
		first_ec = boot_ec;
		return 0;
	}
	return -EFAULT;
}

static const struct acpi_device_id ec_device_ids[] = {
	{"PNP0C09", 0},
	{"", 0},
};

/* Some BIOSes do not survive an early DSDT scan, so skip it */
static int ec_skip_dsdt_scan(const struct dmi_system_id *id)
{
	EC_FLAGS_SKIP_DSDT_SCAN = 1;
	return 0;
}

/* ASUSTek often supplies us with broken ECDTs, so validate them */
static int ec_validate_ecdt(const struct dmi_system_id *id)
{
	EC_FLAGS_VALIDATE_ECDT = 1;
	return 0;
}

#if 0
/*
 * Some EC firmware variations refuse to respond to QR_EC when SCI_EVT is
 * not set. In that case, we complete the QR_EC without issuing it to the
 * firmware.
 * https://bugzilla.kernel.org/show_bug.cgi?id=82611
 * https://bugzilla.kernel.org/show_bug.cgi?id=97381
 */
static int ec_flag_query_handshake(const struct dmi_system_id *id)
{
	pr_debug("Detected the EC firmware requiring QR_EC issued when SCI_EVT set\n");
	EC_FLAGS_QUERY_HANDSHAKE = 1;
	return 0;
}
#endif

/*
 * On some hardware it is necessary to clear events accumulated by the EC during
 * sleep. These ECs stop reporting GPEs until they are manually polled, if too
 * many events are accumulated. (e.g. Samsung Series 5/9 notebooks)
 *
 * https://bugzilla.kernel.org/show_bug.cgi?id=44161
 *
 * Ideally, the EC should also be instructed NOT to accumulate events during
 * sleep (which Windows seems to do somehow), but the interface to control this
 * behaviour is not known at this time.
 *
 * Models known to be affected are Samsung 530Uxx/535Uxx/540Uxx/550Pxx/900Xxx,
 * however it is very likely that other Samsung models are affected.
 *
 * On systems which don't accumulate _Q events during sleep, this extra check
 * should be harmless.
 */
static int ec_clear_on_resume(const struct dmi_system_id *id)
{
	pr_debug("Detected system needing EC poll on resume.\n");
	EC_FLAGS_CLEAR_ON_RESUME = 1;
	ec_event_clearing = ACPI_EC_EVT_TIMING_STATUS;
	return 0;
}

static struct dmi_system_id ec_dmi_table[] __initdata = {
	{
	ec_skip_dsdt_scan, "Compal JFL92", {
	DMI_MATCH(DMI_BIOS_VENDOR, "COMPAL"),
	DMI_MATCH(DMI_BOARD_NAME, "JFL92") }, NULL},
	{
	ec_validate_ecdt, "MSI MS-171F", {
	DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star"),
	DMI_MATCH(DMI_PRODUCT_NAME, "MS-171F"),}, NULL},
	{
	ec_validate_ecdt, "ASUS hardware", {
	DMI_MATCH(DMI_BIOS_VENDOR, "ASUS") }, NULL},
	{
	ec_validate_ecdt, "ASUS hardware", {
	DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer Inc.") }, NULL},
	{
	ec_skip_dsdt_scan, "HP Folio 13", {
	DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
	DMI_MATCH(DMI_PRODUCT_NAME, "HP Folio 13"),}, NULL},
	{
	ec_validate_ecdt, "ASUS hardware", {
	DMI_MATCH(DMI_SYS_VENDOR, "ASUSTek Computer Inc."),
	DMI_MATCH(DMI_PRODUCT_NAME, "L4R"),}, NULL},
	{
	ec_clear_on_resume, "Samsung hardware", {
	DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD.")}, NULL},
	{},
};
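
/*
 * Boot-time probe order: prefer the ECDT-described controller so that EC
 * opregion accesses can work before the namespace scan; if the ECDT is
 * absent (or DMI-flagged as untrustworthy) fall back to locating PNP0C09
 * in the DSDT, and on the quirky ASUS systems compare both descriptions.
 */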
int __init acpi_ec_ecdt_probe(void)
{
	acpi_status status;
	struct acpi_ec *saved_ec = NULL;
	struct acpi_table_ecdt *ecdt_ptr;

	boot_ec = make_acpi_ec();
	if (!boot_ec)
		return -ENOMEM;
	/*
	 * Generate a boot ec context
	 */
	dmi_check_system(ec_dmi_table);
	status = acpi_get_table(ACPI_SIG_ECDT, 1,
				(struct acpi_table_header **)&ecdt_ptr);
	if (ACPI_SUCCESS(status)) {
		pr_info("EC description table is found, configuring boot EC\n");
		boot_ec->command_addr = ecdt_ptr->control.address;
		boot_ec->data_addr = ecdt_ptr->data.address;
		boot_ec->gpe = ecdt_ptr->gpe;
		boot_ec->handle = ACPI_ROOT_OBJECT;
		acpi_get_handle(ACPI_ROOT_OBJECT, ecdt_ptr->id,
				&boot_ec->handle);
		/* Don't trust ECDT, which comes from ASUSTek */
		if (!EC_FLAGS_VALIDATE_ECDT)
			goto install;
		saved_ec = kmemdup(boot_ec, sizeof(struct acpi_ec), GFP_KERNEL);
		if (!saved_ec)
			return -ENOMEM;
		/* fall through */
	}

	if (EC_FLAGS_SKIP_DSDT_SCAN) {
		kfree(saved_ec);
		return -ENODEV;
	}

	/* This workaround is needed only on some broken machines,
	 * which require early EC, but fail to provide ECDT */
	pr_debug("Look up EC in DSDT\n");
	status = acpi_get_devices(ec_device_ids[0].id, ec_parse_device,
				  boot_ec, NULL);
	/* Check whether acpi_get_devices actually found something */
	if (ACPI_FAILURE(status) || !boot_ec->handle)
		goto error;
	if (saved_ec) {
		/* try to find good ECDT from ASUSTek */
		if (saved_ec->command_addr != boot_ec->command_addr ||
		    saved_ec->data_addr != boot_ec->data_addr ||
		    saved_ec->gpe != boot_ec->gpe ||
		    saved_ec->handle != boot_ec->handle)
			pr_info("ASUSTek keeps feeding us with broken "
				"ECDT tables, which are very hard to work around. "
				"Trying to use DSDT EC info instead. Please send "
				"output of acpidump to linux-acpi@vger.kernel.org\n");
		kfree(saved_ec);
		saved_ec = NULL;
	} else {
		/*
		 * We really need to limit this workaround: the only ASUS
		 * machine that needs it has a fake EC._INI method, so use
		 * that as the flag. Keep the boot_ec struct as it will be
		 * needed soon.
		 */
		if (!dmi_name_in_vendors("ASUS") ||
		    !acpi_has_method(boot_ec->handle, "_INI"))
			return -ENODEV;
	}
install:
	if (!ec_install_handlers(boot_ec)) {
		first_ec = boot_ec;
		return 0;
	}
error:
	kfree(boot_ec);
	kfree(saved_ec);
	boot_ec = NULL;
	return -ENODEV;
}

static int param_set_event_clearing(const char *val, struct kernel_param *kp)
{
	int result = 0;

	if (!strncmp(val, "status", sizeof("status") - 1)) {
		ec_event_clearing = ACPI_EC_EVT_TIMING_STATUS;
		pr_info("Assuming SCI_EVT clearing on EC_SC accesses\n");
	} else if (!strncmp(val, "query", sizeof("query") - 1)) {
		ec_event_clearing = ACPI_EC_EVT_TIMING_QUERY;
		pr_info("Assuming SCI_EVT clearing on QR_EC writes\n");
	} else if (!strncmp(val, "event", sizeof("event") - 1)) {
		ec_event_clearing = ACPI_EC_EVT_TIMING_EVENT;
		pr_info("Assuming SCI_EVT clearing on event reads\n");
	} else
		result = -EINVAL;
	return result;
}

static int param_get_event_clearing(char *buffer, struct kernel_param *kp)
{
	switch (ec_event_clearing) {
	case ACPI_EC_EVT_TIMING_STATUS:
		return sprintf(buffer, "status");
	case ACPI_EC_EVT_TIMING_QUERY:
		return sprintf(buffer, "query");
	case ACPI_EC_EVT_TIMING_EVENT:
		return sprintf(buffer, "event");
	default:
		return sprintf(buffer, "invalid");
	}
	return 0;
}
module_param_call(ec_event_clearing, param_set_event_clearing, param_get_event_clearing,
		  NULL, 0644);
MODULE_PARM_DESC(ec_event_clearing, "Assumed SCI_EVT clearing timing");

static struct acpi_driver acpi_ec_driver = {
	.name = "ec",
	.class = ACPI_EC_CLASS,
	.ids = ec_device_ids,
	.ops = {
		.add = acpi_ec_add,
		.remove = acpi_ec_remove,
		},
};

int __init acpi_ec_init(void)
{
	int result = 0;

	/* Now register the driver for the EC */
	result = acpi_bus_register_driver(&acpi_ec_driver);
	if (result < 0)
		return -ENODEV;

	return result;
}

/* EC driver currently not unloadable */
#if 0
static void __exit acpi_ec_exit(void)
{
	acpi_bus_unregister_driver(&acpi_ec_driver);
}
#endif	/* 0 */