// SPDX-License-Identifier: GPL-2.0+
/*
 * watchdog_dev.c
 *
 * (c) Copyright 2008-2011 Alan Cox <alan@lxorguk.ukuu.org.uk>,
 *     All Rights Reserved.
 *
 * (c) Copyright 2008-2011 Wim Van Sebroeck <wim@iguana.be>.
 *
 *
 * This source code is part of the generic code that can be used
 * by all the watchdog timer drivers.
 *
 * This part of the generic code takes care of the following
 * misc device: /dev/watchdog.
 *
 * Based on source code of the following authors:
 *   Matt Domsch <Matt_Domsch@dell.com>,
 *   Rob Radez <rob@osinvestor.com>,
 *   Rusty Lynch <rusty@linux.co.intel.com>
 *   Satyam Sharma <satyam@infradead.org>
 *   Randy Dunlap <randy.dunlap@oracle.com>
 *
 * Neither Alan Cox, CymruNet Ltd., Wim Van Sebroeck nor Iguana vzw.
 * admit liability nor provide warranty for any of this software.
 * This material is provided "AS-IS" and at no charge.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cdev.h>		/* For character device */
#include <linux/errno.h>	/* For the -ENODEV/... values */
#include <linux/fs.h>		/* For file operations */
#include <linux/init.h>		/* For __init/__exit/... */
#include <linux/hrtimer.h>	/* For hrtimers */
#include <linux/kernel.h>	/* For printk/panic/... */
#include <linux/kthread.h>	/* For kthread_work */
#include <linux/miscdevice.h>	/* For handling misc devices */
#include <linux/module.h>	/* For module stuff/... */
#include <linux/mutex.h>	/* For mutexes */
#include <linux/slab.h>		/* For memory functions */
#include <linux/types.h>	/* For standard types (like size_t) */
#include <linux/watchdog.h>	/* For watchdog specific items */
#include <linux/uaccess.h>	/* For copy_to_user/put_user/... */

#include <uapi/linux/sched/types.h>	/* For struct sched_param */

#include "watchdog_core.h"
#include "watchdog_pretimeout.h"
/*
 * struct watchdog_core_data - watchdog core internal data
 * @dev:	The watchdog's internal device
 * @cdev:	The watchdog's Character device.
 * @wdd:	Pointer to watchdog device.
 * @lock:	Lock for watchdog core.
 * @status:	Watchdog core internal status bits.
 */
struct watchdog_core_data {
	struct device dev;
	struct cdev cdev;
	struct watchdog_device *wdd;
	struct mutex lock;
	ktime_t last_keepalive;
	ktime_t last_hw_keepalive;
	ktime_t open_deadline;
	struct hrtimer timer;
	struct kthread_work work;
	unsigned long status;		/* Internal status bits */
#define _WDOG_DEV_OPEN		0	/* Opened ? */
#define _WDOG_ALLOW_RELEASE	1	/* Did we receive the magic char ? */
#define _WDOG_KEEPALIVE		2	/* Did we receive a keepalive ? */
};

/* the dev_t structure to store the dynamically allocated watchdog devices */
static dev_t watchdog_devt;
/* Reference to watchdog device behind /dev/watchdog */
static struct watchdog_core_data *old_wd_data;

static struct kthread_worker *watchdog_kworker;

static bool handle_boot_enabled =
	IS_ENABLED(CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED);

static unsigned open_timeout = CONFIG_WATCHDOG_OPEN_TIMEOUT;

static bool watchdog_past_open_deadline(struct watchdog_core_data *data)
{
	return ktime_after(ktime_get(), data->open_deadline);
}

static void watchdog_set_open_deadline(struct watchdog_core_data *data)
{
	data->open_deadline = open_timeout ?
		ktime_get() + ktime_set(open_timeout, 0) : KTIME_MAX;
}

static inline bool watchdog_need_worker(struct watchdog_device *wdd)
{
	/* All variables in milli-seconds */
	unsigned int hm = wdd->max_hw_heartbeat_ms;
	unsigned int t = wdd->timeout * 1000;

	/*
	 * A worker to generate heartbeat requests is needed if all of the
	 * following conditions are true.
	 * - Userspace activated the watchdog.
	 * - The driver provided a value for the maximum hardware timeout, and
	 *   thus is aware that the framework supports generating heartbeat
	 *   requests.
	 * - Userspace requests a longer timeout than the hardware can handle.
	 *
	 * Alternatively, if userspace has not opened the watchdog
	 * device, we take care of feeding the watchdog if it is
	 * running.
	 */
	return (hm && watchdog_active(wdd) && t > hm) ||
		(t && !watchdog_active(wdd) && watchdog_hw_running(wdd));
}

static ktime_t watchdog_next_keepalive(struct watchdog_device *wdd)
{
	struct watchdog_core_data *wd_data = wdd->wd_data;
	unsigned int timeout_ms = wdd->timeout * 1000;
	ktime_t keepalive_interval;
	ktime_t last_heartbeat, latest_heartbeat;
	ktime_t virt_timeout;
	unsigned int hw_heartbeat_ms;

	if (watchdog_active(wdd))
		virt_timeout = ktime_add(wd_data->last_keepalive,
					 ms_to_ktime(timeout_ms));
	else
		virt_timeout = wd_data->open_deadline;

	hw_heartbeat_ms = min_not_zero(timeout_ms, wdd->max_hw_heartbeat_ms);
	keepalive_interval = ms_to_ktime(hw_heartbeat_ms / 2);

	/*
	 * To ensure that the watchdog times out wdd->timeout seconds
	 * after the most recent ping from userspace, the last
	 * worker ping has to come in hw_heartbeat_ms before this timeout.
	 */
	last_heartbeat = ktime_sub(virt_timeout, ms_to_ktime(hw_heartbeat_ms));
	latest_heartbeat = ktime_sub(last_heartbeat, ktime_get());
	if (ktime_before(latest_heartbeat, keepalive_interval))
		return latest_heartbeat;
	return keepalive_interval;
}
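
/*
 * Worked example (editor's illustration, not part of the original source;
 * the numbers are assumed): with a driver reporting max_hw_heartbeat_ms =
 * 8000 and a userspace timeout of 60 seconds, watchdog_need_worker() is
 * true while the device is active (60000 ms > 8000 ms), so the core keeps
 * the hardware fed on userspace's behalf.  watchdog_next_keepalive() then
 * normally schedules worker pings every hw_heartbeat_ms / 2 = 4 seconds,
 * but clamps the delay so the final worker ping lands no later than
 * hw_heartbeat_ms (8 s here) before the 60 second virtual timeout, letting
 * the hardware expire right at the timeout userspace asked for.
 */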
static inline void watchdog_update_worker(struct watchdog_device *wdd)
{
	struct watchdog_core_data *wd_data = wdd->wd_data;

	if (watchdog_need_worker(wdd)) {
		ktime_t t = watchdog_next_keepalive(wdd);

		if (t > 0)
			hrtimer_start(&wd_data->timer, t,
				      HRTIMER_MODE_REL_HARD);
	} else {
		hrtimer_cancel(&wd_data->timer);
	}
}

static int __watchdog_ping(struct watchdog_device *wdd)
{
	struct watchdog_core_data *wd_data = wdd->wd_data;
	ktime_t earliest_keepalive, now;
	int err;

	earliest_keepalive = ktime_add(wd_data->last_hw_keepalive,
				       ms_to_ktime(wdd->min_hw_heartbeat_ms));
	now = ktime_get();

	if (ktime_after(earliest_keepalive, now)) {
		hrtimer_start(&wd_data->timer,
			      ktime_sub(earliest_keepalive, now),
			      HRTIMER_MODE_REL_HARD);
		return 0;
	}

	wd_data->last_hw_keepalive = now;

	if (wdd->ops->ping)
		err = wdd->ops->ping(wdd);  /* ping the watchdog */
	else
		err = wdd->ops->start(wdd); /* restart watchdog */

	watchdog_update_worker(wdd);

	return err;
}

/*
 * watchdog_ping: ping the watchdog.
 * @wdd: the watchdog device to ping
 *
 * The caller must hold wd_data->lock.
 *
 * If the watchdog has no own ping operation then it needs to be
 * restarted via the start operation. This wrapper function does
 * exactly that.
 * We only ping when the watchdog device is running.
 */
static int watchdog_ping(struct watchdog_device *wdd)
{
	struct watchdog_core_data *wd_data = wdd->wd_data;

	if (!watchdog_active(wdd) && !watchdog_hw_running(wdd))
		return 0;

	set_bit(_WDOG_KEEPALIVE, &wd_data->status);

	wd_data->last_keepalive = ktime_get();
	return __watchdog_ping(wdd);
}

static bool watchdog_worker_should_ping(struct watchdog_core_data *wd_data)
{
	struct watchdog_device *wdd = wd_data->wdd;

	if (!wdd)
		return false;

	if (watchdog_active(wdd))
		return true;

	return watchdog_hw_running(wdd) && !watchdog_past_open_deadline(wd_data);
}

static void watchdog_ping_work(struct kthread_work *work)
{
	struct watchdog_core_data *wd_data;

	wd_data = container_of(work, struct watchdog_core_data, work);

	mutex_lock(&wd_data->lock);
	if (watchdog_worker_should_ping(wd_data))
		__watchdog_ping(wd_data->wdd);
	mutex_unlock(&wd_data->lock);
}

static enum hrtimer_restart watchdog_timer_expired(struct hrtimer *timer)
{
	struct watchdog_core_data *wd_data;

	wd_data = container_of(timer, struct watchdog_core_data, timer);

	kthread_queue_work(watchdog_kworker, &wd_data->work);
	return HRTIMER_NORESTART;
}

/*
 * watchdog_start: wrapper to start the watchdog.
 * @wdd: the watchdog device to start
 *
 * The caller must hold wd_data->lock.
 *
 * Start the watchdog if it is not active and mark it active.
 * This function returns zero on success or a negative errno code for
 * failure.
 */
static int watchdog_start(struct watchdog_device *wdd)
{
	struct watchdog_core_data *wd_data = wdd->wd_data;
	ktime_t started_at;
	int err;

	if (watchdog_active(wdd))
		return 0;

	set_bit(_WDOG_KEEPALIVE, &wd_data->status);

	started_at = ktime_get();
	if (watchdog_hw_running(wdd) && wdd->ops->ping)
		err = wdd->ops->ping(wdd);
	else
		err = wdd->ops->start(wdd);
	if (err == 0) {
		set_bit(WDOG_ACTIVE, &wdd->status);
		wd_data->last_keepalive = started_at;
		wd_data->last_hw_keepalive = started_at;
		watchdog_update_worker(wdd);
	}

	return err;
}

/*
 * watchdog_stop: wrapper to stop the watchdog.
 * @wdd: the watchdog device to stop
 *
 * The caller must hold wd_data->lock.
 *
 * Stop the watchdog if it is still active and unmark it active.
 * This function returns zero on success or a negative errno code for
 * failure.
 * If the 'nowayout' feature was set, the watchdog cannot be stopped.
 */
static int watchdog_stop(struct watchdog_device *wdd)
{
	int err = 0;

	if (!watchdog_active(wdd))
		return 0;

	if (test_bit(WDOG_NO_WAY_OUT, &wdd->status)) {
		pr_info("watchdog%d: nowayout prevents watchdog being stopped!\n",
			wdd->id);
		return -EBUSY;
	}

	if (wdd->ops->stop) {
		clear_bit(WDOG_HW_RUNNING, &wdd->status);
		err = wdd->ops->stop(wdd);
	} else {
		set_bit(WDOG_HW_RUNNING, &wdd->status);
	}

	if (err == 0) {
		clear_bit(WDOG_ACTIVE, &wdd->status);
		watchdog_update_worker(wdd);
	}

	return err;
}
/*
 * watchdog_get_status: wrapper to get the watchdog status
 * @wdd: the watchdog device to get the status from
 *
 * The caller must hold wd_data->lock.
 *
 * Get the watchdog's status flags.
 */
static unsigned int watchdog_get_status(struct watchdog_device *wdd)
{
	struct watchdog_core_data *wd_data = wdd->wd_data;
	unsigned int status;

	if (wdd->ops->status)
		status = wdd->ops->status(wdd);
	else
		status = wdd->bootstatus & (WDIOF_CARDRESET |
					    WDIOF_OVERHEAT |
					    WDIOF_FANFAULT |
					    WDIOF_EXTERN1 |
					    WDIOF_EXTERN2 |
					    WDIOF_POWERUNDER |
					    WDIOF_POWEROVER);

	if (test_bit(_WDOG_ALLOW_RELEASE, &wd_data->status))
		status |= WDIOF_MAGICCLOSE;

	if (test_and_clear_bit(_WDOG_KEEPALIVE, &wd_data->status))
		status |= WDIOF_KEEPALIVEPING;

	return status;
}

/*
 * watchdog_set_timeout: set the watchdog timer timeout
 * @wdd: the watchdog device to set the timeout for
 * @timeout: timeout to set in seconds
 *
 * The caller must hold wd_data->lock.
 */
static int watchdog_set_timeout(struct watchdog_device *wdd,
				unsigned int timeout)
{
	int err = 0;

	if (!(wdd->info->options & WDIOF_SETTIMEOUT))
		return -EOPNOTSUPP;

	if (watchdog_timeout_invalid(wdd, timeout))
		return -EINVAL;

	if (wdd->ops->set_timeout) {
		err = wdd->ops->set_timeout(wdd, timeout);
	} else {
		wdd->timeout = timeout;
		/* Disable pretimeout if it doesn't fit the new timeout */
		if (wdd->pretimeout >= wdd->timeout)
			wdd->pretimeout = 0;
	}

	watchdog_update_worker(wdd);

	return err;
}

/*
 * watchdog_set_pretimeout: set the watchdog timer pretimeout
 * @wdd: the watchdog device to set the timeout for
 * @timeout: pretimeout to set in seconds
 */
static int watchdog_set_pretimeout(struct watchdog_device *wdd,
				   unsigned int timeout)
{
	int err = 0;

	if (!(wdd->info->options & WDIOF_PRETIMEOUT))
		return -EOPNOTSUPP;

	if (watchdog_pretimeout_invalid(wdd, timeout))
		return -EINVAL;

	if (wdd->ops->set_pretimeout)
		err = wdd->ops->set_pretimeout(wdd, timeout);
	else
		wdd->pretimeout = timeout;

	return err;
}

/*
 * watchdog_get_timeleft: wrapper to get the time left before a reboot
 * @wdd: the watchdog device to get the remaining time from
 * @timeleft: the time that's left
 *
 * The caller must hold wd_data->lock.
 *
 * Get the time before a watchdog will reboot (if not pinged).
 */
static int watchdog_get_timeleft(struct watchdog_device *wdd,
				 unsigned int *timeleft)
{
	*timeleft = 0;

	if (!wdd->ops->get_timeleft)
		return -EOPNOTSUPP;

	*timeleft = wdd->ops->get_timeleft(wdd);

	return 0;
}
#ifdef CONFIG_WATCHDOG_SYSFS
static ssize_t nowayout_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct watchdog_device *wdd = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", !!test_bit(WDOG_NO_WAY_OUT, &wdd->status));
}
static DEVICE_ATTR_RO(nowayout);

static ssize_t status_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct watchdog_device *wdd = dev_get_drvdata(dev);
	struct watchdog_core_data *wd_data = wdd->wd_data;
	unsigned int status;

	mutex_lock(&wd_data->lock);
	status = watchdog_get_status(wdd);
	mutex_unlock(&wd_data->lock);

	return sprintf(buf, "0x%x\n", status);
}
static DEVICE_ATTR_RO(status);

static ssize_t bootstatus_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct watchdog_device *wdd = dev_get_drvdata(dev);

	return sprintf(buf, "%u\n", wdd->bootstatus);
}
static DEVICE_ATTR_RO(bootstatus);

static ssize_t timeleft_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct watchdog_device *wdd = dev_get_drvdata(dev);
	struct watchdog_core_data *wd_data = wdd->wd_data;
	ssize_t status;
	unsigned int val;

	mutex_lock(&wd_data->lock);
	status = watchdog_get_timeleft(wdd, &val);
	mutex_unlock(&wd_data->lock);
	if (!status)
		status = sprintf(buf, "%u\n", val);

	return status;
}
static DEVICE_ATTR_RO(timeleft);

static ssize_t timeout_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct watchdog_device *wdd = dev_get_drvdata(dev);

	return sprintf(buf, "%u\n", wdd->timeout);
}
static DEVICE_ATTR_RO(timeout);

static ssize_t pretimeout_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct watchdog_device *wdd = dev_get_drvdata(dev);

	return sprintf(buf, "%u\n", wdd->pretimeout);
}
static DEVICE_ATTR_RO(pretimeout);

static ssize_t identity_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct watchdog_device *wdd = dev_get_drvdata(dev);

	return sprintf(buf, "%s\n", wdd->info->identity);
}
static DEVICE_ATTR_RO(identity);

static ssize_t state_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct watchdog_device *wdd = dev_get_drvdata(dev);

	if (watchdog_active(wdd))
		return sprintf(buf, "active\n");

	return sprintf(buf, "inactive\n");
}
static DEVICE_ATTR_RO(state);

static ssize_t pretimeout_available_governors_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	return watchdog_pretimeout_available_governors_get(buf);
}
static DEVICE_ATTR_RO(pretimeout_available_governors);

static ssize_t pretimeout_governor_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct watchdog_device *wdd = dev_get_drvdata(dev);

	return watchdog_pretimeout_governor_get(wdd, buf);
}

static ssize_t pretimeout_governor_store(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	struct watchdog_device *wdd = dev_get_drvdata(dev);
	int ret = watchdog_pretimeout_governor_set(wdd, buf);

	if (!ret)
		ret = count;

	return ret;
}
static DEVICE_ATTR_RW(pretimeout_governor);

static umode_t wdt_is_visible(struct kobject *kobj, struct attribute *attr,
			      int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct watchdog_device *wdd = dev_get_drvdata(dev);
	umode_t mode = attr->mode;

	if (attr == &dev_attr_timeleft.attr && !wdd->ops->get_timeleft)
		mode = 0;
	else if (attr == &dev_attr_pretimeout.attr &&
		 !(wdd->info->options & WDIOF_PRETIMEOUT))
		mode = 0;
	else if ((attr == &dev_attr_pretimeout_governor.attr ||
		  attr == &dev_attr_pretimeout_available_governors.attr) &&
		 (!(wdd->info->options & WDIOF_PRETIMEOUT) ||
		  !IS_ENABLED(CONFIG_WATCHDOG_PRETIMEOUT_GOV)))
		mode = 0;

	return mode;
}

static struct attribute *wdt_attrs[] = {
	&dev_attr_state.attr,
	&dev_attr_identity.attr,
	&dev_attr_timeout.attr,
	&dev_attr_pretimeout.attr,
	&dev_attr_timeleft.attr,
	&dev_attr_bootstatus.attr,
	&dev_attr_status.attr,
	&dev_attr_nowayout.attr,
	&dev_attr_pretimeout_governor.attr,
	&dev_attr_pretimeout_available_governors.attr,
	NULL,
};

static const struct attribute_group wdt_group = {
	.attrs = wdt_attrs,
	.is_visible = wdt_is_visible,
};
__ATTRIBUTE_GROUPS(wdt);
#else
#define wdt_groups	NULL
#endif
/*
 * watchdog_ioctl_op: call the watchdog drivers ioctl op if defined
 * @wdd: the watchdog device to do the ioctl on
 * @cmd: watchdog command
 * @arg: argument pointer
 *
 * The caller must hold wd_data->lock.
 */
static int watchdog_ioctl_op(struct watchdog_device *wdd, unsigned int cmd,
			     unsigned long arg)
{
	if (!wdd->ops->ioctl)
		return -ENOIOCTLCMD;

	return wdd->ops->ioctl(wdd, cmd, arg);
}

/*
 * watchdog_write: writes to the watchdog.
 * @file: file from VFS
 * @data: user address of data
 * @len: length of data
 * @ppos: pointer to the file offset
 *
 * A write to a watchdog device is defined as a keepalive ping.
 * Writing the magic 'V' sequence allows the next close to turn
 * off the watchdog (if 'nowayout' is not set).
 */
static ssize_t watchdog_write(struct file *file, const char __user *data,
			      size_t len, loff_t *ppos)
{
	struct watchdog_core_data *wd_data = file->private_data;
	struct watchdog_device *wdd;
	int err;
	size_t i;
	char c;

	if (len == 0)
		return 0;

	/*
	 * Note: just in case someone wrote the magic character
	 * five months ago...
	 */
	clear_bit(_WDOG_ALLOW_RELEASE, &wd_data->status);

	/* scan to see whether or not we got the magic character */
	for (i = 0; i != len; i++) {
		if (get_user(c, data + i))
			return -EFAULT;
		if (c == 'V')
			set_bit(_WDOG_ALLOW_RELEASE, &wd_data->status);
	}

	/* someone wrote to us, so we send the watchdog a keepalive ping */

	err = -ENODEV;
	mutex_lock(&wd_data->lock);
	wdd = wd_data->wdd;
	if (wdd)
		err = watchdog_ping(wdd);
	mutex_unlock(&wd_data->lock);

	if (err < 0)
		return err;

	return len;
}
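
/*
 * Illustrative userspace sketch (editor's addition, not part of the original
 * file): the comment above defines write() as a keepalive ping and 'V' as
 * the magic-close character. Assuming a daemon that holds /dev/watchdog
 * open, a minimal feed-and-close sequence could look like this (error
 * handling omitted for brevity):
 *
 *	int fd = open("/dev/watchdog", O_WRONLY);
 *
 *	write(fd, "k", 1);	any write counts as a keepalive ping
 *	...
 *	write(fd, "V", 1);	arm the magic close
 *	close(fd);		watchdog_release() may now stop the device
 *
 * Without the trailing 'V', a driver that advertises WDIOF_MAGICCLOSE keeps
 * the watchdog running across close() and the core logs that it did not stop.
 */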
/*
 * watchdog_ioctl: handle the different ioctl's for the watchdog device.
 * @file: file handle to the device
 * @cmd: watchdog command
 * @arg: argument pointer
 *
 * The watchdog API defines a common set of functions for all watchdogs
 * according to their available features.
 */
static long watchdog_ioctl(struct file *file, unsigned int cmd,
			   unsigned long arg)
{
	struct watchdog_core_data *wd_data = file->private_data;
	void __user *argp = (void __user *)arg;
	struct watchdog_device *wdd;
	int __user *p = argp;
	unsigned int val;
	int err;

	mutex_lock(&wd_data->lock);

	wdd = wd_data->wdd;
	if (!wdd) {
		err = -ENODEV;
		goto out_ioctl;
	}

	err = watchdog_ioctl_op(wdd, cmd, arg);
	if (err != -ENOIOCTLCMD)
		goto out_ioctl;

	switch (cmd) {
	case WDIOC_GETSUPPORT:
		err = copy_to_user(argp, wdd->info,
			sizeof(struct watchdog_info)) ? -EFAULT : 0;
		break;
	case WDIOC_GETSTATUS:
		val = watchdog_get_status(wdd);
		err = put_user(val, p);
		break;
	case WDIOC_GETBOOTSTATUS:
		err = put_user(wdd->bootstatus, p);
		break;
	case WDIOC_SETOPTIONS:
		if (get_user(val, p)) {
			err = -EFAULT;
			break;
		}
		if (val & WDIOS_DISABLECARD) {
			err = watchdog_stop(wdd);
			if (err < 0)
				break;
		}
		if (val & WDIOS_ENABLECARD)
			err = watchdog_start(wdd);
		break;
	case WDIOC_KEEPALIVE:
		if (!(wdd->info->options & WDIOF_KEEPALIVEPING)) {
			err = -EOPNOTSUPP;
			break;
		}
		err = watchdog_ping(wdd);
		break;
	case WDIOC_SETTIMEOUT:
		if (get_user(val, p)) {
			err = -EFAULT;
			break;
		}
		err = watchdog_set_timeout(wdd, val);
		if (err < 0)
			break;
		/* If the watchdog is active then we send a keepalive ping
		 * to make sure that the watchdog keeps running (and if
		 * possible that it takes the new timeout) */
		err = watchdog_ping(wdd);
		if (err < 0)
			break;
		/* fall through */
	case WDIOC_GETTIMEOUT:
		/* timeout == 0 means that we don't know the timeout */
		if (wdd->timeout == 0) {
			err = -EOPNOTSUPP;
			break;
		}
		err = put_user(wdd->timeout, p);
		break;
	case WDIOC_GETTIMELEFT:
		err = watchdog_get_timeleft(wdd, &val);
		if (err < 0)
			break;
		err = put_user(val, p);
		break;
	case WDIOC_SETPRETIMEOUT:
		if (get_user(val, p)) {
			err = -EFAULT;
			break;
		}
		err = watchdog_set_pretimeout(wdd, val);
		break;
	case WDIOC_GETPRETIMEOUT:
		err = put_user(wdd->pretimeout, p);
		break;
	default:
		err = -ENOTTY;
		break;
	}

out_ioctl:
	mutex_unlock(&wd_data->lock);
	return err;
}
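
/*
 * Illustrative userspace sketch (editor's addition, not part of the original
 * file) of the ioctl paths handled above; the timeout value is an example:
 *
 *	int timeout = 30, left;
 *
 *	ioctl(fd, WDIOC_SETTIMEOUT, &timeout);	request 30 s; on success the
 *						case falls through and writes
 *						back the timeout actually set
 *	ioctl(fd, WDIOC_GETTIMELEFT, &left);	seconds until reset, only if
 *						the driver has get_timeleft
 *	ioctl(fd, WDIOC_KEEPALIVE, NULL);	explicit keepalive ping
 */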
/*
 * watchdog_open: open the /dev/watchdog* devices.
 * @inode: inode of device
 * @file: file handle to device
 *
 * When the /dev/watchdog* device gets opened, we start the watchdog.
 * Watch out: the /dev/watchdog device is single open, so we make sure
 * it can only be opened once.
 */
static int watchdog_open(struct inode *inode, struct file *file)
{
	struct watchdog_core_data *wd_data;
	struct watchdog_device *wdd;
	bool hw_running;
	int err;

	/* Get the corresponding watchdog device */
	if (imajor(inode) == MISC_MAJOR)
		wd_data = old_wd_data;
	else
		wd_data = container_of(inode->i_cdev, struct watchdog_core_data,
				       cdev);

	/* the watchdog is single open! */
	if (test_and_set_bit(_WDOG_DEV_OPEN, &wd_data->status))
		return -EBUSY;

	wdd = wd_data->wdd;

	/*
	 * If the /dev/watchdog device is open, we don't want the module
	 * to be unloaded.
	 */
	hw_running = watchdog_hw_running(wdd);
	if (!hw_running && !try_module_get(wdd->ops->owner)) {
		err = -EBUSY;
		goto out_clear;
	}

	err = watchdog_start(wdd);
	if (err < 0)
		goto out_mod;

	file->private_data = wd_data;

	if (!hw_running)
		get_device(&wd_data->dev);

	/*
	 * open_timeout only applies for the first open from
	 * userspace. Set open_deadline to infinity so that the kernel
	 * will take care of an always-running hardware watchdog in
	 * case the device gets magic-closed or WDIOS_DISABLECARD is
	 * applied.
	 */
	wd_data->open_deadline = KTIME_MAX;

	/* dev/watchdog is a virtual (and thus non-seekable) filesystem */
	return stream_open(inode, file);

out_mod:
	module_put(wd_data->wdd->ops->owner);
out_clear:
	clear_bit(_WDOG_DEV_OPEN, &wd_data->status);
	return err;
}

static void watchdog_core_data_release(struct device *dev)
{
	struct watchdog_core_data *wd_data;

	wd_data = container_of(dev, struct watchdog_core_data, dev);

	kfree(wd_data);
}

/*
 * watchdog_release: release the watchdog device.
 * @inode: inode of device
 * @file: file handle to device
 *
 * This is the code for when /dev/watchdog gets closed. We will only
 * stop the watchdog when we have received the magic char (and nowayout
 * was not set), else the watchdog will keep running.
 */
static int watchdog_release(struct inode *inode, struct file *file)
{
	struct watchdog_core_data *wd_data = file->private_data;
	struct watchdog_device *wdd;
	int err = -EBUSY;
	bool running;

	mutex_lock(&wd_data->lock);

	wdd = wd_data->wdd;
	if (!wdd)
		goto done;

	/*
	 * We only stop the watchdog if we received the magic character
	 * or if WDIOF_MAGICCLOSE is not set. If nowayout was set then
	 * watchdog_stop will fail.
	 */
	if (!test_bit(WDOG_ACTIVE, &wdd->status))
		err = 0;
	else if (test_and_clear_bit(_WDOG_ALLOW_RELEASE, &wd_data->status) ||
		 !(wdd->info->options & WDIOF_MAGICCLOSE))
		err = watchdog_stop(wdd);

	/* If the watchdog was not stopped, send a keepalive ping */
	if (err < 0) {
		pr_crit("watchdog%d: watchdog did not stop!\n", wdd->id);
		watchdog_ping(wdd);
	}

	watchdog_update_worker(wdd);

	/* make sure that /dev/watchdog can be re-opened */
	clear_bit(_WDOG_DEV_OPEN, &wd_data->status);

done:
	running = wdd && watchdog_hw_running(wdd);
	mutex_unlock(&wd_data->lock);
	/*
	 * Allow the owner module to be unloaded again unless the watchdog
	 * is still running. If the watchdog is still running, it can not
	 * be stopped, and its driver must not be unloaded.
	 */
	if (!running) {
		module_put(wd_data->cdev.owner);
		put_device(&wd_data->dev);
	}
	return 0;
}

static const struct file_operations watchdog_fops = {
	.owner		= THIS_MODULE,
	.write		= watchdog_write,
	.unlocked_ioctl	= watchdog_ioctl,
	.open		= watchdog_open,
	.release	= watchdog_release,
};

static struct miscdevice watchdog_miscdev = {
	.minor		= WATCHDOG_MINOR,
	.name		= "watchdog",
	.fops		= &watchdog_fops,
};

static struct class watchdog_class = {
	.name		= "watchdog",
	.owner		= THIS_MODULE,
	.dev_groups	= wdt_groups,
};
/*
 * watchdog_cdev_register: register watchdog character device
 * @wdd: watchdog device
 *
 * Register a watchdog character device including handling the legacy
 * /dev/watchdog node. /dev/watchdog is actually a miscdevice and
 * thus we set it up like that.
 */
static int watchdog_cdev_register(struct watchdog_device *wdd)
{
	struct watchdog_core_data *wd_data;
	int err;

	wd_data = kzalloc(sizeof(struct watchdog_core_data), GFP_KERNEL);
	if (!wd_data)
		return -ENOMEM;
	mutex_init(&wd_data->lock);

	wd_data->wdd = wdd;
	wdd->wd_data = wd_data;

	if (IS_ERR_OR_NULL(watchdog_kworker)) {
		kfree(wd_data);
		return -ENODEV;
	}

	device_initialize(&wd_data->dev);
	wd_data->dev.devt = MKDEV(MAJOR(watchdog_devt), wdd->id);
	wd_data->dev.class = &watchdog_class;
	wd_data->dev.parent = wdd->parent;
	wd_data->dev.groups = wdd->groups;
	wd_data->dev.release = watchdog_core_data_release;
	dev_set_drvdata(&wd_data->dev, wdd);
	dev_set_name(&wd_data->dev, "watchdog%d", wdd->id);

	kthread_init_work(&wd_data->work, watchdog_ping_work);
	hrtimer_init(&wd_data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
	wd_data->timer.function = watchdog_timer_expired;

	if (wdd->id == 0) {
		old_wd_data = wd_data;
		watchdog_miscdev.parent = wdd->parent;
		err = misc_register(&watchdog_miscdev);
		if (err != 0) {
			pr_err("%s: cannot register miscdev on minor=%d (err=%d).\n",
				wdd->info->identity, WATCHDOG_MINOR, err);
			if (err == -EBUSY)
				pr_err("%s: a legacy watchdog module is probably present.\n",
					wdd->info->identity);
			old_wd_data = NULL;
			put_device(&wd_data->dev);
			return err;
		}
	}

	/* Fill in the data structures */
	cdev_init(&wd_data->cdev, &watchdog_fops);

	/* Add the device */
	err = cdev_device_add(&wd_data->cdev, &wd_data->dev);
	if (err) {
		pr_err("watchdog%d unable to add device %d:%d\n",
			wdd->id, MAJOR(watchdog_devt), wdd->id);
		if (wdd->id == 0) {
			misc_deregister(&watchdog_miscdev);
			old_wd_data = NULL;
			put_device(&wd_data->dev);
		}
		return err;
	}

	wd_data->cdev.owner = wdd->ops->owner;

	/* Record time of most recent heartbeat as 'just before now'. */
	wd_data->last_hw_keepalive = ktime_sub(ktime_get(), 1);

	watchdog_set_open_deadline(wd_data);

	/*
	 * If the watchdog is running, prevent its driver from being unloaded,
	 * and schedule an immediate ping.
	 */
	if (watchdog_hw_running(wdd)) {
		__module_get(wdd->ops->owner);
		get_device(&wd_data->dev);
		if (handle_boot_enabled)
			hrtimer_start(&wd_data->timer, 0,
				      HRTIMER_MODE_REL_HARD);
		else
			pr_info("watchdog%d running and kernel based pre-userspace handler disabled\n",
				wdd->id);
	}

	return 0;
}
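
/*
 * For context (editor's illustration, not part of the original file): drivers
 * never call watchdog_cdev_register() directly; they fill in a struct
 * watchdog_device and register it through the watchdog core, which ends up
 * here. A minimal driver-side sketch, with hypothetical foo_* names:
 *
 *	static const struct watchdog_info foo_info = {
 *		.options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING |
 *			   WDIOF_MAGICCLOSE,
 *		.identity = "foo watchdog",
 *	};
 *
 *	static const struct watchdog_ops foo_ops = {
 *		.owner = THIS_MODULE,
 *		.start = foo_start,
 *		.stop  = foo_stop,
 *		.ping  = foo_ping,
 *	};
 *
 *	wdd->info = &foo_info;
 *	wdd->ops = &foo_ops;
 *	wdd->timeout = 30;
 *	wdd->max_hw_heartbeat_ms = 8000;
 *	err = watchdog_register_device(wdd);
 */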
/*
 * watchdog_cdev_unregister: unregister watchdog character device
 * @watchdog: watchdog device
 *
 * Unregister watchdog character device and if needed the legacy
 * /dev/watchdog device.
 */
static void watchdog_cdev_unregister(struct watchdog_device *wdd)
{
	struct watchdog_core_data *wd_data = wdd->wd_data;

	cdev_device_del(&wd_data->cdev, &wd_data->dev);
	if (wdd->id == 0) {
		misc_deregister(&watchdog_miscdev);
		old_wd_data = NULL;
	}

	if (watchdog_active(wdd) &&
	    test_bit(WDOG_STOP_ON_UNREGISTER, &wdd->status)) {
		watchdog_stop(wdd);
	}

	mutex_lock(&wd_data->lock);
	wd_data->wdd = NULL;
	wdd->wd_data = NULL;
	mutex_unlock(&wd_data->lock);

	hrtimer_cancel(&wd_data->timer);
	kthread_cancel_work_sync(&wd_data->work);

	put_device(&wd_data->dev);
}

/*
 * watchdog_dev_register: register a watchdog device
 * @wdd: watchdog device
 *
 * Register a watchdog device including handling the legacy
 * /dev/watchdog node. /dev/watchdog is actually a miscdevice and
 * thus we set it up like that.
 */
int watchdog_dev_register(struct watchdog_device *wdd)
{
	int ret;

	ret = watchdog_cdev_register(wdd);
	if (ret)
		return ret;

	ret = watchdog_register_pretimeout(wdd);
	if (ret)
		watchdog_cdev_unregister(wdd);

	return ret;
}

/*
 * watchdog_dev_unregister: unregister a watchdog device
 * @watchdog: watchdog device
 *
 * Unregister watchdog device and if needed the legacy
 * /dev/watchdog device.
 */
void watchdog_dev_unregister(struct watchdog_device *wdd)
{
	watchdog_unregister_pretimeout(wdd);
	watchdog_cdev_unregister(wdd);
}

/*
 * watchdog_dev_init: init dev part of watchdog core
 *
 * Allocate a range of chardev nodes to use for watchdog devices
 */
int __init watchdog_dev_init(void)
{
	int err;
	struct sched_param param = {.sched_priority = MAX_RT_PRIO - 1,};

	watchdog_kworker = kthread_create_worker(0, "watchdogd");
	if (IS_ERR(watchdog_kworker)) {
		pr_err("Failed to create watchdog kworker\n");
		return PTR_ERR(watchdog_kworker);
	}
	sched_setscheduler(watchdog_kworker->task, SCHED_FIFO, &param);

	err = class_register(&watchdog_class);
	if (err < 0) {
		pr_err("couldn't register class\n");
		goto err_register;
	}

	err = alloc_chrdev_region(&watchdog_devt, 0, MAX_DOGS, "watchdog");
	if (err < 0) {
		pr_err("watchdog: unable to allocate char dev region\n");
		goto err_alloc;
	}

	return 0;

err_alloc:
	class_unregister(&watchdog_class);
err_register:
	kthread_destroy_worker(watchdog_kworker);
	return err;
}

/*
 * watchdog_dev_exit: exit dev part of watchdog core
 *
 * Release the range of chardev nodes used for watchdog devices
 */
void __exit watchdog_dev_exit(void)
{
	unregister_chrdev_region(watchdog_devt, MAX_DOGS);
	class_unregister(&watchdog_class);
	kthread_destroy_worker(watchdog_kworker);
}

module_param(handle_boot_enabled, bool, 0444);
MODULE_PARM_DESC(handle_boot_enabled,
	"Watchdog core auto-updates boot enabled watchdogs before userspace takes over (default="
	__MODULE_STRING(IS_ENABLED(CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED)) ")");

module_param(open_timeout, uint, 0644);
MODULE_PARM_DESC(open_timeout,
	"Maximum time (in seconds, 0 means infinity) for userspace to take over a running watchdog (default="
	__MODULE_STRING(CONFIG_WATCHDOG_OPEN_TIMEOUT) ")");