watchdog_dev.c

/*
 *	watchdog_dev.c
 *
 *	(c) Copyright 2008-2011 Alan Cox <alan@lxorguk.ukuu.org.uk>,
 *						All Rights Reserved.
 *
 *	(c) Copyright 2008-2011 Wim Van Sebroeck <wim@iguana.be>.
 *
 *
 *	This source code is part of the generic code that can be used
 *	by all the watchdog timer drivers.
 *
 *	This part of the generic code takes care of the following
 *	misc device: /dev/watchdog.
 *
 *	Based on source code of the following authors:
 *	  Matt Domsch <Matt_Domsch@dell.com>,
 *	  Rob Radez <rob@osinvestor.com>,
 *	  Rusty Lynch <rusty@linux.co.intel.com>
 *	  Satyam Sharma <satyam@infradead.org>
 *	  Randy Dunlap <randy.dunlap@oracle.com>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Neither Alan Cox, CymruNet Ltd., Wim Van Sebroeck nor Iguana vzw.
 *	admit liability nor provide warranty for any of this software.
 *	This material is provided "AS-IS" and at no charge.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cdev.h>		/* For character device */
#include <linux/errno.h>	/* For the -ENODEV/... values */
#include <linux/fs.h>		/* For file operations */
#include <linux/init.h>		/* For __init/__exit/... */
#include <linux/hrtimer.h>	/* For hrtimers */
#include <linux/kernel.h>	/* For printk/panic/... */
#include <linux/kthread.h>	/* For kthread_work */
#include <linux/miscdevice.h>	/* For handling misc devices */
#include <linux/module.h>	/* For module stuff/... */
#include <linux/mutex.h>	/* For mutexes */
#include <linux/slab.h>		/* For memory functions */
#include <linux/types.h>	/* For standard types (like size_t) */
#include <linux/watchdog.h>	/* For watchdog specific items */
#include <linux/uaccess.h>	/* For copy_to_user/put_user/... */
#include <uapi/linux/sched/types.h>	/* For struct sched_param */

#include "watchdog_core.h"
#include "watchdog_pretimeout.h"

/*
 * struct watchdog_core_data - watchdog core internal data
 * @dev:	The watchdog's internal device
 * @cdev:	The watchdog's Character device.
 * @wdd:	Pointer to watchdog device.
 * @lock:	Lock for watchdog core.
 * @status:	Watchdog core internal status bits.
 */
struct watchdog_core_data {
	struct device dev;
	struct cdev cdev;
	struct watchdog_device *wdd;
	struct mutex lock;
	ktime_t last_keepalive;
	ktime_t last_hw_keepalive;
	struct hrtimer timer;
	struct kthread_work work;
	unsigned long status;		/* Internal status bits */
#define _WDOG_DEV_OPEN		0	/* Opened ? */
#define _WDOG_ALLOW_RELEASE	1	/* Did we receive the magic char ? */
#define _WDOG_KEEPALIVE		2	/* Did we receive a keepalive ? */
};

/* the dev_t structure to store the dynamically allocated watchdog devices */
static dev_t watchdog_devt;
/* Reference to watchdog device behind /dev/watchdog */
static struct watchdog_core_data *old_wd_data;

static struct kthread_worker *watchdog_kworker;

static bool handle_boot_enabled =
	IS_ENABLED(CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED);

static inline bool watchdog_need_worker(struct watchdog_device *wdd)
{
	/* All variables in milli-seconds */
	unsigned int hm = wdd->max_hw_heartbeat_ms;
	unsigned int t = wdd->timeout * 1000;

	/*
	 * A worker to generate heartbeat requests is needed if all of the
	 * following conditions are true.
	 * - Userspace activated the watchdog.
	 * - The driver provided a value for the maximum hardware timeout, and
	 *   thus is aware that the framework supports generating heartbeat
	 *   requests.
	 * - Userspace requests a longer timeout than the hardware can handle.
	 *
	 * Alternatively, if userspace has not opened the watchdog
	 * device, we take care of feeding the watchdog if it is
	 * running.
	 */
	return (hm && watchdog_active(wdd) && t > hm) ||
		(t && !watchdog_active(wdd) && watchdog_hw_running(wdd));
}
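
/*
 * Worked example for the conditions above (numbers are hypothetical, not
 * taken from any particular driver): with wdd->timeout = 60 (t = 60000 ms)
 * and wdd->max_hw_heartbeat_ms = 8000, an active watchdog needs the worker
 * because t > hm; the hardware must be fed every few seconds even though
 * userspace only pings once per minute. If instead the hardware is running
 * but userspace never opened the device (watchdog_hw_running() &&
 * !watchdog_active()), the worker keeps feeding it as long as a non-zero
 * timeout is configured.
 */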

static ktime_t watchdog_next_keepalive(struct watchdog_device *wdd)
{
	struct watchdog_core_data *wd_data = wdd->wd_data;
	unsigned int timeout_ms = wdd->timeout * 1000;
	ktime_t keepalive_interval;
	ktime_t last_heartbeat, latest_heartbeat;
	ktime_t virt_timeout;
	unsigned int hw_heartbeat_ms;

	virt_timeout = ktime_add(wd_data->last_keepalive,
				 ms_to_ktime(timeout_ms));
	hw_heartbeat_ms = min_not_zero(timeout_ms, wdd->max_hw_heartbeat_ms);
	keepalive_interval = ms_to_ktime(hw_heartbeat_ms / 2);

	if (!watchdog_active(wdd))
		return keepalive_interval;

	/*
	 * To ensure that the watchdog times out wdd->timeout seconds
	 * after the most recent ping from userspace, the last
	 * worker ping has to come in hw_heartbeat_ms before this timeout.
	 */
	last_heartbeat = ktime_sub(virt_timeout, ms_to_ktime(hw_heartbeat_ms));
	latest_heartbeat = ktime_sub(last_heartbeat, ktime_get());
	if (ktime_before(latest_heartbeat, keepalive_interval))
		return latest_heartbeat;
	return keepalive_interval;
}
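
/*
 * Scheduling sketch for watchdog_next_keepalive() (hypothetical numbers):
 * with timeout_ms = 60000 and max_hw_heartbeat_ms = 8000, the regular
 * keepalive_interval is 4 s. If the last userspace ping came 50 s ago, the
 * virtual timeout lies 10 s in the future, so the final worker ping must be
 * issued within 10 s - 8 s = 2 s; 2 s is shorter than the 4 s interval and
 * is returned instead, keeping overall expiry at wdd->timeout seconds after
 * the most recent userspace ping.
 */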

static inline void watchdog_update_worker(struct watchdog_device *wdd)
{
	struct watchdog_core_data *wd_data = wdd->wd_data;

	if (watchdog_need_worker(wdd)) {
		ktime_t t = watchdog_next_keepalive(wdd);

		if (t > 0)
			hrtimer_start(&wd_data->timer, t, HRTIMER_MODE_REL);
	} else {
		hrtimer_cancel(&wd_data->timer);
	}
}

static int __watchdog_ping(struct watchdog_device *wdd)
{
	struct watchdog_core_data *wd_data = wdd->wd_data;
	ktime_t earliest_keepalive, now;
	int err;

	earliest_keepalive = ktime_add(wd_data->last_hw_keepalive,
				       ms_to_ktime(wdd->min_hw_heartbeat_ms));
	now = ktime_get();

	if (ktime_after(earliest_keepalive, now)) {
		hrtimer_start(&wd_data->timer,
			      ktime_sub(earliest_keepalive, now),
			      HRTIMER_MODE_REL);
		return 0;
	}

	wd_data->last_hw_keepalive = now;

	if (wdd->ops->ping)
		err = wdd->ops->ping(wdd);	/* ping the watchdog */
	else
		err = wdd->ops->start(wdd);	/* restart watchdog */

	watchdog_update_worker(wdd);

	return err;
}

/*
 *	watchdog_ping: ping the watchdog.
 *	@wdd: the watchdog device to ping
 *
 *	The caller must hold wd_data->lock.
 *
 *	If the watchdog has no ping operation of its own then it needs to be
 *	restarted via the start operation. This wrapper function does
 *	exactly that.
 *	We only ping when the watchdog device is running.
 */
static int watchdog_ping(struct watchdog_device *wdd)
{
	struct watchdog_core_data *wd_data = wdd->wd_data;

	if (!watchdog_active(wdd) && !watchdog_hw_running(wdd))
		return 0;

	set_bit(_WDOG_KEEPALIVE, &wd_data->status);

	wd_data->last_keepalive = ktime_get();
	return __watchdog_ping(wdd);
}

static bool watchdog_worker_should_ping(struct watchdog_core_data *wd_data)
{
	struct watchdog_device *wdd = wd_data->wdd;

	return wdd && (watchdog_active(wdd) || watchdog_hw_running(wdd));
}

static void watchdog_ping_work(struct kthread_work *work)
{
	struct watchdog_core_data *wd_data;

	wd_data = container_of(work, struct watchdog_core_data, work);

	mutex_lock(&wd_data->lock);
	if (watchdog_worker_should_ping(wd_data))
		__watchdog_ping(wd_data->wdd);
	mutex_unlock(&wd_data->lock);
}

static enum hrtimer_restart watchdog_timer_expired(struct hrtimer *timer)
{
	struct watchdog_core_data *wd_data;

	wd_data = container_of(timer, struct watchdog_core_data, timer);

	kthread_queue_work(watchdog_kworker, &wd_data->work);
	return HRTIMER_NORESTART;
}

/*
 *	watchdog_start: wrapper to start the watchdog.
 *	@wdd: the watchdog device to start
 *
 *	The caller must hold wd_data->lock.
 *
 *	Start the watchdog if it is not active and mark it active.
 *	This function returns zero on success or a negative errno code for
 *	failure.
 */
static int watchdog_start(struct watchdog_device *wdd)
{
	struct watchdog_core_data *wd_data = wdd->wd_data;
	ktime_t started_at;
	int err;

	if (watchdog_active(wdd))
		return 0;

	set_bit(_WDOG_KEEPALIVE, &wd_data->status);

	started_at = ktime_get();
	if (watchdog_hw_running(wdd) && wdd->ops->ping)
		err = wdd->ops->ping(wdd);
	else
		err = wdd->ops->start(wdd);
	if (err == 0) {
		set_bit(WDOG_ACTIVE, &wdd->status);
		wd_data->last_keepalive = started_at;
		watchdog_update_worker(wdd);
	}

	return err;
}

/*
 *	watchdog_stop: wrapper to stop the watchdog.
 *	@wdd: the watchdog device to stop
 *
 *	The caller must hold wd_data->lock.
 *
 *	Stop the watchdog if it is still active and unmark it active.
 *	This function returns zero on success or a negative errno code for
 *	failure.
 *	If the 'nowayout' feature was set, the watchdog cannot be stopped.
 */
static int watchdog_stop(struct watchdog_device *wdd)
{
	int err = 0;

	if (!watchdog_active(wdd))
		return 0;

	if (test_bit(WDOG_NO_WAY_OUT, &wdd->status)) {
		pr_info("watchdog%d: nowayout prevents watchdog being stopped!\n",
			wdd->id);
		return -EBUSY;
	}

	if (wdd->ops->stop) {
		clear_bit(WDOG_HW_RUNNING, &wdd->status);
		err = wdd->ops->stop(wdd);
	} else {
		set_bit(WDOG_HW_RUNNING, &wdd->status);
	}

	if (err == 0) {
		clear_bit(WDOG_ACTIVE, &wdd->status);
		watchdog_update_worker(wdd);
	}

	return err;
}

/*
 *	watchdog_get_status: wrapper to get the watchdog status
 *	@wdd: the watchdog device to get the status from
 *
 *	The caller must hold wd_data->lock.
 *
 *	Get the watchdog's status flags.
 */
static unsigned int watchdog_get_status(struct watchdog_device *wdd)
{
	struct watchdog_core_data *wd_data = wdd->wd_data;
	unsigned int status;

	if (wdd->ops->status)
		status = wdd->ops->status(wdd);
	else
		status = wdd->bootstatus & (WDIOF_CARDRESET |
					    WDIOF_OVERHEAT |
					    WDIOF_FANFAULT |
					    WDIOF_EXTERN1 |
					    WDIOF_EXTERN2 |
					    WDIOF_POWERUNDER |
					    WDIOF_POWEROVER);

	if (test_bit(_WDOG_ALLOW_RELEASE, &wd_data->status))
		status |= WDIOF_MAGICCLOSE;

	if (test_and_clear_bit(_WDOG_KEEPALIVE, &wd_data->status))
		status |= WDIOF_KEEPALIVEPING;

	return status;
}

/*
 *	watchdog_set_timeout: set the watchdog timer timeout
 *	@wdd: the watchdog device to set the timeout for
 *	@timeout: timeout to set in seconds
 *
 *	The caller must hold wd_data->lock.
 */
static int watchdog_set_timeout(struct watchdog_device *wdd,
				unsigned int timeout)
{
	int err = 0;

	if (!(wdd->info->options & WDIOF_SETTIMEOUT))
		return -EOPNOTSUPP;

	if (watchdog_timeout_invalid(wdd, timeout))
		return -EINVAL;

	if (wdd->ops->set_timeout) {
		err = wdd->ops->set_timeout(wdd, timeout);
	} else {
		wdd->timeout = timeout;
		/* Disable pretimeout if it doesn't fit the new timeout */
		if (wdd->pretimeout >= wdd->timeout)
			wdd->pretimeout = 0;
	}

	watchdog_update_worker(wdd);

	return err;
}

/*
 *	watchdog_set_pretimeout: set the watchdog timer pretimeout
 *	@wdd: the watchdog device to set the timeout for
 *	@timeout: pretimeout to set in seconds
 */
static int watchdog_set_pretimeout(struct watchdog_device *wdd,
				   unsigned int timeout)
{
	int err = 0;

	if (!(wdd->info->options & WDIOF_PRETIMEOUT))
		return -EOPNOTSUPP;

	if (watchdog_pretimeout_invalid(wdd, timeout))
		return -EINVAL;

	if (wdd->ops->set_pretimeout)
		err = wdd->ops->set_pretimeout(wdd, timeout);
	else
		wdd->pretimeout = timeout;

	return err;
}

/*
 *	watchdog_get_timeleft: wrapper to get the time left before a reboot
 *	@wdd: the watchdog device to get the remaining time from
 *	@timeleft: the time that's left
 *
 *	The caller must hold wd_data->lock.
 *
 *	Get the time before a watchdog will reboot (if not pinged).
 */
static int watchdog_get_timeleft(struct watchdog_device *wdd,
				 unsigned int *timeleft)
{
	*timeleft = 0;

	if (!wdd->ops->get_timeleft)
		return -EOPNOTSUPP;

	*timeleft = wdd->ops->get_timeleft(wdd);

	return 0;
}

#ifdef CONFIG_WATCHDOG_SYSFS
static ssize_t nowayout_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct watchdog_device *wdd = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", !!test_bit(WDOG_NO_WAY_OUT, &wdd->status));
}
static DEVICE_ATTR_RO(nowayout);

static ssize_t status_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct watchdog_device *wdd = dev_get_drvdata(dev);
	struct watchdog_core_data *wd_data = wdd->wd_data;
	unsigned int status;

	mutex_lock(&wd_data->lock);
	status = watchdog_get_status(wdd);
	mutex_unlock(&wd_data->lock);

	return sprintf(buf, "0x%x\n", status);
}
static DEVICE_ATTR_RO(status);

static ssize_t bootstatus_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct watchdog_device *wdd = dev_get_drvdata(dev);

	return sprintf(buf, "%u\n", wdd->bootstatus);
}
static DEVICE_ATTR_RO(bootstatus);

static ssize_t timeleft_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct watchdog_device *wdd = dev_get_drvdata(dev);
	struct watchdog_core_data *wd_data = wdd->wd_data;
	ssize_t status;
	unsigned int val;

	mutex_lock(&wd_data->lock);
	status = watchdog_get_timeleft(wdd, &val);
	mutex_unlock(&wd_data->lock);
	if (!status)
		status = sprintf(buf, "%u\n", val);

	return status;
}
static DEVICE_ATTR_RO(timeleft);

static ssize_t timeout_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct watchdog_device *wdd = dev_get_drvdata(dev);

	return sprintf(buf, "%u\n", wdd->timeout);
}
static DEVICE_ATTR_RO(timeout);

static ssize_t pretimeout_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct watchdog_device *wdd = dev_get_drvdata(dev);

	return sprintf(buf, "%u\n", wdd->pretimeout);
}
static DEVICE_ATTR_RO(pretimeout);

static ssize_t identity_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct watchdog_device *wdd = dev_get_drvdata(dev);

	return sprintf(buf, "%s\n", wdd->info->identity);
}
static DEVICE_ATTR_RO(identity);

static ssize_t state_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct watchdog_device *wdd = dev_get_drvdata(dev);

	if (watchdog_active(wdd))
		return sprintf(buf, "active\n");

	return sprintf(buf, "inactive\n");
}
static DEVICE_ATTR_RO(state);

static ssize_t pretimeout_available_governors_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	return watchdog_pretimeout_available_governors_get(buf);
}
static DEVICE_ATTR_RO(pretimeout_available_governors);

static ssize_t pretimeout_governor_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct watchdog_device *wdd = dev_get_drvdata(dev);

	return watchdog_pretimeout_governor_get(wdd, buf);
}

static ssize_t pretimeout_governor_store(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	struct watchdog_device *wdd = dev_get_drvdata(dev);
	int ret = watchdog_pretimeout_governor_set(wdd, buf);

	if (!ret)
		ret = count;

	return ret;
}
static DEVICE_ATTR_RW(pretimeout_governor);

static umode_t wdt_is_visible(struct kobject *kobj, struct attribute *attr,
			      int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct watchdog_device *wdd = dev_get_drvdata(dev);
	umode_t mode = attr->mode;

	if (attr == &dev_attr_timeleft.attr && !wdd->ops->get_timeleft)
		mode = 0;
	else if (attr == &dev_attr_pretimeout.attr &&
		 !(wdd->info->options & WDIOF_PRETIMEOUT))
		mode = 0;
	else if ((attr == &dev_attr_pretimeout_governor.attr ||
		  attr == &dev_attr_pretimeout_available_governors.attr) &&
		 (!(wdd->info->options & WDIOF_PRETIMEOUT) ||
		  !IS_ENABLED(CONFIG_WATCHDOG_PRETIMEOUT_GOV)))
		mode = 0;

	return mode;
}

static struct attribute *wdt_attrs[] = {
	&dev_attr_state.attr,
	&dev_attr_identity.attr,
	&dev_attr_timeout.attr,
	&dev_attr_pretimeout.attr,
	&dev_attr_timeleft.attr,
	&dev_attr_bootstatus.attr,
	&dev_attr_status.attr,
	&dev_attr_nowayout.attr,
	&dev_attr_pretimeout_governor.attr,
	&dev_attr_pretimeout_available_governors.attr,
	NULL,
};

static const struct attribute_group wdt_group = {
	.attrs = wdt_attrs,
	.is_visible = wdt_is_visible,
};
__ATTRIBUTE_GROUPS(wdt);
#else
#define wdt_groups	NULL
#endif

/*
 *	watchdog_ioctl_op: call the watchdog driver's ioctl op if defined
 *	@wdd: the watchdog device to do the ioctl on
 *	@cmd: watchdog command
 *	@arg: argument pointer
 *
 *	The caller must hold wd_data->lock.
 */
static int watchdog_ioctl_op(struct watchdog_device *wdd, unsigned int cmd,
			     unsigned long arg)
{
	if (!wdd->ops->ioctl)
		return -ENOIOCTLCMD;

	return wdd->ops->ioctl(wdd, cmd, arg);
}

/*
 *	watchdog_write: writes to the watchdog.
 *	@file: file from VFS
 *	@data: user address of data
 *	@len: length of data
 *	@ppos: pointer to the file offset
 *
 *	A write to a watchdog device is defined as a keepalive ping.
 *	Writing the magic 'V' sequence allows the next close to turn
 *	off the watchdog (if 'nowayout' is not set).
 */
static ssize_t watchdog_write(struct file *file, const char __user *data,
			      size_t len, loff_t *ppos)
{
	struct watchdog_core_data *wd_data = file->private_data;
	struct watchdog_device *wdd;
	int err;
	size_t i;
	char c;

	if (len == 0)
		return 0;

	/*
	 * Note: just in case someone wrote the magic character
	 * five months ago...
	 */
	clear_bit(_WDOG_ALLOW_RELEASE, &wd_data->status);

	/* scan to see whether or not we got the magic character */
	for (i = 0; i != len; i++) {
		if (get_user(c, data + i))
			return -EFAULT;
		if (c == 'V')
			set_bit(_WDOG_ALLOW_RELEASE, &wd_data->status);
	}

	/* someone wrote to us, so we send the watchdog a keepalive ping */

	err = -ENODEV;
	mutex_lock(&wd_data->lock);
	wdd = wd_data->wdd;
	if (wdd)
		err = watchdog_ping(wdd);
	mutex_unlock(&wd_data->lock);

	if (err < 0)
		return err;

	return len;
}
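
/*
 * Illustrative userspace sketch (not part of this driver) of the keepalive
 * and magic-close protocol handled by watchdog_write() above and
 * watchdog_release() below; the device path, loop and timing are
 * assumptions, and #includes/error handling are omitted:
 *
 *	int fd = open("/dev/watchdog", O_WRONLY);
 *
 *	for (int i = 0; i < 10; i++) {
 *		write(fd, "\0", 1);	// any write counts as a keepalive ping
 *		sleep(10);
 *	}
 *	write(fd, "V", 1);		// arm the magic close
 *	close(fd);			// stops the watchdog, unless nowayout
 */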

/*
 *	watchdog_ioctl: handle the different ioctls for the watchdog device.
 *	@file: file handle to the device
 *	@cmd: watchdog command
 *	@arg: argument pointer
 *
 *	The watchdog API defines a common set of functions for all watchdogs
 *	according to their available features.
 */
static long watchdog_ioctl(struct file *file, unsigned int cmd,
			   unsigned long arg)
{
	struct watchdog_core_data *wd_data = file->private_data;
	void __user *argp = (void __user *)arg;
	struct watchdog_device *wdd;
	int __user *p = argp;
	unsigned int val;
	int err;

	mutex_lock(&wd_data->lock);

	wdd = wd_data->wdd;
	if (!wdd) {
		err = -ENODEV;
		goto out_ioctl;
	}

	err = watchdog_ioctl_op(wdd, cmd, arg);
	if (err != -ENOIOCTLCMD)
		goto out_ioctl;

	switch (cmd) {
	case WDIOC_GETSUPPORT:
		err = copy_to_user(argp, wdd->info,
			sizeof(struct watchdog_info)) ? -EFAULT : 0;
		break;
	case WDIOC_GETSTATUS:
		val = watchdog_get_status(wdd);
		err = put_user(val, p);
		break;
	case WDIOC_GETBOOTSTATUS:
		err = put_user(wdd->bootstatus, p);
		break;
	case WDIOC_SETOPTIONS:
		if (get_user(val, p)) {
			err = -EFAULT;
			break;
		}
		if (val & WDIOS_DISABLECARD) {
			err = watchdog_stop(wdd);
			if (err < 0)
				break;
		}
		if (val & WDIOS_ENABLECARD)
			err = watchdog_start(wdd);
		break;
	case WDIOC_KEEPALIVE:
		if (!(wdd->info->options & WDIOF_KEEPALIVEPING)) {
			err = -EOPNOTSUPP;
			break;
		}
		err = watchdog_ping(wdd);
		break;
	case WDIOC_SETTIMEOUT:
		if (get_user(val, p)) {
			err = -EFAULT;
			break;
		}
		err = watchdog_set_timeout(wdd, val);
		if (err < 0)
			break;
		/* If the watchdog is active then we send a keepalive ping
		 * to make sure that the watchdog keeps running (and if
		 * possible that it takes the new timeout) */
		err = watchdog_ping(wdd);
		if (err < 0)
			break;
		/* fall through */
	case WDIOC_GETTIMEOUT:
		/* timeout == 0 means that we don't know the timeout */
		if (wdd->timeout == 0) {
			err = -EOPNOTSUPP;
			break;
		}
		err = put_user(wdd->timeout, p);
		break;
	case WDIOC_GETTIMELEFT:
		err = watchdog_get_timeleft(wdd, &val);
		if (err < 0)
			break;
		err = put_user(val, p);
		break;
	case WDIOC_SETPRETIMEOUT:
		if (get_user(val, p)) {
			err = -EFAULT;
			break;
		}
		err = watchdog_set_pretimeout(wdd, val);
		break;
	case WDIOC_GETPRETIMEOUT:
		err = put_user(wdd->pretimeout, p);
		break;
	default:
		err = -ENOTTY;
		break;
	}

out_ioctl:
	mutex_unlock(&wd_data->lock);
	return err;
}
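
/*
 * Illustrative userspace sketch (not part of this driver) of the ioctl
 * interface implemented above; the device path and values are assumptions,
 * and #includes/error handling are omitted:
 *
 *	int fd = open("/dev/watchdog", O_WRONLY);
 *	int timeout = 30, left;
 *
 *	ioctl(fd, WDIOC_SETTIMEOUT, &timeout);	// request 30 s; the value
 *						// actually set is read back
 *	ioctl(fd, WDIOC_GETTIMELEFT, &left);	// seconds until reset, fails
 *						// with EOPNOTSUPP if the
 *						// driver has no get_timeleft
 *	ioctl(fd, WDIOC_KEEPALIVE, NULL);	// explicit keepalive ping
 */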

/*
 *	watchdog_open: open the /dev/watchdog* devices.
 *	@inode: inode of device
 *	@file: file handle to device
 *
 *	When the /dev/watchdog* device gets opened, we start the watchdog.
 *	Watch out: the /dev/watchdog device is single open, so we make sure
 *	it can only be opened once.
 */
static int watchdog_open(struct inode *inode, struct file *file)
{
	struct watchdog_core_data *wd_data;
	struct watchdog_device *wdd;
	bool hw_running;
	int err;

	/* Get the corresponding watchdog device */
	if (imajor(inode) == MISC_MAJOR)
		wd_data = old_wd_data;
	else
		wd_data = container_of(inode->i_cdev, struct watchdog_core_data,
				       cdev);

	/* the watchdog is single open! */
	if (test_and_set_bit(_WDOG_DEV_OPEN, &wd_data->status))
		return -EBUSY;

	wdd = wd_data->wdd;

	/*
	 * If the /dev/watchdog device is open, we don't want the module
	 * to be unloaded.
	 */
	hw_running = watchdog_hw_running(wdd);
	if (!hw_running && !try_module_get(wdd->ops->owner)) {
		err = -EBUSY;
		goto out_clear;
	}

	err = watchdog_start(wdd);
	if (err < 0)
		goto out_mod;

	file->private_data = wd_data;

	if (!hw_running)
		get_device(&wd_data->dev);

	/* /dev/watchdog is a virtual (and thus non-seekable) file */
	return nonseekable_open(inode, file);

out_mod:
	module_put(wd_data->wdd->ops->owner);
out_clear:
	clear_bit(_WDOG_DEV_OPEN, &wd_data->status);
	return err;
}

static void watchdog_core_data_release(struct device *dev)
{
	struct watchdog_core_data *wd_data;

	wd_data = container_of(dev, struct watchdog_core_data, dev);

	kfree(wd_data);
}

/*
 *	watchdog_release: release the watchdog device.
 *	@inode: inode of device
 *	@file: file handle to device
 *
 *	This is the code for when /dev/watchdog gets closed. We will only
 *	stop the watchdog when we have received the magic char (and nowayout
 *	was not set), else the watchdog will keep running.
 */
static int watchdog_release(struct inode *inode, struct file *file)
{
	struct watchdog_core_data *wd_data = file->private_data;
	struct watchdog_device *wdd;
	int err = -EBUSY;
	bool running;

	mutex_lock(&wd_data->lock);

	wdd = wd_data->wdd;
	if (!wdd)
		goto done;

	/*
	 * We only stop the watchdog if we received the magic character
	 * or if WDIOF_MAGICCLOSE is not set. If nowayout was set then
	 * watchdog_stop will fail.
	 */
	if (!test_bit(WDOG_ACTIVE, &wdd->status))
		err = 0;
	else if (test_and_clear_bit(_WDOG_ALLOW_RELEASE, &wd_data->status) ||
		 !(wdd->info->options & WDIOF_MAGICCLOSE))
		err = watchdog_stop(wdd);

	/* If the watchdog was not stopped, send a keepalive ping */
	if (err < 0) {
		pr_crit("watchdog%d: watchdog did not stop!\n", wdd->id);
		watchdog_ping(wdd);
	}

	watchdog_update_worker(wdd);

	/* make sure that /dev/watchdog can be re-opened */
	clear_bit(_WDOG_DEV_OPEN, &wd_data->status);

done:
	running = wdd && watchdog_hw_running(wdd);
	mutex_unlock(&wd_data->lock);
	/*
	 * Allow the owner module to be unloaded again unless the watchdog
	 * is still running. If the watchdog is still running, it cannot
	 * be stopped, and its driver must not be unloaded.
	 */
	if (!running) {
		module_put(wd_data->cdev.owner);
		put_device(&wd_data->dev);
	}
	return 0;
}

static const struct file_operations watchdog_fops = {
	.owner		= THIS_MODULE,
	.write		= watchdog_write,
	.unlocked_ioctl	= watchdog_ioctl,
	.open		= watchdog_open,
	.release	= watchdog_release,
};

static struct miscdevice watchdog_miscdev = {
	.minor		= WATCHDOG_MINOR,
	.name		= "watchdog",
	.fops		= &watchdog_fops,
};

static struct class watchdog_class = {
	.name		= "watchdog",
	.owner		= THIS_MODULE,
	.dev_groups	= wdt_groups,
};

/*
 *	watchdog_cdev_register: register watchdog character device
 *	@wdd: watchdog device
 *
 *	Register a watchdog character device including handling the legacy
 *	/dev/watchdog node. /dev/watchdog is actually a miscdevice and
 *	thus we set it up like that.
 */
static int watchdog_cdev_register(struct watchdog_device *wdd)
{
	struct watchdog_core_data *wd_data;
	int err;

	wd_data = kzalloc(sizeof(struct watchdog_core_data), GFP_KERNEL);
	if (!wd_data)
		return -ENOMEM;
	mutex_init(&wd_data->lock);

	wd_data->wdd = wdd;
	wdd->wd_data = wd_data;

	if (IS_ERR_OR_NULL(watchdog_kworker))
		return -ENODEV;

	kthread_init_work(&wd_data->work, watchdog_ping_work);
	hrtimer_init(&wd_data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	wd_data->timer.function = watchdog_timer_expired;

	if (wdd->id == 0) {
		old_wd_data = wd_data;
		watchdog_miscdev.parent = wdd->parent;
		err = misc_register(&watchdog_miscdev);
		if (err != 0) {
			pr_err("%s: cannot register miscdev on minor=%d (err=%d).\n",
				wdd->info->identity, WATCHDOG_MINOR, err);
			if (err == -EBUSY)
				pr_err("%s: a legacy watchdog module is probably present.\n",
					wdd->info->identity);
			old_wd_data = NULL;
			kfree(wd_data);
			return err;
		}
	}

	device_initialize(&wd_data->dev);
	wd_data->dev.devt = MKDEV(MAJOR(watchdog_devt), wdd->id);
	wd_data->dev.class = &watchdog_class;
	wd_data->dev.parent = wdd->parent;
	wd_data->dev.groups = wdd->groups;
	wd_data->dev.release = watchdog_core_data_release;
	dev_set_drvdata(&wd_data->dev, wdd);
	dev_set_name(&wd_data->dev, "watchdog%d", wdd->id);

	/* Fill in the data structures */
	cdev_init(&wd_data->cdev, &watchdog_fops);

	/* Add the device */
	err = cdev_device_add(&wd_data->cdev, &wd_data->dev);
	if (err) {
		pr_err("watchdog%d unable to add device %d:%d\n",
			wdd->id, MAJOR(watchdog_devt), wdd->id);
		if (wdd->id == 0) {
			misc_deregister(&watchdog_miscdev);
			old_wd_data = NULL;
			put_device(&wd_data->dev);
		}
		return err;
	}

	wd_data->cdev.owner = wdd->ops->owner;

	/* Record time of most recent heartbeat as 'just before now'. */
	wd_data->last_hw_keepalive = ktime_sub(ktime_get(), 1);

	/*
	 * If the watchdog is running, prevent its driver from being unloaded,
	 * and schedule an immediate ping.
	 */
	if (watchdog_hw_running(wdd)) {
		__module_get(wdd->ops->owner);
		get_device(&wd_data->dev);
		if (handle_boot_enabled)
			hrtimer_start(&wd_data->timer, 0, HRTIMER_MODE_REL);
		else
			pr_info("watchdog%d running and kernel based pre-userspace handler disabled\n",
				wdd->id);
	}

	return 0;
}

/*
 *	watchdog_cdev_unregister: unregister watchdog character device
 *	@watchdog: watchdog device
 *
 *	Unregister watchdog character device and if needed the legacy
 *	/dev/watchdog device.
 */
static void watchdog_cdev_unregister(struct watchdog_device *wdd)
{
	struct watchdog_core_data *wd_data = wdd->wd_data;

	cdev_device_del(&wd_data->cdev, &wd_data->dev);
	if (wdd->id == 0) {
		misc_deregister(&watchdog_miscdev);
		old_wd_data = NULL;
	}

	if (watchdog_active(wdd) &&
	    test_bit(WDOG_STOP_ON_UNREGISTER, &wdd->status)) {
		watchdog_stop(wdd);
	}

	mutex_lock(&wd_data->lock);
	wd_data->wdd = NULL;
	wdd->wd_data = NULL;
	mutex_unlock(&wd_data->lock);

	hrtimer_cancel(&wd_data->timer);
	kthread_cancel_work_sync(&wd_data->work);

	put_device(&wd_data->dev);
}

/*
 *	watchdog_dev_register: register a watchdog device
 *	@wdd: watchdog device
 *
 *	Register a watchdog device including handling the legacy
 *	/dev/watchdog node. /dev/watchdog is actually a miscdevice and
 *	thus we set it up like that.
 */
int watchdog_dev_register(struct watchdog_device *wdd)
{
	int ret;

	ret = watchdog_cdev_register(wdd);
	if (ret)
		return ret;

	ret = watchdog_register_pretimeout(wdd);
	if (ret)
		watchdog_cdev_unregister(wdd);

	return ret;
}
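
/*
 * Minimal driver-side sketch (illustrative only; names such as foo_wdt_start
 * are hypothetical) of how a hardware driver ends up here: it fills in a
 * struct watchdog_device and calls watchdog_register_device() from
 * watchdog_core.c, which in turn calls watchdog_dev_register() above.
 *
 *	static const struct watchdog_info foo_wdt_info = {
 *		.options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING |
 *			   WDIOF_MAGICCLOSE,
 *		.identity = "foo watchdog",
 *	};
 *
 *	static const struct watchdog_ops foo_wdt_ops = {
 *		.owner = THIS_MODULE,
 *		.start = foo_wdt_start,
 *		.stop  = foo_wdt_stop,
 *		.ping  = foo_wdt_ping,
 *	};
 *
 *	static struct watchdog_device foo_wdd = {
 *		.info = &foo_wdt_info,
 *		.ops = &foo_wdt_ops,
 *		.timeout = 30,
 *		.max_hw_heartbeat_ms = 8000,
 *	};
 *
 *	err = watchdog_register_device(&foo_wdd);
 */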

/*
 *	watchdog_dev_unregister: unregister a watchdog device
 *	@watchdog: watchdog device
 *
 *	Unregister watchdog device and if needed the legacy
 *	/dev/watchdog device.
 */
void watchdog_dev_unregister(struct watchdog_device *wdd)
{
	watchdog_unregister_pretimeout(wdd);
	watchdog_cdev_unregister(wdd);
}

/*
 *	watchdog_dev_init: init dev part of watchdog core
 *
 *	Allocate a range of chardev nodes to use for watchdog devices
 */
int __init watchdog_dev_init(void)
{
	int err;
	struct sched_param param = {.sched_priority = MAX_RT_PRIO - 1,};

	watchdog_kworker = kthread_create_worker(0, "watchdogd");
	if (IS_ERR(watchdog_kworker)) {
		pr_err("Failed to create watchdog kworker\n");
		return PTR_ERR(watchdog_kworker);
	}
	sched_setscheduler(watchdog_kworker->task, SCHED_FIFO, &param);

	err = class_register(&watchdog_class);
	if (err < 0) {
		pr_err("couldn't register class\n");
		goto err_register;
	}

	err = alloc_chrdev_region(&watchdog_devt, 0, MAX_DOGS, "watchdog");
	if (err < 0) {
		pr_err("watchdog: unable to allocate char dev region\n");
		goto err_alloc;
	}

	return 0;

err_alloc:
	class_unregister(&watchdog_class);
err_register:
	kthread_destroy_worker(watchdog_kworker);
	return err;
}

/*
 *	watchdog_dev_exit: exit dev part of watchdog core
 *
 *	Release the range of chardev nodes used for watchdog devices
 */
void __exit watchdog_dev_exit(void)
{
	unregister_chrdev_region(watchdog_devt, MAX_DOGS);
	class_unregister(&watchdog_class);
	kthread_destroy_worker(watchdog_kworker);
}

module_param(handle_boot_enabled, bool, 0444);
MODULE_PARM_DESC(handle_boot_enabled,
	"Watchdog core auto-updates boot enabled watchdogs before userspace takes over (default="
	__MODULE_STRING(IS_ENABLED(CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED)) ")");