// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/base/devres.c - device resource management
 *
 * Copyright (c) 2006 SUSE Linux Products GmbH
 * Copyright (c) 2006 Tejun Heo <teheo@suse.de>
 */

#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/percpu.h>

#include "base.h"

struct devres_node {
	struct list_head	entry;
	dr_release_t		release;
#ifdef CONFIG_DEBUG_DEVRES
	const char		*name;
	size_t			size;
#endif
};

struct devres {
	struct devres_node	node;
	/*
	 * Some archs want to perform DMA into kmalloc caches
	 * and need a guaranteed alignment larger than
	 * the alignment of a 64-bit integer.
	 * Thus we use ARCH_KMALLOC_MINALIGN here and get exactly the same
	 * buffer alignment as if it was allocated by plain kmalloc().
	 */
	u8 __aligned(ARCH_KMALLOC_MINALIGN) data[];
};

struct devres_group {
	struct devres_node	node[2];
	void			*id;
	int			color;
	/* -- 8 pointers */
};

#ifdef CONFIG_DEBUG_DEVRES
static int log_devres = 0;
module_param_named(log, log_devres, int, S_IRUGO | S_IWUSR);

static void set_node_dbginfo(struct devres_node *node, const char *name,
			     size_t size)
{
	node->name = name;
	node->size = size;
}

static void devres_log(struct device *dev, struct devres_node *node,
		       const char *op)
{
	if (unlikely(log_devres))
		dev_err(dev, "DEVRES %3s %p %s (%lu bytes)\n",
			op, node, node->name, (unsigned long)node->size);
}
#else /* CONFIG_DEBUG_DEVRES */
#define set_node_dbginfo(node, n, s)	do {} while (0)
#define devres_log(dev, node, op)	do {} while (0)
#endif /* CONFIG_DEBUG_DEVRES */

/*
 * Release functions for devres group. These callbacks are used only
 * for identification.
 */
static void group_open_release(struct device *dev, void *res)
{
	/* noop */
}

static void group_close_release(struct device *dev, void *res)
{
	/* noop */
}

static struct devres_group * node_to_group(struct devres_node *node)
{
	if (node->release == &group_open_release)
		return container_of(node, struct devres_group, node[0]);
	if (node->release == &group_close_release)
		return container_of(node, struct devres_group, node[1]);
	return NULL;
}

static __always_inline struct devres * alloc_dr(dr_release_t release,
						size_t size, gfp_t gfp, int nid)
{
	size_t tot_size;
	struct devres *dr;

	/* We must catch any near-SIZE_MAX cases that could overflow. */
	if (unlikely(check_add_overflow(sizeof(struct devres), size,
					&tot_size)))
		return NULL;

	dr = kmalloc_node_track_caller(tot_size, gfp, nid);
	if (unlikely(!dr))
		return NULL;

	memset(dr, 0, offsetof(struct devres, data));

	INIT_LIST_HEAD(&dr->node.entry);
	dr->node.release = release;
	return dr;
}

static void add_dr(struct device *dev, struct devres_node *node)
{
	devres_log(dev, node, "ADD");
	BUG_ON(!list_empty(&node->entry));
	list_add_tail(&node->entry, &dev->devres_head);
}

#ifdef CONFIG_DEBUG_DEVRES
void * __devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp, int nid,
			   const char *name)
{
	struct devres *dr;

	dr = alloc_dr(release, size, gfp | __GFP_ZERO, nid);
	if (unlikely(!dr))
		return NULL;
	set_node_dbginfo(&dr->node, name, size);
	return dr->data;
}
EXPORT_SYMBOL_GPL(__devres_alloc_node);
#else
/**
 * devres_alloc_node - Allocate device resource data
 * @release: Release function devres will be associated with
 * @size: Allocation size
 * @gfp: Allocation flags
 * @nid: NUMA node
 *
 * Allocate devres of @size bytes. The allocated area is zeroed, then
 * associated with @release. The returned pointer can be passed to
 * other devres_*() functions.
 *
 * RETURNS:
 * Pointer to allocated devres on success, NULL on failure.
 */
void * devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp, int nid)
{
	struct devres *dr;

	dr = alloc_dr(release, size, gfp | __GFP_ZERO, nid);
	if (unlikely(!dr))
		return NULL;
	return dr->data;
}
EXPORT_SYMBOL_GPL(devres_alloc_node);
#endif

/**
 * devres_for_each_res - Resource iterator
 * @dev: Device to iterate resource from
 * @release: Look for resources associated with this release function
 * @match: Match function (optional)
 * @match_data: Data for the match function
 * @fn: Function to be called for each matched resource.
 * @data: Data for @fn, the 3rd parameter of @fn
 *
 * Call @fn for each devres of @dev which is associated with @release
 * and for which @match returns 1.
 *
 * RETURNS:
 * void
 */
void devres_for_each_res(struct device *dev, dr_release_t release,
			 dr_match_t match, void *match_data,
			 void (*fn)(struct device *, void *, void *),
			 void *data)
{
	struct devres_node *node;
	struct devres_node *tmp;
	unsigned long flags;

	if (!fn)
		return;

	spin_lock_irqsave(&dev->devres_lock, flags);
	list_for_each_entry_safe_reverse(node, tmp,
			&dev->devres_head, entry) {
		struct devres *dr = container_of(node, struct devres, node);

		if (node->release != release)
			continue;
		if (match && !match(dev, dr->data, match_data))
			continue;
		fn(dev, dr->data, data);
	}
	spin_unlock_irqrestore(&dev->devres_lock, flags);
}
EXPORT_SYMBOL_GPL(devres_for_each_res);

/**
 * devres_free - Free device resource data
 * @res: Pointer to devres data to free
 *
 * Free devres created with devres_alloc().
 */
void devres_free(void *res)
{
	if (res) {
		struct devres *dr = container_of(res, struct devres, data);

		BUG_ON(!list_empty(&dr->node.entry));
		kfree(dr);
	}
}
EXPORT_SYMBOL_GPL(devres_free);

/**
 * devres_add - Register device resource
 * @dev: Device to add resource to
 * @res: Resource to register
 *
 * Register devres @res to @dev. @res should have been allocated
 * using devres_alloc(). On driver detach, the associated release
 * function will be invoked and devres will be freed automatically.
 */
void devres_add(struct device *dev, void *res)
{
	struct devres *dr = container_of(res, struct devres, data);
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);
	add_dr(dev, &dr->node);
	spin_unlock_irqrestore(&dev->devres_lock, flags);
}
EXPORT_SYMBOL_GPL(devres_add);
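
/*
 * Example: the basic devres_alloc()/devres_add() pattern, as a minimal
 * sketch. devres_alloc() is the wrapper around devres_alloc_node() from
 * <linux/device.h>; the foo_* names are hypothetical.
 *
 *	struct foo_devres {
 *		void __iomem *regs;
 *	};
 *
 *	static void foo_release(struct device *dev, void *res)
 *	{
 *		struct foo_devres *fdr = res;
 *
 *		foo_hw_disable(fdr->regs);	// undo whatever was set up
 *	}
 *
 *	static int foo_setup(struct device *dev, void __iomem *regs)
 *	{
 *		struct foo_devres *dr;
 *
 *		dr = devres_alloc(foo_release, sizeof(*dr), GFP_KERNEL);
 *		if (!dr)
 *			return -ENOMEM;
 *		dr->regs = regs;
 *		devres_add(dev, dr);	// foo_release() runs on driver detach
 *		return 0;
 *	}
 */
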
static struct devres *find_dr(struct device *dev, dr_release_t release,
			      dr_match_t match, void *match_data)
{
	struct devres_node *node;

	list_for_each_entry_reverse(node, &dev->devres_head, entry) {
		struct devres *dr = container_of(node, struct devres, node);

		if (node->release != release)
			continue;
		if (match && !match(dev, dr->data, match_data))
			continue;
		return dr;
	}

	return NULL;
}

/**
 * devres_find - Find device resource
 * @dev: Device to lookup resource from
 * @release: Look for resources associated with this release function
 * @match: Match function (optional)
 * @match_data: Data for the match function
 *
 * Find the latest devres of @dev which is associated with @release
 * and for which @match returns 1. If @match is NULL, it's considered
 * to match all.
 *
 * RETURNS:
 * Pointer to found devres, NULL if not found.
 */
void * devres_find(struct device *dev, dr_release_t release,
		   dr_match_t match, void *match_data)
{
	struct devres *dr;
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);
	dr = find_dr(dev, release, match, match_data);
	spin_unlock_irqrestore(&dev->devres_lock, flags);

	if (dr)
		return dr->data;
	return NULL;
}
EXPORT_SYMBOL_GPL(devres_find);

/**
 * devres_get - Find devres, if non-existent, add one atomically
 * @dev: Device to lookup or add devres for
 * @new_res: Pointer to new initialized devres to add if not found
 * @match: Match function (optional)
 * @match_data: Data for the match function
 *
 * Find the latest devres of @dev which has the same release function
 * as @new_res and for which @match returns 1. If found, @new_res is
 * freed; otherwise, @new_res is added atomically.
 *
 * RETURNS:
 * Pointer to found or added devres.
 */
void * devres_get(struct device *dev, void *new_res,
		  dr_match_t match, void *match_data)
{
	struct devres *new_dr = container_of(new_res, struct devres, data);
	struct devres *dr;
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);
	dr = find_dr(dev, new_dr->node.release, match, match_data);
	if (!dr) {
		add_dr(dev, &new_dr->node);
		dr = new_dr;
		new_res = NULL;
	}
	spin_unlock_irqrestore(&dev->devres_lock, flags);
	devres_free(new_res);

	return dr->data;
}
EXPORT_SYMBOL_GPL(devres_get);
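
/*
 * Example: devres_get() as a get-or-create helper, as a minimal sketch.
 * A freshly allocated candidate is either installed or freed if an
 * equivalent entry already exists; the returned pointer is always the
 * one registered on the device. The bar_* names are hypothetical.
 *
 *	static void bar_state_release(struct device *dev, void *res)
 *	{
 *	}
 *
 *	static struct bar_state *bar_state_get(struct device *dev)
 *	{
 *		struct bar_state *new;
 *
 *		new = devres_alloc(bar_state_release, sizeof(*new), GFP_KERNEL);
 *		if (!new)
 *			return NULL;
 *		// frees @new and returns the existing copy if one is found
 *		return devres_get(dev, new, NULL, NULL);
 *	}
 */
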
/**
 * devres_remove - Find a device resource and remove it
 * @dev: Device to find resource from
 * @release: Look for resources associated with this release function
 * @match: Match function (optional)
 * @match_data: Data for the match function
 *
 * Find the latest devres of @dev associated with @release and for
 * which @match returns 1. If @match is NULL, it's considered to
 * match all. If found, the resource is removed atomically and
 * returned.
 *
 * RETURNS:
 * Pointer to removed devres on success, NULL if not found.
 */
void * devres_remove(struct device *dev, dr_release_t release,
		     dr_match_t match, void *match_data)
{
	struct devres *dr;
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);
	dr = find_dr(dev, release, match, match_data);
	if (dr) {
		list_del_init(&dr->node.entry);
		devres_log(dev, &dr->node, "REM");
	}
	spin_unlock_irqrestore(&dev->devres_lock, flags);

	if (dr)
		return dr->data;
	return NULL;
}
EXPORT_SYMBOL_GPL(devres_remove);

/**
 * devres_destroy - Find a device resource and destroy it
 * @dev: Device to find resource from
 * @release: Look for resources associated with this release function
 * @match: Match function (optional)
 * @match_data: Data for the match function
 *
 * Find the latest devres of @dev associated with @release and for
 * which @match returns 1. If @match is NULL, it's considered to
 * match all. If found, the resource is removed atomically and freed.
 *
 * Note that the release function for the resource will not be called,
 * only the devres-allocated data will be freed. The caller becomes
 * responsible for freeing any other data.
 *
 * RETURNS:
 * 0 if devres is found and freed, -ENOENT if not found.
 */
int devres_destroy(struct device *dev, dr_release_t release,
		   dr_match_t match, void *match_data)
{
	void *res;

	res = devres_remove(dev, release, match, match_data);
	if (unlikely(!res))
		return -ENOENT;

	devres_free(res);
	return 0;
}
EXPORT_SYMBOL_GPL(devres_destroy);

/**
 * devres_release - Find a device resource and destroy it, calling release
 * @dev: Device to find resource from
 * @release: Look for resources associated with this release function
 * @match: Match function (optional)
 * @match_data: Data for the match function
 *
 * Find the latest devres of @dev associated with @release and for
 * which @match returns 1. If @match is NULL, it's considered to
 * match all. If found, the resource is removed atomically, the
 * release function called and the resource freed.
 *
 * RETURNS:
 * 0 if devres is found and freed, -ENOENT if not found.
 */
int devres_release(struct device *dev, dr_release_t release,
		   dr_match_t match, void *match_data)
{
	void *res;

	res = devres_remove(dev, release, match, match_data);
	if (unlikely(!res))
		return -ENOENT;

	(*release)(dev, res);
	devres_free(res);
	return 0;
}
EXPORT_SYMBOL_GPL(devres_release);
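
/*
 * Example: the difference between devres_destroy() and devres_release(),
 * as a minimal sketch. Both unlink and free the devres entry; only
 * devres_release() also invokes the release callback. foo_release,
 * foo_match and ptr are hypothetical.
 *
 *	// the resource was torn down by hand; just drop the bookkeeping
 *	devres_destroy(dev, foo_release, foo_match, ptr);
 *
 *	// tear the resource down now instead of waiting for detach
 *	devres_release(dev, foo_release, foo_match, ptr);
 */
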
static int remove_nodes(struct device *dev,
			struct list_head *first, struct list_head *end,
			struct list_head *todo)
{
	int cnt = 0, nr_groups = 0;
	struct list_head *cur;

	/* First pass - move normal devres entries to @todo and clear
	 * devres_group colors.
	 */
	cur = first;
	while (cur != end) {
		struct devres_node *node;
		struct devres_group *grp;

		node = list_entry(cur, struct devres_node, entry);
		cur = cur->next;

		grp = node_to_group(node);
		if (grp) {
			/* clear color of group markers in the first pass */
			grp->color = 0;
			nr_groups++;
		} else {
			/* regular devres entry */
			if (&node->entry == first)
				first = first->next;
			list_move_tail(&node->entry, todo);
			cnt++;
		}
	}

	if (!nr_groups)
		return cnt;

	/* Second pass - Scan groups and color them. A group gets
	 * color value of two iff the group is wholly contained in
	 * [cur, end). That is, for a closed group, both opening and
	 * closing markers should be in the range, while just the
	 * opening marker is enough for an open group.
	 */
	cur = first;
	while (cur != end) {
		struct devres_node *node;
		struct devres_group *grp;

		node = list_entry(cur, struct devres_node, entry);
		cur = cur->next;

		grp = node_to_group(node);
		BUG_ON(!grp || list_empty(&grp->node[0].entry));

		grp->color++;
		if (list_empty(&grp->node[1].entry))
			grp->color++;

		BUG_ON(grp->color <= 0 || grp->color > 2);
		if (grp->color == 2) {
			/* No need to update cur or end. The removed
			 * nodes are always before both.
			 */
			list_move_tail(&grp->node[0].entry, todo);
			list_del_init(&grp->node[1].entry);
		}
	}

	return cnt;
}

static int release_nodes(struct device *dev, struct list_head *first,
			 struct list_head *end, unsigned long flags)
	__releases(&dev->devres_lock)
{
	LIST_HEAD(todo);
	int cnt;
	struct devres *dr, *tmp;

	cnt = remove_nodes(dev, first, end, &todo);

	spin_unlock_irqrestore(&dev->devres_lock, flags);

	/* Release. Note that both devres and devres_group are
	 * handled as devres in the following loop. This is safe.
	 */
	list_for_each_entry_safe_reverse(dr, tmp, &todo, node.entry) {
		devres_log(dev, &dr->node, "REL");
		dr->node.release(dev, dr->data);
		kfree(dr);
	}

	return cnt;
}

/**
 * devres_release_all - Release all managed resources
 * @dev: Device to release resources for
 *
 * Release all resources associated with @dev. This function is
 * called on driver detach.
 */
int devres_release_all(struct device *dev)
{
	unsigned long flags;

	/* Looks like an uninitialized device structure */
	if (WARN_ON(dev->devres_head.next == NULL))
		return -ENODEV;
	spin_lock_irqsave(&dev->devres_lock, flags);
	return release_nodes(dev, dev->devres_head.next, &dev->devres_head,
			     flags);
}

/**
 * devres_open_group - Open a new devres group
 * @dev: Device to open devres group for
 * @id: Separator ID
 * @gfp: Allocation flags
 *
 * Open a new devres group for @dev with @id. For @id, using a
 * pointer to an object which won't be used for another group is
 * recommended. If @id is NULL, an address-wise unique ID is created.
 *
 * RETURNS:
 * ID of the new group, NULL on failure.
 */
void * devres_open_group(struct device *dev, void *id, gfp_t gfp)
{
	struct devres_group *grp;
	unsigned long flags;

	grp = kmalloc(sizeof(*grp), gfp);
	if (unlikely(!grp))
		return NULL;

	grp->node[0].release = &group_open_release;
	grp->node[1].release = &group_close_release;
	INIT_LIST_HEAD(&grp->node[0].entry);
	INIT_LIST_HEAD(&grp->node[1].entry);
	set_node_dbginfo(&grp->node[0], "grp<", 0);
	set_node_dbginfo(&grp->node[1], "grp>", 0);
	grp->id = grp;
	if (id)
		grp->id = id;

	spin_lock_irqsave(&dev->devres_lock, flags);
	add_dr(dev, &grp->node[0]);
	spin_unlock_irqrestore(&dev->devres_lock, flags);
	return grp->id;
}
EXPORT_SYMBOL_GPL(devres_open_group);

/* Find devres group with ID @id. If @id is NULL, look for the latest. */
static struct devres_group * find_group(struct device *dev, void *id)
{
	struct devres_node *node;

	list_for_each_entry_reverse(node, &dev->devres_head, entry) {
		struct devres_group *grp;

		if (node->release != &group_open_release)
			continue;

		grp = container_of(node, struct devres_group, node[0]);

		if (id) {
			if (grp->id == id)
				return grp;
		} else if (list_empty(&grp->node[1].entry))
			return grp;
	}

	return NULL;
}

/**
 * devres_close_group - Close a devres group
 * @dev: Device to close devres group for
 * @id: ID of target group, can be NULL
 *
 * Close the group identified by @id. If @id is NULL, the latest open
 * group is selected.
 */
void devres_close_group(struct device *dev, void *id)
{
	struct devres_group *grp;
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);

	grp = find_group(dev, id);
	if (grp)
		add_dr(dev, &grp->node[1]);
	else
		WARN_ON(1);

	spin_unlock_irqrestore(&dev->devres_lock, flags);
}
EXPORT_SYMBOL_GPL(devres_close_group);

/**
 * devres_remove_group - Remove a devres group
 * @dev: Device to remove group for
 * @id: ID of target group, can be NULL
 *
 * Remove the group identified by @id. If @id is NULL, the latest
 * open group is selected. Note that removing a group doesn't affect
 * any other resources.
 */
void devres_remove_group(struct device *dev, void *id)
{
	struct devres_group *grp;
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);

	grp = find_group(dev, id);
	if (grp) {
		list_del_init(&grp->node[0].entry);
		list_del_init(&grp->node[1].entry);
		devres_log(dev, &grp->node[0], "REM");
	} else
		WARN_ON(1);

	spin_unlock_irqrestore(&dev->devres_lock, flags);

	kfree(grp);
}
EXPORT_SYMBOL_GPL(devres_remove_group);

/**
 * devres_release_group - Release resources in a devres group
 * @dev: Device to release group for
 * @id: ID of target group, can be NULL
 *
 * Release all resources in the group identified by @id. If @id is
 * NULL, the latest open group is selected. The selected group and
 * groups properly nested inside the selected group are removed.
 *
 * RETURNS:
 * The number of released non-group resources.
 */
int devres_release_group(struct device *dev, void *id)
{
	struct devres_group *grp;
	unsigned long flags;
	int cnt = 0;

	spin_lock_irqsave(&dev->devres_lock, flags);

	grp = find_group(dev, id);
	if (grp) {
		struct list_head *first = &grp->node[0].entry;
		struct list_head *end = &dev->devres_head;

		if (!list_empty(&grp->node[1].entry))
			end = grp->node[1].entry.next;

		cnt = release_nodes(dev, first, end, flags);
	} else {
		WARN_ON(1);
		spin_unlock_irqrestore(&dev->devres_lock, flags);
	}

	return cnt;
}
EXPORT_SYMBOL_GPL(devres_release_group);
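
/*
 * Example: devres groups used to roll back one bundle of managed
 * allocations on a partial failure, as a minimal sketch. foo_setup_irqs()
 * is hypothetical and is assumed to use devm_* helpers internally.
 *
 *	static int foo_probe_phase(struct device *dev)
 *	{
 *		void *grp;
 *		int ret;
 *
 *		grp = devres_open_group(dev, NULL, GFP_KERNEL);
 *		if (!grp)
 *			return -ENOMEM;
 *
 *		ret = foo_setup_irqs(dev);
 *		if (ret) {
 *			devres_release_group(dev, grp);	// undo only this bundle
 *			return ret;
 *		}
 *
 *		devres_close_group(dev, grp);
 *		return 0;
 *	}
 */
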
/*
 * Custom devres actions allow inserting a simple function call
 * into the teardown sequence.
 */

struct action_devres {
	void *data;
	void (*action)(void *);
};

static int devm_action_match(struct device *dev, void *res, void *p)
{
	struct action_devres *devres = res;
	struct action_devres *target = p;

	return devres->action == target->action &&
	       devres->data == target->data;
}

static void devm_action_release(struct device *dev, void *res)
{
	struct action_devres *devres = res;

	devres->action(devres->data);
}

/**
 * devm_add_action() - add a custom action to list of managed resources
 * @dev: Device that owns the action
 * @action: Function that should be called
 * @data: Pointer to data passed to @action implementation
 *
 * This adds a custom action to the list of managed resources so that
 * it gets executed as part of standard resource unwinding.
 */
int devm_add_action(struct device *dev, void (*action)(void *), void *data)
{
	struct action_devres *devres;

	devres = devres_alloc(devm_action_release,
			      sizeof(struct action_devres), GFP_KERNEL);
	if (!devres)
		return -ENOMEM;

	devres->data = data;
	devres->action = action;

	devres_add(dev, devres);
	return 0;
}
EXPORT_SYMBOL_GPL(devm_add_action);
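
/*
 * Example: devm_add_action() hooking an arbitrary cleanup call into the
 * unwind sequence, as a minimal sketch. The clk variable is hypothetical;
 * clk_disable_unprepare() is the usual clock teardown helper.
 *
 *	static void foo_clk_disable(void *data)
 *	{
 *		clk_disable_unprepare(data);
 *	}
 *
 *	ret = devm_add_action(dev, foo_clk_disable, clk);
 *	if (ret) {
 *		clk_disable_unprepare(clk);	// the action was not registered
 *		return ret;
 *	}
 */
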
/**
 * devm_remove_action() - removes previously added custom action
 * @dev: Device that owns the action
 * @action: Function implementing the action
 * @data: Pointer to data passed to @action implementation
 *
 * Removes instance of @action previously added by devm_add_action().
 * Both action and data should match one of the existing entries.
 */
void devm_remove_action(struct device *dev, void (*action)(void *), void *data)
{
	struct action_devres devres = {
		.data = data,
		.action = action,
	};

	WARN_ON(devres_destroy(dev, devm_action_release, devm_action_match,
			       &devres));
}
EXPORT_SYMBOL_GPL(devm_remove_action);

/*
 * Managed kmalloc/kfree
 */
static void devm_kmalloc_release(struct device *dev, void *res)
{
	/* noop */
}

static int devm_kmalloc_match(struct device *dev, void *res, void *data)
{
	return res == data;
}

/**
 * devm_kmalloc - Resource-managed kmalloc
 * @dev: Device to allocate memory for
 * @size: Allocation size
 * @gfp: Allocation gfp flags
 *
 * Managed kmalloc. Memory allocated with this function is
 * automatically freed on driver detach. Like all other devres
 * resources, guaranteed alignment is unsigned long long.
 *
 * RETURNS:
 * Pointer to allocated memory on success, NULL on failure.
 */
void * devm_kmalloc(struct device *dev, size_t size, gfp_t gfp)
{
	struct devres *dr;

	/* use raw alloc_dr for kmalloc caller tracing */
	dr = alloc_dr(devm_kmalloc_release, size, gfp, dev_to_node(dev));
	if (unlikely(!dr))
		return NULL;

	/*
	 * This is named devm_kzalloc_release for historical reasons
	 * The initial implementation did not support kmalloc, only kzalloc
	 */
	set_node_dbginfo(&dr->node, "devm_kzalloc_release", size);
	devres_add(dev, dr->data);
	return dr->data;
}
EXPORT_SYMBOL_GPL(devm_kmalloc);
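
/*
 * Example: devm_kmalloc() (and the devm_kzalloc() wrapper from
 * <linux/device.h>) replacing a kmalloc()/kfree() pair in a probe path,
 * as a minimal sketch. struct foo_priv is hypothetical.
 *
 *	struct foo_priv *priv;
 *
 *	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
 *	if (!priv)
 *		return -ENOMEM;
 *	dev_set_drvdata(dev, priv);	// no explicit kfree() in remove()
 */
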
/**
 * devm_kstrdup - Allocate resource managed space and copy an existing string into that
 * @dev: Device to allocate memory for
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the devm_kmalloc() call when
 *       allocating memory
 * RETURNS:
 * Pointer to allocated string on success, NULL on failure.
 */
char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp)
{
	size_t size;
	char *buf;

	if (!s)
		return NULL;

	size = strlen(s) + 1;
	buf = devm_kmalloc(dev, size, gfp);
	if (buf)
		memcpy(buf, s, size);
	return buf;
}
EXPORT_SYMBOL_GPL(devm_kstrdup);

/**
 * devm_kvasprintf - Allocate resource managed space and format a string into that
 * @dev: Device to allocate memory for
 * @gfp: the GFP mask used in the devm_kmalloc() call when
 *       allocating memory
 * @fmt: The printf()-style format string
 * @ap: Arguments for the format string
 * RETURNS:
 * Pointer to allocated string on success, NULL on failure.
 */
char *devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt,
		      va_list ap)
{
	unsigned int len;
	char *p;
	va_list aq;

	va_copy(aq, ap);
	len = vsnprintf(NULL, 0, fmt, aq);
	va_end(aq);

	p = devm_kmalloc(dev, len+1, gfp);
	if (!p)
		return NULL;

	vsnprintf(p, len+1, fmt, ap);

	return p;
}
EXPORT_SYMBOL(devm_kvasprintf);

/**
 * devm_kasprintf - Allocate resource managed space and format a string into that
 * @dev: Device to allocate memory for
 * @gfp: the GFP mask used in the devm_kmalloc() call when
 *       allocating memory
 * @fmt: The printf()-style format string
 * @...: Arguments for the format string
 * RETURNS:
 * Pointer to allocated string on success, NULL on failure.
 */
char *devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...)
{
	va_list ap;
	char *p;

	va_start(ap, fmt);
	p = devm_kvasprintf(dev, gfp, fmt, ap);
	va_end(ap);

	return p;
}
EXPORT_SYMBOL_GPL(devm_kasprintf);
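
/*
 * Example: the managed string helpers, as a minimal sketch. The label
 * and ch variables are hypothetical; the buffer is sized and freed
 * automatically.
 *
 *	const char *label;
 *
 *	label = devm_kasprintf(dev, GFP_KERNEL, "%s-ch%d", dev_name(dev), ch);
 *	if (!label)
 *		return -ENOMEM;
 */
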
/**
 * devm_kfree - Resource-managed kfree
 * @dev: Device this memory belongs to
 * @p: Memory to free
 *
 * Free memory allocated with devm_kmalloc().
 */
void devm_kfree(struct device *dev, void *p)
{
	int rc;

	rc = devres_destroy(dev, devm_kmalloc_release, devm_kmalloc_match, p);
	WARN_ON(rc);
}
EXPORT_SYMBOL_GPL(devm_kfree);

/**
 * devm_kmemdup - Resource-managed kmemdup
 * @dev: Device this memory belongs to
 * @src: Memory region to duplicate
 * @len: Memory region length
 * @gfp: GFP mask to use
 *
 * Duplicate a region of memory using resource-managed kmalloc.
 */
void *devm_kmemdup(struct device *dev, const void *src, size_t len, gfp_t gfp)
{
	void *p;

	p = devm_kmalloc(dev, len, gfp);
	if (p)
		memcpy(p, src, len);

	return p;
}
EXPORT_SYMBOL_GPL(devm_kmemdup);

struct pages_devres {
	unsigned long addr;
	unsigned int order;
};

static int devm_pages_match(struct device *dev, void *res, void *p)
{
	struct pages_devres *devres = res;
	struct pages_devres *target = p;

	return devres->addr == target->addr;
}

static void devm_pages_release(struct device *dev, void *res)
{
	struct pages_devres *devres = res;

	free_pages(devres->addr, devres->order);
}

/**
 * devm_get_free_pages - Resource-managed __get_free_pages
 * @dev: Device to allocate memory for
 * @gfp_mask: Allocation gfp flags
 * @order: Allocation size is (1 << order) pages
 *
 * Managed get_free_pages. Memory allocated with this function is
 * automatically freed on driver detach.
 *
 * RETURNS:
 * Address of allocated memory on success, 0 on failure.
 */
unsigned long devm_get_free_pages(struct device *dev,
				  gfp_t gfp_mask, unsigned int order)
{
	struct pages_devres *devres;
	unsigned long addr;

	addr = __get_free_pages(gfp_mask, order);

	if (unlikely(!addr))
		return 0;

	devres = devres_alloc(devm_pages_release,
			      sizeof(struct pages_devres), GFP_KERNEL);
	if (unlikely(!devres)) {
		free_pages(addr, order);
		return 0;
	}

	devres->addr = addr;
	devres->order = order;

	devres_add(dev, devres);
	return addr;
}
EXPORT_SYMBOL_GPL(devm_get_free_pages);
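
/*
 * Example: devm_get_free_pages() mirroring __get_free_pages(), as a
 * minimal sketch. FOO_RING_ORDER is hypothetical; the pages go back to
 * the allocator on detach or earlier via devm_free_pages().
 *
 *	unsigned long ring;
 *
 *	ring = devm_get_free_pages(dev, GFP_KERNEL | __GFP_ZERO, FOO_RING_ORDER);
 *	if (!ring)
 *		return -ENOMEM;
 */
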
/**
 * devm_free_pages - Resource-managed free_pages
 * @dev: Device this memory belongs to
 * @addr: Memory to free
 *
 * Free memory allocated with devm_get_free_pages(). Unlike free_pages,
 * there is no need to supply the @order.
 */
void devm_free_pages(struct device *dev, unsigned long addr)
{
	struct pages_devres devres = { .addr = addr };

	WARN_ON(devres_release(dev, devm_pages_release, devm_pages_match,
			       &devres));
}
EXPORT_SYMBOL_GPL(devm_free_pages);

static void devm_percpu_release(struct device *dev, void *pdata)
{
	void __percpu *p;

	p = *(void __percpu **)pdata;
	free_percpu(p);
}

static int devm_percpu_match(struct device *dev, void *data, void *p)
{
	struct devres *devr = container_of(data, struct devres, data);

	return *(void **)devr->data == p;
}

/**
 * __devm_alloc_percpu - Resource-managed alloc_percpu
 * @dev: Device to allocate per-cpu memory for
 * @size: Size of per-cpu memory to allocate
 * @align: Alignment of per-cpu memory to allocate
 *
 * Managed alloc_percpu. Per-cpu memory allocated with this function is
 * automatically freed on driver detach.
 *
 * RETURNS:
 * Pointer to allocated memory on success, NULL on failure.
 */
void __percpu *__devm_alloc_percpu(struct device *dev, size_t size,
				   size_t align)
{
	void *p;
	void __percpu *pcpu;

	pcpu = __alloc_percpu(size, align);
	if (!pcpu)
		return NULL;

	p = devres_alloc(devm_percpu_release, sizeof(void *), GFP_KERNEL);
	if (!p) {
		free_percpu(pcpu);
		return NULL;
	}

	*(void __percpu **)p = pcpu;

	devres_add(dev, p);

	return pcpu;
}
EXPORT_SYMBOL_GPL(__devm_alloc_percpu);
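
/*
 * Example: per-cpu data through the devm_alloc_percpu() wrapper from
 * <linux/device.h>, which expands to __devm_alloc_percpu(), as a minimal
 * sketch. struct foo_stats and priv are hypothetical.
 *
 *	struct foo_stats { u64 rx; u64 tx; };
 *
 *	priv->stats = devm_alloc_percpu(dev, struct foo_stats);
 *	if (!priv->stats)
 *		return -ENOMEM;
 *
 *	this_cpu_inc(priv->stats->rx);	// freed automatically on detach
 */
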
/**
 * devm_free_percpu - Resource-managed free_percpu
 * @dev: Device this memory belongs to
 * @pdata: Per-cpu memory to free
 *
 * Free memory allocated with devm_alloc_percpu().
 */
void devm_free_percpu(struct device *dev, void __percpu *pdata)
{
	WARN_ON(devres_destroy(dev, devm_percpu_release, devm_percpu_match,
			       (void *)pdata));
}
EXPORT_SYMBOL_GPL(devm_free_percpu);