/* NOTE(review): web-listing header and concatenated line-number artifact removed. */
  1. /******************************************************************************
  2. * grant_table.c
  3. *
  4. * Granting foreign access to our memory reservation.
  5. *
  6. * Copyright (c) 2005-2006, Christopher Clark
  7. * Copyright (c) 2004-2005, K A Fraser
  8. *
  9. * This program is free software; you can redistribute it and/or
  10. * modify it under the terms of the GNU General Public License version 2
  11. * as published by the Free Software Foundation; or, when distributed
  12. * separately from the Linux kernel or incorporated into other
  13. * software packages, subject to the following license:
  14. *
  15. * Permission is hereby granted, free of charge, to any person obtaining a copy
  16. * of this source file (the "Software"), to deal in the Software without
  17. * restriction, including without limitation the rights to use, copy, modify,
  18. * merge, publish, distribute, sublicense, and/or sell copies of the Software,
  19. * and to permit persons to whom the Software is furnished to do so, subject to
  20. * the following conditions:
  21. *
  22. * The above copyright notice and this permission notice shall be included in
  23. * all copies or substantial portions of the Software.
  24. *
  25. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  26. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  27. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
  28. * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  29. * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  30. * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  31. * IN THE SOFTWARE.
  32. */
  33. #define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt
  34. #include <linux/module.h>
  35. #include <linux/sched.h>
  36. #include <linux/mm.h>
  37. #include <linux/slab.h>
  38. #include <linux/vmalloc.h>
  39. #include <linux/uaccess.h>
  40. #include <linux/io.h>
  41. #include <linux/delay.h>
  42. #include <linux/hardirq.h>
  43. #include <linux/workqueue.h>
  44. #include <xen/xen.h>
  45. #include <xen/interface/xen.h>
  46. #include <xen/page.h>
  47. #include <xen/grant_table.h>
  48. #include <xen/interface/memory.h>
  49. #include <xen/hvc-console.h>
  50. #include <xen/swiotlb-xen.h>
  51. #include <xen/balloon.h>
  52. #include <asm/xen/hypercall.h>
  53. #include <asm/xen/interface.h>
  54. #include <asm/pgtable.h>
  55. #include <asm/sync_bitops.h>
/* External tools reserve first few grant table entries. */
#define NR_RESERVED_ENTRIES 8
/* Free-list terminator; also never a valid grant reference. */
#define GNTTAB_LIST_END 0xffffffff

/* Page-sized chunks of free-list links, indexed via __gnttab_entry(). */
static grant_ref_t **gnttab_list;
static unsigned int nr_grant_frames;	/* frames currently mapped for grant entries */
static int gnttab_free_count;		/* number of entries on the free list */
static grant_ref_t gnttab_free_head;	/* head of the free list */
static DEFINE_SPINLOCK(gnttab_list_lock); /* protects free list + callback list */
struct grant_frames xen_auto_xlat_grant_frames;

/* The shared grant table, viewed through its v1 layout. */
static union {
	struct grant_entry_v1 *v1;
	void *addr;
} gnttab_shared;
/* This is a structure of function pointers for grant table. */
struct gnttab_ops {
	/*
	 * Mapping a list of frames for storing grant entries. Frames parameter
	 * is used to store grant table address when grant table being setup,
	 * nr_gframes is the number of frames to map grant table. Returning
	 * GNTST_okay means success and negative value means failure.
	 */
	int (*map_frames)(xen_pfn_t *frames, unsigned int nr_gframes);
	/*
	 * Release a list of frames which are mapped in map_frames for grant
	 * entry status.
	 */
	void (*unmap_frames)(void);
	/*
	 * Introducing a valid entry into the grant table, granting the frame of
	 * this grant entry to domain for accessing or transferring. Ref
	 * parameter is reference of this introduced grant entry, domid is id of
	 * granted domain, frame is the page frame to be granted, and flags is
	 * status of the grant entry to be updated.
	 */
	void (*update_entry)(grant_ref_t ref, domid_t domid,
			     unsigned long frame, unsigned flags);
	/*
	 * Stop granting a grant entry to domain for accessing. Ref parameter is
	 * reference of a grant entry whose grant access will be stopped,
	 * readonly is not in use in this function. If the grant entry is
	 * currently mapped for reading or writing, just return failure(==0)
	 * directly and don't tear down the grant access. Otherwise, stop grant
	 * access for this entry and return success(==1).
	 */
	int (*end_foreign_access_ref)(grant_ref_t ref, int readonly);
	/*
	 * Stop granting a grant entry to domain for transfer. Ref parameter is
	 * reference of a grant entry whose grant transfer will be stopped. If
	 * transfer has not started, just reclaim the grant entry and return
	 * failure(==0). Otherwise, wait for the transfer to complete and then
	 * return the frame.
	 */
	unsigned long (*end_foreign_transfer_ref)(grant_ref_t ref);
	/*
	 * Query the status of a grant entry. Ref parameter is reference of
	 * queried grant entry, return value is the status of queried entry.
	 * Detailed status(writing/reading) can be gotten from the return value
	 * by bit operations.
	 */
	int (*query_foreign_access)(grant_ref_t ref);
};
/* Completion/result pair used by gnttab_unmap_refs_sync(). */
struct unmap_refs_callback_data {
	struct completion completion;	/* signalled when the async unmap finishes */
	int result;			/* result handed to the done callback */
};
static struct gnttab_ops *gnttab_interface;	/* active version-specific ops */
static int grant_table_version;
static int grefs_per_grant_frame;		/* grant entries per table frame */
static struct gnttab_free_callback *gnttab_free_callback_list;
static int gnttab_expand(unsigned int req_entries);
/* Number of grant_ref_t link slots that fit in one page. */
#define RPP (PAGE_SIZE / sizeof(grant_ref_t))
  127. static inline grant_ref_t *__gnttab_entry(grant_ref_t entry)
  128. {
  129. return &gnttab_list[(entry) / RPP][(entry) % RPP];
  130. }
  131. /* This can be used as an l-value */
  132. #define gnttab_entry(entry) (*__gnttab_entry(entry))
/*
 * Detach @count references from the head of the free list and return the
 * first one; the caller owns the whole chain.  Expands the grant table if
 * the free list is too short.  Returns a negative errno on failure.
 */
static int get_free_entries(unsigned count)
{
	unsigned long flags;
	int ref, rc = 0;
	grant_ref_t head;

	spin_lock_irqsave(&gnttab_list_lock, flags);

	/* Grow the table when the free list cannot satisfy the request. */
	if ((gnttab_free_count < count) &&
	    ((rc = gnttab_expand(count - gnttab_free_count)) < 0)) {
		spin_unlock_irqrestore(&gnttab_list_lock, flags);
		return rc;
	}

	/* Walk @count links forward; terminate the chain handed to caller. */
	ref = head = gnttab_free_head;
	gnttab_free_count -= count;
	while (count-- > 1)
		head = gnttab_entry(head);
	gnttab_free_head = gnttab_entry(head);
	gnttab_entry(head) = GNTTAB_LIST_END;

	spin_unlock_irqrestore(&gnttab_list_lock, flags);

	return ref;
}
  153. static void do_free_callbacks(void)
  154. {
  155. struct gnttab_free_callback *callback, *next;
  156. callback = gnttab_free_callback_list;
  157. gnttab_free_callback_list = NULL;
  158. while (callback != NULL) {
  159. next = callback->next;
  160. if (gnttab_free_count >= callback->count) {
  161. callback->next = NULL;
  162. callback->fn(callback->arg);
  163. } else {
  164. callback->next = gnttab_free_callback_list;
  165. gnttab_free_callback_list = callback;
  166. }
  167. callback = next;
  168. }
  169. }
  170. static inline void check_free_callbacks(void)
  171. {
  172. if (unlikely(gnttab_free_callback_list))
  173. do_free_callbacks();
  174. }
/* Return @ref to the head of the free list and wake any waiters. */
static void put_free_entry(grant_ref_t ref)
{
	unsigned long flags;

	spin_lock_irqsave(&gnttab_list_lock, flags);
	gnttab_entry(ref) = gnttab_free_head;
	gnttab_free_head = ref;
	gnttab_free_count++;
	check_free_callbacks();	/* the new entry may satisfy a waiting callback */
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}
/*
 * Following applies to gnttab_update_entry_v1.
 * Introducing a valid entry into the grant table:
 *  1. Write ent->domid.
 *  2. Write ent->frame:
 *      GTF_permit_access:   Frame to which access is permitted.
 *      GTF_accept_transfer: Pseudo-phys frame slot being filled by new
 *                           frame, or zero if none.
 *  3. Write memory barrier (WMB).
 *  4. Write ent->flags, inc. valid type.
 */
static void gnttab_update_entry_v1(grant_ref_t ref, domid_t domid,
				   unsigned long frame, unsigned flags)
{
	gnttab_shared.v1[ref].domid = domid;
	gnttab_shared.v1[ref].frame = frame;
	/* Flags must become visible only after domid/frame are in place. */
	wmb();
	gnttab_shared.v1[ref].flags = flags;
}
  204. /*
  205. * Public grant-issuing interface functions
  206. */
  207. void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid,
  208. unsigned long frame, int readonly)
  209. {
  210. gnttab_interface->update_entry(ref, domid, frame,
  211. GTF_permit_access | (readonly ? GTF_readonly : 0));
  212. }
  213. EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_ref);
  214. int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
  215. int readonly)
  216. {
  217. int ref;
  218. ref = get_free_entries(1);
  219. if (unlikely(ref < 0))
  220. return -ENOSPC;
  221. gnttab_grant_foreign_access_ref(ref, domid, frame, readonly);
  222. return ref;
  223. }
  224. EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access);
  225. static int gnttab_query_foreign_access_v1(grant_ref_t ref)
  226. {
  227. return gnttab_shared.v1[ref].flags & (GTF_reading|GTF_writing);
  228. }
  229. int gnttab_query_foreign_access(grant_ref_t ref)
  230. {
  231. return gnttab_interface->query_foreign_access(ref);
  232. }
  233. EXPORT_SYMBOL_GPL(gnttab_query_foreign_access);
/*
 * Clear a v1 grant entry, failing (return 0) if the remote domain still
 * has it mapped.  Lockless: retries the cmpxchg until the flags clear or
 * a reader/writer appears.  @readonly is unused for v1 entries.
 */
static int gnttab_end_foreign_access_ref_v1(grant_ref_t ref, int readonly)
{
	u16 flags, nflags;
	u16 *pflags;

	pflags = &gnttab_shared.v1[ref].flags;
	nflags = *pflags;
	do {
		flags = nflags;
		/* Remote domain has it mapped: cannot revoke now. */
		if (flags & (GTF_reading|GTF_writing))
			return 0;
	} while ((nflags = sync_cmpxchg(pflags, flags, 0)) != flags);

	return 1;
}
  247. static inline int _gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly)
  248. {
  249. return gnttab_interface->end_foreign_access_ref(ref, readonly);
  250. }
  251. int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly)
  252. {
  253. if (_gnttab_end_foreign_access_ref(ref, readonly))
  254. return 1;
  255. pr_warn("WARNING: g.e. %#x still in use!\n", ref);
  256. return 0;
  257. }
  258. EXPORT_SYMBOL_GPL(gnttab_end_foreign_access_ref);
/* A grant ref whose revocation failed; retried later from a timer. */
struct deferred_entry {
	struct list_head list;
	grant_ref_t ref;	/* the still-busy grant reference */
	bool ro;		/* was the grant read-only? */
	uint16_t warn_delay;	/* retries left before a "still pending" note */
	struct page *page;	/* page to free once revoked, or NULL */
};
static LIST_HEAD(deferred_list);
static void gnttab_handle_deferred(unsigned long);
static DEFINE_TIMER(deferred_timer, gnttab_handle_deferred, 0, 0);
/*
 * Timer handler: retry revoking deferred grant refs.  Processes at most
 * 10 entries per run to bound lock hold time, and re-arms itself while
 * entries remain pending.
 */
static void gnttab_handle_deferred(unsigned long unused)
{
	unsigned int nr = 10;	/* per-invocation batch limit */
	struct deferred_entry *first = NULL;
	unsigned long flags;

	spin_lock_irqsave(&gnttab_list_lock, flags);
	while (nr--) {
		struct deferred_entry *entry
			= list_first_entry(&deferred_list,
					   struct deferred_entry, list);

		/* Wrapped around to the first still-busy entry: stop. */
		if (entry == first)
			break;
		list_del(&entry->list);
		/* Drop the lock while ending access / freeing the page. */
		spin_unlock_irqrestore(&gnttab_list_lock, flags);
		if (_gnttab_end_foreign_access_ref(entry->ref, entry->ro)) {
			put_free_entry(entry->ref);
			if (entry->page) {
				pr_debug("freeing g.e. %#x (pfn %#lx)\n",
					 entry->ref, page_to_pfn(entry->page));
				__free_page(entry->page);
			} else
				pr_info("freeing g.e. %#x\n", entry->ref);
			kfree(entry);
			entry = NULL;	/* consumed; do not requeue below */
		} else {
			if (!--entry->warn_delay)
				pr_info("g.e. %#x still pending\n", entry->ref);
			if (!first)
				first = entry;
		}
		spin_lock_irqsave(&gnttab_list_lock, flags);
		if (entry)
			list_add_tail(&entry->list, &deferred_list);
		else if (list_empty(&deferred_list))
			break;
	}
	/* Re-arm if work remains and no timer is already queued. */
	if (!list_empty(&deferred_list) && !timer_pending(&deferred_timer)) {
		deferred_timer.expires = jiffies + HZ;
		add_timer(&deferred_timer);
	}
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}
  311. static void gnttab_add_deferred(grant_ref_t ref, bool readonly,
  312. struct page *page)
  313. {
  314. struct deferred_entry *entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
  315. const char *what = KERN_WARNING "leaking";
  316. if (entry) {
  317. unsigned long flags;
  318. entry->ref = ref;
  319. entry->ro = readonly;
  320. entry->page = page;
  321. entry->warn_delay = 60;
  322. spin_lock_irqsave(&gnttab_list_lock, flags);
  323. list_add_tail(&entry->list, &deferred_list);
  324. if (!timer_pending(&deferred_timer)) {
  325. deferred_timer.expires = jiffies + HZ;
  326. add_timer(&deferred_timer);
  327. }
  328. spin_unlock_irqrestore(&gnttab_list_lock, flags);
  329. what = KERN_DEBUG "deferring";
  330. }
  331. printk("%s g.e. %#x (pfn %#lx)\n",
  332. what, ref, page ? page_to_pfn(page) : -1);
  333. }
/*
 * End access and free the grant reference.  If the remote domain still
 * has the grant mapped, defer the cleanup (and the page free) to the
 * deferred-entry timer rather than reusing a page that is still granted.
 */
void gnttab_end_foreign_access(grant_ref_t ref, int readonly,
			       unsigned long page)
{
	if (gnttab_end_foreign_access_ref(ref, readonly)) {
		put_free_entry(ref);
		if (page != 0)
			free_page(page);
	} else
		gnttab_add_deferred(ref, readonly,
				    page ? virt_to_page(page) : NULL);
}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_access);
  346. int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn)
  347. {
  348. int ref;
  349. ref = get_free_entries(1);
  350. if (unlikely(ref < 0))
  351. return -ENOSPC;
  352. gnttab_grant_foreign_transfer_ref(ref, domid, pfn);
  353. return ref;
  354. }
  355. EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer);
  356. void gnttab_grant_foreign_transfer_ref(grant_ref_t ref, domid_t domid,
  357. unsigned long pfn)
  358. {
  359. gnttab_interface->update_entry(ref, domid, pfn, GTF_accept_transfer);
  360. }
  361. EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer_ref);
/*
 * Finish a v1 transfer.  If the peer never committed, reclaim the entry
 * and return 0; otherwise busy-wait for completion and return the frame.
 */
static unsigned long gnttab_end_foreign_transfer_ref_v1(grant_ref_t ref)
{
	unsigned long frame;
	u16 flags;
	u16 *pflags;

	pflags = &gnttab_shared.v1[ref].flags;

	/*
	 * If a transfer is not even yet started, try to reclaim the grant
	 * reference and return failure (== 0).
	 */
	while (!((flags = *pflags) & GTF_transfer_committed)) {
		if (sync_cmpxchg(pflags, flags, 0) == flags)
			return 0;
		cpu_relax();
	}

	/* If a transfer is in progress then wait until it is completed. */
	while (!(flags & GTF_transfer_completed)) {
		flags = *pflags;
		cpu_relax();
	}

	rmb();	/* Read the frame number /after/ reading completion status. */
	frame = gnttab_shared.v1[ref].frame;
	BUG_ON(frame == 0);	/* a completed transfer must carry a frame */

	return frame;
}
  387. unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref)
  388. {
  389. return gnttab_interface->end_foreign_transfer_ref(ref);
  390. }
  391. EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer_ref);
  392. unsigned long gnttab_end_foreign_transfer(grant_ref_t ref)
  393. {
  394. unsigned long frame = gnttab_end_foreign_transfer_ref(ref);
  395. put_free_entry(ref);
  396. return frame;
  397. }
  398. EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer);
/* Return a single grant reference to the free list. */
void gnttab_free_grant_reference(grant_ref_t ref)
{
	put_free_entry(ref);
}
EXPORT_SYMBOL_GPL(gnttab_free_grant_reference);
/*
 * Return a whole chain of references (as produced by
 * gnttab_alloc_grant_references()) to the free list in one splice.
 */
void gnttab_free_grant_references(grant_ref_t head)
{
	grant_ref_t ref;
	unsigned long flags;
	int count = 1;

	if (head == GNTTAB_LIST_END)
		return;
	spin_lock_irqsave(&gnttab_list_lock, flags);
	/* Walk to the tail of the chain, counting entries on the way. */
	ref = head;
	while (gnttab_entry(ref) != GNTTAB_LIST_END) {
		ref = gnttab_entry(ref);
		count++;
	}
	/* Splice the chain onto the head of the free list. */
	gnttab_entry(ref) = gnttab_free_head;
	gnttab_free_head = head;
	gnttab_free_count += count;
	check_free_callbacks();
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}
EXPORT_SYMBOL_GPL(gnttab_free_grant_references);
  424. int gnttab_alloc_grant_references(u16 count, grant_ref_t *head)
  425. {
  426. int h = get_free_entries(count);
  427. if (h < 0)
  428. return -ENOSPC;
  429. *head = h;
  430. return 0;
  431. }
  432. EXPORT_SYMBOL_GPL(gnttab_alloc_grant_references);
  433. int gnttab_empty_grant_references(const grant_ref_t *private_head)
  434. {
  435. return (*private_head == GNTTAB_LIST_END);
  436. }
  437. EXPORT_SYMBOL_GPL(gnttab_empty_grant_references);
  438. int gnttab_claim_grant_reference(grant_ref_t *private_head)
  439. {
  440. grant_ref_t g = *private_head;
  441. if (unlikely(g == GNTTAB_LIST_END))
  442. return -ENOSPC;
  443. *private_head = gnttab_entry(g);
  444. return g;
  445. }
  446. EXPORT_SYMBOL_GPL(gnttab_claim_grant_reference);
  447. void gnttab_release_grant_reference(grant_ref_t *private_head,
  448. grant_ref_t release)
  449. {
  450. gnttab_entry(release) = *private_head;
  451. *private_head = release;
  452. }
  453. EXPORT_SYMBOL_GPL(gnttab_release_grant_reference);
/*
 * Register @callback to run once at least @count free grant entries are
 * available.  A callback already on the list is not re-registered.
 */
void gnttab_request_free_callback(struct gnttab_free_callback *callback,
				  void (*fn)(void *), void *arg, u16 count)
{
	unsigned long flags;
	struct gnttab_free_callback *cb;

	spin_lock_irqsave(&gnttab_list_lock, flags);

	/* Check if the callback is already on the list */
	cb = gnttab_free_callback_list;
	while (cb) {
		if (cb == callback)
			goto out;
		cb = cb->next;
	}

	callback->fn = fn;
	callback->arg = arg;
	callback->count = count;
	callback->next = gnttab_free_callback_list;
	gnttab_free_callback_list = callback;
	check_free_callbacks();	/* may fire right away if space already exists */
out:
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}
EXPORT_SYMBOL_GPL(gnttab_request_free_callback);
/* Unlink @callback from the pending-callback list, if present. */
void gnttab_cancel_free_callback(struct gnttab_free_callback *callback)
{
	struct gnttab_free_callback **pcb;
	unsigned long flags;

	spin_lock_irqsave(&gnttab_list_lock, flags);
	/* Walk via pointer-to-pointer so unlinking needs no prev node. */
	for (pcb = &gnttab_free_callback_list; *pcb; pcb = &(*pcb)->next) {
		if (*pcb == callback) {
			*pcb = callback->next;
			break;
		}
	}
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}
EXPORT_SYMBOL_GPL(gnttab_cancel_free_callback);
/*
 * Extend the bookkeeping free list by @more_frames grant frames' worth
 * of entries and chain the new entries onto the head of the free list.
 * Called with gnttab_list_lock held.  Returns 0 or -ENOMEM.
 */
static int grow_gnttab_list(unsigned int more_frames)
{
	unsigned int new_nr_grant_frames, extra_entries, i;
	unsigned int nr_glist_frames, new_nr_glist_frames;

	BUG_ON(grefs_per_grant_frame == 0);

	new_nr_grant_frames = nr_grant_frames + more_frames;
	extra_entries = more_frames * grefs_per_grant_frame;

	/* Pages of grant_ref_t links needed before and after growing. */
	nr_glist_frames = (nr_grant_frames * grefs_per_grant_frame + RPP - 1) / RPP;
	new_nr_glist_frames =
		(new_nr_grant_frames * grefs_per_grant_frame + RPP - 1) / RPP;
	for (i = nr_glist_frames; i < new_nr_glist_frames; i++) {
		gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_ATOMIC);
		if (!gnttab_list[i])
			goto grow_nomem;
	}

	/* Link the new entries together... */
	for (i = grefs_per_grant_frame * nr_grant_frames;
	     i < grefs_per_grant_frame * new_nr_grant_frames - 1; i++)
		gnttab_entry(i) = i + 1;

	/* ...and splice them onto the head of the free list. */
	gnttab_entry(i) = gnttab_free_head;
	gnttab_free_head = grefs_per_grant_frame * nr_grant_frames;
	gnttab_free_count += extra_entries;

	nr_grant_frames = new_nr_grant_frames;

	check_free_callbacks();

	return 0;

grow_nomem:
	/* Undo the partially completed page allocations. */
	while (i-- > nr_glist_frames)
		free_page((unsigned long) gnttab_list[i]);
	return -ENOMEM;
}
  520. static unsigned int __max_nr_grant_frames(void)
  521. {
  522. struct gnttab_query_size query;
  523. int rc;
  524. query.dom = DOMID_SELF;
  525. rc = HYPERVISOR_grant_table_op(GNTTABOP_query_size, &query, 1);
  526. if ((rc < 0) || (query.status != GNTST_okay))
  527. return 4; /* Legacy max supported number of frames */
  528. return query.max_nr_frames;
  529. }
  530. unsigned int gnttab_max_grant_frames(void)
  531. {
  532. unsigned int xen_max = __max_nr_grant_frames();
  533. static unsigned int boot_max_nr_grant_frames;
  534. /* First time, initialize it properly. */
  535. if (!boot_max_nr_grant_frames)
  536. boot_max_nr_grant_frames = __max_nr_grant_frames();
  537. if (xen_max > boot_max_nr_grant_frames)
  538. return boot_max_nr_grant_frames;
  539. return xen_max;
  540. }
  541. EXPORT_SYMBOL_GPL(gnttab_max_grant_frames);
/*
 * Map the grant-table region at physical @addr for auto-translated
 * guests and record vaddr/pfn/count in xen_auto_xlat_grant_frames.
 * Returns 0, -EINVAL if already set up, or -ENOMEM.
 */
int gnttab_setup_auto_xlat_frames(phys_addr_t addr)
{
	xen_pfn_t *pfn;
	unsigned int max_nr_gframes = __max_nr_grant_frames();
	unsigned int i;
	void *vaddr;

	/* Only one mapping may be active at a time. */
	if (xen_auto_xlat_grant_frames.count)
		return -EINVAL;

	vaddr = xen_remap(addr, PAGE_SIZE * max_nr_gframes);
	if (vaddr == NULL) {
		pr_warn("Failed to ioremap gnttab share frames (addr=%pa)!\n",
			&addr);
		return -ENOMEM;
	}
	pfn = kcalloc(max_nr_gframes, sizeof(pfn[0]), GFP_KERNEL);
	if (!pfn) {
		xen_unmap(vaddr);
		return -ENOMEM;
	}
	/* The frames are physically contiguous starting at @addr. */
	for (i = 0; i < max_nr_gframes; i++)
		pfn[i] = PFN_DOWN(addr) + i;

	xen_auto_xlat_grant_frames.vaddr = vaddr;
	xen_auto_xlat_grant_frames.pfn = pfn;
	xen_auto_xlat_grant_frames.count = max_nr_gframes;

	return 0;
}
EXPORT_SYMBOL_GPL(gnttab_setup_auto_xlat_frames);
  569. void gnttab_free_auto_xlat_frames(void)
  570. {
  571. if (!xen_auto_xlat_grant_frames.count)
  572. return;
  573. kfree(xen_auto_xlat_grant_frames.pfn);
  574. xen_unmap(xen_auto_xlat_grant_frames.vaddr);
  575. xen_auto_xlat_grant_frames.pfn = NULL;
  576. xen_auto_xlat_grant_frames.count = 0;
  577. xen_auto_xlat_grant_frames.vaddr = NULL;
  578. }
  579. EXPORT_SYMBOL_GPL(gnttab_free_auto_xlat_frames);
/**
 * gnttab_alloc_pages - alloc pages suitable for grant mapping into
 * @nr_pages: number of pages to alloc
 * @pages: returns the pages
 *
 * Returns 0 on success or a negative errno; on failure no pages remain
 * allocated.
 */
int gnttab_alloc_pages(int nr_pages, struct page **pages)
{
	int i;
	int ret;

	ret = alloc_xenballooned_pages(nr_pages, pages, false);
	if (ret < 0)
		return ret;

	for (i = 0; i < nr_pages; i++) {
#if BITS_PER_LONG < 64
		/* 32-bit: foreign-owner info does not fit in the page
		 * flags, so hang it off page_private instead. */
		struct xen_page_foreign *foreign;

		foreign = kzalloc(sizeof(*foreign), GFP_KERNEL);
		if (!foreign) {
			/* Safe with partially-initialized pages: the free
			 * path only touches pages with PagePrivate set. */
			gnttab_free_pages(nr_pages, pages);
			return -ENOMEM;
		}
		set_page_private(pages[i], (unsigned long)foreign);
#endif
		SetPagePrivate(pages[i]);
	}

	return 0;
}
EXPORT_SYMBOL(gnttab_alloc_pages);
/**
 * gnttab_free_pages - free pages allocated by gnttab_alloc_pages()
 * @nr_pages: number of pages to free
 * @pages: the pages
 */
void gnttab_free_pages(int nr_pages, struct page **pages)
{
	int i;

	for (i = 0; i < nr_pages; i++) {
		if (PagePrivate(pages[i])) {
#if BITS_PER_LONG < 64
			/* Release the per-page foreign-owner info. */
			kfree((void *)page_private(pages[i]));
#endif
			ClearPagePrivate(pages[i]);
		}
	}
	free_xenballooned_pages(nr_pages, pages);
}
EXPORT_SYMBOL(gnttab_free_pages);
/* Handling of paged out grant targets (GNTST_eagain) */
#define MAX_DELAY 256
/*
 * Retry a single grant-table op while it reports GNTST_eagain (the
 * target frame is paged out), sleeping with linearly growing back-off.
 * Gives up after MAX_DELAY rounds and forces status to GNTST_bad_page.
 */
static inline void
gnttab_retry_eagain_gop(unsigned int cmd, void *gop, int16_t *status,
						const char *func)
{
	unsigned delay = 1;

	do {
		BUG_ON(HYPERVISOR_grant_table_op(cmd, gop, 1));
		if (*status == GNTST_eagain)
			msleep(delay++);
	} while ((*status == GNTST_eagain) && (delay < MAX_DELAY));

	if (delay >= MAX_DELAY) {
		pr_err("%s: %s eagain grant\n", func, current->comm);
		*status = GNTST_bad_page;
	}
}
  643. void gnttab_batch_map(struct gnttab_map_grant_ref *batch, unsigned count)
  644. {
  645. struct gnttab_map_grant_ref *op;
  646. if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, batch, count))
  647. BUG();
  648. for (op = batch; op < batch + count; op++)
  649. if (op->status == GNTST_eagain)
  650. gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref, op,
  651. &op->status, __func__);
  652. }
  653. EXPORT_SYMBOL_GPL(gnttab_batch_map);
  654. void gnttab_batch_copy(struct gnttab_copy *batch, unsigned count)
  655. {
  656. struct gnttab_copy *op;
  657. if (HYPERVISOR_grant_table_op(GNTTABOP_copy, batch, count))
  658. BUG();
  659. for (op = batch; op < batch + count; op++)
  660. if (op->status == GNTST_eagain)
  661. gnttab_retry_eagain_gop(GNTTABOP_copy, op,
  662. &op->status, __func__);
  663. }
  664. EXPORT_SYMBOL_GPL(gnttab_batch_copy);
/*
 * Map a batch of foreign grants into @pages: retry eagain entries, mark
 * successfully mapped pages as foreign-owned, then update the p2m.
 * Returns 0 or a hypercall/p2m error.
 */
int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
		    struct gnttab_map_grant_ref *kmap_ops,
		    struct page **pages, unsigned int count)
{
	int i, ret;

	ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map_ops, count);
	if (ret)
		return ret;

	for (i = 0; i < count; i++) {
		/* Retry eagain maps */
		if (map_ops[i].status == GNTST_eagain)
			gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref, map_ops + i,
						&map_ops[i].status, __func__);

		if (map_ops[i].status == GNTST_okay) {
			struct xen_page_foreign *foreign;

			/* Record the foreign owner so unmap can find it. */
			SetPageForeign(pages[i]);
			foreign = xen_page_foreign(pages[i]);
			foreign->domid = map_ops[i].dom;
			foreign->gref = map_ops[i].ref;
		}
	}

	return set_foreign_p2m_mapping(map_ops, kmap_ops, pages, count);
}
EXPORT_SYMBOL_GPL(gnttab_map_refs);
/*
 * Unmap a batch of foreign grants, clear the foreign marker on each
 * page, and restore the p2m.  Returns 0 or a hypercall/p2m error.
 */
int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
		      struct gnttab_unmap_grant_ref *kunmap_ops,
		      struct page **pages, unsigned int count)
{
	unsigned int i;
	int ret;

	ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap_ops, count);
	if (ret)
		return ret;

	for (i = 0; i < count; i++)
		ClearPageForeign(pages[i]);

	return clear_foreign_p2m_mapping(unmap_ops, kunmap_ops, pages, count);
}
EXPORT_SYMBOL_GPL(gnttab_unmap_refs);
/* Base retry delay (ms) for async unmaps; scaled by the request's age. */
#define GNTTAB_UNMAP_REFS_DELAY 5

static void __gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item);

/* Delayed-work trampoline: age the request and retry the async unmap. */
static void gnttab_unmap_work(struct work_struct *work)
{
	struct gntab_unmap_queue_data
		*unmap_data = container_of(work,
					   struct gntab_unmap_queue_data,
					   gnttab_work.work);
	if (unmap_data->age != UINT_MAX)	/* saturate, don't wrap */
		unmap_data->age++;
	__gnttab_unmap_refs_async(unmap_data);
}
/*
 * Unmap the batch once no page is referenced elsewhere.  If any page
 * still has extra references, reschedule with a delay that grows with
 * the request's age; otherwise unmap and invoke item->done().
 */
static void __gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item)
{
	int ret;
	int pc;

	for (pc = 0; pc < item->count; pc++) {
		if (page_count(item->pages[pc]) > 1) {
			unsigned long delay = GNTTAB_UNMAP_REFS_DELAY * (item->age + 1);

			schedule_delayed_work(&item->gnttab_work,
					      msecs_to_jiffies(delay));
			return;
		}
	}

	ret = gnttab_unmap_refs(item->unmap_ops, item->kunmap_ops,
				item->pages, item->count);
	item->done(ret, item);
}
/* Kick off an asynchronous unmap; item->done() runs on completion. */
void gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item)
{
	INIT_DELAYED_WORK(&item->gnttab_work, gnttab_unmap_work);
	item->age = 0;

	__gnttab_unmap_refs_async(item);
}
EXPORT_SYMBOL_GPL(gnttab_unmap_refs_async);
  738. static void unmap_refs_callback(int result,
  739. struct gntab_unmap_queue_data *data)
  740. {
  741. struct unmap_refs_callback_data *d = data->data;
  742. d->result = result;
  743. complete(&d->completion);
  744. }
  745. int gnttab_unmap_refs_sync(struct gntab_unmap_queue_data *item)
  746. {
  747. struct unmap_refs_callback_data data;
  748. init_completion(&data.completion);
  749. item->data = &data;
  750. item->done = &unmap_refs_callback;
  751. gnttab_unmap_refs_async(item);
  752. wait_for_completion(&data.completion);
  753. return data.result;
  754. }
  755. EXPORT_SYMBOL_GPL(gnttab_unmap_refs_sync);
/* Map the shared grant-table frames for the v1 ABI. */
static int gnttab_map_frames_v1(xen_pfn_t *frames, unsigned int nr_gframes)
{
	int rc;

	rc = arch_gnttab_map_shared(frames, nr_gframes,
				    gnttab_max_grant_frames(),
				    &gnttab_shared.addr);
	BUG_ON(rc);	/* mapping the shared table must not fail */

	return 0;
}
/* Unmap the currently mapped shared grant-table frames (v1 ABI). */
static void gnttab_unmap_frames_v1(void)
{
	arch_gnttab_unmap(gnttab_shared.addr, nr_grant_frames);
}
  769. static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
  770. {
  771. struct gnttab_setup_table setup;
  772. xen_pfn_t *frames;
  773. unsigned int nr_gframes = end_idx + 1;
  774. int rc;
  775. if (xen_feature(XENFEAT_auto_translated_physmap)) {
  776. struct xen_add_to_physmap xatp;
  777. unsigned int i = end_idx;
  778. rc = 0;
  779. BUG_ON(xen_auto_xlat_grant_frames.count < nr_gframes);
  780. /*
  781. * Loop backwards, so that the first hypercall has the largest
  782. * index, ensuring that the table will grow only once.
  783. */
  784. do {
  785. xatp.domid = DOMID_SELF;
  786. xatp.idx = i;
  787. xatp.space = XENMAPSPACE_grant_table;
  788. xatp.gpfn = xen_auto_xlat_grant_frames.pfn[i];
  789. rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp);
  790. if (rc != 0) {
  791. pr_warn("grant table add_to_physmap failed, err=%d\n",
  792. rc);
  793. break;
  794. }
  795. } while (i-- > start_idx);
  796. return rc;
  797. }
  798. /* No need for kzalloc as it is initialized in following hypercall
  799. * GNTTABOP_setup_table.
  800. */
  801. frames = kmalloc(nr_gframes * sizeof(unsigned long), GFP_ATOMIC);
  802. if (!frames)
  803. return -ENOMEM;
  804. setup.dom = DOMID_SELF;
  805. setup.nr_frames = nr_gframes;
  806. set_xen_guest_handle(setup.frame_list, frames);
  807. rc = HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1);
  808. if (rc == -ENOSYS) {
  809. kfree(frames);
  810. return -ENOSYS;
  811. }
  812. BUG_ON(rc || setup.status);
  813. rc = gnttab_interface->map_frames(frames, nr_gframes);
  814. kfree(frames);
  815. return rc;
  816. }
/* Operations implementing the version 1 grant-table layout. */
static struct gnttab_ops gnttab_v1_ops = {
	.map_frames = gnttab_map_frames_v1,
	.unmap_frames = gnttab_unmap_frames_v1,
	.update_entry = gnttab_update_entry_v1,
	.end_foreign_access_ref = gnttab_end_foreign_access_ref_v1,
	.end_foreign_transfer_ref = gnttab_end_foreign_transfer_ref_v1,
	.query_foreign_access = gnttab_query_foreign_access_v1,
};
  825. static void gnttab_request_version(void)
  826. {
  827. /* Only version 1 is used, which will always be available. */
  828. grant_table_version = 1;
  829. grefs_per_grant_frame = PAGE_SIZE / sizeof(struct grant_entry_v1);
  830. gnttab_interface = &gnttab_v1_ops;
  831. pr_info("Grant tables using version %d layout\n", grant_table_version);
  832. }
  833. static int gnttab_setup(void)
  834. {
  835. unsigned int max_nr_gframes;
  836. max_nr_gframes = gnttab_max_grant_frames();
  837. if (max_nr_gframes < nr_grant_frames)
  838. return -ENOSYS;
  839. if (xen_feature(XENFEAT_auto_translated_physmap) && gnttab_shared.addr == NULL) {
  840. gnttab_shared.addr = xen_auto_xlat_grant_frames.vaddr;
  841. if (gnttab_shared.addr == NULL) {
  842. pr_warn("gnttab share frames (addr=0x%08lx) is not mapped!\n",
  843. (unsigned long)xen_auto_xlat_grant_frames.vaddr);
  844. return -ENOMEM;
  845. }
  846. }
  847. return gnttab_map(0, nr_grant_frames - 1);
  848. }
  849. int gnttab_resume(void)
  850. {
  851. gnttab_request_version();
  852. return gnttab_setup();
  853. }
  854. int gnttab_suspend(void)
  855. {
  856. if (!xen_feature(XENFEAT_auto_translated_physmap))
  857. gnttab_interface->unmap_frames();
  858. return 0;
  859. }
  860. static int gnttab_expand(unsigned int req_entries)
  861. {
  862. int rc;
  863. unsigned int cur, extra;
  864. BUG_ON(grefs_per_grant_frame == 0);
  865. cur = nr_grant_frames;
  866. extra = ((req_entries + (grefs_per_grant_frame-1)) /
  867. grefs_per_grant_frame);
  868. if (cur + extra > gnttab_max_grant_frames())
  869. return -ENOSPC;
  870. rc = gnttab_map(cur, cur + extra - 1);
  871. if (rc == 0)
  872. rc = grow_gnttab_list(extra);
  873. return rc;
  874. }
  875. int gnttab_init(void)
  876. {
  877. int i;
  878. unsigned long max_nr_grant_frames;
  879. unsigned int max_nr_glist_frames, nr_glist_frames;
  880. unsigned int nr_init_grefs;
  881. int ret;
  882. gnttab_request_version();
  883. max_nr_grant_frames = gnttab_max_grant_frames();
  884. nr_grant_frames = 1;
  885. /* Determine the maximum number of frames required for the
  886. * grant reference free list on the current hypervisor.
  887. */
  888. BUG_ON(grefs_per_grant_frame == 0);
  889. max_nr_glist_frames = (max_nr_grant_frames *
  890. grefs_per_grant_frame / RPP);
  891. gnttab_list = kmalloc(max_nr_glist_frames * sizeof(grant_ref_t *),
  892. GFP_KERNEL);
  893. if (gnttab_list == NULL)
  894. return -ENOMEM;
  895. nr_glist_frames = (nr_grant_frames * grefs_per_grant_frame + RPP - 1) / RPP;
  896. for (i = 0; i < nr_glist_frames; i++) {
  897. gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_KERNEL);
  898. if (gnttab_list[i] == NULL) {
  899. ret = -ENOMEM;
  900. goto ini_nomem;
  901. }
  902. }
  903. ret = arch_gnttab_init(max_nr_grant_frames);
  904. if (ret < 0)
  905. goto ini_nomem;
  906. if (gnttab_setup() < 0) {
  907. ret = -ENODEV;
  908. goto ini_nomem;
  909. }
  910. nr_init_grefs = nr_grant_frames * grefs_per_grant_frame;
  911. for (i = NR_RESERVED_ENTRIES; i < nr_init_grefs - 1; i++)
  912. gnttab_entry(i) = i + 1;
  913. gnttab_entry(nr_init_grefs - 1) = GNTTAB_LIST_END;
  914. gnttab_free_count = nr_init_grefs - NR_RESERVED_ENTRIES;
  915. gnttab_free_head = NR_RESERVED_ENTRIES;
  916. printk("Grant table initialized\n");
  917. return 0;
  918. ini_nomem:
  919. for (i--; i >= 0; i--)
  920. free_page((unsigned long)gnttab_list[i]);
  921. kfree(gnttab_list);
  922. return ret;
  923. }
  924. EXPORT_SYMBOL_GPL(gnttab_init);
  925. static int __gnttab_init(void)
  926. {
  927. /* Delay grant-table initialization in the PV on HVM case */
  928. if (xen_hvm_domain())
  929. return 0;
  930. if (!xen_pv_domain())
  931. return -ENODEV;
  932. return gnttab_init();
  933. }
  934. /* Starts after core_initcall so that xen_pvh_gnttab_setup can be called
  935. * beforehand to initialize xen_auto_xlat_grant_frames. */
  936. core_initcall_sync(__gnttab_init);