vme.c 52 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027
  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. /*
  3. * VME Bridge Framework
  4. *
  5. * Author: Martyn Welch <martyn.welch@ge.com>
  6. * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
  7. *
  8. * Based on work by Tom Armistead and Ajit Prem
  9. * Copyright 2004 Motorola Inc.
  10. */
  11. #include <linux/init.h>
  12. #include <linux/export.h>
  13. #include <linux/mm.h>
  14. #include <linux/types.h>
  15. #include <linux/kernel.h>
  16. #include <linux/errno.h>
  17. #include <linux/pci.h>
  18. #include <linux/poll.h>
  19. #include <linux/highmem.h>
  20. #include <linux/interrupt.h>
  21. #include <linux/pagemap.h>
  22. #include <linux/device.h>
  23. #include <linux/dma-mapping.h>
  24. #include <linux/syscalls.h>
  25. #include <linux/mutex.h>
  26. #include <linux/spinlock.h>
  27. #include <linux/slab.h>
  28. #include <linux/vme.h>
  29. #include "vme_bridge.h"
/* Bitmask and list of registered buses both protected by common mutex */
static unsigned int vme_bus_numbers;	/* bitmask of in-use bus numbers */
static LIST_HEAD(vme_bus_list);		/* registered bridges */
static DEFINE_MUTEX(vme_buses_lock);	/* guards both of the above */

/* Forward declaration; definition is not visible in this chunk. */
static int __init vme_init(void);
  35. static struct vme_dev *dev_to_vme_dev(struct device *dev)
  36. {
  37. return container_of(dev, struct vme_dev, dev);
  38. }
  39. /*
  40. * Find the bridge that the resource is associated with.
  41. */
  42. static struct vme_bridge *find_bridge(struct vme_resource *resource)
  43. {
  44. /* Get list to search */
  45. switch (resource->type) {
  46. case VME_MASTER:
  47. return list_entry(resource->entry, struct vme_master_resource,
  48. list)->parent;
  49. break;
  50. case VME_SLAVE:
  51. return list_entry(resource->entry, struct vme_slave_resource,
  52. list)->parent;
  53. break;
  54. case VME_DMA:
  55. return list_entry(resource->entry, struct vme_dma_resource,
  56. list)->parent;
  57. break;
  58. case VME_LM:
  59. return list_entry(resource->entry, struct vme_lm_resource,
  60. list)->parent;
  61. break;
  62. default:
  63. printk(KERN_ERR "Unknown resource type\n");
  64. return NULL;
  65. break;
  66. }
  67. }
  68. /**
  69. * vme_free_consistent - Allocate contiguous memory.
  70. * @resource: Pointer to VME resource.
  71. * @size: Size of allocation required.
  72. * @dma: Pointer to variable to store physical address of allocation.
  73. *
  74. * Allocate a contiguous block of memory for use by the driver. This is used to
  75. * create the buffers for the slave windows.
  76. *
  77. * Return: Virtual address of allocation on success, NULL on failure.
  78. */
  79. void *vme_alloc_consistent(struct vme_resource *resource, size_t size,
  80. dma_addr_t *dma)
  81. {
  82. struct vme_bridge *bridge;
  83. if (!resource) {
  84. printk(KERN_ERR "No resource\n");
  85. return NULL;
  86. }
  87. bridge = find_bridge(resource);
  88. if (!bridge) {
  89. printk(KERN_ERR "Can't find bridge\n");
  90. return NULL;
  91. }
  92. if (!bridge->parent) {
  93. printk(KERN_ERR "Dev entry NULL for bridge %s\n", bridge->name);
  94. return NULL;
  95. }
  96. if (!bridge->alloc_consistent) {
  97. printk(KERN_ERR "alloc_consistent not supported by bridge %s\n",
  98. bridge->name);
  99. return NULL;
  100. }
  101. return bridge->alloc_consistent(bridge->parent, size, dma);
  102. }
  103. EXPORT_SYMBOL(vme_alloc_consistent);
  104. /**
  105. * vme_free_consistent - Free previously allocated memory.
  106. * @resource: Pointer to VME resource.
  107. * @size: Size of allocation to free.
  108. * @vaddr: Virtual address of allocation.
  109. * @dma: Physical address of allocation.
  110. *
  111. * Free previously allocated block of contiguous memory.
  112. */
  113. void vme_free_consistent(struct vme_resource *resource, size_t size,
  114. void *vaddr, dma_addr_t dma)
  115. {
  116. struct vme_bridge *bridge;
  117. if (!resource) {
  118. printk(KERN_ERR "No resource\n");
  119. return;
  120. }
  121. bridge = find_bridge(resource);
  122. if (!bridge) {
  123. printk(KERN_ERR "Can't find bridge\n");
  124. return;
  125. }
  126. if (!bridge->parent) {
  127. printk(KERN_ERR "Dev entry NULL for bridge %s\n", bridge->name);
  128. return;
  129. }
  130. if (!bridge->free_consistent) {
  131. printk(KERN_ERR "free_consistent not supported by bridge %s\n",
  132. bridge->name);
  133. return;
  134. }
  135. bridge->free_consistent(bridge->parent, size, vaddr, dma);
  136. }
  137. EXPORT_SYMBOL(vme_free_consistent);
  138. /**
  139. * vme_get_size - Helper function returning size of a VME window
  140. * @resource: Pointer to VME slave or master resource.
  141. *
  142. * Determine the size of the VME window provided. This is a helper
  143. * function, wrappering the call to vme_master_get or vme_slave_get
  144. * depending on the type of window resource handed to it.
  145. *
  146. * Return: Size of the window on success, zero on failure.
  147. */
  148. size_t vme_get_size(struct vme_resource *resource)
  149. {
  150. int enabled, retval;
  151. unsigned long long base, size;
  152. dma_addr_t buf_base;
  153. u32 aspace, cycle, dwidth;
  154. switch (resource->type) {
  155. case VME_MASTER:
  156. retval = vme_master_get(resource, &enabled, &base, &size,
  157. &aspace, &cycle, &dwidth);
  158. if (retval)
  159. return 0;
  160. return size;
  161. break;
  162. case VME_SLAVE:
  163. retval = vme_slave_get(resource, &enabled, &base, &size,
  164. &buf_base, &aspace, &cycle);
  165. if (retval)
  166. return 0;
  167. return size;
  168. break;
  169. case VME_DMA:
  170. return 0;
  171. break;
  172. default:
  173. printk(KERN_ERR "Unknown resource type\n");
  174. return 0;
  175. break;
  176. }
  177. }
  178. EXPORT_SYMBOL(vme_get_size);
  179. int vme_check_window(u32 aspace, unsigned long long vme_base,
  180. unsigned long long size)
  181. {
  182. int retval = 0;
  183. if (vme_base + size < size)
  184. return -EINVAL;
  185. switch (aspace) {
  186. case VME_A16:
  187. if (vme_base + size > VME_A16_MAX)
  188. retval = -EFAULT;
  189. break;
  190. case VME_A24:
  191. if (vme_base + size > VME_A24_MAX)
  192. retval = -EFAULT;
  193. break;
  194. case VME_A32:
  195. if (vme_base + size > VME_A32_MAX)
  196. retval = -EFAULT;
  197. break;
  198. case VME_A64:
  199. /* The VME_A64_MAX limit is actually U64_MAX + 1 */
  200. break;
  201. case VME_CRCSR:
  202. if (vme_base + size > VME_CRCSR_MAX)
  203. retval = -EFAULT;
  204. break;
  205. case VME_USER1:
  206. case VME_USER2:
  207. case VME_USER3:
  208. case VME_USER4:
  209. /* User Defined */
  210. break;
  211. default:
  212. printk(KERN_ERR "Invalid address space\n");
  213. retval = -EINVAL;
  214. break;
  215. }
  216. return retval;
  217. }
  218. EXPORT_SYMBOL(vme_check_window);
  219. static u32 vme_get_aspace(int am)
  220. {
  221. switch (am) {
  222. case 0x29:
  223. case 0x2D:
  224. return VME_A16;
  225. case 0x38:
  226. case 0x39:
  227. case 0x3A:
  228. case 0x3B:
  229. case 0x3C:
  230. case 0x3D:
  231. case 0x3E:
  232. case 0x3F:
  233. return VME_A24;
  234. case 0x8:
  235. case 0x9:
  236. case 0xA:
  237. case 0xB:
  238. case 0xC:
  239. case 0xD:
  240. case 0xE:
  241. case 0xF:
  242. return VME_A32;
  243. case 0x0:
  244. case 0x1:
  245. case 0x3:
  246. return VME_A64;
  247. }
  248. return 0;
  249. }
  250. /**
  251. * vme_slave_request - Request a VME slave window resource.
  252. * @vdev: Pointer to VME device struct vme_dev assigned to driver instance.
  253. * @address: Required VME address space.
  254. * @cycle: Required VME data transfer cycle type.
  255. *
  256. * Request use of a VME window resource capable of being set for the requested
  257. * address space and data transfer cycle.
  258. *
  259. * Return: Pointer to VME resource on success, NULL on failure.
  260. */
  261. struct vme_resource *vme_slave_request(struct vme_dev *vdev, u32 address,
  262. u32 cycle)
  263. {
  264. struct vme_bridge *bridge;
  265. struct list_head *slave_pos = NULL;
  266. struct vme_slave_resource *allocated_image = NULL;
  267. struct vme_slave_resource *slave_image = NULL;
  268. struct vme_resource *resource = NULL;
  269. bridge = vdev->bridge;
  270. if (!bridge) {
  271. printk(KERN_ERR "Can't find VME bus\n");
  272. goto err_bus;
  273. }
  274. /* Loop through slave resources */
  275. list_for_each(slave_pos, &bridge->slave_resources) {
  276. slave_image = list_entry(slave_pos,
  277. struct vme_slave_resource, list);
  278. if (!slave_image) {
  279. printk(KERN_ERR "Registered NULL Slave resource\n");
  280. continue;
  281. }
  282. /* Find an unlocked and compatible image */
  283. mutex_lock(&slave_image->mtx);
  284. if (((slave_image->address_attr & address) == address) &&
  285. ((slave_image->cycle_attr & cycle) == cycle) &&
  286. (slave_image->locked == 0)) {
  287. slave_image->locked = 1;
  288. mutex_unlock(&slave_image->mtx);
  289. allocated_image = slave_image;
  290. break;
  291. }
  292. mutex_unlock(&slave_image->mtx);
  293. }
  294. /* No free image */
  295. if (!allocated_image)
  296. goto err_image;
  297. resource = kmalloc(sizeof(*resource), GFP_KERNEL);
  298. if (!resource)
  299. goto err_alloc;
  300. resource->type = VME_SLAVE;
  301. resource->entry = &allocated_image->list;
  302. return resource;
  303. err_alloc:
  304. /* Unlock image */
  305. mutex_lock(&slave_image->mtx);
  306. slave_image->locked = 0;
  307. mutex_unlock(&slave_image->mtx);
  308. err_image:
  309. err_bus:
  310. return NULL;
  311. }
  312. EXPORT_SYMBOL(vme_slave_request);
  313. /**
  314. * vme_slave_set - Set VME slave window configuration.
  315. * @resource: Pointer to VME slave resource.
  316. * @enabled: State to which the window should be configured.
  317. * @vme_base: Base address for the window.
  318. * @size: Size of the VME window.
  319. * @buf_base: Based address of buffer used to provide VME slave window storage.
  320. * @aspace: VME address space for the VME window.
  321. * @cycle: VME data transfer cycle type for the VME window.
  322. *
  323. * Set configuration for provided VME slave window.
  324. *
  325. * Return: Zero on success, -EINVAL if operation is not supported on this
  326. * device, if an invalid resource has been provided or invalid
  327. * attributes are provided. Hardware specific errors may also be
  328. * returned.
  329. */
  330. int vme_slave_set(struct vme_resource *resource, int enabled,
  331. unsigned long long vme_base, unsigned long long size,
  332. dma_addr_t buf_base, u32 aspace, u32 cycle)
  333. {
  334. struct vme_bridge *bridge = find_bridge(resource);
  335. struct vme_slave_resource *image;
  336. int retval;
  337. if (resource->type != VME_SLAVE) {
  338. printk(KERN_ERR "Not a slave resource\n");
  339. return -EINVAL;
  340. }
  341. image = list_entry(resource->entry, struct vme_slave_resource, list);
  342. if (!bridge->slave_set) {
  343. printk(KERN_ERR "Function not supported\n");
  344. return -ENOSYS;
  345. }
  346. if (!(((image->address_attr & aspace) == aspace) &&
  347. ((image->cycle_attr & cycle) == cycle))) {
  348. printk(KERN_ERR "Invalid attributes\n");
  349. return -EINVAL;
  350. }
  351. retval = vme_check_window(aspace, vme_base, size);
  352. if (retval)
  353. return retval;
  354. return bridge->slave_set(image, enabled, vme_base, size, buf_base,
  355. aspace, cycle);
  356. }
  357. EXPORT_SYMBOL(vme_slave_set);
  358. /**
  359. * vme_slave_get - Retrieve VME slave window configuration.
  360. * @resource: Pointer to VME slave resource.
  361. * @enabled: Pointer to variable for storing state.
  362. * @vme_base: Pointer to variable for storing window base address.
  363. * @size: Pointer to variable for storing window size.
  364. * @buf_base: Pointer to variable for storing slave buffer base address.
  365. * @aspace: Pointer to variable for storing VME address space.
  366. * @cycle: Pointer to variable for storing VME data transfer cycle type.
  367. *
  368. * Return configuration for provided VME slave window.
  369. *
  370. * Return: Zero on success, -EINVAL if operation is not supported on this
  371. * device or if an invalid resource has been provided.
  372. */
  373. int vme_slave_get(struct vme_resource *resource, int *enabled,
  374. unsigned long long *vme_base, unsigned long long *size,
  375. dma_addr_t *buf_base, u32 *aspace, u32 *cycle)
  376. {
  377. struct vme_bridge *bridge = find_bridge(resource);
  378. struct vme_slave_resource *image;
  379. if (resource->type != VME_SLAVE) {
  380. printk(KERN_ERR "Not a slave resource\n");
  381. return -EINVAL;
  382. }
  383. image = list_entry(resource->entry, struct vme_slave_resource, list);
  384. if (!bridge->slave_get) {
  385. printk(KERN_ERR "vme_slave_get not supported\n");
  386. return -EINVAL;
  387. }
  388. return bridge->slave_get(image, enabled, vme_base, size, buf_base,
  389. aspace, cycle);
  390. }
  391. EXPORT_SYMBOL(vme_slave_get);
  392. /**
  393. * vme_slave_free - Free VME slave window
  394. * @resource: Pointer to VME slave resource.
  395. *
  396. * Free the provided slave resource so that it may be reallocated.
  397. */
  398. void vme_slave_free(struct vme_resource *resource)
  399. {
  400. struct vme_slave_resource *slave_image;
  401. if (resource->type != VME_SLAVE) {
  402. printk(KERN_ERR "Not a slave resource\n");
  403. return;
  404. }
  405. slave_image = list_entry(resource->entry, struct vme_slave_resource,
  406. list);
  407. if (!slave_image) {
  408. printk(KERN_ERR "Can't find slave resource\n");
  409. return;
  410. }
  411. /* Unlock image */
  412. mutex_lock(&slave_image->mtx);
  413. if (slave_image->locked == 0)
  414. printk(KERN_ERR "Image is already free\n");
  415. slave_image->locked = 0;
  416. mutex_unlock(&slave_image->mtx);
  417. /* Free up resource memory */
  418. kfree(resource);
  419. }
  420. EXPORT_SYMBOL(vme_slave_free);
  421. /**
  422. * vme_master_request - Request a VME master window resource.
  423. * @vdev: Pointer to VME device struct vme_dev assigned to driver instance.
  424. * @address: Required VME address space.
  425. * @cycle: Required VME data transfer cycle type.
  426. * @dwidth: Required VME data transfer width.
  427. *
  428. * Request use of a VME window resource capable of being set for the requested
  429. * address space, data transfer cycle and width.
  430. *
  431. * Return: Pointer to VME resource on success, NULL on failure.
  432. */
  433. struct vme_resource *vme_master_request(struct vme_dev *vdev, u32 address,
  434. u32 cycle, u32 dwidth)
  435. {
  436. struct vme_bridge *bridge;
  437. struct list_head *master_pos = NULL;
  438. struct vme_master_resource *allocated_image = NULL;
  439. struct vme_master_resource *master_image = NULL;
  440. struct vme_resource *resource = NULL;
  441. bridge = vdev->bridge;
  442. if (!bridge) {
  443. printk(KERN_ERR "Can't find VME bus\n");
  444. goto err_bus;
  445. }
  446. /* Loop through master resources */
  447. list_for_each(master_pos, &bridge->master_resources) {
  448. master_image = list_entry(master_pos,
  449. struct vme_master_resource, list);
  450. if (!master_image) {
  451. printk(KERN_WARNING "Registered NULL master resource\n");
  452. continue;
  453. }
  454. /* Find an unlocked and compatible image */
  455. spin_lock(&master_image->lock);
  456. if (((master_image->address_attr & address) == address) &&
  457. ((master_image->cycle_attr & cycle) == cycle) &&
  458. ((master_image->width_attr & dwidth) == dwidth) &&
  459. (master_image->locked == 0)) {
  460. master_image->locked = 1;
  461. spin_unlock(&master_image->lock);
  462. allocated_image = master_image;
  463. break;
  464. }
  465. spin_unlock(&master_image->lock);
  466. }
  467. /* Check to see if we found a resource */
  468. if (!allocated_image) {
  469. printk(KERN_ERR "Can't find a suitable resource\n");
  470. goto err_image;
  471. }
  472. resource = kmalloc(sizeof(*resource), GFP_KERNEL);
  473. if (!resource)
  474. goto err_alloc;
  475. resource->type = VME_MASTER;
  476. resource->entry = &allocated_image->list;
  477. return resource;
  478. err_alloc:
  479. /* Unlock image */
  480. spin_lock(&master_image->lock);
  481. master_image->locked = 0;
  482. spin_unlock(&master_image->lock);
  483. err_image:
  484. err_bus:
  485. return NULL;
  486. }
  487. EXPORT_SYMBOL(vme_master_request);
  488. /**
  489. * vme_master_set - Set VME master window configuration.
  490. * @resource: Pointer to VME master resource.
  491. * @enabled: State to which the window should be configured.
  492. * @vme_base: Base address for the window.
  493. * @size: Size of the VME window.
  494. * @aspace: VME address space for the VME window.
  495. * @cycle: VME data transfer cycle type for the VME window.
  496. * @dwidth: VME data transfer width for the VME window.
  497. *
  498. * Set configuration for provided VME master window.
  499. *
  500. * Return: Zero on success, -EINVAL if operation is not supported on this
  501. * device, if an invalid resource has been provided or invalid
  502. * attributes are provided. Hardware specific errors may also be
  503. * returned.
  504. */
  505. int vme_master_set(struct vme_resource *resource, int enabled,
  506. unsigned long long vme_base, unsigned long long size, u32 aspace,
  507. u32 cycle, u32 dwidth)
  508. {
  509. struct vme_bridge *bridge = find_bridge(resource);
  510. struct vme_master_resource *image;
  511. int retval;
  512. if (resource->type != VME_MASTER) {
  513. printk(KERN_ERR "Not a master resource\n");
  514. return -EINVAL;
  515. }
  516. image = list_entry(resource->entry, struct vme_master_resource, list);
  517. if (!bridge->master_set) {
  518. printk(KERN_WARNING "vme_master_set not supported\n");
  519. return -EINVAL;
  520. }
  521. if (!(((image->address_attr & aspace) == aspace) &&
  522. ((image->cycle_attr & cycle) == cycle) &&
  523. ((image->width_attr & dwidth) == dwidth))) {
  524. printk(KERN_WARNING "Invalid attributes\n");
  525. return -EINVAL;
  526. }
  527. retval = vme_check_window(aspace, vme_base, size);
  528. if (retval)
  529. return retval;
  530. return bridge->master_set(image, enabled, vme_base, size, aspace,
  531. cycle, dwidth);
  532. }
  533. EXPORT_SYMBOL(vme_master_set);
  534. /**
  535. * vme_master_get - Retrieve VME master window configuration.
  536. * @resource: Pointer to VME master resource.
  537. * @enabled: Pointer to variable for storing state.
  538. * @vme_base: Pointer to variable for storing window base address.
  539. * @size: Pointer to variable for storing window size.
  540. * @aspace: Pointer to variable for storing VME address space.
  541. * @cycle: Pointer to variable for storing VME data transfer cycle type.
  542. * @dwidth: Pointer to variable for storing VME data transfer width.
  543. *
  544. * Return configuration for provided VME master window.
  545. *
  546. * Return: Zero on success, -EINVAL if operation is not supported on this
  547. * device or if an invalid resource has been provided.
  548. */
  549. int vme_master_get(struct vme_resource *resource, int *enabled,
  550. unsigned long long *vme_base, unsigned long long *size, u32 *aspace,
  551. u32 *cycle, u32 *dwidth)
  552. {
  553. struct vme_bridge *bridge = find_bridge(resource);
  554. struct vme_master_resource *image;
  555. if (resource->type != VME_MASTER) {
  556. printk(KERN_ERR "Not a master resource\n");
  557. return -EINVAL;
  558. }
  559. image = list_entry(resource->entry, struct vme_master_resource, list);
  560. if (!bridge->master_get) {
  561. printk(KERN_WARNING "%s not supported\n", __func__);
  562. return -EINVAL;
  563. }
  564. return bridge->master_get(image, enabled, vme_base, size, aspace,
  565. cycle, dwidth);
  566. }
  567. EXPORT_SYMBOL(vme_master_get);
  568. /**
  569. * vme_master_write - Read data from VME space into a buffer.
  570. * @resource: Pointer to VME master resource.
  571. * @buf: Pointer to buffer where data should be transferred.
  572. * @count: Number of bytes to transfer.
  573. * @offset: Offset into VME master window at which to start transfer.
  574. *
  575. * Perform read of count bytes of data from location on VME bus which maps into
  576. * the VME master window at offset to buf.
  577. *
  578. * Return: Number of bytes read, -EINVAL if resource is not a VME master
  579. * resource or read operation is not supported. -EFAULT returned if
  580. * invalid offset is provided. Hardware specific errors may also be
  581. * returned.
  582. */
  583. ssize_t vme_master_read(struct vme_resource *resource, void *buf, size_t count,
  584. loff_t offset)
  585. {
  586. struct vme_bridge *bridge = find_bridge(resource);
  587. struct vme_master_resource *image;
  588. size_t length;
  589. if (!bridge->master_read) {
  590. printk(KERN_WARNING "Reading from resource not supported\n");
  591. return -EINVAL;
  592. }
  593. if (resource->type != VME_MASTER) {
  594. printk(KERN_ERR "Not a master resource\n");
  595. return -EINVAL;
  596. }
  597. image = list_entry(resource->entry, struct vme_master_resource, list);
  598. length = vme_get_size(resource);
  599. if (offset > length) {
  600. printk(KERN_WARNING "Invalid Offset\n");
  601. return -EFAULT;
  602. }
  603. if ((offset + count) > length)
  604. count = length - offset;
  605. return bridge->master_read(image, buf, count, offset);
  606. }
  607. EXPORT_SYMBOL(vme_master_read);
  608. /**
  609. * vme_master_write - Write data out to VME space from a buffer.
  610. * @resource: Pointer to VME master resource.
  611. * @buf: Pointer to buffer holding data to transfer.
  612. * @count: Number of bytes to transfer.
  613. * @offset: Offset into VME master window at which to start transfer.
  614. *
  615. * Perform write of count bytes of data from buf to location on VME bus which
  616. * maps into the VME master window at offset.
  617. *
  618. * Return: Number of bytes written, -EINVAL if resource is not a VME master
  619. * resource or write operation is not supported. -EFAULT returned if
  620. * invalid offset is provided. Hardware specific errors may also be
  621. * returned.
  622. */
  623. ssize_t vme_master_write(struct vme_resource *resource, void *buf,
  624. size_t count, loff_t offset)
  625. {
  626. struct vme_bridge *bridge = find_bridge(resource);
  627. struct vme_master_resource *image;
  628. size_t length;
  629. if (!bridge->master_write) {
  630. printk(KERN_WARNING "Writing to resource not supported\n");
  631. return -EINVAL;
  632. }
  633. if (resource->type != VME_MASTER) {
  634. printk(KERN_ERR "Not a master resource\n");
  635. return -EINVAL;
  636. }
  637. image = list_entry(resource->entry, struct vme_master_resource, list);
  638. length = vme_get_size(resource);
  639. if (offset > length) {
  640. printk(KERN_WARNING "Invalid Offset\n");
  641. return -EFAULT;
  642. }
  643. if ((offset + count) > length)
  644. count = length - offset;
  645. return bridge->master_write(image, buf, count, offset);
  646. }
  647. EXPORT_SYMBOL(vme_master_write);
  648. /**
  649. * vme_master_rmw - Perform read-modify-write cycle.
  650. * @resource: Pointer to VME master resource.
  651. * @mask: Bits to be compared and swapped in operation.
  652. * @compare: Bits to be compared with data read from offset.
  653. * @swap: Bits to be swapped in data read from offset.
  654. * @offset: Offset into VME master window at which to perform operation.
  655. *
  656. * Perform read-modify-write cycle on provided location:
  657. * - Location on VME bus is read.
  658. * - Bits selected by mask are compared with compare.
  659. * - Where a selected bit matches that in compare and are selected in swap,
  660. * the bit is swapped.
  661. * - Result written back to location on VME bus.
  662. *
  663. * Return: Bytes written on success, -EINVAL if resource is not a VME master
  664. * resource or RMW operation is not supported. Hardware specific
  665. * errors may also be returned.
  666. */
  667. unsigned int vme_master_rmw(struct vme_resource *resource, unsigned int mask,
  668. unsigned int compare, unsigned int swap, loff_t offset)
  669. {
  670. struct vme_bridge *bridge = find_bridge(resource);
  671. struct vme_master_resource *image;
  672. if (!bridge->master_rmw) {
  673. printk(KERN_WARNING "Writing to resource not supported\n");
  674. return -EINVAL;
  675. }
  676. if (resource->type != VME_MASTER) {
  677. printk(KERN_ERR "Not a master resource\n");
  678. return -EINVAL;
  679. }
  680. image = list_entry(resource->entry, struct vme_master_resource, list);
  681. return bridge->master_rmw(image, mask, compare, swap, offset);
  682. }
  683. EXPORT_SYMBOL(vme_master_rmw);
  684. /**
  685. * vme_master_mmap - Mmap region of VME master window.
  686. * @resource: Pointer to VME master resource.
  687. * @vma: Pointer to definition of user mapping.
  688. *
  689. * Memory map a region of the VME master window into user space.
  690. *
  691. * Return: Zero on success, -EINVAL if resource is not a VME master
  692. * resource or -EFAULT if map exceeds window size. Other generic mmap
  693. * errors may also be returned.
  694. */
  695. int vme_master_mmap(struct vme_resource *resource, struct vm_area_struct *vma)
  696. {
  697. struct vme_master_resource *image;
  698. phys_addr_t phys_addr;
  699. unsigned long vma_size;
  700. if (resource->type != VME_MASTER) {
  701. pr_err("Not a master resource\n");
  702. return -EINVAL;
  703. }
  704. image = list_entry(resource->entry, struct vme_master_resource, list);
  705. phys_addr = image->bus_resource.start + (vma->vm_pgoff << PAGE_SHIFT);
  706. vma_size = vma->vm_end - vma->vm_start;
  707. if (phys_addr + vma_size > image->bus_resource.end + 1) {
  708. pr_err("Map size cannot exceed the window size\n");
  709. return -EFAULT;
  710. }
  711. vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
  712. return vm_iomap_memory(vma, phys_addr, vma->vm_end - vma->vm_start);
  713. }
  714. EXPORT_SYMBOL(vme_master_mmap);
  715. /**
  716. * vme_master_free - Free VME master window
  717. * @resource: Pointer to VME master resource.
  718. *
  719. * Free the provided master resource so that it may be reallocated.
  720. */
  721. void vme_master_free(struct vme_resource *resource)
  722. {
  723. struct vme_master_resource *master_image;
  724. if (resource->type != VME_MASTER) {
  725. printk(KERN_ERR "Not a master resource\n");
  726. return;
  727. }
  728. master_image = list_entry(resource->entry, struct vme_master_resource,
  729. list);
  730. if (!master_image) {
  731. printk(KERN_ERR "Can't find master resource\n");
  732. return;
  733. }
  734. /* Unlock image */
  735. spin_lock(&master_image->lock);
  736. if (master_image->locked == 0)
  737. printk(KERN_ERR "Image is already free\n");
  738. master_image->locked = 0;
  739. spin_unlock(&master_image->lock);
  740. /* Free up resource memory */
  741. kfree(resource);
  742. }
  743. EXPORT_SYMBOL(vme_master_free);
  744. /**
  745. * vme_dma_request - Request a DMA controller.
  746. * @vdev: Pointer to VME device struct vme_dev assigned to driver instance.
  747. * @route: Required src/destination combination.
  748. *
  749. * Request a VME DMA controller with capability to perform transfers bewteen
  750. * requested source/destination combination.
  751. *
  752. * Return: Pointer to VME DMA resource on success, NULL on failure.
  753. */
  754. struct vme_resource *vme_dma_request(struct vme_dev *vdev, u32 route)
  755. {
  756. struct vme_bridge *bridge;
  757. struct list_head *dma_pos = NULL;
  758. struct vme_dma_resource *allocated_ctrlr = NULL;
  759. struct vme_dma_resource *dma_ctrlr = NULL;
  760. struct vme_resource *resource = NULL;
  761. /* XXX Not checking resource attributes */
  762. printk(KERN_ERR "No VME resource Attribute tests done\n");
  763. bridge = vdev->bridge;
  764. if (!bridge) {
  765. printk(KERN_ERR "Can't find VME bus\n");
  766. goto err_bus;
  767. }
  768. /* Loop through DMA resources */
  769. list_for_each(dma_pos, &bridge->dma_resources) {
  770. dma_ctrlr = list_entry(dma_pos,
  771. struct vme_dma_resource, list);
  772. if (!dma_ctrlr) {
  773. printk(KERN_ERR "Registered NULL DMA resource\n");
  774. continue;
  775. }
  776. /* Find an unlocked and compatible controller */
  777. mutex_lock(&dma_ctrlr->mtx);
  778. if (((dma_ctrlr->route_attr & route) == route) &&
  779. (dma_ctrlr->locked == 0)) {
  780. dma_ctrlr->locked = 1;
  781. mutex_unlock(&dma_ctrlr->mtx);
  782. allocated_ctrlr = dma_ctrlr;
  783. break;
  784. }
  785. mutex_unlock(&dma_ctrlr->mtx);
  786. }
  787. /* Check to see if we found a resource */
  788. if (!allocated_ctrlr)
  789. goto err_ctrlr;
  790. resource = kmalloc(sizeof(*resource), GFP_KERNEL);
  791. if (!resource)
  792. goto err_alloc;
  793. resource->type = VME_DMA;
  794. resource->entry = &allocated_ctrlr->list;
  795. return resource;
  796. err_alloc:
  797. /* Unlock image */
  798. mutex_lock(&dma_ctrlr->mtx);
  799. dma_ctrlr->locked = 0;
  800. mutex_unlock(&dma_ctrlr->mtx);
  801. err_ctrlr:
  802. err_bus:
  803. return NULL;
  804. }
  805. EXPORT_SYMBOL(vme_dma_request);
  806. /**
  807. * vme_new_dma_list - Create new VME DMA list.
  808. * @resource: Pointer to VME DMA resource.
  809. *
  810. * Create a new VME DMA list. It is the responsibility of the user to free
  811. * the list once it is no longer required with vme_dma_list_free().
  812. *
  813. * Return: Pointer to new VME DMA list, NULL on allocation failure or invalid
  814. * VME DMA resource.
  815. */
  816. struct vme_dma_list *vme_new_dma_list(struct vme_resource *resource)
  817. {
  818. struct vme_dma_list *dma_list;
  819. if (resource->type != VME_DMA) {
  820. printk(KERN_ERR "Not a DMA resource\n");
  821. return NULL;
  822. }
  823. dma_list = kmalloc(sizeof(*dma_list), GFP_KERNEL);
  824. if (!dma_list)
  825. return NULL;
  826. INIT_LIST_HEAD(&dma_list->entries);
  827. dma_list->parent = list_entry(resource->entry,
  828. struct vme_dma_resource,
  829. list);
  830. mutex_init(&dma_list->mtx);
  831. return dma_list;
  832. }
  833. EXPORT_SYMBOL(vme_new_dma_list);
  834. /**
  835. * vme_dma_pattern_attribute - Create "Pattern" type VME DMA list attribute.
  836. * @pattern: Value to use used as pattern
  837. * @type: Type of pattern to be written.
  838. *
  839. * Create VME DMA list attribute for pattern generation. It is the
  840. * responsibility of the user to free used attributes using
  841. * vme_dma_free_attribute().
  842. *
  843. * Return: Pointer to VME DMA attribute, NULL on failure.
  844. */
  845. struct vme_dma_attr *vme_dma_pattern_attribute(u32 pattern, u32 type)
  846. {
  847. struct vme_dma_attr *attributes;
  848. struct vme_dma_pattern *pattern_attr;
  849. attributes = kmalloc(sizeof(*attributes), GFP_KERNEL);
  850. if (!attributes)
  851. goto err_attr;
  852. pattern_attr = kmalloc(sizeof(*pattern_attr), GFP_KERNEL);
  853. if (!pattern_attr)
  854. goto err_pat;
  855. attributes->type = VME_DMA_PATTERN;
  856. attributes->private = (void *)pattern_attr;
  857. pattern_attr->pattern = pattern;
  858. pattern_attr->type = type;
  859. return attributes;
  860. err_pat:
  861. kfree(attributes);
  862. err_attr:
  863. return NULL;
  864. }
  865. EXPORT_SYMBOL(vme_dma_pattern_attribute);
  866. /**
  867. * vme_dma_pci_attribute - Create "PCI" type VME DMA list attribute.
  868. * @address: PCI base address for DMA transfer.
  869. *
  870. * Create VME DMA list attribute pointing to a location on PCI for DMA
  871. * transfers. It is the responsibility of the user to free used attributes
  872. * using vme_dma_free_attribute().
  873. *
  874. * Return: Pointer to VME DMA attribute, NULL on failure.
  875. */
  876. struct vme_dma_attr *vme_dma_pci_attribute(dma_addr_t address)
  877. {
  878. struct vme_dma_attr *attributes;
  879. struct vme_dma_pci *pci_attr;
  880. /* XXX Run some sanity checks here */
  881. attributes = kmalloc(sizeof(*attributes), GFP_KERNEL);
  882. if (!attributes)
  883. goto err_attr;
  884. pci_attr = kmalloc(sizeof(*pci_attr), GFP_KERNEL);
  885. if (!pci_attr)
  886. goto err_pci;
  887. attributes->type = VME_DMA_PCI;
  888. attributes->private = (void *)pci_attr;
  889. pci_attr->address = address;
  890. return attributes;
  891. err_pci:
  892. kfree(attributes);
  893. err_attr:
  894. return NULL;
  895. }
  896. EXPORT_SYMBOL(vme_dma_pci_attribute);
  897. /**
  898. * vme_dma_vme_attribute - Create "VME" type VME DMA list attribute.
  899. * @address: VME base address for DMA transfer.
  900. * @aspace: VME address space to use for DMA transfer.
  901. * @cycle: VME bus cycle to use for DMA transfer.
  902. * @dwidth: VME data width to use for DMA transfer.
  903. *
  904. * Create VME DMA list attribute pointing to a location on the VME bus for DMA
  905. * transfers. It is the responsibility of the user to free used attributes
  906. * using vme_dma_free_attribute().
  907. *
  908. * Return: Pointer to VME DMA attribute, NULL on failure.
  909. */
  910. struct vme_dma_attr *vme_dma_vme_attribute(unsigned long long address,
  911. u32 aspace, u32 cycle, u32 dwidth)
  912. {
  913. struct vme_dma_attr *attributes;
  914. struct vme_dma_vme *vme_attr;
  915. attributes = kmalloc(sizeof(*attributes), GFP_KERNEL);
  916. if (!attributes)
  917. goto err_attr;
  918. vme_attr = kmalloc(sizeof(*vme_attr), GFP_KERNEL);
  919. if (!vme_attr)
  920. goto err_vme;
  921. attributes->type = VME_DMA_VME;
  922. attributes->private = (void *)vme_attr;
  923. vme_attr->address = address;
  924. vme_attr->aspace = aspace;
  925. vme_attr->cycle = cycle;
  926. vme_attr->dwidth = dwidth;
  927. return attributes;
  928. err_vme:
  929. kfree(attributes);
  930. err_attr:
  931. return NULL;
  932. }
  933. EXPORT_SYMBOL(vme_dma_vme_attribute);
  934. /**
  935. * vme_dma_free_attribute - Free DMA list attribute.
  936. * @attributes: Pointer to DMA list attribute.
  937. *
  938. * Free VME DMA list attribute. VME DMA list attributes can be safely freed
  939. * once vme_dma_list_add() has returned.
  940. */
  941. void vme_dma_free_attribute(struct vme_dma_attr *attributes)
  942. {
  943. kfree(attributes->private);
  944. kfree(attributes);
  945. }
  946. EXPORT_SYMBOL(vme_dma_free_attribute);
  947. /**
  948. * vme_dma_list_add - Add enty to a VME DMA list.
  949. * @list: Pointer to VME list.
  950. * @src: Pointer to DMA list attribute to use as source.
  951. * @dest: Pointer to DMA list attribute to use as destination.
  952. * @count: Number of bytes to transfer.
  953. *
  954. * Add an entry to the provided VME DMA list. Entry requires pointers to source
  955. * and destination DMA attributes and a count.
  956. *
  957. * Please note, the attributes supported as source and destinations for
  958. * transfers are hardware dependent.
  959. *
  960. * Return: Zero on success, -EINVAL if operation is not supported on this
  961. * device or if the link list has already been submitted for execution.
  962. * Hardware specific errors also possible.
  963. */
  964. int vme_dma_list_add(struct vme_dma_list *list, struct vme_dma_attr *src,
  965. struct vme_dma_attr *dest, size_t count)
  966. {
  967. struct vme_bridge *bridge = list->parent->parent;
  968. int retval;
  969. if (!bridge->dma_list_add) {
  970. printk(KERN_WARNING "Link List DMA generation not supported\n");
  971. return -EINVAL;
  972. }
  973. if (!mutex_trylock(&list->mtx)) {
  974. printk(KERN_ERR "Link List already submitted\n");
  975. return -EINVAL;
  976. }
  977. retval = bridge->dma_list_add(list, src, dest, count);
  978. mutex_unlock(&list->mtx);
  979. return retval;
  980. }
  981. EXPORT_SYMBOL(vme_dma_list_add);
  982. /**
  983. * vme_dma_list_exec - Queue a VME DMA list for execution.
  984. * @list: Pointer to VME list.
  985. *
  986. * Queue the provided VME DMA list for execution. The call will return once the
  987. * list has been executed.
  988. *
  989. * Return: Zero on success, -EINVAL if operation is not supported on this
  990. * device. Hardware specific errors also possible.
  991. */
  992. int vme_dma_list_exec(struct vme_dma_list *list)
  993. {
  994. struct vme_bridge *bridge = list->parent->parent;
  995. int retval;
  996. if (!bridge->dma_list_exec) {
  997. printk(KERN_ERR "Link List DMA execution not supported\n");
  998. return -EINVAL;
  999. }
  1000. mutex_lock(&list->mtx);
  1001. retval = bridge->dma_list_exec(list);
  1002. mutex_unlock(&list->mtx);
  1003. return retval;
  1004. }
  1005. EXPORT_SYMBOL(vme_dma_list_exec);
  1006. /**
  1007. * vme_dma_list_free - Free a VME DMA list.
  1008. * @list: Pointer to VME list.
  1009. *
  1010. * Free the provided DMA list and all its entries.
  1011. *
  1012. * Return: Zero on success, -EINVAL on invalid VME resource, -EBUSY if resource
  1013. * is still in use. Hardware specific errors also possible.
  1014. */
  1015. int vme_dma_list_free(struct vme_dma_list *list)
  1016. {
  1017. struct vme_bridge *bridge = list->parent->parent;
  1018. int retval;
  1019. if (!bridge->dma_list_empty) {
  1020. printk(KERN_WARNING "Emptying of Link Lists not supported\n");
  1021. return -EINVAL;
  1022. }
  1023. if (!mutex_trylock(&list->mtx)) {
  1024. printk(KERN_ERR "Link List in use\n");
  1025. return -EBUSY;
  1026. }
  1027. /*
  1028. * Empty out all of the entries from the DMA list. We need to go to the
  1029. * low level driver as DMA entries are driver specific.
  1030. */
  1031. retval = bridge->dma_list_empty(list);
  1032. if (retval) {
  1033. printk(KERN_ERR "Unable to empty link-list entries\n");
  1034. mutex_unlock(&list->mtx);
  1035. return retval;
  1036. }
  1037. mutex_unlock(&list->mtx);
  1038. kfree(list);
  1039. return retval;
  1040. }
  1041. EXPORT_SYMBOL(vme_dma_list_free);
  1042. /**
  1043. * vme_dma_free - Free a VME DMA resource.
  1044. * @resource: Pointer to VME DMA resource.
  1045. *
  1046. * Free the provided DMA resource so that it may be reallocated.
  1047. *
  1048. * Return: Zero on success, -EINVAL on invalid VME resource, -EBUSY if resource
  1049. * is still active.
  1050. */
  1051. int vme_dma_free(struct vme_resource *resource)
  1052. {
  1053. struct vme_dma_resource *ctrlr;
  1054. if (resource->type != VME_DMA) {
  1055. printk(KERN_ERR "Not a DMA resource\n");
  1056. return -EINVAL;
  1057. }
  1058. ctrlr = list_entry(resource->entry, struct vme_dma_resource, list);
  1059. if (!mutex_trylock(&ctrlr->mtx)) {
  1060. printk(KERN_ERR "Resource busy, can't free\n");
  1061. return -EBUSY;
  1062. }
  1063. if (!(list_empty(&ctrlr->pending) && list_empty(&ctrlr->running))) {
  1064. printk(KERN_WARNING "Resource still processing transfers\n");
  1065. mutex_unlock(&ctrlr->mtx);
  1066. return -EBUSY;
  1067. }
  1068. ctrlr->locked = 0;
  1069. mutex_unlock(&ctrlr->mtx);
  1070. kfree(resource);
  1071. return 0;
  1072. }
  1073. EXPORT_SYMBOL(vme_dma_free);
  1074. void vme_bus_error_handler(struct vme_bridge *bridge,
  1075. unsigned long long address, int am)
  1076. {
  1077. struct list_head *handler_pos = NULL;
  1078. struct vme_error_handler *handler;
  1079. int handler_triggered = 0;
  1080. u32 aspace = vme_get_aspace(am);
  1081. list_for_each(handler_pos, &bridge->vme_error_handlers) {
  1082. handler = list_entry(handler_pos, struct vme_error_handler,
  1083. list);
  1084. if ((aspace == handler->aspace) &&
  1085. (address >= handler->start) &&
  1086. (address < handler->end)) {
  1087. if (!handler->num_errors)
  1088. handler->first_error = address;
  1089. if (handler->num_errors != UINT_MAX)
  1090. handler->num_errors++;
  1091. handler_triggered = 1;
  1092. }
  1093. }
  1094. if (!handler_triggered)
  1095. dev_err(bridge->parent,
  1096. "Unhandled VME access error at address 0x%llx\n",
  1097. address);
  1098. }
  1099. EXPORT_SYMBOL(vme_bus_error_handler);
  1100. struct vme_error_handler *vme_register_error_handler(
  1101. struct vme_bridge *bridge, u32 aspace,
  1102. unsigned long long address, size_t len)
  1103. {
  1104. struct vme_error_handler *handler;
  1105. handler = kmalloc(sizeof(*handler), GFP_ATOMIC);
  1106. if (!handler)
  1107. return NULL;
  1108. handler->aspace = aspace;
  1109. handler->start = address;
  1110. handler->end = address + len;
  1111. handler->num_errors = 0;
  1112. handler->first_error = 0;
  1113. list_add_tail(&handler->list, &bridge->vme_error_handlers);
  1114. return handler;
  1115. }
  1116. EXPORT_SYMBOL(vme_register_error_handler);
/*
 * Remove a handler previously added with vme_register_error_handler()
 * from its bridge's list and free it.
 */
void vme_unregister_error_handler(struct vme_error_handler *handler)
{
	list_del(&handler->list);
	kfree(handler);
}
EXPORT_SYMBOL(vme_unregister_error_handler);
  1123. void vme_irq_handler(struct vme_bridge *bridge, int level, int statid)
  1124. {
  1125. void (*call)(int, int, void *);
  1126. void *priv_data;
  1127. call = bridge->irq[level - 1].callback[statid].func;
  1128. priv_data = bridge->irq[level - 1].callback[statid].priv_data;
  1129. if (call)
  1130. call(level, statid, priv_data);
  1131. else
  1132. printk(KERN_WARNING "Spurious VME interrupt, level:%x, vector:%x\n",
  1133. level, statid);
  1134. }
  1135. EXPORT_SYMBOL(vme_irq_handler);
  1136. /**
  1137. * vme_irq_request - Request a specific VME interrupt.
  1138. * @vdev: Pointer to VME device struct vme_dev assigned to driver instance.
  1139. * @level: Interrupt priority being requested.
  1140. * @statid: Interrupt vector being requested.
  1141. * @callback: Pointer to callback function called when VME interrupt/vector
  1142. * received.
  1143. * @priv_data: Generic pointer that will be passed to the callback function.
  1144. *
  1145. * Request callback to be attached as a handler for VME interrupts with provided
  1146. * level and statid.
  1147. *
  1148. * Return: Zero on success, -EINVAL on invalid vme device, level or if the
  1149. * function is not supported, -EBUSY if the level/statid combination is
  1150. * already in use. Hardware specific errors also possible.
  1151. */
  1152. int vme_irq_request(struct vme_dev *vdev, int level, int statid,
  1153. void (*callback)(int, int, void *),
  1154. void *priv_data)
  1155. {
  1156. struct vme_bridge *bridge;
  1157. bridge = vdev->bridge;
  1158. if (!bridge) {
  1159. printk(KERN_ERR "Can't find VME bus\n");
  1160. return -EINVAL;
  1161. }
  1162. if ((level < 1) || (level > 7)) {
  1163. printk(KERN_ERR "Invalid interrupt level\n");
  1164. return -EINVAL;
  1165. }
  1166. if (!bridge->irq_set) {
  1167. printk(KERN_ERR "Configuring interrupts not supported\n");
  1168. return -EINVAL;
  1169. }
  1170. mutex_lock(&bridge->irq_mtx);
  1171. if (bridge->irq[level - 1].callback[statid].func) {
  1172. mutex_unlock(&bridge->irq_mtx);
  1173. printk(KERN_WARNING "VME Interrupt already taken\n");
  1174. return -EBUSY;
  1175. }
  1176. bridge->irq[level - 1].count++;
  1177. bridge->irq[level - 1].callback[statid].priv_data = priv_data;
  1178. bridge->irq[level - 1].callback[statid].func = callback;
  1179. /* Enable IRQ level */
  1180. bridge->irq_set(bridge, level, 1, 1);
  1181. mutex_unlock(&bridge->irq_mtx);
  1182. return 0;
  1183. }
  1184. EXPORT_SYMBOL(vme_irq_request);
  1185. /**
  1186. * vme_irq_free - Free a VME interrupt.
  1187. * @vdev: Pointer to VME device struct vme_dev assigned to driver instance.
  1188. * @level: Interrupt priority of interrupt being freed.
  1189. * @statid: Interrupt vector of interrupt being freed.
  1190. *
  1191. * Remove previously attached callback from VME interrupt priority/vector.
  1192. */
  1193. void vme_irq_free(struct vme_dev *vdev, int level, int statid)
  1194. {
  1195. struct vme_bridge *bridge;
  1196. bridge = vdev->bridge;
  1197. if (!bridge) {
  1198. printk(KERN_ERR "Can't find VME bus\n");
  1199. return;
  1200. }
  1201. if ((level < 1) || (level > 7)) {
  1202. printk(KERN_ERR "Invalid interrupt level\n");
  1203. return;
  1204. }
  1205. if (!bridge->irq_set) {
  1206. printk(KERN_ERR "Configuring interrupts not supported\n");
  1207. return;
  1208. }
  1209. mutex_lock(&bridge->irq_mtx);
  1210. bridge->irq[level - 1].count--;
  1211. /* Disable IRQ level if no more interrupts attached at this level*/
  1212. if (bridge->irq[level - 1].count == 0)
  1213. bridge->irq_set(bridge, level, 0, 1);
  1214. bridge->irq[level - 1].callback[statid].func = NULL;
  1215. bridge->irq[level - 1].callback[statid].priv_data = NULL;
  1216. mutex_unlock(&bridge->irq_mtx);
  1217. }
  1218. EXPORT_SYMBOL(vme_irq_free);
  1219. /**
  1220. * vme_irq_generate - Generate VME interrupt.
  1221. * @vdev: Pointer to VME device struct vme_dev assigned to driver instance.
  1222. * @level: Interrupt priority at which to assert the interrupt.
  1223. * @statid: Interrupt vector to associate with the interrupt.
  1224. *
  1225. * Generate a VME interrupt of the provided level and with the provided
  1226. * statid.
  1227. *
  1228. * Return: Zero on success, -EINVAL on invalid vme device, level or if the
  1229. * function is not supported. Hardware specific errors also possible.
  1230. */
  1231. int vme_irq_generate(struct vme_dev *vdev, int level, int statid)
  1232. {
  1233. struct vme_bridge *bridge;
  1234. bridge = vdev->bridge;
  1235. if (!bridge) {
  1236. printk(KERN_ERR "Can't find VME bus\n");
  1237. return -EINVAL;
  1238. }
  1239. if ((level < 1) || (level > 7)) {
  1240. printk(KERN_WARNING "Invalid interrupt level\n");
  1241. return -EINVAL;
  1242. }
  1243. if (!bridge->irq_generate) {
  1244. printk(KERN_WARNING "Interrupt generation not supported\n");
  1245. return -EINVAL;
  1246. }
  1247. return bridge->irq_generate(bridge, level, statid);
  1248. }
  1249. EXPORT_SYMBOL(vme_irq_generate);
  1250. /**
  1251. * vme_lm_request - Request a VME location monitor
  1252. * @vdev: Pointer to VME device struct vme_dev assigned to driver instance.
  1253. *
  1254. * Allocate a location monitor resource to the driver. A location monitor
  1255. * allows the driver to monitor accesses to a contiguous number of
  1256. * addresses on the VME bus.
  1257. *
  1258. * Return: Pointer to a VME resource on success or NULL on failure.
  1259. */
  1260. struct vme_resource *vme_lm_request(struct vme_dev *vdev)
  1261. {
  1262. struct vme_bridge *bridge;
  1263. struct list_head *lm_pos = NULL;
  1264. struct vme_lm_resource *allocated_lm = NULL;
  1265. struct vme_lm_resource *lm = NULL;
  1266. struct vme_resource *resource = NULL;
  1267. bridge = vdev->bridge;
  1268. if (!bridge) {
  1269. printk(KERN_ERR "Can't find VME bus\n");
  1270. goto err_bus;
  1271. }
  1272. /* Loop through LM resources */
  1273. list_for_each(lm_pos, &bridge->lm_resources) {
  1274. lm = list_entry(lm_pos,
  1275. struct vme_lm_resource, list);
  1276. if (!lm) {
  1277. printk(KERN_ERR "Registered NULL Location Monitor resource\n");
  1278. continue;
  1279. }
  1280. /* Find an unlocked controller */
  1281. mutex_lock(&lm->mtx);
  1282. if (lm->locked == 0) {
  1283. lm->locked = 1;
  1284. mutex_unlock(&lm->mtx);
  1285. allocated_lm = lm;
  1286. break;
  1287. }
  1288. mutex_unlock(&lm->mtx);
  1289. }
  1290. /* Check to see if we found a resource */
  1291. if (!allocated_lm)
  1292. goto err_lm;
  1293. resource = kmalloc(sizeof(*resource), GFP_KERNEL);
  1294. if (!resource)
  1295. goto err_alloc;
  1296. resource->type = VME_LM;
  1297. resource->entry = &allocated_lm->list;
  1298. return resource;
  1299. err_alloc:
  1300. /* Unlock image */
  1301. mutex_lock(&lm->mtx);
  1302. lm->locked = 0;
  1303. mutex_unlock(&lm->mtx);
  1304. err_lm:
  1305. err_bus:
  1306. return NULL;
  1307. }
  1308. EXPORT_SYMBOL(vme_lm_request);
  1309. /**
  1310. * vme_lm_count - Determine number of VME Addresses monitored
  1311. * @resource: Pointer to VME location monitor resource.
  1312. *
  1313. * The number of contiguous addresses monitored is hardware dependent.
  1314. * Return the number of contiguous addresses monitored by the
  1315. * location monitor.
  1316. *
  1317. * Return: Count of addresses monitored or -EINVAL when provided with an
  1318. * invalid location monitor resource.
  1319. */
  1320. int vme_lm_count(struct vme_resource *resource)
  1321. {
  1322. struct vme_lm_resource *lm;
  1323. if (resource->type != VME_LM) {
  1324. printk(KERN_ERR "Not a Location Monitor resource\n");
  1325. return -EINVAL;
  1326. }
  1327. lm = list_entry(resource->entry, struct vme_lm_resource, list);
  1328. return lm->monitors;
  1329. }
  1330. EXPORT_SYMBOL(vme_lm_count);
  1331. /**
  1332. * vme_lm_set - Configure location monitor
  1333. * @resource: Pointer to VME location monitor resource.
  1334. * @lm_base: Base address to monitor.
  1335. * @aspace: VME address space to monitor.
  1336. * @cycle: VME bus cycle type to monitor.
  1337. *
  1338. * Set the base address, address space and cycle type of accesses to be
  1339. * monitored by the location monitor.
  1340. *
  1341. * Return: Zero on success, -EINVAL when provided with an invalid location
  1342. * monitor resource or function is not supported. Hardware specific
  1343. * errors may also be returned.
  1344. */
  1345. int vme_lm_set(struct vme_resource *resource, unsigned long long lm_base,
  1346. u32 aspace, u32 cycle)
  1347. {
  1348. struct vme_bridge *bridge = find_bridge(resource);
  1349. struct vme_lm_resource *lm;
  1350. if (resource->type != VME_LM) {
  1351. printk(KERN_ERR "Not a Location Monitor resource\n");
  1352. return -EINVAL;
  1353. }
  1354. lm = list_entry(resource->entry, struct vme_lm_resource, list);
  1355. if (!bridge->lm_set) {
  1356. printk(KERN_ERR "vme_lm_set not supported\n");
  1357. return -EINVAL;
  1358. }
  1359. return bridge->lm_set(lm, lm_base, aspace, cycle);
  1360. }
  1361. EXPORT_SYMBOL(vme_lm_set);
  1362. /**
  1363. * vme_lm_get - Retrieve location monitor settings
  1364. * @resource: Pointer to VME location monitor resource.
  1365. * @lm_base: Pointer used to output the base address monitored.
  1366. * @aspace: Pointer used to output the address space monitored.
  1367. * @cycle: Pointer used to output the VME bus cycle type monitored.
  1368. *
  1369. * Retrieve the base address, address space and cycle type of accesses to
  1370. * be monitored by the location monitor.
  1371. *
  1372. * Return: Zero on success, -EINVAL when provided with an invalid location
  1373. * monitor resource or function is not supported. Hardware specific
  1374. * errors may also be returned.
  1375. */
  1376. int vme_lm_get(struct vme_resource *resource, unsigned long long *lm_base,
  1377. u32 *aspace, u32 *cycle)
  1378. {
  1379. struct vme_bridge *bridge = find_bridge(resource);
  1380. struct vme_lm_resource *lm;
  1381. if (resource->type != VME_LM) {
  1382. printk(KERN_ERR "Not a Location Monitor resource\n");
  1383. return -EINVAL;
  1384. }
  1385. lm = list_entry(resource->entry, struct vme_lm_resource, list);
  1386. if (!bridge->lm_get) {
  1387. printk(KERN_ERR "vme_lm_get not supported\n");
  1388. return -EINVAL;
  1389. }
  1390. return bridge->lm_get(lm, lm_base, aspace, cycle);
  1391. }
  1392. EXPORT_SYMBOL(vme_lm_get);
  1393. /**
  1394. * vme_lm_attach - Provide callback for location monitor address
  1395. * @resource: Pointer to VME location monitor resource.
  1396. * @monitor: Offset to which callback should be attached.
  1397. * @callback: Pointer to callback function called when triggered.
  1398. * @data: Generic pointer that will be passed to the callback function.
  1399. *
  1400. * Attach a callback to the specificed offset into the location monitors
  1401. * monitored addresses. A generic pointer is provided to allow data to be
  1402. * passed to the callback when called.
  1403. *
  1404. * Return: Zero on success, -EINVAL when provided with an invalid location
  1405. * monitor resource or function is not supported. Hardware specific
  1406. * errors may also be returned.
  1407. */
  1408. int vme_lm_attach(struct vme_resource *resource, int monitor,
  1409. void (*callback)(void *), void *data)
  1410. {
  1411. struct vme_bridge *bridge = find_bridge(resource);
  1412. struct vme_lm_resource *lm;
  1413. if (resource->type != VME_LM) {
  1414. printk(KERN_ERR "Not a Location Monitor resource\n");
  1415. return -EINVAL;
  1416. }
  1417. lm = list_entry(resource->entry, struct vme_lm_resource, list);
  1418. if (!bridge->lm_attach) {
  1419. printk(KERN_ERR "vme_lm_attach not supported\n");
  1420. return -EINVAL;
  1421. }
  1422. return bridge->lm_attach(lm, monitor, callback, data);
  1423. }
  1424. EXPORT_SYMBOL(vme_lm_attach);
  1425. /**
  1426. * vme_lm_detach - Remove callback for location monitor address
  1427. * @resource: Pointer to VME location monitor resource.
  1428. * @monitor: Offset to which callback should be removed.
  1429. *
  1430. * Remove the callback associated with the specificed offset into the
  1431. * location monitors monitored addresses.
  1432. *
  1433. * Return: Zero on success, -EINVAL when provided with an invalid location
  1434. * monitor resource or function is not supported. Hardware specific
  1435. * errors may also be returned.
  1436. */
  1437. int vme_lm_detach(struct vme_resource *resource, int monitor)
  1438. {
  1439. struct vme_bridge *bridge = find_bridge(resource);
  1440. struct vme_lm_resource *lm;
  1441. if (resource->type != VME_LM) {
  1442. printk(KERN_ERR "Not a Location Monitor resource\n");
  1443. return -EINVAL;
  1444. }
  1445. lm = list_entry(resource->entry, struct vme_lm_resource, list);
  1446. if (!bridge->lm_detach) {
  1447. printk(KERN_ERR "vme_lm_detach not supported\n");
  1448. return -EINVAL;
  1449. }
  1450. return bridge->lm_detach(lm, monitor);
  1451. }
  1452. EXPORT_SYMBOL(vme_lm_detach);
  1453. /**
  1454. * vme_lm_free - Free allocated VME location monitor
  1455. * @resource: Pointer to VME location monitor resource.
  1456. *
  1457. * Free allocation of a VME location monitor.
  1458. *
  1459. * WARNING: This function currently expects that any callbacks that have
  1460. * been attached to the location monitor have been removed.
  1461. *
  1462. * Return: Zero on success, -EINVAL when provided with an invalid location
  1463. * monitor resource.
  1464. */
  1465. void vme_lm_free(struct vme_resource *resource)
  1466. {
  1467. struct vme_lm_resource *lm;
  1468. if (resource->type != VME_LM) {
  1469. printk(KERN_ERR "Not a Location Monitor resource\n");
  1470. return;
  1471. }
  1472. lm = list_entry(resource->entry, struct vme_lm_resource, list);
  1473. mutex_lock(&lm->mtx);
  1474. /* XXX
  1475. * Check to see that there aren't any callbacks still attached, if
  1476. * there are we should probably be detaching them!
  1477. */
  1478. lm->locked = 0;
  1479. mutex_unlock(&lm->mtx);
  1480. kfree(resource);
  1481. }
  1482. EXPORT_SYMBOL(vme_lm_free);
  1483. /**
  1484. * vme_slot_num - Retrieve slot ID
  1485. * @vdev: Pointer to VME device struct vme_dev assigned to driver instance.
  1486. *
  1487. * Retrieve the slot ID associated with the provided VME device.
  1488. *
  1489. * Return: The slot ID on success, -EINVAL if VME bridge cannot be determined
  1490. * or the function is not supported. Hardware specific errors may also
  1491. * be returned.
  1492. */
  1493. int vme_slot_num(struct vme_dev *vdev)
  1494. {
  1495. struct vme_bridge *bridge;
  1496. bridge = vdev->bridge;
  1497. if (!bridge) {
  1498. printk(KERN_ERR "Can't find VME bus\n");
  1499. return -EINVAL;
  1500. }
  1501. if (!bridge->slot_get) {
  1502. printk(KERN_WARNING "vme_slot_num not supported\n");
  1503. return -EINVAL;
  1504. }
  1505. return bridge->slot_get(bridge);
  1506. }
  1507. EXPORT_SYMBOL(vme_slot_num);
  1508. /**
  1509. * vme_bus_num - Retrieve bus number
  1510. * @vdev: Pointer to VME device struct vme_dev assigned to driver instance.
  1511. *
  1512. * Retrieve the bus enumeration associated with the provided VME device.
  1513. *
  1514. * Return: The bus number on success, -EINVAL if VME bridge cannot be
  1515. * determined.
  1516. */
  1517. int vme_bus_num(struct vme_dev *vdev)
  1518. {
  1519. struct vme_bridge *bridge;
  1520. bridge = vdev->bridge;
  1521. if (!bridge) {
  1522. pr_err("Can't find VME bus\n");
  1523. return -EINVAL;
  1524. }
  1525. return bridge->num;
  1526. }
  1527. EXPORT_SYMBOL(vme_bus_num);
  1528. /* - Bridge Registration --------------------------------------------------- */
/*
 * Device-model release callback: frees the struct vme_dev that embeds the
 * struct device once its last reference is dropped.
 */
static void vme_dev_release(struct device *dev)
{
	kfree(dev_to_vme_dev(dev));
}
/*
 * Common bridge initialization: set up the bridge's resource lists and the
 * interrupt mutex. Bridge drivers call this before registering resources.
 * Returns the bridge pointer for call chaining.
 */
struct vme_bridge *vme_init_bridge(struct vme_bridge *bridge)
{
	INIT_LIST_HEAD(&bridge->vme_error_handlers);
	INIT_LIST_HEAD(&bridge->master_resources);
	INIT_LIST_HEAD(&bridge->slave_resources);
	INIT_LIST_HEAD(&bridge->dma_resources);
	INIT_LIST_HEAD(&bridge->lm_resources);
	mutex_init(&bridge->irq_mtx);

	return bridge;
}
EXPORT_SYMBOL(vme_init_bridge);
  1545. int vme_register_bridge(struct vme_bridge *bridge)
  1546. {
  1547. int i;
  1548. int ret = -1;
  1549. mutex_lock(&vme_buses_lock);
  1550. for (i = 0; i < sizeof(vme_bus_numbers) * 8; i++) {
  1551. if ((vme_bus_numbers & (1 << i)) == 0) {
  1552. vme_bus_numbers |= (1 << i);
  1553. bridge->num = i;
  1554. INIT_LIST_HEAD(&bridge->devices);
  1555. list_add_tail(&bridge->bus_list, &vme_bus_list);
  1556. ret = 0;
  1557. break;
  1558. }
  1559. }
  1560. mutex_unlock(&vme_buses_lock);
  1561. return ret;
  1562. }
  1563. EXPORT_SYMBOL(vme_register_bridge);
  1564. void vme_unregister_bridge(struct vme_bridge *bridge)
  1565. {
  1566. struct vme_dev *vdev;
  1567. struct vme_dev *tmp;
  1568. mutex_lock(&vme_buses_lock);
  1569. vme_bus_numbers &= ~(1 << bridge->num);
  1570. list_for_each_entry_safe(vdev, tmp, &bridge->devices, bridge_list) {
  1571. list_del(&vdev->drv_list);
  1572. list_del(&vdev->bridge_list);
  1573. device_unregister(&vdev->dev);
  1574. }
  1575. list_del(&bridge->bus_list);
  1576. mutex_unlock(&vme_buses_lock);
  1577. }
  1578. EXPORT_SYMBOL(vme_unregister_bridge);
  1579. /* - Driver Registration --------------------------------------------------- */
/*
 * Enumerate up to @ndevs devices for @drv on @bridge.
 *
 * For each slot a struct vme_dev is allocated and registered on the VME
 * bus; devices accepted by the driver's match() are linked onto both the
 * driver's and the bridge's device lists, the rest are dropped again.
 * On failure all devices already registered for @drv are rolled back.
 * Caller holds vme_buses_lock (see __vme_register_driver()).
 */
static int __vme_register_driver_bus(struct vme_driver *drv,
	struct vme_bridge *bridge, unsigned int ndevs)
{
	int err;
	unsigned int i;
	struct vme_dev *vdev;
	struct vme_dev *tmp;

	for (i = 0; i < ndevs; i++) {
		vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
		if (!vdev) {
			err = -ENOMEM;
			goto err_devalloc;
		}
		vdev->num = i;
		vdev->bridge = bridge;
		/*
		 * platform_data carries the owning driver; vme_bus_match()
		 * clears it when the driver's match() rejects the device.
		 */
		vdev->dev.platform_data = drv;
		vdev->dev.release = vme_dev_release;
		vdev->dev.parent = bridge->parent;
		vdev->dev.bus = &vme_bus_type;
		dev_set_name(&vdev->dev, "%s.%u-%u", drv->name, bridge->num,
			vdev->num);
		err = device_register(&vdev->dev);
		if (err)
			goto err_reg;
		/* Still set after device_register() => driver matched. */
		if (vdev->dev.platform_data) {
			list_add_tail(&vdev->drv_list, &drv->devices);
			list_add_tail(&vdev->bridge_list, &bridge->devices);
		} else
			device_unregister(&vdev->dev);
	}
	return 0;
err_reg:
	/* device_register() failed: drop the reference it took. */
	put_device(&vdev->dev);
err_devalloc:
	/* Roll back every device already registered for this driver. */
	list_for_each_entry_safe(vdev, tmp, &drv->devices, drv_list) {
		list_del(&vdev->drv_list);
		list_del(&vdev->bridge_list);
		device_unregister(&vdev->dev);
	}
	return err;
}
  1621. static int __vme_register_driver(struct vme_driver *drv, unsigned int ndevs)
  1622. {
  1623. struct vme_bridge *bridge;
  1624. int err = 0;
  1625. mutex_lock(&vme_buses_lock);
  1626. list_for_each_entry(bridge, &vme_bus_list, bus_list) {
  1627. /*
  1628. * This cannot cause trouble as we already have vme_buses_lock
  1629. * and if the bridge is removed, it will have to go through
  1630. * vme_unregister_bridge() to do it (which calls remove() on
  1631. * the bridge which in turn tries to acquire vme_buses_lock and
  1632. * will have to wait).
  1633. */
  1634. err = __vme_register_driver_bus(drv, bridge, ndevs);
  1635. if (err)
  1636. break;
  1637. }
  1638. mutex_unlock(&vme_buses_lock);
  1639. return err;
  1640. }
  1641. /**
  1642. * vme_register_driver - Register a VME driver
  1643. * @drv: Pointer to VME driver structure to register.
  1644. * @ndevs: Maximum number of devices to allow to be enumerated.
  1645. *
  1646. * Register a VME device driver with the VME subsystem.
  1647. *
  1648. * Return: Zero on success, error value on registration failure.
  1649. */
  1650. int vme_register_driver(struct vme_driver *drv, unsigned int ndevs)
  1651. {
  1652. int err;
  1653. drv->driver.name = drv->name;
  1654. drv->driver.bus = &vme_bus_type;
  1655. INIT_LIST_HEAD(&drv->devices);
  1656. err = driver_register(&drv->driver);
  1657. if (err)
  1658. return err;
  1659. err = __vme_register_driver(drv, ndevs);
  1660. if (err)
  1661. driver_unregister(&drv->driver);
  1662. return err;
  1663. }
  1664. EXPORT_SYMBOL(vme_register_driver);
  1665. /**
  1666. * vme_unregister_driver - Unregister a VME driver
  1667. * @drv: Pointer to VME driver structure to unregister.
  1668. *
  1669. * Unregister a VME device driver from the VME subsystem.
  1670. */
  1671. void vme_unregister_driver(struct vme_driver *drv)
  1672. {
  1673. struct vme_dev *dev, *dev_tmp;
  1674. mutex_lock(&vme_buses_lock);
  1675. list_for_each_entry_safe(dev, dev_tmp, &drv->devices, drv_list) {
  1676. list_del(&dev->drv_list);
  1677. list_del(&dev->bridge_list);
  1678. device_unregister(&dev->dev);
  1679. }
  1680. mutex_unlock(&vme_buses_lock);
  1681. driver_unregister(&drv->driver);
  1682. }
  1683. EXPORT_SYMBOL(vme_unregister_driver);
  1684. /* - Bus Registration ------------------------------------------------------ */
  1685. static int vme_bus_match(struct device *dev, struct device_driver *drv)
  1686. {
  1687. struct vme_driver *vme_drv;
  1688. vme_drv = container_of(drv, struct vme_driver, driver);
  1689. if (dev->platform_data == vme_drv) {
  1690. struct vme_dev *vdev = dev_to_vme_dev(dev);
  1691. if (vme_drv->match && vme_drv->match(vdev))
  1692. return 1;
  1693. dev->platform_data = NULL;
  1694. }
  1695. return 0;
  1696. }
  1697. static int vme_bus_probe(struct device *dev)
  1698. {
  1699. struct vme_driver *driver;
  1700. struct vme_dev *vdev = dev_to_vme_dev(dev);
  1701. driver = dev->platform_data;
  1702. if (driver->probe)
  1703. return driver->probe(vdev);
  1704. return -ENODEV;
  1705. }
  1706. static int vme_bus_remove(struct device *dev)
  1707. {
  1708. struct vme_driver *driver;
  1709. struct vme_dev *vdev = dev_to_vme_dev(dev);
  1710. driver = dev->platform_data;
  1711. if (driver->remove)
  1712. return driver->remove(vdev);
  1713. return -ENODEV;
  1714. }
/* The VME bus type: match/probe/remove glue for the driver core. */
struct bus_type vme_bus_type = {
	.name = "vme",
	.match = vme_bus_match,
	.probe = vme_bus_probe,
	.remove = vme_bus_remove,
};
EXPORT_SYMBOL(vme_bus_type);
/*
 * Register the VME bus type with the driver core at subsys_initcall time,
 * so the bus exists before bridge and device drivers initialise.
 */
static int __init vme_init(void)
{
	return bus_register(&vme_bus_type);
}
subsys_initcall(vme_init);