/*
 * Broadcom specific AMBA
 * Bus subsystem
 *
 * Licensed under the GNU/GPL. See COPYING for details.
 */

#include "bcma_private.h"
#include <linux/module.h>
#include <linux/mmc/sdio_func.h>
#include <linux/platform_device.h>
#include <linux/pci.h>
#include <linux/bcma/bcma.h>
#include <linux/slab.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>

MODULE_DESCRIPTION("Broadcom's specific AMBA driver");
MODULE_LICENSE("GPL");
/* contains the number the next bus should get. */
static unsigned int bcma_bus_next_num = 0;

/* bcma_buses_mutex locks the bcma_bus_next_num */
static DEFINE_MUTEX(bcma_buses_mutex);

static int bcma_bus_match(struct device *dev, struct device_driver *drv);
static int bcma_device_probe(struct device *dev);
static int bcma_device_remove(struct device *dev);
static int bcma_device_uevent(struct device *dev, struct kobj_uevent_env *env);
static ssize_t manuf_show(struct device *dev, struct device_attribute *attr,
                          char *buf)
{
        struct bcma_device *core = container_of(dev, struct bcma_device, dev);

        return sprintf(buf, "0x%03X\n", core->id.manuf);
}
static DEVICE_ATTR_RO(manuf);

static ssize_t id_show(struct device *dev, struct device_attribute *attr,
                       char *buf)
{
        struct bcma_device *core = container_of(dev, struct bcma_device, dev);

        return sprintf(buf, "0x%03X\n", core->id.id);
}
static DEVICE_ATTR_RO(id);

static ssize_t rev_show(struct device *dev, struct device_attribute *attr,
                        char *buf)
{
        struct bcma_device *core = container_of(dev, struct bcma_device, dev);

        return sprintf(buf, "0x%02X\n", core->id.rev);
}
static DEVICE_ATTR_RO(rev);

static ssize_t class_show(struct device *dev, struct device_attribute *attr,
                          char *buf)
{
        struct bcma_device *core = container_of(dev, struct bcma_device, dev);

        return sprintf(buf, "0x%X\n", core->id.class);
}
static DEVICE_ATTR_RO(class);

static struct attribute *bcma_device_attrs[] = {
        &dev_attr_manuf.attr,
        &dev_attr_id.attr,
        &dev_attr_rev.attr,
        &dev_attr_class.attr,
        NULL,
};
ATTRIBUTE_GROUPS(bcma_device);
static struct bus_type bcma_bus_type = {
        .name = "bcma",
        .match = bcma_bus_match,
        .probe = bcma_device_probe,
        .remove = bcma_device_remove,
        .uevent = bcma_device_uevent,
        .dev_groups = bcma_device_groups,
};
static u16 bcma_cc_core_id(struct bcma_bus *bus)
{
        if (bus->chipinfo.id == BCMA_CHIP_ID_BCM4706)
                return BCMA_CORE_4706_CHIPCOMMON;
        return BCMA_CORE_CHIPCOMMON;
}

struct bcma_device *bcma_find_core_unit(struct bcma_bus *bus, u16 coreid,
                                        u8 unit)
{
        struct bcma_device *core;

        list_for_each_entry(core, &bus->cores, list) {
                if (core->id.id == coreid && core->core_unit == unit)
                        return core;
        }
        return NULL;
}
EXPORT_SYMBOL_GPL(bcma_find_core_unit);
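
/*
 * bcma_wait_value - poll a 32-bit core register until the bits selected by
 * @mask read back as @value, or until @timeout (in jiffies) expires.
 * Returns true on success, false (with a warning) on timeout.
 */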
bool bcma_wait_value(struct bcma_device *core, u16 reg, u32 mask, u32 value,
                     int timeout)
{
        unsigned long deadline = jiffies + timeout;
        u32 val;

        do {
                val = bcma_read32(core, reg);
                if ((val & mask) == value)
                        return true;
                cpu_relax();
                udelay(10);
        } while (!time_after_eq(jiffies, deadline));

        bcma_warn(core->bus, "Timeout waiting for register 0x%04X!\n", reg);

        return false;
}
static void bcma_release_core_dev(struct device *dev)
{
        struct bcma_device *core = container_of(dev, struct bcma_device, dev);

        if (core->io_addr)
                iounmap(core->io_addr);
        if (core->io_wrap)
                iounmap(core->io_wrap);
        kfree(core);
}
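
/*
 * Cores giving access to flash (and thus to SPROM/NVRAM data) have to be
 * registered before SPROM parsing; see bcma_bus_register().
 */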
static bool bcma_is_core_needed_early(u16 core_id)
{
        switch (core_id) {
        case BCMA_CORE_NS_NAND:
        case BCMA_CORE_NS_QSPI:
                return true;
        }

        return false;
}
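
/*
 * Find the device tree child node of the host whose translated "reg" address
 * matches the core's base address.
 */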
static struct device_node *bcma_of_find_child_device(struct platform_device *parent,
                                                     struct bcma_device *core)
{
        struct device_node *node;
        u64 size;
        const __be32 *reg;

        if (!parent || !parent->dev.of_node)
                return NULL;

        for_each_child_of_node(parent->dev.of_node, node) {
                reg = of_get_address(node, 0, &size, NULL);
                if (!reg)
                        continue;
                if (of_translate_address(node, reg) == core->addr)
                        return node;
        }

        return NULL;
}
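
/*
 * Parse IRQ @num for a core: prefer the core's own device tree node and fall
 * back to a raw interrupt-map lookup on the parent, keyed by the core's
 * address.
 */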
static int bcma_of_irq_parse(struct platform_device *parent,
                             struct bcma_device *core,
                             struct of_phandle_args *out_irq, int num)
{
        __be32 laddr[1];
        int rc;

        if (core->dev.of_node) {
                rc = of_irq_parse_one(core->dev.of_node, num, out_irq);
                if (!rc)
                        return rc;
        }

        out_irq->np = parent->dev.of_node;
        out_irq->args_count = 1;
        out_irq->args[0] = num;

        laddr[0] = cpu_to_be32(core->addr);
        return of_irq_parse_raw(laddr, out_irq);
}
static unsigned int bcma_of_get_irq(struct platform_device *parent,
                                    struct bcma_device *core, int num)
{
        struct of_phandle_args out_irq;
        int ret;

        if (!IS_ENABLED(CONFIG_OF_IRQ) || !parent || !parent->dev.of_node)
                return 0;

        ret = bcma_of_irq_parse(parent, core, &out_irq, num);
        if (ret) {
                bcma_debug(core->bus, "bcma_of_get_irq() failed with rc=%d\n",
                           ret);
                return 0;
        }

        return irq_create_of_mapping(&out_irq);
}

static void bcma_of_fill_device(struct platform_device *parent,
                                struct bcma_device *core)
{
        struct device_node *node;

        if (!IS_ENABLED(CONFIG_OF_IRQ))
                return;

        node = bcma_of_find_child_device(parent, core);
        if (node)
                core->dev.of_node = node;

        core->irq = bcma_of_get_irq(parent, core, 0);

        of_dma_configure(&core->dev, node);
}
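
/*
 * bcma_core_irq - get the IRQ assigned to a core. PCI hosts use the PCI
 * device's IRQ; on SoC hosts the routed MIPS IRQ (offset by 2) is used when a
 * MIPS core is present, otherwise the IRQ is looked up in the device tree.
 */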
unsigned int bcma_core_irq(struct bcma_device *core, int num)
{
        struct bcma_bus *bus = core->bus;
        unsigned int mips_irq;

        switch (bus->hosttype) {
        case BCMA_HOSTTYPE_PCI:
                return bus->host_pci->irq;
        case BCMA_HOSTTYPE_SOC:
                if (bus->drv_mips.core && num == 0) {
                        mips_irq = bcma_core_mips_irq(core);
                        return mips_irq <= 4 ? mips_irq + 2 : 0;
                }
                if (bus->host_pdev)
                        return bcma_of_get_irq(bus->host_pdev, core, num);
                return 0;
        case BCMA_HOSTTYPE_SDIO:
                return 0;
        }

        return 0;
}
EXPORT_SYMBOL(bcma_core_irq);
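
/*
 * Prepare a core's struct device before registration: set the release
 * callback, bus type and name, and pick parent, DMA device and IRQ according
 * to the host type.
 */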
void bcma_prepare_core(struct bcma_bus *bus, struct bcma_device *core)
{
        core->dev.release = bcma_release_core_dev;
        core->dev.bus = &bcma_bus_type;
        dev_set_name(&core->dev, "bcma%d:%d", bus->num, core->core_index);

        switch (bus->hosttype) {
        case BCMA_HOSTTYPE_PCI:
                core->dev.parent = &bus->host_pci->dev;
                core->dma_dev = &bus->host_pci->dev;
                core->irq = bus->host_pci->irq;
                break;
        case BCMA_HOSTTYPE_SOC:
                if (IS_ENABLED(CONFIG_OF) && bus->host_pdev) {
                        core->dma_dev = &bus->host_pdev->dev;
                        core->dev.parent = &bus->host_pdev->dev;
                        bcma_of_fill_device(bus->host_pdev, core);
                } else {
                        core->dev.dma_mask = &core->dev.coherent_dma_mask;
                        core->dma_dev = &core->dev;
                }
                break;
        case BCMA_HOSTTYPE_SDIO:
                break;
        }
}
struct device *bcma_bus_get_host_dev(struct bcma_bus *bus)
{
        switch (bus->hosttype) {
        case BCMA_HOSTTYPE_PCI:
                if (bus->host_pci)
                        return &bus->host_pci->dev;
                else
                        return NULL;
        case BCMA_HOSTTYPE_SOC:
                if (bus->host_pdev)
                        return &bus->host_pdev->dev;
                else
                        return NULL;
        case BCMA_HOSTTYPE_SDIO:
                if (bus->host_sdio)
                        return &bus->host_sdio->dev;
                else
                        return NULL;
        }
        return NULL;
}
void bcma_init_bus(struct bcma_bus *bus)
{
        mutex_lock(&bcma_buses_mutex);
        bus->num = bcma_bus_next_num++;
        mutex_unlock(&bcma_buses_mutex);

        INIT_LIST_HEAD(&bus->cores);
        bus->nr_cores = 0;

        bcma_detect_chip(bus);
}
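
/*
 * Register a single core with the driver core. If device_register() fails,
 * the reference it took must still be dropped with put_device(), never by
 * freeing the device directly.
 */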
static void bcma_register_core(struct bcma_bus *bus, struct bcma_device *core)
{
        int err;

        err = device_register(&core->dev);
        if (err) {
                bcma_err(bus, "Could not register dev for core 0x%03X\n",
                         core->id.id);
                put_device(&core->dev);
                return;
        }
        core->dev_registered = true;
}
static int bcma_register_devices(struct bcma_bus *bus)
{
        struct bcma_device *core;
        int err;

        list_for_each_entry(core, &bus->cores, list) {
                /* We handle these cores ourselves */
                switch (core->id.id) {
                case BCMA_CORE_4706_CHIPCOMMON:
                case BCMA_CORE_CHIPCOMMON:
                case BCMA_CORE_NS_CHIPCOMMON_B:
                case BCMA_CORE_PCI:
                case BCMA_CORE_PCIE:
                case BCMA_CORE_PCIE2:
                case BCMA_CORE_MIPS_74K:
                case BCMA_CORE_4706_MAC_GBIT_COMMON:
                        continue;
                }

                /* Early cores were already registered */
                if (bcma_is_core_needed_early(core->id.id))
                        continue;

                /* Only the first GMAC core on the BCM4706 is connected and working */
                if (core->id.id == BCMA_CORE_4706_MAC_GBIT &&
                    core->core_unit > 0)
                        continue;

                bcma_register_core(bus, core);
        }

#ifdef CONFIG_BCMA_PFLASH
        if (bus->drv_cc.pflash.present) {
                err = platform_device_register(&bcma_pflash_dev);
                if (err)
                        bcma_err(bus, "Error registering parallel flash\n");
        }
#endif

#ifdef CONFIG_BCMA_SFLASH
        if (bus->drv_cc.sflash.present) {
                err = platform_device_register(&bcma_sflash_dev);
                if (err)
                        bcma_err(bus, "Error registering serial flash\n");
        }
#endif

#ifdef CONFIG_BCMA_NFLASH
        if (bus->drv_cc.nflash.present) {
                err = platform_device_register(&bcma_nflash_dev);
                if (err)
                        bcma_err(bus, "Error registering NAND flash\n");
        }
#endif

        err = bcma_gpio_init(&bus->drv_cc);
        if (err == -ENOTSUPP)
                bcma_debug(bus, "GPIO driver not activated\n");
        else if (err)
                bcma_err(bus, "Error registering GPIO driver: %i\n", err);

        if (bus->hosttype == BCMA_HOSTTYPE_SOC) {
                err = bcma_chipco_watchdog_register(&bus->drv_cc);
                if (err)
                        bcma_err(bus, "Error registering watchdog driver\n");
        }

        return 0;
}
void bcma_unregister_cores(struct bcma_bus *bus)
{
        struct bcma_device *core, *tmp;

        list_for_each_entry_safe(core, tmp, &bus->cores, list) {
                if (!core->dev_registered)
                        continue;
                list_del(&core->list);
                device_unregister(&core->dev);
        }
        if (bus->hosttype == BCMA_HOSTTYPE_SOC)
                platform_device_unregister(bus->drv_cc.watchdog);

        /* Now no one uses the internally-handled cores, so we can free them */
        list_for_each_entry_safe(core, tmp, &bus->cores, list) {
                list_del(&core->list);
                kfree(core);
        }
}
int bcma_bus_register(struct bcma_bus *bus)
{
        int err;
        struct bcma_device *core;
        struct device *dev;

        /* Scan for devices (cores) */
        err = bcma_bus_scan(bus);
        if (err) {
                bcma_err(bus, "Failed to scan: %d\n", err);
                return err;
        }

        /* Early init CC core */
        core = bcma_find_core(bus, bcma_cc_core_id(bus));
        if (core) {
                bus->drv_cc.core = core;
                bcma_core_chipcommon_early_init(&bus->drv_cc);
        }

        /* Early init PCIE core */
        core = bcma_find_core(bus, BCMA_CORE_PCIE);
        if (core) {
                bus->drv_pci[0].core = core;
                bcma_core_pci_early_init(&bus->drv_pci[0]);
        }

        dev = bcma_bus_get_host_dev(bus);
        if (dev)
                of_platform_default_populate(dev->of_node, NULL, dev);

        /* Cores providing flash access go before SPROM init */
        list_for_each_entry(core, &bus->cores, list) {
                if (bcma_is_core_needed_early(core->id.id))
                        bcma_register_core(bus, core);
        }

        /* Try to get SPROM */
        err = bcma_sprom_get(bus);
        if (err == -ENOENT) {
                bcma_err(bus, "No SPROM available\n");
        } else if (err)
                bcma_err(bus, "Failed to get SPROM: %d\n", err);

        /* Init CC core */
        core = bcma_find_core(bus, bcma_cc_core_id(bus));
        if (core) {
                bus->drv_cc.core = core;
                bcma_core_chipcommon_init(&bus->drv_cc);
        }

        /* Init NS ChipCommon B core */
        core = bcma_find_core(bus, BCMA_CORE_NS_CHIPCOMMON_B);
        if (core) {
                bus->drv_cc_b.core = core;
                bcma_core_chipcommon_b_init(&bus->drv_cc_b);
        }

        /* Init MIPS core */
        core = bcma_find_core(bus, BCMA_CORE_MIPS_74K);
        if (core) {
                bus->drv_mips.core = core;
                bcma_core_mips_init(&bus->drv_mips);
        }

        /* Init first PCIe core */
        core = bcma_find_core_unit(bus, BCMA_CORE_PCIE, 0);
        if (core) {
                bus->drv_pci[0].core = core;
                bcma_core_pci_init(&bus->drv_pci[0]);
        }

        /* Init second PCIe core */
        core = bcma_find_core_unit(bus, BCMA_CORE_PCIE, 1);
        if (core) {
                bus->drv_pci[1].core = core;
                bcma_core_pci_init(&bus->drv_pci[1]);
        }

        /* Init PCIe Gen 2 core */
        core = bcma_find_core_unit(bus, BCMA_CORE_PCIE2, 0);
        if (core) {
                bus->drv_pcie2.core = core;
                bcma_core_pcie2_init(&bus->drv_pcie2);
        }

        /* Init GBIT MAC COMMON core */
        core = bcma_find_core(bus, BCMA_CORE_4706_MAC_GBIT_COMMON);
        if (core) {
                bus->drv_gmac_cmn.core = core;
                bcma_core_gmac_cmn_init(&bus->drv_gmac_cmn);
        }

        /* Register found cores */
        bcma_register_devices(bus);

        bcma_info(bus, "Bus registered\n");

        return 0;
}
void bcma_bus_unregister(struct bcma_bus *bus)
{
        int err;

        err = bcma_gpio_unregister(&bus->drv_cc);
        if (err == -EBUSY)
                bcma_err(bus, "Some GPIOs are still in use.\n");
        else if (err)
                bcma_err(bus, "Can not unregister GPIO driver: %i\n", err);

        bcma_core_chipcommon_b_free(&bus->drv_cc_b);

        bcma_unregister_cores(bus);
}
/*
 * This is a special version of the bus registration function, designed for
 * SoCs. It scans the bus and performs basic initialization of the main cores
 * only. Note that it requires memory allocation, but it won't try to sleep.
 */
int __init bcma_bus_early_register(struct bcma_bus *bus)
{
        int err;
        struct bcma_device *core;

        /* Scan for devices (cores) */
        err = bcma_bus_scan(bus);
        if (err) {
                bcma_err(bus, "Failed to scan bus: %d\n", err);
                return -1;
        }

        /* Early init CC core */
        core = bcma_find_core(bus, bcma_cc_core_id(bus));
        if (core) {
                bus->drv_cc.core = core;
                bcma_core_chipcommon_early_init(&bus->drv_cc);
        }

        /* Early init MIPS core */
        core = bcma_find_core(bus, BCMA_CORE_MIPS_74K);
        if (core) {
                bus->drv_mips.core = core;
                bcma_core_mips_early_init(&bus->drv_mips);
        }

        bcma_info(bus, "Early bus registered\n");

        return 0;
}
#ifdef CONFIG_PM
int bcma_bus_suspend(struct bcma_bus *bus)
{
        struct bcma_device *core;

        list_for_each_entry(core, &bus->cores, list) {
                struct device_driver *drv = core->dev.driver;

                if (drv) {
                        struct bcma_driver *adrv = container_of(drv, struct bcma_driver, drv);

                        if (adrv->suspend)
                                adrv->suspend(core);
                }
        }
        return 0;
}

int bcma_bus_resume(struct bcma_bus *bus)
{
        struct bcma_device *core;

        /* Init CC core */
        if (bus->drv_cc.core) {
                bus->drv_cc.setup_done = false;
                bcma_core_chipcommon_init(&bus->drv_cc);
        }

        list_for_each_entry(core, &bus->cores, list) {
                struct device_driver *drv = core->dev.driver;

                if (drv) {
                        struct bcma_driver *adrv = container_of(drv, struct bcma_driver, drv);

                        if (adrv->resume)
                                adrv->resume(core);
                }
        }

        return 0;
}
#endif
int __bcma_driver_register(struct bcma_driver *drv, struct module *owner)
{
        drv->drv.name = drv->name;
        drv->drv.bus = &bcma_bus_type;
        drv->drv.owner = owner;

        return driver_register(&drv->drv);
}
EXPORT_SYMBOL_GPL(__bcma_driver_register);

void bcma_driver_unregister(struct bcma_driver *drv)
{
        driver_unregister(&drv->drv);
}
EXPORT_SYMBOL_GPL(bcma_driver_unregister);
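
/*
 * Match a core against a driver's ID table. BCMA_ANY_MANUF/ID/REV/CLASS
 * entries act as wildcards; the table ends with an entry whose manuf, id and
 * rev are all zero.
 */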
static int bcma_bus_match(struct device *dev, struct device_driver *drv)
{
        struct bcma_device *core = container_of(dev, struct bcma_device, dev);
        struct bcma_driver *adrv = container_of(drv, struct bcma_driver, drv);
        const struct bcma_device_id *cid = &core->id;
        const struct bcma_device_id *did;

        for (did = adrv->id_table; did->manuf || did->id || did->rev; did++) {
                if ((did->manuf == cid->manuf || did->manuf == BCMA_ANY_MANUF) &&
                    (did->id == cid->id || did->id == BCMA_ANY_ID) &&
                    (did->rev == cid->rev || did->rev == BCMA_ANY_REV) &&
                    (did->class == cid->class || did->class == BCMA_ANY_CLASS))
                        return 1;
        }
        return 0;
}
static int bcma_device_probe(struct device *dev)
{
        struct bcma_device *core = container_of(dev, struct bcma_device, dev);
        struct bcma_driver *adrv = container_of(dev->driver, struct bcma_driver,
                                                drv);
        int err = 0;

        get_device(dev);
        if (adrv->probe)
                err = adrv->probe(core);
        if (err)
                put_device(dev);

        return err;
}

static int bcma_device_remove(struct device *dev)
{
        struct bcma_device *core = container_of(dev, struct bcma_device, dev);
        struct bcma_driver *adrv = container_of(dev->driver, struct bcma_driver,
                                                drv);

        if (adrv->remove)
                adrv->remove(core);
        put_device(dev);

        return 0;
}

static int bcma_device_uevent(struct device *dev, struct kobj_uevent_env *env)
{
        struct bcma_device *core = container_of(dev, struct bcma_device, dev);

        return add_uevent_var(env,
                              "MODALIAS=bcma:m%04Xid%04Xrev%02Xcl%02X",
                              core->id.manuf, core->id.id,
                              core->id.rev, core->id.class);
}
static unsigned int bcma_bus_registered;

/*
 * If built in, the bus has to be registered early, before any driver calls
 * bcma_driver_register(); otherwise registering a driver would trigger a BUG
 * in driver_register().
 */
static int __init bcma_init_bus_register(void)
{
        int err;

        if (bcma_bus_registered)
                return 0;

        err = bus_register(&bcma_bus_type);
        if (!err)
                bcma_bus_registered = 1;

        return err;
}
#ifndef MODULE
fs_initcall(bcma_init_bus_register);
#endif
/* Main initialization has to be done with SPI/mtd/NAND/SPROM available */
static int __init bcma_modinit(void)
{
        int err;

        err = bcma_init_bus_register();
        if (err)
                return err;

        err = bcma_host_soc_register_driver();
        if (err) {
                pr_err("SoC host initialization failed\n");
                err = 0;
        }
#ifdef CONFIG_BCMA_HOST_PCI
        err = bcma_host_pci_init();
        if (err) {
                pr_err("PCI host initialization failed\n");
                err = 0;
        }
#endif

        return err;
}
module_init(bcma_modinit);

static void __exit bcma_modexit(void)
{
#ifdef CONFIG_BCMA_HOST_PCI
        bcma_host_pci_exit();
#endif
        bcma_host_soc_unregister_driver();
        bus_unregister(&bcma_bus_type);
}
module_exit(bcma_modexit)