/* core-card.c */
  1. /*
  2. * Copyright (C) 2005-2007 Kristian Hoegsberg <krh@bitplanet.net>
  3. *
  4. * This program is free software; you can redistribute it and/or modify
  5. * it under the terms of the GNU General Public License as published by
  6. * the Free Software Foundation; either version 2 of the License, or
  7. * (at your option) any later version.
  8. *
  9. * This program is distributed in the hope that it will be useful,
  10. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. * GNU General Public License for more details.
  13. *
  14. * You should have received a copy of the GNU General Public License
  15. * along with this program; if not, write to the Free Software Foundation,
  16. * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  17. */
  18. #include <linux/bug.h>
  19. #include <linux/completion.h>
  20. #include <linux/crc-itu-t.h>
  21. #include <linux/device.h>
  22. #include <linux/errno.h>
  23. #include <linux/firewire.h>
  24. #include <linux/firewire-constants.h>
  25. #include <linux/jiffies.h>
  26. #include <linux/kernel.h>
  27. #include <linux/kref.h>
  28. #include <linux/list.h>
  29. #include <linux/module.h>
  30. #include <linux/mutex.h>
  31. #include <linux/spinlock.h>
  32. #include <linux/workqueue.h>
  33. #include <asm/atomic.h>
  34. #include <asm/byteorder.h>
  35. #include "core.h"
  36. int fw_compute_block_crc(__be32 *block)
  37. {
  38. int length;
  39. u16 crc;
  40. length = (be32_to_cpu(block[0]) >> 16) & 0xff;
  41. crc = crc_itu_t(0, (u8 *)&block[1], length * 4);
  42. *block |= cpu_to_be32(crc);
  43. return length;
  44. }
/* Serializes card_list, descriptor_list, and the config ROM state below. */
static DEFINE_MUTEX(card_mutex);
/* All cards added via fw_card_add(); protected by card_mutex. */
static LIST_HEAD(card_list);

/* Extra config ROM descriptors from fw_core_add_descriptor(); under card_mutex. */
static LIST_HEAD(descriptor_list);
/* Root directory entries contributed by descriptor_list (immediates count extra). */
static int descriptor_count;

/* Scratch buffer for generate_config_rom(); protected by card_mutex. */
static __be32 tmp_config_rom[256];
/* ROM header, bus info block, root dir header, capabilities = 7 quadlets */
static size_t config_rom_length = 1 + 4 + 1 + 1;
/* Field builders for the bus information block quadlets of the config ROM. */
#define BIB_CRC(v)		((v) << 0)
#define BIB_CRC_LENGTH(v)	((v) << 16)
#define BIB_INFO_LENGTH(v)	((v) << 24)
#define BIB_BUS_NAME		0x31333934 /* "1394" */
#define BIB_LINK_SPEED(v)	((v) << 0)
#define BIB_GENERATION(v)	((v) << 4)
#define BIB_MAX_ROM(v)		((v) << 8)
#define BIB_MAX_RECEIVE(v)	((v) << 12)
#define BIB_CYC_CLK_ACC(v)	((v) << 16)
#define BIB_PMC			((1) << 27)
#define BIB_BMC			((1) << 28)
#define BIB_ISC			((1) << 29)
#define BIB_CMC			((1) << 30)
#define BIB_IRMC		((1) << 31)
#define NODE_CAPABILITIES	0x0c0083c0 /* per IEEE 1394 clause 8.3.2.6.5.2 */

/*
 * IEEE-1394 specifies a default SPLIT_TIMEOUT value of 800 cycles (100 ms),
 * but we have to make it longer because there are many devices whose firmware
 * is just too slow for that.
 */
#define DEFAULT_SPLIT_TIMEOUT	(2 * 8000)

#define CANON_OUI		0x000085
/*
 * Build the complete configuration ROM for @card into @config_rom:
 * bus info block, root directory, and the data blocks of all registered
 * descriptors, with CRCs computed for every block.  Reads
 * descriptor_list/descriptor_count, so the caller must hold card_mutex.
 */
static void generate_config_rom(struct fw_card *card, __be32 *config_rom)
{
	struct fw_descriptor *desc;
	int i, j, k, length;

	/*
	 * Initialize contents of config rom buffer.  On the OHCI
	 * controller, block reads to the config rom accesses the host
	 * memory, but quadlet read access the hardware bus info block
	 * registers.  That's just crack, but it means we should make
	 * sure the contents of bus info block in host memory matches
	 * the version stored in the OHCI registers.
	 */

	config_rom[0] = cpu_to_be32(
		BIB_CRC_LENGTH(4) | BIB_INFO_LENGTH(4) | BIB_CRC(0));
	config_rom[1] = cpu_to_be32(BIB_BUS_NAME);
	config_rom[2] = cpu_to_be32(
		/* generation cycles through 2..15 per IEEE 1394a */
		BIB_LINK_SPEED(card->link_speed) |
		BIB_GENERATION(card->config_rom_generation++ % 14 + 2) |
		BIB_MAX_ROM(2) |
		BIB_MAX_RECEIVE(card->max_receive) |
		BIB_BMC | BIB_ISC | BIB_CMC | BIB_IRMC);
	config_rom[3] = cpu_to_be32(card->guid >> 32);
	config_rom[4] = cpu_to_be32(card->guid);

	/* Generate root directory. */
	config_rom[6] = cpu_to_be32(NODE_CAPABILITIES);
	i = 7;                      /* next root directory entry */
	j = 7 + descriptor_count;   /* where descriptor data blocks start */

	/* Generate root directory entries for descriptors. */
	list_for_each_entry (desc, &descriptor_list, link) {
		if (desc->immediate > 0)
			config_rom[i++] = cpu_to_be32(desc->immediate);
		/* leaf/directory entry: key plus offset to the data block */
		config_rom[i] = cpu_to_be32(desc->key | (j - i));
		i++;
		j += desc->length;
	}

	/* Update root directory length. */
	config_rom[5] = cpu_to_be32((i - 5 - 1) << 16);

	/* End of root directory, now copy in descriptors. */
	list_for_each_entry (desc, &descriptor_list, link) {
		for (k = 0; k < desc->length; k++)
			config_rom[i + k] = cpu_to_be32(desc->data[k]);
		i += desc->length;
	}

	/*
	 * Calculate CRCs for all blocks in the config rom.  This
	 * assumes that CRC length and info length are identical for
	 * the bus info block, which is always the case for this
	 * implementation.
	 */
	for (i = 0; i < j; i += length + 1)
		length = fw_compute_block_crc(config_rom + i);

	WARN_ON(j != config_rom_length);
}
  125. static void update_config_roms(void)
  126. {
  127. struct fw_card *card;
  128. list_for_each_entry (card, &card_list, link) {
  129. generate_config_rom(card, tmp_config_rom);
  130. card->driver->set_config_rom(card, tmp_config_rom,
  131. config_rom_length);
  132. }
  133. }
  134. static size_t required_space(struct fw_descriptor *desc)
  135. {
  136. /* descriptor + entry into root dir + optional immediate entry */
  137. return desc->length + 1 + (desc->immediate > 0 ? 1 : 0);
  138. }
  139. int fw_core_add_descriptor(struct fw_descriptor *desc)
  140. {
  141. size_t i;
  142. int ret;
  143. /*
  144. * Check descriptor is valid; the length of all blocks in the
  145. * descriptor has to add up to exactly the length of the
  146. * block.
  147. */
  148. i = 0;
  149. while (i < desc->length)
  150. i += (desc->data[i] >> 16) + 1;
  151. if (i != desc->length)
  152. return -EINVAL;
  153. mutex_lock(&card_mutex);
  154. if (config_rom_length + required_space(desc) > 256) {
  155. ret = -EBUSY;
  156. } else {
  157. list_add_tail(&desc->link, &descriptor_list);
  158. config_rom_length += required_space(desc);
  159. descriptor_count++;
  160. if (desc->immediate > 0)
  161. descriptor_count++;
  162. update_config_roms();
  163. ret = 0;
  164. }
  165. mutex_unlock(&card_mutex);
  166. return ret;
  167. }
  168. EXPORT_SYMBOL(fw_core_add_descriptor);
  169. void fw_core_remove_descriptor(struct fw_descriptor *desc)
  170. {
  171. mutex_lock(&card_mutex);
  172. list_del(&desc->link);
  173. config_rom_length -= required_space(desc);
  174. descriptor_count--;
  175. if (desc->immediate > 0)
  176. descriptor_count--;
  177. update_config_roms();
  178. mutex_unlock(&card_mutex);
  179. }
  180. EXPORT_SYMBOL(fw_core_remove_descriptor);
  181. static int reset_bus(struct fw_card *card, bool short_reset)
  182. {
  183. int reg = short_reset ? 5 : 1;
  184. int bit = short_reset ? PHY_BUS_SHORT_RESET : PHY_BUS_RESET;
  185. return card->driver->update_phy_reg(card, reg, 0, bit);
  186. }
  187. void fw_schedule_bus_reset(struct fw_card *card, bool delayed, bool short_reset)
  188. {
  189. /* We don't try hard to sort out requests of long vs. short resets. */
  190. card->br_short = short_reset;
  191. /* Use an arbitrary short delay to combine multiple reset requests. */
  192. fw_card_get(card);
  193. if (!queue_delayed_work(fw_workqueue, &card->br_work,
  194. delayed ? DIV_ROUND_UP(HZ, 100) : 0))
  195. fw_card_put(card);
  196. }
  197. EXPORT_SYMBOL(fw_schedule_bus_reset);
  198. static void br_work(struct work_struct *work)
  199. {
  200. struct fw_card *card = container_of(work, struct fw_card, br_work.work);
  201. /* Delay for 2s after last reset per IEEE 1394 clause 8.2.1. */
  202. if (card->reset_jiffies != 0 &&
  203. time_before64(get_jiffies_64(), card->reset_jiffies + 2 * HZ)) {
  204. if (!queue_delayed_work(fw_workqueue, &card->br_work, 2 * HZ))
  205. fw_card_put(card);
  206. return;
  207. }
  208. fw_send_phy_config(card, FW_PHY_CONFIG_NO_NODE_ID, card->generation,
  209. FW_PHY_CONFIG_CURRENT_GAP_COUNT);
  210. reset_bus(card, card->br_short);
  211. fw_card_put(card);
  212. }
  213. static void allocate_broadcast_channel(struct fw_card *card, int generation)
  214. {
  215. int channel, bandwidth = 0;
  216. if (!card->broadcast_channel_allocated) {
  217. fw_iso_resource_manage(card, generation, 1ULL << 31,
  218. &channel, &bandwidth, true);
  219. if (channel != 31) {
  220. fw_notify("failed to allocate broadcast channel\n");
  221. return;
  222. }
  223. card->broadcast_channel_allocated = true;
  224. }
  225. device_for_each_child(card->device, (void *)(long)generation,
  226. fw_device_set_broadcast_channel);
  227. }
/*
 * Gap counts indexed by the root node's hop count; used by bm_work()
 * for gap count optimization (see 1394a table E-1 reference there).
 */
static const char gap_count_table[] = {
	63, 5, 7, 8, 10, 13, 16, 18, 21, 24, 26, 29, 32, 35, 37, 40
};
  231. void fw_schedule_bm_work(struct fw_card *card, unsigned long delay)
  232. {
  233. fw_card_get(card);
  234. if (!schedule_delayed_work(&card->bm_work, delay))
  235. fw_card_put(card);
  236. }
/*
 * Bus manager work, scheduled via fw_schedule_bm_work() after bus
 * resets.  Tries to become bus manager by a compare-swap on the IRM's
 * BUS_MANAGER_ID register; steps in for unusable IRMs by forcing the
 * local node to become root.  As bus manager it verifies the cycle
 * master and optimizes the gap count, issuing one more bus reset if
 * root or gap count must change, and allocates the broadcast channel
 * when the local node is IRM.
 */
static void bm_work(struct work_struct *work)
{
	struct fw_card *card = container_of(work, struct fw_card, bm_work.work);
	struct fw_device *root_device, *irm_device;
	struct fw_node *root_node;
	int root_id, new_root_id, irm_id, bm_id, local_id;
	int gap_count, generation, grace, rcode;
	bool do_reset = false;
	bool root_device_is_running;
	bool root_device_is_cmc;
	bool irm_is_1394_1995_only;
	bool keep_this_irm;
	__be32 transaction_data[2];

	spin_lock_irq(&card->lock);

	if (card->local_node == NULL) {
		spin_unlock_irq(&card->lock);
		goto out_put_card;
	}

	/* Snapshot topology state under the lock. */
	generation = card->generation;

	root_node = card->root_node;
	fw_node_get(root_node);
	root_device = root_node->data;
	root_device_is_running = root_device &&
			atomic_read(&root_device->state) == FW_DEVICE_RUNNING;
	root_device_is_cmc = root_device && root_device->cmc;

	irm_device = card->irm_node->data;
	/* bus_info[2] low nibble 0 means the IRM predates 1394a */
	irm_is_1394_1995_only = irm_device && irm_device->config_rom &&
			(irm_device->config_rom[2] & 0x000000f0) == 0;

	/* Canon MV5i works unreliably if it is not root node. */
	keep_this_irm = irm_device && irm_device->config_rom &&
			irm_device->config_rom[3] >> 8 == CANON_OUI;

	root_id  = root_node->node_id;
	irm_id   = card->irm_node->node_id;
	local_id = card->local_node->node_id;

	/* true once ~125ms have passed since the last bus reset */
	grace = time_after64(get_jiffies_64(),
			card->reset_jiffies + DIV_ROUND_UP(HZ, 8));

	if ((is_next_generation(generation, card->bm_generation) &&
	     !card->bm_abdicate) ||
	    (card->bm_generation != generation && grace)) {
		/*
		 * This first step is to figure out who is IRM and
		 * then try to become bus manager.  If the IRM is not
		 * well defined (e.g. does not have an active link
		 * layer or does not responds to our lock request, we
		 * will have to do a little vigilante bus management.
		 * In that case, we do a goto into the gap count logic
		 * so that when we do the reset, we still optimize the
		 * gap count.  That could well save a reset in the
		 * next generation.
		 */

		if (!card->irm_node->link_on) {
			new_root_id = local_id;
			fw_notify("%s, making local node (%02x) root.\n",
				  "IRM has link off", new_root_id);
			goto pick_me;
		}

		if (irm_is_1394_1995_only && !keep_this_irm) {
			new_root_id = local_id;
			fw_notify("%s, making local node (%02x) root.\n",
				  "IRM is not 1394a compliant", new_root_id);
			goto pick_me;
		}

		/* compare-swap: expect "no BM yet" (0x3f), propose ourselves */
		transaction_data[0] = cpu_to_be32(0x3f);
		transaction_data[1] = cpu_to_be32(local_id);

		spin_unlock_irq(&card->lock);

		rcode = fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP,
				irm_id, generation, SCODE_100,
				CSR_REGISTER_BASE + CSR_BUS_MANAGER_ID,
				transaction_data, 8);

		if (rcode == RCODE_GENERATION)
			/* Another bus reset, BM work has been rescheduled. */
			goto out;

		bm_id = be32_to_cpu(transaction_data[0]);

		spin_lock_irq(&card->lock);
		if (rcode == RCODE_COMPLETE && generation == card->generation)
			card->bm_node_id =
			    bm_id == 0x3f ? local_id : 0xffc0 | bm_id;
		spin_unlock_irq(&card->lock);

		if (rcode == RCODE_COMPLETE && bm_id != 0x3f) {
			/* Somebody else is BM.  Only act as IRM. */
			if (local_id == irm_id)
				allocate_broadcast_channel(card, generation);

			goto out;
		}

		if (rcode == RCODE_SEND_ERROR) {
			/*
			 * We have been unable to send the lock request due to
			 * some local problem.  Let's try again later and hope
			 * that the problem has gone away by then.
			 */
			fw_schedule_bm_work(card, DIV_ROUND_UP(HZ, 8));

			goto out;
		}

		spin_lock_irq(&card->lock);

		if (rcode != RCODE_COMPLETE && !keep_this_irm) {
			/*
			 * The lock request failed, maybe the IRM
			 * isn't really IRM capable after all.  Let's
			 * do a bus reset and pick the local node as
			 * root, and thus, IRM.
			 */
			new_root_id = local_id;
			fw_notify("%s, making local node (%02x) root.\n",
				  "BM lock failed", new_root_id);
			goto pick_me;
		}
	} else if (card->bm_generation != generation) {
		/*
		 * We weren't BM in the last generation, and the last
		 * bus reset is less than 125ms ago.  Reschedule this job.
		 */
		spin_unlock_irq(&card->lock);
		fw_schedule_bm_work(card, DIV_ROUND_UP(HZ, 8));
		goto out;
	}

	/*
	 * We're bus manager for this generation, so next step is to
	 * make sure we have an active cycle master and do gap count
	 * optimization.
	 */
	card->bm_generation = generation;

	if (root_device == NULL) {
		/*
		 * Either link_on is false, or we failed to read the
		 * config rom.  In either case, pick another root.
		 */
		new_root_id = local_id;
	} else if (!root_device_is_running) {
		/*
		 * If we haven't probed this device yet, bail out now
		 * and let's try again once that's done.
		 */
		spin_unlock_irq(&card->lock);
		goto out;
	} else if (root_device_is_cmc) {
		/*
		 * We will send out a force root packet for this
		 * node as part of the gap count optimization.
		 */
		new_root_id = root_id;
	} else {
		/*
		 * Current root has an active link layer and we
		 * successfully read the config rom, but it's not
		 * cycle master capable.
		 */
		new_root_id = local_id;
	}

 pick_me:
	/*
	 * Pick a gap count from 1394a table E-1.  The table doesn't cover
	 * the typically much larger 1394b beta repeater delays though.
	 */
	if (!card->beta_repeaters_present &&
	    root_node->max_hops < ARRAY_SIZE(gap_count_table))
		gap_count = gap_count_table[root_node->max_hops];
	else
		gap_count = 63;

	/*
	 * Finally, figure out if we should do a reset or not.  If we have
	 * done less than 5 resets with the same physical topology and we
	 * have either a new root or a new gap count setting, let's do it.
	 */

	if (card->bm_retries++ < 5 &&
	    (card->gap_count != gap_count || new_root_id != root_id))
		do_reset = true;

	spin_unlock_irq(&card->lock);

	if (do_reset) {
		fw_notify("phy config: card %d, new root=%x, gap_count=%d\n",
			  card->index, new_root_id, gap_count);
		fw_send_phy_config(card, new_root_id, generation, gap_count);
		reset_bus(card, true);
		/* Will allocate broadcast channel after the reset. */
		goto out;
	}

	if (root_device_is_cmc) {
		/*
		 * Make sure that the cycle master sends cycle start packets.
		 */
		transaction_data[0] = cpu_to_be32(CSR_STATE_BIT_CMSTR);
		rcode = fw_run_transaction(card, TCODE_WRITE_QUADLET_REQUEST,
				root_id, generation, SCODE_100,
				CSR_REGISTER_BASE + CSR_STATE_SET,
				transaction_data, 4);
		if (rcode == RCODE_GENERATION)
			goto out;
	}

	if (local_id == irm_id)
		allocate_broadcast_channel(card, generation);

 out:
	fw_node_put(root_node);
 out_put_card:
	fw_card_put(card);
}
/*
 * Initialize an fw_card before fw_card_add(): set its driver and
 * device, assign a unique index, and reset all bookkeeping (transaction
 * labels, split timeout, lists, locks, work items).
 */
void fw_card_initialize(struct fw_card *card,
			const struct fw_card_driver *driver,
			struct device *device)
{
	/* monotonically increasing card index, first card gets 0 */
	static atomic_t index = ATOMIC_INIT(-1);

	card->index = atomic_inc_return(&index);
	card->driver = driver;
	card->device = device;
	card->current_tlabel = 0;
	card->tlabel_mask = 0;
	/* express DEFAULT_SPLIT_TIMEOUT in the units each consumer needs */
	card->split_timeout_hi = DEFAULT_SPLIT_TIMEOUT / 8000;
	card->split_timeout_lo = (DEFAULT_SPLIT_TIMEOUT % 8000) << 19;
	card->split_timeout_cycles = DEFAULT_SPLIT_TIMEOUT;
	card->split_timeout_jiffies =
			DIV_ROUND_UP(DEFAULT_SPLIT_TIMEOUT * HZ, 8000);
	card->color = 0;
	card->broadcast_channel = BROADCAST_CHANNEL_INITIAL;

	kref_init(&card->kref);
	init_completion(&card->done);
	INIT_LIST_HEAD(&card->transaction_list);
	INIT_LIST_HEAD(&card->phy_receiver_list);
	spin_lock_init(&card->lock);

	card->local_node = NULL;

	INIT_DELAYED_WORK(&card->br_work, br_work);
	INIT_DELAYED_WORK(&card->bm_work, bm_work);
}
EXPORT_SYMBOL(fw_card_initialize);
  458. int fw_card_add(struct fw_card *card,
  459. u32 max_receive, u32 link_speed, u64 guid)
  460. {
  461. int ret;
  462. card->max_receive = max_receive;
  463. card->link_speed = link_speed;
  464. card->guid = guid;
  465. mutex_lock(&card_mutex);
  466. generate_config_rom(card, tmp_config_rom);
  467. ret = card->driver->enable(card, tmp_config_rom, config_rom_length);
  468. if (ret == 0)
  469. list_add_tail(&card->link, &card_list);
  470. mutex_unlock(&card_mutex);
  471. return ret;
  472. }
  473. EXPORT_SYMBOL(fw_card_add);
  474. /*
  475. * The next few functions implement a dummy driver that is used once a card
  476. * driver shuts down an fw_card. This allows the driver to cleanly unload,
  477. * as all IO to the card will be handled (and failed) by the dummy driver
  478. * instead of calling into the module. Only functions for iso context
  479. * shutdown still need to be provided by the card driver.
  480. *
  481. * .read/write_csr() should never be called anymore after the dummy driver
  482. * was bound since they are only used within request handler context.
  483. * .set_config_rom() is never called since the card is taken out of card_list
  484. * before switching to the dummy driver.
  485. */
/* PHY register reads fail once the real card driver is gone. */
static int dummy_read_phy_reg(struct fw_card *card, int address)
{
	return -ENODEV;
}
/* PHY register updates fail once the real card driver is gone. */
static int dummy_update_phy_reg(struct fw_card *card, int address,
				int clear_bits, int set_bits)
{
	return -ENODEV;
}
/* Complete outgoing requests immediately as cancelled. */
static void dummy_send_request(struct fw_card *card, struct fw_packet *packet)
{
	packet->callback(packet, card, RCODE_CANCELLED);
}
/* Complete outgoing responses immediately as cancelled. */
static void dummy_send_response(struct fw_card *card, struct fw_packet *packet)
{
	packet->callback(packet, card, RCODE_CANCELLED);
}
/* Nothing is in flight on the dummy driver, so there is nothing to cancel. */
static int dummy_cancel_packet(struct fw_card *card, struct fw_packet *packet)
{
	return -ENOENT;
}
/* Physical DMA cannot be enabled without the real card driver. */
static int dummy_enable_phys_dma(struct fw_card *card,
				 int node_id, int generation)
{
	return -ENODEV;
}
/* No new iso contexts can be created once the real card driver is gone. */
static struct fw_iso_context *dummy_allocate_iso_context(struct fw_card *card,
				int type, int channel, size_t header_size)
{
	return ERR_PTR(-ENODEV);
}
/* Iso contexts cannot be started once the real card driver is gone. */
static int dummy_start_iso(struct fw_iso_context *ctx,
			   s32 cycle, u32 sync, u32 tags)
{
	return -ENODEV;
}
/* Iso channel masks cannot be changed once the real card driver is gone. */
static int dummy_set_iso_channels(struct fw_iso_context *ctx, u64 *channels)
{
	return -ENODEV;
}
/* Iso packets cannot be queued once the real card driver is gone. */
static int dummy_queue_iso(struct fw_iso_context *ctx, struct fw_iso_packet *p,
			   struct fw_iso_buffer *buffer, unsigned long payload)
{
	return -ENODEV;
}
/* Nothing queued on the dummy driver, so flushing is a no-op. */
static void dummy_flush_queue_iso(struct fw_iso_context *ctx)
{
}
/*
 * Template for the stand-in driver installed by fw_core_remove_card();
 * free_iso_context and stop_iso are filled in there from the real driver.
 */
static const struct fw_card_driver dummy_driver_template = {
	.read_phy_reg		= dummy_read_phy_reg,
	.update_phy_reg		= dummy_update_phy_reg,
	.send_request		= dummy_send_request,
	.send_response		= dummy_send_response,
	.cancel_packet		= dummy_cancel_packet,
	.enable_phys_dma	= dummy_enable_phys_dma,
	.allocate_iso_context	= dummy_allocate_iso_context,
	.start_iso		= dummy_start_iso,
	.set_iso_channels	= dummy_set_iso_channels,
	.queue_iso		= dummy_queue_iso,
	.flush_queue_iso	= dummy_flush_queue_iso,
};
/*
 * kref release callback: the last card reference is gone, wake up
 * fw_core_remove_card() which waits on card->done.
 */
void fw_card_release(struct kref *kref)
{
	struct fw_card *card = container_of(kref, struct fw_card, kref);

	complete(&card->done);
}
/*
 * Tear down a card: clear its PHY link-active/contender bits, reset the
 * bus, take the card off card_list, swap in the dummy driver so further
 * I/O fails cleanly, destroy the node tree, and block until the last
 * card reference is dropped.
 */
void fw_core_remove_card(struct fw_card *card)
{
	/*
	 * Stack copy is safe: wait_for_completion() below keeps this
	 * frame alive for as long as card->driver points at it.
	 */
	struct fw_card_driver dummy_driver = dummy_driver_template;

	card->driver->update_phy_reg(card, 4,
				     PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
	fw_schedule_bus_reset(card, false, true);

	mutex_lock(&card_mutex);
	list_del_init(&card->link);
	mutex_unlock(&card_mutex);

	/* Switch off most of the card driver interface. */
	dummy_driver.free_iso_context = card->driver->free_iso_context;
	dummy_driver.stop_iso = card->driver->stop_iso;
	card->driver = &dummy_driver;

	fw_destroy_nodes(card);

	/* Wait for all users, especially device workqueue jobs, to finish. */
	fw_card_put(card);
	wait_for_completion(&card->done);

	WARN_ON(!list_empty(&card->transaction_list));
}
EXPORT_SYMBOL(fw_core_remove_card);