dahdi_dynamic_ethmf.c 21 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813
  1. /*
  2. * Dynamic Span Interface for DAHDI (Multi-Span Ethernet Interface)
  3. *
  4. * Written by Joseph Benden <joe@thrallingpenguin.com>
  5. *
  6. * Copyright (C) 2007-2010, Thralling Penguin LLC.
  7. *
  8. * All rights reserved.
  9. *
  10. * This program is free software; you can redistribute it and/or modify
  11. * it under the terms of the GNU General Public License as published by
  12. * the Free Software Foundation; either version 2 of the License, or
  13. * (at your option) any later version.
  14. *
  15. * This program is distributed in the hope that it will be useful,
  16. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  17. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  18. * GNU General Public License for more details.
  19. *
  20. * You should have received a copy of the GNU General Public License
  21. * along with this program; if not, write to the Free Software
  22. * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  23. *
  24. */
  25. #include <linux/kernel.h>
  26. #include <linux/errno.h>
  27. #include <linux/module.h>
  28. #include <linux/init.h>
  29. #include <linux/spinlock.h>
  30. #include <linux/slab.h>
  31. #include <linux/kmod.h>
  32. #include <linux/sched.h>
  33. #include <linux/interrupt.h>
  34. #include <linux/moduleparam.h>
  35. #include <linux/netdevice.h>
  36. #include <linux/notifier.h>
  37. #include <linux/crc32.h>
  38. #include <linux/seq_file.h>
  39. /**
  40. * Undefine USE_PROC_FS, if you do not want the /proc/dahdi/dynamic-ethmf
  41. * support. Undefining this would give a slight performance increase.
  42. */
  43. #define USE_PROC_FS
  44. #ifdef USE_PROC_FS
  45. # include <linux/proc_fs.h>
  46. # include <asm/atomic.h>
  47. #endif
  48. #ifdef CONFIG_DEVFS_FS
  49. # include <linux/devfs_fs_kernel.h>
  50. #endif
  51. #include <dahdi/kernel.h>
  52. #include <dahdi/user.h>
  53. #define ETH_P_ZTDETH 0xd00d
  54. #define ETHMF_MAX_PER_SPAN_GROUP 8
  55. #define ETHMF_MAX_GROUPS 16
  56. #define ETHMF_FLAG_IGNORE_CHAN0 (1 << 3)
  57. #define ETHMF_MAX_SPANS 4
/* On-the-wire TDMoE header: just the span sub-address in network byte
 * order.  The high bit marks a multi-span frame; the low byte then
 * carries the number of spans bundled in the frame. */
struct ztdeth_header {
	unsigned short subaddr;
};
/* Self-rearming one-second timer that counts down each span's startup
 * delay (see timer_callback) - used to combat a lock problem when
 * enabling spans directly from create() */
static struct timer_list timer;
/* Set on module exit so the callback stops re-arming the timer */
static atomic_t timer_deleted = ATOMIC_INIT(0);
/* Global error counter, reported via /proc/dahdi/dynamic-ethmf */
static atomic_t errcount = ATOMIC_INIT(0);
/* Nonzero while a span is being torn down; rx/tx paths bail out early */
static atomic_t shutdown = ATOMIC_INIT(0);
/* Frames built by ztdethmf_transmit(), drained by ztdethmf_flush() */
static struct sk_buff_head skbs;
#ifdef USE_PROC_FS
/* Per-group statistics (one group per destination MAC address),
 * displayed through /proc/dahdi/dynamic-ethmf */
struct ethmf_group {
	/* CRC32 of the destination MAC; 0 means the slot is unused */
	unsigned int hash_addr;
	/* number of spans currently registered in this group */
	atomic_t spans;
	/* frame and byte counters maintained by the rx/tx paths */
	atomic_t rxframecount;
	atomic_t txframecount;
	atomic_t rxbytecount;
	atomic_t txbytecount;
	/* NETDEV_UP / NETDEV_DOWN events seen for this group's device */
	atomic_t devupcount;
	atomic_t devdowncount;
};
static struct ethmf_group ethmf_groups[ETHMF_MAX_GROUPS];
#endif
/* Per-span state for one TDMoE-multiframe dynamic span */
struct ztdeth {
	/* Destination MAC address */
	unsigned char addr[ETH_ALEN];
	/* Destination MAC address hash value (CRC32; groups spans that
	 * share a destination) */
	unsigned int addr_hash;
	/* span sub-address, in network byte order */
	unsigned short subaddr;
	/* DAHDI span associated with this TDMoE-mf span */
	struct dahdi_span *span;
	/* Ethernet interface name */
	char ethdev[IFNAMSIZ];
	/* Ethernet device reference (NULL while the device is down) */
	struct net_device *dev;
	/* transmit staging buffer */
	unsigned char *msgbuf;
	/* transmit staging buffer length */
	int msgbuf_len;
	/* whether or not this span's frame is staged and ready to send */
	atomic_t ready;
	/* delay counter, to ensure all spans are added prior to usage;
	 * decremented once per second by the module timer */
	atomic_t delay;
	/* receive reassembly buffer */
	unsigned char *rcvbuf;
	/* the number of channels in this span */
	int real_channels;
	/* use padding if 1, else no padding (mirrors the peer's
	 * ETHMF_FLAG_IGNORE_CHAN0 usage) */
	atomic_t no_front_padding;
	/* counter to pseudo lock the rcvbuf against overlapping receives */
	atomic_t refcnt;
	struct list_head list;
};
/**
 * Lock taken by writers adding and removing items in ethmf_list
 */
static DEFINE_SPINLOCK(ethmf_lock);
/**
 * The active list of all running spans.  RCU-protected: readers use
 * rcu_read_lock(), writers hold ethmf_lock.
 */
static LIST_HEAD(ethmf_list);
/* Bump the global error counter.  Compiled out without /proc support,
 * since the count is only ever reported there. */
static inline void ethmf_errors_inc(void)
{
#ifdef USE_PROC_FS
	atomic_inc(&errcount);
#endif
}
  128. #ifdef USE_PROC_FS
  129. static inline int hashaddr_to_index(unsigned int hash_addr)
  130. {
  131. int i, z = -1;
  132. for (i = 0; i < ETHMF_MAX_GROUPS; ++i) {
  133. if (z == -1 && ethmf_groups[i].hash_addr == 0)
  134. z = i;
  135. if (ethmf_groups[i].hash_addr == hash_addr)
  136. return i;
  137. }
  138. if (z != -1) {
  139. ethmf_groups[z].hash_addr = hash_addr;
  140. }
  141. return z;
  142. }
  143. #endif
/**
 * Find the ztdeth struct and DAHDI span for a given MAC address and subaddr.
 *
 * Spans still inside their startup delay window are skipped.  On a miss
 * both output pointers are set to NULL.
 *
 * NOTE: RCU read lock must already be held.
 */
static inline void find_ethmf(const unsigned char *addr,
	const unsigned short subaddr, struct ztdeth **ze,
	struct dahdi_span **span)
{
	struct ztdeth *z;
	list_for_each_entry_rcu(z, &ethmf_list, list) {
		if (!atomic_read(&z->delay)) {
			/* match on destination MAC plus sub-address
			 * (both already in network byte order) */
			if (!memcmp(addr, z->addr, ETH_ALEN)
					&& z->subaddr == subaddr) {
				*ze = z;
				*span = z->span;
				return;
			}
		}
	}
	/* no results */
	*ze = NULL;
	*span = NULL;
}
  168. /**
  169. * Determines if all spans are ready for transmit. If all spans are ready,
  170. * we return the number of spans which indeed are ready and populate the
  171. * array of pointers to those spans..
  172. *
  173. * NOTE: RCU read lock must already be held.
  174. */
  175. static inline int ethmf_trx_spans_ready(unsigned int addr_hash, struct ztdeth *(*ready_spans)[ETHMF_MAX_PER_SPAN_GROUP])
  176. {
  177. struct ztdeth *t;
  178. int span_count = 0, spans_ready = 0;
  179. list_for_each_entry_rcu(t, &ethmf_list, list) {
  180. if (!atomic_read(&t->delay) && t->addr_hash == addr_hash) {
  181. ++span_count;
  182. if (atomic_read(&t->ready)) {
  183. short subaddr = ntohs(t->subaddr);
  184. if (subaddr < ETHMF_MAX_PER_SPAN_GROUP) {
  185. (*ready_spans)[subaddr] = t;
  186. ++spans_ready;
  187. } else {
  188. printk(KERN_ERR "More than %d spans per multi-frame group are not currently supported.",
  189. ETHMF_MAX_PER_SPAN_GROUP);
  190. }
  191. }
  192. }
  193. }
  194. if (span_count && spans_ready && span_count == spans_ready) {
  195. return spans_ready;
  196. }
  197. return 0;
  198. }
/**
 * Ethernet receiving side processing function.
 *
 * De-multiplexes one multi-span TDMoE frame: for every span index in the
 * frame it locates our matching span (by source MAC + sub-address),
 * reassembles TDM header + RBS bits + payload into that span's rcvbuf and
 * hands it to dahdi_dynamic_receive().  Always consumes the skb and
 * returns 0; frames whose spans are not ours are silently skipped.
 */
static int ztdethmf_rcv(struct sk_buff *skb, struct net_device *dev,
	struct packet_type *pt, struct net_device *orig_dev)
{
	int num_spans = 0, span_index = 0;
	unsigned char *data;
	struct dahdi_span *span;
	struct ztdeth *z = NULL;
	struct ztdeth_header *zh;
	unsigned int samples, channels, rbslen, flags;
	unsigned int skip = 0;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
	zh = (struct ztdeth_header *) skb_network_header(skb);
#else
	zh = (struct ztdeth_header *) skb->nh.raw;
#endif
	if (ntohs(zh->subaddr) & 0x8000) {
		/* got a multi-span frame; low byte is the span count */
		num_spans = ntohs(zh->subaddr) & 0xFF;
		/* Currently max of 4 spans supported */
		if (unlikely(num_spans > ETHMF_MAX_SPANS)) {
			kfree_skb(skb);
			return 0;
		}
		skb_pull(skb, sizeof(struct ztdeth_header));
#ifdef NEW_SKB_LINEARIZE
		if (skb_is_nonlinear(skb))
			skb_linearize(skb);
#else
		if (skb_is_nonlinear(skb))
			skb_linearize(skb, GFP_KERNEL);
#endif
		data = (unsigned char *) skb->data;
		rcu_read_lock();
		do {
			find_ethmf(eth_hdr(skb)->h_source,
				htons(span_index), &z, &span);
			if (unlikely(!z || !span)) {
				/* The recv'd span does not belong to us */
				/* ethmf_errors_inc(); */
				++span_index;
				continue;
			}
			/* Each span contributes a 6-byte TDM header at the
			 * front of the frame */
			samples = data[(span_index * 6)] & 0xFF;
			flags = data[((span_index * 6) + 1)] & 0xFF;
			channels = data[((span_index * 6) + 5)] & 0xFF;
			/* Precomputed defaults for most typical values */
			if (channels == 24)
				rbslen = 12;
			else if (channels == 31)
				rbslen = 16;
			else
				rbslen = ((channels + 3) / 4) * 2;
			if (unlikely(samples != 8 || channels >= 32 || channels == 0)) {
				ethmf_errors_inc();
				++span_index;
				continue;
			}
			/* Pseudo-lock on rcvbuf: refcnt idles at 0, so the
			 * decrement lands on -1 (atomic_dec_and_test() returns
			 * false) when the buffer is free; the matching
			 * atomic_inc below releases it. */
			if (atomic_dec_and_test(&z->refcnt) == 0) {
				memcpy(z->rcvbuf, data + 6*span_index, 6); /* TDM Header */
				/*
				 * If we ignore channel zero we must skip the first eight bytes and
				 * ensure that ztdynamic doesn't get confused by this new flag
				 */
				if (flags & ETHMF_FLAG_IGNORE_CHAN0) {
					skip = 8;
					/* Remove this flag since ztdynamic may not understand it */
					z->rcvbuf[1] = flags & ~(ETHMF_FLAG_IGNORE_CHAN0);
					/* Additionally, now we will transmit with front padding */
					atomic_set(&z->no_front_padding, 0);
				} else {
					/* Disable front padding if we recv'd a packet without it */
					atomic_set(&z->no_front_padding, 1);
				}
				/* RBS headers (16 bytes/span) follow all the
				 * TDM headers (6 bytes/span) */
				memcpy(z->rcvbuf + 6, data + 6*num_spans + 16
					*span_index, rbslen); /* RBS Header */
				/* 256 == 32*8; if padding lengths change, this must be modified */
				memcpy(z->rcvbuf + 6 + rbslen, data + 6*num_spans + 16
					*num_spans + (256)*span_index + skip, channels
					* 8); /* Payload */
				dahdi_dynamic_receive(span, z->rcvbuf, 6 + rbslen
					+ channels*8);
			} else {
				ethmf_errors_inc();
				printk(KERN_INFO "TDMoE span overflow detected. Span %d was dropped.", span_index);
			}
			/* release the rcvbuf pseudo-lock */
			atomic_inc(&z->refcnt);
#ifdef USE_PROC_FS
			/* count the whole frame once, against span 0 */
			if (span_index == 0) {
				atomic_inc(&(ethmf_groups[hashaddr_to_index(z->addr_hash)].rxframecount));
				atomic_add(skb->len + z->dev->hard_header_len +
					sizeof(struct ztdeth_header),
					&(ethmf_groups[hashaddr_to_index(z->addr_hash)].rxbytecount));
			}
#endif
			++span_index;
		} while (!atomic_read(&shutdown) && span_index < num_spans);
		rcu_read_unlock();
	}
	kfree_skb(skb);
	return 0;
}
/*
 * Netdevice event hook: forget our cached net_device pointer when the
 * interface goes down, and re-attach (matched by name) when it comes
 * back up.
 *
 * NOTE(review): NETDEV_UP stores dev without taking a reference, and
 * the DOWN path drops the pointer without a put - verify the refcount
 * balance against the dev_get_by_name() in ztdethmf_create().
 */
static int ztdethmf_notifier(struct notifier_block *block, unsigned long event,
	void *ptr)
{
	struct net_device *dev = ptr;
	struct ztdeth *z;

	switch (event) {
	case NETDEV_GOING_DOWN:
	case NETDEV_DOWN:
		rcu_read_lock();
		list_for_each_entry_rcu(z, &ethmf_list, list) {
			/* Note that the device no longer exists */
			if (z->dev == dev) {
				z->dev = NULL;
#ifdef USE_PROC_FS
				atomic_inc(&(ethmf_groups[hashaddr_to_index(z->addr_hash)].devdowncount));
#endif
			}
		}
		rcu_read_unlock();
		break;
	case NETDEV_UP:
		rcu_read_lock();
		list_for_each_entry_rcu(z, &ethmf_list, list) {
			/* Now that the device exists again, use it */
			if (!strcmp(z->ethdev, dev->name)) {
				z->dev = dev;
#ifdef USE_PROC_FS
				atomic_inc(&(ethmf_groups[hashaddr_to_index(z->addr_hash)].devupcount));
#endif
			}
		}
		rcu_read_unlock();
		break;
	}
	return 0;
}
  339. static void ztdethmf_transmit(struct dahdi_dynamic *dyn, u8 *msg, size_t msglen)
  340. {
  341. struct ztdeth *z = dyn->pvt, *ready_spans[ETHMF_MAX_PER_SPAN_GROUP];
  342. struct sk_buff *skb;
  343. struct ztdeth_header *zh;
  344. struct net_device *dev;
  345. unsigned char addr[ETH_ALEN];
  346. int spans_ready = 0, index = 0;
  347. if (atomic_read(&shutdown))
  348. return;
  349. rcu_read_lock();
  350. if (unlikely(!z || !z->dev)) {
  351. rcu_read_unlock();
  352. return;
  353. }
  354. if (!atomic_read(&z->ready)) {
  355. if (atomic_inc_return(&z->ready) == 1) {
  356. memcpy(z->msgbuf, msg, msglen);
  357. z->msgbuf_len = msglen;
  358. }
  359. }
  360. spans_ready = ethmf_trx_spans_ready(z->addr_hash, &ready_spans);
  361. if (spans_ready) {
  362. int pad[ETHMF_MAX_SPANS], rbs[ETHMF_MAX_SPANS];
  363. dev = z->dev;
  364. memcpy(addr, z->addr, sizeof(z->addr));
  365. for (index = 0; index < spans_ready; index++) {
  366. int chan = ready_spans[index]->real_channels;
  367. /* By default we pad to 32 channels, but if
  368. * no_front_padding is false then we have a pad
  369. * in the front of 8 bytes, so this implies one
  370. * less channel
  371. */
  372. if (atomic_read(&(ready_spans[index]->no_front_padding)))
  373. pad[index] = (32 - chan)*8;
  374. else
  375. pad[index] = (31 - chan)*8;
  376. if (chan == 24)
  377. rbs[index] = 12;
  378. else if (chan == 31)
  379. rbs[index] = 16;
  380. else
  381. /* Shouldn't this be index, not spans_ready? */
  382. rbs[spans_ready] = ((chan + 3) / 4) * 2;
  383. }
  384. /* Allocate the standard size for a 32-chan frame */
  385. skb = dev_alloc_skb(1112 + dev->hard_header_len
  386. + sizeof(struct ztdeth_header) + 32);
  387. if (unlikely(!skb)) {
  388. rcu_read_unlock();
  389. ethmf_errors_inc();
  390. return;
  391. }
  392. /* Reserve header space */
  393. skb_reserve(skb, dev->hard_header_len
  394. + sizeof(struct ztdeth_header));
  395. /* copy each spans header */
  396. for (index = 0; index < spans_ready; index++) {
  397. if (!atomic_read(&(ready_spans[index]->no_front_padding)))
  398. ready_spans[index]->msgbuf[1]
  399. |= ETHMF_FLAG_IGNORE_CHAN0;
  400. memcpy(skb_put(skb, 6), ready_spans[index]->msgbuf, 6);
  401. }
  402. /* copy each spans RBS payload */
  403. for (index = 0; index < spans_ready; index++) {
  404. memcpy(skb_put(skb, 16), ready_spans[index]->msgbuf + 6,
  405. rbs[index]);
  406. }
  407. /* copy each spans data/voice payload */
  408. for (index = 0; index < spans_ready; index++) {
  409. int chan = ready_spans[index]->real_channels;
  410. if (!atomic_read(&(ready_spans[index]->no_front_padding))) {
  411. /* This adds an additional (padded) channel to our total */
  412. memset(skb_put(skb, 8), 0xA5, 8); /* ETHMF_IGNORE_CHAN0 */
  413. }
  414. memcpy(skb_put(skb, chan*8), ready_spans[index]->msgbuf
  415. + (6 + rbs[index]), chan*8);
  416. if (pad[index] > 0) {
  417. memset(skb_put(skb, pad[index]), 0xDD, pad[index]);
  418. }
  419. /* mark span as ready for new data/voice */
  420. atomic_set(&(ready_spans[index]->ready), 0);
  421. }
  422. /* Throw on header */
  423. zh = (struct ztdeth_header *)skb_push(skb,
  424. sizeof(struct ztdeth_header));
  425. zh->subaddr = htons((unsigned short)(0x8000 | (unsigned char)(spans_ready & 0xFF)));
  426. /* Setup protocol type */
  427. skb->protocol = __constant_htons(ETH_P_ZTDETH);
  428. #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
  429. skb_set_network_header(skb, 0);
  430. #else
  431. skb->nh.raw = skb->data;
  432. #endif
  433. skb->dev = dev;
  434. #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
  435. dev_hard_header(skb, dev, ETH_P_ZTDETH, addr, dev->dev_addr, skb->len);
  436. #else
  437. if (dev->hard_header)
  438. dev->hard_header(skb, dev, ETH_P_ZTDETH, addr,
  439. dev->dev_addr, skb->len);
  440. #endif
  441. /* queue frame for delivery */
  442. if (dev) {
  443. skb_queue_tail(&skbs, skb);
  444. }
  445. #ifdef USE_PROC_FS
  446. atomic_inc(&(ethmf_groups[hashaddr_to_index(z->addr_hash)].txframecount));
  447. atomic_add(skb->len, &(ethmf_groups[hashaddr_to_index(z->addr_hash)].txbytecount));
  448. #endif
  449. }
  450. rcu_read_unlock();
  451. return;
  452. }
  453. static int ztdethmf_flush(void)
  454. {
  455. struct sk_buff *skb;
  456. /* Handle all transmissions now */
  457. while ((skb = skb_dequeue(&skbs))) {
  458. dev_queue_xmit(skb);
  459. }
  460. return 0;
  461. }
/* Packet handler registration for our private ethertype */
static struct packet_type ztdethmf_ptype = {
	.type = __constant_htons(ETH_P_ZTDETH), /* Protocol */
	.dev = NULL, /* Device (NULL = wildcard) */
	.func = ztdethmf_rcv, /* Receiver */
};
  467. static void ztdethmf_destroy(struct dahdi_dynamic *dyn)
  468. {
  469. struct ztdeth *z = dyn->pvt;
  470. unsigned long flags;
  471. atomic_set(&shutdown, 1);
  472. synchronize_rcu();
  473. spin_lock_irqsave(&ethmf_lock, flags);
  474. list_del_rcu(&z->list);
  475. spin_unlock_irqrestore(&ethmf_lock, flags);
  476. synchronize_rcu();
  477. atomic_dec(&(ethmf_groups[hashaddr_to_index(z->addr_hash)].spans));
  478. if (z) { /* Successfully removed */
  479. printk(KERN_INFO "Removed interface for %s\n",
  480. z->span->name);
  481. kfree(z->msgbuf);
  482. kfree(z);
  483. } else {
  484. if (z && z->span && z->span->name) {
  485. printk(KERN_ERR "Cannot find interface for %s\n",
  486. z->span->name);
  487. }
  488. }
  489. }
  490. static int ztdethmf_create(struct dahdi_dynamic *dyn, const char *addr)
  491. {
  492. struct ztdeth *z;
  493. char src[256];
  494. char *src_ptr;
  495. int x, bufsize, num_matched;
  496. unsigned long flags;
  497. struct dahdi_span *const span = &dyn->span;
  498. BUG_ON(!span);
  499. BUG_ON(!addr);
  500. z = kmalloc(sizeof(struct ztdeth), GFP_KERNEL);
  501. if (!z)
  502. return -ENOMEM;
  503. /* Zero it out */
  504. memset(z, 0, sizeof(struct ztdeth));
  505. /* set a delay for xmit/recv to workaround Zaptel problems */
  506. atomic_set(&z->delay, 4);
  507. /* create a msg buffer. MAX OF 31 CHANNELS!!!! */
  508. bufsize = 31 * DAHDI_CHUNKSIZE + 31 / 4 + 48;
  509. z->msgbuf = kmalloc(bufsize, GFP_KERNEL);
  510. z->rcvbuf = kmalloc(bufsize, GFP_KERNEL);
  511. /* Address should be <dev>/<macaddr>/subaddr */
  512. strlcpy(src, addr, sizeof(src));
  513. /* replace all / with space; otherwise kernel sscanf does not work */
  514. src_ptr = src;
  515. while (*src_ptr) {
  516. if (*src_ptr == '/')
  517. *src_ptr = ' ';
  518. ++src_ptr;
  519. }
  520. num_matched = sscanf(src,
  521. "%16s %hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hu",
  522. z->ethdev, &z->addr[0], &z->addr[1],
  523. &z->addr[2], &z->addr[3], &z->addr[4],
  524. &z->addr[5], &z->subaddr);
  525. if (8 != num_matched) {
  526. printk(KERN_ERR "Only matched %d entries in '%s'\n", num_matched, src);
  527. printk(KERN_ERR "Invalid TDMoE Multiframe address: %s\n", addr);
  528. kfree(z);
  529. return -EINVAL;
  530. }
  531. z->dev = dev_get_by_name(
  532. #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
  533. &init_net,
  534. #endif
  535. z->ethdev);
  536. if (!z->dev) {
  537. printk(KERN_ERR "TDMoE Multiframe: Invalid device '%s'\n", z->ethdev);
  538. kfree(z);
  539. return -EINVAL;
  540. }
  541. z->span = span;
  542. z->subaddr = htons(z->subaddr);
  543. z->addr_hash = crc32_le(0, z->addr, ETH_ALEN);
  544. z->real_channels = span->channels;
  545. src[0] = '\0';
  546. for (x = 0; x < 5; x++)
  547. sprintf(src + strlen(src), "%02x:", z->dev->dev_addr[x]);
  548. sprintf(src + strlen(src), "%02x", z->dev->dev_addr[5]);
  549. printk(KERN_INFO "TDMoEmf: Added new interface for %s at %s "
  550. "(addr=%s, src=%s, subaddr=%d)\n", span->name, z->dev->name,
  551. addr, src, ntohs(z->subaddr));
  552. atomic_set(&z->ready, 0);
  553. atomic_set(&z->refcnt, 0);
  554. spin_lock_irqsave(&ethmf_lock, flags);
  555. list_add_rcu(&z->list, &ethmf_list);
  556. spin_unlock_irqrestore(&ethmf_lock, flags);
  557. atomic_inc(&(ethmf_groups[hashaddr_to_index(z->addr_hash)].spans));
  558. /* enable the timer for enabling the spans */
  559. mod_timer(&timer, jiffies + HZ);
  560. atomic_set(&shutdown, 0);
  561. dyn->pvt = z;
  562. return 0;
  563. }
/* Operations handed to the dahdi_dynamic core for the "ethmf" method */
static struct dahdi_dynamic_driver ztd_ethmf = {
	.owner = THIS_MODULE,
	.name = "ethmf",
	.desc = "Ethernet",
	.create = ztdethmf_create,
	.destroy = ztdethmf_destroy,
	.transmit = ztdethmf_transmit,
	.flush = ztdethmf_flush,
};
/* Netdevice up/down notifications (see ztdethmf_notifier) */
static struct notifier_block ztdethmf_nblock = {
	.notifier_call = ztdethmf_notifier,
};
  576. /**
  577. * Decrements each delay counter in the ethmf_list and returns the number of
  578. * delay counters that are not equal to zero.
  579. */
  580. static int ethmf_delay_dec(void)
  581. {
  582. struct ztdeth *z;
  583. int count_nonzero = 0;
  584. rcu_read_lock();
  585. list_for_each_entry_rcu(z, &ethmf_list, list) {
  586. if (atomic_read(&z->delay)) {
  587. atomic_dec(&z->delay);
  588. ++count_nonzero;
  589. } else
  590. atomic_set(&z->delay, 0);
  591. }
  592. rcu_read_unlock();
  593. return count_nonzero;
  594. }
  595. /**
  596. * Timer callback function to allow all spans to be added, prior to any of
  597. * them being used.
  598. */
static void timer_callback(unsigned long param)
{
	/* Re-arm once per second until every span's startup delay has
	 * drained, unless the module is on its way out. */
	if (ethmf_delay_dec()) {
		if (!atomic_read(&timer_deleted)) {
			timer.expires = jiffies + HZ;
			add_timer(&timer);
		}
	} else {
		printk(KERN_INFO "All TDMoE multiframe span groups are active.\n");
		/* NOTE(review): del_timer() here is a no-op - the timer is
		 * not pending while its own callback is running. */
		del_timer(&timer);
	}
}
  611. #ifdef USE_PROC_FS
  612. static struct proc_dir_entry *proc_entry;
  613. static const char *ztdethmf_procname = "dahdi/dynamic-ethmf";
/* seq_file show callback: dump the global error count plus, for every
 * active group, its device, per-span state, and traffic counters. */
static int ztdethmf_proc_show(struct seq_file *sfile, void *not_used)
{
	struct ztdeth *z = NULL;
	int i = 0;
	unsigned int group = 0, c = 0;

	rcu_read_lock();
	seq_printf(sfile, "Errors: %d\n\n", atomic_read(&errcount));
	for (group = 0; group < ETHMF_MAX_GROUPS; ++group) {
		if (atomic_read(&(ethmf_groups[group].spans))) {
			seq_printf(sfile, "Group #%d (0x%x)\n", i++,
				ethmf_groups[group].hash_addr);
			seq_printf(sfile, "Spans: %d\n",
				atomic_read(&(ethmf_groups[group].spans)));
			c = 1;
			list_for_each_entry_rcu(z, &ethmf_list, list) {
				if (z->addr_hash == ethmf_groups[group].hash_addr) {
					/* print the shared device line only
					 * once, before the first span */
					if (c == 1) {
						seq_printf(sfile,
							" Device: %s (MAC: %02x:%02x:%02x:%02x:%02x:%02x)\n",
							z->ethdev,
							z->addr[0], z->addr[1], z->addr[2],
							z->addr[3], z->addr[4], z->addr[5]);
					}
					seq_printf(sfile, " Span %d: subaddr=%u ready=%d delay=%d real_channels=%d no_front_padding=%d\n",
						c++, ntohs(z->subaddr),
						atomic_read(&z->ready), atomic_read(&z->delay),
						z->real_channels, atomic_read(&z->no_front_padding));
				}
			}
			seq_printf(sfile, " Device UPs: %u\n",
				atomic_read(&(ethmf_groups[group].devupcount)));
			seq_printf(sfile, " Device DOWNs: %u\n",
				atomic_read(&(ethmf_groups[group].devdowncount)));
			seq_printf(sfile, " Rx Frames: %u\n",
				atomic_read(&(ethmf_groups[group].rxframecount)));
			seq_printf(sfile, " Tx Frames: %u\n",
				atomic_read(&(ethmf_groups[group].txframecount)));
			seq_printf(sfile, " Rx Bytes: %u\n",
				atomic_read(&(ethmf_groups[group].rxbytecount)));
			seq_printf(sfile, " Tx Bytes: %u\n",
				atomic_read(&(ethmf_groups[group].txbytecount)));
		}
	}
	rcu_read_unlock();
	return 0;
}
/* open() hook: bind the seq_file single-shot show callback */
static int ztdethmf_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, ztdethmf_proc_show, NULL);
}
/* File operations for /proc/dahdi/dynamic-ethmf (read-only seq_file) */
static const struct file_operations ztdethmf_proc_fops = {
	.open = ztdethmf_proc_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};
  670. #endif
  671. static int __init ztdethmf_init(void)
  672. {
  673. init_timer(&timer);
  674. timer.expires = jiffies + HZ;
  675. timer.function = &timer_callback;
  676. if (!timer_pending(&timer))
  677. add_timer(&timer);
  678. dev_add_pack(&ztdethmf_ptype);
  679. register_netdevice_notifier(&ztdethmf_nblock);
  680. dahdi_dynamic_register_driver(&ztd_ethmf);
  681. skb_queue_head_init(&skbs);
  682. #ifdef USE_PROC_FS
  683. proc_entry = proc_create_data(ztdethmf_procname, 0444, NULL,
  684. &ztdethmf_proc_fops, NULL);
  685. if (!proc_entry) {
  686. printk(KERN_ALERT "create_proc_read_entry failed.\n");
  687. }
  688. #endif
  689. return 0;
  690. }
  691. static void __exit ztdethmf_exit(void)
  692. {
  693. atomic_set(&timer_deleted, 1);
  694. del_timer_sync(&timer);
  695. dev_remove_pack(&ztdethmf_ptype);
  696. unregister_netdevice_notifier(&ztdethmf_nblock);
  697. dahdi_dynamic_unregister_driver(&ztd_ethmf);
  698. #ifdef USE_PROC_FS
  699. if (proc_entry)
  700. remove_proc_entry(ztdethmf_procname, NULL);
  701. #endif
  702. }
  703. MODULE_DESCRIPTION("DAHDI Dynamic TDMoEmf Support");
  704. MODULE_AUTHOR("Joseph Benden <joe@thrallingpenguin.com>");
  705. #ifdef MODULE_LICENSE
  706. MODULE_LICENSE("GPL");
  707. #endif
  708. module_init(ztdethmf_init);
  709. module_exit(ztdethmf_exit);