hdlc_fr.c

/*
 * Generic HDLC support routines for Linux
 * Frame Relay support
 *
 * Copyright (C) 1999 - 2006 Krzysztof Halasa <khc@pm.waw.pl>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 *
 *            Theory of PVC state
 *
 * DCE mode:
 *
 * (exist,new) -> 0,0 when "PVC create" or if "link unreliable"
 *         0,x -> 1,1 if "link reliable" when sending FULL STATUS
 *         1,1 -> 1,0 if received FULL STATUS ACK
 *
 * (active)    -> 0 when "ifconfig PVC down" or "link unreliable" or "PVC create"
 *             -> 1 when "PVC up" and (exist,new) = 1,0
 *
 * DTE mode:
 * (exist,new,active) = FULL STATUS if "link reliable"
 *                    = 0, 0, 0 if "link unreliable"
 *
 * No LMI:
 * active = open and "link reliable"
 * exist = new = not used
 *
 * CCITT LMI: ITU-T Q.933 Annex A
 * ANSI LMI: ANSI T1.617 Annex D
 * CISCO LMI: the original, aka "Gang of Four" LMI
 */

#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/hdlc.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pkt_sched.h>
#include <linux/poll.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <linux/slab.h>

#undef DEBUG_PKT
#undef DEBUG_ECN
#undef DEBUG_LINK
#undef DEBUG_PROTO
#undef DEBUG_PVC

#define FR_UI                  0x03
#define FR_PAD                 0x00

#define NLPID_IP               0xCC
#define NLPID_IPV6             0x8E
#define NLPID_SNAP             0x80
#define NLPID_PAD              0x00
#define NLPID_CCITT_ANSI_LMI   0x08
#define NLPID_CISCO_LMI        0x09

#define LMI_CCITT_ANSI_DLCI       0 /* LMI DLCI */
#define LMI_CISCO_DLCI         1023

#define LMI_CALLREF            0x00 /* Call Reference */
#define LMI_ANSI_LOCKSHIFT     0x95 /* ANSI locking shift */
#define LMI_ANSI_CISCO_REPTYPE 0x01 /* report type */
#define LMI_CCITT_REPTYPE      0x51
#define LMI_ANSI_CISCO_ALIVE   0x03 /* keep alive */
#define LMI_CCITT_ALIVE        0x53
#define LMI_ANSI_CISCO_PVCSTAT 0x07 /* PVC status */
#define LMI_CCITT_PVCSTAT      0x57

#define LMI_FULLREP            0x00 /* full report */
#define LMI_INTEGRITY          0x01 /* link integrity report */
#define LMI_SINGLE             0x02 /* single PVC report */

#define LMI_STATUS_ENQUIRY     0x75
#define LMI_STATUS             0x7D /* reply */

#define LMI_REPT_LEN              1 /* report type element length */
#define LMI_INTEG_LEN             2 /* link integrity element length */

#define LMI_CCITT_CISCO_LENGTH   13 /* LMI frame lengths */
#define LMI_ANSI_LENGTH          14

struct fr_hdr {
#if defined(__LITTLE_ENDIAN_BITFIELD)
        unsigned ea1:   1;
        unsigned cr:    1;
        unsigned dlcih: 6;

        unsigned ea2:   1;
        unsigned de:    1;
        unsigned becn:  1;
        unsigned fecn:  1;
        unsigned dlcil: 4;
#else
        unsigned dlcih: 6;
        unsigned cr:    1;
        unsigned ea1:   1;

        unsigned dlcil: 4;
        unsigned fecn:  1;
        unsigned becn:  1;
        unsigned de:    1;
        unsigned ea2:   1;
#endif
} __packed;

struct pvc_device {
        struct net_device *frad;
        struct net_device *main;
        struct net_device *ether;       /* bridged Ethernet interface */
        struct pvc_device *next;        /* Sorted in ascending DLCI order */
        int dlci;
        int open_count;

        struct {
                unsigned int new: 1;
                unsigned int active: 1;
                unsigned int exist: 1;
                unsigned int deleted: 1;
                unsigned int fecn: 1;
                unsigned int becn: 1;
                unsigned int bandwidth; /* Cisco LMI reporting only */
        } state;
};

struct frad_state {
        fr_proto settings;
        struct pvc_device *first_pvc;
        int dce_pvc_count;

        struct timer_list timer;
        unsigned long last_poll;
        int reliable;
        int dce_changed;
        int request;
        int fullrep_sent;
        u32 last_errors;        /* last errors bit list */
        u8 n391cnt;
        u8 txseq;               /* TX sequence number */
        u8 rxseq;               /* RX sequence number */
};

static int fr_ioctl(struct net_device *dev, struct ifreq *ifr);
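
/* Convert between a 2-byte Q.922 address and the 10-bit DLCI it carries:
 * the upper 6 DLCI bits sit in bits 2-7 of the first octet, the lower
 * 4 bits in bits 4-7 of the second octet, with the EA bit set on the
 * final octet.
 */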
static inline u16 q922_to_dlci(u8 *hdr)
{
        return ((hdr[0] & 0xFC) << 2) | ((hdr[1] & 0xF0) >> 4);
}

static inline void dlci_to_q922(u8 *hdr, u16 dlci)
{
        hdr[0] = (dlci >> 2) & 0xFC;
        hdr[1] = ((dlci << 4) & 0xF0) | 0x01;
}

static inline struct frad_state *state(hdlc_device *hdlc)
{
        return (struct frad_state *)(hdlc->state);
}
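
/* The PVC list hangs off frad_state and is kept sorted by ascending DLCI;
 * find_pvc() stops early once it passes the wanted DLCI, add_pvc() either
 * returns the existing entry or links a new one in at the right position.
 */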
static inline struct pvc_device *find_pvc(hdlc_device *hdlc, u16 dlci)
{
        struct pvc_device *pvc = state(hdlc)->first_pvc;

        while (pvc) {
                if (pvc->dlci == dlci)
                        return pvc;
                if (pvc->dlci > dlci)
                        return NULL;    /* the list is sorted */
                pvc = pvc->next;
        }

        return NULL;
}

static struct pvc_device *add_pvc(struct net_device *dev, u16 dlci)
{
        hdlc_device *hdlc = dev_to_hdlc(dev);
        struct pvc_device *pvc, **pvc_p = &state(hdlc)->first_pvc;

        while (*pvc_p) {
                if ((*pvc_p)->dlci == dlci)
                        return *pvc_p;
                if ((*pvc_p)->dlci > dlci)
                        break;  /* the list is sorted */
                pvc_p = &(*pvc_p)->next;
        }

        pvc = kzalloc(sizeof(*pvc), GFP_ATOMIC);
#ifdef DEBUG_PVC
        printk(KERN_DEBUG "add_pvc: allocated pvc %p, frad %p\n", pvc, dev);
#endif
        if (!pvc)
                return NULL;

        pvc->dlci = dlci;
        pvc->frad = dev;
        pvc->next = *pvc_p;     /* Put it in the chain */
        *pvc_p = pvc;
        return pvc;
}

static inline int pvc_is_used(struct pvc_device *pvc)
{
        return pvc->main || pvc->ether;
}
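
/* Propagate the PVC's active/inactive state to the carrier flag of both
 * user-visible interfaces (the routed and the bridged Ethernet device),
 * calling netif_carrier_{on,off}() only when the state actually changes.
 */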
static inline void pvc_carrier(int on, struct pvc_device *pvc)
{
        if (on) {
                if (pvc->main)
                        if (!netif_carrier_ok(pvc->main))
                                netif_carrier_on(pvc->main);
                if (pvc->ether)
                        if (!netif_carrier_ok(pvc->ether))
                                netif_carrier_on(pvc->ether);
        } else {
                if (pvc->main)
                        if (netif_carrier_ok(pvc->main))
                                netif_carrier_off(pvc->main);
                if (pvc->ether)
                        if (netif_carrier_ok(pvc->ether))
                                netif_carrier_off(pvc->ether);
        }
}

static inline void delete_unused_pvcs(hdlc_device *hdlc)
{
        struct pvc_device **pvc_p = &state(hdlc)->first_pvc;

        while (*pvc_p) {
                if (!pvc_is_used(*pvc_p)) {
                        struct pvc_device *pvc = *pvc_p;
#ifdef DEBUG_PVC
                        printk(KERN_DEBUG "freeing unused pvc: %p\n", pvc);
#endif
                        *pvc_p = pvc->next;
                        kfree(pvc);
                        continue;
                }
                pvc_p = &(*pvc_p)->next;
        }
}

static inline struct net_device **get_dev_p(struct pvc_device *pvc,
                                            int type)
{
        if (type == ARPHRD_ETHER)
                return &pvc->ether;
        else
                return &pvc->main;
}
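
/* Prepend the Frame Relay encapsulation header in front of the payload:
 * 2-byte Q.922 address, UI control byte, then either a single NLPID byte
 * (IP, IPv6, the two LMI NLPIDs) or a padded SNAP header carrying an
 * OUI/PID pair (00-80-C2 / 0x0007 for bridged Ethernet without FCS, or
 * OUI 0 with the EtherType as PID for anything else). May reallocate the
 * skb when there is not enough headroom for the 10-byte bridged header.
 */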
static int fr_hard_header(struct sk_buff **skb_p, u16 dlci)
{
        u16 head_len;
        struct sk_buff *skb = *skb_p;

        switch (skb->protocol) {
        case cpu_to_be16(NLPID_CCITT_ANSI_LMI):
                head_len = 4;
                skb_push(skb, head_len);
                skb->data[3] = NLPID_CCITT_ANSI_LMI;
                break;

        case cpu_to_be16(NLPID_CISCO_LMI):
                head_len = 4;
                skb_push(skb, head_len);
                skb->data[3] = NLPID_CISCO_LMI;
                break;

        case cpu_to_be16(ETH_P_IP):
                head_len = 4;
                skb_push(skb, head_len);
                skb->data[3] = NLPID_IP;
                break;

        case cpu_to_be16(ETH_P_IPV6):
                head_len = 4;
                skb_push(skb, head_len);
                skb->data[3] = NLPID_IPV6;
                break;

        case cpu_to_be16(ETH_P_802_3):
                head_len = 10;
                if (skb_headroom(skb) < head_len) {
                        struct sk_buff *skb2 = skb_realloc_headroom(skb,
                                                                    head_len);
                        if (!skb2)
                                return -ENOBUFS;
                        dev_kfree_skb(skb);
                        skb = *skb_p = skb2;
                }
                skb_push(skb, head_len);
                skb->data[3] = FR_PAD;
                skb->data[4] = NLPID_SNAP;
                skb->data[5] = FR_PAD;
                skb->data[6] = 0x80;
                skb->data[7] = 0xC2;
                skb->data[8] = 0x00;
                skb->data[9] = 0x07; /* bridged Ethernet frame w/out FCS */
                break;

        default:
                head_len = 10;
                skb_push(skb, head_len);
                skb->data[3] = FR_PAD;
                skb->data[4] = NLPID_SNAP;
                skb->data[5] = FR_PAD;
                skb->data[6] = FR_PAD;
                skb->data[7] = FR_PAD;
                *(__be16 *)(skb->data + 8) = skb->protocol;
        }

        dlci_to_q922(skb->data, dlci);
        skb->data[2] = FR_UI;
        return 0;
}

static int pvc_open(struct net_device *dev)
{
        struct pvc_device *pvc = dev->ml_priv;

        if ((pvc->frad->flags & IFF_UP) == 0)
                return -EIO;    /* Frad must be UP in order to activate PVC */

        if (pvc->open_count++ == 0) {
                hdlc_device *hdlc = dev_to_hdlc(pvc->frad);
                if (state(hdlc)->settings.lmi == LMI_NONE)
                        pvc->state.active = netif_carrier_ok(pvc->frad);

                pvc_carrier(pvc->state.active, pvc);
                state(hdlc)->dce_changed = 1;
        }
        return 0;
}

static int pvc_close(struct net_device *dev)
{
        struct pvc_device *pvc = dev->ml_priv;

        if (--pvc->open_count == 0) {
                hdlc_device *hdlc = dev_to_hdlc(pvc->frad);
                if (state(hdlc)->settings.lmi == LMI_NONE)
                        pvc->state.active = 0;

                if (state(hdlc)->settings.dce) {
                        state(hdlc)->dce_changed = 1;
                        pvc->state.active = 0;
                }
        }
        return 0;
}

static int pvc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
        struct pvc_device *pvc = dev->ml_priv;
        fr_proto_pvc_info info;

        if (ifr->ifr_settings.type == IF_GET_PROTO) {
                if (dev->type == ARPHRD_ETHER)
                        ifr->ifr_settings.type = IF_PROTO_FR_ETH_PVC;
                else
                        ifr->ifr_settings.type = IF_PROTO_FR_PVC;

                if (ifr->ifr_settings.size < sizeof(info)) {
                        /* data size wanted */
                        ifr->ifr_settings.size = sizeof(info);
                        return -ENOBUFS;
                }

                info.dlci = pvc->dlci;
                memcpy(info.master, pvc->frad->name, IFNAMSIZ);
                if (copy_to_user(ifr->ifr_settings.ifs_ifsu.fr_pvc_info,
                                 &info, sizeof(info)))
                        return -EFAULT;
                return 0;
        }

        return -EINVAL;
}
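
/* Transmit path for a PVC interface: bridged Ethernet frames are first
 * zero-padded to ETH_ZLEN, then fr_hard_header() adds the Frame Relay
 * encapsulation and the skb is re-queued on the underlying FRAD device.
 * tx_compressed doubles as a crude "FECN seen" congestion counter.
 */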
static netdev_tx_t pvc_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct pvc_device *pvc = dev->ml_priv;

        if (pvc->state.active) {
                if (dev->type == ARPHRD_ETHER) {
                        int pad = ETH_ZLEN - skb->len;
                        if (pad > 0) { /* Pad the frame with zeros */
                                int len = skb->len;
                                if (skb_tailroom(skb) < pad)
                                        if (pskb_expand_head(skb, 0, pad,
                                                             GFP_ATOMIC)) {
                                                dev->stats.tx_dropped++;
                                                dev_kfree_skb(skb);
                                                return NETDEV_TX_OK;
                                        }
                                skb_put(skb, pad);
                                memset(skb->data + len, 0, pad);
                        }
                        skb->protocol = cpu_to_be16(ETH_P_802_3);
                }
                if (!fr_hard_header(&skb, pvc->dlci)) {
                        dev->stats.tx_bytes += skb->len;
                        dev->stats.tx_packets++;
                        if (pvc->state.fecn) /* TX Congestion counter */
                                dev->stats.tx_compressed++;
                        skb->dev = pvc->frad;
                        dev_queue_xmit(skb);
                        return NETDEV_TX_OK;
                }
        }

        dev->stats.tx_dropped++;
        dev_kfree_skb(skb);
        return NETDEV_TX_OK;
}

static inline void fr_log_dlci_active(struct pvc_device *pvc)
{
        netdev_info(pvc->frad, "DLCI %d [%s%s%s]%s %s\n",
                    pvc->dlci,
                    pvc->main ? pvc->main->name : "",
                    pvc->main && pvc->ether ? " " : "",
                    pvc->ether ? pvc->ether->name : "",
                    pvc->state.new ? " new" : "",
                    !pvc->state.exist ? "deleted" :
                    pvc->state.active ? "active" : "inactive");
}

static inline u8 fr_lmi_nextseq(u8 x)
{
        x++;
        return x ? x : 1;
}
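
/* Build and queue one LMI frame on the FRAD: a STATUS reply when acting
 * as DCE, a STATUS ENQUIRY when acting as DTE. Every frame carries the
 * report type and link integrity (tx/rx sequence) IEs; a DCE full report
 * additionally appends one PVC status IE per known PVC, whose contents
 * are 3 bytes for CCITT/ANSI and 6 for Cisco.
 */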
static void fr_lmi_send(struct net_device *dev, int fullrep)
{
        hdlc_device *hdlc = dev_to_hdlc(dev);
        struct sk_buff *skb;
        struct pvc_device *pvc = state(hdlc)->first_pvc;
        int lmi = state(hdlc)->settings.lmi;
        int dce = state(hdlc)->settings.dce;
        int len = lmi == LMI_ANSI ? LMI_ANSI_LENGTH : LMI_CCITT_CISCO_LENGTH;
        int stat_len = (lmi == LMI_CISCO) ? 6 : 3;
        u8 *data;
        int i = 0;

        if (dce && fullrep) {
                len += state(hdlc)->dce_pvc_count * (2 + stat_len);
                if (len > HDLC_MAX_MRU) {
                        netdev_warn(dev, "Too many PVCs while sending LMI full report\n");
                        return;
                }
        }

        skb = dev_alloc_skb(len);
        if (!skb) {
                netdev_warn(dev, "Memory squeeze on fr_lmi_send()\n");
                return;
        }
        memset(skb->data, 0, len);
        skb_reserve(skb, 4);
        if (lmi == LMI_CISCO) {
                skb->protocol = cpu_to_be16(NLPID_CISCO_LMI);
                fr_hard_header(&skb, LMI_CISCO_DLCI);
        } else {
                skb->protocol = cpu_to_be16(NLPID_CCITT_ANSI_LMI);
                fr_hard_header(&skb, LMI_CCITT_ANSI_DLCI);
        }
        data = skb_tail_pointer(skb);
        data[i++] = LMI_CALLREF;
        data[i++] = dce ? LMI_STATUS : LMI_STATUS_ENQUIRY;
        if (lmi == LMI_ANSI)
                data[i++] = LMI_ANSI_LOCKSHIFT;
        data[i++] = lmi == LMI_CCITT ? LMI_CCITT_REPTYPE :
                LMI_ANSI_CISCO_REPTYPE;
        data[i++] = LMI_REPT_LEN;
        data[i++] = fullrep ? LMI_FULLREP : LMI_INTEGRITY;
        data[i++] = lmi == LMI_CCITT ? LMI_CCITT_ALIVE : LMI_ANSI_CISCO_ALIVE;
        data[i++] = LMI_INTEG_LEN;
        data[i++] = state(hdlc)->txseq =
                fr_lmi_nextseq(state(hdlc)->txseq);
        data[i++] = state(hdlc)->rxseq;

        if (dce && fullrep) {
                while (pvc) {
                        data[i++] = lmi == LMI_CCITT ? LMI_CCITT_PVCSTAT :
                                LMI_ANSI_CISCO_PVCSTAT;
                        data[i++] = stat_len;

                        /* LMI start/restart */
                        if (state(hdlc)->reliable && !pvc->state.exist) {
                                pvc->state.exist = pvc->state.new = 1;
                                fr_log_dlci_active(pvc);
                        }

                        /* ifconfig PVC up */
                        if (pvc->open_count && !pvc->state.active &&
                            pvc->state.exist && !pvc->state.new) {
                                pvc_carrier(1, pvc);
                                pvc->state.active = 1;
                                fr_log_dlci_active(pvc);
                        }

                        if (lmi == LMI_CISCO) {
                                data[i] = pvc->dlci >> 8;
                                data[i + 1] = pvc->dlci & 0xFF;
                        } else {
                                data[i] = (pvc->dlci >> 4) & 0x3F;
                                data[i + 1] = ((pvc->dlci << 3) & 0x78) | 0x80;
                                data[i + 2] = 0x80;
                        }

                        if (pvc->state.new)
                                data[i + 2] |= 0x08;
                        else if (pvc->state.active)
                                data[i + 2] |= 0x02;
                        i += stat_len;
                        pvc = pvc->next;
                }
        }

        skb_put(skb, i);
        skb->priority = TC_PRIO_CONTROL;
        skb->dev = dev;
        skb_reset_network_header(skb);

        dev_queue_xmit(skb);
}
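
/* Record whether the LMI link integrity protocol considers the link
 * reliable and react accordingly: on "reliable" clear the dormant state
 * and (with LMI disabled) activate every PVC; on "unreliable" mark the
 * FRAD dormant and deactivate all PVCs.
 */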
static void fr_set_link_state(int reliable, struct net_device *dev)
{
        hdlc_device *hdlc = dev_to_hdlc(dev);
        struct pvc_device *pvc = state(hdlc)->first_pvc;

        state(hdlc)->reliable = reliable;
        if (reliable) {
                netif_dormant_off(dev);
                state(hdlc)->n391cnt = 0; /* Request full status */
                state(hdlc)->dce_changed = 1;

                if (state(hdlc)->settings.lmi == LMI_NONE) {
                        while (pvc) {   /* Activate all PVCs */
                                pvc_carrier(1, pvc);
                                pvc->state.exist = pvc->state.active = 1;
                                pvc->state.new = 0;
                                pvc = pvc->next;
                        }
                }
        } else {
                netif_dormant_on(dev);
                while (pvc) {           /* Deactivate all PVCs */
                        pvc_carrier(0, pvc);
                        pvc->state.exist = pvc->state.active = 0;
                        pvc->state.new = 0;
                        if (!state(hdlc)->settings.dce)
                                pvc->state.bandwidth = 0;
                        pvc = pvc->next;
                }
        }
}
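
/* Periodic LMI timer. As DCE it only checks that a STATUS ENQUIRY arrived
 * within T392 seconds of the last poll; as DTE it shifts the per-poll
 * error history, counts errors over the last N393 polls against the N392
 * threshold, sends the next STATUS ENQUIRY (a full one every N391 polls)
 * and re-arms itself with T391.
 */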
static void fr_timer(unsigned long arg)
{
        struct net_device *dev = (struct net_device *)arg;
        hdlc_device *hdlc = dev_to_hdlc(dev);
        int i, cnt = 0, reliable;
        u32 list;

        if (state(hdlc)->settings.dce) {
                reliable = state(hdlc)->request &&
                        time_before(jiffies, state(hdlc)->last_poll +
                                    state(hdlc)->settings.t392 * HZ);
                state(hdlc)->request = 0;
        } else {
                state(hdlc)->last_errors <<= 1; /* Shift the list */
                if (state(hdlc)->request) {
                        if (state(hdlc)->reliable)
                                netdev_info(dev, "No LMI status reply received\n");
                        state(hdlc)->last_errors |= 1;
                }

                list = state(hdlc)->last_errors;
                for (i = 0; i < state(hdlc)->settings.n393; i++, list >>= 1)
                        cnt += (list & 1);      /* errors count */

                reliable = (cnt < state(hdlc)->settings.n392);
        }

        if (state(hdlc)->reliable != reliable) {
                netdev_info(dev, "Link %sreliable\n", reliable ? "" : "un");
                fr_set_link_state(reliable, dev);
        }

        if (state(hdlc)->settings.dce)
                state(hdlc)->timer.expires = jiffies +
                        state(hdlc)->settings.t392 * HZ;
        else {
                if (state(hdlc)->n391cnt)
                        state(hdlc)->n391cnt--;

                fr_lmi_send(dev, state(hdlc)->n391cnt == 0);

                state(hdlc)->last_poll = jiffies;
                state(hdlc)->request = 1;
                state(hdlc)->timer.expires = jiffies +
                        state(hdlc)->settings.t391 * HZ;
        }

        state(hdlc)->timer.function = fr_timer;
        state(hdlc)->timer.data = arg;
        add_timer(&state(hdlc)->timer);
}
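
/* Parse and act on a received LMI frame. After validating the fixed part
 * (NLPID, call reference, message type, report type and link integrity
 * IEs), a DCE answers with a STATUS (a full report while dce_changed is
 * pending); a DTE treats the frame as the reply to its last enquiry and,
 * for a full report, rebuilds the PVC list from the PVC status IEs,
 * deactivating PVCs that disappeared from the report.
 */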
static int fr_lmi_recv(struct net_device *dev, struct sk_buff *skb)
{
        hdlc_device *hdlc = dev_to_hdlc(dev);
        struct pvc_device *pvc;
        u8 rxseq, txseq;
        int lmi = state(hdlc)->settings.lmi;
        int dce = state(hdlc)->settings.dce;
        int stat_len = (lmi == LMI_CISCO) ? 6 : 3, reptype, error, no_ram, i;

        if (skb->len < (lmi == LMI_ANSI ? LMI_ANSI_LENGTH :
                        LMI_CCITT_CISCO_LENGTH)) {
                netdev_info(dev, "Short LMI frame\n");
                return 1;
        }

        if (skb->data[3] != (lmi == LMI_CISCO ? NLPID_CISCO_LMI :
                             NLPID_CCITT_ANSI_LMI)) {
                netdev_info(dev, "Received non-LMI frame with LMI DLCI\n");
                return 1;
        }

        if (skb->data[4] != LMI_CALLREF) {
                netdev_info(dev, "Invalid LMI Call reference (0x%02X)\n",
                            skb->data[4]);
                return 1;
        }

        if (skb->data[5] != (dce ? LMI_STATUS_ENQUIRY : LMI_STATUS)) {
                netdev_info(dev, "Invalid LMI Message type (0x%02X)\n",
                            skb->data[5]);
                return 1;
        }

        if (lmi == LMI_ANSI) {
                if (skb->data[6] != LMI_ANSI_LOCKSHIFT) {
                        netdev_info(dev, "Not ANSI locking shift in LMI message (0x%02X)\n",
                                    skb->data[6]);
                        return 1;
                }
                i = 7;
        } else
                i = 6;

        if (skb->data[i] != (lmi == LMI_CCITT ? LMI_CCITT_REPTYPE :
                             LMI_ANSI_CISCO_REPTYPE)) {
                netdev_info(dev, "Not an LMI Report type IE (0x%02X)\n",
                            skb->data[i]);
                return 1;
        }

        if (skb->data[++i] != LMI_REPT_LEN) {
                netdev_info(dev, "Invalid LMI Report type IE length (%u)\n",
                            skb->data[i]);
                return 1;
        }

        reptype = skb->data[++i];
        if (reptype != LMI_INTEGRITY && reptype != LMI_FULLREP) {
                netdev_info(dev, "Unsupported LMI Report type (0x%02X)\n",
                            reptype);
                return 1;
        }

        if (skb->data[++i] != (lmi == LMI_CCITT ? LMI_CCITT_ALIVE :
                               LMI_ANSI_CISCO_ALIVE)) {
                netdev_info(dev, "Not an LMI Link integrity verification IE (0x%02X)\n",
                            skb->data[i]);
                return 1;
        }

        if (skb->data[++i] != LMI_INTEG_LEN) {
                netdev_info(dev, "Invalid LMI Link integrity verification IE length (%u)\n",
                            skb->data[i]);
                return 1;
        }
        i++;

        state(hdlc)->rxseq = skb->data[i++]; /* TX sequence from peer */
        rxseq = skb->data[i++];              /* Should confirm our sequence */

        txseq = state(hdlc)->txseq;

        if (dce)
                state(hdlc)->last_poll = jiffies;

        error = 0;
        if (!state(hdlc)->reliable)
                error = 1;

        if (rxseq == 0 || rxseq != txseq) { /* Ask for full report next time */
                state(hdlc)->n391cnt = 0;
                error = 1;
        }

        if (dce) {
                if (state(hdlc)->fullrep_sent && !error) {
                        /* Stop sending full report - the last one has been confirmed by DTE */
                        state(hdlc)->fullrep_sent = 0;

                        pvc = state(hdlc)->first_pvc;
                        while (pvc) {
                                if (pvc->state.new) {
                                        pvc->state.new = 0;

                                        /* Tell DTE that new PVC is now active */
                                        state(hdlc)->dce_changed = 1;
                                }
                                pvc = pvc->next;
                        }
                }

                if (state(hdlc)->dce_changed) {
                        reptype = LMI_FULLREP;
                        state(hdlc)->fullrep_sent = 1;
                        state(hdlc)->dce_changed = 0;
                }

                state(hdlc)->request = 1; /* got request */
                fr_lmi_send(dev, reptype == LMI_FULLREP ? 1 : 0);
                return 0;
        }

        /* DTE */

        state(hdlc)->request = 0; /* got response, no request pending */

        if (error)
                return 0;

        if (reptype != LMI_FULLREP)
                return 0;

        pvc = state(hdlc)->first_pvc;

        while (pvc) {
                pvc->state.deleted = 1;
                pvc = pvc->next;
        }

        no_ram = 0;
        while (skb->len >= i + 2 + stat_len) {
                u16 dlci;
                u32 bw;
                unsigned int active, new;

                if (skb->data[i] != (lmi == LMI_CCITT ? LMI_CCITT_PVCSTAT :
                                     LMI_ANSI_CISCO_PVCSTAT)) {
                        netdev_info(dev, "Not an LMI PVC status IE (0x%02X)\n",
                                    skb->data[i]);
                        return 1;
                }

                if (skb->data[++i] != stat_len) {
                        netdev_info(dev, "Invalid LMI PVC status IE length (%u)\n",
                                    skb->data[i]);
                        return 1;
                }
                i++;

                new = !!(skb->data[i + 2] & 0x08);
                active = !!(skb->data[i + 2] & 0x02);
                if (lmi == LMI_CISCO) {
                        dlci = (skb->data[i] << 8) | skb->data[i + 1];
                        bw = (skb->data[i + 3] << 16) |
                                (skb->data[i + 4] << 8) |
                                (skb->data[i + 5]);
                } else {
                        dlci = ((skb->data[i] & 0x3F) << 4) |
                                ((skb->data[i + 1] & 0x78) >> 3);
                        bw = 0;
                }

                pvc = add_pvc(dev, dlci);

                if (!pvc && !no_ram) {
                        netdev_warn(dev, "Memory squeeze on fr_lmi_recv()\n");
                        no_ram = 1;
                }

                if (pvc) {
                        pvc->state.exist = 1;
                        pvc->state.deleted = 0;
                        if (active != pvc->state.active ||
                            new != pvc->state.new ||
                            bw != pvc->state.bandwidth ||
                            !pvc->state.exist) {
                                pvc->state.new = new;
                                pvc->state.active = active;
                                pvc->state.bandwidth = bw;
                                pvc_carrier(active, pvc);
                                fr_log_dlci_active(pvc);
                        }
                }

                i += stat_len;
        }

        pvc = state(hdlc)->first_pvc;

        while (pvc) {
                if (pvc->state.deleted && pvc->state.exist) {
                        pvc_carrier(0, pvc);
                        pvc->state.active = pvc->state.new = 0;
                        pvc->state.exist = 0;
                        pvc->state.bandwidth = 0;
                        fr_log_dlci_active(pvc);
                }
                pvc = pvc->next;
        }

        /* Next full report after N391 polls */
        state(hdlc)->n391cnt = state(hdlc)->settings.n391;

        return 0;
}
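
/* Receive hook called by the generic HDLC layer for every frame arriving
 * on the FRAD. LMI DLCIs are diverted to fr_lmi_recv(); other frames are
 * matched to a PVC, FECN/BECN bits are noted, the encapsulation header is
 * stripped and the packet is handed to the routed or bridged PVC interface
 * according to its NLPID or SNAP OUI/PID.
 */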
static int fr_rx(struct sk_buff *skb)
{
        struct net_device *frad = skb->dev;
        hdlc_device *hdlc = dev_to_hdlc(frad);
        struct fr_hdr *fh = (struct fr_hdr *)skb->data;
        u8 *data = skb->data;
        u16 dlci;
        struct pvc_device *pvc;
        struct net_device *dev = NULL;

        if (skb->len <= 4 || fh->ea1 || data[2] != FR_UI)
                goto rx_error;

        dlci = q922_to_dlci(skb->data);

        if ((dlci == LMI_CCITT_ANSI_DLCI &&
             (state(hdlc)->settings.lmi == LMI_ANSI ||
              state(hdlc)->settings.lmi == LMI_CCITT)) ||
            (dlci == LMI_CISCO_DLCI &&
             state(hdlc)->settings.lmi == LMI_CISCO)) {
                if (fr_lmi_recv(frad, skb))
                        goto rx_error;
                dev_kfree_skb_any(skb);
                return NET_RX_SUCCESS;
        }

        pvc = find_pvc(hdlc, dlci);
        if (!pvc) {
#ifdef DEBUG_PKT
                netdev_info(frad, "No PVC for received frame's DLCI %d\n",
                            dlci);
#endif
                dev_kfree_skb_any(skb);
                return NET_RX_DROP;
        }

        if (pvc->state.fecn != fh->fecn) {
#ifdef DEBUG_ECN
                printk(KERN_DEBUG "%s: DLCI %d FECN O%s\n", frad->name,
                       dlci, fh->fecn ? "N" : "FF");
#endif
                pvc->state.fecn ^= 1;
        }

        if (pvc->state.becn != fh->becn) {
#ifdef DEBUG_ECN
                printk(KERN_DEBUG "%s: DLCI %d BECN O%s\n", frad->name,
                       dlci, fh->becn ? "N" : "FF");
#endif
                pvc->state.becn ^= 1;
        }

        if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) {
                frad->stats.rx_dropped++;
                return NET_RX_DROP;
        }

        if (data[3] == NLPID_IP) {
                skb_pull(skb, 4); /* Remove 4-byte header (hdr, UI, NLPID) */
                dev = pvc->main;
                skb->protocol = htons(ETH_P_IP);

        } else if (data[3] == NLPID_IPV6) {
                skb_pull(skb, 4); /* Remove 4-byte header (hdr, UI, NLPID) */
                dev = pvc->main;
                skb->protocol = htons(ETH_P_IPV6);

        } else if (skb->len > 10 && data[3] == FR_PAD &&
                   data[4] == NLPID_SNAP && data[5] == FR_PAD) {
                u16 oui = ntohs(*(__be16 *)(data + 6));
                u16 pid = ntohs(*(__be16 *)(data + 8));
                skb_pull(skb, 10);

                switch ((((u32)oui) << 16) | pid) {
                case ETH_P_ARP: /* routed frame with SNAP */
                case ETH_P_IPX:
                case ETH_P_IP:  /* a long variant */
                case ETH_P_IPV6:
                        dev = pvc->main;
                        skb->protocol = htons(pid);
                        break;

                case 0x80C20007: /* bridged Ethernet frame */
                        if ((dev = pvc->ether) != NULL)
                                skb->protocol = eth_type_trans(skb, dev);
                        break;

                default:
                        netdev_info(frad, "Unsupported protocol, OUI=%x PID=%x\n",
                                    oui, pid);
                        dev_kfree_skb_any(skb);
                        return NET_RX_DROP;
                }
        } else {
                netdev_info(frad, "Unsupported protocol, NLPID=%x length=%i\n",
                            data[3], skb->len);
                dev_kfree_skb_any(skb);
                return NET_RX_DROP;
        }

        if (dev) {
                dev->stats.rx_packets++; /* PVC traffic */
                dev->stats.rx_bytes += skb->len;
                if (pvc->state.becn)
                        dev->stats.rx_compressed++;
                skb->dev = dev;
                netif_rx(skb);
                return NET_RX_SUCCESS;
        } else {
                dev_kfree_skb_any(skb);
                return NET_RX_DROP;
        }

rx_error:
        frad->stats.rx_errors++; /* Mark error */
        dev_kfree_skb_any(skb);
        return NET_RX_DROP;
}

static void fr_start(struct net_device *dev)
{
        hdlc_device *hdlc = dev_to_hdlc(dev);
#ifdef DEBUG_LINK
        printk(KERN_DEBUG "fr_start\n");
#endif
        if (state(hdlc)->settings.lmi != LMI_NONE) {
                state(hdlc)->reliable = 0;
                state(hdlc)->dce_changed = 1;
                state(hdlc)->request = 0;
                state(hdlc)->fullrep_sent = 0;
                state(hdlc)->last_errors = 0xFFFFFFFF;
                state(hdlc)->n391cnt = 0;
                state(hdlc)->txseq = state(hdlc)->rxseq = 0;

                init_timer(&state(hdlc)->timer);
                /* First poll after 1 s */
                state(hdlc)->timer.expires = jiffies + HZ;
                state(hdlc)->timer.function = fr_timer;
                state(hdlc)->timer.data = (unsigned long)dev;
                add_timer(&state(hdlc)->timer);
        } else
                fr_set_link_state(1, dev);
}

static void fr_stop(struct net_device *dev)
{
        hdlc_device *hdlc = dev_to_hdlc(dev);
#ifdef DEBUG_LINK
        printk(KERN_DEBUG "fr_stop\n");
#endif
        if (state(hdlc)->settings.lmi != LMI_NONE)
                del_timer_sync(&state(hdlc)->timer);
        fr_set_link_state(0, dev);
}

static void fr_close(struct net_device *dev)
{
        hdlc_device *hdlc = dev_to_hdlc(dev);
        struct pvc_device *pvc = state(hdlc)->first_pvc;

        while (pvc) {           /* Shutdown all PVCs for this FRAD */
                if (pvc->main)
                        dev_close(pvc->main);
                if (pvc->ether)
                        dev_close(pvc->ether);
                pvc = pvc->next;
        }
}

static void pvc_setup(struct net_device *dev)
{
        dev->type = ARPHRD_DLCI;
        dev->flags = IFF_POINTOPOINT;
        dev->hard_header_len = 10;
        dev->addr_len = 2;
        netif_keep_dst(dev);
}

static const struct net_device_ops pvc_ops = {
        .ndo_open       = pvc_open,
        .ndo_stop       = pvc_close,
        .ndo_change_mtu = hdlc_change_mtu,
        .ndo_start_xmit = pvc_xmit,
        .ndo_do_ioctl   = pvc_ioctl,
};
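
/* Create the user-visible netdevice for a DLCI: "pvc%d" (ARPHRD_DLCI) for
 * routed traffic or "pvceth%d" for bridged Ethernet, attach it to the
 * (possibly newly allocated) pvc_device, and bump the DCE PVC count when
 * this DLCI was not previously in use.
 */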
static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type)
{
        hdlc_device *hdlc = dev_to_hdlc(frad);
        struct pvc_device *pvc;
        struct net_device *dev;
        int used;

        if ((pvc = add_pvc(frad, dlci)) == NULL) {
                netdev_warn(frad, "Memory squeeze on fr_add_pvc()\n");
                return -ENOBUFS;
        }

        if (*get_dev_p(pvc, type))
                return -EEXIST;

        used = pvc_is_used(pvc);

        if (type == ARPHRD_ETHER) {
                dev = alloc_netdev(0, "pvceth%d", NET_NAME_UNKNOWN,
                                   ether_setup);
                dev->priv_flags &= ~IFF_TX_SKB_SHARING;
        } else
                dev = alloc_netdev(0, "pvc%d", NET_NAME_UNKNOWN, pvc_setup);

        if (!dev) {
                netdev_warn(frad, "Memory squeeze on fr_pvc()\n");
                delete_unused_pvcs(hdlc);
                return -ENOBUFS;
        }

        if (type == ARPHRD_ETHER)
                eth_hw_addr_random(dev);
        else {
                *(__be16 *)dev->dev_addr = htons(dlci);
                dlci_to_q922(dev->broadcast, dlci);
        }
        dev->netdev_ops = &pvc_ops;
        dev->mtu = HDLC_MAX_MTU;
        dev->tx_queue_len = 0;
        dev->ml_priv = pvc;

        if (register_netdevice(dev) != 0) {
                free_netdev(dev);
                delete_unused_pvcs(hdlc);
                return -EIO;
        }

        dev->destructor = free_netdev;
        *get_dev_p(pvc, type) = dev;
        if (!used) {
                state(hdlc)->dce_changed = 1;
                state(hdlc)->dce_pvc_count++;
        }
        return 0;
}

static int fr_del_pvc(hdlc_device *hdlc, unsigned int dlci, int type)
{
        struct pvc_device *pvc;
        struct net_device *dev;

        if ((pvc = find_pvc(hdlc, dlci)) == NULL)
                return -ENOENT;

        if ((dev = *get_dev_p(pvc, type)) == NULL)
                return -ENOENT;

        if (dev->flags & IFF_UP)
                return -EBUSY;          /* PVC in use */

        unregister_netdevice(dev); /* the destructor will free_netdev(dev) */
        *get_dev_p(pvc, type) = NULL;

        if (!pvc_is_used(pvc)) {
                state(hdlc)->dce_pvc_count--;
                state(hdlc)->dce_changed = 1;
        }
        delete_unused_pvcs(hdlc);
        return 0;
}

static void fr_destroy(struct net_device *frad)
{
        hdlc_device *hdlc = dev_to_hdlc(frad);
        struct pvc_device *pvc = state(hdlc)->first_pvc;
        state(hdlc)->first_pvc = NULL; /* All PVCs destroyed */
        state(hdlc)->dce_pvc_count = 0;
        state(hdlc)->dce_changed = 1;

        while (pvc) {
                struct pvc_device *next = pvc->next;
                /* destructors will free_netdev() main and ether */
                if (pvc->main)
                        unregister_netdevice(pvc->main);

                if (pvc->ether)
                        unregister_netdevice(pvc->ether);

                kfree(pvc);
                pvc = next;
        }
}

static struct hdlc_proto proto = {
        .close          = fr_close,
        .start          = fr_start,
        .stop           = fr_stop,
        .detach         = fr_destroy,
        .ioctl          = fr_ioctl,
        .netif_rx       = fr_rx,
        .module         = THIS_MODULE,
};
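
/* Protocol ioctl handler, invoked through the generic HDLC layer:
 * IF_GET_PROTO returns the current fr_proto settings, IF_PROTO_FR
 * validates new LMI/timer/counter settings and attaches this protocol to
 * the device, and the ADD/DEL (ETH_)PVC requests create or remove PVC
 * interfaces after checking that the DLCI lies within 1-1023.
 */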
static int fr_ioctl(struct net_device *dev, struct ifreq *ifr)
{
        fr_proto __user *fr_s = ifr->ifr_settings.ifs_ifsu.fr;
        const size_t size = sizeof(fr_proto);
        fr_proto new_settings;
        hdlc_device *hdlc = dev_to_hdlc(dev);
        fr_proto_pvc pvc;
        int result;

        switch (ifr->ifr_settings.type) {
        case IF_GET_PROTO:
                if (dev_to_hdlc(dev)->proto != &proto) /* Different proto */
                        return -EINVAL;
                ifr->ifr_settings.type = IF_PROTO_FR;
                if (ifr->ifr_settings.size < size) {
                        ifr->ifr_settings.size = size; /* data size wanted */
                        return -ENOBUFS;
                }
                if (copy_to_user(fr_s, &state(hdlc)->settings, size))
                        return -EFAULT;
                return 0;

        case IF_PROTO_FR:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;

                if (dev->flags & IFF_UP)
                        return -EBUSY;

                if (copy_from_user(&new_settings, fr_s, size))
                        return -EFAULT;

                if (new_settings.lmi == LMI_DEFAULT)
                        new_settings.lmi = LMI_ANSI;

                if ((new_settings.lmi != LMI_NONE &&
                     new_settings.lmi != LMI_ANSI &&
                     new_settings.lmi != LMI_CCITT &&
                     new_settings.lmi != LMI_CISCO) ||
                    new_settings.t391 < 1 ||
                    new_settings.t392 < 2 ||
                    new_settings.n391 < 1 ||
                    new_settings.n392 < 1 ||
                    new_settings.n393 < new_settings.n392 ||
                    new_settings.n393 > 32 ||
                    (new_settings.dce != 0 &&
                     new_settings.dce != 1))
                        return -EINVAL;

                result = hdlc->attach(dev, ENCODING_NRZ,
                                      PARITY_CRC16_PR1_CCITT);
                if (result)
                        return result;

                if (dev_to_hdlc(dev)->proto != &proto) { /* Different proto */
                        result = attach_hdlc_protocol(dev, &proto,
                                                      sizeof(struct frad_state));
                        if (result)
                                return result;
                        state(hdlc)->first_pvc = NULL;
                        state(hdlc)->dce_pvc_count = 0;
                }
                memcpy(&state(hdlc)->settings, &new_settings, size);
                dev->type = ARPHRD_FRAD;
                return 0;

        case IF_PROTO_FR_ADD_PVC:
        case IF_PROTO_FR_DEL_PVC:
        case IF_PROTO_FR_ADD_ETH_PVC:
        case IF_PROTO_FR_DEL_ETH_PVC:
                if (dev_to_hdlc(dev)->proto != &proto) /* Different proto */
                        return -EINVAL;

                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;

                if (copy_from_user(&pvc, ifr->ifr_settings.ifs_ifsu.fr_pvc,
                                   sizeof(fr_proto_pvc)))
                        return -EFAULT;

                if (pvc.dlci <= 0 || pvc.dlci >= 1024)
                        return -EINVAL; /* Only 10 bits, DLCI 0 reserved */

                if (ifr->ifr_settings.type == IF_PROTO_FR_ADD_ETH_PVC ||
                    ifr->ifr_settings.type == IF_PROTO_FR_DEL_ETH_PVC)
                        result = ARPHRD_ETHER; /* bridged Ethernet device */
                else
                        result = ARPHRD_DLCI;

                if (ifr->ifr_settings.type == IF_PROTO_FR_ADD_PVC ||
                    ifr->ifr_settings.type == IF_PROTO_FR_ADD_ETH_PVC)
                        return fr_add_pvc(dev, pvc.dlci, result);
                else
                        return fr_del_pvc(hdlc, pvc.dlci, result);
        }

        return -EINVAL;
}

static int __init mod_init(void)
{
        register_hdlc_protocol(&proto);
        return 0;
}

static void __exit mod_exit(void)
{
        unregister_hdlc_protocol(&proto);
}

module_init(mod_init);
module_exit(mod_exit);
MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
MODULE_DESCRIPTION("Frame-Relay protocol support for generic HDLC");
MODULE_LICENSE("GPL v2");