hdlc_fr.c

/*
 * Generic HDLC support routines for Linux
 * Frame Relay support
 *
 * Copyright (C) 1999 - 2005 Krzysztof Halasa <khc@pm.waw.pl>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 *
 *		Theory of PVC state
 *
 * DCE mode:
 *
 * (exist,new) -> 0,0 when "PVC create" or if "link unreliable"
 *	   0,x -> 1,1 if "link reliable" when sending FULL STATUS
 *	   1,1 -> 1,0 if received FULL STATUS ACK
 *
 * (active) -> 0 when "ifconfig PVC down" or "link unreliable" or "PVC create"
 *	    -> 1 when "PVC up" and (exist,new) = 1,0
 *
 * DTE mode:
 * (exist,new,active) = FULL STATUS if "link reliable"
 *		      = 0, 0, 0     if "link unreliable"
 *
 * No LMI:
 * active = open and "link reliable"
 * exist = new = not used
 *
 * CCITT LMI: ITU-T Q.933 Annex A
 * ANSI LMI: ANSI T1.617 Annex D
 * CISCO LMI: the original, aka "Gang of Four" LMI
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/errno.h>
#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/pkt_sched.h>
#include <linux/random.h>
#include <linux/inetdevice.h>
#include <linux/lapb.h>
#include <linux/rtnetlink.h>
#include <linux/etherdevice.h>
#include <linux/hdlc.h>
#include <linux/version.h>	/* LINUX_VERSION_CODE / KERNEL_VERSION, used in fr_add_pvc() */

#undef DEBUG_PKT
#undef DEBUG_ECN
#undef DEBUG_LINK

#define FR_UI			0x03
#define FR_PAD			0x00

#define NLPID_IP		0xCC
#define NLPID_IPV6		0x8E
#define NLPID_SNAP		0x80
#define NLPID_PAD		0x00
#define NLPID_CCITT_ANSI_LMI	0x08
#define NLPID_CISCO_LMI		0x09

#define LMI_CCITT_ANSI_DLCI	   0	/* LMI DLCI */
#define LMI_CISCO_DLCI		1023

#define LMI_CALLREF		0x00	/* Call Reference */
#define LMI_ANSI_LOCKSHIFT	0x95	/* ANSI locking shift */
#define LMI_ANSI_CISCO_REPTYPE	0x01	/* report type */
#define LMI_CCITT_REPTYPE	0x51
#define LMI_ANSI_CISCO_ALIVE	0x03	/* keep alive */
#define LMI_CCITT_ALIVE		0x53
#define LMI_ANSI_CISCO_PVCSTAT	0x07	/* PVC status */
#define LMI_CCITT_PVCSTAT	0x57
#define LMI_FULLREP		0x00	/* full report */
#define LMI_INTEGRITY		0x01	/* link integrity report */
#define LMI_SINGLE		0x02	/* single PVC report */
#define LMI_STATUS_ENQUIRY	0x75
#define LMI_STATUS		0x7D	/* reply */

#define LMI_REPT_LEN		   1	/* report type element length */
#define LMI_INTEG_LEN		   2	/* link integrity element length */

#define LMI_CCITT_CISCO_LENGTH	  13	/* LMI frame lengths */
#define LMI_ANSI_LENGTH		  14

typedef struct {
#if defined(__LITTLE_ENDIAN_BITFIELD)
	unsigned ea1:	1;
	unsigned cr:	1;
	unsigned dlcih:	6;

	unsigned ea2:	1;
	unsigned de:	1;
	unsigned becn:	1;
	unsigned fecn:	1;
	unsigned dlcil:	4;
#else
	unsigned dlcih:	6;
	unsigned cr:	1;
	unsigned ea1:	1;

	unsigned dlcil:	4;
	unsigned fecn:	1;
	unsigned becn:	1;
	unsigned de:	1;
	unsigned ea2:	1;
#endif
} __attribute__ ((packed)) fr_hdr;

static inline u16 q922_to_dlci(u8 *hdr)
{
	return ((hdr[0] & 0xFC) << 2) | ((hdr[1] & 0xF0) >> 4);
}


static inline void dlci_to_q922(u8 *hdr, u16 dlci)
{
	hdr[0] = (dlci >> 2) & 0xFC;
	hdr[1] = ((dlci << 4) & 0xF0) | 0x01;
}

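/*
 * The two helpers above pack/unpack the 10-bit DLCI into the two-octet
 * Q.922 address field: the upper 6 bits go into bits 7-2 of the first
 * octet (C/R and EA0 are left zero), the lower 4 bits into bits 7-4 of
 * the second octet, whose EA bit is set to mark the end of the address.
 * Worked example: DLCI 16 <-> 0x04 0x01, DLCI 1023 <-> 0xFC 0xF1.
 */
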
static inline pvc_device *find_pvc(hdlc_device *hdlc, u16 dlci)
{
	pvc_device *pvc = hdlc->state.fr.first_pvc;

	while (pvc) {
		if (pvc->dlci == dlci)
			return pvc;
		if (pvc->dlci > dlci)
			return NULL;	/* the list is sorted */
		pvc = pvc->next;
	}

	return NULL;
}

static inline pvc_device *add_pvc(struct net_device *dev, u16 dlci)
{
	hdlc_device *hdlc = dev_to_hdlc(dev);
	pvc_device *pvc, **pvc_p = &hdlc->state.fr.first_pvc;

	while (*pvc_p) {
		if ((*pvc_p)->dlci == dlci)
			return *pvc_p;
		if ((*pvc_p)->dlci > dlci)
			break;	/* the list is sorted */
		pvc_p = &(*pvc_p)->next;
	}

	pvc = kmalloc(sizeof(pvc_device), GFP_ATOMIC);
	if (!pvc)
		return NULL;

	memset(pvc, 0, sizeof(pvc_device));
	pvc->dlci = dlci;
	pvc->master = dev;
	pvc->next = *pvc_p;	/* Put it in the chain */
	*pvc_p = pvc;
	return pvc;
}

static inline int pvc_is_used(pvc_device *pvc)
{
	return pvc->main != NULL || pvc->ether != NULL;
}

static inline void pvc_carrier(int on, pvc_device *pvc)
{
	if (on) {
		if (pvc->main)
			if (!netif_carrier_ok(pvc->main))
				netif_carrier_on(pvc->main);
		if (pvc->ether)
			if (!netif_carrier_ok(pvc->ether))
				netif_carrier_on(pvc->ether);
	} else {
		if (pvc->main)
			if (netif_carrier_ok(pvc->main))
				netif_carrier_off(pvc->main);
		if (pvc->ether)
			if (netif_carrier_ok(pvc->ether))
				netif_carrier_off(pvc->ether);
	}
}

static inline void delete_unused_pvcs(hdlc_device *hdlc)
{
	pvc_device **pvc_p = &hdlc->state.fr.first_pvc;

	while (*pvc_p) {
		if (!pvc_is_used(*pvc_p)) {
			pvc_device *pvc = *pvc_p;
			*pvc_p = pvc->next;
			kfree(pvc);
			continue;
		}
		pvc_p = &(*pvc_p)->next;
	}
}

static inline struct net_device **get_dev_p(pvc_device *pvc, int type)
{
	if (type == ARPHRD_ETHER)
		return &pvc->ether;
	else
		return &pvc->main;
}

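/*
 * fr_hard_header() below prepends the Frame Relay encapsulation
 * (RFC 2427 style): a 2-octet Q.922 address, the UI control octet and
 * either a bare NLPID (IP, IPv6, LMI) or a SNAP header.  Bridged
 * Ethernet uses the 802.1 OUI 00-80-C2 with PID 00-07 ("Ethernet frame
 * without preserved FCS"); any other protocol is sent as SNAP with a
 * zero OUI and the skb's Ethertype as the PID.
 */
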
static int fr_hard_header(struct sk_buff **skb_p, u16 dlci)
{
	u16 head_len;
	struct sk_buff *skb = *skb_p;

	switch (skb->protocol) {
	case __constant_ntohs(NLPID_CCITT_ANSI_LMI):
		head_len = 4;
		skb_push(skb, head_len);
		skb->data[3] = NLPID_CCITT_ANSI_LMI;
		break;

	case __constant_ntohs(NLPID_CISCO_LMI):
		head_len = 4;
		skb_push(skb, head_len);
		skb->data[3] = NLPID_CISCO_LMI;
		break;

	case __constant_ntohs(ETH_P_IP):
		head_len = 4;
		skb_push(skb, head_len);
		skb->data[3] = NLPID_IP;
		break;

	case __constant_ntohs(ETH_P_IPV6):
		head_len = 4;
		skb_push(skb, head_len);
		skb->data[3] = NLPID_IPV6;
		break;

	case __constant_ntohs(ETH_P_802_3):
		head_len = 10;
		if (skb_headroom(skb) < head_len) {
			struct sk_buff *skb2 = skb_realloc_headroom(skb,
								    head_len);
			if (!skb2)
				return -ENOBUFS;
			dev_kfree_skb(skb);
			skb = *skb_p = skb2;
		}
		skb_push(skb, head_len);
		skb->data[3] = FR_PAD;
		skb->data[4] = NLPID_SNAP;
		skb->data[5] = FR_PAD;
		skb->data[6] = 0x80;
		skb->data[7] = 0xC2;
		skb->data[8] = 0x00;
		skb->data[9] = 0x07;	/* bridged Ethernet frame w/out FCS */
		break;

	default:
		head_len = 10;
		skb_push(skb, head_len);
		skb->data[3] = FR_PAD;
		skb->data[4] = NLPID_SNAP;
		skb->data[5] = FR_PAD;
		skb->data[6] = FR_PAD;
		skb->data[7] = FR_PAD;
		*(u16*)(skb->data + 8) = skb->protocol;
	}

	dlci_to_q922(skb->data, dlci);
	skb->data[2] = FR_UI;
	return 0;
}

static int pvc_open(struct net_device *dev)
{
	pvc_device *pvc = dev_to_pvc(dev);

	if ((pvc->master->flags & IFF_UP) == 0)
		return -EIO;	/* Master must be UP in order to activate PVC */

	if (pvc->open_count++ == 0) {
		hdlc_device *hdlc = dev_to_hdlc(pvc->master);
		if (hdlc->state.fr.settings.lmi == LMI_NONE)
			pvc->state.active = hdlc->carrier;

		pvc_carrier(pvc->state.active, pvc);
		hdlc->state.fr.dce_changed = 1;
	}
	return 0;
}

static int pvc_close(struct net_device *dev)
{
	pvc_device *pvc = dev_to_pvc(dev);

	if (--pvc->open_count == 0) {
		hdlc_device *hdlc = dev_to_hdlc(pvc->master);
		if (hdlc->state.fr.settings.lmi == LMI_NONE)
			pvc->state.active = 0;

		if (hdlc->state.fr.settings.dce) {
			hdlc->state.fr.dce_changed = 1;
			pvc->state.active = 0;
		}
	}
	return 0;
}

static int pvc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	pvc_device *pvc = dev_to_pvc(dev);
	fr_proto_pvc_info info;

	if (ifr->ifr_settings.type == IF_GET_PROTO) {
		if (dev->type == ARPHRD_ETHER)
			ifr->ifr_settings.type = IF_PROTO_FR_ETH_PVC;
		else
			ifr->ifr_settings.type = IF_PROTO_FR_PVC;

		if (ifr->ifr_settings.size < sizeof(info)) {
			/* data size wanted */
			ifr->ifr_settings.size = sizeof(info);
			return -ENOBUFS;
		}

		info.dlci = pvc->dlci;
		memcpy(info.master, pvc->master->name, IFNAMSIZ);
		if (copy_to_user(ifr->ifr_settings.ifs_ifsu.fr_pvc_info,
				 &info, sizeof(info)))
			return -EFAULT;
		return 0;
	}

	return -EINVAL;
}

static inline struct net_device_stats *pvc_get_stats(struct net_device *dev)
{
	return netdev_priv(dev);
}

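/*
 * PVC transmit path: pvc_xmit() pads bridged Ethernet frames to ETH_ZLEN,
 * prepends the Frame Relay header via fr_hard_header() and hands the
 * result to the master (FRAD) device with dev_queue_xmit().  Note that
 * tx_compressed is (ab)used as a "transmitted while congested" counter,
 * bumped whenever FECN has been seen on the PVC.
 */
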
static int pvc_xmit(struct sk_buff *skb, struct net_device *dev)
{
	pvc_device *pvc = dev_to_pvc(dev);
	struct net_device_stats *stats = pvc_get_stats(dev);

	if (pvc->state.active) {
		if (dev->type == ARPHRD_ETHER) {
			int pad = ETH_ZLEN - skb->len;
			if (pad > 0) {	/* Pad the frame with zeros */
				int len = skb->len;
				if (skb_tailroom(skb) < pad)
					if (pskb_expand_head(skb, 0, pad,
							     GFP_ATOMIC)) {
						stats->tx_dropped++;
						dev_kfree_skb(skb);
						return 0;
					}
				skb_put(skb, pad);
				memset(skb->data + len, 0, pad);
			}
			skb->protocol = __constant_htons(ETH_P_802_3);
		}
		if (!fr_hard_header(&skb, pvc->dlci)) {
			stats->tx_bytes += skb->len;
			stats->tx_packets++;
			if (pvc->state.fecn)	/* TX Congestion counter */
				stats->tx_compressed++;
			skb->dev = pvc->master;
			dev_queue_xmit(skb);
			return 0;
		}
	}

	stats->tx_dropped++;
	dev_kfree_skb(skb);
	return 0;
}

static int pvc_change_mtu(struct net_device *dev, int new_mtu)
{
	if ((new_mtu < 68) || (new_mtu > HDLC_MAX_MTU))
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}

static inline void fr_log_dlci_active(pvc_device *pvc)
{
	printk(KERN_INFO "%s: DLCI %d [%s%s%s]%s %s\n",
	       pvc->master->name,
	       pvc->dlci,
	       pvc->main ? pvc->main->name : "",
	       pvc->main && pvc->ether ? " " : "",
	       pvc->ether ? pvc->ether->name : "",
	       pvc->state.new ? " new" : "",
	       !pvc->state.exist ? "deleted" :
	       pvc->state.active ? "active" : "inactive");
}

static inline u8 fr_lmi_nextseq(u8 x)
{
	x++;
	return x ? x : 1;
}

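/*
 * LMI sequence numbers run 1..255 and deliberately skip 0: fr_lmi_recv()
 * treats a received-sequence of 0 as an error and asks for a full report.
 * fr_lmi_send() builds a STATUS (DCE) or STATUS ENQUIRY (DTE) message:
 * call reference, message type, the ANSI locking shift if applicable,
 * a report type IE (full report or link integrity only), a link
 * integrity IE carrying the tx/rx sequence numbers and - for a DCE full
 * report - one PVC status IE per configured PVC.
 */
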
static void fr_lmi_send(struct net_device *dev, int fullrep)
{
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct sk_buff *skb;
	pvc_device *pvc = hdlc->state.fr.first_pvc;
	int lmi = hdlc->state.fr.settings.lmi;
	int dce = hdlc->state.fr.settings.dce;
	int len = lmi == LMI_ANSI ? LMI_ANSI_LENGTH : LMI_CCITT_CISCO_LENGTH;
	int stat_len = (lmi == LMI_CISCO) ? 6 : 3;
	u8 *data;
	int i = 0;

	if (dce && fullrep) {
		len += hdlc->state.fr.dce_pvc_count * (2 + stat_len);
		if (len > HDLC_MAX_MRU) {
			printk(KERN_WARNING "%s: Too many PVCs while sending "
			       "LMI full report\n", dev->name);
			return;
		}
	}

	skb = dev_alloc_skb(len);
	if (!skb) {
		printk(KERN_WARNING "%s: Memory squeeze on fr_lmi_send()\n",
		       dev->name);
		return;
	}
	memset(skb->data, 0, len);
	skb_reserve(skb, 4);
	if (lmi == LMI_CISCO) {
		skb->protocol = __constant_htons(NLPID_CISCO_LMI);
		fr_hard_header(&skb, LMI_CISCO_DLCI);
	} else {
		skb->protocol = __constant_htons(NLPID_CCITT_ANSI_LMI);
		fr_hard_header(&skb, LMI_CCITT_ANSI_DLCI);
	}
	data = skb->tail;
	data[i++] = LMI_CALLREF;
	data[i++] = dce ? LMI_STATUS : LMI_STATUS_ENQUIRY;
	if (lmi == LMI_ANSI)
		data[i++] = LMI_ANSI_LOCKSHIFT;
	data[i++] = lmi == LMI_CCITT ? LMI_CCITT_REPTYPE :
		LMI_ANSI_CISCO_REPTYPE;
	data[i++] = LMI_REPT_LEN;
	data[i++] = fullrep ? LMI_FULLREP : LMI_INTEGRITY;

	data[i++] = lmi == LMI_CCITT ? LMI_CCITT_ALIVE : LMI_ANSI_CISCO_ALIVE;
	data[i++] = LMI_INTEG_LEN;
	data[i++] = hdlc->state.fr.txseq =
		fr_lmi_nextseq(hdlc->state.fr.txseq);
	data[i++] = hdlc->state.fr.rxseq;

	if (dce && fullrep) {
		while (pvc) {
			data[i++] = lmi == LMI_CCITT ? LMI_CCITT_PVCSTAT :
				LMI_ANSI_CISCO_PVCSTAT;
			data[i++] = stat_len;

			/* LMI start/restart */
			if (hdlc->state.fr.reliable && !pvc->state.exist) {
				pvc->state.exist = pvc->state.new = 1;
				fr_log_dlci_active(pvc);
			}

			/* ifconfig PVC up */
			if (pvc->open_count && !pvc->state.active &&
			    pvc->state.exist && !pvc->state.new) {
				pvc_carrier(1, pvc);
				pvc->state.active = 1;
				fr_log_dlci_active(pvc);
			}

			if (lmi == LMI_CISCO) {
				data[i] = pvc->dlci >> 8;
				data[i + 1] = pvc->dlci & 0xFF;
			} else {
				data[i] = (pvc->dlci >> 4) & 0x3F;
				data[i + 1] = ((pvc->dlci << 3) & 0x78) | 0x80;
				data[i + 2] = 0x80;
			}

			if (pvc->state.new)
				data[i + 2] |= 0x08;
			else if (pvc->state.active)
				data[i + 2] |= 0x02;

			i += stat_len;
			pvc = pvc->next;
		}
	}

	skb_put(skb, i);
	skb->priority = TC_PRIO_CONTROL;
	skb->dev = dev;
	skb->nh.raw = skb->data;

	dev_queue_xmit(skb);
}

static void fr_set_link_state(int reliable, struct net_device *dev)
{
	hdlc_device *hdlc = dev_to_hdlc(dev);
	pvc_device *pvc = hdlc->state.fr.first_pvc;

	hdlc->state.fr.reliable = reliable;
	if (reliable) {
#if 0
		if (!netif_carrier_ok(dev))
			netif_carrier_on(dev);
#endif
		hdlc->state.fr.n391cnt = 0;	/* Request full status */
		hdlc->state.fr.dce_changed = 1;

		if (hdlc->state.fr.settings.lmi == LMI_NONE) {
			while (pvc) {	/* Activate all PVCs */
				pvc_carrier(1, pvc);
				pvc->state.exist = pvc->state.active = 1;
				pvc->state.new = 0;
				pvc = pvc->next;
			}
		}
	} else {
#if 0
		if (netif_carrier_ok(dev))
			netif_carrier_off(dev);
#endif
		while (pvc) {		/* Deactivate all PVCs */
			pvc_carrier(0, pvc);
			pvc->state.exist = pvc->state.active = 0;
			pvc->state.new = 0;
			if (!hdlc->state.fr.settings.dce)
				pvc->state.bandwidth = 0;
			pvc = pvc->next;
		}
	}
}

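/*
 * LMI polling timer.  In DCE mode it only checks that a STATUS ENQUIRY
 * arrived within T392 seconds of the previous poll; in DTE mode it sends
 * a STATUS ENQUIRY every T391 seconds (a full-status request every N391
 * polls), records a missing reply in a sliding error window and declares
 * the link unreliable once N392 or more of the last N393 events were
 * errors.
 */
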
static void fr_timer(unsigned long arg)
{
	struct net_device *dev = (struct net_device *)arg;
	hdlc_device *hdlc = dev_to_hdlc(dev);
	int i, cnt = 0, reliable;
	u32 list;

	if (hdlc->state.fr.settings.dce) {
		reliable = hdlc->state.fr.request &&
			time_before(jiffies, hdlc->state.fr.last_poll +
				    hdlc->state.fr.settings.t392 * HZ);
		hdlc->state.fr.request = 0;
	} else {
		hdlc->state.fr.last_errors <<= 1;	/* Shift the list */
		if (hdlc->state.fr.request) {
			if (hdlc->state.fr.reliable)
				printk(KERN_INFO "%s: No LMI status reply "
				       "received\n", dev->name);
			hdlc->state.fr.last_errors |= 1;
		}

		list = hdlc->state.fr.last_errors;
		for (i = 0; i < hdlc->state.fr.settings.n393; i++, list >>= 1)
			cnt += (list & 1);	/* errors count */

		reliable = (cnt < hdlc->state.fr.settings.n392);
	}

	if (hdlc->state.fr.reliable != reliable) {
		printk(KERN_INFO "%s: Link %sreliable\n", dev->name,
		       reliable ? "" : "un");
		fr_set_link_state(reliable, dev);
	}

	if (hdlc->state.fr.settings.dce)
		hdlc->state.fr.timer.expires = jiffies +
			hdlc->state.fr.settings.t392 * HZ;
	else {
		if (hdlc->state.fr.n391cnt)
			hdlc->state.fr.n391cnt--;

		fr_lmi_send(dev, hdlc->state.fr.n391cnt == 0);

		hdlc->state.fr.last_poll = jiffies;
		hdlc->state.fr.request = 1;

		hdlc->state.fr.timer.expires = jiffies +
			hdlc->state.fr.settings.t391 * HZ;
	}

	hdlc->state.fr.timer.function = fr_timer;
	hdlc->state.fr.timer.data = arg;
	add_timer(&hdlc->state.fr.timer);
}

static int fr_lmi_recv(struct net_device *dev, struct sk_buff *skb)
{
	hdlc_device *hdlc = dev_to_hdlc(dev);
	pvc_device *pvc;
	u8 rxseq, txseq;
	int lmi = hdlc->state.fr.settings.lmi;
	int dce = hdlc->state.fr.settings.dce;
	int stat_len = (lmi == LMI_CISCO) ? 6 : 3, reptype, error, no_ram, i;

	if (skb->len < (lmi == LMI_ANSI ? LMI_ANSI_LENGTH :
			LMI_CCITT_CISCO_LENGTH)) {
		printk(KERN_INFO "%s: Short LMI frame\n", dev->name);
		return 1;
	}

	if (skb->data[3] != (lmi == LMI_CISCO ? NLPID_CISCO_LMI :
			     NLPID_CCITT_ANSI_LMI)) {
		printk(KERN_INFO "%s: Received non-LMI frame with LMI"
		       " DLCI\n", dev->name);
		return 1;
	}

	if (skb->data[4] != LMI_CALLREF) {
		printk(KERN_INFO "%s: Invalid LMI Call reference (0x%02X)\n",
		       dev->name, skb->data[4]);
		return 1;
	}

	if (skb->data[5] != (dce ? LMI_STATUS_ENQUIRY : LMI_STATUS)) {
		printk(KERN_INFO "%s: Invalid LMI Message type (0x%02X)\n",
		       dev->name, skb->data[5]);
		return 1;
	}

	if (lmi == LMI_ANSI) {
		if (skb->data[6] != LMI_ANSI_LOCKSHIFT) {
			printk(KERN_INFO "%s: Not ANSI locking shift in LMI"
			       " message (0x%02X)\n", dev->name, skb->data[6]);
			return 1;
		}
		i = 7;
	} else
		i = 6;

	if (skb->data[i] != (lmi == LMI_CCITT ? LMI_CCITT_REPTYPE :
			     LMI_ANSI_CISCO_REPTYPE)) {
		printk(KERN_INFO "%s: Not an LMI Report type IE (0x%02X)\n",
		       dev->name, skb->data[i]);
		return 1;
	}

	if (skb->data[++i] != LMI_REPT_LEN) {
		printk(KERN_INFO "%s: Invalid LMI Report type IE length"
		       " (%u)\n", dev->name, skb->data[i]);
		return 1;
	}

	reptype = skb->data[++i];
	if (reptype != LMI_INTEGRITY && reptype != LMI_FULLREP) {
		printk(KERN_INFO "%s: Unsupported LMI Report type (0x%02X)\n",
		       dev->name, reptype);
		return 1;
	}

	if (skb->data[++i] != (lmi == LMI_CCITT ? LMI_CCITT_ALIVE :
			       LMI_ANSI_CISCO_ALIVE)) {
		printk(KERN_INFO "%s: Not an LMI Link integrity verification"
		       " IE (0x%02X)\n", dev->name, skb->data[i]);
		return 1;
	}

	if (skb->data[++i] != LMI_INTEG_LEN) {
		printk(KERN_INFO "%s: Invalid LMI Link integrity verification"
		       " IE length (%u)\n", dev->name, skb->data[i]);
		return 1;
	}
	i++;

	hdlc->state.fr.rxseq = skb->data[i++];	/* TX sequence from peer */
	rxseq = skb->data[i++];			/* Should confirm our sequence */

	txseq = hdlc->state.fr.txseq;

	if (dce)
		hdlc->state.fr.last_poll = jiffies;

	error = 0;
	if (!hdlc->state.fr.reliable)
		error = 1;

	if (rxseq == 0 || rxseq != txseq) {
		hdlc->state.fr.n391cnt = 0;	/* Ask for full report next time */
		error = 1;
	}

	if (dce) {
		if (hdlc->state.fr.fullrep_sent && !error) {
			/* Stop sending full report -
			   the last one has been confirmed by DTE */
			hdlc->state.fr.fullrep_sent = 0;
			pvc = hdlc->state.fr.first_pvc;
			while (pvc) {
				if (pvc->state.new) {
					pvc->state.new = 0;

					/* Tell DTE that new PVC is now active */
					hdlc->state.fr.dce_changed = 1;
				}
				pvc = pvc->next;
			}
		}

		if (hdlc->state.fr.dce_changed) {
			reptype = LMI_FULLREP;
			hdlc->state.fr.fullrep_sent = 1;
			hdlc->state.fr.dce_changed = 0;
		}

		hdlc->state.fr.request = 1;	/* got request */
		fr_lmi_send(dev, reptype == LMI_FULLREP ? 1 : 0);
		return 0;
	}

	/* DTE */

	hdlc->state.fr.request = 0;	/* got response, no request pending */

	if (error)
		return 0;

	if (reptype != LMI_FULLREP)
		return 0;

	pvc = hdlc->state.fr.first_pvc;

	while (pvc) {
		pvc->state.deleted = 1;
		pvc = pvc->next;
	}

	no_ram = 0;
	while (skb->len >= i + 2 + stat_len) {
		u16 dlci;
		u32 bw;
		unsigned int active, new;

		if (skb->data[i] != (lmi == LMI_CCITT ? LMI_CCITT_PVCSTAT :
				     LMI_ANSI_CISCO_PVCSTAT)) {
			printk(KERN_INFO "%s: Not an LMI PVC status IE"
			       " (0x%02X)\n", dev->name, skb->data[i]);
			return 1;
		}

		if (skb->data[++i] != stat_len) {
			printk(KERN_INFO "%s: Invalid LMI PVC status IE length"
			       " (%u)\n", dev->name, skb->data[i]);
			return 1;
		}
		i++;

		new = !!(skb->data[i + 2] & 0x08);
		active = !!(skb->data[i + 2] & 0x02);
		if (lmi == LMI_CISCO) {
			dlci = (skb->data[i] << 8) | skb->data[i + 1];
			bw = (skb->data[i + 3] << 16) |
				(skb->data[i + 4] << 8) |
				(skb->data[i + 5]);
		} else {
			dlci = ((skb->data[i] & 0x3F) << 4) |
				((skb->data[i + 1] & 0x78) >> 3);
			bw = 0;
		}

		pvc = add_pvc(dev, dlci);

		if (!pvc && !no_ram) {
			printk(KERN_WARNING
			       "%s: Memory squeeze on fr_lmi_recv()\n",
			       dev->name);
			no_ram = 1;
		}

		if (pvc) {
			pvc->state.exist = 1;
			pvc->state.deleted = 0;
			if (active != pvc->state.active ||
			    new != pvc->state.new ||
			    bw != pvc->state.bandwidth ||
			    !pvc->state.exist) {
				pvc->state.new = new;
				pvc->state.active = active;
				pvc->state.bandwidth = bw;
				pvc_carrier(active, pvc);
				fr_log_dlci_active(pvc);
			}
		}

		i += stat_len;
	}

	pvc = hdlc->state.fr.first_pvc;

	while (pvc) {
		if (pvc->state.deleted && pvc->state.exist) {
			pvc_carrier(0, pvc);
			pvc->state.active = pvc->state.new = 0;
			pvc->state.exist = 0;
			pvc->state.bandwidth = 0;
			fr_log_dlci_active(pvc);
		}
		pvc = pvc->next;
	}

	/* Next full report after N391 polls */
	hdlc->state.fr.n391cnt = hdlc->state.fr.settings.n391;

	return 0;
}

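/*
 * Receive path for the master device.  Frames on the LMI DLCI (0 for
 * ANSI/CCITT, 1023 for Cisco) are handed to fr_lmi_recv(); everything
 * else is matched against the PVC list by DLCI and decapsulated: bare
 * NLPID for IP/IPv6 (delivered via pvc->main), or SNAP - either a routed
 * protocol selected by its PID, or a bridged Ethernet frame
 * (OUI 00-80-C2, PID 00-07) delivered via pvc->ether.  BECN arrivals are
 * counted in rx_compressed, mirroring the FECN/tx_compressed trick on
 * the transmit side.
 */
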
static int fr_rx(struct sk_buff *skb)
{
	struct net_device *ndev = skb->dev;
	hdlc_device *hdlc = dev_to_hdlc(ndev);
	fr_hdr *fh = (fr_hdr*)skb->data;
	u8 *data = skb->data;
	u16 dlci;
	pvc_device *pvc;
	struct net_device *dev = NULL;

	if (skb->len <= 4 || fh->ea1 || data[2] != FR_UI)
		goto rx_error;

	dlci = q922_to_dlci(skb->data);

	if ((dlci == LMI_CCITT_ANSI_DLCI &&
	     (hdlc->state.fr.settings.lmi == LMI_ANSI ||
	      hdlc->state.fr.settings.lmi == LMI_CCITT)) ||
	    (dlci == LMI_CISCO_DLCI &&
	     hdlc->state.fr.settings.lmi == LMI_CISCO)) {
		if (fr_lmi_recv(ndev, skb))
			goto rx_error;
		dev_kfree_skb_any(skb);
		return NET_RX_SUCCESS;
	}

	pvc = find_pvc(hdlc, dlci);
	if (!pvc) {
#ifdef DEBUG_PKT
		printk(KERN_INFO "%s: No PVC for received frame's DLCI %d\n",
		       ndev->name, dlci);
#endif
		dev_kfree_skb_any(skb);
		return NET_RX_DROP;
	}

	if (pvc->state.fecn != fh->fecn) {
#ifdef DEBUG_ECN
		printk(KERN_DEBUG "%s: DLCI %d FECN O%s\n", ndev->name,
		       dlci, fh->fecn ? "N" : "FF");
#endif
		pvc->state.fecn ^= 1;
	}

	if (pvc->state.becn != fh->becn) {
#ifdef DEBUG_ECN
		printk(KERN_DEBUG "%s: DLCI %d BECN O%s\n", ndev->name,
		       dlci, fh->becn ? "N" : "FF");
#endif
		pvc->state.becn ^= 1;
	}

	if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) {
		hdlc->stats.rx_dropped++;
		return NET_RX_DROP;
	}

	if (data[3] == NLPID_IP) {
		skb_pull(skb, 4);	/* Remove 4-byte header (hdr, UI, NLPID) */
		dev = pvc->main;
		skb->protocol = htons(ETH_P_IP);

	} else if (data[3] == NLPID_IPV6) {
		skb_pull(skb, 4);	/* Remove 4-byte header (hdr, UI, NLPID) */
		dev = pvc->main;
		skb->protocol = htons(ETH_P_IPV6);

	} else if (skb->len > 10 && data[3] == FR_PAD &&
		   data[4] == NLPID_SNAP && data[5] == FR_PAD) {
		u16 oui = ntohs(*(u16*)(data + 6));
		u16 pid = ntohs(*(u16*)(data + 8));
		skb_pull(skb, 10);

		switch ((((u32)oui) << 16) | pid) {
		case ETH_P_ARP:	/* routed frame with SNAP */
		case ETH_P_IPX:
		case ETH_P_IP:	/* a long variant */
		case ETH_P_IPV6:
			dev = pvc->main;
			skb->protocol = htons(pid);
			break;

		case 0x80C20007:	/* bridged Ethernet frame */
			if ((dev = pvc->ether) != NULL)
				skb->protocol = eth_type_trans(skb, dev);
			break;

		default:
			printk(KERN_INFO "%s: Unsupported protocol, OUI=%x "
			       "PID=%x\n", ndev->name, oui, pid);
			dev_kfree_skb_any(skb);
			return NET_RX_DROP;
		}
	} else {
		printk(KERN_INFO "%s: Unsupported protocol, NLPID=%x "
		       "length = %i\n", ndev->name, data[3], skb->len);
		dev_kfree_skb_any(skb);
		return NET_RX_DROP;
	}

	if (dev) {
		struct net_device_stats *stats = pvc_get_stats(dev);
		stats->rx_packets++;	/* PVC traffic */
		stats->rx_bytes += skb->len;
		if (pvc->state.becn)
			stats->rx_compressed++;
		skb->dev = dev;
		netif_rx(skb);
		return NET_RX_SUCCESS;
	} else {
		dev_kfree_skb_any(skb);
		return NET_RX_DROP;
	}

 rx_error:
	hdlc->stats.rx_errors++;	/* Mark error */
	dev_kfree_skb_any(skb);
	return NET_RX_DROP;
}

static void fr_start(struct net_device *dev)
{
	hdlc_device *hdlc = dev_to_hdlc(dev);
#ifdef DEBUG_LINK
	printk(KERN_DEBUG "fr_start\n");
#endif
	if (hdlc->state.fr.settings.lmi != LMI_NONE) {
		hdlc->state.fr.reliable = 0;
		hdlc->state.fr.dce_changed = 1;
		hdlc->state.fr.request = 0;
		hdlc->state.fr.fullrep_sent = 0;
		hdlc->state.fr.last_errors = 0xFFFFFFFF;
		hdlc->state.fr.n391cnt = 0;
		hdlc->state.fr.txseq = hdlc->state.fr.rxseq = 0;

		init_timer(&hdlc->state.fr.timer);
		/* First poll after 1 s */
		hdlc->state.fr.timer.expires = jiffies + HZ;
		hdlc->state.fr.timer.function = fr_timer;
		hdlc->state.fr.timer.data = (unsigned long)dev;
		add_timer(&hdlc->state.fr.timer);
	} else
		fr_set_link_state(1, dev);
}

static void fr_stop(struct net_device *dev)
{
	hdlc_device *hdlc = dev_to_hdlc(dev);
#ifdef DEBUG_LINK
	printk(KERN_DEBUG "fr_stop\n");
#endif
	if (hdlc->state.fr.settings.lmi != LMI_NONE)
		del_timer_sync(&hdlc->state.fr.timer);

	fr_set_link_state(0, dev);
}

static void fr_close(struct net_device *dev)
{
	hdlc_device *hdlc = dev_to_hdlc(dev);
	pvc_device *pvc = hdlc->state.fr.first_pvc;

	while (pvc) {		/* Shutdown all PVCs for this FRAD */
		if (pvc->main)
			dev_close(pvc->main);
		if (pvc->ether)
			dev_close(pvc->ether);
		pvc = pvc->next;
	}
}

static void dlci_setup(struct net_device *dev)
{
	dev->type = ARPHRD_DLCI;
	dev->flags = IFF_POINTOPOINT;
	dev->hard_header_len = 10;
	dev->addr_len = 2;
}

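/*
 * Each PVC can be exposed through up to two child network devices:
 * a routed "pvc%d" interface (type ARPHRD_DLCI, kept in pvc->main) and
 * a bridged "pvceth%d" interface (type ARPHRD_ETHER, kept in pvc->ether).
 * fr_add_pvc() allocates the requested kind, names it with
 * dev_alloc_name() and registers it against the master FRAD device.
 */
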
static int fr_add_pvc(struct net_device *master, unsigned int dlci, int type)
{
	hdlc_device *hdlc = dev_to_hdlc(master);
	pvc_device *pvc = NULL;
	struct net_device *dev;
	int result, used;
	char *prefix = "pvc%d";

	if (type == ARPHRD_ETHER)
		prefix = "pvceth%d";

	if ((pvc = add_pvc(master, dlci)) == NULL) {
		printk(KERN_WARNING "%s: Memory squeeze on fr_add_pvc()\n",
		       master->name);
		return -ENOBUFS;
	}

	if (*get_dev_p(pvc, type))
		return -EEXIST;

	used = pvc_is_used(pvc);

	if (type == ARPHRD_ETHER) {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 17, 0)
		dev = alloc_netdev(sizeof(struct net_device_stats),
				   "pvceth%d", NET_NAME_UNKNOWN, ether_setup);
#else
		dev = alloc_netdev(sizeof(struct net_device_stats),
				   "pvceth%d", ether_setup);
#endif
	} else {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 17, 0)
		dev = alloc_netdev(sizeof(struct net_device_stats),
				   "pvc%d", NET_NAME_UNKNOWN, dlci_setup);
#else
		dev = alloc_netdev(sizeof(struct net_device_stats),
				   "pvc%d", dlci_setup);
#endif
	}

	if (!dev) {
		printk(KERN_WARNING "%s: Memory squeeze on fr_pvc()\n",
		       master->name);
		delete_unused_pvcs(hdlc);
		return -ENOBUFS;
	}

	if (type == ARPHRD_ETHER) {
		memcpy(dev->dev_addr, "\x00\x01", 2);
		get_random_bytes(dev->dev_addr + 2, ETH_ALEN - 2);
	} else {
		*(u16*)dev->dev_addr = htons(dlci);
		dlci_to_q922(dev->broadcast, dlci);
	}
	dev->hard_start_xmit = pvc_xmit;
	dev->get_stats = pvc_get_stats;
	dev->open = pvc_open;
	dev->stop = pvc_close;
	dev->do_ioctl = pvc_ioctl;
	dev->change_mtu = pvc_change_mtu;
	dev->mtu = HDLC_MAX_MTU;
	dev->tx_queue_len = 0;
	dev->priv = pvc;

	result = dev_alloc_name(dev, dev->name);
	if (result < 0) {
		free_netdev(dev);
		delete_unused_pvcs(hdlc);
		return result;
	}

	if (register_netdevice(dev) != 0) {
		free_netdev(dev);
		delete_unused_pvcs(hdlc);
		return -EIO;
	}

	dev->destructor = free_netdev;
	*get_dev_p(pvc, type) = dev;
	if (!used) {
		hdlc->state.fr.dce_changed = 1;
		hdlc->state.fr.dce_pvc_count++;
	}
	return 0;
}

static int fr_del_pvc(hdlc_device *hdlc, unsigned int dlci, int type)
{
	pvc_device *pvc;
	struct net_device *dev;

	if ((pvc = find_pvc(hdlc, dlci)) == NULL)
		return -ENOENT;

	if ((dev = *get_dev_p(pvc, type)) == NULL)
		return -ENOENT;

	if (dev->flags & IFF_UP)
		return -EBUSY;		/* PVC in use */

	unregister_netdevice(dev);	/* the destructor will free_netdev(dev) */
	*get_dev_p(pvc, type) = NULL;

	if (!pvc_is_used(pvc)) {
		hdlc->state.fr.dce_pvc_count--;
		hdlc->state.fr.dce_changed = 1;
	}
	delete_unused_pvcs(hdlc);
	return 0;
}

static void fr_destroy(hdlc_device *hdlc)
{
	pvc_device *pvc;

	pvc = hdlc->state.fr.first_pvc;
	hdlc->state.fr.first_pvc = NULL;	/* All PVCs destroyed */
	hdlc->state.fr.dce_pvc_count = 0;
	hdlc->state.fr.dce_changed = 1;

	while (pvc) {
		pvc_device *next = pvc->next;
		/* destructors will free_netdev() main and ether */
		if (pvc->main)
			unregister_netdevice(pvc->main);

		if (pvc->ether)
			unregister_netdevice(pvc->ether);

		kfree(pvc);
		pvc = next;
	}
}

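/*
 * Protocol ioctl entry point, reached via SIOCWANDEV on the master
 * device.  It answers IF_GET_PROTO with the current fr_proto settings,
 * attaches Frame Relay (validating LMI type, T391/T392 timers and
 * N391/N392/N393 counters) for IF_PROTO_FR, and creates or deletes
 * routed/Ethernet PVCs for the IF_PROTO_FR_{ADD,DEL}[_ETH]_PVC requests.
 *
 * A minimal userspace sketch (assuming the usual linux/hdlc.h interface,
 * with the defaults used by the sethdlc utility) would be roughly:
 *
 *	fr_proto fr = { .lmi = LMI_ANSI, .dce = 0, .t391 = 10, .t392 = 15,
 *			.n391 = 6, .n392 = 3, .n393 = 4 };
 *	struct ifreq ifr;
 *	strncpy(ifr.ifr_name, "hdlc0", IFNAMSIZ);
 *	ifr.ifr_settings.type = IF_PROTO_FR;
 *	ifr.ifr_settings.size = sizeof(fr);
 *	ifr.ifr_settings.ifs_ifsu.fr = &fr;
 *	ioctl(sock, SIOCWANDEV, &ifr);	(sock: any ordinary datagram socket)
 */
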
int hdlc_fr_ioctl(struct net_device *dev, struct ifreq *ifr)
{
	fr_proto __user *fr_s = ifr->ifr_settings.ifs_ifsu.fr;
	const size_t size = sizeof(fr_proto);
	fr_proto new_settings;
	hdlc_device *hdlc = dev_to_hdlc(dev);
	fr_proto_pvc pvc;
	int result;

	switch (ifr->ifr_settings.type) {
	case IF_GET_PROTO:
		ifr->ifr_settings.type = IF_PROTO_FR;
		if (ifr->ifr_settings.size < size) {
			ifr->ifr_settings.size = size;	/* data size wanted */
			return -ENOBUFS;
		}
		if (copy_to_user(fr_s, &hdlc->state.fr.settings, size))
			return -EFAULT;
		return 0;

	case IF_PROTO_FR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (dev->flags & IFF_UP)
			return -EBUSY;

		if (copy_from_user(&new_settings, fr_s, size))
			return -EFAULT;

		if (new_settings.lmi == LMI_DEFAULT)
			new_settings.lmi = LMI_ANSI;

		if ((new_settings.lmi != LMI_NONE &&
		     new_settings.lmi != LMI_ANSI &&
		     new_settings.lmi != LMI_CCITT &&
		     new_settings.lmi != LMI_CISCO) ||
		    new_settings.t391 < 1 ||
		    new_settings.t392 < 2 ||
		    new_settings.n391 < 1 ||
		    new_settings.n392 < 1 ||
		    new_settings.n393 < new_settings.n392 ||
		    new_settings.n393 > 32 ||
		    (new_settings.dce != 0 &&
		     new_settings.dce != 1))
			return -EINVAL;

		result = hdlc->attach(dev, ENCODING_NRZ,
				      PARITY_CRC16_PR1_CCITT);
		if (result)
			return result;

		if (hdlc->proto.id != IF_PROTO_FR) {
			hdlc_proto_detach(hdlc);
			hdlc->state.fr.first_pvc = NULL;
			hdlc->state.fr.dce_pvc_count = 0;
		}
		memcpy(&hdlc->state.fr.settings, &new_settings, size);
		memset(&hdlc->proto, 0, sizeof(hdlc->proto));

		hdlc->proto.close = fr_close;
		hdlc->proto.start = fr_start;
		hdlc->proto.stop = fr_stop;
		hdlc->proto.detach = fr_destroy;
		hdlc->proto.netif_rx = fr_rx;
		hdlc->proto.id = IF_PROTO_FR;
		dev->hard_start_xmit = hdlc->xmit;
		dev->hard_header = NULL;
		dev->type = ARPHRD_FRAD;
		dev->flags = IFF_POINTOPOINT | IFF_NOARP;
		dev->addr_len = 0;
		return 0;

	case IF_PROTO_FR_ADD_PVC:
	case IF_PROTO_FR_DEL_PVC:
	case IF_PROTO_FR_ADD_ETH_PVC:
	case IF_PROTO_FR_DEL_ETH_PVC:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (copy_from_user(&pvc, ifr->ifr_settings.ifs_ifsu.fr_pvc,
				   sizeof(fr_proto_pvc)))
			return -EFAULT;

		if (pvc.dlci <= 0 || pvc.dlci >= 1024)
			return -EINVAL;	/* Only 10 bits, DLCI 0 reserved */

		if (ifr->ifr_settings.type == IF_PROTO_FR_ADD_ETH_PVC ||
		    ifr->ifr_settings.type == IF_PROTO_FR_DEL_ETH_PVC)
			result = ARPHRD_ETHER;	/* bridged Ethernet device */
		else
			result = ARPHRD_DLCI;

		if (ifr->ifr_settings.type == IF_PROTO_FR_ADD_PVC ||
		    ifr->ifr_settings.type == IF_PROTO_FR_ADD_ETH_PVC)
			return fr_add_pvc(dev, pvc.dlci, result);
		else
			return fr_del_pvc(hdlc, pvc.dlci, result);
	}

	return -EINVAL;
}