mesh_pathtbl.c

/*
 * Copyright (c) 2008, 2009 open80211s Ltd.
 * Author:     Luis Carlos Cobo <luisca@cozybit.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/etherdevice.h>
#include <linux/list.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <net/mac80211.h>
#include "wme.h"
#include "ieee80211_i.h"
#include "mesh.h"

/* There will be initially 2^INIT_PATHS_SIZE_ORDER buckets */
#define INIT_PATHS_SIZE_ORDER	2

/* Keep the mean chain length below this constant */
#define MEAN_CHAIN_LEN		2

static inline bool mpath_expired(struct mesh_path *mpath)
{
	return (mpath->flags & MESH_PATH_ACTIVE) &&
	       time_after(jiffies, mpath->exp_time) &&
	       !(mpath->flags & MESH_PATH_FIXED);
}

struct mpath_node {
	struct hlist_node list;
	struct rcu_head rcu;
	/* This indirection allows two different tables to point to the same
	 * mesh_path structure, useful when resizing
	 */
	struct mesh_path *mpath;
};

static struct mesh_table __rcu *mesh_paths;
static struct mesh_table __rcu *mpp_paths; /* Store paths for MPP&MAP */

int mesh_paths_generation;
int mpp_paths_generation;
/* This lock will have the grow table function as writer and add / delete nodes
 * as readers. RCU provides sufficient protection only when reading the table
 * (i.e. doing lookups). Adding or removing nodes requires we take the read
 * lock or we risk operating on an old table. The write lock is only needed
 * when modifying the number of buckets of a table.
 */
static DEFINE_RWLOCK(pathtbl_resize_lock);

static inline struct mesh_table *resize_dereference_mesh_paths(void)
{
	return rcu_dereference_protected(mesh_paths,
		lockdep_is_held(&pathtbl_resize_lock));
}

static inline struct mesh_table *resize_dereference_mpp_paths(void)
{
	return rcu_dereference_protected(mpp_paths,
		lockdep_is_held(&pathtbl_resize_lock));
}

/*
 * CAREFUL -- "tbl" must not be an expression,
 * in particular not an rcu_dereference(), since
 * it's used twice. So it is illegal to do
 *	for_each_mesh_entry(rcu_dereference(...), ...)
 */
#define for_each_mesh_entry(tbl, node, i) \
	for (i = 0; i <= tbl->hash_mask; i++) \
		hlist_for_each_entry_rcu(node, &tbl->hash_buckets[i], list)
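
/*
 * Usage sketch: dereference the RCU pointer once into a local variable
 * and pass that to the macro (mesh_path_lookup_by_idx() below is a real
 * caller). do_something() is hypothetical, for illustration only:
 *
 *	struct mesh_table *tbl;
 *	struct mpath_node *node;
 *	int i;
 *
 *	rcu_read_lock();
 *	tbl = rcu_dereference(mesh_paths);
 *	for_each_mesh_entry(tbl, node, i)
 *		do_something(node->mpath);
 *	rcu_read_unlock();
 */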
static struct mesh_table *mesh_table_alloc(int size_order)
{
	int i;
	struct mesh_table *newtbl;

	newtbl = kmalloc(sizeof(struct mesh_table), GFP_ATOMIC);
	if (!newtbl)
		return NULL;

	newtbl->hash_buckets = kzalloc(sizeof(struct hlist_head) *
			(1 << size_order), GFP_ATOMIC);
	if (!newtbl->hash_buckets) {
		kfree(newtbl);
		return NULL;
	}

	newtbl->hashwlock = kmalloc(sizeof(spinlock_t) *
			(1 << size_order), GFP_ATOMIC);
	if (!newtbl->hashwlock) {
		kfree(newtbl->hash_buckets);
		kfree(newtbl);
		return NULL;
	}

	newtbl->size_order = size_order;
	newtbl->hash_mask = (1 << size_order) - 1;
	atomic_set(&newtbl->entries, 0);
	get_random_bytes(&newtbl->hash_rnd,
			 sizeof(newtbl->hash_rnd));
	for (i = 0; i <= newtbl->hash_mask; i++)
		spin_lock_init(&newtbl->hashwlock[i]);
	spin_lock_init(&newtbl->gates_lock);

	return newtbl;
}

static void __mesh_table_free(struct mesh_table *tbl)
{
	kfree(tbl->hash_buckets);
	kfree(tbl->hashwlock);
	kfree(tbl);
}

static void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
{
	struct hlist_head *mesh_hash;
	struct hlist_node *p, *q;
	struct mpath_node *gate;
	int i;

	mesh_hash = tbl->hash_buckets;
	for (i = 0; i <= tbl->hash_mask; i++) {
		spin_lock_bh(&tbl->hashwlock[i]);
		hlist_for_each_safe(p, q, &mesh_hash[i]) {
			tbl->free_node(p, free_leafs);
			atomic_dec(&tbl->entries);
		}
		spin_unlock_bh(&tbl->hashwlock[i]);
	}
	if (free_leafs) {
		spin_lock_bh(&tbl->gates_lock);
		hlist_for_each_entry_safe(gate, q,
					  tbl->known_gates, list) {
			hlist_del(&gate->list);
			kfree(gate);
		}
		kfree(tbl->known_gates);
		spin_unlock_bh(&tbl->gates_lock);
	}

	__mesh_table_free(tbl);
}

static int mesh_table_grow(struct mesh_table *oldtbl,
			   struct mesh_table *newtbl)
{
	struct hlist_head *oldhash;
	struct hlist_node *p, *q;
	int i;

	if (atomic_read(&oldtbl->entries)
			< oldtbl->mean_chain_len * (oldtbl->hash_mask + 1))
		return -EAGAIN;

	newtbl->free_node = oldtbl->free_node;
	newtbl->mean_chain_len = oldtbl->mean_chain_len;
	newtbl->copy_node = oldtbl->copy_node;
	newtbl->known_gates = oldtbl->known_gates;
	atomic_set(&newtbl->entries, atomic_read(&oldtbl->entries));

	oldhash = oldtbl->hash_buckets;
	for (i = 0; i <= oldtbl->hash_mask; i++)
		hlist_for_each(p, &oldhash[i])
			if (oldtbl->copy_node(p, newtbl) < 0)
				goto errcopy;

	return 0;

errcopy:
	for (i = 0; i <= newtbl->hash_mask; i++) {
		hlist_for_each_safe(p, q, &newtbl->hash_buckets[i])
			oldtbl->free_node(p, 0);
	}
	return -ENOMEM;
}

static u32 mesh_table_hash(const u8 *addr, struct ieee80211_sub_if_data *sdata,
			   struct mesh_table *tbl)
{
	/* Use last four bytes of hw addr and interface index as hash index */
	return jhash_2words(*(u32 *)(addr+2), sdata->dev->ifindex,
			    tbl->hash_rnd) & tbl->hash_mask;
}
/**
 * mesh_path_assign_nexthop - update mesh path next hop
 *
 * @mpath: mesh path to update
 * @sta: next hop to assign
 *
 * Locking: mpath->state_lock must be held when calling this function
 */
void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
{
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	unsigned long flags;

	rcu_assign_pointer(mpath->next_hop, sta);

	spin_lock_irqsave(&mpath->frame_queue.lock, flags);
	skb_queue_walk(&mpath->frame_queue, skb) {
		hdr = (struct ieee80211_hdr *) skb->data;
		memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN);
		memcpy(hdr->addr2, mpath->sdata->vif.addr, ETH_ALEN);
		ieee80211_mps_set_frame_flags(sta->sdata, sta, hdr);
	}
	spin_unlock_irqrestore(&mpath->frame_queue.lock, flags);
}
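
/*
 * Usage sketch: per the locking note above, callers take the path's
 * state_lock first; mesh_path_fix_nexthop() below does exactly this.
 * A minimal, hypothetical caller:
 *
 *	spin_lock_bh(&mpath->state_lock);
 *	mesh_path_assign_nexthop(mpath, sta);
 *	spin_unlock_bh(&mpath->state_lock);
 */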
static void prepare_for_gate(struct sk_buff *skb, char *dst_addr,
			     struct mesh_path *gate_mpath)
{
	struct ieee80211_hdr *hdr;
	struct ieee80211s_hdr *mshdr;
	int mesh_hdrlen, hdrlen;
	char *next_hop;

	hdr = (struct ieee80211_hdr *) skb->data;
	hdrlen = ieee80211_hdrlen(hdr->frame_control);
	mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);

	if (!(mshdr->flags & MESH_FLAGS_AE)) {
		/* size of the fixed part of the mesh header */
		mesh_hdrlen = 6;

		/* make room for the two extended addresses */
		skb_push(skb, 2 * ETH_ALEN);
		memmove(skb->data, hdr, hdrlen + mesh_hdrlen);

		hdr = (struct ieee80211_hdr *) skb->data;

		/* we preserve the previous mesh header and only add
		 * the new addresses */
		mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
		mshdr->flags = MESH_FLAGS_AE_A5_A6;
		memcpy(mshdr->eaddr1, hdr->addr3, ETH_ALEN);
		memcpy(mshdr->eaddr2, hdr->addr4, ETH_ALEN);
	}

	/* update next hop */
	hdr = (struct ieee80211_hdr *) skb->data;
	rcu_read_lock();
	next_hop = rcu_dereference(gate_mpath->next_hop)->sta.addr;
	memcpy(hdr->addr1, next_hop, ETH_ALEN);
	rcu_read_unlock();
	memcpy(hdr->addr2, gate_mpath->sdata->vif.addr, ETH_ALEN);
	memcpy(hdr->addr3, dst_addr, ETH_ALEN);
}
/**
 * mesh_path_move_to_queue - Move or copy frames from one mpath queue to another
 *
 * This function is used to transfer or copy frames from an unresolved mpath to
 * a gate mpath. The function also adds the Address Extension field and
 * updates the next hop.
 *
 * If a frame already has an Address Extension field, only the next hop and
 * destination addresses are updated.
 *
 * The gate mpath must be an active mpath with a valid mpath->next_hop.
 *
 * @gate_mpath: An active mpath the frames will be sent to (i.e. the gate)
 * @from_mpath: The failed mpath
 * @copy: When true, copy all the frames to the new mpath queue. When false,
 * move them.
 */
static void mesh_path_move_to_queue(struct mesh_path *gate_mpath,
				    struct mesh_path *from_mpath,
				    bool copy)
{
	struct sk_buff *skb, *fskb, *tmp;
	struct sk_buff_head failq;
	unsigned long flags;

	if (WARN_ON(gate_mpath == from_mpath))
		return;
	if (WARN_ON(!gate_mpath->next_hop))
		return;

	__skb_queue_head_init(&failq);

	spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
	skb_queue_splice_init(&from_mpath->frame_queue, &failq);
	spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);

	skb_queue_walk_safe(&failq, fskb, tmp) {
		if (skb_queue_len(&gate_mpath->frame_queue) >=
				  MESH_FRAME_QUEUE_LEN) {
			mpath_dbg(gate_mpath->sdata, "mpath queue full!\n");
			break;
		}

		skb = skb_copy(fskb, GFP_ATOMIC);
		if (WARN_ON(!skb))
			break;

		prepare_for_gate(skb, gate_mpath->dst, gate_mpath);
		skb_queue_tail(&gate_mpath->frame_queue, skb);

		if (copy)
			continue;

		__skb_unlink(fskb, &failq);
		kfree_skb(fskb);
	}

	mpath_dbg(gate_mpath->sdata, "Mpath queue for gate %pM has %d frames\n",
		  gate_mpath->dst, skb_queue_len(&gate_mpath->frame_queue));

	if (!copy)
		return;

	spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
	skb_queue_splice(&failq, &from_mpath->frame_queue);
	spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);
}
static struct mesh_path *mpath_lookup(struct mesh_table *tbl, const u8 *dst,
				      struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct hlist_head *bucket;
	struct mpath_node *node;

	bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)];
	hlist_for_each_entry_rcu(node, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
		    ether_addr_equal(dst, mpath->dst)) {
			if (mpath_expired(mpath)) {
				spin_lock_bh(&mpath->state_lock);
				mpath->flags &= ~MESH_PATH_ACTIVE;
				spin_unlock_bh(&mpath->state_lock);
			}
			return mpath;
		}
	}
	return NULL;
}

/**
 * mesh_path_lookup - look up a path in the mesh path table
 * @sdata: local subif
 * @dst: hardware address (ETH_ALEN length) of destination
 *
 * Returns: pointer to the mesh path structure, or NULL if not found
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *
mesh_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
{
	return mpath_lookup(rcu_dereference(mesh_paths), dst, sdata);
}
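
/*
 * Usage sketch (hypothetical caller): the returned mpath is only
 * guaranteed to stay valid inside the RCU read-side section, so all use
 * of it must happen before rcu_read_unlock():
 *
 *	struct mesh_path *mpath;
 *
 *	rcu_read_lock();
 *	mpath = mesh_path_lookup(sdata, dst);
 *	if (mpath && (mpath->flags & MESH_PATH_ACTIVE))
 *		... use mpath here ...
 *	rcu_read_unlock();
 */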
struct mesh_path *
mpp_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
{
	return mpath_lookup(rcu_dereference(mpp_paths), dst, sdata);
}

/**
 * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index
 * @idx: index
 * @sdata: local subif, or NULL for all entries
 *
 * Returns: pointer to the mesh path structure, or NULL if not found.
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *
mesh_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx)
{
	struct mesh_table *tbl = rcu_dereference(mesh_paths);
	struct mpath_node *node;
	int i;
	int j = 0;

	for_each_mesh_entry(tbl, node, i) {
		if (sdata && node->mpath->sdata != sdata)
			continue;
		if (j++ == idx) {
			if (mpath_expired(node->mpath)) {
				spin_lock_bh(&node->mpath->state_lock);
				node->mpath->flags &= ~MESH_PATH_ACTIVE;
				spin_unlock_bh(&node->mpath->state_lock);
			}
			return node->mpath;
		}
	}

	return NULL;
}

/**
 * mpp_path_lookup_by_idx - look up a path in the proxy path table by its index
 * @idx: index
 * @sdata: local subif, or NULL for all entries
 *
 * Returns: pointer to the proxy path structure, or NULL if not found.
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *
mpp_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx)
{
	struct mesh_table *tbl = rcu_dereference(mpp_paths);
	struct mpath_node *node;
	int i;
	int j = 0;

	for_each_mesh_entry(tbl, node, i) {
		if (sdata && node->mpath->sdata != sdata)
			continue;
		if (j++ == idx)
			return node->mpath;
	}

	return NULL;
}
/**
 * mesh_path_add_gate - add the given mpath as a mesh gate to our path table
 * @mpath: gate path to add to table
 */
int mesh_path_add_gate(struct mesh_path *mpath)
{
	struct mesh_table *tbl;
	struct mpath_node *gate, *new_gate;
	int err;

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);

	hlist_for_each_entry_rcu(gate, tbl->known_gates, list)
		if (gate->mpath == mpath) {
			err = -EEXIST;
			goto err_rcu;
		}

	new_gate = kzalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (!new_gate) {
		err = -ENOMEM;
		goto err_rcu;
	}

	mpath->is_gate = true;
	mpath->sdata->u.mesh.num_gates++;
	new_gate->mpath = mpath;
	spin_lock_bh(&tbl->gates_lock);
	hlist_add_head_rcu(&new_gate->list, tbl->known_gates);
	spin_unlock_bh(&tbl->gates_lock);
	mpath_dbg(mpath->sdata,
		  "Mesh path: Recorded new gate: %pM. %d known gates\n",
		  mpath->dst, mpath->sdata->u.mesh.num_gates);
	err = 0;
err_rcu:
	rcu_read_unlock();
	return err;
}
/**
 * mesh_gate_del - remove a mesh gate from the list of known gates
 * @tbl: table which holds our list of known gates
 * @mpath: gate mpath
 *
 * Locking: must be called inside rcu_read_lock() section
 */
static void mesh_gate_del(struct mesh_table *tbl, struct mesh_path *mpath)
{
	struct mpath_node *gate;
	struct hlist_node *q;

	hlist_for_each_entry_safe(gate, q, tbl->known_gates, list) {
		if (gate->mpath != mpath)
			continue;
		spin_lock_bh(&tbl->gates_lock);
		hlist_del_rcu(&gate->list);
		kfree_rcu(gate, rcu);
		spin_unlock_bh(&tbl->gates_lock);
		mpath->sdata->u.mesh.num_gates--;
		mpath->is_gate = false;
		mpath_dbg(mpath->sdata,
			  "Mesh path: Deleted gate: %pM. %d known gates\n",
			  mpath->dst, mpath->sdata->u.mesh.num_gates);
		break;
	}
}

/**
 * mesh_gate_num - number of gates known to this interface
 * @sdata: subif data
 */
int mesh_gate_num(struct ieee80211_sub_if_data *sdata)
{
	return sdata->u.mesh.num_gates;
}
/**
 * mesh_path_add - allocate and add a new path to the mesh path table
 * @sdata: local subif
 * @dst: destination address of the path (ETH_ALEN length)
 *
 * Returns: the new (or pre-existing) mesh path on success, or an ERR_PTR()
 * on failure
 *
 * State: the initial state of the new path is set to 0
 */
struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata,
				const u8 *dst)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct ieee80211_local *local = sdata->local;
	struct mesh_table *tbl;
	struct mesh_path *mpath, *new_mpath;
	struct mpath_node *node, *new_node;
	struct hlist_head *bucket;
	int grow = 0;
	int err;
	u32 hash_idx;

	if (ether_addr_equal(dst, sdata->vif.addr))
		/* never add ourselves as neighbours */
		return ERR_PTR(-ENOTSUPP);

	if (is_multicast_ether_addr(dst))
		return ERR_PTR(-ENOTSUPP);

	if (atomic_add_unless(&sdata->u.mesh.mpaths, 1, MESH_MAX_MPATHS) == 0)
		return ERR_PTR(-ENOSPC);

	read_lock_bh(&pathtbl_resize_lock);
	tbl = resize_dereference_mesh_paths();

	hash_idx = mesh_table_hash(dst, sdata, tbl);
	bucket = &tbl->hash_buckets[hash_idx];

	spin_lock(&tbl->hashwlock[hash_idx]);

	hlist_for_each_entry(node, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
		    ether_addr_equal(dst, mpath->dst))
			goto found;
	}

	err = -ENOMEM;
	new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
	if (!new_mpath)
		goto err_path_alloc;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (!new_node)
		goto err_node_alloc;

	memcpy(new_mpath->dst, dst, ETH_ALEN);
	eth_broadcast_addr(new_mpath->rann_snd_addr);
	new_mpath->is_root = false;
	new_mpath->sdata = sdata;
	new_mpath->flags = 0;
	skb_queue_head_init(&new_mpath->frame_queue);
	new_node->mpath = new_mpath;
	new_mpath->timer.data = (unsigned long) new_mpath;
	new_mpath->timer.function = mesh_path_timer;
	new_mpath->exp_time = jiffies;
	spin_lock_init(&new_mpath->state_lock);
	init_timer(&new_mpath->timer);

	hlist_add_head_rcu(&new_node->list, bucket);
	if (atomic_inc_return(&tbl->entries) >=
	    tbl->mean_chain_len * (tbl->hash_mask + 1))
		grow = 1;

	mesh_paths_generation++;

	if (grow) {
		set_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags);
		ieee80211_queue_work(&local->hw, &sdata->work);
	}
	mpath = new_mpath;
found:
	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	return mpath;

err_node_alloc:
	kfree(new_mpath);
err_path_alloc:
	atomic_dec(&sdata->u.mesh.mpaths);
	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	return ERR_PTR(err);
}
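
/*
 * Usage sketch: the return value is a mesh path or an ERR_PTR(), never
 * NULL, so callers check it with IS_ERR()/PTR_ERR(). A hypothetical
 * caller:
 *
 *	struct mesh_path *mpath;
 *
 *	mpath = mesh_path_add(sdata, dst);
 *	if (IS_ERR(mpath))
 *		return PTR_ERR(mpath);
 */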
static void mesh_table_free_rcu(struct rcu_head *rcu)
{
	struct mesh_table *tbl = container_of(rcu, struct mesh_table, rcu_head);

	mesh_table_free(tbl, false);
}

void mesh_mpath_table_grow(void)
{
	struct mesh_table *oldtbl, *newtbl;

	write_lock_bh(&pathtbl_resize_lock);
	oldtbl = resize_dereference_mesh_paths();
	newtbl = mesh_table_alloc(oldtbl->size_order + 1);
	if (!newtbl)
		goto out;
	if (mesh_table_grow(oldtbl, newtbl) < 0) {
		__mesh_table_free(newtbl);
		goto out;
	}
	rcu_assign_pointer(mesh_paths, newtbl);

	call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);

out:
	write_unlock_bh(&pathtbl_resize_lock);
}

void mesh_mpp_table_grow(void)
{
	struct mesh_table *oldtbl, *newtbl;

	write_lock_bh(&pathtbl_resize_lock);
	oldtbl = resize_dereference_mpp_paths();
	newtbl = mesh_table_alloc(oldtbl->size_order + 1);
	if (!newtbl)
		goto out;
	if (mesh_table_grow(oldtbl, newtbl) < 0) {
		__mesh_table_free(newtbl);
		goto out;
	}
	rcu_assign_pointer(mpp_paths, newtbl);
	call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);

out:
	write_unlock_bh(&pathtbl_resize_lock);
}

int mpp_path_add(struct ieee80211_sub_if_data *sdata,
		 const u8 *dst, const u8 *mpp)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct ieee80211_local *local = sdata->local;
	struct mesh_table *tbl;
	struct mesh_path *mpath, *new_mpath;
	struct mpath_node *node, *new_node;
	struct hlist_head *bucket;
	int grow = 0;
	int err = 0;
	u32 hash_idx;

	if (ether_addr_equal(dst, sdata->vif.addr))
		/* never add ourselves as neighbours */
		return -ENOTSUPP;

	if (is_multicast_ether_addr(dst))
		return -ENOTSUPP;

	err = -ENOMEM;
	new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
	if (!new_mpath)
		goto err_path_alloc;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (!new_node)
		goto err_node_alloc;

	read_lock_bh(&pathtbl_resize_lock);
	memcpy(new_mpath->dst, dst, ETH_ALEN);
	memcpy(new_mpath->mpp, mpp, ETH_ALEN);
	new_mpath->sdata = sdata;
	new_mpath->flags = 0;
	skb_queue_head_init(&new_mpath->frame_queue);
	new_node->mpath = new_mpath;
	init_timer(&new_mpath->timer);
	new_mpath->exp_time = jiffies;
	spin_lock_init(&new_mpath->state_lock);

	tbl = resize_dereference_mpp_paths();

	hash_idx = mesh_table_hash(dst, sdata, tbl);
	bucket = &tbl->hash_buckets[hash_idx];

	spin_lock(&tbl->hashwlock[hash_idx]);

	err = -EEXIST;
	hlist_for_each_entry(node, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
		    ether_addr_equal(dst, mpath->dst))
			goto err_exists;
	}

	hlist_add_head_rcu(&new_node->list, bucket);
	if (atomic_inc_return(&tbl->entries) >=
	    tbl->mean_chain_len * (tbl->hash_mask + 1))
		grow = 1;

	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);

	mpp_paths_generation++;

	if (grow) {
		set_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags);
		ieee80211_queue_work(&local->hw, &sdata->work);
	}
	return 0;

err_exists:
	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	kfree(new_node);
err_node_alloc:
	kfree(new_mpath);
err_path_alloc:
	return err;
}
/**
 * mesh_plink_broken - deactivates paths and sends PERR when a link breaks
 *
 * @sta: broken peer link
 *
 * This function must be called from the rate control algorithm if enough
 * delivery errors suggest that a peer link is no longer usable.
 */
void mesh_plink_broken(struct sta_info *sta)
{
	struct mesh_table *tbl;
	static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	int i;

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);
	for_each_mesh_entry(tbl, node, i) {
		mpath = node->mpath;
		if (rcu_access_pointer(mpath->next_hop) == sta &&
		    mpath->flags & MESH_PATH_ACTIVE &&
		    !(mpath->flags & MESH_PATH_FIXED)) {
			spin_lock_bh(&mpath->state_lock);
			mpath->flags &= ~MESH_PATH_ACTIVE;
			++mpath->sn;
			spin_unlock_bh(&mpath->state_lock);
			mesh_path_error_tx(sdata,
				sdata->u.mesh.mshcfg.element_ttl,
				mpath->dst, mpath->sn,
				WLAN_REASON_MESH_PATH_DEST_UNREACHABLE, bcast);
		}
	}
	rcu_read_unlock();
}
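
/*
 * Usage sketch: a hypothetical rate control algorithm might call this
 * once its delivery-failure statistics cross some threshold. Both names
 * below are made up for illustration:
 *
 *	if (sta_fail_avg(sta) > MESH_FAIL_AVG_THRESHOLD)
 *		mesh_plink_broken(sta);
 */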
static void mesh_path_node_reclaim(struct rcu_head *rp)
{
	struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
	struct ieee80211_sub_if_data *sdata = node->mpath->sdata;

	del_timer_sync(&node->mpath->timer);
	atomic_dec(&sdata->u.mesh.mpaths);
	kfree(node->mpath);
	kfree(node);
}

/* needs to be called with the corresponding hashwlock taken */
static void __mesh_path_del(struct mesh_table *tbl, struct mpath_node *node)
{
	struct mesh_path *mpath;

	mpath = node->mpath;
	spin_lock(&mpath->state_lock);
	mpath->flags |= MESH_PATH_RESOLVING;
	if (mpath->is_gate)
		mesh_gate_del(tbl, mpath);
	hlist_del_rcu(&node->list);
	call_rcu(&node->rcu, mesh_path_node_reclaim);
	spin_unlock(&mpath->state_lock);
	atomic_dec(&tbl->entries);
}

/**
 * mesh_path_flush_by_nexthop - Deletes mesh paths if their next hop matches
 *
 * @sta: mesh peer to match
 *
 * RCU notes: this function is called when a mesh plink transitions from
 * PLINK_ESTAB to any other state, since PLINK_ESTAB state is the only one that
 * allows path creation. This will happen before the sta can be freed (because
 * sta_info_destroy() calls this) so any reader in a rcu read block will be
 * protected against the plink disappearing.
 */
void mesh_path_flush_by_nexthop(struct sta_info *sta)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath;
	struct mpath_node *node;
	int i;

	rcu_read_lock();
	read_lock_bh(&pathtbl_resize_lock);
	tbl = resize_dereference_mesh_paths();
	for_each_mesh_entry(tbl, node, i) {
		mpath = node->mpath;
		if (rcu_access_pointer(mpath->next_hop) == sta) {
			spin_lock(&tbl->hashwlock[i]);
			__mesh_path_del(tbl, node);
			spin_unlock(&tbl->hashwlock[i]);
		}
	}
	read_unlock_bh(&pathtbl_resize_lock);
	rcu_read_unlock();
}

static void table_flush_by_iface(struct mesh_table *tbl,
				 struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct mpath_node *node;
	int i;

	WARN_ON(!rcu_read_lock_held());
	for_each_mesh_entry(tbl, node, i) {
		mpath = node->mpath;
		if (mpath->sdata != sdata)
			continue;
		spin_lock_bh(&tbl->hashwlock[i]);
		__mesh_path_del(tbl, node);
		spin_unlock_bh(&tbl->hashwlock[i]);
	}
}
/**
 * mesh_path_flush_by_iface - Deletes all mesh paths associated with a given iface
 *
 * This function deletes both mesh paths and MPP (mesh portal) paths.
 *
 * @sdata: interface data to match
 */
void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl;

	rcu_read_lock();
	read_lock_bh(&pathtbl_resize_lock);
	tbl = resize_dereference_mesh_paths();
	table_flush_by_iface(tbl, sdata);
	tbl = resize_dereference_mpp_paths();
	table_flush_by_iface(tbl, sdata);
	read_unlock_bh(&pathtbl_resize_lock);
	rcu_read_unlock();
}
/**
 * mesh_path_del - delete a mesh path from the table
 *
 * @addr: dst address (ETH_ALEN length)
 * @sdata: local subif
 *
 * Returns: 0 if successful
 */
int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_head *bucket;
	int hash_idx;
	int err = 0;

	read_lock_bh(&pathtbl_resize_lock);
	tbl = resize_dereference_mesh_paths();
	hash_idx = mesh_table_hash(addr, sdata, tbl);
	bucket = &tbl->hash_buckets[hash_idx];

	spin_lock(&tbl->hashwlock[hash_idx]);
	hlist_for_each_entry(node, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
		    ether_addr_equal(addr, mpath->dst)) {
			__mesh_path_del(tbl, node);
			goto enddel;
		}
	}

	err = -ENXIO;
enddel:
	mesh_paths_generation++;
	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	return err;
}
/**
 * mesh_path_tx_pending - sends pending frames in a mesh path queue
 *
 * @mpath: mesh path to activate
 *
 * Locking: the state_lock of the mpath structure must NOT be held when calling
 * this function.
 */
void mesh_path_tx_pending(struct mesh_path *mpath)
{
	if (mpath->flags & MESH_PATH_ACTIVE)
		ieee80211_add_pending_skbs(mpath->sdata->local,
					   &mpath->frame_queue);
}
/**
 * mesh_path_send_to_gates - sends pending frames to all known mesh gates
 *
 * @mpath: mesh path whose queue will be emptied
 *
 * If there is only one gate, the frames are transferred from the failed mpath
 * queue to that gate's queue. If there is more than one gate, the frames
 * are copied from each gate to the next. After frames are copied, the
 * mpath queues are emptied onto the transmission queue.
 */
int mesh_path_send_to_gates(struct mesh_path *mpath)
{
	struct ieee80211_sub_if_data *sdata = mpath->sdata;
	struct mesh_table *tbl;
	struct mesh_path *from_mpath = mpath;
	struct mpath_node *gate = NULL;
	bool copy = false;
	struct hlist_head *known_gates;

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);
	known_gates = tbl->known_gates;
	rcu_read_unlock();

	if (!known_gates)
		return -EHOSTUNREACH;

	hlist_for_each_entry_rcu(gate, known_gates, list) {
		if (gate->mpath->sdata != sdata)
			continue;

		if (gate->mpath->flags & MESH_PATH_ACTIVE) {
			mpath_dbg(sdata, "Forwarding to %pM\n", gate->mpath->dst);
			mesh_path_move_to_queue(gate->mpath, from_mpath, copy);
			from_mpath = gate->mpath;
			copy = true;
		} else {
			mpath_dbg(sdata,
				  "Not forwarding %p (flags %#x)\n",
				  gate->mpath, gate->mpath->flags);
		}
	}

	hlist_for_each_entry_rcu(gate, known_gates, list)
		if (gate->mpath->sdata == sdata) {
			mpath_dbg(sdata, "Sending to %pM\n", gate->mpath->dst);
			mesh_path_tx_pending(gate->mpath);
		}

	return (from_mpath == mpath) ? -EHOSTUNREACH : 0;
}
/**
 * mesh_path_discard_frame - discard a frame whose path could not be resolved
 *
 * @sdata: network subif the frame was to be sent through
 * @skb: frame to discard
 *
 * Locking: the function must be called within a rcu_read_lock region
 */
void mesh_path_discard_frame(struct ieee80211_sub_if_data *sdata,
			     struct sk_buff *skb)
{
	kfree_skb(skb);
	sdata->u.mesh.mshstats.dropped_frames_no_route++;
}
/**
 * mesh_path_flush_pending - free the pending queue of a mesh path
 *
 * @mpath: mesh path whose queue has to be freed
 *
 * Locking: the function must be called within a rcu_read_lock region
 */
void mesh_path_flush_pending(struct mesh_path *mpath)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&mpath->frame_queue)) != NULL)
		mesh_path_discard_frame(mpath->sdata, skb);
}
/**
 * mesh_path_fix_nexthop - force a specific next hop for a mesh path
 *
 * @mpath: the mesh path to modify
 * @next_hop: the next hop to force
 *
 * Locking: this function takes mpath->state_lock itself, so it must NOT be
 * held by the caller
 */
void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop)
{
	spin_lock_bh(&mpath->state_lock);
	mesh_path_assign_nexthop(mpath, next_hop);
	mpath->sn = 0xffff;
	mpath->metric = 0;
	mpath->hop_count = 0;
	mpath->exp_time = 0;
	mpath->flags |= MESH_PATH_FIXED;
	mesh_path_activate(mpath);
	spin_unlock_bh(&mpath->state_lock);
	mesh_path_tx_pending(mpath);
}
static void mesh_path_node_free(struct hlist_node *p, bool free_leafs)
{
	struct mesh_path *mpath;
	struct mpath_node *node = hlist_entry(p, struct mpath_node, list);

	mpath = node->mpath;
	hlist_del_rcu(p);
	if (free_leafs) {
		del_timer_sync(&mpath->timer);
		kfree(mpath);
	}
	kfree(node);
}

static int mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl)
{
	struct mesh_path *mpath;
	struct mpath_node *node, *new_node;
	u32 hash_idx;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (new_node == NULL)
		return -ENOMEM;

	node = hlist_entry(p, struct mpath_node, list);
	mpath = node->mpath;
	new_node->mpath = mpath;
	hash_idx = mesh_table_hash(mpath->dst, mpath->sdata, newtbl);
	hlist_add_head(&new_node->list,
		       &newtbl->hash_buckets[hash_idx]);
	return 0;
}
int mesh_pathtbl_init(void)
{
	struct mesh_table *tbl_path, *tbl_mpp;
	int ret;

	tbl_path = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
	if (!tbl_path)
		return -ENOMEM;
	tbl_path->free_node = &mesh_path_node_free;
	tbl_path->copy_node = &mesh_path_node_copy;
	tbl_path->mean_chain_len = MEAN_CHAIN_LEN;
	tbl_path->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
	if (!tbl_path->known_gates) {
		ret = -ENOMEM;
		goto free_path;
	}
	INIT_HLIST_HEAD(tbl_path->known_gates);

	tbl_mpp = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
	if (!tbl_mpp) {
		ret = -ENOMEM;
		goto free_path;
	}
	tbl_mpp->free_node = &mesh_path_node_free;
	tbl_mpp->copy_node = &mesh_path_node_copy;
	tbl_mpp->mean_chain_len = MEAN_CHAIN_LEN;
	tbl_mpp->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
	if (!tbl_mpp->known_gates) {
		ret = -ENOMEM;
		goto free_mpp;
	}
	INIT_HLIST_HEAD(tbl_mpp->known_gates);

	/* Need no locking since this is during init */
	RCU_INIT_POINTER(mesh_paths, tbl_path);
	RCU_INIT_POINTER(mpp_paths, tbl_mpp);

	return 0;

free_mpp:
	mesh_table_free(tbl_mpp, true);
free_path:
	mesh_table_free(tbl_path, true);
	return ret;
}
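
/*
 * Usage sketch: init pairs with mesh_pathtbl_unregister() (bottom of
 * this file) on teardown; the error paths above free whatever tables
 * were allocated, so a hypothetical caller only propagates the error:
 *
 *	int err = mesh_pathtbl_init();
 *	if (err)
 *		return err;
 *	...
 *	mesh_pathtbl_unregister();
 */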
void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath;
	struct mpath_node *node;
	int i;

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);
	for_each_mesh_entry(tbl, node, i) {
		if (node->mpath->sdata != sdata)
			continue;
		mpath = node->mpath;
		if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
		    (!(mpath->flags & MESH_PATH_FIXED)) &&
		    time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE))
			mesh_path_del(mpath->sdata, mpath->dst);
	}
	rcu_read_unlock();
}
void mesh_pathtbl_unregister(void)
{
	/* no need for locking during exit path */
	mesh_table_free(rcu_dereference_protected(mesh_paths, 1), true);
	mesh_table_free(rcu_dereference_protected(mpp_paths, 1), true);
}