pno.c 14 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592
  1. /*
  2. * Copyright (c) 2016 Broadcom
  3. *
  4. * Permission to use, copy, modify, and/or distribute this software for any
  5. * purpose with or without fee is hereby granted, provided that the above
  6. * copyright notice and this permission notice appear in all copies.
  7. *
  8. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  9. * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  10. * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
  11. * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  12. * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
  13. * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
  14. * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  15. */
  16. #include <linux/netdevice.h>
  17. #include <linux/gcd.h>
  18. #include <net/cfg80211.h>
  19. #include "core.h"
  20. #include "debug.h"
  21. #include "fwil.h"
  22. #include "fwil_types.h"
  23. #include "cfg80211.h"
  24. #include "pno.h"
/* pfn (preferred-network-offload) parameter block settings */
#define BRCMF_PNO_VERSION		2
#define BRCMF_PNO_REPEAT		4
#define BRCMF_PNO_FREQ_EXPO_MAX		3

/* bit positions in the pfn_param flags word */
#define BRCMF_PNO_IMMEDIATE_SCAN_BIT	3
#define BRCMF_PNO_ENABLE_BD_SCAN_BIT	5
#define BRCMF_PNO_ENABLE_ADAPTSCAN_BIT	6
#define BRCMF_PNO_REPORT_SEPARATELY_BIT	11

#define BRCMF_PNO_SCAN_INCOMPLETE	0
/* match any wpa auth mode in pfn_add entries */
#define BRCMF_PNO_WPA_AUTH_ANY		0xFFFFFFFF
/* bit position marking a hidden (actively probed) network */
#define BRCMF_PNO_HIDDEN_BIT		2
#define BRCMF_PNO_SCHED_SCAN_PERIOD	30

/* max number of stored scheduled-scan requests (= gscan buckets) */
#define BRCMF_PNO_MAX_BUCKETS		16
/* gscan: value disabling the buffer threshold */
#define GSCAN_BATCH_NO_THR_SET		101
#define GSCAN_RETRY_THRESHOLD		3
/**
 * struct brcmf_pno_info - stored scheduled-scan request state.
 *
 * @n_reqs: number of requests currently stored.
 * @reqs: stored requests, densely packed from index 0; the index of a
 *	request doubles as its gscan bucket number.
 * @req_lock: serializes access to @n_reqs and @reqs.
 */
struct brcmf_pno_info {
	int n_reqs;
	struct cfg80211_sched_scan_request *reqs[BRCMF_PNO_MAX_BUCKETS];
	struct mutex req_lock;
};

/* fetch the pno state from a driver interface pointer */
#define ifp_to_pno(_ifp)	((_ifp)->drvr->config->pno)
  45. static int brcmf_pno_store_request(struct brcmf_pno_info *pi,
  46. struct cfg80211_sched_scan_request *req)
  47. {
  48. if (WARN(pi->n_reqs == BRCMF_PNO_MAX_BUCKETS,
  49. "pno request storage full\n"))
  50. return -ENOSPC;
  51. brcmf_dbg(SCAN, "reqid=%llu\n", req->reqid);
  52. mutex_lock(&pi->req_lock);
  53. pi->reqs[pi->n_reqs++] = req;
  54. mutex_unlock(&pi->req_lock);
  55. return 0;
  56. }
  57. static int brcmf_pno_remove_request(struct brcmf_pno_info *pi, u64 reqid)
  58. {
  59. int i, err = 0;
  60. mutex_lock(&pi->req_lock);
  61. /* find request */
  62. for (i = 0; i < pi->n_reqs; i++) {
  63. if (pi->reqs[i]->reqid == reqid)
  64. break;
  65. }
  66. /* request not found */
  67. if (WARN(i == pi->n_reqs, "reqid not found\n")) {
  68. err = -ENOENT;
  69. goto done;
  70. }
  71. brcmf_dbg(SCAN, "reqid=%llu\n", reqid);
  72. pi->n_reqs--;
  73. /* if last we are done */
  74. if (!pi->n_reqs || i == pi->n_reqs)
  75. goto done;
  76. /* fill the gap with remaining requests */
  77. while (i <= pi->n_reqs - 1) {
  78. pi->reqs[i] = pi->reqs[i + 1];
  79. i++;
  80. }
  81. done:
  82. mutex_unlock(&pi->req_lock);
  83. return err;
  84. }
/* brcmf_pno_channel_config() - push a pre-filled channel list to firmware.
 *
 * Clears report type and flags in @cfg before handing the channel
 * configuration to the "pfn_cfg" iovar.
 */
static int brcmf_pno_channel_config(struct brcmf_if *ifp,
				    struct brcmf_pno_config_le *cfg)
{
	cfg->reporttype = 0;
	cfg->flags = 0;

	return brcmf_fil_iovar_data_set(ifp, "pfn_cfg", cfg, sizeof(*cfg));
}
/* brcmf_pno_config() - configure the firmware pfn engine.
 *
 * @ifp: interface to configure.
 * @scan_freq: scan period passed to firmware (units per cfg80211
 *	scan-plan interval — presumably seconds; confirm against fw docs).
 * @mscan: number of scans to batch in firmware; 0 disables batching.
 * @bestn: number of best networks per scan kept when batching.
 *
 * Return: 0 on success, negative error code on failure.
 */
static int brcmf_pno_config(struct brcmf_if *ifp, u32 scan_freq,
			    u32 mscan, u32 bestn)
{
	struct brcmf_pno_param_le pfn_param;
	u16 flags;
	u32 pfnmem;
	s32 err;

	memset(&pfn_param, 0, sizeof(pfn_param));
	pfn_param.version = cpu_to_le32(BRCMF_PNO_VERSION);

	/* set extra pno params */
	flags = BIT(BRCMF_PNO_IMMEDIATE_SCAN_BIT) |
		BIT(BRCMF_PNO_ENABLE_ADAPTSCAN_BIT);
	pfn_param.repeat = BRCMF_PNO_REPEAT;
	pfn_param.exp = BRCMF_PNO_FREQ_EXPO_MAX;

	/* set up pno scan fr */
	pfn_param.scan_freq = cpu_to_le32(scan_freq);

	if (mscan) {
		pfnmem = bestn;

		/* set bestn in firmware */
		err = brcmf_fil_iovar_int_set(ifp, "pfnmem", pfnmem);
		if (err < 0) {
			brcmf_err("failed to set pfnmem\n");
			goto exit;
		}
		/* get max mscan which the firmware supports */
		err = brcmf_fil_iovar_int_get(ifp, "pfnmem", &pfnmem);
		if (err < 0) {
			brcmf_err("failed to get pfnmem\n");
			goto exit;
		}
		/* clamp requested batch depth to firmware capability */
		mscan = min_t(u32, mscan, pfnmem);
		pfn_param.mscan = mscan;
		pfn_param.bestn = bestn;
		flags |= BIT(BRCMF_PNO_ENABLE_BD_SCAN_BIT);
		brcmf_dbg(INFO, "mscan=%d, bestn=%d\n", mscan, bestn);
	}

	pfn_param.flags = cpu_to_le16(flags);
	err = brcmf_fil_iovar_data_set(ifp, "pfn_set", &pfn_param,
				       sizeof(pfn_param));
	if (err)
		brcmf_err("pfn_set failed, err=%d\n", err);

exit:
	return err;
}
  136. static int brcmf_pno_set_random(struct brcmf_if *ifp, struct brcmf_pno_info *pi)
  137. {
  138. struct brcmf_pno_macaddr_le pfn_mac;
  139. u8 *mac_addr = NULL;
  140. u8 *mac_mask = NULL;
  141. int err, i;
  142. for (i = 0; i < pi->n_reqs; i++)
  143. if (pi->reqs[i]->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
  144. mac_addr = pi->reqs[i]->mac_addr;
  145. mac_mask = pi->reqs[i]->mac_addr_mask;
  146. break;
  147. }
  148. /* no random mac requested */
  149. if (!mac_addr)
  150. return 0;
  151. pfn_mac.version = BRCMF_PFN_MACADDR_CFG_VER;
  152. pfn_mac.flags = BRCMF_PFN_MAC_OUI_ONLY | BRCMF_PFN_SET_MAC_UNASSOC;
  153. memcpy(pfn_mac.mac, mac_addr, ETH_ALEN);
  154. for (i = 0; i < ETH_ALEN; i++) {
  155. pfn_mac.mac[i] &= mac_mask[i];
  156. pfn_mac.mac[i] |= get_random_int() & ~(mac_mask[i]);
  157. }
  158. /* Clear multi bit */
  159. pfn_mac.mac[0] &= 0xFE;
  160. /* Set locally administered */
  161. pfn_mac.mac[0] |= 0x02;
  162. brcmf_dbg(SCAN, "enabling random mac: reqid=%llu mac=%pM\n",
  163. pi->reqs[i]->reqid, pfn_mac.mac);
  164. err = brcmf_fil_iovar_data_set(ifp, "pfn_macaddr", &pfn_mac,
  165. sizeof(pfn_mac));
  166. if (err)
  167. brcmf_err("pfn_macaddr failed, err=%d\n", err);
  168. return err;
  169. }
  170. static int brcmf_pno_add_ssid(struct brcmf_if *ifp, struct cfg80211_ssid *ssid,
  171. bool active)
  172. {
  173. struct brcmf_pno_net_param_le pfn;
  174. int err;
  175. pfn.auth = cpu_to_le32(WLAN_AUTH_OPEN);
  176. pfn.wpa_auth = cpu_to_le32(BRCMF_PNO_WPA_AUTH_ANY);
  177. pfn.wsec = cpu_to_le32(0);
  178. pfn.infra = cpu_to_le32(1);
  179. pfn.flags = 0;
  180. if (active)
  181. pfn.flags = cpu_to_le32(1 << BRCMF_PNO_HIDDEN_BIT);
  182. pfn.ssid.SSID_len = cpu_to_le32(ssid->ssid_len);
  183. memcpy(pfn.ssid.SSID, ssid->ssid, ssid->ssid_len);
  184. brcmf_dbg(SCAN, "adding ssid=%.32s (active=%d)\n", ssid->ssid, active);
  185. err = brcmf_fil_iovar_data_set(ifp, "pfn_add", &pfn, sizeof(pfn));
  186. if (err < 0)
  187. brcmf_err("adding failed: err=%d\n", err);
  188. return err;
  189. }
  190. static int brcmf_pno_add_bssid(struct brcmf_if *ifp, const u8 *bssid)
  191. {
  192. struct brcmf_pno_bssid_le bssid_cfg;
  193. int err;
  194. memcpy(bssid_cfg.bssid, bssid, ETH_ALEN);
  195. bssid_cfg.flags = 0;
  196. brcmf_dbg(SCAN, "adding bssid=%pM\n", bssid);
  197. err = brcmf_fil_iovar_data_set(ifp, "pfn_add_bssid", &bssid_cfg,
  198. sizeof(bssid_cfg));
  199. if (err < 0)
  200. brcmf_err("adding failed: err=%d\n", err);
  201. return err;
  202. }
  203. static bool brcmf_is_ssid_active(struct cfg80211_ssid *ssid,
  204. struct cfg80211_sched_scan_request *req)
  205. {
  206. int i;
  207. if (!ssid || !req->ssids || !req->n_ssids)
  208. return false;
  209. for (i = 0; i < req->n_ssids; i++) {
  210. if (ssid->ssid_len == req->ssids[i].ssid_len) {
  211. if (!strncmp(ssid->ssid, req->ssids[i].ssid,
  212. ssid->ssid_len))
  213. return true;
  214. }
  215. }
  216. return false;
  217. }
  218. static int brcmf_pno_clean(struct brcmf_if *ifp)
  219. {
  220. int ret;
  221. /* Disable pfn */
  222. ret = brcmf_fil_iovar_int_set(ifp, "pfn", 0);
  223. if (ret == 0) {
  224. /* clear pfn */
  225. ret = brcmf_fil_iovar_data_set(ifp, "pfnclear", NULL, 0);
  226. }
  227. if (ret < 0)
  228. brcmf_err("failed code %d\n", ret);
  229. return ret;
  230. }
  231. static int brcmf_pno_get_bucket_channels(struct cfg80211_sched_scan_request *r,
  232. struct brcmf_pno_config_le *pno_cfg)
  233. {
  234. u32 n_chan = le32_to_cpu(pno_cfg->channel_num);
  235. u16 chan;
  236. int i, err = 0;
  237. for (i = 0; i < r->n_channels; i++) {
  238. if (n_chan >= BRCMF_NUMCHANNELS) {
  239. err = -ENOSPC;
  240. goto done;
  241. }
  242. chan = r->channels[i]->hw_value;
  243. brcmf_dbg(SCAN, "[%d] Chan : %u\n", n_chan, chan);
  244. pno_cfg->channel_list[n_chan++] = cpu_to_le16(chan);
  245. }
  246. /* return number of channels */
  247. err = n_chan;
  248. done:
  249. pno_cfg->channel_num = cpu_to_le32(n_chan);
  250. return err;
  251. }
/* brcmf_pno_prep_fwconfig() - derive firmware bucket config from requests.
 *
 * Computes the base scan period as the gcd of all stored requests'
 * scan-plan intervals, fills @pno_cfg with the union of their channel
 * lists, and allocates one bucket entry per request expressing its
 * period as a multiple of the base period.
 *
 * @pi: pno state holding the stored requests.
 * @pno_cfg: channel configuration to fill.
 * @buckets: on success, points to a kcalloc'ed bucket array the caller
 *	must kfree(); set to NULL on entry.
 * @scan_freq: on success, the computed base scan period.
 *
 * Return: number of buckets (> 0) on success, negative error code on
 *	failure (-ENODATA with no requests, -ENOMEM, or channel overflow).
 */
static int brcmf_pno_prep_fwconfig(struct brcmf_pno_info *pi,
				   struct brcmf_pno_config_le *pno_cfg,
				   struct brcmf_gscan_bucket_config **buckets,
				   u32 *scan_freq)
{
	struct cfg80211_sched_scan_request *sr;
	struct brcmf_gscan_bucket_config *fw_buckets;
	int i, err, chidx;

	brcmf_dbg(SCAN, "n_reqs=%d\n", pi->n_reqs);

	if (WARN_ON(!pi->n_reqs))
		return -ENODATA;

	/*
	 * actual scan period is determined using gcd() for each
	 * scheduled scan period.
	 */
	*scan_freq = pi->reqs[0]->scan_plans[0].interval;
	for (i = 1; i < pi->n_reqs; i++) {
		sr = pi->reqs[i];
		*scan_freq = gcd(sr->scan_plans[0].interval, *scan_freq);
	}
	if (*scan_freq < BRCMF_PNO_SCHED_SCAN_MIN_PERIOD) {
		brcmf_dbg(SCAN, "scan period too small, using minimum\n");
		*scan_freq = BRCMF_PNO_SCHED_SCAN_MIN_PERIOD;
	}

	*buckets = NULL;
	fw_buckets = kcalloc(pi->n_reqs, sizeof(*fw_buckets), GFP_KERNEL);
	if (!fw_buckets)
		return -ENOMEM;

	memset(pno_cfg, 0, sizeof(*pno_cfg));
	for (i = 0; i < pi->n_reqs; i++) {
		sr = pi->reqs[i];
		/* chidx is the running channel total after this request */
		chidx = brcmf_pno_get_bucket_channels(sr, pno_cfg);
		if (chidx < 0) {
			err = chidx;
			goto fail;
		}
		fw_buckets[i].bucket_end_index = chidx - 1;
		fw_buckets[i].bucket_freq_multiple =
			sr->scan_plans[0].interval / *scan_freq;
		/* assure period is non-zero */
		if (!fw_buckets[i].bucket_freq_multiple)
			fw_buckets[i].bucket_freq_multiple = 1;
		fw_buckets[i].flag = BRCMF_PNO_REPORT_NO_BATCH;
	}

	/* dump the resulting bucket table when scan debugging is on */
	if (BRCMF_SCAN_ON()) {
		brcmf_err("base period=%u\n", *scan_freq);
		for (i = 0; i < pi->n_reqs; i++) {
			brcmf_err("[%d] period %u max %u repeat %u flag %x idx %u\n",
				  i, fw_buckets[i].bucket_freq_multiple,
				  le16_to_cpu(fw_buckets[i].max_freq_multiple),
				  fw_buckets[i].repeat, fw_buckets[i].flag,
				  fw_buckets[i].bucket_end_index);
		}
	}
	*buckets = fw_buckets;
	return pi->n_reqs;

fail:
	kfree(fw_buckets);
	return err;
}
/* brcmf_pno_config_networks() - register all match-set networks in firmware.
 *
 * Walks every match set of every stored request, adding its ssid (when
 * present) and its bssid (when valid) via the pfn iovars.
 *
 * @ifp: interface to configure.
 * @pi: pno state holding the stored requests.
 *
 * Return: 0 on success, first negative error code otherwise.
 */
static int brcmf_pno_config_networks(struct brcmf_if *ifp,
				     struct brcmf_pno_info *pi)
{
	struct cfg80211_sched_scan_request *r;
	struct cfg80211_match_set *ms;
	bool active;
	int i, j, err = 0;

	for (i = 0; i < pi->n_reqs; i++) {
		r = pi->reqs[i];

		for (j = 0; j < r->n_match_sets; j++) {
			ms = &r->match_sets[j];
			if (ms->ssid.ssid_len) {
				/* probe actively when the ssid is also
				 * in the request's active-scan list
				 */
				active = brcmf_is_ssid_active(&ms->ssid, r);
				err = brcmf_pno_add_ssid(ifp, &ms->ssid,
							 active);
			}
			/* only add the bssid if the ssid add succeeded */
			if (!err && is_valid_ether_addr(ms->bssid))
				err = brcmf_pno_add_bssid(ifp, ms->bssid);

			if (err < 0)
				return err;
		}
	}
	return 0;
}
/* brcmf_pno_config_sched_scans() - (re)program firmware from stored requests.
 *
 * Builds the bucket/channel configuration from all stored requests,
 * wipes previous pfn state, pushes the new configuration, then enables
 * pfn. On any failure after pfn state was touched, the firmware is
 * cleaned again so it is left disabled rather than half-configured.
 *
 * @ifp: interface to configure.
 *
 * Return: 0 on success, negative error code on failure.
 */
static int brcmf_pno_config_sched_scans(struct brcmf_if *ifp)
{
	struct brcmf_pno_info *pi;
	struct brcmf_gscan_config *gscan_cfg;
	struct brcmf_gscan_bucket_config *buckets;
	struct brcmf_pno_config_le pno_cfg;
	size_t gsz;
	u32 scan_freq;
	int err, n_buckets;

	pi = ifp_to_pno(ifp);

	/* on success this allocates 'buckets'; freed at free_buckets */
	n_buckets = brcmf_pno_prep_fwconfig(pi, &pno_cfg, &buckets,
					    &scan_freq);
	if (n_buckets < 0)
		return n_buckets;

	/* gscan_cfg already contains one bucket, hence n_buckets - 1 */
	gsz = sizeof(*gscan_cfg) + (n_buckets - 1) * sizeof(*buckets);
	gscan_cfg = kzalloc(gsz, GFP_KERNEL);
	if (!gscan_cfg) {
		err = -ENOMEM;
		goto free_buckets;
	}

	/* clean up everything */
	err = brcmf_pno_clean(ifp);
	if (err < 0) {
		brcmf_err("failed error=%d\n", err);
		goto free_gscan;
	}

	/* configure pno */
	err = brcmf_pno_config(ifp, scan_freq, 0, 0);
	if (err < 0)
		goto free_gscan;

	err = brcmf_pno_channel_config(ifp, &pno_cfg);
	if (err < 0)
		goto clean;

	gscan_cfg->version = cpu_to_le16(BRCMF_GSCAN_CFG_VERSION);
	gscan_cfg->retry_threshold = GSCAN_RETRY_THRESHOLD;
	gscan_cfg->buffer_threshold = GSCAN_BATCH_NO_THR_SET;
	gscan_cfg->flags = BRCMF_GSCAN_CFG_ALL_BUCKETS_IN_1ST_SCAN;

	gscan_cfg->count_of_channel_buckets = n_buckets;
	memcpy(&gscan_cfg->bucket[0], buckets,
	       n_buckets * sizeof(*buckets));

	err = brcmf_fil_iovar_data_set(ifp, "pfn_gscan_cfg", gscan_cfg, gsz);
	if (err < 0)
		goto clean;

	/* configure random mac */
	err = brcmf_pno_set_random(ifp, pi);
	if (err < 0)
		goto clean;

	err = brcmf_pno_config_networks(ifp, pi);
	if (err < 0)
		goto clean;

	/* Enable the PNO */
	err = brcmf_fil_iovar_int_set(ifp, "pfn", 1);

clean:
	/* leave firmware disabled when anything above failed */
	if (err < 0)
		brcmf_pno_clean(ifp);
free_gscan:
	kfree(gscan_cfg);
free_buckets:
	kfree(buckets);
	return err;
}
  397. int brcmf_pno_start_sched_scan(struct brcmf_if *ifp,
  398. struct cfg80211_sched_scan_request *req)
  399. {
  400. struct brcmf_pno_info *pi;
  401. int ret;
  402. brcmf_dbg(TRACE, "reqid=%llu\n", req->reqid);
  403. pi = ifp_to_pno(ifp);
  404. ret = brcmf_pno_store_request(pi, req);
  405. if (ret < 0)
  406. return ret;
  407. ret = brcmf_pno_config_sched_scans(ifp);
  408. if (ret < 0) {
  409. brcmf_pno_remove_request(pi, req->reqid);
  410. if (pi->n_reqs)
  411. (void)brcmf_pno_config_sched_scans(ifp);
  412. return ret;
  413. }
  414. return 0;
  415. }
  416. int brcmf_pno_stop_sched_scan(struct brcmf_if *ifp, u64 reqid)
  417. {
  418. struct brcmf_pno_info *pi;
  419. int err;
  420. brcmf_dbg(TRACE, "reqid=%llu\n", reqid);
  421. pi = ifp_to_pno(ifp);
  422. err = brcmf_pno_remove_request(pi, reqid);
  423. if (err)
  424. return err;
  425. brcmf_pno_clean(ifp);
  426. if (pi->n_reqs)
  427. (void)brcmf_pno_config_sched_scans(ifp);
  428. return 0;
  429. }
  430. int brcmf_pno_attach(struct brcmf_cfg80211_info *cfg)
  431. {
  432. struct brcmf_pno_info *pi;
  433. brcmf_dbg(TRACE, "enter\n");
  434. pi = kzalloc(sizeof(*pi), GFP_KERNEL);
  435. if (!pi)
  436. return -ENOMEM;
  437. cfg->pno = pi;
  438. mutex_init(&pi->req_lock);
  439. return 0;
  440. }
  441. void brcmf_pno_detach(struct brcmf_cfg80211_info *cfg)
  442. {
  443. struct brcmf_pno_info *pi;
  444. brcmf_dbg(TRACE, "enter\n");
  445. pi = cfg->pno;
  446. cfg->pno = NULL;
  447. WARN_ON(pi->n_reqs);
  448. mutex_destroy(&pi->req_lock);
  449. kfree(pi);
  450. }
/* brcmf_pno_wiphy_params() - fill in scheduled-scan limits on @wiphy.
 *
 * @wiphy: wiphy instance to update.
 * @gscan: true when firmware supports gscan, which allows one
 *	scheduled-scan request per bucket instead of a single one.
 */
void brcmf_pno_wiphy_params(struct wiphy *wiphy, bool gscan)
{
	/* scheduled scan settings */
	wiphy->max_sched_scan_reqs = gscan ? BRCMF_PNO_MAX_BUCKETS : 1;
	wiphy->max_sched_scan_ssids = BRCMF_PNO_MAX_PFN_COUNT;
	wiphy->max_match_sets = BRCMF_PNO_MAX_PFN_COUNT;
	wiphy->max_sched_scan_ie_len = BRCMF_SCAN_IE_LEN_MAX;
	wiphy->max_sched_scan_plan_interval = BRCMF_PNO_SCHED_SCAN_MAX_PERIOD;
}
  460. u64 brcmf_pno_find_reqid_by_bucket(struct brcmf_pno_info *pi, u32 bucket)
  461. {
  462. u64 reqid = 0;
  463. mutex_lock(&pi->req_lock);
  464. if (bucket < pi->n_reqs)
  465. reqid = pi->reqs[bucket]->reqid;
  466. mutex_unlock(&pi->req_lock);
  467. return reqid;
  468. }
  469. u32 brcmf_pno_get_bucket_map(struct brcmf_pno_info *pi,
  470. struct brcmf_pno_net_info_le *ni)
  471. {
  472. struct cfg80211_sched_scan_request *req;
  473. struct cfg80211_match_set *ms;
  474. u32 bucket_map = 0;
  475. int i, j;
  476. mutex_lock(&pi->req_lock);
  477. for (i = 0; i < pi->n_reqs; i++) {
  478. req = pi->reqs[i];
  479. if (!req->n_match_sets)
  480. continue;
  481. for (j = 0; j < req->n_match_sets; j++) {
  482. ms = &req->match_sets[j];
  483. if (ms->ssid.ssid_len == ni->SSID_len &&
  484. !memcmp(ms->ssid.ssid, ni->SSID, ni->SSID_len)) {
  485. bucket_map |= BIT(i);
  486. break;
  487. }
  488. if (is_valid_ether_addr(ms->bssid) &&
  489. !memcmp(ms->bssid, ni->bssid, ETH_ALEN)) {
  490. bucket_map |= BIT(i);
  491. break;
  492. }
  493. }
  494. }
  495. mutex_unlock(&pi->req_lock);
  496. return bucket_map;
  497. }