/* dfs_pri_detector.c - pulse repetition interval (PRI) pattern detector */
  1. /*
  2. * Copyright (c) 2012 Neratec Solutions AG
  3. *
  4. * Permission to use, copy, modify, and/or distribute this software for any
  5. * purpose with or without fee is hereby granted, provided that the above
  6. * copyright notice and this permission notice appear in all copies.
  7. *
  8. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  9. * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  10. * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
  11. * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  12. * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  13. * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  14. * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  15. */
  16. #include <linux/slab.h>
  17. #include <linux/spinlock.h>
  18. #include "ath.h"
  19. #include "dfs_pattern_detector.h"
  20. #include "dfs_pri_detector.h"
/* pool usage statistics shared by all pri_detector instances */
struct ath_dfs_pool_stats global_dfs_pool_stats = {};

/* increment/decrement one named counter in the global pool statistics */
#define DFS_POOL_STAT_INC(c) (global_dfs_pool_stats.c++)
#define DFS_POOL_STAT_DEC(c) (global_dfs_pool_stats.c--)
/**
 * struct pulse_elem - elements in pulse queue
 * @head: list membership in a pri_detector's pulse queue (or the free pool)
 * @ts: time stamp in usecs
 */
struct pulse_elem {
	struct list_head head;
	u64 ts;
};
  32. /**
  33. * pde_get_multiple() - get number of multiples considering a given tolerance
  34. * @return factor if abs(val - factor*fraction) <= tolerance, 0 otherwise
  35. */
  36. static u32 pde_get_multiple(u32 val, u32 fraction, u32 tolerance)
  37. {
  38. u32 remainder;
  39. u32 factor;
  40. u32 delta;
  41. if (fraction == 0)
  42. return 0;
  43. delta = (val < fraction) ? (fraction - val) : (val - fraction);
  44. if (delta <= tolerance)
  45. /* val and fraction are within tolerance */
  46. return 1;
  47. factor = val / fraction;
  48. remainder = val % fraction;
  49. if (remainder > tolerance) {
  50. /* no exact match */
  51. if ((fraction - remainder) <= tolerance)
  52. /* remainder is within tolerance */
  53. factor++;
  54. else
  55. factor = 0;
  56. }
  57. return factor;
  58. }
/**
 * DOC: Singleton Pulse and Sequence Pools
 *
 * Instances of pri_sequence and pulse_elem are kept in singleton pools to
 * reduce the number of dynamic allocations. They are shared between all
 * instances and grow up to the peak number of simultaneously used objects.
 *
 * Memory is freed after all references to the pools are released.
 */
/* number of pri_detector instances currently sharing the pools */
static u32 singleton_pool_references;
static LIST_HEAD(pulse_pool);
static LIST_HEAD(pseq_pool);
/* protects both pools and the reference counter; _bh since callers may
 * run in softirq context */
static DEFINE_SPINLOCK(pool_lock);
  72. static void pool_register_ref(void)
  73. {
  74. spin_lock_bh(&pool_lock);
  75. singleton_pool_references++;
  76. DFS_POOL_STAT_INC(pool_reference);
  77. spin_unlock_bh(&pool_lock);
  78. }
  79. static void pool_deregister_ref(void)
  80. {
  81. spin_lock_bh(&pool_lock);
  82. singleton_pool_references--;
  83. DFS_POOL_STAT_DEC(pool_reference);
  84. if (singleton_pool_references == 0) {
  85. /* free singleton pools with no references left */
  86. struct pri_sequence *ps, *ps0;
  87. struct pulse_elem *p, *p0;
  88. list_for_each_entry_safe(p, p0, &pulse_pool, head) {
  89. list_del(&p->head);
  90. DFS_POOL_STAT_DEC(pulse_allocated);
  91. kfree(p);
  92. }
  93. list_for_each_entry_safe(ps, ps0, &pseq_pool, head) {
  94. list_del(&ps->head);
  95. DFS_POOL_STAT_DEC(pseq_allocated);
  96. kfree(ps);
  97. }
  98. }
  99. spin_unlock_bh(&pool_lock);
  100. }
  101. static void pool_put_pulse_elem(struct pulse_elem *pe)
  102. {
  103. spin_lock_bh(&pool_lock);
  104. list_add(&pe->head, &pulse_pool);
  105. DFS_POOL_STAT_DEC(pulse_used);
  106. spin_unlock_bh(&pool_lock);
  107. }
  108. static void pool_put_pseq_elem(struct pri_sequence *pse)
  109. {
  110. spin_lock_bh(&pool_lock);
  111. list_add(&pse->head, &pseq_pool);
  112. DFS_POOL_STAT_DEC(pseq_used);
  113. spin_unlock_bh(&pool_lock);
  114. }
  115. static struct pri_sequence *pool_get_pseq_elem(void)
  116. {
  117. struct pri_sequence *pse = NULL;
  118. spin_lock_bh(&pool_lock);
  119. if (!list_empty(&pseq_pool)) {
  120. pse = list_first_entry(&pseq_pool, struct pri_sequence, head);
  121. list_del(&pse->head);
  122. DFS_POOL_STAT_INC(pseq_used);
  123. }
  124. spin_unlock_bh(&pool_lock);
  125. return pse;
  126. }
  127. static struct pulse_elem *pool_get_pulse_elem(void)
  128. {
  129. struct pulse_elem *pe = NULL;
  130. spin_lock_bh(&pool_lock);
  131. if (!list_empty(&pulse_pool)) {
  132. pe = list_first_entry(&pulse_pool, struct pulse_elem, head);
  133. list_del(&pe->head);
  134. DFS_POOL_STAT_INC(pulse_used);
  135. }
  136. spin_unlock_bh(&pool_lock);
  137. return pe;
  138. }
  139. static struct pulse_elem *pulse_queue_get_tail(struct pri_detector *pde)
  140. {
  141. struct list_head *l = &pde->pulses;
  142. if (list_empty(l))
  143. return NULL;
  144. return list_entry(l->prev, struct pulse_elem, head);
  145. }
  146. static bool pulse_queue_dequeue(struct pri_detector *pde)
  147. {
  148. struct pulse_elem *p = pulse_queue_get_tail(pde);
  149. if (p != NULL) {
  150. list_del_init(&p->head);
  151. pde->count--;
  152. /* give it back to pool */
  153. pool_put_pulse_elem(p);
  154. }
  155. return (pde->count > 0);
  156. }
  157. /* remove pulses older than window */
  158. static void pulse_queue_check_window(struct pri_detector *pde)
  159. {
  160. u64 min_valid_ts;
  161. struct pulse_elem *p;
  162. /* there is no delta time with less than 2 pulses */
  163. if (pde->count < 2)
  164. return;
  165. if (pde->last_ts <= pde->window_size)
  166. return;
  167. min_valid_ts = pde->last_ts - pde->window_size;
  168. while ((p = pulse_queue_get_tail(pde)) != NULL) {
  169. if (p->ts >= min_valid_ts)
  170. return;
  171. pulse_queue_dequeue(pde);
  172. }
  173. }
  174. static bool pulse_queue_enqueue(struct pri_detector *pde, u64 ts)
  175. {
  176. struct pulse_elem *p = pool_get_pulse_elem();
  177. if (p == NULL) {
  178. p = kmalloc(sizeof(*p), GFP_ATOMIC);
  179. if (p == NULL) {
  180. DFS_POOL_STAT_INC(pulse_alloc_error);
  181. return false;
  182. }
  183. DFS_POOL_STAT_INC(pulse_allocated);
  184. DFS_POOL_STAT_INC(pulse_used);
  185. }
  186. INIT_LIST_HEAD(&p->head);
  187. p->ts = ts;
  188. list_add(&p->head, &pde->pulses);
  189. pde->count++;
  190. pde->last_ts = ts;
  191. pulse_queue_check_window(pde);
  192. if (pde->count >= pde->max_count)
  193. pulse_queue_dequeue(pde);
  194. return true;
  195. }
/*
 * pseq_handler_create_sequences() - build new candidate PRI sequences
 * @pde: detector instance whose pulse queue is scanned
 * @ts: time stamp of the current pulse (usecs)
 * @min_count: minimum number of matching pulses a new sequence must have
 *
 * For each queued pulse whose distance to @ts lies within [pri_min, pri_max],
 * a candidate sequence with that PRI is built; older queued pulses matching a
 * multiple of the PRI are counted in, non-matching ones are tallied as false
 * pulses. Candidates reaching @min_count are stored on pde->sequences.
 *
 * Returns false only on allocation failure, true otherwise.
 */
static bool pseq_handler_create_sequences(struct pri_detector *pde,
					  u64 ts, u32 min_count)
{
	struct pulse_elem *p;

	/* pulses are kept newest-first, so delta_ts grows as we iterate */
	list_for_each_entry(p, &pde->pulses, head) {
		struct pri_sequence ps, *new_ps;
		struct pulse_elem *p2;
		u32 tmp_false_count;
		u64 min_valid_ts;
		u32 delta_ts = ts - p->ts;

		if (delta_ts < pde->rs->pri_min)
			/* ignore too small pri */
			continue;

		if (delta_ts > pde->rs->pri_max)
			/* stop on too large pri (sorted list) */
			break;

		/* build a new sequence with new potential pri */
		ps.count = 2;
		ps.count_falses = 0;
		ps.first_ts = p->ts;
		ps.last_ts = ts;
		ps.pri = ts - p->ts;
		/* window covering ppb pulses at this PRI, plus tolerance */
		ps.dur = ps.pri * (pde->rs->ppb - 1)
				+ 2 * pde->rs->max_pri_tolerance;

		p2 = p;
		tmp_false_count = 0;
		min_valid_ts = ts - ps.dur;
		/* check which past pulses are candidates for new sequence */
		list_for_each_entry_continue(p2, &pde->pulses, head) {
			u32 factor;
			if (p2->ts < min_valid_ts)
				/* stop on crossing window border */
				break;
			/* check if pulse match (multi)PRI */
			factor = pde_get_multiple(ps.last_ts - p2->ts, ps.pri,
						  pde->rs->max_pri_tolerance);
			if (factor > 0) {
				ps.count++;
				ps.first_ts = p2->ts;
				/*
				 * on match, add the intermediate falses
				 * and reset counter
				 */
				ps.count_falses += tmp_false_count;
				tmp_false_count = 0;
			} else {
				/* this is a potential false one */
				tmp_false_count++;
			}
		}
		if (ps.count < min_count)
			/* did not reach minimum count, drop sequence */
			continue;

		/* this is a valid one, add it */
		ps.deadline_ts = ps.first_ts + ps.dur;
		new_ps = pool_get_pseq_elem();
		if (new_ps == NULL) {
			new_ps = kmalloc(sizeof(*new_ps), GFP_ATOMIC);
			if (new_ps == NULL) {
				DFS_POOL_STAT_INC(pseq_alloc_error);
				return false;
			}
			DFS_POOL_STAT_INC(pseq_allocated);
			DFS_POOL_STAT_INC(pseq_used);
		}
		memcpy(new_ps, &ps, sizeof(ps));
		INIT_LIST_HEAD(&new_ps->head);
		list_add(&new_ps->head, &pde->sequences);
	}
	return true;
}
  267. /* check new ts and add to all matching existing sequences */
  268. static u32
  269. pseq_handler_add_to_existing_seqs(struct pri_detector *pde, u64 ts)
  270. {
  271. u32 max_count = 0;
  272. struct pri_sequence *ps, *ps2;
  273. list_for_each_entry_safe(ps, ps2, &pde->sequences, head) {
  274. u32 delta_ts;
  275. u32 factor;
  276. /* first ensure that sequence is within window */
  277. if (ts > ps->deadline_ts) {
  278. list_del_init(&ps->head);
  279. pool_put_pseq_elem(ps);
  280. continue;
  281. }
  282. delta_ts = ts - ps->last_ts;
  283. factor = pde_get_multiple(delta_ts, ps->pri,
  284. pde->rs->max_pri_tolerance);
  285. if (factor > 0) {
  286. ps->last_ts = ts;
  287. ps->count++;
  288. if (max_count < ps->count)
  289. max_count = ps->count;
  290. } else {
  291. ps->count_falses++;
  292. }
  293. }
  294. return max_count;
  295. }
  296. static struct pri_sequence *
  297. pseq_handler_check_detection(struct pri_detector *pde)
  298. {
  299. struct pri_sequence *ps;
  300. if (list_empty(&pde->sequences))
  301. return NULL;
  302. list_for_each_entry(ps, &pde->sequences, head) {
  303. /*
  304. * we assume to have enough matching confidence if we
  305. * 1) have enough pulses
  306. * 2) have more matching than false pulses
  307. */
  308. if ((ps->count >= pde->rs->ppb_thresh) &&
  309. (ps->count * pde->rs->num_pri >= ps->count_falses))
  310. return ps;
  311. }
  312. return NULL;
  313. }
  314. /* free pulse queue and sequences list and give objects back to pools */
  315. static void pri_detector_reset(struct pri_detector *pde, u64 ts)
  316. {
  317. struct pri_sequence *ps, *ps0;
  318. struct pulse_elem *p, *p0;
  319. list_for_each_entry_safe(ps, ps0, &pde->sequences, head) {
  320. list_del_init(&ps->head);
  321. pool_put_pseq_elem(ps);
  322. }
  323. list_for_each_entry_safe(p, p0, &pde->pulses, head) {
  324. list_del_init(&p->head);
  325. pool_put_pulse_elem(p);
  326. }
  327. pde->count = 0;
  328. pde->last_ts = ts;
  329. }
  330. static void pri_detector_exit(struct pri_detector *de)
  331. {
  332. pri_detector_reset(de, 0);
  333. pool_deregister_ref();
  334. kfree(de);
  335. }
  336. static struct pri_sequence *pri_detector_add_pulse(struct pri_detector *de,
  337. struct pulse_event *event)
  338. {
  339. u32 max_updated_seq;
  340. struct pri_sequence *ps;
  341. u64 ts = event->ts;
  342. const struct radar_detector_specs *rs = de->rs;
  343. /* ignore pulses not within width range */
  344. if ((rs->width_min > event->width) || (rs->width_max < event->width))
  345. return NULL;
  346. if ((ts - de->last_ts) < rs->max_pri_tolerance)
  347. /* if delta to last pulse is too short, don't use this pulse */
  348. return NULL;
  349. /* radar detector spec needs chirp, but not detected */
  350. if (rs->chirp && rs->chirp != event->chirp)
  351. return NULL;
  352. de->last_ts = ts;
  353. max_updated_seq = pseq_handler_add_to_existing_seqs(de, ts);
  354. if (!pseq_handler_create_sequences(de, ts, max_updated_seq)) {
  355. pri_detector_reset(de, ts);
  356. return NULL;
  357. }
  358. ps = pseq_handler_check_detection(de);
  359. if (ps == NULL)
  360. pulse_queue_enqueue(de, ts);
  361. return ps;
  362. }
  363. struct pri_detector *pri_detector_init(const struct radar_detector_specs *rs)
  364. {
  365. struct pri_detector *de;
  366. de = kzalloc(sizeof(*de), GFP_ATOMIC);
  367. if (de == NULL)
  368. return NULL;
  369. de->exit = pri_detector_exit;
  370. de->add_pulse = pri_detector_add_pulse;
  371. de->reset = pri_detector_reset;
  372. INIT_LIST_HEAD(&de->sequences);
  373. INIT_LIST_HEAD(&de->pulses);
  374. de->window_size = rs->pri_max * rs->ppb * rs->num_pri;
  375. de->max_count = rs->ppb * 2;
  376. de->rs = rs;
  377. pool_register_ref();
  378. return de;
  379. }