  1. /* Copyright (c) 2016 Facebook
  2. *
  3. * This program is free software; you can redistribute it and/or
  4. * modify it under the terms of version 2 of the GNU General Public
  5. * License as published by the Free Software Foundation.
  6. */
#ifndef __BPF_LRU_LIST_H_
#define __BPF_LRU_LIST_H_

#include <linux/list.h>
#include <linux/spinlock_types.h>

/* Number of global LRU list heads (active, inactive, free). */
#define NR_BPF_LRU_LIST_T (3)
/* Number of per-type element counters kept in bpf_lru_list::counts;
 * presumably only the active and inactive lists are counted — verify
 * against the .c users.
 */
#define NR_BPF_LRU_LIST_COUNT (2)
/* Number of per-cpu local list heads (local free, local pending). */
#define NR_BPF_LRU_LOCAL_LIST_T (2)
/* The local list types follow the global ones in enum
 * bpf_lru_list_type, so subtracting this offset converts a local
 * list type into an index of bpf_lru_locallist::lists.
 */
#define BPF_LOCAL_LIST_T_OFFSET NR_BPF_LRU_LIST_T
/* Every bpf_lru_node lives on exactly one list at a time.  The first
 * NR_BPF_LRU_LIST_T values index bpf_lru_list::lists; the remaining
 * values are per-cpu local lists and index bpf_lru_locallist::lists
 * after subtracting BPF_LOCAL_LIST_T_OFFSET.  The ordering is
 * load-bearing — do not reorder or insert entries.
 */
enum bpf_lru_list_type {
	BPF_LRU_LIST_T_ACTIVE,		/* global active list */
	BPF_LRU_LIST_T_INACTIVE,	/* global inactive list */
	BPF_LRU_LIST_T_FREE,		/* global free list */
	BPF_LRU_LOCAL_LIST_T_FREE,	/* per-cpu local free list */
	BPF_LRU_LOCAL_LIST_T_PENDING,	/* per-cpu local pending list */
};
/* Per-element bookkeeping embedded (at a caller-chosen node_offset,
 * see bpf_lru_populate()) inside each LRU-managed element.
 */
struct bpf_lru_node {
	struct list_head list;	/* linkage into one LRU/local list */
	u16 cpu;		/* owning cpu — presumably the cpu whose
				 * local list holds the node; confirm in .c
				 */
	u8 type;		/* enum bpf_lru_list_type the node is on */
	u8 ref;			/* recently-referenced flag; set lazily by
				 * bpf_lru_node_set_ref() without locking
				 */
};
/* One global (or, in percpu mode, per-cpu) LRU instance. */
struct bpf_lru_list {
	struct list_head lists[NR_BPF_LRU_LIST_T];	/* active/inactive/free */
	unsigned int counts[NR_BPF_LRU_LIST_COUNT];
	/* The next inactive list rotation starts from here */
	struct list_head *next_inactive_rotation;

	/* Protects the lists above; on its own cache line to avoid
	 * false sharing with the read-mostly fields.
	 */
	raw_spinlock_t lock ____cacheline_aligned_in_smp;
};
/* Per-cpu cache of nodes in front of the shared bpf_lru_list. */
struct bpf_lru_locallist {
	struct list_head lists[NR_BPF_LRU_LOCAL_LIST_T]; /* free + pending */
	u16 next_steal;		/* next cpu to steal free nodes from —
				 * presumably a round-robin cursor; confirm
				 * against the stealing logic in the .c
				 */
	raw_spinlock_t lock;	/* protects lists and next_steal */
};
/* Non-percpu flavour: one shared LRU plus a per-cpu local cache. */
struct bpf_common_lru {
	struct bpf_lru_list lru_list;			/* shared global LRU */
	struct bpf_lru_locallist __percpu *local_list;	/* per-cpu caches */
};
/* Callback the LRU uses to evict @node from its owner (the hash table);
 * @arg is the opaque del_arg passed to bpf_lru_init().  Returns whether
 * the node was removed — confirm exact contract at the call site.
 */
typedef bool (*del_from_htab_func)(void *arg, struct bpf_lru_node *node);
/* Top-level LRU handle.  The union is discriminated by @percpu:
 * true selects percpu_lru (independent per-cpu LRUs), false selects
 * common_lru (one shared LRU with per-cpu local caches).
 */
struct bpf_lru {
	union {
		struct bpf_common_lru common_lru;
		struct bpf_lru_list __percpu *percpu_lru;
	};
	del_from_htab_func del_from_htab;	/* eviction callback */
	void *del_arg;				/* opaque arg for callback */
	unsigned int hash_offset;		/* from bpf_lru_init(); byte
						 * offset of the hash within
						 * the element — presumably,
						 * confirm in the .c
						 */
	unsigned int nr_scans;			/* shrink-scan budget —
						 * confirm semantics in .c
						 */
	bool percpu;				/* union discriminator */
};
  56. static inline void bpf_lru_node_set_ref(struct bpf_lru_node *node)
  57. {
  58. /* ref is an approximation on access frequency. It does not
  59. * have to be very accurate. Hence, no protection is used.
  60. */
  61. if (!node->ref)
  62. node->ref = 1;
  63. }
/* Initialize @lru.  @percpu selects the union flavour; @hash_offset and
 * the del_from_htab/@delete_arg pair are stored for later eviction.
 * Returns 0 or a negative errno — confirm exact codes in the .c.
 */
int bpf_lru_init(struct bpf_lru *lru, bool percpu, u32 hash_offset,
		 del_from_htab_func del_from_htab, void *delete_arg);
/* Seed the free list(s) with @nr_elems elements of @elem_size bytes in
 * @buf; @node_offset locates the embedded bpf_lru_node in each element.
 */
void bpf_lru_populate(struct bpf_lru *lru, void *buf, u32 node_offset,
		      u32 elem_size, u32 nr_elems);
/* Tear down @lru (caller still owns the element buffer). */
void bpf_lru_destroy(struct bpf_lru *lru);
/* Take a free node, evicting via del_from_htab if needed; @hash is
 * presumably used for percpu/local-list selection — confirm in .c.
 * May return NULL.
 */
struct bpf_lru_node *bpf_lru_pop_free(struct bpf_lru *lru, u32 hash);
/* Return @node to a free list. */
void bpf_lru_push_free(struct bpf_lru *lru, struct bpf_lru_node *node);
/* Move @node toward the active end (promote a referenced node). */
void bpf_lru_promote(struct bpf_lru *lru, struct bpf_lru_node *node);

#endif /* __BPF_LRU_LIST_H_ */