  1. // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
  2. /* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
  3. #include <linux/kernel.h>
  4. #include <linux/bitops.h>
  5. #include "spectrum_cnt.h"
  6. #define MLXSW_SP_COUNTER_POOL_BANK_SIZE 4096
/* A fixed-size slice of the device counter pool dedicated to one user
 * (see enum mlxsw_sp_counter_sub_pool_id). Sizing is in units of
 * MLXSW_SP_COUNTER_POOL_BANK_SIZE banks.
 */
struct mlxsw_sp_counter_sub_pool {
	unsigned int base_index;	/* first pool index of this sub-pool */
	unsigned int size;		/* number of pool entries (may be
					 * trimmed if the pool ends mid-bank)
					 */
	unsigned int entry_size;	/* pool entries consumed per counter,
					 * read from device resources at init
					 */
	unsigned int bank_count;	/* static bank budget for this user */
};
/* Top-level counter pool state, allocated at init and attached to
 * mlxsw_sp->counter_pool.
 */
struct mlxsw_sp_counter_pool {
	unsigned int pool_size;			/* total entries, from the
						 * COUNTER_POOL_SIZE resource
						 */
	unsigned long *usage; /* Usage bitmap, one bit per pool entry */
	struct mlxsw_sp_counter_sub_pool *sub_pools;
};
/* Static bank budget per sub-pool; entry_size/base_index/size are
 * filled in at pool init from device resources.
 */
static struct mlxsw_sp_counter_sub_pool mlxsw_sp_counter_sub_pools[] = {
	[MLXSW_SP_COUNTER_SUB_POOL_FLOW] = {
		.bank_count = 6,
	},
	[MLXSW_SP_COUNTER_SUB_POOL_RIF] = {
		.bank_count = 2,
	}
};
  26. static int mlxsw_sp_counter_pool_validate(struct mlxsw_sp *mlxsw_sp)
  27. {
  28. unsigned int total_bank_config = 0;
  29. unsigned int pool_size;
  30. int i;
  31. pool_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, COUNTER_POOL_SIZE);
  32. /* Check config is valid, no bank over subscription */
  33. for (i = 0; i < ARRAY_SIZE(mlxsw_sp_counter_sub_pools); i++)
  34. total_bank_config += mlxsw_sp_counter_sub_pools[i].bank_count;
  35. if (total_bank_config > pool_size / MLXSW_SP_COUNTER_POOL_BANK_SIZE + 1)
  36. return -EINVAL;
  37. return 0;
  38. }
  39. static int mlxsw_sp_counter_sub_pools_prepare(struct mlxsw_sp *mlxsw_sp)
  40. {
  41. struct mlxsw_sp_counter_sub_pool *sub_pool;
  42. /* Prepare generic flow pool*/
  43. sub_pool = &mlxsw_sp_counter_sub_pools[MLXSW_SP_COUNTER_SUB_POOL_FLOW];
  44. if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, COUNTER_SIZE_PACKETS_BYTES))
  45. return -EIO;
  46. sub_pool->entry_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
  47. COUNTER_SIZE_PACKETS_BYTES);
  48. /* Prepare erif pool*/
  49. sub_pool = &mlxsw_sp_counter_sub_pools[MLXSW_SP_COUNTER_SUB_POOL_RIF];
  50. if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, COUNTER_SIZE_ROUTER_BASIC))
  51. return -EIO;
  52. sub_pool->entry_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
  53. COUNTER_SIZE_ROUTER_BASIC);
  54. return 0;
  55. }
  56. int mlxsw_sp_counter_pool_init(struct mlxsw_sp *mlxsw_sp)
  57. {
  58. struct mlxsw_sp_counter_sub_pool *sub_pool;
  59. struct mlxsw_sp_counter_pool *pool;
  60. unsigned int base_index;
  61. unsigned int map_size;
  62. int i;
  63. int err;
  64. if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, COUNTER_POOL_SIZE))
  65. return -EIO;
  66. err = mlxsw_sp_counter_pool_validate(mlxsw_sp);
  67. if (err)
  68. return err;
  69. err = mlxsw_sp_counter_sub_pools_prepare(mlxsw_sp);
  70. if (err)
  71. return err;
  72. pool = kzalloc(sizeof(*pool), GFP_KERNEL);
  73. if (!pool)
  74. return -ENOMEM;
  75. pool->pool_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, COUNTER_POOL_SIZE);
  76. map_size = BITS_TO_LONGS(pool->pool_size) * sizeof(unsigned long);
  77. pool->usage = kzalloc(map_size, GFP_KERNEL);
  78. if (!pool->usage) {
  79. err = -ENOMEM;
  80. goto err_usage_alloc;
  81. }
  82. pool->sub_pools = mlxsw_sp_counter_sub_pools;
  83. /* Allocation is based on bank count which should be
  84. * specified for each sub pool statically.
  85. */
  86. base_index = 0;
  87. for (i = 0; i < ARRAY_SIZE(mlxsw_sp_counter_sub_pools); i++) {
  88. sub_pool = &pool->sub_pools[i];
  89. sub_pool->size = sub_pool->bank_count *
  90. MLXSW_SP_COUNTER_POOL_BANK_SIZE;
  91. sub_pool->base_index = base_index;
  92. base_index += sub_pool->size;
  93. /* The last bank can't be fully used */
  94. if (sub_pool->base_index + sub_pool->size > pool->pool_size)
  95. sub_pool->size = pool->pool_size - sub_pool->base_index;
  96. }
  97. mlxsw_sp->counter_pool = pool;
  98. return 0;
  99. err_usage_alloc:
  100. kfree(pool);
  101. return err;
  102. }
/* Tear down the counter pool created by mlxsw_sp_counter_pool_init().
 * Warns if any usage bit is still set, i.e. a counter was leaked by
 * a user that never called mlxsw_sp_counter_free().
 */
void mlxsw_sp_counter_pool_fini(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool;

	/* find_first_bit() == pool_size means the bitmap is all-clear */
	WARN_ON(find_first_bit(pool->usage, pool->pool_size) !=
		pool->pool_size);
	kfree(pool->usage);
	kfree(pool);
}
/* Allocate one counter from the given sub-pool using first-fit on the
 * usage bitmap. A counter occupies sub_pool->entry_size consecutive
 * pool entries; *p_counter_index receives the first entry's index.
 *
 * Only the first bit of the run is checked for being free. This relies
 * on an invariant that all allocations within a sub-pool have the same
 * entry_size and are freed whole, so free runs always start at
 * entry_size-aligned offsets from base_index — NOTE(review): holds for
 * the alloc/free pair in this file; confirm no other path touches the
 * bitmap.
 *
 * Return: 0 on success, -ENOBUFS if the sub-pool is exhausted.
 */
int mlxsw_sp_counter_alloc(struct mlxsw_sp *mlxsw_sp,
			   enum mlxsw_sp_counter_sub_pool_id sub_pool_id,
			   unsigned int *p_counter_index)
{
	struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool;
	struct mlxsw_sp_counter_sub_pool *sub_pool;
	unsigned int entry_index;
	unsigned int stop_index;
	int i;

	sub_pool = &mlxsw_sp_counter_sub_pools[sub_pool_id];
	/* Search is bounded to this sub-pool's [base_index, stop_index) */
	stop_index = sub_pool->base_index + sub_pool->size;
	entry_index = sub_pool->base_index;

	entry_index = find_next_zero_bit(pool->usage, stop_index, entry_index);
	if (entry_index == stop_index)
		return -ENOBUFS;
	/* The sub-pools can contain non-integer number of entries
	 * so we must check for overflow
	 */
	if (entry_index + sub_pool->entry_size > stop_index)
		return -ENOBUFS;
	/* Mark every bit of the multi-entry run as in use */
	for (i = 0; i < sub_pool->entry_size; i++)
		__set_bit(entry_index + i, pool->usage);
	*p_counter_index = entry_index;
	return 0;
}
  136. void mlxsw_sp_counter_free(struct mlxsw_sp *mlxsw_sp,
  137. enum mlxsw_sp_counter_sub_pool_id sub_pool_id,
  138. unsigned int counter_index)
  139. {
  140. struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool;
  141. struct mlxsw_sp_counter_sub_pool *sub_pool;
  142. int i;
  143. if (WARN_ON(counter_index >= pool->pool_size))
  144. return;
  145. sub_pool = &mlxsw_sp_counter_sub_pools[sub_pool_id];
  146. for (i = 0; i < sub_pool->entry_size; i++)
  147. __clear_bit(counter_index + i, pool->usage);
  148. }