/*
 * bpf_jit_asm64.S: Packet/header access helper functions
 * for PPC64 BPF compiler.
 *
 * Copyright 2016, Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
 *		   IBM Corporation
 *
 * Based on bpf_jit_asm.S by Matt Evans
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
  15. #include <asm/ppc_asm.h>
  16. #include <asm/ptrace.h>
  17. #include "bpf_jit64.h"
  18. /*
  19. * All of these routines are called directly from generated code,
  20. * with the below register usage:
  21. * r27 skb pointer (ctx)
  22. * r25 skb header length
  23. * r26 skb->data pointer
  24. * r4 offset
  25. *
  26. * Result is passed back in:
  27. * r8 data read in host endian format (accumulator)
  28. *
  29. * r9 is used as a temporary register
  30. */
  31. #define r_skb r27
  32. #define r_hlen r25
  33. #define r_data r26
  34. #define r_off r4
  35. #define r_val r8
  36. #define r_tmp r9
  37. _GLOBAL_TOC(sk_load_word)
  38. cmpdi r_off, 0
  39. blt bpf_slow_path_word_neg
  40. b sk_load_word_positive_offset
  41. _GLOBAL_TOC(sk_load_word_positive_offset)
  42. /* Are we accessing past headlen? */
  43. subi r_tmp, r_hlen, 4
  44. cmpd r_tmp, r_off
  45. blt bpf_slow_path_word
  46. /* Nope, just hitting the header. cr0 here is eq or gt! */
  47. LWZX_BE r_val, r_data, r_off
  48. blr /* Return success, cr0 != LT */
  49. _GLOBAL_TOC(sk_load_half)
  50. cmpdi r_off, 0
  51. blt bpf_slow_path_half_neg
  52. b sk_load_half_positive_offset
  53. _GLOBAL_TOC(sk_load_half_positive_offset)
  54. subi r_tmp, r_hlen, 2
  55. cmpd r_tmp, r_off
  56. blt bpf_slow_path_half
  57. LHZX_BE r_val, r_data, r_off
  58. blr
  59. _GLOBAL_TOC(sk_load_byte)
  60. cmpdi r_off, 0
  61. blt bpf_slow_path_byte_neg
  62. b sk_load_byte_positive_offset
  63. _GLOBAL_TOC(sk_load_byte_positive_offset)
  64. cmpd r_hlen, r_off
  65. ble bpf_slow_path_byte
  66. lbzx r_val, r_data, r_off
  67. blr
  68. /*
  69. * Call out to skb_copy_bits:
  70. * Allocate a new stack frame here to remain ABI-compliant in
  71. * stashing LR.
  72. */
  73. #define bpf_slow_path_common(SIZE) \
  74. mflr r0; \
  75. std r0, PPC_LR_STKOFF(r1); \
  76. stdu r1, -(STACK_FRAME_MIN_SIZE + BPF_PPC_STACK_LOCALS)(r1); \
  77. mr r3, r_skb; \
  78. /* r4 = r_off as passed */ \
  79. addi r5, r1, STACK_FRAME_MIN_SIZE; \
  80. li r6, SIZE; \
  81. bl skb_copy_bits; \
  82. nop; \
  83. /* save r5 */ \
  84. addi r5, r1, STACK_FRAME_MIN_SIZE; \
  85. /* r3 = 0 on success */ \
  86. addi r1, r1, STACK_FRAME_MIN_SIZE + BPF_PPC_STACK_LOCALS; \
  87. ld r0, PPC_LR_STKOFF(r1); \
  88. mtlr r0; \
  89. cmpdi r3, 0; \
  90. blt bpf_error; /* cr0 = LT */
  91. bpf_slow_path_word:
  92. bpf_slow_path_common(4)
  93. /* Data value is on stack, and cr0 != LT */
  94. LWZX_BE r_val, 0, r5
  95. blr
  96. bpf_slow_path_half:
  97. bpf_slow_path_common(2)
  98. LHZX_BE r_val, 0, r5
  99. blr
  100. bpf_slow_path_byte:
  101. bpf_slow_path_common(1)
  102. lbzx r_val, 0, r5
  103. blr
  104. /*
  105. * Call out to bpf_internal_load_pointer_neg_helper
  106. */
  107. #define sk_negative_common(SIZE) \
  108. mflr r0; \
  109. std r0, PPC_LR_STKOFF(r1); \
  110. stdu r1, -STACK_FRAME_MIN_SIZE(r1); \
  111. mr r3, r_skb; \
  112. /* r4 = r_off, as passed */ \
  113. li r5, SIZE; \
  114. bl bpf_internal_load_pointer_neg_helper; \
  115. nop; \
  116. addi r1, r1, STACK_FRAME_MIN_SIZE; \
  117. ld r0, PPC_LR_STKOFF(r1); \
  118. mtlr r0; \
  119. /* R3 != 0 on success */ \
  120. cmpldi r3, 0; \
  121. beq bpf_error_slow; /* cr0 = EQ */
  122. bpf_slow_path_word_neg:
  123. lis r_tmp, -32 /* SKF_LL_OFF */
  124. cmpd r_off, r_tmp /* addr < SKF_* */
  125. blt bpf_error /* cr0 = LT */
  126. b sk_load_word_negative_offset
  127. _GLOBAL_TOC(sk_load_word_negative_offset)
  128. sk_negative_common(4)
  129. LWZX_BE r_val, 0, r3
  130. blr
  131. bpf_slow_path_half_neg:
  132. lis r_tmp, -32 /* SKF_LL_OFF */
  133. cmpd r_off, r_tmp /* addr < SKF_* */
  134. blt bpf_error /* cr0 = LT */
  135. b sk_load_half_negative_offset
  136. _GLOBAL_TOC(sk_load_half_negative_offset)
  137. sk_negative_common(2)
  138. LHZX_BE r_val, 0, r3
  139. blr
  140. bpf_slow_path_byte_neg:
  141. lis r_tmp, -32 /* SKF_LL_OFF */
  142. cmpd r_off, r_tmp /* addr < SKF_* */
  143. blt bpf_error /* cr0 = LT */
  144. b sk_load_byte_negative_offset
  145. _GLOBAL_TOC(sk_load_byte_negative_offset)
  146. sk_negative_common(1)
  147. lbzx r_val, 0, r3
  148. blr
  149. bpf_error_slow:
  150. /* fabricate a cr0 = lt */
  151. li r_tmp, -1
  152. cmpdi r_tmp, 0
  153. bpf_error:
  154. /*
  155. * Entered with cr0 = lt
  156. * Generated code will 'blt epilogue', returning 0.
  157. */
  158. li r_val, 0
  159. blr