/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_HASH_H
#define _ASM_HASH_H

/*
 * HP-PA only implements integer multiply in the FPU. However, for
 * integer multiplies by constant, it has a number of shift-and-add
 * (but no shift-and-subtract, sigh!) instructions that a compiler
 * can synthesize a code sequence with.
 *
 * Unfortunately, GCC isn't very efficient at using them. For example
 * it uses three instructions for "x *= 21" when only two are needed.
 * But we can find a sequence manually.
 */
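/*
 * For instance (an illustrative sketch added here, not part of the
 * original comment): 21 = (5 << 2) + 1 and 5 = (1 << 2) + 1, so two
 * shift-and-add steps of at most 3 bits suffice, each a single
 * shift-and-add instruction (SH2ADD on PA-RISC):
 *
 *	t = (x << 2) + x;	-- t = 5*x
 *	x = (t << 2) + x;	-- x = 21*x
 */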
#define HAVE_ARCH__HASH_32 1

/*
 * This is a multiply by GOLDEN_RATIO_32 = 0x61C88647 optimized for the
 * PA7100 pairing rules. This is an in-order 2-way superscalar processor.
 * Only one instruction in a pair may be a shift (by more than 3 bits),
 * but other than that, simple ALU ops (including shift-and-add by up
 * to 3 bits) may be paired arbitrarily.
 *
 * PA8xxx processors also dual-issue ALU instructions, although with
 * fewer constraints, so this schedule is good for them, too.
 *
 * This 6-step sequence was found by Yevgen Voronenko's implementation
 * of the Hcub algorithm at http://spiral.ece.cmu.edu/mcm/gen.html.
 */
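/*
 * As an added illustration of that pairing rule (not part of the
 * original comment): in the sequence below, the two long shifts
 * "x << 19" and "x << 9" cannot issue in the same cycle, so "x << 19"
 * sits on a line of its own, while "x << 9" pairs with the simple ALU
 * op "a += x" written next to it.
 */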
static inline u32 __attribute_const__ __hash_32(u32 x)
{
	u32 a, b, c;

	/*
	 * Phase 1: Compute  a = (x << 19) + x,
	 * b = (x << 9) + a, c = (x << 23) + b.
	 */
	a = x << 19;		/* Two shifts can't be paired */
	b = x << 9;	a += x;
	c = x << 23;	b += a;
			c += b;

	/* Phase 2: Return (b<<11) + (c<<6) + (a<<3) - c */
	b <<= 11;
	a += c << 3;	b -= c;
	return (a << 3) + b;
}
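/*
 * A hand-worked check of the sequence above (added as an illustration;
 * it is not in the original file): after phase 1,
 *
 *	a = 0x80001*x,  b = 0x80201*x,  c = 0x880201*x
 *
 * and phase 2 combines them as
 *
 *	  (b << 11) + (c << 6) + (a << 3) - c
 *	= (0x40100800 + 0x22008040 + 0x400008 - 0x880201)*x
 *	= 0x61C88647*x				(mod 2^32)
 *
 * which is GOLDEN_RATIO_32 * x, as claimed.
 */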
#if BITS_PER_LONG == 64

#define HAVE_ARCH_HASH_64 1
/*
 * Finding a good shift-and-add chain for GOLDEN_RATIO_64 is tricky,
 * because available software for the purpose chokes on constants this
 * large. (It's mostly designed for compiling FIR filter coefficients
 * into FPGAs.)
 *
 * However, Jason Thong pointed out a work-around. The Hcub software
 * (http://spiral.ece.cmu.edu/mcm/gen.html) is designed for *multiple*
 * constant multiplication, and is good at finding shift-and-add chains
 * which share common terms.
 *
 * Looking at 0x61C8864680B583EB in binary:
 * 0110000111001000100001100100011010000000101101011000001111101011
 *  \______________/    \__________/       \_______/     \________/
 *  \______________________________/       \______________________/
 * you can see the non-zero bits are divided into several well-separated
 * blocks. Hcub can find algorithms for those terms separately, which
 * can then be shifted and added together.
 *
 * Dividing the input into 2, 3 or 4 blocks, Hcub can find solutions
 * with 10, 9 or 8 adds, respectively, making a total of 11 for the
 * whole number.
 *
 * Using just two large blocks, 0xC3910C8D << 31 in the high bits,
 * and 0xB583EB in the low bits, produces as good an algorithm as any,
 * and with one more small shift than alternatives.
 *
 * The high bits are a larger number and more work to compute, as well
 * as needing one extra cycle to shift left 31 bits before the final
 * addition, so they are the critical path for scheduling. The low bits
 * can fit into the scheduling slots left over.
 */
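/*
 * A quick sanity check of that two-block split (worked out here as an
 * illustration; it is not in the original comment):
 *
 *	0xC3910C8D << 31   = 0x61C8864680000000
 *	0x61C8864680000000 + 0xB583EB = 0x61C8864680B583EB
 *
 * which is exactly GOLDEN_RATIO_64.
 */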
/*
 * This _ASSIGN(dst, src) macro performs "dst = src", but prevents GCC
 * from inferring anything about the value assigned to "dst".
 *
 * This prevents it from mis-optimizing certain sequences.
 * In particular, gcc is annoyingly eager to combine consecutive shifts.
 * Given "x <<= 19; y += x; z += x << 1;", GCC will turn this into
 * "y += x << 19; z += x << 20;" even though the latter sequence needs
 * an additional instruction and temporary register.
 *
 * Because no actual assembly code is generated, this construct is
 * usefully portable across all GCC platforms, and so can be test-compiled
 * on non-PA systems.
 *
 * In two places, additional unused input dependencies are added. This
 * forces GCC's scheduling so it does not rearrange instructions too much.
 * Because the PA8xxx is out-of-order, I'm not sure how much this matters,
 * but why make it more difficult for the processor than necessary?
 */
#define _ASSIGN(dst, src, ...) asm("" : "=r" (dst) : "0" (src), ##__VA_ARGS__)
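/*
 * Illustrative use (a sketch added for exposition, not part of the
 * original file): routing the shifted value through _ASSIGN() hides it
 * from GCC, so the compiler cannot merge the two shifts as described
 * above.
 *
 *	u64 t;
 *	_ASSIGN(t, x << 19);	-- GCC must treat t as an opaque register
 *	y += t;			-- stays a plain add
 *	z += t << 1;		-- stays a 1-bit shift-and-add
 */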
/*
 * Multiply by GOLDEN_RATIO_64 = 0x61C8864680B583EB using a heavily
 * optimized shift-and-add sequence.
 *
 * Without the final shift, the multiply proper is 19 instructions,
 * 10 cycles and uses only 4 temporaries. Whew!
 *
 * You are not expected to understand this.
 */
static __always_inline u32 __attribute_const__
hash_64(u64 a, unsigned int bits)
{
	u64 b, c, d;

	/*
	 * Encourage GCC to move a dynamic shift to %sar early,
	 * thereby freeing up an additional temporary register.
	 */
	if (!__builtin_constant_p(bits))
		asm("" : "=q" (bits) : "0" (64 - bits));
	else
		bits = 64 - bits;

	_ASSIGN(b, a*5);	c = a << 13;
	b = (b << 2) + a;	_ASSIGN(d, a << 17);
	a = b + (a << 1);	c += d;
	d = a << 10;		_ASSIGN(a, a << 19);
	d = a - d;		_ASSIGN(a, a << 4, "X" (d));
	c += b;			a += b;
	d -= c;			c += a << 1;
	a += c << 3;		_ASSIGN(b, b << (7+31), "X" (c), "X" (d));
	a <<= 31;		b += d;
	a += b;
	return a >> bits;
}
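/*
 * For the curious, a hand-worked trace of the chain above (added here
 * as an illustration; the numbers are not from the original source, so
 * double-check before relying on them). Each line lists the multiple
 * of the original argument held after the corresponding statement pair:
 *
 *	b = 0x5			c = 0x2000
 *	b = 0x15		d = 0x20000
 *	a = 0x17		c = 0x22000
 *	d = 0x5C00		a = 0xB80000
 *	d = 0xB7A400		a = 0xB800000
 *	c = 0x22015		a = 0xB800015
 *	d = 0xB583EB		c = 0x1702203F
 *	a = 0xC391020D		b = 0x15 << 38
 *	a = 0xC391020D << 31	b = (0x15 << 38) + 0xB583EB
 *
 * and the final addition gives (0xC3910C8D << 31) + 0xB583EB
 * = 0x61C8864680B583EB = GOLDEN_RATIO_64 (mod 2^64).
 */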
#undef _ASSIGN	/* We're a widely-used header file, so don't litter! */

#endif /* BITS_PER_LONG == 64 */
#endif /* _ASM_HASH_H */