/* SPDX-License-Identifier: GPL-2.0 */
/*---------------------------------------------------------------------------+
 |  round_Xsig.S                                                             |
 |                                                                           |
 |  Copyright (C) 1992,1993,1994,1995                                        |
 |                       W. Metzenthen, 22 Parker St, Ormond, Vic 3163,      |
 |                       Australia.  E-mail  billm@jacobi.maths.monash.edu.au |
 |                                                                           |
 | Normalize and round a 12 byte quantity.                                   |
 | Call from C as:                                                           |
 |   int round_Xsig(Xsig *n)                                                 |
 |                                                                           |
 | Normalize a 12 byte quantity.                                             |
 | Call from C as:                                                           |
 |   int norm_Xsig(Xsig *n)                                                  |
 |                                                                           |
 | Each function returns the size of the shift (nr of bits).                 |
 |                                                                           |
 +---------------------------------------------------------------------------*/
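
/*
 * A minimal C usage sketch, for illustration only.  It assumes the
 * Xsig layout from fpu_emu.h (lsw at offset 0, midw at offset 4,
 * msw at offset 8); the variable names are hypothetical:
 *
 *      Xsig acc;
 *      int shift;
 *
 *      acc.msw = msw; acc.midw = midw; acc.lsw = lsw;
 *      shift = round_Xsig(&acc);      <- normalize, round to 64 bits
 */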

        .file "round_Xsig.S"

#include "fpu_emu.h"

.text
ENTRY(round_Xsig)
        pushl   %ebp
        movl    %esp,%ebp
        pushl   %ebx            /* Reserve some space */
        pushl   %ebx
        pushl   %esi

        movl    PARAM1,%esi

        movl    8(%esi),%edx    /* msw */
        movl    4(%esi),%ebx    /* midw */
        movl    (%esi),%eax     /* lsw */

        movl    $0,-4(%ebp)     /* shift count accumulator */

        orl     %edx,%edx       /* ms bits */
        js      L_round         /* Already normalized */
        jnz     L_shift_1       /* Shift left 1 - 31 bits */
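
/* The ms word is zero: shift left one whole word by moving midw into
   msw and lsw into midw, and note -32 (the 32-bit word shift) in the
   accumulator at -4(%ebp). */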
        movl    %ebx,%edx
        movl    %eax,%ebx
        xorl    %eax,%eax
        movl    $-32,-4(%ebp)

/* We need to shift left by 1 - 31 bits */
L_shift_1:
        bsrl    %edx,%ecx       /* get the required shift in %ecx */
        subl    $31,%ecx
        negl    %ecx            /* %ecx = 31 - position of ms set bit */
        subl    %ecx,-4(%ebp)
        shld    %cl,%ebx,%edx   /* 96-bit left shift by %cl ... */
        shld    %cl,%eax,%ebx
        shl     %cl,%eax        /* ... across all three words */
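
/* Round to nearest: if the top bit of the (discarded) low word is set,
   add one to the upper 64 bits.  Should the carry run off the top, msw
   wraps to zero; the value was all ones, so put the leading bit back
   and bump the shift count by one. */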
L_round:
        testl   $0x80000000,%eax
        jz      L_exit

        addl    $1,%ebx
        adcl    $0,%edx
        jnz     L_exit

        movl    $0x80000000,%edx        /* carry wrapped msw: restore the top bit */
        incl    -4(%ebp)

L_exit:
        movl    %edx,8(%esi)
        movl    %ebx,4(%esi)
        movl    %eax,(%esi)

        movl    -4(%ebp),%eax   /* return the shift count */

        popl    %esi
        popl    %ebx
        leave
        ret
ENDPROC(round_Xsig)
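
/* norm_Xsig below performs the same normalization as round_Xsig but
   without the rounding step; it also copes with a zero ms word twice
   over (a shift of up to 64 bits), stopping after that even if the
   result is still not normalized. */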
ENTRY(norm_Xsig)
        pushl   %ebp
        movl    %esp,%ebp
        pushl   %ebx            /* Reserve some space */
        pushl   %ebx
        pushl   %esi

        movl    PARAM1,%esi

        movl    8(%esi),%edx    /* msw */
        movl    4(%esi),%ebx    /* midw */
        movl    (%esi),%eax     /* lsw */

        movl    $0,-4(%ebp)     /* shift count accumulator */

        orl     %edx,%edx       /* ms bits */
        js      L_n_exit        /* Already normalized */
        jnz     L_n_shift_1     /* Shift left 1 - 31 bits */

        movl    %ebx,%edx
        movl    %eax,%ebx
        xorl    %eax,%eax
        movl    $-32,-4(%ebp)

        orl     %edx,%edx       /* ms bits */
        js      L_n_exit        /* Normalized now */
        jnz     L_n_shift_1     /* Shift left 1 - 31 bits */

        movl    %ebx,%edx
        movl    %eax,%ebx
        xorl    %eax,%eax
        addl    $-32,-4(%ebp)
        jmp     L_n_exit        /* Might not be normalized,
                                   but shift no more. */

/* We need to shift left by 1 - 31 bits */
L_n_shift_1:
        bsrl    %edx,%ecx       /* get the required shift in %ecx */
        subl    $31,%ecx
        negl    %ecx
        subl    %ecx,-4(%ebp)
        shld    %cl,%ebx,%edx
        shld    %cl,%eax,%ebx
        shl     %cl,%eax

L_n_exit:
        movl    %edx,8(%esi)
        movl    %ebx,4(%esi)
        movl    %eax,(%esi)

        movl    -4(%ebp),%eax   /* return the shift count */

        popl    %esi
        popl    %ebx
        leave
        ret
ENDPROC(norm_Xsig)