/* reg_norm.S */
/* SPDX-License-Identifier: GPL-2.0 */
/*---------------------------------------------------------------------------+
 |  reg_norm.S                                                               |
 |                                                                           |
 |  Copyright (C) 1992,1993,1994,1995,1997                                   |
 |                       W. Metzenthen, 22 Parker St, Ormond, Vic 3163,      |
 |                       Australia.  E-mail   billm@suburbia.net             |
 |                                                                           |
 |  Normalize the value in a FPU_REG.                                        |
 |                                                                           |
 |  Call from C as:                                                          |
 |    int FPU_normalize(FPU_REG *n)                                          |
 |                                                                           |
 |    int FPU_normalize_nuo(FPU_REG *n)                                      |
 |                                                                           |
 |  Return value is the tag of the answer, or-ed with FPU_Exception if       |
 |  one was raised, or -1 on internal error.                                 |
 |                                                                           |
 +---------------------------------------------------------------------------*/
  20. #include "fpu_emu.h"
  21. .text
  22. ENTRY(FPU_normalize)
  23. pushl %ebp
  24. movl %esp,%ebp
  25. pushl %ebx
  26. movl PARAM1,%ebx
  27. movl SIGH(%ebx),%edx
  28. movl SIGL(%ebx),%eax
  29. orl %edx,%edx /* ms bits */
  30. js L_done /* Already normalized */
  31. jnz L_shift_1 /* Shift left 1 - 31 bits */
  32. orl %eax,%eax
  33. jz L_zero /* The contents are zero */
  34. movl %eax,%edx
  35. xorl %eax,%eax
  36. subw $32,EXP(%ebx) /* This can cause an underflow */
  37. /* We need to shift left by 1 - 31 bits */
  38. L_shift_1:
  39. bsrl %edx,%ecx /* get the required shift in %ecx */
  40. subl $31,%ecx
  41. negl %ecx
  42. shld %cl,%eax,%edx
  43. shl %cl,%eax
  44. subw %cx,EXP(%ebx) /* This can cause an underflow */
  45. movl %edx,SIGH(%ebx)
  46. movl %eax,SIGL(%ebx)
  47. L_done:
  48. cmpw EXP_OVER,EXP(%ebx)
  49. jge L_overflow
  50. cmpw EXP_UNDER,EXP(%ebx)
  51. jle L_underflow
  52. L_exit_valid:
  53. movl TAG_Valid,%eax
  54. /* Convert the exponent to 80x87 form. */
  55. addw EXTENDED_Ebias,EXP(%ebx)
  56. andw $0x7fff,EXP(%ebx)
  57. L_exit:
  58. popl %ebx
  59. leave
  60. ret
  61. L_zero:
  62. movw $0,EXP(%ebx)
  63. movl TAG_Zero,%eax
  64. jmp L_exit
  65. L_underflow:
  66. /* Convert the exponent to 80x87 form. */
  67. addw EXTENDED_Ebias,EXP(%ebx)
  68. push %ebx
  69. call arith_underflow
  70. pop %ebx
  71. jmp L_exit
  72. L_overflow:
  73. /* Convert the exponent to 80x87 form. */
  74. addw EXTENDED_Ebias,EXP(%ebx)
  75. push %ebx
  76. call arith_overflow
  77. pop %ebx
  78. jmp L_exit
  79. ENDPROC(FPU_normalize)
  80. /* Normalise without reporting underflow or overflow */
  81. ENTRY(FPU_normalize_nuo)
  82. pushl %ebp
  83. movl %esp,%ebp
  84. pushl %ebx
  85. movl PARAM1,%ebx
  86. movl SIGH(%ebx),%edx
  87. movl SIGL(%ebx),%eax
  88. orl %edx,%edx /* ms bits */
  89. js L_exit_nuo_valid /* Already normalized */
  90. jnz L_nuo_shift_1 /* Shift left 1 - 31 bits */
  91. orl %eax,%eax
  92. jz L_exit_nuo_zero /* The contents are zero */
  93. movl %eax,%edx
  94. xorl %eax,%eax
  95. subw $32,EXP(%ebx) /* This can cause an underflow */
  96. /* We need to shift left by 1 - 31 bits */
  97. L_nuo_shift_1:
  98. bsrl %edx,%ecx /* get the required shift in %ecx */
  99. subl $31,%ecx
  100. negl %ecx
  101. shld %cl,%eax,%edx
  102. shl %cl,%eax
  103. subw %cx,EXP(%ebx) /* This can cause an underflow */
  104. movl %edx,SIGH(%ebx)
  105. movl %eax,SIGL(%ebx)
  106. L_exit_nuo_valid:
  107. movl TAG_Valid,%eax
  108. popl %ebx
  109. leave
  110. ret
  111. L_exit_nuo_zero:
  112. movl TAG_Zero,%eax
  113. movw EXP_UNDER,EXP(%ebx)
  114. popl %ebx
  115. leave
  116. ret
  117. ENDPROC(FPU_normalize_nuo)