JITArithmetic32_64.cpp

/*
 * Copyright (C) 2008 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "config.h"

#if ENABLE(JIT)
#if USE(JSVALUE32_64)
#include "JIT.h"

#include "CodeBlock.h"
#include "JITInlines.h"
#include "JITStubCall.h"
#include "JITStubs.h"
#include "JSArray.h"
#include "JSFunction.h"
#include "Interpreter.h"
#include "Operations.h"
#include "ResultType.h"
#include "SamplingTool.h"

#ifndef NDEBUG
#include <stdio.h>
#endif

using namespace std;

namespace JSC {

void JIT::emit_op_negate(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned src = currentInstruction[2].u.operand;

    emitLoad(src, regT1, regT0);

    Jump srcNotInt = branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag));
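    // A payload of 0 or 0x80000000 (INT_MIN) takes the slow path: negating 0
    // yields -0, which is a double, and negating INT_MIN overflows int32.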
    addSlowCase(branchTest32(Zero, regT0, TrustedImm32(0x7fffffff)));
    neg32(regT0);
    emitStoreInt32(dst, regT0, (dst == src));

    Jump end = jump();

    srcNotInt.link(this);
    addSlowCase(branch32(Above, regT1, TrustedImm32(JSValue::LowestTag)));
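    // The operand is a double; negate it by flipping the sign bit in the
    // high word of its encoding.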
    xor32(TrustedImm32(1 << 31), regT1);
    store32(regT1, tagFor(dst));
    if (dst != src)
        store32(regT0, payloadFor(dst));

    end.link(this);
}

void JIT::emitSlow_op_negate(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;

    linkSlowCase(iter); // 0x7fffffff check
    linkSlowCase(iter); // double check

    JITStubCall stubCall(this, cti_op_negate);
    stubCall.addArgument(regT1, regT0);
    stubCall.call(dst);
}

void JIT::emit_compareAndJump(OpcodeID opcode, unsigned op1, unsigned op2, unsigned target, RelationalCondition condition)
{
    JumpList notInt32Op1;
    JumpList notInt32Op2;

    // Fast path: comparison against a constant single-character string.
    if (isOperandConstantImmediateChar(op1)) {
        emitLoad(op2, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
        JumpList failures;
        emitLoadCharacterString(regT0, regT0, failures);
        addSlowCase(failures);
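        // The constant op1 ends up as the right-hand operand of the compare,
        // so the condition has to be commuted.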
        addJump(branch32(commute(condition), regT0, Imm32(asString(getConstantOperand(op1))->tryGetValue()[0])), target);
        return;
    }
    if (isOperandConstantImmediateChar(op2)) {
        emitLoad(op1, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
        JumpList failures;
        emitLoadCharacterString(regT0, regT0, failures);
        addSlowCase(failures);
        addJump(branch32(condition, regT0, Imm32(asString(getConstantOperand(op2))->tryGetValue()[0])), target);
        return;
    }
    if (isOperandConstantImmediateInt(op1)) {
        emitLoad(op2, regT3, regT2);
        notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
        addJump(branch32(commute(condition), regT2, Imm32(getConstantOperand(op1).asInt32())), target);
    } else if (isOperandConstantImmediateInt(op2)) {
        emitLoad(op1, regT1, regT0);
        notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
        addJump(branch32(condition, regT0, Imm32(getConstantOperand(op2).asInt32())), target);
    } else {
        emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
        notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
        notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
        addJump(branch32(condition, regT0, regT2), target);
    }

    if (!supportsFloatingPoint()) {
        addSlowCase(notInt32Op1);
        addSlowCase(notInt32Op2);
        return;
    }
    Jump end = jump();

    // Double comparison path.
    emitBinaryDoubleOp(opcode, target, op1, op2, OperandTypes(), notInt32Op1, notInt32Op2, !isOperandConstantImmediateInt(op1), isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2));

    end.link(this);
}

void JIT::emit_compareAndJumpSlow(unsigned op1, unsigned op2, unsigned target, DoubleCondition, int (JIT_STUB *stub)(STUB_ARGS_DECLARATION), bool invert, Vector<SlowCaseEntry>::iterator& iter)
{
    if (isOperandConstantImmediateChar(op1) || isOperandConstantImmediateChar(op2)) {
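        // The character fast path registered four slow cases: the cell tag
        // check plus the failures from emitLoadCharacterString.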
        linkSlowCase(iter);
        linkSlowCase(iter);
        linkSlowCase(iter);
        linkSlowCase(iter);
    } else {
        if (!supportsFloatingPoint()) {
            if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
                linkSlowCase(iter); // int32 check
            linkSlowCase(iter); // int32 check
        } else {
            if (!isOperandConstantImmediateInt(op1)) {
                linkSlowCase(iter); // double check
                linkSlowCase(iter); // int32 check
            }
            if (isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2))
                linkSlowCase(iter); // double check
        }
    }

    JITStubCall stubCall(this, stub);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call();
    emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, regT0), target);
}

// LeftShift (<<)

void JIT::emit_op_lshift(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    if (isOperandConstantImmediateInt(op2)) {
        emitLoad(op1, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
        lshift32(Imm32(getConstantOperand(op2).asInt32()), regT0);
        emitStoreAndMapInt32(dst, regT1, regT0, dst == op1, OPCODE_LENGTH(op_lshift));
        return;
    }

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    if (!isOperandConstantImmediateInt(op1))
        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
    lshift32(regT2, regT0);
    emitStoreAndMapInt32(dst, regT1, regT0, dst == op1 || dst == op2, OPCODE_LENGTH(op_lshift));
}

void JIT::emitSlow_op_lshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
        linkSlowCase(iter); // int32 check
    linkSlowCase(iter); // int32 check

    JITStubCall stubCall(this, cti_op_lshift);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(dst);
}

// RightShift (>>) and UnsignedRightShift (>>>) helper

void JIT::emitRightShift(Instruction* currentInstruction, bool isUnsigned)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    // Slow case of rshift makes assumptions about what registers hold the
    // shift arguments, so any changes must be updated there as well.
    if (isOperandConstantImmediateInt(op2)) {
        emitLoad(op1, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
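        // ECMAScript uses only the low five bits of the shift count.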
        int shift = getConstantOperand(op2).asInt32() & 0x1f;
        if (shift) {
            if (isUnsigned)
                urshift32(Imm32(shift), regT0);
            else
                rshift32(Imm32(shift), regT0);
        } else if (isUnsigned) // Signed right shift by zero is simply a toInt conversion, but >>> of a negative int32 cannot be stored as an int32.
            addSlowCase(branch32(LessThan, regT0, TrustedImm32(0)));
        emitStoreAndMapInt32(dst, regT1, regT0, dst == op1, OPCODE_LENGTH(op_rshift));
    } else {
        emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
        if (!isOperandConstantImmediateInt(op1))
            addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
        addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
        if (isUnsigned) {
            urshift32(regT2, regT0);
            addSlowCase(branch32(LessThan, regT0, TrustedImm32(0)));
        } else
            rshift32(regT2, regT0);
        emitStoreAndMapInt32(dst, regT1, regT0, dst == op1, OPCODE_LENGTH(op_rshift));
    }
}

void JIT::emitRightShiftSlowCase(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter, bool isUnsigned)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    if (isOperandConstantImmediateInt(op2)) {
        int shift = getConstantOperand(op2).asInt32() & 0x1f;
        // op1 = regT1:regT0
        linkSlowCase(iter); // int32 check
        if (supportsFloatingPointTruncate()) {
            JumpList failures;
            failures.append(branch32(AboveOrEqual, regT1, TrustedImm32(JSValue::LowestTag)));
            emitLoadDouble(op1, fpRegT0);
            failures.append(branchTruncateDoubleToInt32(fpRegT0, regT0));
            if (shift) {
                if (isUnsigned)
                    urshift32(Imm32(shift), regT0);
                else
                    rshift32(Imm32(shift), regT0);
            } else if (isUnsigned) // Signed right shift by zero is simply a toInt conversion, but >>> of a negative int32 cannot be stored as an int32.
                failures.append(branch32(LessThan, regT0, TrustedImm32(0)));

            move(TrustedImm32(JSValue::Int32Tag), regT1);
            emitStoreInt32(dst, regT0, false);
            emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_rshift));

            failures.link(this);
        }
        if (isUnsigned && !shift)
            linkSlowCase(iter); // failed to box in hot path
    } else {
        // op1 = regT1:regT0
        // op2 = regT3:regT2
        if (!isOperandConstantImmediateInt(op1)) {
            linkSlowCase(iter); // int32 check -- op1 is not an int
            if (supportsFloatingPointTruncate()) {
                JumpList failures;
                failures.append(branch32(Above, regT1, TrustedImm32(JSValue::LowestTag))); // op1 is not a double
                emitLoadDouble(op1, fpRegT0);
                failures.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag))); // op2 is not an int
                failures.append(branchTruncateDoubleToInt32(fpRegT0, regT0));
                if (isUnsigned) {
                    urshift32(regT2, regT0);
                    failures.append(branch32(LessThan, regT0, TrustedImm32(0)));
                } else
                    rshift32(regT2, regT0);
                move(TrustedImm32(JSValue::Int32Tag), regT1);
                emitStoreInt32(dst, regT0, false);
                emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_rshift));

                failures.link(this);
            }
        }

        linkSlowCase(iter); // int32 check - op2 is not an int
        if (isUnsigned)
            linkSlowCase(iter); // Can't represent unsigned result as an immediate
    }

    JITStubCall stubCall(this, isUnsigned ? cti_op_urshift : cti_op_rshift);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(dst);
}

// RightShift (>>)

void JIT::emit_op_rshift(Instruction* currentInstruction)
{
    emitRightShift(currentInstruction, false);
}

void JIT::emitSlow_op_rshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    emitRightShiftSlowCase(currentInstruction, iter, false);
}

// UnsignedRightShift (>>>)

void JIT::emit_op_urshift(Instruction* currentInstruction)
{
    emitRightShift(currentInstruction, true);
}

void JIT::emitSlow_op_urshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    emitRightShiftSlowCase(currentInstruction, iter, true);
}

// BitAnd (&)

void JIT::emit_op_bitand(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    unsigned op;
    int32_t constant;
    if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
        emitLoad(op, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
        and32(Imm32(constant), regT0);
        emitStoreAndMapInt32(dst, regT1, regT0, dst == op, OPCODE_LENGTH(op_bitand));
        return;
    }

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
    and32(regT2, regT0);
    emitStoreAndMapInt32(dst, regT1, regT0, (op1 == dst || op2 == dst), OPCODE_LENGTH(op_bitand));
}

void JIT::emitSlow_op_bitand(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
        linkSlowCase(iter); // int32 check
    linkSlowCase(iter); // int32 check

    JITStubCall stubCall(this, cti_op_bitand);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(dst);
}

// BitOr (|)

void JIT::emit_op_bitor(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    unsigned op;
    int32_t constant;
    if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
        emitLoad(op, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
        or32(Imm32(constant), regT0);
        emitStoreAndMapInt32(dst, regT1, regT0, op == dst, OPCODE_LENGTH(op_bitor));
        return;
    }

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
    or32(regT2, regT0);
    emitStoreAndMapInt32(dst, regT1, regT0, (op1 == dst || op2 == dst), OPCODE_LENGTH(op_bitor));
}

void JIT::emitSlow_op_bitor(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
        linkSlowCase(iter); // int32 check
    linkSlowCase(iter); // int32 check

    JITStubCall stubCall(this, cti_op_bitor);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(dst);
}

// BitXor (^)

void JIT::emit_op_bitxor(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    unsigned op;
    int32_t constant;
    if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
        emitLoad(op, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
        xor32(Imm32(constant), regT0);
        emitStoreAndMapInt32(dst, regT1, regT0, op == dst, OPCODE_LENGTH(op_bitxor));
        return;
    }

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
    xor32(regT2, regT0);
    emitStoreAndMapInt32(dst, regT1, regT0, (op1 == dst || op2 == dst), OPCODE_LENGTH(op_bitxor));
}

void JIT::emitSlow_op_bitxor(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
        linkSlowCase(iter); // int32 check
    linkSlowCase(iter); // int32 check

    JITStubCall stubCall(this, cti_op_bitxor);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(dst);
}

void JIT::emit_op_inc(Instruction* currentInstruction)
{
    unsigned srcDst = currentInstruction[1].u.operand;

    emitLoad(srcDst, regT1, regT0);

    addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    addSlowCase(branchAdd32(Overflow, TrustedImm32(1), regT0));
    emitStoreAndMapInt32(srcDst, regT1, regT0, true, OPCODE_LENGTH(op_inc));
}

void JIT::emitSlow_op_inc(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned srcDst = currentInstruction[1].u.operand;

    linkSlowCase(iter); // int32 check
    linkSlowCase(iter); // overflow check

    JITStubCall stubCall(this, cti_op_inc);
    stubCall.addArgument(srcDst);
    stubCall.call(srcDst);
}

void JIT::emit_op_dec(Instruction* currentInstruction)
{
    unsigned srcDst = currentInstruction[1].u.operand;

    emitLoad(srcDst, regT1, regT0);

    addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    addSlowCase(branchSub32(Overflow, TrustedImm32(1), regT0));
    emitStoreAndMapInt32(srcDst, regT1, regT0, true, OPCODE_LENGTH(op_dec));
}

void JIT::emitSlow_op_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned srcDst = currentInstruction[1].u.operand;

    linkSlowCase(iter); // int32 check
    linkSlowCase(iter); // overflow check

    JITStubCall stubCall(this, cti_op_dec);
    stubCall.addArgument(srcDst);
    stubCall.call(srcDst);
}

// Addition (+)

void JIT::emit_op_add(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    if (!types.first().mightBeNumber() || !types.second().mightBeNumber()) {
        addSlowCase();
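        // The argument-less addSlowCase() records a dummy slow case so the
        // entry counts stay in sync; it pairs with linkDummySlowCase in
        // emitSlow_op_add.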
        JITStubCall stubCall(this, cti_op_add);
        stubCall.addArgument(op1);
        stubCall.addArgument(op2);
        stubCall.call(dst);
        return;
    }

    JumpList notInt32Op1;
    JumpList notInt32Op2;

    unsigned op;
    int32_t constant;
    if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
        emitAdd32Constant(dst, op, constant, op == op1 ? types.first() : types.second());
        return;
    }

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));

    // Int32 case.
    addSlowCase(branchAdd32(Overflow, regT2, regT0));
    emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));

    if (!supportsFloatingPoint()) {
        addSlowCase(notInt32Op1);
        addSlowCase(notInt32Op2);
        return;
    }
    Jump end = jump();

    // Double case.
    emitBinaryDoubleOp(op_add, dst, op1, op2, types, notInt32Op1, notInt32Op2);
    end.link(this);
}

void JIT::emitAdd32Constant(unsigned dst, unsigned op, int32_t constant, ResultType opType)
{
    // Int32 case.
    emitLoad(op, regT1, regT2);
    Jump notInt32 = branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag));
    addSlowCase(branchAdd32(Overflow, regT2, Imm32(constant), regT0));
    emitStoreInt32(dst, regT0, (op == dst));

    // Double case.
    if (!supportsFloatingPoint()) {
        addSlowCase(notInt32);
        return;
    }
    Jump end = jump();

    notInt32.link(this);
    if (!opType.definitelyIsNumber())
        addSlowCase(branch32(Above, regT1, TrustedImm32(JSValue::LowestTag)));
    move(Imm32(constant), regT2);
    convertInt32ToDouble(regT2, fpRegT0);
    emitLoadDouble(op, fpRegT1);
    addDouble(fpRegT1, fpRegT0);
    emitStoreDouble(dst, fpRegT0);

    end.link(this);
}

void JIT::emitSlow_op_add(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    if (!types.first().mightBeNumber() || !types.second().mightBeNumber()) {
        linkDummySlowCase(iter);
        return;
    }

    unsigned op;
    int32_t constant;
    if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
        linkSlowCase(iter); // overflow check

        if (!supportsFloatingPoint())
            linkSlowCase(iter); // non-sse case
        else {
            ResultType opType = op == op1 ? types.first() : types.second();
            if (!opType.definitelyIsNumber())
                linkSlowCase(iter); // double check
        }
    } else {
        linkSlowCase(iter); // overflow check

        if (!supportsFloatingPoint()) {
            linkSlowCase(iter); // int32 check
            linkSlowCase(iter); // int32 check
        } else {
            if (!types.first().definitelyIsNumber())
                linkSlowCase(iter); // double check

            if (!types.second().definitelyIsNumber()) {
                linkSlowCase(iter); // int32 check
                linkSlowCase(iter); // double check
            }
        }
    }

    JITStubCall stubCall(this, cti_op_add);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(dst);
}

// Subtraction (-)

void JIT::emit_op_sub(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    JumpList notInt32Op1;
    JumpList notInt32Op2;

    if (isOperandConstantImmediateInt(op2)) {
        emitSub32Constant(dst, op1, getConstantOperand(op2).asInt32(), types.first());
        return;
    }

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));

    // Int32 case.
    addSlowCase(branchSub32(Overflow, regT2, regT0));
    emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));

    if (!supportsFloatingPoint()) {
        addSlowCase(notInt32Op1);
        addSlowCase(notInt32Op2);
        return;
    }
    Jump end = jump();

    // Double case.
    emitBinaryDoubleOp(op_sub, dst, op1, op2, types, notInt32Op1, notInt32Op2);
    end.link(this);
}

void JIT::emitSub32Constant(unsigned dst, unsigned op, int32_t constant, ResultType opType)
{
    // Int32 case.
    emitLoad(op, regT1, regT0);
    Jump notInt32 = branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag));
#if ENABLE(JIT_CONSTANT_BLINDING)
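    // With constant blinding enabled, branchSub32 may need to materialize a
    // blinded immediate, so it takes an extra scratch register (regT3).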
    addSlowCase(branchSub32(Overflow, regT0, Imm32(constant), regT2, regT3));
#else
    addSlowCase(branchSub32(Overflow, regT0, Imm32(constant), regT2));
#endif
    emitStoreInt32(dst, regT2, (op == dst));

    // Double case.
    if (!supportsFloatingPoint()) {
        addSlowCase(notInt32);
        return;
    }
    Jump end = jump();

    notInt32.link(this);
    if (!opType.definitelyIsNumber())
        addSlowCase(branch32(Above, regT1, TrustedImm32(JSValue::LowestTag)));
    move(Imm32(constant), regT2);
    convertInt32ToDouble(regT2, fpRegT0);
    emitLoadDouble(op, fpRegT1);
    subDouble(fpRegT0, fpRegT1);
    emitStoreDouble(dst, fpRegT1);

    end.link(this);
}

void JIT::emitSlow_op_sub(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    if (isOperandConstantImmediateInt(op2)) {
        linkSlowCase(iter); // overflow check

        if (!supportsFloatingPoint() || !types.first().definitelyIsNumber())
            linkSlowCase(iter); // int32 or double check
    } else {
        linkSlowCase(iter); // overflow check

        if (!supportsFloatingPoint()) {
            linkSlowCase(iter); // int32 check
            linkSlowCase(iter); // int32 check
        } else {
            if (!types.first().definitelyIsNumber())
                linkSlowCase(iter); // double check

            if (!types.second().definitelyIsNumber()) {
                linkSlowCase(iter); // int32 check
                linkSlowCase(iter); // double check
            }
        }
    }

    JITStubCall stubCall(this, cti_op_sub);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(dst);
}

void JIT::emitBinaryDoubleOp(OpcodeID opcodeID, unsigned dst, unsigned op1, unsigned op2, OperandTypes types, JumpList& notInt32Op1, JumpList& notInt32Op2, bool op1IsInRegisters, bool op2IsInRegisters)
{
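    // Handles the paths where at least one operand turned out not to be an
    // int32. notInt32Op1/notInt32Op2 are the branches taken out of the int32
    // fast path; op1IsInRegisters/op2IsInRegisters say whether the operands
    // have already been loaded into registers there.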
    JumpList end;

    if (!notInt32Op1.empty()) {
        // Double case 1: Op1 is not int32; Op2 is unknown.
        notInt32Op1.link(this);

        ASSERT(op1IsInRegisters);

        // Verify Op1 is double.
        if (!types.first().definitelyIsNumber())
            addSlowCase(branch32(Above, regT1, TrustedImm32(JSValue::LowestTag)));

        if (!op2IsInRegisters)
            emitLoad(op2, regT3, regT2);

        Jump doubleOp2 = branch32(Below, regT3, TrustedImm32(JSValue::LowestTag));

        if (!types.second().definitelyIsNumber())
            addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));

        convertInt32ToDouble(regT2, fpRegT0);
        Jump doTheMath = jump();

        // Load Op2 as double into double register.
        doubleOp2.link(this);
        emitLoadDouble(op2, fpRegT0);

        // Do the math.
        doTheMath.link(this);
        switch (opcodeID) {
            case op_mul:
                emitLoadDouble(op1, fpRegT2);
                mulDouble(fpRegT2, fpRegT0);
                emitStoreDouble(dst, fpRegT0);
                break;
            case op_add:
                emitLoadDouble(op1, fpRegT2);
                addDouble(fpRegT2, fpRegT0);
                emitStoreDouble(dst, fpRegT0);
                break;
            case op_sub:
                emitLoadDouble(op1, fpRegT1);
                subDouble(fpRegT0, fpRegT1);
                emitStoreDouble(dst, fpRegT1);
                break;
            case op_div: {
                emitLoadDouble(op1, fpRegT1);
                divDouble(fpRegT0, fpRegT1);
#if ENABLE(VALUE_PROFILER)
                // Is the result actually an integer? The DFG JIT would really like to know. If it's
                // not an integer, we increment a count. If this count, together with the slow case
                // counter, is below threshold then the DFG JIT will compile this division with a
                // speculation that the remainder is zero.
                // Also, there are cases where a double result here would cause an important field
                // in the heap to sometimes have doubles in it, resulting in double predictions getting
                // propagated to a use site where it might cause damage (such as the index to an array
                // access). So if we are DFG compiling anything in the program, we want this code to
                // ensure that it produces integers whenever possible.
                // FIXME: This will fail to convert to integer if the result is zero. We should
                // distinguish between positive zero and negative zero here.
                JumpList notInteger;
                branchConvertDoubleToInt32(fpRegT1, regT2, notInteger, fpRegT0);
                // If we've got an integer, we might as well make that the result of the division.
                emitStoreInt32(dst, regT2);
                Jump isInteger = jump();
                notInteger.link(this);
                add32(TrustedImm32(1), AbsoluteAddress(&m_codeBlock->specialFastCaseProfileForBytecodeOffset(m_bytecodeOffset)->m_counter));
                emitStoreDouble(dst, fpRegT1);
                isInteger.link(this);
#else
                emitStoreDouble(dst, fpRegT1);
#endif
                break;
            }
            case op_jless:
                emitLoadDouble(op1, fpRegT2);
                addJump(branchDouble(DoubleLessThan, fpRegT2, fpRegT0), dst);
                break;
            case op_jlesseq:
                emitLoadDouble(op1, fpRegT2);
                addJump(branchDouble(DoubleLessThanOrEqual, fpRegT2, fpRegT0), dst);
                break;
            case op_jgreater:
                emitLoadDouble(op1, fpRegT2);
                addJump(branchDouble(DoubleGreaterThan, fpRegT2, fpRegT0), dst);
                break;
            case op_jgreatereq:
                emitLoadDouble(op1, fpRegT2);
                addJump(branchDouble(DoubleGreaterThanOrEqual, fpRegT2, fpRegT0), dst);
                break;
            case op_jnless:
                emitLoadDouble(op1, fpRegT2);
                addJump(branchDouble(DoubleLessThanOrEqualOrUnordered, fpRegT0, fpRegT2), dst);
                break;
            case op_jnlesseq:
                emitLoadDouble(op1, fpRegT2);
                addJump(branchDouble(DoubleLessThanOrUnordered, fpRegT0, fpRegT2), dst);
                break;
            case op_jngreater:
                emitLoadDouble(op1, fpRegT2);
                addJump(branchDouble(DoubleGreaterThanOrEqualOrUnordered, fpRegT0, fpRegT2), dst);
                break;
            case op_jngreatereq:
                emitLoadDouble(op1, fpRegT2);
                addJump(branchDouble(DoubleGreaterThanOrUnordered, fpRegT0, fpRegT2), dst);
                break;
            default:
                RELEASE_ASSERT_NOT_REACHED();
        }

        if (!notInt32Op2.empty())
            end.append(jump());
    }

    if (!notInt32Op2.empty()) {
        // Double case 2: Op1 is int32; Op2 is not int32.
        notInt32Op2.link(this);

        ASSERT(op2IsInRegisters);

        if (!op1IsInRegisters)
            emitLoadPayload(op1, regT0);

        convertInt32ToDouble(regT0, fpRegT0);

        // Verify op2 is double.
        if (!types.second().definitelyIsNumber())
            addSlowCase(branch32(Above, regT3, TrustedImm32(JSValue::LowestTag)));

        // Do the math.
        switch (opcodeID) {
            case op_mul:
                emitLoadDouble(op2, fpRegT2);
                mulDouble(fpRegT2, fpRegT0);
                emitStoreDouble(dst, fpRegT0);
                break;
            case op_add:
                emitLoadDouble(op2, fpRegT2);
                addDouble(fpRegT2, fpRegT0);
                emitStoreDouble(dst, fpRegT0);
                break;
            case op_sub:
                emitLoadDouble(op2, fpRegT2);
                subDouble(fpRegT2, fpRegT0);
                emitStoreDouble(dst, fpRegT0);
                break;
            case op_div: {
                emitLoadDouble(op2, fpRegT2);
                divDouble(fpRegT2, fpRegT0);
#if ENABLE(VALUE_PROFILER)
                // Is the result actually an integer? The DFG JIT would really like to know. If it's
                // not an integer, we increment a count. If this count, together with the slow case
                // counter, is below threshold then the DFG JIT will compile this division with a
                // speculation that the remainder is zero.
                // Also, there are cases where a double result here would cause an important field
                // in the heap to sometimes have doubles in it, resulting in double predictions getting
                // propagated to a use site where it might cause damage (such as the index to an array
                // access). So if we are DFG compiling anything in the program, we want this code to
                // ensure that it produces integers whenever possible.
                // FIXME: This will fail to convert to integer if the result is zero. We should
                // distinguish between positive zero and negative zero here.
                JumpList notInteger;
                branchConvertDoubleToInt32(fpRegT0, regT2, notInteger, fpRegT1);
                // If we've got an integer, we might as well make that the result of the division.
                emitStoreInt32(dst, regT2);
                Jump isInteger = jump();
                notInteger.link(this);
                add32(TrustedImm32(1), AbsoluteAddress(&m_codeBlock->specialFastCaseProfileForBytecodeOffset(m_bytecodeOffset)->m_counter));
                emitStoreDouble(dst, fpRegT0);
                isInteger.link(this);
#else
                emitStoreDouble(dst, fpRegT0);
#endif
                break;
            }
            case op_jless:
                emitLoadDouble(op2, fpRegT1);
                addJump(branchDouble(DoubleLessThan, fpRegT0, fpRegT1), dst);
                break;
            case op_jlesseq:
                emitLoadDouble(op2, fpRegT1);
                addJump(branchDouble(DoubleLessThanOrEqual, fpRegT0, fpRegT1), dst);
                break;
            case op_jgreater:
                emitLoadDouble(op2, fpRegT1);
                addJump(branchDouble(DoubleGreaterThan, fpRegT0, fpRegT1), dst);
                break;
            case op_jgreatereq:
                emitLoadDouble(op2, fpRegT1);
                addJump(branchDouble(DoubleGreaterThanOrEqual, fpRegT0, fpRegT1), dst);
                break;
            case op_jnless:
                emitLoadDouble(op2, fpRegT1);
                addJump(branchDouble(DoubleLessThanOrEqualOrUnordered, fpRegT1, fpRegT0), dst);
                break;
            case op_jnlesseq:
                emitLoadDouble(op2, fpRegT1);
                addJump(branchDouble(DoubleLessThanOrUnordered, fpRegT1, fpRegT0), dst);
                break;
            case op_jngreater:
                emitLoadDouble(op2, fpRegT1);
                addJump(branchDouble(DoubleGreaterThanOrEqualOrUnordered, fpRegT1, fpRegT0), dst);
                break;
            case op_jngreatereq:
                emitLoadDouble(op2, fpRegT1);
                addJump(branchDouble(DoubleGreaterThanOrUnordered, fpRegT1, fpRegT0), dst);
                break;
            default:
                RELEASE_ASSERT_NOT_REACHED();
        }
    }

    end.link(this);
}

// Multiplication (*)

void JIT::emit_op_mul(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

#if ENABLE(VALUE_PROFILER)
    m_codeBlock->addSpecialFastCaseProfile(m_bytecodeOffset);
#endif

    JumpList notInt32Op1;
    JumpList notInt32Op2;

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));

    // Int32 case.
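    // Save op1's payload in regT3 so the slow path can test the operands'
    // signs when the product is zero.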
    move(regT0, regT3);
    addSlowCase(branchMul32(Overflow, regT2, regT0));
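    // A zero product may really be -0 (e.g. -1 * 0); the slow path decides.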
    addSlowCase(branchTest32(Zero, regT0));
    emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));

    if (!supportsFloatingPoint()) {
        addSlowCase(notInt32Op1);
        addSlowCase(notInt32Op2);
        return;
    }
    Jump end = jump();

    // Double case.
    emitBinaryDoubleOp(op_mul, dst, op1, op2, types, notInt32Op1, notInt32Op2);
    end.link(this);
}

void JIT::emitSlow_op_mul(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    Jump overflow = getSlowCase(iter); // overflow check
    linkSlowCase(iter); // zero result check

    Jump negZero = branchOr32(Signed, regT2, regT3);
    emitStoreInt32(dst, TrustedImm32(0), (op1 == dst || op2 == dst));

    emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_mul));

    negZero.link(this);
#if ENABLE(VALUE_PROFILER)
    // We only get here if we have a genuine negative zero. Record this,
    // so that the speculative JIT knows that we failed speculation
    // because of a negative zero.
    add32(TrustedImm32(1), AbsoluteAddress(&m_codeBlock->specialFastCaseProfileForBytecodeOffset(m_bytecodeOffset)->m_counter));
#endif
    overflow.link(this);

    if (!supportsFloatingPoint()) {
        linkSlowCase(iter); // int32 check
        linkSlowCase(iter); // int32 check
    }

    if (supportsFloatingPoint()) {
        if (!types.first().definitelyIsNumber())
            linkSlowCase(iter); // double check

        if (!types.second().definitelyIsNumber()) {
            linkSlowCase(iter); // int32 check
            linkSlowCase(iter); // double check
        }
    }

    Label jitStubCall(this);
    JITStubCall stubCall(this, cti_op_mul);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(dst);
}

// Division (/)

void JIT::emit_op_div(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

#if ENABLE(VALUE_PROFILER)
    m_codeBlock->addSpecialFastCaseProfile(m_bytecodeOffset);
#endif

    if (!supportsFloatingPoint()) {
        addSlowCase(jump());
        return;
    }

    // Int32 divide.
    JumpList notInt32Op1;
    JumpList notInt32Op2;

    JumpList end;

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);

    notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
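    // Both operands are int32s: do the division in double space, then convert
    // the quotient back to an int32 when it fits.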
    convertInt32ToDouble(regT0, fpRegT0);
    convertInt32ToDouble(regT2, fpRegT1);
    divDouble(fpRegT1, fpRegT0);
#if ENABLE(VALUE_PROFILER)
    // Is the result actually an integer? The DFG JIT would really like to know. If it's
    // not an integer, we increment a count. If this count, together with the slow case
    // counter, is below threshold then the DFG JIT will compile this division with a
    // speculation that the remainder is zero.
    // Also, there are cases where a double result here would cause an important field
    // in the heap to sometimes have doubles in it, resulting in double predictions getting
    // propagated to a use site where it might cause damage (such as the index to an array
    // access). So if we are DFG compiling anything in the program, we want this code to
    // ensure that it produces integers whenever possible.
    // FIXME: This will fail to convert to integer if the result is zero. We should
    // distinguish between positive zero and negative zero here.
    JumpList notInteger;
    branchConvertDoubleToInt32(fpRegT0, regT2, notInteger, fpRegT1);
    // If we've got an integer, we might as well make that the result of the division.
    emitStoreInt32(dst, regT2);
    end.append(jump());
    notInteger.link(this);
    add32(TrustedImm32(1), AbsoluteAddress(&m_codeBlock->specialFastCaseProfileForBytecodeOffset(m_bytecodeOffset)->m_counter));
    emitStoreDouble(dst, fpRegT0);
#else
    emitStoreDouble(dst, fpRegT0);
#endif
    end.append(jump());

    // Double divide.
    emitBinaryDoubleOp(op_div, dst, op1, op2, types, notInt32Op1, notInt32Op2);

    end.link(this);
}

void JIT::emitSlow_op_div(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    if (!supportsFloatingPoint())
        linkSlowCase(iter);
    else {
        if (!types.first().definitelyIsNumber())
            linkSlowCase(iter); // double check

        if (!types.second().definitelyIsNumber()) {
            linkSlowCase(iter); // int32 check
            linkSlowCase(iter); // double check
        }
    }

    JITStubCall stubCall(this, cti_op_div);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(dst);
}

// Mod (%)

/* ------------------------------ BEGIN: OP_MOD ------------------------------ */

void JIT::emit_op_mod(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
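    // Only x86 gets a fast path here, via the IDIV instruction; every other
    // architecture goes straight to the C++ stub.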
#if CPU(X86) || CPU(X86_64)
    // Make sure registers are correct for x86 IDIV instructions.
    ASSERT(regT0 == X86Registers::eax);
    ASSERT(regT1 == X86Registers::edx);
    ASSERT(regT2 == X86Registers::ecx);
    ASSERT(regT3 == X86Registers::ebx);

    emitLoad2(op1, regT0, regT3, op2, regT1, regT2);
    addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    addSlowCase(branch32(NotEqual, regT0, TrustedImm32(JSValue::Int32Tag)));

    move(regT3, regT0);
    addSlowCase(branchTest32(Zero, regT2));
    Jump denominatorNotNeg1 = branch32(NotEqual, regT2, TrustedImm32(-1));
    addSlowCase(branch32(Equal, regT0, TrustedImm32(-2147483647-1)));
    denominatorNotNeg1.link(this);
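    // cdq sign-extends eax (the numerator) into edx:eax; idivl then leaves the
    // quotient in eax and the remainder in edx (regT1).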
    m_assembler.cdq();
    m_assembler.idivl_r(regT2);
    Jump numeratorPositive = branch32(GreaterThanOrEqual, regT3, TrustedImm32(0));
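    // A negative numerator with a zero remainder must produce -0, which an
    // int32 cannot represent, so that case takes the slow path.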
    addSlowCase(branchTest32(Zero, regT1));
    numeratorPositive.link(this);
    emitStoreInt32(dst, regT1, (op1 == dst || op2 == dst));
#else
    JITStubCall stubCall(this, cti_op_mod);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(dst);
#endif
}

void JIT::emitSlow_op_mod(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
#if CPU(X86) || CPU(X86_64)
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    linkSlowCase(iter);
    linkSlowCase(iter);
    linkSlowCase(iter);
    linkSlowCase(iter);
    linkSlowCase(iter);
    JITStubCall stubCall(this, cti_op_mod);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(result);
#else
    UNUSED_PARAM(currentInstruction);
    UNUSED_PARAM(iter);
    // We would have really useful assertions here if it wasn't for the compiler's
    // insistence on attribute noreturn.
    // RELEASE_ASSERT_NOT_REACHED();
#endif
}

/* ------------------------------ END: OP_MOD ------------------------------ */

} // namespace JSC

#endif // USE(JSVALUE32_64)
#endif // ENABLE(JIT)