/*@@ Fix lossage on folding division of big integers.  */

/*@@ This file should be rewritten to use an arbitrary precision
  @@ representation for "struct tree_int_cst" and "struct tree_real_cst".
  @@ Perhaps the routines could also be used for bc/dc, and made a lib.
  @@ The routines that translate from the ap rep should
  @@ warn if precision et al. is lost.
  @@ This would also make life easier when this technology is used
  @@ for cross-compilers.  */
/* Fold a constant sub-tree into a single node for C-compiler
   Copyright (C) 1987 Free Software Foundation, Inc.

This file is part of GNU CC.

GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY.  No author or distributor
accepts responsibility to anyone for the consequences of using it
or for whether it serves any particular purpose or works at all,
unless he says so in writing.  Refer to the GNU CC General Public
License for full details.

Everyone is granted permission to copy, modify and redistribute
GNU CC, but only under the conditions described in the
GNU CC General Public License.  A copy of this license is
supposed to have been given to you along with GNU CC so you
can know your rights and responsibilities.  It should be in a
file named COPYING.  Among other things, the copyright notice
and this notice must be preserved on all copies.  */
/* There are only two entry points in this file:
   fold and combine.

   fold takes a tree as argument and returns a simplified tree.

   combine takes a tree code for an arithmetic operation
   and two operands that are trees for constant values
   and returns the result of the specified operation on those values,
   also as a tree.  */
#include <stdio.h>
#include "config.h"
#include "tree.h"

static void lshift_double ();
static void rshift_double ();
static void lrotate_double ();
static void rrotate_double ();
/* To do constant folding on INTEGER_CST nodes requires 64-bit arithmetic.
   We do that by representing the 64-bit integer as 8 shorts,
   with only 8 bits stored in each short, as a positive number.  */

/* Unpack a 64-bit integer into 8 shorts.
   LOW and HI are the integer, as two `int' pieces.
   SHORTS points to the array of shorts.  */
static void
encode (shorts, low, hi)
     short *shorts;
     int low, hi;
{
  shorts[0] = low & 0xff;
  shorts[1] = (low >> 8) & 0xff;
  shorts[2] = (low >> 16) & 0xff;
  shorts[3] = (low >> 24) & 0xff;
  shorts[4] = hi & 0xff;
  shorts[5] = (hi >> 8) & 0xff;
  shorts[6] = (hi >> 16) & 0xff;
  shorts[7] = (hi >> 24) & 0xff;
}

/* Pack an array of 8 shorts into a 64-bit integer.
   SHORTS points to the array of shorts.
   The integer is stored into *LOW and *HI as two `int' pieces.  */

static void
decode (shorts, low, hi)
     short *shorts;
     int *low, *hi;
{
  *low = (shorts[3] << 24) | (shorts[2] << 16) | (shorts[1] << 8) | shorts[0];
  *hi = (shorts[7] << 24) | (shorts[6] << 16) | (shorts[5] << 8) | shorts[4];
}
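
/* A minimal round-trip sketch of the 8-shorts representation, assuming a
   32-bit `int' host; the function name is hypothetical and the block is
   compiled out since it is not part of the compiler proper.  */
#if 0
static void
encode_decode_example ()
{
  short s[8];
  int lo, hi;
  encode (s, 0x01020304, 0x0a0b0c0d);
  /* s now holds the bytes least significant first:
     s[0..3] == 0x04, 0x03, 0x02, 0x01 (the low word),
     s[4..7] == 0x0d, 0x0c, 0x0b, 0x0a (the high word).  */
  decode (s, &lo, &hi);
  /* lo == 0x01020304 and hi == 0x0a0b0c0d again.  */
}
#endif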
/* Zero out any bits in an unsigned integer that are supposed to be zero
   because they are beyond the precision of the integer's data type.  */

static void
truncate_unsigned (x)
     tree x;
{
  register int prec = TYPE_PRECISION (TREE_TYPE (x));
  if (TREE_CODE (TREE_TYPE (x)) == POINTER_TYPE)
    TREE_INT_CST_HIGH (x) = 0;
  else if (prec > HOST_BITS_PER_INT)
    {
      TREE_INT_CST_HIGH (x)
        &= ~((-1) << (prec - HOST_BITS_PER_INT));
    }
  else
    {
      TREE_INT_CST_HIGH (x) = 0;
      TREE_INT_CST_LOW (x)
        &= ~((-1) << prec);
    }
}
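
/* A compiled-out sketch of the masking, assuming HOST_BITS_PER_INT == 32
   and X an INTEGER_CST of an 8-bit unsigned type: the mask is
   ~((-1) << 8) == 0xff, so a low word of 0x1ff truncates to 0xff and the
   high word is cleared.  */
#if 0
  /* with TREE_INT_CST_LOW (x) == 0x1ff on entry:  */
  truncate_unsigned (x);
  /* afterward TREE_INT_CST_LOW (x) == 0xff, TREE_INT_CST_HIGH (x) == 0 */
#endif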
/* Add two 64-bit integers with 64-bit result.
   Each argument is given as two `int' pieces.
   One argument is L1 and H1; the other, L2 and H2.
   The value is stored as two `int' pieces in *LV and *HV.
   We use the 8-shorts representation internally.  */

static void
add_double (l1, h1, l2, h2, lv, hv)
     int l1, h1, l2, h2;
     int *lv, *hv;
{
  short arg1[8];
  short arg2[8];
  register int carry = 0;
  register int i;

  encode (arg1, l1, h1);
  encode (arg2, l2, h2);

  for (i = 0; i < 8; i++)
    {
      carry += arg1[i] + arg2[i];
      arg1[i] = carry & 0xff;
      carry >>= 8;
    }

  decode (arg1, lv, hv);
}
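
/* A compiled-out sketch of the byte-wise carry propagation: adding 1 to
   a low word of 0xffffffff carries through all four low bytes into the
   high word; the hypothetical helper is not part of the compiler.  */
#if 0
static void
add_double_example ()
{
  int lo, hi;
  add_double (-1, 0, 1, 0, &lo, &hi);	/* low word 0xffffffff, plus 1 */
  /* lo == 0 and hi == 1: the carry rippled into the high word.
     A carry out of the 64th bit would simply be dropped.  */
}
#endif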
/* Negate a 64-bit integer with 64-bit result.
   The argument is given as two `int' pieces in L1 and H1.
   The value is stored as two `int' pieces in *LV and *HV.
   No conversion to the 8-shorts representation is needed here.  */

static void
neg_double (l1, h1, lv, hv)
     int l1, h1;
     int *lv, *hv;
{
  if (l1 == 0)
    {
      *lv = 0;
      *hv = - h1;
    }
  else
    {
      *lv = - l1;
      *hv = ~ h1;
    }
}
/* Multiply two 64-bit integers with 64-bit result.
   Each argument is given as two `int' pieces.
   One argument is L1 and H1; the other, L2 and H2.
   The value is stored as two `int' pieces in *LV and *HV.
   We use the 8-shorts representation internally.  */

static void
mul_double (l1, h1, l2, h2, lv, hv)
     int l1, h1, l2, h2;
     int *lv, *hv;
{
  short arg1[8];
  short arg2[8];
  short prod[16];
  register int carry = 0;
  register int i, j, k;

  encode (arg1, l1, h1);
  encode (arg2, l2, h2);

  bzero (prod, sizeof prod);

  for (i = 0; i < 8; i++)
    for (j = 0; j < 8; j++)
      {
        k = i + j;
        carry = arg1[i] * arg2[j];
        while (carry)
          {
            carry += prod[k];
            prod[k] = carry & 0xff;
            carry >>= 8;
            k++;
          }
      }

  decode (prod, lv, hv);	/* @@decode ignores prod[8] -> prod[15] */
}
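
/* A compiled-out sketch of the schoolbook multiplication above: the
   product is accumulated digit by digit in base 256, and only the low
   eight digits survive decode, so the result wraps modulo 2**64.  */
#if 0
static void
mul_double_example ()
{
  int lo, hi;
  mul_double (0x10000, 0, 0x10000, 0, &lo, &hi);
  /* 2**16 * 2**16 == 2**32: lo == 0 and hi == 1.  */
  mul_double (0, 1, 0, 0x100, &lo, &hi);
  /* 2**32 * 2**40 == 2**72 overflows: lo and hi are both 0,
     since prod[8] through prod[15] are ignored by decode.  */
}
#endif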
/* Shift the 64-bit integer in L1, H1 left by COUNT places
   keeping only PREC bits of result.
   Shift right if COUNT is negative.
   ARITH nonzero specifies arithmetic shifting; otherwise use logical shift.
   Store the value as two `int' pieces in *LV and *HV.  */

static void
lshift_double (l1, h1, count, prec, lv, hv, arith)
     int l1, h1, count, prec;
     int *lv, *hv;
     int arith;
{
  short arg1[8];
  register int i;
  register int carry = 0;

  if (count < 0)
    {
      rshift_double (l1, h1, - count, prec, lv, hv, arith);
      return;
    }

  encode (arg1, l1, h1);

  count &= (1 << prec) - 1;
  while (count > 0)
    {
      for (i = 0; i < 8; i++)
        {
          carry += arg1[i] << 1;
          arg1[i] = carry & 0xff;
          carry >>= 8;
        }
      count--;
    }

  decode (arg1, lv, hv);
}
/* Shift the 64-bit integer in L1, H1 right by COUNT places
   keeping only PREC bits of result.  COUNT must be positive.
   ARITH nonzero specifies arithmetic shifting; otherwise use logical shift.
   Store the value as two `int' pieces in *LV and *HV.  */

static void
rshift_double (l1, h1, count, prec, lv, hv, arith)
     int l1, h1, count, prec;
     int *lv, *hv;
     int arith;
{
  short arg1[8];
  register int i;
  register int carry;

  encode (arg1, l1, h1);

  count &= (1 << prec) - 1;
  carry = arith && arg1[7] >> 7;
  while (count > 0)
    {
      for (i = 7; i >= 0; i--)
        {
          carry <<= 8;
          carry += arg1[i];
          arg1[i] = (carry >> 1) & 0xff;
        }
      count--;
    }

  decode (arg1, lv, hv);
}
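
/* A compiled-out sketch of the ARITH flag: with the high bit set, an
   arithmetic right shift duplicates the sign bit while a logical shift
   feeds in a zero.  (PREC only bounds the shift count here; 16 keeps the
   mask well defined on a 32-bit host.)  */
#if 0
static void
rshift_double_example ()
{
  int lo, hi;
  rshift_double (0, 1 << 31, 1, 16, &lo, &hi, 1);
  /* Arithmetic: hi == 0xc0000000 -- the sign bit is kept.  */
  rshift_double (0, 1 << 31, 1, 16, &lo, &hi, 0);
  /* Logical: hi == 0x40000000 -- a zero comes in from the left.  */
}
#endif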
/* Rotate the 64-bit integer in L1, H1 left by COUNT places
   keeping only PREC bits of result.
   Rotate right if COUNT is negative.
   Store the value as two `int' pieces in *LV and *HV.  */

static void
lrotate_double (l1, h1, count, prec, lv, hv)
     int l1, h1, count, prec;
     int *lv, *hv;
{
  short arg1[8];
  register int i;
  register int carry;

  if (count < 0)
    {
      rrotate_double (l1, h1, - count, prec, lv, hv);
      return;
    }

  encode (arg1, l1, h1);

  count &= (1 << prec) - 1;
  carry = arg1[7] >> 7;
  while (count > 0)
    {
      for (i = 0; i < 8; i++)
        {
          carry += arg1[i] << 1;
          arg1[i] = carry & 0xff;
          carry >>= 8;
        }
      count--;
    }

  decode (arg1, lv, hv);
}
/* Rotate the 64-bit integer in L1, H1 right by COUNT places
   keeping only PREC bits of result.  COUNT must be positive.
   Store the value as two `int' pieces in *LV and *HV.  */

static void
rrotate_double (l1, h1, count, prec, lv, hv)
     int l1, h1, count, prec;
     int *lv, *hv;
{
  short arg1[8];
  register int i;
  register int carry;

  encode (arg1, l1, h1);

  count &= (1 << prec) - 1;
  carry = arg1[0] & 1;
  while (count > 0)
    {
      for (i = 7; i >= 0; i--)
        {
          carry <<= 8;
          carry += arg1[i];
          arg1[i] = (carry >> 1) & 0xff;
        }
      count--;
    }

  decode (arg1, lv, hv);
}
/* Divide 64 bit integer LNUM, HNUM by 64 bit integer LDEN, HDEN
   for a quotient (stored in *LQUO, *HQUO) and remainder (in *LREM, *HREM).
   CODE is a tree code for a kind of division, one of
   TRUNC_DIV_EXPR, FLOOR_DIV_EXPR, CEIL_DIV_EXPR and ROUND_DIV_EXPR.
   It controls how the quotient is rounded to an integer.
   UNS nonzero says do unsigned division.  */

static void
div_and_round_double (code, uns,
                      lnum, hnum, lden, hden, lquo, hquo, lrem, hrem)
     enum tree_code code;
     int uns;
     int lnum, hnum;		/* num == numerator == dividend */
     int lden, hden;		/* den == denominator == divisor */
     int *lquo, *hquo, *lrem, *hrem;
{
  int quo_neg = 0;
  short num[9], den[8], quo[8];	/* extra element for scaling.  */
  register int i, j, work;
  register int carry = 0;

  if ((hden == 0) && (lden == 0))
    {
      *hquo = *lquo = *hrem = *lrem = 0;
      yyerror
        ("divide by 0 in constant folding - quotient and remainder set to 0.");
      return;
    }
  /* calculate quotient sign and convert operands to unsigned.  */
  if (!uns)
    {
      if (hden < 0)
        {
          quo_neg = ~ quo_neg;
          neg_double (lden, hden, &lden, &hden);
        }
      if (hnum < 0)
        {
          quo_neg = ~ quo_neg;
          neg_double (lnum, hnum, &lnum, &hnum);
        }
    }

  if (hnum == 0 && hden == 0)
    {				/* single precision */
      *hquo = *hrem = 0;
      *lquo = lnum / lden;	/* rounds toward zero since positive args */
      goto finish_up;
    }

  if (hnum == 0)
    {				/* trivial case: dividend < divisor */
      /* hden != 0 already checked.  */
      *hquo = *lquo = 0;
      *hrem = hnum;
      *lrem = lnum;
      goto finish_up;
    }

  bzero (quo, sizeof quo);

  bzero (num, sizeof num);	/* to zero 9th element */
  bzero (den, sizeof den);

  encode (num, lnum, hnum);
  encode (den, lden, hden);

  if (hden == 0)
    {				/* simpler algorithm */
      /* hnum != 0 already checked.  */
      for (i = 7; i >= 0; i--)
        {
          work = num[i] + (carry << 8);
          quo[i] = work / lden;
          carry = work % lden;
        }
    }
  else {			/* full double precision,
				   with thanks to Don Knuth's
				   "Seminumerical Algorithms".  */
#define BASE 256
    int quo_est, scale, num_hi_sig, den_hi_sig, quo_hi_sig;

    /* Find the highest non-zero divisor digit.  */
    for (i = 7; ; i--)
      if (den[i] != 0) {
        den_hi_sig = i;
        break;
      }
    for (i = 7; ; i--)
      if (num[i] != 0) {
        num_hi_sig = i;
        break;
      }
    quo_hi_sig = num_hi_sig - den_hi_sig + 1;

    /* Ensure that the first digit of the divisor is at least BASE/2.
       This is required by the quotient digit estimation algorithm.  */

    scale = BASE / (den[den_hi_sig] + 1);
    if (scale > 1) {		/* scale divisor and dividend */
      carry = 0;
      for (i = 0; i <= 8; i++) {
        work = (num[i] * scale) + carry;
        num[i] = work & 0xff;
        carry = work >> 8;
        if (num[i] != 0) num_hi_sig = i;
      }
      carry = 0;
      for (i = 0; i <= 7; i++) {
        work = (den[i] * scale) + carry;
        den[i] = work & 0xff;
        carry = work >> 8;
        if (den[i] != 0) den_hi_sig = i;
      }
    }

    /* Main loop */
    for (i = quo_hi_sig; i > 0; i--) {
      /* guess the next quotient digit, quo_est, by dividing the first
         two remaining dividend digits by the high order quotient digit.
         quo_est is never low and is at most 2 high.  */

      int num_hi;		/* index of highest remaining dividend digit */

      num_hi = i + den_hi_sig;

      work = (num[num_hi] * BASE) + (num_hi > 0 ? num[num_hi - 1] : 0);
      if (num[num_hi] != den[den_hi_sig]) {
        quo_est = work / den[den_hi_sig];
      }
      else {
        quo_est = BASE - 1;
      }

      /* refine quo_est so it's usually correct, and at most one high.  */
      while ((den[den_hi_sig - 1] * quo_est)
             > (((work - (quo_est * den[den_hi_sig])) * BASE)
                + ((num_hi - 1) > 0 ? num[num_hi - 2] : 0))) {
        quo_est--;
      }

      /* try quo_est as the quotient digit, by multiplying the
         divisor by quo_est and subtracting from the remaining dividend.  */
      carry = 0;
      for (j = 0; j <= den_hi_sig; j++) {
        int digit;

        work = num[i + j] - (quo_est * den[j]) + carry;
        digit = work & 0xff;
        carry = work >> 8;
        if (digit < 0) {
          digit += BASE;
          carry--;
        }
        num[i + j] = digit;
      }

      /* if quo_est was high by one, then num[i] went negative and
         we need to correct things.  */

      if (num[num_hi] < 0) {
        quo_est--;
        carry = 0;		/* add divisor back in */
        for (j = 0; j <= den_hi_sig; j++) {
          work = num[i + j] + den[j] + carry;
          if (work >= BASE) {
            work -= BASE;
            carry = 1;
          }
          else {
            carry = 0;
          }
          num[i + j] = work;
        }
        num[num_hi] += carry;
      }

      /* store the quotient digit.  */
      quo[i - 1] = quo_est;
    }
  }
  decode (quo, lquo, hquo);

 finish_up:
  /* if result is negative, make it so.  */
  if (quo_neg)
    neg_double (*lquo, *hquo, lquo, hquo);

  /* compute trial remainder:  rem = num - (quo * den)  */
  mul_double (*lquo, *hquo, lden, hden, lrem, hrem);
  neg_double (*lrem, *hrem, lrem, hrem);
  add_double (lnum, hnum, *lrem, *hrem, lrem, hrem);

  switch (code)
    {
    case TRUNC_DIV_EXPR:
    case TRUNC_MOD_EXPR:	/* round toward zero */
      return;

    case FLOOR_DIV_EXPR:
    case FLOOR_MOD_EXPR:	/* round toward negative infinity */
      if (quo_neg && (*lrem != 0 || *hrem != 0))  /* quo < 0 && rem != 0 */
        {
          /* quo = quo - 1;  */
          add_double (*lquo, *hquo, -1, -1, lquo, hquo);
        }
      else return;
      break;

    case CEIL_DIV_EXPR:
    case CEIL_MOD_EXPR:		/* round toward positive infinity */
      if ((uns || !quo_neg)			/* quo >= 0 */
          && (*lrem != 0 || *hrem != 0))	/* && rem != 0 */
        {
          add_double (*lquo, *hquo, 1, 0, lquo, hquo);
        }
      else return;
      break;

    case ROUND_DIV_EXPR:
    case ROUND_MOD_EXPR:	/* round to closest integer */
      {
        int labs_rem = *lrem, habs_rem = *hrem;
        int labs_den = lden, habs_den = hden, ltwice, htwice;

        /* get absolute values */
        if (*hrem < 0) neg_double (*lrem, *hrem, &labs_rem, &habs_rem);
        if (hden < 0) neg_double (lden, hden, &labs_den, &habs_den);

        /* if (2 * abs (rem) >= abs (den)) */
        mul_double (2, 0, labs_rem, habs_rem, &ltwice, &htwice);
        if (((unsigned) habs_den < (unsigned) htwice)
            || (((unsigned) habs_den == (unsigned) htwice)
                && ((unsigned) labs_den < (unsigned) ltwice)))
          {
            if (*hquo < 0)
              /* quo = quo - 1; */
              add_double (*lquo, *hquo, -1, -1, lquo, hquo);
            else
              /* quo = quo + 1; */
              add_double (*lquo, *hquo, 1, 0, lquo, hquo);
          }
        else return;
      }
      break;

    default:
      abort ();
    }

  /* compute true remainder:  rem = num - (quo * den)  */
  mul_double (*lquo, *hquo, lden, hden, lrem, hrem);
  neg_double (*lrem, *hrem, lrem, hrem);
  add_double (lnum, hnum, *lrem, *hrem, lrem, hrem);
}
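
/* A compiled-out sketch of the rounding modes for -7 / 2 (signed):
   truncation gives -3 with remainder -1, while flooring gives -4 with
   remainder 1.  The hypothetical helper assumes a 32-bit host, so the
   high word of -7 is -1.  */
#if 0
static void
div_round_example ()
{
  int lq, hq, lr, hr;
  div_and_round_double (TRUNC_DIV_EXPR, 0, -7, -1, 2, 0, &lq, &hq, &lr, &hr);
  /* lq == -3, lr == -1: rounded toward zero.  */
  div_and_round_double (FLOOR_DIV_EXPR, 0, -7, -1, 2, 0, &lq, &hq, &lr, &hr);
  /* lq == -4, lr == 1: rounded toward negative infinity.  */
}
#endif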
/* Split a tree IN into a constant and a variable part
   that could be combined with CODE to make IN.
   CODE must be a commutative arithmetic operation.
   Store the constant part into *CONP and the variable in *VARP.
   Return 1 if this was done; zero means the tree IN did not decompose
   this way.

   If CODE is PLUS_EXPR we also split trees that use MINUS_EXPR.
   Therefore, we must tell the caller whether the variable part
   was subtracted.  We do this by storing 1 or -1 into *VARSIGNP.
   The value stored is the coefficient for the variable term.
   The constant term we return should always be added;
   we negate it if necessary.  */

static int
split_tree (in, code, varp, conp, varsignp)
     tree in;
     enum tree_code code;
     tree *varp, *conp;
     int *varsignp;
{
  register tree outtype = TREE_TYPE (in);
  *varp = 0;
  *conp = 0;

  if (TREE_CODE (in) == NOP_EXPR)
    in = TREE_OPERAND (in, 0);

  if (TREE_CODE (in) == code
      || (TREE_CODE (TREE_TYPE (in)) != REAL_TYPE
          /* We can associate addition and subtraction together
             (even though the C standard doesn't say so)
             for integers because the value is not affected.
             For reals, the value might be affected, so we can't.  */
          && ((code == PLUS_EXPR && TREE_CODE (in) == MINUS_EXPR)
              || (code == MINUS_EXPR && TREE_CODE (in) == PLUS_EXPR))))
    {
      if (TREE_LITERAL (TREE_OPERAND (in, 0)))
        {
          *conp = TREE_OPERAND (in, 0);
          *varp = TREE_OPERAND (in, 1);
          if (TREE_TYPE (*varp) != outtype)
            *varp = convert (outtype, *varp);
          *varsignp = (TREE_CODE (in) == MINUS_EXPR) ? -1 : 1;
          return 1;
        }
      if (TREE_LITERAL (TREE_OPERAND (in, 1)))
        {
          *conp = TREE_OPERAND (in, 1);
          *varp = TREE_OPERAND (in, 0);
          *varsignp = 1;
          if (TREE_TYPE (*varp) != outtype)
            *varp = convert (outtype, *varp);
          if (TREE_CODE (in) == MINUS_EXPR)
            *conp = combine (MINUS_EXPR, integer_zero_node, *conp);
          return 1;
        }
    }
  return 0;
}
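
/* A sketch of what split_tree reports, assuming X is a variable of type
   int and the trees were built elsewhere:

     IN = (x + 3), CODE = PLUS_EXPR
       => *VARP = x, *CONP = 3,  *VARSIGNP = 1
     IN = (3 - x), CODE = PLUS_EXPR
       => *VARP = x, *CONP = 3,  *VARSIGNP = -1
     IN = (x - 3), CODE = PLUS_EXPR
       => *VARP = x, *CONP = -3 (negated so it can be added), *VARSIGNP = 1
*/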
/* Combine two constants ARG1 and ARG2 under operation CODE
   to produce a new constant.
   We assume ARG1 and ARG2 have the same data type,
   or at least are the same kind of constant and the same machine mode.  */
tree
combine (code, arg1, arg2)
     enum tree_code code;
     register tree arg1, arg2;
{
  if (TREE_CODE (arg1) == INTEGER_CST)
    {
      register int int1l = TREE_INT_CST_LOW (arg1);
      register int int1h = TREE_INT_CST_HIGH (arg1);
      int int2l = TREE_INT_CST_LOW (arg2);
      int int2h = TREE_INT_CST_HIGH (arg2);
      int low, hi;
      int garbage;
      register tree t;
      int uns = type_unsigned_p (TREE_TYPE (arg1));

      switch (code)
        {
        case BIT_IOR_EXPR:
          t = build_int_2 (int1l | int2l, int1h | int2h);
          break;

        case BIT_XOR_EXPR:
          t = build_int_2 (int1l ^ int2l, int1h ^ int2h);
          break;

        case BIT_AND_EXPR:
          t = build_int_2 (int1l & int2l, int1h & int2h);
          break;

        case BIT_ANDTC_EXPR:
          t = build_int_2 (int1l & ~int2l, int1h & ~int2h);
          break;

        case RSHIFT_EXPR:
          int2l = - int2l;
          /* ... fall through, shifting left by a negative count.  */
        case LSHIFT_EXPR:
          lshift_double (int1l, int1h, int2l,
                         TYPE_PRECISION (TREE_TYPE (arg1)),
                         &low, &hi,
                         !uns);
          t = build_int_2 (low, hi);
          break;

        case RROTATE_EXPR:
          int2l = - int2l;
          /* ... fall through, rotating left by a negative count.  */
        case LROTATE_EXPR:
          lrotate_double (int1l, int1h, int2l,
                          TYPE_PRECISION (TREE_TYPE (arg1)),
                          &low, &hi);
          t = build_int_2 (low, hi);
          break;

        case PLUS_EXPR:
          add_double (int1l, int1h, int2l, int2h, &low, &hi);
          t = build_int_2 (low, hi);
          break;

        case MINUS_EXPR:
          neg_double (int2l, int2h, &int2l, &int2h);
          add_double (int1l, int1h, int2l, int2h, &low, &hi);
          t = build_int_2 (low, hi);
          break;

        case MULT_EXPR:
          mul_double (int1l, int1h, int2l, int2h, &low, &hi);
          t = build_int_2 (low, hi);
          break;

        case TRUNC_DIV_EXPR: case ROUND_DIV_EXPR:
        case FLOOR_DIV_EXPR: case CEIL_DIV_EXPR:
          div_and_round_double (code, uns, int1l, int1h, int2l, int2h,
                                &low, &hi, &garbage, &garbage);
          t = build_int_2 (low, hi);
          break;

        case TRUNC_MOD_EXPR: case ROUND_MOD_EXPR:
        case FLOOR_MOD_EXPR: case CEIL_MOD_EXPR:
          div_and_round_double (code, uns, int1l, int1h, int2l, int2h,
                                &garbage, &garbage, &low, &hi);
          t = build_int_2 (low, hi);
          break;

        case MIN_EXPR:
        case MAX_EXPR:
          /* LOW is 1 if ARG1 < ARG2, under the type's signedness.  */
          if (uns)
            {
              low = (((unsigned) int1h < (unsigned) int2h)
                     || (((unsigned) int1h == (unsigned) int2h)
                         && ((unsigned) int1l < (unsigned) int2l)));
            }
          else
            {
              low = ((int1h < int2h)
                     || ((int1h == int2h)
                         && ((unsigned) int1l < (unsigned) int2l)));
            }
          if (low == (code == MIN_EXPR))
            t = build_int_2 (int1l, int1h);
          else
            t = build_int_2 (int2l, int2h);
          break;

        default:
          abort ();
        }

      TREE_TYPE (t) = TREE_TYPE (arg1);
      if (uns)
        truncate_unsigned (t);
      return t;
    }
  if (TREE_CODE (arg1) == REAL_CST)
    {
      register double d1 = TREE_REAL_CST (arg1);
      register double d2 = TREE_REAL_CST (arg2);
      register tree t;

      switch (code)
        {
        case PLUS_EXPR:
          t = build_real (d1 + d2);
          break;

        case MINUS_EXPR:
          t = build_real (d1 - d2);
          break;

        case MULT_EXPR:
          t = build_real (d1 * d2);
          break;

        case RDIV_EXPR:
          if (d2 == 0)
            return 0;
          t = build_real (d1 / d2);
          break;

        case MIN_EXPR:
          if (d1 < d2)
            t = build_real (d1);
          else
            t = build_real (d2);
          break;

        case MAX_EXPR:
          if (d1 > d2)
            t = build_real (d1);
          else
            t = build_real (d2);
          break;

        default:
          abort ();
        }

      TREE_TYPE (t) = TREE_TYPE (arg1);
      return t;
    }
  if (TREE_CODE (arg1) == COMPLEX_CST)
    {
      register tree r1 = TREE_REALPART (arg1);
      register tree i1 = TREE_IMAGPART (arg1);
      register tree r2 = TREE_REALPART (arg2);
      register tree i2 = TREE_IMAGPART (arg2);
      register tree t;

      switch (code)
        {
        case PLUS_EXPR:
          t = build_complex (combine (PLUS_EXPR, r1, r2),
                             combine (PLUS_EXPR, i1, i2));
          break;

        case MINUS_EXPR:
          t = build_complex (combine (MINUS_EXPR, r1, r2),
                             combine (MINUS_EXPR, i1, i2));
          break;

        case MULT_EXPR:
          /* (r1 + i1 i)(r2 + i2 i) == (r1 r2 - i1 i2) + (r1 i2 + i1 r2) i */
          t = build_complex (combine (MINUS_EXPR,
                                      combine (MULT_EXPR, r1, r2),
                                      combine (MULT_EXPR, i1, i2)),
                             combine (PLUS_EXPR,
                                      combine (MULT_EXPR, r1, i2),
                                      combine (MULT_EXPR, i1, r2)));
          break;

        case RDIV_EXPR:
          {
            /* Multiply by the conjugate of the divisor and divide by its
               squared magnitude, r2*r2 + i2*i2.  */
            register tree magsquared
              = combine (PLUS_EXPR,
                         combine (MULT_EXPR, r2, r2),
                         combine (MULT_EXPR, i2, i2));
            t = build_complex (combine (RDIV_EXPR,
                                        combine (PLUS_EXPR,
                                                 combine (MULT_EXPR, r1, r2),
                                                 combine (MULT_EXPR, i1, i2)),
                                        magsquared),
                               combine (RDIV_EXPR,
                                        combine (MINUS_EXPR,
                                                 combine (MULT_EXPR, i1, r2),
                                                 combine (MULT_EXPR, r1, i2)),
                                        magsquared));
          }
          break;

        default:
          abort ();
        }

      TREE_TYPE (t) = TREE_TYPE (arg1);
      return t;
    }
  return 0;
}
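
/* A compiled-out sketch of calling combine directly; build_int_2 and
   integer_type_node are assumed set up as elsewhere in the compiler, and
   the helper name is hypothetical.  */
#if 0
static tree
combine_example ()
{
  tree a = build_int_2 (6, 0);
  tree b = build_int_2 (4, 0);
  TREE_TYPE (a) = TREE_TYPE (b) = integer_type_node;
  /* Folds to an INTEGER_CST with low word 10.  */
  return combine (PLUS_EXPR, a, b);
}
#endif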
/* Given T, a tree representing type conversion of a constant,
   return a constant tree representing the result of conversion.  */

static tree
fold_convert (t)
     register tree t;
{
  register tree arg1 = TREE_OPERAND (t, 0);
  register tree type = TREE_TYPE (t);

  if (TREE_CODE (type) == POINTER_TYPE
      || TREE_CODE (type) == INTEGER_TYPE
      || TREE_CODE (type) == ENUMERAL_TYPE)
    {
      if (TREE_CODE (arg1) == INTEGER_CST)
        {
          /* Given an integer constant, make new constant with new type,
             appropriately sign-extended or truncated.  */
          register int inprec;
          register int outprec;

          if (TREE_CODE (TREE_TYPE (arg1)) == POINTER_TYPE)
            inprec = BITS_PER_WORD;
          else
            inprec = TYPE_PRECISION (TREE_TYPE (arg1));
          if (TREE_CODE (type) == POINTER_TYPE)
            outprec = BITS_PER_WORD;
          else
            outprec = TYPE_PRECISION (type);

          t = build_int_2 (TREE_INT_CST_LOW (arg1),
                           TREE_INT_CST_HIGH (arg1));
          TREE_TYPE (t) = type;
          /* First zero out all bits not in the new type.  */
          truncate_unsigned (t);
          /* If desired type is signed, sign extend.  */
          if (!type_unsigned_p (type)
              && (outprec > HOST_BITS_PER_INT
                  ? TREE_INT_CST_HIGH (t)
                    & (1 << (outprec - HOST_BITS_PER_INT - 1))
                  : TREE_INT_CST_LOW (t) & (1 << (outprec - 1))))
            {
              /* Value is negative:
                 set to 1 all the undesired bits.  */
              if (outprec > HOST_BITS_PER_INT)
                {
                  TREE_INT_CST_HIGH (t)
                    |= ((-1) << (outprec - HOST_BITS_PER_INT));
                }
              else
                {
                  TREE_INT_CST_HIGH (t) = -1;
                  TREE_INT_CST_LOW (t)
                    |= ((-1) << outprec);
                }
            }
        }
      else if (TREE_CODE (arg1) == REAL_CST)
        t = build_int_2 ((int) TREE_REAL_CST (arg1),
                         (int) (TREE_REAL_CST (arg1) / 0x10000 / 0x10000));
    }
  else if (TREE_CODE (type) == REAL_TYPE)
    {
      if (TREE_CODE (arg1) == INTEGER_CST)
        t = build_real_from_int_cst (arg1);
      else if (TREE_CODE (arg1) == REAL_CST)
        t = build_real (TREE_REAL_CST (arg1));
    }
  TREE_TYPE (t) = type;
  TREE_LITERAL (t) = 1;
  return t;
}
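
/* A worked example of the truncate-then-sign-extend scheme above,
   assuming HOST_BITS_PER_INT == 32: converting 0xfff (4095) to a signed
   8-bit type first masks the low word to 0xff; then, since bit 7 is set
   and the type is signed, (-1) << 8 is ORed in, leaving low word -1 and
   high word -1, i.e. the value -1.  */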
/* Perform constant folding and related simplification of EXPR.
   The related simplifications include x*1 => x, x*0 => 0, etc.,
   and application of the associative law.
   NOP_EXPR conversions may be removed freely (as long as we
   are careful not to change the C type of the overall expression).
   We cannot simplify through a CONVERT_EXPR, FIX_EXPR or FLOAT_EXPR,
   but we can constant-fold them if they have constant operands.  */
tree
fold (expr)
     tree expr;
{
  register tree t = expr;
  register tree arg0, arg1;
  register enum tree_code code = TREE_CODE (t);
  register int kind;

  /* WINS will be nonzero when the switch is done
     if all operands are constant.
     LOSES will be nonzero when the switch is done
     if any operand is volatile.
     This inhibits optimizations such as (foo () * 0) => 0.
     But identity-element optimizations such as
     (foo () * 1) => (foo ()) can be done even if LOSES is set.  */
  int wins = 1;
  int loses = 0;

  /* Return right away if already constant.  */
  if (TREE_LITERAL (t))
    {
      if (code == CONST_DECL)
        return DECL_INITIAL (t);
      return t;
    }

  kind = *tree_code_type[(int) code];
  if (kind == 'e' || kind == 'r')
    {
      register int len = tree_code_length[(int) code];
      register int i;
      for (i = 0; i < len; i++)
        {
          if (TREE_CODE (TREE_OPERAND (t, i)) != INTEGER_CST
              && TREE_CODE (TREE_OPERAND (t, i)) != REAL_CST)
            /* Note that TREE_LITERAL isn't enough:
               static var addresses are constant but we can't
               do arithmetic on them.  */
            wins = 0;
          if (TREE_VOLATILE (TREE_OPERAND (t, i)))
            loses = 1;
        }
      arg0 = TREE_OPERAND (t, 0);
      if (len > 1)
        arg1 = TREE_OPERAND (t, 1);
    }

  /* Now WINS and LOSES are set as described above,
     ARG0 is the first operand of EXPR,
     and ARG1 is the second operand (if it has more than one operand).  */
  switch (code)
    {
    case INTEGER_CST:
    case REAL_CST:
    case STRING_CST:
    case COMPLEX_CST:
    case CONSTRUCTOR:
      return t;

    case CONST_DECL:
      return fold (DECL_INITIAL (t));

    case NOP_EXPR:
    case FLOAT_EXPR:
    case CONVERT_EXPR:
    case FIX_ROUND_EXPR:
      if (!wins)
        {
          TREE_LITERAL (t) = TREE_LITERAL (arg0);
          return t;
        }
      return fold_convert (t);
    case RANGE_EXPR:
      TREE_LITERAL (t) = wins;
      return t;

    case NEGATE_EXPR:
      if (wins)
        {
          if (TREE_CODE (arg0) == INTEGER_CST)
            {
              if (TREE_INT_CST_LOW (arg0) == 0)
                t = build_int_2 (0, - TREE_INT_CST_HIGH (arg0));
              else
                t = build_int_2 (- TREE_INT_CST_LOW (arg0),
                                 ~ TREE_INT_CST_HIGH (arg0));
              if (type_unsigned_p (TREE_TYPE (expr)))
                truncate_unsigned (t);
            }
          else if (TREE_CODE (arg0) == REAL_CST)
            t = build_real (- TREE_REAL_CST (arg0));
          else if (TREE_CODE (arg0) == COMPLEX_CST)
            /* Negate each part; ARG1 is not set for a unary operator.  */
            t = build_complex (fold (build1 (NEGATE_EXPR,
                                             TREE_REALPART (arg0))),
                               fold (build1 (NEGATE_EXPR,
                                             TREE_IMAGPART (arg0))));
          TREE_TYPE (t) = TREE_TYPE (expr);
        }
      return t;
    case ABS_EXPR:
      if (wins)
        {
          if (TREE_CODE (arg0) == INTEGER_CST)
            {
              /* Negate only if the type is signed and the value is
                 negative.  */
              if (! type_unsigned_p (TREE_TYPE (expr))
                  && TREE_INT_CST_HIGH (arg0) < 0)
                {
                  if (TREE_INT_CST_LOW (arg0) == 0)
                    t = build_int_2 (0, - TREE_INT_CST_HIGH (arg0));
                  else
                    t = build_int_2 (- TREE_INT_CST_LOW (arg0),
                                     ~ TREE_INT_CST_HIGH (arg0));
                }
            }
          else if (TREE_CODE (arg0) == REAL_CST)
            {
              if (TREE_REAL_CST (arg0) < 0)
                t = build_real (- TREE_REAL_CST (arg0));
            }
          TREE_TYPE (t) = TREE_TYPE (expr);
        }
      return t;

    case BIT_NOT_EXPR:
      if (wins)
        {
          if (TREE_CODE (arg0) == INTEGER_CST)
            t = build_int_2 (~ TREE_INT_CST_LOW (arg0),
                             ~ TREE_INT_CST_HIGH (arg0));
          TREE_TYPE (t) = TREE_TYPE (expr);
          if (type_unsigned_p (TREE_TYPE (t)))
            truncate_unsigned (t);
        }
      return t;
    case PLUS_EXPR:
      if (integer_zerop (arg0))
        return arg1;
      if (integer_zerop (arg1))
        return arg0;
    associate:
      /* The varsign == -1 cases happen only for addition and subtraction.
         It says that the arg that was split was really CON minus VAR.
         The rest of the code applies to all associative operations.  */
      if (!wins)
        {
          tree var, con, tem;
          int varsign;

          if (split_tree (arg0, code, &var, &con, &varsign))
            {
              if (varsign == -1)
                {
                  TREE_SET_CODE (t, MINUS_EXPR);
                  TREE_OPERAND (t, 1) = var;
                  tem = build2 (code, con, arg1);
                  TREE_TYPE (tem) = TREE_TYPE (t);
                  TREE_OPERAND (t, 0) = fold (tem);
                }
              else
                {
                  tem = build2 (code, arg1, con);
                  TREE_TYPE (tem) = TREE_TYPE (t);
                  TREE_OPERAND (t, 1) = fold (tem);
                  TREE_OPERAND (t, 0) = var;
                }
              return t;
            }

          if (split_tree (arg1, code, &var, &con, &varsign))
            {
              if (varsign == -1)
                TREE_SET_CODE (t,
                               (code == PLUS_EXPR ? MINUS_EXPR : PLUS_EXPR));
              tem = build2 (code, arg0, con);
              TREE_TYPE (tem) = TREE_TYPE (t);
              TREE_OPERAND (t, 0) = fold (tem);
              TREE_OPERAND (t, 1) = var;
              return t;
            }
        }
    binary:
      {
        register tree t1 = NULL_TREE;
        if (wins)
          t1 = combine (code, arg0, arg1);
        if (t1 != NULL_TREE)
          return t1;
        return t;
      }
    case MINUS_EXPR:
      if (integer_zerop (arg0))
        {
          t = build1 (NEGATE_EXPR, arg1);
          TREE_TYPE (t) = TREE_TYPE (expr);
          return t;
        }
      if (integer_zerop (arg1))
        return arg0;
      /* Can't associate subtraction on floats in C.  */
      if (TREE_CODE (TREE_TYPE (expr)) == REAL_TYPE)
        goto binary;
      goto associate;

    case MULT_EXPR:
      if (!loses && integer_zerop (arg0))
        return arg0;
      if (!loses && integer_zerop (arg1))
        return arg1;
      if (integer_onep (arg0))
        return arg1;
      if (integer_onep (arg1))
        return arg0;
      goto associate;

    case BIT_IOR_EXPR:
      if (!loses && integer_all_onesp (arg0))
        return arg0;
      if (!loses && integer_all_onesp (arg1))
        return arg1;
      /* ... fall through; x | 0 and x ^ 0 simplify alike.  */
    case BIT_XOR_EXPR:
      if (integer_zerop (arg0))
        return arg1;
      if (integer_zerop (arg1))
        return arg0;
      goto associate;

    case BIT_AND_EXPR:
      if (integer_all_onesp (arg0))
        return arg1;
      if (integer_all_onesp (arg1))
        return arg0;
      if (!loses && integer_zerop (arg0))
        return arg0;
      if (!loses && integer_zerop (arg1))
        return arg1;
      goto associate;

    case BIT_ANDTC_EXPR:
      if (integer_all_onesp (arg0))
        return arg1;
      if (integer_zerop (arg1))
        return arg0;
      if (!loses && integer_zerop (arg0))
        return arg0;
      if (!loses && integer_all_onesp (arg1))
        return combine (code, arg1, arg1);
      goto binary;

    case TRUNC_DIV_EXPR:
    case ROUND_DIV_EXPR:
    case FLOOR_DIV_EXPR:
    case CEIL_DIV_EXPR:
    case RDIV_EXPR:
      if (integer_onep (arg1))
        return arg0;
      goto binary;

    case CEIL_MOD_EXPR:
    case FLOOR_MOD_EXPR:
    case ROUND_MOD_EXPR:
    case TRUNC_MOD_EXPR:
      if (!loses && integer_onep (arg1))
        return combine (code, arg1, arg1);
      goto binary;

    case LSHIFT_EXPR:
    case RSHIFT_EXPR:
    case LROTATE_EXPR:
    case RROTATE_EXPR:
      if (integer_zerop (arg1))
        return arg0;
      goto binary;

    case MIN_EXPR:
    case MAX_EXPR:
      goto associate;
    case EQ_EXPR:
    case NE_EXPR:
      /* Compute a result for EQ, or return if cannot do so.  */
      if (TREE_CODE (arg0) == INTEGER_CST
          && TREE_CODE (arg1) == INTEGER_CST)
        {
          t = build_int_2
            (TREE_INT_CST_LOW (arg0) == TREE_INT_CST_LOW (arg1)
             && TREE_INT_CST_HIGH (arg0) == TREE_INT_CST_HIGH (arg1),
             0);
        }
      else if (TREE_CODE (arg0) == REAL_CST
               && TREE_CODE (arg1) == REAL_CST)
        {
          t = build_int_2 (TREE_REAL_CST (arg0) == TREE_REAL_CST (arg1),
                           0);
        }
      else
        return t;
      /* If we wanted NE_EXPR, invert the result.  */
      if (code == NE_EXPR)
        TREE_INT_CST_LOW (t) ^= 1;
      TREE_TYPE (t) = TREE_TYPE (expr);
      return t;

    case LT_EXPR:
    case GT_EXPR:
    case LE_EXPR:
    case GE_EXPR:
      /* To compute GT, swap the arguments and do LT.
         To compute GE, do LT and invert the result.
         To compute LE, swap the arguments, do LT and invert the result.  */
      if (code == LE_EXPR || code == GT_EXPR)
        {
          register tree temp = arg0;
          arg0 = arg1;
          arg1 = temp;
        }
      /* Compute a result for LT, or return if cannot do so.  */
      if (TREE_CODE (arg0) == INTEGER_CST
          && TREE_CODE (arg1) == INTEGER_CST)
        {
          t = build_int_2 ((type_unsigned_p (TREE_TYPE (arg0))
                            ? INT_CST_LT_UNSIGNED (arg0, arg1)
                            : INT_CST_LT (arg0, arg1)),
                           0);
        }
      else if (TREE_CODE (arg0) == REAL_CST
               && TREE_CODE (arg1) == REAL_CST)
        {
          t = build_int_2 (TREE_REAL_CST (arg0) < TREE_REAL_CST (arg1), 0);
        }
      else
        return t;
      /* If we wanted ...-or-equal, invert the result.  */
      if (code == GE_EXPR || code == LE_EXPR)
        TREE_INT_CST_LOW (t) ^= 1;
      TREE_TYPE (t) = TREE_TYPE (expr);
      return t;
    case COND_EXPR:
      if (TREE_LITERAL (arg0))
        return TREE_OPERAND (expr, (integer_zerop (arg0) ? 2 : 1));
      return t;

    default:
      return t;
    }				/* switch (code) */
}
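
/* A compiled-out sketch of the top-level entry point: folding the tree
   for 2 + 3 yields an INTEGER_CST of 5, while an expression with a
   non-constant operand comes back unchanged (modulo the identity and
   association rewrites above).  The helper name is hypothetical and
   integer_type_node is assumed set up elsewhere.  */
#if 0
static tree
fold_example ()
{
  tree two = build_int_2 (2, 0);
  tree three = build_int_2 (3, 0);
  tree sum;
  TREE_TYPE (two) = TREE_TYPE (three) = integer_type_node;
  sum = build2 (PLUS_EXPR, two, three);
  TREE_TYPE (sum) = integer_type_node;
  return fold (sum);		/* an INTEGER_CST with low word 5 */
}
#endif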