mach_dep.c

/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */
/* Boehm, November 17, 1995 12:13 pm PST */
# include "private/gc_priv.h"
# include <stdio.h>
# include <setjmp.h>

# if defined(OS2) || defined(CX_UX)
#   define _setjmp(b) setjmp(b)
#   define _longjmp(b,v) longjmp(b,v)
# endif

# ifdef AMIGA
#   ifndef __GNUC__
#     include <dos.h>
#   else
#     include <machine/reg.h>
#   endif
# endif

#if defined(RS6000) || defined(POWERPC)
# include <ucontext.h>
#endif
#if defined(__MWERKS__) && !defined(POWERPC)

asm static void PushMacRegisters()
{
    sub.w   #4,sp                   // reserve space for one parameter.
    move.l  a2,(sp)
    jsr     GC_push_one
    move.l  a3,(sp)
    jsr     GC_push_one
    move.l  a4,(sp)
    jsr     GC_push_one
#   if !__option(a6frames)
    // <pcb> perhaps a6 should be pushed if stack frames are not being used.
    move.l  a6,(sp)
    jsr     GC_push_one
#   endif
    // skip a5 (globals), a6 (frame pointer), and a7 (stack pointer)
    move.l  d2,(sp)
    jsr     GC_push_one
    move.l  d3,(sp)
    jsr     GC_push_one
    move.l  d4,(sp)
    jsr     GC_push_one
    move.l  d5,(sp)
    jsr     GC_push_one
    move.l  d6,(sp)
    jsr     GC_push_one
    move.l  d7,(sp)
    jsr     GC_push_one
    add.w   #4,sp                   // fix stack.
    rts
}

#endif /* __MWERKS__ */
# if defined(SPARC) || defined(IA64)
    /* Value returned from register flushing routine; either sp (SPARC) */
    /* or ar.bsp (IA64).                                                 */
    word GC_save_regs_ret_val;
# endif

/* Routine to mark from registers that are preserved by the C compiler. */
/* This must be ported to every new architecture.  There is a generic   */
/* version at the end, that is likely, but not guaranteed, to work      */
/* on your architecture.  Run the test_setjmp program to see whether    */
/* there is any chance it will work.                                    */
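
/* For orientation, a minimal sketch (not compiled) of the generic      */
/* strategy used by the USE_GENERIC_PUSH_REGS path further down: spill  */
/* the callee-save registers into a jmp_buf with setjmp, then treat     */
/* every word of that buffer as a possible pointer.  The function name  */
/* below is made up for this sketch only.                                */
# if 0
  static void GC_push_regs_generic_sketch()
  {
      jmp_buf regs;
      register word * i = (word *) regs;
      register ptr_t lim = (ptr_t)(regs) + (sizeof regs);

      /* Clear the buffer first: setjmp need not fill all of it, and    */
      /* stale contents would otherwise be scanned as if they were      */
      /* register values.                                                */
      for (; (char *)i < lim; i++) *i = 0;
      (void) setjmp(regs);
      /* Conservatively push every word that setjmp saved.              */
      for (i = (word *) regs; (char *)i < lim; i++) GC_push_one(*i);
  }
# endif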
#if !defined(USE_GENERIC_PUSH_REGS) && !defined(USE_ASM_PUSH_REGS)
#undef HAVE_PUSH_REGS
void GC_push_regs()
{
#   ifdef RT
      register long TMP_SP; /* must be bound to r11 */
#   endif

#   ifdef VAX
      /* VAX - generic code below does not work under 4.2. */
      /* r1 through r5 are caller save, and therefore      */
      /* on the stack or dead.                              */
      asm("pushl r11"); asm("calls $1,_GC_push_one");
      asm("pushl r10"); asm("calls $1,_GC_push_one");
      asm("pushl r9");  asm("calls $1,_GC_push_one");
      asm("pushl r8");  asm("calls $1,_GC_push_one");
      asm("pushl r7");  asm("calls $1,_GC_push_one");
      asm("pushl r6");  asm("calls $1,_GC_push_one");
#     define HAVE_PUSH_REGS
#   endif

#   if defined(M68K) && (defined(SUNOS4) || defined(NEXT))
      /* M68K SUNOS - could be replaced by generic code. */
      /* a0, a1 and d1 are caller save                   */
      /* and therefore are on stack or dead.             */
      asm("subqw #0x4,sp");       /* allocate word on top of stack */
      asm("movl a2,sp@"); asm("jbsr _GC_push_one");
      asm("movl a3,sp@"); asm("jbsr _GC_push_one");
      asm("movl a4,sp@"); asm("jbsr _GC_push_one");
      asm("movl a5,sp@"); asm("jbsr _GC_push_one");
      /* Skip frame pointer and stack pointer. */
      asm("movl d1,sp@"); asm("jbsr _GC_push_one");
      asm("movl d2,sp@"); asm("jbsr _GC_push_one");
      asm("movl d3,sp@"); asm("jbsr _GC_push_one");
      asm("movl d4,sp@"); asm("jbsr _GC_push_one");
      asm("movl d5,sp@"); asm("jbsr _GC_push_one");
      asm("movl d6,sp@"); asm("jbsr _GC_push_one");
      asm("movl d7,sp@"); asm("jbsr _GC_push_one");
      asm("addqw #0x4,sp");       /* put stack back where it was */
#     define HAVE_PUSH_REGS
#   endif
#   if defined(M68K) && defined(HP)
      /* M68K HP - could be replaced by generic code. */
      /* a0, a1 and d1 are caller save.               */
      asm("subq.w &0x4,%sp");     /* allocate word on top of stack */
      asm("mov.l %a2,(%sp)"); asm("jsr _GC_push_one");
      asm("mov.l %a3,(%sp)"); asm("jsr _GC_push_one");
      asm("mov.l %a4,(%sp)"); asm("jsr _GC_push_one");
      asm("mov.l %a5,(%sp)"); asm("jsr _GC_push_one");
      /* Skip frame pointer and stack pointer. */
      asm("mov.l %d1,(%sp)"); asm("jsr _GC_push_one");
      asm("mov.l %d2,(%sp)"); asm("jsr _GC_push_one");
      asm("mov.l %d3,(%sp)"); asm("jsr _GC_push_one");
      asm("mov.l %d4,(%sp)"); asm("jsr _GC_push_one");
      asm("mov.l %d5,(%sp)"); asm("jsr _GC_push_one");
      asm("mov.l %d6,(%sp)"); asm("jsr _GC_push_one");
      asm("mov.l %d7,(%sp)"); asm("jsr _GC_push_one");
      asm("addq.w &0x4,%sp");     /* put stack back where it was */
#     define HAVE_PUSH_REGS
#   endif /* M68K HP */
#   if defined(M68K) && defined(AMIGA)
      /* AMIGA - could be replaced by generic code. */
      /* a0, a1, d0 and d1 are caller save.          */
#     ifdef __GNUC__
        asm("subq.w &0x4,%sp");   /* allocate word on top of stack */
        asm("mov.l %a2,(%sp)"); asm("jsr _GC_push_one");
        asm("mov.l %a3,(%sp)"); asm("jsr _GC_push_one");
        asm("mov.l %a4,(%sp)"); asm("jsr _GC_push_one");
        asm("mov.l %a5,(%sp)"); asm("jsr _GC_push_one");
        asm("mov.l %a6,(%sp)"); asm("jsr _GC_push_one");
        /* Skip frame pointer and stack pointer. */
        asm("mov.l %d2,(%sp)"); asm("jsr _GC_push_one");
        asm("mov.l %d3,(%sp)"); asm("jsr _GC_push_one");
        asm("mov.l %d4,(%sp)"); asm("jsr _GC_push_one");
        asm("mov.l %d5,(%sp)"); asm("jsr _GC_push_one");
        asm("mov.l %d6,(%sp)"); asm("jsr _GC_push_one");
        asm("mov.l %d7,(%sp)"); asm("jsr _GC_push_one");
        asm("addq.w &0x4,%sp");   /* put stack back where it was */
#       define HAVE_PUSH_REGS
#     else /* !__GNUC__ */
        GC_push_one(getreg(REG_A2));
        GC_push_one(getreg(REG_A3));
#       ifndef __SASC
          /* Can probably be changed to #if 0 -Kjetil M. (a4=globals) */
          GC_push_one(getreg(REG_A4));
#       endif
        GC_push_one(getreg(REG_A5));
        GC_push_one(getreg(REG_A6));
        /* Skip stack pointer. */
        GC_push_one(getreg(REG_D2));
        GC_push_one(getreg(REG_D3));
        GC_push_one(getreg(REG_D4));
        GC_push_one(getreg(REG_D5));
        GC_push_one(getreg(REG_D6));
        GC_push_one(getreg(REG_D7));
#       define HAVE_PUSH_REGS
#     endif /* !__GNUC__ */
#   endif /* AMIGA */
#   if defined(M68K) && defined(MACOS)
#     if defined(THINK_C)
#       define PushMacReg(reg) \
          move.l  reg,(sp) \
          jsr     GC_push_one
        asm {
            sub.w   #4,sp       ; reserve space for one parameter.
            PushMacReg(a2);
            PushMacReg(a3);
            PushMacReg(a4);
            ; skip a5 (globals), a6 (frame pointer), and a7 (stack pointer)
            PushMacReg(d2);
            PushMacReg(d3);
            PushMacReg(d4);
            PushMacReg(d5);
            PushMacReg(d6);
            PushMacReg(d7);
            add.w   #4,sp       ; fix stack.
        }
#       define HAVE_PUSH_REGS
#       undef PushMacReg
#     endif /* THINK_C */
#     if defined(__MWERKS__)
        PushMacRegisters();
#       define HAVE_PUSH_REGS
#     endif /* __MWERKS__ */
#   endif /* MACOS */
#   if defined(I386) && !defined(OS2) && !defined(SVR4) \
       && (defined(__MINGW32__) || !defined(MSWIN32)) \
       && !defined(SCO) && !defined(SCO_ELF) \
       && !(defined(LINUX) && defined(__ELF__)) \
       && !(defined(FREEBSD) && defined(__ELF__)) \
       && !(defined(NETBSD) && defined(__ELF__)) \
       && !(defined(OPENBSD) && defined(__ELF__)) \
       && !(defined(BEOS) && defined(__ELF__)) \
       && !defined(DOS4GW) && !defined(HURD)
      /* I386 code; the generic code does not appear to work here.  */
      /* (It does appear to work under OS2, where these asms don't.) */
      /* This is used for some 386 UNIX variants and for CYGWIN32.  */
      asm("pushl %eax"); asm("call _GC_push_one"); asm("addl $4,%esp");
      asm("pushl %ecx"); asm("call _GC_push_one"); asm("addl $4,%esp");
      asm("pushl %edx"); asm("call _GC_push_one"); asm("addl $4,%esp");
      asm("pushl %ebp"); asm("call _GC_push_one"); asm("addl $4,%esp");
      asm("pushl %esi"); asm("call _GC_push_one"); asm("addl $4,%esp");
      asm("pushl %edi"); asm("call _GC_push_one"); asm("addl $4,%esp");
      asm("pushl %ebx"); asm("call _GC_push_one"); asm("addl $4,%esp");
#     define HAVE_PUSH_REGS
#   endif
#   if ( defined(I386) && defined(LINUX) && defined(__ELF__) ) \
       || ( defined(I386) && defined(FREEBSD) && defined(__ELF__) ) \
       || ( defined(I386) && defined(NETBSD) && defined(__ELF__) ) \
       || ( defined(I386) && defined(OPENBSD) && defined(__ELF__) ) \
       || ( defined(I386) && defined(HURD) && defined(__ELF__) ) \
       || ( defined(I386) && defined(DGUX) )
      /* This is modified for Linux with ELF (note: __ELF__ builds only); */
      /* the same section handles FreeBSD and the other ELF targets       */
      /* above.  %eax is caller-save and dead here.  Other caller-save    */
      /* registers could also be skipped.  We assume there are no         */
      /* pointers in MMX registers, etc.                                   */
      /* Each push/call/add sequence is combined into a single asm so     */
      /* that gcc cannot schedule unrelated code between the push and     */
      /* the call.                                                         */
      asm("pushl %ecx; call GC_push_one; addl $4,%esp");
      asm("pushl %edx; call GC_push_one; addl $4,%esp");
      asm("pushl %ebp; call GC_push_one; addl $4,%esp");
      asm("pushl %esi; call GC_push_one; addl $4,%esp");
      asm("pushl %edi; call GC_push_one; addl $4,%esp");
      asm("pushl %ebx; call GC_push_one; addl $4,%esp");
#     define HAVE_PUSH_REGS
#   endif
#   if ( defined(I386) && defined(BEOS) && defined(__ELF__) )
      /* As far as I can understand from                       */
      /* http://www.beunited.org/articles/jbq/nasm.shtml,      */
      /* only ebp, esi, edi and ebx are not scratch.  How MMX  */
      /* etc. registers should be treated, I have no idea.     */
      asm("pushl %ebp; call GC_push_one; addl $4,%esp");
      asm("pushl %esi; call GC_push_one; addl $4,%esp");
      asm("pushl %edi; call GC_push_one; addl $4,%esp");
      asm("pushl %ebx; call GC_push_one; addl $4,%esp");
#     define HAVE_PUSH_REGS
#   endif
#   if defined(I386) && defined(MSWIN32) && !defined(__MINGW32__) \
       && !defined(USE_GENERIC)
      /* I386 code, Microsoft variant */
      __asm push eax
      __asm call GC_push_one
      __asm add esp,4
      __asm push ebx
      __asm call GC_push_one
      __asm add esp,4
      __asm push ecx
      __asm call GC_push_one
      __asm add esp,4
      __asm push edx
      __asm call GC_push_one
      __asm add esp,4
      __asm push ebp
      __asm call GC_push_one
      __asm add esp,4
      __asm push esi
      __asm call GC_push_one
      __asm add esp,4
      __asm push edi
      __asm call GC_push_one
      __asm add esp,4
#     define HAVE_PUSH_REGS
#   endif
#   if defined(I386) && (defined(SVR4) || defined(SCO) || defined(SCO_ELF))
      /* I386 code, SVR4 variant; generic code does not appear to work. */
      asm("pushl %eax"); asm("call GC_push_one"); asm("addl $4,%esp");
      asm("pushl %ebx"); asm("call GC_push_one"); asm("addl $4,%esp");
      asm("pushl %ecx"); asm("call GC_push_one"); asm("addl $4,%esp");
      asm("pushl %edx"); asm("call GC_push_one"); asm("addl $4,%esp");
      asm("pushl %ebp"); asm("call GC_push_one"); asm("addl $4,%esp");
      asm("pushl %esi"); asm("call GC_push_one"); asm("addl $4,%esp");
      asm("pushl %edi"); asm("call GC_push_one"); asm("addl $4,%esp");
#     define HAVE_PUSH_REGS
#   endif
#   ifdef NS32K
      asm("movd r3, tos"); asm("bsr ?_GC_push_one"); asm("adjspb $-4");
      asm("movd r4, tos"); asm("bsr ?_GC_push_one"); asm("adjspb $-4");
      asm("movd r5, tos"); asm("bsr ?_GC_push_one"); asm("adjspb $-4");
      asm("movd r6, tos"); asm("bsr ?_GC_push_one"); asm("adjspb $-4");
      asm("movd r7, tos"); asm("bsr ?_GC_push_one"); asm("adjspb $-4");
#     define HAVE_PUSH_REGS
#   endif
#   if defined(SPARC)
      GC_save_regs_ret_val = GC_save_regs_in_stack();
#     define HAVE_PUSH_REGS
#   endif
#   ifdef RT
      GC_push_one(TMP_SP);                            /* GC_push_one from r11 */
      asm("cas r11, r6, r0");  GC_push_one(TMP_SP);   /* r6      */
      asm("cas r11, r7, r0");  GC_push_one(TMP_SP);   /* through */
      asm("cas r11, r8, r0");  GC_push_one(TMP_SP);   /* r10     */
      asm("cas r11, r9, r0");  GC_push_one(TMP_SP);
      asm("cas r11, r10, r0"); GC_push_one(TMP_SP);
      asm("cas r11, r12, r0"); GC_push_one(TMP_SP);   /* r12     */
      asm("cas r11, r13, r0"); GC_push_one(TMP_SP);   /* through */
      asm("cas r11, r14, r0"); GC_push_one(TMP_SP);   /* r15     */
      asm("cas r11, r15, r0"); GC_push_one(TMP_SP);
#     define HAVE_PUSH_REGS
#   endif
#   if defined(M68K) && defined(SYSV)
      /* Once again similar to SUN and HP, though setjmp appears to work.
         --Parag
       */
#     ifdef __GNUC__
        asm("subqw #0x4,%sp");    /* allocate word on top of stack */
        asm("movl %a2,%sp@"); asm("jbsr GC_push_one");
        asm("movl %a3,%sp@"); asm("jbsr GC_push_one");
        asm("movl %a4,%sp@"); asm("jbsr GC_push_one");
        asm("movl %a5,%sp@"); asm("jbsr GC_push_one");
        /* Skip frame pointer and stack pointer. */
        asm("movl %d1,%sp@"); asm("jbsr GC_push_one");
        asm("movl %d2,%sp@"); asm("jbsr GC_push_one");
        asm("movl %d3,%sp@"); asm("jbsr GC_push_one");
        asm("movl %d4,%sp@"); asm("jbsr GC_push_one");
        asm("movl %d5,%sp@"); asm("jbsr GC_push_one");
        asm("movl %d6,%sp@"); asm("jbsr GC_push_one");
        asm("movl %d7,%sp@"); asm("jbsr GC_push_one");
        asm("addqw #0x4,%sp");    /* put stack back where it was */
#       define HAVE_PUSH_REGS
#     else /* !__GNUC__ */
        asm("subq.w &0x4,%sp");   /* allocate word on top of stack */
        asm("mov.l %a2,(%sp)"); asm("jsr GC_push_one");
        asm("mov.l %a3,(%sp)"); asm("jsr GC_push_one");
        asm("mov.l %a4,(%sp)"); asm("jsr GC_push_one");
        asm("mov.l %a5,(%sp)"); asm("jsr GC_push_one");
        /* Skip frame pointer and stack pointer. */
        asm("mov.l %d1,(%sp)"); asm("jsr GC_push_one");
        asm("mov.l %d2,(%sp)"); asm("jsr GC_push_one");
        asm("mov.l %d3,(%sp)"); asm("jsr GC_push_one");
        asm("mov.l %d4,(%sp)"); asm("jsr GC_push_one");
        asm("mov.l %d5,(%sp)"); asm("jsr GC_push_one");
        asm("mov.l %d6,(%sp)"); asm("jsr GC_push_one");
        asm("mov.l %d7,(%sp)"); asm("jsr GC_push_one");
        asm("addq.w &0x4,%sp");   /* put stack back where it was */
#       define HAVE_PUSH_REGS
#     endif /* !__GNUC__ */
#   endif /* M68K/SYSV */
#   if defined(PJ)
      {
        register int * sp asm ("optop");
        extern int *__libc_stack_end;

        GC_push_all_stack (sp, __libc_stack_end);
#       define HAVE_PUSH_REGS
        /* Isn't this redundant with the code to push the stack? */
      }
#   endif
    /* other machines... */
#   if !defined(HAVE_PUSH_REGS)
      --> We just generated an empty GC_push_regs, which
      --> is almost certainly broken.  Try defining
      --> USE_GENERIC_PUSH_REGS instead.
#   endif
}
#endif /* !USE_GENERIC_PUSH_REGS && !USE_ASM_PUSH_REGS */

void GC_with_callee_saves_pushed(fn, arg)
void (*fn)();
ptr_t arg;
{
    word dummy;

#   if defined(USE_GENERIC_PUSH_REGS)
#     ifdef HAVE_BUILTIN_UNWIND_INIT
        /* This was suggested by Richard Henderson as the way to */
        /* force callee-save registers and register windows onto */
        /* the stack.                                             */
        __builtin_unwind_init();
#     else /* !HAVE_BUILTIN_UNWIND_INIT */
#       if defined(RS6000) || defined(POWERPC)
          /* FIXME: RS6000 means AIX.                          */
          /* This should probably be used in all Posix/non-gcc */
          /* settings.  We defer that change to minimize risk. */
          ucontext_t ctxt;
          getcontext(&ctxt);
#       else
          /* Generic code */
          /* The idea is due to Parag Patel at HP.  */
          /* We're not sure whether he would like   */
          /* to be acknowledged for it or not.      */
          jmp_buf regs;
          register word * i = (word *) regs;
          register ptr_t lim = (ptr_t)(regs) + (sizeof regs);

          /* Setjmp doesn't always clear all of the buffer.  */
          /* That tends to preserve garbage.  Clear it.      */
          for (; (char *)i < lim; i++) {
              *i = 0;
          }
#         if defined(MSWIN32) || defined(MSWINCE) \
             || defined(UTS4) || defined(LINUX) || defined(EWS4800)
            (void) setjmp(regs);
#         else
            (void) _setjmp(regs);
            /* We don't want to mess with signals.  According to */
            /* SUSV3, setjmp() may or may not save signal mask.  */
            /* _setjmp won't, but is less portable.              */
#         endif
#       endif /* !AIX ... */
#     endif /* !HAVE_BUILTIN_UNWIND_INIT */
#   else /* !USE_GENERIC_PUSH_REGS */
#     if defined(PTHREADS) && !defined(MSWIN32)
        /* We may still need this to save thread contexts. */
        ucontext_t ctxt;
        getcontext(&ctxt);
#     else /* Shouldn't be needed */
        ABORT("Unexpected call to GC_with_callee_saves_pushed");
#     endif
#   endif
#   if (defined(SPARC) && !defined(HAVE_BUILTIN_UNWIND_INIT)) \
       || defined(IA64)
      /* On a register window machine, we need to save register */
      /* contents on the stack for this to work.  The setjmp    */
      /* is probably not needed on SPARC, since pointers are    */
      /* only stored in windowed or scratch registers.  It is   */
      /* needed on IA64, since some non-windowed registers are  */
      /* preserved.                                              */
      {
        GC_save_regs_ret_val = GC_save_regs_in_stack();
        /* On IA64 gcc, we could use __builtin_ia64_bsp() and   */
        /* __builtin_ia64_flushrs().  The latter will be done   */
        /* implicitly by __builtin_unwind_init() for gcc 3.0.1  */
        /* and later.                                            */
      }
#   endif

    fn(arg);
    /* Strongly discourage the compiler from treating the above */
    /* as a tail-call, since that would pop the register        */
    /* contents before we get a chance to look at them.         */
    GC_noop1((word)(&dummy));
}

#if defined(USE_GENERIC_PUSH_REGS)
void GC_generic_push_regs(cold_gc_frame)
ptr_t cold_gc_frame;
{
    GC_with_callee_saves_pushed(GC_push_current_stack, cold_gc_frame);
}
#endif /* USE_GENERIC_PUSH_REGS */

/* On register window machines, we need a way to force registers into */
/* the stack.  Return sp.                                              */
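/* For orientation (sketch only, not compiled): from C, the routine    */
/* defined below is an ordinary function, and its callers record the   */
/* result so the marker knows how far the flushed register contents    */
/* extend (sp on SPARC, ar.bsp on IA64).  The wrapper name here is     */
/* made up; the real call sites are GC_push_regs and                   */
/* GC_with_callee_saves_pushed above.                                  */
# if 0
  static void GC_record_flushed_regs_sketch()
  {
      GC_save_regs_ret_val = GC_save_regs_in_stack();
  }
# endif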
# ifdef SPARC
    asm(" .seg \"text\"");
#   if defined(SVR4) || defined(NETBSD) || defined(FREEBSD)
      asm(" .globl GC_save_regs_in_stack");
      asm("GC_save_regs_in_stack:");
      asm(" .type GC_save_regs_in_stack,#function");
#   else
      asm(" .globl _GC_save_regs_in_stack");
      asm("_GC_save_regs_in_stack:");
#   endif
#   if defined(__arch64__) || defined(__sparcv9)
      asm(" save %sp,-128,%sp");
      asm(" flushw");
      asm(" ret");
      asm(" restore %sp,2047+128,%o0");
#   else
      asm(" ta 0x3 ! ST_FLUSH_WINDOWS");
      asm(" retl");
      asm(" mov %sp,%o0");
#   endif
#   ifdef SVR4
      asm(" .GC_save_regs_in_stack_end:");
      asm(" .size GC_save_regs_in_stack,.GC_save_regs_in_stack_end-GC_save_regs_in_stack");
#   endif
#   ifdef LINT
      word GC_save_regs_in_stack() { return(0 /* sp really */); }
#   endif
# endif

/* On IA64, we also need to flush register windows.  But they end   */
/* up on the other side of the stack segment.                        */
/* Returns the backing store pointer for the register stack.         */
/* We now implement this as a separate assembly file, since inline   */
/* assembly code here doesn't work with either the Intel or HP       */
/* compilers.                                                         */
# if 0
#   ifdef LINUX
      asm(" .text");
      asm(" .psr abi64");
      asm(" .psr lsb");
      asm(" .lsb");
      asm("");
      asm(" .text");
      asm(" .align 16");
      asm(" .global GC_save_regs_in_stack");
      asm(" .proc GC_save_regs_in_stack");
      asm("GC_save_regs_in_stack:");
      asm(" .body");
      asm(" flushrs");
      asm(" ;;");
      asm(" mov r8=ar.bsp");
      asm(" br.ret.sptk.few rp");
      asm(" .endp GC_save_regs_in_stack");
#   endif /* LINUX */
#   if 0 /* Other alternatives that don't work on HP/UX */
      word GC_save_regs_in_stack() {
#       if USE_BUILTINS
          __builtin_ia64_flushrs();
          return __builtin_ia64_bsp();
#       else
#         ifdef HPUX
            _asm(" flushrs");
            _asm(" ;;");
            _asm(" mov r8=ar.bsp");
            _asm(" br.ret.sptk.few rp");
#         else
            asm(" flushrs");
            asm(" ;;");
            asm(" mov r8=ar.bsp");
            asm(" br.ret.sptk.few rp");
#         endif
#       endif
      }
#   endif
# endif

/* GC_clear_stack_inner(arg, limit) clears stack area up to limit and */
/* returns arg.  Stack clearing is crucial on SPARC, so we supply     */
/* an assembly version that's more careful.  Assumes limit is hotter  */
/* than sp, and limit is 8 byte aligned.                               */
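/* For readers who don't speak SPARC assembly: a hedged, portable C    */
/* sketch of the effect of the code below.  The function name and the  */
/* per-frame size are made up for this sketch; the portable version    */
/* actually used when ASM_CLEAR_CODE is not defined lives in misc.c.   */
# if 0
# define CLEAR_SKETCH_WORDS 64      /* arbitrary amount cleared per frame */
  ptr_t GC_clear_stack_inner_sketch(arg, limit)
  ptr_t arg; word limit;
  {
      volatile word dummy[CLEAR_SKETCH_WORDS];
      int i;

      /* Zero this frame's scratch area. */
      for (i = 0; i < CLEAR_SKETCH_WORDS; i++) dummy[i] = 0;
      /* Assuming a downward-growing stack, recurse until our frames   */
      /* have covered everything between the original sp and limit.    */
      if ((word)(dummy) > limit) {
          (void) GC_clear_stack_inner_sketch(arg, limit);
      }
      return(arg);                  /* first argument is the return value */
  }
# endif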
#if defined(ASM_CLEAR_CODE)
#ifndef SPARC
	--> fix it
#endif
# ifdef SUNOS4
    asm(".globl _GC_clear_stack_inner");
    asm("_GC_clear_stack_inner:");
# else
    asm(".globl GC_clear_stack_inner");
    asm("GC_clear_stack_inner:");
    asm(".type GC_clear_stack_inner,#function");
# endif
#if defined(__arch64__) || defined(__sparcv9)
    asm("mov %sp,%o2");            /* Save sp                    */
    asm("add %sp,2047-8,%o3");     /* p = sp+bias-8              */
    asm("add %o1,-2047-192,%sp");  /* Move sp out of the way,    */
                                   /* so that traps still work.  */
                                   /* Includes some extra words  */
                                   /* so we can be sloppy below. */
    asm("loop:");
    asm("stx %g0,[%o3]");          /* *(long *)p = 0             */
    asm("cmp %o3,%o1");
    asm("bgu,pt %xcc, loop");      /* if (p > limit) goto loop   */
    asm("add %o3,-8,%o3");         /* p -= 8 (delay slot)        */
    asm("retl");
    asm("mov %o2,%sp");            /* Restore sp., delay slot    */
#else
    asm("mov %sp,%o2");            /* Save sp                    */
    asm("add %sp,-8,%o3");         /* p = sp-8                   */
    asm("clr %g1");                /* [g0,g1] = 0                */
    asm("add %o1,-0x60,%sp");      /* Move sp out of the way,    */
                                   /* so that traps still work.  */
                                   /* Includes some extra words  */
                                   /* so we can be sloppy below. */
    asm("loop:");
    asm("std %g0,[%o3]");          /* *(long long *)p = 0        */
    asm("cmp %o3,%o1");
    asm("bgu loop ");              /* if (p > limit) goto loop   */
    asm("add %o3,-8,%o3");         /* p -= 8 (delay slot)        */
    asm("retl");
    asm("mov %o2,%sp");            /* Restore sp., delay slot    */
#endif /* old SPARC */
    /* First argument = %o0 = return value */
# ifdef SVR4
    asm(" .GC_clear_stack_inner_end:");
    asm(" .size GC_clear_stack_inner,.GC_clear_stack_inner_end-GC_clear_stack_inner");
# endif

# ifdef LINT
    /*ARGSUSED*/
    ptr_t GC_clear_stack_inner(arg, limit)
    ptr_t arg; word limit;
    { return(arg); }
# endif
#endif