entry_32.S

/*
 * Copyright (C) 1991,1992 Linus Torvalds
 *
 * entry_32.S contains the system-call and low-level fault and trap handling routines.
 *
 * Stack layout in 'syscall_exit':
 * ptrace needs to have all registers on the stack.
 * If the order here is changed, it needs to be
 * updated in fork.c:copy_process(), signal.c:do_signal(),
 * ptrace.c and ptrace.h
 *
 *  0(%esp) - %ebx
 *  4(%esp) - %ecx
 *  8(%esp) - %edx
 *  C(%esp) - %esi
 * 10(%esp) - %edi
 * 14(%esp) - %ebp
 * 18(%esp) - %eax
 * 1C(%esp) - %ds
 * 20(%esp) - %es
 * 24(%esp) - %fs
 * 28(%esp) - %gs      saved iff !CONFIG_X86_32_LAZY_GS
 * 2C(%esp) - orig_eax
 * 30(%esp) - %eip
 * 34(%esp) - %cs
 * 38(%esp) - %eflags
 * 3C(%esp) - %oldesp
 * 40(%esp) - %oldss
 */
#include <linux/linkage.h>
#include <linux/err.h>
#include <asm/thread_info.h>
#include <asm/irqflags.h>
#include <asm/errno.h>
#include <asm/segment.h>
#include <asm/smp.h>
#include <asm/page_types.h>
#include <asm/percpu.h>
#include <asm/processor-flags.h>
#include <asm/ftrace.h>
#include <asm/irq_vectors.h>
#include <asm/cpufeature.h>
#include <asm/alternative-asm.h>
#include <asm/asm.h>
#include <asm/smap.h>

/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
#include <linux/elf-em.h>
#define AUDIT_ARCH_I386 (EM_386|__AUDIT_ARCH_LE)
#define __AUDIT_ARCH_LE 0x40000000

#ifndef CONFIG_AUDITSYSCALL
# define sysenter_audit syscall_trace_entry
# define sysexit_audit syscall_exit_work
#endif
.section .entry.text, "ax"

/*
 * We use macros for low-level operations which need to be overridden
 * for paravirtualization. The following will never clobber any registers:
 * INTERRUPT_RETURN (aka. "iret")
 * GET_CR0_INTO_EAX (aka. "movl %cr0, %eax")
 * ENABLE_INTERRUPTS_SYSEXIT (aka "sti; sysexit").
 *
 * For DISABLE_INTERRUPTS/ENABLE_INTERRUPTS (aka "cli"/"sti"), you must
 * specify what registers can be overwritten (CLBR_NONE, CLBR_EAX/EDX/ECX/ANY).
 * Allowing a register to be clobbered can shrink the paravirt replacement
 * enough to patch inline, increasing performance.
 */

#ifdef CONFIG_PREEMPT
# define preempt_stop(clobbers) DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
#else
# define preempt_stop(clobbers)
# define resume_kernel restore_all
#endif

.macro TRACE_IRQS_IRET
#ifdef CONFIG_TRACE_IRQFLAGS
        testl $X86_EFLAGS_IF, PT_EFLAGS(%esp) # interrupts off?
        jz 1f
        TRACE_IRQS_ON
1:
#endif
.endm
/*
 * User gs save/restore
 *
 * %gs is used for userland TLS; the kernel only uses it for the stack
 * canary, which gcc requires to be at %gs:20. Read the comment at the
 * top of stackprotector.h for more info.
 *
 * Local labels 98 and 99 are used.
 */
#ifdef CONFIG_X86_32_LAZY_GS

/* unfortunately push/pop can't be no-op */
.macro PUSH_GS
        pushl $0
.endm
.macro POP_GS pop=0
        addl $(4 + \pop), %esp
.endm
.macro POP_GS_EX
.endm

/* all the rest are no-op */
.macro PTGS_TO_GS
.endm
.macro PTGS_TO_GS_EX
.endm
.macro GS_TO_REG reg
.endm
.macro REG_TO_PTGS reg
.endm
.macro SET_KERNEL_GS reg
.endm

#else /* CONFIG_X86_32_LAZY_GS */

.macro PUSH_GS
        pushl %gs
.endm

.macro POP_GS pop=0
98:     popl %gs
        .if \pop <> 0
        add $\pop, %esp
        .endif
.endm
.macro POP_GS_EX
.pushsection .fixup, "ax"
99:     movl $0, (%esp)
        jmp 98b
.popsection
        _ASM_EXTABLE(98b, 99b)
.endm

.macro PTGS_TO_GS
98:     mov PT_GS(%esp), %gs
.endm
.macro PTGS_TO_GS_EX
.pushsection .fixup, "ax"
99:     movl $0, PT_GS(%esp)
        jmp 98b
.popsection
        _ASM_EXTABLE(98b, 99b)
.endm

.macro GS_TO_REG reg
        movl %gs, \reg
.endm
.macro REG_TO_PTGS reg
        movl \reg, PT_GS(%esp)
.endm
.macro SET_KERNEL_GS reg
        movl $(__KERNEL_STACK_CANARY), \reg
        movl \reg, %gs
.endm

#endif /* CONFIG_X86_32_LAZY_GS */
.macro SAVE_ALL
        cld
        PUSH_GS
        pushl %fs
        pushl %es
        pushl %ds
        pushl %eax
        pushl %ebp
        pushl %edi
        pushl %esi
        pushl %edx
        pushl %ecx
        pushl %ebx
        movl $(__USER_DS), %edx
        movl %edx, %ds
        movl %edx, %es
        movl $(__KERNEL_PERCPU), %edx
        movl %edx, %fs
        SET_KERNEL_GS %edx
.endm

.macro RESTORE_INT_REGS
        popl %ebx
        popl %ecx
        popl %edx
        popl %esi
        popl %edi
        popl %ebp
        popl %eax
.endm

.macro RESTORE_REGS pop=0
        RESTORE_INT_REGS
1:      popl %ds
2:      popl %es
3:      popl %fs
        POP_GS \pop
.pushsection .fixup, "ax"
4:      movl $0, (%esp)
        jmp 1b
5:      movl $0, (%esp)
        jmp 2b
6:      movl $0, (%esp)
        jmp 3b
.popsection
        _ASM_EXTABLE(1b, 4b)
        _ASM_EXTABLE(2b, 5b)
        _ASM_EXTABLE(3b, 6b)
        POP_GS_EX
.endm
ENTRY(ret_from_fork)
        pushl %eax
        call schedule_tail
        GET_THREAD_INFO(%ebp)
        popl %eax
        pushl $0x0202 # Reset kernel eflags
        popfl
        jmp syscall_exit
END(ret_from_fork)
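
/*
 * ret_from_kernel_thread: finish setting up a newly created kernel
 * thread and then call its function. The function pointer was saved in
 * the child's PT_EBX and its argument in PT_EBP when the thread was set
 * up; the saved return value is forced to 0 before heading to the
 * common syscall exit path.
 */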
ENTRY(ret_from_kernel_thread)
        pushl %eax
        call schedule_tail
        GET_THREAD_INFO(%ebp)
        popl %eax
        pushl $0x0202 # Reset kernel eflags
        popfl
        movl PT_EBP(%esp), %eax
        call *PT_EBX(%esp)
        movl $0, PT_EAX(%esp)
        jmp syscall_exit
ENDPROC(ret_from_kernel_thread)
/*
 * Return to user mode is not as complex as all this looks,
 * but we want the default path for a system call return to
 * go as quickly as possible, which is why some of this is
 * less clear than it otherwise should be.
 */

        # userspace resumption stub bypassing syscall exit tracing
        ALIGN
ret_from_exception:
        preempt_stop(CLBR_ANY)
ret_from_intr:
        GET_THREAD_INFO(%ebp)
#ifdef CONFIG_VM86
        movl PT_EFLAGS(%esp), %eax # mix EFLAGS and CS
        movb PT_CS(%esp), %al
        andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
#else
        /*
         * We can be coming here from a child spawned by kernel_thread().
         */
        movl PT_CS(%esp), %eax
        andl $SEGMENT_RPL_MASK, %eax
#endif
        cmpl $USER_RPL, %eax
        jb resume_kernel # not returning to v8086 or userspace

ENTRY(resume_userspace)
        LOCKDEP_SYS_EXIT
        DISABLE_INTERRUPTS(CLBR_ANY) # make sure we don't miss an interrupt
                                     # setting need_resched or sigpending
                                     # between sampling and the iret
        TRACE_IRQS_OFF
        movl TI_flags(%ebp), %ecx
        andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
                                   # int/exception return?
        jne work_pending
        jmp restore_all
END(ret_from_exception)
#ifdef CONFIG_PREEMPT
ENTRY(resume_kernel)
        DISABLE_INTERRUPTS(CLBR_ANY)
need_resched:
        cmpl $0, PER_CPU_VAR(__preempt_count)
        jnz restore_all
        testl $X86_EFLAGS_IF, PT_EFLAGS(%esp) # interrupts off (exception path) ?
        jz restore_all
        call preempt_schedule_irq
        jmp need_resched
END(resume_kernel)
#endif
/*
 * SYSENTER_RETURN points to after the SYSENTER instruction
 * in the vsyscall page. See vsyscall-sysenter.S, which defines
 * the symbol.
 */

        # SYSENTER call handler stub
ENTRY(entry_SYSENTER_32)
        movl TSS_sysenter_sp0(%esp), %esp
sysenter_past_esp:
        /*
         * Interrupts are disabled here, but we can't trace that until
         * we have enough kernel state for TRACE_IRQS_OFF to be called -
         * and we immediately enable interrupts at that point anyway.
         */
        pushl $__USER_DS
        pushl %ebp
        pushfl
        orl $X86_EFLAGS_IF, (%esp)
        pushl $__USER_CS
        /*
         * Push current_thread_info()->sysenter_return to the stack.
         * A tiny bit of offset fixup is necessary: TI_sysenter_return
         * is relative to thread_info, which is at the bottom of the
         * kernel stack page. 4*4 means the 4 words pushed above;
         * TOP_OF_KERNEL_STACK_PADDING takes us to the top of the stack;
         * and THREAD_SIZE takes us to the bottom.
         */
        pushl ((TI_sysenter_return) - THREAD_SIZE + TOP_OF_KERNEL_STACK_PADDING + 4*4)(%esp)
        pushl %eax
        SAVE_ALL
        ENABLE_INTERRUPTS(CLBR_NONE)

        /*
         * Load the potential sixth argument from user stack.
         * Careful about security.
         */
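        /*
         * At this point %ebp still holds the userspace stack pointer.
         * Anything at or above __PAGE_OFFSET-3 is rejected so that the
         * 4-byte load at (%ebp) below cannot reach into kernel addresses.
         */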
        cmpl $__PAGE_OFFSET-3, %ebp
        jae syscall_fault
        ASM_STAC
1:      movl (%ebp), %ebp
        ASM_CLAC
        movl %ebp, PT_EBP(%esp)
        _ASM_EXTABLE(1b, syscall_fault)

        GET_THREAD_INFO(%ebp)

        testl $_TIF_WORK_SYSCALL_ENTRY, TI_flags(%ebp)
        jnz sysenter_audit
sysenter_do_call:
        cmpl $(NR_syscalls), %eax
        jae sysenter_badsys
        call *sys_call_table(, %eax, 4)
sysenter_after_call:
        movl %eax, PT_EAX(%esp)
        LOCKDEP_SYS_EXIT
        DISABLE_INTERRUPTS(CLBR_ANY)
        TRACE_IRQS_OFF
        movl TI_flags(%ebp), %ecx
        testl $_TIF_ALLWORK_MASK, %ecx
        jnz sysexit_audit
sysenter_exit:
        /* if something modifies registers it must also disable sysexit */
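        /*
         * SYSEXIT resumes userspace with %edx as the new EIP and %ecx as
         * the new ESP, so load those from the saved frame here; %ebp is
         * cleared so the thread_info pointer is not leaked to userspace.
         */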
        movl PT_EIP(%esp), %edx
        movl PT_OLDESP(%esp), %ecx
        xorl %ebp, %ebp
        TRACE_IRQS_ON
1:      mov PT_FS(%esp), %fs
        PTGS_TO_GS
        ENABLE_INTERRUPTS_SYSEXIT

#ifdef CONFIG_AUDITSYSCALL
sysenter_audit:
        testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT), TI_flags(%ebp)
        jnz syscall_trace_entry
        /* movl PT_EAX(%esp), %eax already set, syscall number: 1st arg to audit */
        movl PT_EBX(%esp), %edx /* ebx/a0: 2nd arg to audit */
        /* movl PT_ECX(%esp), %ecx already set, a1: 3rd arg to audit */
        pushl PT_ESI(%esp) /* a3: 5th arg */
        pushl PT_EDX+4(%esp) /* a2: 4th arg */
        call __audit_syscall_entry
        popl %ecx /* get that remapped edx off the stack */
        popl %ecx /* get that remapped esi off the stack */
        movl PT_EAX(%esp), %eax /* reload syscall number */
        jmp sysenter_do_call

sysexit_audit:
        testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %ecx
        jnz syscall_exit_work
        TRACE_IRQS_ON
        ENABLE_INTERRUPTS(CLBR_ANY)
        movl %eax, %edx /* second arg, syscall return value */
        cmpl $-MAX_ERRNO, %eax /* is it an error ? */
        setbe %al /* 1 if so, 0 if not */
        movzbl %al, %eax /* zero-extend that */
        call __audit_syscall_exit
        DISABLE_INTERRUPTS(CLBR_ANY)
        TRACE_IRQS_OFF
        movl TI_flags(%ebp), %ecx
        testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %ecx
        jnz syscall_exit_work
        movl PT_EAX(%esp), %eax /* reload syscall return value */
        jmp sysenter_exit
#endif

.pushsection .fixup, "ax"
2:      movl $0, PT_FS(%esp)
        jmp 1b
.popsection
        _ASM_EXTABLE(1b, 2b)
        PTGS_TO_GS_EX
ENDPROC(entry_SYSENTER_32)
        # system call handler stub
ENTRY(entry_INT80_32)
        ASM_CLAC
        pushl %eax # save orig_eax
        SAVE_ALL
        GET_THREAD_INFO(%ebp)
        # system call tracing in operation / emulation
        testl $_TIF_WORK_SYSCALL_ENTRY, TI_flags(%ebp)
        jnz syscall_trace_entry
        cmpl $(NR_syscalls), %eax
        jae syscall_badsys
syscall_call:
        call *sys_call_table(, %eax, 4)
syscall_after_call:
        movl %eax, PT_EAX(%esp) # store the return value
syscall_exit:
        LOCKDEP_SYS_EXIT
        DISABLE_INTERRUPTS(CLBR_ANY) # make sure we don't miss an interrupt
                                     # setting need_resched or sigpending
                                     # between sampling and the iret
        TRACE_IRQS_OFF
        movl TI_flags(%ebp), %ecx
        testl $_TIF_ALLWORK_MASK, %ecx # current->work
        jnz syscall_exit_work

restore_all:
        TRACE_IRQS_IRET
restore_all_notrace:
#ifdef CONFIG_X86_ESPFIX32
        movl PT_EFLAGS(%esp), %eax # mix EFLAGS, SS and CS
        /*
         * Warning: PT_OLDSS(%esp) contains the wrong/random values if we
         * are returning to the kernel.
         * See comments in process.c:copy_thread() for details.
         */
        movb PT_OLDSS(%esp), %ah
        movb PT_CS(%esp), %al
        andl $(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
        cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax
        je ldt_ss # returning to user-space with LDT SS
#endif
restore_nocheck:
        RESTORE_REGS 4 # skip orig_eax/error_code
irq_return:
        INTERRUPT_RETURN

.section .fixup, "ax"
ENTRY(iret_exc)
        pushl $0 # no error code
        pushl $do_iret_error
        jmp error_code
.previous
        _ASM_EXTABLE(irq_return, iret_exc)
#ifdef CONFIG_X86_ESPFIX32
ldt_ss:
#ifdef CONFIG_PARAVIRT
        /*
         * The kernel can't run on a non-flat stack if paravirt mode
         * is active. Rather than try to fix up the high bits of
         * ESP, bypass this code entirely. This may break DOSemu
         * and/or Wine support in a paravirt VM, although the option
         * is still available to implement the setting of the high
         * 16 bits in the INTERRUPT_RETURN paravirt-op.
         */
        cmpl $0, pv_info+PARAVIRT_enabled
        jne restore_nocheck
#endif

        /*
         * Setup and switch to ESPFIX stack
         *
         * We're returning to userspace with a 16-bit stack. The CPU will not
         * restore the high word of ESP for us on executing iret... This is an
         * "official" bug of all the x86-compatible CPUs, which we can work
         * around to make dosemu and wine happy. We do this by preloading the
         * high word of ESP with the high word of the userspace ESP while
         * compensating for the offset by changing to the ESPFIX segment with
         * a base address that matches for the difference.
         */
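        /*
         * Worked example (illustrative values only): with a kernel %esp
         * of 0xc1537f50 and a userspace ESP of 0x0246ff40, the new %esp
         * becomes 0x02467f50 and the computed offset is 0xbf0d0000. Its
         * upper word (0xbf0d) is patched into bits 16..31 of the ESPFIX
         * segment base, so base + new %esp still points at the real
         * kernel stack (0xc1537f50) while the visible high word of ESP
         * now matches userspace's.
         */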
#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
        mov %esp, %edx /* load kernel esp */
        mov PT_OLDESP(%esp), %eax /* load userspace esp */
        mov %dx, %ax /* eax: new kernel esp */
        sub %eax, %edx /* offset (low word is 0) */
        shr $16, %edx
        mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
        mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
        pushl $__ESPFIX_SS
        pushl %eax /* new kernel esp */
        /*
         * Disable interrupts, but do not irqtrace this section: we
         * will soon execute iret and the tracer was already set to
         * the irqstate after the IRET:
         */
        DISABLE_INTERRUPTS(CLBR_EAX)
        lss (%esp), %esp /* switch to espfix segment */
        jmp restore_nocheck
#endif
ENDPROC(entry_INT80_32)
        # perform work that needs to be done immediately before resumption
        ALIGN
work_pending:
        testb $_TIF_NEED_RESCHED, %cl
        jz work_notifysig
work_resched:
        call schedule
        LOCKDEP_SYS_EXIT
        DISABLE_INTERRUPTS(CLBR_ANY) # make sure we don't miss an interrupt
                                     # setting need_resched or sigpending
                                     # between sampling and the iret
        TRACE_IRQS_OFF
        movl TI_flags(%ebp), %ecx
        andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
                                   # than syscall tracing?
        jz restore_all
        testb $_TIF_NEED_RESCHED, %cl
        jnz work_resched

work_notifysig: # deal with pending signals and
                # notify-resume requests
#ifdef CONFIG_VM86
        testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
        movl %esp, %eax
        jnz work_notifysig_v86 # returning to kernel-space or
                               # vm86-space
1:
#else
        movl %esp, %eax
#endif
        TRACE_IRQS_ON
        ENABLE_INTERRUPTS(CLBR_NONE)
        movb PT_CS(%esp), %bl
        andb $SEGMENT_RPL_MASK, %bl
        cmpb $USER_RPL, %bl
        jb resume_kernel
        xorl %edx, %edx
        call do_notify_resume
        jmp resume_userspace

#ifdef CONFIG_VM86
        ALIGN
work_notifysig_v86:
        pushl %ecx # save ti_flags for do_notify_resume
        call save_v86_state # %eax contains pt_regs pointer
        popl %ecx
        movl %eax, %esp
        jmp 1b
#endif
END(work_pending)
        # perform syscall entry tracing
        ALIGN
syscall_trace_entry:
        movl $-ENOSYS, PT_EAX(%esp)
        movl %esp, %eax
        call syscall_trace_enter
        /* What it returned is what we'll actually use. */
        cmpl $(NR_syscalls), %eax
        jnae syscall_call
        jmp syscall_exit
END(syscall_trace_entry)
        # perform syscall exit tracing
        ALIGN
syscall_exit_work:
        testl $_TIF_WORK_SYSCALL_EXIT, %ecx
        jz work_pending
        TRACE_IRQS_ON
        ENABLE_INTERRUPTS(CLBR_ANY) # could let syscall_trace_leave() call
                                    # schedule() instead
        movl %esp, %eax
        call syscall_trace_leave
        jmp resume_userspace
END(syscall_exit_work)

syscall_fault:
        ASM_CLAC
        GET_THREAD_INFO(%ebp)
        movl $-EFAULT, PT_EAX(%esp)
        jmp resume_userspace
END(syscall_fault)

syscall_badsys:
        movl $-ENOSYS, %eax
        jmp syscall_after_call
END(syscall_badsys)

sysenter_badsys:
        movl $-ENOSYS, %eax
        jmp sysenter_after_call
END(sysenter_badsys)
.macro FIXUP_ESPFIX_STACK
/*
 * Switch back from the ESPFIX stack to the normal zero-based stack
 *
 * We can't call C functions using the ESPFIX stack. This code reads
 * the high word of the segment base from the GDT and switches to the
 * normal stack and adjusts ESP with the matching offset.
 */
#ifdef CONFIG_X86_ESPFIX32
        /* fixup the stack */
        mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
        mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
        shl $16, %eax
        addl %esp, %eax /* the adjusted stack pointer */
        pushl $__KERNEL_DS
        pushl %eax
        lss (%esp), %esp /* switch to the normal stack segment */
#endif
.endm

.macro UNWIND_ESPFIX_STACK
#ifdef CONFIG_X86_ESPFIX32
        movl %ss, %eax
        /* see if on espfix stack */
        cmpw $__ESPFIX_SS, %ax
        jne 27f
        movl $__KERNEL_DS, %eax
        movl %eax, %ds
        movl %eax, %es
        /* switch to normal stack */
        FIXUP_ESPFIX_STACK
27:
#endif
.endm
/*
 * Build the entry stubs with some assembler magic.
 * We pack 1 stub into every 8-byte block.
 */
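/*
 * Each stub is just "pushl $(~vector+0x80); jmp common_interrupt".
 * Keeping the pushed immediate in signed-byte range lets the assembler
 * use the short two-byte push encoding, which is what makes a stub
 * (plus padding) fit in 8 bytes; common_interrupt below subtracts the
 * 0x80 again, recovering ~vector, a value in the [-256, -1] range.
 */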
        .align 8
ENTRY(irq_entries_start)
        vector=FIRST_EXTERNAL_VECTOR
        .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
        pushl $(~vector+0x80) /* Note: always in signed byte range */
        vector=vector+1
        jmp common_interrupt
        .align 8
        .endr
END(irq_entries_start)

/*
 * the CPU automatically disables interrupts when executing an IRQ vector,
 * so IRQ-flags tracing has to follow that:
 */
        .p2align CONFIG_X86_L1_CACHE_SHIFT
common_interrupt:
        ASM_CLAC
        addl $-0x80, (%esp) /* Adjust vector into the [-256, -1] range */
        SAVE_ALL
        TRACE_IRQS_OFF
        movl %esp, %eax
        call do_IRQ
        jmp ret_from_intr
ENDPROC(common_interrupt)

#define BUILD_INTERRUPT3(name, nr, fn) \
ENTRY(name) \
        ASM_CLAC; \
        pushl $~(nr); \
        SAVE_ALL; \
        TRACE_IRQS_OFF \
        movl %esp, %eax; \
        call fn; \
        jmp ret_from_intr; \
ENDPROC(name)

#ifdef CONFIG_TRACING
# define TRACE_BUILD_INTERRUPT(name, nr) BUILD_INTERRUPT3(trace_##name, nr, smp_trace_##name)
#else
# define TRACE_BUILD_INTERRUPT(name, nr)
#endif

#define BUILD_INTERRUPT(name, nr) \
        BUILD_INTERRUPT3(name, nr, smp_##name); \
        TRACE_BUILD_INTERRUPT(name, nr)

/* The include is where all of the SMP etc. interrupts come from */
#include <asm/entry_arch.h>
ENTRY(coprocessor_error)
        ASM_CLAC
        pushl $0
        pushl $do_coprocessor_error
        jmp error_code
END(coprocessor_error)

ENTRY(simd_coprocessor_error)
        ASM_CLAC
        pushl $0
#ifdef CONFIG_X86_INVD_BUG
        /* AMD 486 bug: invd from userspace calls exception 19 instead of #GP */
        ALTERNATIVE "pushl $do_general_protection", \
                    "pushl $do_simd_coprocessor_error", \
                    X86_FEATURE_XMM
#else
        pushl $do_simd_coprocessor_error
#endif
        jmp error_code
END(simd_coprocessor_error)

ENTRY(device_not_available)
        ASM_CLAC
        pushl $-1 # mark this as an int
        pushl $do_device_not_available
        jmp error_code
END(device_not_available)

#ifdef CONFIG_PARAVIRT
ENTRY(native_iret)
        iret
        _ASM_EXTABLE(native_iret, iret_exc)
END(native_iret)

ENTRY(native_irq_enable_sysexit)
        sti
        sysexit
END(native_irq_enable_sysexit)
#endif

ENTRY(overflow)
        ASM_CLAC
        pushl $0
        pushl $do_overflow
        jmp error_code
END(overflow)

ENTRY(bounds)
        ASM_CLAC
        pushl $0
        pushl $do_bounds
        jmp error_code
END(bounds)

ENTRY(invalid_op)
        ASM_CLAC
        pushl $0
        pushl $do_invalid_op
        jmp error_code
END(invalid_op)

ENTRY(coprocessor_segment_overrun)
        ASM_CLAC
        pushl $0
        pushl $do_coprocessor_segment_overrun
        jmp error_code
END(coprocessor_segment_overrun)

ENTRY(invalid_TSS)
        ASM_CLAC
        pushl $do_invalid_TSS
        jmp error_code
END(invalid_TSS)

ENTRY(segment_not_present)
        ASM_CLAC
        pushl $do_segment_not_present
        jmp error_code
END(segment_not_present)

ENTRY(stack_segment)
        ASM_CLAC
        pushl $do_stack_segment
        jmp error_code
END(stack_segment)

ENTRY(alignment_check)
        ASM_CLAC
        pushl $do_alignment_check
        jmp error_code
END(alignment_check)

ENTRY(divide_error)
        ASM_CLAC
        pushl $0 # no error code
        pushl $do_divide_error
        jmp error_code
END(divide_error)

#ifdef CONFIG_X86_MCE
ENTRY(machine_check)
        ASM_CLAC
        pushl $0
        pushl machine_check_vector
        jmp error_code
END(machine_check)
#endif

ENTRY(spurious_interrupt_bug)
        ASM_CLAC
        pushl $0
        pushl $do_spurious_interrupt_bug
        jmp error_code
END(spurious_interrupt_bug)
#ifdef CONFIG_XEN
/*
 * Xen doesn't set %esp to be precisely what the normal SYSENTER
 * entry point expects, so fix it up before using the normal path.
 */
ENTRY(xen_sysenter_target)
        addl $5*4, %esp /* remove xen-provided frame */
        jmp sysenter_past_esp

ENTRY(xen_hypervisor_callback)
        pushl $-1 /* orig_ax = -1 => not a system call */
        SAVE_ALL
        TRACE_IRQS_OFF
        /*
         * Check to see if we got the event in the critical
         * region in xen_iret_direct, after we've reenabled
         * events and checked for pending events. This simulates
         * iret instruction's behaviour where it delivers a
         * pending interrupt when enabling interrupts:
         */
        movl PT_EIP(%esp), %eax
        cmpl $xen_iret_start_crit, %eax
        jb 1f
        cmpl $xen_iret_end_crit, %eax
        jae 1f
        jmp xen_iret_crit_fixup

ENTRY(xen_do_upcall)
1:      mov %esp, %eax
        call xen_evtchn_do_upcall
#ifndef CONFIG_PREEMPT
        call xen_maybe_preempt_hcall
#endif
        jmp ret_from_intr
ENDPROC(xen_hypervisor_callback)

/*
 * Hypervisor uses this for application faults while it executes.
 * We get here for two reasons:
 *  1. Fault while reloading DS, ES, FS or GS
 *  2. Fault while executing IRET
 * Category 1 we fix up by reattempting the load, and zeroing the segment
 * register if the load fails.
 * Category 2 we fix up by jumping to do_iret_error. We cannot use the
 * normal Linux return path in this case because if we use the IRET hypercall
 * to pop the stack frame we end up in an infinite loop of failsafe callbacks.
 * We distinguish between categories by maintaining a status value in EAX.
 */
ENTRY(xen_failsafe_callback)
        pushl %eax
        movl $1, %eax
1:      mov 4(%esp), %ds
2:      mov 8(%esp), %es
3:      mov 12(%esp), %fs
4:      mov 16(%esp), %gs
        /* EAX == 0 => Category 1 (Bad segment)
           EAX != 0 => Category 2 (Bad IRET) */
        testl %eax, %eax
        popl %eax
        lea 16(%esp), %esp
        jz 5f
        jmp iret_exc
5:      pushl $-1 /* orig_ax = -1 => not a system call */
        SAVE_ALL
        jmp ret_from_exception

.section .fixup, "ax"
6:      xorl %eax, %eax
        movl %eax, 4(%esp)
        jmp 1b
7:      xorl %eax, %eax
        movl %eax, 8(%esp)
        jmp 2b
8:      xorl %eax, %eax
        movl %eax, 12(%esp)
        jmp 3b
9:      xorl %eax, %eax
        movl %eax, 16(%esp)
        jmp 4b
.previous
        _ASM_EXTABLE(1b, 6b)
        _ASM_EXTABLE(2b, 7b)
        _ASM_EXTABLE(3b, 8b)
        _ASM_EXTABLE(4b, 9b)
ENDPROC(xen_failsafe_callback)

BUILD_INTERRUPT3(xen_hvm_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
                 xen_evtchn_do_upcall)

#endif /* CONFIG_XEN */

#if IS_ENABLED(CONFIG_HYPERV)
BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
                 hyperv_vector_handler)
#endif /* CONFIG_HYPERV */
#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE

ENTRY(mcount)
        ret
END(mcount)

ENTRY(ftrace_caller)
        pushl %eax
        pushl %ecx
        pushl %edx
        pushl $0 /* Pass NULL as regs pointer */
        movl 4*4(%esp), %eax
        movl 0x4(%ebp), %edx
        movl function_trace_op, %ecx
        subl $MCOUNT_INSN_SIZE, %eax

.globl ftrace_call
ftrace_call:
        call ftrace_stub

        addl $4, %esp /* skip NULL pointer */
        popl %edx
        popl %ecx
        popl %eax
ftrace_ret:
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl ftrace_graph_call
ftrace_graph_call:
        jmp ftrace_stub
#endif

.globl ftrace_stub
ftrace_stub:
        ret
END(ftrace_caller)
ENTRY(ftrace_regs_caller)
        pushf /* push flags before compare (in cs location) */
        /*
         * i386 does not save SS and ESP when coming from kernel.
         * Instead, to get sp, &regs->sp is used (see ptrace.h).
         * Unfortunately, that means eflags must be at the same location
         * as the current return ip is. We move the return ip into the
         * ip location, and move flags into the return ip location.
         */
        pushl 4(%esp) /* save return ip into ip slot */
        pushl $0 /* Load 0 into orig_ax */
        pushl %gs
        pushl %fs
        pushl %es
        pushl %ds
        pushl %eax
        pushl %ebp
        pushl %edi
        pushl %esi
        pushl %edx
        pushl %ecx
        pushl %ebx

        movl 13*4(%esp), %eax /* Get the saved flags */
        movl %eax, 14*4(%esp) /* Move saved flags into regs->flags location */
                              /* clobbering return ip */
        movl $__KERNEL_CS, 13*4(%esp)

        movl 12*4(%esp), %eax /* Load ip (1st parameter) */
        subl $MCOUNT_INSN_SIZE, %eax /* Adjust ip */
        movl 0x4(%ebp), %edx /* Load parent ip (2nd parameter) */
        movl function_trace_op, %ecx /* Save ftrace_pos in 3rd parameter */
        pushl %esp /* Save pt_regs as 4th parameter */

GLOBAL(ftrace_regs_call)
        call ftrace_stub

        addl $4, %esp /* Skip pt_regs */
        movl 14*4(%esp), %eax /* Move flags back into cs */
        movl %eax, 13*4(%esp) /* Needed to keep addl from modifying flags */
        movl 12*4(%esp), %eax /* Get return ip from regs->ip */
        movl %eax, 14*4(%esp) /* Put return ip back for ret */

        popl %ebx
        popl %ecx
        popl %edx
        popl %esi
        popl %edi
        popl %ebp
        popl %eax
        popl %ds
        popl %es
        popl %fs
        popl %gs
        addl $8, %esp /* Skip orig_ax and ip */
        popf /* Pop flags at end (no addl to corrupt flags) */
        jmp ftrace_ret

        popf
        jmp ftrace_stub
#else /* ! CONFIG_DYNAMIC_FTRACE */

ENTRY(mcount)
        cmpl $__PAGE_OFFSET, %esp
        jb ftrace_stub /* Paging not enabled yet? */

        cmpl $ftrace_stub, ftrace_trace_function
        jnz trace
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        cmpl $ftrace_stub, ftrace_graph_return
        jnz ftrace_graph_caller

        cmpl $ftrace_graph_entry_stub, ftrace_graph_entry
        jnz ftrace_graph_caller
#endif
.globl ftrace_stub
ftrace_stub:
        ret

        /* taken from glibc */
trace:
        pushl %eax
        pushl %ecx
        pushl %edx
        movl 0xc(%esp), %eax
        movl 0x4(%ebp), %edx
        subl $MCOUNT_INSN_SIZE, %eax

        call *ftrace_trace_function

        popl %edx
        popl %ecx
        popl %eax
        jmp ftrace_stub
END(mcount)
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_TRACER */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller)
        pushl %eax
        pushl %ecx
        pushl %edx
        movl 0xc(%esp), %eax
        lea 0x4(%ebp), %edx
        movl (%ebp), %ecx
        subl $MCOUNT_INSN_SIZE, %eax
        call prepare_ftrace_return
        popl %edx
        popl %ecx
        popl %eax
        ret
END(ftrace_graph_caller)

.globl return_to_handler
return_to_handler:
        pushl %eax
        pushl %edx
        movl %ebp, %eax
        call ftrace_return_to_handler
        movl %eax, %ecx
        popl %edx
        popl %eax
        jmp *%ecx
#endif

#ifdef CONFIG_TRACING
ENTRY(trace_page_fault)
        ASM_CLAC
        pushl $trace_do_page_fault
        jmp error_code
END(trace_page_fault)
#endif
ENTRY(page_fault)
        ASM_CLAC
        pushl $do_page_fault
        ALIGN
error_code:
        /* the function address is in %gs's slot on the stack */
        pushl %fs
        pushl %es
        pushl %ds
        pushl %eax
        pushl %ebp
        pushl %edi
        pushl %esi
        pushl %edx
        pushl %ecx
        pushl %ebx
        cld
        movl $(__KERNEL_PERCPU), %ecx
        movl %ecx, %fs
        UNWIND_ESPFIX_STACK
        GS_TO_REG %ecx
        movl PT_GS(%esp), %edi # get the function address
        movl PT_ORIG_EAX(%esp), %edx # get the error code
        movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
        REG_TO_PTGS %ecx
        SET_KERNEL_GS %ecx
        movl $(__USER_DS), %ecx
        movl %ecx, %ds
        movl %ecx, %es
        TRACE_IRQS_OFF
        movl %esp, %eax # pt_regs pointer
        call *%edi
        jmp ret_from_exception
END(page_fault)
/*
 * Debug traps and NMI can happen at the one SYSENTER instruction
 * that sets up the real kernel stack. Check here, since we can't
 * allow the wrong stack to be used.
 *
 * "TSS_sysenter_sp0+12" is because the NMI/debug handler will have
 * already pushed 3 words if it hits on the sysenter instruction:
 * eflags, cs and eip.
 *
 * We just load the right stack, and push the three (known) values
 * by hand onto the new stack - while updating the return eip past
 * the instruction that would have done it for sysenter.
 */
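/*
 * The offset argument below is 12 when only the debug/NMI handler's own
 * three-word frame sits on top of the sysenter stack, and 24 in the
 * NMI-during-debug case further down, where the debug exception had
 * already pushed its own three words before the NMI hit.
 */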
.macro FIX_STACK offset ok label
        cmpw $__KERNEL_CS, 4(%esp)
        jne \ok
\label:
        movl TSS_sysenter_sp0 + \offset(%esp), %esp
        pushfl
        pushl $__KERNEL_CS
        pushl $sysenter_past_esp
.endm

ENTRY(debug)
        ASM_CLAC
        cmpl $entry_SYSENTER_32, (%esp)
        jne debug_stack_correct
        FIX_STACK 12, debug_stack_correct, debug_esp_fix_insn
debug_stack_correct:
        pushl $-1 # mark this as an int
        SAVE_ALL
        TRACE_IRQS_OFF
        xorl %edx, %edx # error code 0
        movl %esp, %eax # pt_regs pointer
        call do_debug
        jmp ret_from_exception
END(debug)
/*
 * NMI is doubly nasty. It can happen _while_ we're handling
 * a debug fault, and the debug fault hasn't yet been able to
 * clear up the stack. So we first check whether we got an
 * NMI on the sysenter entry path, but after that we need to
 * check whether we got an NMI on the debug path where the debug
 * fault happened on the sysenter path.
 */
ENTRY(nmi)
        ASM_CLAC
#ifdef CONFIG_X86_ESPFIX32
        pushl %eax
        movl %ss, %eax
        cmpw $__ESPFIX_SS, %ax
        popl %eax
        je nmi_espfix_stack
#endif
        cmpl $entry_SYSENTER_32, (%esp)
        je nmi_stack_fixup
        pushl %eax
        movl %esp, %eax
        /*
         * Do not access memory above the end of our stack page,
         * it might not exist.
         */
        andl $(THREAD_SIZE-1), %eax
        cmpl $(THREAD_SIZE-20), %eax
        popl %eax
        jae nmi_stack_correct
        cmpl $entry_SYSENTER_32, 12(%esp)
        je nmi_debug_stack_check
nmi_stack_correct:
        pushl %eax
        SAVE_ALL
        xorl %edx, %edx # zero error code
        movl %esp, %eax # pt_regs pointer
        call do_nmi
        jmp restore_all_notrace

nmi_stack_fixup:
        FIX_STACK 12, nmi_stack_correct, 1
        jmp nmi_stack_correct

nmi_debug_stack_check:
        cmpw $__KERNEL_CS, 16(%esp)
        jne nmi_stack_correct
        cmpl $debug, (%esp)
        jb nmi_stack_correct
        cmpl $debug_esp_fix_insn, (%esp)
        ja nmi_stack_correct
        FIX_STACK 24, nmi_stack_correct, 1
        jmp nmi_stack_correct
#ifdef CONFIG_X86_ESPFIX32
nmi_espfix_stack:
        /*
         * Create a far pointer back to the espfix stack so we can
         * lss back to it once the NMI has been handled.
         */
        pushl %ss
        pushl %esp
        addl $4, (%esp)
        /* copy the iret frame of 12 bytes */
        .rept 3
        pushl 16(%esp)
        .endr
        pushl %eax
        SAVE_ALL
        FIXUP_ESPFIX_STACK # %eax == %esp
        xorl %edx, %edx # zero error code
        call do_nmi
        RESTORE_REGS
        lss 12+4(%esp), %esp # back to espfix stack
        jmp irq_return
#endif
END(nmi)
ENTRY(int3)
        ASM_CLAC
        pushl $-1 # mark this as an int
        SAVE_ALL
        TRACE_IRQS_OFF
        xorl %edx, %edx # zero error code
        movl %esp, %eax # pt_regs pointer
        call do_int3
        jmp ret_from_exception
END(int3)

ENTRY(general_protection)
        pushl $do_general_protection
        jmp error_code
END(general_protection)

#ifdef CONFIG_KVM_GUEST
ENTRY(async_page_fault)
        ASM_CLAC
        pushl $do_async_page_fault
        jmp error_code
END(async_page_fault)
#endif