  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Ptrace user space interface.
  4. *
  5. * Copyright IBM Corp. 1999, 2010
  6. * Author(s): Denis Joseph Barrow
  7. * Martin Schwidefsky (schwidefsky@de.ibm.com)
  8. */
  9. #include <linux/kernel.h>
  10. #include <linux/sched.h>
  11. #include <linux/sched/task_stack.h>
  12. #include <linux/mm.h>
  13. #include <linux/smp.h>
  14. #include <linux/errno.h>
  15. #include <linux/ptrace.h>
  16. #include <linux/user.h>
  17. #include <linux/security.h>
  18. #include <linux/audit.h>
  19. #include <linux/signal.h>
  20. #include <linux/elf.h>
  21. #include <linux/regset.h>
  22. #include <linux/tracehook.h>
  23. #include <linux/seccomp.h>
  24. #include <linux/compat.h>
  25. #include <trace/syscall.h>
  26. #include <asm/segment.h>
  27. #include <asm/page.h>
  28. #include <asm/pgtable.h>
  29. #include <asm/pgalloc.h>
  30. #include <linux/uaccess.h>
  31. #include <asm/unistd.h>
  32. #include <asm/switch_to.h>
  33. #include <asm/runtime_instr.h>
  34. #include <asm/facility.h>
  35. #include "entry.h"
  36. #ifdef CONFIG_COMPAT
  37. #include "compat_ptrace.h"
  38. #endif
  39. #define CREATE_TRACE_POINTS
  40. #include <trace/events/syscalls.h>
/*
 * Recalculate the hardware debug state for @task: mirror the user
 * specified PER (program event recording) registers into control
 * registers 9-11, fold in kernel single/block stepping, and maintain
 * the transactional-execution bits in cr0/cr2 and the guarded-storage
 * enable bit in cr2.  Also sets or clears the PER bit in the PSW mask
 * of the task's saved registers.
 */
void update_cr_regs(struct task_struct *task)
{
	struct pt_regs *regs = task_pt_regs(task);
	struct thread_struct *thread = &task->thread;
	struct per_regs old, new;
	union ctlreg0 cr0_old, cr0_new;
	union ctlreg2 cr2_old, cr2_new;
	int cr0_changed, cr2_changed;

	__ctl_store(cr0_old.val, 0, 0);
	__ctl_store(cr2_old.val, 2, 2);
	cr0_new = cr0_old;
	cr2_new = cr2_old;
	/* Take care of the enable/disable of transactional execution. */
	if (MACHINE_HAS_TE) {
		/* Set or clear transaction execution TXC bit 8. */
		cr0_new.tcx = 1;
		if (task->thread.per_flags & PER_FLAG_NO_TE)
			cr0_new.tcx = 0;
		/* Set or clear transaction execution TDC bits 62 and 63. */
		cr2_new.tdc = 0;
		if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND) {
			if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND_TEND)
				cr2_new.tdc = 1;
			else
				cr2_new.tdc = 2;
		}
	}
	/* Take care of enable/disable of guarded storage. */
	if (MACHINE_HAS_GS) {
		cr2_new.gse = 0;
		if (task->thread.gs_cb)
			cr2_new.gse = 1;
	}
	/* Load control register 0/2 iff changed */
	cr0_changed = cr0_new.val != cr0_old.val;
	cr2_changed = cr2_new.val != cr2_old.val;
	if (cr0_changed)
		__ctl_load(cr0_new.val, 0, 0);
	if (cr2_changed)
		__ctl_load(cr2_new.val, 2, 2);
	/* Copy user specified PER registers */
	new.control = thread->per_user.control;
	new.start = thread->per_user.start;
	new.end = thread->per_user.end;
	/* merge TIF_SINGLE_STEP into user specified PER registers. */
	if (test_tsk_thread_flag(task, TIF_SINGLE_STEP) ||
	    test_tsk_thread_flag(task, TIF_UPROBE_SINGLESTEP)) {
		/* Block stepping traps on branches, normal stepping on ifetch. */
		if (test_tsk_thread_flag(task, TIF_BLOCK_STEP))
			new.control |= PER_EVENT_BRANCH;
		else
			new.control |= PER_EVENT_IFETCH;
		new.control |= PER_CONTROL_SUSPENSION;
		new.control |= PER_EVENT_TRANSACTION_END;
		if (test_tsk_thread_flag(task, TIF_UPROBE_SINGLESTEP))
			new.control |= PER_EVENT_IFETCH;
		/* Stepping covers the whole address space. */
		new.start = 0;
		new.end = -1UL;
	}
	/* Take care of the PER enablement bit in the PSW. */
	if (!(new.control & PER_EVENT_MASK)) {
		/* No PER events requested: disable PER entirely. */
		regs->psw.mask &= ~PSW_MASK_PER;
		return;
	}
	regs->psw.mask |= PSW_MASK_PER;
	__ctl_store(old, 9, 11);
	/* Only reload cr9-cr11 if the effective PER set actually changed. */
	if (memcmp(&new, &old, sizeof(struct per_regs)) != 0)
		__ctl_load(new, 9, 11);
}
/*
 * Arm instruction-level single stepping for @task (ptrace core hook).
 * The flags are consumed by update_cr_regs() on the next context switch.
 */
void user_enable_single_step(struct task_struct *task)
{
	/* Clear block stepping first so only instruction stepping is active. */
	clear_tsk_thread_flag(task, TIF_BLOCK_STEP);
	set_tsk_thread_flag(task, TIF_SINGLE_STEP);
}
/* Disable both block stepping and single stepping for @task. */
void user_disable_single_step(struct task_struct *task)
{
	clear_tsk_thread_flag(task, TIF_BLOCK_STEP);
	clear_tsk_thread_flag(task, TIF_SINGLE_STEP);
}
/*
 * Arm block (branch) stepping for @task: with both TIF_SINGLE_STEP and
 * TIF_BLOCK_STEP set, update_cr_regs() requests PER branch events
 * instead of instruction-fetch events.
 */
void user_enable_block_step(struct task_struct *task)
{
	set_tsk_thread_flag(task, TIF_SINGLE_STEP);
	set_tsk_thread_flag(task, TIF_BLOCK_STEP);
}
/*
 * Called by kernel/ptrace.c when detaching..
 *
 * Clear all debugging related fields: the user specified PER set, the
 * recorded last PER event, the stepping flag and any pending PER trap,
 * plus the per_flags (transactional execution tweaks).
 */
void ptrace_disable(struct task_struct *task)
{
	memset(&task->thread.per_user, 0, sizeof(task->thread.per_user));
	memset(&task->thread.per_event, 0, sizeof(task->thread.per_event));
	clear_tsk_thread_flag(task, TIF_SINGLE_STEP);
	/* Drop a not-yet-delivered PER trap so it does not fire after detach. */
	clear_pt_regs_flag(task_pt_regs(task), PIF_PER_TRAP);
	task->thread.per_flags = 0;
}
  137. #define __ADDR_MASK 7
/*
 * Read one word from the per_info part of the user area.  @addr is the
 * byte offset into struct per_struct_kernel; the NULL "dummy" pointer
 * exists only to compute field offsets.  While single stepping is in
 * effect the "active" per set (the cr9-cr11 image) reports the stepping
 * setup rather than the user specified values.
 * NOTE(review): the TIF_SINGLE_STEP checks test the *current* task's
 * flag, not the child's — mirrors the historical behavior here.
 */
static inline unsigned long __peek_user_per(struct task_struct *child,
					    addr_t addr)
{
	struct per_struct_kernel *dummy = NULL;

	if (addr == (addr_t) &dummy->cr9)
		/* Control bits of the active per set. */
		return test_thread_flag(TIF_SINGLE_STEP) ?
			PER_EVENT_IFETCH : child->thread.per_user.control;
	else if (addr == (addr_t) &dummy->cr10)
		/* Start address of the active per set. */
		return test_thread_flag(TIF_SINGLE_STEP) ?
			0 : child->thread.per_user.start;
	else if (addr == (addr_t) &dummy->cr11)
		/* End address of the active per set. */
		return test_thread_flag(TIF_SINGLE_STEP) ?
			-1UL : child->thread.per_user.end;
	else if (addr == (addr_t) &dummy->bits)
		/* Single-step bit. */
		return test_thread_flag(TIF_SINGLE_STEP) ?
			(1UL << (BITS_PER_LONG - 1)) : 0;
	else if (addr == (addr_t) &dummy->starting_addr)
		/* Start address of the user specified per set. */
		return child->thread.per_user.start;
	else if (addr == (addr_t) &dummy->ending_addr)
		/* End address of the user specified per set. */
		return child->thread.per_user.end;
	else if (addr == (addr_t) &dummy->perc_atmid)
		/* PER code, ATMID and AI of the last PER trap */
		return (unsigned long)
			child->thread.per_event.cause << (BITS_PER_LONG - 16);
	else if (addr == (addr_t) &dummy->address)
		/* Address of the last PER trap */
		return child->thread.per_event.address;
	else if (addr == (addr_t) &dummy->access_id)
		/* Access id of the last PER trap */
		return (unsigned long)
			child->thread.per_event.paid << (BITS_PER_LONG - 8);
	/* Reads of padding and unknown offsets yield zero. */
	return 0;
}
/*
 * Read the word at offset addr from the user area of a process. The
 * trouble here is that the information is littered over different
 * locations. The process registers are found on the kernel stack,
 * the floating point stuff and the trace settings are stored in
 * the task structure. In addition the different structures in
 * struct user contain pad bytes that should be read as zeroes.
 * Lovely...
 */
static unsigned long __peek_user(struct task_struct *child, addr_t addr)
{
	/* NULL dummy pointer is only used to compute struct user offsets. */
	struct user *dummy = NULL;
	addr_t offset, tmp;

	if (addr < (addr_t) &dummy->regs.acrs) {
		/*
		 * psw and gprs are stored on the stack
		 */
		tmp = *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr);
		if (addr == (addr_t) &dummy->regs.psw.mask) {
			/* Return a clean psw mask. */
			tmp &= PSW_MASK_USER | PSW_MASK_RI;
			tmp |= PSW_USER_BITS;
		}
	} else if (addr < (addr_t) &dummy->regs.orig_gpr2) {
		/*
		 * access registers are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy->regs.acrs;
		/*
		 * Very special case: old & broken 64 bit gdb reading
		 * from acrs[15]. Result is a 64 bit value. Read the
		 * 32 bit acrs[15] value and shift it by 32. Sick...
		 */
		if (addr == (addr_t) &dummy->regs.acrs[15])
			tmp = ((unsigned long) child->thread.acrs[15]) << 32;
		else
			tmp = *(addr_t *)((addr_t) &child->thread.acrs + offset);
	} else if (addr == (addr_t) &dummy->regs.orig_gpr2) {
		/*
		 * orig_gpr2 is stored on the kernel stack
		 */
		tmp = (addr_t) task_pt_regs(child)->orig_gpr2;
	} else if (addr < (addr_t) &dummy->regs.fp_regs) {
		/*
		 * prevent reads of padding hole between
		 * orig_gpr2 and fp_regs on s390.
		 */
		tmp = 0;
	} else if (addr == (addr_t) &dummy->regs.fp_regs.fpc) {
		/*
		 * floating point control reg. is in the thread structure;
		 * report it in the upper 32 bits of the doubleword.
		 */
		tmp = child->thread.fpu.fpc;
		tmp <<= BITS_PER_LONG - 32;
	} else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
		/*
		 * floating point regs. are either in child->thread.fpu
		 * or the child->thread.fpu.vxrs array
		 * (each 16-byte vxr starts with the 8-byte fpr image,
		 * hence the 2*offset scaling)
		 */
		offset = addr - (addr_t) &dummy->regs.fp_regs.fprs;
		if (MACHINE_HAS_VX)
			tmp = *(addr_t *)
			       ((addr_t) child->thread.fpu.vxrs + 2*offset);
		else
			tmp = *(addr_t *)
			       ((addr_t) child->thread.fpu.fprs + offset);
	} else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
		/*
		 * Handle access to the per_info structure.
		 */
		addr -= (addr_t) &dummy->regs.per_info;
		tmp = __peek_user_per(child, addr);
	} else
		tmp = 0;

	return tmp;
}
  253. static int
  254. peek_user(struct task_struct *child, addr_t addr, addr_t data)
  255. {
  256. addr_t tmp, mask;
  257. /*
  258. * Stupid gdb peeks/pokes the access registers in 64 bit with
  259. * an alignment of 4. Programmers from hell...
  260. */
  261. mask = __ADDR_MASK;
  262. if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs &&
  263. addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2)
  264. mask = 3;
  265. if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
  266. return -EIO;
  267. tmp = __peek_user(child, addr);
  268. return put_user(tmp, (addr_t __user *) data);
  269. }
/*
 * Write one word to the per_info part of the user area.  @addr is the
 * byte offset into struct per_struct_kernel (dummy is used for offsets
 * only).
 */
static inline void __poke_user_per(struct task_struct *child,
				   addr_t addr, addr_t data)
{
	struct per_struct_kernel *dummy = NULL;

	/*
	 * There are only three fields in the per_info struct that the
	 * debugger user can write to.
	 * 1) cr9: the debugger wants to set a new PER event mask
	 * 2) starting_addr: the debugger wants to set a new starting
	 *    address to use with the PER event mask.
	 * 3) ending_addr: the debugger wants to set a new ending
	 *    address to use with the PER event mask.
	 * The user specified PER event mask and the start and end
	 * addresses are used only if single stepping is not in effect.
	 * Writes to any other field in per_info are ignored.
	 */
	if (addr == (addr_t) &dummy->cr9)
		/* PER event mask of the user specified per set. */
		child->thread.per_user.control =
			data & (PER_EVENT_MASK | PER_CONTROL_MASK);
	else if (addr == (addr_t) &dummy->starting_addr)
		/* Starting address of the user specified per set. */
		child->thread.per_user.start = data;
	else if (addr == (addr_t) &dummy->ending_addr)
		/* Ending address of the user specified per set. */
		child->thread.per_user.end = data;
}
/*
 * Write a word to the user area of a process at location addr. This
 * operation does have an additional problem compared to peek_user.
 * Stores to the program status word and on the floating point
 * control register needs to get checked for validity.
 */
static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
{
	/* NULL dummy pointer is only used to compute struct user offsets. */
	struct user *dummy = NULL;
	addr_t offset;

	if (addr < (addr_t) &dummy->regs.acrs) {
		/*
		 * psw and gprs are stored on the stack
		 */
		if (addr == (addr_t) &dummy->regs.psw.mask) {
			unsigned long mask = PSW_MASK_USER;

			mask |= is_ri_task(child) ? PSW_MASK_RI : 0;
			if ((data ^ PSW_USER_BITS) & ~mask)
				/* Invalid psw mask. */
				return -EINVAL;
			if ((data & PSW_MASK_ASC) == PSW_ASC_HOME)
				/* Invalid address-space-control bits */
				return -EINVAL;
			if ((data & PSW_MASK_EA) && !(data & PSW_MASK_BA))
				/* Invalid addressing mode bits */
				return -EINVAL;
		}
		*(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr) = data;
	} else if (addr < (addr_t) (&dummy->regs.orig_gpr2)) {
		/*
		 * access registers are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy->regs.acrs;
		/*
		 * Very special case: old & broken 64 bit gdb writing
		 * to acrs[15] with a 64 bit value. Ignore the lower
		 * half of the value and write the upper 32 bit to
		 * acrs[15]. Sick...
		 */
		if (addr == (addr_t) &dummy->regs.acrs[15])
			child->thread.acrs[15] = (unsigned int) (data >> 32);
		else
			*(addr_t *)((addr_t) &child->thread.acrs + offset) = data;
	} else if (addr == (addr_t) &dummy->regs.orig_gpr2) {
		/*
		 * orig_gpr2 is stored on the kernel stack
		 */
		task_pt_regs(child)->orig_gpr2 = data;
	} else if (addr < (addr_t) &dummy->regs.fp_regs) {
		/*
		 * prevent writes of padding hole between
		 * orig_gpr2 and fp_regs on s390.
		 */
		return 0;
	} else if (addr == (addr_t) &dummy->regs.fp_regs.fpc) {
		/*
		 * floating point control reg. is in the thread structure.
		 * The value arrives in the upper 32 bits of the doubleword;
		 * the lower 32 bits must be zero and the fpc value valid.
		 */
		if ((unsigned int) data != 0 ||
		    test_fp_ctl(data >> (BITS_PER_LONG - 32)))
			return -EINVAL;
		child->thread.fpu.fpc = data >> (BITS_PER_LONG - 32);
	} else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
		/*
		 * floating point regs. are either in child->thread.fpu
		 * or the child->thread.fpu.vxrs array
		 * (each 16-byte vxr starts with the 8-byte fpr image,
		 * hence the 2*offset scaling)
		 */
		offset = addr - (addr_t) &dummy->regs.fp_regs.fprs;
		if (MACHINE_HAS_VX)
			*(addr_t *)((addr_t)
				child->thread.fpu.vxrs + 2*offset) = data;
		else
			*(addr_t *)((addr_t)
				child->thread.fpu.fprs + offset) = data;
	} else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
		/*
		 * Handle access to the per_info structure.
		 */
		addr -= (addr_t) &dummy->regs.per_info;
		__poke_user_per(child, addr, data);
	}

	return 0;
}
  380. static int poke_user(struct task_struct *child, addr_t addr, addr_t data)
  381. {
  382. addr_t mask;
  383. /*
  384. * Stupid gdb peeks/pokes the access registers in 64 bit with
  385. * an alignment of 4. Programmers from hell indeed...
  386. */
  387. mask = __ADDR_MASK;
  388. if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs &&
  389. addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2)
  390. mask = 3;
  391. if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
  392. return -EIO;
  393. return __poke_user(child, addr, data);
  394. }
  395. long arch_ptrace(struct task_struct *child, long request,
  396. unsigned long addr, unsigned long data)
  397. {
  398. ptrace_area parea;
  399. int copied, ret;
  400. switch (request) {
  401. case PTRACE_PEEKUSR:
  402. /* read the word at location addr in the USER area. */
  403. return peek_user(child, addr, data);
  404. case PTRACE_POKEUSR:
  405. /* write the word at location addr in the USER area */
  406. return poke_user(child, addr, data);
  407. case PTRACE_PEEKUSR_AREA:
  408. case PTRACE_POKEUSR_AREA:
  409. if (copy_from_user(&parea, (void __force __user *) addr,
  410. sizeof(parea)))
  411. return -EFAULT;
  412. addr = parea.kernel_addr;
  413. data = parea.process_addr;
  414. copied = 0;
  415. while (copied < parea.len) {
  416. if (request == PTRACE_PEEKUSR_AREA)
  417. ret = peek_user(child, addr, data);
  418. else {
  419. addr_t utmp;
  420. if (get_user(utmp,
  421. (addr_t __force __user *) data))
  422. return -EFAULT;
  423. ret = poke_user(child, addr, utmp);
  424. }
  425. if (ret)
  426. return ret;
  427. addr += sizeof(unsigned long);
  428. data += sizeof(unsigned long);
  429. copied += sizeof(unsigned long);
  430. }
  431. return 0;
  432. case PTRACE_GET_LAST_BREAK:
  433. put_user(child->thread.last_break,
  434. (unsigned long __user *) data);
  435. return 0;
  436. case PTRACE_ENABLE_TE:
  437. if (!MACHINE_HAS_TE)
  438. return -EIO;
  439. child->thread.per_flags &= ~PER_FLAG_NO_TE;
  440. return 0;
  441. case PTRACE_DISABLE_TE:
  442. if (!MACHINE_HAS_TE)
  443. return -EIO;
  444. child->thread.per_flags |= PER_FLAG_NO_TE;
  445. child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND;
  446. return 0;
  447. case PTRACE_TE_ABORT_RAND:
  448. if (!MACHINE_HAS_TE || (child->thread.per_flags & PER_FLAG_NO_TE))
  449. return -EIO;
  450. switch (data) {
  451. case 0UL:
  452. child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND;
  453. break;
  454. case 1UL:
  455. child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND;
  456. child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND_TEND;
  457. break;
  458. case 2UL:
  459. child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND;
  460. child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND_TEND;
  461. break;
  462. default:
  463. return -EINVAL;
  464. }
  465. return 0;
  466. default:
  467. return ptrace_request(child, request, addr, data);
  468. }
  469. }
  470. #ifdef CONFIG_COMPAT
  471. /*
  472. * Now the fun part starts... a 31 bit program running in the
  473. * 31 bit emulation tracing another program. PTRACE_PEEKTEXT,
  474. * PTRACE_PEEKDATA, PTRACE_POKETEXT and PTRACE_POKEDATA are easy
  475. * to handle, the difference to the 64 bit versions of the requests
  476. * is that the access is done in multiples of 4 byte instead of
  477. * 8 bytes (sizeof(unsigned long) on 31/64 bit).
  478. * The ugly part are PTRACE_PEEKUSR, PTRACE_PEEKUSR_AREA,
  479. * PTRACE_POKEUSR and PTRACE_POKEUSR_AREA. If the traced program
  480. * is a 31 bit program too, the content of struct user can be
  481. * emulated. A 31 bit program peeking into the struct user of
  482. * a 64 bit program is a no-no.
  483. */
/*
 * Same as peek_user_per but for a 31 bit program.
 * @addr is the byte offset into struct compat_per_struct_kernel; the
 * NULL dummy32 pointer exists only to compute field offsets.
 */
static inline __u32 __peek_user_per_compat(struct task_struct *child,
					   addr_t addr)
{
	struct compat_per_struct_kernel *dummy32 = NULL;

	if (addr == (addr_t) &dummy32->cr9)
		/* Control bits of the active per set. */
		return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
			PER_EVENT_IFETCH : child->thread.per_user.control;
	else if (addr == (addr_t) &dummy32->cr10)
		/* Start address of the active per set. */
		return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
			0 : child->thread.per_user.start;
	else if (addr == (addr_t) &dummy32->cr11)
		/* End address of the active per set. */
		return test_thread_flag(TIF_SINGLE_STEP) ?
			PSW32_ADDR_INSN : child->thread.per_user.end;
	else if (addr == (addr_t) &dummy32->bits)
		/* Single-step bit. */
		return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
			0x80000000 : 0;
	else if (addr == (addr_t) &dummy32->starting_addr)
		/* Start address of the user specified per set. */
		return (__u32) child->thread.per_user.start;
	else if (addr == (addr_t) &dummy32->ending_addr)
		/* End address of the user specified per set. */
		return (__u32) child->thread.per_user.end;
	else if (addr == (addr_t) &dummy32->perc_atmid)
		/* PER code, ATMID and AI of the last PER trap */
		return (__u32) child->thread.per_event.cause << 16;
	else if (addr == (addr_t) &dummy32->address)
		/* Address of the last PER trap */
		return (__u32) child->thread.per_event.address;
	else if (addr == (addr_t) &dummy32->access_id)
		/* Access id of the last PER trap */
		return (__u32) child->thread.per_event.paid << 24;
	/* Reads of padding and unknown offsets yield zero. */
	return 0;
}
/*
 * Same as peek_user but for a 31 bit program.
 */
static u32 __peek_user_compat(struct task_struct *child, addr_t addr)
{
	/* NULL dummy32 pointer is only used to compute compat_user offsets. */
	struct compat_user *dummy32 = NULL;
	addr_t offset;
	__u32 tmp;

	if (addr < (addr_t) &dummy32->regs.acrs) {
		struct pt_regs *regs = task_pt_regs(child);
		/*
		 * psw and gprs are stored on the stack
		 */
		if (addr == (addr_t) &dummy32->regs.psw.mask) {
			/* Fake a 31 bit psw mask. */
			tmp = (__u32)(regs->psw.mask >> 32);
			tmp &= PSW32_MASK_USER | PSW32_MASK_RI;
			tmp |= PSW32_USER_BITS;
		} else if (addr == (addr_t) &dummy32->regs.psw.addr) {
			/* Fake a 31 bit psw address (amode bit from the mask). */
			tmp = (__u32) regs->psw.addr |
				(__u32)(regs->psw.mask & PSW_MASK_BA);
		} else {
			/*
			 * gpr 0-15: each 32 bit compat gpr is read from
			 * the low half of the 64 bit gpr, hence addr*2 + 4.
			 */
			tmp = *(__u32 *)((addr_t) &regs->psw + addr*2 + 4);
		}
	} else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) {
		/*
		 * access registers are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy32->regs.acrs;
		tmp = *(__u32*)((addr_t) &child->thread.acrs + offset);
	} else if (addr == (addr_t) (&dummy32->regs.orig_gpr2)) {
		/*
		 * orig_gpr2 is stored on the kernel stack
		 */
		tmp = *(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4);
	} else if (addr < (addr_t) &dummy32->regs.fp_regs) {
		/*
		 * prevent reads of padding hole between
		 * orig_gpr2 and fp_regs on s390.
		 */
		tmp = 0;
	} else if (addr == (addr_t) &dummy32->regs.fp_regs.fpc) {
		/*
		 * floating point control reg. is in the thread structure
		 */
		tmp = child->thread.fpu.fpc;
	} else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
		/*
		 * floating point regs. are either in child->thread.fpu
		 * or the child->thread.fpu.vxrs array
		 */
		offset = addr - (addr_t) &dummy32->regs.fp_regs.fprs;
		if (MACHINE_HAS_VX)
			tmp = *(__u32 *)
			       ((addr_t) child->thread.fpu.vxrs + 2*offset);
		else
			tmp = *(__u32 *)
			       ((addr_t) child->thread.fpu.fprs + offset);
	} else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
		/*
		 * Handle access to the per_info structure.
		 */
		addr -= (addr_t) &dummy32->regs.per_info;
		tmp = __peek_user_per_compat(child, addr);
	} else
		tmp = 0;

	return tmp;
}
  594. static int peek_user_compat(struct task_struct *child,
  595. addr_t addr, addr_t data)
  596. {
  597. __u32 tmp;
  598. if (!is_compat_task() || (addr & 3) || addr > sizeof(struct user) - 3)
  599. return -EIO;
  600. tmp = __peek_user_compat(child, addr);
  601. return put_user(tmp, (__u32 __user *) data);
  602. }
/*
 * Same as poke_user_per but for a 31 bit program.
 * Only cr9, starting_addr and ending_addr are writable; writes to any
 * other offset in the compat per_info are silently ignored.
 */
static inline void __poke_user_per_compat(struct task_struct *child,
					  addr_t addr, __u32 data)
{
	struct compat_per_struct_kernel *dummy32 = NULL;

	if (addr == (addr_t) &dummy32->cr9)
		/* PER event mask of the user specified per set. */
		child->thread.per_user.control =
			data & (PER_EVENT_MASK | PER_CONTROL_MASK);
	else if (addr == (addr_t) &dummy32->starting_addr)
		/* Starting address of the user specified per set. */
		child->thread.per_user.start = data;
	else if (addr == (addr_t) &dummy32->ending_addr)
		/* Ending address of the user specified per set. */
		child->thread.per_user.end = data;
}
/*
 * Same as poke_user but for a 31 bit program.
 */
static int __poke_user_compat(struct task_struct *child,
			      addr_t addr, addr_t data)
{
	/* NULL dummy32 pointer is only used to compute compat_user offsets. */
	struct compat_user *dummy32 = NULL;
	__u32 tmp = (__u32) data;
	addr_t offset;

	if (addr < (addr_t) &dummy32->regs.acrs) {
		struct pt_regs *regs = task_pt_regs(child);
		/*
		 * psw, gprs, acrs and orig_gpr2 are stored on the stack
		 */
		if (addr == (addr_t) &dummy32->regs.psw.mask) {
			__u32 mask = PSW32_MASK_USER;

			mask |= is_ri_task(child) ? PSW32_MASK_RI : 0;
			/* Build a 64 bit psw mask from 31 bit mask. */
			if ((tmp ^ PSW32_USER_BITS) & ~mask)
				/* Invalid psw mask. */
				return -EINVAL;
			if ((data & PSW32_MASK_ASC) == PSW32_ASC_HOME)
				/* Invalid address-space-control bits */
				return -EINVAL;
			/* Keep the BA bit, splice the 31 bit mask into the top half. */
			regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) |
				(regs->psw.mask & PSW_MASK_BA) |
				(__u64)(tmp & mask) << 32;
		} else if (addr == (addr_t) &dummy32->regs.psw.addr) {
			/* Build a 64 bit psw address from 31 bit address. */
			regs->psw.addr = (__u64) tmp & PSW32_ADDR_INSN;
			/* Transfer 31 bit amode bit to psw mask. */
			regs->psw.mask = (regs->psw.mask & ~PSW_MASK_BA) |
				(__u64)(tmp & PSW32_ADDR_AMODE);
		} else {
			/*
			 * gpr 0-15: each 32 bit compat gpr is stored in
			 * the low half of the 64 bit gpr, hence addr*2 + 4.
			 */
			*(__u32*)((addr_t) &regs->psw + addr*2 + 4) = tmp;
		}
	} else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) {
		/*
		 * access registers are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy32->regs.acrs;
		*(__u32*)((addr_t) &child->thread.acrs + offset) = tmp;
	} else if (addr == (addr_t) (&dummy32->regs.orig_gpr2)) {
		/*
		 * orig_gpr2 is stored on the kernel stack
		 */
		*(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4) = tmp;
	} else if (addr < (addr_t) &dummy32->regs.fp_regs) {
		/*
		 * prevent writes of padding hole between
		 * orig_gpr2 and fp_regs on s390.
		 */
		return 0;
	} else if (addr == (addr_t) &dummy32->regs.fp_regs.fpc) {
		/*
		 * floating point control reg. is in the thread structure
		 * (tmp == (__u32) data, validated above; store truncates
		 * to the 32 bit fpc)
		 */
		if (test_fp_ctl(tmp))
			return -EINVAL;
		child->thread.fpu.fpc = data;
	} else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
		/*
		 * floating point regs. are either in child->thread.fpu
		 * or the child->thread.fpu.vxrs array
		 * (each 16-byte vxr starts with the 8-byte fpr image,
		 * hence the 2*offset scaling)
		 */
		offset = addr - (addr_t) &dummy32->regs.fp_regs.fprs;
		if (MACHINE_HAS_VX)
			*(__u32 *)((addr_t)
				child->thread.fpu.vxrs + 2*offset) = tmp;
		else
			*(__u32 *)((addr_t)
				child->thread.fpu.fprs + offset) = tmp;
	} else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
		/*
		 * Handle access to the per_info structure.
		 */
		addr -= (addr_t) &dummy32->regs.per_info;
		__poke_user_per_compat(child, addr, data);
	}

	return 0;
}
  703. static int poke_user_compat(struct task_struct *child,
  704. addr_t addr, addr_t data)
  705. {
  706. if (!is_compat_task() || (addr & 3) ||
  707. addr > sizeof(struct compat_user) - 3)
  708. return -EIO;
  709. return __poke_user_compat(child, addr, data);
  710. }
  711. long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
  712. compat_ulong_t caddr, compat_ulong_t cdata)
  713. {
  714. unsigned long addr = caddr;
  715. unsigned long data = cdata;
  716. compat_ptrace_area parea;
  717. int copied, ret;
  718. switch (request) {
  719. case PTRACE_PEEKUSR:
  720. /* read the word at location addr in the USER area. */
  721. return peek_user_compat(child, addr, data);
  722. case PTRACE_POKEUSR:
  723. /* write the word at location addr in the USER area */
  724. return poke_user_compat(child, addr, data);
  725. case PTRACE_PEEKUSR_AREA:
  726. case PTRACE_POKEUSR_AREA:
  727. if (copy_from_user(&parea, (void __force __user *) addr,
  728. sizeof(parea)))
  729. return -EFAULT;
  730. addr = parea.kernel_addr;
  731. data = parea.process_addr;
  732. copied = 0;
  733. while (copied < parea.len) {
  734. if (request == PTRACE_PEEKUSR_AREA)
  735. ret = peek_user_compat(child, addr, data);
  736. else {
  737. __u32 utmp;
  738. if (get_user(utmp,
  739. (__u32 __force __user *) data))
  740. return -EFAULT;
  741. ret = poke_user_compat(child, addr, utmp);
  742. }
  743. if (ret)
  744. return ret;
  745. addr += sizeof(unsigned int);
  746. data += sizeof(unsigned int);
  747. copied += sizeof(unsigned int);
  748. }
  749. return 0;
  750. case PTRACE_GET_LAST_BREAK:
  751. put_user(child->thread.last_break,
  752. (unsigned int __user *) data);
  753. return 0;
  754. }
  755. return compat_ptrace_request(child, request, addr, data);
  756. }
  757. #endif
  758. asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
  759. {
  760. unsigned long mask = -1UL;
  761. /*
  762. * The sysc_tracesys code in entry.S stored the system
  763. * call number to gprs[2].
  764. */
  765. if (test_thread_flag(TIF_SYSCALL_TRACE) &&
  766. (tracehook_report_syscall_entry(regs) ||
  767. regs->gprs[2] >= NR_syscalls)) {
  768. /*
  769. * Tracing decided this syscall should not happen or the
  770. * debugger stored an invalid system call number. Skip
  771. * the system call and the system call restart handling.
  772. */
  773. clear_pt_regs_flag(regs, PIF_SYSCALL);
  774. return -1;
  775. }
  776. /* Do the secure computing check after ptrace. */
  777. if (secure_computing(NULL)) {
  778. /* seccomp failures shouldn't expose any additional code. */
  779. return -1;
  780. }
  781. if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
  782. trace_sys_enter(regs, regs->gprs[2]);
  783. if (is_compat_task())
  784. mask = 0xffffffff;
  785. audit_syscall_entry(regs->gprs[2], regs->orig_gpr2 & mask,
  786. regs->gprs[3] &mask, regs->gprs[4] &mask,
  787. regs->gprs[5] &mask);
  788. return regs->gprs[2];
  789. }
  790. asmlinkage void do_syscall_trace_exit(struct pt_regs *regs)
  791. {
  792. audit_syscall_exit(regs);
  793. if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
  794. trace_sys_exit(regs, regs->gprs[2]);
  795. if (test_thread_flag(TIF_SYSCALL_TRACE))
  796. tracehook_report_syscall_exit(regs, 0);
  797. }
  798. /*
  799. * user_regset definitions.
  800. */
  801. static int s390_regs_get(struct task_struct *target,
  802. const struct user_regset *regset,
  803. unsigned int pos, unsigned int count,
  804. void *kbuf, void __user *ubuf)
  805. {
  806. if (target == current)
  807. save_access_regs(target->thread.acrs);
  808. if (kbuf) {
  809. unsigned long *k = kbuf;
  810. while (count > 0) {
  811. *k++ = __peek_user(target, pos);
  812. count -= sizeof(*k);
  813. pos += sizeof(*k);
  814. }
  815. } else {
  816. unsigned long __user *u = ubuf;
  817. while (count > 0) {
  818. if (__put_user(__peek_user(target, pos), u++))
  819. return -EFAULT;
  820. count -= sizeof(*u);
  821. pos += sizeof(*u);
  822. }
  823. }
  824. return 0;
  825. }
  826. static int s390_regs_set(struct task_struct *target,
  827. const struct user_regset *regset,
  828. unsigned int pos, unsigned int count,
  829. const void *kbuf, const void __user *ubuf)
  830. {
  831. int rc = 0;
  832. if (target == current)
  833. save_access_regs(target->thread.acrs);
  834. if (kbuf) {
  835. const unsigned long *k = kbuf;
  836. while (count > 0 && !rc) {
  837. rc = __poke_user(target, pos, *k++);
  838. count -= sizeof(*k);
  839. pos += sizeof(*k);
  840. }
  841. } else {
  842. const unsigned long __user *u = ubuf;
  843. while (count > 0 && !rc) {
  844. unsigned long word;
  845. rc = __get_user(word, u++);
  846. if (rc)
  847. break;
  848. rc = __poke_user(target, pos, word);
  849. count -= sizeof(*u);
  850. pos += sizeof(*u);
  851. }
  852. }
  853. if (rc == 0 && target == current)
  854. restore_access_regs(target->thread.acrs);
  855. return rc;
  856. }
  857. static int s390_fpregs_get(struct task_struct *target,
  858. const struct user_regset *regset, unsigned int pos,
  859. unsigned int count, void *kbuf, void __user *ubuf)
  860. {
  861. _s390_fp_regs fp_regs;
  862. if (target == current)
  863. save_fpu_regs();
  864. fp_regs.fpc = target->thread.fpu.fpc;
  865. fpregs_store(&fp_regs, &target->thread.fpu);
  866. return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
  867. &fp_regs, 0, -1);
  868. }
  869. static int s390_fpregs_set(struct task_struct *target,
  870. const struct user_regset *regset, unsigned int pos,
  871. unsigned int count, const void *kbuf,
  872. const void __user *ubuf)
  873. {
  874. int rc = 0;
  875. freg_t fprs[__NUM_FPRS];
  876. if (target == current)
  877. save_fpu_regs();
  878. if (MACHINE_HAS_VX)
  879. convert_vx_to_fp(fprs, target->thread.fpu.vxrs);
  880. else
  881. memcpy(&fprs, target->thread.fpu.fprs, sizeof(fprs));
  882. /* If setting FPC, must validate it first. */
  883. if (count > 0 && pos < offsetof(s390_fp_regs, fprs)) {
  884. u32 ufpc[2] = { target->thread.fpu.fpc, 0 };
  885. rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ufpc,
  886. 0, offsetof(s390_fp_regs, fprs));
  887. if (rc)
  888. return rc;
  889. if (ufpc[1] != 0 || test_fp_ctl(ufpc[0]))
  890. return -EINVAL;
  891. target->thread.fpu.fpc = ufpc[0];
  892. }
  893. if (rc == 0 && count > 0)
  894. rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
  895. fprs, offsetof(s390_fp_regs, fprs), -1);
  896. if (rc)
  897. return rc;
  898. if (MACHINE_HAS_VX)
  899. convert_fp_to_vx(target->thread.fpu.vxrs, fprs);
  900. else
  901. memcpy(target->thread.fpu.fprs, &fprs, sizeof(fprs));
  902. return rc;
  903. }
  904. static int s390_last_break_get(struct task_struct *target,
  905. const struct user_regset *regset,
  906. unsigned int pos, unsigned int count,
  907. void *kbuf, void __user *ubuf)
  908. {
  909. if (count > 0) {
  910. if (kbuf) {
  911. unsigned long *k = kbuf;
  912. *k = target->thread.last_break;
  913. } else {
  914. unsigned long __user *u = ubuf;
  915. if (__put_user(target->thread.last_break, u))
  916. return -EFAULT;
  917. }
  918. }
  919. return 0;
  920. }
  921. static int s390_last_break_set(struct task_struct *target,
  922. const struct user_regset *regset,
  923. unsigned int pos, unsigned int count,
  924. const void *kbuf, const void __user *ubuf)
  925. {
  926. return 0;
  927. }
  928. static int s390_tdb_get(struct task_struct *target,
  929. const struct user_regset *regset,
  930. unsigned int pos, unsigned int count,
  931. void *kbuf, void __user *ubuf)
  932. {
  933. struct pt_regs *regs = task_pt_regs(target);
  934. unsigned char *data;
  935. if (!(regs->int_code & 0x200))
  936. return -ENODATA;
  937. data = target->thread.trap_tdb;
  938. return user_regset_copyout(&pos, &count, &kbuf, &ubuf, data, 0, 256);
  939. }
  940. static int s390_tdb_set(struct task_struct *target,
  941. const struct user_regset *regset,
  942. unsigned int pos, unsigned int count,
  943. const void *kbuf, const void __user *ubuf)
  944. {
  945. return 0;
  946. }
  947. static int s390_vxrs_low_get(struct task_struct *target,
  948. const struct user_regset *regset,
  949. unsigned int pos, unsigned int count,
  950. void *kbuf, void __user *ubuf)
  951. {
  952. __u64 vxrs[__NUM_VXRS_LOW];
  953. int i;
  954. if (!MACHINE_HAS_VX)
  955. return -ENODEV;
  956. if (target == current)
  957. save_fpu_regs();
  958. for (i = 0; i < __NUM_VXRS_LOW; i++)
  959. vxrs[i] = *((__u64 *)(target->thread.fpu.vxrs + i) + 1);
  960. return user_regset_copyout(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1);
  961. }
  962. static int s390_vxrs_low_set(struct task_struct *target,
  963. const struct user_regset *regset,
  964. unsigned int pos, unsigned int count,
  965. const void *kbuf, const void __user *ubuf)
  966. {
  967. __u64 vxrs[__NUM_VXRS_LOW];
  968. int i, rc;
  969. if (!MACHINE_HAS_VX)
  970. return -ENODEV;
  971. if (target == current)
  972. save_fpu_regs();
  973. for (i = 0; i < __NUM_VXRS_LOW; i++)
  974. vxrs[i] = *((__u64 *)(target->thread.fpu.vxrs + i) + 1);
  975. rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1);
  976. if (rc == 0)
  977. for (i = 0; i < __NUM_VXRS_LOW; i++)
  978. *((__u64 *)(target->thread.fpu.vxrs + i) + 1) = vxrs[i];
  979. return rc;
  980. }
  981. static int s390_vxrs_high_get(struct task_struct *target,
  982. const struct user_regset *regset,
  983. unsigned int pos, unsigned int count,
  984. void *kbuf, void __user *ubuf)
  985. {
  986. __vector128 vxrs[__NUM_VXRS_HIGH];
  987. if (!MACHINE_HAS_VX)
  988. return -ENODEV;
  989. if (target == current)
  990. save_fpu_regs();
  991. memcpy(vxrs, target->thread.fpu.vxrs + __NUM_VXRS_LOW, sizeof(vxrs));
  992. return user_regset_copyout(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1);
  993. }
  994. static int s390_vxrs_high_set(struct task_struct *target,
  995. const struct user_regset *regset,
  996. unsigned int pos, unsigned int count,
  997. const void *kbuf, const void __user *ubuf)
  998. {
  999. int rc;
  1000. if (!MACHINE_HAS_VX)
  1001. return -ENODEV;
  1002. if (target == current)
  1003. save_fpu_regs();
  1004. rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
  1005. target->thread.fpu.vxrs + __NUM_VXRS_LOW, 0, -1);
  1006. return rc;
  1007. }
  1008. static int s390_system_call_get(struct task_struct *target,
  1009. const struct user_regset *regset,
  1010. unsigned int pos, unsigned int count,
  1011. void *kbuf, void __user *ubuf)
  1012. {
  1013. unsigned int *data = &target->thread.system_call;
  1014. return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
  1015. data, 0, sizeof(unsigned int));
  1016. }
  1017. static int s390_system_call_set(struct task_struct *target,
  1018. const struct user_regset *regset,
  1019. unsigned int pos, unsigned int count,
  1020. const void *kbuf, const void __user *ubuf)
  1021. {
  1022. unsigned int *data = &target->thread.system_call;
  1023. return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
  1024. data, 0, sizeof(unsigned int));
  1025. }
  1026. static int s390_gs_cb_get(struct task_struct *target,
  1027. const struct user_regset *regset,
  1028. unsigned int pos, unsigned int count,
  1029. void *kbuf, void __user *ubuf)
  1030. {
  1031. struct gs_cb *data = target->thread.gs_cb;
  1032. if (!MACHINE_HAS_GS)
  1033. return -ENODEV;
  1034. if (!data)
  1035. return -ENODATA;
  1036. if (target == current)
  1037. save_gs_cb(data);
  1038. return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
  1039. data, 0, sizeof(struct gs_cb));
  1040. }
  1041. static int s390_gs_cb_set(struct task_struct *target,
  1042. const struct user_regset *regset,
  1043. unsigned int pos, unsigned int count,
  1044. const void *kbuf, const void __user *ubuf)
  1045. {
  1046. struct gs_cb gs_cb = { }, *data = NULL;
  1047. int rc;
  1048. if (!MACHINE_HAS_GS)
  1049. return -ENODEV;
  1050. if (!target->thread.gs_cb) {
  1051. data = kzalloc(sizeof(*data), GFP_KERNEL);
  1052. if (!data)
  1053. return -ENOMEM;
  1054. }
  1055. if (!target->thread.gs_cb)
  1056. gs_cb.gsd = 25;
  1057. else if (target == current)
  1058. save_gs_cb(&gs_cb);
  1059. else
  1060. gs_cb = *target->thread.gs_cb;
  1061. rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
  1062. &gs_cb, 0, sizeof(gs_cb));
  1063. if (rc) {
  1064. kfree(data);
  1065. return -EFAULT;
  1066. }
  1067. preempt_disable();
  1068. if (!target->thread.gs_cb)
  1069. target->thread.gs_cb = data;
  1070. *target->thread.gs_cb = gs_cb;
  1071. if (target == current) {
  1072. __ctl_set_bit(2, 4);
  1073. restore_gs_cb(target->thread.gs_cb);
  1074. }
  1075. preempt_enable();
  1076. return rc;
  1077. }
  1078. static int s390_gs_bc_get(struct task_struct *target,
  1079. const struct user_regset *regset,
  1080. unsigned int pos, unsigned int count,
  1081. void *kbuf, void __user *ubuf)
  1082. {
  1083. struct gs_cb *data = target->thread.gs_bc_cb;
  1084. if (!MACHINE_HAS_GS)
  1085. return -ENODEV;
  1086. if (!data)
  1087. return -ENODATA;
  1088. return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
  1089. data, 0, sizeof(struct gs_cb));
  1090. }
  1091. static int s390_gs_bc_set(struct task_struct *target,
  1092. const struct user_regset *regset,
  1093. unsigned int pos, unsigned int count,
  1094. const void *kbuf, const void __user *ubuf)
  1095. {
  1096. struct gs_cb *data = target->thread.gs_bc_cb;
  1097. if (!MACHINE_HAS_GS)
  1098. return -ENODEV;
  1099. if (!data) {
  1100. data = kzalloc(sizeof(*data), GFP_KERNEL);
  1101. if (!data)
  1102. return -ENOMEM;
  1103. target->thread.gs_bc_cb = data;
  1104. }
  1105. return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
  1106. data, 0, sizeof(struct gs_cb));
  1107. }
  1108. static bool is_ri_cb_valid(struct runtime_instr_cb *cb)
  1109. {
  1110. return (cb->rca & 0x1f) == 0 &&
  1111. (cb->roa & 0xfff) == 0 &&
  1112. (cb->rla & 0xfff) == 0xfff &&
  1113. cb->s == 1 &&
  1114. cb->k == 1 &&
  1115. cb->h == 0 &&
  1116. cb->reserved1 == 0 &&
  1117. cb->ps == 1 &&
  1118. cb->qs == 0 &&
  1119. cb->pc == 1 &&
  1120. cb->qc == 0 &&
  1121. cb->reserved2 == 0 &&
  1122. cb->key == PAGE_DEFAULT_KEY &&
  1123. cb->reserved3 == 0 &&
  1124. cb->reserved4 == 0 &&
  1125. cb->reserved5 == 0 &&
  1126. cb->reserved6 == 0 &&
  1127. cb->reserved7 == 0 &&
  1128. cb->reserved8 == 0 &&
  1129. cb->rla >= cb->roa &&
  1130. cb->rca >= cb->roa &&
  1131. cb->rca <= cb->rla+1 &&
  1132. cb->m < 3;
  1133. }
  1134. static int s390_runtime_instr_get(struct task_struct *target,
  1135. const struct user_regset *regset,
  1136. unsigned int pos, unsigned int count,
  1137. void *kbuf, void __user *ubuf)
  1138. {
  1139. struct runtime_instr_cb *data = target->thread.ri_cb;
  1140. if (!test_facility(64))
  1141. return -ENODEV;
  1142. if (!data)
  1143. return -ENODATA;
  1144. return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
  1145. data, 0, sizeof(struct runtime_instr_cb));
  1146. }
  1147. static int s390_runtime_instr_set(struct task_struct *target,
  1148. const struct user_regset *regset,
  1149. unsigned int pos, unsigned int count,
  1150. const void *kbuf, const void __user *ubuf)
  1151. {
  1152. struct runtime_instr_cb ri_cb = { }, *data = NULL;
  1153. int rc;
  1154. if (!test_facility(64))
  1155. return -ENODEV;
  1156. if (!target->thread.ri_cb) {
  1157. data = kzalloc(sizeof(*data), GFP_KERNEL);
  1158. if (!data)
  1159. return -ENOMEM;
  1160. }
  1161. if (target->thread.ri_cb) {
  1162. if (target == current)
  1163. store_runtime_instr_cb(&ri_cb);
  1164. else
  1165. ri_cb = *target->thread.ri_cb;
  1166. }
  1167. rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
  1168. &ri_cb, 0, sizeof(struct runtime_instr_cb));
  1169. if (rc) {
  1170. kfree(data);
  1171. return -EFAULT;
  1172. }
  1173. if (!is_ri_cb_valid(&ri_cb)) {
  1174. kfree(data);
  1175. return -EINVAL;
  1176. }
  1177. preempt_disable();
  1178. if (!target->thread.ri_cb)
  1179. target->thread.ri_cb = data;
  1180. *target->thread.ri_cb = ri_cb;
  1181. if (target == current)
  1182. load_runtime_instr_cb(target->thread.ri_cb);
  1183. preempt_enable();
  1184. return 0;
  1185. }
  1186. static const struct user_regset s390_regsets[] = {
  1187. {
  1188. .core_note_type = NT_PRSTATUS,
  1189. .n = sizeof(s390_regs) / sizeof(long),
  1190. .size = sizeof(long),
  1191. .align = sizeof(long),
  1192. .get = s390_regs_get,
  1193. .set = s390_regs_set,
  1194. },
  1195. {
  1196. .core_note_type = NT_PRFPREG,
  1197. .n = sizeof(s390_fp_regs) / sizeof(long),
  1198. .size = sizeof(long),
  1199. .align = sizeof(long),
  1200. .get = s390_fpregs_get,
  1201. .set = s390_fpregs_set,
  1202. },
  1203. {
  1204. .core_note_type = NT_S390_SYSTEM_CALL,
  1205. .n = 1,
  1206. .size = sizeof(unsigned int),
  1207. .align = sizeof(unsigned int),
  1208. .get = s390_system_call_get,
  1209. .set = s390_system_call_set,
  1210. },
  1211. {
  1212. .core_note_type = NT_S390_LAST_BREAK,
  1213. .n = 1,
  1214. .size = sizeof(long),
  1215. .align = sizeof(long),
  1216. .get = s390_last_break_get,
  1217. .set = s390_last_break_set,
  1218. },
  1219. {
  1220. .core_note_type = NT_S390_TDB,
  1221. .n = 1,
  1222. .size = 256,
  1223. .align = 1,
  1224. .get = s390_tdb_get,
  1225. .set = s390_tdb_set,
  1226. },
  1227. {
  1228. .core_note_type = NT_S390_VXRS_LOW,
  1229. .n = __NUM_VXRS_LOW,
  1230. .size = sizeof(__u64),
  1231. .align = sizeof(__u64),
  1232. .get = s390_vxrs_low_get,
  1233. .set = s390_vxrs_low_set,
  1234. },
  1235. {
  1236. .core_note_type = NT_S390_VXRS_HIGH,
  1237. .n = __NUM_VXRS_HIGH,
  1238. .size = sizeof(__vector128),
  1239. .align = sizeof(__vector128),
  1240. .get = s390_vxrs_high_get,
  1241. .set = s390_vxrs_high_set,
  1242. },
  1243. {
  1244. .core_note_type = NT_S390_GS_CB,
  1245. .n = sizeof(struct gs_cb) / sizeof(__u64),
  1246. .size = sizeof(__u64),
  1247. .align = sizeof(__u64),
  1248. .get = s390_gs_cb_get,
  1249. .set = s390_gs_cb_set,
  1250. },
  1251. {
  1252. .core_note_type = NT_S390_GS_BC,
  1253. .n = sizeof(struct gs_cb) / sizeof(__u64),
  1254. .size = sizeof(__u64),
  1255. .align = sizeof(__u64),
  1256. .get = s390_gs_bc_get,
  1257. .set = s390_gs_bc_set,
  1258. },
  1259. {
  1260. .core_note_type = NT_S390_RI_CB,
  1261. .n = sizeof(struct runtime_instr_cb) / sizeof(__u64),
  1262. .size = sizeof(__u64),
  1263. .align = sizeof(__u64),
  1264. .get = s390_runtime_instr_get,
  1265. .set = s390_runtime_instr_set,
  1266. },
  1267. };
  1268. static const struct user_regset_view user_s390_view = {
  1269. .name = UTS_MACHINE,
  1270. .e_machine = EM_S390,
  1271. .regsets = s390_regsets,
  1272. .n = ARRAY_SIZE(s390_regsets)
  1273. };
  1274. #ifdef CONFIG_COMPAT
  1275. static int s390_compat_regs_get(struct task_struct *target,
  1276. const struct user_regset *regset,
  1277. unsigned int pos, unsigned int count,
  1278. void *kbuf, void __user *ubuf)
  1279. {
  1280. if (target == current)
  1281. save_access_regs(target->thread.acrs);
  1282. if (kbuf) {
  1283. compat_ulong_t *k = kbuf;
  1284. while (count > 0) {
  1285. *k++ = __peek_user_compat(target, pos);
  1286. count -= sizeof(*k);
  1287. pos += sizeof(*k);
  1288. }
  1289. } else {
  1290. compat_ulong_t __user *u = ubuf;
  1291. while (count > 0) {
  1292. if (__put_user(__peek_user_compat(target, pos), u++))
  1293. return -EFAULT;
  1294. count -= sizeof(*u);
  1295. pos += sizeof(*u);
  1296. }
  1297. }
  1298. return 0;
  1299. }
  1300. static int s390_compat_regs_set(struct task_struct *target,
  1301. const struct user_regset *regset,
  1302. unsigned int pos, unsigned int count,
  1303. const void *kbuf, const void __user *ubuf)
  1304. {
  1305. int rc = 0;
  1306. if (target == current)
  1307. save_access_regs(target->thread.acrs);
  1308. if (kbuf) {
  1309. const compat_ulong_t *k = kbuf;
  1310. while (count > 0 && !rc) {
  1311. rc = __poke_user_compat(target, pos, *k++);
  1312. count -= sizeof(*k);
  1313. pos += sizeof(*k);
  1314. }
  1315. } else {
  1316. const compat_ulong_t __user *u = ubuf;
  1317. while (count > 0 && !rc) {
  1318. compat_ulong_t word;
  1319. rc = __get_user(word, u++);
  1320. if (rc)
  1321. break;
  1322. rc = __poke_user_compat(target, pos, word);
  1323. count -= sizeof(*u);
  1324. pos += sizeof(*u);
  1325. }
  1326. }
  1327. if (rc == 0 && target == current)
  1328. restore_access_regs(target->thread.acrs);
  1329. return rc;
  1330. }
  1331. static int s390_compat_regs_high_get(struct task_struct *target,
  1332. const struct user_regset *regset,
  1333. unsigned int pos, unsigned int count,
  1334. void *kbuf, void __user *ubuf)
  1335. {
  1336. compat_ulong_t *gprs_high;
  1337. gprs_high = (compat_ulong_t *)
  1338. &task_pt_regs(target)->gprs[pos / sizeof(compat_ulong_t)];
  1339. if (kbuf) {
  1340. compat_ulong_t *k = kbuf;
  1341. while (count > 0) {
  1342. *k++ = *gprs_high;
  1343. gprs_high += 2;
  1344. count -= sizeof(*k);
  1345. }
  1346. } else {
  1347. compat_ulong_t __user *u = ubuf;
  1348. while (count > 0) {
  1349. if (__put_user(*gprs_high, u++))
  1350. return -EFAULT;
  1351. gprs_high += 2;
  1352. count -= sizeof(*u);
  1353. }
  1354. }
  1355. return 0;
  1356. }
  1357. static int s390_compat_regs_high_set(struct task_struct *target,
  1358. const struct user_regset *regset,
  1359. unsigned int pos, unsigned int count,
  1360. const void *kbuf, const void __user *ubuf)
  1361. {
  1362. compat_ulong_t *gprs_high;
  1363. int rc = 0;
  1364. gprs_high = (compat_ulong_t *)
  1365. &task_pt_regs(target)->gprs[pos / sizeof(compat_ulong_t)];
  1366. if (kbuf) {
  1367. const compat_ulong_t *k = kbuf;
  1368. while (count > 0) {
  1369. *gprs_high = *k++;
  1370. *gprs_high += 2;
  1371. count -= sizeof(*k);
  1372. }
  1373. } else {
  1374. const compat_ulong_t __user *u = ubuf;
  1375. while (count > 0 && !rc) {
  1376. unsigned long word;
  1377. rc = __get_user(word, u++);
  1378. if (rc)
  1379. break;
  1380. *gprs_high = word;
  1381. *gprs_high += 2;
  1382. count -= sizeof(*u);
  1383. }
  1384. }
  1385. return rc;
  1386. }
  1387. static int s390_compat_last_break_get(struct task_struct *target,
  1388. const struct user_regset *regset,
  1389. unsigned int pos, unsigned int count,
  1390. void *kbuf, void __user *ubuf)
  1391. {
  1392. compat_ulong_t last_break;
  1393. if (count > 0) {
  1394. last_break = target->thread.last_break;
  1395. if (kbuf) {
  1396. unsigned long *k = kbuf;
  1397. *k = last_break;
  1398. } else {
  1399. unsigned long __user *u = ubuf;
  1400. if (__put_user(last_break, u))
  1401. return -EFAULT;
  1402. }
  1403. }
  1404. return 0;
  1405. }
  1406. static int s390_compat_last_break_set(struct task_struct *target,
  1407. const struct user_regset *regset,
  1408. unsigned int pos, unsigned int count,
  1409. const void *kbuf, const void __user *ubuf)
  1410. {
  1411. return 0;
  1412. }
  1413. static const struct user_regset s390_compat_regsets[] = {
  1414. {
  1415. .core_note_type = NT_PRSTATUS,
  1416. .n = sizeof(s390_compat_regs) / sizeof(compat_long_t),
  1417. .size = sizeof(compat_long_t),
  1418. .align = sizeof(compat_long_t),
  1419. .get = s390_compat_regs_get,
  1420. .set = s390_compat_regs_set,
  1421. },
  1422. {
  1423. .core_note_type = NT_PRFPREG,
  1424. .n = sizeof(s390_fp_regs) / sizeof(compat_long_t),
  1425. .size = sizeof(compat_long_t),
  1426. .align = sizeof(compat_long_t),
  1427. .get = s390_fpregs_get,
  1428. .set = s390_fpregs_set,
  1429. },
  1430. {
  1431. .core_note_type = NT_S390_SYSTEM_CALL,
  1432. .n = 1,
  1433. .size = sizeof(compat_uint_t),
  1434. .align = sizeof(compat_uint_t),
  1435. .get = s390_system_call_get,
  1436. .set = s390_system_call_set,
  1437. },
  1438. {
  1439. .core_note_type = NT_S390_LAST_BREAK,
  1440. .n = 1,
  1441. .size = sizeof(long),
  1442. .align = sizeof(long),
  1443. .get = s390_compat_last_break_get,
  1444. .set = s390_compat_last_break_set,
  1445. },
  1446. {
  1447. .core_note_type = NT_S390_TDB,
  1448. .n = 1,
  1449. .size = 256,
  1450. .align = 1,
  1451. .get = s390_tdb_get,
  1452. .set = s390_tdb_set,
  1453. },
  1454. {
  1455. .core_note_type = NT_S390_VXRS_LOW,
  1456. .n = __NUM_VXRS_LOW,
  1457. .size = sizeof(__u64),
  1458. .align = sizeof(__u64),
  1459. .get = s390_vxrs_low_get,
  1460. .set = s390_vxrs_low_set,
  1461. },
  1462. {
  1463. .core_note_type = NT_S390_VXRS_HIGH,
  1464. .n = __NUM_VXRS_HIGH,
  1465. .size = sizeof(__vector128),
  1466. .align = sizeof(__vector128),
  1467. .get = s390_vxrs_high_get,
  1468. .set = s390_vxrs_high_set,
  1469. },
  1470. {
  1471. .core_note_type = NT_S390_HIGH_GPRS,
  1472. .n = sizeof(s390_compat_regs_high) / sizeof(compat_long_t),
  1473. .size = sizeof(compat_long_t),
  1474. .align = sizeof(compat_long_t),
  1475. .get = s390_compat_regs_high_get,
  1476. .set = s390_compat_regs_high_set,
  1477. },
  1478. {
  1479. .core_note_type = NT_S390_GS_CB,
  1480. .n = sizeof(struct gs_cb) / sizeof(__u64),
  1481. .size = sizeof(__u64),
  1482. .align = sizeof(__u64),
  1483. .get = s390_gs_cb_get,
  1484. .set = s390_gs_cb_set,
  1485. },
  1486. {
  1487. .core_note_type = NT_S390_GS_BC,
  1488. .n = sizeof(struct gs_cb) / sizeof(__u64),
  1489. .size = sizeof(__u64),
  1490. .align = sizeof(__u64),
  1491. .get = s390_gs_bc_get,
  1492. .set = s390_gs_bc_set,
  1493. },
  1494. {
  1495. .core_note_type = NT_S390_RI_CB,
  1496. .n = sizeof(struct runtime_instr_cb) / sizeof(__u64),
  1497. .size = sizeof(__u64),
  1498. .align = sizeof(__u64),
  1499. .get = s390_runtime_instr_get,
  1500. .set = s390_runtime_instr_set,
  1501. },
  1502. };
  1503. static const struct user_regset_view user_s390_compat_view = {
  1504. .name = "s390",
  1505. .e_machine = EM_S390,
  1506. .regsets = s390_compat_regsets,
  1507. .n = ARRAY_SIZE(s390_compat_regsets)
  1508. };
  1509. #endif
  1510. const struct user_regset_view *task_user_regset_view(struct task_struct *task)
  1511. {
  1512. #ifdef CONFIG_COMPAT
  1513. if (test_tsk_thread_flag(task, TIF_31BIT))
  1514. return &user_s390_compat_view;
  1515. #endif
  1516. return &user_s390_view;
  1517. }
  1518. static const char *gpr_names[NUM_GPRS] = {
  1519. "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
  1520. "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
  1521. };
  1522. unsigned long regs_get_register(struct pt_regs *regs, unsigned int offset)
  1523. {
  1524. if (offset >= NUM_GPRS)
  1525. return 0;
  1526. return regs->gprs[offset];
  1527. }
  1528. int regs_query_register_offset(const char *name)
  1529. {
  1530. unsigned long offset;
  1531. if (!name || *name != 'r')
  1532. return -EINVAL;
  1533. if (kstrtoul(name + 1, 10, &offset))
  1534. return -EINVAL;
  1535. if (offset >= NUM_GPRS)
  1536. return -EINVAL;
  1537. return offset;
  1538. }
  1539. const char *regs_query_register_name(unsigned int offset)
  1540. {
  1541. if (offset >= NUM_GPRS)
  1542. return NULL;
  1543. return gpr_names[offset];
  1544. }
  1545. static int regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
  1546. {
  1547. unsigned long ksp = kernel_stack_pointer(regs);
  1548. return (addr & ~(THREAD_SIZE - 1)) == (ksp & ~(THREAD_SIZE - 1));
  1549. }
  1550. /**
  1551. * regs_get_kernel_stack_nth() - get Nth entry of the stack
  1552. * @regs:pt_regs which contains kernel stack pointer.
  1553. * @n:stack entry number.
  1554. *
  1555. * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which
  1556. * is specifined by @regs. If the @n th entry is NOT in the kernel stack,
  1557. * this returns 0.
  1558. */
  1559. unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
  1560. {
  1561. unsigned long addr;
  1562. addr = kernel_stack_pointer(regs) + n * sizeof(long);
  1563. if (!regs_within_kernel_stack(regs, addr))
  1564. return 0;
  1565. return *(unsigned long *)addr;
  1566. }