aarch64-dis.c

  1. /* aarch64-dis.c -- AArch64 disassembler.
  2. Copyright (C) 2009-2015 Free Software Foundation, Inc.
  3. Contributed by ARM Ltd.
  4. This file is part of the GNU opcodes library.
  5. This library is free software; you can redistribute it and/or modify
  6. it under the terms of the GNU General Public License as published by
  7. the Free Software Foundation; either version 3, or (at your option)
  8. any later version.
  9. It is distributed in the hope that it will be useful, but WITHOUT
  10. ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
  11. or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
  12. License for more details.
  13. You should have received a copy of the GNU General Public License
  14. along with this program; see the file COPYING3. If not,
  15. see <http://www.gnu.org/licenses/>. */
  16. #include "sysdep.h"
  17. #include "bfd_stdint.h"
  18. #include "dis-asm.h"
  19. #include "libiberty.h"
  20. #include "opintl.h"
  21. #include "aarch64-dis.h"
  22. #include "elf-bfd.h"
  23. #define ERR_OK 0
  24. #define ERR_UND -1
  25. #define ERR_UNP -3
  26. #define ERR_NYI -5
  27. #define INSNLEN 4
  28. /* Cached mapping symbol state. */
  29. enum map_type
  30. {
  31. MAP_INSN,
  32. MAP_DATA
  33. };
  34. static enum map_type last_type;
  35. static int last_mapping_sym = -1;
  36. static bfd_vma last_mapping_addr = 0;
  37. /* Other options */
  38. static int no_aliases = 0; /* If set, disassemble as the most general instruction. */
  39. static void
  40. set_default_aarch64_dis_options (struct disassemble_info *info ATTRIBUTE_UNUSED)
  41. {
  42. }
  43. static void
  44. parse_aarch64_dis_option (const char *option, unsigned int len ATTRIBUTE_UNUSED)
  45. {
  46. /* Try to match options that are simple flags */
  47. if (CONST_STRNEQ (option, "no-aliases"))
  48. {
  49. no_aliases = 1;
  50. return;
  51. }
  52. if (CONST_STRNEQ (option, "aliases"))
  53. {
  54. no_aliases = 0;
  55. return;
  56. }
  57. #ifdef DEBUG_AARCH64
  58. if (CONST_STRNEQ (option, "debug_dump"))
  59. {
  60. debug_dump = 1;
  61. return;
  62. }
  63. #endif /* DEBUG_AARCH64 */
  64. /* Invalid option. */
  65. fprintf (stderr, _("Unrecognised disassembler option: %s\n"), option);
  66. }
  67. static void
  68. parse_aarch64_dis_options (const char *options)
  69. {
  70. const char *option_end;
  71. if (options == NULL)
  72. return;
  73. while (*options != '\0')
  74. {
  75. /* Skip empty options. */
  76. if (*options == ',')
  77. {
  78. options++;
  79. continue;
  80. }
  81. /* We know that *options is neither NUL nor a comma. */
  82. option_end = options + 1;
  83. while (*option_end != ',' && *option_end != '\0')
  84. option_end++;
  85. parse_aarch64_dis_option (options, option_end - options);
  86. /* Go on to the next one. If option_end points to a comma, it
  87. will be skipped above. */
  88. options = option_end;
  89. }
  90. }
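/* Editor's note (illustrative, not part of the upstream source): these
   options normally reach the disassembler via the -M switch of objdump,
   e.g. "objdump -d -M no-aliases foo.o"; a comma-separated string such as
   "no-aliases,debug_dump" is split by the loop above and each piece is
   handed to parse_aarch64_dis_option.  */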
  91. /* Functions doing the instruction disassembling. */
  92. /* The unnamed arguments consist of the number of fields and information about
  93. these fields where the VALUE will be extracted from CODE and returned.
  94. MASK can be zero or the base mask of the opcode.
  95. N.B. the fields are required to be in such an order that the most significant
  96. field for VALUE comes first, e.g. the <index> in
  97. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
  98. is in some cases encoded in H:L:M; in that case the fields should be passed in
  99. the order H, L, M. */
  100. static inline aarch64_insn
  101. extract_fields (aarch64_insn code, aarch64_insn mask, ...)
  102. {
  103. uint32_t num;
  104. const aarch64_field *field;
  105. enum aarch64_field_kind kind;
  106. va_list va;
  107. va_start (va, mask);
  108. num = va_arg (va, uint32_t);
  109. assert (num <= 5);
  110. aarch64_insn value = 0x0;
  111. while (num--)
  112. {
  113. kind = va_arg (va, enum aarch64_field_kind);
  114. field = &fields[kind];
  115. value <<= field->width;
  116. value |= extract_field (kind, code, mask);
  117. }
  118. return value;
  119. }
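/* Editor's illustrative sketch (not in the upstream file): extracting the
   lane index encoded in H:L:M, most significant field first, as required
   by the extract_fields contract documented above.  */
#if 0
static aarch64_insn
example_extract_hlm (aarch64_insn code)
{
  /* Returns (H << 2) | (L << 1) | M.  */
  return extract_fields (code, 0, 3, FLD_H, FLD_L, FLD_M);
}
#endif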
  120. /* Sign-extend bit I of VALUE. */
  121. static inline int32_t
  122. sign_extend (aarch64_insn value, unsigned i)
  123. {
  124. uint32_t ret = value;
  125. assert (i < 32);
  126. if ((value >> i) & 0x1)
  127. {
  128. uint32_t val = (uint32_t)(-1) << i;
  129. ret = ret | val;
  130. }
  131. return (int32_t) ret;
  132. }
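/* Editor's worked example (not in the upstream file): for a 9-bit signed
   field, sign_extend (0x1f0, 8) sees bit 8 set, ORs in 0xffffff00 and
   returns -16, while sign_extend (0x070, 8) returns 112 unchanged.  */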
  133. /* N.B. the following inline helper functions create a dependency on the
  134. order of operand qualifier enumerators. */
  135. /* Given VALUE, return qualifier for a general purpose register. */
  136. static inline enum aarch64_opnd_qualifier
  137. get_greg_qualifier_from_value (aarch64_insn value)
  138. {
  139. enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_W + value;
  140. assert (value <= 0x1
  141. && aarch64_get_qualifier_standard_value (qualifier) == value);
  142. return qualifier;
  143. }
  144. /* Given VALUE, return qualifier for a vector register. */
  145. static inline enum aarch64_opnd_qualifier
  146. get_vreg_qualifier_from_value (aarch64_insn value)
  147. {
  148. enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_V_8B + value;
  149. assert (value <= 0x8
  150. && aarch64_get_qualifier_standard_value (qualifier) == value);
  151. return qualifier;
  152. }
  153. /* Given VALUE, return qualifier for an FP or AdvSIMD scalar register. */
  154. static inline enum aarch64_opnd_qualifier
  155. get_sreg_qualifier_from_value (aarch64_insn value)
  156. {
  157. enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_S_B + value;
  158. assert (value <= 0x4
  159. && aarch64_get_qualifier_standard_value (qualifier) == value);
  160. return qualifier;
  161. }
  162. /* The instruction in *INST is probably half way through its decoding and
  163. our caller wants to know the expected qualifier for operand I. Return
  164. such a qualifier if we can establish it; otherwise return
  165. AARCH64_OPND_QLF_NIL. */
  166. static aarch64_opnd_qualifier_t
  167. get_expected_qualifier (const aarch64_inst *inst, int i)
  168. {
  169. aarch64_opnd_qualifier_seq_t qualifiers;
  170. /* Should not be called if the qualifier is known. */
  171. assert (inst->operands[i].qualifier == AARCH64_OPND_QLF_NIL);
  172. if (aarch64_find_best_match (inst, inst->opcode->qualifiers_list,
  173. i, qualifiers))
  174. return qualifiers[i];
  175. else
  176. return AARCH64_OPND_QLF_NIL;
  177. }
  178. /* Operand extractors. */
  179. int
  180. aarch64_ext_regno (const aarch64_operand *self, aarch64_opnd_info *info,
  181. const aarch64_insn code,
  182. const aarch64_inst *inst ATTRIBUTE_UNUSED)
  183. {
  184. info->reg.regno = extract_field (self->fields[0], code, 0);
  185. return 1;
  186. }
  187. int
  188. aarch64_ext_regno_pair (const aarch64_operand *self ATTRIBUTE_UNUSED, aarch64_opnd_info *info,
  189. const aarch64_insn code ATTRIBUTE_UNUSED,
  190. const aarch64_inst *inst ATTRIBUTE_UNUSED)
  191. {
  192. assert (info->idx == 1
  193. || info->idx == 3);
  194. info->reg.regno = inst->operands[info->idx - 1].reg.regno + 1;
  195. return 1;
  196. }
  197. /* e.g. IC <ic_op>{, <Xt>}. */
  198. int
  199. aarch64_ext_regrt_sysins (const aarch64_operand *self, aarch64_opnd_info *info,
  200. const aarch64_insn code,
  201. const aarch64_inst *inst ATTRIBUTE_UNUSED)
  202. {
  203. info->reg.regno = extract_field (self->fields[0], code, 0);
  204. assert (info->idx == 1
  205. && (aarch64_get_operand_class (inst->operands[0].type)
  206. == AARCH64_OPND_CLASS_SYSTEM));
  207. /* This will make the constraint checking happy and more importantly will
  208. help the disassembler determine whether this operand is optional or
  209. not. */
  210. info->present = inst->operands[0].sysins_op->has_xt;
  211. return 1;
  212. }
  213. /* e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
  214. int
  215. aarch64_ext_reglane (const aarch64_operand *self, aarch64_opnd_info *info,
  216. const aarch64_insn code,
  217. const aarch64_inst *inst ATTRIBUTE_UNUSED)
  218. {
  219. /* regno */
  220. info->reglane.regno = extract_field (self->fields[0], code,
  221. inst->opcode->mask);
  222. /* Index and/or type. */
  223. if (inst->opcode->iclass == asisdone
  224. || inst->opcode->iclass == asimdins)
  225. {
  226. if (info->type == AARCH64_OPND_En
  227. && inst->opcode->operands[0] == AARCH64_OPND_Ed)
  228. {
  229. unsigned shift;
  230. /* index2 for e.g. INS <Vd>.<Ts>[<index1>], <Vn>.<Ts>[<index2>]. */
  231. assert (info->idx == 1); /* Vn */
  232. aarch64_insn value = extract_field (FLD_imm4, code, 0);
  233. /* Depend on AARCH64_OPND_Ed to determine the qualifier. */
  234. info->qualifier = get_expected_qualifier (inst, info->idx);
  235. shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
  236. info->reglane.index = value >> shift;
  237. }
  238. else
  239. {
  240. /* index and type for e.g. DUP <V><d>, <Vn>.<T>[<index>].
  241. imm5<3:0> <V>
  242. 0000 RESERVED
  243. xxx1 B
  244. xx10 H
  245. x100 S
  246. 1000 D */
  247. int pos = -1;
  248. aarch64_insn value = extract_field (FLD_imm5, code, 0);
  249. while (++pos <= 3 && (value & 0x1) == 0)
  250. value >>= 1;
  251. if (pos > 3)
  252. return 0;
  253. info->qualifier = get_sreg_qualifier_from_value (pos);
  254. info->reglane.index = (unsigned) (value >> 1);
  255. }
  256. }
  257. else
  258. {
  259. /* Index only for e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
  260. or SQDMLAL <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>]. */
  261. /* Need information in other operand(s) to help decoding. */
  262. info->qualifier = get_expected_qualifier (inst, info->idx);
  263. switch (info->qualifier)
  264. {
  265. case AARCH64_OPND_QLF_S_H:
  266. /* h:l:m */
  267. info->reglane.index = extract_fields (code, 0, 3, FLD_H, FLD_L,
  268. FLD_M);
  269. info->reglane.regno &= 0xf;
  270. break;
  271. case AARCH64_OPND_QLF_S_S:
  272. /* h:l */
  273. info->reglane.index = extract_fields (code, 0, 2, FLD_H, FLD_L);
  274. break;
  275. case AARCH64_OPND_QLF_S_D:
  276. /* H */
  277. info->reglane.index = extract_field (FLD_H, code, 0);
  278. break;
  279. default:
  280. return 0;
  281. }
  282. }
  283. return 1;
  284. }
  285. int
  286. aarch64_ext_reglist (const aarch64_operand *self, aarch64_opnd_info *info,
  287. const aarch64_insn code,
  288. const aarch64_inst *inst ATTRIBUTE_UNUSED)
  289. {
  290. /* R */
  291. info->reglist.first_regno = extract_field (self->fields[0], code, 0);
  292. /* len */
  293. info->reglist.num_regs = extract_field (FLD_len, code, 0) + 1;
  294. return 1;
  295. }
  296. /* Decode Rt and opcode fields of Vt in AdvSIMD load/store instructions. */
  297. int
  298. aarch64_ext_ldst_reglist (const aarch64_operand *self ATTRIBUTE_UNUSED,
  299. aarch64_opnd_info *info, const aarch64_insn code,
  300. const aarch64_inst *inst)
  301. {
  302. aarch64_insn value;
  303. /* Number of elements in each structure to be loaded/stored. */
  304. unsigned expected_num = get_opcode_dependent_value (inst->opcode);
  305. struct
  306. {
  307. unsigned is_reserved;
  308. unsigned num_regs;
  309. unsigned num_elements;
  310. } data [] =
  311. { {0, 4, 4},
  312. {1, 4, 4},
  313. {0, 4, 1},
  314. {0, 4, 2},
  315. {0, 3, 3},
  316. {1, 3, 3},
  317. {0, 3, 1},
  318. {0, 1, 1},
  319. {0, 2, 2},
  320. {1, 2, 2},
  321. {0, 2, 1},
  322. };
  323. /* Rt */
  324. info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
  325. /* opcode */
  326. value = extract_field (FLD_opcode, code, 0);
  327. if (expected_num != data[value].num_elements || data[value].is_reserved)
  328. return 0;
  329. info->reglist.num_regs = data[value].num_regs;
  330. return 1;
  331. }
  332. /* Decode Rt and S fields of Vt in AdvSIMD load single structure to all
  333. lanes instructions. */
  334. int
  335. aarch64_ext_ldst_reglist_r (const aarch64_operand *self ATTRIBUTE_UNUSED,
  336. aarch64_opnd_info *info, const aarch64_insn code,
  337. const aarch64_inst *inst)
  338. {
  339. aarch64_insn value;
  340. /* Rt */
  341. info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
  342. /* S */
  343. value = extract_field (FLD_S, code, 0);
  344. /* Number of registers is equal to the number of elements in
  345. each structure to be loaded/stored. */
  346. info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
  347. assert (info->reglist.num_regs >= 1 && info->reglist.num_regs <= 4);
  348. /* Except when it is LD1R. */
  349. if (info->reglist.num_regs == 1 && value == (aarch64_insn) 1)
  350. info->reglist.num_regs = 2;
  351. return 1;
  352. }
  353. /* Decode Q, opcode<2:1>, S, size and Rt fields of Vt in AdvSIMD
  354. load/store single element instructions. */
  355. int
  356. aarch64_ext_ldst_elemlist (const aarch64_operand *self ATTRIBUTE_UNUSED,
  357. aarch64_opnd_info *info, const aarch64_insn code,
  358. const aarch64_inst *inst ATTRIBUTE_UNUSED)
  359. {
  360. aarch64_field field = {0, 0};
  361. aarch64_insn QSsize; /* fields Q:S:size. */
  362. aarch64_insn opcodeh2; /* opcode<2:1> */
  363. /* Rt */
  364. info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
  365. /* Decode the index, opcode<2:1> and size. */
  366. gen_sub_field (FLD_asisdlso_opcode, 1, 2, &field);
  367. opcodeh2 = extract_field_2 (&field, code, 0);
  368. QSsize = extract_fields (code, 0, 3, FLD_Q, FLD_S, FLD_vldst_size);
  369. switch (opcodeh2)
  370. {
  371. case 0x0:
  372. info->qualifier = AARCH64_OPND_QLF_S_B;
  373. /* Index encoded in "Q:S:size". */
  374. info->reglist.index = QSsize;
  375. break;
  376. case 0x1:
  377. if (QSsize & 0x1)
  378. /* UND. */
  379. return 0;
  380. info->qualifier = AARCH64_OPND_QLF_S_H;
  381. /* Index encoded in "Q:S:size<1>". */
  382. info->reglist.index = QSsize >> 1;
  383. break;
  384. case 0x2:
  385. if ((QSsize >> 1) & 0x1)
  386. /* UND. */
  387. return 0;
  388. if ((QSsize & 0x1) == 0)
  389. {
  390. info->qualifier = AARCH64_OPND_QLF_S_S;
  391. /* Index encoded in "Q:S". */
  392. info->reglist.index = QSsize >> 2;
  393. }
  394. else
  395. {
  396. if (extract_field (FLD_S, code, 0))
  397. /* UND */
  398. return 0;
  399. info->qualifier = AARCH64_OPND_QLF_S_D;
  400. /* Index encoded in "Q". */
  401. info->reglist.index = QSsize >> 3;
  402. }
  403. break;
  404. default:
  405. return 0;
  406. }
  407. info->reglist.has_index = 1;
  408. info->reglist.num_regs = 0;
  409. /* Number of registers is equal to the number of elements in
  410. each structure to be loaded/stored. */
  411. info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
  412. assert (info->reglist.num_regs >= 1 && info->reglist.num_regs <= 4);
  413. return 1;
  414. }
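/* Editor's worked example (not in the upstream file): for LD1 {<Vt>.S}[2],
   opcode<2:1> is 0b10 and Q:S:size is 0b1000, so the checks above pass
   (size<1> and S are both 0), the qualifier becomes S_S and the index is
   Q:S = QSsize >> 2 = 2.  */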
  415. /* Decode fields immh:immb and/or Q for e.g.
  416. SSHR <Vd>.<T>, <Vn>.<T>, #<shift>
  417. or SSHR <V><d>, <V><n>, #<shift>. */
  418. int
  419. aarch64_ext_advsimd_imm_shift (const aarch64_operand *self ATTRIBUTE_UNUSED,
  420. aarch64_opnd_info *info, const aarch64_insn code,
  421. const aarch64_inst *inst)
  422. {
  423. int pos;
  424. aarch64_insn Q, imm, immh;
  425. enum aarch64_insn_class iclass = inst->opcode->iclass;
  426. immh = extract_field (FLD_immh, code, 0);
  427. if (immh == 0)
  428. return 0;
  429. imm = extract_fields (code, 0, 2, FLD_immh, FLD_immb);
  430. pos = 4;
  431. /* Get highest set bit in immh. */
  432. while (--pos >= 0 && (immh & 0x8) == 0)
  433. immh <<= 1;
  434. assert ((iclass == asimdshf || iclass == asisdshf)
  435. && (info->type == AARCH64_OPND_IMM_VLSR
  436. || info->type == AARCH64_OPND_IMM_VLSL));
  437. if (iclass == asimdshf)
  438. {
  439. Q = extract_field (FLD_Q, code, 0);
  440. /* immh Q <T>
  441. 0000 x SEE AdvSIMD modified immediate
  442. 0001 0 8B
  443. 0001 1 16B
  444. 001x 0 4H
  445. 001x 1 8H
  446. 01xx 0 2S
  447. 01xx 1 4S
  448. 1xxx 0 RESERVED
  449. 1xxx 1 2D */
  450. info->qualifier =
  451. get_vreg_qualifier_from_value ((pos << 1) | (int) Q);
  452. }
  453. else
  454. info->qualifier = get_sreg_qualifier_from_value (pos);
  455. if (info->type == AARCH64_OPND_IMM_VLSR)
  456. /* immh <shift>
  457. 0000 SEE AdvSIMD modified immediate
  458. 0001 (16-UInt(immh:immb))
  459. 001x (32-UInt(immh:immb))
  460. 01xx (64-UInt(immh:immb))
  461. 1xxx (128-UInt(immh:immb)) */
  462. info->imm.value = (16 << pos) - imm;
  463. else
  464. /* immh:immb
  465. immh <shift>
  466. 0000 SEE AdvSIMD modified immediate
  467. 0001 (UInt(immh:immb)-8)
  468. 001x (UInt(immh:immb)-16)
  469. 01xx (UInt(immh:immb)-32)
  470. 1xxx (UInt(immh:immb)-64) */
  471. info->imm.value = imm - (8 << pos);
  472. return 1;
  473. }
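/* Editor's worked example (not in the upstream file): for
   SSHR <Vd>.4H, <Vn>.4H, #10 the encoding has immh:immb = 0b0010110 (22)
   and Q = 0; the highest set bit of immh gives pos = 1, so the qualifier
   is 4H and the shift is (16 << 1) - 22 = 10.  */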
  474. /* Decode the shift immediate for e.g. SHLL <Vd>.<Ta>, <Vn>.<Tb>, #<shift>. */
  475. int
  476. aarch64_ext_shll_imm (const aarch64_operand *self ATTRIBUTE_UNUSED,
  477. aarch64_opnd_info *info, const aarch64_insn code,
  478. const aarch64_inst *inst ATTRIBUTE_UNUSED)
  479. {
  480. int64_t imm;
  481. aarch64_insn val;
  482. val = extract_field (FLD_size, code, 0);
  483. switch (val)
  484. {
  485. case 0: imm = 8; break;
  486. case 1: imm = 16; break;
  487. case 2: imm = 32; break;
  488. default: return 0;
  489. }
  490. info->imm.value = imm;
  491. return 1;
  492. }
  493. /* Decode imm for e.g. BFM <Wd>, <Wn>, #<immr>, #<imms>.
  494. The value in the field(s) is extracted as an unsigned immediate value. */
  495. int
  496. aarch64_ext_imm (const aarch64_operand *self, aarch64_opnd_info *info,
  497. const aarch64_insn code,
  498. const aarch64_inst *inst ATTRIBUTE_UNUSED)
  499. {
  500. int64_t imm;
  501. /* Maximum of two fields to extract. */
  502. assert (self->fields[2] == FLD_NIL);
  503. if (self->fields[1] == FLD_NIL)
  504. imm = extract_field (self->fields[0], code, 0);
  505. else
  506. /* e.g. TBZ b5:b40. */
  507. imm = extract_fields (code, 0, 2, self->fields[0], self->fields[1]);
  508. if (info->type == AARCH64_OPND_FPIMM)
  509. info->imm.is_fp = 1;
  510. if (operand_need_sign_extension (self))
  511. imm = sign_extend (imm, get_operand_fields_width (self) - 1);
  512. if (operand_need_shift_by_two (self))
  513. imm <<= 2;
  514. if (info->type == AARCH64_OPND_ADDR_ADRP)
  515. imm <<= 12;
  516. info->imm.value = imm;
  517. return 1;
  518. }
  519. /* Decode imm and its shifter for e.g. MOVZ <Wd>, #<imm16>{, LSL #<shift>}. */
  520. int
  521. aarch64_ext_imm_half (const aarch64_operand *self, aarch64_opnd_info *info,
  522. const aarch64_insn code,
  523. const aarch64_inst *inst ATTRIBUTE_UNUSED)
  524. {
  525. aarch64_ext_imm (self, info, code, inst);
  526. info->shifter.kind = AARCH64_MOD_LSL;
  527. info->shifter.amount = extract_field (FLD_hw, code, 0) << 4;
  528. return 1;
  529. }
  530. /* Decode cmode and "a:b:c:d:e:f:g:h" for e.g.
  531. MOVI <Vd>.<T>, #<imm8> {, LSL #<amount>}. */
  532. int
  533. aarch64_ext_advsimd_imm_modified (const aarch64_operand *self ATTRIBUTE_UNUSED,
  534. aarch64_opnd_info *info,
  535. const aarch64_insn code,
  536. const aarch64_inst *inst ATTRIBUTE_UNUSED)
  537. {
  538. uint64_t imm;
  539. enum aarch64_opnd_qualifier opnd0_qualifier = inst->operands[0].qualifier;
  540. aarch64_field field = {0, 0};
  541. assert (info->idx == 1);
  542. if (info->type == AARCH64_OPND_SIMD_FPIMM)
  543. info->imm.is_fp = 1;
  544. /* a:b:c:d:e:f:g:h */
  545. imm = extract_fields (code, 0, 2, FLD_abc, FLD_defgh);
  546. if (!info->imm.is_fp && aarch64_get_qualifier_esize (opnd0_qualifier) == 8)
  547. {
  548. /* Either MOVI <Dd>, #<imm>
  549. or MOVI <Vd>.2D, #<imm>.
  550. <imm> is a 64-bit immediate
  551. 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh',
  552. encoded in "a:b:c:d:e:f:g:h". */
  553. int i;
  554. unsigned abcdefgh = imm;
  555. for (imm = 0ull, i = 0; i < 8; i++)
  556. if (((abcdefgh >> i) & 0x1) != 0)
  557. imm |= 0xffull << (8 * i);
  558. }
  559. info->imm.value = imm;
  560. /* cmode */
  561. info->qualifier = get_expected_qualifier (inst, info->idx);
  562. switch (info->qualifier)
  563. {
  564. case AARCH64_OPND_QLF_NIL:
  565. /* no shift */
  566. info->shifter.kind = AARCH64_MOD_NONE;
  567. return 1;
  568. case AARCH64_OPND_QLF_LSL:
  569. /* shift zeros */
  570. info->shifter.kind = AARCH64_MOD_LSL;
  571. switch (aarch64_get_qualifier_esize (opnd0_qualifier))
  572. {
  573. case 4: gen_sub_field (FLD_cmode, 1, 2, &field); break; /* per word */
  574. case 2: gen_sub_field (FLD_cmode, 1, 1, &field); break; /* per half */
  575. case 1: gen_sub_field (FLD_cmode, 1, 0, &field); break; /* per byte */
  576. default: assert (0); return 0;
  577. }
  578. /* 00: 0; 01: 8; 10:16; 11:24. */
  579. info->shifter.amount = extract_field_2 (&field, code, 0) << 3;
  580. break;
  581. case AARCH64_OPND_QLF_MSL:
  582. /* shift ones */
  583. info->shifter.kind = AARCH64_MOD_MSL;
  584. gen_sub_field (FLD_cmode, 0, 1, &field); /* per word */
  585. info->shifter.amount = extract_field_2 (&field, code, 0) ? 16 : 8;
  586. break;
  587. default:
  588. assert (0);
  589. return 0;
  590. }
  591. return 1;
  592. }
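/* Editor's worked example (not in the upstream file): for
   MOVI <Vd>.2D, #<imm> with a:b:c:d:e:f:g:h = 0b10000101, each set bit
   expands to a 0xff byte, giving the 64-bit immediate 0xff00000000ff00ff.  */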
  593. /* Decode scale for e.g. SCVTF <Dd>, <Wn>, #<fbits>. */
  594. int
  595. aarch64_ext_fbits (const aarch64_operand *self ATTRIBUTE_UNUSED,
  596. aarch64_opnd_info *info, const aarch64_insn code,
  597. const aarch64_inst *inst ATTRIBUTE_UNUSED)
  598. {
  599. info->imm.value = 64 - extract_field (FLD_scale, code, 0);
  600. return 1;
  601. }
  602. /* Decode arithmetic immediate for e.g.
  603. SUBS <Wd>, <Wn|WSP>, #<imm> {, <shift>}. */
  604. int
  605. aarch64_ext_aimm (const aarch64_operand *self ATTRIBUTE_UNUSED,
  606. aarch64_opnd_info *info, const aarch64_insn code,
  607. const aarch64_inst *inst ATTRIBUTE_UNUSED)
  608. {
  609. aarch64_insn value;
  610. info->shifter.kind = AARCH64_MOD_LSL;
  611. /* shift */
  612. value = extract_field (FLD_shift, code, 0);
  613. if (value >= 2)
  614. return 0;
  615. info->shifter.amount = value ? 12 : 0;
  616. /* imm12 (unsigned) */
  617. info->imm.value = extract_field (FLD_imm12, code, 0);
  618. return 1;
  619. }
  620. /* Decode logical immediate for e.g. ORR <Wd|WSP>, <Wn>, #<imm>. */
  621. int
  622. aarch64_ext_limm (const aarch64_operand *self ATTRIBUTE_UNUSED,
  623. aarch64_opnd_info *info, const aarch64_insn code,
  624. const aarch64_inst *inst ATTRIBUTE_UNUSED)
  625. {
  626. uint64_t imm, mask;
  627. uint32_t sf;
  628. uint32_t N, R, S;
  629. unsigned simd_size;
  630. aarch64_insn value;
  631. value = extract_fields (code, 0, 3, FLD_N, FLD_immr, FLD_imms);
  632. assert (inst->operands[0].qualifier == AARCH64_OPND_QLF_W
  633. || inst->operands[0].qualifier == AARCH64_OPND_QLF_X);
  634. sf = aarch64_get_qualifier_esize (inst->operands[0].qualifier) != 4;
  635. /* value is N:immr:imms. */
  636. S = value & 0x3f;
  637. R = (value >> 6) & 0x3f;
  638. N = (value >> 12) & 0x1;
  639. if (sf == 0 && N == 1)
  640. return 0;
  641. /* The immediate value is S+1 bits set to 1, left rotated by SIMDsize - R
  642. (in other words, right rotated by R), then replicated. */
  643. if (N != 0)
  644. {
  645. simd_size = 64;
  646. mask = 0xffffffffffffffffull;
  647. }
  648. else
  649. {
  650. switch (S)
  651. {
  652. case 0x00 ... 0x1f: /* 0xxxxx */ simd_size = 32; break;
  653. case 0x20 ... 0x2f: /* 10xxxx */ simd_size = 16; S &= 0xf; break;
  654. case 0x30 ... 0x37: /* 110xxx */ simd_size = 8; S &= 0x7; break;
  655. case 0x38 ... 0x3b: /* 1110xx */ simd_size = 4; S &= 0x3; break;
  656. case 0x3c ... 0x3d: /* 11110x */ simd_size = 2; S &= 0x1; break;
  657. default: return 0;
  658. }
  659. mask = (1ull << simd_size) - 1;
  660. /* Top bits are IGNORED. */
  661. R &= simd_size - 1;
  662. }
  663. /* NOTE: if S = simd_size - 1 we get 0xf..f which is rejected. */
  664. if (S == simd_size - 1)
  665. return 0;
  666. /* S+1 consecutive bits to 1. */
  667. /* NOTE: S can't be 63 due to detection above. */
  668. imm = (1ull << (S + 1)) - 1;
  669. /* Rotate to the left by simd_size - R. */
  670. if (R != 0)
  671. imm = ((imm << (simd_size - R)) & mask) | (imm >> R);
  672. /* Replicate the value according to SIMD size. */
  673. switch (simd_size)
  674. {
  675. case 2: imm = (imm << 2) | imm; /* Fall through. */
  676. case 4: imm = (imm << 4) | imm; /* Fall through. */
  677. case 8: imm = (imm << 8) | imm; /* Fall through. */
  678. case 16: imm = (imm << 16) | imm; /* Fall through. */
  679. case 32: imm = (imm << 32) | imm; /* Fall through. */
  680. case 64: break;
  681. default: assert (0); return 0;
  682. }
  683. info->imm.value = sf ? imm : imm & 0xffffffff;
  684. return 1;
  685. }
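/* Editor's worked example (not in the upstream file): N:immr:imms =
   0:000000:111100 selects a 2-bit element (S = 0, R = 0), so imm starts
   as 0b01 and is replicated to 0x5555555555555555 for an X-register form
   (or 0x55555555 for a W-register form).  */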
  686. /* Decode Ft for e.g. STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]
  687. or LDP <Qt1>, <Qt2>, [<Xn|SP>], #<imm>. */
  688. int
  689. aarch64_ext_ft (const aarch64_operand *self ATTRIBUTE_UNUSED,
  690. aarch64_opnd_info *info,
  691. const aarch64_insn code, const aarch64_inst *inst)
  692. {
  693. aarch64_insn value;
  694. /* Rt */
  695. info->reg.regno = extract_field (FLD_Rt, code, 0);
  696. /* size */
  697. value = extract_field (FLD_ldst_size, code, 0);
  698. if (inst->opcode->iclass == ldstpair_indexed
  699. || inst->opcode->iclass == ldstnapair_offs
  700. || inst->opcode->iclass == ldstpair_off
  701. || inst->opcode->iclass == loadlit)
  702. {
  703. enum aarch64_opnd_qualifier qualifier;
  704. switch (value)
  705. {
  706. case 0: qualifier = AARCH64_OPND_QLF_S_S; break;
  707. case 1: qualifier = AARCH64_OPND_QLF_S_D; break;
  708. case 2: qualifier = AARCH64_OPND_QLF_S_Q; break;
  709. default: return 0;
  710. }
  711. info->qualifier = qualifier;
  712. }
  713. else
  714. {
  715. /* opc1:size */
  716. value = extract_fields (code, 0, 2, FLD_opc1, FLD_ldst_size);
  717. if (value > 0x4)
  718. return 0;
  719. info->qualifier = get_sreg_qualifier_from_value (value);
  720. }
  721. return 1;
  722. }
  723. /* Decode the address operand for e.g. STXRB <Ws>, <Wt>, [<Xn|SP>{,#0}]. */
  724. int
  725. aarch64_ext_addr_simple (const aarch64_operand *self ATTRIBUTE_UNUSED,
  726. aarch64_opnd_info *info,
  727. aarch64_insn code,
  728. const aarch64_inst *inst ATTRIBUTE_UNUSED)
  729. {
  730. /* Rn */
  731. info->addr.base_regno = extract_field (FLD_Rn, code, 0);
  732. return 1;
  733. }
  734. /* Decode the address operand for e.g.
  735. STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
  736. int
  737. aarch64_ext_addr_regoff (const aarch64_operand *self ATTRIBUTE_UNUSED,
  738. aarch64_opnd_info *info,
  739. aarch64_insn code, const aarch64_inst *inst)
  740. {
  741. aarch64_insn S, value;
  742. /* Rn */
  743. info->addr.base_regno = extract_field (FLD_Rn, code, 0);
  744. /* Rm */
  745. info->addr.offset.regno = extract_field (FLD_Rm, code, 0);
  746. /* option */
  747. value = extract_field (FLD_option, code, 0);
  748. info->shifter.kind =
  749. aarch64_get_operand_modifier_from_value (value, TRUE /* extend_p */);
  750. /* Fix-up the shifter kind; although the table-driven approach is
  751. efficient, it is slightly inflexible, thus needing this fix-up. */
  752. if (info->shifter.kind == AARCH64_MOD_UXTX)
  753. info->shifter.kind = AARCH64_MOD_LSL;
  754. /* S */
  755. S = extract_field (FLD_S, code, 0);
  756. if (S == 0)
  757. {
  758. info->shifter.amount = 0;
  759. info->shifter.amount_present = 0;
  760. }
  761. else
  762. {
  763. int size;
  764. /* Need information in other operand(s) to help achieve the decoding
  765. from 'S' field. */
  766. info->qualifier = get_expected_qualifier (inst, info->idx);
  767. /* Get the size of the data element that is accessed, which may be
  768. different from that of the source register size, e.g. in strb/ldrb. */
  769. size = aarch64_get_qualifier_esize (info->qualifier);
  770. info->shifter.amount = get_logsz (size);
  771. info->shifter.amount_present = 1;
  772. }
  773. return 1;
  774. }
  775. /* Decode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>], #<simm>. */
  776. int
  777. aarch64_ext_addr_simm (const aarch64_operand *self, aarch64_opnd_info *info,
  778. aarch64_insn code, const aarch64_inst *inst)
  779. {
  780. aarch64_insn imm;
  781. info->qualifier = get_expected_qualifier (inst, info->idx);
  782. /* Rn */
  783. info->addr.base_regno = extract_field (FLD_Rn, code, 0);
  784. /* simm (imm9 or imm7) */
  785. imm = extract_field (self->fields[0], code, 0);
  786. info->addr.offset.imm = sign_extend (imm, fields[self->fields[0]].width - 1);
  787. if (self->fields[0] == FLD_imm7)
  788. /* scaled immediate in ld/st pair instructions. */
  789. info->addr.offset.imm *= aarch64_get_qualifier_esize (info->qualifier);
  790. /* qualifier */
  791. if (inst->opcode->iclass == ldst_unscaled
  792. || inst->opcode->iclass == ldstnapair_offs
  793. || inst->opcode->iclass == ldstpair_off
  794. || inst->opcode->iclass == ldst_unpriv)
  795. info->addr.writeback = 0;
  796. else
  797. {
  798. /* pre/post- index */
  799. info->addr.writeback = 1;
  800. if (extract_field (self->fields[1], code, 0) == 1)
  801. info->addr.preind = 1;
  802. else
  803. info->addr.postind = 1;
  804. }
  805. return 1;
  806. }
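/* Editor's worked example (not in the upstream file): for
   LDP <Xt1>, <Xt2>, [<Xn|SP>], #-512 the imm7 field holds 0x40, which
   sign-extends to -64 and is then scaled by the 8-byte element size of
   the X qualifier to give the -512 byte offset.  */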
  807. /* Decode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>{, #<simm>}]. */
  808. int
  809. aarch64_ext_addr_uimm12 (const aarch64_operand *self, aarch64_opnd_info *info,
  810. aarch64_insn code,
  811. const aarch64_inst *inst ATTRIBUTE_UNUSED)
  812. {
  813. int shift;
  814. info->qualifier = get_expected_qualifier (inst, info->idx);
  815. shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
  816. /* Rn */
  817. info->addr.base_regno = extract_field (self->fields[0], code, 0);
  818. /* uimm12 */
  819. info->addr.offset.imm = extract_field (self->fields[1], code, 0) << shift;
  820. return 1;
  821. }
  822. /* Decode the address operand for e.g.
  823. LD1 {<Vt>.<T>, <Vt2>.<T>, <Vt3>.<T>}, [<Xn|SP>], <Xm|#<amount>>. */
  824. int
  825. aarch64_ext_simd_addr_post (const aarch64_operand *self ATTRIBUTE_UNUSED,
  826. aarch64_opnd_info *info,
  827. aarch64_insn code, const aarch64_inst *inst)
  828. {
  829. /* The opcode dependent area stores the number of elements in
  830. each structure to be loaded/stored. */
  831. int is_ld1r = get_opcode_dependent_value (inst->opcode) == 1;
  832. /* Rn */
  833. info->addr.base_regno = extract_field (FLD_Rn, code, 0);
  834. /* Rm | #<amount> */
  835. info->addr.offset.regno = extract_field (FLD_Rm, code, 0);
  836. if (info->addr.offset.regno == 31)
  837. {
  838. if (inst->opcode->operands[0] == AARCH64_OPND_LVt_AL)
  839. /* Special handling of loading single structure to all lanes. */
  840. info->addr.offset.imm = (is_ld1r ? 1
  841. : inst->operands[0].reglist.num_regs)
  842. * aarch64_get_qualifier_esize (inst->operands[0].qualifier);
  843. else
  844. info->addr.offset.imm = inst->operands[0].reglist.num_regs
  845. * aarch64_get_qualifier_esize (inst->operands[0].qualifier)
  846. * aarch64_get_qualifier_nelem (inst->operands[0].qualifier);
  847. }
  848. else
  849. info->addr.offset.is_reg = 1;
  850. info->addr.writeback = 1;
  851. return 1;
  852. }
  853. /* Decode the condition operand for e.g. CSEL <Xd>, <Xn>, <Xm>, <cond>. */
  854. int
  855. aarch64_ext_cond (const aarch64_operand *self ATTRIBUTE_UNUSED,
  856. aarch64_opnd_info *info,
  857. aarch64_insn code, const aarch64_inst *inst ATTRIBUTE_UNUSED)
  858. {
  859. aarch64_insn value;
  860. /* cond */
  861. value = extract_field (FLD_cond, code, 0);
  862. info->cond = get_cond_from_value (value);
  863. return 1;
  864. }
  865. /* Decode the system register operand for e.g. MRS <Xt>, <systemreg>. */
  866. int
  867. aarch64_ext_sysreg (const aarch64_operand *self ATTRIBUTE_UNUSED,
  868. aarch64_opnd_info *info,
  869. aarch64_insn code,
  870. const aarch64_inst *inst ATTRIBUTE_UNUSED)
  871. {
  872. /* op0:op1:CRn:CRm:op2 */
  873. info->sysreg = extract_fields (code, 0, 5, FLD_op0, FLD_op1, FLD_CRn,
  874. FLD_CRm, FLD_op2);
  875. return 1;
  876. }
  877. /* Decode the PSTATE field operand for e.g. MSR <pstatefield>, #<imm>. */
  878. int
  879. aarch64_ext_pstatefield (const aarch64_operand *self ATTRIBUTE_UNUSED,
  880. aarch64_opnd_info *info, aarch64_insn code,
  881. const aarch64_inst *inst ATTRIBUTE_UNUSED)
  882. {
  883. int i;
  884. /* op1:op2 */
  885. info->pstatefield = extract_fields (code, 0, 2, FLD_op1, FLD_op2);
  886. for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
  887. if (aarch64_pstatefields[i].value == (aarch64_insn)info->pstatefield)
  888. return 1;
  889. /* Reserved value in <pstatefield>. */
  890. return 0;
  891. }
  892. /* Decode the system instruction op operand for e.g. AT <at_op>, <Xt>. */
  893. int
  894. aarch64_ext_sysins_op (const aarch64_operand *self ATTRIBUTE_UNUSED,
  895. aarch64_opnd_info *info,
  896. aarch64_insn code,
  897. const aarch64_inst *inst ATTRIBUTE_UNUSED)
  898. {
  899. int i;
  900. aarch64_insn value;
  901. const aarch64_sys_ins_reg *sysins_ops;
  902. /* op0:op1:CRn:CRm:op2 */
  903. value = extract_fields (code, 0, 5,
  904. FLD_op0, FLD_op1, FLD_CRn,
  905. FLD_CRm, FLD_op2);
  906. switch (info->type)
  907. {
  908. case AARCH64_OPND_SYSREG_AT: sysins_ops = aarch64_sys_regs_at; break;
  909. case AARCH64_OPND_SYSREG_DC: sysins_ops = aarch64_sys_regs_dc; break;
  910. case AARCH64_OPND_SYSREG_IC: sysins_ops = aarch64_sys_regs_ic; break;
  911. case AARCH64_OPND_SYSREG_TLBI: sysins_ops = aarch64_sys_regs_tlbi; break;
  912. default: assert (0); return 0;
  913. }
  914. for (i = 0; sysins_ops[i].name != NULL; ++i)
  915. if (sysins_ops[i].value == value)
  916. {
  917. info->sysins_op = sysins_ops + i;
  918. DEBUG_TRACE ("%s found value: %x, has_xt: %d, i: %d.",
  919. info->sysins_op->name,
  920. (unsigned)info->sysins_op->value,
  921. info->sysins_op->has_xt, i);
  922. return 1;
  923. }
  924. return 0;
  925. }
  926. /* Decode the memory barrier option operand for e.g. DMB <option>|#<imm>. */
  927. int
  928. aarch64_ext_barrier (const aarch64_operand *self ATTRIBUTE_UNUSED,
  929. aarch64_opnd_info *info,
  930. aarch64_insn code,
  931. const aarch64_inst *inst ATTRIBUTE_UNUSED)
  932. {
  933. /* CRm */
  934. info->barrier = aarch64_barrier_options + extract_field (FLD_CRm, code, 0);
  935. return 1;
  936. }
  937. /* Decode the prefetch operation option operand for e.g.
  938. PRFM <prfop>, [<Xn|SP>{, #<pimm>}]. */
  939. int
  940. aarch64_ext_prfop (const aarch64_operand *self ATTRIBUTE_UNUSED,
  941. aarch64_opnd_info *info,
  942. aarch64_insn code, const aarch64_inst *inst ATTRIBUTE_UNUSED)
  943. {
  944. /* prfop in Rt */
  945. info->prfop = aarch64_prfops + extract_field (FLD_Rt, code, 0);
  946. return 1;
  947. }
  948. /* Decode the extended register operand for e.g.
  949. STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
  950. int
  951. aarch64_ext_reg_extended (const aarch64_operand *self ATTRIBUTE_UNUSED,
  952. aarch64_opnd_info *info,
  953. aarch64_insn code,
  954. const aarch64_inst *inst ATTRIBUTE_UNUSED)
  955. {
  956. aarch64_insn value;
  957. /* Rm */
  958. info->reg.regno = extract_field (FLD_Rm, code, 0);
  959. /* option */
  960. value = extract_field (FLD_option, code, 0);
  961. info->shifter.kind =
  962. aarch64_get_operand_modifier_from_value (value, TRUE /* extend_p */);
  963. /* imm3 */
  964. info->shifter.amount = extract_field (FLD_imm3, code, 0);
  965. /* This makes the constraint checking happy. */
  966. info->shifter.operator_present = 1;
  967. /* Assume inst->operands[0].qualifier has been resolved. */
  968. assert (inst->operands[0].qualifier != AARCH64_OPND_QLF_NIL);
  969. info->qualifier = AARCH64_OPND_QLF_W;
  970. if (inst->operands[0].qualifier == AARCH64_OPND_QLF_X
  971. && (info->shifter.kind == AARCH64_MOD_UXTX
  972. || info->shifter.kind == AARCH64_MOD_SXTX))
  973. info->qualifier = AARCH64_OPND_QLF_X;
  974. return 1;
  975. }
  976. /* Decode the shifted register operand for e.g.
  977. SUBS <Xd>, <Xn>, <Xm> {, <shift> #<amount>}. */
  978. int
  979. aarch64_ext_reg_shifted (const aarch64_operand *self ATTRIBUTE_UNUSED,
  980. aarch64_opnd_info *info,
  981. aarch64_insn code,
  982. const aarch64_inst *inst ATTRIBUTE_UNUSED)
  983. {
  984. aarch64_insn value;
  985. /* Rm */
  986. info->reg.regno = extract_field (FLD_Rm, code, 0);
  987. /* shift */
  988. value = extract_field (FLD_shift, code, 0);
  989. info->shifter.kind =
  990. aarch64_get_operand_modifier_from_value (value, FALSE /* extend_p */);
  991. if (info->shifter.kind == AARCH64_MOD_ROR
  992. && inst->opcode->iclass != log_shift)
  993. /* ROR is not available for the shifted register operand in arithmetic
  994. instructions. */
  995. return 0;
  996. /* imm6 */
  997. info->shifter.amount = extract_field (FLD_imm6, code, 0);
  998. /* This makes the constraint checking happy. */
  999. info->shifter.operator_present = 1;
  1000. return 1;
  1001. }
  1002. /* Bitfields that are commonly used to encode certain operands' information
  1003. may be partially used as part of the base opcode in some instructions.
  1004. For example, the bit 1 of the field 'size' in
  1005. FCVTXN <Vb><d>, <Va><n>
  1006. is actually part of the base opcode, while only size<0> is available
  1007. for encoding the register type. Another example is the AdvSIMD
  1008. instruction ORR (register), in which the field 'size' is also used for
  1009. the base opcode, leaving only the field 'Q' available to encode the
  1010. vector register arrangement specifier '8B' or '16B'.
  1011. This function tries to deduce the qualifier from the value of partially
  1012. constrained field(s). Given the VALUE of such a field or fields, the
  1013. qualifiers CANDIDATES and the MASK (indicating which bits are valid for
  1014. operand encoding), the function returns the matching qualifier or
  1015. AARCH64_OPND_QLF_NIL if nothing matches.
  1016. N.B. CANDIDATES is a group of possible qualifiers that are valid for
  1017. one operand; it has a maximum of AARCH64_MAX_QLF_SEQ_NUM qualifiers and
  1018. may end with AARCH64_OPND_QLF_NIL. */
  1019. static enum aarch64_opnd_qualifier
  1020. get_qualifier_from_partial_encoding (aarch64_insn value,
  1021. const enum aarch64_opnd_qualifier* \
  1022. candidates,
  1023. aarch64_insn mask)
  1024. {
  1025. int i;
  1026. DEBUG_TRACE ("enter with value: %d, mask: %d", (int)value, (int)mask);
  1027. for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
  1028. {
  1029. aarch64_insn standard_value;
  1030. if (candidates[i] == AARCH64_OPND_QLF_NIL)
  1031. break;
  1032. standard_value = aarch64_get_qualifier_standard_value (candidates[i]);
  1033. if ((standard_value & mask) == (value & mask))
  1034. return candidates[i];
  1035. }
  1036. return AARCH64_OPND_QLF_NIL;
  1037. }
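/* Editor's illustrative note (not in the upstream file): for the AdvSIMD
   ORR (register) case mentioned above, only the Q bit is free (size is part
   of the base opcode), so with candidates {8B, 16B} and a mask covering just
   Q, a value with Q = 0 matches 8B and a value with Q = 1 matches 16B.  */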
  1038. /* Given a list of qualifier sequences, return all possible valid qualifiers
  1039. for operand IDX in QUALIFIERS.
  1040. Assume QUALIFIERS is an array whose length is large enough. */
  1041. static void
  1042. get_operand_possible_qualifiers (int idx,
  1043. const aarch64_opnd_qualifier_seq_t *list,
  1044. enum aarch64_opnd_qualifier *qualifiers)
  1045. {
  1046. int i;
  1047. for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
  1048. if ((qualifiers[i] = list[i][idx]) == AARCH64_OPND_QLF_NIL)
  1049. break;
  1050. }
  1051. /* Decode the size and Q fields for e.g. SHADD.
  1052. We tag one operand with the qualifier according to the code;
  1053. whether the qualifier is valid for this opcode or not is left to
  1054. the semantic checking. */
  1055. static int
  1056. decode_sizeq (aarch64_inst *inst)
  1057. {
  1058. int idx;
  1059. enum aarch64_opnd_qualifier qualifier;
  1060. aarch64_insn code;
  1061. aarch64_insn value, mask;
  1062. enum aarch64_field_kind fld_sz;
  1063. enum aarch64_opnd_qualifier candidates[AARCH64_MAX_QLF_SEQ_NUM];
  1064. if (inst->opcode->iclass == asisdlse
  1065. || inst->opcode->iclass == asisdlsep
  1066. || inst->opcode->iclass == asisdlso
  1067. || inst->opcode->iclass == asisdlsop)
  1068. fld_sz = FLD_vldst_size;
  1069. else
  1070. fld_sz = FLD_size;
  1071. code = inst->value;
  1072. value = extract_fields (code, inst->opcode->mask, 2, fld_sz, FLD_Q);
  1073. /* Obtain the info about which bits of fields Q and size are actually
  1074. available for operand encoding. Opcodes like FMAXNM and FMLA have
  1075. size[1] unavailable. */
  1076. mask = extract_fields (~inst->opcode->mask, 0, 2, fld_sz, FLD_Q);
  1077. /* The index of the operand we are going to tag with a qualifier and the
  1078. qualifier itself are deduced from the value of the size and Q fields and the
  1079. possible valid qualifier lists. */
  1080. idx = aarch64_select_operand_for_sizeq_field_coding (inst->opcode);
  1081. DEBUG_TRACE ("key idx: %d", idx);
  1082. /* For most related instructions, size:Q is fully available for operand
  1083. encoding. */
  1084. if (mask == 0x7)
  1085. {
  1086. inst->operands[idx].qualifier = get_vreg_qualifier_from_value (value);
  1087. return 1;
  1088. }
  1089. get_operand_possible_qualifiers (idx, inst->opcode->qualifiers_list,
  1090. candidates);
  1091. #ifdef DEBUG_AARCH64
  1092. if (debug_dump)
  1093. {
  1094. int i;
  1095. for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM
  1096. && candidates[i] != AARCH64_OPND_QLF_NIL; ++i)
  1097. DEBUG_TRACE ("qualifier %d: %s", i,
  1098. aarch64_get_qualifier_name(candidates[i]));
  1099. DEBUG_TRACE ("%d, %d", (int)value, (int)mask);
  1100. }
  1101. #endif /* DEBUG_AARCH64 */
  1102. qualifier = get_qualifier_from_partial_encoding (value, candidates, mask);
  1103. if (qualifier == AARCH64_OPND_QLF_NIL)
  1104. return 0;
  1105. inst->operands[idx].qualifier = qualifier;
  1106. return 1;
  1107. }
  1108. /* Decode size[0]:Q, i.e. bit 22 and bit 30, for
  1109. e.g. FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
  1110. static int
  1111. decode_asimd_fcvt (aarch64_inst *inst)
  1112. {
  1113. aarch64_field field = {0, 0};
  1114. aarch64_insn value;
  1115. enum aarch64_opnd_qualifier qualifier;
  1116. gen_sub_field (FLD_size, 0, 1, &field);
  1117. value = extract_field_2 (&field, inst->value, 0);
  1118. qualifier = value == 0 ? AARCH64_OPND_QLF_V_4S
  1119. : AARCH64_OPND_QLF_V_2D;
  1120. switch (inst->opcode->op)
  1121. {
  1122. case OP_FCVTN:
  1123. case OP_FCVTN2:
  1124. /* FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
  1125. inst->operands[1].qualifier = qualifier;
  1126. break;
  1127. case OP_FCVTL:
  1128. case OP_FCVTL2:
  1129. /* FCVTL<Q> <Vd>.<Ta>, <Vn>.<Tb>. */
  1130. inst->operands[0].qualifier = qualifier;
  1131. break;
  1132. default:
  1133. assert (0);
  1134. return 0;
  1135. }
  1136. return 1;
  1137. }
  1138. /* Decode size[0], i.e. bit 22, for
  1139. e.g. FCVTXN <Vb><d>, <Va><n>. */
  1140. static int
  1141. decode_asisd_fcvtxn (aarch64_inst *inst)
  1142. {
  1143. aarch64_field field = {0, 0};
  1144. gen_sub_field (FLD_size, 0, 1, &field);
  1145. if (!extract_field_2 (&field, inst->value, 0))
  1146. return 0;
  1147. inst->operands[0].qualifier = AARCH64_OPND_QLF_S_S;
  1148. return 1;
  1149. }
  1150. /* Decode the 'opc' field for e.g. FCVT <Dd>, <Sn>. */
  1151. static int
  1152. decode_fcvt (aarch64_inst *inst)
  1153. {
  1154. enum aarch64_opnd_qualifier qualifier;
  1155. aarch64_insn value;
  1156. const aarch64_field field = {15, 2};
  1157. /* opc dstsize */
  1158. value = extract_field_2 (&field, inst->value, 0);
  1159. switch (value)
  1160. {
  1161. case 0: qualifier = AARCH64_OPND_QLF_S_S; break;
  1162. case 1: qualifier = AARCH64_OPND_QLF_S_D; break;
  1163. case 3: qualifier = AARCH64_OPND_QLF_S_H; break;
  1164. default: return 0;
  1165. }
  1166. inst->operands[0].qualifier = qualifier;
  1167. return 1;
  1168. }
  1169. /* Do miscellaneous decodings that are not common enough to be driven by
  1170. flags. */
  1171. static int
  1172. do_misc_decoding (aarch64_inst *inst)
  1173. {
  1174. switch (inst->opcode->op)
  1175. {
  1176. case OP_FCVT:
  1177. return decode_fcvt (inst);
  1178. case OP_FCVTN:
  1179. case OP_FCVTN2:
  1180. case OP_FCVTL:
  1181. case OP_FCVTL2:
  1182. return decode_asimd_fcvt (inst);
  1183. case OP_FCVTXN_S:
  1184. return decode_asisd_fcvtxn (inst);
  1185. default:
  1186. return 0;
  1187. }
  1188. }
  1189. /* Opcodes that have fields shared by multiple operands are usually flagged
  1190. with flags. In this function, we detect such flags, decode the related
  1191. field(s) and store the information in one of the related operands. The
  1192. 'one' operand is not an arbitrary operand, but one of the operands that can
  1193. accommodate all the information that has been decoded. */
  1194. static int
  1195. do_special_decoding (aarch64_inst *inst)
  1196. {
  1197. int idx;
  1198. aarch64_insn value;
  1199. /* Condition for truly conditionally executed instructions, e.g. b.cond. */
  1200. if (inst->opcode->flags & F_COND)
  1201. {
  1202. value = extract_field (FLD_cond2, inst->value, 0);
  1203. inst->cond = get_cond_from_value (value);
  1204. }
  1205. /* 'sf' field. */
  1206. if (inst->opcode->flags & F_SF)
  1207. {
  1208. idx = select_operand_for_sf_field_coding (inst->opcode);
  1209. value = extract_field (FLD_sf, inst->value, 0);
  1210. inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
  1211. if ((inst->opcode->flags & F_N)
  1212. && extract_field (FLD_N, inst->value, 0) != value)
  1213. return 0;
  1214. }
  1215. /* 'lse_sz' field. */
  1216. if (inst->opcode->flags & F_LSE_SZ)
  1217. {
  1218. idx = select_operand_for_sf_field_coding (inst->opcode);
  1219. value = extract_field (FLD_lse_sz, inst->value, 0);
  1220. inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
  1221. }
  1222. /* size:Q fields. */
  1223. if (inst->opcode->flags & F_SIZEQ)
  1224. return decode_sizeq (inst);
  1225. if (inst->opcode->flags & F_FPTYPE)
  1226. {
  1227. idx = select_operand_for_fptype_field_coding (inst->opcode);
  1228. value = extract_field (FLD_type, inst->value, 0);
  1229. switch (value)
  1230. {
  1231. case 0: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_S; break;
  1232. case 1: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_D; break;
  1233. case 3: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_H; break;
  1234. default: return 0;
  1235. }
  1236. }
  1237. if (inst->opcode->flags & F_SSIZE)
  1238. {
  1239. /* N.B. some opcodes like FCMGT <V><d>, <V><n>, #0 have the size[1] as part
  1240. of the base opcode. */
  1241. aarch64_insn mask;
  1242. enum aarch64_opnd_qualifier candidates[AARCH64_MAX_QLF_SEQ_NUM];
  1243. idx = select_operand_for_scalar_size_field_coding (inst->opcode);
  1244. value = extract_field (FLD_size, inst->value, inst->opcode->mask);
  1245. mask = extract_field (FLD_size, ~inst->opcode->mask, 0);
  1246. /* For most related instructions, the 'size' field is fully available for
  1247. operand encoding. */
  1248. if (mask == 0x3)
  1249. inst->operands[idx].qualifier = get_sreg_qualifier_from_value (value);
  1250. else
  1251. {
  1252. get_operand_possible_qualifiers (idx, inst->opcode->qualifiers_list,
  1253. candidates);
  1254. inst->operands[idx].qualifier
  1255. = get_qualifier_from_partial_encoding (value, candidates, mask);
  1256. }
  1257. }
  1258. if (inst->opcode->flags & F_T)
  1259. {
  1260. /* Num of consecutive '0's on the right side of imm5<3:0>. */
  1261. int num = 0;
  1262. unsigned val, Q;
  1263. assert (aarch64_get_operand_class (inst->opcode->operands[0])
  1264. == AARCH64_OPND_CLASS_SIMD_REG);
  1265. /* imm5<3:0> q <t>
  1266. 0000 x reserved
  1267. xxx1 0 8b
  1268. xxx1 1 16b
  1269. xx10 0 4h
  1270. xx10 1 8h
  1271. x100 0 2s
  1272. x100 1 4s
  1273. 1000 0 reserved
  1274. 1000 1 2d */
      val = extract_field (FLD_imm5, inst->value, 0);
      while ((val & 0x1) == 0 && ++num <= 3)
        val >>= 1;
      if (num > 3)
        return 0;
      Q = (unsigned) extract_field (FLD_Q, inst->value, inst->opcode->mask);
      inst->operands[0].qualifier =
        get_vreg_qualifier_from_value ((num << 1) | Q);
    }

  if (inst->opcode->flags & F_GPRSIZE_IN_Q)
    {
      /* Use Rt to encode in the case of e.g.
         STXP <Ws>, <Xt1>, <Xt2>, [<Xn|SP>{,#0}].  */
      idx = aarch64_operand_index (inst->opcode->operands, AARCH64_OPND_Rt);
      if (idx == -1)
        {
          /* Otherwise use the result operand, which has to be an integer
             register.  */
          assert (aarch64_get_operand_class (inst->opcode->operands[0])
                  == AARCH64_OPND_CLASS_INT_REG);
          idx = 0;
        }
      assert (idx == 0 || idx == 1);
      value = extract_field (FLD_Q, inst->value, 0);
      inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
    }

  if (inst->opcode->flags & F_LDS_SIZE)
    {
      aarch64_field field = {0, 0};
      assert (aarch64_get_operand_class (inst->opcode->operands[0])
              == AARCH64_OPND_CLASS_INT_REG);
      gen_sub_field (FLD_opc, 0, 1, &field);
      value = extract_field_2 (&field, inst->value, 0);
      inst->operands[0].qualifier
        = value ? AARCH64_OPND_QLF_W : AARCH64_OPND_QLF_X;
    }

  /* Miscellaneous decoding; done as the last step.  */
  if (inst->opcode->flags & F_MISC)
    return do_misc_decoding (inst);

  return 1;
}

/* Converters converting a real opcode instruction to its alias form.  */

/* ROR <Wd>, <Ws>, #<shift>
     is equivalent to:
   EXTR <Wd>, <Ws>, <Ws>, #<shift>.  */
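/* For example, EXTR W1, W2, W2, #8 (Rn == Rm) is displayed as
   ROR W1, W2, #8.  */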
static int
convert_extr_to_ror (aarch64_inst *inst)
{
  if (inst->operands[1].reg.regno == inst->operands[2].reg.regno)
    {
      copy_operand_info (inst, 2, 3);
      inst->operands[3].type = AARCH64_OPND_NIL;
      return 1;
    }
  return 0;
}

/* UXTL<Q> <Vd>.<Ta>, <Vn>.<Tb>
     is equivalent to:
   USHLL<Q> <Vd>.<Ta>, <Vn>.<Tb>, #0.  */
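/* For example, USHLL V0.8H, V1.8B, #0 is displayed as UXTL V0.8H, V1.8B,
   since the shift amount is zero.  */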
static int
convert_shll_to_xtl (aarch64_inst *inst)
{
  if (inst->operands[2].imm.value == 0)
    {
      inst->operands[2].type = AARCH64_OPND_NIL;
      return 1;
    }
  return 0;
}

/* Convert
     UBFM <Xd>, <Xn>, #<shift>, #63
   to
     LSR <Xd>, <Xn>, #<shift>.  */
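/* For example, UBFM X0, X1, #4, #63 is displayed as LSR X0, X1, #4; the
   W-register form requires <imms> == 31 instead of 63.  */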
static int
convert_bfm_to_sr (aarch64_inst *inst)
{
  int64_t imms, val;

  imms = inst->operands[3].imm.value;
  val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
  if (imms == val)
    {
      inst->operands[3].type = AARCH64_OPND_NIL;
      return 1;
    }

  return 0;
}

/* Convert MOV to ORR.  */
static int
convert_orr_to_mov (aarch64_inst *inst)
{
  /* MOV <Vd>.<T>, <Vn>.<T>
     is equivalent to:
     ORR <Vd>.<T>, <Vn>.<T>, <Vn>.<T>.  */
  if (inst->operands[1].reg.regno == inst->operands[2].reg.regno)
    {
      inst->operands[2].type = AARCH64_OPND_NIL;
      return 1;
    }
  return 0;
}

/* When <imms> >= <immr>, the instruction written:
     SBFX <Xd>, <Xn>, #<lsb>, #<width>
   is equivalent to:
     SBFM <Xd>, <Xn>, #<lsb>, #(<lsb>+<width>-1).  */
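/* For example, SBFM X0, X1, #4, #11 is displayed as SBFX X0, X1, #4, #8:
   <lsb> = immr = 4 and <width> = imms + 1 - <lsb> = 8.  */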
static int
convert_bfm_to_bfx (aarch64_inst *inst)
{
  int64_t immr, imms;

  immr = inst->operands[2].imm.value;
  imms = inst->operands[3].imm.value;
  if (imms >= immr)
    {
      int64_t lsb = immr;
      inst->operands[2].imm.value = lsb;
      inst->operands[3].imm.value = imms + 1 - lsb;
      /* The two opcodes have different qualifiers for
         the immediate operands; reset to help the checking.  */
      reset_operand_qualifier (inst, 2);
      reset_operand_qualifier (inst, 3);
      return 1;
    }

  return 0;
}

/* When <imms> < <immr>, the instruction written:
     SBFIZ <Xd>, <Xn>, #<lsb>, #<width>
   is equivalent to:
     SBFM <Xd>, <Xn>, #((64-<lsb>)&0x3f), #(<width>-1).  */
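/* For example, SBFM X0, X1, #56, #15 (imms < immr) is displayed as
   SBFIZ X0, X1, #8, #16: <lsb> = (64 - 56) & 0x3f = 8 and
   <width> = imms + 1 = 16.  */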
static int
convert_bfm_to_bfi (aarch64_inst *inst)
{
  int64_t immr, imms, val;

  immr = inst->operands[2].imm.value;
  imms = inst->operands[3].imm.value;
  val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 32 : 64;
  if (imms < immr)
    {
      inst->operands[2].imm.value = (val - immr) & (val - 1);
      inst->operands[3].imm.value = imms + 1;
      /* The two opcodes have different qualifiers for
         the immediate operands; reset to help the checking.  */
      reset_operand_qualifier (inst, 2);
      reset_operand_qualifier (inst, 3);
      return 1;
    }

  return 0;
}

/* The instruction written:
     LSL <Xd>, <Xn>, #<shift>
   is equivalent to:
     UBFM <Xd>, <Xn>, #((64-<shift>)&0x3f), #(63-<shift>).  */
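/* For example, UBFM X0, X1, #61, #60 (immr == imms + 1) is displayed as
   LSL X0, X1, #3, since <shift> = 63 - imms = 3.  */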
static int
convert_ubfm_to_lsl (aarch64_inst *inst)
{
  int64_t immr = inst->operands[2].imm.value;
  int64_t imms = inst->operands[3].imm.value;
  int64_t val
    = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;

  if ((immr == 0 && imms == val) || immr == imms + 1)
    {
      inst->operands[3].type = AARCH64_OPND_NIL;
      inst->operands[2].imm.value = val - imms;
      return 1;
    }

  return 0;
}

/* CINC <Wd>, <Wn>, <cond>
     is equivalent to:
   CSINC <Wd>, <Wn>, <Wn>, invert(<cond>)
   where <cond> is not AL or NV.  */
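/* For example, CSINC W0, W1, W1, NE is displayed as CINC W0, W1, EQ;
   the condition of the alias is the inverse of the one encoded.  */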
static int
convert_from_csel (aarch64_inst *inst)
{
  if (inst->operands[1].reg.regno == inst->operands[2].reg.regno
      && (inst->operands[3].cond->value & 0xe) != 0xe)
    {
      copy_operand_info (inst, 2, 3);
      inst->operands[2].cond = get_inverted_cond (inst->operands[3].cond);
      inst->operands[3].type = AARCH64_OPND_NIL;
      return 1;
    }
  return 0;
}

/* CSET <Wd>, <cond>
     is equivalent to:
   CSINC <Wd>, WZR, WZR, invert(<cond>)
   where <cond> is not AL or NV.  */
static int
convert_csinc_to_cset (aarch64_inst *inst)
{
  if (inst->operands[1].reg.regno == 0x1f
      && inst->operands[2].reg.regno == 0x1f
      && (inst->operands[3].cond->value & 0xe) != 0xe)
    {
      copy_operand_info (inst, 1, 3);
      inst->operands[1].cond = get_inverted_cond (inst->operands[3].cond);
      inst->operands[3].type = AARCH64_OPND_NIL;
      inst->operands[2].type = AARCH64_OPND_NIL;
      return 1;
    }
  return 0;
}

/* MOV <Wd>, #<imm>
     is equivalent to:
   MOVZ <Wd>, #<imm16>, LSL #<shift>.

   A disassembler may output ORR, MOVZ and MOVN as a MOV mnemonic, except when
   ORR has an immediate that could be generated by a MOVZ or MOVN instruction,
   or where a MOVN has an immediate that could be encoded by MOVZ, or where
   MOVZ/MOVN #0 have a shift amount other than LSL #0, in which case the
   machine-instruction mnemonic must be used.  */
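/* For example, MOVZ W0, #0x1234, LSL #16 is displayed as
   MOV W0, #0x12340000, whereas MOVZ W0, #0, LSL #16 keeps the MOVZ mnemonic,
   because a zero immediate with a non-zero shift is one of the exceptional
   cases listed above.  */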
static int
convert_movewide_to_mov (aarch64_inst *inst)
{
  uint64_t value = inst->operands[1].imm.value;

  /* MOVZ/MOVN #0 have a shift amount other than LSL #0.  */
  if (value == 0 && inst->operands[1].shifter.amount != 0)
    return 0;

  inst->operands[1].type = AARCH64_OPND_IMM_MOV;
  inst->operands[1].shifter.kind = AARCH64_MOD_NONE;
  value <<= inst->operands[1].shifter.amount;

  /* As an alias convertor, it has to be clear that the INST->OPCODE
     is the opcode of the real instruction.  */
  if (inst->opcode->op == OP_MOVN)
    {
      int is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
      value = ~value;
      /* A MOVN has an immediate that could be encoded by MOVZ.  */
      if (aarch64_wide_constant_p (value, is32, NULL) == TRUE)
        return 0;
    }

  inst->operands[1].imm.value = value;
  inst->operands[1].shifter.amount = 0;
  return 1;
}

/* MOV <Wd>, #<imm>
     is equivalent to:
   ORR <Wd>, WZR, #<imm>.

   A disassembler may output ORR, MOVZ and MOVN as a MOV mnemonic, except when
   ORR has an immediate that could be generated by a MOVZ or MOVN instruction,
   or where a MOVN has an immediate that could be encoded by MOVZ, or where
   MOVZ/MOVN #0 have a shift amount other than LSL #0, in which case the
   machine-instruction mnemonic must be used.  */
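/* For example, ORR W0, WZR, #0xf0f0f0f would be displayed as
   MOV W0, #0xf0f0f0f, since that bitmask immediate cannot be produced by a
   single MOVZ or MOVN; an ORR whose immediate is MOVZ/MOVN-encodable keeps
   the ORR mnemonic.  */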
static int
convert_movebitmask_to_mov (aarch64_inst *inst)
{
  int is32;
  uint64_t value;

  /* Should have been assured by the base opcode value.  */
  assert (inst->operands[1].reg.regno == 0x1f);

  copy_operand_info (inst, 1, 2);
  is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
  inst->operands[1].type = AARCH64_OPND_IMM_MOV;
  value = inst->operands[1].imm.value;

  /* ORR has an immediate that could be generated by a MOVZ or MOVN
     instruction.  */
  if (inst->operands[0].reg.regno != 0x1f
      && (aarch64_wide_constant_p (value, is32, NULL) == TRUE
          || aarch64_wide_constant_p (~value, is32, NULL) == TRUE))
    return 0;

  inst->operands[2].type = AARCH64_OPND_NIL;
  return 1;
}

/* Some alias opcodes are disassembled by being converted from their real
   form.  N.B. INST->OPCODE is the real opcode rather than the alias.  */
static int
convert_to_alias (aarch64_inst *inst, const aarch64_opcode *alias)
{
  switch (alias->op)
    {
    case OP_ASR_IMM:
    case OP_LSR_IMM:
      return convert_bfm_to_sr (inst);
    case OP_LSL_IMM:
      return convert_ubfm_to_lsl (inst);
    case OP_CINC:
    case OP_CINV:
    case OP_CNEG:
      return convert_from_csel (inst);
    case OP_CSET:
    case OP_CSETM:
      return convert_csinc_to_cset (inst);
    case OP_UBFX:
    case OP_BFXIL:
    case OP_SBFX:
      return convert_bfm_to_bfx (inst);
    case OP_SBFIZ:
    case OP_BFI:
    case OP_UBFIZ:
      return convert_bfm_to_bfi (inst);
    case OP_MOV_V:
      return convert_orr_to_mov (inst);
    case OP_MOV_IMM_WIDE:
    case OP_MOV_IMM_WIDEN:
      return convert_movewide_to_mov (inst);
    case OP_MOV_IMM_LOG:
      return convert_movebitmask_to_mov (inst);
    case OP_ROR_IMM:
      return convert_extr_to_ror (inst);
    case OP_SXTL:
    case OP_SXTL2:
    case OP_UXTL:
    case OP_UXTL2:
      return convert_shll_to_xtl (inst);
    default:
      return 0;
    }
}

static int aarch64_opcode_decode (const aarch64_opcode *, const aarch64_insn,
                                  aarch64_inst *, int);

/* Given the instruction information in *INST, check if the instruction has
   any alias form that can be used to represent *INST.  If the answer is yes,
   update *INST to be in the form of the determined alias.  */

/* In the opcode description table, the following flags are used in opcode
   entries to help establish the relations between the real and the alias
   opcodes:

        F_ALIAS:        opcode is an alias
        F_HAS_ALIAS:    opcode has alias(es)
        F_P1
        F_P2
        F_P3:           disassembly preference priority 1-3 (the larger the
                        higher).  If nothing is specified, the priority is 0
                        by default, i.e. the lowest priority.

   Although the relation between the machine and the alias instructions is
   not explicitly described, it can easily be determined from the base opcode
   values, masks and the flags F_ALIAS and F_HAS_ALIAS in their opcode
   description entries:

        The mask of an alias opcode must be equal to or a super-set (i.e.
        more constrained) of that of the aliased opcode; so must the base
        opcode value.

        if (opcode_has_alias (real) && alias_opcode_p (opcode)
            && (opcode->mask & real->mask) == real->mask
            && (real->mask & opcode->opcode) == (real->mask & real->opcode))
        then OPCODE is an alias of, and only of, the REAL instruction.

   The alias relationship is forced to be flat-structured in order to keep
   the related algorithm simple; an opcode entry cannot be flagged with both
   F_ALIAS and F_HAS_ALIAS.

   During disassembly, the decoding decision tree (in
   opcodes/aarch64-dis-2.c) always returns a machine instruction opcode
   entry; if the decoding of such a machine instruction succeeds (and
   -Mno-aliases is not specified), the disassembler then checks whether any
   alias instruction exists for this real instruction.  If there is, the
   disassembler will try to disassemble the 32-bit binary again using the
   alias's rule, or try to convert the IR to the form of the alias.  In the
   case of multiple aliases, the aliases are tried one by one from the
   highest priority (currently the flag F_P3) to the lowest priority (no
   priority flag), and the first one that succeeds is adopted.

   You may ask why there is a need to convert the IR from one form to another
   when handling certain aliases.  This is because, on the one hand, it
   avoids adding more operand code to handle unusual encoding/decoding; on
   the other hand, during disassembly, the conversion is an effective way to
   check the condition of an alias (as an alias may be adopted only if
   certain conditions are met).

   In order to speed up the alias opcode lookup, aarch64-gen has preprocessed
   aarch64_opcode_table and generated aarch64_find_alias_opcode and
   aarch64_find_next_alias_opcode (in opcodes/aarch64-dis-2.c) to help.  */
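/* As an illustration of the relation above, MOV <Wd>, #<imm> (bitmask
   immediate) is an alias of ORR <Wd>, WZR, #<imm>: the alias entry
   additionally fixes the Rn field to 0b11111 in its base opcode value and
   mask, so the check above selects the MOV entry only when Rn is WZR (see
   aarch64-tbl.h for the actual table entries).  */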
static void
determine_disassembling_preference (struct aarch64_inst *inst)
{
  const aarch64_opcode *opcode;
  const aarch64_opcode *alias;

  opcode = inst->opcode;

  /* This opcode does not have an alias, so use itself.  */
  if (opcode_has_alias (opcode) == FALSE)
    return;

  alias = aarch64_find_alias_opcode (opcode);
  assert (alias);

#ifdef DEBUG_AARCH64
  if (debug_dump)
    {
      const aarch64_opcode *tmp = alias;
      printf ("####   LIST    ordered: ");
      while (tmp)
        {
          printf ("%s, ", tmp->name);
          tmp = aarch64_find_next_alias_opcode (tmp);
        }
      printf ("\n");
    }
#endif /* DEBUG_AARCH64 */

  for (; alias; alias = aarch64_find_next_alias_opcode (alias))
    {
      DEBUG_TRACE ("try %s", alias->name);
      assert (alias_opcode_p (alias));

      /* An alias can be a pseudo opcode which will never be used in the
         disassembly, e.g. BIC logical immediate is such a pseudo opcode
         aliasing AND.  */
      if (pseudo_opcode_p (alias))
        {
          DEBUG_TRACE ("skip pseudo %s", alias->name);
          continue;
        }

      if ((inst->value & alias->mask) != alias->opcode)
        {
          DEBUG_TRACE ("skip %s as base opcode does not match", alias->name);
          continue;
        }

      /* No need to do any complicated transformation on operands, if the
         alias opcode does not have any operand.  */
      if (aarch64_num_of_operands (alias) == 0 && alias->opcode == inst->value)
        {
          DEBUG_TRACE ("succeed with 0-operand opcode %s", alias->name);
          aarch64_replace_opcode (inst, alias);
          return;
        }

      if (alias->flags & F_CONV)
        {
          aarch64_inst copy;
          memcpy (&copy, inst, sizeof (aarch64_inst));
          /* ALIAS is the preference as long as the instruction can be
             successfully converted to the form of ALIAS.  */
          if (convert_to_alias (&copy, alias) == 1)
            {
              aarch64_replace_opcode (&copy, alias);
              assert (aarch64_match_operands_constraint (&copy, NULL));
              DEBUG_TRACE ("succeed with %s via conversion", alias->name);
              memcpy (inst, &copy, sizeof (aarch64_inst));
              return;
            }
        }
      else
        {
          /* Directly decode the alias opcode.  */
          aarch64_inst temp;
          memset (&temp, '\0', sizeof (aarch64_inst));
          if (aarch64_opcode_decode (alias, inst->value, &temp, 1) == 1)
            {
              DEBUG_TRACE ("succeed with %s via direct decoding", alias->name);
              memcpy (inst, &temp, sizeof (aarch64_inst));
              return;
            }
        }
    }
}
/* Decode the CODE according to OPCODE; fill INST.  Return 0 if the decoding
   fails, which means that CODE is not an instruction of OPCODE; otherwise
   return 1.

   If OPCODE has alias(es) and NOALIASES_P is 0, an alias opcode may be
   determined and used to disassemble CODE; this is done just before the
   return.  */
static int
aarch64_opcode_decode (const aarch64_opcode *opcode, const aarch64_insn code,
                       aarch64_inst *inst, int noaliases_p)
{
  int i;

  DEBUG_TRACE ("enter with %s", opcode->name);

  assert (opcode && inst);

  /* Check the base opcode.  */
  if ((code & opcode->mask) != (opcode->opcode & opcode->mask))
    {
      DEBUG_TRACE ("base opcode match FAIL");
      goto decode_fail;
    }

  /* Clear inst.  */
  memset (inst, '\0', sizeof (aarch64_inst));

  inst->opcode = opcode;
  inst->value = code;

  /* Assign operand codes and indexes.  */
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    {
      if (opcode->operands[i] == AARCH64_OPND_NIL)
        break;
      inst->operands[i].type = opcode->operands[i];
      inst->operands[i].idx = i;
    }

  /* Call the opcode decoder indicated by flags.  */
  if (opcode_has_special_coder (opcode) && do_special_decoding (inst) == 0)
    {
      DEBUG_TRACE ("opcode flag-based decoder FAIL");
      goto decode_fail;
    }

  /* Call operand decoders.  */
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    {
      const aarch64_operand *opnd;
      enum aarch64_opnd type;
      type = opcode->operands[i];
      if (type == AARCH64_OPND_NIL)
        break;
      opnd = &aarch64_operands[type];
      if (operand_has_extractor (opnd)
          && (! aarch64_extract_operand (opnd, &inst->operands[i], code, inst)))
        {
          DEBUG_TRACE ("operand decoder FAIL at operand %d", i);
          goto decode_fail;
        }
    }

  /* Match the qualifiers.  */
  if (aarch64_match_operands_constraint (inst, NULL) == 1)
    {
      /* Arriving here, the CODE has been determined to be a valid instruction
         of OPCODE and *INST has been filled with information about this
         OPCODE instruction.  Before the return, check if the instruction has
         any alias and should be disassembled in the form of its alias
         instead.  If the answer is yes, *INST will be updated.  */
      if (!noaliases_p)
        determine_disassembling_preference (inst);
      DEBUG_TRACE ("SUCCESS");
      return 1;
    }
  else
    {
      DEBUG_TRACE ("constraint matching FAIL");
    }

 decode_fail:
  return 0;
}
/* This does some user-friendly fix-up to *INST.  It currently focuses on
   adjusting qualifiers so that the printed instruction is recognized and
   understood more easily.  */
static void
user_friendly_fixup (aarch64_inst *inst)
{
  switch (inst->opcode->iclass)
    {
    case testbranch:
      /* TBNZ Xn|Wn, #uimm6, label
         Test and Branch Not Zero: conditionally jumps to label if bit number
         uimm6 in register Xn is not zero.  The bit number implies the width
         of the register, which may be written and should be disassembled as
         Wn if uimm is less than 32.  Limited to a branch offset range of
         +/- 32KiB.  */
      if (inst->operands[1].imm.value < 32)
        inst->operands[0].qualifier = AARCH64_OPND_QLF_W;
      break;
    default: break;
    }
}
/* Decode INSN and fill in *INST the instruction information.  Return zero
   on success.  */
int
aarch64_decode_insn (aarch64_insn insn, aarch64_inst *inst)
{
  const aarch64_opcode *opcode = aarch64_opcode_lookup (insn);

#ifdef DEBUG_AARCH64
  if (debug_dump)
    {
      const aarch64_opcode *tmp = opcode;
      printf ("\n");
      DEBUG_TRACE ("opcode lookup:");
      while (tmp != NULL)
        {
          aarch64_verbose ("  %s", tmp->name);
          tmp = aarch64_find_next_opcode (tmp);
        }
    }
#endif /* DEBUG_AARCH64 */

  /* A list of opcodes may have been found, as aarch64_opcode_lookup cannot
     distinguish some opcodes, e.g. SSHR and MOVI, which almost share the same
     opcode field and value, apart from the difference that one of them has an
     extra field as part of the opcode, but such a field is used for operand
     encoding in other opcode(s) ('immh' in the case of the example).  */
  while (opcode != NULL)
    {
      /* But only one opcode can be decoded successfully, as the decoding
         routine checks the constraints carefully.  */
      if (aarch64_opcode_decode (opcode, insn, inst, no_aliases) == 1)
        return ERR_OK;
      opcode = aarch64_find_next_opcode (opcode);
    }

  return ERR_UND;
}
/* Print operands.  */
static void
print_operands (bfd_vma pc, const aarch64_opcode *opcode,
                const aarch64_opnd_info *opnds, struct disassemble_info *info)
{
  int i, pcrel_p, num_printed;
  for (i = 0, num_printed = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    {
      const size_t size = 128;
      char str[size];

      /* We mainly rely on the opcode's operand info; however, we also look
         into inst->operands to support the disassembly of the optional
         operand.
         The two operand codes should be the same in all cases, apart from
         when the operand can be optional.  */
      if (opcode->operands[i] == AARCH64_OPND_NIL
          || opnds[i].type == AARCH64_OPND_NIL)
        break;

      /* Generate the operand string in STR.  */
      aarch64_print_operand (str, size, pc, opcode, opnds, i, &pcrel_p,
                             &info->target);

      /* Print the delimiter (taking account of omitted operand(s)).  */
      if (str[0] != '\0')
        (*info->fprintf_func) (info->stream, "%s",
                               num_printed++ == 0 ? "\t" : ", ");

      /* Print the operand.  */
      if (pcrel_p)
        (*info->print_address_func) (info->target, info);
      else
        (*info->fprintf_func) (info->stream, "%s", str);
    }
}
/* Print the instruction mnemonic name.  */
static void
print_mnemonic_name (const aarch64_inst *inst, struct disassemble_info *info)
{
  if (inst->opcode->flags & F_COND)
    {
      /* For instructions that are truly conditionally executed, e.g. b.cond,
         prepare the full mnemonic name with the corresponding condition
         suffix.  */
      char name[8], *ptr;
      size_t len;

      ptr = strchr (inst->opcode->name, '.');
      assert (ptr && inst->cond);
      len = ptr - inst->opcode->name;
      assert (len < 8);
      strncpy (name, inst->opcode->name, len);
      name [len] = '\0';
      (*info->fprintf_func) (info->stream, "%s.%s", name,
                             inst->cond->names[0]);
    }
  else
    (*info->fprintf_func) (info->stream, "%s", inst->opcode->name);
}
/* Print the instruction according to *INST.  */
static void
print_aarch64_insn (bfd_vma pc, const aarch64_inst *inst,
                    struct disassemble_info *info)
{
  print_mnemonic_name (inst, info);
  print_operands (pc, inst->opcode, inst->operands, info);
}
/* Entry-point of the instruction disassembler and printer.  */
static void
print_insn_aarch64_word (bfd_vma pc,
                         uint32_t word,
                         struct disassemble_info *info)
{
  static const char *err_msg[6] =
    {
      [ERR_OK]   = "_",
      [-ERR_UND] = "undefined",
      [-ERR_UNP] = "unpredictable",
      [-ERR_NYI] = "NYI"
    };

  int ret;
  aarch64_inst inst;

  info->insn_info_valid = 1;
  info->branch_delay_insns = 0;
  info->data_size = 0;
  info->target = 0;
  info->target2 = 0;

  if (info->flags & INSN_HAS_RELOC)
    /* If the instruction has a reloc associated with it, then
       the offset field in the instruction will actually be the
       addend for the reloc.  (If we are using REL type relocs).
       In such cases, we can ignore the pc when computing
       addresses, since the addend is not currently pc-relative.  */
    pc = 0;

  ret = aarch64_decode_insn (word, &inst);

  if (((word >> 21) & 0x3ff) == 1)
    {
      /* RESERVED for ALES.  */
      assert (ret != ERR_OK);
      ret = ERR_NYI;
    }

  switch (ret)
    {
    case ERR_UND:
    case ERR_UNP:
    case ERR_NYI:
      /* Handle undefined instructions.  */
      info->insn_type = dis_noninsn;
      (*info->fprintf_func) (info->stream, ".inst\t0x%08x ; %s",
                             word, err_msg[-ret]);
      break;
    case ERR_OK:
      user_friendly_fixup (&inst);
      print_aarch64_insn (pc, &inst, info);
      break;
    default:
      abort ();
    }
}
/* Disallow mapping symbols ($x, $d etc) from
   being displayed in symbol relative addresses.  */
bfd_boolean
aarch64_symbol_is_valid (asymbol * sym,
                         struct disassemble_info * info ATTRIBUTE_UNUSED)
{
  const char * name;

  if (sym == NULL)
    return FALSE;

  name = bfd_asymbol_name (sym);

  return name
         && (name[0] != '$'
             || (name[1] != 'x' && name[1] != 'd')
             || (name[2] != '\0' && name[2] != '.'));
}
/* Print data bytes on INFO->STREAM.  */
static void
print_insn_data (bfd_vma pc ATTRIBUTE_UNUSED,
                 uint32_t word,
                 struct disassemble_info *info)
{
  switch (info->bytes_per_chunk)
    {
    case 1:
      info->fprintf_func (info->stream, ".byte\t0x%02x", word);
      break;
    case 2:
      info->fprintf_func (info->stream, ".short\t0x%04x", word);
      break;
    case 4:
      info->fprintf_func (info->stream, ".word\t0x%08x", word);
      break;
    default:
      abort ();
    }
}
/* Try to infer the code or data type from a symbol.
   Returns nonzero if *MAP_TYPE was set.  */
static int
get_sym_code_type (struct disassemble_info *info, int n,
                   enum map_type *map_type)
{
  elf_symbol_type *es;
  unsigned int type;
  const char *name;

  es = *(elf_symbol_type **)(info->symtab + n);
  type = ELF_ST_TYPE (es->internal_elf_sym.st_info);

  /* If the symbol has function type then use that.  */
  if (type == STT_FUNC)
    {
      *map_type = MAP_INSN;
      return TRUE;
    }

  /* Check for mapping symbols.  */
  name = bfd_asymbol_name (info->symtab[n]);
  if (name[0] == '$'
      && (name[1] == 'x' || name[1] == 'd')
      && (name[2] == '\0' || name[2] == '.'))
    {
      *map_type = (name[1] == 'x' ? MAP_INSN : MAP_DATA);
      return TRUE;
    }

  return FALSE;
}
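/* For example, get_sym_code_type maps the symbols "$x" or "$x.anything" to
   MAP_INSN and "$d" or "$d.anything" to MAP_DATA, while a symbol of type
   STT_FUNC is always treated as code.  */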
/* Entry-point of the AArch64 disassembler.  */
int
print_insn_aarch64 (bfd_vma pc,
                    struct disassemble_info *info)
{
  bfd_byte buffer[INSNLEN];
  int status;
  void (*printer) (bfd_vma, uint32_t, struct disassemble_info *);
  bfd_boolean found = FALSE;
  unsigned int size = 4;
  unsigned long data;

  if (info->disassembler_options)
    {
      set_default_aarch64_dis_options (info);

      parse_aarch64_dis_options (info->disassembler_options);

      /* To avoid repeated parsing of these options, we remove them here.  */
      info->disassembler_options = NULL;
    }

  /* AArch64 instructions are always little-endian.  */
  info->endian_code = BFD_ENDIAN_LITTLE;

  /* First check the full symtab for a mapping symbol, even if there
     are no usable non-mapping symbols for this address.  */
  if (info->symtab_size != 0
      && bfd_asymbol_flavour (*info->symtab) == bfd_target_elf_flavour)
    {
      enum map_type type = MAP_INSN;
      int last_sym = -1;
      bfd_vma addr;
      int n;

      if (pc <= last_mapping_addr)
        last_mapping_sym = -1;

      /* Start scanning at the start of the function, or wherever
         we finished last time.  */
      n = info->symtab_pos + 1;
      if (n < last_mapping_sym)
        n = last_mapping_sym;

      /* Scan up to the location being disassembled.  */
      for (; n < info->symtab_size; n++)
        {
          addr = bfd_asymbol_value (info->symtab[n]);
          if (addr > pc)
            break;
          if ((info->section == NULL
               || info->section == info->symtab[n]->section)
              && get_sym_code_type (info, n, &type))
            {
              last_sym = n;
              found = TRUE;
            }
        }

      if (!found)
        {
          n = info->symtab_pos;
          if (n < last_mapping_sym)
            n = last_mapping_sym;

          /* No mapping symbol found at this address.  Look backwards
             for a preceding one.  */
          for (; n >= 0; n--)
            {
              if (get_sym_code_type (info, n, &type))
                {
                  last_sym = n;
                  found = TRUE;
                  break;
                }
            }
        }

      last_mapping_sym = last_sym;
      last_type = type;

      /* Look a little bit ahead to see if we should print out
         less than four bytes of data.  If there's a symbol,
         mapping or otherwise, after two bytes then don't
         print more.  */
      if (last_type == MAP_DATA)
        {
          size = 4 - (pc & 3);
          for (n = last_sym + 1; n < info->symtab_size; n++)
            {
              addr = bfd_asymbol_value (info->symtab[n]);
              if (addr > pc)
                {
                  if (addr - pc < size)
                    size = addr - pc;
                  break;
                }
            }
          /* If the next symbol is after three bytes, we need to
             print only part of the data, so that we can use either
             .byte or .short.  */
          if (size == 3)
            size = (pc & 1) ? 1 : 2;
        }
    }

  if (last_type == MAP_DATA)
    {
      /* size was set above.  */
      info->bytes_per_chunk = size;
      info->display_endian = info->endian;
      printer = print_insn_data;
    }
  else
    {
      info->bytes_per_chunk = size = INSNLEN;
      info->display_endian = info->endian_code;
      printer = print_insn_aarch64_word;
    }

  status = (*info->read_memory_func) (pc, buffer, size, info);
  if (status != 0)
    {
      (*info->memory_error_func) (status, pc, info);
      return -1;
    }

  data = bfd_get_bits (buffer, size * 8,
                       info->display_endian == BFD_ENDIAN_BIG);
  (*printer) (pc, data, info);

  return size;
}
void
print_aarch64_disassembler_options (FILE *stream)
{
  fprintf (stream, _("\n\
The following AARCH64 specific disassembler options are supported for use\n\
with the -M switch (multiple options should be separated by commas):\n"));

  fprintf (stream, _("\n\
  no-aliases         Don't print instruction aliases.\n"));

  fprintf (stream, _("\n\
  aliases            Do print instruction aliases.\n"));

#ifdef DEBUG_AARCH64
  fprintf (stream, _("\n\
  debug_dump         Temp switch for debug trace.\n"));
#endif /* DEBUG_AARCH64 */

  fprintf (stream, _("\n"));
}