/* SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy,
 * modify, merge, publish, distribute, sublicense, and/or sell copies
 * of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Copyright:
 *   2020      Evan Nemerson <evan@nemerson.com>
 */
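
/* Portable implementations of AMD's XOP intrinsics.  When
 * SIMDE_X86_XOP_NATIVE is defined the native intrinsics are used directly;
 * otherwise each function falls back to AVX-512/AVX2/SSE2, NEON, AltiVec,
 * WASM SIMD, GCC-style vector extensions, or a plain scalar loop, in
 * roughly that order of preference. */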
#if !defined(SIMDE_X86_XOP_H)
#define SIMDE_X86_XOP_H

#include "avx2.h"

#if !defined(SIMDE_X86_XOP_NATIVE) && defined(SIMDE_ENABLE_NATIVE_ALIASES)
#  define SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES
#endif

HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
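
/* simde_mm_cmov_si128 is a bitwise select: each bit of the result is taken
 * from `a` where the corresponding bit of `c` is 1 and from `b` where it is
 * 0, i.e. r = (c & a) | (~c & b).
 *
 * Illustrative usage (a sketch, not part of this file; the SSE2-style
 * helper below comes from SIMDe's sse2.h, pulled in via avx2.h):
 *
 *   simde__m128i lo = simde_mm_set1_epi8(0x0F);
 *   // Keep the low nibble of each byte of `a`, the high nibble of `b`.
 *   simde__m128i r  = simde_mm_cmov_si128(a, b, lo);
 */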
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_cmov_si128 (simde__m128i a, simde__m128i b, simde__m128i c) {
  #if defined(SIMDE_X86_XOP_NATIVE)
    return _mm_cmov_si128(a, b, c);
  #elif defined(SIMDE_X86_AVX512VL_NATIVE)
    return _mm_ternarylogic_epi32(a, b, c, 0xe4);
  #elif defined(SIMDE_X86_SSE2_NATIVE)
    return _mm_or_si128(_mm_and_si128(c, a), _mm_andnot_si128(c, b));
  #else
    simde__m128i_private
      r_,
      a_ = simde__m128i_to_private(a),
      b_ = simde__m128i_to_private(b),
      c_ = simde__m128i_to_private(c);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_i8 = vbslq_s8(c_.neon_u8, a_.neon_i8, b_.neon_i8);
    #elif defined(SIMDE_WASM_SIMD128_NATIVE)
      r_.wasm_v128 = wasm_v128_bitselect(a_.wasm_v128, b_.wasm_v128, c_.wasm_v128);
    #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
      r_.altivec_i32 = vec_sel(b_.altivec_i32, a_.altivec_i32, c_.altivec_u32);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.i32f = (c_.i32f & a_.i32f) | (~c_.i32f & b_.i32f);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.i32f) / sizeof(r_.i32f[0])) ; i++) {
        r_.i32f[i] = (c_.i32f[i] & a_.i32f[i]) | (~c_.i32f[i] & b_.i32f[i]);
      }
    #endif

    return simde__m128i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_cmov_si128(a, b, c) simde_mm_cmov_si128((a), (b), (c))
#endif
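
/* 256-bit variant of the bitwise select above.  Note the extra guards on
 * the native path: _mm256_cmov_si256 also requires AVX and is skipped on
 * compilers with known bugs (GCC #98521, MCST LCC).  When no 256-bit
 * implementation is available and the natural vector size is at most 128
 * bits, the operation is performed per 128-bit half via
 * simde_mm_cmov_si128. */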
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_cmov_si256 (simde__m256i a, simde__m256i b, simde__m256i c) {
  #if defined(SIMDE_X86_XOP_NATIVE) && defined(SIMDE_X86_AVX_NATIVE) && !defined(SIMDE_BUG_GCC_98521) && !defined(SIMDE_BUG_MCST_LCC_MISSING_CMOV_M256)
    return _mm256_cmov_si256(a, b, c);
  #elif defined(SIMDE_X86_AVX512VL_NATIVE)
    return _mm256_ternarylogic_epi32(a, b, c, 0xe4);
  #elif defined(SIMDE_X86_AVX2_NATIVE)
    return _mm256_or_si256(_mm256_and_si256(c, a), _mm256_andnot_si256(c, b));
  #else
    simde__m256i_private
      r_,
      a_ = simde__m256i_to_private(a),
      b_ = simde__m256i_to_private(b),
      c_ = simde__m256i_to_private(c);

    #if SIMDE_NATURAL_VECTOR_SIZE_LE(128)
      for (size_t i = 0 ; i < (sizeof(r_.m128i) / sizeof(r_.m128i[0])) ; i++) {
        r_.m128i[i] = simde_mm_cmov_si128(a_.m128i[i], b_.m128i[i], c_.m128i[i]);
      }
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.i32f) / sizeof(r_.i32f[0])) ; i++) {
        r_.i32f[i] = (c_.i32f[i] & a_.i32f[i]) | (~c_.i32f[i] & b_.i32f[i]);
      }
    #endif

    return simde__m256i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm256_cmov_si256(a, b, c) simde_mm256_cmov_si256((a), (b), (c))
#endif
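
/* The simde_mm_com{eq,ge,gt,le,lt}_ep[iu]{8,16,32,64} family implements
 * XOP's PCOM instructions: element-wise comparisons that set every bit of a
 * result element to 1 when the predicate holds and to 0 otherwise.  Some
 * compilers expose these natively as a single _mm_com_ep[iu]N intrinsic
 * taking a _MM_PCOMCTRL_* selector, others as separate _mm_com<op>_ep[iu]N
 * functions; the dispatch below handles both spellings.
 *
 * Illustrative usage (a sketch; the helper is SIMDe's portable SSE2
 * version):
 *
 *   simde__m128i a = simde_mm_set1_epi8(3);
 *   simde__m128i b = simde_mm_set1_epi8(5);
 *   simde__m128i m = simde_mm_comlt_epi8(a, b);  // every byte == 0xFF
 */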
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_comeq_epi8 (simde__m128i a, simde__m128i b) {
  #if defined(SIMDE_X86_XOP_NATIVE) && defined(_MM_PCOMCTRL_EQ)
    return _mm_com_epi8(a, b, _MM_PCOMCTRL_EQ);
  #elif defined(SIMDE_X86_XOP_NATIVE)
    return _mm_comeq_epi8(a, b);
  #else
    simde__m128i_private
      r_,
      a_ = simde__m128i_to_private(a),
      b_ = simde__m128i_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_u8 = vceqq_s8(a_.neon_i8, b_.neon_i8);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.i8 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i8), a_.i8 == b_.i8);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.i8) / sizeof(r_.i8[0])) ; i++) {
        r_.i8[i] = (a_.i8[i] == b_.i8[i]) ? ~INT8_C(0) : INT8_C(0);
      }
    #endif

    return simde__m128i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_comeq_epi8(a, b) simde_mm_comeq_epi8((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_comeq_epi16 (simde__m128i a, simde__m128i b) {
  #if defined(SIMDE_X86_XOP_NATIVE) && defined(_MM_PCOMCTRL_EQ)
    return _mm_com_epi16(a, b, _MM_PCOMCTRL_EQ);
  #elif defined(SIMDE_X86_XOP_NATIVE)
    return _mm_comeq_epi16(a, b);
  #else
    simde__m128i_private
      r_,
      a_ = simde__m128i_to_private(a),
      b_ = simde__m128i_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_u16 = vceqq_s16(a_.neon_i16, b_.neon_i16);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.i16 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i16), a_.i16 == b_.i16);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
        r_.i16[i] = (a_.i16[i] == b_.i16[i]) ? ~INT16_C(0) : INT16_C(0);
      }
    #endif

    return simde__m128i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_comeq_epi16(a, b) simde_mm_comeq_epi16((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_comeq_epi32 (simde__m128i a, simde__m128i b) {
  #if defined(SIMDE_X86_XOP_NATIVE) && defined(_MM_PCOMCTRL_EQ)
    return _mm_com_epi32(a, b, _MM_PCOMCTRL_EQ);
  #elif defined(SIMDE_X86_XOP_NATIVE)
    return _mm_comeq_epi32(a, b);
  #else
    simde__m128i_private
      r_,
      a_ = simde__m128i_to_private(a),
      b_ = simde__m128i_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_u32 = vceqq_s32(a_.neon_i32, b_.neon_i32);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.i32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i32), a_.i32 == b_.i32);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
        r_.i32[i] = (a_.i32[i] == b_.i32[i]) ? ~INT32_C(0) : INT32_C(0);
      }
    #endif

    return simde__m128i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_comeq_epi32(a, b) simde_mm_comeq_epi32((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_comeq_epi64 (simde__m128i a, simde__m128i b) {
  #if defined(SIMDE_X86_XOP_NATIVE) && defined(_MM_PCOMCTRL_EQ)
    return _mm_com_epi64(a, b, _MM_PCOMCTRL_EQ);
  #elif defined(SIMDE_X86_XOP_NATIVE)
    return _mm_comeq_epi64(a, b);
  #else
    simde__m128i_private
      r_,
      a_ = simde__m128i_to_private(a),
      b_ = simde__m128i_to_private(b);

    #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
      r_.neon_u64 = vceqq_s64(a_.neon_i64, b_.neon_i64);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.i64 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i64), a_.i64 == b_.i64);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.i64) / sizeof(r_.i64[0])) ; i++) {
        r_.i64[i] = (a_.i64[i] == b_.i64[i]) ? ~INT64_C(0) : INT64_C(0);
      }
    #endif

    return simde__m128i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_comeq_epi64(a, b) simde_mm_comeq_epi64((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_comeq_epu8 (simde__m128i a, simde__m128i b) {
  #if defined(SIMDE_X86_XOP_NATIVE) && defined(_MM_PCOMCTRL_EQ)
    return _mm_com_epu8(a, b, _MM_PCOMCTRL_EQ);
  #elif defined(SIMDE_X86_XOP_NATIVE)
    return _mm_comeq_epu8(a, b);
  #else
    simde__m128i_private
      r_,
      a_ = simde__m128i_to_private(a),
      b_ = simde__m128i_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_u8 = vceqq_u8(a_.neon_u8, b_.neon_u8);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.u8 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u8), a_.u8 == b_.u8);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) {
        r_.u8[i] = (a_.u8[i] == b_.u8[i]) ? ~INT8_C(0) : INT8_C(0);
      }
    #endif

    return simde__m128i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_comeq_epu8(a, b) simde_mm_comeq_epu8((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_comeq_epu16 (simde__m128i a, simde__m128i b) {
  #if defined(SIMDE_X86_XOP_NATIVE) && defined(_MM_PCOMCTRL_EQ)
    return _mm_com_epu16(a, b, _MM_PCOMCTRL_EQ);
  #elif defined(SIMDE_X86_XOP_NATIVE)
    return _mm_comeq_epu16(a, b);
  #else
    simde__m128i_private
      r_,
      a_ = simde__m128i_to_private(a),
      b_ = simde__m128i_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_u16 = vceqq_u16(a_.neon_u16, b_.neon_u16);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.u16 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u16), a_.u16 == b_.u16);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) {
        r_.u16[i] = (a_.u16[i] == b_.u16[i]) ? ~INT16_C(0) : INT16_C(0);
      }
    #endif

    return simde__m128i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_comeq_epu16(a, b) simde_mm_comeq_epu16((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_comeq_epu32 (simde__m128i a, simde__m128i b) {
  #if defined(SIMDE_X86_XOP_NATIVE) && defined(_MM_PCOMCTRL_EQ)
    return _mm_com_epu32(a, b, _MM_PCOMCTRL_EQ);
  #elif defined(SIMDE_X86_XOP_NATIVE)
    return _mm_comeq_epu32(a, b);
  #else
    simde__m128i_private
      r_,
      a_ = simde__m128i_to_private(a),
      b_ = simde__m128i_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_u32 = vceqq_u32(a_.neon_u32, b_.neon_u32);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.u32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u32), a_.u32 == b_.u32);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) {
        r_.u32[i] = (a_.u32[i] == b_.u32[i]) ? ~INT32_C(0) : INT32_C(0);
      }
    #endif

    return simde__m128i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_comeq_epu32(a, b) simde_mm_comeq_epu32((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_comeq_epu64 (simde__m128i a, simde__m128i b) {
  #if defined(SIMDE_X86_XOP_NATIVE) && defined(_MM_PCOMCTRL_EQ)
    return _mm_com_epu64(a, b, _MM_PCOMCTRL_EQ);
  #elif defined(SIMDE_X86_XOP_NATIVE)
    return _mm_comeq_epu64(a, b);
  #else
    simde__m128i_private
      r_,
      a_ = simde__m128i_to_private(a),
      b_ = simde__m128i_to_private(b);

    #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
      r_.neon_u64 = vceqq_u64(a_.neon_u64, b_.neon_u64);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.u64 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u64), a_.u64 == b_.u64);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.u64) / sizeof(r_.u64[0])) ; i++) {
        r_.u64[i] = (a_.u64[i] == b_.u64[i]) ? ~INT64_C(0) : INT64_C(0);
      }
    #endif

    return simde__m128i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_comeq_epu64(a, b) simde_mm_comeq_epu64((a), (b))
#endif
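
/* Greater-than-or-equal comparisons, signed then unsigned, following the
 * same dispatch pattern as the equality family above. */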
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_comge_epi8 (simde__m128i a, simde__m128i b) {
  #if defined(SIMDE_X86_XOP_NATIVE) && defined(_MM_PCOMCTRL_GE)
    return _mm_com_epi8(a, b, _MM_PCOMCTRL_GE);
  #elif defined(SIMDE_X86_XOP_NATIVE)
    return _mm_comge_epi8(a, b);
  #else
    simde__m128i_private
      r_,
      a_ = simde__m128i_to_private(a),
      b_ = simde__m128i_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_u8 = vcgeq_s8(a_.neon_i8, b_.neon_i8);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.i8 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i8), a_.i8 >= b_.i8);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.i8) / sizeof(r_.i8[0])) ; i++) {
        r_.i8[i] = (a_.i8[i] >= b_.i8[i]) ? ~INT8_C(0) : INT8_C(0);
      }
    #endif

    return simde__m128i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_comge_epi8(a, b) simde_mm_comge_epi8((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_comge_epi16 (simde__m128i a, simde__m128i b) {
  #if defined(SIMDE_X86_XOP_NATIVE) && defined(_MM_PCOMCTRL_GE)
    return _mm_com_epi16(a, b, _MM_PCOMCTRL_GE);
  #elif defined(SIMDE_X86_XOP_NATIVE)
    return _mm_comge_epi16(a, b);
  #else
    simde__m128i_private
      r_,
      a_ = simde__m128i_to_private(a),
      b_ = simde__m128i_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_u16 = vcgeq_s16(a_.neon_i16, b_.neon_i16);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.i16 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i16), a_.i16 >= b_.i16);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
        r_.i16[i] = (a_.i16[i] >= b_.i16[i]) ? ~INT16_C(0) : INT16_C(0);
      }
    #endif

    return simde__m128i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_comge_epi16(a, b) simde_mm_comge_epi16((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_comge_epi32 (simde__m128i a, simde__m128i b) {
  #if defined(SIMDE_X86_XOP_NATIVE) && defined(_MM_PCOMCTRL_GE)
    return _mm_com_epi32(a, b, _MM_PCOMCTRL_GE);
  #elif defined(SIMDE_X86_XOP_NATIVE)
    return _mm_comge_epi32(a, b);
  #else
    simde__m128i_private
      r_,
      a_ = simde__m128i_to_private(a),
      b_ = simde__m128i_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_u32 = vcgeq_s32(a_.neon_i32, b_.neon_i32);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.i32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i32), a_.i32 >= b_.i32);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
        r_.i32[i] = (a_.i32[i] >= b_.i32[i]) ? ~INT32_C(0) : INT32_C(0);
      }
    #endif

    return simde__m128i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_comge_epi32(a, b) simde_mm_comge_epi32((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_comge_epi64 (simde__m128i a, simde__m128i b) {
  #if defined(SIMDE_X86_XOP_NATIVE) && defined(_MM_PCOMCTRL_GE)
    return _mm_com_epi64(a, b, _MM_PCOMCTRL_GE);
  #elif defined(SIMDE_X86_XOP_NATIVE)
    return _mm_comge_epi64(a, b);
  #else
    simde__m128i_private
      r_,
      a_ = simde__m128i_to_private(a),
      b_ = simde__m128i_to_private(b);

    #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
      r_.neon_u64 = vcgeq_s64(a_.neon_i64, b_.neon_i64);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.i64 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i64), a_.i64 >= b_.i64);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.i64) / sizeof(r_.i64[0])) ; i++) {
        r_.i64[i] = (a_.i64[i] >= b_.i64[i]) ? ~INT64_C(0) : INT64_C(0);
      }
    #endif

    return simde__m128i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_comge_epi64(a, b) simde_mm_comge_epi64((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_comge_epu8 (simde__m128i a, simde__m128i b) {
  #if defined(SIMDE_X86_XOP_NATIVE) && defined(_MM_PCOMCTRL_GE)
    return _mm_com_epu8(a, b, _MM_PCOMCTRL_GE);
  #elif defined(SIMDE_X86_XOP_NATIVE)
    return _mm_comge_epu8(a, b);
  #else
    simde__m128i_private
      r_,
      a_ = simde__m128i_to_private(a),
      b_ = simde__m128i_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_u8 = vcgeq_u8(a_.neon_u8, b_.neon_u8);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.u8 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u8), a_.u8 >= b_.u8);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) {
        r_.u8[i] = (a_.u8[i] >= b_.u8[i]) ? ~INT8_C(0) : INT8_C(0);
      }
    #endif

    return simde__m128i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_comge_epu8(a, b) simde_mm_comge_epu8((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_comge_epu16 (simde__m128i a, simde__m128i b) {
  #if defined(SIMDE_X86_XOP_NATIVE) && defined(_MM_PCOMCTRL_GE)
    return _mm_com_epu16(a, b, _MM_PCOMCTRL_GE);
  #elif defined(SIMDE_X86_XOP_NATIVE)
    return _mm_comge_epu16(a, b);
  #else
    simde__m128i_private
      r_,
      a_ = simde__m128i_to_private(a),
      b_ = simde__m128i_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_u16 = vcgeq_u16(a_.neon_u16, b_.neon_u16);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.u16 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u16), a_.u16 >= b_.u16);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) {
        r_.u16[i] = (a_.u16[i] >= b_.u16[i]) ? ~INT16_C(0) : INT16_C(0);
      }
    #endif

    return simde__m128i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_comge_epu16(a, b) simde_mm_comge_epu16((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_comge_epu32 (simde__m128i a, simde__m128i b) {
  #if defined(SIMDE_X86_XOP_NATIVE) && defined(_MM_PCOMCTRL_GE)
    return _mm_com_epu32(a, b, _MM_PCOMCTRL_GE);
  #elif defined(SIMDE_X86_XOP_NATIVE)
    return _mm_comge_epu32(a, b);
  #else
    simde__m128i_private
      r_,
      a_ = simde__m128i_to_private(a),
      b_ = simde__m128i_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_u32 = vcgeq_u32(a_.neon_u32, b_.neon_u32);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.u32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u32), a_.u32 >= b_.u32);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) {
        r_.u32[i] = (a_.u32[i] >= b_.u32[i]) ? ~INT32_C(0) : INT32_C(0);
      }
    #endif

    return simde__m128i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_comge_epu32(a, b) simde_mm_comge_epu32((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_comge_epu64 (simde__m128i a, simde__m128i b) {
  #if defined(SIMDE_X86_XOP_NATIVE) && defined(_MM_PCOMCTRL_GE)
    return _mm_com_epu64(a, b, _MM_PCOMCTRL_GE);
  #elif defined(SIMDE_X86_XOP_NATIVE)
    return _mm_comge_epu64(a, b);
  #else
    simde__m128i_private
      r_,
      a_ = simde__m128i_to_private(a),
      b_ = simde__m128i_to_private(b);

    #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
      r_.neon_u64 = vcgeq_u64(a_.neon_u64, b_.neon_u64);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.u64 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u64), a_.u64 >= b_.u64);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.u64) / sizeof(r_.u64[0])) ; i++) {
        r_.u64[i] = (a_.u64[i] >= b_.u64[i]) ? ~INT64_C(0) : INT64_C(0);
      }
    #endif

    return simde__m128i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_comge_epu64(a, b) simde_mm_comge_epu64((a), (b))
#endif
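
/* Strict greater-than comparisons, signed then unsigned. */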
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_comgt_epi8 (simde__m128i a, simde__m128i b) {
  #if defined(SIMDE_X86_XOP_NATIVE) && defined(_MM_PCOMCTRL_GT)
    return _mm_com_epi8(a, b, _MM_PCOMCTRL_GT);
  #elif defined(SIMDE_X86_XOP_NATIVE)
    return _mm_comgt_epi8(a, b);
  #else
    simde__m128i_private
      r_,
      a_ = simde__m128i_to_private(a),
      b_ = simde__m128i_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_u8 = vcgtq_s8(a_.neon_i8, b_.neon_i8);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.i8 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i8), a_.i8 > b_.i8);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.i8) / sizeof(r_.i8[0])) ; i++) {
        r_.i8[i] = (a_.i8[i] > b_.i8[i]) ? ~INT8_C(0) : INT8_C(0);
      }
    #endif

    return simde__m128i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_comgt_epi8(a, b) simde_mm_comgt_epi8((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_comgt_epi16 (simde__m128i a, simde__m128i b) {
  #if defined(SIMDE_X86_XOP_NATIVE) && defined(_MM_PCOMCTRL_GT)
    return _mm_com_epi16(a, b, _MM_PCOMCTRL_GT);
  #elif defined(SIMDE_X86_XOP_NATIVE)
    return _mm_comgt_epi16(a, b);
  #else
    simde__m128i_private
      r_,
      a_ = simde__m128i_to_private(a),
      b_ = simde__m128i_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_u16 = vcgtq_s16(a_.neon_i16, b_.neon_i16);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.i16 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i16), a_.i16 > b_.i16);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
        r_.i16[i] = (a_.i16[i] > b_.i16[i]) ? ~INT16_C(0) : INT16_C(0);
      }
    #endif

    return simde__m128i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_comgt_epi16(a, b) simde_mm_comgt_epi16((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_comgt_epi32 (simde__m128i a, simde__m128i b) {
  #if defined(SIMDE_X86_XOP_NATIVE) && defined(_MM_PCOMCTRL_GT)
    return _mm_com_epi32(a, b, _MM_PCOMCTRL_GT);
  #elif defined(SIMDE_X86_XOP_NATIVE)
    return _mm_comgt_epi32(a, b);
  #else
    simde__m128i_private
      r_,
      a_ = simde__m128i_to_private(a),
      b_ = simde__m128i_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_u32 = vcgtq_s32(a_.neon_i32, b_.neon_i32);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.i32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i32), a_.i32 > b_.i32);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
        r_.i32[i] = (a_.i32[i] > b_.i32[i]) ? ~INT32_C(0) : INT32_C(0);
      }
    #endif

    return simde__m128i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_comgt_epi32(a, b) simde_mm_comgt_epi32((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_comgt_epi64 (simde__m128i a, simde__m128i b) {
  #if defined(SIMDE_X86_XOP_NATIVE) && defined(_MM_PCOMCTRL_GT)
    return _mm_com_epi64(a, b, _MM_PCOMCTRL_GT);
  #elif defined(SIMDE_X86_XOP_NATIVE)
    return _mm_comgt_epi64(a, b);
  #else
    simde__m128i_private
      r_,
      a_ = simde__m128i_to_private(a),
      b_ = simde__m128i_to_private(b);

    #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
      r_.neon_u64 = vcgtq_s64(a_.neon_i64, b_.neon_i64);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.i64 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i64), a_.i64 > b_.i64);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.i64) / sizeof(r_.i64[0])) ; i++) {
        r_.i64[i] = (a_.i64[i] > b_.i64[i]) ? ~INT64_C(0) : INT64_C(0);
      }
    #endif

    return simde__m128i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_comgt_epi64(a, b) simde_mm_comgt_epi64((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_comgt_epu8 (simde__m128i a, simde__m128i b) {
  #if defined(SIMDE_X86_XOP_NATIVE) && defined(_MM_PCOMCTRL_GT)
    return _mm_com_epu8(a, b, _MM_PCOMCTRL_GT);
  #elif defined(SIMDE_X86_XOP_NATIVE)
    return _mm_comgt_epu8(a, b);
  #else
    simde__m128i_private
      r_,
      a_ = simde__m128i_to_private(a),
      b_ = simde__m128i_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_u8 = vcgtq_u8(a_.neon_u8, b_.neon_u8);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.u8 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u8), a_.u8 > b_.u8);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) {
        r_.u8[i] = (a_.u8[i] > b_.u8[i]) ? ~INT8_C(0) : INT8_C(0);
      }
    #endif

    return simde__m128i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_comgt_epu8(a, b) simde_mm_comgt_epu8((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_comgt_epu16 (simde__m128i a, simde__m128i b) {
  #if defined(SIMDE_X86_XOP_NATIVE) && defined(_MM_PCOMCTRL_GT)
    return _mm_com_epu16(a, b, _MM_PCOMCTRL_GT);
  #elif defined(SIMDE_X86_XOP_NATIVE)
    return _mm_comgt_epu16(a, b);
  #else
    simde__m128i_private
      r_,
      a_ = simde__m128i_to_private(a),
      b_ = simde__m128i_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_u16 = vcgtq_u16(a_.neon_u16, b_.neon_u16);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.u16 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u16), a_.u16 > b_.u16);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) {
        r_.u16[i] = (a_.u16[i] > b_.u16[i]) ? ~INT16_C(0) : INT16_C(0);
      }
    #endif

    return simde__m128i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_comgt_epu16(a, b) simde_mm_comgt_epu16((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_comgt_epu32 (simde__m128i a, simde__m128i b) {
  #if defined(SIMDE_X86_XOP_NATIVE) && defined(_MM_PCOMCTRL_GT)
    return _mm_com_epu32(a, b, _MM_PCOMCTRL_GT);
  #elif defined(SIMDE_X86_XOP_NATIVE)
    return _mm_comgt_epu32(a, b);
  #else
    simde__m128i_private
      r_,
      a_ = simde__m128i_to_private(a),
      b_ = simde__m128i_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_u32 = vcgtq_u32(a_.neon_u32, b_.neon_u32);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.u32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u32), a_.u32 > b_.u32);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) {
        r_.u32[i] = (a_.u32[i] > b_.u32[i]) ? ~INT32_C(0) : INT32_C(0);
      }
    #endif

    return simde__m128i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_comgt_epu32(a, b) simde_mm_comgt_epu32((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_comgt_epu64 (simde__m128i a, simde__m128i b) {
  #if defined(SIMDE_X86_XOP_NATIVE) && defined(_MM_PCOMCTRL_GT)
    return _mm_com_epu64(a, b, _MM_PCOMCTRL_GT);
  #elif defined(SIMDE_X86_XOP_NATIVE)
    return _mm_comgt_epu64(a, b);
  #else
    simde__m128i_private
      r_,
      a_ = simde__m128i_to_private(a),
      b_ = simde__m128i_to_private(b);

    #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
      r_.neon_u64 = vcgtq_u64(a_.neon_u64, b_.neon_u64);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.u64 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u64), a_.u64 > b_.u64);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.u64) / sizeof(r_.u64[0])) ; i++) {
        r_.u64[i] = (a_.u64[i] > b_.u64[i]) ? ~INT64_C(0) : INT64_C(0);
      }
    #endif

    return simde__m128i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_comgt_epu64(a, b) simde_mm_comgt_epu64((a), (b))
#endif
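
/* Less-than-or-equal comparisons, signed then unsigned. */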
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_comle_epi8 (simde__m128i a, simde__m128i b) {
  #if defined(SIMDE_X86_XOP_NATIVE) && defined(_MM_PCOMCTRL_LE)
    return _mm_com_epi8(a, b, _MM_PCOMCTRL_LE);
  #elif defined(SIMDE_X86_XOP_NATIVE)
    return _mm_comle_epi8(a, b);
  #else
    simde__m128i_private
      r_,
      a_ = simde__m128i_to_private(a),
      b_ = simde__m128i_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_u8 = vcleq_s8(a_.neon_i8, b_.neon_i8);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.i8 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i8), a_.i8 <= b_.i8);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.i8) / sizeof(r_.i8[0])) ; i++) {
        r_.i8[i] = (a_.i8[i] <= b_.i8[i]) ? ~INT8_C(0) : INT8_C(0);
      }
    #endif

    return simde__m128i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_comle_epi8(a, b) simde_mm_comle_epi8((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_comle_epi16 (simde__m128i a, simde__m128i b) {
  #if defined(SIMDE_X86_XOP_NATIVE) && defined(_MM_PCOMCTRL_LE)
    return _mm_com_epi16(a, b, _MM_PCOMCTRL_LE);
  #elif defined(SIMDE_X86_XOP_NATIVE)
    return _mm_comle_epi16(a, b);
  #else
    simde__m128i_private
      r_,
      a_ = simde__m128i_to_private(a),
      b_ = simde__m128i_to_private(b);

    /* NEON branch restored for consistency with the sibling functions. */
    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_u16 = vcleq_s16(a_.neon_i16, b_.neon_i16);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.i16 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i16), a_.i16 <= b_.i16);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
        r_.i16[i] = (a_.i16[i] <= b_.i16[i]) ? ~INT16_C(0) : INT16_C(0);
      }
    #endif

    return simde__m128i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_comle_epi16(a, b) simde_mm_comle_epi16((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_comle_epi32 (simde__m128i a, simde__m128i b) {
  #if defined(SIMDE_X86_XOP_NATIVE) && defined(_MM_PCOMCTRL_LE)
    return _mm_com_epi32(a, b, _MM_PCOMCTRL_LE);
  #elif defined(SIMDE_X86_XOP_NATIVE)
    return _mm_comle_epi32(a, b);
  #else
    simde__m128i_private
      r_,
      a_ = simde__m128i_to_private(a),
      b_ = simde__m128i_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_u32 = vcleq_s32(a_.neon_i32, b_.neon_i32);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.i32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i32), a_.i32 <= b_.i32);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
        r_.i32[i] = (a_.i32[i] <= b_.i32[i]) ? ~INT32_C(0) : INT32_C(0);
      }
    #endif

    return simde__m128i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_comle_epi32(a, b) simde_mm_comle_epi32((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_comle_epi64 (simde__m128i a, simde__m128i b) {
  #if defined(SIMDE_X86_XOP_NATIVE) && defined(_MM_PCOMCTRL_LE)
    return _mm_com_epi64(a, b, _MM_PCOMCTRL_LE);
  #elif defined(SIMDE_X86_XOP_NATIVE)
    return _mm_comle_epi64(a, b);
  #else
    simde__m128i_private
      r_,
      a_ = simde__m128i_to_private(a),
      b_ = simde__m128i_to_private(b);

    #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
      r_.neon_u64 = vcleq_s64(a_.neon_i64, b_.neon_i64);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.i64 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i64), a_.i64 <= b_.i64);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.i64) / sizeof(r_.i64[0])) ; i++) {
        r_.i64[i] = (a_.i64[i] <= b_.i64[i]) ? ~INT64_C(0) : INT64_C(0);
      }
    #endif

    return simde__m128i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_comle_epi64(a, b) simde_mm_comle_epi64((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_comle_epu8 (simde__m128i a, simde__m128i b) {
  #if defined(SIMDE_X86_XOP_NATIVE) && defined(_MM_PCOMCTRL_LE)
    return _mm_com_epu8(a, b, _MM_PCOMCTRL_LE);
  #elif defined(SIMDE_X86_XOP_NATIVE)
    return _mm_comle_epu8(a, b);
  #else
    simde__m128i_private
      r_,
      a_ = simde__m128i_to_private(a),
      b_ = simde__m128i_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_u8 = vcleq_u8(a_.neon_u8, b_.neon_u8);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.u8 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u8), a_.u8 <= b_.u8);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) {
        r_.u8[i] = (a_.u8[i] <= b_.u8[i]) ? ~INT8_C(0) : INT8_C(0);
      }
    #endif

    return simde__m128i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_comle_epu8(a, b) simde_mm_comle_epu8((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_comle_epu16 (simde__m128i a, simde__m128i b) {
  #if defined(SIMDE_X86_XOP_NATIVE) && defined(_MM_PCOMCTRL_LE)
    return _mm_com_epu16(a, b, _MM_PCOMCTRL_LE);
  #elif defined(SIMDE_X86_XOP_NATIVE)
    return _mm_comle_epu16(a, b);
  #else
    simde__m128i_private
      r_,
      a_ = simde__m128i_to_private(a),
      b_ = simde__m128i_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_u16 = vcleq_u16(a_.neon_u16, b_.neon_u16);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.u16 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u16), a_.u16 <= b_.u16);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) {
        r_.u16[i] = (a_.u16[i] <= b_.u16[i]) ? ~INT16_C(0) : INT16_C(0);
      }
    #endif

    return simde__m128i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_comle_epu16(a, b) simde_mm_comle_epu16((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_comle_epu32 (simde__m128i a, simde__m128i b) {
  #if defined(SIMDE_X86_XOP_NATIVE) && defined(_MM_PCOMCTRL_LE)
    return _mm_com_epu32(a, b, _MM_PCOMCTRL_LE);
  #elif defined(SIMDE_X86_XOP_NATIVE)
    return _mm_comle_epu32(a, b);
  #else
    simde__m128i_private
      r_,
      a_ = simde__m128i_to_private(a),
      b_ = simde__m128i_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_u32 = vcleq_u32(a_.neon_u32, b_.neon_u32);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.u32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u32), a_.u32 <= b_.u32);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) {
        r_.u32[i] = (a_.u32[i] <= b_.u32[i]) ? ~INT32_C(0) : INT32_C(0);
      }
    #endif

    return simde__m128i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_comle_epu32(a, b) simde_mm_comle_epu32((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_comle_epu64 (simde__m128i a, simde__m128i b) {
  #if defined(SIMDE_X86_XOP_NATIVE) && defined(_MM_PCOMCTRL_LE)
    return _mm_com_epu64(a, b, _MM_PCOMCTRL_LE);
  #elif defined(SIMDE_X86_XOP_NATIVE)
    return _mm_comle_epu64(a, b);
  #else
    simde__m128i_private
      r_,
      a_ = simde__m128i_to_private(a),
      b_ = simde__m128i_to_private(b);

    #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
      r_.neon_u64 = vcleq_u64(a_.neon_u64, b_.neon_u64);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.u64 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u64), a_.u64 <= b_.u64);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.u64) / sizeof(r_.u64[0])) ; i++) {
        r_.u64[i] = (a_.u64[i] <= b_.u64[i]) ? ~INT64_C(0) : INT64_C(0);
      }
    #endif

    return simde__m128i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_comle_epu64(a, b) simde_mm_comle_epu64((a), (b))
#endif
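
/* Strict less-than comparisons, signed then unsigned. */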
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_comlt_epi8 (simde__m128i a, simde__m128i b) {
  #if defined(SIMDE_X86_XOP_NATIVE) && defined(_MM_PCOMCTRL_LT)
    return _mm_com_epi8(a, b, _MM_PCOMCTRL_LT);
  #elif defined(SIMDE_X86_XOP_NATIVE)
    return _mm_comlt_epi8(a, b);
  #else
    simde__m128i_private
      r_,
      a_ = simde__m128i_to_private(a),
      b_ = simde__m128i_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_u8 = vcltq_s8(a_.neon_i8, b_.neon_i8);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.i8 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i8), a_.i8 < b_.i8);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.i8) / sizeof(r_.i8[0])) ; i++) {
        r_.i8[i] = (a_.i8[i] < b_.i8[i]) ? ~INT8_C(0) : INT8_C(0);
      }
    #endif

    return simde__m128i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_comlt_epi8(a, b) simde_mm_comlt_epi8((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_comlt_epi16 (simde__m128i a, simde__m128i b) {
  #if defined(SIMDE_X86_XOP_NATIVE) && defined(_MM_PCOMCTRL_LT)
    return _mm_com_epi16(a, b, _MM_PCOMCTRL_LT);
  #elif defined(SIMDE_X86_XOP_NATIVE)
    return _mm_comlt_epi16(a, b);
  #else
    simde__m128i_private
      r_,
      a_ = simde__m128i_to_private(a),
      b_ = simde__m128i_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_u16 = vcltq_s16(a_.neon_i16, b_.neon_i16);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.i16 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i16), a_.i16 < b_.i16);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
        r_.i16[i] = (a_.i16[i] < b_.i16[i]) ? ~INT16_C(0) : INT16_C(0);
      }
    #endif

    return simde__m128i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_comlt_epi16(a, b) simde_mm_comlt_epi16((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_comlt_epi32 (simde__m128i a, simde__m128i b) {
  #if defined(SIMDE_X86_XOP_NATIVE) && defined(_MM_PCOMCTRL_LT)
    return _mm_com_epi32(a, b, _MM_PCOMCTRL_LT);
  #elif defined(SIMDE_X86_XOP_NATIVE)
    return _mm_comlt_epi32(a, b);
  #else
    simde__m128i_private
      r_,
      a_ = simde__m128i_to_private(a),
      b_ = simde__m128i_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_u32 = vcltq_s32(a_.neon_i32, b_.neon_i32);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.i32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i32), a_.i32 < b_.i32);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
        r_.i32[i] = (a_.i32[i] < b_.i32[i]) ? ~INT32_C(0) : INT32_C(0);
      }
    #endif

    return simde__m128i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_comlt_epi32(a, b) simde_mm_comlt_epi32((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_comlt_epi64 (simde__m128i a, simde__m128i b) {
  #if defined(SIMDE_X86_XOP_NATIVE) && defined(_MM_PCOMCTRL_LT)
    return _mm_com_epi64(a, b, _MM_PCOMCTRL_LT);
  #elif defined(SIMDE_X86_XOP_NATIVE)
    return _mm_comlt_epi64(a, b);
  #else
    simde__m128i_private
      r_,
      a_ = simde__m128i_to_private(a),
      b_ = simde__m128i_to_private(b);

    #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
      r_.neon_u64 = vcltq_s64(a_.neon_i64, b_.neon_i64);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.i64 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i64), a_.i64 < b_.i64);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.i64) / sizeof(r_.i64[0])) ; i++) {
        r_.i64[i] = (a_.i64[i] < b_.i64[i]) ? ~INT64_C(0) : INT64_C(0);
      }
    #endif

    return simde__m128i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_comlt_epi64(a, b) simde_mm_comlt_epi64((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_comlt_epu8 (simde__m128i a, simde__m128i b) {
  #if defined(SIMDE_X86_XOP_NATIVE) && defined(_MM_PCOMCTRL_LT)
    return _mm_com_epu8(a, b, _MM_PCOMCTRL_LT);
  #elif defined(SIMDE_X86_XOP_NATIVE)
    return _mm_comlt_epu8(a, b);
  #else
    simde__m128i_private
      r_,
      a_ = simde__m128i_to_private(a),
      b_ = simde__m128i_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_u8 = vcltq_u8(a_.neon_u8, b_.neon_u8);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.u8 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u8), a_.u8 < b_.u8);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) {
        r_.u8[i] = (a_.u8[i] < b_.u8[i]) ? ~INT8_C(0) : INT8_C(0);
      }
    #endif

    return simde__m128i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_comlt_epu8(a, b) simde_mm_comlt_epu8((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_comlt_epu16 (simde__m128i a, simde__m128i b) {
  #if defined(SIMDE_X86_XOP_NATIVE) && defined(_MM_PCOMCTRL_LT)
    return _mm_com_epu16(a, b, _MM_PCOMCTRL_LT);
  #elif defined(SIMDE_X86_XOP_NATIVE)
    return _mm_comlt_epu16(a, b);
  #else
    simde__m128i_private
      r_,
      a_ = simde__m128i_to_private(a),
      b_ = simde__m128i_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_u16 = vcltq_u16(a_.neon_u16, b_.neon_u16);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.u16 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u16), a_.u16 < b_.u16);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) {
        r_.u16[i] = (a_.u16[i] < b_.u16[i]) ? ~INT16_C(0) : INT16_C(0);
      }
    #endif

    return simde__m128i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_comlt_epu16(a, b) simde_mm_comlt_epu16((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_comlt_epu32 (simde__m128i a, simde__m128i b) {
  #if defined(SIMDE_X86_XOP_NATIVE) && defined(_MM_PCOMCTRL_LT)
    return _mm_com_epu32(a, b, _MM_PCOMCTRL_LT);
  #elif defined(SIMDE_X86_XOP_NATIVE)
    return _mm_comlt_epu32(a, b);
  #else
    simde__m128i_private
      r_,
      a_ = simde__m128i_to_private(a),
      b_ = simde__m128i_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_u32 = vcltq_u32(a_.neon_u32, b_.neon_u32);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.u32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u32), a_.u32 < b_.u32);
  1179. #else
  1180. SIMDE_VECTORIZE
  1181. for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) {
  1182. r_.u32[i] = (a_.u32[i] < b_.u32[i]) ? ~INT32_C(0) : INT32_C(0);
  1183. }
  1184. #endif
  1185. return simde__m128i_from_private(r_);
  1186. #endif
  1187. }
  1188. #if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  1189. #define _mm_comlt_epu32(a, b) simde_mm_comlt_epu32((a), (b))
  1190. #endif
  1191. SIMDE_FUNCTION_ATTRIBUTES
  1192. simde__m128i
  1193. simde_mm_comlt_epu64 (simde__m128i a, simde__m128i b) {
  1194. #if defined(SIMDE_X86_XOP_NATIVE) && defined(_MM_PCOMCTRL_LT)
  1195. return _mm_com_epu64(a, b, _MM_PCOMCTRL_LT);
  1196. #elif defined(SIMDE_X86_XOP_NATIVE)
  1197. return _mm_comlt_epu64(a, b);
  1198. #else
  1199. simde__m128i_private
  1200. r_,
  1201. a_ = simde__m128i_to_private(a),
  1202. b_ = simde__m128i_to_private(b);
  1203. #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
  1204. r_.neon_u64 = vcltq_u64(a_.neon_u64, b_.neon_u64);
  1205. #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
  1206. r_.u64 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u64), a_.u64 < b_.u64);
  1207. #else
  1208. SIMDE_VECTORIZE
  1209. for (size_t i = 0 ; i < (sizeof(r_.u64) / sizeof(r_.u64[0])) ; i++) {
  1210. r_.u64[i] = (a_.u64[i] < b_.u64[i]) ? ~INT64_C(0) : INT64_C(0);
  1211. }
  1212. #endif
  1213. return simde__m128i_from_private(r_);
  1214. #endif
  1215. }
  1216. #if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  1217. #define _mm_comlt_epu64(a, b) simde_mm_comlt_epu64((a), (b))
  1218. #endif
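
/* Each simde_mm_comlt_* comparison, like the native XOP PCOM instructions,
 * returns a per-lane mask: all bits set where the predicate holds, all bits
 * clear where it does not.  A minimal sketch with hypothetical values
 * (assuming the simde_mm_set1_epi8 declaration from SSE2 is visible):
 *
 *   simde__m128i a = simde_mm_set1_epi8(INT8_C(1));
 *   simde__m128i b = simde_mm_set1_epi8(INT8_C(2));
 *   simde__m128i m = simde_mm_comlt_epi8(a, b);  // every lane == 0xFF
 */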

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_comneq_epi8 (simde__m128i a, simde__m128i b) {
  #if defined(SIMDE_X86_XOP_NATIVE) && defined(_MM_PCOMCTRL_NEQ)
    return _mm_com_epi8(a, b, _MM_PCOMCTRL_NEQ);
  #elif defined(SIMDE_X86_XOP_NATIVE)
    return _mm_comneq_epi8(a, b);
  #else
    simde__m128i_private
      r_,
      a_ = simde__m128i_to_private(a),
      b_ = simde__m128i_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_u8 = vmvnq_u8(vceqq_s8(a_.neon_i8, b_.neon_i8));
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.i8 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i8), a_.i8 != b_.i8);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.i8) / sizeof(r_.i8[0])) ; i++) {
        r_.i8[i] = (a_.i8[i] != b_.i8[i]) ? ~INT8_C(0) : INT8_C(0);
      }
    #endif

    return simde__m128i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_comneq_epi8(a, b) simde_mm_comneq_epi8((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_comneq_epi16 (simde__m128i a, simde__m128i b) {
  #if defined(SIMDE_X86_XOP_NATIVE) && defined(_MM_PCOMCTRL_NEQ)
    return _mm_com_epi16(a, b, _MM_PCOMCTRL_NEQ);
  #elif defined(SIMDE_X86_XOP_NATIVE)
    return _mm_comneq_epi16(a, b);
  #else
    simde__m128i_private
      r_,
      a_ = simde__m128i_to_private(a),
      b_ = simde__m128i_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_u16 = vmvnq_u16(vceqq_s16(a_.neon_i16, b_.neon_i16));
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.i16 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i16), a_.i16 != b_.i16);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
        r_.i16[i] = (a_.i16[i] != b_.i16[i]) ? ~INT16_C(0) : INT16_C(0);
      }
    #endif

    return simde__m128i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_comneq_epi16(a, b) simde_mm_comneq_epi16((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_comneq_epi32 (simde__m128i a, simde__m128i b) {
  #if defined(SIMDE_X86_XOP_NATIVE) && defined(_MM_PCOMCTRL_NEQ)
    return _mm_com_epi32(a, b, _MM_PCOMCTRL_NEQ);
  #elif defined(SIMDE_X86_XOP_NATIVE)
    return _mm_comneq_epi32(a, b);
  #else
    simde__m128i_private
      r_,
      a_ = simde__m128i_to_private(a),
      b_ = simde__m128i_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_u32 = vmvnq_u32(vceqq_s32(a_.neon_i32, b_.neon_i32));
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.i32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i32), a_.i32 != b_.i32);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
        r_.i32[i] = (a_.i32[i] != b_.i32[i]) ? ~INT32_C(0) : INT32_C(0);
      }
    #endif

    return simde__m128i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_comneq_epi32(a, b) simde_mm_comneq_epi32((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_comneq_epi64 (simde__m128i a, simde__m128i b) {
  #if defined(SIMDE_X86_XOP_NATIVE) && defined(_MM_PCOMCTRL_NEQ)
    return _mm_com_epi64(a, b, _MM_PCOMCTRL_NEQ);
  #elif defined(SIMDE_X86_XOP_NATIVE)
    return _mm_comneq_epi64(a, b);
  #else
    simde__m128i_private
      r_,
      a_ = simde__m128i_to_private(a),
      b_ = simde__m128i_to_private(b);

    #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
      r_.neon_u32 = vmvnq_u32(vreinterpretq_u32_u64(vceqq_s64(a_.neon_i64, b_.neon_i64)));
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.i64 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.i64), a_.i64 != b_.i64);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.i64) / sizeof(r_.i64[0])) ; i++) {
        r_.i64[i] = (a_.i64[i] != b_.i64[i]) ? ~INT64_C(0) : INT64_C(0);
      }
    #endif

    return simde__m128i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_comneq_epi64(a, b) simde_mm_comneq_epi64((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_comneq_epu8 (simde__m128i a, simde__m128i b) {
  #if defined(SIMDE_X86_XOP_NATIVE) && defined(_MM_PCOMCTRL_NEQ)
    return _mm_com_epu8(a, b, _MM_PCOMCTRL_NEQ);
  #elif defined(SIMDE_X86_XOP_NATIVE)
    return _mm_comneq_epu8(a, b);
  #else
    simde__m128i_private
      r_,
      a_ = simde__m128i_to_private(a),
      b_ = simde__m128i_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_u8 = vmvnq_u8(vceqq_u8(a_.neon_u8, b_.neon_u8));
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.u8 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u8), a_.u8 != b_.u8);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) {
        r_.u8[i] = (a_.u8[i] != b_.u8[i]) ? ~INT8_C(0) : INT8_C(0);
      }
    #endif

    return simde__m128i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_comneq_epu8(a, b) simde_mm_comneq_epu8((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_comneq_epu16 (simde__m128i a, simde__m128i b) {
  #if defined(SIMDE_X86_XOP_NATIVE) && defined(_MM_PCOMCTRL_NEQ)
    return _mm_com_epu16(a, b, _MM_PCOMCTRL_NEQ);
  #elif defined(SIMDE_X86_XOP_NATIVE)
    return _mm_comneq_epu16(a, b);
  #else
    simde__m128i_private
      r_,
      a_ = simde__m128i_to_private(a),
      b_ = simde__m128i_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_u16 = vmvnq_u16(vceqq_u16(a_.neon_u16, b_.neon_u16));
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.u16 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u16), a_.u16 != b_.u16);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) {
        r_.u16[i] = (a_.u16[i] != b_.u16[i]) ? ~INT16_C(0) : INT16_C(0);
      }
    #endif

    return simde__m128i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_comneq_epu16(a, b) simde_mm_comneq_epu16((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_comneq_epu32 (simde__m128i a, simde__m128i b) {
  #if defined(SIMDE_X86_XOP_NATIVE) && defined(_MM_PCOMCTRL_NEQ)
    return _mm_com_epu32(a, b, _MM_PCOMCTRL_NEQ);
  #elif defined(SIMDE_X86_XOP_NATIVE)
    return _mm_comneq_epu32(a, b);
  #else
    simde__m128i_private
      r_,
      a_ = simde__m128i_to_private(a),
      b_ = simde__m128i_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_u32 = vmvnq_u32(vceqq_u32(a_.neon_u32, b_.neon_u32));
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.u32 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u32), a_.u32 != b_.u32);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) {
        r_.u32[i] = (a_.u32[i] != b_.u32[i]) ? ~INT32_C(0) : INT32_C(0);
      }
    #endif

    return simde__m128i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_comneq_epu32(a, b) simde_mm_comneq_epu32((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_comneq_epu64 (simde__m128i a, simde__m128i b) {
  #if defined(SIMDE_X86_XOP_NATIVE) && defined(_MM_PCOMCTRL_NEQ)
    return _mm_com_epu64(a, b, _MM_PCOMCTRL_NEQ);
  #elif defined(SIMDE_X86_XOP_NATIVE)
    return _mm_comneq_epu64(a, b);
  #else
    simde__m128i_private
      r_,
      a_ = simde__m128i_to_private(a),
      b_ = simde__m128i_to_private(b);

    #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
      r_.neon_u32 = vmvnq_u32(vreinterpretq_u32_u64(vceqq_u64(a_.neon_u64, b_.neon_u64)));
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      r_.u64 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u64), a_.u64 != b_.u64);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.u64) / sizeof(r_.u64[0])) ; i++) {
        r_.u64[i] = (a_.u64[i] != b_.u64[i]) ? ~INT64_C(0) : INT64_C(0);
      }
    #endif

    return simde__m128i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_comneq_epu64(a, b) simde_mm_comneq_epu64((a), (b))
#endif
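
/* NEON has no "not equal" comparison, so the comneq_* fallbacks build the
 * mask as NOT(EQ) via vmvnq(vceqq(...)).  The 64-bit variants first
 * reinterpret the u64 mask as u32 because vmvnq has no 64-bit form; since
 * a comparison mask is all-ones or all-zeros per lane, inverting it as
 * two u32 halves yields the same bits. */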

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_comfalse_epi8 (simde__m128i a, simde__m128i b) {
  (void) a;
  (void) b;
  return simde_mm_setzero_si128();
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_comfalse_epi8(a, b) simde_mm_comfalse_epi8((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_comfalse_epi16 (simde__m128i a, simde__m128i b) {
  (void) a;
  (void) b;
  return simde_mm_setzero_si128();
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_comfalse_epi16(a, b) simde_mm_comfalse_epi16((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_comfalse_epi32 (simde__m128i a, simde__m128i b) {
  (void) a;
  (void) b;
  return simde_mm_setzero_si128();
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_comfalse_epi32(a, b) simde_mm_comfalse_epi32((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_comfalse_epi64 (simde__m128i a, simde__m128i b) {
  (void) a;
  (void) b;
  return simde_mm_setzero_si128();
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_comfalse_epi64(a, b) simde_mm_comfalse_epi64((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_comfalse_epu8 (simde__m128i a, simde__m128i b) {
  (void) a;
  (void) b;
  return simde_mm_setzero_si128();
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_comfalse_epu8(a, b) simde_mm_comfalse_epu8((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_comfalse_epu16 (simde__m128i a, simde__m128i b) {
  (void) a;
  (void) b;
  return simde_mm_setzero_si128();
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_comfalse_epu16(a, b) simde_mm_comfalse_epu16((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_comfalse_epu32 (simde__m128i a, simde__m128i b) {
  (void) a;
  (void) b;
  return simde_mm_setzero_si128();
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_comfalse_epu32(a, b) simde_mm_comfalse_epu32((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_comfalse_epu64 (simde__m128i a, simde__m128i b) {
  (void) a;
  (void) b;
  return simde_mm_setzero_si128();
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_comfalse_epu64(a, b) simde_mm_comfalse_epu64((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_comtrue_epi8 (simde__m128i a, simde__m128i b) {
  (void) a;
  (void) b;
  return simde_x_mm_setone_si128();
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_comtrue_epi8(a, b) simde_mm_comtrue_epi8((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_comtrue_epi16 (simde__m128i a, simde__m128i b) {
  (void) a;
  (void) b;
  return simde_x_mm_setone_si128();
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_comtrue_epi16(a, b) simde_mm_comtrue_epi16((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_comtrue_epi32 (simde__m128i a, simde__m128i b) {
  (void) a;
  (void) b;
  return simde_x_mm_setone_si128();
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_comtrue_epi32(a, b) simde_mm_comtrue_epi32((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_comtrue_epi64 (simde__m128i a, simde__m128i b) {
  (void) a;
  (void) b;
  return simde_x_mm_setone_si128();
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_comtrue_epi64(a, b) simde_mm_comtrue_epi64((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_comtrue_epu8 (simde__m128i a, simde__m128i b) {
  (void) a;
  (void) b;
  return simde_x_mm_setone_si128();
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_comtrue_epu8(a, b) simde_mm_comtrue_epu8((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_comtrue_epu16 (simde__m128i a, simde__m128i b) {
  (void) a;
  (void) b;
  return simde_x_mm_setone_si128();
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_comtrue_epu16(a, b) simde_mm_comtrue_epu16((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_comtrue_epu32 (simde__m128i a, simde__m128i b) {
  (void) a;
  (void) b;
  return simde_x_mm_setone_si128();
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_comtrue_epu32(a, b) simde_mm_comtrue_epu32((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_comtrue_epu64 (simde__m128i a, simde__m128i b) {
  (void) a;
  (void) b;
  return simde_x_mm_setone_si128();
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_comtrue_epu64(a, b) simde_mm_comtrue_epu64((a), (b))
#endif
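
/* comfalse/comtrue ignore their operands and return all-zeros or all-ones
 * respectively; the (void) casts only silence unused-parameter warnings.
 * They exist so the generic simde_mm_com_* dispatchers below can cover
 * every PCOM predicate value. */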

#if defined(SIMDE_X86_XOP_NATIVE) && defined(_MM_PCOMCTRL_LT)
  #define SIMDE_X86_XOP_HAVE_COM_ 1
  #define SIMDE_MM_PCOMCTRL_LT _MM_PCOMCTRL_LT
  #define SIMDE_MM_PCOMCTRL_LE _MM_PCOMCTRL_LE
  #define SIMDE_MM_PCOMCTRL_GT _MM_PCOMCTRL_GT
  #define SIMDE_MM_PCOMCTRL_GE _MM_PCOMCTRL_GE
  #define SIMDE_MM_PCOMCTRL_EQ _MM_PCOMCTRL_EQ
  #define SIMDE_MM_PCOMCTRL_NEQ _MM_PCOMCTRL_NEQ
  #define SIMDE_MM_PCOMCTRL_FALSE _MM_PCOMCTRL_FALSE
  #define SIMDE_MM_PCOMCTRL_TRUE _MM_PCOMCTRL_TRUE
#else
  #define SIMDE_MM_PCOMCTRL_LT 0
  #define SIMDE_MM_PCOMCTRL_LE 1
  #define SIMDE_MM_PCOMCTRL_GT 2
  #define SIMDE_MM_PCOMCTRL_GE 3
  #define SIMDE_MM_PCOMCTRL_EQ 4
  #define SIMDE_MM_PCOMCTRL_NEQ 5
  #define SIMDE_MM_PCOMCTRL_FALSE 6
  #define SIMDE_MM_PCOMCTRL_TRUE 7

  #if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
    #define _MM_PCOMCTRL_LT SIMDE_MM_PCOMCTRL_LT
    #define _MM_PCOMCTRL_LE SIMDE_MM_PCOMCTRL_LE
    #define _MM_PCOMCTRL_GT SIMDE_MM_PCOMCTRL_GT
    #define _MM_PCOMCTRL_GE SIMDE_MM_PCOMCTRL_GE
    #define _MM_PCOMCTRL_EQ SIMDE_MM_PCOMCTRL_EQ
    #define _MM_PCOMCTRL_NEQ SIMDE_MM_PCOMCTRL_NEQ
    #define _MM_PCOMCTRL_FALSE SIMDE_MM_PCOMCTRL_FALSE
    #define _MM_PCOMCTRL_TRUE SIMDE_MM_PCOMCTRL_TRUE
  #endif
#endif
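
/* When the XOP headers provide the _MM_PCOMCTRL_* constants these simply
 * forward to them; otherwise the numeric values above follow the immediate
 * encoding of the PCOM instructions (LT=0 ... TRUE=7), so the dispatchers
 * below agree with hardware behavior either way. */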

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_com_epi8 (simde__m128i a, simde__m128i b, const int imm8)
    SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 7) {
  switch (imm8) {
    case SIMDE_MM_PCOMCTRL_LT:
      return simde_mm_comlt_epi8(a, b);
    case SIMDE_MM_PCOMCTRL_LE:
      return simde_mm_comle_epi8(a, b);
    case SIMDE_MM_PCOMCTRL_GT:
      return simde_mm_comgt_epi8(a, b);
    case SIMDE_MM_PCOMCTRL_GE:
      return simde_mm_comge_epi8(a, b);
    case SIMDE_MM_PCOMCTRL_EQ:
      return simde_mm_comeq_epi8(a, b);
    case SIMDE_MM_PCOMCTRL_NEQ:
      return simde_mm_comneq_epi8(a, b);
    case SIMDE_MM_PCOMCTRL_FALSE:
      return simde_mm_comfalse_epi8(a, b);
    case SIMDE_MM_PCOMCTRL_TRUE:
      return simde_mm_comtrue_epi8(a, b);
    default:
      HEDLEY_UNREACHABLE_RETURN(simde_mm_setzero_si128());
  }
}
#if defined(SIMDE_X86_XOP_NATIVE) && defined(SIMDE_X86_XOP_HAVE_COM_)
  #define simde_mm_com_epi8(a, b, imm8) _mm_com_epi8((a), (b), (imm8))
#endif
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_com_epi8(a, b, imm8) simde_mm_com_epi8((a), (b), (imm8))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_com_epi16 (simde__m128i a, simde__m128i b, const int imm8)
    SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 7) {
  switch (imm8) {
    case SIMDE_MM_PCOMCTRL_LT:
      return simde_mm_comlt_epi16(a, b);
    case SIMDE_MM_PCOMCTRL_LE:
      return simde_mm_comle_epi16(a, b);
    case SIMDE_MM_PCOMCTRL_GT:
      return simde_mm_comgt_epi16(a, b);
    case SIMDE_MM_PCOMCTRL_GE:
      return simde_mm_comge_epi16(a, b);
    case SIMDE_MM_PCOMCTRL_EQ:
      return simde_mm_comeq_epi16(a, b);
    case SIMDE_MM_PCOMCTRL_NEQ:
      return simde_mm_comneq_epi16(a, b);
    case SIMDE_MM_PCOMCTRL_FALSE:
      return simde_mm_comfalse_epi16(a, b);
    case SIMDE_MM_PCOMCTRL_TRUE:
      return simde_mm_comtrue_epi16(a, b);
    default:
      HEDLEY_UNREACHABLE_RETURN(simde_mm_setzero_si128());
  }
}
#if defined(SIMDE_X86_XOP_NATIVE) && defined(SIMDE_X86_XOP_HAVE_COM_)
  #define simde_mm_com_epi16(a, b, imm8) _mm_com_epi16((a), (b), (imm8))
#endif
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_com_epi16(a, b, imm8) simde_mm_com_epi16((a), (b), (imm8))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_com_epi32 (simde__m128i a, simde__m128i b, const int imm8)
    SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 7) {
  switch (imm8) {
    case SIMDE_MM_PCOMCTRL_LT:
      return simde_mm_comlt_epi32(a, b);
    case SIMDE_MM_PCOMCTRL_LE:
      return simde_mm_comle_epi32(a, b);
    case SIMDE_MM_PCOMCTRL_GT:
      return simde_mm_comgt_epi32(a, b);
    case SIMDE_MM_PCOMCTRL_GE:
      return simde_mm_comge_epi32(a, b);
    case SIMDE_MM_PCOMCTRL_EQ:
      return simde_mm_comeq_epi32(a, b);
    case SIMDE_MM_PCOMCTRL_NEQ:
      return simde_mm_comneq_epi32(a, b);
    case SIMDE_MM_PCOMCTRL_FALSE:
      return simde_mm_comfalse_epi32(a, b);
    case SIMDE_MM_PCOMCTRL_TRUE:
      return simde_mm_comtrue_epi32(a, b);
    default:
      HEDLEY_UNREACHABLE_RETURN(simde_mm_setzero_si128());
  }
}
#if defined(SIMDE_X86_XOP_NATIVE) && defined(SIMDE_X86_XOP_HAVE_COM_)
  #define simde_mm_com_epi32(a, b, imm8) _mm_com_epi32((a), (b), (imm8))
#endif
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_com_epi32(a, b, imm8) simde_mm_com_epi32((a), (b), (imm8))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_com_epi64 (simde__m128i a, simde__m128i b, const int imm8)
    SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 7) {
  switch (imm8) {
    case SIMDE_MM_PCOMCTRL_LT:
      return simde_mm_comlt_epi64(a, b);
    case SIMDE_MM_PCOMCTRL_LE:
      return simde_mm_comle_epi64(a, b);
    case SIMDE_MM_PCOMCTRL_GT:
      return simde_mm_comgt_epi64(a, b);
    case SIMDE_MM_PCOMCTRL_GE:
      return simde_mm_comge_epi64(a, b);
    case SIMDE_MM_PCOMCTRL_EQ:
      return simde_mm_comeq_epi64(a, b);
    case SIMDE_MM_PCOMCTRL_NEQ:
      return simde_mm_comneq_epi64(a, b);
    case SIMDE_MM_PCOMCTRL_FALSE:
      return simde_mm_comfalse_epi64(a, b);
    case SIMDE_MM_PCOMCTRL_TRUE:
      return simde_mm_comtrue_epi64(a, b);
    default:
      HEDLEY_UNREACHABLE_RETURN(simde_mm_setzero_si128());
  }
}
#if defined(SIMDE_X86_XOP_NATIVE) && defined(SIMDE_X86_XOP_HAVE_COM_)
  #define simde_mm_com_epi64(a, b, imm8) _mm_com_epi64((a), (b), (imm8))
#endif
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_com_epi64(a, b, imm8) simde_mm_com_epi64((a), (b), (imm8))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_com_epu8 (simde__m128i a, simde__m128i b, const int imm8)
    SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 7) {
  switch (imm8) {
    case SIMDE_MM_PCOMCTRL_LT:
      return simde_mm_comlt_epu8(a, b);
    case SIMDE_MM_PCOMCTRL_LE:
      return simde_mm_comle_epu8(a, b);
    case SIMDE_MM_PCOMCTRL_GT:
      return simde_mm_comgt_epu8(a, b);
    case SIMDE_MM_PCOMCTRL_GE:
      return simde_mm_comge_epu8(a, b);
    case SIMDE_MM_PCOMCTRL_EQ:
      return simde_mm_comeq_epu8(a, b);
    case SIMDE_MM_PCOMCTRL_NEQ:
      return simde_mm_comneq_epu8(a, b);
    case SIMDE_MM_PCOMCTRL_FALSE:
      return simde_mm_comfalse_epu8(a, b);
    case SIMDE_MM_PCOMCTRL_TRUE:
      return simde_mm_comtrue_epu8(a, b);
    default:
      HEDLEY_UNREACHABLE_RETURN(simde_mm_setzero_si128());
  }
}
#if defined(SIMDE_X86_XOP_NATIVE) && defined(SIMDE_X86_XOP_HAVE_COM_)
  #define simde_mm_com_epu8(a, b, imm8) _mm_com_epu8((a), (b), (imm8))
#endif
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_com_epu8(a, b, imm8) simde_mm_com_epu8((a), (b), (imm8))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_com_epu16 (simde__m128i a, simde__m128i b, const int imm8)
    SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 7) {
  switch (imm8) {
    case SIMDE_MM_PCOMCTRL_LT:
      return simde_mm_comlt_epu16(a, b);
    case SIMDE_MM_PCOMCTRL_LE:
      return simde_mm_comle_epu16(a, b);
    case SIMDE_MM_PCOMCTRL_GT:
      return simde_mm_comgt_epu16(a, b);
    case SIMDE_MM_PCOMCTRL_GE:
      return simde_mm_comge_epu16(a, b);
    case SIMDE_MM_PCOMCTRL_EQ:
      return simde_mm_comeq_epu16(a, b);
    case SIMDE_MM_PCOMCTRL_NEQ:
      return simde_mm_comneq_epu16(a, b);
    case SIMDE_MM_PCOMCTRL_FALSE:
      return simde_mm_comfalse_epu16(a, b);
    case SIMDE_MM_PCOMCTRL_TRUE:
      return simde_mm_comtrue_epu16(a, b);
    default:
      HEDLEY_UNREACHABLE_RETURN(simde_mm_setzero_si128());
  }
}
#if defined(SIMDE_X86_XOP_NATIVE) && defined(SIMDE_X86_XOP_HAVE_COM_)
  #define simde_mm_com_epu16(a, b, imm8) _mm_com_epu16((a), (b), (imm8))
#endif
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_com_epu16(a, b, imm8) simde_mm_com_epu16((a), (b), (imm8))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_com_epu32 (simde__m128i a, simde__m128i b, const int imm8)
    SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 7) {
  switch (imm8) {
    case SIMDE_MM_PCOMCTRL_LT:
      return simde_mm_comlt_epu32(a, b);
    case SIMDE_MM_PCOMCTRL_LE:
      return simde_mm_comle_epu32(a, b);
    case SIMDE_MM_PCOMCTRL_GT:
      return simde_mm_comgt_epu32(a, b);
    case SIMDE_MM_PCOMCTRL_GE:
      return simde_mm_comge_epu32(a, b);
    case SIMDE_MM_PCOMCTRL_EQ:
      return simde_mm_comeq_epu32(a, b);
    case SIMDE_MM_PCOMCTRL_NEQ:
      return simde_mm_comneq_epu32(a, b);
    case SIMDE_MM_PCOMCTRL_FALSE:
      return simde_mm_comfalse_epu32(a, b);
    case SIMDE_MM_PCOMCTRL_TRUE:
      return simde_mm_comtrue_epu32(a, b);
    default:
      HEDLEY_UNREACHABLE_RETURN(simde_mm_setzero_si128());
  }
}
#if defined(SIMDE_X86_XOP_NATIVE) && defined(SIMDE_X86_XOP_HAVE_COM_)
  #define simde_mm_com_epu32(a, b, imm8) _mm_com_epu32((a), (b), (imm8))
#endif
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_com_epu32(a, b, imm8) simde_mm_com_epu32((a), (b), (imm8))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_com_epu64 (simde__m128i a, simde__m128i b, const int imm8)
    SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 7) {
  switch (imm8) {
    case SIMDE_MM_PCOMCTRL_LT:
      return simde_mm_comlt_epu64(a, b);
    case SIMDE_MM_PCOMCTRL_LE:
      return simde_mm_comle_epu64(a, b);
    case SIMDE_MM_PCOMCTRL_GT:
      return simde_mm_comgt_epu64(a, b);
    case SIMDE_MM_PCOMCTRL_GE:
      return simde_mm_comge_epu64(a, b);
    case SIMDE_MM_PCOMCTRL_EQ:
      return simde_mm_comeq_epu64(a, b);
    case SIMDE_MM_PCOMCTRL_NEQ:
      return simde_mm_comneq_epu64(a, b);
    case SIMDE_MM_PCOMCTRL_FALSE:
      return simde_mm_comfalse_epu64(a, b);
    case SIMDE_MM_PCOMCTRL_TRUE:
      return simde_mm_comtrue_epu64(a, b);
    default:
      HEDLEY_UNREACHABLE_RETURN(simde_mm_setzero_si128());
  }
}
#if defined(SIMDE_X86_XOP_NATIVE) && defined(SIMDE_X86_XOP_HAVE_COM_)
  #define simde_mm_com_epu64(a, b, imm8) _mm_com_epu64((a), (b), (imm8))
#endif
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_com_epu64(a, b, imm8) simde_mm_com_epu64((a), (b), (imm8))
#endif
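
/* Usage sketch for the generic comparisons: the immediate selects the
 * predicate, so (with hypothetical a and b) these two calls are equivalent:
 *
 *   simde__m128i m1 = simde_mm_com_epi8(a, b, SIMDE_MM_PCOMCTRL_LT);
 *   simde__m128i m2 = simde_mm_comlt_epi8(a, b);
 *
 * imm8 must be a compile-time constant in [0, 7]; on XOP hardware the
 * macros above forward straight to the native _mm_com_* intrinsics. */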

SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_frcz_ps (simde__m128 a) {
  #if defined(SIMDE_X86_XOP_NATIVE)
    return _mm_frcz_ps(a);
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
      #if defined(simde_math_modff)
        simde_float32 integral;
        r_.f32[i] = simde_math_modff(a_.f32[i], &integral);
      #else
        /* Fallback when simde_math_modff is unavailable: drop the integral
         * part via a cast.  This assumes |a| fits in an int64_t. */
        r_.f32[i] = a_.f32[i] - HEDLEY_STATIC_CAST(simde_float32, HEDLEY_STATIC_CAST(int64_t, a_.f32[i]));
      #endif
    }

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_frcz_ps(a) simde_mm_frcz_ps((a))
#endif
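
/* frcz ("fraction") keeps only the fractional part of each lane, carrying
 * the sign of the input, matching modff semantics.  Hypothetical values:
 *
 *   simde_mm_frcz_ps(simde_mm_set1_ps( 3.75f));  // every lane ==  0.75f
 *   simde_mm_frcz_ps(simde_mm_set1_ps(-2.50f));  // every lane == -0.50f
 */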

SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_frcz_pd (simde__m128d a) {
  #if defined(SIMDE_X86_XOP_NATIVE)
    return _mm_frcz_pd(a);
  #else
    simde__m128d_private
      r_,
      a_ = simde__m128d_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
      #if defined(simde_math_modf)
        simde_float64 integral;
        r_.f64[i] = simde_math_modf(a_.f64[i], &integral);
      #else
        /* Fallback when simde_math_modf is unavailable: drop the integral
         * part via a cast.  This assumes |a| fits in an int64_t. */
        r_.f64[i] = a_.f64[i] - HEDLEY_STATIC_CAST(simde_float64, HEDLEY_STATIC_CAST(int64_t, a_.f64[i]));
      #endif
    }

    return simde__m128d_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_frcz_pd(a) simde_mm_frcz_pd((a))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_frcz_ss (simde__m128 a, simde__m128 b) {
  #if defined(SIMDE_X86_XOP_NATIVE) && !defined(SIMDE_BUG_CLANG_48673)
    return _mm_frcz_ss(a, b);
  #else
    simde__m128_private
      a_ = simde__m128_to_private(a),
      b_ = simde__m128_to_private(b);

    #if defined(simde_math_modff)
      simde_float32 integral;
      a_.f32[0] = simde_math_modff(b_.f32[0], &integral);
    #else
      /* Fallback when simde_math_modff is unavailable: drop the integral
       * part via a cast.  This assumes |b| fits in an int64_t. */
      a_.f32[0] = b_.f32[0] - HEDLEY_STATIC_CAST(simde_float32, HEDLEY_STATIC_CAST(int64_t, b_.f32[0]));
    #endif

    return simde__m128_from_private(a_);
  #endif
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_frcz_ss(a, b) simde_mm_frcz_ss((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_frcz_sd (simde__m128d a, simde__m128d b) {
  #if defined(SIMDE_X86_XOP_NATIVE) && !defined(SIMDE_BUG_CLANG_48673)
    return _mm_frcz_sd(a, b);
  #else
    simde__m128d_private
      a_ = simde__m128d_to_private(a),
      b_ = simde__m128d_to_private(b);

    #if defined(simde_math_modf)
      simde_float64 integral;
      a_.f64[0] = simde_math_modf(b_.f64[0], &integral);
    #else
      /* Fallback when simde_math_modf is unavailable: drop the integral
       * part via a cast.  This assumes |b| fits in an int64_t. */
      a_.f64[0] = b_.f64[0] - HEDLEY_STATIC_CAST(simde_float64, HEDLEY_STATIC_CAST(int64_t, b_.f64[0]));
    #endif

    return simde__m128d_from_private(a_);
  #endif
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_frcz_sd(a, b) simde_mm_frcz_sd((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m256
simde_mm256_frcz_ps (simde__m256 a) {
  #if defined(SIMDE_X86_XOP_NATIVE)
    return _mm256_frcz_ps(a);
  #else
    simde__m256_private
      r_,
      a_ = simde__m256_to_private(a);

    #if SIMDE_NATURAL_VECTOR_SIZE_LE(128)
      for (size_t i = 0 ; i < (sizeof(r_.m128) / sizeof(r_.m128[0])) ; i++) {
        r_.m128[i] = simde_mm_frcz_ps(a_.m128[i]);
      }
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
        #if defined(simde_math_modff)
          simde_float32 integral;
          r_.f32[i] = simde_math_modff(a_.f32[i], &integral);
        #else
          /* Fallback when simde_math_modff is unavailable: drop the integral
           * part via a cast.  This assumes |a| fits in an int64_t. */
          r_.f32[i] = a_.f32[i] - HEDLEY_STATIC_CAST(simde_float32, HEDLEY_STATIC_CAST(int64_t, a_.f32[i]));
        #endif
      }
    #endif

    return simde__m256_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm256_frcz_ps(a) simde_mm256_frcz_ps((a))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m256d
simde_mm256_frcz_pd (simde__m256d a) {
  #if defined(SIMDE_X86_XOP_NATIVE)
    return _mm256_frcz_pd(a);
  #else
    simde__m256d_private
      r_,
      a_ = simde__m256d_to_private(a);

    #if SIMDE_NATURAL_VECTOR_SIZE_LE(128)
      for (size_t i = 0 ; i < (sizeof(r_.m128d) / sizeof(r_.m128d[0])) ; i++) {
        r_.m128d[i] = simde_mm_frcz_pd(a_.m128d[i]);
      }
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
        #if defined(simde_math_modf)
          simde_float64 integral;
          r_.f64[i] = simde_math_modf(a_.f64[i], &integral);
        #else
          /* Fallback when simde_math_modf is unavailable: drop the integral
           * part via a cast.  This assumes |a| fits in an int64_t. */
          r_.f64[i] = a_.f64[i] - HEDLEY_STATIC_CAST(simde_float64, HEDLEY_STATIC_CAST(int64_t, a_.f64[i]));
        #endif
      }
    #endif

    return simde__m256d_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm256_frcz_pd(a) simde_mm256_frcz_pd((a))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_haddw_epi8 (simde__m128i a) {
  #if defined(SIMDE_X86_XOP_NATIVE)
    return _mm_haddw_epi8(a);
  #elif defined(SIMDE_X86_SSSE3_NATIVE)
    return _mm_maddubs_epi16(_mm_set1_epi8(INT8_C(1)), a);
  #else
    simde__m128i_private
      r_,
      a_ = simde__m128i_to_private(a);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_i16 = vpaddlq_s8(a_.neon_i8);
    #elif defined(SIMDE_WASM_SIMD128_NATIVE)
      r_.wasm_v128 = wasm_i16x8_extadd_pairwise_i8x16(a_.wasm_v128);
    #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
      SIMDE_POWER_ALTIVEC_VECTOR(signed char) one = vec_splat_s8(1);
      r_.altivec_i16 =
        vec_add(
          vec_mule(a_.altivec_i8, one),
          vec_mulo(a_.altivec_i8, one)
        );
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
      r_.i16 =
        ((a_.i16 << 8) >> 8) +
        ((a_.i16 >> 8)     );
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
        r_.i16[i] = HEDLEY_STATIC_CAST(int16_t, a_.i8[(i * 2)]) + HEDLEY_STATIC_CAST(int16_t, a_.i8[(i * 2) + 1]);
      }
    #endif

    return simde__m128i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_haddw_epi8(a) simde_mm_haddw_epi8((a))
#endif
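
/* haddw performs a pairwise widening add: adjacent i8 lanes are summed
 * into one i16 lane, so the sum cannot overflow.  Hypothetical values:
 *
 *   simde__m128i a = simde_mm_set1_epi8(INT8_C(-100));
 *   simde__m128i r = simde_mm_haddw_epi8(a);  // every i16 lane == -200
 *
 * The SSSE3 path exploits _mm_maddubs_epi16, which multiplies unsigned
 * bytes by signed bytes and adds adjacent products: a vector of unsigned
 * 1s times a yields the signed pairwise sums directly. */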

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_haddw_epu8 (simde__m128i a) {
  #if defined(SIMDE_X86_XOP_NATIVE)
    return _mm_haddw_epu8(a);
  #elif defined(SIMDE_X86_SSSE3_NATIVE)
    return _mm_maddubs_epi16(a, _mm_set1_epi8(INT8_C(1)));
  #else
    simde__m128i_private
      r_,
      a_ = simde__m128i_to_private(a);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_u16 = vpaddlq_u8(a_.neon_u8);
    #elif defined(SIMDE_WASM_SIMD128_NATIVE)
      r_.wasm_v128 = wasm_u16x8_extadd_pairwise_u8x16(a_.wasm_v128);
    #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
      SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) one = vec_splat_u8(1);
      r_.altivec_u16 =
        vec_add(
          vec_mule(a_.altivec_u8, one),
          vec_mulo(a_.altivec_u8, one)
        );
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
      r_.u16 =
        ((a_.u16 << 8) >> 8) +
        ((a_.u16 >> 8)     );
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) {
        r_.u16[i] = HEDLEY_STATIC_CAST(uint16_t, a_.u8[(i * 2)]) + HEDLEY_STATIC_CAST(uint16_t, a_.u8[(i * 2) + 1]);
      }
    #endif

    return simde__m128i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_haddw_epu8(a) simde_mm_haddw_epu8((a))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_haddd_epi8 (simde__m128i a) {
  #if defined(SIMDE_X86_XOP_NATIVE)
    return _mm_haddd_epi8(a);
  #else
    simde__m128i_private
      r_,
      a_ = simde__m128i_to_private(a);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_i32 = vpaddlq_s16(vpaddlq_s8(a_.neon_i8));
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
        r_.i32[i] =
          HEDLEY_STATIC_CAST(int32_t, a_.i8[(i * 4)    ]) + HEDLEY_STATIC_CAST(int32_t, a_.i8[(i * 4) + 1]) +
          HEDLEY_STATIC_CAST(int32_t, a_.i8[(i * 4) + 2]) + HEDLEY_STATIC_CAST(int32_t, a_.i8[(i * 4) + 3]);
      }
    #endif

    return simde__m128i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_haddd_epi8(a) simde_mm_haddd_epi8((a))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_haddd_epi16 (simde__m128i a) {
  #if defined(SIMDE_X86_XOP_NATIVE)
    return _mm_haddd_epi16(a);
  #elif defined(SIMDE_X86_SSE2_NATIVE)
    return _mm_madd_epi16(a, _mm_set1_epi16(INT16_C(1)));
  #else
    simde__m128i_private
      r_,
      a_ = simde__m128i_to_private(a);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_i32 = vpaddlq_s16(a_.neon_i16);
    #elif defined(SIMDE_WASM_SIMD128_NATIVE)
      r_.wasm_v128 = wasm_i32x4_extadd_pairwise_i16x8(a_.wasm_v128);
    #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
      SIMDE_POWER_ALTIVEC_VECTOR(signed short) one = vec_splat_s16(1);
      r_.altivec_i32 =
        vec_add(
          vec_mule(a_.altivec_i16, one),
          vec_mulo(a_.altivec_i16, one)
        );
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
      r_.i32 =
        ((a_.i32 << 16) >> 16) +
        ((a_.i32 >> 16)      );
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
        r_.i32[i] = HEDLEY_STATIC_CAST(int32_t, a_.i16[(i * 2)]) + HEDLEY_STATIC_CAST(int32_t, a_.i16[(i * 2) + 1]);
      }
    #endif

    return simde__m128i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_haddd_epi16(a) simde_mm_haddd_epi16((a))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_haddd_epu8 (simde__m128i a) {
  #if defined(SIMDE_X86_XOP_NATIVE)
    return _mm_haddd_epu8(a);
  #else
    simde__m128i_private
      r_,
      a_ = simde__m128i_to_private(a);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_u32 = vpaddlq_u16(vpaddlq_u8(a_.neon_u8));
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) {
        r_.u32[i] =
          HEDLEY_STATIC_CAST(uint32_t, a_.u8[(i * 4)    ]) + HEDLEY_STATIC_CAST(uint32_t, a_.u8[(i * 4) + 1]) +
          HEDLEY_STATIC_CAST(uint32_t, a_.u8[(i * 4) + 2]) + HEDLEY_STATIC_CAST(uint32_t, a_.u8[(i * 4) + 3]);
      }
    #endif

    return simde__m128i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_haddd_epu8(a) simde_mm_haddd_epu8((a))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_haddd_epu16 (simde__m128i a) {
  #if defined(SIMDE_X86_XOP_NATIVE)
    return _mm_haddd_epu16(a);
  #elif defined(SIMDE_X86_SSE2_NATIVE)
    return
      _mm_add_epi32(
        _mm_srli_epi32(a, 16),
        _mm_and_si128(a, _mm_set1_epi32(INT32_C(0x0000ffff)))
      );
  #else
    simde__m128i_private
      r_,
      a_ = simde__m128i_to_private(a);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_u32 = vpaddlq_u16(a_.neon_u16);
    #elif defined(SIMDE_WASM_SIMD128_NATIVE)
      r_.wasm_v128 = wasm_u32x4_extadd_pairwise_u16x8(a_.wasm_v128);
    #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
      SIMDE_POWER_ALTIVEC_VECTOR(unsigned short) one = vec_splat_u16(1);
      r_.altivec_u32 =
        vec_add(
          vec_mule(a_.altivec_u16, one),
          vec_mulo(a_.altivec_u16, one)
        );
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
      r_.u32 =
        ((a_.u32 << 16) >> 16) +
        ((a_.u32 >> 16)      );
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) {
        r_.u32[i] = HEDLEY_STATIC_CAST(uint32_t, a_.u16[(i * 2)]) + HEDLEY_STATIC_CAST(uint32_t, a_.u16[(i * 2) + 1]);
      }
    #endif

    return simde__m128i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_haddd_epu16(a) simde_mm_haddd_epu16((a))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_haddq_epi8 (simde__m128i a) {
  #if defined(SIMDE_X86_XOP_NATIVE)
    return _mm_haddq_epi8(a);
  #else
    simde__m128i_private
      r_,
      a_ = simde__m128i_to_private(a);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_i64 = vpaddlq_s32(vpaddlq_s16(vpaddlq_s8(a_.neon_i8)));
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.i64) / sizeof(r_.i64[0])) ; i++) {
        r_.i64[i] =
          HEDLEY_STATIC_CAST(int64_t, a_.i8[(i * 8)    ]) + HEDLEY_STATIC_CAST(int64_t, a_.i8[(i * 8) + 1]) +
          HEDLEY_STATIC_CAST(int64_t, a_.i8[(i * 8) + 2]) + HEDLEY_STATIC_CAST(int64_t, a_.i8[(i * 8) + 3]) +
          HEDLEY_STATIC_CAST(int64_t, a_.i8[(i * 8) + 4]) + HEDLEY_STATIC_CAST(int64_t, a_.i8[(i * 8) + 5]) +
          HEDLEY_STATIC_CAST(int64_t, a_.i8[(i * 8) + 6]) + HEDLEY_STATIC_CAST(int64_t, a_.i8[(i * 8) + 7]);
      }
    #endif

    return simde__m128i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_haddq_epi8(a) simde_mm_haddq_epi8((a))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_haddq_epi16 (simde__m128i a) {
  #if defined(SIMDE_X86_XOP_NATIVE)
    return _mm_haddq_epi16(a);
  #else
    simde__m128i_private
      r_,
      a_ = simde__m128i_to_private(a);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_i64 = vpaddlq_s32(vpaddlq_s16(a_.neon_i16));
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.i64) / sizeof(r_.i64[0])) ; i++) {
        r_.i64[i] =
          HEDLEY_STATIC_CAST(int64_t, a_.i16[(i * 4)    ]) + HEDLEY_STATIC_CAST(int64_t, a_.i16[(i * 4) + 1]) +
          HEDLEY_STATIC_CAST(int64_t, a_.i16[(i * 4) + 2]) + HEDLEY_STATIC_CAST(int64_t, a_.i16[(i * 4) + 3]);
      }
    #endif

    return simde__m128i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_haddq_epi16(a) simde_mm_haddq_epi16((a))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_haddq_epi32 (simde__m128i a) {
  #if defined(SIMDE_X86_XOP_NATIVE)
    return _mm_haddq_epi32(a);
  #else
    simde__m128i_private
      r_,
      a_ = simde__m128i_to_private(a);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_i64 = vpaddlq_s32(a_.neon_i32);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.i64) / sizeof(r_.i64[0])) ; i++) {
        r_.i64[i] = HEDLEY_STATIC_CAST(int64_t, a_.i32[(i * 2)]) + HEDLEY_STATIC_CAST(int64_t, a_.i32[(i * 2) + 1]);
      }
    #endif

    return simde__m128i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_haddq_epi32(a) simde_mm_haddq_epi32((a))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_haddq_epu8 (simde__m128i a) {
  #if defined(SIMDE_X86_XOP_NATIVE)
    return _mm_haddq_epu8(a);
  #else
    simde__m128i_private
      r_,
      a_ = simde__m128i_to_private(a);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_u64 = vpaddlq_u32(vpaddlq_u16(vpaddlq_u8(a_.neon_u8)));
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.u64) / sizeof(r_.u64[0])) ; i++) {
        r_.u64[i] =
          HEDLEY_STATIC_CAST(uint64_t, a_.u8[(i * 8)    ]) + HEDLEY_STATIC_CAST(uint64_t, a_.u8[(i * 8) + 1]) +
          HEDLEY_STATIC_CAST(uint64_t, a_.u8[(i * 8) + 2]) + HEDLEY_STATIC_CAST(uint64_t, a_.u8[(i * 8) + 3]) +
          HEDLEY_STATIC_CAST(uint64_t, a_.u8[(i * 8) + 4]) + HEDLEY_STATIC_CAST(uint64_t, a_.u8[(i * 8) + 5]) +
          HEDLEY_STATIC_CAST(uint64_t, a_.u8[(i * 8) + 6]) + HEDLEY_STATIC_CAST(uint64_t, a_.u8[(i * 8) + 7]);
      }
    #endif

    return simde__m128i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_haddq_epu8(a) simde_mm_haddq_epu8((a))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_haddq_epu16 (simde__m128i a) {
  #if defined(SIMDE_X86_XOP_NATIVE)
    return _mm_haddq_epu16(a);
  #else
    simde__m128i_private
      r_,
      a_ = simde__m128i_to_private(a);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_u64 = vpaddlq_u32(vpaddlq_u16(a_.neon_u16));
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.u64) / sizeof(r_.u64[0])) ; i++) {
        r_.u64[i] =
          HEDLEY_STATIC_CAST(uint64_t, a_.u16[(i * 4)    ]) + HEDLEY_STATIC_CAST(uint64_t, a_.u16[(i * 4) + 1]) +
          HEDLEY_STATIC_CAST(uint64_t, a_.u16[(i * 4) + 2]) + HEDLEY_STATIC_CAST(uint64_t, a_.u16[(i * 4) + 3]);
      }
    #endif

    return simde__m128i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_haddq_epu16(a) simde_mm_haddq_epu16((a))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_haddq_epu32 (simde__m128i a) {
  #if defined(SIMDE_X86_XOP_NATIVE)
    return _mm_haddq_epu32(a);
  #else
    simde__m128i_private
      r_,
      a_ = simde__m128i_to_private(a);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_u64 = vpaddlq_u32(a_.neon_u32);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.u64) / sizeof(r_.u64[0])) ; i++) {
        r_.u64[i] = HEDLEY_STATIC_CAST(uint64_t, a_.u32[(i * 2)]) + HEDLEY_STATIC_CAST(uint64_t, a_.u32[(i * 2) + 1]);
      }
    #endif

    return simde__m128i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_haddq_epu32(a) simde_mm_haddq_epu32((a))
#endif
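
/* The haddq family reduces each 64-bit lane from eight bytes, four 16-bit
 * lanes, or two 32-bit lanes.  On NEON this is just a chain of widening
 * pairwise adds; for bytes, for example:
 *
 *   s8x16 --vpaddlq_s8--> s16x8 --vpaddlq_s16--> s32x4 --vpaddlq_s32--> s64x2
 */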

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_hsubw_epi8 (simde__m128i a) {
  #if defined(SIMDE_X86_XOP_NATIVE)
    return _mm_hsubw_epi8(a);
  #else
    simde__m128i_private
      r_,
      a_ = simde__m128i_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
      r_.i16[i] = HEDLEY_STATIC_CAST(int16_t, a_.i8[i * 2]) - HEDLEY_STATIC_CAST(int16_t, a_.i8[(i * 2) + 1]);
    }

    return simde__m128i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_hsubw_epi8(a) simde_mm_hsubw_epi8((a))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_hsubd_epi16 (simde__m128i a) {
  #if defined(SIMDE_X86_XOP_NATIVE)
    return _mm_hsubd_epi16(a);
  #else
    simde__m128i_private
      r_,
      a_ = simde__m128i_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
      r_.i32[i] =
        HEDLEY_STATIC_CAST(int32_t, a_.i16[(i * 2)]) - HEDLEY_STATIC_CAST(int32_t, a_.i16[(i * 2) + 1]);
    }

    return simde__m128i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_hsubd_epi16(a) simde_mm_hsubd_epi16((a))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_hsubq_epi32 (simde__m128i a) {
  #if defined(SIMDE_X86_XOP_NATIVE)
    return _mm_hsubq_epi32(a);
  #else
    simde__m128i_private
      r_,
      a_ = simde__m128i_to_private(a);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.i64) / sizeof(r_.i64[0])) ; i++) {
      r_.i64[i] = HEDLEY_STATIC_CAST(int64_t, a_.i32[(i * 2)]) - HEDLEY_STATIC_CAST(int64_t, a_.i32[(i * 2) + 1]);
    }

    return simde__m128i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_hsubq_epi32(a) simde_mm_hsubq_epi32((a))
#endif
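
/* The hsub* functions subtract the odd lane from the even lane of each
 * pair (even - odd), widening the result.  Hypothetical values:
 *
 *   simde__m128i a = simde_mm_set_epi16(0, 0, 0, 0, 0, 0, 3, 5);
 *   simde__m128i r = simde_mm_hsubd_epi16(a);  // lowest i32 lane == 5 - 3 == 2
 */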

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_macc_epi16 (simde__m128i a, simde__m128i b, simde__m128i c) {
  #if defined(SIMDE_X86_XOP_NATIVE)
    return _mm_macc_epi16(a, b, c);
  #else
    simde__m128i_private
      r_,
      a_ = simde__m128i_to_private(a),
      b_ = simde__m128i_to_private(b),
      c_ = simde__m128i_to_private(c);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_i16 = vmlaq_s16(c_.neon_i16, a_.neon_i16, b_.neon_i16);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
        r_.i16[i] = (a_.i16[i] * b_.i16[i]) + c_.i16[i];
      }
    #endif

    return simde__m128i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_macc_epi16(a, b, c) simde_mm_macc_epi16((a), (b), (c))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_macc_epi32 (simde__m128i a, simde__m128i b, simde__m128i c) {
  #if defined(SIMDE_X86_XOP_NATIVE)
    return _mm_macc_epi32(a, b, c);
  #else
    simde__m128i_private
      r_,
      a_ = simde__m128i_to_private(a),
      b_ = simde__m128i_to_private(b),
      c_ = simde__m128i_to_private(c);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_i32 = vmlaq_s32(c_.neon_i32, a_.neon_i32, b_.neon_i32);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
        r_.i32[i] = (a_.i32[i] * b_.i32[i]) + c_.i32[i];
      }
    #endif

    return simde__m128i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_macc_epi32(a, b, c) simde_mm_macc_epi32((a), (b), (c))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_maccd_epi16 (simde__m128i a, simde__m128i b, simde__m128i c) {
  #if defined(SIMDE_X86_XOP_NATIVE)
    return _mm_maccd_epi16(a, b, c);
  #else
    simde__m128i_private
      r_,
      a_ = simde__m128i_to_private(a),
      b_ = simde__m128i_to_private(b),
      c_ = simde__m128i_to_private(c);

    #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
      /* Gather the even i16 lanes of a and b, widen them to i32, then
       * multiply-accumulate into c. */
      int16x8_t even = vuzp1q_s16(a_.neon_i16, b_.neon_i16);
      int32x4_t a_even = vmovl_s16(vget_low_s16(even));
      int32x4_t b_even = vmovl_high_s16(even);
      r_.neon_i32 = vmlaq_s32(c_.neon_i32, a_even, b_even);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
        r_.i32[i] = (HEDLEY_STATIC_CAST(int32_t, a_.i16[i * 2]) * HEDLEY_STATIC_CAST(int32_t, b_.i16[i * 2])) + c_.i32[i];
      }
    #endif

    return simde__m128i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_maccd_epi16(a, b, c) simde_mm_maccd_epi16((a), (b), (c))
#endif
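
/* maccd_epi16 widens the even 16-bit lanes of a and b to 32 bits,
 * multiplies them, and accumulates into the 32-bit lanes of c:
 * r.i32[i] = a.i16[2*i] * b.i16[2*i] + c.i32[i].  The odd i16 lanes are
 * ignored, which is what makes the widening multiply overflow-free. */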

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_macclo_epi32 (simde__m128i a, simde__m128i b, simde__m128i c) {
  #if defined(SIMDE_X86_XOP_NATIVE)
    return _mm_macclo_epi32(a, b, c);
  #else
    simde__m128i_private
      r_,
      a_ = simde__m128i_to_private(a),
      b_ = simde__m128i_to_private(b),
      c_ = simde__m128i_to_private(c);

    #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
      int32x4_t even = vuzp1q_s32(a_.neon_i32, b_.neon_i32);
      r_.neon_i64 = vaddq_s64(vmull_s32(vget_low_s32(even), vget_high_s32(even)), c_.neon_i64);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.i64) / sizeof(r_.i64[0])) ; i++) {
        r_.i64[i] = (HEDLEY_STATIC_CAST(int64_t, a_.i32[(i * 2) + 0]) * HEDLEY_STATIC_CAST(int64_t, b_.i32[(i * 2) + 0])) + c_.i64[i];
      }
    #endif

    return simde__m128i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_macclo_epi32(a, b, c) simde_mm_macclo_epi32((a), (b), (c))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_macchi_epi32 (simde__m128i a, simde__m128i b, simde__m128i c) {
  #if defined(SIMDE_X86_XOP_NATIVE)
    return _mm_macchi_epi32(a, b, c);
  #else
    simde__m128i_private
      r_,
      a_ = simde__m128i_to_private(a),
      b_ = simde__m128i_to_private(b),
      c_ = simde__m128i_to_private(c);

    #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
      int32x4_t even = vuzp2q_s32(a_.neon_i32, b_.neon_i32);
      r_.neon_i64 = vaddq_s64(vmull_s32(vget_low_s32(even), vget_high_s32(even)), c_.neon_i64);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.i64) / sizeof(r_.i64[0])) ; i++) {
        r_.i64[i] = (HEDLEY_STATIC_CAST(int64_t, a_.i32[(i * 2) + 1]) * HEDLEY_STATIC_CAST(int64_t, b_.i32[(i * 2) + 1])) + c_.i64[i];
      }
    #endif

    return simde__m128i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_macchi_epi32(a, b, c) simde_mm_macchi_epi32((a), (b), (c))
#endif
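/* The maccs* variants saturate the final accumulate instead of
 * wrapping.  For example (hypothetical values), with every lane of a,
 * b, and c equal to INT16_MAX, simde_mm_maccs_epi16 returns INT16_MAX
 * in every lane rather than a wrapped result. */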
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_maccs_epi16 (simde__m128i a, simde__m128i b, simde__m128i c) {
  #if defined(SIMDE_X86_XOP_NATIVE)
    return _mm_maccs_epi16(a, b, c);
  #else
    simde__m128i_private
      r_,
      a_ = simde__m128i_to_private(a),
      b_ = simde__m128i_to_private(b),
      c_ = simde__m128i_to_private(c);

    #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
      int32x4_t c_lo = vmovl_s16(vget_low_s16(c_.neon_i16));
      int32x4_t c_hi = vmovl_high_s16(c_.neon_i16);
      int32x4_t lo = vmlal_s16(c_lo, vget_low_s16(a_.neon_i16), vget_low_s16(b_.neon_i16));
      int32x4_t hi = vmlal_high_s16(c_hi, a_.neon_i16, b_.neon_i16);
      r_.neon_i16 = vcombine_s16(vqmovn_s32(lo), vqmovn_s32(hi));
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
        int32_t tmp = HEDLEY_STATIC_CAST(int32_t, a_.i16[i]) * HEDLEY_STATIC_CAST(int32_t, b_.i16[i]);
        tmp += c_.i16[i];
        if (tmp > INT16_MAX)
          r_.i16[i] = INT16_MAX;
        else if (tmp < INT16_MIN)
          r_.i16[i] = INT16_MIN;
        else
          r_.i16[i] = HEDLEY_STATIC_CAST(int16_t, tmp);
      }
    #endif

    return simde__m128i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_maccs_epi16(a, b, c) simde_mm_maccs_epi16((a), (b), (c))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_maccs_epi32 (simde__m128i a, simde__m128i b, simde__m128i c) {
  #if defined(SIMDE_X86_XOP_NATIVE)
    return _mm_maccs_epi32(a, b, c);
  #else
    simde__m128i_private
      r_,
      a_ = simde__m128i_to_private(a),
      b_ = simde__m128i_to_private(b),
      c_ = simde__m128i_to_private(c);

    #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
      int64x2_t c_lo = vmovl_s32(vget_low_s32(c_.neon_i32));
      int64x2_t c_hi = vmovl_high_s32(c_.neon_i32);
      int64x2_t lo = vmlal_s32(c_lo, vget_low_s32(a_.neon_i32), vget_low_s32(b_.neon_i32));
      int64x2_t hi = vmlal_high_s32(c_hi, a_.neon_i32, b_.neon_i32);
      r_.neon_i32 = vcombine_s32(vqmovn_s64(lo), vqmovn_s64(hi));
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
        int64_t tmp = HEDLEY_STATIC_CAST(int64_t, a_.i32[i]) * HEDLEY_STATIC_CAST(int64_t, b_.i32[i]);
        tmp += HEDLEY_STATIC_CAST(int64_t, c_.i32[i]);
        if (tmp > INT32_MAX)
          r_.i32[i] = INT32_MAX;
        else if (tmp < INT32_MIN)
          r_.i32[i] = INT32_MIN;
        else
          r_.i32[i] = HEDLEY_STATIC_CAST(int32_t, tmp);
      }
    #endif

    return simde__m128i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_maccs_epi32(a, b, c) simde_mm_maccs_epi32((a), (b), (c))
#endif
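/* simde_mm_maccsd_epi16 is the saturating counterpart of
 * simde_mm_maccd_epi16: a signed 16x16 product always fits in 32
 * bits, so only the add of c needs (and gets) saturation. */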
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_maccsd_epi16 (simde__m128i a, simde__m128i b, simde__m128i c) {
  #if defined(SIMDE_X86_XOP_NATIVE)
    return _mm_maccsd_epi16(a, b, c);
  #else
    simde__m128i_private
      r_,
      a_ = simde__m128i_to_private(a),
      b_ = simde__m128i_to_private(b),
      c_ = simde__m128i_to_private(c);

    #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
      int16x8_t even = vuzp1q_s16(a_.neon_i16, b_.neon_i16);
      r_.neon_i32 = vqaddq_s32(vmull_s16(vget_low_s16(even), vget_high_s16(even)), c_.neon_i32);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
        int32_t prod = HEDLEY_STATIC_CAST(int32_t, a_.i16[i * 2]) * HEDLEY_STATIC_CAST(int32_t, b_.i16[i * 2]);
        r_.i32[i] = simde_math_adds_i32(prod, c_.i32[i]);
      }
    #endif

    return simde__m128i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_maccsd_epi16(a, b, c) simde_mm_maccsd_epi16((a), (b), (c))
#endif
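/* simde_mm_maccslo_epi32 / simde_mm_maccshi_epi32: saturating 64-bit
 * multiply-accumulate of the even (lo) or odd (hi) 32-bit lanes.
 * Unlike the wrapping variants above, only the portable loop is
 * provided for non-XOP targets. */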
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_maccslo_epi32 (simde__m128i a, simde__m128i b, simde__m128i c) {
  #if defined(SIMDE_X86_XOP_NATIVE)
    return _mm_maccslo_epi32(a, b, c);
  #else
    simde__m128i_private
      r_,
      a_ = simde__m128i_to_private(a),
      b_ = simde__m128i_to_private(b),
      c_ = simde__m128i_to_private(c);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.i64) / sizeof(r_.i64[0])) ; i++) {
      int64_t tmp = HEDLEY_STATIC_CAST(int64_t, a_.i32[(i * 2) + 0]) * HEDLEY_STATIC_CAST(int64_t, b_.i32[(i * 2) + 0]);
      r_.i64[i] = simde_math_adds_i64(tmp, c_.i64[i]);
    }

    return simde__m128i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_maccslo_epi32(a, b, c) simde_mm_maccslo_epi32((a), (b), (c))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_maccshi_epi32 (simde__m128i a, simde__m128i b, simde__m128i c) {
  #if defined(SIMDE_X86_XOP_NATIVE)
    return _mm_maccshi_epi32(a, b, c);
  #else
    simde__m128i_private
      r_,
      a_ = simde__m128i_to_private(a),
      b_ = simde__m128i_to_private(b),
      c_ = simde__m128i_to_private(c);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.i64) / sizeof(r_.i64[0])) ; i++) {
      int64_t tmp = HEDLEY_STATIC_CAST(int64_t, a_.i32[(i * 2) + 1]) * HEDLEY_STATIC_CAST(int64_t, b_.i32[(i * 2) + 1]);
      r_.i64[i] = simde_math_adds_i64(tmp, c_.i64[i]);
    }

    return simde__m128i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_maccshi_epi32(a, b, c) simde_mm_maccshi_epi32((a), (b), (c))
#endif
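/* simde_mm_maddd_epi16 is a horizontal multiply-add in the style of
 * _mm_madd_epi16, with an extra accumulator: each 32-bit result lane
 * is a[2i]*b[2i] + a[2i+1]*b[2i+1] + c[i].  A minimal illustration
 * (hypothetical values):
 *
 *   simde__m128i a = simde_mm_set1_epi16(2);
 *   simde__m128i b = simde_mm_set1_epi16(3);
 *   simde__m128i c = simde_mm_set1_epi32(1);
 *   simde_mm_maddd_epi16(a, b, c);  // every 32-bit lane == 6+6+1 == 13
 */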
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_maddd_epi16 (simde__m128i a, simde__m128i b, simde__m128i c) {
  #if defined(SIMDE_X86_XOP_NATIVE)
    return _mm_maddd_epi16(a, b, c);
  #else
    simde__m128i_private
      r_,
      a_ = simde__m128i_to_private(a),
      b_ = simde__m128i_to_private(b),
      c_ = simde__m128i_to_private(c);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
      r_.i32[i] =
        (a_.i16[(i * 2) + 0] * b_.i16[(i * 2) + 0]) +
        (a_.i16[(i * 2) + 1] * b_.i16[(i * 2) + 1]);
      r_.i32[i] += c_.i32[i];
    }

    return simde__m128i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_maddd_epi16(a, b, c) simde_mm_maddd_epi16((a), (b), (c))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_maddsd_epi16 (simde__m128i a, simde__m128i b, simde__m128i c) {
  #if defined(SIMDE_X86_XOP_NATIVE)
    return _mm_maddsd_epi16(a, b, c);
  #else
    simde__m128i_private
      r_,
      a_ = simde__m128i_to_private(a),
      b_ = simde__m128i_to_private(b),
      c_ = simde__m128i_to_private(c);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
      /* The AMD64 Architecture Programmer's Manual says that "the"
       * addition is saturated; I'm not sure whether that means
       * the pairwise addition or the accumulate, or both. */
      r_.i32[i] =
        (a_.i16[(i * 2) + 0] * b_.i16[(i * 2) + 0]) +
        (a_.i16[(i * 2) + 1] * b_.i16[(i * 2) + 1]);
      r_.i32[i] = simde_math_adds_i32(r_.i32[i], c_.i32[i]);
    }

    return simde__m128i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_maddsd_epi16(a, b, c) simde_mm_maddsd_epi16((a), (b), (c))
#endif
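/* The sha family performs a per-element arithmetic shift where the
 * shift count is itself a vector: a non-negative count in b shifts
 * the corresponding lane of a left, a negative count shifts it right
 * with sign fill.  A minimal illustration (hypothetical values):
 *
 *   simde__m128i v = simde_mm_set1_epi8(-16);
 *   simde__m128i n = simde_mm_set1_epi8(-2);
 *   simde_mm_sha_epi8(v, n);  // every lane == -16 >> 2 == -4
 */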
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_sha_epi8 (simde__m128i a, simde__m128i b) {
  #if defined(SIMDE_X86_XOP_NATIVE)
    return _mm_sha_epi8(a, b);
  #else
    simde__m128i_private
      r_,
      a_ = simde__m128i_to_private(a),
      b_ = simde__m128i_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_i8 = vshlq_s8(a_.neon_i8, b_.neon_i8);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.i8) / sizeof(r_.i8[0])) ; i++) {
        if (b_.i8[i] < 0) {
          r_.i8[i] = HEDLEY_STATIC_CAST(int8_t, a_.i8[i] >> -b_.i8[i]);
        } else {
          r_.i8[i] = HEDLEY_STATIC_CAST(int8_t, a_.i8[i] << b_.i8[i]);
        }
      }
    #endif

    return simde__m128i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_sha_epi8(a, b) simde_mm_sha_epi8((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_sha_epi16 (simde__m128i a, simde__m128i b) {
  #if defined(SIMDE_X86_XOP_NATIVE)
    return _mm_sha_epi16(a, b);
  #else
    simde__m128i_private
      r_,
      a_ = simde__m128i_to_private(a),
      b_ = simde__m128i_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_i16 = vshlq_s16(a_.neon_i16, b_.neon_i16);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
        if (b_.i16[i] < 0) {
          r_.i16[i] = HEDLEY_STATIC_CAST(int16_t, a_.i16[i] >> -b_.i16[i]);
        } else {
          r_.i16[i] = HEDLEY_STATIC_CAST(int16_t, a_.i16[i] << b_.i16[i]);
        }
      }
    #endif

    return simde__m128i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_sha_epi16(a, b) simde_mm_sha_epi16((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_sha_epi32 (simde__m128i a, simde__m128i b) {
  #if defined(SIMDE_X86_XOP_NATIVE)
    return _mm_sha_epi32(a, b);
  #else
    simde__m128i_private
      r_,
      a_ = simde__m128i_to_private(a),
      b_ = simde__m128i_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_i32 = vshlq_s32(a_.neon_i32, b_.neon_i32);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
        if (b_.i32[i] < 0) {
          r_.i32[i] = HEDLEY_STATIC_CAST(int32_t, a_.i32[i] >> -b_.i32[i]);
        } else {
          r_.i32[i] = HEDLEY_STATIC_CAST(int32_t, a_.i32[i] << b_.i32[i]);
        }
      }
    #endif

    return simde__m128i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_sha_epi32(a, b) simde_mm_sha_epi32((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_sha_epi64 (simde__m128i a, simde__m128i b) {
  #if defined(SIMDE_X86_XOP_NATIVE)
    return _mm_sha_epi64(a, b);
  #else
    simde__m128i_private
      r_,
      a_ = simde__m128i_to_private(a),
      b_ = simde__m128i_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_i64 = vshlq_s64(a_.neon_i64, b_.neon_i64);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.i64) / sizeof(r_.i64[0])) ; i++) {
        if (b_.i64[i] < 0) {
          r_.i64[i] = HEDLEY_STATIC_CAST(int64_t, a_.i64[i] >> -b_.i64[i]);
        } else {
          r_.i64[i] = HEDLEY_STATIC_CAST(int64_t, a_.i64[i] << b_.i64[i]);
        }
      }
    #endif

    return simde__m128i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_sha_epi64(a, b) simde_mm_sha_epi64((a), (b))
#endif
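/* The shl family is the logical counterpart of sha: the sign of each
 * count lane still selects the direction, but right shifts fill with
 * zeros, and any count whose magnitude reaches the element width
 * produces 0 (hence the explicit range check in the portable loops
 * below).  E.g. shifting the lane value 0x80 by -7 yields 0x01. */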
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_shl_epi8 (simde__m128i a, simde__m128i b) {
  #if defined(SIMDE_X86_XOP_NATIVE)
    return _mm_shl_epi8(a, b);
  #else
    simde__m128i_private
      r_,
      a_ = simde__m128i_to_private(a),
      b_ = simde__m128i_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_u8 = vshlq_u8(a_.neon_u8, b_.neon_i8);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) {
        if (HEDLEY_UNLIKELY(b_.i8[i] < -7 || b_.i8[i] > 7)) {
          r_.u8[i] = 0;
        } else {
          if (b_.i8[i] < 0) {
            r_.u8[i] = HEDLEY_STATIC_CAST(uint8_t, a_.u8[i] >> -b_.i8[i]);
          } else {
            r_.u8[i] = HEDLEY_STATIC_CAST(uint8_t, a_.u8[i] << b_.i8[i]);
          }
        }
      }
    #endif

    return simde__m128i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_shl_epi8(a, b) simde_mm_shl_epi8((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_shl_epi16 (simde__m128i a, simde__m128i b) {
  #if defined(SIMDE_X86_XOP_NATIVE)
    return _mm_shl_epi16(a, b);
  #else
    simde__m128i_private
      r_,
      a_ = simde__m128i_to_private(a),
      b_ = simde__m128i_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_u16 = vshlq_u16(a_.neon_u16, b_.neon_i16);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) {
        if (HEDLEY_UNLIKELY(b_.i16[i] < -15 || b_.i16[i] > 15)) {
          r_.u16[i] = 0;
        } else {
          if (b_.i16[i] < 0) {
            r_.u16[i] = HEDLEY_STATIC_CAST(uint16_t, a_.u16[i] >> -b_.i16[i]);
          } else {
            r_.u16[i] = HEDLEY_STATIC_CAST(uint16_t, a_.u16[i] << b_.i16[i]);
          }
        }
      }
    #endif

    return simde__m128i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_shl_epi16(a, b) simde_mm_shl_epi16((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_shl_epi32 (simde__m128i a, simde__m128i b) {
  #if defined(SIMDE_X86_XOP_NATIVE)
    return _mm_shl_epi32(a, b);
  #else
    simde__m128i_private
      r_,
      a_ = simde__m128i_to_private(a),
      b_ = simde__m128i_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_u32 = vshlq_u32(a_.neon_u32, b_.neon_i32);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) {
        if (HEDLEY_UNLIKELY(b_.i32[i] < -31 || b_.i32[i] > 31)) {
          r_.u32[i] = 0;
        } else {
          if (b_.i32[i] < 0) {
            r_.u32[i] = HEDLEY_STATIC_CAST(uint32_t, a_.u32[i] >> -b_.i32[i]);
          } else {
            r_.u32[i] = HEDLEY_STATIC_CAST(uint32_t, a_.u32[i] << b_.i32[i]);
          }
        }
      }
    #endif

    return simde__m128i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_shl_epi32(a, b) simde_mm_shl_epi32((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_shl_epi64 (simde__m128i a, simde__m128i b) {
  #if defined(SIMDE_X86_XOP_NATIVE)
    return _mm_shl_epi64(a, b);
  #else
    simde__m128i_private
      r_,
      a_ = simde__m128i_to_private(a),
      b_ = simde__m128i_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_u64 = vshlq_u64(a_.neon_u64, b_.neon_i64);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.u64) / sizeof(r_.u64[0])) ; i++) {
        if (HEDLEY_UNLIKELY(b_.i64[i] < -63 || b_.i64[i] > 63)) {
          r_.u64[i] = 0;
        } else {
          if (b_.i64[i] < 0) {
            r_.u64[i] = HEDLEY_STATIC_CAST(uint64_t, a_.u64[i] >> -b_.i64[i]);
          } else {
            r_.u64[i] = HEDLEY_STATIC_CAST(uint64_t, a_.u64[i] << b_.i64[i]);
          }
        }
      }
    #endif

    return simde__m128i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_shl_epi64(a, b) simde_mm_shl_epi64((a), (b))
#endif
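/* The rot family rotates each lane by a per-element signed count:
 * positive counts rotate left, negative counts rotate right.  The
 * `& 7` (resp. 15/31/63) masking folds the complementary shift amount
 * back into range so the two halves of the rotate recombine
 * correctly.  A minimal illustration (hypothetical values):
 *
 *   simde__m128i x = simde_mm_set1_epi8(0x12);
 *   simde__m128i n = simde_mm_set1_epi8(4);
 *   simde_mm_rot_epi8(x, n);  // every lane == 0x21
 */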
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_rot_epi8 (simde__m128i a, simde__m128i b) {
  #if defined(SIMDE_X86_XOP_NATIVE)
    return _mm_rot_epi8(a, b);
  #else
    simde__m128i_private
      r_,
      a_ = simde__m128i_to_private(a),
      b_ = simde__m128i_to_private(b);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) {
      r_.u8[i] = (b_.i8[i] < 0) ?
        HEDLEY_STATIC_CAST(uint8_t, ((a_.u8[i] >> -b_.i8[i]) | (a_.u8[i] << ( b_.i8[i] & 7)))) :
        HEDLEY_STATIC_CAST(uint8_t, ((a_.u8[i] <<  b_.i8[i]) | (a_.u8[i] >> (-b_.i8[i] & 7))));
    }

    return simde__m128i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_rot_epi8(a, b) simde_mm_rot_epi8((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_rot_epi16 (simde__m128i a, simde__m128i b) {
  #if defined(SIMDE_X86_XOP_NATIVE)
    return _mm_rot_epi16(a, b);
  #else
    simde__m128i_private
      r_,
      a_ = simde__m128i_to_private(a),
      b_ = simde__m128i_to_private(b);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) {
      r_.u16[i] = (b_.i16[i] < 0) ?
        HEDLEY_STATIC_CAST(uint16_t, ((a_.u16[i] >> -b_.i16[i]) | (a_.u16[i] << ( b_.i16[i] & 15)))) :
        HEDLEY_STATIC_CAST(uint16_t, ((a_.u16[i] <<  b_.i16[i]) | (a_.u16[i] >> (-b_.i16[i] & 15))));
    }

    return simde__m128i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_rot_epi16(a, b) simde_mm_rot_epi16((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_rot_epi32 (simde__m128i a, simde__m128i b) {
  #if defined(SIMDE_X86_XOP_NATIVE)
    return _mm_rot_epi32(a, b);
  #else
    simde__m128i_private
      r_,
      a_ = simde__m128i_to_private(a),
      b_ = simde__m128i_to_private(b);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) {
      r_.u32[i] = (b_.i32[i] < 0) ?
        HEDLEY_STATIC_CAST(uint32_t, ((a_.u32[i] >> -b_.i32[i]) | (a_.u32[i] << ( b_.i32[i] & 31)))) :
        HEDLEY_STATIC_CAST(uint32_t, ((a_.u32[i] <<  b_.i32[i]) | (a_.u32[i] >> (-b_.i32[i] & 31))));
    }

    return simde__m128i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_rot_epi32(a, b) simde_mm_rot_epi32((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_rot_epi64 (simde__m128i a, simde__m128i b) {
  #if defined(SIMDE_X86_XOP_NATIVE)
    return _mm_rot_epi64(a, b);
  #else
    simde__m128i_private
      r_,
      a_ = simde__m128i_to_private(a),
      b_ = simde__m128i_to_private(b);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.u64) / sizeof(r_.u64[0])) ; i++) {
      r_.u64[i] = (b_.i64[i] < 0) ?
        HEDLEY_STATIC_CAST(uint64_t, ((a_.u64[i] >> -b_.i64[i]) | (a_.u64[i] << ( b_.i64[i] & 63)))) :
        HEDLEY_STATIC_CAST(uint64_t, ((a_.u64[i] <<  b_.i64[i]) | (a_.u64[i] >> (-b_.i64[i] & 63))));
    }

    return simde__m128i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_rot_epi64(a, b) simde_mm_rot_epi64((a), (b))
#endif
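/* The roti functions are the immediate-count forms of rot: the count
 * is a compile-time constant, so on XOP targets the portable function
 * is replaced by a macro that expands to the native intrinsic. */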
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_roti_epi8 (simde__m128i a, const int count) {
  simde__m128i_private
    r_,
    a_ = simde__m128i_to_private(a);

  SIMDE_VECTORIZE
  for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) {
    r_.u8[i] = (count < 0) ?
      HEDLEY_STATIC_CAST(uint8_t, ((a_.u8[i] >> -count) | (a_.u8[i] << ( count & 7)))) :
      HEDLEY_STATIC_CAST(uint8_t, ((a_.u8[i] <<  count) | (a_.u8[i] >> (-count & 7))));
  }

  return simde__m128i_from_private(r_);
}
#if defined(SIMDE_X86_XOP_NATIVE)
  #define simde_mm_roti_epi8(a, count) _mm_roti_epi8((a), (count))
#endif
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_roti_epi8(a, count) simde_mm_roti_epi8((a), (count))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_roti_epi16 (simde__m128i a, const int count) {
  simde__m128i_private
    r_,
    a_ = simde__m128i_to_private(a);

  SIMDE_VECTORIZE
  for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) {
    r_.u16[i] = (count < 0) ?
      HEDLEY_STATIC_CAST(uint16_t, ((a_.u16[i] >> -count) | (a_.u16[i] << ( count & 15)))) :
      HEDLEY_STATIC_CAST(uint16_t, ((a_.u16[i] <<  count) | (a_.u16[i] >> (-count & 15))));
  }

  return simde__m128i_from_private(r_);
}
#if defined(SIMDE_X86_XOP_NATIVE)
  #define simde_mm_roti_epi16(a, count) _mm_roti_epi16((a), (count))
#endif
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_roti_epi16(a, count) simde_mm_roti_epi16((a), (count))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_roti_epi32 (simde__m128i a, const int count) {
  simde__m128i_private
    r_,
    a_ = simde__m128i_to_private(a);

  SIMDE_VECTORIZE
  for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) {
    r_.u32[i] = (count < 0) ?
      HEDLEY_STATIC_CAST(uint32_t, ((a_.u32[i] >> -count) | (a_.u32[i] << ( count & 31)))) :
      HEDLEY_STATIC_CAST(uint32_t, ((a_.u32[i] <<  count) | (a_.u32[i] >> (-count & 31))));
  }

  return simde__m128i_from_private(r_);
}
#if defined(SIMDE_X86_XOP_NATIVE)
  #define simde_mm_roti_epi32(a, count) _mm_roti_epi32((a), (count))
#endif
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_roti_epi32(a, count) simde_mm_roti_epi32((a), (count))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_roti_epi64 (simde__m128i a, const int count) {
  simde__m128i_private
    r_,
    a_ = simde__m128i_to_private(a);

  SIMDE_VECTORIZE
  for (size_t i = 0 ; i < (sizeof(r_.u64) / sizeof(r_.u64[0])) ; i++) {
    r_.u64[i] = (count < 0) ?
      HEDLEY_STATIC_CAST(uint64_t, ((a_.u64[i] >> -count) | (a_.u64[i] << ( count & 63)))) :
      HEDLEY_STATIC_CAST(uint64_t, ((a_.u64[i] <<  count) | (a_.u64[i] >> (-count & 63))));
  }

  return simde__m128i_from_private(r_);
}
#if defined(SIMDE_X86_XOP_NATIVE)
  #define simde_mm_roti_epi64(a, count) _mm_roti_epi64((a), (count))
#endif
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_roti_epi64(a, count) simde_mm_roti_epi64((a), (count))
#endif
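/* simde_mm_perm_epi8 selects and post-processes bytes according to a
 * control byte in c (as implemented below): bits 3:0 index a byte,
 * bit 4 picks it from b instead of a, bits 7:6 choose an operation
 * (00 copy, 01 bit-reverse, 10 zero, 11 broadcast the sign bit via an
 * arithmetic shift by 7), and bit 5 inverts the final result. */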
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i
simde_mm_perm_epi8 (simde__m128i a, simde__m128i b, simde__m128i c) {
  #if defined(SIMDE_X86_XOP_NATIVE)
    return _mm_perm_epi8(a, b, c);
  #else
    simde__m128i_private
      r_,
      a_ = simde__m128i_to_private(a),
      b_ = simde__m128i_to_private(b),
      c_ = simde__m128i_to_private(c);

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.i8) / sizeof(r_.i8[0])) ; i++) {
      int8_t src = (c_.u8[i] & 0x10) ? b_.i8[c_.u8[i] & 0xf] : a_.i8[c_.u8[i] & 0xf];

      switch (c_.u8[i] & 0xc0) {
        case 0x40:
          #if HEDLEY_HAS_BUILTIN(__builtin_bitreverse8) && !defined(HEDLEY_IBM_VERSION)
            src = HEDLEY_STATIC_CAST(int8_t, __builtin_bitreverse8(HEDLEY_STATIC_CAST(uint8_t, src)));
          #else
            src = HEDLEY_STATIC_CAST(int8_t, ((HEDLEY_STATIC_CAST(uint8_t, src) * UINT64_C(0x80200802)) & UINT64_C(0x0884422110)) * UINT64_C(0x0101010101) >> 32);
          #endif
          break;
        case 0x80:
          src = 0;
          break;
        case 0xc0:
          src >>= 7;
          break;
      }

      r_.i8[i] = (c_.u8[i] & 0x20) ? ~src : src;
    }

    return simde__m128i_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_perm_epi8(a, b, c) simde_mm_perm_epi8((a), (b), (c))
#endif
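/* The permute2 family selects each result lane from the concatenation
 * of a and b via per-lane selector bits in c, with a "match-to-zero"
 * (m2z) control taken from the low two bits of imm8.  As coded below,
 * a lane is zeroed when the selector's bit 3 combined with imm8 & 3
 * equals 0xA or 0x3; otherwise bits 2:0 (ps) or bits 2:1 (pd) of the
 * selector pick the source element. */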
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_permute2_ps (simde__m128 a, simde__m128 b, simde__m128i c, const int imm8) {
  simde__m128_private
    r_,
    a_ = simde__m128_to_private(a),
    b_ = simde__m128_to_private(b);
  simde__m128i_private c_ = simde__m128i_to_private(c);

  const int m2z = imm8 & 0x03;

  SIMDE_VECTORIZE
  for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
    const int sel = c_.i32[i] & 0x07;
    const int m = c_.i32[i] & 0x08;

    switch (m | m2z) {
      case 0xa:
      case 0x3:
        r_.i32[i] = 0;
        break;
      default:
        r_.i32[i] = (sel > 3) ? b_.i32[sel - 4] : a_.i32[sel];
        break;
    }
  }

  return simde__m128_from_private(r_);
}
#if defined(SIMDE_X86_XOP_NATIVE)
  #if defined(HEDLEY_MCST_LCC_VERSION)
    #define simde_mm_permute2_ps(a, b, c, imm8) (__extension__ ({ \
        SIMDE_LCC_DISABLE_DEPRECATED_WARNINGS \
        _mm_permute2_ps((a), (b), (c), (imm8)); \
        SIMDE_LCC_REVERT_DEPRECATED_WARNINGS \
      }))
  #else
    #define simde_mm_permute2_ps(a, b, c, imm8) _mm_permute2_ps((a), (b), (c), (imm8))
  #endif
#endif
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_permute2_ps(a, b, c, imm8) simde_mm_permute2_ps((a), (b), (c), (imm8))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_permute2_pd (simde__m128d a, simde__m128d b, simde__m128i c, const int imm8) {
  simde__m128d_private
    r_,
    a_ = simde__m128d_to_private(a),
    b_ = simde__m128d_to_private(b);
  simde__m128i_private c_ = simde__m128i_to_private(c);

  const int m2z = imm8 & 0x03;

  SIMDE_VECTORIZE
  for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
    const int sel = (c_.i64[i] & 0x06) >> 1;
    const int m = c_.i64[i] & 0x08;

    switch (m | m2z) {
      case 0x0a:
      case 0x03:
        r_.i64[i] = 0;
        break;
      default:
        r_.i64[i] = (sel > 1) ? b_.i64[sel - 2] : a_.i64[sel];
        break;
    }
  }

  return simde__m128d_from_private(r_);
}
#if defined(SIMDE_X86_XOP_NATIVE)
  #if defined(HEDLEY_MCST_LCC_VERSION)
    #define simde_mm_permute2_pd(a, b, c, imm8) (__extension__ ({ \
        SIMDE_LCC_DISABLE_DEPRECATED_WARNINGS \
        _mm_permute2_pd((a), (b), (c), (imm8)); \
        SIMDE_LCC_REVERT_DEPRECATED_WARNINGS \
      }))
  #else
    #define simde_mm_permute2_pd(a, b, c, imm8) _mm_permute2_pd((a), (b), (c), (imm8))
  #endif
#endif
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm_permute2_pd(a, b, c, imm8) simde_mm_permute2_pd((a), (b), (c), (imm8))
#endif
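/* The 256-bit forms either defer to two 128-bit permute2 calls (when
 * SIMDe's natural vector size is 128 bits or less) or run the same
 * selection loop with the index offset into the matching 128-bit
 * half, so selection never crosses a 128-bit lane boundary. */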
SIMDE_FUNCTION_ATTRIBUTES
simde__m256
simde_mm256_permute2_ps (simde__m256 a, simde__m256 b, simde__m256i c, const int imm8) {
  simde__m256_private
    r_,
    a_ = simde__m256_to_private(a),
    b_ = simde__m256_to_private(b);
  simde__m256i_private c_ = simde__m256i_to_private(c);

  #if SIMDE_NATURAL_VECTOR_SIZE_LE(128)
    for (size_t i = 0 ; i < (sizeof(r_.m128) / sizeof(r_.m128[0])) ; i++) {
      r_.m128[i] = simde_mm_permute2_ps(a_.m128[i], b_.m128[i], c_.m128i[i], imm8);
    }
  #else
    const int m2z = imm8 & 0x03;

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
      const int sel = c_.i32[i] & 0x07;
      const int m = c_.i32[i] & 0x08;

      switch (m | m2z) {
        case 0xa:
        case 0x3:
          r_.i32[i] = 0;
          break;
        default:
          r_.i32[i] = (sel > 3) ? b_.i32[sel + (HEDLEY_STATIC_CAST(int, i) & 4) - 4] : a_.i32[sel + (HEDLEY_STATIC_CAST(int, i) & 4)];
          break;
      }
    }
  #endif

  return simde__m256_from_private(r_);
}
#if defined(SIMDE_X86_XOP_NATIVE)
  #if defined(HEDLEY_MCST_LCC_VERSION)
    #define simde_mm256_permute2_ps(a, b, c, imm8) (__extension__ ({ \
        SIMDE_LCC_DISABLE_DEPRECATED_WARNINGS \
        _mm256_permute2_ps((a), (b), (c), (imm8)); \
        SIMDE_LCC_REVERT_DEPRECATED_WARNINGS \
      }))
  #else
    #define simde_mm256_permute2_ps(a, b, c, imm8) _mm256_permute2_ps((a), (b), (c), (imm8))
  #endif
#endif
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm256_permute2_ps(a, b, c, imm8) simde_mm256_permute2_ps((a), (b), (c), (imm8))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m256d
simde_mm256_permute2_pd (simde__m256d a, simde__m256d b, simde__m256i c, const int imm8) {
  simde__m256d_private
    r_,
    a_ = simde__m256d_to_private(a),
    b_ = simde__m256d_to_private(b);
  simde__m256i_private c_ = simde__m256i_to_private(c);

  #if SIMDE_NATURAL_VECTOR_SIZE_LE(128)
    for (size_t i = 0 ; i < (sizeof(r_.m128d) / sizeof(r_.m128d[0])) ; i++) {
      r_.m128d[i] = simde_mm_permute2_pd(a_.m128d[i], b_.m128d[i], c_.m128i[i], imm8);
    }
  #else
    const int m2z = imm8 & 0x03;

    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f64) / sizeof(r_.f64[0])) ; i++) {
      const int sel = (c_.i64[i] & 0x06) >> 1;
      const int m = c_.i64[i] & 0x08;

      switch (m | m2z) {
        case 0x0a:
        case 0x03:
          r_.i64[i] = 0;
          break;
        default:
          r_.i64[i] = (sel > 1) ? b_.i64[sel + (HEDLEY_STATIC_CAST(int, i) & 2) - 2] : a_.i64[sel + (HEDLEY_STATIC_CAST(int, i) & 2)];
          break;
      }
    }
  #endif

  return simde__m256d_from_private(r_);
}
#if defined(SIMDE_X86_XOP_NATIVE)
  #if defined(HEDLEY_MCST_LCC_VERSION)
    #define simde_mm256_permute2_pd(a, b, c, imm8) (__extension__ ({ \
        SIMDE_LCC_DISABLE_DEPRECATED_WARNINGS \
        _mm256_permute2_pd((a), (b), (c), (imm8)); \
        SIMDE_LCC_REVERT_DEPRECATED_WARNINGS \
      }))
  #else
    #define simde_mm256_permute2_pd(a, b, c, imm8) _mm256_permute2_pd((a), (b), (c), (imm8))
  #endif
#endif
#if defined(SIMDE_X86_XOP_ENABLE_NATIVE_ALIASES)
  #define _mm256_permute2_pd(a, b, c, imm8) simde_mm256_permute2_pd((a), (b), (c), (imm8))
#endif
HEDLEY_DIAGNOSTIC_POP
SIMDE_END_DECLS_

#endif /* !defined(SIMDE_X86_XOP_H) */