EncoderMD.cpp 78 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
722782279228022812282228322842285228622872288228922902291229222932294229522962297229822992300230123022303230423052306230723082309231023112312231323142315231623172318231923202321232223232324232523262327232823292330233123322333233423352336233723382339234023412342234323442345234623472348234923502351235223532354235523562357235823592360236123622363236423652366236723682369237023712372237323742375237623772378237923802381238223832384238523862387238823892390239123922393239423952396239723982399240024012402240324042405240624072408240924102411241224132414241524162417241824192420242124222423242424252426242724282429243024312432243324342435243624372438243924402441
  1. //-------------------------------------------------------------------------------------------------------
  2. // Copyright (C) Microsoft. All rights reserved.
  3. // Licensed under the MIT license. See LICENSE.txt file in the project root for full license information.
  4. //-------------------------------------------------------------------------------------------------------
  5. #include "Backend.h"
  6. #include "ARMEncode.h"
  7. #include "Language/JavascriptFunctionArgIndex.h"
// Encoding-descriptor table: one entry per machine-dependent opcode, produced
// by expanding MACRO over MdOpCodes.h. Each entry is the opcode's 'opbyte'
// form table used by the encoder to select an instruction form.
const FormTable * InstrEncode[]={
#define MACRO(name, jnLayout, attrib, byte2, form, opbyte, ...) opbyte,
#include "MdOpCodes.h"
// NOTE(review): this #undef targets ASMDAT rather than MACRO -- presumably
// MdOpCodes.h cleans up MACRO itself; confirm before changing.
#undef ASMDAT
};
  13. ///----------------------------------------------------------------------------
  14. ///
  15. /// EncoderMD::Init
  16. ///
  17. ///----------------------------------------------------------------------------
  18. void
  19. EncoderMD::Init(Encoder *encoder)
  20. {
  21. m_encoder = encoder;
  22. m_relocList = nullptr;
  23. }
  24. ///----------------------------------------------------------------------------
  25. ///
  26. /// EncoderMD::GetRegEncode
  27. ///
  28. /// Get the encoding of a given register.
  29. ///
  30. ///----------------------------------------------------------------------------
  31. BYTE
  32. EncoderMD::GetRegEncode(IR::RegOpnd *regOpnd)
  33. {
  34. return GetRegEncode(regOpnd->GetReg());
  35. }
  36. BYTE
  37. EncoderMD::GetRegEncode(RegNum reg)
  38. {
  39. return RegEncode[reg];
  40. }
  41. BYTE
  42. EncoderMD::GetFloatRegEncode(IR::RegOpnd *regOpnd)
  43. {
  44. //Each double register holds two single precision registers.
  45. BYTE regEncode = GetRegEncode(regOpnd->GetReg()) * 2;
  46. AssertMsg(regEncode <= LAST_FLOAT_REG_NUM, "Impossible to allocate higher registers on VFP");
  47. return regEncode;
  48. }
  49. ///----------------------------------------------------------------------------
  50. ///
  51. /// EncoderMD::GetOpdope
  52. ///
  53. /// Get the dope vector of a particular instr. The dope vector describes
  54. /// certain properties of an instr.
  55. ///
  56. ///----------------------------------------------------------------------------
  57. uint32
  58. EncoderMD::GetOpdope(IR::Instr *instr)
  59. {
  60. return GetOpdope(instr->m_opcode);
  61. }
uint32
EncoderMD::GetOpdope(Js::OpCode op)
{
    // Opdope is indexed relative to the first machine-dependent opcode,
    // which immediately follows Js::OpCode::MDStart.
    return Opdope[op - (Js::OpCode::MDStart+1)];
}
  67. //
  68. // EncoderMD::CanonicalizeInstr :
  69. // Put the instruction in its final form for encoding. This may involve
  70. // expanding a pseudo-op such as LEA or changing an opcode to indicate the
  71. // op bits the encoder should use.
  72. //
  73. // Return the size of the final instruction's encoding.
  74. //
InstructionType EncoderMD::CanonicalizeInstr(IR::Instr* instr)
{
    // Instructions that are not yet lowered produce no encoding.
    if (!instr->IsLowered())
    {
        return InstructionType::None;
    }
    switch (instr->m_opcode)
    {
    // Opcode groups whose encoding width is fixed (macros expand to case labels).
    CASE_OPCODES_ALWAYS_THUMB2
        return InstructionType::Thumb2;
    CASE_OPCODES_NEVER_THUMB2
        return InstructionType::Thumb;
    case Js::OpCode::MOV:
        return this->CanonicalizeMov(instr);
    case Js::OpCode::B:
        return InstructionType::Thumb2; // For now only T2 branches are encoded
    case Js::OpCode::BL:
        return InstructionType::Thumb2;
    // Conditional branches: all encoded as wide (T2) forms for now.
    case Js::OpCode::BNE:
    case Js::OpCode::BEQ:
    case Js::OpCode::BLT:
    case Js::OpCode::BLE:
    case Js::OpCode::BGE:
    case Js::OpCode::BGT:
    case Js::OpCode::BCS:
    case Js::OpCode::BCC:
    case Js::OpCode::BHI:
    case Js::OpCode::BLS:
    case Js::OpCode::BMI:
    case Js::OpCode::BPL:
    case Js::OpCode::BVS:
    case Js::OpCode::BVC:
        return InstructionType::Thumb2; // For now only T2 branches are encoded
    case Js::OpCode::CMP:
        return this->CmpEncodeType(instr);
    case Js::OpCode::CMN:
        return this->CmnEncodeType(instr);
    case Js::OpCode::CMP_ASR31:
        return InstructionType::Thumb2;
    // PUSH/POP: width depends on the base register and the register set.
    case Js::OpCode::POP:
        return this->PushPopEncodeType(instr->GetSrc1()->AsIndirOpnd(), instr->GetDst()->AsRegBVOpnd());
    case Js::OpCode::PUSH:
        return this->PushPopEncodeType(instr->GetDst()->AsIndirOpnd(), instr->GetSrc1()->AsRegBVOpnd());
    case Js::OpCode::LDR:
        return this->CanonicalizeLoad(instr);
    case Js::OpCode::STR:
        return this->CanonicalizeStore(instr);
    case Js::OpCode::LEA:
        return this->CanonicalizeLea(instr);
    case Js::OpCode::ADD:
    case Js::OpCode::ADDS:
        return this->CanonicalizeAdd(instr);
    case Js::OpCode::SUB:
    case Js::OpCode::SUBS:
        return this->CanonicalizeSub(instr);
    // Three-operand ALU ops share a width check.
    case Js::OpCode::AND:
    case Js::OpCode::EOR:
    case Js::OpCode::MUL:
    case Js::OpCode::ORR:
    case Js::OpCode::RSB:
    case Js::OpCode::RSBS:
    case Js::OpCode::BIC:
        return this->Alu3EncodeType(instr);
    case Js::OpCode::EOR_ASR31:
        return InstructionType::Thumb2;
    case Js::OpCode::SMULL:
    case Js::OpCode::SMLAL:
        return InstructionType::Thumb2;
    // Two-operand ALU ops: MVN checks dst/src1, TST checks src1/src2.
    case Js::OpCode::MVN:
        return this->Alu2EncodeType(instr->GetDst(), instr->GetSrc1());
    case Js::OpCode::TST:
        return this->Alu2EncodeType(instr->GetSrc1(), instr->GetSrc2());
    case Js::OpCode::ASR:
    case Js::OpCode::ASRS:
    case Js::OpCode::LSL:
    case Js::OpCode::LSR:
        return this->ShiftEncodeType(instr);
    // All VFP (floating point) instructions use their own encoding class.
    case Js::OpCode::VSTR:
    case Js::OpCode::VSTR32:
    case Js::OpCode::VLDR:
    case Js::OpCode::VLDR32:
    case Js::OpCode::VABS:
    case Js::OpCode::VSQRT:
    case Js::OpCode::VMOV:
    case Js::OpCode::VMOVARMVFP:
    case Js::OpCode::VMOVF64R32L:
    case Js::OpCode::VMOVF64R32U:
    case Js::OpCode::VCVTF64F32:
    case Js::OpCode::VCVTF32F64:
    case Js::OpCode::VCVTF64S32:
    case Js::OpCode::VCVTF64U32:
    case Js::OpCode::VCVTS32F64:
    case Js::OpCode::VCVTRS32F64:
    case Js::OpCode::VPUSH:
    case Js::OpCode::VPOP:
    case Js::OpCode::VADDF64:
    case Js::OpCode::VSUBF64:
    case Js::OpCode::VMULF64:
    case Js::OpCode::VDIVF64:
    case Js::OpCode::VNEGF64:
    case Js::OpCode::VCMPF64:
    case Js::OpCode::VMRS:
    case Js::OpCode::VMRSR:
    case Js::OpCode::VMSR:
        return InstructionType::Vfp;
    default:
        AssertMsg(UNREACHED, "Unexpected opcode in IsInstrThumb2");
        return InstructionType::None;
    }
}
  185. // CanonicalizeMov: Determine the size of the encoding and change the opcode
  186. // if necessary to indicate a wide instruction. (We do this for MOV, LDR, and STR
  187. // to cut down on the time it takes to search all the possible forms.)
  188. InstructionType EncoderMD::CanonicalizeMov(IR::Instr * instr)
  189. {
  190. // 3 possibilities:
  191. // 1. MOV (T1):
  192. // - uint8 to low reg
  193. // - any reg to reg
  194. // 2. MOVW (T2):
  195. // - uint16 to reg
  196. // 3. MOV_W (T2):
  197. // - mod const imm to reg
  198. IR::RegOpnd *dstOpnd = instr->GetDst()->AsRegOpnd();
  199. IR::Opnd *srcOpnd = instr->GetSrc1();
  200. if (srcOpnd->IsRegOpnd())
  201. {
  202. // All reg to reg copies are 2 bytes.
  203. return InstructionType::Thumb;
  204. }
  205. int32 immed = srcOpnd->GetImmediateValueAsInt32(instr->m_func);
  206. if (IS_LOWREG(dstOpnd->GetReg()) &&
  207. IS_CONST_UINT8(immed))
  208. {
  209. // uint8 -> low reg
  210. return InstructionType::Thumb;
  211. }
  212. // Wide MOV instruction. Choose the opcode based on the constant.
  213. if (IS_CONST_UINT16(immed))
  214. {
  215. instr->m_opcode = Js::OpCode::MOVW;
  216. }
  217. else
  218. {
  219. Assert(CanEncodeModConst12(immed));
  220. instr->m_opcode = Js::OpCode::MOV_W;
  221. }
  222. return InstructionType::Thumb2;
  223. }
  224. // CanonicalizeLoad: Determine the size of the encoding and change the opcode
  225. // if necessary to indicate a wide instruction. (We do this for MOV, LDR, and STR
  226. // to cut down on the time it takes to search all the possible forms.)
  227. InstructionType EncoderMD::CanonicalizeLoad(IR::Instr * instr)
  228. {
  229. IR::Opnd *memOpnd = instr->GetSrc1();
  230. // Note: sign-extension of less-than-4-byte loads requires a wide instruction.
  231. if (memOpnd->GetSize() == 4 || memOpnd->IsUnsigned())
  232. {
  233. if (!this->IsWideMemInstr(instr->GetSrc1(), instr->GetDst()->AsRegOpnd()))
  234. {
  235. return InstructionType::Thumb;
  236. }
  237. }
  238. instr->m_opcode = Js::OpCode::LDR_W;
  239. return InstructionType::Thumb2;
  240. }
  241. // CanonicalizeStore: Determine the size of the encoding and change the opcode
  242. // if necessary to indicate a wide instruction. (We do this for MOV, LDR, and STR
  243. // to cut down on the time it takes to search all the possible forms.)
  244. InstructionType EncoderMD::CanonicalizeStore(IR::Instr * instr)
  245. {
  246. if (this->IsWideMemInstr(instr->GetDst(), instr->GetSrc1()->AsRegOpnd()))
  247. {
  248. instr->m_opcode = Js::OpCode::STR_W;
  249. return InstructionType::Thumb2;
  250. }
  251. return InstructionType::Thumb;
  252. }
  253. // IsWideMemInstr: Shared by LDR and STR.
  254. // Determine the width of the encoding based on the operand properties.
bool EncoderMD::IsWideMemInstr(IR::Opnd *memOpnd, IR::RegOpnd *regOpnd)
{
    // LDR/STR rn, [rbase + rindex], or
    // LDR/STR rn, [rbase + offset]
    // If rn is not low reg, instr is wide.
    if (!IS_LOWREG(regOpnd->GetReg()))
    {
        return true;
    }
    // Pull the base and index/offset from the indirection.
    RegNum baseReg;
    IR::RegOpnd *indexOpnd;
    int32 offset;
    if (memOpnd->IsSymOpnd())
    {
        // A stack symbol resolves to base register + constant offset; no index.
        indexOpnd = nullptr;
        this->BaseAndOffsetFromSym(memOpnd->AsSymOpnd(), &baseReg, &offset, this->m_func);
    }
    else
    {
        IR::IndirOpnd *indirOpnd = memOpnd->AsIndirOpnd();
        // Scaled index operands require wide instruction.
        if (indirOpnd->GetScale() > 0)
        {
            return true;
        }
        baseReg = indirOpnd->GetBaseOpnd()->GetReg();
        indexOpnd = indirOpnd->GetIndexOpnd();
        offset = indirOpnd->GetOffset();
    }
    // An indirection carries either an index or an offset, never both.
    Assert(offset == 0 || indexOpnd == nullptr);
    if (indexOpnd)
    {
        // Both base and index must be low regs.
        return !IS_LOWREG(baseReg) || !IS_LOWREG(indexOpnd->GetReg());
    }
    else
    {
        size_t size = memOpnd->GetSize();
        if (!IS_LOWREG(baseReg) && (baseReg != RegSP || size != 4))
        {
            // Base reg must be low or SP (and we only have 4-byte SP-relative ops).
            return true;
        }
        // Short encodings shift the offset based on the size of the load/store.
        // (E.g., 4-byte load shifts the offset by 2.)
        if (offset & (size - 1))
        {
            // Can't use a short encoding if we lose bits by shifting the offset.
            return true;
        }
        uint32 shiftBits = Math::Log2(size);
        if (baseReg == RegSP)
        {
            // LDR/STR rn, [SP + uint8:00]
            return !IS_CONST_UINT8(offset >> shiftBits);
        }
        else
        {
            // LDR/STR rn, [base + uint5:size]
            return !IS_CONST_UINT5(offset >> shiftBits);
        }
    }
}
// CanonicalizeAdd: choose the narrow or wide encoding for ADD/ADDS, rewriting
// the opcode to ADDW when the uint12-immediate wide form is needed.
InstructionType EncoderMD::CanonicalizeAdd(IR::Instr * instr)
{
    IR::Opnd *src2 = instr->GetSrc2();
    int32 immed = 0;
    // Check cases that apply to ADD but not SUB.
    if (src2->IsRegOpnd())
    {
        // Check for rm = ADD rm, rn
        if (instr->m_opcode != Js::OpCode::ADDS &&
            instr->GetDst()->AsRegOpnd()->IsSameReg(instr->GetSrc1()))
        {
            return InstructionType::Thumb;
        }
    }
    else
    {
        immed = src2->GetImmediateValueAsInt32(instr->m_func);
        // Check for rm = ADD SP, uint8:00
        if (IS_LOWREG(instr->GetDst()->AsRegOpnd()->GetReg()))
        {
            if (instr->GetSrc1()->AsRegOpnd()->GetReg() == RegSP)
            {
                // Immediate must be 4-byte aligned and fit in 8 bits after >>2.
                if ((immed & 3) == 0 && IS_CONST_UINT8(immed >> 2))
                {
                    return InstructionType::Thumb;
                }
            }
        }
    }
    // Now check the shared ADD/SUB cases.
    if (this->IsWideAddSub(instr))
    {
        // The instr is definitely wide. Let the opcode indicate that if we're using the uint12 form.
        // Note that the uint12 form can't set the status bits.
        if (!src2->IsRegOpnd() && !this->CanEncodeModConst12(immed))
        {
            Assert(instr->m_opcode != Js::OpCode::ADDS);
            Assert(IS_CONST_UINT12(immed));
            instr->m_opcode = Js::OpCode::ADDW;
        }
        return InstructionType::Thumb2;
    }
    return InstructionType::Thumb;
}
  363. InstructionType EncoderMD::CanonicalizeSub(IR::Instr * instr)
  364. {
  365. if (this->IsWideAddSub(instr))
  366. {
  367. IR::Opnd *src2 = instr->GetSrc2();
  368. // The instr is definitely wide. Let the opcode indicate that if we're using the uint12 form.
  369. // Note that the uint12 form can't set the status bits.
  370. Assert(!IRType_IsInt64(src2->GetType()));
  371. if (!src2->IsRegOpnd() && !this->CanEncodeModConst12(src2->GetImmediateValueAsInt32(instr->m_func)))
  372. {
  373. Assert(instr->m_opcode != Js::OpCode::SUBS);
  374. Assert(IS_CONST_UINT12(src2->GetImmediateValueAsInt32(instr->m_func)));
  375. instr->m_opcode = Js::OpCode::SUBW;
  376. }
  377. return InstructionType::Thumb2;
  378. }
  379. return InstructionType::Thumb;
  380. }
// IsWideAddSub: shared ADD/SUB width check. Returns true when no 16-bit
// encoding covers the instruction's operand combination.
bool EncoderMD::IsWideAddSub(IR::Instr * instr)
{
    IR::RegOpnd *dst = instr->GetDst()->AsRegOpnd();
    IR::RegOpnd *src1 = instr->GetSrc1()->AsRegOpnd();
    IR::Opnd *src2 = instr->GetSrc2();
    int32 immed;
    if (dst->GetReg() == RegSP)
    {
        // The one short form is SP = op SP, uint7:00
        if (src1->GetReg() != RegSP)
        {
            return true;
        }
        if (src2->IsRegOpnd())
        {
            return true;
        }
        immed = src2->GetImmediateValueAsInt32(instr->m_func);
        // Needs 4-byte alignment and a 7-bit quantity after the >>2 shift.
        return ((immed & 3) != 0) || !IS_CONST_UINT7(immed >> 2);
    }
    else
    {
        // low1 = op low2, low3 or
        // low1 = op low2, uint3 or
        // low1 = op low1, uint8
        if (!IS_LOWREG(dst->GetReg()) || !IS_LOWREG(src1->GetReg()))
        {
            return true;
        }
        if (src2->IsRegOpnd())
        {
            return !IS_LOWREG(src2->AsRegOpnd()->GetReg());
        }
        else
        {
            immed = src2->GetImmediateValueAsInt32(instr->m_func);
            // In-place form allows uint8; the three-operand form only uint3.
            return dst->IsSameReg(src1) ? !IS_CONST_UINT8(immed) : !IS_CONST_UINT3(immed);
        }
    }
}
// CanonicalizeLea: rewrite LEA as the equivalent ADD (base + offset/index)
// and defer to CanonicalizeAdd for the final encoding choice.
InstructionType EncoderMD::CanonicalizeLea(IR::Instr * instr)
{
    RegNum baseReg;
    int32 offset;
    IR::Opnd* src1 = instr->UnlinkSrc1();
    if (src1->IsSymOpnd())
    {
        // We may as well turn this LEA into the equivalent ADD instruction and let the common ADD
        // logic handle it.
        IR::SymOpnd *symOpnd = src1->AsSymOpnd();
        this->BaseAndOffsetFromSym(symOpnd, &baseReg, &offset, this->m_func);
        symOpnd->Free(this->m_func);
        instr->SetSrc1(IR::RegOpnd::New(nullptr, baseReg, TyMachReg, this->m_func));
        instr->SetSrc2(IR::IntConstOpnd::New(offset, TyMachReg, this->m_func));
    }
    else
    {
        IR::IndirOpnd *indirOpnd = src1->AsIndirOpnd();
        IR::RegOpnd *baseOpnd = indirOpnd->GetBaseOpnd();
        IR::RegOpnd *indexOpnd = indirOpnd->GetIndexOpnd();
        offset = indirOpnd->GetOffset();
        // An indirection carries either an index or an offset, never both.
        Assert(offset == 0 || indexOpnd == nullptr);
        instr->SetSrc1(baseOpnd);
        if (indexOpnd)
        {
            AssertMsg(indirOpnd->GetScale() == 0, "NYI Needs shifted register support for ADD");
            instr->SetSrc2(indexOpnd);
        }
        else
        {
            instr->SetSrc2(IR::IntConstOpnd::New(offset, TyMachReg, this->m_func));
        }
        // Base/index have been transferred to the instruction; free the indir shell.
        indirOpnd->Free(this->m_func);
    }
    instr->m_opcode = Js::OpCode::ADD;
    return this->CanonicalizeAdd(instr);
}
  458. InstructionType EncoderMD::CmpEncodeType(IR::Instr * instr)
  459. {
  460. // CMP:
  461. // - low reg, uint8
  462. // - any reg, any reg
  463. IR::Opnd *src2 = instr->GetSrc2();
  464. if (src2->IsRegOpnd())
  465. {
  466. Assert(instr->GetSrc1()->IsRegOpnd());
  467. return InstructionType::Thumb;
  468. }
  469. if (IS_LOWREG(instr->GetSrc1()->AsRegOpnd()->GetReg()) &&
  470. IS_CONST_UINT8(src2->GetImmediateValueAsInt32(instr->m_func)))
  471. {
  472. return InstructionType::Thumb;
  473. }
  474. return InstructionType::Thumb2;
  475. }
  476. InstructionType EncoderMD::CmnEncodeType(IR::Instr * instr)
  477. {
  478. // CMN:
  479. // - low reg, low reg
  480. // - any reg, uint8
  481. // - any reg, any reg
  482. IR::Opnd *src2 = instr->GetSrc2();
  483. if (src2->IsRegOpnd())
  484. {
  485. // low reg, low reg
  486. if (IS_LOWREG(instr->GetSrc1()->AsRegOpnd()->GetReg()) && IS_LOWREG(instr->GetSrc2()->AsRegOpnd()->GetReg()))
  487. {
  488. return InstructionType::Thumb;
  489. }
  490. }
  491. // any reg, uint8
  492. // any reg, any reg
  493. return InstructionType::Thumb2;
  494. }
  495. InstructionType EncoderMD::PushPopEncodeType(IR::IndirOpnd *target, IR::RegBVOpnd * opnd)
  496. {
  497. if(target->GetBaseOpnd()->GetReg() != RegSP)
  498. {
  499. return InstructionType::Thumb2;
  500. }
  501. // NOTE: because T1 encoding permits LR here, we could theoretically check for it specially,
  502. // but in practice we never push LR without R11, so it would never help. If that changes, we
  503. // should make this function smarter.
  504. BYTE lastRegEncode = (BYTE)opnd->m_value.GetPrevBit();
  505. Assert(lastRegEncode != BVInvalidIndex);
  506. return lastRegEncode > RegEncode[RegR7] ? InstructionType::Thumb2 : InstructionType::Thumb;
  507. }
  508. InstructionType EncoderMD::Alu2EncodeType(IR::Opnd *opnd1, IR::Opnd *opnd2)
  509. {
  510. // Shared by TST (checks src1 and src2) and MVN (checks dst and src1), which is why we pass
  511. // operands rather than the whole instruction.
  512. // Short encoding requires two low regs as operands.
  513. if (!opnd1->IsRegOpnd() || !IS_LOWREG(opnd1->AsRegOpnd()->GetReg()))
  514. {
  515. return InstructionType::Thumb2;
  516. }
  517. if (!opnd2->IsRegOpnd() || !IS_LOWREG(opnd2->AsRegOpnd()->GetReg()))
  518. {
  519. return InstructionType::Thumb2;
  520. }
  521. return InstructionType::Thumb;
  522. }
  523. InstructionType EncoderMD::Alu3EncodeType(IR::Instr * instr)
  524. {
  525. // Check for rm = op rm, rn
  526. IR::RegOpnd *dst = instr->GetDst()->AsRegOpnd();
  527. if (!IS_LOWREG(dst->GetReg()) ||
  528. !dst->IsSameReg(instr->GetSrc1()))
  529. {
  530. return InstructionType::Thumb2;
  531. }
  532. IR::Opnd *src2 = instr->GetSrc2();
  533. if (!src2->IsRegOpnd() || !IS_LOWREG(src2->AsRegOpnd()->GetReg()))
  534. {
  535. return InstructionType::Thumb2;
  536. }
  537. return InstructionType::Thumb;
  538. }
  539. InstructionType EncoderMD::ShiftEncodeType(IR::Instr * instr)
  540. {
  541. // 2 short forms:
  542. // rm = op rn, uint5
  543. // rm = op rm, rn
  544. IR::RegOpnd *dst = instr->GetDst()->AsRegOpnd();
  545. if (!IS_LOWREG(dst->GetReg()))
  546. {
  547. return InstructionType::Thumb2;
  548. }
  549. IR::RegOpnd *src1 = instr->GetSrc1()->AsRegOpnd();
  550. IR::Opnd *src2 = instr->GetSrc2();
  551. if (src2->IsRegOpnd())
  552. {
  553. return (IS_LOWREG(src2->AsRegOpnd()->GetReg()) && dst->IsSameReg(src1)) ? InstructionType::Thumb : InstructionType::Thumb2;
  554. }
  555. else
  556. {
  557. Assert(IS_CONST_UINT5(src2->GetImmediateValueAsInt32(instr->m_func)));
  558. return IS_LOWREG(src1->GetReg()) ? InstructionType::Thumb : InstructionType::Thumb2;
  559. }
  560. }
  561. int
  562. EncoderMD::IndirForm(int form, int *pOpnnum, RegNum baseReg, IR::Opnd *indexOpnd)
  563. {
  564. int opnnum = *pOpnnum;
  565. form |= FSRC(INDIR, opnnum++);
  566. switch (baseReg)
  567. {
  568. case RegSP:
  569. form |= FSRC(SP, opnnum++);
  570. break ;
  571. case RegPC:
  572. form |= FSRC(PC, opnnum++);
  573. break;
  574. default:
  575. form |= FSRC(REG, opnnum++);
  576. break;
  577. }
  578. if (indexOpnd == nullptr)
  579. {
  580. // UTC does this for OPBASED. Seems to be based on the assumption
  581. // that we have either an offset or an index, but not both.
  582. form |= FSRC(CONST, opnnum++); // OFFSET
  583. }
  584. else
  585. {
  586. form |= FSRC(REG, opnnum++); // INDEX
  587. }
  588. *pOpnnum = opnnum;
  589. return form;
  590. }
//---------------------------------------------------------------------------
//
// GetForm() (historically CoGenIForms() in UTC)
//
// parses the instruction tuple and generates the corresponding 'form' constant
//
//---------------------------------------------------------------------------
  598. int
  599. EncoderMD::GetForm(IR::Instr *instr, int32 size)
  600. {
  601. int form;
  602. int opnnum; //Current looping operand in the instruction
  603. int operands; //Represents if the current operand is dst or source
  604. RegNum regNum;
  605. IR::Opnd* dst;
  606. IR::Opnd* opn;
  607. IR::IndirOpnd *indirOpnd;
  608. bool sameSrcDst = false;
  609. bool T2instr = false;
  610. form = 0;
  611. T2instr = (size == 4);
  612. // Set THUMB or THUMB2 instruction, this is to figure out if the form is T2 or T1.
  613. if (T2instr)
  614. {
  615. form |= FTHUMB2;
  616. }
  617. else
  618. {
  619. sameSrcDst = true;
  620. form |= FTHUMB;
  621. }
  622. dst = instr->GetDst();
  623. if (dst == nullptr || LowererMD::IsCall(instr))
  624. {
  625. opn = instr->GetSrc1();
  626. opnnum = 1;
  627. operands = 1;
  628. if (instr->IsBranchInstr() && instr->AsBranchInstr()->GetTarget())
  629. {
  630. // Treat the label reference as the first source.
  631. form |= FSRC(LABEL, opnnum++);
  632. }
  633. }
  634. else
  635. {
  636. opn = dst;
  637. opnnum = 0;
  638. operands = 0;
  639. }
  640. bool done = false;
  641. while (opn != nullptr)
  642. {
  643. switch (opn->GetKind())
  644. {
  645. case IR::OpndKindIntConst:
  646. case IR::OpndKindFloatConst:
  647. case IR::OpndKindAddr: //UTC - CASE_DATAADDRTUPLE
  648. {
  649. form |= FSRC(CONST, opnnum++);
  650. }
  651. break;
  652. case IR::OpndKindReg:
  653. {
  654. regNum = opn->AsRegOpnd()->GetReg();
  655. switch (regNum)
  656. {
  657. case RegSP:
  658. case RegPC:
  659. if (size != 4 || instr->m_opcode == Js::OpCode::LDRRET)
  660. {
  661. if (regNum == RegSP)
  662. {
  663. form |= FSRC(SP, opnnum++);
  664. }
  665. else
  666. {
  667. form |= FSRC(PC, opnnum++);
  668. }
  669. break;
  670. }
  671. // FALL THROUGH!
  672. default:
  673. if (regNum >= RegR0 && regNum <= RegPC)
  674. {
  675. if ((regNum > RegR7) && (!T2instr))
  676. {
  677. form |= FSET(REG,28);
  678. }
  679. if (operands == 0)
  680. { // dst operands
  681. form |= FSRC(REG,opnnum++);
  682. }
  683. else
  684. { // src operands
  685. if (sameSrcDst && dst && opn->AsRegOpnd()->IsSameReg(dst))
  686. {
  687. form |= FSRC(REG,0); // same src,dst
  688. sameSrcDst = false;
  689. }
  690. else
  691. {
  692. form |= FSRC(REG, opnnum++);
  693. }
  694. }
  695. }
  696. else if (regNum >= RegR0 && regNum <= LAST_DOUBLE_REG)
  697. {
  698. form |= FSRC(DREG, opnnum++);
  699. }
  700. break;
  701. }
  702. }
  703. break;
  704. case IR::OpndKindHelperCall:
  705. {
  706. form |= FSRC(CODE, opnnum++);
  707. }
  708. break;
  709. case IR::OpndKindRegBV:
  710. {
  711. Assert(instr->m_opcode == Js::OpCode::PUSH || instr->m_opcode == Js::OpCode::POP
  712. || instr->m_opcode == Js::OpCode::VPUSH || instr->m_opcode == Js::OpCode::VPOP);
  713. BVIndex count = opn->AsRegBVOpnd()->GetValue().Count();
  714. Assert(count > 0);
  715. // Note: only the wide encoding distinguishes between single- and multiple-register push/pop.
  716. if (count == 1 && T2instr)
  717. {
  718. form |= FSRC(REG, opnnum++);
  719. }
  720. break;
  721. }
  722. case IR::OpndKindIndir:
  723. indirOpnd = opn->AsIndirOpnd();
  724. form = this->IndirForm(form, &opnnum, indirOpnd->GetBaseOpnd()->GetReg(), indirOpnd->GetIndexOpnd());
  725. break;
  726. case IR::OpndKindSym:
  727. {
  728. RegNum baseReg;
  729. int32 offset;
  730. AssertMsg(opn->AsSymOpnd()->m_sym->IsStackSym(), "Should only see stackSym syms in encoder.");
  731. form |= FSRC(INDIR, opnnum++);
  732. this->BaseAndOffsetFromSym(opn->AsSymOpnd(), &baseReg, &offset, this->m_func);
  733. if (baseReg == RegSP)
  734. {
  735. form |= FSRC(SP, opnnum++);
  736. }
  737. else
  738. {
  739. form |= FSRC(REG, opnnum++);
  740. form |= FSRC(CONST, opnnum++);
  741. }
  742. break;
  743. }
  744. case IR::OpndKindLabel:
  745. form |= FSRC(LABEL, opnnum++);
  746. break;
  747. case IR::OpndKindMemRef:
  748. // Deref of literal address
  749. AssertMsg(0, "NYI");
  750. return 0;
  751. default:
  752. AssertMsg(UNREACHED, "Unrecognized kind");
  753. return 0;
  754. }
  755. if (done)
  756. {
  757. //If we have traversed all the 3 operands exit.
  758. break;
  759. }
  760. if (LowererMD::IsCall(instr))
  761. {
  762. break;
  763. }
  764. if (opn == dst)
  765. {
  766. opn = instr->GetSrc1();
  767. if (instr->IsBranchInstr() && instr->AsBranchInstr()->GetTarget())
  768. {
  769. // Treat the label reference as the first source.
  770. form |= FSRC(LABEL, opnnum++);
  771. }
  772. }
  773. else
  774. {
  775. opn = instr->GetSrc2();
  776. done = true;
  777. }
  778. operands = 1;
  779. }
  780. return (form);
  781. }
  782. bool EncoderMD::EncodeImmediate16(int32 constant, DWORD * result)
  783. {
  784. if (constant > 0xFFFF)
  785. {
  786. return FALSE;
  787. }
  788. DWORD encode = ((constant & 0xFF) << 16) |
  789. ((constant & 0x0700) << 20) |
  790. ((constant & 0x0800) >> 1) |
  791. ((constant & 0xF000) >> 12);
  792. *result |= encode;
  793. return TRUE;
  794. }
  795. ENCODE_32
  796. EncoderMD::EncodeT2Immediate12(ENCODE_32 encode, int32 constant)
  797. {
  798. Assert((constant & 0xFFFFF000) == 0);
  799. ENCODE_32 encoded = (constant & 0x800) >> (11-10);
  800. encoded |= (constant & 0x700) << (16+12-8);
  801. encoded |= (constant & 0xFF) << 16;
  802. encode |= encoded;
  803. return encode;
  804. }
  805. ENCODE_32
  806. EncoderMD::EncodeT2Offset(ENCODE_32 encode, IR::Instr *instr, int offset, int bitOffset)
  807. {
  808. if (EncoderMD::IsShifterUpdate(instr))
  809. {
  810. Assert(IS_CONST_INT8(offset));
  811. encode |= 9 << 24;
  812. if (!EncoderMD::IsShifterSub(instr))
  813. {
  814. encode |= 1 << 25;
  815. }
  816. if (!EncoderMD::IsShifterPost(instr))
  817. {
  818. encode |= 1 << 26;
  819. }
  820. }
  821. else
  822. {
  823. if (offset >=0)
  824. {
  825. Assert(IS_CONST_UINT12(offset));
  826. encode |= 1 << 7;
  827. }
  828. else
  829. {
  830. offset = -offset;
  831. Assert(IS_CONST_UINT8(offset));
  832. encode |= 0x0C000000;
  833. }
  834. }
  835. encode |= offset << bitOffset;
  836. return encode;
  837. }
  838. //---------------------------------------------------------------------------
  839. //
  840. // GenerateEncoding()
  841. //
  842. // generates the encoding for the specified tuple/form by applying the
  843. // associated encoding steps
  844. //
  845. //---------------------------------------------------------------------------
  846. ENCODE_32
  847. EncoderMD::GenerateEncoding(IR::Instr* instr, IFORM iform, BYTE *pc, int32 size, InstructionType instrType)
  848. {
  849. ENCODE_32 encode = 0 ;
  850. DWORD encoded = 0;
  851. IR::Opnd* opn = 0;
  852. IR::Opnd* dst = 0;
  853. IR::Opnd* reg = 0; //tupReg;
  854. IR::IndirOpnd *indirOpnd;
  855. Js::OpCode opcode = instr->m_opcode;
  856. const AssemblyStep *AsmSteps = nullptr;
  857. const FormTable *ftp = nullptr;
  858. int bitOffset;
  859. int offset;
  860. bool fUpdate;
  861. bool fSub;
  862. bool fPost;
  863. int done = false;
  864. int32 constant = 0; //UTC IVALTYPE
  865. bool constantValid = false;
  866. RegNum regNum;
  867. unsigned int iType = 0, SFlag = 0;
  868. dst = instr->GetDst();
  869. if(opcode == Js::OpCode::MLS)
  870. {
  871. Assert(instr->m_prev->GetDst()->IsRegOpnd() && (instr->m_prev->GetDst()->AsRegOpnd()->GetReg() == RegR12));
  872. }
  873. if (dst == nullptr || LowererMD::IsCall(instr))
  874. {
  875. opn = instr->GetSrc1();
  876. reg = opn;
  877. }
  878. else if (opcode == Js::OpCode::POP || opcode == Js::OpCode::VPOP)
  879. {
  880. opn = instr->GetSrc1();
  881. reg = dst;
  882. }
  883. else
  884. {
  885. opn = dst;
  886. reg = opn;
  887. }
  888. for (ftp = InstrEncode[opcode - (Js::OpCode::MDStart + 1)]; !done && ftp->form != FORM_NOMORE; ftp++)
  889. {
  890. if (ftp->form != iform)
  891. {
  892. if (!((iform & (1<<28)) == 0 && THUMB2_THUMB1_FORM(ftp->form, iform)))
  893. {
  894. continue;
  895. }
  896. }
  897. AsmSteps = ftp->steps;
  898. done = false;
  899. constantValid=0;
  900. while (!done)
  901. {
  902. switch (*AsmSteps++)
  903. {
  904. case STEP_NEXTOPN:
  905. // Get Next operand
  906. if (opn == dst)
  907. {
  908. opn = instr->GetSrc1();
  909. }
  910. else
  911. {
  912. Assert(opn == instr->GetSrc1());
  913. opn = instr->GetSrc2();
  914. }
  915. reg = opn;
  916. continue;
  917. case STEP_CONSTANT:
  918. Assert(opn->IsImmediateOpnd());
  919. constant = opn->GetImmediateValueAsInt32(instr->m_func);
  920. constantValid = true;
  921. continue;
  922. case STEP_CALL:
  923. continue;
  924. case STEP_T2_BRANCH24:
  925. // Constant encoded with 24bits
  926. EncodeReloc::New(&m_relocList, RelocTypeBranch24, m_pc, instr->AsBranchInstr()->GetTarget(), m_encoder->m_tempAlloc);
  927. continue;
  928. case STEP_T2_BRANCH20:
  929. // Constant encoded with 20bits.
  930. EncodeReloc::New(&m_relocList, RelocTypeBranch20, m_pc, instr->AsBranchInstr()->GetTarget(), m_encoder->m_tempAlloc);
  931. continue;
  932. case STEP_REG:
  933. Assert(reg != nullptr);
  934. Assert(reg->IsRegOpnd());
  935. bitOffset = *AsmSteps++;
  936. regNum = (RegNum)this->GetRegEncode(reg->AsRegOpnd());
  937. encode |= regNum << bitOffset;
  938. continue;
  939. case STEP_HREG:
  940. Assert(reg != nullptr);
  941. Assert(reg->IsRegOpnd());
  942. bitOffset = *AsmSteps++;
  943. regNum = (RegNum)this->GetRegEncode(reg->AsRegOpnd());
  944. encode |= (regNum & 0x7) << bitOffset;
  945. continue;
  946. case STEP_R12:
  947. bitOffset = *AsmSteps++;
  948. regNum = (RegNum)this->GetRegEncode(RegR12);
  949. encode |= regNum << bitOffset;
  950. continue;
  951. case STEP_HBIT:
  952. Assert(reg != nullptr);
  953. Assert(reg->IsRegOpnd());
  954. regNum = (RegNum)this->GetRegEncode(reg->AsRegOpnd());
  955. if (regNum >= MAX_INT_REGISTERS_LOW)
  956. {
  957. bitOffset = *AsmSteps;
  958. encode |= 1 << bitOffset;
  959. }
  960. AsmSteps++;
  961. continue;
  962. case STEP_OPEQ:
  963. Assert(instr->GetDst()->AsRegOpnd()->IsSameReg(instr->GetSrc1()));
  964. continue;
  965. case STEP_DUMMY_REG:
  966. Assert(opn->AsRegOpnd()->GetReg() == RegSP ||
  967. opn->AsRegOpnd()->GetReg() == RegPC);
  968. continue;
  969. case STEP_OPCODE:
  970. //ASSERTTNR(!(instr & ftp->inst), tupInstr);
  971. encode |= ftp->inst;
  972. continue;
  973. case STEP_FIXUP:
  974. /*
  975. if (TU_ISINDIR(tupOpn)) {
  976. if (fApplyFixup)
  977. CoApplyFixup(tupOpn, dataBuf);
  978. }*/
  979. continue;
  980. case STEP_LDR:
  981. Assert(!constantValid);
  982. switch (opn->GetType())
  983. {
  984. case TyInt8:
  985. constant = 0x5600;
  986. case TyInt16:
  987. constant = 0x5e00;
  988. break;
  989. case TyInt32:
  990. case TyUint32:
  991. case TyVar:
  992. constant = 0x5800;
  993. break;
  994. case TyUint8:
  995. constant = 0x5c00;
  996. break;
  997. case TyUint16:
  998. constant = 0x5a00;
  999. break;
  1000. }
  1001. encode |= constant;
  1002. continue;
  1003. case STEP_LDRI:
  1004. Assert(!constantValid);
  1005. switch (opn->GetType())
  1006. {
  1007. case TyInt8:
  1008. case TyInt16:
  1009. constant = 0;
  1010. break;
  1011. case TyInt32:
  1012. case TyUint32:
  1013. case TyVar:
  1014. constant = 0x6800;
  1015. break;
  1016. case TyUint8:
  1017. constant = 0x7800;
  1018. break;
  1019. case TyUint16:
  1020. constant = 0x8800;
  1021. break;
  1022. }
  1023. encode |= constant;
  1024. continue;
  1025. case STEP_STRI:
  1026. Assert(!constantValid);
  1027. switch (opn->GetType())
  1028. {
  1029. case TyInt8:
  1030. case TyUint8:
  1031. constant = 0x7000;
  1032. break;
  1033. case TyInt16:
  1034. case TyUint16:
  1035. constant = 0x8000;
  1036. break;
  1037. case TyInt32:
  1038. case TyUint32:
  1039. case TyVar:
  1040. constant = 0x6000;
  1041. break;
  1042. }
  1043. encode |= constant;
  1044. continue;
  1045. case STEP_STR:
  1046. Assert(!constantValid);
  1047. switch (opn->GetType())
  1048. {
  1049. case TyInt8:
  1050. case TyUint8:
  1051. constant = 0x5400;
  1052. break;
  1053. case TyInt16:
  1054. case TyUint16:
  1055. constant = 0x5200;
  1056. break;
  1057. case TyInt32:
  1058. case TyUint32:
  1059. case TyVar:
  1060. constant = 0x5000;
  1061. break;
  1062. }
  1063. encode |= constant;
  1064. continue;
  1065. case STEP_IMM:
  1066. bitOffset = *AsmSteps++;
  1067. if (opn->IsIndirOpnd())
  1068. {
  1069. offset = opn->AsIndirOpnd()->GetOffset();
  1070. }
  1071. else
  1072. {
  1073. this->BaseAndOffsetFromSym(opn->AsSymOpnd(), &regNum, &offset, this->m_func);
  1074. }
  1075. switch (opn->GetSize())
  1076. {
  1077. case 1:
  1078. break;
  1079. case 2:
  1080. Assert(!(offset & 0x1));
  1081. offset = offset >> 1;
  1082. break;
  1083. case 4:
  1084. Assert(!(offset & 0x3)); //check for word-align
  1085. offset = offset >> 2;
  1086. break;
  1087. default:
  1088. Assert(UNREACHED);
  1089. offset = 0;
  1090. }
  1091. Assert(IS_CONST_UINT5(offset));
  1092. encode |= offset << bitOffset;
  1093. continue;
  1094. case STEP_UIMM3:
  1095. bitOffset = *AsmSteps++;
  1096. Assert(constantValid);
  1097. Assert(IS_CONST_UINT3(constant));
  1098. encode |= constant << bitOffset;
  1099. continue;
  1100. case STEP_IMM_W7:
  1101. Assert(constantValid);
  1102. Assert(!(constant & 0x3)); // check for word-alignment
  1103. constant = constant >> 2; // remove rightmost two zero bits
  1104. Assert(IS_CONST_UINT7(constant));
  1105. encode |= constant;
  1106. constantValid = false;
  1107. continue;
  1108. case STEP_IMM_DPW8:
  1109. Assert(constantValid);
  1110. Assert(IS_CONST_UINT8(constant >> 2));
  1111. Assert(constant % 4 == 0);
  1112. encode |= constant >> 2;
  1113. constantValid = false;
  1114. continue;
  1115. case STEP_IMM_W8:
  1116. if (opn->IsSymOpnd())
  1117. {
  1118. this->BaseAndOffsetFromSym(opn->AsSymOpnd(), &regNum, &offset, this->m_func);
  1119. }
  1120. else
  1121. {
  1122. offset = opn->AsIndirOpnd()->GetOffset();
  1123. }
  1124. Assert(offset % 4 == 0);
  1125. Assert(IS_CONST_UINT8(offset >> 2));
  1126. encode |= offset >> 2;
  1127. continue;
  1128. case STEP_T2_IMM_16:
  1129. if (!EncodeImmediate16(constant, &encoded))
  1130. {
  1131. AssertMsg(false,"constant > than 16 bits");
  1132. }
  1133. encode |= encoded;
  1134. continue;
  1135. case STEP_T2_IMM_12:
  1136. encode = this->EncodeT2Immediate12(encode, constant);
  1137. continue;
  1138. case STEP_OFFSET:
  1139. {
  1140. unsigned int R_bit = 0;
  1141. if (ISSTORE(instr->m_opcode))
  1142. {
  1143. if (TESTREGBIT(constant, RegLR))
  1144. {
  1145. R_bit = 1 << 8;
  1146. }
  1147. CLEARREGBIT(constant, RegLR);
  1148. }
  1149. else
  1150. {
  1151. if (TESTREGBIT(constant, RegPC))
  1152. {
  1153. R_bit = 1 << 8;
  1154. }
  1155. CLEARREGBIT(constant, RegPC);
  1156. }
  1157. Assert(IS_CONST_UINT8(constant));
  1158. encode |= (CO_UIMMED8(constant) | R_bit);
  1159. constantValid=false;
  1160. continue;
  1161. }
  1162. case STEP_SCALE_CONST:
  1163. {
  1164. bitOffset = *AsmSteps++;
  1165. byte scale = opn->AsIndirOpnd()->GetScale();
  1166. Assert(IS_CONST_UINT5(scale));
  1167. encode |= scale << bitOffset;
  1168. continue;
  1169. }
  1170. case STEP_SHIFTER_CONST:
  1171. bitOffset = *AsmSteps++;
  1172. // TODO: When we have IR that can send Shifts we will
  1173. // need to translate the following:
  1174. // As of now instructions do not have a mechanism to
  1175. // provide the shift offset.
  1176. //ASSERTNR(IS_CONST_UINT5(TU_SHIFTER_SHIFT(tupOpn)));
  1177. //instr |= TU_SHIFTER_SHIFT(tupOpn) << to;
  1178. continue;
  1179. case STEP_BASEREG:
  1180. bitOffset = *AsmSteps++;
  1181. if (opn->IsIndirOpnd())
  1182. {
  1183. regNum = opn->AsIndirOpnd()->GetBaseOpnd()->GetReg();
  1184. }
  1185. else
  1186. {
  1187. this->BaseAndOffsetFromSym(opn->AsSymOpnd(), &regNum, &offset, this->m_func);
  1188. }
  1189. encode |= RegEncode[regNum] << bitOffset;
  1190. continue;
  1191. case STEP_INDEXED:
  1192. Assert(opn->IsIndirOpnd());
  1193. Assert(opn->AsIndirOpnd()->GetIndexOpnd() != nullptr);
  1194. Assert(opn->AsIndirOpnd()->GetOffset() == 0);
  1195. continue;
  1196. case STEP_INDEXREG:
  1197. bitOffset = *AsmSteps++;
  1198. reg = opn->AsIndirOpnd()->GetIndexOpnd();
  1199. Assert(reg != nullptr);
  1200. Assert(reg->IsRegOpnd());
  1201. regNum = (RegNum)this->GetRegEncode(reg->AsRegOpnd());
  1202. encode |= regNum << bitOffset;
  1203. continue;
  1204. case STEP_INDIR:
  1205. Assert(opn->IsIndirOpnd() ||
  1206. (opn->IsSymOpnd() && opn->AsSymOpnd()->m_sym->IsStackSym()));
  1207. continue;
  1208. case STEP_BASED:
  1209. Assert((opn->IsIndirOpnd() && opn->AsIndirOpnd()->GetIndexOpnd() == nullptr) ||
  1210. (opn->IsSymOpnd() && opn->AsSymOpnd()->m_sym->IsStackSym()));
  1211. continue;
  1212. case STEP_T2_REGLIST:
  1213. //ASSERTTNR(constant_valid, tupOpn);
  1214. encode |= constant << 16;
  1215. constantValid = false;
  1216. if (EncoderMD::IsShifterUpdate(instr))
  1217. {
  1218. encode |= 0x20;
  1219. }
  1220. continue;
  1221. case STEP_UIMM5:
  1222. bitOffset = *AsmSteps++;
  1223. Assert(constantValid);
  1224. Assert(IS_CONST_UINT5(constant));
  1225. encode |= constant << bitOffset;
  1226. constantValid = false;
  1227. continue;
  1228. case STEP_UIMM8:
  1229. Assert(constantValid);
  1230. Assert(IS_CONST_UINT8(constant));
  1231. encode |= constant;
  1232. constantValid = false;
  1233. continue;
  1234. case STEP_REGLIST:
  1235. {
  1236. indirOpnd = opn->AsIndirOpnd();
  1237. Assert(indirOpnd->GetIndexOpnd() == nullptr);
  1238. constant = indirOpnd->GetOffset();
  1239. IR::Opnd *opndRD;
  1240. if (EncoderMD::IsLoad(instr))
  1241. {
  1242. opndRD = instr->GetDst();
  1243. }
  1244. else
  1245. {
  1246. opndRD = instr->GetSrc1();
  1247. }
  1248. if (!constant)
  1249. {
  1250. BVUnit32 registers = opndRD->AsRegBVOpnd()->GetValue();
  1251. uint32 regenc;
  1252. BVIndex index = registers.GetNextBit();
  1253. // Note: only the wide encoding distinguishes between
  1254. // single- and multiple-register push/pop.
  1255. if (registers.Count() > 1 || size == 2)
  1256. {
  1257. // Add the physical register number
  1258. do
  1259. {
  1260. regenc = 1 << index;
  1261. constant |= regenc;
  1262. }while ((index = registers.GetNextBit(index + 1))!= BVInvalidIndex);
  1263. }
  1264. else
  1265. {
  1266. bitOffset = *AsmSteps++;
  1267. Assert(index < RegEncode[RegSP]);
  1268. encode |= index << bitOffset;
  1269. continue;
  1270. }
  1271. }
  1272. if (size == 4)
  1273. {
  1274. fSub = EncoderMD::IsShifterSub(instr);
  1275. fUpdate = EncoderMD::IsShifterUpdate(instr);
  1276. encode |= fSub << 8;
  1277. encode |= !fSub << 7;
  1278. encode |= fUpdate << 5;
  1279. }
  1280. constantValid=true;
  1281. }
  1282. continue;
  1283. case STEP_T1_SETS_CR0:
  1284. {
  1285. //ASSERTTNR(Tuple::FindReg(TU_DST(tupInstr), RG_SYM(CR0)) != nullptr, tupInstr);
  1286. }
  1287. continue;
  1288. case STEP_SBIT:
  1289. {
  1290. if (this->SetsSBit(instr))
  1291. {
  1292. bitOffset = *AsmSteps;
  1293. encode |= 1 << bitOffset;
  1294. }
  1295. AsmSteps++;
  1296. }
  1297. continue;
  1298. case STEP_NOSBIT:
  1299. // just asserts that we're not supposed to set the condition flags
  1300. //
  1301. Assert(!this->SetsSBit(instr));
  1302. continue;
  1303. case STEP_MODCONST_12:
  1304. if (!EncodeModConst12(constant, &encoded))
  1305. {
  1306. Assert(UNREACHED);
  1307. }
  1308. encode |= encoded;
  1309. continue;
  1310. case STEP_T2_MEMIMM_POS12_NEG8:
  1311. bitOffset = *AsmSteps++;
  1312. Assert(opn != nullptr);
  1313. if (opn->IsIndirOpnd())
  1314. {
  1315. Assert(opn->AsIndirOpnd()->GetIndexOpnd() == nullptr);
  1316. offset = opn->AsIndirOpnd()->GetOffset();
  1317. // TODO: Handle literal pool loads, if necessary
  1318. // <tfs #775202>: LDR_W could have $Label Fixup for literal-pool
  1319. //if (TU_FEFIXUPSYM(tupOpn) && SS_ISLABEL(TU_FEFIXUPSYM(tupOpn))) {
  1320. // offset += (SS_OFFSET(TU_FEFIXUPSYM(tupOpn)) - ((pc & (~3)) + 4));
  1321. //}
  1322. }
  1323. else if (opn->IsSymOpnd())
  1324. {
  1325. Assert(opn->AsSymOpnd()->m_sym->IsStackSym());
  1326. this->BaseAndOffsetFromSym(opn->AsSymOpnd(), &regNum, &offset, this->m_func);
  1327. }
  1328. else
  1329. {
  1330. Assert(opn->IsImmediateOpnd());
  1331. offset = opn->GetImmediateValueAsInt32(instr->m_func);
  1332. }
  1333. encode = this->EncodeT2Offset(encode, instr, offset, bitOffset);
  1334. continue;
  1335. case STEP_T2_IMMSTACK_POS12_NEG8:
  1336. bitOffset = *AsmSteps++;
  1337. Assert(opn != nullptr);
  1338. Assert(opn->IsSymOpnd() && opn->AsSymOpnd()->m_sym->IsStackSym());
  1339. this->BaseAndOffsetFromSym(opn->AsSymOpnd(), &regNum, &offset, this->m_func);
  1340. encode = this->EncodeT2Offset(encode, instr, offset, bitOffset);
  1341. encode |= RegEncode[regNum];
  1342. continue;
  1343. case STEP_T2_STACKSYM_IMM_12:
  1344. // Used by LEA. Encode base reg at the given bit offset and 12-bit constant
  1345. // as a normal ADDW immediate.
  1346. bitOffset = *AsmSteps++;
  1347. Assert(opn != nullptr);
  1348. Assert(opn->IsSymOpnd() && opn->AsSymOpnd()->m_sym->IsStackSym());
  1349. this->BaseAndOffsetFromSym(opn->AsSymOpnd(), &regNum, &offset, this->m_func);
  1350. encode |= RegEncode[regNum] << bitOffset;
  1351. encode = this->EncodeT2Immediate12(encode, offset);
  1352. continue;
  1353. case STEP_T2_MEM_TYPE:
  1354. {
  1355. Assert((ftp->inst & 0xFF00) == 0xF800);
  1356. switch (opn->GetType())
  1357. {
  1358. case TyInt8:
  1359. SFlag = 1;
  1360. iType = 0;
  1361. break;
  1362. case TyUint8:
  1363. SFlag = 0;
  1364. iType = 0;
  1365. break;
  1366. case TyInt16:
  1367. SFlag = 1;
  1368. iType = 1;
  1369. break;
  1370. case TyUint16:
  1371. iType = 1;
  1372. SFlag = 0;
  1373. break;
  1374. case TyInt32:
  1375. case TyUint32:
  1376. case TyVar:
  1377. SFlag = 0;
  1378. iType = 2;
  1379. break;
  1380. default:
  1381. Assert(UNREACHED);
  1382. }
  1383. if (!EncoderMD::IsLoad(instr))
  1384. {
  1385. SFlag = 0;
  1386. }
  1387. encode |= (SFlag << 8) | (iType << 5);
  1388. continue;
  1389. }
  1390. case STEP_T2_SHIFT_IMM_5:
  1391. #if DBG
  1392. if(instr->m_opcode == Js::OpCode::ASR ||
  1393. instr->m_opcode == Js::OpCode::ASRS ||
  1394. instr->m_opcode == Js::OpCode::LSR)
  1395. {
  1396. // Encoding zero is interpreted as 32
  1397. // for these instructions.
  1398. Assert(constant != 0);
  1399. }
  1400. #endif
  1401. Assert(IS_CONST_UINT5(constant));
  1402. encoded = (constant & 0x03) << (16+6);
  1403. encoded |= (constant & 0x1c) << (16+12-2);
  1404. encode |= encoded;
  1405. continue;
  1406. case STEP_MOVW_reloc:
  1407. Assert(opn && opn->IsLabelOpnd());
  1408. if (opn->AsLabelOpnd()->GetLabel()->m_isDataLabel)
  1409. {
  1410. Assert(!opn->AsLabelOpnd()->GetLabel()->isInlineeEntryInstr);
  1411. EncodeReloc::New(&m_relocList, RelocTypeDataLabelLow, m_pc, opn->AsLabelOpnd()->GetLabel(), m_encoder->m_tempAlloc);
  1412. }
  1413. else
  1414. {
  1415. EncodeReloc::New(&m_relocList, RelocTypeLabelLow, m_pc, opn->AsLabelOpnd()->GetLabel(), m_encoder->m_tempAlloc);
  1416. }
  1417. continue;
  1418. case STEP_MOVT_reloc:
  1419. Assert(opn && opn->IsLabelOpnd());
  1420. EncodeReloc::New(&m_relocList, RelocTypeLabelHigh, m_pc, opn->AsLabelOpnd()->GetLabel(), m_encoder->m_tempAlloc);
  1421. continue;
  1422. case STEP_DREG:
  1423. {
  1424. int bbit = 0;
  1425. DWORD tmp = 0;
  1426. Assert(opn != nullptr && opn->IsRegOpnd());
  1427. bitOffset = *AsmSteps++;
  1428. bbit = *AsmSteps++;
  1429. regNum = (RegNum)GetRegEncode(opn->AsRegOpnd());
  1430. //Check to see if register number is valid
  1431. Assert(regNum >= 0 && regNum <= LAST_DOUBLE_REG_NUM);
  1432. tmp |= (regNum & 0xf) << bitOffset;
  1433. tmp |= ((regNum >> 4) & 0x1) << bbit;
  1434. Assert(0 == (encode & tmp));
  1435. encode |= tmp;
  1436. continue;
  1437. }
  1438. case STEP_SREG:
  1439. {
  1440. int bbit = 0;
  1441. DWORD tmp = 0;
  1442. Assert(opn != nullptr && opn->IsRegOpnd());
  1443. bitOffset = *AsmSteps++;
  1444. bbit = *AsmSteps++;
  1445. regNum = (RegNum)GetFloatRegEncode(opn->AsRegOpnd());
  1446. //Check to see if register number is valid
  1447. Assert(regNum >= 0 && regNum <= LAST_FLOAT_REG_NUM);
  1448. tmp |= (regNum & 0x1) << bbit;
  1449. tmp |= (regNum >> 1) << bitOffset;
  1450. Assert(0 == (encode & tmp));
  1451. encode |= tmp;
  1452. continue;
  1453. }
  1454. case STEP_IMM_S8:
  1455. {
  1456. Assert(opn!=nullptr);
  1457. AssertMsg(instrType == InstructionType::Vfp, "This step is specific to VFP instructions");
  1458. if (opn->IsIndirOpnd())
  1459. {
  1460. Assert(opn->AsIndirOpnd()->GetIndexOpnd() == nullptr);
  1461. offset = opn->AsIndirOpnd()->GetOffset();
  1462. // TODO: Handle literal pool loads, if necessary
  1463. // <tfs #775202>: LDR_W could have $Label Fixup for literal-pool
  1464. //if (TU_FEFIXUPSYM(tupOpn) && SS_ISLABEL(TU_FEFIXUPSYM(tupOpn))) {
  1465. // offset += (SS_OFFSET(TU_FEFIXUPSYM(tupOpn)) - ((pc & (~3)) + 4));
  1466. //}
  1467. }
  1468. else if (opn->IsSymOpnd())
  1469. {
  1470. Assert(opn->AsSymOpnd()->m_sym->IsStackSym());
  1471. this->BaseAndOffsetFromSym(opn->AsSymOpnd(), &regNum, &offset, this->m_func);
  1472. }
  1473. else
  1474. {
  1475. offset = 0;
  1476. AssertMsg(false, "Why are we here");
  1477. }
  1478. if (offset < 0)
  1479. {
  1480. //IsShifterSub(tupOpn) = TRUE; //Doesn't seem necessary for us, why does UTC set this?
  1481. offset = -offset;
  1482. encode &= ~(1 << 7);
  1483. }
  1484. else
  1485. {
  1486. encode |= (1 << 7);
  1487. }
  1488. // Set the W (writeback) bit if IsShifterUpdate is
  1489. // specified.
  1490. if (EncoderMD::IsShifterUpdate(instr))
  1491. {
  1492. encode |= (1 << 5);
  1493. // Set the P (pre-indexed) bit to be the complement
  1494. // of IsShifterPost
  1495. if (EncoderMD::IsShifterPost(instr))
  1496. {
  1497. encode &= ~(1 << 8);
  1498. }
  1499. else
  1500. {
  1501. encode |= (1 << 8);
  1502. }
  1503. }
  1504. else
  1505. {
  1506. // Clear the W bit and set the P bit (offset
  1507. // addressing).
  1508. encode &= ~(1 << 5);
  1509. encode |= (1 << 8);
  1510. }
  1511. Assert(IS_CONST_UINT8(offset >> 2));
  1512. encode |= ((offset >> 2) << 16);
  1513. continue;
  1514. }
  1515. case STEP_DREGLIST:
  1516. {
  1517. IR::Opnd *opndRD;
  1518. if (EncoderMD::IsLoad(instr))
  1519. {
  1520. opndRD = instr->GetDst();
  1521. }
  1522. else
  1523. {
  1524. opndRD = instr->GetSrc1();
  1525. }
  1526. BVUnit32 registers = opndRD->AsRegBVOpnd()->GetValue();
  1527. DWORD first = registers.GetNextBit();
  1528. DWORD last = (DWORD)-1;
  1529. _BitScanReverse((DWORD*)&last, (DWORD)registers.GetWord());
  1530. Assert(last >= first && last <= LAST_DOUBLE_CALLEE_SAVED_REG_NUM);
  1531. encode |= (CO_UIMMED8((last - first + 1) * 2)) << 16;
  1532. encode |= (first << 28);
  1533. }
  1534. continue;
  1535. case STEP_AM5:
  1536. Assert(opn->IsIndirOpnd());
  1537. Assert(opn->AsIndirOpnd()->GetIndexOpnd() == nullptr);
  1538. Assert(opn->AsIndirOpnd()->GetOffset() == 0);
  1539. fPost = EncoderMD::IsShifterPost(instr);
  1540. fSub = EncoderMD::IsShifterSub(instr);
  1541. fUpdate = EncoderMD::IsShifterUpdate(instr);
  1542. // update addressing mode
  1543. encode |= !fPost << 8;
  1544. encode |= !fSub << 7;
  1545. encode |= fUpdate << 5;
  1546. continue;
  1547. case STEP_DONE:
  1548. done = true;
  1549. break;
  1550. default:
  1551. #if DBG
  1552. instr->Dump();
  1553. AssertMsg(UNREACHED, "Unrecognized assembly step");
  1554. #endif
  1555. return 0;
  1556. }
  1557. break;
  1558. }
  1559. }
  1560. #if DBG
  1561. if (!done)
  1562. {
  1563. instr->Dump();
  1564. Output::Flush();
  1565. AssertMsg(UNREACHED, "Unsupported Instruction Form");
  1566. }
  1567. #endif
  1568. return encode;
  1569. }
  1570. #ifdef INSERT_NOPS
  1571. ptrdiff_t insertNops(BYTE *pc, ENCODE_32 outInstr, uint count, uint size)
  1572. {
  1573. //Insert count nops in the beginning
  1574. for(int i = 0; i < count;i++)
  1575. {
  1576. *(ENCODE_32 *)(pc + i * sizeof(ENCODE_32)) = 0x8000F3AF;
  1577. }
  1578. if (size == sizeof(ENCODE_16))
  1579. {
  1580. *(ENCODE_16 *)(pc + count * sizeof(ENCODE_32)) = (ENCODE_16)(outInstr & 0x0000ffff);
  1581. *(ENCODE_16 *)(pc + sizeof(ENCODE_16) + count * sizeof(ENCODE_32)) = (ENCODE_16)(0xBF00);
  1582. }
  1583. else
  1584. {
  1585. Assert(size == sizeof(ENCODE_32));
  1586. *(ENCODE_32 *)(pc + count * sizeof(ENCODE_32)) = outInstr;
  1587. }
  1588. //Insert count nops at the end;
  1589. for(int i = count + 1; i < (2 *count + 1); i++)
  1590. {
  1591. *(ENCODE_32 *)(pc + i * sizeof(ENCODE_32)) = 0x8000F3AF;
  1592. }
  1593. return MachInt*(2*count + 1);
  1594. }
  1595. #endif //INSERT_NOPS
  1596. #ifdef SOFTWARE_FIXFOR_HARDWARE_BUGWIN8_502326
  1597. bool
  1598. EncoderMD::IsBuggyHardware()
  1599. {
  1600. return true;
  1601. // TODO: Enable this for restricting to Qualcomm Krait cores affected: KR28M2A10, KR28M2A11, KR28M2A12
  1602. /*
  1603. AssertMsg(AutoSystemInfo::Data.wProcessorArchitecture == 5, "This has to be ARM architecture");
  1604. if (((AutoSystemInfo::Data.wProcessorLevel & 0xFC0) == 0x40) && ((AutoSystemInfo::Data.wProcessorRevision & 0xF0) == 0))
  1605. {
  1606. #if DBG_DUMP
  1607. if (Js::Configuration::Global.flags.Trace.IsEnabled(Js::EncoderPhase))
  1608. {
  1609. Output::Print(_u("TRACE: Running in buggy hardware.\n"));
  1610. }
  1611. #endif
  1612. return true;
  1613. }
  1614. return false;
  1615. */
  1616. }
  1617. bool
  1618. EncoderMD::CheckBranchInstrCriteria(IR::Instr* instr)
  1619. {
  1620. if (ISQBUGGYBR(instr->m_opcode))
  1621. {
  1622. return true;
  1623. }
  1624. #if DBG
  1625. switch (instr->m_opcode)
  1626. {
  1627. case Js::OpCode::RET: //This is never Thumb2 hence we are safe in this hardware bug
  1628. return false;
  1629. case Js::OpCode::BL:
  1630. AssertMsg(false, "We don't generate these now. Include in the opcode list above for BL T1 encodings");
  1631. return false;
  1632. case Js::OpCode::BLX:
  1633. AssertMsg(instr->GetSrc1()->IsRegOpnd(),"If we generate label include in the opcode list above");
  1634. //Fallthrough
  1635. default:
  1636. {
  1637. //Assert to make sure none of the other instructions have target as PC register.
  1638. if (instr->GetDst() && instr->GetDst()->IsRegOpnd())
  1639. {
  1640. AssertMsg(instr->GetDst()->AsRegOpnd()->GetReg() != RegPC, "Check for this opcode above");
  1641. }
  1642. }
  1643. }
  1644. #endif
  1645. return false;
  1646. }
  1647. #endif //SOFTWARE_FIXFOR_HARDWARE_BUGWIN8_502326
  1648. ///----------------------------------------------------------------------------
  1649. ///
  1650. /// EncoderMD::Encode
  1651. ///
  1652. /// Emit the ARM encoding for the given instruction in the passed in
  1653. /// buffer ptr.
  1654. ///
  1655. ///----------------------------------------------------------------------------
  1656. ptrdiff_t
  1657. EncoderMD::Encode(IR::Instr *instr, BYTE *pc, BYTE* beginCodeAddress)
  1658. {
  1659. m_pc = pc;
  1660. ENCODE_32 outInstr;
  1661. IFORM iform;
  1662. int size = 0;
  1663. // Instructions must be lowered, we don't handle non-MD opcodes here.
  1664. Assert(instr != nullptr);
  1665. if (instr->IsLowered() == false)
  1666. {
  1667. if (instr->IsLabelInstr())
  1668. {
  1669. if (instr->isInlineeEntryInstr)
  1670. {
  1671. intptr_t inlineeCallInfo = 0;
  1672. const bool encodeResult = Js::InlineeCallInfo::Encode(inlineeCallInfo, instr->AsLabelInstr()->GetOffset(), m_pc - m_encoder->m_encodeBuffer);
  1673. Assert(encodeResult);
  1674. //We are re-using offset to save the inlineeCallInfo which will be patched in ApplyRelocs
  1675. //This is a cleaner way to patch MOVW\MOVT pair with the right inlineeCallInfo
  1676. instr->AsLabelInstr()->ResetOffset((uint32)inlineeCallInfo);
  1677. }
  1678. else
  1679. {
  1680. instr->AsLabelInstr()->SetPC(m_pc);
  1681. if (instr->AsLabelInstr()->m_id == m_func->m_unwindInfo.GetPrologStartLabel())
  1682. {
  1683. m_func->m_unwindInfo.SetPrologOffset(m_pc - m_encoder->m_encodeBuffer);
  1684. }
  1685. else if (instr->AsLabelInstr()->m_id == m_func->m_unwindInfo.GetEpilogEndLabel())
  1686. {
  1687. // This is the last instruction in the epilog. Any instructions that follow
  1688. // are separated code, so the unwind info will have to represent them as a function
  1689. // fragment. (If there's no separated code, then this offset will equal the total
  1690. // code size.)
  1691. m_func->m_unwindInfo.SetEpilogEndOffset(m_pc - m_encoder->m_encodeBuffer - m_func->m_unwindInfo.GetPrologOffset());
  1692. }
  1693. }
  1694. }
  1695. #if ENABLE_DEBUG_CONFIG_OPTIONS
  1696. if (instr->IsEntryInstr() && (
  1697. Js::Configuration::Global.flags.DebugBreak.Contains(m_func->GetFunctionNumber()) ||
  1698. PHASE_ON(Js::DebugBreakPhase, m_func)
  1699. ))
  1700. {
  1701. IR::Instr *int3 = IR::Instr::New(Js::OpCode::DEBUGBREAK, m_func);
  1702. return this->Encode(int3, m_pc);
  1703. }
  1704. #endif
  1705. return 0;
  1706. }
  1707. #ifdef SOFTWARE_FIXFOR_HARDWARE_BUGWIN8_502326
  1708. if (IsBuggyHardware())
  1709. {
  1710. // Hardware bug is in Qualcomm 8960. 32 bit thumb branch instructions might not jump to the correct address in following
  1711. // conditions:
  1712. // a.3 T16 thumb instruction followed by T32 branch instruction (conditional\unconditional & RegPc load).
  1713. // b.Branch instruction starts at 0x*****FBE
  1714. // As we don't know the final address, instead of checking for 0x*FBE we just check for offset 0x*E
  1715. // as the final function start address is always aligned at 16 byte boundary (EMIT_BUFFER_ALIGNMENT)
  1716. if (consecutiveThumbInstrCount >= 3 && (((uint)(m_pc - beginCodeAddress) & 0xF) == 0xE) && CheckBranchInstrCriteria(instr))
  1717. {
  1718. Assert(beginCodeAddress);
  1719. IR::Instr *nop = IR::Instr::New(Js::OpCode::VMOV,
  1720. IR::RegOpnd::New(nullptr, RegD15, TyMachDouble, this->m_func),
  1721. IR::RegOpnd::New(nullptr, RegD15, TyMachDouble, this->m_func),
  1722. m_func);
  1723. size = this->Encode(nop, m_pc);
  1724. consecutiveThumbInstrCount = 0;
  1725. size+= this->Encode(instr, m_pc + size);
  1726. #if DBG_DUMP
  1727. if (Js::Configuration::Global.flags.Trace.IsEnabled(Js::EncoderPhase))
  1728. {
  1729. Output::Print(_u("TRACE: Avoiding Branch instruction and Dummy nops at 0x*E \n"));
  1730. }
  1731. #endif
  1732. Assert(size == 8);
  1733. // We are okay with returning size 8 as the previous 3 thumb instructions any way would have saved 6 bytes
  1734. // and doesn't alter the logic of allocating temp buffer based on MachMaxInstrSize
  1735. return size;
  1736. }
  1737. }
  1738. #endif
  1739. InstructionType instrType = this->CanonicalizeInstr(instr);
  1740. switch(instrType)
  1741. {
  1742. case Thumb:
  1743. size = 2;
  1744. consecutiveThumbInstrCount++;
  1745. break;
  1746. case Thumb2:
  1747. size = 4;
  1748. consecutiveThumbInstrCount = 0;
  1749. break;
  1750. case Vfp:
  1751. size = 4;
  1752. consecutiveThumbInstrCount = 0;
  1753. break;
  1754. default: Assert(false);
  1755. }
  1756. AssertMsg(size != MachChar, "Thumb2 is never single Byte");
  1757. iform = (IFORM)GetForm(instr, size);
  1758. outInstr = GenerateEncoding(instr, iform, m_pc, size, instrType);
  1759. if (outInstr == 0)
  1760. {
  1761. return 0;
  1762. }
  1763. // TODO: Check if VFP/Neon instructions in Thumb-2 mode we need to swap the instruction halfwords
  1764. if (size == sizeof(ENCODE_16))
  1765. {
  1766. #ifdef INSERT_NOPS
  1767. return insertNops(m_pc, outInstr, CountNops, sizeof(ENCODE_16));
  1768. #else
  1769. //2 byte Thumb encoding
  1770. Assert((outInstr & 0xffff0000) == 0);
  1771. *(ENCODE_16 *)m_pc = (ENCODE_16)(outInstr & 0x0000ffff);
  1772. return MachShort;
  1773. #endif
  1774. }
  1775. else if (size == sizeof(ENCODE_32))
  1776. {
  1777. #ifdef INSERT_NOPS
  1778. return insertNops(m_pc, outInstr, CountNops, sizeof(ENCODE_32));
  1779. #else
  1780. //4 byte Thumb2 encoding
  1781. *(ENCODE_32 *)m_pc = outInstr ;
  1782. return MachInt;
  1783. #endif
  1784. }
  1785. AssertMsg(UNREACHED, "Unexpected size");
  1786. return 0;
  1787. }
  1788. bool
  1789. EncoderMD::CanEncodeModConst12(DWORD constant)
  1790. {
  1791. DWORD encode;
  1792. return EncodeModConst12(constant, &encode);
  1793. }
bool
EncoderMD::EncodeModConst12(DWORD constant, DWORD * result)
{
    // Try to encode 'constant' as a Thumb-2 "modified immediate" and store the
    // instruction-ready, pre-scattered bit pattern in *result. Returns false
    // if the value is not representable in this form.
    unsigned int a, b, c, d, rotation, firstbit, lastbit, temp=0;

    // Zero encodes as all-zero fields.
    if (constant == 0)
    {
        *result = 0;
        return true;
    }

    // The four bytes of the constant, low to high.
    a = constant & 0xff;
    b = (constant >> 8) & 0xff;
    c = (constant >> 16) & 0xff;
    d = (constant >> 24) & 0xff;

    // Positions of the most- and least-significant set bits. constant != 0
    // here, so both scans are guaranteed to find a bit.
    _BitScanReverse((DWORD*)&firstbit, constant);
    _BitScanForward((DWORD*)&lastbit, constant);

    // Representable forms:
    //   0x00XY00XY  (a == 0, c == 0, b == d)
    //   0xXY00XY00  (b == 0, d == 0, a == c)
    //   0xXYXYXYXY  (all four bytes equal)
    //   any value whose set bits span fewer than 8 positions (a rotated
    //   8-bit immediate).
    if (! ((a == 0 && c == 0 && b == d)
        || (b == 0 && d == 0 && a == c)
        || (a == b && b == c && c == d)
        || (firstbit-lastbit < 8) ))
    {
        return false;
    }

    *result = 0;

    if (constant <= 0xFF)
    {
        // Plain 8-bit immediate: the byte is placed at bits 16..23 of the
        // pre-scattered encoding.
        *result |= constant << 16;
    }
    else if (firstbit-lastbit < 8)
    {
        // Rotated 8-bit immediate: normalize the value so its top set bit
        // lands at bit 7, then record the rotation amount.
        if (firstbit > 7)
        {
            // Only the low 7 bits are kept explicitly; bit 7 of the
            // immediate is implied by the rotation form of the encoding.
            temp |= 0x7F & (constant >> (firstbit-7));
            rotation = 32-firstbit+7;
        }
        else
        {
            // NOTE(review): if firstbit <= 7 then constant <= 0xFF, which was
            // handled above, so this arm looks unreachable; kept defensively.
            temp |= 0x7F & (constant << (7-firstbit));
            rotation = 7-firstbit;
        }
        // Scatter the 8-bit immediate (bits 16..23) and the 5-bit rotation
        // (split across bits 10, 30..28 and 23 per the shifts below) into the
        // instruction layout.
        *result = (temp & 0xFF) << 16;
        *result |= (0x10 & rotation) << 6;
        *result |= (0xE & rotation) << 27;
        *result |= (0x1 & rotation) << 23;
    }
    else
    {
        // Replicated-byte forms: a 2-bit mode selector goes to bits 28..29
        // and the replicated byte to bits 16..23.
        if (a==0 && c==0 && b==d)
        {
            // 0x00XY00XY: byte replicated in the odd byte lanes.
            *result |= 0x20000000; // HW2[12]
            *result |= (0xFF & b) << 16;
        }
        else if (a==c && b==0 && d==0)
        {
            // 0xXY00XY00 pattern source bytes (byte in even lanes).
            *result |= 0x10000000;
            *result |= (0xFF & a) << 16;
        }
        else if (a==b && b==c && c==d)
        {
            // 0xXYXYXYXY: byte replicated in all four lanes.
            *result |= 0x30000000;
            *result |= (0xFF & d) << 16;
        }
        else
        {
            // The admissibility check above guarantees one of the three
            // patterns matched.
            Assert(UNREACHED);
        }
    }
    return true;
}
  1862. ///----------------------------------------------------------------------------
  1863. ///
  1864. /// EncodeReloc::New
  1865. ///
  1866. ///----------------------------------------------------------------------------
  1867. void
  1868. EncodeReloc::New(EncodeReloc **pHead, RelocType relocType, BYTE *offset, IR::Instr *relocInstr, ArenaAllocator *alloc)
  1869. {
  1870. EncodeReloc *newReloc = AnewStruct(alloc, EncodeReloc);
  1871. newReloc->m_relocType = relocType;
  1872. newReloc->m_consumerOffset = offset;
  1873. newReloc->m_next = *pHead;
  1874. newReloc->m_relocInstr = relocInstr;
  1875. *pHead = newReloc;
  1876. }
  1877. ENCODE_32 EncoderMD::CallOffset(int x)
  1878. {
  1879. Assert(IS_CONST_INT24(x >> 1));
  1880. ENCODE_32 ret;
  1881. int Sflag = (x & 0x1000000) >> 24;
  1882. int off23 = (x & 0x800000) >> 23;
  1883. int off22 = (x & 0x400000) >> 22;
  1884. ret = (x & 0xFFE) << 15;
  1885. ret |= (x & 0x3FF000) >> 12;
  1886. ret |= (((~off23) ^ Sflag) & 0x1) << (16+13);
  1887. ret |= (((~off22) ^ Sflag) & 0x1) << (16+11);
  1888. ret |= (Sflag << 10);
  1889. return ret;
  1890. }
  1891. ENCODE_32 EncoderMD::BranchOffset_T2_24(int x)
  1892. {
  1893. x -= 4;
  1894. Assert(IS_CONST_INT24(x >> 1));
  1895. int ret;
  1896. int Sflag = (x & 0x1000000) >> 24;
  1897. int off23 = (x & 0x800000) >> 23;
  1898. int off22 = (x & 0x400000) >> 22;
  1899. ret = (x & 0xFFE) << 15;
  1900. ret |= (x & 0x3FF000) >> 12;
  1901. ret |= (((~off23) ^ Sflag) & 0x1) << (16+13);
  1902. ret |= (((~off22) ^ Sflag) & 0x1) << (16+11);
  1903. ret |= (Sflag << 10);
  1904. return INSTR_TYPE(ret);
  1905. }
  1906. ENCODE_32 EncoderMD::BranchOffset_T2_20(int x)
  1907. {
  1908. x -= 4;
  1909. Assert(IS_CONST_INT21(x));
  1910. uint32 ret;
  1911. uint32 Sflag = (x & 0x100000) >> 20;
  1912. uint32 off19 = (x & 0x80000) >> 19;
  1913. uint32 off18 = (x & 0x40000) >> 18;
  1914. ret = (x & 0xFFE) << 15;
  1915. ret |= (x & 0x3F000) >> 12;
  1916. ret |= off18 << (13+16);
  1917. ret |= off19 << (11+16);
  1918. ret |= (Sflag << 10);
  1919. return ret;
  1920. }
// Translate a stack-symbol operand into a (base register, signed offset) pair
// suitable for load/store encoding, writing the results through pBaseReg and
// pOffset. Adjusts the raw symbol offset for the argument area, inlinee
// frames, and the parameter area as appropriate.
void
EncoderMD::BaseAndOffsetFromSym(IR::SymOpnd *symOpnd, RegNum *pBaseReg, int32 *pOffset, Func * func)
{
    StackSym *stackSym = symOpnd->m_sym->AsStackSym();

    RegNum baseReg = func->GetLocalsPointer();
    int32 offset = stackSym->m_offset + symOpnd->m_offset;
    if (baseReg == RegSP)
    {
        // SP points to the base of the argument area. Non-reg SP points directly to the locals.
        offset += (func->m_argSlotsForFunctionsCalled * MachRegInt);
    }

    if (func->HasInlinee())
    {
        // Locals (and orphaned arg slots) sit above the inlinee argument
        // area, so bias their offsets past it; real arg-slot syms and param
        // slots are not biased.
        if ((!stackSym->IsArgSlotSym() || stackSym->m_isOrphanedArg) && !stackSym->IsParamSlotSym())
        {
            offset += func->GetInlineeArgumentStackSize();
        }
    }

    if (stackSym->IsParamSlotSym())
    {
        // Params live above the whole local frame.
        offset += func->m_localStackHeight + func->m_ArgumentsOffset;
        if (!EncoderMD::CanEncodeLoadStoreOffset(offset))
        {
            // Use the frame pointer. No need to hoist an offset for a param.
            baseReg = FRAME_REG;
            offset = stackSym->m_offset + symOpnd->m_offset - (Js::JavascriptFunctionArgIndex_Frame * MachRegInt);
            Assert(EncoderMD::CanEncodeLoadStoreOffset(offset));
        }
    }
#ifdef DBG
    else
    {
        // Sanity-check the computed offset against the frame layout.
        // Locals are offset by the size of the area allocated for stack args.
        Assert(offset >= 0);
        Assert(baseReg != RegSP || (uint)offset >= (func->m_argSlotsForFunctionsCalled * MachRegInt));

        if (func->GetMaxInlineeArgOutSize() != 0)
        {
            Assert(func->HasInlinee());
            Assert(baseReg == (func->HasTry() ? RegR7 : RegSP));
            if (stackSym->IsArgSlotSym() && !stackSym->m_isOrphanedArg)
            {
                // Inlined arg slots must fall inside the inlinee arg area.
                Assert(stackSym->m_isInlinedArgSlot);
                Assert((uint)offset <= func->m_argSlotsForFunctionsCalled * MachRegInt + func->GetMaxInlineeArgOutSize());
            }
            else
            {
                // Everything else must fall above it.
                AssertMsg(stackSym->IsAllocated(), "StackSym offset should be set");
                Assert(offset > (func->HasTry() ? (int32)func->GetMaxInlineeArgOutSize() : (int32)(func->m_argSlotsForFunctionsCalled * MachRegInt + func->GetMaxInlineeArgOutSize())));
            }
        }
        // TODO: restore the following assert (very useful) once we have a way to tell whether prolog/epilog
        // gen is complete.
        //Assert(offset < func->m_localStackHeight);
    }
#endif
    *pBaseReg = baseReg;
    *pOffset = offset;
}
///----------------------------------------------------------------------------
///
/// EncoderMD::ApplyRelocs
///
///     We apply relocations to the temporary buffer using the target buffer's
///     address before we copy the contents of the temporary buffer to the
///     target buffer.
///
///----------------------------------------------------------------------------
void
EncoderMD::ApplyRelocs(uint32 codeBufferAddress, size_t codeSize, uint* bufferCRC, BOOL isBrShorteningSucceeded, bool isFinalBufferValidation)
{
    // NOTE(review): codeSize, bufferCRC, isBrShorteningSucceeded and
    // isFinalBufferValidation are unused in this implementation — presumably
    // kept for signature parity with other platforms; confirm.
    for (EncodeReloc *reloc = m_relocList; reloc; reloc = reloc->m_next)
    {
        // relocAddress points at the 32-bit instruction (in the temporary
        // encode buffer) whose immediate field needs patching.
        BYTE * relocAddress = reloc->m_consumerOffset;
        int32 pcrel;
        ENCODE_32 encode = *(ENCODE_32*)relocAddress;
        switch (reloc->m_relocType)
        {
        case RelocTypeBranch20:
        {
            // PC-relative conditional branch: patch the 20-bit displacement.
            IR::LabelInstr * labelInstr = reloc->m_relocInstr->AsLabelInstr();
            Assert(!labelInstr->isInlineeEntryInstr);
            AssertMsg(labelInstr->GetPC() != nullptr, "Branch to unemitted label?");
            pcrel = (uint32)(labelInstr->GetPC() - reloc->m_consumerOffset);
            encode |= BranchOffset_T2_20(pcrel);
            *(uint32 *)relocAddress = encode;
            break;
        }

        case RelocTypeBranch24:
        {
            // PC-relative unconditional branch: patch the 24-bit displacement.
            IR::LabelInstr * labelInstr = reloc->m_relocInstr->AsLabelInstr();
            Assert(!labelInstr->isInlineeEntryInstr);
            AssertMsg(labelInstr->GetPC() != nullptr, "Branch to unemitted label?");
            pcrel = (uint32)(labelInstr->GetPC() - reloc->m_consumerOffset);
            encode |= BranchOffset_T2_24(pcrel);
            *(ENCODE_32 *)relocAddress = encode;
            break;
        }

        case RelocTypeDataLabelLow:
        {
            // Lower 16 bits of a data label's absolute address (no Thumb bit).
            IR::LabelInstr * labelInstr = reloc->m_relocInstr->AsLabelInstr();
            Assert(!labelInstr->isInlineeEntryInstr && labelInstr->m_isDataLabel);

            AssertMsg(labelInstr->GetPC() != nullptr, "Branch to unemitted label?");

            // Rebase from the temporary buffer to the final code buffer.
            pcrel = ((labelInstr->GetPC() - m_encoder->m_encodeBuffer + codeBufferAddress) & 0xFFFF);

            if (!EncodeImmediate16(pcrel, (DWORD*) &encode))
            {
                Assert(UNREACHED);
            }
            *(ENCODE_32 *) relocAddress = encode;
            break;
        }

        case RelocTypeLabelLow:
        {
            // Absolute (not relative) label address (lower 16 bits)
            IR::LabelInstr * labelInstr = reloc->m_relocInstr->AsLabelInstr();
            if (!labelInstr->isInlineeEntryInstr)
            {
                AssertMsg(labelInstr->GetPC() != nullptr, "Branch to unemitted label?");
                // Note that the bottom bit must be set, since this is a Thumb code address.
                pcrel = ((labelInstr->GetPC() - m_encoder->m_encodeBuffer + codeBufferAddress) & 0xFFFF) | 1;
            }
            else
            {
                // For inlinee entries the label carries a pre-encoded value;
                // take its low 16 bits directly.
                pcrel = labelInstr->GetOffset() & 0xFFFF;
            }

            if (!EncodeImmediate16(pcrel, (DWORD*) &encode))
            {
                Assert(UNREACHED);
            }
            *(ENCODE_32 *) relocAddress = encode;
            break;
        }

        case RelocTypeLabelHigh:
        {
            // Absolute (not relative) label address (upper 16 bits)
            IR::LabelInstr * labelInstr = reloc->m_relocInstr->AsLabelInstr();
            if (!labelInstr->isInlineeEntryInstr)
            {
                AssertMsg(labelInstr->GetPC() != nullptr, "Branch to unemitted label?");
                pcrel = (labelInstr->GetPC() - m_encoder->m_encodeBuffer + codeBufferAddress) >> 16;
                // We only record the relocation on the low byte of the pair
            }
            else
            {
                // Pre-encoded inlinee entry value; take its high 16 bits.
                pcrel = labelInstr->GetOffset() >> 16;
            }

            if (!EncodeImmediate16(pcrel, (DWORD*) &encode))
            {
                Assert(UNREACHED);
            }
            *(ENCODE_32 *) relocAddress = encode;
            break;
        }

        case RelocTypeLabel:
        {
            // Full 32-bit absolute label address stored as data.
            IR::LabelInstr * labelInstr = reloc->m_relocInstr->AsLabelInstr();
            AssertMsg(labelInstr->GetPC() != nullptr, "Branch to unemitted label?");
            /* For Thumb instruction set -> OR 1 with the address*/
            *(uint32 *)relocAddress = (uint32)(labelInstr->GetPC() - m_encoder->m_encodeBuffer + codeBufferAddress) | 1;
            break;
        }

        default:
            AssertMsg(UNREACHED, "Unknown reloc type");
        }
    }
}
  2085. void
  2086. EncoderMD::EncodeInlineeCallInfo(IR::Instr *instr, uint32 codeOffset)
  2087. {
  2088. IR::LabelInstr* inlineeStart = instr->AsLabelInstr();
  2089. Assert((inlineeStart->GetOffset() & 0x0F) == inlineeStart->GetOffset());
  2090. return;
  2091. }
  2092. bool EncoderMD::TryConstFold(IR::Instr *instr, IR::RegOpnd *regOpnd)
  2093. {
  2094. Assert(regOpnd->m_sym->IsConst());
  2095. if (instr->m_opcode == Js::OpCode::MOV)
  2096. {
  2097. if (instr->GetSrc1() != regOpnd)
  2098. {
  2099. return false;
  2100. }
  2101. if (!instr->GetDst()->IsRegOpnd())
  2102. {
  2103. return false;
  2104. }
  2105. instr->ReplaceSrc(regOpnd, regOpnd->m_sym->GetConstOpnd());
  2106. LegalizeMD::LegalizeInstr(instr);
  2107. return true;
  2108. }
  2109. else
  2110. {
  2111. return false;
  2112. }
  2113. }
  2114. bool EncoderMD::TryFold(IR::Instr *instr, IR::RegOpnd *regOpnd)
  2115. {
  2116. if (LowererMD::IsAssign(instr))
  2117. {
  2118. if (!instr->GetDst()->IsRegOpnd() || regOpnd != instr->GetSrc1())
  2119. {
  2120. return false;
  2121. }
  2122. IR::SymOpnd *symOpnd = IR::SymOpnd::New(regOpnd->m_sym, regOpnd->GetType(), instr->m_func);
  2123. instr->ReplaceSrc(regOpnd, symOpnd);
  2124. LegalizeMD::LegalizeInstr(instr);
  2125. return true;
  2126. }
  2127. else
  2128. {
  2129. return false;
  2130. }
  2131. }
  2132. void EncoderMD::AddLabelReloc(BYTE* relocAddress)
  2133. {
  2134. Assert(relocAddress != nullptr);
  2135. EncodeReloc::New(&m_relocList, RelocTypeLabel, relocAddress, *(IR::Instr**)relocAddress, m_encoder->m_tempAlloc);
  2136. }