
xisme/qemu-kvm_src-anolis

forked from src-anolis-os/qemu-kvm 
0009-Add-loongarch64-rh-devices.mak.patch 94.31 KB
Xianglai Li committed on 2024-08-29 19:00 +08:00: fix compile error for loongarch

From 89fb9c8e0ca11e311417981d46768642d1f8e6d2 Mon Sep 17 00:00:00 2001
From: lixianglai <lixianglai@loongson.cn>
Date: Wed, 24 Aug 2022 22:56:29 -0400
Subject: [PATCH 09/28] Add loongarch64-rh-devices.mak.
Change-Id: I375face82c0aa68c053254b879267830d6981756
Signed-off-by: lixianglai <lixianglai@loongson.cn>
---
.../loongarch64-rh-devices.mak | 155 ++
configure | 5 +
meson.build | 2 +
pc-bios/meson.build | 1 +
tcg/loongarch64/tcg-insn-defs.c.inc | 979 +++++++++
tcg/loongarch64/tcg-target-con-set.h | 31 +
tcg/loongarch64/tcg-target-con-str.h | 28 +
tcg/loongarch64/tcg-target.c.inc | 1744 +++++++++++++++++
tcg/loongarch64/tcg-target.h | 178 ++
9 files changed, 3123 insertions(+)
create mode 100644 configs/devices/loongarch64-softmmu/loongarch64-rh-devices.mak
create mode 100644 tcg/loongarch64/tcg-insn-defs.c.inc
create mode 100644 tcg/loongarch64/tcg-target-con-set.h
create mode 100644 tcg/loongarch64/tcg-target-con-str.h
create mode 100644 tcg/loongarch64/tcg-target.c.inc
create mode 100644 tcg/loongarch64/tcg-target.h
diff --git a/configs/devices/loongarch64-softmmu/loongarch64-rh-devices.mak b/configs/devices/loongarch64-softmmu/loongarch64-rh-devices.mak
new file mode 100644
index 000000000..e7b5bdc8e
--- /dev/null
+++ b/configs/devices/loongarch64-softmmu/loongarch64-rh-devices.mak
@@ -0,0 +1,155 @@
+
+include ../rh-virtio.mak
+# Default configuration for loongarch-softmmu
+
+CONFIG_PCI=y
+CONFIG_ACPI_PCI=y
+# For now, CONFIG_IDE_CORE requires ISA, so we enable it here
+CONFIG_ISA_BUS=y
+CONFIG_VIRTIO_PCI=y
+
+CONFIG_VGA_PCI=y
+CONFIG_ACPI_SMBUS=y
+#CONFIG_VHOST_USER_SCSI=$(call land,$(CONFIG_VHOST_USER),$(CONFIG_LINUX))
+CONFIG_VHOST_USER_SCSI=y
+#CONFIG_VHOST_USER_BLK=$(call land,$(CONFIG_VHOST_USER),$(CONFIG_LINUX))
+CONFIG_VHOST_USER_BLK=y
+CONFIG_VIRTIO=y
+CONFIG_VIRTIO_BALLOON=y
+CONFIG_VIRTIO_BLK=y
+CONFIG_VIRTIO_CRYPTO=y
+CONFIG_VIRTIO_GPU=y
+CONFIG_VIRTIO_INPUT=y
+CONFIG_VIRTIO_NET=y
+CONFIG_VIRTIO_RNG=y
+CONFIG_SCSI=y
+CONFIG_VIRTIO_SCSI=y
+CONFIG_VIRTIO_SERIAL=y
+
+CONFIG_USB_UHCI=y
+CONFIG_USB_OHCI=y
+CONFIG_USB_OHCI_PCI=y
+CONFIG_USB_XHCI=y
+CONFIG_USB_XHCI_NEC=y
+CONFIG_NE2000_PCI=y
+CONFIG_EEPRO100_PCI=y
+CONFIG_PCNET_PCI=y
+CONFIG_PCNET_COMMON=y
+CONFIG_AC97=y
+CONFIG_HDA=y
+CONFIG_ES1370=y
+CONFIG_SCSI=y
+CONFIG_LSI_SCSI_PCI=y
+CONFIG_VMW_PVSCSI_SCSI_PCI=y
+CONFIG_MEGASAS_SCSI_PCI=y
+CONFIG_MPTSAS_SCSI_PCI=y
+CONFIG_RTL8139_PCI=y
+CONFIG_E1000_PCI=y
+CONFIG_IDE_CORE=y
+CONFIG_IDE_QDEV=y
+CONFIG_IDE_PCI=y
+CONFIG_AHCI=y
+CONFIG_AHCI_ICH9=y
+CONFIG_ESP=y
+CONFIG_ESP_PCI=y
+CONFIG_SERIAL=y
+CONFIG_SERIAL_ISA=y
+CONFIG_SERIAL_PCI=y
+CONFIG_CAN_BUS=y
+CONFIG_CAN_SJA1000=y
+CONFIG_CAN_PCI=y
+CONFIG_USB_UHCI=y
+CONFIG_USB_OHCI=y
+CONFIG_USB_XHCI=y
+CONFIG_USB_XHCI_NEC=y
+CONFIG_NE2000_PCI=y
+CONFIG_EEPRO100_PCI=y
+CONFIG_PCNET_PCI=y
+CONFIG_PCNET_COMMON=y
+CONFIG_AC97=y
+CONFIG_HDA=y
+CONFIG_ES1370=y
+CONFIG_SCSI=y
+CONFIG_LSI_SCSI_PCI=y
+CONFIG_VMW_PVSCSI_SCSI_PCI=y
+CONFIG_MEGASAS_SCSI_PCI=y
+CONFIG_MPTSAS_SCSI_PCI=y
+CONFIG_RTL8139_PCI=y
+CONFIG_E1000_PCI=y
+CONFIG_IDE_CORE=y
+CONFIG_IDE_QDEV=y
+CONFIG_IDE_PCI=y
+CONFIG_AHCI=y
+CONFIG_ESP=y
+CONFIG_ESP_PCI=y
+CONFIG_SERIAL=y
+CONFIG_SERIAL_ISA=y
+CONFIG_SERIAL_PCI=y
+CONFIG_CAN_BUS=y
+CONFIG_CAN_SJA1000=y
+CONFIG_CAN_PCI=y
+
+CONFIG_SPICE=y
+CONFIG_QXL=y
+CONFIG_ESP=y
+CONFIG_SCSI=y
+CONFIG_VGA_ISA=y
+CONFIG_VGA_ISA_MM=y
+CONFIG_VGA_CIRRUS=y
+CONFIG_VMWARE_VGA=y
+CONFIG_VIRTIO_VGA=y
+CONFIG_SERIAL_ISA=y
+CONFIG_PARALLEL=y
+CONFIG_I8254=y
+CONFIG_PCSPK=y
+CONFIG_PCKBD=y
+CONFIG_FDC=y
+CONFIG_ACPI=y
+CONFIG_ACPI_MEMORY_HOTPLUG=y
+CONFIG_ACPI_NVDIMM=y
+CONFIG_ACPI_CPU_HOTPLUG=y
+CONFIG_APM=y
+CONFIG_I8257=y
+CONFIG_PIIX4=y
+CONFIG_IDE_ISA=y
+CONFIG_IDE_PIIX=y
+#CONFIG_NE2000_ISA=y
+CONFIG_MIPSNET=y
+CONFIG_PFLASH_CFI01=y
+CONFIG_I8259=y
+CONFIG_MC146818RTC=y
+CONFIG_ISA_TESTDEV=y
+CONFIG_EMPTY_SLOT=y
+CONFIG_I2C=y
+CONFIG_DIMM=y
+CONFIG_MEM_DEVICE=y
+
+# Arch Specified CONFIG defines
+CONFIG_IDE_VIA=y
+CONFIG_VT82C686=y
+CONFIG_RC4030=y
+CONFIG_DP8393X=y
+CONFIG_DS1225Y=y
+CONFIG_FITLOADER=y
+CONFIG_SMBIOS=y
+
+CONFIG_PCIE_PORT=y
+CONFIG_I82801B11=y
+CONFIG_XIO3130=y
+CONFIG_PCI_EXPRESS=y
+CONFIG_MSI_NONBROKEN=y
+CONFIG_IOH3420=y
+CONFIG_SD=y
+CONFIG_SDHCI=y
+CONFIG_VIRTFS=y
+CONFIG_VIRTIO_9P=y
+CONFIG_USB_EHCI=y
+CONFIG_USB_EHCI_PCI=y
+CONFIG_USB_EHCI_SYSBUS=y
+CONFIG_USB_STORAGE_BOT=y
+CONFIG_TPM_EMULATOR=y
+CONFIG_TPM_TIS=y
+CONFIG_PLATFORM_BUS=y
+CONFIG_TPM_TIS_SYSBUS=y
+CONFIG_ACPI_LOONGARCH=y
+CONFIG_LS7A_RTC=y
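
Each CONFIG_*=y key above selects a device model to build into the qemu-system-loongarch64 target. A minimal sketch of how such a key becomes visible to C code, assuming the usual QEMU flow where the .mak selections are rendered into preprocessor defines in a generated per-target config header (the generation step itself is not part of this patch):

    /* Sketch under the assumption above: CONFIG_VIRTIO_PCI=y from the .mak
     * ends up as a define in the generated per-target header, so device
     * code can be gated on it. */
    #ifdef CONFIG_VIRTIO_PCI
    /* the virtio-pci device model is compiled into this target */
    #endif
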
diff --git a/configure b/configure
index 48c21775f..1f932f7ee 100755
--- a/configure
+++ b/configure
@@ -581,6 +581,8 @@ elif check_define __arm__ ; then
cpu="arm"
elif check_define __aarch64__ ; then
cpu="aarch64"
+elif check_define __loongarch__ ; then
+ cpu="loongarch64"
else
cpu=$(uname -m)
fi
@@ -606,6 +608,9 @@ case "$cpu" in
aarch64)
cpu="aarch64"
;;
+ loongarch64)
+ cpu="loongarch64"
+ ;;
mips*)
cpu="mips"
;;
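
The configure hunks extend host-CPU detection: check_define succeeds when the compiler predefines the given macro, so on a LoongArch host `cpu` is set to "loongarch64" before the uname fallback runs. A sketch of the probe such a check compiles (assumed shape; the shell wrapper itself is outside this hunk):

    /* check_define("__loongarch__") probe -- a sketch */
    #ifndef __loongarch__
    #error __loongarch__ not defined
    #endif
    int main(void) { return 0; }
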
diff --git a/meson.build b/meson.build
index c0fb5788f..c5fdb7856 100644
--- a/meson.build
+++ b/meson.build
@@ -361,6 +361,8 @@ if not get_option('tcg').disabled()
tcg_arch = 'i386'
elif config_host['ARCH'] == 'ppc64'
tcg_arch = 'ppc'
+ elif config_host['ARCH'] == 'loongarch64'
+ tcg_arch = 'loongarch64'
endif
add_project_arguments('-iquote', meson.current_source_dir() / 'tcg' / tcg_arch,
language: ['c', 'cpp', 'objc'])
diff --git a/pc-bios/meson.build b/pc-bios/meson.build
index a09ca4d03..60009bd89 100644
--- a/pc-bios/meson.build
+++ b/pc-bios/meson.build
@@ -84,6 +84,7 @@ blobs = files(
'opensbi-riscv64-generic-fw_dynamic.elf',
'npcm7xx_bootrom.bin',
'loongarch_bios.bin',
+ 'loongarch_vars.bin',
)
if get_option('install_blobs')
diff --git a/tcg/loongarch64/tcg-insn-defs.c.inc b/tcg/loongarch64/tcg-insn-defs.c.inc
new file mode 100644
index 000000000..d16257185
--- /dev/null
+++ b/tcg/loongarch64/tcg-insn-defs.c.inc
@@ -0,0 +1,979 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * LoongArch instruction formats, opcodes, and encoders for TCG use.
+ *
+ * This file is auto-generated by genqemutcgdefs from
+ * https://github.com/loongson-community/loongarch-opcodes,
+ * from commit 961f0c60f5b63e574d785995600c71ad5413fdc4.
+ * DO NOT EDIT.
+ */
+
+typedef enum {
+ OPC_CLZ_W = 0x00001400,
+ OPC_CTZ_W = 0x00001c00,
+ OPC_CLZ_D = 0x00002400,
+ OPC_CTZ_D = 0x00002c00,
+ OPC_REVB_2H = 0x00003000,
+ OPC_REVB_2W = 0x00003800,
+ OPC_REVB_D = 0x00003c00,
+ OPC_SEXT_H = 0x00005800,
+ OPC_SEXT_B = 0x00005c00,
+ OPC_ADD_W = 0x00100000,
+ OPC_ADD_D = 0x00108000,
+ OPC_SUB_W = 0x00110000,
+ OPC_SUB_D = 0x00118000,
+ OPC_SLT = 0x00120000,
+ OPC_SLTU = 0x00128000,
+ OPC_MASKEQZ = 0x00130000,
+ OPC_MASKNEZ = 0x00138000,
+ OPC_NOR = 0x00140000,
+ OPC_AND = 0x00148000,
+ OPC_OR = 0x00150000,
+ OPC_XOR = 0x00158000,
+ OPC_ORN = 0x00160000,
+ OPC_ANDN = 0x00168000,
+ OPC_SLL_W = 0x00170000,
+ OPC_SRL_W = 0x00178000,
+ OPC_SRA_W = 0x00180000,
+ OPC_SLL_D = 0x00188000,
+ OPC_SRL_D = 0x00190000,
+ OPC_SRA_D = 0x00198000,
+ OPC_ROTR_W = 0x001b0000,
+ OPC_ROTR_D = 0x001b8000,
+ OPC_MUL_W = 0x001c0000,
+ OPC_MULH_W = 0x001c8000,
+ OPC_MULH_WU = 0x001d0000,
+ OPC_MUL_D = 0x001d8000,
+ OPC_MULH_D = 0x001e0000,
+ OPC_MULH_DU = 0x001e8000,
+ OPC_DIV_W = 0x00200000,
+ OPC_MOD_W = 0x00208000,
+ OPC_DIV_WU = 0x00210000,
+ OPC_MOD_WU = 0x00218000,
+ OPC_DIV_D = 0x00220000,
+ OPC_MOD_D = 0x00228000,
+ OPC_DIV_DU = 0x00230000,
+ OPC_MOD_DU = 0x00238000,
+ OPC_SLLI_W = 0x00408000,
+ OPC_SLLI_D = 0x00410000,
+ OPC_SRLI_W = 0x00448000,
+ OPC_SRLI_D = 0x00450000,
+ OPC_SRAI_W = 0x00488000,
+ OPC_SRAI_D = 0x00490000,
+ OPC_ROTRI_W = 0x004c8000,
+ OPC_ROTRI_D = 0x004d0000,
+ OPC_BSTRINS_W = 0x00600000,
+ OPC_BSTRPICK_W = 0x00608000,
+ OPC_BSTRINS_D = 0x00800000,
+ OPC_BSTRPICK_D = 0x00c00000,
+ OPC_SLTI = 0x02000000,
+ OPC_SLTUI = 0x02400000,
+ OPC_ADDI_W = 0x02800000,
+ OPC_ADDI_D = 0x02c00000,
+ OPC_CU52I_D = 0x03000000,
+ OPC_ANDI = 0x03400000,
+ OPC_ORI = 0x03800000,
+ OPC_XORI = 0x03c00000,
+ OPC_LU12I_W = 0x14000000,
+ OPC_CU32I_D = 0x16000000,
+ OPC_PCADDU2I = 0x18000000,
+ OPC_PCALAU12I = 0x1a000000,
+ OPC_PCADDU12I = 0x1c000000,
+ OPC_PCADDU18I = 0x1e000000,
+ OPC_LD_B = 0x28000000,
+ OPC_LD_H = 0x28400000,
+ OPC_LD_W = 0x28800000,
+ OPC_LD_D = 0x28c00000,
+ OPC_ST_B = 0x29000000,
+ OPC_ST_H = 0x29400000,
+ OPC_ST_W = 0x29800000,
+ OPC_ST_D = 0x29c00000,
+ OPC_LD_BU = 0x2a000000,
+ OPC_LD_HU = 0x2a400000,
+ OPC_LD_WU = 0x2a800000,
+ OPC_LDX_B = 0x38000000,
+ OPC_LDX_H = 0x38040000,
+ OPC_LDX_W = 0x38080000,
+ OPC_LDX_D = 0x380c0000,
+ OPC_STX_B = 0x38100000,
+ OPC_STX_H = 0x38140000,
+ OPC_STX_W = 0x38180000,
+ OPC_STX_D = 0x381c0000,
+ OPC_LDX_BU = 0x38200000,
+ OPC_LDX_HU = 0x38240000,
+ OPC_LDX_WU = 0x38280000,
+ OPC_DBAR = 0x38720000,
+ OPC_JIRL = 0x4c000000,
+ OPC_B = 0x50000000,
+ OPC_BL = 0x54000000,
+ OPC_BEQ = 0x58000000,
+ OPC_BNE = 0x5c000000,
+ OPC_BGT = 0x60000000,
+ OPC_BLE = 0x64000000,
+ OPC_BGTU = 0x68000000,
+ OPC_BLEU = 0x6c000000,
+} LoongArchInsn;
+
+static int32_t __attribute__((unused))
+encode_d_slot(LoongArchInsn opc, uint32_t d)
+{
+ return opc | d;
+}
+
+static int32_t __attribute__((unused))
+encode_dj_slots(LoongArchInsn opc, uint32_t d, uint32_t j)
+{
+ return opc | d | j << 5;
+}
+
+static int32_t __attribute__((unused))
+encode_djk_slots(LoongArchInsn opc, uint32_t d, uint32_t j, uint32_t k)
+{
+ return opc | d | j << 5 | k << 10;
+}
+
+static int32_t __attribute__((unused))
+encode_djkm_slots(LoongArchInsn opc, uint32_t d, uint32_t j, uint32_t k,
+ uint32_t m)
+{
+ return opc | d | j << 5 | k << 10 | m << 16;
+}
+
+static int32_t __attribute__((unused))
+encode_dk_slots(LoongArchInsn opc, uint32_t d, uint32_t k)
+{
+ return opc | d | k << 10;
+}
+
+static int32_t __attribute__((unused))
+encode_dj_insn(LoongArchInsn opc, TCGReg d, TCGReg j)
+{
+ tcg_debug_assert(d >= 0 && d <= 0x1f);
+ tcg_debug_assert(j >= 0 && j <= 0x1f);
+ return encode_dj_slots(opc, d, j);
+}
+
+static int32_t __attribute__((unused))
+encode_djk_insn(LoongArchInsn opc, TCGReg d, TCGReg j, TCGReg k)
+{
+ tcg_debug_assert(d >= 0 && d <= 0x1f);
+ tcg_debug_assert(j >= 0 && j <= 0x1f);
+ tcg_debug_assert(k >= 0 && k <= 0x1f);
+ return encode_djk_slots(opc, d, j, k);
+}
+
+static int32_t __attribute__((unused))
+encode_djsk12_insn(LoongArchInsn opc, TCGReg d, TCGReg j, int32_t sk12)
+{
+ tcg_debug_assert(d >= 0 && d <= 0x1f);
+ tcg_debug_assert(j >= 0 && j <= 0x1f);
+ tcg_debug_assert(sk12 >= -0x800 && sk12 <= 0x7ff);
+ return encode_djk_slots(opc, d, j, sk12 & 0xfff);
+}
+
+static int32_t __attribute__((unused))
+encode_djsk16_insn(LoongArchInsn opc, TCGReg d, TCGReg j, int32_t sk16)
+{
+ tcg_debug_assert(d >= 0 && d <= 0x1f);
+ tcg_debug_assert(j >= 0 && j <= 0x1f);
+ tcg_debug_assert(sk16 >= -0x8000 && sk16 <= 0x7fff);
+ return encode_djk_slots(opc, d, j, sk16 & 0xffff);
+}
+
+static int32_t __attribute__((unused))
+encode_djuk12_insn(LoongArchInsn opc, TCGReg d, TCGReg j, uint32_t uk12)
+{
+ tcg_debug_assert(d >= 0 && d <= 0x1f);
+ tcg_debug_assert(j >= 0 && j <= 0x1f);
+ tcg_debug_assert(uk12 <= 0xfff);
+ return encode_djk_slots(opc, d, j, uk12);
+}
+
+static int32_t __attribute__((unused))
+encode_djuk5_insn(LoongArchInsn opc, TCGReg d, TCGReg j, uint32_t uk5)
+{
+ tcg_debug_assert(d >= 0 && d <= 0x1f);
+ tcg_debug_assert(j >= 0 && j <= 0x1f);
+ tcg_debug_assert(uk5 <= 0x1f);
+ return encode_djk_slots(opc, d, j, uk5);
+}
+
+static int32_t __attribute__((unused))
+encode_djuk5um5_insn(LoongArchInsn opc, TCGReg d, TCGReg j, uint32_t uk5,
+ uint32_t um5)
+{
+ tcg_debug_assert(d >= 0 && d <= 0x1f);
+ tcg_debug_assert(j >= 0 && j <= 0x1f);
+ tcg_debug_assert(uk5 <= 0x1f);
+ tcg_debug_assert(um5 <= 0x1f);
+ return encode_djkm_slots(opc, d, j, uk5, um5);
+}
+
+static int32_t __attribute__((unused))
+encode_djuk6_insn(LoongArchInsn opc, TCGReg d, TCGReg j, uint32_t uk6)
+{
+ tcg_debug_assert(d >= 0 && d <= 0x1f);
+ tcg_debug_assert(j >= 0 && j <= 0x1f);
+ tcg_debug_assert(uk6 <= 0x3f);
+ return encode_djk_slots(opc, d, j, uk6);
+}
+
+static int32_t __attribute__((unused))
+encode_djuk6um6_insn(LoongArchInsn opc, TCGReg d, TCGReg j, uint32_t uk6,
+ uint32_t um6)
+{
+ tcg_debug_assert(d >= 0 && d <= 0x1f);
+ tcg_debug_assert(j >= 0 && j <= 0x1f);
+ tcg_debug_assert(uk6 <= 0x3f);
+ tcg_debug_assert(um6 <= 0x3f);
+ return encode_djkm_slots(opc, d, j, uk6, um6);
+}
+
+static int32_t __attribute__((unused))
+encode_dsj20_insn(LoongArchInsn opc, TCGReg d, int32_t sj20)
+{
+ tcg_debug_assert(d >= 0 && d <= 0x1f);
+ tcg_debug_assert(sj20 >= -0x80000 && sj20 <= 0x7ffff);
+ return encode_dj_slots(opc, d, sj20 & 0xfffff);
+}
+
+static int32_t __attribute__((unused))
+encode_sd10k16_insn(LoongArchInsn opc, int32_t sd10k16)
+{
+ tcg_debug_assert(sd10k16 >= -0x2000000 && sd10k16 <= 0x1ffffff);
+ return encode_dk_slots(opc, (sd10k16 >> 16) & 0x3ff, sd10k16 & 0xffff);
+}
+
+static int32_t __attribute__((unused))
+encode_ud15_insn(LoongArchInsn opc, uint32_t ud15)
+{
+ tcg_debug_assert(ud15 <= 0x7fff);
+ return encode_d_slot(opc, ud15);
+}
+
+/* Emits the `clz.w d, j` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_clz_w(TCGContext *s, TCGReg d, TCGReg j)
+{
+ tcg_out32(s, encode_dj_insn(OPC_CLZ_W, d, j));
+}
+
+/* Emits the `ctz.w d, j` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_ctz_w(TCGContext *s, TCGReg d, TCGReg j)
+{
+ tcg_out32(s, encode_dj_insn(OPC_CTZ_W, d, j));
+}
+
+/* Emits the `clz.d d, j` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_clz_d(TCGContext *s, TCGReg d, TCGReg j)
+{
+ tcg_out32(s, encode_dj_insn(OPC_CLZ_D, d, j));
+}
+
+/* Emits the `ctz.d d, j` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_ctz_d(TCGContext *s, TCGReg d, TCGReg j)
+{
+ tcg_out32(s, encode_dj_insn(OPC_CTZ_D, d, j));
+}
+
+/* Emits the `revb.2h d, j` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_revb_2h(TCGContext *s, TCGReg d, TCGReg j)
+{
+ tcg_out32(s, encode_dj_insn(OPC_REVB_2H, d, j));
+}
+
+/* Emits the `revb.2w d, j` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_revb_2w(TCGContext *s, TCGReg d, TCGReg j)
+{
+ tcg_out32(s, encode_dj_insn(OPC_REVB_2W, d, j));
+}
+
+/* Emits the `revb.d d, j` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_revb_d(TCGContext *s, TCGReg d, TCGReg j)
+{
+ tcg_out32(s, encode_dj_insn(OPC_REVB_D, d, j));
+}
+
+/* Emits the `sext.h d, j` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_sext_h(TCGContext *s, TCGReg d, TCGReg j)
+{
+ tcg_out32(s, encode_dj_insn(OPC_SEXT_H, d, j));
+}
+
+/* Emits the `sext.b d, j` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_sext_b(TCGContext *s, TCGReg d, TCGReg j)
+{
+ tcg_out32(s, encode_dj_insn(OPC_SEXT_B, d, j));
+}
+
+/* Emits the `add.w d, j, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_add_w(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
+{
+ tcg_out32(s, encode_djk_insn(OPC_ADD_W, d, j, k));
+}
+
+/* Emits the `add.d d, j, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_add_d(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
+{
+ tcg_out32(s, encode_djk_insn(OPC_ADD_D, d, j, k));
+}
+
+/* Emits the `sub.w d, j, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_sub_w(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
+{
+ tcg_out32(s, encode_djk_insn(OPC_SUB_W, d, j, k));
+}
+
+/* Emits the `sub.d d, j, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_sub_d(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
+{
+ tcg_out32(s, encode_djk_insn(OPC_SUB_D, d, j, k));
+}
+
+/* Emits the `slt d, j, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_slt(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
+{
+ tcg_out32(s, encode_djk_insn(OPC_SLT, d, j, k));
+}
+
+/* Emits the `sltu d, j, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_sltu(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
+{
+ tcg_out32(s, encode_djk_insn(OPC_SLTU, d, j, k));
+}
+
+/* Emits the `maskeqz d, j, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_maskeqz(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
+{
+ tcg_out32(s, encode_djk_insn(OPC_MASKEQZ, d, j, k));
+}
+
+/* Emits the `masknez d, j, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_masknez(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
+{
+ tcg_out32(s, encode_djk_insn(OPC_MASKNEZ, d, j, k));
+}
+
+/* Emits the `nor d, j, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_nor(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
+{
+ tcg_out32(s, encode_djk_insn(OPC_NOR, d, j, k));
+}
+
+/* Emits the `and d, j, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_and(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
+{
+ tcg_out32(s, encode_djk_insn(OPC_AND, d, j, k));
+}
+
+/* Emits the `or d, j, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_or(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
+{
+ tcg_out32(s, encode_djk_insn(OPC_OR, d, j, k));
+}
+
+/* Emits the `xor d, j, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xor(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
+{
+ tcg_out32(s, encode_djk_insn(OPC_XOR, d, j, k));
+}
+
+/* Emits the `orn d, j, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_orn(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
+{
+ tcg_out32(s, encode_djk_insn(OPC_ORN, d, j, k));
+}
+
+/* Emits the `andn d, j, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_andn(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
+{
+ tcg_out32(s, encode_djk_insn(OPC_ANDN, d, j, k));
+}
+
+/* Emits the `sll.w d, j, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_sll_w(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
+{
+ tcg_out32(s, encode_djk_insn(OPC_SLL_W, d, j, k));
+}
+
+/* Emits the `srl.w d, j, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_srl_w(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
+{
+ tcg_out32(s, encode_djk_insn(OPC_SRL_W, d, j, k));
+}
+
+/* Emits the `sra.w d, j, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_sra_w(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
+{
+ tcg_out32(s, encode_djk_insn(OPC_SRA_W, d, j, k));
+}
+
+/* Emits the `sll.d d, j, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_sll_d(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
+{
+ tcg_out32(s, encode_djk_insn(OPC_SLL_D, d, j, k));
+}
+
+/* Emits the `srl.d d, j, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_srl_d(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
+{
+ tcg_out32(s, encode_djk_insn(OPC_SRL_D, d, j, k));
+}
+
+/* Emits the `sra.d d, j, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_sra_d(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
+{
+ tcg_out32(s, encode_djk_insn(OPC_SRA_D, d, j, k));
+}
+
+/* Emits the `rotr.w d, j, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_rotr_w(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
+{
+ tcg_out32(s, encode_djk_insn(OPC_ROTR_W, d, j, k));
+}
+
+/* Emits the `rotr.d d, j, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_rotr_d(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
+{
+ tcg_out32(s, encode_djk_insn(OPC_ROTR_D, d, j, k));
+}
+
+/* Emits the `mul.w d, j, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_mul_w(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
+{
+ tcg_out32(s, encode_djk_insn(OPC_MUL_W, d, j, k));
+}
+
+/* Emits the `mulh.w d, j, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_mulh_w(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
+{
+ tcg_out32(s, encode_djk_insn(OPC_MULH_W, d, j, k));
+}
+
+/* Emits the `mulh.wu d, j, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_mulh_wu(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
+{
+ tcg_out32(s, encode_djk_insn(OPC_MULH_WU, d, j, k));
+}
+
+/* Emits the `mul.d d, j, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_mul_d(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
+{
+ tcg_out32(s, encode_djk_insn(OPC_MUL_D, d, j, k));
+}
+
+/* Emits the `mulh.d d, j, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_mulh_d(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
+{
+ tcg_out32(s, encode_djk_insn(OPC_MULH_D, d, j, k));
+}
+
+/* Emits the `mulh.du d, j, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_mulh_du(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
+{
+ tcg_out32(s, encode_djk_insn(OPC_MULH_DU, d, j, k));
+}
+
+/* Emits the `div.w d, j, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_div_w(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
+{
+ tcg_out32(s, encode_djk_insn(OPC_DIV_W, d, j, k));
+}
+
+/* Emits the `mod.w d, j, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_mod_w(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
+{
+ tcg_out32(s, encode_djk_insn(OPC_MOD_W, d, j, k));
+}
+
+/* Emits the `div.wu d, j, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_div_wu(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
+{
+ tcg_out32(s, encode_djk_insn(OPC_DIV_WU, d, j, k));
+}
+
+/* Emits the `mod.wu d, j, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_mod_wu(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
+{
+ tcg_out32(s, encode_djk_insn(OPC_MOD_WU, d, j, k));
+}
+
+/* Emits the `div.d d, j, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_div_d(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
+{
+ tcg_out32(s, encode_djk_insn(OPC_DIV_D, d, j, k));
+}
+
+/* Emits the `mod.d d, j, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_mod_d(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
+{
+ tcg_out32(s, encode_djk_insn(OPC_MOD_D, d, j, k));
+}
+
+/* Emits the `div.du d, j, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_div_du(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
+{
+ tcg_out32(s, encode_djk_insn(OPC_DIV_DU, d, j, k));
+}
+
+/* Emits the `mod.du d, j, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_mod_du(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
+{
+ tcg_out32(s, encode_djk_insn(OPC_MOD_DU, d, j, k));
+}
+
+/* Emits the `slli.w d, j, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_slli_w(TCGContext *s, TCGReg d, TCGReg j, uint32_t uk5)
+{
+ tcg_out32(s, encode_djuk5_insn(OPC_SLLI_W, d, j, uk5));
+}
+
+/* Emits the `slli.d d, j, uk6` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_slli_d(TCGContext *s, TCGReg d, TCGReg j, uint32_t uk6)
+{
+ tcg_out32(s, encode_djuk6_insn(OPC_SLLI_D, d, j, uk6));
+}
+
+/* Emits the `srli.w d, j, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_srli_w(TCGContext *s, TCGReg d, TCGReg j, uint32_t uk5)
+{
+ tcg_out32(s, encode_djuk5_insn(OPC_SRLI_W, d, j, uk5));
+}
+
+/* Emits the `srli.d d, j, uk6` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_srli_d(TCGContext *s, TCGReg d, TCGReg j, uint32_t uk6)
+{
+ tcg_out32(s, encode_djuk6_insn(OPC_SRLI_D, d, j, uk6));
+}
+
+/* Emits the `srai.w d, j, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_srai_w(TCGContext *s, TCGReg d, TCGReg j, uint32_t uk5)
+{
+ tcg_out32(s, encode_djuk5_insn(OPC_SRAI_W, d, j, uk5));
+}
+
+/* Emits the `srai.d d, j, uk6` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_srai_d(TCGContext *s, TCGReg d, TCGReg j, uint32_t uk6)
+{
+ tcg_out32(s, encode_djuk6_insn(OPC_SRAI_D, d, j, uk6));
+}
+
+/* Emits the `rotri.w d, j, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_rotri_w(TCGContext *s, TCGReg d, TCGReg j, uint32_t uk5)
+{
+ tcg_out32(s, encode_djuk5_insn(OPC_ROTRI_W, d, j, uk5));
+}
+
+/* Emits the `rotri.d d, j, uk6` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_rotri_d(TCGContext *s, TCGReg d, TCGReg j, uint32_t uk6)
+{
+ tcg_out32(s, encode_djuk6_insn(OPC_ROTRI_D, d, j, uk6));
+}
+
+/* Emits the `bstrins.w d, j, uk5, um5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_bstrins_w(TCGContext *s, TCGReg d, TCGReg j, uint32_t uk5,
+ uint32_t um5)
+{
+ tcg_out32(s, encode_djuk5um5_insn(OPC_BSTRINS_W, d, j, uk5, um5));
+}
+
+/* Emits the `bstrpick.w d, j, uk5, um5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_bstrpick_w(TCGContext *s, TCGReg d, TCGReg j, uint32_t uk5,
+ uint32_t um5)
+{
+ tcg_out32(s, encode_djuk5um5_insn(OPC_BSTRPICK_W, d, j, uk5, um5));
+}
+
+/* Emits the `bstrins.d d, j, uk6, um6` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_bstrins_d(TCGContext *s, TCGReg d, TCGReg j, uint32_t uk6,
+ uint32_t um6)
+{
+ tcg_out32(s, encode_djuk6um6_insn(OPC_BSTRINS_D, d, j, uk6, um6));
+}
+
+/* Emits the `bstrpick.d d, j, uk6, um6` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_bstrpick_d(TCGContext *s, TCGReg d, TCGReg j, uint32_t uk6,
+ uint32_t um6)
+{
+ tcg_out32(s, encode_djuk6um6_insn(OPC_BSTRPICK_D, d, j, uk6, um6));
+}
+
+/* Emits the `slti d, j, sk12` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_slti(TCGContext *s, TCGReg d, TCGReg j, int32_t sk12)
+{
+ tcg_out32(s, encode_djsk12_insn(OPC_SLTI, d, j, sk12));
+}
+
+/* Emits the `sltui d, j, sk12` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_sltui(TCGContext *s, TCGReg d, TCGReg j, int32_t sk12)
+{
+ tcg_out32(s, encode_djsk12_insn(OPC_SLTUI, d, j, sk12));
+}
+
+/* Emits the `addi.w d, j, sk12` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_addi_w(TCGContext *s, TCGReg d, TCGReg j, int32_t sk12)
+{
+ tcg_out32(s, encode_djsk12_insn(OPC_ADDI_W, d, j, sk12));
+}
+
+/* Emits the `addi.d d, j, sk12` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_addi_d(TCGContext *s, TCGReg d, TCGReg j, int32_t sk12)
+{
+ tcg_out32(s, encode_djsk12_insn(OPC_ADDI_D, d, j, sk12));
+}
+
+/* Emits the `cu52i.d d, j, sk12` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_cu52i_d(TCGContext *s, TCGReg d, TCGReg j, int32_t sk12)
+{
+ tcg_out32(s, encode_djsk12_insn(OPC_CU52I_D, d, j, sk12));
+}
+
+/* Emits the `andi d, j, uk12` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_andi(TCGContext *s, TCGReg d, TCGReg j, uint32_t uk12)
+{
+ tcg_out32(s, encode_djuk12_insn(OPC_ANDI, d, j, uk12));
+}
+
+/* Emits the `ori d, j, uk12` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_ori(TCGContext *s, TCGReg d, TCGReg j, uint32_t uk12)
+{
+ tcg_out32(s, encode_djuk12_insn(OPC_ORI, d, j, uk12));
+}
+
+/* Emits the `xori d, j, uk12` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xori(TCGContext *s, TCGReg d, TCGReg j, uint32_t uk12)
+{
+ tcg_out32(s, encode_djuk12_insn(OPC_XORI, d, j, uk12));
+}
+
+/* Emits the `lu12i.w d, sj20` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_lu12i_w(TCGContext *s, TCGReg d, int32_t sj20)
+{
+ tcg_out32(s, encode_dsj20_insn(OPC_LU12I_W, d, sj20));
+}
+
+/* Emits the `cu32i.d d, sj20` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_cu32i_d(TCGContext *s, TCGReg d, int32_t sj20)
+{
+ tcg_out32(s, encode_dsj20_insn(OPC_CU32I_D, d, sj20));
+}
+
+/* Emits the `pcaddu2i d, sj20` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_pcaddu2i(TCGContext *s, TCGReg d, int32_t sj20)
+{
+ tcg_out32(s, encode_dsj20_insn(OPC_PCADDU2I, d, sj20));
+}
+
+/* Emits the `pcalau12i d, sj20` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_pcalau12i(TCGContext *s, TCGReg d, int32_t sj20)
+{
+ tcg_out32(s, encode_dsj20_insn(OPC_PCALAU12I, d, sj20));
+}
+
+/* Emits the `pcaddu12i d, sj20` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_pcaddu12i(TCGContext *s, TCGReg d, int32_t sj20)
+{
+ tcg_out32(s, encode_dsj20_insn(OPC_PCADDU12I, d, sj20));
+}
+
+/* Emits the `pcaddu18i d, sj20` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_pcaddu18i(TCGContext *s, TCGReg d, int32_t sj20)
+{
+ tcg_out32(s, encode_dsj20_insn(OPC_PCADDU18I, d, sj20));
+}
+
+/* Emits the `ld.b d, j, sk12` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_ld_b(TCGContext *s, TCGReg d, TCGReg j, int32_t sk12)
+{
+ tcg_out32(s, encode_djsk12_insn(OPC_LD_B, d, j, sk12));
+}
+
+/* Emits the `ld.h d, j, sk12` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_ld_h(TCGContext *s, TCGReg d, TCGReg j, int32_t sk12)
+{
+ tcg_out32(s, encode_djsk12_insn(OPC_LD_H, d, j, sk12));
+}
+
+/* Emits the `ld.w d, j, sk12` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_ld_w(TCGContext *s, TCGReg d, TCGReg j, int32_t sk12)
+{
+ tcg_out32(s, encode_djsk12_insn(OPC_LD_W, d, j, sk12));
+}
+
+/* Emits the `ld.d d, j, sk12` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_ld_d(TCGContext *s, TCGReg d, TCGReg j, int32_t sk12)
+{
+ tcg_out32(s, encode_djsk12_insn(OPC_LD_D, d, j, sk12));
+}
+
+/* Emits the `st.b d, j, sk12` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_st_b(TCGContext *s, TCGReg d, TCGReg j, int32_t sk12)
+{
+ tcg_out32(s, encode_djsk12_insn(OPC_ST_B, d, j, sk12));
+}
+
+/* Emits the `st.h d, j, sk12` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_st_h(TCGContext *s, TCGReg d, TCGReg j, int32_t sk12)
+{
+ tcg_out32(s, encode_djsk12_insn(OPC_ST_H, d, j, sk12));
+}
+
+/* Emits the `st.w d, j, sk12` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_st_w(TCGContext *s, TCGReg d, TCGReg j, int32_t sk12)
+{
+ tcg_out32(s, encode_djsk12_insn(OPC_ST_W, d, j, sk12));
+}
+
+/* Emits the `st.d d, j, sk12` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_st_d(TCGContext *s, TCGReg d, TCGReg j, int32_t sk12)
+{
+ tcg_out32(s, encode_djsk12_insn(OPC_ST_D, d, j, sk12));
+}
+
+/* Emits the `ld.bu d, j, sk12` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_ld_bu(TCGContext *s, TCGReg d, TCGReg j, int32_t sk12)
+{
+ tcg_out32(s, encode_djsk12_insn(OPC_LD_BU, d, j, sk12));
+}
+
+/* Emits the `ld.hu d, j, sk12` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_ld_hu(TCGContext *s, TCGReg d, TCGReg j, int32_t sk12)
+{
+ tcg_out32(s, encode_djsk12_insn(OPC_LD_HU, d, j, sk12));
+}
+
+/* Emits the `ld.wu d, j, sk12` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_ld_wu(TCGContext *s, TCGReg d, TCGReg j, int32_t sk12)
+{
+ tcg_out32(s, encode_djsk12_insn(OPC_LD_WU, d, j, sk12));
+}
+
+/* Emits the `ldx.b d, j, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_ldx_b(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
+{
+ tcg_out32(s, encode_djk_insn(OPC_LDX_B, d, j, k));
+}
+
+/* Emits the `ldx.h d, j, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_ldx_h(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
+{
+ tcg_out32(s, encode_djk_insn(OPC_LDX_H, d, j, k));
+}
+
+/* Emits the `ldx.w d, j, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_ldx_w(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
+{
+ tcg_out32(s, encode_djk_insn(OPC_LDX_W, d, j, k));
+}
+
+/* Emits the `ldx.d d, j, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_ldx_d(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
+{
+ tcg_out32(s, encode_djk_insn(OPC_LDX_D, d, j, k));
+}
+
+/* Emits the `stx.b d, j, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_stx_b(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
+{
+ tcg_out32(s, encode_djk_insn(OPC_STX_B, d, j, k));
+}
+
+/* Emits the `stx.h d, j, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_stx_h(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
+{
+ tcg_out32(s, encode_djk_insn(OPC_STX_H, d, j, k));
+}
+
+/* Emits the `stx.w d, j, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_stx_w(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
+{
+ tcg_out32(s, encode_djk_insn(OPC_STX_W, d, j, k));
+}
+
+/* Emits the `stx.d d, j, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_stx_d(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
+{
+ tcg_out32(s, encode_djk_insn(OPC_STX_D, d, j, k));
+}
+
+/* Emits the `ldx.bu d, j, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_ldx_bu(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
+{
+ tcg_out32(s, encode_djk_insn(OPC_LDX_BU, d, j, k));
+}
+
+/* Emits the `ldx.hu d, j, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_ldx_hu(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
+{
+ tcg_out32(s, encode_djk_insn(OPC_LDX_HU, d, j, k));
+}
+
+/* Emits the `ldx.wu d, j, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_ldx_wu(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
+{
+ tcg_out32(s, encode_djk_insn(OPC_LDX_WU, d, j, k));
+}
+
+/* Emits the `dbar ud15` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_dbar(TCGContext *s, uint32_t ud15)
+{
+ tcg_out32(s, encode_ud15_insn(OPC_DBAR, ud15));
+}
+
+/* Emits the `jirl d, j, sk16` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_jirl(TCGContext *s, TCGReg d, TCGReg j, int32_t sk16)
+{
+ tcg_out32(s, encode_djsk16_insn(OPC_JIRL, d, j, sk16));
+}
+
+/* Emits the `b sd10k16` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_b(TCGContext *s, int32_t sd10k16)
+{
+ tcg_out32(s, encode_sd10k16_insn(OPC_B, sd10k16));
+}
+
+/* Emits the `bl sd10k16` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_bl(TCGContext *s, int32_t sd10k16)
+{
+ tcg_out32(s, encode_sd10k16_insn(OPC_BL, sd10k16));
+}
+
+/* Emits the `beq d, j, sk16` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_beq(TCGContext *s, TCGReg d, TCGReg j, int32_t sk16)
+{
+ tcg_out32(s, encode_djsk16_insn(OPC_BEQ, d, j, sk16));
+}
+
+/* Emits the `bne d, j, sk16` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_bne(TCGContext *s, TCGReg d, TCGReg j, int32_t sk16)
+{
+ tcg_out32(s, encode_djsk16_insn(OPC_BNE, d, j, sk16));
+}
+
+/* Emits the `bgt d, j, sk16` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_bgt(TCGContext *s, TCGReg d, TCGReg j, int32_t sk16)
+{
+ tcg_out32(s, encode_djsk16_insn(OPC_BGT, d, j, sk16));
+}
+
+/* Emits the `ble d, j, sk16` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_ble(TCGContext *s, TCGReg d, TCGReg j, int32_t sk16)
+{
+ tcg_out32(s, encode_djsk16_insn(OPC_BLE, d, j, sk16));
+}
+
+/* Emits the `bgtu d, j, sk16` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_bgtu(TCGContext *s, TCGReg d, TCGReg j, int32_t sk16)
+{
+ tcg_out32(s, encode_djsk16_insn(OPC_BGTU, d, j, sk16));
+}
+
+/* Emits the `bleu d, j, sk16` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_bleu(TCGContext *s, TCGReg d, TCGReg j, int32_t sk16)
+{
+ tcg_out32(s, encode_djsk16_insn(OPC_BLEU, d, j, sk16));
+}
+
+/* End of generated code. */
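
The generated encoders above simply OR register and immediate slots into the fixed opcode bit patterns of the enum. A worked example (an annotation, not part of the generated file), using the LP64 numbering in which $a0/$a1/$a2 are registers 4/5/6:

    /* Encoding `add.d $a0, $a1, $a2`, i.e. d = 4, j = 5, k = 6: */
    encode_djk_slots(OPC_ADD_D, 4, 5, 6)
        == 0x00108000 | 0x004 | (5 << 5) | (6 << 10)
        == 0x00108000 | 0x004 | 0x0a0   | 0x1800
        == 0x001098a4
    /* so tcg_out_opc_add_d() emits the 32-bit word 0x001098a4. */
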
diff --git a/tcg/loongarch64/tcg-target-con-set.h b/tcg/loongarch64/tcg-target-con-set.h
new file mode 100644
index 000000000..349c67268
--- /dev/null
+++ b/tcg/loongarch64/tcg-target-con-set.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Define LoongArch target-specific constraint sets.
+ *
+ * Copyright (c) 2021 WANG Xuerui <git@xen0n.name>
+ *
+ * Based on tcg/riscv/tcg-target-con-set.h
+ *
+ * Copyright (c) 2021 Linaro
+ */
+
+/*
+ * C_On_Im(...) defines a constraint set with <n> outputs and <m> inputs.
+ * Each operand should be a sequence of constraint letters as defined by
+ * tcg-target-con-str.h; the constraint combination is inclusive or.
+ */
+C_O0_I1(r)
+C_O0_I2(rZ, r)
+C_O0_I2(rZ, rZ)
+C_O0_I2(LZ, L)
+C_O1_I1(r, r)
+C_O1_I1(r, L)
+C_O1_I2(r, r, rC)
+C_O1_I2(r, r, ri)
+C_O1_I2(r, r, rI)
+C_O1_I2(r, r, rU)
+C_O1_I2(r, r, rW)
+C_O1_I2(r, r, rZ)
+C_O1_I2(r, 0, rZ)
+C_O1_I2(r, rZ, rN)
+C_O1_I2(r, rZ, rZ)
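
How these lines read in practice (an annotation, not part of the header):

    /* C_O1_I2(r, r, rI) declares ops with one output and two inputs; the
     * output and first input accept any general register ('r'), and the
     * second input accepts a register or a signed 12-bit constant ('r' or
     * 'I', per the letters defined in tcg-target-con-str.h). */
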
diff --git a/tcg/loongarch64/tcg-target-con-str.h b/tcg/loongarch64/tcg-target-con-str.h
new file mode 100644
index 000000000..c3986a4fd
--- /dev/null
+++ b/tcg/loongarch64/tcg-target-con-str.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Define LoongArch target-specific operand constraints.
+ *
+ * Copyright (c) 2021 WANG Xuerui <git@xen0n.name>
+ *
+ * Based on tcg/riscv/tcg-target-con-str.h
+ *
+ * Copyright (c) 2021 Linaro
+ */
+
+/*
+ * Define constraint letters for register sets:
+ * REGS(letter, register_mask)
+ */
+REGS('r', ALL_GENERAL_REGS)
+REGS('L', ALL_GENERAL_REGS & ~SOFTMMU_RESERVE_REGS)
+
+/*
+ * Define constraint letters for constants:
+ * CONST(letter, TCG_CT_CONST_* bit set)
+ */
+CONST('I', TCG_CT_CONST_S12)
+CONST('N', TCG_CT_CONST_N12)
+CONST('U', TCG_CT_CONST_U12)
+CONST('Z', TCG_CT_CONST_ZERO)
+CONST('C', TCG_CT_CONST_C12)
+CONST('W', TCG_CT_CONST_WSZ)
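
These letters tie into the constant matcher defined later in this patch: an operand constrained with 'I' makes the register allocator call tcg_target_const_match() with TCG_CT_CONST_S12 set, and the value is accepted only if it survives 12-bit sign-extension (the test below is quoted from tcg-target.c.inc):

    if ((ct & TCG_CT_CONST_S12) && val == sextreg(val, 0, 12)) {
        return true;
    }
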
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
new file mode 100644
index 000000000..9b53549ed
--- /dev/null
+++ b/tcg/loongarch64/tcg-target.c.inc
@@ -0,0 +1,1744 @@
+/*
+ * Tiny Code Generator for QEMU
+ *
+ * Copyright (c) 2021 WANG Xuerui <git@xen0n.name>
+ *
+ * Based on tcg/riscv/tcg-target.c.inc
+ *
+ * Copyright (c) 2018 SiFive, Inc
+ * Copyright (c) 2008-2009 Arnaud Patard <arnaud.patard@rtp-net.org>
+ * Copyright (c) 2009 Aurelien Jarno <aurelien@aurel32.net>
+ * Copyright (c) 2008 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "../tcg-ldst.c.inc"
+
+#ifdef CONFIG_DEBUG_TCG
+static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
+ "zero",
+ "ra",
+ "tp",
+ "sp",
+ "a0",
+ "a1",
+ "a2",
+ "a3",
+ "a4",
+ "a5",
+ "a6",
+ "a7",
+ "t0",
+ "t1",
+ "t2",
+ "t3",
+ "t4",
+ "t5",
+ "t6",
+ "t7",
+ "t8",
+ "r21", /* reserved in the LP64* ABI, hence no ABI name */
+ "s9",
+ "s0",
+ "s1",
+ "s2",
+ "s3",
+ "s4",
+ "s5",
+ "s6",
+ "s7",
+ "s8"
+};
+#endif
+
+static const int tcg_target_reg_alloc_order[] = {
+ /* Registers preserved across calls */
+ /* TCG_REG_S0 reserved for TCG_AREG0 */
+ TCG_REG_S1,
+ TCG_REG_S2,
+ TCG_REG_S3,
+ TCG_REG_S4,
+ TCG_REG_S5,
+ TCG_REG_S6,
+ TCG_REG_S7,
+ TCG_REG_S8,
+ TCG_REG_S9,
+
+ /* Registers (potentially) clobbered across calls */
+ TCG_REG_T0,
+ TCG_REG_T1,
+ TCG_REG_T2,
+ TCG_REG_T3,
+ TCG_REG_T4,
+ TCG_REG_T5,
+ TCG_REG_T6,
+ TCG_REG_T7,
+ TCG_REG_T8,
+
+ /* Argument registers, opposite order of allocation. */
+ TCG_REG_A7,
+ TCG_REG_A6,
+ TCG_REG_A5,
+ TCG_REG_A4,
+ TCG_REG_A3,
+ TCG_REG_A2,
+ TCG_REG_A1,
+ TCG_REG_A0,
+};
+
+static const int tcg_target_call_iarg_regs[] = {
+ TCG_REG_A0,
+ TCG_REG_A1,
+ TCG_REG_A2,
+ TCG_REG_A3,
+ TCG_REG_A4,
+ TCG_REG_A5,
+ TCG_REG_A6,
+ TCG_REG_A7,
+};
+
+static const int tcg_target_call_oarg_regs[] = {
+ TCG_REG_A0,
+ TCG_REG_A1,
+};
+
+#ifndef CONFIG_SOFTMMU
+#define USE_GUEST_BASE (guest_base != 0)
+#define TCG_GUEST_BASE_REG TCG_REG_S1
+#endif
+
+#define TCG_CT_CONST_ZERO 0x100
+#define TCG_CT_CONST_S12 0x200
+#define TCG_CT_CONST_N12 0x400
+#define TCG_CT_CONST_U12 0x800
+#define TCG_CT_CONST_C12 0x1000
+#define TCG_CT_CONST_WSZ 0x2000
+
+#define ALL_GENERAL_REGS MAKE_64BIT_MASK(0, 32)
+/*
+ * For softmmu, we need to avoid conflicts with the first 5
+ * argument registers to call the helper. Some of these are
+ * also used for the tlb lookup.
+ */
+#ifdef CONFIG_SOFTMMU
+#define SOFTMMU_RESERVE_REGS MAKE_64BIT_MASK(TCG_REG_A0, 5)
+#else
+#define SOFTMMU_RESERVE_REGS 0
+#endif
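
A worked expansion (an annotation, assuming the register enum follows the naming order shown in tcg_target_reg_names, i.e. TCG_REG_A0 is register 4):

    /* MAKE_64BIT_MASK(TCG_REG_A0, 5) == 0x1f << 4 == 0x1f0, reserving
     * a0..a4 under softmmu; the 'L' constraint therefore means "any
     * general register except a0..a4". */
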
+
+
+static inline tcg_target_long sextreg(tcg_target_long val, int pos, int len)
+{
+ return sextract64(val, pos, len);
+}
+
+/* test if a constant matches the constraint */
+static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
+{
+ if (ct & TCG_CT_CONST) {
+ return true;
+ }
+ if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
+ return true;
+ }
+ if ((ct & TCG_CT_CONST_S12) && val == sextreg(val, 0, 12)) {
+ return true;
+ }
+ if ((ct & TCG_CT_CONST_N12) && -val == sextreg(-val, 0, 12)) {
+ return true;
+ }
+ if ((ct & TCG_CT_CONST_U12) && val >= 0 && val <= 0xfff) {
+ return true;
+ }
+ if ((ct & TCG_CT_CONST_C12) && ~val >= 0 && ~val <= 0xfff) {
+ return true;
+ }
+ if ((ct & TCG_CT_CONST_WSZ) && val == (type == TCG_TYPE_I32 ? 32 : 64)) {
+ return true;
+ }
+ return false;
+}
+
+/*
+ * Relocations
+ */
+
+/*
+ * Relocation records as defined in the LoongArch ELF psABI v1.00 are way too
+ * complicated; a whopping stack machine is needed to stuff the fields; at
+ * the very least one SOP_PUSH and one SOP_POP (of the correct format) are
+ * needed.
+ *
+ * Hence, define our own simpler relocation types. Numbers are chosen so as
+ * not to collide with potential future additions to the true ELF relocation
+ * type enum.
+ */
+
+/* Field Sk16, shifted right by 2; suitable for conditional jumps */
+#define R_LOONGARCH_BR_SK16 256
+/* Field Sd10k16, shifted right by 2; suitable for B and BL */
+#define R_LOONGARCH_BR_SD10K16 257
+
+static bool reloc_br_sk16(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
+{
+ const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
+ intptr_t offset = (intptr_t)target - (intptr_t)src_rx;
+
+ tcg_debug_assert((offset & 3) == 0);
+ offset >>= 2;
+ if (offset == sextreg(offset, 0, 16)) {
+ *src_rw = deposit64(*src_rw, 10, 16, offset);
+ return true;
+ }
+
+ return false;
+}
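
A worked example (an annotation): for a conditional branch whose target lies 8 bytes ahead, offset == 8 and offset >> 2 == 2, which fits in 16 signed bits, so deposit64(*src_rw, 10, 16, 2) patches the instruction-count offset into bits [25:10] of the insn -- the Sk16 slot used by BEQ, BNE and the other compare-and-branch forms.
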
+
+static bool reloc_br_sd10k16(tcg_insn_unit *src_rw,
+ const tcg_insn_unit *target)
+{
+ const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
+ intptr_t offset = (intptr_t)target - (intptr_t)src_rx;
+
+ tcg_debug_assert((offset & 3) == 0);
+ offset >>= 2;
+ if (offset == sextreg(offset, 0, 26)) {
+ *src_rw = deposit64(*src_rw, 0, 10, offset >> 16); /* slot d10 */
+ *src_rw = deposit64(*src_rw, 10, 16, offset); /* slot k16 */
+ return true;
+ }
+
+ return false;
+}
+
+static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
+ intptr_t value, intptr_t addend)
+{
+ tcg_debug_assert(addend == 0);
+ switch (type) {
+ case R_LOONGARCH_BR_SK16:
+ return reloc_br_sk16(code_ptr, (tcg_insn_unit *)value);
+ case R_LOONGARCH_BR_SD10K16:
+ return reloc_br_sd10k16(code_ptr, (tcg_insn_unit *)value);
+ default:
+ g_assert_not_reached();
+ }
+}
+
+#include "tcg-insn-defs.c.inc"
+
+/*
+ * TCG intrinsics
+ */
+
+static void tcg_out_mb(TCGContext *s, TCGArg a0)
+{
+ /* Baseline LoongArch only has the full barrier, unfortunately. */
+ tcg_out_opc_dbar(s, 0);
+}
+
+static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
+{
+ if (ret == arg) {
+ return true;
+ }
+ switch (type) {
+ case TCG_TYPE_I32:
+ case TCG_TYPE_I64:
+ /*
+ * Conventional register-register move used in LoongArch is
+ * `or dst, src, zero`.
+ */
+ tcg_out_opc_or(s, ret, arg, TCG_REG_ZERO);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ return true;
+}
+
+static bool imm_part_needs_loading(bool high_bits_are_ones,
+ tcg_target_long part)
+{
+ if (high_bits_are_ones) {
+ return part != -1;
+ } else {
+ return part != 0;
+ }
+}
+
+/* Loads a 32-bit immediate into rd, sign-extended. */
+static void tcg_out_movi_i32(TCGContext *s, TCGReg rd, int32_t val)
+{
+ tcg_target_long lo = sextreg(val, 0, 12);
+ tcg_target_long hi12 = sextreg(val, 12, 20);
+
+ /* Single-instruction cases. */
+ if (lo == val) {
+ /* val fits in simm12: addi.w rd, zero, val */
+ tcg_out_opc_addi_w(s, rd, TCG_REG_ZERO, val);
+ return;
+ }
+ if (0x800 <= val && val <= 0xfff) {
+ /* val fits in uimm12: ori rd, zero, val */
+ tcg_out_opc_ori(s, rd, TCG_REG_ZERO, val);
+ return;
+ }
+
+ /* High bits must be set; load with lu12i.w + optional ori. */
+ tcg_out_opc_lu12i_w(s, rd, hi12);
+ if (lo != 0) {
+ tcg_out_opc_ori(s, rd, rd, lo & 0xfff);
+ }
+}
+
+static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg rd,
+ tcg_target_long val)
+{
+ /*
+ * LoongArch conventionally loads 64-bit immediates in at most 4 steps,
+ * with dedicated instructions for filling the respective bitfields
+ * below:
+ *
+ * 6 5 4 3
+ * 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2
+ * +-----------------------+---------------------------------------+...
+ * | hi52 | hi32 |
+ * +-----------------------+---------------------------------------+...
+ * 3 2 1
+ * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * ...+-------------------------------------+-------------------------+
+ * | hi12 | lo |
+ * ...+-------------------------------------+-------------------------+
+ *
+ * Check if val belongs to one of the several fast cases before falling
+ * back to the slow path.
+ */
+
+ intptr_t pc_offset;
+ tcg_target_long val_lo, val_hi, pc_hi, offset_hi;
+ tcg_target_long hi32, hi52;
+ bool rd_high_bits_are_ones;
+
+ /* Value fits in signed i32. */
+ if (type == TCG_TYPE_I32 || val == (int32_t)val) {
+ tcg_out_movi_i32(s, rd, val);
+ return;
+ }
+
+ /* PC-relative cases. */
+ pc_offset = tcg_pcrel_diff(s, (void *)val);
+ if (pc_offset == sextreg(pc_offset, 0, 22) && (pc_offset & 3) == 0) {
+ /* Single pcaddu2i. */
+ tcg_out_opc_pcaddu2i(s, rd, pc_offset >> 2);
+ return;
+ }
+
+ if (pc_offset == (int32_t)pc_offset) {
+ /* Offset within 32 bits; load with pcalau12i + ori. */
+ val_lo = sextreg(val, 0, 12);
+ val_hi = val >> 12;
+ pc_hi = (val - pc_offset) >> 12;
+ offset_hi = val_hi - pc_hi;
+
+ tcg_debug_assert(offset_hi == sextreg(offset_hi, 0, 20));
+ tcg_out_opc_pcalau12i(s, rd, offset_hi);
+ if (val_lo != 0) {
+ tcg_out_opc_ori(s, rd, rd, val_lo & 0xfff);
+ }
+ return;
+ }
+
+ hi32 = sextreg(val, 32, 20);
+ hi52 = sextreg(val, 52, 12);
+
+ /* Single cu52i.d case. */
+ if (ctz64(val) >= 52) {
+ tcg_out_opc_cu52i_d(s, rd, TCG_REG_ZERO, hi52);
+ return;
+ }
+
+ /* Slow path. Initialize the low 32 bits, then concat high bits. */
+ tcg_out_movi_i32(s, rd, val);
+ rd_high_bits_are_ones = (int32_t)val < 0;
+
+ if (imm_part_needs_loading(rd_high_bits_are_ones, hi32)) {
+ tcg_out_opc_cu32i_d(s, rd, hi32);
+ rd_high_bits_are_ones = hi32 < 0;
+ }
+
+ if (imm_part_needs_loading(rd_high_bits_are_ones, hi52)) {
+ tcg_out_opc_cu52i_d(s, rd, rd, hi52);
+ }
+}
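+
+/*
+ * Illustration of the slow path (assuming none of the PC-relative fast
+ * cases above applies): val = 0x123456789abcdef0 is built in four steps:
+ *   lu12i.w rd, 0x9abcd ; ori rd, rd, 0xef0  -> 0xffffffff9abcdef0
+ *   cu32i.d rd, 0x45678                      -> 0x000456789abcdef0
+ *   cu52i.d rd, rd, 0x123                    -> 0x123456789abcdef0
+ */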
+
+static void tcg_out_ext8u(TCGContext *s, TCGReg ret, TCGReg arg)
+{
+ tcg_out_opc_andi(s, ret, arg, 0xff);
+}
+
+static void tcg_out_ext16u(TCGContext *s, TCGReg ret, TCGReg arg)
+{
+ tcg_out_opc_bstrpick_w(s, ret, arg, 0, 15);
+}
+
+static void tcg_out_ext32u(TCGContext *s, TCGReg ret, TCGReg arg)
+{
+ tcg_out_opc_bstrpick_d(s, ret, arg, 0, 31);
+}
+
+static void tcg_out_ext8s(TCGContext *s, TCGReg ret, TCGReg arg)
+{
+ tcg_out_opc_sext_b(s, ret, arg);
+}
+
+static void tcg_out_ext16s(TCGContext *s, TCGReg ret, TCGReg arg)
+{
+ tcg_out_opc_sext_h(s, ret, arg);
+}
+
+static void tcg_out_ext32s(TCGContext *s, TCGReg ret, TCGReg arg)
+{
+ tcg_out_opc_addi_w(s, ret, arg, 0);
+}
+
+static void tcg_out_clzctz(TCGContext *s, LoongArchInsn opc,
+ TCGReg a0, TCGReg a1, TCGReg a2,
+ bool c2, bool is_32bit)
+{
+ if (c2) {
+ /*
+ * Fast path: the semantics are already satisfied by the constraint
+ * and insn behavior; a single instruction is enough.
+ */
+ tcg_debug_assert(a2 == (is_32bit ? 32 : 64));
+ /* all clz/ctz insns belong to DJ-format */
+ tcg_out32(s, encode_dj_insn(opc, a0, a1));
+ return;
+ }
+
+ tcg_out32(s, encode_dj_insn(opc, TCG_REG_TMP0, a1));
+ /* a0 = a1 ? REG_TMP0 : a2 */
+ tcg_out_opc_maskeqz(s, TCG_REG_TMP0, TCG_REG_TMP0, a1);
+ tcg_out_opc_masknez(s, a0, a2, a1);
+ tcg_out_opc_or(s, a0, TCG_REG_TMP0, a0);
+}
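+
+/*
+ * Illustration of the branchless select above: `maskeqz rd, rj, rk`
+ * yields rj if rk != 0 and 0 otherwise, while `masknez rd, rj, rk`
+ * yields rj only if rk == 0; OR-ing the two results implements
+ * a0 = a1 ? clz/ctz(a1) : a2 without a branch.
+ */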
+
+static void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGReg ret,
+ TCGReg arg1, TCGReg arg2, bool c2)
+{
+ TCGReg tmp;
+
+ if (c2) {
+ tcg_debug_assert(arg2 == 0);
+ }
+
+ switch (cond) {
+ case TCG_COND_EQ:
+ if (c2) {
+ tmp = arg1;
+ } else {
+ tcg_out_opc_sub_d(s, ret, arg1, arg2);
+ tmp = ret;
+ }
+ tcg_out_opc_sltui(s, ret, tmp, 1);
+ break;
+ case TCG_COND_NE:
+ if (c2) {
+ tmp = arg1;
+ } else {
+ tcg_out_opc_sub_d(s, ret, arg1, arg2);
+ tmp = ret;
+ }
+ tcg_out_opc_sltu(s, ret, TCG_REG_ZERO, tmp);
+ break;
+ case TCG_COND_LT:
+ tcg_out_opc_slt(s, ret, arg1, arg2);
+ break;
+ case TCG_COND_GE:
+ tcg_out_opc_slt(s, ret, arg1, arg2);
+ tcg_out_opc_xori(s, ret, ret, 1);
+ break;
+ case TCG_COND_LE:
+ tcg_out_setcond(s, TCG_COND_GE, ret, arg2, arg1, false);
+ break;
+ case TCG_COND_GT:
+ tcg_out_setcond(s, TCG_COND_LT, ret, arg2, arg1, false);
+ break;
+ case TCG_COND_LTU:
+ tcg_out_opc_sltu(s, ret, arg1, arg2);
+ break;
+ case TCG_COND_GEU:
+ tcg_out_opc_sltu(s, ret, arg1, arg2);
+ tcg_out_opc_xori(s, ret, ret, 1);
+ break;
+ case TCG_COND_LEU:
+ tcg_out_setcond(s, TCG_COND_GEU, ret, arg2, arg1, false);
+ break;
+ case TCG_COND_GTU:
+ tcg_out_setcond(s, TCG_COND_LTU, ret, arg2, arg1, false);
+ break;
+ default:
+ g_assert_not_reached();
+ break;
+ }
+}
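+
+/*
+ * Illustration: for TCG_COND_EQ with a register arg2 this emits
+ * `sub.d ret, arg1, arg2` + `sltui ret, ret, 1`, i.e. ret = (diff <u 1)
+ * = (arg1 == arg2); NE instead uses `sltu ret, zero, diff` = (diff != 0).
+ */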
+
+/*
+ * Branch helpers
+ */
+
+static const struct {
+ LoongArchInsn op;
+ bool swap;
+} tcg_brcond_to_loongarch[] = {
+ [TCG_COND_EQ] = { OPC_BEQ, false },
+ [TCG_COND_NE] = { OPC_BNE, false },
+ [TCG_COND_LT] = { OPC_BGT, true },
+ [TCG_COND_GE] = { OPC_BLE, true },
+ [TCG_COND_LE] = { OPC_BLE, false },
+ [TCG_COND_GT] = { OPC_BGT, false },
+ [TCG_COND_LTU] = { OPC_BGTU, true },
+ [TCG_COND_GEU] = { OPC_BLEU, true },
+ [TCG_COND_LEU] = { OPC_BLEU, false },
+ [TCG_COND_GTU] = { OPC_BGTU, false }
+};
+
+static void tcg_out_brcond(TCGContext *s, TCGCond cond, TCGReg arg1,
+ TCGReg arg2, TCGLabel *l)
+{
+ LoongArchInsn op = tcg_brcond_to_loongarch[cond].op;
+
+ tcg_debug_assert(op != 0);
+
+ if (tcg_brcond_to_loongarch[cond].swap) {
+ TCGReg t = arg1;
+ arg1 = arg2;
+ arg2 = t;
+ }
+
+ /* all conditional branch insns belong to DJSk16-format */
+ tcg_out_reloc(s, s->code_ptr, R_LOONGARCH_BR_SK16, l, 0);
+ tcg_out32(s, encode_djsk16_insn(op, arg1, arg2, 0));
+}
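+
+/*
+ * Illustration: TCG_COND_LT maps to { OPC_BGT, swap = true } above,
+ * because arg1 < arg2 is equivalent to arg2 > arg1; operand swapping
+ * lets six branch opcodes cover all ten TCG integer conditions.
+ */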
+
+static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *arg, bool tail)
+{
+ TCGReg link = tail ? TCG_REG_ZERO : TCG_REG_RA;
+ ptrdiff_t offset = tcg_pcrel_diff(s, arg);
+
+ tcg_debug_assert((offset & 3) == 0);
+ if (offset == sextreg(offset, 0, 28)) {
+ /* short jump: +/- 128 MiB (signed 28-bit byte offset) */
+ if (tail) {
+ tcg_out_opc_b(s, offset >> 2);
+ } else {
+ tcg_out_opc_bl(s, offset >> 2);
+ }
+ } else if (offset == sextreg(offset, 0, 38)) {
+ /* long jump: +/- 128 GiB (signed 38-bit byte offset) */
+ tcg_target_long lo = sextreg(offset, 0, 18);
+ tcg_target_long hi = offset - lo;
+ tcg_out_opc_pcaddu18i(s, TCG_REG_TMP0, hi >> 18);
+ tcg_out_opc_jirl(s, link, TCG_REG_TMP0, lo >> 2);
+ } else {
+ /* far jump: 64-bit */
+ tcg_target_long lo = sextreg((tcg_target_long)arg, 0, 18);
+ tcg_target_long hi = (tcg_target_long)arg - lo;
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP0, hi);
+ tcg_out_opc_jirl(s, link, TCG_REG_TMP0, lo >> 2);
+ }
+}
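+
+/*
+ * Illustration of the +/- 128 GiB case: with lo = sextreg(offset, 0, 18)
+ * and hi = offset - lo, `pcaddu18i tmp, hi >> 18` computes pc + hi, and
+ * `jirl link, tmp, lo >> 2` adds lo (a 4-aligned signed 18-bit value, so
+ * lo >> 2 fits jirl's signed 16-bit field) and writes the link register.
+ */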
+
+static void tcg_out_call(TCGContext *s, const tcg_insn_unit *arg)
+{
+ tcg_out_call_int(s, arg, false);
+}
+
+/*
+ * Load/store helpers
+ */
+
+static void tcg_out_ldst(TCGContext *s, LoongArchInsn opc, TCGReg data,
+ TCGReg addr, intptr_t offset)
+{
+ intptr_t imm12 = sextreg(offset, 0, 12);
+
+ if (offset != imm12) {
+ intptr_t diff = offset - (uintptr_t)s->code_ptr;
+
+ if (addr == TCG_REG_ZERO && diff == (int32_t)diff) {
+ imm12 = sextreg(diff, 0, 12);
+ tcg_out_opc_pcaddu12i(s, TCG_REG_TMP2, (diff - imm12) >> 12);
+ } else {
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP2, offset - imm12);
+ if (addr != TCG_REG_ZERO) {
+ tcg_out_opc_add_d(s, TCG_REG_TMP2, TCG_REG_TMP2, addr);
+ }
+ }
+ addr = TCG_REG_TMP2;
+ }
+
+ switch (opc) {
+ case OPC_LD_B:
+ case OPC_LD_BU:
+ case OPC_LD_H:
+ case OPC_LD_HU:
+ case OPC_LD_W:
+ case OPC_LD_WU:
+ case OPC_LD_D:
+ case OPC_ST_B:
+ case OPC_ST_H:
+ case OPC_ST_W:
+ case OPC_ST_D:
+ tcg_out32(s, encode_djsk12_insn(opc, data, addr, imm12));
+ break;
+ default:
+ g_assert_not_reached();
+ }
+}
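+
+/*
+ * Illustration: offset = 0x12345 does not fit in simm12, so imm12 =
+ * 0x345 and the remainder 0x12000 is materialized into TMP2, the base
+ * register is added in, and the access is emitted against TMP2 as
+ * e.g. `ld.d data, TMP2, 0x345`.
+ */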
+
+static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg,
+ TCGReg arg1, intptr_t arg2)
+{
+ bool is_32bit = type == TCG_TYPE_I32;
+ tcg_out_ldst(s, is_32bit ? OPC_LD_W : OPC_LD_D, arg, arg1, arg2);
+}
+
+static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
+ TCGReg arg1, intptr_t arg2)
+{
+ bool is_32bit = type == TCG_TYPE_I32;
+ tcg_out_ldst(s, is_32bit ? OPC_ST_W : OPC_ST_D, arg, arg1, arg2);
+}
+
+static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
+ TCGReg base, intptr_t ofs)
+{
+ if (val == 0) {
+ tcg_out_st(s, type, TCG_REG_ZERO, base, ofs);
+ return true;
+ }
+ return false;
+}
+
+/*
+ * Load/store helpers for SoftMMU, and qemu_ld/st implementations
+ */
+
+#if defined(CONFIG_SOFTMMU)
+/*
+ * helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
+ * MemOpIdx oi, uintptr_t ra)
+ */
+static void * const qemu_ld_helpers[4] = {
+ [MO_8] = helper_ret_ldub_mmu,
+ [MO_16] = helper_le_lduw_mmu,
+ [MO_32] = helper_le_ldul_mmu,
+ [MO_64] = helper_le_ldq_mmu,
+};
+
+/*
+ * helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr,
+ * uintxx_t val, MemOpIdx oi,
+ * uintptr_t ra)
+ */
+static void * const qemu_st_helpers[4] = {
+ [MO_8] = helper_ret_stb_mmu,
+ [MO_16] = helper_le_stw_mmu,
+ [MO_32] = helper_le_stl_mmu,
+ [MO_64] = helper_le_stq_mmu,
+};
+
+/* We expect to use a 12-bit negative offset from ENV. */
+QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
+QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 11));
+
+static bool tcg_out_goto(TCGContext *s, const tcg_insn_unit *target)
+{
+ tcg_out_opc_b(s, 0);
+ return reloc_br_sd10k16(s->code_ptr - 1, target);
+}
+
+/*
+ * Emits common code for the TLB addend lookup, which eventually loads
+ * the addend into TCG_REG_TMP2.
+ */
+static void tcg_out_tlb_load(TCGContext *s, TCGReg addrl, MemOpIdx oi,
+ tcg_insn_unit **label_ptr, bool is_load)
+{
+ MemOp opc = get_memop(oi);
+ unsigned s_bits = opc & MO_SIZE;
+ unsigned a_bits = get_alignment_bits(opc);
+ tcg_target_long compare_mask;
+ int mem_index = get_mmuidx(oi);
+ int fast_ofs = TLB_MASK_TABLE_OFS(mem_index);
+ int mask_ofs = fast_ofs + offsetof(CPUTLBDescFast, mask);
+ int table_ofs = fast_ofs + offsetof(CPUTLBDescFast, table);
+
+ tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_AREG0, mask_ofs);
+ tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_AREG0, table_ofs);
+
+ tcg_out_opc_srli_d(s, TCG_REG_TMP2, addrl,
+ TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
+ tcg_out_opc_and(s, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP0);
+ tcg_out_opc_add_d(s, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP1);
+
+ /* Load the tlb comparator and the addend. */
+ tcg_out_ld(s, TCG_TYPE_TL, TCG_REG_TMP0, TCG_REG_TMP2,
+ is_load ? offsetof(CPUTLBEntry, addr_read)
+ : offsetof(CPUTLBEntry, addr_write));
+ tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP2, TCG_REG_TMP2,
+ offsetof(CPUTLBEntry, addend));
+
+ /* We don't support unaligned accesses. */
+ if (a_bits < s_bits) {
+ a_bits = s_bits;
+ }
+ /* Clear the non-page, non-alignment bits from the address. */
+ compare_mask = (tcg_target_long)TARGET_PAGE_MASK | ((1 << a_bits) - 1);
+ tcg_out_movi(s, TCG_TYPE_TL, TCG_REG_TMP1, compare_mask);
+ tcg_out_opc_and(s, TCG_REG_TMP1, TCG_REG_TMP1, addrl);
+
+ /* Compare masked address with the TLB entry. */
+ label_ptr[0] = s->code_ptr;
+ tcg_out_opc_bne(s, TCG_REG_TMP0, TCG_REG_TMP1, 0);
+
+ /* TLB Hit - addend in TCG_REG_TMP2, ready for use. */
+}
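+
+/*
+ * In effect, the sequence above computes
+ *   entry = table + ((addrl >> (TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS))
+ *                    & mask)
+ * then compares entry->addr_read (or addr_write) against the
+ * page-and-alignment-masked address, branching out on mismatch.
+ */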
+
+static void add_qemu_ldst_label(TCGContext *s, int is_ld, MemOpIdx oi,
+ TCGType type,
+ TCGReg datalo, TCGReg addrlo,
+ void *raddr, tcg_insn_unit **label_ptr)
+{
+ TCGLabelQemuLdst *label = new_ldst_label(s);
+
+ label->is_ld = is_ld;
+ label->oi = oi;
+ label->type = type;
+ label->datalo_reg = datalo;
+ label->datahi_reg = 0; /* unused */
+ label->addrlo_reg = addrlo;
+ label->addrhi_reg = 0; /* unused */
+ label->raddr = tcg_splitwx_to_rx(raddr);
+ label->label_ptr[0] = label_ptr[0];
+}
+
+static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
+{
+ MemOpIdx oi = l->oi;
+ MemOp opc = get_memop(oi);
+ MemOp size = opc & MO_SIZE;
+ TCGType type = l->type;
+
+ /* resolve label address */
+ if (!reloc_br_sk16(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
+ return false;
+ }
+
+ /* call load helper */
+ tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_A0, TCG_AREG0);
+ tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_A1, l->addrlo_reg);
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_A2, oi);
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_A3, (tcg_target_long)l->raddr);
+
+ tcg_out_call(s, qemu_ld_helpers[size]);
+
+ switch (opc & MO_SSIZE) {
+ case MO_SB:
+ tcg_out_ext8s(s, l->datalo_reg, TCG_REG_A0);
+ break;
+ case MO_SW:
+ tcg_out_ext16s(s, l->datalo_reg, TCG_REG_A0);
+ break;
+ case MO_SL:
+ tcg_out_ext32s(s, l->datalo_reg, TCG_REG_A0);
+ break;
+ case MO_UL:
+ if (type == TCG_TYPE_I32) {
+ /* MO_UL loads of i32 should be sign-extended too */
+ tcg_out_ext32s(s, l->datalo_reg, TCG_REG_A0);
+ break;
+ }
+ /* fallthrough */
+ default:
+ tcg_out_mov(s, type, l->datalo_reg, TCG_REG_A0);
+ break;
+ }
+
+ return tcg_out_goto(s, l->raddr);
+}
+
+static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
+{
+ MemOpIdx oi = l->oi;
+ MemOp opc = get_memop(oi);
+ MemOp size = opc & MO_SIZE;
+
+ /* resolve label address */
+ if (!reloc_br_sk16(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
+ return false;
+ }
+
+ /* call store helper */
+ tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_A0, TCG_AREG0);
+ tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_A1, l->addrlo_reg);
+ switch (size) {
+ case MO_8:
+ tcg_out_ext8u(s, TCG_REG_A2, l->datalo_reg);
+ break;
+ case MO_16:
+ tcg_out_ext16u(s, TCG_REG_A2, l->datalo_reg);
+ break;
+ case MO_32:
+ tcg_out_ext32u(s, TCG_REG_A2, l->datalo_reg);
+ break;
+ case MO_64:
+ tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_A2, l->datalo_reg);
+ break;
+ default:
+ g_assert_not_reached();
+ break;
+ }
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_A3, oi);
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_A4, (tcg_target_long)l->raddr);
+
+ tcg_out_call(s, qemu_st_helpers[size]);
+
+ return tcg_out_goto(s, l->raddr);
+}
+#else
+
+/*
+ * Alignment helpers for user-mode emulation
+ */
+
+static void tcg_out_test_alignment(TCGContext *s, bool is_ld, TCGReg addr_reg,
+ unsigned a_bits)
+{
+ TCGLabelQemuLdst *l = new_ldst_label(s);
+
+ l->is_ld = is_ld;
+ l->addrlo_reg = addr_reg;
+
+ /*
+ * Without micro-architecture details, we don't know which of bstrpick or
+ * andi is faster, so use bstrpick, as it's not constrained by the imm
+ * field width. (Not that alignments >= 2^12 are going to happen any
+ * time soon, though.)
+ */
+ tcg_out_opc_bstrpick_d(s, TCG_REG_TMP1, addr_reg, 0, a_bits - 1);
+
+ l->label_ptr[0] = s->code_ptr;
+ tcg_out_opc_bne(s, TCG_REG_TMP1, TCG_REG_ZERO, 0);
+
+ l->raddr = tcg_splitwx_to_rx(s->code_ptr);
+}
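+
+/*
+ * Illustration: for an 8-byte access a_bits = 3, so the bstrpick.d
+ * above extracts address bits [2:0]; any nonzero result means the
+ * access is misaligned, and the bne branches to the slow path.
+ */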
+
+static bool tcg_out_fail_alignment(TCGContext *s, TCGLabelQemuLdst *l)
+{
+ /* resolve label address */
+ if (!reloc_br_sk16(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
+ return false;
+ }
+
+ tcg_out_mov(s, TCG_TYPE_TL, TCG_REG_A1, l->addrlo_reg);
+ tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_A0, TCG_AREG0);
+
+ /* tail call, with the return address back inline. */
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_RA, (uintptr_t)l->raddr);
+ tcg_out_call_int(s, (const void *)(l->is_ld ? helper_unaligned_ld
+ : helper_unaligned_st), true);
+ return true;
+}
+
+static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
+{
+ return tcg_out_fail_alignment(s, l);
+}
+
+static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
+{
+ return tcg_out_fail_alignment(s, l);
+}
+
+#endif /* CONFIG_SOFTMMU */
+
+/*
+ * Zero-extend (`ext32u`) the address register into the given temp
+ * register if the target is 32-bit; otherwise this is a no-op.
+ *
+ * Returns the address register ready for use with TLB addend.
+ */
+static TCGReg tcg_out_zext_addr_if_32_bit(TCGContext *s,
+ TCGReg addr, TCGReg tmp)
+{
+ if (TARGET_LONG_BITS == 32) {
+ tcg_out_ext32u(s, tmp, addr);
+ return tmp;
+ }
+ return addr;
+}
+
+static void tcg_out_qemu_ld_indexed(TCGContext *s, TCGReg rd, TCGReg rj,
+ TCGReg rk, MemOp opc, TCGType type)
+{
+ /* Byte swapping is left to middle-end expansion. */
+ tcg_debug_assert((opc & MO_BSWAP) == 0);
+
+ switch (opc & MO_SSIZE) {
+ case MO_UB:
+ tcg_out_opc_ldx_bu(s, rd, rj, rk);
+ break;
+ case MO_SB:
+ tcg_out_opc_ldx_b(s, rd, rj, rk);
+ break;
+ case MO_UW:
+ tcg_out_opc_ldx_hu(s, rd, rj, rk);
+ break;
+ case MO_SW:
+ tcg_out_opc_ldx_h(s, rd, rj, rk);
+ break;
+ case MO_UL:
+ if (type == TCG_TYPE_I64) {
+ tcg_out_opc_ldx_wu(s, rd, rj, rk);
+ break;
+ }
+ /* fallthrough */
+ case MO_SL:
+ tcg_out_opc_ldx_w(s, rd, rj, rk);
+ break;
+ case MO_Q:
+ tcg_out_opc_ldx_d(s, rd, rj, rk);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, TCGType type)
+{
+ TCGReg addr_regl;
+ TCGReg data_regl;
+ MemOpIdx oi;
+ MemOp opc;
+#if defined(CONFIG_SOFTMMU)
+ tcg_insn_unit *label_ptr[1];
+#else
+ unsigned a_bits;
+#endif
+ TCGReg base;
+
+ data_regl = *args++;
+ addr_regl = *args++;
+ oi = *args++;
+ opc = get_memop(oi);
+
+#if defined(CONFIG_SOFTMMU)
+ tcg_out_tlb_load(s, addr_regl, oi, label_ptr, 1);
+ base = tcg_out_zext_addr_if_32_bit(s, addr_regl, TCG_REG_TMP0);
+ tcg_out_qemu_ld_indexed(s, data_regl, base, TCG_REG_TMP2, opc, type);
+ add_qemu_ldst_label(s, 1, oi, type,
+ data_regl, addr_regl,
+ s->code_ptr, label_ptr);
+#else
+ a_bits = get_alignment_bits(opc);
+ if (a_bits) {
+ tcg_out_test_alignment(s, true, addr_regl, a_bits);
+ }
+ base = tcg_out_zext_addr_if_32_bit(s, addr_regl, TCG_REG_TMP0);
+ TCGReg guest_base_reg = USE_GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_ZERO;
+ tcg_out_qemu_ld_indexed(s, data_regl, base, guest_base_reg, opc, type);
+#endif
+}
+
+static void tcg_out_qemu_st_indexed(TCGContext *s, TCGReg data,
+ TCGReg rj, TCGReg rk, MemOp opc)
+{
+ /* Byte swapping is left to middle-end expansion. */
+ tcg_debug_assert((opc & MO_BSWAP) == 0);
+
+ switch (opc & MO_SIZE) {
+ case MO_8:
+ tcg_out_opc_stx_b(s, data, rj, rk);
+ break;
+ case MO_16:
+ tcg_out_opc_stx_h(s, data, rj, rk);
+ break;
+ case MO_32:
+ tcg_out_opc_stx_w(s, data, rj, rk);
+ break;
+ case MO_64:
+ tcg_out_opc_stx_d(s, data, rj, rk);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args)
+{
+ TCGReg addr_regl;
+ TCGReg data_regl;
+ MemOpIdx oi;
+ MemOp opc;
+#if defined(CONFIG_SOFTMMU)
+ tcg_insn_unit *label_ptr[1];
+#else
+ unsigned a_bits;
+#endif
+ TCGReg base;
+
+ data_regl = *args++;
+ addr_regl = *args++;
+ oi = *args++;
+ opc = get_memop(oi);
+
+#if defined(CONFIG_SOFTMMU)
+ tcg_out_tlb_load(s, addr_regl, oi, label_ptr, 0);
+ base = tcg_out_zext_addr_if_32_bit(s, addr_regl, TCG_REG_TMP0);
+ tcg_out_qemu_st_indexed(s, data_regl, base, TCG_REG_TMP2, opc);
+ add_qemu_ldst_label(s, 0, oi,
+ 0, /* type param is unused for stores */
+ data_regl, addr_regl,
+ s->code_ptr, label_ptr);
+#else
+ a_bits = get_alignment_bits(opc);
+ if (a_bits) {
+ tcg_out_test_alignment(s, false, addr_regl, a_bits);
+ }
+ base = tcg_out_zext_addr_if_32_bit(s, addr_regl, TCG_REG_TMP0);
+ TCGReg guest_base_reg = USE_GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_ZERO;
+ tcg_out_qemu_st_indexed(s, data_regl, base, guest_base_reg, opc);
+#endif
+}
+
+/*
+ * Entry-points
+ */
+
+static const tcg_insn_unit *tb_ret_addr;
+
+static void tcg_out_op(TCGContext *s, TCGOpcode opc,
+ const TCGArg args[TCG_MAX_OP_ARGS],
+ const int const_args[TCG_MAX_OP_ARGS])
+{
+ TCGArg a0 = args[0];
+ TCGArg a1 = args[1];
+ TCGArg a2 = args[2];
+ int c2 = const_args[2];
+
+ switch (opc) {
+ case INDEX_op_exit_tb:
+ /* Reuse the zeroing that exists for goto_ptr. */
+ if (a0 == 0) {
+ tcg_out_call_int(s, tcg_code_gen_epilogue, true);
+ } else {
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_A0, a0);
+ tcg_out_call_int(s, tb_ret_addr, true);
+ }
+ break;
+
+ case INDEX_op_goto_tb:
+ assert(s->tb_jmp_insn_offset == 0);
+ /* indirect jump method */
+ tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_REG_ZERO,
+ (uintptr_t)(s->tb_jmp_target_addr + a0));
+ tcg_out_opc_jirl(s, TCG_REG_ZERO, TCG_REG_TMP0, 0);
+ set_jmp_reset_offset(s, a0);
+ break;
+
+ case INDEX_op_mb:
+ tcg_out_mb(s, a0);
+ break;
+
+ case INDEX_op_goto_ptr:
+ tcg_out_opc_jirl(s, TCG_REG_ZERO, a0, 0);
+ break;
+
+ case INDEX_op_br:
+ tcg_out_reloc(s, s->code_ptr, R_LOONGARCH_BR_SD10K16, arg_label(a0),
+ 0);
+ tcg_out_opc_b(s, 0);
+ break;
+
+ case INDEX_op_brcond_i32:
+ case INDEX_op_brcond_i64:
+ tcg_out_brcond(s, a2, a0, a1, arg_label(args[3]));
+ break;
+
+ case INDEX_op_ext8s_i32:
+ case INDEX_op_ext8s_i64:
+ tcg_out_ext8s(s, a0, a1);
+ break;
+
+ case INDEX_op_ext8u_i32:
+ case INDEX_op_ext8u_i64:
+ tcg_out_ext8u(s, a0, a1);
+ break;
+
+ case INDEX_op_ext16s_i32:
+ case INDEX_op_ext16s_i64:
+ tcg_out_ext16s(s, a0, a1);
+ break;
+
+ case INDEX_op_ext16u_i32:
+ case INDEX_op_ext16u_i64:
+ tcg_out_ext16u(s, a0, a1);
+ break;
+
+ case INDEX_op_ext32u_i64:
+ case INDEX_op_extu_i32_i64:
+ tcg_out_ext32u(s, a0, a1);
+ break;
+
+ case INDEX_op_ext32s_i64:
+ case INDEX_op_extrl_i64_i32:
+ case INDEX_op_ext_i32_i64:
+ tcg_out_ext32s(s, a0, a1);
+ break;
+
+ case INDEX_op_extrh_i64_i32:
+ tcg_out_opc_srai_d(s, a0, a1, 32);
+ break;
+
+ case INDEX_op_not_i32:
+ case INDEX_op_not_i64:
+ tcg_out_opc_nor(s, a0, a1, TCG_REG_ZERO);
+ break;
+
+ case INDEX_op_nor_i32:
+ case INDEX_op_nor_i64:
+ if (c2) {
+ tcg_out_opc_ori(s, a0, a1, a2);
+ tcg_out_opc_nor(s, a0, a0, TCG_REG_ZERO);
+ } else {
+ tcg_out_opc_nor(s, a0, a1, a2);
+ }
+ break;
+
+ case INDEX_op_andc_i32:
+ case INDEX_op_andc_i64:
+ if (c2) {
+ /* guaranteed to fit due to constraint */
+ tcg_out_opc_andi(s, a0, a1, ~a2);
+ } else {
+ tcg_out_opc_andn(s, a0, a1, a2);
+ }
+ break;
+
+ case INDEX_op_orc_i32:
+ case INDEX_op_orc_i64:
+ if (c2) {
+ /* guaranteed to fit due to constraint */
+ tcg_out_opc_ori(s, a0, a1, ~a2);
+ } else {
+ tcg_out_opc_orn(s, a0, a1, a2);
+ }
+ break;
+
+ case INDEX_op_and_i32:
+ case INDEX_op_and_i64:
+ if (c2) {
+ tcg_out_opc_andi(s, a0, a1, a2);
+ } else {
+ tcg_out_opc_and(s, a0, a1, a2);
+ }
+ break;
+
+ case INDEX_op_or_i32:
+ case INDEX_op_or_i64:
+ if (c2) {
+ tcg_out_opc_ori(s, a0, a1, a2);
+ } else {
+ tcg_out_opc_or(s, a0, a1, a2);
+ }
+ break;
+
+ case INDEX_op_xor_i32:
+ case INDEX_op_xor_i64:
+ if (c2) {
+ tcg_out_opc_xori(s, a0, a1, a2);
+ } else {
+ tcg_out_opc_xor(s, a0, a1, a2);
+ }
+ break;
+
+ case INDEX_op_extract_i32:
+ tcg_out_opc_bstrpick_w(s, a0, a1, a2, a2 + args[3] - 1);
+ break;
+ case INDEX_op_extract_i64:
+ tcg_out_opc_bstrpick_d(s, a0, a1, a2, a2 + args[3] - 1);
+ break;
+
+ case INDEX_op_deposit_i32:
+ tcg_out_opc_bstrins_w(s, a0, a2, args[3], args[3] + args[4] - 1);
+ break;
+ case INDEX_op_deposit_i64:
+ tcg_out_opc_bstrins_d(s, a0, a2, args[3], args[3] + args[4] - 1);
+ break;
+
+ case INDEX_op_bswap16_i32:
+ case INDEX_op_bswap16_i64:
+ tcg_out_opc_revb_2h(s, a0, a1);
+ if (a2 & TCG_BSWAP_OS) {
+ tcg_out_ext16s(s, a0, a0);
+ } else if ((a2 & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
+ tcg_out_ext16u(s, a0, a0);
+ }
+ break;
+
+ case INDEX_op_bswap32_i32:
+ /* All 32-bit values are computed sign-extended in the register. */
+ a2 = TCG_BSWAP_OS;
+ /* fallthrough */
+ case INDEX_op_bswap32_i64:
+ tcg_out_opc_revb_2w(s, a0, a1);
+ if (a2 & TCG_BSWAP_OS) {
+ tcg_out_ext32s(s, a0, a0);
+ } else if ((a2 & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
+ tcg_out_ext32u(s, a0, a0);
+ }
+ break;
+
+ case INDEX_op_bswap64_i64:
+ tcg_out_opc_revb_d(s, a0, a1);
+ break;
+
+ case INDEX_op_clz_i32:
+ tcg_out_clzctz(s, OPC_CLZ_W, a0, a1, a2, c2, true);
+ break;
+ case INDEX_op_clz_i64:
+ tcg_out_clzctz(s, OPC_CLZ_D, a0, a1, a2, c2, false);
+ break;
+
+ case INDEX_op_ctz_i32:
+ tcg_out_clzctz(s, OPC_CTZ_W, a0, a1, a2, c2, true);
+ break;
+ case INDEX_op_ctz_i64:
+ tcg_out_clzctz(s, OPC_CTZ_D, a0, a1, a2, c2, false);
+ break;
+
+ case INDEX_op_shl_i32:
+ if (c2) {
+ tcg_out_opc_slli_w(s, a0, a1, a2 & 0x1f);
+ } else {
+ tcg_out_opc_sll_w(s, a0, a1, a2);
+ }
+ break;
+ case INDEX_op_shl_i64:
+ if (c2) {
+ tcg_out_opc_slli_d(s, a0, a1, a2 & 0x3f);
+ } else {
+ tcg_out_opc_sll_d(s, a0, a1, a2);
+ }
+ break;
+
+ case INDEX_op_shr_i32:
+ if (c2) {
+ tcg_out_opc_srli_w(s, a0, a1, a2 & 0x1f);
+ } else {
+ tcg_out_opc_srl_w(s, a0, a1, a2);
+ }
+ break;
+ case INDEX_op_shr_i64:
+ if (c2) {
+ tcg_out_opc_srli_d(s, a0, a1, a2 & 0x3f);
+ } else {
+ tcg_out_opc_srl_d(s, a0, a1, a2);
+ }
+ break;
+
+ case INDEX_op_sar_i32:
+ if (c2) {
+ tcg_out_opc_srai_w(s, a0, a1, a2 & 0x1f);
+ } else {
+ tcg_out_opc_sra_w(s, a0, a1, a2);
+ }
+ break;
+ case INDEX_op_sar_i64:
+ if (c2) {
+ tcg_out_opc_srai_d(s, a0, a1, a2 & 0x3f);
+ } else {
+ tcg_out_opc_sra_d(s, a0, a1, a2);
+ }
+ break;
+
+ case INDEX_op_rotl_i32:
+ /* transform into equivalent rotr/rotri */
+ if (c2) {
+ tcg_out_opc_rotri_w(s, a0, a1, (32 - a2) & 0x1f);
+ } else {
+ tcg_out_opc_sub_w(s, TCG_REG_TMP0, TCG_REG_ZERO, a2);
+ tcg_out_opc_rotr_w(s, a0, a1, TCG_REG_TMP0);
+ }
+ break;
+ case INDEX_op_rotl_i64:
+ /* transform into equivalent rotr/rotri */
+ if (c2) {
+ tcg_out_opc_rotri_d(s, a0, a1, (64 - a2) & 0x3f);
+ } else {
+ tcg_out_opc_sub_w(s, TCG_REG_TMP0, TCG_REG_ZERO, a2);
+ tcg_out_opc_rotr_d(s, a0, a1, TCG_REG_TMP0);
+ }
+ break;
+
+ case INDEX_op_rotr_i32:
+ if (c2) {
+ tcg_out_opc_rotri_w(s, a0, a1, a2 & 0x1f);
+ } else {
+ tcg_out_opc_rotr_w(s, a0, a1, a2);
+ }
+ break;
+ case INDEX_op_rotr_i64:
+ if (c2) {
+ tcg_out_opc_rotri_d(s, a0, a1, a2 & 0x3f);
+ } else {
+ tcg_out_opc_rotr_d(s, a0, a1, a2);
+ }
+ break;
+
+ case INDEX_op_add_i32:
+ if (c2) {
+ tcg_out_opc_addi_w(s, a0, a1, a2);
+ } else {
+ tcg_out_opc_add_w(s, a0, a1, a2);
+ }
+ break;
+ case INDEX_op_add_i64:
+ if (c2) {
+ tcg_out_opc_addi_d(s, a0, a1, a2);
+ } else {
+ tcg_out_opc_add_d(s, a0, a1, a2);
+ }
+ break;
+
+ case INDEX_op_sub_i32:
+ if (c2) {
+ tcg_out_opc_addi_w(s, a0, a1, -a2);
+ } else {
+ tcg_out_opc_sub_w(s, a0, a1, a2);
+ }
+ break;
+ case INDEX_op_sub_i64:
+ if (c2) {
+ tcg_out_opc_addi_d(s, a0, a1, -a2);
+ } else {
+ tcg_out_opc_sub_d(s, a0, a1, a2);
+ }
+ break;
+
+ case INDEX_op_mul_i32:
+ tcg_out_opc_mul_w(s, a0, a1, a2);
+ break;
+ case INDEX_op_mul_i64:
+ tcg_out_opc_mul_d(s, a0, a1, a2);
+ break;
+
+ case INDEX_op_mulsh_i32:
+ tcg_out_opc_mulh_w(s, a0, a1, a2);
+ break;
+ case INDEX_op_mulsh_i64:
+ tcg_out_opc_mulh_d(s, a0, a1, a2);
+ break;
+
+ case INDEX_op_muluh_i32:
+ tcg_out_opc_mulh_wu(s, a0, a1, a2);
+ break;
+ case INDEX_op_muluh_i64:
+ tcg_out_opc_mulh_du(s, a0, a1, a2);
+ break;
+
+ case INDEX_op_div_i32:
+ tcg_out_opc_div_w(s, a0, a1, a2);
+ break;
+ case INDEX_op_div_i64:
+ tcg_out_opc_div_d(s, a0, a1, a2);
+ break;
+
+ case INDEX_op_divu_i32:
+ tcg_out_opc_div_wu(s, a0, a1, a2);
+ break;
+ case INDEX_op_divu_i64:
+ tcg_out_opc_div_du(s, a0, a1, a2);
+ break;
+
+ case INDEX_op_rem_i32:
+ tcg_out_opc_mod_w(s, a0, a1, a2);
+ break;
+ case INDEX_op_rem_i64:
+ tcg_out_opc_mod_d(s, a0, a1, a2);
+ break;
+
+ case INDEX_op_remu_i32:
+ tcg_out_opc_mod_wu(s, a0, a1, a2);
+ break;
+ case INDEX_op_remu_i64:
+ tcg_out_opc_mod_du(s, a0, a1, a2);
+ break;
+
+ case INDEX_op_setcond_i32:
+ case INDEX_op_setcond_i64:
+ tcg_out_setcond(s, args[3], a0, a1, a2, c2);
+ break;
+
+ case INDEX_op_ld8s_i32:
+ case INDEX_op_ld8s_i64:
+ tcg_out_ldst(s, OPC_LD_B, a0, a1, a2);
+ break;
+ case INDEX_op_ld8u_i32:
+ case INDEX_op_ld8u_i64:
+ tcg_out_ldst(s, OPC_LD_BU, a0, a1, a2);
+ break;
+ case INDEX_op_ld16s_i32:
+ case INDEX_op_ld16s_i64:
+ tcg_out_ldst(s, OPC_LD_H, a0, a1, a2);
+ break;
+ case INDEX_op_ld16u_i32:
+ case INDEX_op_ld16u_i64:
+ tcg_out_ldst(s, OPC_LD_HU, a0, a1, a2);
+ break;
+ case INDEX_op_ld_i32:
+ case INDEX_op_ld32s_i64:
+ tcg_out_ldst(s, OPC_LD_W, a0, a1, a2);
+ break;
+ case INDEX_op_ld32u_i64:
+ tcg_out_ldst(s, OPC_LD_WU, a0, a1, a2);
+ break;
+ case INDEX_op_ld_i64:
+ tcg_out_ldst(s, OPC_LD_D, a0, a1, a2);
+ break;
+
+ case INDEX_op_st8_i32:
+ case INDEX_op_st8_i64:
+ tcg_out_ldst(s, OPC_ST_B, a0, a1, a2);
+ break;
+ case INDEX_op_st16_i32:
+ case INDEX_op_st16_i64:
+ tcg_out_ldst(s, OPC_ST_H, a0, a1, a2);
+ break;
+ case INDEX_op_st_i32:
+ case INDEX_op_st32_i64:
+ tcg_out_ldst(s, OPC_ST_W, a0, a1, a2);
+ break;
+ case INDEX_op_st_i64:
+ tcg_out_ldst(s, OPC_ST_D, a0, a1, a2);
+ break;
+
+ case INDEX_op_qemu_ld_i32:
+ tcg_out_qemu_ld(s, args, TCG_TYPE_I32);
+ break;
+ case INDEX_op_qemu_ld_i64:
+ tcg_out_qemu_ld(s, args, TCG_TYPE_I64);
+ break;
+ case INDEX_op_qemu_st_i32:
+ tcg_out_qemu_st(s, args);
+ break;
+ case INDEX_op_qemu_st_i64:
+ tcg_out_qemu_st(s, args);
+ break;
+
+ case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */
+ case INDEX_op_mov_i64:
+ case INDEX_op_call: /* Always emitted via tcg_out_call. */
+ default:
+ g_assert_not_reached();
+ }
+}
+
+static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
+{
+ switch (op) {
+ case INDEX_op_goto_ptr:
+ return C_O0_I1(r);
+
+ case INDEX_op_st8_i32:
+ case INDEX_op_st8_i64:
+ case INDEX_op_st16_i32:
+ case INDEX_op_st16_i64:
+ case INDEX_op_st32_i64:
+ case INDEX_op_st_i32:
+ case INDEX_op_st_i64:
+ return C_O0_I2(rZ, r);
+
+ case INDEX_op_brcond_i32:
+ case INDEX_op_brcond_i64:
+ return C_O0_I2(rZ, rZ);
+
+ case INDEX_op_qemu_st_i32:
+ case INDEX_op_qemu_st_i64:
+ return C_O0_I2(LZ, L);
+
+ case INDEX_op_ext8s_i32:
+ case INDEX_op_ext8s_i64:
+ case INDEX_op_ext8u_i32:
+ case INDEX_op_ext8u_i64:
+ case INDEX_op_ext16s_i32:
+ case INDEX_op_ext16s_i64:
+ case INDEX_op_ext16u_i32:
+ case INDEX_op_ext16u_i64:
+ case INDEX_op_ext32s_i64:
+ case INDEX_op_ext32u_i64:
+ case INDEX_op_extu_i32_i64:
+ case INDEX_op_extrl_i64_i32:
+ case INDEX_op_extrh_i64_i32:
+ case INDEX_op_ext_i32_i64:
+ case INDEX_op_not_i32:
+ case INDEX_op_not_i64:
+ case INDEX_op_extract_i32:
+ case INDEX_op_extract_i64:
+ case INDEX_op_bswap16_i32:
+ case INDEX_op_bswap16_i64:
+ case INDEX_op_bswap32_i32:
+ case INDEX_op_bswap32_i64:
+ case INDEX_op_bswap64_i64:
+ case INDEX_op_ld8s_i32:
+ case INDEX_op_ld8s_i64:
+ case INDEX_op_ld8u_i32:
+ case INDEX_op_ld8u_i64:
+ case INDEX_op_ld16s_i32:
+ case INDEX_op_ld16s_i64:
+ case INDEX_op_ld16u_i32:
+ case INDEX_op_ld16u_i64:
+ case INDEX_op_ld32s_i64:
+ case INDEX_op_ld32u_i64:
+ case INDEX_op_ld_i32:
+ case INDEX_op_ld_i64:
+ return C_O1_I1(r, r);
+
+ case INDEX_op_qemu_ld_i32:
+ case INDEX_op_qemu_ld_i64:
+ return C_O1_I1(r, L);
+
+ case INDEX_op_andc_i32:
+ case INDEX_op_andc_i64:
+ case INDEX_op_orc_i32:
+ case INDEX_op_orc_i64:
+ /*
+ * LoongArch insns for these ops don't have reg-imm forms, but we
+ * can express them using andi/ori if ~constant satisfies
+ * TCG_CT_CONST_U12.
+ */
+ return C_O1_I2(r, r, rC);
+
+ case INDEX_op_shl_i32:
+ case INDEX_op_shl_i64:
+ case INDEX_op_shr_i32:
+ case INDEX_op_shr_i64:
+ case INDEX_op_sar_i32:
+ case INDEX_op_sar_i64:
+ case INDEX_op_rotl_i32:
+ case INDEX_op_rotl_i64:
+ case INDEX_op_rotr_i32:
+ case INDEX_op_rotr_i64:
+ return C_O1_I2(r, r, ri);
+
+ case INDEX_op_add_i32:
+ case INDEX_op_add_i64:
+ return C_O1_I2(r, r, rI);
+
+ case INDEX_op_and_i32:
+ case INDEX_op_and_i64:
+ case INDEX_op_nor_i32:
+ case INDEX_op_nor_i64:
+ case INDEX_op_or_i32:
+ case INDEX_op_or_i64:
+ case INDEX_op_xor_i32:
+ case INDEX_op_xor_i64:
+ /* LoongArch reg-imm bitops have their imms ZERO-extended */
+ return C_O1_I2(r, r, rU);
+
+ case INDEX_op_clz_i32:
+ case INDEX_op_clz_i64:
+ case INDEX_op_ctz_i32:
+ case INDEX_op_ctz_i64:
+ return C_O1_I2(r, r, rW);
+
+ case INDEX_op_setcond_i32:
+ case INDEX_op_setcond_i64:
+ return C_O1_I2(r, r, rZ);
+
+ case INDEX_op_deposit_i32:
+ case INDEX_op_deposit_i64:
+ /* Must deposit into the same register as input */
+ return C_O1_I2(r, 0, rZ);
+
+ case INDEX_op_sub_i32:
+ case INDEX_op_sub_i64:
+ return C_O1_I2(r, rZ, rN);
+
+ case INDEX_op_mul_i32:
+ case INDEX_op_mul_i64:
+ case INDEX_op_mulsh_i32:
+ case INDEX_op_mulsh_i64:
+ case INDEX_op_muluh_i32:
+ case INDEX_op_muluh_i64:
+ case INDEX_op_div_i32:
+ case INDEX_op_div_i64:
+ case INDEX_op_divu_i32:
+ case INDEX_op_divu_i64:
+ case INDEX_op_rem_i32:
+ case INDEX_op_rem_i64:
+ case INDEX_op_remu_i32:
+ case INDEX_op_remu_i64:
+ return C_O1_I2(r, rZ, rZ);
+
+ default:
+ g_assert_not_reached();
+ }
+}
+
+static const int tcg_target_callee_save_regs[] = {
+ TCG_REG_S0, /* used for the global env (TCG_AREG0) */
+ TCG_REG_S1,
+ TCG_REG_S2,
+ TCG_REG_S3,
+ TCG_REG_S4,
+ TCG_REG_S5,
+ TCG_REG_S6,
+ TCG_REG_S7,
+ TCG_REG_S8,
+ TCG_REG_S9,
+ TCG_REG_RA, /* should be last for ABI compliance */
+};
+
+/* Stack frame parameters. */
+#define REG_SIZE (TCG_TARGET_REG_BITS / 8)
+#define SAVE_SIZE ((int)ARRAY_SIZE(tcg_target_callee_save_regs) * REG_SIZE)
+#define TEMP_SIZE (CPU_TEMP_BUF_NLONGS * (int)sizeof(long))
+#define FRAME_SIZE ((TCG_STATIC_CALL_ARGS_SIZE + TEMP_SIZE + SAVE_SIZE \
+ + TCG_TARGET_STACK_ALIGN - 1) \
+ & -TCG_TARGET_STACK_ALIGN)
+#define SAVE_OFS (TCG_STATIC_CALL_ARGS_SIZE + TEMP_SIZE)
+
+/* We're expecting to be able to use an immediate for frame allocation. */
+QEMU_BUILD_BUG_ON(FRAME_SIZE > 0x7ff);
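+
+/*
+ * Resulting frame layout, growing upwards from sp:
+ *   [0, TCG_STATIC_CALL_ARGS_SIZE)    outgoing call arguments
+ *   [.., .. + TEMP_SIZE)              TCG temporaries (tcg_set_frame)
+ *   [SAVE_OFS, SAVE_OFS + SAVE_SIZE)  callee-saved regs, RA last
+ * rounded up to TCG_TARGET_STACK_ALIGN.
+ */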
+
+/* Generate global QEMU prologue and epilogue code */
+static void tcg_target_qemu_prologue(TCGContext *s)
+{
+ int i;
+
+ tcg_set_frame(s, TCG_REG_SP, TCG_STATIC_CALL_ARGS_SIZE, TEMP_SIZE);
+
+ /* TB prologue */
+ tcg_out_opc_addi_d(s, TCG_REG_SP, TCG_REG_SP, -FRAME_SIZE);
+ for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
+ tcg_out_st(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
+ TCG_REG_SP, SAVE_OFS + i * REG_SIZE);
+ }
+
+#if !defined(CONFIG_SOFTMMU)
+ if (USE_GUEST_BASE) {
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base);
+ tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
+ }
+#endif
+
+ /* Call generated code */
+ tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
+ tcg_out_opc_jirl(s, TCG_REG_ZERO, tcg_target_call_iarg_regs[1], 0);
+
+ /* Return path for goto_ptr. Set return value to 0 */
+ tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
+ tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_A0, TCG_REG_ZERO);
+
+ /* TB epilogue */
+ tb_ret_addr = tcg_splitwx_to_rx(s->code_ptr);
+ for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
+ tcg_out_ld(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
+ TCG_REG_SP, SAVE_OFS + i * REG_SIZE);
+ }
+
+ tcg_out_opc_addi_d(s, TCG_REG_SP, TCG_REG_SP, FRAME_SIZE);
+ tcg_out_opc_jirl(s, TCG_REG_ZERO, TCG_REG_RA, 0);
+}
+
+static void tcg_target_init(TCGContext *s)
+{
+ tcg_target_available_regs[TCG_TYPE_I32] = ALL_GENERAL_REGS;
+ tcg_target_available_regs[TCG_TYPE_I64] = ALL_GENERAL_REGS;
+
+ tcg_target_call_clobber_regs = ALL_GENERAL_REGS;
+ tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S0);
+ tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S1);
+ tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S2);
+ tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S3);
+ tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S4);
+ tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S5);
+ tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S6);
+ tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S7);
+ tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S8);
+ tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S9);
+
+ s->reserved_regs = 0;
+ tcg_regset_set_reg(s->reserved_regs, TCG_REG_ZERO);
+ tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP0);
+ tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP1);
+ tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP2);
+ tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP);
+ tcg_regset_set_reg(s->reserved_regs, TCG_REG_TP);
+ tcg_regset_set_reg(s->reserved_regs, TCG_REG_RESERVED);
+}
+
+typedef struct {
+ DebugFrameHeader h;
+ uint8_t fde_def_cfa[4];
+ uint8_t fde_reg_ofs[ARRAY_SIZE(tcg_target_callee_save_regs) * 2];
+} DebugFrame;
+
+#define ELF_HOST_MACHINE EM_LOONGARCH
+
+static const DebugFrame debug_frame = {
+ .h.cie.len = sizeof(DebugFrameCIE) - 4, /* length after .len member */
+ .h.cie.id = -1,
+ .h.cie.version = 1,
+ .h.cie.code_align = 1,
+ .h.cie.data_align = -(TCG_TARGET_REG_BITS / 8) & 0x7f, /* sleb128 */
+ .h.cie.return_column = TCG_REG_RA,
+
+ /* Total FDE size does not include the "len" member. */
+ .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),
+
+ .fde_def_cfa = {
+ 12, TCG_REG_SP, /* DW_CFA_def_cfa sp, ... */
+ (FRAME_SIZE & 0x7f) | 0x80, /* ... uleb128 FRAME_SIZE */
+ (FRAME_SIZE >> 7)
+ },
+ .fde_reg_ofs = {
+ 0x80 + 23, 11, /* DW_CFA_offset, s0, -88 */
+ 0x80 + 24, 10, /* DW_CFA_offset, s1, -80 */
+ 0x80 + 25, 9, /* DW_CFA_offset, s2, -72 */
+ 0x80 + 26, 8, /* DW_CFA_offset, s3, -64 */
+ 0x80 + 27, 7, /* DW_CFA_offset, s4, -56 */
+ 0x80 + 28, 6, /* DW_CFA_offset, s5, -48 */
+ 0x80 + 29, 5, /* DW_CFA_offset, s6, -40 */
+ 0x80 + 30, 4, /* DW_CFA_offset, s7, -32 */
+ 0x80 + 31, 3, /* DW_CFA_offset, s8, -24 */
+ 0x80 + 22, 2, /* DW_CFA_offset, s9, -16 */
+ 0x80 + 1 , 1, /* DW_CFA_offset, ra, -8 */
+ }
+};
+
+void tcg_register_jit(const void *buf, size_t buf_size)
+{
+ tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
+}
diff --git a/tcg/loongarch64/tcg-target.h b/tcg/loongarch64/tcg-target.h
new file mode 100644
index 000000000..d58a6162f
--- /dev/null
+++ b/tcg/loongarch64/tcg-target.h
@@ -0,0 +1,178 @@
+/*
+ * Tiny Code Generator for QEMU
+ *
+ * Copyright (c) 2021 WANG Xuerui <git@xen0n.name>
+ *
+ * Based on tcg/riscv/tcg-target.h
+ *
+ * Copyright (c) 2018 SiFive, Inc
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef LOONGARCH_TCG_TARGET_H
+#define LOONGARCH_TCG_TARGET_H
+
+/*
+ * Loongson removed the (incomplete) 32-bit support from kernel and toolchain
+ * for the initial upstreaming of this architecture, so don't bother and just
+ * support the LP64* ABI for now.
+ */
+#if defined(__loongarch64)
+# define TCG_TARGET_REG_BITS 64
+#else
+# error unsupported LoongArch register size
+#endif
+
+#define TCG_TARGET_INSN_UNIT_SIZE 4
+#define TCG_TARGET_NB_REGS 32
+#define MAX_CODE_GEN_BUFFER_SIZE SIZE_MAX
+
+typedef enum {
+ TCG_REG_ZERO,
+ TCG_REG_RA,
+ TCG_REG_TP,
+ TCG_REG_SP,
+ TCG_REG_A0,
+ TCG_REG_A1,
+ TCG_REG_A2,
+ TCG_REG_A3,
+ TCG_REG_A4,
+ TCG_REG_A5,
+ TCG_REG_A6,
+ TCG_REG_A7,
+ TCG_REG_T0,
+ TCG_REG_T1,
+ TCG_REG_T2,
+ TCG_REG_T3,
+ TCG_REG_T4,
+ TCG_REG_T5,
+ TCG_REG_T6,
+ TCG_REG_T7,
+ TCG_REG_T8,
+ TCG_REG_RESERVED,
+ TCG_REG_S9,
+ TCG_REG_S0,
+ TCG_REG_S1,
+ TCG_REG_S2,
+ TCG_REG_S3,
+ TCG_REG_S4,
+ TCG_REG_S5,
+ TCG_REG_S6,
+ TCG_REG_S7,
+ TCG_REG_S8,
+
+ /* aliases */
+ TCG_AREG0 = TCG_REG_S0,
+ TCG_REG_TMP0 = TCG_REG_T8,
+ TCG_REG_TMP1 = TCG_REG_T7,
+ TCG_REG_TMP2 = TCG_REG_T6,
+} TCGReg;
+
+/* used for function call generation */
+#define TCG_REG_CALL_STACK TCG_REG_SP
+#define TCG_TARGET_STACK_ALIGN 16
+#define TCG_TARGET_CALL_ALIGN_ARGS 1
+#define TCG_TARGET_CALL_STACK_OFFSET 0
+
+/* optional instructions */
+#define TCG_TARGET_HAS_movcond_i32 0
+#define TCG_TARGET_HAS_div_i32 1
+#define TCG_TARGET_HAS_rem_i32 1
+#define TCG_TARGET_HAS_div2_i32 0
+#define TCG_TARGET_HAS_rot_i32 1
+#define TCG_TARGET_HAS_deposit_i32 1
+#define TCG_TARGET_HAS_extract_i32 1
+#define TCG_TARGET_HAS_sextract_i32 0
+#define TCG_TARGET_HAS_extract2_i32 0
+#define TCG_TARGET_HAS_add2_i32 0
+#define TCG_TARGET_HAS_sub2_i32 0
+#define TCG_TARGET_HAS_mulu2_i32 0
+#define TCG_TARGET_HAS_muls2_i32 0
+#define TCG_TARGET_HAS_muluh_i32 1
+#define TCG_TARGET_HAS_mulsh_i32 1
+#define TCG_TARGET_HAS_ext8s_i32 1
+#define TCG_TARGET_HAS_ext16s_i32 1
+#define TCG_TARGET_HAS_ext8u_i32 1
+#define TCG_TARGET_HAS_ext16u_i32 1
+#define TCG_TARGET_HAS_bswap16_i32 1
+#define TCG_TARGET_HAS_bswap32_i32 1
+#define TCG_TARGET_HAS_not_i32 1
+#define TCG_TARGET_HAS_neg_i32 0
+#define TCG_TARGET_HAS_andc_i32 1
+#define TCG_TARGET_HAS_orc_i32 1
+#define TCG_TARGET_HAS_eqv_i32 0
+#define TCG_TARGET_HAS_nand_i32 0
+#define TCG_TARGET_HAS_nor_i32 1
+#define TCG_TARGET_HAS_clz_i32 1
+#define TCG_TARGET_HAS_ctz_i32 1
+#define TCG_TARGET_HAS_ctpop_i32 0
+#define TCG_TARGET_HAS_direct_jump 0
+#define TCG_TARGET_HAS_brcond2 0
+#define TCG_TARGET_HAS_setcond2 0
+#define TCG_TARGET_HAS_qemu_st8_i32 0
+
+/* 64-bit operations */
+#define TCG_TARGET_HAS_movcond_i64 0
+#define TCG_TARGET_HAS_div_i64 1
+#define TCG_TARGET_HAS_rem_i64 1
+#define TCG_TARGET_HAS_div2_i64 0
+#define TCG_TARGET_HAS_rot_i64 1
+#define TCG_TARGET_HAS_deposit_i64 1
+#define TCG_TARGET_HAS_extract_i64 1
+#define TCG_TARGET_HAS_sextract_i64 0
+#define TCG_TARGET_HAS_extract2_i64 0
+#define TCG_TARGET_HAS_extrl_i64_i32 1
+#define TCG_TARGET_HAS_extrh_i64_i32 1
+#define TCG_TARGET_HAS_ext8s_i64 1
+#define TCG_TARGET_HAS_ext16s_i64 1
+#define TCG_TARGET_HAS_ext32s_i64 1
+#define TCG_TARGET_HAS_ext8u_i64 1
+#define TCG_TARGET_HAS_ext16u_i64 1
+#define TCG_TARGET_HAS_ext32u_i64 1
+#define TCG_TARGET_HAS_bswap16_i64 1
+#define TCG_TARGET_HAS_bswap32_i64 1
+#define TCG_TARGET_HAS_bswap64_i64 1
+#define TCG_TARGET_HAS_not_i64 1
+#define TCG_TARGET_HAS_neg_i64 0
+#define TCG_TARGET_HAS_andc_i64 1
+#define TCG_TARGET_HAS_orc_i64 1
+#define TCG_TARGET_HAS_eqv_i64 0
+#define TCG_TARGET_HAS_nand_i64 0
+#define TCG_TARGET_HAS_nor_i64 1
+#define TCG_TARGET_HAS_clz_i64 1
+#define TCG_TARGET_HAS_ctz_i64 1
+#define TCG_TARGET_HAS_ctpop_i64 0
+#define TCG_TARGET_HAS_add2_i64 0
+#define TCG_TARGET_HAS_sub2_i64 0
+#define TCG_TARGET_HAS_mulu2_i64 0
+#define TCG_TARGET_HAS_muls2_i64 0
+#define TCG_TARGET_HAS_muluh_i64 1
+#define TCG_TARGET_HAS_mulsh_i64 1
+
+/* not defined -- call should be eliminated at compile time */
+void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t, uintptr_t);
+
+#define TCG_TARGET_DEFAULT_MO (0)
+
+#define TCG_TARGET_NEED_LDST_LABELS
+
+#define TCG_TARGET_HAS_MEMORY_BSWAP 0
+
+#endif /* LOONGARCH_TCG_TARGET_H */
--
2.43.5