summary refs log tree commit diff homepage
path: root/dev/LibCompiler/Backend
diff options
context:
space:
mode:
author: Amlal El Mahrouss <amlal@nekernel.org> 2025-05-23 03:48:06 +0200
committer: Amlal El Mahrouss <amlal@nekernel.org> 2025-05-23 03:48:06 +0200
commit: 2eed4954c762bb8050e40798c3d9f1d3998324d1 (patch)
tree: 8848d4345fca4d62c23d1e7136eeff2978c9e6c5 /dev/LibCompiler/Backend
parent: 8ad58a91a11380203c4a81fe4dc93e7734631b32 (diff)
feat!(LibCompiler): Codebase and diagram have been improved.
Signed-off-by: Amlal El Mahrouss <amlal@nekernel.org>
Diffstat (limited to 'dev/LibCompiler/Backend')
-rw-r--r-- dev/LibCompiler/Backend/32x0.h | 24
-rw-r--r-- dev/LibCompiler/Backend/64x0.h | 42
-rw-r--r-- dev/LibCompiler/Backend/Aarch64.h | 2
-rw-r--r-- dev/LibCompiler/Backend/X64.h (renamed from dev/LibCompiler/Backend/Amd64.h) | 18
4 files changed, 43 insertions, 43 deletions
diff --git a/dev/LibCompiler/Backend/32x0.h b/dev/LibCompiler/Backend/32x0.h
index 49bb978..240d885 100644
--- a/dev/LibCompiler/Backend/32x0.h
+++ b/dev/LibCompiler/Backend/32x0.h
@@ -1,6 +1,6 @@
/* -------------------------------------------
- Copyright (C) 2024-2025 Amlal EL Mahrous, all rights reserved
+ Copyright (C) 2024-2025 Amlal EL Mahrouss, all rights reserved
------------------------------------------- */
@@ -11,7 +11,7 @@
// @brief 32x0 support.
// @file Backend/32x0.h
-#define kAsmOpcodeDecl(__NAME, __OPCODE, __FUNCT3, __FUNCT7) \
+#define LC_ASM_OPCODE(__NAME, __OPCODE, __FUNCT3, __FUNCT7) \
{.fName = __NAME, .fOpcode = __OPCODE, .fFunct3 = __FUNCT3, .fFunct7 = __FUNCT7},
#define kAsmImmediate 0x01
@@ -37,18 +37,18 @@ struct CpuCode32x0 {
#define kAsmByteStr ".byte" /* 8-bit */
inline std::vector<CpuCode32x0> kOpcodes32x0 = {
- kAsmOpcodeDecl("nop", 0b0100011, 0b000, kAsmNoArgs) // nothing to do. (1C)
- kAsmOpcodeDecl("jmp", 0b1110011, 0b001, kAsmJump) // jump to branch (2C)
- kAsmOpcodeDecl("mov", 0b0100011, 0b101, kAsmImmediate) // move registers (3C)
- kAsmOpcodeDecl("psh", 0b0111011, 0b000, kAsmImmediate) // push to sp (2C)
- kAsmOpcodeDecl("pop", 0b0111011, 0b001, kAsmImmediate) // pop from sp. (1C)
- kAsmOpcodeDecl("lea", 0b0111011, 0b010,
+ LC_ASM_OPCODE("nop", 0b0100011, 0b000, kAsmNoArgs) // nothing to do. (1C)
+ LC_ASM_OPCODE("jmp", 0b1110011, 0b001, kAsmJump) // jump to branch (2C)
+ LC_ASM_OPCODE("mov", 0b0100011, 0b101, kAsmImmediate) // move registers (3C)
+ LC_ASM_OPCODE("psh", 0b0111011, 0b000, kAsmImmediate) // push to sp (2C)
+ LC_ASM_OPCODE("pop", 0b0111011, 0b001, kAsmImmediate) // pop from sp. (1C)
+ LC_ASM_OPCODE("lea", 0b0111011, 0b010,
kAsmImmediate) // setup stack and call, store address to CR (1C).
- kAsmOpcodeDecl("ret", 0b0111011, 0b110,
+ LC_ASM_OPCODE("ret", 0b0111011, 0b110,
kAsmImmediate) // return from procedure (2C).
- kAsmOpcodeDecl("uc", 0b0111111, 0b000, kAsmSyscall) // user call (1C)
- kAsmOpcodeDecl("kc", 0b0111111, 0b001, kAsmSyscall) // kernel call (1C)
- kAsmOpcodeDecl("int", 0b0111111, 0b010, kAsmSyscall) // raise interrupt (1C)
+ LC_ASM_OPCODE("uc", 0b0111111, 0b000, kAsmSyscall) // user call (1C)
+ LC_ASM_OPCODE("kc", 0b0111111, 0b001, kAsmSyscall) // kernel call (1C)
+ LC_ASM_OPCODE("int", 0b0111111, 0b010, kAsmSyscall) // raise interrupt (1C)
};
// \brief 64x0 register prefix
diff --git a/dev/LibCompiler/Backend/64x0.h b/dev/LibCompiler/Backend/64x0.h
index f929995..f7f0332 100644
--- a/dev/LibCompiler/Backend/64x0.h
+++ b/dev/LibCompiler/Backend/64x0.h
@@ -1,6 +1,6 @@
/* -------------------------------------------
- Copyright (C) 2024-2025 Amlal EL Mahrous, all rights reserved
+ Copyright (C) 2024-2025 Amlal EL Mahrouss, all rights reserved
------------------------------------------- */
@@ -12,7 +12,7 @@
// @brief 64x0 support.
// @file Backend/64x0.h
-#define kAsmOpcodeDecl(__NAME, __OPCODE, __FUNCT3, __FUNCT7) \
+#define LC_ASM_OPCODE(__NAME, __OPCODE, __FUNCT3, __FUNCT7) \
{.fName = __NAME, .fOpcode = __OPCODE, .fFunct3 = __FUNCT3, .fFunct7 = __FUNCT7},
#define kAsmImmediate 0x01
@@ -32,29 +32,29 @@ struct CpuOpcode64x0 {
};
inline std::vector<CpuOpcode64x0> kOpcodes64x0 = {
- kAsmOpcodeDecl("nop", 0b0000000, 0b0000000, kAsmNoArgs) // no-operation.
- kAsmOpcodeDecl("np", 0b0000000, 0b0000000, kAsmNoArgs) // no-operation.
- kAsmOpcodeDecl("jlr", 0b1110011, 0b0000111,
+ LC_ASM_OPCODE("nop", 0b0000000, 0b0000000, kAsmNoArgs) // no-operation.
+ LC_ASM_OPCODE("np", 0b0000000, 0b0000000, kAsmNoArgs) // no-operation.
+ LC_ASM_OPCODE("jlr", 0b1110011, 0b0000111,
kAsmJump) // jump to linked return register
- kAsmOpcodeDecl("jrl", 0b1110011, 0b0001111,
+ LC_ASM_OPCODE("jrl", 0b1110011, 0b0001111,
kAsmJump) // jump from return register.
- kAsmOpcodeDecl("mv", 0b0100011, 0b101, kAsmRegToReg) kAsmOpcodeDecl(
- "bg", 0b1100111, 0b111, kAsmRegToReg) kAsmOpcodeDecl("bl", 0b1100111, 0b011, kAsmRegToReg)
- kAsmOpcodeDecl("beq", 0b1100111, 0b000, kAsmRegToReg)
- kAsmOpcodeDecl("bne", 0b1100111, 0b001, kAsmRegToReg)
- kAsmOpcodeDecl("bge", 0b1100111, 0b101, kAsmRegToReg)
- kAsmOpcodeDecl("ble", 0b1100111, 0b100, kAsmRegToReg)
- kAsmOpcodeDecl("stw", 0b0001111, 0b100, kAsmImmediate)
- kAsmOpcodeDecl("ldw", 0b0001111, 0b100, kAsmImmediate)
- kAsmOpcodeDecl("lda", 0b0001111, 0b101, kAsmImmediate)
- kAsmOpcodeDecl("sta", 0b0001111, 0b001, kAsmImmediate)
+ LC_ASM_OPCODE("mv", 0b0100011, 0b101, kAsmRegToReg) LC_ASM_OPCODE(
+ "bg", 0b1100111, 0b111, kAsmRegToReg) LC_ASM_OPCODE("bl", 0b1100111, 0b011, kAsmRegToReg)
+ LC_ASM_OPCODE("beq", 0b1100111, 0b000, kAsmRegToReg)
+ LC_ASM_OPCODE("bne", 0b1100111, 0b001, kAsmRegToReg)
+ LC_ASM_OPCODE("bge", 0b1100111, 0b101, kAsmRegToReg)
+ LC_ASM_OPCODE("ble", 0b1100111, 0b100, kAsmRegToReg)
+ LC_ASM_OPCODE("stw", 0b0001111, 0b100, kAsmImmediate)
+ LC_ASM_OPCODE("ldw", 0b0001111, 0b100, kAsmImmediate)
+ LC_ASM_OPCODE("lda", 0b0001111, 0b101, kAsmImmediate)
+ LC_ASM_OPCODE("sta", 0b0001111, 0b001, kAsmImmediate)
// add/sub without carry flag
- kAsmOpcodeDecl("add", 0b0101011, 0b100, kAsmImmediate)
- kAsmOpcodeDecl("sub", 0b0101011, 0b101, kAsmImmediate)
+ LC_ASM_OPCODE("add", 0b0101011, 0b100, kAsmImmediate)
+ LC_ASM_OPCODE("sub", 0b0101011, 0b101, kAsmImmediate)
// add/sub with carry flag
- kAsmOpcodeDecl("addc", 0b0101011, 0b110, kAsmImmediate)
- kAsmOpcodeDecl("subc", 0b0101011, 0b111, kAsmImmediate)
- kAsmOpcodeDecl("sc", 0b1110011, 0b00, kAsmSyscall)};
+ LC_ASM_OPCODE("addc", 0b0101011, 0b110, kAsmImmediate)
+ LC_ASM_OPCODE("subc", 0b0101011, 0b111, kAsmImmediate)
+ LC_ASM_OPCODE("sc", 0b1110011, 0b00, kAsmSyscall)};
// \brief 64x0 register prefix
// example: r32, r0
diff --git a/dev/LibCompiler/Backend/Aarch64.h b/dev/LibCompiler/Backend/Aarch64.h
index dcafa0a..2676306 100644
--- a/dev/LibCompiler/Backend/Aarch64.h
+++ b/dev/LibCompiler/Backend/Aarch64.h
@@ -1,6 +1,6 @@
/* -------------------------------------------
-Copyright (C) 2024-2025 Amlal EL Mahrous, all rights reserved
+Copyright (C) 2024-2025 Amlal EL Mahrouss, all rights reserved
------------------------------------------- */
diff --git a/dev/LibCompiler/Backend/Amd64.h b/dev/LibCompiler/Backend/X64.h
index ae8458f..f489515 100644
--- a/dev/LibCompiler/Backend/Amd64.h
+++ b/dev/LibCompiler/Backend/X64.h
@@ -1,6 +1,6 @@
/* -------------------------------------------
- Copyright (C) 2024-2025 Amlal EL Mahrous, all rights reserved
+ Copyright (C) 2024-2025 Amlal EL Mahrouss, all rights reserved
------------------------------------------- */
@@ -9,9 +9,9 @@
#include <LibCompiler/Defines.h>
// @brief AMD64 support.
-// @file Backend/Amd64.h
+// @file Backend/X64.h
-#define kAsmOpcodeDecl(__NAME, __OPCODE) {.fName = __NAME, .fOpcode = __OPCODE},
+#define LC_ASM_OPCODE(__NAME, __OPCODE) {.fName = __NAME, .fOpcode = __OPCODE},
typedef char i64_character_t;
typedef uint8_t i64_byte_t;
@@ -39,11 +39,11 @@ struct CpuOpcodeAMD64 {
#define kJumpLimitStandardLimit 0xEB
inline std::vector<CpuOpcodeAMD64> kOpcodesAMD64 = {
- kAsmOpcodeDecl("int", 0xCD) kAsmOpcodeDecl("into", 0xCE) kAsmOpcodeDecl("intd", 0xF1)
- kAsmOpcodeDecl("int3", 0xC3) kAsmOpcodeDecl("iret", 0xCF) kAsmOpcodeDecl("retf", 0xCB)
- kAsmOpcodeDecl("retn", 0xC3) kAsmOpcodeDecl("ret", 0xC3) kAsmOpcodeDecl("sti", 0xfb)
- kAsmOpcodeDecl("cli", 0xfa) kAsmOpcodeDecl("hlt", 0xf4) kAsmOpcodeDecl("nop", 0x90)
- kAsmOpcodeDecl("mov", 0x48) kAsmOpcodeDecl("call", 0xFF)
- kAsmOpcodeDecl("syscall", 0x0F) kAsmOpcodeDecl("xor", 0x48)};
+ LC_ASM_OPCODE("int", 0xCD) LC_ASM_OPCODE("into", 0xCE) LC_ASM_OPCODE("intd", 0xF1)
+ LC_ASM_OPCODE("int3", 0xC3) LC_ASM_OPCODE("iret", 0xCF) LC_ASM_OPCODE("retf", 0xCB)
+ LC_ASM_OPCODE("retn", 0xC3) LC_ASM_OPCODE("ret", 0xC3) LC_ASM_OPCODE("sti", 0xfb)
+ LC_ASM_OPCODE("cli", 0xfa) LC_ASM_OPCODE("hlt", 0xf4) LC_ASM_OPCODE("nop", 0x90)
+ LC_ASM_OPCODE("mov", 0x48) LC_ASM_OPCODE("call", 0xFF)
+ LC_ASM_OPCODE("syscall", 0x0F) LC_ASM_OPCODE("xor", 0x48)};
#define kAsmRegisterLimit 16