Make kernel boot in x32

Alex 2023-05-11 18:34:21 +03:00
parent 15637438da
commit 149d8ba790
Signed by untrusted user who does not match committer: enderice2
GPG Key ID: EACC3AD603BAB4DD
19 changed files with 803 additions and 530 deletions

View File

@ -23,7 +23,7 @@
#include "multiboot2.h"
#include "../../../../../kernel.h"
EXTERNC void multiboot_main(uint64_t Magic, uint64_t Info)
EXTERNC void multiboot_main(uintptr_t Magic, uintptr_t Info)
{
if (Info == NULL || Magic == NULL)
{

View File

@ -0,0 +1,294 @@
/*
This file is part of Fennix Kernel.
Fennix Kernel is free software: you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
Fennix Kernel is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Fennix Kernel. If not, see <https://www.gnu.org/licenses/>.
*/
#include <memory.hpp>
#include <convert.h>
#include <debug.h>
namespace Memory
{
bool Virtual::Check(void *VirtualAddress, PTFlag Flag, MapType Type)
{
// 0x1000 aligned
uintptr_t Address = (uintptr_t)VirtualAddress;
Address &= 0xFFFFFFFFFFFFF000;
PageMapIndexer Index = PageMapIndexer(Address);
PageMapLevel4 PML4 = this->Table->Entries[Index.PMLIndex];
PageDirectoryPointerTableEntryPtr *PDPTE = nullptr;
PageDirectoryEntryPtr *PDE = nullptr;
PageTableEntryPtr *PTE = nullptr;
if ((PML4.raw & Flag) > 0)
{
PDPTE = (PageDirectoryPointerTableEntryPtr *)((uintptr_t)PML4.GetAddress() << 12);
if (PDPTE)
{
if ((PDPTE->Entries[Index.PDPTEIndex].Present))
{
if (Type == MapType::OneGB && PDPTE->Entries[Index.PDPTEIndex].PageSize)
return true;
PDE = (PageDirectoryEntryPtr *)((uintptr_t)PDPTE->Entries[Index.PDPTEIndex].GetAddress() << 12);
if (PDE)
{
if (Type == MapType::TwoMB && PDE->Entries[Index.PDEIndex].PageSize)
return true;
if ((PDE->Entries[Index.PDEIndex].Present))
{
PTE = (PageTableEntryPtr *)((uintptr_t)PDE->Entries[Index.PDEIndex].GetAddress() << 12);
if (PTE)
{
if ((PTE->Entries[Index.PTEIndex].Present))
return true;
}
}
}
}
}
}
return false;
}
void *Virtual::GetPhysical(void *VirtualAddress)
{
// 0x1000 aligned
uintptr_t Address = (uintptr_t)VirtualAddress;
Address &= 0xFFFFFFFFFFFFF000;
PageMapIndexer Index = PageMapIndexer(Address);
PageMapLevel4 PML4 = this->Table->Entries[Index.PMLIndex];
PageDirectoryPointerTableEntryPtr *PDPTE = nullptr;
PageDirectoryEntryPtr *PDE = nullptr;
PageTableEntryPtr *PTE = nullptr;
if (PML4.Present)
{
PDPTE = (PageDirectoryPointerTableEntryPtr *)((uintptr_t)PML4.GetAddress() << 12);
if (PDPTE)
{
if (PDPTE->Entries[Index.PDPTEIndex].Present)
{
if (PDPTE->Entries[Index.PDPTEIndex].PageSize)
return (void *)((uintptr_t)PDPTE->Entries[Index.PDPTEIndex].GetAddress() << 12);
PDE = (PageDirectoryEntryPtr *)((uintptr_t)PDPTE->Entries[Index.PDPTEIndex].GetAddress() << 12);
if (PDE)
{
if (PDE->Entries[Index.PDEIndex].Present)
{
if (PDE->Entries[Index.PDEIndex].PageSize)
return (void *)((uintptr_t)PDE->Entries[Index.PDEIndex].GetAddress() << 12);
PTE = (PageTableEntryPtr *)((uintptr_t)PDE->Entries[Index.PDEIndex].GetAddress() << 12);
if (PTE)
{
if (PTE->Entries[Index.PTEIndex].Present)
return (void *)((uintptr_t)PTE->Entries[Index.PTEIndex].GetAddress() << 12);
}
}
}
}
}
}
return nullptr;
}
void Virtual::Map(void *VirtualAddress, void *PhysicalAddress, uint64_t Flags, MapType Type)
{
SmartLock(this->MemoryLock);
if (unlikely(!this->Table))
{
error("No page table");
return;
}
Flags |= PTFlag::P;
PageMapIndexer Index = PageMapIndexer((uintptr_t)VirtualAddress);
// Keep only bits 0-5 (Present through Accessed); the remaining flags apply only to page table entries
uint64_t DirectoryFlags = Flags & 0x3F;
PageMapLevel4 *PML4 = &this->Table->Entries[Index.PMLIndex];
PageDirectoryPointerTableEntryPtr *PDPTEPtr = nullptr;
if (!PML4->Present)
{
PDPTEPtr = (PageDirectoryPointerTableEntryPtr *)KernelAllocator.RequestPages(TO_PAGES(sizeof(PageDirectoryPointerTableEntryPtr) + 1));
memset(PDPTEPtr, 0, sizeof(PageDirectoryPointerTableEntryPtr));
PML4->Present = true;
PML4->SetAddress((uintptr_t)PDPTEPtr >> 12);
}
else
PDPTEPtr = (PageDirectoryPointerTableEntryPtr *)(PML4->GetAddress() << 12);
PML4->raw |= DirectoryFlags;
PageDirectoryPointerTableEntry *PDPTE = &PDPTEPtr->Entries[Index.PDPTEIndex];
if (Type == MapType::OneGB)
{
PDPTE->raw |= Flags;
PDPTE->PageSize = true;
PDPTE->SetAddress((uintptr_t)PhysicalAddress >> 12);
debug("Mapped 1GB page at %p to %p", VirtualAddress, PhysicalAddress);
return;
}
PageDirectoryEntryPtr *PDEPtr = nullptr;
if (!PDPTE->Present)
{
PDEPtr = (PageDirectoryEntryPtr *)KernelAllocator.RequestPages(TO_PAGES(sizeof(PageDirectoryEntryPtr) + 1));
memset(PDEPtr, 0, sizeof(PageDirectoryEntryPtr));
PDPTE->Present = true;
PDPTE->SetAddress((uintptr_t)PDEPtr >> 12);
}
else
PDEPtr = (PageDirectoryEntryPtr *)(PDPTE->GetAddress() << 12);
PDPTE->raw |= DirectoryFlags;
PageDirectoryEntry *PDE = &PDEPtr->Entries[Index.PDEIndex];
if (Type == MapType::TwoMB)
{
PDE->raw |= Flags;
PDE->PageSize = true;
PDE->SetAddress((uintptr_t)PhysicalAddress >> 12);
debug("Mapped 2MB page at %p to %p", VirtualAddress, PhysicalAddress);
return;
}
PageTableEntryPtr *PTEPtr = nullptr;
if (!PDE->Present)
{
PTEPtr = (PageTableEntryPtr *)KernelAllocator.RequestPages(TO_PAGES(sizeof(PageTableEntryPtr) + 1));
memset(PTEPtr, 0, sizeof(PageTableEntryPtr));
PDE->Present = true;
PDE->SetAddress((uintptr_t)PTEPtr >> 12);
}
else
PTEPtr = (PageTableEntryPtr *)(PDE->GetAddress() << 12);
PDE->raw |= DirectoryFlags;
PageTableEntry *PTE = &PTEPtr->Entries[Index.PTEIndex];
PTE->Present = true;
PTE->raw |= Flags;
PTE->SetAddress((uintptr_t)PhysicalAddress >> 12);
#if defined(a64)
CPU::x64::invlpg(VirtualAddress);
#elif defined(a32)
CPU::x32::invlpg(VirtualAddress);
#elif defined(aa64)
asmv("dsb sy");
asmv("tlbi vae1is, %0"
:
: "r"(VirtualAddress)
: "memory");
asmv("dsb sy");
asmv("isb");
#endif
#ifdef DEBUG
/* https://stackoverflow.com/a/3208376/9352057 */
#define BYTE_TO_BINARY_PATTERN "%c%c%c%c%c%c%c%c"
#define BYTE_TO_BINARY(byte) \
(byte & 0x80 ? '1' : '0'), \
(byte & 0x40 ? '1' : '0'), \
(byte & 0x20 ? '1' : '0'), \
(byte & 0x10 ? '1' : '0'), \
(byte & 0x08 ? '1' : '0'), \
(byte & 0x04 ? '1' : '0'), \
(byte & 0x02 ? '1' : '0'), \
(byte & 0x01 ? '1' : '0')
if (!this->Check(VirtualAddress, (PTFlag)Flags, Type)) // quick workaround just to see where it fails
warn("Failed to map v:%#lx p:%#lx with flags: " BYTE_TO_BINARY_PATTERN, VirtualAddress, PhysicalAddress, BYTE_TO_BINARY(Flags));
#endif
}
void Virtual::Unmap(void *VirtualAddress, MapType Type)
{
SmartLock(this->MemoryLock);
if (!this->Table)
{
error("No page table");
return;
}
PageMapIndexer Index = PageMapIndexer((uintptr_t)VirtualAddress);
PageMapLevel4 *PML4 = &this->Table->Entries[Index.PMLIndex];
if (!PML4->Present)
{
error("Page %#lx not present", PML4->GetAddress());
return;
}
PageDirectoryPointerTableEntryPtr *PDPTEPtr = (PageDirectoryPointerTableEntryPtr *)((uintptr_t)PML4->Address << 12);
PageDirectoryPointerTableEntry *PDPTE = &PDPTEPtr->Entries[Index.PDPTEIndex];
if (!PDPTE->Present)
{
error("Page %#lx not present", PDPTE->GetAddress());
return;
}
if (Type == MapType::OneGB && PDPTE->PageSize)
{
PDPTE->Present = false;
return;
}
PageDirectoryEntryPtr *PDEPtr = (PageDirectoryEntryPtr *)((uintptr_t)PDPTE->Address << 12);
PageDirectoryEntry *PDE = &PDEPtr->Entries[Index.PDEIndex];
if (!PDE->Present)
{
error("Page %#lx not present", PDE->GetAddress());
return;
}
if (Type == MapType::TwoMB && PDE->PageSize)
{
PDE->Present = false;
return;
}
PageTableEntryPtr *PTEPtr = (PageTableEntryPtr *)((uintptr_t)PDE->Address << 12);
PageTableEntry PTE = PTEPtr->Entries[Index.PTEIndex];
if (!PTE.Present)
{
error("Page %#lx not present", PTE.GetAddress());
return;
}
PTE.Present = false;
PTEPtr->Entries[Index.PTEIndex] = PTE;
#if defined(a64)
CPU::x64::invlpg(VirtualAddress);
#elif defined(a32)
CPU::x32::invlpg(VirtualAddress);
#elif defined(aa64)
asmv("dsb sy");
asmv("tlbi vae1is, %0"
:
: "r"(VirtualAddress)
: "memory");
asmv("dsb sy");
asmv("isb");
#endif
}
}
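The walk above relies on PageMapIndexer having already split the canonical virtual address into four 9-bit indices (PMLIndex, PDPTEIndex, PDEIndex, PTEIndex). For reference, here is a minimal stand-alone sketch of that split under the standard 4-level x86-64 layout; the struct and function names are illustrative and not part of the kernel:
#include <cstdint>
/* Illustrative sketch (not the kernel's PageMapIndexer): how a 4-level
   x86-64 walk derives its table indices from a virtual address. */
struct Amd64Indices
{
    std::uint16_t PML4;  /* bits 39-47 */
    std::uint16_t PDPTE; /* bits 30-38 */
    std::uint16_t PDE;   /* bits 21-29 */
    std::uint16_t PTE;   /* bits 12-20 */
};
static Amd64Indices SplitVirtual64(std::uint64_t Address)
{
    Amd64Indices Idx;
    Idx.PML4 = (std::uint16_t)((Address >> 39) & 0x1FF);
    Idx.PDPTE = (std::uint16_t)((Address >> 30) & 0x1FF);
    Idx.PDE = (std::uint16_t)((Address >> 21) & 0x1FF);
    Idx.PTE = (std::uint16_t)((Address >> 12) & 0x1FF);
    return Idx;
}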

View File

@ -23,7 +23,7 @@
#include "multiboot2.h"
#include "../../../../../kernel.h"
EXTERNC void multiboot_main(uint32_t Magic, uint32_t Info)
EXTERNC void multiboot_main(uintptr_t Magic, uintptr_t Info)
{
if (Info == NULL || Magic == NULL)
{
@ -217,8 +217,10 @@ EXTERNC void multiboot_main(uint32_t Magic, uint32_t Info)
case MULTIBOOT_TAG_TYPE_ELF_SECTIONS:
{
multiboot_tag_elf_sections *elf = (multiboot_tag_elf_sections *)Tag;
fixme("elf_sections->[num=%d, size=%d, entsize=%d, shndx=%d]",
elf->num, elf->size, elf->entsize, elf->shndx);
mb2binfo.Kernel.Symbols.Num = elf->num;
mb2binfo.Kernel.Symbols.EntSize = elf->entsize;
mb2binfo.Kernel.Symbols.Shndx = elf->shndx;
mb2binfo.Kernel.Symbols.Sections = (uintptr_t)&elf->sections;
break;
}
case MULTIBOOT_TAG_TYPE_APM:

View File

@ -23,8 +23,24 @@ align 0x1000
global BootPageTable
BootPageTable:
dd 0x00000083
dd 0x00400083
dd 0x00800083
dd 0x00C00083
dd 0x01000083
dd 0x01400083
dd 0x01800083
dd 0x01C00083
dd 0x02000083
dd 0x02400083
times (KERNEL_PAGE_NUMBER - 10) dd 0
dd 0x00000083
times (KERNEL_PAGE_NUMBER - 2) dd 0
dd 0x00000083
dd 0x00000083
times (1024 - KERNEL_PAGE_NUMBER - 2) dd 0
dd 0x00400083
dd 0x00800083
dd 0x00C00083
dd 0x01000083
dd 0x01400083
dd 0x01800083
dd 0x01C00083
dd 0x02000083
dd 0x02400083
times (1024 - KERNEL_PAGE_NUMBER - 10) dd 0
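Each raw dd value in the new table is a PSE page-directory entry: bits 22-31 hold a 4 MiB-aligned physical base and 0x83 sets Present, Read/Write and PageSize, so the first ten 4 MiB of physical memory appear to be mapped once at the identity range and again starting at KERNEL_PAGE_NUMBER for the higher half. A small sketch (illustrative, not part of the boot code) of how those constants decompose; the static_asserts reproduce entries listed above:
#include <cstdint>
/* Sketch only: decomposition of the raw BootPageTable entries above.
   0x83 = Present (bit 0) | Read/Write (bit 1) | PageSize, 4 MiB (bit 7). */
constexpr std::uint32_t BootLargePDE(std::uint32_t PhysicalBase /* 4 MiB aligned */)
{
    return (PhysicalBase & 0xFFC00000) | 0x83;
}
static_assert(BootLargePDE(0x00000000) == 0x00000083, "first identity entry");
static_assert(BootLargePDE(0x00400000) == 0x00400083, "second 4 MiB frame");
static_assert(BootLargePDE(0x02400000) == 0x02400083, "tenth 4 MiB frame");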

View File

@ -18,50 +18,3 @@
.intel_syntax noprefix
.code32
.equ PAGE_TABLE_SIZE, 0x4
.section .bootstrap.data
.align 0x1000
.global BootPageTable
BootPageTable:
.rept 0x10000 /* 0x4000 bytes will be used in UpdatePageTable */
.long 0
.endr
.section .bootstrap.text
.global UpdatePageTable
UpdatePageTable:
mov edi, (BootPageTable + 0x0000) /* First PML4E */
mov eax, (BootPageTable + 0x1000) /* First PDPTE */
or eax, 0b11 /* Bitwise OR on rax (PDPTE) with 11b (Present, Write) */
mov dword [edi], eax /* Write 11b to PML4E */
mov edi, (BootPageTable + 0x1000) /* First PDPTE */
mov eax, (BootPageTable + 0x2000) /* First PDE */
or eax, 0b11 /* Bitwise OR on rax (PDE) with 11b (Present, Write) */
mov ecx, PAGE_TABLE_SIZE /* For loop instruction */
mov ebx, 0x0 /* Value to store in the next 4 bytes */
.FillPageTableLevel3:
mov dword [edi], eax /* Store modified PDE in PDPTE */
mov dword [edi + 4], ebx /* Store the rbx value in the next 4 bytes */
add eax, 0x1000 /* Increment (page size) */
adc ebx, 0 /* Add 0 to carry flag */
add edi, 8 /* Add 8 to rdi (next PDE) */
loop .FillPageTableLevel3 /* Loop until rcx is 0 */
mov edi, (BootPageTable + 0x2000) /* First PDE */
mov eax, 0b10000011 /* Present, Write, Large Page */
mov ecx, (512 * PAGE_TABLE_SIZE) /* For loop instruction */
mov ebx, 0x0 /* Value to store in the next 4 bytes */
.FillPageTableLevel2:
mov dword [edi], eax /* Store modified PDE in PDPTE */
mov dword [edi + 4], ebx /* Store the rbx value in the next 4 bytes */
add eax, 1 << 21 /* Increment (page size) */
adc ebx, 0 /* Add 0 (carry flag) to rbx to increment if there was a carry */
add edi, 8 /* Add 8 to rdi (next PDE) */
loop .FillPageTableLevel2 /* Loop until rcx is 0 */
ret

View File

@ -18,100 +18,3 @@
.intel_syntax noprefix
.code32
.equ KERNEL_STACK_SIZE, 0x4000 /* 16KB */
.extern DetectCPUID
.extern Detect64Bit
.extern DetectPSE
.extern DetectPAE
.extern multiboot_main
.extern LoadGDT32
.extern BootPageTable
.extern UpdatePageTable
.extern GDT64.Ptr
.extern GDT64.Code
.extern GDT64.Data
.section .bootstrap.data
MB_HeaderMagic:
.quad 0
MB_HeaderInfo:
.quad 0
.section .bootstrap.text
.global Multiboot2_start
Multiboot2_start:
cli
mov [MB_HeaderMagic], eax
mov [MB_HeaderInfo], ebx
call DetectCPUID
cmp eax, 0
je $
call Detect64Bit
cmp eax, 0
je $
call DetectPSE
cmp eax, 0
je $
call DetectPAE
cmp eax, 0
je $
mov ecx, cr4
or ecx, 0x00000010 /* Set PSE in CR4 */
or ecx, 0x00000020 /* Set PAE in CR4 */
mov cr4, ecx
call LoadGDT32
call UpdatePageTable
mov ecx, BootPageTable
mov cr3, ecx
mov ecx, 0xC0000080 /* EFER */
rdmsr
or eax, 0x800 | 0x100 | 0x1 /* Set LME, LMA, SCE */
wrmsr
mov ecx, cr0
or ecx, 0x80000001 /* Set PG and PE in CR0 */
mov cr0, ecx
lgdt [GDT64.Ptr]
jmp GDT64.Code:HigherHalfStart
.extern UpdatePageTable64
.code64
HigherHalfStart:
mov ax, GDT64.Data
mov ds, ax
mov es, ax
mov fs, ax
mov gs, ax
mov ss, ax
call UpdatePageTable64
mov rsp, KernelStack + KERNEL_STACK_SIZE
mov rbp, 0
mov rdi, [MB_HeaderMagic]
mov rsi, [MB_HeaderInfo]
push rsi
push rdi
call multiboot_main
.Hang:
hlt
jmp .Hang
.section .bootstrap.bss
.align 16
KernelStack:
.space KERNEL_STACK_SIZE

View File

@ -0,0 +1,197 @@
/*
This file is part of Fennix Kernel.
Fennix Kernel is free software: you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
Fennix Kernel is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Fennix Kernel. If not, see <https://www.gnu.org/licenses/>.
*/
#include <memory.hpp>
#include <convert.h>
#include <debug.h>
namespace Memory
{
bool Virtual::Check(void *VirtualAddress, PTFlag Flag, MapType Type)
{
// 0x1000 aligned
uintptr_t Address = (uintptr_t)VirtualAddress;
Address &= 0xFFFFFFFFFFFFF000;
PageMapIndexer Index = PageMapIndexer(Address);
PageDirectoryEntry PDE = this->Table->Entries[Index.PDEIndex];
PageTableEntryPtr *PTE = nullptr;
if ((PDE.raw & Flag) > 0)
{
if (Type == MapType::FourMB && PDE.PageSize)
return true;
PTE = (PageTableEntryPtr *)((uintptr_t)PDE.GetAddress() << 12);
if (PTE)
{
if ((PTE->Entries[Index.PTEIndex].Present))
return true;
}
}
return false;
}
void *Virtual::GetPhysical(void *VirtualAddress)
{
// 0x1000 aligned
uintptr_t Address = (uintptr_t)VirtualAddress;
Address &= 0xFFFFFFFFFFFFF000;
PageMapIndexer Index = PageMapIndexer(Address);
PageDirectoryEntry PDE = this->Table->Entries[Index.PDEIndex];
PageTableEntryPtr *PTE = nullptr;
if (PDE.Present)
{
if (PDE.PageSize)
return (void *)((uintptr_t)PDE.GetAddress() << 12);
PTE = (PageTableEntryPtr *)((uintptr_t)PDE.GetAddress() << 12);
if (PTE)
{
if (PTE->Entries[Index.PTEIndex].Present)
return (void *)((uintptr_t)PTE->Entries[Index.PTEIndex].GetAddress() << 12);
}
}
return nullptr;
}
void Virtual::Map(void *VirtualAddress, void *PhysicalAddress, uint64_t Flags, MapType Type)
{
SmartLock(this->MemoryLock);
if (unlikely(!this->Table))
{
error("No page table");
return;
}
Flags |= PTFlag::P;
PageMapIndexer Index = PageMapIndexer((uintptr_t)VirtualAddress);
// Keep only bits 0-5 (Present through Accessed); the remaining flags apply only to page table entries
uint64_t DirectoryFlags = Flags & 0x3F;
PageDirectoryEntry *PDE = &this->Table->Entries[Index.PDEIndex];
if (Type == MapType::FourMB)
{
PDE->raw |= Flags;
PDE->PageSize = true;
PDE->SetAddress((uintptr_t)PhysicalAddress >> 12);
debug("Mapped 4MB page at %p to %p", VirtualAddress, PhysicalAddress);
return;
}
PageTableEntryPtr *PTEPtr = nullptr;
if (!PDE->Present)
{
PTEPtr = (PageTableEntryPtr *)KernelAllocator.RequestPages(TO_PAGES(sizeof(PageTableEntryPtr) + 1));
memset(PTEPtr, 0, sizeof(PageTableEntryPtr));
PDE->Present = true;
PDE->SetAddress((uintptr_t)PTEPtr >> 12);
}
else
PTEPtr = (PageTableEntryPtr *)(PDE->GetAddress() << 12);
PDE->raw |= DirectoryFlags;
PageTableEntry *PTE = &PTEPtr->Entries[Index.PTEIndex];
PTE->Present = true;
PTE->raw |= Flags;
PTE->SetAddress((uintptr_t)PhysicalAddress >> 12);
#if defined(a64)
CPU::x64::invlpg(VirtualAddress);
#elif defined(a32)
CPU::x32::invlpg(VirtualAddress);
#elif defined(aa64)
asmv("dsb sy");
asmv("tlbi vae1is, %0"
:
: "r"(VirtualAddress)
: "memory");
asmv("dsb sy");
asmv("isb");
#endif
#ifdef DEBUG
/* https://stackoverflow.com/a/3208376/9352057 */
#define BYTE_TO_BINARY_PATTERN "%c%c%c%c%c%c%c%c"
#define BYTE_TO_BINARY(byte) \
(byte & 0x80 ? '1' : '0'), \
(byte & 0x40 ? '1' : '0'), \
(byte & 0x20 ? '1' : '0'), \
(byte & 0x10 ? '1' : '0'), \
(byte & 0x08 ? '1' : '0'), \
(byte & 0x04 ? '1' : '0'), \
(byte & 0x02 ? '1' : '0'), \
(byte & 0x01 ? '1' : '0')
if (!this->Check(VirtualAddress, (PTFlag)Flags, Type)) // quick workaround just to see where it fails
warn("Failed to map v:%#lx p:%#lx with flags: " BYTE_TO_BINARY_PATTERN, VirtualAddress, PhysicalAddress, BYTE_TO_BINARY(Flags));
#endif
}
void Virtual::Unmap(void *VirtualAddress, MapType Type)
{
SmartLock(this->MemoryLock);
if (!this->Table)
{
error("No page table");
return;
}
PageMapIndexer Index = PageMapIndexer((uintptr_t)VirtualAddress);
PageDirectoryEntry *PDE = &this->Table->Entries[Index.PDEIndex];
if (!PDE->Present)
{
error("Page %#lx not present", PDE->GetAddress());
return;
}
if (Type == MapType::FourMB && PDE->PageSize)
{
PDE->Present = false;
return;
}
PageTableEntryPtr *PTEPtr = (PageTableEntryPtr *)((uintptr_t)PDE->Address << 12);
PageTableEntry PTE = PTEPtr->Entries[Index.PTEIndex];
if (!PTE.Present)
{
error("Page %#lx not present", PTE.GetAddress());
return;
}
PTE.Present = false;
PTEPtr->Entries[Index.PTEIndex] = PTE;
#if defined(a64)
CPU::x64::invlpg(VirtualAddress);
#elif defined(a32)
CPU::x32::invlpg(VirtualAddress);
#elif defined(aa64)
asmv("dsb sy");
asmv("tlbi vae1is, %0"
:
: "r"(VirtualAddress)
: "memory");
asmv("dsb sy");
asmv("isb");
#endif
}
}
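A hypothetical usage sketch of the a32 interface implemented above, assuming the kernel's global KernelPageTable and the usual read/write flag name PTFlag::RW; the function name and addresses are illustrative only:
/* Hypothetical example: map, verify and unmap one 4 MiB page. */
static void ExampleFourMBMapping()
{
    Memory::Virtual vmm(KernelPageTable); /* assumes the kernel's global page table */
    void *Phys = (void *)0x00800000; /* 4 MiB-aligned physical frame (example) */
    void *Virt = (void *)0xC0800000; /* 4 MiB-aligned virtual address (example) */
    vmm.Map(Virt, Phys, Memory::PTFlag::RW, Memory::Virtual::MapType::FourMB);
    if (vmm.Check(Virt, Memory::PTFlag::P, Memory::Virtual::MapType::FourMB))
        debug("mapped %p -> %p", Virt, vmm.GetPhysical(Virt));
    vmm.Unmap(Virt, Memory::Virtual::MapType::FourMB);
}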

View File

@ -17,26 +17,38 @@
#include <smp.hpp>
#include <ints.hpp>
#include <memory.hpp>
#include <ints.hpp>
#include <assert.h>
#include <cpu.hpp>
#include <atomic>
#include "../../../kernel.h"
volatile bool CPUEnabled = false;
enum SMPTrampolineAddress
{
PAGE_TABLE = 0x500,
START_ADDR = 0x520,
STACK = 0x570,
GDT = 0x580,
IDT = 0x590,
CORE = 0x600,
TRAMPOLINE_START = 0x2000
};
std::atomic_bool CPUEnabled = false;
#pragma GCC diagnostic ignored "-Wmissing-field-initializers"
static __aligned(0x1000) CPUData CPUs[MAX_CPU] = {0};
CPUData *GetCPU(uint64_t id) { return &CPUs[id]; }
SafeFunction CPUData *GetCPU(uint64_t id) { return &CPUs[id]; }
CPUData *GetCurrentCPU()
SafeFunction CPUData *GetCurrentCPU()
{
uint64_t ret = 0;
if (!(&CPUs[ret])->IsActive)
{
error("CPU %d is not active!", ret);
// error("CPU %d is not active!", ret); FIXME
return &CPUs[0];
}
assert((&CPUs[ret])->Checksum == CPU_DATA_CHECKSUM);

View File

@ -227,6 +227,7 @@ SafeFunction void PageFaultExceptionHandler(CHArchTrapFrame *Frame)
if (Present)
{
#if defined(a64)
uintptr_t CheckPageFaultLinearAddress = (uintptr_t)CheckPageFaultAddress;
CheckPageFaultLinearAddress &= 0xFFFFFFFFFFFFF000;
debug("%#lx -> %#lx", CheckPageFaultAddress, CheckPageFaultLinearAddress);
@ -298,6 +299,7 @@ SafeFunction void PageFaultExceptionHandler(CHArchTrapFrame *Frame)
PTE->Entries[Index.PTEIndex].ProtectionKey,
PTE->Entries[Index.PTEIndex].ExecuteDisable ? "1" : "0",
PTE->Entries[Index.PTEIndex].GetAddress() << 12);
#endif
}
}
#endif

View File

@ -435,6 +435,7 @@ namespace CrashHandler
{
for (int PMLIndex = 0; PMLIndex < 512; PMLIndex++)
{
#if defined(a64)
Memory::PageMapLevel4 PML4 = BasePageTable->Entries[PMLIndex];
EHPrint("\e888888# \eAABBCC%03d-%03d-%03d-%03d\e4500F5: P:%s RW:%s US:%s PWT:%s PCB:%s A:%s NX:%s Address:\e888888%#lx\n",
PMLIndex, 0, 0, 0,
@ -514,6 +515,7 @@ namespace CrashHandler
}
}
}
#endif
}
}
}

View File

@ -281,6 +281,7 @@ SafeFunction void UserModeExceptionHandler(CHArchTrapFrame *Frame)
if (Present)
{
#if defined(a64)
uintptr_t CheckPageFaultLinearAddress = (uintptr_t)CheckPageFaultAddress;
CheckPageFaultLinearAddress &= 0xFFFFFFFFFFFFF000;
debug("%#lx -> %#lx", CheckPageFaultAddress, CheckPageFaultLinearAddress);
@ -345,6 +346,7 @@ SafeFunction void UserModeExceptionHandler(CHArchTrapFrame *Frame)
PTE->Entries[Index.PTEIndex].ProtectionKey,
PTE->Entries[Index.PTEIndex].ExecuteDisable ? "1" : "0",
PTE->Entries[Index.PTEIndex].GetAddress() << 12);
#endif
}
}
}

View File

@ -287,6 +287,7 @@ NIF void MapKernel(PageTable *PT)
NIF void InitializeMemoryManagement()
{
#ifdef DEBUG
#ifndef a32
for (uint64_t i = 0; i < bInfo.Memory.Entries; i++)
{
uintptr_t Base = r_cst(uintptr_t, bInfo.Memory.Entry[i].BaseAddress);
@ -330,7 +331,8 @@ NIF void InitializeMemoryManagement()
End,
Type);
}
#endif
#endif // a32
#endif // DEBUG
trace("Initializing Physical Memory Manager");
// KernelAllocator = Physical(); <- Already called in the constructor
KernelAllocator.Init();
@ -364,14 +366,13 @@ NIF void InitializeMemoryManagement()
}
else if (strcmp(CPU::Vendor(), x86_CPUID_VENDOR_INTEL) == 0)
{
CPU::x86::Intel::CPUID0x80000001 cpuid;
CPU::x86::Intel::CPUID0x00000001 cpuid;
cpuid.Get();
fixme("Intel PSE support");
PSESupport = cpuid.EDX.PSE;
}
if (Page1GBSupport && PSESupport)
if (PSESupport)
{
debug("1GB Page Support Enabled");
#if defined(a64)
CPU::x64::CR4 cr4 = CPU::x64::readcr4();
cr4.PSE = 1;
@ -382,8 +383,14 @@ NIF void InitializeMemoryManagement()
CPU::x32::writecr4(cr4);
#elif defined(aa64)
#endif
trace("PSE Support Enabled");
}
#ifdef DEBUG
if (Page1GBSupport)
debug("1GB Page Support Enabled");
#endif
MapFromZero(KernelPageTable);
MapFramebuffer(KernelPageTable);
MapKernel(KernelPageTable);
@ -615,7 +622,7 @@ void free(void *Address)
#endif
}
void *operator new(size_t Size)
void *operator new(std::size_t Size)
{
#ifdef DEBUG_ALLOCATIONS_SL
SmartLockClass lock___COUNTER__(OperatorAllocatorLock, (KernelSymbolTable ? KernelSymbolTable->GetSymbolFromAddress((uintptr_t)__builtin_return_address(0)) : "Unknown"));
@ -646,7 +653,7 @@ void *operator new(size_t Size)
return ret;
}
void *operator new[](size_t Size)
void *operator new[](std::size_t Size)
{
#ifdef DEBUG_ALLOCATIONS_SL
SmartLockClass lock___COUNTER__(OperatorAllocatorLock, (KernelSymbolTable ? KernelSymbolTable->GetSymbolFromAddress((uintptr_t)__builtin_return_address(0)) : "Unknown"));
@ -677,7 +684,7 @@ void *operator new[](size_t Size)
return ret;
}
void *operator new(unsigned long Size, std::align_val_t Alignment)
void *operator new(std::size_t Size, std::align_val_t Alignment)
{
#ifdef DEBUG_ALLOCATIONS_SL
SmartLockClass lock___COUNTER__(OperatorAllocatorLock, (KernelSymbolTable ? KernelSymbolTable->GetSymbolFromAddress((uintptr_t)__builtin_return_address(0)) : "Unknown"));

View File

@ -37,8 +37,6 @@ namespace Memory
this->PTEIndex = Address & 0x3FF;
Address >>= 10;
this->PDEIndex = Address & 0x3FF;
Address >>= 10;
this->PDPTEIndex = Address & 0x3FF;
#elif defined(aa64)
#endif
}
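On a32 the constructor above therefore performs the classic two-level i386 split (after the page-offset shift by 12 that precedes this hunk): bits 12-21 give the page-table index and bits 22-31 the page-directory index, 1024 entries each. An equivalent stand-alone sketch with illustrative names:
#include <cstdint>
/* Sketch of the two-level split computed by the a32 branch above. */
static void SplitVirtual32(std::uint32_t Address,
                           std::uint32_t &PDEIndex,
                           std::uint32_t &PTEIndex)
{
    PTEIndex = (Address >> 12) & 0x3FF; /* bits 12-21 */
    PDEIndex = (Address >> 22) & 0x3FF; /* bits 22-31 */
}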

View File

@ -101,6 +101,14 @@ namespace Memory
debug("Reserving RSDT...");
this->ReservePages((void *)bInfo.RSDP, TO_PAGES(sizeof(BootInfo::RSDPInfo)));
#if defined(a32)
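/* Presumably 0x2800000 (40 MiB) matches the ten 4 MiB pages the 32-bit
   BootPageTable identity-maps at early boot, so ACPI tables above that
   address are not yet mapped when this runs; hence the FIXME below. */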
if ((uintptr_t)ACPIPtr > 0x2800000) /* FIXME */
{
error("ACPI table is located above 0x2800000, which is not mapped.");
return;
}
#endif
size_t TableSize = ((ACPIPtr->Length - sizeof(ACPI::ACPI::ACPIHeader)) / (XSDT ? 8 : 4));
debug("Reserving %d ACPI tables...", TableSize);

View File

@ -22,276 +22,6 @@
namespace Memory
{
bool Virtual::Check(void *VirtualAddress, PTFlag Flag, MapType Type)
{
// 0x1000 aligned
uintptr_t Address = (uintptr_t)VirtualAddress;
Address &= 0xFFFFFFFFFFFFF000;
PageMapIndexer Index = PageMapIndexer(Address);
PageMapLevel4 PML4 = this->Table->Entries[Index.PMLIndex];
PageDirectoryPointerTableEntryPtr *PDPTE = nullptr;
PageDirectoryEntryPtr *PDE = nullptr;
PageTableEntryPtr *PTE = nullptr;
if ((PML4.raw & Flag) > 0)
{
PDPTE = (PageDirectoryPointerTableEntryPtr *)((uintptr_t)PML4.GetAddress() << 12);
if (PDPTE)
{
if ((PDPTE->Entries[Index.PDPTEIndex].Present))
{
if (Type == MapType::OneGB && PDPTE->Entries[Index.PDPTEIndex].PageSize)
return true;
PDE = (PageDirectoryEntryPtr *)((uintptr_t)PDPTE->Entries[Index.PDPTEIndex].GetAddress() << 12);
if (PDE)
{
if (Type == MapType::TwoMB && PDE->Entries[Index.PDEIndex].PageSize)
return true;
if ((PDE->Entries[Index.PDEIndex].Present))
{
PTE = (PageTableEntryPtr *)((uintptr_t)PDE->Entries[Index.PDEIndex].GetAddress() << 12);
if (PTE)
{
if ((PTE->Entries[Index.PTEIndex].Present))
return true;
}
}
}
}
}
}
return false;
}
void *Virtual::GetPhysical(void *VirtualAddress)
{
// 0x1000 aligned
uintptr_t Address = (uintptr_t)VirtualAddress;
Address &= 0xFFFFFFFFFFFFF000;
PageMapIndexer Index = PageMapIndexer(Address);
PageMapLevel4 PML4 = this->Table->Entries[Index.PMLIndex];
PageDirectoryPointerTableEntryPtr *PDPTE = nullptr;
PageDirectoryEntryPtr *PDE = nullptr;
PageTableEntryPtr *PTE = nullptr;
if (PML4.Present)
{
PDPTE = (PageDirectoryPointerTableEntryPtr *)((uintptr_t)PML4.GetAddress() << 12);
if (PDPTE)
{
if (PDPTE->Entries[Index.PDPTEIndex].Present)
{
if (PDPTE->Entries[Index.PDPTEIndex].PageSize)
return (void *)((uintptr_t)PDPTE->Entries[Index.PDPTEIndex].GetAddress() << 12);
PDE = (PageDirectoryEntryPtr *)((uintptr_t)PDPTE->Entries[Index.PDPTEIndex].GetAddress() << 12);
if (PDE)
{
if (PDE->Entries[Index.PDEIndex].Present)
{
if (PDE->Entries[Index.PDEIndex].PageSize)
return (void *)((uintptr_t)PDE->Entries[Index.PDEIndex].GetAddress() << 12);
PTE = (PageTableEntryPtr *)((uintptr_t)PDE->Entries[Index.PDEIndex].GetAddress() << 12);
if (PTE)
{
if (PTE->Entries[Index.PTEIndex].Present)
return (void *)((uintptr_t)PTE->Entries[Index.PTEIndex].GetAddress() << 12);
}
}
}
}
}
}
return nullptr;
}
void Virtual::Map(void *VirtualAddress, void *PhysicalAddress, uint64_t Flags, MapType Type)
{
SmartLock(this->MemoryLock);
if (unlikely(!this->Table))
{
error("No page table");
return;
}
Flags |= PTFlag::P;
PageMapIndexer Index = PageMapIndexer((uintptr_t)VirtualAddress);
// Clear any flags that are not 1 << 0 (Present) - 1 << 5 (Accessed) because rest are for page table entries only
uint64_t DirectoryFlags = Flags & 0x3F;
PageMapLevel4 *PML4 = &this->Table->Entries[Index.PMLIndex];
PageDirectoryPointerTableEntryPtr *PDPTEPtr = nullptr;
if (!PML4->Present)
{
PDPTEPtr = (PageDirectoryPointerTableEntryPtr *)KernelAllocator.RequestPages(TO_PAGES(sizeof(PageDirectoryPointerTableEntryPtr) + 1));
memset(PDPTEPtr, 0, sizeof(PageDirectoryPointerTableEntryPtr));
PML4->Present = true;
PML4->SetAddress((uintptr_t)PDPTEPtr >> 12);
}
else
PDPTEPtr = (PageDirectoryPointerTableEntryPtr *)(PML4->GetAddress() << 12);
PML4->raw |= DirectoryFlags;
PageDirectoryPointerTableEntry *PDPTE = &PDPTEPtr->Entries[Index.PDPTEIndex];
if (Type == MapType::OneGB)
{
PDPTE->raw |= Flags;
PDPTE->PageSize = true;
PDPTE->SetAddress((uintptr_t)PhysicalAddress >> 12);
debug("Mapped 1GB page at %p to %p", VirtualAddress, PhysicalAddress);
return;
}
PageDirectoryEntryPtr *PDEPtr = nullptr;
if (!PDPTE->Present)
{
PDEPtr = (PageDirectoryEntryPtr *)KernelAllocator.RequestPages(TO_PAGES(sizeof(PageDirectoryEntryPtr) + 1));
memset(PDEPtr, 0, sizeof(PageDirectoryEntryPtr));
PDPTE->Present = true;
PDPTE->SetAddress((uintptr_t)PDEPtr >> 12);
}
else
PDEPtr = (PageDirectoryEntryPtr *)(PDPTE->GetAddress() << 12);
PDPTE->raw |= DirectoryFlags;
PageDirectoryEntry *PDE = &PDEPtr->Entries[Index.PDEIndex];
if (Type == MapType::TwoMB)
{
PDE->raw |= Flags;
PDE->PageSize = true;
PDE->SetAddress((uintptr_t)PhysicalAddress >> 12);
debug("Mapped 2MB page at %p to %p", VirtualAddress, PhysicalAddress);
return;
}
PageTableEntryPtr *PTEPtr = nullptr;
if (!PDE->Present)
{
PTEPtr = (PageTableEntryPtr *)KernelAllocator.RequestPages(TO_PAGES(sizeof(PageTableEntryPtr) + 1));
memset(PTEPtr, 0, sizeof(PageTableEntryPtr));
PDE->Present = true;
PDE->SetAddress((uintptr_t)PTEPtr >> 12);
}
else
PTEPtr = (PageTableEntryPtr *)(PDE->GetAddress() << 12);
PDE->raw |= DirectoryFlags;
PageTableEntry *PTE = &PTEPtr->Entries[Index.PTEIndex];
PTE->Present = true;
PTE->raw |= Flags;
PTE->SetAddress((uintptr_t)PhysicalAddress >> 12);
#if defined(a64)
CPU::x64::invlpg(VirtualAddress);
#elif defined(a32)
CPU::x32::invlpg(VirtualAddress);
#elif defined(aa64)
asmv("dsb sy");
asmv("tlbi vae1is, %0"
:
: "r"(VirtualAddress)
: "memory");
asmv("dsb sy");
asmv("isb");
#endif
#ifdef DEBUG
/* https://stackoverflow.com/a/3208376/9352057 */
#define BYTE_TO_BINARY_PATTERN "%c%c%c%c%c%c%c%c"
#define BYTE_TO_BINARY(byte) \
(byte & 0x80 ? '1' : '0'), \
(byte & 0x40 ? '1' : '0'), \
(byte & 0x20 ? '1' : '0'), \
(byte & 0x10 ? '1' : '0'), \
(byte & 0x08 ? '1' : '0'), \
(byte & 0x04 ? '1' : '0'), \
(byte & 0x02 ? '1' : '0'), \
(byte & 0x01 ? '1' : '0')
if (!this->Check(VirtualAddress, (PTFlag)Flags, Type)) // quick workaround just to see where it fails
warn("Failed to map v:%#lx p:%#lx with flags: " BYTE_TO_BINARY_PATTERN, VirtualAddress, PhysicalAddress, BYTE_TO_BINARY(Flags));
#endif
}
void Virtual::Unmap(void *VirtualAddress, MapType Type)
{
SmartLock(this->MemoryLock);
if (!this->Table)
{
error("No page table");
return;
}
PageMapIndexer Index = PageMapIndexer((uintptr_t)VirtualAddress);
PageMapLevel4 *PML4 = &this->Table->Entries[Index.PMLIndex];
if (!PML4->Present)
{
error("Page %#lx not present", PML4->GetAddress());
return;
}
PageDirectoryPointerTableEntryPtr *PDPTEPtr = (PageDirectoryPointerTableEntryPtr *)((uintptr_t)PML4->Address << 12);
PageDirectoryPointerTableEntry *PDPTE = &PDPTEPtr->Entries[Index.PDPTEIndex];
if (!PDPTE->Present)
{
error("Page %#lx not present", PDPTE->GetAddress());
return;
}
if (Type == MapType::OneGB && PDPTE->PageSize)
{
PDPTE->Present = false;
return;
}
PageDirectoryEntryPtr *PDEPtr = (PageDirectoryEntryPtr *)((uintptr_t)PDPTE->Address << 12);
PageDirectoryEntry *PDE = &PDEPtr->Entries[Index.PDEIndex];
if (!PDE->Present)
{
error("Page %#lx not present", PDE->GetAddress());
return;
}
if (Type == MapType::TwoMB && PDE->PageSize)
{
PDE->Present = false;
return;
}
PageTableEntryPtr *PTEPtr = (PageTableEntryPtr *)((uintptr_t)PDE->Address << 12);
PageTableEntry PTE = PTEPtr->Entries[Index.PTEIndex];
if (!PTE.Present)
{
error("Page %#lx not present", PTE.GetAddress());
return;
}
PTE.Present = false;
PTEPtr->Entries[Index.PTEIndex] = PTE;
#if defined(a64)
CPU::x64::invlpg(VirtualAddress);
#elif defined(a32)
CPU::x32::invlpg(VirtualAddress);
#elif defined(aa64)
asmv("dsb sy");
asmv("tlbi vae1is, %0"
:
: "r"(VirtualAddress)
: "memory");
asmv("dsb sy");
asmv("isb");
#endif
}
Virtual::Virtual(PageTable *Table)
{
if (Table)

View File

@ -232,7 +232,10 @@ namespace Video
this->Buffers[Index].DoNotScroll = Value;
}
char Display::Print(char Char, int Index, bool WriteToUART)
#if defined(a32)
__no_sanitize("undefined")
#endif
char Display::Print(char Char, int Index, bool WriteToUART)
{
if (unlikely(this->Buffers[Index].Checksum != 0xBBFFE515A117E))
return 0;

View File

@ -49,6 +49,9 @@ extern bool DebuggerIsAttached;
void TestMemoryAllocation()
{
#ifdef a32
return; /* Not really for now. */
#endif
if (EnableExternalMemoryTracer || DebuggerIsAttached)
{
debug("The test is disabled when the external memory tracer or a debugger is enabled.");

View File

@ -102,7 +102,7 @@ namespace Video
uint32_t CursorX, CursorY;
char Brightness;
bool DoNotScroll;
long Checksum;
long long Checksum;
};
class Display

View File

@ -24,6 +24,7 @@
#include <bitmap.hpp>
#include <lock.hpp>
#include <std.hpp>
#include <cstddef>
#endif // __cplusplus
#include <types.h>
@ -177,28 +178,44 @@ namespace Memory
XD = (uint64_t)1 << 63
};
/* 2.2 Paging in IA-32e Mode - https://composter.com.ua/documents/TLBs_Paging-Structure_Caches_and_Their_Invalidation.pdf */
union __packed PageTableEntry
{
#if defined(a64)
struct
{
uint64_t Present : 1; // 0
uint64_t ReadWrite : 1; // 1
uint64_t UserSupervisor : 1; // 2
uint64_t WriteThrough : 1; // 3
uint64_t CacheDisable : 1; // 4
uint64_t Accessed : 1; // 5
uint64_t Dirty : 1; // 6
uint64_t PageAttributeTable : 1; // 7
uint64_t Global : 1; // 8
uint64_t Available0 : 3; // 9-11
uint64_t Address : 40; // 12-51
uint64_t Available1 : 7; // 52-58
uint64_t ProtectionKey : 4; // 59-62
uint64_t ExecuteDisable : 1; // 63
uintptr_t Present : 1; // 0
uintptr_t ReadWrite : 1; // 1
uintptr_t UserSupervisor : 1; // 2
uintptr_t WriteThrough : 1; // 3
uintptr_t CacheDisable : 1; // 4
uintptr_t Accessed : 1; // 5
uintptr_t Dirty : 1; // 6
uintptr_t PageAttributeTable : 1; // 7
uintptr_t Global : 1; // 8
uintptr_t Available0 : 3; // 9-11
uintptr_t Address : 40; // 12-51
uintptr_t Available1 : 7; // 52-58
uintptr_t ProtectionKey : 4; // 59-62
uintptr_t ExecuteDisable : 1; // 63
};
uint64_t raw;
#elif defined(a32)
struct
{
uintptr_t Present : 1; // 0
uintptr_t ReadWrite : 1; // 1
uintptr_t UserSupervisor : 1; // 2
uintptr_t WriteThrough : 1; // 3
uintptr_t CacheDisable : 1; // 4
uintptr_t Accessed : 1; // 5
uintptr_t Dirty : 1; // 6
uintptr_t PageAttributeTable : 1; // 7
uintptr_t Global : 1; // 8
uintptr_t Available0 : 3; // 9-11
uintptr_t Address : 20; // 12-31
};
#elif defined(aa64)
#endif
uintptr_t raw;
/** @brief Set Address */
void SetAddress(uintptr_t _Address)
@ -233,48 +250,87 @@ namespace Memory
struct __packed PageTableEntryPtr
{
#if defined(a64)
PageTableEntry Entries[512];
#elif defined(a32)
PageTableEntry Entries[1024];
#elif defined(aa64)
#endif
};
union __packed PageDirectoryEntry
{
#if defined(a64)
struct
{
uint64_t Present : 1; // 0
uint64_t ReadWrite : 1; // 1
uint64_t UserSupervisor : 1; // 2
uint64_t WriteThrough : 1; // 3
uint64_t CacheDisable : 1; // 4
uint64_t Accessed : 1; // 5
uint64_t Available0 : 1; // 6
uint64_t PageSize : 1; // 7
uint64_t Available1 : 4; // 8-11
uint64_t Address : 40; // 12-51
uint64_t Available2 : 11; // 52-62
uint64_t ExecuteDisable : 1; // 63
uintptr_t Present : 1; // 0
uintptr_t ReadWrite : 1; // 1
uintptr_t UserSupervisor : 1; // 2
uintptr_t WriteThrough : 1; // 3
uintptr_t CacheDisable : 1; // 4
uintptr_t Accessed : 1; // 5
uintptr_t Available0 : 1; // 6
uintptr_t PageSize : 1; // 7
uintptr_t Available1 : 4; // 8-11
uintptr_t Address : 40; // 12-51
uintptr_t Available2 : 11; // 52-62
uintptr_t ExecuteDisable : 1; // 63
};
struct
{
uint64_t Present : 1; // 0
uint64_t ReadWrite : 1; // 1
uint64_t UserSupervisor : 1; // 2
uint64_t WriteThrough : 1; // 3
uint64_t CacheDisable : 1; // 4
uint64_t Accessed : 1; // 5
uint64_t Dirty : 1; // 6
uint64_t PageSize : 1; // 7
uint64_t Global : 1; // 8
uint64_t Available0 : 3; // 9-11
uint64_t PageAttributeTable : 1; // 12
uint64_t Reserved0 : 8; // 13-20
uint64_t Address : 31; // 21-51
uint64_t Available1 : 7; // 52-58
uint64_t ProtectionKey : 4; // 59-62
uint64_t ExecuteDisable : 1; // 63
uintptr_t Present : 1; // 0
uintptr_t ReadWrite : 1; // 1
uintptr_t UserSupervisor : 1; // 2
uintptr_t WriteThrough : 1; // 3
uintptr_t CacheDisable : 1; // 4
uintptr_t Accessed : 1; // 5
uintptr_t Dirty : 1; // 6
uintptr_t PageSize : 1; // 7
uintptr_t Global : 1; // 8
uintptr_t Available0 : 3; // 9-11
uintptr_t PageAttributeTable : 1; // 12
uintptr_t Reserved0 : 8; // 13-20
uintptr_t Address : 31; // 21-51
uintptr_t Available1 : 7; // 52-58
uintptr_t ProtectionKey : 4; // 59-62
uintptr_t ExecuteDisable : 1; // 63
} TwoMB;
#elif defined(a32)
struct
{
uintptr_t Present : 1; // 0
uintptr_t ReadWrite : 1; // 1
uintptr_t UserSupervisor : 1; // 2
uintptr_t WriteThrough : 1; // 3
uintptr_t CacheDisable : 1; // 4
uintptr_t Accessed : 1; // 5
uintptr_t Available0 : 1; // 6
uintptr_t PageSize : 1; // 7
uintptr_t Available1 : 4; // 8-11
uintptr_t Address : 20; // 12-31
};
uint64_t raw;
struct
{
uintptr_t Present : 1; // 0
uintptr_t ReadWrite : 1; // 1
uintptr_t UserSupervisor : 1; // 2
uintptr_t WriteThrough : 1; // 3
uintptr_t CacheDisable : 1; // 4
uintptr_t Accessed : 1; // 5
uintptr_t Dirty : 1; // 6
uintptr_t PageSize : 1; // 7
uintptr_t Global : 1; // 8
uintptr_t Available0 : 3; // 9-11
uintptr_t PageAttributeTable : 1; // 12
uintptr_t Address0 : 8; // 13-20
uintptr_t Reserved0 : 1; // 21
uintptr_t Address1 : 10; // 22-31
} FourMB;
#elif defined(aa64)
#endif
uintptr_t raw;
/** @brief Set PageTableEntryPtr address */
void SetAddress(uintptr_t _Address)
@ -316,41 +372,41 @@ namespace Memory
{
struct
{
uint64_t Present : 1; // 0
uint64_t ReadWrite : 1; // 1
uint64_t UserSupervisor : 1; // 2
uint64_t WriteThrough : 1; // 3
uint64_t CacheDisable : 1; // 4
uint64_t Accessed : 1; // 5
uint64_t Available0 : 1; // 6
uint64_t PageSize : 1; // 7
uint64_t Available1 : 4; // 8-11
uint64_t Address : 40; // 12-51
uint64_t Available2 : 11; // 52-62
uint64_t ExecuteDisable : 1; // 63
uintptr_t Present : 1; // 0
uintptr_t ReadWrite : 1; // 1
uintptr_t UserSupervisor : 1; // 2
uintptr_t WriteThrough : 1; // 3
uintptr_t CacheDisable : 1; // 4
uintptr_t Accessed : 1; // 5
uintptr_t Available0 : 1; // 6
uintptr_t PageSize : 1; // 7
uintptr_t Available1 : 4; // 8-11
uintptr_t Address : 40; // 12-51
uintptr_t Available2 : 11; // 52-62
uintptr_t ExecuteDisable : 1; // 63
};
struct
{
uint64_t Present : 1; // 0
uint64_t ReadWrite : 1; // 1
uint64_t UserSupervisor : 1; // 2
uint64_t WriteThrough : 1; // 3
uint64_t CacheDisable : 1; // 4
uint64_t Accessed : 1; // 5
uint64_t Dirty : 1; // 6
uint64_t PageSize : 1; // 7
uint64_t Global : 1; // 8
uint64_t Available0 : 3; // 9-11
uint64_t PageAttributeTable : 1; // 12
uint64_t Reserved0 : 17; // 13-29
uint64_t Address : 22; // 30-51
uint64_t Available1 : 7; // 52-58
uint64_t ProtectionKey : 4; // 59-62
uint64_t ExecuteDisable : 1; // 63
uintptr_t Present : 1; // 0
uintptr_t ReadWrite : 1; // 1
uintptr_t UserSupervisor : 1; // 2
uintptr_t WriteThrough : 1; // 3
uintptr_t CacheDisable : 1; // 4
uintptr_t Accessed : 1; // 5
uintptr_t Dirty : 1; // 6
uintptr_t PageSize : 1; // 7
uintptr_t Global : 1; // 8
uintptr_t Available0 : 3; // 9-11
uintptr_t PageAttributeTable : 1; // 12
uintptr_t Reserved0 : 17; // 13-29
uintptr_t Address : 22; // 30-51
uintptr_t Available1 : 7; // 52-58
uintptr_t ProtectionKey : 4; // 59-62
uintptr_t ExecuteDisable : 1; // 63
} OneGB;
uint64_t raw;
uintptr_t raw;
/** @brief Set PageDirectoryEntryPtr address */
void SetAddress(uintptr_t _Address)
@ -392,20 +448,20 @@ namespace Memory
{
struct
{
uint64_t Present : 1; // 0
uint64_t ReadWrite : 1; // 1
uint64_t UserSupervisor : 1; // 2
uint64_t WriteThrough : 1; // 3
uint64_t CacheDisable : 1; // 4
uint64_t Accessed : 1; // 5
uint64_t Available0 : 1; // 6
uint64_t Reserved0 : 1; // 7
uint64_t Available1 : 4; // 8-11
uint64_t Address : 40; // 12-51
uint64_t Available2 : 11; // 52-62
uint64_t ExecuteDisable : 1; // 63
uintptr_t Present : 1; // 0
uintptr_t ReadWrite : 1; // 1
uintptr_t UserSupervisor : 1; // 2
uintptr_t WriteThrough : 1; // 3
uintptr_t CacheDisable : 1; // 4
uintptr_t Accessed : 1; // 5
uintptr_t Available0 : 1; // 6
uintptr_t Reserved0 : 1; // 7
uintptr_t Available1 : 4; // 8-11
uintptr_t Address : 40; // 12-51
uintptr_t Available2 : 11; // 52-62
uintptr_t ExecuteDisable : 1; // 63
};
uint64_t raw;
uintptr_t raw;
/** @brief Set PageDirectoryPointerTableEntryPtr address */
void SetAddress(uintptr_t _Address)
@ -438,10 +494,70 @@ namespace Memory
}
};
struct __packed PageMapLevel4Ptr
{
PageMapLevel4 Entries[512];
};
union __packed PageMapLevel5
{
struct
{
uintptr_t Present : 1; // 0
uintptr_t ReadWrite : 1; // 1
uintptr_t UserSupervisor : 1; // 2
uintptr_t WriteThrough : 1; // 3
uintptr_t CacheDisable : 1; // 4
uintptr_t Accessed : 1; // 5
uintptr_t Available0 : 1; // 6
uintptr_t Reserved0 : 1; // 7
uintptr_t Available1 : 4; // 8-11
uintptr_t Address : 40; // 12-51
uintptr_t Available2 : 11; // 52-62
uintptr_t ExecuteDisable : 1; // 63
};
uintptr_t raw;
/** @brief Set PageMapLevel4Ptr address */
void SetAddress(uintptr_t _Address)
{
#if defined(a64)
_Address &= 0x000000FFFFFFFFFF;
this->raw &= 0xFFF0000000000FFF;
this->raw |= (_Address << 12);
#elif defined(a32)
_Address &= 0x000FFFFF;
this->raw &= 0xFFC00003;
this->raw |= (_Address << 12);
#elif defined(aa64)
_Address &= 0x000000FFFFFFFFFF;
this->raw &= 0xFFF0000000000FFF;
this->raw |= (_Address << 12);
#endif
}
/** @brief Get PageMapLevel4Ptr address */
uintptr_t GetAddress()
{
#if defined(a64)
return (this->raw & 0x000FFFFFFFFFF000) >> 12;
#elif defined(a32)
return (this->raw & 0x003FFFFF000) >> 12;
#elif defined(aa64)
return (this->raw & 0x000FFFFFFFFFF000) >> 12;
#endif
}
};
class PageTable
{
public:
#if defined(a64)
PageMapLevel4 Entries[512];
#elif defined(a32)
PageDirectoryEntry Entries[1024];
#elif defined(aa64)
#endif
/**
* @brief Update CR3 with this PageTable
@ -611,14 +727,17 @@ namespace Memory
NoMapType,
FourKB,
TwoMB,
FourMB,
OneGB
};
class PageMapIndexer
{
public:
#if defined(a64)
uintptr_t PMLIndex = 0;
uintptr_t PDPTEIndex = 0;
#endif
uintptr_t PDEIndex = 0;
uintptr_t PTEIndex = 0;
PageMapIndexer(uintptr_t VirtualAddress);
@ -667,6 +786,8 @@ namespace Memory
if (Type == MapType::TwoMB)
PageSize = PAGE_SIZE_2M;
else if (Type == MapType::FourMB)
PageSize = PAGE_SIZE_4M;
else if (Type == MapType::OneGB)
PageSize = PAGE_SIZE_1G;
@ -701,6 +822,14 @@ namespace Memory
Length -= PAGE_SIZE_1G;
}
while (Length >= PAGE_SIZE_4M)
{
this->Map(VirtualAddress, PhysicalAddress, Length, Flags, Virtual::MapType::FourMB);
VirtualAddress = (void *)((uintptr_t)VirtualAddress + PAGE_SIZE_4M);
PhysicalAddress = (void *)((uintptr_t)PhysicalAddress + PAGE_SIZE_4M);
Length -= PAGE_SIZE_4M;
}
while (Length >= PAGE_SIZE_2M)
{
this->Map(VirtualAddress, PhysicalAddress, Length, Flags, Virtual::MapType::TwoMB);
@ -732,6 +861,16 @@ namespace Memory
return Virtual::MapType::NoMapType;
}
}
else if (Length >= PAGE_SIZE_4M)
{
Type = Virtual::MapType::FourMB;
if (Length % PAGE_SIZE_4M != 0)
{
warn("Length is not a multiple of 4MB.");
if (FailOnModulo)
return Virtual::MapType::NoMapType;
}
}
else if (Length >= PAGE_SIZE_2M)
{
Type = Virtual::MapType::TwoMB;
@ -768,6 +907,8 @@ namespace Memory
if (Type == MapType::TwoMB)
PageSize = PAGE_SIZE_2M;
else if (Type == MapType::FourMB)
PageSize = PAGE_SIZE_4M;
else if (Type == MapType::OneGB)
PageSize = PAGE_SIZE_1G;
@ -909,9 +1050,9 @@ namespace Memory
void InitializeMemoryManagement();
void *operator new(size_t Size);
void *operator new[](size_t Size);
void *operator new(size_t Size, std::align_val_t Alignment);
void *operator new(std::size_t Size);
void *operator new[](std::size_t Size);
void *operator new(std::size_t Size, std::align_val_t Alignment);
void operator delete(void *Pointer);
void operator delete[](void *Pointer);
void operator delete(void *Pointer, long unsigned int Size);