Optimize memory mapping

This commit is contained in:
Alex 2023-04-07 05:12:48 +03:00
parent 540152a339
commit 7fa3e91a53
Signed by untrusted user who does not match committer: enderice2
GPG Key ID: EACC3AD603BAB4DD
3 changed files with 315 additions and 242 deletions

View File

@ -39,16 +39,18 @@
#define memdbg(m, ...)
#endif
using namespace Memory;
#ifdef DEBUG_ALLOCATIONS_SL
NewLock(AllocatorLock);
NewLock(OperatorAllocatorLock);
#endif
using namespace Memory;
Physical KernelAllocator;
PageTable4 *KernelPageTable = nullptr;
PageTable4 *UserspaceKernelOnlyPageTable = nullptr;
bool Page1GBSupport = false;
bool PSESupport = false;
static MemoryAllocatorType AllocatorType = MemoryAllocatorType::Pages;
Xalloc::V1 *XallocV1Allocator = nullptr;
@ -75,40 +77,27 @@ NIF void tracepagetable(PageTable4 *pt)
NIF void MapFromZero(PageTable4 *PT, BootInfo *Info)
{
bool Page1GBSupport = false;
bool PSESupport = false;
if (strcmp(CPU::Vendor(), x86_CPUID_VENDOR_AMD) == 0)
{
CPU::x86::AMD::CPUID0x80000001 cpuid;
cpuid.Get();
Page1GBSupport = cpuid.EDX.Page1GB;
PSESupport = cpuid.EDX.PSE;
}
else if (strcmp(CPU::Vendor(), x86_CPUID_VENDOR_INTEL) == 0)
{
CPU::x86::Intel::CPUID0x80000001 cpuid;
cpuid.Get();
}
debug("Mapping from 0x0 to %#llx", Info->Memory.Size);
Virtual va = Virtual(PT);
size_t MemSize = Info->Memory.Size;
if (Page1GBSupport && PSESupport)
{
debug("1GB Page Support Enabled");
#if defined(a64)
CPU::x64::CR4 cr4 = CPU::x64::readcr4();
cr4.PSE = 1;
CPU::x64::writecr4(cr4);
#elif defined(a32)
CPU::x32::CR4 cr4 = CPU::x32::readcr4();
cr4.PSE = 1;
CPU::x32::writecr4(cr4);
#elif defined(aa64)
#endif
/* Map the first 100MB of memory as 4KB pages */
va.Map((void *)0, (void *)0, MemSize, PTFlag::RW | PTFlag::PS /* , Virtual::MapType::OneGB */);
// uintptr_t Physical4KBSectionStart = 0x10000000;
// va.Map((void *)0,
// (void *)0,
// Physical4KBSectionStart,
// PTFlag::RW);
// va.Map((void *)Physical4KBSectionStart,
// (void *)Physical4KBSectionStart,
// MemSize - Physical4KBSectionStart,
// PTFlag::RW,
// Virtual::MapType::OneGB);
va.Map((void *)0, (void *)0, MemSize, PTFlag::RW);
}
else
va.Map((void *)0, (void *)0, MemSize, PTFlag::RW);
@ -120,6 +109,7 @@ NIF void MapFromZero(PageTable4 *PT, BootInfo *Info)
NIF void MapFramebuffer(PageTable4 *PT, BootInfo *Info)
{
debug("Mapping Framebuffer");
Virtual va = Virtual(PT);
int itrfb = 0;
while (1)
@ -127,10 +117,10 @@ NIF void MapFramebuffer(PageTable4 *PT, BootInfo *Info)
if (!Info->Framebuffer[itrfb].BaseAddress)
break;
for (uintptr_t fb_base = (uintptr_t)Info->Framebuffer[itrfb].BaseAddress;
fb_base < ((uintptr_t)Info->Framebuffer[itrfb].BaseAddress + ((Info->Framebuffer[itrfb].Pitch * Info->Framebuffer[itrfb].Height) + PAGE_SIZE));
fb_base += PAGE_SIZE)
va.Map((void *)fb_base, (void *)fb_base, PTFlag::RW | PTFlag::US | PTFlag::G);
va.OptimizedMap((void *)Info->Framebuffer[itrfb].BaseAddress,
(void *)Info->Framebuffer[itrfb].BaseAddress,
Info->Framebuffer[itrfb].Pitch * Info->Framebuffer[itrfb].Height,
PTFlag::RW | PTFlag::US | PTFlag::G);
itrfb++;
#ifdef DEBUG
@ -158,10 +148,7 @@ NIF void MapFramebuffer(PageTable4 *PT, BootInfo *Info)
NIF void MapKernel(PageTable4 *PT, BootInfo *Info)
{
/* KernelStart KernelTextEnd KernelRoDataEnd KernelEnd
Kernel Start & Text Start ------ Text End ------ Kernel Rodata End ------ Kernel Data End & Kernel End
*/
Virtual va = Virtual(PT);
debug("Mapping Kernel");
uintptr_t KernelStart = (uintptr_t)&_kernel_start;
uintptr_t KernelTextEnd = (uintptr_t)&_kernel_text_end;
uintptr_t KernelDataEnd = (uintptr_t)&_kernel_data_end;
@ -170,8 +157,15 @@ NIF void MapKernel(PageTable4 *PT, BootInfo *Info)
uintptr_t KernelFileStart = (uintptr_t)Info->Kernel.FileBase;
uintptr_t KernelFileEnd = KernelFileStart + Info->Kernel.Size;
debug("File size: %ld KB", TO_KB(Info->Kernel.Size));
debug(".text size: %ld KB", TO_KB(KernelTextEnd - KernelStart));
debug(".data size: %ld KB", TO_KB(KernelDataEnd - KernelTextEnd));
debug(".rodata size: %ld KB", TO_KB(KernelRoDataEnd - KernelDataEnd));
debug(".bss size: %ld KB", TO_KB(KernelEnd - KernelRoDataEnd));
uintptr_t BaseKernelMapAddress = (uintptr_t)Info->Kernel.PhysicalBase;
uintptr_t k;
Virtual va = Virtual(PT);
/* Text section */
for (k = KernelStart; k < KernelTextEnd; k += PAGE_SIZE)
@ -212,9 +206,6 @@ NIF void MapKernel(PageTable4 *PT, BootInfo *Info)
KernelAllocator.LockPage((void *)k);
}
debug("\nStart: %#llx - Text End: %#llx - RoEnd: %#llx - End: %#llx\nStart Physical: %#llx - End Physical: %#llx",
KernelStart, KernelTextEnd, KernelRoDataEnd, KernelEnd, KernelFileStart, KernelFileEnd);
#ifdef DEBUG
if (EnableExternalMemoryTracer)
{
@ -309,22 +300,40 @@ NIF void InitializeMemoryManagement(BootInfo *Info)
UserspaceKernelOnlyPageTable = (PageTable4 *)KernelAllocator.RequestPages(TO_PAGES(PAGE_SIZE));
memset(UserspaceKernelOnlyPageTable, 0, PAGE_SIZE);
debug("Mapping from 0x0 to %#llx", Info->Memory.Size);
if (strcmp(CPU::Vendor(), x86_CPUID_VENDOR_AMD) == 0)
{
CPU::x86::AMD::CPUID0x80000001 cpuid;
cpuid.Get();
PSESupport = cpuid.EDX.PSE;
Page1GBSupport = cpuid.EDX.Page1GB;
}
else if (strcmp(CPU::Vendor(), x86_CPUID_VENDOR_INTEL) == 0)
{
CPU::x86::Intel::CPUID0x80000001 cpuid;
cpuid.Get();
fixme("Intel PSE support");
}
if (Page1GBSupport && PSESupport)
{
debug("1GB Page Support Enabled");
#if defined(a64)
CPU::x64::CR4 cr4 = CPU::x64::readcr4();
cr4.PSE = 1;
CPU::x64::writecr4(cr4);
#elif defined(a32)
CPU::x32::CR4 cr4 = CPU::x32::readcr4();
cr4.PSE = 1;
CPU::x32::writecr4(cr4);
#elif defined(aa64)
#endif
}
MapFromZero(KernelPageTable, Info);
debug("Mapping from 0x0 %#llx for Userspace Page Table", Info->Memory.Size);
UserspaceKernelOnlyPageTable[0] = KernelPageTable[0];
/* Mapping Framebuffer address */
debug("Mapping Framebuffer");
MapFramebuffer(KernelPageTable, Info);
debug("Mapping Framebuffer for Userspace Page Table");
MapFramebuffer(UserspaceKernelOnlyPageTable, Info);
/* Kernel mapping */
debug("Mapping Kernel");
MapKernel(KernelPageTable, Info);
debug("Mapping Kernel for Userspace Page Table");
MapKernel(UserspaceKernelOnlyPageTable, Info);
memcpy(UserspaceKernelOnlyPageTable, KernelPageTable, sizeof(PageTable4));
trace("Applying new page table from address %p", KernelPageTable);
#ifdef DEBUG

View File

@ -127,75 +127,67 @@ namespace Memory
// Keep only flags 1 << 0 (Present) through 1 << 5 (Accessed); the rest are meaningful for page table entries only
uint64_t DirectoryFlags = Flags & 0x3F;
PageMapLevel4 PML4 = this->Table->Entries[Index.PMLIndex];
PageMapLevel4 *PML4 = &this->Table->Entries[Index.PMLIndex];
PageDirectoryPointerTableEntryPtr *PDPTEPtr = nullptr;
if (!PML4.Present)
if (!PML4->Present)
{
PDPTEPtr = (PageDirectoryPointerTableEntryPtr *)KernelAllocator.RequestPage();
PDPTEPtr = (PageDirectoryPointerTableEntryPtr *)KernelAllocator.RequestPages(TO_PAGES(sizeof(PageDirectoryPointerTableEntryPtr)));
memset(PDPTEPtr, 0, PAGE_SIZE);
PML4.Present = true;
PML4.SetAddress((uintptr_t)PDPTEPtr >> 12);
PML4->Present = true;
PML4->SetAddress((uintptr_t)PDPTEPtr >> 12);
}
else
PDPTEPtr = (PageDirectoryPointerTableEntryPtr *)((uintptr_t)PML4.GetAddress() << 12);
PDPTEPtr = (PageDirectoryPointerTableEntryPtr *)(PML4->GetAddress() << 12);
PML4->raw |= DirectoryFlags;
PageDirectoryPointerTableEntry *PDPTE = &PDPTEPtr->Entries[Index.PDPTEIndex];
if (Type == MapType::OneGB)
{
PageDirectoryPointerTableEntry PDPTE = PDPTEPtr->Entries[Index.PDPTEIndex];
PDPTE.raw |= Flags;
PDPTE.PageSize = true;
PDPTE.SetAddress((uintptr_t)PhysicalAddress >> 12);
PDPTEPtr->Entries[Index.PDPTEIndex] = PDPTE;
PDPTE->raw |= Flags;
PDPTE->PageSize = true;
PDPTE->SetAddress((uintptr_t)PhysicalAddress >> 12);
debug("Mapped 1GB page at %p to %p", VirtualAddress, PhysicalAddress);
return;
}
PML4.raw |= DirectoryFlags;
this->Table->Entries[Index.PMLIndex] = PML4;
PageDirectoryPointerTableEntry PDPTE = PDPTEPtr->Entries[Index.PDPTEIndex];
PageDirectoryEntryPtr *PDEPtr = nullptr;
if (!PDPTE.Present)
if (!PDPTE->Present)
{
PDEPtr = (PageDirectoryEntryPtr *)KernelAllocator.RequestPage();
PDEPtr = (PageDirectoryEntryPtr *)KernelAllocator.RequestPages(TO_PAGES(sizeof(PageDirectoryEntryPtr)));
memset(PDEPtr, 0, PAGE_SIZE);
PDPTE.Present = true;
PDPTE.SetAddress((uintptr_t)PDEPtr >> 12);
PDPTE->Present = true;
PDPTE->SetAddress((uintptr_t)PDEPtr >> 12);
}
else
PDEPtr = (PageDirectoryEntryPtr *)((uintptr_t)PDPTE.GetAddress() << 12);
PDEPtr = (PageDirectoryEntryPtr *)(PDPTE->GetAddress() << 12);
PDPTE->raw |= DirectoryFlags;
PageDirectoryEntry *PDE = &PDEPtr->Entries[Index.PDEIndex];
if (Type == MapType::TwoMB)
{
PageDirectoryEntry PDE = PDEPtr->Entries[Index.PDEIndex];
PDE.raw |= Flags;
PDE.PageSize = true;
PDE.SetAddress((uintptr_t)PhysicalAddress >> 12);
PDEPtr->Entries[Index.PDEIndex] = PDE;
PDE->raw |= Flags;
PDE->PageSize = true;
PDE->SetAddress((uintptr_t)PhysicalAddress >> 12);
debug("Mapped 2MB page at %p to %p", VirtualAddress, PhysicalAddress);
return;
}
PDPTE.raw |= DirectoryFlags;
PDPTEPtr->Entries[Index.PDPTEIndex] = PDPTE;
PageDirectoryEntry PDE = PDEPtr->Entries[Index.PDEIndex];
PageTableEntryPtr *PTEPtr = nullptr;
if (!PDE.Present)
if (!PDE->Present)
{
PTEPtr = (PageTableEntryPtr *)KernelAllocator.RequestPage();
PTEPtr = (PageTableEntryPtr *)KernelAllocator.RequestPages(TO_PAGES(sizeof(PageTableEntryPtr)));
memset(PTEPtr, 0, PAGE_SIZE);
PDE.Present = true;
PDE.SetAddress((uintptr_t)PTEPtr >> 12);
PDE->Present = true;
PDE->SetAddress((uintptr_t)PTEPtr >> 12);
}
else
PTEPtr = (PageTableEntryPtr *)((uintptr_t)PDE.GetAddress() << 12);
PDE.raw |= DirectoryFlags;
PDEPtr->Entries[Index.PDEIndex] = PDE;
PTEPtr = (PageTableEntryPtr *)(PDE->GetAddress() << 12);
PDE->raw |= DirectoryFlags;
PageTableEntry PTE = PTEPtr->Entries[Index.PTEIndex];
PTE.Present = true;
PTE.raw |= Flags;
PTE.SetAddress((uintptr_t)PhysicalAddress >> 12);
PTEPtr->Entries[Index.PTEIndex] = PTE;
PageTableEntry *PTE = &PTEPtr->Entries[Index.PTEIndex];
PTE->Present = true;
PTE->raw |= Flags;
PTE->SetAddress((uintptr_t)PhysicalAddress >> 12);
#if defined(a64)
CPU::x64::invlpg(VirtualAddress);
@ -229,19 +221,6 @@ namespace Memory
#endif
}
/* Map a contiguous range by issuing one single-page mapping per page.
   The stride is chosen from the requested page size (4K/2M/1G). */
void Virtual::Map(void *VirtualAddress, void *PhysicalAddress, size_t Length, uint64_t Flags, MapType Type)
{
	uintptr_t Step;
	switch (Type)
	{
	case MapType::TwoMB:
		Step = PAGE_SIZE_2M;
		break;
	case MapType::OneGB:
		Step = PAGE_SIZE_1G;
		break;
	default:
		Step = PAGE_SIZE_4K;
		break;
	}

	uintptr_t vBase = (uintptr_t)VirtualAddress;
	uintptr_t pBase = (uintptr_t)PhysicalAddress;
	for (uintptr_t Offset = 0; Offset < Length; Offset += Step)
		this->Map((void *)(vBase + Offset), (void *)(pBase + Offset), Flags, Type);
}
void Virtual::Unmap(void *VirtualAddress, MapType Type)
{
SmartLock(this->MemoryLock);
@ -252,44 +231,42 @@ namespace Memory
}
PageMapIndexer Index = PageMapIndexer((uintptr_t)VirtualAddress);
PageMapLevel4 PML4 = this->Table->Entries[Index.PMLIndex];
if (!PML4.Present)
PageMapLevel4 *PML4 = &this->Table->Entries[Index.PMLIndex];
if (!PML4->Present)
{
error("Page %#lx not present", PML4.GetAddress());
error("Page %#lx not present", PML4->GetAddress());
return;
}
PageDirectoryPointerTableEntryPtr *PDPTEPtr = (PageDirectoryPointerTableEntryPtr *)((uintptr_t)PML4.Address << 12);
PageDirectoryPointerTableEntry PDPTE = PDPTEPtr->Entries[Index.PDPTEIndex];
if (!PDPTE.Present)
PageDirectoryPointerTableEntryPtr *PDPTEPtr = (PageDirectoryPointerTableEntryPtr *)((uintptr_t)PML4->Address << 12);
PageDirectoryPointerTableEntry *PDPTE = &PDPTEPtr->Entries[Index.PDPTEIndex];
if (!PDPTE->Present)
{
error("Page %#lx not present", PDPTE.GetAddress());
error("Page %#lx not present", PDPTE->GetAddress());
return;
}
if (Type == MapType::OneGB && PDPTE.PageSize)
if (Type == MapType::OneGB && PDPTE->PageSize)
{
PDPTE.Present = false;
PDPTEPtr->Entries[Index.PDPTEIndex] = PDPTE;
PDPTE->Present = false;
return;
}
PageDirectoryEntryPtr *PDEPtr = (PageDirectoryEntryPtr *)((uintptr_t)PDPTE.Address << 12);
PageDirectoryEntry PDE = PDEPtr->Entries[Index.PDEIndex];
if (!PDE.Present)
PageDirectoryEntryPtr *PDEPtr = (PageDirectoryEntryPtr *)((uintptr_t)PDPTE->Address << 12);
PageDirectoryEntry *PDE = &PDEPtr->Entries[Index.PDEIndex];
if (!PDE->Present)
{
error("Page %#lx not present", PDE.GetAddress());
error("Page %#lx not present", PDE->GetAddress());
return;
}
if (Type == MapType::TwoMB && PDE.PageSize)
if (Type == MapType::TwoMB && PDE->PageSize)
{
PDE.Present = false;
PDEPtr->Entries[Index.PDEIndex] = PDE;
PDE->Present = false;
return;
}
PageTableEntryPtr *PTEPtr = (PageTableEntryPtr *)((uintptr_t)PDE.Address << 12);
PageTableEntryPtr *PTEPtr = (PageTableEntryPtr *)((uintptr_t)PDE->Address << 12);
PageTableEntry PTE = PTEPtr->Entries[Index.PTEIndex];
if (!PTE.Present)
{
@ -315,25 +292,6 @@ namespace Memory
#endif
}
/* Unmap a contiguous range, one page at a time, stepping by the
   page size that matches the mapping type. */
void Virtual::Unmap(void *VirtualAddress, size_t Length, MapType Type)
{
	uintptr_t Step;
	switch (Type)
	{
	case MapType::TwoMB:
		Step = PAGE_SIZE_2M;
		break;
	case MapType::OneGB:
		Step = PAGE_SIZE_1G;
		break;
	default:
		Step = PAGE_SIZE_4K;
		break;
	}

	uintptr_t Base = (uintptr_t)VirtualAddress;
	for (uintptr_t Offset = 0; Offset < Length; Offset += Step)
		this->Unmap((void *)(Base + Offset), Type);
}
/* Replace the translation for a single page: tear the old entry down,
   then install the new physical mapping with the given flags.
   NOTE(review): this is not atomic — the virtual address is briefly
   unmapped between the two calls; callers must tolerate that window. */
void Virtual::Remap(void *VirtualAddress, void *PhysicalAddress, uint64_t Flags, MapType Type)
{
this->Unmap(VirtualAddress, Type);
this->Map(VirtualAddress, PhysicalAddress, Flags, Type);
}
Virtual::Virtual(PageTable4 *Table)
{
if (Table)

View File

@ -33,25 +33,25 @@ extern uintptr_t _kernel_start, _kernel_end;
extern uintptr_t _kernel_text_end, _kernel_data_end, _kernel_rodata_end;
// kilobyte
#define TO_KB(d) (d / 1024)
#define TO_KB(d) ((d) / 1024)
// megabyte
#define TO_MB(d) (d / 1024 / 1024)
#define TO_MB(d) ((d) / 1024 / 1024)
// gigabyte
#define TO_GB(d) (d / 1024 / 1024 / 1024)
#define TO_GB(d) ((d) / 1024 / 1024 / 1024)
// terabyte
#define TO_TB(d) (d / 1024 / 1024 / 1024 / 1024)
#define TO_TB(d) ((d) / 1024 / 1024 / 1024 / 1024)
// petabyte
#define TO_PB(d) (d / 1024 / 1024 / 1024 / 1024 / 1024)
#define TO_PB(d) ((d) / 1024 / 1024 / 1024 / 1024 / 1024)
// exabyte
#define TO_EB(d) (d / 1024 / 1024 / 1024 / 1024 / 1024 / 1024)
#define TO_EB(d) ((d) / 1024 / 1024 / 1024 / 1024 / 1024 / 1024)
// zettabyte
#define TO_ZB(d) (d / 1024 / 1024 / 1024 / 1024 / 1024 / 1024 / 1024)
#define TO_ZB(d) ((d) / 1024 / 1024 / 1024 / 1024 / 1024 / 1024 / 1024)
// yottabyte
#define TO_YB(d) (d / 1024 / 1024 / 1024 / 1024 / 1024 / 1024 / 1024 / 1024)
#define TO_YB(d) ((d) / 1024 / 1024 / 1024 / 1024 / 1024 / 1024 / 1024 / 1024)
// brontobyte
#define TO_BB(d) (d / 1024 / 1024 / 1024 / 1024 / 1024 / 1024 / 1024 / 1024 / 1024)
#define TO_BB(d) ((d) / 1024 / 1024 / 1024 / 1024 / 1024 / 1024 / 1024 / 1024 / 1024)
// geopbyte
#define TO_GPB(d) (d / 1024 / 1024 / 1024 / 1024 / 1024 / 1024 / 1024 / 1024 / 1024 / 1024)
#define TO_GPB(d) ((d) / 1024 / 1024 / 1024 / 1024 / 1024 / 1024 / 1024 / 1024 / 1024 / 1024)
#define PAGE_SIZE 0x1000 // 4KB
#define PAGE_SIZE_4K PAGE_SIZE // 4KB
@ -178,20 +178,20 @@ namespace Memory
{
struct
{
bool Present : 1; // 0
bool ReadWrite : 1; // 1
bool UserSupervisor : 1; // 2
bool WriteThrough : 1; // 3
bool CacheDisable : 1; // 4
bool Accessed : 1; // 5
bool Dirty : 1; // 6
bool PageAttributeTable : 1; // 7
bool Global : 1; // 8
char Available0 : 3; // 9-11
long Address : 40; // 12-51
char Available1 : 7; // 52-58
char ProtectionKey : 4; // 59-62
bool ExecuteDisable : 1; // 63
uint64_t Present : 1; // 0
uint64_t ReadWrite : 1; // 1
uint64_t UserSupervisor : 1; // 2
uint64_t WriteThrough : 1; // 3
uint64_t CacheDisable : 1; // 4
uint64_t Accessed : 1; // 5
uint64_t Dirty : 1; // 6
uint64_t PageAttributeTable : 1; // 7
uint64_t Global : 1; // 8
uint64_t Available0 : 3; // 9-11
uint64_t Address : 40; // 12-51
uint64_t Available1 : 7; // 52-58
uint64_t ProtectionKey : 4; // 59-62
uint64_t ExecuteDisable : 1; // 63
};
uint64_t raw;
@ -235,38 +235,38 @@ namespace Memory
{
struct
{
bool Present : 1; // 0
bool ReadWrite : 1; // 1
bool UserSupervisor : 1; // 2
bool WriteThrough : 1; // 3
bool CacheDisable : 1; // 4
bool Accessed : 1; // 5
bool Available0 : 1; // 6
bool PageSize : 1; // 7
char Available1 : 4; // 8-11
long Address : 40; // 12-51
short Available2 : 11; // 52-62
bool ExecuteDisable : 1; // 63
uint64_t Present : 1; // 0
uint64_t ReadWrite : 1; // 1
uint64_t UserSupervisor : 1; // 2
uint64_t WriteThrough : 1; // 3
uint64_t CacheDisable : 1; // 4
uint64_t Accessed : 1; // 5
uint64_t Available0 : 1; // 6
uint64_t PageSize : 1; // 7
uint64_t Available1 : 4; // 8-11
uint64_t Address : 40; // 12-51
uint64_t Available2 : 11; // 52-62
uint64_t ExecuteDisable : 1; // 63
};
struct
{
bool Present : 1; // 0
bool ReadWrite : 1; // 1
bool UserSupervisor : 1; // 2
bool WriteThrough : 1; // 3
bool CacheDisable : 1; // 4
bool Accessed : 1; // 5
bool Dirty : 1; // 6
bool PageSize : 1; // 7
bool Global : 1; // 8
char Available0 : 3; // 9-11
bool PageAttributeTable : 1; // 12
char Reserved0 : 8; // 13-20
long Address : 31; // 21-51
char Available1 : 7; // 52-58
char ProtectionKey : 4; // 59-62
bool ExecuteDisable : 1; // 63
uint64_t Present : 1; // 0
uint64_t ReadWrite : 1; // 1
uint64_t UserSupervisor : 1; // 2
uint64_t WriteThrough : 1; // 3
uint64_t CacheDisable : 1; // 4
uint64_t Accessed : 1; // 5
uint64_t Dirty : 1; // 6
uint64_t PageSize : 1; // 7
uint64_t Global : 1; // 8
uint64_t Available0 : 3; // 9-11
uint64_t PageAttributeTable : 1; // 12
uint64_t Reserved0 : 8; // 13-20
uint64_t Address : 31; // 21-51
uint64_t Available1 : 7; // 52-58
uint64_t ProtectionKey : 4; // 59-62
uint64_t ExecuteDisable : 1; // 63
} TwoMB;
uint64_t raw;
@ -311,38 +311,38 @@ namespace Memory
{
struct
{
bool Present : 1; // 0
bool ReadWrite : 1; // 1
bool UserSupervisor : 1; // 2
bool WriteThrough : 1; // 3
bool CacheDisable : 1; // 4
bool Accessed : 1; // 5
bool Available0 : 1; // 6
bool PageSize : 1; // 7
char Available1 : 4; // 8-11
long Address : 40; // 12-51
short Available2 : 11; // 52-62
bool ExecuteDisable : 1; // 63
uint64_t Present : 1; // 0
uint64_t ReadWrite : 1; // 1
uint64_t UserSupervisor : 1; // 2
uint64_t WriteThrough : 1; // 3
uint64_t CacheDisable : 1; // 4
uint64_t Accessed : 1; // 5
uint64_t Available0 : 1; // 6
uint64_t PageSize : 1; // 7
uint64_t Available1 : 4; // 8-11
uint64_t Address : 40; // 12-51
uint64_t Available2 : 11; // 52-62
uint64_t ExecuteDisable : 1; // 63
};
struct
{
bool Present : 1; // 0
bool ReadWrite : 1; // 1
bool UserSupervisor : 1; // 2
bool WriteThrough : 1; // 3
bool CacheDisable : 1; // 4
bool Accessed : 1; // 5
bool Dirty : 1; // 6
bool PageSize : 1; // 7
bool Global : 1; // 8
char Available0 : 3; // 9-11
bool PageAttributeTable : 1; // 12
int Reserved0 : 17; // 13-29
long Address : 22; // 30-51
char Available1 : 7; // 52-58
char ProtectionKey : 4; // 59-62
bool ExecuteDisable : 1; // 63
uint64_t Present : 1; // 0
uint64_t ReadWrite : 1; // 1
uint64_t UserSupervisor : 1; // 2
uint64_t WriteThrough : 1; // 3
uint64_t CacheDisable : 1; // 4
uint64_t Accessed : 1; // 5
uint64_t Dirty : 1; // 6
uint64_t PageSize : 1; // 7
uint64_t Global : 1; // 8
uint64_t Available0 : 3; // 9-11
uint64_t PageAttributeTable : 1; // 12
uint64_t Reserved0 : 17; // 13-29
uint64_t Address : 22; // 30-51
uint64_t Available1 : 7; // 52-58
uint64_t ProtectionKey : 4; // 59-62
uint64_t ExecuteDisable : 1; // 63
} OneGB;
uint64_t raw;
@ -387,18 +387,18 @@ namespace Memory
{
struct
{
bool Present : 1; // 0
bool ReadWrite : 1; // 1
bool UserSupervisor : 1; // 2
bool WriteThrough : 1; // 3
bool CacheDisable : 1; // 4
bool Accessed : 1; // 5
bool Available0 : 1; // 6
bool Reserved0 : 1; // 7
char Available1 : 4; // 8-11
long Address : 40; // 12-51
short Available2 : 11; // 52-62
bool ExecuteDisable : 1; // 63
uint64_t Present : 1; // 0
uint64_t ReadWrite : 1; // 1
uint64_t UserSupervisor : 1; // 2
uint64_t WriteThrough : 1; // 3
uint64_t CacheDisable : 1; // 4
uint64_t Accessed : 1; // 5
uint64_t Available0 : 1; // 6
uint64_t Reserved0 : 1; // 7
uint64_t Available1 : 4; // 8-11
uint64_t Address : 40; // 12-51
uint64_t Available2 : 11; // 52-62
uint64_t ExecuteDisable : 1; // 63
};
uint64_t raw;
@ -598,6 +598,7 @@ namespace Memory
public:
enum MapType
{
NoMapType,
FourKB,
TwoMB,
OneGB
@ -618,6 +619,7 @@ namespace Memory
*
* @param VirtualAddress Virtual address of the page
* @param Flag Flag to check
* @param Type Type of the page. Check MapType enum.
* @return true if page has the specified flag.
* @return false if page does not have the specified flag.
*/
@ -636,6 +638,7 @@ namespace Memory
* @param VirtualAddress Virtual address of the page.
* @param PhysicalAddress Physical address of the page.
* @param Flags Flags of the page. Check PTFlag enum.
* @param Type Type of the page. Check MapType enum.
*/
void Map(void *VirtualAddress, void *PhysicalAddress, uint64_t Flag = PTFlag::P, MapType Type = MapType::FourKB);
@ -644,15 +647,101 @@ namespace Memory
*
* @param VirtualAddress First virtual address of the page.
* @param PhysicalAddress First physical address of the page.
* @param PageCount Number of pages.
* @param Length Length to map.
* @param Flags Flags of the page. Check PTFlag enum.
* @param Type Type of the page. Check MapType enum.
*/
void Map(void *VirtualAddress, void *PhysicalAddress, size_t Length, uint64_t Flags, MapType Type = MapType::FourKB);
__always_inline inline void Map(void *VirtualAddress, void *PhysicalAddress, size_t Length, uint64_t Flags, MapType Type = MapType::FourKB)
{
	/* Stride follows the requested page size. */
	uintptr_t Step = PAGE_SIZE_4K;
	switch (Type)
	{
	case MapType::TwoMB:
		Step = PAGE_SIZE_2M;
		break;
	case MapType::OneGB:
		Step = PAGE_SIZE_1G;
		break;
	default:
		break;
	}

	/* One single-page mapping per step until the range is covered. */
	uintptr_t vBase = (uintptr_t)VirtualAddress;
	uintptr_t pBase = (uintptr_t)PhysicalAddress;
	for (uintptr_t Offset = 0; Offset < Length; Offset += Step)
		this->Map((void *)(vBase + Offset), (void *)(pBase + Offset), Flags, Type);
}
/**
* @brief Map multiple pages efficiently.
*
* This function will detect the best page size to map the pages.
*
* @note This function will not check if PSE or 1GB pages are enabled or supported.
*
* @param VirtualAddress First virtual address of the page.
* @param PhysicalAddress First physical address of the page.
* @param Length Length of the pages.
* @param Flags Flags of the page. Check PTFlag enum.
* @param Fit If true, the function will try to fit the pages in the smallest page size.
* @param FailOnModulo If true, the function will return NoMapType if the length is not a multiple of the page size.
* @return The best page size to map the pages.
*/
__always_inline inline MapType OptimizedMap(void *VirtualAddress, void *PhysicalAddress, size_t Length, uint64_t Flags, bool Fit = false, bool FailOnModulo = false)
{
	if (unlikely(Fit))
	{
		/* Greedy fit: cover the region with the largest page size that
		   still fits, mapping exactly ONE page per iteration via the
		   single-page Map overload.

		   Bug fix: the previous code passed the full remaining Length
		   to the length-based Map overload inside each loop, which
		   re-mapped the entire tail on every iteration (quadratic
		   work) and later walked smaller-page tables through entries
		   already set up as huge pages. */
		while (Length >= PAGE_SIZE_1G)
		{
			this->Map(VirtualAddress, PhysicalAddress, Flags, Virtual::MapType::OneGB);
			VirtualAddress = (void *)((uintptr_t)VirtualAddress + PAGE_SIZE_1G);
			PhysicalAddress = (void *)((uintptr_t)PhysicalAddress + PAGE_SIZE_1G);
			Length -= PAGE_SIZE_1G;
		}

		while (Length >= PAGE_SIZE_2M)
		{
			this->Map(VirtualAddress, PhysicalAddress, Flags, Virtual::MapType::TwoMB);
			VirtualAddress = (void *)((uintptr_t)VirtualAddress + PAGE_SIZE_2M);
			PhysicalAddress = (void *)((uintptr_t)PhysicalAddress + PAGE_SIZE_2M);
			Length -= PAGE_SIZE_2M;
		}

		while (Length >= PAGE_SIZE_4K)
		{
			this->Map(VirtualAddress, PhysicalAddress, Flags, Virtual::MapType::FourKB);
			VirtualAddress = (void *)((uintptr_t)VirtualAddress + PAGE_SIZE_4K);
			PhysicalAddress = (void *)((uintptr_t)PhysicalAddress + PAGE_SIZE_4K);
			Length -= PAGE_SIZE_4K;
		}

		/* Any sub-4K remainder is intentionally left unmapped,
		   matching the original behavior. */
		return Virtual::MapType::FourKB;
	}

	/* Non-fit path: choose a single page size for the whole range from
	   its length, warn (and optionally fail) when the length is not a
	   multiple of that page size, then map in one call. */
	Virtual::MapType Type = Virtual::MapType::FourKB;

	if (Length >= PAGE_SIZE_1G)
	{
		Type = Virtual::MapType::OneGB;
		if (Length % PAGE_SIZE_1G != 0)
		{
			warn("Length is not a multiple of 1GB.");
			if (FailOnModulo)
				return Virtual::MapType::NoMapType;
		}
	}
	else if (Length >= PAGE_SIZE_2M)
	{
		Type = Virtual::MapType::TwoMB;
		if (Length % PAGE_SIZE_2M != 0)
		{
			warn("Length is not a multiple of 2MB.");
			if (FailOnModulo)
				return Virtual::MapType::NoMapType;
		}
	}

	this->Map(VirtualAddress, PhysicalAddress, Length, Flags, Type);
	return Type;
}
/**
* @brief Unmap page.
*
* @param VirtualAddress Virtual address of the page.
* @param Type Type of the page. Check MapType enum.
*/
void Unmap(void *VirtualAddress, MapType Type = MapType::FourKB);
@ -660,9 +749,21 @@ namespace Memory
* @brief Unmap multiple pages.
*
* @param VirtualAddress First virtual address of the page.
* @param Length Number of pages.
* @param Length Length to unmap.
* @param Type Type of the page. Check MapType enum.
*/
void Unmap(void *VirtualAddress, size_t Length, MapType Type = MapType::FourKB);
__always_inline inline void Unmap(void *VirtualAddress, size_t Length, MapType Type = MapType::FourKB)
{
	/* Stride follows the page size of the mapping being removed. */
	uintptr_t Step = PAGE_SIZE_4K;
	if (Type == MapType::OneGB)
		Step = PAGE_SIZE_1G;
	else if (Type == MapType::TwoMB)
		Step = PAGE_SIZE_2M;

	/* Remove the range one page at a time. */
	uintptr_t Base = (uintptr_t)VirtualAddress;
	for (uintptr_t Offset = 0; Offset < Length; Offset += Step)
		this->Unmap((void *)(Base + Offset), Type);
}
/**
* @brief Remap page.
@ -670,8 +771,13 @@ namespace Memory
* @param VirtualAddress Virtual address of the page.
* @param PhysicalAddress Physical address of the page.
* @param Flags Flags of the page. Check PTFlag enum.
* @param Type Type of the page. Check MapType enum.
*/
void Remap(void *VirtualAddress, void *PhysicalAddress, uint64_t Flags, MapType Type = MapType::FourKB);
__always_inline inline void Remap(void *VirtualAddress, void *PhysicalAddress, uint64_t Flags, MapType Type = MapType::FourKB)
{
/* NOTE(review): not atomic — the address is briefly unmapped between
   the Unmap and Map calls; callers must tolerate that window. */
this->Unmap(VirtualAddress, Type);
this->Map(VirtualAddress, PhysicalAddress, Flags, Type);
}
/**
* @brief Construct a new Virtual object