Add support for 2MB and 1GB map

Alex committed on 2023-04-07 01:39:11 +03:00
parent 5becc15ed7
commit 540152a339
10 changed files with 239 additions and 116 deletions

View File

@@ -33,8 +33,6 @@ namespace Driver
{
void Driver::MapPCIAddresses(PCI::PCIDeviceHeader *PCIDevice)
{
Memory::Virtual vma = Memory::Virtual(nullptr);
debug("Header Type: %d", PCIDevice->HeaderType);
switch (PCIDevice->HeaderType)
{
@@ -103,12 +101,7 @@ namespace Driver
size_t BARSize = BARsSize[i];
debug("Mapping BAR%d from %#lx to %#lx", i, BARBase, BARBase + BARSize);
for (uintptr_t j = BARBase;
j < (BARBase + BARSize);
j += PAGE_SIZE)
{
vma.Map((void *)j, (void *)j, Memory::PTFlag::RW | Memory::PTFlag::PWT);
}
Memory::Virtual().Map((void *)BARBase, (void *)BARBase, BARSize, Memory::PTFlag::RW | Memory::PTFlag::PWT);
}
else if ((BAR[i] & 1) == 1) // I/O Base
{
@@ -116,12 +109,7 @@ namespace Driver
uintptr_t BARSize = BARsSize[i];
debug("Mapping BAR%d from %#x to %#x", i, BARBase, BARBase + BARSize);
for (uintptr_t j = BARBase;
j < (BARBase + BARSize);
j += PAGE_SIZE)
{
vma.Map((void *)j, (void *)j, Memory::PTFlag::RW | Memory::PTFlag::PWT);
}
Memory::Virtual().Map((void *)BARBase, (void *)BARBase, BARSize, Memory::PTFlag::RW | Memory::PTFlag::PWT);
}
}
break;
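Note: the per-page loops in this file are collapsed into the new length-based Map() overload added further down in the VMM changes; it walks the range internally and can later use larger page sizes through MapType. A minimal standalone sketch of the range semantics, assuming 4K pages and a page-aligned BAR base (map_page/map_range are illustrative names, not kernel APIs):

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    constexpr uintptr_t PAGE_SIZE = 0x1000;

    /* Stand-in for a single 4K identity mapping (Memory::Virtual::Map in the kernel). */
    static void map_page(uintptr_t virt, uintptr_t phys)
    {
        std::printf("map %#lx -> %#lx\n", (unsigned long)virt, (unsigned long)phys);
    }

    /* Length-based identity map: covers every 4K page overlapping [base, base + size). */
    static void map_range(uintptr_t base, size_t size)
    {
        for (uintptr_t offset = 0; offset < size; offset += PAGE_SIZE)
            map_page(base + offset, base + offset);
    }

    int main()
    {
        /* Example BAR: 8 KiB at 0xFEBD0000 covers two pages, the same as the old loop. */
        map_range(0xFEBD0000, 0x2000);
    }

The dedicated vma instance built from nullptr is dropped as well; the temporary Memory::Virtual() presumably falls back to the kernel page table when no table is supplied.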

View File

@@ -75,22 +75,47 @@ NIF void tracepagetable(PageTable4 *pt)
NIF void MapFromZero(PageTable4 *PT, BootInfo *Info)
{
static int once = 0;
if (!once++)
bool Page1GBSupport = false;
bool PSESupport = false;
if (strcmp(CPU::Vendor(), x86_CPUID_VENDOR_AMD) == 0)
{
Virtual va = Virtual(PT);
void *NullAddress = KernelAllocator.RequestPage();
memset(NullAddress, 0, PAGE_SIZE); // TODO: If the CPU instruction pointer hits this page, there should be a function to handle it. (memcpy assembly code?)
va.Map((void *)0, (void *)NullAddress, PTFlag::RW | PTFlag::US);
size_t MemSize = Info->Memory.Size;
for (size_t t = 0; t < MemSize; t += PAGE_SIZE)
va.Map((void *)t, (void *)t, PTFlag::RW);
CPU::x86::AMD::CPUID0x80000001 cpuid;
cpuid.Get();
Page1GBSupport = cpuid.EDX.Page1GB;
PSESupport = cpuid.EDX.PSE;
}
else if (strcmp(CPU::Vendor(), x86_CPUID_VENDOR_INTEL) == 0)
{
CPU::x86::Intel::CPUID0x80000001 cpuid;
cpuid.Get();
}
Virtual va = Virtual(PT);
size_t MemSize = Info->Memory.Size;
if (Page1GBSupport && PSESupport)
{
debug("1GB Page Support Enabled");
#if defined(a64)
CPU::x64::CR4 cr4 = CPU::x64::readcr4();
cr4.PSE = 1;
CPU::x64::writecr4(cr4);
#elif defined(a32)
CPU::x32::CR4 cr4 = CPU::x32::readcr4();
cr4.PSE = 1;
CPU::x32::writecr4(cr4);
#elif defined(aa64)
#endif
va.Map((void *)0, (void *)0, MemSize, PTFlag::RW | PTFlag::PS /* , Virtual::MapType::OneGB */);
}
else
{
error("MapFromZero() called more than once!");
CPU::Stop();
}
va.Map((void *)0, (void *)0, MemSize, PTFlag::RW);
void *NullAddress = KernelAllocator.RequestPage();
memset(NullAddress, 0, PAGE_SIZE); // TODO: If the CPU instruction pointer hits this page, there should be a function to handle it. (memcpy assembly code?)
va.Remap((void *)0, (void *)NullAddress, PTFlag::RW | PTFlag::US);
}
NIF void MapFramebuffer(PageTable4 *PT, BootInfo *Info)
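As this hunk reads, the old static once guard and its error()/CPU::Stop() path are removed: MapFromZero() now probes CPUID for PSE and 1GB-page support, sets CR4.PSE when both are reported, identity-maps the whole Info->Memory.Size range with a single length-based Map() call, and finally remaps page zero onto a zeroed scratch page. On the large-page path the call adds PTFlag::PS, but the Virtual::MapType::OneGB argument is still commented out, so it presumably falls back to 4K mappings in this commit. Note also that only the AMD branch assigns Page1GBSupport/PSESupport; the Intel branch calls cpuid.Get() but never stores the bits, so as written the large-page path is taken only on AMD CPUs. The detection itself is a plain CPUID query; a hedged, freestanding sketch using the compiler builtin instead of the kernel's CPU::x86 wrappers (the bit positions are architectural: leaf 1 EDX bit 3 is PSE, leaf 0x80000001 EDX bit 26 is 1 GiB pages on both vendors):

    #include <cpuid.h>
    #include <cstdio>

    int main()
    {
        unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;

        /* Leaf 1, EDX bit 3: PSE (4 MiB pages under legacy 32-bit paging). */
        bool pse = __get_cpuid(1, &eax, &ebx, &ecx, &edx) && (edx & (1u << 3));

        /* Leaf 0x80000001, EDX bit 26: 1 GiB pages (PDPE1GB / Page1GB). */
        bool page1gb = __get_cpuid(0x80000001, &eax, &ebx, &ecx, &edx) && (edx & (1u << 26));

        std::printf("PSE: %d, 1 GiB pages: %d\n", pse, page1gb);
    }

A side note on the CR4 writes: CR4.PSE only gates 4 MiB pages under legacy 32-bit paging; with PAE or 4-level paging the 2 MiB and 1 GiB mappings are controlled by the PS bit in the directory entries themselves, so setting PSE here is harmless but is not what enables the large mappings.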
@@ -151,7 +176,7 @@ NIF void MapKernel(PageTable4 *PT, BootInfo *Info)
/* Text section */
for (k = KernelStart; k < KernelTextEnd; k += PAGE_SIZE)
{
va.Map((void *)k, (void *)BaseKernelMapAddress, PTFlag::RW);
va.Map((void *)k, (void *)BaseKernelMapAddress, PTFlag::RW | PTFlag::G);
KernelAllocator.LockPage((void *)BaseKernelMapAddress);
BaseKernelMapAddress += PAGE_SIZE;
}
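The kernel text pages now carry PTFlag::G. Global TLB entries survive CR3 reloads, but only take effect once CR4.PGE is set; a minimal kernel-mode sketch of that switch, using raw inline assembly instead of the kernel's CPU::x64 CR4 wrappers (function names here are illustrative):

    #include <cstdint>

    /* CR4.PGE (bit 7) must be set for global (G) mappings to be kept across CR3 reloads. */
    static inline uint64_t readcr4()
    {
        uint64_t value;
        asm volatile("mov %%cr4, %0" : "=r"(value));
        return value;
    }

    static inline void writecr4(uint64_t value)
    {
        asm volatile("mov %0, %%cr4" : : "r"(value) : "memory");
    }

    void EnableGlobalPages()
    {
        writecr4(readcr4() | (1ULL << 7)); /* CR4.PGE */
    }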

View File

@@ -22,7 +22,7 @@
namespace Memory
{
bool Virtual::Check(void *VirtualAddress, PTFlag Flag)
bool Virtual::Check(void *VirtualAddress, PTFlag Flag, MapType Type)
{
// 0x1000 aligned
uintptr_t Address = (uintptr_t)VirtualAddress;
@@ -39,20 +39,30 @@ namespace Memory
{
PDPTE = (PageDirectoryPointerTableEntryPtr *)((uintptr_t)PML4.GetAddress() << 12);
if (PDPTE)
{
if ((PDPTE->Entries[Index.PDPTEIndex].Present))
{
if (Type == MapType::OneGB && PDPTE->Entries[Index.PDPTEIndex].PageSize)
return true;
PDE = (PageDirectoryEntryPtr *)((uintptr_t)PDPTE->Entries[Index.PDPTEIndex].GetAddress() << 12);
if (PDE)
{
if (Type == MapType::TwoMB && PDE->Entries[Index.PDEIndex].PageSize)
return true;
if ((PDE->Entries[Index.PDEIndex].Present))
{
PTE = (PageTableEntryPtr *)((uintptr_t)PDE->Entries[Index.PDEIndex].GetAddress() << 12);
if (PTE)
{
if ((PTE->Entries[Index.PTEIndex].Present))
{
return true;
}
}
}
}
}
}
}
return false;
}
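Check() now stops early on the PageSize (PS) bit at the level implied by the requested MapType: a OneGB mapping is answered at the PDPT entry and a TwoMB mapping at the PD entry, instead of walking all the way down to a PTE. The level indices are fixed 9-bit slices of the virtual address; a small self-contained sketch of that split (the struct plays the role of the kernel's Index helper but is local to this example):

    #include <cstdint>
    #include <cstdio>

    struct AddressIndexes
    {
        uint16_t PML;   /* bits 47:39 - PML4 entry                   */
        uint16_t PDPTE; /* bits 38:30 - 1 GiB page if PS is set here */
        uint16_t PDE;   /* bits 29:21 - 2 MiB page if PS is set here */
        uint16_t PTE;   /* bits 20:12 - 4 KiB page                   */
    };

    static AddressIndexes SplitVirtualAddress(uint64_t Address)
    {
        return {
            (uint16_t)((Address >> 39) & 0x1FF),
            (uint16_t)((Address >> 30) & 0x1FF),
            (uint16_t)((Address >> 21) & 0x1FF),
            (uint16_t)((Address >> 12) & 0x1FF),
        };
    }

    int main()
    {
        AddressIndexes Index = SplitVirtualAddress(0xFFFFFFFF80201000ULL);
        std::printf("PML4 %u, PDPT %u, PD %u, PT %u\n",
                    (unsigned)Index.PML, (unsigned)Index.PDPTE,
                    (unsigned)Index.PDE, (unsigned)Index.PTE);
    }

The remaining low bits are the in-page offset: bits 11:0 for 4K pages, widening to 20:0 for 2 MiB and 29:0 for 1 GiB pages.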
@@ -74,25 +84,35 @@ namespace Memory
{
PDPTE = (PageDirectoryPointerTableEntryPtr *)((uintptr_t)PML4.GetAddress() << 12);
if (PDPTE)
{
if (PDPTE->Entries[Index.PDPTEIndex].Present)
{
if (PDPTE->Entries[Index.PDPTEIndex].PageSize)
return (void *)((uintptr_t)PDPTE->Entries[Index.PDPTEIndex].GetAddress() << 12);
PDE = (PageDirectoryEntryPtr *)((uintptr_t)PDPTE->Entries[Index.PDPTEIndex].GetAddress() << 12);
if (PDE)
{
if (PDE->Entries[Index.PDEIndex].Present)
{
if (PDE->Entries[Index.PDEIndex].PageSize)
return (void *)((uintptr_t)PDE->Entries[Index.PDEIndex].GetAddress() << 12);
PTE = (PageTableEntryPtr *)((uintptr_t)PDE->Entries[Index.PDEIndex].GetAddress() << 12);
if (PTE)
{
if (PTE->Entries[Index.PTEIndex].Present)
{
return (void *)((uintptr_t)PTE->Entries[Index.PTEIndex].GetAddress() << 12);
}
}
}
}
}
}
}
return nullptr;
}
void Virtual::Map(void *VirtualAddress, void *PhysicalAddress, uint64_t Flags)
void Virtual::Map(void *VirtualAddress, void *PhysicalAddress, uint64_t Flags, MapType Type)
{
SmartLock(this->MemoryLock);
if (unlikely(!this->Table))
@@ -118,6 +138,17 @@ namespace Memory
}
else
PDPTEPtr = (PageDirectoryPointerTableEntryPtr *)((uintptr_t)PML4.GetAddress() << 12);
if (Type == MapType::OneGB)
{
PageDirectoryPointerTableEntry PDPTE = PDPTEPtr->Entries[Index.PDPTEIndex];
PDPTE.raw |= Flags;
PDPTE.PageSize = true;
PDPTE.SetAddress((uintptr_t)PhysicalAddress >> 12);
PDPTEPtr->Entries[Index.PDPTEIndex] = PDPTE;
return;
}
PML4.raw |= DirectoryFlags;
this->Table->Entries[Index.PMLIndex] = PML4;
@@ -132,6 +163,17 @@ namespace Memory
}
else
PDEPtr = (PageDirectoryEntryPtr *)((uintptr_t)PDPTE.GetAddress() << 12);
if (Type == MapType::TwoMB)
{
PageDirectoryEntry PDE = PDEPtr->Entries[Index.PDEIndex];
PDE.raw |= Flags;
PDE.PageSize = true;
PDE.SetAddress((uintptr_t)PhysicalAddress >> 12);
PDEPtr->Entries[Index.PDEIndex] = PDE;
return;
}
PDPTE.raw |= DirectoryFlags;
PDPTEPtr->Entries[Index.PDPTEIndex] = PDPTE;
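For MapType::TwoMB and MapType::OneGB, Map() now writes the physical frame straight into the PDE/PDPTE with PageSize set and returns early. Alignment is left to the caller: a 2 MiB entry can only describe a 2 MiB-aligned frame and a 1 GiB entry a 1 GiB-aligned one, since the low address bits of those entries are flag/PAT bits. A hypothetical helper (not part of this commit) for picking the largest usable page size:

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    constexpr uintptr_t PAGE_SIZE_4K = 0x1000;
    constexpr uintptr_t PAGE_SIZE_2M = 0x200000;
    constexpr uintptr_t PAGE_SIZE_1G = 0x40000000;

    enum class MapType { FourKB, TwoMB, OneGB };

    /* Pick the largest page size that the alignment of both addresses
     * and the remaining length allow. */
    static MapType LargestMapType(uintptr_t Virt, uintptr_t Phys, size_t Remaining)
    {
        if (((Virt | Phys) % PAGE_SIZE_1G) == 0 && Remaining >= PAGE_SIZE_1G)
            return MapType::OneGB;
        if (((Virt | Phys) % PAGE_SIZE_2M) == 0 && Remaining >= PAGE_SIZE_2M)
            return MapType::TwoMB;
        return MapType::FourKB;
    }

    int main()
    {
        /* 1 GiB-aligned and 2 GiB long -> OneGB; 2 MiB-aligned and 4 MiB long -> TwoMB. */
        std::printf("%d %d\n",
                    (int)LargestMapType(0x40000000, 0x40000000, 0x80000000),
                    (int)LargestMapType(0x200000, 0x200000, 0x400000));
    }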
@@ -182,18 +224,25 @@ namespace Memory
(byte & 0x02 ? '1' : '0'), \
(byte & 0x01 ? '1' : '0')
if (!this->Check(VirtualAddress, (PTFlag)Flags)) // quick workaround just to see where it fails
if (!this->Check(VirtualAddress, (PTFlag)Flags, Type)) // quick workaround just to see where it fails
warn("Failed to map %#lx - %#lx with flags: " BYTE_TO_BINARY_PATTERN, VirtualAddress, PhysicalAddress, BYTE_TO_BINARY(Flags));
#endif
}
void Virtual::Map(void *VirtualAddress, void *PhysicalAddress, size_t PageCount, uint64_t Flags)
void Virtual::Map(void *VirtualAddress, void *PhysicalAddress, size_t Length, uint64_t Flags, MapType Type)
{
for (size_t i = 0; i < PageCount; i++)
this->Map((void *)((uintptr_t)VirtualAddress + (i * PAGE_SIZE)), (void *)((uintptr_t)PhysicalAddress + (i * PAGE_SIZE)), Flags);
int PageSize = PAGE_SIZE_4K;
if (Type == MapType::TwoMB)
PageSize = PAGE_SIZE_2M;
else if (Type == MapType::OneGB)
PageSize = PAGE_SIZE_1G;
for (uintptr_t i = 0; i < Length; i += PageSize)
this->Map((void *)((uintptr_t)VirtualAddress + i), (void *)((uintptr_t)PhysicalAddress + i), Flags, Type);
}
void Virtual::Unmap(void *VirtualAddress)
void Virtual::Unmap(void *VirtualAddress, MapType Type)
{
SmartLock(this->MemoryLock);
if (!this->Table)
@@ -218,6 +267,13 @@ namespace Memory
return;
}
if (Type == MapType::OneGB && PDPTE.PageSize)
{
PDPTE.Present = false;
PDPTEPtr->Entries[Index.PDPTEIndex] = PDPTE;
return;
}
PageDirectoryEntryPtr *PDEPtr = (PageDirectoryEntryPtr *)((uintptr_t)PDPTE.Address << 12);
PageDirectoryEntry PDE = PDEPtr->Entries[Index.PDEIndex];
if (!PDE.Present)
@@ -226,6 +282,13 @@ namespace Memory
return;
}
if (Type == MapType::TwoMB && PDE.PageSize)
{
PDE.Present = false;
PDEPtr->Entries[Index.PDEIndex] = PDE;
return;
}
PageTableEntryPtr *PTEPtr = (PageTableEntryPtr *)((uintptr_t)PDE.Address << 12);
PageTableEntry PTE = PTEPtr->Entries[Index.PTEIndex];
if (!PTE.Present)
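Unmap() clears Present at whichever level actually held the mapping, but the stale translation can remain cached in the TLB; after removing or remapping an entry, the address is normally invalidated with invlpg (or by reloading CR3). A one-line sketch with raw inline assembly, not taken from this kernel:

    #include <cstdint>

    /* Flush one virtual address from the TLB after its paging entry changed.
     * Kernel-mode only; the kernel's own CPU wrappers are not used here. */
    static inline void FlushTLBEntry(void *VirtualAddress)
    {
        asm volatile("invlpg (%0)" : : "r"(VirtualAddress) : "memory");
    }

For 2 MiB and 1 GiB mappings, an invlpg on any address inside the page invalidates the whole translation.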
@@ -252,16 +315,23 @@ namespace Memory
#endif
}
void Virtual::Unmap(void *VirtualAddress, size_t PageCount)
void Virtual::Unmap(void *VirtualAddress, size_t Length, MapType Type)
{
for (size_t i = 0; i < PageCount; i++)
this->Unmap((void *)((uintptr_t)VirtualAddress + (i * PAGE_SIZE)));
int PageSize = PAGE_SIZE_4K;
if (Type == MapType::TwoMB)
PageSize = PAGE_SIZE_2M;
else if (Type == MapType::OneGB)
PageSize = PAGE_SIZE_1G;
for (uintptr_t i = 0; i < Length; i += PageSize)
this->Unmap((void *)((uintptr_t)VirtualAddress + i), Type);
}
void Virtual::Remap(void *VirtualAddress, void *PhysicalAddress, uint64_t Flags)
void Virtual::Remap(void *VirtualAddress, void *PhysicalAddress, uint64_t Flags, MapType Type)
{
this->Unmap(VirtualAddress);
this->Map(VirtualAddress, PhysicalAddress, Flags);
this->Unmap(VirtualAddress, Type);
this->Map(VirtualAddress, PhysicalAddress, Flags, Type);
}
Virtual::Virtual(PageTable4 *Table)
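Taken together, the new overloads let a caller choose the granularity per range. A usage sketch built only from identifiers visible in this diff; it will not compile outside the kernel tree, and the addresses and sizes are made up:

    using namespace Memory;

    void ExampleMappings(PageTable4 *PT)
    {
        Virtual va = Virtual(PT);

        /* Identity-map the first 4 GiB with 1 GiB entries (needs the CPUID 1 GiB-page bit). */
        va.Map((void *)0, (void *)0, 4ULL * 1024 * 1024 * 1024,
               PTFlag::RW | PTFlag::PS, Virtual::MapType::OneGB);

        /* Map a 16 MiB framebuffer with 2 MiB entries. */
        va.Map((void *)0xFD000000, (void *)0xFD000000, 16 * 1024 * 1024,
               PTFlag::RW | PTFlag::PWT, Virtual::MapType::TwoMB);

        /* Unmap with the matching type so the walk stops at the right level. */
        va.Unmap((void *)0xFD000000, 16 * 1024 * 1024, Virtual::MapType::TwoMB);
    }

Passing the same MapType to Unmap() matters: its early-exit paths only look for the PS bit at the level implied by the type, so a mismatched type would try to descend through an entry that is not a table pointer.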

View File

@@ -36,8 +36,7 @@ namespace Video
PSF2_HEADER *font2 = (PSF2_HEADER *)KernelAllocator.RequestPages(FontDataLength / PAGE_SIZE + 1);
memcpy((void *)font2, Start, FontDataLength);
for (uintptr_t i = 0; i < FontDataLength / PAGE_SIZE + 1; i++)
Memory::Virtual().Map((void *)(font2 + (i * PAGE_SIZE)), (void *)(font2 + (i * PAGE_SIZE)), Memory::PTFlag::RW);
Memory::Virtual().Map((void *)font2, (void *)font2, FontDataLength, Memory::PTFlag::RW);
if (font2->magic[0] != PSF2_MAGIC0 || font2->magic[1] != PSF2_MAGIC1 || font2->magic[2] != PSF2_MAGIC2 || font2->magic[3] != PSF2_MAGIC3)
{
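Besides the simplification, the replaced loop had a pointer-arithmetic quirk: font2 is a PSF2_HEADER *, so font2 + (i * PAGE_SIZE) advances by i * PAGE_SIZE * sizeof(PSF2_HEADER) bytes rather than by i pages, overshooting the intended range; the byte-length Map() call sidesteps this entirely. A small self-contained demonstration of the scaling rule (the struct is a stand-in with the same role, not the kernel's exact definition):

    #include <cstdint>
    #include <cstdio>

    /* Stand-in for PSF2_HEADER; only its size matters for this point. */
    struct Psf2HeaderLike
    {
        uint8_t magic[4];
        uint32_t version, headersize, flags, length, charsize, height, width;
    };

    static Psf2HeaderLike pool[0x1000];

    int main()
    {
        Psf2HeaderLike *font2 = pool;
        const uintptr_t PAGE_SIZE = 0x1000;

        /* Typed pointer arithmetic scales by sizeof(*font2)... */
        uintptr_t typed_step = (uintptr_t)(font2 + PAGE_SIZE) - (uintptr_t)font2;
        /* ...while byte arithmetic advances by exactly one page. */
        uintptr_t byte_step = ((uintptr_t)font2 + PAGE_SIZE) - (uintptr_t)font2;

        std::printf("typed step: %#lx bytes, byte step: %#lx bytes\n",
                    (unsigned long)typed_step, (unsigned long)byte_step);
    }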