Make kernel boot in x32

Alex
2023-05-11 18:34:21 +03:00
parent 15637438da
commit 149d8ba790
19 changed files with 803 additions and 530 deletions

View File

@@ -227,6 +227,7 @@ SafeFunction void PageFaultExceptionHandler(CHArchTrapFrame *Frame)
if (Present)
{
#if defined(a64)
uintptr_t CheckPageFaultLinearAddress = (uintptr_t)CheckPageFaultAddress;
CheckPageFaultLinearAddress &= 0xFFFFFFFFFFFFF000;
debug("%#lx -> %#lx", CheckPageFaultAddress, CheckPageFaultLinearAddress);
@@ -298,6 +299,7 @@ SafeFunction void PageFaultExceptionHandler(CHArchTrapFrame *Frame)
PTE->Entries[Index.PTEIndex].ProtectionKey,
PTE->Entries[Index.PTEIndex].ExecuteDisable ? "1" : "0",
PTE->Entries[Index.PTEIndex].GetAddress() << 12);
#endif
}
}
#endif
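
Editor's note: the new #if defined(a64) guard is needed because both the mask constant and the table walk below it are 64-bit specific; on an x32 build 0xFFFFFFFFFFFFF000 would truncate and the PML4/PDPT index fields do not exist. A width-agnostic way to express the same page-base masking, as a minimal stand-alone sketch (the helper name is hypothetical):

#include <cstdint>
#include <cstdio>

// Hypothetical helper: round a linear address down to its 4 KiB page base.
// ~(uintptr_t)0xFFF widens to the platform's pointer size, so the same
// expression works on 32-bit and 64-bit builds alike, unlike the hard-coded
// 64-bit mask used in the guarded code above.
static inline uintptr_t PageBase(uintptr_t Address)
{
    return Address & ~(uintptr_t)0xFFF;
}

int main()
{
    uintptr_t Fault = 0xC0123ABC; // fits in 32 bits for the sake of the demo
    printf("%#zx -> %#zx\n", (size_t)Fault, (size_t)PageBase(Fault)); // -> 0xc0123000
    return 0;
}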

View File

@@ -435,6 +435,7 @@ namespace CrashHandler
{
for (int PMLIndex = 0; PMLIndex < 512; PMLIndex++)
{
#if defined(a64)
Memory::PageMapLevel4 PML4 = BasePageTable->Entries[PMLIndex];
EHPrint("\e888888# \eAABBCC%03d-%03d-%03d-%03d\e4500F5: P:%s RW:%s US:%s PWT:%s PCB:%s A:%s NX:%s Address:\e888888%#lx\n",
PMLIndex, 0, 0, 0,
@@ -514,6 +515,7 @@ namespace CrashHandler
}
}
}
#endif
}
}
}
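
Editor's note: the dump guarded here walks all 512 top-level entries and prints the decoded flag bits of each present one. A condensed stand-alone sketch of the same pattern, using a mock entry type (the kernel's real PageMapLevel4 bitfield carries many more flags):

#include <cstdint>
#include <cstdio>

// Mock of a 64-bit top-level paging entry: bit 0 is Present, bits 12..51
// hold the physical page number of the next-level table.
struct Entry { uint64_t raw; };

void DumpTopLevel(const Entry (&Entries)[512])
{
    for (int i = 0; i < 512; i++)
    {
        if (!(Entries[i].raw & 1)) // skip non-present entries
            continue;
        uint64_t Address = (Entries[i].raw >> 12) & 0xFFFFFFFFFFULL;
        printf("%03d: P:1 Address:%#llx\n", i, (unsigned long long)(Address << 12));
    }
}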

View File

@@ -281,6 +281,7 @@ SafeFunction void UserModeExceptionHandler(CHArchTrapFrame *Frame)
if (Present)
{
#if defined(a64)
uintptr_t CheckPageFaultLinearAddress = (uintptr_t)CheckPageFaultAddress;
CheckPageFaultLinearAddress &= 0xFFFFFFFFFFFFF000;
debug("%#lx -> %#lx", CheckPageFaultAddress, CheckPageFaultLinearAddress);
@@ -345,6 +346,7 @@ SafeFunction void UserModeExceptionHandler(CHArchTrapFrame *Frame)
PTE->Entries[Index.PTEIndex].ProtectionKey,
PTE->Entries[Index.PTEIndex].ExecuteDisable ? "1" : "0",
PTE->Entries[Index.PTEIndex].GetAddress() << 12);
#endif
}
}
}

View File

@@ -287,6 +287,7 @@ NIF void MapKernel(PageTable *PT)
NIF void InitializeMemoryManagement()
{
#ifdef DEBUG
#ifndef a32
for (uint64_t i = 0; i < bInfo.Memory.Entries; i++)
{
uintptr_t Base = r_cst(uintptr_t, bInfo.Memory.Entry[i].BaseAddress);
@@ -330,7 +331,8 @@ NIF void InitializeMemoryManagement()
End,
Type);
}
-#endif
+#endif // a32
+#endif // DEBUG
trace("Initializing Physical Memory Manager");
// KernelAllocator = Physical(); <- Already called in the constructor
KernelAllocator.Init();
@@ -364,14 +366,13 @@ NIF void InitializeMemoryManagement()
}
else if (strcmp(CPU::Vendor(), x86_CPUID_VENDOR_INTEL) == 0)
{
-CPU::x86::Intel::CPUID0x80000001 cpuid;
+CPU::x86::Intel::CPUID0x00000001 cpuid;
cpuid.Get();
-fixme("Intel PSE support");
+PSESupport = cpuid.EDX.PSE;
}
-if (Page1GBSupport && PSESupport)
+if (PSESupport)
{
-debug("1GB Page Support Enabled");
#if defined(a64)
CPU::x64::CR4 cr4 = CPU::x64::readcr4();
cr4.PSE = 1;
@@ -382,8 +383,14 @@ NIF void InitializeMemoryManagement()
CPU::x32::writecr4(cr4);
#elif defined(aa64)
#endif
trace("PSE Support Enabled");
}
#ifdef DEBUG
if (Page1GBSupport)
debug("1GB Page Support Enabled");
#endif
MapFromZero(KernelPageTable);
MapFramebuffer(KernelPageTable);
MapKernel(KernelPageTable);
@@ -615,7 +622,7 @@ void free(void *Address)
#endif
}
-void *operator new(size_t Size)
+void *operator new(std::size_t Size)
{
#ifdef DEBUG_ALLOCATIONS_SL
SmartLockClass lock___COUNTER__(OperatorAllocatorLock, (KernelSymbolTable ? KernelSymbolTable->GetSymbolFromAddress((uintptr_t)__builtin_return_address(0)) : "Unknown"));
@@ -646,7 +653,7 @@ void *operator new(size_t Size)
return ret;
}
-void *operator new[](size_t Size)
+void *operator new[](std::size_t Size)
{
#ifdef DEBUG_ALLOCATIONS_SL
SmartLockClass lock___COUNTER__(OperatorAllocatorLock, (KernelSymbolTable ? KernelSymbolTable->GetSymbolFromAddress((uintptr_t)__builtin_return_address(0)) : "Unknown"));
@@ -677,7 +684,7 @@ void *operator new[](size_t Size)
return ret;
}
-void *operator new(unsigned long Size, std::align_val_t Alignment)
+void *operator new(std::size_t Size, std::align_val_t Alignment)
{
#ifdef DEBUG_ALLOCATIONS_SL
SmartLockClass lock___COUNTER__(OperatorAllocatorLock, (KernelSymbolTable ? KernelSymbolTable->GetSymbolFromAddress((uintptr_t)__builtin_return_address(0)) : "Unknown"));
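
Editor's note: two substantive fixes sit in this file besides the #ifndef a32 guard. First, the Intel PSE probe moves from the extended leaf 0x80000001 to the standard leaf 0x00000001, which is where EDX bit 3 (PSE) is actually defined; PSE is what allows 4 MiB pages once CR4.PSE is set, which matters for the non-PAE x32 boot path. Second, the operator new overloads are corrected to the standard-mandated std::size_t parameter. A minimal user-space sketch of the corrected feature probe, using GCC/Clang's <cpuid.h> helper rather than the kernel's own CPUID wrappers:

#include <cpuid.h>
#include <cstdio>

// PSE (page size extension) is reported in CPUID leaf 0x00000001, EDX bit 3.
// The old code queried leaf 0x80000001, whose EDX has a different layout.
bool HasPSE()
{
    unsigned eax, ebx, ecx, edx;
    if (!__get_cpuid(0x00000001, &eax, &ebx, &ecx, &edx))
        return false;
    return (edx >> 3) & 1; // EDX.PSE
}

int main()
{
    printf("PSE: %s\n", HasPSE() ? "yes" : "no");
    return 0;
}

Once detected, the kernel sets CR4.PSE (bit 4) so large-page PDE mappings are honored; the 1 GiB-page capability is now only logged under DEBUG rather than gating the CR4 write.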

View File

@@ -37,8 +37,6 @@ namespace Memory
this->PTEIndex = Address & 0x3FF;
Address >>= 10;
this->PDEIndex = Address & 0x3FF;
-Address >>= 10;
-this->PDPTEIndex = Address & 0x3FF;
#elif defined(aa64)
#endif
}
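
Editor's note: the removed shift reflects the shape of non-PAE IA-32 paging. A 32-bit linear address splits 10-10-12 (page directory index, page table index, page offset), so there is no third, PDPT-level index to extract. A stand-alone sketch of the 32-bit indexer, mirroring the field names in the diff:

#include <cstdint>
#include <cstdio>

struct PageMapIndexer32
{
    uint32_t PTEIndex, PDEIndex;

    explicit PageMapIndexer32(uint32_t Address)
    {
        Address >>= 12;                   // drop the 12-bit page offset
        this->PTEIndex = Address & 0x3FF; // bits 12..21 -> page table index
        Address >>= 10;
        this->PDEIndex = Address & 0x3FF; // bits 22..31 -> page directory index
    }
};

int main()
{
    PageMapIndexer32 Index(0xC0401000);
    printf("PDE %u, PTE %u\n", Index.PDEIndex, Index.PTEIndex); // PDE 769, PTE 1
    return 0;
}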

View File

@@ -101,6 +101,14 @@ namespace Memory
debug("Reserving RSDT...");
this->ReservePages((void *)bInfo.RSDP, TO_PAGES(sizeof(BootInfo::RSDPInfo)));
#if defined(a32)
if ((uintptr_t)ACPIPtr > 0x2800000) /* FIXME */
{
error("ACPI table is located above 0x2800000, which is not mapped.");
return;
}
#endif
size_t TableSize = ((ACPIPtr->Length - sizeof(ACPI::ACPI::ACPIHeader)) / (XSDT ? 8 : 4));
debug("Reserving %d ACPI tables...", TableSize);

View File

@@ -22,276 +22,6 @@
namespace Memory
{
bool Virtual::Check(void *VirtualAddress, PTFlag Flag, MapType Type)
{
// 0x1000 aligned
uintptr_t Address = (uintptr_t)VirtualAddress;
Address &= 0xFFFFFFFFFFFFF000;
PageMapIndexer Index = PageMapIndexer(Address);
PageMapLevel4 PML4 = this->Table->Entries[Index.PMLIndex];
PageDirectoryPointerTableEntryPtr *PDPTE = nullptr;
PageDirectoryEntryPtr *PDE = nullptr;
PageTableEntryPtr *PTE = nullptr;
if ((PML4.raw & Flag) > 0)
{
PDPTE = (PageDirectoryPointerTableEntryPtr *)((uintptr_t)PML4.GetAddress() << 12);
if (PDPTE)
{
if ((PDPTE->Entries[Index.PDPTEIndex].Present))
{
if (Type == MapType::OneGB && PDPTE->Entries[Index.PDPTEIndex].PageSize)
return true;
PDE = (PageDirectoryEntryPtr *)((uintptr_t)PDPTE->Entries[Index.PDPTEIndex].GetAddress() << 12);
if (PDE)
{
if (Type == MapType::TwoMB && PDE->Entries[Index.PDEIndex].PageSize)
return true;
if ((PDE->Entries[Index.PDEIndex].Present))
{
PTE = (PageTableEntryPtr *)((uintptr_t)PDE->Entries[Index.PDEIndex].GetAddress() << 12);
if (PTE)
{
if ((PTE->Entries[Index.PTEIndex].Present))
return true;
}
}
}
}
}
}
return false;
}
void *Virtual::GetPhysical(void *VirtualAddress)
{
// 0x1000 aligned
uintptr_t Address = (uintptr_t)VirtualAddress;
Address &= 0xFFFFFFFFFFFFF000;
PageMapIndexer Index = PageMapIndexer(Address);
PageMapLevel4 PML4 = this->Table->Entries[Index.PMLIndex];
PageDirectoryPointerTableEntryPtr *PDPTE = nullptr;
PageDirectoryEntryPtr *PDE = nullptr;
PageTableEntryPtr *PTE = nullptr;
if (PML4.Present)
{
PDPTE = (PageDirectoryPointerTableEntryPtr *)((uintptr_t)PML4.GetAddress() << 12);
if (PDPTE)
{
if (PDPTE->Entries[Index.PDPTEIndex].Present)
{
if (PDPTE->Entries[Index.PDPTEIndex].PageSize)
return (void *)((uintptr_t)PDPTE->Entries[Index.PDPTEIndex].GetAddress() << 12);
PDE = (PageDirectoryEntryPtr *)((uintptr_t)PDPTE->Entries[Index.PDPTEIndex].GetAddress() << 12);
if (PDE)
{
if (PDE->Entries[Index.PDEIndex].Present)
{
if (PDE->Entries[Index.PDEIndex].PageSize)
return (void *)((uintptr_t)PDE->Entries[Index.PDEIndex].GetAddress() << 12);
PTE = (PageTableEntryPtr *)((uintptr_t)PDE->Entries[Index.PDEIndex].GetAddress() << 12);
if (PTE)
{
if (PTE->Entries[Index.PTEIndex].Present)
return (void *)((uintptr_t)PTE->Entries[Index.PTEIndex].GetAddress() << 12);
}
}
}
}
}
}
return nullptr;
}
void Virtual::Map(void *VirtualAddress, void *PhysicalAddress, uint64_t Flags, MapType Type)
{
SmartLock(this->MemoryLock);
if (unlikely(!this->Table))
{
error("No page table");
return;
}
Flags |= PTFlag::P;
PageMapIndexer Index = PageMapIndexer((uintptr_t)VirtualAddress);
// Clear any flags that are not 1 << 0 (Present) - 1 << 5 (Accessed), because the rest are for page table entries only
uint64_t DirectoryFlags = Flags & 0x3F;
PageMapLevel4 *PML4 = &this->Table->Entries[Index.PMLIndex];
PageDirectoryPointerTableEntryPtr *PDPTEPtr = nullptr;
if (!PML4->Present)
{
PDPTEPtr = (PageDirectoryPointerTableEntryPtr *)KernelAllocator.RequestPages(TO_PAGES(sizeof(PageDirectoryPointerTableEntryPtr) + 1));
memset(PDPTEPtr, 0, sizeof(PageDirectoryPointerTableEntryPtr));
PML4->Present = true;
PML4->SetAddress((uintptr_t)PDPTEPtr >> 12);
}
else
PDPTEPtr = (PageDirectoryPointerTableEntryPtr *)(PML4->GetAddress() << 12);
PML4->raw |= DirectoryFlags;
PageDirectoryPointerTableEntry *PDPTE = &PDPTEPtr->Entries[Index.PDPTEIndex];
if (Type == MapType::OneGB)
{
PDPTE->raw |= Flags;
PDPTE->PageSize = true;
PDPTE->SetAddress((uintptr_t)PhysicalAddress >> 12);
debug("Mapped 1GB page at %p to %p", VirtualAddress, PhysicalAddress);
return;
}
PageDirectoryEntryPtr *PDEPtr = nullptr;
if (!PDPTE->Present)
{
PDEPtr = (PageDirectoryEntryPtr *)KernelAllocator.RequestPages(TO_PAGES(sizeof(PageDirectoryEntryPtr) + 1));
memset(PDEPtr, 0, sizeof(PageDirectoryEntryPtr));
PDPTE->Present = true;
PDPTE->SetAddress((uintptr_t)PDEPtr >> 12);
}
else
PDEPtr = (PageDirectoryEntryPtr *)(PDPTE->GetAddress() << 12);
PDPTE->raw |= DirectoryFlags;
PageDirectoryEntry *PDE = &PDEPtr->Entries[Index.PDEIndex];
if (Type == MapType::TwoMB)
{
PDE->raw |= Flags;
PDE->PageSize = true;
PDE->SetAddress((uintptr_t)PhysicalAddress >> 12);
debug("Mapped 2MB page at %p to %p", VirtualAddress, PhysicalAddress);
return;
}
PageTableEntryPtr *PTEPtr = nullptr;
if (!PDE->Present)
{
PTEPtr = (PageTableEntryPtr *)KernelAllocator.RequestPages(TO_PAGES(sizeof(PageTableEntryPtr) + 1));
memset(PTEPtr, 0, sizeof(PageTableEntryPtr));
PDE->Present = true;
PDE->SetAddress((uintptr_t)PTEPtr >> 12);
}
else
PTEPtr = (PageTableEntryPtr *)(PDE->GetAddress() << 12);
PDE->raw |= DirectoryFlags;
PageTableEntry *PTE = &PTEPtr->Entries[Index.PTEIndex];
PTE->Present = true;
PTE->raw |= Flags;
PTE->SetAddress((uintptr_t)PhysicalAddress >> 12);
#if defined(a64)
CPU::x64::invlpg(VirtualAddress);
#elif defined(a32)
CPU::x32::invlpg(VirtualAddress);
#elif defined(aa64)
asmv("dsb sy");
asmv("tlbi vae1is, %0"
:
: "r"(VirtualAddress)
: "memory");
asmv("dsb sy");
asmv("isb");
#endif
#ifdef DEBUG
/* https://stackoverflow.com/a/3208376/9352057 */
#define BYTE_TO_BINARY_PATTERN "%c%c%c%c%c%c%c%c"
#define BYTE_TO_BINARY(byte) \
(byte & 0x80 ? '1' : '0'), \
(byte & 0x40 ? '1' : '0'), \
(byte & 0x20 ? '1' : '0'), \
(byte & 0x10 ? '1' : '0'), \
(byte & 0x08 ? '1' : '0'), \
(byte & 0x04 ? '1' : '0'), \
(byte & 0x02 ? '1' : '0'), \
(byte & 0x01 ? '1' : '0')
if (!this->Check(VirtualAddress, (PTFlag)Flags, Type)) // quick workaround just to see where it fails
warn("Failed to map v:%#lx p:%#lx with flags: " BYTE_TO_BINARY_PATTERN, VirtualAddress, PhysicalAddress, BYTE_TO_BINARY(Flags));
#endif
}
void Virtual::Unmap(void *VirtualAddress, MapType Type)
{
SmartLock(this->MemoryLock);
if (!this->Table)
{
error("No page table");
return;
}
PageMapIndexer Index = PageMapIndexer((uintptr_t)VirtualAddress);
PageMapLevel4 *PML4 = &this->Table->Entries[Index.PMLIndex];
if (!PML4->Present)
{
error("Page %#lx not present", PML4->GetAddress());
return;
}
PageDirectoryPointerTableEntryPtr *PDPTEPtr = (PageDirectoryPointerTableEntryPtr *)((uintptr_t)PML4->Address << 12);
PageDirectoryPointerTableEntry *PDPTE = &PDPTEPtr->Entries[Index.PDPTEIndex];
if (!PDPTE->Present)
{
error("Page %#lx not present", PDPTE->GetAddress());
return;
}
if (Type == MapType::OneGB && PDPTE->PageSize)
{
PDPTE->Present = false;
return;
}
PageDirectoryEntryPtr *PDEPtr = (PageDirectoryEntryPtr *)((uintptr_t)PDPTE->Address << 12);
PageDirectoryEntry *PDE = &PDEPtr->Entries[Index.PDEIndex];
if (!PDE->Present)
{
error("Page %#lx not present", PDE->GetAddress());
return;
}
if (Type == MapType::TwoMB && PDE->PageSize)
{
PDE->Present = false;
return;
}
PageTableEntryPtr *PTEPtr = (PageTableEntryPtr *)((uintptr_t)PDE->Address << 12);
PageTableEntry PTE = PTEPtr->Entries[Index.PTEIndex];
if (!PTE.Present)
{
error("Page %#lx not present", PTE.GetAddress());
return;
}
PTE.Present = false;
PTEPtr->Entries[Index.PTEIndex] = PTE;
#if defined(a64)
CPU::x64::invlpg(VirtualAddress);
#elif defined(a32)
CPU::x32::invlpg(VirtualAddress);
#elif defined(aa64)
asmv("dsb sy");
asmv("tlbi vae1is, %0"
:
: "r"(VirtualAddress)
: "memory");
asmv("dsb sy");
asmv("isb");
#endif
}
Virtual::Virtual(PageTable *Table)
{
if (Table)
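
Editor's note: the block removed above is the x86-64 four-level walk (PML4 -> PDPT -> PD -> PT) behind Check, GetPhysical, Map, and Unmap; with x32 support it can no longer live as-is in this file. For comparison, a deliberately simplified sketch of what a two-level, non-PAE x32 analogue of Map looks like. This is illustrative only, not the kernel's actual replacement code, and it assumes an identity-mapped, zero-filled page allocator in the spirit of KernelAllocator.RequestPages:

#include <cstdint>

using pde_t = uint32_t;
using pte_t = uint32_t;
constexpr uint32_t Present = 1u << 0;

extern void *RequestZeroedPage(); // assumed allocator, returns a zeroed 4 KiB page

void Map32(pde_t *PageDirectory, uint32_t VirtAddr, uint32_t PhysAddr, uint32_t Flags)
{
    uint32_t PDEIndex = (VirtAddr >> 22) & 0x3FF; // top 10 bits
    uint32_t PTEIndex = (VirtAddr >> 12) & 0x3FF; // middle 10 bits

    pte_t *PageTable;
    if (!(PageDirectory[PDEIndex] & Present))
    {
        // No page table yet for this 4 MiB region: allocate and link one.
        PageTable = static_cast<pte_t *>(RequestZeroedPage());
        PageDirectory[PDEIndex] = (uint32_t)(uintptr_t)PageTable | Flags | Present;
    }
    else
        PageTable = (pte_t *)(uintptr_t)(PageDirectory[PDEIndex] & 0xFFFFF000);

    PageTable[PTEIndex] = (PhysAddr & 0xFFFFF000) | Flags | Present;
    asm volatile("invlpg (%0)" ::"r"(VirtAddr) : "memory"); // evict the stale TLB entry
}

The same 10-10 split would drive Unmap (clear the PTE's Present bit, then invlpg) and GetPhysical (walk both levels, honoring the 4 MiB PDE.PageSize shortcut the way the removed code honors 1 GiB and 2 MiB pages).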

View File

@@ -232,7 +232,10 @@ namespace Video
this->Buffers[Index].DoNotScroll = Value;
}
-char Display::Print(char Char, int Index, bool WriteToUART)
+#if defined(a32)
+__no_sanitize("undefined")
+#endif
+char Display::Print(char Char, int Index, bool WriteToUART)
{
if (unlikely(this->Buffers[Index].Checksum != 0xBBFFE515A117E))
return 0;
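
Editor's note: __no_sanitize("undefined") excludes this hot path from UBSan on the 32-bit build only, where the instrumentation evidently flags something in this function. The macro is presumably a thin wrapper over the compiler attribute; a sketch of how such a wrapper is commonly defined (the macro guard and the example function are illustrative):

// Recent GCC and Clang both accept the no_sanitize("undefined") attribute.
#if defined(__GNUC__) || defined(__clang__)
#define __no_sanitize(x) __attribute__((no_sanitize(x)))
#else
#define __no_sanitize(x)
#endif

// Example: suppress UBSan for one function that performs a load the
// sanitizer would otherwise flag as misaligned.
__no_sanitize("undefined")
int ReadUnaligned(const unsigned char *Bytes)
{
    return *(const int *)(Bytes + 1);
}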