From 2d1c42fbcdd2653af6390c57e0652654229651fc Mon Sep 17 00:00:00 2001
From: Alex
Date: Mon, 5 Dec 2022 00:48:41 +0200
Subject: [PATCH] Memory-related code optimization

---
 Core/Memory/Memory.cpp                | 13 +++----
 Core/Memory/PageDirectoryEntry.cpp    |  2 +-
 Core/Memory/PageMapIndexer.cpp        | 32 ++++++++++-------
 Core/Memory/PhysicalMemoryManager.cpp | 34 +++++++++---------
 Core/Memory/StackGuard.cpp            | 10 +++++-
 Core/Memory/VirtualMemoryManager.cpp  | 51 +++++++++++++++++++++------
 Execute/Spawn.cpp                     | 19 +++++++---
 Tasking/Task.cpp                      |  7 +++-
 include/memory.hpp                    | 12 ++++++-
 9 files changed, 123 insertions(+), 57 deletions(-)

diff --git a/Core/Memory/Memory.cpp b/Core/Memory/Memory.cpp
index 9670a95..6a7e1b7 100644
--- a/Core/Memory/Memory.cpp
+++ b/Core/Memory/Memory.cpp
@@ -44,8 +44,8 @@ __no_instrument_function void MapFromZero(PageTable *PT, BootInfo *Info)
     uint64_t MemSize = Info->Memory.Size;
     for (uint64_t t = 0; t < MemSize; t += PAGE_SIZE)
     {
-        va.Map((void *)t, (void *)t, PTFlag::RW);
-        va.Map((void *)VirtualOffsetNormalVMA, (void *)t, PTFlag::RW);
+        va.Map((void *)t, (void *)t, PTFlag::RW /* | PTFlag::US */);
+        va.Map((void *)VirtualOffsetNormalVMA, (void *)t, PTFlag::RW /* | PTFlag::US */);
         VirtualOffsetNormalVMA += PAGE_SIZE;
     }
 }
@@ -62,7 +62,7 @@ __no_instrument_function void MapFramebuffer(PageTable *PT, BootInfo *Info)
         for (uint64_t fb_base = (uint64_t)Info->Framebuffer[itrfb].BaseAddress;
              fb_base < ((uint64_t)Info->Framebuffer[itrfb].BaseAddress + ((Info->Framebuffer[itrfb].Pitch * Info->Framebuffer[itrfb].Height) + PAGE_SIZE));
              fb_base += PAGE_SIZE)
-            va.Map((void *)(fb_base + NORMAL_VMA_OFFSET), (void *)fb_base, PTFlag::RW | PTFlag::US);
+            va.Map((void *)(fb_base + NORMAL_VMA_OFFSET), (void *)fb_base, PTFlag::RW | PTFlag::US | PTFlag::G);
         itrfb++;
     }
 }
@@ -91,21 +91,21 @@ __no_instrument_function void MapKernel(PageTable *PT, BootInfo *Info)

     for (k = KernelTextEnd; k < KernelDataEnd; k += PAGE_SIZE)
     {
-        va.Map((void *)k, (void *)BaseKernelMapAddress, PTFlag::RW);
+        va.Map((void *)k, (void *)BaseKernelMapAddress, PTFlag::RW | PTFlag::G);
         KernelAllocator.LockPage((void *)BaseKernelMapAddress);
         BaseKernelMapAddress += PAGE_SIZE;
     }

     for (k = KernelDataEnd; k < KernelRoDataEnd; k += PAGE_SIZE)
     {
-        va.Map((void *)k, (void *)BaseKernelMapAddress, PTFlag::P);
+        va.Map((void *)k, (void *)BaseKernelMapAddress, PTFlag::P | PTFlag::G);
         KernelAllocator.LockPage((void *)BaseKernelMapAddress);
         BaseKernelMapAddress += PAGE_SIZE;
     }

     for (k = KernelRoDataEnd; k < KernelEnd; k += PAGE_SIZE)
     {
-        va.Map((void *)k, (void *)BaseKernelMapAddress, PTFlag::RW);
+        va.Map((void *)k, (void *)BaseKernelMapAddress, PTFlag::RW | PTFlag::G);
         KernelAllocator.LockPage((void *)BaseKernelMapAddress);
         BaseKernelMapAddress += PAGE_SIZE;
     }
@@ -209,6 +209,7 @@ __no_instrument_function void InitializeMemoryManagement(BootInfo *Info)
 #elif defined(__aarch64__)
     asmv("msr ttbr0_el1, %0" ::"r"(KernelPageTable));
 #endif
+    debug("Page table updated.");
     if (strstr(Info->Kernel.CommandLine, "xallocv1"))
     {
         XallocV1Allocator = new Xalloc::AllocatorV1((void *)KERNEL_HEAP_BASE, false, false);
diff --git a/Core/Memory/PageDirectoryEntry.cpp b/Core/Memory/PageDirectoryEntry.cpp
index 1fdbddf..8c95881 100644
--- a/Core/Memory/PageDirectoryEntry.cpp
+++ b/Core/Memory/PageDirectoryEntry.cpp
@@ -7,7 +7,7 @@ namespace Memory
     void PageDirectoryEntry::ClearFlags() { this->Value.raw = 0; }
     void PageDirectoryEntry::SetFlag(uint64_t Flag, bool Enabled)
     {
-        this->Value.raw &= ~Flag;
+        this->Value.raw = 0;
         if (Enabled)
             this->Value.raw |= Flag;
     }
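
NOTE: The PTFlag::G bit added to the kernel and framebuffer mappings above marks those pages as global: with CR4.PGE set, their TLB entries survive CR3 reloads, so they are not flushed on every context switch. A minimal sketch of the bits being OR-ed together, assuming PTFlag mirrors the architectural x86-64 bit positions (the enum itself is not part of this patch):

    // x86-64 page-table entry bits (Intel SDM Vol. 3A, §4.5)
    enum PTFlag : uint64_t
    {
        P  = 1ULL << 0, // Present
        RW = 1ULL << 1, // Read/Write
        US = 1ULL << 2, // User/Supervisor (user-accessible when set)
        G  = 1ULL << 8, // Global: TLB entry survives CR3 reloads (needs CR4.PGE)
    };
    // e.g. a global writable kernel page: P | RW | G == 0x103

Also note that SetFlag() now clears the entire entry (raw = 0) before applying the flag, not just the targeted bit; this is why the Map() changes further down move SetAddress() after the flag writes.
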
diff --git a/Core/Memory/PageMapIndexer.cpp b/Core/Memory/PageMapIndexer.cpp
index 8137291..d99f3d2 100644
--- a/Core/Memory/PageMapIndexer.cpp
+++ b/Core/Memory/PageMapIndexer.cpp
@@ -2,21 +2,27 @@
 namespace Memory
 {
-    Virtual::PageMapIndexer::PageMapIndexer(uint64_t VirtualAddress)
-    {
+    Virtual::PageMapIndexer::PageMapIndexer(uint64_t VirtualAddress)
+    {
 #if defined(__amd64__)
-        this->PDPIndex = (VirtualAddress & ((uint64_t)0x1FF << 39)) >> 39;
-        this->PDIndex = (VirtualAddress & ((uint64_t)0x1FF << 30)) >> 30;
-        this->PTIndex = (VirtualAddress & ((uint64_t)0x1FF << 21)) >> 21;
-        this->PIndex = (VirtualAddress & ((uint64_t)0x1FF << 12)) >> 12;
+        uint64_t Address = VirtualAddress;
+        Address >>= 12;
+        this->PIndex = Address & 0x1FF;
+        Address >>= 9;
+        this->PTIndex = Address & 0x1FF;
+        Address >>= 9;
+        this->PDIndex = Address & 0x1FF;
+        Address >>= 9;
+        this->PDPIndex = Address & 0x1FF;
#elif defined(__i386__)
-        this->PDIndex = (VirtualAddress & ((uint64_t)0x3FF << 22)) >> 22;
-        this->PTIndex = (VirtualAddress & ((uint64_t)0x3FF << 12)) >> 12;
-        this->PIndex = (VirtualAddress & ((uint64_t)0xFFF)) >> 0;
+        uint64_t Address = VirtualAddress;
+        Address >>= 12;
+        this->PIndex = Address & 0x3FF;
+        Address >>= 10;
+        this->PTIndex = Address & 0x3FF;
+        Address >>= 10;
+        this->PDIndex = Address & 0x3FF;
 #elif defined(__aarch64__)
-        this->PDIndex = (VirtualAddress & ((uint64_t)0x1FF << 30)) >> 30;
-        this->PTIndex = (VirtualAddress & ((uint64_t)0x1FF << 21)) >> 21;
-        this->PIndex = (VirtualAddress & ((uint64_t)0x1FF << 12)) >> 12;
 #endif
-    }
+    }
 }
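
NOTE: The rewritten indexer walks the address bottom-up: strip the 12-bit page offset, then peel off one 9-bit index per level (512-entry tables). A worked example for the canonical higher-half base 0xFFFFFFFF80000000 (the -2 GiB kernel region):

    (Address >> 12) & 0x1FF -> PIndex   = 0    (page-table index)
    (Address >> 21) & 0x1FF -> PTIndex  = 0    (page-directory index)
    (Address >> 30) & 0x1FF -> PDIndex  = 510  (PDPT index)
    (Address >> 39) & 0x1FF -> PDPIndex = 511  (PML4 index)

On amd64 this is arithmetically identical to the removed mask-and-shift form. On i386 it is not: the new code derives PIndex from bits 12 and up, whereas the removed code treated PIndex as the low 12-bit byte offset (& 0xFFF).
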
"0 pages" : ""); for (uint64_t i = 0; i < PageCount; i++) @@ -174,11 +174,11 @@ namespace Memory void Physical::ReservePage(void *Address) { - if (Address == nullptr) + if (unlikely(Address == nullptr)) warn("Trying to reserve null address."); uint64_t Index = (uint64_t)Address / PAGE_SIZE; - if (PageBitmap[Index] == true) + if (unlikely(PageBitmap[Index] == true)) return; if (PageBitmap.Set(Index, true)) @@ -190,7 +190,7 @@ namespace Memory void Physical::ReservePages(void *Address, uint64_t PageCount) { - if (Address == nullptr || PageCount == 0) + if (unlikely(Address == nullptr || PageCount == 0)) warn("Trying to reserve %s%s.", Address ? "null address" : "", PageCount ? "0 pages" : ""); for (uint64_t t = 0; t < PageCount; t++) @@ -199,11 +199,11 @@ namespace Memory void Physical::UnreservePage(void *Address) { - if (Address == nullptr) + if (unlikely(Address == nullptr)) warn("Trying to unreserve null address."); uint64_t Index = (uint64_t)Address / PAGE_SIZE; - if (PageBitmap[Index] == false) + if (unlikely(PageBitmap[Index] == false)) return; if (PageBitmap.Set(Index, false)) @@ -217,7 +217,7 @@ namespace Memory void Physical::UnreservePages(void *Address, uint64_t PageCount) { - if (Address == nullptr || PageCount == 0) + if (unlikely(Address == nullptr || PageCount == 0)) warn("Trying to unreserve %s%s.", Address ? "null address" : "", PageCount ? "0 pages" : ""); for (uint64_t t = 0; t < PageCount; t++) @@ -253,7 +253,7 @@ namespace Memory CPU::Stop(); } - uint64_t BitmapSize = ALIGN_UP((MemorySize / PAGE_SIZE) / 8, PAGE_SIZE); + uint64_t BitmapSize = (MemorySize / PAGE_SIZE) / 8 + 1; trace("Initializing Bitmap at %llp-%llp (%lld Bytes)", LargestFreeMemorySegment, (void *)((uint64_t)LargestFreeMemorySegment + BitmapSize), @@ -264,13 +264,11 @@ namespace Memory *(uint8_t *)(PageBitmap.Buffer + i) = 0; trace("Reserving pages..."); - this->ReservePages(0, MemorySize / PAGE_SIZE + 1); - trace("Unreserve usable pages..."); for (uint64_t i = 0; i < Info->Memory.Entries; i++) - if (Info->Memory.Entry[i].Type == Usable) - this->UnreservePages((void *)Info->Memory.Entry[i].BaseAddress, Info->Memory.Entry[i].Length / PAGE_SIZE + 1); + if (Info->Memory.Entry[i].Type != Usable) + this->ReservePages((void *)Info->Memory.Entry[i].BaseAddress, Info->Memory.Entry[i].Length / PAGE_SIZE + 1); trace("Locking bitmap pages..."); - this->ReservePages(0, 0x100); // Reserve between 0 and 0x100000. 
diff --git a/Core/Memory/StackGuard.cpp b/Core/Memory/StackGuard.cpp
index a280beb..f9eac9f 100644
--- a/Core/Memory/StackGuard.cpp
+++ b/Core/Memory/StackGuard.cpp
@@ -11,6 +11,7 @@ namespace Memory
         if (this->UserMode)
         {
             void *AllocatedStack = KernelAllocator.RequestPages(TO_PAGES(USER_STACK_SIZE));
+            debug("AllocatedStack: %p", AllocatedStack);
             memset(AllocatedStack, 0, USER_STACK_SIZE);
             for (uint64_t i = 0; i < TO_PAGES(USER_STACK_SIZE); i++)
             {
@@ -25,6 +26,7 @@ namespace Memory
         else
         {
             this->StackBottom = KernelAllocator.RequestPages(TO_PAGES(STACK_SIZE));
+            debug("StackBottom: %p", this->StackBottom);
             memset(this->StackBottom, 0, STACK_SIZE);
             this->StackTop = (void *)((uint64_t)this->StackBottom + STACK_SIZE);
             this->Size = STACK_SIZE;
@@ -32,7 +34,12 @@ namespace Memory
         trace("Allocated stack at %p", this->StackBottom);
     }

-    StackGuard::~StackGuard() { KernelAllocator.FreePages(this->StackBottom, TO_PAGES(this->Size)); }
+    StackGuard::~StackGuard()
+    {
+        fixme("Temporarily disabled stack guard deallocation");
+        // KernelAllocator.FreePages(this->StackBottom, TO_PAGES(this->Size));
+        // debug("Freed stack at %p", this->StackBottom);
+    }

     bool StackGuard::Expand(uint64_t FaultAddress)
     {
@@ -46,6 +53,7 @@ namespace Memory
         else
         {
             void *AllocatedStack = KernelAllocator.RequestPages(TO_PAGES(USER_STACK_SIZE));
+            debug("AllocatedStack: %p", AllocatedStack);
             memset(AllocatedStack, 0, USER_STACK_SIZE);
             for (uint64_t i = 0; i < TO_PAGES(USER_STACK_SIZE); i++)
                 Virtual(this->Table).Map((void *)((uint64_t)AllocatedStack + (i * PAGE_SIZE)), (void *)((uint64_t)this->StackBottom - (i * PAGE_SIZE)), PTFlag::RW | PTFlag::US);
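
NOTE: The destructor is now a stub (see the fixme above), so stacks currently leak when a StackGuard is destroyed. For context, a sketch of how Expand() is meant to be driven; the handler below is hypothetical and not part of this patch:

    // Hypothetical page-fault path (illustrative only):
    void OnPageFault(uint64_t FaultAddress, Tasking::TCB *Thread)
    {
        if (Thread->Stack->Expand(FaultAddress)) // grows the guarded stack downward
            return;                              // mapping added; retry the access
        // otherwise a genuine fault: terminate the offending thread
    }

The user-mode branch maps the new pages downward from StackBottom (StackBottom - i * PAGE_SIZE), matching a stack that grows toward lower addresses.
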
diff --git a/Core/Memory/VirtualMemoryManager.cpp b/Core/Memory/VirtualMemoryManager.cpp
index 7350504..9db6078 100644
--- a/Core/Memory/VirtualMemoryManager.cpp
+++ b/Core/Memory/VirtualMemoryManager.cpp
@@ -45,7 +45,7 @@ namespace Memory
     void Virtual::Map(void *VirtualAddress, void *PhysicalAddress, uint64_t Flags)
     {
         SmartLock(this->MemoryLock);
-        if (!this->Table)
+        if (unlikely(!this->Table))
         {
             error("No page table");
             return;
         }
@@ -57,9 +57,9 @@ namespace Memory
         {
             PDP = (PageTable *)KernelAllocator.RequestPage();
             memset(PDP, 0, PAGE_SIZE);
-            PDE.SetAddress((uint64_t)PDP >> 12);
             PDE.SetFlag(PTFlag::P, true);
             PDE.AddFlag(Flags);
+            PDE.SetAddress((uint64_t)PDP >> 12);
             this->Table->Entries[Index.PDPIndex] = PDE;
         }
         else
@@ -71,9 +71,9 @@ namespace Memory
         {
             PD = (PageTable *)KernelAllocator.RequestPage();
             memset(PD, 0, PAGE_SIZE);
-            PDE.SetAddress((uint64_t)PD >> 12);
             PDE.SetFlag(PTFlag::P, true);
             PDE.AddFlag(Flags);
+            PDE.SetAddress((uint64_t)PD >> 12);
             PDP->Entries[Index.PDIndex] = PDE;
         }
         else
@@ -85,19 +85,20 @@ namespace Memory
         {
             PT = (PageTable *)KernelAllocator.RequestPage();
             memset(PT, 0, PAGE_SIZE);
-            PDE.SetAddress((uint64_t)PT >> 12);
             PDE.SetFlag(PTFlag::P, true);
             PDE.AddFlag(Flags);
+            PDE.SetAddress((uint64_t)PT >> 12);
             PD->Entries[Index.PTIndex] = PDE;
         }
         else
             PT = (PageTable *)((uint64_t)PDE.GetAddress() << 12);

         PDE = PT->Entries[Index.PIndex];
-        PDE.SetAddress((uint64_t)PhysicalAddress >> 12);
         PDE.SetFlag(PTFlag::P, true);
         PDE.AddFlag(Flags);
+        PDE.SetAddress((uint64_t)PhysicalAddress >> 12);
         PT->Entries[Index.PIndex] = PDE;
+
 #if defined(__amd64__)
         CPU::x64::invlpg(VirtualAddress);
 #elif defined(__i386__)
@@ -147,13 +148,35 @@ namespace Memory
         PageMapIndexer Index = PageMapIndexer((uint64_t)VirtualAddress);

         PageDirectoryEntry PDE = this->Table->Entries[Index.PDPIndex];
-        PDE.ClearFlags();
-#if defined(__amd64__) || defined(__i386__)
-        asmv("invlpg (%0)"
-             :
-             : "r"(VirtualAddress)
-             : "memory");
+        if (PDE.GetFlag(PTFlag::P))
+        {
+            PageTable *PDP = (PageTable *)((uint64_t)PDE.GetAddress() << 12);
+
+            PDE = PDP->Entries[Index.PDIndex];
+            if (PDE.GetFlag(PTFlag::P))
+            {
+                PageTable *PD = (PageTable *)((uint64_t)PDE.GetAddress() << 12);
+
+                PDE = PD->Entries[Index.PTIndex];
+                if (PDE.GetFlag(PTFlag::P))
+                {
+                    PageTable *PT = (PageTable *)((uint64_t)PDE.GetAddress() << 12);
+
+                    PDE = PT->Entries[Index.PIndex];
+                    if (PDE.GetFlag(PTFlag::P))
+                    {
+                        PDE.ClearFlags();
+                        // debug("Unmapped %#lx", VirtualAddress);
+                    }
+                }
+            }
+        }
+
+#if defined(__amd64__)
+        CPU::x64::invlpg(VirtualAddress);
+#elif defined(__i386__)
+        CPU::x32::invlpg(VirtualAddress);
 #elif defined(__aarch64__)
         asmv("dsb sy");
         asmv("tlbi vae1is, %0"
@@ -171,6 +194,12 @@ namespace Memory
             this->Unmap((void *)((uint64_t)VirtualAddress + (i * PAGE_SIZE)));
     }

+    void Virtual::Remap(void *VirtualAddress, void *PhysicalAddress, uint64_t Flags)
+    {
+        this->Unmap(VirtualAddress);
+        this->Map(VirtualAddress, PhysicalAddress, Flags);
+    }
+
     Virtual::Virtual(PageTable *Table)
     {
         if (Table)
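
NOTE: The SetAddress()-after-flags reordering in Map() pairs with the SetFlag() change in PageDirectoryEntry.cpp: since SetFlag() now zeroes the whole entry, writing the address first would be wiped by the subsequent flag write. Unmap() now walks all four levels down to the leaf 4 KiB entry instead of operating on the top-level entry; one caveat worth flagging is that PDE is a local copy at every step, so as written the ClearFlags() result is never stored back into PT->Entries. The new Remap() is a thin composition of the two; typical use (illustrative names and addresses, not from this patch):

    Memory::Virtual va = Memory::Virtual(Process->PageTable);
    // Point an already-mapped virtual page at a different physical frame:
    va.Remap((void *)VirtualPage, (void *)NewPhysicalFrame,
             Memory::PTFlag::RW | Memory::PTFlag::US);
    // equivalent to va.Unmap(v) followed by va.Map(v, p, flags),
    // with the TLB entry invalidated (invlpg) on both steps
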
diff --git a/Execute/Spawn.cpp b/Execute/Spawn.cpp
index 6fc1080..17ffd42 100644
--- a/Execute/Spawn.cpp
+++ b/Execute/Spawn.cpp
@@ -71,17 +71,17 @@ namespace Execute
         case BinaryType::BinTypeELF:
         {
 #if defined(__amd64__)
-
             const char *BaseName;
             cwk_path_get_basename(Path, &BaseName, nullptr);
             PCB *Process = TaskManager->CreateProcess(TaskManager->GetCurrentProcess(), BaseName, TaskTrustLevel::User);

             void *BaseImage = KernelAllocator.RequestPages(TO_PAGES(ExFile->Node->Length));
             memcpy(BaseImage, (void *)ExFile->Node->Address, ExFile->Node->Length);
+            debug("Image Size: %#lx - %#lx (length: %ld)", BaseImage, (uint64_t)BaseImage + ExFile->Node->Length, ExFile->Node->Length);

             Memory::Virtual pva = Memory::Virtual(Process->PageTable);
             for (uint64_t i = 0; i < TO_PAGES(ExFile->Node->Length); i++)
-                pva.Map((void *)((uint64_t)BaseImage + (i * PAGE_SIZE)), (void *)((uint64_t)BaseImage + (i * PAGE_SIZE)), Memory::PTFlag::RW | Memory::PTFlag::US);
+                pva.Remap((void *)((uint64_t)BaseImage + (i * PAGE_SIZE)), (void *)((uint64_t)BaseImage + (i * PAGE_SIZE)), Memory::PTFlag::RW | Memory::PTFlag::US);

             Elf64_Ehdr *ELFHeader = (Elf64_Ehdr *)BaseImage;
@@ -122,6 +122,8 @@ namespace Execute
             {
                 trace("Executable");
                 Elf64_Phdr *pheader = (Elf64_Phdr *)(((char *)BaseImage) + ELFHeader->e_phoff);
+                debug("p_paddr: %#lx | p_vaddr: %#lx | p_filesz: %#lx | p_memsz: %#lx | p_offset: %#lx", pheader->p_paddr, pheader->p_vaddr, pheader->p_filesz, pheader->p_memsz, pheader->p_offset);
+
                 void *Address = nullptr;
                 for (int i = 0; i < ELFHeader->e_phnum; i++, pheader++)
                 {
@@ -131,8 +133,12 @@ namespace Execute
                 }

                 void *Offset = KernelAllocator.RequestPages(TO_PAGES((uint64_t)Address));
+                pheader = (Elf64_Phdr *)(((char *)BaseImage) + ELFHeader->e_phoff);
                 for (uint64_t i = 0; i < TO_PAGES((uint64_t)Address); i++)
-                    pva.Map((void *)((uint64_t)Offset + (i * PAGE_SIZE)), (void *)((uint64_t)Offset + (i * PAGE_SIZE)), Memory::PTFlag::RW | Memory::PTFlag::US);
+                {
+                    pva.Remap((void *)((uint64_t)pheader->p_vaddr + (i * PAGE_SIZE)), (void *)((uint64_t)Offset + (i * PAGE_SIZE)), Memory::PTFlag::RW | Memory::PTFlag::US);
+                    // debug("Mapping: %#lx -> %#lx", (uint64_t)pheader->p_vaddr + (i * PAGE_SIZE), (uint64_t)Offset + (i * PAGE_SIZE));
+                }

                 pheader = (Elf64_Phdr *)(((char *)BaseImage) + ELFHeader->e_phoff);
                 for (int i = 0; i < ELFHeader->e_phnum; i++, pheader++)
@@ -144,21 +150,24 @@ namespace Execute
                     memcpy(dst, ((char *)BaseImage) + pheader->p_offset, pheader->p_filesz);
                 }

+                debug("Entry Point: %#lx", ELFHeader->e_entry);
+
                 Vector auxv;
+                pheader = (Elf64_Phdr *)(((char *)BaseImage) + ELFHeader->e_phoff);

                 auxv.push_back({.archaux = {.a_type = AT_PHDR, .a_un = {.a_val = (uint64_t)ELFHeader->e_phoff}}});
                 auxv.push_back({.archaux = {.a_type = AT_PHENT, .a_un = {.a_val = (uint64_t)ELFHeader->e_phentsize}}});
                 auxv.push_back({.archaux = {.a_type = AT_PHNUM, .a_un = {.a_val = (uint64_t)ELFHeader->e_phnum}}});
                 auxv.push_back({.archaux = {.a_type = AT_PAGESZ, .a_un = {.a_val = (uint64_t)PAGE_SIZE}}});
                 auxv.push_back({.archaux = {.a_type = AT_BASE, .a_un = {.a_val = (uint64_t)Offset}}});
-                auxv.push_back({.archaux = {.a_type = AT_ENTRY, .a_un = {.a_val = (uint64_t)ELFHeader->e_entry + (uint64_t)Offset}}});
+                auxv.push_back({.archaux = {.a_type = AT_ENTRY, .a_un = {.a_val = (uint64_t)ELFHeader->e_entry + (uint64_t)pheader->p_offset}}});
                 auxv.push_back({.archaux = {.a_type = AT_PLATFORM, .a_un = {.a_val = (uint64_t) "x86_64"}}});
                 auxv.push_back({.archaux = {.a_type = AT_EXECFN, .a_un = {.a_val = (uint64_t)Path}}});

                 TCB *Thread = TaskManager->CreateThread(Process,
                                                         (IP)ELFHeader->e_entry,
                                                         argv, envp, auxv,
-                                                        (IPOffset)Offset,
+                                                        (IPOffset)pheader->p_offset,
                                                         Arch,
                                                         Comp);
                 ret.Process = Process;
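
NOTE: The loader now maps each page at its link-time p_vaddr, backed by the freshly allocated Offset buffer, instead of identity-mapping the buffer itself, and the auxiliary vector's AT_ENTRY is adjusted to match. For reference, the auxv tags used above are the standard System V / Linux values (from <elf.h>):

    AT_PHDR     = 3   // address of the program headers
    AT_PHENT    = 4   // size of one program header (sizeof(Elf64_Phdr))
    AT_PHNUM    = 5   // number of program headers
    AT_PAGESZ   = 6   // page size (PAGE_SIZE)
    AT_BASE     = 7   // base address (interpreter / relocation base)
    AT_ENTRY    = 9   // program entry point
    AT_PLATFORM = 15, AT_EXECFN = 31

One thing to watch: pheader is reset to the first program header before the auxv block, so the e_entry + p_offset used for AT_ENTRY (and the IPOffset passed to CreateThread) comes from program header 0, which is only correct when that header is the PT_LOAD segment containing the entry point.
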
+ asmv("movq %cr3, %rax"); + asmv("movq %rax, %cr3"); CPU::x64::fxrstor(CurrentCPU->CurrentThread->FXRegion); CPU::x64::wrmsr(CPU::x64::MSR_GS_BASE, CurrentCPU->CurrentThread->GSBase); CPU::x64::wrmsr(CPU::x64::MSR_FS_BASE, CurrentCPU->CurrentThread->FSBase); @@ -882,6 +885,7 @@ namespace Tasking for (uint64_t i = 0; i < ArgvSize; i++) { void *Tmp = KernelAllocator.RequestPages(TO_PAGES(strlen(argv[i]) + 1)); + debug("argv[%d] ptr %#lx", i, (uint64_t)Tmp); Memory::Virtual().Map(Tmp, Tmp, Memory::PTFlag::RW | Memory::PTFlag::US); _argv = (uint8_t *)Tmp; strcpy((char *)_argv, argv[i]); @@ -893,6 +897,7 @@ namespace Tasking for (uint64_t i = 0; i < EnvpSize; i++) { void *Tmp = KernelAllocator.RequestPages(TO_PAGES(strlen(envp[i]) + 1)); + debug("envp[%d] ptr %#lx", i, (uint64_t)Tmp); Memory::Virtual().Map(Tmp, Tmp, Memory::PTFlag::RW | Memory::PTFlag::US); _envp = (uint8_t *)Tmp; strcpy((char *)_envp, envp[i]); @@ -957,7 +962,7 @@ namespace Tasking Thread->Info.Architecture = Architecture; Thread->Info.Compatibility = Compatibility; - debug("Thread offset is %#lx (EntryPoint:%#lx)", Thread->Offset, Thread->EntryPoint); + debug("Thread offset is %#lx (EntryPoint: %#lx) => RIP: %#lx", Thread->Offset, Thread->EntryPoint, Thread->Registers.rip); if (Parent->Security.TrustLevel == TaskTrustLevel::User) debug("Thread stack region is %#lx-%#lx (U) and rsp is %#lx", Thread->Stack->GetStackBottom(), Thread->Stack->GetStackTop(), Thread->Registers.rsp); else diff --git a/include/memory.hpp b/include/memory.hpp index 798f6db..bcbc241 100644 --- a/include/memory.hpp +++ b/include/memory.hpp @@ -224,6 +224,7 @@ namespace Memory void UnreservePages(void *Address, uint64_t PageCount); public: + Bitmap GetPageBitmap() { return PageBitmap; } /** * @brief Get Total Memory * @@ -338,6 +339,7 @@ namespace Memory NewLock(MemoryLock); PageTable *Table = nullptr; + public: class PageMapIndexer { public: @@ -348,7 +350,6 @@ namespace Memory PageMapIndexer(uint64_t VirtualAddress); }; - public: /** * @brief Check if page is present * @@ -393,6 +394,15 @@ namespace Memory */ void Unmap(void *VirtualAddress, uint64_t PageCount); + /** + * @brief Remap page. + * + * @param VirtualAddress Virtual address of the page. + * @param PhysicalAddress Physical address of the page. + * @param Flags Flags of the page. Check PTFlag enum. + */ + void Remap(void *VirtualAddress, void *PhysicalAddress, uint64_t Flags); + /** * @brief Construct a new Virtual object *