Memory-related code optimization

This commit is contained in:
Alex 2022-12-05 00:48:41 +02:00
parent f34278891b
commit 2d1c42fbcd
Signed by untrusted user who does not match committer: enderice2
GPG Key ID: EACC3AD603BAB4DD
9 changed files with 123 additions and 57 deletions

View File

@ -44,8 +44,8 @@ __no_instrument_function void MapFromZero(PageTable *PT, BootInfo *Info)
uint64_t MemSize = Info->Memory.Size; uint64_t MemSize = Info->Memory.Size;
for (uint64_t t = 0; t < MemSize; t += PAGE_SIZE) for (uint64_t t = 0; t < MemSize; t += PAGE_SIZE)
{ {
va.Map((void *)t, (void *)t, PTFlag::RW); va.Map((void *)t, (void *)t, PTFlag::RW /* | PTFlag::US */);
va.Map((void *)VirtualOffsetNormalVMA, (void *)t, PTFlag::RW); va.Map((void *)VirtualOffsetNormalVMA, (void *)t, PTFlag::RW /* | PTFlag::US */);
VirtualOffsetNormalVMA += PAGE_SIZE; VirtualOffsetNormalVMA += PAGE_SIZE;
} }
} }
@ -62,7 +62,7 @@ __no_instrument_function void MapFramebuffer(PageTable *PT, BootInfo *Info)
for (uint64_t fb_base = (uint64_t)Info->Framebuffer[itrfb].BaseAddress; for (uint64_t fb_base = (uint64_t)Info->Framebuffer[itrfb].BaseAddress;
fb_base < ((uint64_t)Info->Framebuffer[itrfb].BaseAddress + ((Info->Framebuffer[itrfb].Pitch * Info->Framebuffer[itrfb].Height) + PAGE_SIZE)); fb_base < ((uint64_t)Info->Framebuffer[itrfb].BaseAddress + ((Info->Framebuffer[itrfb].Pitch * Info->Framebuffer[itrfb].Height) + PAGE_SIZE));
fb_base += PAGE_SIZE) fb_base += PAGE_SIZE)
va.Map((void *)(fb_base + NORMAL_VMA_OFFSET), (void *)fb_base, PTFlag::RW | PTFlag::US); va.Map((void *)(fb_base + NORMAL_VMA_OFFSET), (void *)fb_base, PTFlag::RW | PTFlag::US | PTFlag::G);
itrfb++; itrfb++;
} }
} }
@ -91,21 +91,21 @@ __no_instrument_function void MapKernel(PageTable *PT, BootInfo *Info)
for (k = KernelTextEnd; k < KernelDataEnd; k += PAGE_SIZE) for (k = KernelTextEnd; k < KernelDataEnd; k += PAGE_SIZE)
{ {
va.Map((void *)k, (void *)BaseKernelMapAddress, PTFlag::RW); va.Map((void *)k, (void *)BaseKernelMapAddress, PTFlag::RW | PTFlag::G);
KernelAllocator.LockPage((void *)BaseKernelMapAddress); KernelAllocator.LockPage((void *)BaseKernelMapAddress);
BaseKernelMapAddress += PAGE_SIZE; BaseKernelMapAddress += PAGE_SIZE;
} }
for (k = KernelDataEnd; k < KernelRoDataEnd; k += PAGE_SIZE) for (k = KernelDataEnd; k < KernelRoDataEnd; k += PAGE_SIZE)
{ {
va.Map((void *)k, (void *)BaseKernelMapAddress, PTFlag::P); va.Map((void *)k, (void *)BaseKernelMapAddress, PTFlag::P | PTFlag::G);
KernelAllocator.LockPage((void *)BaseKernelMapAddress); KernelAllocator.LockPage((void *)BaseKernelMapAddress);
BaseKernelMapAddress += PAGE_SIZE; BaseKernelMapAddress += PAGE_SIZE;
} }
for (k = KernelRoDataEnd; k < KernelEnd; k += PAGE_SIZE) for (k = KernelRoDataEnd; k < KernelEnd; k += PAGE_SIZE)
{ {
va.Map((void *)k, (void *)BaseKernelMapAddress, PTFlag::RW); va.Map((void *)k, (void *)BaseKernelMapAddress, PTFlag::RW | PTFlag::G);
KernelAllocator.LockPage((void *)BaseKernelMapAddress); KernelAllocator.LockPage((void *)BaseKernelMapAddress);
BaseKernelMapAddress += PAGE_SIZE; BaseKernelMapAddress += PAGE_SIZE;
} }
@ -209,6 +209,7 @@ __no_instrument_function void InitializeMemoryManagement(BootInfo *Info)
#elif defined(__aarch64__) #elif defined(__aarch64__)
asmv("msr ttbr0_el1, %0" ::"r"(KernelPageTable)); asmv("msr ttbr0_el1, %0" ::"r"(KernelPageTable));
#endif #endif
debug("Page table updated.");
if (strstr(Info->Kernel.CommandLine, "xallocv1")) if (strstr(Info->Kernel.CommandLine, "xallocv1"))
{ {
XallocV1Allocator = new Xalloc::AllocatorV1((void *)KERNEL_HEAP_BASE, false, false); XallocV1Allocator = new Xalloc::AllocatorV1((void *)KERNEL_HEAP_BASE, false, false);

View File

@ -7,7 +7,7 @@ namespace Memory
/** @brief Reset the whole entry (flags and address) to zero. */
void PageDirectoryEntry::ClearFlags() { this->Value.raw = 0; }

/**
 * @brief Set or clear the given flag bit(s) in this entry.
 *
 * @param Flag Bitmask of the flag(s) to modify (see PTFlag).
 * @param Enabled true to set the bits, false to clear them.
 */
void PageDirectoryEntry::SetFlag(uint64_t Flag, bool Enabled)
{
    // Clear only the targeted flag bits. Zeroing the entire raw value here
    // (as `this->Value.raw = 0;` would) wipes the stored physical address
    // and every other flag, silently corrupting any entry whose address
    // was set before the flags — keep the mask-based clear instead.
    this->Value.raw &= ~Flag;
    if (Enabled)
        this->Value.raw |= Flag;
}

View File

@ -2,21 +2,27 @@
namespace Memory namespace Memory
{ {
/**
 * @brief Split a virtual address into per-level page-table indices.
 *
 * @param VirtualAddress Address to decompose into PDP/PD/PT/P indices.
 */
Virtual::PageMapIndexer::PageMapIndexer(uint64_t VirtualAddress)
{
#if defined(__amd64__)
    // 4-level paging: 9 bits per level starting at bit 12.
    // Extract from the lowest level upward with successive shifts.
    uint64_t Address = VirtualAddress;
    Address >>= 12;
    this->PIndex = Address & 0x1FF;
    Address >>= 9;
    this->PTIndex = Address & 0x1FF;
    Address >>= 9;
    this->PDIndex = Address & 0x1FF;
    Address >>= 9;
    this->PDPIndex = Address & 0x1FF;
#elif defined(__i386__)
    // 2-level 32-bit paging: PD index is bits 22-31, PT index is bits
    // 12-21, and PIndex keeps the 12-bit page offset (bits 0-11).
    // NOTE: shifting by 12 first and masking with 0x3FF would extract the
    // PT index into PIndex and push every level one step too far, so the
    // offset must be taken before any shift.
    uint64_t Address = VirtualAddress;
    this->PIndex = Address & 0xFFF;
    Address >>= 12;
    this->PTIndex = Address & 0x3FF;
    Address >>= 10;
    this->PDIndex = Address & 0x3FF;
#elif defined(__aarch64__)
    // Keep the aarch64 decomposition: removing it leaves the index
    // members uninitialized on that architecture.
    this->PDIndex = (VirtualAddress & ((uint64_t)0x1FF << 30)) >> 30;
    this->PTIndex = (VirtualAddress & ((uint64_t)0x1FF << 21)) >> 21;
    this->PIndex = (VirtualAddress & ((uint64_t)0x1FF << 12)) >> 12;
#endif
}
} }

View File

@ -118,13 +118,13 @@ namespace Memory
void Physical::FreePage(void *Address) void Physical::FreePage(void *Address)
{ {
SmartLock(this->MemoryLock); SmartLock(this->MemoryLock);
if (Address == nullptr) if (unlikely(Address == nullptr))
{ {
warn("Null pointer passed to FreePage."); warn("Null pointer passed to FreePage.");
return; return;
} }
uint64_t Index = (uint64_t)Address / PAGE_SIZE; uint64_t Index = (uint64_t)Address / PAGE_SIZE;
if (PageBitmap[Index] == false) if (unlikely(PageBitmap[Index] == false))
return; return;
if (PageBitmap.Set(Index, false)) if (PageBitmap.Set(Index, false))
@ -138,7 +138,7 @@ namespace Memory
void Physical::FreePages(void *Address, uint64_t Count) void Physical::FreePages(void *Address, uint64_t Count)
{ {
if (Address == nullptr || Count == 0) if (unlikely(Address == nullptr || Count == 0))
{ {
warn("%s%s passed to FreePages.", Address == nullptr ? "Null pointer" : "", Count == 0 ? "Zero count" : ""); warn("%s%s passed to FreePages.", Address == nullptr ? "Null pointer" : "", Count == 0 ? "Zero count" : "");
return; return;
@ -150,11 +150,11 @@ namespace Memory
void Physical::LockPage(void *Address) void Physical::LockPage(void *Address)
{ {
if (Address == nullptr) if (unlikely(Address == nullptr))
warn("Trying to lock null address."); warn("Trying to lock null address.");
uint64_t Index = (uint64_t)Address / PAGE_SIZE; uint64_t Index = (uint64_t)Address / PAGE_SIZE;
if (PageBitmap[Index] == true) if (unlikely(PageBitmap[Index] == true))
return; return;
if (PageBitmap.Set(Index, true)) if (PageBitmap.Set(Index, true))
{ {
@ -165,7 +165,7 @@ namespace Memory
void Physical::LockPages(void *Address, uint64_t PageCount) void Physical::LockPages(void *Address, uint64_t PageCount)
{ {
if (Address == nullptr || PageCount == 0) if (unlikely(Address == nullptr || PageCount == 0))
warn("Trying to lock %s%s.", Address ? "null address" : "", PageCount ? "0 pages" : ""); warn("Trying to lock %s%s.", Address ? "null address" : "", PageCount ? "0 pages" : "");
for (uint64_t i = 0; i < PageCount; i++) for (uint64_t i = 0; i < PageCount; i++)
@ -174,11 +174,11 @@ namespace Memory
void Physical::ReservePage(void *Address) void Physical::ReservePage(void *Address)
{ {
if (Address == nullptr) if (unlikely(Address == nullptr))
warn("Trying to reserve null address."); warn("Trying to reserve null address.");
uint64_t Index = (uint64_t)Address / PAGE_SIZE; uint64_t Index = (uint64_t)Address / PAGE_SIZE;
if (PageBitmap[Index] == true) if (unlikely(PageBitmap[Index] == true))
return; return;
if (PageBitmap.Set(Index, true)) if (PageBitmap.Set(Index, true))
@ -190,7 +190,7 @@ namespace Memory
void Physical::ReservePages(void *Address, uint64_t PageCount) void Physical::ReservePages(void *Address, uint64_t PageCount)
{ {
if (Address == nullptr || PageCount == 0) if (unlikely(Address == nullptr || PageCount == 0))
warn("Trying to reserve %s%s.", Address ? "null address" : "", PageCount ? "0 pages" : ""); warn("Trying to reserve %s%s.", Address ? "null address" : "", PageCount ? "0 pages" : "");
for (uint64_t t = 0; t < PageCount; t++) for (uint64_t t = 0; t < PageCount; t++)
@ -199,11 +199,11 @@ namespace Memory
void Physical::UnreservePage(void *Address) void Physical::UnreservePage(void *Address)
{ {
if (Address == nullptr) if (unlikely(Address == nullptr))
warn("Trying to unreserve null address."); warn("Trying to unreserve null address.");
uint64_t Index = (uint64_t)Address / PAGE_SIZE; uint64_t Index = (uint64_t)Address / PAGE_SIZE;
if (PageBitmap[Index] == false) if (unlikely(PageBitmap[Index] == false))
return; return;
if (PageBitmap.Set(Index, false)) if (PageBitmap.Set(Index, false))
@ -217,7 +217,7 @@ namespace Memory
void Physical::UnreservePages(void *Address, uint64_t PageCount) void Physical::UnreservePages(void *Address, uint64_t PageCount)
{ {
if (Address == nullptr || PageCount == 0) if (unlikely(Address == nullptr || PageCount == 0))
warn("Trying to unreserve %s%s.", Address ? "null address" : "", PageCount ? "0 pages" : ""); warn("Trying to unreserve %s%s.", Address ? "null address" : "", PageCount ? "0 pages" : "");
for (uint64_t t = 0; t < PageCount; t++) for (uint64_t t = 0; t < PageCount; t++)
@ -253,7 +253,7 @@ namespace Memory
CPU::Stop(); CPU::Stop();
} }
uint64_t BitmapSize = ALIGN_UP((MemorySize / PAGE_SIZE) / 8, PAGE_SIZE); uint64_t BitmapSize = (MemorySize / PAGE_SIZE) / 8 + 1;
trace("Initializing Bitmap at %llp-%llp (%lld Bytes)", trace("Initializing Bitmap at %llp-%llp (%lld Bytes)",
LargestFreeMemorySegment, LargestFreeMemorySegment,
(void *)((uint64_t)LargestFreeMemorySegment + BitmapSize), (void *)((uint64_t)LargestFreeMemorySegment + BitmapSize),
@ -264,13 +264,11 @@ namespace Memory
*(uint8_t *)(PageBitmap.Buffer + i) = 0; *(uint8_t *)(PageBitmap.Buffer + i) = 0;
trace("Reserving pages..."); trace("Reserving pages...");
this->ReservePages(0, MemorySize / PAGE_SIZE + 1);
trace("Unreserve usable pages...");
for (uint64_t i = 0; i < Info->Memory.Entries; i++) for (uint64_t i = 0; i < Info->Memory.Entries; i++)
if (Info->Memory.Entry[i].Type == Usable) if (Info->Memory.Entry[i].Type != Usable)
this->UnreservePages((void *)Info->Memory.Entry[i].BaseAddress, Info->Memory.Entry[i].Length / PAGE_SIZE + 1); this->ReservePages((void *)Info->Memory.Entry[i].BaseAddress, Info->Memory.Entry[i].Length / PAGE_SIZE + 1);
trace("Locking bitmap pages..."); trace("Locking bitmap pages...");
this->ReservePages(0, 0x100); // Reserve between 0 and 0x100000. this->ReservePages(0, 0x100);
this->LockPages(PageBitmap.Buffer, PageBitmap.Size / PAGE_SIZE + 1); this->LockPages(PageBitmap.Buffer, PageBitmap.Size / PAGE_SIZE + 1);
} }

View File

@ -11,6 +11,7 @@ namespace Memory
if (this->UserMode) if (this->UserMode)
{ {
void *AllocatedStack = KernelAllocator.RequestPages(TO_PAGES(USER_STACK_SIZE)); void *AllocatedStack = KernelAllocator.RequestPages(TO_PAGES(USER_STACK_SIZE));
debug("AllocatedStack: %p", AllocatedStack);
memset(AllocatedStack, 0, USER_STACK_SIZE); memset(AllocatedStack, 0, USER_STACK_SIZE);
for (uint64_t i = 0; i < TO_PAGES(USER_STACK_SIZE); i++) for (uint64_t i = 0; i < TO_PAGES(USER_STACK_SIZE); i++)
{ {
@ -25,6 +26,7 @@ namespace Memory
else else
{ {
this->StackBottom = KernelAllocator.RequestPages(TO_PAGES(STACK_SIZE)); this->StackBottom = KernelAllocator.RequestPages(TO_PAGES(STACK_SIZE));
debug("StackBottom: %p", this->StackBottom);
memset(this->StackBottom, 0, STACK_SIZE); memset(this->StackBottom, 0, STACK_SIZE);
this->StackTop = (void *)((uint64_t)this->StackBottom + STACK_SIZE); this->StackTop = (void *)((uint64_t)this->StackBottom + STACK_SIZE);
this->Size = STACK_SIZE; this->Size = STACK_SIZE;
@ -32,7 +34,12 @@ namespace Memory
trace("Allocated stack at %p", this->StackBottom); trace("Allocated stack at %p", this->StackBottom);
} }
// Destructor intentionally does NOT free the stack pages: deallocation is
// temporarily disabled, so every destroyed StackGuard currently LEAKS its
// stack memory until the commented-out FreePages call is restored.
// NOTE(review): presumably freeing here caused a use-after-free or double
// free elsewhere — confirm before re-enabling.
StackGuard::~StackGuard()
{
    fixme("Temporarily disabled stack guard deallocation");
    // KernelAllocator.FreePages(this->StackBottom, TO_PAGES(this->Size));
    // debug("Freed stack at %p", this->StackBottom);
}
bool StackGuard::Expand(uint64_t FaultAddress) bool StackGuard::Expand(uint64_t FaultAddress)
{ {
@ -46,6 +53,7 @@ namespace Memory
else else
{ {
void *AllocatedStack = KernelAllocator.RequestPages(TO_PAGES(USER_STACK_SIZE)); void *AllocatedStack = KernelAllocator.RequestPages(TO_PAGES(USER_STACK_SIZE));
debug("AllocatedStack: %p", AllocatedStack);
memset(AllocatedStack, 0, USER_STACK_SIZE); memset(AllocatedStack, 0, USER_STACK_SIZE);
for (uint64_t i = 0; i < TO_PAGES(USER_STACK_SIZE); i++) for (uint64_t i = 0; i < TO_PAGES(USER_STACK_SIZE); i++)
Virtual(this->Table).Map((void *)((uint64_t)AllocatedStack + (i * PAGE_SIZE)), (void *)((uint64_t)this->StackBottom - (i * PAGE_SIZE)), PTFlag::RW | PTFlag::US); Virtual(this->Table).Map((void *)((uint64_t)AllocatedStack + (i * PAGE_SIZE)), (void *)((uint64_t)this->StackBottom - (i * PAGE_SIZE)), PTFlag::RW | PTFlag::US);

View File

@ -45,7 +45,7 @@ namespace Memory
void Virtual::Map(void *VirtualAddress, void *PhysicalAddress, uint64_t Flags) void Virtual::Map(void *VirtualAddress, void *PhysicalAddress, uint64_t Flags)
{ {
SmartLock(this->MemoryLock); SmartLock(this->MemoryLock);
if (!this->Table) if (unlikely(!this->Table))
{ {
error("No page table"); error("No page table");
return; return;
@ -57,9 +57,9 @@ namespace Memory
{ {
PDP = (PageTable *)KernelAllocator.RequestPage(); PDP = (PageTable *)KernelAllocator.RequestPage();
memset(PDP, 0, PAGE_SIZE); memset(PDP, 0, PAGE_SIZE);
PDE.SetAddress((uint64_t)PDP >> 12);
PDE.SetFlag(PTFlag::P, true); PDE.SetFlag(PTFlag::P, true);
PDE.AddFlag(Flags); PDE.AddFlag(Flags);
PDE.SetAddress((uint64_t)PDP >> 12);
this->Table->Entries[Index.PDPIndex] = PDE; this->Table->Entries[Index.PDPIndex] = PDE;
} }
else else
@ -71,9 +71,9 @@ namespace Memory
{ {
PD = (PageTable *)KernelAllocator.RequestPage(); PD = (PageTable *)KernelAllocator.RequestPage();
memset(PD, 0, PAGE_SIZE); memset(PD, 0, PAGE_SIZE);
PDE.SetAddress((uint64_t)PD >> 12);
PDE.SetFlag(PTFlag::P, true); PDE.SetFlag(PTFlag::P, true);
PDE.AddFlag(Flags); PDE.AddFlag(Flags);
PDE.SetAddress((uint64_t)PD >> 12);
PDP->Entries[Index.PDIndex] = PDE; PDP->Entries[Index.PDIndex] = PDE;
} }
else else
@ -85,19 +85,20 @@ namespace Memory
{ {
PT = (PageTable *)KernelAllocator.RequestPage(); PT = (PageTable *)KernelAllocator.RequestPage();
memset(PT, 0, PAGE_SIZE); memset(PT, 0, PAGE_SIZE);
PDE.SetAddress((uint64_t)PT >> 12);
PDE.SetFlag(PTFlag::P, true); PDE.SetFlag(PTFlag::P, true);
PDE.AddFlag(Flags); PDE.AddFlag(Flags);
PDE.SetAddress((uint64_t)PT >> 12);
PD->Entries[Index.PTIndex] = PDE; PD->Entries[Index.PTIndex] = PDE;
} }
else else
PT = (PageTable *)((uint64_t)PDE.GetAddress() << 12); PT = (PageTable *)((uint64_t)PDE.GetAddress() << 12);
PDE = PT->Entries[Index.PIndex]; PDE = PT->Entries[Index.PIndex];
PDE.SetAddress((uint64_t)PhysicalAddress >> 12);
PDE.SetFlag(PTFlag::P, true); PDE.SetFlag(PTFlag::P, true);
PDE.AddFlag(Flags); PDE.AddFlag(Flags);
PDE.SetAddress((uint64_t)PhysicalAddress >> 12);
PT->Entries[Index.PIndex] = PDE; PT->Entries[Index.PIndex] = PDE;
#if defined(__amd64__) #if defined(__amd64__)
CPU::x64::invlpg(VirtualAddress); CPU::x64::invlpg(VirtualAddress);
#elif defined(__i386__) #elif defined(__i386__)
@ -147,13 +148,35 @@ namespace Memory
PageMapIndexer Index = PageMapIndexer((uint64_t)VirtualAddress); PageMapIndexer Index = PageMapIndexer((uint64_t)VirtualAddress);
PageDirectoryEntry PDE = this->Table->Entries[Index.PDPIndex]; PageDirectoryEntry PDE = this->Table->Entries[Index.PDPIndex];
PDE.ClearFlags();
#if defined(__amd64__) || defined(__i386__) if (PDE.GetFlag(PTFlag::P))
asmv("invlpg (%0)" {
: PageTable *PDP = (PageTable *)((uint64_t)PDE.GetAddress() << 12);
: "r"(VirtualAddress)
: "memory"); PDE = PDP->Entries[Index.PDIndex];
if (PDE.GetFlag(PTFlag::P))
{
PageTable *PD = (PageTable *)((uint64_t)PDE.GetAddress() << 12);
PDE = PD->Entries[Index.PTIndex];
if (PDE.GetFlag(PTFlag::P))
{
PageTable *PT = (PageTable *)((uint64_t)PDE.GetAddress() << 12);
PDE = PT->Entries[Index.PIndex];
if (PDE.GetFlag(PTFlag::P))
{
PDE.ClearFlags();
// debug("Unmapped %#lx", VirtualAddress);
}
}
}
}
#if defined(__amd64__)
CPU::x64::invlpg(VirtualAddress);
#elif defined(__i386__)
CPU::x32::invlpg(VirtualAddress);
#elif defined(__aarch64__) #elif defined(__aarch64__)
asmv("dsb sy"); asmv("dsb sy");
asmv("tlbi vae1is, %0" asmv("tlbi vae1is, %0"
@ -171,6 +194,12 @@ namespace Memory
this->Unmap((void *)((uint64_t)VirtualAddress + (i * PAGE_SIZE))); this->Unmap((void *)((uint64_t)VirtualAddress + (i * PAGE_SIZE)));
} }
/**
 * @brief Re-establish a mapping for a virtual address.
 *
 * Drops any existing translation for @p VirtualAddress, then maps it to
 * @p PhysicalAddress with the requested flags. Unmap is a no-op for
 * addresses that were never mapped, so this is safe to call either way.
 *
 * @param VirtualAddress Virtual address of the page.
 * @param PhysicalAddress Physical address of the page.
 * @param Flags Flags of the page. Check PTFlag enum.
 */
void Virtual::Remap(void *VirtualAddress, void *PhysicalAddress, uint64_t Flags)
{
    this->Unmap(VirtualAddress);
    this->Map(VirtualAddress, PhysicalAddress, Flags);
}
Virtual::Virtual(PageTable *Table) Virtual::Virtual(PageTable *Table)
{ {
if (Table) if (Table)

View File

@ -71,17 +71,17 @@ namespace Execute
case BinaryType::BinTypeELF: case BinaryType::BinTypeELF:
{ {
#if defined(__amd64__) #if defined(__amd64__)
const char *BaseName; const char *BaseName;
cwk_path_get_basename(Path, &BaseName, nullptr); cwk_path_get_basename(Path, &BaseName, nullptr);
PCB *Process = TaskManager->CreateProcess(TaskManager->GetCurrentProcess(), BaseName, TaskTrustLevel::User); PCB *Process = TaskManager->CreateProcess(TaskManager->GetCurrentProcess(), BaseName, TaskTrustLevel::User);
void *BaseImage = KernelAllocator.RequestPages(TO_PAGES(ExFile->Node->Length)); void *BaseImage = KernelAllocator.RequestPages(TO_PAGES(ExFile->Node->Length));
memcpy(BaseImage, (void *)ExFile->Node->Address, ExFile->Node->Length); memcpy(BaseImage, (void *)ExFile->Node->Address, ExFile->Node->Length);
debug("Image Size: %#lx - %#lx (length: %ld)", BaseImage, (uint64_t)BaseImage + ExFile->Node->Length, ExFile->Node->Length);
Memory::Virtual pva = Memory::Virtual(Process->PageTable); Memory::Virtual pva = Memory::Virtual(Process->PageTable);
for (uint64_t i = 0; i < TO_PAGES(ExFile->Node->Length); i++) for (uint64_t i = 0; i < TO_PAGES(ExFile->Node->Length); i++)
pva.Map((void *)((uint64_t)BaseImage + (i * PAGE_SIZE)), (void *)((uint64_t)BaseImage + (i * PAGE_SIZE)), Memory::PTFlag::RW | Memory::PTFlag::US); pva.Remap((void *)((uint64_t)BaseImage + (i * PAGE_SIZE)), (void *)((uint64_t)BaseImage + (i * PAGE_SIZE)), Memory::PTFlag::RW | Memory::PTFlag::US);
Elf64_Ehdr *ELFHeader = (Elf64_Ehdr *)BaseImage; Elf64_Ehdr *ELFHeader = (Elf64_Ehdr *)BaseImage;
@ -122,6 +122,8 @@ namespace Execute
{ {
trace("Executable"); trace("Executable");
Elf64_Phdr *pheader = (Elf64_Phdr *)(((char *)BaseImage) + ELFHeader->e_phoff); Elf64_Phdr *pheader = (Elf64_Phdr *)(((char *)BaseImage) + ELFHeader->e_phoff);
debug("p_paddr: %#lx | p_vaddr: %#lx | p_filesz: %#lx | p_memsz: %#lx | p_offset: %#lx", pheader->p_paddr, pheader->p_vaddr, pheader->p_filesz, pheader->p_memsz, pheader->p_offset);
void *Address = nullptr; void *Address = nullptr;
for (int i = 0; i < ELFHeader->e_phnum; i++, pheader++) for (int i = 0; i < ELFHeader->e_phnum; i++, pheader++)
{ {
@ -131,8 +133,12 @@ namespace Execute
} }
void *Offset = KernelAllocator.RequestPages(TO_PAGES((uint64_t)Address)); void *Offset = KernelAllocator.RequestPages(TO_PAGES((uint64_t)Address));
pheader = (Elf64_Phdr *)(((char *)BaseImage) + ELFHeader->e_phoff);
for (uint64_t i = 0; i < TO_PAGES((uint64_t)Address); i++) for (uint64_t i = 0; i < TO_PAGES((uint64_t)Address); i++)
pva.Map((void *)((uint64_t)Offset + (i * PAGE_SIZE)), (void *)((uint64_t)Offset + (i * PAGE_SIZE)), Memory::PTFlag::RW | Memory::PTFlag::US); {
pva.Remap((void *)((uint64_t)pheader->p_vaddr + (i * PAGE_SIZE)), (void *)((uint64_t)Offset + (i * PAGE_SIZE)), Memory::PTFlag::RW | Memory::PTFlag::US);
// debug("Mapping: %#lx -> %#lx", (uint64_t)pheader->p_vaddr + (i * PAGE_SIZE), (uint64_t)Offset + (i * PAGE_SIZE));
}
pheader = (Elf64_Phdr *)(((char *)BaseImage) + ELFHeader->e_phoff); pheader = (Elf64_Phdr *)(((char *)BaseImage) + ELFHeader->e_phoff);
for (int i = 0; i < ELFHeader->e_phnum; i++, pheader++) for (int i = 0; i < ELFHeader->e_phnum; i++, pheader++)
@ -144,21 +150,24 @@ namespace Execute
memcpy(dst, ((char *)BaseImage) + pheader->p_offset, pheader->p_filesz); memcpy(dst, ((char *)BaseImage) + pheader->p_offset, pheader->p_filesz);
} }
debug("Entry Point: %#lx", ELFHeader->e_entry);
Vector<AuxiliaryVector> auxv; Vector<AuxiliaryVector> auxv;
pheader = (Elf64_Phdr *)(((char *)BaseImage) + ELFHeader->e_phoff);
auxv.push_back({.archaux = {.a_type = AT_PHDR, .a_un = {.a_val = (uint64_t)ELFHeader->e_phoff}}}); auxv.push_back({.archaux = {.a_type = AT_PHDR, .a_un = {.a_val = (uint64_t)ELFHeader->e_phoff}}});
auxv.push_back({.archaux = {.a_type = AT_PHENT, .a_un = {.a_val = (uint64_t)ELFHeader->e_phentsize}}}); auxv.push_back({.archaux = {.a_type = AT_PHENT, .a_un = {.a_val = (uint64_t)ELFHeader->e_phentsize}}});
auxv.push_back({.archaux = {.a_type = AT_PHNUM, .a_un = {.a_val = (uint64_t)ELFHeader->e_phnum}}}); auxv.push_back({.archaux = {.a_type = AT_PHNUM, .a_un = {.a_val = (uint64_t)ELFHeader->e_phnum}}});
auxv.push_back({.archaux = {.a_type = AT_PAGESZ, .a_un = {.a_val = (uint64_t)PAGE_SIZE}}}); auxv.push_back({.archaux = {.a_type = AT_PAGESZ, .a_un = {.a_val = (uint64_t)PAGE_SIZE}}});
auxv.push_back({.archaux = {.a_type = AT_BASE, .a_un = {.a_val = (uint64_t)Offset}}}); auxv.push_back({.archaux = {.a_type = AT_BASE, .a_un = {.a_val = (uint64_t)Offset}}});
auxv.push_back({.archaux = {.a_type = AT_ENTRY, .a_un = {.a_val = (uint64_t)ELFHeader->e_entry + (uint64_t)Offset}}}); auxv.push_back({.archaux = {.a_type = AT_ENTRY, .a_un = {.a_val = (uint64_t)ELFHeader->e_entry + (uint64_t)pheader->p_offset}}});
auxv.push_back({.archaux = {.a_type = AT_PLATFORM, .a_un = {.a_val = (uint64_t) "x86_64"}}}); auxv.push_back({.archaux = {.a_type = AT_PLATFORM, .a_un = {.a_val = (uint64_t) "x86_64"}}});
auxv.push_back({.archaux = {.a_type = AT_EXECFN, .a_un = {.a_val = (uint64_t)Path}}}); auxv.push_back({.archaux = {.a_type = AT_EXECFN, .a_un = {.a_val = (uint64_t)Path}}});
TCB *Thread = TaskManager->CreateThread(Process, TCB *Thread = TaskManager->CreateThread(Process,
(IP)ELFHeader->e_entry, (IP)ELFHeader->e_entry,
argv, envp, auxv, argv, envp, auxv,
(IPOffset)Offset, (IPOffset)pheader->p_offset,
Arch, Arch,
Comp); Comp);
ret.Process = Process; ret.Process = Process;

View File

@ -516,6 +516,9 @@ namespace Tasking
*Frame = CurrentCPU->CurrentThread->Registers; *Frame = CurrentCPU->CurrentThread->Registers;
GlobalDescriptorTable::SetKernelStack((void *)((uint64_t)CurrentCPU->CurrentThread->Stack->GetStackTop())); GlobalDescriptorTable::SetKernelStack((void *)((uint64_t)CurrentCPU->CurrentThread->Stack->GetStackTop()));
CPU::x64::writecr3({.raw = (uint64_t)CurrentCPU->CurrentProcess->PageTable}); CPU::x64::writecr3({.raw = (uint64_t)CurrentCPU->CurrentProcess->PageTable});
// Not sure if this is needed, but it's better to be safe than sorry.
asmv("movq %cr3, %rax");
asmv("movq %rax, %cr3");
CPU::x64::fxrstor(CurrentCPU->CurrentThread->FXRegion); CPU::x64::fxrstor(CurrentCPU->CurrentThread->FXRegion);
CPU::x64::wrmsr(CPU::x64::MSR_GS_BASE, CurrentCPU->CurrentThread->GSBase); CPU::x64::wrmsr(CPU::x64::MSR_GS_BASE, CurrentCPU->CurrentThread->GSBase);
CPU::x64::wrmsr(CPU::x64::MSR_FS_BASE, CurrentCPU->CurrentThread->FSBase); CPU::x64::wrmsr(CPU::x64::MSR_FS_BASE, CurrentCPU->CurrentThread->FSBase);
@ -882,6 +885,7 @@ namespace Tasking
for (uint64_t i = 0; i < ArgvSize; i++) for (uint64_t i = 0; i < ArgvSize; i++)
{ {
void *Tmp = KernelAllocator.RequestPages(TO_PAGES(strlen(argv[i]) + 1)); void *Tmp = KernelAllocator.RequestPages(TO_PAGES(strlen(argv[i]) + 1));
debug("argv[%d] ptr %#lx", i, (uint64_t)Tmp);
Memory::Virtual().Map(Tmp, Tmp, Memory::PTFlag::RW | Memory::PTFlag::US); Memory::Virtual().Map(Tmp, Tmp, Memory::PTFlag::RW | Memory::PTFlag::US);
_argv = (uint8_t *)Tmp; _argv = (uint8_t *)Tmp;
strcpy((char *)_argv, argv[i]); strcpy((char *)_argv, argv[i]);
@ -893,6 +897,7 @@ namespace Tasking
for (uint64_t i = 0; i < EnvpSize; i++) for (uint64_t i = 0; i < EnvpSize; i++)
{ {
void *Tmp = KernelAllocator.RequestPages(TO_PAGES(strlen(envp[i]) + 1)); void *Tmp = KernelAllocator.RequestPages(TO_PAGES(strlen(envp[i]) + 1));
debug("envp[%d] ptr %#lx", i, (uint64_t)Tmp);
Memory::Virtual().Map(Tmp, Tmp, Memory::PTFlag::RW | Memory::PTFlag::US); Memory::Virtual().Map(Tmp, Tmp, Memory::PTFlag::RW | Memory::PTFlag::US);
_envp = (uint8_t *)Tmp; _envp = (uint8_t *)Tmp;
strcpy((char *)_envp, envp[i]); strcpy((char *)_envp, envp[i]);
@ -957,7 +962,7 @@ namespace Tasking
Thread->Info.Architecture = Architecture; Thread->Info.Architecture = Architecture;
Thread->Info.Compatibility = Compatibility; Thread->Info.Compatibility = Compatibility;
debug("Thread offset is %#lx (EntryPoint:%#lx)", Thread->Offset, Thread->EntryPoint); debug("Thread offset is %#lx (EntryPoint: %#lx) => RIP: %#lx", Thread->Offset, Thread->EntryPoint, Thread->Registers.rip);
if (Parent->Security.TrustLevel == TaskTrustLevel::User) if (Parent->Security.TrustLevel == TaskTrustLevel::User)
debug("Thread stack region is %#lx-%#lx (U) and rsp is %#lx", Thread->Stack->GetStackBottom(), Thread->Stack->GetStackTop(), Thread->Registers.rsp); debug("Thread stack region is %#lx-%#lx (U) and rsp is %#lx", Thread->Stack->GetStackBottom(), Thread->Stack->GetStackTop(), Thread->Registers.rsp);
else else

View File

@ -224,6 +224,7 @@ namespace Memory
void UnreservePages(void *Address, uint64_t PageCount); void UnreservePages(void *Address, uint64_t PageCount);
public: public:
Bitmap GetPageBitmap() { return PageBitmap; }
/** /**
* @brief Get Total Memory * @brief Get Total Memory
* *
@ -338,6 +339,7 @@ namespace Memory
NewLock(MemoryLock); NewLock(MemoryLock);
PageTable *Table = nullptr; PageTable *Table = nullptr;
public:
class PageMapIndexer class PageMapIndexer
{ {
public: public:
@ -348,7 +350,6 @@ namespace Memory
PageMapIndexer(uint64_t VirtualAddress); PageMapIndexer(uint64_t VirtualAddress);
}; };
public:
/** /**
* @brief Check if page is present * @brief Check if page is present
* *
@ -393,6 +394,15 @@ namespace Memory
*/ */
void Unmap(void *VirtualAddress, uint64_t PageCount); void Unmap(void *VirtualAddress, uint64_t PageCount);
/**
* @brief Remap page.
*
* @param VirtualAddress Virtual address of the page.
* @param PhysicalAddress Physical address of the page.
* @param Flags Flags of the page. Check PTFlag enum.
*/
void Remap(void *VirtualAddress, void *PhysicalAddress, uint64_t Flags);
/** /**
* @brief Construct a new Virtual object * @brief Construct a new Virtual object
* *