Memory related code optimization

Alex
2022-12-05 00:48:41 +02:00
parent f34278891b
commit 2d1c42fbcd
9 changed files with 123 additions and 57 deletions

View File

@@ -44,8 +44,8 @@ __no_instrument_function void MapFromZero(PageTable *PT, BootInfo *Info)
uint64_t MemSize = Info->Memory.Size;
for (uint64_t t = 0; t < MemSize; t += PAGE_SIZE)
{
va.Map((void *)t, (void *)t, PTFlag::RW);
va.Map((void *)VirtualOffsetNormalVMA, (void *)t, PTFlag::RW);
va.Map((void *)t, (void *)t, PTFlag::RW /* | PTFlag::US */);
va.Map((void *)VirtualOffsetNormalVMA, (void *)t, PTFlag::RW /* | PTFlag::US */);
VirtualOffsetNormalVMA += PAGE_SIZE;
}
}
@@ -62,7 +62,7 @@ __no_instrument_function void MapFramebuffer(PageTable *PT, BootInfo *Info)
for (uint64_t fb_base = (uint64_t)Info->Framebuffer[itrfb].BaseAddress;
fb_base < ((uint64_t)Info->Framebuffer[itrfb].BaseAddress + ((Info->Framebuffer[itrfb].Pitch * Info->Framebuffer[itrfb].Height) + PAGE_SIZE));
fb_base += PAGE_SIZE)
va.Map((void *)(fb_base + NORMAL_VMA_OFFSET), (void *)fb_base, PTFlag::RW | PTFlag::US);
va.Map((void *)(fb_base + NORMAL_VMA_OFFSET), (void *)fb_base, PTFlag::RW | PTFlag::US | PTFlag::G);
itrfb++;
}
}
@@ -91,21 +91,21 @@ __no_instrument_function void MapKernel(PageTable *PT, BootInfo *Info)
for (k = KernelTextEnd; k < KernelDataEnd; k += PAGE_SIZE)
{
va.Map((void *)k, (void *)BaseKernelMapAddress, PTFlag::RW);
va.Map((void *)k, (void *)BaseKernelMapAddress, PTFlag::RW | PTFlag::G);
KernelAllocator.LockPage((void *)BaseKernelMapAddress);
BaseKernelMapAddress += PAGE_SIZE;
}
for (k = KernelDataEnd; k < KernelRoDataEnd; k += PAGE_SIZE)
{
va.Map((void *)k, (void *)BaseKernelMapAddress, PTFlag::P);
va.Map((void *)k, (void *)BaseKernelMapAddress, PTFlag::P | PTFlag::G);
KernelAllocator.LockPage((void *)BaseKernelMapAddress);
BaseKernelMapAddress += PAGE_SIZE;
}
for (k = KernelRoDataEnd; k < KernelEnd; k += PAGE_SIZE)
{
va.Map((void *)k, (void *)BaseKernelMapAddress, PTFlag::RW);
va.Map((void *)k, (void *)BaseKernelMapAddress, PTFlag::RW | PTFlag::G);
KernelAllocator.LockPage((void *)BaseKernelMapAddress);
BaseKernelMapAddress += PAGE_SIZE;
}
@@ -209,6 +209,7 @@ __no_instrument_function void InitializeMemoryManagement(BootInfo *Info)
#elif defined(__aarch64__)
asmv("msr ttbr0_el1, %0" ::"r"(KernelPageTable));
#endif
debug("Page table updated.");
if (strstr(Info->Kernel.CommandLine, "xallocv1"))
{
XallocV1Allocator = new Xalloc::AllocatorV1((void *)KERNEL_HEAP_BASE, false, false);

View File

@@ -7,7 +7,7 @@ namespace Memory
void PageDirectoryEntry::ClearFlags() { this->Value.raw = 0; }
void PageDirectoryEntry::SetFlag(uint64_t Flag, bool Enabled)
{
this->Value.raw &= ~Flag;
this->Value.raw = 0;
if (Enabled)
this->Value.raw |= Flag;
}
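
The new SetFlag zeroes the entire raw entry before applying the requested flag, so it also discards any address bits already stored in the entry. This is presumably why the VirtualMemoryManager hunks later in this commit move SetAddress after SetFlag/AddFlag. A minimal sketch of the ordering the new semantics require, assuming a freshly constructed entry:

PageDirectoryEntry PDE;                          // initial contents do not matter; SetFlag will clear them
PDE.SetFlag(PTFlag::P, true);                    // zeroes the entry, then marks it Present
PDE.AddFlag(Flags);                              // OR in the caller-supplied flags
PDE.SetAddress((uint64_t)PhysicalAddress >> 12); // store the page frame number last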

View File

@@ -2,21 +2,27 @@
namespace Memory
{
Virtual::PageMapIndexer::PageMapIndexer(uint64_t VirtualAddress)
{
#if defined(__amd64__)
this->PDPIndex = (VirtualAddress & ((uint64_t)0x1FF << 39)) >> 39;
this->PDIndex = (VirtualAddress & ((uint64_t)0x1FF << 30)) >> 30;
this->PTIndex = (VirtualAddress & ((uint64_t)0x1FF << 21)) >> 21;
this->PIndex = (VirtualAddress & ((uint64_t)0x1FF << 12)) >> 12;
uint64_t Address = VirtualAddress;
Address >>= 12;
this->PIndex = Address & 0x1FF;
Address >>= 9;
this->PTIndex = Address & 0x1FF;
Address >>= 9;
this->PDIndex = Address & 0x1FF;
Address >>= 9;
this->PDPIndex = Address & 0x1FF;
#elif defined(__i386__)
this->PDIndex = (VirtualAddress & ((uint64_t)0x3FF << 22)) >> 22;
this->PTIndex = (VirtualAddress & ((uint64_t)0x3FF << 12)) >> 12;
this->PIndex = (VirtualAddress & ((uint64_t)0xFFF)) >> 0;
uint64_t Address = VirtualAddress;
Address >>= 12;
this->PIndex = Address & 0x3FF;
Address >>= 10;
this->PTIndex = Address & 0x3FF;
Address >>= 10;
this->PDIndex = Address & 0x3FF;
#elif defined(__aarch64__)
this->PDIndex = (VirtualAddress & ((uint64_t)0x1FF << 30)) >> 30;
this->PTIndex = (VirtualAddress & ((uint64_t)0x1FF << 21)) >> 21;
this->PIndex = (VirtualAddress & ((uint64_t)0x1FF << 12)) >> 12;
#endif
}
}
}
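
Both the old mask-and-shift form and the new incremental-shift form split a 4-level amd64 virtual address into a 12-bit page offset plus four 9-bit table indices; the new version just keeps shifting one value and applying a single 9-bit mask instead of rebuilding a wide mask per level. As a worked example (a sketch, not taken from the commit), the typical higher-half kernel base decomposes as follows:

uint64_t Address = 0xFFFFFFFF80000000; // common -2 GiB kernel base
Address >>= 12;                        // drop the 12-bit page offset
uint64_t PIndex = Address & 0x1FF;     // 0
Address >>= 9;
uint64_t PTIndex = Address & 0x1FF;    // 0
Address >>= 9;
uint64_t PDIndex = Address & 0x1FF;    // 510
Address >>= 9;
uint64_t PDPIndex = Address & 0x1FF;   // 511, the index into Table->Entries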

View File

@@ -118,13 +118,13 @@ namespace Memory
void Physical::FreePage(void *Address)
{
SmartLock(this->MemoryLock);
if (Address == nullptr)
if (unlikely(Address == nullptr))
{
warn("Null pointer passed to FreePage.");
return;
}
uint64_t Index = (uint64_t)Address / PAGE_SIZE;
if (PageBitmap[Index] == false)
if (unlikely(PageBitmap[Index] == false))
return;
if (PageBitmap.Set(Index, false))
@@ -138,7 +138,7 @@ namespace Memory
void Physical::FreePages(void *Address, uint64_t Count)
{
if (Address == nullptr || Count == 0)
if (unlikely(Address == nullptr || Count == 0))
{
warn("%s%s passed to FreePages.", Address == nullptr ? "Null pointer" : "", Count == 0 ? "Zero count" : "");
return;
@@ -150,11 +150,11 @@ namespace Memory
void Physical::LockPage(void *Address)
{
if (Address == nullptr)
if (unlikely(Address == nullptr))
warn("Trying to lock null address.");
uint64_t Index = (uint64_t)Address / PAGE_SIZE;
if (PageBitmap[Index] == true)
if (unlikely(PageBitmap[Index] == true))
return;
if (PageBitmap.Set(Index, true))
{
@@ -165,7 +165,7 @@ namespace Memory
void Physical::LockPages(void *Address, uint64_t PageCount)
{
if (Address == nullptr || PageCount == 0)
if (unlikely(Address == nullptr || PageCount == 0))
warn("Trying to lock %s%s.", Address ? "null address" : "", PageCount ? "0 pages" : "");
for (uint64_t i = 0; i < PageCount; i++)
@@ -174,11 +174,11 @@ namespace Memory
void Physical::ReservePage(void *Address)
{
if (Address == nullptr)
if (unlikely(Address == nullptr))
warn("Trying to reserve null address.");
uint64_t Index = (uint64_t)Address / PAGE_SIZE;
if (PageBitmap[Index] == true)
if (unlikely(PageBitmap[Index] == true))
return;
if (PageBitmap.Set(Index, true))
@@ -190,7 +190,7 @@ namespace Memory
void Physical::ReservePages(void *Address, uint64_t PageCount)
{
if (Address == nullptr || PageCount == 0)
if (unlikely(Address == nullptr || PageCount == 0))
warn("Trying to reserve %s%s.", Address ? "null address" : "", PageCount ? "0 pages" : "");
for (uint64_t t = 0; t < PageCount; t++)
@@ -199,11 +199,11 @@ namespace Memory
void Physical::UnreservePage(void *Address)
{
if (Address == nullptr)
if (unlikely(Address == nullptr))
warn("Trying to unreserve null address.");
uint64_t Index = (uint64_t)Address / PAGE_SIZE;
if (PageBitmap[Index] == false)
if (unlikely(PageBitmap[Index] == false))
return;
if (PageBitmap.Set(Index, false))
@@ -217,7 +217,7 @@ namespace Memory
void Physical::UnreservePages(void *Address, uint64_t PageCount)
{
if (Address == nullptr || PageCount == 0)
if (unlikely(Address == nullptr || PageCount == 0))
warn("Trying to unreserve %s%s.", Address ? "null address" : "", PageCount ? "0 pages" : "");
for (uint64_t t = 0; t < PageCount; t++)
@@ -253,7 +253,7 @@ namespace Memory
CPU::Stop();
}
uint64_t BitmapSize = ALIGN_UP((MemorySize / PAGE_SIZE) / 8, PAGE_SIZE);
uint64_t BitmapSize = (MemorySize / PAGE_SIZE) / 8 + 1;
trace("Initializing Bitmap at %llp-%llp (%lld Bytes)",
LargestFreeMemorySegment,
(void *)((uint64_t)LargestFreeMemorySegment + BitmapSize),
@@ -264,13 +264,11 @@ namespace Memory
*(uint8_t *)(PageBitmap.Buffer + i) = 0;
trace("Reserving pages...");
this->ReservePages(0, MemorySize / PAGE_SIZE + 1);
trace("Unreserve usable pages...");
for (uint64_t i = 0; i < Info->Memory.Entries; i++)
if (Info->Memory.Entry[i].Type == Usable)
this->UnreservePages((void *)Info->Memory.Entry[i].BaseAddress, Info->Memory.Entry[i].Length / PAGE_SIZE + 1);
if (Info->Memory.Entry[i].Type != Usable)
this->ReservePages((void *)Info->Memory.Entry[i].BaseAddress, Info->Memory.Entry[i].Length / PAGE_SIZE + 1);
trace("Locking bitmap pages...");
this->ReservePages(0, 0x100); // Reserve between 0 and 0x100000.
this->ReservePages(0, 0x100);
this->LockPages(PageBitmap.Buffer, PageBitmap.Size / PAGE_SIZE + 1);
}
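
The argument checks on these hot allocator paths are now wrapped in unlikely(). The macro itself does not appear in this diff; it is conventionally built on GCC/Clang's __builtin_expect so the compiler can keep the warn/early-return branches off the fast path, roughly:

#define likely(x) __builtin_expect(!!(x), 1)   // condition expected to be true
#define unlikely(x) __builtin_expect(!!(x), 0) // condition expected to be false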

View File

@@ -11,6 +11,7 @@ namespace Memory
if (this->UserMode)
{
void *AllocatedStack = KernelAllocator.RequestPages(TO_PAGES(USER_STACK_SIZE));
debug("AllocatedStack: %p", AllocatedStack);
memset(AllocatedStack, 0, USER_STACK_SIZE);
for (uint64_t i = 0; i < TO_PAGES(USER_STACK_SIZE); i++)
{
@@ -25,6 +26,7 @@ namespace Memory
else
{
this->StackBottom = KernelAllocator.RequestPages(TO_PAGES(STACK_SIZE));
debug("StackBottom: %p", this->StackBottom);
memset(this->StackBottom, 0, STACK_SIZE);
this->StackTop = (void *)((uint64_t)this->StackBottom + STACK_SIZE);
this->Size = STACK_SIZE;
@@ -32,7 +34,12 @@ namespace Memory
trace("Allocated stack at %p", this->StackBottom);
}
StackGuard::~StackGuard() { KernelAllocator.FreePages(this->StackBottom, TO_PAGES(this->Size)); }
StackGuard::~StackGuard()
{
fixme("Temporarily disabled stack guard deallocation");
// KernelAllocator.FreePages(this->StackBottom, TO_PAGES(this->Size));
// debug("Freed stack at %p", this->StackBottom);
}
bool StackGuard::Expand(uint64_t FaultAddress)
{
@@ -46,6 +53,7 @@ namespace Memory
else
{
void *AllocatedStack = KernelAllocator.RequestPages(TO_PAGES(USER_STACK_SIZE));
debug("AllocatedStack: %p", AllocatedStack);
memset(AllocatedStack, 0, USER_STACK_SIZE);
for (uint64_t i = 0; i < TO_PAGES(USER_STACK_SIZE); i++)
Virtual(this->Table).Map((void *)((uint64_t)AllocatedStack + (i * PAGE_SIZE)), (void *)((uint64_t)this->StackBottom - (i * PAGE_SIZE)), PTFlag::RW | PTFlag::US);

View File

@@ -45,7 +45,7 @@ namespace Memory
void Virtual::Map(void *VirtualAddress, void *PhysicalAddress, uint64_t Flags)
{
SmartLock(this->MemoryLock);
if (!this->Table)
if (unlikely(!this->Table))
{
error("No page table");
return;
@@ -57,9 +57,9 @@ namespace Memory
{
PDP = (PageTable *)KernelAllocator.RequestPage();
memset(PDP, 0, PAGE_SIZE);
PDE.SetAddress((uint64_t)PDP >> 12);
PDE.SetFlag(PTFlag::P, true);
PDE.AddFlag(Flags);
PDE.SetAddress((uint64_t)PDP >> 12);
this->Table->Entries[Index.PDPIndex] = PDE;
}
else
@@ -71,9 +71,9 @@ namespace Memory
{
PD = (PageTable *)KernelAllocator.RequestPage();
memset(PD, 0, PAGE_SIZE);
PDE.SetAddress((uint64_t)PD >> 12);
PDE.SetFlag(PTFlag::P, true);
PDE.AddFlag(Flags);
PDE.SetAddress((uint64_t)PD >> 12);
PDP->Entries[Index.PDIndex] = PDE;
}
else
@@ -85,19 +85,20 @@ namespace Memory
{
PT = (PageTable *)KernelAllocator.RequestPage();
memset(PT, 0, PAGE_SIZE);
PDE.SetAddress((uint64_t)PT >> 12);
PDE.SetFlag(PTFlag::P, true);
PDE.AddFlag(Flags);
PDE.SetAddress((uint64_t)PT >> 12);
PD->Entries[Index.PTIndex] = PDE;
}
else
PT = (PageTable *)((uint64_t)PDE.GetAddress() << 12);
PDE = PT->Entries[Index.PIndex];
PDE.SetAddress((uint64_t)PhysicalAddress >> 12);
PDE.SetFlag(PTFlag::P, true);
PDE.AddFlag(Flags);
PDE.SetAddress((uint64_t)PhysicalAddress >> 12);
PT->Entries[Index.PIndex] = PDE;
#if defined(__amd64__)
CPU::x64::invlpg(VirtualAddress);
#elif defined(__i386__)
@@ -147,13 +148,35 @@ namespace Memory
PageMapIndexer Index = PageMapIndexer((uint64_t)VirtualAddress);
PageDirectoryEntry PDE = this->Table->Entries[Index.PDPIndex];
PDE.ClearFlags();
#if defined(__amd64__) || defined(__i386__)
asmv("invlpg (%0)"
:
: "r"(VirtualAddress)
: "memory");
if (PDE.GetFlag(PTFlag::P))
{
PageTable *PDP = (PageTable *)((uint64_t)PDE.GetAddress() << 12);
PDE = PDP->Entries[Index.PDIndex];
if (PDE.GetFlag(PTFlag::P))
{
PageTable *PD = (PageTable *)((uint64_t)PDE.GetAddress() << 12);
PDE = PD->Entries[Index.PTIndex];
if (PDE.GetFlag(PTFlag::P))
{
PageTable *PT = (PageTable *)((uint64_t)PDE.GetAddress() << 12);
PDE = PT->Entries[Index.PIndex];
if (PDE.GetFlag(PTFlag::P))
{
PDE.ClearFlags();
// debug("Unmapped %#lx", VirtualAddress);
}
}
}
}
#if defined(__amd64__)
CPU::x64::invlpg(VirtualAddress);
#elif defined(__i386__)
CPU::x32::invlpg(VirtualAddress);
#elif defined(__aarch64__)
asmv("dsb sy");
asmv("tlbi vae1is, %0"
@@ -171,6 +194,12 @@ namespace Memory
this->Unmap((void *)((uint64_t)VirtualAddress + (i * PAGE_SIZE)));
}
void Virtual::Remap(void *VirtualAddress, void *PhysicalAddress, uint64_t Flags)
{
this->Unmap(VirtualAddress);
this->Map(VirtualAddress, PhysicalAddress, Flags);
}
Virtual::Virtual(PageTable *Table)
{
if (Table)