mirror of
https://github.com/Fennix-Project/Kernel.git
synced 2025-07-11 15:29:18 +00:00
Updated types
@@ -6,10 +6,10 @@ namespace Xalloc
 {
     struct SpinLockData
     {
-        uint64_t LockData = 0x0;
+        uintptr_t LockData = 0x0;
         const char *CurrentHolder = "(null)";
         const char *AttemptingToGet = "(null)";
-        uint64_t Count = 0;
+        size_t Count = 0;
     };

     void DeadLock(SpinLockData Lock)
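The type changes in this commit are about portability rather than behavior: uintptr_t always matches the platform's pointer width (the kernel has an __i386__ code path further down), while uint64_t would force 64-bit fields and arithmetic even on a 32-bit build. A minimal standalone sketch of the distinction (not part of the commit):

    #include <cstdint>
    #include <cstddef>

    // uintptr_t can round-trip any object pointer, so it fits a lock word
    // that may hold an address or an address-sized token.
    static_assert(sizeof(uintptr_t) >= sizeof(void *),
                  "uintptr_t can hold any object pointer");

    void Example(void *Address)
    {
        // Portable on 32- and 64-bit builds alike; with uint64_t this cast
        // would widen on i386 and need narrowing on the way back.
        uintptr_t LockData = reinterpret_cast<uintptr_t>(Address);
        size_t Count = 0; // counts use size_t, addresses use uintptr_t
        (void)LockData;
        (void)Count;
    }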
@@ -45,9 +45,9 @@ __no_instrument_function void MapFromZero(PageTable4 *PT, BootInfo *Info)
     void *NullAddress = KernelAllocator.RequestPage();
     memset(NullAddress, 0, PAGE_SIZE); // TODO: If the CPU instruction pointer hits this page, there should be function to handle it. (memcpy assembly code?)
     va.Map((void *)0, (void *)NullAddress, PTFlag::RW | PTFlag::US);
-    uint64_t VirtualOffsetNormalVMA = NORMAL_VMA_OFFSET;
-    uint64_t MemSize = Info->Memory.Size;
-    for (uint64_t t = 0; t < MemSize; t += PAGE_SIZE)
+    uintptr_t VirtualOffsetNormalVMA = NORMAL_VMA_OFFSET;
+    size_t MemSize = Info->Memory.Size;
+    for (size_t t = 0; t < MemSize; t += PAGE_SIZE)
     {
         va.Map((void *)t, (void *)t, PTFlag::RW /* | PTFlag::US */);
         va.Map((void *)VirtualOffsetNormalVMA, (void *)t, PTFlag::RW /* | PTFlag::US */);
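The loop gives every physical page two views: an identity mapping (virtual == physical) and a higher-half mapping at NORMAL_VMA_OFFSET. A sketch of the intent, assuming VirtualOffsetNormalVMA is advanced by PAGE_SIZE per iteration in the lines this hunk elides:

    // Sketch only; va.Map(virtual, physical, flags) maps one page.
    for (size_t t = 0; t < MemSize; t += PAGE_SIZE)
    {
        va.Map((void *)t, (void *)t, PTFlag::RW);                       // identity view
        va.Map((void *)(NORMAL_VMA_OFFSET + t), (void *)t, PTFlag::RW); // higher-half view
    }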
@@ -70,8 +70,8 @@ __no_instrument_function void MapFramebuffer(PageTable4 *PT, BootInfo *Info)
         if (!Info->Framebuffer[itrfb].BaseAddress)
             break;

-        for (uint64_t fb_base = (uint64_t)Info->Framebuffer[itrfb].BaseAddress;
-             fb_base < ((uint64_t)Info->Framebuffer[itrfb].BaseAddress + ((Info->Framebuffer[itrfb].Pitch * Info->Framebuffer[itrfb].Height) + PAGE_SIZE));
+        for (uintptr_t fb_base = (uintptr_t)Info->Framebuffer[itrfb].BaseAddress;
+             fb_base < ((uintptr_t)Info->Framebuffer[itrfb].BaseAddress + ((Info->Framebuffer[itrfb].Pitch * Info->Framebuffer[itrfb].Height) + PAGE_SIZE));
              fb_base += PAGE_SIZE)
             va.Map((void *)fb_base, (void *)fb_base, PTFlag::RW | PTFlag::US | PTFlag::G);
         itrfb++;
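The upper bound covers Pitch * Height bytes of framebuffer plus one page of slack. As a worked example (numbers are hypothetical): a 1024x768 display with a 4096-byte pitch spans 4096 * 768 = 3,145,728 bytes, i.e. exactly 768 pages, and the extra PAGE_SIZE in the bound makes the loop touch a 769th page so a partially covered final page is never missed. PTFlag::G marks the mappings global, so the TLB keeps them across address-space switches.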
@@ -84,14 +84,14 @@ __no_instrument_function void MapKernel(PageTable4 *PT, BootInfo *Info)
    Kernel Start & Text Start ------ Text End ------ Kernel Rodata End ------ Kernel Data End & Kernel End
     */
     Virtual va = Virtual(PT);
-    uint64_t KernelStart = (uint64_t)&_kernel_start;
-    uint64_t KernelTextEnd = (uint64_t)&_kernel_text_end;
-    uint64_t KernelDataEnd = (uint64_t)&_kernel_data_end;
-    uint64_t KernelRoDataEnd = (uint64_t)&_kernel_rodata_end;
-    uint64_t KernelEnd = (uint64_t)&_kernel_end;
+    uintptr_t KernelStart = (uintptr_t)&_kernel_start;
+    uintptr_t KernelTextEnd = (uintptr_t)&_kernel_text_end;
+    uintptr_t KernelDataEnd = (uintptr_t)&_kernel_data_end;
+    uintptr_t KernelRoDataEnd = (uintptr_t)&_kernel_rodata_end;
+    uintptr_t KernelEnd = (uintptr_t)&_kernel_end;

-    uint64_t BaseKernelMapAddress = (uint64_t)Info->Kernel.PhysicalBase;
-    uint64_t k;
+    uintptr_t BaseKernelMapAddress = (uintptr_t)Info->Kernel.PhysicalBase;
+    uintptr_t k;

     for (k = KernelStart; k < KernelTextEnd; k += PAGE_SIZE)
     {
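Only the .text loop head appears in this hunk; per the diagram above, the function walks the kernel image section by section, advancing a physical cursor (starting at BaseKernelMapAddress) in step with the virtual one. A hedged sketch of that shape (the per-section loops and their flags are assumptions, not shown in this diff):

    uintptr_t Phys = BaseKernelMapAddress;
    for (uintptr_t k = KernelStart; k < KernelTextEnd; k += PAGE_SIZE, Phys += PAGE_SIZE)
        va.Map((void *)k, (void *)Phys, PTFlag::RW); // .text
    // ...then [KernelTextEnd, KernelRoDataEnd) and [KernelRoDataEnd, KernelDataEnd),
    // each its own loop, with Phys continuing to track k.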
@@ -130,9 +130,9 @@ __no_instrument_function void InitializeMemoryManagement(BootInfo *Info)
 #ifdef DEBUG
     for (uint64_t i = 0; i < Info->Memory.Entries; i++)
     {
-        uint64_t Base = reinterpret_cast<uint64_t>(Info->Memory.Entry[i].BaseAddress);
-        uint64_t Length = Info->Memory.Entry[i].Length;
-        uint64_t End = Base + Length;
+        uintptr_t Base = reinterpret_cast<uintptr_t>(Info->Memory.Entry[i].BaseAddress);
+        uintptr_t Length = Info->Memory.Entry[i].Length;
+        uintptr_t End = Base + Length;
         const char *Type = "Unknown";

         switch (Info->Memory.Entry[i].Type)
@@ -233,7 +233,7 @@ __no_instrument_function void InitializeMemoryManagement(BootInfo *Info)
     }
 }

-void *HeapMalloc(uint64_t Size)
+void *HeapMalloc(size_t Size)
 {
     switch (AllocatorType)
     {
@@ -256,7 +256,7 @@ void *HeapMalloc(uint64_t Size)
     }
 }

-void *HeapCalloc(uint64_t n, uint64_t Size)
+void *HeapCalloc(size_t n, size_t Size)
 {
     switch (AllocatorType)
     {
@@ -279,7 +279,7 @@ void *HeapCalloc(uint64_t n, uint64_t Size)
     }
 }

-void *HeapRealloc(void *Address, uint64_t Size)
+void *HeapRealloc(void *Address, size_t Size)
 {
     switch (AllocatorType)
     {
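Switching the size parameters of all three wrappers to size_t makes them mirror the standard C allocator signatures, so callers can pass sizeof(...) and container-size results without conversions:

    // <stdlib.h> signatures these wrappers now line up with:
    void *malloc(size_t size);
    void *calloc(size_t nmemb, size_t size);
    void *realloc(void *ptr, size_t size);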
@@ -2,10 +2,10 @@

 namespace Memory
 {
-    Virtual::PageMapIndexer::PageMapIndexer(uint64_t VirtualAddress)
+    Virtual::PageMapIndexer::PageMapIndexer(uintptr_t VirtualAddress)
     {
 #if defined(__amd64__)
-        uint64_t Address = VirtualAddress;
+        uintptr_t Address = VirtualAddress;
         Address >>= 12;
         this->PTEIndex = Address & 0x1FF;
         Address >>= 9;
@@ -15,7 +15,7 @@ namespace Memory
         Address >>= 9;
         this->PMLIndex = Address & 0x1FF;
 #elif defined(__i386__)
-        uint64_t Address = VirtualAddress;
+        uintptr_t Address = VirtualAddress;
         Address >>= 12;
         this->PTEIndex = Address & 0x3FF;
         Address >>= 10;
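The indexer is just successive bit-slices of the virtual address above the 12-bit page offset: four 9-bit indices on amd64 (hence the & 0x1FF), two 10-bit indices on non-PAE i386 (hence the & 0x3FF). The shift/mask chain collapses to:

    // amd64, 4-level paging:
    PTEIndex   = (VirtualAddress >> 12) & 0x1FF;
    PDEIndex   = (VirtualAddress >> 21) & 0x1FF;
    PDPTEIndex = (VirtualAddress >> 30) & 0x1FF;
    PMLIndex   = (VirtualAddress >> 39) & 0x1FF;

    // i386 without PAE, 2-level paging:
    PTEIndex = (VirtualAddress >> 12) & 0x3FF;
    PDEIndex = (VirtualAddress >> 22) & 0x3FF;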
@@ -34,10 +34,10 @@ namespace Memory
         return false;
     }

-    bool Physical::SwapPages(void *Address, uint64_t PageCount)
+    bool Physical::SwapPages(void *Address, size_t PageCount)
     {
-        for (uint64_t i = 0; i < PageCount; i++)
-            if (!this->SwapPage((void *)((uint64_t)Address + (i * PAGE_SIZE))))
+        for (size_t i = 0; i < PageCount; i++)
+            if (!this->SwapPage((void *)((uintptr_t)Address + (i * PAGE_SIZE))))
                 return false;
         return false;
     }
@@ -48,10 +48,10 @@ namespace Memory
         return false;
     }

-    bool Physical::UnswapPages(void *Address, uint64_t PageCount)
+    bool Physical::UnswapPages(void *Address, size_t PageCount)
     {
-        for (uint64_t i = 0; i < PageCount; i++)
-            if (!this->UnswapPage((void *)((uint64_t)Address + (i * PAGE_SIZE))))
+        for (size_t i = 0; i < PageCount; i++)
+            if (!this->UnswapPage((void *)((uintptr_t)Address + (i * PAGE_SIZE))))
                 return false;
         return false;
     }
@@ -78,7 +78,7 @@ namespace Memory
         return nullptr;
     }

-    void *Physical::RequestPages(uint64_t Count)
+    void *Physical::RequestPages(size_t Count)
     {
         SmartLock(this->MemoryLock);
         for (; PageBitmapIndex < PageBitmap.Size * 8; PageBitmapIndex++)
@@ -91,7 +91,7 @@ namespace Memory
             if (PageBitmap[Index] == true)
                 continue;

-            for (uint64_t i = 0; i < Count; i++)
+            for (size_t i = 0; i < Count; i++)
                 if (PageBitmap[Index + i] == true)
                     goto NextPage;

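The surrounding function is a first-fit scan: starting from the cached PageBitmapIndex, it needs Count consecutive clear bits before it will hand out a run, bailing to NextPage on the first set bit. The core of the algorithm as a self-contained sketch (the bitmap simplified to a bool array, true meaning "in use"):

    #include <cstddef>

    // First-fit search for Count consecutive free pages.
    // Returns the first page index of the run, or (size_t)-1 if none exists.
    size_t FindFreeRun(const bool *Bitmap, size_t TotalPages, size_t Count)
    {
        for (size_t Index = 0; Index + Count <= TotalPages; Index++)
        {
            bool Free = true;
            for (size_t i = 0; i < Count; i++)
                if (Bitmap[Index + i])
                {
                    Free = false; // run broken; restart after this point
                    break;
                }
            if (Free)
                return Index;
        }
        return (size_t)-1;
    }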
@@ -123,7 +123,7 @@ namespace Memory
             warn("Null pointer passed to FreePage.");
             return;
         }
-        uint64_t Index = (uint64_t)Address / PAGE_SIZE;
+        size_t Index = (size_t)Address / PAGE_SIZE;
         if (unlikely(PageBitmap[Index] == false))
             return;

@@ -136,7 +136,7 @@ namespace Memory
         }
     }

-    void Physical::FreePages(void *Address, uint64_t Count)
+    void Physical::FreePages(void *Address, size_t Count)
     {
         if (unlikely(Address == nullptr || Count == 0))
         {
@@ -144,8 +144,8 @@ namespace Memory
             return;
         }

-        for (uint64_t t = 0; t < Count; t++)
-            this->FreePage((void *)((uint64_t)Address + (t * PAGE_SIZE)));
+        for (size_t t = 0; t < Count; t++)
+            this->FreePage((void *)((uintptr_t)Address + (t * PAGE_SIZE)));
     }

     void Physical::LockPage(void *Address)
@@ -153,7 +153,7 @@ namespace Memory
         if (unlikely(Address == nullptr))
             warn("Trying to lock null address.");

-        uint64_t Index = (uint64_t)Address / PAGE_SIZE;
+        uintptr_t Index = (uintptr_t)Address / PAGE_SIZE;
         if (unlikely(PageBitmap[Index] == true))
             return;
         if (PageBitmap.Set(Index, true))
@@ -163,13 +163,13 @@ namespace Memory
         }
     }

-    void Physical::LockPages(void *Address, uint64_t PageCount)
+    void Physical::LockPages(void *Address, size_t PageCount)
     {
         if (unlikely(Address == nullptr || PageCount == 0))
             warn("Trying to lock %s%s.", Address ? "null address" : "", PageCount ? "0 pages" : "");

-        for (uint64_t i = 0; i < PageCount; i++)
-            this->LockPage((void *)((uint64_t)Address + (i * PAGE_SIZE)));
+        for (size_t i = 0; i < PageCount; i++)
+            this->LockPage((void *)((uintptr_t)Address + (i * PAGE_SIZE)));
     }

     void Physical::ReservePage(void *Address)
@@ -177,7 +177,7 @@ namespace Memory
         if (unlikely(Address == nullptr))
             warn("Trying to reserve null address.");

-        uint64_t Index = (uint64_t)Address / PAGE_SIZE;
+        uintptr_t Index = (uintptr_t)Address / PAGE_SIZE;
         if (unlikely(PageBitmap[Index] == true))
             return;

@@ -188,13 +188,13 @@ namespace Memory
         }
     }

-    void Physical::ReservePages(void *Address, uint64_t PageCount)
+    void Physical::ReservePages(void *Address, size_t PageCount)
     {
         if (unlikely(Address == nullptr || PageCount == 0))
             warn("Trying to reserve %s%s.", Address ? "null address" : "", PageCount ? "0 pages" : "");

-        for (uint64_t t = 0; t < PageCount; t++)
-            this->ReservePage((void *)((uint64_t)Address + (t * PAGE_SIZE)));
+        for (size_t t = 0; t < PageCount; t++)
+            this->ReservePage((void *)((uintptr_t)Address + (t * PAGE_SIZE)));
     }

     void Physical::UnreservePage(void *Address)
@@ -202,7 +202,7 @@ namespace Memory
         if (unlikely(Address == nullptr))
             warn("Trying to unreserve null address.");

-        uint64_t Index = (uint64_t)Address / PAGE_SIZE;
+        uintptr_t Index = (uintptr_t)Address / PAGE_SIZE;
         if (unlikely(PageBitmap[Index] == false))
             return;

@@ -215,13 +215,13 @@ namespace Memory
         }
     }

-    void Physical::UnreservePages(void *Address, uint64_t PageCount)
+    void Physical::UnreservePages(void *Address, size_t PageCount)
     {
         if (unlikely(Address == nullptr || PageCount == 0))
             warn("Trying to unreserve %s%s.", Address ? "null address" : "", PageCount ? "0 pages" : "");

-        for (uint64_t t = 0; t < PageCount; t++)
-            this->UnreservePage((void *)((uint64_t)Address + (t * PAGE_SIZE)));
+        for (size_t t = 0; t < PageCount; t++)
+            this->UnreservePage((void *)((uintptr_t)Address + (t * PAGE_SIZE)));
     }

     void Physical::Init(BootInfo *Info)
@@ -253,14 +253,14 @@ namespace Memory
             CPU::Stop();
         }

-        uint64_t BitmapSize = (MemorySize / PAGE_SIZE) / 8 + 1;
+        size_t BitmapSize = (MemorySize / PAGE_SIZE) / 8 + 1;
         trace("Initializing Bitmap at %llp-%llp (%lld Bytes)",
               LargestFreeMemorySegment,
-              (void *)((uint64_t)LargestFreeMemorySegment + BitmapSize),
+              (void *)((uintptr_t)LargestFreeMemorySegment + BitmapSize),
               BitmapSize);
         PageBitmap.Size = BitmapSize;
         PageBitmap.Buffer = (uint8_t *)LargestFreeMemorySegment;
-        for (uint64_t i = 0; i < BitmapSize; i++)
+        for (size_t i = 0; i < BitmapSize; i++)
             *(uint8_t *)(PageBitmap.Buffer + i) = 0;

         trace("Reserving pages...");
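One bit tracks one page, so the bitmap needs (MemorySize / PAGE_SIZE) / 8 bytes, and the + 1 absorbs the truncation of both integer divisions. Worked example: for 4 GiB of memory and 4 KiB pages, (4294967296 / 4096) / 8 + 1 = 131073 bytes, so the entire allocator state for 4 GiB fits in about 128 KiB carved out of the largest free segment.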
@@ -13,18 +13,18 @@ namespace Memory
         void *AllocatedStack = KernelAllocator.RequestPages(TO_PAGES(USER_STACK_SIZE));
         debug("AllocatedStack: %p", AllocatedStack);
         memset(AllocatedStack, 0, USER_STACK_SIZE);
-        for (uint64_t i = 0; i < TO_PAGES(USER_STACK_SIZE); i++)
+        for (size_t i = 0; i < TO_PAGES(USER_STACK_SIZE); i++)
         {
             Virtual(Table).Map((void *)(USER_STACK_BASE + (i * PAGE_SIZE)),
-                               (void *)((uint64_t)AllocatedStack + (i * PAGE_SIZE)),
+                               (void *)((uintptr_t)AllocatedStack + (i * PAGE_SIZE)),
                                PTFlag::RW | PTFlag::US);
             debug("Mapped %p to %p", (void *)(USER_STACK_BASE + (i * PAGE_SIZE)),
-                  (void *)((uint64_t)AllocatedStack + (i * PAGE_SIZE)));
+                  (void *)((uintptr_t)AllocatedStack + (i * PAGE_SIZE)));
         }
         this->StackBottom = (void *)USER_STACK_BASE;
         this->StackTop = (void *)(USER_STACK_BASE + USER_STACK_SIZE);
         this->StackPhyiscalBottom = AllocatedStack;
-        this->StackPhyiscalTop = (void *)((uint64_t)AllocatedStack + USER_STACK_SIZE);
+        this->StackPhyiscalTop = (void *)((uintptr_t)AllocatedStack + USER_STACK_SIZE);
         this->Size = USER_STACK_SIZE;
     }
     else
@@ -33,7 +33,7 @@ namespace Memory
         this->StackPhyiscalBottom = this->StackBottom;
         debug("StackBottom: %p", this->StackBottom);
         memset(this->StackBottom, 0, STACK_SIZE);
-        this->StackTop = (void *)((uint64_t)this->StackBottom + STACK_SIZE);
+        this->StackTop = (void *)((uintptr_t)this->StackBottom + STACK_SIZE);
         this->StackPhyiscalTop = this->StackTop;
         this->Size = STACK_SIZE;
     }
@@ -47,12 +47,12 @@ namespace Memory
         // debug("Freed stack at %p", this->StackBottom);
     }

-    bool StackGuard::Expand(uint64_t FaultAddress)
+    bool StackGuard::Expand(uintptr_t FaultAddress)
     {
         if (this->UserMode)
         {
-            if (FaultAddress < (uint64_t)this->StackBottom - USER_STACK_SIZE ||
-                FaultAddress > (uint64_t)this->StackTop)
+            if (FaultAddress < (uintptr_t)this->StackBottom - USER_STACK_SIZE ||
+                FaultAddress > (uintptr_t)this->StackTop)
             {
                 return false; // It's not about the stack.
             }
@@ -61,12 +61,12 @@ namespace Memory
             void *AllocatedStack = KernelAllocator.RequestPages(TO_PAGES(USER_STACK_SIZE));
             debug("AllocatedStack: %p", AllocatedStack);
             memset(AllocatedStack, 0, USER_STACK_SIZE);
-            for (uint64_t i = 0; i < TO_PAGES(USER_STACK_SIZE); i++)
+            for (uintptr_t i = 0; i < TO_PAGES(USER_STACK_SIZE); i++)
             {
-                Virtual(this->Table).Map((void *)((uint64_t)this->StackBottom - (i * PAGE_SIZE)), (void *)((uint64_t)AllocatedStack + (i * PAGE_SIZE)), PTFlag::RW | PTFlag::US);
-                debug("Mapped %p to %p", (void *)((uint64_t)this->StackBottom - (i * PAGE_SIZE)), (void *)((uint64_t)AllocatedStack + (i * PAGE_SIZE)));
+                Virtual(this->Table).Map((void *)((uintptr_t)this->StackBottom - (i * PAGE_SIZE)), (void *)((uintptr_t)AllocatedStack + (i * PAGE_SIZE)), PTFlag::RW | PTFlag::US);
+                debug("Mapped %p to %p", (void *)((uintptr_t)this->StackBottom - (i * PAGE_SIZE)), (void *)((uintptr_t)AllocatedStack + (i * PAGE_SIZE)));
             }
-            this->StackBottom = (void *)((uint64_t)this->StackBottom - USER_STACK_SIZE);
+            this->StackBottom = (void *)((uintptr_t)this->StackBottom - USER_STACK_SIZE);
             this->Size += USER_STACK_SIZE;
             info("Stack expanded to %p", this->StackBottom);
             return true;
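Expand only fires for faults inside the window [(uintptr_t)StackBottom - USER_STACK_SIZE, (uintptr_t)StackTop]; anything outside is not stack growth and is reported back as a genuine fault. With hypothetical numbers, USER_STACK_SIZE = 0x10000, StackBottom = 0xFFFFB000, StackTop = 0xFFFFF000:

    // A fault at 0xFFFFA800 satisfies
    //   0xFFFFB000 - 0x10000 (= 0xFFFEB000) <= 0xFFFFA800 <= 0xFFFFF000,
    // so a fresh 0x10000-byte window is mapped below the old bottom and
    // StackBottom drops by USER_STACK_SIZE to 0xFFFEB000.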
@@ -11,22 +11,22 @@ namespace Memory
         return FROM_PAGES(Size);
     }

-    void *Tracker::RequestPages(uint64_t Count)
+    void *Tracker::RequestPages(size_t Count)
     {
         void *Address = KernelAllocator.RequestPages(Count);
-        for (uint64_t i = 0; i < Count; i++)
-            Memory::Virtual(this->PageTable).Remap((void *)((uint64_t)Address + (i * PAGE_SIZE)), (void *)((uint64_t)Address + (i * PAGE_SIZE)), Memory::PTFlag::RW | Memory::PTFlag::US);
+        for (size_t i = 0; i < Count; i++)
+            Memory::Virtual(this->PageTable).Remap((void *)((uintptr_t)Address + (i * PAGE_SIZE)), (void *)((uint64_t)Address + (i * PAGE_SIZE)), Memory::PTFlag::RW | Memory::PTFlag::US);
         AllocatedPagesList.push_back({Address, Count});
         return Address;
     }

-    void Tracker::FreePages(void *Address, uint64_t Count)
+    void Tracker::FreePages(void *Address, size_t Count)
     {
         KernelAllocator.FreePages(Address, Count);
-        for (uint64_t i = 0; i < Count; i++)
-            Memory::Virtual(this->PageTable).Remap((void *)((uint64_t)Address + (i * PAGE_SIZE)), (void *)((uint64_t)Address + (i * PAGE_SIZE)), Memory::PTFlag::RW);
+        for (size_t i = 0; i < Count; i++)
+            Memory::Virtual(this->PageTable).Remap((void *)((uintptr_t)Address + (i * PAGE_SIZE)), (void *)((uint64_t)Address + (i * PAGE_SIZE)), Memory::PTFlag::RW);

-        for (uint64_t i = 0; i < AllocatedPagesList.size(); i++)
+        for (uintptr_t i = 0; i < AllocatedPagesList.size(); i++)
             if (AllocatedPagesList[i].Address == Address)
             {
                 AllocatedPagesList.remove(i);
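The identity Remap after each RequestPages/FreePages is what actually toggles user visibility: each page keeps the same virtual and physical address, only its flags change (PTFlag::US added on allocation, dropped on free), and AllocatedPagesList remembers every run so the destructor below can undo the whole set. In outline:

    // Allocation path (shape of the hunk above):
    void *Address = KernelAllocator.RequestPages(Count);   // get physical pages
    //   Remap each page onto itself with RW | US  -> user-accessible
    //   AllocatedPagesList.push_back({Address, Count})    -> tracked for cleanup
    // Free path: FreePages, then Remap each page with RW only -> kernel-only again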
@@ -48,8 +48,8 @@ namespace Memory
         foreach (auto var in AllocatedPagesList)
         {
             KernelAllocator.FreePages(var.Address, var.PageCount);
-            for (uint64_t i = 0; i < var.PageCount; i++)
-                Memory::Virtual(this->PageTable).Remap((void *)((uint64_t)var.Address + (i * PAGE_SIZE)), (void *)((uint64_t)var.Address + (i * PAGE_SIZE)), Memory::PTFlag::RW);
+            for (size_t i = 0; i < var.PageCount; i++)
+                Memory::Virtual(this->PageTable).Remap((void *)((uintptr_t)var.Address + (i * PAGE_SIZE)), (void *)((uintptr_t)var.Address + (i * PAGE_SIZE)), Memory::PTFlag::RW);
         }
         debug("Tracker destroyed.");
     }
@@ -8,10 +8,10 @@ namespace Memory
     bool Virtual::Check(void *VirtualAddress, PTFlag Flag)
     {
         // 0x1000 aligned
-        uint64_t Address = (uint64_t)VirtualAddress;
+        uintptr_t Address = (uintptr_t)VirtualAddress;
         Address &= 0xFFFFFFFFFFFFF000;

-        PageMapIndexer Index = PageMapIndexer((uint64_t)Address);
+        PageMapIndexer Index = PageMapIndexer(Address);
         PageMapLevel4 PML4 = this->Table->Entries[Index.PMLIndex];

         PageDirectoryPointerTableEntryPtr *PDPTE = nullptr;
@@ -20,15 +20,15 @@ namespace Memory

         if ((PML4.raw & Flag) > 0)
         {
-            PDPTE = (PageDirectoryPointerTableEntryPtr *)((uint64_t)PML4.GetAddress() << 12);
+            PDPTE = (PageDirectoryPointerTableEntryPtr *)((uintptr_t)PML4.GetAddress() << 12);
             if (PDPTE)
                 if ((PDPTE->Entries[Index.PDPTEIndex].Present))
                 {
-                    PDE = (PageDirectoryEntryPtr *)((uint64_t)PDPTE->Entries[Index.PDPTEIndex].GetAddress() << 12);
+                    PDE = (PageDirectoryEntryPtr *)((uintptr_t)PDPTE->Entries[Index.PDPTEIndex].GetAddress() << 12);
                     if (PDE)
                         if ((PDE->Entries[Index.PDEIndex].Present))
                         {
-                            PTE = (PageTableEntryPtr *)((uint64_t)PDE->Entries[Index.PDEIndex].GetAddress() << 12);
+                            PTE = (PageTableEntryPtr *)((uintptr_t)PDE->Entries[Index.PDEIndex].GetAddress() << 12);
                             if (PTE)
                                 if ((PTE->Entries[Index.PTEIndex].Present))
                                 {
@@ -49,7 +49,7 @@ namespace Memory
             return;
         }

-        PageMapIndexer Index = PageMapIndexer((uint64_t)VirtualAddress);
+        PageMapIndexer Index = PageMapIndexer((uintptr_t)VirtualAddress);
         // Clear any flags that are not 1 << 0 (Present) - 1 << 5 (Accessed) because rest are for page table entries only
         uint64_t DirectoryFlags = Flags & 0x3F;

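The 0x3F mask keeps bits 0 through 5, the only flags that mean the same thing at every level of an x86 paging hierarchy:

    // 1 << 0  Present        1 << 1  Read/Write     1 << 2  User/Supervisor
    // 1 << 3  Write-Through  1 << 4  Cache-Disable  1 << 5  Accessed
    uint64_t DirectoryFlags = Flags & 0x3F; // == 0b00111111

Higher bits (Dirty, PAT, Global, and friends) are only meaningful on leaf entries, which is why they are stripped before being OR-ed into the upper-level entries.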
@@ -60,10 +60,10 @@ namespace Memory
             PDPTEPtr = (PageDirectoryPointerTableEntryPtr *)KernelAllocator.RequestPage();
             memset(PDPTEPtr, 0, PAGE_SIZE);
             PML4.Present = true;
-            PML4.SetAddress((uint64_t)PDPTEPtr >> 12);
+            PML4.SetAddress((uintptr_t)PDPTEPtr >> 12);
         }
         else
-            PDPTEPtr = (PageDirectoryPointerTableEntryPtr *)((uint64_t)PML4.GetAddress() << 12);
+            PDPTEPtr = (PageDirectoryPointerTableEntryPtr *)((uintptr_t)PML4.GetAddress() << 12);
         PML4.raw |= DirectoryFlags;
         this->Table->Entries[Index.PMLIndex] = PML4;

@@ -74,10 +74,10 @@ namespace Memory
             PDEPtr = (PageDirectoryEntryPtr *)KernelAllocator.RequestPage();
             memset(PDEPtr, 0, PAGE_SIZE);
             PDPTE.Present = true;
-            PDPTE.SetAddress((uint64_t)PDEPtr >> 12);
+            PDPTE.SetAddress((uintptr_t)PDEPtr >> 12);
         }
         else
-            PDEPtr = (PageDirectoryEntryPtr *)((uint64_t)PDPTE.GetAddress() << 12);
+            PDEPtr = (PageDirectoryEntryPtr *)((uintptr_t)PDPTE.GetAddress() << 12);
         PDPTE.raw |= DirectoryFlags;
         PDPTEPtr->Entries[Index.PDPTEIndex] = PDPTE;

@@ -88,17 +88,17 @@ namespace Memory
             PTEPtr = (PageTableEntryPtr *)KernelAllocator.RequestPage();
             memset(PTEPtr, 0, PAGE_SIZE);
             PDE.Present = true;
-            PDE.SetAddress((uint64_t)PTEPtr >> 12);
+            PDE.SetAddress((uintptr_t)PTEPtr >> 12);
         }
         else
-            PTEPtr = (PageTableEntryPtr *)((uint64_t)PDE.GetAddress() << 12);
+            PTEPtr = (PageTableEntryPtr *)((uintptr_t)PDE.GetAddress() << 12);
         PDE.raw |= DirectoryFlags;
         PDEPtr->Entries[Index.PDEIndex] = PDE;

         PageTableEntry PTE = PTEPtr->Entries[Index.PTEIndex];
         PTE.Present = true;
         PTE.raw |= Flags;
-        PTE.SetAddress((uint64_t)PhysicalAddress >> 12);
+        PTE.SetAddress((uintptr_t)PhysicalAddress >> 12);
         PTEPtr->Entries[Index.PTEIndex] = PTE;

 #if defined(__amd64__)
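Every SetAddress stores a page-frame number (the physical address shifted right by 12) and every GetAddress use shifts left by 12 to rebuild the byte address; uintptr_t keeps those shifts pointer-width-correct. For example:

    // Physical address 0x123456000 -> frame number 0x123456
    PTE.SetAddress((uintptr_t)PhysicalAddress >> 12); // stores 0x123456
    // ...later: (uintptr_t)PTE.GetAddress() << 12    // rebuilds 0x123456000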
@@ -133,10 +133,10 @@ namespace Memory
 #endif
     }

-    void Virtual::Map(void *VirtualAddress, void *PhysicalAddress, uint64_t PageCount, uint64_t Flags)
+    void Virtual::Map(void *VirtualAddress, void *PhysicalAddress, size_t PageCount, uint64_t Flags)
     {
-        for (uint64_t i = 0; i < PageCount; i++)
-            this->Map((void *)((uint64_t)VirtualAddress + (i * PAGE_SIZE)), (void *)((uint64_t)PhysicalAddress + (i * PAGE_SIZE)), Flags);
+        for (size_t i = 0; i < PageCount; i++)
+            this->Map((void *)((uintptr_t)VirtualAddress + (i * PAGE_SIZE)), (void *)((uintptr_t)PhysicalAddress + (i * PAGE_SIZE)), Flags);
     }

     void Virtual::Unmap(void *VirtualAddress)
@@ -148,28 +148,28 @@ namespace Memory
             return;
         }

-        PageMapIndexer Index = PageMapIndexer((uint64_t)VirtualAddress);
+        PageMapIndexer Index = PageMapIndexer((uintptr_t)VirtualAddress);
         PageMapLevel4 PML4 = this->Table->Entries[Index.PMLIndex];
         if (!PML4.Present)
         {
             error("Page not present");
             return;
         }
-        PageDirectoryPointerTableEntryPtr *PDPTEPtr = (PageDirectoryPointerTableEntryPtr *)((uint64_t)PML4.Address << 12);
+        PageDirectoryPointerTableEntryPtr *PDPTEPtr = (PageDirectoryPointerTableEntryPtr *)((uintptr_t)PML4.Address << 12);
         PageDirectoryPointerTableEntry PDPTE = PDPTEPtr->Entries[Index.PDPTEIndex];
         if (!PDPTE.Present)
         {
             error("Page not present");
             return;
         }
-        PageDirectoryEntryPtr *PDEPtr = (PageDirectoryEntryPtr *)((uint64_t)PDPTE.Address << 12);
+        PageDirectoryEntryPtr *PDEPtr = (PageDirectoryEntryPtr *)((uintptr_t)PDPTE.Address << 12);
         PageDirectoryEntry PDE = PDEPtr->Entries[Index.PDEIndex];
         if (!PDE.Present)
         {
             error("Page not present");
             return;
         }
-        PageTableEntryPtr *PTEPtr = (PageTableEntryPtr *)((uint64_t)PDE.Address << 12);
+        PageTableEntryPtr *PTEPtr = (PageTableEntryPtr *)((uintptr_t)PDE.Address << 12);
         PageTableEntry PTE = PTEPtr->Entries[Index.PTEIndex];
         if (!PTE.Present)
         {
@@ -195,10 +195,10 @@ namespace Memory
 #endif
     }

-    void Virtual::Unmap(void *VirtualAddress, uint64_t PageCount)
+    void Virtual::Unmap(void *VirtualAddress, size_t PageCount)
     {
-        for (uint64_t i = 0; i < PageCount; i++)
-            this->Unmap((void *)((uint64_t)VirtualAddress + (i * PAGE_SIZE)));
+        for (size_t i = 0; i < PageCount; i++)
+            this->Unmap((void *)((uintptr_t)VirtualAddress + (i * PAGE_SIZE)));
     }

     void Virtual::Remap(void *VirtualAddress, void *PhysicalAddress, uint64_t Flags)