Fennix-Project/Kernel (https://github.com/Fennix-Project/Kernel.git)
Refactor memory allocation implementation and add KernelReserve flag to memory mappings
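Every hunk below revolves around the new KRsv (KernelReserve) page-table flag: boot code tags the framebuffer and kernel-image mappings with it, and the reworked VirtualMemoryArea paths (RequestPages, FreePages, Map, Remap, Unmap, CreateCoWRegion) refuse to touch pages that carry it. A minimal standalone sketch of the concept follows; the flag names mirror the diff, but the bit positions are illustrative assumptions, not the kernel's actual layout.

#include <cstdint>
#include <cstdio>

/* Illustrative page-table flag bits only; the kernel's real PTFlag enum
   lives in its paging headers and is not part of this diff. */
enum PTFlag : uint64_t
{
	P = 1ULL << 0,	  /* present */
	RW = 1ULL << 1,	  /* writable */
	US = 1ULL << 2,	  /* user-accessible */
	G = 1ULL << 8,	  /* global */
	CoW = 1ULL << 9,  /* copy-on-write (software-defined bit) */
	KRsv = 1ULL << 10 /* kernel-reserved (software-defined bit, assumed position) */
};

int main()
{
	/* Framebuffer mapping flags, as in MapFramebuffer below. */
	uint64_t fb = RW | G | KRsv;
	/* A user-facing mapper can veto any request that touches reserved pages: */
	if (fb & KRsv)
		std::puts("page is kernel-reserved; user mapping request rejected (-EPERM)");
	return 0;
}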
@@ -122,13 +122,13 @@ NIF void MapFramebuffer(PageTable *PT)
 		{
 			vmm.OptimizedMap(bInfo.Framebuffer[itrfb].BaseAddress,
 							 bInfo.Framebuffer[itrfb].BaseAddress,
-							 fbSize, RW | G);
+							 fbSize, RW | G | KRsv);
 		}
 		else
 		{
 			vmm.Map(bInfo.Framebuffer[itrfb].BaseAddress,
 					bInfo.Framebuffer[itrfb].BaseAddress,
-					fbSize, RW | G);
+					fbSize, RW | G | KRsv);
 		}
 		itrfb++;
 	}
@@ -190,7 +190,7 @@ NIF void MapKernel(PageTable *PT)
 	{
 		for (k = BootstrapStart; k < BootstrapEnd; k += PAGE_SIZE)
 		{
-			vmm.Map((void *)k, (void *)BaseKernelMapAddress, RW | G);
+			vmm.Map((void *)k, (void *)BaseKernelMapAddress, RW | G | KRsv);
 			KernelAllocator.ReservePage((void *)BaseKernelMapAddress);
 			BaseKernelMapAddress += PAGE_SIZE;
 		}
@@ -204,7 +204,7 @@ NIF void MapKernel(PageTable *PT)
 	/* Text section */
 	for (k = KernelTextStart; k < KernelTextEnd; k += PAGE_SIZE)
 	{
-		vmm.Map((void *)k, (void *)BaseKernelMapAddress, RW | G);
+		vmm.Map((void *)k, (void *)BaseKernelMapAddress, RW | G | KRsv);
 		KernelAllocator.ReservePage((void *)BaseKernelMapAddress);
 		BaseKernelMapAddress += PAGE_SIZE;
 	}
@@ -212,7 +212,7 @@ NIF void MapKernel(PageTable *PT)
 	/* Data section */
 	for (k = KernelDataStart; k < KernelDataEnd; k += PAGE_SIZE)
 	{
-		vmm.Map((void *)k, (void *)BaseKernelMapAddress, RW | G);
+		vmm.Map((void *)k, (void *)BaseKernelMapAddress, RW | G | KRsv);
 		KernelAllocator.ReservePage((void *)BaseKernelMapAddress);
 		BaseKernelMapAddress += PAGE_SIZE;
 	}
@@ -220,7 +220,7 @@ NIF void MapKernel(PageTable *PT)
 	/* Read only data section */
 	for (k = KernelRoDataStart; k < KernelRoDataEnd; k += PAGE_SIZE)
 	{
-		vmm.Map((void *)k, (void *)BaseKernelMapAddress, G);
+		vmm.Map((void *)k, (void *)BaseKernelMapAddress, G | KRsv);
 		KernelAllocator.ReservePage((void *)BaseKernelMapAddress);
 		BaseKernelMapAddress += PAGE_SIZE;
 	}
@@ -228,7 +228,7 @@ NIF void MapKernel(PageTable *PT)
 	/* Block starting symbol section */
 	for (k = KernelBssStart; k < KernelBssEnd; k += PAGE_SIZE)
 	{
-		vmm.Map((void *)k, (void *)BaseKernelMapAddress, RW | G);
+		vmm.Map((void *)k, (void *)BaseKernelMapAddress, RW | G | KRsv);
 		KernelAllocator.ReservePage((void *)BaseKernelMapAddress);
 		BaseKernelMapAddress += PAGE_SIZE;
 	}
@@ -240,7 +240,7 @@ NIF void MapKernel(PageTable *PT)
 	{
 		for (k = KernelFileStart; k < KernelFileEnd; k += PAGE_SIZE)
 		{
-			vmm.Map((void *)k, (void *)k, G);
+			vmm.Map((void *)k, (void *)k, G | KRsv);
 			KernelAllocator.ReservePage((void *)k);
 		}
 	}
@@ -38,7 +38,7 @@ namespace Memory
 		debug("AllocatedStack: %#lx", AllocatedStack);
 		memset(AllocatedStack, 0, USER_STACK_SIZE);
 
-		Virtual vmm = Virtual(this->vma->GetTable());
+		Virtual vmm = Virtual(this->vma->Table);
 		for (size_t i = 0; i < TO_PAGES(USER_STACK_SIZE); i++)
 		{
 			void *VirtualPage = (void *)((uintptr_t)this->StackBottom - (i * PAGE_SIZE));
@@ -79,8 +79,8 @@ namespace Memory
 
 	if (this->UserMode)
 	{
-		std::vector<AllocatedPages> ParentAllocatedPages = Parent->GetAllocatedPages();
-		Virtual vma(this->vma->GetTable());
+		std::list<AllocatedPages> ParentAllocatedPages = Parent->GetAllocatedPages();
+		Virtual vma(this->vma->Table);
 		foreach (auto Page in ParentAllocatedPages)
 		{
 			void *NewPhysical = this->vma->RequestPages(1);
@@ -114,7 +114,7 @@ namespace Memory
 	debug("AllocatedStack: %#lx", AllocatedStack);
 
 	{
-		Virtual vmm = Virtual(vma->GetTable());
+		Virtual vmm = Virtual(vma->Table);
 		for (size_t i = 0; i < TO_PAGES(USER_STACK_SIZE); i++)
 		{
 			void *VirtualPage = (void *)(USER_STACK_BASE + (i * PAGE_SIZE));
@@ -17,51 +17,14 @@
 
 #include <memory/vma.hpp>
+#include <memory/table.hpp>
 
 #include <cpu.hpp>
 #include <debug.h>
-#include <bitset>
 
 #include "../../kernel.h"
 
 namespace Memory
 {
-	// ReadFSFunction(MEM_Read)
-	// {
-	// 	if (Size <= 0)
-	// 		Size = node->Length;
-
-	// 	if (RefOffset > node->Length)
-	// 		return 0;
-
-	// 	if ((node->Length - RefOffset) == 0)
-	// 		return 0; /* EOF */
-
-	// 	if (RefOffset + (off_t)Size > node->Length)
-	// 		Size = node->Length;
-
-	// 	memcpy(Buffer, (uint8_t *)(node->Address + RefOffset), Size);
-	// 	return Size;
-	// }
-
-	// WriteFSFunction(MEM_Write)
-	// {
-	// 	if (Size <= 0)
-	// 		Size = node->Length;
-
-	// 	if (RefOffset > node->Length)
-	// 		return 0;
-
-	// 	if (RefOffset + (off_t)Size > node->Length)
-	// 		Size = node->Length;
-
-	// 	memcpy((uint8_t *)(node->Address + RefOffset), Buffer, Size);
-	// 	return Size;
-	// }
-
-	// vfs::FileSystemOperations mem_op = {
-	// 	.Name = "mem",
-	// 	.Read = MEM_Read,
-	// 	.Write = MEM_Write,
-	// };
-
 	uint64_t VirtualMemoryArea::GetAllocatedMemorySize()
 	{
 		SmartLock(MgrLock);
@@ -71,120 +34,89 @@ namespace Memory
 		return FROM_PAGES(Size);
 	}
 
-	bool VirtualMemoryArea::Add(void *Address, size_t Count)
+	void *VirtualMemoryArea::RequestPages(size_t Count, bool User, bool Protect)
 	{
-		SmartLock(MgrLock);
-		function("%#lx, %lld", Address, Count);
-
-		if (Address == nullptr)
-		{
-			error("Address is null!");
-			return false;
-		}
-
-		if (Count == 0)
-		{
-			error("Count is 0!");
-			return false;
-		}
-
-		for (size_t i = 0; i < AllocatedPagesList.size(); i++)
-		{
-			if (AllocatedPagesList[i].Address == Address)
-			{
-				error("Address already exists!");
-				return false;
-			}
-			else if ((uintptr_t)Address < (uintptr_t)AllocatedPagesList[i].Address)
-			{
-				if ((uintptr_t)Address + (Count * PAGE_SIZE) > (uintptr_t)AllocatedPagesList[i].Address)
-				{
-					error("Address intersects with an allocated page!");
-					return false;
-				}
-			}
-			else
-			{
-				if ((uintptr_t)AllocatedPagesList[i].Address + (AllocatedPagesList[i].PageCount * PAGE_SIZE) > (uintptr_t)Address)
-				{
-					error("Address intersects with an allocated page!");
-					return false;
-				}
-			}
-		}
-
-		AllocatedPagesList.push_back({Address, Count});
-		return true;
-	}
-
-	void *VirtualMemoryArea::RequestPages(size_t Count, bool User)
-	{
-		SmartLock(MgrLock);
-		function("%lld, %s", Count, User ? "true" : "false");
+		function("%lld, %s, %s", Count,
+				 User ? "true" : "false",
+				 Protect ? "true" : "false");
 
 		void *Address = KernelAllocator.RequestPages(Count);
 		memset(Address, 0, Count * PAGE_SIZE);
-		for (size_t i = 0; i < Count; i++)
-		{
-			int Flags = Memory::PTFlag::RW;
-			if (User)
-				Flags |= Memory::PTFlag::US;
-
-			void *AddressToMap = (void *)((uintptr_t)Address + (i * PAGE_SIZE));
+		int Flags = PTFlag::RW;
+		if (User)
+			Flags |= PTFlag::US;
+		if (Protect)
+			Flags |= PTFlag::KRsv;
 
-			Memory::Virtual vmm(this->Table);
-			vmm.Map(AddressToMap, AddressToMap, Flags);
-		}
+		Virtual vmm(this->Table);
 
-		AllocatedPagesList.push_back({Address, Count});
+		SmartLock(MgrLock);
+
+		vmm.Map(Address, Address, FROM_PAGES(Count), Flags);
+		AllocatedPagesList.push_back({Address, Count, Protect});
+		debug("%#lx +{%#lx, %lld}", this, Address, Count);
 		return Address;
 	}
 
 	void VirtualMemoryArea::FreePages(void *Address, size_t Count)
 	{
-		SmartLock(MgrLock);
 		function("%#lx, %lld", Address, Count);
 
+		SmartLock(MgrLock);
 		forItr(itr, AllocatedPagesList)
 		{
-			if (itr->Address == Address)
+			if (itr->Address != Address)
+				continue;
+
+			if (itr->Protected)
 			{
-				/** TODO: Advanced checks. Allow if the page count is less than the requested one.
-				 * This will allow the user to free only a part of the allocated pages.
-				 *
-				 * But this will be in a separate function because we need to specify if we
-				 * want to free from the start or from the end and return the new address.
-				 */
-				if (itr->PageCount != Count)
-				{
-					error("Page count mismatch! (Allocated: %lld, Requested: %lld)", itr->PageCount, Count);
-					return;
-				}
-
-				KernelAllocator.FreePages(Address, Count);
-
-				Memory::Virtual vmm(this->Table);
-				for (size_t i = 0; i < Count; i++)
-				{
-					void *AddressToMap = (void *)((uintptr_t)Address + (i * PAGE_SIZE));
-					vmm.Remap(AddressToMap, AddressToMap, Memory::PTFlag::RW);
-					// vmm.Unmap((void *)((uintptr_t)Address + (i * PAGE_SIZE)));
-				}
-				AllocatedPagesList.erase(itr);
+				error("Address %#lx is protected", Address);
 				return;
 			}
+
+			/** TODO: Advanced checks. Allow if the page count is less than the requested one.
+			 * This will allow the user to free only a part of the allocated pages.
+			 *
+			 * But this will be in a separate function because we need to specify if we
+			 * want to free from the start or from the end and return the new address.
+			 */
+			if (itr->PageCount != Count)
+			{
+				error("Page count mismatch! (Allocated: %lld, Requested: %lld)",
+					  itr->PageCount, Count);
+				return;
+			}
+
+			Virtual vmm(this->Table);
+			for (size_t i = 0; i < Count; i++)
+			{
+				void *AddressToMap = (void *)((uintptr_t)Address + (i * PAGE_SIZE));
+				vmm.Remap(AddressToMap, AddressToMap, PTFlag::RW);
+			}
+
+			KernelAllocator.FreePages(Address, Count);
+			AllocatedPagesList.erase(itr);
+			debug("%#lx -{%#lx, %lld}", this, Address, Count);
+			return;
 		}
 	}
 
 	void VirtualMemoryArea::DetachAddress(void *Address)
 	{
-		SmartLock(MgrLock);
 		function("%#lx", Address);
 
+		SmartLock(MgrLock);
 		forItr(itr, AllocatedPagesList)
 		{
 			if (itr->Address == Address)
 			{
+				if (itr->Protected)
+				{
+					error("Address %#lx is protected", Address);
+					return;
+				}
+
 				AllocatedPagesList.erase(itr);
 				return;
 			}
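To make the hunk above easier to follow, here is a hedged, self-contained model of the bookkeeping that RequestPages, FreePages and DetachAddress now share: each AllocatedPagesList entry records the new Protected bit, and frees are refused for protected entries or mismatched page counts. Locking, page tables and the physical allocator are stubbed out; the partial-free helper imagined by the TODO is only sketched in a comment, since it does not exist in this commit.

#include <cstddef>
#include <cstdio>
#include <list>

/* Toy model of the VMA bookkeeping; the names mirror the diff, everything
   else is elided. */
struct AllocatedPages
{
	void *Address;
	size_t PageCount;
	bool Protected; /* new field, set when RequestPages is called with Protect */
};

static std::list<AllocatedPages> AllocatedPagesList;

void FreePages(void *Address, size_t Count)
{
	for (auto itr = AllocatedPagesList.begin(); itr != AllocatedPagesList.end(); ++itr)
	{
		if (itr->Address != Address)
			continue;

		if (itr->Protected)
		{
			std::printf("Address %p is protected\n", Address);
			return;
		}

		/* Exact match required; the TODO above imagines a separate,
		   hypothetical helper (say, FreePagesFromStart/FreePagesFromEnd)
		   that would trim an entry and return the surviving base address. */
		if (itr->PageCount != Count)
		{
			std::printf("Page count mismatch! (Allocated: %zu, Requested: %zu)\n",
						itr->PageCount, Count);
			return;
		}

		/* The real code remaps the range back to plain RW and releases it
		   to the physical allocator here. */
		AllocatedPagesList.erase(itr);
		return;
	}
}

int main()
{
	static char page[0x1000];
	AllocatedPagesList.push_back({page, 1, true});
	FreePages(page, 1); /* rejected: the entry is protected */
}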
@@ -203,14 +135,14 @@ namespace Memory
 			  Fixed ? "true" : "false",
 			  Shared ? "true" : "false");
 
-		Memory::Virtual vmm(this->Table);
+		Virtual vmm(this->Table);
 
 		// FIXME
 		// for (uintptr_t j = uintptr_t(Address);
 		// 	 j < uintptr_t(Address) + Length;
 		// 	 j += PAGE_SIZE)
 		// {
-		// 	if (vmm.Check((void *)j, Memory::G))
+		// 	if (vmm.Check((void *)j, G))
 		// 	{
 		// 		if (Fixed)
 		// 			return (void *)-EINVAL;
@@ -230,6 +162,12 @@ namespace Memory
 		}
 
+		SmartLock(MgrLock);
+		if (vmm.Check(Address, PTFlag::KRsv))
+		{
+			error("Cannot create CoW region at %#lx", Address);
+			return (void *)-EPERM;
+		}
+
 		vmm.Unmap(Address, Length);
 		vmm.Map(Address, nullptr, Length, PTFlag::CoW);
 		debug("CoW region created at range %#lx-%#lx for pt %#lx",
@@ -254,8 +192,10 @@ namespace Memory
 	bool VirtualMemoryArea::HandleCoW(uintptr_t PFA)
 	{
 		function("%#lx", PFA);
-		Memory::Virtual vmm(this->Table);
-		Memory::PageTableEntry *pte = vmm.GetPTE((void *)PFA);
+		Virtual vmm(this->Table);
+		PageTableEntry *pte = vmm.GetPTE((void *)PFA);
+
+		debug("ctx: %#lx", this);
 
 		if (!pte)
 		{
@@ -264,51 +204,51 @@ namespace Memory
 			return false;
 		}
 
-		if (pte->CopyOnWrite == true)
+		if (!pte->CopyOnWrite)
 		{
-			foreach (auto sr in SharedRegions)
+			debug("PFA %#lx is not CoW (pt %#lx) (flags %#lx)",
+				  PFA, this->Table, pte->raw);
+			return false;
+		}
+
+		foreach (auto sr in SharedRegions)
+		{
+			uintptr_t Start = (uintptr_t)sr.Address;
+			uintptr_t End = (uintptr_t)sr.Address + sr.Length;
+			debug("Start: %#lx, End: %#lx (PFA: %#lx)",
+				  Start, End, PFA);
+
+			if (PFA >= Start && PFA < End)
 			{
-				uintptr_t Start = (uintptr_t)sr.Address;
-				uintptr_t End = (uintptr_t)sr.Address + sr.Length;
-
-				if (PFA >= Start && PFA < End)
+				if (sr.Shared)
 				{
-					debug("Start: %#lx, End: %#lx (PFA: %#lx)",
-						  Start, End, PFA);
-
-					if (sr.Shared)
-					{
-						fixme("Shared CoW");
-						return false;
-					}
-					else
-					{
-						void *pAddr = this->RequestPages(1);
-						if (pAddr == nullptr)
-							return false;
-						memset(pAddr, 0, PAGE_SIZE);
-
-						assert(pte->Present == true);
-						pte->ReadWrite = sr.Write;
-						pte->UserSupervisor = sr.Read;
-						pte->ExecuteDisable = sr.Exec;
-
-						pte->CopyOnWrite = false;
-						debug("PFA %#lx is CoW (pt %#lx, flags %#lx)",
-							  PFA, this->Table, pte->raw);
-#if defined(a64)
-						CPU::x64::invlpg((void *)PFA);
-#elif defined(a32)
-						CPU::x32::invlpg((void *)PFA);
-#endif
-						return true;
-					}
+					fixme("Shared CoW");
+					return false;
 				}
+
+				void *pAddr = this->RequestPages(1);
+				if (pAddr == nullptr)
+					return false;
+				memset(pAddr, 0, PAGE_SIZE);
+
+				assert(pte->Present == true);
+				pte->ReadWrite = sr.Write;
+				pte->UserSupervisor = sr.Read;
+				pte->ExecuteDisable = sr.Exec;
+
+				pte->CopyOnWrite = false;
+				debug("PFA %#lx is CoW (pt %#lx, flags %#lx)",
+					  PFA, this->Table, pte->raw);
+#if defined(a64)
+				CPU::x64::invlpg((void *)PFA);
+#elif defined(a32)
+				CPU::x32::invlpg((void *)PFA);
+#endif
+				return true;
 			}
 		}
 
-		debug("PFA %#lx is not CoW (pt %#lx)",
-			  PFA, this->Table);
+		debug("%#lx not found in CoW regions", PFA);
 		return false;
 	}
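For orientation: the handler above resolves a write fault on a private CoW page by allocating a fresh page (elided below), restoring the region's permissions on the faulting PTE, clearing the CoW bit, and invalidating the TLB entry; shared CoW remains a fixme. A compressed standalone model of that PTE fix-up, with the kernel types stubbed and the assignments mirroring the diff verbatim:

#include <cstdio>

/* Stubs of the PTE and shared-region fields this handler touches;
   the layout is illustrative only. */
struct PageTableEntry
{
	bool Present, ReadWrite, UserSupervisor, ExecuteDisable, CopyOnWrite;
};

struct SharedRegion
{
	bool Write, Read, Exec, Shared;
};

static void invlpg(void *) { /* would flush the TLB entry on real hardware */ }

/* Mirrors the private-region branch of HandleCoW: give the page its real
   permissions back and drop the CoW marker. */
bool ResolveCoW(PageTableEntry &pte, const SharedRegion &sr, void *FaultAddress)
{
	if (!pte.CopyOnWrite || sr.Shared)
		return false; /* not CoW, or shared CoW (still unimplemented) */

	pte.ReadWrite = sr.Write;
	pte.UserSupervisor = sr.Read;
	pte.ExecuteDisable = sr.Exec;
	pte.CopyOnWrite = false;
	invlpg(FaultAddress);
	return true;
}

int main()
{
	PageTableEntry pte{true, false, true, false, true};
	SharedRegion sr{true, true, false, false};
	std::printf("resolved: %d, writable now: %d\n",
				(int)ResolveCoW(pte, sr, nullptr), (int)pte.ReadWrite);
}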
@@ -318,11 +258,11 @@ namespace Memory
 		foreach (auto ap in AllocatedPagesList)
 		{
 			KernelAllocator.FreePages(ap.Address, ap.PageCount);
-			Memory::Virtual vmm(this->Table);
+			Virtual vmm(this->Table);
 			for (size_t i = 0; i < ap.PageCount; i++)
 				vmm.Remap((void *)((uintptr_t)ap.Address + (i * PAGE_SIZE)),
 						  (void *)((uintptr_t)ap.Address + (i * PAGE_SIZE)),
-						  Memory::PTFlag::RW);
+						  PTFlag::RW);
 		}
 		AllocatedPagesList.clear();
 	}
@@ -330,59 +270,66 @@ namespace Memory
 	void VirtualMemoryArea::Fork(VirtualMemoryArea *Parent)
 	{
 		function("%#lx", Parent);
+		assert(Parent);
 
-		if (Parent == nullptr)
-		{
-			error("Parent is null!");
-			return;
-		}
+		debug("parent apl:%d sr:%d [P:%#lx C:%#lx]",
+			  Parent->AllocatedPagesList.size(), Parent->SharedRegions.size(),
+			  Parent->Table, this->Table);
+		debug("ctx: this: %#lx parent: %#lx", this, Parent);
 
 		if (Parent->Table == nullptr)
 		{
 			error("Parent's table is null!");
 			return;
 		}
 
-		Memory::Virtual vmm(this->Table);
-		foreach (auto &ap in Parent->GetAllocatedPagesList())
+		Virtual vmm(this->Table);
+		SmartLock(MgrLock);
+		foreach (auto &ap in Parent->AllocatedPagesList)
 		{
+			if (ap.Protected)
+			{
+				debug("Protected %#lx-%#lx", ap.Address,
+					  (uintptr_t)ap.Address + (ap.PageCount * PAGE_SIZE));
+				continue; /* We don't want to modify these pages. */
+			}
+
+			MgrLock.Unlock();
 			void *Address = this->RequestPages(ap.PageCount);
+			MgrLock.Lock(__FUNCTION__);
 			if (Address == nullptr)
 				return;
 
-			memcpy(Address, ap.Address, ap.PageCount * PAGE_SIZE);
+			memcpy(Address, ap.Address, FROM_PAGES(ap.PageCount));
 
 			// map these new allocated pages to be the same as the parent
 			for (size_t i = 0; i < ap.PageCount; i++)
 			{
 				void *AddressToMap = (void *)((uintptr_t)ap.Address + (i * PAGE_SIZE));
 				void *RealAddress = (void *)((uintptr_t)Address + (i * PAGE_SIZE));
 
 #if defined(a86)
-				Memory::PageTableEntry *pte = vmm.GetPTE(AddressToMap);
+				PageTableEntry *pte = vmm.GetPTE(AddressToMap);
 				uintptr_t Flags = 0;
-				Flags |= pte->Present ? 1UL : 0;
-				Flags |= pte->ReadWrite ? 2UL : 0;
-				Flags |= pte->UserSupervisor ? 4UL : 0;
-				Flags |= pte->CopyOnWrite ? 512UL : 0;
+				Flags |= pte->Present ? (uintptr_t)PTFlag::P : 0;
+				Flags |= pte->ReadWrite ? (uintptr_t)PTFlag::RW : 0;
+				Flags |= pte->UserSupervisor ? (uintptr_t)PTFlag::US : 0;
+				Flags |= pte->CopyOnWrite ? (uintptr_t)PTFlag::CoW : 0;
+				Flags |= pte->KernelReserve ? (uintptr_t)PTFlag::KRsv : 0;
 
-				debug("Mapping %#lx to %#lx (flags %s/%s/%s/%s)",
+				debug("Mapping %#lx to %#lx (flags %s/%s/%s/%s/%s)",
 					  RealAddress, AddressToMap,
 					  Flags & PTFlag::P ? "P" : "-",
 					  Flags & PTFlag::RW ? "RW" : "-",
 					  Flags & PTFlag::US ? "US" : "-",
-					  Flags & PTFlag::CoW ? "CoW" : "-");
-				vmm.Map(AddressToMap, RealAddress, Flags);
+					  Flags & PTFlag::CoW ? "CoW" : "-",
+					  Flags & PTFlag::KRsv ? "KRsv" : "-");
+				MgrLock.Unlock();
+				this->Map(AddressToMap, RealAddress, PAGE_SIZE, Flags);
+				MgrLock.Lock(__FUNCTION__);
 #else
-#warning "Not implemented"
+#error "Not implemented"
 #endif
 			}
+
+			debug("Forked %#lx-%#lx", ap.Address,
+				  (uintptr_t)ap.Address + (ap.PageCount * PAGE_SIZE));
 		}
 
-		foreach (auto &sr in Parent->GetSharedRegions())
+		foreach (auto &sr in Parent->SharedRegions)
 		{
+			MgrLock.Unlock();
 			void *Address = this->CreateCoWRegion(sr.Address, sr.Length,
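One detail of the Fork() hunk above worth isolating: the child's mapping flags are re-derived from the parent's decoded PTE bits, now spelled with PTFlag constants instead of raw literals and extended with KRsv. A standalone model of that re-encoding step; the P, RW, US and CoW values match the literals the old code used (1, 2, 4, 512), while the KRsv value is an assumption:

#include <cstdint>
#include <cstdio>

enum PTFlag : uint64_t
{
	P = 1,
	RW = 2,
	US = 4,
	CoW = 512,
	KRsv = 1024 /* assumed value for illustration */
};

/* Decoded view of the parent's PTE, as Fork() reads it. */
struct PageTableEntry
{
	bool Present, ReadWrite, UserSupervisor, CopyOnWrite, KernelReserve;
};

/* Mirrors the hunk above: rebuild a flags mask from the parent's PTE bits
   so the copied pages keep identical attributes. */
uint64_t EncodeFlags(const PageTableEntry &pte)
{
	uint64_t Flags = 0;
	Flags |= pte.Present ? (uint64_t)PTFlag::P : 0;
	Flags |= pte.ReadWrite ? (uint64_t)PTFlag::RW : 0;
	Flags |= pte.UserSupervisor ? (uint64_t)PTFlag::US : 0;
	Flags |= pte.CopyOnWrite ? (uint64_t)PTFlag::CoW : 0;
	Flags |= pte.KernelReserve ? (uint64_t)PTFlag::KRsv : 0;
	return Flags;
}

int main()
{
	PageTableEntry pte{true, true, true, false, false};
	std::printf("flags: %#lx\n", (unsigned long)EncodeFlags(pte)); /* prints 0x7 */
}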
@@ -392,53 +339,154 @@ namespace Memory
 			if (Address == nullptr)
 				return;
 			memcpy(Address, sr.Address, sr.Length);
+			debug("Forked CoW region %#lx-%#lx", sr.Address,
+				  (uintptr_t)sr.Address + sr.Length);
 		}
 	}
 
-	VirtualMemoryArea::VirtualMemoryArea(PageTable *Table)
+	int VirtualMemoryArea::Map(void *VirtualAddress, void *PhysicalAddress,
+							   size_t Length, uint64_t Flags)
 	{
-		debug("+ %#lx %s", this,
-			  KernelSymbolTable ? KernelSymbolTable->GetSymbol((uintptr_t)__builtin_return_address(0)) : "");
-
+		Virtual vmm(this->Table);
 		SmartLock(MgrLock);
-		if (Table)
-			this->Table = Table;
-		else
+
+		uintptr_t intVirtualAddress = (uintptr_t)VirtualAddress;
+		uintptr_t intPhysicalAddress = (uintptr_t)PhysicalAddress;
+
+		for (uintptr_t va = intVirtualAddress;
+			 va < intVirtualAddress + Length; va += PAGE_SIZE)
+		{
+			if (vmm.Check(VirtualAddress, PTFlag::KRsv))
+			{
+				error("Virtual address %#lx is reserved", VirtualAddress);
+				return -EPERM;
+			}
+		}
+
+		for (uintptr_t va = intPhysicalAddress;
+			 va < intPhysicalAddress + Length; va += PAGE_SIZE)
+		{
+			if (vmm.Check(PhysicalAddress, PTFlag::KRsv))
+			{
+				error("Physical address %#lx is reserved", PhysicalAddress);
+				return -EPERM;
+			}
+		}
+
+		vmm.Map(VirtualAddress, PhysicalAddress, Length, Flags);
+		debug("Mapped %#lx-%#lx to %#lx-%#lx (flags %#lx)",
+			  VirtualAddress, intVirtualAddress + Length,
+			  PhysicalAddress, intPhysicalAddress + Length,
+			  Flags);
+		return 0;
+	}
+
+	int VirtualMemoryArea::Remap(void *VirtualAddress, void *PhysicalAddress, uint64_t Flags)
+	{
+		Virtual vmm(this->Table);
+		SmartLock(MgrLock);
+
+		if (vmm.Check(VirtualAddress, PTFlag::KRsv))
+		{
+			error("Virtual address %#lx is reserved", VirtualAddress);
+			return -EPERM;
+		}
+
+		if (vmm.Check(PhysicalAddress, PTFlag::KRsv))
+		{
+			error("Physical address %#lx is reserved", PhysicalAddress);
+			return -EPERM;
+		}
+
+		vmm.Remap(VirtualAddress, PhysicalAddress, Flags);
+		debug("Remapped %#lx to %#lx (flags %#lx)",
+			  VirtualAddress, PhysicalAddress, Flags);
+		return 0;
+	}
+
+	int VirtualMemoryArea::Unmap(void *VirtualAddress, size_t Length)
+	{
+		Virtual vmm(this->Table);
+		SmartLock(MgrLock);
+
+		uintptr_t intVirtualAddress = (uintptr_t)VirtualAddress;
+
+		for (uintptr_t va = intVirtualAddress;
+			 va < intVirtualAddress + Length; va += PAGE_SIZE)
+		{
+			if (vmm.Check(VirtualAddress, PTFlag::KRsv))
+			{
+				error("Virtual address %#lx is reserved", VirtualAddress);
+				return -EPERM;
+			}
+		}
+
+		vmm.Unmap(VirtualAddress, Length);
+		debug("Unmapped %#lx-%#lx", VirtualAddress, intVirtualAddress + Length);
+		return 0;
+	}
+
+	void *VirtualMemoryArea::__UserCheckAndGetAddress(void *Address, size_t Length)
+	{
+		Virtual vmm(this->Table);
+		SmartLock(MgrLock);
+
+		void *pAddress = this->Table->Get(Address);
+		if (pAddress == nullptr)
+		{
+			debug("Virtual address %#lx returns nullptr", Address);
+			return nullptr;
+		}
+
+		uintptr_t intAddress = (uintptr_t)Address;
+		intAddress = ALIGN_DOWN(intAddress, PAGE_SIZE);
+		for (uintptr_t va = intAddress; va < intAddress + Length; va += PAGE_SIZE)
+		{
+			if (vmm.Check((void *)va, PTFlag::US))
+				continue;
+
+			fixme("Unable to get address %#lx, page is not user accessible", va);
+			return nullptr;
+		}
+
+		return pAddress;
+	}
+
+	int VirtualMemoryArea::__UserCheck(void *Address, size_t Length)
+	{
+		Virtual vmm(this->Table);
+		SmartLock(MgrLock);
+
+		if (vmm.Check(Address, PTFlag::US))
+			return 0;
+
+		error("Address %#lx is not user accessible", Address);
+		return -EFAULT;
+	}
+
+	VirtualMemoryArea::VirtualMemoryArea(PageTable *_Table)
+		: Table(_Table)
 	{
 		SmartLock(MgrLock);
+		if (_Table == nullptr)
 		{
 			if (TaskManager)
 			{
 				Tasking::PCB *pcb = thisProcess;
 				assert(pcb);
 				this->Table = thisProcess->PageTable;
 			}
 			else
-#if defined(a64)
-				this->Table = (PageTable *)CPU::x64::readcr3().raw;
-#elif defined(a32)
-				this->Table = (PageTable *)CPU::x32::readcr3().raw;
-#endif
+				this->Table = (PageTable *)CPU::PageTable();
 		}
 	}
 
 	VirtualMemoryArea::~VirtualMemoryArea()
 	{
 		debug("- %#lx %s", this,
 			  KernelSymbolTable ? KernelSymbolTable->GetSymbol((uintptr_t)__builtin_return_address(0)) : "");
 
-#ifdef DEBUG
-		if (this->Table == KernelPageTable)
-			debug("Not remapping kernel page table allocated pages.");
-#endif
+		/* No need to remap pages, the page table will be destroyed */
 
 		SmartLock(MgrLock);
-		Memory::Virtual vmm(this->Table);
 		foreach (auto ap in AllocatedPagesList)
 		{
 			KernelAllocator.FreePages(ap.Address, ap.PageCount);
-
-			if (this->Table == KernelPageTable)
-				continue;
-
-			for (size_t i = 0; i < ap.PageCount; i++)
-				vmm.Remap((void *)((uintptr_t)ap.Address + (i * PAGE_SIZE)),
-						  (void *)((uintptr_t)ap.Address + (i * PAGE_SIZE)),
-						  Memory::PTFlag::RW);
 		}
 	}
 }
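The three new entry points above share one pattern: walk the requested range and fail with -EPERM before any PTE is modified if a kernel-reserved page is found. (As committed, the loops pass the unchanged base address to vmm.Check on every iteration rather than the loop variable va; the sketch below tests the per-page address, which appears to be the intent.) A standalone model with Check() stubbed:

#include <cstdint>
#include <cstdio>

constexpr uintptr_t PAGE_SIZE = 0x1000;
constexpr int EPERM = 1; /* the kernel takes this value from its errno header */

/* Stands in for Virtual::Check(addr, PTFlag::KRsv); pretend one page is reserved. */
static bool IsKernelReserved(uintptr_t va) { return va == 0x5000; }

/* Mirrors the guard structure of the new VirtualMemoryArea::Map: scan the
   whole range first, and only touch the page table if no page is reserved. */
int MapGuarded(uintptr_t VirtualAddress, size_t Length)
{
	for (uintptr_t va = VirtualAddress; va < VirtualAddress + Length; va += PAGE_SIZE)
	{
		if (IsKernelReserved(va))
		{
			std::printf("Virtual address %#lx is reserved\n", (unsigned long)va);
			return -EPERM;
		}
	}
	/* ...perform the actual mapping here; the guard ran before any PTE changed... */
	return 0;
}

int main()
{
	/* Walks 0x4000..0x6FFF, hits the reserved page at 0x5000, returns -EPERM. */
	std::printf("%d\n", MapGuarded(0x4000, 0x3000));
}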
@@ -424,13 +424,14 @@ nsa void DisplayDetailsScreen(CPU::ExceptionFrame *Frame)
 	{
 		Memory::Virtual vmm((Memory::PageTable *)Frame->cr3);
 		if (vmm.GetMapType((void *)Frame->cr2) != Memory::Virtual::FourKiB)
-			ExPrint("Can't display page at address %#lx\n", Frame->cr2);
+			ExPrint("Can't display page %#lx\n", Frame->cr2);
 		else
 		{
 			Memory::PageTableEntry *pte = vmm.GetPTE((void *)Frame->cr2);
-			ExPrint("Page %#lx: P:%d W:%d U:%d G:%d CoW:%d NX:%d\n",
+			ExPrint("Page %#lx: P:%d W:%d U:%d G:%d CoW:%d KRsv:%d NX:%d\n",
 				ALIGN_DOWN(Frame->cr2, 0x1000), pte->Present, pte->ReadWrite,
-				pte->UserSupervisor, pte->Global, pte->CopyOnWrite, pte->ExecuteDisable);
+				pte->UserSupervisor, pte->Global, pte->CopyOnWrite,
+				pte->KernelReserve, pte->ExecuteDisable);
 		}
 	}