Update kernel

commit 2c51e4432f (parent 3b65386399)
Author: Alex
Date:   2023-08-06 04:53:14 +03:00

181 changed files with 21873 additions and 21475 deletions


@@ -22,7 +22,7 @@ Xalloc_def;
 #define XALLOC_CONCAT(x, y) x##y
 #define XStoP(d) (((d) + PAGE_SIZE - 1) / PAGE_SIZE)
 #define XPtoS(d) ((d)*PAGE_SIZE)
-#define Xalloc_BlockChecksum 0xA110C
+#define Xalloc_BlockSanityKey 0xA110C
 extern "C" void *Xalloc_REQUEST_PAGES(Xsize_t Pages);
 extern "C" void Xalloc_FREE_PAGES(void *Address, Xsize_t Pages);
@@ -55,7 +55,7 @@ namespace Xalloc
 public:
 	void *Address = nullptr;
-	int Checksum = Xalloc_BlockChecksum;
+	int Sanity = Xalloc_BlockSanityKey;
 	Xsize_t Size = 0;
 	Block *Next = nullptr;
 	Block *Last = nullptr;
@@ -63,7 +63,7 @@ namespace Xalloc
 	bool Check()
 	{
-		if (this->Checksum != Xalloc_BlockChecksum)
+		if (this->Sanity != Xalloc_BlockSanityKey)
 			return false;
 		return true;
 	}
@@ -168,8 +168,8 @@ namespace Xalloc
 	{
 		if (!CurrentBlock->Check())
 		{
-			Xalloc_err("Block %#lx has an invalid checksum! (%#x != %#x)",
-				   (Xuint64_t)CurrentBlock, CurrentBlock->Checksum, Xalloc_BlockChecksum);
+			Xalloc_err("Block %#lx has an invalid sanity key! (%#x != %#x)",
+				   (Xuint64_t)CurrentBlock, CurrentBlock->Sanity, Xalloc_BlockSanityKey);
 			while (Xalloc_StopOnFail)
 				;
 		}
@@ -210,8 +210,8 @@ namespace Xalloc
 	{
 		if (!CurrentBlock->Check())
 		{
-			Xalloc_err("Block %#lx has an invalid checksum! (%#x != %#x)",
-				   (Xuint64_t)CurrentBlock, CurrentBlock->Checksum, Xalloc_BlockChecksum);
+			Xalloc_err("Block %#lx has an invalid sanity key! (%#x != %#x)",
+				   (Xuint64_t)CurrentBlock, CurrentBlock->Sanity, Xalloc_BlockSanityKey);
 			while (Xalloc_StopOnFail)
 				;
 		}
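The rename from Checksum to Sanity matches what the field actually is: a fixed magic constant used to detect heap-header corruption, not a computed checksum. A minimal sketch of the pattern, with the surrounding allocator class assumed:

#include <cstddef>

constexpr int Xalloc_BlockSanityKey = 0xA110C;

struct BlockSketch
{
	int Sanity = Xalloc_BlockSanityKey; /* fixed magic value, not a computed checksum */
	std::size_t Size = 0;

	bool Check() const
	{
		/* If an overflow scribbled over the header, the magic no longer matches. */
		return this->Sanity == Xalloc_BlockSanityKey;
	}
};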

File diff suppressed because it is too large.


@@ -22,250 +22,266 @@
namespace Memory
{
ReadFSFunction(MEM_Read)
{
if (!Size)
Size = node->Length;
ReadFSFunction(MEM_Read)
{
if (!Size)
Size = node->Length;
if ((size_t)node->Offset > node->Length)
return 0;
if (RefOffset > node->Length)
return 0;
if (node->Offset + Size > node->Length)
Size = node->Length - node->Offset;
if (RefOffset + (off_t)Size > node->Length)
Size = node->Length - RefOffset;
memcpy(Buffer, (uint8_t *)(node->Address + node->Offset), Size);
return Size;
}
memcpy(Buffer, (uint8_t *)(node->Address + RefOffset), Size);
return Size;
}
WriteFSFunction(MEM_Write)
{
if (!Size)
Size = node->Length;
WriteFSFunction(MEM_Write)
{
if (!Size)
Size = node->Length;
if ((size_t)node->Offset > node->Length)
return 0;
if (RefOffset > node->Length)
return 0;
if (node->Offset + Size > node->Length)
Size = node->Length - node->Offset;
if (RefOffset + (off_t)Size > node->Length)
Size = node->Length - RefOffset;
memcpy((uint8_t *)(node->Address + node->Offset), Buffer, Size);
return Size;
}
memcpy((uint8_t *)(node->Address + RefOffset), Buffer, Size);
return Size;
}
VirtualFileSystem::FileSystemOperations mem_op = {
.Name = "mem",
.Read = MEM_Read,
.Write = MEM_Write,
};
VirtualFileSystem::FileSystemOperations mem_op = {
.Name = "mem",
.Read = MEM_Read,
.Write = MEM_Write,
};
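The rewritten MEM_Read/MEM_Write stop consulting the stateful node->Offset and instead honour the RefOffset argument supplied with each call. A standalone sketch of the clamping they perform; ClampedCopy and its parameter types are illustrative, not kernel API:

#include <cstddef>
#include <cstdint>
#include <cstring>

static std::size_t ClampedCopy(std::uint8_t *Dst, const std::uint8_t *Base, std::size_t Length,
                               std::int64_t RefOffset, std::size_t Size)
{
	if (Size == 0)
		Size = Length; /* 0 means "the whole node" */
	if (RefOffset < 0 || (std::size_t)RefOffset > Length)
		return 0; /* offset past the end: nothing to copy */
	if ((std::size_t)RefOffset + Size > Length)
		Size = Length - (std::size_t)RefOffset; /* trim to the node length */
	std::memcpy(Dst, Base + RefOffset, Size);
	return Size;
}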
uint64_t MemMgr::GetAllocatedMemorySize()
{
SmartLock(MgrLock);
uint64_t Size = 0;
foreach (auto ap in AllocatedPagesList)
Size += ap.PageCount;
return FROM_PAGES(Size);
}
uint64_t MemMgr::GetAllocatedMemorySize()
{
SmartLock(MgrLock);
uint64_t Size = 0;
foreach (auto ap in AllocatedPagesList)
Size += ap.PageCount;
return FROM_PAGES(Size);
}
bool MemMgr::Add(void *Address, size_t Count)
{
SmartLock(MgrLock);
if (Address == nullptr)
{
error("Address is null!");
return false;
}
bool MemMgr::Add(void *Address, size_t Count)
{
SmartLock(MgrLock);
if (Address == nullptr)
{
error("Address is null!");
return false;
}
if (Count == 0)
{
error("Count is 0!");
return false;
}
if (Count == 0)
{
error("Count is 0!");
return false;
}
for (size_t i = 0; i < AllocatedPagesList.size(); i++)
{
if (AllocatedPagesList[i].Address == Address)
{
error("Address already exists!");
return false;
}
else if ((uintptr_t)Address < (uintptr_t)AllocatedPagesList[i].Address)
{
if ((uintptr_t)Address + (Count * PAGE_SIZE) > (uintptr_t)AllocatedPagesList[i].Address)
{
error("Address intersects with an allocated page!");
return false;
}
}
else
{
if ((uintptr_t)AllocatedPagesList[i].Address + (AllocatedPagesList[i].PageCount * PAGE_SIZE) > (uintptr_t)Address)
{
error("Address intersects with an allocated page!");
return false;
}
}
}
for (size_t i = 0; i < AllocatedPagesList.size(); i++)
{
if (AllocatedPagesList[i].Address == Address)
{
error("Address already exists!");
return false;
}
else if ((uintptr_t)Address < (uintptr_t)AllocatedPagesList[i].Address)
{
if ((uintptr_t)Address + (Count * PAGE_SIZE) > (uintptr_t)AllocatedPagesList[i].Address)
{
error("Address intersects with an allocated page!");
return false;
}
}
else
{
if ((uintptr_t)AllocatedPagesList[i].Address + (AllocatedPagesList[i].PageCount * PAGE_SIZE) > (uintptr_t)Address)
{
error("Address intersects with an allocated page!");
return false;
}
}
}
if (this->Directory)
{
char FileName[64];
sprintf(FileName, "%lx-%ld", (uintptr_t)Address, Count);
VirtualFileSystem::Node *n = vfs->Create(FileName, VirtualFileSystem::NodeFlags::FILE, this->Directory);
if (n)
{
n->Address = (uintptr_t)Address;
n->Length = Count * PAGE_SIZE;
n->Operator = &mem_op;
}
}
AllocatedPagesList.push_back({Address, Count});
return true;
}
void *MemMgr::RequestPages(size_t Count, bool User)
{
SmartLock(MgrLock);
void *Address = KernelAllocator.RequestPages(Count);
for (size_t i = 0; i < Count; i++)
{
int Flags = Memory::PTFlag::RW;
if (User)
Flags |= Memory::PTFlag::US;
void *AddressToMap = (void *)((uintptr_t)Address + (i * PAGE_SIZE));
Memory::Virtual vmm = Memory::Virtual(this->Table);
vmm.Remap(AddressToMap, AddressToMap, Flags);
}
if (this->Directory)
{
char FileName[64];
sprintf(FileName, "%lx-%ld", (uintptr_t)Address, Count);
VirtualFileSystem::Node *n = vfs->Create(FileName, VirtualFileSystem::NodeFlags::FILE, this->Directory);
if (n) // If null, error or file already exists
{
n->Address = (uintptr_t)Address;
n->Length = Count * PAGE_SIZE;
n->Operator = &mem_op;
}
}
AllocatedPagesList.push_back({Address, Count});
/* For security reasons, we clear the allocated page
if it's a user page. */
if (User)
memset(Address, 0, Count * PAGE_SIZE);
return Address;
}
void MemMgr::FreePages(void *Address, size_t Count)
{
SmartLock(MgrLock);
for (size_t i = 0; i < AllocatedPagesList.size(); i++)
{
if (AllocatedPagesList[i].Address == Address)
{
/** TODO: Advanced checks. Allow if the page count is less than the requested one.
* This will allow the user to free only a part of the allocated pages.
*
* But this will be in a separate function because we need to specify if we
* want to free from the start or from the end and return the new address.
*/
if (AllocatedPagesList[i].PageCount != Count)
{
error("Page count mismatch! (Allocated: %lld, Requested: %lld)", AllocatedPagesList[i].PageCount, Count);
return;
}
KernelAllocator.FreePages(Address, Count);
Memory::Virtual vmm = Memory::Virtual(this->Table);
for (size_t i = 0; i < Count; i++)
{
void *AddressToMap = (void *)((uintptr_t)Address + (i * PAGE_SIZE));
vmm.Remap(AddressToMap, AddressToMap, Memory::PTFlag::RW);
// vmm.Unmap((void *)((uintptr_t)Address + (i * PAGE_SIZE)));
}
if (this->Directory)
{
char FileName[64];
sprintf(FileName, "%lx-%ld", (uintptr_t)Address, Count);
VirtualFileSystem::FileStatus s = vfs->Delete(FileName, false, this->Directory);
if (s != VirtualFileSystem::FileStatus::OK)
error("Failed to delete file %s", FileName);
}
AllocatedPagesList.remove(i);
return;
}
}
}
void MemMgr::DetachAddress(void *Address)
{
SmartLock(MgrLock);
for (size_t i = 0; i < AllocatedPagesList.size(); i++)
{
if (AllocatedPagesList[i].Address == Address)
{
if (this->Directory)
{
char FileName[64];
sprintf(FileName, "%lx-%ld", (uintptr_t)Address, AllocatedPagesList[i].PageCount);
VirtualFileSystem::FileStatus s = vfs->Delete(FileName, false, this->Directory);
if (s != VirtualFileSystem::FileStatus::OK)
error("Failed to delete file %s", FileName);
}
AllocatedPagesList.remove(i);
return;
}
}
}
MemMgr::MemMgr(PageTable *Table, VirtualFileSystem::Node *Directory)
{
SmartLock(MgrLock);
if (Table)
this->Table = Table;
else
{
#if defined(a64)
this->Table = (PageTable *)CPU::x64::readcr3().raw;
if (this->Directory)
{
char FileName[64];
#if defined(a64) || defined(aa64)
sprintf(FileName, "%lx-%ld", (uintptr_t)Address, Count);
#elif defined(a32)
this->Table = (PageTable *)CPU::x32::readcr3().raw;
sprintf(FileName, "%x-%ld", (uintptr_t)Address, Count);
#endif
}
VirtualFileSystem::Node *n = vfs->Create(FileName, VirtualFileSystem::NodeFlags::FILE, this->Directory);
if (n)
{
n->Address = (uintptr_t)Address;
n->Length = Count * PAGE_SIZE;
n->Operator = &mem_op;
}
}
this->Directory = Directory;
debug("+ %#lx", this);
}
AllocatedPagesList.push_back({Address, Count});
return true;
}
MemMgr::~MemMgr()
{
SmartLock(MgrLock);
foreach (auto ap in AllocatedPagesList)
{
KernelAllocator.FreePages(ap.Address, ap.PageCount);
Memory::Virtual vmm = Memory::Virtual(this->Table);
for (size_t i = 0; i < ap.PageCount; i++)
vmm.Remap((void *)((uintptr_t)ap.Address + (i * PAGE_SIZE)),
(void *)((uintptr_t)ap.Address + (i * PAGE_SIZE)),
Memory::PTFlag::RW);
}
void *MemMgr::RequestPages(size_t Count, bool User)
{
SmartLock(MgrLock);
void *Address = KernelAllocator.RequestPages(Count);
for (size_t i = 0; i < Count; i++)
{
int Flags = Memory::PTFlag::RW;
if (User)
Flags |= Memory::PTFlag::US;
if (this->Directory)
{
foreach (auto Child in this->Directory->Children)
vfs->Delete(Child, true);
}
void *AddressToMap = (void *)((uintptr_t)Address + (i * PAGE_SIZE));
debug("- %#lx", this);
}
Memory::Virtual vmm = Memory::Virtual(this->Table);
vmm.Remap(AddressToMap, AddressToMap, Flags);
}
if (this->Directory)
{
char FileName[64];
#if defined(a64) || defined(aa64)
sprintf(FileName, "%lx-%ld", (uintptr_t)Address, Count);
#elif defined(a32)
sprintf(FileName, "%x-%ld", (uintptr_t)Address, Count);
#endif
VirtualFileSystem::Node *n = vfs->Create(FileName, VirtualFileSystem::NodeFlags::FILE, this->Directory);
if (n) // If null, error or file already exists
{
n->Address = (uintptr_t)Address;
n->Length = Count * PAGE_SIZE;
n->Operator = &mem_op;
}
}
AllocatedPagesList.push_back({Address, Count});
/* For security reasons, we clear the allocated page
if it's a user page. */
if (User)
memset(Address, 0, Count * PAGE_SIZE);
return Address;
}
void MemMgr::FreePages(void *Address, size_t Count)
{
SmartLock(MgrLock);
forItr(itr, AllocatedPagesList)
{
if (itr->Address == Address)
{
/** TODO: Advanced checks. Allow if the page count is less than the requested one.
* This will allow the user to free only a part of the allocated pages.
*
* But this will be in a separate function because we need to specify if we
* want to free from the start or from the end and return the new address.
*/
if (itr->PageCount != Count)
{
error("Page count mismatch! (Allocated: %lld, Requested: %lld)", itr->PageCount, Count);
return;
}
KernelAllocator.FreePages(Address, Count);
Memory::Virtual vmm = Memory::Virtual(this->Table);
for (size_t i = 0; i < Count; i++)
{
void *AddressToMap = (void *)((uintptr_t)Address + (i * PAGE_SIZE));
vmm.Remap(AddressToMap, AddressToMap, Memory::PTFlag::RW);
// vmm.Unmap((void *)((uintptr_t)Address + (i * PAGE_SIZE)));
}
if (this->Directory)
{
char FileName[64];
#if defined(a64) || defined(aa64)
sprintf(FileName, "%lx-%ld", (uintptr_t)Address, Count);
#elif defined(a32)
sprintf(FileName, "%x-%ld", (uintptr_t)Address, Count);
#endif
if (!vfs->Delete(FileName, false, this->Directory))
error("Failed to delete file %s", FileName);
}
AllocatedPagesList.erase(itr);
return;
}
}
}
void MemMgr::DetachAddress(void *Address)
{
SmartLock(MgrLock);
forItr(itr, AllocatedPagesList)
{
if (itr->Address == Address)
{
if (this->Directory)
{
char FileName[64];
#if defined(a64) || defined(aa64)
sprintf(FileName, "%lx-%ld", (uintptr_t)Address, itr->PageCount);
#elif defined(a32)
sprintf(FileName, "%x-%ld", (uintptr_t)Address, itr->PageCount);
#endif
if (!vfs->Delete(FileName, false, this->Directory))
error("Failed to delete file %s", FileName);
}
AllocatedPagesList.erase(itr);
return;
}
}
}
MemMgr::MemMgr(PageTable *Table, VirtualFileSystem::Node *Directory)
{
SmartLock(MgrLock);
if (Table)
this->Table = Table;
else
{
#if defined(a64)
this->Table = (PageTable *)CPU::x64::readcr3().raw;
#elif defined(a32)
this->Table = (PageTable *)CPU::x32::readcr3().raw;
#endif
}
this->Directory = Directory;
debug("+ %#lx %s", this,
KernelSymbolTable ? KernelSymbolTable->GetSymbolFromAddress((uintptr_t)__builtin_return_address(0)) : "");
}
MemMgr::~MemMgr()
{
SmartLock(MgrLock);
foreach (auto ap in AllocatedPagesList)
{
KernelAllocator.FreePages(ap.Address, ap.PageCount);
Memory::Virtual vmm = Memory::Virtual(this->Table);
for (size_t i = 0; i < ap.PageCount; i++)
vmm.Remap((void *)((uintptr_t)ap.Address + (i * PAGE_SIZE)),
(void *)((uintptr_t)ap.Address + (i * PAGE_SIZE)),
Memory::PTFlag::RW);
}
if (this->Directory)
{
foreach (auto Child in this->Directory->Children)
vfs->Delete(Child, true);
}
debug("- %#lx %s", this,
KernelSymbolTable ? KernelSymbolTable->GetSymbolFromAddress((uintptr_t)__builtin_return_address(0)) : "");
}
}
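With these changes, MemMgr tracks every allocation in AllocatedPagesList and, when a VFS directory is attached, also exposes it as an "address-pagecount" file backed by mem_op. A hedged usage sketch, with the surrounding kernel objects assumed:

void ExampleUse(Memory::PageTable *Table, VirtualFileSystem::Node *ProcessDir)
{
	Memory::MemMgr mgr(Table, ProcessDir); /* per-process memory manager */

	/* Four user pages: remapped RW|US and zeroed before being returned. */
	void *Buffer = mgr.RequestPages(4, true);

	/* ... use Buffer ... */

	/* Must be freed with the same count; partial frees are still a TODO above. */
	mgr.FreePages(Buffer, 4);
} /* ~MemMgr() frees anything still tracked and deletes the directory's children. */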


@@ -26,467 +26,477 @@
namespace Memory
{
uint64_t Physical::GetTotalMemory()
{
SmartLock(this->MemoryLock);
return this->TotalMemory;
}
uint64_t Physical::GetTotalMemory()
{
return this->TotalMemory.load();
}
uint64_t Physical::GetFreeMemory()
{
SmartLock(this->MemoryLock);
return this->FreeMemory;
}
uint64_t Physical::GetFreeMemory()
{
return this->FreeMemory.load();
}
uint64_t Physical::GetReservedMemory()
{
SmartLock(this->MemoryLock);
return this->ReservedMemory;
}
uint64_t Physical::GetReservedMemory()
{
return this->ReservedMemory.load();
}
uint64_t Physical::GetUsedMemory()
{
SmartLock(this->MemoryLock);
return this->UsedMemory;
}
uint64_t Physical::GetUsedMemory()
{
return this->UsedMemory.load();
}
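The getters above drop SmartLock in favour of atomic counters read with .load(). A minimal sketch of the idea, with the member set reduced for brevity:

#include <atomic>
#include <cstdint>

class PhysicalSketch
{
	std::atomic<uint64_t> TotalMemory{0};
	std::atomic<uint64_t> FreeMemory{0};

public:
	/* A single atomic read needs no lock; writers still synchronise on their side. */
	uint64_t GetTotalMemory() { return TotalMemory.load(); }
	uint64_t GetFreeMemory() { return FreeMemory.load(); }
};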
bool Physical::SwapPage(void *Address)
{
fixme("%p", Address);
return false;
}
bool Physical::SwapPage(void *Address)
{
fixme("%p", Address);
return false;
}
bool Physical::SwapPages(void *Address, size_t PageCount)
{
for (size_t i = 0; i < PageCount; i++)
{
if (!this->SwapPage((void *)((uintptr_t)Address + (i * PAGE_SIZE))))
return false;
}
return false;
}
bool Physical::SwapPages(void *Address, size_t PageCount)
{
for (size_t i = 0; i < PageCount; i++)
{
if (!this->SwapPage((void *)((uintptr_t)Address + (i * PAGE_SIZE))))
return false;
}
return false;
}
bool Physical::UnswapPage(void *Address)
{
fixme("%p", Address);
return false;
}
bool Physical::UnswapPage(void *Address)
{
fixme("%p", Address);
return false;
}
bool Physical::UnswapPages(void *Address, size_t PageCount)
{
for (size_t i = 0; i < PageCount; i++)
{
if (!this->UnswapPage((void *)((uintptr_t)Address + (i * PAGE_SIZE))))
return false;
}
return false;
}
bool Physical::UnswapPages(void *Address, size_t PageCount)
{
for (size_t i = 0; i < PageCount; i++)
{
if (!this->UnswapPage((void *)((uintptr_t)Address + (i * PAGE_SIZE))))
return false;
}
return false;
}
void *Physical::RequestPage()
{
SmartLock(this->MemoryLock);
void *Physical::RequestPage()
{
SmartLock(this->MemoryLock);
for (; PageBitmapIndex < PageBitmap.Size * 8; PageBitmapIndex++)
{
if (PageBitmap[PageBitmapIndex] == true)
continue;
for (; PageBitmapIndex < PageBitmap.Size * 8; PageBitmapIndex++)
{
if (PageBitmap[PageBitmapIndex] == true)
continue;
this->LockPage((void *)(PageBitmapIndex * PAGE_SIZE));
this->LockPage((void *)(PageBitmapIndex * PAGE_SIZE));
#ifdef DEBUG
if (EnableExternalMemoryTracer)
{
char LockTmpStr[64];
strcpy_unsafe(LockTmpStr, __FUNCTION__);
strcat_unsafe(LockTmpStr, "_memTrk");
mExtTrkLock.TimeoutLock(LockTmpStr, 10000);
sprintf(mExtTrkLog, "RequestPage( )=%p~%p\n\r",
(void *)(PageBitmapIndex * PAGE_SIZE), __builtin_return_address(0));
UniversalAsynchronousReceiverTransmitter::UART mTrkUART = UniversalAsynchronousReceiverTransmitter::UART(UniversalAsynchronousReceiverTransmitter::COM3);
for (short i = 0; i < MEM_TRK_MAX_SIZE; i++)
{
if (mExtTrkLog[i] == '\r')
break;
mTrkUART.Write(mExtTrkLog[i]);
}
mExtTrkLock.Unlock();
}
if (EnableExternalMemoryTracer)
{
char LockTmpStr[64];
strcpy_unsafe(LockTmpStr, __FUNCTION__);
strcat_unsafe(LockTmpStr, "_memTrk");
mExtTrkLock.TimeoutLock(LockTmpStr, 10000);
sprintf(mExtTrkLog, "RequestPage( )=%p~%p\n\r",
(void *)(PageBitmapIndex * PAGE_SIZE), __builtin_return_address(0));
UniversalAsynchronousReceiverTransmitter::UART mTrkUART = UniversalAsynchronousReceiverTransmitter::UART(UniversalAsynchronousReceiverTransmitter::COM3);
for (short i = 0; i < MEM_TRK_MAX_SIZE; i++)
{
if (mExtTrkLog[i] == '\r')
break;
mTrkUART.Write(mExtTrkLog[i]);
}
mExtTrkLock.Unlock();
}
#endif
return (void *)(PageBitmapIndex * PAGE_SIZE);
}
return (void *)(PageBitmapIndex * PAGE_SIZE);
}
if (this->SwapPage((void *)(PageBitmapIndex * PAGE_SIZE)))
{
this->LockPage((void *)(PageBitmapIndex * PAGE_SIZE));
return (void *)(PageBitmapIndex * PAGE_SIZE);
}
if (this->SwapPage((void *)(PageBitmapIndex * PAGE_SIZE)))
{
this->LockPage((void *)(PageBitmapIndex * PAGE_SIZE));
return (void *)(PageBitmapIndex * PAGE_SIZE);
}
if (TaskManager && !TaskManager->IsPanic())
{
error("Out of memory! Killing current process...");
TaskManager->KillProcess(TaskManager->GetCurrentProcess(), Tasking::KILL_OOM);
TaskManager->Schedule();
}
if (TaskManager && !TaskManager->IsPanic())
{
error("Out of memory! Killing current process...");
TaskManager->KillProcess(thisProcess, Tasking::KILL_OOM);
TaskManager->Yield();
}
error("Out of memory! (Free: %ldMB; Used: %ldMB; Reserved: %ldMB)", TO_MB(FreeMemory), TO_MB(UsedMemory), TO_MB(ReservedMemory));
CPU::Stop();
__builtin_unreachable();
}
error("Out of memory! (Free: %ld MiB; Used: %ld MiB; Reserved: %ld MiB)",
TO_MiB(FreeMemory), TO_MiB(UsedMemory), TO_MiB(ReservedMemory));
KPrint("Out of memory! (Free: %ld MiB; Used: %ld MiB; Reserved: %ld MiB)",
TO_MiB(FreeMemory), TO_MiB(UsedMemory), TO_MiB(ReservedMemory));
CPU::Stop();
__builtin_unreachable();
}
void *Physical::RequestPages(size_t Count)
{
SmartLock(this->MemoryLock);
void *Physical::RequestPages(size_t Count)
{
SmartLock(this->MemoryLock);
for (; PageBitmapIndex < PageBitmap.Size * 8; PageBitmapIndex++)
{
if (PageBitmap[PageBitmapIndex] == true)
continue;
for (; PageBitmapIndex < PageBitmap.Size * 8; PageBitmapIndex++)
{
if (PageBitmap[PageBitmapIndex] == true)
continue;
for (uint64_t Index = PageBitmapIndex; Index < PageBitmap.Size * 8; Index++)
{
if (PageBitmap[Index] == true)
continue;
for (uint64_t Index = PageBitmapIndex; Index < PageBitmap.Size * 8; Index++)
{
if (PageBitmap[Index] == true)
continue;
for (size_t i = 0; i < Count; i++)
{
if (PageBitmap[Index + i] == true)
goto NextPage;
}
for (size_t i = 0; i < Count; i++)
{
if (PageBitmap[Index + i] == true)
goto NextPage;
}
this->LockPages((void *)(Index * PAGE_SIZE), Count);
this->LockPages((void *)(Index * PAGE_SIZE), Count);
#ifdef DEBUG
if (EnableExternalMemoryTracer)
{
char LockTmpStr[64];
strcpy_unsafe(LockTmpStr, __FUNCTION__);
strcat_unsafe(LockTmpStr, "_memTrk");
mExtTrkLock.TimeoutLock(LockTmpStr, 10000);
sprintf(mExtTrkLog, "RequestPages( %ld )=%p~%p\n\r",
Count,
(void *)(Index * PAGE_SIZE), __builtin_return_address(0));
UniversalAsynchronousReceiverTransmitter::UART mTrkUART = UniversalAsynchronousReceiverTransmitter::UART(UniversalAsynchronousReceiverTransmitter::COM3);
for (short i = 0; i < MEM_TRK_MAX_SIZE; i++)
{
if (mExtTrkLog[i] == '\r')
break;
mTrkUART.Write(mExtTrkLog[i]);
}
mExtTrkLock.Unlock();
}
if (EnableExternalMemoryTracer)
{
char LockTmpStr[64];
strcpy_unsafe(LockTmpStr, __FUNCTION__);
strcat_unsafe(LockTmpStr, "_memTrk");
mExtTrkLock.TimeoutLock(LockTmpStr, 10000);
sprintf(mExtTrkLog, "RequestPages( %ld )=%p~%p\n\r",
Count,
(void *)(Index * PAGE_SIZE), __builtin_return_address(0));
UniversalAsynchronousReceiverTransmitter::UART mTrkUART = UniversalAsynchronousReceiverTransmitter::UART(UniversalAsynchronousReceiverTransmitter::COM3);
for (short i = 0; i < MEM_TRK_MAX_SIZE; i++)
{
if (mExtTrkLog[i] == '\r')
break;
mTrkUART.Write(mExtTrkLog[i]);
}
mExtTrkLock.Unlock();
}
#endif
return (void *)(Index * PAGE_SIZE);
return (void *)(Index * PAGE_SIZE);
NextPage:
Index += Count;
continue;
}
}
NextPage:
Index += Count;
continue;
}
}
if (this->SwapPages((void *)(PageBitmapIndex * PAGE_SIZE), Count))
{
this->LockPages((void *)(PageBitmapIndex * PAGE_SIZE), Count);
return (void *)(PageBitmapIndex * PAGE_SIZE);
}
if (this->SwapPages((void *)(PageBitmapIndex * PAGE_SIZE), Count))
{
this->LockPages((void *)(PageBitmapIndex * PAGE_SIZE), Count);
return (void *)(PageBitmapIndex * PAGE_SIZE);
}
if (TaskManager && !TaskManager->IsPanic())
{
error("Out of memory! Killing current process...");
TaskManager->KillProcess(TaskManager->GetCurrentProcess(), Tasking::KILL_OOM);
TaskManager->Schedule();
}
if (TaskManager && !TaskManager->IsPanic())
{
error("Out of memory! Killing current process...");
TaskManager->KillProcess(thisProcess, Tasking::KILL_OOM);
TaskManager->Yield();
}
error("Out of memory! (Free: %ldMB; Used: %ldMB; Reserved: %ldMB)", TO_MB(FreeMemory), TO_MB(UsedMemory), TO_MB(ReservedMemory));
CPU::Halt(true);
__builtin_unreachable();
}
error("Out of memory! (Free: %ld MiB; Used: %ld MiB; Reserved: %ld MiB)",
TO_MiB(FreeMemory), TO_MiB(UsedMemory), TO_MiB(ReservedMemory));
KPrint("Out of memory! (Free: %ld MiB; Used: %ld MiB; Reserved: %ld MiB)",
TO_MiB(FreeMemory), TO_MiB(UsedMemory), TO_MiB(ReservedMemory));
CPU::Halt(true);
__builtin_unreachable();
}
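RequestPages() is a first-fit scan over the page bitmap, with swapping and OOM handling as fallbacks. A simplified sketch of just the scan, where std::vector<bool> stands in for the kernel's Bitmap and locking/swap are omitted:

#include <cstddef>
#include <cstdint>
#include <vector>

void *FindFreeRun(const std::vector<bool> &PageBitmap, std::size_t Count,
                  std::size_t PageSize = 0x1000)
{
	for (std::size_t Index = 0; Index + Count <= PageBitmap.size(); Index++)
	{
		bool Taken = false;
		for (std::size_t i = 0; i < Count; i++)
			if (PageBitmap[Index + i]) /* a set bit means the page is locked */
			{
				Taken = true;
				break;
			}
		if (!Taken)
			return (void *)(Index * PageSize);
	}
	return nullptr; /* the caller would try SwapPages() and finally report OOM */
}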
void Physical::FreePage(void *Address)
{
SmartLock(this->MemoryLock);
void Physical::FreePage(void *Address)
{
SmartLock(this->MemoryLock);
if (unlikely(Address == nullptr))
{
warn("Null pointer passed to FreePage.");
return;
}
if (unlikely(Address == nullptr))
{
warn("Null pointer passed to FreePage.");
return;
}
size_t Index = (size_t)Address / PAGE_SIZE;
size_t Index = (size_t)Address / PAGE_SIZE;
if (unlikely(PageBitmap[Index] == false))
{
warn("Tried to free an already free page. (%p)", Address);
return;
}
if (unlikely(PageBitmap[Index] == false))
{
warn("Tried to free an already free page. (%p)",
Address);
return;
}
if (PageBitmap.Set(Index, false))
{
FreeMemory += PAGE_SIZE;
UsedMemory -= PAGE_SIZE;
if (PageBitmapIndex > Index)
PageBitmapIndex = Index;
}
if (PageBitmap.Set(Index, false))
{
FreeMemory += PAGE_SIZE;
UsedMemory -= PAGE_SIZE;
if (PageBitmapIndex > Index)
PageBitmapIndex = Index;
}
#ifdef DEBUG
if (EnableExternalMemoryTracer)
{
char LockTmpStr[64];
strcpy_unsafe(LockTmpStr, __FUNCTION__);
strcat_unsafe(LockTmpStr, "_memTrk");
mExtTrkLock.TimeoutLock(LockTmpStr, 10000);
sprintf(mExtTrkLog, "FreePage( %p )~%p\n\r",
Address,
__builtin_return_address(0));
UniversalAsynchronousReceiverTransmitter::UART mTrkUART = UniversalAsynchronousReceiverTransmitter::UART(UniversalAsynchronousReceiverTransmitter::COM3);
for (short i = 0; i < MEM_TRK_MAX_SIZE; i++)
{
if (mExtTrkLog[i] == '\r')
break;
mTrkUART.Write(mExtTrkLog[i]);
}
mExtTrkLock.Unlock();
}
if (EnableExternalMemoryTracer)
{
char LockTmpStr[64];
strcpy_unsafe(LockTmpStr, __FUNCTION__);
strcat_unsafe(LockTmpStr, "_memTrk");
mExtTrkLock.TimeoutLock(LockTmpStr, 10000);
sprintf(mExtTrkLog, "FreePage( %p )~%p\n\r",
Address,
__builtin_return_address(0));
UniversalAsynchronousReceiverTransmitter::UART mTrkUART = UniversalAsynchronousReceiverTransmitter::UART(UniversalAsynchronousReceiverTransmitter::COM3);
for (short i = 0; i < MEM_TRK_MAX_SIZE; i++)
{
if (mExtTrkLog[i] == '\r')
break;
mTrkUART.Write(mExtTrkLog[i]);
}
mExtTrkLock.Unlock();
}
#endif
}
}
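FreePage() does three things: clear the bitmap bit, move one page of accounting from UsedMemory to FreeMemory, and rewind PageBitmapIndex so the next search can reuse the slot. A compact sketch with the state passed explicitly instead of living in the class:

#include <cstddef>
#include <cstdint>
#include <vector>

void FreePageSketch(std::vector<bool> &PageBitmap, std::size_t &PageBitmapIndex,
                    uint64_t &FreeMemory, uint64_t &UsedMemory,
                    void *Address, std::size_t PageSize = 0x1000)
{
	std::size_t Index = (std::uintptr_t)Address / PageSize;
	if (!PageBitmap[Index])
		return; /* already free: the kernel warns and bails out here */
	PageBitmap[Index] = false;
	FreeMemory += PageSize;
	UsedMemory -= PageSize;
	if (PageBitmapIndex > Index)
		PageBitmapIndex = Index; /* let the next RequestPage() start earlier */
}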
void Physical::FreePages(void *Address, size_t Count)
{
if (unlikely(Address == nullptr || Count == 0))
{
warn("%s%s%s passed to FreePages.", Address == nullptr ? "Null pointer " : "", Address == nullptr && Count == 0 ? "and " : "", Count == 0 ? "Zero count" : "");
return;
}
void Physical::FreePages(void *Address, size_t Count)
{
if (unlikely(Address == nullptr || Count == 0))
{
warn("%s%s%s passed to FreePages.", Address == nullptr ? "Null pointer " : "", Address == nullptr && Count == 0 ? "and " : "", Count == 0 ? "Zero count" : "");
return;
}
#ifdef DEBUG
if (EnableExternalMemoryTracer)
{
char LockTmpStr[64];
strcpy_unsafe(LockTmpStr, __FUNCTION__);
strcat_unsafe(LockTmpStr, "_memTrk");
mExtTrkLock.TimeoutLock(LockTmpStr, 10000);
sprintf(mExtTrkLog, "!FreePages( %p %ld )~%p\n\r",
Address, Count,
__builtin_return_address(0));
UniversalAsynchronousReceiverTransmitter::UART mTrkUART = UniversalAsynchronousReceiverTransmitter::UART(UniversalAsynchronousReceiverTransmitter::COM3);
for (short i = 0; i < MEM_TRK_MAX_SIZE; i++)
{
if (mExtTrkLog[i] == '\r')
break;
mTrkUART.Write(mExtTrkLog[i]);
}
mExtTrkLock.Unlock();
}
if (EnableExternalMemoryTracer)
{
char LockTmpStr[64];
strcpy_unsafe(LockTmpStr, __FUNCTION__);
strcat_unsafe(LockTmpStr, "_memTrk");
mExtTrkLock.TimeoutLock(LockTmpStr, 10000);
sprintf(mExtTrkLog, "!FreePages( %p %ld )~%p\n\r",
Address, Count,
__builtin_return_address(0));
UniversalAsynchronousReceiverTransmitter::UART mTrkUART = UniversalAsynchronousReceiverTransmitter::UART(UniversalAsynchronousReceiverTransmitter::COM3);
for (short i = 0; i < MEM_TRK_MAX_SIZE; i++)
{
if (mExtTrkLog[i] == '\r')
break;
mTrkUART.Write(mExtTrkLog[i]);
}
mExtTrkLock.Unlock();
}
#endif
for (size_t t = 0; t < Count; t++)
this->FreePage((void *)((uintptr_t)Address + (t * PAGE_SIZE)));
}
for (size_t t = 0; t < Count; t++)
this->FreePage((void *)((uintptr_t)Address + (t * PAGE_SIZE)));
}
void Physical::LockPage(void *Address)
{
if (unlikely(Address == nullptr))
warn("Trying to lock null address.");
void Physical::LockPage(void *Address)
{
if (unlikely(Address == nullptr))
warn("Trying to lock null address.");
uintptr_t Index = (uintptr_t)Address / PAGE_SIZE;
uintptr_t Index = (uintptr_t)Address / PAGE_SIZE;
if (unlikely(PageBitmap[Index] == true))
return;
if (unlikely(PageBitmap[Index] == true))
return;
if (PageBitmap.Set(Index, true))
{
FreeMemory -= PAGE_SIZE;
UsedMemory += PAGE_SIZE;
}
}
if (PageBitmap.Set(Index, true))
{
FreeMemory -= PAGE_SIZE;
UsedMemory += PAGE_SIZE;
}
}
void Physical::LockPages(void *Address, size_t PageCount)
{
if (unlikely(Address == nullptr || PageCount == 0))
warn("Trying to lock %s%s.", Address ? "null address" : "", PageCount ? "0 pages" : "");
void Physical::LockPages(void *Address, size_t PageCount)
{
if (unlikely(Address == nullptr || PageCount == 0))
warn("Trying to lock %s%s.",
Address ? "null address" : "",
PageCount ? "0 pages" : "");
for (size_t i = 0; i < PageCount; i++)
this->LockPage((void *)((uintptr_t)Address + (i * PAGE_SIZE)));
}
for (size_t i = 0; i < PageCount; i++)
this->LockPage((void *)((uintptr_t)Address + (i * PAGE_SIZE)));
}
void Physical::ReservePage(void *Address)
{
if (unlikely(Address == nullptr))
warn("Trying to reserve null address.");
void Physical::ReservePage(void *Address)
{
if (unlikely(Address == nullptr))
warn("Trying to reserve null address.");
uintptr_t Index = (Address == NULL) ? 0 : (uintptr_t)Address / PAGE_SIZE;
uintptr_t Index = (Address == NULL) ? 0 : (uintptr_t)Address / PAGE_SIZE;
if (unlikely(PageBitmap[Index] == true))
return;
if (unlikely(PageBitmap[Index] == true))
return;
if (PageBitmap.Set(Index, true))
{
FreeMemory -= PAGE_SIZE;
ReservedMemory += PAGE_SIZE;
}
}
if (PageBitmap.Set(Index, true))
{
FreeMemory -= PAGE_SIZE;
ReservedMemory += PAGE_SIZE;
}
}
void Physical::ReservePages(void *Address, size_t PageCount)
{
if (unlikely(Address == nullptr || PageCount == 0))
warn("Trying to reserve %s%s.", Address ? "null address" : "", PageCount ? "0 pages" : "");
void Physical::ReservePages(void *Address, size_t PageCount)
{
if (unlikely(Address == nullptr || PageCount == 0))
warn("Trying to reserve %s%s.",
Address ? "null address" : "",
PageCount ? "0 pages" : "");
for (size_t t = 0; t < PageCount; t++)
{
uintptr_t Index = ((uintptr_t)Address + (t * PAGE_SIZE)) / PAGE_SIZE;
for (size_t t = 0; t < PageCount; t++)
{
uintptr_t Index = ((uintptr_t)Address + (t * PAGE_SIZE)) / PAGE_SIZE;
if (unlikely(PageBitmap[Index] == true))
return;
if (unlikely(PageBitmap[Index] == true))
return;
if (PageBitmap.Set(Index, true))
{
FreeMemory -= PAGE_SIZE;
ReservedMemory += PAGE_SIZE;
}
}
}
if (PageBitmap.Set(Index, true))
{
FreeMemory -= PAGE_SIZE;
ReservedMemory += PAGE_SIZE;
}
}
}
void Physical::UnreservePage(void *Address)
{
if (unlikely(Address == nullptr))
warn("Trying to unreserve null address.");
void Physical::UnreservePage(void *Address)
{
if (unlikely(Address == nullptr))
warn("Trying to unreserve null address.");
uintptr_t Index = (Address == NULL) ? 0 : (uintptr_t)Address / PAGE_SIZE;
uintptr_t Index = (Address == NULL) ? 0 : (uintptr_t)Address / PAGE_SIZE;
if (unlikely(PageBitmap[Index] == false))
return;
if (unlikely(PageBitmap[Index] == false))
return;
if (PageBitmap.Set(Index, false))
{
FreeMemory += PAGE_SIZE;
ReservedMemory -= PAGE_SIZE;
if (PageBitmapIndex > Index)
PageBitmapIndex = Index;
}
}
if (PageBitmap.Set(Index, false))
{
FreeMemory += PAGE_SIZE;
ReservedMemory -= PAGE_SIZE;
if (PageBitmapIndex > Index)
PageBitmapIndex = Index;
}
}
void Physical::UnreservePages(void *Address, size_t PageCount)
{
if (unlikely(Address == nullptr || PageCount == 0))
warn("Trying to unreserve %s%s.", Address ? "null address" : "", PageCount ? "0 pages" : "");
void Physical::UnreservePages(void *Address, size_t PageCount)
{
if (unlikely(Address == nullptr || PageCount == 0))
warn("Trying to unreserve %s%s.",
Address ? "null address" : "",
PageCount ? "0 pages" : "");
for (size_t t = 0; t < PageCount; t++)
{
uintptr_t Index = ((uintptr_t)Address + (t * PAGE_SIZE)) / PAGE_SIZE;
for (size_t t = 0; t < PageCount; t++)
{
uintptr_t Index = ((uintptr_t)Address + (t * PAGE_SIZE)) / PAGE_SIZE;
if (unlikely(PageBitmap[Index] == false))
return;
if (unlikely(PageBitmap[Index] == false))
return;
if (PageBitmap.Set(Index, false))
{
FreeMemory += PAGE_SIZE;
ReservedMemory -= PAGE_SIZE;
if (PageBitmapIndex > Index)
PageBitmapIndex = Index;
}
}
}
if (PageBitmap.Set(Index, false))
{
FreeMemory += PAGE_SIZE;
ReservedMemory -= PAGE_SIZE;
if (PageBitmapIndex > Index)
PageBitmapIndex = Index;
}
}
}
void Physical::Init()
{
SmartLock(this->MemoryLock);
void Physical::Init()
{
SmartLock(this->MemoryLock);
uint64_t MemorySize = bInfo.Memory.Size;
debug("Memory size: %lld bytes (%ld pages)", MemorySize, TO_PAGES(MemorySize));
TotalMemory = MemorySize;
FreeMemory = MemorySize;
uint64_t MemorySize = bInfo.Memory.Size;
debug("Memory size: %lld bytes (%ld pages)",
MemorySize, TO_PAGES(MemorySize));
TotalMemory = MemorySize;
FreeMemory = MemorySize;
size_t BitmapSize = (size_t)(MemorySize / PAGE_SIZE) / 8 + 1;
uintptr_t BitmapAddress = 0x0;
size_t BitmapAddressSize = 0;
size_t BitmapSize = (size_t)(MemorySize / PAGE_SIZE) / 8 + 1;
uintptr_t BitmapAddress = 0x0;
size_t BitmapAddressSize = 0;
uintptr_t KernelStart = (uintptr_t)bInfo.Kernel.PhysicalBase;
uintptr_t KernelEnd = (uintptr_t)bInfo.Kernel.PhysicalBase + bInfo.Kernel.Size;
uintptr_t KernelStart = (uintptr_t)bInfo.Kernel.PhysicalBase;
uintptr_t KernelEnd = (uintptr_t)bInfo.Kernel.PhysicalBase + bInfo.Kernel.Size;
for (uint64_t i = 0; i < bInfo.Memory.Entries; i++)
{
if (bInfo.Memory.Entry[i].Type == Usable)
{
uintptr_t RegionAddress = (uintptr_t)bInfo.Memory.Entry[i].BaseAddress;
uintptr_t RegionSize = bInfo.Memory.Entry[i].Length;
for (uint64_t i = 0; i < bInfo.Memory.Entries; i++)
{
if (bInfo.Memory.Entry[i].Type == Usable)
{
uintptr_t RegionAddress = (uintptr_t)bInfo.Memory.Entry[i].BaseAddress;
uintptr_t RegionSize = bInfo.Memory.Entry[i].Length;
/* We don't want to use 0 as a memory address. */
if (RegionAddress == 0x0)
continue;
/* We don't want to use the first 1MB of memory. */
if (RegionAddress <= 0xFFFFF)
continue;
if ((BitmapSize + 0x100) > RegionSize)
{
debug("Region %p-%p (%dMB) is too small for bitmap.",
(void *)RegionAddress,
(void *)(RegionAddress + RegionSize),
TO_MB(RegionSize));
continue;
}
if ((BitmapSize + 0x100) > RegionSize)
{
debug("Region %p-%p (%d MiB) is too small for bitmap.",
(void *)RegionAddress,
(void *)(RegionAddress + RegionSize),
TO_MiB(RegionSize));
continue;
}
BitmapAddress = RegionAddress;
BitmapAddressSize = RegionSize;
BitmapAddress = RegionAddress;
BitmapAddressSize = RegionSize;
if (RegionAddress >= KernelStart && KernelEnd <= (RegionAddress + RegionSize))
{
BitmapAddress = KernelEnd;
BitmapAddressSize = RegionSize - (KernelEnd - RegionAddress);
}
if (RegionAddress >= KernelStart && KernelEnd <= (RegionAddress + RegionSize))
{
BitmapAddress = KernelEnd;
BitmapAddressSize = RegionSize - (KernelEnd - RegionAddress);
}
if ((BitmapSize + 0x100) > BitmapAddressSize)
{
debug("Region %p-%p (%dMB) is too small for bitmap.",
(void *)RegionAddress,
(void *)(RegionAddress + BitmapAddressSize),
TO_MB(BitmapAddressSize));
continue;
}
if ((BitmapSize + 0x100) > BitmapAddressSize)
{
debug("Region %p-%p (%d MiB) is too small for bitmap.",
(void *)RegionAddress,
(void *)(RegionAddress + BitmapAddressSize),
TO_MiB(BitmapAddressSize));
continue;
}
for (size_t i = 0; i < MAX_MODULES; i++)
{
uintptr_t ModuleStart = (uintptr_t)bInfo.Modules[i].Address;
uintptr_t ModuleEnd = (uintptr_t)bInfo.Modules[i].Address + bInfo.Modules[i].Size;
for (size_t i = 0; i < MAX_MODULES; i++)
{
uintptr_t ModuleStart = (uintptr_t)bInfo.Modules[i].Address;
uintptr_t ModuleEnd = (uintptr_t)bInfo.Modules[i].Address + bInfo.Modules[i].Size;
if (ModuleStart == 0x0)
break;
if (ModuleStart == 0x0)
break;
if (RegionAddress >= ModuleStart && ModuleEnd <= (RegionAddress + RegionSize))
{
BitmapAddress = ModuleEnd;
BitmapAddressSize = RegionSize - (ModuleEnd - RegionAddress);
}
}
if (RegionAddress >= ModuleStart && ModuleEnd <= (RegionAddress + RegionSize))
{
BitmapAddress = ModuleEnd;
BitmapAddressSize = RegionSize - (ModuleEnd - RegionAddress);
}
}
if ((BitmapSize + 0x100) > BitmapAddressSize)
{
debug("Region %p-%p (%dMB) is too small for bitmap.",
(void *)BitmapAddress,
(void *)(BitmapAddress + BitmapAddressSize),
TO_MB(BitmapAddressSize));
continue;
}
if ((BitmapSize + 0x100) > BitmapAddressSize)
{
debug("Region %p-%p (%d MiB) is too small for bitmap.",
(void *)BitmapAddress,
(void *)(BitmapAddress + BitmapAddressSize),
TO_MiB(BitmapAddressSize));
continue;
}
debug("Found free memory for bitmap: %p (%dMB)",
(void *)BitmapAddress,
TO_MB(BitmapAddressSize));
break;
}
}
debug("Found free memory for bitmap: %p (%d MiB)",
(void *)BitmapAddress,
TO_MiB(BitmapAddressSize));
break;
}
}
if (BitmapAddress == 0x0)
{
error("No free memory found!");
CPU::Stop();
}
if (BitmapAddress == 0x0)
{
error("No free memory found!");
CPU::Stop();
}
/* TODO: Read the swap config and configure the bitmap size accordingly */
debug("Initializing Bitmap at %p-%p (%d Bytes)",
BitmapAddress,
(void *)(BitmapAddress + BitmapSize),
BitmapSize);
/* TODO: Read the swap config and configure the bitmap size accordingly */
debug("Initializing Bitmap at %p-%p (%d Bytes)",
BitmapAddress,
(void *)(BitmapAddress + BitmapSize),
BitmapSize);
PageBitmap.Size = BitmapSize;
PageBitmap.Buffer = (uint8_t *)BitmapAddress;
for (size_t i = 0; i < BitmapSize; i++)
*(uint8_t *)(PageBitmap.Buffer + i) = 0;
PageBitmap.Size = BitmapSize;
PageBitmap.Buffer = (uint8_t *)BitmapAddress;
for (size_t i = 0; i < BitmapSize; i++)
*(uint8_t *)(PageBitmap.Buffer + i) = 0;
ReserveEssentials();
}
ReserveEssentials();
}
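The bitmap sizing above uses one bit per page. As a worked example with illustrative numbers: 4 GiB of memory and 4 KiB pages give 1,048,576 pages, so BitmapSize = 1,048,576 / 8 + 1 = 131,073 bytes (about 128 KiB), which comfortably passes the (BitmapSize + 0x100) fit check against any reasonably sized usable region.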
Physical::Physical() {}
Physical::~Physical() {}
Physical::Physical() {}
Physical::~Physical() {}
}


@@ -18,6 +18,7 @@
#include <memory.hpp>
#include <debug.h>
#include <elf.h>
#ifdef DEBUG
#include <uart.hpp>
#endif
@@ -44,7 +45,13 @@ namespace Memory
for (uint64_t i = 0; i < bInfo.Memory.Entries; i++)
{
if (bInfo.Memory.Entry[i].Type == Usable)
this->UnreservePages(bInfo.Memory.Entry[i].BaseAddress, TO_PAGES(bInfo.Memory.Entry[i].Length));
{
if (uintptr_t(bInfo.Memory.Entry[i].BaseAddress) <= 0xFFFFF)
continue;
this->UnreservePages(bInfo.Memory.Entry[i].BaseAddress,
TO_PAGES(bInfo.Memory.Entry[i].Length));
}
}
debug("Reserving 0x0-0xFFFFF range...");
@@ -57,18 +64,72 @@ namespace Memory
*/
this->ReservePages((void *)0x0, TO_PAGES(0xFFFFF));
debug("Reserving bitmap region %#lx-%#lx...", PageBitmap.Buffer, (void *)((uintptr_t)PageBitmap.Buffer + PageBitmap.Size));
debug("Reserving bitmap region %#lx-%#lx...",
PageBitmap.Buffer,
(void *)((uintptr_t)PageBitmap.Buffer + PageBitmap.Size));
this->ReservePages(PageBitmap.Buffer, TO_PAGES(PageBitmap.Size));
debug("Reserving kernel physical region %#lx-%#lx...", bInfo.Kernel.PhysicalBase, (void *)((uintptr_t)bInfo.Kernel.PhysicalBase + bInfo.Kernel.Size));
debug("Reserving kernel physical region %#lx-%#lx...",
bInfo.Kernel.PhysicalBase,
(void *)((uintptr_t)bInfo.Kernel.PhysicalBase + bInfo.Kernel.Size));
this->ReservePages(bInfo.Kernel.PhysicalBase, TO_PAGES(bInfo.Kernel.Size));
debug("Reserving kernel file and symbols...");
if (bInfo.Kernel.FileBase)
this->ReservePages(bInfo.Kernel.FileBase, TO_PAGES(bInfo.Kernel.Size));
if (bInfo.Kernel.Symbols.Num && bInfo.Kernel.Symbols.EntSize && bInfo.Kernel.Symbols.Shndx)
this->ReservePages((void *)bInfo.Kernel.Symbols.Sections, TO_PAGES(bInfo.Kernel.Symbols.Num * bInfo.Kernel.Symbols.EntSize));
if (bInfo.Kernel.Symbols.Num &&
bInfo.Kernel.Symbols.EntSize &&
bInfo.Kernel.Symbols.Shndx)
{
char *sections = reinterpret_cast<char *>(bInfo.Kernel.Symbols.Sections);
uint8_t *StringAddress = nullptr;
Elf_Sym *Symbols = nullptr;
#if defined(a64) || defined(aa64)
Elf64_Xword SymbolSize = 0;
Elf64_Xword StringSize = 0;
#elif defined(a32)
Elf32_Word SymbolSize = 0;
Elf32_Word StringSize = 0;
#endif
for (size_t i = 0; i < bInfo.Kernel.Symbols.Num; ++i)
{
Elf_Shdr *sym = (Elf_Shdr *)&sections[bInfo.Kernel.Symbols.EntSize * i];
Elf_Shdr *str = (Elf_Shdr *)&sections[bInfo.Kernel.Symbols.EntSize *
sym->sh_link];
if (sym->sh_type == SHT_SYMTAB &&
str->sh_type == SHT_STRTAB)
{
Symbols = (Elf_Sym *)sym->sh_addr;
StringAddress = (uint8_t *)str->sh_addr;
SymbolSize = (int)sym->sh_size;
StringSize = (int)str->sh_size;
debug("Symbol table found, %d entries",
SymbolSize / sym->sh_entsize);
break;
}
}
if (Symbols)
{
debug("Reserving symbol table region %#lx-%#lx...",
Symbols, (void *)((uintptr_t)Symbols + SymbolSize));
this->ReservePages(Symbols, TO_PAGES(SymbolSize));
}
if (StringAddress)
{
debug("Reserving string table region %#lx-%#lx...",
StringAddress, (void *)((uintptr_t)StringAddress + StringSize));
this->ReservePages(StringAddress, TO_PAGES(StringSize));
}
}
debug("Reserving kernel modules...");
@@ -78,13 +139,17 @@ namespace Memory
continue;
debug("Reserving module %s (%#lx-%#lx)...", bInfo.Modules[i].CommandLine,
bInfo.Modules[i].Address, (void *)((uintptr_t)bInfo.Modules[i].Address + bInfo.Modules[i].Size));
bInfo.Modules[i].Address,
(void *)((uintptr_t)bInfo.Modules[i].Address + bInfo.Modules[i].Size));
this->ReservePages((void *)bInfo.Modules[i].Address, TO_PAGES(bInfo.Modules[i].Size));
this->ReservePages((void *)bInfo.Modules[i].Address,
TO_PAGES(bInfo.Modules[i].Size));
}
#if defined(a86)
debug("Reserving RSDT region %#lx-%#lx...", bInfo.RSDP, (void *)((uintptr_t)bInfo.RSDP + sizeof(BootInfo::RSDPInfo)));
debug("Reserving RSDT region %#lx-%#lx...", bInfo.RSDP,
(void *)((uintptr_t)bInfo.RSDP + sizeof(BootInfo::RSDPInfo)));
this->ReservePages(bInfo.RSDP, TO_PAGES(sizeof(BootInfo::RSDPInfo)));
ACPI::ACPI::ACPIHeader *ACPIPtr = nullptr;
@@ -115,23 +180,27 @@ namespace Memory
}
#endif
size_t TableSize = ((ACPIPtr->Length - sizeof(ACPI::ACPI::ACPIHeader)) / (XSDT ? 8 : 4));
size_t TableSize = ((ACPIPtr->Length - sizeof(ACPI::ACPI::ACPIHeader)) /
(XSDT ? 8 : 4));
debug("Reserving %d ACPI tables...", TableSize);
for (size_t t = 0; t < TableSize; t++)
{
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wint-to-pointer-cast"
// TODO: Should I be concerned about unaligned memory access?
ACPI::ACPI::ACPIHeader *SDTHdr = nullptr;
if (XSDT)
SDTHdr = (ACPI::ACPI::ACPIHeader *)(*(uint64_t *)((uint64_t)ACPIPtr + sizeof(ACPI::ACPI::ACPIHeader) + (t * 8)));
SDTHdr =
(ACPI::ACPI::ACPIHeader *)(*(uint64_t *)((uint64_t)ACPIPtr +
sizeof(ACPI::ACPI::ACPIHeader) +
(t * 8)));
else
SDTHdr = (ACPI::ACPI::ACPIHeader *)(*(uint32_t *)((uint64_t)ACPIPtr + sizeof(ACPI::ACPI::ACPIHeader) + (t * 4)));
SDTHdr =
(ACPI::ACPI::ACPIHeader *)(*(uint32_t *)((uint64_t)ACPIPtr +
sizeof(ACPI::ACPI::ACPIHeader) +
(t * 4)));
#pragma GCC diagnostic pop
this->ReservePages(SDTHdr, TO_PAGES(SDTHdr->Length));
}

Core/Memory/SmartHeap.cpp (new file, 15 lines)

@@ -0,0 +1,15 @@
#include <memory.hpp>
namespace Memory
{
SmartHeap::SmartHeap(size_t Size)
{
this->Object = kmalloc(Size);
this->ObjectSize = Size;
}
SmartHeap::~SmartHeap()
{
kfree(this->Object);
}
}
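SmartHeap is a small RAII wrapper over kmalloc/kfree. A hedged usage sketch; only the constructor and destructor are defined here, and any accessor for the underlying pointer is assumed to come from the header:

void Example()
{
	Memory::SmartHeap sh(1024); /* kmalloc(1024), owned by sh */
	/* ... use the buffer through whatever accessor the header provides ... */
} /* ~SmartHeap() calls kfree automatically, even on early return */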