Restructured and rewritten entire codebase

Alex
2023-10-09 01:16:24 +03:00
parent 446a571018
commit 889e1522a3
484 changed files with 15683 additions and 14032 deletions

core/memory/brk.cpp Normal file

@@ -0,0 +1,94 @@
/*
This file is part of Fennix Kernel.
Fennix Kernel is free software: you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
Fennix Kernel is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Fennix Kernel. If not, see <https://www.gnu.org/licenses/>.
*/
#include <memory/brk.hpp>
#include <memory/virtual.hpp>
#include <memory/vma.hpp>
#include <assert.h>
#include <errno.h>
#include <debug.h>
namespace Memory
{
void *ProgramBreak::brk(void *Address)
{
if (HeapStart == 0x0 || Break == 0x0)
{
error("HeapStart or Break is 0x0");
return (void *)-EAGAIN;
}
/* Get the current program break. */
if (Address == nullptr)
return (void *)Break;
/* Check if the address is valid. */
if ((uintptr_t)Address < HeapStart)
return (void *)-ENOMEM;
Virtual vmm = Virtual(this->Table);
if ((uintptr_t)Address > Break)
{
/* Allocate more memory. */
size_t Pages = TO_PAGES(uintptr_t(Address) - Break);
void *Allocated = vma->RequestPages(Pages);
if (Allocated == nullptr)
return (void *)-ENOMEM;
/* Map the allocated pages. */
for (size_t i = 0; i < Pages; i++)
{
void *VirtAddr = (void *)(Break + (i * PAGE_SIZE));
void *PhysAddr = (void *)(uintptr_t(Allocated) + (i * PAGE_SIZE));
vmm.Map(VirtAddr, PhysAddr, RW | US);
}
Break = (uint64_t)Address;
return (void *)Break;
}
/* Free memory; Address is below the current break here. */
size_t Pages = TO_PAGES(Break - uintptr_t(Address));
vma->FreePages((void *)Address, Pages);
/* Unmap the freed pages. */
for (size_t i = 0; i < Pages; i++)
{
uint64_t Page = uintptr_t(Address) + (i * PAGE_SIZE);
vmm.Remap((void *)Page, (void *)Page, PTFlag::P | PTFlag::RW);
}
Break = (uint64_t)Address;
return (void *)Break;
}
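/* Usage sketch (hypothetical caller, not part of this file): a sys_brk
   handler could forward the user-supplied address and treat negative
   return values as errno codes.

   ProgramBreak pbrk(Process->PageTable, &Process->vma); // assumed fields
   void *NewBrk = pbrk.brk(RequestedAddress);
   if ((intptr_t)NewBrk < 0)
       return (intptr_t)NewBrk; // -EAGAIN or -ENOMEM
*/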
ProgramBreak::ProgramBreak(PageTable *Table, VirtualMemoryArea *vma)
{
assert(Table != nullptr);
assert(vma != nullptr);
this->Table = Table;
this->vma = vma;
}
ProgramBreak::~ProgramBreak()
{
/* Do nothing because VirtualMemoryArea
will be destroyed later. */
}
}


@@ -0,0 +1,212 @@
/*
This file is part of Fennix Kernel.
Fennix Kernel is free software: you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
Fennix Kernel is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Fennix Kernel. If not, see <https://www.gnu.org/licenses/>.
*/
#include <memory.hpp>
#include <acpi.hpp>
#include <debug.h>
#include <elf.h>
#ifdef DEBUG
#include <uart.hpp>
#endif
#include "../../kernel.h"
namespace Memory
{
__no_sanitize("alignment") void Physical::FindBitmapRegion(uintptr_t &BitmapAddress,
size_t &BitmapAddressSize)
{
size_t BitmapSize = (size_t)(bInfo.Memory.Size / PAGE_SIZE) / 8 + 1;
uintptr_t KernelStart = (uintptr_t)bInfo.Kernel.PhysicalBase;
uintptr_t KernelEnd = (uintptr_t)bInfo.Kernel.PhysicalBase + bInfo.Kernel.Size;
uintptr_t SectionsStart = 0x0;
uintptr_t SectionsEnd = 0x0;
uintptr_t Symbols = 0x0;
uintptr_t StringAddress = 0x0;
size_t SymbolSize = 0;
size_t StringSize = 0;
uintptr_t RSDPStart = 0x0;
uintptr_t RSDPEnd = 0x0;
if (bInfo.Kernel.Symbols.Num &&
bInfo.Kernel.Symbols.EntSize &&
bInfo.Kernel.Symbols.Shndx)
{
char *sections = r_cst(char *, bInfo.Kernel.Symbols.Sections);
SectionsStart = (uintptr_t)sections;
SectionsEnd = (uintptr_t)sections + bInfo.Kernel.Symbols.EntSize *
bInfo.Kernel.Symbols.Num;
for (size_t i = 0; i < bInfo.Kernel.Symbols.Num; ++i)
{
Elf_Shdr *sym = (Elf_Shdr *)&sections[bInfo.Kernel.Symbols.EntSize * i];
Elf_Shdr *str = (Elf_Shdr *)&sections[bInfo.Kernel.Symbols.EntSize *
sym->sh_link];
if (sym->sh_type == SHT_SYMTAB &&
str->sh_type == SHT_STRTAB)
{
Symbols = (uintptr_t)sym->sh_addr;
StringAddress = (uintptr_t)str->sh_addr;
SymbolSize = (size_t)sym->sh_size;
StringSize = (size_t)str->sh_size;
break;
}
}
}
#if defined(a86)
if (bInfo.RSDP)
{
RSDPStart = (uintptr_t)bInfo.RSDP;
RSDPEnd = (uintptr_t)bInfo.RSDP + sizeof(BootInfo::RSDPInfo);
#ifdef DEBUG
ACPI::ACPI::ACPIHeader *ACPIPtr;
bool XSDT = false;
if (bInfo.RSDP->Revision >= 2 && bInfo.RSDP->XSDTAddress)
{
ACPIPtr = (ACPI::ACPI::ACPIHeader *)bInfo.RSDP->XSDTAddress;
XSDT = true;
}
else
ACPIPtr = (ACPI::ACPI::ACPIHeader *)(uintptr_t)bInfo.RSDP->RSDTAddress;
if (Memory::Virtual().Check(ACPIPtr))
{
size_t TableSize = ((ACPIPtr->Length - sizeof(ACPI::ACPI::ACPIHeader)) /
(XSDT ? 8 : 4));
debug("There are %d ACPI tables", TableSize);
}
#endif
}
#elif defined(aa64)
#endif
for (uint64_t i = 0; i < bInfo.Memory.Entries; i++)
{
if (bInfo.Memory.Entry[i].Type == Usable)
{
uintptr_t RegionAddress = (uintptr_t)bInfo.Memory.Entry[i].BaseAddress;
uintptr_t RegionSize = bInfo.Memory.Entry[i].Length;
/* We don't want to use the first 1MB of memory. */
if (RegionAddress <= 0xFFFFF)
continue;
if ((BitmapSize + 0x100) > RegionSize)
{
debug("Region %p-%p (%d MiB) is too small for bitmap.",
(void *)RegionAddress,
(void *)(RegionAddress + RegionSize),
TO_MiB(RegionSize));
continue;
}
BitmapAddress = RegionAddress;
BitmapAddressSize = RegionSize;
struct AddrRange
{
uintptr_t Start;
uintptr_t End;
};
auto SortAddresses = [](AddrRange *Array, size_t n)
{
size_t MinimumIndex;
for (size_t i = 0; i < n - 1; i++)
{
MinimumIndex = i;
for (size_t j = i + 1; j < n; j++)
if (Array[j].Start < Array[MinimumIndex].Start)
MinimumIndex = j;
AddrRange tmp = Array[MinimumIndex];
Array[MinimumIndex] = Array[i];
Array[i] = tmp;
}
};
AddrRange PtrArray[] =
{
{KernelStart,
KernelEnd},
{SectionsStart,
SectionsEnd},
{Symbols,
Symbols + SymbolSize},
{StringAddress,
StringAddress + StringSize},
{RSDPStart,
RSDPEnd},
{(uintptr_t)bInfo.Kernel.FileBase,
(uintptr_t)bInfo.Kernel.FileBase + bInfo.Kernel.Size},
{(uintptr_t)bInfo.Modules[0].Address,
(uintptr_t)bInfo.Modules[0].Address + bInfo.Modules[0].Size},
{(uintptr_t)bInfo.Modules[1].Address,
(uintptr_t)bInfo.Modules[1].Address + bInfo.Modules[1].Size},
{(uintptr_t)bInfo.Modules[2].Address,
(uintptr_t)bInfo.Modules[2].Address + bInfo.Modules[2].Size},
{(uintptr_t)bInfo.Modules[3].Address,
(uintptr_t)bInfo.Modules[3].Address + bInfo.Modules[3].Size},
/* MAX_MODULES == 4 */
};
SortAddresses(PtrArray, sizeof(PtrArray) / sizeof(PtrArray[0]));
for (size_t i = 0; i < sizeof(PtrArray) / sizeof(PtrArray[0]); i++)
{
if (PtrArray[i].Start == 0x0)
continue;
uintptr_t Start = PtrArray[i].Start;
uintptr_t End = PtrArray[i].End;
debug("%#lx - %#lx", Start, End);
if (RegionAddress >= Start &&
End <= (RegionAddress + RegionSize))
{
BitmapAddress = End;
BitmapAddressSize = RegionSize - (End - RegionAddress);
}
}
if ((BitmapSize + 0x100) > BitmapAddressSize)
{
debug("Region %p-%p (%d MiB) is too small for bitmap.",
(void *)BitmapAddress,
(void *)(BitmapAddress + BitmapAddressSize),
TO_MiB(BitmapAddressSize));
continue;
}
debug("Found free memory for bitmap: %p (%d MiB)",
(void *)BitmapAddress,
TO_MiB(BitmapAddressSize));
break;
}
}
}
}


@@ -0,0 +1,154 @@
# Xalloc
Xalloc is a custom memory allocator designed for hobby operating systems.
It is written in C++ and provides a simple and efficient way to manage memory in your hobby OS.
#### ❗ This project is still in development and is not ready for use in production environments. ❗
---
## Features
- **Simple API** - A straightforward interface for allocating and freeing memory.
- **Efficient** - Uses a free-list to manage memory and is designed to be fast.
- **No dependencies** - Self-contained and designed to be easy to integrate into your OS.
---
## Getting Started
### Implementing missing functions
You will need to implement the following functions in your OS:
##### Wrapper.cpp
```cpp
extern "C" void *Xalloc_REQUEST_PAGES(Xsize_t Pages)
{
// ...
}
extern "C" void Xalloc_FREE_PAGES(void *Address, Xsize_t Pages)
{
// ...
}
/* Mandatory only if Xalloc_MapPages is set to true */
extern "C" void Xalloc_MAP_MEMORY(void *VirtualAddress, void *PhysicalAddress, Xsize_t Flags)
{
// ...
}
/* Mandatory only if Xalloc_MapPages is set to true */
extern "C" void Xalloc_UNMAP_MEMORY(void *VirtualAddress)
{
// ...
}
```
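For reference, here is a minimal host-side sketch of these hooks for testing Xalloc in user space (it assumes a POSIX host with `mmap`; in a real kernel you would call your physical memory manager instead):
```cpp
#include <sys/mman.h>

extern "C" void *Xalloc_REQUEST_PAGES(Xsize_t Pages)
{
    /* Back page requests with anonymous host memory. */
    void *ptr = mmap(nullptr, Pages * Xalloc_PAGE_SIZE,
                     PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    return ptr == MAP_FAILED ? nullptr : ptr;
}

extern "C" void Xalloc_FREE_PAGES(void *Address, Xsize_t Pages)
{
    munmap(Address, Pages * Xalloc_PAGE_SIZE);
}

/* With Xalloc_MapPages set to false, the mapping hooks can be empty stubs. */
extern "C" void Xalloc_MAP_MEMORY(void *, void *, Xsize_t) {}
extern "C" void Xalloc_UNMAP_MEMORY(void *) {}
```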
##### Xalloc.hpp
```cpp
#define Xalloc_StopOnFail <bool> /* Infinite loop on failure */
#define Xalloc_MapPages <bool> /* Map pages on allocation */
#define Xalloc_PAGE_SIZE <page size> /* <-- Replace with your page size */
#define Xalloc_trace(m, ...) <trace function>
#define Xalloc_warn(m, ...) <warning function>
#define Xalloc_err(m, ...) <error function>
#define XallocV1_def <define a lock> /* e.g. std::mutex Xalloc_lock; */
#define XallocV1_lock <lock function>
#define XallocV1_unlock <unlock function>
/* Same as above */
#define XallocV2_def <define a lock>
#define XallocV2_lock <lock function>
#define XallocV2_unlock <unlock function>
```
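As a concrete illustration, one possible configuration (every name here is a placeholder for your kernel's own logging and locking primitives):
```cpp
#define Xalloc_StopOnFail true
#define Xalloc_MapPages false
#define Xalloc_PAGE_SIZE 4096
#define Xalloc_trace(m, ...) my_trace(m, ##__VA_ARGS__)
#define Xalloc_warn(m, ...) my_warn(m, ##__VA_ARGS__)
#define Xalloc_err(m, ...) my_error(m, ##__VA_ARGS__)
#define XallocV1_def static MySpinLock XallocV1Lock
#define XallocV1_lock XallocV1Lock.Lock()
#define XallocV1_unlock XallocV1Lock.Unlock()
#define XallocV2_def static MySpinLock XallocV2Lock
#define XallocV2_lock XallocV2Lock.Lock()
#define XallocV2_unlock XallocV2Lock.Unlock()
```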
### Typical usage
```cpp
#include "Xalloc.hpp"
Xalloc::V1 *XallocV1Allocator = nullptr;
int main()
{
/* Virtual Base User SMAP */
XallocV1Allocator = new Xalloc::V1((void *)0xFFFFA00000000000, false, false);
void *p = XallocV1Allocator->malloc(1234);
/* ... */
XallocV1Allocator->free(p);
delete XallocV1Allocator;
return 0;
}
```
or
```cpp
#include "Xalloc.hpp"
int main()
{
/* Virtual Base User SMAP */
Xalloc::V1 XallocV1Allocator((void *)0xFFFFA00000000000, false, false);
void *p = XallocV1Allocator.malloc(1234);
/* ... */
XallocV1Allocator.free(p);
return 0;
}
```
---
## API
### Xalloc::V1
```cpp
void *malloc(Xsize_t Size);
```
Allocates a block of memory of size `Size` bytes.
If `Size` is 0, then `nullptr` is returned.
- `Size` - The size of the block to allocate in bytes.
<br><br>
```cpp
void free(void *Address);
```
Frees the memory block pointed to by `Address`.
If `Address` is `nullptr`, then no operation is performed.
- `Address` - The address of the memory block to free.
<br><br>
```cpp
void *calloc(Xsize_t NumberOfBlocks, Xsize_t Size);
```
Allocates a block of memory for an array of `NumberOfBlocks` elements, each of them `Size` bytes long.
If `NumberOfBlocks` or `Size` is 0, then `nullptr` is returned.
- `NumberOfBlocks` - The number of elements to allocate.
- `Size` - The size of each element in bytes.
<br><br>
```cpp
void *realloc(void *Address, Xsize_t Size);
```
Changes the size of the memory block pointed to by `Address` to `Size` bytes.
If `Address` is `nullptr`, then the call is equivalent to `malloc(Size)`.
If `Size` is equal to zero, and `Address` is not `nullptr`, then the call is equivalent to `free(Address)`.
- `Address` - The address of the memory block to resize.
- `Size` - The new size of the memory block in bytes.
---
## To-do
- [ ] Multiple free-lists for different block sizes


@@ -0,0 +1,40 @@
/*
This file is part of Fennix Kernel.
Fennix Kernel is free software: you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
Fennix Kernel is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Fennix Kernel. If not, see <https://www.gnu.org/licenses/>.
*/
#include "Xalloc.hpp"
#include <memory.hpp>
extern "C" void *Xalloc_REQUEST_PAGES(Xsize_t Pages)
{
return KernelAllocator.RequestPages(Pages);
}
extern "C" void Xalloc_FREE_PAGES(void *Address, Xsize_t Pages)
{
KernelAllocator.FreePages(Address, Pages);
}
extern "C" void Xalloc_MAP_MEMORY(void *VirtualAddress, void *PhysicalAddress, Xsize_t Flags)
{
Memory::Virtual(KernelPageTable).Map(VirtualAddress, PhysicalAddress, Flags);
}
extern "C" void Xalloc_UNMAP_MEMORY(void *VirtualAddress)
{
Memory::Virtual(KernelPageTable).Unmap(VirtualAddress);
}


@@ -0,0 +1,236 @@
/*
This file is part of Fennix Kernel.
Fennix Kernel is free software: you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
Fennix Kernel is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Fennix Kernel. If not, see <https://www.gnu.org/licenses/>.
*/
#ifndef __FENNIX_KERNEL_Xalloc_H__
#define __FENNIX_KERNEL_Xalloc_H__
#include <memory.hpp>
#include <lock.hpp>
#include <debug.h>
typedef __UINT8_TYPE__ Xuint8_t;
typedef __SIZE_TYPE__ Xsize_t;
typedef __UINTPTR_TYPE__ Xuintptr_t;
#define Xalloc_StopOnFail true
#define Xalloc_MapPages true
#define Xalloc_PAGE_SIZE PAGE_SIZE
#define Xalloc_trace(m, ...) trace(m, ##__VA_ARGS__)
#define Xalloc_warn(m, ...) warn(m, ##__VA_ARGS__)
#define Xalloc_err(m, ...) error(m, ##__VA_ARGS__)
#define XallocV1_def NewLock(XallocV1Lock)
#define XallocV1_lock XallocV1Lock.Lock(__FUNCTION__)
#define XallocV1_unlock XallocV1Lock.Unlock()
#define XallocV2_def NewLock(XallocV2Lock)
#define XallocV2_lock XallocV2Lock.Lock(__FUNCTION__)
#define XallocV2_unlock XallocV2Lock.Unlock()
namespace Xalloc
{
class V1
{
private:
void *BaseVirtualAddress = nullptr;
void *FirstBlock = nullptr;
void *LastBlock = nullptr;
bool UserMapping = false;
bool SMAPUsed = false;
public:
/** @brief Execute "stac" instruction if the kernel has SMAP enabled */
void Xstac();
/** @brief Execute "clac" instruction if the kernel has SMAP enabled */
void Xclac();
/**
* @brief Arrange the blocks to optimize the memory usage
* The allocator is not arranged by default
* to avoid performance issues.
* This function will defragment the memory
* and free the unused blocks.
*
* You should call this function when the
* kernel is idle or when it is not using
* the allocator.
*/
void Arrange();
/**
* @brief Allocate a new memory block
*
* @param Size Size of the block to allocate.
* @return void* Pointer to the allocated block.
*/
void *malloc(Xsize_t Size);
/**
* @brief Free a previously allocated block
*
* @param Address Address of the block to free.
*/
void free(void *Address);
/**
* @brief Allocate a new memory block
*
* @param NumberOfBlocks Number of blocks to allocate.
* @param Size Size of the block to allocate.
* @return void* Pointer to the allocated block.
*/
void *calloc(Xsize_t NumberOfBlocks, Xsize_t Size);
/**
* @brief Reallocate a previously allocated block
*
* @param Address Address of the block to reallocate.
* @param Size New size of the block.
* @return void* Pointer to the reallocated block.
*/
void *realloc(void *Address, Xsize_t Size);
/**
* @brief Construct a new Allocator object
*
* @param BaseVirtualAddress Virtual address to map the pages.
* @param UserMode Map the new pages with USER flag?
* @param SMAPEnabled Does the kernel have Supervisor Mode Access Prevention enabled?
*/
V1(void *BaseVirtualAddress, bool UserMode, bool SMAPEnabled);
/**
* @brief Destroy the Allocator object
*
*/
~V1();
};
class V2
{
private:
class Block
{
public:
int Sanity = 0xA110C;
Block *Next = nullptr;
bool IsFree = true;
V2 *ctx = nullptr;
Xuint8_t *Data = nullptr;
Xsize_t DataSize = 0;
void Check();
Block(Xsize_t Size, V2 *ctx);
~Block();
void *operator new(Xsize_t);
void operator delete(void *Address);
} __attribute__((packed, aligned((16))));
/* The base address of the virtual memory */
Xuintptr_t BaseVirtualAddress = 0x0;
/* The size of the heap */
Xsize_t HeapSize = 0x0;
/* The used size of the heap */
Xsize_t HeapUsed = 0x0;
Block *FirstBlock = nullptr;
Xuint8_t *AllocateHeap(Xsize_t Size);
void FreeHeap(Xuint8_t *At, Xsize_t Size);
Xsize_t Align(Xsize_t Size);
void *FindFreeBlock(Xsize_t Size,
Block *&CurrentBlock);
public:
/**
* Arrange the blocks to optimize the memory
* usage.
* The allocator is not arranged by default
* to avoid performance issues.
* This function will defragment the memory
* and free the unused blocks.
*
* You should call this function when the
* kernel is idle or when it is not using the
* allocator.
*/
void Arrange();
/**
* Allocate a new memory block
*
* @param Size Size of the block to allocate.
* @return void* Pointer to the allocated
* block.
*/
void *malloc(Xsize_t Size);
/**
* Free a previously allocated block
*
* @param Address Address of the block to
* free.
*/
void free(void *Address);
/**
* Allocate a new memory block
*
* @param NumberOfBlocks Number of blocks
* to allocate.
* @param Size Size of the block to allocate.
* @return void* Pointer to the allocated
* block.
*/
void *calloc(Xsize_t NumberOfBlocks,
Xsize_t Size);
/**
* Reallocate a previously allocated block
*
* @param Address Address of the block
* to reallocate.
* @param Size New size of the block.
* @return void* Pointer to the reallocated
* block.
*/
void *realloc(void *Address, Xsize_t Size);
/**
* Construct a new Allocator object
*
* @param VirtualBase Virtual address
* to map the pages.
*/
V2(void *VirtualBase);
/**
* Destroy the Allocator object
*/
~V2();
friend class Block;
};
}
#endif // !__FENNIX_KERNEL_Xalloc_H__


@@ -0,0 +1,290 @@
/*
This file is part of Fennix Kernel.
Fennix Kernel is free software: you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
Fennix Kernel is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Fennix Kernel. If not, see <https://www.gnu.org/licenses/>.
*/
#include "Xalloc.hpp"
XallocV1_def;
#define XALLOC_CONCAT(x, y) x##y
#define XStoP(d) (((d) + PAGE_SIZE - 1) / PAGE_SIZE)
#define XPtoS(d) ((d)*PAGE_SIZE)
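/* XStoP converts a byte count to a page count (rounding up);
   XPtoS converts a page count back to bytes. */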
#define Xalloc_BlockSanityKey 0xA110C
extern "C" void *Xalloc_REQUEST_PAGES(Xsize_t Pages);
extern "C" void Xalloc_FREE_PAGES(void *Address, Xsize_t Pages);
extern "C" void Xalloc_MAP_MEMORY(void *VirtualAddress, void *PhysicalAddress, Xsize_t Flags);
extern "C" void Xalloc_UNMAP_MEMORY(void *VirtualAddress);
// TODO: Replace memcpy with an optimized version
void *Xmemcpy(void *__restrict__ Destination, const void *__restrict__ Source, Xsize_t Length)
{
unsigned char *dst = (unsigned char *)Destination;
const unsigned char *src = (const unsigned char *)Source;
for (Xsize_t i = 0; i < Length; i++)
dst[i] = src[i];
return Destination;
}
// TODO: Replace memset with an optimized version
void *Xmemset(void *__restrict__ Destination, int Data, Xsize_t Length)
{
unsigned char *Buffer = (unsigned char *)Destination;
for (Xsize_t i = 0; i < Length; i++)
Buffer[i] = (unsigned char)Data;
return Destination;
}
namespace Xalloc
{
class Block
{
public:
void *Address = nullptr;
int Sanity = Xalloc_BlockSanityKey;
Xsize_t Size = 0;
Block *Next = nullptr;
Block *Last = nullptr;
bool IsFree = true;
bool Check()
{
if (this->Sanity != Xalloc_BlockSanityKey)
return false;
return true;
}
Block(Xsize_t Size)
{
this->Address = Xalloc_REQUEST_PAGES(XStoP(Size + 1));
this->Size = Size;
Xmemset(this->Address, 0, Size);
}
~Block()
{
Xalloc_FREE_PAGES(this->Address, XStoP(this->Size + 1));
}
/**
* @brief Overload new operator to allocate memory from the heap
* @param Size Unused
* @return void* Pointer to the allocated memory
*/
void *operator new(Xsize_t Size)
{
(void)Size; /* Size is unused; the block header has a fixed size. */
void *ptr = Xalloc_REQUEST_PAGES(XStoP(sizeof(Block)));
return ptr;
}
/**
* @brief Overload delete operator to free memory from the heap
* @param Address Pointer to the memory to free
*/
void operator delete(void *Address)
{
Xalloc_FREE_PAGES(Address, XStoP(sizeof(Block)));
}
} __attribute__((packed, aligned((16))));
class SmartSMAPClass
{
private:
V1 *allocator = nullptr;
public:
SmartSMAPClass(V1 *allocator)
{
this->allocator = allocator;
this->allocator->Xstac();
}
~SmartSMAPClass() { this->allocator->Xclac(); }
};
#define SmartSMAP SmartSMAPClass XALLOC_CONCAT(SmartSMAP##_, __COUNTER__)(this)
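/* SmartSMAP is an RAII guard: constructing it executes "stac" (allowing
   supervisor access to user-mode pages) and its destructor executes "clac"
   when the scope ends. __COUNTER__ gives every instance a unique name so
   the macro can appear more than once in the same function. */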
void V1::Xstac()
{
if (this->SMAPUsed)
{
#if defined(a86)
asm volatile("stac" ::
: "cc");
#endif
}
}
void V1::Xclac()
{
if (this->SMAPUsed)
{
#if defined(a86)
asm volatile("clac" ::
: "cc");
#endif
}
}
void V1::Arrange()
{
Xalloc_err("Arrange() is not implemented yet!");
}
void *V1::malloc(Xsize_t Size)
{
if (Size == 0)
{
Xalloc_warn("Attempted to allocate 0 bytes!");
return nullptr;
}
SmartSMAP;
XallocV1_lock;
if (this->FirstBlock == nullptr)
{
this->FirstBlock = new Block(Size);
((Block *)this->FirstBlock)->IsFree = false;
XallocV1_unlock;
return ((Block *)this->FirstBlock)->Address;
}
Block *CurrentBlock = ((Block *)this->FirstBlock);
while (CurrentBlock != nullptr)
{
if (!CurrentBlock->Check())
{
Xalloc_err("Block %#lx has an invalid sanity key! (%#x != %#x)",
(Xsize_t)CurrentBlock, CurrentBlock->Sanity, Xalloc_BlockSanityKey);
while (Xalloc_StopOnFail)
;
}
else if (CurrentBlock->IsFree && CurrentBlock->Size >= Size)
{
CurrentBlock->IsFree = false;
Xmemset(CurrentBlock->Address, 0, Size);
XallocV1_unlock;
return CurrentBlock->Address;
}
CurrentBlock = CurrentBlock->Next;
}
CurrentBlock = ((Block *)this->FirstBlock);
while (CurrentBlock->Next != nullptr)
CurrentBlock = CurrentBlock->Next;
CurrentBlock->Next = new Block(Size);
((Block *)CurrentBlock->Next)->Last = CurrentBlock;
((Block *)CurrentBlock->Next)->IsFree = false;
XallocV1_unlock;
return ((Block *)CurrentBlock->Next)->Address;
}
void V1::free(void *Address)
{
if (Address == nullptr)
{
Xalloc_warn("Attempted to free a null pointer!");
return;
}
SmartSMAP;
XallocV1_lock;
Block *CurrentBlock = ((Block *)this->FirstBlock);
while (CurrentBlock != nullptr)
{
if (!CurrentBlock->Check())
{
Xalloc_err("Block %#lx has an invalid sanity key! (%#x != %#x)",
(Xsize_t)CurrentBlock, CurrentBlock->Sanity, Xalloc_BlockSanityKey);
while (Xalloc_StopOnFail)
;
}
else if (CurrentBlock->Address == Address)
{
if (CurrentBlock->IsFree)
{
Xalloc_warn("Attempted to free an already freed pointer!");
XallocV1_unlock;
return;
}
CurrentBlock->IsFree = true;
XallocV1_unlock;
return;
}
CurrentBlock = CurrentBlock->Next;
}
Xalloc_err("Invalid address %#lx.", Address);
XallocV1_unlock;
}
void *V1::calloc(Xsize_t NumberOfBlocks, Xsize_t Size)
{
if (NumberOfBlocks == 0 || Size == 0)
{
Xalloc_warn("The %s%s%s is 0!",
NumberOfBlocks == 0 ? "NumberOfBlocks" : "",
NumberOfBlocks == 0 && Size == 0 ? " and " : "",
Size == 0 ? "Size" : "");
return nullptr;
}
return this->malloc(NumberOfBlocks * Size);
}
void *V1::realloc(void *Address, Xsize_t Size)
{
if (Address == nullptr)
return this->malloc(Size);
if (Size == 0)
{
this->free(Address);
return nullptr;
}
// SmartSMAP;
// XallocV1_lock;
// ...
// XallocV1_unlock;
// TODO: Implement realloc
this->free(Address);
return this->malloc(Size);
}
V1::V1(void *BaseVirtualAddress, bool UserMode, bool SMAPEnabled)
{
SmartSMAP;
XallocV1_lock;
this->SMAPUsed = SMAPEnabled;
this->UserMapping = UserMode;
this->BaseVirtualAddress = BaseVirtualAddress;
XallocV1_unlock;
}
V1::~V1()
{
SmartSMAP;
XallocV1_lock;
Xalloc_trace("Destructor not implemented yet.");
XallocV1_unlock;
}
}


@@ -0,0 +1,281 @@
/*
This file is part of Fennix Kernel.
Fennix Kernel is free software: you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
Fennix Kernel is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Fennix Kernel. If not, see <https://www.gnu.org/licenses/>.
*/
#include "Xalloc.hpp"
XallocV2_def;
#define XALLOC_CONCAT(x, y) x##y
#define XStoP(d) (((d) + PAGE_SIZE - 1) / PAGE_SIZE)
#define XPtoS(d) ((d)*PAGE_SIZE)
extern "C" void *Xalloc_REQUEST_PAGES(Xsize_t Pages);
extern "C" void Xalloc_FREE_PAGES(void *Address, Xsize_t Pages);
extern "C" void Xalloc_MAP_MEMORY(void *VirtualAddress,
void *PhysicalAddress,
Xsize_t Flags);
extern "C" void Xalloc_UNMAP_MEMORY(void *VirtualAddress);
#define Xalloc_BlockSanityKey 0xA110C
/*
[ IN DEVELOPMENT ]
*/
namespace Xalloc
{
void V2::Block::Check()
{
if (unlikely(this->Sanity != Xalloc_BlockSanityKey))
{
Xalloc_err("Block %#lx has an invalid sanity key! (%#x != %#x)",
this, this->Sanity, Xalloc_BlockSanityKey);
while (Xalloc_StopOnFail)
;
}
}
V2::Block::Block(Xsize_t Size, V2 *ctx)
{
this->ctx = ctx;
this->Data = ctx->AllocateHeap(Size);
this->DataSize = Size;
}
V2::Block::~Block()
{
}
void *V2::Block::operator new(Xsize_t)
{
constexpr Xsize_t bPgs = XStoP(sizeof(Block));
void *ptr = Xalloc_REQUEST_PAGES(bPgs);
/* TODO: Do something with the rest of
the allocated memory */
return ptr;
}
void V2::Block::operator delete(void *Address)
{
constexpr Xsize_t bPgs = XStoP(sizeof(Block));
Xalloc_FREE_PAGES(Address, bPgs);
}
/* ========================================= */
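/* AllocateHeap is a simple bump allocator: when the current heap cannot
   satisfy a request, it grows the heap by whole pages (optionally mapping
   them at BaseVirtualAddress + HeapSize); otherwise it hands out the next
   free offset after HeapUsed. */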
Xuint8_t *V2::AllocateHeap(Xsize_t Size)
{
Size = this->Align(Size);
Xsize_t Pages = XStoP(Size);
Xuint8_t *FinalAddress = nullptr;
if (this->HeapUsed + Size >= this->HeapSize)
{
void *Address = Xalloc_REQUEST_PAGES(Pages);
void *VirtualAddress = (void *)(this->BaseVirtualAddress + this->HeapSize);
if (Xalloc_MapPages)
{
for (Xsize_t i = 0; i < Pages; i++)
{
Xuintptr_t Page = i * Xalloc_PAGE_SIZE;
void *vAddress = (void *)((Xuintptr_t)VirtualAddress + Page);
Xalloc_MAP_MEMORY(vAddress, (void *)((Xuintptr_t)Address + Page), 0x3);
}
}
this->HeapSize += XPtoS(Pages);
FinalAddress = (Xuint8_t *)VirtualAddress;
}
else
FinalAddress = (Xuint8_t *)(this->BaseVirtualAddress + this->HeapUsed);
this->HeapUsed += Size;
return FinalAddress;
}
void V2::FreeHeap(Xuint8_t *At, Xsize_t Size)
{
Xsize_t Pages = XStoP(Size);
if (Xalloc_MapPages)
{
for (Xsize_t i = 0; i < Pages; i++)
{
Xuintptr_t Page = i * Xalloc_PAGE_SIZE;
void *VirtualAddress = (void *)((Xuintptr_t)At + Page);
Xalloc_UNMAP_MEMORY(VirtualAddress);
}
}
Xalloc_FREE_PAGES(At, Pages);
this->HeapUsed -= Size;
}
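/* Align rounds a size up to the next multiple of 16,
   e.g. 1 -> 16, 16 -> 16, 17 -> 32. */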
Xsize_t V2::Align(Xsize_t Size)
{
return (Size + 0xF) & ~0xF;
}
void *V2::FindFreeBlock(Xsize_t Size, Block *&CurrentBlock)
{
if (this->FirstBlock == nullptr)
{
this->FirstBlock = new Block(Size, this);
this->FirstBlock->IsFree = false;
return this->FirstBlock->Data;
}
while (true)
{
CurrentBlock->Check();
/* FIXME: This will waste a lot of space
need better algorithm */
if (CurrentBlock->IsFree &&
CurrentBlock->DataSize >= Size)
{
CurrentBlock->IsFree = false;
return CurrentBlock->Data;
}
if (CurrentBlock->Next == nullptr)
break;
CurrentBlock = CurrentBlock->Next;
}
return nullptr;
}
void V2::Arrange()
{
Xalloc_err("Arrange() is not implemented yet!");
}
void *V2::malloc(Xsize_t Size)
{
if (Size == 0)
{
Xalloc_warn("Attempted to allocate 0 bytes!");
return nullptr;
}
XallocV2_lock;
Block *CurrentBlock = this->FirstBlock;
void *ret = this->FindFreeBlock(Size, CurrentBlock);
if (ret)
{
XallocV2_unlock;
return ret;
}
CurrentBlock->Next = new Block(Size, this);
CurrentBlock->Next->IsFree = false;
XallocV2_unlock;
return CurrentBlock->Next->Data;
}
void V2::free(void *Address)
{
if (Address == nullptr)
{
Xalloc_warn("Attempted to free a null pointer!");
return;
}
XallocV2_lock;
Block *CurrentBlock = ((Block *)this->FirstBlock);
while (CurrentBlock != nullptr)
{
CurrentBlock->Check();
if (CurrentBlock->Data == Address)
{
if (CurrentBlock->IsFree)
Xalloc_warn("Attempted to free an already freed block! %#lx", Address);
CurrentBlock->IsFree = true;
XallocV2_unlock;
return;
}
CurrentBlock = CurrentBlock->Next;
}
Xalloc_err("Invalid address %#lx.", Address);
XallocV2_unlock;
}
void *V2::calloc(Xsize_t NumberOfBlocks, Xsize_t Size)
{
if (NumberOfBlocks == 0 || Size == 0)
{
Xalloc_warn("The %s%s%s is 0!",
NumberOfBlocks == 0 ? "NumberOfBlocks" : "",
NumberOfBlocks == 0 && Size == 0 ? " and " : "",
Size == 0 ? "Size" : "");
return nullptr;
}
return this->malloc(NumberOfBlocks * Size);
}
void *V2::realloc(void *Address, Xsize_t Size)
{
if (Address == nullptr && Size != 0)
return this->malloc(Size);
if (Size == 0)
{
this->free(Address);
return nullptr;
}
// XallocV2_lock;
// ...
// XallocV2_unlock;
// TODO: Implement realloc
static int once = 0;
if (!once++)
Xalloc_trace("realloc is stub!");
this->free(Address);
return this->malloc(Size);
}
V2::V2(void *VirtualBase)
{
if (VirtualBase == 0x0 && Xalloc_MapPages)
{
Xalloc_err("VirtualBase is 0x0 and Xalloc_MapPages is true!");
while (true)
;
}
XallocV2_lock;
this->BaseVirtualAddress = Xuintptr_t(VirtualBase);
XallocV2_unlock;
}
V2::~V2()
{
XallocV2_lock;
Xalloc_trace("Destructor not implemented yet.");
XallocV2_unlock;
}
}


@@ -0,0 +1,793 @@
#include "liballoc_1_1.h"
#pragma GCC diagnostic ignored "-Wconversion"
#pragma GCC diagnostic ignored "-Wsign-conversion"
/** Durand's Amazing Super Duper Memory functions. */
#define VERSION "1.1"
#define ALIGNMENT 16ul // 4ul ///< This is the byte alignment that memory must be allocated on. IMPORTANT for GTK and other stuff.
#define ALIGN_TYPE char /// unsigned char[16] /// unsigned short
#define ALIGN_INFO sizeof(ALIGN_TYPE) * 16 ///< Alignment information is stored right before the pointer. This is the number of bytes of information stored there.
#define USE_CASE1
#define USE_CASE2
#define USE_CASE3
#define USE_CASE4
#define USE_CASE5
/** This macro will conveniently align our pointer upwards */
#define ALIGN(ptr) \
if (ALIGNMENT > 1) \
{ \
uintptr_t diff; \
ptr = (void *)((uintptr_t)ptr + ALIGN_INFO); \
diff = (uintptr_t)ptr & (ALIGNMENT - 1); \
if (diff != 0) \
{ \
diff = ALIGNMENT - diff; \
ptr = (void *)((uintptr_t)ptr + diff); \
} \
*((ALIGN_TYPE *)((uintptr_t)ptr - ALIGN_INFO)) = \
diff + ALIGN_INFO; \
}
#define UNALIGN(ptr) \
if (ALIGNMENT > 1) \
{ \
uintptr_t diff = *((ALIGN_TYPE *)((uintptr_t)ptr - ALIGN_INFO)); \
if (diff < (ALIGNMENT + ALIGN_INFO)) \
{ \
ptr = (void *)((uintptr_t)ptr - diff); \
} \
}
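/* Worked example with ALIGNMENT == 16 and ALIGN_INFO == 16: given the raw
 * pointer 0x1008, ALIGN first advances it to 0x1018, then rounds up to the
 * 16-byte boundary 0x1020 and stores the total shift (0x18) at 0x1010.
 * UNALIGN reads that value back and subtracts it, recovering 0x1008. */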
#define LIBALLOC_MAGIC 0xc001c0de
#define LIBALLOC_DEAD 0xdeaddead
// #define LIBALLOCDEBUG 1
#define LIBALLOCINFO 1
#if defined LIBALLOCDEBUG || defined LIBALLOCINFO
// #include <stdio.h>
// #include <stdlib.h>
#include <debug.h>
// #define FLUSH() fflush(stdout)
#define FLUSH()
#define atexit(x)
#define printf(m, ...) trace(m, ##__VA_ARGS__)
#endif
/** A structure found at the top of all system allocated
* memory blocks. It details the usage of the memory block.
*/
struct liballoc_major
{
struct liballoc_major *prev; ///< Linked list information.
struct liballoc_major *next; ///< Linked list information.
unsigned int pages; ///< The number of pages in the block.
unsigned int size; ///< The size of the block in bytes (pages * page size).
unsigned int usage; ///< The number of bytes used in the block.
struct liballoc_minor *first; ///< A pointer to the first allocated memory in the block.
};
/** This is a structure found at the beginning of all
* sections in a major block which were allocated by a
* malloc, calloc, realloc call.
*/
struct liballoc_minor
{
struct liballoc_minor *prev; ///< Linked list information.
struct liballoc_minor *next; ///< Linked list information.
struct liballoc_major *block; ///< The owning block. A pointer to the major structure.
unsigned int magic; ///< A magic number to identify correctness.
unsigned int size; ///< The size of the memory allocated. Could be 1 byte or more.
unsigned int req_size; ///< The size of memory requested.
};
static struct liballoc_major *l_memRoot = NULL; ///< The root memory block acquired from the system.
static struct liballoc_major *l_bestBet = NULL; ///< The major with the most free memory.
static unsigned int l_pageSize = 4096; ///< The size of an individual page. Set up in liballoc_init.
static unsigned int l_pageCount = 16; ///< The number of pages to request per chunk. Set up in liballoc_init.
static unsigned long long l_allocated = 0; ///< Running total of allocated memory.
static unsigned long long l_inuse = 0; ///< Running total of used memory.
static long long l_warningCount = 0; ///< Number of warnings encountered
static long long l_errorCount = 0; ///< Number of actual errors
static long long l_possibleOverruns = 0; ///< Number of possible overruns
// *********** HELPER FUNCTIONS *******************************
__no_sanitize("undefined") static void *liballoc_memset(void *s, int c, size_t n)
{
unsigned int i;
for (i = 0; i < n; i++)
((char *)s)[i] = c;
return s;
}
__no_sanitize("undefined") static void *liballoc_memcpy(void *s1, const void *s2, size_t n)
{
char *cdest;
char *csrc;
unsigned int *ldest = (unsigned int *)s1;
unsigned int *lsrc = (unsigned int *)s2;
while (n >= sizeof(unsigned int))
{
*ldest++ = *lsrc++;
n -= sizeof(unsigned int);
}
cdest = (char *)ldest;
csrc = (char *)lsrc;
while (n > 0)
{
*cdest++ = *csrc++;
n -= 1;
}
return s1;
}
#if defined LIBALLOCDEBUG || defined LIBALLOCINFO
__no_sanitize("undefined") static void liballoc_dump()
{
#ifdef LIBALLOCDEBUG
struct liballoc_major *maj = l_memRoot;
struct liballoc_minor *min = NULL;
#endif
printf("liballoc: ------ Memory data ---------------\n");
printf("liballoc: System memory allocated: %i bytes\n", l_allocated);
printf("liballoc: Memory in used (malloc'ed): %i bytes\n", l_inuse);
printf("liballoc: Warning count: %i\n", l_warningCount);
printf("liballoc: Error count: %i\n", l_errorCount);
printf("liballoc: Possible overruns: %i\n", l_possibleOverruns);
#ifdef LIBALLOCDEBUG
while (maj != NULL)
{
printf("liballoc: %lx: total = %i, used = %i\n",
maj,
maj->size,
maj->usage);
min = maj->first;
while (min != NULL)
{
printf("liballoc: %lx: %i bytes\n",
min,
min->size);
min = min->next;
}
maj = maj->next;
}
#endif
FLUSH();
}
#endif
// ***************************************************************
__no_sanitize("undefined") static struct liballoc_major *allocate_new_page(unsigned int size)
{
unsigned int st;
struct liballoc_major *maj;
// This is how much space is required.
st = size + sizeof(struct liballoc_major);
st += sizeof(struct liballoc_minor);
// Perfect amount of space?
if ((st % l_pageSize) == 0)
st = st / (l_pageSize);
else
st = st / (l_pageSize) + 1;
// No, add the buffer.
// Make sure it's >= the minimum size.
if (st < l_pageCount)
st = l_pageCount;
maj = (struct liballoc_major *)liballoc_alloc(st);
if (maj == NULL)
{
l_warningCount += 1;
#if defined LIBALLOCDEBUG || defined LIBALLOCINFO
printf("liballoc: WARNING: liballoc_alloc( %i ) return NULL\n", st);
FLUSH();
#endif
return NULL; // uh oh, we ran out of memory.
}
maj->prev = NULL;
maj->next = NULL;
maj->pages = st;
maj->size = st * l_pageSize;
maj->usage = sizeof(struct liballoc_major);
maj->first = NULL;
l_allocated += maj->size;
#ifdef LIBALLOCDEBUG
printf("liballoc: Resource allocated %lx of %i pages (%i bytes) for %i size.\n", maj, st, maj->size, size);
printf("liballoc: Total memory usage = %i KB\n", (int)((l_allocated / (1024))));
FLUSH();
#endif
return maj;
}
__no_sanitize("undefined") void *PREFIX(malloc)(size_t req_size)
{
int startedBet = 0;
unsigned long long bestSize = 0;
void *p = NULL;
uintptr_t diff;
struct liballoc_major *maj;
struct liballoc_minor *min;
struct liballoc_minor *new_min;
unsigned long size = req_size;
// For alignment, we adjust size so there's enough space to align.
if (ALIGNMENT > 1)
{
size += ALIGNMENT + ALIGN_INFO;
}
// So, ideally, we really want an alignment of 0 or 1 in order
// to save space.
liballoc_lock();
if (size == 0)
{
l_warningCount += 1;
#if defined LIBALLOCDEBUG || defined LIBALLOCINFO
printf("liballoc: WARNING: alloc( 0 ) called from %lx\n",
__builtin_return_address(0));
FLUSH();
#endif
liballoc_unlock();
return PREFIX(malloc)(1);
}
if (l_memRoot == NULL)
{
#if defined LIBALLOCDEBUG || defined LIBALLOCINFO
#ifdef LIBALLOCDEBUG
printf("liballoc: initialization of liballoc " VERSION "\n");
#endif
atexit(liballoc_dump);
FLUSH();
#endif
// This is the first time we are being used.
l_memRoot = allocate_new_page(size);
if (l_memRoot == NULL)
{
liballoc_unlock();
#ifdef LIBALLOCDEBUG
printf("liballoc: initial l_memRoot initialization failed\n", p);
FLUSH();
#endif
return NULL;
}
#ifdef LIBALLOCDEBUG
printf("liballoc: set up first memory major %lx\n", l_memRoot);
FLUSH();
#endif
}
#ifdef LIBALLOCDEBUG
printf("liballoc: %lx PREFIX(malloc)( %i ): ",
__builtin_return_address(0),
size);
FLUSH();
#endif
// Now we need to bounce through every major and find enough space....
maj = l_memRoot;
startedBet = 0;
// Start at the best bet....
if (l_bestBet != NULL)
{
bestSize = l_bestBet->size - l_bestBet->usage;
if (bestSize > (size + sizeof(struct liballoc_minor)))
{
maj = l_bestBet;
startedBet = 1;
}
}
while (maj != NULL)
{
diff = maj->size - maj->usage;
// free memory in the block
if (bestSize < diff)
{
// Hmm.. this one has more memory than our bestBet. Remember!
l_bestBet = maj;
bestSize = diff;
}
#ifdef USE_CASE1
// CASE 1: There is not enough space in this major block.
if (diff < (size + sizeof(struct liballoc_minor)))
{
#ifdef LIBALLOCDEBUG
printf("CASE 1: Insufficient space in block %lx\n", maj);
FLUSH();
#endif
// Another major block next to this one?
if (maj->next != NULL)
{
maj = maj->next; // Hop to that one.
continue;
}
if (startedBet == 1) // If we started at the best bet,
{ // let's start all over again.
maj = l_memRoot;
startedBet = 0;
continue;
}
// Create a new major block next to this one and...
maj->next = allocate_new_page(size); // next one will be okay.
if (maj->next == NULL)
break; // no more memory.
maj->next->prev = maj;
maj = maj->next;
// .. fall through to CASE 2 ..
}
#endif
#ifdef USE_CASE2
// CASE 2: It's a brand new block.
if (maj->first == NULL)
{
maj->first = (struct liballoc_minor *)((uintptr_t)maj + sizeof(struct liballoc_major));
maj->first->magic = LIBALLOC_MAGIC;
maj->first->prev = NULL;
maj->first->next = NULL;
maj->first->block = maj;
maj->first->size = size;
maj->first->req_size = req_size;
maj->usage += size + sizeof(struct liballoc_minor);
l_inuse += size;
p = (void *)((uintptr_t)(maj->first) + sizeof(struct liballoc_minor));
ALIGN(p);
#ifdef LIBALLOCDEBUG
printf("CASE 2: returning %lx\n", p);
FLUSH();
#endif
liballoc_unlock(); // release the lock
return p;
}
#endif
#ifdef USE_CASE3
// CASE 3: Block in use and enough space at the start of the block.
diff = (uintptr_t)(maj->first);
diff -= (uintptr_t)maj;
diff -= sizeof(struct liballoc_major);
if (diff >= (size + sizeof(struct liballoc_minor)))
{
// Yes, space in front. Squeeze in.
maj->first->prev = (struct liballoc_minor *)((uintptr_t)maj + sizeof(struct liballoc_major));
maj->first->prev->next = maj->first;
maj->first = maj->first->prev;
maj->first->magic = LIBALLOC_MAGIC;
maj->first->prev = NULL;
maj->first->block = maj;
maj->first->size = size;
maj->first->req_size = req_size;
maj->usage += size + sizeof(struct liballoc_minor);
l_inuse += size;
p = (void *)((uintptr_t)(maj->first) + sizeof(struct liballoc_minor));
ALIGN(p);
#ifdef LIBALLOCDEBUG
printf("CASE 3: returning %lx\n", p);
FLUSH();
#endif
liballoc_unlock(); // release the lock
return p;
}
#endif
#ifdef USE_CASE4
// CASE 4: There is enough space in this block. But is it contiguous?
min = maj->first;
// Looping within the block now...
while (min != NULL)
{
// CASE 4.1: End of minors in a block. Space from last and end?
if (min->next == NULL)
{
// the rest of this block is free... is it big enough?
diff = (uintptr_t)(maj) + maj->size;
diff -= (uintptr_t)min;
diff -= sizeof(struct liballoc_minor);
diff -= min->size;
// minus already existing usage..
if (diff >= (size + sizeof(struct liballoc_minor)))
{
// yay....
min->next = (struct liballoc_minor *)((uintptr_t)min + sizeof(struct liballoc_minor) + min->size);
min->next->prev = min;
min = min->next;
min->next = NULL;
min->magic = LIBALLOC_MAGIC;
min->block = maj;
min->size = size;
min->req_size = req_size;
maj->usage += size + sizeof(struct liballoc_minor);
l_inuse += size;
p = (void *)((uintptr_t)min + sizeof(struct liballoc_minor));
ALIGN(p);
#ifdef LIBALLOCDEBUG
printf("CASE 4.1: returning %lx\n", p);
FLUSH();
#endif
liballoc_unlock(); // release the lock
return p;
}
}
// CASE 4.2: Is there space between two minors?
if (min->next != NULL)
{
// is the difference between here and next big enough?
diff = (uintptr_t)(min->next);
diff -= (uintptr_t)min;
diff -= sizeof(struct liballoc_minor);
diff -= min->size;
// minus our existing usage.
if (diff >= (size + sizeof(struct liballoc_minor)))
{
// yay......
new_min = (struct liballoc_minor *)((uintptr_t)min + sizeof(struct liballoc_minor) + min->size);
new_min->magic = LIBALLOC_MAGIC;
new_min->next = min->next;
new_min->prev = min;
new_min->size = size;
new_min->req_size = req_size;
new_min->block = maj;
min->next->prev = new_min;
min->next = new_min;
maj->usage += size + sizeof(struct liballoc_minor);
l_inuse += size;
p = (void *)((uintptr_t)new_min + sizeof(struct liballoc_minor));
ALIGN(p);
#ifdef LIBALLOCDEBUG
printf("CASE 4.2: returning %lx\n", p);
FLUSH();
#endif
liballoc_unlock(); // release the lock
return p;
}
} // min->next != NULL
min = min->next;
} // while min != NULL ...
#endif
#ifdef USE_CASE5
// CASE 5: Block full! Ensure next block and loop.
if (maj->next == NULL)
{
#ifdef LIBALLOCDEBUG
printf("CASE 5: block full\n");
FLUSH();
#endif
if (startedBet == 1)
{
maj = l_memRoot;
startedBet = 0;
continue;
}
// we've run out. we need more...
maj->next = allocate_new_page(size); // next one guaranteed to be okay
if (maj->next == NULL)
break; // uh oh, no more memory.....
maj->next->prev = maj;
}
#endif
maj = maj->next;
} // while (maj != NULL)
liballoc_unlock(); // release the lock
#ifdef LIBALLOCDEBUG
printf("All cases exhausted. No memory available.\n");
FLUSH();
#endif
#if defined LIBALLOCDEBUG || defined LIBALLOCINFO
printf("liballoc: WARNING: PREFIX(malloc)( %i ) returning NULL.\n", size);
liballoc_dump();
FLUSH();
#endif
return NULL;
}
__no_sanitize("undefined") void PREFIX(free)(void *ptr)
{
struct liballoc_minor *min;
struct liballoc_major *maj;
if (ptr == NULL)
{
l_warningCount += 1;
#if defined LIBALLOCDEBUG || defined LIBALLOCINFO
printf("liballoc: WARNING: PREFIX(free)( NULL ) called from %lx\n",
__builtin_return_address(0));
FLUSH();
#endif
return;
}
UNALIGN(ptr);
liballoc_lock(); // lockit
min = (struct liballoc_minor *)((uintptr_t)ptr - sizeof(struct liballoc_minor));
if (min->magic != LIBALLOC_MAGIC)
{
l_errorCount += 1;
// Check for overrun errors. For all bytes of LIBALLOC_MAGIC
if (
((min->magic & 0xFFFFFF) == (LIBALLOC_MAGIC & 0xFFFFFF)) ||
((min->magic & 0xFFFF) == (LIBALLOC_MAGIC & 0xFFFF)) ||
((min->magic & 0xFF) == (LIBALLOC_MAGIC & 0xFF)))
{
l_possibleOverruns += 1;
#if defined LIBALLOCDEBUG || defined LIBALLOCINFO
printf("liballoc: ERROR: Possible 1-3 byte overrun for magic %lx != %lx\n",
min->magic,
LIBALLOC_MAGIC);
FLUSH();
#endif
}
if (min->magic == LIBALLOC_DEAD)
{
#if defined LIBALLOCDEBUG || defined LIBALLOCINFO
printf("liballoc: ERROR: multiple PREFIX(free)() attempt on %lx from %lx.\n",
ptr,
__builtin_return_address(0));
FLUSH();
#endif
}
else
{
#if defined LIBALLOCDEBUG || defined LIBALLOCINFO
printf("liballoc: ERROR: Bad PREFIX(free)( %lx ) called from %lx\n",
ptr,
__builtin_return_address(0));
FLUSH();
#endif
}
// being lied to...
liballoc_unlock(); // release the lock
return;
}
#ifdef LIBALLOCDEBUG
printf("liballoc: %lx PREFIX(free)( %lx ): ",
__builtin_return_address(0),
ptr);
FLUSH();
#endif
maj = min->block;
l_inuse -= min->size;
maj->usage -= (min->size + sizeof(struct liballoc_minor));
min->magic = LIBALLOC_DEAD; // No mojo.
if (min->next != NULL)
min->next->prev = min->prev;
if (min->prev != NULL)
min->prev->next = min->next;
if (min->prev == NULL)
maj->first = min->next;
// Might empty the block. This was the first
// minor.
// We need to clean up after the majors now....
if (maj->first == NULL) // Block completely unused.
{
if (l_memRoot == maj)
l_memRoot = maj->next;
if (l_bestBet == maj)
l_bestBet = NULL;
if (maj->prev != NULL)
maj->prev->next = maj->next;
if (maj->next != NULL)
maj->next->prev = maj->prev;
l_allocated -= maj->size;
liballoc_free(maj, maj->pages);
}
else
{
if (l_bestBet != NULL)
{
int bestSize = l_bestBet->size - l_bestBet->usage;
int majSize = maj->size - maj->usage;
if (majSize > bestSize)
l_bestBet = maj;
}
}
#ifdef LIBALLOCDEBUG
printf("OK\n");
FLUSH();
#endif
liballoc_unlock(); // release the lock
}
__no_sanitize("undefined") void *PREFIX(calloc)(size_t nobj, size_t size)
{
int real_size;
void *p;
real_size = nobj * size;
p = PREFIX(malloc)(real_size);
if (p != NULL)
liballoc_memset(p, 0, real_size);
return p;
}
__no_sanitize("undefined") void *PREFIX(realloc)(void *p, size_t size)
{
void *ptr;
struct liballoc_minor *min;
unsigned int real_size;
// Honour the case of size == 0 => free old and return NULL
if (size == 0)
{
PREFIX(free)(p);
return NULL;
}
// In the case of a NULL pointer, return a simple malloc.
if (p == NULL)
return PREFIX(malloc)(size);
// Unalign the pointer if required.
ptr = p;
UNALIGN(ptr);
liballoc_lock(); // lockit
min = (struct liballoc_minor *)((uintptr_t)ptr - sizeof(struct liballoc_minor));
// Ensure it is a valid structure.
if (min->magic != LIBALLOC_MAGIC)
{
l_errorCount += 1;
// Check for overrun errors. For all bytes of LIBALLOC_MAGIC
if (
((min->magic & 0xFFFFFF) == (LIBALLOC_MAGIC & 0xFFFFFF)) ||
((min->magic & 0xFFFF) == (LIBALLOC_MAGIC & 0xFFFF)) ||
((min->magic & 0xFF) == (LIBALLOC_MAGIC & 0xFF)))
{
l_possibleOverruns += 1;
#if defined LIBALLOCDEBUG || defined LIBALLOCINFO
printf("liballoc: ERROR: Possible 1-3 byte overrun for magic %lx != %lx\n",
min->magic,
LIBALLOC_MAGIC);
FLUSH();
#endif
}
if (min->magic == LIBALLOC_DEAD)
{
#if defined LIBALLOCDEBUG || defined LIBALLOCINFO
printf("liballoc: ERROR: multiple PREFIX(free)() attempt on %lx from %lx.\n",
ptr,
__builtin_return_address(0));
FLUSH();
#endif
}
else
{
#if defined LIBALLOCDEBUG || defined LIBALLOCINFO
printf("liballoc: ERROR: Bad PREFIX(free)( %lx ) called from %lx\n",
ptr,
__builtin_return_address(0));
FLUSH();
#endif
}
// being lied to...
liballoc_unlock(); // release the lock
return NULL;
}
// Definitely a memory block.
real_size = min->req_size;
if (real_size >= size)
{
min->req_size = size;
liballoc_unlock();
return p;
}
liballoc_unlock();
// If we got here then we're reallocating to a block bigger than us.
ptr = PREFIX(malloc)(size); // We need to allocate new memory
liballoc_memcpy(ptr, p, real_size);
PREFIX(free)(p);
return ptr;
}


@@ -0,0 +1,74 @@
#ifndef _LIBALLOC_H
#define _LIBALLOC_H
#include <types.h>
/** \defgroup ALLOCHOOKS liballoc hooks
*
* These are the OS specific functions which need to
* be implemented on any platform that the library
* is expected to work on.
*/
/** @{ */
// If we are told to not define our own size_t, then we skip the define.
// #define _HAVE_UINTPTR_T
// typedef unsigned long uintptr_t;
// This lets you prefix malloc and friends
#define PREFIX(func) kliballoc_##func
#ifdef __cplusplus
extern "C"
{
#endif
/** This function is supposed to lock the memory data structures. It
* could be as simple as disabling interrupts or acquiring a spinlock.
* It's up to you to decide.
*
* \return 0 if the lock was acquired successfully. Anything else is
* failure.
*/
extern int liballoc_lock();
/** This function unlocks what was previously locked by the liballoc_lock
* function. If it disabled interrupts, it enables interrupts. If it
* had acquired a spinlock, it releases the spinlock. etc.
*
* \return 0 if the lock was successfully released.
*/
extern int liballoc_unlock();
/** This is the hook into the local system which allocates pages. It
* accepts an integer parameter which is the number of pages
* required. The page size was set up in the liballoc_init function.
*
* \return NULL if the pages were not allocated.
* \return A pointer to the allocated memory.
*/
extern void *liballoc_alloc(size_t);
/** This frees previously allocated memory. The void* parameter passed
* to the function is the exact same value returned from a previous
* liballoc_alloc call.
*
* The integer value is the number of pages to free.
*
* \return 0 if the memory was successfully freed.
*/
extern int liballoc_free(void *, size_t);
extern void *PREFIX(malloc)(size_t); ///< The standard function.
extern void *PREFIX(realloc)(void *, size_t); ///< The standard function.
extern void *PREFIX(calloc)(size_t, size_t); ///< The standard function.
extern void PREFIX(free)(void *); ///< The standard function.
#ifdef __cplusplus
}
#endif
/** @} */
#endif


@@ -0,0 +1,26 @@
#include <types.h>
#include <lock.hpp>
#include <memory.hpp>
NewLock(liballocLock);
EXTERNC int liballoc_lock()
{
return liballocLock.Lock(__FUNCTION__);
}
EXTERNC int liballoc_unlock()
{
return liballocLock.Unlock();
}
EXTERNC void *liballoc_alloc(size_t Pages)
{
return KernelAllocator.RequestPages(Pages);
}
EXTERNC int liballoc_free(void *Address, size_t Pages)
{
KernelAllocator.FreePages(Address, Pages);
return 0;
}

core/memory/memory.cpp Normal file

@@ -0,0 +1,619 @@
/*
This file is part of Fennix Kernel.
Fennix Kernel is free software: you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
Fennix Kernel is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Fennix Kernel. If not, see <https://www.gnu.org/licenses/>.
*/
#include <memory.hpp>
#include <convert.h>
#include <lock.hpp>
#include <debug.h>
#ifdef DEBUG
#include <uart.hpp>
#endif
#include "heap_allocators/Xalloc/Xalloc.hpp"
#include "heap_allocators/liballoc_1_1/liballoc_1_1.h"
#include "../../kernel.h"
// #define DEBUG_ALLOCATIONS 1
#ifdef DEBUG_ALLOCATIONS
#define memdbg(m, ...) \
debug(m, ##__VA_ARGS__); \
__sync
#else
#define memdbg(m, ...)
#endif
using namespace Memory;
Physical KernelAllocator;
PageTable *KernelPageTable = nullptr;
bool Page1GBSupport = false;
bool PSESupport = false;
MemoryAllocatorType AllocatorType = MemoryAllocatorType::Pages;
Xalloc::V1 *XallocV1Allocator = nullptr;
Xalloc::V2 *XallocV2Allocator = nullptr;
#ifdef DEBUG
NIF void tracepagetable(PageTable *pt)
{
for (int i = 0; i < 512; i++)
{
#if defined(a64)
if (pt->Entries[i].Present)
debug("Entry %03d: %x %x %x %x %x %x %x %p-%#llx", i,
pt->Entries[i].Present, pt->Entries[i].ReadWrite,
pt->Entries[i].UserSupervisor, pt->Entries[i].WriteThrough,
pt->Entries[i].CacheDisable, pt->Entries[i].Accessed,
pt->Entries[i].ExecuteDisable, pt->Entries[i].Address << 12,
pt->Entries[i]);
#elif defined(a32)
#elif defined(aa64)
#endif
}
}
#endif
NIF void MapFromZero(PageTable *PT)
{
debug("Mapping from 0x0 to %#llx", bInfo.Memory.Size);
Virtual va = Virtual(PT);
size_t MemSize = bInfo.Memory.Size;
if (Page1GBSupport && PSESupport)
{
/* Map the first 100MB of memory as 4KB pages */
// uintptr_t Physical4KBSectionStart = 0x10000000;
// va.Map((void *)0,
// (void *)0,
// Physical4KBSectionStart,
// PTFlag::RW);
// va.Map((void *)Physical4KBSectionStart,
// (void *)Physical4KBSectionStart,
// MemSize - Physical4KBSectionStart,
// PTFlag::RW,
// Virtual::MapType::OneGiB);
va.Map((void *)0, (void *)0, MemSize, PTFlag::RW);
}
else
va.Map((void *)0, (void *)0, MemSize, PTFlag::RW);
va.Unmap((void *)0);
}
NIF void MapFramebuffer(PageTable *PT, bool PSE, bool OneGB)
{
debug("Mapping Framebuffer");
Virtual va = Virtual(PT);
int itrfb = 0;
while (1)
{
if (!bInfo.Framebuffer[itrfb].BaseAddress)
break;
size_t fbSize = bInfo.Framebuffer[itrfb].Pitch * bInfo.Framebuffer[itrfb].Height;
if (PSE && OneGB)
{
va.OptimizedMap(bInfo.Framebuffer[itrfb].BaseAddress,
bInfo.Framebuffer[itrfb].BaseAddress,
fbSize, PTFlag::RW | PTFlag::US | PTFlag::G);
}
else
{
va.Map(bInfo.Framebuffer[itrfb].BaseAddress,
bInfo.Framebuffer[itrfb].BaseAddress,
fbSize, PTFlag::RW | PTFlag::US | PTFlag::G);
}
itrfb++;
}
}
NIF void MapKernel(PageTable *PT)
{
debug("Mapping Kernel");
/* RWX */
uintptr_t BootstrapStart = (uintptr_t)&_bootstrap_start;
uintptr_t BootstrapEnd = (uintptr_t)&_bootstrap_end;
/* RX */
uintptr_t KernelTextStart = (uintptr_t)&_kernel_text_start;
uintptr_t KernelTextEnd = (uintptr_t)&_kernel_text_end;
/* RW */
uintptr_t KernelDataStart = (uintptr_t)&_kernel_data_start;
uintptr_t KernelDataEnd = (uintptr_t)&_kernel_data_end;
/* R */
uintptr_t KernelRoDataStart = (uintptr_t)&_kernel_rodata_start;
uintptr_t KernelRoDataEnd = (uintptr_t)&_kernel_rodata_end;
/* RW */
uintptr_t KernelBssStart = (uintptr_t)&_kernel_bss_start;
uintptr_t KernelBssEnd = (uintptr_t)&_kernel_bss_end;
#ifdef DEBUG
uintptr_t KernelStart = (uintptr_t)&_kernel_start;
uintptr_t KernelEnd = (uintptr_t)&_kernel_end;
#endif
uintptr_t KernelFileStart = (uintptr_t)bInfo.Kernel.FileBase;
uintptr_t KernelFileEnd = KernelFileStart + bInfo.Kernel.Size;
debug("Bootstrap: %#lx-%#lx", BootstrapStart, BootstrapEnd);
debug("Kernel text: %#lx-%#lx", KernelTextStart, KernelTextEnd);
debug("Kernel data: %#lx-%#lx", KernelDataStart, KernelDataEnd);
debug("Kernel rodata: %#lx-%#lx", KernelRoDataStart, KernelRoDataEnd);
debug("Kernel bss: %#lx-%#lx", KernelBssStart, KernelBssEnd);
debug("Kernel: %#lx-%#lx", KernelStart, KernelEnd);
debug("Kernel file: %#lx-%#lx", KernelFileStart, KernelFileEnd);
debug("File size: %ld KiB", TO_KiB(bInfo.Kernel.Size));
debug(".bootstrap size: %ld KiB", TO_KiB(BootstrapEnd - BootstrapStart));
debug(".text size: %ld KiB", TO_KiB(KernelTextEnd - KernelTextStart));
debug(".data size: %ld KiB", TO_KiB(KernelDataEnd - KernelDataStart));
debug(".rodata size: %ld KiB", TO_KiB(KernelRoDataEnd - KernelRoDataStart));
debug(".bss size: %ld KiB", TO_KiB(KernelBssEnd - KernelBssStart));
uintptr_t BaseKernelMapAddress = (uintptr_t)bInfo.Kernel.PhysicalBase;
debug("Base kernel map address: %#lx", BaseKernelMapAddress);
uintptr_t k;
Virtual va = Virtual(PT);
/* Bootstrap section */
if (BaseKernelMapAddress == BootstrapStart)
{
for (k = BootstrapStart; k < BootstrapEnd; k += PAGE_SIZE)
{
va.Map((void *)k, (void *)BaseKernelMapAddress, PTFlag::RW | PTFlag::G);
KernelAllocator.ReservePage((void *)BaseKernelMapAddress);
BaseKernelMapAddress += PAGE_SIZE;
}
}
else
{
trace("Ignoring bootstrap section.");
/* Bootstrap section must be mapped at 0x100000. */
}
/* Text section */
for (k = KernelTextStart; k < KernelTextEnd; k += PAGE_SIZE)
{
va.Map((void *)k, (void *)BaseKernelMapAddress, PTFlag::RW | PTFlag::G);
KernelAllocator.ReservePage((void *)BaseKernelMapAddress);
BaseKernelMapAddress += PAGE_SIZE;
}
/* Data section */
for (k = KernelDataStart; k < KernelDataEnd; k += PAGE_SIZE)
{
va.Map((void *)k, (void *)BaseKernelMapAddress, PTFlag::RW | PTFlag::G);
KernelAllocator.ReservePage((void *)BaseKernelMapAddress);
BaseKernelMapAddress += PAGE_SIZE;
}
/* Read only data section */
for (k = KernelRoDataStart; k < KernelRoDataEnd; k += PAGE_SIZE)
{
va.Map((void *)k, (void *)BaseKernelMapAddress, PTFlag::G);
KernelAllocator.ReservePage((void *)BaseKernelMapAddress);
BaseKernelMapAddress += PAGE_SIZE;
}
/* Block starting symbol section */
for (k = KernelBssStart; k < KernelBssEnd; k += PAGE_SIZE)
{
va.Map((void *)k, (void *)BaseKernelMapAddress, PTFlag::RW | PTFlag::G);
KernelAllocator.ReservePage((void *)BaseKernelMapAddress);
BaseKernelMapAddress += PAGE_SIZE;
}
debug("Base kernel map address: %#lx", BaseKernelMapAddress);
/* Kernel file */
if (KernelFileStart != 0)
{
for (k = KernelFileStart; k < KernelFileEnd; k += PAGE_SIZE)
{
va.Map((void *)k, (void *)k, PTFlag::G);
KernelAllocator.ReservePage((void *)k);
}
}
}
NIF void InitializeMemoryManagement()
{
#ifdef DEBUG
#ifndef a32
for (uint64_t i = 0; i < bInfo.Memory.Entries; i++)
{
uintptr_t Base = r_cst(uintptr_t, bInfo.Memory.Entry[i].BaseAddress);
size_t Length = bInfo.Memory.Entry[i].Length;
uintptr_t End = Base + Length;
const char *Type = "Unknown";
switch (bInfo.Memory.Entry[i].Type)
{
case likely(Usable):
Type = "Usable";
break;
case Reserved:
Type = "Reserved";
break;
case ACPIReclaimable:
Type = "ACPI Reclaimable";
break;
case ACPINVS:
Type = "ACPI NVS";
break;
case BadMemory:
Type = "Bad Memory";
break;
case BootloaderReclaimable:
Type = "Bootloader Reclaimable";
break;
case KernelAndModules:
Type = "Kernel and Modules";
break;
case Framebuffer:
Type = "Framebuffer";
break;
default:
break;
}
debug("%ld: %p-%p %s",
i,
Base,
End,
Type);
}
#endif // a32
#endif // DEBUG
trace("Initializing Physical Memory Manager");
// KernelAllocator = Physical(); <- Already called in the constructor
KernelAllocator.Init();
debug("Memory Info:\n\n%lld MiB / %lld MiB (%lld MiB reserved)\n",
TO_MiB(KernelAllocator.GetUsedMemory()),
TO_MiB(KernelAllocator.GetTotalMemory()),
TO_MiB(KernelAllocator.GetReservedMemory()));
/* -- Debugging --
size_t bmap_size = KernelAllocator.GetPageBitmap().Size;
for (size_t i = 0; i < bmap_size; i++)
{
bool idx = KernelAllocator.GetPageBitmap().Get(i);
if (idx == true)
debug("Page %04d: %#lx", i, i * PAGE_SIZE);
}
inf_loop debug("Alloc.: %#lx", KernelAllocator.RequestPage());
*/
trace("Initializing Virtual Memory Manager");
KernelPageTable = (PageTable *)KernelAllocator.RequestPages(TO_PAGES(PAGE_SIZE + 1));
memset(KernelPageTable, 0, PAGE_SIZE);
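/* Note: TO_PAGES(PAGE_SIZE + 1) rounds up to two pages while only the
   first page (the PageTable itself) is cleared; the second page looks
   like deliberate slack. */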
if (strcmp(CPU::Vendor(), x86_CPUID_VENDOR_AMD) == 0)
{
CPU::x86::AMD::CPUID0x80000001 cpuid;
cpuid.Get();
PSESupport = cpuid.EDX.PSE;
Page1GBSupport = cpuid.EDX.Page1GB;
}
else if (strcmp(CPU::Vendor(), x86_CPUID_VENDOR_INTEL) == 0)
{
CPU::x86::Intel::CPUID0x00000001 cpuid;
cpuid.Get();
PSESupport = cpuid.EDX.PSE;
}
if (PSESupport)
{
#if defined(a64)
CPU::x64::CR4 cr4 = CPU::x64::readcr4();
cr4.PSE = 1;
CPU::x64::writecr4(cr4);
#elif defined(a32)
CPU::x32::CR4 cr4 = CPU::x32::readcr4();
cr4.PSE = 1;
CPU::x32::writecr4(cr4);
#elif defined(aa64)
#endif
trace("PSE Support Enabled");
}
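/* With CR4.PSE set, 4 MiB pages become available on 32-bit x86. On x86_64,
   large (2 MiB / 1 GiB) pages are selected by the PS bit in the paging
   structures instead, so setting PSE there is effectively a no-op. */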
#ifdef DEBUG
if (Page1GBSupport)
debug("1GB Page Support Enabled");
#endif
MapFromZero(KernelPageTable);
MapFramebuffer(KernelPageTable, PSESupport, Page1GBSupport);
MapKernel(KernelPageTable);
trace("Applying new page table from address %#lx", KernelPageTable);
#ifdef DEBUG
tracepagetable(KernelPageTable);
#endif
CPU::PageTable(KernelPageTable);
debug("Page table updated.");
XallocV1Allocator = new Xalloc::V1((void *)KERNEL_HEAP_BASE, false, false);
XallocV2Allocator = new Xalloc::V2((void *)KERNEL_HEAP_BASE);
trace("XallocV1 Allocator initialized at %#lx", XallocV1Allocator);
trace("XallocV2 Allocator initialized at %#lx", XallocV2Allocator);
/* FIXME: Read kernel config */
AllocatorType = MemoryAllocatorType::liballoc11;
}
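/* Illustrative sketch, not part of the commit: once InitializeMemoryManagement()
   has run, the kernel heap is usable through the wrappers defined below, e.g.

       void *buf = Memory::malloc(256);  // zero-initialized, see below
       buf = Memory::realloc(buf, 512);
       Memory::free(buf);

   (kmalloc/kfree seen elsewhere in this commit are assumed to resolve to
   these functions.) */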
void *malloc(size_t Size)
{
assert(Size > 0);
memdbg("malloc(%d)->[%s]", Size,
KernelSymbolTable ? KernelSymbolTable->GetSymbolFromAddress((uintptr_t)__builtin_return_address(0))
: "Unknown");
void *ret = nullptr;
switch (AllocatorType)
{
case MemoryAllocatorType::Pages:
{
ret = KernelAllocator.RequestPages(TO_PAGES(Size + 1));
break;
}
case MemoryAllocatorType::XallocV1:
{
ret = XallocV1Allocator->malloc(Size);
break;
}
case MemoryAllocatorType::XallocV2:
{
ret = XallocV2Allocator->malloc(Size);
break;
}
case MemoryAllocatorType::liballoc11:
{
ret = PREFIX(malloc)(Size);
break;
}
default:
{
error("Unknown allocator type %d", AllocatorType);
CPU::Stop();
}
}
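/* Note: every path above returns fresh memory, and the memset below makes
   malloc() hand out zero-initialized blocks; callers pay a small cost for
   predictable contents. */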
memset(ret, 0, Size);
return ret;
}
void *calloc(size_t n, size_t Size)
{
assert(Size > 0);
memdbg("calloc(%d, %d)->[%s]", n, Size,
KernelSymbolTable ? KernelSymbolTable->GetSymbolFromAddress((uintptr_t)__builtin_return_address(0))
: "Unknown");
void *ret = nullptr;
switch (AllocatorType)
{
case MemoryAllocatorType::Pages:
{
ret = KernelAllocator.RequestPages(TO_PAGES(n * Size + 1));
break;
}
case MemoryAllocatorType::XallocV1:
{
ret = XallocV1Allocator->calloc(n, Size);
break;
}
case MemoryAllocatorType::XallocV2:
{
ret = XallocV2Allocator->calloc(n, Size);
break;
}
case MemoryAllocatorType::liballoc11:
{
/* liballoc's calloc already zero-fills, so return directly. */
return PREFIX(calloc)(n, Size);
}
default:
{
error("Unknown allocator type %d", AllocatorType);
CPU::Stop();
}
}
memset(ret, 0, n * Size);
return ret;
}
void *realloc(void *Address, size_t Size)
{
assert(Size > 0);
memdbg("realloc(%#lx, %d)->[%s]", Address, Size,
KernelSymbolTable ? KernelSymbolTable->GetSymbolFromAddress((uintptr_t)__builtin_return_address(0))
: "Unknown");
void *ret = nullptr;
switch (AllocatorType)
{
case MemoryAllocatorType::Pages:
{
ret = KernelAllocator.RequestPages(TO_PAGES(Size + 1)); // WARNING: Potential memory leak
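/* The old block is neither copied nor freed here: the page allocator
   does not track the original allocation size, hence the warning. */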
break;
}
case MemoryAllocatorType::XallocV1:
{
ret = XallocV1Allocator->realloc(Address, Size);
break;
}
case MemoryAllocatorType::XallocV2:
{
ret = XallocV2Allocator->realloc(Address, Size);
break;
}
case MemoryAllocatorType::liballoc11:
{
/* liballoc's realloc preserves the contents; return directly. */
return PREFIX(realloc)(Address, Size);
}
default:
{
error("Unknown allocator type %d", AllocatorType);
CPU::Stop();
}
}
/* No memset here: realloc must preserve the existing contents. */
return ret;
}
void free(void *Address)
{
assert(Address != nullptr);
memdbg("free(%#lx)->[%s]", Address,
KernelSymbolTable ? KernelSymbolTable->GetSymbolFromAddress((uintptr_t)__builtin_return_address(0))
: "Unknown");
switch (AllocatorType)
{
case MemoryAllocatorType::Pages:
{
KernelAllocator.FreePage(Address); // WARNING: Potential memory leak
break;
}
case MemoryAllocatorType::XallocV1:
{
XallocV1Allocator->free(Address);
break;
}
case MemoryAllocatorType::XallocV2:
{
XallocV2Allocator->free(Address);
break;
}
case MemoryAllocatorType::liballoc11:
{
PREFIX(free)(Address);
break;
}
default:
{
error("Unknown allocator type %d", AllocatorType);
CPU::Stop();
}
}
}
void *operator new(std::size_t Size)
{
assert(Size > 0);
memdbg("new(%d)->[%s]", Size,
KernelSymbolTable ? KernelSymbolTable->GetSymbolFromAddress((uintptr_t)__builtin_return_address(0))
: "Unknown");
void *ret = malloc(Size);
return ret;
}
void *operator new[](std::size_t Size)
{
assert(Size > 0);
memdbg("new[](%d)->[%s]", Size,
KernelSymbolTable ? KernelSymbolTable->GetSymbolFromAddress((uintptr_t)__builtin_return_address(0))
: "Unknown");
void *ret = malloc(Size);
return ret;
}
void *operator new(std::size_t Size, std::align_val_t Alignment)
{
assert(Size > 0);
memdbg("new(%d, %d)->[%s]", Size, Alignment,
KernelSymbolTable ? KernelSymbolTable->GetSymbolFromAddress((uintptr_t)__builtin_return_address(0))
: "Unknown");
fixme("operator new with alignment(%#lx) is not implemented",
Alignment);
void *ret = malloc(Size);
return ret;
}
void operator delete(void *Pointer)
{
assert(Pointer != nullptr);
memdbg("delete(%#lx)->[%s]", Pointer,
KernelSymbolTable ? KernelSymbolTable->GetSymbolFromAddress((uintptr_t)__builtin_return_address(0))
: "Unknown");
free(Pointer);
}
void operator delete[](void *Pointer)
{
assert(Pointer != nullptr);
memdbg("delete[](%#lx)->[%s]", Pointer,
KernelSymbolTable ? KernelSymbolTable->GetSymbolFromAddress((uintptr_t)__builtin_return_address(0))
: "Unknown");
free(Pointer);
}
void operator delete(void *Pointer, long unsigned int Size)
{
assert(Pointer != nullptr);
assert(Size > 0);
memdbg("delete(%#lx, %d)->[%s]",
Pointer, Size,
KernelSymbolTable ? KernelSymbolTable->GetSymbolFromAddress((uintptr_t)__builtin_return_address(0))
: "Unknown");
free(Pointer);
}
void operator delete[](void *Pointer, long unsigned int Size)
{
assert(Pointer != nullptr);
assert(Size > 0);
memdbg("delete[](%#lx, %d)->[%s]",
Pointer, Size,
KernelSymbolTable ? KernelSymbolTable->GetSymbolFromAddress((uintptr_t)__builtin_return_address(0))
: "Unknown");
free(Pointer);
}

View File

@ -0,0 +1,41 @@
#include <memory.hpp>
#include <filesystem.hpp>
namespace Memory
{
void PageTable::Update()
{
#if defined(a86)
asmv("mov %0, %%cr3" ::"r"(this));
#elif defined(aa64)
asmv("msr ttbr0_el1, %0" ::"r"(this));
#endif
}
PageTable PageTable::Fork()
{
PageTable NewTable;
memcpy(&NewTable, this, sizeof(PageTable));
return NewTable;
}
template <typename T>
T PageTable::Get(T Address)
{
Virtual vmm = Virtual(this);
void *PhysAddr = vmm.GetPhysical((void *)Address);
/* Keep the in-page offset when returning the physical alias. */
uintptr_t Offset = uintptr_t(Address) & 0xFFF;
return (T)(uintptr_t(PhysAddr) + Offset);
}
/* Templates */
template struct stat *PageTable::Get<struct stat *>(struct stat *);
template const char *PageTable::Get<const char *>(const char *);
template const void *PageTable::Get<const void *>(const void *);
template uintptr_t PageTable::Get<uintptr_t>(uintptr_t);
template void *PageTable::Get<void *>(void *);
/* ... */
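/* Illustrative use (hypothetical syscall path): given a user pointer `path`
   captured during a syscall, `thisProcess->PageTable->Get(path)` resolves it
   through this table and returns the physical-frame alias the kernel can
   dereference, assuming physical memory is identity-accessible. */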
}

54
core/memory/pmi.cpp Normal file
View File

@ -0,0 +1,54 @@
/*
This file is part of Fennix Kernel.
Fennix Kernel is free software: you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
Fennix Kernel is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Fennix Kernel. If not, see <https://www.gnu.org/licenses/>.
*/
#include <memory.hpp>
namespace Memory
{
Virtual::PageMapIndexer::PageMapIndexer(uintptr_t VirtualAddress)
{
uintptr_t Address = VirtualAddress;
#if defined(a64)
Address >>= 12;
this->PTEIndex = Address & 0x1FF;
Address >>= 9;
this->PDEIndex = Address & 0x1FF;
Address >>= 9;
this->PDPTEIndex = Address & 0x1FF;
Address >>= 9;
this->PMLIndex = Address & 0x1FF;
#elif defined(a32)
Address >>= 12;
this->PTEIndex = Address & 0x3FF;
Address >>= 10;
this->PDEIndex = Address & 0x3FF;
#elif defined(aa64)
#endif
if (VirtualAddress > PAGE_SIZE)
{
assert(
this->PTEIndex != 0 ||
this->PDEIndex != 0
#if defined(a64)
|| this->PDPTEIndex != 0 ||
this->PMLIndex != 0
#endif
);
}
}
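/* Worked example (a64): VirtualAddress = 0xFFFFFFFF80000000 decomposes into
   PMLIndex = 511, PDPTEIndex = 510, PDEIndex = 0, PTEIndex = 0 -- the
   classic higher-half kernel slot. Each 9-bit field selects one of the
   512 entries at its paging level. */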
}

352
core/memory/pmm.cpp Normal file
View File

@ -0,0 +1,352 @@
/*
This file is part of Fennix Kernel.
Fennix Kernel is free software: you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
Fennix Kernel is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Fennix Kernel. If not, see <https://www.gnu.org/licenses/>.
*/
#include <memory.hpp>
#include <acpi.hpp>
#include <debug.h>
#include <elf.h>
#ifdef DEBUG
#include <uart.hpp>
#endif
#include "../../kernel.h"
namespace Memory
{
uint64_t Physical::GetTotalMemory()
{
return this->TotalMemory.load();
}
uint64_t Physical::GetFreeMemory()
{
return this->FreeMemory.load();
}
uint64_t Physical::GetReservedMemory()
{
return this->ReservedMemory.load();
}
uint64_t Physical::GetUsedMemory()
{
return this->UsedMemory.load();
}
bool Physical::SwapPage(void *Address)
{
fixme("%p", Address);
return false;
}
bool Physical::SwapPages(void *Address, size_t PageCount)
{
for (size_t i = 0; i < PageCount; i++)
{
if (!this->SwapPage((void *)((uintptr_t)Address + (i * PAGE_SIZE))))
return false;
}
return true; /* Every page was swapped successfully. */
}
bool Physical::UnswapPage(void *Address)
{
fixme("%p", Address);
return false;
}
bool Physical::UnswapPages(void *Address, size_t PageCount)
{
for (size_t i = 0; i < PageCount; i++)
{
if (!this->UnswapPage((void *)((uintptr_t)Address + (i * PAGE_SIZE))))
return false;
}
return true; /* Every page was unswapped successfully. */
}
void *Physical::RequestPage()
{
SmartLock(this->MemoryLock);
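/* First-fit scan: PageBitmapIndex is a member, so the cursor persists
   across calls and the search resumes where the previous allocation
   stopped instead of rescanning from zero. */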
for (; PageBitmapIndex < PageBitmap.Size * 8; PageBitmapIndex++)
{
if (PageBitmap[PageBitmapIndex] == true)
continue;
this->LockPage((void *)(PageBitmapIndex * PAGE_SIZE));
return (void *)(PageBitmapIndex * PAGE_SIZE);
}
if (this->SwapPage((void *)(PageBitmapIndex * PAGE_SIZE)))
{
this->LockPage((void *)(PageBitmapIndex * PAGE_SIZE));
return (void *)(PageBitmapIndex * PAGE_SIZE);
}
if (TaskManager && !TaskManager->IsPanic())
{
error("Out of memory! Killing current process...");
TaskManager->KillProcess(thisProcess, Tasking::KILL_OOM);
TaskManager->Yield();
}
error("Out of memory! (Free: %ld MiB; Used: %ld MiB; Reserved: %ld MiB)",
TO_MiB(FreeMemory), TO_MiB(UsedMemory), TO_MiB(ReservedMemory));
KPrint("Out of memory! (Free: %ld MiB; Used: %ld MiB; Reserved: %ld MiB)",
TO_MiB(FreeMemory), TO_MiB(UsedMemory), TO_MiB(ReservedMemory));
CPU::Stop();
__builtin_unreachable();
}
void *Physical::RequestPages(size_t Count)
{
SmartLock(this->MemoryLock);
for (; PageBitmapIndex < PageBitmap.Size * 8; PageBitmapIndex++)
{
if (PageBitmap[PageBitmapIndex] == true)
continue;
for (uint64_t Index = PageBitmapIndex; Index < PageBitmap.Size * 8; Index++)
{
if (PageBitmap[Index] == true)
continue;
for (size_t i = 0; i < Count; i++)
{
if (PageBitmap[Index + i] == true)
goto NextPage;
}
this->LockPages((void *)(Index * PAGE_SIZE), Count);
return (void *)(Index * PAGE_SIZE);
NextPage:
Index += Count;
continue;
}
}
if (this->SwapPages((void *)(PageBitmapIndex * PAGE_SIZE), Count))
{
this->LockPages((void *)(PageBitmapIndex * PAGE_SIZE), Count);
return (void *)(PageBitmapIndex * PAGE_SIZE);
}
if (TaskManager && !TaskManager->IsPanic())
{
error("Out of memory! Killing current process...");
TaskManager->KillProcess(thisProcess, Tasking::KILL_OOM);
TaskManager->Yield();
}
error("Out of memory! (Free: %ld MiB; Used: %ld MiB; Reserved: %ld MiB)",
TO_MiB(FreeMemory), TO_MiB(UsedMemory), TO_MiB(ReservedMemory));
KPrint("Out of memory! (Free: %ld MiB; Used: %ld MiB; Reserved: %ld MiB)",
TO_MiB(FreeMemory), TO_MiB(UsedMemory), TO_MiB(ReservedMemory));
CPU::Halt(true);
__builtin_unreachable();
}
void Physical::FreePage(void *Address)
{
SmartLock(this->MemoryLock);
if (unlikely(Address == nullptr))
{
warn("Null pointer passed to FreePage.");
return;
}
size_t Index = (size_t)Address / PAGE_SIZE;
if (unlikely(PageBitmap[Index] == false))
{
warn("Tried to free an already free page. (%p)",
Address);
return;
}
if (PageBitmap.Set(Index, false))
{
FreeMemory += PAGE_SIZE;
UsedMemory -= PAGE_SIZE;
if (PageBitmapIndex > Index)
PageBitmapIndex = Index;
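/* Pulling the cursor back lets the next RequestPage() reuse this
   newly freed low frame instead of marching monotonically upward. */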
}
}
void Physical::FreePages(void *Address, size_t Count)
{
if (unlikely(Address == nullptr || Count == 0))
{
warn("%s%s%s passed to FreePages.", Address == nullptr ? "Null pointer " : "", Address == nullptr && Count == 0 ? "and " : "", Count == 0 ? "Zero count" : "");
return;
}
for (size_t t = 0; t < Count; t++)
this->FreePage((void *)((uintptr_t)Address + (t * PAGE_SIZE)));
}
void Physical::LockPage(void *Address)
{
if (unlikely(Address == nullptr))
warn("Trying to lock null address.");
uintptr_t Index = (uintptr_t)Address / PAGE_SIZE;
if (unlikely(PageBitmap[Index] == true))
return;
if (PageBitmap.Set(Index, true))
{
FreeMemory -= PAGE_SIZE;
UsedMemory += PAGE_SIZE;
}
}
void Physical::LockPages(void *Address, size_t PageCount)
{
if (unlikely(Address == nullptr || PageCount == 0))
warn("Trying to lock %s%s.",
Address ? "null address" : "",
PageCount ? "0 pages" : "");
for (size_t i = 0; i < PageCount; i++)
this->LockPage((void *)((uintptr_t)Address + (i * PAGE_SIZE)));
}
void Physical::ReservePage(void *Address)
{
if (unlikely(Address == nullptr))
warn("Trying to reserve null address.");
uintptr_t Index = (Address == NULL) ? 0 : (uintptr_t)Address / PAGE_SIZE;
if (unlikely(PageBitmap[Index] == true))
return;
if (PageBitmap.Set(Index, true))
{
FreeMemory -= PAGE_SIZE;
ReservedMemory += PAGE_SIZE;
}
}
void Physical::ReservePages(void *Address, size_t PageCount)
{
if (unlikely(Address == nullptr || PageCount == 0))
warn("Trying to reserve %s%s.",
Address ? "null address" : "",
PageCount ? "0 pages" : "");
for (size_t t = 0; t < PageCount; t++)
{
uintptr_t Index = ((uintptr_t)Address + (t * PAGE_SIZE)) / PAGE_SIZE;
if (unlikely(PageBitmap[Index] == true))
return;
if (PageBitmap.Set(Index, true))
{
FreeMemory -= PAGE_SIZE;
ReservedMemory += PAGE_SIZE;
}
}
}
void Physical::UnreservePage(void *Address)
{
if (unlikely(Address == nullptr))
warn("Trying to unreserve null address.");
uintptr_t Index = (Address == NULL) ? 0 : (uintptr_t)Address / PAGE_SIZE;
if (unlikely(PageBitmap[Index] == false))
return;
if (PageBitmap.Set(Index, false))
{
FreeMemory += PAGE_SIZE;
ReservedMemory -= PAGE_SIZE;
if (PageBitmapIndex > Index)
PageBitmapIndex = Index;
}
}
void Physical::UnreservePages(void *Address, size_t PageCount)
{
if (unlikely(Address == nullptr || PageCount == 0))
warn("Trying to unreserve %s%s.",
Address ? "null address" : "",
PageCount ? "0 pages" : "");
for (size_t t = 0; t < PageCount; t++)
{
uintptr_t Index = ((uintptr_t)Address + (t * PAGE_SIZE)) / PAGE_SIZE;
if (unlikely(PageBitmap[Index] == false))
return;
if (PageBitmap.Set(Index, false))
{
FreeMemory += PAGE_SIZE;
ReservedMemory -= PAGE_SIZE;
if (PageBitmapIndex > Index)
PageBitmapIndex = Index;
}
}
}
void Physical::Init()
{
SmartLock(this->MemoryLock);
uint64_t MemorySize = bInfo.Memory.Size;
debug("Memory size: %lld bytes (%ld pages)",
MemorySize, TO_PAGES(MemorySize));
TotalMemory = MemorySize;
FreeMemory = MemorySize;
size_t BitmapSize = (size_t)(MemorySize / PAGE_SIZE) / 8 + 1;
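/* Worked example: with 4 GiB of RAM there are 1,048,576 page frames,
   so the bitmap needs 131,073 bytes (one bit per frame, plus one byte
   of rounding slack). */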
uintptr_t BitmapAddress = 0x0;
size_t BitmapAddressSize = 0;
FindBitmapRegion(BitmapAddress, BitmapAddressSize);
if (BitmapAddress == 0x0)
{
error("No free memory found!");
CPU::Stop();
}
debug("Initializing Bitmap at %p-%p (%d Bytes)",
BitmapAddress,
(void *)(BitmapAddress + BitmapSize),
BitmapSize);
PageBitmap.Size = BitmapSize;
PageBitmap.Buffer = (uint8_t *)BitmapAddress;
for (size_t i = 0; i < BitmapSize; i++)
*(uint8_t *)(PageBitmap.Buffer + i) = 0;
ReserveEssentials();
}
Physical::Physical() {}
Physical::~Physical() {}
}

View File

@ -0,0 +1,207 @@
/*
This file is part of Fennix Kernel.
Fennix Kernel is free software: you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
Fennix Kernel is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Fennix Kernel. If not, see <https://www.gnu.org/licenses/>.
*/
#include <memory.hpp>
#include <acpi.hpp>
#include <debug.h>
#include <elf.h>
#ifdef DEBUG
#include <uart.hpp>
#endif
#include "../../kernel.h"
namespace Memory
{
__no_sanitize("alignment") void Physical::ReserveEssentials()
{
debug("Reserving pages...");
/* The bootloader won't give us the entire mapping, so we
reserve everything and then unreserve the usable pages. */
this->ReservePages(0, TO_PAGES(bInfo.Memory.Size));
debug("Unreserving usable pages...");
for (uint64_t i = 0; i < bInfo.Memory.Entries; i++)
{
if (bInfo.Memory.Entry[i].Type == Usable)
{
if (uintptr_t(bInfo.Memory.Entry[i].BaseAddress) <= 0xFFFFF)
continue;
this->UnreservePages(bInfo.Memory.Entry[i].BaseAddress,
TO_PAGES(bInfo.Memory.Entry[i].Length));
}
}
debug("Reserving 0x0-0xFFFFF range...");
// this->ReservePage((void *)0x0); /* Trampoline stack, gdt, idt, etc... */
// this->ReservePages((void *)0x2000, 4); /* TRAMPOLINE_START */
/* Reserve the lower part of memory. (0x0-0xFFFFF)
This includes: BIOS, EBDA, VGA, SMP, etc...
https://wiki.osdev.org/Memory_Map_(x86)
*/
this->ReservePages((void *)0x0, TO_PAGES(0xFFFFF));
debug("Reserving bitmap region %#lx-%#lx...",
PageBitmap.Buffer,
(void *)((uintptr_t)PageBitmap.Buffer + PageBitmap.Size));
this->ReservePages(PageBitmap.Buffer, TO_PAGES(PageBitmap.Size));
debug("Reserving kernel physical region %#lx-%#lx...",
bInfo.Kernel.PhysicalBase,
(void *)((uintptr_t)bInfo.Kernel.PhysicalBase + bInfo.Kernel.Size));
this->ReservePages(bInfo.Kernel.PhysicalBase, TO_PAGES(bInfo.Kernel.Size));
debug("Reserving kernel file and symbols...");
if (bInfo.Kernel.FileBase)
this->ReservePages(bInfo.Kernel.FileBase, TO_PAGES(bInfo.Kernel.Size));
if (bInfo.Kernel.Symbols.Num &&
bInfo.Kernel.Symbols.EntSize &&
bInfo.Kernel.Symbols.Shndx)
{
char *sections = r_cst(char *, bInfo.Kernel.Symbols.Sections);
debug("Reserving sections region %#lx-%#lx...",
sections,
(void *)((uintptr_t)sections + bInfo.Kernel.Symbols.EntSize *
bInfo.Kernel.Symbols.Num));
this->ReservePages(sections, TO_PAGES(bInfo.Kernel.Symbols.EntSize *
bInfo.Kernel.Symbols.Num));
Elf_Sym *Symbols = nullptr;
uint8_t *StringAddress = nullptr;
#if defined(a64) || defined(aa64)
Elf64_Xword SymbolSize = 0;
Elf64_Xword StringSize = 0;
#elif defined(a32)
Elf32_Word SymbolSize = 0;
Elf32_Word StringSize = 0;
#endif
for (size_t i = 0; i < bInfo.Kernel.Symbols.Num; ++i)
{
Elf_Shdr *sym = (Elf_Shdr *)&sections[bInfo.Kernel.Symbols.EntSize * i];
Elf_Shdr *str = (Elf_Shdr *)&sections[bInfo.Kernel.Symbols.EntSize *
sym->sh_link];
if (sym->sh_type == SHT_SYMTAB &&
str->sh_type == SHT_STRTAB)
{
Symbols = (Elf_Sym *)sym->sh_addr;
StringAddress = (uint8_t *)str->sh_addr;
SymbolSize = (int)sym->sh_size;
StringSize = (int)str->sh_size;
debug("Symbol table found, %d entries (%ld KiB)",
SymbolSize / sym->sh_entsize,
TO_KiB(SymbolSize));
this->ReservePages(Symbols, TO_PAGES(SymbolSize));
break;
}
}
if (Symbols)
{
debug("Reserving symbol table region %#lx-%#lx...",
Symbols, (void *)((uintptr_t)Symbols + SymbolSize));
this->ReservePages(Symbols, TO_PAGES(SymbolSize));
}
if (StringAddress)
{
debug("Reserving string table region %#lx-%#lx...",
StringAddress, (void *)((uintptr_t)StringAddress + StringSize));
this->ReservePages(StringAddress, TO_PAGES(StringSize));
}
}
debug("Reserving kernel modules...");
for (uint64_t i = 0; i < MAX_MODULES; i++)
{
if (bInfo.Modules[i].Address == 0x0)
continue;
debug("Reserving module %s (%#lx-%#lx)...", bInfo.Modules[i].CommandLine,
bInfo.Modules[i].Address,
(void *)((uintptr_t)bInfo.Modules[i].Address + bInfo.Modules[i].Size));
this->ReservePages((void *)bInfo.Modules[i].Address,
TO_PAGES(bInfo.Modules[i].Size));
}
#if defined(a86)
if (bInfo.RSDP)
{
debug("Reserving RSDT region %#lx-%#lx...", bInfo.RSDP,
(void *)((uintptr_t)bInfo.RSDP + sizeof(BootInfo::RSDPInfo)));
this->ReservePages(bInfo.RSDP, TO_PAGES(sizeof(BootInfo::RSDPInfo)));
ACPI::ACPI::ACPIHeader *ACPIPtr;
bool XSDT = false;
if (bInfo.RSDP->Revision >= 2 && bInfo.RSDP->XSDTAddress)
{
ACPIPtr = (ACPI::ACPI::ACPIHeader *)bInfo.RSDP->XSDTAddress;
XSDT = true;
}
else
ACPIPtr = (ACPI::ACPI::ACPIHeader *)(uintptr_t)bInfo.RSDP->RSDTAddress;
debug("Reserving RSDT...");
this->ReservePages((void *)bInfo.RSDP, TO_PAGES(sizeof(BootInfo::RSDPInfo)));
if (!Memory::Virtual().Check(ACPIPtr))
{
error("ACPI table is located in an unmapped region.");
return;
}
size_t TableSize = ((ACPIPtr->Length - sizeof(ACPI::ACPI::ACPIHeader)) /
(XSDT ? 8 : 4));
debug("Reserving %d ACPI tables...", TableSize);
for (size_t t = 0; t < TableSize; t++)
{
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wint-to-pointer-cast"
// TODO: Should I be concerned about unaligned memory access?
ACPI::ACPI::ACPIHeader *SDTHdr = nullptr;
if (XSDT)
SDTHdr =
(ACPI::ACPI::ACPIHeader *)(*(uint64_t *)((uint64_t)ACPIPtr +
sizeof(ACPI::ACPI::ACPIHeader) +
(t * 8)));
else
SDTHdr =
(ACPI::ACPI::ACPIHeader *)(*(uint32_t *)((uint64_t)ACPIPtr +
sizeof(ACPI::ACPI::ACPIHeader) +
(t * 4)));
#pragma GCC diagnostic pop
this->ReservePages(SDTHdr, TO_PAGES(SDTHdr->Length));
}
}
#elif defined(aa64)
#endif
}
}

View File

@ -0,0 +1,25 @@
#include <memory.hpp>
namespace Memory
{
SmartHeap::SmartHeap(size_t Size, VirtualMemoryArea *vma)
{
if (vma)
{
this->vma = vma;
this->Object = vma->RequestPages(TO_PAGES(Size));
}
else
this->Object = kmalloc(Size);
this->ObjectSize = Size;
memset(this->Object, 0, Size);
}
SmartHeap::~SmartHeap()
{
if (this->vma)
this->vma->FreePages(this->Object, TO_PAGES(this->ObjectSize));
else
kfree(this->Object);
}
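/* Taken together, SmartHeap is a small RAII helper: the vma-vs-kmalloc
   decision made in the constructor is mirrored in the destructor above,
   so a buffer's lifetime simply follows the object's scope. */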
}

171
core/memory/stack_guard.cpp Normal file
View File

@ -0,0 +1,171 @@
/*
This file is part of Fennix Kernel.
Fennix Kernel is free software: you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
Fennix Kernel is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Fennix Kernel. If not, see <https://www.gnu.org/licenses/>.
*/
#include <memory.hpp>
#include <debug.h>
namespace Memory
{
bool StackGuard::Expand(uintptr_t FaultAddress)
{
if (this->UserMode)
{
if (FaultAddress < (uintptr_t)this->StackBottom - 0x100 ||
FaultAddress > (uintptr_t)this->StackTop)
{
info("Fault address %#lx is not in range of stack %#lx - %#lx", FaultAddress,
(uintptr_t)this->StackBottom - 0x100, (uintptr_t)this->StackTop);
return false; /* It's not about the stack. */
}
else
{
void *AllocatedStack = this->vma->RequestPages(TO_PAGES(USER_STACK_SIZE) + 1);
debug("AllocatedStack: %#lx", AllocatedStack);
memset(AllocatedStack, 0, USER_STACK_SIZE);
Virtual vmm = Virtual(this->vma->GetTable());
for (size_t i = 0; i < TO_PAGES(USER_STACK_SIZE); i++)
{
void *VirtualPage = (void *)((uintptr_t)this->StackBottom - (i * PAGE_SIZE));
void *PhysicalPage = (void *)((uintptr_t)AllocatedStack + (i * PAGE_SIZE));
vmm.Map(VirtualPage, PhysicalPage, PTFlag::RW | PTFlag::US);
AllocatedPages ap = {
.PhysicalAddress = PhysicalPage,
.VirtualAddress = VirtualPage,
};
AllocatedPagesList.push_back(ap);
debug("Mapped %#lx to %#lx", PhysicalPage, VirtualPage);
}
this->StackBottom = (void *)((uintptr_t)this->StackBottom - USER_STACK_SIZE);
this->Size += USER_STACK_SIZE;
debug("Stack expanded to %#lx", this->StackBottom);
this->Expanded = true;
return true;
}
}
else
{
fixme("Not implemented and probably not needed");
return false;
}
}
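/* The user stack grows downward: every successful expansion lowers
   StackBottom by USER_STACK_SIZE, and the 0x100-byte slack accepted in
   the range check covers accesses that land just below the current
   bottom (e.g. a push that crosses the boundary). */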
void StackGuard::Fork(StackGuard *Parent)
{
this->UserMode = Parent->GetUserMode();
this->StackBottom = Parent->GetStackBottom();
this->StackTop = Parent->GetStackTop();
this->StackPhysicalBottom = Parent->GetStackPhysicalBottom();
this->StackPhysicalTop = Parent->GetStackPhysicalTop();
this->Size = Parent->GetSize();
this->Expanded = Parent->IsExpanded();
if (this->UserMode)
{
std::vector<AllocatedPages> ParentAllocatedPages = Parent->GetAllocatedPages();
Virtual vma = Virtual(this->vma->GetTable());
foreach (auto Page in ParentAllocatedPages)
{
void *NewPhysical = this->vma->RequestPages(1);
debug("Forking address %#lx to %#lx", Page.PhysicalAddress, NewPhysical);
memcpy(NewPhysical, Page.PhysicalAddress, PAGE_SIZE);
vmm.Map(Page.VirtualAddress, NewPhysical, PTFlag::RW | PTFlag::US);
AllocatedPages ap = {
.PhysicalAddress = NewPhysical,
.VirtualAddress = Page.VirtualAddress,
};
AllocatedPagesList.push_back(ap);
debug("Mapped %#lx to %#lx", NewPhysical, Page.VirtualAddress);
}
}
else
{
fixme("Kernel mode stack fork not implemented");
}
}
StackGuard::StackGuard(bool User, VirtualMemoryArea *vma)
{
this->UserMode = User;
this->vma = vma;
if (this->UserMode)
{
void *AllocatedStack = vma->RequestPages(TO_PAGES(USER_STACK_SIZE) + 1);
memset(AllocatedStack, 0, USER_STACK_SIZE);
debug("AllocatedStack: %#lx", AllocatedStack);
{
Virtual vmm = Virtual(vma->GetTable());
for (size_t i = 0; i < TO_PAGES(USER_STACK_SIZE); i++)
{
void *VirtualPage = (void *)(USER_STACK_BASE + (i * PAGE_SIZE));
void *PhysicalPage = (void *)((uintptr_t)AllocatedStack + (i * PAGE_SIZE));
vmm.Map(VirtualPage, PhysicalPage, PTFlag::RW | PTFlag::US);
AllocatedPages ap = {
.PhysicalAddress = PhysicalPage,
.VirtualAddress = VirtualPage,
};
AllocatedPagesList.push_back(ap);
debug("Mapped %#lx to %#lx", PhysicalPage, VirtualPage);
}
}
this->StackBottom = (void *)USER_STACK_BASE;
this->StackTop = (void *)(USER_STACK_BASE + USER_STACK_SIZE);
this->StackPhysicalBottom = AllocatedStack;
this->StackPhysicalTop = (void *)((uintptr_t)AllocatedStack + USER_STACK_SIZE);
this->Size = USER_STACK_SIZE;
}
else
{
this->StackBottom = vma->RequestPages(TO_PAGES(STACK_SIZE) + 1);
memset(this->StackBottom, 0, STACK_SIZE);
debug("StackBottom: %#lx", this->StackBottom);
this->StackTop = (void *)((uintptr_t)this->StackBottom + STACK_SIZE);
this->StackPhysicalBottom = this->StackBottom;
this->StackPhysicalTop = this->StackTop;
this->Size = STACK_SIZE;
for (size_t i = 0; i < TO_PAGES(STACK_SIZE); i++)
{
AllocatedPages pa = {
.PhysicalAddress = (void *)((uintptr_t)this->StackBottom + (i * PAGE_SIZE)),
.VirtualAddress = (void *)((uintptr_t)this->StackBottom + (i * PAGE_SIZE)),
};
AllocatedPagesList.push_back(pa);
}
}
debug("Allocated stack at %#lx", this->StackBottom);
}
StackGuard::~StackGuard()
{
/* VMA will free the stack */
}
}

331
core/memory/vma.cpp Normal file
View File

@ -0,0 +1,331 @@
/*
This file is part of Fennix Kernel.
Fennix Kernel is free software: you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
Fennix Kernel is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Fennix Kernel. If not, see <https://www.gnu.org/licenses/>.
*/
#include <memory/vma.hpp>
#include <memory/table.hpp>
#include <debug.h>
#include "../../kernel.h"
namespace Memory
{
// ReadFSFunction(MEM_Read)
// {
// if (Size <= 0)
// Size = node->Length;
// if (RefOffset > node->Length)
// return 0;
// if ((node->Length - RefOffset) == 0)
// return 0; /* EOF */
// if (RefOffset + (off_t)Size > node->Length)
// Size = node->Length;
// memcpy(Buffer, (uint8_t *)(node->Address + RefOffset), Size);
// return Size;
// }
// WriteFSFunction(MEM_Write)
// {
// if (Size <= 0)
// Size = node->Length;
// if (RefOffset > node->Length)
// return 0;
// if (RefOffset + (off_t)Size > node->Length)
// Size = node->Length;
// memcpy((uint8_t *)(node->Address + RefOffset), Buffer, Size);
// return Size;
// }
// vfs::FileSystemOperations mem_op = {
// .Name = "mem",
// .Read = MEM_Read,
// .Write = MEM_Write,
// };
uint64_t VirtualMemoryArea::GetAllocatedMemorySize()
{
SmartLock(MgrLock);
uint64_t Size = 0;
foreach (auto ap in AllocatedPagesList)
Size += ap.PageCount;
return FROM_PAGES(Size);
}
bool VirtualMemoryArea::Add(void *Address, size_t Count)
{
SmartLock(MgrLock);
if (Address == nullptr)
{
error("Address is null!");
return false;
}
if (Count == 0)
{
error("Count is 0!");
return false;
}
for (size_t i = 0; i < AllocatedPagesList.size(); i++)
{
if (AllocatedPagesList[i].Address == Address)
{
error("Address already exists!");
return false;
}
else if ((uintptr_t)Address < (uintptr_t)AllocatedPagesList[i].Address)
{
if ((uintptr_t)Address + (Count * PAGE_SIZE) > (uintptr_t)AllocatedPagesList[i].Address)
{
error("Address intersects with an allocated page!");
return false;
}
}
else
{
if ((uintptr_t)AllocatedPagesList[i].Address + (AllocatedPagesList[i].PageCount * PAGE_SIZE) > (uintptr_t)Address)
{
error("Address intersects with an allocated page!");
return false;
}
}
}
AllocatedPagesList.push_back({Address, Count});
return true;
}
void *VirtualMemoryArea::RequestPages(size_t Count, bool User)
{
SmartLock(MgrLock);
void *Address = KernelAllocator.RequestPages(Count);
for (size_t i = 0; i < Count; i++)
{
int Flags = Memory::PTFlag::RW;
if (User)
Flags |= Memory::PTFlag::US;
void *AddressToMap = (void *)((uintptr_t)Address + (i * PAGE_SIZE));
Memory::Virtual vmm = Memory::Virtual(this->Table);
vmm.Remap(AddressToMap, AddressToMap, Flags);
}
AllocatedPagesList.push_back({Address, Count});
/* For security reasons, we clear the allocated page
if it's a user page. */
if (User)
memset(Address, 0, Count * PAGE_SIZE);
return Address;
}
void VirtualMemoryArea::FreePages(void *Address, size_t Count)
{
SmartLock(MgrLock);
forItr(itr, AllocatedPagesList)
{
if (itr->Address == Address)
{
/** TODO: Advanced checks. Allow if the page count is less than the requested one.
* This will allow the user to free only a part of the allocated pages.
*
* But this will be in a separate function because we need to specify if we
* want to free from the start or from the end and return the new address.
*/
if (itr->PageCount != Count)
{
error("Page count mismatch! (Allocated: %lld, Requested: %lld)", itr->PageCount, Count);
return;
}
KernelAllocator.FreePages(Address, Count);
Memory::Virtual vmm = Memory::Virtual(this->Table);
for (size_t i = 0; i < Count; i++)
{
void *AddressToMap = (void *)((uintptr_t)Address + (i * PAGE_SIZE));
vmm.Remap(AddressToMap, AddressToMap, Memory::PTFlag::RW);
// vmm.Unmap((void *)((uintptr_t)Address + (i * PAGE_SIZE)));
}
AllocatedPagesList.erase(itr);
return;
}
}
}
void VirtualMemoryArea::DetachAddress(void *Address)
{
SmartLock(MgrLock);
forItr(itr, AllocatedPagesList)
{
if (itr->Address == Address)
{
AllocatedPagesList.erase(itr);
return;
}
}
}
void *VirtualMemoryArea::CreateCoWRegion(void *Address,
size_t Length,
bool Read, bool Write, bool Exec,
bool Fixed, bool Shared)
{
Memory::Virtual vmm = Memory::Virtual(this->Table);
// FIXME
// for (uintptr_t j = uintptr_t(Address);
// j < uintptr_t(Address) + Length;
// j += PAGE_SIZE)
// {
// if (vmm.Check((void *)j, Memory::G))
// {
// if (Fixed)
// return (void *)-EINVAL;
// Address = (void *)(j + PAGE_SIZE);
// }
// }
bool AnyAddress = Address == nullptr;
if (AnyAddress)
{
Address = this->RequestPages(1);
if (Address == nullptr)
return nullptr;
memset(Address, 0, PAGE_SIZE);
}
vmm.Unmap(Address, Length);
vmm.Map(Address, nullptr, Length, PTFlag::CoW);
if (AnyAddress)
vmm.Remap(Address, Address, PTFlag::RW | PTFlag::US);
SharedRegion sr{
.Address = Address,
.Read = Read,
.Write = Write,
.Exec = Exec,
.Fixed = Fixed,
.Shared = Shared,
.Length = Length,
.ReferenceCount = 0,
};
SharedRegions.push_back(sr);
return Address;
}
bool VirtualMemoryArea::HandleCoW(uintptr_t PFA)
{
function("%#lx", PFA);
Memory::Virtual vmm = Memory::Virtual(this->Table);
Memory::PageTableEntry *pte = vmm.GetPTE((void *)PFA);
if (!pte)
{
/* Unmapped page */
debug("PTE is null!");
return false;
}
if (pte->CopyOnWrite == true)
{
foreach (auto sr in SharedRegions)
{
uintptr_t Start = (uintptr_t)sr.Address;
uintptr_t End = (uintptr_t)sr.Address + sr.Length;
if (PFA >= Start && PFA < End)
{
if (sr.Shared)
{
fixme("Shared CoW");
return false;
}
else
{
void *pAddr = this->RequestPages(1);
if (pAddr == nullptr)
return false;
memset(pAddr, 0, PAGE_SIZE);
uint64_t Flags = 0;
if (sr.Read)
Flags |= PTFlag::US;
if (sr.Write)
Flags |= PTFlag::RW;
// if (sr.Exec)
// Flags |= PTFlag::XD;
vmm.Remap((void *)PFA, pAddr, Flags);
pte->CopyOnWrite = false;
return true;
}
}
}
}
debug("PFA %#lx is not CoW", PFA);
return false;
}
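/* Expected flow (assumption: HandleCoW is invoked from the page-fault
   handler): CreateCoWRegion() maps a range with PTFlag::CoW, the first
   write faults, and the faulting address is re-mapped above onto a freshly
   zeroed private page with the permissions recorded in the SharedRegion. */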
VirtualMemoryArea::VirtualMemoryArea(PageTable *Table)
{
debug("+ %#lx %s", this,
KernelSymbolTable ? KernelSymbolTable->GetSymbolFromAddress((uintptr_t)__builtin_return_address(0)) : "");
SmartLock(MgrLock);
if (Table)
this->Table = Table;
else
{
if (TaskManager)
this->Table = thisProcess->PageTable;
else
#if defined(a64)
this->Table = (PageTable *)CPU::x64::readcr3().raw;
#elif defined(a32)
this->Table = (PageTable *)CPU::x32::readcr3().raw;
#endif
}
}
VirtualMemoryArea::~VirtualMemoryArea()
{
debug("- %#lx %s", this,
KernelSymbolTable ? KernelSymbolTable->GetSymbolFromAddress((uintptr_t)__builtin_return_address(0)) : "");
SmartLock(MgrLock);
foreach (auto ap in AllocatedPagesList)
{
KernelAllocator.FreePages(ap.Address, ap.PageCount);
Memory::Virtual vmm = Memory::Virtual(this->Table);
for (size_t i = 0; i < ap.PageCount; i++)
vmm.Remap((void *)((uintptr_t)ap.Address + (i * PAGE_SIZE)),
(void *)((uintptr_t)ap.Address + (i * PAGE_SIZE)),
Memory::PTFlag::RW);
}
}
}

34
core/memory/vmm.cpp Normal file
View File

@ -0,0 +1,34 @@
/*
This file is part of Fennix Kernel.
Fennix Kernel is free software: you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
Fennix Kernel is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Fennix Kernel. If not, see <https://www.gnu.org/licenses/>.
*/
#include <memory.hpp>
#include <convert.h>
#include <debug.h>
namespace Memory
{
Virtual::Virtual(PageTable *Table)
{
if (Table)
this->Table = Table;
else
this->Table = (PageTable *)CPU::PageTable();
}
Virtual::~Virtual() {}
}