Victor Olin 2023-05-15 18:09:50 +02:00
parent e2b3f36a64
commit 43ce0ecd71
39 changed files with 0 additions and 4330 deletions

@@ -1,63 +0,0 @@
#include <stdlib.h>
#include <iostream>
#include "heap.hpp"
#include "cheap.h"
#ifndef WRAPPER_DEBUG
struct cheap
{
void *obj;
};
#endif
cheap_t *cheap_the()
{
cheap_t *c;
GC::Heap *heap;
c = static_cast<cheap_t *>(malloc(sizeof(cheap_t)));
heap = &GC::Heap::the();
c->obj = heap;
return c;
}
void cheap_init()
{
GC::Heap::init();
}
void cheap_dispose()
{
std::cout << "In dispose\n";
GC::Heap::dispose();
std::cout << "Out dispose" << std::endl;
}
void *cheap_alloc(unsigned long size)
{
return GC::Heap::alloc(size);
}
void cheap_set_profiler(cheap_t *cheap, bool mode)
{
GC::Heap *heap = static_cast<GC::Heap *>(cheap->obj);
heap->set_profiler(mode);
}
void cheap_profiler_log_options(cheap_t *cheap, unsigned long flags)
{
GC::Heap *heap = static_cast<GC::Heap *>(cheap->obj);
GC::RecordOption cast_flag;
if (flags == FuncCallsOnly)
cast_flag = GC::FunctionCalls;
else if (flags == ChunkOpsOnly)
cast_flag = GC::ChunkOps;
else
cast_flag = GC::AllOps;
heap->set_profiler_log_options(cast_flag);
}
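// A minimal usage sketch of this wrapper from the C side (illustrative
// only; assumes cheap.h declares these functions and the FuncCallsOnly
// flag used above):
//
//     #include <stdbool.h>
//     #include "cheap.h"
//
//     int main(void)
//     {
//         cheap_init();                          /* capture the top stack frame */
//         cheap_t *c = cheap_the();              /* handle to the heap singleton */
//         cheap_set_profiler(c, true);
//         cheap_profiler_log_options(c, FuncCallsOnly);
//         int *xs = cheap_alloc(10 * sizeof(int));
//         xs[0] = 1;
//         cheap_dispose();                       /* dumps the profiler log */
//         return 0;
//     }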

@@ -1,71 +0,0 @@
#include "chunk.hpp"
#include "event.hpp"
namespace GC
{
/**
* @returns The type of the event
*/
GCEventType GCEvent::get_type()
{
return m_type;
}
/**
* @returns The time at which the event
*          happened, as a time_t.
*/
std::time_t GCEvent::get_time_stamp()
{
return m_timestamp;
}
/**
* If the event is related to a chunk, this
* function returns the chunk that it is
* related to. If the event is independent
* of a chunk, it returns nullptr.
*
* @returns A chunk pointer, or nullptr.
*/
const Chunk *GCEvent::get_chunk()
{
return m_chunk;
}
/**
* If the event is an AllocStart event, this
* returns the size of the alloc() request;
* otherwise it returns 0.
*
* @returns The number of bytes requested
*          from alloc(), or 0 if the event
*          is not an AllocStart event.
*/
size_t GCEvent::get_size()
{
return m_size;
}
/**
* @returns The string conversion of the event type.
*/
const char *GCEvent::type_to_string()
{
switch (m_type)
{
case HeapInit: return "HeapInit";
case AllocStart: return "AllocStart";
case CollectStart: return "CollectStart";
case MarkStart: return "MarkStart";
case ChunkMarked: return "ChunkMarked";
case ChunkSwept: return "ChunkSwept";
case ChunkFreed: return "ChunkFreed";
case NewChunk: return "NewChunk";
case ReusedChunk: return "ReusedChunk";
case ProfilerDispose: return "ProfilerDispose";
default: return "[Unknown]";
}
}
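/*
* Sketch of how a log consumer might render one event using the accessors
* above together with <ctime> (illustrative only; `event` is assumed to be
* a GCEvent pointer):
*
*     char buf[22];
*     std::time_t tt = event->get_time_stamp();
*     std::strftime(buf, sizeof(buf), "%a %T", std::localtime(&tt));
*     std::cout << buf << "  " << event->type_to_string() << "\n";
*/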
}

@@ -1,840 +0,0 @@
#include <iostream>
#include <stdexcept>
#include <stdlib.h>
#include <vector>
#include <unordered_map>
#include <chrono>
#include <queue>
#include <set>
#include "heap.hpp"
#define time_now std::chrono::high_resolution_clock::now()
#define to_us std::chrono::duration_cast<std::chrono::microseconds>
using std::cout, std::endl, std::vector, std::hex, std::dec, std::unordered_map;
namespace GC
{
/**
* This implementation of the() guarantees laziness
* on the instance and a correct destruction with
* the destructor.
*
* @returns The singleton object.
*/
Heap& Heap::the()
{
static Heap instance;
return instance;
}
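/*
* The same Meyers-singleton idiom in isolation (a sketch, independent of
* this class): the static local is constructed lazily on first use, is
* thread-safe since C++11, and is destroyed automatically at program exit.
*
*     struct Registry
*     {
*         static Registry &the()
*         {
*             static Registry instance; // constructed on first call
*             return instance;
*         }
*     };
*/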
/**
* Initialises the heap singleton and saves the address
* of the calling function's stack frame as the stack_top.
* Presumably this address points to the stack frame of
* the compiled LLVM executable after linking.
*/
void Heap::init()
{
Heap &heap = Heap::the();
if (heap.profiler_enabled())
Profiler::record(HeapInit);
// clang warns because a nonzero argument to __builtin_frame_address is considered unsafe
#pragma clang diagnostic ignored "-Wframe-address"
heap.m_stack_top = static_cast<uintptr_t *>(__builtin_frame_address(1));
// TODO: handle this below
//heap.m_heap_top = heap.m_heap;
}
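/*
* Intended call pattern (a sketch): init() should be called directly from
* the outermost frame, so that __builtin_frame_address(1) resolves to that
* frame and the whole user stack lies between m_stack_top and any later
* collection point.
*
*     int main()
*     {
*         GC::Heap::init();       // m_stack_top <- main's frame
*         // ... allocate via GC::Heap::alloc(...) ...
*         GC::Heap::dispose();
*     }
*/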
void Heap::set_profiler_log_options(RecordOption flags)
{
Profiler::set_log_options(flags);
}
/**
* Disposes the heap and the profiler at program exit
* which also triggers a heap log file dumped if the
* profiler is enabled.
*/
void Heap::dispose()
{
Heap &heap = Heap::the();
if (heap.profiler_enabled())
Profiler::dispose();
}
/**
* Allocates a given number of bytes on the heap.
*
* @param size The number of bytes to be allocated.
*
* @return A pointer to the address where the memory
*         has been allocated. This pointer is meant
*         to be cast to an object pointer.
*/
void *Heap::alloc(size_t size)
{
auto a_start = time_now;
// Singleton
Heap &heap = Heap::the();
bool profiler_enabled = heap.profiler_enabled();
if (profiler_enabled)
Profiler::record(AllocStart, size);
if (size == 0)
{
cout << "Heap: Cannot alloc 0B. No bytes allocated." << endl;
return nullptr;
}
if (heap.m_size + size > HEAP_SIZE)
{
auto stack_bottom = reinterpret_cast<uintptr_t *>(__builtin_frame_address(0));
heap.collect(stack_bottom);
}
// If there is still not enough memory after the collection,
// crash with an out-of-memory error
if (heap.m_size + size > HEAP_SIZE)
{
if (profiler_enabled)
Profiler::dispose();
throw std::runtime_error(std::string("Error: Heap out of memory"));
}
// If a chunk was recycled, return the old chunk address
Chunk *reused_chunk = heap.try_recycle_chunks(size);
if (reused_chunk != nullptr)
{
if (profiler_enabled)
Profiler::record(ReusedChunk, reused_chunk);
auto a_end = time_now;
auto a_ms = to_us(a_end - a_start);
if (profiler_enabled)
Profiler::record(AllocStart, a_ms);
return static_cast<void *>(reused_chunk->m_start);
}
// If no free chunk was found (reused_chunk is a nullptr),
// then create a new chunk
auto new_chunk = new Chunk(size, (uintptr_t *)(heap.m_heap + heap.m_size));
heap.m_size += size;
// TODO: handle this below
//heap.m_total_size += size;
heap.m_allocated_chunks.push_back(new_chunk);
if (profiler_enabled)
Profiler::record(NewChunk, new_chunk);
auto a_end = time_now;
auto a_ms = to_us(a_end - a_start);
if (profiler_enabled)
Profiler::record(AllocStart, a_ms);
return new_chunk->m_start;
}
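/*
* Usage sketch for alloc() (illustrative; Node is a hypothetical user
* type, not part of this file):
*
*     struct Node { long value; Node *next; };
*     auto *n = static_cast<Node *>(GC::Heap::alloc(sizeof(Node)));
*     n->value = 42;
*     n->next = nullptr;
*/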
/**
* Tries to recycle freed chunks whose memory has already
* been obtained from the OS but has since been freed from
* our heap. Reusing them slightly reduces the number of
* OS-level allocations, saving the cost of malloc'ing new
* memory from the OS.
*
* @param size Number of bytes needed for the object
*             which is about to be allocated.
*
* @returns If a chunk is found and recycled, a
*          pointer to the allocated memory for
*          the object is returned. If not, a
*          nullptr is returned to signify that no
*          suitable chunk was found.
*/
Chunk *Heap::try_recycle_chunks(size_t size)
{
Heap &heap = Heap::the();
// Check if there are any freed chunks large enough for current request
for (size_t i = 0; i < heap.m_freed_chunks.size(); i++)
{
auto chunk = heap.m_freed_chunks[i];
// Iterator to this chunk's position, needed for erase()
auto iter = heap.m_freed_chunks.begin() + i;
if (chunk->m_size > size)
{
// Split the chunk: hand out the first `size` bytes and return
// the remaining part to the list of freed chunks
size_t diff = chunk->m_size - size;
auto complement_start = reinterpret_cast<uintptr_t *>(reinterpret_cast<char *>(chunk->m_start) + size);
auto chunk_complement = new Chunk(diff, complement_start);
chunk->m_size = size;
heap.m_freed_chunks.erase(iter);
heap.m_freed_chunks.push_back(chunk_complement);
heap.m_allocated_chunks.push_back(chunk);
return chunk;
}
else if (chunk->m_size == size)
{
// Reuse the whole chunk
heap.m_freed_chunks.erase(iter);
heap.m_allocated_chunks.push_back(chunk);
return chunk;
}
}
// If no chunk was found, return nullptr
return nullptr;
}
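/*
* Split arithmetic with concrete numbers (illustration only): recycling a
* 96 B freed chunk for a 64 B request hands out the first 64 B at the old
* start address, while a 32 B complement beginning 64 B further in goes
* back on the free list.
*
*     char *start = chunk_start;        // hypothetical byte pointer
*     size_t have = 96, want = 64;
*     size_t diff = have - want;        // 32 B stays free
*     char *complement = start + want;  // complement begins after the reused part
*/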
/**
* Reports whether the profiler is enabled.
*
* @returns True if the profiler is enabled,
*          false otherwise.
*/
bool Heap::profiler_enabled() {
Heap &heap = Heap::the();
return heap.m_profiler_enable;
}
/**
* Collection phase of the garbage collector. When
* an allocation is requested and there is no space
* left on the heap, a collection is triggered. This
* function is private so that the user cannot trigger
* a collection unnecessarily.
*/
void Heap::collect(uintptr_t *stack_bottom)
{
auto c_start = time_now;
Heap &heap = Heap::the();
if (heap.profiler_enabled())
Profiler::record(CollectStart);
// Get the current stack frame; this overwrites the caller-supplied
// value so that the scan starts from collect()'s own frame
stack_bottom = reinterpret_cast<uintptr_t *>(__builtin_frame_address(0));
if (heap.m_stack_top == nullptr)
throw std::runtime_error(std::string("Error: Heap is not initialized, read the docs!"));
create_table();
vector<uintptr_t *> roots;
find_roots(stack_bottom, roots);
mark(roots);
sweep(heap);
free(heap);
auto c_end = time_now;
if (heap.profiler_enabled())
Profiler::record(CollectStart, to_us(c_end - c_start));
}
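/**
* Conservatively scans the stack region between stack_bottom and the
* saved stack top. Every word that, read as an address, falls inside
* the heap bounds is recorded as a potential root.
*
* @param stack_bottom Pointer to the bottom of the region to scan.
* @param roots        Output vector of stack addresses whose contents
*                     point into the heap.
*/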
void Heap::find_roots(uintptr_t *stack_bottom, vector<uintptr_t *> &roots)
{
auto heap_bottom = reinterpret_cast<const uintptr_t>(m_heap);
auto heap_top = reinterpret_cast<const uintptr_t>(m_heap + HEAP_SIZE);
while (stack_bottom < m_stack_top)
{
if (heap_bottom < *stack_bottom && *stack_bottom < heap_top)
{
roots.push_back(stack_bottom);
}
stack_bottom++;
}
}
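/*
* Conservative scanning in isolation (a sketch): any word in a region
* that, reinterpreted as an address, lands inside (lo, hi) is treated as
* a potential root. False positives only cause over-marking and are safe;
* false negatives would free live memory and must not happen.
*
*     #include <cstdint>
*     #include <vector>
*
*     std::vector<std::uintptr_t *> scan(std::uintptr_t lo, std::uintptr_t hi,
*                                        std::uintptr_t *from, std::uintptr_t *to)
*     {
*         std::vector<std::uintptr_t *> hits;
*         for (; from < to; from++)
*             if (lo < *from && *from < hi)
*                 hits.push_back(from);  // word looks like a heap address
*         return hits;
*     }
*/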
/**
* Marks every chunk reachable from the given roots. Each root is a
* stack address whose contents point into the heap. The chunks they
* reference are marked, and each marked chunk's memory range is then
* scanned breadth-first for references to further chunks, until no
* unmarked reachable chunk remains.
*
* Time complexity: O(N^2 * log(N)) as an upper bound,
* where N is either the number of allocated chunks or the size of
* the scanned region, whichever is larger.
*
* @param roots The stack addresses found by find_roots().
*/
void Heap::mark(vector<uintptr_t *> &roots)
{
bool prof_enabled = m_profiler_enable;
if (prof_enabled)
Profiler::record(MarkStart);
auto iter = roots.begin(), end = roots.end();
std::queue<std::pair<uintptr_t, uintptr_t>> chunk_spaces;
while (iter != end)
{
find_chunks(*iter++, chunk_spaces);
}
while (!chunk_spaces.empty())
{
auto range = chunk_spaces.front();
chunk_spaces.pop();
auto addr_bottom = reinterpret_cast<uintptr_t *>(range.first);
auto addr_top = reinterpret_cast<uintptr_t *>(range.second);
while (addr_bottom < addr_top)
{
find_chunks(addr_bottom, chunk_spaces);
addr_bottom++;
}
}
}
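/*
* The generic shape of this traversal (a sketch over an explicit graph):
* marking a node before enqueueing it guarantees each node is visited at
* most once, which is what makes the loop above terminate.
*
*     #include <queue>
*     #include <vector>
*
*     void bfs_mark(int root, const std::vector<std::vector<int>> &edges,
*                   std::vector<bool> &marked)
*     {
*         std::queue<int> q;
*         marked[root] = true;
*         q.push(root);
*         while (!q.empty())
*         {
*             int n = q.front(); q.pop();
*             for (int m : edges[n])
*                 if (!marked[m]) { marked[m] = true; q.push(m); }
*         }
*     }
*/
/**
* Looks up the word at stack_addr in the chunk table. If it is the start
* address of an unmarked chunk, the chunk is marked and its memory range
* is queued for further scanning.
*
* @param stack_addr   Address whose contents may reference a chunk.
* @param chunk_spaces Queue of (start, end) ranges still to be scanned.
*/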
void Heap::find_chunks(uintptr_t *stack_addr, std::queue<std::pair<uintptr_t, uintptr_t>> &chunk_spaces)
{
Heap &heap = Heap::the();
auto it = heap.m_chunk_table.find(*stack_addr);
if (it != heap.m_chunk_table.end())
{
auto chunk = it->second;
if (!chunk->m_marked)
{
auto c_start = reinterpret_cast<uintptr_t>(chunk->m_start);
auto c_size = reinterpret_cast<uintptr_t>(chunk->m_size);
auto c_end = reinterpret_cast<uintptr_t>(c_start + c_size);
chunk->m_marked = true;
chunk_spaces.push(std::make_pair(c_start, c_end));
}
}
}
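/**
* Builds the chunk lookup table, mapping each allocated chunk's start
* address to its Chunk, so that a scanned word can be resolved with a
* single hash probe instead of a linear search.
*/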
void Heap::create_table()
{
Heap &heap = Heap::the();
for (auto chunk : heap.m_allocated_chunks) {
auto pair = std::make_pair(reinterpret_cast<uintptr_t>(chunk->m_start), chunk);
heap.m_chunk_table.insert(pair);
}
}
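/*
* The lookup this table enables (a sketch): resolving a candidate word is
* one hash probe. Note that only pointers to a chunk's first byte match;
* interior pointers are missed by this keying scheme.
*
*     std::unordered_map<uintptr_t, Chunk *> table; // filled as above
*     uintptr_t candidate = 0;                      // word read off the stack
*     auto it = table.find(candidate);
*     Chunk *hit = (it != table.end()) ? it->second : nullptr;
*/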
void Heap::mark_hash(uintptr_t *start, const uintptr_t* const end)
{
Heap &heap = Heap::the();
bool profiler_enabled = heap.m_profiler_enable;
if (profiler_enabled)
Profiler::record(MarkStart);
for (; start <= end; start++)
{
auto search = heap.m_chunk_table.find(*start);
if (search != heap.m_chunk_table.end())
{
Chunk *chunk = search->second;
auto c_start = reinterpret_cast<uintptr_t>(chunk->m_start);
auto c_size = reinterpret_cast<uintptr_t>(chunk->m_size);
auto c_end = reinterpret_cast<uintptr_t*>(c_start + c_size);
if (!chunk->m_marked)
{
chunk->m_marked = true;
if (profiler_enabled)
Profiler::record(ChunkMarked, chunk);
//mark_hash(chunk->m_start, c_end);
Chunk *next = find_pointer_hash((uintptr_t *) c_start, (uintptr_t *) c_end);
// Follow the chain of references until no further chunk is found
// or an already-marked chunk is reached
while (next != NULL && !next->m_marked)
{
next->m_marked = true;
if (profiler_enabled)
Profiler::record(ChunkMarked, next);
auto n_start = reinterpret_cast<uintptr_t>(next->m_start);
auto n_size = reinterpret_cast<uintptr_t>(next->m_size);
auto n_end = reinterpret_cast<uintptr_t>(n_start + n_size);
next = find_pointer_hash((uintptr_t *) n_start, (uintptr_t *) n_end);
}
}
}
}
}
/**
* Sweeps the heap: unmarks the marked chunks for the next cycle and
* moves the unmarked chunks to the list of freed chunks, to be freed later.
*
* Time complexity: O(N^2), where N is the number of allocated chunks.
* It is quadratic in the worst case, since each call to erase() is linear.
*
* @param heap Reference to the heap singleton instance.
*/
void Heap::sweep(Heap &heap)
{
bool profiler_enabled = heap.m_profiler_enable;
if (profiler_enabled)
Profiler::record(SweepStart);
auto iter = heap.m_allocated_chunks.begin();
// std::cout << "Chunks alloced: " << heap.m_allocated_chunks.size() << std::endl;
// This cannot "iter != stop", results in seg fault, since the end gets updated, I think.
while (iter != heap.m_allocated_chunks.end())
{
Chunk *chunk = *iter;
// Unmark the marked chunks for the next iteration.
if (chunk->m_marked)
{
chunk->m_marked = false;
++iter;
}
else
{
// Add the unmarked chunks to freed chunks and remove from
// the list of allocated chunks
if (profiler_enabled)
Profiler::record(ChunkSwept, chunk);
heap.m_freed_chunks.push_back(chunk);
iter = heap.m_allocated_chunks.erase(iter);
heap.m_size -= chunk->m_size;
// cout << "Decremented total heap size with: " << chunk->m_size << endl;
// cout << "Total size is: " << heap.m_size << endl;
}
}
// std::cout << "Chunks left: " << heap.m_allocated_chunks.size() << std::endl;
}
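/*
* The erase-while-iterating idiom used above, in isolation (a sketch):
* erase() returns an iterator to the element after the erased one, and
* end() must be re-evaluated on every pass.
*
*     std::vector<int> v{1, 2, 3, 4};
*     for (auto it = v.begin(); it != v.end(); )
*     {
*         if (*it % 2 == 0)
*             it = v.erase(it);  // remove element, iterator moves on
*         else
*             ++it;
*     }
*/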
/**
* Frees chunks that were moved to the list m_freed_chunks
* by the sweep phase. If the number of freed chunks exceeds
* FREE_THRESH, delete them all to avoid cluttering.
*
* Time complexity: O(N), where N is the number of freed chunks;
* both the bulk-free path and free_overlap() are linear.
*
* @param heap Heap singleton instance, passed in to avoid
*             redundant calls to the singleton getter.
*/
void Heap::free(Heap &heap)
{
bool profiler_enabled = heap.m_profiler_enable;
if (profiler_enabled)
Profiler::record(FreeStart);
if (heap.m_freed_chunks.size() > FREE_THRESH)
{
while (heap.m_freed_chunks.size())
{
auto chunk = heap.m_freed_chunks.back();
heap.m_freed_chunks.pop_back();
if (profiler_enabled)
Profiler::record(ChunkFreed, chunk);
// m_size was already decremented for these chunks in sweep()
delete chunk;
}
}
// if there are chunks but not more than FREE_THRESH
else if (heap.m_freed_chunks.size())
{
// essentially, always check for overlap between
// chunks before finishing the allocation
free_overlap(heap);
}
}
/**
* Checks for overlaps between freed chunks of memory
* and removes overlapping chunks, prioritizing the
* chunks at lower addresses. Assumes m_freed_chunks is
* ordered by start address.
*
* Time complexity: O(N), where N is the number of freed chunks,
* now that get_at() has been replaced with constant-time indexing.
*
* @param heap Heap singleton instance, passed in to avoid
*             redundant calls to the singleton getter.
*
* @note Maybe this should be changed to prioritize
*       larger chunks.
*/
void Heap::free_overlap(Heap &heap) // should record(ChunkFreed) for the redundant chunks
{
std::vector<Chunk *> filtered;
size_t i = 0;
//auto prev = Heap::get_at(heap.m_freed_chunks, i++);
auto prev = heap.m_freed_chunks[i++];
prev->m_marked = true;
filtered.push_back(prev);
// cout << filtered.back()->m_start << endl;
for (; i < heap.m_freed_chunks.size(); i++)
{
prev = filtered.back();
//auto next = Heap::get_at(heap.m_freed_chunks, i);
auto next = heap.m_freed_chunks[i];
auto p_start = (uintptr_t)(prev->m_start);
auto p_size = (uintptr_t)(prev->m_size);
auto n_start = (uintptr_t)(next->m_start);
if (n_start >= (p_start + p_size))
{
next->m_marked = true;
filtered.push_back(next);
}
}
heap.m_freed_chunks.swap(filtered);
bool profiler_enabled = heap.m_profiler_enable;
// After swap m_freed_chunks contains still available chunks
// and filtered contains all the chunks, so delete unused chunks
for (Chunk *chunk : filtered)
{
// if chunk was filtered away, delete it
if (!chunk->m_marked)
{
if (profiler_enabled)
Profiler::record(ChunkFreed, chunk);
// m_size was already decremented for this chunk in sweep()
delete chunk;
}
else
{
chunk->m_marked = false;
}
}
}
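/*
* The overlap filter in isolation (a sketch over plain intervals, assuming
* they are ordered by start): keep an interval only if it begins at or
* after the end of the last kept one.
*
*     struct Iv { uintptr_t start, size; };
*     std::vector<Iv> keep;
*     for (const Iv &iv : sorted)  // `sorted`: intervals ordered by start
*         if (keep.empty() || iv.start >= keep.back().start + keep.back().size)
*             keep.push_back(iv);
*/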
void Heap::set_profiler(bool mode)
{
Heap &heap = Heap::the();
heap.m_profiler_enable = mode;
}
Chunk* find_pointer(uintptr_t *start, const uintptr_t* const end, vector<Chunk *> &worklist) {
for (; start <= end; start++) {
for (Chunk *chunk : worklist)
{
auto c_start = reinterpret_cast<uintptr_t>(chunk->m_start);
auto c_size = reinterpret_cast<uintptr_t>(chunk->m_size);
auto c_end = reinterpret_cast<uintptr_t>(c_start + c_size);
// Check if the scanned word points to something within the chunk
if (c_start <= *start && *start < c_end)
{
return chunk;
}
}
}
// No word in the range pointed into any chunk on the worklist
return NULL;
}
// Checks if a given chunk points to another chunk and returns it
Chunk* Heap::find_pointer_hash(uintptr_t *start, const uintptr_t* const end) {
Heap &heap = Heap::the();
for (; start <= end; start++) {
auto search = heap.m_chunk_table.find(*start);
if (search != heap.m_chunk_table.end()) {
return search->second;
}
}
// No word in the range resolved to a chunk start address
return NULL;
}
#ifdef HEAP_DEBUG
/**
* Prints the result of Heap::init() and a dummy value
* for the current stack frame for reference.
*/
void Heap::check_init()
{
Heap &heap = Heap::the();
cout << "Heap addr:\t" << &heap << "\n";
cout << "GC m_stack_top:\t" << heap.m_stack_top << "\n";
auto stack_bottom = reinterpret_cast<uintptr_t *>(__builtin_frame_address(0));
cout << "GC stack_bottom:\t" << stack_bottom << endl;
}
/**
* Conditional collection, only to be used in debugging
*
* @param flags Bitmap of flags
*/
void Heap::collect(CollectOption flags)
{
set_profiler(true);
Heap &heap = Heap::the();
if (heap.m_profiler_enable)
Profiler::record(CollectStart);
cout << "DEBUG COLLECT\nFLAGS: ";
if (flags & MARK)
cout << "\n - MARK";
if (flags & SWEEP)
cout << "\n - SWEEP";
if (flags & FREE)
cout << "\n - FREE";
cout << "\n";
// get the frame address, where local variables and saved registers are located
auto stack_bottom = reinterpret_cast<uintptr_t *>(__builtin_frame_address(0));
cout << "Stack bottom in collect:\t" << stack_bottom << "\n";
uintptr_t *stack_top = heap.m_stack_top;
cout << "Stack end in collect:\t " << stack_top << endl;
if (flags & MARK)
{
// This file's mark() takes a vector of roots, so build it the
// same way collect() does
create_table();
vector<uintptr_t *> roots;
find_roots(stack_bottom, roots);
mark(roots);
}
if (flags & SWEEP)
sweep(heap);
if (flags & FREE)
free(heap);
}
// Mark child references from the root references
void mark_test(vector<Chunk *> &worklist)
{
while (worklist.size() > 0)
{
Chunk *ref = worklist.back();
worklist.pop_back();
Chunk *child = (Chunk *)ref; // this is probably not correct
if (child != nullptr && !child->m_marked)
{
child->m_marked = true;
worklist.push_back(child);
mark_test(worklist);
}
}
}
// Mark the root references and look for child references to them
void mark_from_roots(uintptr_t *start, const uintptr_t *end)
{
vector<Chunk *> worklist;
for (; start > end; start--)
{
if (*start % 8 == 0)
{ // all pointers must be aligned as double words
Chunk *ref = (Chunk *)*start;
if (ref != nullptr && !ref->m_marked)
{
ref->m_marked = true;
worklist.push_back(ref);
mark_test(worklist);
}
}
}
}
// For testing purposes
void Heap::print_line(Chunk *chunk)
{
cout << "Marked: " << chunk->m_marked << "\nStart adr: " << chunk->m_start << "\nSize: " << chunk->m_size << " B\n"
<< endl;
}
void Heap::print_worklist(std::vector<Chunk *> &list)
{
for (auto cp : list)
cout << "Chunk at:\t" << cp->m_start << "\nSize:\t\t" << cp->m_size << "\n";
cout << endl;
}
void Heap::print_contents()
{
Heap &heap = Heap::the();
if (heap.m_allocated_chunks.size())
{
cout << "\nALLOCATED CHUNKS #" << dec << heap.m_allocated_chunks.size() << endl;
for (auto chunk : heap.m_allocated_chunks)
print_line(chunk);
}
else
{
cout << "NO ALLOCATIONS\n" << endl;
}
if (heap.m_freed_chunks.size())
{
cout << "\nFREED CHUNKS #" << dec << heap.m_freed_chunks.size() << endl;
for (auto fchunk : heap.m_freed_chunks)
print_line(fchunk);
}
else
{
cout << "NO FREED CHUNKS" << endl;
}
}
void Heap::print_summary()
{
Heap &heap = Heap::the();
if (heap.m_allocated_chunks.size())
{
cout << "\nALLOCATED CHUNKS #" << dec << heap.m_allocated_chunks.size() << endl;
}
else
{
cout << "NO ALLOCATIONS\n" << endl;
}
if (heap.m_freed_chunks.size())
{
cout << "\nFREED CHUNKS #" << dec << heap.m_freed_chunks.size() << endl;
}
else
{
cout << "NO FREED CHUNKS" << endl;
}
}
void Heap::print_allocated_chunks(Heap *heap) {
cout << "--- Allocated Chunks ---\n" << endl;
for (auto chunk : heap->m_allocated_chunks) {
print_line(chunk);
}
}
Chunk *Heap::try_recycle_chunks_new(size_t size)
{
Heap &heap = Heap::the();
// Check if there are any freed chunks large enough for current request
for (size_t i = 0; i < heap.m_freed_chunks.size(); i++)
{
auto chunk = heap.m_freed_chunks[i];
// Iterator to this chunk's position, needed for erase()
auto iter = heap.m_freed_chunks.begin() + i;
if (chunk->m_size > size)
{
// Split the chunk: hand out the first `size` bytes and return
// the remaining part to the list of freed chunks
size_t diff = chunk->m_size - size;
auto complement_start = reinterpret_cast<uintptr_t *>(reinterpret_cast<char *>(chunk->m_start) + size);
auto chunk_complement = new Chunk(diff, complement_start);
chunk->m_size = size;
heap.m_freed_chunks.erase(iter);
heap.m_freed_chunks.push_back(chunk_complement);
heap.m_allocated_chunks.push_back(chunk);
return chunk;
}
else if (chunk->m_size == size)
{
// Reuse the whole chunk
heap.m_freed_chunks.erase(iter);
heap.m_allocated_chunks.push_back(chunk);
return chunk;
}
}
// If no chunk was found, return nullptr
return nullptr;
}
void Heap::free_overlap_new(Heap &heap) // should record(ChunkFreed) for the redundant chunks
{
std::vector<Chunk *> filtered;
// Guard against an empty free list before indexing into it
if (heap.m_freed_chunks.empty())
return;
size_t i = 0;
auto prev = heap.m_freed_chunks[i++];
prev->m_marked = true;
filtered.push_back(prev);
cout << filtered.back()->m_start << endl;
for (; i < heap.m_freed_chunks.size(); i++)
{
prev = filtered.back();
auto next = heap.m_freed_chunks[i]; //Heap::get_at(heap.m_freed_chunks, i);
auto p_start = (uintptr_t)(prev->m_start);
auto p_size = (uintptr_t)(prev->m_size);
auto n_start = (uintptr_t)(next->m_start);
if (n_start >= (p_start + p_size))
{
next->m_marked = true;
filtered.push_back(next);
}
}
heap.m_freed_chunks.swap(filtered);
bool profiler_enabled = heap.m_profiler_enable;
// After swap m_freed_chunks contains still available chunks
// and filtered contains all the chunks, so delete unused chunks
for (Chunk *chunk : filtered)
{
// if chunk was filtered away, delete it
if (!chunk->m_marked)
{
if (profiler_enabled)
Profiler::record(ChunkFreed, chunk);
delete chunk;
}
else
{
chunk->m_marked = false;
}
}
}
#endif
}

@@ -1,853 +0,0 @@
#include <iostream>
#include <stdexcept>
#include <stdlib.h>
#include <vector>
#include <unordered_map>
#include <chrono>
#include "heap.hpp"
#define time_now std::chrono::high_resolution_clock::now()
#define to_us std::chrono::duration_cast<std::chrono::microseconds>
using std::cout, std::endl, std::vector, std::hex, std::dec, std::unordered_map;
namespace GC
{
/**
* This implementation of the() guarantees laziness
* on the instance and a correct destruction with
* the destructor.
*
* @returns The singleton object.
*/
Heap& Heap::the()
{
static Heap instance;
return instance;
}
/**
* Initialises the heap singleton and saves the address
* of the calling function's stack frame as the stack_top.
* Presumably this address points to the stack frame of
* the compiled LLVM executable after linking.
*/
void Heap::init()
{
Heap &heap = Heap::the();
if (heap.profiler_enabled())
Profiler::record(HeapInit);
// clang warns because a nonzero argument to __builtin_frame_address is considered unsafe
#pragma clang diagnostic ignored "-Wframe-address"
heap.m_stack_top = static_cast<uintptr_t *>(__builtin_frame_address(1));
// TODO: handle this below
//heap.m_heap_top = heap.m_heap;
}
void Heap::set_profiler_log_options(RecordOption flags)
{
Profiler::set_log_options(flags);
}
/**
* Disposes the heap and the profiler at program exit
* which also triggers a heap log file dumped if the
* profiler is enabled.
*/
void Heap::dispose()
{
Heap &heap = Heap::the();
if (heap.profiler_enabled())
Profiler::dispose();
}
/**
* Allocates a given number of bytes on the heap.
*
* @param size The number of bytes to be allocated.
*
* @return A pointer to the address where the memory
*         has been allocated. This pointer is meant
*         to be cast to an object pointer.
*/
void *Heap::alloc(size_t size)
{
auto a_start = time_now;
// Singleton
Heap &heap = Heap::the();
bool profiler_enabled = heap.profiler_enabled();
if (profiler_enabled)
Profiler::record(AllocStart, size);
if (size == 0)
{
cout << "Heap: Cannot alloc 0B. No bytes allocated." << endl;
return nullptr;
}
if (heap.m_size + size > HEAP_SIZE)
{
heap.collect();
}
// If there is still not enough memory after the collection,
// crash with an out-of-memory error
if (heap.m_size + size > HEAP_SIZE)
{
if (profiler_enabled)
Profiler::dispose();
throw std::runtime_error(std::string("Error: Heap out of memory"));
}
// If a chunk was recycled, return the old chunk address
Chunk *reused_chunk = heap.try_recycle_chunks(size);
if (reused_chunk != nullptr)
{
if (profiler_enabled)
Profiler::record(ReusedChunk, reused_chunk);
auto a_end = time_now;
auto a_ms = to_us(a_end - a_start);
if (profiler_enabled)
Profiler::record(AllocStart, a_ms);
return static_cast<void *>(reused_chunk->m_start);
}
// If no free chunk was found (reused_chunk is a nullptr),
// then create a new chunk
auto new_chunk = new Chunk(size, (uintptr_t *)(heap.m_heap + heap.m_size));
heap.m_size += size;
// TODO: handle this below
//heap.m_total_size += size;
heap.m_allocated_chunks.push_back(new_chunk);
if (profiler_enabled)
Profiler::record(NewChunk, new_chunk);
auto a_end = time_now;
auto a_ms = to_us(a_end - a_start);
if (profiler_enabled)
Profiler::record(AllocStart, a_ms);
return new_chunk->m_start;
}
/**
* Tries to recycle freed chunks whose memory has already
* been obtained from the OS but has since been freed from
* our heap. Reusing them slightly reduces the number of
* OS-level allocations, saving the cost of malloc'ing new
* memory from the OS.
*
* @param size Number of bytes needed for the object
*             which is about to be allocated.
*
* @returns If a chunk is found and recycled, a
*          pointer to the allocated memory for
*          the object is returned. If not, a
*          nullptr is returned to signify that no
*          suitable chunk was found.
*/
Chunk *Heap::try_recycle_chunks(size_t size)
{
Heap &heap = Heap::the();
// Check if there are any freed chunks large enough for current request
for (size_t i = 0; i < heap.m_freed_chunks.size(); i++)
{
auto chunk = heap.m_freed_chunks[i];
// Iterator to this chunk's position, needed for erase()
auto iter = heap.m_freed_chunks.begin() + i;
if (chunk->m_size > size)
{
// Split the chunk: hand out the first `size` bytes and return
// the remaining part to the list of freed chunks
size_t diff = chunk->m_size - size;
auto complement_start = reinterpret_cast<uintptr_t *>(reinterpret_cast<char *>(chunk->m_start) + size);
auto chunk_complement = new Chunk(diff, complement_start);
chunk->m_size = size;
heap.m_freed_chunks.erase(iter);
heap.m_freed_chunks.push_back(chunk_complement);
heap.m_allocated_chunks.push_back(chunk);
return chunk;
}
else if (chunk->m_size == size)
{
// Reuse the whole chunk
heap.m_freed_chunks.erase(iter);
heap.m_allocated_chunks.push_back(chunk);
return chunk;
}
}
// If no chunk was found, return nullptr
return nullptr;
}
/**
* Reports whether the profiler is enabled.
*
* @returns True if the profiler is enabled,
*          false otherwise.
*/
bool Heap::profiler_enabled() {
Heap &heap = Heap::the();
return heap.m_profiler_enable;
}
/**
* Collection phase of the garbage collector. When
* an allocation is requested and there is no space
* left on the heap, a collection is triggered. This
* function is private so that the user cannot trigger
* a collection unnecessarily.
*/
void Heap::collect()
{
auto c_start = time_now;
Heap &heap = Heap::the();
if (heap.profiler_enabled())
Profiler::record(CollectStart);
// Get the stack frame of alloc()'s caller, two frames up from here
auto stack_bottom = reinterpret_cast<uintptr_t *>(__builtin_frame_address(2));
if (heap.m_stack_top == nullptr)
throw std::runtime_error(std::string("Error: Heap is not initialized, read the docs!"));
uintptr_t *stack_top = heap.m_stack_top;
// mark_hash() is being tested here; mark() was the previous working implementation
create_table();
mark_hash(stack_bottom, stack_top);
sweep(heap);
free(heap);
auto c_end = time_now;
if (heap.profiler_enabled())
Profiler::record(CollectStart, to_us(c_end - c_start));
}
/**
* Iterates through the stack; if an element on the stack points to a chunk,
* called a root chunk, that chunk is marked (i.e. reachable).
* It then follows all chunks which are possibly reachable from
* the root chunk and marks those chunks as well.
* Once a chunk is marked it is removed from the worklist, since it is no
* longer of concern for this method.
*
* Time complexity: O(N^2 * log(N)) as an upper bound.
* Where N is either the size of the worklist or the size of
* the stack frame, depending on which is the largest.
*
* @param start Pointer to the start of the stack frame.
* @param end Pointer to the end of the stack frame.
* @param worklist The currently allocated chunks, which haven't been marked.
*/
void Heap::mark(uintptr_t *start, const uintptr_t* const end, vector<Chunk *> &worklist)
{
// cout << "\nWorklist size: " << worklist.size() << "\n";
Heap &heap = Heap::the();
bool profiler_enabled = heap.m_profiler_enable;
if (profiler_enabled)
Profiler::record(MarkStart);
vector<AddrRange *> rangeWL;
// Find addresses that are in the worklist
for (; start <= end; start++)
{
auto it = worklist.begin();
auto stop = worklist.end();
while (it != stop)
{
Chunk *chunk = *it;
auto c_start = reinterpret_cast<uintptr_t>(chunk->m_start);
auto c_size = reinterpret_cast<uintptr_t>(chunk->m_size);
auto c_end = reinterpret_cast<uintptr_t>(c_start + c_size);
// Check if the stack pointer points to something within the chunk
if (c_start <= *start && *start < c_end)
{
if (!chunk->m_marked)
{
if (profiler_enabled)
Profiler::record(ChunkMarked, chunk);
chunk->m_marked = true;
it = worklist.erase(it);
// Queue this chunk's address range; mark_range() scans it
// for references to further chunks
rangeWL.push_back(new AddrRange((uintptr_t *)c_start, (uintptr_t *)c_end));
}
else
{
++it;
}
}
else
{
++it;
}
}
}
mark_range(rangeWL, worklist);
// The AddrRange objects are owned here; release them once marking is done
for (auto *range : rangeWL)
delete range;
rangeWL.clear();
}
void Heap::mark_range(vector<AddrRange *> &ranges, vector<Chunk *> &worklist)
{
Heap &heap = Heap::the();
bool profiler_enabled = heap.m_profiler_enable;
if (profiler_enabled)
Profiler::record(MarkStart);
// Iterate by index: push_back() below may reallocate `ranges`, which
// would invalidate any iterators cached into it
for (size_t r = 0; r < ranges.size(); r++)
{
auto range = ranges[r];
uintptr_t *start = (uintptr_t *)range->start;
const uintptr_t *end = range->end;
if (start == nullptr)
{
cout << "\nstart is null\n";
continue;
}
for (; start <= end; start++)
{
auto wliter = worklist.begin();
// Re-read worklist.end() each pass: erase() invalidates cached iterators
while (wliter != worklist.end())
{
Chunk *chunk = *wliter;
auto c_start = reinterpret_cast<uintptr_t>(chunk->m_start);
auto c_size = reinterpret_cast<uintptr_t>(chunk->m_size);
auto c_end = reinterpret_cast<uintptr_t>(c_start + c_size);
if (c_start <= *start && *start < c_end && !chunk->m_marked)
{
chunk->m_marked = true;
wliter = worklist.erase(wliter);
ranges.push_back(new AddrRange((uintptr_t *)c_start, (uintptr_t *)c_end));
}
else
{
wliter++;
}
}
}
}
}
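/*
* The worklist strategy in miniature (a sketch): the vector of ranges is
* both the input and the frontier; index-based iteration keeps working
* even when scanning appends new entries.
*
*     std::vector<std::pair<int, int>> work{{0, 4}};
*     for (size_t i = 0; i < work.size(); i++)
*     {
*         // scanning work[i] may discover more ranges:
*         if (work[i].first == 0)
*             work.push_back({4, 8});  // safe: indices survive reallocation
*     }
*/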
void Heap::create_table()
{
Heap &heap = Heap::the();
for (auto chunk : heap.m_allocated_chunks) {
auto pair = std::make_pair(reinterpret_cast<uintptr_t>(chunk->m_start), chunk);
heap.m_chunk_table.insert(pair);
}
}
void Heap::mark_hash(uintptr_t *start, const uintptr_t* const end)
{
Heap &heap = Heap::the();
bool profiler_enabled = heap.m_profiler_enable;
if (profiler_enabled)
Profiler::record(MarkStart);
for (; start <= end; start++)
{
auto search = heap.m_chunk_table.find(*start);
if (search != heap.m_chunk_table.end())
{
Chunk *chunk = search->second;
auto c_start = reinterpret_cast<uintptr_t>(chunk->m_start);
auto c_size = reinterpret_cast<uintptr_t>(chunk->m_size);
auto c_end = reinterpret_cast<uintptr_t*>(c_start + c_size);
if (!chunk->m_marked)
{
chunk->m_marked = true;
if (profiler_enabled)
Profiler::record(ChunkMarked, chunk);
//mark_hash(chunk->m_start, c_end);
Chunk *next = find_pointer_hash((uintptr_t *) c_start, (uintptr_t *) c_end);
// Follow the chain of references until no further chunk is found
// or an already-marked chunk is reached
while (next != NULL && !next->m_marked)
{
next->m_marked = true;
if (profiler_enabled)
Profiler::record(ChunkMarked, next);
auto n_start = reinterpret_cast<uintptr_t>(next->m_start);
auto n_size = reinterpret_cast<uintptr_t>(next->m_size);
auto n_end = reinterpret_cast<uintptr_t>(n_start + n_size);
next = find_pointer_hash((uintptr_t *) n_start, (uintptr_t *) n_end);
}
}
}
}
}
/**
* Sweeps the heap: unmarks the marked chunks for the next cycle and
* moves the unmarked chunks to the list of freed chunks, to be freed later.
*
* Time complexity: O(N^2), where N is the number of allocated chunks.
* It is quadratic in the worst case, since each call to erase() is linear.
*
* @param heap Reference to the heap singleton instance.
*/
void Heap::sweep(Heap &heap)
{
bool profiler_enabled = heap.m_profiler_enable;
if (profiler_enabled)
Profiler::record(SweepStart);
auto iter = heap.m_allocated_chunks.begin();
std::cout << "Chunks alloced: " << heap.m_allocated_chunks.size() << std::endl;
// This cannot "iter != stop", results in seg fault, since the end gets updated, I think.
while (iter != heap.m_allocated_chunks.end())
{
Chunk *chunk = *iter;
// Unmark the marked chunks for the next iteration.
if (chunk->m_marked)
{
chunk->m_marked = false;
++iter;
}
else
{
// Add the unmarked chunks to freed chunks and remove from
// the list of allocated chunks
if (profiler_enabled)
Profiler::record(ChunkSwept, chunk);
heap.m_freed_chunks.push_back(chunk);
iter = heap.m_allocated_chunks.erase(iter);
// m_size is decremented later, when the chunk is actually freed
}
}
std::cout << "Chunks left: " << heap.m_allocated_chunks.size() << std::endl;
}
/**
* Frees chunks that were moved to the list m_freed_chunks
* by the sweep phase. If the number of freed chunks exceeds
* FREE_THRESH, delete them all to avoid cluttering.
*
* Time complexity: O(N), where N is the number of freed chunks;
* both the bulk-free path and free_overlap() are linear.
*
* @param heap Heap singleton instance, passed in to avoid
*             redundant calls to the singleton getter.
*/
void Heap::free(Heap &heap)
{
bool profiler_enabled = heap.m_profiler_enable;
if (profiler_enabled)
Profiler::record(FreeStart);
if (heap.m_freed_chunks.size() > FREE_THRESH)
{
while (heap.m_freed_chunks.size())
{
auto chunk = heap.m_freed_chunks.back();
heap.m_freed_chunks.pop_back();
if (profiler_enabled)
Profiler::record(ChunkFreed, chunk);
heap.m_size -= chunk->m_size;
delete chunk;
}
}
// if there are chunks but not more than FREE_THRESH
else if (heap.m_freed_chunks.size())
{
// essentially, always check for overlap between
// chunks before finishing the allocation
free_overlap(heap);
}
}
/**
* Checks for overlaps between freed chunks of memory
* and removes overlapping chunks, prioritizing the
* chunks at lower addresses. Assumes m_freed_chunks is
* ordered by start address.
*
* Time complexity: O(N), where N is the number of freed chunks,
* now that get_at() has been replaced with constant-time indexing.
*
* @param heap Heap singleton instance, passed in to avoid
*             redundant calls to the singleton getter.
*
* @note Maybe this should be changed to prioritize
*       larger chunks.
*/
void Heap::free_overlap(Heap &heap) // should record(ChunkFreed) for the redundant chunks
{
std::vector<Chunk *> filtered;
size_t i = 0;
//auto prev = Heap::get_at(heap.m_freed_chunks, i++);
auto prev = heap.m_freed_chunks[i++];
prev->m_marked = true;
filtered.push_back(prev);
// cout << filtered.back()->m_start << endl;
for (; i < heap.m_freed_chunks.size(); i++)
{
prev = filtered.back();
//auto next = Heap::get_at(heap.m_freed_chunks, i);
auto next = heap.m_freed_chunks[i];
auto p_start = (uintptr_t)(prev->m_start);
auto p_size = (uintptr_t)(prev->m_size);
auto n_start = (uintptr_t)(next->m_start);
if (n_start >= (p_start + p_size))
{
next->m_marked = true;
filtered.push_back(next);
}
}
heap.m_freed_chunks.swap(filtered);
bool profiler_enabled = heap.m_profiler_enable;
// After swap m_freed_chunks contains still available chunks
// and filtered contains all the chunks, so delete unused chunks
for (Chunk *chunk : filtered)
{
// if chunk was filtered away, delete it
if (!chunk->m_marked)
{
if (profiler_enabled)
Profiler::record(ChunkFreed, chunk);
heap.m_size -= chunk->m_size;
delete chunk;
}
else
{
chunk->m_marked = false;
}
}
}
void Heap::set_profiler(bool mode)
{
Heap &heap = Heap::the();
heap.m_profiler_enable = mode;
}
Chunk* find_pointer(uintptr_t *start, const uintptr_t* const end, vector<Chunk *> &worklist) {
for (; start <= end; start++) {
for (Chunk *chunk : worklist)
{
auto c_start = reinterpret_cast<uintptr_t>(chunk->m_start);
auto c_size = reinterpret_cast<uintptr_t>(chunk->m_size);
auto c_end = reinterpret_cast<uintptr_t>(c_start + c_size);
// Check if the scanned word points to something within the chunk
if (c_start <= *start && *start < c_end)
{
return chunk;
}
}
}
// No word in the range pointed into any chunk on the worklist
return NULL;
}
// Checks if a given chunk points to another chunk and returns it
Chunk* Heap::find_pointer_hash(uintptr_t *start, const uintptr_t* const end) {
Heap &heap = Heap::the();
for (; start <= end; start++) {
auto search = heap.m_chunk_table.find(*start);
if (search != heap.m_chunk_table.end()) {
return search->second;
}
}
// No word in the range resolved to a chunk start address
return NULL;
}
#ifdef HEAP_DEBUG
/**
* Prints the result of Heap::init() and a dummy value
* for the current stack frame for reference.
*/
void Heap::check_init()
{
Heap &heap = Heap::the();
cout << "Heap addr:\t" << &heap << "\n";
cout << "GC m_stack_top:\t" << heap.m_stack_top << "\n";
auto stack_bottom = reinterpret_cast<uintptr_t *>(__builtin_frame_address(0));
cout << "GC stack_bottom:\t" << stack_bottom << endl;
}
/**
* Conditional collection, only to be used in debugging
*
* @param flags Bitmap of flags
*/
void Heap::collect(CollectOption flags)
{
set_profiler(true);
Heap &heap = Heap::the();
if (heap.m_profiler_enable)
Profiler::record(CollectStart);
cout << "DEBUG COLLECT\nFLAGS: ";
if (flags & MARK)
cout << "\n - MARK";
if (flags & SWEEP)
cout << "\n - SWEEP";
if (flags & FREE)
cout << "\n - FREE";
cout << "\n";
// get the frame address, where local variables and saved registers are located
auto stack_bottom = reinterpret_cast<uintptr_t *>(__builtin_frame_address(0));
cout << "Stack bottom in collect:\t" << stack_bottom << "\n";
uintptr_t *stack_top = heap.m_stack_top;
cout << "Stack end in collect:\t " << stack_top << endl;
auto work_list = heap.m_allocated_chunks;
if (flags & MARK)
mark(stack_bottom, stack_top, work_list);
if (flags & SWEEP)
sweep(heap);
if (flags & FREE)
free(heap);
}
// Mark child references from the root references
void mark_test(vector<Chunk *> &worklist)
{
while (worklist.size() > 0)
{
Chunk *ref = worklist.back();
worklist.pop_back();
Chunk *child = (Chunk *)ref; // this is probably not correct
if (child != nullptr && !child->m_marked)
{
child->m_marked = true;
worklist.push_back(child);
mark_test(worklist);
}
}
}
// Mark the root references and look for child references to them
void mark_from_roots(uintptr_t *start, const uintptr_t *end)
{
vector<Chunk *> worklist;
for (; start > end; start--)
{
if (*start % 8 == 0)
{ // all pointers must be aligned as double words
Chunk *ref = (Chunk *)*start;
if (ref != nullptr && !ref->m_marked)
{
ref->m_marked = true;
worklist.push_back(ref);
mark_test(worklist);
}
}
}
}
// For testing purposes
void Heap::print_line(Chunk *chunk)
{
cout << "Marked: " << chunk->m_marked << "\nStart adr: " << chunk->m_start << "\nSize: " << chunk->m_size << " B\n"
<< endl;
}
void Heap::print_worklist(std::vector<Chunk *> &list)
{
for (auto cp : list)
cout << "Chunk at:\t" << cp->m_start << "\nSize:\t\t" << cp->m_size << "\n";
cout << endl;
}
void Heap::print_contents()
{
Heap &heap = Heap::the();
if (heap.m_allocated_chunks.size())
{
cout << "\nALLOCATED CHUNKS #" << dec << heap.m_allocated_chunks.size() << endl;
for (auto chunk : heap.m_allocated_chunks)
print_line(chunk);
}
else
{
cout << "NO ALLOCATIONS\n" << endl;
}
if (heap.m_freed_chunks.size())
{
cout << "\nFREED CHUNKS #" << dec << heap.m_freed_chunks.size() << endl;
for (auto fchunk : heap.m_freed_chunks)
print_line(fchunk);
}
else
{
cout << "NO FREED CHUNKS" << endl;
}
}
void Heap::print_summary()
{
Heap &heap = Heap::the();
if (heap.m_allocated_chunks.size())
{
cout << "\nALLOCATED CHUNKS #" << dec << heap.m_allocated_chunks.size() << endl;
}
else
{
cout << "NO ALLOCATIONS\n" << endl;
}
if (heap.m_freed_chunks.size())
{
cout << "\nFREED CHUNKS #" << dec << heap.m_freed_chunks.size() << endl;
}
else
{
cout << "NO FREED CHUNKS" << endl;
}
}
void Heap::print_allocated_chunks(Heap *heap) {
cout << "--- Allocated Chunks ---\n" << endl;
for (auto chunk : heap->m_allocated_chunks) {
print_line(chunk);
}
}
Chunk *Heap::try_recycle_chunks_new(size_t size)
{
Heap &heap = Heap::the();
// Check if there are any freed chunks large enough for current request
for (size_t i = 0; i < heap.m_freed_chunks.size(); i++)
{
auto chunk = heap.m_freed_chunks[i];
// Iterator to this chunk's position, needed for erase()
auto iter = heap.m_freed_chunks.begin() + i;
if (chunk->m_size > size)
{
// Split the chunk: hand out the first `size` bytes and return
// the remaining part to the list of freed chunks
size_t diff = chunk->m_size - size;
auto complement_start = reinterpret_cast<uintptr_t *>(reinterpret_cast<char *>(chunk->m_start) + size);
auto chunk_complement = new Chunk(diff, complement_start);
chunk->m_size = size;
heap.m_freed_chunks.erase(iter);
heap.m_freed_chunks.push_back(chunk_complement);
heap.m_allocated_chunks.push_back(chunk);
return chunk;
}
else if (chunk->m_size == size)
{
// Reuse the whole chunk
heap.m_freed_chunks.erase(iter);
heap.m_allocated_chunks.push_back(chunk);
return chunk;
}
}
// If no chunk was found, return nullptr
return nullptr;
}
void Heap::free_overlap_new(Heap &heap) // should record(ChunkFreed) for the redundant chunks
{
std::vector<Chunk *> filtered;
// Guard against an empty free list before indexing into it
if (heap.m_freed_chunks.empty())
return;
size_t i = 0;
auto prev = heap.m_freed_chunks[i++];
prev->m_marked = true;
filtered.push_back(prev);
cout << filtered.back()->m_start << endl;
for (; i < heap.m_freed_chunks.size(); i++)
{
prev = filtered.back();
auto next = heap.m_freed_chunks[i]; //Heap::get_at(heap.m_freed_chunks, i);
auto p_start = (uintptr_t)(prev->m_start);
auto p_size = (uintptr_t)(prev->m_size);
auto n_start = (uintptr_t)(next->m_start);
if (n_start >= (p_start + p_size))
{
next->m_marked = true;
filtered.push_back(next);
}
}
heap.m_freed_chunks.swap(filtered);
bool profiler_enabled = heap.m_profiler_enable;
// After swap m_freed_chunks contains still available chunks
// and filtered contains all the chunks, so delete unused chunks
for (Chunk *chunk : filtered)
{
// if chunk was filtered away, delete it
if (!chunk->m_marked)
{
if (profiler_enabled)
Profiler::record(ChunkFreed, chunk);
delete chunk;
}
else
{
chunk->m_marked = false;
}
}
}
#endif
}

@@ -1,311 +0,0 @@
#include <ctime>
#include <cstring>
#include <iostream>
#include <fstream>
#include <time.h>
#include <vector>
#include <unistd.h>
#include <stdexcept>
#include "chunk.hpp"
#include "event.hpp"
#include "profiler.hpp"
// #define MAC_OS
namespace GC
{
Profiler& Profiler::the()
{
static Profiler instance;
return instance;
}
RecordOption Profiler::log_options()
{
Profiler &prof = Profiler::the();
return prof.flags;
}
void Profiler::set_log_options(RecordOption flags)
{
Profiler &prof = Profiler::the();
prof.flags = flags;
}
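/**
* Appends an event to the full event log and run-length encodes it into
* the per-type counters: a repeat of the previous event's type bumps its
* counter, otherwise the finished counter is stored and a new one starts.
*/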
void Profiler::record_data(GCEvent *event)
{
Profiler &prof = Profiler::the();
prof.m_events.push_back(event);
if (prof.m_last_prof_event->m_type == event->get_type())
prof.m_last_prof_event->m_n++;
else
{
prof.m_prof_events.push_back(prof.m_last_prof_event);
prof.m_last_prof_event = new ProfilerEvent(event->get_type());
}
}
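/*
* The aggregation rule in isolation (a sketch): consecutive events of the
* same type collapse into one counter, i.e. a run-length encoding of the
* event stream.
*
*     #include <string>
*     #include <utility>
*     #include <vector>
*
*     std::vector<std::pair<char, int>> rle;
*     for (char e : std::string("AAACA"))
*     {
*         if (!rle.empty() && rle.back().first == e)
*             rle.back().second++;
*         else
*             rle.push_back({e, 1});
*     }
*     // rle == {('A',3), ('C',1), ('A',1)}
*/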
/**
* Records an event independent of a chunk.
*
* @param type The type of event to record.
*/
void Profiler::record(GCEventType type)
{
Profiler &prof = Profiler::the();
if (prof.flags & type)
Profiler::record_data(new GCEvent(type));
}
/**
* This overload is only used with an AllocStart
* event.
*
* @param type The type of event to record.
*
* @param size The size requested from alloc().
*/
void Profiler::record(GCEventType type, size_t size)
{
Profiler &prof = Profiler::the();
if (prof.flags & type)
Profiler::record_data(new GCEvent(type, size));
}
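/**
* Dumps the recorded history to a log file: an aggregated run-length
* summary when FunctionCalls logging is selected, otherwise the full
* per-chunk event trace.
*/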
void Profiler::dump_trace()
{
Profiler &prof = Profiler::the();
if (prof.flags & FunctionCalls)
dump_prof_trace();
else
dump_chunk_trace();
}
/**
* Records an event related to a chunk.
*
* @param type The type of event to record.
*
* @param chunk The chunk the event is connected
* to.
*/
void Profiler::record(GCEventType type, Chunk *chunk)
{
// Create a copy of chunk to store in the profiler
// because in free() chunks are deleted and cannot
// be referenced by the profiler. These copied
// chunks are deleted by the profiler on dispose().
Profiler &prof = Profiler::the();
if (prof.flags & type)
{
auto chunk_copy = new Chunk(chunk);
auto event = new GCEvent(type, chunk_copy);
Profiler::record_data(event);
}
}
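/**
* Accumulates a measured duration into the totals reported by
* dump_prof_trace().
*
* @param type AllocStart or CollectStart; other types are ignored.
* @param time The measured wall-clock duration in microseconds.
*/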
void Profiler::record(GCEventType type, std::chrono::microseconds time)
{
Profiler &prof = Profiler::the();
if (type == AllocStart)
{
prof.alloc_time += time;
}
else if (type == CollectStart)
{
prof.collect_time += time;
}
}
void Profiler::dump_prof_trace()
{
Profiler &prof = Profiler::the();
prof.m_prof_events.push_back(prof.m_last_prof_event);
auto start = prof.m_prof_events.begin();
auto end = prof.m_prof_events.end();
int allocs = 0, collects = 0;
std::ofstream fstr = prof.create_file_stream();
while (start != end)
{
auto event = *start++;
if (event->m_type == AllocStart)
allocs += event->m_n;
else if (event->m_type == CollectStart)
collects += event->m_n;
fstr << "\n--------------------------------\n"
<< Profiler::type_to_string(event->m_type) << " "
<< event->m_n << " times:";
}
fstr << "\n--------------------------------";
fstr << "\n\nTime spent on allocations:\t" << prof.alloc_time.count() << " microseconds"
<< "\nAllocation cycles:\t" << allocs
<< "\nTime spent on collections:\t" << prof.collect_time.count() << " microseconds"
<< "\nCollection cycles:\t" << collects
<< "\n--------------------------------";
}
/**
* Prints the history of the recorded events
* to a log file in the /tests/logs folder.
*/
void Profiler::dump_chunk_trace()
{
Profiler &prof = Profiler::the();
auto start = prof.m_events.begin();
auto end = prof.m_events.end();
// Open the log file once up front; opening a fresh stream per event
// would truncate the file and keep only the last event
std::ofstream fstr = prof.create_file_stream();
// Buffer for timestamp
char buffer[22];
while (start != end)
{
auto event = *start++;
prof.print_chunk_event(event, buffer, fstr);
}
}
// NOTE: the corresponding declaration in profiler.hpp must use this
// signature; the caller owns the output stream
void Profiler::print_chunk_event(GCEvent *event, char buffer[22], std::ofstream &fstr)
{
std::time_t tt = event->get_time_stamp();
std::tm *btm = std::localtime(&tt);
std::strftime(buffer, 22, "%a %T", btm);
fstr << "--------------------------------\n"
<< buffer
<< "\nEvent:\t" << Profiler::type_to_string(event->get_type());
const Chunk *chunk = event->get_chunk();
if (event->get_type() == AllocStart)
{
fstr << "\nSize: " << event->get_size();
}
else if (chunk)
{
fstr << "\nChunk: " << chunk->m_start
<< "\n Size: " << chunk->m_size
<< "\n Mark: " << chunk->m_marked;
}
fstr << "\n";
}
/**
* Deletes the profiler singleton and all
* the events recorded after recording
* the ProfilerDispose event and dumping
* the history to a log file.
*/
void Profiler::dispose()
{
Profiler::record(ProfilerDispose);
Profiler::dump_trace();
}
/**
* Creates a filestream for the future
* log file to print the history to in
* dump_trace().
*
* @returns The output stream to the file.
*/
std::ofstream Profiler::create_file_stream()
{
// get current time
std::time_t tt = std::time(NULL);
std::tm *ptm = std::localtime(&tt);
// format to string
char buffer[32];
std::strftime(buffer, 32, "/log_%a_%H_%M_%S.txt", ptm);
std::string filename(buffer);
const std::string fullpath = get_log_folder() + filename;
std::ofstream fstr(fullpath);
return fstr;
}
/**
* This function retrieves the path to the folder
* of the executable to use for log files.
*
* @returns The path to the logs folder.
*
* @throws A runtime error if the call
* to readlink() fails.
*/
std::string Profiler::get_log_folder()
{
#ifndef MAC_OS
char buffer[1024];
// chars read from path
ssize_t len = readlink("/proc/self/exe", buffer, sizeof(buffer)-1);
// if readlink fails
if (len == -1)
{
throw std::runtime_error(std::string("Error: readlink failed on '/proc/self/exe/'"));
}
buffer[len] = '\0';
// convert to string for string operators
auto path = std::string(buffer);
// remove filename
size_t last_slash = path.find_last_of('/');
std::string folder = path.substr(0, last_slash);
#else
auto folder = std::string("/Users/valtermiari/Desktop/DV/Bachelors/code/language/src/GC/tests");
#endif
return folder + "/logs";
}
const char *Profiler::type_to_string(GCEventType type)
{
switch (type)
{
case HeapInit: return "HeapInit";
case AllocStart: return "AllocStart";
case CollectStart: return "CollectStart";
case MarkStart: return "MarkStart";
case ChunkMarked: return "ChunkMarked";
case ChunkSwept: return "ChunkSwept";
case ChunkFreed: return "ChunkFreed";
case NewChunk: return "NewChunk";
case ReusedChunk: return "ReusedChunk";
case ProfilerDispose: return "ProfilerDispose";
case SweepStart: return "SweepStart";
case FreeStart: return "FreeStart";
default: return "[Unknown]";
}
}
}