Allow memory pools to have a fallback.

If it runs out of memory, it will try to allocate from its fallback.
Freed memory is not returned to the fallback until it is finalised.

Change-Id: Ib63ae95321d5d70931dedda3c5f9267caec82b2d
diff --git a/inc/hf/mpool.h b/inc/hf/mpool.h
index 556fea9..0034dbb 100644
--- a/inc/hf/mpool.h
+++ b/inc/hf/mpool.h
@@ -26,11 +26,14 @@
 	size_t entry_size;
 	struct mpool_chunk *chunk_list;
 	struct mpool_entry *entry_list;
+	struct mpool *fallback;
 };
 
 void mpool_enable_locks(void);
 void mpool_init(struct mpool *p, size_t entry_size);
 void mpool_init_from(struct mpool *p, struct mpool *from);
+void mpool_init_with_fallback(struct mpool *p, struct mpool *fallback);
+void mpool_fini(struct mpool *p);
 bool mpool_add_chunk(struct mpool *p, void *begin, size_t size);
 void *mpool_alloc(struct mpool *p);
 void *mpool_alloc_contiguous(struct mpool *p, size_t count, size_t align);
diff --git a/src/mpool.c b/src/mpool.c
index 21bce22..4cc1d40 100644
--- a/src/mpool.c
+++ b/src/mpool.c
@@ -70,6 +70,7 @@
 	p->entry_size = entry_size;
 	p->chunk_list = NULL;
 	p->entry_list = NULL;
+	p->fallback = NULL;
 	sl_init(&p->lock);
 }
 
@@ -85,13 +86,64 @@
 	mpool_lock(from);
 	p->chunk_list = from->chunk_list;
 	p->entry_list = from->entry_list;
+	p->fallback = from->fallback;
 
 	from->chunk_list = NULL;
 	from->entry_list = NULL;
+	from->fallback = NULL;
 	mpool_unlock(from);
 }
 
 /**
+ * Initialises the given memory pool with a fallback memory pool to use when
+ * this pool runs out of memory.
+ */
+void mpool_init_with_fallback(struct mpool *p, struct mpool *fallback)
+{
+	mpool_init(p, fallback->entry_size);
+	p->fallback = fallback;
+}
+
+/**
+ * Finishes the given memory pool, giving all free memory to the fallback pool
+ * if there is one.
+ */
+void mpool_fini(struct mpool *p)
+{
+	struct mpool_entry *entry;
+	struct mpool_chunk *chunk;
+
+	if (!p->fallback) {
+		return;
+	}
+
+	mpool_lock(p);
+
+	/* Merge the freelist into the fallback. */
+	entry = p->entry_list;
+	while (entry != NULL) {
+		void *ptr = entry;
+		entry = entry->next;
+		mpool_free(p->fallback, ptr);
+	}
+
+	/* Merge the chunk list into the fallback. */
+	chunk = p->chunk_list;
+	while (chunk != NULL) {
+		void *ptr = chunk;
+		size_t size = (uintptr_t)chunk->limit - (uintptr_t)chunk;
+		chunk = chunk->next_chunk;
+		mpool_add_chunk(p->fallback, ptr, size);
+	}
+
+	p->chunk_list = NULL;
+	p->entry_list = NULL;
+	p->fallback = NULL;
+
+	mpool_unlock(p);
+}
+
+/**
  * Adds a contiguous chunk of memory to the given memory pool. The chunk will
  * eventually be broken up into entries of the size held by the memory pool.
  *
@@ -128,9 +180,10 @@
 }
 
 /**
- * Allocates an entry from the given memory pool, if one is available.
+ * Allocates an entry from the given memory pool, if one is available. The
+ * fallback will not be used even if there is one.
  */
-void *mpool_alloc(struct mpool *p)
+static void *mpool_alloc_no_fallback(struct mpool *p)
 {
 	void *ret;
 	struct mpool_chunk *chunk;
@@ -164,10 +217,30 @@
 
 exit:
 	mpool_unlock(p);
+
 	return ret;
 }
 
 /**
+ * Allocates an entry from the given memory pool, if one is available. If there
+ * isn't one available, try to allocate from the fallback if there is one.
+ */
+void *mpool_alloc(struct mpool *p)
+{
+	do {
+		void *ret = mpool_alloc_no_fallback(p);
+
+		if (ret != NULL) {
+			return ret;
+		}
+
+		p = p->fallback;
+	} while (p != NULL);
+
+	return NULL;
+}
+
+/**
  * Frees an entry back into the memory pool, making it available for reuse.
  *
  * This is meant to be used for freeing single entries. To free multiple
@@ -185,17 +258,12 @@
 }
 
 /**
- * Allocates a number of contiguous and aligned entries. This is a best-effort
- * operation and only succeeds if such entries can be found in chunks (i.e., the
- * entry list is never used to satisfy these allocations).
- *
- * The alignment is specified as the number of entries, that is, if `align` is
- * 4, the alignment in bytes will be 4 * entry_size.
- *
- * The caller can enventually free the returned entries by calling
- * mpool_add_chunk.
+ * Allocates a number of contiguous and aligned entries. If a suitable
+ * allocation could not be found, the fallback will not be used even if there is
+ * one.
  */
-void *mpool_alloc_contiguous(struct mpool *p, size_t count, size_t align)
+void *mpool_alloc_contiguous_no_fallback(struct mpool *p, size_t count,
+					 size_t align)
 {
 	struct mpool_chunk **prev;
 	void *ret = NULL;
@@ -215,7 +283,7 @@
 		struct mpool_chunk *chunk = *prev;
 
 		/* Round start address up to the required alignment. */
-		start = ((uintptr_t)chunk + align - 1) / align * align;
+		start = (((uintptr_t)chunk + align - 1) / align) * align;
 
 		/*
 		 * Calculate where the new chunk would be if we consume the
@@ -223,7 +291,7 @@
 		 * enough to satisfy the request.
 		 */
 		new_chunk =
-			(struct mpool_chunk *)(start + count * p->entry_size);
+			(struct mpool_chunk *)(start + (count * p->entry_size));
 		if (new_chunk <= chunk->limit) {
 			/* Remove the consumed area. */
 			if (new_chunk == chunk->limit) {
@@ -254,3 +322,30 @@
 
 	return ret;
 }
+
+/**
+ * Allocates a number of contiguous and aligned entries. This is a best-effort
+ * operation and only succeeds if such entries can be found in the chunks list
+ * or the chunks of the fallbacks (i.e., the entry list is never used to satisfy
+ * these allocations).
+ *
+ * The alignment is specified as the number of entries, that is, if `align` is
+ * 4, the alignment in bytes will be 4 * entry_size.
+ *
+ * The caller can eventually free the returned entries by calling
+ * mpool_add_chunk.
+ */
+void *mpool_alloc_contiguous(struct mpool *p, size_t count, size_t align)
+{
+	do {
+		void *ret = mpool_alloc_contiguous_no_fallback(p, count, align);
+
+		if (ret != NULL) {
+			return ret;
+		}
+
+		p = p->fallback;
+	} while (p != NULL);
+
+	return NULL;
+}
diff --git a/src/mpool_test.cc b/src/mpool_test.cc
index f4267b3..7c30f89 100644
--- a/src/mpool_test.cc
+++ b/src/mpool_test.cc
@@ -24,6 +24,7 @@
 
 namespace
 {
+using ::testing::Eq;
 using ::testing::IsNull;
 using ::testing::NotNull;
 
@@ -123,7 +124,7 @@
 {
 	struct mpool p;
 	constexpr size_t entry_size = 16;
-	constexpr size_t entries_per_chunk = 10;
+	constexpr size_t entries_per_chunk = 12;
 	constexpr size_t chunk_count = 10;
 	std::vector<std::unique_ptr<char[]>> chunks;
 	std::vector<uintptr_t> allocs;
@@ -255,7 +256,7 @@
 		ret = mpool_alloc(&p);
 		ASSERT_THAT(ret, NotNull());
 		allocs.push_back((uintptr_t)ret);
-		next = (uintptr_t)ret / entry_size + 1;
+		next = ((uintptr_t)ret / entry_size) + 1;
 	} while ((next % 4) != 2);
 
 	/* Allocate 5 entries with an alignment of 4. So two must be skipped. */
@@ -289,4 +290,71 @@
 		    true);
 }
 
+TEST(mpool, allocation_with_fallback)
+{
+	struct mpool fallback;
+	struct mpool p;
+	constexpr size_t entry_size = 16;
+	constexpr size_t entries_per_chunk = 10;
+	constexpr size_t chunk_count = 10;
+	std::vector<std::unique_ptr<char[]>> chunks;
+	std::vector<uintptr_t> allocs;
+	void* ret;
+
+	mpool_init(&fallback, entry_size);
+	mpool_init_with_fallback(&p, &fallback);
+
+	/* Allocate from an empty pool. */
+	EXPECT_THAT(mpool_alloc(&p), IsNull());
+
+	/* Allocate a number of chunks and add them to the fallback pool. */
+	add_chunks(chunks, &fallback, chunk_count,
+		   entries_per_chunk * entry_size);
+
+	/* Allocate from the pool until we run out of memory. */
+	while ((ret = mpool_alloc(&p))) {
+		allocs.push_back((uintptr_t)ret);
+	}
+
+	/* Check that returned entries are within chunks that were added. */
+	ASSERT_THAT(check_allocs(chunks, allocs, entries_per_chunk, entry_size),
+		    true);
+}
+
+TEST(mpool, free_with_fallback)
+{
+	struct mpool fallback;
+	struct mpool p;
+	constexpr size_t entry_size = 16;
+	constexpr size_t entries_per_chunk = 1;
+	constexpr size_t chunk_count = 1;
+	std::vector<std::unique_ptr<char[]>> chunks;
+	std::vector<uintptr_t> allocs;
+	void* ret;
+
+	mpool_init(&fallback, entry_size);
+	mpool_init_with_fallback(&p, &fallback);
+
+	/* Allocate a number of chunks and add them to the fallback pool. */
+	add_chunks(chunks, &fallback, chunk_count,
+		   entries_per_chunk * entry_size);
+
+	/* Allocate, making use of the fallback and free again. */
+	ret = mpool_alloc(&p);
+	mpool_free(&p, ret);
+
+	/* The entry is not available in the fallback. */
+	EXPECT_THAT(mpool_alloc(&fallback), IsNull());
+
+	/* The entry will be allocated by the local pool. */
+	EXPECT_THAT(mpool_alloc(&p), Eq(ret));
+
+	/* Return the memory to the local pool and then to the fallback. */
+	mpool_free(&p, ret);
+	mpool_fini(&p);
+
+	/* The fallback can now allocate the entry. */
+	EXPECT_THAT(mpool_alloc(&fallback), Eq(ret));
+}
+
 } /* namespace */