commit 8937a8fc0c49a6bad7b1f0e031db39db66668441
parent a9013df4db99531e397783f38d24b8f0d174a097
Author: sin <sin@2f30.org>
Date: Fri, 2 Aug 2013 14:32:23 +0100
Add Makefile
Diffstat:
 A Makefile    |  50 ++++++++++++++++++++++++++++++++++++++++++++++++++
 A config.mk   |  18 ++++++++++++++++++
 A lemoncake.c | 339 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 D rballoc.c   | 341 ---------------------------------------------------------------------------------
4 files changed, 407 insertions(+), 341 deletions(-)
diff --git a/Makefile b/Makefile
@@ -0,0 +1,50 @@
+# lemoncake - rbtree based memory allocator
+# See LICENSE file for copyright and license details.
+
+include config.mk
+
+SRC = lemoncake.c
+OBJ = ${SRC:.c=.o}
+
+all: options lemoncake.so
+
+options:
+ @echo lemoncake build options:
+ @echo "CFLAGS = ${CFLAGS}"
+ @echo "LDFLAGS = ${LDFLAGS}"
+ @echo "CC = ${CC}"
+
+.c.o:
+ @echo CC $<
+ @${CC} -c ${CFLAGS} $<
+
+${OBJ}: config.mk
+
+lemoncake.so: lemoncake.o
+ @echo CC -o $@
+ @${CC} -o $@ lemoncake.o ${LDFLAGS}
+
+clean:
+ @echo cleaning
+ @rm -f lemoncake.so ${OBJ} lemoncake-${VERSION}.tar.gz
+
+dist: clean
+ @echo creating dist tarball
+ @mkdir -p lemoncake-${VERSION}
+ @cp -R LICENSE Makefile config.mk \
+ spinlock.h tree.h ${SRC} lemoncake-${VERSION}
+ @tar -cf lemoncake-${VERSION}.tar lemoncake-${VERSION}
+ @gzip lemoncake-${VERSION}.tar
+ @rm -rf lemoncake-${VERSION}
+
+install: all
+ @echo installing library file to ${DESTDIR}${PREFIX}/lib
+ @mkdir -p ${DESTDIR}${PREFIX}/lib
+ @cp -f lemoncake.so ${DESTDIR}${PREFIX}/lib
+ @chmod 755 ${DESTDIR}${PREFIX}/lib/lemoncake.so
+
+uninstall:
+ @echo removing library file from ${DESTDIR}${PREFIX}/lib
+ @rm -f ${DESTDIR}${PREFIX}/lib/lemoncake.so
+
+.PHONY: all options clean dist install uninstall
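
(A usage aside, not part of the commit: plain `make` builds lemoncake.so, and the install target honors the usual variables, so a staged install would presumably be `make DESTDIR=/tmp/stage install`, placing the library under ${DESTDIR}${PREFIX}/lib.)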
diff --git a/config.mk b/config.mk
@@ -0,0 +1,18 @@
+# lemoncake version
+VERSION = 0.1
+
+# Customize below to fit your system
+
+# paths
+PREFIX = /usr/local
+
+# includes and libs
+INCS = -I. -I/usr/include
+LIBS =
+
+# flags
+CFLAGS = -fPIC -Wall -O3 ${INCS}
+LDFLAGS = -shared ${LIBS}
+
+# compiler and linker
+CC = cc
diff --git a/lemoncake.c b/lemoncake.c
@@ -0,0 +1,339 @@
+/* See LICENSE file for copyright and license details. */
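+/* Build with make(1); use via LD_PRELOAD=./lemoncake.so <prog> */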
+#include <sys/mman.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdint.h>
+#include <errno.h>
+#include "tree.h"
+#include "spinlock.h"
+
+enum { MINALIGNMENT = 4 * sizeof(size_t) };
+
+struct node {
+ void *buf;
+ size_t siz;
+ RB_ENTRY(node) entry;
+};
+
+RB_HEAD(free_tree, node) ft = RB_INITIALIZER(&ft);
+RB_HEAD(alloc_tree, node) at = RB_INITIALIZER(&at);
+static spinlock_t rblock;
+
+static int ft_cmp(struct node *a, struct node *b);
+RB_PROTOTYPE(free_tree, node, entry, ft_cmp)
+
+static int at_cmp(struct node *a, struct node *b);
+RB_PROTOTYPE(alloc_tree, node, entry, at_cmp)
+
+RB_GENERATE(free_tree, node, entry, ft_cmp)
+/* These are ordered by `siz' */
+static int
+ft_cmp(struct node *a, struct node *b)
+{
+ if (a->siz < b->siz)
+ return -1;
+ if (a->siz > b->siz)
+ return 1;
+ return 0;
+}
+
+RB_GENERATE(alloc_tree, node, entry, at_cmp)
+/* These are ordered by address */
+static int
+at_cmp(struct node *a, struct node *b)
+{
+ if (a->buf < b->buf)
+ return -1;
+ if (a->buf > b->buf)
+ return 1;
+ return 0;
+}
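+
+/* The two trees split the bookkeeping: the free tree is keyed by
+ * size so that malloc() can grab the smallest block that fits
+ * (via RB_NFIND), while the alloc tree is keyed by address so that
+ * free() can find the node owning a given pointer. */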
+
+static void
+dump_alloc_tree(void)
+{
+ struct node *n;
+
+ RB_FOREACH(n, alloc_tree, &at)
+ fprintf(stderr, "%s: buf: %p, size: %zu\n",
+ __func__, n->buf, n->siz);
+}
+
+static void
+dump_free_tree(void)
+{
+ struct node *n;
+
+ RB_FOREACH(n, free_tree, &ft)
+ fprintf(stderr, "%s: buf: %p, size: %zu\n",
+ __func__, n->buf, n->siz);
+}
+
+static inline void *
+alloc_object(size_t siz)
+{
+ void *base, *p;
+
+ base = sbrk(siz + MINALIGNMENT);
+ if (base == (void *)-1)
+ return NULL;
+ p = base;
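+	/* round up to the next MINALIGNMENT boundary; the extra
+	 * MINALIGNMENT bytes requested from sbrk() above leave
+	 * room for the adjustment */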
+ p = (void *)(((uintptr_t)p + MINALIGNMENT) & ~(MINALIGNMENT - 1));
+ return p;
+}
+
+static inline void *
+mmap_aligned(size_t align, size_t siz)
+{
+ void *p;
+
+ /* align should be a power of two */
+ if ((align - 1) & align)
+ return NULL;
+ p = mmap(0, siz + align, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANON, -1, 0);
+ if (p == MAP_FAILED)
+ return NULL;
+ p = (void *)(((uintptr_t)p + align) & ~(align - 1));
+ return p;
+}
+
+void *
+malloc(size_t siz)
+{
+ struct node n, *an, *res;
+ void *p;
+
+ if (!siz)
+ return NULL;
+ lock(&rblock);
+	/* Look in the free tree for a block greater
+ * than or equal to `siz' bytes */
+ n.siz = siz;
+ res = RB_NFIND(free_tree, &ft, &n);
+ if (!res) {
+ /* No available free block, create a new block
+ * and add it to the alloc tree */
+ an = alloc_object(sizeof(*an));
+ if (!an) {
+ unlock(&rblock);
+ return NULL;
+ }
+ p = mmap_aligned(MINALIGNMENT, siz);
+ if (!p) {
+ unlock(&rblock);
+ return NULL;
+ }
+ an->buf = p;
+ an->siz = siz;
+ RB_INSERT(alloc_tree, &at, an);
+ unlock(&rblock);
+ return an->buf;
+ }
+ an = RB_REMOVE(free_tree, &ft, res);
+ RB_INSERT(alloc_tree, &at, an);
+ unlock(&rblock);
+ return an->buf;
+}
+
+void *
+realloc(void *oldp, size_t siz)
+{
+ struct node n, *res;
+ struct node *oldan, *newan;
+ struct node *fn;
+
+ if (!oldp)
+ return malloc(siz);
+ if (!siz) {
+ if (oldp)
+ free(oldp);
+ return NULL;
+ }
+ lock(&rblock);
+ n.buf = oldp;
+ res = RB_FIND(alloc_tree, &at, &n);
+ if (res) {
+ /* If we were asked to shrink the allocated space
+ * just re-use it */
+ if (res->siz >= siz) {
+ unlock(&rblock);
+ return res->buf;
+ }
+ oldan = res;
+		/* Look in the free tree for a block greater
+ * than or equal to `siz' bytes */
+ n.siz = siz;
+ res = RB_NFIND(free_tree, &ft, &n);
+ if (!res) {
+ /* No available free block, create a new block
+ * and add it to the alloc tree */
+ newan = alloc_object(sizeof(*newan));
+ if (!newan) {
+ unlock(&rblock);
+ return NULL;
+ }
+ newan->buf = mmap_aligned(MINALIGNMENT, siz);
+ if (!newan->buf) {
+				/* `newan' was carved out with sbrk()
+				 * and cannot be reclaimed; leak it */
+ unlock(&rblock);
+ return NULL;
+ }
+ newan->siz = siz;
+ RB_INSERT(alloc_tree, &at, newan);
+ } else {
+ /* Grab the block from the free tree instead */
+ newan = RB_REMOVE(free_tree, &ft, res);
+ RB_INSERT(alloc_tree, &at, newan);
+ }
+ /* Copy over the contents from `oldp' to the
+ * new memory block */
+ memcpy(newan->buf, oldan->buf,
+ siz < oldan->siz ? siz : oldan->siz);
+ /* Return `oldp' to the free tree */
+		n.buf = oldan->buf;
+ res = RB_FIND(alloc_tree, &at, &n);
+ if (res) {
+ fn = RB_REMOVE(alloc_tree, &at, res);
+ RB_INSERT(free_tree, &ft, fn);
+ }
+ unlock(&rblock);
+ return newan->buf;
+ }
+ unlock(&rblock);
+ return NULL;
+}
+
+void *
+calloc(size_t nmemb, size_t siz)
+{
+ void *p;
+
+	/* reject nmemb * siz overflow */
+	if (nmemb && siz > (size_t)-1 / nmemb)
+		return NULL;
+	p = malloc(nmemb * siz);
+ if (!p)
+ return NULL;
+ memset(p, 0, nmemb * siz);
+ return p;
+}
+
+void
+free(void *p)
+{
+ struct node n, *fn, *res;
+
+ if (!p)
+ return;
+ lock(&rblock);
+ n.buf = p;
+ res = RB_FIND(alloc_tree, &at, &n);
+ if (res) {
+ fn = RB_REMOVE(alloc_tree, &at, res);
+ RB_INSERT(free_tree, &ft, fn);
+ }
+ unlock(&rblock);
+}
+
+void
+cfree(void *p)
+{
+	free(p);
+}
+
+void *
+memalign(size_t align, size_t siz)
+{
+ struct node *an;
+ void *p;
+
+ if (((align - 1) & align))
+ return NULL;
+ if (align < sizeof(void *))
+ return NULL;
+ if (!siz)
+		return NULL;
+	/* Just allocate a new block; we don't bother searching
+	 * the free tree because a free block might not be suitably
+	 * aligned. The previous implementation could cope with
+	 * that, but it was somewhat hackish. Calls to
+	 * posix_memalign() are rare in most programs, so the
+	 * overhead should not really matter. */
+ an = alloc_object(sizeof(*an));
+ if (!an)
+ return NULL;
+ p = mmap_aligned(align, siz);
+	if (!p)
+		return NULL;	/* note: `an' is leaked here */
+ an->buf = p;
+ an->siz = siz;
+ lock(&rblock);
+ RB_INSERT(alloc_tree, &at, an);
+ unlock(&rblock);
+ return p;
+}
+
+void *
+aligned_alloc(size_t align, size_t siz)
+{
+	if (!align || siz % align)
+ return NULL;
+ return memalign(align, siz);
+}
+
+void *
+valloc(size_t siz)
+{
+ return memalign(sysconf(_SC_PAGESIZE), siz);
+}
+
+void *
+pvalloc(size_t siz)
+{
+ long pagesize = sysconf(_SC_PAGESIZE);
+ siz = pagesize * ((siz + pagesize - 1) / pagesize);
+ return valloc(siz);
+}
+
+int
+posix_memalign(void **memptr, size_t align, size_t siz)
+{
+ *memptr = NULL;
+ if (((align - 1) & align))
+ return EINVAL;
+ if (align < sizeof(void *))
+ return EINVAL;
+ if (!siz)
+ return 0;
+ *memptr = memalign(align, siz);
+ if (!*memptr)
+ return ENOMEM;
+ return 0;
+}
+
+size_t
+malloc_usable_size(void *p)
+{
+ struct node n, *res;
+
+ if (!p)
+ return 0;
+ lock(&rblock);
+ n.buf = p;
+ res = RB_FIND(alloc_tree, &at, &n);
+ if (res) {
+ unlock(&rblock);
+ return res->siz;
+ }
+ unlock(&rblock);
+ return 0;
+}
+
+size_t
+malloc_size(void *p)
+{
+ return malloc_usable_size(p);
+}
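
A hypothetical smoke test (not part of this commit) can exercise the interposed entry points once the library is built; the file name smoke.c and the test sizes are illustrative only:

/* smoke.c - build: cc -o smoke smoke.c
 * run:   LD_PRELOAD=./lemoncake.so ./smoke */
#define _POSIX_C_SOURCE 200112L
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

size_t malloc_usable_size(void *p);	/* interposed by lemoncake.so */

int
main(void)
{
	char *s;
	void *p;

	s = malloc(32);
	if (!s)
		return 1;
	strcpy(s, "hello");
	s = realloc(s, 4096);	/* grow: copies into a fresh block */
	if (!s || strcmp(s, "hello") != 0)
		return 1;
	free(s);		/* the 4096-byte block enters the free tree */
	s = malloc(4096);	/* should come back out of the free tree */
	if (!s)
		return 1;
	printf("usable: %zu\n", malloc_usable_size(s));
	free(s);
	if (posix_memalign(&p, 64, 100) != 0)
		return 1;
	free(p);
	return 0;
}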
diff --git a/rballoc.c b/rballoc.c
@@ -1,341 +0,0 @@
-/* See LICENSE file for copyright and license details. */
-/* Compile with gcc -shared -fPIC -o rballoc.so rballoc.c */
-/* LD_PRELOAD=./rballoc.so <prog> */
-#include <sys/mman.h>
-#include <unistd.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <stdint.h>
-#include <errno.h>
-#include "tree.h"
-#include "spinlock.h"
-
-enum { MINALIGNMENT = 4 * sizeof(size_t) };
-
-struct node {
- void *buf;
- size_t siz;
- RB_ENTRY(node) entry;
-};
-
-RB_HEAD(free_tree, node) ft = RB_INITIALIZER(&ft);
-RB_HEAD(alloc_tree, node) at = RB_INITIALIZER(&at);
-static spinlock_t rblock;
-
-static int ft_cmp(struct node *a, struct node *b);
-RB_PROTOTYPE(free_tree, node, entry, ft_cmp)
-
-static int at_cmp(struct node *a, struct node *b);
-RB_PROTOTYPE(alloc_tree, node, entry, at_cmp)
-
-RB_GENERATE(free_tree, node, entry, ft_cmp)
-/* These are ordered by `siz' */
-static int
-ft_cmp(struct node *a, struct node *b)
-{
- if (a->siz < b->siz)
- return -1;
- if (a->siz > b->siz)
- return 1;
- return 0;
-}
-
-RB_GENERATE(alloc_tree, node, entry, at_cmp)
-/* These are ordered by address */
-static int
-at_cmp(struct node *a, struct node *b)
-{
- if (a->buf < b->buf)
- return -1;
- if (a->buf > b->buf)
- return 1;
- return 0;
-}
-
-static void
-dump_alloc_tree(void)
-{
- struct node *n;
-
- RB_FOREACH(n, alloc_tree, &at)
- fprintf(stderr, "%s: buf: %p, size: %zu\n",
- __func__, n->buf, n->siz);
-}
-
-static void
-dump_free_tree(void)
-{
- struct node *n;
-
- RB_FOREACH(n, free_tree, &ft)
- fprintf(stderr, "%s: buf: %p, size: %zu\n",
- __func__, n->buf, n->siz);
-}
-
-static inline void *
-alloc_object(size_t siz)
-{
- void *base, *p;
-
- base = sbrk(siz + MINALIGNMENT);
- if (base == (void *)-1)
- return NULL;
- p = base;
- p = (void *)(((uintptr_t)p + MINALIGNMENT) & ~(MINALIGNMENT - 1));
- return p;
-}
-
-static inline void *
-mmap_aligned(size_t align, size_t siz)
-{
- void *p;
-
- /* align should be a power of two */
- if ((align - 1) & align)
- return NULL;
- p = mmap(0, siz + align, PROT_READ | PROT_WRITE,
- MAP_PRIVATE | MAP_ANON, -1, 0);
- if (p == MAP_FAILED)
- return NULL;
- p = (void *)(((uintptr_t)p + align) & ~(align - 1));
- return p;
-}
-
-void *
-malloc(size_t siz)
-{
- struct node n, *an, *res;
- void *p;
-
- if (!siz)
- return NULL;
- lock(&rblock);
- /* Lookup in the free tree for a block greater
- * than or equal to `siz' bytes */
- n.siz = siz;
- res = RB_NFIND(free_tree, &ft, &n);
- if (!res) {
- /* No available free block, create a new block
- * and add it to the alloc tree */
- an = alloc_object(sizeof(*an));
- if (!an) {
- unlock(&rblock);
- return NULL;
- }
- p = mmap_aligned(MINALIGNMENT, siz);
- if (!p) {
- unlock(&rblock);
- return NULL;
- }
- an->buf = p;
- an->siz = siz;
- RB_INSERT(alloc_tree, &at, an);
- unlock(&rblock);
- return an->buf;
- }
- an = RB_REMOVE(free_tree, &ft, res);
- RB_INSERT(alloc_tree, &at, an);
- unlock(&rblock);
- return an->buf;
-}
-
-void *
-realloc(void *oldp, size_t siz)
-{
- struct node n, *res;
- struct node *oldan, *newan;
- struct node *fn;
-
- if (!oldp)
- return malloc(siz);
- if (!siz) {
- if (oldp)
- free(oldp);
- return NULL;
- }
- lock(&rblock);
- n.buf = oldp;
- res = RB_FIND(alloc_tree, &at, &n);
- if (res) {
- /* If we were asked to shrink the allocated space
- * just re-use it */
- if (res->siz >= siz) {
- unlock(&rblock);
- return res->buf;
- }
- oldan = res;
- /* Lookup in the free tree for a block greater
- * than or equal to `siz' bytes */
- n.siz = siz;
- res = RB_NFIND(free_tree, &ft, &n);
- if (!res) {
- /* No available free block, create a new block
- * and add it to the alloc tree */
- newan = alloc_object(sizeof(*newan));
- if (!newan) {
- unlock(&rblock);
- return NULL;
- }
- newan->buf = mmap_aligned(MINALIGNMENT, siz);
- if (!newan->buf) {
- free_object(newan, sizeof(*newan));
- unlock(&rblock);
- return NULL;
- }
- newan->siz = siz;
- RB_INSERT(alloc_tree, &at, newan);
- } else {
- /* Grab the block from the free tree instead */
- newan = RB_REMOVE(free_tree, &ft, res);
- RB_INSERT(alloc_tree, &at, newan);
- }
- /* Copy over the contents from `oldp' to the
- * new memory block */
- memcpy(newan->buf, oldan->buf,
- siz < oldan->siz ? siz : oldan->siz);
- /* Return `oldp' to the free tree */
- n.buf = oldan;
- res = RB_FIND(alloc_tree, &at, &n);
- if (res) {
- fn = RB_REMOVE(alloc_tree, &at, res);
- RB_INSERT(free_tree, &ft, fn);
- }
- unlock(&rblock);
- return newan->buf;
- }
- unlock(&rblock);
- return NULL;
-}
-
-void *
-calloc(size_t nmemb, size_t siz)
-{
- void *p;
-
- p = malloc(nmemb * siz);
- if (!p)
- return NULL;
- memset(p, 0, nmemb * siz);
- return p;
-}
-
-void
-free(void *p)
-{
- struct node n, *fn, *res;
-
- if (!p)
- return;
- lock(&rblock);
- n.buf = p;
- res = RB_FIND(alloc_tree, &at, &n);
- if (res) {
- fn = RB_REMOVE(alloc_tree, &at, res);
- RB_INSERT(free_tree, &ft, fn);
- }
- unlock(&rblock);
-}
-
-void
-cfree(void *p)
-{
- return free(p);
-}
-
-void *
-memalign(size_t align, size_t siz)
-{
- struct node *an;
- void *p;
-
- if (((align - 1) & align))
- return NULL;
- if (align < sizeof(void *))
- return NULL;
- if (!siz)
- return 0;
- /* Just allocate a new block, we don't care to look
- * for a block in the free tree as it might not be properly
- * aligned. The previous implementation could cope with
- * that but it was sort of hackish. There are few calls to
- * posix_memalign() in most cases, so the overhead should
- * not really matter. */
- an = alloc_object(sizeof(*an));
- if (!an)
- return NULL;
- p = mmap_aligned(align, siz);
- if (!p)
- return NULL;
- an->buf = p;
- an->siz = siz;
- lock(&rblock);
- RB_INSERT(alloc_tree, &at, an);
- unlock(&rblock);
- return p;
-}
-
-void *
-aligned_alloc(size_t align, size_t siz)
-{
- if (siz % align)
- return NULL;
- return memalign(align, siz);
-}
-
-void *
-valloc(size_t siz)
-{
- return memalign(sysconf(_SC_PAGESIZE), siz);
-}
-
-void *
-pvalloc(size_t siz)
-{
- long pagesize = sysconf(_SC_PAGESIZE);
- siz = pagesize * ((siz + pagesize - 1) / pagesize);
- return valloc(siz);
-}
-
-int
-posix_memalign(void **memptr, size_t align, size_t siz)
-{
- struct node *an;
- void *p;
-
- *memptr = NULL;
- if (((align - 1) & align))
- return EINVAL;
- if (align < sizeof(void *))
- return EINVAL;
- if (!siz)
- return 0;
- *memptr = memalign(align, siz);
- if (!*memptr)
- return ENOMEM;
- return 0;
-}
-
-size_t
-malloc_usable_size(void *p)
-{
- struct node n, *res;
-
- if (!p)
- return 0;
- lock(&rblock);
- n.buf = p;
- res = RB_FIND(alloc_tree, &at, &n);
- if (res) {
- unlock(&rblock);
- return res->siz;
- }
- unlock(&rblock);
- return 0;
-}
-
-size_t
-malloc_size(void *p)
-{
- return malloc_usable_size(p);
-}