memzap

replay memory writes
git clone git://git.2f30.org/memzap

commit 41bb2b3c5523f33a9119b2493171358c19ac3449
Author: sin <sin@2f30.org>
Date:   Fri,  1 Mar 2013 11:42:09 +0000

Initial commit

Diffstat:
A LICENSE  |   13 +++++++++++++
A Makefile |   37 +++++++++++++++++++++++++++++++++++++
A data.h   |  134 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
A md5.c    |  247 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
A md5.h    |   35 +++++++++++++++++++++++++++++++++++
A mem.c    |  351 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
A memzap.c |  142 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
A queue.h  |  574 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
A test.c   |   16 ++++++++++++++++
A tree.h   |  765 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
A utils.c  |   25 +++++++++++++++++++++++++
11 files changed, 2339 insertions(+), 0 deletions(-)
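
The diff below is an rsync-style block differ pointed at a traced process's memory: build_mem_region() splits a snapshot into 16384-byte blocks and records a cheap rolling "weak" sum plus an MD5 "strong" sum per block, and blocks whose sums are absent from the previous snapshot's checksum tree are the memory writes. A standalone sketch of that checksum pairing (the weak_sum body mirrors the routine in mem.c; MD5Init/MD5Update/MD5Final are the bundled md5.c API):

/* Per-block checksum pair used by memzap: rolling weak sum plus
 * MD5 strong sum. Compile together with md5.c. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include "md5.h"

static uint32_t
weak_sum(const void *buf, size_t len)
{
	const unsigned char *p = buf;
	uint32_t s1 = 0, s2 = 0;
	size_t i;

	/* rsync's rolling sum: s1 is the byte sum, s2 the sum of
	 * running s1 values, packed into one 32-bit word */
	for (i = 0; i < len; i++) {
		s1 += p[i];
		s2 += s1;
	}
	return (s1 & 0xffff) + (s2 << 16);
}

int
main(void)
{
	unsigned char blk[16384], digest[MD5_DIGEST_LENGTH];
	MD5_CTX ctx;
	int i;

	memset(blk, 0x41, sizeof(blk));

	MD5Init(&ctx);
	MD5Update(&ctx, blk, sizeof(blk));
	MD5Final(digest, &ctx);

	printf("weak: %08x strong: ", weak_sum(blk, sizeof(blk)));
	for (i = 0; i < MD5_DIGEST_LENGTH; i++)
		printf("%02x", digest[i]);
	putchar('\n');
	return 0;
}

The weak sum is cheap enough to compute for every block on every step; the MD5 digest is only consulted to disambiguate weak-sum collisions, which is exactly how diff_mem_region() below uses the two.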

diff --git a/LICENSE b/LICENSE @@ -0,0 +1,13 @@ +Copyright (c) sin <sin@2f30.org> + +Permission to use, copy, modify, and/or distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. diff --git a/Makefile b/Makefile @@ -0,0 +1,37 @@ +BIN = memzap +VER = 0.1 +SRC = memzap.c mem.c utils.c md5.c +OBJ = ${SRC:.c=.o} + +PREFIX = /usr +MANDIR = /man/man1 +MANDST = ${PREFIX}${MANDIR} + +CC = gcc + +# These might need updating, depending on your system +INCS = -I/usr/local/include +LIBS = -L/usr/local/lib + +CFLAGS += -g -O3 -Wall -Wextra -Wunused -DVERSION=\"${VER}\" ${INCS} +LDFLAGS += + +$(BIN): ${OBJ} + ${CC} ${CFLAGS} ${LDFLAGS} -o $@ ${OBJ} + +%.o: %.c + ${CC} ${CFLAGS} -c -o $@ $< + +clean: + rm -rf ${BIN} ${OBJ} + +all: memzap + +install: + cp -f ${BIN} ${PREFIX}/bin + chmod 755 ${PREFIX}/bin/${BIN} + +uninstall: + rm -f ${PREFIX}/bin/${BIN} + +.PHONY: all clean install uninstall diff --git a/data.h b/data.h @@ -0,0 +1,134 @@ +/* See LICENSE file for copyright and license details. */ + +#ifndef _DATA_H +#define _DATA_H + +#include <errno.h> +#include <unistd.h> +#include <sys/stat.h> +#include <sys/ptrace.h> +#include <sys/wait.h> +#include <sys/mman.h> +#include <fcntl.h> + +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <stdint.h> +#include <stdbool.h> +#include <limits.h> +#include <signal.h> +#include <err.h> + +#include "queue.h" +#include "tree.h" +#include "md5.h" + +#define min(a, b) ((a) < (b) ? (a) : (b)) + +enum { + /* The preferred block size */ + BLKSIZE = 16384 +}; + +struct mem_blk { + /* Points to the actual data for this + * block. The user should never free pointers + * passed to this API unless the corresponding + * memory region has been freed. 
*/ + void *buf; + /* Simple rolling sum of this block */ + uint32_t weak_sum; + /* The MD5 digest */ + unsigned char digest[MD5_DIGEST_LENGTH]; + /* Offset into the memory region of this block */ + off_t offset; + /* Size of this block, typically `BLKSIZE' + * or less for the last block */ + size_t len; +}; + +struct mem_region { + /* Size of the memory region */ + size_t rsize; + /* Number of blocks in this region */ + size_t nblocks; + /* rsize % nblocks */ + size_t rem; + /* The block size used for this region */ + size_t blksize; + /* There are `nblocks' mem_blk entries + * here, describing the entire memory region */ + struct mem_blk *mblks; +}; + +struct mem_diff { + /* Our updated data */ + void *buf; + /* Offset into the memory region where + * this buffer should be written */ + off_t offset; + /* Actual size of the buffer */ + size_t len; + /* Index into the memory region where + * the corresponding memory block is located */ + int index; +}; + +struct mem_region_diff { + /* The diffs for this memory region */ + struct mem_diff *mrdiffs; + /* The number of diffs */ + size_t nmrdiffs; +}; + +struct mem_tree_entry { + /* The rolling sum for this rbnode, we sort + * based on this property */ + uint32_t weak_sum; + /* Pointers to memory blocks, normally there + * will be only 1 memory block here, however, + * if we have weak sum collisions, then we will + * chain more entries here */ + struct mem_blk **mblks; + /* Actual number of tracked memory blocks */ + size_t nmblks; + /* The memory region this tree is generated + * from */ + struct mem_region *mr; + RB_ENTRY(mem_tree_entry) entry; +}; +RB_HEAD(mem_tree, mem_tree_entry); + +/* Return the number of blocks in a buffer of `len' bytes */ +static inline size_t +num_blocks(size_t len) +{ + return (len + (BLKSIZE - 1)) / BLKSIZE; +} + +/* utils.c */ +void *xmalloc(size_t len); +void *xrealloc(void *ptr, size_t len); + +/* mem.c */ +uint32_t weak_sum(const void *buf, size_t len); +void strong_sum(const void *buf, size_t len, void *digest); +void dump_mem_blk(struct mem_blk *mb); +struct mem_region *build_mem_region(unsigned char *buf, size_t len); +void dump_mem_region(struct mem_region *mr); +void free_mem_region(struct mem_region *mr); +int mem_cmp(struct mem_tree_entry *a, struct mem_tree_entry *b); +RB_PROTOTYPE(mem_tree, mem_tree_entry, entry, + mem_cmp); +struct mem_tree *build_mem_tree(struct mem_region *mr); +void dump_mem_tree(struct mem_tree *mt); +void free_mem_tree(struct mem_tree *mt); +struct mem_region_diff *diff_mem_region(struct mem_tree *dst, + struct mem_region *src); +struct mem_region *apply_diff(struct mem_region *dst, + struct mem_region_diff *rdiff); +void dump_mem_region_diff(struct mem_region_diff *rdiff); +void free_mem_region_diff(struct mem_region_diff *rdiff); + +#endif diff --git a/md5.c b/md5.c @@ -0,0 +1,247 @@ +/* $OpenBSD: md5.c,v 1.2 2011/01/11 15:42:05 deraadt Exp $ */ + +/* + * This code implements the MD5 message-digest algorithm. + * The algorithm is due to Ron Rivest. This code was + * written by Colin Plumb in 1993, no copyright is claimed. + * This code is in the public domain; do with it what you wish. + * + * Equivalent code is available from RSA Data Security, Inc. + * This code has been tested against that, and is equivalent, + * except that you don't need to include two pages of legalese + * with every copy. 
+ * + * To compute the message digest of a chunk of bytes, declare an + * MD5Context structure, pass it to MD5Init, call MD5Update as + * needed on buffers full of bytes, and then call MD5Final, which + * will fill a supplied 16-byte array with the digest. + */ + +#include <stdio.h> +#include <strings.h> +#include <stdint.h> + +#include "md5.h" + +#define PUT_64BIT_LE(cp, value) do { \ + (cp)[7] = (value) >> 56; \ + (cp)[6] = (value) >> 48; \ + (cp)[5] = (value) >> 40; \ + (cp)[4] = (value) >> 32; \ + (cp)[3] = (value) >> 24; \ + (cp)[2] = (value) >> 16; \ + (cp)[1] = (value) >> 8; \ + (cp)[0] = (value); } while (0) + +#define PUT_32BIT_LE(cp, value) do { \ + (cp)[3] = (value) >> 24; \ + (cp)[2] = (value) >> 16; \ + (cp)[1] = (value) >> 8; \ + (cp)[0] = (value); } while (0) + +static uint8_t PADDING[MD5_BLOCK_LENGTH] = { + 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 +}; + +/* + * Start MD5 accumulation. Set bit count to 0 and buffer to mysterious + * initialization constants. + */ +void +MD5Init(MD5_CTX *ctx) +{ + ctx->count = 0; + ctx->state[0] = 0x67452301; + ctx->state[1] = 0xefcdab89; + ctx->state[2] = 0x98badcfe; + ctx->state[3] = 0x10325476; +} + +/* + * Update context to reflect the concatenation of another buffer full + * of bytes. + */ +void +MD5Update(MD5_CTX *ctx, const unsigned char *input, size_t len) +{ + size_t have, need; + + /* Check how many bytes we already have and how many more we need. */ + have = (size_t)((ctx->count >> 3) & (MD5_BLOCK_LENGTH - 1)); + need = MD5_BLOCK_LENGTH - have; + + /* Update bitcount */ + ctx->count += (uint64_t)len << 3; + + if (len >= need) { + if (have != 0) { + bcopy(input, ctx->buffer + have, need); + MD5Transform(ctx->state, ctx->buffer); + input += need; + len -= need; + have = 0; + } + + /* Process data in MD5_BLOCK_LENGTH-byte chunks. */ + while (len >= MD5_BLOCK_LENGTH) { + MD5Transform(ctx->state, input); + input += MD5_BLOCK_LENGTH; + len -= MD5_BLOCK_LENGTH; + } + } + + /* Handle any remaining bytes of data. */ + if (len != 0) + bcopy(input, ctx->buffer + have, len); +} + +/* explicit_bzero - don't let the compiler optimize away bzero */ +static void +explicit_bzero(void *p, size_t n) +{ + bzero(p, n); +} + +/* + * Final wrapup - pad to 64-byte boundary with the bit pattern + * 1 0* (64-bit count of bits processed, MSB-first) + */ +void +MD5Final(unsigned char digest[MD5_DIGEST_LENGTH], MD5_CTX *ctx) +{ + uint8_t count[8]; + size_t padlen; + int i; + + /* Convert count to 8 bytes in little endian order. */ + PUT_64BIT_LE(count, ctx->count); + + /* Pad out to 56 mod 64. */ + padlen = MD5_BLOCK_LENGTH - + ((ctx->count >> 3) & (MD5_BLOCK_LENGTH - 1)); + if (padlen < 1 + 8) + padlen += MD5_BLOCK_LENGTH; + MD5Update(ctx, PADDING, padlen - 8); /* padlen - 8 <= 64 */ + MD5Update(ctx, count, 8); + + if (digest != NULL) { + for (i = 0; i < 4; i++) + PUT_32BIT_LE(digest + i * 4, ctx->state[i]); + } + explicit_bzero(ctx, sizeof(*ctx)); /* in case it's sensitive */ +} + +/* The four core functions - F1 is optimized somewhat */ + +/* #define F1(x, y, z) (x & y | ~x & z) */ +#define F1(x, y, z) (z ^ (x & (y ^ z))) +#define F2(x, y, z) F1(z, x, y) +#define F3(x, y, z) (x ^ y ^ z) +#define F4(x, y, z) (y ^ (x | ~z)) + +/* This is the central step in the MD5 algorithm. 
*/ +#define MD5STEP(f, w, x, y, z, data, s) \ + ( w += f(x, y, z) + data, w = w<<s | w>>(32-s), w += x ) + +/* + * The core of the MD5 algorithm, this alters an existing MD5 hash to + * reflect the addition of 16 longwords of new data. MD5Update blocks + * the data and converts bytes into longwords for this routine. + */ +void +MD5Transform(uint32_t state[4], const uint8_t block[MD5_BLOCK_LENGTH]) +{ + uint32_t a, b, c, d, in[MD5_BLOCK_LENGTH / 4]; + +#if BYTE_ORDER == LITTLE_ENDIAN + bcopy(block, in, sizeof(in)); +#else + for (a = 0; a < MD5_BLOCK_LENGTH / 4; a++) { + in[a] = (uint32_t)( + (uint32_t)(block[a * 4 + 0]) | + (uint32_t)(block[a * 4 + 1]) << 8 | + (uint32_t)(block[a * 4 + 2]) << 16 | + (uint32_t)(block[a * 4 + 3]) << 24); + } +#endif + + a = state[0]; + b = state[1]; + c = state[2]; + d = state[3]; + + MD5STEP(F1, a, b, c, d, in[ 0] + 0xd76aa478, 7); + MD5STEP(F1, d, a, b, c, in[ 1] + 0xe8c7b756, 12); + MD5STEP(F1, c, d, a, b, in[ 2] + 0x242070db, 17); + MD5STEP(F1, b, c, d, a, in[ 3] + 0xc1bdceee, 22); + MD5STEP(F1, a, b, c, d, in[ 4] + 0xf57c0faf, 7); + MD5STEP(F1, d, a, b, c, in[ 5] + 0x4787c62a, 12); + MD5STEP(F1, c, d, a, b, in[ 6] + 0xa8304613, 17); + MD5STEP(F1, b, c, d, a, in[ 7] + 0xfd469501, 22); + MD5STEP(F1, a, b, c, d, in[ 8] + 0x698098d8, 7); + MD5STEP(F1, d, a, b, c, in[ 9] + 0x8b44f7af, 12); + MD5STEP(F1, c, d, a, b, in[10] + 0xffff5bb1, 17); + MD5STEP(F1, b, c, d, a, in[11] + 0x895cd7be, 22); + MD5STEP(F1, a, b, c, d, in[12] + 0x6b901122, 7); + MD5STEP(F1, d, a, b, c, in[13] + 0xfd987193, 12); + MD5STEP(F1, c, d, a, b, in[14] + 0xa679438e, 17); + MD5STEP(F1, b, c, d, a, in[15] + 0x49b40821, 22); + + MD5STEP(F2, a, b, c, d, in[ 1] + 0xf61e2562, 5); + MD5STEP(F2, d, a, b, c, in[ 6] + 0xc040b340, 9); + MD5STEP(F2, c, d, a, b, in[11] + 0x265e5a51, 14); + MD5STEP(F2, b, c, d, a, in[ 0] + 0xe9b6c7aa, 20); + MD5STEP(F2, a, b, c, d, in[ 5] + 0xd62f105d, 5); + MD5STEP(F2, d, a, b, c, in[10] + 0x02441453, 9); + MD5STEP(F2, c, d, a, b, in[15] + 0xd8a1e681, 14); + MD5STEP(F2, b, c, d, a, in[ 4] + 0xe7d3fbc8, 20); + MD5STEP(F2, a, b, c, d, in[ 9] + 0x21e1cde6, 5); + MD5STEP(F2, d, a, b, c, in[14] + 0xc33707d6, 9); + MD5STEP(F2, c, d, a, b, in[ 3] + 0xf4d50d87, 14); + MD5STEP(F2, b, c, d, a, in[ 8] + 0x455a14ed, 20); + MD5STEP(F2, a, b, c, d, in[13] + 0xa9e3e905, 5); + MD5STEP(F2, d, a, b, c, in[ 2] + 0xfcefa3f8, 9); + MD5STEP(F2, c, d, a, b, in[ 7] + 0x676f02d9, 14); + MD5STEP(F2, b, c, d, a, in[12] + 0x8d2a4c8a, 20); + + MD5STEP(F3, a, b, c, d, in[ 5] + 0xfffa3942, 4); + MD5STEP(F3, d, a, b, c, in[ 8] + 0x8771f681, 11); + MD5STEP(F3, c, d, a, b, in[11] + 0x6d9d6122, 16); + MD5STEP(F3, b, c, d, a, in[14] + 0xfde5380c, 23); + MD5STEP(F3, a, b, c, d, in[ 1] + 0xa4beea44, 4); + MD5STEP(F3, d, a, b, c, in[ 4] + 0x4bdecfa9, 11); + MD5STEP(F3, c, d, a, b, in[ 7] + 0xf6bb4b60, 16); + MD5STEP(F3, b, c, d, a, in[10] + 0xbebfbc70, 23); + MD5STEP(F3, a, b, c, d, in[13] + 0x289b7ec6, 4); + MD5STEP(F3, d, a, b, c, in[ 0] + 0xeaa127fa, 11); + MD5STEP(F3, c, d, a, b, in[ 3] + 0xd4ef3085, 16); + MD5STEP(F3, b, c, d, a, in[ 6] + 0x04881d05, 23); + MD5STEP(F3, a, b, c, d, in[ 9] + 0xd9d4d039, 4); + MD5STEP(F3, d, a, b, c, in[12] + 0xe6db99e5, 11); + MD5STEP(F3, c, d, a, b, in[15] + 0x1fa27cf8, 16); + MD5STEP(F3, b, c, d, a, in[2 ] + 0xc4ac5665, 23); + + MD5STEP(F4, a, b, c, d, in[ 0] + 0xf4292244, 6); + MD5STEP(F4, d, a, b, c, in[7 ] + 0x432aff97, 10); + MD5STEP(F4, c, d, a, b, in[14] + 0xab9423a7, 15); + MD5STEP(F4, b, c, d, a, in[5 ] + 0xfc93a039, 21); + MD5STEP(F4, a, b, c, d, in[12] + 
0x655b59c3, 6); + MD5STEP(F4, d, a, b, c, in[3 ] + 0x8f0ccc92, 10); + MD5STEP(F4, c, d, a, b, in[10] + 0xffeff47d, 15); + MD5STEP(F4, b, c, d, a, in[1 ] + 0x85845dd1, 21); + MD5STEP(F4, a, b, c, d, in[8 ] + 0x6fa87e4f, 6); + MD5STEP(F4, d, a, b, c, in[15] + 0xfe2ce6e0, 10); + MD5STEP(F4, c, d, a, b, in[6 ] + 0xa3014314, 15); + MD5STEP(F4, b, c, d, a, in[13] + 0x4e0811a1, 21); + MD5STEP(F4, a, b, c, d, in[4 ] + 0xf7537e82, 6); + MD5STEP(F4, d, a, b, c, in[11] + 0xbd3af235, 10); + MD5STEP(F4, c, d, a, b, in[2 ] + 0x2ad7d2bb, 15); + MD5STEP(F4, b, c, d, a, in[9 ] + 0xeb86d391, 21); + + state[0] += a; + state[1] += b; + state[2] += c; + state[3] += d; +} diff --git a/md5.h b/md5.h @@ -0,0 +1,35 @@ +/* $OpenBSD: md5.h,v 1.2 2012/12/05 23:20:15 deraadt Exp $ */ + +/* + * This code implements the MD5 message-digest algorithm. + * The algorithm is due to Ron Rivest. This code was + * written by Colin Plumb in 1993, no copyright is claimed. + * This code is in the public domain; do with it what you wish. + * + * Equivalent code is available from RSA Data Security, Inc. + * This code has been tested against that, and is equivalent, + * except that you don't need to include two pages of legalese + * with every copy. + */ + +#ifndef _MD5_H +#define _MD5_H + +#define MD5_BLOCK_LENGTH 64 +#define MD5_DIGEST_LENGTH 16 + +typedef struct MD5Context { + /* state */ + uint32_t state[4]; + /* number of bits, mod 2^64 */ + uint64_t count; + /* input buffer */ + uint8_t buffer[MD5_BLOCK_LENGTH]; +} MD5_CTX; + +void MD5Init(MD5_CTX *); +void MD5Update(MD5_CTX *, const uint8_t *, size_t); +void MD5Final(uint8_t [MD5_DIGEST_LENGTH], MD5_CTX *); +void MD5Transform(uint32_t [4], const uint8_t [MD5_BLOCK_LENGTH]); + +#endif diff --git a/mem.c b/mem.c @@ -0,0 +1,351 @@ +/* See LICENSE file for copyright and license details. */ + +#include "data.h" + +/* Increase this for more verbose output */ +static int verbose = 0; + +/* This is a simple rolling sum taken from + * the core rsync algorithm */ +uint32_t +weak_sum(const void *buf, size_t len) +{ + size_t i; + uint32_t s1, s2; + const unsigned char *p = buf; + + s1 = s2 = 0; + for (i = 0; i < len; i++) { + s1 += p[i]; + s2 += s1; + } + return (s1 & 0xffff) + (s2 << 16); +} + +void +strong_sum(const void *buf, size_t len, void *digest) +{ + MD5_CTX md5_ctx; + const uint8_t *p = (const uint8_t *)buf; + + MD5Init(&md5_ctx); + MD5Update(&md5_ctx, p, len); + MD5Final(digest, &md5_ctx); +} + +void +dump_mem_blk(struct mem_blk *mb) +{ + int i; + + if (!mb) + return; + + printf("weak sum: %08x\n", mb->weak_sum); + printf("MD5 hash: "); + for (i = 0; i < MD5_DIGEST_LENGTH; i++) + printf("%02x", mb->digest[i]); + putchar('\n'); + printf("offset: %jd\n", (intmax_t)mb->offset); + printf("len: %zu\n", mb->len); + +} + +/* Given a buffer, build a region that describes it. + * NOTE: The passed in `buf' is bound to this memory + * region and *cannot* be freed until after the call + * to free_memory_region(). 
*/ +struct mem_region * +build_mem_region(unsigned char *buf, size_t len) +{ + struct mem_blk *mb; + struct mem_region *mr; + size_t offset = 0; + size_t i; + size_t n; + + if (!buf || !len) + return NULL; + + mr = xmalloc(sizeof(*mr)); + memset(mr, 0, sizeof(*mr)); + mr->rsize = len; + mr->nblocks = num_blocks(len); + mr->rem = len % BLKSIZE; + mr->blksize = BLKSIZE; + mr->mblks = xmalloc(sizeof(struct mem_blk) * mr->nblocks); + + if (verbose > 0) + fprintf(stderr, "%s: blksize: %d, rsize: %zu, nblocks: %zu, rem: %zu\n", + __func__, BLKSIZE, mr->rsize, mr->nblocks, mr->rem); + + for (i = 0; i < mr->nblocks; i++) { + n = min(len, BLKSIZE); + mb = &mr->mblks[i]; + mb->buf = buf; + mb->weak_sum = weak_sum(buf, n); + strong_sum(buf, n, mb->digest); + mb->offset = offset; + mb->len = n; + if (verbose > 1) { + fprintf(stderr, "%s: adding node with weak sum: %08x", + __func__, mb->weak_sum); + fprintf(stderr, " at offset: %jd of len: %zu\n", + (intmax_t)mb->offset, mb->len); + } + len -= n; + buf += n; + offset += n; + } + return mr; +} + +void +dump_mem_region(struct mem_region *mr) +{ + size_t i; + + if (!mr) + return; + + for (i = 0; i < mr->nblocks; i++) + dump_mem_blk(&mr->mblks[i]); +} + +void +free_mem_region(struct mem_region *mr) +{ + if (!mr) + return; + + free(mr->mblks); + mr->mblks = NULL; + free(mr); +} + +RB_GENERATE(mem_tree, mem_tree_entry, entry, + mem_cmp); +int +mem_cmp(struct mem_tree_entry *a, struct mem_tree_entry *b) +{ + if (a->weak_sum < b->weak_sum) + return -1; + if (a->weak_sum > b->weak_sum) + return 1; + return 0; +} + +/* Given a memory region, build an rbtree + * of the checksums. We use this rbtree later on + * when we want to find the differences between + * two memory regions */ +struct mem_tree * +build_mem_tree(struct mem_region *mr) +{ + struct mem_blk *mb; + struct mem_tree_entry *me, *n; + struct mem_tree *mt; + size_t i; + + if (!mr) + return NULL; + + mt = xmalloc(sizeof(*mt)); + RB_INIT(mt); + + for (i = 0; i < mr->nblocks; i++) { + mb = &mr->mblks[i]; + me = xmalloc(sizeof(*me)); + me->weak_sum = mb->weak_sum; + me->mr = mr; + n = RB_INSERT(mem_tree, mt, me); + if (!n) { + me->mblks = xmalloc(sizeof(*me->mblks)); + me->mblks[0] = mb; + me->nmblks = 1; + continue; + } + /* If we have two blocks with the same weak + * sum then try to chain the MD5 hash so we can keep track + * of it */ + n->mblks = xrealloc(n->mblks, + (n->nmblks + 1) * sizeof(*me->mblks)); + n->mblks[n->nmblks] = mb; + n->nmblks++; + } + return mt; +} + +void +dump_mem_tree(struct mem_tree *mt) +{ + + struct mem_tree_entry *me; + struct mem_blk *mb; + size_t i; + + if (!mt) + return; + + RB_FOREACH(me, mem_tree, mt) { + for (i = 0; i < me->nmblks; i++) { + mb = me->mblks[i]; + dump_mem_blk(mb); + } + } +} + +void +free_mem_tree(struct mem_tree *mt) +{ + + struct mem_tree_entry *me, *next; + + if (!mt) + return; + + next = RB_MIN(mem_tree, mt); + while (next) { + me = next; + next = RB_NEXT(mem_tree, mt, me); + RB_REMOVE(mem_tree, mt, me); + free(me->mblks); + me->mblks = NULL; + free(me); + } + free(mt); +} + +/* Diff the two memory regions and return a + * a diff structure. In fact `dst' is an rbtree + * that describes the *old* memory region + * and `src' is the *new* and updated version of + * the memory region. 
*/ +struct mem_region_diff * +diff_mem_region(struct mem_tree *dst, + struct mem_region *src) +{ + struct mem_blk *mb; + struct mem_diff *mdiff; + struct mem_region_diff *rdiff; + struct mem_region *mr_old; + struct mem_tree_entry find, *me; + size_t i, j; + bool found; + size_t size; + + if (!dst || !src) + return NULL; + + rdiff = xmalloc(sizeof(*rdiff)); + rdiff->mrdiffs = NULL; + rdiff->nmrdiffs = 0; + me = RB_MIN(mem_tree, dst); + mr_old = me->mr; + + /* We can only diff regions of the same size */ + if (mr_old->rsize != src->rsize) + return NULL; + + for (i = 0; i < src->nblocks; i++) { + found = false; + mb = &src->mblks[i]; + find.weak_sum = mb->weak_sum; + me = RB_FIND(mem_tree, dst, &find); + if (me) { + /* We might have a weak sum collision + * so check the strong sum as well */ + for (j = 0; j < me->nmblks; j++) { + if (!memcmp(me->mblks[j]->digest, + mb->digest, + MD5_DIGEST_LENGTH)) { + found = true; + break; + } + } + } + if (!found) { + /* Cool, this is a modified block - update the rdiff */ + size = (rdiff->nmrdiffs + 1) * sizeof(*rdiff->mrdiffs); + rdiff->mrdiffs = xrealloc(rdiff->mrdiffs, size); + mdiff = &rdiff->mrdiffs[rdiff->nmrdiffs]; + mdiff->buf = xmalloc(mb->len); + memcpy(mdiff->buf, mb->buf, mb->len); + mdiff->offset = mb->offset; + mdiff->len = mb->len; + mdiff->index = i; + rdiff->nmrdiffs++; + } + } + + return rdiff; +} + +/* Apply the diff on `dst'. We return the same + * memory region as was passed in. */ +struct mem_region * +apply_diff(struct mem_region *dst, + struct mem_region_diff *rdiff) +{ + size_t i; + struct mem_diff *mdiff; + struct mem_blk *mb; + + if (!dst || !rdiff) + return NULL; + + for (i = 0; i < rdiff->nmrdiffs; i++) { + mdiff = &rdiff->mrdiffs[i]; + mb = &dst->mblks[mdiff->index]; + if (verbose > 1) + fprintf(stderr, "patching block %d\n", + mdiff->index); + if (mdiff->len != mb->len) + errx(1, "wtf?"); + /* Patch it! */ + memcpy(mb->buf, mdiff->buf, mb->len); + } + + return dst; +} + +void +dump_mem_region_diff(struct mem_region_diff *rdiff) +{ + struct mem_diff *mdiff; + char *p; + size_t i, j; + + if (!rdiff) + return; + + for (i = 0; i < rdiff->nmrdiffs; i++) { + mdiff = &rdiff->mrdiffs[i]; + printf("buf: %p: ", mdiff->buf); + p = mdiff->buf; + for (j = 0; j < 64 && j < mdiff->len; j++) + putchar(p[j]); + putchar('\n'); + printf("offset: %jd\n", (intmax_t)mdiff->offset); + printf("len: %zu\n", mdiff->len); + } +} + +void +free_mem_region_diff(struct mem_region_diff *rdiff) +{ + struct mem_diff *mdiff; + size_t i; + + if (!rdiff) + return; + + for (i = 0; i < rdiff->nmrdiffs; i++) { + mdiff = &rdiff->mrdiffs[i]; + free(mdiff->buf); + mdiff->buf = NULL; + } + free(rdiff->mrdiffs); + rdiff->mrdiffs = NULL; + free(rdiff); +} diff --git a/memzap.c b/memzap.c @@ -0,0 +1,142 @@ +/* See LICENSE file for copyright and license details. 
*/ + +#include "data.h" + +void +readmem(pid_t pid, void *buf, off_t offset, size_t size) +{ + char tbuf[PATH_MAX - 1]; + int fdmem, r; + ssize_t rs; + size_t s; + + snprintf(tbuf, sizeof(tbuf), "/proc/%d/mem", pid); + fdmem = open(tbuf, O_RDONLY); + if (fdmem < 0) + err(1, "open"); + + r = lseek(fdmem, offset, SEEK_SET); + if (r < 0) + err(1, "lseek"); + + s = 0; + while (s < size) { + rs = read(fdmem, buf + s, size - s); + if (rs < 0) + err(1, "read"); + s += rs; + } + + close(fdmem); +} + +int +main(int argc, char *argv[]) +{ + struct mem_region *mr_old, *mr_new; + struct mem_tree *mt_old; + struct mem_region_diff *rdiff; + int ret, stat; + pid_t pid; + unsigned char *buf, *buf_new, *pold, *pnew; + char *addr; + size_t len; + + if (argc != 4) { + fprintf(stderr, "usage: %s <program> <address> <len>\n", *argv); + return 1; + } + + setbuf(stdout, NULL); + + addr = (char *)strtoul(argv[2], NULL, 16); + len = strtoul(argv[3], NULL, 10); + + printf("[+] Base address: %p, length: %zu\n", + addr, len); + + pid = fork(); + if (pid < 0) + err(1, "fork"); + switch (pid) { + case 0: + ret = ptrace(PTRACE_TRACEME, 0, NULL, NULL); + if (ret < 0) + err(1, "ptrace"); + execl(argv[1], argv[1], (char *)NULL); + _Exit(1); + default: + waitpid(pid, &stat, WSTOPPED); + break; + } + + printf("[+] Mapping buffers into memory\n"); + + buf = mmap(0, len, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, + -1, 0); + if (buf == MAP_FAILED) { + err(1, "mmap"); + } + + buf_new = mmap(0, len, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, + -1, 0); + if (buf_new == MAP_FAILED) { + err(1, "mmap"); + } + + printf("[+] Single stepping child with pid %jd\n", + (intmax_t)pid); + + ret = ptrace(PTRACE_SINGLESTEP, pid, NULL, NULL); + if (ret < 0) + err(1, "ptrace"); + waitpid(pid, &stat, WSTOPPED); + + if (!WIFSTOPPED(stat)) + goto out_mmap; + + pold = buf; + pnew = buf_new; + do { + readmem(pid, pold, (off_t)addr, len); + + mr_old = build_mem_region(pold, len); + if (!mr_old) + errx(1, "[-] Failed to build memory region\n"); + mt_old = build_mem_tree(mr_old); + if (!mt_old) + errx(1, "[-] Failed to build memory tree\n"); + + ret = ptrace(PTRACE_SINGLESTEP, pid, NULL, NULL); + if (ret < 0) + err(1, "ptrace"); + waitpid(pid, &stat, WSTOPPED); + + if (!WIFSTOPPED(stat)) { + free_mem_region(mr_old); + free_mem_tree(mt_old); + goto out_mmap; + } + + readmem(pid, pnew, (off_t)addr, len); + + mr_new = build_mem_region(pnew, len); + if (!mr_new) + errx(1, "[-] Failed to build memory region\n"); + rdiff = diff_mem_region(mt_old, mr_new); + if (rdiff->nmrdiffs) { + apply_diff(mr_old, rdiff); + dump_mem_region_diff(rdiff); + } + + free_mem_region(mr_old); + free_mem_tree(mt_old); + free_mem_region(mr_new); + free_mem_region_diff(rdiff); + } while(1); + +out_mmap: + munmap(buf, len); + munmap(buf_new, len); + return 0; +} diff --git a/queue.h b/queue.h @@ -0,0 +1,574 @@ +/* + * Copyright (c) 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)queue.h 8.5 (Berkeley) 8/20/94 + */ + +#ifndef _SYS_QUEUE_H +#define _SYS_QUEUE_H + +/* + * This file defines five types of data structures: singly-linked lists, + * lists, simple queues, tail queues, and circular queues. + * + * A singly-linked list is headed by a single forward pointer. The + * elements are singly linked for minimum space and pointer manipulation + * overhead at the expense of O(n) removal for arbitrary elements. New + * elements can be added to the list after an existing element or at the + * head of the list. Elements being removed from the head of the list + * should use the explicit macro for this purpose for optimum + * efficiency. A singly-linked list may only be traversed in the forward + * direction. Singly-linked lists are ideal for applications with large + * datasets and few or no removals or for implementing a LIFO queue. + * + * A list is headed by a single forward pointer (or an array of forward + * pointers for a hash table header). The elements are doubly linked + * so that an arbitrary element can be removed without a need to + * traverse the list. New elements can be added to the list before + * or after an existing element or at the head of the list. A list + * may only be traversed in the forward direction. + * + * A simple queue is headed by a pair of pointers, one the head of the + * list and the other to the tail of the list. The elements are singly + * linked to save space, so elements can only be removed from the + * head of the list. New elements can be added to the list after + * an existing element, at the head of the list, or at the end of the + * list. A simple queue may only be traversed in the forward direction. + * + * A tail queue is headed by a pair of pointers, one to the head of the + * list and the other to the tail of the list. The elements are doubly + * linked so that an arbitrary element can be removed without a need to + * traverse the list. New elements can be added to the list before or + * after an existing element, at the head of the list, or at the end of + * the list. A tail queue may be traversed in either direction. + * + * A circle queue is headed by a pair of pointers, one to the head of the + * list and the other to the tail of the list. The elements are doubly + * linked so that an arbitrary element can be removed without a need to + * traverse the list. New elements can be added to the list before or after + * an existing element, at the head of the list, or at the end of the list. 
+ * A circle queue may be traversed in either direction, but has a more + * complex end of list detection. + * + * For details on the use of these macros, see the queue(3) manual page. + */ + +/* + * List definitions. + */ +#define LIST_HEAD(name, type) \ +struct name { \ + struct type *lh_first; /* first element */ \ +} + +#define LIST_HEAD_INITIALIZER(head) \ + { NULL } + +#define LIST_ENTRY(type) \ +struct { \ + struct type *le_next; /* next element */ \ + struct type **le_prev; /* address of previous next element */ \ +} + +/* + * List functions. + */ +#define LIST_INIT(head) do { \ + (head)->lh_first = NULL; \ +} while (/*CONSTCOND*/0) + +#define LIST_INSERT_AFTER(listelm, elm, field) do { \ + if (((elm)->field.le_next = (listelm)->field.le_next) != NULL) \ + (listelm)->field.le_next->field.le_prev = \ + &(elm)->field.le_next; \ + (listelm)->field.le_next = (elm); \ + (elm)->field.le_prev = &(listelm)->field.le_next; \ +} while (/*CONSTCOND*/0) + +#define LIST_INSERT_BEFORE(listelm, elm, field) do { \ + (elm)->field.le_prev = (listelm)->field.le_prev; \ + (elm)->field.le_next = (listelm); \ + *(listelm)->field.le_prev = (elm); \ + (listelm)->field.le_prev = &(elm)->field.le_next; \ +} while (/*CONSTCOND*/0) + +#define LIST_INSERT_HEAD(head, elm, field) do { \ + if (((elm)->field.le_next = (head)->lh_first) != NULL) \ + (head)->lh_first->field.le_prev = &(elm)->field.le_next;\ + (head)->lh_first = (elm); \ + (elm)->field.le_prev = &(head)->lh_first; \ +} while (/*CONSTCOND*/0) + +#define LIST_REMOVE(elm, field) do { \ + if ((elm)->field.le_next != NULL) \ + (elm)->field.le_next->field.le_prev = \ + (elm)->field.le_prev; \ + *(elm)->field.le_prev = (elm)->field.le_next; \ +} while (/*CONSTCOND*/0) + +#define LIST_FOREACH(var, head, field) \ + for ((var) = ((head)->lh_first); \ + (var); \ + (var) = ((var)->field.le_next)) + +/* + * List access methods. + */ +#define LIST_EMPTY(head) ((head)->lh_first == NULL) +#define LIST_FIRST(head) ((head)->lh_first) +#define LIST_NEXT(elm, field) ((elm)->field.le_next) + + +/* + * Singly-linked List definitions. + */ +#define SLIST_HEAD(name, type) \ +struct name { \ + struct type *slh_first; /* first element */ \ +} + +#define SLIST_HEAD_INITIALIZER(head) \ + { NULL } + +#define SLIST_ENTRY(type) \ +struct { \ + struct type *sle_next; /* next element */ \ +} + +/* + * Singly-linked List functions. + */ +#define SLIST_INIT(head) do { \ + (head)->slh_first = NULL; \ +} while (/*CONSTCOND*/0) + +#define SLIST_INSERT_AFTER(slistelm, elm, field) do { \ + (elm)->field.sle_next = (slistelm)->field.sle_next; \ + (slistelm)->field.sle_next = (elm); \ +} while (/*CONSTCOND*/0) + +#define SLIST_INSERT_HEAD(head, elm, field) do { \ + (elm)->field.sle_next = (head)->slh_first; \ + (head)->slh_first = (elm); \ +} while (/*CONSTCOND*/0) + +#define SLIST_REMOVE_HEAD(head, field) do { \ + (head)->slh_first = (head)->slh_first->field.sle_next; \ +} while (/*CONSTCOND*/0) + +#define SLIST_REMOVE(head, elm, type, field) do { \ + if ((head)->slh_first == (elm)) { \ + SLIST_REMOVE_HEAD((head), field); \ + } \ + else { \ + struct type *curelm = (head)->slh_first; \ + while(curelm->field.sle_next != (elm)) \ + curelm = curelm->field.sle_next; \ + curelm->field.sle_next = \ + curelm->field.sle_next->field.sle_next; \ + } \ +} while (/*CONSTCOND*/0) + +#define SLIST_FOREACH(var, head, field) \ + for((var) = (head)->slh_first; (var); (var) = (var)->field.sle_next) + +/* + * Singly-linked List access methods. 
+ */ +#define SLIST_EMPTY(head) ((head)->slh_first == NULL) +#define SLIST_FIRST(head) ((head)->slh_first) +#define SLIST_NEXT(elm, field) ((elm)->field.sle_next) + + +/* + * Singly-linked Tail queue declarations. + */ +#define STAILQ_HEAD(name, type) \ +struct name { \ + struct type *stqh_first; /* first element */ \ + struct type **stqh_last; /* addr of last next element */ \ +} + +#define STAILQ_HEAD_INITIALIZER(head) \ + { NULL, &(head).stqh_first } + +#define STAILQ_ENTRY(type) \ +struct { \ + struct type *stqe_next; /* next element */ \ +} + +/* + * Singly-linked Tail queue functions. + */ +#define STAILQ_INIT(head) do { \ + (head)->stqh_first = NULL; \ + (head)->stqh_last = &(head)->stqh_first; \ +} while (/*CONSTCOND*/0) + +#define STAILQ_INSERT_HEAD(head, elm, field) do { \ + if (((elm)->field.stqe_next = (head)->stqh_first) == NULL) \ + (head)->stqh_last = &(elm)->field.stqe_next; \ + (head)->stqh_first = (elm); \ +} while (/*CONSTCOND*/0) + +#define STAILQ_INSERT_TAIL(head, elm, field) do { \ + (elm)->field.stqe_next = NULL; \ + *(head)->stqh_last = (elm); \ + (head)->stqh_last = &(elm)->field.stqe_next; \ +} while (/*CONSTCOND*/0) + +#define STAILQ_INSERT_AFTER(head, listelm, elm, field) do { \ + if (((elm)->field.stqe_next = (listelm)->field.stqe_next) == NULL)\ + (head)->stqh_last = &(elm)->field.stqe_next; \ + (listelm)->field.stqe_next = (elm); \ +} while (/*CONSTCOND*/0) + +#define STAILQ_REMOVE_HEAD(head, field) do { \ + if (((head)->stqh_first = (head)->stqh_first->field.stqe_next) == NULL) \ + (head)->stqh_last = &(head)->stqh_first; \ +} while (/*CONSTCOND*/0) + +#define STAILQ_REMOVE(head, elm, type, field) do { \ + if ((head)->stqh_first == (elm)) { \ + STAILQ_REMOVE_HEAD((head), field); \ + } else { \ + struct type *curelm = (head)->stqh_first; \ + while (curelm->field.stqe_next != (elm)) \ + curelm = curelm->field.stqe_next; \ + if ((curelm->field.stqe_next = \ + curelm->field.stqe_next->field.stqe_next) == NULL) \ + (head)->stqh_last = &(curelm)->field.stqe_next; \ + } \ +} while (/*CONSTCOND*/0) + +#define STAILQ_FOREACH(var, head, field) \ + for ((var) = ((head)->stqh_first); \ + (var); \ + (var) = ((var)->field.stqe_next)) + +#define STAILQ_CONCAT(head1, head2) do { \ + if (!STAILQ_EMPTY((head2))) { \ + *(head1)->stqh_last = (head2)->stqh_first; \ + (head1)->stqh_last = (head2)->stqh_last; \ + STAILQ_INIT((head2)); \ + } \ +} while (/*CONSTCOND*/0) + +/* + * Singly-linked Tail queue access methods. + */ +#define STAILQ_EMPTY(head) ((head)->stqh_first == NULL) +#define STAILQ_FIRST(head) ((head)->stqh_first) +#define STAILQ_NEXT(elm, field) ((elm)->field.stqe_next) + + +/* + * Simple queue definitions. + */ +#define SIMPLEQ_HEAD(name, type) \ +struct name { \ + struct type *sqh_first; /* first element */ \ + struct type **sqh_last; /* addr of last next element */ \ +} + +#define SIMPLEQ_HEAD_INITIALIZER(head) \ + { NULL, &(head).sqh_first } + +#define SIMPLEQ_ENTRY(type) \ +struct { \ + struct type *sqe_next; /* next element */ \ +} + +/* + * Simple queue functions. 
+ */ +#define SIMPLEQ_INIT(head) do { \ + (head)->sqh_first = NULL; \ + (head)->sqh_last = &(head)->sqh_first; \ +} while (/*CONSTCOND*/0) + +#define SIMPLEQ_INSERT_HEAD(head, elm, field) do { \ + if (((elm)->field.sqe_next = (head)->sqh_first) == NULL) \ + (head)->sqh_last = &(elm)->field.sqe_next; \ + (head)->sqh_first = (elm); \ +} while (/*CONSTCOND*/0) + +#define SIMPLEQ_INSERT_TAIL(head, elm, field) do { \ + (elm)->field.sqe_next = NULL; \ + *(head)->sqh_last = (elm); \ + (head)->sqh_last = &(elm)->field.sqe_next; \ +} while (/*CONSTCOND*/0) + +#define SIMPLEQ_INSERT_AFTER(head, listelm, elm, field) do { \ + if (((elm)->field.sqe_next = (listelm)->field.sqe_next) == NULL)\ + (head)->sqh_last = &(elm)->field.sqe_next; \ + (listelm)->field.sqe_next = (elm); \ +} while (/*CONSTCOND*/0) + +#define SIMPLEQ_REMOVE_HEAD(head, field) do { \ + if (((head)->sqh_first = (head)->sqh_first->field.sqe_next) == NULL) \ + (head)->sqh_last = &(head)->sqh_first; \ +} while (/*CONSTCOND*/0) + +#define SIMPLEQ_REMOVE(head, elm, type, field) do { \ + if ((head)->sqh_first == (elm)) { \ + SIMPLEQ_REMOVE_HEAD((head), field); \ + } else { \ + struct type *curelm = (head)->sqh_first; \ + while (curelm->field.sqe_next != (elm)) \ + curelm = curelm->field.sqe_next; \ + if ((curelm->field.sqe_next = \ + curelm->field.sqe_next->field.sqe_next) == NULL) \ + (head)->sqh_last = &(curelm)->field.sqe_next; \ + } \ +} while (/*CONSTCOND*/0) + +#define SIMPLEQ_FOREACH(var, head, field) \ + for ((var) = ((head)->sqh_first); \ + (var); \ + (var) = ((var)->field.sqe_next)) + +/* + * Simple queue access methods. + */ +#define SIMPLEQ_EMPTY(head) ((head)->sqh_first == NULL) +#define SIMPLEQ_FIRST(head) ((head)->sqh_first) +#define SIMPLEQ_NEXT(elm, field) ((elm)->field.sqe_next) + + +/* + * Tail queue definitions. + */ +#define _TAILQ_HEAD(name, type, qual) \ +struct name { \ + qual type *tqh_first; /* first element */ \ + qual type *qual *tqh_last; /* addr of last next element */ \ +} +#define TAILQ_HEAD(name, type) _TAILQ_HEAD(name, struct type,) + +#define TAILQ_HEAD_INITIALIZER(head) \ + { NULL, &(head).tqh_first } + +#define _TAILQ_ENTRY(type, qual) \ +struct { \ + qual type *tqe_next; /* next element */ \ + qual type *qual *tqe_prev; /* address of previous next element */\ +} +#define TAILQ_ENTRY(type) _TAILQ_ENTRY(struct type,) + +/* + * Tail queue functions. 
+ */ +#define TAILQ_INIT(head) do { \ + (head)->tqh_first = NULL; \ + (head)->tqh_last = &(head)->tqh_first; \ +} while (/*CONSTCOND*/0) + +#define TAILQ_INSERT_HEAD(head, elm, field) do { \ + if (((elm)->field.tqe_next = (head)->tqh_first) != NULL) \ + (head)->tqh_first->field.tqe_prev = \ + &(elm)->field.tqe_next; \ + else \ + (head)->tqh_last = &(elm)->field.tqe_next; \ + (head)->tqh_first = (elm); \ + (elm)->field.tqe_prev = &(head)->tqh_first; \ +} while (/*CONSTCOND*/0) + +#define TAILQ_INSERT_TAIL(head, elm, field) do { \ + (elm)->field.tqe_next = NULL; \ + (elm)->field.tqe_prev = (head)->tqh_last; \ + *(head)->tqh_last = (elm); \ + (head)->tqh_last = &(elm)->field.tqe_next; \ +} while (/*CONSTCOND*/0) + +#define TAILQ_INSERT_AFTER(head, listelm, elm, field) do { \ + if (((elm)->field.tqe_next = (listelm)->field.tqe_next) != NULL)\ + (elm)->field.tqe_next->field.tqe_prev = \ + &(elm)->field.tqe_next; \ + else \ + (head)->tqh_last = &(elm)->field.tqe_next; \ + (listelm)->field.tqe_next = (elm); \ + (elm)->field.tqe_prev = &(listelm)->field.tqe_next; \ +} while (/*CONSTCOND*/0) + +#define TAILQ_INSERT_BEFORE(listelm, elm, field) do { \ + (elm)->field.tqe_prev = (listelm)->field.tqe_prev; \ + (elm)->field.tqe_next = (listelm); \ + *(listelm)->field.tqe_prev = (elm); \ + (listelm)->field.tqe_prev = &(elm)->field.tqe_next; \ +} while (/*CONSTCOND*/0) + +#define TAILQ_REMOVE(head, elm, field) do { \ + if (((elm)->field.tqe_next) != NULL) \ + (elm)->field.tqe_next->field.tqe_prev = \ + (elm)->field.tqe_prev; \ + else \ + (head)->tqh_last = (elm)->field.tqe_prev; \ + *(elm)->field.tqe_prev = (elm)->field.tqe_next; \ +} while (/*CONSTCOND*/0) + +#define TAILQ_FOREACH(var, head, field) \ + for ((var) = ((head)->tqh_first); \ + (var); \ + (var) = ((var)->field.tqe_next)) + +#define TAILQ_FOREACH_REVERSE(var, head, headname, field) \ + for ((var) = (*(((struct headname *)((head)->tqh_last))->tqh_last)); \ + (var); \ + (var) = (*(((struct headname *)((var)->field.tqe_prev))->tqh_last))) + +#define TAILQ_CONCAT(head1, head2, field) do { \ + if (!TAILQ_EMPTY(head2)) { \ + *(head1)->tqh_last = (head2)->tqh_first; \ + (head2)->tqh_first->field.tqe_prev = (head1)->tqh_last; \ + (head1)->tqh_last = (head2)->tqh_last; \ + TAILQ_INIT((head2)); \ + } \ +} while (/*CONSTCOND*/0) + +/* + * Tail queue access methods. + */ +#define TAILQ_EMPTY(head) ((head)->tqh_first == NULL) +#define TAILQ_FIRST(head) ((head)->tqh_first) +#define TAILQ_NEXT(elm, field) ((elm)->field.tqe_next) + +#define TAILQ_LAST(head, headname) \ + (*(((struct headname *)((head)->tqh_last))->tqh_last)) +#define TAILQ_PREV(elm, headname, field) \ + (*(((struct headname *)((elm)->field.tqe_prev))->tqh_last)) + + +/* + * Circular queue definitions. + */ +#define CIRCLEQ_HEAD(name, type) \ +struct name { \ + struct type *cqh_first; /* first element */ \ + struct type *cqh_last; /* last element */ \ +} + +#define CIRCLEQ_HEAD_INITIALIZER(head) \ + { (void *)&head, (void *)&head } + +#define CIRCLEQ_ENTRY(type) \ +struct { \ + struct type *cqe_next; /* next element */ \ + struct type *cqe_prev; /* previous element */ \ +} + +/* + * Circular queue functions. 
+ */ +#define CIRCLEQ_INIT(head) do { \ + (head)->cqh_first = (void *)(head); \ + (head)->cqh_last = (void *)(head); \ +} while (/*CONSTCOND*/0) + +#define CIRCLEQ_INSERT_AFTER(head, listelm, elm, field) do { \ + (elm)->field.cqe_next = (listelm)->field.cqe_next; \ + (elm)->field.cqe_prev = (listelm); \ + if ((listelm)->field.cqe_next == (void *)(head)) \ + (head)->cqh_last = (elm); \ + else \ + (listelm)->field.cqe_next->field.cqe_prev = (elm); \ + (listelm)->field.cqe_next = (elm); \ +} while (/*CONSTCOND*/0) + +#define CIRCLEQ_INSERT_BEFORE(head, listelm, elm, field) do { \ + (elm)->field.cqe_next = (listelm); \ + (elm)->field.cqe_prev = (listelm)->field.cqe_prev; \ + if ((listelm)->field.cqe_prev == (void *)(head)) \ + (head)->cqh_first = (elm); \ + else \ + (listelm)->field.cqe_prev->field.cqe_next = (elm); \ + (listelm)->field.cqe_prev = (elm); \ +} while (/*CONSTCOND*/0) + +#define CIRCLEQ_INSERT_HEAD(head, elm, field) do { \ + (elm)->field.cqe_next = (head)->cqh_first; \ + (elm)->field.cqe_prev = (void *)(head); \ + if ((head)->cqh_last == (void *)(head)) \ + (head)->cqh_last = (elm); \ + else \ + (head)->cqh_first->field.cqe_prev = (elm); \ + (head)->cqh_first = (elm); \ +} while (/*CONSTCOND*/0) + +#define CIRCLEQ_INSERT_TAIL(head, elm, field) do { \ + (elm)->field.cqe_next = (void *)(head); \ + (elm)->field.cqe_prev = (head)->cqh_last; \ + if ((head)->cqh_first == (void *)(head)) \ + (head)->cqh_first = (elm); \ + else \ + (head)->cqh_last->field.cqe_next = (elm); \ + (head)->cqh_last = (elm); \ +} while (/*CONSTCOND*/0) + +#define CIRCLEQ_REMOVE(head, elm, field) do { \ + if ((elm)->field.cqe_next == (void *)(head)) \ + (head)->cqh_last = (elm)->field.cqe_prev; \ + else \ + (elm)->field.cqe_next->field.cqe_prev = \ + (elm)->field.cqe_prev; \ + if ((elm)->field.cqe_prev == (void *)(head)) \ + (head)->cqh_first = (elm)->field.cqe_next; \ + else \ + (elm)->field.cqe_prev->field.cqe_next = \ + (elm)->field.cqe_next; \ +} while (/*CONSTCOND*/0) + +#define CIRCLEQ_FOREACH(var, head, field) \ + for ((var) = ((head)->cqh_first); \ + (var) != (const void *)(head); \ + (var) = ((var)->field.cqe_next)) + +#define CIRCLEQ_FOREACH_REVERSE(var, head, field) \ + for ((var) = ((head)->cqh_last); \ + (var) != (const void *)(head); \ + (var) = ((var)->field.cqe_prev)) + +/* + * Circular queue access methods. + */ +#define CIRCLEQ_EMPTY(head) ((head)->cqh_first == (void *)(head)) +#define CIRCLEQ_FIRST(head) ((head)->cqh_first) +#define CIRCLEQ_LAST(head) ((head)->cqh_last) +#define CIRCLEQ_NEXT(elm, field) ((elm)->field.cqe_next) +#define CIRCLEQ_PREV(elm, field) ((elm)->field.cqe_prev) + +#define CIRCLEQ_LOOP_NEXT(head, elm, field) \ + (((elm)->field.cqe_next == (void *)(head)) \ + ? ((head)->cqh_first) \ + : (elm->field.cqe_next)) +#define CIRCLEQ_LOOP_PREV(head, elm, field) \ + (((elm)->field.cqe_prev == (void *)(head)) \ + ? ((head)->cqh_last) \ + : (elm->field.cqe_prev)) + +#endif diff --git a/test.c b/test.c @@ -0,0 +1,16 @@ +#include <stdio.h> +#include <string.h> + +char buf[128] __attribute__ ((section ("TRACED_AREA"))) = { 0 }; + +int +main(void) +{ + size_t i; + char *p = buf; + + fprintf(stderr, "buf is at %p\n", buf); + for (i = 0; i < sizeof(buf); i++) + p[i] = 0x41 + i; + return 0; +} diff --git a/tree.h b/tree.h @@ -0,0 +1,765 @@ +/* $NetBSD: tree.h,v 1.8 2004/03/28 19:38:30 provos Exp $ */ +/* $OpenBSD: tree.h,v 1.7 2002/10/17 21:51:54 art Exp $ */ +/* $FreeBSD$ */ + +/*- + * Copyright 2002 Niels Provos <provos@citi.umich.edu> + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _SYS_TREE_H +#define _SYS_TREE_H + +#include <sys/cdefs.h> + +/* + * This file defines data structures for different types of trees: + * splay trees and red-black trees. + * + * A splay tree is a self-organizing data structure. Every operation + * on the tree causes a splay to happen. The splay moves the requested + * node to the root of the tree and partly rebalances it. + * + * This has the benefit that request locality causes faster lookups as + * the requested nodes move to the top of the tree. On the other hand, + * every lookup causes memory writes. + * + * The Balance Theorem bounds the total access time for m operations + * and n inserts on an initially empty tree as O((m + n)lg n). The + * amortized cost for a sequence of m accesses to a splay tree is O(lg n); + * + * A red-black tree is a binary search tree with the node color as an + * extra attribute. It fulfills a set of conditions: + * - every search path from the root to a leaf consists of the + * same number of black nodes, + * - each red node (except for the root) has a black parent, + * - each leaf node is black. + * + * Every operation on a red-black tree is bounded as O(lg n). + * The maximum height of a red-black tree is 2lg (n+1). 
+ */ + +#define SPLAY_HEAD(name, type) \ +struct name { \ + struct type *sph_root; /* root of the tree */ \ +} + +#define SPLAY_INITIALIZER(root) \ + { NULL } + +#define SPLAY_INIT(root) do { \ + (root)->sph_root = NULL; \ +} while (/*CONSTCOND*/ 0) + +#define SPLAY_ENTRY(type) \ +struct { \ + struct type *spe_left; /* left element */ \ + struct type *spe_right; /* right element */ \ +} + +#define SPLAY_LEFT(elm, field) (elm)->field.spe_left +#define SPLAY_RIGHT(elm, field) (elm)->field.spe_right +#define SPLAY_ROOT(head) (head)->sph_root +#define SPLAY_EMPTY(head) (SPLAY_ROOT(head) == NULL) + +/* SPLAY_ROTATE_{LEFT,RIGHT} expect that tmp hold SPLAY_{RIGHT,LEFT} */ +#define SPLAY_ROTATE_RIGHT(head, tmp, field) do { \ + SPLAY_LEFT((head)->sph_root, field) = SPLAY_RIGHT(tmp, field); \ + SPLAY_RIGHT(tmp, field) = (head)->sph_root; \ + (head)->sph_root = tmp; \ +} while (/*CONSTCOND*/ 0) + +#define SPLAY_ROTATE_LEFT(head, tmp, field) do { \ + SPLAY_RIGHT((head)->sph_root, field) = SPLAY_LEFT(tmp, field); \ + SPLAY_LEFT(tmp, field) = (head)->sph_root; \ + (head)->sph_root = tmp; \ +} while (/*CONSTCOND*/ 0) + +#define SPLAY_LINKLEFT(head, tmp, field) do { \ + SPLAY_LEFT(tmp, field) = (head)->sph_root; \ + tmp = (head)->sph_root; \ + (head)->sph_root = SPLAY_LEFT((head)->sph_root, field); \ +} while (/*CONSTCOND*/ 0) + +#define SPLAY_LINKRIGHT(head, tmp, field) do { \ + SPLAY_RIGHT(tmp, field) = (head)->sph_root; \ + tmp = (head)->sph_root; \ + (head)->sph_root = SPLAY_RIGHT((head)->sph_root, field); \ +} while (/*CONSTCOND*/ 0) + +#define SPLAY_ASSEMBLE(head, node, left, right, field) do { \ + SPLAY_RIGHT(left, field) = SPLAY_LEFT((head)->sph_root, field); \ + SPLAY_LEFT(right, field) = SPLAY_RIGHT((head)->sph_root, field);\ + SPLAY_LEFT((head)->sph_root, field) = SPLAY_RIGHT(node, field); \ + SPLAY_RIGHT((head)->sph_root, field) = SPLAY_LEFT(node, field); \ +} while (/*CONSTCOND*/ 0) + +/* Generates prototypes and inline functions */ + +#define SPLAY_PROTOTYPE(name, type, field, cmp) \ +void name##_SPLAY(struct name *, struct type *); \ +void name##_SPLAY_MINMAX(struct name *, int); \ +struct type *name##_SPLAY_INSERT(struct name *, struct type *); \ +struct type *name##_SPLAY_REMOVE(struct name *, struct type *); \ + \ +/* Finds the node with the same key as elm */ \ +static __inline struct type * \ +name##_SPLAY_FIND(struct name *head, struct type *elm) \ +{ \ + if (SPLAY_EMPTY(head)) \ + return(NULL); \ + name##_SPLAY(head, elm); \ + if ((cmp)(elm, (head)->sph_root) == 0) \ + return (head->sph_root); \ + return (NULL); \ +} \ + \ +static __inline struct type * \ +name##_SPLAY_NEXT(struct name *head, struct type *elm) \ +{ \ + name##_SPLAY(head, elm); \ + if (SPLAY_RIGHT(elm, field) != NULL) { \ + elm = SPLAY_RIGHT(elm, field); \ + while (SPLAY_LEFT(elm, field) != NULL) { \ + elm = SPLAY_LEFT(elm, field); \ + } \ + } else \ + elm = NULL; \ + return (elm); \ +} \ + \ +static __inline struct type * \ +name##_SPLAY_MIN_MAX(struct name *head, int val) \ +{ \ + name##_SPLAY_MINMAX(head, val); \ + return (SPLAY_ROOT(head)); \ +} + +/* Main splay operation. 
+ * Moves node close to the key of elm to top + */ +#define SPLAY_GENERATE(name, type, field, cmp) \ +struct type * \ +name##_SPLAY_INSERT(struct name *head, struct type *elm) \ +{ \ + if (SPLAY_EMPTY(head)) { \ + SPLAY_LEFT(elm, field) = SPLAY_RIGHT(elm, field) = NULL; \ + } else { \ + int __comp; \ + name##_SPLAY(head, elm); \ + __comp = (cmp)(elm, (head)->sph_root); \ + if(__comp < 0) { \ + SPLAY_LEFT(elm, field) = SPLAY_LEFT((head)->sph_root, field);\ + SPLAY_RIGHT(elm, field) = (head)->sph_root; \ + SPLAY_LEFT((head)->sph_root, field) = NULL; \ + } else if (__comp > 0) { \ + SPLAY_RIGHT(elm, field) = SPLAY_RIGHT((head)->sph_root, field);\ + SPLAY_LEFT(elm, field) = (head)->sph_root; \ + SPLAY_RIGHT((head)->sph_root, field) = NULL; \ + } else \ + return ((head)->sph_root); \ + } \ + (head)->sph_root = (elm); \ + return (NULL); \ +} \ + \ +struct type * \ +name##_SPLAY_REMOVE(struct name *head, struct type *elm) \ +{ \ + struct type *__tmp; \ + if (SPLAY_EMPTY(head)) \ + return (NULL); \ + name##_SPLAY(head, elm); \ + if ((cmp)(elm, (head)->sph_root) == 0) { \ + if (SPLAY_LEFT((head)->sph_root, field) == NULL) { \ + (head)->sph_root = SPLAY_RIGHT((head)->sph_root, field);\ + } else { \ + __tmp = SPLAY_RIGHT((head)->sph_root, field); \ + (head)->sph_root = SPLAY_LEFT((head)->sph_root, field);\ + name##_SPLAY(head, elm); \ + SPLAY_RIGHT((head)->sph_root, field) = __tmp; \ + } \ + return (elm); \ + } \ + return (NULL); \ +} \ + \ +void \ +name##_SPLAY(struct name *head, struct type *elm) \ +{ \ + struct type __node, *__left, *__right, *__tmp; \ + int __comp; \ +\ + SPLAY_LEFT(&__node, field) = SPLAY_RIGHT(&__node, field) = NULL;\ + __left = __right = &__node; \ +\ + while ((__comp = (cmp)(elm, (head)->sph_root)) != 0) { \ + if (__comp < 0) { \ + __tmp = SPLAY_LEFT((head)->sph_root, field); \ + if (__tmp == NULL) \ + break; \ + if ((cmp)(elm, __tmp) < 0){ \ + SPLAY_ROTATE_RIGHT(head, __tmp, field); \ + if (SPLAY_LEFT((head)->sph_root, field) == NULL)\ + break; \ + } \ + SPLAY_LINKLEFT(head, __right, field); \ + } else if (__comp > 0) { \ + __tmp = SPLAY_RIGHT((head)->sph_root, field); \ + if (__tmp == NULL) \ + break; \ + if ((cmp)(elm, __tmp) > 0){ \ + SPLAY_ROTATE_LEFT(head, __tmp, field); \ + if (SPLAY_RIGHT((head)->sph_root, field) == NULL)\ + break; \ + } \ + SPLAY_LINKRIGHT(head, __left, field); \ + } \ + } \ + SPLAY_ASSEMBLE(head, &__node, __left, __right, field); \ +} \ + \ +/* Splay with either the minimum or the maximum element \ + * Used to find minimum or maximum element in tree. 
\ + */ \ +void name##_SPLAY_MINMAX(struct name *head, int __comp) \ +{ \ + struct type __node, *__left, *__right, *__tmp; \ +\ + SPLAY_LEFT(&__node, field) = SPLAY_RIGHT(&__node, field) = NULL;\ + __left = __right = &__node; \ +\ + while (1) { \ + if (__comp < 0) { \ + __tmp = SPLAY_LEFT((head)->sph_root, field); \ + if (__tmp == NULL) \ + break; \ + if (__comp < 0){ \ + SPLAY_ROTATE_RIGHT(head, __tmp, field); \ + if (SPLAY_LEFT((head)->sph_root, field) == NULL)\ + break; \ + } \ + SPLAY_LINKLEFT(head, __right, field); \ + } else if (__comp > 0) { \ + __tmp = SPLAY_RIGHT((head)->sph_root, field); \ + if (__tmp == NULL) \ + break; \ + if (__comp > 0) { \ + SPLAY_ROTATE_LEFT(head, __tmp, field); \ + if (SPLAY_RIGHT((head)->sph_root, field) == NULL)\ + break; \ + } \ + SPLAY_LINKRIGHT(head, __left, field); \ + } \ + } \ + SPLAY_ASSEMBLE(head, &__node, __left, __right, field); \ +} + +#define SPLAY_NEGINF -1 +#define SPLAY_INF 1 + +#define SPLAY_INSERT(name, x, y) name##_SPLAY_INSERT(x, y) +#define SPLAY_REMOVE(name, x, y) name##_SPLAY_REMOVE(x, y) +#define SPLAY_FIND(name, x, y) name##_SPLAY_FIND(x, y) +#define SPLAY_NEXT(name, x, y) name##_SPLAY_NEXT(x, y) +#define SPLAY_MIN(name, x) (SPLAY_EMPTY(x) ? NULL \ + : name##_SPLAY_MIN_MAX(x, SPLAY_NEGINF)) +#define SPLAY_MAX(name, x) (SPLAY_EMPTY(x) ? NULL \ + : name##_SPLAY_MIN_MAX(x, SPLAY_INF)) + +#define SPLAY_FOREACH(x, name, head) \ + for ((x) = SPLAY_MIN(name, head); \ + (x) != NULL; \ + (x) = SPLAY_NEXT(name, head, x)) + +/* Macros that define a red-black tree */ +#define RB_HEAD(name, type) \ +struct name { \ + struct type *rbh_root; /* root of the tree */ \ +} + +#define RB_INITIALIZER(root) \ + { NULL } + +#define RB_INIT(root) do { \ + (root)->rbh_root = NULL; \ +} while (/*CONSTCOND*/ 0) + +#define RB_BLACK 0 +#define RB_RED 1 +#define RB_ENTRY(type) \ +struct { \ + struct type *rbe_left; /* left element */ \ + struct type *rbe_right; /* right element */ \ + struct type *rbe_parent; /* parent element */ \ + int rbe_color; /* node color */ \ +} + +#define RB_LEFT(elm, field) (elm)->field.rbe_left +#define RB_RIGHT(elm, field) (elm)->field.rbe_right +#define RB_PARENT(elm, field) (elm)->field.rbe_parent +#define RB_COLOR(elm, field) (elm)->field.rbe_color +#define RB_ROOT(head) (head)->rbh_root +#define RB_EMPTY(head) (RB_ROOT(head) == NULL) + +#define RB_SET(elm, parent, field) do { \ + RB_PARENT(elm, field) = parent; \ + RB_LEFT(elm, field) = RB_RIGHT(elm, field) = NULL; \ + RB_COLOR(elm, field) = RB_RED; \ +} while (/*CONSTCOND*/ 0) + +#define RB_SET_BLACKRED(black, red, field) do { \ + RB_COLOR(black, field) = RB_BLACK; \ + RB_COLOR(red, field) = RB_RED; \ +} while (/*CONSTCOND*/ 0) + +#ifndef RB_AUGMENT +#define RB_AUGMENT(x) do {} while (0) +#endif + +#define RB_ROTATE_LEFT(head, elm, tmp, field) do { \ + (tmp) = RB_RIGHT(elm, field); \ + if ((RB_RIGHT(elm, field) = RB_LEFT(tmp, field)) != NULL) { \ + RB_PARENT(RB_LEFT(tmp, field), field) = (elm); \ + } \ + RB_AUGMENT(elm); \ + if ((RB_PARENT(tmp, field) = RB_PARENT(elm, field)) != NULL) { \ + if ((elm) == RB_LEFT(RB_PARENT(elm, field), field)) \ + RB_LEFT(RB_PARENT(elm, field), field) = (tmp); \ + else \ + RB_RIGHT(RB_PARENT(elm, field), field) = (tmp); \ + } else \ + (head)->rbh_root = (tmp); \ + RB_LEFT(tmp, field) = (elm); \ + RB_PARENT(elm, field) = (tmp); \ + RB_AUGMENT(tmp); \ + if ((RB_PARENT(tmp, field))) \ + RB_AUGMENT(RB_PARENT(tmp, field)); \ +} while (/*CONSTCOND*/ 0) + +#define RB_ROTATE_RIGHT(head, elm, tmp, field) do { \ + (tmp) = RB_LEFT(elm, field); \ + if 
+ RB_PARENT(RB_RIGHT(tmp, field), field) = (elm); \
+ } \
+ RB_AUGMENT(elm); \
+ if ((RB_PARENT(tmp, field) = RB_PARENT(elm, field)) != NULL) { \
+ if ((elm) == RB_LEFT(RB_PARENT(elm, field), field)) \
+ RB_LEFT(RB_PARENT(elm, field), field) = (tmp); \
+ else \
+ RB_RIGHT(RB_PARENT(elm, field), field) = (tmp); \
+ } else \
+ (head)->rbh_root = (tmp); \
+ RB_RIGHT(tmp, field) = (elm); \
+ RB_PARENT(elm, field) = (tmp); \
+ RB_AUGMENT(tmp); \
+ if ((RB_PARENT(tmp, field))) \
+ RB_AUGMENT(RB_PARENT(tmp, field)); \
+} while (/*CONSTCOND*/ 0)
+
+/* Generates prototypes and inline functions */
+#define RB_PROTOTYPE(name, type, field, cmp) \
+ RB_PROTOTYPE_INTERNAL(name, type, field, cmp,)
+#define RB_PROTOTYPE_STATIC(name, type, field, cmp) \
+ RB_PROTOTYPE_INTERNAL(name, type, field, cmp, __unused static)
+#define RB_PROTOTYPE_INTERNAL(name, type, field, cmp, attr) \
+attr void name##_RB_INSERT_COLOR(struct name *, struct type *); \
+attr void name##_RB_REMOVE_COLOR(struct name *, struct type *, struct type *);\
+attr struct type *name##_RB_REMOVE(struct name *, struct type *); \
+attr struct type *name##_RB_INSERT(struct name *, struct type *); \
+attr struct type *name##_RB_FIND(struct name *, struct type *); \
+attr struct type *name##_RB_NFIND(struct name *, struct type *); \
+attr struct type *name##_RB_NEXT(struct type *); \
+attr struct type *name##_RB_PREV(struct type *); \
+attr struct type *name##_RB_MINMAX(struct name *, int); \
+ \
+
+/* Main rb operation.
+ * Moves node close to the key of elm to top
+ */
+#define RB_GENERATE(name, type, field, cmp) \
+ RB_GENERATE_INTERNAL(name, type, field, cmp,)
+#define RB_GENERATE_STATIC(name, type, field, cmp) \
+ RB_GENERATE_INTERNAL(name, type, field, cmp, __unused static)
+#define RB_GENERATE_INTERNAL(name, type, field, cmp, attr) \
+attr void \
+name##_RB_INSERT_COLOR(struct name *head, struct type *elm) \
+{ \
+ struct type *parent, *gparent, *tmp; \
+ while ((parent = RB_PARENT(elm, field)) != NULL && \
+ RB_COLOR(parent, field) == RB_RED) { \
+ gparent = RB_PARENT(parent, field); \
+ if (parent == RB_LEFT(gparent, field)) { \
+ tmp = RB_RIGHT(gparent, field); \
+ if (tmp && RB_COLOR(tmp, field) == RB_RED) { \
+ RB_COLOR(tmp, field) = RB_BLACK; \
+ RB_SET_BLACKRED(parent, gparent, field);\
+ elm = gparent; \
+ continue; \
+ } \
+ if (RB_RIGHT(parent, field) == elm) { \
+ RB_ROTATE_LEFT(head, parent, tmp, field);\
+ tmp = parent; \
+ parent = elm; \
+ elm = tmp; \
+ } \
+ RB_SET_BLACKRED(parent, gparent, field); \
+ RB_ROTATE_RIGHT(head, gparent, tmp, field); \
+ } else { \
+ tmp = RB_LEFT(gparent, field); \
+ if (tmp && RB_COLOR(tmp, field) == RB_RED) { \
+ RB_COLOR(tmp, field) = RB_BLACK; \
+ RB_SET_BLACKRED(parent, gparent, field);\
+ elm = gparent; \
+ continue; \
+ } \
+ if (RB_LEFT(parent, field) == elm) { \
+ RB_ROTATE_RIGHT(head, parent, tmp, field);\
+ tmp = parent; \
+ parent = elm; \
+ elm = tmp; \
+ } \
+ RB_SET_BLACKRED(parent, gparent, field); \
+ RB_ROTATE_LEFT(head, gparent, tmp, field); \
+ } \
+ } \
+ RB_COLOR(head->rbh_root, field) = RB_BLACK; \
+} \
+ \
+attr void \
+name##_RB_REMOVE_COLOR(struct name *head, struct type *parent, struct type *elm) \
+{ \
+ struct type *tmp; \
+ while ((elm == NULL || RB_COLOR(elm, field) == RB_BLACK) && \
+ elm != RB_ROOT(head)) { \
+ if (RB_LEFT(parent, field) == elm) { \
+ tmp = RB_RIGHT(parent, field); \
+ if (RB_COLOR(tmp, field) == RB_RED) { \
+ RB_SET_BLACKRED(tmp, parent, field); \
+ RB_ROTATE_LEFT(head, parent, tmp, field);\
+ tmp = RB_RIGHT(parent, field); \
+ } \
+ if ((RB_LEFT(tmp, field) == NULL || \
+ RB_COLOR(RB_LEFT(tmp, field), field) == RB_BLACK) &&\
+ (RB_RIGHT(tmp, field) == NULL || \
+ RB_COLOR(RB_RIGHT(tmp, field), field) == RB_BLACK)) {\
+ RB_COLOR(tmp, field) = RB_RED; \
+ elm = parent; \
+ parent = RB_PARENT(elm, field); \
+ } else { \
+ if (RB_RIGHT(tmp, field) == NULL || \
+ RB_COLOR(RB_RIGHT(tmp, field), field) == RB_BLACK) {\
+ struct type *oleft; \
+ if ((oleft = RB_LEFT(tmp, field)) \
+ != NULL) \
+ RB_COLOR(oleft, field) = RB_BLACK;\
+ RB_COLOR(tmp, field) = RB_RED; \
+ RB_ROTATE_RIGHT(head, tmp, oleft, field);\
+ tmp = RB_RIGHT(parent, field); \
+ } \
+ RB_COLOR(tmp, field) = RB_COLOR(parent, field);\
+ RB_COLOR(parent, field) = RB_BLACK; \
+ if (RB_RIGHT(tmp, field)) \
+ RB_COLOR(RB_RIGHT(tmp, field), field) = RB_BLACK;\
+ RB_ROTATE_LEFT(head, parent, tmp, field);\
+ elm = RB_ROOT(head); \
+ break; \
+ } \
+ } else { \
+ tmp = RB_LEFT(parent, field); \
+ if (RB_COLOR(tmp, field) == RB_RED) { \
+ RB_SET_BLACKRED(tmp, parent, field); \
+ RB_ROTATE_RIGHT(head, parent, tmp, field);\
+ tmp = RB_LEFT(parent, field); \
+ } \
+ if ((RB_LEFT(tmp, field) == NULL || \
+ RB_COLOR(RB_LEFT(tmp, field), field) == RB_BLACK) &&\
+ (RB_RIGHT(tmp, field) == NULL || \
+ RB_COLOR(RB_RIGHT(tmp, field), field) == RB_BLACK)) {\
+ RB_COLOR(tmp, field) = RB_RED; \
+ elm = parent; \
+ parent = RB_PARENT(elm, field); \
+ } else { \
+ if (RB_LEFT(tmp, field) == NULL || \
+ RB_COLOR(RB_LEFT(tmp, field), field) == RB_BLACK) {\
+ struct type *oright; \
+ if ((oright = RB_RIGHT(tmp, field)) \
+ != NULL) \
+ RB_COLOR(oright, field) = RB_BLACK;\
+ RB_COLOR(tmp, field) = RB_RED; \
+ RB_ROTATE_LEFT(head, tmp, oright, field);\
+ tmp = RB_LEFT(parent, field); \
+ } \
+ RB_COLOR(tmp, field) = RB_COLOR(parent, field);\
+ RB_COLOR(parent, field) = RB_BLACK; \
+ if (RB_LEFT(tmp, field)) \
+ RB_COLOR(RB_LEFT(tmp, field), field) = RB_BLACK;\
+ RB_ROTATE_RIGHT(head, parent, tmp, field);\
+ elm = RB_ROOT(head); \
+ break; \
+ } \
+ } \
+ } \
+ if (elm) \
+ RB_COLOR(elm, field) = RB_BLACK; \
+} \
+ \
+attr struct type * \
+name##_RB_REMOVE(struct name *head, struct type *elm) \
+{ \
+ struct type *child, *parent, *old = elm; \
+ int color; \
+ if (RB_LEFT(elm, field) == NULL) \
+ child = RB_RIGHT(elm, field); \
+ else if (RB_RIGHT(elm, field) == NULL) \
+ child = RB_LEFT(elm, field); \
+ else { \
+ struct type *left; \
+ elm = RB_RIGHT(elm, field); \
+ while ((left = RB_LEFT(elm, field)) != NULL) \
+ elm = left; \
+ child = RB_RIGHT(elm, field); \
+ parent = RB_PARENT(elm, field); \
+ color = RB_COLOR(elm, field); \
+ if (child) \
+ RB_PARENT(child, field) = parent; \
+ if (parent) { \
+ if (RB_LEFT(parent, field) == elm) \
+ RB_LEFT(parent, field) = child; \
+ else \
+ RB_RIGHT(parent, field) = child; \
+ RB_AUGMENT(parent); \
+ } else \
+ RB_ROOT(head) = child; \
+ if (RB_PARENT(elm, field) == old) \
+ parent = elm; \
+ (elm)->field = (old)->field; \
+ if (RB_PARENT(old, field)) { \
+ if (RB_LEFT(RB_PARENT(old, field), field) == old)\
+ RB_LEFT(RB_PARENT(old, field), field) = elm;\
+ else \
+ RB_RIGHT(RB_PARENT(old, field), field) = elm;\
+ RB_AUGMENT(RB_PARENT(old, field)); \
+ } else \
+ RB_ROOT(head) = elm; \
+ RB_PARENT(RB_LEFT(old, field), field) = elm; \
+ if (RB_RIGHT(old, field)) \
+ RB_PARENT(RB_RIGHT(old, field), field) = elm; \
+ if (parent) { \
+ left = parent; \
+ do { \
+ RB_AUGMENT(left); \
+ } while ((left = RB_PARENT(left, field)) != NULL); \
+ } \
+ goto color; \
+ } \
+ parent = RB_PARENT(elm, field); \
+ color = RB_COLOR(elm, field); \
+ if (child) \
+ RB_PARENT(child, field) = parent; \
+ if (parent) { \
+ if (RB_LEFT(parent, field) == elm) \
+ RB_LEFT(parent, field) = child; \
+ else \
+ RB_RIGHT(parent, field) = child; \
+ RB_AUGMENT(parent); \
+ } else \
+ RB_ROOT(head) = child; \
+color: \
+ if (color == RB_BLACK) \
+ name##_RB_REMOVE_COLOR(head, parent, child); \
+ return (old); \
+} \
+ \
+/* Inserts a node into the RB tree */ \
+attr struct type * \
+name##_RB_INSERT(struct name *head, struct type *elm) \
+{ \
+ struct type *tmp; \
+ struct type *parent = NULL; \
+ int comp = 0; \
+ tmp = RB_ROOT(head); \
+ while (tmp) { \
+ parent = tmp; \
+ comp = (cmp)(elm, parent); \
+ if (comp < 0) \
+ tmp = RB_LEFT(tmp, field); \
+ else if (comp > 0) \
+ tmp = RB_RIGHT(tmp, field); \
+ else \
+ return (tmp); \
+ } \
+ RB_SET(elm, parent, field); \
+ if (parent != NULL) { \
+ if (comp < 0) \
+ RB_LEFT(parent, field) = elm; \
+ else \
+ RB_RIGHT(parent, field) = elm; \
+ RB_AUGMENT(parent); \
+ } else \
+ RB_ROOT(head) = elm; \
+ name##_RB_INSERT_COLOR(head, elm); \
+ return (NULL); \
+} \
+ \
+/* Finds the node with the same key as elm */ \
+attr struct type * \
+name##_RB_FIND(struct name *head, struct type *elm) \
+{ \
+ struct type *tmp = RB_ROOT(head); \
+ int comp; \
+ while (tmp) { \
+ comp = cmp(elm, tmp); \
+ if (comp < 0) \
+ tmp = RB_LEFT(tmp, field); \
+ else if (comp > 0) \
+ tmp = RB_RIGHT(tmp, field); \
+ else \
+ return (tmp); \
+ } \
+ return (NULL); \
+} \
+ \
+/* Finds the first node greater than or equal to the search key */ \
+attr struct type * \
+name##_RB_NFIND(struct name *head, struct type *elm) \
+{ \
+ struct type *tmp = RB_ROOT(head); \
+ struct type *res = NULL; \
+ int comp; \
+ while (tmp) { \
+ comp = cmp(elm, tmp); \
+ if (comp < 0) { \
+ res = tmp; \
+ tmp = RB_LEFT(tmp, field); \
+ } \
+ else if (comp > 0) \
+ tmp = RB_RIGHT(tmp, field); \
+ else \
+ return (tmp); \
+ } \
+ return (res); \
+} \
+ \
+/* ARGSUSED */ \
+attr struct type * \
+name##_RB_NEXT(struct type *elm) \
+{ \
+ if (RB_RIGHT(elm, field)) { \
+ elm = RB_RIGHT(elm, field); \
+ while (RB_LEFT(elm, field)) \
+ elm = RB_LEFT(elm, field); \
+ } else { \
+ if (RB_PARENT(elm, field) && \
+ (elm == RB_LEFT(RB_PARENT(elm, field), field))) \
+ elm = RB_PARENT(elm, field); \
+ else { \
+ while (RB_PARENT(elm, field) && \
+ (elm == RB_RIGHT(RB_PARENT(elm, field), field)))\
+ elm = RB_PARENT(elm, field); \
+ elm = RB_PARENT(elm, field); \
+ } \
+ } \
+ return (elm); \
+} \
+ \
+/* ARGSUSED */ \
+attr struct type * \
+name##_RB_PREV(struct type *elm) \
+{ \
+ if (RB_LEFT(elm, field)) { \
+ elm = RB_LEFT(elm, field); \
+ while (RB_RIGHT(elm, field)) \
+ elm = RB_RIGHT(elm, field); \
+ } else { \
+ if (RB_PARENT(elm, field) && \
+ (elm == RB_RIGHT(RB_PARENT(elm, field), field))) \
+ elm = RB_PARENT(elm, field); \
+ else { \
+ while (RB_PARENT(elm, field) && \
+ (elm == RB_LEFT(RB_PARENT(elm, field), field)))\
+ elm = RB_PARENT(elm, field); \
+ elm = RB_PARENT(elm, field); \
+ } \
+ } \
+ return (elm); \
+} \
+ \
+attr struct type * \
+name##_RB_MINMAX(struct name *head, int val) \
+{ \
+ struct type *tmp = RB_ROOT(head); \
+ struct type *parent = NULL; \
+ while (tmp) { \
+ parent = tmp; \
+ if (val < 0) \
+ tmp = RB_LEFT(tmp, field); \
+ else \
+ tmp = RB_RIGHT(tmp, field); \
+ } \
+ return (parent); \
+}
+
+#define RB_NEGINF -1
+#define RB_INF 1
+
+#define RB_INSERT(name, x, y) name##_RB_INSERT(x, y)
+#define RB_REMOVE(name, x, y) name##_RB_REMOVE(x, y)
+#define RB_FIND(name, x, y) name##_RB_FIND(x, y)
+#define RB_NFIND(name, x, y) name##_RB_NFIND(x, y)
+#define RB_NEXT(name, x, y) name##_RB_NEXT(y)
+#define RB_PREV(name, x, y) name##_RB_PREV(y)
+#define RB_MIN(name, x) name##_RB_MINMAX(x, RB_NEGINF)
+#define RB_MAX(name, x) name##_RB_MINMAX(x, RB_INF)
+
+#define RB_FOREACH(x, name, head) \
+ for ((x) = RB_MIN(name, head); \
+ (x) != NULL; \
+ (x) = name##_RB_NEXT(x))
+
+#define RB_FOREACH_FROM(x, name, y) \
+ for ((x) = (y); \
+ ((x) != NULL) && ((y) = name##_RB_NEXT(x), (x) != NULL); \
+ (x) = (y))
+
+#define RB_FOREACH_SAFE(x, name, head, y) \
+ for ((x) = RB_MIN(name, head); \
+ ((x) != NULL) && ((y) = name##_RB_NEXT(x), (x) != NULL); \
+ (x) = (y))
+
+#define RB_FOREACH_REVERSE(x, name, head) \
+ for ((x) = RB_MAX(name, head); \
+ (x) != NULL; \
+ (x) = name##_RB_PREV(x))
+
+#define RB_FOREACH_REVERSE_FROM(x, name, y) \
+ for ((x) = (y); \
+ ((x) != NULL) && ((y) = name##_RB_PREV(x), (x) != NULL); \
+ (x) = (y))
+
+#define RB_FOREACH_REVERSE_SAFE(x, name, head, y) \
+ for ((x) = RB_MAX(name, head); \
+ ((x) != NULL) && ((y) = name##_RB_PREV(x), (x) != NULL); \
+ (x) = (y))
+
+#endif
diff --git a/utils.c b/utils.c @@ -0,0 +1,25 @@
+/* See LICENSE file for copyright and license details. */
+
+#include "data.h"
+
+void *
+xmalloc(size_t size)
+{
+ void *p;
+
+ p = malloc(size);
+ if (!p)
+ err(1, "malloc");
+ return p;
+}
+
+void *
+xrealloc(void *ptr, size_t size)
+{
+ void *p;
+
+ p = realloc(ptr, size);
+ if (!p)
+ err(1, "realloc");
+ return p;
+}
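
The red-black tree half of tree.h is instantiated per node type: RB_ENTRY() embeds the linkage fields in the node struct, RB_HEAD() declares the tree type, RB_GENERATE() emits the worker functions, and the RB_INSERT/RB_FIND/RB_FOREACH wrappers dispatch to them by token pasting. A minimal usage sketch, not part of this commit; struct node, node_cmp and node_tree are hypothetical names:

#include <stdio.h>
#include "tree.h"

/* Hypothetical node type; the "entry" field embeds the
 * left/right/parent pointers and the color bit. */
struct node {
        int key;
        RB_ENTRY(node) entry;
};

/* Comparison callback: a total order with a strcmp()-style result. */
static int
node_cmp(struct node *a, struct node *b)
{
        return (a->key < b->key) ? -1 : (a->key > b->key);
}

RB_HEAD(node_tree, node);
RB_GENERATE(node_tree, node, entry, node_cmp)

int
main(void)
{
        struct node_tree head = RB_INITIALIZER(&head);
        struct node nodes[3] = { { 2 }, { 0 }, { 1 } };
        struct node *n;
        int i;

        /* node_tree_RB_INSERT() returns NULL on success and the
         * existing node when the key is already present. */
        for (i = 0; i < 3; i++)
                RB_INSERT(node_tree, &head, &nodes[i]);

        /* Visits the nodes in ascending key order: prints 0 1 2 */
        RB_FOREACH(n, node_tree, &head)
                printf("%d\n", n->key);
        return 0;
}

Note that the macros never allocate; the caller owns the nodes, which is why RB_REMOVE hands the unlinked element back for the caller to free.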