diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..b964f61
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,30 @@
+.*.swp
+*.o
+*.a
+*.gcda
+*.gcno
+.dirstamp
+.deps/
+Makefile
+Makefile.in
+/config*
+m4/
+/docs/
+/INSTALL
+coverage.base
+coverage.run
+coverage.info
+coveragereport/
+*.m4
+/autom4te.cache/
+/compile
+/depcomp
+/docs/
+/install-sh
+/libtool
+/ltmain.sh
+/missing
+stamp-h1
+src/taskrambler
+/tests/*Test
+gmon.out
diff --git a/Makefile.am b/Makefile.am
new file mode 100644
index 0000000..4d2db83
--- /dev/null
+++ b/Makefile.am
@@ -0,0 +1,14 @@
+## This is the shared library to be built
+lib_LTLIBRARIES = libmodentropy.la
+
+## Define the source files for the module
+libmodentropy_la_SOURCES = mod_entropy.c mod_entropy_get_entropy_bits.c \
+	mod_entropy_add_entropy.c
+libmodentropy_la_LDFLAGS = -lrt -lm
+
+install: libmodentropy.la
+	apxs -i -a -n entropy libmodentropy.la
+
+## Define that an include directory is required.
+#INCLUDES = -I@apache_dir@/include -I/usr/include/apr-1
+
diff --git a/mod_entropy.c b/mod_entropy.c
new file mode 100644
index 0000000..9153669
--- /dev/null
+++ b/mod_entropy.c
@@ -0,0 +1,264 @@
+/**
+ * This filter generates a SHA1 from the current microtime and request
+ * and uses it to fill the Linux random source.
+ *
+ * Inspired by timer_entropyd.
+ *
+ * ATTENTION: This module is not portable right now, as I don't know
+ * how to fill the random source on other systems. It is Linux only.
+ *
+ * Most of the time was spent figuring out how to write Apache modules.
+ *
+ * \author Georg Hopp
+ */
+#define _POSIX_C_SOURCE 199309L
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <math.h>
+#include <time.h>
+
+#include <fcntl.h>
+#include <unistd.h>
+#include <sys/ioctl.h>
+#include <linux/random.h>
+#include <httpd.h>
+#include <http_config.h>
+#include <util_filter.h>
+#include <apr_sha1.h>
+
+#define min(x, y) ((x)<(y)?(x):(y))
+
+
+module AP_MODULE_DECLARE_DATA entropy_module;
+
+char * getData(const char *, size_t);
+
+/**
+ * This is taken from timer_entropyd and modified so
+ * that the constant 1/log(2.0) is not calculated but
+ * set directly.
+ *
+ * As far as I can tell this corresponds to the Shannon
+ * entropy formula, applied to the observed byte frequencies,
+ * with the entropy measured in bits.
+ *
+ * But I am no mathematician and my analysis capabilities
+ * are limited. Additionally, I have not analysed the Linux random
+ * character device code, so I trusted the code in timer_entropyd.
+ */
+static
+int
+get_entropy(const unsigned char * data, size_t ndata)
+{
+    size_t byte_count[256];
+    size_t iterator;
+    static double log2inv = 1.442695; //!< 1 / log(2.0): the entropy unit size
+    double entropy = 0.0;
+
+    memset(byte_count, 0, sizeof(byte_count));
+
+    /**
+     * First count how often each byte value occurs in the array.
+     */
+    for (iterator = 0; iterator < ndata; iterator++) {
+        byte_count[data[iterator]]++;
+    }
+
+    for (iterator = 0; iterator < 256; iterator++) {
+        double probability = (double)byte_count[iterator] / (double)ndata;
+
+        if (0.0 < probability) {
+            entropy += probability * log2inv * (log(1.0 / probability));
+        }
+    }
+
+    entropy *= (double)ndata;
+    entropy = (entropy < 0.0) ? 0.0 : entropy;
+    entropy = min((double)(ndata * 8), entropy);
+
+    return entropy;
+}
+
+static
+int
+header_do_print(void * rec, const char * key, const char * value)
+{
+    apr_sha1_ctx_t * sha1_ctx = rec;
+
+    apr_sha1_update(sha1_ctx, value, strlen(value));
+
+    return 1;
+}
+
+static
+apr_status_t
+entropy_filter_in(
+        ap_filter_t * filter,
+        apr_bucket_brigade * brigade,
+        ap_input_mode_t mode,
+        apr_read_type_e block,
+        apr_off_t readbytes)
+{
+    apr_bucket * bucket;
+    apr_status_t status;
+    request_rec * request = filter->r;
+    conn_rec * connection = filter->c;
+    apr_sha1_ctx_t sha1_ctx;
+    unsigned char digest[APR_SHA1_DIGESTSIZE];
+
+    struct timespec ts;
+
+    clock_gettime(CLOCK_REALTIME, &ts);
+
+    apr_sha1_init(&sha1_ctx);
+
+    /**
+     * Add the current timestamp to the SHA1.
+     */
+    apr_sha1_update_binary(
+        &sha1_ctx,
+        (const unsigned char *)&ts,
+        sizeof(ts));
+
+    /**
+     * Add the client IP to the SHA1.
+     */
+    apr_sha1_update(
+        &sha1_ctx,
+        connection->client_ip,
+        strlen(connection->client_ip));
+
+    /**
+     * Add the request line to the SHA1.
+     */
+    apr_sha1_update(
+        &sha1_ctx,
+        request->the_request,
+        strlen(request->the_request));
+
+    /**
+     * Add all header values to the SHA1.
+     */
+    apr_table_do(header_do_print, &sha1_ctx, request->headers_in, NULL);
+
+    /**
+     * Get the request body and add it to the SHA1.
+     */
+    status = ap_get_brigade(filter->next, brigade, mode, block, readbytes);
+
+    if (status == APR_SUCCESS) {
+        for (
+            bucket = APR_BRIGADE_FIRST(brigade);
+            bucket != APR_BRIGADE_SENTINEL(brigade);
+            bucket = APR_BUCKET_NEXT(bucket)) {
+
+            if (!(APR_BUCKET_IS_METADATA(bucket))) {
+                const char * buffer;
+                apr_size_t nbuffer;
+
+                status = apr_bucket_read(
+                    bucket,
+                    &buffer,
+                    &nbuffer,
+                    APR_BLOCK_READ);
+
+                if (status == APR_SUCCESS) {
+                    apr_sha1_update(&sha1_ctx, buffer, nbuffer);
+                }
+            }
+        }
+    }
+
+    /**
+     * Get the SHA1 digest.
+     */
+    apr_sha1_final(digest, &sha1_ctx);
+
+    /**
+     * Fill /dev/random with the SHA1 of the current request.
+     */
+    {
+        int i;
+        int entropy = get_entropy(digest, APR_SHA1_DIGESTSIZE);
+        int fd = open("/dev/random", O_WRONLY|O_NONBLOCK);
+        struct rand_pool_info * output;
+
+        output = (struct rand_pool_info *)malloc(
+            sizeof(struct rand_pool_info) + APR_SHA1_DIGESTSIZE);
+
+        output->entropy_count = entropy;
+        output->buf_size = APR_SHA1_DIGESTSIZE;
+        memcpy(output->buf, digest, APR_SHA1_DIGESTSIZE);
+
+        fprintf(stderr, "sha1 so far: ");
+        for (i=0; i
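
The mod_entropy.c hunk is cut off above before the digest actually reaches the kernel (the hunk header announces 264 lines, only part of which is shown). For orientation, here is a minimal stand-alone sketch of the usual Linux mechanism the code appears to be building towards: the filled struct rand_pool_info is handed to the RNDADDENTROPY ioctl on /dev/random, which both mixes the bytes into the pool and credits the estimated entropy (a plain write() would mix the bytes but credit nothing). The function name feed_pool and its error handling are illustrative only and not taken from the patch.

    /*
     * Hedged sketch (not part of the patch above): feed an entropy estimate
     * plus payload to the kernel entropy pool.  Needs CAP_SYS_ADMIN at runtime.
     */
    #include <fcntl.h>
    #include <stdlib.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/random.h>

    int
    feed_pool(const unsigned char * data, int ndata, int entropy_bits)
    {
        struct rand_pool_info * info;
        int fd;
        int rc = -1;

        fd = open("/dev/random", O_WRONLY|O_NONBLOCK);
        if (fd < 0) {
            return -1;
        }

        /* rand_pool_info has a flexible buf[] member, so allocate header + payload */
        info = malloc(sizeof(struct rand_pool_info) + ndata);
        if (info) {
            info->entropy_count = entropy_bits;   /* bits credited to the pool */
            info->buf_size = ndata;               /* payload size in bytes */
            memcpy(info->buf, data, ndata);

            /* mix the bytes into the pool and credit the entropy estimate */
            rc = ioctl(fd, RNDADDENTROPY, info);
            free(info);
        }

        close(fd);
        return rc;
    }

In the patch above, a call along these lines would sit right after the memcpy() into output->buf, where the fprintf() debugging output starts; whether the unshown remainder of mod_entropy.c does exactly this cannot be verified from the truncated diff.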