diff --git a/.gitignore b/.gitignore
index 75e5137..0c6c49b 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,8 +1,8 @@
 .*.sw?
 DS_Store
-library.c
-library.h
 run
 compile_commands.json
+*.txt
 build/
+cythoned/
 __pycache__/
diff --git a/library.pyx b/bridge.pyx
similarity index 51%
rename from library.pyx
rename to bridge.pyx
index 92a02bc..1e801d3 100644
--- a/library.pyx
+++ b/bridge.pyx
@@ -3,13 +3,14 @@
 import numpy as np
 from sys import stderr
 
-from libc.stdlib cimport malloc
+from libc.stdlib cimport malloc, realloc
 from libc.string cimport memcpy
 
-import nn
+import library as nn
 
 X_train, y_train, X_test, y_test = nn.load_mnist()
+tokenizers = {}
 
 
 cdef extern from "numpy/arrayobject.h":
@@ -27,16 +28,62 @@ ctypedef public struct WeightList:
     Weight* weights;
 
 
+ctypedef public struct Word:
+    size_t mem
+    char* data
+
+
+ctypedef public struct WordList:
+    size_t mem
+    size_t n_words
+    Word* words
+
+
 cdef public char *greeting():
     return f'The value is {3**3**3}'.encode('utf-8')
 
 
+cdef public int get_tokens(WordList* wl, const char *filename):
+    fnu = filename.decode('utf-8')
+    if fnu not in tokenizers:
+        tokenizers[fnu] = nn.token_generator(fnu)
+    g = tokenizers[fnu]
+    try:
+        words = next(g)
+    except StopIteration:
+        return 0
+    words_into_wordlist(wl, words)
+    return 1
+
+
+cdef public long vocab_idx_of(Word* w):
+    word = w.data.decode('utf-8')
+    if word.lower() in nn.vocab:
+        return nn.vocab.index(word.lower())
+    else:
+        return -1
+
+
+cdef public void c_onehot(float* y, float* idxs, size_t n_idx):
+    oh = nn.onehot(np.asarray(idxs))
+    ensure_contiguous(oh)
+    memcpy(y, PyArray_DATA(oh), oh.size * sizeof(float))
+
+
+cdef public void c_slices(float* X, float* idxs, size_t bs, size_t win):
+    # Row r holds the win context indices left and right of centre word
+    # r+win; the centre word itself is skipped.
+    X_np = np.asarray(X)
+    idxs_np = np.asarray(idxs)
+    for r in range(bs):
+        X_np[r, :win] = idxs_np[r:r+win]
+        X_np[r, win:] = idxs_np[r+win+1:r+2*win+1]
+
+
 cdef public void debug_print(object o):
-    print(o)
+    eprint(o)
 
 
-cdef public object create_network():
-    return nn.create_mnist_network()
+cdef public object create_network(int win, int embed):
+    return nn.create_cbow_network(win, len(nn.vocab), embed)
 
 
 cdef public void set_net_weights(object net, WeightList* wl):
@@ -46,16 +93,20 @@ cdef public void set_net_weights(object net, WeightList* wl):
 cdef public void step_net(
     object net, float* X, float* y, size_t batch_size
 ):
-    in_shape = (batch_size,) + net.layers[0].input_shape[1:]
-    out_shape = (batch_size,) + net.layers[-1].output_shape[1:]
+    in_shape = (batch_size,) + net.input_shape[1:]
+    out_shape = (batch_size,) + net.output_shape[1:]
     X_train = np.asarray(X).reshape(in_shape)
     y_train = np.asarray(y).reshape(out_shape)
 
     net.train_on_batch(X_train, y_train)
 
 
+cdef public size_t out_size(object net):
+    return np.prod(net.output_shape[1:])
+
+
 cdef public float eval_net(object net):
-    return net.evaluate(X_test, y_test, verbose=False)[1]
+    return net.evaluate(X_test, y_test, verbose=False)
 
 
 cdef public void mnist_batch(float* X, float* y, size_t bs,
@@ -74,8 +125,8 @@ cdef public void mnist_batch(float* X, float* y, size_t bs,
     assert X_r.flags['C_CONTIGUOUS']
     assert y_r.flags['C_CONTIGUOUS']
 
-    memcpy(X, PyArray_DATA(X_r), X_r.size * sizeof(float))
-    memcpy(y, PyArray_DATA(y_r), y_r.size * sizeof(float))
+    memcpy(X, PyArray_DATA(X_r), X_r.size * sizeof(float))
+    memcpy(y, PyArray_DATA(y_r), y_r.size * sizeof(float))
 
 
 cdef public void init_weightlist_like(WeightList* wl, object net):
         wl.weights[i].W = malloc(sizeof(float) * w.size)
 
         assert sh.flags['C_CONTIGUOUS']
-        memcpy(wl.weights[i].shape, PyArray_DATA(sh),
-               sh.size * sizeof(long))
+        memcpy(wl.weights[i].shape, PyArray_DATA(sh), sh.size * sizeof(long))
 
 
 cdef public void update_weightlist(WeightList* wl, object net):
@@ -99,8 +149,7 @@ cdef public void update_weightlist(WeightList* wl, object net):
         w = w.astype(np.float32)
 
         assert w.flags['C_CONTIGUOUS']
-        memcpy(wl.weights[i].W, PyArray_DATA(w),
-               w.size * sizeof(float))
+        memcpy(wl.weights[i].W, PyArray_DATA(w), w.size * sizeof(float))
 
 
 cdef public void combo_weights(
@@ -127,7 +176,36 @@ cdef list wrap_weight_list(WeightList* wl):
     return weights
 
 
+cdef void words_into_wordlist(WordList* wl, list words):
+    if wl.mem < len(words):
+        old = wl.mem
+        wl.mem = len(words)
+        wl.words = realloc(wl.words, wl.mem * sizeof(Word))
+        for i in range(old, wl.mem):
+            wl.words[i].mem = 0
+            wl.words[i].data = 0
+
+    wl.n_words = len(words)
+    for i, w in enumerate(words):
+        wenc = w.encode('utf-8')
+        if wl.words[i].mem < len(wenc) + 1:
+            wl.words[i].mem = len(wenc) + 1
+            wl.words[i].data = realloc(
+                wl.words[i].data, wl.words[i].mem * sizeof(char)
+            )
+        memcpy(wl.words[i].data, wenc, len(wenc) * sizeof(char))
+        wl.words[i].data[len(wenc)] = 0
+
+
 def inspect_array(a):
     print(a.flags, flush=True)
     print(a.dtype, flush=True)
     print(a.sum(), flush=True)
+
+
+def ensure_contiguous(a):
+    assert a.flags['C_CONTIGUOUS']
+
+
+def eprint(*args, **kwargs):
+    return print(*args, flush=True, **kwargs)
diff --git a/library.py b/library.py
new file mode 100644
index 0000000..b7cbda7
--- /dev/null
+++ b/library.py
@@ -0,0 +1,54 @@
+import os
+os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
+
+import tensorflow as tf
+tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)  # STFU!
+# from nltk.corpus import stopwords
+# from nltk.tokenize import word_tokenize
+from mynet import load_mnist, onehot
+
+
+def word_tokenize(s: str):
+    l = ''.join(c if c.isalpha() else ' ' for c in s)
+    return l.split()
+
+
+HERE = os.path.abspath(os.path.dirname(__file__))
+CORPUS = os.path.join(HERE, 'melville-moby_dick.txt')
+# sw = set(stopwords.words('english'))
+sw = ['the']
+vocab = list(set(
+    w.lower() for w in word_tokenize(open(CORPUS).read())
+    if w.isalpha() and not w.lower() in sw
+))
+
+
+def create_mnist_network():
+    model = tf.keras.models.Sequential([
+        tf.keras.layers.Dense(30, input_shape=(784,), activation='relu'),
+        tf.keras.layers.Dense(10, activation='softmax')
+    ])
+    model.compile(loss='categorical_crossentropy', optimizer='sgd',
+                  metrics=['accuracy'])
+    return model
+
+
+def create_cbow_network(win, vocab, embed):
+    ctxt = tf.keras.layers.Input(shape=[win])
+    ed = tf.keras.layers.Embedding(vocab, embed, input_length=win)(ctxt)
+    avgd = tf.keras.layers.Lambda(lambda x: tf.reduce_mean(x, axis=1))(ed)
+    mod = tf.keras.Model(inputs=ctxt, outputs=avgd)
+    mod.compile(
+        optimizer='sgd',
+        loss='categorical_crossentropy',
+    )
+    return mod
+
+
+def token_generator(filename):
+    with open(filename) as f:
+        for l in f.readlines(500):
+            if not l.isspace():
+                tok = word_tokenize(l)
+                if tok:
+                    yield tok
diff --git a/main.c b/main.c
index 3eb8d78..174866d 100644
--- a/main.c
+++ b/main.c
@@ -1,7 +1,8 @@
-#include "cythoned/library.h"
+#include "cythoned/bridge.h"
 #include <mpi.h>
 #include <stdio.h>
+#include <string.h>
 #include <stdlib.h>
 
 #define TAG_IDGAF 0
@@ -10,10 +11,15 @@
 #define TAG_WEIGH 3
 #define TAG_READY 4
 #define TAG_BREAK 5
+#define TAG_STLEN 6
+#define TAG_SWORD 7
+#define TAG_IWORD 8
 
 #define COMM 100
 #define ITER 20
 #define BS 50
+#define EMB 20
+#define WIN 2
 #define FSPC 1
 
 #define in_range(i, x) (size_t (i) = 0; (i) < (x); (i)++)
@@ -24,109 +30,75 @@ #define INFO_PRINTLN(what) \
     do { fprintf(stderr, "%s\n", what); } while(0)
 
+// char_stream -> tokenize -> word_stream -> filter + batch -> slave network
 typedef enum{
-    DATA,
+    TOKENIZER,
+    FILTERER,
+    BATCHER,
     SLAVE,
     MASTER
 } Role;
 
-typedef struct IntQueue IntQueue;
-struct IntQueue {
-    int head;
-    int tail;
-    size_t size;
-    int* data;
-};
-
-void queue_from_size(IntQueue* q, size_t s) {
-    q->data = malloc(s * sizeof(int));
-    q->size = s+1;
-    q->head = 0;
-    q->tail = 0;
-}
-
-void push_queue(IntQueue *q, int d) {
-    // Assuming queue is not full
-    q->data[q->tail] = d;
-    q->tail = (q->tail + 1) % q->size;
-}
-
-int pop_queue(IntQueue *q) {
-    int d = q->data[q->head];
-    q->head = (q->head + 1) % q->size;
-    return d;
-}
-
-int queue_empty(IntQueue *q) {
-    return q->head == q->tail;
-}
-
-int queue_full(IntQueue *q) {
-    return ((q->tail + 1) % q->size) == q->head;
-}
-
-int number_of_nodes() {
+int world_size() {
     int n;
     MPI_Comm_size(MPI_COMM_WORLD, &n);
     return n;
 }
 
-int number_of_masters() {
-    return 1;
-}
-
-int number_of_readers() {
-    return 1;
-}
-
-int number_of_slaves() {
-    return number_of_nodes() - number_of_masters() - number_of_readers();
-}
-
-int my_id() {
+int my_mpi_id() {
     int i;
     MPI_Comm_rank(MPI_COMM_WORLD, &i);
     return i;
 }
 
-int master_id(int m) {
-    return m;
+size_t number_of(Role what) {
+    switch (what) {
+        case TOKENIZER:
+            return 1;
+        case FILTERER:
+            return 1;
+        case BATCHER:
+            return 1;
+        case SLAVE:
+            return world_size()
+                - number_of(TOKENIZER)
+                - number_of(FILTERER)
+                - number_of(BATCHER)
+                - number_of(MASTER);
+        case MASTER:
+            return 1;
+    }
 }
 
-int reader_id(int r) {
-    return r + number_of_masters();
+int mpi_id_from_role_id(Role role, int rid) {
+    int base = 0;
+    for (Role r = TOKENIZER; r < role; r++) {
+        base += number_of(r);
+    }
+    return rid + base;
 }
 
-int slave_id(int s) {
-    return s + number_of_masters() + number_of_readers();
+int role_id_from_mpi_id(Role role, int mid) {
+    int z = mpi_id_from_role_id(role, 0);
+    int rid = mid - z;
+    if (rid >= number_of(role) || rid < 0) {
+        INFO_PRINTF("%d is not a %d\n", mid, role);
+        exit(1);
+    }
+    return rid;
 }
 
 Role map_node() {
-    int node;
-    MPI_Comm_rank(MPI_COMM_WORLD, &node);
-    if (node >= reader_id(0) && node <= reader_id(number_of_readers()-1)) {
-        return DATA;
-    }
-    if (node >= master_id(0) && node <= master_id(number_of_masters()-1)) {
-        return MASTER;
-    }
-    if (node >= slave_id(0) && node <= slave_id(number_of_slaves()-1)) {
-        return SLAVE;
+    int node = my_mpi_id();
+    size_t base = 0;
+    for (Role r = TOKENIZER; r <= MASTER; r++) {
+        if (node < number_of(r) + base) return r;
+        base += number_of(r);
     }
     exit(1); // this is bad
 }
 
-int rid(int id, Role what) {
-    int z;
-    switch (what) {
-        case DATA: z = reader_id(0); break;
-        case SLAVE: z = slave_id(0); break;
-        case MASTER: z = master_id(0); break;
-    }
-    return id - z;
-}
-
 void free_weightlist(WeightList* wl) {
     for in_range(i, wl->n_weights) {
         free(wl->weights[i].shape);
@@ -135,27 +107,92 @@ void free_weightlist(WeightList* wl) {
     free(wl->weights);
 }
 
-void data_reader() {
-    // Reads some data and converts it to a float array
-    INFO_PRINTF("Starting reader %d\n", getpid());
+void free_word(Word* w) {
+    free(w->data);
+    w->data = NULL;
+    w->mem = 0;
+}
 
-    size_t X_numel = 784 * BS;
-    size_t y_numel = 10 * BS;
-    float* X = malloc(X_numel * sizeof(float));
-    float* y = malloc(y_numel * sizeof(float));
+void free_wordlist(WordList* wl) {
+    for in_range(i, wl->mem) {
+        free_word(wl->words + i);
+    }
+    free(wl->words);
+    wl->words = NULL;
+    wl->n_words = 0;
+}
+
+// Word protocol: the length goes first (TAG_STLEN), then the
+// NUL-terminated bytes (TAG_SWORD).
+void send_word(Word* w, int dest) {
+    long len = strlen(w->data);
+    MPI_Send(&len, 1, MPI_LONG, dest, TAG_STLEN, MPI_COMM_WORLD);
+    MPI_Send(w->data, len + 1, MPI_CHAR, dest, TAG_SWORD, MPI_COMM_WORLD);
+}
+
+void recv_word(Word* w, int src) {
+    long len;
+    MPI_Recv(&len, 1, MPI_LONG, src, TAG_STLEN, MPI_COMM_WORLD,
+             MPI_STATUS_IGNORE);
+    if (w->mem < len + 1) {
+        w->mem = len + 1;
+        w->data = realloc(w->data, sizeof(char) * w->mem);
+    }
+    MPI_Recv(w->data, len + 1, MPI_CHAR, src, TAG_SWORD, MPI_COMM_WORLD,
+             MPI_STATUS_IGNORE);
+}
+
+void tokenizer(const char* source) {
+    WordList wl = {0, 0, NULL};
+    while (get_tokens(&wl, source)) {
+        for in_range(i, wl.n_words) {
+            send_word(&wl.words[i], mpi_id_from_role_id(FILTERER, 0));
+            // printf("OI %s\n", wl.words[i].data);
+        }
+        // INFO_PRINTLN("");
+    }
+    Word terminator = {0, ""};
+    send_word(&terminator, mpi_id_from_role_id(FILTERER, 0));
+    free_wordlist(&wl);
+}
+
+void filterer() {
+    Word w = {0, NULL};
+    while (1) {
+        recv_word(&w, mpi_id_from_role_id(TOKENIZER, 0));
+        if (!strlen(w.data)) {
+            break;
+        }
+        INFO_PRINTF("%s: ", w.data);
+        long idx = vocab_idx_of(&w);
+        INFO_PRINTF("%ld\n", idx);
+        // if (idx != -1) {
+        //     MPI_Send(&idx, 1, MPI_LONG, mpi_id_from_role_id(BATCHER, 0),
+        //              TAG_IWORD, MPI_COMM_WORLD);
+        // }
+    }
+    free_word(&w);
+}
+
+void batcher() {
+    // Reads some data and converts it to a float array
+    INFO_PRINTF("Starting batcher %d\n", getpid());
 
     int s = 0;
+    const size_t n_words = BS + WIN + WIN;
+    float* f_widx = malloc(n_words * sizeof(float));
     while (s != -1) {
+        for in_range(i, n_words) {
+            long l_wid;
+            MPI_Recv(&l_wid, 1, MPI_LONG, mpi_id_from_role_id(FILTERER, 0),
+                     TAG_IWORD, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
+            f_widx[i] = (float)l_wid;
+        }
         MPI_Recv(&s, 1, MPI_INT, MPI_ANY_SOURCE, TAG_READY, MPI_COMM_WORLD,
                  MPI_STATUS_IGNORE);
         if (s != -1) {
-            mnist_batch(X, y, BS, rid(s, SLAVE), number_of_slaves());
-            MPI_Send(X, X_numel, MPI_FLOAT, s, TAG_BATCH, MPI_COMM_WORLD);
-            MPI_Send(y, y_numel, MPI_FLOAT, s, TAG_BATCH, MPI_COMM_WORLD);
+            MPI_Send(f_widx, n_words, MPI_FLOAT, s, TAG_BATCH, MPI_COMM_WORLD);
         }
     }
-    free(X);
-    free(y);
+    free(f_widx);
 }
 
 void send_weights(const WeightList* wl, int dest, int tag) {
@@ -191,34 +228,39 @@ void slave_node() {
     // 3. Do computations
     // 4. Send weights back to master
     INFO_PRINTF("Starting slave %d\n", getpid());
+    int me = my_mpi_id();
 
-    int me;
-    MPI_Comm_rank(MPI_COMM_WORLD, &me);
-
-    size_t X_numel = 784 * BS;
-    size_t y_numel = 10 * BS;
-    float* X = malloc(X_numel * sizeof(float));
-    float* y = malloc(y_numel * sizeof(float));
-
-    PyObject* net = create_network();
+    PyObject* net = create_network(WIN, EMB);
     WeightList wl;
     init_weightlist_like(&wl, net);
 
+    size_t vocab = out_size(net);
+    size_t n_words = (BS + WIN + WIN);
+    size_t X_numel = BS * (WIN + WIN);
+    size_t y_numel = BS * vocab;
+
+    float* X = malloc(X_numel * sizeof(float));
+    float* y = malloc(y_numel * sizeof(float));
+    float* f_widx = malloc(n_words * sizeof(float));
+
     for in_range(i, COMM) {
-        MPI_Send(&me, 1, MPI_INT, master_id(0), TAG_READY, MPI_COMM_WORLD);
-        recv_weights(&wl, master_id(0), TAG_WEIGH);
+        MPI_Send(&me, 1, MPI_INT, mpi_id_from_role_id(MASTER, 0),
+                 TAG_READY, MPI_COMM_WORLD);
+        recv_weights(&wl, mpi_id_from_role_id(MASTER, 0), TAG_WEIGH);
         set_net_weights(net, &wl);
         for in_range(k, ITER) {
-            MPI_Send(&me, 1, MPI_INT, reader_id(0), TAG_READY, MPI_COMM_WORLD);
-            MPI_Recv(X, X_numel, MPI_FLOAT, reader_id(0), TAG_BATCH,
-                     MPI_COMM_WORLD, MPI_STATUS_IGNORE);
-            MPI_Recv(y, y_numel, MPI_FLOAT, reader_id(0), TAG_BATCH,
-                     MPI_COMM_WORLD, MPI_STATUS_IGNORE);
+            MPI_Send(&me, 1, MPI_INT, mpi_id_from_role_id(BATCHER, 0),
+                     TAG_READY, MPI_COMM_WORLD);
+            MPI_Recv(f_widx, n_words, MPI_FLOAT,
+                     mpi_id_from_role_id(BATCHER, 0), TAG_BATCH, MPI_COMM_WORLD,
+                     MPI_STATUS_IGNORE);
+            c_slices(X, f_widx, BS, WIN);
+            c_onehot(y, f_widx + WIN, BS);
             step_net(net, X, y, BS);
         }
-        printf("%d net: %f\n", my_id(), eval_net(net));
+        printf("%d net: %f\n", my_mpi_id(), eval_net(net));
        update_weightlist(&wl, net);
-        send_weights(&wl, master_id(0), TAG_WEIGH);
+        send_weights(&wl, mpi_id_from_role_id(MASTER, 0), TAG_WEIGH);
     }
     Py_DECREF(net);
     free_weightlist(&wl);
@@ -232,12 +274,12 @@ void master_node() {
     // 3. Average the weights
 
-    PyObject* frank = create_network();
+    PyObject* frank = create_network(WIN, EMB);
     WeightList wl;
     init_weightlist_like(&wl, frank);
     update_weightlist(&wl, frank);
 
-    int spr = number_of_slaves() * FSPC; // Slaves per round
+    int spr = number_of(SLAVE) * FSPC; // Slaves per round
     int s;
 
     WeightList *wls = malloc(sizeof(WeightList) * spr);
@@ -265,33 +307,40 @@ void master_node() {
     free_weightlist(&wl);
     for in_range(i, spr) free_weightlist(wls + i);
     free(wls);
-    if (rid(my_id(), MASTER) == 0) {
-        for in_range(r, number_of_readers()) {
-            int stop = -1;
-            MPI_Send(&stop, 1, MPI_INT, reader_id(r), TAG_READY,
-                     MPI_COMM_WORLD);
-        }
-    }
+    // if (role_id_from_mpi_id(my_mpi_id(), MASTER) == 0) {
+    //     for in_range(r, number_of(BATCHER)) {
+    //         int stop = -1;
+    //         MPI_Send(&stop, 1, MPI_INT, reader_id(r), TAG_READY,
+    //                  MPI_COMM_WORLD);
+    //     }
+    // }
 }
 
 
 int main (int argc, const char **argv) {
     MPI_Init(NULL, NULL);
 
     // Cython Boilerplate
-    PyImport_AppendInittab("library", PyInit_library);
+    PyImport_AppendInittab("bridge", PyInit_bridge);
     Py_Initialize();
     PyRun_SimpleString("import sys\nsys.path.insert(0,'')");
-    PyObject* library_module = PyImport_ImportModule("library");
+    PyObject* bridge_module = PyImport_ImportModule("bridge");
 
     // Actual Code
     switch (map_node()) {
-        case DATA: data_reader(); break;
-        case SLAVE: slave_node(); break;
-        case MASTER: master_node(); break;
+        case TOKENIZER:
+            tokenizer(argv[1]);
+            break;
+        case FILTERER:
+            filterer();
+            break;
+        default:
+            INFO_PRINTLN("DYING HORRIBLY!");
+            // case SLAVE: slave_node(); break;
+            // case MASTER: master_node(); break;
     }
 
     // Finalizing Boilerplate
-    Py_DECREF(library_module);
+    Py_DECREF(bridge_module);
     Py_Finalize();
     MPI_Finalize();
 }
diff --git a/meson.build b/meson.build
index 5df3f49..50f8270 100644
--- a/meson.build
+++ b/meson.build
@@ -12,7 +12,7 @@ numpy_header = include_directories(run_command(
 ).stdout().strip())
 
 executable(
-    'fedavg_mpi', 'main.c', 'cythoned/library.c',
+    'fedavg_mpi', 'main.c', 'cythoned/bridge.c',
     dependencies: [mpi, python],
     include_directories: numpy_header,
     link_args: '-Wl,-w'
diff --git a/nn.py b/nn.py
deleted file mode 100644
index b70201b..0000000
--- a/nn.py
+++ /dev/null
@@ -1,16 +0,0 @@
-import os
-os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
-
-import tensorflow as tf
-tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)  # STFU!
-from mynet import load_mnist
-
-
-def create_mnist_network():
-    model = tf.keras.models.Sequential([
-        tf.keras.layers.Dense(30, input_shape=(784,), activation='relu'),
-        tf.keras.layers.Dense(10, activation='softmax')
-    ])
-    model.compile(loss='categorical_crossentropy', optimizer='sgd',
-                  metrics=['accuracy'])
-    return model