implemented a very smooth frankenstein function
library.pyx (36 lines changed)
@@ -41,10 +41,6 @@ cdef public void predict(
     size_t batch_size
 ):
     pass
-    # try:
-    # return net(X)
-    # except Exception as e:
-    # print(e)


 cdef public void step_net(
@@ -71,7 +67,7 @@ cdef public void mnist_batch(float* batch, size_t bs):


 cdef public void create_c_network(Network* c_net):
-    net = _create_network()
+    net = create_network()
     c_net.n_layers = len(net.layers)
     c_net.layers = <Dense*>malloc(sizeof(Dense) * c_net.n_layers)
     for i, l in enumerate(net.layers):
@@ -85,14 +81,25 @@ cdef public void create_c_network(Network* c_net):
         c_net.layers[i].ownmem = 1


+cdef public void frankenstein(Network* c_frank, Network* c_nets,
+                              size_t num_nets):
+    """ONE-LINER HOW BOUT THAT HUH."""
+    combo_net(
+        wrap_c_network(c_frank),
+        [wrap_c_network(&c_nets[i]) for i in range(num_nets)]
+    )
+
+
 cdef public void be_like(Network* c_dst, Network* c_src):
+    """Conveniently transform one C network into another."""
     dst = wrap_c_network(c_dst)
     src = wrap_c_network(c_src)
     dst.be_like(src)


 cdef object wrap_c_network(Network* c_net):
-    net = _create_network(init=False)
+    """Create a thin wrapper not owning the memory."""
+    net = create_network(init=False)
     for i, l in enumerate(net.layers):
         d0, d1 = l.W.shape
         l.W = np.asarray(<float[:d0,:d1]>c_net.layers[i].W)
@@ -100,18 +107,23 @@ cdef object wrap_c_network(Network* c_net):
     return net


-cdef void inspect_array(
-    np.ndarray[np.float32_t, ndim=2, mode='c'] a
-):
+def inspect_array(a):
     print(a.flags, flush=True)
     print(a.dtype, flush=True)
     print(a.sum(), flush=True)


-def _create_network(init=True):
+def create_network(init=True):
     return mn.Network((784, 10), mn.relu, mn.sigmoid, mn.bin_x_entropy,
                       initialize=init)


-def combo_net(nets):
-    return mn.combo_net(nets)
+def combo_net(net, nets, alpha=None):
+    tot = len(nets)
+    if alpha is None:
+        alpha = [1 / tot] * tot
+    for l in net.layers:
+        l.set_weights(np.zeros_like(t) for t in l.trainables())
+    for n, a in zip(nets, alpha):
+        for la, lb in zip(n.layers, net.layers):
+            lb.update(t * a for t in la.trainables())
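
For context on the new combo_net: it zeroes the target network's trainables, then accumulates each source network's trainables scaled by its alpha, i.e. a weighted average that is uniform by default. A minimal NumPy sketch of that arithmetic, with made-up example arrays standing in for the mn.Network layers:

    import numpy as np

    # Two stand-in "networks", each reduced to a single trainable array (illustrative values only).
    nets = [np.array([1.0, 2.0]), np.array([3.0, 6.0])]

    tot = len(nets)
    alpha = [1 / tot] * tot            # uniform mixing weights, the default in combo_net

    combined = np.zeros_like(nets[0])  # zero the target first, like l.set_weights(zeros)
    for w, a in zip(nets, alpha):
        combined += w * a              # accumulate scaled weights, like lb.update(t * a)

    print(combined)                    # [2. 4.] -- the element-wise average of the two nets
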
main.c (39 lines changed)
@@ -1,10 +1,9 @@
-#include <Python.h>
+#include "library.h"

 #include <stdio.h>
 #include <stdlib.h>
 #include <mpi.h>

-#include "library.h"

 #define P_READER 0
 #define P_SLAVE 1
 #define P_MASTER 2
@@ -19,18 +18,21 @@ typedef enum{
     MASTER
 } Role;


-// Reads some data and converts it to 2D float array
 void data_reader() {
+    // Reads some data and converts it to a float array
+    printf("Start reader\n");
     size_t batch_numel = (784 + 10) * BS;
     float* batch = malloc(batch_numel * sizeof(float));
     while (1) {
         mnist_batch(batch, BS);
         MPI_Send(batch, batch_numel, MPI_FLOAT, P_SLAVE, 0, MPI_COMM_WORLD);
     }
+    free(batch);
 }

 void send_network(const Network* c_net, int dest, int tag) {
+    // Send a network to the expecting destination
+    // It's best to receive with `recv_network`
     size_t n_layers = c_net->n_layers;
     MPI_Send(&n_layers, 1, MPI_LONG, dest, tag, MPI_COMM_WORLD);
     for (size_t i = 0; i < n_layers; i++) {
@@ -46,7 +48,7 @@ void send_network(const Network* c_net, int dest, int tag) {
 }

 void recv_network(Network* c_net, int src, int tag) {
-    // Creates a new network at c_net
+    // Creates a new network at c_net (all pointers will be lost so beware)
     MPI_Recv(&c_net->n_layers, 1, MPI_LONG, src, tag, MPI_COMM_WORLD,
              MPI_STATUS_IGNORE);
     c_net->layers = malloc(sizeof(Dense) * c_net->n_layers);
@@ -66,6 +68,7 @@ void recv_network(Network* c_net, int src, int tag) {
 }

 void free_network_contents(Network* c_net) {
+    // Cleans up the net
     for (size_t i = 0; i < c_net->n_layers; i++) {
         if (c_net->layers[i].ownmem) {
             free(c_net->layers[i].b);
@@ -73,10 +76,12 @@ void free_network_contents(Network* c_net) {
         }
     }
     free(c_net->layers);
+    c_net->layers = NULL;  // So that you don't get any ideas
 }

 // Receives weight updates and trains, sends learned weights back to master
 void slave_node() {
+    printf("Start slave\n");
     Network net;
     create_c_network(&net);

@@ -102,6 +107,7 @@ void slave_node() {

 // Stores most up-to-date model, sends it to slaves for training
 void master_node() {
+    printf("Start master\n");
     Network frank;
     create_c_network(&frank);
     for (int i = 0; i < COMM; i++) {
@@ -109,7 +115,7 @@ void master_node() {
         MPI_Send(&go, 1, MPI_CHAR, P_SLAVE, 0, MPI_COMM_WORLD);
         Network net;
         recv_network(&net, P_SLAVE, MPI_ANY_TAG);
-        be_like(&frank, &net);
+        frankenstein(&frank, &net, 1);
         free_network_contents(&net);
         printf("Frank: %f\n", eval_net(&frank));
     }
@@ -119,10 +125,11 @@ void master_node() {
 Role map_node() {
     int node;
     MPI_Comm_rank(MPI_COMM_WORLD, &node);
-    if (node == 0) return DATA;
-    if (node == 1) return SLAVE;
-    if (node == 2) return MASTER;
-    return SLAVE;
+    if (node == P_READER) return DATA;
+    if (node == P_MASTER) return MASTER;
+    if (node == P_SLAVE) return SLAVE;
+    exit(1);  // this is bad
 }

 int main (int argc, const char **argv) {
@@ -131,18 +138,14 @@ int main (int argc, const char **argv) {
     // Cython Boilerplate
     PyImport_AppendInittab("library", PyInit_library);
     Py_Initialize();
-    // import_array();
     PyRun_SimpleString("import sys\nsys.path.insert(0,'')");
     PyObject* library_module = PyImport_ImportModule("library");

     // Actual Code
     switch (map_node()) {
-        case DATA: data_reader();
-            break;
-        case SLAVE: slave_node();
-            break;
-        case MASTER: master_node();
-            break;
+        case DATA: data_reader(); break;
+        case SLAVE: slave_node(); break;
+        case MASTER: master_node(); break;
     }

     // Finalizing Boilerplate
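
For context on the C side: main.c expects three MPI ranks, mapped by map_node() through the P_READER/P_SLAVE/P_MASTER defines (0/1/2), so a launch along the lines of mpirun -np 3 ./main fits the layout. Below is a rough mpi4py sketch of that same rank-to-role mapping, purely for illustration; mpi4py is an assumption here and is not used by the project, which embeds CPython in C instead:

    from mpi4py import MPI

    P_READER, P_SLAVE, P_MASTER = 0, 1, 2    # mirrors the #defines in main.c

    rank = MPI.COMM_WORLD.Get_rank()
    if rank == P_READER:
        role = "reader"      # streams MNIST batches to the slave
    elif rank == P_SLAVE:
        role = "slave"       # trains and sends its network to the master
    elif rank == P_MASTER:
        role = "master"      # merges received networks via frankenstein()
    else:
        raise SystemExit(1)  # same fallback as the new exit(1) in map_node()
    print(f"rank {rank} -> {role}")
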