well done sending a network over mpi, pat pat
now do a lot of nets in parallel and then we'll talk
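Context for that follow-up: the `send_network` / `recv_network` / `free_network_contents` helpers added in main.c below move exactly one network between the single slave and the master. A minimal sketch of how the same per-layer protocol could fan out to several worker ranks follows; the multi-worker loop, the rank layout, and the merge step are assumptions about the next step, not part of this commit, and the snippet assumes the `Network` type and helpers defined further down in main.c.

    /* Hypothetical extension, not in this commit: reuse the per-layer
     * send/recv protocol for many worker ranks at once.
     * Assumes ranks 2..world_size-1 act as workers (rank layout assumed). */
    void collect_from_workers(Network* current, int world_size) {
        for (int w = 2; w < world_size; w++)
            send_network(current, w, 0);            /* fan the model out */

        for (int w = 2; w < world_size; w++) {
            Network update;
            recv_network(&update, w, MPI_ANY_TAG);  /* gather trained copies */
            /* merge `update` into `current` here, e.g. average the weights */
            free_network_contents(&update);
        }
    }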
library.pyx (67 lines changed)
--- a/library.pyx
+++ b/library.pyx
@@ -2,25 +2,38 @@ cimport numpy as np
 import numpy as np
 import mynet as mn
 
+from libc.stdlib cimport malloc
+
 
 ctr = []
 X_train, y_train, X_test, y_test = mn.load_mnist()
 
 
+cdef extern from "numpy/arrayobject.h":
+    object PyArray_SimpleNewFromData(
+        int nd, long* dims, int typenum, void* data
+    )
+    void *PyArray_DATA(np.ndarray arr)
+
+
+ctypedef public struct Dense:
+    long[2] shape
+    int ownmem
+    float* W
+    float* b
+
+
+ctypedef public struct Network:
+    Py_ssize_t n_layers;
+    Dense* layers;
+
+
 cdef public char * greeting():
     return f'The value is {3**3**3}'.encode('utf-8')
 
 
 cdef public void debug_print(object o):
     print(o.flags)
-    # print(o)
 
 
-cdef public np.ndarray[np.float32_t, ndim=2, mode='c'] dot(
-    np.ndarray[np.float32_t, ndim=2, mode='c'] x,
-    np.ndarray[np.float32_t, ndim=2, mode='c'] y
-):
-    return x @ y
-
-
 cdef public np.ndarray[np.float32_t, ndim=2, mode='c'] predict(
@@ -66,7 +79,41 @@ cdef public np.ndarray[np.float32_t, ndim=2, mode='c'] mnist_batch(
     arr = np.concatenate([X_train[idx], y_train[idx]], axis=1)
     return arr
 
-cdef public float arrsum(
+cdef public void inspect_array(
     np.ndarray[np.float32_t, ndim=2, mode='c'] a
 ):
-    return np.sum(a)
+    print(a.flags)
+    print(a.dtype)
+    print(a.sum())
+
+
+cdef public void be_like_cified(
+    object net,
+    Network* c_net
+):
+    """WARNING this function makes an assumption that `net` and `c_net`
+    have the same shape and hopefully is going to crash horribly otherwise."""
+    for i, l in enumerate(net.layers):
+        w1, w2 = l.W.shape
+        l.W[:] = <float[:w1,:w2]>c_net.layers[i].W
+        l.b[:] = <float[:w2]>c_net.layers[i].b
+
+
+cdef public void cify_network(
+    object net, Network* c_net
+):
+    """WARNING `c_net` is valid as long as `net` is
+
+    Whoever has `c_net` is responsible for freeing c_net.layers list
+    Layers themselves don't need any de-init.
+    """
+    c_net.n_layers = len(net.layers)
+    c_net.layers = <Dense*>malloc(len(net.layers) * sizeof(Dense))
+    for i, l in enumerate(net.layers):
+        w1, w2 = l.W.shape
+        c_net.layers[i].shape[0] = w1
+        c_net.layers[i].shape[1] = w2
+        c_net.layers[i].W = <float*>PyArray_DATA(l.W)
+        c_net.layers[i].b = <float*>PyArray_DATA(l.b)
+        c_net.layers[i].ownmem = 0
+
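A note on the memory model above: `cify_network` only borrows the numpy buffers (it sets `ownmem = 0`, and the struct is valid only while the Python net is alive), while `recv_network` in main.c below fills the same struct with freshly malloc'd copies and sets `ownmem = 1`, so `free_network_contents` frees `W`/`b` only in the second case. A short usage sketch of that rule; the `ownership_example` function, the `py_net` name, and the literal ranks are illustration-only assumptions, and the snippet assumes it lives next to the helpers in main.c.

    /* Sketch only; assumes the Network/Dense structs and helpers from this commit. */
    void ownership_example(PyObject* py_net) {
        Network borrowed;
        cify_network(py_net, &borrowed);        /* W/b point into numpy data, ownmem == 0 */
        send_network(&borrowed, 2, 0);          /* dest rank 2 assumed */
        free_network_contents(&borrowed);       /* frees only the layers array */

        Network owned;
        recv_network(&owned, 1, MPI_ANY_TAG);   /* src rank 1 assumed; mallocs W/b, ownmem == 1 */
        free_network_contents(&owned);          /* frees each W and b, then the layers array */
    }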
main.c (103 lines changed)
--- a/main.c
+++ b/main.c
@@ -11,14 +11,21 @@
 #define P_SLAVE 1
 #define P_MASTER 2
 
-#define COMM 100
-#define ITER 20
-#define BS 50
+#define COMM 50
+#define ITER 32
+#define BS 32
 
+typedef enum{
+    DATA,
+    SLAVE,
+    MASTER
+} Role;
+
+
 // Reads some data and converts it to 2D float array
 void data_reader() {
     while (1) {
-        PyArrayObject* batch = mnist_batch(10);
+        PyArrayObject* batch = mnist_batch(BS);
 
         long* shape = PyArray_SHAPE(batch);
         MPI_Send(shape, 2, MPI_LONG, P_SLAVE, 0, MPI_COMM_WORLD);
@@ -28,6 +35,51 @@ void data_reader() {
     }
 }
 
+void send_network(Network* c_net, int dest, int tag) {
+    Py_ssize_t n_layers = c_net->n_layers;
+    MPI_Send(&n_layers, 1, MPI_LONG, dest, tag, MPI_COMM_WORLD);
+    for (Py_ssize_t i = 0; i < n_layers; i++) {
+        long d0 = c_net->layers[i].shape[0];
+        long d1 = c_net->layers[i].shape[1];
+
+        MPI_Send(c_net->layers[i].shape, 2, MPI_LONG, dest, tag,
+                 MPI_COMM_WORLD);
+        MPI_Send(c_net->layers[i].W, d0 * d1, MPI_FLOAT, dest, tag,
+                 MPI_COMM_WORLD);
+        MPI_Send(c_net->layers[i].b, d1, MPI_FLOAT, dest, tag,
+                 MPI_COMM_WORLD);
+    }
+}
+
+void recv_network(Network* c_net, int src, int tag) {
+    MPI_Recv(&c_net->n_layers, 1, MPI_LONG, src, tag, MPI_COMM_WORLD,
+             MPI_STATUS_IGNORE);
+    c_net->layers = malloc(sizeof(Dense) * c_net->n_layers);
+    for (Py_ssize_t i = 0; i < c_net->n_layers; i++) {
+        MPI_Recv(&c_net->layers[i].shape, 2, MPI_LONG, src, tag,
+                 MPI_COMM_WORLD, MPI_STATUS_IGNORE);
+        long d0 = c_net->layers[i].shape[0];
+        long d1 = c_net->layers[i].shape[1];
+        c_net->layers[i].ownmem = 1;
+        c_net->layers[i].W = malloc(sizeof(float) * d0 * d1);
+        c_net->layers[i].b = malloc(sizeof(float) * d1);
+        MPI_Recv(c_net->layers[i].W, d0 * d1, MPI_FLOAT, src, tag,
+                 MPI_COMM_WORLD, MPI_STATUS_IGNORE);
+        MPI_Recv(c_net->layers[i].b, d1, MPI_FLOAT, src, tag,
+                 MPI_COMM_WORLD, MPI_STATUS_IGNORE);
+    }
+}
+
+void free_network_contents(Network* c_net) {
+    for (Py_ssize_t i = 0; i < c_net->n_layers; i++) {
+        if (c_net->layers[i].ownmem) {
+            free(c_net->layers[i].b);
+            free(c_net->layers[i].W);
+        }
+    }
+    free(c_net->layers);
+}
+
 // Receives weight updates and trains, sends learned weights back to master
 void slave_node() {
     PyObject* net = create_network();
@@ -46,23 +98,43 @@ void slave_node() {
             PyArrayObject* batch = PyArray_SimpleNewFromData(
                 2, shape, NPY_FLOAT32, data);
             step_net(net, batch);
+            Py_DECREF(batch);
+            free(data);
         }
-        printf("%f\n", eval_net(net));
+        Network c_net;
+        cify_network(net, &c_net);
+        send_network(&c_net, P_MASTER, 0);
+        free_network_contents(&c_net);
     }
+    Py_DECREF(net);
 }
 
 // Stores most up-to-date model, sends it to slaves for training
 void master_node() {
+    PyObject* frank = create_network();
     for (int i = 0; i < COMM; i++) {
         char go;
        MPI_Send(&go, 1, MPI_CHAR, P_SLAVE, 0, MPI_COMM_WORLD);
+        Network c_net;
+        recv_network(&c_net, P_SLAVE, MPI_ANY_TAG);
+        be_like_cified(frank, &c_net);
+        free_network_contents(&c_net);
+        printf("Frank: %f\n", eval_net(frank));
     }
+    Py_DECREF(frank);
+}
+
+Role map_node() {
+    int node;
+    MPI_Comm_rank(MPI_COMM_WORLD, &node);
+    if (node == 0) return DATA;
+    if (node == 1) return SLAVE;
+    if (node == 2) return MASTER;
+    return SLAVE;
 }
 
 int main (int argc, const char **argv) {
-    int node;
     MPI_Init(NULL, NULL);
-    MPI_Comm_rank(MPI_COMM_WORLD, &node);
 
     // Cython Boilerplate
     PyImport_AppendInittab("library", PyInit_library);
@@ -72,17 +144,16 @@ int main (int argc, const char **argv) {
     PyObject* library_module = PyImport_ImportModule("library");
 
     // Actual Code
-    if (node == 0) {
-        data_reader();
-    }
-    else if (node == 1) {
-        slave_node();
-    }
-    else if (node == 2) {
-        master_node();
+    switch (map_node()) {
+        case DATA: data_reader();
+            break;
+        case SLAVE: slave_node();
+            break;
+        case MASTER: master_node();
+            break;
     }
 
-    // Cython Finalizing Boilerplate
+    // Finalizing Boilerplate
     Py_DECREF(library_module);
     Py_Finalize();
     MPI_Finalize();
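One more observation on the role mapping: `map_node` defaults every rank above 2 to SLAVE, but `data_reader` and `master_node` only ever address `P_SLAVE` (rank 1), so extra ranks would sit in `slave_node` waiting for messages that never come. Until the multi-slave version lands, a small guard like the following could fail fast on a wrong launch; the guard itself and the `mpirun -np 3 ./main` invocation (binary name assumed) are suggestions, not part of this commit.

    /* Possible guard, not in this commit: the hard-coded P_SLAVE/P_MASTER
     * layout expects exactly three ranks, e.g. `mpirun -np 3 ./main`. */
    #include <mpi.h>
    #include <stdio.h>

    static void check_world_size(void) {
        int size;
        MPI_Comm_size(MPI_COMM_WORLD, &size);
        if (size != 3) {
            fprintf(stderr, "expected 3 MPI ranks (DATA, SLAVE, MASTER), got %d\n", size);
            MPI_Abort(MPI_COMM_WORLD, 1);
        }
    }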