Greetings,
The script below demonstrates the issue. When I do the training with sigmoid for both the activation and the output activation, then save and load the MLP, it doesn't correctly report what the activations are. It still gets the right answer when predicting, though, so I think the activations are probably correct on the server side.
Thanks all!
(
Routine{
    // train an MLP on XOR with sigmoid hidden and output activations,
    // write it to disk, and print what the getters report
    var nn = FluidMLPRegressor(s,[3],FluidMLPRegressor.sigmoid,FluidMLPRegressor.sigmoid,maxIter:10000,learnRate:0.5,validation:0);
    var xx = FluidDataSet(s);
    var yy = FluidDataSet(s);
    var xbuf = Buffer.alloc(s,2);
    var ybuf = Buffer.alloc(s,1);
    var yhat = FluidDataSet(s);

    ~save_path = "/Users/ted/Desktop/SUPERCOLLIDER/timbral_space_mapping/nn docs/%_nn_test.json".format(Date.localtime.stamp);

    s.sync;

    // XOR training data: [input vector, target]
    [
        [[0,0],[0]],
        [[1,1],[0]],
        [[1,0],[1]],
        [[0,1],[1]]
    ].do({
        arg vecs, i;
        var x = vecs[0];
        var y = vecs[1];

        x.postln;
        y.postln;
        "".postln;

        xbuf.loadCollection(x);
        s.sync;
        xx.addPoint(i.asString,xbuf);
        s.sync;

        ybuf.loadCollection(y);
        s.sync;
        yy.addPoint(i.asString,ybuf);
        s.sync;
    });

    "xx:".postln;
    xx.print;
    "yy:".postln;
    yy.print;

    // this action fires once fit has completed
    nn.fit(xx,yy,{
        arg out;
        "done".postln;
        out.postln;

        nn.predict(xx,yhat,{
            "done 2".postln;
            yhat.print;
        });

        nn.write(~save_path);

        // check what the getters report before the saved file is read back
        "during training".postln;
        "activation: %".format(nn.activation).postln;
        "out act: %".format(nn.outputActivation).postln;
    });
}.play;
)
(
Routine{
    // fresh MLP: read the saved file back and check what the getters report
    var nn = FluidMLPRegressor(s);
    var x_buf = Buffer.alloc(s,2);
    var y_buf = Buffer.alloc(s,1);

    s.sync;

    "before read".postln;
    "activation: %".format(nn.activation).postln;
    "out act: %".format(nn.outputActivation).postln;

    s.sync;

    nn.read(~save_path);
    //nn.load(~save_path);
    s.sync;

    "after read".postln;
    "activation: %".format(nn.activation).postln;
    "out act: %".format(nn.outputActivation).postln;

    // predict one point to confirm the loaded network itself still behaves correctly
    x_buf.loadCollection([0,1]);
    s.sync;

    nn.predictPoint(x_buf,y_buf,{
        y_buf.loadToFloatArray(action:{
            arg fa;
            fa.postln;
        });
        "done".postln;
    });
}.play;
)
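
In case this is just an ordering/async thing on my end, here's a sketch of the variation I had in mind, querying the getters inside read's completion action instead of after an s.sync (I'm assuming read accepts an action argument the same way fit and predict do). Is that the intended way to do it?

// hypothetical variation: query the getters inside read's completion action
// (assuming read takes an action like fit and predict do)
(
Routine{
    var nn = FluidMLPRegressor(s);
    s.sync;
    nn.read(~save_path,{
        "after read (inside action)".postln;
        "activation: %".format(nn.activation).postln;
        "out act: %".format(nn.outputActivation).postln;
    });
}.play;
)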