Add integration test for training

main
Shad Amethyst 2 years ago
parent b3b97f76bd
commit c1473a6d5c
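In short: the commit enables serde support on the nalgebra types and pulls in serde_json and approx, adds a small program that trains a 2-4-1 XOR network and dumps its parameters after every training step to tests/xor.json, fixes the dense layer's backward pass so the bias is included in the pre-activation, and adds an integration test that replays the same training run against those recorded snapshots.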

@@ -7,7 +7,7 @@ edition = "2021"
 [dependencies]
 boxed-array = "0.1.0"
-nalgebra = { version = "^0.32", features = ["std", "macros", "rand"] }
+nalgebra = { version = "^0.32", features = ["std", "macros", "rand", "serde-serialize"] }
 ndarray = "^0.15"
 num = "^0.4"
 # num-traits = "0.2.15"
@@ -19,3 +19,5 @@ textplots = "0.8.0"
 image = "0.24.6"
 viuer = "0.6.2"
 rust-mnist = "0.2.0"
+serde_json = "1.0.96"
+approx = "0.5.1"
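The dependency changes all serve the new test: nalgebra's serde-serialize feature makes DMatrix and DVector (de)serializable, serde_json reads and writes the tests/xor.json snapshot file, and approx provides the assert_relative_eq! macro used by the integration test below.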

@@ -0,0 +1,54 @@
#![feature(generic_arg_infer)]

use std::fs::File;

use nalgebra::dvector;
use neuramethyst::derivable::activation::{Relu, Tanh};
use neuramethyst::derivable::loss::Euclidean;
use neuramethyst::prelude::*;

fn main() {
    let mut network = neura_sequential![
        neura_layer!("dense", 4, f64).activation(Relu),
        neura_layer!("dense", 1, f64).activation(Tanh)
    ]
    .construct(NeuraShape::Vector(2))
    .unwrap();

    let inputs = [
        (dvector![0.0, 0.0], dvector![0.0]),
        (dvector![0.0, 1.0], dvector![1.0]),
        (dvector![1.0, 0.0], dvector![1.0]),
        (dvector![1.0, 1.0], dvector![0.0]),
    ];

    let mut trainer = NeuraBatchedTrainer::new(0.05, 1);
    trainer.batch_size = 1;

    let mut parameters = vec![(
        network.layer.weights.clone(),
        network.layer.bias.clone(),
        network.child_network.layer.weights.clone(),
        network.child_network.layer.bias.clone()
    )];

    for iteration in 0..4 {
        trainer.train(
            &NeuraBackprop::new(Euclidean),
            &mut network,
            inputs.iter().cloned().skip(iteration).take(1),
            &inputs,
        );

        parameters.push((
            network.layer.weights.clone(),
            network.layer.bias.clone(),
            network.child_network.layer.weights.clone(),
            network.child_network.layer.bias.clone()
        ));
    }

    let mut output = File::create("tests/xor.json").unwrap();
    serde_json::to_writer(&mut output, &parameters).unwrap();
}
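A note on regenerating the fixture: because of the #![feature(generic_arg_infer)] attribute, this generator only builds on a nightly toolchain, and the diff does not show where the file lives. Assuming it sits under examples/, something along the lines of cargo +nightly run --example <name> would recreate tests/xor.json, which the integration test further down then replays.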

@@ -10,8 +10,8 @@ use super::*;
 #[derive(Clone, Debug)]
 pub struct NeuraDenseLayer<F: Float, Act: NeuraDerivable<F>, Reg: NeuraDerivable<F>> {
-    weights: DMatrix<F>,
-    bias: DVector<F>,
+    pub weights: DMatrix<F>,
+    pub bias: DVector<F>,
     activation: Act,
     regularization: Reg,
 }
@@ -187,7 +187,7 @@ impl<
         input: &DVector<F>,
         epsilon: Self::Output,
     ) -> (DVector<F>, Self::Gradient) {
-        let evaluated = &self.weights * input;
+        let evaluated = &self.weights * input + &self.bias;
         // Compute delta (the input gradient of the neuron) from epsilon (the output gradient of the neuron),
         // with `self.activation'(input) ° epsilon = delta`
         let mut delta = epsilon.clone();
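This one-line change appears to be the behavioral fix that the new snapshot test pins down: evaluated is the pre-activation value at which the activation derivative is taken, and it previously omitted the bias, i.e. it used W·x where the forward pass computes W·x + b. Restoring the bias term makes the backward pass consistent with the forward pass whenever the bias is non-zero.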

@@ -0,0 +1 @@
[[[[-0.784019737923873,0.07487625690483957,0.26019825178263306,-1.3672346823409536,0.6615557659794828,0.03277321470276125,0.10255556278341912,-1.1741372389561717],4,2],[[0.1,0.1,0.1,0.1],4,null],[[-0.22956756852546423,-0.3896251045321558,-0.41082820479510523,-0.18775700687825625],1,4],[[0.0],1,null]],[[[-0.784019737923873,0.07487625690483957,0.26019825178263306,-1.3672346823409536,0.6615557659794828,0.03277321470276125,0.10255556278341912,-1.1741372389561717],4,2],[[0.09862948269238463,0.09767393995296188,0.09754735754381703,0.09887909154936157],4,null],[[-0.22897056899438134,-0.38902810500107293,-0.41023120526402235,-0.18716000734717336],1,4],[[0.005969995310828754],1,null]],[[[-0.784019737923873,0.07487625690483957,0.26019825178263306,-1.3672346823409536,0.6480251568143229,0.009784289679993521,0.07831367820341369,-1.1741372389561717],4,2],[[0.08509887352722474,0.07468501493019415,0.0733054729638116,0.09887909154936157],4,null],[[-0.1840487716731225,-0.3813195620468164,-0.3984064785332197,-0.18716000734717336],1,4],[[0.06506321949498975],1,null]],[[[-0.784019737923873,0.05377301152907931,0.23814937070596057,-1.3672346823409536,0.6480251568143229,0.009784289679993521,0.07831367820341369,-1.1741372389561717],4,2],[[0.08509887352722474,0.053581769554433896,0.05125659188713909,0.09887909154936157],4,null],[[-0.1840487716731225,-0.37304244088024485,-0.37994948959911895,-0.18716000734717336],1,4],[[0.12040589654644748],1,null]],[[[-0.784019737923873,0.05260399992912556,0.2369587143300002,-1.3672346823409536,0.6480251568143229,0.008615278080039767,0.07712302182745333,-1.1741372389561717],4,2],[[0.08509887352722474,0.052412757954480145,0.05006593551117873,0.09887909154936157],4,null],[[-0.1840487716731225,-0.3726753595041762,-0.3787971581791132,-0.18716000734717336],1,4],[[0.12353961934001303],1,null]]]
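For reference when reading the fixture: the outer array holds five snapshots (the initial parameters plus one per training step), each a 4-tuple of first-layer weights, first-layer bias, second-layer weights and second-layer bias, exactly as collected by the generator above. Each matrix or vector appears in nalgebra's serde form as a [column-major data, nrows, ncols] triple, with null standing in for the statically known single column of a DVector.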

@@ -0,0 +1,54 @@
use std::fs::File;

use approx::assert_relative_eq;
use nalgebra::{DMatrix, DVector, dvector};
use neuramethyst::{prelude::{*, dense::NeuraDenseLayer}, derivable::{activation::{Relu, Tanh}, regularize::NeuraL0, loss::Euclidean}};

fn load_test_data() -> Vec<(DMatrix<f64>, DVector<f64>, DMatrix<f64>, DVector<f64>)> {
    let file = File::open("tests/xor.json").unwrap();
    let data: Vec<(DMatrix<f64>, DVector<f64>, DMatrix<f64>, DVector<f64>)> =
        serde_json::from_reader(&file).unwrap();

    data
}

#[test]
fn test_xor_training() {
    let data = load_test_data();

    let mut network = neura_sequential![
        NeuraDenseLayer::new(data[0].0.clone(), data[0].1.clone(), Relu, NeuraL0),
        NeuraDenseLayer::new(data[0].2.clone(), data[0].3.clone(), Tanh, NeuraL0),
    ];

    let inputs = [
        (dvector![0.0, 0.0], dvector![0.0]),
        (dvector![0.0, 1.0], dvector![1.0]),
        (dvector![1.0, 0.0], dvector![1.0]),
        (dvector![1.0, 1.0], dvector![0.0]),
    ];

    let mut trainer = NeuraBatchedTrainer::new(0.05, 1);
    trainer.batch_size = 1;

    for iteration in 0..4 {
        trainer.train(
            &NeuraBackprop::new(Euclidean),
            &mut network,
            inputs.iter().cloned().skip(iteration).take(1),
            &inputs,
        );

        let expected = data[iteration + 1].clone();
        let actual = (
            network.layer.weights.clone(),
            network.layer.bias.clone(),
            network.child_network.layer.weights.clone(),
            network.child_network.layer.bias.clone()
        );

        assert_relative_eq!(expected.0.as_slice(), actual.0.as_slice());
        assert_relative_eq!(expected.1.as_slice(), actual.1.as_slice());
        assert_relative_eq!(expected.2.as_slice(), actual.2.as_slice());
        assert_relative_eq!(expected.3.as_slice(), actual.3.as_slice());
    }
}
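The test itself does not need the nightly feature used by the generator, so a plain cargo test should run it (the exact file name under tests/ is not shown in this diff). Note that assert_relative_eq! is used with approx's default tolerance, which is on the order of machine epsilon for f64, so the check effectively requires training to reproduce the recorded parameters almost exactly; any intentional change to the training math would mean regenerating tests/xor.json.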