diff --git a/examples/xor.rs b/examples/xor.rs
index 4b7c277..b7cde53 100644
--- a/examples/xor.rs
+++ b/examples/xor.rs
@@ -2,17 +2,19 @@
 use nalgebra::dvector;

+use neuramethyst::cycle_shuffling;
 use neuramethyst::derivable::activation::Relu;
 use neuramethyst::derivable::loss::Euclidean;
 use neuramethyst::prelude::*;
-use neuramethyst::cycle_shuffling;

 fn main() {
     let mut network = neura_sequential![
         neura_layer!("dense", 4, Relu),
         neura_layer!("dense", 3, Relu),
         neura_layer!("dense", 1, Relu)
-    ].construct(NeuraShape::Vector(2)).unwrap();
+    ]
+    .construct(NeuraShape::Vector(2))
+    .unwrap();

     let inputs = [
         (dvector![0.0, 0.0], dvector![0.0]),
diff --git a/src/algebra/mod.rs b/src/algebra/mod.rs
index 8e26d80..15d1478 100644
--- a/src/algebra/mod.rs
+++ b/src/algebra/mod.rs
@@ -107,11 +107,12 @@ impl NeuraVectorSpace for [T; N] {
     }
 }

-impl> NeuraVectorSpace for Matrix
+impl>
+    NeuraVectorSpace for Matrix
 where
     Matrix: std::ops::MulAssign,
     for<'c> Matrix: std::ops::AddAssign<&'c Matrix>,
-    F: From + Into
+    F: From + Into,
 {
     fn add_assign(&mut self, other: &Self) {
         *self += other;
@@ -122,7 +123,11 @@ where
     }

     fn norm_squared(&self) -> f64 {
-        self.iter().map(|x| *x * *x).reduce(|sum, curr| sum + curr).unwrap_or(F::zero()).into()
+        self.iter()
+            .map(|x| *x * *x)
+            .reduce(|sum, curr| sum + curr)
+            .unwrap_or(F::zero())
+            .into()
     }
 }
diff --git a/src/derivable/loss.rs b/src/derivable/loss.rs
index 9bb79da..0b4d833 100644
--- a/src/derivable/loss.rs
+++ b/src/derivable/loss.rs
@@ -24,11 +24,7 @@ impl NeuraLoss for Euclidean {
     }

     #[inline]
-    fn nabla(
-        &self,
-        target: &DVector,
-        actual: &DVector,
-    ) -> DVector {
+    fn nabla(&self, target: &DVector, actual: &DVector) -> DVector {
         let mut res = DVector::zeros(target.len());

         // ∂E(y)/∂yᵢ = yᵢ - yᵢ'
diff --git a/src/layer/dense.rs b/src/layer/dense.rs
index dd56367..ef5a6df 100644
--- a/src/layer/dense.rs
+++ b/src/layer/dense.rs
@@ -17,12 +17,8 @@ pub struct NeuraDenseLayer, Reg: NeuraDerivable
 }

 #[derive(Clone, Debug)]
-pub struct NeuraDenseLayerPartial<
-    F: Float,
-    Act: NeuraDerivable,
-    Reg: NeuraDerivable,
-    R: Rng,
-> {
+pub struct NeuraDenseLayerPartial, Reg: NeuraDerivable, R: Rng>
+{
     activation: Act,
     regularization: Reg,
     output_size: usize,
@@ -143,7 +139,13 @@ impl<
 }

 impl<
-    F: Float + From + Into + std::fmt::Debug + 'static + std::ops::AddAssign + std::ops::MulAssign,
+    F: Float
+        + From
+        + Into
+        + std::fmt::Debug
+        + 'static
+        + std::ops::AddAssign
+        + std::ops::MulAssign,
     Act: NeuraDerivable,
     Reg: NeuraDerivable,
 > NeuraTrainableLayer> for NeuraDenseLayer
@@ -184,7 +186,10 @@ impl<
     }

     fn regularize_layer(&self) -> Self::Gradient {
-        (self.weights.map(|x| self.regularization.derivate(x)), DVector::zeros(self.bias.shape().0))
+        (
+            self.weights.map(|x| self.regularization.derivate(x)),
+            DVector::zeros(self.bias.shape().0),
+        )
     }

     fn apply_gradient(&mut self, gradient: &Self::Gradient) {
diff --git a/src/layer/mod.rs b/src/layer/mod.rs
index 560d738..5f61dd0 100644
--- a/src/layer/mod.rs
+++ b/src/layer/mod.rs
@@ -1,5 +1,3 @@
-use num::Float;
-
 use crate::algebra::NeuraVectorSpace;

 pub mod dense;
@@ -7,8 +5,8 @@ pub use dense::NeuraDenseLayer;

 #[derive(Clone, Copy, PartialEq, Debug)]
 pub enum NeuraShape {
-    Vector(usize), // entries
-    Matrix(usize, usize), // rows, columns
+    Vector(usize),               // entries
+    Matrix(usize, usize),        // rows, columns
     Tensor(usize, usize, usize), // rows, columns, channels
 }

@@ -17,7 +15,7 @@ impl NeuraShape {
         match self {
             NeuraShape::Vector(entries) => *entries,
             NeuraShape::Matrix(rows, columns) => rows *
columns, - NeuraShape::Tensor(rows, columns, channels) => rows * columns * channels + NeuraShape::Tensor(rows, columns, channels) => rows * columns * channels, } } } @@ -31,6 +29,7 @@ pub trait NeuraLayer { impl NeuraLayer for () { type Output = Input; + #[inline(always)] fn eval(&self, input: &Input) -> Self::Output { input.clone() } @@ -62,11 +61,7 @@ pub trait NeuraTrainableLayer: NeuraLayer { /// The function should then return a pair `(epsilon_{l-1}, δW_l)`, /// with `epsilon_{l-1}` being multiplied by `f_{l-1}'(activation)` by the next layer to obtain `delta_{l-1}`. /// Using this intermediate value for `delta` allows us to isolate it computation to the respective layers. - fn backprop_layer( - &self, - input: &Input, - epsilon: Self::Output, - ) -> (Input, Self::Gradient); + fn backprop_layer(&self, input: &Input, epsilon: Self::Output) -> (Input, Self::Gradient); /// Computes the regularization fn regularize_layer(&self) -> Self::Gradient; @@ -80,10 +75,39 @@ pub trait NeuraTrainableLayer: NeuraLayer { fn prepare_layer(&mut self, is_training: bool) {} } +impl NeuraTrainableLayer for () { + type Gradient = (); + + #[inline(always)] + fn default_gradient(&self) -> Self::Gradient { + () + } + + #[inline(always)] + fn backprop_layer(&self, _input: &Input, epsilon: Self::Output) -> (Input, Self::Gradient) { + (epsilon, ()) + } + + #[inline(always)] + fn regularize_layer(&self) -> Self::Gradient { + () + } + + #[inline(always)] + fn apply_gradient(&mut self, _gradient: &Self::Gradient) { + // Noop + } +} + /// Temporary implementation of neura_layer #[macro_export] macro_rules! neura_layer { ( "dense", $output:expr, $activation:expr ) => { - $crate::layer::dense::NeuraDenseLayer::new_partial($output, rand::thread_rng(), $activation, $crate::derivable::regularize::NeuraL0) - } + $crate::layer::dense::NeuraDenseLayer::new_partial( + $output, + rand::thread_rng(), + $activation, + $crate::derivable::regularize::NeuraL0, + ) + }; } diff --git a/src/lib.rs b/src/lib.rs index 4b2d837..80b548d 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,15 +1,12 @@ #![feature(generic_arg_infer)] -#![feature(generic_const_exprs)] -#![feature(negative_impls)] +// #![feature(generic_const_exprs)] pub mod algebra; pub mod derivable; -// pub mod layer; +pub mod layer; pub mod network; pub mod train; -pub mod layer; - mod utils; // TODO: move to a different file @@ -21,6 +18,8 @@ pub mod prelude { // Structs and traits pub use crate::layer::*; - pub use crate::network::sequential::{NeuraSequential, NeuraSequentialTail, NeuraSequentialBuild}; + pub use crate::network::sequential::{ + NeuraSequential, NeuraSequentialConstruct, NeuraSequentialTail, + }; pub use crate::train::{NeuraBackprop, NeuraBatchedTrainer}; } diff --git a/src/network/mod.rs b/src/network/mod.rs index 68bdcfa..d08ae3f 100644 --- a/src/network/mod.rs +++ b/src/network/mod.rs @@ -3,11 +3,11 @@ use crate::{algebra::NeuraVectorSpace, derivable::NeuraLoss, layer::NeuraLayer}; pub mod sequential; pub trait NeuraTrainableNetwork: NeuraLayer { - type Delta: NeuraVectorSpace; + type Gradient: NeuraVectorSpace; - fn default_gradient(&self) -> Self::Delta; + fn default_gradient(&self) -> Self::Gradient; - fn apply_gradient(&mut self, gradient: &Self::Delta); + fn apply_gradient(&mut self, gradient: &Self::Gradient); /// Should implement the backpropagation algorithm, see `NeuraTrainableLayer::backpropagate` for more information. 
fn backpropagate>( @@ -15,10 +15,10 @@ pub trait NeuraTrainableNetwork: NeuraLayer { input: &Input, target: &Loss::Target, loss: Loss, - ) -> (Input, Self::Delta); + ) -> (Input, Self::Gradient); /// Should return the regularization gradient - fn regularize(&self) -> Self::Delta; + fn regularize(&self) -> Self::Gradient; /// Called before an iteration begins, to allow the network to set itself up for training or not. fn prepare(&mut self, train_iteration: bool); diff --git a/src/network/sequential.rs b/src/network/sequential.rs deleted file mode 100644 index 0fcd876..0000000 --- a/src/network/sequential.rs +++ /dev/null @@ -1,298 +0,0 @@ -use num::Float; - -use crate::{ - derivable::NeuraLoss, - layer::{NeuraLayer, NeuraTrainableLayer, NeuraShape, NeuraPartialLayer}, -}; - -use super::NeuraTrainableNetwork; - -#[derive(Clone, Debug)] -pub struct NeuraSequential { - pub layer: Layer, - pub child_network: Box, -} - -/// Operations on the tail end of a sequential network -pub trait NeuraSequentialTail { - type TailTrimmed; - type TailPushed; - - fn trim_tail(self) -> Self::TailTrimmed; - fn push_tail(self, layer: T) -> Self::TailPushed; -} - -impl NeuraSequential { - pub fn new(layer: Layer, child_network: ChildNetwork) -> Self { - Self { - layer, - child_network: Box::new(child_network), - } - } - - pub fn new_match_output(layer: Layer, child_network: ChildNetwork) -> Self - where - Layer: NeuraLayer, - ChildNetwork: NeuraLayer, - { - Self::new(layer, child_network) - } - - pub fn trim_front(self) -> ChildNetwork { - *self.child_network - } - - pub fn push_front>(self, layer: T) -> NeuraSequential - where - Layer: NeuraLayer - { - NeuraSequential { - layer: layer, - child_network: Box::new(self), - } - } -} - -// Trimming the last layer returns an empty network -impl NeuraSequentialTail for NeuraSequential { - type TailTrimmed = (); - type TailPushed = NeuraSequential>; - - fn trim_tail(self) -> Self::TailTrimmed { - () - } - - fn push_tail(self, layer: T) -> Self::TailPushed { - NeuraSequential { - layer: self.layer, - child_network: Box::new(NeuraSequential { - layer, - child_network: Box::new(()), - }), - } - } -} - -// Trimming another layer returns a network which calls trim recursively -impl NeuraSequentialTail - for NeuraSequential -{ - type TailTrimmed = NeuraSequential::TailTrimmed>; - type TailPushed = - NeuraSequential::TailPushed>; - - fn trim_tail(self) -> Self::TailTrimmed { - NeuraSequential { - layer: self.layer, - child_network: Box::new(self.child_network.trim_tail()), - } - } - - fn push_tail(self, layer: T) -> Self::TailPushed { - NeuraSequential { - layer: self.layer, - child_network: Box::new(self.child_network.push_tail(layer)), - } - } -} - -impl, ChildNetwork: NeuraLayer> NeuraLayer - for NeuraSequential -{ - type Output = ChildNetwork::Output; - - fn eval(&self, input: &Input) -> Self::Output { - self.child_network.eval(&self.layer.eval(input)) - } -} - -impl NeuraTrainableNetwork for () { - type Delta = (); - - fn default_gradient(&self) -> () { - () - } - - fn apply_gradient(&mut self, _gradient: &()) { - // Noop - } - - fn backpropagate>( - &self, - final_activation: &Input, - target: &Loss::Target, - loss: Loss, - ) -> (Input, Self::Delta) { - let backprop_epsilon = loss.nabla(target, &final_activation); - - (backprop_epsilon, ()) - } - - fn regularize(&self) -> () { - () - } - - fn prepare(&mut self, _is_training: bool) { - // Noop - } -} - -impl, ChildNetwork: NeuraTrainableNetwork> - NeuraTrainableNetwork for NeuraSequential -{ - type Delta = 
(Layer::Gradient, Box); - - fn default_gradient(&self) -> Self::Delta { - (self.layer.default_gradient(), Box::new(self.child_network.default_gradient())) - } - - fn apply_gradient(&mut self, gradient: &Self::Delta) { - self.layer.apply_gradient(&gradient.0); - self.child_network.apply_gradient(&gradient.1); - } - - fn backpropagate>( - &self, - input: &Input, - target: &Loss::Target, - loss: Loss, - ) -> (Input, Self::Delta) { - let next_activation = self.layer.eval(input); - let (backprop_gradient, weights_gradient) = - self.child_network - .backpropagate(&next_activation, target, loss); - let (backprop_gradient, layer_gradient) = - self.layer.backprop_layer(input, backprop_gradient); - - ( - backprop_gradient, - (layer_gradient, Box::new(weights_gradient)), - ) - } - - fn regularize(&self) -> Self::Delta { - ( - self.layer.regularize_layer(), - Box::new(self.child_network.regularize()), - ) - } - - fn prepare(&mut self, is_training: bool) { - self.layer.prepare_layer(is_training); - self.child_network.prepare(is_training); - } -} - -impl From for NeuraSequential { - fn from(layer: Layer) -> Self { - Self { - layer, - child_network: Box::new(()), - } - } -} - -pub trait NeuraSequentialBuild { - type Constructed; - type Err; - - fn construct(self, input_shape: NeuraShape) -> Result; -} - -#[derive(Debug, Clone)] -pub enum NeuraSequentialBuildErr { - Current(Err), - Child(ChildErr), -} - -impl NeuraSequentialBuild for NeuraSequential { - type Constructed = NeuraSequential; - type Err = Layer::Err; - - fn construct(self, input_shape: NeuraShape) -> Result { - Ok(NeuraSequential { - layer: self.layer.construct(input_shape)?, - child_network: Box::new(()) - }) - } -} - -impl NeuraSequentialBuild for NeuraSequential { - type Constructed = NeuraSequential; - type Err = NeuraSequentialBuildErr; - - fn construct(self, input_shape: NeuraShape) -> Result { - let layer = self.layer.construct(input_shape).map_err(|e| NeuraSequentialBuildErr::Current(e))?; - - // TODO: ensure that this operation (and all recursive operations) are directly allocated on the heap - let child_network = self.child_network - .construct(Layer::output_shape(&layer)) - .map_err(|e| NeuraSequentialBuildErr::Child(e))?; - let child_network = Box::new(child_network); - - Ok(NeuraSequential { - layer, - child_network, - }) - } - - -} - -/// An utility to recursively create a NeuraSequential network, while writing it in a declarative and linear fashion. -/// Note that this can quickly create big and unwieldly types. -#[macro_export] -macro_rules! neura_sequential { - [] => { - () - }; - - [ $layer:expr $(,)? ] => { - $crate::network::sequential::NeuraSequential::from($layer) - }; - - [ $first:expr, $($rest:expr),+ $(,)? 
] => { - $crate::network::sequential::NeuraSequential::new($first, neura_sequential![$($rest),+]) - }; -} - -#[cfg(test)] -mod test { - use nalgebra::dvector; - - use crate::{ - derivable::{activation::Relu, regularize::NeuraL0}, - layer::{NeuraDenseLayer, NeuraShape, NeuraLayer}, - neura_layer, - }; - - use super::NeuraSequentialBuild; - - #[test] - fn test_neura_network_macro() { - let mut rng = rand::thread_rng(); - - let _ = neura_sequential![ - NeuraDenseLayer::from_rng(8, 12, &mut rng, Relu, NeuraL0) as NeuraDenseLayer, - NeuraDenseLayer::from_rng(12, 16, &mut rng, Relu, NeuraL0) as NeuraDenseLayer, - NeuraDenseLayer::from_rng(16, 2, &mut rng, Relu, NeuraL0) as NeuraDenseLayer - ]; - - let _ = neura_sequential![ - NeuraDenseLayer::from_rng(2, 2, &mut rng, Relu, NeuraL0) as NeuraDenseLayer, - ]; - - let _ = neura_sequential![ - NeuraDenseLayer::from_rng(8, 16, &mut rng, Relu, NeuraL0) as NeuraDenseLayer, - NeuraDenseLayer::from_rng(16, 12, &mut rng, Relu, NeuraL0) as NeuraDenseLayer, - ]; - - let network = neura_sequential![ - neura_layer!("dense", 16, Relu), - neura_layer!("dense", 12, Relu), - neura_layer!("dense", 2, Relu) - ].construct(NeuraShape::Vector(2)).unwrap(); - - network.eval(&dvector![0.0f64, 0.0]); - } -} diff --git a/src/network/sequential/construct.rs b/src/network/sequential/construct.rs new file mode 100644 index 0000000..1e257e9 --- /dev/null +++ b/src/network/sequential/construct.rs @@ -0,0 +1,52 @@ +use super::*; + +pub trait NeuraSequentialConstruct { + type Constructed; + type Err; + + fn construct(self, input_shape: NeuraShape) -> Result; +} + +#[derive(Debug, Clone)] +pub enum NeuraSequentialConstructErr { + Current(Err), + Child(ChildErr), +} + +impl NeuraSequentialConstruct for NeuraSequential { + type Constructed = NeuraSequential; + type Err = Layer::Err; + + fn construct(self, input_shape: NeuraShape) -> Result { + Ok(NeuraSequential { + layer: self.layer.construct(input_shape)?, + child_network: Box::new(()), + }) + } +} + +impl NeuraSequentialConstruct + for NeuraSequential +{ + type Constructed = NeuraSequential; + type Err = NeuraSequentialConstructErr; + + fn construct(self, input_shape: NeuraShape) -> Result { + let layer = self + .layer + .construct(input_shape) + .map_err(|e| NeuraSequentialConstructErr::Current(e))?; + + // TODO: ensure that this operation (and all recursive operations) are directly allocated on the heap + let child_network = self + .child_network + .construct(Layer::output_shape(&layer)) + .map_err(|e| NeuraSequentialConstructErr::Child(e))?; + let child_network = Box::new(child_network); + + Ok(NeuraSequential { + layer, + child_network, + }) + } +} diff --git a/src/network/sequential/mod.rs b/src/network/sequential/mod.rs new file mode 100644 index 0000000..e6920ea --- /dev/null +++ b/src/network/sequential/mod.rs @@ -0,0 +1,287 @@ +use super::NeuraTrainableNetwork; +use crate::{ + derivable::NeuraLoss, + layer::{NeuraLayer, NeuraPartialLayer, NeuraShape, NeuraTrainableLayer}, +}; + +mod construct; +mod tail; + +pub use construct::*; +pub use tail::*; + +/// Chains a layer with the rest of a neural network, in a fashion similar to a cartesian product, +/// while preserving all type information. +/// The type `Layer` represents the current layer of the neural network, +/// and its output will be fed to the `ChildNetwork`, which will typically either be another `NeuraSequential` +/// instance or `()`. 
+/// +/// `ChildNetwork` is also free to be another implementation of `NeuraNetwork`, +/// which allows `NeuraSequential` to be used together with other network structures. +/// +/// `child_network` is stored in a `Box`, so as to avoid taking up too much space on the stack. +/// +/// ## Notes on implemented traits +/// +/// The different implementations for `NeuraTrainableNetwork`, +/// `NeuraLayer` and `NeuraTrainableLayer` each require that `ChildNetwork` implements those respective traits, +/// and that the output type of `Layer` matches the input type of `ChildNetwork`. +/// +/// If a method, like `eval`, is reported as missing, +/// then it likely means that the output type of `Layer` does not match the input type of `ChildNetwork`, +/// or that a similar issue arose within `ChildNetwork`. +/// +/// ## Trimming and appending layers +/// +/// If you want to modify the network structure, you can do so by using the `trim_front`, `trim_tail`, +/// `push_front` and `push_tail` methods. +/// +/// The operations on the front are trivial, as it simply involves wrapping the current instance in a new `NeuraSequential` +/// instance. +/// +/// The operations on the tail end are more complex, and require recursively traversing the `NeuraSequential` structure, +/// until an instance of `NeuraSequential` is found. +/// If your network feeds into a type that does not implement `NeuraSequentialTail`, then you will not be able to use those operations. +#[derive(Clone, Debug)] +pub struct NeuraSequential { + pub layer: Layer, + pub child_network: Box, +} + +impl NeuraSequential { + pub fn new(layer: Layer, child_network: ChildNetwork) -> Self { + Self { + layer, + child_network: Box::new(child_network), + } + } + + pub fn trim_front(self) -> ChildNetwork { + *self.child_network + } + + pub fn push_front>( + self, + layer: T, + ) -> NeuraSequential + where + Layer: NeuraLayer, + { + NeuraSequential { + layer: layer, + child_network: Box::new(self), + } + } +} + +impl, ChildNetwork: NeuraLayer> NeuraLayer + for NeuraSequential +{ + type Output = ChildNetwork::Output; + + fn eval(&self, input: &Input) -> Self::Output { + self.child_network.eval(&self.layer.eval(input)) + } +} + +impl< + Input, + Layer: NeuraTrainableLayer, + ChildNetwork: NeuraTrainableLayer, + > NeuraTrainableLayer for NeuraSequential +{ + type Gradient = (Layer::Gradient, Box); + + fn default_gradient(&self) -> Self::Gradient { + ( + self.layer.default_gradient(), + Box::new(self.child_network.default_gradient()), + ) + } + + fn backprop_layer( + &self, + input: &Input, + incoming_epsilon: Self::Output, + ) -> (Input, Self::Gradient) { + let output = self.layer.eval(input); + let (transient_epsilon, child_gradient) = + self.child_network.backprop_layer(&output, incoming_epsilon); + let (outgoing_epsilon, layer_gradient) = + self.layer.backprop_layer(input, transient_epsilon); + + (outgoing_epsilon, (layer_gradient, Box::new(child_gradient))) + } + + fn regularize_layer(&self) -> Self::Gradient { + ( + self.layer.regularize_layer(), + Box::new(self.child_network.regularize_layer()), + ) + } + + fn apply_gradient(&mut self, gradient: &Self::Gradient) { + self.layer.apply_gradient(&gradient.0); + self.child_network.apply_gradient(&gradient.1); + } +} + +impl< + Input, + Layer: NeuraTrainableLayer, + ChildNetwork: NeuraTrainableNetwork, + > NeuraTrainableNetwork for NeuraSequential +{ + type Gradient = (Layer::Gradient, Box); + + fn default_gradient(&self) -> Self::Gradient { + ( + self.layer.default_gradient(), + 
Box::new(self.child_network.default_gradient()), + ) + } + + fn apply_gradient(&mut self, gradient: &Self::Gradient) { + self.layer.apply_gradient(&gradient.0); + self.child_network.apply_gradient(&gradient.1); + } + + fn backpropagate>( + &self, + input: &Input, + target: &Loss::Target, + loss: Loss, + ) -> (Input, Self::Gradient) { + let next_activation = self.layer.eval(input); + let (backprop_gradient, weights_gradient) = + self.child_network + .backpropagate(&next_activation, target, loss); + let (backprop_gradient, layer_gradient) = + self.layer.backprop_layer(input, backprop_gradient); + + ( + backprop_gradient, + (layer_gradient, Box::new(weights_gradient)), + ) + } + + fn regularize(&self) -> Self::Gradient { + ( + self.layer.regularize_layer(), + Box::new(self.child_network.regularize()), + ) + } + + fn prepare(&mut self, is_training: bool) { + self.layer.prepare_layer(is_training); + self.child_network.prepare(is_training); + } +} + +/// A dummy implementation of `NeuraTrainableNetwork`, which simply calls `loss.eval` in `backpropagate`. +impl NeuraTrainableNetwork for () { + type Gradient = (); + + #[inline(always)] + fn default_gradient(&self) -> () { + () + } + + #[inline(always)] + fn apply_gradient(&mut self, _gradient: &()) { + // Noop + } + + #[inline(always)] + fn backpropagate>( + &self, + final_activation: &Input, + target: &Loss::Target, + loss: Loss, + ) -> (Input, Self::Gradient) { + let backprop_epsilon = loss.nabla(target, &final_activation); + + (backprop_epsilon, ()) + } + + #[inline(always)] + fn regularize(&self) -> () { + () + } + + #[inline(always)] + fn prepare(&mut self, _is_training: bool) { + // Noop + } +} + +impl From for NeuraSequential { + fn from(layer: Layer) -> Self { + Self { + layer, + child_network: Box::new(()), + } + } +} + +/// An utility to recursively create a NeuraSequential network, while writing it in a declarative and linear fashion. +/// Note that this can quickly create big and unwieldly types. +#[macro_export] +macro_rules! neura_sequential { + [] => { + () + }; + + [ $layer:expr $(,)? ] => { + $crate::network::sequential::NeuraSequential::from($layer) + }; + + [ $first:expr, $($rest:expr),+ $(,)? 
] => { + $crate::network::sequential::NeuraSequential::new($first, neura_sequential![$($rest),+]) + }; +} + +#[cfg(test)] +mod test { + use nalgebra::dvector; + + use crate::{ + derivable::{activation::Relu, regularize::NeuraL0}, + layer::{NeuraDenseLayer, NeuraLayer, NeuraShape}, + neura_layer, + }; + + use super::NeuraSequentialConstruct; + + #[test] + fn test_neura_network_macro() { + let mut rng = rand::thread_rng(); + + let _ = neura_sequential![ + NeuraDenseLayer::from_rng(8, 12, &mut rng, Relu, NeuraL0) as NeuraDenseLayer, + NeuraDenseLayer::from_rng(12, 16, &mut rng, Relu, NeuraL0) + as NeuraDenseLayer, + NeuraDenseLayer::from_rng(16, 2, &mut rng, Relu, NeuraL0) as NeuraDenseLayer + ]; + + let _ = + neura_sequential![NeuraDenseLayer::from_rng(2, 2, &mut rng, Relu, NeuraL0) + as NeuraDenseLayer,]; + + let _ = neura_sequential![ + NeuraDenseLayer::from_rng(8, 16, &mut rng, Relu, NeuraL0) as NeuraDenseLayer, + NeuraDenseLayer::from_rng(16, 12, &mut rng, Relu, NeuraL0) + as NeuraDenseLayer, + ]; + + let network = neura_sequential![ + neura_layer!("dense", 16, Relu), + neura_layer!("dense", 12, Relu), + neura_layer!("dense", 2, Relu) + ] + .construct(NeuraShape::Vector(2)) + .unwrap(); + + network.eval(&dvector![0.0f64, 0.0]); + } +} diff --git a/src/network/sequential/tail.rs b/src/network/sequential/tail.rs new file mode 100644 index 0000000..dd6e9e2 --- /dev/null +++ b/src/network/sequential/tail.rs @@ -0,0 +1,54 @@ +use super::*; + +/// Operations on the tail end of a sequential network +pub trait NeuraSequentialTail { + type TailTrimmed; + type TailPushed; + + fn trim_tail(self) -> Self::TailTrimmed; + fn push_tail(self, layer: T) -> Self::TailPushed; +} + +// Trimming the last layer returns an empty network +impl NeuraSequentialTail for NeuraSequential { + type TailTrimmed = (); + // GAT :3 + type TailPushed = NeuraSequential>; + + fn trim_tail(self) -> Self::TailTrimmed { + () + } + + fn push_tail(self, layer: T) -> Self::TailPushed { + NeuraSequential { + layer: self.layer, + child_network: Box::new(NeuraSequential { + layer, + child_network: Box::new(()), + }), + } + } +} + +// Trimming another layer returns a network which calls trim recursively +impl NeuraSequentialTail + for NeuraSequential +{ + type TailTrimmed = NeuraSequential::TailTrimmed>; + type TailPushed = + NeuraSequential::TailPushed>; + + fn trim_tail(self) -> Self::TailTrimmed { + NeuraSequential { + layer: self.layer, + child_network: Box::new(self.child_network.trim_tail()), + } + } + + fn push_tail(self, layer: T) -> Self::TailPushed { + NeuraSequential { + layer: self.layer, + child_network: Box::new(self.child_network.push_tail(layer)), + } + } +} diff --git a/src/train.rs b/src/train.rs index 78d5e63..4eede29 100644 --- a/src/train.rs +++ b/src/train.rs @@ -1,9 +1,4 @@ -use crate::{ - algebra::{NeuraVector, NeuraVectorSpace}, - derivable::NeuraLoss, - layer::NeuraLayer, - network::{sequential::NeuraSequential, NeuraTrainableNetwork}, -}; +use crate::{algebra::NeuraVectorSpace, derivable::NeuraLoss, network::NeuraTrainableNetwork}; pub trait NeuraGradientSolver> { fn get_gradient( @@ -11,14 +6,9 @@ pub trait NeuraGradientSolver Trainable::Delta; + ) -> Trainable::Gradient; - fn score( - &self, - trainable: &Trainable, - input: &Input, - target: &Target, - ) -> f64; + fn score(&self, trainable: &Trainable, input: &Input, target: &Target) -> f64; } #[non_exhaustive] @@ -32,24 +22,23 @@ impl NeuraBackprop { } } -impl, Loss: NeuraLoss + Clone> - NeuraGradientSolver for NeuraBackprop +impl< + Input, + Target, + 
Trainable: NeuraTrainableNetwork,
+    Loss: NeuraLoss + Clone,
+> NeuraGradientSolver for NeuraBackprop
 {
     fn get_gradient(
         &self,
         trainable: &Trainable,
         input: &Input,
         target: &Target,
-    ) -> Trainable::Delta {
+    ) -> Trainable::Gradient {
         trainable.backpropagate(input, target, self.loss.clone()).1
     }

-    fn score(
-        &self,
-        trainable: &Trainable,
-        input: &Input,
-        target: &Target,
-    ) -> f64 {
+    fn score(&self, trainable: &Trainable, input: &Input, target: &Target) -> f64 {
         let output = trainable.eval(&input);
         self.loss.eval(target, &output)
     }
@@ -187,14 +176,14 @@ impl NeuraBatchedTrainer {

 #[cfg(test)]
 mod test {
-    use nalgebra::{DMatrix, dmatrix, dvector};
+    use nalgebra::{dmatrix, dvector};

     use super::*;
     use crate::{
         assert_approx,
         derivable::{activation::Linear, loss::Euclidean, regularize::NeuraL0},
-        layer::NeuraDenseLayer,
-        network::sequential::NeuraSequentialTail,
+        layer::{NeuraLayer, NeuraDenseLayer},
+        network::sequential::{NeuraSequentialTail, NeuraSequential},
         neura_sequential,
     };
@@ -242,18 +231,14 @@ mod test {
         assert_approx!(0.48, intermediary[1], EPSILON);
         assert_approx!(0.191, network.eval(&input)[0], EPSILON);

-        assert_approx!(
-            0.327,
-            Euclidean.eval(&target, &network.eval(&input)),
-            0.001
-        );
+        assert_approx!(0.327, Euclidean.eval(&target, &network.eval(&input)), 0.001);

         let delta = network.eval(&input)[0] - target[0];

         let (gradient_first, gradient_second) =
             NeuraBackprop::new(Euclidean).get_gradient(&network, &input, &target);
         let gradient_first = gradient_first.0;
-        let gradient_second = gradient_second.0.0;
+        let gradient_second = gradient_second.0 .0;

         assert_approx!(gradient_second[0], intermediary[0] * delta, EPSILON);
         assert_approx!(gradient_second[1], intermediary[1] * delta, EPSILON);
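
For reference, the sketch below assembles the refactored API end to end, based on the xor example and the tests touched in this diff: `neura_sequential!` and `neura_layer!` declare layers with only their output sizes, `construct` (via the trait renamed from `NeuraSequentialBuild` to `NeuraSequentialConstruct`) threads the input shape through the chain, and `NeuraBackprop` returns the nested `(layer_gradient, Box<child_gradient>)` tuples used above. This is a minimal usage sketch and not part of the patch; the input and target values, variable names, and the final `apply_gradient` call (resolved through the `NeuraTrainableLayer` impl this patch adds for `NeuraSequential`) are illustrative assumptions.

use nalgebra::dvector;

use neuramethyst::derivable::activation::Relu;
use neuramethyst::derivable::loss::Euclidean;
use neuramethyst::prelude::*;
use neuramethyst::train::NeuraGradientSolver;

fn main() {
    // Layers only declare their output size; the input shape is resolved later.
    let mut network = neura_sequential![
        neura_layer!("dense", 4, Relu),
        neura_layer!("dense", 3, Relu),
        neura_layer!("dense", 1, Relu)
    ]
    // construct() walks the chain recursively, feeding each layer's output
    // shape into the next layer's input shape, and errors if a layer rejects it.
    .construct(NeuraShape::Vector(2))
    .unwrap();

    // Illustrative sample, assumed values in the style of the XOR example.
    let input = dvector![0.0, 1.0];
    let target = dvector![1.0];

    // Forward pass through every layer in sequence.
    let output = network.eval(&input);
    println!("before training: {}", output);

    // Backpropagation yields a gradient that mirrors the network's recursive
    // structure: (first_layer_gradient, Box<gradient_of_the_rest>).
    let gradient = NeuraBackprop::new(Euclidean).get_gradient(&network, &input, &target);
    let _first_layer_gradient = &gradient.0;

    // Applying the gradient updates every layer in place.
    network.apply_gradient(&gradient);
}

That nested tuple shape is also why the test above reads the second layer's weight gradient as `gradient_second.0 .0`: each `NeuraSequential` level contributes one `(layer, Box<child>)` pair, terminated by the unit network `()`.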