Create NeuraNetwork traits, WIP

main
Shad Amethyst 2 years ago
parent 060b801ad6
commit d82cab788b

@@ -1,8 +1,8 @@
use num::ToPrimitive;
use crate::{
-    derivable::NeuraLoss, layer::NeuraTrainableLayerBackprop, layer::NeuraTrainableLayerSelf,
-    network::NeuraOldTrainableNetworkBase,
+    derivable::NeuraLoss, layer::*,
+    network::*,
};
use super::*;
@@ -91,9 +91,46 @@ impl<
    }
}
trait BackpropRecurse<Input, Network, Gradient> {
    fn recurse(&self, network: &Network, input: &Input) -> (Input, Gradient);
}

impl<Input, Loss: NeuraLoss<Input>> BackpropRecurse<Input, (), ()> for (&NeuraBackprop<Loss>, &Loss::Target) {
    fn recurse(&self, _network: &(), input: &Input) -> (Input, ()) {
        (self.0.loss.nabla(self.1, input), ())
    }
}
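The base case is what terminates the recursion: once the remaining network is `()`, the gradient of the loss with respect to the final output (the initial epsilon) is handed back up, and with no weights left the gradient is `()`. As a rough standalone illustration of what that initial epsilon looks like, using a hypothetical `SquaredError` stand-in rather than the crate's `NeuraLoss`/`Euclidean` API:

```rust
// Stand-in illustration only: `SquaredError` and its `nabla` are hypothetical,
// not the crate's `NeuraLoss` / `Euclidean` types.
struct SquaredError;

impl SquaredError {
    /// Gradient of sum((actual - target)^2) with respect to `actual`.
    fn nabla(&self, target: &[f64], actual: &[f64]) -> Vec<f64> {
        actual.iter().zip(target).map(|(a, t)| 2.0 * (a - t)).collect()
    }
}

fn main() {
    let loss = SquaredError;
    let target = vec![1.0, 0.0];
    let output = vec![0.5, 0.5];
    // This vector plays the role of the initial epsilon returned by the base case.
    let epsilon = loss.nabla(&target, &output);
    assert_eq!(epsilon, vec![-1.0, 1.0]);
}
```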
impl<
    Input: Clone,
    Network: NeuraNetworkRec + NeuraNetwork<Input> + NeuraTrainableLayerBase<Input>,
    Loss,
    Target,
> BackpropRecurse<Input, Network, Network::Gradient> for (&NeuraBackprop<Loss>, &Target)
where
    // Verify that we can traverse recursively
    for<'a> (&'a NeuraBackprop<Loss>, &'a Target): BackpropRecurse<
        Network::NodeOutput,
        Network::NextNode,
        <Network::NextNode as NeuraTrainableLayerBase<Network::NodeOutput>>::Gradient,
    >,
    // Verify that the current layer implements the right traits
    Network::Layer: NeuraTrainableLayerSelf<Network::LayerInput> + NeuraTrainableLayerBackprop<Network::LayerInput>,
    // Verify that the layer output can be cloned
    <Network::Layer as NeuraLayer<Network::LayerInput>>::Output: Clone,
    Network::NextNode: NeuraTrainableLayerBase<Network::NodeOutput>,
{
    fn recurse(&self, network: &Network, input: &Input) -> (Input, Network::Gradient) {
        let layer_input = network.map_input(input);
        let (layer_output, layer_intermediary) = network.get_layer().eval_training(layer_input.as_ref());
        let output = network.map_output(input, &layer_output);

        let (epsilon_in, gradient_rec) = self.recurse(network.get_next(), output.as_ref());

        todo!()
    }
}
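The recursive case is meant to push the node's output into the rest of the network, then fold the returned epsilon back through the enclosed layer; at this point in the commit the body is still a `todo!()`. Below is a minimal, self-contained model of that head-recursion pattern, using made-up `Chain` and `Scale` types (not the crate's traits) with plain `f64` scalings standing in for layers:

```rust
// Toy model of the recursion pattern, independent of the crate's traits.
// `Chain` plays the role of BackpropRecurse; layers are plain f64 scalings.
trait Chain<Input> {
    /// Returns the derivative of the chain's output with respect to `input`.
    fn backprop(&self, input: Input) -> Input;
}

/// Base case: an empty tail seeds the recursion with a derivative of 1
/// (the real code would return the loss gradient here instead).
impl Chain<f64> for () {
    fn backprop(&self, _input: f64) -> f64 {
        1.0
    }
}

/// A node holding one scaling layer and the rest of the chain.
struct Scale<Rest>(f64, Rest);

impl<Rest: Chain<f64>> Chain<f64> for Scale<Rest> {
    fn backprop(&self, input: f64) -> f64 {
        // Forward through this layer, recurse into the tail,
        // then apply this layer's local derivative (the chain rule).
        let output = self.0 * input;
        let epsilon_in = self.1.backprop(output);
        epsilon_in * self.0
    }
}

fn main() {
    // d/dx (3 * (2 * x)) = 6
    let network = Scale(2.0, Scale(3.0, ()));
    assert_eq!(network.backprop(1.0), 6.0);
}
```

The shape is the same as in the impl above: the base case for `()` terminates the chain, and the generic case composes its own local derivative with whatever the tail returned.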
#[cfg(test)]
mod test {
    use approx::assert_relative_eq;
    use nalgebra::dvector;
    use super::*;
    use crate::{
@@ -161,4 +198,12 @@ mod test {
        assert_relative_eq!(gradient1_actual.as_slice(), gradient1_expected.as_slice());
    }

    #[test]
    fn test_recursive() {
        let backprop = NeuraBackprop::new(Euclidean);
        let target = dvector![0.0];

        (&backprop, &target).recurse(&(), &dvector![0.0]);
    }
}

@@ -71,6 +71,7 @@ pub trait NeuraTrainableLayerBase<Input>: NeuraLayer<Input> {
    /// Applies `δW_l` to the weights of the layer
    fn apply_gradient(&mut self, gradient: &Self::Gradient);

    // TODO: move this into another trait
    fn eval_training(&self, input: &Input) -> (Self::Output, Self::IntermediaryRepr);

    /// Arbitrary computation that can be executed at the start of an epoch
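For context on why `eval_training` returns a pair: the `IntermediaryRepr` value lets the forward pass hand pre-computed data to the backward pass instead of recomputing it. A rough sketch of the idea with stand-in types (the `ToyLayer` below is hypothetical, not the crate's `NeuraTrainableLayerBase`), where the intermediary is the cached pre-activation:

```rust
// Stand-in sketch: a dense-like layer that caches its pre-activation value
// during the forward pass so the backward pass can reuse it.
struct ToyLayer {
    weight: f64,
    bias: f64,
}

impl ToyLayer {
    /// Forward pass that also returns the intermediary value
    /// (here, the pre-activation) needed later for backpropagation.
    fn eval_training(&self, input: f64) -> (f64, f64) {
        let pre_activation = self.weight * input + self.bias;
        let output = pre_activation.max(0.0); // ReLU
        (output, pre_activation)
    }

    /// Backward pass reusing the cached intermediary instead of recomputing it.
    fn backprop(&self, epsilon: f64, pre_activation: f64) -> f64 {
        let relu_derivative = if pre_activation > 0.0 { 1.0 } else { 0.0 };
        epsilon * relu_derivative * self.weight
    }
}

fn main() {
    let layer = ToyLayer { weight: 2.0, bias: -1.0 };
    let (output, intermediary) = layer.eval_training(1.5);
    assert_eq!(output, 2.0);
    assert_eq!(layer.backprop(1.0, intermediary), 2.0);
}
```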

@@ -5,6 +5,9 @@ use crate::{
// pub mod residual;
pub mod sequential;
mod traits;
pub use traits::*;
// TODO: extract regularize from this, so that we can drop the trait constraints on NeuraSequential's impl
pub trait NeuraOldTrainableNetworkBase<Input>: NeuraLayer<Input> {
    type Gradient: NeuraVectorSpace;

@@ -0,0 +1,44 @@
use std::borrow::Cow;
use super::*;
/// This trait has to be non-generic, to ensure that no downstream crate can implement it for foreign types,
/// as that would otherwise cause infinite recursion when dealing with `NeuraNetworkRec`.
pub trait NeuraNetworkBase {
    /// The type of the enclosed layer
    type Layer;

    fn get_layer(&self) -> &Self::Layer;
}
pub trait NeuraNetwork<NodeInput: Clone>: NeuraNetworkBase
where
    Self::Layer: NeuraLayer<Self::LayerInput>,
    <Self::Layer as NeuraLayer<Self::LayerInput>>::Output: Clone,
{
    /// The type of the input to `Self::Layer`
    type LayerInput: Clone;
    /// The type of the output of this node
    type NodeOutput: Clone;

    /// Maps the input of the network node to the input of the enclosed layer
    fn map_input<'a>(&'_ self, input: &'a NodeInput) -> Cow<'a, Self::LayerInput>;
    /// Maps the output of the enclosed layer to the output of the network node
    fn map_output<'a>(&'_ self, input: &'_ NodeInput, layer_output: &'a <Self::Layer as NeuraLayer<Self::LayerInput>>::Output) -> Cow<'a, Self::NodeOutput>;
    /// Maps a gradient in the format of the node's output into the format of the enclosed layer's output
    fn map_gradient_in<'a>(&'_ self, input: &'_ NodeInput, gradient_in: &'a Self::NodeOutput) -> Cow<'a, <Self::Layer as NeuraLayer<Self::LayerInput>>::Output>;
    /// Maps a gradient in the format of the enclosed layer's input into the format of the node's input
    fn map_gradient_out<'a>(&'_ self, input: &'_ NodeInput, gradient_in: &'_ Self::NodeOutput, gradient_out: &'a Self::LayerInput) -> Cow<'a, NodeInput>;
}
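A note on the `Cow` return types: a node that does not transform the data flowing through it can return `Cow::Borrowed` and avoid any allocation, while a node that reshapes or combines values returns `Cow::Owned`. A small sketch of that trade-off (the `PassThrough` and `Pad` types and their inherent `map_input` methods are hypothetical, not part of the crate; they only mirror the shape of `map_input` above):

```rust
use std::borrow::Cow;

// Hypothetical node types illustrating why the map_* methods return Cow:
// a pass-through node can borrow, a reshaping node has to allocate.
struct PassThrough;
struct Pad { width: usize }

impl PassThrough {
    /// No conversion needed: hand the caller a borrow, no allocation.
    fn map_input<'a>(&self, input: &'a Vec<f64>) -> Cow<'a, Vec<f64>> {
        Cow::Borrowed(input)
    }
}

impl Pad {
    /// The enclosed layer expects a padded vector, so an owned value must be built.
    fn map_input<'a>(&self, input: &'a Vec<f64>) -> Cow<'a, Vec<f64>> {
        let mut padded = input.clone();
        padded.resize(self.width, 0.0);
        Cow::Owned(padded)
    }
}

fn main() {
    let input = vec![1.0, 2.0];
    assert!(matches!(PassThrough.map_input(&input), Cow::Borrowed(_)));
    assert_eq!(Pad { width: 4 }.map_input(&input).len(), 4);
}
```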
pub trait NeuraNetworkRec: NeuraNetworkBase {
    /// The type of the child network; it does not need to implement `NeuraNetworkBase`,
    /// although many functions will expect it to be either `()` or an implementation of `NeuraNetworkRec`.
    type NextNode;

    fn get_next(&self) -> &Self::NextNode;
}
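`NeuraNetworkRec` is what lets generic code peel the network apart one node at a time through `get_next()`, with `()` as the terminator, which is the shape the `BackpropRecurse` impls above recurse over. A self-contained sketch of that traversal, using made-up `NetworkRec`, `CountNodes`, and `Node` stand-ins rather than the crate's traits:

```rust
// Stand-in versions of the recursion traits, just enough to show the pattern.
trait NetworkRec {
    type NextNode;
    fn get_next(&self) -> &Self::NextNode;
}

/// Hypothetical helper trait that walks a chain and counts its nodes.
trait CountNodes {
    fn count_nodes(&self) -> usize;
}

/// Base case: the empty tail `()` contains no nodes.
impl CountNodes for () {
    fn count_nodes(&self) -> usize {
        0
    }
}

/// A node pairing one layer with the rest of the chain.
struct Node<Layer, Next> {
    layer: Layer,
    next: Next,
}

impl<Layer, Next> NetworkRec for Node<Layer, Next> {
    type NextNode = Next;
    fn get_next(&self) -> &Self::NextNode {
        &self.next
    }
}

/// Recursive case: one node plus however many the tail contains.
impl<Layer, Next: CountNodes> CountNodes for Node<Layer, Next> {
    fn count_nodes(&self) -> usize {
        1 + self.get_next().count_nodes()
    }
}

fn main() {
    let network = Node { layer: "dense", next: Node { layer: "relu", next: () } };
    assert_eq!(network.count_nodes(), 2);
}
```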