🔥 WIP implementation of NeuraNetwork for NeuraSequential

main
Shad Amethyst 2 years ago
parent 1f007bc986
commit ee4b57b00c

@@ -17,20 +17,23 @@ impl<Loss> NeuraBackprop<Loss> {
 impl<
     Input,
     Target,
-    Trainable: NeuraOldTrainableNetworkBase<Input>,
+    Trainable: NeuraTrainableLayerBase + NeuraLayer<Input> + NeuraNetworkRec,
     Loss: NeuraLoss<Trainable::Output, Target = Target> + Clone,
 > NeuraGradientSolver<Input, Target, Trainable> for NeuraBackprop<Loss>
 where
     <Loss as NeuraLoss<Trainable::Output>>::Output: ToPrimitive,
-    Trainable: for<'a> NeuraOldTrainableNetwork<Input, (&'a NeuraBackprop<Loss>, &'a Target)>,
+    // Trainable: NeuraOldTrainableNetworkBase<Input, Gradient = <Trainable as NeuraTrainableLayerBase>::Gradient>,
+    // Trainable: for<'a> NeuraOldTrainableNetwork<Input, (&'a NeuraBackprop<Loss>, &'a Target)>,
+    for<'a> (&'a NeuraBackprop<Loss>, &'a Target): BackpropRecurse<Input, Trainable, <Trainable as NeuraTrainableLayerBase>::Gradient>
 {
     fn get_gradient(
         &self,
         trainable: &Trainable,
         input: &Input,
         target: &Target,
-    ) -> Trainable::Gradient {
-        let (_, gradient) = trainable.traverse(input, &(self, target));
+    ) -> <Trainable as NeuraTrainableLayerBase>::Gradient {
+        let (_, gradient) = (self, target).recurse(trainable, input);
+        // let (_, gradient) = trainable.traverse(input, &(self, target));
 
         gradient
     }
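Worth noting in this first hunk: control is inverted. Previously the network drove the traversal (trainable.traverse(input, &(self, target))); now the (&NeuraBackprop, &Target) pair drives the recursion over the network through the new BackpropRecurse bound, with the old call left commented out. The recursion itself is implemented in the next hunk.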
@@ -119,14 +122,24 @@ where
     Network::NextNode: NeuraTrainableLayerEval<Network::NodeOutput>,
 {
     fn recurse(&self, network: &Network, input: &Input) -> (Input, Network::Gradient) {
+        let layer = network.get_layer();
+        // Get layer output
         let layer_input = network.map_input(input);
-        let (layer_output, layer_intermediary) =
-            network.get_layer().eval_training(layer_input.as_ref());
+        let (layer_output, layer_intermediary) = layer.eval_training(layer_input.as_ref());
         let output = network.map_output(input, &layer_output);
 
+        // Recurse
         let (epsilon_in, gradient_rec) = self.recurse(network.get_next(), output.as_ref());
 
-        todo!()
+        // Get layer outgoing gradient vector
+        let layer_epsilon_in = network.map_gradient_in(input, &epsilon_in);
+        let layer_epsilon_out = layer.backprop_layer(&layer_input, &layer_intermediary, &layer_epsilon_in);
+        let epsilon_out = network.map_gradient_out(input, &epsilon_in, &layer_epsilon_out);
+
+        // Get layer parameter gradient
+        let gradient = layer.get_gradient(&layer_input, &layer_intermediary, &layer_epsilon_in);
+
+        (epsilon_out.into_owned(), network.merge_gradient(gradient_rec, gradient))
     }
 }
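This recurse implementation is the heart of the commit: forward-evaluate the current layer, recurse into the tail of the network, then propagate the error signal (epsilon) back through the layer while merging its parameter gradient with the tail's. A minimal, self-contained sketch of the pattern, using hypothetical stand-in traits (Layer, Recurse, Chain) rather than neura's real ones, and f64 everywhere instead of generic vectors:

trait Layer {
    type Gradient;
    fn forward(&self, input: f64) -> f64;
    /// Returns (epsilon for the previous layer, gradient for this layer's parameters).
    fn backward(&self, input: f64, epsilon: f64) -> (f64, Self::Gradient);
}

/// y = w * x: a one-parameter "layer".
struct Scale(f64);

impl Layer for Scale {
    type Gradient = f64;

    fn forward(&self, input: f64) -> f64 {
        self.0 * input
    }

    fn backward(&self, input: f64, epsilon: f64) -> (f64, f64) {
        // d(loss)/d(input) = w * epsilon, d(loss)/d(w) = input * epsilon
        (self.0 * epsilon, input * epsilon)
    }
}

/// Cons-list of layers, mirroring NeuraSequential<Layer, ChildNetwork>.
struct Chain<L, N>(L, N);

trait Recurse {
    type Gradient;
    /// Returns (epsilon w.r.t. this node's input, merged gradient),
    /// like recurse() in the hunk above.
    fn recurse(&self, input: f64, epsilon_at_output: f64) -> (f64, Self::Gradient);
}

// Base case: the empty tail reflects the loss gradient back unchanged.
impl Recurse for () {
    type Gradient = ();

    fn recurse(&self, _input: f64, epsilon_at_output: f64) -> (f64, ()) {
        (epsilon_at_output, ())
    }
}

impl<L: Layer, N: Recurse> Recurse for Chain<L, N> {
    // Same shape as merge_gradient in the NeuraSequential impl later in
    // this commit: (tail gradient, boxed head gradient).
    type Gradient = (N::Gradient, Box<L::Gradient>);

    fn recurse(&self, input: f64, epsilon_at_output: f64) -> (f64, Self::Gradient) {
        // Forward through this layer, then recurse into the tail...
        let output = self.0.forward(input);
        let (epsilon_in, gradient_rec) = self.1.recurse(output, epsilon_at_output);
        // ...then backprop through this layer and merge the gradients.
        let (epsilon_out, gradient) = self.0.backward(input, epsilon_in);
        (epsilon_out, (gradient_rec, Box::new(gradient)))
    }
}

fn main() {
    let net = Chain(Scale(2.0), Chain(Scale(3.0), ()));
    // With input 1.0 and a loss gradient of 1.0 at the output:
    let (epsilon, gradient) = net.recurse(1.0, 1.0);
    assert_eq!(epsilon, 6.0); // d(output)/d(input) = 2.0 * 3.0
    println!("epsilon = {epsilon}, gradient = {gradient:?}");
}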

@@ -30,17 +30,18 @@ impl<
     F,
     Act: Clone + NeuraDerivable<f64>,
     Input,
-    Trainable: NeuraOldTrainableNetwork<Input, NeuraForwardPair<Act>, Output = DVector<F>>,
+    Trainable: NeuraTrainableLayerBase,
 > NeuraGradientSolver<Input, bool, Trainable> for NeuraForwardForward<Act>
 where
     F: ToPrimitive,
+    Trainable: NeuraOldTrainableNetwork<Input, NeuraForwardPair<Act>, Output = DVector<F>, Gradient = <Trainable as NeuraTrainableLayerBase>::Gradient>
 {
     fn get_gradient(
         &self,
         trainable: &Trainable,
         input: &Input,
         target: &bool,
-    ) -> Trainable::Gradient {
+    ) -> <Trainable as NeuraTrainableLayerBase>::Gradient {
         let target = *target;
         trainable.traverse(
@@ -37,7 +37,7 @@ pub trait NeuraGradientSolverTransient<Input, Layer: NeuraTrainableLayerEval<Inp
     ) -> Self::Output<To, Gradient>;
 }
 
-pub trait NeuraGradientSolver<Input, Target, Trainable: NeuraOldTrainableNetworkBase<Input>> {
+pub trait NeuraGradientSolver<Input, Target, Trainable: NeuraTrainableLayerBase> {
     fn get_gradient(
         &self,
         trainable: &Trainable,

@@ -1,4 +1,4 @@
-use super::{NeuraOldTrainableNetwork, NeuraOldTrainableNetworkBase};
+use super::*;
 use crate::{
     gradient_solver::NeuraGradientSolverTransient,
     layer::{
@@ -177,6 +177,30 @@ impl<Layer> From<Layer> for NeuraSequential<Layer, ()> {
     }
 }
 
+impl<Layer, ChildNetwork> NeuraNetworkBase for NeuraSequential<Layer, ChildNetwork> {
+    type Layer = Layer;
+
+    fn get_layer(&self) -> &Self::Layer {
+        &self.layer
+    }
+}
+
+impl<Layer: NeuraTrainableLayerBase, ChildNetwork: NeuraTrainableLayerBase> NeuraNetworkRec for NeuraSequential<Layer, ChildNetwork> {
+    type NextNode = ChildNetwork;
+
+    fn get_next(&self) -> &Self::NextNode {
+        &self.child_network
+    }
+
+    fn merge_gradient(
+        &self,
+        rec_gradient: <Self::NextNode as NeuraTrainableLayerBase>::Gradient,
+        layer_gradient: <Self::Layer as NeuraTrainableLayerBase>::Gradient
+    ) -> Self::Gradient {
+        (rec_gradient, Box::new(layer_gradient))
+    }
+}
+
 /// A utility to recursively create a NeuraSequential network, while writing it in a declarative and linear fashion.
 /// Note that this can quickly create big and unwieldy types.
 #[macro_export]
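As written, merge_gradient places the tail's gradient first and boxes the head layer's gradient, so the gradient of a sequential network nests from the last layer outward. A hedged illustration, assuming hypothetical per-layer gradient types G1, G2, G3 and that the unit tail's gradient is ():

// Hypothetical gradient types, for illustration only.
// For NeuraSequential<L1, NeuraSequential<L2, NeuraSequential<L3, ()>>>,
// merge_gradient as written above would produce, level by level:
//   level 3: ((), Box<G3>)
//   level 2: (((), Box<G3>), Box<G2>)
//   level 1: ((((), Box<G3>), Box<G2>), Box<G1>)
type MergedGradient<G1, G2, G3> = ((((), Box<G3>), Box<G2>), Box<G1>);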

@@ -1,5 +1,7 @@
 use std::borrow::Cow;
 
+use crate::prelude::NeuraTrainableLayerBase;
+
 use super::*;
 
 /// This trait has to be non-generic, to ensure that no downstream crate can implement it for foreign types,
@@ -46,10 +48,17 @@ where
     ) -> Cow<'a, NodeInput>;
 }
 
-pub trait NeuraNetworkRec: NeuraNetworkBase {
+pub trait NeuraNetworkRec: NeuraNetworkBase + NeuraTrainableLayerBase {
     /// The type of the child network; it does not need to implement `NeuraNetworkBase`,
     /// although many functions will expect it to be either `()` or an implementation of `NeuraNetworkRec`.
-    type NextNode;
+    type NextNode: NeuraTrainableLayerBase;
 
     fn get_next(&self) -> &Self::NextNode;
+
+    fn merge_gradient(
+        &self,
+        rec_gradient: <Self::NextNode as NeuraTrainableLayerBase>::Gradient,
+        layer_gradient: <Self::Layer as NeuraTrainableLayerBase>::Gradient
+    ) -> Self::Gradient
+    where Self::Layer: NeuraTrainableLayerBase;
 }
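One piece this diff does not show is the recursion's base case: with NextNode now bounded by NeuraTrainableLayerBase, the unit type () (the tail of every NeuraSequential) has to satisfy that trait with a trivial gradient. A speculative sketch, with method names assumed rather than taken from this diff:

// Speculative: NeuraTrainableLayerBase's methods are not shown in this
// commit, so the names below (default_gradient, apply_gradient) are
// assumptions about its shape.
impl NeuraTrainableLayerBase for () {
    type Gradient = ();

    fn default_gradient(&self) -> Self::Gradient {}

    fn apply_gradient(&mut self, _gradient: &Self::Gradient) {
        // Nothing to update: the empty tail has no parameters.
    }
}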
