♻️ Split NeuraTrainableLayerBase into ~ and NeuraTrainableLayerEval

Branch: main
Author: Shad Amethyst, 2 years ago
Parent: d82cab788b
Commit: 1f007bc986

@@ -1,9 +1,6 @@
 use num::ToPrimitive;

-use crate::{
-    derivable::NeuraLoss, layer::*,
-    network::*,
-};
+use crate::{derivable::NeuraLoss, layer::*, network::*};

 use super::*;
@@ -83,11 +80,9 @@ impl<
     fn map_epsilon<From, To, Gradient, Cb: Fn(From) -> To>(
         &self,
         rec_opt_output: Self::Output<From, Gradient>,
-        callback: Cb
+        callback: Cb,
     ) -> Self::Output<To, Gradient> {
-        (
-            callback(rec_opt_output.0), rec_opt_output.1
-        )
+        (callback(rec_opt_output.0), rec_opt_output.1)
     }
 }
@@ -95,30 +90,38 @@ trait BackpropRecurse<Input, Network, Gradient> {
     fn recurse(&self, network: &Network, input: &Input) -> (Input, Gradient);
 }

-impl<Input, Loss: NeuraLoss<Input>> BackpropRecurse<Input, (), ()> for (&NeuraBackprop<Loss>, &Loss::Target) {
+impl<Input, Loss: NeuraLoss<Input>> BackpropRecurse<Input, (), ()>
+    for (&NeuraBackprop<Loss>, &Loss::Target)
+{
     fn recurse(&self, _network: &(), input: &Input) -> (Input, ()) {
         (self.0.loss.nabla(self.1, input), ())
     }
 }

 impl<
     Input: Clone,
-    Network: NeuraNetworkRec + NeuraNetwork<Input> + NeuraTrainableLayerBase<Input>,
+    Network: NeuraNetworkRec + NeuraNetwork<Input> + NeuraTrainableLayerEval<Input>,
     Loss,
-    Target
+    Target,
 > BackpropRecurse<Input, Network, Network::Gradient> for (&NeuraBackprop<Loss>, &Target)
 where
     // Verify that we can traverse recursively
-    for<'a> (&'a NeuraBackprop<Loss>, &'a Target): BackpropRecurse<Network::NodeOutput, Network::NextNode, <Network::NextNode as NeuraTrainableLayerBase<Network::NodeOutput>>::Gradient>,
+    for<'a> (&'a NeuraBackprop<Loss>, &'a Target): BackpropRecurse<
+        Network::NodeOutput,
+        Network::NextNode,
+        <Network::NextNode as NeuraTrainableLayerBase>::Gradient,
+    >,
     // Verify that the current layer implements the right traits
-    Network::Layer: NeuraTrainableLayerSelf<Network::LayerInput> + NeuraTrainableLayerBackprop<Network::LayerInput>,
+    Network::Layer: NeuraTrainableLayerSelf<Network::LayerInput>
+        + NeuraTrainableLayerBackprop<Network::LayerInput>,
     // Verify that the layer output can be cloned
     <Network::Layer as NeuraLayer<Network::LayerInput>>::Output: Clone,
-    Network::NextNode: NeuraTrainableLayerBase<Network::NodeOutput>,
+    Network::NextNode: NeuraTrainableLayerEval<Network::NodeOutput>,
 {
     fn recurse(&self, network: &Network, input: &Input) -> (Input, Network::Gradient) {
         let layer_input = network.map_input(input);
-        let (layer_output, layer_intermediary) = network.get_layer().eval_training(layer_input.as_ref());
+        let (layer_output, layer_intermediary) =
+            network.get_layer().eval_training(layer_input.as_ref());
         let output = network.map_output(input, &layer_output);
         let (epsilon_in, gradient_rec) = self.recurse(network.get_next(), output.as_ref());

@@ -138,7 +138,7 @@ impl<
     fn map_epsilon<From, To, Gradient, Cb: Fn(From) -> To>(
         &self,
         rec_opt_output: Self::Output<From, Gradient>,
-        _callback: Cb
+        _callback: Cb,
     ) -> Self::Output<To, Gradient> {
         rec_opt_output
     }
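The two `BackpropRecurse` impls above form a type-level recursion: the `()` impl is the base case at the end of the network, and the generic impl peels off one node and delegates to the tail, so the entire backward pass is unrolled at compile time. A minimal, self-contained sketch of the same cons-list pattern (the `Recurse` trait, `Cons` type, and `depth` method are illustrative stand-ins, not items from this crate):

```rust
// Base case and recursive case of a trait over a nested-tuple network,
// mirroring how `BackpropRecurse` is implemented for `()` and for nodes.
trait Recurse {
    fn depth(&self) -> usize;
}

// Base case: the empty tail `()` terminates the recursion.
impl Recurse for () {
    fn depth(&self) -> usize {
        0
    }
}

// Recursive case: a node holding a layer and the rest of the network.
struct Cons<Head, Rest>(Head, Rest);

impl<Head, Rest: Recurse> Recurse for Cons<Head, Rest> {
    fn depth(&self) -> usize {
        1 + self.1.depth()
    }
}

fn main() {
    // Monomorphization statically unrolls the three nested calls.
    let net = Cons("dense", Cons("dropout", Cons("softmax", ())));
    assert_eq!(net.depth(), 3);
}
```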

@@ -5,7 +5,7 @@ mod forward_forward;
 pub use forward_forward::NeuraForwardForward;

 use crate::{
-    layer::NeuraTrainableLayerBase,
+    layer::{NeuraTrainableLayerBase, NeuraTrainableLayerEval},
     network::{NeuraOldTrainableNetwork, NeuraOldTrainableNetworkBase},
 };
@@ -17,7 +17,7 @@ pub trait NeuraGradientSolverFinal<LayerOutput>: NeuraGradientSolverBase {
     fn eval_final(&self, output: LayerOutput) -> Self::Output<LayerOutput, ()>;
 }

-pub trait NeuraGradientSolverTransient<Input, Layer: NeuraTrainableLayerBase<Input>>:
+pub trait NeuraGradientSolverTransient<Input, Layer: NeuraTrainableLayerEval<Input>>:
     NeuraGradientSolverBase
 {
     fn eval_layer<NetworkGradient, RecGradient>(
@@ -33,7 +33,7 @@ pub trait NeuraGradientSolverTransient<Input, Layer: NeuraTrainableLayerBase<Input>>:
     fn map_epsilon<From, To, Gradient, Cb: Fn(From) -> To>(
         &self,
         rec_opt_output: Self::Output<From, Gradient>,
-        callback: Cb
+        callback: Cb,
     ) -> Self::Output<To, Gradient>;
 }

@@ -175,17 +175,9 @@ impl<
         F: Float + std::fmt::Debug + 'static + std::ops::AddAssign + std::ops::MulAssign,
         Act: NeuraDerivable<F>,
         Reg: NeuraDerivable<F>,
-    > NeuraTrainableLayerBase<DVector<F>> for NeuraDenseLayer<F, Act, Reg>
+    > NeuraTrainableLayerBase for NeuraDenseLayer<F, Act, Reg>
 {
     type Gradient = (DMatrix<F>, DVector<F>);
-    type IntermediaryRepr = DVector<F>; // pre-activation values
-
-    fn eval_training(&self, input: &DVector<F>) -> (Self::Output, Self::IntermediaryRepr) {
-        let evaluated = &self.weights * input + &self.bias;
-        let output = evaluated.map(|x| self.activation.eval(x));
-        (output, evaluated)
-    }

     fn default_gradient(&self) -> Self::Gradient {
         (
@@ -200,6 +192,22 @@ impl<
     }
 }

+impl<
+        F: Float + std::fmt::Debug + 'static + std::ops::AddAssign + std::ops::MulAssign,
+        Act: NeuraDerivable<F>,
+        Reg: NeuraDerivable<F>,
+    > NeuraTrainableLayerEval<DVector<F>> for NeuraDenseLayer<F, Act, Reg>
+{
+    type IntermediaryRepr = DVector<F>; // pre-activation values
+
+    fn eval_training(&self, input: &DVector<F>) -> (Self::Output, Self::IntermediaryRepr) {
+        let evaluated = &self.weights * input + &self.bias;
+        let output = evaluated.map(|x| self.activation.eval(x));
+        (output, evaluated)
+    }
+}
+
 impl<
         F: Float + std::fmt::Debug + 'static + std::ops::AddAssign + std::ops::MulAssign,
         Act: NeuraDerivable<F>,
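The dense layer's `IntermediaryRepr` is its pre-activation vector: `eval_training` computes `Wx + b` once and returns it alongside the activated output, so backprop can evaluate the activation derivative without redoing the affine step. A scalar toy of the same caching (all names here are illustrative, not crate items):

```rust
// Scalar toy of the caching that `eval_training` performs above.
struct TinyDense {
    weight: f64,
    bias: f64,
}

impl TinyDense {
    fn activation(z: f64) -> f64 {
        z.max(0.0) // ReLU
    }

    fn activation_derivative(z: f64) -> f64 {
        if z > 0.0 {
            1.0
        } else {
            0.0
        }
    }

    /// Returns (output, pre-activation); the pre-activation plays the
    /// role of `IntermediaryRepr` for the real dense layer.
    fn eval_training(&self, input: f64) -> (f64, f64) {
        let pre = self.weight * input + self.bias;
        (Self::activation(pre), pre)
    }
}

fn main() {
    let layer = TinyDense { weight: 2.0, bias: -1.0 };
    let (output, pre) = layer.eval_training(3.0);
    assert_eq!((output, pre), (5.0, 5.0));
    // Backprop can evaluate f'(pre) without redoing the affine step.
    assert_eq!(TinyDense::activation_derivative(pre), 1.0);
}
```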

@@ -63,13 +63,8 @@ impl<R: Rng, F: Float> NeuraLayer<DVector<F>> for NeuraDropoutLayer<R> {
     }
 }

-impl<R: Rng, F: Float> NeuraTrainableLayerBase<DVector<F>> for NeuraDropoutLayer<R> {
+impl<R: Rng> NeuraTrainableLayerBase for NeuraDropoutLayer<R> {
     type Gradient = ();
-    type IntermediaryRepr = ();
-
-    fn eval_training(&self, input: &DVector<F>) -> (Self::Output, Self::IntermediaryRepr) {
-        (self.eval(input), ())
-    }

     fn default_gradient(&self) -> Self::Gradient {
         ()
@@ -103,6 +98,14 @@ impl<R: Rng, F: Float> NeuraTrainableLayerBase<DVector<F>> for NeuraDropoutLayer<R> {
     }
 }

+impl<R: Rng, F: Float> NeuraTrainableLayerEval<DVector<F>> for NeuraDropoutLayer<R> {
+    type IntermediaryRepr = ();
+
+    fn eval_training(&self, input: &DVector<F>) -> (Self::Output, Self::IntermediaryRepr) {
+        (self.eval(input), ())
+    }
+}
+
 impl<R: Rng, F: Float> NeuraTrainableLayerSelf<DVector<F>> for NeuraDropoutLayer<R> {
     fn regularize_layer(&self) -> Self::Gradient {
         ()
@@ -144,9 +147,7 @@ mod test {
         .unwrap();

         for _ in 0..100 {
-            <NeuraDropoutLayer<_> as NeuraTrainableLayerBase<DVector<f64>>>::prepare_layer(
-                &mut layer, true,
-            );
+            layer.prepare_layer(true);
             assert!(layer.multiplier.is_finite());
             assert!(!layer.multiplier.is_nan());
         }
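The test shrinks because `prepare_layer` now lives on the input-independent base trait: previously `NeuraTrainableLayerBase<DVector<F>>` was implemented for every `F: Float`, so the call had to pin down the input type with fully qualified syntax. A sketch of the ambiguity the split removes (the `OldBase`/`NewBase` traits and `Dropout` type are hypothetical, not crate items):

```rust
// Old world: one impl per input type makes `prepare_layer` ambiguous.
trait OldBase<Input> {
    fn prepare_layer(&mut self, is_training: bool);
}

// New world: a single input-independent trait.
trait NewBase {
    fn prepare_layer(&mut self, is_training: bool);
}

struct Dropout;

impl OldBase<Vec<f32>> for Dropout {
    fn prepare_layer(&mut self, _is_training: bool) {}
}
impl OldBase<Vec<f64>> for Dropout {
    fn prepare_layer(&mut self, _is_training: bool) {}
}
impl NewBase for Dropout {
    fn prepare_layer(&mut self, _is_training: bool) {}
}

fn main() {
    let mut layer = Dropout;
    // Under the old design, `Input` cannot be inferred, so the call
    // needs fully qualified syntax:
    <Dropout as OldBase<Vec<f64>>>::prepare_layer(&mut layer, true);
    // Under the new design there is only one impl to pick; with just
    // `NewBase` in scope, `layer.prepare_layer(true)` resolves directly.
    NewBase::prepare_layer(&mut layer, true);
}
```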

@@ -32,11 +32,8 @@ impl<Input, Layer: NeuraLayer<Input>> NeuraLayer<Input> for NeuraLockLayer<Layer> {
     }
 }

-impl<Input, Layer: NeuraTrainableLayerBase<Input>> NeuraTrainableLayerBase<Input>
-    for NeuraLockLayer<Layer>
-{
+impl<Layer: NeuraTrainableLayerBase> NeuraTrainableLayerBase for NeuraLockLayer<Layer> {
     type Gradient = ();
-    type IntermediaryRepr = Layer::IntermediaryRepr;

     fn default_gradient(&self) -> Self::Gradient {
         ()
@@ -45,13 +42,19 @@ impl<Input, Layer: NeuraTrainableLayerBase<Input>> NeuraTrainableLayerBase<Input>
     fn apply_gradient(&mut self, _gradient: &Self::Gradient) {
         // Noop
     }
+}
+
+impl<Input, Layer: NeuraTrainableLayerEval<Input>> NeuraTrainableLayerEval<Input>
+    for NeuraLockLayer<Layer>
+{
+    type IntermediaryRepr = Layer::IntermediaryRepr;

     fn eval_training(&self, input: &Input) -> (Self::Output, Self::IntermediaryRepr) {
         self.layer.eval_training(input)
     }
 }

-impl<Input, Layer: NeuraTrainableLayerBase<Input>> NeuraTrainableLayerSelf<Input>
+impl<Input, Layer: NeuraTrainableLayerEval<Input>> NeuraTrainableLayerSelf<Input>
     for NeuraLockLayer<Layer>
 {
     fn regularize_layer(&self) -> Self::Gradient {

@@ -59,30 +59,32 @@ pub trait NeuraPartialLayer {
     fn construct(self, input_shape: NeuraShape) -> Result<Self::Constructed, Self::Err>;
 }

-pub trait NeuraTrainableLayerBase<Input>: NeuraLayer<Input> {
+pub trait NeuraTrainableLayerBase {
     /// The representation of the layer gradient as a vector space
     type Gradient: NeuraVectorSpace;

-    /// An intermediary object type to be passed to the various training methods
-    type IntermediaryRepr;
-
     fn default_gradient(&self) -> Self::Gradient;

     /// Applies `δW_l` to the weights of the layer
     fn apply_gradient(&mut self, gradient: &Self::Gradient);

-    // TODO: move this into another trait
-    fn eval_training(&self, input: &Input) -> (Self::Output, Self::IntermediaryRepr);
-
     /// Arbitrary computation that can be executed at the start of an epoch
     #[allow(unused_variables)]
     #[inline(always)]
     fn prepare_layer(&mut self, is_training: bool) {}
 }

+pub trait NeuraTrainableLayerEval<Input>: NeuraTrainableLayerBase + NeuraLayer<Input> {
+    /// An intermediary object type to be passed to the various training methods
+    type IntermediaryRepr;
+
+    // TODO: move this into another trait
+    fn eval_training(&self, input: &Input) -> (Self::Output, Self::IntermediaryRepr);
+}
+
 /// Contains methods relative to a layer's ability to compute its own weights gradients,
 /// given the derivative of the output variables.
-pub trait NeuraTrainableLayerSelf<Input>: NeuraTrainableLayerBase<Input> {
+pub trait NeuraTrainableLayerSelf<Input>: NeuraTrainableLayerEval<Input> {
     /// Computes the regularization
     fn regularize_layer(&self) -> Self::Gradient;
@@ -117,7 +119,7 @@ pub trait NeuraTrainableLayerSelf<Input>: NeuraTrainableLayerBase<Input> {
 // }
 // }

-pub trait NeuraTrainableLayerBackprop<Input>: NeuraTrainableLayerBase<Input> {
+pub trait NeuraTrainableLayerBackprop<Input>: NeuraTrainableLayerEval<Input> {
     /// Computes the backpropagation term and the derivative of the internal weights,
     /// using the `input` vector outputted by the previous layer and the backpropagation term `epsilon` of the next layer.
     ///
@@ -137,9 +139,8 @@ pub trait NeuraTrainableLayerBackprop<Input>: NeuraTrainableLayerBase<Input> {
     ) -> Input;
 }

-impl<Input: Clone> NeuraTrainableLayerBase<Input> for () {
+impl NeuraTrainableLayerBase for () {
     type Gradient = ();
-    type IntermediaryRepr = ();

     #[inline(always)]
     fn default_gradient(&self) -> Self::Gradient {
@@ -150,7 +151,12 @@ impl<Input: Clone> NeuraTrainableLayerBase<Input> for () {
     fn apply_gradient(&mut self, _gradient: &Self::Gradient) {
         // Noop
     }
+}
+
+impl<Input: Clone> NeuraTrainableLayerEval<Input> for () {
+    type IntermediaryRepr = ();

+    #[inline(always)]
     fn eval_training(&self, input: &Input) -> (Self::Output, Self::IntermediaryRepr) {
         (self.eval(input), ())
     }
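With this split, a layer author implements `NeuraTrainableLayerBase` once, covering gradient bookkeeping, and `NeuraTrainableLayerEval<Input>` once per supported input type, covering the training-time forward pass. A minimal sketch of the resulting shape, using simplified stand-in traits (no `NeuraVectorSpace` bound, `Vec<f64>` instead of nalgebra types; nothing here is a crate item):

```rust
trait Layer<Input> {
    type Output;
    fn eval(&self, input: &Input) -> Self::Output;
}

// Input-independent part: gradient type and weight updates.
trait TrainableBase {
    type Gradient;
    fn default_gradient(&self) -> Self::Gradient;
    fn apply_gradient(&mut self, gradient: &Self::Gradient);
    fn prepare_layer(&mut self, _is_training: bool) {}
}

// Input-dependent part: training-time forward pass plus intermediary.
trait TrainableEval<Input>: TrainableBase + Layer<Input> {
    type IntermediaryRepr;
    fn eval_training(&self, input: &Input) -> (Self::Output, Self::IntermediaryRepr);
}

// A toy "scale by a learned factor" layer.
struct Scale {
    factor: f64,
}

impl Layer<Vec<f64>> for Scale {
    type Output = Vec<f64>;
    fn eval(&self, input: &Vec<f64>) -> Vec<f64> {
        input.iter().map(|x| x * self.factor).collect()
    }
}

impl TrainableBase for Scale {
    type Gradient = f64;
    fn default_gradient(&self) -> f64 {
        0.0
    }
    fn apply_gradient(&mut self, gradient: &f64) {
        self.factor += gradient;
    }
}

impl TrainableEval<Vec<f64>> for Scale {
    // This layer needs no cached intermediary.
    type IntermediaryRepr = ();
    fn eval_training(&self, input: &Vec<f64>) -> (Vec<f64>, ()) {
        (self.eval(input), ())
    }
}

fn main() {
    let mut layer = Scale { factor: 2.0 };
    let (out, _repr) = layer.eval_training(&vec![1.0, 2.0]);
    assert_eq!(out, vec![2.0, 4.0]);
    layer.apply_gradient(&0.5);
    assert_eq!(layer.factor, 2.5);
}
```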

@@ -56,9 +56,8 @@ impl<F: Float + Scalar> NeuraLayer<DVector<F>> for NeuraNormalizeLayer {
     }
 }

-impl<F: Float + Scalar + NumAssignOps> NeuraTrainableLayerBase<DVector<F>> for NeuraNormalizeLayer {
+impl NeuraTrainableLayerBase for NeuraNormalizeLayer {
     type Gradient = ();
-    type IntermediaryRepr = (DMatrix<F>, F); // Partial Jacobian matrix (without the Kronecker term) and stddev

     fn default_gradient(&self) -> Self::Gradient {
         ()
@@ -67,6 +66,10 @@ impl<F: Float + Scalar + NumAssignOps> NeuraTrainableLayerBase<DVector<F>> for NeuraNormalizeLayer {
     fn apply_gradient(&mut self, _gradient: &Self::Gradient) {
         // Noop
     }
+}
+
+impl<F: Float + Scalar + NumAssignOps> NeuraTrainableLayerEval<DVector<F>> for NeuraNormalizeLayer {
+    type IntermediaryRepr = (DMatrix<F>, F); // Partial Jacobian matrix (without the Kronecker term) and stddev

     fn eval_training(&self, input: &DVector<F>) -> (Self::Output, Self::IntermediaryRepr) {
         let (mean, variance, len) = mean_variance(input);

@@ -56,9 +56,8 @@ impl NeuraPartialLayer for NeuraSoftmaxLayer {
     }
 }

-impl<F: Float + Scalar + NumAssignOps> NeuraTrainableLayerBase<DVector<F>> for NeuraSoftmaxLayer {
+impl NeuraTrainableLayerBase for NeuraSoftmaxLayer {
     type Gradient = ();
-    type IntermediaryRepr = Self::Output; // Result of self.eval

     fn default_gradient(&self) -> Self::Gradient {
         ()
@@ -67,6 +66,10 @@ impl<F: Float + Scalar + NumAssignOps> NeuraTrainableLayerBase<DVector<F>> for NeuraSoftmaxLayer {
     fn apply_gradient(&mut self, _gradient: &Self::Gradient) {
         // Noop
     }
+}
+
+impl<F: Float + Scalar + NumAssignOps> NeuraTrainableLayerEval<DVector<F>> for NeuraSoftmaxLayer {
+    type IntermediaryRepr = Self::Output; // Result of self.eval

     fn eval_training(&self, input: &DVector<F>) -> (Self::Output, Self::IntermediaryRepr) {
         let res = self.eval(input);

@@ -1,5 +1,7 @@
 use crate::{
-    algebra::NeuraVectorSpace, gradient_solver::{NeuraGradientSolverBase, NeuraGradientSolverFinal}, layer::NeuraLayer,
+    algebra::NeuraVectorSpace,
+    gradient_solver::{NeuraGradientSolverBase, NeuraGradientSolverFinal},
+    layer::NeuraLayer,
 };

 // pub mod residual;

@@ -1,5 +1,5 @@
 use super::*;
-use crate::layer::NeuraTrainableLayerBackprop;
+use crate::layer::{NeuraTrainableLayerBackprop, NeuraTrainableLayerEval};

 impl<Input, Layer: NeuraLayer<Input>, ChildNetwork: NeuraLayer<Layer::Output>> NeuraLayer<Input>
     for NeuraSequential<Layer, ChildNetwork>
@@ -11,14 +11,10 @@ impl<Input, Layer: NeuraLayer<Input>, ChildNetwork: NeuraLayer<Layer::Output>> NeuraLayer<Input>
     }
 }

-impl<
-    Input,
-    Layer: NeuraTrainableLayerBase<Input>,
-    ChildNetwork: NeuraTrainableLayerBase<Layer::Output>,
-> NeuraTrainableLayerBase<Input> for NeuraSequential<Layer, ChildNetwork>
+impl<Layer: NeuraTrainableLayerBase, ChildNetwork: NeuraTrainableLayerBase> NeuraTrainableLayerBase
+    for NeuraSequential<Layer, ChildNetwork>
 {
     type Gradient = (Layer::Gradient, Box<ChildNetwork::Gradient>);
-    type IntermediaryRepr = (Layer::IntermediaryRepr, Box<ChildNetwork::IntermediaryRepr>);

     fn default_gradient(&self) -> Self::Gradient {
         (
@@ -27,16 +23,6 @@ impl<
         )
     }

-    fn eval_training(&self, input: &Input) -> (Self::Output, Self::IntermediaryRepr) {
-        let (layer_output, layer_intermediary) = self.layer.eval_training(input);
-        let (child_output, child_intermediary) = self.child_network.eval_training(&layer_output);
-        (
-            child_output,
-            (layer_intermediary, Box::new(child_intermediary)),
-        )
-    }
-
     fn prepare_layer(&mut self, is_training: bool) {
         self.layer.prepare_layer(is_training);
         self.child_network.prepare_layer(is_training);
@@ -48,6 +34,25 @@ impl<
     }
 }

+impl<
+        Input,
+        Layer: NeuraTrainableLayerEval<Input>,
+        ChildNetwork: NeuraTrainableLayerEval<Layer::Output>,
+    > NeuraTrainableLayerEval<Input> for NeuraSequential<Layer, ChildNetwork>
+{
+    type IntermediaryRepr = (Layer::IntermediaryRepr, Box<ChildNetwork::IntermediaryRepr>);
+
+    fn eval_training(&self, input: &Input) -> (Self::Output, Self::IntermediaryRepr) {
+        let (layer_output, layer_intermediary) = self.layer.eval_training(input);
+        let (child_output, child_intermediary) = self.child_network.eval_training(&layer_output);
+        (
+            child_output,
+            (layer_intermediary, Box::new(child_intermediary)),
+        )
+    }
+}
+
 impl<
         Input,
         Layer: NeuraTrainableLayerSelf<Input>,
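Because `NeuraSequential` is a cons list, both associated types nest: `eval_training` pairs the head layer's intermediary with a boxed intermediary of the tail, and `Gradient` nests the same way. A runnable sketch of that nesting with unit stand-ins for the layers (none of these types come from the crate):

```rust
// A cons-list network node, like `NeuraSequential<Layer, ChildNetwork>`.
struct Seq<Layer, Child> {
    layer: Layer,
    child: Child,
}

trait Eval {
    type Intermediary;
    fn eval_training(&self) -> Self::Intermediary;
}

// Base case: the empty tail produces no intermediary.
impl Eval for () {
    type Intermediary = ();
    fn eval_training(&self) -> Self::Intermediary {}
}

// Stand-in layers whose intermediaries are just labels.
struct Dense;
struct Softmax;

impl Eval for Dense {
    type Intermediary = &'static str;
    fn eval_training(&self) -> &'static str {
        "pre-activations"
    }
}

impl Eval for Softmax {
    type Intermediary = &'static str;
    fn eval_training(&self) -> &'static str {
        "softmax output"
    }
}

impl<Layer: Eval, Child: Eval> Eval for Seq<Layer, Child> {
    // Mirrors `(Layer::IntermediaryRepr, Box<ChildNetwork::IntermediaryRepr>)`.
    type Intermediary = (Layer::Intermediary, Box<Child::Intermediary>);
    fn eval_training(&self) -> Self::Intermediary {
        (self.layer.eval_training(), Box::new(self.child.eval_training()))
    }
}

fn main() {
    let net = Seq {
        layer: Dense,
        child: Seq {
            layer: Softmax,
            child: (),
        },
    };
    // The intermediary nests exactly like the network: (head, Box<tail>).
    let repr: (&str, Box<(&str, Box<()>)>) = net.eval_training();
    assert_eq!(repr.0, "pre-activations");
    assert_eq!(repr.1 .0, "softmax output");
}
```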

@@ -1,8 +1,9 @@
 use super::{NeuraOldTrainableNetwork, NeuraOldTrainableNetworkBase};

 use crate::{
-    gradient_solver::{NeuraGradientSolverTransient},
+    gradient_solver::NeuraGradientSolverTransient,
     layer::{
-        NeuraLayer, NeuraPartialLayer, NeuraShape, NeuraTrainableLayerBase, NeuraTrainableLayerSelf,
+        NeuraLayer, NeuraPartialLayer, NeuraShape, NeuraTrainableLayerBase,
+        NeuraTrainableLayerEval, NeuraTrainableLayerSelf,
     },
 };
@@ -81,7 +82,7 @@ impl<Layer, ChildNetwork> NeuraSequential<Layer, ChildNetwork> {
 impl<
     Input,
-    Layer: NeuraTrainableLayerBase<Input> + NeuraTrainableLayerSelf<Input>,
+    Layer: NeuraTrainableLayerEval<Input> + NeuraTrainableLayerSelf<Input>,
     ChildNetwork: NeuraOldTrainableNetworkBase<Layer::Output>,
 > NeuraOldTrainableNetworkBase<Input> for NeuraSequential<Layer, ChildNetwork>
 {
@@ -141,7 +142,7 @@ impl<Input: Clone> NeuraOldTrainableNetworkBase<Input> for () {
 impl<
     Input,
-    Layer: NeuraTrainableLayerBase<Input> + NeuraTrainableLayerSelf<Input>,
+    Layer: NeuraTrainableLayerEval<Input> + NeuraTrainableLayerSelf<Input>,
     Optimizer: NeuraGradientSolverTransient<Input, Layer>,
     ChildNetwork: NeuraOldTrainableNetworkBase<Layer::Output>,
 > NeuraOldTrainableNetwork<Input, Optimizer> for NeuraSequential<Layer, ChildNetwork>

@@ -14,7 +14,7 @@ pub trait NeuraNetworkBase {
 pub trait NeuraNetwork<NodeInput: Clone>: NeuraNetworkBase
 where
     Self::Layer: NeuraLayer<Self::LayerInput>,
-    <Self::Layer as NeuraLayer<Self::LayerInput>>::Output: Clone
+    <Self::Layer as NeuraLayer<Self::LayerInput>>::Output: Clone,
 {
     /// The type of the input to `Self::Layer`
     type LayerInput: Clone;
@@ -25,12 +25,25 @@ where
     /// Maps the input of network node to the enclosed layer
     fn map_input<'a>(&'_ self, input: &'a NodeInput) -> Cow<'a, Self::LayerInput>;

     /// Maps the output of the enclosed layer to the output of the network node
-    fn map_output<'a>(&'_ self, input: &'_ NodeInput, layer_output: &'a <Self::Layer as NeuraLayer<Self::LayerInput>>::Output) -> Cow<'a, Self::NodeOutput>;
+    fn map_output<'a>(
+        &'_ self,
+        input: &'_ NodeInput,
+        layer_output: &'a <Self::Layer as NeuraLayer<Self::LayerInput>>::Output,
+    ) -> Cow<'a, Self::NodeOutput>;

     /// Maps a gradient in the format of the node's output into the format of the enclosed layer's output
-    fn map_gradient_in<'a>(&'_ self, input: &'_ NodeInput, gradient_in: &'a Self::NodeOutput) -> Cow<'a, <Self::Layer as NeuraLayer<Self::LayerInput>>::Output>;
+    fn map_gradient_in<'a>(
+        &'_ self,
+        input: &'_ NodeInput,
+        gradient_in: &'a Self::NodeOutput,
+    ) -> Cow<'a, <Self::Layer as NeuraLayer<Self::LayerInput>>::Output>;

     /// Maps a gradient in the format of the enclosed layer's input into the format of the node's input
-    fn map_gradient_out<'a>(&'_ self, input: &'_ NodeInput, gradient_in: &'_ Self::NodeOutput, gradient_out: &'a Self::LayerInput) -> Cow<'a, NodeInput>;
+    fn map_gradient_out<'a>(
+        &'_ self,
+        input: &'_ NodeInput,
+        gradient_in: &'_ Self::NodeOutput,
+        gradient_out: &'a Self::LayerInput,
+    ) -> Cow<'a, NodeInput>;
 }

 pub trait NeuraNetworkRec: NeuraNetworkBase {
@@ -39,6 +52,4 @@ pub trait NeuraNetworkRec: NeuraNetworkBase {
     type NextNode;

     fn get_next(&self) -> &Self::NextNode;
 }
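The `Cow` return types in `NeuraNetwork` let a node that passes a value through unchanged return a borrow, while a node that actually transforms the value returns an owned result; callers pay for an allocation only when a conversion happens. A sketch of that pattern with hypothetical node types (not crate items):

```rust
use std::borrow::Cow;

struct Passthrough;
struct Doubler;

trait MapInput {
    fn map_input<'a>(&self, input: &'a Vec<f64>) -> Cow<'a, Vec<f64>>;
}

impl MapInput for Passthrough {
    fn map_input<'a>(&self, input: &'a Vec<f64>) -> Cow<'a, Vec<f64>> {
        // Identity case: borrow, no clone.
        Cow::Borrowed(input)
    }
}

impl MapInput for Doubler {
    fn map_input<'a>(&self, input: &'a Vec<f64>) -> Cow<'a, Vec<f64>> {
        // Transforming case: allocate only here.
        Cow::Owned(input.iter().map(|x| x * 2.0).collect())
    }
}

fn main() {
    let data = vec![1.0, 2.0];
    assert!(matches!(Passthrough.map_input(&data), Cow::Borrowed(_)));
    assert_eq!(Doubler.map_input(&data).as_ref(), &vec![2.0, 4.0]);
}
```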
