🚚 rename optimize to gradient_solver

Shad Amethyst 2 years ago
parent 81de6ddbcd
commit 6d45eafbe7

@@ -1,44 +1,9 @@
 use num::ToPrimitive;
 
-use crate::{
-    derivable::NeuraLoss,
-    layer::NeuraTrainableLayer,
-    network::{NeuraTrainableNetwork, NeuraTrainableNetworkBase},
-};
+use crate::{network::NeuraTrainableNetworkBase, derivable::NeuraLoss, layer::NeuraTrainableLayer};
 
-pub trait NeuraOptimizerBase {
-    type Output<NetworkInput, NetworkGradient>;
-}
-
-pub trait NeuraOptimizerFinal<LayerOutput>: NeuraOptimizerBase {
-    fn eval_final(&self, output: LayerOutput) -> Self::Output<LayerOutput, ()>;
-}
-
-pub trait NeuraOptimizerTransient<LayerOutput>: NeuraOptimizerBase {
-    fn eval_layer<
-        Input,
-        NetworkGradient,
-        RecGradient,
-        Layer: NeuraTrainableLayer<Input, Output = LayerOutput>,
-    >(
-        &self,
-        layer: &Layer,
-        input: &Input,
-        rec_opt_output: Self::Output<LayerOutput, RecGradient>,
-        combine_gradients: impl Fn(Layer::Gradient, RecGradient) -> NetworkGradient,
-    ) -> Self::Output<Input, NetworkGradient>;
-}
-
-pub trait NeuraOptimizer<Input, Target, Trainable: NeuraTrainableNetworkBase<Input>> {
-    fn get_gradient(
-        &self,
-        trainable: &Trainable,
-        input: &Input,
-        target: &Target,
-    ) -> Trainable::Gradient;
-
-    fn score(&self, trainable: &Trainable, input: &Input, target: &Target) -> f64;
-}
+use super::*;
 
 pub struct NeuraBackprop<Loss> {
     loss: Loss,
@@ -55,7 +20,7 @@ impl<
         Target,
         Trainable: NeuraTrainableNetworkBase<Input>,
         Loss: NeuraLoss<Trainable::Output, Target = Target> + Clone,
-    > NeuraOptimizer<Input, Target, Trainable> for NeuraBackprop<Loss>
+    > NeuraGradientSolver<Input, Target, Trainable> for NeuraBackprop<Loss>
 where
     <Loss as NeuraLoss<Trainable::Output>>::Output: ToPrimitive,
     Trainable: for<'a> NeuraTrainableNetwork<Input, (&'a NeuraBackprop<Loss>, &'a Target)>,
@@ -77,19 +42,19 @@ where
     }
 }
 
-impl<Loss, Target> NeuraOptimizerBase for (&NeuraBackprop<Loss>, &Target) {
+impl<Loss, Target> NeuraGradientSolverBase for (&NeuraBackprop<Loss>, &Target) {
     type Output<NetworkInput, NetworkGradient> = (NetworkInput, NetworkGradient); // epsilon, gradient
 }
 
 impl<LayerOutput, Target, Loss: NeuraLoss<LayerOutput, Target = Target>>
-    NeuraOptimizerFinal<LayerOutput> for (&NeuraBackprop<Loss>, &Target)
+    NeuraGradientSolverFinal<LayerOutput> for (&NeuraBackprop<Loss>, &Target)
 {
     fn eval_final(&self, output: LayerOutput) -> Self::Output<LayerOutput, ()> {
         (self.0.loss.nabla(self.1, &output), ())
     }
 }
 
-impl<LayerOutput, Target, Loss> NeuraOptimizerTransient<LayerOutput>
+impl<LayerOutput, Target, Loss> NeuraGradientSolverTransient<LayerOutput>
     for (&NeuraBackprop<Loss>, &Target)
 {
     fn eval_layer<

@@ -0,0 +1,41 @@
+mod backprop;
+pub use backprop::NeuraBackprop;
+
+use crate::{
+    layer::NeuraTrainableLayer,
+    network::{NeuraTrainableNetwork, NeuraTrainableNetworkBase},
+};
+
+pub trait NeuraGradientSolverBase {
+    type Output<NetworkInput, NetworkGradient>;
+}
+
+pub trait NeuraGradientSolverFinal<LayerOutput>: NeuraGradientSolverBase {
+    fn eval_final(&self, output: LayerOutput) -> Self::Output<LayerOutput, ()>;
+}
+
+pub trait NeuraGradientSolverTransient<LayerOutput>: NeuraGradientSolverBase {
+    fn eval_layer<
+        Input,
+        NetworkGradient,
+        RecGradient,
+        Layer: NeuraTrainableLayer<Input, Output = LayerOutput>,
+    >(
+        &self,
+        layer: &Layer,
+        input: &Input,
+        rec_opt_output: Self::Output<LayerOutput, RecGradient>,
+        combine_gradients: impl Fn(Layer::Gradient, RecGradient) -> NetworkGradient,
+    ) -> Self::Output<Input, NetworkGradient>;
+}
+
+pub trait NeuraGradientSolver<Input, Target, Trainable: NeuraTrainableNetworkBase<Input>> {
+    fn get_gradient(
+        &self,
+        trainable: &Trainable,
+        input: &Input,
+        target: &Target,
+    ) -> Trainable::Gradient;
+
+    fn score(&self, trainable: &Trainable, input: &Input, target: &Target) -> f64;
+}

@@ -5,7 +5,7 @@ pub mod algebra;
 pub mod derivable;
 pub mod layer;
 pub mod network;
-pub mod optimize;
+pub mod gradient_solver;
 pub mod train;
 
 mod utils;
@@ -22,6 +22,6 @@ pub mod prelude {
     pub use crate::network::sequential::{
         NeuraSequential, NeuraSequentialConstruct, NeuraSequentialTail,
     };
-    pub use crate::optimize::NeuraBackprop;
+    pub use crate::gradient_solver::NeuraBackprop;
    pub use crate::train::NeuraBatchedTrainer;
 }
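Because the prelude above re-exports NeuraBackprop from its new home, code that imports it through the prelude is unaffected by the rename; only direct module paths change, roughly as in this illustrative snippet (not taken from the diff):

// Old direct path, removed by this commit:
//     use crate::optimize::NeuraBackprop;
// New direct path:
use crate::gradient_solver::NeuraBackprop;
// Importing through the prelude keeps working unchanged:
// use crate::prelude::NeuraBackprop;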

@@ -1,4 +1,4 @@
-use crate::{algebra::NeuraVectorSpace, layer::NeuraLayer, optimize::NeuraOptimizerBase};
+use crate::{algebra::NeuraVectorSpace, layer::NeuraLayer, gradient_solver::NeuraGradientSolverBase};
 
 pub mod sequential;
@@ -19,7 +19,7 @@ pub trait NeuraTrainableNetworkBase<Input>: NeuraLayer<Input> {
 pub trait NeuraTrainableNetwork<Input, Optimizer>: NeuraTrainableNetworkBase<Input>
 where
-    Optimizer: NeuraOptimizerBase,
+    Optimizer: NeuraGradientSolverBase,
 {
     fn traverse(
         &self,

@@ -1,7 +1,7 @@
 use super::{NeuraTrainableNetwork, NeuraTrainableNetworkBase};
 use crate::{
     layer::{NeuraLayer, NeuraPartialLayer, NeuraShape, NeuraTrainableLayer},
-    optimize::{NeuraOptimizerFinal, NeuraOptimizerTransient},
+    gradient_solver::{NeuraGradientSolverFinal, NeuraGradientSolverTransient},
 };
 
 mod construct;
@@ -189,7 +189,7 @@ impl<Input: Clone> NeuraTrainableNetworkBase<Input> for () {
 impl<
         Input,
         Layer: NeuraTrainableLayer<Input>,
-        Optimizer: NeuraOptimizerTransient<Layer::Output>,
+        Optimizer: NeuraGradientSolverTransient<Layer::Output>,
         ChildNetwork: NeuraTrainableNetworkBase<Layer::Output>,
     > NeuraTrainableNetwork<Input, Optimizer> for NeuraSequential<Layer, ChildNetwork>
 where
@@ -212,7 +212,7 @@ where
     }
 }
 
-impl<Input: Clone, Optimizer: NeuraOptimizerFinal<Input>> NeuraTrainableNetwork<Input, Optimizer>
+impl<Input: Clone, Optimizer: NeuraGradientSolverFinal<Input>> NeuraTrainableNetwork<Input, Optimizer>
     for ()
 {
     fn traverse(

@@ -1,5 +1,5 @@
 use crate::{
-    algebra::NeuraVectorSpace, network::NeuraTrainableNetworkBase, optimize::NeuraOptimizer,
+    algebra::NeuraVectorSpace, network::NeuraTrainableNetworkBase, gradient_solver::NeuraGradientSolver,
 };
 
 #[non_exhaustive]
@@ -73,7 +73,7 @@ impl NeuraBatchedTrainer {
         Input: Clone,
         Target: Clone,
         Network: NeuraTrainableNetworkBase<Input>,
-        GradientSolver: NeuraOptimizer<Input, Target, Network>,
+        GradientSolver: NeuraGradientSolver<Input, Target, Network>,
         Inputs: IntoIterator<Item = (Input, Target)>,
     >(
         &self,
@@ -143,7 +143,7 @@ mod test {
         layer::{dense::NeuraDenseLayer, NeuraLayer},
         network::sequential::{NeuraSequential, NeuraSequentialTail},
         neura_sequential,
-        optimize::NeuraBackprop,
+        gradient_solver::NeuraBackprop,
     };
 
     #[test]
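For callers that are generic over the solver, like NeuraBatchedTrainer above, the migration is purely a rename of the trait bound. The helper below is an illustrative sketch, not part of the crate, showing how the two methods of the renamed trait are typically consumed together under that bound.

// Illustrative only: evaluate one sample with any gradient solver.
// Before this commit the bound read `NeuraOptimizer<Input, Target, Network>`.
fn evaluate_once<Input, Target, Network, Solver>(
    solver: &Solver,
    network: &Network,
    input: &Input,
    target: &Target,
) -> (Network::Gradient, f64)
where
    Network: NeuraTrainableNetworkBase<Input>,
    Solver: NeuraGradientSolver<Input, Target, Network>,
{
    // get_gradient drives the backward pass; score reports the loss value.
    let gradient = solver.get_gradient(network, input, target);
    let loss = solver.score(network, input, target);
    (gradient, loss)
}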
