🎨 Small cleanup

main
Shad Amethyst 2 years ago
parent d40098d2ef
commit 2ea5502575
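For context: in Rust, prefixing a parameter name with an underscore (_input instead of input) marks it as intentionally unused and silences the unused_variables lint while keeping the trait method's signature intact. A minimal sketch of that convention, assuming a hypothetical trait and types rather than the ones in this crate:

    // Hypothetical trait, for illustration only.
    trait Layer {
        fn backprop(&self, input: &[f64], epsilon: &[f64]) -> Vec<f64>;
    }

    struct Identity;

    impl Layer for Identity {
        // This impl never reads the input; the leading underscore tells rustc
        // the parameter is intentionally unused, so no warning is emitted.
        fn backprop(&self, _input: &[f64], epsilon: &[f64]) -> Vec<f64> {
            epsilon.to_vec()
        }
    }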

@@ -242,7 +242,7 @@ impl<
 {
     fn backprop_layer(
         &self,
-        input: &DVector<F>,
+        _input: &DVector<F>,
         evaluated: &Self::IntermediaryRepr,
         epsilon: &Self::Output,
     ) -> DVector<F> {

@@ -89,9 +89,9 @@ impl<F: Float + Scalar + NumAssignOps> NeuraTrainableLayerSelf<DVector<F>> for N
     fn get_gradient(
         &self,
-        input: &DVector<F>,
-        intermediary: &Self::IntermediaryRepr,
-        epsilon: &Self::Output,
+        _input: &DVector<F>,
+        _intermediary: &Self::IntermediaryRepr,
+        _epsilon: &Self::Output,
     ) -> Self::Gradient {
         ()
     }
@@ -102,7 +102,7 @@ impl<F: Float + Scalar + NumAssignOps> NeuraTrainableLayerBackprop<DVector<F>>
 {
     fn backprop_layer(
         &self,
-        input: &DVector<F>,
+        _input: &DVector<F>,
         (jacobian_partial, stddev): &Self::IntermediaryRepr,
         epsilon: &Self::Output,
     ) -> DVector<F> {

@@ -81,9 +81,9 @@ impl<F: Float + Scalar + NumAssignOps> NeuraTrainableLayerSelf<DVector<F>> for N
     #[inline(always)]
     fn get_gradient(
         &self,
-        input: &DVector<F>,
-        intermediary: &Self::IntermediaryRepr,
-        epsilon: &Self::Output,
+        _input: &DVector<F>,
+        _intermediary: &Self::IntermediaryRepr,
+        _epsilon: &Self::Output,
     ) -> Self::Gradient {
         ()
     }

@@ -63,9 +63,9 @@ impl<
     fn get_gradient(
         &self,
-        input: &Input,
-        intermediary: &Self::IntermediaryRepr,
-        epsilon: &Self::Output,
+        _input: &Input,
+        _intermediary: &Self::IntermediaryRepr,
+        _epsilon: &Self::Output,
     ) -> Self::Gradient {
         unimplemented!("NeuraSequential::get_gradient is not yet implemented, sorry");
     }
