🎨 Small cleanup

main
Shad Amethyst 2 years ago
parent d40098d2ef
commit 2ea5502575

@@ -242,7 +242,7 @@ impl<
 {
     fn backprop_layer(
         &self,
-        input: &DVector<F>,
+        _input: &DVector<F>,
         evaluated: &Self::IntermediaryRepr,
         epsilon: &Self::Output,
     ) -> DVector<F> {

@@ -89,9 +89,9 @@ impl<F: Float + Scalar + NumAssignOps> NeuraTrainableLayerSelf<DVector<F>> for N
     fn get_gradient(
         &self,
-        input: &DVector<F>,
-        intermediary: &Self::IntermediaryRepr,
-        epsilon: &Self::Output,
+        _input: &DVector<F>,
+        _intermediary: &Self::IntermediaryRepr,
+        _epsilon: &Self::Output,
     ) -> Self::Gradient {
         ()
     }
@@ -102,7 +102,7 @@ impl<F: Float + Scalar + NumAssignOps> NeuraTrainableLayerBackprop<DVector<F>>
 {
     fn backprop_layer(
         &self,
-        input: &DVector<F>,
+        _input: &DVector<F>,
         (jacobian_partial, stddev): &Self::IntermediaryRepr,
         epsilon: &Self::Output,
     ) -> DVector<F> {

@@ -81,9 +81,9 @@ impl<F: Float + Scalar + NumAssignOps> NeuraTrainableLayerSelf<DVector<F>> for N
     #[inline(always)]
     fn get_gradient(
         &self,
-        input: &DVector<F>,
-        intermediary: &Self::IntermediaryRepr,
-        epsilon: &Self::Output,
+        _input: &DVector<F>,
+        _intermediary: &Self::IntermediaryRepr,
+        _epsilon: &Self::Output,
     ) -> Self::Gradient {
         ()
     }

@@ -63,9 +63,9 @@ impl<
     fn get_gradient(
         &self,
-        input: &Input,
-        intermediary: &Self::IntermediaryRepr,
-        epsilon: &Self::Output,
+        _input: &Input,
+        _intermediary: &Self::IntermediaryRepr,
+        _epsilon: &Self::Output,
     ) -> Self::Gradient {
         unimplemented!("NeuraSequential::get_gradient is not yet implemented, sorry");
     }
