🎨 Run clippy on the project :)

main
Shad Amethyst 2 years ago
parent 6fbfd4e38c
commit b8c654ebb1

@@ -82,7 +82,7 @@ impl<T: NeuraVectorSpace + ?Sized> NeuraVectorSpace for Box<T> {
 impl NeuraVectorSpace for dyn NeuraDynVectorSpace {
     fn add_assign(&mut self, other: &Self) {
-        <dyn NeuraDynVectorSpace>::add_assign(self, &*other)
+        <dyn NeuraDynVectorSpace>::add_assign(self, other)
     }
     fn mul_assign(&mut self, by: f64) {

@@ -34,7 +34,7 @@ impl<const LENGTH: usize, F> NeuraVector<LENGTH, F> {
         LENGTH
     }
-    pub fn iter<'a>(&'a self) -> std::slice::Iter<'a, F> {
+    pub fn iter(&self) -> std::slice::Iter<'_, F> {
         self.data.iter()
     }
 }
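
The signature change above is what clippy's `needless_lifetimes` lint suggests: the explicit `'a` can be elided and the return type anonymized with `'_`. A minimal standalone sketch (the type and field names here are illustrative, not from the crate):

    struct Buffer {
        data: Vec<f64>,
    }

    impl Buffer {
        // Before: pub fn iter<'a>(&'a self) -> std::slice::Iter<'a, f64>
        pub fn iter(&self) -> std::slice::Iter<'_, f64> {
            self.data.iter()
        }
    }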

@@ -59,7 +59,7 @@ impl NeuraAxisBase for NeuraAxisAppend {
     type Err = NeuraAxisErr;
     fn shape(&self, inputs: &[NeuraShape]) -> Result<NeuraShape, NeuraAxisErr> {
-        let mut inputs = inputs.into_iter().map(|x| *x.borrow());
+        let mut inputs = inputs.iter().map(|x| *x.borrow());
         if let Some(mut res) = inputs.next() {
             for operand in inputs {
                 match (res, operand) {
@@ -82,7 +82,7 @@ impl<F: Clone + Default + Scalar> NeuraAxis<DVector<F>> for NeuraAxisAppend {
     type Combined = DVector<F>;
     fn combine(&self, inputs: &[impl Borrow<DVector<F>>]) -> Self::Combined {
-        assert!(inputs.len() > 0);
+        assert!(!inputs.is_empty());
         let mut res = Vec::with_capacity(inputs.iter().map(|vec| vec.borrow().len()).sum());
         for input in inputs {
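
Two lints show up in this file: calling `.iter()` instead of `.into_iter()` on a slice reference (likely `into_iter_on_ref`), and `len() > 0` rewritten as `!is_empty()` (`len_zero`). A minimal sketch of the latter, with an illustrative function name rather than the crate's:

    fn combine(inputs: &[f64]) -> f64 {
        // Before: assert!(inputs.len() > 0);
        assert!(!inputs.is_empty());
        inputs.iter().sum()
    }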

@@ -44,7 +44,7 @@ where
     }
     fn score(&self, trainable: &Trainable, input: &Input, target: &Target) -> f64 {
-        let output = trainable.eval(&input);
+        let output = trainable.eval(input);
         self.loss.eval(target, &output).to_f64().unwrap()
     }
 }

@@ -129,8 +129,8 @@ trait ForwardForwardRecurse<Input, Network, Gradient> {
 impl<Act, Input> ForwardForwardRecurse<Input, (), ()> for NeuraForwardPair<Act> {
     #[inline(always)]
-    fn recurse(&self, _network: &(), _input: &Input) -> () {
-        ()
+    fn recurse(&self, _network: &(), _input: &Input) {
     }
 }
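
This hunk and the `default_gradient` hunks that follow drop explicit unit values, which is what clippy's `unused_unit` lint flags: a `-> ()` return type and a trailing `()` expression add nothing. A standalone sketch with illustrative names:

    trait Recurse {
        fn recurse(&self);
    }

    struct Pair;

    impl Recurse for Pair {
        // Before: fn recurse(&self) -> () { () }
        fn recurse(&self) {}
    }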

@@ -51,7 +51,7 @@ impl<R: Rng + Clone + std::fmt::Debug + 'static> NeuraLayerBase for NeuraDropout
     type Gradient = ();
     fn default_gradient(&self) -> Self::Gradient {
-        ()
     }
     fn output_shape(&self) -> NeuraShape {

@@ -69,7 +69,7 @@ impl NeuraLayerBase for NeuraIsolateLayer {
     #[inline(always)]
     fn default_gradient(&self) -> Self::Gradient {
-        ()
     }
     fn output_shape(&self) -> NeuraShape {
@@ -87,7 +87,7 @@ impl<F: Clone + Scalar + Default> NeuraLayer<DVector<F>> for NeuraIsolateLayer {
             panic!("NeuraIsolateLayer expected a value of dimension {}, got a vector", self.start.dims());
         };
-        let res = DVector::from_iterator(end - start, input.iter().cloned().skip(start).take(end));
+        let res = DVector::from_iterator(end - start, input.iter().skip(start).cloned().take(end));
         (res, ())
     }
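
Moving `.cloned()` after `.skip(start)` avoids cloning elements that are immediately discarded; this looks like clippy's `iter_overeager_cloned` lint. A minimal sketch of the same reordering, with illustrative names:

    fn window(input: &[f64], start: usize, len: usize) -> Vec<f64> {
        // Before: input.iter().cloned().skip(start).take(len).collect()
        input.iter().skip(start).cloned().take(len).collect()
    }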

@@ -32,7 +32,7 @@ impl<Layer: NeuraLayerBase> NeuraLayerBase for NeuraLockLayer<Layer> {
     }
     fn default_gradient(&self) -> Self::Gradient {
-        ()
     }
     fn prepare_layer(&mut self, is_training: bool) {

@@ -189,7 +189,7 @@ impl NeuraLayerBase for () {
     #[inline(always)]
     fn default_gradient(&self) -> Self::Gradient {
-        ()
     }
     #[inline(always)]

@@ -41,7 +41,7 @@ impl NeuraLayerBase for NeuraNormalizeLayer {
     type Gradient = ();
     fn default_gradient(&self) -> Self::Gradient {
-        ()
     }
 }

@@ -33,7 +33,7 @@ impl NeuraLayerBase for NeuraSoftmaxLayer {
     }
     fn default_gradient(&self) -> Self::Gradient {
-        ()
     }
 }
@@ -76,7 +76,7 @@ impl<F: Float + Scalar + NumAssignOps> NeuraLayer<DVector<F>> for NeuraSoftmaxLa
         let mut epsilon = epsilon.clone();
         // Compute $a_{l-1,i} ° \epsilon_{l,i}$
-        hadamard_product(&mut epsilon, &evaluated);
+        hadamard_product(&mut epsilon, evaluated);
         // Compute $\sum_{k}{a_{l-1,k} \epsilon_{l,k}}$
         let sum_diagonal_terms = epsilon.sum();

@@ -19,7 +19,7 @@ impl<Data> FromSequential<NeuraSequentialLast, Data> for NeuraGraph<Data> {
         Self {
             output_index: nodes.len(),
             buffer_size: nodes.len() + 1,
-            nodes: nodes,
+            nodes,
             output_shape: input_shape,
         }
     }
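
`nodes: nodes` collapses to the field-init shorthand, per clippy's `redundant_field_names` lint. A standalone sketch with illustrative types:

    struct Graph {
        nodes: Vec<String>,
        output_index: usize,
    }

    fn build(nodes: Vec<String>) -> Graph {
        Graph {
            // Before: nodes: nodes,
            nodes,
            output_index: 0,
        }
    }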

@@ -28,7 +28,7 @@ impl<Data> Clone for NeuraGraphNodeConstructed<Data> {
         Self {
             node: dyn_clone::clone_box(&*self.node),
             inputs: self.inputs.clone(),
-            output: self.output.clone(),
+            output: self.output,
         }
     }
 }
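
Dropping `.clone()` on `self.output` is clippy's `clone_on_copy` lint: when the field's type is `Copy`, the call only performs a copy anyway. A sketch under that assumption (the real field type is not visible in this hunk, so the names below are made up):

    #[derive(Clone, Copy)]
    struct Slot(usize);

    struct Node {
        output: Slot,
    }

    impl Clone for Node {
        fn clone(&self) -> Self {
            // Before: output: self.output.clone(),
            Self { output: self.output }
        }
    }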

@@ -10,8 +10,8 @@ use crate::{
 use super::*;
 pub trait NeuraGraphNodePartial<Data>: DynClone + Debug {
-    fn inputs<'a>(&'a self) -> &'a [String];
-    fn name<'a>(&'a self) -> &'a str;
+    fn inputs(&self) -> &[String];
+    fn name(&self) -> &str;
     fn construct(
         &self,
@@ -112,7 +112,7 @@ where
 impl<Data: Clone, Axis: NeuraAxis<Data>, Layer: NeuraLayer<Axis::Combined, Output = Data>>
     NeuraGraphNodeEval<Data> for NeuraGraphNode<Axis, Layer>
 {
-    fn eval<'a>(&'a self, inputs: &[Data]) -> Data {
+    fn eval(&self, inputs: &[Data]) -> Data {
         let combined = self.axis.combine(inputs);
         self.layer.eval(&combined)
     }
@@ -184,11 +184,11 @@ where
     Layer::Constructed: NeuraLayer<Axis::Combined, Output = Data>,
     Layer::Err: Debug,
 {
-    fn inputs<'a>(&'a self) -> &'a [String] {
+    fn inputs(&self) -> &[String] {
         &self.inputs
     }
-    fn name<'a>(&'a self) -> &'a str {
+    fn name(&self) -> &str {
         &self.name
     }

@@ -169,7 +169,7 @@ impl<Data: Clone + std::fmt::Debug + 'static> NeuraPartialLayer for NeuraGraphPa
         let (constructed, output_shape) = node
             .construct(input_shapes)
-            .map_err(|e| NeuraGraphErr::LayerErr(e))?;
+            .map_err(NeuraGraphErr::LayerErr)?;
         shapes[index] = Some(output_shape);
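
The closure `|e| NeuraGraphErr::LayerErr(e)` only forwards its argument, so the tuple-variant constructor can be passed directly (clippy's `redundant_closure` lint); the same rewrite appears in the residual and sequential construct hunks below. A standalone sketch with illustrative names:

    #[derive(Debug)]
    enum GraphErr {
        LayerErr(String),
    }

    fn construct(raw: Result<u32, String>) -> Result<u32, GraphErr> {
        // Before: raw.map_err(|e| GraphErr::LayerErr(e))
        raw.map_err(GraphErr::LayerErr)
    }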

@@ -44,7 +44,7 @@ impl<Layer: NeuraPartialLayer, ChildNetwork: NeuraResidualConstruct, Axis: Neura
             .map_err(|e| NeuraRecursiveErr::Current(Layer(e)))?;
         let layer_shape = Rc::new(layer.output_shape());
-        if self.offsets.len() == 0 {
+        if self.offsets.is_empty() {
             return Err(NeuraRecursiveErr::Current(NoOutput));
         }
@@ -63,7 +63,7 @@ impl<Layer: NeuraPartialLayer, ChildNetwork: NeuraResidualConstruct, Axis: Neura
         let child_network = self
             .child_network
             .construct_residual(rest_inputs, rest_indices, current_index + 1)
-            .map_err(|e| NeuraRecursiveErr::Child(e))?;
+            .map_err(NeuraRecursiveErr::Child)?;
         Ok(NeuraResidualNode {
             layer,

@@ -67,7 +67,7 @@ impl NeuraLayerBase for NeuraResidualLast {
     type Gradient = ();
     fn default_gradient(&self) -> Self::Gradient {
-        ()
     }
     fn output_shape(&self) -> NeuraShape {
@@ -98,7 +98,7 @@ impl NeuraNetworkRec for NeuraResidualLast {
     where
         Self::Layer: NeuraLayerBase,
     {
-        ()
     }
 }
@@ -147,8 +147,7 @@ impl<Data: Clone> NeuraLayer<NeuraResidualInput<Data>> for NeuraResidualLast {
     fn eval_training(&self, input: &NeuraResidualInput<Data>) -> (Self::Output, ()) {
         let result: Rc<Self::Output> = input.clone().get_first()
-            .expect("Invalid NeuraResidual state: network returned no data, did you forget to link the last layer?")
-            .into();
+            .expect("Invalid NeuraResidual state: network returned no data, did you forget to link the last layer?");
         (unwrap_or_clone(result), ())
     }

@@ -68,7 +68,7 @@ where
         epsilon: &Self::Output,
     ) -> Self::Gradient {
         self.layers
-            .get_gradient(&self.input_to_residual_input(input), intermediary, &epsilon)
+            .get_gradient(&self.input_to_residual_input(input), intermediary, epsilon)
     }
     fn backprop_layer(
@@ -79,7 +79,7 @@ where
     ) -> Data {
         unwrap_or_clone(
             self.layers
-                .backprop_layer(&self.input_to_residual_input(input), intermediary, &epsilon)
+                .backprop_layer(&self.input_to_residual_input(input), intermediary, epsilon)
                 .get_first()
                 .unwrap(),
         )

@@ -24,13 +24,13 @@ impl<Layer: NeuraPartialLayer, ChildNetwork: NeuraPartialLayer> NeuraPartialLaye
         let layer = self
             .layer
             .construct(input_shape)
-            .map_err(|e| NeuraRecursiveErr::Current(e))?;
+            .map_err(NeuraRecursiveErr::Current)?;
         // TODO: ensure that this operation (and all recursive operations) are directly allocated on the heap
         let child_network = self
             .child_network
             .construct(layer.output_shape())
-            .map_err(|e| NeuraRecursiveErr::Child(e))?;
+            .map_err(NeuraRecursiveErr::Child)?;
         let child_network = Box::new(child_network);
         Ok(NeuraSequential {

@@ -75,10 +75,9 @@ impl<Input, Layer: NeuraLayer<Input>, ChildNetwork: NeuraLayer<Layer::Output>> N
         let transient_epsilon =
             self.child_network
                 .backprop_layer(&transient_output, &intermediary.1, incoming_epsilon);
-        let outgoing_epsilon =
-            self.layer
-                .backprop_layer(input, &intermediary.0, &transient_epsilon);
-        outgoing_epsilon
+        self.layer
+            .backprop_layer(input, &intermediary.0, &transient_epsilon)
     }
 }
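
Binding the result to `outgoing_epsilon` only to return it on the next line is what clippy's `let_and_return` lint flags; the expression itself becomes the function's tail. A trivial sketch with made-up names:

    fn backprop(input: f64) -> f64 {
        // Before:
        //     let outgoing = input * 2.0;
        //     outgoing
        input * 2.0
    }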

@@ -70,7 +70,7 @@ impl<Layer, ChildNetwork> NeuraSequential<Layer, ChildNetwork> {
         Layer: NeuraLayer<Input>,
     {
         NeuraSequential {
-            layer: layer,
+            layer,
             child_network: Box::new(self),
         }
     }
@@ -80,7 +80,7 @@ impl<Layer> From<Layer> for NeuraSequential<Layer, NeuraSequentialLast> {
     fn from(layer: Layer) -> Self {
         Self {
             layer,
-            child_network: Box::new(NeuraSequentialLast::default()),
+            child_network: Box::<NeuraSequentialLast>::default(),
         }
     }
 }
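
`Box::new(T::default())` is rewritten as `Box::<T>::default()`, which is what clippy's `box_default` lint suggests; `Box<T>` implements `Default` whenever `T` does, so the two forms are equivalent. A standalone sketch with an illustrative type:

    fn make_buffer() -> Box<Vec<f64>> {
        // Before: Box::new(Vec::default())
        Box::<Vec<f64>>::default()
    }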

@@ -2,6 +2,7 @@ use super::*;
 /// Last element of a NeuraSequential network
 #[derive(Clone, Debug, PartialEq, Copy)]
+#[derive(Default)]
 pub struct NeuraSequentialLast {
     shape: Option<NeuraShape>,
 }
@@ -28,7 +29,7 @@ impl NeuraLayerBase for NeuraSequentialLast {
     #[inline(always)]
     fn default_gradient(&self) -> Self::Gradient {
-        ()
     }
 }
@@ -116,11 +117,7 @@ impl<Input: Clone> NeuraNetwork<Input> for NeuraSequentialLast {
     }
 }
-impl Default for NeuraSequentialLast {
-    fn default() -> Self {
-        Self { shape: None }
-    }
-}
 /// Operations on the tail end of a sequential network
 pub trait NeuraSequentialTail {
@@ -146,7 +143,7 @@ impl<Layer> NeuraSequentialTail for NeuraSequential<Layer, NeuraSequentialLast>
             layer: self.layer,
             child_network: Box::new(NeuraSequential {
                 layer,
-                child_network: Box::new(NeuraSequentialLast::default()),
+                child_network: Box::<NeuraSequentialLast>::default(),
             }),
         }
     }
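
The hand-written `impl Default for NeuraSequentialLast` is replaced by `#[derive(Default)]`, which clippy's `derivable_impls` lint suggests whenever every field's default already matches the manual impl (here `Option<NeuraShape>` defaults to `None`). A sketch of the equivalence with illustrative names:

    // Before: impl Default for Last { fn default() -> Self { Self { shape: None } } }
    #[derive(Default)]
    struct Last {
        shape: Option<u32>,
    }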

@@ -123,10 +123,10 @@ impl NeuraBatchedTrainer {
             for _ in 0..self.batch_size {
                 if let Some((input, target)) = iter.next() {
-                    let gradient = gradient_solver.get_gradient(&network, &input, &target);
+                    let gradient = gradient_solver.get_gradient(network, &input, &target);
                     gradient_sum.add_assign(&gradient);
-                    train_loss += gradient_solver.score(&network, &input, &target);
+                    train_loss += gradient_solver.score(network, &input, &target);
                 } else {
                     break 'd;
                 }
@@ -151,7 +151,7 @@ impl NeuraBatchedTrainer {
         network.prepare_layer(false);
         let mut val_loss = 0.0;
         for (input, target) in test_inputs {
-            val_loss += gradient_solver.score(&network, input, target);
+            val_loss += gradient_solver.score(network, input, target);
        }
         val_loss /= test_inputs.len() as f64;
         train_loss /= (self.batch_size * self.log_iterations) as f64;
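
`&network` becomes `network` because the binding is already a reference, so taking another borrow is immediately auto-dereferenced again (clippy's `needless_borrow` lint). A standalone sketch with made-up function names:

    fn score(network: &[f64]) -> f64 {
        network.iter().sum()
    }

    fn train(network: &[f64]) -> f64 {
        // Before: score(&network); `network` is already a `&[f64]`.
        score(network)
    }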

@@ -26,7 +26,7 @@ impl<J: Iterator> Iterator for Chunked<J> {
             }
         }
-        if result.len() > 0 {
+        if !result.is_empty() {
             Some(result)
         } else {
             None
@@ -54,8 +54,8 @@ where
         if let Some(next) = self.iter.next() {
             // Base iterator is not empty yet
             self.buffer.push(next.clone());
-            return Some(next);
-        } else if self.buffer.len() > 0 {
+            Some(next)
+        } else if !self.buffer.is_empty() {
             if self.index == 0 {
                 // Shuffle the vector and return the first element, setting the index to 1
                 self.buffer.shuffle(&mut self.rng);
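
`return Some(next);` sits in tail position of its branch, so the keyword is redundant (clippy's `needless_return` lint); dropping `return` and the trailing semicolon leaves both branches as plain expressions. A standalone sketch with illustrative names:

    fn next_item(iter: &mut impl Iterator<Item = u32>) -> Option<u32> {
        if let Some(next) = iter.next() {
            // Before: return Some(next);
            Some(next)
        } else {
            None
        }
    }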
