diff --git a/src/algebra/mod.rs b/src/algebra/mod.rs
index cde2b58..9382294 100644
--- a/src/algebra/mod.rs
+++ b/src/algebra/mod.rs
@@ -82,7 +82,7 @@ impl NeuraVectorSpace for Box {
 
 impl NeuraVectorSpace for dyn NeuraDynVectorSpace {
     fn add_assign(&mut self, other: &Self) {
-        ::add_assign(self, &*other)
+        ::add_assign(self, other)
     }
 
     fn mul_assign(&mut self, by: f64) {
diff --git a/src/algebra/vector.rs b/src/algebra/vector.rs
index 89dee93..5204efa 100644
--- a/src/algebra/vector.rs
+++ b/src/algebra/vector.rs
@@ -34,7 +34,7 @@ impl NeuraVector {
         LENGTH
     }
 
-    pub fn iter<'a>(&'a self) -> std::slice::Iter<'a, F> {
+    pub fn iter(&self) -> std::slice::Iter<'_, F> {
         self.data.iter()
     }
 }
diff --git a/src/axis.rs b/src/axis.rs
index 02f997c..852039f 100644
--- a/src/axis.rs
+++ b/src/axis.rs
@@ -59,7 +59,7 @@ impl NeuraAxisBase for NeuraAxisAppend {
     type Err = NeuraAxisErr;
 
     fn shape(&self, inputs: &[NeuraShape]) -> Result {
-        let mut inputs = inputs.into_iter().map(|x| *x.borrow());
+        let mut inputs = inputs.iter().map(|x| *x.borrow());
         if let Some(mut res) = inputs.next() {
             for operand in inputs {
                 match (res, operand) {
@@ -82,7 +82,7 @@ impl NeuraAxis> for NeuraAxisAppend {
     type Combined = DVector;
 
     fn combine(&self, inputs: &[impl Borrow>]) -> Self::Combined {
-        assert!(inputs.len() > 0);
+        assert!(!inputs.is_empty());
         let mut res = Vec::with_capacity(inputs.iter().map(|vec| vec.borrow().len()).sum());
 
         for input in inputs {
diff --git a/src/gradient_solver/backprop.rs b/src/gradient_solver/backprop.rs
index 269657d..9865b3d 100644
--- a/src/gradient_solver/backprop.rs
+++ b/src/gradient_solver/backprop.rs
@@ -44,7 +44,7 @@ where
     }
 
     fn score(&self, trainable: &Trainable, input: &Input, target: &Target) -> f64 {
-        let output = trainable.eval(&input);
+        let output = trainable.eval(input);
         self.loss.eval(target, &output).to_f64().unwrap()
     }
 }
diff --git a/src/gradient_solver/forward_forward.rs b/src/gradient_solver/forward_forward.rs
index 0b15774..6a78bfd 100644
--- a/src/gradient_solver/forward_forward.rs
+++ b/src/gradient_solver/forward_forward.rs
@@ -129,8 +129,8 @@ trait ForwardForwardRecurse {
 
 impl ForwardForwardRecurse for NeuraForwardPair {
     #[inline(always)]
-    fn recurse(&self, _network: &(), _input: &Input) -> () {
-        ()
+    fn recurse(&self, _network: &(), _input: &Input) {
+
     }
 }
diff --git a/src/layer/dropout.rs b/src/layer/dropout.rs
index 8989cba..bc2b16e 100644
--- a/src/layer/dropout.rs
+++ b/src/layer/dropout.rs
@@ -51,7 +51,7 @@ impl NeuraLayerBase for NeuraDropout
     type Gradient = ();
 
     fn default_gradient(&self) -> Self::Gradient {
-        ()
+
     }
 
     fn output_shape(&self) -> NeuraShape {
diff --git a/src/layer/isolate.rs b/src/layer/isolate.rs
index ea553b4..e572d26 100644
--- a/src/layer/isolate.rs
+++ b/src/layer/isolate.rs
@@ -69,7 +69,7 @@ impl NeuraLayerBase for NeuraIsolateLayer {
 
     #[inline(always)]
     fn default_gradient(&self) -> Self::Gradient {
-        ()
+
     }
 
     fn output_shape(&self) -> NeuraShape {
@@ -87,7 +87,7 @@ impl NeuraLayer> for NeuraIsolateLayer {
             panic!("NeuraIsolateLayer expected a value of dimension {}, got a vector", self.start.dims());
         };
 
-        let res = DVector::from_iterator(end - start, input.iter().cloned().skip(start).take(end));
+        let res = DVector::from_iterator(end - start, input.iter().skip(start).cloned().take(end));
 
         (res, ())
     }
diff --git a/src/layer/lock.rs b/src/layer/lock.rs
index 64bebe2..5a5d3fb 100644
--- a/src/layer/lock.rs
+++ b/src/layer/lock.rs
@@ -32,7 +32,7 @@ impl NeuraLayerBase for NeuraLockLayer {
     }
 
     fn default_gradient(&self) -> Self::Gradient {
-        ()
+
     }
 
     fn prepare_layer(&mut self, is_training: bool) {
diff --git a/src/layer/mod.rs b/src/layer/mod.rs
index 3f46cda..8a52b49 100644
--- a/src/layer/mod.rs
+++ b/src/layer/mod.rs
@@ -189,7 +189,7 @@ impl NeuraLayerBase for () {
     #[inline(always)]
     fn default_gradient(&self) -> Self::Gradient {
-        ()
+
     }
 
     #[inline(always)]
diff --git a/src/layer/normalize.rs b/src/layer/normalize.rs
index 5e2d6af..52704aa 100644
--- a/src/layer/normalize.rs
+++ b/src/layer/normalize.rs
@@ -41,7 +41,7 @@ impl NeuraLayerBase for NeuraNormalizeLayer {
     type Gradient = ();
 
     fn default_gradient(&self) -> Self::Gradient {
-        ()
+
     }
 }
diff --git a/src/layer/softmax.rs b/src/layer/softmax.rs
index ebae80f..77bd255 100644
--- a/src/layer/softmax.rs
+++ b/src/layer/softmax.rs
@@ -33,7 +33,7 @@ impl NeuraLayerBase for NeuraSoftmaxLayer {
     }
 
     fn default_gradient(&self) -> Self::Gradient {
-        ()
+
     }
 }
 
@@ -76,7 +76,7 @@ impl NeuraLayer> for NeuraSoftmaxLa
         let mut epsilon = epsilon.clone();
 
         // Compute $a_{l-1,i} ° \epsilon_{l,i}$
-        hadamard_product(&mut epsilon, &evaluated);
+        hadamard_product(&mut epsilon, evaluated);
 
         // Compute $\sum_{k}{a_{l-1,k} \epsilon_{l,k}}$
         let sum_diagonal_terms = epsilon.sum();
diff --git a/src/network/graph/from.rs b/src/network/graph/from.rs
index 2e2f05a..b913a1a 100644
--- a/src/network/graph/from.rs
+++ b/src/network/graph/from.rs
@@ -19,7 +19,7 @@ impl FromSequential for NeuraGraph {
         Self {
             output_index: nodes.len(),
             buffer_size: nodes.len() + 1,
-            nodes: nodes,
+            nodes,
             output_shape: input_shape,
         }
     }
diff --git a/src/network/graph/mod.rs b/src/network/graph/mod.rs
index f4a33ae..4def358 100644
--- a/src/network/graph/mod.rs
+++ b/src/network/graph/mod.rs
@@ -28,7 +28,7 @@ impl Clone for NeuraGraphNodeConstructed {
         Self {
             node: dyn_clone::clone_box(&*self.node),
             inputs: self.inputs.clone(),
-            output: self.output.clone(),
+            output: self.output,
         }
     }
 }
diff --git a/src/network/graph/node.rs b/src/network/graph/node.rs
index 73ba289..8b536ea 100644
--- a/src/network/graph/node.rs
+++ b/src/network/graph/node.rs
@@ -10,8 +10,8 @@ use crate::{
 use super::*;
 
 pub trait NeuraGraphNodePartial: DynClone + Debug {
-    fn inputs<'a>(&'a self) -> &'a [String];
-    fn name<'a>(&'a self) -> &'a str;
+    fn inputs(&self) -> &[String];
+    fn name(&self) -> &str;
 
     fn construct(
         &self,
@@ -112,7 +112,7 @@ where
 impl, Layer: NeuraLayer> NeuraGraphNodeEval
     for NeuraGraphNode
 {
-    fn eval<'a>(&'a self, inputs: &[Data]) -> Data {
+    fn eval(&self, inputs: &[Data]) -> Data {
         let combined = self.axis.combine(inputs);
 
         self.layer.eval(&combined)
     }
@@ -184,11 +184,11 @@ where
     Layer::Constructed: NeuraLayer,
     Layer::Err: Debug,
 {
-    fn inputs<'a>(&'a self) -> &'a [String] {
+    fn inputs(&self) -> &[String] {
         &self.inputs
     }
 
-    fn name<'a>(&'a self) -> &'a str {
+    fn name(&self) -> &str {
         &self.name
     }
diff --git a/src/network/graph/partial.rs b/src/network/graph/partial.rs
index cfaa118..2a043a8 100644
--- a/src/network/graph/partial.rs
+++ b/src/network/graph/partial.rs
@@ -169,7 +169,7 @@ impl NeuraPartialLayer for NeuraGraphPa
 
             let (constructed, output_shape) = node
                 .construct(input_shapes)
-                .map_err(|e| NeuraGraphErr::LayerErr(e))?;
+                .map_err(NeuraGraphErr::LayerErr)?;
 
             shapes[index] = Some(output_shape);
diff --git a/src/network/residual/construct.rs b/src/network/residual/construct.rs
index 6a7f305..3357f33 100644
--- a/src/network/residual/construct.rs
+++ b/src/network/residual/construct.rs
@@ -44,7 +44,7 @@ impl
     fn default_gradient(&self) -> Self::Gradient {
-        ()
+
     }
 
     fn output_shape(&self) -> NeuraShape {
@@ -98,7 +98,7 @@ impl NeuraNetworkRec for NeuraResidualLast {
     where
         Self::Layer: NeuraLayerBase,
     {
-        ()
+
     }
 }
 
@@ -147,8 +147,7 @@ impl NeuraLayer> for NeuraResidualLast {
 
     fn eval_training(&self, input: &NeuraResidualInput) -> (Self::Output, ()) {
         let result: Rc = input.clone().get_first()
-            .expect("Invalid NeuraResidual state: network returned no data, did you forget to link the last layer?")
-            .into();
+            .expect("Invalid NeuraResidual state: network returned no data, did you forget to link the last layer?");
 
         (unwrap_or_clone(result), ())
     }
diff --git a/src/network/residual/wrapper.rs b/src/network/residual/wrapper.rs
index 5d3b1f4..9f35097 100644
--- a/src/network/residual/wrapper.rs
+++ b/src/network/residual/wrapper.rs
@@ -68,7 +68,7 @@ where
         epsilon: &Self::Output,
     ) -> Self::Gradient {
         self.layers
-            .get_gradient(&self.input_to_residual_input(input), intermediary, &epsilon)
+            .get_gradient(&self.input_to_residual_input(input), intermediary, epsilon)
     }
 
     fn backprop_layer(
@@ -79,7 +79,7 @@ where
     ) -> Data {
         unwrap_or_clone(
             self.layers
-                .backprop_layer(&self.input_to_residual_input(input), intermediary, &epsilon)
+                .backprop_layer(&self.input_to_residual_input(input), intermediary, epsilon)
                 .get_first()
                 .unwrap(),
         )
diff --git a/src/network/sequential/construct.rs b/src/network/sequential/construct.rs
index 4f8153a..134dbbf 100644
--- a/src/network/sequential/construct.rs
+++ b/src/network/sequential/construct.rs
@@ -24,13 +24,13 @@ impl NeuraPartialLaye
         let layer = self
             .layer
             .construct(input_shape)
-            .map_err(|e| NeuraRecursiveErr::Current(e))?;
+            .map_err(NeuraRecursiveErr::Current)?;
 
         // TODO: ensure that this operation (and all recursive operations) are directly allocated on the heap
         let child_network = self
             .child_network
             .construct(layer.output_shape())
-            .map_err(|e| NeuraRecursiveErr::Child(e))?;
+            .map_err(NeuraRecursiveErr::Child)?;
         let child_network = Box::new(child_network);
 
         Ok(NeuraSequential {
diff --git a/src/network/sequential/layer_impl.rs b/src/network/sequential/layer_impl.rs
index 283b55c..bcf27fa 100644
--- a/src/network/sequential/layer_impl.rs
+++ b/src/network/sequential/layer_impl.rs
@@ -75,10 +75,9 @@ impl, ChildNetwork: NeuraLayer> N
         let transient_epsilon =
             self.child_network
                 .backprop_layer(&transient_output, &intermediary.1, incoming_epsilon);
-        let outgoing_epsilon =
-            self.layer
-                .backprop_layer(input, &intermediary.0, &transient_epsilon);
+
 
-        outgoing_epsilon
+        self.layer
+            .backprop_layer(input, &intermediary.0, &transient_epsilon)
     }
 }
diff --git a/src/network/sequential/mod.rs b/src/network/sequential/mod.rs
index bd01449..c3c9b30 100644
--- a/src/network/sequential/mod.rs
+++ b/src/network/sequential/mod.rs
@@ -70,7 +70,7 @@ impl NeuraSequential {
         Layer: NeuraLayer,
     {
         NeuraSequential {
-            layer: layer,
+            layer,
            child_network: Box::new(self),
         }
     }
@@ -80,7 +80,7 @@ impl From for NeuraSequential {
     fn from(layer: Layer) -> Self {
         Self {
             layer,
-            child_network: Box::new(NeuraSequentialLast::default()),
+            child_network: Box::<NeuraSequentialLast>::default(),
         }
     }
 }
diff --git a/src/network/sequential/tail.rs b/src/network/sequential/tail.rs
index efc4c6d..13d40bb 100644
--- a/src/network/sequential/tail.rs
+++ b/src/network/sequential/tail.rs
@@ -2,6 +2,7 @@ use super::*;
 
 /// Last element of a NeuraSequential network
 #[derive(Clone, Debug, PartialEq, Copy)]
+#[derive(Default)]
 pub struct NeuraSequentialLast {
     shape: Option,
 }
@@ -28,7 +29,7 @@ impl NeuraLayerBase for NeuraSequentialLast {
 
     #[inline(always)]
     fn default_gradient(&self) -> Self::Gradient {
-        ()
+
     }
 }
 
@@ -116,11 +117,7 @@ impl NeuraNetwork for NeuraSequentialLast {
     }
 }
 
-impl Default for NeuraSequentialLast {
-    fn default() -> Self {
-        Self { shape: None }
-    }
-}
+
 
 /// Operations on the tail end of a sequential network
 pub trait NeuraSequentialTail {
@@ -146,7 +143,7 @@ impl NeuraSequentialTail for NeuraSequential
             layer: self.layer,
             child_network: Box::new(NeuraSequential {
                 layer,
-                child_network: Box::new(NeuraSequentialLast::default()),
+                child_network: Box::<NeuraSequentialLast>::default(),
             }),
         }
     }
diff --git a/src/train.rs b/src/train.rs
index af10da8..d340a04 100644
--- a/src/train.rs
+++ b/src/train.rs
@@ -123,10 +123,10 @@ impl NeuraBatchedTrainer {
 
             for _ in 0..self.batch_size {
                 if let Some((input, target)) = iter.next() {
-                    let gradient = gradient_solver.get_gradient(&network, &input, &target);
+                    let gradient = gradient_solver.get_gradient(network, &input, &target);
                     gradient_sum.add_assign(&gradient);
 
-                    train_loss += gradient_solver.score(&network, &input, &target);
+                    train_loss += gradient_solver.score(network, &input, &target);
                 } else {
                     break 'd;
                 }
@@ -151,7 +151,7 @@ impl NeuraBatchedTrainer {
             network.prepare_layer(false);
             let mut val_loss = 0.0;
             for (input, target) in test_inputs {
-                val_loss += gradient_solver.score(&network, input, target);
+                val_loss += gradient_solver.score(network, input, target);
             }
             val_loss /= test_inputs.len() as f64;
             train_loss /= (self.batch_size * self.log_iterations) as f64;
diff --git a/src/utils.rs b/src/utils.rs
index d1296e9..6c4c9d0 100644
--- a/src/utils.rs
+++ b/src/utils.rs
@@ -26,7 +26,7 @@ impl Iterator for Chunked {
             }
         }
 
-        if result.len() > 0 {
+        if !result.is_empty() {
             Some(result)
         } else {
             None
@@ -54,8 +54,8 @@ where
         if let Some(next) = self.iter.next() {
             // Base iterator is not empty yet
             self.buffer.push(next.clone());
-            return Some(next);
-        } else if self.buffer.len() > 0 {
+            Some(next)
+        } else if !self.buffer.is_empty() {
             if self.index == 0 {
                 // Shuffle the vector and return the first element, setting the index to 1
                 self.buffer.shuffle(&mut self.rng);
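
Note on the tail.rs change above: deriving `Default` and building the boxed value with `Box::<NeuraSequentialLast>::default()` is the usual replacement for a hand-written `Default` impl plus `Box::new(T::default())`, and is likely what clippy's `box_default` lint suggested here. A minimal standalone sketch of the pattern, using a hypothetical `Last` type rather than the crate's own:

    // Sketch only: `Last` stands in for NeuraSequentialLast; it is not part of this crate.
    #[derive(Clone, Debug, PartialEq, Copy, Default)]
    struct Last {
        shape: Option<usize>, // the derive defaults this to None, same as the removed manual impl
    }

    fn main() {
        // Equivalent to Box::new(Last::default()); Box<T> implements Default when
        // T: Default, so the turbofish form builds the boxed value in one call.
        let boxed: Box<Last> = Box::<Last>::default();
        assert_eq!(*boxed, Last { shape: None });
    }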