🎨 Run clippy on the project :)

main
Shad Amethyst 2 years ago
parent 6fbfd4e38c
commit b8c654ebb1
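The hunks below are mechanical clippy fixes: needless borrows dropped, `len() > 0` checks replaced by `is_empty()`, closures like `|e| Variant(e)` replaced by the path they wrap, redundant field names and lifetimes elided, trailing unit values removed, and a hand-written `Default` impl replaced by `#[derive(Default)]`. As a rough sketch (the names below are invented for illustration and do not come from the crate), these rewrites look like:

```rust
// Hypothetical before/after sketch of the kinds of rewrites clippy suggests.
// None of these items exist in the crate; they are for illustration only.

#[derive(Debug)]
enum MyErr {
    Parse(String),
}

struct Wrapper {
    items: Vec<f64>,
}

impl Wrapper {
    // redundant_field_names: `Wrapper { items: items }` becomes `Wrapper { items }`.
    fn new(items: Vec<f64>) -> Self {
        Wrapper { items }
    }

    // needless_lifetimes: `fn first<'a>(&'a self) -> Option<&'a f64>` elides to this.
    fn first(&self) -> Option<&f64> {
        self.items.first()
    }

    // len_zero: `self.items.len() > 0` becomes `!self.items.is_empty()`.
    fn has_items(&self) -> bool {
        !self.items.is_empty()
    }
}

// redundant_closure: `.map_err(|e| MyErr::Parse(e))` becomes `.map_err(MyErr::Parse)`.
fn parse(s: &str) -> Result<f64, MyErr> {
    s.trim()
        .parse::<f64>()
        .map_err(|e| e.to_string())
        .map_err(MyErr::Parse)
}

fn main() {
    let w = Wrapper::new(vec![1.0, 2.0]);
    assert!(w.has_items());
    assert_eq!(w.first(), Some(&1.0));
    assert_eq!(parse(" 3.5 ").unwrap(), 3.5);
}
```

All of these rewrites are behaviour-preserving; the lints only tighten the surface syntax.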

@@ -82,7 +82,7 @@ impl<T: NeuraVectorSpace + ?Sized> NeuraVectorSpace for Box<T> {
 impl NeuraVectorSpace for dyn NeuraDynVectorSpace {
     fn add_assign(&mut self, other: &Self) {
-        <dyn NeuraDynVectorSpace>::add_assign(self, &*other)
+        <dyn NeuraDynVectorSpace>::add_assign(self, other)
     }
     fn mul_assign(&mut self, by: f64) {

@@ -34,7 +34,7 @@ impl<const LENGTH: usize, F> NeuraVector<LENGTH, F> {
         LENGTH
     }
-    pub fn iter<'a>(&'a self) -> std::slice::Iter<'a, F> {
+    pub fn iter(&self) -> std::slice::Iter<'_, F> {
         self.data.iter()
     }
 }

@@ -59,7 +59,7 @@ impl NeuraAxisBase for NeuraAxisAppend {
     type Err = NeuraAxisErr;
     fn shape(&self, inputs: &[NeuraShape]) -> Result<NeuraShape, NeuraAxisErr> {
-        let mut inputs = inputs.into_iter().map(|x| *x.borrow());
+        let mut inputs = inputs.iter().map(|x| *x.borrow());
         if let Some(mut res) = inputs.next() {
             for operand in inputs {
                 match (res, operand) {
@@ -82,7 +82,7 @@ impl<F: Clone + Default + Scalar> NeuraAxis<DVector<F>> for NeuraAxisAppend {
     type Combined = DVector<F>;
     fn combine(&self, inputs: &[impl Borrow<DVector<F>>]) -> Self::Combined {
-        assert!(inputs.len() > 0);
+        assert!(!inputs.is_empty());
         let mut res = Vec::with_capacity(inputs.iter().map(|vec| vec.borrow().len()).sum());
         for input in inputs {

@@ -44,7 +44,7 @@ where
     }
     fn score(&self, trainable: &Trainable, input: &Input, target: &Target) -> f64 {
-        let output = trainable.eval(&input);
+        let output = trainable.eval(input);
         self.loss.eval(target, &output).to_f64().unwrap()
     }
 }

@@ -129,8 +129,8 @@ trait ForwardForwardRecurse<Input, Network, Gradient> {
 impl<Act, Input> ForwardForwardRecurse<Input, (), ()> for NeuraForwardPair<Act> {
     #[inline(always)]
-    fn recurse(&self, _network: &(), _input: &Input) -> () {
-        ()
+    fn recurse(&self, _network: &(), _input: &Input) {
     }
 }

@@ -51,7 +51,7 @@ impl<R: Rng + Clone + std::fmt::Debug + 'static> NeuraLayerBase for NeuraDropout
     type Gradient = ();
     fn default_gradient(&self) -> Self::Gradient {
-        ()
     }
     fn output_shape(&self) -> NeuraShape {

@@ -69,7 +69,7 @@ impl NeuraLayerBase for NeuraIsolateLayer {
     #[inline(always)]
     fn default_gradient(&self) -> Self::Gradient {
-        ()
     }
     fn output_shape(&self) -> NeuraShape {
@@ -87,7 +87,7 @@ impl<F: Clone + Scalar + Default> NeuraLayer<DVector<F>> for NeuraIsolateLayer {
             panic!("NeuraIsolateLayer expected a value of dimension {}, got a vector", self.start.dims());
         };
-        let res = DVector::from_iterator(end - start, input.iter().cloned().skip(start).take(end));
+        let res = DVector::from_iterator(end - start, input.iter().skip(start).cloned().take(end));
         (res, ())
     }

@@ -32,7 +32,7 @@ impl<Layer: NeuraLayerBase> NeuraLayerBase for NeuraLockLayer<Layer> {
     }
     fn default_gradient(&self) -> Self::Gradient {
-        ()
     }
     fn prepare_layer(&mut self, is_training: bool) {

@@ -189,7 +189,7 @@ impl NeuraLayerBase for () {
     #[inline(always)]
     fn default_gradient(&self) -> Self::Gradient {
-        ()
     }
     #[inline(always)]

@@ -41,7 +41,7 @@ impl NeuraLayerBase for NeuraNormalizeLayer {
     type Gradient = ();
     fn default_gradient(&self) -> Self::Gradient {
-        ()
     }
 }

@@ -33,7 +33,7 @@ impl NeuraLayerBase for NeuraSoftmaxLayer {
     }
     fn default_gradient(&self) -> Self::Gradient {
-        ()
     }
 }
@@ -76,7 +76,7 @@ impl<F: Float + Scalar + NumAssignOps> NeuraLayer<DVector<F>> for NeuraSoftmaxLa
         let mut epsilon = epsilon.clone();
         // Compute $a_{l-1,i} ° \epsilon_{l,i}$
-        hadamard_product(&mut epsilon, &evaluated);
+        hadamard_product(&mut epsilon, evaluated);
         // Compute $\sum_{k}{a_{l-1,k} \epsilon_{l,k}}$
         let sum_diagonal_terms = epsilon.sum();

@@ -19,7 +19,7 @@ impl<Data> FromSequential<NeuraSequentialLast, Data> for NeuraGraph<Data> {
         Self {
             output_index: nodes.len(),
             buffer_size: nodes.len() + 1,
-            nodes: nodes,
+            nodes,
             output_shape: input_shape,
         }
     }

@@ -28,7 +28,7 @@ impl<Data> Clone for NeuraGraphNodeConstructed<Data> {
         Self {
             node: dyn_clone::clone_box(&*self.node),
             inputs: self.inputs.clone(),
-            output: self.output.clone(),
+            output: self.output,
         }
     }
 }

@@ -10,8 +10,8 @@ use crate::{
 use super::*;
 pub trait NeuraGraphNodePartial<Data>: DynClone + Debug {
-    fn inputs<'a>(&'a self) -> &'a [String];
-    fn name<'a>(&'a self) -> &'a str;
+    fn inputs(&self) -> &[String];
+    fn name(&self) -> &str;
     fn construct(
         &self,
@@ -112,7 +112,7 @@ where
 impl<Data: Clone, Axis: NeuraAxis<Data>, Layer: NeuraLayer<Axis::Combined, Output = Data>>
     NeuraGraphNodeEval<Data> for NeuraGraphNode<Axis, Layer>
 {
-    fn eval<'a>(&'a self, inputs: &[Data]) -> Data {
+    fn eval(&self, inputs: &[Data]) -> Data {
         let combined = self.axis.combine(inputs);
         self.layer.eval(&combined)
     }
@@ -184,11 +184,11 @@ where
     Layer::Constructed: NeuraLayer<Axis::Combined, Output = Data>,
     Layer::Err: Debug,
 {
-    fn inputs<'a>(&'a self) -> &'a [String] {
+    fn inputs(&self) -> &[String] {
         &self.inputs
     }
-    fn name<'a>(&'a self) -> &'a str {
+    fn name(&self) -> &str {
         &self.name
     }

@@ -169,7 +169,7 @@ impl<Data: Clone + std::fmt::Debug + 'static> NeuraPartialLayer for NeuraGraphPa
         let (constructed, output_shape) = node
             .construct(input_shapes)
-            .map_err(|e| NeuraGraphErr::LayerErr(e))?;
+            .map_err(NeuraGraphErr::LayerErr)?;
         shapes[index] = Some(output_shape);

@@ -44,7 +44,7 @@ impl<Layer: NeuraPartialLayer, ChildNetwork: NeuraResidualConstruct, Axis: Neura
             .map_err(|e| NeuraRecursiveErr::Current(Layer(e)))?;
         let layer_shape = Rc::new(layer.output_shape());
-        if self.offsets.len() == 0 {
+        if self.offsets.is_empty() {
             return Err(NeuraRecursiveErr::Current(NoOutput));
         }
@@ -63,7 +63,7 @@ impl<Layer: NeuraPartialLayer, ChildNetwork: NeuraResidualConstruct, Axis: Neura
         let child_network = self
             .child_network
             .construct_residual(rest_inputs, rest_indices, current_index + 1)
-            .map_err(|e| NeuraRecursiveErr::Child(e))?;
+            .map_err(NeuraRecursiveErr::Child)?;
         Ok(NeuraResidualNode {
             layer,

@@ -67,7 +67,7 @@ impl NeuraLayerBase for NeuraResidualLast {
     type Gradient = ();
     fn default_gradient(&self) -> Self::Gradient {
-        ()
     }
     fn output_shape(&self) -> NeuraShape {
@@ -98,7 +98,7 @@ impl NeuraNetworkRec for NeuraResidualLast {
     where
         Self::Layer: NeuraLayerBase,
     {
-        ()
     }
 }
@@ -147,8 +147,7 @@ impl<Data: Clone> NeuraLayer<NeuraResidualInput<Data>> for NeuraResidualLast {
     fn eval_training(&self, input: &NeuraResidualInput<Data>) -> (Self::Output, ()) {
         let result: Rc<Self::Output> = input.clone().get_first()
-            .expect("Invalid NeuraResidual state: network returned no data, did you forget to link the last layer?")
-            .into();
+            .expect("Invalid NeuraResidual state: network returned no data, did you forget to link the last layer?");
         (unwrap_or_clone(result), ())
     }

@@ -68,7 +68,7 @@ where
         epsilon: &Self::Output,
     ) -> Self::Gradient {
         self.layers
-            .get_gradient(&self.input_to_residual_input(input), intermediary, &epsilon)
+            .get_gradient(&self.input_to_residual_input(input), intermediary, epsilon)
     }
     fn backprop_layer(
@@ -79,7 +79,7 @@ where
     ) -> Data {
         unwrap_or_clone(
             self.layers
-                .backprop_layer(&self.input_to_residual_input(input), intermediary, &epsilon)
+                .backprop_layer(&self.input_to_residual_input(input), intermediary, epsilon)
                 .get_first()
                 .unwrap(),
         )

@@ -24,13 +24,13 @@ impl<Layer: NeuraPartialLayer, ChildNetwork: NeuraPartialLayer> NeuraPartialLaye
         let layer = self
             .layer
             .construct(input_shape)
-            .map_err(|e| NeuraRecursiveErr::Current(e))?;
+            .map_err(NeuraRecursiveErr::Current)?;
         // TODO: ensure that this operation (and all recursive operations) are directly allocated on the heap
         let child_network = self
             .child_network
             .construct(layer.output_shape())
-            .map_err(|e| NeuraRecursiveErr::Child(e))?;
+            .map_err(NeuraRecursiveErr::Child)?;
         let child_network = Box::new(child_network);
         Ok(NeuraSequential {

@@ -75,10 +75,9 @@ impl<Input, Layer: NeuraLayer<Input>, ChildNetwork: NeuraLayer<Layer::Output>> N
         let transient_epsilon =
             self.child_network
                 .backprop_layer(&transient_output, &intermediary.1, incoming_epsilon);
-        let outgoing_epsilon =
-            self.layer
-                .backprop_layer(input, &intermediary.0, &transient_epsilon);
-        outgoing_epsilon
+        self.layer
+            .backprop_layer(input, &intermediary.0, &transient_epsilon)
     }
 }

@@ -70,7 +70,7 @@ impl<Layer, ChildNetwork> NeuraSequential<Layer, ChildNetwork> {
         Layer: NeuraLayer<Input>,
     {
         NeuraSequential {
-            layer: layer,
+            layer,
             child_network: Box::new(self),
         }
     }
@@ -80,7 +80,7 @@ impl<Layer> From<Layer> for NeuraSequential<Layer, NeuraSequentialLast> {
     fn from(layer: Layer) -> Self {
         Self {
             layer,
-            child_network: Box::new(NeuraSequentialLast::default()),
+            child_network: Box::<NeuraSequentialLast>::default(),
         }
     }
 }

@@ -2,6 +2,7 @@ use super::*;
 /// Last element of a NeuraSequential network
 #[derive(Clone, Debug, PartialEq, Copy)]
+#[derive(Default)]
 pub struct NeuraSequentialLast {
     shape: Option<NeuraShape>,
 }
@@ -28,7 +29,7 @@ impl NeuraLayerBase for NeuraSequentialLast {
     #[inline(always)]
     fn default_gradient(&self) -> Self::Gradient {
-        ()
     }
 }
@@ -116,11 +117,7 @@ impl<Input: Clone> NeuraNetwork<Input> for NeuraSequentialLast {
     }
 }
-impl Default for NeuraSequentialLast {
-    fn default() -> Self {
-        Self { shape: None }
-    }
-}
 /// Operations on the tail end of a sequential network
 pub trait NeuraSequentialTail {
@@ -146,7 +143,7 @@ impl<Layer> NeuraSequentialTail for NeuraSequential<Layer, NeuraSequentialLast>
             layer: self.layer,
             child_network: Box::new(NeuraSequential {
                 layer,
-                child_network: Box::new(NeuraSequentialLast::default()),
+                child_network: Box::<NeuraSequentialLast>::default(),
             }),
         }
     }

@@ -123,10 +123,10 @@ impl NeuraBatchedTrainer {
             for _ in 0..self.batch_size {
                 if let Some((input, target)) = iter.next() {
-                    let gradient = gradient_solver.get_gradient(&network, &input, &target);
+                    let gradient = gradient_solver.get_gradient(network, &input, &target);
                     gradient_sum.add_assign(&gradient);
-                    train_loss += gradient_solver.score(&network, &input, &target);
+                    train_loss += gradient_solver.score(network, &input, &target);
                 } else {
                     break 'd;
                 }
@@ -151,7 +151,7 @@ impl NeuraBatchedTrainer {
             network.prepare_layer(false);
             let mut val_loss = 0.0;
             for (input, target) in test_inputs {
-                val_loss += gradient_solver.score(&network, input, target);
+                val_loss += gradient_solver.score(network, input, target);
             }
             val_loss /= test_inputs.len() as f64;
             train_loss /= (self.batch_size * self.log_iterations) as f64;

@@ -26,7 +26,7 @@ impl<J: Iterator> Iterator for Chunked<J> {
             }
         }
-        if result.len() > 0 {
+        if !result.is_empty() {
            Some(result)
         } else {
            None
@@ -54,8 +54,8 @@ where
         if let Some(next) = self.iter.next() {
             // Base iterator is not empty yet
             self.buffer.push(next.clone());
-            return Some(next);
-        } else if self.buffer.len() > 0 {
+            Some(next)
+        } else if !self.buffer.is_empty() {
             if self.index == 0 {
                 // Shuffle the vector and return the first element, setting the index to 1
                 self.buffer.shuffle(&mut self.rng);
