diff --git a/README.md b/README.md
new file mode 100644
index 0000000..8ee550d
--- /dev/null
+++ b/README.md
@@ -0,0 +1,34 @@
+# NeurAmethyst
+
+A neural network library written in and for [Rust](https://www.rust-lang.org/), focusing on flexibility and ease of use.
+
+```rust
+use neuramethyst::prelude::*;
+use neuramethyst::derivable::loss::CrossEntropy;
+
+// Create the network
+let network = neura_sequential![
+    neura_layer!("dense", 100),
+    neura_layer!("dropout", 0.5),
+    neura_layer!("dense", 40),
+    neura_layer!("dropout", 0.5),
+    neura_layer!("dense", 10),
+    neura_layer!("softmax"),
+];
+
+// Assemble the network, allowing layers to infer the shape of the input data
+let mut network = network.construct(NeuraShape::Vector(100)).unwrap();
+
+// Train the network
+let trainer = NeuraBatchedTrainer::new()
+    .learning_rate(0.03)
+    .batch_size(128)
+    .epochs(20, 50000); // number of epochs and size of the training set
+
+trainer.train(
+    &NeuraBackprop::new(CrossEntropy),
+    &mut network,
+    input_data(),
+    test_data(),
+);
+```
diff --git a/examples/bivariate-forward.rs b/examples/bivariate-forward.rs
index 2bf9cdb..acfaaa7 100644
--- a/examples/bivariate-forward.rs
+++ b/examples/bivariate-forward.rs
@@ -45,7 +45,7 @@ fn main() {
 
     if std::env::args().any(|arg| arg == "draw") {
         for epoch in 0..200 {
-            let mut trainer = NeuraBatchedTrainer::new(0.03, 10);
+            let mut trainer = NeuraBatchedTrainer::new().learning_rate(0.03).iterations(10);
             trainer.batch_size = 10;
 
             trainer.train(
@@ -84,7 +84,9 @@ fn main() {
             std::thread::sleep(std::time::Duration::new(0, 50_000_000));
         }
     } else {
-        let mut trainer = NeuraBatchedTrainer::new(0.03, 20 * 50);
+        let mut trainer = NeuraBatchedTrainer::new()
+            .learning_rate(0.03)
+            .iterations(20 * 50);
         trainer.batch_size = 10;
         trainer.log_iterations = 20;
 
diff --git a/examples/bivariate.rs b/examples/bivariate.rs
index 89fea50..d1a2771 100644
--- a/examples/bivariate.rs
+++ b/examples/bivariate.rs
@@ -42,7 +42,7 @@ fn main() {
 
     if std::env::args().any(|arg| arg == "draw") {
         for epoch in 0..200 {
-            let mut trainer = NeuraBatchedTrainer::new(0.03, 10);
+            let mut trainer = NeuraBatchedTrainer::new().learning_rate(0.03).iterations(10);
             trainer.batch_size = 10;
 
             trainer.train(
@@ -72,7 +72,9 @@ fn main() {
             std::thread::sleep(std::time::Duration::new(0, 50_000_000));
         }
     } else {
-        let mut trainer = NeuraBatchedTrainer::new(0.03, 20 * 50);
+        let mut trainer = NeuraBatchedTrainer::new()
+            .learning_rate(0.03)
+            .iterations(20 * 50);
         trainer.batch_size = 10;
         trainer.log_iterations = 20;
 
diff --git a/examples/densenet-fwdfwd.rs b/examples/densenet-fwdfwd.rs
index b7e1c1e..6dc5c1c 100644
--- a/examples/densenet-fwdfwd.rs
+++ b/examples/densenet-fwdfwd.rs
@@ -42,7 +42,7 @@ fn main() {
 
     if std::env::args().any(|arg| arg == "draw") {
         for epoch in 0..200 {
-            let mut trainer = NeuraBatchedTrainer::new(0.03, 10);
+            let mut trainer = NeuraBatchedTrainer::new().learning_rate(0.03).iterations(10);
             trainer.batch_size = 50;
 
             trainer.train(
@@ -81,7 +81,9 @@ fn main() {
             std::thread::sleep(std::time::Duration::new(0, 50_000_000));
         }
     } else {
-        let mut trainer = NeuraBatchedTrainer::new(0.03, 20 * 50);
+        let mut trainer = NeuraBatchedTrainer::new()
+            .learning_rate(0.03)
+            .iterations(20 * 50);
         trainer.batch_size = 50;
         trainer.log_iterations = 20;
 
diff --git a/examples/densenet.rs b/examples/densenet.rs
index 85018dc..937c2eb 100644
--- a/examples/densenet.rs
+++ b/examples/densenet.rs
@@ -41,7 +41,7 @@ fn main() {
 
     if std::env::args().any(|arg| arg == "draw") {
         for epoch in 0..200 {
-            let mut trainer = NeuraBatchedTrainer::new(0.03, 10);
+            let mut trainer = NeuraBatchedTrainer::new().learning_rate(0.03).iterations(10);
             trainer.batch_size = 10;
 
             trainer.train(
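Every example hunk above and below applies the same mechanical migration: the two positional arguments of the removed constructor become the `learning_rate` and `iterations` builder calls, and fields previously set by mutating the trainer can join the chain instead. A minimal before/after sketch, assuming only the builder methods added in `src/train.rs` further down:

```rust
use neuramethyst::prelude::*;

// Before: positional constructor, then field mutation.
// let mut trainer = NeuraBatchedTrainer::new(0.03, 10);
// trainer.batch_size = 10;

// After: each option is a chainable method that takes and returns Self,
// so the trainer no longer needs to be mutable while it is configured.
let trainer = NeuraBatchedTrainer::new()
    .learning_rate(0.03)
    .iterations(10)
    .batch_size(10);
```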
diff --git a/examples/forward-progressive.rs b/examples/forward-progressive.rs
index e9ce9c1..c925ba3 100644
--- a/examples/forward-progressive.rs
+++ b/examples/forward-progressive.rs
@@ -39,7 +39,7 @@ pub fn main() {
     let test_inputs = generator().filter(|x| x.1).take(50).collect::<Vec<_>>();
     let gradient_solver = NeuraForwardForward::new(Tanh, 0.5);
 
-    let mut trainer = NeuraBatchedTrainer::new(0.01, 200);
+    let mut trainer = NeuraBatchedTrainer::new().learning_rate(0.01).iterations(200);
     trainer.batch_size = 256;
 
     for _epoch in 0..EPOCHS {
diff --git a/examples/generate-tests.rs b/examples/generate-tests.rs
index c35e334..905ecd1 100644
--- a/examples/generate-tests.rs
+++ b/examples/generate-tests.rs
@@ -23,7 +23,7 @@ fn main() {
         (dvector![1.0, 1.0], dvector![0.0]),
     ];
 
-    let mut trainer = NeuraBatchedTrainer::new(0.05, 1);
+    let mut trainer = NeuraBatchedTrainer::new().learning_rate(0.05).iterations(1);
     trainer.batch_size = 1;
 
     let mut parameters = vec![(
diff --git a/examples/xor.rs b/examples/xor.rs
index 8d4d7c3..cdb2cde 100644
--- a/examples/xor.rs
+++ b/examples/xor.rs
@@ -32,7 +32,7 @@ fn main() {
         );
     }
 
-    let mut trainer = NeuraBatchedTrainer::new(0.05, 1000);
+    let mut trainer = NeuraBatchedTrainer::new().learning_rate(0.05).iterations(1000);
     trainer.batch_size = 6;
     trainer.log_iterations = 250;
     trainer.learning_momentum = 0.01;
diff --git a/src/axis.rs b/src/axis.rs
index a264a40..02f997c 100644
--- a/src/axis.rs
+++ b/src/axis.rs
@@ -33,7 +33,7 @@ impl NeuraAxisBase for NeuraAxisDefault {
         if inputs.len() != 1 {
             Err(NeuraAxisErr::InvalidAmount(inputs.len(), 1, Some(1)))
         } else {
-            Ok(*inputs[0].borrow())
+            Ok(inputs[0])
         }
     }
 }
diff --git a/src/gradient_solver/forward_forward.rs b/src/gradient_solver/forward_forward.rs
index a4d5be2..0b15774 100644
--- a/src/gradient_solver/forward_forward.rs
+++ b/src/gradient_solver/forward_forward.rs
@@ -202,7 +202,9 @@ mod test {
 
     let solver = NeuraForwardForward::new(Tanh, 0.25);
 
-    let trainer = NeuraBatchedTrainer::new(0.01, 20);
+    let trainer = NeuraBatchedTrainer::new()
+        .learning_rate(0.01)
+        .iterations(20);
 
     let inputs = (0..1).cycle().map(|_| {
         let mut rng = rand::thread_rng();
diff --git a/src/train.rs b/src/train.rs
index b64b4d2..af10da8 100644
--- a/src/train.rs
+++ b/src/train.rs
@@ -44,14 +44,39 @@ impl Default for NeuraBatchedTrainer {
 }
 
 impl NeuraBatchedTrainer {
-    pub fn new(learning_rate: f64, iterations: usize) -> Self {
-        Self {
-            learning_rate,
-            iterations,
-            ..Default::default()
-        }
+    pub fn new() -> Self {
+        Self::default()
+    }
+
+    pub fn batch_size(mut self, batch_size: usize) -> Self {
+        self.batch_size = batch_size;
+        self
+    }
+
+    pub fn learning_rate(mut self, learning_rate: f64) -> Self {
+        self.learning_rate = learning_rate;
+        self
+    }
+
+    pub fn iterations(mut self, iterations: usize) -> Self {
+        self.iterations = iterations;
+        self
+    }
+
+    pub fn log_iterations(mut self, log_iterations: usize) -> Self {
+        self.log_iterations = log_iterations;
+        self
+    }
+
+    pub fn epochs(mut self, epochs: usize, training_size: usize) -> Self {
+        if self.log_iterations == 0 {
+            self.log_iterations = (training_size / self.batch_size).max(1);
+        }
+        self.iterations = (training_size * epochs / self.batch_size).max(1);
+        self
     }
 
+    #[deprecated]
     pub fn with_epochs(
         learning_rate: f64,
         epochs: usize,
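One subtlety of the new `epochs` helper: it reads `self.batch_size` (and defaults `self.log_iterations` only when it is still 0), so `batch_size` and any explicit `log_iterations` must be chained before it. A sketch of how the README's configuration resolves, using the integer arithmetic from the builder above:

```rust
use neuramethyst::prelude::*;

// batch_size() must come before epochs(), because epochs() divides by
// whatever batch size has been set so far.
let trainer = NeuraBatchedTrainer::new()
    .learning_rate(0.03)
    .batch_size(128)
    .epochs(20, 50_000); // 20 epochs over a 50 000-sample training set

// With integer division, epochs(20, 50_000) resolves to:
//   log_iterations = (50_000 / 128).max(1)      == 390    (about once per epoch)
//   iterations     = (50_000 * 20 / 128).max(1) == 7_812  (total batches to run)
```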
diff --git a/tests/xor.rs b/tests/xor.rs
index a19f4e1..fef4a73 100644
--- a/tests/xor.rs
+++ b/tests/xor.rs
@@ -36,7 +36,7 @@ fn test_xor_training() {
         (dvector![1.0, 1.0], dvector![0.0]),
     ];
 
-    let mut trainer = NeuraBatchedTrainer::new(0.05, 1);
+    let mut trainer = NeuraBatchedTrainer::new().learning_rate(0.05).iterations(1);
     trainer.batch_size = 1;
 
     for iteration in 0..4 {
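The deprecated `with_epochs` constructor is kept for existing callers. Its trailing parameters are cut off in the `src/train.rs` hunk above; assuming they were the batch size and the training-set size (mirroring the new `epochs` builder), a migration would look like this hypothetical sketch:

```rust
use neuramethyst::prelude::*;

// Hypothetical: only `learning_rate: f64, epochs: usize` of the old
// signature are visible in the hunk above; the batch-size and
// training-set-size arguments shown here are assumptions.
// let trainer = NeuraBatchedTrainer::with_epochs(0.05, 20, 6, 240);

let trainer = NeuraBatchedTrainer::new()
    .learning_rate(0.05)
    .batch_size(6)    // chained before epochs(), which divides by it
    .epochs(20, 240);
```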