🎨 Introduce builder pattern for NeuraBatchedTrainer

main
Shad Amethyst 2 years ago
parent 93fa7e238a
commit 6fbfd4e38c

@@ -0,0 +1,34 @@
# NeurAmethyst
A neural network library written in and for [Rust](https://www.rust-lang.org/), focused on flexibility and ease of use.
```rust
use neuramethyst::prelude::*;
use neuramethyst::derivable::loss::CrossEntropy;

// Create the network
let network = neura_sequential![
    neura_layer!("dense", 100),
    neura_layer!("dropout", 0.5),
    neura_layer!("dense", 40),
    neura_layer!("dropout", 0.5),
    neura_layer!("dense", 10),
    neura_layer!("softmax"),
];

// Assemble the network, allowing layers to infer the shape of the input data
let mut network = network.construct(NeuraShape::Vector(100)).unwrap();

// Train the network
let trainer = NeuraBatchedTrainer::new()
    .learning_rate(0.03)
    .batch_size(128)
    .epochs(20, 50000); // number of epochs and size of the training set

trainer.train(
    &NeuraBackprop::new(CrossEntropy),
    &mut network,
    input_data(),
    test_data(),
);
```
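The `input_data()` and `test_data()` helpers are left undefined in the README snippet. As a rough sketch of what they might look like: the XOR examples further down in this commit feed the trainer `(input, target)` pairs of nalgebra vectors, so hypothetical generators matching the 100-in/10-out network above could be:

```rust
use nalgebra::DVector;
use rand::Rng;

// Hypothetical helper, not part of the commit: random 100-dimensional
// inputs paired with one-hot 10-dimensional targets, mirroring the
// (input, target) tuples used by the XOR examples in this commit.
fn input_data() -> Vec<(DVector<f64>, DVector<f64>)> {
    let mut rng = rand::thread_rng();
    (0..50_000)
        .map(|_| {
            let input = DVector::from_fn(100, |_, _| rng.gen_range(-1.0..1.0));
            let class = rng.gen_range(0..10);
            let target = DVector::from_fn(10, |i, _| if i == class { 1.0 } else { 0.0 });
            (input, target)
        })
        .collect()
}

// A smaller held-out split with the same shape, also hypothetical.
fn test_data() -> Vec<(DVector<f64>, DVector<f64>)> {
    input_data().into_iter().take(1_000).collect()
}
```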

@@ -45,7 +45,7 @@ fn main() {
     if std::env::args().any(|arg| arg == "draw") {
         for epoch in 0..200 {
-            let mut trainer = NeuraBatchedTrainer::new(0.03, 10);
+            let mut trainer = NeuraBatchedTrainer::new().learning_rate(0.03).iterations(10);
             trainer.batch_size = 10;
             trainer.train(
@@ -84,7 +84,9 @@ fn main() {
             std::thread::sleep(std::time::Duration::new(0, 50_000_000));
         }
     } else {
-        let mut trainer = NeuraBatchedTrainer::new(0.03, 20 * 50);
+        let mut trainer = NeuraBatchedTrainer::new()
+            .learning_rate(0.03)
+            .iterations(20 * 50);
         trainer.batch_size = 10;
         trainer.log_iterations = 20;
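Note that the examples keep setting `batch_size` and `log_iterations` through plain field assignments even though this commit adds builder methods for both (see the trainer hunk below). Assuming those methods, the else-branch above could equivalently be written as a single chain:

```rust
// Equivalent fully-chained form (a sketch, not part of the diff):
let trainer = NeuraBatchedTrainer::new()
    .learning_rate(0.03)
    .iterations(20 * 50)
    .batch_size(10)
    .log_iterations(20);
```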

@@ -42,7 +42,7 @@ fn main() {
     if std::env::args().any(|arg| arg == "draw") {
         for epoch in 0..200 {
-            let mut trainer = NeuraBatchedTrainer::new(0.03, 10);
+            let mut trainer = NeuraBatchedTrainer::new().learning_rate(0.03).iterations(10);
             trainer.batch_size = 10;
             trainer.train(
@@ -72,7 +72,9 @@ fn main() {
             std::thread::sleep(std::time::Duration::new(0, 50_000_000));
         }
     } else {
-        let mut trainer = NeuraBatchedTrainer::new(0.03, 20 * 50);
+        let mut trainer = NeuraBatchedTrainer::new()
+            .learning_rate(0.03)
+            .iterations(20 * 50);
         trainer.batch_size = 10;
         trainer.log_iterations = 20;

@@ -42,7 +42,7 @@ fn main() {
     if std::env::args().any(|arg| arg == "draw") {
         for epoch in 0..200 {
-            let mut trainer = NeuraBatchedTrainer::new(0.03, 10);
+            let mut trainer = NeuraBatchedTrainer::new().learning_rate(0.03).iterations(10);
             trainer.batch_size = 50;
             trainer.train(
@@ -81,7 +81,9 @@ fn main() {
             std::thread::sleep(std::time::Duration::new(0, 50_000_000));
         }
     } else {
-        let mut trainer = NeuraBatchedTrainer::new(0.03, 20 * 50);
+        let mut trainer = NeuraBatchedTrainer::new()
+            .learning_rate(0.03)
+            .iterations(20 * 50);
         trainer.batch_size = 50;
         trainer.log_iterations = 20;

@@ -41,7 +41,7 @@ fn main() {
     if std::env::args().any(|arg| arg == "draw") {
         for epoch in 0..200 {
-            let mut trainer = NeuraBatchedTrainer::new(0.03, 10);
+            let mut trainer = NeuraBatchedTrainer::new().learning_rate(0.03).iterations(10);
             trainer.batch_size = 10;
             trainer.train(

@@ -39,7 +39,7 @@ pub fn main() {
     let test_inputs = generator().filter(|x| x.1).take(50).collect::<Vec<_>>();
     let gradient_solver = NeuraForwardForward::new(Tanh, 0.5);
-    let mut trainer = NeuraBatchedTrainer::new(0.01, 200);
+    let mut trainer = NeuraBatchedTrainer::new().learning_rate(0.01).iterations(200);
     trainer.batch_size = 256;
     for _epoch in 0..EPOCHS {

@@ -23,7 +23,7 @@ fn main() {
         (dvector![1.0, 1.0], dvector![0.0]),
     ];
-    let mut trainer = NeuraBatchedTrainer::new(0.05, 1);
+    let mut trainer = NeuraBatchedTrainer::new().learning_rate(0.05).iterations(1);
     trainer.batch_size = 1;
     let mut parameters = vec![(

@@ -32,7 +32,7 @@ fn main() {
         );
     }
-    let mut trainer = NeuraBatchedTrainer::new(0.05, 1000);
+    let mut trainer = NeuraBatchedTrainer::new().learning_rate(0.05).iterations(1000);
     trainer.batch_size = 6;
     trainer.log_iterations = 250;
     trainer.learning_momentum = 0.01;
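Here only `learning_momentum` lacks a builder method in this commit, so even a chained rewrite of this setup keeps one field assignment; a sketch assuming the methods from the trainer hunk below:

```rust
// Chained form of the same setup (sketch, not part of the diff).
let mut trainer = NeuraBatchedTrainer::new()
    .learning_rate(0.05)
    .iterations(1000)
    .batch_size(6)
    .log_iterations(250);
// No builder method for momentum is introduced in this commit,
// so it remains a plain field write.
trainer.learning_momentum = 0.01;
```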

@@ -33,7 +33,7 @@ impl NeuraAxisBase for NeuraAxisDefault {
         if inputs.len() != 1 {
             Err(NeuraAxisErr::InvalidAmount(inputs.len(), 1, Some(1)))
         } else {
-            Ok(*inputs[0].borrow())
+            Ok(inputs[0])
         }
     }
 }

@@ -202,7 +202,9 @@ mod test {
         let solver = NeuraForwardForward::new(Tanh, 0.25);
-        let trainer = NeuraBatchedTrainer::new(0.01, 20);
+        let trainer = NeuraBatchedTrainer::new()
+            .learning_rate(0.01)
+            .iterations(20);
         let inputs = (0..1).cycle().map(|_| {
             let mut rng = rand::thread_rng();

@@ -44,14 +44,39 @@ impl Default for NeuraBatchedTrainer {
 }
 
 impl NeuraBatchedTrainer {
-    pub fn new(learning_rate: f64, iterations: usize) -> Self {
-        Self {
-            learning_rate,
-            iterations,
-            ..Default::default()
-        }
+    pub fn new() -> Self {
+        Self::default()
+    }
+
+    pub fn batch_size(mut self, batch_size: usize) -> Self {
+        self.batch_size = batch_size;
+        self
+    }
+
+    pub fn learning_rate(mut self, learning_rate: f64) -> Self {
+        self.learning_rate = learning_rate;
+        self
+    }
+
+    pub fn iterations(mut self, iterations: usize) -> Self {
+        self.iterations = iterations;
+        self
+    }
+
+    pub fn log_iterations(mut self, log_iterations: usize) -> Self {
+        self.log_iterations = log_iterations;
+        self
+    }
+
+    pub fn epochs(mut self, epochs: usize, training_size: usize) -> Self {
+        if self.log_iterations == 0 {
+            self.log_iterations = (training_size / self.batch_size).max(1);
+        }
+        self.iterations = (training_size * epochs / self.batch_size).max(1);
+        self
     }
 
+    #[deprecated]
     pub fn with_epochs(
         learning_rate: f64,
         epochs: usize,
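For a sense of what `epochs` computes, here is the arithmetic for the values used in the README example, assuming (as the `== 0` guard above suggests) that `log_iterations` defaults to 0:

```rust
// Sketch: with the defaults, log_iterations starts at 0, so epochs()
// fills both fields from the batch size and training-set size.
let trainer = NeuraBatchedTrainer::new()
    .learning_rate(0.03)
    .batch_size(128)
    .epochs(20, 50_000);
// log_iterations = (50_000 / 128).max(1)      = 390   (≈ one log per epoch)
// iterations     = (50_000 * 20 / 128).max(1) = 7812  (total batches)
```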

@@ -36,7 +36,7 @@ fn test_xor_training() {
         (dvector![1.0, 1.0], dvector![0.0]),
     ];
-    let mut trainer = NeuraBatchedTrainer::new(0.05, 1);
+    let mut trainer = NeuraBatchedTrainer::new().learning_rate(0.05).iterations(1);
     trainer.batch_size = 1;
     for iteration in 0..4 {
