Add NeuraNetwork

main
Shad Amethyst 2 years ago
parent 7759a6615d
commit 5a20acf595

@@ -0,0 +1,32 @@
/// An extension of `std::ops::AddAssign`
pub trait NeuraAddAssign {
fn add_assign(&mut self, other: &Self);
}
impl<Left: NeuraAddAssign, Right: NeuraAddAssign> NeuraAddAssign for (Left, Right) {
fn add_assign(&mut self, other: &Self) {
NeuraAddAssign::add_assign(&mut self.0, &other.0);
NeuraAddAssign::add_assign(&mut self.1, &other.1);
}
}
impl<const N: usize, T: NeuraAddAssign> NeuraAddAssign for [T; N] {
fn add_assign(&mut self, other: &[T; N]) {
for i in 0..N {
NeuraAddAssign::add_assign(&mut self[i], &other[i]);
}
}
}
macro_rules! base {
( $type:ty ) => {
impl NeuraAddAssign for $type {
fn add_assign(&mut self, other: &Self) {
std::ops::AddAssign::add_assign(self, other);
}
}
}
}
base!(f32);
base!(f64);
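
For illustration only (not part of this commit), the tuple and array impls above let a nested gradient-like value be accumulated element-wise, bottoming out at the float impls generated by `base!`:

// Illustrative sketch: accumulate a (weights, bias)-shaped value using the
// impls above; the tuple impl recurses into the array impl, which recurses
// into the f64 impl generated by `base!`.
fn add_assign_example() {
    let mut acc: ([f64; 2], f64) = ([0.0, 0.0], 0.0);
    let grad: ([f64; 2], f64) = ([0.5, -1.0], 0.25);
    NeuraAddAssign::add_assign(&mut acc, &grad);
    assert_eq!(acc, ([0.5, -1.0], 0.25));
}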

@@ -1,27 +1,11 @@
pub trait Activation {
fn eval(&self, input: f64) -> f64;
fn eval_f32(&self, input: f32) -> f32 {
self.eval(input as f64) as f32
}
fn derivate(&self, at: f64) -> f64;
fn derivate_f32(&self, at: f32) -> f32 {
self.derivate(at as f64) as f32
}
}
use super::NeuraDerivable;
#[derive(Clone, Copy, Debug, PartialEq)]
pub struct Relu;
impl Activation for Relu {
#[inline(always)]
fn eval(&self, input: f64) -> f64 {
input.max(0.0)
}
impl NeuraDerivable<f64> for Relu {
#[inline(always)]
fn eval_f32(&self, input: f32) -> f32 {
fn eval(&self, input: f64) -> f64 {
input.max(0.0)
}
@@ -33,9 +17,16 @@ impl Activation for Relu {
0.0
}
}
}
impl NeuraDerivable<f32> for Relu {
#[inline(always)]
fn eval(&self, input: f32) -> f32 {
input.max(0.0)
}
#[inline(always)]
fn derivate_f32(&self, input: f32) -> f32 {
fn derivate(&self, input: f32) -> f32 {
if input > 0.0 {
1.0
} else {

@@ -0,0 +1,22 @@
use super::NeuraLoss;
#[derive(Clone, Copy, Debug, PartialEq)]
pub struct Euclidean;
impl<const N: usize> NeuraLoss<[f64; N]> for Euclidean {
type Out = f64;
type Target = [f64; N];
fn eval(&self, target: [f64; N], actual: [f64; N]) -> f64 {
let mut sum_squared = 0.0;
for i in 0..N {
sum_squared += (target[i] - actual[i]) * (target[i] - actual[i]);
}
sum_squared * 0.5
}
fn nabla(&self, target: [f64; N], actual: [f64; N]) -> [f64; N] {
todo!()
}
}
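
`nabla` is left as `todo!()` in this commit. For the half-squared Euclidean loss above, the gradient with respect to `actual` would presumably be `actual[i] - target[i]`; a hedged sketch, not the author's implementation:

// Sketch only: d/d(actual[i]) of 0.5 * Σ (target[j] - actual[j])^2
// is (actual[i] - target[i]).
fn euclidean_nabla_sketch<const N: usize>(target: [f64; N], actual: [f64; N]) -> [f64; N] {
    let mut grad = [0.0; N];
    for i in 0..N {
        grad[i] = actual[i] - target[i];
    }
    grad
}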

@@ -0,0 +1,20 @@
pub mod activation;
pub mod loss;
pub trait NeuraDerivable<F> {
fn eval(&self, input: F) -> F;
/// Should return the derivative of `self.eval(input)`
fn derivate(&self, at: F) -> F;
}
pub trait NeuraLoss<F> {
type Out;
type Target;
fn eval(&self, target: Self::Target, actual: F) -> Self::Out;
/// Should return the gradient of the loss function according to `actual`
/// ($\nabla_{\texttt{actual}} \texttt{self.eval}(\texttt{target}, \texttt{actual})$).
fn nabla(&self, target: Self::Target, actual: F) -> F;
}
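
To illustrate the `NeuraDerivable` contract (an evaluation function plus its derivative), here is a hypothetical leaky-ReLU activation that is not part of this commit:

// Hypothetical example: a leaky ReLU with negative slope `self.0`.
// eval(x) = x for x > 0, else slope * x; derivate returns 1.0 or the slope.
pub struct LeakyRelu(pub f64);

impl NeuraDerivable<f64> for LeakyRelu {
    fn eval(&self, input: f64) -> f64 {
        if input > 0.0 { input } else { self.0 * input }
    }

    fn derivate(&self, at: f64) -> f64 {
        if at > 0.0 { 1.0 } else { self.0 }
    }
}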

@@ -1,9 +0,0 @@
mod dense;
pub use dense::NeuraDenseLayer;
pub trait NeuraLayer {
type Input;
type Output;
fn eval(&self, input: &Self::Input) -> Self::Output;
}

@@ -1,14 +1,18 @@
use super::NeuraLayer;
use crate::{activation::Activation, utils::multiply_matrix_vector};
use crate::{derivable::NeuraDerivable, utils::multiply_matrix_vector};
use rand::Rng;
pub struct NeuraDenseLayer<Act: Activation, const INPUT_LEN: usize, const OUTPUT_LEN: usize> {
pub struct NeuraDenseLayer<
Act: NeuraDerivable<f64>,
const INPUT_LEN: usize,
const OUTPUT_LEN: usize,
> {
weights: [[f64; INPUT_LEN]; OUTPUT_LEN],
bias: [f64; OUTPUT_LEN],
activation: Act,
}
impl<Act: Activation, const INPUT_LEN: usize, const OUTPUT_LEN: usize>
impl<Act: NeuraDerivable<f64>, const INPUT_LEN: usize, const OUTPUT_LEN: usize>
NeuraDenseLayer<Act, INPUT_LEN, OUTPUT_LEN>
{
pub fn new(
@@ -43,7 +47,7 @@ impl<Act: Activation, const INPUT_LEN: usize, const OUTPUT_LEN: usize>
}
}
impl<Act: Activation, const INPUT_LEN: usize, const OUTPUT_LEN: usize> NeuraLayer
impl<Act: NeuraDerivable<f64>, const INPUT_LEN: usize, const OUTPUT_LEN: usize> NeuraLayer
for NeuraDenseLayer<Act, INPUT_LEN, OUTPUT_LEN>
{
type Input = [f64; INPUT_LEN];
@@ -64,7 +68,7 @@ impl<Act: Activation, const INPUT_LEN: usize, const OUTPUT_LEN: usize> NeuraLayer
#[cfg(test)]
mod test {
use super::*;
use crate::activation::Relu;
use crate::derivable::activation::Relu;
#[test]
fn test_from_rng() {

@@ -0,0 +1,22 @@
mod dense;
pub use dense::NeuraDenseLayer;
pub trait NeuraLayer {
type Input;
type Output;
fn eval(&self, input: &Self::Input) -> Self::Output;
}
#[macro_export]
macro_rules! neura_layer {
( "dense", $activation:expr, $output:expr ) => {
NeuraDenseLayer::from_rng(&mut rand::thread_rng(), $activation)
as NeuraDenseLayer<_, _, $output>
};
( "dense", $activation:expr, $output:expr, $input:expr ) => {
NeuraDenseLayer::from_rng(&mut rand::thread_rng(), $activation)
as NeuraDenseLayer<_, $input, $output>
};
}
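
Note the argument order: the output size comes before the optional input size, mirroring the `NeuraDenseLayer<_, INPUT, OUTPUT>` cast in the expansion. A minimal usage sketch, assuming `Relu` from `derivable::activation` is imported at the call site:

// Usage sketch (illustrative): builds an 8-input, 16-output dense layer.
fn layer_macro_example() {
    let _layer = neura_layer!("dense", Relu, 16, 8); // output = 16, input = 8
}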

@@ -1,3 +1,8 @@
pub mod activation;
#![feature(generic_arg_infer)]
pub mod derivable;
pub mod layer;
pub mod network;
pub mod algebra;
mod utils;

@@ -0,0 +1,103 @@
use crate::{layer::NeuraLayer, train::NeuraTrainable};
pub struct NeuraNetwork<Layer: NeuraLayer, ChildNetwork> {
layer: Layer,
child_network: ChildNetwork,
}
impl<Layer: NeuraLayer, ChildNetwork> NeuraNetwork<Layer, ChildNetwork> {
pub fn new(layer: Layer, child_network: ChildNetwork) -> Self {
Self {
layer,
child_network,
}
}
pub fn new_match_output(layer: Layer, child_network: ChildNetwork) -> Self
where
ChildNetwork: NeuraLayer<Input = Layer::Output>,
{
Self::new(layer, child_network)
}
pub fn child_network(&self) -> &ChildNetwork {
&self.child_network
}
}
impl<Layer: NeuraLayer> From<Layer> for NeuraNetwork<Layer, ()> {
fn from(layer: Layer) -> Self {
Self {
layer,
child_network: (),
}
}
}
impl<Layer: NeuraLayer> NeuraLayer for NeuraNetwork<Layer, ()> {
type Input = Layer::Input;
type Output = Layer::Output;
fn eval(&self, input: &Self::Input) -> Self::Output {
self.layer.eval(input)
}
}
impl<Layer: NeuraLayer, ChildNetwork: NeuraLayer<Input = Layer::Output>> NeuraLayer
for NeuraNetwork<Layer, ChildNetwork>
{
type Input = Layer::Input;
type Output = ChildNetwork::Output;
fn eval(&self, input: &Self::Input) -> Self::Output {
self.child_network.eval(&self.layer.eval(input))
}
}
#[macro_export]
macro_rules! neura_network {
[] => {
()
};
[ $layer:expr $(,)? ] => {
NeuraNetwork::from($layer)
};
[ $first:expr, $($rest:expr),+ $(,)? ] => {
NeuraNetwork::new_match_output($first, neura_network![$($rest),+])
};
}
#[cfg(test)]
mod test {
use crate::{derivable::activation::Relu, layer::NeuraDenseLayer, neura_layer};
use super::*;
#[test]
fn test_neura_network_macro() {
let mut rng = rand::thread_rng();
let _ = neura_network![
NeuraDenseLayer::from_rng(&mut rng, Relu) as NeuraDenseLayer<_, 8, 16>,
NeuraDenseLayer::from_rng(&mut rng, Relu) as NeuraDenseLayer<_, _, 12>,
NeuraDenseLayer::from_rng(&mut rng, Relu) as NeuraDenseLayer<_, _, 2>
];
let _ =
neura_network![NeuraDenseLayer::from_rng(&mut rng, Relu) as NeuraDenseLayer<_, 8, 16>,];
let _ = neura_network![
NeuraDenseLayer::from_rng(&mut rng, Relu) as NeuraDenseLayer<_, 8, 16>,
NeuraDenseLayer::from_rng(&mut rng, Relu) as NeuraDenseLayer<_, _, 12>,
];
let _ = neura_network![
neura_layer!("dense", Relu, 16, 8),
neura_layer!("dense", Relu, 12),
neura_layer!("dense", Relu, 2)
];
}
}
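
For readability, here is a hand-written equivalent of the first test network above, showing the nesting that `neura_network!` produces (same assumed imports as the test module; illustrative only):

// Illustrative expansion: neura_network![a, b, c] becomes
// NeuraNetwork::new_match_output(a, NeuraNetwork::new_match_output(b, NeuraNetwork::from(c))),
// so each layer's Output type is checked against the next layer's Input.
fn manual_network_example() {
    let mut rng = rand::thread_rng();
    let _network = NeuraNetwork::new_match_output(
        NeuraDenseLayer::from_rng(&mut rng, Relu) as NeuraDenseLayer<_, 8, 16>,
        NeuraNetwork::new_match_output(
            NeuraDenseLayer::from_rng(&mut rng, Relu) as NeuraDenseLayer<_, 16, 12>,
            NeuraNetwork::from(NeuraDenseLayer::from_rng(&mut rng, Relu) as NeuraDenseLayer<_, 12, 2>),
        ),
    );
}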

@@ -1,4 +1,4 @@
pub fn multiply_matrix_vector<const WIDTH: usize, const HEIGHT: usize>(
pub(crate) fn multiply_matrix_vector<const WIDTH: usize, const HEIGHT: usize>(
matrix: &[[f64; WIDTH]; HEIGHT],
vector: &[f64; WIDTH],
) -> [f64; HEIGHT] {
@@ -14,3 +14,43 @@ pub fn multiply_matrix_vector<const WIDTH: usize, const HEIGHT: usize>(
result
}
pub(crate) fn assign_add_vector<const N: usize>(sum: &mut [f64; N], operand: &[f64; N]) {
for i in 0..N {
sum[i] += operand[i];
}
}
pub(crate) fn chunked<I: Iterator>(
iter: I,
chunk_size: usize,
) -> impl Iterator<Item = Vec<I::Item>> {
struct Chunked<J: Iterator> {
iter: J,
chunk_size: usize,
}
impl<J: Iterator> Iterator for Chunked<J> {
type Item = Vec<J::Item>;
fn next(&mut self) -> Option<Self::Item> {
let mut result = Vec::with_capacity(self.chunk_size);
for _ in 0..self.chunk_size {
if let Some(item) = self.iter.next() {
result.push(item);
} else {
break;
}
}
if result.len() > 0 {
Some(result)
} else {
None
}
}
}
Chunked { iter, chunk_size }
}
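
For illustration (not part of the commit), `chunked` groups an iterator into `Vec` batches of at most `chunk_size` items, with a shorter final batch if the items don't divide evenly, which is the usual shape for mini-batching:

// Usage sketch: batches of size 2 over five items yield [1, 2], [3, 4], [5].
fn chunked_example() {
    let batches: Vec<Vec<i32>> = chunked(1..=5, 2).collect();
    assert_eq!(batches, vec![vec![1, 2], vec![3, 4], vec![5]]);
}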
