🎉 First commit

main
Shad Amethyst 2 years ago
commit 7759a6615d

.gitignore

@@ -0,0 +1,2 @@
/target
/Cargo.lock

Cargo.toml
@@ -0,0 +1,10 @@
[package]
name = "neuramethyst"
version = "0.1.0"
edition = "2021"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
ndarray = "^0.15"
rand = "^0.8"

src/activation.rs
@@ -0,0 +1,45 @@
pub trait Activation {
    fn eval(&self, input: f64) -> f64;

    fn eval_f32(&self, input: f32) -> f32 {
        self.eval(input as f64) as f32
    }

    fn derivate(&self, at: f64) -> f64;

    fn derivate_f32(&self, at: f32) -> f32 {
        self.derivate(at as f64) as f32
    }
}

#[derive(Clone, Copy, Debug, PartialEq)]
pub struct Relu;

impl Activation for Relu {
    #[inline(always)]
    fn eval(&self, input: f64) -> f64 {
        input.max(0.0)
    }

    #[inline(always)]
    fn eval_f32(&self, input: f32) -> f32 {
        input.max(0.0)
    }

    #[inline(always)]
    fn derivate(&self, input: f64) -> f64 {
        if input > 0.0 {
            1.0
        } else {
            0.0
        }
    }

    #[inline(always)]
    fn derivate_f32(&self, input: f32) -> f32 {
        if input > 0.0 {
            1.0
        } else {
            0.0
        }
    }
}
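
As a quick sanity check, here is a minimal sketch of how the `Activation` trait could be exercised and extended from outside the crate. The `LeakyRelu` type and its slope field are hypothetical, not part of this commit; only `eval` and `derivate` need implementing, since the `_f32` variants have default bodies.

use neuramethyst::activation::{Activation, Relu};

// Hypothetical leaky variant, shown only to illustrate implementing the trait.
#[derive(Clone, Copy, Debug, PartialEq)]
struct LeakyRelu(f64); // slope used for negative inputs

impl Activation for LeakyRelu {
    fn eval(&self, input: f64) -> f64 {
        if input > 0.0 { input } else { self.0 * input }
    }

    fn derivate(&self, at: f64) -> f64 {
        if at > 0.0 { 1.0 } else { self.0 }
    }
}

fn main() {
    assert_eq!(Relu.eval(-1.5), 0.0);
    assert_eq!(Relu.derivate(2.0), 1.0);
    assert_eq!(LeakyRelu(0.01).eval(-2.0), -0.02);
}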

src/layer/mod.rs
@@ -0,0 +1,9 @@
mod dense;
pub use dense::NeuraDenseLayer;

pub trait NeuraLayer {
    type Input;
    type Output;

    fn eval(&self, input: &Self::Input) -> Self::Output;
}
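
A minimal sketch of what implementing `NeuraLayer` looks like for a toy pass-through layer; the `Identity` type below is illustrative only and does not exist in the repository.

use neuramethyst::layer::NeuraLayer;

// Illustrative only: a layer that returns its input unchanged.
struct Identity<const N: usize>;

impl<const N: usize> NeuraLayer for Identity<N> {
    type Input = [f64; N];
    type Output = [f64; N];

    fn eval(&self, input: &Self::Input) -> Self::Output {
        // [f64; N] is Copy, so dereferencing yields a fresh array
        *input
    }
}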

src/layer/dense.rs
@@ -0,0 +1,79 @@
use super::NeuraLayer;
use crate::{activation::Activation, utils::multiply_matrix_vector};
use rand::Rng;

pub struct NeuraDenseLayer<Act: Activation, const INPUT_LEN: usize, const OUTPUT_LEN: usize> {
    weights: [[f64; INPUT_LEN]; OUTPUT_LEN],
    bias: [f64; OUTPUT_LEN],
    activation: Act,
}

impl<Act: Activation, const INPUT_LEN: usize, const OUTPUT_LEN: usize>
    NeuraDenseLayer<Act, INPUT_LEN, OUTPUT_LEN>
{
    pub fn new(
        weights: [[f64; INPUT_LEN]; OUTPUT_LEN],
        bias: [f64; OUTPUT_LEN],
        activation: Act,
    ) -> Self {
        Self {
            weights,
            bias,
            activation,
        }
    }

    pub fn from_rng(rng: &mut impl Rng, activation: Act) -> Self {
        let mut weights = [[0.0; INPUT_LEN]; OUTPUT_LEN];

        // He-style scaling, sqrt(2 / INPUT_LEN), suited to ReLU-like activations
        let multiplier = std::f64::consts::SQRT_2 / (INPUT_LEN as f64).sqrt();
        for i in 0..OUTPUT_LEN {
            for j in 0..INPUT_LEN {
                // rng.gen::<f64>() is uniform over [0, 1), so weights land in [0, multiplier)
                weights[i][j] = rng.gen::<f64>() * multiplier;
            }
        }

        Self {
            weights,
            // Biases are zero-initialized, as this shouldn't cause any issues during training
            bias: [0.0; OUTPUT_LEN],
            activation,
        }
    }
}

impl<Act: Activation, const INPUT_LEN: usize, const OUTPUT_LEN: usize> NeuraLayer
    for NeuraDenseLayer<Act, INPUT_LEN, OUTPUT_LEN>
{
    type Input = [f64; INPUT_LEN];
    type Output = [f64; OUTPUT_LEN];

    fn eval(&self, input: &Self::Input) -> Self::Output {
        // result = weights * input, then add the bias and apply the activation element-wise
        let mut result = multiply_matrix_vector(&self.weights, input);

        for i in 0..OUTPUT_LEN {
            result[i] = self.activation.eval(result[i] + self.bias[i]);
        }

        result
    }
}

#[cfg(test)]
mod test {
    use super::*;
    use crate::activation::Relu;

    #[test]
    fn test_from_rng() {
        let mut rng = rand::thread_rng();
        let layer: NeuraDenseLayer<_, 64, 32> = NeuraDenseLayer::from_rng(&mut rng, Relu);
        let mut input = [0.0; 64];
        for x in 0..64 {
            input[x] = rng.gen();
        }
        assert_eq!(layer.eval(&input).len(), 32);
    }
}
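
A minimal usage sketch, assuming the crate is consumed as a library; the layer sizes and input values below are arbitrary illustrations. The const generics are ordered activation, input length, output length, so stacking layers type-checks only when the shapes line up.

use neuramethyst::activation::Relu;
use neuramethyst::layer::{NeuraDenseLayer, NeuraLayer};

fn main() {
    let mut rng = rand::thread_rng();

    // Two stacked dense layers: 4 -> 8 -> 2
    let hidden: NeuraDenseLayer<Relu, 4, 8> = NeuraDenseLayer::from_rng(&mut rng, Relu);
    let output: NeuraDenseLayer<Relu, 8, 2> = NeuraDenseLayer::from_rng(&mut rng, Relu);

    let input = [0.5, -1.0, 0.25, 2.0];
    let prediction = output.eval(&hidden.eval(&input));
    println!("{:?}", prediction);
}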

src/lib.rs
@@ -0,0 +1,3 @@
pub mod activation;
pub mod layer;
mod utils;

src/utils.rs
@@ -0,0 +1,16 @@
pub fn multiply_matrix_vector<const WIDTH: usize, const HEIGHT: usize>(
    matrix: &[[f64; WIDTH]; HEIGHT],
    vector: &[f64; WIDTH],
) -> [f64; HEIGHT] {
    let mut result = [0.0; HEIGHT];

    for i in 0..HEIGHT {
        let mut sum = 0.0;
        for k in 0..WIDTH {
            sum += matrix[i][k] * vector[k];
        }
        result[i] = sum;
    }

    result
}