parent bca56a5557
commit b4a97694a6
@@ -1,135 +1,108 @@
+#![allow(unused_variables)]
+
 use super::NeuraDerivable;
 
+macro_rules! impl_derivable {
+    ( $type_f32:ty, $type_f64:ty, $self:ident, $variable:ident, $eval:expr, $derivate:expr $(; $variance_hint:expr, $bias_hint:expr )? ) => {
+        impl NeuraDerivable<f32> for $type_f32 {
+            #[inline(always)]
+            fn eval($self: &Self, $variable: f32) -> f32 {
+                $eval
+            }
+
+            #[inline(always)]
+            fn derivate($self: &Self, $variable: f32) -> f32 {
+                $derivate
+            }
+
+            $(
+                #[inline(always)]
+                fn variance_hint($self: &Self) -> f64 {
+                    $variance_hint
+                }
+
+                #[inline(always)]
+                fn bias_hint($self: &Self) -> f64 {
+                    $bias_hint
+                }
+            )?
+        }
+
+        impl NeuraDerivable<f64> for $type_f64 {
+            #[inline(always)]
+            fn eval($self: &Self, $variable: f64) -> f64 {
+                $eval
+            }
+
+            #[inline(always)]
+            fn derivate($self: &Self, $variable: f64) -> f64 {
+                $derivate
+            }
+
+            $(
+                #[inline(always)]
+                fn variance_hint($self: &Self) -> f64 {
+                    $variance_hint
+                }
+
+                #[inline(always)]
+                fn bias_hint($self: &Self) -> f64 {
+                    $bias_hint
+                }
+            )?
+        }
+    };
+
+    ( $type:ty, $variable:ident, $eval:expr, $derivate:expr $(; $variance_hint:expr, $bias_hint:expr )? ) => {
+        impl_derivable!($type, $type, self, $variable, $eval, $derivate $(; $variance_hint, $bias_hint)?);
+    };
+}
+
 #[derive(Clone, Copy, Debug, PartialEq)]
 pub struct Relu;
 
-impl NeuraDerivable<f64> for Relu {
-    #[inline(always)]
-    fn eval(&self, input: f64) -> f64 {
-        input.max(0.0)
-    }
-
-    #[inline(always)]
-    fn derivate(&self, input: f64) -> f64 {
-        if input > 0.0 {
-            1.0
-        } else {
-            0.0
-        }
-    }
-}
-
-impl NeuraDerivable<f32> for Relu {
-    #[inline(always)]
-    fn eval(&self, input: f32) -> f32 {
-        input.max(0.0)
-    }
-
-    #[inline(always)]
-    fn derivate(&self, input: f32) -> f32 {
-        if input > 0.0 {
-            1.0
-        } else {
-            0.0
-        }
-    }
-}
+impl_derivable!(Relu, x, x.max(0.0), {
+    if x > 0.0 {
+        1.0
+    } else {
+        0.0
+    }
+}; 2.0, 0.1);
 
 #[derive(Clone, Copy, Debug, PartialEq)]
 pub struct LeakyRelu<F>(pub F);
 
-impl NeuraDerivable<f64> for LeakyRelu<f64> {
-    #[inline(always)]
-    fn eval(&self, input: f64) -> f64 {
-        if input > 0.0 {
-            input
-        } else {
-            self.0 * input
-        }
-    }
-
-    #[inline(always)]
-    fn derivate(&self, input: f64) -> f64 {
-        if input > 0.0 {
-            1.0
-        } else {
-            self.0
-        }
-    }
-}
-
-impl NeuraDerivable<f32> for LeakyRelu<f32> {
-    #[inline(always)]
-    fn eval(&self, input: f32) -> f32 {
-        if input > 0.0 {
-            input
-        } else {
-            self.0 * input
-        }
-    }
-
-    #[inline(always)]
-    fn derivate(&self, input: f32) -> f32 {
-        if input > 0.0 {
-            1.0
-        } else {
-            self.0
-        }
-    }
-}
+impl_derivable!(
+    LeakyRelu<f32>,
+    LeakyRelu<f64>,
+    self,
+    x,
+    {
+        if x > 0.0 {
+            x
+        } else {
+            self.0 * x
+        }
+    },
+    {
+        if x > 0.0 {
+            1.0
+        } else {
+            self.0
+        }
+    };
+    2.0, 0.1
+);
 
 #[derive(Clone, Copy, Debug, PartialEq)]
 pub struct Tanh;
 
-impl NeuraDerivable<f64> for Tanh {
-    #[inline(always)]
-    fn eval(&self, input: f64) -> f64 {
-        0.5 * input.tanh() + 0.5
-    }
-
-    #[inline(always)]
-    fn derivate(&self, at: f64) -> f64 {
-        let tanh = at.tanh();
-        0.5 * (1.0 - tanh * tanh)
-    }
-}
-
-impl NeuraDerivable<f32> for Tanh {
-    #[inline(always)]
-    fn eval(&self, input: f32) -> f32 {
-        0.5 * input.tanh() + 0.5
-    }
-
-    #[inline(always)]
-    fn derivate(&self, at: f32) -> f32 {
-        let tanh = at.tanh();
-        0.5 * (1.0 - tanh * tanh)
-    }
-}
+impl_derivable!(Tanh, x, x.tanh(), {
+    let y = x.tanh();
+    1.0 - y * y
+});
 
 #[derive(Clone, Copy, Debug, PartialEq)]
 pub struct Linear;
 
-impl NeuraDerivable<f64> for Linear {
-    #[inline(always)]
-    fn eval(&self, input: f64) -> f64 {
-        input
-    }
-
-    #[inline(always)]
-    fn derivate(&self, _at: f64) -> f64 {
-        1.0
-    }
-}
-
-impl NeuraDerivable<f32> for Linear {
-    #[inline(always)]
-    fn eval(&self, input: f32) -> f32 {
-        input
-    }
-
-    #[inline(always)]
-    fn derivate(&self, _at: f32) -> f32 {
-        1.0
-    }
-}
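The impl_derivable! macro above generates matching NeuraDerivable<f32> and NeuraDerivable<f64> impls from a single $eval/$derivate expression pair; the optional trailing "; variance, bias" arguments fill in variance_hint and bias_hint, as done for Relu and LeakyRelu. As a minimal sketch of the pattern, a hypothetical Softplus activation (not part of this commit) could be declared the same way as Tanh and Linear:

#[derive(Clone, Copy, Debug, PartialEq)]
pub struct Softplus;

// Softplus is ln(1 + e^x); its exact derivative is the logistic sigmoid.
// The single-type macro arm expands to both the f32 and the f64 impl,
// with the untyped float literals inferring the right width in each.
impl_derivable!(Softplus, x, (1.0 + x.exp()).ln(), 1.0 / (1.0 + (-x).exp()));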
@@ -0,0 +1,61 @@
+use crate::train::NeuraTrainableLayer;
+
+use super::NeuraLayer;
+
+/// A special layer that allows you to split a vector into one-hot vectors
+#[derive(Debug, Clone, PartialEq)]
+pub struct NeuraOneHotLayer<const CATS: usize, const LENGTH: usize>;
+
+impl<const CATS: usize, const LENGTH: usize> NeuraLayer for NeuraOneHotLayer<CATS, LENGTH>
+where
+    [(); LENGTH * CATS]: Sized,
+{
+    type Input = [f64; LENGTH];
+    type Output = [f64; LENGTH * CATS];
+
+    fn eval(&self, input: &Self::Input) -> Self::Output {
+        let mut res = [0.0; LENGTH * CATS];
+
+        for i in 0..LENGTH {
+            let cat_low = input[i].floor().max(0.0).min(CATS as f64 - 2.0);
+            let amount = (input[i] - cat_low).max(0.0).min(1.0);
+            let cat_low = cat_low as usize;
+            res[i * CATS + cat_low] = 1.0 - amount;
+            res[i * CATS + cat_low + 1] = amount;
+        }
+
+        res
+    }
+}
+
+impl<const CATS: usize, const LENGTH: usize> NeuraTrainableLayer for NeuraOneHotLayer<CATS, LENGTH>
+where
+    [(); LENGTH * CATS]: Sized,
+{
+    type Delta = ();
+
+    fn backpropagate(
+        &self,
+        input: &Self::Input,
+        epsilon: Self::Output,
+    ) -> (Self::Input, Self::Delta) {
+        let mut res = [0.0; LENGTH];
+
+        for i in 0..LENGTH {
+            let cat_low = input[i].floor().max(0.0).min(CATS as f64 - 2.0) as usize;
+            let epsilon = -epsilon[i * CATS + cat_low] + epsilon[i * CATS + cat_low + 1];
+            // Scale epsilon by how many entries were ignored
+            res[i] = epsilon * CATS as f64 / 2.0;
+        }
+
+        (res, ())
+    }
+
+    fn regularize(&self) -> Self::Delta {
+        ()
+    }
+
+    fn apply_gradient(&mut self, _gradient: &Self::Delta) {
+        // Noop
+    }
+}
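NeuraOneHotLayer turns each of its LENGTH scalar inputs into a CATS-wide slot of the output, linearly interpolating between the two categories that bracket the value; backpropagate folds the two affected gradient entries back into one scalar per input. A minimal in-crate sketch of the forward pass, assuming NeuraLayer is in scope (the expected values follow from the eval logic above):

let layer = NeuraOneHotLayer::<3, 2>; // CATS = 3 categories, LENGTH = 2 inputs
let output = layer.eval(&[1.25, 0.0]);
// input[0] = 1.25: cat_low = 1, amount = 0.25, so category 1 gets 0.75 and category 2 gets 0.25
// input[1] = 0.0: all weight lands on category 0 of the second slot
assert_eq!(output, [0.0, 0.75, 0.25, 1.0, 0.0, 0.0]);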