+
Skip to content

Adding nn::MinPoolGlobal, nn::MaxPoolGlobal, nn::AvgPoolGlobal #216

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 1 commit into from
Oct 4, 2022
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 6 additions & 0 deletions src/nn/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -83,6 +83,9 @@ mod layer_norm;
mod linear;
mod module;
mod npz;
mod pool_global_avg;
mod pool_global_max;
mod pool_global_min;
mod repeated;
mod residual;
mod split_into;
Expand All @@ -95,6 +98,9 @@ pub use layer_norm::*;
pub use linear::*;
pub use module::*;
pub use npz::*;
pub use pool_global_avg::*;
pub use pool_global_max::*;
pub use pool_global_min::*;
pub use repeated::*;
pub use residual::*;
pub use split_into::*;
Expand Down
55 changes: 55 additions & 0 deletions src/nn/pool_global_avg.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,55 @@
use super::{LoadFromNpz, Module, ResetParams, SaveToNpz};
use crate::gradients::*;
use crate::tensor::*;

/// Global average pooling: collapses every spatial axis by taking the mean,
/// leaving only the channel (and batch) dimensions:
/// - Reduces 2d (C, L) to 1d (C, )
/// - Reduces 3d (C, H, W) to 1d (C, )
/// - Reduces 4d (B, C, H, W) to 2d (B, C)
///
/// **Pytorch equivalent**: `torch.nn.AdaptiveAvgPool2d(1)` followed by a flatten.
///
/// Examples:
/// ```rust
/// # use dfdx::prelude::*;
/// let m: AvgPoolGlobal = Default::default();
/// let _: Tensor1D<5> = m.forward(Tensor3D::<5, 16, 8>::zeros());
/// let _: Tensor2D<10, 5> = m.forward(Tensor4D::<10, 5, 16, 8>::zeros());
/// ```
#[derive(Clone, Copy, Default)]
pub struct AvgPoolGlobal;

// This layer holds no parameters, so (de)serialization, parameter
// initialization, and gradient updates are all no-ops.
impl SaveToNpz for AvgPoolGlobal {}
impl LoadFromNpz for AvgPoolGlobal {}

impl ResetParams for AvgPoolGlobal {
    fn reset_params<RNG: rand::Rng>(&mut self, _rng: &mut RNG) {}
}

impl CanUpdateWithGradients for AvgPoolGlobal {
    fn update<P: GradientProvider>(&mut self, _grads: &mut P, _unused: &mut UnusedTensors) {}
}

// Batched images: (B, C, H, W) -> (B, C), averaging over H and W.
impl<const B: usize, const C: usize, const H: usize, const W: usize, T: Tape>
    Module<Tensor4D<B, C, H, W, T>> for AvgPoolGlobal
{
    type Output = Tensor2D<B, C, T>;
    fn forward(&self, x: Tensor4D<B, C, H, W, T>) -> Self::Output {
        x.mean()
    }
}

// Single image: (C, H, W) -> (C, ), averaging over H and W.
impl<const C: usize, const H: usize, const W: usize, T: Tape> Module<Tensor3D<C, H, W, T>>
    for AvgPoolGlobal
{
    type Output = Tensor1D<C, T>;
    fn forward(&self, x: Tensor3D<C, H, W, T>) -> Self::Output {
        x.mean()
    }
}

// 1d signal: (C, L) -> (C, ), averaging over L.
impl<const C: usize, const L: usize, T: Tape> Module<Tensor2D<C, L, T>> for AvgPoolGlobal {
    type Output = Tensor1D<C, T>;
    fn forward(&self, x: Tensor2D<C, L, T>) -> Self::Output {
        x.mean()
    }
}
55 changes: 55 additions & 0 deletions src/nn/pool_global_max.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,55 @@
use super::{LoadFromNpz, Module, ResetParams, SaveToNpz};
use crate::gradients::*;
use crate::tensor::*;

/// Global max pooling: collapses every spatial axis by taking the maximum,
/// leaving only the channel (and batch) dimensions:
/// - Reduces 2d (C, L) to 1d (C, )
/// - Reduces 3d (C, H, W) to 1d (C, )
/// - Reduces 4d (B, C, H, W) to 2d (B, C)
///
/// **Pytorch equivalent**: `torch.nn.AdaptiveMaxPool2d(1)` followed by a flatten.
///
/// Examples:
/// ```rust
/// # use dfdx::prelude::*;
/// let m: MaxPoolGlobal = Default::default();
/// let _: Tensor1D<5> = m.forward(Tensor3D::<5, 16, 8>::zeros());
/// let _: Tensor2D<10, 5> = m.forward(Tensor4D::<10, 5, 16, 8>::zeros());
/// ```
#[derive(Clone, Copy, Default)]
pub struct MaxPoolGlobal;

// This layer holds no parameters, so (de)serialization, parameter
// initialization, and gradient updates are all no-ops.
impl SaveToNpz for MaxPoolGlobal {}
impl LoadFromNpz for MaxPoolGlobal {}

impl ResetParams for MaxPoolGlobal {
    fn reset_params<RNG: rand::Rng>(&mut self, _rng: &mut RNG) {}
}

impl CanUpdateWithGradients for MaxPoolGlobal {
    fn update<P: GradientProvider>(&mut self, _grads: &mut P, _unused: &mut UnusedTensors) {}
}

// Batched images: (B, C, H, W) -> (B, C), taking the max over H and W.
impl<const B: usize, const C: usize, const H: usize, const W: usize, T: Tape>
    Module<Tensor4D<B, C, H, W, T>> for MaxPoolGlobal
{
    type Output = Tensor2D<B, C, T>;
    fn forward(&self, x: Tensor4D<B, C, H, W, T>) -> Self::Output {
        x.max()
    }
}

// Single image: (C, H, W) -> (C, ), taking the max over H and W.
impl<const C: usize, const H: usize, const W: usize, T: Tape> Module<Tensor3D<C, H, W, T>>
    for MaxPoolGlobal
{
    type Output = Tensor1D<C, T>;
    fn forward(&self, x: Tensor3D<C, H, W, T>) -> Self::Output {
        x.max()
    }
}

// 1d signal: (C, L) -> (C, ), taking the max over L.
impl<const C: usize, const L: usize, T: Tape> Module<Tensor2D<C, L, T>> for MaxPoolGlobal {
    type Output = Tensor1D<C, T>;
    fn forward(&self, x: Tensor2D<C, L, T>) -> Self::Output {
        x.max()
    }
}
55 changes: 55 additions & 0 deletions src/nn/pool_global_min.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,55 @@
use super::{LoadFromNpz, Module, ResetParams, SaveToNpz};
use crate::gradients::*;
use crate::tensor::*;

/// Applies min pooling over an entire image, fully reducing the height and width
/// dimensions:
/// - Reduces 2d (C, L) to 1d (C, )
/// - Reduces 3d (C, H, W) to 1d (C, )
/// - Reduces 4d (B, C, H, W) to 2d (B, C)
///
/// **Pytorch equivalent**: pytorch has no adaptive *min* pool; the closest is
/// negating the input, applying `torch.nn.AdaptiveMaxPool2d(1)`, negating the
/// result, then flattening.
///
/// Examples:
/// ```rust
/// # use dfdx::prelude::*;
/// let m: MinPoolGlobal = Default::default();
/// let _: Tensor1D<5> = m.forward(Tensor3D::<5, 16, 8>::zeros());
/// let _: Tensor2D<10, 5> = m.forward(Tensor4D::<10, 5, 16, 8>::zeros());
/// ```
#[derive(Clone, Copy, Default)]
pub struct MinPoolGlobal;

// This layer holds no parameters, so parameter initialization, gradient
// updates, and (de)serialization are all no-ops.
impl ResetParams for MinPoolGlobal {
    fn reset_params<R: rand::Rng>(&mut self, _: &mut R) {}
}
impl CanUpdateWithGradients for MinPoolGlobal {
    fn update<G: GradientProvider>(&mut self, _: &mut G, _: &mut UnusedTensors) {}
}
impl SaveToNpz for MinPoolGlobal {}
impl LoadFromNpz for MinPoolGlobal {}

// Batched images: (B, C, H, W) -> (B, C), taking the min over H and W.
impl<const B: usize, const C: usize, const H: usize, const W: usize, T: Tape>
    Module<Tensor4D<B, C, H, W, T>> for MinPoolGlobal
{
    type Output = Tensor2D<B, C, T>;
    fn forward(&self, x: Tensor4D<B, C, H, W, T>) -> Self::Output {
        x.min()
    }
}

// Single image: (C, H, W) -> (C, ), taking the min over H and W.
impl<const C: usize, const H: usize, const W: usize, T: Tape> Module<Tensor3D<C, H, W, T>>
    for MinPoolGlobal
{
    type Output = Tensor1D<C, T>;
    fn forward(&self, x: Tensor3D<C, H, W, T>) -> Self::Output {
        x.min()
    }
}

// 1d signal: (C, L) -> (C, ), taking the min over L.
impl<const C: usize, const L: usize, T: Tape> Module<Tensor2D<C, L, T>> for MinPoolGlobal {
    type Output = Tensor1D<C, T>;
    fn forward(&self, x: Tensor2D<C, L, T>) -> Self::Output {
        x.min()
    }
}