2 changes: 2 additions & 0 deletions benches/batchnorm2d.rs
@@ -29,10 +29,12 @@ fn main() {
     let start = Instant::now();
     let out = m.forward_mut(img.traced(grads));
     let loss = out.square().mean();
+    dev.synchronize();
     let fwd_dur = start.elapsed();
 
     let start = Instant::now();
     let _ = loss.backward();
+    dev.synchronize();
     let bwd_dur = start.elapsed();
     println!("fwd={:?} bwd={:?}", fwd_dur, bwd_dur);
 }
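CUDA kernel launches are asynchronous, so reading `start.elapsed()` right after the forward pass would mostly capture launch overhead; the added `dev.synchronize()` makes the timer cover the actual kernel execution. A minimal standalone sketch of that timing pattern (the names `time_it`, `launch_work`, and `wait_for_device` are placeholders for illustration, not dfdx API):

use std::time::{Duration, Instant};

// Sketch of the timing pattern used in these benches. The closures are
// stand-ins: `launch_work` for the dfdx forward/backward call and
// `wait_for_device` for `dev.synchronize()`.
fn time_it(launch_work: impl FnOnce(), wait_for_device: impl FnOnce()) -> Duration {
    let start = Instant::now();
    launch_work(); // on CUDA this may return before the kernels have finished
    wait_for_device(); // block until all queued device work is done
    start.elapsed() // the duration now covers the actual execution time
}

fn main() {
    // Dummy stand-ins: on a synchronous device there is nothing to wait for.
    let dur = time_it(|| { let _ = (0..1_000u64).sum::<u64>(); }, || {});
    println!("took {:?}", dur);
}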
2 changes: 2 additions & 0 deletions benches/conv2d.rs
@@ -31,10 +31,12 @@ fn main() {
     let start = Instant::now();
     let out = m.forward_mut(img.leaky_traced());
     let loss = out.square().mean();
+    dev.synchronize();
     let fwd_dur = start.elapsed();
 
     let start = Instant::now();
     let _ = loss.backward();
+    dev.synchronize();
     let bwd_dur = start.elapsed();
     println!("fwd={:?} bwd={:?}", fwd_dur, bwd_dur);
 }
2 changes: 2 additions & 0 deletions benches/softmax.rs
@@ -27,10 +27,12 @@ fn main() {
 
     let start = Instant::now();
     let y = img.traced(grads).softmax::<Ax>();
+    dev.synchronize();
     let fwd_dur = start.elapsed();
 
     let start = Instant::now();
     let _ = y.sum().backward();
+    dev.synchronize();
     let bwd_dur = start.elapsed();
     println!("fwd={:?} bwd={:?}", fwd_dur, bwd_dur);
 }
2 changes: 2 additions & 0 deletions benches/sum.rs
@@ -26,10 +26,12 @@ fn main() {
 
     let start = Instant::now();
     let y = img.traced(grads).sum();
+    dev.synchronize();
     let fwd_dur = start.elapsed();
 
     let start = Instant::now();
     let _ = y.backward();
+    dev.synchronize();
     let bwd_dur = start.elapsed();
     println!("fwd={:?} bwd={:?}", fwd_dur, bwd_dur);
 }
4 changes: 4 additions & 0 deletions src/tensor/cpu/device.rs
@@ -86,4 +86,8 @@ impl DeviceStorage for Cpu {
         }
         buf
     }
+
+    fn try_synchronize(&self) -> Result<(), Self::Err> {
+        Ok(())
+    }
 }
18 changes: 4 additions & 14 deletions src/tensor/cuda/device.rs
@@ -82,20 +82,6 @@ impl Cuda {
             workspace,
         })
     }
-
-    /// Block until kernels finish processing. Useful for benchmarking.
-    ///
-    /// Examples:
-    /// ```rust
-    /// # use dfdx::prelude::*;
-    /// let dev: Cuda = Default::default();
-    /// let a = dev.tensor([1., 2., 3.]);
-    /// let _b = a.square();
-    /// dev.synchronize().unwrap(); // blocks until square kernel finishes.
-    /// ```
-    pub fn synchronize(&self) -> Result<(), CudaError> {
-        self.dev.synchronize().map_err(CudaError::from)
-    }
 }
 
 impl Cuda {
@@ -149,4 +135,8 @@ impl DeviceStorage for Cuda {
         }
         contiguous
     }
+
+    fn try_synchronize(&self) -> Result<(), CudaError> {
+        self.dev.synchronize().map_err(CudaError::from)
+    }
 }
8 changes: 8 additions & 0 deletions src/tensor/storage_traits.rs
@@ -28,6 +28,14 @@ pub trait DeviceStorage: 'static + std::fmt::Debug + Default + Clone + HasErr {
     fn try_alloc_grad<E: Unit>(&self, storage: &Self::Vec<E>) -> Result<Self::Vec<E>, Self::Err>;
 
     fn tensor_to_vec<S: Shape, E: Unit, T>(&self, tensor: &Tensor<S, E, Self, T>) -> Vec<E>;
+
+    /// Blocks until all work on the device has completed. Useful for benchmarking.
+    fn synchronize(&self) {
+        self.try_synchronize().unwrap()
+    }
+
+    /// Blocks until all work on the device has completed. Useful for benchmarking.
+    fn try_synchronize(&self) -> Result<(), Self::Err>;
 }
 
 /// Internal trait - Represents something that can allocate its own gradient.
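With `synchronize` now a provided method on `DeviceStorage` (a no-op on `Cpu`, a blocking wait on `Cuda`), timing code no longer needs the CUDA-only inherent method and can stay device-generic. A hedged sketch of what such a helper could look like, assuming the trait and the `tensor`/`square` ops are reachable through `dfdx::prelude` as in the version this PR targets; the helper `time_on_device` is illustrative, not part of the crate:

use std::time::{Duration, Instant};

use dfdx::prelude::*;

// Hypothetical helper, not part of this PR: time a closure on any device that
// implements `DeviceStorage`. Only the `synchronize` method added in this diff
// is assumed; it returns immediately on `Cpu` and blocks on `Cuda` until all
// queued kernels have finished.
fn time_on_device<D: DeviceStorage>(dev: &D, f: impl FnOnce()) -> Duration {
    let start = Instant::now();
    f(); // e.g. a forward pass that may only enqueue asynchronous kernels
    dev.synchronize();
    start.elapsed()
}

fn main() {
    let dev: Cpu = Default::default();
    let dur = time_on_device(&dev, || {
        let a = dev.tensor([1.0f32, 2.0, 3.0]);
        let _b = a.square();
    });
    println!("took {:?}", dur);
}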