**Describe the bug**

Using `burn::tensor::Distribution::Normal` with `Tensor::random` produces INF on OSX.
**To Reproduce**

```rust
#[cfg(test)]
mod tests {
    use super::*;
    use burn::tensor::{Shape, Tensor};

    pub type ElemF = f32;

    #[cfg(any(feature = "wgpu", feature = "metal-autotune", feature = "metal-fusion", feature = "default"))]
    use burn::backend::{wgpu::WgpuDevice, Wgpu};
    #[cfg(feature = "cuda")]
    use burn::backend::cuda::{Cuda, CudaDevice};
    #[cfg(any(feature = "candle-cpu", feature = "candle-cuda", feature = "candle-metal"))]
    use burn::backend::{candle::CandleDevice, Candle};
    #[cfg(any(feature = "tch-gpu", feature = "tch-cpu"))]
    use burn::backend::{libtorch::LibTorchDevice, LibTorch};

    #[test]
    fn test_random_tensor() {
        // Pick a backend and device from the enabled cargo features.
        #[cfg(any(feature = "wgpu", feature = "metal-autotune", feature = "metal-fusion", feature = "default"))]
        type MyBackend = Wgpu<ElemF, i32>;
        #[cfg(any(feature = "wgpu", feature = "metal-autotune", feature = "metal-fusion", feature = "default"))]
        let device = WgpuDevice::default();

        #[cfg(any(feature = "candle-metal", feature = "candle-cuda", feature = "candle-cpu"))]
        type MyBackend = Candle<ElemF, u32>;
        #[cfg(feature = "candle-metal")]
        let device = CandleDevice::metal(0);
        #[cfg(feature = "candle-cuda")]
        let device = CandleDevice::cuda(0);
        #[cfg(feature = "candle-cpu")]
        let device = CandleDevice::Cpu;

        #[cfg(feature = "cuda")]
        type MyBackend = Cuda;
        #[cfg(feature = "cuda")]
        let device = CudaDevice::default();

        #[cfg(feature = "tch-gpu")]
        type MyBackend = LibTorch<half::f16>;
        #[cfg(all(feature = "tch-gpu", not(target_os = "macos")))]
        let device = LibTorchDevice::Cuda(0);
        #[cfg(all(feature = "tch-gpu", target_os = "macos"))]
        let device = LibTorchDevice::Mps;

        let noise = burn::tensor::Distribution::Normal(0.0, 1.0);
        // Repeatedly draw 10^6 samples and check each batch for INF and NaN.
        for _ in 0..u64::pow(10, 7) {
            let dforce_random =
                Tensor::<MyBackend, 1>::random(Shape::new([10usize.pow(6)]), noise, &device);
            if !dforce_random.clone().sum().into_scalar().is_finite() {
                println!("inf exists in dforce_random: {dforce_random}");
                for (i, elem) in dforce_random.clone().iter_dim(0).enumerate() {
                    if !elem.clone().into_scalar().is_finite() {
                        println!("inf element: {elem} at index {i}");
                    }
                }
                panic!("dforce_random became inf!")
            }
            if dforce_random.clone().is_nan().any().into_scalar() == 1 {
                println!("nan exists in dforce_random: {dforce_random}");
                for (i, elem) in dforce_random.clone().iter_dim(0).enumerate() {
                    if elem.clone().into_scalar().is_nan() {
                        println!("nan element: {elem} at index {i}");
                    }
                }
                panic!("dforce_random became nan!")
            }
        }
    }
}
```
Compiled with:

```sh
cargo test test_random_tensor --release --features metal-fusion -- --nocapture
```
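Since the full test loops for a long time before failing, a single-batch spot check may help isolate the failure faster. This is a sketch, not part of the original report; it assumes the same `MyBackend`/`device` setup as the test above:

```rust
// Hypothetical spot check (same backend/device setup as the test above):
// draw one batch of 10^6 samples and reject it if any value is non-finite.
let t = Tensor::<MyBackend, 1>::random(
    Shape::new([10usize.pow(6)]),
    burn::tensor::Distribution::Normal(0.0, 1.0),
    &device,
);
let max_abs = t.abs().max().into_scalar();
assert!(max_abs.is_finite(), "a sampled value overflowed to INF");
```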
**Expected behavior**

The random values should never be INF, since they are supposed to be drawn from a normal distribution with mean 0 and variance 1.
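One plausible mechanism for such outliers, offered as an assumption rather than a statement about Burn's internals: if a backend derives normal samples from uniform ones via a Box-Muller transform, a uniform sample of exactly 0 makes `ln(u)` evaluate to `-inf`, and the resulting "normal" sample is non-finite. A minimal illustration (the `box_muller` function below is hypothetical, not Burn's code):

```rust
use core::f32::consts::PI;

// Hypothetical Box-Muller transform: maps two uniform samples in [0, 1)
// to one sample from N(0, 1). Not Burn's actual implementation.
fn box_muller(u1: f32, u2: f32) -> f32 {
    (-2.0 * u1.ln()).sqrt() * (2.0 * PI * u2).cos()
}

fn main() {
    // Ordinary uniform inputs give a finite normal sample...
    assert!(box_muller(0.5, 0.25).is_finite());
    // ...but u1 == 0.0 yields ln(0) = -inf, so the output is non-finite.
    assert!(!box_muller(0.0, 0.25).is_finite());
}
```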