Home / Class / _rrelu_with_noise_train — PyTorch Architecture

_rrelu_with_noise_train — PyTorch Architecture

Architecture documentation for the _rrelu_with_noise_train function template (declared static, so file-local) in Activation.cpp from the PyTorch codebase.

Entity Profile

Source Code

aten/src/ATen/native/Activation.cpp lines 578–609

// Training-mode randomized leaky ReLU (RReLU), elementwise over `input`:
// for x <= 0 the output is x * r with r drawn i.i.d. from U(lower_, upper_)
// and r recorded in `noise` (consumed by the backward pass); for x > 0 the
// value passes through unchanged and `noise` records a slope of 1.
//
// NOTE(review): `input` and `noise` are accessed through raw linear data
// pointers without a contiguity check here — presumably the caller guarantees
// they are contiguous; verify at the call site. `output` is explicitly
// handled via a contiguous working copy.
template <typename scalar_t>
static void _rrelu_with_noise_train(
    Tensor& output,
    const Tensor& input,
    Tensor& noise,
    const Scalar& lower_,
    const Scalar& upper_,
    const std::optional<Generator>& generator) {
  using opmath_t = at::opmath_type<scalar_t>;
  const opmath_t lo = lower_.to<opmath_t>();
  const opmath_t hi = upper_.to<opmath_t>();
  // Write through a contiguous view; copied back at the end if `output`
  // itself is not contiguous.
  Tensor out_contig = output.contiguous();
  scalar_t* out_ptr = out_contig.mutable_data_ptr<scalar_t>();
  const scalar_t* in_ptr = input.const_data_ptr<scalar_t>();
  scalar_t* noise_ptr = noise.mutable_data_ptr<scalar_t>();
  auto gen = at::get_generator_or_default<CPUGeneratorImpl>(
      generator, detail::getDefaultCPUGenerator());
  // Serialize access to the CPU generator's internal state for the whole loop.
  std::lock_guard<std::mutex> lock(gen->mutex_);
  for (const auto i : c10::irange(input.numel())) {
    const scalar_t x = in_ptr[i];
    if (x > 0) {
      out_ptr[i] = x;
      noise_ptr[i] = 1;
    } else {
      // Draw the random negative-slope factor for this element.
      at::uniform_real_distribution<double> uniform(lo, hi);
      const opmath_t slope = (opmath_t)uniform(gen);
      out_ptr[i] = x * slope;
      noise_ptr[i] = slope;
    }
  }
  if (!output.is_contiguous()) {
    output.copy_(out_contig);
  }
}

Analyze Your Own Codebase

Get architecture documentation, dependency graphs, and domain analysis for your codebase in minutes.

Try Supermodel Free