adagrad_math (Half/BFloat16 overload) — pytorch Architecture
Architecture documentation for the reduced-precision `adagrad_math` function template in FusedAdagradKernel.cpp from the pytorch codebase. (The `std::is_same_v` traits appearing in the signature are only the SFINAE gate selecting this overload for `Half`/`BFloat16`; they are not the documented entity.)
Entity Profile
Source Code
aten/src/ATen/native/cpu/FusedAdagradKernel.cpp lines 14–80
// Fused Adagrad update for reduced-precision parameter types (Half / BFloat16).
// Selected via SFINAE; computation is performed in `opmath_t` (float) precision:
// values are widened with convert_to_float, updated, and narrowed back on store.
//
// Updates, per element i in [0, size):
//   grad      = grad / *grad_scale_ptr            (if grad_scale_ptr != nullptr; also written back)
//   grad      = -grad                             (if maximize)
//   grad     += param * weight_decay              (if weight_decay != 0)
//   state_sum += grad * grad                      (written back)
//   param    -= clr * grad / (sqrt(state_sum) + eps)   (written back)
//
// Fix vs. original: the hyperparameters (clr, eps, weight_decay) were previously
// narrowed through scalar_t before building the float vectors (fVec(scalar_t(eps))).
// With scalar_t = Half, a typical eps such as 1e-10 underflows to 0 (Half min
// normal ~6.1e-5), silently dropping the epsilon in the vectorized path while the
// scalar tail loop kept it. All casts now go through opmath_t, matching the tail.
template <typename scalar_t, typename opmath_t>
std::enable_if_t<
    std::is_same_v<scalar_t, Half> || std::is_same_v<scalar_t, BFloat16>,
    void>
inline adagrad_math(
  scalar_t* param_ptr,
  scalar_t* grad_ptr,
  scalar_t* state_sum_ptr,
  const double clr,
  const double eps,
  const double weight_decay,
  const bool maximize,
  const float* grad_scale_ptr,
  int64_t size
){
  using lpVec = at::vec::Vectorized<scalar_t>;
  using fVec = at::vec::Vectorized<opmath_t>;
  int64_t d = 0;
  // Vectorized main loop: each low-precision vector widens into two float vectors.
  for (; d < size - (size % lpVec::size()); d += lpVec::size()) {
    lpVec param_lpvec = lpVec::loadu(param_ptr + d);
    auto [param_vec1, param_vec2] = vec::convert_to_float<scalar_t>(param_lpvec);
    lpVec grad_lpvec = lpVec::loadu(grad_ptr + d);
    auto [grad_vec1, grad_vec2] = vec::convert_to_float<scalar_t>(grad_lpvec);
    if (grad_scale_ptr) {
      // Unscale the gradient and persist the unscaled value back to grad memory.
      grad_vec1 = grad_vec1 / fVec(float(*grad_scale_ptr));
      grad_vec2 = grad_vec2 / fVec(float(*grad_scale_ptr));
      lpVec grad_vec_to_store = vec::convert_from_float<scalar_t>(grad_vec1, grad_vec2);
      grad_vec_to_store.store(grad_ptr + d);
    }
    if (maximize){
      grad_vec1 = grad_vec1 * fVec(opmath_t(-1.0));
      grad_vec2 = grad_vec2 * fVec(opmath_t(-1.0));
    }
    if (weight_decay != 0.0){
      // Cast through opmath_t (not scalar_t) to avoid Half/BFloat16 rounding.
      grad_vec1 += param_vec1 * fVec(opmath_t(weight_decay));
      grad_vec2 += param_vec2 * fVec(opmath_t(weight_decay));
    }
    auto [state_sum_vec1, state_sum_vec2] = vec::convert_to_float<scalar_t>(lpVec::loadu(state_sum_ptr + d));
    state_sum_vec1 += grad_vec1 * grad_vec1;
    state_sum_vec2 += grad_vec2 * grad_vec2;
    vec::convert_from_float<scalar_t>(state_sum_vec1, state_sum_vec2).store(state_sum_ptr + d);
    // opmath_t(eps): a small eps (e.g. 1e-10) would underflow to 0 in Half.
    fVec std_vec1 = state_sum_vec1.sqrt() + fVec(opmath_t(eps));
    fVec std_vec2 = state_sum_vec2.sqrt() + fVec(opmath_t(eps));
    param_vec1 = param_vec1 - fVec(opmath_t(clr)) * grad_vec1 / std_vec1;
    param_vec2 = param_vec2 - fVec(opmath_t(clr)) * grad_vec2 / std_vec2;
    vec::convert_from_float<scalar_t>(param_vec1, param_vec2).store(param_ptr + d);
  }
  // Scalar tail loop: same math as above in opmath_t precision, one element at a time.
  for (; d < size; d++) {
    opmath_t grad_val = grad_ptr[d];
    opmath_t param_val = param_ptr[d];
    if (grad_scale_ptr) {
      grad_val = grad_ptr[d] / opmath_t(*grad_scale_ptr);
      grad_ptr[d] = grad_val;
    }
    if (maximize) grad_val = -grad_val;
    if (weight_decay != 0.0){
      grad_val += param_val * opmath_t(weight_decay);
    }
    opmath_t state_sum_val = state_sum_ptr[d];
    state_sum_val += grad_val * grad_val;
    state_sum_ptr[d] = state_sum_val;
    opmath_t std_val = std::sqrt(state_sum_val) + opmath_t(eps);
    param_val -= opmath_t(clr) * grad_val / std_val;
    param_ptr[d] = param_val;
  }
}
Source
Analyze Your Own Codebase
Get architecture documentation, dependency graphs, and domain analysis for your codebase in minutes.
Try Supermodel Free