slow_conv2d_backward_weight_frame Function — pytorch Architecture
Architecture documentation for the slow_conv2d_backward_weight_frame template function in ConvolutionMM2d.cpp from the pytorch codebase.
Entity Profile
Source Code
aten/src/ATen/native/ConvolutionMM2d.cpp lines 435–481
template <typename scalar_t>
void slow_conv2d_backward_weight_frame(
    TensorAccessor<scalar_t, 2> grad_weight,
    TensorAccessor<const scalar_t, 3> grad_output,
    TensorAccessor<const scalar_t, 2> finput,
    bool is_channels_last) {
  // Compute grad_weight += grad_output.reshape({grad_output.shape(0), -1}) * finput.T
  // Note gemm expects fortran order, so all 3 matrices are transposed.
  // Swapping argument order cancels this, since C == AB <=> T(C) == T(B)T(A)
  if (is_channels_last) {
    const int64_t m = finput.size(1);
    const int64_t n = grad_output.size(0);
    const int64_t k = grad_output.size(1) * grad_output.size(2);

    const int64_t lda = m;
    const int64_t ldb = n;
    const int64_t ldc = m;

    at::native::cpublas::gemm(
        TransposeType::NoTranspose,
        TransposeType::Transpose,
        m, n, k,
        static_cast<scalar_t>(1),
        finput.data(), lda,
        grad_output.data(), ldb,
        static_cast<scalar_t>(1),
        grad_weight.data(), ldc);
  } else {
    const int64_t m = finput.size(0);
    const int64_t n = grad_output.size(0);
    const int64_t k = grad_output.size(1) * grad_output.size(2);

    const int64_t lda = k;
    const int64_t ldb = k;
    const int64_t ldc = m;

    at::native::cpublas::gemm(
        TransposeType::Transpose,
        TransposeType::NoTranspose,
        m, n, k,
        static_cast<scalar_t>(1),
        finput.data(), lda,
        grad_output.data(), ldb,
        static_cast<scalar_t>(1),
        grad_weight.data(), ldc);
  }
}
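Reading the gemm arguments back into tensor shapes: in the contiguous (NCHW) branch, finput is the im2col buffer of one batch frame with shape [C_in * kH * kW, oH * oW] and grad_output is [C_out, oH, oW], so the call accumulates grad_output.reshape({C_out, -1}) * finput.T into the [C_out, C_in * kH * kW] view of grad_weight, exactly as the leading comment states. The channels-last branch computes the analogous product with finput laid out as [oH * oW, kH * kW * C_in].

The argument-swapping trick in that comment is worth seeing in isolation. The sketch below is a hypothetical, self-contained illustration (it uses a hand-written naive GEMM, not at::native::cpublas::gemm, and is not part of the PyTorch source): handing row-major buffers to a column-major (Fortran-order) routine with the A and B arguments swapped yields the row-major product C = A * B, because a row-major matrix reinterpreted as column-major is its transpose, and T(C) == T(B) * T(A).

#include <cstdint>
#include <cstdio>

// Naive column-major GEMM: C(m x n) += A(m x k) * B(k x n),
// all three matrices stored in Fortran order with the given leading dims.
void gemm_colmajor(int64_t m, int64_t n, int64_t k,
                   const float* a, int64_t lda,
                   const float* b, int64_t ldb,
                   float* c, int64_t ldc) {
  for (int64_t j = 0; j < n; ++j) {
    for (int64_t i = 0; i < m; ++i) {
      float acc = c[i + j * ldc];
      for (int64_t p = 0; p < k; ++p) {
        acc += a[i + p * lda] * b[p + j * ldb];
      }
      c[i + j * ldc] = acc;
    }
  }
}

int main() {
  // Row-major A (2x3) and B (3x2); we want row-major C (2x2) = A * B.
  const float a[] = {1, 2, 3,
                     4, 5, 6};
  const float b[] = {7,  8,
                     9,  10,
                     11, 12};
  float c[4] = {};

  // A row-major M x K buffer is bit-for-bit a column-major K x M buffer,
  // i.e. the transpose. Computing column-major T(C) = T(B) * T(A) --
  // dimensions (2x2) = (2x3) * (3x2) -- with the arguments swapped
  // therefore fills c with the row-major product A * B.
  gemm_colmajor(/*m=*/2, /*n=*/2, /*k=*/3,
                /*a=*/b, /*lda=*/2,   // T(B): column-major 2x3
                /*b=*/a, /*ldb=*/3,   // T(A): column-major 3x2
                c, /*ldc=*/2);

  // Prints 58 64 / 139 154, the row-major entries of A * B.
  std::printf("%g %g\n%g %g\n", c[0], c[1], c[2], c[3]);
}

slow_conv2d_backward_weight_frame relies on the same identity; it also passes beta == 1 (the second static_cast<scalar_t>(1)), so each frame's contribution is accumulated into grad_weight rather than overwriting it.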