apply_ldl_factor Function — PyTorch Architecture
Architecture documentation for the apply_ldl_factor function template in BatchLinearAlgebraKernel.cpp from the PyTorch codebase.
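This function is the CPU kernel behind torch.linalg.ldl_factor, computing a Bunch-Kaufman LDL^T (or LDL^H) factorization through LAPACK. As a minimal sketch of how the kernel is reached from the libtorch C++ API, assuming at::linalg_ldl_factor is available as in recent PyTorch releases (it mirrors the Python torch.linalg.ldl_factor entry point):

    #include <ATen/ATen.h>
    #include <iostream>

    int main() {
      // Build a real symmetric input: A = B + B^T.
      at::Tensor B = at::randn({4, 4}, at::kDouble);
      at::Tensor A = B + B.transpose(-2, -1);

      // LD packs the unit triangular factor and the block-diagonal D;
      // pivots encodes the 1x1/2x2 pivot blocks chosen by LAPACK.
      auto [LD, pivots] = at::linalg_ldl_factor(A, /*hermitian=*/false);

      std::cout << LD << "\n" << pivots << "\n";
      return 0;
    }

On a CPU tensor built with LAPACK support, this call is routed by the dispatcher to the kernel shown under Source Code below.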
Entity Profile
Source Code
aten/src/ATen/native/BatchLinearAlgebraKernel.cpp lines 826–877
template <typename scalar_t>
void apply_ldl_factor(
    const Tensor& A,
    const Tensor& pivots,
    const Tensor& info,
    bool upper,
    bool hermitian) {
#if !AT_BUILD_WITH_LAPACK()
  TORCH_CHECK(
      false,
      "Calling torch.linalg.ldl_factor on a CPU tensor requires compiling ",
      "PyTorch with LAPACK. Please use PyTorch built with LAPACK support.");
#else
  TORCH_INTERNAL_ASSERT_DEBUG_ONLY(batchCount(A) > 0);
  auto batch_size = batchCount(A);
  auto n = A.size(-2);
  auto leading_dim = A.stride(-1);
  auto uplo = upper ? 'U' : 'L';
  // Batch strides are zero in the unbatched case, so the loop below
  // degenerates to a single factorization.
  auto a_stride = A.dim() > 2 ? A.stride(-3) : 0;
  auto pivots_stride = pivots.dim() > 1 ? pivots.stride(-2) : 0;
  auto a_data = A.data_ptr<scalar_t>();
  auto pivots_data = pivots.data_ptr<int>();
  auto info_data = info.data_ptr<int>();
  // Select the Hermitian (hetrf-style) or symmetric (sytrf-style)
  // LAPACK wrapper for this scalar type.
  auto ldl_func =
      hermitian ? lapackLdlHermitian<scalar_t> : lapackLdlSymmetric<scalar_t>;
  // Workspace query: lwork == -1 makes LAPACK write the optimal
  // workspace size into wkopt without factorizing.
  scalar_t wkopt;
  ldl_func(uplo, n, a_data, leading_dim, pivots_data, &wkopt, -1, info_data);
  using value_t = typename c10::scalar_value_type<scalar_t>::type;
  int lwork = std::max<int>(1, real_impl<scalar_t, value_t>(wkopt));
  Tensor work = at::empty({lwork}, A.dtype());
  auto work_data = work.mutable_data_ptr<scalar_t>();
  // Factor each matrix in the batch in place, reusing one workspace.
  for (const auto i : c10::irange(batch_size)) {
    scalar_t* a_working_ptr = &a_data[i * a_stride];
    auto* pivots_working_ptr = &pivots_data[i * pivots_stride];
    auto* info_working_ptr = &info_data[i];
    ldl_func(
        uplo,
        n,
        a_working_ptr,
        leading_dim,
        pivots_working_ptr,
        work_data,
        lwork,
        info_working_ptr);
  }
#endif
}
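Two details of the listing are worth calling out. First, it uses LAPACK's standard workspace-query convention: the initial call with lwork = -1 only reports the optimal workspace size, and the single allocated buffer is then reused for every matrix in the batch, which is safe because the loop runs sequentially. A standalone sketch of the same two-call pattern against the raw Fortran interface, assuming a system LAPACK is linked (dsytrf_ is the double-precision symmetric routine that a wrapper like lapackLdlSymmetric dispatches to for double inputs):

    #include <algorithm>
    #include <vector>

    // Fortran LAPACK prototype for the symmetric Bunch-Kaufman LDL^T
    // factorization; all arguments are passed by pointer.
    extern "C" void dsytrf_(
        const char* uplo, const int* n, double* a, const int* lda,
        int* ipiv, double* work, const int* lwork, int* info);

    // Factor a column-major n x n matrix in place.
    void ldl_factor_inplace(double* a, int n, int* ipiv, int* info) {
      const char uplo = 'L';
      // Phase 1: workspace query. lwork = -1 asks LAPACK to report the
      // optimal workspace size in wkopt instead of factorizing.
      double wkopt = 0;
      int lwork = -1;
      dsytrf_(&uplo, &n, a, &n, ipiv, &wkopt, &lwork, info);

      // Phase 2: allocate the reported workspace and factorize for real.
      lwork = std::max(1, static_cast<int>(wkopt));
      std::vector<double> work(lwork);
      dsytrf_(&uplo, &n, a, &n, ipiv, work.data(), &lwork, info);
    }

Second, each batch element writes its own entry in info: a positive value flags an exactly singular diagonal block in D for that matrix, which callers can surface through the torch.linalg.ldl_factor_ex variant.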