apply_ldl_solve Function — PyTorch Architecture
Architecture documentation for the apply_ldl_solve template function in BatchLinearAlgebraKernel.cpp from the PyTorch codebase.
Entity Profile
Source Code
aten/src/ATen/native/BatchLinearAlgebraKernel.cpp lines 891–943
/*
 * Solves the batched system A @ X = B in place, where A holds the LDL
 * (Bunch-Kaufman) factorization previously computed by the factor kernel.
 *
 * Args:
 *   A        - batch of factorized matrices (column-major per batch:
 *              stride(-2) == 1, stride(-1) == leading dimension).
 *   pivots   - interchange/pivot indices produced by the factorization;
 *              converted to int32 here because LAPACK takes int*.
 *   B        - right-hand sides on input, overwritten with the solution X.
 *   upper    - true if the factorization is stored in the upper triangle
 *              ('U'), false for the lower triangle ('L').
 *   hermitian- dispatch to the Hermitian solver (presumably ?hetrs) vs.
 *              the symmetric one (presumably ?sytrs) — TODO confirm
 *              against the lapackLdlSolve* wrappers.
 */
template <typename scalar_t>
void apply_ldl_solve(
    const Tensor& A,
    const Tensor& pivots,
    const Tensor& B,
    bool upper,
    bool hermitian) {
#if !AT_BUILD_WITH_LAPACK()
  // NOTE: this is the solve kernel, so report ldl_solve (the original
  // message said ldl_factor — a copy-paste from the factor kernel).
  TORCH_CHECK(
      false,
      "Calling torch.linalg.ldl_solve on a CPU tensor requires compiling ",
      "PyTorch with LAPACK. Please use PyTorch built with LAPACK support.");
#else
  // Debug-only sanity checks: callers are expected to have handled the
  // empty-batch case before dispatching here.
  TORCH_INTERNAL_ASSERT_DEBUG_ONLY(batchCount(A) > 0);
  TORCH_INTERNAL_ASSERT_DEBUG_ONLY(batchCount(pivots.unsqueeze(-1)) > 0);
  auto batch_size = batchCount(B);
  auto n = A.size(-2);
  auto nrhs = B.size(-1);
  // Column-major batch layout: stride(-1) is the leading dimension.
  auto lda = A.stride(-1);
  auto ldb = B.stride(-1);
  auto uplo = upper ? 'U' : 'L';
  // Unbatched (2-D) inputs use stride 0 so index i * stride stays at
  // the single matrix.
  auto a_stride = A.dim() > 2 ? A.stride(-3) : 0;
  auto b_stride = B.dim() > 2 ? B.stride(-3) : 0;
  auto pivots_stride = pivots.dim() > 1 ? pivots.stride(-2) : 0;
  auto a_data = A.const_data_ptr<scalar_t>();
  auto b_data = B.data_ptr<scalar_t>();
  // LAPACK expects 32-bit pivot indices; this may copy if pivots are int64.
  auto pivots_ = pivots.to(kInt);
  auto pivots_data = pivots_.const_data_ptr<int>();
  auto ldl_solve_func = hermitian ? lapackLdlSolveHermitian<scalar_t>
                                  : lapackLdlSolveSymmetric<scalar_t>;
  int info = 0;
  for (const auto i : c10::irange(batch_size)) {
    const scalar_t* a_working_ptr = &a_data[i * a_stride];
    scalar_t* b_working_ptr = &b_data[i * b_stride];
    const auto* pivots_working_ptr = &pivots_data[i * pivots_stride];
    // const_cast is required by the legacy LAPACK wrapper signatures;
    // the solve routine does not modify A or the pivots.
    ldl_solve_func(
        uplo,
        n,
        nrhs,
        const_cast<scalar_t*>(a_working_ptr),
        lda,
        const_cast<int*>(pivots_working_ptr),
        b_working_ptr,
        ldb,
        &info);
  }
  // info != 0 can only signal an invalid argument (a programming error),
  // so a single post-loop internal assert suffices; only the last batch's
  // info is inspected, matching the original behavior.
  TORCH_INTERNAL_ASSERT(info == 0);
#endif
}
Source
Analyze Your Own Codebase
Get architecture documentation, dependency graphs, and domain analysis for your codebase in minutes.
Try Supermodel Free