to_cpu Function Template — pytorch Architecture
Architecture documentation for the to_cpu helper function template (constrained via std::is_same_v) in CPUFallback.cpp from the pytorch codebase.
Entity Profile
Source Code
aten/src/ATen/native/CPUFallback.cpp lines 24–59
// Moves a list of tensors (or optional tensors) to CPU, preserving positions.
//
// We can't just call at::_to_cpu() on the entire list of Tensors,
// because it will break on undefined tensors. Separate out undefined tensors first:
// only defined tensors are handed to at::_to_cpu, and undefined / empty-optional
// entries are copied through unchanged. Otherwise, we'd need to require all
// backends with their own implementation of _to_cpu to properly handle
// undefined tensors.
//
// T is constrained to at::Tensor or std::optional<at::Tensor>.
// Returns a vector of the same length as `tensors`, element i corresponding
// to tensors[i].
template<typename T, std::enable_if_t<std::is_same_v<T, at::Tensor> || std::is_same_v<T, std::optional<at::Tensor>>, int> = 1>
static std::vector<T> to_cpu(const std::vector<T>& tensors) {
  // Use size_t throughout: `int num = tensors.size()` narrowed size_t -> int.
  const auto num = tensors.size();
  std::vector<T> cpu_tensors(num);
  std::vector<at::Tensor> valid_tensors;
  valid_tensors.reserve(num);  // upper bound is known; avoids reallocation while collecting
  // to_translate[i] marks slots whose converted tensor must be scattered back.
  std::vector<bool> to_translate(num);
  for (const auto i : c10::irange(num)) {
    if constexpr (std::is_same_v<T, std::optional<at::Tensor>>) {
      // Optional case: both "no value" and "holds an undefined tensor" pass through.
      if (tensors[i].has_value() && tensors[i].value().defined()) {
        to_translate[i] = true;
        valid_tensors.push_back(tensors[i].value());
      } else {
        cpu_tensors[i] = tensors[i];
      }
    } else {
      if (tensors[i].defined()) {
        to_translate[i] = true;
        valid_tensors.push_back(tensors[i]);
      } else {
        cpu_tensors[i] = tensors[i];
      }
    }
  }
  // Batch-convert only the defined tensors.
  auto cpu_valid_tensors = at::_to_cpu(valid_tensors);
  // Scatter converted tensors back into their original positions.
  // defined_pos walks cpu_valid_tensors in the same order the defined
  // tensors were collected above.
  for (size_t i = 0, defined_pos = 0; i < num; ++i) {
    if (to_translate[i]) {
      cpu_tensors[i] = std::move(cpu_valid_tensors[defined_pos++]);
    }
  }
  return cpu_tensors;
}
Source
Analyze Your Own Codebase
Get architecture documentation, dependency graphs, and domain analysis for your codebase in minutes.
Try Supermodel Free