kReluFused Template Parameter — pytorch Architecture
Architecture documentation for the `kReluFused` non-type (bool) template parameter of `PackedConvWeightCudnn<kSpatialDim>::apply_impl_helper` in Conv.cpp from the pytorch codebase; when true, a ReLU activation is fused into the quantized convolution graph.
Entity Profile
Source Code
aten/src/ATen/native/quantized/cudnn/Conv.cpp lines 61–254
template <bool kReluFused>
// Executes a quantized int8 convolution through the cuDNN frontend API and
// writes the result into `quantized_output`. The fused graph computed is:
//   conv(act_int8, w_int8) [ + bias_fp32/(act_scale*w_scale) ] [ -> relu ]
//       -> * (act_scale*w_scale/out_scale) -> int8 output
// Compiled execution plans are memoized in `execution_plan_cache`, keyed by a
// zero-initialized CacheKey so padding bytes hash deterministically.
// NOTE(review): assumes `input` and `quantized_output` are int8 CUDA tensors
// (data_ptr<int8_t>() below would throw otherwise) — established by the caller.
void PackedConvWeightCudnn<kSpatialDim>::apply_impl_helper(const at::Tensor& quantized_output, const at::Tensor& input, double output_scale) {
auto act_scale = input.q_scale();
auto weight_scale = maybe_padded_weight_.q_scale();
// Multiplying the raw accumulator by this factor rescales it from the
// (act_scale * weight_scale) domain into the output's quantized domain.
auto requantize_multiplier = act_scale * weight_scale / output_scale;
at::Tensor requantize_multiplier_tensor = cudnn_utils::getRequantMultiplierTensor(requantize_multiplier, kSpatialDim + 2);
// Only populated when a bias is present; both tensors feed the optional
// bias-scaling pointwise op built further down.
std::optional<at::Tensor> bias_multiplier_tensor;
std::optional<at::Tensor> broadcasted_bias;
if (bias_.has_value()) {
// the input bias is a 1-D tensor whose size is the same as the size of the second dimension of quantized_output.
// we need to add trailing dimensions in order to properly broadcast bias, otherwise broadcast_to will fail.
// the number of trailing dimensions is quantized_output.dim() - 2, so the new size of the broadcast_bias
// becomes quantized_output.dim() - 2 + 1. nothing needs to be done for the leading dimensions
std::vector<int64_t> new_size(quantized_output.dim() - 1, 1);
new_size[0] = bias_.value().size(0);
broadcasted_bias = bias_.value().reshape(new_size);
// Materialize the broadcast and match the output's ChannelsLast layout so the
// pointwise add below operates on identically-laid-out tensors.
broadcasted_bias.value() = broadcasted_bias.value().broadcast_to(quantized_output.sizes());
broadcasted_bias.value() = broadcasted_bias.value().to(c10::MemoryFormat::ChannelsLast);
bias_multiplier_tensor = at::empty(quantized_output.sizes(), at::device(at::kCUDA).dtype(at::kFloat), at::MemoryFormat::ChannelsLast);
// The bias is pre-divided by (act_scale * w_scale) so it can be added directly
// to the conv accumulator, which lives in that same scale domain.
auto bias_multiplier = 1.0 / (act_scale * weight_scale);
bias_multiplier_tensor.value().fill_(bias_multiplier);
}
cudnnHandle_t handle = at::native::getCudnnHandle();
CacheKey key{};
// memset is needed here because there is implicit packing added for CacheKey, and this can result in uninitialized padded values that are
// used for hashing (see how at::native::ParamsHash is defined). without memset, we can potentially come across a situation where two
// CacheKey objects have the same user defined parameters, but
// different padded values, resulting in different hash outputs.
memset(&key, 0, sizeof(key));
bool deterministic{true};
bool allow_tf32{false};
auto padding_vec = padding_.vec();
auto stride_vec = stride_.vec();
auto dilation_vec = dilation_.vec();
setConvolutionParams(&key.params, input, maybe_padded_weight_, padding_vec, stride_vec, dilation_vec, groups_, deterministic, allow_tf32, input.suggest_memory_format());
// operator datatype needs to be int32 for int8 convolution, but we can
// set the datatype for output tensor to int32 or fp32
key.params.dataType = CUDNN_DATA_INT32;
key.input_alignment = cudnn_utils::getAlignment(input);
key.output_alignment = cudnn_utils::getAlignment(quantized_output);
key.weight_alignment = cudnn_utils::getAlignment(maybe_padded_weight_);
if (bias_.has_value()) {
key.bias_alignment = static_cast<int8_t>(cudnn_utils::getAlignment(broadcasted_bias.value()));
} else {
// -1 is a sentinel meaning "no bias", so biased and bias-free graphs hash to
// different cache entries.
key.bias_alignment = -1;
}
// ReLU fusion changes the graph shape, so it must participate in the key too.
key.kReluFused = kReluFused;
// Binds device pointers to the uids used when the tensor descriptors were
// built ('x' input, 'w' weight, 's' requant multiplier, 'r' output; plus
// 'b'/'c'/'d' for the bias path) and executes the given plan.
auto run = [&](const cudnn_frontend::ExecutionPlan& plan_desc) {
auto workspace_size = plan_desc.getWorkspaceSize();
auto workspace_ptr = c10::cuda::CUDACachingAllocator::get()->allocate(workspace_size);
at::SmallVector<void *, 7> data_ptrs;
at::SmallVector<int64_t, 7> uids;
data_ptrs = {input.data_ptr<int8_t>(), maybe_padded_weight_.data_ptr<int8_t>(),
requantize_multiplier_tensor.data_ptr(), quantized_output.data_ptr<int8_t>()};
uids = {'x', 'w', 's', 'r'};
if (bias_.has_value()) {
// 'b' (bias input) and 'd' (scaled bias output) intentionally share the same
// storage: the bias-multiply op below is performed in place.
data_ptrs.insert(data_ptrs.end(), {broadcasted_bias.value().data_ptr(), bias_multiplier_tensor.value().data_ptr(),
broadcasted_bias.value().data_ptr()});
uids.insert(uids.end(), {'b', 'c', 'd'});
}
auto variantPack = cudnn_frontend::VariantPackBuilder()
.setWorkspacePointer(workspace_size ? workspace_ptr.get() : nullptr)
.setDataPointers(static_cast<int64_t>(uids.size()), data_ptrs.data())
.setUids(static_cast<int64_t>(uids.size()), uids.data())
.build();
auto variant_pack_desc = variantPack.get_raw_desc();
AT_CUDNN_CHECK(cudnnBackendExecute(handle, plan_desc.get_raw_desc(), variant_pack_desc));
};
// Fast path: reuse a previously compiled execution plan for this exact config.
auto search = execution_plan_cache.find(key);
if (search != execution_plan_cache.end()) {
cudnn_frontend::ExecutionPlan plan_desc = search->second;
run(plan_desc);
return;
}
// Cache miss: build the fused operation graph from scratch.
// conv_op computes act_fp32 * w_fp32 (matrix multiplication)
// where act_fp32 and w_fp32 are the input and weight variables, resp.
// output is a fp32 tensor
auto conv_op = cudnn_frontend::OperationBuilder(CUDNN_BACKEND_OPERATION_CONVOLUTION_FORWARD_DESCRIPTOR)
.setxDesc(cudnn_utils::getTensorDescriptor(input.sizes(), input.strides(), CUDNN_DATA_INT8, 'x', key.input_alignment))
// for virtual tensors, the alignment is not used, so we can just put an arbitrary value here, e.g., key.output_alignment
.setyDesc(cudnn_utils::getTensorDescriptor(quantized_output.sizes(), quantized_output.strides(), CUDNN_DATA_FLOAT, 'y', key.output_alignment, true))
.setwDesc(cudnn_utils::getTensorDescriptor(maybe_padded_weight_.sizes(), maybe_padded_weight_.strides(), CUDNN_DATA_INT8, 'w', key.weight_alignment))
.setcDesc(getConvDescriptor(key.params.dataType, padding_vec, stride_vec, dilation_vec))
.build();
// std::cout << "operator:" << conv_op.describe() << std::endl;
std::optional<cudnn_frontend::Operation> bias_mult_op;
std::optional<cudnn_frontend::Operation> sum_conv_bias_op;
if (bias_.has_value()) {
// we can't directly assign bias_mult_op because operator= is deleted for cudnn_frontend::Operation;
// alternatively, I think we can use std::unique_ptr and dynamically allocate these builder ops
// but here, we chose to do it statically. std::optional<T>::emplace() enables this approach
// bias_mult_op computes bias_fp32 / (act_scale * w_scale) or bias_fp32 * (1 / (act_scale * w_scale))
// where bias_multiplier = (1 / (act_scale * w_scale))
// output is a fp32 tensor
// we use inplace operation here where the output is assigned to the input
bias_mult_op.emplace(cudnn_frontend::OperationBuilder(CUDNN_BACKEND_OPERATION_POINTWISE_DESCRIPTOR)
.setxDesc(cudnn_utils::getTensorDescriptor(broadcasted_bias.value(), 'b', cudnn_utils::getAlignment(broadcasted_bias.value())))
.setbDesc(cudnn_utils::getTensorDescriptor(bias_multiplier_tensor.value(), 'c', cudnn_utils::getAlignment(bias_multiplier_tensor.value())))
.setyDesc(cudnn_utils::getTensorDescriptor(broadcasted_bias.value(), 'd', cudnn_utils::getAlignment(broadcasted_bias.value())))
.setpwDesc(cudnn_utils::getPointWiseMulDescriptor(at::native::getCudnnDataType(bias_multiplier_tensor.value())))
.build());
// computes (act_int8 * w_int8 + [bias_fp32/(act_scale * w_scale)])
// where the 1st and 2nd summands is output of conv_op and broadcasted_bias, resp.
// output is a fp32 tensor
// we use inplace operation here where the output is assigned to the input
sum_conv_bias_op.emplace(cudnn_frontend::OperationBuilder(CUDNN_BACKEND_OPERATION_POINTWISE_DESCRIPTOR)
.setxDesc(conv_op.getOutputTensor())
.setbDesc(cudnn_utils::getTensorDescriptor(broadcasted_bias.value(), 'd', cudnn_utils::getAlignment(broadcasted_bias.value())))
// for virtual tensors, the alignment is not used, so we can just put an arbitrary value here, e.g., key.output_alignment
.setyDesc(cudnn_utils::getTensorDescriptor(quantized_output.sizes(), quantized_output.strides(), CUDNN_DATA_FLOAT, 'e', key.output_alignment, true))
.setpwDesc(cudnn_utils::getPointWiseAddDescriptor(at::native::getCudnnDataType(broadcasted_bias.value())))
.build());
}
// relu_op computes relu(act_int8 * w_int8 + [bias_fp32/(act_scale * w_scale)]
// or relu(act_int8 * w_int8) if bias is not present.
// output is a fp32 tensor
std::optional<cudnn_frontend::Operation> relu_op;
// The tensor fed into requantization: the bias-sum output when bias exists,
// otherwise the raw conv output.
std::shared_ptr<cudnn_frontend::OpaqueBackendPointer> tensor2requant_ptr = bias_.has_value() ? sum_conv_bias_op.value().getOutputTensor() : conv_op.getOutputTensor();
if (kReluFused) {
// we use inplace operation here where the output is assigned to the input
relu_op.emplace(cudnn_frontend::OperationBuilder(CUDNN_BACKEND_OPERATION_POINTWISE_DESCRIPTOR)
.setxDesc(tensor2requant_ptr)
// for virtual tensors, the alignment is not used, so we can just put an arbitrary value here, e.g., key.output_alignment
.setyDesc(cudnn_utils::getTensorDescriptor(quantized_output.sizes(), quantized_output.strides(), CUDNN_DATA_FLOAT, 'f', key.output_alignment, true))
.setpwDesc(cudnn_utils::getPointWiseReluDescriptor(CUDNN_DATA_FLOAT))
.build());
}
// requant_op computes relu(act_int8 * w_int8 + [bias_fp32/(act_scale * w_scale)]) / (out_scale / (act_scale * w_scale))
// or (act_int8 * w_int8 [+ bias]) * requantize_multiplier if relu is not fused.
// output is the final int8 tensor ('r').
auto requant_op = cudnn_frontend::OperationBuilder(CUDNN_BACKEND_OPERATION_POINTWISE_DESCRIPTOR)
.setxDesc(kReluFused ? relu_op.value().getOutputTensor() : tensor2requant_ptr)
.setbDesc(cudnn_utils::getTensorDescriptor(requantize_multiplier_tensor, 's', cudnn_utils::getAlignment(requantize_multiplier_tensor)))
.setyDesc(cudnn_utils::getTensorDescriptor(quantized_output.sizes(), quantized_output.strides(), CUDNN_DATA_INT8, 'r', key.output_alignment))
.setpwDesc(cudnn_utils::getPointWiseMulDescriptor(at::native::getCudnnDataType(requantize_multiplier_tensor)))
.build();
// std::cout << "operator:" << requant_op.describe() << std::endl;
// Assemble the op list in dataflow order: conv -> [bias mul, bias add] ->
// [relu] -> requantize. Pointers remain valid: all ops outlive `ops`.
std::vector<cudnn_frontend::Operation const *> ops{&conv_op};
if (bias_.has_value()) {
ops.emplace_back(&(bias_mult_op.value()));
ops.emplace_back(&(sum_conv_bias_op.value()));
}
if (kReluFused) {
ops.emplace_back(&(relu_op.value()));
}
ops.emplace_back(&requant_op);
auto opGraph = cudnn_frontend::OperationGraphBuilder()
.setHandle(handle)
.setOperationGraph(static_cast<int64_t>(ops.size()), ops.data())
.build();
// std::cout << "opGraph: " << opGraph.describe() << std::endl;
// Ask cuDNN's heuristics for candidate engine configs, with an explicit
// fallback list in case none of the heuristic picks can be built.
auto heuristics = cudnn_frontend::EngineHeuristicsBuilder()
.setOperationGraph(opGraph)
.setHeurMode(CUDNN_HEUR_MODE_INSTANT)
.build();
auto fallback = cudnn_frontend::EngineFallbackListBuilder()
.setOperationGraph(opGraph)
.setOperation(CUDNN_BACKEND_OPERATION_CONVOLUTION_FORWARD_DESCRIPTOR)
.build();
auto& engine_configs = heuristics.getEngineConfig(heuristics.getEngineConfigCount());
auto& fallback_list = fallback.getFallbackList();
cudnn_frontend::EngineConfigList filtered_configs;
cudnn_utils::filterEngineConfigs(engine_configs, filtered_configs, deterministic, allow_tf32, at::kChar);
cudnn_utils::filterEngineConfigs(fallback_list, filtered_configs, deterministic, allow_tf32, at::kChar);
// NOTE(review): this loop iterates over the UNfiltered `engine_configs`, so
// `filtered_configs` (built just above) appears unused — confirm whether the
// intent was `for (auto &cfg : filtered_configs)`.
for (auto &cfg : engine_configs) {
try {
// Try each candidate config; the first one that both builds and executes
// successfully is cached for subsequent calls with the same CacheKey.
auto plan = cudnn_frontend::ExecutionPlanBuilder()
.setHandle(handle)
.setEngineConfig(cfg)
.build();
run(plan);
execution_plan_cache.emplace(key, plan);
return;
// Build/execute failures for one config are non-fatal: log and try the next.
} catch (cudnn_frontend::cudnnException &e) {std::cout << "cudnn error:" << e.what() << '\n';} catch(c10::CuDNNError &e) { std::cout << "other error" << e.what() << '\n';}
}
// All candidate configs failed — surface a hard error to the caller.
TORCH_CHECK(false, "Unable to find an engine to execute this computation in Quantized Conv2D Cudnn");
}
Source
Analyze Your Own Codebase
Get architecture documentation, dependency graphs, and domain analysis for your codebase in minutes.
Try Supermodel Free