Context Class — PyTorch Architecture
Architecture documentation for the `Context` class declared in Context.h in the PyTorch codebase.
Entity Profile
Source Code
aten/src/ATen/native/vulkan/api/Context.h lines 40–203
// Context owns the Vulkan state needed to run work on one device: the
// VkDevice handle, a single queue borrowed from the device's Adapter, and
// pools for command buffers, descriptor sets, and fences. It also keeps
// deferred-destruction lists for buffers/images that are still referenced by
// in-flight command buffers. Copy and move are both deleted: a Context is
// handed around by raw pointer/reference and its handles must stay stable.
class Context final {
 public:
  // Builds a Context around the adapter at index `adapter_i` in the global
  // runtime's adapter list. Defined out-of-line (Context.cpp).
  explicit Context(size_t adapter_i, const ContextConfig&);

  // Non-copyable, non-movable (owns unique device-level Vulkan handles).
  Context(const Context&) = delete;
  Context& operator=(const Context&) = delete;

  Context(Context&&) = delete;
  Context& operator=(Context&&) = delete;

  ~Context();

 private:
  // Config
  ContextConfig config_;
  // Important handles
  Adapter* adapter_p_; // non-owning; the runtime owns the Adapter
  VkDevice device_;
  Adapter::Queue queue_;
  // Resource Pools
  CommandPool command_pool_;
  DescriptorPool descriptor_pool_;
  FencePool fences_;
  // Diagnostics
  // TODO: remove USE_VULKAN_GPU_DIAGNOSTICS
  bool enable_op_profiling_{false};
#ifdef USE_VULKAN_GPU_DIAGNOSTICS
  QueryPool querypool_;
#endif /* USE_VULKAN_GPU_DIAGNOSTICS */
  // Command buffers submission
  std::mutex cmd_mutex_;
  CommandBuffer cmd_;
  // Count of work recorded into cmd_ since the last submission. Brace-
  // initialized so the member is never read uninitialized even if a
  // constructor path omits it from its init list (the in-class initializer
  // is ignored whenever the ctor's init list sets it explicitly).
  uint32_t submit_count_{0u};
  // Memory Management
  std::mutex buffer_clearlist_mutex_;
  std::vector<VulkanBuffer> buffers_to_clear_;
  std::mutex image_clearlist_mutex_;
  std::vector<VulkanImage> images_to_clear_;

 public:
  // Adapter access

  // Non-owning pointer to the Adapter this context was built from.
  inline Adapter* adapter_ptr() {
    return adapter_p_;
  }

  inline void enable_op_profiling() {
    enable_op_profiling_ = true;
  }

  inline void disable_op_profiling() {
    enable_op_profiling_ = false;
  }

  inline bool op_profiling_enabled() {
    return enable_op_profiling_;
  }

  inline VkDevice device() {
    return device_;
  }

  inline VkQueue queue() {
    return queue_.handle;
  }

  // Device Caches
  // These all forward to caches owned by the Adapter, so compiled shader
  // modules and pipeline state are shared device-wide, not per-Context.

  inline ShaderLayoutCache& shader_layout_cache() {
    return adapter_ptr()->shader_layout_cache();
  }

  inline ShaderCache& shader_cache() {
    return adapter_ptr()->shader_cache();
  }

  inline PipelineLayoutCache& pipeline_layout_cache() {
    return adapter_ptr()->pipeline_layout_cache();
  }

  inline ComputePipelineCache& pipeline_cache() {
    return adapter_ptr()->compute_pipeline_cache();
  }

  // Resource Pools

  inline DescriptorPool& descriptor_pool() {
    return descriptor_pool_;
  }

  inline FencePool& fences() {
    return fences_;
  }

  // Diagnostics
#ifdef USE_VULKAN_GPU_DIAGNOSTICS
  inline QueryPool& querypool() {
    return querypool_;
  }

  // Resets the query pool by recording into the current command buffer;
  // set_cmd() ensures one exists first. NOTE(review): caller presumably
  // holds dispatch_lock(), since cmd_ is touched — confirm at call sites.
  inline void reset_querypool() {
    set_cmd();
    querypool_.reset(cmd_);
  }
#endif /* USE_VULKAN_GPU_DIAGNOSTICS */

  // Memory Management
  // Takes ownership of the resource by moving it onto a clear-list under its
  // own mutex; the argument is left in a moved-from (empty) state. The lists
  // are presumably drained by flush() once the GPU is done — defined
  // out-of-line.

  void register_buffer_cleanup(VulkanBuffer& buffer) {
    std::lock_guard<std::mutex> bufferlist_lock(buffer_clearlist_mutex_);
    buffers_to_clear_.emplace_back(std::move(buffer));
  }

  void register_image_cleanup(VulkanImage& image) {
    std::lock_guard<std::mutex> imagelist_lock(image_clearlist_mutex_);
    images_to_clear_.emplace_back(std::move(image));
  }

  // GPU RPC

  // Serializes access to cmd_/command recording. The returned unique_lock
  // keeps cmd_mutex_ held for as long as the caller retains it.
  inline std::unique_lock<std::mutex> dispatch_lock() {
    return std::unique_lock<std::mutex>(cmd_mutex_);
  }

  // Lazily acquires a command buffer from the pool and begins recording;
  // no-op if one is already active. Callers must hold dispatch_lock().
  inline void set_cmd(bool reusable = false) {
    if (!cmd_) {
      cmd_ = command_pool_.get_new_cmd(reusable);
      cmd_.begin();
    }
  }

  // Allocates a descriptor set for the given shader; defined out-of-line.
  DescriptorSet get_descriptor_set(const ShaderInfo&, const utils::uvec3&);

  // Records a compute dispatch into the current command buffer; defined
  // out-of-line.
  void register_shader_dispatch(
      const DescriptorSet&,
      PipelineBarrier&,
      const ShaderInfo&,
      const utils::uvec3&);

  // Records and submits a copy between two resources (buffer/image in any
  // combination via the S/D type parameters); the three uvec3 arguments are
  // presumably copy extent and src/dst offsets — confirm against the
  // out-of-line template definition.
  template <class S, class D>
  bool submit_copy(
      PipelineBarrier&,
      const S&,
      const D&,
      const api::utils::uvec3&,
      const api::utils::uvec3&,
      const api::utils::uvec3&,
      VkFence fence_handle);

  // Records and submits a compute shader dispatch with the given global and
  // local work-group sizes; trailing Arguments are forwarded as shader
  // inputs. Defined out-of-line.
  template <typename... Arguments>
  bool submit_compute_job(
      const ShaderInfo&,
      PipelineBarrier&,
      const utils::uvec3&,
      const utils::uvec3&,
      VkFence fence_handle,
      Arguments&&...);

  // Ends and submits the pending command buffer to the queue, optionally
  // signaling `fence_handle`. Defined out-of-line.
  void submit_cmd_to_gpu(
      VkFence fence_handle = VK_NULL_HANDLE,
      const bool final_use = false);

  // Waits for outstanding work and releases pooled/deferred resources
  // (including the clear-lists above). Defined out-of-line.
  void flush();
};
Source
Analyze Your Own Codebase
Get architecture documentation, dependency graphs, and domain analysis for your codebase in minutes.
Try Supermodel Free