Home / Class / key Class — PyTorch Architecture

key Class — PyTorch Architecture

Architecture documentation for the `BenchmarkCache` struct (the caching machinery behind this key type) in Conv_v8.cpp from the PyTorch codebase.

Entity Profile

Source Code

aten/src/ATen/native/cudnn/Conv_v8.cpp lines 287–347

// Thread-local LRU cache mapping convolution parameters (KeyType) to cuDNN
// frontend execution plans. getLRUCacheLimit() selects one of three regimes:
//   limit < 0  -> caching disabled (find always misses, update is a no-op)
//   limit == 0 -> unbounded cache, no LRU bookkeeping
//   limit > 0  -> LRU cache capped at `limit` entries
template <typename T, typename KeyType>
struct BenchmarkCache {
  // LRU recency list: most recently used key at the front, eviction
  // candidate at the back. Unused when the limit is 0 (unbounded mode).
  std::list<KeyType> engine_cache_order;
  // Maps a key to its plan plus that key's node in engine_cache_order,
  // so recency updates and evictions are O(1) via std::list::splice.
  std::unordered_map<
      KeyType,
      std::pair<
          cudnn_frontend::ExecutionPlan,
          typename std::list<KeyType>::iterator>,
      ParamsWrapperHash<KeyType>>
      engine_cache;

  // no mutexes here as caches are now thread local for v8, can also return a
  // pointer to the Execution Plan if we know it will not be invalidated by
  // another thread

  // Looks up the plan cached for `key`. Returns nullptr when caching is
  // disabled or on a miss; otherwise returns a pointer into the cache
  // (valid only until the entry is evicted or overwritten).
  cudnn_frontend::ExecutionPlan* find(const KeyType& key) {
    const int lru_cache_limit = getLRUCacheLimit();
    if (lru_cache_limit < 0) {
      return nullptr;
    }
    auto it = engine_cache.find(key);
    if (it == engine_cache.end()) {
      return nullptr;
    }
    if (lru_cache_limit) {
      // LRU mode: mark `key` as most recently accessed by moving its node
      // to the front of the recency list (O(1), no copies).
      engine_cache_order.splice(
          engine_cache_order.begin(), engine_cache_order, it->second.second);
    }
    return &(it->second.first);
  }

  // Inserts or refreshes the plan for `key` according to the current
  // cache-limit regime (see struct-level comment).
  void update(const KeyType& key, T& results) {
    const int lru_cache_limit = getLRUCacheLimit();
    if (lru_cache_limit < 0) {
      return;
    } else if (lru_cache_limit) {
      auto it = engine_cache.find(key);
      if (it == engine_cache.end()) {
        // Miss: evict least-recently-used entries until there is room.
        // `while` (not `if`) also restores the size invariant if the
        // limit was lowered between calls.
        while (static_cast<long>(engine_cache.size()) >= lru_cache_limit) {
          auto erase_count = engine_cache.erase(engine_cache_order.back());
          TORCH_INTERNAL_ASSERT(
              erase_count == 1,
              "CUDNN V8 LRU Cache Corrupted (eviction key not in map). Please report a bug to PyTorch.");
          engine_cache_order.pop_back();
        }
        engine_cache_order.emplace_front(key);
        engine_cache.emplace(
            key, std::make_pair(results, engine_cache_order.begin()));
      } else {
        // Hit: overwrite the plan and mark the key most recently used.
        it->second.first = results;
        engine_cache_order.splice(
            engine_cache_order.begin(), engine_cache_order, it->second.second);
      }
    } else {
      // Unbounded mode: recency list is unused, so store a placeholder
      // iterator alongside the plan.
      engine_cache.insert_or_assign(
          key,
          std::make_pair(results, engine_cache_order.end())); // dummy iterator
    }
  }
};

Analyze Your Own Codebase

Get architecture documentation, dependency graphs, and domain analysis for your codebase in minutes.

Try Supermodel Free