// a cache which evicts the least recently used item when it is full
template <class Key, class Value>
class lru_cache {
 public:
  typedef Key key_type;
  typedef Value value_type;
  typedef std::list<key_type> list_type;
  typedef std::map<key_type,
                   std::pair<value_type, typename list_type::iterator>>
      map_type;
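The template is cut off above. As a hedged sketch of how such a list+map LRU usually continues (the members list_, map_, and capacity_ are assumed here, not part of the original snippet): the list holds keys in recency order, and the map stores each value together with an iterator to its list node, so a hit can splice the key to the front in O(1).

  // Hedged sketch of the usual continuation (assumed members:
  // list_type list_; map_type map_; size_t capacity_;).
  void insert(const key_type& key, const value_type& value) {
    typename map_type::iterator it = map_.find(key);
    if (it != map_.end()) {
      // Key already present: refresh recency and overwrite the value.
      list_.splice(list_.begin(), list_, it->second.second);
      it->second.first = value;
      return;
    }
    if (map_.size() >= capacity_) {
      // Full: evict the least recently used key (back of the list).
      map_.erase(list_.back());
      list_.pop_back();
    }
    list_.push_front(key);
    map_[key] = std::make_pair(value, list_.begin());
  }

  bool get(const key_type& key, value_type* value) {
    typename map_type::iterator it = map_.find(key);
    if (it == map_.end()) return false;
    // Hit: move the key to the front (most recently used) in O(1).
    list_.splice(list_.begin(), list_, it->second.second);
    *value = it->second.first;
    return true;
  }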
// Destroys all existing entries by calling the "deleter"
// function that was passed to the constructor.
virtual ~Cache();
// Opaque handle to an entry stored in the cache.
struct Handle {};
// Insert a mapping from key->value into the cache and assign it
// the specified charge against the total cache capacity.
//
// Returns a handle that corresponds to the mapping.  The caller
// must call this->Release(handle) when the returned mapping is no
// longer needed.
//
// When the inserted entry is no longer needed, the key and
// value will be passed to "deleter".
virtual Handle* Insert(const Slice& key, void* value, size_t charge,
                       void (*deleter)(const Slice& key, void* value)) = 0;
// If the cache has no mapping for "key", returns nullptr.
//
// Else return a handle that corresponds to the mapping.  The caller
// must call this->Release(handle) when the returned mapping is no
// longer needed.
virtual Handle* Lookup(const Slice& key) = 0;
// Release a mapping returned by a previous Lookup().
// REQUIRES: handle must not have been released yet.
// REQUIRES: handle must have been returned by a method on *this.
virtual void Release(Handle* handle) = 0;
// Return the value encapsulated in a handle returned by a
// successful Lookup().
// REQUIRES: handle must not have been released yet.
// REQUIRES: handle must have been returned by a method on *this.
virtual void* Value(Handle* handle) = 0;
// If the cache contains an entry for key, erase it.  Note that the
// underlying entry will be kept around until all existing handles
// to it have been released.
virtual void Erase(const Slice& key) = 0;
// Return a new numeric id.  May be used by multiple clients who are
// sharing the same cache to partition the key space.  Typically the
// client will allocate a new id at startup and prepend the id to
// its cache keys.
virtual uint64_t NewId() = 0;
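A short illustration of that usage (hypothetical client code; PutFixed64 is LevelDB's internal coding helper, and user_key stands in for the client's own key):

// Illustrative only: partitioning a shared cache's key space with NewId().
uint64_t my_id = cache->NewId();  // obtained once, e.g. at client startup

std::string buf;
PutFixed64(&buf, my_id);          // 8-byte id prefix
buf.append(user_key.data(), user_key.size());
Slice cache_key(buf);             // keys from different clients never collide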
// Remove all cache entries that are not actively in use.  Memory-constrained
// applications may wish to call this method to reduce memory usage.
// Default implementation of Prune() does nothing.  Subclasses are strongly
// encouraged to override the default implementation.  A future release of
// leveldb may change Prune() to a pure abstract method.
virtual void Prune() {}
// Return an estimate of the combined charges of all elements stored in the
// cache.
virtual size_t TotalCharge() const = 0;
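Putting the interface together, a hedged end-to-end sketch of client usage (the string value and DeleteString deleter are invented for illustration; NewLRUCache is LevelDB's factory for this interface):

#include <string>
#include "leveldb/cache.h"

// Illustrative deleter: the cache never frees values itself; it calls this
// back once the entry is evicted AND all handles have been released.
static void DeleteString(const leveldb::Slice& /*key*/, void* value) {
  delete reinterpret_cast<std::string*>(value);
}

void Example() {
  leveldb::Cache* cache = leveldb::NewLRUCache(1024);  // 1024 charge units

  std::string* v = new std::string("hello");
  // Charge of 1 unit; ownership of *v effectively passes to the cache.
  leveldb::Cache::Handle* h =
      cache->Insert("k", v, /*charge=*/1, &DeleteString);
  cache->Release(h);  // done with the handle returned by Insert

  if (leveldb::Cache::Handle* lh = cache->Lookup("k")) {
    std::string* found = reinterpret_cast<std::string*>(cache->Value(lh));
    // ... use *found; the entry cannot be deleted while we hold lh ...
    cache->Release(lh);
  }

  cache->Erase("k");  // underlying entry freed once no handles remain
  delete cache;
}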
struct LRUHandle {
  void* value;
  void (*deleter)(const Slice&, void* value);
  LRUHandle* next_hash;
  LRUHandle* next;
  LRUHandle* prev;
  // charge is an accounting quantity: in the block cache it is the block's
  // size(); in the TableCache it is 1.
  // (Note: the TableCache and the block cache are not the same object.)
  size_t charge;  // TODO(opt): Only allow uint32_t?
  size_t key_length;
  bool in_cache;     // Whether entry is in the cache.
  uint32_t refs;     // References, including cache reference, if present.
  uint32_t hash;     // Hash of key(); used for fast sharding and comparisons
  char key_data[1];  // Beginning of key

  Slice key() const {
    // next is only equal to this if the LRU handle is the list head of an
    // empty list. List heads never have meaningful keys.
    assert(next != this);
    return Slice(key_data, key_length);
  }
};
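Note the char key_data[1] member: the key bytes live inline at the end of the struct, so LevelDB allocates each handle with a single malloc sized to fit the key. Abridged from LRUCache::Insert in util/cache.cc:

// Handle and key allocated in one block; "-1" accounts for the
// key_data[1] placeholder already inside the struct.
LRUHandle* e = reinterpret_cast<LRUHandle*>(
    malloc(sizeof(LRUHandle) - 1 + key.size()));
e->value = value;
e->deleter = deleter;
e->charge = charge;
e->key_length = key.size();
e->hash = hash;
e->in_cache = false;
e->refs = 1;  // for the returned handle.
std::memcpy(e->key_data, key.data(), key.size());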
// mutex_ protects the following state.
mutable port::Mutex mutex_;
size_t usage_ GUARDED_BY(mutex_);
// Dummy head of LRU list.
// lru.prev is newest entry, lru.next is oldest entry.
// Entries have refs==1 and in_cache==true.
LRUHandle lru_ GUARDED_BY(mutex_);
// Dummy head of in-use list.
// Entries are in use by clients, and have refs >= 2 and in_cache==true.
LRUHandle in_use_ GUARDED_BY(mutex_);
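Entries migrate between the two lists as their reference count crosses the 1/2 boundary. LevelDB's Ref/Unref (util/cache.cc) implement the transitions:

void LRUCache::Ref(LRUHandle* e) {
  if (e->refs == 1 && e->in_cache) {  // If on lru_ list, move to in_use_ list.
    LRU_Remove(e);
    LRU_Append(&in_use_, e);
  }
  e->refs++;
}

void LRUCache::Unref(LRUHandle* e) {
  assert(e->refs > 0);
  e->refs--;
  if (e->refs == 0) {  // Deallocate.
    assert(!e->in_cache);
    (*e->deleter)(e->key(), e->value);
    free(e);
  } else if (e->in_cache && e->refs == 1) {
    // No longer in use; move to lru_ list.
    LRU_Remove(e);
    LRU_Append(&lru_, e);
  }
}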
 private:
  // The table consists of an array of buckets where each bucket is
  // a linked list of cache entries that hash into the bucket.
  uint32_t length_;
  uint32_t elems_;
  LRUHandle** list_;
// Return a pointer to slot that points to a cache entry that
// matches key/hash.  If there is no such cache entry, return a
// pointer to the trailing slot in the corresponding linked list.
LRUHandle** FindPointer(const Slice& key, uint32_t hash) {
  LRUHandle** ptr = &list_[hash & (length_ - 1)];
  while (*ptr != nullptr && ((*ptr)->hash != hash || key != (*ptr)->key())) {
    ptr = &(*ptr)->next_hash;
  }
  return ptr;
}
void Resize() {
  uint32_t new_length = 4;
  while (new_length < elems_) {
    new_length *= 2;
  }
  LRUHandle** new_list = new LRUHandle*[new_length];
  memset(new_list, 0, sizeof(new_list[0]) * new_length);
  uint32_t count = 0;
  for (uint32_t i = 0; i < length_; i++) {
    LRUHandle* h = list_[i];
    while (h != nullptr) {
      LRUHandle* next = h->next_hash;
      uint32_t hash = h->hash;
      LRUHandle** ptr = &new_list[hash & (new_length - 1)];
      h->next_hash = *ptr;
      *ptr = h;
      h = next;
      count++;
    }
  }
  assert(elems_ == count);
  delete[] list_;
  list_ = new_list;
  length_ = new_length;
}
};
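For completeness, HandleTable::Insert (also util/cache.cc) shows FindPointer and Resize working together; returning the displaced entry lets the caller unref an old value stored under the same key:

LRUHandle* Insert(LRUHandle* h) {
  LRUHandle** ptr = FindPointer(h->key(), h->hash);
  LRUHandle* old = *ptr;
  h->next_hash = (old == nullptr ? nullptr : old->next_hash);
  *ptr = h;  // splice h into the chain, replacing old if it was there
  if (old == nullptr) {
    ++elems_;
    if (elems_ > length_) {
      // Since each cache entry is fairly large, we aim for a small
      // average linked list length (<= 1).
      Resize();
    }
  }
  return old;
}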
enum Flags : uint8_t {
  // Whether this entry is referenced by the hash table.
  IN_CACHE = (1 << 0),
  // Whether this entry is a high-priority entry.
  IS_HIGH_PRI = (1 << 1),
  // Whether this entry is in the high-pri pool.
  IN_HIGH_PRI_POOL = (1 << 2),
  // Whether this entry has had any lookups (hits).
  HAS_HIT = (1 << 3),
};

void LRUCacheShard::LRU_Insert(LRUHandle* e) {
  assert(e->next == nullptr);
  assert(e->prev == nullptr);
  size_t total_charge = e->CalcTotalCharge(metadata_charge_policy_);
  if (high_pri_pool_ratio_ > 0 && (e->IsHighPri() || e->HasHit())) {
    // Insert "e" to head of LRU list.
    e->next = &lru_;
    e->prev = lru_.prev;
    e->prev->next = e;
    e->next->prev = e;
    e->SetInHighPriPool(true);
    high_pri_pool_usage_ += total_charge;
    MaintainPoolSize();
  } else {
    // Insert "e" to the head of low-pri pool. Note that when
    // high_pri_pool_ratio is 0, head of low-pri pool is also head of LRU
    // list.
    e->next = lru_low_pri_->next;
    e->prev = lru_low_pri_;
    e->prev->next = e;
    e->next->prev = e;
    e->SetInHighPriPool(false);
    lru_low_pri_ = e;
  }
  lru_usage_ += total_charge;
}
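When the high-pri pool exceeds its budget, MaintainPoolSize spills the oldest high-pri entries into the low-pri pool instead of evicting them, by advancing lru_low_pri_ toward the head (from RocksDB's cache/lru_cache.cc):

void LRUCacheShard::MaintainPoolSize() {
  while (high_pri_pool_usage_ > high_pri_pool_capacity_) {
    // Overflow last entry in high-pri pool to low-pri pool.
    lru_low_pri_ = lru_low_pri_->next;
    assert(lru_low_pri_ != &lru_);
    lru_low_pri_->SetInHighPriPool(false);
    size_t total_charge =
        lru_low_pri_->CalcTotalCharge(metadata_charge_policy_);
    high_pri_pool_usage_ -= total_charge;
  }
}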
Approximate LRU and other approaches
As shown above, this LRU is implemented under a lock: with the HashTable + doubly linked list design, even a read must take the mutex to re-link the entry, which makes concurrency awkward. It is worth looking at approximate LRU algorithms, which achieve relatively lower overhead in the algorithm itself (a sketch of one such scheme follows):
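One representative of this family is sampled eviction in the style of Redis: each entry carries a coarse last-access clock, a hit updates only that clock, and eviction samples a handful of random entries and removes the one idle longest. A minimal sketch under those assumptions (hypothetical code, not Redis's implementation):

#include <cstdint>
#include <iterator>
#include <random>
#include <string>
#include <unordered_map>

// Hedged sketch of sampled ("approximate") LRU: no linked list to maintain,
// so a hit never re-links shared structure.
struct Entry {
  std::string value;
  uint64_t last_access;  // coarse logical clock, refreshed on each get()
};

class SampledLRU {
 public:
  explicit SampledLRU(size_t capacity) : capacity_(capacity) {}

  std::string* get(const std::string& key) {
    auto it = map_.find(key);
    if (it == map_.end()) return nullptr;
    it->second.last_access = ++clock_;  // only the clock is touched on a hit
    return &it->second.value;
  }

  void put(const std::string& key, std::string value) {
    if (map_.size() >= capacity_ && map_.find(key) == map_.end()) {
      evict_one();
    }
    map_[key] = Entry{std::move(value), ++clock_};
  }

 private:
  // Sample a few random entries and evict the one idle longest.
  void evict_one() {
    constexpr int kSamples = 5;
    auto victim = map_.end();
    for (int i = 0; i < kSamples && !map_.empty(); i++) {
      auto it = map_.begin();
      // O(n) advance for brevity; a real implementation would sample
      // from a backing array in O(1).
      std::advance(it, rng_() % map_.size());
      if (victim == map_.end() ||
          it->second.last_access < victim->second.last_access) {
        victim = it;
      }
    }
    if (victim != map_.end()) map_.erase(victim);
  }

  size_t capacity_;
  uint64_t clock_ = 0;
  std::unordered_map<std::string, Entry> map_;
  std::mt19937 rng_{std::random_device{}()};
};

With around five samples per eviction this tracks exact LRU closely in practice, and because hits never touch a shared list, the scheme is much friendlier to concurrent access than the locked list+table design above.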