146. LRU Cache
Main logic:
from collections import OrderedDict


class LRUCache:
    def __init__(self, capacity):
        """
        :type capacity: int
        """
        self.cap = capacity
        self.d = OrderedDict()

    def get(self, key):
        """
        :type key: int
        :rtype: int
        """
        if key in self.d:
            value = self.d.pop(key)
            self.d[key] = value  # re-insert so the key becomes the most recently used
            return value
        return -1

    def put(self, key, value):
        """
        :type key: int
        :type value: int
        :rtype: void
        """
        if key in self.d:
            self.d.pop(key)                 # remove so the re-insert below moves it to the end
        else:
            if self.cap > 0:
                self.cap -= 1               # still free slots: consume one
            else:
                self.d.popitem(last=False)  # full: evict the least recently used (front) entry
        self.d[key] = value                 # (re)insert as the most recently used entry
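A quick sanity check of the OrderedDict version (this usage sequence is just an illustration, mirroring the example from the problem statement):

cache = LRUCache(2)
cache.put(1, 1)
cache.put(2, 2)
print(cache.get(1))   # 1; key 1 is now the most recently used
cache.put(3, 3)       # cache is full, so key 2 (least recently used) is evicted
print(cache.get(2))   # -1
print(cache.get(3))   # 3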
Doubly linked list + dict implementation: every accessed or newly inserted node is placed at the end of the list (just before tail); on eviction, the front node (the one right after head) is removed:
class Node:
    def __init__(self, key, value):
        self.key = key
        self.value = value
        self.prev = None
        self.next = None


class LRUCache(object):
    def __init__(self, capacity):
        """
        :type capacity: int
        """
        self.capacity = capacity
        self.d = {}
        # dummy head/tail sentinels; real nodes live between them
        self.head, self.tail = Node(0, 0), Node(0, 0)
        self.head.next = self.tail
        self.tail.prev = self.head

    def get(self, key):
        """
        :type key: int
        :rtype: int
        """
        if key in self.d:
            node = self.d[key]
            self._remove(node)
            self._add(node)  # move to the tail side: most recently used
            return node.value
        return -1

    def put(self, key, value):
        """
        :type key: int
        :type value: int
        :rtype: void
        """
        new_node = Node(key, value)
        if key in self.d:
            node_remove = self.d[key]
            self._remove(node_remove)
        else:
            if self.capacity > 0:
                self.capacity -= 1
            else:
                node_remove = self.head.next  # least recently used node, right after head
                self._remove(node_remove)     # unlink it from the linked list
                del self.d[node_remove.key]   # and drop it from the dict
        self._add(new_node)                   # link the new node before tail
        self.d[key] = new_node                # index it in the dict

    def _add(self, node):
        last_node = self.tail.prev  # current last real node
        last_node.next = node
        node.prev = last_node
        node.next = self.tail
        self.tail.prev = node

    def _remove(self, node):
        prev_node = node.prev
        next_node = node.next
        prev_node.next = next_node
        next_node.prev = prev_node
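To visualize the list maintenance, a small hypothetical debug helper (not part of the solution above) can walk the list from head to tail; the least recently used key always sits right after head and the most recently used right before tail:

def keys_in_order(cache):
    # walk head -> tail and collect keys (LRU first, MRU last)
    keys, node = [], cache.head.next
    while node is not cache.tail:
        keys.append(node.key)
        node = node.next
    return keys

cache = LRUCache(2)
cache.put(1, 1)
cache.put(2, 2)
cache.get(1)                  # moves key 1 to the tail side
cache.put(3, 3)               # evicts key 2, the node right after head
print(keys_in_order(cache))   # [1, 3]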
Java: LinkedHashMap implements this directly. Pay attention to the constructor arguments and the return value of removeEldestEntry; this configuration is also mentioned in TIJ (Thinking in Java). With access order enabled, entries that have not been accessed stay at the front of the list.
import java.util.LinkedHashMap;
import java.util.Map;

class LRUCache2 {
    int capacity;
    Map<Integer, Integer> map;

    public LRUCache2(int capacity) {
        this.capacity = capacity;
        map = new LinkedHashMap<Integer, Integer>(capacity, 0.75f, true) { // true = order by access
            // return true when the eldest (least recently accessed) entry should be removed,
            // i.e. when the size exceeds the capacity
            protected boolean removeEldestEntry(Map.Entry<Integer, Integer> eldest) {
                return size() > capacity;
            }
        };
    }

    public int get(int key) {
        return map.getOrDefault(key, -1);
    }

    public void put(int key, int value) {
        map.put(key, value);
    }
}
0.75f is HashMap's default load factor, which controls automatic resizing. For example, with 16 buckets the resize threshold is 16 × 0.75 = 12: once the number of entries exceeds 12, the map grows to 32 buckets.
Purpose: balance time against space. A higher load factor reduces the space overhead but causes more collisions, which increases the time cost of get and put.