1717 from _thread import RLock
1818except :
1919 class RLock :
20- 'Dummy reentrant lock'
20+ 'Dummy reentrant lock for builds without threads'
2121 def __enter__ (self ): pass
2222 def __exit__ (self , exctype , excinst , exctb ): pass
2323
@@ -146,6 +146,12 @@ def __ne__(self, other):
146146_CacheInfo = namedtuple ("CacheInfo" , ["hits" , "misses" , "maxsize" , "currsize" ])
147147
148148class _HashedSeq (list ):
149+ """ This class guarantees that hash() will be called no more than once
150+ per element. This is important because the lru_cache() will hash
151+ the key multiple times on a cache miss.
152+
153+ """
154+
149155 __slots__ = 'hashvalue'
150156
151157 def __init__ (self , tup , hash = hash ):
@@ -159,7 +165,16 @@ def _make_key(args, kwds, typed,
159165 kwd_mark = (object (),),
160166 fasttypes = {int , str , frozenset , type (None )},
161167 sorted = sorted , tuple = tuple , type = type , len = len ):
162- 'Make a cache key from optionally typed positional and keyword arguments'
168+ """Make a cache key from optionally typed positional and keyword arguments
169+
170+ The key is constructed in a way that is as flat as possible rather than
171+ as a nested structure that would take more memory.
172+
173+ If there is only a single argument and its data type is known to cache
174+ its hash value, then that argument is returned without a wrapper. This
175+ saves space and improves lookup speed.
176+
177+ """
163178 key = args
164179 if kwds :
165180 sorted_items = sorted (kwds .items ())
@@ -217,7 +232,7 @@ def decorating_function(user_function):
217232 if maxsize == 0 :
218233
219234 def wrapper (* args , ** kwds ):
220- # no caching, just a statistics update after a successful call
235+ # No caching -- just a statistics update after a successful call
221236 nonlocal misses
222237 result = user_function (* args , ** kwds )
223238 misses += 1
@@ -226,7 +241,7 @@ def wrapper(*args, **kwds):
226241 elif maxsize is None :
227242
228243 def wrapper (* args , ** kwds ):
229- # simple caching without ordering or size limit
244+ # Simple caching without ordering or size limit
230245 nonlocal hits , misses , currsize
231246 key = make_key (args , kwds , typed )
232247 result = cache_get (key , sentinel )
@@ -242,14 +257,14 @@ def wrapper(*args, **kwds):
242257 else :
243258
244259 def wrapper (* args , ** kwds ):
245- # size limited caching that tracks accesses by recency
260+ # Size limited caching that tracks accesses by recency
246261 nonlocal root , hits , misses , currsize , full
247262 key = make_key (args , kwds , typed )
248263 with lock :
249264 link = cache_get (key )
250265 if link is not None :
251- # move the link to the front of the circular queue
252- link_prev , link_next , key , result = link
266+ # Move the link to the front of the circular queue
267+ link_prev , link_next , _key , result = link
253268 link_prev [NEXT ] = link_next
254269 link_next [PREV ] = link_prev
255270 last = root [PREV ]
@@ -261,26 +276,34 @@ def wrapper(*args, **kwds):
261276 result = user_function (* args , ** kwds )
262277 with lock :
263278 if key in cache :
264- # getting here means that this same key was added to the
265- # cache while the lock was released. since the link
279+ # Getting here means that this same key was added to the
280+ # cache while the lock was released. Since the link
266281 # update is already done, we need only return the
267282 # computed result and update the count of misses.
268283 pass
269284 elif full :
270- # use the old root to store the new key and result
285+ # Use the old root to store the new key and result.
271286 oldroot = root
272287 oldroot [KEY ] = key
273288 oldroot [RESULT ] = result
274- # empty the oldest link and make it the new root
289+ # Empty the oldest link and make it the new root.
290+ # Keep a reference to the old key and old result to
291+ # prevent their ref counts from going to zero during the
292+ # update. That will prevent potentially arbitrary object
293+ # clean-up code (i.e. __del__) from running while we're
294+ # still adjusting the links.
275295 root = oldroot [NEXT ]
276296 oldkey = root [KEY ]
277- oldvalue = root [RESULT ]
297+ oldresult = root [RESULT ]
278298 root [KEY ] = root [RESULT ] = None
279- # now update the cache dictionary for the new links
299+ # Now update the cache dictionary.
280300 del cache [oldkey ]
301+ # Save the potentially reentrant cache[key] assignment
302+ # for last, after the root and links have been put in
303+ # a consistent state.
281304 cache [key ] = oldroot
282305 else :
283- # put result in a new link at the front of the queue
306+ # Put result in a new link at the front of the queue.
284307 last = root [PREV ]
285308 link = [last , root , key , result ]
286309 last [NEXT ] = root [PREV ] = cache [key ] = link
0 commit comments