1 | n/a | """functools.py - Tools for working with functions and callable objects |
---|
2 | n/a | """ |
---|
3 | n/a | # Python module wrapper for _functools C module |
---|
4 | n/a | # to allow utilities written in Python to be added |
---|
5 | n/a | # to the functools module. |
---|
6 | n/a | # Written by Nick Coghlan <ncoghlan at gmail.com>, |
---|
7 | n/a | # Raymond Hettinger <python at rcn.com>, |
---|
8 | n/a | # and Łukasz Langa <lukasz at langa.pl>. |
---|
9 | n/a | # Copyright (C) 2006-2013 Python Software Foundation. |
---|
10 | n/a | # See C source code for _functools credits/copyright |
---|
11 | n/a | |
---|
# Public names re-exported by this module; kept in sync with the
# definitions below (reduce/partial may come from the _functools C module).
__all__ = ['update_wrapper', 'wraps', 'WRAPPER_ASSIGNMENTS', 'WRAPPER_UPDATES',
           'total_ordering', 'cmp_to_key', 'lru_cache', 'reduce', 'partial',
           'partialmethod', 'singledispatch']
---|
15 | n/a | |
---|
16 | n/a | try: |
---|
17 | n/a | from _functools import reduce |
---|
18 | n/a | except ImportError: |
---|
19 | n/a | pass |
---|
20 | n/a | from abc import get_cache_token |
---|
21 | n/a | from collections import namedtuple |
---|
22 | n/a | from types import MappingProxyType |
---|
23 | n/a | from weakref import WeakKeyDictionary |
---|
24 | n/a | from reprlib import recursive_repr |
---|
25 | n/a | try: |
---|
26 | n/a | from _thread import RLock |
---|
27 | n/a | except ImportError: |
---|
    class RLock:
        'Dummy reentrant lock for builds without threads'
        # Both methods are deliberate no-ops: with no threads there is
        # nothing to guard, but lru_cache still uses `with lock:` blocks,
        # so this stand-in only needs the context manager protocol.
        def __enter__(self): pass
        def __exit__(self, exctype, excinst, exctb): pass
---|
32 | n/a | |
---|
33 | n/a | |
---|
34 | n/a | ################################################################################ |
---|
35 | n/a | ### update_wrapper() and wraps() decorator |
---|
36 | n/a | ################################################################################ |
---|
37 | n/a | |
---|
38 | n/a | # update_wrapper() and wraps() are tools to help write |
---|
39 | n/a | # wrapper functions that can handle naive introspection |
---|
40 | n/a | |
---|
# Attributes copied verbatim from the wrapped function by default.
WRAPPER_ASSIGNMENTS = ('__module__', '__name__', '__qualname__', '__doc__',
                       '__annotations__')
# Mapping attributes whose contents are merged (via .update) by default.
WRAPPER_UPDATES = ('__dict__',)
---|
def update_wrapper(wrapper,
                   wrapped,
                   assigned = WRAPPER_ASSIGNMENTS,
                   updated = WRAPPER_UPDATES):
    """Make *wrapper* look like *wrapped* for introspection purposes.

    wrapper is the function to be updated
    wrapped is the original function
    assigned names attributes copied directly from wrapped to wrapper
    (defaults to functools.WRAPPER_ASSIGNMENTS)
    updated names mapping attributes of wrapper that are update()'d with
    the corresponding attribute of wrapped (defaults to
    functools.WRAPPER_UPDATES)
    """
    # Copy attributes, silently skipping any the wrapped object lacks
    # (e.g. partial objects have no __name__).
    for attr in assigned:
        try:
            value = getattr(wrapped, attr)
        except AttributeError:
            continue
        setattr(wrapper, attr, value)
    # Merge mapping attributes instead of replacing them.
    for attr in updated:
        getattr(wrapper, attr).update(getattr(wrapped, attr, {}))
    # Issue #17482: assign __wrapped__ last so a stale __wrapped__ copied
    # in via the wrapped function's __dict__ is overwritten.
    wrapper.__wrapped__ = wrapped
    # Returning wrapper lets this serve as a decorator via partial().
    return wrapper
---|
73 | n/a | |
---|
def wraps(wrapped,
          assigned = WRAPPER_ASSIGNMENTS,
          updated = WRAPPER_UPDATES):
    """Decorator factory that applies update_wrapper() to a wrapper function.

    The returned decorator invokes update_wrapper() with the decorated
    function as the wrapper argument and the arguments given here as the
    remaining arguments; defaults match update_wrapper().  This is just a
    convenience wrapper around partial(update_wrapper, ...).
    """
    return partial(
        update_wrapper,
        wrapped=wrapped,
        assigned=assigned,
        updated=updated,
    )
---|
87 | n/a | |
---|
88 | n/a | |
---|
89 | n/a | ################################################################################ |
---|
90 | n/a | ### total_ordering class decorator |
---|
91 | n/a | ################################################################################ |
---|
92 | n/a | |
---|
93 | n/a | # The total ordering functions all invoke the root magic method directly |
---|
94 | n/a | # rather than using the corresponding operator. This avoids possible |
---|
95 | n/a | # infinite recursion that could occur when the operator dispatch logic |
---|
96 | n/a | # detects a NotImplemented result and then calls a reflected method. |
---|
97 | n/a | |
---|
98 | n/a | def _gt_from_lt(self, other, NotImplemented=NotImplemented): |
---|
99 | n/a | 'Return a > b. Computed by @total_ordering from (not a < b) and (a != b).' |
---|
100 | n/a | op_result = self.__lt__(other) |
---|
101 | n/a | if op_result is NotImplemented: |
---|
102 | n/a | return op_result |
---|
103 | n/a | return not op_result and self != other |
---|
104 | n/a | |
---|
105 | n/a | def _le_from_lt(self, other, NotImplemented=NotImplemented): |
---|
106 | n/a | 'Return a <= b. Computed by @total_ordering from (a < b) or (a == b).' |
---|
107 | n/a | op_result = self.__lt__(other) |
---|
108 | n/a | return op_result or self == other |
---|
109 | n/a | |
---|
110 | n/a | def _ge_from_lt(self, other, NotImplemented=NotImplemented): |
---|
111 | n/a | 'Return a >= b. Computed by @total_ordering from (not a < b).' |
---|
112 | n/a | op_result = self.__lt__(other) |
---|
113 | n/a | if op_result is NotImplemented: |
---|
114 | n/a | return op_result |
---|
115 | n/a | return not op_result |
---|
116 | n/a | |
---|
117 | n/a | def _ge_from_le(self, other, NotImplemented=NotImplemented): |
---|
118 | n/a | 'Return a >= b. Computed by @total_ordering from (not a <= b) or (a == b).' |
---|
119 | n/a | op_result = self.__le__(other) |
---|
120 | n/a | if op_result is NotImplemented: |
---|
121 | n/a | return op_result |
---|
122 | n/a | return not op_result or self == other |
---|
123 | n/a | |
---|
124 | n/a | def _lt_from_le(self, other, NotImplemented=NotImplemented): |
---|
125 | n/a | 'Return a < b. Computed by @total_ordering from (a <= b) and (a != b).' |
---|
126 | n/a | op_result = self.__le__(other) |
---|
127 | n/a | if op_result is NotImplemented: |
---|
128 | n/a | return op_result |
---|
129 | n/a | return op_result and self != other |
---|
130 | n/a | |
---|
131 | n/a | def _gt_from_le(self, other, NotImplemented=NotImplemented): |
---|
132 | n/a | 'Return a > b. Computed by @total_ordering from (not a <= b).' |
---|
133 | n/a | op_result = self.__le__(other) |
---|
134 | n/a | if op_result is NotImplemented: |
---|
135 | n/a | return op_result |
---|
136 | n/a | return not op_result |
---|
137 | n/a | |
---|
138 | n/a | def _lt_from_gt(self, other, NotImplemented=NotImplemented): |
---|
139 | n/a | 'Return a < b. Computed by @total_ordering from (not a > b) and (a != b).' |
---|
140 | n/a | op_result = self.__gt__(other) |
---|
141 | n/a | if op_result is NotImplemented: |
---|
142 | n/a | return op_result |
---|
143 | n/a | return not op_result and self != other |
---|
144 | n/a | |
---|
145 | n/a | def _ge_from_gt(self, other, NotImplemented=NotImplemented): |
---|
146 | n/a | 'Return a >= b. Computed by @total_ordering from (a > b) or (a == b).' |
---|
147 | n/a | op_result = self.__gt__(other) |
---|
148 | n/a | return op_result or self == other |
---|
149 | n/a | |
---|
150 | n/a | def _le_from_gt(self, other, NotImplemented=NotImplemented): |
---|
151 | n/a | 'Return a <= b. Computed by @total_ordering from (not a > b).' |
---|
152 | n/a | op_result = self.__gt__(other) |
---|
153 | n/a | if op_result is NotImplemented: |
---|
154 | n/a | return op_result |
---|
155 | n/a | return not op_result |
---|
156 | n/a | |
---|
157 | n/a | def _le_from_ge(self, other, NotImplemented=NotImplemented): |
---|
158 | n/a | 'Return a <= b. Computed by @total_ordering from (not a >= b) or (a == b).' |
---|
159 | n/a | op_result = self.__ge__(other) |
---|
160 | n/a | if op_result is NotImplemented: |
---|
161 | n/a | return op_result |
---|
162 | n/a | return not op_result or self == other |
---|
163 | n/a | |
---|
164 | n/a | def _gt_from_ge(self, other, NotImplemented=NotImplemented): |
---|
165 | n/a | 'Return a > b. Computed by @total_ordering from (a >= b) and (a != b).' |
---|
166 | n/a | op_result = self.__ge__(other) |
---|
167 | n/a | if op_result is NotImplemented: |
---|
168 | n/a | return op_result |
---|
169 | n/a | return op_result and self != other |
---|
170 | n/a | |
---|
171 | n/a | def _lt_from_ge(self, other, NotImplemented=NotImplemented): |
---|
172 | n/a | 'Return a < b. Computed by @total_ordering from (not a >= b).' |
---|
173 | n/a | op_result = self.__ge__(other) |
---|
174 | n/a | if op_result is NotImplemented: |
---|
175 | n/a | return op_result |
---|
176 | n/a | return not op_result |
---|
177 | n/a | |
---|
# Maps each root comparison method to the (name, implementation) pairs
# that total_ordering() installs when the class doesn't define them.
_convert = {
    '__lt__': [('__gt__', _gt_from_lt),
               ('__le__', _le_from_lt),
               ('__ge__', _ge_from_lt)],
    '__le__': [('__ge__', _ge_from_le),
               ('__lt__', _lt_from_le),
               ('__gt__', _gt_from_le)],
    '__gt__': [('__lt__', _lt_from_gt),
               ('__ge__', _ge_from_gt),
               ('__le__', _le_from_gt)],
    '__ge__': [('__le__', _le_from_ge),
               ('__gt__', _gt_from_ge),
               ('__lt__', _lt_from_ge)]
}
---|
192 | n/a | |
---|
def total_ordering(cls):
    """Class decorator that fills in missing ordering methods"""
    # Only comparisons the class (or a non-object base) defines itself
    # count as roots; object's defaults don't.
    defined = [op for op in _convert
               if getattr(cls, op, None) is not getattr(object, op, None)]
    if not defined:
        raise ValueError('must define at least one ordering operation: < > <= >=')
    root = max(defined)   # prefer __lt__ to __le__ to __gt__ to __ge__
    for opname, opfunc in _convert[root]:
        if opname in defined:
            continue
        opfunc.__name__ = opname
        setattr(cls, opname, opfunc)
    return cls
---|
205 | n/a | |
---|
206 | n/a | |
---|
207 | n/a | ################################################################################ |
---|
208 | n/a | ### cmp_to_key() function converter |
---|
209 | n/a | ################################################################################ |
---|
210 | n/a | |
---|
def cmp_to_key(mycmp):
    """Convert a cmp= function into a key= function"""
    class K(object):
        """Sort key that defers every rich comparison to mycmp."""
        __slots__ = ['obj']
        def __init__(self, obj):
            self.obj = obj
        def __eq__(self, other):
            return mycmp(self.obj, other.obj) == 0
        def __lt__(self, other):
            return mycmp(self.obj, other.obj) < 0
        def __le__(self, other):
            return mycmp(self.obj, other.obj) <= 0
        def __gt__(self, other):
            return mycmp(self.obj, other.obj) > 0
        def __ge__(self, other):
            return mycmp(self.obj, other.obj) >= 0
        # Explicitly unhashable: comparison is via mycmp, so the default
        # identity hash would be misleading.
        __hash__ = None
    return K
---|
229 | n/a | |
---|
230 | n/a | try: |
---|
231 | n/a | from _functools import cmp_to_key |
---|
232 | n/a | except ImportError: |
---|
233 | n/a | pass |
---|
234 | n/a | |
---|
235 | n/a | |
---|
236 | n/a | ################################################################################ |
---|
237 | n/a | ### partial() argument application |
---|
238 | n/a | ################################################################################ |
---|
239 | n/a | |
---|
# Purely functional, no descriptor behaviour
class partial:
    """New function with partial application of the given arguments
    and keywords.
    """

    __slots__ = "func", "args", "keywords", "__dict__", "__weakref__"

    def __new__(*args, **keywords):
        # Declared as *args so that keyword arguments named 'cls' or
        # 'func' pass through to the wrapped callable untouched.
        if not args:
            raise TypeError("descriptor '__new__' of partial needs an argument")
        if len(args) < 2:
            raise TypeError("type 'partial' takes at least one argument")
        cls, func, *args = args
        if not callable(func):
            raise TypeError("the first argument must be callable")
        args = tuple(args)

        # Flatten nested partials: partial(partial(f, a), b) stores f
        # with the combined positional and keyword arguments.
        if hasattr(func, "func"):
            args = func.args + args
            merged = func.keywords.copy()
            merged.update(keywords)
            keywords = merged
            del merged
            func = func.func

        self = super(partial, cls).__new__(cls)
        self.func = func
        self.args = args
        self.keywords = keywords
        return self

    def __call__(*args, **keywords):
        if not args:
            raise TypeError("descriptor '__call__' of partial needs an argument")
        self, *args = args
        # Stored keywords first; call-time keywords override them.
        kwargs = self.keywords.copy()
        kwargs.update(keywords)
        return self.func(*self.args, *args, **kwargs)

    @recursive_repr()
    def __repr__(self):
        pieces = [repr(self.func)]
        pieces += [repr(a) for a in self.args]
        pieces += ["{}={!r}".format(k, v) for k, v in self.keywords.items()]
        name = type(self).__qualname__
        if type(self).__module__ == "functools":
            name = "functools." + name
        return "{}({})".format(name, ", ".join(pieces))

    def __reduce__(self):
        # Pickle as (class, minimal ctor args, full state); the state is
        # restored by __setstate__ below.
        return type(self), (self.func,), (self.func, self.args,
               self.keywords or None, self.__dict__ or None)

    def __setstate__(self, state):
        if not isinstance(state, tuple):
            raise TypeError("argument to __setstate__ must be a tuple")
        if len(state) != 4:
            raise TypeError(f"expected 4 items in state, got {len(state)}")
        func, args, kwds, namespace = state
        if (not callable(func) or not isinstance(args, tuple) or
                (kwds is not None and not isinstance(kwds, dict)) or
                (namespace is not None and not isinstance(namespace, dict))):
            raise TypeError("invalid partial state")

        args = tuple(args)   # normalize in case a tuple subclass arrived
        if kwds is None:
            kwds = {}
        elif type(kwds) is not dict:   # XXX does it need to be *exactly* dict?
            kwds = dict(kwds)
        if namespace is None:
            namespace = {}

        self.__dict__ = namespace
        self.func = func
        self.args = args
        self.keywords = kwds
---|
318 | n/a | |
---|
319 | n/a | try: |
---|
320 | n/a | from _functools import partial |
---|
321 | n/a | except ImportError: |
---|
322 | n/a | pass |
---|
323 | n/a | |
---|
# Descriptor version
class partialmethod(object):
    """Method descriptor with partial application of the given arguments
    and keywords.

    Supports wrapping existing descriptors and handles non-descriptor
    callables as instance methods.
    """

    def __init__(self, func, *args, **keywords):
        # Accept plain callables and non-callable descriptors such as
        # classmethod objects; partial() itself rejects the latter.
        if not callable(func) and not hasattr(func, "__get__"):
            raise TypeError("{!r} is not callable or a descriptor"
                            .format(func))

        if isinstance(func, partialmethod):
            # Flatten nested partialmethods so cls/self always lands in
            # front of all stored arguments, and only one function is
            # invoked at call time.
            self.func = func.func
            self.args = func.args + args
            self.keywords = {**func.keywords, **keywords}
        else:
            self.func = func
            self.args = args
            self.keywords = keywords

    def __repr__(self):
        klass = self.__class__
        arg_text = ", ".join(map(repr, self.args))
        kw_text = ", ".join("{}={!r}".format(k, v)
                            for k, v in self.keywords.items())
        return "{module}.{cls}({func}, {args}, {keywords})".format(
            module=klass.__module__,
            cls=klass.__qualname__,
            func=self.func,
            args=arg_text,
            keywords=kw_text)

    def _make_unbound_method(self):
        # Fallback used when self.func is not a descriptor: behave like a
        # plain function whose first argument is the implicit cls/self.
        def _method(*args, **keywords):
            call_keywords = {**self.keywords, **keywords}
            cls_or_self, *rest = args
            return self.func(cls_or_self, *self.args, *rest, **call_keywords)
        _method.__isabstractmethod__ = self.__isabstractmethod__
        _method._partialmethod = self
        return _method

    def __get__(self, obj, cls):
        get = getattr(self.func, "__get__", None)
        if get is not None:
            new_func = get(obj, cls)
            if new_func is not self.func:
                # The underlying descriptor produced a new callable (e.g.
                # a bound method) -- wrap that with partial().
                result = partial(new_func, *self.args, **self.keywords)
                try:
                    result.__self__ = new_func.__self__
                except AttributeError:
                    pass
                return result
        # The underlying callable isn't a descriptor (or its __get__
        # returned itself), so act like an ordinary instance method.
        return self._make_unbound_method().__get__(obj, cls)

    @property
    def __isabstractmethod__(self):
        # Mirror the abstractness of the wrapped callable.
        return getattr(self.func, "__isabstractmethod__", False)
---|
397 | n/a | |
---|
398 | n/a | |
---|
399 | n/a | ################################################################################ |
---|
400 | n/a | ### LRU Cache function decorator |
---|
401 | n/a | ################################################################################ |
---|
402 | n/a | |
---|
# Statistics record returned by a cached function's cache_info() method.
_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])
---|
404 | n/a | |
---|
405 | n/a | class _HashedSeq(list): |
---|
406 | n/a | """ This class guarantees that hash() will be called no more than once |
---|
407 | n/a | per element. This is important because the lru_cache() will hash |
---|
408 | n/a | the key multiple times on a cache miss. |
---|
409 | n/a | |
---|
410 | n/a | """ |
---|
411 | n/a | |
---|
412 | n/a | __slots__ = 'hashvalue' |
---|
413 | n/a | |
---|
414 | n/a | def __init__(self, tup, hash=hash): |
---|
415 | n/a | self[:] = tup |
---|
416 | n/a | self.hashvalue = hash(tup) |
---|
417 | n/a | |
---|
418 | n/a | def __hash__(self): |
---|
419 | n/a | return self.hashvalue |
---|
420 | n/a | |
---|
421 | n/a | def _make_key(args, kwds, typed, |
---|
422 | n/a | kwd_mark = (object(),), |
---|
423 | n/a | fasttypes = {int, str, frozenset, type(None)}, |
---|
424 | n/a | tuple=tuple, type=type, len=len): |
---|
425 | n/a | """Make a cache key from optionally typed positional and keyword arguments |
---|
426 | n/a | |
---|
427 | n/a | The key is constructed in a way that is flat as possible rather than |
---|
428 | n/a | as a nested structure that would take more memory. |
---|
429 | n/a | |
---|
430 | n/a | If there is only a single argument and its data type is known to cache |
---|
431 | n/a | its hash value, then that argument is returned without a wrapper. This |
---|
432 | n/a | saves space and improves lookup speed. |
---|
433 | n/a | |
---|
434 | n/a | """ |
---|
435 | n/a | key = args |
---|
436 | n/a | if kwds: |
---|
437 | n/a | key += kwd_mark |
---|
438 | n/a | for item in kwds.items(): |
---|
439 | n/a | key += item |
---|
440 | n/a | if typed: |
---|
441 | n/a | key += tuple(type(v) for v in args) |
---|
442 | n/a | if kwds: |
---|
443 | n/a | key += tuple(type(v) for v in kwds.values()) |
---|
444 | n/a | elif len(key) == 1 and type(key[0]) in fasttypes: |
---|
445 | n/a | return key[0] |
---|
446 | n/a | return _HashedSeq(key) |
---|
447 | n/a | |
---|
def lru_cache(maxsize=128, typed=False):
    """Least-recently-used cache decorator.

    With *maxsize* set to None the LRU machinery is disabled and the
    cache may grow without bound.

    With *typed* set to True, arguments of different types are cached
    separately — f(3.0) and f(3) become distinct calls with distinct
    results.

    Arguments to the cached function must be hashable.

    The decorated function gains f.cache_info(), returning a
    (hits, misses, maxsize, currsize) named tuple, f.cache_clear() to
    reset the cache and statistics, and f.__wrapped__ for the original.

    See: http://en.wikipedia.org/wiki/Cache_algorithms#Least_Recently_Used
    """
    # Users should only touch the public API (cache_info, cache_clear,
    # __wrapped__); internals are encapsulated for thread safety and so
    # the implementation can change (including a possible C version).

    # Catch the common mistake of writing @lru_cache with no parentheses,
    # which would deliver the decorated function here as *maxsize*.
    if maxsize is not None and not isinstance(maxsize, int):
        raise TypeError('Expected maxsize to be an integer or None')

    def decorating_function(user_function):
        wrapper = _lru_cache_wrapper(user_function, maxsize, typed, _CacheInfo)
        return update_wrapper(wrapper, user_function)

    return decorating_function
---|
484 | n/a | |
---|
def _lru_cache_wrapper(user_function, maxsize, typed, _CacheInfo):
    """Build the caching wrapper around *user_function*.

    One of three implementations is chosen by *maxsize*: 0 (no caching,
    statistics only), None (plain unbounded dict cache), or a positive
    bound (dict plus a circular doubly linked list tracking recency).
    """
    # Constants shared by all lru cache instances:
    sentinel = object()          # unique object used to signal cache misses
    make_key = _make_key         # build a key from the function arguments
    PREV, NEXT, KEY, RESULT = 0, 1, 2, 3   # names for the link fields

    cache = {}
    hits = misses = 0
    full = False
    cache_get = cache.get    # bound method to lookup a key or return None
    cache_len = cache.__len__  # get cache size without calling len()
    lock = RLock()           # because linkedlist updates aren't threadsafe
    root = []                # root of the circular doubly linked list
    root[:] = [root, root, None, None]     # initialize by pointing to self

    if maxsize == 0:

        def wrapper(*args, **kwds):
            # No caching -- just a statistics update after a successful call
            nonlocal misses
            result = user_function(*args, **kwds)
            misses += 1
            return result

    elif maxsize is None:

        def wrapper(*args, **kwds):
            # Simple caching without ordering or size limit
            nonlocal hits, misses
            key = make_key(args, kwds, typed)
            result = cache_get(key, sentinel)
            if result is not sentinel:
                hits += 1
                return result
            result = user_function(*args, **kwds)
            cache[key] = result
            misses += 1
            return result

    else:

        def wrapper(*args, **kwds):
            # Size limited caching that tracks accesses by recency
            nonlocal root, hits, misses, full
            key = make_key(args, kwds, typed)
            with lock:
                link = cache_get(key)
                if link is not None:
                    # Move the link to the front of the circular queue
                    link_prev, link_next, _key, result = link
                    link_prev[NEXT] = link_next
                    link_next[PREV] = link_prev
                    last = root[PREV]
                    last[NEXT] = root[PREV] = link
                    link[PREV] = last
                    link[NEXT] = root
                    hits += 1
                    return result
            # The lock is released while the user function runs, so another
            # thread may compute and insert the same key in the meantime.
            result = user_function(*args, **kwds)
            with lock:
                if key in cache:
                    # Getting here means that this same key was added to the
                    # cache while the lock was released.  Since the link
                    # update is already done, we need only return the
                    # computed result and update the count of misses.
                    pass
                elif full:
                    # Use the old root to store the new key and result.
                    oldroot = root
                    oldroot[KEY] = key
                    oldroot[RESULT] = result
                    # Empty the oldest link and make it the new root.
                    # Keep a reference to the old key and old result to
                    # prevent their ref counts from going to zero during the
                    # update. That will prevent potentially arbitrary object
                    # clean-up code (i.e. __del__) from running while we're
                    # still adjusting the links.
                    root = oldroot[NEXT]
                    oldkey = root[KEY]
                    oldresult = root[RESULT]
                    root[KEY] = root[RESULT] = None
                    # Now update the cache dictionary.
                    del cache[oldkey]
                    # Save the potentially reentrant cache[key] assignment
                    # for last, after the root and links have been put in
                    # a consistent state.
                    cache[key] = oldroot
                else:
                    # Put result in a new link at the front of the queue.
                    last = root[PREV]
                    link = [last, root, key, result]
                    last[NEXT] = root[PREV] = cache[key] = link
                    # Use the cache_len bound method instead of the len() function
                    # which could potentially be wrapped in an lru_cache itself.
                    full = (cache_len() >= maxsize)
                misses += 1
            return result

    def cache_info():
        """Report cache statistics"""
        with lock:
            return _CacheInfo(hits, misses, maxsize, cache_len())

    def cache_clear():
        """Clear the cache and cache statistics"""
        nonlocal hits, misses, full
        with lock:
            cache.clear()
            root[:] = [root, root, None, None]
            hits = misses = 0
            full = False

    wrapper.cache_info = cache_info
    wrapper.cache_clear = cache_clear
    return wrapper
---|
600 | n/a | |
---|
601 | n/a | try: |
---|
602 | n/a | from _functools import _lru_cache_wrapper |
---|
603 | n/a | except ImportError: |
---|
604 | n/a | pass |
---|
605 | n/a | |
---|
606 | n/a | |
---|
607 | n/a | ################################################################################ |
---|
608 | n/a | ### singledispatch() - single-dispatch generic function decorator |
---|
609 | n/a | ################################################################################ |
---|
610 | n/a | |
---|
611 | n/a | def _c3_merge(sequences): |
---|
612 | n/a | """Merges MROs in *sequences* to a single MRO using the C3 algorithm. |
---|
613 | n/a | |
---|
614 | n/a | Adapted from http://www.python.org/download/releases/2.3/mro/. |
---|
615 | n/a | |
---|
616 | n/a | """ |
---|
617 | n/a | result = [] |
---|
618 | n/a | while True: |
---|
619 | n/a | sequences = [s for s in sequences if s] # purge empty sequences |
---|
620 | n/a | if not sequences: |
---|
621 | n/a | return result |
---|
622 | n/a | for s1 in sequences: # find merge candidates among seq heads |
---|
623 | n/a | candidate = s1[0] |
---|
624 | n/a | for s2 in sequences: |
---|
625 | n/a | if candidate in s2[1:]: |
---|
626 | n/a | candidate = None |
---|
627 | n/a | break # reject the current head, it appears later |
---|
628 | n/a | else: |
---|
629 | n/a | break |
---|
630 | n/a | if candidate is None: |
---|
631 | n/a | raise RuntimeError("Inconsistent hierarchy") |
---|
632 | n/a | result.append(candidate) |
---|
633 | n/a | # remove the chosen candidate |
---|
634 | n/a | for seq in sequences: |
---|
635 | n/a | if seq[0] == candidate: |
---|
636 | n/a | del seq[0] |
---|
637 | n/a | |
---|
def _c3_mro(cls, abcs=None):
    """Computes the method resolution order using extended C3 linearization.

    If no *abcs* are given, the algorithm works exactly like the built-in C3
    linearization used for method resolution.

    If given, *abcs* is a list of abstract base classes that should be inserted
    into the resulting MRO. Unrelated ABCs are ignored and don't end up in the
    result. The algorithm inserts ABCs where their functionality is introduced,
    i.e. issubclass(cls, abc) returns True for the class itself but returns
    False for all its direct base classes. Implicit ABCs for a given class
    (either registered or inferred from the presence of a special method like
    __len__) are inserted directly after the last ABC explicitly listed in the
    MRO of said class. If two implicit ABCs end up next to each other in the
    resulting MRO, their ordering depends on the order of types in *abcs*.

    """
    # Everything up to and including the last explicit ABC among the direct
    # bases is considered "explicit"; the remainder comes after the implicit
    # ABCs.  A forward scan keeping the last hit gives the same boundary as
    # scanning from the end for the first ABC.
    boundary = 0
    for position, base in enumerate(cls.__bases__):
        if hasattr(base, '__abstractmethods__'):
            boundary = position + 1
    abcs = list(abcs) if abcs else []
    explicit_bases = list(cls.__bases__[:boundary])
    other_bases = list(cls.__bases__[boundary:])
    # An ABC belongs here iff *cls* itself introduces its behaviour: the
    # class implements it but none of its direct bases do.
    abstract_bases = [
        base for base in abcs
        if issubclass(cls, base)
        and not any(issubclass(b, base) for b in cls.__bases__)
    ]
    for base in abstract_bases:
        abcs.remove(base)  # consumed here; don't re-insert in recursion
    linearizations = (
        [_c3_mro(base, abcs=abcs) for base in explicit_bases]
        + [_c3_mro(base, abcs=abcs) for base in abstract_bases]
        + [_c3_mro(base, abcs=abcs) for base in other_bases]
    )
    return _c3_merge(
        [[cls]]
        + linearizations
        + [explicit_bases, abstract_bases, other_bases]
    )
---|
682 | n/a | |
---|
def _compose_mro(cls, types):
    """Calculates the method resolution order for a given class *cls*.

    Includes relevant abstract base classes (with their respective bases) from
    the *types* iterable. Uses a modified C3 linearization algorithm.

    """
    mro_set = set(cls.__mro__)

    # Keep only types that are actual ABCs implemented by *cls* and not
    # already part of its regular MRO.
    def relevant(candidate):
        return (candidate not in mro_set
                and hasattr(candidate, '__mro__')
                and issubclass(cls, candidate))
    candidates = [t for t in types if relevant(t)]

    # Drop types that are strict bases of other candidates; linearizing the
    # derived candidate will pull them into the MRO anyway.
    def shadowed(candidate):
        return any(candidate != other and candidate in other.__mro__
                   for other in candidates)
    candidates = [t for t in candidates if not shadowed(t)]

    # Subclasses of the candidate ABCs which *cls* also implements can be
    # used to stabilize the ordering among otherwise-unrelated ABCs.
    candidate_set = set(candidates)
    mro = []
    for typ in candidates:
        useful_chains = []
        for sub in typ.__subclasses__():
            if sub not in mro_set and issubclass(cls, sub):
                useful_chains.append(
                    [s for s in sub.__mro__ if s in candidate_set])
        if not useful_chains:
            mro.append(typ)
            continue
        # Favor subclasses with the biggest number of useful bases.
        useful_chains.sort(key=len, reverse=True)
        for chain in useful_chains:
            for subcls in chain:
                if subcls not in mro:
                    mro.append(subcls)
    return _c3_mro(cls, abcs=mro)
---|
723 | n/a | |
---|
def _find_impl(cls, registry):
    """Returns the best matching implementation from *registry* for type *cls*.

    Where there is no registered implementation for a specific type, its method
    resolution order is used to find a more generic implementation.

    Note: if *registry* does not contain an implementation for the base
    *object* type, this function may return None.

    """
    mro = _compose_mro(cls, registry.keys())
    best = None
    for candidate in mro:
        if best is None:
            if candidate in registry:
                best = candidate
            continue
        # A match was already found; if the very next registered type is an
        # implicit ABC unrelated to the match, there is no principled way to
        # pick between them -- refuse the temptation to guess.
        ambiguous = (candidate in registry
                     and candidate not in cls.__mro__
                     and best not in cls.__mro__
                     and not issubclass(best, candidate))
        if ambiguous:
            raise RuntimeError("Ambiguous dispatch: {} or {}".format(
                best, candidate))
        break
    return registry.get(best)
---|
749 | n/a | |
---|
def singledispatch(func):
    """Single-dispatch generic function decorator.

    Transforms a function into a generic function, which can have different
    behaviours depending upon the type of its first argument. The decorated
    function acts as the default implementation, and additional
    implementations can be registered using the register() attribute of the
    generic function.

    """
    # Maps a class to its registered implementation (exposed read-only via
    # wrapper.registry).
    registry = {}
    # Resolved-dispatch cache; weak keys so registered-for classes can still
    # be garbage collected.
    dispatch_cache = WeakKeyDictionary()
    # ABC cache token captured when the first ABC is registered; None means
    # no ABCs are involved and the cache never needs token invalidation.
    cache_token = None

    def dispatch(cls):
        """generic_func.dispatch(cls) -> <function implementation>

        Runs the dispatch algorithm to return the best available implementation
        for the given *cls* registered on *generic_func*.

        """
        nonlocal cache_token
        if cache_token is not None:
            current_token = get_cache_token()
            if cache_token != current_token:
                # Some ABC registry changed; cached resolutions may be stale.
                dispatch_cache.clear()
                cache_token = current_token
        try:
            impl = dispatch_cache[cls]
        except KeyError:
            try:
                impl = registry[cls]
            except KeyError:
                # No exact match: walk the (ABC-extended) MRO for the most
                # specific registered implementation.
                impl = _find_impl(cls, registry)
            dispatch_cache[cls] = impl
        return impl

    def register(cls, func=None):
        """generic_func.register(cls, func) -> func

        Registers a new implementation for the given *cls* on a *generic_func*.

        """
        nonlocal cache_token
        if func is None:
            # Bare-class form: @generic_func.register(SomeClass) decorator.
            return lambda f: register(cls, f)
        registry[cls] = func
        if cache_token is None and hasattr(cls, '__abstractmethods__'):
            # First ABC registered: start tracking the ABC cache token so
            # virtual-subclass registrations invalidate our dispatch cache.
            cache_token = get_cache_token()
        dispatch_cache.clear()
        return func

    funcname = getattr(func, '__name__', 'singledispatch function')

    def wrapper(*args, **kw):
        if not args:
            # Without this guard, args[0] below raises an opaque
            # "IndexError: tuple index out of range" -- report the actual
            # problem (no argument to dispatch on) instead.
            raise TypeError('{} requires at least '
                            '1 positional argument'.format(funcname))
        return dispatch(args[0].__class__)(*args, **kw)

    registry[object] = func
    wrapper.register = register
    wrapper.dispatch = dispatch
    wrapper.registry = MappingProxyType(registry)
    wrapper._clear_cache = dispatch_cache.clear
    update_wrapper(wrapper, func)
    return wrapper
---|