
Python code coverage for Lib/functools.py

Per-line execution counts were not recorded for this file (every count was reported as n/a); the source listing follows.
1n/a"""functools.py - Tools for working with functions and callable objects
2n/a"""
3n/a# Python module wrapper for _functools C module
4n/a# to allow utilities written in Python to be added
5n/a# to the functools module.
6n/a# Written by Nick Coghlan <ncoghlan at gmail.com>,
7n/a# Raymond Hettinger <python at rcn.com>,
8n/a# and Łukasz Langa <lukasz at langa.pl>.
9n/a# Copyright (C) 2006-2013 Python Software Foundation.
10n/a# See C source code for _functools credits/copyright
11n/a
12n/a__all__ = ['update_wrapper', 'wraps', 'WRAPPER_ASSIGNMENTS', 'WRAPPER_UPDATES',
13n/a 'total_ordering', 'cmp_to_key', 'lru_cache', 'reduce', 'partial',
14n/a 'partialmethod', 'singledispatch']
15n/a
16n/atry:
17n/a from _functools import reduce
18n/aexcept ImportError:
19n/a pass
20n/afrom abc import get_cache_token
21n/afrom collections import namedtuple
22n/afrom types import MappingProxyType
23n/afrom weakref import WeakKeyDictionary
24n/afrom reprlib import recursive_repr
25n/atry:
26n/a from _thread import RLock
27n/aexcept ImportError:
28n/a class RLock:
29n/a 'Dummy reentrant lock for builds without threads'
30n/a def __enter__(self): pass
31n/a def __exit__(self, exctype, excinst, exctb): pass
32n/a
33n/a
34n/a################################################################################
35n/a### update_wrapper() and wraps() decorator
36n/a################################################################################
37n/a
38n/a# update_wrapper() and wraps() are tools to help write
39n/a# wrapper functions that can handle naive introspection
40n/a
41n/aWRAPPER_ASSIGNMENTS = ('__module__', '__name__', '__qualname__', '__doc__',
42n/a '__annotations__')
43n/aWRAPPER_UPDATES = ('__dict__',)
44n/adef update_wrapper(wrapper,
45n/a wrapped,
46n/a assigned = WRAPPER_ASSIGNMENTS,
47n/a updated = WRAPPER_UPDATES):
48n/a """Update a wrapper function to look like the wrapped function
49n/a
50n/a wrapper is the function to be updated
51n/a wrapped is the original function
52n/a assigned is a tuple naming the attributes assigned directly
53n/a from the wrapped function to the wrapper function (defaults to
54n/a functools.WRAPPER_ASSIGNMENTS)
55n/a updated is a tuple naming the attributes of the wrapper that
56n/a are updated with the corresponding attribute from the wrapped
57n/a function (defaults to functools.WRAPPER_UPDATES)
58n/a """
59n/a for attr in assigned:
60n/a try:
61n/a value = getattr(wrapped, attr)
62n/a except AttributeError:
63n/a pass
64n/a else:
65n/a setattr(wrapper, attr, value)
66n/a for attr in updated:
67n/a getattr(wrapper, attr).update(getattr(wrapped, attr, {}))
68n/a # Issue #17482: set __wrapped__ last so we don't inadvertently copy it
69n/a # from the wrapped function when updating __dict__
70n/a wrapper.__wrapped__ = wrapped
71n/a # Return the wrapper so this can be used as a decorator via partial()
72n/a return wrapper
73n/a
74n/adef wraps(wrapped,
75n/a assigned = WRAPPER_ASSIGNMENTS,
76n/a updated = WRAPPER_UPDATES):
77n/a """Decorator factory to apply update_wrapper() to a wrapper function
78n/a
79n/a Returns a decorator that invokes update_wrapper() with the decorated
80n/a function as the wrapper argument and the arguments to wraps() as the
81n/a remaining arguments. Default arguments are as for update_wrapper().
82n/a This is a convenience function to simplify applying partial() to
83n/a update_wrapper().
84n/a """
85n/a return partial(update_wrapper, wrapped=wrapped,
86n/a assigned=assigned, updated=updated)
87n/a
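# -- Editorial usage sketch (not part of Lib/functools.py) --------------------
# A minimal example of wraps() in action, assuming only the public API defined
# above; the names log_calls and add are made up for illustration.
#
#     >>> import functools
#     >>> def log_calls(func):
#     ...     @functools.wraps(func)          # copies __name__, __doc__, etc.
#     ...     def inner(*args, **kwargs):
#     ...         return func(*args, **kwargs)
#     ...     return inner
#     >>> @log_calls
#     ... def add(a, b):
#     ...     "Add two numbers."
#     ...     return a + b
#     >>> add.__name__, add.__doc__
#     ('add', 'Add two numbers.')
#     >>> add.__wrapped__(2, 3)               # update_wrapper() also sets __wrapped__
#     5
# ------------------------------------------------------------------------------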

################################################################################
### total_ordering class decorator
################################################################################

# The total ordering functions all invoke the root magic method directly
# rather than using the corresponding operator.  This avoids possible
# infinite recursion that could occur when the operator dispatch logic
# detects a NotImplemented result and then calls a reflected method.

def _gt_from_lt(self, other, NotImplemented=NotImplemented):
    'Return a > b.  Computed by @total_ordering from (not a < b) and (a != b).'
    op_result = self.__lt__(other)
    if op_result is NotImplemented:
        return op_result
    return not op_result and self != other

def _le_from_lt(self, other, NotImplemented=NotImplemented):
    'Return a <= b.  Computed by @total_ordering from (a < b) or (a == b).'
    op_result = self.__lt__(other)
    return op_result or self == other

def _ge_from_lt(self, other, NotImplemented=NotImplemented):
    'Return a >= b.  Computed by @total_ordering from (not a < b).'
    op_result = self.__lt__(other)
    if op_result is NotImplemented:
        return op_result
    return not op_result

def _ge_from_le(self, other, NotImplemented=NotImplemented):
    'Return a >= b.  Computed by @total_ordering from (not a <= b) or (a == b).'
    op_result = self.__le__(other)
    if op_result is NotImplemented:
        return op_result
    return not op_result or self == other

def _lt_from_le(self, other, NotImplemented=NotImplemented):
    'Return a < b.  Computed by @total_ordering from (a <= b) and (a != b).'
    op_result = self.__le__(other)
    if op_result is NotImplemented:
        return op_result
    return op_result and self != other

def _gt_from_le(self, other, NotImplemented=NotImplemented):
    'Return a > b.  Computed by @total_ordering from (not a <= b).'
    op_result = self.__le__(other)
    if op_result is NotImplemented:
        return op_result
    return not op_result

def _lt_from_gt(self, other, NotImplemented=NotImplemented):
    'Return a < b.  Computed by @total_ordering from (not a > b) and (a != b).'
    op_result = self.__gt__(other)
    if op_result is NotImplemented:
        return op_result
    return not op_result and self != other

def _ge_from_gt(self, other, NotImplemented=NotImplemented):
    'Return a >= b.  Computed by @total_ordering from (a > b) or (a == b).'
    op_result = self.__gt__(other)
    return op_result or self == other

def _le_from_gt(self, other, NotImplemented=NotImplemented):
    'Return a <= b.  Computed by @total_ordering from (not a > b).'
    op_result = self.__gt__(other)
    if op_result is NotImplemented:
        return op_result
    return not op_result

def _le_from_ge(self, other, NotImplemented=NotImplemented):
    'Return a <= b.  Computed by @total_ordering from (not a >= b) or (a == b).'
    op_result = self.__ge__(other)
    if op_result is NotImplemented:
        return op_result
    return not op_result or self == other

def _gt_from_ge(self, other, NotImplemented=NotImplemented):
    'Return a > b.  Computed by @total_ordering from (a >= b) and (a != b).'
    op_result = self.__ge__(other)
    if op_result is NotImplemented:
        return op_result
    return op_result and self != other

def _lt_from_ge(self, other, NotImplemented=NotImplemented):
    'Return a < b.  Computed by @total_ordering from (not a >= b).'
    op_result = self.__ge__(other)
    if op_result is NotImplemented:
        return op_result
    return not op_result

_convert = {
    '__lt__': [('__gt__', _gt_from_lt),
               ('__le__', _le_from_lt),
               ('__ge__', _ge_from_lt)],
    '__le__': [('__ge__', _ge_from_le),
               ('__lt__', _lt_from_le),
               ('__gt__', _gt_from_le)],
    '__gt__': [('__lt__', _lt_from_gt),
               ('__ge__', _ge_from_gt),
               ('__le__', _le_from_gt)],
    '__ge__': [('__le__', _le_from_ge),
               ('__gt__', _gt_from_ge),
               ('__lt__', _lt_from_ge)]
}

def total_ordering(cls):
    """Class decorator that fills in missing ordering methods"""
    # Find user-defined comparisons (not those inherited from object).
    roots = [op for op in _convert if getattr(cls, op, None) is not getattr(object, op, None)]
    if not roots:
        raise ValueError('must define at least one ordering operation: < > <= >=')
    root = max(roots)       # prefer __lt__ to __le__ to __gt__ to __ge__
    for opname, opfunc in _convert[root]:
        if opname not in roots:
            opfunc.__name__ = opname
            setattr(cls, opname, opfunc)
    return cls

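# -- Editorial usage sketch (not part of Lib/functools.py) --------------------
# One root comparison plus __eq__ is enough for total_ordering(); the _convert
# table above supplies the rest.  The Version class is hypothetical.
#
#     >>> from functools import total_ordering
#     >>> @total_ordering
#     ... class Version:
#     ...     def __init__(self, n): self.n = n
#     ...     def __eq__(self, other): return self.n == other.n
#     ...     def __lt__(self, other): return self.n < other.n
#     >>> Version(1) <= Version(2), Version(2) >= Version(2)
#     (True, True)
# ------------------------------------------------------------------------------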

################################################################################
### cmp_to_key() function converter
################################################################################

def cmp_to_key(mycmp):
    """Convert a cmp= function into a key= function"""
    class K(object):
        __slots__ = ['obj']
        def __init__(self, obj):
            self.obj = obj
        def __lt__(self, other):
            return mycmp(self.obj, other.obj) < 0
        def __gt__(self, other):
            return mycmp(self.obj, other.obj) > 0
        def __eq__(self, other):
            return mycmp(self.obj, other.obj) == 0
        def __le__(self, other):
            return mycmp(self.obj, other.obj) <= 0
        def __ge__(self, other):
            return mycmp(self.obj, other.obj) >= 0
        __hash__ = None
    return K

try:
    from _functools import cmp_to_key
except ImportError:
    pass

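# -- Editorial usage sketch (not part of Lib/functools.py) --------------------
# cmp_to_key() wraps each element in the K class above so an old-style
# three-way comparison function can drive sorted()/min()/max().  The
# reverse_cmp function is made up for illustration.
#
#     >>> from functools import cmp_to_key
#     >>> def reverse_cmp(a, b):              # negative, zero or positive
#     ...     return (b > a) - (b < a)
#     >>> sorted([3, 1, 2], key=cmp_to_key(reverse_cmp))
#     [3, 2, 1]
# ------------------------------------------------------------------------------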

################################################################################
### partial() argument application
################################################################################

# Purely functional, no descriptor behaviour
class partial:
    """New function with partial application of the given arguments
    and keywords.
    """

    __slots__ = "func", "args", "keywords", "__dict__", "__weakref__"

    def __new__(*args, **keywords):
        if not args:
            raise TypeError("descriptor '__new__' of partial needs an argument")
        if len(args) < 2:
            raise TypeError("type 'partial' takes at least one argument")
        cls, func, *args = args
        if not callable(func):
            raise TypeError("the first argument must be callable")
        args = tuple(args)

        if hasattr(func, "func"):
            args = func.args + args
            tmpkw = func.keywords.copy()
            tmpkw.update(keywords)
            keywords = tmpkw
            del tmpkw
            func = func.func

        self = super(partial, cls).__new__(cls)

        self.func = func
        self.args = args
        self.keywords = keywords
        return self

    def __call__(*args, **keywords):
        if not args:
            raise TypeError("descriptor '__call__' of partial needs an argument")
        self, *args = args
        newkeywords = self.keywords.copy()
        newkeywords.update(keywords)
        return self.func(*self.args, *args, **newkeywords)

    @recursive_repr()
    def __repr__(self):
        qualname = type(self).__qualname__
        args = [repr(self.func)]
        args.extend(repr(x) for x in self.args)
        args.extend(f"{k}={v!r}" for (k, v) in self.keywords.items())
        if type(self).__module__ == "functools":
            return f"functools.{qualname}({', '.join(args)})"
        return f"{qualname}({', '.join(args)})"

    def __reduce__(self):
        return type(self), (self.func,), (self.func, self.args,
               self.keywords or None, self.__dict__ or None)

    def __setstate__(self, state):
        if not isinstance(state, tuple):
            raise TypeError("argument to __setstate__ must be a tuple")
        if len(state) != 4:
            raise TypeError(f"expected 4 items in state, got {len(state)}")
        func, args, kwds, namespace = state
        if (not callable(func) or not isinstance(args, tuple) or
                (kwds is not None and not isinstance(kwds, dict)) or
                (namespace is not None and not isinstance(namespace, dict))):
            raise TypeError("invalid partial state")

        args = tuple(args)  # just in case it's a subclass
        if kwds is None:
            kwds = {}
        elif type(kwds) is not dict:  # XXX does it need to be *exactly* dict?
            kwds = dict(kwds)
        if namespace is None:
            namespace = {}

        self.__dict__ = namespace
        self.func = func
        self.args = args
        self.keywords = kwds

try:
    from _functools import partial
except ImportError:
    pass

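# -- Editorial usage sketch (not part of Lib/functools.py) --------------------
# partial() freezes positional and keyword arguments; __call__ above merges
# them with the arguments supplied later.  int_from_binary is a made-up name.
#
#     >>> from functools import partial
#     >>> int_from_binary = partial(int, base=2)
#     >>> int_from_binary('1010')
#     10
#     >>> int_from_binary.func, int_from_binary.args, int_from_binary.keywords
#     (<class 'int'>, (), {'base': 2})
# ------------------------------------------------------------------------------
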
# Descriptor version
class partialmethod(object):
    """Method descriptor with partial application of the given arguments
    and keywords.

    Supports wrapping existing descriptors and handles non-descriptor
    callables as instance methods.
    """

    def __init__(self, func, *args, **keywords):
        if not callable(func) and not hasattr(func, "__get__"):
            raise TypeError("{!r} is not callable or a descriptor"
                            .format(func))

        # func could be a descriptor like classmethod which isn't callable,
        # so we can't inherit from partial (it verifies func is callable)
        if isinstance(func, partialmethod):
            # flattening is mandatory in order to place cls/self before all
            # other arguments
            # it's also more efficient since only one function will be called
            self.func = func.func
            self.args = func.args + args
            self.keywords = func.keywords.copy()
            self.keywords.update(keywords)
        else:
            self.func = func
            self.args = args
            self.keywords = keywords

    def __repr__(self):
        args = ", ".join(map(repr, self.args))
        keywords = ", ".join("{}={!r}".format(k, v)
                             for k, v in self.keywords.items())
        format_string = "{module}.{cls}({func}, {args}, {keywords})"
        return format_string.format(module=self.__class__.__module__,
                                    cls=self.__class__.__qualname__,
                                    func=self.func,
                                    args=args,
                                    keywords=keywords)

    def _make_unbound_method(self):
        def _method(*args, **keywords):
            call_keywords = self.keywords.copy()
            call_keywords.update(keywords)
            cls_or_self, *rest = args
            call_args = (cls_or_self,) + self.args + tuple(rest)
            return self.func(*call_args, **call_keywords)
        _method.__isabstractmethod__ = self.__isabstractmethod__
        _method._partialmethod = self
        return _method

    def __get__(self, obj, cls):
        get = getattr(self.func, "__get__", None)
        result = None
        if get is not None:
            new_func = get(obj, cls)
            if new_func is not self.func:
                # Assume __get__ returning something new indicates the
                # creation of an appropriate callable
                result = partial(new_func, *self.args, **self.keywords)
                try:
                    result.__self__ = new_func.__self__
                except AttributeError:
                    pass
        if result is None:
            # If the underlying descriptor didn't do anything, treat this
            # like an instance method
            result = self._make_unbound_method().__get__(obj, cls)
        return result

    @property
    def __isabstractmethod__(self):
        return getattr(self.func, "__isabstractmethod__", False)


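# -- Editorial usage sketch (not part of Lib/functools.py) --------------------
# partialmethod() is the descriptor counterpart of partial(): __get__ above
# binds the instance first, then the frozen arguments.  Cell is hypothetical.
#
#     >>> from functools import partialmethod
#     >>> class Cell:
#     ...     def __init__(self): self.alive = False
#     ...     def set_state(self, state): self.alive = state
#     ...     set_alive = partialmethod(set_state, True)
#     ...     set_dead = partialmethod(set_state, False)
#     >>> c = Cell(); c.set_alive(); c.alive
#     True
# ------------------------------------------------------------------------------
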
################################################################################
### LRU Cache function decorator
################################################################################

_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])

class _HashedSeq(list):
    """ This class guarantees that hash() will be called no more than once
        per element.  This is important because the lru_cache() will hash
        the key multiple times on a cache miss.

    """

    __slots__ = 'hashvalue'

    def __init__(self, tup, hash=hash):
        self[:] = tup
        self.hashvalue = hash(tup)

    def __hash__(self):
        return self.hashvalue

def _make_key(args, kwds, typed,
             kwd_mark = (object(),),
             fasttypes = {int, str, frozenset, type(None)},
             tuple=tuple, type=type, len=len):
    """Make a cache key from optionally typed positional and keyword arguments

    The key is constructed in a way that is flat as possible rather than
    as a nested structure that would take more memory.

    If there is only a single argument and its data type is known to cache
    its hash value, then that argument is returned without a wrapper.  This
    saves space and improves lookup speed.

    """
    key = args
    if kwds:
        key += kwd_mark
        for item in kwds.items():
            key += item
    if typed:
        key += tuple(type(v) for v in args)
        if kwds:
            key += tuple(type(v) for v in kwds.values())
    elif len(key) == 1 and type(key[0]) in fasttypes:
        return key[0]
    return _HashedSeq(key)

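# -- Editorial usage sketch (not part of Lib/functools.py) --------------------
# How the private _make_key() helper above flattens call arguments: a single
# argument of a "fast" type is used as the key directly, otherwise the
# positional args, a keyword marker, keyword items and (for typed caches) the
# argument types are concatenated into one _HashedSeq.
#
#     >>> from functools import _make_key, _HashedSeq
#     >>> _make_key((3,), {}, typed=False)        # single int: returned as-is
#     3
#     >>> key = _make_key((3,), {'flag': True}, typed=True)
#     >>> isinstance(key, _HashedSeq)
#     True
# ------------------------------------------------------------------------------
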
def lru_cache(maxsize=128, typed=False):
    """Least-recently-used cache decorator.

    If *maxsize* is set to None, the LRU features are disabled and the cache
    can grow without bound.

    If *typed* is True, arguments of different types will be cached separately.
    For example, f(3.0) and f(3) will be treated as distinct calls with
    distinct results.

    Arguments to the cached function must be hashable.

    View the cache statistics named tuple (hits, misses, maxsize, currsize)
    with f.cache_info().  Clear the cache and statistics with f.cache_clear().
    Access the underlying function with f.__wrapped__.

    See:  http://en.wikipedia.org/wiki/Cache_algorithms#Least_Recently_Used

    """

    # Users should only access the lru_cache through its public API:
    #       cache_info, cache_clear, and f.__wrapped__
    # The internals of the lru_cache are encapsulated for thread safety and
    # to allow the implementation to change (including a possible C version).

    # Early detection of an erroneous call to @lru_cache without any arguments
    # resulting in the inner function being passed to maxsize instead of an
    # integer or None.
    if maxsize is not None and not isinstance(maxsize, int):
        raise TypeError('Expected maxsize to be an integer or None')

    def decorating_function(user_function):
        wrapper = _lru_cache_wrapper(user_function, maxsize, typed, _CacheInfo)
        return update_wrapper(wrapper, user_function)

    return decorating_function

def _lru_cache_wrapper(user_function, maxsize, typed, _CacheInfo):
    # Constants shared by all lru cache instances:
    sentinel = object()          # unique object used to signal cache misses
    make_key = _make_key         # build a key from the function arguments
    PREV, NEXT, KEY, RESULT = 0, 1, 2, 3   # names for the link fields

    cache = {}
    hits = misses = 0
    full = False
    cache_get = cache.get    # bound method to lookup a key or return None
    cache_len = cache.__len__  # get cache size without calling len()
    lock = RLock()           # because linkedlist updates aren't threadsafe
    root = []                # root of the circular doubly linked list
    root[:] = [root, root, None, None]     # initialize by pointing to self

    if maxsize == 0:

        def wrapper(*args, **kwds):
            # No caching -- just a statistics update after a successful call
            nonlocal misses
            result = user_function(*args, **kwds)
            misses += 1
            return result

    elif maxsize is None:

        def wrapper(*args, **kwds):
            # Simple caching without ordering or size limit
            nonlocal hits, misses
            key = make_key(args, kwds, typed)
            result = cache_get(key, sentinel)
            if result is not sentinel:
                hits += 1
                return result
            result = user_function(*args, **kwds)
            cache[key] = result
            misses += 1
            return result

    else:

        def wrapper(*args, **kwds):
            # Size limited caching that tracks accesses by recency
            nonlocal root, hits, misses, full
            key = make_key(args, kwds, typed)
            with lock:
                link = cache_get(key)
                if link is not None:
                    # Move the link to the front of the circular queue
                    link_prev, link_next, _key, result = link
                    link_prev[NEXT] = link_next
                    link_next[PREV] = link_prev
                    last = root[PREV]
                    last[NEXT] = root[PREV] = link
                    link[PREV] = last
                    link[NEXT] = root
                    hits += 1
                    return result
            result = user_function(*args, **kwds)
            with lock:
                if key in cache:
                    # Getting here means that this same key was added to the
                    # cache while the lock was released.  Since the link
                    # update is already done, we need only return the
                    # computed result and update the count of misses.
                    pass
                elif full:
                    # Use the old root to store the new key and result.
                    oldroot = root
                    oldroot[KEY] = key
                    oldroot[RESULT] = result
                    # Empty the oldest link and make it the new root.
                    # Keep a reference to the old key and old result to
                    # prevent their ref counts from going to zero during the
                    # update. That will prevent potentially arbitrary object
                    # clean-up code (i.e. __del__) from running while we're
                    # still adjusting the links.
                    root = oldroot[NEXT]
                    oldkey = root[KEY]
                    oldresult = root[RESULT]
                    root[KEY] = root[RESULT] = None
                    # Now update the cache dictionary.
                    del cache[oldkey]
                    # Save the potentially reentrant cache[key] assignment
                    # for last, after the root and links have been put in
                    # a consistent state.
                    cache[key] = oldroot
                else:
                    # Put result in a new link at the front of the queue.
                    last = root[PREV]
                    link = [last, root, key, result]
                    last[NEXT] = root[PREV] = cache[key] = link
                    # Use the cache_len bound method instead of the len() function
                    # which could potentially be wrapped in an lru_cache itself.
                    full = (cache_len() >= maxsize)
                misses += 1
            return result

    def cache_info():
        """Report cache statistics"""
        with lock:
            return _CacheInfo(hits, misses, maxsize, cache_len())

    def cache_clear():
        """Clear the cache and cache statistics"""
        nonlocal hits, misses, full
        with lock:
            cache.clear()
            root[:] = [root, root, None, None]
            hits = misses = 0
            full = False

    wrapper.cache_info = cache_info
    wrapper.cache_clear = cache_clear
    return wrapper

try:
    from _functools import _lru_cache_wrapper
except ImportError:
    pass

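# -- Editorial usage sketch (not part of Lib/functools.py) --------------------
# The public face of the machinery above: the decorator, its statistics and
# cache_clear().  square is a made-up example function.
#
#     >>> from functools import lru_cache
#     >>> @lru_cache(maxsize=2)
#     ... def square(x):
#     ...     return x * x
#     >>> square(2), square(2), square(3)
#     (4, 4, 9)
#     >>> square.cache_info()
#     CacheInfo(hits=1, misses=2, maxsize=2, currsize=2)
#     >>> square.cache_clear()
#     >>> square.cache_info().currsize
#     0
# ------------------------------------------------------------------------------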

################################################################################
### singledispatch() - single-dispatch generic function decorator
################################################################################

def _c3_merge(sequences):
    """Merges MROs in *sequences* to a single MRO using the C3 algorithm.

    Adapted from http://www.python.org/download/releases/2.3/mro/.

    """
    result = []
    while True:
        sequences = [s for s in sequences if s]   # purge empty sequences
        if not sequences:
            return result
        for s1 in sequences:   # find merge candidates among seq heads
            candidate = s1[0]
            for s2 in sequences:
                if candidate in s2[1:]:
                    candidate = None
                    break      # reject the current head, it appears later
            else:
                break
        if candidate is None:
            raise RuntimeError("Inconsistent hierarchy")
        result.append(candidate)
        # remove the chosen candidate
        for seq in sequences:
            if seq[0] == candidate:
                del seq[0]

def _c3_mro(cls, abcs=None):
    """Computes the method resolution order using extended C3 linearization.

    If no *abcs* are given, the algorithm works exactly like the built-in C3
    linearization used for method resolution.

    If given, *abcs* is a list of abstract base classes that should be inserted
    into the resulting MRO. Unrelated ABCs are ignored and don't end up in the
    result. The algorithm inserts ABCs where their functionality is introduced,
    i.e. issubclass(cls, abc) returns True for the class itself but returns
    False for all its direct base classes. Implicit ABCs for a given class
    (either registered or inferred from the presence of a special method like
    __len__) are inserted directly after the last ABC explicitly listed in the
    MRO of said class. If two implicit ABCs end up next to each other in the
    resulting MRO, their ordering depends on the order of types in *abcs*.

    """
    for i, base in enumerate(reversed(cls.__bases__)):
        if hasattr(base, '__abstractmethods__'):
            boundary = len(cls.__bases__) - i
            break   # Bases up to the last explicit ABC are considered first.
    else:
        boundary = 0
    abcs = list(abcs) if abcs else []
    explicit_bases = list(cls.__bases__[:boundary])
    abstract_bases = []
    other_bases = list(cls.__bases__[boundary:])
    for base in abcs:
        if issubclass(cls, base) and not any(
                issubclass(b, base) for b in cls.__bases__
            ):
            # If *cls* is the class that introduces behaviour described by
            # an ABC *base*, insert said ABC to its MRO.
            abstract_bases.append(base)
    for base in abstract_bases:
        abcs.remove(base)
    explicit_c3_mros = [_c3_mro(base, abcs=abcs) for base in explicit_bases]
    abstract_c3_mros = [_c3_mro(base, abcs=abcs) for base in abstract_bases]
    other_c3_mros = [_c3_mro(base, abcs=abcs) for base in other_bases]
    return _c3_merge(
        [[cls]] +
        explicit_c3_mros + abstract_c3_mros + other_c3_mros +
        [explicit_bases] + [abstract_bases] + [other_bases]
    )

def _compose_mro(cls, types):
    """Calculates the method resolution order for a given class *cls*.

    Includes relevant abstract base classes (with their respective bases) from
    the *types* iterable. Uses a modified C3 linearization algorithm.

    """
    bases = set(cls.__mro__)
    # Remove entries which are already present in the __mro__ or unrelated.
    def is_related(typ):
        return (typ not in bases and hasattr(typ, '__mro__')
                and issubclass(cls, typ))
    types = [n for n in types if is_related(n)]
    # Remove entries which are strict bases of other entries (they will end up
    # in the MRO anyway.
    def is_strict_base(typ):
        for other in types:
            if typ != other and typ in other.__mro__:
                return True
        return False
    types = [n for n in types if not is_strict_base(n)]
    # Subclasses of the ABCs in *types* which are also implemented by
    # *cls* can be used to stabilize ABC ordering.
    type_set = set(types)
    mro = []
    for typ in types:
        found = []
        for sub in typ.__subclasses__():
            if sub not in bases and issubclass(cls, sub):
                found.append([s for s in sub.__mro__ if s in type_set])
        if not found:
            mro.append(typ)
            continue
        # Favor subclasses with the biggest number of useful bases
        found.sort(key=len, reverse=True)
        for sub in found:
            for subcls in sub:
                if subcls not in mro:
                    mro.append(subcls)
    return _c3_mro(cls, abcs=mro)

def _find_impl(cls, registry):
    """Returns the best matching implementation from *registry* for type *cls*.

    Where there is no registered implementation for a specific type, its method
    resolution order is used to find a more generic implementation.

    Note: if *registry* does not contain an implementation for the base
    *object* type, this function may return None.

    """
    mro = _compose_mro(cls, registry.keys())
    match = None
    for t in mro:
        if match is not None:
            # If *match* is an implicit ABC but there is another unrelated,
            # equally matching implicit ABC, refuse the temptation to guess.
            if (t in registry and t not in cls.__mro__
                    and match not in cls.__mro__
                    and not issubclass(match, t)):
                raise RuntimeError("Ambiguous dispatch: {} or {}".format(
                    match, t))
            break
        if t in registry:
            match = t
    return registry.get(match)

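# -- Editorial usage sketch (not part of Lib/functools.py) --------------------
# _find_impl() is what lets a registration on a base class serve its
# subclasses: the composed MRO is walked until a registered type is found.
# Sketch using only the public singledispatch API defined below; describe and
# MyInt are made-up names.
#
#     >>> from functools import singledispatch
#     >>> @singledispatch
#     ... def describe(obj): return 'object'
#     >>> @describe.register(int)
#     ... def _(obj): return 'integer'
#     >>> class MyInt(int): pass
#     >>> describe(MyInt(7))      # no MyInt implementation: falls back via MRO
#     'integer'
#     >>> describe.dispatch(float) is describe.registry[object]
#     True
# ------------------------------------------------------------------------------
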
def singledispatch(func):
    """Single-dispatch generic function decorator.

    Transforms a function into a generic function, which can have different
    behaviours depending upon the type of its first argument. The decorated
    function acts as the default implementation, and additional
    implementations can be registered using the register() attribute of the
    generic function.

    """
    registry = {}
    dispatch_cache = WeakKeyDictionary()
    cache_token = None

    def dispatch(cls):
        """generic_func.dispatch(cls) -> <function implementation>

        Runs the dispatch algorithm to return the best available implementation
        for the given *cls* registered on *generic_func*.

        """
        nonlocal cache_token
        if cache_token is not None:
            current_token = get_cache_token()
            if cache_token != current_token:
                dispatch_cache.clear()
                cache_token = current_token
        try:
            impl = dispatch_cache[cls]
        except KeyError:
            try:
                impl = registry[cls]
            except KeyError:
                impl = _find_impl(cls, registry)
            dispatch_cache[cls] = impl
        return impl

    def register(cls, func=None):
        """generic_func.register(cls, func) -> func

        Registers a new implementation for the given *cls* on a *generic_func*.

        """
        nonlocal cache_token
        if func is None:
            return lambda f: register(cls, f)
        registry[cls] = func
        if cache_token is None and hasattr(cls, '__abstractmethods__'):
            cache_token = get_cache_token()
        dispatch_cache.clear()
        return func

    def wrapper(*args, **kw):
        return dispatch(args[0].__class__)(*args, **kw)

    registry[object] = func
    wrapper.register = register
    wrapper.dispatch = dispatch
    wrapper.registry = MappingProxyType(registry)
    wrapper._clear_cache = dispatch_cache.clear
    update_wrapper(wrapper, func)
    return wrapper
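# -- Editorial usage sketch (not part of Lib/functools.py) --------------------
# Registering an abstract base class exercises the C3/MRO machinery above
# (_compose_mro and the abc cache_token check in dispatch()).  fmt is a
# made-up name.
#
#     >>> from collections.abc import Sequence
#     >>> from functools import singledispatch
#     >>> @singledispatch
#     ... def fmt(obj): return 'plain: {!r}'.format(obj)
#     >>> @fmt.register(Sequence)
#     ... def _(obj): return 'sequence of {} items'.format(len(obj))
#     >>> fmt([1, 2, 3])          # list is a virtual subclass of Sequence
#     'sequence of 3 items'
#     >>> fmt(42)                 # ints fall back to the object default
#     'plain: 42'
# ------------------------------------------------------------------------------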