diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 0000000..21c125c --- /dev/null +++ b/.gitattributes @@ -0,0 +1,11 @@ +# SPDX-FileCopyrightText: 2024 Justin Myers for Adafruit Industries +# +# SPDX-License-Identifier: Unlicense + +.py text eol=lf +.rst text eol=lf +.txt text eol=lf +.yaml text eol=lf +.toml text eol=lf +.license text eol=lf +.md text eol=lf diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 70ade69..ff19dde 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,42 +1,21 @@ -# SPDX-FileCopyrightText: 2020 Diego Elio Pettenò +# SPDX-FileCopyrightText: 2024 Justin Myers for Adafruit Industries # # SPDX-License-Identifier: Unlicense repos: - - repo: https://github.com/python/black - rev: 23.3.0 - hooks: - - id: black - - repo: https://github.com/fsfe/reuse-tool - rev: v1.1.2 - hooks: - - id: reuse - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.4.0 + rev: v4.5.0 hooks: - id: check-yaml - id: end-of-file-fixer - id: trailing-whitespace - - repo: https://github.com/pycqa/pylint - rev: v2.17.4 + - repo: https://github.com/astral-sh/ruff-pre-commit + rev: v0.3.4 hooks: - - id: pylint - name: pylint (library code) - types: [python] - args: - - --disable=consider-using-f-string - exclude: "^(docs/|examples/|tests/|setup.py$)" - - id: pylint - name: pylint (example code) - description: Run pylint rules on "examples/*.py" files - types: [python] - files: "^examples/" - args: - - --disable=missing-docstring,invalid-name,consider-using-f-string,duplicate-code - - id: pylint - name: pylint (test code) - description: Run pylint rules on "tests/*.py" files - types: [python] - files: "^tests/" - args: - - --disable=missing-docstring,consider-using-f-string,duplicate-code + - id: ruff-format + - id: ruff + args: ["--fix"] + - repo: https://github.com/fsfe/reuse-tool + rev: v3.0.1 + hooks: + - id: reuse diff --git a/.pylintrc b/.pylintrc deleted file mode 100644 index f945e92..0000000 --- a/.pylintrc +++ /dev/null @@ -1,399 +0,0 @@ -# SPDX-FileCopyrightText: 2017 Scott Shawcroft, written for Adafruit Industries -# -# SPDX-License-Identifier: Unlicense - -[MASTER] - -# A comma-separated list of package or module names from where C extensions may -# be loaded. Extensions are loading into the active Python interpreter and may -# run arbitrary code -extension-pkg-whitelist= - -# Add files or directories to the ignore-list. They should be base names, not -# paths. -ignore=CVS - -# Add files or directories matching the regex patterns to the ignore-list. The -# regex matches against base names, not paths. -ignore-patterns= - -# Python code to execute, usually for sys.path manipulation such as -# pygtk.require(). -#init-hook= - -# Use multiple processes to speed up Pylint. -jobs=1 - -# List of plugins (as comma separated values of python modules names) to load, -# usually to register additional checkers. -load-plugins=pylint.extensions.no_self_use - -# Pickle collected data for later comparisons. -persistent=yes - -# Specify a configuration file. -#rcfile= - -# Allow loading of arbitrary C extensions. Extensions are imported into the -# active Python interpreter and may run arbitrary code. -unsafe-load-any-extension=no - - -[MESSAGES CONTROL] - -# Only show warnings with the listed confidence levels. Leave empty to show -# all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED -confidence= - -# Disable the message, report, category or checker with the given id(s). 
You -# can either give multiple identifiers separated by comma (,) or put this -# option multiple times (only on the command line, not in the configuration -# file where it should appear only once).You can also use "--disable=all" to -# disable everything first and then reenable specific checks. For example, if -# you want to run only the similarities checker, you can use "--disable=all -# --enable=similarities". If you want to run only the classes checker, but have -# no Warning level messages displayed, use"--disable=all --enable=classes -# --disable=W" -# disable=import-error,raw-checker-failed,bad-inline-option,locally-disabled,file-ignored,suppressed-message,useless-suppression,deprecated-pragma,deprecated-str-translate-call -disable=raw-checker-failed,bad-inline-option,locally-disabled,file-ignored,suppressed-message,useless-suppression,deprecated-pragma,import-error,pointless-string-statement,unspecified-encoding - -# Enable the message, report, category or checker with the given id(s). You can -# either give multiple identifier separated by comma (,) or put this option -# multiple time (only on the command line, not in the configuration file where -# it should appear only once). See also the "--disable" option for examples. -enable= - - -[REPORTS] - -# Python expression which should return a note less than 10 (10 is the highest -# note). You have access to the variables errors warning, statement which -# respectively contain the number of errors / warnings messages and the total -# number of statements analyzed. This is used by the global evaluation report -# (RP0004). -evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10) - -# Template used to display messages. This is a python new-style format string -# used to format the message information. See doc for all details -#msg-template= - -# Set the output format. Available formats are text, parseable, colorized, json -# and msvs (visual studio).You can also give a reporter class, eg -# mypackage.mymodule.MyReporterClass. -output-format=text - -# Tells whether to display a full report or only the messages -reports=no - -# Activate the evaluation score. -score=yes - - -[REFACTORING] - -# Maximum number of nested blocks for function / method body -max-nested-blocks=5 - - -[LOGGING] - -# Logging modules to check that the string format arguments are in logging -# function parameter format -logging-modules=logging - - -[SPELLING] - -# Spelling dictionary name. Available dictionaries: none. To make it working -# install python-enchant package. -spelling-dict= - -# List of comma separated words that should not be checked. -spelling-ignore-words= - -# A path to a file that contains private dictionary; one word per line. -spelling-private-dict-file= - -# Tells whether to store unknown words to indicated private dictionary in -# --spelling-private-dict-file option instead of raising a message. -spelling-store-unknown-words=no - - -[MISCELLANEOUS] - -# List of note tags to take in consideration, separated by a comma. -# notes=FIXME,XXX,TODO -notes=FIXME,XXX - - -[TYPECHECK] - -# List of decorators that produce context managers, such as -# contextlib.contextmanager. Add to this list to register other decorators that -# produce valid context managers. -contextmanager-decorators=contextlib.contextmanager - -# List of members which are set dynamically and missed by pylint inference -# system, and so shouldn't trigger E1101 when accessed. Python regular -# expressions are accepted. 
-generated-members= - -# Tells whether missing members accessed in mixin class should be ignored. A -# mixin class is detected if its name ends with "mixin" (case insensitive). -ignore-mixin-members=yes - -# This flag controls whether pylint should warn about no-member and similar -# checks whenever an opaque object is returned when inferring. The inference -# can return multiple potential results while evaluating a Python object, but -# some branches might not be evaluated, which results in partial inference. In -# that case, it might be useful to still emit no-member and other checks for -# the rest of the inferred objects. -ignore-on-opaque-inference=yes - -# List of class names for which member attributes should not be checked (useful -# for classes with dynamically set attributes). This supports the use of -# qualified names. -ignored-classes=optparse.Values,thread._local,_thread._local - -# List of module names for which member attributes should not be checked -# (useful for modules/projects where namespaces are manipulated during runtime -# and thus existing member attributes cannot be deduced by static analysis. It -# supports qualified module names, as well as Unix pattern matching. -ignored-modules=board - -# Show a hint with possible names when a member name was not found. The aspect -# of finding the hint is based on edit distance. -missing-member-hint=yes - -# The minimum edit distance a name should have in order to be considered a -# similar match for a missing member name. -missing-member-hint-distance=1 - -# The total number of similar names that should be taken in consideration when -# showing a hint for a missing member. -missing-member-max-choices=1 - - -[VARIABLES] - -# List of additional names supposed to be defined in builtins. Remember that -# you should avoid to define new builtins when possible. -additional-builtins= - -# Tells whether unused global variables should be treated as a violation. -allow-global-unused-variables=yes - -# List of strings which can identify a callback function by name. A callback -# name must start or end with one of those strings. -callbacks=cb_,_cb - -# A regular expression matching the name of dummy variables (i.e. expectedly -# not used). -dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_ - -# Argument names that match this expression will be ignored. Default to name -# with leading underscore -ignored-argument-names=_.*|^ignored_|^unused_ - -# Tells whether we should check for unused import in __init__ files. -init-import=no - -# List of qualified module names which can have objects that can redefine -# builtins. -redefining-builtins-modules=six.moves,future.builtins - - -[FORMAT] - -# Expected format of line ending, e.g. empty (any line ending), LF or CRLF. -# expected-line-ending-format= -expected-line-ending-format=LF - -# Regexp for a line that is allowed to be longer than the limit. -ignore-long-lines=^\s*(# )??$ - -# Number of spaces of indent required inside a hanging or continued line. -indent-after-paren=4 - -# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 -# tab). -indent-string=' ' - -# Maximum number of characters on a single line. -max-line-length=100 - -# Maximum number of lines in a module -max-module-lines=1000 - -# Allow the body of a class to be on the same line as the declaration if body -# contains single statement. -single-line-class-stmt=no - -# Allow the body of an if to be on the same line as the test if there is no -# else. 
-single-line-if-stmt=no - - -[SIMILARITIES] - -# Ignore comments when computing similarities. -ignore-comments=yes - -# Ignore docstrings when computing similarities. -ignore-docstrings=yes - -# Ignore imports when computing similarities. -ignore-imports=yes - -# Minimum lines number of a similarity. -min-similarity-lines=12 - - -[BASIC] - -# Regular expression matching correct argument names -argument-rgx=(([a-z][a-z0-9_]{2,30})|(_[a-z0-9_]*))$ - -# Regular expression matching correct attribute names -attr-rgx=(([a-z][a-z0-9_]{2,30})|(_[a-z0-9_]*))$ - -# Bad variable names which should always be refused, separated by a comma -bad-names=foo,bar,baz,toto,tutu,tata - -# Regular expression matching correct class attribute names -class-attribute-rgx=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$ - -# Regular expression matching correct class names -# class-rgx=[A-Z_][a-zA-Z0-9]+$ -class-rgx=[A-Z_][a-zA-Z0-9_]+$ - -# Regular expression matching correct constant names -const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$ - -# Minimum line length for functions/classes that require docstrings, shorter -# ones are exempt. -docstring-min-length=-1 - -# Regular expression matching correct function names -function-rgx=(([a-z][a-z0-9_]{2,30})|(_[a-z0-9_]*))$ - -# Good variable names which should always be accepted, separated by a comma -# good-names=i,j,k,ex,Run,_ -good-names=r,g,b,w,i,j,k,n,x,y,z,ex,ok,Run,_ - -# Include a hint for the correct naming format with invalid-name -include-naming-hint=no - -# Regular expression matching correct inline iteration names -inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$ - -# Regular expression matching correct method names -method-rgx=(([a-z][a-z0-9_]{2,30})|(_[a-z0-9_]*))$ - -# Regular expression matching correct module names -module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$ - -# Colon-delimited sets of names that determine each other's naming style when -# the name regexes allow several styles. -name-group= - -# Regular expression which should only match function or class names that do -# not require a docstring. -no-docstring-rgx=^_ - -# List of decorators that produce properties, such as abc.abstractproperty. Add -# to this list to register other decorators that produce valid properties. -property-classes=abc.abstractproperty - -# Regular expression matching correct variable names -variable-rgx=(([a-z][a-z0-9_]{2,30})|(_[a-z0-9_]*))$ - - -[IMPORTS] - -# Allow wildcard imports from modules that define __all__. -allow-wildcard-with-all=no - -# Analyse import fallback blocks. This can be used to support both Python 2 and -# 3 compatible code, which means that the block might have code that exists -# only in one or another interpreter, leading to false positives when analysed. -analyse-fallback-blocks=no - -# Deprecated modules which should not be used, separated by a comma -deprecated-modules=optparse,tkinter.tix - -# Create a graph of external dependencies in the given file (report RP0402 must -# not be disabled) -ext-import-graph= - -# Create a graph of every (i.e. internal and external) dependencies in the -# given file (report RP0402 must not be disabled) -import-graph= - -# Create a graph of internal dependencies in the given file (report RP0402 must -# not be disabled) -int-import-graph= - -# Force import order to recognize a module as part of the standard -# compatibility libraries. -known-standard-library= - -# Force import order to recognize a module as part of a third party library. -known-third-party=enchant - - -[CLASSES] - -# List of method names used to declare (i.e. 
assign) instance attributes. -defining-attr-methods=__init__,__new__,setUp - -# List of member names, which should be excluded from the protected access -# warning. -exclude-protected=_asdict,_fields,_replace,_source,_make - -# List of valid names for the first argument in a class method. -valid-classmethod-first-arg=cls - -# List of valid names for the first argument in a metaclass class method. -valid-metaclass-classmethod-first-arg=mcs - - -[DESIGN] - -# Maximum number of arguments for function / method -max-args=5 - -# Maximum number of attributes for a class (see R0902). -# max-attributes=7 -max-attributes=11 - -# Maximum number of boolean expressions in a if statement -max-bool-expr=5 - -# Maximum number of branch for function / method body -max-branches=12 - -# Maximum number of locals for function / method body -max-locals=15 - -# Maximum number of parents for a class (see R0901). -max-parents=7 - -# Maximum number of public methods for a class (see R0904). -max-public-methods=20 - -# Maximum number of return / yield for function / method body -max-returns=6 - -# Maximum number of statements in function / method body -max-statements=50 - -# Minimum number of public methods for a class (see R0903). -min-public-methods=1 - - -[EXCEPTIONS] - -# Exceptions that will emit a warning when being caught. Defaults to -# "Exception" -overgeneral-exceptions=builtins.Exception diff --git a/.readthedocs.yaml b/.readthedocs.yaml index 33c2a61..88bca9f 100644 --- a/.readthedocs.yaml +++ b/.readthedocs.yaml @@ -8,6 +8,9 @@ # Required version: 2 +sphinx: + configuration: docs/conf.py + build: os: ubuntu-20.04 tools: diff --git a/README.rst b/README.rst index 6dba50a..f686f8f 100644 --- a/README.rst +++ b/README.rst @@ -13,9 +13,9 @@ Introduction :target: https://github.com/adafruit/Adafruit_CircuitPython_IterTools/actions :alt: Build Status -.. image:: https://img.shields.io/badge/code%20style-black-000000.svg - :target: https://github.com/psf/black - :alt: Code Style: Black +.. image:: https://img.shields.io/endpoint?url=https://raw.githubusercontent.com/astral-sh/ruff/main/assets/badge/v2.json + :target: https://github.com/astral-sh/ruff + :alt: Code Style: Ruff Python's itertools for CircuitPython diff --git a/adafruit_itertools/__init__.py b/adafruit_itertools/__init__.py index f739f28..4afc41b 100644 --- a/adafruit_itertools/__init__.py +++ b/adafruit_itertools/__init__.py @@ -22,14 +22,40 @@ * Adafruit CircuitPython firmware for the supported boards: https://github.com/adafruit/circuitpython/releases """ -# pylint:disable=invalid-name,redefined-builtin,attribute-defined-outside-init -# pylint:disable=stop-iteration-return,anomalous-backslash-in-string __version__ = "0.0.0+auto.0" __repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_Itertools.git" - -def accumulate(iterable, func=lambda x, y: x + y): +try: + from typing import ( + Any, + Callable, + Iterable, + Iterator, + List, + Optional, + Sequence, + Tuple, + TypeVar, + Union, + ) + + from typing_extensions import TypeAlias + + _T = TypeVar("_T") + _Fill = TypeVar("_Fill") + _OptionalFill: TypeAlias = Optional[_Fill] + _N: TypeAlias = Union[int, float, complex] + _Predicate: TypeAlias = Callable[[_T], object] + +except ImportError: + pass + + +def accumulate( + iterable: Iterable[_T], + func: Callable[[_T, _T], _T] = lambda x, y: x + y, # type: ignore[operator] +) -> Iterator[_T]: """Make an iterator that returns accumulated sums, or accumulated results of other binary functions (specified via the optional func argument). 
If func is supplied, it should be a function of two @@ -52,7 +78,7 @@ def accumulate(iterable, func=lambda x, y: x + y): yield acc -def chain(*iterables): +def chain(*iterables: Iterable[_T]) -> Iterator[_T]: """Make an iterator that returns elements from the first iterable until it is exhausted, then proceeds to the next iterable, until all of the iterables are exhausted. Used for treating consecutive sequences as a single sequence. @@ -65,7 +91,7 @@ def chain(*iterables): yield from i -def chain_from_iterable(iterables): +def chain_from_iterable(iterables: Iterable[Iterable[_T]]) -> Iterator[_T]: """Alternate constructor for chain(). Gets chained inputs from a single iterable argument that is evaluated lazily. @@ -78,7 +104,7 @@ def chain_from_iterable(iterables): yield element -def combinations(iterable, r): +def combinations(iterable: Iterable[_T], r: int) -> Iterator[Tuple[_T, ...]]: """Return r length subsequences of elements from the input iterable. Combinations are emitted in lexicographic sort order. So, if the input iterable is sorted, the combination tuples will be produced in sorted order. @@ -113,7 +139,7 @@ def combinations(iterable, r): yield tuple(pool[i] for i in indices) -def combinations_with_replacement(iterable, r): +def combinations_with_replacement(iterable: Iterable[_T], r: int) -> Iterator[Tuple[_T, ...]]: """Return r length subsequences of elements from the input iterable allowing individual elements to be repeated more than once. @@ -147,7 +173,7 @@ def combinations_with_replacement(iterable, r): yield tuple(pool[i] for i in indices) -def compress(data, selectors): +def compress(data: Iterable[_T], selectors: Iterable[Any]) -> Iterable[_T]: """Make an iterator that filters elements from data returning only those that have a corresponding element in selectors that evaluates to True. Stops when either the data or selectors iterables has been exhausted. @@ -160,7 +186,7 @@ def compress(data, selectors): return (d for d, s in zip(data, selectors) if s) -def count(start=0, step=1): +def count(start: _N = 0, step: _N = 1) -> Iterator[_N]: """Make an iterator that returns evenly spaced values starting with number start. Often used as an argument to map() to generate consecutive data points. Also, used with zip() to add sequence numbers. @@ -174,7 +200,7 @@ def count(start=0, step=1): start += step -def cycle(p): +def cycle(p: Iterable[_T]) -> Iterator[_T]: """Make an iterator returning elements from the iterable and saving a copy of each. When the iterable is exhausted, return elements from the saved copy. Repeats indefinitely. @@ -183,7 +209,7 @@ def cycle(p): """ try: - len(p) + len(p) # type: ignore[arg-type] except TypeError: # len() is not defined for this type. Assume it is # a finite iterable so we must cache the elements. @@ -196,7 +222,7 @@ def cycle(p): yield from p -def dropwhile(predicate, iterable): +def dropwhile(predicate: _Predicate[_T], iterable: Iterable[_T]) -> Iterator[_T]: """Make an iterator that drops elements from the iterable as long as the predicate is true; afterwards, returns every element. Note, the iterator does not produce any output until the predicate first becomes false, so it @@ -216,7 +242,7 @@ def dropwhile(predicate, iterable): yield x -def filterfalse(predicate, iterable): +def filterfalse(predicate: Optional[_Predicate[_T]], iterable: Iterable[_T]) -> Iterator[_T]: """Make an iterator that filters elements from iterable returning only those for which the predicate is False. If predicate is None, return the items that are false. 
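For reference, a minimal usage sketch of the generators whose signatures are annotated in the hunks above — illustrative only, not part of this patch, and assuming the package is importable as ``adafruit_itertools``::

    import adafruit_itertools as it

    # Behavior is unchanged by the annotations; these calls mirror CPython's itertools.
    list(it.accumulate([1, 2, 3, 4]))                   # [1, 3, 6, 10]
    list(it.chain("ab", "cd"))                          # ['a', 'b', 'c', 'd']
    list(it.combinations("abc", 2))                     # [('a', 'b'), ('a', 'c'), ('b', 'c')]
    list(it.dropwhile(lambda x: x < 3, [1, 2, 3, 1]))   # [3, 1]
    list(it.filterfalse(lambda x: x % 2, range(6)))     # [0, 2, 4]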
@@ -262,15 +288,21 @@ class groupby: # [k for k, g in groupby('AAAABBBCCDAABBB')] --> A B C D A B # [list(g) for k, g in groupby('AAAABBBCCD')] --> AAAA BBB CC D - def __init__(self, iterable, key=None): + def __init__( + self, + iterable: Iterable[_T], + key: Optional[Callable[[_T], Any]] = None, + ): self.keyfunc = key if key is not None else lambda x: x self.it = iter(iterable) - self.tgtkey = self.currkey = self.currvalue = object() + # Sentinel values, not actually returned during iteration. + self.currvalue: _T = object() # type: ignore[assignment] + self.tgtkey = self.currkey = self.currvalue - def __iter__(self): + def __iter__(self) -> Iterator[Tuple[Any, Iterator[_T]]]: return self - def __next__(self): + def __next__(self) -> Tuple[Any, Iterator[_T]]: self.id = object() while self.currkey == self.tgtkey: self.currvalue = next(self.it) # Exit on StopIteration @@ -278,7 +310,7 @@ def __next__(self): self.tgtkey = self.currkey return (self.currkey, self._grouper(self.tgtkey, self.id)) - def _grouper(self, tgtkey, id): + def _grouper(self, tgtkey: Any, id: object) -> Iterator[_T]: while self.id is id and self.currkey == tgtkey: yield self.currvalue try: @@ -288,7 +320,12 @@ def _grouper(self, tgtkey, id): self.currkey = self.keyfunc(self.currvalue) -def islice(p, start, stop=(), step=1): +def islice( + p: Iterable[_T], + start: int, + stop: Optional[int] = (), # type: ignore[assignment] + step: int = 1, +) -> Iterator[_T]: """Make an iterator that returns selected elements from the iterable. If start is non-zero and stop is unspecified, then the value for start is used as end, and start is taken to be 0. Thus the @@ -316,23 +353,40 @@ def islice(p, start, stop=(), step=1): if stop == (): stop = start start = 0 + if stop is not None and stop < 0: + raise ValueError("stop must be None or >= 0") + if start < 0: + raise ValueError("start must be >= 0") + if step <= 0: + raise ValueError("step must be > 0") + # TODO: optimizing or breaking semantics? if stop is not None and start >= stop: return it = iter(p) for _ in range(start): - next(it) + try: + next(it) + except StopIteration: + return while True: - yield next(it) + try: + val = next(it) + except StopIteration: + return + yield val for _ in range(step - 1): - next(it) + try: + next(it) + except StopIteration: + return start += step if stop is not None and start >= stop: return -def permutations(iterable, r=None): +def permutations(iterable: Iterable[_T], r: Optional[int] = None) -> Iterator[Tuple[_T, ...]]: """Return successive r length permutations of elements in the iterable. If r is not specified or is None, then r defaults to the length of the @@ -375,7 +429,8 @@ def permutations(iterable, r=None): return -def product(*args, r=1): +# def product(*args: Iterable[_T], r: int = 1) -> Iterator[Tuple[_T, ...]]: +def product(*args: Iterable[Any], r: int = 1) -> Iterator[Tuple[Any, ...]]: """Cartesian product of input iterables. Roughly equivalent to nested for-loops in a generator expression. For @@ -399,14 +454,14 @@ def product(*args, r=1): # product('ABCD', 'xy') --> Ax Ay Bx By Cx Cy Dx Dy # product(range(2), repeat=3) --> 000 001 010 011 100 101 110 111 pools = [tuple(pool) for pool in args] * r - result = [[]] + result: List[List[Any]] = [[]] for pool in pools: result = [x + [y] for x in result for y in pool] for prod in result: yield tuple(prod) -def repeat(el, n=None): +def repeat(el: _T, n: Optional[int] = None) -> Iterator[_T]: """Make an iterator that returns object over and over again. 
Runs indefinitely unless the times argument is specified. Used as argument to map() for invariant parameters to the called function. Also used with zip() @@ -424,7 +479,7 @@ def repeat(el, n=None): yield el -def starmap(function, iterable): +def starmap(function: Callable[..., _T], iterable: Iterable[Iterable[Any]]) -> Iterator[_T]: """Make an iterator that computes the function using arguments obtained from the iterable. Used instead of map() when argument parameters are already grouped in tuples from a single iterable (the data has been “pre-zipped”). @@ -439,7 +494,7 @@ def starmap(function, iterable): yield function(*args) -def takewhile(predicate, iterable): +def takewhile(predicate: _Predicate[_T], iterable: Iterable[_T]) -> Iterator[_T]: """Make an iterator that returns elements from the iterable as long as the predicate is true. @@ -455,7 +510,7 @@ def takewhile(predicate, iterable): break -def tee(iterable, n=2): +def tee(iterable: Iterable[_T], n: int = 2) -> Sequence[Iterator[_T]]: """Return n independent iterators from a single iterable. :param iterable: the iterator from which to make iterators. @@ -465,7 +520,7 @@ def tee(iterable, n=2): return [iter(iterable) for _ in range(n)] -def zip_longest(*args, fillvalue=None): +def zip_longest(*args: Iterable[Any], fillvalue: _OptionalFill = None) -> Iterator[Tuple[Any, ...]]: """Make an iterator that aggregates elements from each of the iterables. If the iterables are of uneven length, missing values are filled-in with fillvalue. Iteration continues until the longest @@ -475,7 +530,7 @@ def zip_longest(*args, fillvalue=None): :param fillvalue: value to fill in those missing from shorter iterables """ # zip_longest('ABCD', 'xy', fillvalue='-') --> Ax By C- D- - iterators = [iter(it) for it in args] + iterators: List[Iterator[Any]] = [iter(it) for it in args] num_active = len(iterators) if not num_active: return diff --git a/adafruit_itertools/adafruit_itertools_extras.py b/adafruit_itertools/adafruit_itertools_extras.py index 8a41038..340be03 100644 --- a/adafruit_itertools/adafruit_itertools_extras.py +++ b/adafruit_itertools/adafruit_itertools_extras.py @@ -37,30 +37,57 @@ https://github.com/adafruit/circuitpython/releases """ -# pylint:disable=invalid-name,keyword-arg-before-vararg,relative-beyond-top-level - import adafruit_itertools as it +try: + from typing import ( + Any, + Callable, + Iterable, + Iterator, + List, + Optional, + Tuple, + Type, + TypeVar, + Union, + ) + + from typing_extensions import TypeAlias + + _T = TypeVar("_T") + _N: TypeAlias = Union[int, float, complex] + _Predicate: TypeAlias = Callable[[_T], bool] +except ImportError: + pass + + __version__ = "0.0.0+auto.0" __repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_Itertools.git" -def all_equal(iterable): +def all_equal(iterable: Iterable[Any]) -> bool: """Returns True if all the elements are equal to each other. :param iterable: source of values """ g = it.groupby(iterable) - next(g) # should succeed, value isn't relevant try: - next(g) # should fail: only 1 group + next(g) # value isn't relevant + except StopIteration: + # Empty iterable, return True to match cpython behavior. + return True + try: + next(g) + # more than one group, so we have different elements. return False except StopIteration: + # Only one group - all elements must be equal. return True -def dotproduct(vec1, vec2): +def dotproduct(vec1: Iterable[_N], vec2: Iterable[_N]) -> _N: """Compute the dot product of two vectors. 
:param vec1: the first vector @@ -71,7 +98,11 @@ def dotproduct(vec1, vec2): return sum(map(lambda x, y: x * y, vec1, vec2)) -def first_true(iterable, default=False, pred=None): +def first_true( + iterable: Iterable[_T], + default: Union[bool, _T] = False, + pred: Optional[_Predicate[_T]] = None, +) -> Union[bool, _T]: """Returns the first true value in the iterable. If no true value is found, returns *default* @@ -94,7 +125,7 @@ def first_true(iterable, default=False, pred=None): return default -def flatten(iterable_of_iterables): +def flatten(iterable_of_iterables: Iterable[Iterable[_T]]) -> Iterator[_T]: """Flatten one level of nesting. :param iterable_of_iterables: a sequence of iterables to flatten @@ -104,7 +135,9 @@ def flatten(iterable_of_iterables): return it.chain_from_iterable(iterable_of_iterables) -def grouper(iterable, n, fillvalue=None): +def grouper( + iterable: Iterable[_T], n: int, fillvalue: Optional[_T] = None +) -> Iterator[Tuple[_T, ...]]: """Collect data into fixed-length chunks or blocks. :param iterable: source of values @@ -118,7 +151,7 @@ def grouper(iterable, n, fillvalue=None): return it.zip_longest(*args, fillvalue=fillvalue) -def iter_except(func, exception): +def iter_except(func: Callable[[], _T], exception: Type[BaseException]) -> Iterator[_T]: """Call a function repeatedly, yielding the results, until exception is raised. Converts a call-until-exception interface to an iterator interface. @@ -143,7 +176,7 @@ def iter_except(func, exception): pass -def ncycles(iterable, n): +def ncycles(iterable: Iterable[_T], n: int) -> Iterator[_T]: """Returns the sequence elements a number of times. :param iterable: the source of values @@ -153,7 +186,7 @@ def ncycles(iterable, n): return it.chain_from_iterable(it.repeat(tuple(iterable), n)) -def nth(iterable, n, default=None): +def nth(iterable: Iterable[_T], n: int, default: Optional[_T] = None) -> Optional[_T]: """Returns the nth item or a default value. :param iterable: the source of values @@ -166,7 +199,7 @@ def nth(iterable, n, default=None): return default -def padnone(iterable): +def padnone(iterable: Iterable[_T]) -> Iterator[Optional[_T]]: """Returns the sequence elements and then returns None indefinitely. Useful for emulating the behavior of the built-in map() function. @@ -177,13 +210,17 @@ def padnone(iterable): return it.chain(iterable, it.repeat(None)) -def pairwise(iterable): - """Pair up valuesin the iterable. +def pairwise(iterable: Iterable[_T]) -> Iterator[Tuple[_T, _T]]: + """Return successive overlapping pairs from the iterable. + + The number of tuples from the output will be one fewer than the + number of values in the input. It will be empty if the input has + fewer than two values. :param iterable: source of values """ - # pairwise(range(11)) -> (1, 2), (3, 4), (5, 6), (7, 8), (9, 10) + # pairwise(range(5)) -> (0, 1), (1, 2), (2, 3), (3, 4) a, b = it.tee(iterable) try: next(b) @@ -192,7 +229,7 @@ def pairwise(iterable): return zip(a, b) -def partition(pred, iterable): +def partition(pred: _Predicate[_T], iterable: Iterable[_T]) -> Tuple[Iterator[_T], Iterator[_T]]: """Use a predicate to partition entries into false entries and true entries. 
:param pred: the predicate that divides the values @@ -204,7 +241,7 @@ def partition(pred, iterable): return it.filterfalse(pred, t1), filter(pred, t2) -def prepend(value, iterator): +def prepend(value: _T, iterator: Iterable[_T]) -> Iterator[_T]: """Prepend a single value in front of an iterator :param value: the value to prepend @@ -215,7 +252,7 @@ def prepend(value, iterator): return it.chain([value], iterator) -def quantify(iterable, pred=bool): +def quantify(iterable: Iterable[_T], pred: _Predicate[_T] = bool) -> int: """Count how many times the predicate is true. :param iterable: source of values @@ -227,7 +264,7 @@ def quantify(iterable, pred=bool): return sum(map(pred, iterable)) -def repeatfunc(func, times=None, *args): +def repeatfunc(func: Callable[..., _T], times: Optional[int] = None, *args: Any) -> Iterator[_T]: """Repeat calls to func with specified arguments. Example: repeatfunc(random.random) @@ -242,7 +279,7 @@ def repeatfunc(func, times=None, *args): return it.starmap(func, it.repeat(args, times)) -def roundrobin(*iterables): +def roundrobin(*iterables: Iterable[_T]) -> Iterator[_T]: """Return an iterable created by repeatedly picking value from each argument in order. @@ -263,18 +300,19 @@ def roundrobin(*iterables): nexts = it.cycle(it.islice(nexts, num_active)) -def tabulate(function, start=0): - """Apply a function to a sequence of consecutive integers. +def tabulate(function: Callable[[int], int], start: int = 0) -> Iterator[int]: + """Apply a function to a sequence of consecutive numbers. - :param function: the function of one integer argument + :param function: the function of one numeric argument. :param start: optional value to start at (default is 0) """ # take(5, tabulate(lambda x: x * x))) -> 0 1 4 9 16 - return map(function, it.count(start)) + counter: Iterator[int] = it.count(start) # type: ignore[assignment] + return map(function, counter) -def tail(n, iterable): +def tail(n: int, iterable: Iterable[_T]) -> Iterator[_T]: """Return an iterator over the last n items :param n: how many values to return @@ -294,7 +332,7 @@ def tail(n, iterable): return iter(buf) -def take(n, iterable): +def take(n: int, iterable: Iterable[_T]) -> List[_T]: """Return first n items of the iterable as a list :param n: how many values to take diff --git a/docs/api.rst b/docs/api.rst index cd2ceca..f864f23 100644 --- a/docs/api.rst +++ b/docs/api.rst @@ -4,6 +4,9 @@ .. If your library file(s) are nested in a directory (e.g. /adafruit_foo/foo.py) .. use this format as the module name: "adafruit_foo.foo" +API Reference +############# + .. 
automodule:: adafruit_itertools :members: diff --git a/docs/conf.py b/docs/conf.py index 125d9c8..0f29eba 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -1,12 +1,10 @@ -# -*- coding: utf-8 -*- - # SPDX-FileCopyrightText: 2021 ladyada for Adafruit Industries # # SPDX-License-Identifier: MIT +import datetime import os import sys -import datetime sys.path.insert(0, os.path.abspath("..")) @@ -41,9 +39,7 @@ creation_year = "2019" current_year = str(datetime.datetime.now().year) year_duration = ( - current_year - if current_year == creation_year - else creation_year + " - " + current_year + current_year if current_year == creation_year else creation_year + " - " + current_year ) copyright = year_duration + " Dave Astels" author = "Dave Astels" @@ -97,7 +93,6 @@ import sphinx_rtd_theme html_theme = "sphinx_rtd_theme" -html_theme_path = [sphinx_rtd_theme.get_html_theme_path(), "."] # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, diff --git a/docs/requirements.txt b/docs/requirements.txt index 797aa04..979f568 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -2,5 +2,6 @@ # # SPDX-License-Identifier: Unlicense -sphinx>=4.0.0 +sphinx sphinxcontrib-jquery +sphinx-rtd-theme diff --git a/examples/itertools_simpletest.py b/examples/itertools_simpletest.py index daae496..5739d44 100644 --- a/examples/itertools_simpletest.py +++ b/examples/itertools_simpletest.py @@ -24,13 +24,14 @@ # THE SOFTWARE. import time + +import adafruit_si7021 import board import busio -import adafruit_si7021 + from adafruit_itertools import count from adafruit_itertools.adafruit_itertools_extras import repeatfunc - i2c = busio.I2C(board.SCL, board.SDA) sensor = adafruit_si7021.SI7021(i2c) diff --git a/optional_requirements.txt b/optional_requirements.txt index d4e27c4..1856c06 100644 --- a/optional_requirements.txt +++ b/optional_requirements.txt @@ -1,3 +1,6 @@ # SPDX-FileCopyrightText: 2022 Alec Delaney, for Adafruit Industries # # SPDX-License-Identifier: Unlicense + +# For comparison when running tests +more-itertools diff --git a/ruff.toml b/ruff.toml new file mode 100644 index 0000000..6ff85fb --- /dev/null +++ b/ruff.toml @@ -0,0 +1,109 @@ +# SPDX-FileCopyrightText: 2024 Tim Cocks for Adafruit Industries +# +# SPDX-License-Identifier: MIT + +target-version = "py38" +line-length = 100 + +[lint] +preview = true +select = ["I", "PL", "UP"] + +extend-select = [ + "D419", # empty-docstring + "E501", # line-too-long + "W291", # trailing-whitespace + "PLC0414", # useless-import-alias + "PLC2401", # non-ascii-name + "PLC2801", # unnecessary-dunder-call + "PLC3002", # unnecessary-direct-lambda-call + "E999", # syntax-error + "PLE0101", # return-in-init + "F706", # return-outside-function + "F704", # yield-outside-function + "PLE0116", # continue-in-finally + "PLE0117", # nonlocal-without-binding + "PLE0241", # duplicate-bases + "PLE0302", # unexpected-special-method-signature + "PLE0604", # invalid-all-object + "PLE0605", # invalid-all-format + "PLE0643", # potential-index-error + "PLE0704", # misplaced-bare-raise + "PLE1141", # dict-iter-missing-items + "PLE1142", # await-outside-async + "PLE1205", # logging-too-many-args + "PLE1206", # logging-too-few-args + "PLE1307", # bad-string-format-type + "PLE1310", # bad-str-strip-call + "PLE1507", # invalid-envvar-value + "PLE2502", # bidirectional-unicode + "PLE2510", # invalid-character-backspace + "PLE2512", # invalid-character-sub + "PLE2513", # 
invalid-character-esc + "PLE2514", # invalid-character-nul + "PLE2515", # invalid-character-zero-width-space + "PLR0124", # comparison-with-itself + "PLR0202", # no-classmethod-decorator + "PLR0203", # no-staticmethod-decorator + "UP004", # useless-object-inheritance + "PLR0206", # property-with-parameters + "PLR0904", # too-many-public-methods + "PLR0911", # too-many-return-statements + "PLR0912", # too-many-branches + "PLR0913", # too-many-arguments + "PLR0914", # too-many-locals + "PLR0915", # too-many-statements + "PLR0916", # too-many-boolean-expressions + "PLR1702", # too-many-nested-blocks + "PLR1704", # redefined-argument-from-local + "PLR1711", # useless-return + "C416", # unnecessary-comprehension + "PLR1733", # unnecessary-dict-index-lookup + "PLR1736", # unnecessary-list-index-lookup + + # ruff reports this rule is unstable + #"PLR6301", # no-self-use + + "PLW0108", # unnecessary-lambda + "PLW0120", # useless-else-on-loop + "PLW0127", # self-assigning-variable + "PLW0129", # assert-on-string-literal + "B033", # duplicate-value + "PLW0131", # named-expr-without-context + "PLW0245", # super-without-brackets + "PLW0406", # import-self + "PLW0602", # global-variable-not-assigned + "PLW0603", # global-statement + "PLW0604", # global-at-module-level + + # fails on the try: import typing used by libraries + #"F401", # unused-import + + "F841", # unused-variable + "E722", # bare-except + "PLW0711", # binary-op-exception + "PLW1501", # bad-open-mode + "PLW1508", # invalid-envvar-default + "PLW1509", # subprocess-popen-preexec-fn + "PLW2101", # useless-with-lock + "PLW3301", # nested-min-max +] + +ignore = [ + "PLR2004", # magic-value-comparison + "UP030", # format literals + "PLW1514", # unspecified-encoding + "PLR0913", # too-many-arguments + "PLR0915", # too-many-statements + "PLR0917", # too-many-positional-arguments + "PLR0904", # too-many-public-methods + "PLR0912", # too-many-branches + "PLR0916", # too-many-boolean-expressions + "PLR6301", # could-be-static no-self-use + "PLC0415", # import outside toplevel + "PLC2701", # private import + "UP028", # for loop yield +] + +[format] +line-ending = "lf" diff --git a/tests/README.rst b/tests/README.rst new file mode 100644 index 0000000..c91f434 --- /dev/null +++ b/tests/README.rst @@ -0,0 +1,37 @@ +.. + SPDX-FileCopyrightText: KB Sriram + SPDX-License-Identifier: MIT +.. + +Itertools Tests +=============== + +These tests run under CPython, and are intended to verify that the +Adafruit library functions return the same outputs compared to ones in +the standard `itertools` module, and also to exercise some type +annotations. + +These tests run automatically from the standard `circuitpython github +workflow `_. To run them manually, first install these packages +if necessary:: + + $ pip3 install pytest + +Then ensure you're in the *root* directory of the repository and run +the following command:: + + $ python -m pytest + +Type annotation tests don't run automatically at this point. But to +verify type-related issues manually, first install these packages if +necessary:: + + $ pip3 install mypy + +Then ensure you're in the *root* directory of the repository and run +the following command:: + + $ mypy --warn-unused-ignores --disallow-untyped-defs tests + + +.. 
_wf: https://github.com/adafruit/workflows-circuitpython-libs/blob/6e1562eaabced4db1bd91173b698b1cc1dfd35ab/build/action.yml#L78-L84 diff --git a/tests/test_itertools.py b/tests/test_itertools.py new file mode 100644 index 0000000..105e06d --- /dev/null +++ b/tests/test_itertools.py @@ -0,0 +1,447 @@ +# SPDX-FileCopyrightText: KB Sriram +# SPDX-License-Identifier: MIT + +import itertools as it +from typing import Any, Callable, Iterator, Optional, Sequence, Tuple, TypeVar, Union + +import pytest + +import adafruit_itertools as ait + +_K = TypeVar("_K") +_T = TypeVar("_T") + + +def _take(n: int, iterator: Iterator[_T]) -> Sequence[_T]: + """Extract the first n elements from a long/infinite iterator.""" + return [v for _, v in zip(range(n), iterator)] + + +@pytest.mark.parametrize( + "seq, func", + [ + ([1, 2, 3, 4], lambda a, x: a - x), + ([], lambda a, _: a), + (["abc", "def"], lambda a, x: a + x), + ("abc", lambda a, x: a + x), + ], +) +def test_accumulate_with(seq: Sequence[_T], func: Callable[[_T, _T], _T]) -> None: + x: Sequence[_T] = list(it.accumulate(seq, func)) + y: Sequence[_T] = list(ait.accumulate(seq, func)) + assert x == y + + +def test_accumulate_types() -> None: + x_int: Iterator[int] = ait.accumulate([1, 2, 3]) + assert list(x_int) == list(it.accumulate([1, 2, 3])) + + x_bad_type: Iterator[str] = ait.accumulate([1, 2, 3]) # type: ignore[list-item] + assert list(x_bad_type) == list(it.accumulate([1, 2, 3])) + + x_str_f: Iterator[str] = ait.accumulate("abc", lambda a, x: a + x) + assert list(x_str_f) == list(it.accumulate("abc", lambda a, x: a + x)) + + x_bad_arg_f: Iterator[int] = ait.accumulate( + [1, 2], + lambda a, x: a + ord(x), # type: ignore[arg-type] + ) + with pytest.raises(TypeError): + list(x_bad_arg_f) + + # Note: technically, this works and produces [1, "12"]. But the annotated types + # are declared to be more strict, and reject accumulator functions that produce + # mixed types in the result. 
+ inp = [1, 2] + + def _stringify(acc: Union[int, str], item: int) -> str: + return str(acc) + str(item) + + x_mixed_f: Iterator[Union[int, str]] = ait.accumulate(inp, _stringify) # type: ignore[arg-type] + assert [1, "12"] == list(x_mixed_f) + + +@pytest.mark.parametrize( + "arglist, partial", + [ + ([[1, 2], [3, 4]], 1), + ([[3]], 1), + ([[]], 0), + ([[]], 1), + ([[], [None]], 1), + ([[1, "a"], ["b", 2]], 1), + ([[1, 2, 3], [4, 5, 6]], 4), + ], +) +def test_chain_basic(arglist: Sequence[Sequence[_T]], partial: int) -> None: + x: Sequence[_T] = list(ait.chain(*arglist)) + y: Sequence[_T] = list(it.chain(*arglist)) + assert x == y + xit: Iterator[_T] = ait.chain(*arglist) + yit: Iterator[_T] = it.chain(*arglist) + assert _take(partial, xit) == _take(partial, yit) + + +@pytest.mark.parametrize( + "arglist, partial", + [ + ([[1, 2], [3, 4]], 1), + ([[3]], 1), + ([[]], 0), + ([[]], 1), + ([[], [None]], 1), + ([[1, "a"], ["b", 2]], 1), + ([[1, 2, 3], [4, 5, 6]], 4), + ], +) +def test_chain_from_iterable(arglist: Sequence[Sequence[_T]], partial: int) -> None: + x: Sequence[_T] = list(ait.chain_from_iterable(arglist)) + y: Sequence[_T] = list(it.chain.from_iterable(arglist)) + assert x == y + xit: Iterator[_T] = ait.chain_from_iterable(arglist) + yit: Iterator[_T] = it.chain.from_iterable(arglist) + assert _take(partial, xit) == _take(partial, yit) + + +@pytest.mark.parametrize( + "seq, n", + [ + ([1, 2, 3, 4], 2), + ([1, 2, 3, 4], 3), + ([1, 2, 3], 32), + ([1, 2, 3], 0), + ([], 0), + ([], 1), + ], +) +def test_combinations(seq: Sequence[_T], n: int) -> None: + x: Sequence[Tuple[_T, ...]] = list(ait.combinations(seq, n)) + y: Sequence[Tuple[_T, ...]] = list(it.combinations(seq, n)) + assert x == y + + +@pytest.mark.parametrize( + "seq, n", + [ + ([1, 2, 3, 4], 2), + ([1, 2, 3, 4], 3), + ([1, 2, 3], 32), + ([1, 2, 3], 0), + ([], 0), + ([], 1), + ], +) +def test_combo_with_replacement(seq: Sequence[_T], n: int) -> None: + x: Sequence[Tuple[_T, ...]] = list(ait.combinations_with_replacement(seq, n)) + y: Sequence[Tuple[_T, ...]] = list(it.combinations_with_replacement(seq, n)) + assert x == y + + +@pytest.mark.parametrize( + "data, selectors", + [ + ([1, 2, 3, 4, 5], [True, False, True, False, True]), + ([1, 2, 3, 4, 5], [True, "", True, True, ""]), + ([1, 2, 3, 4, 5], [0, 0, None, 0, 0]), + ([1, 2, 3, 4, 5], [1, 1, 1, True, 1]), + ([1, 2, 3, 4, 5], [1, 0, 1]), + ([1, 2, 3, 4, 5], []), + ([1, 2, 3], [1, 1, 0, 0, 0, 0, 0, 0]), + ([], [1, 2, 3]), + ([], []), + ], +) +def test_compress(data: Sequence[int], selectors: Sequence[Any]) -> None: + x: Sequence[int] = list(ait.compress(data, selectors)) + y: Sequence[int] = list(it.compress(data, selectors)) + assert x == y + + +def test_count() -> None: + assert _take(5, it.count()) == _take(5, ait.count()) + for start in range(-10, 10): + assert _take(5, it.count(start)) == _take(5, ait.count(start)) + + for step in range(-10, 10): + assert _take(5, it.count(step=step)) == _take(5, ait.count(step=step)) + + for start in range(-5, 5): + for step in range(-5, 5): + assert _take(10, it.count(start, step)) == _take(10, ait.count(start, step)) + + +@pytest.mark.parametrize( + "seq", + [ + ([]), + ([None]), + ([1, 2]), + ], +) +def test_cycle(seq: Sequence[_T]) -> None: + x: Iterator[_T] = ait.cycle(seq) + y: Iterator[_T] = it.cycle(seq) + assert _take(10, x) == _take(10, y) + + +@pytest.mark.parametrize( + "predicate, seq", + [ + (ord, ""), + (lambda x: x == 42, [1, 2]), + (lambda x: x == 42, [1, 42]), + ], +) +def test_dropwhile(predicate: Callable[[_T], 
object], seq: Sequence[_T]) -> None: + x: Iterator[_T] = ait.dropwhile(predicate, seq) + y: Iterator[_T] = it.dropwhile(predicate, seq) + assert list(x) == list(y) + bad_type: Iterator[int] = ait.dropwhile(ord, [1, 2]) # type: ignore[arg-type] + with pytest.raises(TypeError): + list(bad_type) + + +@pytest.mark.parametrize( + "predicate, seq", + [ + (None, []), + (None, [1, 0, 2]), + (lambda x: x % 2, range(10)), + ], +) +def test_filterfalse(predicate: Optional[Callable[[_T], object]], seq: Sequence[_T]) -> None: + x: Iterator[_T] = ait.filterfalse(predicate, seq) + y: Iterator[_T] = it.filterfalse(predicate, seq) + assert list(x) == list(y) + bad_type: Iterator[str] = ait.filterfalse(ord, [1, 2]) # type: ignore[list-item] + with pytest.raises(TypeError): + list(bad_type) + + +@pytest.mark.parametrize( + "data, key", + [ + ("abcd", ord), + ("", ord), + ("aabbcbbbaaa", ord), + ([(0, 1), (0, 2), (0, 3), (1, 4), (0, 5), (0, 6)], lambda x: x[0]), + ([(0, 1), (0, 2), (0, 3), (1, 4), (0, 5), (0, 6)], max), + ], +) +def test_groupby(data: Sequence[_T], key: Callable[[_T], _K]) -> None: + def _listify(iterable: Iterator[Tuple[_K, Iterator[_T]]]) -> Sequence[Tuple[_K, Sequence[_T]]]: + return [(k, list(group)) for k, group in iterable] + + it_l = _listify(it.groupby(data, key)) + ait_l = _listify(ait.groupby(data, key)) + assert it_l == ait_l + + +def test_groupby_types() -> None: + assert list(ait.groupby([])) == list(it.groupby([])) + assert list(ait.groupby([], key=id)) == list(it.groupby([], key=id)) + assert list(ait.groupby("", ord)) == list(it.groupby("", ord)) + + with pytest.raises(TypeError): + list(ait.groupby("abc", [])) # type: ignore[arg-type] + with pytest.raises(TypeError): + list(ait.groupby("abc", chr)) # type: ignore[arg-type] + with pytest.raises(TypeError): + ait.groupby(None) # type: ignore[arg-type] + + +@pytest.mark.parametrize( + "seq, start", + [ + ("", 0), + ("", 2), + ("ABCDEFG", 0), + ("ABCDEFG", 2), + ("ABCDEFG", 20), + ], +) +def test_islice_start(seq: Sequence[_T], start: int) -> None: + x: Iterator[_T] = ait.islice(seq, start) + y: Iterator[_T] = it.islice(seq, start) + assert list(x) == list(y) + + +@pytest.mark.parametrize( + "seq, start, stop", + [ + ("", 0, 5), + ("", 2, 5), + ("", 0, 0), + ("ABCDEFG", 2, 2), + ("ABCDEFG", 2, 6), + ("ABCDEFG", 2, None), + ("ABCDEFG", 2, 17), + ("ABCDEFG", 20, 30), + ], +) +def test_islice_start_stop(seq: Sequence[_T], start: int, stop: Optional[int]) -> None: + x: Iterator[_T] = ait.islice(seq, start, stop) + y: Iterator[_T] = it.islice(seq, start, stop) + assert list(x) == list(y) + + +@pytest.mark.parametrize( + "seq, start, stop, step", + [ + ("", 0, 5, 3), + ("", 2, 5, 2), + ("", 0, 0, 1), + ("ABCDEFG", 2, 2, 2), + ("ABCDEFG", 2, 6, 3), + ("ABCDEFG", 2, 17, 2), + ("ABCDEFG", 0, None, 2), + ("ABCDEFG", 20, 30, 3), + ("ABCDEFG", 0, None, 3), + ], +) +def test_islice_start_stop_step( + seq: Sequence[_T], start: int, stop: Optional[int], step: int +) -> None: + x: Iterator[_T] = ait.islice(seq, start, stop, step) + y: Iterator[_T] = it.islice(seq, start, stop, step) + assert list(x) == list(y) + + +def test_islice_error() -> None: + with pytest.raises(ValueError): + list(ait.islice("abc", -1)) + with pytest.raises(ValueError): + list(ait.islice("abc", 0, -1)) + with pytest.raises(ValueError): + list(ait.islice("abc", 0, 0, 0)) + + +@pytest.mark.parametrize( + "seq", + [ + "", + "A", + "ABCDEFGH", + ], +) +def test_permutations(seq: Sequence[_T]) -> None: + x: Iterator[Tuple[_T, ...]] = ait.permutations(seq) + y: 
Iterator[Tuple[_T, ...]] = it.permutations(seq) + assert list(x) == list(y) + + for r in range(3): + x = ait.permutations(seq, r) + y = it.permutations(seq, r) + assert list(x) == list(y) + + +@pytest.mark.parametrize( + "seq", + [ + "", + "A", + "ABCDEFGH", + [1, 2, "3", None, 4], + ], +) +def test_product_one(seq: Sequence[object]) -> None: + x: Iterator[Tuple[object, ...]] = ait.product(seq) + y: Iterator[Tuple[object, ...]] = it.product(seq) + assert list(x) == list(y) + + for r in range(3): + x = ait.product(seq, r=r) + y = it.product(seq, repeat=r) + assert list(x) == list(y) + + +@pytest.mark.parametrize( + "seq1, seq2", + [ + ("", []), + ("", [1, 2]), + ("AB", []), + ("ABCDEFGH", [1, 2, 3]), + ], +) +def test_product_two(seq1: Sequence[str], seq2: Sequence[int]) -> None: + x: Iterator[Tuple[str, int]] = ait.product(seq1, seq2) + y: Iterator[Tuple[str, int]] = it.product(seq1, seq2) + assert list(x) == list(y) + + for r in range(3): + x_repeat: Iterator[Tuple[object, ...]] = ait.product(seq1, seq2, r=r) + y_repeat: Iterator[Tuple[object, ...]] = it.product(seq1, seq2, repeat=r) + assert list(x_repeat) == list(y_repeat) + + +@pytest.mark.parametrize( + "element", + ["", None, 5, "abc"], +) +def test_repeat(element: _T) -> None: + x: Iterator[_T] = ait.repeat(element) + y: Iterator[_T] = it.repeat(element) + assert _take(5, x) == _take(5, y) + + for count in range(10): + x = ait.repeat(element, count) + y = it.repeat(element, count) + assert _take(5, x) == _take(5, y) + + +@pytest.mark.parametrize( + "func, seq", + [ + (pow, [(2, 3), (3, 2), (10, 2)]), + (lambda x, y: x + y, [("a", "b"), ("c", "d")]), + ], +) +def test_starmap(func: Callable[[_T, _T], _T], seq: Sequence[Sequence[_T]]) -> None: + x: Iterator[_T] = ait.starmap(func, seq) + y: Iterator[_T] = it.starmap(func, seq) + assert list(x) == list(y) + + +@pytest.mark.parametrize( + "func, seq", + [ + (lambda x: x, []), + (lambda x: x == 3, [1, 2, 3, 2, 3]), + (lambda x: x == 3, [1, 2]), + ], +) +def test_takewhile(func: Callable[[_T], bool], seq: Sequence[_T]) -> None: + x: Iterator[_T] = ait.takewhile(func, seq) + y: Iterator[_T] = it.takewhile(func, seq) + assert list(x) == list(y) + + +@pytest.mark.parametrize( + "seq", + ["", "abc"], +) +def test_tee(seq: Sequence[_T]) -> None: + x: Sequence[Iterator[_T]] = ait.tee(seq) + y: Sequence[Iterator[_T]] = it.tee(seq) + assert [list(v) for v in x] == [list(v) for v in y] + + for n in range(3): + x = ait.tee(seq, n) + y = it.tee(seq, n) + assert [list(v) for v in x] == [list(v) for v in y] + + +@pytest.mark.parametrize( + "seq1, seq2", + [ + ("", []), + ("", [1, 2]), + ("abc", []), + ("abc", [1, 2]), + ], +) +def test_zip_longest(seq1: Sequence[str], seq2: Sequence[int]) -> None: + x: Iterator[Tuple[str, int]] = ait.zip_longest(seq1, seq2) + y: Iterator[Tuple[str, int]] = it.zip_longest(seq1, seq2) + assert list(x) == list(y) diff --git a/tests/test_itertools_extras.py b/tests/test_itertools_extras.py new file mode 100644 index 0000000..f1f838e --- /dev/null +++ b/tests/test_itertools_extras.py @@ -0,0 +1,278 @@ +# SPDX-FileCopyrightText: KB Sriram +# SPDX-License-Identifier: MIT + +from typing import ( + Callable, + Iterator, + Optional, + Sequence, + TypeVar, +) + +import more_itertools as itextras +import pytest +from typing_extensions import TypeAlias + +from adafruit_itertools import adafruit_itertools_extras as aextras + +_K = TypeVar("_K") +_T = TypeVar("_T") +_S = TypeVar("_S") +_Predicate: TypeAlias = Callable[[_T], bool] + + +def _take(n: int, iterator: Iterator[_T]) -> 
Sequence[_T]: + """Extract the first n elements from a long/infinite iterator.""" + return [v for _, v in zip(range(n), iterator)] + + +@pytest.mark.parametrize( + "data", + [ + "aaaa", + "abcd", + "a", + "", + (1, 2), + (3, 3), + ("", False), + (42, True), + ], +) +def test_all_equal(data: Sequence[_T]) -> None: + assert itextras.all_equal(data) == aextras.all_equal(data) + + +@pytest.mark.parametrize( + ("vec1", "vec2"), + [ + ([1, 2], [3, 4]), + ([], []), + ([1], [2, 3]), + ([4, 5], [6]), + ], +) +def test_dotproduct(vec1: Sequence[int], vec2: Sequence[int]) -> None: + assert itextras.dotproduct(vec1, vec2) == aextras.dotproduct(vec1, vec2) + + +@pytest.mark.parametrize( + ("seq", "dflt", "pred"), + [ + ([0, 2], 0, None), + ([], 10, None), + ([False], True, None), + ([1, 2], -1, lambda _: False), + ([0, 1], -1, lambda _: True), + ([], -1, lambda _: True), + ], +) +def test_first_true(seq: Sequence[_T], dflt: _T, pred: Optional[_Predicate[_T]]) -> None: + assert itextras.first_true(seq, dflt, pred) == aextras.first_true(seq, dflt, pred) + + +@pytest.mark.parametrize( + ("seq1", "seq2"), + [ + ("abc", "def"), + ("", "def"), + ("abc", ""), + ("", ""), + ], +) +def test_flatten(seq1: str, seq2: str) -> None: + assert list(itextras.flatten(seq1 + seq2)) == list(aextras.flatten(seq1 + seq2)) + for repeat in range(3): + assert list(itextras.flatten([seq1] * repeat)) == list(aextras.flatten([seq1] * repeat)) + assert list(itextras.flatten([seq2] * repeat)) == list(aextras.flatten([seq2] * repeat)) + + +@pytest.mark.parametrize( + ("seq", "count", "fill"), + [ + ("abc", 3, None), + ("abcd", 3, None), + ("abc", 3, "x"), + ("abcd", 3, "x"), + ("abc", 0, None), + ("", 3, "xy"), + ], +) +def test_grouper(seq: Sequence[str], count: int, fill: Optional[str]) -> None: + assert list(itextras.grouper(seq, count, fillvalue=fill)) == list( + aextras.grouper(seq, count, fillvalue=fill) + ) + + +@pytest.mark.parametrize( + ("data"), + [ + (1, 2, 3), + (), + ], +) +def test_iter_except(data: Sequence[int]) -> None: + assert list(itextras.iter_except(list(data).pop, IndexError)) == list( + aextras.iter_except(list(data).pop, IndexError) + ) + + +@pytest.mark.parametrize( + ("seq", "count"), + [ + ("abc", 4), + ("abc", 0), + ("", 4), + ], +) +def test_ncycles(seq: str, count: int) -> None: + assert list(itextras.ncycles(seq, count)) == list(aextras.ncycles(seq, count)) + + +@pytest.mark.parametrize( + ("seq", "n", "dflt"), + [ + ("abc", 1, None), + ("abc", 10, None), + ("abc", 10, "x"), + ("", 0, None), + ], +) +def test_nth(seq: str, n: int, dflt: Optional[str]) -> None: + assert itextras.nth(seq, n, dflt) == aextras.nth(seq, n, dflt) + + +@pytest.mark.parametrize( + ("seq"), + [ + "abc", + "", + ], +) +def test_padnone(seq: str) -> None: + assert _take(10, itextras.padnone(seq)) == _take(10, aextras.padnone(seq)) + + +@pytest.mark.parametrize( + ("seq"), + [ + (), + (1,), + (1, 2), + (1, 2, 3), + (1, 2, 3, 4), + ], +) +def test_pairwise(seq: Sequence[int]) -> None: + assert list(itextras.pairwise(seq)) == list(aextras.pairwise(seq)) + + +@pytest.mark.parametrize( + ("pred", "seq"), + [ + (lambda x: x % 2, (0, 1, 2, 3)), + (lambda x: x % 2, (0, 2)), + (lambda x: x % 2, ()), + ], +) +def test_partition(pred: _Predicate[int], seq: Sequence[int]) -> None: + # assert list(itextras.partition(pred, seq)) == list(aextras.partition(pred, seq)) + true1, false1 = itextras.partition(pred, seq) + true2, false2 = aextras.partition(pred, seq) + assert list(true1) == list(true2) + assert list(false1) == list(false2) + + 
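+# Illustrative note, not part of this change: partition() splits one iterable
+# into (false_items, true_items) according to the predicate, for example
+#   evens, odds = aextras.partition(lambda x: x % 2, range(5))
+#   assert list(evens) == [0, 2, 4] and list(odds) == [1, 3]
+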
+@pytest.mark.parametrize( + ("value", "seq"), + [ + (1, (2, 3)), + (1, ()), + ], +) +def test_prepend(value: int, seq: Sequence[int]) -> None: + assert list(itextras.prepend(value, seq)) == list(aextras.prepend(value, seq)) + + +@pytest.mark.parametrize( + ("seq", "pred"), + [ + ((0, 1), lambda x: x % 2 == 0), + ((1, 1), lambda x: x % 2 == 0), + ((), lambda x: x % 2 == 0), + ], +) +def test_quantify(seq: Sequence[int], pred: _Predicate[int]) -> None: + assert itextras.quantify(seq) == aextras.quantify(seq) + assert itextras.quantify(seq, pred) == aextras.quantify(seq, pred) + + +@pytest.mark.parametrize( + ("func", "times", "args"), + [ + (lambda: 1, 5, []), + (lambda: 1, 0, []), + (lambda x: x + 1, 10, [3]), + (lambda x, y: x + y, 10, [3, 4]), + ], +) +def test_repeatfunc(func: Callable, times: int, args: Sequence[int]) -> None: + assert _take(5, itextras.repeatfunc(func, None, *args)) == _take( + 5, aextras.repeatfunc(func, None, *args) + ) + assert list(itextras.repeatfunc(func, times, *args)) == list( + aextras.repeatfunc(func, times, *args) + ) + + +@pytest.mark.parametrize( + ("seq1", "seq2"), + [ + ("abc", "def"), + ("a", "bc"), + ("ab", "c"), + ("", "abc"), + ("", ""), + ], +) +def test_roundrobin(seq1: str, seq2: str) -> None: + assert list(itextras.roundrobin(seq1)) == list(aextras.roundrobin(seq1)) + assert list(itextras.roundrobin(seq1, seq2)) == list(aextras.roundrobin(seq1, seq2)) + + +@pytest.mark.parametrize( + ("func", "start"), + [ + (lambda x: 2 * x, 17), + (lambda x: -x, -3), + ], +) +def test_tabulate(func: Callable[[int], int], start: int) -> None: + assert _take(5, itextras.tabulate(func)) == _take(5, aextras.tabulate(func)) + assert _take(5, itextras.tabulate(func, start)) == _take(5, aextras.tabulate(func, start)) + + +@pytest.mark.parametrize( + ("n", "seq"), + [ + (3, "abcdefg"), + (0, "abcdefg"), + (10, "abcdefg"), + (5, ""), + ], +) +def test_tail(n: int, seq: str) -> None: + assert list(itextras.tail(n, seq)) == list(aextras.tail(n, seq)) + + +@pytest.mark.parametrize( + ("n", "seq"), + [ + (3, "abcdefg"), + (0, "abcdefg"), + (10, "abcdefg"), + (5, ""), + ], +) +def test_take(n: int, seq: str) -> None: + assert list(itextras.take(n, seq)) == list(aextras.take(n, seq))
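As a closing illustration — not part of the diff above — the new test modules follow one pattern: run the Adafruit implementation and CPython's ``itertools`` (or ``more-itertools``) side by side and require identical output. A minimal standalone sketch of that pattern, assuming ``adafruit_itertools`` is importable::

    import itertools as it

    import adafruit_itertools as ait


    def check_same(name, ours, theirs):
        # Materialize both iterators so the comparison is element by element.
        assert list(ours) == list(theirs), name + " diverged from itertools"


    check_same("islice", ait.islice("ABCDEFG", 2, 6, 2), it.islice("ABCDEFG", 2, 6, 2))
    check_same("zip_longest", ait.zip_longest("abc", [1, 2]), it.zip_longest("abc", [1, 2]))
    print("ok")

The ruff-based hooks added in ``.pre-commit-config.yaml`` can likewise be exercised locally with ``pre-commit run --all-files``.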