diff --git a/.venv/Lib/site-packages/sklearn/datasets/tests/data/openml/id_561/api-v1-jdl-dn-cpu-l-2-dv-1.json.gz b/.venv/Lib/site-packages/sklearn/datasets/tests/data/openml/id_561/api-v1-jdl-dn-cpu-l-2-dv-1.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..52ae92392967d187709107d1c1bc9709c085b519 --- /dev/null +++ b/.venv/Lib/site-packages/sklearn/datasets/tests/data/openml/id_561/api-v1-jdl-dn-cpu-l-2-dv-1.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0703b0ae20b9ff75087dc601640ee58f1c2ad6768858ea21a245151da9ba8e4c +size 301 diff --git a/.venv/Lib/site-packages/sklearn/datasets/tests/data/openml/id_61/data-v1-dl-61.arff.gz b/.venv/Lib/site-packages/sklearn/datasets/tests/data/openml/id_61/data-v1-dl-61.arff.gz new file mode 100644 index 0000000000000000000000000000000000000000..25bb1bc7760d28c156677d8d257421b3805299c1 --- /dev/null +++ b/.venv/Lib/site-packages/sklearn/datasets/tests/data/openml/id_61/data-v1-dl-61.arff.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:afe4736924606638984e573235191025d419c545d31dc8874c96b72f5ec5db73 +size 2342 diff --git a/.venv/Lib/site-packages/sympy/plotting/__pycache__/__init__.cpython-39.pyc b/.venv/Lib/site-packages/sympy/plotting/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e5f5b5e3bd9a14c5a9720e52571ccc34b3fa7530 Binary files /dev/null and b/.venv/Lib/site-packages/sympy/plotting/__pycache__/__init__.cpython-39.pyc differ diff --git a/.venv/Lib/site-packages/sympy/plotting/__pycache__/plot.cpython-39.pyc b/.venv/Lib/site-packages/sympy/plotting/__pycache__/plot.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f470e9c729a359ee3dbaa15c4cee111d2eb9786a Binary files /dev/null and b/.venv/Lib/site-packages/sympy/plotting/__pycache__/plot.cpython-39.pyc differ diff --git a/.venv/Lib/site-packages/sympy/plotting/__pycache__/plot_implicit.cpython-39.pyc b/.venv/Lib/site-packages/sympy/plotting/__pycache__/plot_implicit.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..683aaa1aa7548979f7345c50abd557dff1bf764f Binary files /dev/null and b/.venv/Lib/site-packages/sympy/plotting/__pycache__/plot_implicit.cpython-39.pyc differ diff --git a/.venv/Lib/site-packages/sympy/plotting/__pycache__/plotgrid.cpython-39.pyc b/.venv/Lib/site-packages/sympy/plotting/__pycache__/plotgrid.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d0cb745416fa90060eb8cf2eb47fb92c07802106 Binary files /dev/null and b/.venv/Lib/site-packages/sympy/plotting/__pycache__/plotgrid.cpython-39.pyc differ diff --git a/.venv/Lib/site-packages/sympy/plotting/__pycache__/series.cpython-39.pyc b/.venv/Lib/site-packages/sympy/plotting/__pycache__/series.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9ce16a044db1f272f87c5e45e125eeb4cc90391d Binary files /dev/null and b/.venv/Lib/site-packages/sympy/plotting/__pycache__/series.cpython-39.pyc differ diff --git a/.venv/Lib/site-packages/sympy/plotting/__pycache__/textplot.cpython-39.pyc b/.venv/Lib/site-packages/sympy/plotting/__pycache__/textplot.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b5ffca6a6528e137f816e14b61e215509dca3d4e Binary files /dev/null and b/.venv/Lib/site-packages/sympy/plotting/__pycache__/textplot.cpython-39.pyc differ diff --git a/.venv/Lib/site-packages/sympy/plotting/__pycache__/utils.cpython-39.pyc 
b/.venv/Lib/site-packages/sympy/plotting/__pycache__/utils.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5525a24de581c9ace36577a81b2c7791138eff6f Binary files /dev/null and b/.venv/Lib/site-packages/sympy/plotting/__pycache__/utils.cpython-39.pyc differ diff --git a/.venv/Lib/site-packages/sympy/plotting/backends/__pycache__/__init__.cpython-39.pyc b/.venv/Lib/site-packages/sympy/plotting/backends/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5f6eb624cb4a95322cc364f8cd3cca3b35afe720 Binary files /dev/null and b/.venv/Lib/site-packages/sympy/plotting/backends/__pycache__/__init__.cpython-39.pyc differ diff --git a/.venv/Lib/site-packages/sympy/plotting/backends/__pycache__/base_backend.cpython-39.pyc b/.venv/Lib/site-packages/sympy/plotting/backends/__pycache__/base_backend.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f9efc599865b5d0e8bee8b859c3f75697998c612 Binary files /dev/null and b/.venv/Lib/site-packages/sympy/plotting/backends/__pycache__/base_backend.cpython-39.pyc differ diff --git a/.venv/Lib/site-packages/sympy/plotting/backends/matplotlibbackend/__init__.py b/.venv/Lib/site-packages/sympy/plotting/backends/matplotlibbackend/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8623940dadb9272730fdeccc1668374781c2e5cf --- /dev/null +++ b/.venv/Lib/site-packages/sympy/plotting/backends/matplotlibbackend/__init__.py @@ -0,0 +1,5 @@ +from sympy.plotting.backends.matplotlibbackend.matplotlib import ( + MatplotlibBackend, _matplotlib_list +) + +__all__ = ["MatplotlibBackend", "_matplotlib_list"] diff --git a/.venv/Lib/site-packages/sympy/plotting/backends/matplotlibbackend/__pycache__/__init__.cpython-39.pyc b/.venv/Lib/site-packages/sympy/plotting/backends/matplotlibbackend/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..114fb523d701247ff119eb4bc44fc9dde9e1a2ff Binary files /dev/null and b/.venv/Lib/site-packages/sympy/plotting/backends/matplotlibbackend/__pycache__/__init__.cpython-39.pyc differ diff --git a/.venv/Lib/site-packages/sympy/plotting/backends/matplotlibbackend/__pycache__/matplotlib.cpython-39.pyc b/.venv/Lib/site-packages/sympy/plotting/backends/matplotlibbackend/__pycache__/matplotlib.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..620b73df3bb6569d2abe39e3de94009be8393efc Binary files /dev/null and b/.venv/Lib/site-packages/sympy/plotting/backends/matplotlibbackend/__pycache__/matplotlib.cpython-39.pyc differ diff --git a/.venv/Lib/site-packages/sympy/plotting/backends/textbackend/__init__.py b/.venv/Lib/site-packages/sympy/plotting/backends/textbackend/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ca4685e4b7790653a97b712c27b240ade5bb481a --- /dev/null +++ b/.venv/Lib/site-packages/sympy/plotting/backends/textbackend/__init__.py @@ -0,0 +1,3 @@ +from sympy.plotting.backends.textbackend.text import TextBackend + +__all__ = ["TextBackend"] diff --git a/.venv/Lib/site-packages/sympy/plotting/backends/textbackend/__pycache__/__init__.cpython-39.pyc b/.venv/Lib/site-packages/sympy/plotting/backends/textbackend/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ae8a7017398d84b5554fc15b8a593af68af177f9 Binary files /dev/null and b/.venv/Lib/site-packages/sympy/plotting/backends/textbackend/__pycache__/__init__.cpython-39.pyc differ diff 
--git a/.venv/Lib/site-packages/sympy/plotting/backends/textbackend/__pycache__/text.cpython-39.pyc b/.venv/Lib/site-packages/sympy/plotting/backends/textbackend/__pycache__/text.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..360a4251dcbfb35382327c798aa884fdbe1f4098 Binary files /dev/null and b/.venv/Lib/site-packages/sympy/plotting/backends/textbackend/__pycache__/text.cpython-39.pyc differ diff --git a/.venv/Lib/site-packages/sympy/plotting/backends/textbackend/text.py b/.venv/Lib/site-packages/sympy/plotting/backends/textbackend/text.py new file mode 100644 index 0000000000000000000000000000000000000000..0917ec78b3463a929c373c98fdd279d84ce4c9e5 --- /dev/null +++ b/.venv/Lib/site-packages/sympy/plotting/backends/textbackend/text.py @@ -0,0 +1,24 @@ +import sympy.plotting.backends.base_backend as base_backend +from sympy.plotting.series import LineOver1DRangeSeries +from sympy.plotting.textplot import textplot + + +class TextBackend(base_backend.Plot): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + def show(self): + if not base_backend._show: + return + if len(self._series) != 1: + raise ValueError( + 'The TextBackend supports only one graph per Plot.') + elif not isinstance(self._series[0], LineOver1DRangeSeries): + raise ValueError( + 'The TextBackend supports only expressions over a 1D range') + else: + ser = self._series[0] + textplot(ser.expr, ser.start, ser.end) + + def close(self): + pass diff --git a/.venv/Lib/site-packages/sympy/plotting/intervalmath/__init__.py b/.venv/Lib/site-packages/sympy/plotting/intervalmath/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..fb9a6a57f94e931f0c5f5b3dda7b0b6fd31841f4 --- /dev/null +++ b/.venv/Lib/site-packages/sympy/plotting/intervalmath/__init__.py @@ -0,0 +1,12 @@ +from .interval_arithmetic import interval +from .lib_interval import (Abs, exp, log, log10, sin, cos, tan, sqrt, + imin, imax, sinh, cosh, tanh, acosh, asinh, atanh, + asin, acos, atan, ceil, floor, And, Or) + +__all__ = [ + 'interval', + + 'Abs', 'exp', 'log', 'log10', 'sin', 'cos', 'tan', 'sqrt', 'imin', 'imax', + 'sinh', 'cosh', 'tanh', 'acosh', 'asinh', 'atanh', 'asin', 'acos', 'atan', + 'ceil', 'floor', 'And', 'Or', +] diff --git a/.venv/Lib/site-packages/sympy/plotting/intervalmath/__pycache__/__init__.cpython-39.pyc b/.venv/Lib/site-packages/sympy/plotting/intervalmath/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..72ddc2e42f582a5ea13d3aee3b0b10699585027b Binary files /dev/null and b/.venv/Lib/site-packages/sympy/plotting/intervalmath/__pycache__/__init__.cpython-39.pyc differ diff --git a/.venv/Lib/site-packages/sympy/plotting/intervalmath/__pycache__/interval_arithmetic.cpython-39.pyc b/.venv/Lib/site-packages/sympy/plotting/intervalmath/__pycache__/interval_arithmetic.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c755c590479ac23e35cef09fb7347584ce629169 Binary files /dev/null and b/.venv/Lib/site-packages/sympy/plotting/intervalmath/__pycache__/interval_arithmetic.cpython-39.pyc differ diff --git a/.venv/Lib/site-packages/sympy/plotting/intervalmath/__pycache__/interval_membership.cpython-39.pyc b/.venv/Lib/site-packages/sympy/plotting/intervalmath/__pycache__/interval_membership.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9f042cab023b7dfbc3423daf719df24bc4141c48 Binary files /dev/null and 
b/.venv/Lib/site-packages/sympy/plotting/intervalmath/__pycache__/interval_membership.cpython-39.pyc differ diff --git a/.venv/Lib/site-packages/sympy/plotting/intervalmath/__pycache__/lib_interval.cpython-39.pyc b/.venv/Lib/site-packages/sympy/plotting/intervalmath/__pycache__/lib_interval.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7b31ee813fb79fbfeb949e8811d890e9ca946f0f Binary files /dev/null and b/.venv/Lib/site-packages/sympy/plotting/intervalmath/__pycache__/lib_interval.cpython-39.pyc differ diff --git a/.venv/Lib/site-packages/sympy/plotting/intervalmath/interval_arithmetic.py b/.venv/Lib/site-packages/sympy/plotting/intervalmath/interval_arithmetic.py new file mode 100644 index 0000000000000000000000000000000000000000..fc5c0e2ef118c7cf4f80de53a3590de11130410e --- /dev/null +++ b/.venv/Lib/site-packages/sympy/plotting/intervalmath/interval_arithmetic.py @@ -0,0 +1,413 @@ +""" +Interval Arithmetic for plotting. +This module does not implement interval arithmetic accurately and +hence cannot be used for purposes other than plotting. If you want +to use interval arithmetic, use mpmath's interval arithmetic. + +The module implements interval arithmetic using numpy and +python floating points. The rounding up and down is not handled +and hence this is not an accurate implementation of interval +arithmetic. + +The module uses numpy for speed which cannot be achieved with mpmath. +""" + +# Q: Why use numpy? Why not simply use mpmath's interval arithmetic? +# A: mpmath's interval arithmetic simulates a floating point unit +# and hence is slow, while numpy evaluations are orders of magnitude +# faster. + +# Q: Why create a separate class for intervals? Why not use SymPy's +# Interval Sets? +# A: The functionalities that will be required for plotting is quite +# different from what Interval Sets implement. + +# Q: Why is rounding up and down according to IEEE754 not handled? +# A: It is not possible to do it in both numpy and python. An external +# library has to used, which defeats the whole purpose i.e., speed. Also +# rounding is handled for very few functions in those libraries. + +# Q Will my plots be affected? +# A It will not affect most of the plots. The interval arithmetic +# module based suffers the same problems as that of floating point +# arithmetic. + +from sympy.core.numbers import int_valued +from sympy.core.logic import fuzzy_and +from sympy.simplify.simplify import nsimplify + +from .interval_membership import intervalMembership + + +class interval: + """ Represents an interval containing floating points as start and + end of the interval + The is_valid variable tracks whether the interval obtained as the + result of the function is in the domain and is continuous. + - True: Represents the interval result of a function is continuous and + in the domain of the function. + - False: The interval argument of the function was not in the domain of + the function, hence the is_valid of the result interval is False + - None: The function was not continuous over the interval or + the function's argument interval is partly in the domain of the + function + + A comparison between an interval and a real number, or a + comparison between two intervals may return ``intervalMembership`` + of two 3-valued logic values. 
+ """ + + def __init__(self, *args, is_valid=True, **kwargs): + self.is_valid = is_valid + if len(args) == 1: + if isinstance(args[0], interval): + self.start, self.end = args[0].start, args[0].end + else: + self.start = float(args[0]) + self.end = float(args[0]) + elif len(args) == 2: + if args[0] < args[1]: + self.start = float(args[0]) + self.end = float(args[1]) + else: + self.start = float(args[1]) + self.end = float(args[0]) + + else: + raise ValueError("interval takes a maximum of two float values " + "as arguments") + + @property + def mid(self): + return (self.start + self.end) / 2.0 + + @property + def width(self): + return self.end - self.start + + def __repr__(self): + return "interval(%f, %f)" % (self.start, self.end) + + def __str__(self): + return "[%f, %f]" % (self.start, self.end) + + def __lt__(self, other): + if isinstance(other, (int, float)): + if self.end < other: + return intervalMembership(True, self.is_valid) + elif self.start > other: + return intervalMembership(False, self.is_valid) + else: + return intervalMembership(None, self.is_valid) + + elif isinstance(other, interval): + valid = fuzzy_and([self.is_valid, other.is_valid]) + if self.end < other. start: + return intervalMembership(True, valid) + if self.start > other.end: + return intervalMembership(False, valid) + return intervalMembership(None, valid) + else: + return NotImplemented + + def __gt__(self, other): + if isinstance(other, (int, float)): + if self.start > other: + return intervalMembership(True, self.is_valid) + elif self.end < other: + return intervalMembership(False, self.is_valid) + else: + return intervalMembership(None, self.is_valid) + elif isinstance(other, interval): + return other.__lt__(self) + else: + return NotImplemented + + def __eq__(self, other): + if isinstance(other, (int, float)): + if self.start == other and self.end == other: + return intervalMembership(True, self.is_valid) + if other in self: + return intervalMembership(None, self.is_valid) + else: + return intervalMembership(False, self.is_valid) + + if isinstance(other, interval): + valid = fuzzy_and([self.is_valid, other.is_valid]) + if self.start == other.start and self.end == other.end: + return intervalMembership(True, valid) + elif self.__lt__(other)[0] is not None: + return intervalMembership(False, valid) + else: + return intervalMembership(None, valid) + else: + return NotImplemented + + def __ne__(self, other): + if isinstance(other, (int, float)): + if self.start == other and self.end == other: + return intervalMembership(False, self.is_valid) + if other in self: + return intervalMembership(None, self.is_valid) + else: + return intervalMembership(True, self.is_valid) + + if isinstance(other, interval): + valid = fuzzy_and([self.is_valid, other.is_valid]) + if self.start == other.start and self.end == other.end: + return intervalMembership(False, valid) + if not self.__lt__(other)[0] is None: + return intervalMembership(True, valid) + return intervalMembership(None, valid) + else: + return NotImplemented + + def __le__(self, other): + if isinstance(other, (int, float)): + if self.end <= other: + return intervalMembership(True, self.is_valid) + if self.start > other: + return intervalMembership(False, self.is_valid) + else: + return intervalMembership(None, self.is_valid) + + if isinstance(other, interval): + valid = fuzzy_and([self.is_valid, other.is_valid]) + if self.end <= other.start: + return intervalMembership(True, valid) + if self.start > other.end: + return intervalMembership(False, valid) + return 
intervalMembership(None, valid) + else: + return NotImplemented + + def __ge__(self, other): + if isinstance(other, (int, float)): + if self.start >= other: + return intervalMembership(True, self.is_valid) + elif self.end < other: + return intervalMembership(False, self.is_valid) + else: + return intervalMembership(None, self.is_valid) + elif isinstance(other, interval): + return other.__le__(self) + + def __add__(self, other): + if isinstance(other, (int, float)): + if self.is_valid: + return interval(self.start + other, self.end + other) + else: + start = self.start + other + end = self.end + other + return interval(start, end, is_valid=self.is_valid) + + elif isinstance(other, interval): + start = self.start + other.start + end = self.end + other.end + valid = fuzzy_and([self.is_valid, other.is_valid]) + return interval(start, end, is_valid=valid) + else: + return NotImplemented + + __radd__ = __add__ + + def __sub__(self, other): + if isinstance(other, (int, float)): + start = self.start - other + end = self.end - other + return interval(start, end, is_valid=self.is_valid) + + elif isinstance(other, interval): + start = self.start - other.end + end = self.end - other.start + valid = fuzzy_and([self.is_valid, other.is_valid]) + return interval(start, end, is_valid=valid) + else: + return NotImplemented + + def __rsub__(self, other): + if isinstance(other, (int, float)): + start = other - self.end + end = other - self.start + return interval(start, end, is_valid=self.is_valid) + elif isinstance(other, interval): + return other.__sub__(self) + else: + return NotImplemented + + def __neg__(self): + if self.is_valid: + return interval(-self.end, -self.start) + else: + return interval(-self.end, -self.start, is_valid=self.is_valid) + + def __mul__(self, other): + if isinstance(other, interval): + if self.is_valid is False or other.is_valid is False: + return interval(-float('inf'), float('inf'), is_valid=False) + elif self.is_valid is None or other.is_valid is None: + return interval(-float('inf'), float('inf'), is_valid=None) + else: + inters = [] + inters.append(self.start * other.start) + inters.append(self.end * other.start) + inters.append(self.start * other.end) + inters.append(self.end * other.end) + start = min(inters) + end = max(inters) + return interval(start, end) + elif isinstance(other, (int, float)): + return interval(self.start*other, self.end*other, is_valid=self.is_valid) + else: + return NotImplemented + + __rmul__ = __mul__ + + def __contains__(self, other): + if isinstance(other, (int, float)): + return self.start <= other and self.end >= other + else: + return self.start <= other.start and other.end <= self.end + + def __rtruediv__(self, other): + if isinstance(other, (int, float)): + other = interval(other) + return other.__truediv__(self) + elif isinstance(other, interval): + return other.__truediv__(self) + else: + return NotImplemented + + def __truediv__(self, other): + # Both None and False are handled + if not self.is_valid: + # Don't divide as the value is not valid + return interval(-float('inf'), float('inf'), is_valid=self.is_valid) + if isinstance(other, (int, float)): + if other == 0: + # Divide by zero encountered. 
valid nowhere + return interval(-float('inf'), float('inf'), is_valid=False) + else: + return interval(self.start / other, self.end / other) + + elif isinstance(other, interval): + if other.is_valid is False or self.is_valid is False: + return interval(-float('inf'), float('inf'), is_valid=False) + elif other.is_valid is None or self.is_valid is None: + return interval(-float('inf'), float('inf'), is_valid=None) + else: + # denominator contains both signs, i.e. being divided by zero + # return the whole real line with is_valid = None + if 0 in other: + return interval(-float('inf'), float('inf'), is_valid=None) + + # denominator negative + this = self + if other.end < 0: + this = -this + other = -other + + # denominator positive + inters = [] + inters.append(this.start / other.start) + inters.append(this.end / other.start) + inters.append(this.start / other.end) + inters.append(this.end / other.end) + start = max(inters) + end = min(inters) + return interval(start, end) + else: + return NotImplemented + + def __pow__(self, other): + # Implements only power to an integer. + from .lib_interval import exp, log + if not self.is_valid: + return self + if isinstance(other, interval): + return exp(other * log(self)) + elif isinstance(other, (float, int)): + if other < 0: + return 1 / self.__pow__(abs(other)) + else: + if int_valued(other): + return _pow_int(self, other) + else: + return _pow_float(self, other) + else: + return NotImplemented + + def __rpow__(self, other): + if isinstance(other, (float, int)): + if not self.is_valid: + #Don't do anything + return self + elif other < 0: + if self.width > 0: + return interval(-float('inf'), float('inf'), is_valid=False) + else: + power_rational = nsimplify(self.start) + num, denom = power_rational.as_numer_denom() + if denom % 2 == 0: + return interval(-float('inf'), float('inf'), + is_valid=False) + else: + start = -abs(other)**self.start + end = start + return interval(start, end) + else: + return interval(other**self.start, other**self.end) + elif isinstance(other, interval): + return other.__pow__(self) + else: + return NotImplemented + + def __hash__(self): + return hash((self.is_valid, self.start, self.end)) + + +def _pow_float(inter, power): + """Evaluates an interval raised to a floating point.""" + power_rational = nsimplify(power) + num, denom = power_rational.as_numer_denom() + if num % 2 == 0: + start = abs(inter.start)**power + end = abs(inter.end)**power + if start < 0: + ret = interval(0, max(start, end)) + else: + ret = interval(start, end) + return ret + elif denom % 2 == 0: + if inter.end < 0: + return interval(-float('inf'), float('inf'), is_valid=False) + elif inter.start < 0: + return interval(0, inter.end**power, is_valid=None) + else: + return interval(inter.start**power, inter.end**power) + else: + if inter.start < 0: + start = -abs(inter.start)**power + else: + start = inter.start**power + + if inter.end < 0: + end = -abs(inter.end)**power + else: + end = inter.end**power + + return interval(start, end, is_valid=inter.is_valid) + + +def _pow_int(inter, power): + """Evaluates an interval raised to an integer power""" + power = int(power) + if power & 1: + return interval(inter.start**power, inter.end**power) + else: + if inter.start < 0 and inter.end > 0: + start = 0 + end = max(inter.start**power, inter.end**power) + return interval(start, end) + else: + return interval(inter.start**power, inter.end**power) diff --git a/.venv/Lib/site-packages/sympy/plotting/intervalmath/interval_membership.py 
b/.venv/Lib/site-packages/sympy/plotting/intervalmath/interval_membership.py new file mode 100644 index 0000000000000000000000000000000000000000..c4887c2d96f0d006b95a8e207a4f4a75940aec23 --- /dev/null +++ b/.venv/Lib/site-packages/sympy/plotting/intervalmath/interval_membership.py @@ -0,0 +1,78 @@ +from sympy.core.logic import fuzzy_and, fuzzy_or, fuzzy_not, fuzzy_xor + + +class intervalMembership: + """Represents a boolean expression returned by the comparison of + the interval object. + + Parameters + ========== + + (a, b) : (bool, bool) + The first value determines the comparison as follows: + - True: If the comparison is True throughout the intervals. + - False: If the comparison is False throughout the intervals. + - None: If the comparison is True for some part of the intervals. + + The second value is determined as follows: + - True: If both the intervals in comparison are valid. + - False: If at least one of the intervals is False, else + - None + """ + def __init__(self, a, b): + self._wrapped = (a, b) + + def __getitem__(self, i): + try: + return self._wrapped[i] + except IndexError: + raise IndexError( + "{} must be a valid indexing for the 2-tuple." + .format(i)) + + def __len__(self): + return 2 + + def __iter__(self): + return iter(self._wrapped) + + def __str__(self): + return "intervalMembership({}, {})".format(*self) + __repr__ = __str__ + + def __and__(self, other): + if not isinstance(other, intervalMembership): + raise ValueError( + "The comparison is not supported for {}.".format(other)) + + a1, b1 = self + a2, b2 = other + return intervalMembership(fuzzy_and([a1, a2]), fuzzy_and([b1, b2])) + + def __or__(self, other): + if not isinstance(other, intervalMembership): + raise ValueError( + "The comparison is not supported for {}.".format(other)) + + a1, b1 = self + a2, b2 = other + return intervalMembership(fuzzy_or([a1, a2]), fuzzy_and([b1, b2])) + + def __invert__(self): + a, b = self + return intervalMembership(fuzzy_not(a), b) + + def __xor__(self, other): + if not isinstance(other, intervalMembership): + raise ValueError( + "The comparison is not supported for {}.".format(other)) + + a1, b1 = self + a2, b2 = other + return intervalMembership(fuzzy_xor([a1, a2]), fuzzy_and([b1, b2])) + + def __eq__(self, other): + return self._wrapped == other + + def __ne__(self, other): + return self._wrapped != other diff --git a/.venv/Lib/site-packages/sympy/plotting/intervalmath/lib_interval.py b/.venv/Lib/site-packages/sympy/plotting/intervalmath/lib_interval.py new file mode 100644 index 0000000000000000000000000000000000000000..7549a05820d747ce057892f8df1fbcbc61cc3f43 --- /dev/null +++ b/.venv/Lib/site-packages/sympy/plotting/intervalmath/lib_interval.py @@ -0,0 +1,452 @@ +""" The module contains implemented functions for interval arithmetic.""" +from functools import reduce + +from sympy.plotting.intervalmath import interval +from sympy.external import import_module + + +def Abs(x): + if isinstance(x, (int, float)): + return interval(abs(x)) + elif isinstance(x, interval): + if x.start < 0 and x.end > 0: + return interval(0, max(abs(x.start), abs(x.end)), is_valid=x.is_valid) + else: + return interval(abs(x.start), abs(x.end)) + else: + raise NotImplementedError + +#Monotonic + + +def exp(x): + """evaluates the exponential of an interval""" + np = import_module('numpy') + if isinstance(x, (int, float)): + return interval(np.exp(x), np.exp(x)) + elif isinstance(x, interval): + return interval(np.exp(x.start), np.exp(x.end), is_valid=x.is_valid) + else: + raise 
NotImplementedError + + +#Monotonic +def log(x): + """evaluates the natural logarithm of an interval""" + np = import_module('numpy') + if isinstance(x, (int, float)): + if x <= 0: + return interval(-np.inf, np.inf, is_valid=False) + else: + return interval(np.log(x)) + elif isinstance(x, interval): + if not x.is_valid: + return interval(-np.inf, np.inf, is_valid=x.is_valid) + elif x.end <= 0: + return interval(-np.inf, np.inf, is_valid=False) + elif x.start <= 0: + return interval(-np.inf, np.inf, is_valid=None) + + return interval(np.log(x.start), np.log(x.end)) + else: + raise NotImplementedError + + +#Monotonic +def log10(x): + """evaluates the logarithm to the base 10 of an interval""" + np = import_module('numpy') + if isinstance(x, (int, float)): + if x <= 0: + return interval(-np.inf, np.inf, is_valid=False) + else: + return interval(np.log10(x)) + elif isinstance(x, interval): + if not x.is_valid: + return interval(-np.inf, np.inf, is_valid=x.is_valid) + elif x.end <= 0: + return interval(-np.inf, np.inf, is_valid=False) + elif x.start <= 0: + return interval(-np.inf, np.inf, is_valid=None) + return interval(np.log10(x.start), np.log10(x.end)) + else: + raise NotImplementedError + + +#Monotonic +def atan(x): + """evaluates the tan inverse of an interval""" + np = import_module('numpy') + if isinstance(x, (int, float)): + return interval(np.arctan(x)) + elif isinstance(x, interval): + start = np.arctan(x.start) + end = np.arctan(x.end) + return interval(start, end, is_valid=x.is_valid) + else: + raise NotImplementedError + + +#periodic +def sin(x): + """evaluates the sine of an interval""" + np = import_module('numpy') + if isinstance(x, (int, float)): + return interval(np.sin(x)) + elif isinstance(x, interval): + if not x.is_valid: + return interval(-1, 1, is_valid=x.is_valid) + na, __ = divmod(x.start, np.pi / 2.0) + nb, __ = divmod(x.end, np.pi / 2.0) + start = min(np.sin(x.start), np.sin(x.end)) + end = max(np.sin(x.start), np.sin(x.end)) + if nb - na > 4: + return interval(-1, 1, is_valid=x.is_valid) + elif na == nb: + return interval(start, end, is_valid=x.is_valid) + else: + if (na - 1) // 4 != (nb - 1) // 4: + #sin has max + end = 1 + if (na - 3) // 4 != (nb - 3) // 4: + #sin has min + start = -1 + return interval(start, end) + else: + raise NotImplementedError + + +#periodic +def cos(x): + """Evaluates the cos of an interval""" + np = import_module('numpy') + if isinstance(x, (int, float)): + return interval(np.sin(x)) + elif isinstance(x, interval): + if not (np.isfinite(x.start) and np.isfinite(x.end)): + return interval(-1, 1, is_valid=x.is_valid) + na, __ = divmod(x.start, np.pi / 2.0) + nb, __ = divmod(x.end, np.pi / 2.0) + start = min(np.cos(x.start), np.cos(x.end)) + end = max(np.cos(x.start), np.cos(x.end)) + if nb - na > 4: + #differ more than 2*pi + return interval(-1, 1, is_valid=x.is_valid) + elif na == nb: + #in the same quadarant + return interval(start, end, is_valid=x.is_valid) + else: + if (na) // 4 != (nb) // 4: + #cos has max + end = 1 + if (na - 2) // 4 != (nb - 2) // 4: + #cos has min + start = -1 + return interval(start, end, is_valid=x.is_valid) + else: + raise NotImplementedError + + +def tan(x): + """Evaluates the tan of an interval""" + return sin(x) / cos(x) + + +#Monotonic +def sqrt(x): + """Evaluates the square root of an interval""" + np = import_module('numpy') + if isinstance(x, (int, float)): + if x > 0: + return interval(np.sqrt(x)) + else: + return interval(-np.inf, np.inf, is_valid=False) + elif isinstance(x, interval): + #Outside the 
domain + if x.end < 0: + return interval(-np.inf, np.inf, is_valid=False) + #Partially outside the domain + elif x.start < 0: + return interval(-np.inf, np.inf, is_valid=None) + else: + return interval(np.sqrt(x.start), np.sqrt(x.end), + is_valid=x.is_valid) + else: + raise NotImplementedError + + +def imin(*args): + """Evaluates the minimum of a list of intervals""" + np = import_module('numpy') + if not all(isinstance(arg, (int, float, interval)) for arg in args): + return NotImplementedError + else: + new_args = [a for a in args if isinstance(a, (int, float)) + or a.is_valid] + if len(new_args) == 0: + if all(a.is_valid is False for a in args): + return interval(-np.inf, np.inf, is_valid=False) + else: + return interval(-np.inf, np.inf, is_valid=None) + start_array = [a if isinstance(a, (int, float)) else a.start + for a in new_args] + + end_array = [a if isinstance(a, (int, float)) else a.end + for a in new_args] + return interval(min(start_array), min(end_array)) + + +def imax(*args): + """Evaluates the maximum of a list of intervals""" + np = import_module('numpy') + if not all(isinstance(arg, (int, float, interval)) for arg in args): + return NotImplementedError + else: + new_args = [a for a in args if isinstance(a, (int, float)) + or a.is_valid] + if len(new_args) == 0: + if all(a.is_valid is False for a in args): + return interval(-np.inf, np.inf, is_valid=False) + else: + return interval(-np.inf, np.inf, is_valid=None) + start_array = [a if isinstance(a, (int, float)) else a.start + for a in new_args] + + end_array = [a if isinstance(a, (int, float)) else a.end + for a in new_args] + + return interval(max(start_array), max(end_array)) + + +#Monotonic +def sinh(x): + """Evaluates the hyperbolic sine of an interval""" + np = import_module('numpy') + if isinstance(x, (int, float)): + return interval(np.sinh(x), np.sinh(x)) + elif isinstance(x, interval): + return interval(np.sinh(x.start), np.sinh(x.end), is_valid=x.is_valid) + else: + raise NotImplementedError + + +def cosh(x): + """Evaluates the hyperbolic cos of an interval""" + np = import_module('numpy') + if isinstance(x, (int, float)): + return interval(np.cosh(x), np.cosh(x)) + elif isinstance(x, interval): + #both signs + if x.start < 0 and x.end > 0: + end = max(np.cosh(x.start), np.cosh(x.end)) + return interval(1, end, is_valid=x.is_valid) + else: + #Monotonic + start = np.cosh(x.start) + end = np.cosh(x.end) + return interval(start, end, is_valid=x.is_valid) + else: + raise NotImplementedError + + +#Monotonic +def tanh(x): + """Evaluates the hyperbolic tan of an interval""" + np = import_module('numpy') + if isinstance(x, (int, float)): + return interval(np.tanh(x), np.tanh(x)) + elif isinstance(x, interval): + return interval(np.tanh(x.start), np.tanh(x.end), is_valid=x.is_valid) + else: + raise NotImplementedError + + +def asin(x): + """Evaluates the inverse sine of an interval""" + np = import_module('numpy') + if isinstance(x, (int, float)): + #Outside the domain + if abs(x) > 1: + return interval(-np.inf, np.inf, is_valid=False) + else: + return interval(np.arcsin(x), np.arcsin(x)) + elif isinstance(x, interval): + #Outside the domain + if x.is_valid is False or x.start > 1 or x.end < -1: + return interval(-np.inf, np.inf, is_valid=False) + #Partially outside the domain + elif x.start < -1 or x.end > 1: + return interval(-np.inf, np.inf, is_valid=None) + else: + start = np.arcsin(x.start) + end = np.arcsin(x.end) + return interval(start, end, is_valid=x.is_valid) + + +def acos(x): + """Evaluates the inverse cos of 
an interval""" + np = import_module('numpy') + if isinstance(x, (int, float)): + if abs(x) > 1: + #Outside the domain + return interval(-np.inf, np.inf, is_valid=False) + else: + return interval(np.arccos(x), np.arccos(x)) + elif isinstance(x, interval): + #Outside the domain + if x.is_valid is False or x.start > 1 or x.end < -1: + return interval(-np.inf, np.inf, is_valid=False) + #Partially outside the domain + elif x.start < -1 or x.end > 1: + return interval(-np.inf, np.inf, is_valid=None) + else: + start = np.arccos(x.start) + end = np.arccos(x.end) + return interval(start, end, is_valid=x.is_valid) + + +def ceil(x): + """Evaluates the ceiling of an interval""" + np = import_module('numpy') + if isinstance(x, (int, float)): + return interval(np.ceil(x)) + elif isinstance(x, interval): + if x.is_valid is False: + return interval(-np.inf, np.inf, is_valid=False) + else: + start = np.ceil(x.start) + end = np.ceil(x.end) + #Continuous over the interval + if start == end: + return interval(start, end, is_valid=x.is_valid) + else: + #Not continuous over the interval + return interval(start, end, is_valid=None) + else: + return NotImplementedError + + +def floor(x): + """Evaluates the floor of an interval""" + np = import_module('numpy') + if isinstance(x, (int, float)): + return interval(np.floor(x)) + elif isinstance(x, interval): + if x.is_valid is False: + return interval(-np.inf, np.inf, is_valid=False) + else: + start = np.floor(x.start) + end = np.floor(x.end) + #continuous over the argument + if start == end: + return interval(start, end, is_valid=x.is_valid) + else: + #not continuous over the interval + return interval(start, end, is_valid=None) + else: + return NotImplementedError + + +def acosh(x): + """Evaluates the inverse hyperbolic cosine of an interval""" + np = import_module('numpy') + if isinstance(x, (int, float)): + #Outside the domain + if x < 1: + return interval(-np.inf, np.inf, is_valid=False) + else: + return interval(np.arccosh(x)) + elif isinstance(x, interval): + #Outside the domain + if x.end < 1: + return interval(-np.inf, np.inf, is_valid=False) + #Partly outside the domain + elif x.start < 1: + return interval(-np.inf, np.inf, is_valid=None) + else: + start = np.arccosh(x.start) + end = np.arccosh(x.end) + return interval(start, end, is_valid=x.is_valid) + else: + return NotImplementedError + + +#Monotonic +def asinh(x): + """Evaluates the inverse hyperbolic sine of an interval""" + np = import_module('numpy') + if isinstance(x, (int, float)): + return interval(np.arcsinh(x)) + elif isinstance(x, interval): + start = np.arcsinh(x.start) + end = np.arcsinh(x.end) + return interval(start, end, is_valid=x.is_valid) + else: + return NotImplementedError + + +def atanh(x): + """Evaluates the inverse hyperbolic tangent of an interval""" + np = import_module('numpy') + if isinstance(x, (int, float)): + #Outside the domain + if abs(x) >= 1: + return interval(-np.inf, np.inf, is_valid=False) + else: + return interval(np.arctanh(x)) + elif isinstance(x, interval): + #outside the domain + if x.is_valid is False or x.start >= 1 or x.end <= -1: + return interval(-np.inf, np.inf, is_valid=False) + #partly outside the domain + elif x.start <= -1 or x.end >= 1: + return interval(-np.inf, np.inf, is_valid=None) + else: + start = np.arctanh(x.start) + end = np.arctanh(x.end) + return interval(start, end, is_valid=x.is_valid) + else: + return NotImplementedError + + +#Three valued logic for interval plotting. 
+ +def And(*args): + """Defines the three valued ``And`` behaviour for a 2-tuple of + three valued logic values""" + def reduce_and(cmp_intervala, cmp_intervalb): + if cmp_intervala[0] is False or cmp_intervalb[0] is False: + first = False + elif cmp_intervala[0] is None or cmp_intervalb[0] is None: + first = None + else: + first = True + if cmp_intervala[1] is False or cmp_intervalb[1] is False: + second = False + elif cmp_intervala[1] is None or cmp_intervalb[1] is None: + second = None + else: + second = True + return (first, second) + return reduce(reduce_and, args) + + +def Or(*args): + """Defines the three valued ``Or`` behaviour for a 2-tuple of + three valued logic values""" + def reduce_or(cmp_intervala, cmp_intervalb): + if cmp_intervala[0] is True or cmp_intervalb[0] is True: + first = True + elif cmp_intervala[0] is None or cmp_intervalb[0] is None: + first = None + else: + first = False + + if cmp_intervala[1] is True or cmp_intervalb[1] is True: + second = True + elif cmp_intervala[1] is None or cmp_intervalb[1] is None: + second = None + else: + second = False + return (first, second) + return reduce(reduce_or, args) diff --git a/.venv/Lib/site-packages/sympy/plotting/intervalmath/tests/__init__.py b/.venv/Lib/site-packages/sympy/plotting/intervalmath/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/.venv/Lib/site-packages/sympy/plotting/intervalmath/tests/test_interval_functions.py b/.venv/Lib/site-packages/sympy/plotting/intervalmath/tests/test_interval_functions.py new file mode 100644 index 0000000000000000000000000000000000000000..861c3660df024d3fbec788a027708348e9929655 --- /dev/null +++ b/.venv/Lib/site-packages/sympy/plotting/intervalmath/tests/test_interval_functions.py @@ -0,0 +1,415 @@ +from sympy.external import import_module +from sympy.plotting.intervalmath import ( + Abs, acos, acosh, And, asin, asinh, atan, atanh, ceil, cos, cosh, + exp, floor, imax, imin, interval, log, log10, Or, sin, sinh, sqrt, + tan, tanh, +) + +np = import_module('numpy') +if not np: + disabled = True + + +#requires Numpy. 
Hence included in interval_functions + + +def test_interval_pow(): + a = 2**interval(1, 2) == interval(2, 4) + assert a == (True, True) + a = interval(1, 2)**interval(1, 2) == interval(1, 4) + assert a == (True, True) + a = interval(-1, 1)**interval(0.5, 2) + assert a.is_valid is None + a = interval(-2, -1) ** interval(1, 2) + assert a.is_valid is False + a = interval(-2, -1) ** (1.0 / 2) + assert a.is_valid is False + a = interval(-1, 1)**(1.0 / 2) + assert a.is_valid is None + a = interval(-1, 1)**(1.0 / 3) == interval(-1, 1) + assert a == (True, True) + a = interval(-1, 1)**2 == interval(0, 1) + assert a == (True, True) + a = interval(-1, 1) ** (1.0 / 29) == interval(-1, 1) + assert a == (True, True) + a = -2**interval(1, 1) == interval(-2, -2) + assert a == (True, True) + + a = interval(1, 2, is_valid=False)**2 + assert a.is_valid is False + + a = (-3)**interval(1, 2) + assert a.is_valid is False + a = (-4)**interval(0.5, 0.5) + assert a.is_valid is False + assert ((-3)**interval(1, 1) == interval(-3, -3)) == (True, True) + + a = interval(8, 64)**(2.0 / 3) + assert abs(a.start - 4) < 1e-10 # eps + assert abs(a.end - 16) < 1e-10 + a = interval(-8, 64)**(2.0 / 3) + assert abs(a.start - 4) < 1e-10 # eps + assert abs(a.end - 16) < 1e-10 + + +def test_exp(): + a = exp(interval(-np.inf, 0)) + assert a.start == np.exp(-np.inf) + assert a.end == np.exp(0) + a = exp(interval(1, 2)) + assert a.start == np.exp(1) + assert a.end == np.exp(2) + a = exp(1) + assert a.start == np.exp(1) + assert a.end == np.exp(1) + + +def test_log(): + a = log(interval(1, 2)) + assert a.start == 0 + assert a.end == np.log(2) + a = log(interval(-1, 1)) + assert a.is_valid is None + a = log(interval(-3, -1)) + assert a.is_valid is False + a = log(-3) + assert a.is_valid is False + a = log(2) + assert a.start == np.log(2) + assert a.end == np.log(2) + + +def test_log10(): + a = log10(interval(1, 2)) + assert a.start == 0 + assert a.end == np.log10(2) + a = log10(interval(-1, 1)) + assert a.is_valid is None + a = log10(interval(-3, -1)) + assert a.is_valid is False + a = log10(-3) + assert a.is_valid is False + a = log10(2) + assert a.start == np.log10(2) + assert a.end == np.log10(2) + + +def test_atan(): + a = atan(interval(0, 1)) + assert a.start == np.arctan(0) + assert a.end == np.arctan(1) + a = atan(1) + assert a.start == np.arctan(1) + assert a.end == np.arctan(1) + + +def test_sin(): + a = sin(interval(0, np.pi / 4)) + assert a.start == np.sin(0) + assert a.end == np.sin(np.pi / 4) + + a = sin(interval(-np.pi / 4, np.pi / 4)) + assert a.start == np.sin(-np.pi / 4) + assert a.end == np.sin(np.pi / 4) + + a = sin(interval(np.pi / 4, 3 * np.pi / 4)) + assert a.start == np.sin(np.pi / 4) + assert a.end == 1 + + a = sin(interval(7 * np.pi / 6, 7 * np.pi / 4)) + assert a.start == -1 + assert a.end == np.sin(7 * np.pi / 6) + + a = sin(interval(0, 3 * np.pi)) + assert a.start == -1 + assert a.end == 1 + + a = sin(interval(np.pi / 3, 7 * np.pi / 4)) + assert a.start == -1 + assert a.end == 1 + + a = sin(np.pi / 4) + assert a.start == np.sin(np.pi / 4) + assert a.end == np.sin(np.pi / 4) + + a = sin(interval(1, 2, is_valid=False)) + assert a.is_valid is False + + +def test_cos(): + a = cos(interval(0, np.pi / 4)) + assert a.start == np.cos(np.pi / 4) + assert a.end == 1 + + a = cos(interval(-np.pi / 4, np.pi / 4)) + assert a.start == np.cos(-np.pi / 4) + assert a.end == 1 + + a = cos(interval(np.pi / 4, 3 * np.pi / 4)) + assert a.start == np.cos(3 * np.pi / 4) + assert a.end == np.cos(np.pi / 4) + + a = cos(interval(3 * 
np.pi / 4, 5 * np.pi / 4)) + assert a.start == -1 + assert a.end == np.cos(3 * np.pi / 4) + + a = cos(interval(0, 3 * np.pi)) + assert a.start == -1 + assert a.end == 1 + + a = cos(interval(- np.pi / 3, 5 * np.pi / 4)) + assert a.start == -1 + assert a.end == 1 + + a = cos(interval(1, 2, is_valid=False)) + assert a.is_valid is False + + +def test_tan(): + a = tan(interval(0, np.pi / 4)) + assert a.start == 0 + # must match lib_interval definition of tan: + assert a.end == np.sin(np.pi / 4)/np.cos(np.pi / 4) + + a = tan(interval(np.pi / 4, 3 * np.pi / 4)) + #discontinuity + assert a.is_valid is None + + +def test_sqrt(): + a = sqrt(interval(1, 4)) + assert a.start == 1 + assert a.end == 2 + + a = sqrt(interval(0.01, 1)) + assert a.start == np.sqrt(0.01) + assert a.end == 1 + + a = sqrt(interval(-1, 1)) + assert a.is_valid is None + + a = sqrt(interval(-3, -1)) + assert a.is_valid is False + + a = sqrt(4) + assert (a == interval(2, 2)) == (True, True) + + a = sqrt(-3) + assert a.is_valid is False + + +def test_imin(): + a = imin(interval(1, 3), interval(2, 5), interval(-1, 3)) + assert a.start == -1 + assert a.end == 3 + + a = imin(-2, interval(1, 4)) + assert a.start == -2 + assert a.end == -2 + + a = imin(5, interval(3, 4), interval(-2, 2, is_valid=False)) + assert a.start == 3 + assert a.end == 4 + + +def test_imax(): + a = imax(interval(-2, 2), interval(2, 7), interval(-3, 9)) + assert a.start == 2 + assert a.end == 9 + + a = imax(8, interval(1, 4)) + assert a.start == 8 + assert a.end == 8 + + a = imax(interval(1, 2), interval(3, 4), interval(-2, 2, is_valid=False)) + assert a.start == 3 + assert a.end == 4 + + +def test_sinh(): + a = sinh(interval(-1, 1)) + assert a.start == np.sinh(-1) + assert a.end == np.sinh(1) + + a = sinh(1) + assert a.start == np.sinh(1) + assert a.end == np.sinh(1) + + +def test_cosh(): + a = cosh(interval(1, 2)) + assert a.start == np.cosh(1) + assert a.end == np.cosh(2) + a = cosh(interval(-2, -1)) + assert a.start == np.cosh(-1) + assert a.end == np.cosh(-2) + + a = cosh(interval(-2, 1)) + assert a.start == 1 + assert a.end == np.cosh(-2) + + a = cosh(1) + assert a.start == np.cosh(1) + assert a.end == np.cosh(1) + + +def test_tanh(): + a = tanh(interval(-3, 3)) + assert a.start == np.tanh(-3) + assert a.end == np.tanh(3) + + a = tanh(3) + assert a.start == np.tanh(3) + assert a.end == np.tanh(3) + + +def test_asin(): + a = asin(interval(-0.5, 0.5)) + assert a.start == np.arcsin(-0.5) + assert a.end == np.arcsin(0.5) + + a = asin(interval(-1.5, 1.5)) + assert a.is_valid is None + a = asin(interval(-2, -1.5)) + assert a.is_valid is False + + a = asin(interval(0, 2)) + assert a.is_valid is None + + a = asin(interval(2, 5)) + assert a.is_valid is False + + a = asin(0.5) + assert a.start == np.arcsin(0.5) + assert a.end == np.arcsin(0.5) + + a = asin(1.5) + assert a.is_valid is False + + +def test_acos(): + a = acos(interval(-0.5, 0.5)) + assert a.start == np.arccos(0.5) + assert a.end == np.arccos(-0.5) + + a = acos(interval(-1.5, 1.5)) + assert a.is_valid is None + a = acos(interval(-2, -1.5)) + assert a.is_valid is False + + a = acos(interval(0, 2)) + assert a.is_valid is None + + a = acos(interval(2, 5)) + assert a.is_valid is False + + a = acos(0.5) + assert a.start == np.arccos(0.5) + assert a.end == np.arccos(0.5) + + a = acos(1.5) + assert a.is_valid is False + + +def test_ceil(): + a = ceil(interval(0.2, 0.5)) + assert a.start == 1 + assert a.end == 1 + + a = ceil(interval(0.5, 1.5)) + assert a.start == 1 + assert a.end == 2 + assert a.is_valid is None 
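# Note: ceil(interval(0.5, 1.5)) crosses the jump at 1, so the result spans
# [1, 2] but carries is_valid=None, i.e. the function is not continuous over
# the whole argument interval, which is exactly what the None state encodes.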
+ + a = ceil(interval(-5, 5)) + assert a.is_valid is None + + a = ceil(5.4) + assert a.start == 6 + assert a.end == 6 + + +def test_floor(): + a = floor(interval(0.2, 0.5)) + assert a.start == 0 + assert a.end == 0 + + a = floor(interval(0.5, 1.5)) + assert a.start == 0 + assert a.end == 1 + assert a.is_valid is None + + a = floor(interval(-5, 5)) + assert a.is_valid is None + + a = floor(5.4) + assert a.start == 5 + assert a.end == 5 + + +def test_asinh(): + a = asinh(interval(1, 2)) + assert a.start == np.arcsinh(1) + assert a.end == np.arcsinh(2) + + a = asinh(0.5) + assert a.start == np.arcsinh(0.5) + assert a.end == np.arcsinh(0.5) + + +def test_acosh(): + a = acosh(interval(3, 5)) + assert a.start == np.arccosh(3) + assert a.end == np.arccosh(5) + + a = acosh(interval(0, 3)) + assert a.is_valid is None + a = acosh(interval(-3, 0.5)) + assert a.is_valid is False + + a = acosh(0.5) + assert a.is_valid is False + + a = acosh(2) + assert a.start == np.arccosh(2) + assert a.end == np.arccosh(2) + + +def test_atanh(): + a = atanh(interval(-0.5, 0.5)) + assert a.start == np.arctanh(-0.5) + assert a.end == np.arctanh(0.5) + + a = atanh(interval(0, 3)) + assert a.is_valid is None + + a = atanh(interval(-3, -2)) + assert a.is_valid is False + + a = atanh(0.5) + assert a.start == np.arctanh(0.5) + assert a.end == np.arctanh(0.5) + + a = atanh(1.5) + assert a.is_valid is False + + +def test_Abs(): + assert (Abs(interval(-0.5, 0.5)) == interval(0, 0.5)) == (True, True) + assert (Abs(interval(-3, -2)) == interval(2, 3)) == (True, True) + assert (Abs(-3) == interval(3, 3)) == (True, True) + + +def test_And(): + args = [(True, True), (True, False), (True, None)] + assert And(*args) == (True, False) + + args = [(False, True), (None, None), (True, True)] + assert And(*args) == (False, None) + + +def test_Or(): + args = [(True, True), (True, False), (False, None)] + assert Or(*args) == (True, True) + args = [(None, None), (False, None), (False, False)] + assert Or(*args) == (None, None) diff --git a/.venv/Lib/site-packages/sympy/plotting/intervalmath/tests/test_interval_membership.py b/.venv/Lib/site-packages/sympy/plotting/intervalmath/tests/test_interval_membership.py new file mode 100644 index 0000000000000000000000000000000000000000..7b7f23680d60a64a6257a84c2476e31a8b5dfce8 --- /dev/null +++ b/.venv/Lib/site-packages/sympy/plotting/intervalmath/tests/test_interval_membership.py @@ -0,0 +1,150 @@ +from sympy.core.symbol import Symbol +from sympy.plotting.intervalmath import interval +from sympy.plotting.intervalmath.interval_membership import intervalMembership +from sympy.plotting.experimental_lambdify import experimental_lambdify +from sympy.testing.pytest import raises + + +def test_creation(): + assert intervalMembership(True, True) + raises(TypeError, lambda: intervalMembership(True)) + raises(TypeError, lambda: intervalMembership(True, True, True)) + + +def test_getitem(): + a = intervalMembership(True, False) + assert a[0] is True + assert a[1] is False + raises(IndexError, lambda: a[2]) + + +def test_str(): + a = intervalMembership(True, False) + assert str(a) == 'intervalMembership(True, False)' + assert repr(a) == 'intervalMembership(True, False)' + + +def test_equivalence(): + a = intervalMembership(True, True) + b = intervalMembership(True, False) + assert (a == b) is False + assert (a != b) is True + + a = intervalMembership(True, False) + b = intervalMembership(True, False) + assert (a == b) is True + assert (a != b) is False + + +def test_not(): + x = Symbol('x') + + r1 = x > -1 + r2 
= x <= -1 + + i = interval + + f1 = experimental_lambdify((x,), r1) + f2 = experimental_lambdify((x,), r2) + + tt = i(-0.1, 0.1, is_valid=True) + tn = i(-0.1, 0.1, is_valid=None) + tf = i(-0.1, 0.1, is_valid=False) + + assert f1(tt) == ~f2(tt) + assert f1(tn) == ~f2(tn) + assert f1(tf) == ~f2(tf) + + nt = i(0.9, 1.1, is_valid=True) + nn = i(0.9, 1.1, is_valid=None) + nf = i(0.9, 1.1, is_valid=False) + + assert f1(nt) == ~f2(nt) + assert f1(nn) == ~f2(nn) + assert f1(nf) == ~f2(nf) + + ft = i(1.9, 2.1, is_valid=True) + fn = i(1.9, 2.1, is_valid=None) + ff = i(1.9, 2.1, is_valid=False) + + assert f1(ft) == ~f2(ft) + assert f1(fn) == ~f2(fn) + assert f1(ff) == ~f2(ff) + + +def test_boolean(): + # There can be 9*9 test cases in full mapping of the cartesian product. + # But we only consider 3*3 cases for simplicity. + s = [ + intervalMembership(False, False), + intervalMembership(None, None), + intervalMembership(True, True) + ] + + # Reduced tests for 'And' + a1 = [ + intervalMembership(False, False), + intervalMembership(False, False), + intervalMembership(False, False), + intervalMembership(False, False), + intervalMembership(None, None), + intervalMembership(None, None), + intervalMembership(False, False), + intervalMembership(None, None), + intervalMembership(True, True) + ] + a1_iter = iter(a1) + for i in range(len(s)): + for j in range(len(s)): + assert s[i] & s[j] == next(a1_iter) + + # Reduced tests for 'Or' + a1 = [ + intervalMembership(False, False), + intervalMembership(None, False), + intervalMembership(True, False), + intervalMembership(None, False), + intervalMembership(None, None), + intervalMembership(True, None), + intervalMembership(True, False), + intervalMembership(True, None), + intervalMembership(True, True) + ] + a1_iter = iter(a1) + for i in range(len(s)): + for j in range(len(s)): + assert s[i] | s[j] == next(a1_iter) + + # Reduced tests for 'Xor' + a1 = [ + intervalMembership(False, False), + intervalMembership(None, False), + intervalMembership(True, False), + intervalMembership(None, False), + intervalMembership(None, None), + intervalMembership(None, None), + intervalMembership(True, False), + intervalMembership(None, None), + intervalMembership(False, True) + ] + a1_iter = iter(a1) + for i in range(len(s)): + for j in range(len(s)): + assert s[i] ^ s[j] == next(a1_iter) + + # Reduced tests for 'Not' + a1 = [ + intervalMembership(True, False), + intervalMembership(None, None), + intervalMembership(False, True) + ] + a1_iter = iter(a1) + for i in range(len(s)): + assert ~s[i] == next(a1_iter) + + +def test_boolean_errors(): + a = intervalMembership(True, True) + raises(ValueError, lambda: a & 1) + raises(ValueError, lambda: a | 1) + raises(ValueError, lambda: a ^ 1) diff --git a/.venv/Lib/site-packages/sympy/plotting/intervalmath/tests/test_intervalmath.py b/.venv/Lib/site-packages/sympy/plotting/intervalmath/tests/test_intervalmath.py new file mode 100644 index 0000000000000000000000000000000000000000..e30f217a44b4ea795270c0e2c66b6813b05e63ea --- /dev/null +++ b/.venv/Lib/site-packages/sympy/plotting/intervalmath/tests/test_intervalmath.py @@ -0,0 +1,213 @@ +from sympy.plotting.intervalmath import interval +from sympy.testing.pytest import raises + + +def test_interval(): + assert (interval(1, 1) == interval(1, 1, is_valid=True)) == (True, True) + assert (interval(1, 1) == interval(1, 1, is_valid=False)) == (True, False) + assert (interval(1, 1) == interval(1, 1, is_valid=None)) == (True, None) + assert (interval(1, 1.5) == interval(1, 2)) == (None, True) + assert 
(interval(0, 1) == interval(2, 3)) == (False, True) + assert (interval(0, 1) == interval(1, 2)) == (None, True) + assert (interval(1, 2) != interval(1, 2)) == (False, True) + assert (interval(1, 3) != interval(2, 3)) == (None, True) + assert (interval(1, 3) != interval(-5, -3)) == (True, True) + assert ( + interval(1, 3, is_valid=False) != interval(-5, -3)) == (True, False) + assert (interval(1, 3, is_valid=None) != interval(-5, 3)) == (None, None) + assert (interval(4, 4) != 4) == (False, True) + assert (interval(1, 1) == 1) == (True, True) + assert (interval(1, 3, is_valid=False) == interval(1, 3)) == (True, False) + assert (interval(1, 3, is_valid=None) == interval(1, 3)) == (True, None) + inter = interval(-5, 5) + assert (interval(inter) == interval(-5, 5)) == (True, True) + assert inter.width == 10 + assert 0 in inter + assert -5 in inter + assert 5 in inter + assert interval(0, 3) in inter + assert interval(-6, 2) not in inter + assert -5.05 not in inter + assert 5.3 not in inter + interb = interval(-float('inf'), float('inf')) + assert 0 in inter + assert inter in interb + assert interval(0, float('inf')) in interb + assert interval(-float('inf'), 5) in interb + assert interval(-1e50, 1e50) in interb + assert ( + -interval(-1, -2, is_valid=False) == interval(1, 2)) == (True, False) + raises(ValueError, lambda: interval(1, 2, 3)) + + +def test_interval_add(): + assert (interval(1, 2) + interval(2, 3) == interval(3, 5)) == (True, True) + assert (1 + interval(1, 2) == interval(2, 3)) == (True, True) + assert (interval(1, 2) + 1 == interval(2, 3)) == (True, True) + compare = (1 + interval(0, float('inf')) == interval(1, float('inf'))) + assert compare == (True, True) + a = 1 + interval(2, 5, is_valid=False) + assert a.is_valid is False + a = 1 + interval(2, 5, is_valid=None) + assert a.is_valid is None + a = interval(2, 5, is_valid=False) + interval(3, 5, is_valid=None) + assert a.is_valid is False + a = interval(3, 5) + interval(-1, 1, is_valid=None) + assert a.is_valid is None + a = interval(2, 5, is_valid=False) + 1 + assert a.is_valid is False + + +def test_interval_sub(): + assert (interval(1, 2) - interval(1, 5) == interval(-4, 1)) == (True, True) + assert (interval(1, 2) - 1 == interval(0, 1)) == (True, True) + assert (1 - interval(1, 2) == interval(-1, 0)) == (True, True) + a = 1 - interval(1, 2, is_valid=False) + assert a.is_valid is False + a = interval(1, 4, is_valid=None) - 1 + assert a.is_valid is None + a = interval(1, 3, is_valid=False) - interval(1, 3) + assert a.is_valid is False + a = interval(1, 3, is_valid=None) - interval(1, 3) + assert a.is_valid is None + + +def test_interval_inequality(): + assert (interval(1, 2) < interval(3, 4)) == (True, True) + assert (interval(1, 2) < interval(2, 4)) == (None, True) + assert (interval(1, 2) < interval(-2, 0)) == (False, True) + assert (interval(1, 2) <= interval(2, 4)) == (True, True) + assert (interval(1, 2) <= interval(1.5, 6)) == (None, True) + assert (interval(2, 3) <= interval(1, 2)) == (None, True) + assert (interval(2, 3) <= interval(1, 1.5)) == (False, True) + assert ( + interval(1, 2, is_valid=False) <= interval(-2, 0)) == (False, False) + assert (interval(1, 2, is_valid=None) <= interval(-2, 0)) == (False, None) + assert (interval(1, 2) <= 1.5) == (None, True) + assert (interval(1, 2) <= 3) == (True, True) + assert (interval(1, 2) <= 0) == (False, True) + assert (interval(5, 8) > interval(2, 3)) == (True, True) + assert (interval(2, 5) > interval(1, 3)) == (None, True) + assert (interval(2, 3) > interval(3.1, 5)) 
== (False, True) + + assert (interval(-1, 1) == 0) == (None, True) + assert (interval(-1, 1) == 2) == (False, True) + assert (interval(-1, 1) != 0) == (None, True) + assert (interval(-1, 1) != 2) == (True, True) + + assert (interval(3, 5) > 2) == (True, True) + assert (interval(3, 5) < 2) == (False, True) + assert (interval(1, 5) < 2) == (None, True) + assert (interval(1, 5) > 2) == (None, True) + assert (interval(0, 1) > 2) == (False, True) + assert (interval(1, 2) >= interval(0, 1)) == (True, True) + assert (interval(1, 2) >= interval(0, 1.5)) == (None, True) + assert (interval(1, 2) >= interval(3, 4)) == (False, True) + assert (interval(1, 2) >= 0) == (True, True) + assert (interval(1, 2) >= 1.2) == (None, True) + assert (interval(1, 2) >= 3) == (False, True) + assert (2 > interval(0, 1)) == (True, True) + a = interval(-1, 1, is_valid=False) < interval(2, 5, is_valid=None) + assert a == (True, False) + a = interval(-1, 1, is_valid=None) < interval(2, 5, is_valid=False) + assert a == (True, False) + a = interval(-1, 1, is_valid=None) < interval(2, 5, is_valid=None) + assert a == (True, None) + a = interval(-1, 1, is_valid=False) > interval(-5, -2, is_valid=None) + assert a == (True, False) + a = interval(-1, 1, is_valid=None) > interval(-5, -2, is_valid=False) + assert a == (True, False) + a = interval(-1, 1, is_valid=None) > interval(-5, -2, is_valid=None) + assert a == (True, None) + + +def test_interval_mul(): + assert ( + interval(1, 5) * interval(2, 10) == interval(2, 50)) == (True, True) + a = interval(-1, 1) * interval(2, 10) == interval(-10, 10) + assert a == (True, True) + + a = interval(-1, 1) * interval(-5, 3) == interval(-5, 5) + assert a == (True, True) + + assert (interval(1, 3) * 2 == interval(2, 6)) == (True, True) + assert (3 * interval(-1, 2) == interval(-3, 6)) == (True, True) + + a = 3 * interval(1, 2, is_valid=False) + assert a.is_valid is False + + a = 3 * interval(1, 2, is_valid=None) + assert a.is_valid is None + + a = interval(1, 5, is_valid=False) * interval(1, 2, is_valid=None) + assert a.is_valid is False + + +def test_interval_div(): + div = interval(1, 2, is_valid=False) / 3 + assert div == interval(-float('inf'), float('inf'), is_valid=False) + + div = interval(1, 2, is_valid=None) / 3 + assert div == interval(-float('inf'), float('inf'), is_valid=None) + + div = 3 / interval(1, 2, is_valid=None) + assert div == interval(-float('inf'), float('inf'), is_valid=None) + a = interval(1, 2) / 0 + assert a.is_valid is False + a = interval(0.5, 1) / interval(-1, 0) + assert a.is_valid is None + a = interval(0, 1) / interval(0, 1) + assert a.is_valid is None + + a = interval(-1, 1) / interval(-1, 1) + assert a.is_valid is None + + a = interval(-1, 2) / interval(0.5, 1) == interval(-2.0, 4.0) + assert a == (True, True) + a = interval(0, 1) / interval(0.5, 1) == interval(0.0, 2.0) + assert a == (True, True) + a = interval(-1, 0) / interval(0.5, 1) == interval(-2.0, 0.0) + assert a == (True, True) + a = interval(-0.5, -0.25) / interval(0.5, 1) == interval(-1.0, -0.25) + assert a == (True, True) + a = interval(0.5, 1) / interval(0.5, 1) == interval(0.5, 2.0) + assert a == (True, True) + a = interval(0.5, 4) / interval(0.5, 1) == interval(0.5, 8.0) + assert a == (True, True) + a = interval(-1, -0.5) / interval(0.5, 1) == interval(-2.0, -0.5) + assert a == (True, True) + a = interval(-4, -0.5) / interval(0.5, 1) == interval(-8.0, -0.5) + assert a == (True, True) + a = interval(-1, 2) / interval(-2, -0.5) == interval(-4.0, 2.0) + assert a == (True, True) + a = interval(0, 
1) / interval(-2, -0.5) == interval(-2.0, 0.0) + assert a == (True, True) + a = interval(-1, 0) / interval(-2, -0.5) == interval(0.0, 2.0) + assert a == (True, True) + a = interval(-0.5, -0.25) / interval(-2, -0.5) == interval(0.125, 1.0) + assert a == (True, True) + a = interval(0.5, 1) / interval(-2, -0.5) == interval(-2.0, -0.25) + assert a == (True, True) + a = interval(0.5, 4) / interval(-2, -0.5) == interval(-8.0, -0.25) + assert a == (True, True) + a = interval(-1, -0.5) / interval(-2, -0.5) == interval(0.25, 2.0) + assert a == (True, True) + a = interval(-4, -0.5) / interval(-2, -0.5) == interval(0.25, 8.0) + assert a == (True, True) + a = interval(-5, 5, is_valid=False) / 2 + assert a.is_valid is False + +def test_hashable(): + ''' + test that interval objects are hashable. + this is required in order to be able to put them into the cache, which + appears to be necessary for plotting in py3k. For details, see: + + https://github.com/sympy/sympy/pull/2101 + https://github.com/sympy/sympy/issues/6533 + ''' + hash(interval(1, 1)) + hash(interval(1, 1, is_valid=True)) + hash(interval(-4, -0.5)) + hash(interval(-2, -0.5)) + hash(interval(0.25, 8.0)) diff --git a/.venv/Lib/site-packages/sympy/plotting/pygletplot/__init__.py b/.venv/Lib/site-packages/sympy/plotting/pygletplot/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..cd86a505d8c4b8026bd91cde27d441e00223a8bc --- /dev/null +++ b/.venv/Lib/site-packages/sympy/plotting/pygletplot/__init__.py @@ -0,0 +1,138 @@ +"""Plotting module that can plot 2D and 3D functions +""" + +from sympy.utilities.decorator import doctest_depends_on + +@doctest_depends_on(modules=('pyglet',)) +def PygletPlot(*args, **kwargs): + """ + + Plot Examples + ============= + + See examples/advanced/pyglet_plotting.py for many more examples. + + >>> from sympy.plotting.pygletplot import PygletPlot as Plot + >>> from sympy.abc import x, y, z + + >>> Plot(x*y**3-y*x**3) + [0]: -x**3*y + x*y**3, 'mode=cartesian' + + >>> p = Plot() + >>> p[1] = x*y + >>> p[1].color = z, (0.4,0.4,0.9), (0.9,0.4,0.4) + + >>> p = Plot() + >>> p[1] = x**2+y**2 + >>> p[2] = -x**2-y**2 + + + Variable Intervals + ================== + + The basic format is [var, min, max, steps], but the + syntax is flexible and arguments left out are taken + from the defaults for the current coordinate mode: + + >>> Plot(x**2) # implies [x,-5,5,100] + [0]: x**2, 'mode=cartesian' + + >>> Plot(x**2, [], []) # [x,-1,1,40], [y,-1,1,40] + [0]: x**2, 'mode=cartesian' + >>> Plot(x**2-y**2, [100], [100]) # [x,-1,1,100], [y,-1,1,100] + [0]: x**2 - y**2, 'mode=cartesian' + >>> Plot(x**2, [x,-13,13,100]) + [0]: x**2, 'mode=cartesian' + >>> Plot(x**2, [-13,13]) # [x,-13,13,100] + [0]: x**2, 'mode=cartesian' + >>> Plot(x**2, [x,-13,13]) # [x,-13,13,100] + [0]: x**2, 'mode=cartesian' + >>> Plot(1*x, [], [x], mode='cylindrical') + ... # [unbound_theta,0,2*Pi,40], [x,-1,1,20] + [0]: x, 'mode=cartesian' + + + Coordinate Modes + ================ + + Plot supports several curvilinear coordinate modes, and + they independent for each plotted function. You can specify + a coordinate mode explicitly with the 'mode' named argument, + but it can be automatically determined for Cartesian or + parametric plots, and therefore must only be specified for + polar, cylindrical, and spherical modes. 
+ + Specifically, Plot(function arguments) and Plot[n] = + (function arguments) will interpret your arguments as a + Cartesian plot if you provide one function and a parametric + plot if you provide two or three functions. Similarly, the + arguments will be interpreted as a curve if one variable is + used, and a surface if two are used. + + Supported mode names by number of variables: + + 1: parametric, cartesian, polar + 2: parametric, cartesian, cylindrical = polar, spherical + + >>> Plot(1, mode='spherical') + + + Calculator-like Interface + ========================= + + >>> p = Plot(visible=False) + >>> f = x**2 + >>> p[1] = f + >>> p[2] = f.diff(x) + >>> p[3] = f.diff(x).diff(x) + >>> p + [1]: x**2, 'mode=cartesian' + [2]: 2*x, 'mode=cartesian' + [3]: 2, 'mode=cartesian' + >>> p.show() + >>> p.clear() + >>> p + + >>> p[1] = x**2+y**2 + >>> p[1].style = 'solid' + >>> p[2] = -x**2-y**2 + >>> p[2].style = 'wireframe' + >>> p[1].color = z, (0.4,0.4,0.9), (0.9,0.4,0.4) + >>> p[1].style = 'both' + >>> p[2].style = 'both' + >>> p.close() + + + Plot Window Keyboard Controls + ============================= + + Screen Rotation: + X,Y axis Arrow Keys, A,S,D,W, Numpad 4,6,8,2 + Z axis Q,E, Numpad 7,9 + + Model Rotation: + Z axis Z,C, Numpad 1,3 + + Zoom: R,F, PgUp,PgDn, Numpad +,- + + Reset Camera: X, Numpad 5 + + Camera Presets: + XY F1 + XZ F2 + YZ F3 + Perspective F4 + + Sensitivity Modifier: SHIFT + + Axes Toggle: + Visible F5 + Colors F6 + + Close Window: ESCAPE + + ============================= + """ + + from sympy.plotting.pygletplot.plot import PygletPlot + return PygletPlot(*args, **kwargs) diff --git a/.venv/Lib/site-packages/sympy/plotting/pygletplot/__pycache__/__init__.cpython-39.pyc b/.venv/Lib/site-packages/sympy/plotting/pygletplot/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5c5353affe1afe992184c8605965d2fc200814a4 Binary files /dev/null and b/.venv/Lib/site-packages/sympy/plotting/pygletplot/__pycache__/__init__.cpython-39.pyc differ diff --git a/.venv/Lib/site-packages/sympy/plotting/pygletplot/color_scheme.py b/.venv/Lib/site-packages/sympy/plotting/pygletplot/color_scheme.py new file mode 100644 index 0000000000000000000000000000000000000000..613e777a7f45f54349c47d272aa6d1c157bcd117 --- /dev/null +++ b/.venv/Lib/site-packages/sympy/plotting/pygletplot/color_scheme.py @@ -0,0 +1,336 @@ +from sympy.core.basic import Basic +from sympy.core.symbol import (Symbol, symbols) +from sympy.utilities.lambdify import lambdify +from .util import interpolate, rinterpolate, create_bounds, update_bounds +from sympy.utilities.iterables import sift + + +class ColorGradient: + colors = [0.4, 0.4, 0.4], [0.9, 0.9, 0.9] + intervals = 0.0, 1.0 + + def __init__(self, *args): + if len(args) == 2: + self.colors = list(args) + self.intervals = [0.0, 1.0] + elif len(args) > 0: + if len(args) % 2 != 0: + raise ValueError("len(args) should be even") + self.colors = [args[i] for i in range(1, len(args), 2)] + self.intervals = [args[i] for i in range(0, len(args), 2)] + assert len(self.colors) == len(self.intervals) + + def copy(self): + c = ColorGradient() + c.colors = [e[::] for e in self.colors] + c.intervals = self.intervals[::] + return c + + def _find_interval(self, v): + m = len(self.intervals) + i = 0 + while i < m - 1 and self.intervals[i] <= v: + i += 1 + return i + + def _interpolate_axis(self, axis, v): + i = self._find_interval(v) + v = rinterpolate(self.intervals[i - 1], self.intervals[i], v) + return interpolate(self.colors[i - 
1][axis], self.colors[i][axis], v) + + def __call__(self, r, g, b): + c = self._interpolate_axis + return c(0, r), c(1, g), c(2, b) + +default_color_schemes = {} # defined at the bottom of this file + + +class ColorScheme: + + def __init__(self, *args, **kwargs): + self.args = args + self.f, self.gradient = None, ColorGradient() + + if len(args) == 1 and not isinstance(args[0], Basic) and callable(args[0]): + self.f = args[0] + elif len(args) == 1 and isinstance(args[0], str): + if args[0] in default_color_schemes: + cs = default_color_schemes[args[0]] + self.f, self.gradient = cs.f, cs.gradient.copy() + else: + self.f = lambdify('x,y,z,u,v', args[0]) + else: + self.f, self.gradient = self._interpret_args(args) + self._test_color_function() + if not isinstance(self.gradient, ColorGradient): + raise ValueError("Color gradient not properly initialized. " + "(Not a ColorGradient instance.)") + + def _interpret_args(self, args): + f, gradient = None, self.gradient + atoms, lists = self._sort_args(args) + s = self._pop_symbol_list(lists) + s = self._fill_in_vars(s) + + # prepare the error message for lambdification failure + f_str = ', '.join(str(fa) for fa in atoms) + s_str = (str(sa) for sa in s) + s_str = ', '.join(sa for sa in s_str if sa.find('unbound') < 0) + f_error = ValueError("Could not interpret arguments " + "%s as functions of %s." % (f_str, s_str)) + + # try to lambdify args + if len(atoms) == 1: + fv = atoms[0] + try: + f = lambdify(s, [fv, fv, fv]) + except TypeError: + raise f_error + + elif len(atoms) == 3: + fr, fg, fb = atoms + try: + f = lambdify(s, [fr, fg, fb]) + except TypeError: + raise f_error + + else: + raise ValueError("A ColorScheme must provide 1 or 3 " + "functions in x, y, z, u, and/or v.") + + # try to intrepret any given color information + if len(lists) == 0: + gargs = [] + + elif len(lists) == 1: + gargs = lists[0] + + elif len(lists) == 2: + try: + (r1, g1, b1), (r2, g2, b2) = lists + except TypeError: + raise ValueError("If two color arguments are given, " + "they must be given in the format " + "(r1, g1, b1), (r2, g2, b2).") + gargs = lists + + elif len(lists) == 3: + try: + (r1, r2), (g1, g2), (b1, b2) = lists + except Exception: + raise ValueError("If three color arguments are given, " + "they must be given in the format " + "(r1, r2), (g1, g2), (b1, b2). To create " + "a multi-step gradient, use the syntax " + "[0, colorStart, step1, color1, ..., 1, " + "colorEnd].") + gargs = [[r1, g1, b1], [r2, g2, b2]] + + else: + raise ValueError("Don't know what to do with collection " + "arguments %s." % (', '.join(str(l) for l in lists))) + + if gargs: + try: + gradient = ColorGradient(*gargs) + except Exception as ex: + raise ValueError(("Could not initialize a gradient " + "with arguments %s. 
Inner " + "exception: %s") % (gargs, str(ex))) + + return f, gradient + + def _pop_symbol_list(self, lists): + symbol_lists = [] + for l in lists: + mark = True + for s in l: + if s is not None and not isinstance(s, Symbol): + mark = False + break + if mark: + lists.remove(l) + symbol_lists.append(l) + if len(symbol_lists) == 1: + return symbol_lists[0] + elif len(symbol_lists) == 0: + return [] + else: + raise ValueError("Only one list of Symbols " + "can be given for a color scheme.") + + def _fill_in_vars(self, args): + defaults = symbols('x,y,z,u,v') + v_error = ValueError("Could not find what to plot.") + if len(args) == 0: + return defaults + if not isinstance(args, (tuple, list)): + raise v_error + if len(args) == 0: + return defaults + for s in args: + if s is not None and not isinstance(s, Symbol): + raise v_error + # when vars are given explicitly, any vars + # not given are marked 'unbound' as to not + # be accidentally used in an expression + vars = [Symbol('unbound%i' % (i)) for i in range(1, 6)] + # interpret as t + if len(args) == 1: + vars[3] = args[0] + # interpret as u,v + elif len(args) == 2: + if args[0] is not None: + vars[3] = args[0] + if args[1] is not None: + vars[4] = args[1] + # interpret as x,y,z + elif len(args) >= 3: + # allow some of x,y,z to be + # left unbound if not given + if args[0] is not None: + vars[0] = args[0] + if args[1] is not None: + vars[1] = args[1] + if args[2] is not None: + vars[2] = args[2] + # interpret the rest as t + if len(args) >= 4: + vars[3] = args[3] + # ...or u,v + if len(args) >= 5: + vars[4] = args[4] + return vars + + def _sort_args(self, args): + lists, atoms = sift(args, + lambda a: isinstance(a, (tuple, list)), binary=True) + return atoms, lists + + def _test_color_function(self): + if not callable(self.f): + raise ValueError("Color function is not callable.") + try: + result = self.f(0, 0, 0, 0, 0) + if len(result) != 3: + raise ValueError("length should be equal to 3") + except TypeError: + raise ValueError("Color function needs to accept x,y,z,u,v, " + "as arguments even if it doesn't use all of them.") + except AssertionError: + raise ValueError("Color function needs to return 3-tuple r,g,b.") + except Exception: + pass # color function probably not valid at 0,0,0,0,0 + + def __call__(self, x, y, z, u, v): + try: + return self.f(x, y, z, u, v) + except Exception: + return None + + def apply_to_curve(self, verts, u_set, set_len=None, inc_pos=None): + """ + Apply this color scheme to a + set of vertices over a single + independent variable u. + """ + bounds = create_bounds() + cverts = [] + if callable(set_len): + set_len(len(u_set)*2) + # calculate f() = r,g,b for each vert + # and find the min and max for r,g,b + for _u in range(len(u_set)): + if verts[_u] is None: + cverts.append(None) + else: + x, y, z = verts[_u] + u, v = u_set[_u], None + c = self(x, y, z, u, v) + if c is not None: + c = list(c) + update_bounds(bounds, c) + cverts.append(c) + if callable(inc_pos): + inc_pos() + # scale and apply gradient + for _u in range(len(u_set)): + if cverts[_u] is not None: + for _c in range(3): + # scale from [f_min, f_max] to [0,1] + cverts[_u][_c] = rinterpolate(bounds[_c][0], bounds[_c][1], + cverts[_u][_c]) + # apply gradient + cverts[_u] = self.gradient(*cverts[_u]) + if callable(inc_pos): + inc_pos() + return cverts + + def apply_to_surface(self, verts, u_set, v_set, set_len=None, inc_pos=None): + """ + Apply this color scheme to a + set of vertices over two + independent variables u and v. 
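[Illustrative aside, not part of the diffed color_scheme.py above.] The ColorScheme contract described here is: calling the scheme with per-vertex (x, y, z, u, v) values yields an (r, g, b) triple, which apply_to_curve/apply_to_surface then rescale into [0, 1] and pass through the ColorGradient. A minimal sketch of that flow, assuming pyglet is installed (this module pulls in pyglet indirectly through its util import) and using the 'zfade' scheme that this same file registers at the bottom:

    from sympy.abc import z
    from sympy.plotting.pygletplot.color_scheme import ColorScheme

    # Named scheme looked up in default_color_schemes; color fades with z.
    cs = ColorScheme('zfade')

    # Evaluate the raw color function at one vertex (x, y, z, u, v);
    # returns an (r, g, b) triple before gradient scaling.
    raw_rgb = cs(0.0, 0.0, 1.0, 0.5, 0.5)

    # An explicit scheme built from one expression and two gradient endpoints,
    # the (r1, g1, b1), (r2, g2, b2) form handled in _interpret_args.
    cs2 = ColorScheme(z, (0.4, 0.4, 0.9), (0.9, 0.4, 0.4))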
+ """ + bounds = create_bounds() + cverts = [] + if callable(set_len): + set_len(len(u_set)*len(v_set)*2) + # calculate f() = r,g,b for each vert + # and find the min and max for r,g,b + for _u in range(len(u_set)): + column = [] + for _v in range(len(v_set)): + if verts[_u][_v] is None: + column.append(None) + else: + x, y, z = verts[_u][_v] + u, v = u_set[_u], v_set[_v] + c = self(x, y, z, u, v) + if c is not None: + c = list(c) + update_bounds(bounds, c) + column.append(c) + if callable(inc_pos): + inc_pos() + cverts.append(column) + # scale and apply gradient + for _u in range(len(u_set)): + for _v in range(len(v_set)): + if cverts[_u][_v] is not None: + # scale from [f_min, f_max] to [0,1] + for _c in range(3): + cverts[_u][_v][_c] = rinterpolate(bounds[_c][0], + bounds[_c][1], cverts[_u][_v][_c]) + # apply gradient + cverts[_u][_v] = self.gradient(*cverts[_u][_v]) + if callable(inc_pos): + inc_pos() + return cverts + + def str_base(self): + return ", ".join(str(a) for a in self.args) + + def __repr__(self): + return "%s" % (self.str_base()) + + +x, y, z, t, u, v = symbols('x,y,z,t,u,v') + +default_color_schemes['rainbow'] = ColorScheme(z, y, x) +default_color_schemes['zfade'] = ColorScheme(z, (0.4, 0.4, 0.97), + (0.97, 0.4, 0.4), (None, None, z)) +default_color_schemes['zfade3'] = ColorScheme(z, (None, None, z), + [0.00, (0.2, 0.2, 1.0), + 0.35, (0.2, 0.8, 0.4), + 0.50, (0.3, 0.9, 0.3), + 0.65, (0.4, 0.8, 0.2), + 1.00, (1.0, 0.2, 0.2)]) + +default_color_schemes['zfade4'] = ColorScheme(z, (None, None, z), + [0.0, (0.3, 0.3, 1.0), + 0.30, (0.3, 1.0, 0.3), + 0.55, (0.95, 1.0, 0.2), + 0.65, (1.0, 0.95, 0.2), + 0.85, (1.0, 0.7, 0.2), + 1.0, (1.0, 0.3, 0.2)]) diff --git a/.venv/Lib/site-packages/sympy/plotting/pygletplot/managed_window.py b/.venv/Lib/site-packages/sympy/plotting/pygletplot/managed_window.py new file mode 100644 index 0000000000000000000000000000000000000000..81fa2541b4dd9e13534aabfd2a11bf88c479daf8 --- /dev/null +++ b/.venv/Lib/site-packages/sympy/plotting/pygletplot/managed_window.py @@ -0,0 +1,106 @@ +from pyglet.window import Window +from pyglet.clock import Clock + +from threading import Thread, Lock + +gl_lock = Lock() + + +class ManagedWindow(Window): + """ + A pyglet window with an event loop which executes automatically + in a separate thread. Behavior is added by creating a subclass + which overrides setup, update, and/or draw. + """ + fps_limit = 30 + default_win_args = {"width": 600, + "height": 500, + "vsync": False, + "resizable": True} + + def __init__(self, **win_args): + """ + It is best not to override this function in the child + class, unless you need to take additional arguments. + Do any OpenGL initialization calls in setup(). + """ + + # check if this is run from the doctester + if win_args.get('runfromdoctester', False): + return + + self.win_args = dict(self.default_win_args, **win_args) + self.Thread = Thread(target=self.__event_loop__) + self.Thread.start() + + def __event_loop__(self, **win_args): + """ + The event loop thread function. Do not override or call + directly (it is called by __init__). 
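[Illustrative aside, not part of the diffed managed_window.py.] ManagedWindow is meant to be subclassed: the event-loop thread created in __init__ calls update(dt) and then draw() every frame under gl_lock, and setup() once before the loop starts. A rough sketch of such a subclass, assuming pyglet is installed and a display is available:

    from sympy.plotting.pygletplot.managed_window import ManagedWindow

    class SpinningWindow(ManagedWindow):
        def setup(self):
            # One-time state/OpenGL initialization, run once before the loop.
            self.angle = 0.0

        def update(self, dt):
            # dt is the elapsed time in seconds since the previous frame.
            self.angle = (self.angle + 90.0 * dt) % 360.0

        def draw(self):
            # OpenGL drawing calls would go here; omitted in this sketch.
            pass

    # Instantiating starts the event loop in its own thread (see __init__);
    # uncomment only where opening a window is acceptable:
    # SpinningWindow(width=400, height=300)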
+ """ + gl_lock.acquire() + try: + try: + super().__init__(**self.win_args) + self.switch_to() + self.setup() + except Exception as e: + print("Window initialization failed: %s" % (str(e))) + self.has_exit = True + finally: + gl_lock.release() + + clock = Clock() + clock.fps_limit = self.fps_limit + while not self.has_exit: + dt = clock.tick() + gl_lock.acquire() + try: + try: + self.switch_to() + self.dispatch_events() + self.clear() + self.update(dt) + self.draw() + self.flip() + except Exception as e: + print("Uncaught exception in event loop: %s" % str(e)) + self.has_exit = True + finally: + gl_lock.release() + super().close() + + def close(self): + """ + Closes the window. + """ + self.has_exit = True + + def setup(self): + """ + Called once before the event loop begins. + Override this method in a child class. This + is the best place to put things like OpenGL + initialization calls. + """ + pass + + def update(self, dt): + """ + Called before draw during each iteration of + the event loop. dt is the elapsed time in + seconds since the last update. OpenGL rendering + calls are best put in draw() rather than here. + """ + pass + + def draw(self): + """ + Called after update during each iteration of + the event loop. Put OpenGL rendering calls + here. + """ + pass + +if __name__ == '__main__': + ManagedWindow() diff --git a/.venv/Lib/site-packages/sympy/plotting/pygletplot/plot.py b/.venv/Lib/site-packages/sympy/plotting/pygletplot/plot.py new file mode 100644 index 0000000000000000000000000000000000000000..8c3dd3c8d4ce6c660cc07f93a55029eef98e55a2 --- /dev/null +++ b/.venv/Lib/site-packages/sympy/plotting/pygletplot/plot.py @@ -0,0 +1,464 @@ +from threading import RLock + +# it is sufficient to import "pyglet" here once +try: + import pyglet.gl as pgl +except ImportError: + raise ImportError("pyglet is required for plotting.\n " + "visit https://pyglet.org/") + +from sympy.core.numbers import Integer +from sympy.external.gmpy import SYMPY_INTS +from sympy.geometry.entity import GeometryEntity +from sympy.plotting.pygletplot.plot_axes import PlotAxes +from sympy.plotting.pygletplot.plot_mode import PlotMode +from sympy.plotting.pygletplot.plot_object import PlotObject +from sympy.plotting.pygletplot.plot_window import PlotWindow +from sympy.plotting.pygletplot.util import parse_option_string +from sympy.utilities.decorator import doctest_depends_on +from sympy.utilities.iterables import is_sequence + +from time import sleep +from os import getcwd, listdir + +import ctypes + +@doctest_depends_on(modules=('pyglet',)) +class PygletPlot: + """ + Plot Examples + ============= + + See examples/advanced/pyglet_plotting.py for many more examples. 
+ + >>> from sympy.plotting.pygletplot import PygletPlot as Plot + >>> from sympy.abc import x, y, z + + >>> Plot(x*y**3-y*x**3) + [0]: -x**3*y + x*y**3, 'mode=cartesian' + + >>> p = Plot() + >>> p[1] = x*y + >>> p[1].color = z, (0.4,0.4,0.9), (0.9,0.4,0.4) + + >>> p = Plot() + >>> p[1] = x**2+y**2 + >>> p[2] = -x**2-y**2 + + + Variable Intervals + ================== + + The basic format is [var, min, max, steps], but the + syntax is flexible and arguments left out are taken + from the defaults for the current coordinate mode: + + >>> Plot(x**2) # implies [x,-5,5,100] + [0]: x**2, 'mode=cartesian' + >>> Plot(x**2, [], []) # [x,-1,1,40], [y,-1,1,40] + [0]: x**2, 'mode=cartesian' + >>> Plot(x**2-y**2, [100], [100]) # [x,-1,1,100], [y,-1,1,100] + [0]: x**2 - y**2, 'mode=cartesian' + >>> Plot(x**2, [x,-13,13,100]) + [0]: x**2, 'mode=cartesian' + >>> Plot(x**2, [-13,13]) # [x,-13,13,100] + [0]: x**2, 'mode=cartesian' + >>> Plot(x**2, [x,-13,13]) # [x,-13,13,10] + [0]: x**2, 'mode=cartesian' + >>> Plot(1*x, [], [x], mode='cylindrical') + ... # [unbound_theta,0,2*Pi,40], [x,-1,1,20] + [0]: x, 'mode=cartesian' + + + Coordinate Modes + ================ + + Plot supports several curvilinear coordinate modes, and + they independent for each plotted function. You can specify + a coordinate mode explicitly with the 'mode' named argument, + but it can be automatically determined for Cartesian or + parametric plots, and therefore must only be specified for + polar, cylindrical, and spherical modes. + + Specifically, Plot(function arguments) and Plot[n] = + (function arguments) will interpret your arguments as a + Cartesian plot if you provide one function and a parametric + plot if you provide two or three functions. Similarly, the + arguments will be interpreted as a curve if one variable is + used, and a surface if two are used. + + Supported mode names by number of variables: + + 1: parametric, cartesian, polar + 2: parametric, cartesian, cylindrical = polar, spherical + + >>> Plot(1, mode='spherical') + + + Calculator-like Interface + ========================= + + >>> p = Plot(visible=False) + >>> f = x**2 + >>> p[1] = f + >>> p[2] = f.diff(x) + >>> p[3] = f.diff(x).diff(x) + >>> p + [1]: x**2, 'mode=cartesian' + [2]: 2*x, 'mode=cartesian' + [3]: 2, 'mode=cartesian' + >>> p.show() + >>> p.clear() + >>> p + + >>> p[1] = x**2+y**2 + >>> p[1].style = 'solid' + >>> p[2] = -x**2-y**2 + >>> p[2].style = 'wireframe' + >>> p[1].color = z, (0.4,0.4,0.9), (0.9,0.4,0.4) + >>> p[1].style = 'both' + >>> p[2].style = 'both' + >>> p.close() + + + Plot Window Keyboard Controls + ============================= + + Screen Rotation: + X,Y axis Arrow Keys, A,S,D,W, Numpad 4,6,8,2 + Z axis Q,E, Numpad 7,9 + + Model Rotation: + Z axis Z,C, Numpad 1,3 + + Zoom: R,F, PgUp,PgDn, Numpad +,- + + Reset Camera: X, Numpad 5 + + Camera Presets: + XY F1 + XZ F2 + YZ F3 + Perspective F4 + + Sensitivity Modifier: SHIFT + + Axes Toggle: + Visible F5 + Colors F6 + + Close Window: ESCAPE + + ============================= + + """ + + @doctest_depends_on(modules=('pyglet',)) + def __init__(self, *fargs, **win_args): + """ + Positional Arguments + ==================== + + Any given positional arguments are used to + initialize a plot function at index 1. In + other words... + + >>> from sympy.plotting.pygletplot import PygletPlot as Plot + >>> from sympy.abc import x + >>> p = Plot(x**2, visible=False) + + ...is equivalent to... 
+ + >>> p = Plot(visible=False) + >>> p[1] = x**2 + + Note that in earlier versions of the plotting + module, you were able to specify multiple + functions in the initializer. This functionality + has been dropped in favor of better automatic + plot plot_mode detection. + + + Named Arguments + =============== + + axes + An option string of the form + "key1=value1; key2 = value2" which + can use the following options: + + style = ordinate + none OR frame OR box OR ordinate + + stride = 0.25 + val OR (val_x, val_y, val_z) + + overlay = True (draw on top of plot) + True OR False + + colored = False (False uses Black, + True uses colors + R,G,B = X,Y,Z) + True OR False + + label_axes = False (display axis names + at endpoints) + True OR False + + visible = True (show immediately + True OR False + + + The following named arguments are passed as + arguments to window initialization: + + antialiasing = True + True OR False + + ortho = False + True OR False + + invert_mouse_zoom = False + True OR False + + """ + # Register the plot modes + from . import plot_modes # noqa + + self._win_args = win_args + self._window = None + + self._render_lock = RLock() + + self._functions = {} + self._pobjects = [] + self._screenshot = ScreenShot(self) + + axe_options = parse_option_string(win_args.pop('axes', '')) + self.axes = PlotAxes(**axe_options) + self._pobjects.append(self.axes) + + self[0] = fargs + if win_args.get('visible', True): + self.show() + + ## Window Interfaces + + def show(self): + """ + Creates and displays a plot window, or activates it + (gives it focus) if it has already been created. + """ + if self._window and not self._window.has_exit: + self._window.activate() + else: + self._win_args['visible'] = True + self.axes.reset_resources() + + #if hasattr(self, '_doctest_depends_on'): + # self._win_args['runfromdoctester'] = True + + self._window = PlotWindow(self, **self._win_args) + + def close(self): + """ + Closes the plot window. + """ + if self._window: + self._window.close() + + def saveimage(self, outfile=None, format='', size=(600, 500)): + """ + Saves a screen capture of the plot window to an + image file. + + If outfile is given, it can either be a path + or a file object. Otherwise a png image will + be saved to the current working directory. + If the format is omitted, it is determined from + the filename extension. + """ + self._screenshot.save(outfile, format, size) + + ## Function List Interfaces + + def clear(self): + """ + Clears the function list of this plot. + """ + self._render_lock.acquire() + self._functions = {} + self.adjust_all_bounds() + self._render_lock.release() + + def __getitem__(self, i): + """ + Returns the function at position i in the + function list. + """ + return self._functions[i] + + def __setitem__(self, i, args): + """ + Parses and adds a PlotMode to the function + list. + """ + if not (isinstance(i, (SYMPY_INTS, Integer)) and i >= 0): + raise ValueError("Function index must " + "be an integer >= 0.") + + if isinstance(args, PlotObject): + f = args + else: + if (not is_sequence(args)) or isinstance(args, GeometryEntity): + args = [args] + if len(args) == 0: + return # no arguments given + kwargs = {"bounds_callback": self.adjust_all_bounds} + f = PlotMode(*args, **kwargs) + + if f: + self._render_lock.acquire() + self._functions[i] = f + self._render_lock.release() + else: + raise ValueError("Failed to parse '%s'." + % ', '.join(str(a) for a in args)) + + def __delitem__(self, i): + """ + Removes the function in the function list at + position i. 
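[Illustrative aside, not part of the diffed plot.py.] The indexing methods around this point give PygletPlot a small dict-like interface over its function list. A hedged usage sketch, assuming pyglet is installed; visible=False keeps the window from opening on construction, as in the doctests above:

    from sympy.abc import x
    from sympy.plotting.pygletplot import PygletPlot as Plot

    p = Plot(visible=False)   # no positional functions, so the list starts empty
    p[1] = x**3               # parsed into a PlotMode and stored at index 1
    p.append(x**2)            # stored at the first unused index (firstavailableindex)
    del p[1]                  # removes the entry and re-adjusts the axis bounds
    n = len(p)                # number of functions currently in the list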
+ """ + self._render_lock.acquire() + del self._functions[i] + self.adjust_all_bounds() + self._render_lock.release() + + def firstavailableindex(self): + """ + Returns the first unused index in the function list. + """ + i = 0 + self._render_lock.acquire() + while i in self._functions: + i += 1 + self._render_lock.release() + return i + + def append(self, *args): + """ + Parses and adds a PlotMode to the function + list at the first available index. + """ + self.__setitem__(self.firstavailableindex(), args) + + def __len__(self): + """ + Returns the number of functions in the function list. + """ + return len(self._functions) + + def __iter__(self): + """ + Allows iteration of the function list. + """ + return self._functions.itervalues() + + def __repr__(self): + return str(self) + + def __str__(self): + """ + Returns a string containing a new-line separated + list of the functions in the function list. + """ + s = "" + if len(self._functions) == 0: + s += "" + else: + self._render_lock.acquire() + s += "\n".join(["%s[%i]: %s" % ("", i, str(self._functions[i])) + for i in self._functions]) + self._render_lock.release() + return s + + def adjust_all_bounds(self): + self._render_lock.acquire() + self.axes.reset_bounding_box() + for f in self._functions: + self.axes.adjust_bounds(self._functions[f].bounds) + self._render_lock.release() + + def wait_for_calculations(self): + sleep(0) + self._render_lock.acquire() + for f in self._functions: + a = self._functions[f]._get_calculating_verts + b = self._functions[f]._get_calculating_cverts + while a() or b(): + sleep(0) + self._render_lock.release() + +class ScreenShot: + def __init__(self, plot): + self._plot = plot + self.screenshot_requested = False + self.outfile = None + self.format = '' + self.invisibleMode = False + self.flag = 0 + + def __bool__(self): + return self.screenshot_requested + + def _execute_saving(self): + if self.flag < 3: + self.flag += 1 + return + + size_x, size_y = self._plot._window.get_size() + size = size_x*size_y*4*ctypes.sizeof(ctypes.c_ubyte) + image = ctypes.create_string_buffer(size) + pgl.glReadPixels(0, 0, size_x, size_y, pgl.GL_RGBA, pgl.GL_UNSIGNED_BYTE, image) + from PIL import Image + im = Image.frombuffer('RGBA', (size_x, size_y), + image.raw, 'raw', 'RGBA', 0, 1) + im.transpose(Image.FLIP_TOP_BOTTOM).save(self.outfile, self.format) + + self.flag = 0 + self.screenshot_requested = False + if self.invisibleMode: + self._plot._window.close() + + def save(self, outfile=None, format='', size=(600, 500)): + self.outfile = outfile + self.format = format + self.size = size + self.screenshot_requested = True + + if not self._plot._window or self._plot._window.has_exit: + self._plot._win_args['visible'] = False + + self._plot._win_args['width'] = size[0] + self._plot._win_args['height'] = size[1] + + self._plot.axes.reset_resources() + self._plot._window = PlotWindow(self._plot, **self._plot._win_args) + self.invisibleMode = True + + if self.outfile is None: + self.outfile = self._create_unique_path() + print(self.outfile) + + def _create_unique_path(self): + cwd = getcwd() + l = listdir(cwd) + path = '' + i = 0 + while True: + if not 'plot_%s.png' % i in l: + path = cwd + '/plot_%s.png' % i + break + i += 1 + return path diff --git a/.venv/Lib/site-packages/sympy/plotting/pygletplot/plot_axes.py b/.venv/Lib/site-packages/sympy/plotting/pygletplot/plot_axes.py new file mode 100644 index 0000000000000000000000000000000000000000..ae26fb0b2fa64e7f7318c51ce3fe5afaa276b48e --- /dev/null +++ 
b/.venv/Lib/site-packages/sympy/plotting/pygletplot/plot_axes.py @@ -0,0 +1,251 @@ +import pyglet.gl as pgl +from pyglet import font + +from sympy.core import S +from sympy.plotting.pygletplot.plot_object import PlotObject +from sympy.plotting.pygletplot.util import billboard_matrix, dot_product, \ + get_direction_vectors, strided_range, vec_mag, vec_sub +from sympy.utilities.iterables import is_sequence + + +class PlotAxes(PlotObject): + + def __init__(self, *args, + style='', none=None, frame=None, box=None, ordinate=None, + stride=0.25, + visible='', overlay='', colored='', label_axes='', label_ticks='', + tick_length=0.1, + font_face='Arial', font_size=28, + **kwargs): + # initialize style parameter + style = style.lower() + + # allow alias kwargs to override style kwarg + if none is not None: + style = 'none' + if frame is not None: + style = 'frame' + if box is not None: + style = 'box' + if ordinate is not None: + style = 'ordinate' + + if style in ['', 'ordinate']: + self._render_object = PlotAxesOrdinate(self) + elif style in ['frame', 'box']: + self._render_object = PlotAxesFrame(self) + elif style in ['none']: + self._render_object = None + else: + raise ValueError(("Unrecognized axes style %s.") % (style)) + + # initialize stride parameter + try: + stride = eval(stride) + except TypeError: + pass + if is_sequence(stride): + if len(stride) != 3: + raise ValueError("length should be equal to 3") + self._stride = stride + else: + self._stride = [stride, stride, stride] + self._tick_length = float(tick_length) + + # setup bounding box and ticks + self._origin = [0, 0, 0] + self.reset_bounding_box() + + def flexible_boolean(input, default): + if input in [True, False]: + return input + if input in ('f', 'F', 'false', 'False'): + return False + if input in ('t', 'T', 'true', 'True'): + return True + return default + + # initialize remaining parameters + self.visible = flexible_boolean(kwargs, True) + self._overlay = flexible_boolean(overlay, True) + self._colored = flexible_boolean(colored, False) + self._label_axes = flexible_boolean(label_axes, False) + self._label_ticks = flexible_boolean(label_ticks, True) + + # setup label font + self.font_face = font_face + self.font_size = font_size + + # this is also used to reinit the + # font on window close/reopen + self.reset_resources() + + def reset_resources(self): + self.label_font = None + + def reset_bounding_box(self): + self._bounding_box = [[None, None], [None, None], [None, None]] + self._axis_ticks = [[], [], []] + + def draw(self): + if self._render_object: + pgl.glPushAttrib(pgl.GL_ENABLE_BIT | pgl.GL_POLYGON_BIT | pgl.GL_DEPTH_BUFFER_BIT) + if self._overlay: + pgl.glDisable(pgl.GL_DEPTH_TEST) + self._render_object.draw() + pgl.glPopAttrib() + + def adjust_bounds(self, child_bounds): + b = self._bounding_box + c = child_bounds + for i in range(3): + if abs(c[i][0]) is S.Infinity or abs(c[i][1]) is S.Infinity: + continue + b[i][0] = c[i][0] if b[i][0] is None else min([b[i][0], c[i][0]]) + b[i][1] = c[i][1] if b[i][1] is None else max([b[i][1], c[i][1]]) + self._bounding_box = b + self._recalculate_axis_ticks(i) + + def _recalculate_axis_ticks(self, axis): + b = self._bounding_box + if b[axis][0] is None or b[axis][1] is None: + self._axis_ticks[axis] = [] + else: + self._axis_ticks[axis] = strided_range(b[axis][0], b[axis][1], + self._stride[axis]) + + def toggle_visible(self): + self.visible = not self.visible + + def toggle_colors(self): + self._colored = not self._colored + + +class PlotAxesBase(PlotObject): + + def 
__init__(self, parent_axes): + self._p = parent_axes + + def draw(self): + color = [([0.2, 0.1, 0.3], [0.2, 0.1, 0.3], [0.2, 0.1, 0.3]), + ([0.9, 0.3, 0.5], [0.5, 1.0, 0.5], [0.3, 0.3, 0.9])][self._p._colored] + self.draw_background(color) + self.draw_axis(2, color[2]) + self.draw_axis(1, color[1]) + self.draw_axis(0, color[0]) + + def draw_background(self, color): + pass # optional + + def draw_axis(self, axis, color): + raise NotImplementedError() + + def draw_text(self, text, position, color, scale=1.0): + if len(color) == 3: + color = (color[0], color[1], color[2], 1.0) + + if self._p.label_font is None: + self._p.label_font = font.load(self._p.font_face, + self._p.font_size, + bold=True, italic=False) + + label = font.Text(self._p.label_font, text, + color=color, + valign=font.Text.BASELINE, + halign=font.Text.CENTER) + + pgl.glPushMatrix() + pgl.glTranslatef(*position) + billboard_matrix() + scale_factor = 0.005 * scale + pgl.glScalef(scale_factor, scale_factor, scale_factor) + pgl.glColor4f(0, 0, 0, 0) + label.draw() + pgl.glPopMatrix() + + def draw_line(self, v, color): + o = self._p._origin + pgl.glBegin(pgl.GL_LINES) + pgl.glColor3f(*color) + pgl.glVertex3f(v[0][0] + o[0], v[0][1] + o[1], v[0][2] + o[2]) + pgl.glVertex3f(v[1][0] + o[0], v[1][1] + o[1], v[1][2] + o[2]) + pgl.glEnd() + + +class PlotAxesOrdinate(PlotAxesBase): + + def __init__(self, parent_axes): + super().__init__(parent_axes) + + def draw_axis(self, axis, color): + ticks = self._p._axis_ticks[axis] + radius = self._p._tick_length / 2.0 + if len(ticks) < 2: + return + + # calculate the vector for this axis + axis_lines = [[0, 0, 0], [0, 0, 0]] + axis_lines[0][axis], axis_lines[1][axis] = ticks[0], ticks[-1] + axis_vector = vec_sub(axis_lines[1], axis_lines[0]) + + # calculate angle to the z direction vector + pos_z = get_direction_vectors()[2] + d = abs(dot_product(axis_vector, pos_z)) + d = d / vec_mag(axis_vector) + + # don't draw labels if we're looking down the axis + labels_visible = abs(d - 1.0) > 0.02 + + # draw the ticks and labels + for tick in ticks: + self.draw_tick_line(axis, color, radius, tick, labels_visible) + + # draw the axis line and labels + self.draw_axis_line(axis, color, ticks[0], ticks[-1], labels_visible) + + def draw_axis_line(self, axis, color, a_min, a_max, labels_visible): + axis_line = [[0, 0, 0], [0, 0, 0]] + axis_line[0][axis], axis_line[1][axis] = a_min, a_max + self.draw_line(axis_line, color) + if labels_visible: + self.draw_axis_line_labels(axis, color, axis_line) + + def draw_axis_line_labels(self, axis, color, axis_line): + if not self._p._label_axes: + return + axis_labels = [axis_line[0][::], axis_line[1][::]] + axis_labels[0][axis] -= 0.3 + axis_labels[1][axis] += 0.3 + a_str = ['X', 'Y', 'Z'][axis] + self.draw_text("-" + a_str, axis_labels[0], color) + self.draw_text("+" + a_str, axis_labels[1], color) + + def draw_tick_line(self, axis, color, radius, tick, labels_visible): + tick_axis = {0: 1, 1: 0, 2: 1}[axis] + tick_line = [[0, 0, 0], [0, 0, 0]] + tick_line[0][axis] = tick_line[1][axis] = tick + tick_line[0][tick_axis], tick_line[1][tick_axis] = -radius, radius + self.draw_line(tick_line, color) + if labels_visible: + self.draw_tick_line_label(axis, color, radius, tick) + + def draw_tick_line_label(self, axis, color, radius, tick): + if not self._p._label_axes: + return + tick_label_vector = [0, 0, 0] + tick_label_vector[axis] = tick + tick_label_vector[{0: 1, 1: 0, 2: 1}[axis]] = [-1, 1, 1][ + axis] * radius * 3.5 + self.draw_text(str(tick), tick_label_vector, color, 
scale=0.5) + + +class PlotAxesFrame(PlotAxesBase): + + def __init__(self, parent_axes): + super().__init__(parent_axes) + + def draw_background(self, color): + pass + + def draw_axis(self, axis, color): + raise NotImplementedError() diff --git a/.venv/Lib/site-packages/sympy/plotting/pygletplot/plot_camera.py b/.venv/Lib/site-packages/sympy/plotting/pygletplot/plot_camera.py new file mode 100644 index 0000000000000000000000000000000000000000..43598debac252ffd22beb8690fef30745259c634 --- /dev/null +++ b/.venv/Lib/site-packages/sympy/plotting/pygletplot/plot_camera.py @@ -0,0 +1,124 @@ +import pyglet.gl as pgl +from sympy.plotting.pygletplot.plot_rotation import get_spherical_rotatation +from sympy.plotting.pygletplot.util import get_model_matrix, model_to_screen, \ + screen_to_model, vec_subs + + +class PlotCamera: + + min_dist = 0.05 + max_dist = 500.0 + + min_ortho_dist = 100.0 + max_ortho_dist = 10000.0 + + _default_dist = 6.0 + _default_ortho_dist = 600.0 + + rot_presets = { + 'xy': (0, 0, 0), + 'xz': (-90, 0, 0), + 'yz': (0, 90, 0), + 'perspective': (-45, 0, -45) + } + + def __init__(self, window, ortho=False): + self.window = window + self.axes = self.window.plot.axes + self.ortho = ortho + self.reset() + + def init_rot_matrix(self): + pgl.glPushMatrix() + pgl.glLoadIdentity() + self._rot = get_model_matrix() + pgl.glPopMatrix() + + def set_rot_preset(self, preset_name): + self.init_rot_matrix() + if preset_name not in self.rot_presets: + raise ValueError( + "%s is not a valid rotation preset." % preset_name) + r = self.rot_presets[preset_name] + self.euler_rotate(r[0], 1, 0, 0) + self.euler_rotate(r[1], 0, 1, 0) + self.euler_rotate(r[2], 0, 0, 1) + + def reset(self): + self._dist = 0.0 + self._x, self._y = 0.0, 0.0 + self._rot = None + if self.ortho: + self._dist = self._default_ortho_dist + else: + self._dist = self._default_dist + self.init_rot_matrix() + + def mult_rot_matrix(self, rot): + pgl.glPushMatrix() + pgl.glLoadMatrixf(rot) + pgl.glMultMatrixf(self._rot) + self._rot = get_model_matrix() + pgl.glPopMatrix() + + def setup_projection(self): + pgl.glMatrixMode(pgl.GL_PROJECTION) + pgl.glLoadIdentity() + if self.ortho: + # yep, this is pseudo ortho (don't tell anyone) + pgl.gluPerspective( + 0.3, float(self.window.width)/float(self.window.height), + self.min_ortho_dist - 0.01, self.max_ortho_dist + 0.01) + else: + pgl.gluPerspective( + 30.0, float(self.window.width)/float(self.window.height), + self.min_dist - 0.01, self.max_dist + 0.01) + pgl.glMatrixMode(pgl.GL_MODELVIEW) + + def _get_scale(self): + return 1.0, 1.0, 1.0 + + def apply_transformation(self): + pgl.glLoadIdentity() + pgl.glTranslatef(self._x, self._y, -self._dist) + if self._rot is not None: + pgl.glMultMatrixf(self._rot) + pgl.glScalef(*self._get_scale()) + + def spherical_rotate(self, p1, p2, sensitivity=1.0): + mat = get_spherical_rotatation(p1, p2, self.window.width, + self.window.height, sensitivity) + if mat is not None: + self.mult_rot_matrix(mat) + + def euler_rotate(self, angle, x, y, z): + pgl.glPushMatrix() + pgl.glLoadMatrixf(self._rot) + pgl.glRotatef(angle, x, y, z) + self._rot = get_model_matrix() + pgl.glPopMatrix() + + def zoom_relative(self, clicks, sensitivity): + + if self.ortho: + dist_d = clicks * sensitivity * 50.0 + min_dist = self.min_ortho_dist + max_dist = self.max_ortho_dist + else: + dist_d = clicks * sensitivity + min_dist = self.min_dist + max_dist = self.max_dist + + new_dist = (self._dist - dist_d) + if (clicks < 0 and new_dist < max_dist) or new_dist > min_dist: + self._dist = 
new_dist + + def mouse_translate(self, x, y, dx, dy): + pgl.glPushMatrix() + pgl.glLoadIdentity() + pgl.glTranslatef(0, 0, -self._dist) + z = model_to_screen(0, 0, 0)[2] + d = vec_subs(screen_to_model(x, y, z), screen_to_model(x - dx, y - dy, z)) + pgl.glPopMatrix() + self._x += d[0] + self._y += d[1] diff --git a/.venv/Lib/site-packages/sympy/plotting/pygletplot/plot_controller.py b/.venv/Lib/site-packages/sympy/plotting/pygletplot/plot_controller.py new file mode 100644 index 0000000000000000000000000000000000000000..aa7e01e6fd17fddf07b733442208a0a4c9d87d5b --- /dev/null +++ b/.venv/Lib/site-packages/sympy/plotting/pygletplot/plot_controller.py @@ -0,0 +1,218 @@ +from pyglet.window import key +from pyglet.window.mouse import LEFT, RIGHT, MIDDLE +from sympy.plotting.pygletplot.util import get_direction_vectors, get_basis_vectors + + +class PlotController: + + normal_mouse_sensitivity = 4.0 + modified_mouse_sensitivity = 1.0 + + normal_key_sensitivity = 160.0 + modified_key_sensitivity = 40.0 + + keymap = { + key.LEFT: 'left', + key.A: 'left', + key.NUM_4: 'left', + + key.RIGHT: 'right', + key.D: 'right', + key.NUM_6: 'right', + + key.UP: 'up', + key.W: 'up', + key.NUM_8: 'up', + + key.DOWN: 'down', + key.S: 'down', + key.NUM_2: 'down', + + key.Z: 'rotate_z_neg', + key.NUM_1: 'rotate_z_neg', + + key.C: 'rotate_z_pos', + key.NUM_3: 'rotate_z_pos', + + key.Q: 'spin_left', + key.NUM_7: 'spin_left', + key.E: 'spin_right', + key.NUM_9: 'spin_right', + + key.X: 'reset_camera', + key.NUM_5: 'reset_camera', + + key.NUM_ADD: 'zoom_in', + key.PAGEUP: 'zoom_in', + key.R: 'zoom_in', + + key.NUM_SUBTRACT: 'zoom_out', + key.PAGEDOWN: 'zoom_out', + key.F: 'zoom_out', + + key.RSHIFT: 'modify_sensitivity', + key.LSHIFT: 'modify_sensitivity', + + key.F1: 'rot_preset_xy', + key.F2: 'rot_preset_xz', + key.F3: 'rot_preset_yz', + key.F4: 'rot_preset_perspective', + + key.F5: 'toggle_axes', + key.F6: 'toggle_axe_colors', + + key.F8: 'save_image' + } + + def __init__(self, window, *, invert_mouse_zoom=False, **kwargs): + self.invert_mouse_zoom = invert_mouse_zoom + self.window = window + self.camera = window.camera + self.action = { + # Rotation around the view Y (up) vector + 'left': False, + 'right': False, + # Rotation around the view X vector + 'up': False, + 'down': False, + # Rotation around the view Z vector + 'spin_left': False, + 'spin_right': False, + # Rotation around the model Z vector + 'rotate_z_neg': False, + 'rotate_z_pos': False, + # Reset to the default rotation + 'reset_camera': False, + # Performs camera z-translation + 'zoom_in': False, + 'zoom_out': False, + # Use alternative sensitivity (speed) + 'modify_sensitivity': False, + # Rotation presets + 'rot_preset_xy': False, + 'rot_preset_xz': False, + 'rot_preset_yz': False, + 'rot_preset_perspective': False, + # axes + 'toggle_axes': False, + 'toggle_axe_colors': False, + # screenshot + 'save_image': False + } + + def update(self, dt): + z = 0 + if self.action['zoom_out']: + z -= 1 + if self.action['zoom_in']: + z += 1 + if z != 0: + self.camera.zoom_relative(z/10.0, self.get_key_sensitivity()/10.0) + + dx, dy, dz = 0, 0, 0 + if self.action['left']: + dx -= 1 + if self.action['right']: + dx += 1 + if self.action['up']: + dy -= 1 + if self.action['down']: + dy += 1 + if self.action['spin_left']: + dz += 1 + if self.action['spin_right']: + dz -= 1 + + if not self.is_2D(): + if dx != 0: + self.camera.euler_rotate(dx*dt*self.get_key_sensitivity(), + *(get_direction_vectors()[1])) + if dy != 0: + 
self.camera.euler_rotate(dy*dt*self.get_key_sensitivity(), + *(get_direction_vectors()[0])) + if dz != 0: + self.camera.euler_rotate(dz*dt*self.get_key_sensitivity(), + *(get_direction_vectors()[2])) + else: + self.camera.mouse_translate(0, 0, dx*dt*self.get_key_sensitivity(), + -dy*dt*self.get_key_sensitivity()) + + rz = 0 + if self.action['rotate_z_neg'] and not self.is_2D(): + rz -= 1 + if self.action['rotate_z_pos'] and not self.is_2D(): + rz += 1 + + if rz != 0: + self.camera.euler_rotate(rz*dt*self.get_key_sensitivity(), + *(get_basis_vectors()[2])) + + if self.action['reset_camera']: + self.camera.reset() + + if self.action['rot_preset_xy']: + self.camera.set_rot_preset('xy') + if self.action['rot_preset_xz']: + self.camera.set_rot_preset('xz') + if self.action['rot_preset_yz']: + self.camera.set_rot_preset('yz') + if self.action['rot_preset_perspective']: + self.camera.set_rot_preset('perspective') + + if self.action['toggle_axes']: + self.action['toggle_axes'] = False + self.camera.axes.toggle_visible() + + if self.action['toggle_axe_colors']: + self.action['toggle_axe_colors'] = False + self.camera.axes.toggle_colors() + + if self.action['save_image']: + self.action['save_image'] = False + self.window.plot.saveimage() + + return True + + def get_mouse_sensitivity(self): + if self.action['modify_sensitivity']: + return self.modified_mouse_sensitivity + else: + return self.normal_mouse_sensitivity + + def get_key_sensitivity(self): + if self.action['modify_sensitivity']: + return self.modified_key_sensitivity + else: + return self.normal_key_sensitivity + + def on_key_press(self, symbol, modifiers): + if symbol in self.keymap: + self.action[self.keymap[symbol]] = True + + def on_key_release(self, symbol, modifiers): + if symbol in self.keymap: + self.action[self.keymap[symbol]] = False + + def on_mouse_drag(self, x, y, dx, dy, buttons, modifiers): + if buttons & LEFT: + if self.is_2D(): + self.camera.mouse_translate(x, y, dx, dy) + else: + self.camera.spherical_rotate((x - dx, y - dy), (x, y), + self.get_mouse_sensitivity()) + if buttons & MIDDLE: + self.camera.zoom_relative([1, -1][self.invert_mouse_zoom]*dy, + self.get_mouse_sensitivity()/20.0) + if buttons & RIGHT: + self.camera.mouse_translate(x, y, dx, dy) + + def on_mouse_scroll(self, x, y, dx, dy): + self.camera.zoom_relative([1, -1][self.invert_mouse_zoom]*dy, + self.get_mouse_sensitivity()) + + def is_2D(self): + functions = self.window.plot._functions + for i in functions: + if len(functions[i].i_vars) > 1 or len(functions[i].d_vars) > 2: + return False + return True diff --git a/.venv/Lib/site-packages/sympy/plotting/pygletplot/plot_curve.py b/.venv/Lib/site-packages/sympy/plotting/pygletplot/plot_curve.py new file mode 100644 index 0000000000000000000000000000000000000000..6b97dac843f58c76694d424f0b0b7e3499ba5202 --- /dev/null +++ b/.venv/Lib/site-packages/sympy/plotting/pygletplot/plot_curve.py @@ -0,0 +1,82 @@ +import pyglet.gl as pgl +from sympy.core import S +from sympy.plotting.pygletplot.plot_mode_base import PlotModeBase + + +class PlotCurve(PlotModeBase): + + style_override = 'wireframe' + + def _on_calculate_verts(self): + self.t_interval = self.intervals[0] + self.t_set = list(self.t_interval.frange()) + self.bounds = [[S.Infinity, S.NegativeInfinity, 0], + [S.Infinity, S.NegativeInfinity, 0], + [S.Infinity, S.NegativeInfinity, 0]] + evaluate = self._get_evaluator() + + self._calculating_verts_pos = 0.0 + self._calculating_verts_len = float(self.t_interval.v_len) + + self.verts = [] + b = self.bounds + for t 
in self.t_set: + try: + _e = evaluate(t) # calculate vertex + except (NameError, ZeroDivisionError): + _e = None + if _e is not None: # update bounding box + for axis in range(3): + b[axis][0] = min([b[axis][0], _e[axis]]) + b[axis][1] = max([b[axis][1], _e[axis]]) + self.verts.append(_e) + self._calculating_verts_pos += 1.0 + + for axis in range(3): + b[axis][2] = b[axis][1] - b[axis][0] + if b[axis][2] == 0.0: + b[axis][2] = 1.0 + + self.push_wireframe(self.draw_verts(False)) + + def _on_calculate_cverts(self): + if not self.verts or not self.color: + return + + def set_work_len(n): + self._calculating_cverts_len = float(n) + + def inc_work_pos(): + self._calculating_cverts_pos += 1.0 + set_work_len(1) + self._calculating_cverts_pos = 0 + self.cverts = self.color.apply_to_curve(self.verts, + self.t_set, + set_len=set_work_len, + inc_pos=inc_work_pos) + self.push_wireframe(self.draw_verts(True)) + + def calculate_one_cvert(self, t): + vert = self.verts[t] + return self.color(vert[0], vert[1], vert[2], + self.t_set[t], None) + + def draw_verts(self, use_cverts): + def f(): + pgl.glBegin(pgl.GL_LINE_STRIP) + for t in range(len(self.t_set)): + p = self.verts[t] + if p is None: + pgl.glEnd() + pgl.glBegin(pgl.GL_LINE_STRIP) + continue + if use_cverts: + c = self.cverts[t] + if c is None: + c = (0, 0, 0) + pgl.glColor3f(*c) + else: + pgl.glColor3f(*self.default_wireframe_color) + pgl.glVertex3f(*p) + pgl.glEnd() + return f diff --git a/.venv/Lib/site-packages/sympy/plotting/pygletplot/plot_interval.py b/.venv/Lib/site-packages/sympy/plotting/pygletplot/plot_interval.py new file mode 100644 index 0000000000000000000000000000000000000000..085ab096915bbc4a3761b71736b4dd14f1ff779f --- /dev/null +++ b/.venv/Lib/site-packages/sympy/plotting/pygletplot/plot_interval.py @@ -0,0 +1,181 @@ +from sympy.core.singleton import S +from sympy.core.symbol import Symbol +from sympy.core.sympify import sympify +from sympy.core.numbers import Integer + + +class PlotInterval: + """ + """ + _v, _v_min, _v_max, _v_steps = None, None, None, None + + def require_all_args(f): + def check(self, *args, **kwargs): + for g in [self._v, self._v_min, self._v_max, self._v_steps]: + if g is None: + raise ValueError("PlotInterval is incomplete.") + return f(self, *args, **kwargs) + return check + + def __init__(self, *args): + if len(args) == 1: + if isinstance(args[0], PlotInterval): + self.fill_from(args[0]) + return + elif isinstance(args[0], str): + try: + args = eval(args[0]) + except TypeError: + s_eval_error = "Could not interpret string %s." + raise ValueError(s_eval_error % (args[0])) + elif isinstance(args[0], (tuple, list)): + args = args[0] + else: + raise ValueError("Not an interval.") + if not isinstance(args, (tuple, list)) or len(args) > 4: + f_error = "PlotInterval must be a tuple or list of length 4 or less." 
+ raise ValueError(f_error) + + args = list(args) + if len(args) > 0 and (args[0] is None or isinstance(args[0], Symbol)): + self.v = args.pop(0) + if len(args) in [2, 3]: + self.v_min = args.pop(0) + self.v_max = args.pop(0) + if len(args) == 1: + self.v_steps = args.pop(0) + elif len(args) == 1: + self.v_steps = args.pop(0) + + def get_v(self): + return self._v + + def set_v(self, v): + if v is None: + self._v = None + return + if not isinstance(v, Symbol): + raise ValueError("v must be a SymPy Symbol.") + self._v = v + + def get_v_min(self): + return self._v_min + + def set_v_min(self, v_min): + if v_min is None: + self._v_min = None + return + try: + self._v_min = sympify(v_min) + float(self._v_min.evalf()) + except TypeError: + raise ValueError("v_min could not be interpreted as a number.") + + def get_v_max(self): + return self._v_max + + def set_v_max(self, v_max): + if v_max is None: + self._v_max = None + return + try: + self._v_max = sympify(v_max) + float(self._v_max.evalf()) + except TypeError: + raise ValueError("v_max could not be interpreted as a number.") + + def get_v_steps(self): + return self._v_steps + + def set_v_steps(self, v_steps): + if v_steps is None: + self._v_steps = None + return + if isinstance(v_steps, int): + v_steps = Integer(v_steps) + elif not isinstance(v_steps, Integer): + raise ValueError("v_steps must be an int or SymPy Integer.") + if v_steps <= S.Zero: + raise ValueError("v_steps must be positive.") + self._v_steps = v_steps + + @require_all_args + def get_v_len(self): + return self.v_steps + 1 + + v = property(get_v, set_v) + v_min = property(get_v_min, set_v_min) + v_max = property(get_v_max, set_v_max) + v_steps = property(get_v_steps, set_v_steps) + v_len = property(get_v_len) + + def fill_from(self, b): + if b.v is not None: + self.v = b.v + if b.v_min is not None: + self.v_min = b.v_min + if b.v_max is not None: + self.v_max = b.v_max + if b.v_steps is not None: + self.v_steps = b.v_steps + + @staticmethod + def try_parse(*args): + """ + Returns a PlotInterval if args can be interpreted + as such, otherwise None. + """ + if len(args) == 1 and isinstance(args[0], PlotInterval): + return args[0] + try: + return PlotInterval(*args) + except ValueError: + return None + + def _str_base(self): + return ",".join([str(self.v), str(self.v_min), + str(self.v_max), str(self.v_steps)]) + + def __repr__(self): + """ + A string representing the interval in class constructor form. + """ + return "PlotInterval(%s)" % (self._str_base()) + + def __str__(self): + """ + A string representing the interval in list form. + """ + return "[%s]" % (self._str_base()) + + @require_all_args + def assert_complete(self): + pass + + @require_all_args + def vrange(self): + """ + Yields v_steps+1 SymPy numbers ranging from + v_min to v_max. + """ + d = (self.v_max - self.v_min) / self.v_steps + for i in range(self.v_steps + 1): + a = self.v_min + (d * Integer(i)) + yield a + + @require_all_args + def vrange2(self): + """ + Yields v_steps pairs of SymPy numbers ranging from + (v_min, v_min + step) to (v_max - step, v_max). 
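[Illustrative aside, not part of the diffed plot_interval.py.] PlotInterval accepts the same [var, min, max, steps] shorthand the Plot docstrings describe, and vrange/vrange2/frange then walk the interval in v_steps increments. A small sketch, assuming only a SymPy install (this module does not import pyglet):

    from sympy.abc import x
    from sympy.plotting.pygletplot.plot_interval import PlotInterval

    i = PlotInterval(x, -1, 1, 4)      # [var, min, max, steps]
    print(list(i.frange()))            # [-1.0, -0.5, 0.0, 0.5, 1.0]
    print(list(i.vrange2()))           # 4 adjacent (start, end) pairs covering [-1, 1]

    # try_parse returns None instead of raising when args cannot be interpreted;
    # here v_steps is simply left unset, to be filled in later via fill_from.
    partial = PlotInterval.try_parse([x, -1, 1])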
+ """ + d = (self.v_max - self.v_min) / self.v_steps + a = self.v_min + (d * S.Zero) + for i in range(self.v_steps): + b = self.v_min + (d * Integer(i + 1)) + yield a, b + a = b + + def frange(self): + for i in self.vrange(): + yield float(i.evalf()) diff --git a/.venv/Lib/site-packages/sympy/plotting/pygletplot/plot_mode.py b/.venv/Lib/site-packages/sympy/plotting/pygletplot/plot_mode.py new file mode 100644 index 0000000000000000000000000000000000000000..f4ee00db9177b98b3259438949836fe5b69416c2 --- /dev/null +++ b/.venv/Lib/site-packages/sympy/plotting/pygletplot/plot_mode.py @@ -0,0 +1,400 @@ +from .plot_interval import PlotInterval +from .plot_object import PlotObject +from .util import parse_option_string +from sympy.core.symbol import Symbol +from sympy.core.sympify import sympify +from sympy.geometry.entity import GeometryEntity +from sympy.utilities.iterables import is_sequence + + +class PlotMode(PlotObject): + """ + Grandparent class for plotting + modes. Serves as interface for + registration, lookup, and init + of modes. + + To create a new plot mode, + inherit from PlotModeBase + or one of its children, such + as PlotSurface or PlotCurve. + """ + + ## Class-level attributes + ## used to register and lookup + ## plot modes. See PlotModeBase + ## for descriptions and usage. + + i_vars, d_vars = '', '' + intervals = [] + aliases = [] + is_default = False + + ## Draw is the only method here which + ## is meant to be overridden in child + ## classes, and PlotModeBase provides + ## a base implementation. + def draw(self): + raise NotImplementedError() + + ## Everything else in this file has to + ## do with registration and retrieval + ## of plot modes. This is where I've + ## hidden much of the ugliness of automatic + ## plot mode divination... + + ## Plot mode registry data structures + _mode_alias_list = [] + _mode_map = { + 1: {1: {}, 2: {}}, + 2: {1: {}, 2: {}}, + 3: {1: {}, 2: {}}, + } # [d][i][alias_str]: class + _mode_default_map = { + 1: {}, + 2: {}, + 3: {}, + } # [d][i]: class + _i_var_max, _d_var_max = 2, 3 + + def __new__(cls, *args, **kwargs): + """ + This is the function which interprets + arguments given to Plot.__init__ and + Plot.__setattr__. Returns an initialized + instance of the appropriate child class. + """ + + newargs, newkwargs = PlotMode._extract_options(args, kwargs) + mode_arg = newkwargs.get('mode', '') + + # Interpret the arguments + d_vars, intervals = PlotMode._interpret_args(newargs) + i_vars = PlotMode._find_i_vars(d_vars, intervals) + i, d = max([len(i_vars), len(intervals)]), len(d_vars) + + # Find the appropriate mode + subcls = PlotMode._get_mode(mode_arg, i, d) + + # Create the object + o = object.__new__(subcls) + + # Do some setup for the mode instance + o.d_vars = d_vars + o._fill_i_vars(i_vars) + o._fill_intervals(intervals) + o.options = newkwargs + + return o + + @staticmethod + def _get_mode(mode_arg, i_var_count, d_var_count): + """ + Tries to return an appropriate mode class. + Intended to be called only by __new__. + + mode_arg + Can be a string or a class. If it is a + PlotMode subclass, it is simply returned. + If it is a string, it can an alias for + a mode or an empty string. In the latter + case, we try to find a default mode for + the i_var_count and d_var_count. + + i_var_count + The number of independent variables + needed to evaluate the d_vars. + + d_var_count + The number of dependent variables; + usually the number of functions to + be evaluated in plotting. 
+ + For example, a Cartesian function y = f(x) has + one i_var (x) and one d_var (y). A parametric + form x,y,z = f(u,v), f(u,v), f(u,v) has two + two i_vars (u,v) and three d_vars (x,y,z). + """ + # if the mode_arg is simply a PlotMode class, + # check that the mode supports the numbers + # of independent and dependent vars, then + # return it + try: + m = None + if issubclass(mode_arg, PlotMode): + m = mode_arg + except TypeError: + pass + if m: + if not m._was_initialized: + raise ValueError(("To use unregistered plot mode %s " + "you must first call %s._init_mode().") + % (m.__name__, m.__name__)) + if d_var_count != m.d_var_count: + raise ValueError(("%s can only plot functions " + "with %i dependent variables.") + % (m.__name__, + m.d_var_count)) + if i_var_count > m.i_var_count: + raise ValueError(("%s cannot plot functions " + "with more than %i independent " + "variables.") + % (m.__name__, + m.i_var_count)) + return m + # If it is a string, there are two possibilities. + if isinstance(mode_arg, str): + i, d = i_var_count, d_var_count + if i > PlotMode._i_var_max: + raise ValueError(var_count_error(True, True)) + if d > PlotMode._d_var_max: + raise ValueError(var_count_error(False, True)) + # If the string is '', try to find a suitable + # default mode + if not mode_arg: + return PlotMode._get_default_mode(i, d) + # Otherwise, interpret the string as a mode + # alias (e.g. 'cartesian', 'parametric', etc) + else: + return PlotMode._get_aliased_mode(mode_arg, i, d) + else: + raise ValueError("PlotMode argument must be " + "a class or a string") + + @staticmethod + def _get_default_mode(i, d, i_vars=-1): + if i_vars == -1: + i_vars = i + try: + return PlotMode._mode_default_map[d][i] + except KeyError: + # Keep looking for modes in higher i var counts + # which support the given d var count until we + # reach the max i_var count. + if i < PlotMode._i_var_max: + return PlotMode._get_default_mode(i + 1, d, i_vars) + else: + raise ValueError(("Couldn't find a default mode " + "for %i independent and %i " + "dependent variables.") % (i_vars, d)) + + @staticmethod + def _get_aliased_mode(alias, i, d, i_vars=-1): + if i_vars == -1: + i_vars = i + if alias not in PlotMode._mode_alias_list: + raise ValueError(("Couldn't find a mode called" + " %s. Known modes: %s.") + % (alias, ", ".join(PlotMode._mode_alias_list))) + try: + return PlotMode._mode_map[d][i][alias] + except TypeError: + # Keep looking for modes in higher i var counts + # which support the given d var count and alias + # until we reach the max i_var count. + if i < PlotMode._i_var_max: + return PlotMode._get_aliased_mode(alias, i + 1, d, i_vars) + else: + raise ValueError(("Couldn't find a %s mode " + "for %i independent and %i " + "dependent variables.") + % (alias, i_vars, d)) + + @classmethod + def _register(cls): + """ + Called once for each user-usable plot mode. + For Cartesian2D, it is invoked after the + class definition: Cartesian2D._register() + """ + name = cls.__name__ + cls._init_mode() + + try: + i, d = cls.i_var_count, cls.d_var_count + # Add the mode to _mode_map under all + # given aliases + for a in cls.aliases: + if a not in PlotMode._mode_alias_list: + # Also track valid aliases, so + # we can quickly know when given + # an invalid one in _get_mode. + PlotMode._mode_alias_list.append(a) + PlotMode._mode_map[d][i][a] = cls + if cls.is_default: + # If this mode was marked as the + # default for this d,i combination, + # also set that. 
+ PlotMode._mode_default_map[d][i] = cls + + except Exception as e: + raise RuntimeError(("Failed to register " + "plot mode %s. Reason: %s") + % (name, (str(e)))) + + @classmethod + def _init_mode(cls): + """ + Initializes the plot mode based on + the 'mode-specific parameters' above. + Only intended to be called by + PlotMode._register(). To use a mode without + registering it, you can directly call + ModeSubclass._init_mode(). + """ + def symbols_list(symbol_str): + return [Symbol(s) for s in symbol_str] + + # Convert the vars strs into + # lists of symbols. + cls.i_vars = symbols_list(cls.i_vars) + cls.d_vars = symbols_list(cls.d_vars) + + # Var count is used often, calculate + # it once here + cls.i_var_count = len(cls.i_vars) + cls.d_var_count = len(cls.d_vars) + + if cls.i_var_count > PlotMode._i_var_max: + raise ValueError(var_count_error(True, False)) + if cls.d_var_count > PlotMode._d_var_max: + raise ValueError(var_count_error(False, False)) + + # Try to use first alias as primary_alias + if len(cls.aliases) > 0: + cls.primary_alias = cls.aliases[0] + else: + cls.primary_alias = cls.__name__ + + di = cls.intervals + if len(di) != cls.i_var_count: + raise ValueError("Plot mode must provide a " + "default interval for each i_var.") + for i in range(cls.i_var_count): + # default intervals must be given [min,max,steps] + # (no var, but they must be in the same order as i_vars) + if len(di[i]) != 3: + raise ValueError("length should be equal to 3") + + # Initialize an incomplete interval, + # to later be filled with a var when + # the mode is instantiated. + di[i] = PlotInterval(None, *di[i]) + + # To prevent people from using modes + # without these required fields set up. + cls._was_initialized = True + + _was_initialized = False + + ## Initializer Helper Methods + + @staticmethod + def _find_i_vars(functions, intervals): + i_vars = [] + + # First, collect i_vars in the + # order they are given in any + # intervals. + for i in intervals: + if i.v is None: + continue + elif i.v in i_vars: + raise ValueError(("Multiple intervals given " + "for %s.") % (str(i.v))) + i_vars.append(i.v) + + # Then, find any remaining + # i_vars in given functions + # (aka d_vars) + for f in functions: + for a in f.free_symbols: + if a not in i_vars: + i_vars.append(a) + + return i_vars + + def _fill_i_vars(self, i_vars): + # copy default i_vars + self.i_vars = [Symbol(str(i)) for i in self.i_vars] + # replace with given i_vars + for i in range(len(i_vars)): + self.i_vars[i] = i_vars[i] + + def _fill_intervals(self, intervals): + # copy default intervals + self.intervals = [PlotInterval(i) for i in self.intervals] + # track i_vars used so far + v_used = [] + # fill copy of default + # intervals with given info + for i in range(len(intervals)): + self.intervals[i].fill_from(intervals[i]) + if self.intervals[i].v is not None: + v_used.append(self.intervals[i].v) + # Find any orphan intervals and + # assign them i_vars + for i in range(len(self.intervals)): + if self.intervals[i].v is None: + u = [v for v in self.i_vars if v not in v_used] + if len(u) == 0: + raise ValueError("length should not be equal to 0") + self.intervals[i].v = u[0] + v_used.append(u[0]) + + @staticmethod + def _interpret_args(args): + interval_wrong_order = "PlotInterval %s was given before any function(s)." + interpret_error = "Could not interpret %s as a function or interval." 
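+ # Parsing strategy: a single GeometryEntity argument supplies both its coordinate
+ # functions and its plot interval; otherwise each argument is tried first as a
+ # PlotInterval and, failing that, sympified into a function.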
+ + functions, intervals = [], [] + if isinstance(args[0], GeometryEntity): + for coords in list(args[0].arbitrary_point()): + functions.append(coords) + intervals.append(PlotInterval.try_parse(args[0].plot_interval())) + else: + for a in args: + i = PlotInterval.try_parse(a) + if i is not None: + if len(functions) == 0: + raise ValueError(interval_wrong_order % (str(i))) + else: + intervals.append(i) + else: + if is_sequence(a, include=str): + raise ValueError(interpret_error % (str(a))) + try: + f = sympify(a) + functions.append(f) + except TypeError: + raise ValueError(interpret_error % str(a)) + + return functions, intervals + + @staticmethod + def _extract_options(args, kwargs): + newkwargs, newargs = {}, [] + for a in args: + if isinstance(a, str): + newkwargs = dict(newkwargs, **parse_option_string(a)) + else: + newargs.append(a) + newkwargs = dict(newkwargs, **kwargs) + return newargs, newkwargs + + +def var_count_error(is_independent, is_plotting): + """ + Used to format an error message which differs + slightly in 4 places. + """ + if is_plotting: + v = "Plotting" + else: + v = "Registering plot modes" + if is_independent: + n, s = PlotMode._i_var_max, "independent" + else: + n, s = PlotMode._d_var_max, "dependent" + return ("%s with more than %i %s variables " + "is not supported.") % (v, n, s) diff --git a/.venv/Lib/site-packages/sympy/plotting/pygletplot/plot_mode_base.py b/.venv/Lib/site-packages/sympy/plotting/pygletplot/plot_mode_base.py new file mode 100644 index 0000000000000000000000000000000000000000..2c6503650afda122e271bdecb2365c8fa20f2376 --- /dev/null +++ b/.venv/Lib/site-packages/sympy/plotting/pygletplot/plot_mode_base.py @@ -0,0 +1,378 @@ +import pyglet.gl as pgl +from sympy.core import S +from sympy.plotting.pygletplot.color_scheme import ColorScheme +from sympy.plotting.pygletplot.plot_mode import PlotMode +from sympy.utilities.iterables import is_sequence +from time import sleep +from threading import Thread, Event, RLock +import warnings + + +class PlotModeBase(PlotMode): + """ + Intended parent class for plotting + modes. Provides base functionality + in conjunction with its parent, + PlotMode. + """ + + ## + ## Class-Level Attributes + ## + + """ + The following attributes are meant + to be set at the class level, and serve + as parameters to the plot mode registry + (in PlotMode). See plot_modes.py for + concrete examples. + """ + + """ + i_vars + 'x' for Cartesian2D + 'xy' for Cartesian3D + etc. + + d_vars + 'y' for Cartesian2D + 'r' for Polar + etc. + """ + i_vars, d_vars = '', '' + + """ + intervals + Default intervals for each i_var, and in the + same order. Specified [min, max, steps]. + No variable can be given (it is bound later). + """ + intervals = [] + + """ + aliases + A list of strings which can be used to + access this mode. + 'cartesian' for Cartesian2D and Cartesian3D + 'polar' for Polar + 'cylindrical', 'polar' for Cylindrical + + Note that _init_mode chooses the first alias + in the list as the mode's primary_alias, which + will be displayed to the end user in certain + contexts. + """ + aliases = [] + + """ + is_default + Whether to set this mode as the default + for arguments passed to PlotMode() containing + the same number of d_vars as this mode and + at most the same number of i_vars. + """ + is_default = False + + """ + All of the above attributes are defined in PlotMode. + The following ones are specific to PlotModeBase. + """ + + """ + A list of the render styles. Do not modify. 
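+ The values are bit flags (wireframe=1, solid=2, both=3), so draw() can test for each component with a bitwise AND.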
+ """ + styles = {'wireframe': 1, 'solid': 2, 'both': 3} + + """ + style_override + Always use this style if not blank. + """ + style_override = '' + + """ + default_wireframe_color + default_solid_color + Can be used when color is None or being calculated. + Used by PlotCurve and PlotSurface, but not anywhere + in PlotModeBase. + """ + + default_wireframe_color = (0.85, 0.85, 0.85) + default_solid_color = (0.6, 0.6, 0.9) + default_rot_preset = 'xy' + + ## + ## Instance-Level Attributes + ## + + ## 'Abstract' member functions + def _get_evaluator(self): + if self.use_lambda_eval: + try: + e = self._get_lambda_evaluator() + return e + except Exception: + warnings.warn("\nWarning: creating lambda evaluator failed. " + "Falling back on SymPy subs evaluator.") + return self._get_sympy_evaluator() + + def _get_sympy_evaluator(self): + raise NotImplementedError() + + def _get_lambda_evaluator(self): + raise NotImplementedError() + + def _on_calculate_verts(self): + raise NotImplementedError() + + def _on_calculate_cverts(self): + raise NotImplementedError() + + ## Base member functions + def __init__(self, *args, bounds_callback=None, **kwargs): + self.verts = [] + self.cverts = [] + self.bounds = [[S.Infinity, S.NegativeInfinity, 0], + [S.Infinity, S.NegativeInfinity, 0], + [S.Infinity, S.NegativeInfinity, 0]] + self.cbounds = [[S.Infinity, S.NegativeInfinity, 0], + [S.Infinity, S.NegativeInfinity, 0], + [S.Infinity, S.NegativeInfinity, 0]] + + self._draw_lock = RLock() + + self._calculating_verts = Event() + self._calculating_cverts = Event() + self._calculating_verts_pos = 0.0 + self._calculating_verts_len = 0.0 + self._calculating_cverts_pos = 0.0 + self._calculating_cverts_len = 0.0 + + self._max_render_stack_size = 3 + self._draw_wireframe = [-1] + self._draw_solid = [-1] + + self._style = None + self._color = None + + self.predraw = [] + self.postdraw = [] + + self.use_lambda_eval = self.options.pop('use_sympy_eval', None) is None + self.style = self.options.pop('style', '') + self.color = self.options.pop('color', 'rainbow') + self.bounds_callback = bounds_callback + + self._on_calculate() + + def synchronized(f): + def w(self, *args, **kwargs): + self._draw_lock.acquire() + try: + r = f(self, *args, **kwargs) + return r + finally: + self._draw_lock.release() + return w + + @synchronized + def push_wireframe(self, function): + """ + Push a function which performs gl commands + used to build a display list. (The list is + built outside of the function) + """ + assert callable(function) + self._draw_wireframe.append(function) + if len(self._draw_wireframe) > self._max_render_stack_size: + del self._draw_wireframe[1] # leave marker element + + @synchronized + def push_solid(self, function): + """ + Push a function which performs gl commands + used to build a display list. 
(The list is + built outside of the function) + """ + assert callable(function) + self._draw_solid.append(function) + if len(self._draw_solid) > self._max_render_stack_size: + del self._draw_solid[1] # leave marker element + + def _create_display_list(self, function): + dl = pgl.glGenLists(1) + pgl.glNewList(dl, pgl.GL_COMPILE) + function() + pgl.glEndList() + return dl + + def _render_stack_top(self, render_stack): + top = render_stack[-1] + if top == -1: + return -1 # nothing to display + elif callable(top): + dl = self._create_display_list(top) + render_stack[-1] = (dl, top) + return dl # display newly added list + elif len(top) == 2: + if pgl.GL_TRUE == pgl.glIsList(top[0]): + return top[0] # display stored list + dl = self._create_display_list(top[1]) + render_stack[-1] = (dl, top[1]) + return dl # display regenerated list + + def _draw_solid_display_list(self, dl): + pgl.glPushAttrib(pgl.GL_ENABLE_BIT | pgl.GL_POLYGON_BIT) + pgl.glPolygonMode(pgl.GL_FRONT_AND_BACK, pgl.GL_FILL) + pgl.glCallList(dl) + pgl.glPopAttrib() + + def _draw_wireframe_display_list(self, dl): + pgl.glPushAttrib(pgl.GL_ENABLE_BIT | pgl.GL_POLYGON_BIT) + pgl.glPolygonMode(pgl.GL_FRONT_AND_BACK, pgl.GL_LINE) + pgl.glEnable(pgl.GL_POLYGON_OFFSET_LINE) + pgl.glPolygonOffset(-0.005, -50.0) + pgl.glCallList(dl) + pgl.glPopAttrib() + + @synchronized + def draw(self): + for f in self.predraw: + if callable(f): + f() + if self.style_override: + style = self.styles[self.style_override] + else: + style = self.styles[self._style] + # Draw solid component if style includes solid + if style & 2: + dl = self._render_stack_top(self._draw_solid) + if dl > 0 and pgl.GL_TRUE == pgl.glIsList(dl): + self._draw_solid_display_list(dl) + # Draw wireframe component if style includes wireframe + if style & 1: + dl = self._render_stack_top(self._draw_wireframe) + if dl > 0 and pgl.GL_TRUE == pgl.glIsList(dl): + self._draw_wireframe_display_list(dl) + for f in self.postdraw: + if callable(f): + f() + + def _on_change_color(self, color): + Thread(target=self._calculate_cverts).start() + + def _on_calculate(self): + Thread(target=self._calculate_all).start() + + def _calculate_all(self): + self._calculate_verts() + self._calculate_cverts() + + def _calculate_verts(self): + if self._calculating_verts.is_set(): + return + self._calculating_verts.set() + try: + self._on_calculate_verts() + finally: + self._calculating_verts.clear() + if callable(self.bounds_callback): + self.bounds_callback() + + def _calculate_cverts(self): + if self._calculating_verts.is_set(): + return + while self._calculating_cverts.is_set(): + sleep(0) # wait for previous calculation + self._calculating_cverts.set() + try: + self._on_calculate_cverts() + finally: + self._calculating_cverts.clear() + + def _get_calculating_verts(self): + return self._calculating_verts.is_set() + + def _get_calculating_verts_pos(self): + return self._calculating_verts_pos + + def _get_calculating_verts_len(self): + return self._calculating_verts_len + + def _get_calculating_cverts(self): + return self._calculating_cverts.is_set() + + def _get_calculating_cverts_pos(self): + return self._calculating_cverts_pos + + def _get_calculating_cverts_len(self): + return self._calculating_cverts_len + + ## Property handlers + def _get_style(self): + return self._style + + @synchronized + def _set_style(self, v): + if v is None: + return + if v == '': + step_max = 0 + for i in self.intervals: + if i.v_steps is None: + continue + step_max = max([step_max, int(i.v_steps)]) + v = ['both', 
'solid'][step_max > 40] + if v not in self.styles: + raise ValueError("v should be there in self.styles") + if v == self._style: + return + self._style = v + + def _get_color(self): + return self._color + + @synchronized + def _set_color(self, v): + try: + if v is not None: + if is_sequence(v): + v = ColorScheme(*v) + else: + v = ColorScheme(v) + if repr(v) == repr(self._color): + return + self._on_change_color(v) + self._color = v + except Exception as e: + raise RuntimeError("Color change failed. " + "Reason: %s" % (str(e))) + + style = property(_get_style, _set_style) + color = property(_get_color, _set_color) + + calculating_verts = property(_get_calculating_verts) + calculating_verts_pos = property(_get_calculating_verts_pos) + calculating_verts_len = property(_get_calculating_verts_len) + + calculating_cverts = property(_get_calculating_cverts) + calculating_cverts_pos = property(_get_calculating_cverts_pos) + calculating_cverts_len = property(_get_calculating_cverts_len) + + ## String representations + + def __str__(self): + f = ", ".join(str(d) for d in self.d_vars) + o = "'mode=%s'" % (self.primary_alias) + return ", ".join([f, o]) + + def __repr__(self): + f = ", ".join(str(d) for d in self.d_vars) + i = ", ".join(str(i) for i in self.intervals) + d = [('mode', self.primary_alias), + ('color', str(self.color)), + ('style', str(self.style))] + + o = "'%s'" % ("; ".join("%s=%s" % (k, v) + for k, v in d if v != 'None')) + return ", ".join([f, i, o]) diff --git a/.venv/Lib/site-packages/sympy/plotting/pygletplot/plot_modes.py b/.venv/Lib/site-packages/sympy/plotting/pygletplot/plot_modes.py new file mode 100644 index 0000000000000000000000000000000000000000..e78e0b4ce291b071f684fa3ffc02f456dffe0023 --- /dev/null +++ b/.venv/Lib/site-packages/sympy/plotting/pygletplot/plot_modes.py @@ -0,0 +1,209 @@ +from sympy.utilities.lambdify import lambdify +from sympy.core.numbers import pi +from sympy.functions import sin, cos +from sympy.plotting.pygletplot.plot_curve import PlotCurve +from sympy.plotting.pygletplot.plot_surface import PlotSurface + +from math import sin as p_sin +from math import cos as p_cos + + +def float_vec3(f): + def inner(*args): + v = f(*args) + return float(v[0]), float(v[1]), float(v[2]) + return inner + + +class Cartesian2D(PlotCurve): + i_vars, d_vars = 'x', 'y' + intervals = [[-5, 5, 100]] + aliases = ['cartesian'] + is_default = True + + def _get_sympy_evaluator(self): + fy = self.d_vars[0] + x = self.t_interval.v + + @float_vec3 + def e(_x): + return (_x, fy.subs(x, _x), 0.0) + return e + + def _get_lambda_evaluator(self): + fy = self.d_vars[0] + x = self.t_interval.v + return lambdify([x], [x, fy, 0.0]) + + +class Cartesian3D(PlotSurface): + i_vars, d_vars = 'xy', 'z' + intervals = [[-1, 1, 40], [-1, 1, 40]] + aliases = ['cartesian', 'monge'] + is_default = True + + def _get_sympy_evaluator(self): + fz = self.d_vars[0] + x = self.u_interval.v + y = self.v_interval.v + + @float_vec3 + def e(_x, _y): + return (_x, _y, fz.subs(x, _x).subs(y, _y)) + return e + + def _get_lambda_evaluator(self): + fz = self.d_vars[0] + x = self.u_interval.v + y = self.v_interval.v + return lambdify([x, y], [x, y, fz]) + + +class ParametricCurve2D(PlotCurve): + i_vars, d_vars = 't', 'xy' + intervals = [[0, 2*pi, 100]] + aliases = ['parametric'] + is_default = True + + def _get_sympy_evaluator(self): + fx, fy = self.d_vars + t = self.t_interval.v + + @float_vec3 + def e(_t): + return (fx.subs(t, _t), fy.subs(t, _t), 0.0) + return e + + def _get_lambda_evaluator(self): + fx, fy = 
self.d_vars + t = self.t_interval.v + return lambdify([t], [fx, fy, 0.0]) + + +class ParametricCurve3D(PlotCurve): + i_vars, d_vars = 't', 'xyz' + intervals = [[0, 2*pi, 100]] + aliases = ['parametric'] + is_default = True + + def _get_sympy_evaluator(self): + fx, fy, fz = self.d_vars + t = self.t_interval.v + + @float_vec3 + def e(_t): + return (fx.subs(t, _t), fy.subs(t, _t), fz.subs(t, _t)) + return e + + def _get_lambda_evaluator(self): + fx, fy, fz = self.d_vars + t = self.t_interval.v + return lambdify([t], [fx, fy, fz]) + + +class ParametricSurface(PlotSurface): + i_vars, d_vars = 'uv', 'xyz' + intervals = [[-1, 1, 40], [-1, 1, 40]] + aliases = ['parametric'] + is_default = True + + def _get_sympy_evaluator(self): + fx, fy, fz = self.d_vars + u = self.u_interval.v + v = self.v_interval.v + + @float_vec3 + def e(_u, _v): + return (fx.subs(u, _u).subs(v, _v), + fy.subs(u, _u).subs(v, _v), + fz.subs(u, _u).subs(v, _v)) + return e + + def _get_lambda_evaluator(self): + fx, fy, fz = self.d_vars + u = self.u_interval.v + v = self.v_interval.v + return lambdify([u, v], [fx, fy, fz]) + + +class Polar(PlotCurve): + i_vars, d_vars = 't', 'r' + intervals = [[0, 2*pi, 100]] + aliases = ['polar'] + is_default = False + + def _get_sympy_evaluator(self): + fr = self.d_vars[0] + t = self.t_interval.v + + def e(_t): + _r = float(fr.subs(t, _t)) + return (_r*p_cos(_t), _r*p_sin(_t), 0.0) + return e + + def _get_lambda_evaluator(self): + fr = self.d_vars[0] + t = self.t_interval.v + fx, fy = fr*cos(t), fr*sin(t) + return lambdify([t], [fx, fy, 0.0]) + + +class Cylindrical(PlotSurface): + i_vars, d_vars = 'th', 'r' + intervals = [[0, 2*pi, 40], [-1, 1, 20]] + aliases = ['cylindrical', 'polar'] + is_default = False + + def _get_sympy_evaluator(self): + fr = self.d_vars[0] + t = self.u_interval.v + h = self.v_interval.v + + def e(_t, _h): + _r = float(fr.subs(t, _t).subs(h, _h)) + return (_r*p_cos(_t), _r*p_sin(_t), _h) + return e + + def _get_lambda_evaluator(self): + fr = self.d_vars[0] + t = self.u_interval.v + h = self.v_interval.v + fx, fy = fr*cos(t), fr*sin(t) + return lambdify([t, h], [fx, fy, h]) + + +class Spherical(PlotSurface): + i_vars, d_vars = 'tp', 'r' + intervals = [[0, 2*pi, 40], [0, pi, 20]] + aliases = ['spherical'] + is_default = False + + def _get_sympy_evaluator(self): + fr = self.d_vars[0] + t = self.u_interval.v + p = self.v_interval.v + + def e(_t, _p): + _r = float(fr.subs(t, _t).subs(p, _p)) + return (_r*p_cos(_t)*p_sin(_p), + _r*p_sin(_t)*p_sin(_p), + _r*p_cos(_p)) + return e + + def _get_lambda_evaluator(self): + fr = self.d_vars[0] + t = self.u_interval.v + p = self.v_interval.v + fx = fr * cos(t) * sin(p) + fy = fr * sin(t) * sin(p) + fz = fr * cos(p) + return lambdify([t, p], [fx, fy, fz]) + +Cartesian2D._register() +Cartesian3D._register() +ParametricCurve2D._register() +ParametricCurve3D._register() +ParametricSurface._register() +Polar._register() +Cylindrical._register() +Spherical._register() diff --git a/.venv/Lib/site-packages/sympy/plotting/pygletplot/plot_object.py b/.venv/Lib/site-packages/sympy/plotting/pygletplot/plot_object.py new file mode 100644 index 0000000000000000000000000000000000000000..e51040fb8b1a52c49d849b96692f6c0dba329d75 --- /dev/null +++ b/.venv/Lib/site-packages/sympy/plotting/pygletplot/plot_object.py @@ -0,0 +1,17 @@ +class PlotObject: + """ + Base class for objects which can be displayed in + a Plot. 
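+ Subclasses override draw(); _draw() skips rendering whenever visible is False.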
+ """ + visible = True + + def _draw(self): + if self.visible: + self.draw() + + def draw(self): + """ + OpenGL rendering code for the plot object. + Override in base class. + """ + pass diff --git a/.venv/Lib/site-packages/sympy/plotting/pygletplot/plot_rotation.py b/.venv/Lib/site-packages/sympy/plotting/pygletplot/plot_rotation.py new file mode 100644 index 0000000000000000000000000000000000000000..7f568964997634121946a761b55ef0f916ac633f --- /dev/null +++ b/.venv/Lib/site-packages/sympy/plotting/pygletplot/plot_rotation.py @@ -0,0 +1,68 @@ +try: + from ctypes import c_float +except ImportError: + pass + +import pyglet.gl as pgl +from math import sqrt as _sqrt, acos as _acos + + +def cross(a, b): + return (a[1] * b[2] - a[2] * b[1], + a[2] * b[0] - a[0] * b[2], + a[0] * b[1] - a[1] * b[0]) + + +def dot(a, b): + return a[0] * b[0] + a[1] * b[1] + a[2] * b[2] + + +def mag(a): + return _sqrt(a[0]**2 + a[1]**2 + a[2]**2) + + +def norm(a): + m = mag(a) + return (a[0] / m, a[1] / m, a[2] / m) + + +def get_sphere_mapping(x, y, width, height): + x = min([max([x, 0]), width]) + y = min([max([y, 0]), height]) + + sr = _sqrt((width/2)**2 + (height/2)**2) + sx = ((x - width / 2) / sr) + sy = ((y - height / 2) / sr) + + sz = 1.0 - sx**2 - sy**2 + + if sz > 0.0: + sz = _sqrt(sz) + return (sx, sy, sz) + else: + sz = 0 + return norm((sx, sy, sz)) + +rad2deg = 180.0 / 3.141592 + + +def get_spherical_rotatation(p1, p2, width, height, theta_multiplier): + v1 = get_sphere_mapping(p1[0], p1[1], width, height) + v2 = get_sphere_mapping(p2[0], p2[1], width, height) + + d = min(max([dot(v1, v2), -1]), 1) + + if abs(d - 1.0) < 0.000001: + return None + + raxis = norm( cross(v1, v2) ) + rtheta = theta_multiplier * rad2deg * _acos(d) + + pgl.glPushMatrix() + pgl.glLoadIdentity() + pgl.glRotatef(rtheta, *raxis) + mat = (c_float*16)() + pgl.glGetFloatv(pgl.GL_MODELVIEW_MATRIX, mat) + pgl.glPopMatrix() + + return mat diff --git a/.venv/Lib/site-packages/sympy/plotting/pygletplot/plot_surface.py b/.venv/Lib/site-packages/sympy/plotting/pygletplot/plot_surface.py new file mode 100644 index 0000000000000000000000000000000000000000..ed421eebb441d193f4d9b763f56e146c11e5a42c --- /dev/null +++ b/.venv/Lib/site-packages/sympy/plotting/pygletplot/plot_surface.py @@ -0,0 +1,102 @@ +import pyglet.gl as pgl + +from sympy.core import S +from sympy.plotting.pygletplot.plot_mode_base import PlotModeBase + + +class PlotSurface(PlotModeBase): + + default_rot_preset = 'perspective' + + def _on_calculate_verts(self): + self.u_interval = self.intervals[0] + self.u_set = list(self.u_interval.frange()) + self.v_interval = self.intervals[1] + self.v_set = list(self.v_interval.frange()) + self.bounds = [[S.Infinity, S.NegativeInfinity, 0], + [S.Infinity, S.NegativeInfinity, 0], + [S.Infinity, S.NegativeInfinity, 0]] + evaluate = self._get_evaluator() + + self._calculating_verts_pos = 0.0 + self._calculating_verts_len = float( + self.u_interval.v_len*self.v_interval.v_len) + + verts = [] + b = self.bounds + for u in self.u_set: + column = [] + for v in self.v_set: + try: + _e = evaluate(u, v) # calculate vertex + except ZeroDivisionError: + _e = None + if _e is not None: # update bounding box + for axis in range(3): + b[axis][0] = min([b[axis][0], _e[axis]]) + b[axis][1] = max([b[axis][1], _e[axis]]) + column.append(_e) + self._calculating_verts_pos += 1.0 + + verts.append(column) + for axis in range(3): + b[axis][2] = b[axis][1] - b[axis][0] + if b[axis][2] == 0.0: + b[axis][2] = 1.0 + + self.verts = verts + 
self.push_wireframe(self.draw_verts(False, False)) + self.push_solid(self.draw_verts(False, True)) + + def _on_calculate_cverts(self): + if not self.verts or not self.color: + return + + def set_work_len(n): + self._calculating_cverts_len = float(n) + + def inc_work_pos(): + self._calculating_cverts_pos += 1.0 + set_work_len(1) + self._calculating_cverts_pos = 0 + self.cverts = self.color.apply_to_surface(self.verts, + self.u_set, + self.v_set, + set_len=set_work_len, + inc_pos=inc_work_pos) + self.push_solid(self.draw_verts(True, True)) + + def calculate_one_cvert(self, u, v): + vert = self.verts[u][v] + return self.color(vert[0], vert[1], vert[2], + self.u_set[u], self.v_set[v]) + + def draw_verts(self, use_cverts, use_solid_color): + def f(): + for u in range(1, len(self.u_set)): + pgl.glBegin(pgl.GL_QUAD_STRIP) + for v in range(len(self.v_set)): + pa = self.verts[u - 1][v] + pb = self.verts[u][v] + if pa is None or pb is None: + pgl.glEnd() + pgl.glBegin(pgl.GL_QUAD_STRIP) + continue + if use_cverts: + ca = self.cverts[u - 1][v] + cb = self.cverts[u][v] + if ca is None: + ca = (0, 0, 0) + if cb is None: + cb = (0, 0, 0) + else: + if use_solid_color: + ca = cb = self.default_solid_color + else: + ca = cb = self.default_wireframe_color + pgl.glColor3f(*ca) + pgl.glVertex3f(*pa) + pgl.glColor3f(*cb) + pgl.glVertex3f(*pb) + pgl.glEnd() + return f diff --git a/.venv/Lib/site-packages/sympy/plotting/pygletplot/plot_window.py b/.venv/Lib/site-packages/sympy/plotting/pygletplot/plot_window.py new file mode 100644 index 0000000000000000000000000000000000000000..d9df4cc453acb05d7c2d871e9e8efeb36905de5d --- /dev/null +++ b/.venv/Lib/site-packages/sympy/plotting/pygletplot/plot_window.py @@ -0,0 +1,144 @@ +from time import perf_counter + + +import pyglet.gl as pgl + +from sympy.plotting.pygletplot.managed_window import ManagedWindow +from sympy.plotting.pygletplot.plot_camera import PlotCamera +from sympy.plotting.pygletplot.plot_controller import PlotController + + +class PlotWindow(ManagedWindow): + + def __init__(self, plot, antialiasing=True, ortho=False, + invert_mouse_zoom=False, linewidth=1.5, caption="SymPy Plot", + **kwargs): + """ + Named Arguments + =============== + + antialiasing = True + True OR False + ortho = False + True OR False + invert_mouse_zoom = False + True OR False + """ + self.plot = plot + + self.camera = None + self._calculating = False + + self.antialiasing = antialiasing + self.ortho = ortho + self.invert_mouse_zoom = invert_mouse_zoom + self.linewidth = linewidth + self.title = caption + self.last_caption_update = 0 + self.caption_update_interval = 0.2 + self.drawing_first_object = True + + super().__init__(**kwargs) + + def setup(self): + self.camera = PlotCamera(self, ortho=self.ortho) + self.controller = PlotController(self, + invert_mouse_zoom=self.invert_mouse_zoom) + self.push_handlers(self.controller) + + pgl.glClearColor(1.0, 1.0, 1.0, 0.0) + pgl.glClearDepth(1.0) + + pgl.glDepthFunc(pgl.GL_LESS) + pgl.glEnable(pgl.GL_DEPTH_TEST) + + pgl.glEnable(pgl.GL_LINE_SMOOTH) + pgl.glShadeModel(pgl.GL_SMOOTH) + pgl.glLineWidth(self.linewidth) + + pgl.glEnable(pgl.GL_BLEND) + pgl.glBlendFunc(pgl.GL_SRC_ALPHA, pgl.GL_ONE_MINUS_SRC_ALPHA) + + if self.antialiasing: + pgl.glHint(pgl.GL_LINE_SMOOTH_HINT, pgl.GL_NICEST) + pgl.glHint(pgl.GL_POLYGON_SMOOTH_HINT, pgl.GL_NICEST) + + self.camera.setup_projection() + + def on_resize(self, w, h): + super().on_resize(w, h) + if self.camera is not None: + self.camera.setup_projection() + + def update(self, dt): + 
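# Per-frame updates are delegated to the plot controller. +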
self.controller.update(dt) + + def draw(self): + self.plot._render_lock.acquire() + self.camera.apply_transformation() + + calc_verts_pos, calc_verts_len = 0, 0 + calc_cverts_pos, calc_cverts_len = 0, 0 + + should_update_caption = (perf_counter() - self.last_caption_update > + self.caption_update_interval) + + if len(self.plot._functions.values()) == 0: + self.drawing_first_object = True + + iterfunctions = iter(self.plot._functions.values()) + + for r in iterfunctions: + if self.drawing_first_object: + self.camera.set_rot_preset(r.default_rot_preset) + self.drawing_first_object = False + + pgl.glPushMatrix() + r._draw() + pgl.glPopMatrix() + + # might as well do this while we are + # iterating and have the lock rather + # than locking and iterating twice + # per frame: + + if should_update_caption: + try: + if r.calculating_verts: + calc_verts_pos += r.calculating_verts_pos + calc_verts_len += r.calculating_verts_len + if r.calculating_cverts: + calc_cverts_pos += r.calculating_cverts_pos + calc_cverts_len += r.calculating_cverts_len + except ValueError: + pass + + for r in self.plot._pobjects: + pgl.glPushMatrix() + r._draw() + pgl.glPopMatrix() + + if should_update_caption: + self.update_caption(calc_verts_pos, calc_verts_len, + calc_cverts_pos, calc_cverts_len) + self.last_caption_update = perf_counter() + + if self.plot._screenshot: + self.plot._screenshot._execute_saving() + + self.plot._render_lock.release() + + def update_caption(self, calc_verts_pos, calc_verts_len, + calc_cverts_pos, calc_cverts_len): + caption = self.title + if calc_verts_len or calc_cverts_len: + caption += " (calculating" + if calc_verts_len > 0: + p = (calc_verts_pos / calc_verts_len) * 100 + caption += " vertices %i%%" % (p) + if calc_cverts_len > 0: + p = (calc_cverts_pos / calc_cverts_len) * 100 + caption += " colors %i%%" % (p) + caption += ")" + if self.caption != caption: + self.set_caption(caption) diff --git a/.venv/Lib/site-packages/sympy/plotting/pygletplot/tests/__init__.py b/.venv/Lib/site-packages/sympy/plotting/pygletplot/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/.venv/Lib/site-packages/sympy/plotting/pygletplot/tests/test_plotting.py b/.venv/Lib/site-packages/sympy/plotting/pygletplot/tests/test_plotting.py new file mode 100644 index 0000000000000000000000000000000000000000..ddc4aaf3621a8c9056ce0d81c89ca6a0a681bbdb --- /dev/null +++ b/.venv/Lib/site-packages/sympy/plotting/pygletplot/tests/test_plotting.py @@ -0,0 +1,88 @@ +from sympy.external.importtools import import_module + +disabled = False + +# if pyglet.gl fails to import, e.g. 
opengl is missing, we disable the tests +pyglet_gl = import_module("pyglet.gl", catch=(OSError,)) +pyglet_window = import_module("pyglet.window", catch=(OSError,)) +if not pyglet_gl or not pyglet_window: + disabled = True + + +from sympy.core.symbol import symbols +from sympy.functions.elementary.exponential import log +from sympy.functions.elementary.trigonometric import (cos, sin) +x, y, z = symbols('x, y, z') + + +def test_plot_2d(): + from sympy.plotting.pygletplot import PygletPlot + p = PygletPlot(x, [x, -5, 5, 4], visible=False) + p.wait_for_calculations() + + +def test_plot_2d_discontinuous(): + from sympy.plotting.pygletplot import PygletPlot + p = PygletPlot(1/x, [x, -1, 1, 2], visible=False) + p.wait_for_calculations() + + +def test_plot_3d(): + from sympy.plotting.pygletplot import PygletPlot + p = PygletPlot(x*y, [x, -5, 5, 5], [y, -5, 5, 5], visible=False) + p.wait_for_calculations() + + +def test_plot_3d_discontinuous(): + from sympy.plotting.pygletplot import PygletPlot + p = PygletPlot(1/x, [x, -3, 3, 6], [y, -1, 1, 1], visible=False) + p.wait_for_calculations() + + +def test_plot_2d_polar(): + from sympy.plotting.pygletplot import PygletPlot + p = PygletPlot(1/x, [x, -1, 1, 4], 'mode=polar', visible=False) + p.wait_for_calculations() + + +def test_plot_3d_cylinder(): + from sympy.plotting.pygletplot import PygletPlot + p = PygletPlot( + 1/y, [x, 0, 6.282, 4], [y, -1, 1, 4], 'mode=polar;style=solid', + visible=False) + p.wait_for_calculations() + + +def test_plot_3d_spherical(): + from sympy.plotting.pygletplot import PygletPlot + p = PygletPlot( + 1, [x, 0, 6.282, 4], [y, 0, 3.141, + 4], 'mode=spherical;style=wireframe', + visible=False) + p.wait_for_calculations() + + +def test_plot_2d_parametric(): + from sympy.plotting.pygletplot import PygletPlot + p = PygletPlot(sin(x), cos(x), [x, 0, 6.282, 4], visible=False) + p.wait_for_calculations() + + +def test_plot_3d_parametric(): + from sympy.plotting.pygletplot import PygletPlot + p = PygletPlot(sin(x), cos(x), x/5.0, [x, 0, 6.282, 4], visible=False) + p.wait_for_calculations() + + +def _test_plot_log(): + from sympy.plotting.pygletplot import PygletPlot + p = PygletPlot(log(x), [x, 0, 6.282, 4], 'mode=polar', visible=False) + p.wait_for_calculations() + + +def test_plot_integral(): + # Make sure it doesn't treat x as an independent variable + from sympy.plotting.pygletplot import PygletPlot + from sympy.integrals.integrals import Integral + p = PygletPlot(Integral(z*x, (x, 1, z), (z, 1, y)), visible=False) + p.wait_for_calculations() diff --git a/.venv/Lib/site-packages/sympy/plotting/pygletplot/util.py b/.venv/Lib/site-packages/sympy/plotting/pygletplot/util.py new file mode 100644 index 0000000000000000000000000000000000000000..43b882ca18274dcdb273cf35680016453db3c698 --- /dev/null +++ b/.venv/Lib/site-packages/sympy/plotting/pygletplot/util.py @@ -0,0 +1,188 @@ +try: + from ctypes import c_float, c_int, c_double +except ImportError: + pass + +import pyglet.gl as pgl +from sympy.core import S + + +def get_model_matrix(array_type=c_float, glGetMethod=pgl.glGetFloatv): + """ + Returns the current modelview matrix. + """ + m = (array_type*16)() + glGetMethod(pgl.GL_MODELVIEW_MATRIX, m) + return m + + +def get_projection_matrix(array_type=c_float, glGetMethod=pgl.glGetFloatv): + """ + Returns the current modelview matrix. + """ + m = (array_type*16)() + glGetMethod(pgl.GL_PROJECTION_MATRIX, m) + return m + + +def get_viewport(): + """ + Returns the current viewport. 
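+ The result is a (c_int*4) array holding x, y, width and height.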
+ """ + m = (c_int*4)() + pgl.glGetIntegerv(pgl.GL_VIEWPORT, m) + return m + + +def get_direction_vectors(): + m = get_model_matrix() + return ((m[0], m[4], m[8]), + (m[1], m[5], m[9]), + (m[2], m[6], m[10])) + + +def get_view_direction_vectors(): + m = get_model_matrix() + return ((m[0], m[1], m[2]), + (m[4], m[5], m[6]), + (m[8], m[9], m[10])) + + +def get_basis_vectors(): + return ((1, 0, 0), (0, 1, 0), (0, 0, 1)) + + +def screen_to_model(x, y, z): + m = get_model_matrix(c_double, pgl.glGetDoublev) + p = get_projection_matrix(c_double, pgl.glGetDoublev) + w = get_viewport() + mx, my, mz = c_double(), c_double(), c_double() + pgl.gluUnProject(x, y, z, m, p, w, mx, my, mz) + return float(mx.value), float(my.value), float(mz.value) + + +def model_to_screen(x, y, z): + m = get_model_matrix(c_double, pgl.glGetDoublev) + p = get_projection_matrix(c_double, pgl.glGetDoublev) + w = get_viewport() + mx, my, mz = c_double(), c_double(), c_double() + pgl.gluProject(x, y, z, m, p, w, mx, my, mz) + return float(mx.value), float(my.value), float(mz.value) + + +def vec_subs(a, b): + return tuple(a[i] - b[i] for i in range(len(a))) + + +def billboard_matrix(): + """ + Removes rotational components of + current matrix so that primitives + are always drawn facing the viewer. + + |1|0|0|x| + |0|1|0|x| + |0|0|1|x| (x means left unchanged) + |x|x|x|x| + """ + m = get_model_matrix() + # XXX: for i in range(11): m[i] = i ? + m[0] = 1 + m[1] = 0 + m[2] = 0 + m[4] = 0 + m[5] = 1 + m[6] = 0 + m[8] = 0 + m[9] = 0 + m[10] = 1 + pgl.glLoadMatrixf(m) + + +def create_bounds(): + return [[S.Infinity, S.NegativeInfinity, 0], + [S.Infinity, S.NegativeInfinity, 0], + [S.Infinity, S.NegativeInfinity, 0]] + + +def update_bounds(b, v): + if v is None: + return + for axis in range(3): + b[axis][0] = min([b[axis][0], v[axis]]) + b[axis][1] = max([b[axis][1], v[axis]]) + + +def interpolate(a_min, a_max, a_ratio): + return a_min + a_ratio * (a_max - a_min) + + +def rinterpolate(a_min, a_max, a_value): + a_range = a_max - a_min + if a_max == a_min: + a_range = 1.0 + return (a_value - a_min) / float(a_range) + + +def interpolate_color(color1, color2, ratio): + return tuple(interpolate(color1[i], color2[i], ratio) for i in range(3)) + + +def scale_value(v, v_min, v_len): + return (v - v_min) / v_len + + +def scale_value_list(flist): + v_min, v_max = min(flist), max(flist) + v_len = v_max - v_min + return [scale_value(f, v_min, v_len) for f in flist] + + +def strided_range(r_min, r_max, stride, max_steps=50): + o_min, o_max = r_min, r_max + if abs(r_min - r_max) < 0.001: + return [] + try: + range(int(r_min - r_max)) + except (TypeError, OverflowError): + return [] + if r_min > r_max: + raise ValueError("r_min cannot be greater than r_max") + r_min_s = (r_min % stride) + r_max_s = stride - (r_max % stride) + if abs(r_max_s - stride) < 0.001: + r_max_s = 0.0 + r_min -= r_min_s + r_max += r_max_s + r_steps = int((r_max - r_min)/stride) + if max_steps and r_steps > max_steps: + return strided_range(o_min, o_max, stride*2) + return [r_min] + [r_min + e*stride for e in range(1, r_steps + 1)] + [r_max] + + +def parse_option_string(s): + if not isinstance(s, str): + return None + options = {} + for token in s.split(';'): + pieces = token.split('=') + if len(pieces) == 1: + option, value = pieces[0], "" + elif len(pieces) == 2: + option, value = pieces + else: + raise ValueError("Plot option string '%s' is malformed." 
% (s)) + options[option.strip()] = value.strip() + return options + + +def dot_product(v1, v2): + return sum(v1[i]*v2[i] for i in range(3)) + + +def vec_sub(v1, v2): + return tuple(v1[i] - v2[i] for i in range(3)) + + +def vec_mag(v): + return sum(v[i]**2 for i in range(3))**(0.5) diff --git a/.venv/Lib/site-packages/sympy/plotting/tests/__init__.py b/.venv/Lib/site-packages/sympy/plotting/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/.venv/Lib/site-packages/sympy/plotting/tests/test_experimental_lambdify.py b/.venv/Lib/site-packages/sympy/plotting/tests/test_experimental_lambdify.py new file mode 100644 index 0000000000000000000000000000000000000000..95839d668762be7be94d0de5092594306ceeadbd --- /dev/null +++ b/.venv/Lib/site-packages/sympy/plotting/tests/test_experimental_lambdify.py @@ -0,0 +1,77 @@ +from sympy.core.symbol import symbols, Symbol +from sympy.functions import Max +from sympy.plotting.experimental_lambdify import experimental_lambdify +from sympy.plotting.intervalmath.interval_arithmetic import \ + interval, intervalMembership + + +# Tests for exception handling in experimental_lambdify +def test_experimental_lambify(): + x = Symbol('x') + f = experimental_lambdify([x], Max(x, 5)) + # XXX should f be tested? If f(2) is attempted, an + # error is raised because a complex produced during wrapping of the arg + # is being compared with an int. + assert Max(2, 5) == 5 + assert Max(5, 7) == 7 + + x = Symbol('x-3') + f = experimental_lambdify([x], x + 1) + assert f(1) == 2 + + +def test_composite_boolean_region(): + x, y = symbols('x y') + + r1 = (x - 1)**2 + y**2 < 2 + r2 = (x + 1)**2 + y**2 < 2 + + f = experimental_lambdify((x, y), r1 & r2) + a = (interval(-0.1, 0.1), interval(-0.1, 0.1)) + assert f(*a) == intervalMembership(True, True) + a = (interval(-1.1, -0.9), interval(-0.1, 0.1)) + assert f(*a) == intervalMembership(False, True) + a = (interval(0.9, 1.1), interval(-0.1, 0.1)) + assert f(*a) == intervalMembership(False, True) + a = (interval(-0.1, 0.1), interval(1.9, 2.1)) + assert f(*a) == intervalMembership(False, True) + + f = experimental_lambdify((x, y), r1 | r2) + a = (interval(-0.1, 0.1), interval(-0.1, 0.1)) + assert f(*a) == intervalMembership(True, True) + a = (interval(-1.1, -0.9), interval(-0.1, 0.1)) + assert f(*a) == intervalMembership(True, True) + a = (interval(0.9, 1.1), interval(-0.1, 0.1)) + assert f(*a) == intervalMembership(True, True) + a = (interval(-0.1, 0.1), interval(1.9, 2.1)) + assert f(*a) == intervalMembership(False, True) + + f = experimental_lambdify((x, y), r1 & ~r2) + a = (interval(-0.1, 0.1), interval(-0.1, 0.1)) + assert f(*a) == intervalMembership(False, True) + a = (interval(-1.1, -0.9), interval(-0.1, 0.1)) + assert f(*a) == intervalMembership(False, True) + a = (interval(0.9, 1.1), interval(-0.1, 0.1)) + assert f(*a) == intervalMembership(True, True) + a = (interval(-0.1, 0.1), interval(1.9, 2.1)) + assert f(*a) == intervalMembership(False, True) + + f = experimental_lambdify((x, y), ~r1 & r2) + a = (interval(-0.1, 0.1), interval(-0.1, 0.1)) + assert f(*a) == intervalMembership(False, True) + a = (interval(-1.1, -0.9), interval(-0.1, 0.1)) + assert f(*a) == intervalMembership(True, True) + a = (interval(0.9, 1.1), interval(-0.1, 0.1)) + assert f(*a) == intervalMembership(False, True) + a = (interval(-0.1, 0.1), interval(1.9, 2.1)) + assert f(*a) == intervalMembership(False, True) + + f = experimental_lambdify((x, y), ~r1 & ~r2) + a = 
(interval(-0.1, 0.1), interval(-0.1, 0.1)) + assert f(*a) == intervalMembership(False, True) + a = (interval(-1.1, -0.9), interval(-0.1, 0.1)) + assert f(*a) == intervalMembership(False, True) + a = (interval(0.9, 1.1), interval(-0.1, 0.1)) + assert f(*a) == intervalMembership(False, True) + a = (interval(-0.1, 0.1), interval(1.9, 2.1)) + assert f(*a) == intervalMembership(True, True) diff --git a/.venv/Lib/site-packages/sympy/plotting/tests/test_plot.py b/.venv/Lib/site-packages/sympy/plotting/tests/test_plot.py new file mode 100644 index 0000000000000000000000000000000000000000..bf09e825e7444cfdaf42e8c419dc50170168365b --- /dev/null +++ b/.venv/Lib/site-packages/sympy/plotting/tests/test_plot.py @@ -0,0 +1,1343 @@ +import os +from tempfile import TemporaryDirectory +import pytest +from sympy.concrete.summations import Sum +from sympy.core.numbers import (I, oo, pi) +from sympy.core.relational import Ne +from sympy.core.symbol import Symbol, symbols +from sympy.functions.elementary.exponential import (LambertW, exp, exp_polar, log) +from sympy.functions.elementary.miscellaneous import (real_root, sqrt) +from sympy.functions.elementary.piecewise import Piecewise +from sympy.functions.elementary.trigonometric import (cos, sin) +from sympy.functions.elementary.miscellaneous import Min +from sympy.functions.special.hyper import meijerg +from sympy.integrals.integrals import Integral +from sympy.logic.boolalg import And +from sympy.core.singleton import S +from sympy.core.sympify import sympify +from sympy.external import import_module +from sympy.plotting.plot import ( + Plot, plot, plot_parametric, plot3d_parametric_line, plot3d, + plot3d_parametric_surface) +from sympy.plotting.plot import ( + unset_show, plot_contour, PlotGrid, MatplotlibBackend, TextBackend) +from sympy.plotting.series import ( + LineOver1DRangeSeries, Parametric2DLineSeries, Parametric3DLineSeries, + ParametricSurfaceSeries, SurfaceOver2DRangeSeries) +from sympy.testing.pytest import skip, warns, raises, warns_deprecated_sympy +from sympy.utilities import lambdify as lambdify_ +from sympy.utilities.exceptions import ignore_warnings + +unset_show() + + +matplotlib = import_module( + 'matplotlib', min_module_version='1.1.0', catch=(RuntimeError,)) + + +class DummyBackendNotOk(Plot): + """ Used to verify if users can create their own backends. + This backend is meant to raise NotImplementedError for methods `show`, + `save`, `close`. + """ + def __new__(cls, *args, **kwargs): + return object.__new__(cls) + + +class DummyBackendOk(Plot): + """ Used to verify if users can create their own backends. + This backend is meant to pass all tests. 
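+ It implements show, save and close as no-ops.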
+ """ + def __new__(cls, *args, **kwargs): + return object.__new__(cls) + + def show(self): + pass + + def save(self): + pass + + def close(self): + pass + +def test_basic_plotting_backend(): + x = Symbol('x') + plot(x, (x, 0, 3), backend='text') + plot(x**2 + 1, (x, 0, 3), backend='text') + +@pytest.mark.parametrize("adaptive", [True, False]) +def test_plot_and_save_1(adaptive): + if not matplotlib: + skip("Matplotlib not the default backend") + + x = Symbol('x') + y = Symbol('y') + + with TemporaryDirectory(prefix='sympy_') as tmpdir: + ### + # Examples from the 'introduction' notebook + ### + p = plot(x, legend=True, label='f1', adaptive=adaptive, n=10) + p = plot(x*sin(x), x*cos(x), label='f2', adaptive=adaptive, n=10) + p.extend(p) + p[0].line_color = lambda a: a + p[1].line_color = 'b' + p.title = 'Big title' + p.xlabel = 'the x axis' + p[1].label = 'straight line' + p.legend = True + p.aspect_ratio = (1, 1) + p.xlim = (-15, 20) + filename = 'test_basic_options_and_colors.png' + p.save(os.path.join(tmpdir, filename)) + p._backend.close() + + p.extend(plot(x + 1, adaptive=adaptive, n=10)) + p.append(plot(x + 3, x**2, adaptive=adaptive, n=10)[1]) + filename = 'test_plot_extend_append.png' + p.save(os.path.join(tmpdir, filename)) + + p[2] = plot(x**2, (x, -2, 3), adaptive=adaptive, n=10) + filename = 'test_plot_setitem.png' + p.save(os.path.join(tmpdir, filename)) + p._backend.close() + + p = plot(sin(x), (x, -2*pi, 4*pi), adaptive=adaptive, n=10) + filename = 'test_line_explicit.png' + p.save(os.path.join(tmpdir, filename)) + p._backend.close() + + p = plot(sin(x), adaptive=adaptive, n=10) + filename = 'test_line_default_range.png' + p.save(os.path.join(tmpdir, filename)) + p._backend.close() + + p = plot((x**2, (x, -5, 5)), (x**3, (x, -3, 3)), adaptive=adaptive, n=10) + filename = 'test_line_multiple_range.png' + p.save(os.path.join(tmpdir, filename)) + p._backend.close() + + raises(ValueError, lambda: plot(x, y)) + + #Piecewise plots + p = plot(Piecewise((1, x > 0), (0, True)), (x, -1, 1), adaptive=adaptive, n=10) + filename = 'test_plot_piecewise.png' + p.save(os.path.join(tmpdir, filename)) + p._backend.close() + + p = plot(Piecewise((x, x < 1), (x**2, True)), (x, -3, 3), adaptive=adaptive, n=10) + filename = 'test_plot_piecewise_2.png' + p.save(os.path.join(tmpdir, filename)) + p._backend.close() + + # test issue 7471 + p1 = plot(x, adaptive=adaptive, n=10) + p2 = plot(3, adaptive=adaptive, n=10) + p1.extend(p2) + filename = 'test_horizontal_line.png' + p.save(os.path.join(tmpdir, filename)) + p._backend.close() + + # test issue 10925 + f = Piecewise((-1, x < -1), (x, And(-1 <= x, x < 0)), \ + (x**2, And(0 <= x, x < 1)), (x**3, x >= 1)) + p = plot(f, (x, -3, 3), adaptive=adaptive, n=10) + filename = 'test_plot_piecewise_3.png' + p.save(os.path.join(tmpdir, filename)) + p._backend.close() + + +@pytest.mark.parametrize("adaptive", [True, False]) +def test_plot_and_save_2(adaptive): + if not matplotlib: + skip("Matplotlib not the default backend") + + x = Symbol('x') + y = Symbol('y') + z = Symbol('z') + + with TemporaryDirectory(prefix='sympy_') as tmpdir: + #parametric 2d plots. + #Single plot with default range. + p = plot_parametric(sin(x), cos(x), adaptive=adaptive, n=10) + filename = 'test_parametric.png' + p.save(os.path.join(tmpdir, filename)) + p._backend.close() + + #Single plot with range. 
+ p = plot_parametric( + sin(x), cos(x), (x, -5, 5), legend=True, label='parametric_plot', + adaptive=adaptive, n=10) + filename = 'test_parametric_range.png' + p.save(os.path.join(tmpdir, filename)) + p._backend.close() + + #Multiple plots with same range. + p = plot_parametric((sin(x), cos(x)), (x, sin(x)), + adaptive=adaptive, n=10) + filename = 'test_parametric_multiple.png' + p.save(os.path.join(tmpdir, filename)) + p._backend.close() + + #Multiple plots with different ranges. + p = plot_parametric( + (sin(x), cos(x), (x, -3, 3)), (x, sin(x), (x, -5, 5)), + adaptive=adaptive, n=10) + filename = 'test_parametric_multiple_ranges.png' + p.save(os.path.join(tmpdir, filename)) + p._backend.close() + + #depth of recursion specified. + p = plot_parametric(x, sin(x), depth=13, + adaptive=adaptive, n=10) + filename = 'test_recursion_depth.png' + p.save(os.path.join(tmpdir, filename)) + p._backend.close() + + #No adaptive sampling. + p = plot_parametric(cos(x), sin(x), adaptive=False, n=500) + filename = 'test_adaptive.png' + p.save(os.path.join(tmpdir, filename)) + p._backend.close() + + #3d parametric plots + p = plot3d_parametric_line( + sin(x), cos(x), x, legend=True, label='3d_parametric_plot', + adaptive=adaptive, n=10) + filename = 'test_3d_line.png' + p.save(os.path.join(tmpdir, filename)) + p._backend.close() + + p = plot3d_parametric_line( + (sin(x), cos(x), x, (x, -5, 5)), (cos(x), sin(x), x, (x, -3, 3)), + adaptive=adaptive, n=10) + filename = 'test_3d_line_multiple.png' + p.save(os.path.join(tmpdir, filename)) + p._backend.close() + + p = plot3d_parametric_line(sin(x), cos(x), x, n=30, + adaptive=adaptive) + filename = 'test_3d_line_points.png' + p.save(os.path.join(tmpdir, filename)) + p._backend.close() + + # 3d surface single plot. + p = plot3d(x * y, adaptive=adaptive, n=10) + filename = 'test_surface.png' + p.save(os.path.join(tmpdir, filename)) + p._backend.close() + + # Multiple 3D plots with same range. + p = plot3d(-x * y, x * y, (x, -5, 5), adaptive=adaptive, n=10) + filename = 'test_surface_multiple.png' + p.save(os.path.join(tmpdir, filename)) + p._backend.close() + + # Multiple 3D plots with different ranges. + p = plot3d( + (x * y, (x, -3, 3), (y, -3, 3)), (-x * y, (x, -3, 3), (y, -3, 3)), + adaptive=adaptive, n=10) + filename = 'test_surface_multiple_ranges.png' + p.save(os.path.join(tmpdir, filename)) + p._backend.close() + + # Single Parametric 3D plot + p = plot3d_parametric_surface(sin(x + y), cos(x - y), x - y, + adaptive=adaptive, n=10) + filename = 'test_parametric_surface.png' + p.save(os.path.join(tmpdir, filename)) + p._backend.close() + + # Multiple Parametric 3D plots. + p = plot3d_parametric_surface( + (x*sin(z), x*cos(z), z, (x, -5, 5), (z, -5, 5)), + (sin(x + y), cos(x - y), x - y, (x, -5, 5), (y, -5, 5)), + adaptive=adaptive, n=10) + filename = 'test_parametric_surface.png' + p.save(os.path.join(tmpdir, filename)) + p._backend.close() + + # Single Contour plot. + p = plot_contour(sin(x)*sin(y), (x, -5, 5), (y, -5, 5), + adaptive=adaptive, n=10) + filename = 'test_contour_plot.png' + p.save(os.path.join(tmpdir, filename)) + p._backend.close() + + # Multiple Contour plots with same range. + p = plot_contour(x**2 + y**2, x**3 + y**3, (x, -5, 5), (y, -5, 5), + adaptive=adaptive, n=10) + filename = 'test_contour_plot.png' + p.save(os.path.join(tmpdir, filename)) + p._backend.close() + + # Multiple Contour plots with different range. 
+ p = plot_contour( + (x**2 + y**2, (x, -5, 5), (y, -5, 5)), + (x**3 + y**3, (x, -3, 3), (y, -3, 3)), + adaptive=adaptive, n=10) + filename = 'test_contour_plot.png' + p.save(os.path.join(tmpdir, filename)) + p._backend.close() + + +@pytest.mark.parametrize("adaptive", [True, False]) +def test_plot_and_save_3(adaptive): + if not matplotlib: + skip("Matplotlib not the default backend") + + x = Symbol('x') + y = Symbol('y') + z = Symbol('z') + + with TemporaryDirectory(prefix='sympy_') as tmpdir: + ### + # Examples from the 'colors' notebook + ### + + p = plot(sin(x), adaptive=adaptive, n=10) + p[0].line_color = lambda a: a + filename = 'test_colors_line_arity1.png' + p.save(os.path.join(tmpdir, filename)) + + p[0].line_color = lambda a, b: b + filename = 'test_colors_line_arity2.png' + p.save(os.path.join(tmpdir, filename)) + p._backend.close() + + p = plot(x*sin(x), x*cos(x), (x, 0, 10), adaptive=adaptive, n=10) + p[0].line_color = lambda a: a + filename = 'test_colors_param_line_arity1.png' + p.save(os.path.join(tmpdir, filename)) + + p[0].line_color = lambda a, b: a + filename = 'test_colors_param_line_arity1.png' + p.save(os.path.join(tmpdir, filename)) + + p[0].line_color = lambda a, b: b + filename = 'test_colors_param_line_arity2b.png' + p.save(os.path.join(tmpdir, filename)) + p._backend.close() + + p = plot3d_parametric_line( + sin(x) + 0.1*sin(x)*cos(7*x), + cos(x) + 0.1*cos(x)*cos(7*x), + 0.1*sin(7*x), + (x, 0, 2*pi), adaptive=adaptive, n=10) + p[0].line_color = lambdify_(x, sin(4*x)) + filename = 'test_colors_3d_line_arity1.png' + p.save(os.path.join(tmpdir, filename)) + p[0].line_color = lambda a, b: b + filename = 'test_colors_3d_line_arity2.png' + p.save(os.path.join(tmpdir, filename)) + p[0].line_color = lambda a, b, c: c + filename = 'test_colors_3d_line_arity3.png' + p.save(os.path.join(tmpdir, filename)) + p._backend.close() + + p = plot3d(sin(x)*y, (x, 0, 6*pi), (y, -5, 5), adaptive=adaptive, n=10) + p[0].surface_color = lambda a: a + filename = 'test_colors_surface_arity1.png' + p.save(os.path.join(tmpdir, filename)) + p[0].surface_color = lambda a, b: b + filename = 'test_colors_surface_arity2.png' + p.save(os.path.join(tmpdir, filename)) + p[0].surface_color = lambda a, b, c: c + filename = 'test_colors_surface_arity3a.png' + p.save(os.path.join(tmpdir, filename)) + p[0].surface_color = lambdify_((x, y, z), sqrt((x - 3*pi)**2 + y**2)) + filename = 'test_colors_surface_arity3b.png' + p.save(os.path.join(tmpdir, filename)) + p._backend.close() + + p = plot3d_parametric_surface(x * cos(4 * y), x * sin(4 * y), y, + (x, -1, 1), (y, -1, 1), adaptive=adaptive, n=10) + p[0].surface_color = lambda a: a + filename = 'test_colors_param_surf_arity1.png' + p.save(os.path.join(tmpdir, filename)) + p[0].surface_color = lambda a, b: a*b + filename = 'test_colors_param_surf_arity2.png' + p.save(os.path.join(tmpdir, filename)) + p[0].surface_color = lambdify_((x, y, z), sqrt(x**2 + y**2 + z**2)) + filename = 'test_colors_param_surf_arity3.png' + p.save(os.path.join(tmpdir, filename)) + p._backend.close() + + +@pytest.mark.parametrize("adaptive", [True]) +def test_plot_and_save_4(adaptive): + if not matplotlib: + skip("Matplotlib not the default backend") + + x = Symbol('x') + y = Symbol('y') + + ### + # Examples from the 'advanced' notebook + ### + + with TemporaryDirectory(prefix='sympy_') as tmpdir: + i = Integral(log((sin(x)**2 + 1)*sqrt(x**2 + 1)), (x, 0, y)) + p = plot(i, (y, 1, 5), adaptive=adaptive, n=10, force_real_eval=True) + filename = 'test_advanced_integral.png' + 
p.save(os.path.join(tmpdir, filename)) + p._backend.close() + + +@pytest.mark.parametrize("adaptive", [True, False]) +def test_plot_and_save_5(adaptive): + if not matplotlib: + skip("Matplotlib not the default backend") + + x = Symbol('x') + y = Symbol('y') + + with TemporaryDirectory(prefix='sympy_') as tmpdir: + s = Sum(1/x**y, (x, 1, oo)) + p = plot(s, (y, 2, 10), adaptive=adaptive, n=10) + filename = 'test_advanced_inf_sum.png' + p.save(os.path.join(tmpdir, filename)) + p._backend.close() + + p = plot(Sum(1/x, (x, 1, y)), (y, 2, 10), show=False, + adaptive=adaptive, n=10) + p[0].only_integers = True + p[0].steps = True + filename = 'test_advanced_fin_sum.png' + + # XXX: This should be fixed in experimental_lambdify or by using + # ordinary lambdify so that it doesn't warn. The error results from + # passing an array of values as the integration limit. + # + # UserWarning: The evaluation of the expression is problematic. We are + # trying a failback method that may still work. Please report this as a + # bug. + with ignore_warnings(UserWarning): + p.save(os.path.join(tmpdir, filename)) + + p._backend.close() + + +@pytest.mark.parametrize("adaptive", [True, False]) +def test_plot_and_save_6(adaptive): + if not matplotlib: + skip("Matplotlib not the default backend") + + x = Symbol('x') + + with TemporaryDirectory(prefix='sympy_') as tmpdir: + filename = 'test.png' + ### + # Test expressions that can not be translated to np and generate complex + # results. + ### + p = plot(sin(x) + I*cos(x)) + p.save(os.path.join(tmpdir, filename)) + + with ignore_warnings(RuntimeWarning): + p = plot(sqrt(sqrt(-x))) + p.save(os.path.join(tmpdir, filename)) + + p = plot(LambertW(x)) + p.save(os.path.join(tmpdir, filename)) + p = plot(sqrt(LambertW(x))) + p.save(os.path.join(tmpdir, filename)) + + #Characteristic function of a StudentT distribution with nu=10 + x1 = 5 * x**2 * exp_polar(-I*pi)/2 + m1 = meijerg(((1 / 2,), ()), ((5, 0, 1 / 2), ()), x1) + x2 = 5*x**2 * exp_polar(I*pi)/2 + m2 = meijerg(((1/2,), ()), ((5, 0, 1/2), ()), x2) + expr = (m1 + m2) / (48 * pi) + with warns( + UserWarning, + match="The evaluation with NumPy/SciPy failed", + test_stacklevel=False, + ): + p = plot(expr, (x, 1e-6, 1e-2), adaptive=adaptive, n=10) + p.save(os.path.join(tmpdir, filename)) + + +@pytest.mark.parametrize("adaptive", [True, False]) +def test_plotgrid_and_save(adaptive): + if not matplotlib: + skip("Matplotlib not the default backend") + + x = Symbol('x') + y = Symbol('y') + + with TemporaryDirectory(prefix='sympy_') as tmpdir: + p1 = plot(x, adaptive=adaptive, n=10) + p2 = plot_parametric((sin(x), cos(x)), (x, sin(x)), show=False, + adaptive=adaptive, n=10) + p3 = plot_parametric( + cos(x), sin(x), adaptive=adaptive, n=10, show=False) + p4 = plot3d_parametric_line(sin(x), cos(x), x, show=False, + adaptive=adaptive, n=10) + # symmetric grid + p = PlotGrid(2, 2, p1, p2, p3, p4) + filename = 'test_grid1.png' + p.save(os.path.join(tmpdir, filename)) + p._backend.close() + + # grid size greater than the number of subplots + p = PlotGrid(3, 4, p1, p2, p3, p4) + filename = 'test_grid2.png' + p.save(os.path.join(tmpdir, filename)) + p._backend.close() + + p5 = plot(cos(x),(x, -pi, pi), show=False, adaptive=adaptive, n=10) + p5[0].line_color = lambda a: a + p6 = plot(Piecewise((1, x > 0), (0, True)), (x, -1, 1), show=False, + adaptive=adaptive, n=10) + p7 = plot_contour( + (x**2 + y**2, (x, -5, 5), (y, -5, 5)), + (x**3 + y**3, (x, -3, 3), (y, -3, 3)), show=False, + adaptive=adaptive, n=10) + # unsymmetric grid (subplots 
in one line) + p = PlotGrid(1, 3, p5, p6, p7) + filename = 'test_grid3.png' + p.save(os.path.join(tmpdir, filename)) + p._backend.close() + + +@pytest.mark.parametrize("adaptive", [True, False]) +def test_append_issue_7140(adaptive): + if not matplotlib: + skip("Matplotlib not the default backend") + + x = Symbol('x') + p1 = plot(x, adaptive=adaptive, n=10) + p2 = plot(x**2, adaptive=adaptive, n=10) + plot(x + 2, adaptive=adaptive, n=10) + + # append a series + p2.append(p1[0]) + assert len(p2._series) == 2 + + with raises(TypeError): + p1.append(p2) + + with raises(TypeError): + p1.append(p2._series) + + +@pytest.mark.parametrize("adaptive", [True, False]) +def test_issue_15265(adaptive): + if not matplotlib: + skip("Matplotlib not the default backend") + + x = Symbol('x') + eqn = sin(x) + + p = plot(eqn, xlim=(-S.Pi, S.Pi), ylim=(-1, 1), adaptive=adaptive, n=10) + p._backend.close() + + p = plot(eqn, xlim=(-1, 1), ylim=(-S.Pi, S.Pi), adaptive=adaptive, n=10) + p._backend.close() + + p = plot(eqn, xlim=(-1, 1), adaptive=adaptive, n=10, + ylim=(sympify('-3.14'), sympify('3.14'))) + p._backend.close() + + p = plot(eqn, adaptive=adaptive, n=10, + xlim=(sympify('-3.14'), sympify('3.14')), ylim=(-1, 1)) + p._backend.close() + + raises(ValueError, + lambda: plot(eqn, adaptive=adaptive, n=10, + xlim=(-S.ImaginaryUnit, 1), ylim=(-1, 1))) + + raises(ValueError, + lambda: plot(eqn, adaptive=adaptive, n=10, + xlim=(-1, 1), ylim=(-1, S.ImaginaryUnit))) + + raises(ValueError, + lambda: plot(eqn, adaptive=adaptive, n=10, + xlim=(S.NegativeInfinity, 1), ylim=(-1, 1))) + + raises(ValueError, + lambda: plot(eqn, adaptive=adaptive, n=10, + xlim=(-1, 1), ylim=(-1, S.Infinity))) + + +def test_empty_Plot(): + if not matplotlib: + skip("Matplotlib not the default backend") + + # No exception showing an empty plot + plot() + # Plot is only a base class: doesn't implement any logic for showing + # images + p = Plot() + raises(NotImplementedError, lambda: p.show()) + + +@pytest.mark.parametrize("adaptive", [True, False]) +def test_issue_17405(adaptive): + if not matplotlib: + skip("Matplotlib not the default backend") + + x = Symbol('x') + f = x**0.3 - 10*x**3 + x**2 + p = plot(f, (x, -10, 10), adaptive=adaptive, n=30, show=False) + # Random number of segments, probably more than 100, but we want to see + # that there are segments generated, as opposed to when the bug was present + + # RuntimeWarning: invalid value encountered in double_scalars + with ignore_warnings(RuntimeWarning): + assert len(p[0].get_data()[0]) >= 30 + + +@pytest.mark.parametrize("adaptive", [True, False]) +def test_logplot_PR_16796(adaptive): + if not matplotlib: + skip("Matplotlib not the default backend") + + x = Symbol('x') + p = plot(x, (x, .001, 100), adaptive=adaptive, n=30, + xscale='log', show=False) + # Random number of segments, probably more than 100, but we want to see + # that there are segments generated, as opposed to when the bug was present + assert len(p[0].get_data()[0]) >= 30 + assert p[0].end == 100.0 + assert p[0].start == .001 + + +@pytest.mark.parametrize("adaptive", [True, False]) +def test_issue_16572(adaptive): + if not matplotlib: + skip("Matplotlib not the default backend") + + x = Symbol('x') + p = plot(LambertW(x), show=False, adaptive=adaptive, n=30) + # Random number of segments, probably more than 50, but we want to see + # that there are segments generated, as opposed to when the bug was present + assert len(p[0].get_data()[0]) >= 30 + + +@pytest.mark.parametrize("adaptive", [True, False]) +def 
test_issue_11865(adaptive): + if not matplotlib: + skip("Matplotlib not the default backend") + + k = Symbol('k', integer=True) + f = Piecewise((-I*exp(I*pi*k)/k + I*exp(-I*pi*k)/k, Ne(k, 0)), (2*pi, True)) + p = plot(f, show=False, adaptive=adaptive, n=30) + # Random number of segments, probably more than 100, but we want to see + # that there are segments generated, as opposed to when the bug was present + # and that there are no exceptions. + assert len(p[0].get_data()[0]) >= 30 + + +def test_issue_11461(): + if not matplotlib: + skip("Matplotlib not the default backend") + + x = Symbol('x') + p = plot(real_root((log(x/(x-2))), 3), show=False, adaptive=True) + with warns( + RuntimeWarning, + match="invalid value encountered in", + test_stacklevel=False, + ): + # Random number of segments, probably more than 100, but we want to see + # that there are segments generated, as opposed to when the bug was present + # and that there are no exceptions. + assert len(p[0].get_data()[0]) >= 30 + + +@pytest.mark.parametrize("adaptive", [True, False]) +def test_issue_11764(adaptive): + if not matplotlib: + skip("Matplotlib not the default backend") + + x = Symbol('x') + p = plot_parametric(cos(x), sin(x), (x, 0, 2 * pi), + aspect_ratio=(1,1), show=False, adaptive=adaptive, n=30) + assert p.aspect_ratio == (1, 1) + # Random number of segments, probably more than 100, but we want to see + # that there are segments generated, as opposed to when the bug was present + assert len(p[0].get_data()[0]) >= 30 + + +@pytest.mark.parametrize("adaptive", [True, False]) +def test_issue_13516(adaptive): + if not matplotlib: + skip("Matplotlib not the default backend") + + x = Symbol('x') + + pm = plot(sin(x), backend="matplotlib", show=False, adaptive=adaptive, n=30) + assert pm.backend == MatplotlibBackend + assert len(pm[0].get_data()[0]) >= 30 + + pt = plot(sin(x), backend="text", show=False, adaptive=adaptive, n=30) + assert pt.backend == TextBackend + assert len(pt[0].get_data()[0]) >= 30 + + pd = plot(sin(x), backend="default", show=False, adaptive=adaptive, n=30) + assert pd.backend == MatplotlibBackend + assert len(pd[0].get_data()[0]) >= 30 + + p = plot(sin(x), show=False, adaptive=adaptive, n=30) + assert p.backend == MatplotlibBackend + assert len(p[0].get_data()[0]) >= 30 + + +@pytest.mark.parametrize("adaptive", [True, False]) +def test_plot_limits(adaptive): + if not matplotlib: + skip("Matplotlib not the default backend") + + x = Symbol('x') + p = plot(x, x**2, (x, -10, 10), adaptive=adaptive, n=10) + backend = p._backend + + xmin, xmax = backend.ax.get_xlim() + assert abs(xmin + 10) < 2 + assert abs(xmax - 10) < 2 + ymin, ymax = backend.ax.get_ylim() + assert abs(ymin + 10) < 10 + assert abs(ymax - 100) < 10 + + +@pytest.mark.parametrize("adaptive", [True, False]) +def test_plot3d_parametric_line_limits(adaptive): + if not matplotlib: + skip("Matplotlib not the default backend") + + x = Symbol('x') + + v1 = (2*cos(x), 2*sin(x), 2*x, (x, -5, 5)) + v2 = (sin(x), cos(x), x, (x, -5, 5)) + p = plot3d_parametric_line(v1, v2, adaptive=adaptive, n=60) + backend = p._backend + + xmin, xmax = backend.ax.get_xlim() + assert abs(xmin + 2) < 1e-2 + assert abs(xmax - 2) < 1e-2 + ymin, ymax = backend.ax.get_ylim() + assert abs(ymin + 2) < 1e-2 + assert abs(ymax - 2) < 1e-2 + zmin, zmax = backend.ax.get_zlim() + assert abs(zmin + 10) < 1e-2 + assert abs(zmax - 10) < 1e-2 + + p = plot3d_parametric_line(v2, v1, adaptive=adaptive, n=60) + backend = p._backend + + xmin, xmax = backend.ax.get_xlim() + assert abs(xmin + 
2) < 1e-2 + assert abs(xmax - 2) < 1e-2 + ymin, ymax = backend.ax.get_ylim() + assert abs(ymin + 2) < 1e-2 + assert abs(ymax - 2) < 1e-2 + zmin, zmax = backend.ax.get_zlim() + assert abs(zmin + 10) < 1e-2 + assert abs(zmax - 10) < 1e-2 + + +@pytest.mark.parametrize("adaptive", [True, False]) +def test_plot_size(adaptive): + if not matplotlib: + skip("Matplotlib not the default backend") + + x = Symbol('x') + + p1 = plot(sin(x), backend="matplotlib", size=(8, 4), + adaptive=adaptive, n=10) + s1 = p1._backend.fig.get_size_inches() + assert (s1[0] == 8) and (s1[1] == 4) + p2 = plot(sin(x), backend="matplotlib", size=(5, 10), + adaptive=adaptive, n=10) + s2 = p2._backend.fig.get_size_inches() + assert (s2[0] == 5) and (s2[1] == 10) + p3 = PlotGrid(2, 1, p1, p2, size=(6, 2), + adaptive=adaptive, n=10) + s3 = p3._backend.fig.get_size_inches() + assert (s3[0] == 6) and (s3[1] == 2) + + with raises(ValueError): + plot(sin(x), backend="matplotlib", size=(-1, 3)) + + +def test_issue_20113(): + if not matplotlib: + skip("Matplotlib not the default backend") + + x = Symbol('x') + + # verify the capability to use custom backends + plot(sin(x), backend=Plot, show=False) + p2 = plot(sin(x), backend=MatplotlibBackend, show=False) + assert p2.backend == MatplotlibBackend + assert len(p2[0].get_data()[0]) >= 30 + p3 = plot(sin(x), backend=DummyBackendOk, show=False) + assert p3.backend == DummyBackendOk + assert len(p3[0].get_data()[0]) >= 30 + + # test for an improper coded backend + p4 = plot(sin(x), backend=DummyBackendNotOk, show=False) + assert p4.backend == DummyBackendNotOk + assert len(p4[0].get_data()[0]) >= 30 + with raises(NotImplementedError): + p4.show() + with raises(NotImplementedError): + p4.save("test/path") + with raises(NotImplementedError): + p4._backend.close() + + +def test_custom_coloring(): + x = Symbol('x') + y = Symbol('y') + plot(cos(x), line_color=lambda a: a) + plot(cos(x), line_color=1) + plot(cos(x), line_color="r") + plot_parametric(cos(x), sin(x), line_color=lambda a: a) + plot_parametric(cos(x), sin(x), line_color=1) + plot_parametric(cos(x), sin(x), line_color="r") + plot3d_parametric_line(cos(x), sin(x), x, line_color=lambda a: a) + plot3d_parametric_line(cos(x), sin(x), x, line_color=1) + plot3d_parametric_line(cos(x), sin(x), x, line_color="r") + plot3d_parametric_surface(cos(x + y), sin(x - y), x - y, + (x, -5, 5), (y, -5, 5), + surface_color=lambda a, b: a**2 + b**2) + plot3d_parametric_surface(cos(x + y), sin(x - y), x - y, + (x, -5, 5), (y, -5, 5), + surface_color=1) + plot3d_parametric_surface(cos(x + y), sin(x - y), x - y, + (x, -5, 5), (y, -5, 5), + surface_color="r") + plot3d(x*y, (x, -5, 5), (y, -5, 5), + surface_color=lambda a, b: a**2 + b**2) + plot3d(x*y, (x, -5, 5), (y, -5, 5), surface_color=1) + plot3d(x*y, (x, -5, 5), (y, -5, 5), surface_color="r") + + +@pytest.mark.parametrize("adaptive", [True, False]) +def test_deprecated_get_segments(adaptive): + if not matplotlib: + skip("Matplotlib not the default backend") + + x = Symbol('x') + f = sin(x) + p = plot(f, (x, -10, 10), show=False, adaptive=adaptive, n=10) + with warns_deprecated_sympy(): + p[0].get_segments() + + +@pytest.mark.parametrize("adaptive", [True, False]) +def test_generic_data_series(adaptive): + # verify that no errors are raised when generic data series are used + if not matplotlib: + skip("Matplotlib not the default backend") + + x = Symbol("x") + p = plot(x, + markers=[{"args":[[0, 1], [0, 1]], "marker": "*", "linestyle": "none"}], + annotations=[{"text": "test", "xy": (0, 0)}], + 
fill={"x": [0, 1, 2, 3], "y1": [0, 1, 2, 3]}, + rectangles=[{"xy": (0, 0), "width": 5, "height": 1}], + adaptive=adaptive, n=10) + assert len(p._backend.ax.collections) == 1 + assert len(p._backend.ax.patches) == 1 + assert len(p._backend.ax.lines) == 2 + assert len(p._backend.ax.texts) == 1 + + +def test_deprecated_markers_annotations_rectangles_fill(): + if not matplotlib: + skip("Matplotlib not the default backend") + + x = Symbol('x') + p = plot(sin(x), (x, -10, 10), show=False) + with warns_deprecated_sympy(): + p.markers = [{"args":[[0, 1], [0, 1]], "marker": "*", "linestyle": "none"}] + assert len(p._series) == 2 + with warns_deprecated_sympy(): + p.annotations = [{"text": "test", "xy": (0, 0)}] + assert len(p._series) == 3 + with warns_deprecated_sympy(): + p.fill = {"x": [0, 1, 2, 3], "y1": [0, 1, 2, 3]} + assert len(p._series) == 4 + with warns_deprecated_sympy(): + p.rectangles = [{"xy": (0, 0), "width": 5, "height": 1}] + assert len(p._series) == 5 + + +def test_back_compatibility(): + if not matplotlib: + skip("Matplotlib not the default backend") + + x = Symbol('x') + y = Symbol('y') + p = plot(sin(x), adaptive=False, n=5) + assert len(p[0].get_points()) == 2 + assert len(p[0].get_data()) == 2 + p = plot_parametric(cos(x), sin(x), (x, 0, 2), adaptive=False, n=5) + assert len(p[0].get_points()) == 2 + assert len(p[0].get_data()) == 3 + p = plot3d_parametric_line(cos(x), sin(x), x, (x, 0, 2), + adaptive=False, n=5) + assert len(p[0].get_points()) == 3 + assert len(p[0].get_data()) == 4 + p = plot3d(cos(x**2 + y**2), (x, -pi, pi), (y, -pi, pi), n=5) + assert len(p[0].get_meshes()) == 3 + assert len(p[0].get_data()) == 3 + p = plot_contour(cos(x**2 + y**2), (x, -pi, pi), (y, -pi, pi), n=5) + assert len(p[0].get_meshes()) == 3 + assert len(p[0].get_data()) == 3 + p = plot3d_parametric_surface(x * cos(y), x * sin(y), x * cos(4 * y) / 2, + (x, 0, pi), (y, 0, 2*pi), n=5) + assert len(p[0].get_meshes()) == 3 + assert len(p[0].get_data()) == 5 + + +def test_plot_arguments(): + ### Test arguments for plot() + if not matplotlib: + skip("Matplotlib not the default backend") + + x, y = symbols("x, y") + + # single expressions + p = plot(x + 1) + assert isinstance(p[0], LineOver1DRangeSeries) + assert p[0].expr == x + 1 + assert p[0].ranges == [(x, -10, 10)] + assert p[0].get_label(False) == "x + 1" + assert p[0].rendering_kw == {} + + # single expressions custom label + p = plot(x + 1, "label") + assert isinstance(p[0], LineOver1DRangeSeries) + assert p[0].expr == x + 1 + assert p[0].ranges == [(x, -10, 10)] + assert p[0].get_label(False) == "label" + assert p[0].rendering_kw == {} + + # single expressions with range + p = plot(x + 1, (x, -2, 2)) + assert p[0].ranges == [(x, -2, 2)] + + # single expressions with range, label and rendering-kw dictionary + p = plot(x + 1, (x, -2, 2), "test", {"color": "r"}) + assert p[0].get_label(False) == "test" + assert p[0].rendering_kw == {"color": "r"} + + # multiple expressions + p = plot(x + 1, x**2) + assert isinstance(p[0], LineOver1DRangeSeries) + assert p[0].expr == x + 1 + assert p[0].ranges == [(x, -10, 10)] + assert p[0].get_label(False) == "x + 1" + assert p[0].rendering_kw == {} + assert isinstance(p[1], LineOver1DRangeSeries) + assert p[1].expr == x**2 + assert p[1].ranges == [(x, -10, 10)] + assert p[1].get_label(False) == "x**2" + assert p[1].rendering_kw == {} + + # multiple expressions over the same range + p = plot(x + 1, x**2, (x, 0, 5)) + assert p[0].ranges == [(x, 0, 5)] + assert p[1].ranges == [(x, 0, 5)] + + # multiple expressions 
over the same range with the same rendering kws + p = plot(x + 1, x**2, (x, 0, 5), {"color": "r"}) + assert p[0].ranges == [(x, 0, 5)] + assert p[1].ranges == [(x, 0, 5)] + assert p[0].rendering_kw == {"color": "r"} + assert p[1].rendering_kw == {"color": "r"} + + # multiple expressions with different ranges, labels and rendering kws + p = plot( + (x + 1, (x, 0, 5)), + (x**2, (x, -2, 2), "test", {"color": "r"})) + assert isinstance(p[0], LineOver1DRangeSeries) + assert p[0].expr == x + 1 + assert p[0].ranges == [(x, 0, 5)] + assert p[0].get_label(False) == "x + 1" + assert p[0].rendering_kw == {} + assert isinstance(p[1], LineOver1DRangeSeries) + assert p[1].expr == x**2 + assert p[1].ranges == [(x, -2, 2)] + assert p[1].get_label(False) == "test" + assert p[1].rendering_kw == {"color": "r"} + + # single argument: lambda function + f = lambda t: t + p = plot(lambda t: t) + assert isinstance(p[0], LineOver1DRangeSeries) + assert callable(p[0].expr) + assert p[0].ranges[0][1:] == (-10, 10) + assert p[0].get_label(False) == "" + assert p[0].rendering_kw == {} + + # single argument: lambda function + custom range and label + p = plot(f, ("t", -5, 6), "test") + assert p[0].ranges[0][1:] == (-5, 6) + assert p[0].get_label(False) == "test" + + +def test_plot_parametric_arguments(): + ### Test arguments for plot_parametric() + if not matplotlib: + skip("Matplotlib not the default backend") + + x, y = symbols("x, y") + + # single parametric expression + p = plot_parametric(x + 1, x) + assert isinstance(p[0], Parametric2DLineSeries) + assert p[0].expr == (x + 1, x) + assert p[0].ranges == [(x, -10, 10)] + assert p[0].get_label(False) == "x" + assert p[0].rendering_kw == {} + + # single parametric expression with custom range, label and rendering kws + p = plot_parametric(x + 1, x, (x, -2, 2), "test", + {"cmap": "Reds"}) + assert p[0].expr == (x + 1, x) + assert p[0].ranges == [(x, -2, 2)] + assert p[0].get_label(False) == "test" + assert p[0].rendering_kw == {"cmap": "Reds"} + + p = plot_parametric((x + 1, x), (x, -2, 2), "test") + assert p[0].expr == (x + 1, x) + assert p[0].ranges == [(x, -2, 2)] + assert p[0].get_label(False) == "test" + assert p[0].rendering_kw == {} + + # multiple parametric expressions same symbol + p = plot_parametric((x + 1, x), (x ** 2, x + 1)) + assert p[0].expr == (x + 1, x) + assert p[0].ranges == [(x, -10, 10)] + assert p[0].get_label(False) == "x" + assert p[0].rendering_kw == {} + assert p[1].expr == (x ** 2, x + 1) + assert p[1].ranges == [(x, -10, 10)] + assert p[1].get_label(False) == "x" + assert p[1].rendering_kw == {} + + # multiple parametric expressions different symbols + p = plot_parametric((x + 1, x), (y ** 2, y + 1, "test")) + assert p[0].expr == (x + 1, x) + assert p[0].ranges == [(x, -10, 10)] + assert p[0].get_label(False) == "x" + assert p[0].rendering_kw == {} + assert p[1].expr == (y ** 2, y + 1) + assert p[1].ranges == [(y, -10, 10)] + assert p[1].get_label(False) == "test" + assert p[1].rendering_kw == {} + + # multiple parametric expressions same range + p = plot_parametric((x + 1, x), (x ** 2, x + 1), (x, -2, 2)) + assert p[0].expr == (x + 1, x) + assert p[0].ranges == [(x, -2, 2)] + assert p[0].get_label(False) == "x" + assert p[0].rendering_kw == {} + assert p[1].expr == (x ** 2, x + 1) + assert p[1].ranges == [(x, -2, 2)] + assert p[1].get_label(False) == "x" + assert p[1].rendering_kw == {} + + # multiple parametric expressions, custom ranges and labels + p = plot_parametric( + (x + 1, x, (x, -2, 2), "test1"), + (x ** 2, x + 1, (x, -3, 3), 
"test2", {"cmap": "Reds"})) + assert p[0].expr == (x + 1, x) + assert p[0].ranges == [(x, -2, 2)] + assert p[0].get_label(False) == "test1" + assert p[0].rendering_kw == {} + assert p[1].expr == (x ** 2, x + 1) + assert p[1].ranges == [(x, -3, 3)] + assert p[1].get_label(False) == "test2" + assert p[1].rendering_kw == {"cmap": "Reds"} + + # single argument: lambda function + fx = lambda t: t + fy = lambda t: 2 * t + p = plot_parametric(fx, fy) + assert all(callable(t) for t in p[0].expr) + assert p[0].ranges[0][1:] == (-10, 10) + assert "Dummy" in p[0].get_label(False) + assert p[0].rendering_kw == {} + + # single argument: lambda function + custom range + label + p = plot_parametric(fx, fy, ("t", 0, 2), "test") + assert all(callable(t) for t in p[0].expr) + assert p[0].ranges[0][1:] == (0, 2) + assert p[0].get_label(False) == "test" + assert p[0].rendering_kw == {} + + +def test_plot3d_parametric_line_arguments(): + ### Test arguments for plot3d_parametric_line() + if not matplotlib: + skip("Matplotlib not the default backend") + + x, y = symbols("x, y") + + # single parametric expression + p = plot3d_parametric_line(x + 1, x, sin(x)) + assert isinstance(p[0], Parametric3DLineSeries) + assert p[0].expr == (x + 1, x, sin(x)) + assert p[0].ranges == [(x, -10, 10)] + assert p[0].get_label(False) == "x" + assert p[0].rendering_kw == {} + + # single parametric expression with custom range, label and rendering kws + p = plot3d_parametric_line(x + 1, x, sin(x), (x, -2, 2), + "test", {"cmap": "Reds"}) + assert isinstance(p[0], Parametric3DLineSeries) + assert p[0].expr == (x + 1, x, sin(x)) + assert p[0].ranges == [(x, -2, 2)] + assert p[0].get_label(False) == "test" + assert p[0].rendering_kw == {"cmap": "Reds"} + + p = plot3d_parametric_line((x + 1, x, sin(x)), (x, -2, 2), "test") + assert p[0].expr == (x + 1, x, sin(x)) + assert p[0].ranges == [(x, -2, 2)] + assert p[0].get_label(False) == "test" + assert p[0].rendering_kw == {} + + # multiple parametric expression same symbol + p = plot3d_parametric_line( + (x + 1, x, sin(x)), (x ** 2, 1, cos(x), {"cmap": "Reds"})) + assert p[0].expr == (x + 1, x, sin(x)) + assert p[0].ranges == [(x, -10, 10)] + assert p[0].get_label(False) == "x" + assert p[0].rendering_kw == {} + assert p[1].expr == (x ** 2, 1, cos(x)) + assert p[1].ranges == [(x, -10, 10)] + assert p[1].get_label(False) == "x" + assert p[1].rendering_kw == {"cmap": "Reds"} + + # multiple parametric expression different symbols + p = plot3d_parametric_line((x + 1, x, sin(x)), (y ** 2, 1, cos(y))) + assert p[0].expr == (x + 1, x, sin(x)) + assert p[0].ranges == [(x, -10, 10)] + assert p[0].get_label(False) == "x" + assert p[0].rendering_kw == {} + assert p[1].expr == (y ** 2, 1, cos(y)) + assert p[1].ranges == [(y, -10, 10)] + assert p[1].get_label(False) == "y" + assert p[1].rendering_kw == {} + + # multiple parametric expression, custom ranges and labels + p = plot3d_parametric_line( + (x + 1, x, sin(x)), + (x ** 2, 1, cos(x), (x, -2, 2), "test", {"cmap": "Reds"})) + assert p[0].expr == (x + 1, x, sin(x)) + assert p[0].ranges == [(x, -10, 10)] + assert p[0].get_label(False) == "x" + assert p[0].rendering_kw == {} + assert p[1].expr == (x ** 2, 1, cos(x)) + assert p[1].ranges == [(x, -2, 2)] + assert p[1].get_label(False) == "test" + assert p[1].rendering_kw == {"cmap": "Reds"} + + # single argument: lambda function + fx = lambda t: t + fy = lambda t: 2 * t + fz = lambda t: 3 * t + p = plot3d_parametric_line(fx, fy, fz) + assert all(callable(t) for t in p[0].expr) + assert 
p[0].ranges[0][1:] == (-10, 10) + assert "Dummy" in p[0].get_label(False) + assert p[0].rendering_kw == {} + + # single argument: lambda function + custom range + label + p = plot3d_parametric_line(fx, fy, fz, ("t", 0, 2), "test") + assert all(callable(t) for t in p[0].expr) + assert p[0].ranges[0][1:] == (0, 2) + assert p[0].get_label(False) == "test" + assert p[0].rendering_kw == {} + + +def test_plot3d_plot_contour_arguments(): + ### Test arguments for plot3d() and plot_contour() + if not matplotlib: + skip("Matplotlib not the default backend") + + x, y = symbols("x, y") + + # single expression + p = plot3d(x + y) + assert isinstance(p[0], SurfaceOver2DRangeSeries) + assert p[0].expr == x + y + assert p[0].ranges[0] == (x, -10, 10) or (y, -10, 10) + assert p[0].ranges[1] == (x, -10, 10) or (y, -10, 10) + assert p[0].get_label(False) == "x + y" + assert p[0].rendering_kw == {} + + # single expression, custom range, label and rendering kws + p = plot3d(x + y, (x, -2, 2), "test", {"cmap": "Reds"}) + assert isinstance(p[0], SurfaceOver2DRangeSeries) + assert p[0].expr == x + y + assert p[0].ranges[0] == (x, -2, 2) + assert p[0].ranges[1] == (y, -10, 10) + assert p[0].get_label(False) == "test" + assert p[0].rendering_kw == {"cmap": "Reds"} + + p = plot3d(x + y, (x, -2, 2), (y, -4, 4), "test") + assert p[0].ranges[0] == (x, -2, 2) + assert p[0].ranges[1] == (y, -4, 4) + + # multiple expressions + p = plot3d(x + y, x * y) + assert p[0].expr == x + y + assert p[0].ranges[0] == (x, -10, 10) or (y, -10, 10) + assert p[0].ranges[1] == (x, -10, 10) or (y, -10, 10) + assert p[0].get_label(False) == "x + y" + assert p[0].rendering_kw == {} + assert p[1].expr == x * y + assert p[1].ranges[0] == (x, -10, 10) or (y, -10, 10) + assert p[1].ranges[1] == (x, -10, 10) or (y, -10, 10) + assert p[1].get_label(False) == "x*y" + assert p[1].rendering_kw == {} + + # multiple expressions, same custom ranges + p = plot3d(x + y, x * y, (x, -2, 2), (y, -4, 4)) + assert p[0].expr == x + y + assert p[0].ranges[0] == (x, -2, 2) + assert p[0].ranges[1] == (y, -4, 4) + assert p[0].get_label(False) == "x + y" + assert p[0].rendering_kw == {} + assert p[1].expr == x * y + assert p[1].ranges[0] == (x, -2, 2) + assert p[1].ranges[1] == (y, -4, 4) + assert p[1].get_label(False) == "x*y" + assert p[1].rendering_kw == {} + + # multiple expressions, custom ranges, labels and rendering kws + p = plot3d( + (x + y, (x, -2, 2), (y, -4, 4)), + (x * y, (x, -3, 3), (y, -6, 6), "test", {"cmap": "Reds"})) + assert p[0].expr == x + y + assert p[0].ranges[0] == (x, -2, 2) + assert p[0].ranges[1] == (y, -4, 4) + assert p[0].get_label(False) == "x + y" + assert p[0].rendering_kw == {} + assert p[1].expr == x * y + assert p[1].ranges[0] == (x, -3, 3) + assert p[1].ranges[1] == (y, -6, 6) + assert p[1].get_label(False) == "test" + assert p[1].rendering_kw == {"cmap": "Reds"} + + # single expression: lambda function + f = lambda x, y: x + y + p = plot3d(f) + assert callable(p[0].expr) + assert p[0].ranges[0][1:] == (-10, 10) + assert p[0].ranges[1][1:] == (-10, 10) + assert p[0].get_label(False) == "" + assert p[0].rendering_kw == {} + + # single expression: lambda function + custom ranges + label + p = plot3d(f, ("a", -5, 3), ("b", -2, 1), "test") + assert callable(p[0].expr) + assert p[0].ranges[0][1:] == (-5, 3) + assert p[0].ranges[1][1:] == (-2, 1) + assert p[0].get_label(False) == "test" + assert p[0].rendering_kw == {} + + # test issue 25818 + # single expression, custom range, min/max functions + p = plot3d(Min(x, y), (x, 0, 10), (y, 
0, 10)) + assert isinstance(p[0], SurfaceOver2DRangeSeries) + assert p[0].expr == Min(x, y) + assert p[0].ranges[0] == (x, 0, 10) + assert p[0].ranges[1] == (y, 0, 10) + assert p[0].get_label(False) == "Min(x, y)" + assert p[0].rendering_kw == {} + + +def test_plot3d_parametric_surface_arguments(): + ### Test arguments for plot3d_parametric_surface() + if not matplotlib: + skip("Matplotlib not the default backend") + + x, y = symbols("x, y") + + # single parametric expression + p = plot3d_parametric_surface(x + y, cos(x + y), sin(x + y)) + assert isinstance(p[0], ParametricSurfaceSeries) + assert p[0].expr == (x + y, cos(x + y), sin(x + y)) + assert p[0].ranges[0] == (x, -10, 10) or (y, -10, 10) + assert p[0].ranges[1] == (x, -10, 10) or (y, -10, 10) + assert p[0].get_label(False) == "(x + y, cos(x + y), sin(x + y))" + assert p[0].rendering_kw == {} + + # single parametric expression, custom ranges, labels and rendering kws + p = plot3d_parametric_surface(x + y, cos(x + y), sin(x + y), + (x, -2, 2), (y, -4, 4), "test", {"cmap": "Reds"}) + assert isinstance(p[0], ParametricSurfaceSeries) + assert p[0].expr == (x + y, cos(x + y), sin(x + y)) + assert p[0].ranges[0] == (x, -2, 2) + assert p[0].ranges[1] == (y, -4, 4) + assert p[0].get_label(False) == "test" + assert p[0].rendering_kw == {"cmap": "Reds"} + + # multiple parametric expressions + p = plot3d_parametric_surface( + (x + y, cos(x + y), sin(x + y)), + (x - y, cos(x - y), sin(x - y), "test")) + assert p[0].expr == (x + y, cos(x + y), sin(x + y)) + assert p[0].ranges[0] == (x, -10, 10) or (y, -10, 10) + assert p[0].ranges[1] == (x, -10, 10) or (y, -10, 10) + assert p[0].get_label(False) == "(x + y, cos(x + y), sin(x + y))" + assert p[0].rendering_kw == {} + assert p[1].expr == (x - y, cos(x - y), sin(x - y)) + assert p[1].ranges[0] == (x, -10, 10) or (y, -10, 10) + assert p[1].ranges[1] == (x, -10, 10) or (y, -10, 10) + assert p[1].get_label(False) == "test" + assert p[1].rendering_kw == {} + + # multiple parametric expressions, custom ranges and labels + p = plot3d_parametric_surface( + (x + y, cos(x + y), sin(x + y), (x, -2, 2), "test"), + (x - y, cos(x - y), sin(x - y), (x, -3, 3), (y, -4, 4), + "test2", {"cmap": "Reds"})) + assert p[0].expr == (x + y, cos(x + y), sin(x + y)) + assert p[0].ranges[0] == (x, -2, 2) + assert p[0].ranges[1] == (y, -10, 10) + assert p[0].get_label(False) == "test" + assert p[0].rendering_kw == {} + assert p[1].expr == (x - y, cos(x - y), sin(x - y)) + assert p[1].ranges[0] == (x, -3, 3) + assert p[1].ranges[1] == (y, -4, 4) + assert p[1].get_label(False) == "test2" + assert p[1].rendering_kw == {"cmap": "Reds"} + + # lambda functions instead of symbolic expressions for a single 3D + # parametric surface + p = plot3d_parametric_surface( + lambda u, v: u, lambda u, v: v, lambda u, v: u + v, + ("u", 0, 2), ("v", -3, 4)) + assert all(callable(t) for t in p[0].expr) + assert p[0].ranges[0][1:] == (-0, 2) + assert p[0].ranges[1][1:] == (-3, 4) + assert p[0].get_label(False) == "" + assert p[0].rendering_kw == {} + + # lambda functions instead of symbolic expressions for multiple 3D + # parametric surfaces + p = plot3d_parametric_surface( + (lambda u, v: u, lambda u, v: v, lambda u, v: u + v, + ("u", 0, 2), ("v", -3, 4)), + (lambda u, v: v, lambda u, v: u, lambda u, v: u - v, + ("u", -2, 3), ("v", -4, 5), "test")) + assert all(callable(t) for t in p[0].expr) + assert p[0].ranges[0][1:] == (0, 2) + assert p[0].ranges[1][1:] == (-3, 4) + assert p[0].get_label(False) == "" + assert p[0].rendering_kw == {} + assert 
all(callable(t) for t in p[1].expr) + assert p[1].ranges[0][1:] == (-2, 3) + assert p[1].ranges[1][1:] == (-4, 5) + assert p[1].get_label(False) == "test" + assert p[1].rendering_kw == {} diff --git a/.venv/Lib/site-packages/sympy/plotting/tests/test_plot_implicit.py b/.venv/Lib/site-packages/sympy/plotting/tests/test_plot_implicit.py new file mode 100644 index 0000000000000000000000000000000000000000..73c7b186c83f0b64d5f6f4cc5cd9f6a08efef43a --- /dev/null +++ b/.venv/Lib/site-packages/sympy/plotting/tests/test_plot_implicit.py @@ -0,0 +1,146 @@ +from sympy.core.numbers import (I, pi) +from sympy.core.relational import Eq +from sympy.core.symbol import (Symbol, symbols) +from sympy.functions.elementary.complexes import re +from sympy.functions.elementary.exponential import exp +from sympy.functions.elementary.trigonometric import (cos, sin, tan) +from sympy.logic.boolalg import (And, Or) +from sympy.plotting.plot_implicit import plot_implicit +from sympy.plotting.plot import unset_show +from tempfile import NamedTemporaryFile, mkdtemp +from sympy.testing.pytest import skip, warns, XFAIL +from sympy.external import import_module +from sympy.testing.tmpfiles import TmpFileManager + +import os + +#Set plots not to show +unset_show() + +def tmp_file(dir=None, name=''): + return NamedTemporaryFile( + suffix='.png', dir=dir, delete=False).name + +def plot_and_save(expr, *args, name='', dir=None, **kwargs): + p = plot_implicit(expr, *args, **kwargs) + p.save(tmp_file(dir=dir, name=name)) + # Close the plot to avoid a warning from matplotlib + p._backend.close() + +def plot_implicit_tests(name): + temp_dir = mkdtemp() + TmpFileManager.tmp_folder(temp_dir) + x = Symbol('x') + y = Symbol('y') + #implicit plot tests + plot_and_save(Eq(y, cos(x)), (x, -5, 5), (y, -2, 2), name=name, dir=temp_dir) + plot_and_save(Eq(y**2, x**3 - x), (x, -5, 5), + (y, -4, 4), name=name, dir=temp_dir) + plot_and_save(y > 1 / x, (x, -5, 5), + (y, -2, 2), name=name, dir=temp_dir) + plot_and_save(y < 1 / tan(x), (x, -5, 5), + (y, -2, 2), name=name, dir=temp_dir) + plot_and_save(y >= 2 * sin(x) * cos(x), (x, -5, 5), + (y, -2, 2), name=name, dir=temp_dir) + plot_and_save(y <= x**2, (x, -3, 3), + (y, -1, 5), name=name, dir=temp_dir) + + #Test all input args for plot_implicit + plot_and_save(Eq(y**2, x**3 - x), dir=temp_dir) + plot_and_save(Eq(y**2, x**3 - x), adaptive=False, dir=temp_dir) + plot_and_save(Eq(y**2, x**3 - x), adaptive=False, n=500, dir=temp_dir) + plot_and_save(y > x, (x, -5, 5), dir=temp_dir) + plot_and_save(And(y > exp(x), y > x + 2), dir=temp_dir) + plot_and_save(Or(y > x, y > -x), dir=temp_dir) + plot_and_save(x**2 - 1, (x, -5, 5), dir=temp_dir) + plot_and_save(x**2 - 1, dir=temp_dir) + plot_and_save(y > x, depth=-5, dir=temp_dir) + plot_and_save(y > x, depth=5, dir=temp_dir) + plot_and_save(y > cos(x), adaptive=False, dir=temp_dir) + plot_and_save(y < cos(x), adaptive=False, dir=temp_dir) + plot_and_save(And(y > cos(x), Or(y > x, Eq(y, x))), dir=temp_dir) + plot_and_save(y - cos(pi / x), dir=temp_dir) + + plot_and_save(x**2 - 1, title='An implicit plot', dir=temp_dir) + +@XFAIL +def test_no_adaptive_meshing(): + matplotlib = import_module('matplotlib', min_module_version='1.1.0', catch=(RuntimeError,)) + if matplotlib: + try: + temp_dir = mkdtemp() + TmpFileManager.tmp_folder(temp_dir) + x = Symbol('x') + y = Symbol('y') + # Test plots which cannot be rendered using the adaptive algorithm + + # This works, but it triggers a deprecation warning from sympify(). 
The + # code needs to be updated to detect if interval math is supported without + # relying on random AttributeErrors. + with warns(UserWarning, match="Adaptive meshing could not be applied"): + plot_and_save(Eq(y, re(cos(x) + I*sin(x))), name='test', dir=temp_dir) + finally: + TmpFileManager.cleanup() + else: + skip("Matplotlib not the default backend") +def test_line_color(): + x, y = symbols('x, y') + p = plot_implicit(x**2 + y**2 - 1, line_color="green", show=False) + assert p._series[0].line_color == "green" + p = plot_implicit(x**2 + y**2 - 1, line_color='r', show=False) + assert p._series[0].line_color == "r" + +def test_matplotlib(): + matplotlib = import_module('matplotlib', min_module_version='1.1.0', catch=(RuntimeError,)) + if matplotlib: + try: + plot_implicit_tests('test') + test_line_color() + finally: + TmpFileManager.cleanup() + else: + skip("Matplotlib not the default backend") + + +def test_region_and(): + matplotlib = import_module('matplotlib', min_module_version='1.1.0', catch=(RuntimeError,)) + if not matplotlib: + skip("Matplotlib not the default backend") + + from matplotlib.testing.compare import compare_images + test_directory = os.path.dirname(os.path.abspath(__file__)) + + try: + temp_dir = mkdtemp() + TmpFileManager.tmp_folder(temp_dir) + + x, y = symbols('x y') + + r1 = (x - 1)**2 + y**2 < 2 + r2 = (x + 1)**2 + y**2 < 2 + + test_filename = tmp_file(dir=temp_dir, name="test_region_and") + cmp_filename = os.path.join(test_directory, "test_region_and.png") + p = plot_implicit(r1 & r2, x, y) + p.save(test_filename) + compare_images(cmp_filename, test_filename, 0.005) + + test_filename = tmp_file(dir=temp_dir, name="test_region_or") + cmp_filename = os.path.join(test_directory, "test_region_or.png") + p = plot_implicit(r1 | r2, x, y) + p.save(test_filename) + compare_images(cmp_filename, test_filename, 0.005) + + test_filename = tmp_file(dir=temp_dir, name="test_region_not") + cmp_filename = os.path.join(test_directory, "test_region_not.png") + p = plot_implicit(~r1, x, y) + p.save(test_filename) + compare_images(cmp_filename, test_filename, 0.005) + + test_filename = tmp_file(dir=temp_dir, name="test_region_xor") + cmp_filename = os.path.join(test_directory, "test_region_xor.png") + p = plot_implicit(r1 ^ r2, x, y) + p.save(test_filename) + compare_images(cmp_filename, test_filename, 0.005) + finally: + TmpFileManager.cleanup() diff --git a/.venv/Lib/site-packages/sympy/plotting/tests/test_region_and.png b/.venv/Lib/site-packages/sympy/plotting/tests/test_region_and.png new file mode 100644 index 0000000000000000000000000000000000000000..61dda4c2054e5e4bd5018cb84af86a832e81886a Binary files /dev/null and b/.venv/Lib/site-packages/sympy/plotting/tests/test_region_and.png differ diff --git a/.venv/Lib/site-packages/sympy/plotting/tests/test_region_not.png b/.venv/Lib/site-packages/sympy/plotting/tests/test_region_not.png new file mode 100644 index 0000000000000000000000000000000000000000..29d3d47b5a95346cb7c44655c12a2a63e6c7a857 Binary files /dev/null and b/.venv/Lib/site-packages/sympy/plotting/tests/test_region_not.png differ diff --git a/.venv/Lib/site-packages/sympy/plotting/tests/test_region_or.png b/.venv/Lib/site-packages/sympy/plotting/tests/test_region_or.png new file mode 100644 index 0000000000000000000000000000000000000000..8a6329dd8dd368c37e431a7741e0869ec84f8f68 Binary files /dev/null and b/.venv/Lib/site-packages/sympy/plotting/tests/test_region_or.png differ diff --git a/.venv/Lib/site-packages/sympy/plotting/tests/test_region_xor.png 
b/.venv/Lib/site-packages/sympy/plotting/tests/test_region_xor.png new file mode 100644 index 0000000000000000000000000000000000000000..1a48862909d3ad09a5f4d306bf6c8f96117d080c Binary files /dev/null and b/.venv/Lib/site-packages/sympy/plotting/tests/test_region_xor.png differ diff --git a/.venv/Lib/site-packages/sympy/plotting/tests/test_series.py b/.venv/Lib/site-packages/sympy/plotting/tests/test_series.py new file mode 100644 index 0000000000000000000000000000000000000000..e23aa719153d20dc9b9e911e5cee097f0ea56211 --- /dev/null +++ b/.venv/Lib/site-packages/sympy/plotting/tests/test_series.py @@ -0,0 +1,1771 @@ +from sympy import ( + latex, exp, symbols, I, pi, sin, cos, tan, log, sqrt, + re, im, arg, frac, Sum, S, Abs, lambdify, + Function, dsolve, Eq, floor, Tuple +) +from sympy.external import import_module +from sympy.plotting.series import ( + LineOver1DRangeSeries, Parametric2DLineSeries, Parametric3DLineSeries, + SurfaceOver2DRangeSeries, ContourSeries, ParametricSurfaceSeries, + ImplicitSeries, _set_discretization_points, List2DSeries +) +from sympy.testing.pytest import raises, warns, XFAIL, skip, ignore_warnings + +np = import_module('numpy') + + +def test_adaptive(): + # verify that adaptive-related keywords produces the expected results + if not np: + skip("numpy not installed.") + + x, y = symbols("x, y") + + s1 = LineOver1DRangeSeries(sin(x), (x, -10, 10), "", adaptive=True, + depth=2) + x1, _ = s1.get_data() + s2 = LineOver1DRangeSeries(sin(x), (x, -10, 10), "", adaptive=True, + depth=5) + x2, _ = s2.get_data() + s3 = LineOver1DRangeSeries(sin(x), (x, -10, 10), "", adaptive=True) + x3, _ = s3.get_data() + assert len(x1) < len(x2) < len(x3) + + s1 = Parametric2DLineSeries(cos(x), sin(x), (x, 0, 2*pi), + adaptive=True, depth=2) + x1, _, _, = s1.get_data() + s2 = Parametric2DLineSeries(cos(x), sin(x), (x, 0, 2*pi), + adaptive=True, depth=5) + x2, _, _ = s2.get_data() + s3 = Parametric2DLineSeries(cos(x), sin(x), (x, 0, 2*pi), + adaptive=True) + x3, _, _ = s3.get_data() + assert len(x1) < len(x2) < len(x3) + + +def test_detect_poles(): + if not np: + skip("numpy not installed.") + + x, u = symbols("x, u") + + s1 = LineOver1DRangeSeries(tan(x), (x, -pi, pi), + adaptive=False, n=1000, detect_poles=False) + xx1, yy1 = s1.get_data() + s2 = LineOver1DRangeSeries(tan(x), (x, -pi, pi), + adaptive=False, n=1000, detect_poles=True, eps=0.01) + xx2, yy2 = s2.get_data() + # eps is too small: doesn't detect any poles + s3 = LineOver1DRangeSeries(tan(x), (x, -pi, pi), + adaptive=False, n=1000, detect_poles=True, eps=1e-06) + xx3, yy3 = s3.get_data() + s4 = LineOver1DRangeSeries(tan(x), (x, -pi, pi), + adaptive=False, n=1000, detect_poles="symbolic") + xx4, yy4 = s4.get_data() + + assert np.allclose(xx1, xx2) and np.allclose(xx1, xx3) and np.allclose(xx1, xx4) + assert not np.any(np.isnan(yy1)) + assert not np.any(np.isnan(yy3)) + assert np.any(np.isnan(yy2)) + assert np.any(np.isnan(yy4)) + assert len(s2.poles_locations) == len(s3.poles_locations) == 0 + assert len(s4.poles_locations) == 2 + assert np.allclose(np.abs(s4.poles_locations), np.pi / 2) + + with warns( + UserWarning, + match="NumPy is unable to evaluate with complex numbers some of", + test_stacklevel=False, + ): + s1 = LineOver1DRangeSeries(frac(x), (x, -10, 10), + adaptive=False, n=1000, detect_poles=False) + s2 = LineOver1DRangeSeries(frac(x), (x, -10, 10), + adaptive=False, n=1000, detect_poles=True, eps=0.05) + s3 = LineOver1DRangeSeries(frac(x), (x, -10, 10), + adaptive=False, n=1000, detect_poles="symbolic") + xx1, yy1 
= s1.get_data() + xx2, yy2 = s2.get_data() + xx3, yy3 = s3.get_data() + assert np.allclose(xx1, xx2) and np.allclose(xx1, xx3) + assert not np.any(np.isnan(yy1)) + assert np.any(np.isnan(yy2)) and np.any(np.isnan(yy2)) + assert not np.allclose(yy1, yy2, equal_nan=True) + # The poles below are actually step discontinuities. + assert len(s3.poles_locations) == 21 + + s1 = LineOver1DRangeSeries(tan(u * x), (x, -pi, pi), params={u: 1}, + adaptive=False, n=1000, detect_poles=False) + xx1, yy1 = s1.get_data() + s2 = LineOver1DRangeSeries(tan(u * x), (x, -pi, pi), params={u: 1}, + adaptive=False, n=1000, detect_poles=True, eps=0.01) + xx2, yy2 = s2.get_data() + # eps is too small: doesn't detect any poles + s3 = LineOver1DRangeSeries(tan(u * x), (x, -pi, pi), params={u: 1}, + adaptive=False, n=1000, detect_poles=True, eps=1e-06) + xx3, yy3 = s3.get_data() + s4 = LineOver1DRangeSeries(tan(u * x), (x, -pi, pi), params={u: 1}, + adaptive=False, n=1000, detect_poles="symbolic") + xx4, yy4 = s4.get_data() + + assert np.allclose(xx1, xx2) and np.allclose(xx1, xx3) and np.allclose(xx1, xx4) + assert not np.any(np.isnan(yy1)) + assert not np.any(np.isnan(yy3)) + assert np.any(np.isnan(yy2)) + assert np.any(np.isnan(yy4)) + assert len(s2.poles_locations) == len(s3.poles_locations) == 0 + assert len(s4.poles_locations) == 2 + assert np.allclose(np.abs(s4.poles_locations), np.pi / 2) + + with warns( + UserWarning, + match="NumPy is unable to evaluate with complex numbers some of", + test_stacklevel=False, + ): + u, v = symbols("u, v", real=True) + n = S(1) / 3 + f = (u + I * v)**n + r, i = re(f), im(f) + s1 = Parametric2DLineSeries(r.subs(u, -2), i.subs(u, -2), (v, -2, 2), + adaptive=False, n=1000, detect_poles=False) + s2 = Parametric2DLineSeries(r.subs(u, -2), i.subs(u, -2), (v, -2, 2), + adaptive=False, n=1000, detect_poles=True) + with ignore_warnings(RuntimeWarning): + xx1, yy1, pp1 = s1.get_data() + assert not np.isnan(yy1).any() + xx2, yy2, pp2 = s2.get_data() + assert np.isnan(yy2).any() + + with warns( + UserWarning, + match="NumPy is unable to evaluate with complex numbers some of", + test_stacklevel=False, + ): + f = (x * u + x * I * v)**n + r, i = re(f), im(f) + s1 = Parametric2DLineSeries(r.subs(u, -2), i.subs(u, -2), + (v, -2, 2), params={x: 1}, + adaptive=False, n1=1000, detect_poles=False) + s2 = Parametric2DLineSeries(r.subs(u, -2), i.subs(u, -2), + (v, -2, 2), params={x: 1}, + adaptive=False, n1=1000, detect_poles=True) + with ignore_warnings(RuntimeWarning): + xx1, yy1, pp1 = s1.get_data() + assert not np.isnan(yy1).any() + xx2, yy2, pp2 = s2.get_data() + assert np.isnan(yy2).any() + + +def test_number_discretization_points(): + # verify that the different ways to set the number of discretization + # points are consistent with each other. 
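+ # For line-like series every input form must resolve to n1 = 10. For series over a 2D domain + # a scalar n sets n1 = n2 = 10, while n=[10, 20, 30] or explicit n1/n2 keywords resolve to + # n1 = 10 and n2 = 20, as the assertions below check.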
+ if not np: + skip("numpy not installed.") + + x, y, z = symbols("x:z") + + for pt in [LineOver1DRangeSeries, Parametric2DLineSeries, + Parametric3DLineSeries]: + kw1 = _set_discretization_points({"n": 10}, pt) + kw2 = _set_discretization_points({"n": [10, 20, 30]}, pt) + kw3 = _set_discretization_points({"n1": 10}, pt) + assert all(("n1" in kw) and kw["n1"] == 10 for kw in [kw1, kw2, kw3]) + + for pt in [SurfaceOver2DRangeSeries, ContourSeries, ParametricSurfaceSeries, + ImplicitSeries]: + kw1 = _set_discretization_points({"n": 10}, pt) + kw2 = _set_discretization_points({"n": [10, 20, 30]}, pt) + kw3 = _set_discretization_points({"n1": 10, "n2": 20}, pt) + assert kw1["n1"] == kw1["n2"] == 10 + assert all((kw["n1"] == 10) and (kw["n2"] == 20) for kw in [kw2, kw3]) + + # verify that line-related series can deal with large float number of + # discretization points + LineOver1DRangeSeries(cos(x), (x, -5, 5), adaptive=False, n=1e04).get_data() + + +def test_list2dseries(): + if not np: + skip("numpy not installed.") + + xx = np.linspace(-3, 3, 10) + yy1 = np.cos(xx) + yy2 = np.linspace(-3, 3, 20) + + # same number of elements: everything is fine + s = List2DSeries(xx, yy1) + assert not s.is_parametric + # different number of elements: error + raises(ValueError, lambda: List2DSeries(xx, yy2)) + + # no color func: returns only x, y components and s in not parametric + s = List2DSeries(xx, yy1) + xxs, yys = s.get_data() + assert np.allclose(xx, xxs) + assert np.allclose(yy1, yys) + assert not s.is_parametric + + +def test_interactive_vs_noninteractive(): + # verify that if a *Series class receives a `params` dictionary, it sets + # is_interactive=True + x, y, z, u, v = symbols("x, y, z, u, v") + + s = LineOver1DRangeSeries(cos(x), (x, -5, 5)) + assert not s.is_interactive + s = LineOver1DRangeSeries(u * cos(x), (x, -5, 5), params={u: 1}) + assert s.is_interactive + + s = Parametric2DLineSeries(cos(x), sin(x), (x, -5, 5)) + assert not s.is_interactive + s = Parametric2DLineSeries(u * cos(x), u * sin(x), (x, -5, 5), + params={u: 1}) + assert s.is_interactive + + s = Parametric3DLineSeries(cos(x), sin(x), x, (x, -5, 5)) + assert not s.is_interactive + s = Parametric3DLineSeries(u * cos(x), u * sin(x), x, (x, -5, 5), + params={u: 1}) + assert s.is_interactive + + s = SurfaceOver2DRangeSeries(cos(x * y), (x, -5, 5), (y, -5, 5)) + assert not s.is_interactive + s = SurfaceOver2DRangeSeries(u * cos(x * y), (x, -5, 5), (y, -5, 5), + params={u: 1}) + assert s.is_interactive + + s = ContourSeries(cos(x * y), (x, -5, 5), (y, -5, 5)) + assert not s.is_interactive + s = ContourSeries(u * cos(x * y), (x, -5, 5), (y, -5, 5), + params={u: 1}) + assert s.is_interactive + + s = ParametricSurfaceSeries(u * cos(v), v * sin(u), u + v, + (u, -5, 5), (v, -5, 5)) + assert not s.is_interactive + s = ParametricSurfaceSeries(u * cos(v * x), v * sin(u), u + v, + (u, -5, 5), (v, -5, 5), params={x: 1}) + assert s.is_interactive + + +def test_lin_log_scale(): + # Verify that data series create the correct spacing in the data. 
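+ # With xscale="linear" the discretized domain is evenly spaced, so its first and last increments + # coincide; with xscale="log" they do not. The checks below compare xx[1] - xx[0] against + # xx[-1] - xx[-2] (and the analogous differences of the parameter for parametric series).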
+ if not np: + skip("numpy not installed.") + + x, y, z = symbols("x, y, z") + + s = LineOver1DRangeSeries(x, (x, 1, 10), adaptive=False, n=50, + xscale="linear") + xx, _ = s.get_data() + assert np.isclose(xx[1] - xx[0], xx[-1] - xx[-2]) + + s = LineOver1DRangeSeries(x, (x, 1, 10), adaptive=False, n=50, + xscale="log") + xx, _ = s.get_data() + assert not np.isclose(xx[1] - xx[0], xx[-1] - xx[-2]) + + s = Parametric2DLineSeries( + cos(x), sin(x), (x, pi / 2, 1.5 * pi), adaptive=False, n=50, + xscale="linear") + _, _, param = s.get_data() + assert np.isclose(param[1] - param[0], param[-1] - param[-2]) + + s = Parametric2DLineSeries( + cos(x), sin(x), (x, pi / 2, 1.5 * pi), adaptive=False, n=50, + xscale="log") + _, _, param = s.get_data() + assert not np.isclose(param[1] - param[0], param[-1] - param[-2]) + + s = Parametric3DLineSeries( + cos(x), sin(x), x, (x, pi / 2, 1.5 * pi), adaptive=False, n=50, + xscale="linear") + _, _, _, param = s.get_data() + assert np.isclose(param[1] - param[0], param[-1] - param[-2]) + + s = Parametric3DLineSeries( + cos(x), sin(x), x, (x, pi / 2, 1.5 * pi), adaptive=False, n=50, + xscale="log") + _, _, _, param = s.get_data() + assert not np.isclose(param[1] - param[0], param[-1] - param[-2]) + + s = SurfaceOver2DRangeSeries( + cos(x ** 2 + y ** 2), (x, 1, 5), (y, 1, 5), n=10, + xscale="linear", yscale="linear") + xx, yy, _ = s.get_data() + assert np.isclose(xx[0, 1] - xx[0, 0], xx[0, -1] - xx[0, -2]) + assert np.isclose(yy[1, 0] - yy[0, 0], yy[-1, 0] - yy[-2, 0]) + + s = SurfaceOver2DRangeSeries( + cos(x ** 2 + y ** 2), (x, 1, 5), (y, 1, 5), n=10, + xscale="log", yscale="log") + xx, yy, _ = s.get_data() + assert not np.isclose(xx[0, 1] - xx[0, 0], xx[0, -1] - xx[0, -2]) + assert not np.isclose(yy[1, 0] - yy[0, 0], yy[-1, 0] - yy[-2, 0]) + + s = ImplicitSeries( + cos(x ** 2 + y ** 2) > 0, (x, 1, 5), (y, 1, 5), + n1=10, n2=10, xscale="linear", yscale="linear", adaptive=False) + xx, yy, _, _ = s.get_data() + assert np.isclose(xx[0, 1] - xx[0, 0], xx[0, -1] - xx[0, -2]) + assert np.isclose(yy[1, 0] - yy[0, 0], yy[-1, 0] - yy[-2, 0]) + + s = ImplicitSeries( + cos(x ** 2 + y ** 2) > 0, (x, 1, 5), (y, 1, 5), + n=10, xscale="log", yscale="log", adaptive=False) + xx, yy, _, _ = s.get_data() + assert not np.isclose(xx[0, 1] - xx[0, 0], xx[0, -1] - xx[0, -2]) + assert not np.isclose(yy[1, 0] - yy[0, 0], yy[-1, 0] - yy[-2, 0]) + + +def test_rendering_kw(): + # verify that each series exposes the `rendering_kw` attribute + if not np: + skip("numpy not installed.") + + u, v, x, y, z = symbols("u, v, x:z") + + s = List2DSeries([1, 2, 3], [4, 5, 6]) + assert isinstance(s.rendering_kw, dict) + + s = LineOver1DRangeSeries(1, (x, -5, 5)) + assert isinstance(s.rendering_kw, dict) + + s = Parametric2DLineSeries(sin(x), cos(x), (x, 0, pi)) + assert isinstance(s.rendering_kw, dict) + + s = Parametric3DLineSeries(cos(x), sin(x), x, (x, 0, 2 * pi)) + assert isinstance(s.rendering_kw, dict) + + s = SurfaceOver2DRangeSeries(x + y, (x, -2, 2), (y, -3, 3)) + assert isinstance(s.rendering_kw, dict) + + s = ContourSeries(x + y, (x, -2, 2), (y, -3, 3)) + assert isinstance(s.rendering_kw, dict) + + s = ParametricSurfaceSeries(1, x, y, (x, 0, 1), (y, 0, 1)) + assert isinstance(s.rendering_kw, dict) + + +def test_data_shape(): + # Verify that the series produces the correct data shape when the input + # expression is a number. 
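+ # A constant expression is still broadcast over the discretized domain: get_data() returns + # coordinate arrays of matching length/shape with the constant repeated, which np.all verifies below.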
+ if not np: + skip("numpy not installed.") + + u, x, y, z = symbols("u, x:z") + + # scalar expression: it should return a numpy ones array + s = LineOver1DRangeSeries(1, (x, -5, 5)) + xx, yy = s.get_data() + assert len(xx) == len(yy) + assert np.all(yy == 1) + + s = LineOver1DRangeSeries(1, (x, -5, 5), adaptive=False, n=10) + xx, yy = s.get_data() + assert len(xx) == len(yy) == 10 + assert np.all(yy == 1) + + s = Parametric2DLineSeries(sin(x), 1, (x, 0, pi)) + xx, yy, param = s.get_data() + assert (len(xx) == len(yy)) and (len(xx) == len(param)) + assert np.all(yy == 1) + + s = Parametric2DLineSeries(1, sin(x), (x, 0, pi)) + xx, yy, param = s.get_data() + assert (len(xx) == len(yy)) and (len(xx) == len(param)) + assert np.all(xx == 1) + + s = Parametric2DLineSeries(sin(x), 1, (x, 0, pi), adaptive=False) + xx, yy, param = s.get_data() + assert (len(xx) == len(yy)) and (len(xx) == len(param)) + assert np.all(yy == 1) + + s = Parametric2DLineSeries(1, sin(x), (x, 0, pi), adaptive=False) + xx, yy, param = s.get_data() + assert (len(xx) == len(yy)) and (len(xx) == len(param)) + assert np.all(xx == 1) + + s = Parametric3DLineSeries(cos(x), sin(x), 1, (x, 0, 2 * pi)) + xx, yy, zz, param = s.get_data() + assert (len(xx) == len(yy)) and (len(xx) == len(zz)) and (len(xx) == len(param)) + assert np.all(zz == 1) + + s = Parametric3DLineSeries(cos(x), 1, x, (x, 0, 2 * pi)) + xx, yy, zz, param = s.get_data() + assert (len(xx) == len(yy)) and (len(xx) == len(zz)) and (len(xx) == len(param)) + assert np.all(yy == 1) + + s = Parametric3DLineSeries(1, sin(x), x, (x, 0, 2 * pi)) + xx, yy, zz, param = s.get_data() + assert (len(xx) == len(yy)) and (len(xx) == len(zz)) and (len(xx) == len(param)) + assert np.all(xx == 1) + + s = SurfaceOver2DRangeSeries(1, (x, -2, 2), (y, -3, 3)) + xx, yy, zz = s.get_data() + assert (xx.shape == yy.shape) and (xx.shape == zz.shape) + assert np.all(zz == 1) + + s = ParametricSurfaceSeries(1, x, y, (x, 0, 1), (y, 0, 1)) + xx, yy, zz, uu, vv = s.get_data() + assert xx.shape == yy.shape == zz.shape == uu.shape == vv.shape + assert np.all(xx == 1) + + s = ParametricSurfaceSeries(1, 1, y, (x, 0, 1), (y, 0, 1)) + xx, yy, zz, uu, vv = s.get_data() + assert xx.shape == yy.shape == zz.shape == uu.shape == vv.shape + assert np.all(yy == 1) + + s = ParametricSurfaceSeries(x, 1, 1, (x, 0, 1), (y, 0, 1)) + xx, yy, zz, uu, vv = s.get_data() + assert xx.shape == yy.shape == zz.shape == uu.shape == vv.shape + assert np.all(zz == 1) + + +def test_only_integers(): + if not np: + skip("numpy not installed.") + + x, y, u, v = symbols("x, y, u, v") + + s = LineOver1DRangeSeries(sin(x), (x, -5.5, 4.5), "", + adaptive=False, only_integers=True) + xx, _ = s.get_data() + assert len(xx) == 10 + assert xx[0] == -5 and xx[-1] == 4 + + s = Parametric2DLineSeries(cos(x), sin(x), (x, 0, 2 * pi), "", + adaptive=False, only_integers=True) + _, _, p = s.get_data() + assert len(p) == 7 + assert p[0] == 0 and p[-1] == 6 + + s = Parametric3DLineSeries(cos(x), sin(x), x, (x, 0, 2 * pi), "", + adaptive=False, only_integers=True) + _, _, _, p = s.get_data() + assert len(p) == 7 + assert p[0] == 0 and p[-1] == 6 + + s = SurfaceOver2DRangeSeries(cos(x**2 + y**2), (x, -5.5, 5.5), + (y, -3.5, 3.5), "", + adaptive=False, only_integers=True) + xx, yy, _ = s.get_data() + assert xx.shape == yy.shape == (7, 11) + assert np.allclose(xx[:, 0] - (-5) * np.ones(7), 0) + assert np.allclose(xx[0, :] - np.linspace(-5, 5, 11), 0) + assert np.allclose(yy[:, 0] - np.linspace(-3, 3, 7), 0) + assert np.allclose(yy[0, :] - (-3) * 
np.ones(11), 0) + + r = 2 + sin(7 * u + 5 * v) + expr = ( + r * cos(u) * sin(v), + r * sin(u) * sin(v), + r * cos(v) + ) + s = ParametricSurfaceSeries(*expr, (u, 0, 2 * pi), (v, 0, pi), "", + adaptive=False, only_integers=True) + xx, yy, zz, uu, vv = s.get_data() + assert xx.shape == yy.shape == zz.shape == uu.shape == vv.shape == (4, 7) + + # only_integers also works with scalar expressions + s = LineOver1DRangeSeries(1, (x, -5.5, 4.5), "", + adaptive=False, only_integers=True) + xx, _ = s.get_data() + assert len(xx) == 10 + assert xx[0] == -5 and xx[-1] == 4 + + s = Parametric2DLineSeries(cos(x), 1, (x, 0, 2 * pi), "", + adaptive=False, only_integers=True) + _, _, p = s.get_data() + assert len(p) == 7 + assert p[0] == 0 and p[-1] == 6 + + s = SurfaceOver2DRangeSeries(1, (x, -5.5, 5.5), (y, -3.5, 3.5), "", + adaptive=False, only_integers=True) + xx, yy, _ = s.get_data() + assert xx.shape == yy.shape == (7, 11) + assert np.allclose(xx[:, 0] - (-5) * np.ones(7), 0) + assert np.allclose(xx[0, :] - np.linspace(-5, 5, 11), 0) + assert np.allclose(yy[:, 0] - np.linspace(-3, 3, 7), 0) + assert np.allclose(yy[0, :] - (-3) * np.ones(11), 0) + + r = 2 + sin(7 * u + 5 * v) + expr = ( + r * cos(u) * sin(v), + 1, + r * cos(v) + ) + s = ParametricSurfaceSeries(*expr, (u, 0, 2 * pi), (v, 0, pi), "", + adaptive=False, only_integers=True) + xx, yy, zz, uu, vv = s.get_data() + assert xx.shape == yy.shape == zz.shape == uu.shape == vv.shape == (4, 7) + + +def test_is_point_is_filled(): + # verify that `is_point` and `is_filled` are attributes and that they + # they receive the correct values + if not np: + skip("numpy not installed.") + + x, u = symbols("x, u") + + s = LineOver1DRangeSeries(cos(x), (x, -5, 5), "", + is_point=False, is_filled=True) + assert (not s.is_point) and s.is_filled + s = LineOver1DRangeSeries(cos(x), (x, -5, 5), "", + is_point=True, is_filled=False) + assert s.is_point and (not s.is_filled) + + s = List2DSeries([0, 1, 2], [3, 4, 5], + is_point=False, is_filled=True) + assert (not s.is_point) and s.is_filled + s = List2DSeries([0, 1, 2], [3, 4, 5], + is_point=True, is_filled=False) + assert s.is_point and (not s.is_filled) + + s = Parametric2DLineSeries(cos(x), sin(x), (x, -5, 5), + is_point=False, is_filled=True) + assert (not s.is_point) and s.is_filled + s = Parametric2DLineSeries(cos(x), sin(x), (x, -5, 5), + is_point=True, is_filled=False) + assert s.is_point and (not s.is_filled) + + s = Parametric3DLineSeries(cos(x), sin(x), x, (x, -5, 5), + is_point=False, is_filled=True) + assert (not s.is_point) and s.is_filled + s = Parametric3DLineSeries(cos(x), sin(x), x, (x, -5, 5), + is_point=True, is_filled=False) + assert s.is_point and (not s.is_filled) + + +def test_is_filled_2d(): + # verify that the is_filled attribute is exposed by the following series + x, y = symbols("x, y") + + expr = cos(x**2 + y**2) + ranges = (x, -2, 2), (y, -2, 2) + + s = ContourSeries(expr, *ranges) + assert s.is_filled + s = ContourSeries(expr, *ranges, is_filled=True) + assert s.is_filled + s = ContourSeries(expr, *ranges, is_filled=False) + assert not s.is_filled + + +def test_steps(): + if not np: + skip("numpy not installed.") + + x, u = symbols("x, u") + + def do_test(s1, s2): + if (not s1.is_parametric) and s1.is_2Dline: + xx1, _ = s1.get_data() + xx2, _ = s2.get_data() + elif s1.is_parametric and s1.is_2Dline: + xx1, _, _ = s1.get_data() + xx2, _, _ = s2.get_data() + elif (not s1.is_parametric) and s1.is_3Dline: + xx1, _, _ = s1.get_data() + xx2, _, _ = s2.get_data() + else: + xx1, _, _, _ = 
s1.get_data() + xx2, _, _, _ = s2.get_data() + assert len(xx1) != len(xx2) + + s1 = LineOver1DRangeSeries(cos(x), (x, -5, 5), "", + adaptive=False, n=40, steps=False) + s2 = LineOver1DRangeSeries(cos(x), (x, -5, 5), "", + adaptive=False, n=40, steps=True) + do_test(s1, s2) + + s1 = List2DSeries([0, 1, 2], [3, 4, 5], steps=False) + s2 = List2DSeries([0, 1, 2], [3, 4, 5], steps=True) + do_test(s1, s2) + + s1 = Parametric2DLineSeries(cos(x), sin(x), (x, -5, 5), + adaptive=False, n=40, steps=False) + s2 = Parametric2DLineSeries(cos(x), sin(x), (x, -5, 5), + adaptive=False, n=40, steps=True) + do_test(s1, s2) + + s1 = Parametric3DLineSeries(cos(x), sin(x), x, (x, -5, 5), + adaptive=False, n=40, steps=False) + s2 = Parametric3DLineSeries(cos(x), sin(x), x, (x, -5, 5), + adaptive=False, n=40, steps=True) + do_test(s1, s2) + + +def test_interactive_data(): + # verify that InteractiveSeries produces the same numerical data as their + # corresponding non-interactive series. + if not np: + skip("numpy not installed.") + + u, x, y, z = symbols("u, x:z") + + def do_test(data1, data2): + assert len(data1) == len(data2) + for d1, d2 in zip(data1, data2): + assert np.allclose(d1, d2) + + s1 = LineOver1DRangeSeries(u * cos(x), (x, -5, 5), params={u: 1}, n=50) + s2 = LineOver1DRangeSeries(cos(x), (x, -5, 5), adaptive=False, n=50) + do_test(s1.get_data(), s2.get_data()) + + s1 = Parametric2DLineSeries( + u * cos(x), u * sin(x), (x, -5, 5), params={u: 1}, n=50) + s2 = Parametric2DLineSeries(cos(x), sin(x), (x, -5, 5), + adaptive=False, n=50) + do_test(s1.get_data(), s2.get_data()) + + s1 = Parametric3DLineSeries( + u * cos(x), u * sin(x), u * x, (x, -5, 5), + params={u: 1}, n=50) + s2 = Parametric3DLineSeries(cos(x), sin(x), x, (x, -5, 5), + adaptive=False, n=50) + do_test(s1.get_data(), s2.get_data()) + + s1 = SurfaceOver2DRangeSeries( + u * cos(x ** 2 + y ** 2), (x, -3, 3), (y, -3, 3), + params={u: 1}, n1=50, n2=50,) + s2 = SurfaceOver2DRangeSeries( + cos(x ** 2 + y ** 2), (x, -3, 3), (y, -3, 3), + adaptive=False, n1=50, n2=50) + do_test(s1.get_data(), s2.get_data()) + + s1 = ParametricSurfaceSeries( + u * cos(x + y), sin(x + y), x - y, (x, -3, 3), (y, -3, 3), + params={u: 1}, n1=50, n2=50,) + s2 = ParametricSurfaceSeries( + cos(x + y), sin(x + y), x - y, (x, -3, 3), (y, -3, 3), + adaptive=False, n1=50, n2=50,) + do_test(s1.get_data(), s2.get_data()) + + # real part of a complex function evaluated over a real line with numpy + expr = re((z ** 2 + 1) / (z ** 2 - 1)) + s1 = LineOver1DRangeSeries(u * expr, (z, -3, 3), adaptive=False, n=50, + modules=None, params={u: 1}) + s2 = LineOver1DRangeSeries(expr, (z, -3, 3), adaptive=False, n=50, + modules=None) + do_test(s1.get_data(), s2.get_data()) + + # real part of a complex function evaluated over a real line with mpmath + expr = re((z ** 2 + 1) / (z ** 2 - 1)) + s1 = LineOver1DRangeSeries(u * expr, (z, -3, 3), n=50, modules="mpmath", + params={u: 1}) + s2 = LineOver1DRangeSeries(expr, (z, -3, 3), + adaptive=False, n=50, modules="mpmath") + do_test(s1.get_data(), s2.get_data()) + + +def test_list2dseries_interactive(): + if not np: + skip("numpy not installed.") + + x, y, u = symbols("x, y, u") + + s = List2DSeries([1, 2, 3], [1, 2, 3]) + assert not s.is_interactive + + # symbolic expressions as coordinates, but no ``params`` + raises(ValueError, lambda: List2DSeries([cos(x)], [sin(x)])) + + # too few parameters + raises(ValueError, + lambda: List2DSeries([cos(x), y], [sin(x), 2], params={u: 1})) + + s = List2DSeries([cos(x)], [sin(x)], params={x: 1}) + assert 
s.is_interactive + + s = List2DSeries([x, 2, 3, 4], [4, 3, 2, x], params={x: 3}) + xx, yy = s.get_data() + assert np.allclose(xx, [3, 2, 3, 4]) + assert np.allclose(yy, [4, 3, 2, 3]) + assert not s.is_parametric + + # numeric lists + params is present -> interactive series and + # lists are converted to Tuple. + s = List2DSeries([1, 2, 3], [1, 2, 3], params={x: 1}) + assert s.is_interactive + assert isinstance(s.list_x, Tuple) + assert isinstance(s.list_y, Tuple) + + +def test_mpmath(): + # test that the argument of complex functions evaluated with mpmath + # might be different than the one computed with Numpy (different + # behaviour at branch cuts) + if not np: + skip("numpy not installed.") + + z, u = symbols("z, u") + + s1 = LineOver1DRangeSeries(im(sqrt(-z)), (z, 1e-03, 5), + adaptive=True, modules=None, force_real_eval=True) + s2 = LineOver1DRangeSeries(im(sqrt(-z)), (z, 1e-03, 5), + adaptive=True, modules="mpmath", force_real_eval=True) + xx1, yy1 = s1.get_data() + xx2, yy2 = s2.get_data() + assert np.all(yy1 < 0) + assert np.all(yy2 > 0) + + s1 = LineOver1DRangeSeries(im(sqrt(-z)), (z, -5, 5), + adaptive=False, n=20, modules=None, force_real_eval=True) + s2 = LineOver1DRangeSeries(im(sqrt(-z)), (z, -5, 5), + adaptive=False, n=20, modules="mpmath", force_real_eval=True) + xx1, yy1 = s1.get_data() + xx2, yy2 = s2.get_data() + assert np.allclose(xx1, xx2) + assert not np.allclose(yy1, yy2) + + +def test_str(): + u, x, y, z = symbols("u, x:z") + + s = LineOver1DRangeSeries(cos(x), (x, -4, 3)) + assert str(s) == "cartesian line: cos(x) for x over (-4.0, 3.0)" + + d = {"return": "real"} + s = LineOver1DRangeSeries(cos(x), (x, -4, 3), **d) + assert str(s) == "cartesian line: re(cos(x)) for x over (-4.0, 3.0)" + + d = {"return": "imag"} + s = LineOver1DRangeSeries(cos(x), (x, -4, 3), **d) + assert str(s) == "cartesian line: im(cos(x)) for x over (-4.0, 3.0)" + + d = {"return": "abs"} + s = LineOver1DRangeSeries(cos(x), (x, -4, 3), **d) + assert str(s) == "cartesian line: abs(cos(x)) for x over (-4.0, 3.0)" + + d = {"return": "arg"} + s = LineOver1DRangeSeries(cos(x), (x, -4, 3), **d) + assert str(s) == "cartesian line: arg(cos(x)) for x over (-4.0, 3.0)" + + s = LineOver1DRangeSeries(cos(u * x), (x, -4, 3), params={u: 1}) + assert str(s) == "interactive cartesian line: cos(u*x) for x over (-4.0, 3.0) and parameters (u,)" + + s = LineOver1DRangeSeries(cos(u * x), (x, -u, 3*y), params={u: 1, y: 1}) + assert str(s) == "interactive cartesian line: cos(u*x) for x over (-u, 3*y) and parameters (u, y)" + + s = Parametric2DLineSeries(cos(x), sin(x), (x, -4, 3)) + assert str(s) == "parametric cartesian line: (cos(x), sin(x)) for x over (-4.0, 3.0)" + + s = Parametric2DLineSeries(cos(u * x), sin(x), (x, -4, 3), params={u: 1}) + assert str(s) == "interactive parametric cartesian line: (cos(u*x), sin(x)) for x over (-4.0, 3.0) and parameters (u,)" + + s = Parametric2DLineSeries(cos(u * x), sin(x), (x, -u, 3*y), params={u: 1, y:1}) + assert str(s) == "interactive parametric cartesian line: (cos(u*x), sin(x)) for x over (-u, 3*y) and parameters (u, y)" + + s = Parametric3DLineSeries(cos(x), sin(x), x, (x, -4, 3)) + assert str(s) == "3D parametric cartesian line: (cos(x), sin(x), x) for x over (-4.0, 3.0)" + + s = Parametric3DLineSeries(cos(u*x), sin(x), x, (x, -4, 3), params={u: 1}) + assert str(s) == "interactive 3D parametric cartesian line: (cos(u*x), sin(x), x) for x over (-4.0, 3.0) and parameters (u,)" + + s = Parametric3DLineSeries(cos(u*x), sin(x), x, (x, -u, 3*y), params={u: 1, y: 1}) + assert 
str(s) == "interactive 3D parametric cartesian line: (cos(u*x), sin(x), x) for x over (-u, 3*y) and parameters (u, y)" + + s = SurfaceOver2DRangeSeries(cos(x * y), (x, -4, 3), (y, -2, 5)) + assert str(s) == "cartesian surface: cos(x*y) for x over (-4.0, 3.0) and y over (-2.0, 5.0)" + + s = SurfaceOver2DRangeSeries(cos(u * x * y), (x, -4, 3), (y, -2, 5), params={u: 1}) + assert str(s) == "interactive cartesian surface: cos(u*x*y) for x over (-4.0, 3.0) and y over (-2.0, 5.0) and parameters (u,)" + + s = SurfaceOver2DRangeSeries(cos(u * x * y), (x, -4*u, 3), (y, -2, 5*u), params={u: 1}) + assert str(s) == "interactive cartesian surface: cos(u*x*y) for x over (-4*u, 3.0) and y over (-2.0, 5*u) and parameters (u,)" + + s = ContourSeries(cos(x * y), (x, -4, 3), (y, -2, 5)) + assert str(s) == "contour: cos(x*y) for x over (-4.0, 3.0) and y over (-2.0, 5.0)" + + s = ContourSeries(cos(u * x * y), (x, -4, 3), (y, -2, 5), params={u: 1}) + assert str(s) == "interactive contour: cos(u*x*y) for x over (-4.0, 3.0) and y over (-2.0, 5.0) and parameters (u,)" + + s = ParametricSurfaceSeries(cos(x * y), sin(x * y), x * y, + (x, -4, 3), (y, -2, 5)) + assert str(s) == "parametric cartesian surface: (cos(x*y), sin(x*y), x*y) for x over (-4.0, 3.0) and y over (-2.0, 5.0)" + + s = ParametricSurfaceSeries(cos(u * x * y), sin(x * y), x * y, + (x, -4, 3), (y, -2, 5), params={u: 1}) + assert str(s) == "interactive parametric cartesian surface: (cos(u*x*y), sin(x*y), x*y) for x over (-4.0, 3.0) and y over (-2.0, 5.0) and parameters (u,)" + + s = ImplicitSeries(x < y, (x, -5, 4), (y, -3, 2)) + assert str(s) == "Implicit expression: x < y for x over (-5.0, 4.0) and y over (-3.0, 2.0)" + + +def test_use_cm(): + # verify that the `use_cm` attribute is implemented. + if not np: + skip("numpy not installed.") + + u, x, y, z = symbols("u, x:z") + + s = List2DSeries([1, 2, 3, 4], [5, 6, 7, 8], use_cm=True) + assert s.use_cm + s = List2DSeries([1, 2, 3, 4], [5, 6, 7, 8], use_cm=False) + assert not s.use_cm + + s = Parametric2DLineSeries(cos(x), sin(x), (x, -4, 3), use_cm=True) + assert s.use_cm + s = Parametric2DLineSeries(cos(x), sin(x), (x, -4, 3), use_cm=False) + assert not s.use_cm + + s = Parametric3DLineSeries(cos(x), sin(x), x, (x, -4, 3), + use_cm=True) + assert s.use_cm + s = Parametric3DLineSeries(cos(x), sin(x), x, (x, -4, 3), + use_cm=False) + assert not s.use_cm + + s = SurfaceOver2DRangeSeries(cos(x * y), (x, -4, 3), (y, -2, 5), + use_cm=True) + assert s.use_cm + s = SurfaceOver2DRangeSeries(cos(x * y), (x, -4, 3), (y, -2, 5), + use_cm=False) + assert not s.use_cm + + s = ParametricSurfaceSeries(cos(x * y), sin(x * y), x * y, + (x, -4, 3), (y, -2, 5), use_cm=True) + assert s.use_cm + s = ParametricSurfaceSeries(cos(x * y), sin(x * y), x * y, + (x, -4, 3), (y, -2, 5), use_cm=False) + assert not s.use_cm + + +def test_surface_use_cm(): + # verify that SurfaceOver2DRangeSeries and ParametricSurfaceSeries get + # the same value for use_cm + + x, y, u, v = symbols("x, y, u, v") + + # they read the same value from default settings + s1 = SurfaceOver2DRangeSeries(cos(x**2 + y**2), (x, -2, 2), (y, -2, 2)) + s2 = ParametricSurfaceSeries(u * cos(v), u * sin(v), u, + (u, 0, 1), (v, 0 , 2*pi)) + assert s1.use_cm == s2.use_cm + + # they get the same value + s1 = SurfaceOver2DRangeSeries(cos(x**2 + y**2), (x, -2, 2), (y, -2, 2), + use_cm=False) + s2 = ParametricSurfaceSeries(u * cos(v), u * sin(v), u, + (u, 0, 1), (v, 0 , 2*pi), use_cm=False) + assert s1.use_cm == s2.use_cm + + # they get the same value + s1 = 
SurfaceOver2DRangeSeries(cos(x**2 + y**2), (x, -2, 2), (y, -2, 2), + use_cm=True) + s2 = ParametricSurfaceSeries(u * cos(v), u * sin(v), u, + (u, 0, 1), (v, 0 , 2*pi), use_cm=True) + assert s1.use_cm == s2.use_cm + + +def test_sums(): + # test that data series are able to deal with sums + if not np: + skip("numpy not installed.") + + x, y, u = symbols("x, y, u") + + def do_test(data1, data2): + assert len(data1) == len(data2) + for d1, d2 in zip(data1, data2): + assert np.allclose(d1, d2) + + s = LineOver1DRangeSeries(Sum(1 / x ** y, (x, 1, 1000)), (y, 2, 10), + adaptive=False, only_integers=True) + xx, yy = s.get_data() + + s1 = LineOver1DRangeSeries(Sum(1 / x, (x, 1, y)), (y, 2, 10), + adaptive=False, only_integers=True) + xx1, yy1 = s1.get_data() + + s2 = LineOver1DRangeSeries(Sum(u / x, (x, 1, y)), (y, 2, 10), + params={u: 1}, only_integers=True) + xx2, yy2 = s2.get_data() + xx1 = xx1.astype(float) + xx2 = xx2.astype(float) + do_test([xx1, yy1], [xx2, yy2]) + + s = LineOver1DRangeSeries(Sum(1 / x, (x, 1, y)), (y, 2, 10), + adaptive=True) + with warns( + UserWarning, + match="The evaluation with NumPy/SciPy failed", + test_stacklevel=False, + ): + raises(TypeError, lambda: s.get_data()) + + +def test_apply_transforms(): + # verify that transformation functions get applied to the output + # of data series + if not np: + skip("numpy not installed.") + + x, y, z, u, v = symbols("x:z, u, v") + + s1 = LineOver1DRangeSeries(cos(x), (x, -2*pi, 2*pi), adaptive=False, n=10) + s2 = LineOver1DRangeSeries(cos(x), (x, -2*pi, 2*pi), adaptive=False, n=10, + tx=np.rad2deg) + s3 = LineOver1DRangeSeries(cos(x), (x, -2*pi, 2*pi), adaptive=False, n=10, + ty=np.rad2deg) + s4 = LineOver1DRangeSeries(cos(x), (x, -2*pi, 2*pi), adaptive=False, n=10, + tx=np.rad2deg, ty=np.rad2deg) + + x1, y1 = s1.get_data() + x2, y2 = s2.get_data() + x3, y3 = s3.get_data() + x4, y4 = s4.get_data() + assert np.isclose(x1[0], -2*np.pi) and np.isclose(x1[-1], 2*np.pi) + assert (y1.min() < -0.9) and (y1.max() > 0.9) + assert np.isclose(x2[0], -360) and np.isclose(x2[-1], 360) + assert (y2.min() < -0.9) and (y2.max() > 0.9) + assert np.isclose(x3[0], -2*np.pi) and np.isclose(x3[-1], 2*np.pi) + assert (y3.min() < -52) and (y3.max() > 52) + assert np.isclose(x4[0], -360) and np.isclose(x4[-1], 360) + assert (y4.min() < -52) and (y4.max() > 52) + + xx = np.linspace(-2*np.pi, 2*np.pi, 10) + yy = np.cos(xx) + s1 = List2DSeries(xx, yy) + s2 = List2DSeries(xx, yy, tx=np.rad2deg, ty=np.rad2deg) + x1, y1 = s1.get_data() + x2, y2 = s2.get_data() + assert np.isclose(x1[0], -2*np.pi) and np.isclose(x1[-1], 2*np.pi) + assert (y1.min() < -0.9) and (y1.max() > 0.9) + assert np.isclose(x2[0], -360) and np.isclose(x2[-1], 360) + assert (y2.min() < -52) and (y2.max() > 52) + + s1 = Parametric2DLineSeries( + sin(x), cos(x), (x, -pi, pi), adaptive=False, n=10) + s2 = Parametric2DLineSeries( + sin(x), cos(x), (x, -pi, pi), adaptive=False, n=10, + tx=np.rad2deg, ty=np.rad2deg, tp=np.rad2deg) + x1, y1, a1 = s1.get_data() + x2, y2, a2 = s2.get_data() + assert np.allclose(x1, np.deg2rad(x2)) + assert np.allclose(y1, np.deg2rad(y2)) + assert np.allclose(a1, np.deg2rad(a2)) + + s1 = Parametric3DLineSeries( + sin(x), cos(x), x, (x, -pi, pi), adaptive=False, n=10) + s2 = Parametric3DLineSeries( + sin(x), cos(x), x, (x, -pi, pi), adaptive=False, n=10, tp=np.rad2deg) + x1, y1, z1, a1 = s1.get_data() + x2, y2, z2, a2 = s2.get_data() + assert np.allclose(x1, x2) + assert np.allclose(y1, y2) + assert np.allclose(z1, z2) + assert np.allclose(a1, np.deg2rad(a2)) + + 
s1 = SurfaceOver2DRangeSeries( + cos(x**2 + y**2), (x, -2*pi, 2*pi), (y, -2*pi, 2*pi), + adaptive=False, n1=10, n2=10) + s2 = SurfaceOver2DRangeSeries( + cos(x**2 + y**2), (x, -2*pi, 2*pi), (y, -2*pi, 2*pi), + adaptive=False, n1=10, n2=10, + tx=np.rad2deg, ty=lambda x: 2*x, tz=lambda x: 3*x) + x1, y1, z1 = s1.get_data() + x2, y2, z2 = s2.get_data() + assert np.allclose(x1, np.deg2rad(x2)) + assert np.allclose(y1, y2 / 2) + assert np.allclose(z1, z2 / 3) + + s1 = ParametricSurfaceSeries( + u + v, u - v, u * v, (u, 0, 2*pi), (v, 0, pi), + adaptive=False, n1=10, n2=10) + s2 = ParametricSurfaceSeries( + u + v, u - v, u * v, (u, 0, 2*pi), (v, 0, pi), + adaptive=False, n1=10, n2=10, + tx=np.rad2deg, ty=lambda x: 2*x, tz=lambda x: 3*x) + x1, y1, z1, u1, v1 = s1.get_data() + x2, y2, z2, u2, v2 = s2.get_data() + assert np.allclose(x1, np.deg2rad(x2)) + assert np.allclose(y1, y2 / 2) + assert np.allclose(z1, z2 / 3) + assert np.allclose(u1, u2) + assert np.allclose(v1, v2) + + +def test_series_labels(): + # verify that series return the correct label, depending on the plot + # type and input arguments. If the user set custom label on a data series, + # it should returned un-modified. + if not np: + skip("numpy not installed.") + + x, y, z, u, v = symbols("x, y, z, u, v") + wrapper = "$%s$" + + expr = cos(x) + s1 = LineOver1DRangeSeries(expr, (x, -2, 2), None) + s2 = LineOver1DRangeSeries(expr, (x, -2, 2), "test") + assert s1.get_label(False) == str(expr) + assert s1.get_label(True) == wrapper % latex(expr) + assert s2.get_label(False) == "test" + assert s2.get_label(True) == "test" + + s1 = List2DSeries([0, 1, 2, 3], [0, 1, 2, 3], "test") + assert s1.get_label(False) == "test" + assert s1.get_label(True) == "test" + + expr = (cos(x), sin(x)) + s1 = Parametric2DLineSeries(*expr, (x, -2, 2), None, use_cm=True) + s2 = Parametric2DLineSeries(*expr, (x, -2, 2), "test", use_cm=True) + s3 = Parametric2DLineSeries(*expr, (x, -2, 2), None, use_cm=False) + s4 = Parametric2DLineSeries(*expr, (x, -2, 2), "test", use_cm=False) + assert s1.get_label(False) == "x" + assert s1.get_label(True) == wrapper % "x" + assert s2.get_label(False) == "test" + assert s2.get_label(True) == "test" + assert s3.get_label(False) == str(expr) + assert s3.get_label(True) == wrapper % latex(expr) + assert s4.get_label(False) == "test" + assert s4.get_label(True) == "test" + + expr = (cos(x), sin(x), x) + s1 = Parametric3DLineSeries(*expr, (x, -2, 2), None, use_cm=True) + s2 = Parametric3DLineSeries(*expr, (x, -2, 2), "test", use_cm=True) + s3 = Parametric3DLineSeries(*expr, (x, -2, 2), None, use_cm=False) + s4 = Parametric3DLineSeries(*expr, (x, -2, 2), "test", use_cm=False) + assert s1.get_label(False) == "x" + assert s1.get_label(True) == wrapper % "x" + assert s2.get_label(False) == "test" + assert s2.get_label(True) == "test" + assert s3.get_label(False) == str(expr) + assert s3.get_label(True) == wrapper % latex(expr) + assert s4.get_label(False) == "test" + assert s4.get_label(True) == "test" + + expr = cos(x**2 + y**2) + s1 = SurfaceOver2DRangeSeries(expr, (x, -2, 2), (y, -2, 2), None) + s2 = SurfaceOver2DRangeSeries(expr, (x, -2, 2), (y, -2, 2), "test") + assert s1.get_label(False) == str(expr) + assert s1.get_label(True) == wrapper % latex(expr) + assert s2.get_label(False) == "test" + assert s2.get_label(True) == "test" + + expr = (cos(x - y), sin(x + y), x - y) + s1 = ParametricSurfaceSeries(*expr, (x, -2, 2), (y, -2, 2), None) + s2 = ParametricSurfaceSeries(*expr, (x, -2, 2), (y, -2, 2), "test") + assert 
s1.get_label(False) == str(expr) + assert s1.get_label(True) == wrapper % latex(expr) + assert s2.get_label(False) == "test" + assert s2.get_label(True) == "test" + + expr = Eq(cos(x - y), 0) + s1 = ImplicitSeries(expr, (x, -10, 10), (y, -10, 10), None) + s2 = ImplicitSeries(expr, (x, -10, 10), (y, -10, 10), "test") + assert s1.get_label(False) == str(expr) + assert s1.get_label(True) == wrapper % latex(expr) + assert s2.get_label(False) == "test" + assert s2.get_label(True) == "test" + + +def test_is_polar_2d_parametric(): + # verify that Parametric2DLineSeries isable to apply polar discretization, + # which is used when polar_plot is executed with polar_axis=True + if not np: + skip("numpy not installed.") + + t, u = symbols("t u") + + # NOTE: a sufficiently big n must be provided, or else tests + # are going to fail + # No colormap + f = sin(4 * t) + s1 = Parametric2DLineSeries(f * cos(t), f * sin(t), (t, 0, 2*pi), + adaptive=False, n=10, is_polar=False, use_cm=False) + x1, y1, p1 = s1.get_data() + s2 = Parametric2DLineSeries(f * cos(t), f * sin(t), (t, 0, 2*pi), + adaptive=False, n=10, is_polar=True, use_cm=False) + th, r, p2 = s2.get_data() + assert (not np.allclose(x1, th)) and (not np.allclose(y1, r)) + assert np.allclose(p1, p2) + + # With colormap + s3 = Parametric2DLineSeries(f * cos(t), f * sin(t), (t, 0, 2*pi), + adaptive=False, n=10, is_polar=False, color_func=lambda t: 2*t) + x3, y3, p3 = s3.get_data() + s4 = Parametric2DLineSeries(f * cos(t), f * sin(t), (t, 0, 2*pi), + adaptive=False, n=10, is_polar=True, color_func=lambda t: 2*t) + th4, r4, p4 = s4.get_data() + assert np.allclose(p3, p4) and (not np.allclose(p1, p3)) + assert np.allclose(x3, x1) and np.allclose(y3, y1) + assert np.allclose(th4, th) and np.allclose(r4, r) + + +def test_is_polar_3d(): + # verify that SurfaceOver2DRangeSeries is able to apply + # polar discretization + if not np: + skip("numpy not installed.") + + x, y, t = symbols("x, y, t") + expr = (x**2 - 1)**2 + s1 = SurfaceOver2DRangeSeries(expr, (x, 0, 1.5), (y, 0, 2 * pi), + n=10, adaptive=False, is_polar=False) + s2 = SurfaceOver2DRangeSeries(expr, (x, 0, 1.5), (y, 0, 2 * pi), + n=10, adaptive=False, is_polar=True) + x1, y1, z1 = s1.get_data() + x2, y2, z2 = s2.get_data() + x22, y22 = x1 * np.cos(y1), x1 * np.sin(y1) + assert np.allclose(x2, x22) + assert np.allclose(y2, y22) + + +def test_color_func(): + # verify that eval_color_func produces the expected results in order to + # maintain back compatibility with the old sympy.plotting module + if not np: + skip("numpy not installed.") + + x, y, z, u, v = symbols("x, y, z, u, v") + + # color func: returns x, y, color and s is parametric + xx = np.linspace(-3, 3, 10) + yy1 = np.cos(xx) + s = List2DSeries(xx, yy1, color_func=lambda x, y: 2 * x, use_cm=True) + xxs, yys, col = s.get_data() + assert np.allclose(xx, xxs) + assert np.allclose(yy1, yys) + assert np.allclose(2 * xx, col) + assert s.is_parametric + + s = List2DSeries(xx, yy1, color_func=lambda x, y: 2 * x, use_cm=False) + assert len(s.get_data()) == 2 + assert not s.is_parametric + + s = Parametric2DLineSeries(cos(x), sin(x), (x, 0, 2*pi), + adaptive=False, n=10, color_func=lambda t: t) + xx, yy, col = s.get_data() + assert (not np.allclose(xx, col)) and (not np.allclose(yy, col)) + s = Parametric2DLineSeries(cos(x), sin(x), (x, 0, 2*pi), + adaptive=False, n=10, color_func=lambda x, y: x * y) + xx, yy, col = s.get_data() + assert np.allclose(col, xx * yy) + s = Parametric2DLineSeries(cos(x), sin(x), (x, 0, 2*pi), + adaptive=False, n=10, 
color_func=lambda x, y, t: x * y * t) + xx, yy, col = s.get_data() + assert np.allclose(col, xx * yy * np.linspace(0, 2*np.pi, 10)) + + s = Parametric3DLineSeries(cos(x), sin(x), x, (x, 0, 2*pi), + adaptive=False, n=10, color_func=lambda t: t) + xx, yy, zz, col = s.get_data() + assert (not np.allclose(xx, col)) and (not np.allclose(yy, col)) + s = Parametric3DLineSeries(cos(x), sin(x), x, (x, 0, 2*pi), + adaptive=False, n=10, color_func=lambda x, y, z: x * y * z) + xx, yy, zz, col = s.get_data() + assert np.allclose(col, xx * yy * zz) + s = Parametric3DLineSeries(cos(x), sin(x), x, (x, 0, 2*pi), + adaptive=False, n=10, color_func=lambda x, y, z, t: x * y * z * t) + xx, yy, zz, col = s.get_data() + assert np.allclose(col, xx * yy * zz * np.linspace(0, 2*np.pi, 10)) + + s = SurfaceOver2DRangeSeries(cos(x**2 + y**2), (x, -2, 2), (y, -2, 2), + adaptive=False, n1=10, n2=10, color_func=lambda x: x) + xx, yy, zz = s.get_data() + col = s.eval_color_func(xx, yy, zz) + assert np.allclose(xx, col) + s = SurfaceOver2DRangeSeries(cos(x**2 + y**2), (x, -2, 2), (y, -2, 2), + adaptive=False, n1=10, n2=10, color_func=lambda x, y: x * y) + xx, yy, zz = s.get_data() + col = s.eval_color_func(xx, yy, zz) + assert np.allclose(xx * yy, col) + s = SurfaceOver2DRangeSeries(cos(x**2 + y**2), (x, -2, 2), (y, -2, 2), + adaptive=False, n1=10, n2=10, color_func=lambda x, y, z: x * y * z) + xx, yy, zz = s.get_data() + col = s.eval_color_func(xx, yy, zz) + assert np.allclose(xx * yy * zz, col) + + s = ParametricSurfaceSeries(1, x, y, (x, 0, 1), (y, 0, 1), adaptive=False, + n1=10, n2=10, color_func=lambda u:u) + xx, yy, zz, uu, vv = s.get_data() + col = s.eval_color_func(xx, yy, zz, uu, vv) + assert np.allclose(uu, col) + s = ParametricSurfaceSeries(1, x, y, (x, 0, 1), (y, 0, 1), adaptive=False, + n1=10, n2=10, color_func=lambda u, v: u * v) + xx, yy, zz, uu, vv = s.get_data() + col = s.eval_color_func(xx, yy, zz, uu, vv) + assert np.allclose(uu * vv, col) + s = ParametricSurfaceSeries(1, x, y, (x, 0, 1), (y, 0, 1), adaptive=False, + n1=10, n2=10, color_func=lambda x, y, z: x * y * z) + xx, yy, zz, uu, vv = s.get_data() + col = s.eval_color_func(xx, yy, zz, uu, vv) + assert np.allclose(xx * yy * zz, col) + s = ParametricSurfaceSeries(1, x, y, (x, 0, 1), (y, 0, 1), adaptive=False, + n1=10, n2=10, color_func=lambda x, y, z, u, v: x * y * z * u * v) + xx, yy, zz, uu, vv = s.get_data() + col = s.eval_color_func(xx, yy, zz, uu, vv) + assert np.allclose(xx * yy * zz * uu * vv, col) + + # Interactive Series + s = List2DSeries([0, 1, 2, x], [x, 2, 3, 4], + color_func=lambda x, y: 2 * x, params={x: 1}, use_cm=True) + xx, yy, col = s.get_data() + assert np.allclose(xx, [0, 1, 2, 1]) + assert np.allclose(yy, [1, 2, 3, 4]) + assert np.allclose(2 * xx, col) + assert s.is_parametric and s.use_cm + + s = List2DSeries([0, 1, 2, x], [x, 2, 3, 4], + color_func=lambda x, y: 2 * x, params={x: 1}, use_cm=False) + assert len(s.get_data()) == 2 + assert not s.is_parametric + + +def test_color_func_scalar_val(): + # verify that eval_color_func returns a numpy array even when color_func + # evaluates to a scalar value + if not np: + skip("numpy not installed.") + + x, y = symbols("x, y") + + s = Parametric2DLineSeries(cos(x), sin(x), (x, 0, 2*pi), + adaptive=False, n=10, color_func=lambda t: 1) + xx, yy, col = s.get_data() + assert np.allclose(col, np.ones(xx.shape)) + + s = Parametric3DLineSeries(cos(x), sin(x), x, (x, 0, 2*pi), + adaptive=False, n=10, color_func=lambda t: 1) + xx, yy, zz, col = s.get_data() + assert np.allclose(col, 
np.ones(xx.shape)) + + s = SurfaceOver2DRangeSeries(cos(x**2 + y**2), (x, -2, 2), (y, -2, 2), + adaptive=False, n1=10, n2=10, color_func=lambda x: 1) + xx, yy, zz = s.get_data() + assert np.allclose(s.eval_color_func(xx), np.ones(xx.shape)) + + s = ParametricSurfaceSeries(1, x, y, (x, 0, 1), (y, 0, 1), adaptive=False, + n1=10, n2=10, color_func=lambda u: 1) + xx, yy, zz, uu, vv = s.get_data() + col = s.eval_color_func(xx, yy, zz, uu, vv) + assert np.allclose(col, np.ones(xx.shape)) + + +def test_color_func_expression(): + # verify that color_func is able to deal with instances of Expr: they will + # be lambdified with the same signature used for the main expression. + if not np: + skip("numpy not installed.") + + x, y = symbols("x, y") + + s1 = Parametric2DLineSeries(cos(x), sin(x), (x, 0, 2*pi), + color_func=sin(x), adaptive=False, n=10, use_cm=True) + s2 = Parametric2DLineSeries(cos(x), sin(x), (x, 0, 2*pi), + color_func=lambda x: np.cos(x), adaptive=False, n=10, use_cm=True) + # the following statement should not raise errors + d1 = s1.get_data() + assert callable(s1.color_func) + d2 = s2.get_data() + assert not np.allclose(d1[-1], d2[-1]) + + s = SurfaceOver2DRangeSeries(cos(x**2 + y**2), (x, -pi, pi), (y, -pi, pi), + color_func=sin(x**2 + y**2), adaptive=False, n1=5, n2=5) + # the following statement should not raise errors + s.get_data() + assert callable(s.color_func) + + xx = [1, 2, 3, 4, 5] + yy = [1, 2, 3, 4, 5] + raises(TypeError, + lambda : List2DSeries(xx, yy, use_cm=True, color_func=sin(x))) + + +def test_line_surface_color(): + # verify the back-compatibility with the old sympy.plotting module. + # By setting line_color or surface_color to be a callable, it will set + # the color_func attribute. + + x, y, z = symbols("x, y, z") + + s = LineOver1DRangeSeries(sin(x), (x, -5, 5), adaptive=False, n=10, + line_color=lambda x: x) + assert (s.line_color is None) and callable(s.color_func) + + s = Parametric2DLineSeries(cos(x), sin(x), (x, 0, 2*pi), + adaptive=False, n=10, line_color=lambda t: t) + assert (s.line_color is None) and callable(s.color_func) + + s = SurfaceOver2DRangeSeries(cos(x**2 + y**2), (x, -2, 2), (y, -2, 2), + n1=10, n2=10, surface_color=lambda x: x) + assert (s.surface_color is None) and callable(s.color_func) + + +def test_complex_adaptive_false(): + # verify that series with adaptive=False is evaluated with discretized + # ranges of type complex. 
+ if not np: + skip("numpy not installed.") + + x, y, u = symbols("x y u") + + def do_test(data1, data2): + assert len(data1) == len(data2) + for d1, d2 in zip(data1, data2): + assert np.allclose(d1, d2) + + expr1 = sqrt(x) * exp(-x**2) + expr2 = sqrt(u * x) * exp(-x**2) + s1 = LineOver1DRangeSeries(im(expr1), (x, -5, 5), adaptive=False, n=10) + s2 = LineOver1DRangeSeries(im(expr2), (x, -5, 5), + adaptive=False, n=10, params={u: 1}) + data1 = s1.get_data() + data2 = s2.get_data() + + do_test(data1, data2) + assert (not np.allclose(data1[1], 0)) and (not np.allclose(data2[1], 0)) + + s1 = Parametric2DLineSeries(re(expr1), im(expr1), (x, -pi, pi), + adaptive=False, n=10) + s2 = Parametric2DLineSeries(re(expr2), im(expr2), (x, -pi, pi), + adaptive=False, n=10, params={u: 1}) + data1 = s1.get_data() + data2 = s2.get_data() + do_test(data1, data2) + assert (not np.allclose(data1[1], 0)) and (not np.allclose(data2[1], 0)) + + s1 = SurfaceOver2DRangeSeries(im(expr1), (x, -5, 5), (y, -10, 10), + adaptive=False, n1=30, n2=3) + s2 = SurfaceOver2DRangeSeries(im(expr2), (x, -5, 5), (y, -10, 10), + adaptive=False, n1=30, n2=3, params={u: 1}) + data1 = s1.get_data() + data2 = s2.get_data() + do_test(data1, data2) + assert (not np.allclose(data1[1], 0)) and (not np.allclose(data2[1], 0)) + + +def test_expr_is_lambda_function(): + # verify that when a numpy function is provided, the series will be able + # to evaluate it. Also, label should be empty in order to prevent some + # backend from crashing. + if not np: + skip("numpy not installed.") + + f = lambda x: np.cos(x) + s1 = LineOver1DRangeSeries(f, ("x", -5, 5), adaptive=True, depth=3) + s1.get_data() + s2 = LineOver1DRangeSeries(f, ("x", -5, 5), adaptive=False, n=10) + s2.get_data() + assert s1.label == s2.label == "" + + fx = lambda x: np.cos(x) + fy = lambda x: np.sin(x) + s1 = Parametric2DLineSeries(fx, fy, ("x", 0, 2*pi), + adaptive=True, adaptive_goal=0.1) + s1.get_data() + s2 = Parametric2DLineSeries(fx, fy, ("x", 0, 2*pi), + adaptive=False, n=10) + s2.get_data() + assert s1.label == s2.label == "" + + fz = lambda x: x + s1 = Parametric3DLineSeries(fx, fy, fz, ("x", 0, 2*pi), + adaptive=True, adaptive_goal=0.1) + s1.get_data() + s2 = Parametric3DLineSeries(fx, fy, fz, ("x", 0, 2*pi), + adaptive=False, n=10) + s2.get_data() + assert s1.label == s2.label == "" + + f = lambda x, y: np.cos(x**2 + y**2) + s1 = SurfaceOver2DRangeSeries(f, ("a", -2, 2), ("b", -3, 3), + adaptive=False, n1=10, n2=10) + s1.get_data() + s2 = ContourSeries(f, ("a", -2, 2), ("b", -3, 3), + adaptive=False, n1=10, n2=10) + s2.get_data() + assert s1.label == s2.label == "" + + fx = lambda u, v: np.cos(u + v) + fy = lambda u, v: np.sin(u - v) + fz = lambda u, v: u * v + s1 = ParametricSurfaceSeries(fx, fy, fz, ("u", 0, pi), ("v", 0, 2*pi), + adaptive=False, n1=10, n2=10) + s1.get_data() + assert s1.label == "" + + raises(TypeError, lambda: List2DSeries(lambda t: t, lambda t: t)) + raises(TypeError, lambda : ImplicitSeries(lambda t: np.sin(t), + ("x", -5, 5), ("y", -6, 6))) + + +def test_show_in_legend_lines(): + # verify that lines series correctly set the show_in_legend attribute + x, u = symbols("x, u") + + s = LineOver1DRangeSeries(cos(x), (x, -2, 2), "test", show_in_legend=True) + assert s.show_in_legend + s = LineOver1DRangeSeries(cos(x), (x, -2, 2), "test", show_in_legend=False) + assert not s.show_in_legend + + s = Parametric2DLineSeries(cos(x), sin(x), (x, 0, 1), "test", + show_in_legend=True) + assert s.show_in_legend + s = Parametric2DLineSeries(cos(x), sin(x), (x, 0, 
1), "test", + show_in_legend=False) + assert not s.show_in_legend + + s = Parametric3DLineSeries(cos(x), sin(x), x, (x, 0, 1), "test", + show_in_legend=True) + assert s.show_in_legend + s = Parametric3DLineSeries(cos(x), sin(x), x, (x, 0, 1), "test", + show_in_legend=False) + assert not s.show_in_legend + + +@XFAIL +def test_particular_case_1_with_adaptive_true(): + # Verify that symbolic expressions and numerical lambda functions are + # evaluated with the same algorithm. + if not np: + skip("numpy not installed.") + + # NOTE: xfail because sympy's adaptive algorithm is not deterministic + + def do_test(a, b): + with warns( + RuntimeWarning, + match="invalid value encountered in scalar power", + test_stacklevel=False, + ): + d1 = a.get_data() + d2 = b.get_data() + for t, v in zip(d1, d2): + assert np.allclose(t, v) + + n = symbols("n") + a = S(2) / 3 + epsilon = 0.01 + xn = (n**3 + n**2)**(S(1)/3) - (n**3 - n**2)**(S(1)/3) + expr = Abs(xn - a) - epsilon + math_func = lambdify([n], expr) + s1 = LineOver1DRangeSeries(expr, (n, -10, 10), "", + adaptive=True, depth=3) + s2 = LineOver1DRangeSeries(math_func, ("n", -10, 10), "", + adaptive=True, depth=3) + do_test(s1, s2) + + +def test_particular_case_1_with_adaptive_false(): + # Verify that symbolic expressions and numerical lambda functions are + # evaluated with the same algorithm. In particular, uniform evaluation + # is going to use np.vectorize, which correctly evaluates the following + # mathematical function. + if not np: + skip("numpy not installed.") + + def do_test(a, b): + d1 = a.get_data() + d2 = b.get_data() + for t, v in zip(d1, d2): + assert np.allclose(t, v) + + n = symbols("n") + a = S(2) / 3 + epsilon = 0.01 + xn = (n**3 + n**2)**(S(1)/3) - (n**3 - n**2)**(S(1)/3) + expr = Abs(xn - a) - epsilon + math_func = lambdify([n], expr) + + s3 = LineOver1DRangeSeries(expr, (n, -10, 10), "", + adaptive=False, n=10) + s4 = LineOver1DRangeSeries(math_func, ("n", -10, 10), "", + adaptive=False, n=10) + do_test(s3, s4) + + +def test_complex_params_number_eval(): + # The main expression contains terms like sqrt(xi - 1), with + # parameter (0 <= xi <= 1). + # There shouldn't be any NaN values on the output. + if not np: + skip("numpy not installed.") + + xi, wn, x0, v0, t = symbols("xi, omega_n, x0, v0, t") + x = Function("x")(t) + eq = x.diff(t, 2) + 2 * xi * wn * x.diff(t) + wn**2 * x + sol = dsolve(eq, x, ics={x.subs(t, 0): x0, x.diff(t).subs(t, 0): v0}) + params = { + wn: 0.5, + xi: 0.25, + x0: 0.45, + v0: 0.0 + } + s = LineOver1DRangeSeries(sol.rhs, (t, 0, 100), adaptive=False, n=5, + params=params) + x, y = s.get_data() + assert not np.isnan(x).any() + assert not np.isnan(y).any() + + + # Fourier Series of a sawtooth wave + # The main expression contains a Sum with a symbolic upper range. + # The lambdified code looks like: + # sum(blablabla for for n in range(1, m+1)) + # But range requires integer numbers, whereas per above example, the series + # casts parameters to complex. 
Verify that the series is able to detect + upper bounds in summations and cast it to int in order to get successful + evaluation + x, T, n, m = symbols("x, T, n, m") + fs = S(1) / 2 - (1 / pi) * Sum(sin(2 * n * pi * x / T) / n, (n, 1, m)) + params = { + T: 4.5, + m: 5 + } + s = LineOver1DRangeSeries(fs, (x, 0, 10), adaptive=False, n=5, + params=params) + x, y = s.get_data() + assert not np.isnan(x).any() + assert not np.isnan(y).any() + + +def test_complex_range_line_plot_1(): + # verify that univariate functions are evaluated with a complex + # data range (with zero imaginary part). There shouldn't be any + # NaN value in the output. + if not np: + skip("numpy not installed.") + + x, u = symbols("x, u") + expr1 = im(sqrt(x) * exp(-x**2)) + expr2 = im(sqrt(u * x) * exp(-x**2)) + s1 = LineOver1DRangeSeries(expr1, (x, -10, 10), adaptive=True, + adaptive_goal=0.1) + s2 = LineOver1DRangeSeries(expr1, (x, -10, 10), adaptive=False, n=30) + s3 = LineOver1DRangeSeries(expr2, (x, -10, 10), adaptive=False, n=30, + params={u: 1}) + + with ignore_warnings(RuntimeWarning): + data1 = s1.get_data() + data2 = s2.get_data() + data3 = s3.get_data() + + assert not np.isnan(data1[1]).any() + assert not np.isnan(data2[1]).any() + assert not np.isnan(data3[1]).any() + assert np.allclose(data2[0], data3[0]) and np.allclose(data2[1], data3[1]) + + +@XFAIL +def test_complex_range_line_plot_2(): + # verify that univariate functions are evaluated with a complex + # data range (with non-zero imaginary part). There shouldn't be any + # NaN value in the output. + if not np: + skip("numpy not installed.") + + # NOTE: xfail because sympy's adaptive algorithm is unable to deal with + # complex numbers. + + x, u = symbols("x, u") + + # adaptive and uniform meshing should produce the same data. + # because of the adaptive nature, just compare the first and last points + # of both series. + s1 = LineOver1DRangeSeries(abs(sqrt(x)), (x, -5-2j, 5-2j), adaptive=True) + s2 = LineOver1DRangeSeries(abs(sqrt(x)), (x, -5-2j, 5-2j), adaptive=False, + n=10) + with warns( + RuntimeWarning, + match="invalid value encountered in sqrt", + test_stacklevel=False, + ): + d1 = s1.get_data() + d2 = s2.get_data() + xx1 = [d1[0][0], d1[0][-1]] + xx2 = [d2[0][0], d2[0][-1]] + yy1 = [d1[1][0], d1[1][-1]] + yy2 = [d2[1][0], d2[1][-1]] + assert np.allclose(xx1, xx2) + assert np.allclose(yy1, yy2) + + +def test_force_real_eval(): + # verify that force_real_eval=True produces inconsistent results when + # compared with evaluation of complex domain.
+ if not np: + skip("numpy not installed.") + + x = symbols("x") + + expr = im(sqrt(x) * exp(-x**2)) + s1 = LineOver1DRangeSeries(expr, (x, -10, 10), adaptive=False, n=10, + force_real_eval=False) + s2 = LineOver1DRangeSeries(expr, (x, -10, 10), adaptive=False, n=10, + force_real_eval=True) + d1 = s1.get_data() + with ignore_warnings(RuntimeWarning): + d2 = s2.get_data() + assert not np.allclose(d1[1], 0) + assert np.allclose(d2[1], 0) + + +def test_contour_series_show_clabels(): + # verify that a contour series has the ability to set the visibility of + # labels to contour lines + + x, y = symbols("x, y") + s = ContourSeries(cos(x*y), (x, -2, 2), (y, -2, 2)) + assert s.show_clabels + + s = ContourSeries(cos(x*y), (x, -2, 2), (y, -2, 2), clabels=True) + assert s.show_clabels + + s = ContourSeries(cos(x*y), (x, -2, 2), (y, -2, 2), clabels=False) + assert not s.show_clabels + + +def test_LineOver1DRangeSeries_complex_range(): + # verify that LineOver1DRangeSeries can accept a complex range + # if the imaginary part of the start and end values are the same + + x = symbols("x") + + LineOver1DRangeSeries(sqrt(x), (x, -10, 10)) + LineOver1DRangeSeries(sqrt(x), (x, -10-2j, 10-2j)) + raises(ValueError, + lambda : LineOver1DRangeSeries(sqrt(x), (x, -10-2j, 10+2j))) + + +def test_symbolic_plotting_ranges(): + # verify that data series can use symbolic plotting ranges + if not np: + skip("numpy not installed.") + + x, y, z, a, b = symbols("x, y, z, a, b") + + def do_test(s1, s2, new_params): + d1 = s1.get_data() + d2 = s2.get_data() + for u, v in zip(d1, d2): + assert np.allclose(u, v) + s2.params = new_params + d2 = s2.get_data() + for u, v in zip(d1, d2): + assert not np.allclose(u, v) + + s1 = LineOver1DRangeSeries(sin(x), (x, 0, 1), adaptive=False, n=10) + s2 = LineOver1DRangeSeries(sin(x), (x, a, b), params={a: 0, b: 1}, + adaptive=False, n=10) + do_test(s1, s2, {a: 0.5, b: 1.5}) + + # missing a parameter + raises(ValueError, + lambda : LineOver1DRangeSeries(sin(x), (x, a, b), params={a: 1}, n=10)) + + s1 = Parametric2DLineSeries(cos(x), sin(x), (x, 0, 1), adaptive=False, n=10) + s2 = Parametric2DLineSeries(cos(x), sin(x), (x, a, b), params={a: 0, b: 1}, + adaptive=False, n=10) + do_test(s1, s2, {a: 0.5, b: 1.5}) + + # missing a parameter + raises(ValueError, + lambda : Parametric2DLineSeries(cos(x), sin(x), (x, a, b), + params={a: 0}, adaptive=False, n=10)) + + s1 = Parametric3DLineSeries(cos(x), sin(x), x, (x, 0, 1), + adaptive=False, n=10) + s2 = Parametric3DLineSeries(cos(x), sin(x), x, (x, a, b), + params={a: 0, b: 1}, adaptive=False, n=10) + do_test(s1, s2, {a: 0.5, b: 1.5}) + + # missing a parameter + raises(ValueError, + lambda : Parametric3DLineSeries(cos(x), sin(x), x, (x, a, b), + params={a: 0}, adaptive=False, n=10)) + + s1 = SurfaceOver2DRangeSeries(cos(x**2 + y**2), (x, -pi, pi), (y, -pi, pi), + adaptive=False, n1=5, n2=5) + s2 = SurfaceOver2DRangeSeries(cos(x**2 + y**2), (x, -pi * a, pi * a), + (y, -pi * b, pi * b), params={a: 1, b: 1}, + adaptive=False, n1=5, n2=5) + do_test(s1, s2, {a: 0.5, b: 1.5}) + + # missing a parameter + raises(ValueError, + lambda : SurfaceOver2DRangeSeries(cos(x**2 + y**2), + (x, -pi * a, pi * a), (y, -pi * b, pi * b), params={a: 1}, + adaptive=False, n1=5, n2=5)) + # one range symbol is included into another range's minimum or maximum val + raises(ValueError, + lambda : SurfaceOver2DRangeSeries(cos(x**2 + y**2), + (x, -pi * a + y, pi * a), (y, -pi * b, pi * b), params={a: 1}, + adaptive=False, n1=5, n2=5)) + + s1 = ParametricSurfaceSeries( + cos(x - y),
sin(x + y), x - y, (x, -2, 2), (y, -2, 2), n1=5, n2=5) + s2 = ParametricSurfaceSeries( + cos(x - y), sin(x + y), x - y, (x, -2 * a, 2), (y, -2, 2 * b), + params={a: 1, b: 1}, n1=5, n2=5) + do_test(s1, s2, {a: 0.5, b: 1.5}) + + # missing a parameter + raises(ValueError, + lambda : ParametricSurfaceSeries( + cos(x - y), sin(x + y), x - y, (x, -2 * a, 2), (y, -2, 2 * b), + params={a: 1}, n1=5, n2=5)) + + +def test_exclude_points(): + # verify that exclude works as expected + if not np: + skip("numpy not installed.") + + x = symbols("x") + + expr = (floor(x) + S.Half) / (1 - (x - S.Half)**2) + + with warns( + UserWarning, + match="NumPy is unable to evaluate with complex numbers some", + test_stacklevel=False, + ): + s = LineOver1DRangeSeries(expr, (x, -3.5, 3.5), adaptive=False, n=100, + exclude=list(range(-3, 4))) + xx, yy = s.get_data() + assert not np.isnan(xx).any() + assert np.count_nonzero(np.isnan(yy)) == 7 + assert len(xx) > 100 + + e1 = log(floor(x)) * cos(x) + e2 = log(floor(x)) * sin(x) + with warns( + UserWarning, + match="NumPy is unable to evaluate with complex numbers some", + test_stacklevel=False, + ): + s = Parametric2DLineSeries(e1, e2, (x, 1, 12), adaptive=False, n=100, + exclude=list(range(1, 13))) + xx, yy, pp = s.get_data() + assert not np.isnan(pp).any() + assert np.count_nonzero(np.isnan(xx)) == 11 + assert np.count_nonzero(np.isnan(yy)) == 11 + assert len(xx) > 100 + + +def test_unwrap(): + # verify that unwrap works as expected + if not np: + skip("numpy not installed.") + + x, y = symbols("x, y") + expr = 1 / (x**3 + 2*x**2 + x) + expr = arg(expr.subs(x, I*y*2*pi)) + s1 = LineOver1DRangeSeries(expr, (y, 1e-05, 1e05), xscale="log", + adaptive=False, n=10, unwrap=False) + s2 = LineOver1DRangeSeries(expr, (y, 1e-05, 1e05), xscale="log", + adaptive=False, n=10, unwrap=True) + s3 = LineOver1DRangeSeries(expr, (y, 1e-05, 1e05), xscale="log", + adaptive=False, n=10, unwrap={"period": 4}) + x1, y1 = s1.get_data() + x2, y2 = s2.get_data() + x3, y3 = s3.get_data() + assert np.allclose(x1, x2) + # there must not be nan values in the results of these evaluations + assert all(not np.isnan(t).any() for t in [y1, y2, y3]) + assert not np.allclose(y1, y2) + assert not np.allclose(y1, y3) + assert not np.allclose(y2, y3) diff --git a/.venv/Lib/site-packages/sympy/plotting/tests/test_textplot.py b/.venv/Lib/site-packages/sympy/plotting/tests/test_textplot.py new file mode 100644 index 0000000000000000000000000000000000000000..928085c627e5230f2ac4a8ce0bbac5354ab35d51 --- /dev/null +++ b/.venv/Lib/site-packages/sympy/plotting/tests/test_textplot.py @@ -0,0 +1,203 @@ +from sympy.core.singleton import S +from sympy.core.symbol import Symbol +from sympy.functions.elementary.exponential import log +from sympy.functions.elementary.miscellaneous import sqrt +from sympy.functions.elementary.trigonometric import sin +from sympy.plotting.textplot import textplot_str + +from sympy.utilities.exceptions import ignore_warnings + + +def test_axes_alignment(): + x = Symbol('x') + lines = [ + ' 1 | ..', + ' | ... ', + ' | .. ', + ' | ... ', + ' | ... ', + ' | .. ', + ' | ... ', + ' | ... ', + ' | .. ', + ' | ... ', + ' 0 |--------------------------...--------------------------', + ' | ... ', + ' | .. ', + ' | ... ', + ' | ... ', + ' | .. ', + ' | ... ', + ' | ... ', + ' | .. ', + ' | ... ', + ' -1 |_______________________________________________________', + ' -1 0 1' + ] + assert lines == list(textplot_str(x, -1, 1)) + + lines = [ + ' 1 | ..', + ' | .... ', + ' | ... ', + ' | ... ', + ' | .... 
', + ' | ... ', + ' | ... ', + ' | .... ', + ' 0 |--------------------------...--------------------------', + ' | .... ', + ' | ... ', + ' | ... ', + ' | .... ', + ' | ... ', + ' | ... ', + ' | .... ', + ' -1 |_______________________________________________________', + ' -1 0 1' + ] + assert lines == list(textplot_str(x, -1, 1, H=17)) + + +def test_singularity(): + x = Symbol('x') + lines = [ + ' 54 | . ', + ' | ', + ' | ', + ' | ', + ' | ',' | ', + ' | ', + ' | ', + ' | ', + ' | ', + ' 27.5 |--.----------------------------------------------------', + ' | ', + ' | ', + ' | ', + ' | . ', + ' | \\ ', + ' | \\ ', + ' | .. ', + ' | ... ', + ' | ............. ', + ' 1 |_______________________________________________________', + ' 0 0.5 1' + ] + assert lines == list(textplot_str(1/x, 0, 1)) + + lines = [ + ' 0 | ......', + ' | ........ ', + ' | ........ ', + ' | ...... ', + ' | ..... ', + ' | .... ', + ' | ... ', + ' | .. ', + ' | ... ', + ' | / ', + ' -2 |-------..----------------------------------------------', + ' | / ', + ' | / ', + ' | / ', + ' | . ', + ' | ', + ' | . ', + ' | ', + ' | ', + ' | ', + ' -4 |_______________________________________________________', + ' 0 0.5 1' + ] + # RuntimeWarning: divide by zero encountered in log + with ignore_warnings(RuntimeWarning): + assert lines == list(textplot_str(log(x), 0, 1)) + + +def test_sinc(): + x = Symbol('x') + lines = [ + ' 1 | . . ', + ' | . . ', + ' | ', + ' | . . ', + ' | ', + ' | . . ', + ' | ', + ' | ', + ' | . . ', + ' | ', + ' 0.4 |-------------------------------------------------------', + ' | . . ', + ' | ', + ' | . . ', + ' | ', + ' | ..... ..... ', + ' | .. \\ . . / .. ', + ' | / \\ / \\ ', + ' |/ \\ . . / \\', + ' | \\ / \\ / ', + ' -0.2 |_______________________________________________________', + ' -10 0 10' + ] + # RuntimeWarning: invalid value encountered in double_scalars + with ignore_warnings(RuntimeWarning): + assert lines == list(textplot_str(sin(x)/x, -10, 10)) + + +def test_imaginary(): + x = Symbol('x') + lines = [ + ' 1 | ..', + ' | .. ', + ' | ... ', + ' | .. ', + ' | .. ', + ' | .. ', + ' | .. ', + ' | .. ', + ' | .. ', + ' | / ', + ' 0.5 |----------------------------------/--------------------', + ' | .. ', + ' | / ', + ' | . ', + ' | ', + ' | . ', + ' | . 
', + ' | ', + ' | ', + ' | ', + ' 0 |_______________________________________________________', + ' -1 0 1' + ] + # RuntimeWarning: invalid value encountered in sqrt + with ignore_warnings(RuntimeWarning): + assert list(textplot_str(sqrt(x), -1, 1)) == lines + + lines = [ + ' 1 | ', + ' | ', + ' | ', + ' | ', + ' | ', + ' | ', + ' | ', + ' | ', + ' | ', + ' | ', + ' 0 |-------------------------------------------------------', + ' | ', + ' | ', + ' | ', + ' | ', + ' | ', + ' | ', + ' | ', + ' | ', + ' | ', + ' -1 |_______________________________________________________', + ' -1 0 1' + ] + assert list(textplot_str(S.ImaginaryUnit, -1, 1)) == lines diff --git a/.venv/Lib/site-packages/sympy/plotting/tests/test_utils.py b/.venv/Lib/site-packages/sympy/plotting/tests/test_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..4206a8b001319552c2e2be1aeb46057e6f708912 --- /dev/null +++ b/.venv/Lib/site-packages/sympy/plotting/tests/test_utils.py @@ -0,0 +1,110 @@ +from pytest import raises +from sympy import ( + symbols, Expr, Tuple, Integer, cos, solveset, FiniteSet, ImageSet) +from sympy.plotting.utils import ( + _create_ranges, _plot_sympify, extract_solution) +from sympy.physics.mechanics import ReferenceFrame, Vector as MechVector +from sympy.vector import CoordSys3D, Vector + + +def test_plot_sympify(): + x, y = symbols("x, y") + + # argument is already sympified + args = x + y + r = _plot_sympify(args) + assert r == args + + # one argument needs to be sympified + args = (x + y, 1) + r = _plot_sympify(args) + assert isinstance(r, (list, tuple, Tuple)) and len(r) == 2 + assert isinstance(r[0], Expr) + assert isinstance(r[1], Integer) + + # string and dict should not be sympified + args = (x + y, (x, 0, 1), "str", 1, {1: 1, 2: 2.0}) + r = _plot_sympify(args) + assert isinstance(r, (list, tuple, Tuple)) and len(r) == 5 + assert isinstance(r[0], Expr) + assert isinstance(r[1], Tuple) + assert isinstance(r[2], str) + assert isinstance(r[3], Integer) + assert isinstance(r[4], dict) and isinstance(r[4][1], int) and isinstance(r[4][2], float) + + # nested arguments containing strings + args = ((x + y, (y, 0, 1), "a"), (x + 1, (x, 0, 1), "$f_{1}$")) + r = _plot_sympify(args) + assert isinstance(r, (list, tuple, Tuple)) and len(r) == 2 + assert isinstance(r[0], Tuple) + assert isinstance(r[0][1], Tuple) + assert isinstance(r[0][1][1], Integer) + assert isinstance(r[0][2], str) + assert isinstance(r[1], Tuple) + assert isinstance(r[1][1], Tuple) + assert isinstance(r[1][1][1], Integer) + assert isinstance(r[1][2], str) + + # vectors from sympy.physics.vectors module are not sympified + # vectors from sympy.vectors are sympified + # in both cases, no error should be raised + R = ReferenceFrame("R") + v1 = 2 * R.x + R.y + C = CoordSys3D("C") + v2 = 2 * C.i + C.j + args = (v1, v2) + r = _plot_sympify(args) + assert isinstance(r, (list, tuple, Tuple)) and len(r) == 2 + assert isinstance(v1, MechVector) + assert isinstance(v2, Vector) + + +def test_create_ranges(): + x, y = symbols("x, y") + + # user don't provide any range -> return a default range + r = _create_ranges({x}, [], 1) + assert isinstance(r, (list, tuple, Tuple)) and len(r) == 1 + assert isinstance(r[0], (Tuple, tuple)) + assert r[0] == (x, -10, 10) + + r = _create_ranges({x, y}, [], 2) + assert isinstance(r, (list, tuple, Tuple)) and len(r) == 2 + assert isinstance(r[0], (Tuple, tuple)) + assert isinstance(r[1], (Tuple, tuple)) + assert r[0] == (x, -10, 10) or (y, -10, 10) + assert r[1] == (y, -10, 10) or (x, -10, 10) + 
assert r[0] != r[1] + + # not enough ranges provided by the user -> create default ranges + r = _create_ranges( + {x, y}, + [ + (x, 0, 1), + ], + 2, + ) + assert isinstance(r, (list, tuple, Tuple)) and len(r) == 2 + assert isinstance(r[0], (Tuple, tuple)) + assert isinstance(r[1], (Tuple, tuple)) + assert r[0] == (x, 0, 1) or (y, -10, 10) + assert r[1] == (y, -10, 10) or (x, 0, 1) + assert r[0] != r[1] + + # too many free symbols + raises(ValueError, lambda: _create_ranges({x, y}, [], 1)) + raises(ValueError, lambda: _create_ranges({x, y}, [(x, 0, 5), (y, 0, 1)], 1)) + + +def test_extract_solution(): + x = symbols("x") + + sol = solveset(cos(10 * x)) + assert sol.has(ImageSet) + res = extract_solution(sol) + assert len(res) == 20 + assert isinstance(res, FiniteSet) + + res = extract_solution(sol, 20) + assert len(res) == 40 + assert isinstance(res, FiniteSet) diff --git a/.venv/Lib/site-packages/sympy/polys/__init__.py b/.venv/Lib/site-packages/sympy/polys/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8055ed12d213de3ebc7a1f17100607fb1e3b89b8 --- /dev/null +++ b/.venv/Lib/site-packages/sympy/polys/__init__.py @@ -0,0 +1,130 @@ +"""Polynomial manipulation algorithms and algebraic objects. """ + +__all__ = [ + 'Poly', 'PurePoly', 'poly_from_expr', 'parallel_poly_from_expr', 'degree', + 'total_degree', 'degree_list', 'LC', 'LM', 'LT', 'pdiv', 'prem', 'pquo', + 'pexquo', 'div', 'rem', 'quo', 'exquo', 'half_gcdex', 'gcdex', 'invert', + 'subresultants', 'resultant', 'discriminant', 'cofactors', 'gcd_list', + 'gcd', 'lcm_list', 'lcm', 'terms_gcd', 'trunc', 'monic', 'content', + 'primitive', 'compose', 'decompose', 'sturm', 'gff_list', 'gff', + 'sqf_norm', 'sqf_part', 'sqf_list', 'sqf', 'factor_list', 'factor', + 'intervals', 'refine_root', 'count_roots', 'all_roots', 'real_roots', + 'nroots', 'ground_roots', 'nth_power_roots_poly', 'cancel', 'reduced', + 'groebner', 'is_zero_dimensional', 'GroebnerBasis', 'poly', + + 'symmetrize', 'horner', 'interpolate', 'rational_interpolate', 'viete', + + 'together', + + 'BasePolynomialError', 'ExactQuotientFailed', 'PolynomialDivisionFailed', + 'OperationNotSupported', 'HeuristicGCDFailed', 'HomomorphismFailed', + 'IsomorphismFailed', 'ExtraneousFactors', 'EvaluationFailed', + 'RefinementFailed', 'CoercionFailed', 'NotInvertible', 'NotReversible', + 'NotAlgebraic', 'DomainError', 'PolynomialError', 'UnificationFailed', + 'GeneratorsError', 'GeneratorsNeeded', 'ComputationFailed', + 'UnivariatePolynomialError', 'MultivariatePolynomialError', + 'PolificationFailed', 'OptionError', 'FlagError', + + 'minpoly', 'minimal_polynomial', 'primitive_element', 'field_isomorphism', + 'to_number_field', 'isolate', 'round_two', 'prime_decomp', + 'prime_valuation', 'galois_group', + + 'itermonomials', 'Monomial', + + 'lex', 'grlex', 'grevlex', 'ilex', 'igrlex', 'igrevlex', + + 'CRootOf', 'rootof', 'RootOf', 'ComplexRootOf', 'RootSum', + + 'roots', + + 'Domain', 'FiniteField', 'IntegerRing', 'RationalField', 'RealField', + 'ComplexField', 'PythonFiniteField', 'GMPYFiniteField', + 'PythonIntegerRing', 'GMPYIntegerRing', 'PythonRational', + 'GMPYRationalField', 'AlgebraicField', 'PolynomialRing', 'FractionField', + 'ExpressionDomain', 'FF_python', 'FF_gmpy', 'ZZ_python', 'ZZ_gmpy', + 'QQ_python', 'QQ_gmpy', 'GF', 'FF', 'ZZ', 'QQ', 'ZZ_I', 'QQ_I', 'RR', + 'CC', 'EX', 'EXRAW', + + 'construct_domain', + + 'swinnerton_dyer_poly', 'cyclotomic_poly', 'symmetric_poly', + 'random_poly', 'interpolating_poly', + + 'jacobi_poly', 'chebyshevt_poly', 
'chebyshevu_poly', 'hermite_poly', + 'hermite_prob_poly', 'legendre_poly', 'laguerre_poly', + + 'bernoulli_poly', 'bernoulli_c_poly', 'genocchi_poly', 'euler_poly', + 'andre_poly', + + 'apart', 'apart_list', 'assemble_partfrac_list', + + 'Options', + + 'ring', 'xring', 'vring', 'sring', + + 'field', 'xfield', 'vfield', 'sfield' +] + +from .polytools import (Poly, PurePoly, poly_from_expr, + parallel_poly_from_expr, degree, total_degree, degree_list, LC, LM, + LT, pdiv, prem, pquo, pexquo, div, rem, quo, exquo, half_gcdex, gcdex, + invert, subresultants, resultant, discriminant, cofactors, gcd_list, + gcd, lcm_list, lcm, terms_gcd, trunc, monic, content, primitive, + compose, decompose, sturm, gff_list, gff, sqf_norm, sqf_part, + sqf_list, sqf, factor_list, factor, intervals, refine_root, + count_roots, all_roots, real_roots, nroots, ground_roots, + nth_power_roots_poly, cancel, reduced, groebner, is_zero_dimensional, + GroebnerBasis, poly) + +from .polyfuncs import (symmetrize, horner, interpolate, + rational_interpolate, viete) + +from .rationaltools import together + +from .polyerrors import (BasePolynomialError, ExactQuotientFailed, + PolynomialDivisionFailed, OperationNotSupported, HeuristicGCDFailed, + HomomorphismFailed, IsomorphismFailed, ExtraneousFactors, + EvaluationFailed, RefinementFailed, CoercionFailed, NotInvertible, + NotReversible, NotAlgebraic, DomainError, PolynomialError, + UnificationFailed, GeneratorsError, GeneratorsNeeded, + ComputationFailed, UnivariatePolynomialError, + MultivariatePolynomialError, PolificationFailed, OptionError, + FlagError) + +from .numberfields import (minpoly, minimal_polynomial, primitive_element, + field_isomorphism, to_number_field, isolate, round_two, prime_decomp, + prime_valuation, galois_group) + +from .monomials import itermonomials, Monomial + +from .orderings import lex, grlex, grevlex, ilex, igrlex, igrevlex + +from .rootoftools import CRootOf, rootof, RootOf, ComplexRootOf, RootSum + +from .polyroots import roots + +from .domains import (Domain, FiniteField, IntegerRing, RationalField, + RealField, ComplexField, PythonFiniteField, GMPYFiniteField, + PythonIntegerRing, GMPYIntegerRing, PythonRational, GMPYRationalField, + AlgebraicField, PolynomialRing, FractionField, ExpressionDomain, + FF_python, FF_gmpy, ZZ_python, ZZ_gmpy, QQ_python, QQ_gmpy, GF, FF, + ZZ, QQ, ZZ_I, QQ_I, RR, CC, EX, EXRAW) + +from .constructor import construct_domain + +from .specialpolys import (swinnerton_dyer_poly, cyclotomic_poly, + symmetric_poly, random_poly, interpolating_poly) + +from .orthopolys import (jacobi_poly, chebyshevt_poly, chebyshevu_poly, + hermite_poly, hermite_prob_poly, legendre_poly, laguerre_poly) + +from .appellseqs import (bernoulli_poly, bernoulli_c_poly, genocchi_poly, + euler_poly, andre_poly) + +from .partfrac import apart, apart_list, assemble_partfrac_list + +from .polyoptions import Options + +from .rings import ring, xring, vring, sring + +from .fields import field, xfield, vfield, sfield diff --git a/.venv/Lib/site-packages/sympy/polys/agca/extensions.py b/.venv/Lib/site-packages/sympy/polys/agca/extensions.py new file mode 100644 index 0000000000000000000000000000000000000000..2668f792b5721db877f275e57ed54961b2e4df93 --- /dev/null +++ b/.venv/Lib/site-packages/sympy/polys/agca/extensions.py @@ -0,0 +1,356 @@ +"""Finite extensions of ring domains.""" + +from sympy.polys.domains.domain import Domain +from sympy.polys.domains.domainelement import DomainElement +from sympy.polys.polyerrors import (CoercionFailed, NotInvertible, 
+ GeneratorsError) +from sympy.polys.polytools import Poly +from sympy.printing.defaults import DefaultPrinting + + +class ExtensionElement(DomainElement, DefaultPrinting): + """ + Element of a finite extension. + + A class of univariate polynomials modulo the ``modulus`` + of the extension ``ext``. It is represented by the + unique polynomial ``rep`` of lowest degree. Both + ``rep`` and the representation ``mod`` of ``modulus`` + are of class DMP. + + """ + __slots__ = ('rep', 'ext') + + def __init__(self, rep, ext): + self.rep = rep + self.ext = ext + + def parent(f): + return f.ext + + def as_expr(f): + return f.ext.to_sympy(f) + + def __bool__(f): + return bool(f.rep) + + def __pos__(f): + return f + + def __neg__(f): + return ExtElem(-f.rep, f.ext) + + def _get_rep(f, g): + if isinstance(g, ExtElem): + if g.ext == f.ext: + return g.rep + else: + return None + else: + try: + g = f.ext.convert(g) + return g.rep + except CoercionFailed: + return None + + def __add__(f, g): + rep = f._get_rep(g) + if rep is not None: + return ExtElem(f.rep + rep, f.ext) + else: + return NotImplemented + + __radd__ = __add__ + + def __sub__(f, g): + rep = f._get_rep(g) + if rep is not None: + return ExtElem(f.rep - rep, f.ext) + else: + return NotImplemented + + def __rsub__(f, g): + rep = f._get_rep(g) + if rep is not None: + return ExtElem(rep - f.rep, f.ext) + else: + return NotImplemented + + def __mul__(f, g): + rep = f._get_rep(g) + if rep is not None: + return ExtElem((f.rep * rep) % f.ext.mod, f.ext) + else: + return NotImplemented + + __rmul__ = __mul__ + + def _divcheck(f): + """Raise if division is not implemented for this divisor""" + if not f: + raise NotInvertible('Zero divisor') + elif f.ext.is_Field: + return True + elif f.rep.is_ground and f.ext.domain.is_unit(f.rep.LC()): + return True + else: + # Some cases like (2*x + 2)/2 over ZZ will fail here. It is + # unclear how to implement division in general if the ground + # domain is not a field so for now it was decided to restrict the + # implementation to division by invertible constants. + msg = (f"Can not invert {f} in {f.ext}. " + "Only division by invertible constants is implemented.") + raise NotImplementedError(msg) + + def inverse(f): + """Multiplicative inverse. + + Raises + ====== + + NotInvertible + If the element is a zero divisor. 
+ + """ + f._divcheck() + + if f.ext.is_Field: + invrep = f.rep.invert(f.ext.mod) + else: + R = f.ext.ring + invrep = R.exquo(R.one, f.rep) + + return ExtElem(invrep, f.ext) + + def __truediv__(f, g): + rep = f._get_rep(g) + if rep is None: + return NotImplemented + g = ExtElem(rep, f.ext) + + try: + ginv = g.inverse() + except NotInvertible: + raise ZeroDivisionError(f"{f} / {g}") + + return f * ginv + + __floordiv__ = __truediv__ + + def __rtruediv__(f, g): + try: + g = f.ext.convert(g) + except CoercionFailed: + return NotImplemented + return g / f + + __rfloordiv__ = __rtruediv__ + + def __mod__(f, g): + rep = f._get_rep(g) + if rep is None: + return NotImplemented + g = ExtElem(rep, f.ext) + + try: + g._divcheck() + except NotInvertible: + raise ZeroDivisionError(f"{f} % {g}") + + # Division where defined is always exact so there is no remainder + return f.ext.zero + + def __rmod__(f, g): + try: + g = f.ext.convert(g) + except CoercionFailed: + return NotImplemented + return g % f + + def __pow__(f, n): + if not isinstance(n, int): + raise TypeError("exponent of type 'int' expected") + if n < 0: + try: + f, n = f.inverse(), -n + except NotImplementedError: + raise ValueError("negative powers are not defined") + + b = f.rep + m = f.ext.mod + r = f.ext.one.rep + while n > 0: + if n % 2: + r = (r*b) % m + b = (b*b) % m + n //= 2 + + return ExtElem(r, f.ext) + + def __eq__(f, g): + if isinstance(g, ExtElem): + return f.rep == g.rep and f.ext == g.ext + else: + return NotImplemented + + def __ne__(f, g): + return not f == g + + def __hash__(f): + return hash((f.rep, f.ext)) + + def __str__(f): + from sympy.printing.str import sstr + return sstr(f.as_expr()) + + __repr__ = __str__ + + @property + def is_ground(f): + return f.rep.is_ground + + def to_ground(f): + [c] = f.rep.to_list() + return c + +ExtElem = ExtensionElement + + +class MonogenicFiniteExtension(Domain): + r""" + Finite extension generated by an integral element. + + The generator is defined by a monic univariate + polynomial derived from the argument ``mod``. + + A shorter alias is ``FiniteExtension``. + + Examples + ======== + + Quadratic integer ring $\mathbb{Z}[\sqrt2]$: + + >>> from sympy import Symbol, Poly + >>> from sympy.polys.agca.extensions import FiniteExtension + >>> x = Symbol('x') + >>> R = FiniteExtension(Poly(x**2 - 2)); R + ZZ[x]/(x**2 - 2) + >>> R.rank + 2 + >>> R(1 + x)*(3 - 2*x) + x - 1 + + Finite field $GF(5^3)$ defined by the primitive + polynomial $x^3 + x^2 + 2$ (over $\mathbb{Z}_5$). + + >>> F = FiniteExtension(Poly(x**3 + x**2 + 2, modulus=5)); F + GF(5)[x]/(x**3 + x**2 + 2) + >>> F.basis + (1, x, x**2) + >>> F(x + 3)/(x**2 + 2) + -2*x**2 + x + 2 + + Function field of an elliptic curve: + + >>> t = Symbol('t') + >>> FiniteExtension(Poly(t**2 - x**3 - x + 1, t, field=True)) + ZZ(x)[t]/(t**2 - x**3 - x + 1) + + """ + is_FiniteExtension = True + + dtype = ExtensionElement + + def __init__(self, mod): + if not (isinstance(mod, Poly) and mod.is_univariate): + raise TypeError("modulus must be a univariate Poly") + + # Using auto=True (default) potentially changes the ground domain to a + # field whereas auto=False raises if division is not exact. We'll let + # the caller decide whether or not they want to put the ground domain + # over a field. In most uses mod is already monic. 
+ mod = mod.monic(auto=False) + + self.rank = mod.degree() + self.modulus = mod + self.mod = mod.rep # DMP representation + + self.domain = dom = mod.domain + self.ring = dom.old_poly_ring(*mod.gens) + + self.zero = self.convert(self.ring.zero) + self.one = self.convert(self.ring.one) + + gen = self.ring.gens[0] + self.symbol = self.ring.symbols[0] + self.generator = self.convert(gen) + self.basis = tuple(self.convert(gen**i) for i in range(self.rank)) + + # XXX: It might be necessary to check mod.is_irreducible here + self.is_Field = self.domain.is_Field + + def new(self, arg): + rep = self.ring.convert(arg) + return ExtElem(rep % self.mod, self) + + def __eq__(self, other): + if not isinstance(other, FiniteExtension): + return False + return self.modulus == other.modulus + + def __hash__(self): + return hash((self.__class__.__name__, self.modulus)) + + def __str__(self): + return "%s/(%s)" % (self.ring, self.modulus.as_expr()) + + __repr__ = __str__ + + @property + def has_CharacteristicZero(self): + return self.domain.has_CharacteristicZero + + def characteristic(self): + return self.domain.characteristic() + + def convert(self, f, base=None): + rep = self.ring.convert(f, base) + return ExtElem(rep % self.mod, self) + + def convert_from(self, f, base): + rep = self.ring.convert(f, base) + return ExtElem(rep % self.mod, self) + + def to_sympy(self, f): + return self.ring.to_sympy(f.rep) + + def from_sympy(self, f): + return self.convert(f) + + def set_domain(self, K): + mod = self.modulus.set_domain(K) + return self.__class__(mod) + + def drop(self, *symbols): + if self.symbol in symbols: + raise GeneratorsError('Can not drop generator from FiniteExtension') + K = self.domain.drop(*symbols) + return self.set_domain(K) + + def quo(self, f, g): + return self.exquo(f, g) + + def exquo(self, f, g): + rep = self.ring.exquo(f.rep, g.rep) + return ExtElem(rep % self.mod, self) + + def is_negative(self, a): + return False + + def is_unit(self, a): + if self.is_Field: + return bool(a) + elif a.is_ground: + return self.domain.is_unit(a.to_ground()) + +FiniteExtension = MonogenicFiniteExtension diff --git a/.venv/Lib/site-packages/sympy/polys/agca/homomorphisms.py b/.venv/Lib/site-packages/sympy/polys/agca/homomorphisms.py new file mode 100644 index 0000000000000000000000000000000000000000..45e9549980a8848eee944000d321922576961a00 --- /dev/null +++ b/.venv/Lib/site-packages/sympy/polys/agca/homomorphisms.py @@ -0,0 +1,691 @@ +""" +Computations with homomorphisms of modules and rings. + +This module implements classes for representing homomorphisms of rings and +their modules. Instead of instantiating the classes directly, you should use +the function ``homomorphism(from, to, matrix)`` to create homomorphism objects. +""" + + +from sympy.polys.agca.modules import (Module, FreeModule, QuotientModule, + SubModule, SubQuotientModule) +from sympy.polys.polyerrors import CoercionFailed + +# The main computational task for module homomorphisms is kernels. +# For this reason, the concrete classes are organised by domain module type. + + +class ModuleHomomorphism: + """ + Abstract base class for module homomoprhisms. Do not instantiate. 
+ + Instead, use the ``homomorphism`` function: + + >>> from sympy import QQ + >>> from sympy.abc import x + >>> from sympy.polys.agca import homomorphism + + >>> F = QQ.old_poly_ring(x).free_module(2) + >>> homomorphism(F, F, [[1, 0], [0, 1]]) + Matrix([ + [1, 0], : QQ[x]**2 -> QQ[x]**2 + [0, 1]]) + + Attributes: + + - ring - the ring over which we are considering modules + - domain - the domain module + - codomain - the codomain module + - _ker - cached kernel + - _img - cached image + + Non-implemented methods: + + - _kernel + - _image + - _restrict_domain + - _restrict_codomain + - _quotient_domain + - _quotient_codomain + - _apply + - _mul_scalar + - _compose + - _add + """ + + def __init__(self, domain, codomain): + if not isinstance(domain, Module): + raise TypeError('Source must be a module, got %s' % domain) + if not isinstance(codomain, Module): + raise TypeError('Target must be a module, got %s' % codomain) + if domain.ring != codomain.ring: + raise ValueError('Source and codomain must be over same ring, ' + 'got %s != %s' % (domain, codomain)) + self.domain = domain + self.codomain = codomain + self.ring = domain.ring + self._ker = None + self._img = None + + def kernel(self): + r""" + Compute the kernel of ``self``. + + That is, if ``self`` is the homomorphism `\phi: M \to N`, then compute + `ker(\phi) = \{x \in M | \phi(x) = 0\}`. This is a submodule of `M`. + + Examples + ======== + + >>> from sympy import QQ + >>> from sympy.abc import x + >>> from sympy.polys.agca import homomorphism + + >>> F = QQ.old_poly_ring(x).free_module(2) + >>> homomorphism(F, F, [[1, 0], [x, 0]]).kernel() + <[x, -1]> + """ + if self._ker is None: + self._ker = self._kernel() + return self._ker + + def image(self): + r""" + Compute the image of ``self``. + + That is, if ``self`` is the homomorphism `\phi: M \to N`, then compute + `im(\phi) = \{\phi(x) | x \in M \}`. This is a submodule of `N`. + + Examples + ======== + + >>> from sympy import QQ + >>> from sympy.abc import x + >>> from sympy.polys.agca import homomorphism + + >>> F = QQ.old_poly_ring(x).free_module(2) + >>> homomorphism(F, F, [[1, 0], [x, 0]]).image() == F.submodule([1, 0]) + True + """ + if self._img is None: + self._img = self._image() + return self._img + + def _kernel(self): + """Compute the kernel of ``self``.""" + raise NotImplementedError + + def _image(self): + """Compute the image of ``self``.""" + raise NotImplementedError + + def _restrict_domain(self, sm): + """Implementation of domain restriction.""" + raise NotImplementedError + + def _restrict_codomain(self, sm): + """Implementation of codomain restriction.""" + raise NotImplementedError + + def _quotient_domain(self, sm): + """Implementation of domain quotient.""" + raise NotImplementedError + + def _quotient_codomain(self, sm): + """Implementation of codomain quotient.""" + raise NotImplementedError + + def restrict_domain(self, sm): + """ + Return ``self``, with the domain restricted to ``sm``. + + Here ``sm`` has to be a submodule of ``self.domain``. 
+ + Examples + ======== + + >>> from sympy import QQ + >>> from sympy.abc import x + >>> from sympy.polys.agca import homomorphism + + >>> F = QQ.old_poly_ring(x).free_module(2) + >>> h = homomorphism(F, F, [[1, 0], [x, 0]]) + >>> h + Matrix([ + [1, x], : QQ[x]**2 -> QQ[x]**2 + [0, 0]]) + >>> h.restrict_domain(F.submodule([1, 0])) + Matrix([ + [1, x], : <[1, 0]> -> QQ[x]**2 + [0, 0]]) + + This is the same as just composing on the right with the submodule + inclusion: + + >>> h * F.submodule([1, 0]).inclusion_hom() + Matrix([ + [1, x], : <[1, 0]> -> QQ[x]**2 + [0, 0]]) + """ + if not self.domain.is_submodule(sm): + raise ValueError('sm must be a submodule of %s, got %s' + % (self.domain, sm)) + if sm == self.domain: + return self + return self._restrict_domain(sm) + + def restrict_codomain(self, sm): + """ + Return ``self``, with codomain restricted to to ``sm``. + + Here ``sm`` has to be a submodule of ``self.codomain`` containing the + image. + + Examples + ======== + + >>> from sympy import QQ + >>> from sympy.abc import x + >>> from sympy.polys.agca import homomorphism + + >>> F = QQ.old_poly_ring(x).free_module(2) + >>> h = homomorphism(F, F, [[1, 0], [x, 0]]) + >>> h + Matrix([ + [1, x], : QQ[x]**2 -> QQ[x]**2 + [0, 0]]) + >>> h.restrict_codomain(F.submodule([1, 0])) + Matrix([ + [1, x], : QQ[x]**2 -> <[1, 0]> + [0, 0]]) + """ + if not sm.is_submodule(self.image()): + raise ValueError('the image %s must contain sm, got %s' + % (self.image(), sm)) + if sm == self.codomain: + return self + return self._restrict_codomain(sm) + + def quotient_domain(self, sm): + """ + Return ``self`` with domain replaced by ``domain/sm``. + + Here ``sm`` must be a submodule of ``self.kernel()``. + + Examples + ======== + + >>> from sympy import QQ + >>> from sympy.abc import x + >>> from sympy.polys.agca import homomorphism + + >>> F = QQ.old_poly_ring(x).free_module(2) + >>> h = homomorphism(F, F, [[1, 0], [x, 0]]) + >>> h + Matrix([ + [1, x], : QQ[x]**2 -> QQ[x]**2 + [0, 0]]) + >>> h.quotient_domain(F.submodule([-x, 1])) + Matrix([ + [1, x], : QQ[x]**2/<[-x, 1]> -> QQ[x]**2 + [0, 0]]) + """ + if not self.kernel().is_submodule(sm): + raise ValueError('kernel %s must contain sm, got %s' % + (self.kernel(), sm)) + if sm.is_zero(): + return self + return self._quotient_domain(sm) + + def quotient_codomain(self, sm): + """ + Return ``self`` with codomain replaced by ``codomain/sm``. + + Here ``sm`` must be a submodule of ``self.codomain``. 
+ + Examples + ======== + + >>> from sympy import QQ + >>> from sympy.abc import x + >>> from sympy.polys.agca import homomorphism + + >>> F = QQ.old_poly_ring(x).free_module(2) + >>> h = homomorphism(F, F, [[1, 0], [x, 0]]) + >>> h + Matrix([ + [1, x], : QQ[x]**2 -> QQ[x]**2 + [0, 0]]) + >>> h.quotient_codomain(F.submodule([1, 1])) + Matrix([ + [1, x], : QQ[x]**2 -> QQ[x]**2/<[1, 1]> + [0, 0]]) + + This is the same as composing with the quotient map on the left: + + >>> (F/[(1, 1)]).quotient_hom() * h + Matrix([ + [1, x], : QQ[x]**2 -> QQ[x]**2/<[1, 1]> + [0, 0]]) + """ + if not self.codomain.is_submodule(sm): + raise ValueError('sm must be a submodule of codomain %s, got %s' + % (self.codomain, sm)) + if sm.is_zero(): + return self + return self._quotient_codomain(sm) + + def _apply(self, elem): + """Apply ``self`` to ``elem``.""" + raise NotImplementedError + + def __call__(self, elem): + return self.codomain.convert(self._apply(self.domain.convert(elem))) + + def _compose(self, oth): + """ + Compose ``self`` with ``oth``, that is, return the homomorphism + obtained by first applying then ``self``, then ``oth``. + + (This method is private since in this syntax, it is non-obvious which + homomorphism is executed first.) + """ + raise NotImplementedError + + def _mul_scalar(self, c): + """Scalar multiplication. ``c`` is guaranteed in self.ring.""" + raise NotImplementedError + + def _add(self, oth): + """ + Homomorphism addition. + ``oth`` is guaranteed to be a homomorphism with same domain/codomain. + """ + raise NotImplementedError + + def _check_hom(self, oth): + """Helper to check that oth is a homomorphism with same domain/codomain.""" + if not isinstance(oth, ModuleHomomorphism): + return False + return oth.domain == self.domain and oth.codomain == self.codomain + + def __mul__(self, oth): + if isinstance(oth, ModuleHomomorphism) and self.domain == oth.codomain: + return oth._compose(self) + try: + return self._mul_scalar(self.ring.convert(oth)) + except CoercionFailed: + return NotImplemented + + # NOTE: _compose will never be called from rmul + __rmul__ = __mul__ + + def __truediv__(self, oth): + try: + return self._mul_scalar(1/self.ring.convert(oth)) + except CoercionFailed: + return NotImplemented + + def __add__(self, oth): + if self._check_hom(oth): + return self._add(oth) + return NotImplemented + + def __sub__(self, oth): + if self._check_hom(oth): + return self._add(oth._mul_scalar(self.ring.convert(-1))) + return NotImplemented + + def is_injective(self): + """ + Return True if ``self`` is injective. + + That is, check if the elements of the domain are mapped to the same + codomain element. + + Examples + ======== + + >>> from sympy import QQ + >>> from sympy.abc import x + >>> from sympy.polys.agca import homomorphism + + >>> F = QQ.old_poly_ring(x).free_module(2) + >>> h = homomorphism(F, F, [[1, 0], [x, 0]]) + >>> h.is_injective() + False + >>> h.quotient_domain(h.kernel()).is_injective() + True + """ + return self.kernel().is_zero() + + def is_surjective(self): + """ + Return True if ``self`` is surjective. + + That is, check if every element of the codomain has at least one + preimage. 
+ + Examples + ======== + + >>> from sympy import QQ + >>> from sympy.abc import x + >>> from sympy.polys.agca import homomorphism + + >>> F = QQ.old_poly_ring(x).free_module(2) + >>> h = homomorphism(F, F, [[1, 0], [x, 0]]) + >>> h.is_surjective() + False + >>> h.restrict_codomain(h.image()).is_surjective() + True + """ + return self.image() == self.codomain + + def is_isomorphism(self): + """ + Return True if ``self`` is an isomorphism. + + That is, check if every element of the codomain has precisely one + preimage. Equivalently, ``self`` is both injective and surjective. + + Examples + ======== + + >>> from sympy import QQ + >>> from sympy.abc import x + >>> from sympy.polys.agca import homomorphism + + >>> F = QQ.old_poly_ring(x).free_module(2) + >>> h = homomorphism(F, F, [[1, 0], [x, 0]]) + >>> h = h.restrict_codomain(h.image()) + >>> h.is_isomorphism() + False + >>> h.quotient_domain(h.kernel()).is_isomorphism() + True + """ + return self.is_injective() and self.is_surjective() + + def is_zero(self): + """ + Return True if ``self`` is a zero morphism. + + That is, check if every element of the domain is mapped to zero + under self. + + Examples + ======== + + >>> from sympy import QQ + >>> from sympy.abc import x + >>> from sympy.polys.agca import homomorphism + + >>> F = QQ.old_poly_ring(x).free_module(2) + >>> h = homomorphism(F, F, [[1, 0], [x, 0]]) + >>> h.is_zero() + False + >>> h.restrict_domain(F.submodule()).is_zero() + True + >>> h.quotient_codomain(h.image()).is_zero() + True + """ + return self.image().is_zero() + + def __eq__(self, oth): + try: + return (self - oth).is_zero() + except TypeError: + return False + + def __ne__(self, oth): + return not (self == oth) + + +class MatrixHomomorphism(ModuleHomomorphism): + r""" + Helper class for all homomoprhisms which are expressed via a matrix. + + That is, for such homomorphisms ``domain`` is contained in a module + generated by finitely many elements `e_1, \ldots, e_n`, so that the + homomorphism is determined uniquely by its action on the `e_i`. It + can thus be represented as a vector of elements of the codomain module, + or potentially a supermodule of the codomain module + (and hence conventionally as a matrix, if there is a similar interpretation + for elements of the codomain module). + + Note that this class does *not* assume that the `e_i` freely generate a + submodule, nor that ``domain`` is even all of this submodule. It exists + only to unify the interface. + + Do not instantiate. + + Attributes: + + - matrix - the list of images determining the homomorphism. 
+ NOTE: the elements of matrix belong to either self.codomain or + self.codomain.container + + Still non-implemented methods: + + - kernel + - _apply + """ + + def __init__(self, domain, codomain, matrix): + ModuleHomomorphism.__init__(self, domain, codomain) + if len(matrix) != domain.rank: + raise ValueError('Need to provide %s elements, got %s' + % (domain.rank, len(matrix))) + + converter = self.codomain.convert + if isinstance(self.codomain, (SubModule, SubQuotientModule)): + converter = self.codomain.container.convert + self.matrix = tuple(converter(x) for x in matrix) + + def _sympy_matrix(self): + """Helper function which returns a SymPy matrix ``self.matrix``.""" + from sympy.matrices import Matrix + c = lambda x: x + if isinstance(self.codomain, (QuotientModule, SubQuotientModule)): + c = lambda x: x.data + return Matrix([[self.ring.to_sympy(y) for y in c(x)] for x in self.matrix]).T + + def __repr__(self): + lines = repr(self._sympy_matrix()).split('\n') + t = " : %s -> %s" % (self.domain, self.codomain) + s = ' '*len(t) + n = len(lines) + for i in range(n // 2): + lines[i] += s + lines[n // 2] += t + for i in range(n//2 + 1, n): + lines[i] += s + return '\n'.join(lines) + + def _restrict_domain(self, sm): + """Implementation of domain restriction.""" + return SubModuleHomomorphism(sm, self.codomain, self.matrix) + + def _restrict_codomain(self, sm): + """Implementation of codomain restriction.""" + return self.__class__(self.domain, sm, self.matrix) + + def _quotient_domain(self, sm): + """Implementation of domain quotient.""" + return self.__class__(self.domain/sm, self.codomain, self.matrix) + + def _quotient_codomain(self, sm): + """Implementation of codomain quotient.""" + Q = self.codomain/sm + converter = Q.convert + if isinstance(self.codomain, SubModule): + converter = Q.container.convert + return self.__class__(self.domain, self.codomain/sm, + [converter(x) for x in self.matrix]) + + def _add(self, oth): + return self.__class__(self.domain, self.codomain, + [x + y for x, y in zip(self.matrix, oth.matrix)]) + + def _mul_scalar(self, c): + return self.__class__(self.domain, self.codomain, [c*x for x in self.matrix]) + + def _compose(self, oth): + return self.__class__(self.domain, oth.codomain, [oth(x) for x in self.matrix]) + + +class FreeModuleHomomorphism(MatrixHomomorphism): + """ + Concrete class for homomorphisms with domain a free module or a quotient + thereof. + + Do not instantiate; the constructor does not check that your data is well + defined. Use the ``homomorphism`` function instead: + + >>> from sympy import QQ + >>> from sympy.abc import x + >>> from sympy.polys.agca import homomorphism + + >>> F = QQ.old_poly_ring(x).free_module(2) + >>> homomorphism(F, F, [[1, 0], [0, 1]]) + Matrix([ + [1, 0], : QQ[x]**2 -> QQ[x]**2 + [0, 1]]) + """ + + def _apply(self, elem): + if isinstance(self.domain, QuotientModule): + elem = elem.data + return sum(x * e for x, e in zip(elem, self.matrix)) + + def _image(self): + return self.codomain.submodule(*self.matrix) + + def _kernel(self): + # The domain is either a free module or a quotient thereof. + # It does not matter if it is a quotient, because that won't increase + # the kernel. + # Our generators {e_i} are sent to the matrix entries {b_i}. + # The kernel is essentially the syzygy module of these {b_i}. 
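+        # For example, with images b_1 = [1, 0], b_2 = [x, 0] (as in the
+        # ``kernel`` doctest above), the single relation x*b_1 - b_2 = 0
+        # gives the syzygy [x, -1], so the kernel is generated by x*e_1 - e_2.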
+ syz = self.image().syzygy_module() + return self.domain.submodule(*syz.gens) + + +class SubModuleHomomorphism(MatrixHomomorphism): + """ + Concrete class for homomorphism with domain a submodule of a free module + or a quotient thereof. + + Do not instantiate; the constructor does not check that your data is well + defined. Use the ``homomorphism`` function instead: + + >>> from sympy import QQ + >>> from sympy.abc import x + >>> from sympy.polys.agca import homomorphism + + >>> M = QQ.old_poly_ring(x).free_module(2)*x + >>> homomorphism(M, M, [[1, 0], [0, 1]]) + Matrix([ + [1, 0], : <[x, 0], [0, x]> -> <[x, 0], [0, x]> + [0, 1]]) + """ + + def _apply(self, elem): + if isinstance(self.domain, SubQuotientModule): + elem = elem.data + return sum(x * e for x, e in zip(elem, self.matrix)) + + def _image(self): + return self.codomain.submodule(*[self(x) for x in self.domain.gens]) + + def _kernel(self): + syz = self.image().syzygy_module() + return self.domain.submodule( + *[sum(xi*gi for xi, gi in zip(s, self.domain.gens)) + for s in syz.gens]) + + +def homomorphism(domain, codomain, matrix): + r""" + Create a homomorphism object. + + This function tries to build a homomorphism from ``domain`` to ``codomain`` + via the matrix ``matrix``. + + Examples + ======== + + >>> from sympy import QQ + >>> from sympy.abc import x + >>> from sympy.polys.agca import homomorphism + + >>> R = QQ.old_poly_ring(x) + >>> T = R.free_module(2) + + If ``domain`` is a free module generated by `e_1, \ldots, e_n`, then + ``matrix`` should be an n-element iterable `(b_1, \ldots, b_n)` where + the `b_i` are elements of ``codomain``. The constructed homomorphism is the + unique homomorphism sending `e_i` to `b_i`. + + >>> F = R.free_module(2) + >>> h = homomorphism(F, T, [[1, x], [x**2, 0]]) + >>> h + Matrix([ + [1, x**2], : QQ[x]**2 -> QQ[x]**2 + [x, 0]]) + >>> h([1, 0]) + [1, x] + >>> h([0, 1]) + [x**2, 0] + >>> h([1, 1]) + [x**2 + 1, x] + + If ``domain`` is a submodule of a free module, them ``matrix`` determines + a homomoprhism from the containing free module to ``codomain``, and the + homomorphism returned is obtained by restriction to ``domain``. + + >>> S = F.submodule([1, 0], [0, x]) + >>> homomorphism(S, T, [[1, x], [x**2, 0]]) + Matrix([ + [1, x**2], : <[1, 0], [0, x]> -> QQ[x]**2 + [x, 0]]) + + If ``domain`` is a (sub)quotient `N/K`, then ``matrix`` determines a + homomorphism from `N` to ``codomain``. If the kernel contains `K`, this + homomorphism descends to ``domain`` and is returned; otherwise an exception + is raised. + + >>> homomorphism(S/[(1, 0)], T, [0, [x**2, 0]]) + Matrix([ + [0, x**2], : <[1, 0] + <[1, 0]>, [0, x] + <[1, 0]>, [1, 0] + <[1, 0]>> -> QQ[x]**2 + [0, 0]]) + >>> homomorphism(S/[(0, x)], T, [0, [x**2, 0]]) + Traceback (most recent call last): + ... + ValueError: kernel <[1, 0], [0, 0]> must contain sm, got <[0,x]> + + """ + def freepres(module): + """ + Return a tuple ``(F, S, Q, c)`` where ``F`` is a free module, ``S`` is a + submodule of ``F``, and ``Q`` a submodule of ``S``, such that + ``module = S/Q``, and ``c`` is a conversion function. 
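+
+        For instance, a plain submodule ``S`` of a free module ``F`` gives
+        ``(F, S, S.submodule(), c)`` with ``c`` converting into ``F``, while
+        ``F`` itself gives ``(F, F, F.submodule(), c)``.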
+ """ + if isinstance(module, FreeModule): + return module, module, module.submodule(), lambda x: module.convert(x) + if isinstance(module, QuotientModule): + return (module.base, module.base, module.killed_module, + lambda x: module.convert(x).data) + if isinstance(module, SubQuotientModule): + return (module.base.container, module.base, module.killed_module, + lambda x: module.container.convert(x).data) + # an ordinary submodule + return (module.container, module, module.submodule(), + lambda x: module.container.convert(x)) + + SF, SS, SQ, _ = freepres(domain) + TF, TS, TQ, c = freepres(codomain) + # NOTE this is probably a bit inefficient (redundant checks) + return FreeModuleHomomorphism(SF, TF, [c(x) for x in matrix] + ).restrict_domain(SS).restrict_codomain(TS + ).quotient_codomain(TQ).quotient_domain(SQ) diff --git a/.venv/Lib/site-packages/sympy/polys/agca/ideals.py b/.venv/Lib/site-packages/sympy/polys/agca/ideals.py new file mode 100644 index 0000000000000000000000000000000000000000..1969554a1d674bc36ded1a3e312d587c66104086 --- /dev/null +++ b/.venv/Lib/site-packages/sympy/polys/agca/ideals.py @@ -0,0 +1,395 @@ +"""Computations with ideals of polynomial rings.""" + +from sympy.polys.polyerrors import CoercionFailed +from sympy.polys.polyutils import IntegerPowerable + + +class Ideal(IntegerPowerable): + """ + Abstract base class for ideals. + + Do not instantiate - use explicit constructors in the ring class instead: + + >>> from sympy import QQ + >>> from sympy.abc import x + >>> QQ.old_poly_ring(x).ideal(x+1) + + + Attributes + + - ring - the ring this ideal belongs to + + Non-implemented methods: + + - _contains_elem + - _contains_ideal + - _quotient + - _intersect + - _union + - _product + - is_whole_ring + - is_zero + - is_prime, is_maximal, is_primary, is_radical + - is_principal + - height, depth + - radical + + Methods that likely should be overridden in subclasses: + + - reduce_element + """ + + def _contains_elem(self, x): + """Implementation of element containment.""" + raise NotImplementedError + + def _contains_ideal(self, I): + """Implementation of ideal containment.""" + raise NotImplementedError + + def _quotient(self, J): + """Implementation of ideal quotient.""" + raise NotImplementedError + + def _intersect(self, J): + """Implementation of ideal intersection.""" + raise NotImplementedError + + def is_whole_ring(self): + """Return True if ``self`` is the whole ring.""" + raise NotImplementedError + + def is_zero(self): + """Return True if ``self`` is the zero ideal.""" + raise NotImplementedError + + def _equals(self, J): + """Implementation of ideal equality.""" + return self._contains_ideal(J) and J._contains_ideal(self) + + def is_prime(self): + """Return True if ``self`` is a prime ideal.""" + raise NotImplementedError + + def is_maximal(self): + """Return True if ``self`` is a maximal ideal.""" + raise NotImplementedError + + def is_radical(self): + """Return True if ``self`` is a radical ideal.""" + raise NotImplementedError + + def is_primary(self): + """Return True if ``self`` is a primary ideal.""" + raise NotImplementedError + + def is_principal(self): + """Return True if ``self`` is a principal ideal.""" + raise NotImplementedError + + def radical(self): + """Compute the radical of ``self``.""" + raise NotImplementedError + + def depth(self): + """Compute the depth of ``self``.""" + raise NotImplementedError + + def height(self): + """Compute the height of ``self``.""" + raise NotImplementedError + + # TODO more + + # non-implemented methods end here 
+ + def __init__(self, ring): + self.ring = ring + + def _check_ideal(self, J): + """Helper to check ``J`` is an ideal of our ring.""" + if not isinstance(J, Ideal) or J.ring != self.ring: + raise ValueError( + 'J must be an ideal of %s, got %s' % (self.ring, J)) + + def contains(self, elem): + """ + Return True if ``elem`` is an element of this ideal. + + Examples + ======== + + >>> from sympy.abc import x + >>> from sympy import QQ + >>> QQ.old_poly_ring(x).ideal(x+1, x-1).contains(3) + True + >>> QQ.old_poly_ring(x).ideal(x**2, x**3).contains(x) + False + """ + return self._contains_elem(self.ring.convert(elem)) + + def subset(self, other): + """ + Returns True if ``other`` is is a subset of ``self``. + + Here ``other`` may be an ideal. + + Examples + ======== + + >>> from sympy.abc import x + >>> from sympy import QQ + >>> I = QQ.old_poly_ring(x).ideal(x+1) + >>> I.subset([x**2 - 1, x**2 + 2*x + 1]) + True + >>> I.subset([x**2 + 1, x + 1]) + False + >>> I.subset(QQ.old_poly_ring(x).ideal(x**2 - 1)) + True + """ + if isinstance(other, Ideal): + return self._contains_ideal(other) + return all(self._contains_elem(x) for x in other) + + def quotient(self, J, **opts): + r""" + Compute the ideal quotient of ``self`` by ``J``. + + That is, if ``self`` is the ideal `I`, compute the set + `I : J = \{x \in R | xJ \subset I \}`. + + Examples + ======== + + >>> from sympy.abc import x, y + >>> from sympy import QQ + >>> R = QQ.old_poly_ring(x, y) + >>> R.ideal(x*y).quotient(R.ideal(x)) + + """ + self._check_ideal(J) + return self._quotient(J, **opts) + + def intersect(self, J): + """ + Compute the intersection of self with ideal J. + + Examples + ======== + + >>> from sympy.abc import x, y + >>> from sympy import QQ + >>> R = QQ.old_poly_ring(x, y) + >>> R.ideal(x).intersect(R.ideal(y)) + + """ + self._check_ideal(J) + return self._intersect(J) + + def saturate(self, J): + r""" + Compute the ideal saturation of ``self`` by ``J``. + + That is, if ``self`` is the ideal `I`, compute the set + `I : J^\infty = \{x \in R | xJ^n \subset I \text{ for some } n\}`. + """ + raise NotImplementedError + # Note this can be implemented using repeated quotient + + def union(self, J): + """ + Compute the ideal generated by the union of ``self`` and ``J``. + + Examples + ======== + + >>> from sympy.abc import x + >>> from sympy import QQ + >>> QQ.old_poly_ring(x).ideal(x**2 - 1).union(QQ.old_poly_ring(x).ideal((x+1)**2)) == QQ.old_poly_ring(x).ideal(x+1) + True + """ + self._check_ideal(J) + return self._union(J) + + def product(self, J): + r""" + Compute the ideal product of ``self`` and ``J``. + + That is, compute the ideal generated by products `xy`, for `x` an element + of ``self`` and `y \in J`. + + Examples + ======== + + >>> from sympy.abc import x, y + >>> from sympy import QQ + >>> QQ.old_poly_ring(x, y).ideal(x).product(QQ.old_poly_ring(x, y).ideal(y)) + + """ + self._check_ideal(J) + return self._product(J) + + def reduce_element(self, x): + """ + Reduce the element ``x`` of our ring modulo the ideal ``self``. + + Here "reduce" has no specific meaning: it could return a unique normal + form, simplify the expression a bit, or just do nothing. 
+ """ + return x + + def __add__(self, e): + if not isinstance(e, Ideal): + R = self.ring.quotient_ring(self) + if isinstance(e, R.dtype): + return e + if isinstance(e, R.ring.dtype): + return R(e) + return R.convert(e) + self._check_ideal(e) + return self.union(e) + + __radd__ = __add__ + + def __mul__(self, e): + if not isinstance(e, Ideal): + try: + e = self.ring.ideal(e) + except CoercionFailed: + return NotImplemented + self._check_ideal(e) + return self.product(e) + + __rmul__ = __mul__ + + def _zeroth_power(self): + return self.ring.ideal(1) + + def _first_power(self): + # Raising to any power but 1 returns a new instance. So we mult by 1 + # here so that the first power is no exception. + return self * 1 + + def __eq__(self, e): + if not isinstance(e, Ideal) or e.ring != self.ring: + return False + return self._equals(e) + + def __ne__(self, e): + return not (self == e) + + +class ModuleImplementedIdeal(Ideal): + """ + Ideal implementation relying on the modules code. + + Attributes: + + - _module - the underlying module + """ + + def __init__(self, ring, module): + Ideal.__init__(self, ring) + self._module = module + + def _contains_elem(self, x): + return self._module.contains([x]) + + def _contains_ideal(self, J): + if not isinstance(J, ModuleImplementedIdeal): + raise NotImplementedError + return self._module.is_submodule(J._module) + + def _intersect(self, J): + if not isinstance(J, ModuleImplementedIdeal): + raise NotImplementedError + return self.__class__(self.ring, self._module.intersect(J._module)) + + def _quotient(self, J, **opts): + if not isinstance(J, ModuleImplementedIdeal): + raise NotImplementedError + return self._module.module_quotient(J._module, **opts) + + def _union(self, J): + if not isinstance(J, ModuleImplementedIdeal): + raise NotImplementedError + return self.__class__(self.ring, self._module.union(J._module)) + + @property + def gens(self): + """ + Return generators for ``self``. + + Examples + ======== + + >>> from sympy import QQ + >>> from sympy.abc import x, y + >>> list(QQ.old_poly_ring(x, y).ideal(x, y, x**2 + y).gens) + [DMP_Python([[1], []], QQ), DMP_Python([[1, 0]], QQ), DMP_Python([[1], [], [1, 0]], QQ)] + """ + return (x[0] for x in self._module.gens) + + def is_zero(self): + """ + Return True if ``self`` is the zero ideal. + + Examples + ======== + + >>> from sympy.abc import x + >>> from sympy import QQ + >>> QQ.old_poly_ring(x).ideal(x).is_zero() + False + >>> QQ.old_poly_ring(x).ideal().is_zero() + True + """ + return self._module.is_zero() + + def is_whole_ring(self): + """ + Return True if ``self`` is the whole ring, i.e. one generator is a unit. 
+ + Examples + ======== + + >>> from sympy.abc import x + >>> from sympy import QQ, ilex + >>> QQ.old_poly_ring(x).ideal(x).is_whole_ring() + False + >>> QQ.old_poly_ring(x).ideal(3).is_whole_ring() + True + >>> QQ.old_poly_ring(x, order=ilex).ideal(2 + x).is_whole_ring() + True + """ + return self._module.is_full_module() + + def __repr__(self): + from sympy.printing.str import sstr + gens = [self.ring.to_sympy(x) for [x] in self._module.gens] + return '<' + ','.join(sstr(g) for g in gens) + '>' + + # NOTE this is the only method using the fact that the module is a SubModule + def _product(self, J): + if not isinstance(J, ModuleImplementedIdeal): + raise NotImplementedError + return self.__class__(self.ring, self._module.submodule( + *[[x*y] for [x] in self._module.gens for [y] in J._module.gens])) + + def in_terms_of_generators(self, e): + """ + Express ``e`` in terms of the generators of ``self``. + + Examples + ======== + + >>> from sympy.abc import x + >>> from sympy import QQ + >>> I = QQ.old_poly_ring(x).ideal(x**2 + 1, x) + >>> I.in_terms_of_generators(1) # doctest: +SKIP + [DMP_Python([1], QQ), DMP_Python([-1, 0], QQ)] + """ + return self._module.in_terms_of_generators([e]) + + def reduce_element(self, x, **options): + return self._module.reduce_element([x], **options)[0] diff --git a/.venv/Lib/site-packages/sympy/polys/agca/modules.py b/.venv/Lib/site-packages/sympy/polys/agca/modules.py new file mode 100644 index 0000000000000000000000000000000000000000..7e4ac982ff65183052940af098d63f909582eb04 --- /dev/null +++ b/.venv/Lib/site-packages/sympy/polys/agca/modules.py @@ -0,0 +1,1488 @@ +""" +Computations with modules over polynomial rings. + +This module implements various classes that encapsulate groebner basis +computations for modules. Most of them should not be instantiated by hand. +Instead, use the constructing routines on objects you already have. + +For example, to construct a free module over ``QQ[x, y]``, call +``QQ[x, y].free_module(rank)`` instead of the ``FreeModule`` constructor. +In fact ``FreeModule`` is an abstract base class that should not be +instantiated, the ``free_module`` method instead returns the implementing class +``FreeModulePolyRing``. + +In general, the abstract base classes implement most functionality in terms of +a few non-implemented methods. The concrete base classes supply only these +non-implemented methods. They may also supply new implementations of the +convenience methods, for example if there are faster algorithms available. +""" + + +from copy import copy +from functools import reduce + +from sympy.polys.agca.ideals import Ideal +from sympy.polys.domains.field import Field +from sympy.polys.orderings import ProductOrder, monomial_key +from sympy.polys.polyclasses import DMP +from sympy.polys.polyerrors import CoercionFailed +from sympy.core.basic import _aresame +from sympy.utilities.iterables import iterable + +# TODO +# - module saturation +# - module quotient/intersection for quotient rings +# - free resoltutions / syzygies +# - finding small/minimal generating sets +# - ... + +########################################################################## +## Abstract base classes ################################################# +########################################################################## + + +class Module: + """ + Abstract base class for modules. 
+ + Do not instantiate - use ring explicit constructors instead: + + >>> from sympy import QQ + >>> from sympy.abc import x + >>> QQ.old_poly_ring(x).free_module(2) + QQ[x]**2 + + Attributes: + + - dtype - type of elements + - ring - containing ring + + Non-implemented methods: + + - submodule + - quotient_module + - is_zero + - is_submodule + - multiply_ideal + + The method convert likely needs to be changed in subclasses. + """ + + def __init__(self, ring): + self.ring = ring + + def convert(self, elem, M=None): + """ + Convert ``elem`` into internal representation of this module. + + If ``M`` is not None, it should be a module containing it. + """ + if not isinstance(elem, self.dtype): + raise CoercionFailed + return elem + + def submodule(self, *gens): + """Generate a submodule.""" + raise NotImplementedError + + def quotient_module(self, other): + """Generate a quotient module.""" + raise NotImplementedError + + def __truediv__(self, e): + if not isinstance(e, Module): + e = self.submodule(*e) + return self.quotient_module(e) + + def contains(self, elem): + """Return True if ``elem`` is an element of this module.""" + try: + self.convert(elem) + return True + except CoercionFailed: + return False + + def __contains__(self, elem): + return self.contains(elem) + + def subset(self, other): + """ + Returns True if ``other`` is is a subset of ``self``. + + Examples + ======== + + >>> from sympy.abc import x + >>> from sympy import QQ + >>> F = QQ.old_poly_ring(x).free_module(2) + >>> F.subset([(1, x), (x, 2)]) + True + >>> F.subset([(1/x, x), (x, 2)]) + False + """ + return all(self.contains(x) for x in other) + + def __eq__(self, other): + return self.is_submodule(other) and other.is_submodule(self) + + def __ne__(self, other): + return not (self == other) + + def is_zero(self): + """Returns True if ``self`` is a zero module.""" + raise NotImplementedError + + def is_submodule(self, other): + """Returns True if ``other`` is a submodule of ``self``.""" + raise NotImplementedError + + def multiply_ideal(self, other): + """ + Multiply ``self`` by the ideal ``other``. + """ + raise NotImplementedError + + def __mul__(self, e): + if not isinstance(e, Ideal): + try: + e = self.ring.ideal(e) + except (CoercionFailed, NotImplementedError): + return NotImplemented + return self.multiply_ideal(e) + + __rmul__ = __mul__ + + def identity_hom(self): + """Return the identity homomorphism on ``self``.""" + raise NotImplementedError + + +class ModuleElement: + """ + Base class for module element wrappers. + + Use this class to wrap primitive data types as module elements. It stores + a reference to the containing module, and implements all the arithmetic + operators. 
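+
+    For instance, with the free-module subclass defined further below:
+
+    >>> from sympy import QQ
+    >>> from sympy.abc import x
+    >>> F = QQ.old_poly_ring(x).free_module(2)
+    >>> e = F.convert([x, 1])
+    >>> e + [1, 0]
+    [x + 1, 1]
+    >>> e*x
+    [x**2, x]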
+ + Attributes: + + - module - containing module + - data - internal data + + Methods that likely need change in subclasses: + + - add + - mul + - div + - eq + """ + + def __init__(self, module, data): + self.module = module + self.data = data + + def add(self, d1, d2): + """Add data ``d1`` and ``d2``.""" + return d1 + d2 + + def mul(self, m, d): + """Multiply module data ``m`` by coefficient d.""" + return m * d + + def div(self, m, d): + """Divide module data ``m`` by coefficient d.""" + return m / d + + def eq(self, d1, d2): + """Return true if d1 and d2 represent the same element.""" + return d1 == d2 + + def __add__(self, om): + if not isinstance(om, self.__class__) or om.module != self.module: + try: + om = self.module.convert(om) + except CoercionFailed: + return NotImplemented + return self.__class__(self.module, self.add(self.data, om.data)) + + __radd__ = __add__ + + def __neg__(self): + return self.__class__(self.module, self.mul(self.data, + self.module.ring.convert(-1))) + + def __sub__(self, om): + if not isinstance(om, self.__class__) or om.module != self.module: + try: + om = self.module.convert(om) + except CoercionFailed: + return NotImplemented + return self.__add__(-om) + + def __rsub__(self, om): + return (-self).__add__(om) + + def __mul__(self, o): + if not isinstance(o, self.module.ring.dtype): + try: + o = self.module.ring.convert(o) + except CoercionFailed: + return NotImplemented + return self.__class__(self.module, self.mul(self.data, o)) + + __rmul__ = __mul__ + + def __truediv__(self, o): + if not isinstance(o, self.module.ring.dtype): + try: + o = self.module.ring.convert(o) + except CoercionFailed: + return NotImplemented + return self.__class__(self.module, self.div(self.data, o)) + + def __eq__(self, om): + if not isinstance(om, self.__class__) or om.module != self.module: + try: + om = self.module.convert(om) + except CoercionFailed: + return False + return self.eq(self.data, om.data) + + def __ne__(self, om): + return not self == om + +########################################################################## +## Free Modules ########################################################## +########################################################################## + + +class FreeModuleElement(ModuleElement): + """Element of a free module. Data stored as a tuple.""" + + def add(self, d1, d2): + return tuple(x + y for x, y in zip(d1, d2)) + + def mul(self, d, p): + return tuple(x * p for x in d) + + def div(self, d, p): + return tuple(x / p for x in d) + + def __repr__(self): + from sympy.printing.str import sstr + data = self.data + if any(isinstance(x, DMP) for x in data): + data = [self.module.ring.to_sympy(x) for x in data] + return '[' + ', '.join(sstr(x) for x in data) + ']' + + def __iter__(self): + return self.data.__iter__() + + def __getitem__(self, idx): + return self.data[idx] + + +class FreeModule(Module): + """ + Abstract base class for free modules. + + Additional attributes: + + - rank - rank of the free module + + Non-implemented methods: + + - submodule + """ + + dtype = FreeModuleElement + + def __init__(self, ring, rank): + Module.__init__(self, ring) + self.rank = rank + + def __repr__(self): + return repr(self.ring) + "**" + repr(self.rank) + + def is_submodule(self, other): + """ + Returns True if ``other`` is a submodule of ``self``. 
+ + Examples + ======== + + >>> from sympy.abc import x + >>> from sympy import QQ + >>> F = QQ.old_poly_ring(x).free_module(2) + >>> M = F.submodule([2, x]) + >>> F.is_submodule(F) + True + >>> F.is_submodule(M) + True + >>> M.is_submodule(F) + False + """ + if isinstance(other, SubModule): + return other.container == self + if isinstance(other, FreeModule): + return other.ring == self.ring and other.rank == self.rank + return False + + def convert(self, elem, M=None): + """ + Convert ``elem`` into the internal representation. + + This method is called implicitly whenever computations involve elements + not in the internal representation. + + Examples + ======== + + >>> from sympy.abc import x + >>> from sympy import QQ + >>> F = QQ.old_poly_ring(x).free_module(2) + >>> F.convert([1, 0]) + [1, 0] + """ + if isinstance(elem, FreeModuleElement): + if elem.module is self: + return elem + if elem.module.rank != self.rank: + raise CoercionFailed + return FreeModuleElement(self, + tuple(self.ring.convert(x, elem.module.ring) for x in elem.data)) + elif iterable(elem): + tpl = tuple(self.ring.convert(x) for x in elem) + if len(tpl) != self.rank: + raise CoercionFailed + return FreeModuleElement(self, tpl) + elif _aresame(elem, 0): + return FreeModuleElement(self, (self.ring.convert(0),)*self.rank) + else: + raise CoercionFailed + + def is_zero(self): + """ + Returns True if ``self`` is a zero module. + + (If, as this implementation assumes, the coefficient ring is not the + zero ring, then this is equivalent to the rank being zero.) + + Examples + ======== + + >>> from sympy.abc import x + >>> from sympy import QQ + >>> QQ.old_poly_ring(x).free_module(0).is_zero() + True + >>> QQ.old_poly_ring(x).free_module(1).is_zero() + False + """ + return self.rank == 0 + + def basis(self): + """ + Return a set of basis elements. + + Examples + ======== + + >>> from sympy.abc import x + >>> from sympy import QQ + >>> QQ.old_poly_ring(x).free_module(3).basis() + ([1, 0, 0], [0, 1, 0], [0, 0, 1]) + """ + from sympy.matrices import eye + M = eye(self.rank) + return tuple(self.convert(M.row(i)) for i in range(self.rank)) + + def quotient_module(self, submodule): + """ + Return a quotient module. + + Examples + ======== + + >>> from sympy.abc import x + >>> from sympy import QQ + >>> M = QQ.old_poly_ring(x).free_module(2) + >>> M.quotient_module(M.submodule([1, x], [x, 2])) + QQ[x]**2/<[1, x], [x, 2]> + + Or more conicisely, using the overloaded division operator: + + >>> QQ.old_poly_ring(x).free_module(2) / [[1, x], [x, 2]] + QQ[x]**2/<[1, x], [x, 2]> + """ + return QuotientModule(self.ring, self, submodule) + + def multiply_ideal(self, other): + """ + Multiply ``self`` by the ideal ``other``. + + Examples + ======== + + >>> from sympy.abc import x + >>> from sympy import QQ + >>> I = QQ.old_poly_ring(x).ideal(x) + >>> F = QQ.old_poly_ring(x).free_module(2) + >>> F.multiply_ideal(I) + <[x, 0], [0, x]> + """ + return self.submodule(*self.basis()).multiply_ideal(other) + + def identity_hom(self): + """ + Return the identity homomorphism on ``self``. + + Examples + ======== + + >>> from sympy.abc import x + >>> from sympy import QQ + >>> QQ.old_poly_ring(x).free_module(2).identity_hom() + Matrix([ + [1, 0], : QQ[x]**2 -> QQ[x]**2 + [0, 1]]) + """ + from sympy.polys.agca.homomorphisms import homomorphism + return homomorphism(self, self, self.basis()) + + +class FreeModulePolyRing(FreeModule): + """ + Free module over a generalized polynomial ring. 
+ + Do not instantiate this, use the constructor method of the ring instead: + + Examples + ======== + + >>> from sympy.abc import x + >>> from sympy import QQ + >>> F = QQ.old_poly_ring(x).free_module(3) + >>> F + QQ[x]**3 + >>> F.contains([x, 1, 0]) + True + >>> F.contains([1/x, 0, 1]) + False + """ + + def __init__(self, ring, rank): + from sympy.polys.domains.old_polynomialring import PolynomialRingBase + FreeModule.__init__(self, ring, rank) + if not isinstance(ring, PolynomialRingBase): + raise NotImplementedError('This implementation only works over ' + + 'polynomial rings, got %s' % ring) + if not isinstance(ring.dom, Field): + raise NotImplementedError('Ground domain must be a field, ' + + 'got %s' % ring.dom) + + def submodule(self, *gens, **opts): + """ + Generate a submodule. + + Examples + ======== + + >>> from sympy.abc import x, y + >>> from sympy import QQ + >>> M = QQ.old_poly_ring(x, y).free_module(2).submodule([x, x + y]) + >>> M + <[x, x + y]> + >>> M.contains([2*x, 2*x + 2*y]) + True + >>> M.contains([x, y]) + False + """ + return SubModulePolyRing(gens, self, **opts) + + +class FreeModuleQuotientRing(FreeModule): + """ + Free module over a quotient ring. + + Do not instantiate this, use the constructor method of the ring instead: + + Examples + ======== + + >>> from sympy.abc import x + >>> from sympy import QQ + >>> F = (QQ.old_poly_ring(x)/[x**2 + 1]).free_module(3) + >>> F + (QQ[x]/)**3 + + Attributes + + - quot - the quotient module `R^n / IR^n`, where `R/I` is our ring + """ + + def __init__(self, ring, rank): + from sympy.polys.domains.quotientring import QuotientRing + FreeModule.__init__(self, ring, rank) + if not isinstance(ring, QuotientRing): + raise NotImplementedError('This implementation only works over ' + + 'quotient rings, got %s' % ring) + F = self.ring.ring.free_module(self.rank) + self.quot = F / (self.ring.base_ideal*F) + + def __repr__(self): + return "(" + repr(self.ring) + ")" + "**" + repr(self.rank) + + def submodule(self, *gens, **opts): + """ + Generate a submodule. + + Examples + ======== + + >>> from sympy.abc import x, y + >>> from sympy import QQ + >>> M = (QQ.old_poly_ring(x, y)/[x**2 - y**2]).free_module(2).submodule([x, x + y]) + >>> M + <[x + , x + y + ]> + >>> M.contains([y**2, x**2 + x*y]) + True + >>> M.contains([x, y]) + False + """ + return SubModuleQuotientRing(gens, self, **opts) + + def lift(self, elem): + """ + Lift the element ``elem`` of self to the module self.quot. + + Note that self.quot is the same set as self, just as an R-module + and not as an R/I-module, so this makes sense. + + Examples + ======== + + >>> from sympy.abc import x + >>> from sympy import QQ + >>> F = (QQ.old_poly_ring(x)/[x**2 + 1]).free_module(2) + >>> e = F.convert([1, 0]) + >>> e + [1 + , 0 + ] + >>> L = F.quot + >>> l = F.lift(e) + >>> l + [1, 0] + <[x**2 + 1, 0], [0, x**2 + 1]> + >>> L.contains(l) + True + """ + return self.quot.convert([x.data for x in elem]) + + def unlift(self, elem): + """ + Push down an element of self.quot to self. + + This undoes ``lift``. 
+ + Examples + ======== + + >>> from sympy.abc import x + >>> from sympy import QQ + >>> F = (QQ.old_poly_ring(x)/[x**2 + 1]).free_module(2) + >>> e = F.convert([1, 0]) + >>> l = F.lift(e) + >>> e == l + False + >>> e == F.unlift(l) + True + """ + return self.convert(elem.data) + +########################################################################## +## Submodules and subquotients ########################################### +########################################################################## + + +class SubModule(Module): + """ + Base class for submodules. + + Attributes: + + - container - containing module + - gens - generators (subset of containing module) + - rank - rank of containing module + + Non-implemented methods: + + - _contains + - _syzygies + - _in_terms_of_generators + - _intersect + - _module_quotient + + Methods that likely need change in subclasses: + + - reduce_element + """ + + def __init__(self, gens, container): + Module.__init__(self, container.ring) + self.gens = tuple(container.convert(x) for x in gens) + self.container = container + self.rank = container.rank + self.ring = container.ring + self.dtype = container.dtype + + def __repr__(self): + return "<" + ", ".join(repr(x) for x in self.gens) + ">" + + def _contains(self, other): + """Implementation of containment. + Other is guaranteed to be FreeModuleElement.""" + raise NotImplementedError + + def _syzygies(self): + """Implementation of syzygy computation wrt self generators.""" + raise NotImplementedError + + def _in_terms_of_generators(self, e): + """Implementation of expression in terms of generators.""" + raise NotImplementedError + + def convert(self, elem, M=None): + """ + Convert ``elem`` into the internal represantition. + + Mostly called implicitly. + + Examples + ======== + + >>> from sympy.abc import x + >>> from sympy import QQ + >>> M = QQ.old_poly_ring(x).free_module(2).submodule([1, x]) + >>> M.convert([2, 2*x]) + [2, 2*x] + """ + if isinstance(elem, self.container.dtype) and elem.module is self: + return elem + r = copy(self.container.convert(elem, M)) + r.module = self + if not self._contains(r): + raise CoercionFailed + return r + + def _intersect(self, other): + """Implementation of intersection. + Other is guaranteed to be a submodule of same free module.""" + raise NotImplementedError + + def _module_quotient(self, other): + """Implementation of quotient. + Other is guaranteed to be a submodule of same free module.""" + raise NotImplementedError + + def intersect(self, other, **options): + """ + Returns the intersection of ``self`` with submodule ``other``. + + Examples + ======== + + >>> from sympy.abc import x, y + >>> from sympy import QQ + >>> F = QQ.old_poly_ring(x, y).free_module(2) + >>> F.submodule([x, x]).intersect(F.submodule([y, y])) + <[x*y, x*y]> + + Some implementation allow further options to be passed. Currently, to + only one implemented is ``relations=True``, in which case the function + will return a triple ``(res, rela, relb)``, where ``res`` is the + intersection module, and ``rela`` and ``relb`` are lists of coefficient + vectors, expressing the generators of ``res`` in terms of the + generators of ``self`` (``rela``) and ``other`` (``relb``). 
+ + >>> F.submodule([x, x]).intersect(F.submodule([y, y]), relations=True) + (<[x*y, x*y]>, [(DMP_Python([[1, 0]], QQ),)], [(DMP_Python([[1], []], QQ),)]) + + The above result says: the intersection module is generated by the + single element `(-xy, -xy) = -y (x, x) = -x (y, y)`, where + `(x, x)` and `(y, y)` respectively are the unique generators of + the two modules being intersected. + """ + if not isinstance(other, SubModule): + raise TypeError('%s is not a SubModule' % other) + if other.container != self.container: + raise ValueError( + '%s is contained in a different free module' % other) + return self._intersect(other, **options) + + def module_quotient(self, other, **options): + r""" + Returns the module quotient of ``self`` by submodule ``other``. + + That is, if ``self`` is the module `M` and ``other`` is `N`, then + return the ideal `\{f \in R | fN \subset M\}`. + + Examples + ======== + + >>> from sympy import QQ + >>> from sympy.abc import x, y + >>> F = QQ.old_poly_ring(x, y).free_module(2) + >>> S = F.submodule([x*y, x*y]) + >>> T = F.submodule([x, x]) + >>> S.module_quotient(T) + + + Some implementations allow further options to be passed. Currently, the + only one implemented is ``relations=True``, which may only be passed + if ``other`` is principal. In this case the function + will return a pair ``(res, rel)`` where ``res`` is the ideal, and + ``rel`` is a list of coefficient vectors, expressing the generators of + the ideal, multiplied by the generator of ``other`` in terms of + generators of ``self``. + + >>> S.module_quotient(T, relations=True) + (, [[DMP_Python([[1]], QQ)]]) + + This means that the quotient ideal is generated by the single element + `y`, and that `y (x, x) = 1 (xy, xy)`, `(x, x)` and `(xy, xy)` being + the generators of `T` and `S`, respectively. + """ + if not isinstance(other, SubModule): + raise TypeError('%s is not a SubModule' % other) + if other.container != self.container: + raise ValueError( + '%s is contained in a different free module' % other) + return self._module_quotient(other, **options) + + def union(self, other): + """ + Returns the module generated by the union of ``self`` and ``other``. + + Examples + ======== + + >>> from sympy.abc import x + >>> from sympy import QQ + >>> F = QQ.old_poly_ring(x).free_module(1) + >>> M = F.submodule([x**2 + x]) # + >>> N = F.submodule([x**2 - 1]) # <(x-1)(x+1)> + >>> M.union(N) == F.submodule([x+1]) + True + """ + if not isinstance(other, SubModule): + raise TypeError('%s is not a SubModule' % other) + if other.container != self.container: + raise ValueError( + '%s is contained in a different free module' % other) + return self.__class__(self.gens + other.gens, self.container) + + def is_zero(self): + """ + Return True if ``self`` is a zero module. + + Examples + ======== + + >>> from sympy.abc import x + >>> from sympy import QQ + >>> F = QQ.old_poly_ring(x).free_module(2) + >>> F.submodule([x, 1]).is_zero() + False + >>> F.submodule([0, 0]).is_zero() + True + """ + return all(x == 0 for x in self.gens) + + def submodule(self, *gens): + """ + Generate a submodule. + + Examples + ======== + + >>> from sympy.abc import x + >>> from sympy import QQ + >>> M = QQ.old_poly_ring(x).free_module(2).submodule([x, 1]) + >>> M.submodule([x**2, x]) + <[x**2, x]> + """ + if not self.subset(gens): + raise ValueError('%s not a subset of %s' % (gens, self)) + return self.__class__(gens, self.container) + + def is_full_module(self): + """ + Return True if ``self`` is the entire free module. 
+ + Examples + ======== + + >>> from sympy.abc import x + >>> from sympy import QQ + >>> F = QQ.old_poly_ring(x).free_module(2) + >>> F.submodule([x, 1]).is_full_module() + False + >>> F.submodule([1, 1], [1, 2]).is_full_module() + True + """ + return all(self.contains(x) for x in self.container.basis()) + + def is_submodule(self, other): + """ + Returns True if ``other`` is a submodule of ``self``. + + >>> from sympy.abc import x + >>> from sympy import QQ + >>> F = QQ.old_poly_ring(x).free_module(2) + >>> M = F.submodule([2, x]) + >>> N = M.submodule([2*x, x**2]) + >>> M.is_submodule(M) + True + >>> M.is_submodule(N) + True + >>> N.is_submodule(M) + False + """ + if isinstance(other, SubModule): + return self.container == other.container and \ + all(self.contains(x) for x in other.gens) + if isinstance(other, (FreeModule, QuotientModule)): + return self.container == other and self.is_full_module() + return False + + def syzygy_module(self, **opts): + r""" + Compute the syzygy module of the generators of ``self``. + + Suppose `M` is generated by `f_1, \ldots, f_n` over the ring + `R`. Consider the homomorphism `\phi: R^n \to M`, given by + sending `(r_1, \ldots, r_n) \to r_1 f_1 + \cdots + r_n f_n`. + The syzygy module is defined to be the kernel of `\phi`. + + Examples + ======== + + The syzygy module is zero iff the generators generate freely a free + submodule: + + >>> from sympy.abc import x, y + >>> from sympy import QQ + >>> QQ.old_poly_ring(x).free_module(2).submodule([1, 0], [1, 1]).syzygy_module().is_zero() + True + + A slightly more interesting example: + + >>> M = QQ.old_poly_ring(x, y).free_module(2).submodule([x, 2*x], [y, 2*y]) + >>> S = QQ.old_poly_ring(x, y).free_module(2).submodule([y, -x]) + >>> M.syzygy_module() == S + True + """ + F = self.ring.free_module(len(self.gens)) + # NOTE we filter out zero syzygies. This is for convenience of the + # _syzygies function and not meant to replace any real "generating set + # reduction" algorithm + return F.submodule(*[x for x in self._syzygies() if F.convert(x) != 0], + **opts) + + def in_terms_of_generators(self, e): + """ + Express element ``e`` of ``self`` in terms of the generators. + + Examples + ======== + + >>> from sympy.abc import x + >>> from sympy import QQ + >>> F = QQ.old_poly_ring(x).free_module(2) + >>> M = F.submodule([1, 0], [1, 1]) + >>> M.in_terms_of_generators([x, x**2]) # doctest: +SKIP + [DMP_Python([-1, 1, 0], QQ), DMP_Python([1, 0, 0], QQ)] + """ + try: + e = self.convert(e) + except CoercionFailed: + raise ValueError('%s is not an element of %s' % (e, self)) + return self._in_terms_of_generators(e) + + def reduce_element(self, x): + """ + Reduce the element ``x`` of our ring modulo the ideal ``self``. + + Here "reduce" has no specific meaning, it could return a unique normal + form, simplify the expression a bit, or just do nothing. + """ + return x + + def quotient_module(self, other, **opts): + """ + Return a quotient module. + + This is the same as taking a submodule of a quotient of the containing + module. 
+ + Examples + ======== + + >>> from sympy.abc import x + >>> from sympy import QQ + >>> F = QQ.old_poly_ring(x).free_module(2) + >>> S1 = F.submodule([x, 1]) + >>> S2 = F.submodule([x**2, x]) + >>> S1.quotient_module(S2) + <[x, 1] + <[x**2, x]>> + + Or more coincisely, using the overloaded division operator: + + >>> F.submodule([x, 1]) / [(x**2, x)] + <[x, 1] + <[x**2, x]>> + """ + if not self.is_submodule(other): + raise ValueError('%s not a submodule of %s' % (other, self)) + return SubQuotientModule(self.gens, + self.container.quotient_module(other), **opts) + + def __add__(self, oth): + return self.container.quotient_module(self).convert(oth) + + __radd__ = __add__ + + def multiply_ideal(self, I): + """ + Multiply ``self`` by the ideal ``I``. + + Examples + ======== + + >>> from sympy.abc import x + >>> from sympy import QQ + >>> I = QQ.old_poly_ring(x).ideal(x**2) + >>> M = QQ.old_poly_ring(x).free_module(2).submodule([1, 1]) + >>> I*M + <[x**2, x**2]> + """ + return self.submodule(*[x*g for [x] in I._module.gens for g in self.gens]) + + def inclusion_hom(self): + """ + Return a homomorphism representing the inclusion map of ``self``. + + That is, the natural map from ``self`` to ``self.container``. + + Examples + ======== + + >>> from sympy.abc import x + >>> from sympy import QQ + >>> QQ.old_poly_ring(x).free_module(2).submodule([x, x]).inclusion_hom() + Matrix([ + [1, 0], : <[x, x]> -> QQ[x]**2 + [0, 1]]) + """ + return self.container.identity_hom().restrict_domain(self) + + def identity_hom(self): + """ + Return the identity homomorphism on ``self``. + + Examples + ======== + + >>> from sympy.abc import x + >>> from sympy import QQ + >>> QQ.old_poly_ring(x).free_module(2).submodule([x, x]).identity_hom() + Matrix([ + [1, 0], : <[x, x]> -> <[x, x]> + [0, 1]]) + """ + return self.container.identity_hom().restrict_domain( + self).restrict_codomain(self) + + +class SubQuotientModule(SubModule): + """ + Submodule of a quotient module. + + Equivalently, quotient module of a submodule. + + Do not instantiate this, instead use the submodule or quotient_module + constructing methods: + + >>> from sympy.abc import x + >>> from sympy import QQ + >>> F = QQ.old_poly_ring(x).free_module(2) + >>> S = F.submodule([1, 0], [1, x]) + >>> Q = F/[(1, 0)] + >>> S/[(1, 0)] == Q.submodule([5, x]) + True + + Attributes: + + - base - base module we are quotient of + - killed_module - submodule used to form the quotient + """ + def __init__(self, gens, container, **opts): + SubModule.__init__(self, gens, container) + self.killed_module = self.container.killed_module + # XXX it is important for some code below that the generators of base + # are in this particular order! + self.base = self.container.base.submodule( + *[x.data for x in self.gens], **opts).union(self.killed_module) + + def _contains(self, elem): + return self.base.contains(elem.data) + + def _syzygies(self): + # let N = self.killed_module be generated by e_1, ..., e_r + # let F = self.base be generated by f_1, ..., f_s and e_1, ..., e_r + # Then self = F/N. + # Let phi: R**s --> self be the evident surjection. + # Similarly psi: R**(s + r) --> F. + # We need to find generators for ker(phi). Let chi: R**s --> F be the + # evident lift of phi. For X in R**s, phi(X) = 0 iff chi(X) is + # contained in N, iff there exists Y in R**r such that + # psi(X, Y) = 0. + # Hence if alpha: R**(s + r) --> R**s is the projection map, then + # ker(phi) = alpha ker(psi). 
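To illustrate the syzygy computation described in the comment above, here is a small sketch mirroring the syzygy_module() doctest: the generators (x, 2x) and (y, 2y) satisfy the single relation y*(x, 2x) - x*(y, 2y) = 0.

from sympy import QQ
from sympy.abc import x, y

F = QQ.old_poly_ring(x, y).free_module(2)
M = F.submodule([x, 2*x], [y, 2*y])
# (y, -x) is a syzygy: y*(x, 2*x) - x*(y, 2*y) = (0, 0).
print(M.syzygy_module() == F.submodule([y, -x]))   # expected: True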
+ return [X[:len(self.gens)] for X in self.base._syzygies()] + + def _in_terms_of_generators(self, e): + return self.base._in_terms_of_generators(e.data)[:len(self.gens)] + + def is_full_module(self): + """ + Return True if ``self`` is the entire free module. + + Examples + ======== + + >>> from sympy.abc import x + >>> from sympy import QQ + >>> F = QQ.old_poly_ring(x).free_module(2) + >>> F.submodule([x, 1]).is_full_module() + False + >>> F.submodule([1, 1], [1, 2]).is_full_module() + True + """ + return self.base.is_full_module() + + def quotient_hom(self): + """ + Return the quotient homomorphism to self. + + That is, return the natural map from ``self.base`` to ``self``. + + Examples + ======== + + >>> from sympy.abc import x + >>> from sympy import QQ + >>> M = (QQ.old_poly_ring(x).free_module(2) / [(1, x)]).submodule([1, 0]) + >>> M.quotient_hom() + Matrix([ + [1, 0], : <[1, 0], [1, x]> -> <[1, 0] + <[1, x]>, [1, x] + <[1, x]>> + [0, 1]]) + """ + return self.base.identity_hom().quotient_codomain(self.killed_module) + + +_subs0 = lambda x: x[0] +_subs1 = lambda x: x[1:] + + +class ModuleOrder(ProductOrder): + """A product monomial order with a zeroth term as module index.""" + + def __init__(self, o1, o2, TOP): + if TOP: + ProductOrder.__init__(self, (o2, _subs1), (o1, _subs0)) + else: + ProductOrder.__init__(self, (o1, _subs0), (o2, _subs1)) + + +class SubModulePolyRing(SubModule): + """ + Submodule of a free module over a generalized polynomial ring. + + Do not instantiate this, use the constructor method of FreeModule instead: + + >>> from sympy.abc import x, y + >>> from sympy import QQ + >>> F = QQ.old_poly_ring(x, y).free_module(2) + >>> F.submodule([x, y], [1, 0]) + <[x, y], [1, 0]> + + Attributes: + + - order - monomial order used + """ + + #self._gb - cached groebner basis + #self._gbe - cached groebner basis relations + + def __init__(self, gens, container, order="lex", TOP=True): + SubModule.__init__(self, gens, container) + if not isinstance(container, FreeModulePolyRing): + raise NotImplementedError('This implementation is for submodules of ' + + 'FreeModulePolyRing, got %s' % container) + self.order = ModuleOrder(monomial_key(order), self.ring.order, TOP) + self._gb = None + self._gbe = None + + def __eq__(self, other): + if isinstance(other, SubModulePolyRing) and self.order != other.order: + return False + return SubModule.__eq__(self, other) + + def _groebner(self, extended=False): + """Returns a standard basis in sdm form.""" + from sympy.polys.distributedmodules import sdm_groebner, sdm_nf_mora + if self._gbe is None and extended: + gb, gbe = sdm_groebner( + [self.ring._vector_to_sdm(x, self.order) for x in self.gens], + sdm_nf_mora, self.order, self.ring.dom, extended=True) + self._gb, self._gbe = tuple(gb), tuple(gbe) + if self._gb is None: + self._gb = tuple(sdm_groebner( + [self.ring._vector_to_sdm(x, self.order) for x in self.gens], + sdm_nf_mora, self.order, self.ring.dom)) + if extended: + return self._gb, self._gbe + else: + return self._gb + + def _groebner_vec(self, extended=False): + """Returns a standard basis in element form.""" + if not extended: + return [FreeModuleElement(self, + tuple(self.ring._sdm_to_vector(x, self.rank))) + for x in self._groebner()] + gb, gbe = self._groebner(extended=True) + return ([self.convert(self.ring._sdm_to_vector(x, self.rank)) + for x in gb], + [self.ring._sdm_to_vector(x, len(self.gens)) for x in gbe]) + + def _contains(self, x): + from sympy.polys.distributedmodules import sdm_zero, sdm_nf_mora + return 
sdm_nf_mora(self.ring._vector_to_sdm(x, self.order), + self._groebner(), self.order, self.ring.dom) == \ + sdm_zero() + + def _syzygies(self): + """Compute syzygies. See [SCA, algorithm 2.5.4].""" + # NOTE if self.gens is a standard basis, this can be done more + # efficiently using Schreyer's theorem + + # First bullet point + k = len(self.gens) + r = self.rank + zero = self.ring.convert(0) + one = self.ring.convert(1) + Rkr = self.ring.free_module(r + k) + newgens = [] + for j, f in enumerate(self.gens): + m = [0]*(r + k) + for i, v in enumerate(f): + m[i] = f[i] + for i in range(k): + m[r + i] = one if j == i else zero + m = FreeModuleElement(Rkr, tuple(m)) + newgens.append(m) + # Note: we need *descending* order on module index, and TOP=False to + # get an elimination order + F = Rkr.submodule(*newgens, order='ilex', TOP=False) + + # Second bullet point: standard basis of F + G = F._groebner_vec() + + # Third bullet point: G0 = G intersect the new k components + G0 = [x[r:] for x in G if all(y == zero for y in x[:r])] + + # Fourth and fifth bullet points: we are done + return G0 + + def _in_terms_of_generators(self, e): + """Expression in terms of generators. See [SCA, 2.8.1].""" + # NOTE: if gens is a standard basis, this can be done more efficiently + M = self.ring.free_module(self.rank).submodule(*((e,) + self.gens)) + S = M.syzygy_module( + order="ilex", TOP=False) # We want decreasing order! + G = S._groebner_vec() + # This list cannot not be empty since e is an element + e = [x for x in G if self.ring.is_unit(x[0])][0] + return [-x/e[0] for x in e[1:]] + + def reduce_element(self, x, NF=None): + """ + Reduce the element ``x`` of our container modulo ``self``. + + This applies the normal form ``NF`` to ``x``. If ``NF`` is passed + as none, the default Mora normal form is used (which is not unique!). + """ + from sympy.polys.distributedmodules import sdm_nf_mora + if NF is None: + NF = sdm_nf_mora + return self.container.convert(self.ring._sdm_to_vector(NF( + self.ring._vector_to_sdm(x, self.order), self._groebner(), + self.order, self.ring.dom), + self.rank)) + + def _intersect(self, other, relations=False): + # See: [SCA, section 2.8.2] + fi = self.gens + hi = other.gens + r = self.rank + ci = [[0]*(2*r) for _ in range(r)] + for k in range(r): + ci[k][k] = 1 + ci[k][r + k] = 1 + di = [list(f) + [0]*r for f in fi] + ei = [[0]*r + list(h) for h in hi] + syz = self.ring.free_module(2*r).submodule(*(ci + di + ei))._syzygies() + nonzero = [x for x in syz if any(y != self.ring.zero for y in x[:r])] + res = self.container.submodule(*([-y for y in x[:r]] for x in nonzero)) + reln1 = [x[r:r + len(fi)] for x in nonzero] + reln2 = [x[r + len(fi):] for x in nonzero] + if relations: + return res, reln1, reln2 + return res + + def _module_quotient(self, other, relations=False): + # See: [SCA, section 2.8.4] + if relations and len(other.gens) != 1: + raise NotImplementedError + if len(other.gens) == 0: + return self.ring.ideal(1) + elif len(other.gens) == 1: + # We do some trickery. Let f be the (vector!) generating ``other`` + # and f1, .., fn be the (vectors) generating self. + # Consider the submodule of R^{r+1} generated by (f, 1) and + # {(fi, 0) | i}. Then the intersection with the last module + # component yields the quotient. 
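A brief sketch of the quotient construction just described, with the same S and T as in the module_quotient() doctest; it assumes the returned ideal object exposes a contains() membership test:

from sympy import QQ
from sympy.abc import x, y

R = QQ.old_poly_ring(x, y)
F = R.free_module(2)
S = F.submodule([x*y, x*y])
T = F.submodule([x, x])
Q = S.module_quotient(T)
# y*(x, x) = (x*y, x*y) lies in S, so y should belong to the quotient ideal.
# Assumes Ideal.contains() is available for membership testing.
print(Q.contains(y))   # expected: True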
+ g1 = list(other.gens[0]) + [1] + gi = [list(x) + [0] for x in self.gens] + # NOTE: We *need* to use an elimination order + M = self.ring.free_module(self.rank + 1).submodule(*([g1] + gi), + order='ilex', TOP=False) + if not relations: + return self.ring.ideal(*[x[-1] for x in M._groebner_vec() if + all(y == self.ring.zero for y in x[:-1])]) + else: + G, R = M._groebner_vec(extended=True) + indices = [i for i, x in enumerate(G) if + all(y == self.ring.zero for y in x[:-1])] + return (self.ring.ideal(*[G[i][-1] for i in indices]), + [[-x for x in R[i][1:]] for i in indices]) + # For more generators, we use I : <h1, .., hn> = intersection of + # {I : <hi> | i} + # TODO this can be done more efficiently + return reduce(lambda x, y: x.intersect(y), + (self._module_quotient(self.container.submodule(x)) for x in other.gens)) + + +class SubModuleQuotientRing(SubModule): + """ + Class for submodules of free modules over quotient rings. + + Do not instantiate this. Instead use the submodule methods. + + >>> from sympy.abc import x, y + >>> from sympy import QQ + >>> M = (QQ.old_poly_ring(x, y)/[x**2 - y**2]).free_module(2).submodule([x, x + y]) + >>> M + <[x + <x**2 - y**2>, x + y + <x**2 - y**2>]> + >>> M.contains([y**2, x**2 + x*y]) + True + >>> M.contains([x, y]) + False + + Attributes: + + - quot - the subquotient of `R^n/IR^n` generated by lifts of our generators + """ + + def __init__(self, gens, container): + SubModule.__init__(self, gens, container) + self.quot = self.container.quot.submodule( + *[self.container.lift(x) for x in self.gens]) + + def _contains(self, elem): + return self.quot._contains(self.container.lift(elem)) + + def _syzygies(self): + return [tuple(self.ring.convert(y, self.quot.ring) for y in x) + for x in self.quot._syzygies()] + + def _in_terms_of_generators(self, elem): + return [self.ring.convert(x, self.quot.ring) for x in + self.quot._in_terms_of_generators(self.container.lift(elem))] + +########################################################################## +## Quotient Modules ###################################################### +########################################################################## + + +class QuotientModuleElement(ModuleElement): + """Element of a quotient module.""" + + def eq(self, d1, d2): + """Equality comparison.""" + return self.module.killed_module.contains(d1 - d2) + + def __repr__(self): + return repr(self.data) + " + " + repr(self.module.killed_module) + + +class QuotientModule(Module): + """ + Class for quotient modules. + + Do not instantiate this directly. For subquotients, see the + SubQuotientModule class. + + Attributes: + + - base - the base module we are a quotient of + - killed_module - the submodule used to form the quotient + - rank - the rank of the base module + """ + + dtype = QuotientModuleElement + + def __init__(self, ring, base, submodule): + Module.__init__(self, ring) + if not base.is_submodule(submodule): + raise ValueError('%s is not a submodule of %s' % (submodule, base)) + self.base = base + self.killed_module = submodule + self.rank = base.rank + + def __repr__(self): + return repr(self.base) + "/" + repr(self.killed_module) + + def is_zero(self): + """ + Return True if ``self`` is a zero module. + + This happens if and only if the base module is the same as the + submodule being killed. 
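The quotient modules defined here identify elements that differ by something in the killed submodule. A small illustrative sketch, assuming module elements compare with == via the eq() hook shown above:

from sympy import QQ
from sympy.abc import x

F = QQ.old_poly_ring(x).free_module(2)
Q = F / [(x, x)]
# (x, x) is killed in the quotient, so [x, x] and [0, 0] become the same element.
# Assumes quotient-module elements support == through the eq() comparison above.
print(Q.convert([x, x]) == Q.convert([0, 0]))   # expected: True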
+ + Examples + ======== + + >>> from sympy.abc import x + >>> from sympy import QQ + >>> F = QQ.old_poly_ring(x).free_module(2) + >>> (F/[(1, 0)]).is_zero() + False + >>> (F/[(1, 0), (0, 1)]).is_zero() + True + """ + return self.base == self.killed_module + + def is_submodule(self, other): + """ + Return True if ``other`` is a submodule of ``self``. + + Examples + ======== + + >>> from sympy.abc import x + >>> from sympy import QQ + >>> Q = QQ.old_poly_ring(x).free_module(2) / [(x, x)] + >>> S = Q.submodule([1, 0]) + >>> Q.is_submodule(S) + True + >>> S.is_submodule(Q) + False + """ + if isinstance(other, QuotientModule): + return self.killed_module == other.killed_module and \ + self.base.is_submodule(other.base) + if isinstance(other, SubQuotientModule): + return other.container == self + return False + + def submodule(self, *gens, **opts): + """ + Generate a submodule. + + This is the same as taking a quotient of a submodule of the base + module. + + Examples + ======== + + >>> from sympy.abc import x + >>> from sympy import QQ + >>> Q = QQ.old_poly_ring(x).free_module(2) / [(x, x)] + >>> Q.submodule([x, 0]) + <[x, 0] + <[x, x]>> + """ + return SubQuotientModule(gens, self, **opts) + + def convert(self, elem, M=None): + """ + Convert ``elem`` into the internal representation. + + This method is called implicitly whenever computations involve elements + not in the internal representation. + + Examples + ======== + + >>> from sympy.abc import x + >>> from sympy import QQ + >>> F = QQ.old_poly_ring(x).free_module(2) / [(1, 2), (1, x)] + >>> F.convert([1, 0]) + [1, 0] + <[1, 2], [1, x]> + """ + if isinstance(elem, QuotientModuleElement): + if elem.module is self: + return elem + if self.killed_module.is_submodule(elem.module.killed_module): + return QuotientModuleElement(self, self.base.convert(elem.data)) + raise CoercionFailed + return QuotientModuleElement(self, self.base.convert(elem)) + + def identity_hom(self): + """ + Return the identity homomorphism on ``self``. + + Examples + ======== + + >>> from sympy.abc import x + >>> from sympy import QQ + >>> M = QQ.old_poly_ring(x).free_module(2) / [(1, 2), (1, x)] + >>> M.identity_hom() + Matrix([ + [1, 0], : QQ[x]**2/<[1, 2], [1, x]> -> QQ[x]**2/<[1, 2], [1, x]> + [0, 1]]) + """ + return self.base.identity_hom().quotient_codomain( + self.killed_module).quotient_domain(self.killed_module) + + def quotient_hom(self): + """ + Return the quotient homomorphism to ``self``. + + That is, return a homomorphism representing the natural map from + ``self.base`` to ``self``. + + Examples + ======== + + >>> from sympy.abc import x + >>> from sympy import QQ + >>> M = QQ.old_poly_ring(x).free_module(2) / [(1, 2), (1, x)] + >>> M.quotient_hom() + Matrix([ + [1, 0], : QQ[x]**2 -> QQ[x]**2/<[1, 2], [1, x]> + [0, 1]]) + """ + return self.base.identity_hom().quotient_codomain( + self.killed_module) diff --git a/.venv/Lib/site-packages/sympy/polys/appellseqs.py b/.venv/Lib/site-packages/sympy/polys/appellseqs.py new file mode 100644 index 0000000000000000000000000000000000000000..ac10fe3d1f1e60ccdf46cdae4eb5b8a969500a3e --- /dev/null +++ b/.venv/Lib/site-packages/sympy/polys/appellseqs.py @@ -0,0 +1,269 @@ +r""" +Efficient functions for generating Appell sequences. + +An Appell sequence is a zero-indexed sequence of polynomials `p_i(x)` +satisfying `p_{i+1}'(x)=(i+1)p_i(x)` for all `i`. This definition leads +to the following iterative algorithm: + +.. 
math :: p_0(x) = c_0,\ p_i(x) = i \int_0^x p_{i-1}(t)\,dt + c_i + +The constant coefficients `c_i` are usually determined from the +just-evaluated integral and `i`. + +Appell sequences satisfy the following identity from umbral calculus: + +.. math :: p_n(x+y) = \sum_{k=0}^n \binom{n}{k} p_k(x) y^{n-k} + +References +========== + +.. [1] https://en.wikipedia.org/wiki/Appell_sequence +.. [2] Peter Luschny, "An introduction to the Bernoulli function", + https://arxiv.org/abs/2009.06743 +""" +from sympy.polys.densearith import dup_mul_ground, dup_sub_ground, dup_quo_ground +from sympy.polys.densetools import dup_eval, dup_integrate +from sympy.polys.domains import ZZ, QQ +from sympy.polys.polytools import named_poly +from sympy.utilities import public + +def dup_bernoulli(n, K): + """Low-level implementation of Bernoulli polynomials.""" + if n < 1: + return [K.one] + p = [K.one, K(-1,2)] + for i in range(2, n+1): + p = dup_integrate(dup_mul_ground(p, K(i), K), 1, K) + if i % 2 == 0: + p = dup_sub_ground(p, dup_eval(p, K(1,2), K) * K(1<<(i-1), (1<>> from sympy import summation + >>> from sympy.abc import x + >>> from sympy.polys import bernoulli_poly + >>> bernoulli_poly(5, x) + x**5 - 5*x**4/2 + 5*x**3/3 - x/6 + + >>> def psum(p, a, b): + ... return (bernoulli_poly(p+1,b+1) - bernoulli_poly(p+1,a)) / (p+1) + >>> psum(4, -6, 27) + 3144337 + >>> summation(x**4, (x, -6, 27)) + 3144337 + + >>> psum(1, 1, x).factor() + x*(x + 1)/2 + >>> psum(2, 1, x).factor() + x*(x + 1)*(2*x + 1)/6 + >>> psum(3, 1, x).factor() + x**2*(x + 1)**2/4 + + Parameters + ========== + + n : int + Degree of the polynomial. + x : optional + polys : bool, optional + If True, return a Poly, otherwise (default) return an expression. + + See Also + ======== + + sympy.functions.combinatorial.numbers.bernoulli + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Bernoulli_polynomials + """ + return named_poly(n, dup_bernoulli, QQ, "Bernoulli polynomial", (x,), polys) + +def dup_bernoulli_c(n, K): + """Low-level implementation of central Bernoulli polynomials.""" + p = [K.one] + for i in range(1, n+1): + p = dup_integrate(dup_mul_ground(p, K(i), K), 1, K) + if i % 2 == 0: + p = dup_sub_ground(p, dup_eval(p, K.one, K) * K((1<<(i-1))-1, (1<>> from sympy import bernoulli, euler, genocchi + >>> from sympy.abc import x + >>> from sympy.polys import andre_poly + >>> andre_poly(9, x) + x**9 - 36*x**7 + 630*x**5 - 5124*x**3 + 12465*x + + >>> [andre_poly(n, 0) for n in range(11)] + [1, 0, -1, 0, 5, 0, -61, 0, 1385, 0, -50521] + >>> [euler(n) for n in range(11)] + [1, 0, -1, 0, 5, 0, -61, 0, 1385, 0, -50521] + >>> [andre_poly(n-1, 1) * n / (4**n - 2**n) for n in range(1, 11)] + [1/2, 1/6, 0, -1/30, 0, 1/42, 0, -1/30, 0, 5/66] + >>> [bernoulli(n) for n in range(1, 11)] + [1/2, 1/6, 0, -1/30, 0, 1/42, 0, -1/30, 0, 5/66] + >>> [-andre_poly(n-1, -1) * n / (-2)**(n-1) for n in range(1, 11)] + [-1, -1, 0, 1, 0, -3, 0, 17, 0, -155] + >>> [genocchi(n) for n in range(1, 11)] + [-1, -1, 0, 1, 0, -3, 0, 17, 0, -155] + + >>> [abs(andre_poly(n, n%2)) for n in range(11)] + [1, 1, 1, 2, 5, 16, 61, 272, 1385, 7936, 50521] + + Parameters + ========== + + n : int + Degree of the polynomial. + x : optional + polys : bool, optional + If True, return a Poly, otherwise (default) return an expression. + + See Also + ======== + + sympy.functions.combinatorial.numbers.andre + + References + ========== + + .. 
[1] Peter Luschny, "An introduction to the Bernoulli function", + https://arxiv.org/abs/2009.06743 + """ + return named_poly(n, dup_andre, ZZ, "Andre polynomial", (x,), polys) diff --git a/.venv/Lib/site-packages/sympy/polys/fglmtools.py b/.venv/Lib/site-packages/sympy/polys/fglmtools.py new file mode 100644 index 0000000000000000000000000000000000000000..d68fe5bc2a40741e39b89163d393f7b57e6b1c49 --- /dev/null +++ b/.venv/Lib/site-packages/sympy/polys/fglmtools.py @@ -0,0 +1,153 @@ +"""Implementation of matrix FGLM Groebner basis conversion algorithm. """ + + +from sympy.polys.monomials import monomial_mul, monomial_div + +def matrix_fglm(F, ring, O_to): + """ + Converts the reduced Groebner basis ``F`` of a zero-dimensional + ideal w.r.t. ``O_from`` to a reduced Groebner basis + w.r.t. ``O_to``. + + References + ========== + + .. [1] J.C. Faugere, P. Gianni, D. Lazard, T. Mora (1994). Efficient + Computation of Zero-dimensional Groebner Bases by Change of + Ordering + """ + domain = ring.domain + ngens = ring.ngens + + ring_to = ring.clone(order=O_to) + + old_basis = _basis(F, ring) + M = _representing_matrices(old_basis, F, ring) + + # V contains the normalforms (wrt O_from) of S + S = [ring.zero_monom] + V = [[domain.one] + [domain.zero] * (len(old_basis) - 1)] + G = [] + + L = [(i, 0) for i in range(ngens)] # (i, j) corresponds to x_i * S[j] + L.sort(key=lambda k_l: O_to(_incr_k(S[k_l[1]], k_l[0])), reverse=True) + t = L.pop() + + P = _identity_matrix(len(old_basis), domain) + + while True: + s = len(S) + v = _matrix_mul(M[t[0]], V[t[1]]) + _lambda = _matrix_mul(P, v) + + if all(_lambda[i] == domain.zero for i in range(s, len(old_basis))): + # there is a linear combination of v by V + lt = ring.term_new(_incr_k(S[t[1]], t[0]), domain.one) + rest = ring.from_dict({S[i]: _lambda[i] for i in range(s)}) + + g = (lt - rest).set_ring(ring_to) + if g: + G.append(g) + else: + # v is linearly independent from V + P = _update(s, _lambda, P) + S.append(_incr_k(S[t[1]], t[0])) + V.append(v) + + L.extend([(i, s) for i in range(ngens)]) + L = list(set(L)) + L.sort(key=lambda k_l: O_to(_incr_k(S[k_l[1]], k_l[0])), reverse=True) + + L = [(k, l) for (k, l) in L if all(monomial_div(_incr_k(S[l], k), g.LM) is None for g in G)] + + if not L: + G = [ g.monic() for g in G ] + return sorted(G, key=lambda g: O_to(g.LM), reverse=True) + + t = L.pop() + + +def _incr_k(m, k): + return tuple(list(m[:k]) + [m[k] + 1] + list(m[k + 1:])) + + +def _identity_matrix(n, domain): + M = [[domain.zero]*n for _ in range(n)] + + for i in range(n): + M[i][i] = domain.one + + return M + + +def _matrix_mul(M, v): + return [sum(row[i] * v[i] for i in range(len(v))) for row in M] + + +def _update(s, _lambda, P): + """ + Update ``P`` such that for the updated `P'` `P' v = e_{s}`. + """ + k = min(j for j in range(s, len(_lambda)) if _lambda[j] != 0) + + for r in range(len(_lambda)): + if r != k: + P[r] = [P[r][j] - (P[k][j] * _lambda[r]) / _lambda[k] for j in range(len(P[r]))] + + P[k] = [P[k][j] / _lambda[k] for j in range(len(P[k]))] + P[k], P[s] = P[s], P[k] + + return P + + +def _representing_matrices(basis, G, ring): + r""" + Compute the matrices corresponding to the linear maps `m \mapsto + x_i m` for all variables `x_i`. 
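For the order conversion implemented by matrix_fglm above, here is a usage sketch through the public groebner() interface; it assumes GroebnerBasis.fglm accepts the target order name and that the ideal is zero-dimensional, in which case the two printed bases should coincide:

from sympy import groebner
from sympy.abc import x, y

F = [x**2 - 3*y - x + 1, y**2 - 2*x + y - 1]
G = groebner(F, x, y, order='grlex')
# Assumes GroebnerBasis.fglm('lex') is available; it converts the grlex basis
# to a lex basis without recomputing from scratch.
print(G.fglm('lex'))
print(groebner(F, x, y, order='lex'))   # the two bases should agree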
+ """ + domain = ring.domain + u = ring.ngens-1 + + def var(i): + return tuple([0] * i + [1] + [0] * (u - i)) + + def representing_matrix(m): + M = [[domain.zero] * len(basis) for _ in range(len(basis))] + + for i, v in enumerate(basis): + r = ring.term_new(monomial_mul(m, v), domain.one).rem(G) + + for monom, coeff in r.terms(): + j = basis.index(monom) + M[j][i] = coeff + + return M + + return [representing_matrix(var(i)) for i in range(u + 1)] + + +def _basis(G, ring): + r""" + Computes a list of monomials which are not divisible by the leading + monomials wrt to ``O`` of ``G``. These monomials are a basis of + `K[X_1, \ldots, X_n]/(G)`. + """ + order = ring.order + + leading_monomials = [g.LM for g in G] + candidates = [ring.zero_monom] + basis = [] + + while candidates: + t = candidates.pop() + basis.append(t) + + new_candidates = [_incr_k(t, k) for k in range(ring.ngens) + if all(monomial_div(_incr_k(t, k), lmg) is None + for lmg in leading_monomials)] + candidates.extend(new_candidates) + candidates.sort(key=order, reverse=True) + + basis = list(set(basis)) + + return sorted(basis, key=order) diff --git a/.venv/Lib/site-packages/sympy/polys/fields.py b/.venv/Lib/site-packages/sympy/polys/fields.py new file mode 100644 index 0000000000000000000000000000000000000000..a3f239c4ed10dc769def957c520738e45bc1de8f --- /dev/null +++ b/.venv/Lib/site-packages/sympy/polys/fields.py @@ -0,0 +1,631 @@ +"""Sparse rational function fields. """ + +from __future__ import annotations +from typing import Any +from functools import reduce + +from operator import add, mul, lt, le, gt, ge + +from sympy.core.expr import Expr +from sympy.core.mod import Mod +from sympy.core.numbers import Exp1 +from sympy.core.singleton import S +from sympy.core.symbol import Symbol +from sympy.core.sympify import CantSympify, sympify +from sympy.functions.elementary.exponential import ExpBase +from sympy.polys.domains.domainelement import DomainElement +from sympy.polys.domains.fractionfield import FractionField +from sympy.polys.domains.polynomialring import PolynomialRing +from sympy.polys.constructor import construct_domain +from sympy.polys.orderings import lex +from sympy.polys.polyerrors import CoercionFailed +from sympy.polys.polyoptions import build_options +from sympy.polys.polyutils import _parallel_dict_from_expr +from sympy.polys.rings import PolyElement +from sympy.printing.defaults import DefaultPrinting +from sympy.utilities import public +from sympy.utilities.iterables import is_sequence +from sympy.utilities.magic import pollute + +@public +def field(symbols, domain, order=lex): + """Construct new rational function field returning (field, x1, ..., xn). """ + _field = FracField(symbols, domain, order) + return (_field,) + _field.gens + +@public +def xfield(symbols, domain, order=lex): + """Construct new rational function field returning (field, (x1, ..., xn)). """ + _field = FracField(symbols, domain, order) + return (_field, _field.gens) + +@public +def vfield(symbols, domain, order=lex): + """Construct new rational function field and inject generators into global namespace. """ + _field = FracField(symbols, domain, order) + pollute([ sym.name for sym in _field.symbols ], _field.gens) + return _field + +@public +def sfield(exprs, *symbols, **options): + """Construct a field deriving generators and domain + from options and input expressions. 
+ + Parameters + ========== + + exprs : py:class:`~.Expr` or sequence of :py:class:`~.Expr` (sympifiable) + + symbols : sequence of :py:class:`~.Symbol`/:py:class:`~.Expr` + + options : keyword arguments understood by :py:class:`~.Options` + + Examples + ======== + + >>> from sympy import exp, log, symbols, sfield + + >>> x = symbols("x") + >>> K, f = sfield((x*log(x) + 4*x**2)*exp(1/x + log(x)/3)/x**2) + >>> K + Rational function field in x, exp(1/x), log(x), x**(1/3) over ZZ with lex order + >>> f + (4*x**2*(exp(1/x)) + x*(exp(1/x))*(log(x)))/((x**(1/3))**5) + """ + single = False + if not is_sequence(exprs): + exprs, single = [exprs], True + + exprs = list(map(sympify, exprs)) + opt = build_options(symbols, options) + numdens = [] + for expr in exprs: + numdens.extend(expr.as_numer_denom()) + reps, opt = _parallel_dict_from_expr(numdens, opt) + + if opt.domain is None: + # NOTE: this is inefficient because construct_domain() automatically + # performs conversion to the target domain. It shouldn't do this. + coeffs = sum([list(rep.values()) for rep in reps], []) + opt.domain, _ = construct_domain(coeffs, opt=opt) + + _field = FracField(opt.gens, opt.domain, opt.order) + fracs = [] + for i in range(0, len(reps), 2): + fracs.append(_field(tuple(reps[i:i+2]))) + + if single: + return (_field, fracs[0]) + else: + return (_field, fracs) + +_field_cache: dict[Any, Any] = {} + +class FracField(DefaultPrinting): + """Multivariate distributed rational function field. """ + + def __new__(cls, symbols, domain, order=lex): + from sympy.polys.rings import PolyRing + ring = PolyRing(symbols, domain, order) + symbols = ring.symbols + ngens = ring.ngens + domain = ring.domain + order = ring.order + + _hash_tuple = (cls.__name__, symbols, ngens, domain, order) + obj = _field_cache.get(_hash_tuple) + + if obj is None: + obj = object.__new__(cls) + obj._hash_tuple = _hash_tuple + obj._hash = hash(_hash_tuple) + obj.ring = ring + obj.dtype = type("FracElement", (FracElement,), {"field": obj}) + obj.symbols = symbols + obj.ngens = ngens + obj.domain = domain + obj.order = order + + obj.zero = obj.dtype(ring.zero) + obj.one = obj.dtype(ring.one) + + obj.gens = obj._gens() + + for symbol, generator in zip(obj.symbols, obj.gens): + if isinstance(symbol, Symbol): + name = symbol.name + + if not hasattr(obj, name): + setattr(obj, name, generator) + + _field_cache[_hash_tuple] = obj + + return obj + + def _gens(self): + """Return a list of polynomial generators. 
""" + return tuple([ self.dtype(gen) for gen in self.ring.gens ]) + + def __getnewargs__(self): + return (self.symbols, self.domain, self.order) + + def __hash__(self): + return self._hash + + def index(self, gen): + if isinstance(gen, self.dtype): + return self.ring.index(gen.to_poly()) + else: + raise ValueError("expected a %s, got %s instead" % (self.dtype,gen)) + + def __eq__(self, other): + return isinstance(other, FracField) and \ + (self.symbols, self.ngens, self.domain, self.order) == \ + (other.symbols, other.ngens, other.domain, other.order) + + def __ne__(self, other): + return not self == other + + def raw_new(self, numer, denom=None): + return self.dtype(numer, denom) + def new(self, numer, denom=None): + if denom is None: denom = self.ring.one + numer, denom = numer.cancel(denom) + return self.raw_new(numer, denom) + + def domain_new(self, element): + return self.domain.convert(element) + + def ground_new(self, element): + try: + return self.new(self.ring.ground_new(element)) + except CoercionFailed: + domain = self.domain + + if not domain.is_Field and domain.has_assoc_Field: + ring = self.ring + ground_field = domain.get_field() + element = ground_field.convert(element) + numer = ring.ground_new(ground_field.numer(element)) + denom = ring.ground_new(ground_field.denom(element)) + return self.raw_new(numer, denom) + else: + raise + + def field_new(self, element): + if isinstance(element, FracElement): + if self == element.field: + return element + + if isinstance(self.domain, FractionField) and \ + self.domain.field == element.field: + return self.ground_new(element) + elif isinstance(self.domain, PolynomialRing) and \ + self.domain.ring.to_field() == element.field: + return self.ground_new(element) + else: + raise NotImplementedError("conversion") + elif isinstance(element, PolyElement): + denom, numer = element.clear_denoms() + + if isinstance(self.domain, PolynomialRing) and \ + numer.ring == self.domain.ring: + numer = self.ring.ground_new(numer) + elif isinstance(self.domain, FractionField) and \ + numer.ring == self.domain.field.to_ring(): + numer = self.ring.ground_new(numer) + else: + numer = numer.set_ring(self.ring) + + denom = self.ring.ground_new(denom) + return self.raw_new(numer, denom) + elif isinstance(element, tuple) and len(element) == 2: + numer, denom = list(map(self.ring.ring_new, element)) + return self.new(numer, denom) + elif isinstance(element, str): + raise NotImplementedError("parsing") + elif isinstance(element, Expr): + return self.from_expr(element) + else: + return self.ground_new(element) + + __call__ = field_new + + def _rebuild_expr(self, expr, mapping): + domain = self.domain + powers = tuple((gen, gen.as_base_exp()) for gen in mapping.keys() + if gen.is_Pow or isinstance(gen, ExpBase)) + + def _rebuild(expr): + generator = mapping.get(expr) + + if generator is not None: + return generator + elif expr.is_Add: + return reduce(add, list(map(_rebuild, expr.args))) + elif expr.is_Mul: + return reduce(mul, list(map(_rebuild, expr.args))) + elif expr.is_Pow or isinstance(expr, (ExpBase, Exp1)): + b, e = expr.as_base_exp() + # look for bg**eg whose integer power may be b**e + for gen, (bg, eg) in powers: + if bg == b and Mod(e, eg) == 0: + return mapping.get(gen)**int(e/eg) + if e.is_Integer and e is not S.One: + return _rebuild(b)**int(e) + elif mapping.get(1/expr) is not None: + return 1/mapping.get(1/expr) + + try: + return domain.convert(expr) + except CoercionFailed: + if not domain.is_Field and domain.has_assoc_Field: + return 
domain.get_field().convert(expr) + else: + raise + + return _rebuild(expr) + + def from_expr(self, expr): + mapping = dict(list(zip(self.symbols, self.gens))) + + try: + frac = self._rebuild_expr(sympify(expr), mapping) + except CoercionFailed: + raise ValueError("expected an expression convertible to a rational function in %s, got %s" % (self, expr)) + else: + return self.field_new(frac) + + def to_domain(self): + return FractionField(self) + + def to_ring(self): + from sympy.polys.rings import PolyRing + return PolyRing(self.symbols, self.domain, self.order) + +class FracElement(DomainElement, DefaultPrinting, CantSympify): + """Element of multivariate distributed rational function field. """ + + def __init__(self, numer, denom=None): + if denom is None: + denom = self.field.ring.one + elif not denom: + raise ZeroDivisionError("zero denominator") + + self.numer = numer + self.denom = denom + + def raw_new(f, numer, denom): + return f.__class__(numer, denom) + def new(f, numer, denom): + return f.raw_new(*numer.cancel(denom)) + + def to_poly(f): + if f.denom != 1: + raise ValueError("f.denom should be 1") + return f.numer + + def parent(self): + return self.field.to_domain() + + def __getnewargs__(self): + return (self.field, self.numer, self.denom) + + _hash = None + + def __hash__(self): + _hash = self._hash + if _hash is None: + self._hash = _hash = hash((self.field, self.numer, self.denom)) + return _hash + + def copy(self): + return self.raw_new(self.numer.copy(), self.denom.copy()) + + def set_field(self, new_field): + if self.field == new_field: + return self + else: + new_ring = new_field.ring + numer = self.numer.set_ring(new_ring) + denom = self.denom.set_ring(new_ring) + return new_field.new(numer, denom) + + def as_expr(self, *symbols): + return self.numer.as_expr(*symbols)/self.denom.as_expr(*symbols) + + def __eq__(f, g): + if isinstance(g, FracElement) and f.field == g.field: + return f.numer == g.numer and f.denom == g.denom + else: + return f.numer == g and f.denom == f.field.ring.one + + def __ne__(f, g): + return not f == g + + def __bool__(f): + return bool(f.numer) + + def sort_key(self): + return (self.denom.sort_key(), self.numer.sort_key()) + + def _cmp(f1, f2, op): + if isinstance(f2, f1.field.dtype): + return op(f1.sort_key(), f2.sort_key()) + else: + return NotImplemented + + def __lt__(f1, f2): + return f1._cmp(f2, lt) + def __le__(f1, f2): + return f1._cmp(f2, le) + def __gt__(f1, f2): + return f1._cmp(f2, gt) + def __ge__(f1, f2): + return f1._cmp(f2, ge) + + def __pos__(f): + """Negate all coefficients in ``f``. """ + return f.raw_new(f.numer, f.denom) + + def __neg__(f): + """Negate all coefficients in ``f``. """ + return f.raw_new(-f.numer, f.denom) + + def _extract_ground(self, element): + domain = self.field.domain + + try: + element = domain.convert(element) + except CoercionFailed: + if not domain.is_Field and domain.has_assoc_Field: + ground_field = domain.get_field() + + try: + element = ground_field.convert(element) + except CoercionFailed: + pass + else: + return -1, ground_field.numer(element), ground_field.denom(element) + + return 0, None, None + else: + return 1, element, None + + def __add__(f, g): + """Add rational functions ``f`` and ``g``. 
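A short sketch of FracField arithmetic through the public field() constructor, showing that the addition path above shares the denominator and that new() cancels the common factor (here x and y are field generators, not sympy symbols):

from sympy.polys.fields import field
from sympy.polys.domains import QQ

_, x, y = field("x,y", QQ)
f = x/(x + y) + y/(x + y)
# Both summands have denominator x + y, so the sum cancels to the constant 1.
print(f)        # expected: 1
print(f == 1)   # expected: True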
""" + field = f.field + + if not g: + return f + elif not f: + return g + elif isinstance(g, field.dtype): + if f.denom == g.denom: + return f.new(f.numer + g.numer, f.denom) + else: + return f.new(f.numer*g.denom + f.denom*g.numer, f.denom*g.denom) + elif isinstance(g, field.ring.dtype): + return f.new(f.numer + f.denom*g, f.denom) + else: + if isinstance(g, FracElement): + if isinstance(field.domain, FractionField) and field.domain.field == g.field: + pass + elif isinstance(g.field.domain, FractionField) and g.field.domain.field == field: + return g.__radd__(f) + else: + return NotImplemented + elif isinstance(g, PolyElement): + if isinstance(field.domain, PolynomialRing) and field.domain.ring == g.ring: + pass + else: + return g.__radd__(f) + + return f.__radd__(g) + + def __radd__(f, c): + if isinstance(c, f.field.ring.dtype): + return f.new(f.numer + f.denom*c, f.denom) + + op, g_numer, g_denom = f._extract_ground(c) + + if op == 1: + return f.new(f.numer + f.denom*g_numer, f.denom) + elif not op: + return NotImplemented + else: + return f.new(f.numer*g_denom + f.denom*g_numer, f.denom*g_denom) + + def __sub__(f, g): + """Subtract rational functions ``f`` and ``g``. """ + field = f.field + + if not g: + return f + elif not f: + return -g + elif isinstance(g, field.dtype): + if f.denom == g.denom: + return f.new(f.numer - g.numer, f.denom) + else: + return f.new(f.numer*g.denom - f.denom*g.numer, f.denom*g.denom) + elif isinstance(g, field.ring.dtype): + return f.new(f.numer - f.denom*g, f.denom) + else: + if isinstance(g, FracElement): + if isinstance(field.domain, FractionField) and field.domain.field == g.field: + pass + elif isinstance(g.field.domain, FractionField) and g.field.domain.field == field: + return g.__rsub__(f) + else: + return NotImplemented + elif isinstance(g, PolyElement): + if isinstance(field.domain, PolynomialRing) and field.domain.ring == g.ring: + pass + else: + return g.__rsub__(f) + + op, g_numer, g_denom = f._extract_ground(g) + + if op == 1: + return f.new(f.numer - f.denom*g_numer, f.denom) + elif not op: + return NotImplemented + else: + return f.new(f.numer*g_denom - f.denom*g_numer, f.denom*g_denom) + + def __rsub__(f, c): + if isinstance(c, f.field.ring.dtype): + return f.new(-f.numer + f.denom*c, f.denom) + + op, g_numer, g_denom = f._extract_ground(c) + + if op == 1: + return f.new(-f.numer + f.denom*g_numer, f.denom) + elif not op: + return NotImplemented + else: + return f.new(-f.numer*g_denom + f.denom*g_numer, f.denom*g_denom) + + def __mul__(f, g): + """Multiply rational functions ``f`` and ``g``. 
""" + field = f.field + + if not f or not g: + return field.zero + elif isinstance(g, field.dtype): + return f.new(f.numer*g.numer, f.denom*g.denom) + elif isinstance(g, field.ring.dtype): + return f.new(f.numer*g, f.denom) + else: + if isinstance(g, FracElement): + if isinstance(field.domain, FractionField) and field.domain.field == g.field: + pass + elif isinstance(g.field.domain, FractionField) and g.field.domain.field == field: + return g.__rmul__(f) + else: + return NotImplemented + elif isinstance(g, PolyElement): + if isinstance(field.domain, PolynomialRing) and field.domain.ring == g.ring: + pass + else: + return g.__rmul__(f) + + return f.__rmul__(g) + + def __rmul__(f, c): + if isinstance(c, f.field.ring.dtype): + return f.new(f.numer*c, f.denom) + + op, g_numer, g_denom = f._extract_ground(c) + + if op == 1: + return f.new(f.numer*g_numer, f.denom) + elif not op: + return NotImplemented + else: + return f.new(f.numer*g_numer, f.denom*g_denom) + + def __truediv__(f, g): + """Computes quotient of fractions ``f`` and ``g``. """ + field = f.field + + if not g: + raise ZeroDivisionError + elif isinstance(g, field.dtype): + return f.new(f.numer*g.denom, f.denom*g.numer) + elif isinstance(g, field.ring.dtype): + return f.new(f.numer, f.denom*g) + else: + if isinstance(g, FracElement): + if isinstance(field.domain, FractionField) and field.domain.field == g.field: + pass + elif isinstance(g.field.domain, FractionField) and g.field.domain.field == field: + return g.__rtruediv__(f) + else: + return NotImplemented + elif isinstance(g, PolyElement): + if isinstance(field.domain, PolynomialRing) and field.domain.ring == g.ring: + pass + else: + return g.__rtruediv__(f) + + op, g_numer, g_denom = f._extract_ground(g) + + if op == 1: + return f.new(f.numer, f.denom*g_numer) + elif not op: + return NotImplemented + else: + return f.new(f.numer*g_denom, f.denom*g_numer) + + def __rtruediv__(f, c): + if not f: + raise ZeroDivisionError + elif isinstance(c, f.field.ring.dtype): + return f.new(f.denom*c, f.numer) + + op, g_numer, g_denom = f._extract_ground(c) + + if op == 1: + return f.new(f.denom*g_numer, f.numer) + elif not op: + return NotImplemented + else: + return f.new(f.denom*g_numer, f.numer*g_denom) + + def __pow__(f, n): + """Raise ``f`` to a non-negative power ``n``. """ + if n >= 0: + return f.raw_new(f.numer**n, f.denom**n) + elif not f: + raise ZeroDivisionError + else: + return f.raw_new(f.denom**-n, f.numer**-n) + + def diff(f, x): + """Computes partial derivative in ``x``. 
+ + Examples + ======== + + >>> from sympy.polys.fields import field + >>> from sympy.polys.domains import ZZ + + >>> _, x, y, z = field("x,y,z", ZZ) + >>> ((x**2 + y)/(z + 1)).diff(x) + 2*x/(z + 1) + + """ + x = x.to_poly() + return f.new(f.numer.diff(x)*f.denom - f.numer*f.denom.diff(x), f.denom**2) + + def __call__(f, *values): + if 0 < len(values) <= f.field.ngens: + return f.evaluate(list(zip(f.field.gens, values))) + else: + raise ValueError("expected at least 1 and at most %s values, got %s" % (f.field.ngens, len(values))) + + def evaluate(f, x, a=None): + if isinstance(x, list) and a is None: + x = [ (X.to_poly(), a) for X, a in x ] + numer, denom = f.numer.evaluate(x), f.denom.evaluate(x) + else: + x = x.to_poly() + numer, denom = f.numer.evaluate(x, a), f.denom.evaluate(x, a) + + field = numer.ring.to_field() + return field.new(numer, denom) + + def subs(f, x, a=None): + if isinstance(x, list) and a is None: + x = [ (X.to_poly(), a) for X, a in x ] + numer, denom = f.numer.subs(x), f.denom.subs(x) + else: + x = x.to_poly() + numer, denom = f.numer.subs(x, a), f.denom.subs(x, a) + + return f.new(numer, denom) + + def compose(f, x, a=None): + raise NotImplementedError diff --git a/.venv/Lib/site-packages/sympy/polys/galoistools.py b/.venv/Lib/site-packages/sympy/polys/galoistools.py new file mode 100644 index 0000000000000000000000000000000000000000..b09f85057eced59b8054c6007f2b291a35a2fafb --- /dev/null +++ b/.venv/Lib/site-packages/sympy/polys/galoistools.py @@ -0,0 +1,2532 @@ +"""Dense univariate polynomials with coefficients in Galois fields. """ + +from math import ceil as _ceil, sqrt as _sqrt, prod + +from sympy.core.random import uniform, _randint +from sympy.external.gmpy import SYMPY_INTS, MPZ, invert +from sympy.polys.polyconfig import query +from sympy.polys.polyerrors import ExactQuotientFailed +from sympy.polys.polyutils import _sort_factors + + +def gf_crt(U, M, K=None): + """ + Chinese Remainder Theorem. + + Given a set of integer residues ``u_0,...,u_n`` and a set of + co-prime integer moduli ``m_0,...,m_n``, returns an integer + ``u``, such that ``u = u_i mod m_i`` for ``i = ``0,...,n``. + + Examples + ======== + + Consider a set of residues ``U = [49, 76, 65]`` + and a set of moduli ``M = [99, 97, 95]``. Then we have:: + + >>> from sympy.polys.domains import ZZ + >>> from sympy.polys.galoistools import gf_crt + + >>> gf_crt([49, 76, 65], [99, 97, 95], ZZ) + 639985 + + This is the correct result because:: + + >>> [639985 % m for m in [99, 97, 95]] + [49, 76, 65] + + Note: this is a low-level routine with no error checking. + + See Also + ======== + + sympy.ntheory.modular.crt : a higher level crt routine + sympy.ntheory.modular.solve_congruence + + """ + p = prod(M, start=K.one) + v = K.zero + + for u, m in zip(U, M): + e = p // m + s, _, _ = K.gcdex(e, m) + v += e*(u*s % m) + + return v % p + + +def gf_crt1(M, K): + """ + First part of the Chinese Remainder Theorem. + + Examples + ======== + + >>> from sympy.polys.domains import ZZ + >>> from sympy.polys.galoistools import gf_crt, gf_crt1, gf_crt2 + >>> U = [49, 76, 65] + >>> M = [99, 97, 95] + + The following two codes have the same result. + + >>> gf_crt(U, M, ZZ) + 639985 + + >>> p, E, S = gf_crt1(M, ZZ) + >>> gf_crt2(U, M, p, E, S, ZZ) + 639985 + + However, it is faster when we want to fix ``M`` and + compute for multiple U, i.e. the following cases: + + >>> p, E, S = gf_crt1(M, ZZ) + >>> Us = [[49, 76, 65], [23, 42, 67]] + >>> for U in Us: + ... 
print(gf_crt2(U, M, p, E, S, ZZ)) + 639985 + 236237 + + See Also + ======== + + sympy.ntheory.modular.crt1 : a higher level crt routine + sympy.polys.galoistools.gf_crt + sympy.polys.galoistools.gf_crt2 + + """ + E, S = [], [] + p = prod(M, start=K.one) + + for m in M: + E.append(p // m) + S.append(K.gcdex(E[-1], m)[0] % m) + + return p, E, S + + +def gf_crt2(U, M, p, E, S, K): + """ + Second part of the Chinese Remainder Theorem. + + See ``gf_crt1`` for usage. + + Examples + ======== + + >>> from sympy.polys.domains import ZZ + >>> from sympy.polys.galoistools import gf_crt2 + + >>> U = [49, 76, 65] + >>> M = [99, 97, 95] + >>> p = 912285 + >>> E = [9215, 9405, 9603] + >>> S = [62, 24, 12] + + >>> gf_crt2(U, M, p, E, S, ZZ) + 639985 + + See Also + ======== + + sympy.ntheory.modular.crt2 : a higher level crt routine + sympy.polys.galoistools.gf_crt + sympy.polys.galoistools.gf_crt1 + + """ + v = K.zero + + for u, m, e, s in zip(U, M, E, S): + v += e*(u*s % m) + + return v % p + + +def gf_int(a, p): + """ + Coerce ``a mod p`` to an integer in the range ``[-p/2, p/2]``. + + Examples + ======== + + >>> from sympy.polys.galoistools import gf_int + + >>> gf_int(2, 7) + 2 + >>> gf_int(5, 7) + -2 + + """ + if a <= p // 2: + return a + else: + return a - p + + +def gf_degree(f): + """ + Return the leading degree of ``f``. + + Examples + ======== + + >>> from sympy.polys.galoistools import gf_degree + + >>> gf_degree([1, 1, 2, 0]) + 3 + >>> gf_degree([]) + -1 + + """ + return len(f) - 1 + + +def gf_LC(f, K): + """ + Return the leading coefficient of ``f``. + + Examples + ======== + + >>> from sympy.polys.domains import ZZ + >>> from sympy.polys.galoistools import gf_LC + + >>> gf_LC([3, 0, 1], ZZ) + 3 + + """ + if not f: + return K.zero + else: + return f[0] + + +def gf_TC(f, K): + """ + Return the trailing coefficient of ``f``. + + Examples + ======== + + >>> from sympy.polys.domains import ZZ + >>> from sympy.polys.galoistools import gf_TC + + >>> gf_TC([3, 0, 1], ZZ) + 1 + + """ + if not f: + return K.zero + else: + return f[-1] + + +def gf_strip(f): + """ + Remove leading zeros from ``f``. + + + Examples + ======== + + >>> from sympy.polys.galoistools import gf_strip + + >>> gf_strip([0, 0, 0, 3, 0, 1]) + [3, 0, 1] + + """ + if not f or f[0]: + return f + + k = 0 + + for coeff in f: + if coeff: + break + else: + k += 1 + + return f[k:] + + +def gf_trunc(f, p): + """ + Reduce all coefficients modulo ``p``. + + Examples + ======== + + >>> from sympy.polys.galoistools import gf_trunc + + >>> gf_trunc([7, -2, 3], 5) + [2, 3, 3] + + """ + return gf_strip([ a % p for a in f ]) + + +def gf_normal(f, p, K): + """ + Normalize all coefficients in ``K``. + + Examples + ======== + + >>> from sympy.polys.domains import ZZ + >>> from sympy.polys.galoistools import gf_normal + + >>> gf_normal([5, 10, 21, -3], 5, ZZ) + [1, 2] + + """ + return gf_trunc(list(map(K, f)), p) + + +def gf_from_dict(f, p, K): + """ + Create a ``GF(p)[x]`` polynomial from a dict. + + Examples + ======== + + >>> from sympy.polys.domains import ZZ + >>> from sympy.polys.galoistools import gf_from_dict + + >>> gf_from_dict({10: ZZ(4), 4: ZZ(33), 0: ZZ(-1)}, 5, ZZ) + [4, 0, 0, 0, 0, 0, 3, 0, 0, 0, 4] + + """ + n, h = max(f.keys()), [] + + if isinstance(n, SYMPY_INTS): + for k in range(n, -1, -1): + h.append(f.get(k, K.zero) % p) + else: + (n,) = n + + for k in range(n, -1, -1): + h.append(f.get((k,), K.zero) % p) + + return gf_trunc(h, p) + + +def gf_to_dict(f, p, symmetric=True): + """ + Convert a ``GF(p)[x]`` polynomial to a dict. 
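A compact sketch tying together the three CRT entry points shown above; both calls should reconstruct the residue 639985 from the doctests:

from sympy.polys.domains import ZZ
from sympy.polys.galoistools import gf_crt, gf_crt1, gf_crt2

U, M = [49, 76, 65], [99, 97, 95]
p, E, S = gf_crt1(M, ZZ)
# gf_crt does the whole computation; gf_crt1/gf_crt2 split it so M can be reused.
print(gf_crt(U, M, ZZ), gf_crt2(U, M, p, E, S, ZZ))   # expected: 639985 639985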
+ + Examples + ======== + + >>> from sympy.polys.galoistools import gf_to_dict + + >>> gf_to_dict([4, 0, 0, 0, 0, 0, 3, 0, 0, 0, 4], 5) + {0: -1, 4: -2, 10: -1} + >>> gf_to_dict([4, 0, 0, 0, 0, 0, 3, 0, 0, 0, 4], 5, symmetric=False) + {0: 4, 4: 3, 10: 4} + + """ + n, result = gf_degree(f), {} + + for k in range(0, n + 1): + if symmetric: + a = gf_int(f[n - k], p) + else: + a = f[n - k] + + if a: + result[k] = a + + return result + + +def gf_from_int_poly(f, p): + """ + Create a ``GF(p)[x]`` polynomial from ``Z[x]``. + + Examples + ======== + + >>> from sympy.polys.galoistools import gf_from_int_poly + + >>> gf_from_int_poly([7, -2, 3], 5) + [2, 3, 3] + + """ + return gf_trunc(f, p) + + +def gf_to_int_poly(f, p, symmetric=True): + """ + Convert a ``GF(p)[x]`` polynomial to ``Z[x]``. + + + Examples + ======== + + >>> from sympy.polys.galoistools import gf_to_int_poly + + >>> gf_to_int_poly([2, 3, 3], 5) + [2, -2, -2] + >>> gf_to_int_poly([2, 3, 3], 5, symmetric=False) + [2, 3, 3] + + """ + if symmetric: + return [ gf_int(c, p) for c in f ] + else: + return f + + +def gf_neg(f, p, K): + """ + Negate a polynomial in ``GF(p)[x]``. + + Examples + ======== + + >>> from sympy.polys.domains import ZZ + >>> from sympy.polys.galoistools import gf_neg + + >>> gf_neg([3, 2, 1, 0], 5, ZZ) + [2, 3, 4, 0] + + """ + return [ -coeff % p for coeff in f ] + + +def gf_add_ground(f, a, p, K): + """ + Compute ``f + a`` where ``f`` in ``GF(p)[x]`` and ``a`` in ``GF(p)``. + + Examples + ======== + + >>> from sympy.polys.domains import ZZ + >>> from sympy.polys.galoistools import gf_add_ground + + >>> gf_add_ground([3, 2, 4], 2, 5, ZZ) + [3, 2, 1] + + """ + if not f: + a = a % p + else: + a = (f[-1] + a) % p + + if len(f) > 1: + return f[:-1] + [a] + + if not a: + return [] + else: + return [a] + + +def gf_sub_ground(f, a, p, K): + """ + Compute ``f - a`` where ``f`` in ``GF(p)[x]`` and ``a`` in ``GF(p)``. + + Examples + ======== + + >>> from sympy.polys.domains import ZZ + >>> from sympy.polys.galoistools import gf_sub_ground + + >>> gf_sub_ground([3, 2, 4], 2, 5, ZZ) + [3, 2, 2] + + """ + if not f: + a = -a % p + else: + a = (f[-1] - a) % p + + if len(f) > 1: + return f[:-1] + [a] + + if not a: + return [] + else: + return [a] + + +def gf_mul_ground(f, a, p, K): + """ + Compute ``f * a`` where ``f`` in ``GF(p)[x]`` and ``a`` in ``GF(p)``. + + Examples + ======== + + >>> from sympy.polys.domains import ZZ + >>> from sympy.polys.galoistools import gf_mul_ground + + >>> gf_mul_ground([3, 2, 4], 2, 5, ZZ) + [1, 4, 3] + + """ + if not a: + return [] + else: + return [ (a*b) % p for b in f ] + + +def gf_quo_ground(f, a, p, K): + """ + Compute ``f/a`` where ``f`` in ``GF(p)[x]`` and ``a`` in ``GF(p)``. + + Examples + ======== + + >>> from sympy.polys.domains import ZZ + >>> from sympy.polys.galoistools import gf_quo_ground + + >>> gf_quo_ground(ZZ.map([3, 2, 4]), ZZ(2), 5, ZZ) + [4, 1, 2] + + """ + return gf_mul_ground(f, K.invert(a, p), p, K) + + +def gf_add(f, g, p, K): + """ + Add polynomials in ``GF(p)[x]``. 
+ + Examples + ======== + + >>> from sympy.polys.domains import ZZ + >>> from sympy.polys.galoistools import gf_add + + >>> gf_add([3, 2, 4], [2, 2, 2], 5, ZZ) + [4, 1] + + """ + if not f: + return g + if not g: + return f + + df = gf_degree(f) + dg = gf_degree(g) + + if df == dg: + return gf_strip([ (a + b) % p for a, b in zip(f, g) ]) + else: + k = abs(df - dg) + + if df > dg: + h, f = f[:k], f[k:] + else: + h, g = g[:k], g[k:] + + return h + [ (a + b) % p for a, b in zip(f, g) ] + + +def gf_sub(f, g, p, K): + """ + Subtract polynomials in ``GF(p)[x]``. + + Examples + ======== + + >>> from sympy.polys.domains import ZZ + >>> from sympy.polys.galoistools import gf_sub + + >>> gf_sub([3, 2, 4], [2, 2, 2], 5, ZZ) + [1, 0, 2] + + """ + if not g: + return f + if not f: + return gf_neg(g, p, K) + + df = gf_degree(f) + dg = gf_degree(g) + + if df == dg: + return gf_strip([ (a - b) % p for a, b in zip(f, g) ]) + else: + k = abs(df - dg) + + if df > dg: + h, f = f[:k], f[k:] + else: + h, g = gf_neg(g[:k], p, K), g[k:] + + return h + [ (a - b) % p for a, b in zip(f, g) ] + + +def gf_mul(f, g, p, K): + """ + Multiply polynomials in ``GF(p)[x]``. + + Examples + ======== + + >>> from sympy.polys.domains import ZZ + >>> from sympy.polys.galoistools import gf_mul + + >>> gf_mul([3, 2, 4], [2, 2, 2], 5, ZZ) + [1, 0, 3, 2, 3] + + """ + df = gf_degree(f) + dg = gf_degree(g) + + dh = df + dg + h = [0]*(dh + 1) + + for i in range(0, dh + 1): + coeff = K.zero + + for j in range(max(0, i - dg), min(i, df) + 1): + coeff += f[j]*g[i - j] + + h[i] = coeff % p + + return gf_strip(h) + + +def gf_sqr(f, p, K): + """ + Square polynomials in ``GF(p)[x]``. + + Examples + ======== + + >>> from sympy.polys.domains import ZZ + >>> from sympy.polys.galoistools import gf_sqr + + >>> gf_sqr([3, 2, 4], 5, ZZ) + [4, 2, 3, 1, 1] + + """ + df = gf_degree(f) + + dh = 2*df + h = [0]*(dh + 1) + + for i in range(0, dh + 1): + coeff = K.zero + + jmin = max(0, i - df) + jmax = min(i, df) + + n = jmax - jmin + 1 + + jmax = jmin + n // 2 - 1 + + for j in range(jmin, jmax + 1): + coeff += f[j]*f[i - j] + + coeff += coeff + + if n & 1: + elem = f[jmax + 1] + coeff += elem**2 + + h[i] = coeff % p + + return gf_strip(h) + + +def gf_add_mul(f, g, h, p, K): + """ + Returns ``f + g*h`` where ``f``, ``g``, ``h`` in ``GF(p)[x]``. + + Examples + ======== + + >>> from sympy.polys.domains import ZZ + >>> from sympy.polys.galoistools import gf_add_mul + >>> gf_add_mul([3, 2, 4], [2, 2, 2], [1, 4], 5, ZZ) + [2, 3, 2, 2] + """ + return gf_add(f, gf_mul(g, h, p, K), p, K) + + +def gf_sub_mul(f, g, h, p, K): + """ + Compute ``f - g*h`` where ``f``, ``g``, ``h`` in ``GF(p)[x]``. + + Examples + ======== + + >>> from sympy.polys.domains import ZZ + >>> from sympy.polys.galoistools import gf_sub_mul + + >>> gf_sub_mul([3, 2, 4], [2, 2, 2], [1, 4], 5, ZZ) + [3, 3, 2, 1] + + """ + return gf_sub(f, gf_mul(g, h, p, K), p, K) + + +def gf_expand(F, p, K): + """ + Expand results of :func:`~.factor` in ``GF(p)[x]``. + + Examples + ======== + + >>> from sympy.polys.domains import ZZ + >>> from sympy.polys.galoistools import gf_expand + + >>> gf_expand([([3, 2, 4], 1), ([2, 2], 2), ([3, 1], 3)], 5, ZZ) + [4, 3, 0, 3, 0, 1, 4, 1] + + """ + if isinstance(F, tuple): + lc, F = F + else: + lc = K.one + + g = [lc] + + for f, k in F: + f = gf_pow(f, k, p, K) + g = gf_mul(g, f, p, K) + + return g + + +def gf_div(f, g, p, K): + """ + Division with remainder in ``GF(p)[x]``. 
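Before the division routines below, a tiny consistency sketch for gf_add and gf_sub: adding a polynomial and subtracting it again should return the original coefficient list modulo p:

from sympy.polys.domains import ZZ
from sympy.polys.galoistools import gf_add, gf_sub

p = 5
f, g = ZZ.map([3, 2, 4]), ZZ.map([2, 2, 2])
# (f + g) - g reduces back to f with coefficients taken mod p.
print(gf_sub(gf_add(f, g, p, ZZ), g, p, ZZ) == f)   # expected: True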
+ + Given univariate polynomials ``f`` and ``g`` with coefficients in a + finite field with ``p`` elements, returns polynomials ``q`` and ``r`` + (quotient and remainder) such that ``f = q*g + r``. + + Consider polynomials ``x**3 + x + 1`` and ``x**2 + x`` in GF(2):: + + >>> from sympy.polys.domains import ZZ + >>> from sympy.polys.galoistools import gf_div, gf_add_mul + + >>> gf_div(ZZ.map([1, 0, 1, 1]), ZZ.map([1, 1, 0]), 2, ZZ) + ([1, 1], [1]) + + As result we obtained quotient ``x + 1`` and remainder ``1``, thus:: + + >>> gf_add_mul(ZZ.map([1]), ZZ.map([1, 1]), ZZ.map([1, 1, 0]), 2, ZZ) + [1, 0, 1, 1] + + References + ========== + + .. [1] [Monagan93]_ + .. [2] [Gathen99]_ + + """ + df = gf_degree(f) + dg = gf_degree(g) + + if not g: + raise ZeroDivisionError("polynomial division") + elif df < dg: + return [], f + + inv = K.invert(g[0], p) + + h, dq, dr = list(f), df - dg, dg - 1 + + for i in range(0, df + 1): + coeff = h[i] + + for j in range(max(0, dg - i), min(df - i, dr) + 1): + coeff -= h[i + j - dg] * g[dg - j] + + if i <= dq: + coeff *= inv + + h[i] = coeff % p + + return h[:dq + 1], gf_strip(h[dq + 1:]) + + +def gf_rem(f, g, p, K): + """ + Compute polynomial remainder in ``GF(p)[x]``. + + Examples + ======== + + >>> from sympy.polys.domains import ZZ + >>> from sympy.polys.galoistools import gf_rem + + >>> gf_rem(ZZ.map([1, 0, 1, 1]), ZZ.map([1, 1, 0]), 2, ZZ) + [1] + + """ + return gf_div(f, g, p, K)[1] + + +def gf_quo(f, g, p, K): + """ + Compute exact quotient in ``GF(p)[x]``. + + Examples + ======== + + >>> from sympy.polys.domains import ZZ + >>> from sympy.polys.galoistools import gf_quo + + >>> gf_quo(ZZ.map([1, 0, 1, 1]), ZZ.map([1, 1, 0]), 2, ZZ) + [1, 1] + >>> gf_quo(ZZ.map([1, 0, 3, 2, 3]), ZZ.map([2, 2, 2]), 5, ZZ) + [3, 2, 4] + + """ + df = gf_degree(f) + dg = gf_degree(g) + + if not g: + raise ZeroDivisionError("polynomial division") + elif df < dg: + return [] + + inv = K.invert(g[0], p) + + h, dq, dr = f[:], df - dg, dg - 1 + + for i in range(0, dq + 1): + coeff = h[i] + + for j in range(max(0, dg - i), min(df - i, dr) + 1): + coeff -= h[i + j - dg] * g[dg - j] + + h[i] = (coeff * inv) % p + + return h[:dq + 1] + + +def gf_exquo(f, g, p, K): + """ + Compute polynomial quotient in ``GF(p)[x]``. + + Examples + ======== + + >>> from sympy.polys.domains import ZZ + >>> from sympy.polys.galoistools import gf_exquo + + >>> gf_exquo(ZZ.map([1, 0, 3, 2, 3]), ZZ.map([2, 2, 2]), 5, ZZ) + [3, 2, 4] + + >>> gf_exquo(ZZ.map([1, 0, 1, 1]), ZZ.map([1, 1, 0]), 2, ZZ) + Traceback (most recent call last): + ... + ExactQuotientFailed: [1, 1, 0] does not divide [1, 0, 1, 1] + + """ + q, r = gf_div(f, g, p, K) + + if not r: + return q + else: + raise ExactQuotientFailed(f, g) + + +def gf_lshift(f, n, K): + """ + Efficiently multiply ``f`` by ``x**n``. + + Examples + ======== + + >>> from sympy.polys.domains import ZZ + >>> from sympy.polys.galoistools import gf_lshift + + >>> gf_lshift([3, 2, 4], 4, ZZ) + [3, 2, 4, 0, 0, 0, 0] + + """ + if not f: + return f + else: + return f + [K.zero]*n + + +def gf_rshift(f, n, K): + """ + Efficiently divide ``f`` by ``x**n``. + + Examples + ======== + + >>> from sympy.polys.domains import ZZ + >>> from sympy.polys.galoistools import gf_rshift + + >>> gf_rshift([1, 2, 3, 4, 0], 3, ZZ) + ([1, 2], [3, 4, 0]) + + """ + if not n: + return f, [] + else: + return f[:-n], f[-n:] + + +def gf_pow(f, n, p, K): + """ + Compute ``f**n`` in ``GF(p)[x]`` using repeated squaring. 
+ + Examples + ======== + + >>> from sympy.polys.domains import ZZ + >>> from sympy.polys.galoistools import gf_pow + + >>> gf_pow([3, 2, 4], 3, 5, ZZ) + [2, 4, 4, 2, 2, 1, 4] + + """ + if not n: + return [K.one] + elif n == 1: + return f + elif n == 2: + return gf_sqr(f, p, K) + + h = [K.one] + + while True: + if n & 1: + h = gf_mul(h, f, p, K) + n -= 1 + + n >>= 1 + + if not n: + break + + f = gf_sqr(f, p, K) + + return h + +def gf_frobenius_monomial_base(g, p, K): + """ + return the list of ``x**(i*p) mod g in Z_p`` for ``i = 0, .., n - 1`` + where ``n = gf_degree(g)`` + + Examples + ======== + + >>> from sympy.polys.domains import ZZ + >>> from sympy.polys.galoistools import gf_frobenius_monomial_base + >>> g = ZZ.map([1, 0, 2, 1]) + >>> gf_frobenius_monomial_base(g, 5, ZZ) + [[1], [4, 4, 2], [1, 2]] + + """ + n = gf_degree(g) + if n == 0: + return [] + b = [0]*n + b[0] = [1] + if p < n: + for i in range(1, n): + mon = gf_lshift(b[i - 1], p, K) + b[i] = gf_rem(mon, g, p, K) + elif n > 1: + b[1] = gf_pow_mod([K.one, K.zero], p, g, p, K) + for i in range(2, n): + b[i] = gf_mul(b[i - 1], b[1], p, K) + b[i] = gf_rem(b[i], g, p, K) + + return b + +def gf_frobenius_map(f, g, b, p, K): + """ + compute gf_pow_mod(f, p, g, p, K) using the Frobenius map + + Parameters + ========== + + f, g : polynomials in ``GF(p)[x]`` + b : frobenius monomial base + p : prime number + K : domain + + Examples + ======== + + >>> from sympy.polys.domains import ZZ + >>> from sympy.polys.galoistools import gf_frobenius_monomial_base, gf_frobenius_map + >>> f = ZZ.map([2, 1, 0, 1]) + >>> g = ZZ.map([1, 0, 2, 1]) + >>> p = 5 + >>> b = gf_frobenius_monomial_base(g, p, ZZ) + >>> r = gf_frobenius_map(f, g, b, p, ZZ) + >>> gf_frobenius_map(f, g, b, p, ZZ) + [4, 0, 3] + """ + m = gf_degree(g) + if gf_degree(f) >= m: + f = gf_rem(f, g, p, K) + if not f: + return [] + n = gf_degree(f) + sf = [f[-1]] + for i in range(1, n + 1): + v = gf_mul_ground(b[i], f[n - i], p, K) + sf = gf_add(sf, v, p, K) + return sf + +def _gf_pow_pnm1d2(f, n, g, b, p, K): + """ + utility function for ``gf_edf_zassenhaus`` + Compute ``f**((p**n - 1) // 2)`` in ``GF(p)[x]/(g)`` + ``f**((p**n - 1) // 2) = (f*f**p*...*f**(p**n - 1))**((p - 1) // 2)`` + """ + f = gf_rem(f, g, p, K) + h = f + r = f + for i in range(1, n): + h = gf_frobenius_map(h, g, b, p, K) + r = gf_mul(r, h, p, K) + r = gf_rem(r, g, p, K) + + res = gf_pow_mod(r, (p - 1)//2, g, p, K) + return res + +def gf_pow_mod(f, n, g, p, K): + """ + Compute ``f**n`` in ``GF(p)[x]/(g)`` using repeated squaring. + + Given polynomials ``f`` and ``g`` in ``GF(p)[x]`` and a non-negative + integer ``n``, efficiently computes ``f**n (mod g)`` i.e. the remainder + of ``f**n`` from division by ``g``, using the repeated squaring algorithm. + + Examples + ======== + + >>> from sympy.polys.domains import ZZ + >>> from sympy.polys.galoistools import gf_pow_mod + + >>> gf_pow_mod(ZZ.map([3, 2, 4]), 3, ZZ.map([1, 1]), 5, ZZ) + [] + + References + ========== + + .. [1] [Gathen99]_ + + """ + if not n: + return [K.one] + elif n == 1: + return gf_rem(f, g, p, K) + elif n == 2: + return gf_rem(gf_sqr(f, p, K), g, p, K) + + h = [K.one] + + while True: + if n & 1: + h = gf_mul(h, f, p, K) + h = gf_rem(h, g, p, K) + n -= 1 + + n >>= 1 + + if not n: + break + + f = gf_sqr(f, p, K) + f = gf_rem(f, g, p, K) + + return h + + +def gf_gcd(f, g, p, K): + """ + Euclidean Algorithm in ``GF(p)[x]``. 
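+
+    The GCD is returned monic.  As a quick sketch, it divides both inputs
+    exactly::
+
+    >>> from sympy.polys.domains import ZZ
+    >>> from sympy.polys.galoistools import gf_gcd, gf_rem
+
+    >>> f, g = ZZ.map([3, 2, 4]), ZZ.map([2, 2, 3])
+    >>> h = gf_gcd(f, g, 5, ZZ)
+    >>> gf_rem(f, h, 5, ZZ), gf_rem(g, h, 5, ZZ)
+    ([], [])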
+ + Examples + ======== + + >>> from sympy.polys.domains import ZZ + >>> from sympy.polys.galoistools import gf_gcd + + >>> gf_gcd(ZZ.map([3, 2, 4]), ZZ.map([2, 2, 3]), 5, ZZ) + [1, 3] + + """ + while g: + f, g = g, gf_rem(f, g, p, K) + + return gf_monic(f, p, K)[1] + + +def gf_lcm(f, g, p, K): + """ + Compute polynomial LCM in ``GF(p)[x]``. + + Examples + ======== + + >>> from sympy.polys.domains import ZZ + >>> from sympy.polys.galoistools import gf_lcm + + >>> gf_lcm(ZZ.map([3, 2, 4]), ZZ.map([2, 2, 3]), 5, ZZ) + [1, 2, 0, 4] + + """ + if not f or not g: + return [] + + h = gf_quo(gf_mul(f, g, p, K), + gf_gcd(f, g, p, K), p, K) + + return gf_monic(h, p, K)[1] + + +def gf_cofactors(f, g, p, K): + """ + Compute polynomial GCD and cofactors in ``GF(p)[x]``. + + Examples + ======== + + >>> from sympy.polys.domains import ZZ + >>> from sympy.polys.galoistools import gf_cofactors + + >>> gf_cofactors(ZZ.map([3, 2, 4]), ZZ.map([2, 2, 3]), 5, ZZ) + ([1, 3], [3, 3], [2, 1]) + + """ + if not f and not g: + return ([], [], []) + + h = gf_gcd(f, g, p, K) + + return (h, gf_quo(f, h, p, K), + gf_quo(g, h, p, K)) + + +def gf_gcdex(f, g, p, K): + """ + Extended Euclidean Algorithm in ``GF(p)[x]``. + + Given polynomials ``f`` and ``g`` in ``GF(p)[x]``, computes polynomials + ``s``, ``t`` and ``h``, such that ``h = gcd(f, g)`` and ``s*f + t*g = h``. + The typical application of EEA is solving polynomial diophantine equations. + + Consider polynomials ``f = (x + 7) (x + 1)``, ``g = (x + 7) (x**2 + 1)`` + in ``GF(11)[x]``. Application of Extended Euclidean Algorithm gives:: + + >>> from sympy.polys.domains import ZZ + >>> from sympy.polys.galoistools import gf_gcdex, gf_mul, gf_add + + >>> s, t, g = gf_gcdex(ZZ.map([1, 8, 7]), ZZ.map([1, 7, 1, 7]), 11, ZZ) + >>> s, t, g + ([5, 6], [6], [1, 7]) + + As result we obtained polynomials ``s = 5*x + 6`` and ``t = 6``, and + additionally ``gcd(f, g) = x + 7``. This is correct because:: + + >>> S = gf_mul(s, ZZ.map([1, 8, 7]), 11, ZZ) + >>> T = gf_mul(t, ZZ.map([1, 7, 1, 7]), 11, ZZ) + + >>> gf_add(S, T, 11, ZZ) == [1, 7] + True + + References + ========== + + .. [1] [Gathen99]_ + + """ + if not (f or g): + return [K.one], [], [] + + p0, r0 = gf_monic(f, p, K) + p1, r1 = gf_monic(g, p, K) + + if not f: + return [], [K.invert(p1, p)], r1 + if not g: + return [K.invert(p0, p)], [], r0 + + s0, s1 = [K.invert(p0, p)], [] + t0, t1 = [], [K.invert(p1, p)] + + while True: + Q, R = gf_div(r0, r1, p, K) + + if not R: + break + + (lc, r1), r0 = gf_monic(R, p, K), r1 + + inv = K.invert(lc, p) + + s = gf_sub_mul(s0, s1, Q, p, K) + t = gf_sub_mul(t0, t1, Q, p, K) + + s1, s0 = gf_mul_ground(s, inv, p, K), s1 + t1, t0 = gf_mul_ground(t, inv, p, K), t1 + + return s1, t1, r1 + + +def gf_monic(f, p, K): + """ + Compute LC and a monic polynomial in ``GF(p)[x]``. + + Examples + ======== + + >>> from sympy.polys.domains import ZZ + >>> from sympy.polys.galoistools import gf_monic + + >>> gf_monic(ZZ.map([3, 2, 4]), 5, ZZ) + (3, [1, 4, 3]) + + """ + if not f: + return K.zero, [] + else: + lc = f[0] + + if K.is_one(lc): + return lc, list(f) + else: + return lc, gf_quo_ground(f, lc, p, K) + + +def gf_diff(f, p, K): + """ + Differentiate polynomial in ``GF(p)[x]``. 
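+
+    Since the coefficients are reduced modulo ``p``, the derivative of
+    ``x**p`` vanishes; a small illustration of that effect::
+
+    >>> from sympy.polys.domains import ZZ
+    >>> from sympy.polys.galoistools import gf_diff
+
+    >>> gf_diff([1, 0, 0, 0, 0, 1], 5, ZZ)
+    []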
+ + Examples + ======== + + >>> from sympy.polys.domains import ZZ + >>> from sympy.polys.galoistools import gf_diff + + >>> gf_diff([3, 2, 4], 5, ZZ) + [1, 2] + + """ + df = gf_degree(f) + + h, n = [K.zero]*df, df + + for coeff in f[:-1]: + coeff *= K(n) + coeff %= p + + if coeff: + h[df - n] = coeff + + n -= 1 + + return gf_strip(h) + + +def gf_eval(f, a, p, K): + """ + Evaluate ``f(a)`` in ``GF(p)`` using Horner scheme. + + Examples + ======== + + >>> from sympy.polys.domains import ZZ + >>> from sympy.polys.galoistools import gf_eval + + >>> gf_eval([3, 2, 4], 2, 5, ZZ) + 0 + + """ + result = K.zero + + for c in f: + result *= a + result += c + result %= p + + return result + + +def gf_multi_eval(f, A, p, K): + """ + Evaluate ``f(a)`` for ``a`` in ``[a_1, ..., a_n]``. + + Examples + ======== + + >>> from sympy.polys.domains import ZZ + >>> from sympy.polys.galoistools import gf_multi_eval + + >>> gf_multi_eval([3, 2, 4], [0, 1, 2, 3, 4], 5, ZZ) + [4, 4, 0, 2, 0] + + """ + return [ gf_eval(f, a, p, K) for a in A ] + + +def gf_compose(f, g, p, K): + """ + Compute polynomial composition ``f(g)`` in ``GF(p)[x]``. + + Examples + ======== + + >>> from sympy.polys.domains import ZZ + >>> from sympy.polys.galoistools import gf_compose + + >>> gf_compose([3, 2, 4], [2, 2, 2], 5, ZZ) + [2, 4, 0, 3, 0] + + """ + if len(g) <= 1: + return gf_strip([gf_eval(f, gf_LC(g, K), p, K)]) + + if not f: + return [] + + h = [f[0]] + + for c in f[1:]: + h = gf_mul(h, g, p, K) + h = gf_add_ground(h, c, p, K) + + return h + + +def gf_compose_mod(g, h, f, p, K): + """ + Compute polynomial composition ``g(h)`` in ``GF(p)[x]/(f)``. + + Examples + ======== + + >>> from sympy.polys.domains import ZZ + >>> from sympy.polys.galoistools import gf_compose_mod + + >>> gf_compose_mod(ZZ.map([3, 2, 4]), ZZ.map([2, 2, 2]), ZZ.map([4, 3]), 5, ZZ) + [4] + + """ + if not g: + return [] + + comp = [g[0]] + + for a in g[1:]: + comp = gf_mul(comp, h, p, K) + comp = gf_add_ground(comp, a, p, K) + comp = gf_rem(comp, f, p, K) + + return comp + + +def gf_trace_map(a, b, c, n, f, p, K): + """ + Compute polynomial trace map in ``GF(p)[x]/(f)``. + + Given a polynomial ``f`` in ``GF(p)[x]``, polynomials ``a``, ``b``, + ``c`` in the quotient ring ``GF(p)[x]/(f)`` such that ``b = c**t + (mod f)`` for some positive power ``t`` of ``p``, and a positive + integer ``n``, returns a mapping:: + + a -> a**t**n, a + a**t + a**t**2 + ... + a**t**n (mod f) + + In factorization context, ``b = x**p mod f`` and ``c = x mod f``. + This way we can efficiently compute trace polynomials in equal + degree factorization routine, much faster than with other methods, + like iterated Frobenius algorithm, for large degrees. + + Examples + ======== + + >>> from sympy.polys.domains import ZZ + >>> from sympy.polys.galoistools import gf_trace_map + + >>> gf_trace_map([1, 2], [4, 4], [1, 1], 4, [3, 2, 4], 5, ZZ) + ([1, 3], [1, 3]) + + References + ========== + + .. 
[1] [Gathen92]_ + + """ + u = gf_compose_mod(a, b, f, p, K) + v = b + + if n & 1: + U = gf_add(a, u, p, K) + V = b + else: + U = a + V = c + + n >>= 1 + + while n: + u = gf_add(u, gf_compose_mod(u, v, f, p, K), p, K) + v = gf_compose_mod(v, v, f, p, K) + + if n & 1: + U = gf_add(U, gf_compose_mod(u, V, f, p, K), p, K) + V = gf_compose_mod(v, V, f, p, K) + + n >>= 1 + + return gf_compose_mod(a, V, f, p, K), U + +def _gf_trace_map(f, n, g, b, p, K): + """ + utility for ``gf_edf_shoup`` + """ + f = gf_rem(f, g, p, K) + h = f + r = f + for i in range(1, n): + h = gf_frobenius_map(h, g, b, p, K) + r = gf_add(r, h, p, K) + r = gf_rem(r, g, p, K) + return r + + +def gf_random(n, p, K): + """ + Generate a random polynomial in ``GF(p)[x]`` of degree ``n``. + + Examples + ======== + + >>> from sympy.polys.domains import ZZ + >>> from sympy.polys.galoistools import gf_random + >>> gf_random(10, 5, ZZ) #doctest: +SKIP + [1, 2, 3, 2, 1, 1, 1, 2, 0, 4, 2] + + """ + pi = int(p) + return [K.one] + [ K(int(uniform(0, pi))) for i in range(0, n) ] + + +def gf_irreducible(n, p, K): + """ + Generate random irreducible polynomial of degree ``n`` in ``GF(p)[x]``. + + Examples + ======== + + >>> from sympy.polys.domains import ZZ + >>> from sympy.polys.galoistools import gf_irreducible + >>> gf_irreducible(10, 5, ZZ) #doctest: +SKIP + [1, 4, 2, 2, 3, 2, 4, 1, 4, 0, 4] + + """ + while True: + f = gf_random(n, p, K) + if gf_irreducible_p(f, p, K): + return f + + +def gf_irred_p_ben_or(f, p, K): + """ + Ben-Or's polynomial irreducibility test over finite fields. + + Examples + ======== + + >>> from sympy.polys.domains import ZZ + >>> from sympy.polys.galoistools import gf_irred_p_ben_or + + >>> gf_irred_p_ben_or(ZZ.map([1, 4, 2, 2, 3, 2, 4, 1, 4, 0, 4]), 5, ZZ) + True + >>> gf_irred_p_ben_or(ZZ.map([3, 2, 4]), 5, ZZ) + False + + """ + n = gf_degree(f) + + if n <= 1: + return True + + _, f = gf_monic(f, p, K) + if n < 5: + H = h = gf_pow_mod([K.one, K.zero], p, f, p, K) + + for i in range(0, n//2): + g = gf_sub(h, [K.one, K.zero], p, K) + + if gf_gcd(f, g, p, K) == [K.one]: + h = gf_compose_mod(h, H, f, p, K) + else: + return False + else: + b = gf_frobenius_monomial_base(f, p, K) + H = h = gf_frobenius_map([K.one, K.zero], f, b, p, K) + for i in range(0, n//2): + g = gf_sub(h, [K.one, K.zero], p, K) + if gf_gcd(f, g, p, K) == [K.one]: + h = gf_frobenius_map(h, f, b, p, K) + else: + return False + + return True + + +def gf_irred_p_rabin(f, p, K): + """ + Rabin's polynomial irreducibility test over finite fields. + + Examples + ======== + + >>> from sympy.polys.domains import ZZ + >>> from sympy.polys.galoistools import gf_irred_p_rabin + + >>> gf_irred_p_rabin(ZZ.map([1, 4, 2, 2, 3, 2, 4, 1, 4, 0, 4]), 5, ZZ) + True + >>> gf_irred_p_rabin(ZZ.map([3, 2, 4]), 5, ZZ) + False + + """ + n = gf_degree(f) + + if n <= 1: + return True + + _, f = gf_monic(f, p, K) + + x = [K.one, K.zero] + + from sympy.ntheory import factorint + + indices = { n//d for d in factorint(n) } + + b = gf_frobenius_monomial_base(f, p, K) + h = b[1] + + for i in range(1, n): + if i in indices: + g = gf_sub(h, x, p, K) + + if gf_gcd(f, g, p, K) != [K.one]: + return False + + h = gf_frobenius_map(h, f, b, p, K) + + return h == x + +_irred_methods = { + 'ben-or': gf_irred_p_ben_or, + 'rabin': gf_irred_p_rabin, +} + + +def gf_irreducible_p(f, p, K): + """ + Test irreducibility of a polynomial ``f`` in ``GF(p)[x]``. 
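+
+    The actual test is selected through the ``GF_IRRED_METHOD`` configuration
+    key (see the ``query`` call below).  A rough sketch of forcing a method,
+    assuming the ``setup`` helper from ``sympy.polys.polyconfig``::
+
+    >>> from sympy.polys.polyconfig import setup
+    >>> setup('GF_IRRED_METHOD', 'ben-or')  # use Ben-Or's test
+    >>> setup('GF_IRRED_METHOD')            # assumed to restore the default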
+ + Examples + ======== + + >>> from sympy.polys.domains import ZZ + >>> from sympy.polys.galoistools import gf_irreducible_p + + >>> gf_irreducible_p(ZZ.map([1, 4, 2, 2, 3, 2, 4, 1, 4, 0, 4]), 5, ZZ) + True + >>> gf_irreducible_p(ZZ.map([3, 2, 4]), 5, ZZ) + False + + """ + method = query('GF_IRRED_METHOD') + + if method is not None: + irred = _irred_methods[method](f, p, K) + else: + irred = gf_irred_p_rabin(f, p, K) + + return irred + + +def gf_sqf_p(f, p, K): + """ + Return ``True`` if ``f`` is square-free in ``GF(p)[x]``. + + Examples + ======== + + >>> from sympy.polys.domains import ZZ + >>> from sympy.polys.galoistools import gf_sqf_p + + >>> gf_sqf_p(ZZ.map([3, 2, 4]), 5, ZZ) + True + >>> gf_sqf_p(ZZ.map([2, 4, 4, 2, 2, 1, 4]), 5, ZZ) + False + + """ + _, f = gf_monic(f, p, K) + + if not f: + return True + else: + return gf_gcd(f, gf_diff(f, p, K), p, K) == [K.one] + + +def gf_sqf_part(f, p, K): + """ + Return square-free part of a ``GF(p)[x]`` polynomial. + + Examples + ======== + + >>> from sympy.polys.domains import ZZ + >>> from sympy.polys.galoistools import gf_sqf_part + + >>> gf_sqf_part(ZZ.map([1, 1, 3, 0, 1, 0, 2, 2, 1]), 5, ZZ) + [1, 4, 3] + + """ + _, sqf = gf_sqf_list(f, p, K) + + g = [K.one] + + for f, _ in sqf: + g = gf_mul(g, f, p, K) + + return g + + +def gf_sqf_list(f, p, K, all=False): + """ + Return the square-free decomposition of a ``GF(p)[x]`` polynomial. + + Given a polynomial ``f`` in ``GF(p)[x]``, returns the leading coefficient + of ``f`` and a square-free decomposition ``f_1**e_1 f_2**e_2 ... f_k**e_k`` + such that all ``f_i`` are monic polynomials and ``(f_i, f_j)`` for ``i != j`` + are co-prime and ``e_1 ... e_k`` are given in increasing order. All trivial + terms (i.e. ``f_i = 1``) are not included in the output. + + Consider polynomial ``f = x**11 + 1`` over ``GF(11)[x]``:: + + >>> from sympy.polys.domains import ZZ + + >>> from sympy.polys.galoistools import ( + ... gf_from_dict, gf_diff, gf_sqf_list, gf_pow, + ... ) + ... # doctest: +NORMALIZE_WHITESPACE + + >>> f = gf_from_dict({11: ZZ(1), 0: ZZ(1)}, 11, ZZ) + + Note that ``f'(x) = 0``:: + + >>> gf_diff(f, 11, ZZ) + [] + + This phenomenon does not happen in characteristic zero. However we can + still compute square-free decomposition of ``f`` using ``gf_sqf()``:: + + >>> gf_sqf_list(f, 11, ZZ) + (1, [([1, 1], 11)]) + + We obtained factorization ``f = (x + 1)**11``. This is correct because:: + + >>> gf_pow([1, 1], 11, 11, ZZ) == f + True + + References + ========== + + .. [1] [Geddes92]_ + + """ + n, sqf, factors, r = 1, False, [], int(p) + + lc, f = gf_monic(f, p, K) + + if gf_degree(f) < 1: + return lc, [] + + while True: + F = gf_diff(f, p, K) + + if F != []: + g = gf_gcd(f, F, p, K) + h = gf_quo(f, g, p, K) + + i = 1 + + while h != [K.one]: + G = gf_gcd(g, h, p, K) + H = gf_quo(h, G, p, K) + + if gf_degree(H) > 0: + factors.append((H, i*n)) + + g, h, i = gf_quo(g, G, p, K), G, i + 1 + + if g == [K.one]: + sqf = True + else: + f = g + + if not sqf: + d = gf_degree(f) // r + + for i in range(0, d + 1): + f[i] = f[i*r] + + f, n = f[:d + 1], n*r + else: + break + + if all: + raise ValueError("'all=True' is not supported yet") + + return lc, factors + + +def gf_Qmatrix(f, p, K): + """ + Calculate Berlekamp's ``Q`` matrix. 
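+
+    For monic ``f`` of degree ``n``, row ``i`` holds the coefficients of
+    ``x**(i*p)`` reduced modulo ``f``, constant term first.  A rough
+    cross-check against ``gf_pow_mod`` (which uses the reversed convention)::
+
+    >>> from sympy.polys.domains import ZZ
+    >>> from sympy.polys.galoistools import gf_Qmatrix, gf_pow_mod, gf_strip
+
+    >>> f = ZZ.map([1, 0, 0, 0, 1])
+    >>> Q = gf_Qmatrix(f, 5, ZZ)
+    >>> gf_strip(list(reversed(Q[1]))) == gf_pow_mod([ZZ(1), ZZ(0)], 5, f, 5, ZZ)
+    True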
+ + Examples + ======== + + >>> from sympy.polys.domains import ZZ + >>> from sympy.polys.galoistools import gf_Qmatrix + + >>> gf_Qmatrix([3, 2, 4], 5, ZZ) + [[1, 0], + [3, 4]] + + >>> gf_Qmatrix([1, 0, 0, 0, 1], 5, ZZ) + [[1, 0, 0, 0], + [0, 4, 0, 0], + [0, 0, 1, 0], + [0, 0, 0, 4]] + + """ + n, r = gf_degree(f), int(p) + + q = [K.one] + [K.zero]*(n - 1) + Q = [list(q)] + [[]]*(n - 1) + + for i in range(1, (n - 1)*r + 1): + qq, c = [(-q[-1]*f[-1]) % p], q[-1] + + for j in range(1, n): + qq.append((q[j - 1] - c*f[-j - 1]) % p) + + if not (i % r): + Q[i//r] = list(qq) + + q = qq + + return Q + + +def gf_Qbasis(Q, p, K): + """ + Compute a basis of the kernel of ``Q``. + + Examples + ======== + + >>> from sympy.polys.domains import ZZ + >>> from sympy.polys.galoistools import gf_Qmatrix, gf_Qbasis + + >>> gf_Qbasis(gf_Qmatrix([1, 0, 0, 0, 1], 5, ZZ), 5, ZZ) + [[1, 0, 0, 0], [0, 0, 1, 0]] + + >>> gf_Qbasis(gf_Qmatrix([3, 2, 4], 5, ZZ), 5, ZZ) + [[1, 0]] + + """ + Q, n = [ list(q) for q in Q ], len(Q) + + for k in range(0, n): + Q[k][k] = (Q[k][k] - K.one) % p + + for k in range(0, n): + for i in range(k, n): + if Q[k][i]: + break + else: + continue + + inv = K.invert(Q[k][i], p) + + for j in range(0, n): + Q[j][i] = (Q[j][i]*inv) % p + + for j in range(0, n): + t = Q[j][k] + Q[j][k] = Q[j][i] + Q[j][i] = t + + for i in range(0, n): + if i != k: + q = Q[k][i] + + for j in range(0, n): + Q[j][i] = (Q[j][i] - Q[j][k]*q) % p + + for i in range(0, n): + for j in range(0, n): + if i == j: + Q[i][j] = (K.one - Q[i][j]) % p + else: + Q[i][j] = (-Q[i][j]) % p + + basis = [] + + for q in Q: + if any(q): + basis.append(q) + + return basis + + +def gf_berlekamp(f, p, K): + """ + Factor a square-free ``f`` in ``GF(p)[x]`` for small ``p``. + + Examples + ======== + + >>> from sympy.polys.domains import ZZ + >>> from sympy.polys.galoistools import gf_berlekamp + + >>> gf_berlekamp([1, 0, 0, 0, 1], 5, ZZ) + [[1, 0, 2], [1, 0, 3]] + + """ + Q = gf_Qmatrix(f, p, K) + V = gf_Qbasis(Q, p, K) + + for i, v in enumerate(V): + V[i] = gf_strip(list(reversed(v))) + + factors = [f] + + for k in range(1, len(V)): + for f in list(factors): + s = K.zero + + while s < p: + g = gf_sub_ground(V[k], s, p, K) + h = gf_gcd(f, g, p, K) + + if h != [K.one] and h != f: + factors.remove(f) + + f = gf_quo(f, h, p, K) + factors.extend([f, h]) + + if len(factors) == len(V): + return _sort_factors(factors, multiple=False) + + s += K.one + + return _sort_factors(factors, multiple=False) + + +def gf_ddf_zassenhaus(f, p, K): + """ + Cantor-Zassenhaus: Deterministic Distinct Degree Factorization + + Given a monic square-free polynomial ``f`` in ``GF(p)[x]``, computes + partial distinct degree factorization ``f_1 ... f_d`` of ``f`` where + ``deg(f_i) != deg(f_j)`` for ``i != j``. The result is returned as a + list of pairs ``(f_i, e_i)`` where ``deg(f_i) > 0`` and ``e_i > 0`` + is an argument to the equal degree factorization routine. + + Consider the polynomial ``x**15 - 1`` in ``GF(11)[x]``:: + + >>> from sympy.polys.domains import ZZ + >>> from sympy.polys.galoistools import gf_from_dict + + >>> f = gf_from_dict({15: ZZ(1), 0: ZZ(-1)}, 11, ZZ) + + Distinct degree factorization gives:: + + >>> from sympy.polys.galoistools import gf_ddf_zassenhaus + + >>> gf_ddf_zassenhaus(f, 11, ZZ) + [([1, 0, 0, 0, 0, 10], 1), ([1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1], 2)] + + which means ``x**15 - 1 = (x**5 - 1) (x**10 + x**5 + 1)``. To obtain + factorization into irreducibles, use equal degree factorization + procedure (EDF) with each of the factors. 
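+
+    A rough sketch of that follow-up step, reusing ``f`` from above (the
+    product of the resulting irreducible factors recovers ``f``)::
+
+    >>> from sympy.polys.galoistools import gf_edf_zassenhaus, gf_expand
+
+    >>> factors = []
+    >>> for h, i in gf_ddf_zassenhaus(f, 11, ZZ):
+    ...     factors.extend(gf_edf_zassenhaus(h, i, 11, ZZ))
+    >>> gf_expand([(h, 1) for h in factors], 11, ZZ) == f
+    True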
+ + References + ========== + + .. [1] [Gathen99]_ + .. [2] [Geddes92]_ + + """ + i, g, factors = 1, [K.one, K.zero], [] + + b = gf_frobenius_monomial_base(f, p, K) + while 2*i <= gf_degree(f): + g = gf_frobenius_map(g, f, b, p, K) + h = gf_gcd(f, gf_sub(g, [K.one, K.zero], p, K), p, K) + + if h != [K.one]: + factors.append((h, i)) + + f = gf_quo(f, h, p, K) + g = gf_rem(g, f, p, K) + b = gf_frobenius_monomial_base(f, p, K) + + i += 1 + + if f != [K.one]: + return factors + [(f, gf_degree(f))] + else: + return factors + + +def gf_edf_zassenhaus(f, n, p, K): + """ + Cantor-Zassenhaus: Probabilistic Equal Degree Factorization + + Given a monic square-free polynomial ``f`` in ``GF(p)[x]`` and + an integer ``n``, such that ``n`` divides ``deg(f)``, returns all + irreducible factors ``f_1,...,f_d`` of ``f``, each of degree ``n``. + EDF procedure gives complete factorization over Galois fields. + + Consider the square-free polynomial ``f = x**3 + x**2 + x + 1`` in + ``GF(5)[x]``. Let's compute its irreducible factors of degree one:: + + >>> from sympy.polys.domains import ZZ + >>> from sympy.polys.galoistools import gf_edf_zassenhaus + + >>> gf_edf_zassenhaus([1,1,1,1], 1, 5, ZZ) + [[1, 1], [1, 2], [1, 3]] + + Notes + ===== + + The case p == 2 is handled by Cohen's Algorithm 3.4.8. The case p odd is + as in Geddes Algorithm 8.9 (or Cohen's Algorithm 3.4.6). + + References + ========== + + .. [1] [Gathen99]_ + .. [2] [Geddes92]_ Algorithm 8.9 + .. [3] [Cohen93]_ Algorithm 3.4.8 + + """ + factors = [f] + + if gf_degree(f) <= n: + return factors + + N = gf_degree(f) // n + if p != 2: + b = gf_frobenius_monomial_base(f, p, K) + + t = [K.one, K.zero] + while len(factors) < N: + if p == 2: + h = r = t + + for i in range(n - 1): + r = gf_pow_mod(r, 2, f, p, K) + h = gf_add(h, r, p, K) + + g = gf_gcd(f, h, p, K) + t += [K.zero, K.zero] + else: + r = gf_random(2 * n - 1, p, K) + h = _gf_pow_pnm1d2(r, n, f, b, p, K) + g = gf_gcd(f, gf_sub_ground(h, K.one, p, K), p, K) + + if g != [K.one] and g != f: + factors = gf_edf_zassenhaus(g, n, p, K) \ + + gf_edf_zassenhaus(gf_quo(f, g, p, K), n, p, K) + + return _sort_factors(factors, multiple=False) + + +def gf_ddf_shoup(f, p, K): + """ + Kaltofen-Shoup: Deterministic Distinct Degree Factorization + + Given a monic square-free polynomial ``f`` in ``GF(p)[x]``, computes + partial distinct degree factorization ``f_1,...,f_d`` of ``f`` where + ``deg(f_i) != deg(f_j)`` for ``i != j``. The result is returned as a + list of pairs ``(f_i, e_i)`` where ``deg(f_i) > 0`` and ``e_i > 0`` + is an argument to the equal degree factorization routine. + + This algorithm is an improved version of Zassenhaus algorithm for + large ``deg(f)`` and modulus ``p`` (especially for ``deg(f) ~ lg(p)``). + + Examples + ======== + + >>> from sympy.polys.domains import ZZ + >>> from sympy.polys.galoistools import gf_ddf_shoup, gf_from_dict + + >>> f = gf_from_dict({6: ZZ(1), 5: ZZ(-1), 4: ZZ(1), 3: ZZ(1), 1: ZZ(-1)}, 3, ZZ) + + >>> gf_ddf_shoup(f, 3, ZZ) + [([1, 1, 0], 1), ([1, 1, 0, 1, 2], 2)] + + References + ========== + + .. [1] [Kaltofen98]_ + .. [2] [Shoup95]_ + .. 
[3] [Gathen92]_ + + """ + n = gf_degree(f) + k = int(_ceil(_sqrt(n//2))) + b = gf_frobenius_monomial_base(f, p, K) + h = gf_frobenius_map([K.one, K.zero], f, b, p, K) + # U[i] = x**(p**i) + U = [[K.one, K.zero], h] + [K.zero]*(k - 1) + + for i in range(2, k + 1): + U[i] = gf_frobenius_map(U[i-1], f, b, p, K) + + h, U = U[k], U[:k] + # V[i] = x**(p**(k*(i+1))) + V = [h] + [K.zero]*(k - 1) + + for i in range(1, k): + V[i] = gf_compose_mod(V[i - 1], h, f, p, K) + + factors = [] + + for i, v in enumerate(V): + h, j = [K.one], k - 1 + + for u in U: + g = gf_sub(v, u, p, K) + h = gf_mul(h, g, p, K) + h = gf_rem(h, f, p, K) + + g = gf_gcd(f, h, p, K) + f = gf_quo(f, g, p, K) + + for u in reversed(U): + h = gf_sub(v, u, p, K) + F = gf_gcd(g, h, p, K) + + if F != [K.one]: + factors.append((F, k*(i + 1) - j)) + + g, j = gf_quo(g, F, p, K), j - 1 + + if f != [K.one]: + factors.append((f, gf_degree(f))) + + return factors + +def gf_edf_shoup(f, n, p, K): + """ + Gathen-Shoup: Probabilistic Equal Degree Factorization + + Given a monic square-free polynomial ``f`` in ``GF(p)[x]`` and integer + ``n`` such that ``n`` divides ``deg(f)``, returns all irreducible factors + ``f_1,...,f_d`` of ``f``, each of degree ``n``. This is a complete + factorization over Galois fields. + + This algorithm is an improved version of Zassenhaus algorithm for + large ``deg(f)`` and modulus ``p`` (especially for ``deg(f) ~ lg(p)``). + + Examples + ======== + + >>> from sympy.polys.domains import ZZ + >>> from sympy.polys.galoistools import gf_edf_shoup + + >>> gf_edf_shoup(ZZ.map([1, 2837, 2277]), 1, 2917, ZZ) + [[1, 852], [1, 1985]] + + References + ========== + + .. [1] [Shoup91]_ + .. [2] [Gathen92]_ + + """ + N, q = gf_degree(f), int(p) + + if not N: + return [] + if N <= n: + return [f] + + factors, x = [f], [K.one, K.zero] + + r = gf_random(N - 1, p, K) + + if p == 2: + h = gf_pow_mod(x, q, f, p, K) + H = gf_trace_map(r, h, x, n - 1, f, p, K)[1] + h1 = gf_gcd(f, H, p, K) + h2 = gf_quo(f, h1, p, K) + + factors = gf_edf_shoup(h1, n, p, K) \ + + gf_edf_shoup(h2, n, p, K) + else: + b = gf_frobenius_monomial_base(f, p, K) + H = _gf_trace_map(r, n, f, b, p, K) + h = gf_pow_mod(H, (q - 1)//2, f, p, K) + + h1 = gf_gcd(f, h, p, K) + h2 = gf_gcd(f, gf_sub_ground(h, K.one, p, K), p, K) + h3 = gf_quo(f, gf_mul(h1, h2, p, K), p, K) + + factors = gf_edf_shoup(h1, n, p, K) \ + + gf_edf_shoup(h2, n, p, K) \ + + gf_edf_shoup(h3, n, p, K) + + return _sort_factors(factors, multiple=False) + + +def gf_zassenhaus(f, p, K): + """ + Factor a square-free ``f`` in ``GF(p)[x]`` for medium ``p``. + + Examples + ======== + + >>> from sympy.polys.domains import ZZ + >>> from sympy.polys.galoistools import gf_zassenhaus + + >>> gf_zassenhaus(ZZ.map([1, 4, 3]), 5, ZZ) + [[1, 1], [1, 3]] + + """ + factors = [] + + for factor, n in gf_ddf_zassenhaus(f, p, K): + factors += gf_edf_zassenhaus(factor, n, p, K) + + return _sort_factors(factors, multiple=False) + + +def gf_shoup(f, p, K): + """ + Factor a square-free ``f`` in ``GF(p)[x]`` for large ``p``. 
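+
+    Both complete factorization routines return the same (sorted) list of
+    factors; a quick sanity-check sketch against the medium-``p`` routine::
+
+    >>> from sympy.polys.domains import ZZ
+    >>> from sympy.polys.galoistools import gf_shoup, gf_zassenhaus
+
+    >>> gf_shoup(ZZ.map([1, 4, 3]), 5, ZZ) == gf_zassenhaus(ZZ.map([1, 4, 3]), 5, ZZ)
+    True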
+ + Examples + ======== + + >>> from sympy.polys.domains import ZZ + >>> from sympy.polys.galoistools import gf_shoup + + >>> gf_shoup(ZZ.map([1, 4, 3]), 5, ZZ) + [[1, 1], [1, 3]] + + """ + factors = [] + + for factor, n in gf_ddf_shoup(f, p, K): + factors += gf_edf_shoup(factor, n, p, K) + + return _sort_factors(factors, multiple=False) + +_factor_methods = { + 'berlekamp': gf_berlekamp, # ``p`` : small + 'zassenhaus': gf_zassenhaus, # ``p`` : medium + 'shoup': gf_shoup, # ``p`` : large +} + + +def gf_factor_sqf(f, p, K, method=None): + """ + Factor a square-free polynomial ``f`` in ``GF(p)[x]``. + + Examples + ======== + + >>> from sympy.polys.domains import ZZ + >>> from sympy.polys.galoistools import gf_factor_sqf + + >>> gf_factor_sqf(ZZ.map([3, 2, 4]), 5, ZZ) + (3, [[1, 1], [1, 3]]) + + """ + lc, f = gf_monic(f, p, K) + + if gf_degree(f) < 1: + return lc, [] + + method = method or query('GF_FACTOR_METHOD') + + if method is not None: + factors = _factor_methods[method](f, p, K) + else: + factors = gf_zassenhaus(f, p, K) + + return lc, factors + + +def gf_factor(f, p, K): + """ + Factor (non square-free) polynomials in ``GF(p)[x]``. + + Given a possibly non square-free polynomial ``f`` in ``GF(p)[x]``, + returns its complete factorization into irreducibles:: + + f_1(x)**e_1 f_2(x)**e_2 ... f_d(x)**e_d + + where each ``f_i`` is a monic polynomial and ``gcd(f_i, f_j) == 1``, + for ``i != j``. The result is given as a tuple consisting of the + leading coefficient of ``f`` and a list of factors of ``f`` with + their multiplicities. + + The algorithm proceeds by first computing square-free decomposition + of ``f`` and then iteratively factoring each of square-free factors. + + Consider a non square-free polynomial ``f = (7*x + 1) (x + 2)**2`` in + ``GF(11)[x]``. We obtain its factorization into irreducibles as follows:: + + >>> from sympy.polys.domains import ZZ + >>> from sympy.polys.galoistools import gf_factor + + >>> gf_factor(ZZ.map([5, 2, 7, 2]), 11, ZZ) + (5, [([1, 2], 1), ([1, 8], 2)]) + + We arrived with factorization ``f = 5 (x + 2) (x + 8)**2``. We did not + recover the exact form of the input polynomial because we requested to + get monic factors of ``f`` and its leading coefficient separately. + + Square-free factors of ``f`` can be factored into irreducibles over + ``GF(p)`` using three very different methods: + + Berlekamp + efficient for very small values of ``p`` (usually ``p < 25``) + Cantor-Zassenhaus + efficient on average input and with "typical" ``p`` + Shoup-Kaltofen-Gathen + efficient with very large inputs and modulus + + If you want to use a specific factorization method, instead of the default + one, set ``GF_FACTOR_METHOD`` with one of ``berlekamp``, ``zassenhaus`` or + ``shoup`` values. + + References + ========== + + .. [1] [Gathen99]_ + + """ + lc, f = gf_monic(f, p, K) + + if gf_degree(f) < 1: + return lc, [] + + factors = [] + + for g, n in gf_sqf_list(f, p, K)[1]: + for h in gf_factor_sqf(g, p, K)[1]: + factors.append((h, n)) + + return lc, _sort_factors(factors) + + +def gf_value(f, a): + """ + Value of polynomial 'f' at 'a' in field R. + + Examples + ======== + + >>> from sympy.polys.galoistools import gf_value + + >>> gf_value([1, 7, 2, 4], 11) + 2204 + + """ + result = 0 + for c in f: + result *= a + result += c + return result + + +def linear_congruence(a, b, m): + """ + Returns the values of x satisfying a*x congruent b mod(m) + + Here m is positive integer and a, b are natural numbers. 
+ This function returns only those values of x which are distinct mod(m). + + Examples + ======== + + >>> from sympy.polys.galoistools import linear_congruence + + >>> linear_congruence(3, 12, 15) + [4, 9, 14] + + There are 3 solutions distinct mod(15) since gcd(a, m) = gcd(3, 15) = 3. + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Linear_congruence_theorem + + """ + from sympy.polys.polytools import gcdex + if a % m == 0: + if b % m == 0: + return list(range(m)) + else: + return [] + r, _, g = gcdex(a, m) + if b % g != 0: + return [] + return [(r * b // g + t * m // g) % m for t in range(g)] + + +def _raise_mod_power(x, s, p, f): + """ + Used in gf_csolve to generate solutions of f(x) cong 0 mod(p**(s + 1)) + from the solutions of f(x) cong 0 mod(p**s). + + Examples + ======== + + >>> from sympy.polys.galoistools import _raise_mod_power + >>> from sympy.polys.galoistools import csolve_prime + + These is the solutions of f(x) = x**2 + x + 7 cong 0 mod(3) + + >>> f = [1, 1, 7] + >>> csolve_prime(f, 3) + [1] + >>> [ i for i in range(3) if not (i**2 + i + 7) % 3] + [1] + + The solutions of f(x) cong 0 mod(9) are constructed from the + values returned from _raise_mod_power: + + >>> x, s, p = 1, 1, 3 + >>> V = _raise_mod_power(x, s, p, f) + >>> [x + v * p**s for v in V] + [1, 4, 7] + + And these are confirmed with the following: + + >>> [ i for i in range(3**2) if not (i**2 + i + 7) % 3**2] + [1, 4, 7] + + """ + from sympy.polys.domains import ZZ + f_f = gf_diff(f, p, ZZ) + alpha = gf_value(f_f, x) + beta = - gf_value(f, x) // p**s + return linear_congruence(alpha, beta, p) + + +def _csolve_prime_las_vegas(f, p, seed=None): + r""" Solutions of `f(x) \equiv 0 \pmod{p}`, `f(0) \not\equiv 0 \pmod{p}`. + + Explanation + =========== + + This algorithm is classified as the Las Vegas method. + That is, it always returns the correct answer and solves the problem + fast in many cases, but if it is unlucky, it does not answer forever. + + Suppose the polynomial f is not a zero polynomial. Assume further + that it is of degree at most p-1 and `f(0)\not\equiv 0 \pmod{p}`. + These assumptions are not an essential part of the algorithm, + only that it is more convenient for the function calling this + function to resolve them. + + Note that `x^{p-1} - 1 \equiv \prod_{a=1}^{p-1}(x - a) \pmod{p}`. + Thus, the greatest common divisor with f is `\prod_{s \in S}(x - s)`, + with S being the set of solutions to f. Furthermore, + when a is randomly determined, `(x+a)^{(p-1)/2}-1` is + a polynomial with (p-1)/2 randomly chosen solutions. + The greatest common divisor of f may be a nontrivial factor of f. + + When p is large and the degree of f is small, + it is faster than naive solution methods. + + Parameters + ========== + + f : polynomial + p : prime number + + Returns + ======= + + list[int] + a list of solutions, sorted in ascending order + by integers in the range [1, p). The same value + does not exist in the list even if there is + a multiple solution. If no solution exists, returns []. + + Examples + ======== + + >>> from sympy.polys.galoistools import _csolve_prime_las_vegas + >>> _csolve_prime_las_vegas([1, 4, 3], 7) # x^2 + 4x + 3 = 0 (mod 7) + [4, 6] + >>> _csolve_prime_las_vegas([5, 7, 1, 9], 11) # 5x^3 + 7x^2 + x + 9 = 0 (mod 11) + [1, 5, 8] + + References + ========== + + .. [1] R. Crandall and C. 
Pomerance "Prime Numbers", 2nd Ed., Algorithm 2.3.10 + + """ + from sympy.polys.domains import ZZ + from sympy.ntheory import sqrt_mod + randint = _randint(seed) + root = set() + g = gf_pow_mod([1, 0], p - 1, f, p, ZZ) + g = gf_sub_ground(g, 1, p, ZZ) + # We want to calculate gcd(x**(p-1) - 1, f(x)) + factors = [gf_gcd(f, g, p, ZZ)] + while factors: + f = factors.pop() + # If the degree is small, solve directly + if len(f) <= 1: + continue + if len(f) == 2: + root.add(-invert(f[0], p) * f[1] % p) + continue + if len(f) == 3: + inv = invert(f[0], p) + b = f[1] * inv % p + b = (b + p * (b % 2)) // 2 + root.update((r - b) % p for r in + sqrt_mod(b**2 - f[2] * inv, p, all_roots=True)) + continue + while True: + # Determine `a` randomly and + # compute gcd((x+a)**((p-1)//2)-1, f(x)) + a = randint(0, p - 1) + g = gf_pow_mod([1, a], (p - 1) // 2, f, p, ZZ) + g = gf_sub_ground(g, 1, p, ZZ) + g = gf_gcd(f, g, p, ZZ) + if 1 < len(g) < len(f): + factors.append(g) + factors.append(gf_div(f, g, p, ZZ)[0]) + break + return sorted(root) + + +def csolve_prime(f, p, e=1): + r""" Solutions of `f(x) \equiv 0 \pmod{p^e}`. + + Parameters + ========== + + f : polynomial + p : prime number + e : positive integer + + Returns + ======= + + list[int] + a list of solutions, sorted in ascending order + by integers in the range [1, p**e). The same value + does not exist in the list even if there is + a multiple solution. If no solution exists, returns []. + + Examples + ======== + + >>> from sympy.polys.galoistools import csolve_prime + >>> csolve_prime([1, 1, 7], 3, 1) + [1] + >>> csolve_prime([1, 1, 7], 3, 2) + [1, 4, 7] + + Solutions [7, 4, 1] (mod 3**2) are generated by ``_raise_mod_power()`` + from solution [1] (mod 3). + """ + from sympy.polys.domains import ZZ + g = [MPZ(int(c)) for c in f] + # Convert to polynomial of degree at most p-1 + for i in range(len(g) - p): + g[i + p - 1] += g[i] + g[i] = 0 + g = gf_trunc(g, p) + # Checks whether g(x) is divisible by x + k = 0 + while k < len(g) and g[len(g) - k - 1] == 0: + k += 1 + if k: + g = g[:-k] + root_zero = [0] + else: + root_zero = [] + if g == []: + X1 = list(range(p)) + elif len(g)**2 < p: + # The conditions under which `_csolve_prime_las_vegas` is faster than + # a naive solution are worth considering. + X1 = root_zero + _csolve_prime_las_vegas(g, p) + else: + X1 = root_zero + [i for i in range(p) if gf_eval(g, i, p, ZZ) == 0] + if e == 1: + return X1 + X = [] + S = list(zip(X1, [1]*len(X1))) + while S: + x, s = S.pop() + if s == e: + X.append(x) + else: + s1 = s + 1 + ps = p**s + S.extend([(x + v*ps, s1) for v in _raise_mod_power(x, s, p, f)]) + return sorted(X) + + +def gf_csolve(f, n): + """ + To solve f(x) congruent 0 mod(n). + + n is divided into canonical factors and f(x) cong 0 mod(p**e) will be + solved for each factor. Applying the Chinese Remainder Theorem to the + results returns the final answers. + + Examples + ======== + + Solve [1, 1, 7] congruent 0 mod(189): + + >>> from sympy.polys.galoistools import gf_csolve + >>> gf_csolve([1, 1, 7], 189) + [13, 49, 76, 112, 139, 175] + + See Also + ======== + + sympy.ntheory.residue_ntheory.polynomial_congruence : a higher level solving routine + + References + ========== + + .. [1] 'An introduction to the Theory of Numbers' 5th Edition by Ivan Niven, + Zuckerman and Montgomery. 
+ + """ + from sympy.polys.domains import ZZ + from sympy.ntheory import factorint + P = factorint(n) + X = [csolve_prime(f, p, e) for p, e in P.items()] + pools = list(map(tuple, X)) + perms = [[]] + for pool in pools: + perms = [x + [y] for x in perms for y in pool] + dist_factors = [pow(p, e) for p, e in P.items()] + return sorted([gf_crt(per, dist_factors, ZZ) for per in perms]) diff --git a/.venv/Lib/site-packages/sympy/polys/groebnertools.py b/.venv/Lib/site-packages/sympy/polys/groebnertools.py new file mode 100644 index 0000000000000000000000000000000000000000..fc5c2f228ab4f4182e4c8fff68d974aa25c9d531 --- /dev/null +++ b/.venv/Lib/site-packages/sympy/polys/groebnertools.py @@ -0,0 +1,862 @@ +"""Groebner bases algorithms. """ + + +from sympy.core.symbol import Dummy +from sympy.polys.monomials import monomial_mul, monomial_lcm, monomial_divides, term_div +from sympy.polys.orderings import lex +from sympy.polys.polyerrors import DomainError +from sympy.polys.polyconfig import query + +def groebner(seq, ring, method=None): + """ + Computes Groebner basis for a set of polynomials in `K[X]`. + + Wrapper around the (default) improved Buchberger and the other algorithms + for computing Groebner bases. The choice of algorithm can be changed via + ``method`` argument or :func:`sympy.polys.polyconfig.setup`, where + ``method`` can be either ``buchberger`` or ``f5b``. + + """ + if method is None: + method = query('groebner') + + _groebner_methods = { + 'buchberger': _buchberger, + 'f5b': _f5b, + } + + try: + _groebner = _groebner_methods[method] + except KeyError: + raise ValueError("'%s' is not a valid Groebner bases algorithm (valid are 'buchberger' and 'f5b')" % method) + + domain, orig = ring.domain, None + + if not domain.is_Field or not domain.has_assoc_Field: + try: + orig, ring = ring, ring.clone(domain=domain.get_field()) + except DomainError: + raise DomainError("Cannot compute a Groebner basis over %s" % domain) + else: + seq = [ s.set_ring(ring) for s in seq ] + + G = _groebner(seq, ring) + + if orig is not None: + G = [ g.clear_denoms()[1].set_ring(orig) for g in G ] + + return G + +def _buchberger(f, ring): + """ + Computes Groebner basis for a set of polynomials in `K[X]`. + + Given a set of multivariate polynomials `F`, finds another + set `G`, such that Ideal `F = Ideal G` and `G` is a reduced + Groebner basis. + + The resulting basis is unique and has monic generators if the + ground domains is a field. Otherwise the result is non-unique + but Groebner bases over e.g. integers can be computed (if the + input polynomials are monic). + + Groebner bases can be used to choose specific generators for a + polynomial ideal. Because these bases are unique you can check + for ideal equality by comparing the Groebner bases. To see if + one polynomial lies in an ideal, divide by the elements in the + base and see if the remainder vanishes. + + They can also be used to solve systems of polynomial equations + as, by choosing lexicographic ordering, you can eliminate one + variable at a time, provided that the ideal is zero-dimensional + (finite number of solutions). + + Notes + ===== + + Algorithm used: an improved version of Buchberger's algorithm + as presented in T. Becker, V. Weispfenning, Groebner Bases: A + Computational Approach to Commutative Algebra, Springer, 1993, + page 232. + + References + ========== + + .. [1] [Bose03]_ + .. [2] [Giovini91]_ + .. [3] [Ajwa95]_ + .. 
[4] [Cox97]_ + + """ + order = ring.order + + monomial_mul = ring.monomial_mul + monomial_div = ring.monomial_div + monomial_lcm = ring.monomial_lcm + + def select(P): + # normal selection strategy + # select the pair with minimum LCM(LM(f), LM(g)) + pr = min(P, key=lambda pair: order(monomial_lcm(f[pair[0]].LM, f[pair[1]].LM))) + return pr + + def normal(g, J): + h = g.rem([ f[j] for j in J ]) + + if not h: + return None + else: + h = h.monic() + + if h not in I: + I[h] = len(f) + f.append(h) + + return h.LM, I[h] + + def update(G, B, ih): + # update G using the set of critical pairs B and h + # [BW] page 230 + h = f[ih] + mh = h.LM + + # filter new pairs (h, g), g in G + C = G.copy() + D = set() + + while C: + # select a pair (h, g) by popping an element from C + ig = C.pop() + g = f[ig] + mg = g.LM + LCMhg = monomial_lcm(mh, mg) + + def lcm_divides(ip): + # LCM(LM(h), LM(p)) divides LCM(LM(h), LM(g)) + m = monomial_lcm(mh, f[ip].LM) + return monomial_div(LCMhg, m) + + # HT(h) and HT(g) disjoint: mh*mg == LCMhg + if monomial_mul(mh, mg) == LCMhg or ( + not any(lcm_divides(ipx) for ipx in C) and + not any(lcm_divides(pr[1]) for pr in D)): + D.add((ih, ig)) + + E = set() + + while D: + # select h, g from D (h the same as above) + ih, ig = D.pop() + mg = f[ig].LM + LCMhg = monomial_lcm(mh, mg) + + if not monomial_mul(mh, mg) == LCMhg: + E.add((ih, ig)) + + # filter old pairs + B_new = set() + + while B: + # select g1, g2 from B (-> CP) + ig1, ig2 = B.pop() + mg1 = f[ig1].LM + mg2 = f[ig2].LM + LCM12 = monomial_lcm(mg1, mg2) + + # if HT(h) does not divide lcm(HT(g1), HT(g2)) + if not monomial_div(LCM12, mh) or \ + monomial_lcm(mg1, mh) == LCM12 or \ + monomial_lcm(mg2, mh) == LCM12: + B_new.add((ig1, ig2)) + + B_new |= E + + # filter polynomials + G_new = set() + + while G: + ig = G.pop() + mg = f[ig].LM + + if not monomial_div(mg, mh): + G_new.add(ig) + + G_new.add(ih) + + return G_new, B_new + # end of update ################################ + + if not f: + return [] + + # replace f with a reduced list of initial polynomials; see [BW] page 203 + f1 = f[:] + + while True: + f = f1[:] + f1 = [] + + for i in range(len(f)): + p = f[i] + r = p.rem(f[:i]) + + if r: + f1.append(r.monic()) + + if f == f1: + break + + I = {} # ip = I[p]; p = f[ip] + F = set() # set of indices of polynomials + G = set() # set of indices of intermediate would-be Groebner basis + CP = set() # set of pairs of indices of critical pairs + + for i, h in enumerate(f): + I[h] = i + F.add(i) + + ##################################### + # algorithm GROEBNERNEWS2 in [BW] page 232 + + while F: + # select p with minimum monomial according to the monomial ordering + h = min([f[x] for x in F], key=lambda f: order(f.LM)) + ih = I[h] + F.remove(ih) + G, CP = update(G, CP, ih) + + # count the number of critical pairs which reduce to zero + reductions_to_zero = 0 + + while CP: + ig1, ig2 = select(CP) + CP.remove((ig1, ig2)) + + h = spoly(f[ig1], f[ig2], ring) + # ordering divisors is on average more efficient [Cox] page 111 + G1 = sorted(G, key=lambda g: order(f[g].LM)) + ht = normal(h, G1) + + if ht: + G, CP = update(G, CP, ht[1]) + else: + reductions_to_zero += 1 + + ###################################### + # now G is a Groebner basis; reduce it + Gr = set() + + for ig in G: + ht = normal(f[ig], G - {ig}) + + if ht: + Gr.add(ht[1]) + + Gr = [f[ig] for ig in Gr] + + # order according to the monomial ordering + Gr = sorted(Gr, key=lambda f: order(f.LM), reverse=True) + + return Gr + +def spoly(p1, p2, ring): + """ + Compute LCM(LM(p1), 
LM(p2))/LM(p1)*p1 - LCM(LM(p1), LM(p2))/LM(p2)*p2 + This is the S-poly provided p1 and p2 are monic + """ + LM1 = p1.LM + LM2 = p2.LM + LCM12 = ring.monomial_lcm(LM1, LM2) + m1 = ring.monomial_div(LCM12, LM1) + m2 = ring.monomial_div(LCM12, LM2) + s1 = p1.mul_monom(m1) + s2 = p2.mul_monom(m2) + s = s1 - s2 + return s + +# F5B + +# convenience functions + + +def Sign(f): + return f[0] + + +def Polyn(f): + return f[1] + + +def Num(f): + return f[2] + + +def sig(monomial, index): + return (monomial, index) + + +def lbp(signature, polynomial, number): + return (signature, polynomial, number) + +# signature functions + + +def sig_cmp(u, v, order): + """ + Compare two signatures by extending the term order to K[X]^n. + + u < v iff + - the index of v is greater than the index of u + or + - the index of v is equal to the index of u and u[0] < v[0] w.r.t. order + + u > v otherwise + """ + if u[1] > v[1]: + return -1 + if u[1] == v[1]: + #if u[0] == v[0]: + # return 0 + if order(u[0]) < order(v[0]): + return -1 + return 1 + + +def sig_key(s, order): + """ + Key for comparing two signatures. + + s = (m, k), t = (n, l) + + s < t iff [k > l] or [k == l and m < n] + s > t otherwise + """ + return (-s[1], order(s[0])) + + +def sig_mult(s, m): + """ + Multiply a signature by a monomial. + + The product of a signature (m, i) and a monomial n is defined as + (m * t, i). + """ + return sig(monomial_mul(s[0], m), s[1]) + +# labeled polynomial functions + + +def lbp_sub(f, g): + """ + Subtract labeled polynomial g from f. + + The signature and number of the difference of f and g are signature + and number of the maximum of f and g, w.r.t. lbp_cmp. + """ + if sig_cmp(Sign(f), Sign(g), Polyn(f).ring.order) < 0: + max_poly = g + else: + max_poly = f + + ret = Polyn(f) - Polyn(g) + + return lbp(Sign(max_poly), ret, Num(max_poly)) + + +def lbp_mul_term(f, cx): + """ + Multiply a labeled polynomial with a term. + + The product of a labeled polynomial (s, p, k) by a monomial is + defined as (m * s, m * p, k). + """ + return lbp(sig_mult(Sign(f), cx[0]), Polyn(f).mul_term(cx), Num(f)) + + +def lbp_cmp(f, g): + """ + Compare two labeled polynomials. + + f < g iff + - Sign(f) < Sign(g) + or + - Sign(f) == Sign(g) and Num(f) > Num(g) + + f > g otherwise + """ + if sig_cmp(Sign(f), Sign(g), Polyn(f).ring.order) == -1: + return -1 + if Sign(f) == Sign(g): + if Num(f) > Num(g): + return -1 + #if Num(f) == Num(g): + # return 0 + return 1 + + +def lbp_key(f): + """ + Key for comparing two labeled polynomials. + """ + return (sig_key(Sign(f), Polyn(f).ring.order), -Num(f)) + +# algorithm and helper functions + + +def critical_pair(f, g, ring): + """ + Compute the critical pair corresponding to two labeled polynomials. + + A critical pair is a tuple (um, f, vm, g), where um and vm are + terms such that um * f - vm * g is the S-polynomial of f and g (so, + wlog assume um * f > vm * g). + For performance sake, a critical pair is represented as a tuple + (Sign(um * f), um, f, Sign(vm * g), vm, g), since um * f creates + a new, relatively expensive object in memory, whereas Sign(um * + f) and um are lightweight and f (in the tuple) is a reference to + an already existing object in memory. 
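+
+    A rough illustration of that layout on a tiny lex ring (only the monomial
+    parts of the two cofactor terms are inspected here)::
+
+    >>> from sympy.polys.groebnertools import critical_pair, lbp, sig
+    >>> from sympy.polys import ring, QQ, lex
+
+    >>> R, x, y = ring("x,y", QQ, lex)
+    >>> f = lbp(sig((0, 0), 1), x**2 + y, 1)
+    >>> g = lbp(sig((0, 0), 2), x*y + 1, 2)
+    >>> cp = critical_pair(f, g, R)
+    >>> cp[1][0], cp[4][0]
+    ((0, 1), (1, 0))
+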
+ """ + domain = ring.domain + + ltf = Polyn(f).LT + ltg = Polyn(g).LT + lt = (monomial_lcm(ltf[0], ltg[0]), domain.one) + + um = term_div(lt, ltf, domain) + vm = term_div(lt, ltg, domain) + + # The full information is not needed (now), so only the product + # with the leading term is considered: + fr = lbp_mul_term(lbp(Sign(f), Polyn(f).leading_term(), Num(f)), um) + gr = lbp_mul_term(lbp(Sign(g), Polyn(g).leading_term(), Num(g)), vm) + + # return in proper order, such that the S-polynomial is just + # u_first * f_first - u_second * f_second: + if lbp_cmp(fr, gr) == -1: + return (Sign(gr), vm, g, Sign(fr), um, f) + else: + return (Sign(fr), um, f, Sign(gr), vm, g) + + +def cp_cmp(c, d): + """ + Compare two critical pairs c and d. + + c < d iff + - lbp(c[0], _, Num(c[2]) < lbp(d[0], _, Num(d[2])) (this + corresponds to um_c * f_c and um_d * f_d) + or + - lbp(c[0], _, Num(c[2]) >< lbp(d[0], _, Num(d[2])) and + lbp(c[3], _, Num(c[5])) < lbp(d[3], _, Num(d[5])) (this + corresponds to vm_c * g_c and vm_d * g_d) + + c > d otherwise + """ + zero = Polyn(c[2]).ring.zero + + c0 = lbp(c[0], zero, Num(c[2])) + d0 = lbp(d[0], zero, Num(d[2])) + + r = lbp_cmp(c0, d0) + + if r == -1: + return -1 + if r == 0: + c1 = lbp(c[3], zero, Num(c[5])) + d1 = lbp(d[3], zero, Num(d[5])) + + r = lbp_cmp(c1, d1) + + if r == -1: + return -1 + #if r == 0: + # return 0 + return 1 + + +def cp_key(c, ring): + """ + Key for comparing critical pairs. + """ + return (lbp_key(lbp(c[0], ring.zero, Num(c[2]))), lbp_key(lbp(c[3], ring.zero, Num(c[5])))) + + +def s_poly(cp): + """ + Compute the S-polynomial of a critical pair. + + The S-polynomial of a critical pair cp is cp[1] * cp[2] - cp[4] * cp[5]. + """ + return lbp_sub(lbp_mul_term(cp[2], cp[1]), lbp_mul_term(cp[5], cp[4])) + + +def is_rewritable_or_comparable(sign, num, B): + """ + Check if a labeled polynomial is redundant by checking if its + signature and number imply rewritability or comparability. + + (sign, num) is comparable if there exists a labeled polynomial + h in B, such that sign[1] (the index) is less than Sign(h)[1] + and sign[0] is divisible by the leading monomial of h. + + (sign, num) is rewritable if there exists a labeled polynomial + h in B, such thatsign[1] is equal to Sign(h)[1], num < Num(h) + and sign[0] is divisible by Sign(h)[0]. + """ + for h in B: + # comparable + if sign[1] < Sign(h)[1]: + if monomial_divides(Polyn(h).LM, sign[0]): + return True + + # rewritable + if sign[1] == Sign(h)[1]: + if num < Num(h): + if monomial_divides(Sign(h)[0], sign[0]): + return True + return False + + +def f5_reduce(f, B): + """ + F5-reduce a labeled polynomial f by B. + + Continuously searches for non-zero labeled polynomial h in B, such + that the leading term lt_h of h divides the leading term lt_f of + f and Sign(lt_h * h) < Sign(f). If such a labeled polynomial h is + found, f gets replaced by f - lt_f / lt_h * h. If no such h can be + found or f is 0, f is no further F5-reducible and f gets returned. 
+ + A polynomial that is reducible in the usual sense need not be + F5-reducible, e.g.: + + >>> from sympy.polys.groebnertools import lbp, sig, f5_reduce, Polyn + >>> from sympy.polys import ring, QQ, lex + + >>> R, x,y,z = ring("x,y,z", QQ, lex) + + >>> f = lbp(sig((1, 1, 1), 4), x, 3) + >>> g = lbp(sig((0, 0, 0), 2), x, 2) + + >>> Polyn(f).rem([Polyn(g)]) + 0 + >>> f5_reduce(f, [g]) + (((1, 1, 1), 4), x, 3) + + """ + order = Polyn(f).ring.order + domain = Polyn(f).ring.domain + + if not Polyn(f): + return f + + while True: + g = f + + for h in B: + if Polyn(h): + if monomial_divides(Polyn(h).LM, Polyn(f).LM): + t = term_div(Polyn(f).LT, Polyn(h).LT, domain) + if sig_cmp(sig_mult(Sign(h), t[0]), Sign(f), order) < 0: + # The following check need not be done and is in general slower than without. + #if not is_rewritable_or_comparable(Sign(gp), Num(gp), B): + hp = lbp_mul_term(h, t) + f = lbp_sub(f, hp) + break + + if g == f or not Polyn(f): + return f + + +def _f5b(F, ring): + """ + Computes a reduced Groebner basis for the ideal generated by F. + + f5b is an implementation of the F5B algorithm by Yao Sun and + Dingkang Wang. Similarly to Buchberger's algorithm, the algorithm + proceeds by computing critical pairs, computing the S-polynomial, + reducing it and adjoining the reduced S-polynomial if it is not 0. + + Unlike Buchberger's algorithm, each polynomial contains additional + information, namely a signature and a number. The signature + specifies the path of computation (i.e. from which polynomial in + the original basis was it derived and how), the number says when + the polynomial was added to the basis. With this information it + is (often) possible to decide if an S-polynomial will reduce to + 0 and can be discarded. + + Optimizations include: Reducing the generators before computing + a Groebner basis, removing redundant critical pairs when a new + polynomial enters the basis and sorting the critical pairs and + the current basis. + + Once a Groebner basis has been found, it gets reduced. + + References + ========== + + .. [1] Yao Sun, Dingkang Wang: "A New Proof for the Correctness of F5 + (F5-Like) Algorithm", https://arxiv.org/abs/1004.0084 (specifically + v4) + + .. [2] Thomas Becker, Volker Weispfenning, Groebner bases: A computational + approach to commutative algebra, 1993, p. 203, 216 + """ + order = ring.order + + # reduce polynomials (like in Mario Pernici's implementation) (Becker, Weispfenning, p. 
203) + B = F + while True: + F = B + B = [] + + for i in range(len(F)): + p = F[i] + r = p.rem(F[:i]) + + if r: + B.append(r) + + if F == B: + break + + # basis + B = [lbp(sig(ring.zero_monom, i + 1), F[i], i + 1) for i in range(len(F))] + B.sort(key=lambda f: order(Polyn(f).LM), reverse=True) + + # critical pairs + CP = [critical_pair(B[i], B[j], ring) for i in range(len(B)) for j in range(i + 1, len(B))] + CP.sort(key=lambda cp: cp_key(cp, ring), reverse=True) + + k = len(B) + + reductions_to_zero = 0 + + while len(CP): + cp = CP.pop() + + # discard redundant critical pairs: + if is_rewritable_or_comparable(cp[0], Num(cp[2]), B): + continue + if is_rewritable_or_comparable(cp[3], Num(cp[5]), B): + continue + + s = s_poly(cp) + + p = f5_reduce(s, B) + + p = lbp(Sign(p), Polyn(p).monic(), k + 1) + + if Polyn(p): + # remove old critical pairs, that become redundant when adding p: + indices = [] + for i, cp in enumerate(CP): + if is_rewritable_or_comparable(cp[0], Num(cp[2]), [p]): + indices.append(i) + elif is_rewritable_or_comparable(cp[3], Num(cp[5]), [p]): + indices.append(i) + + for i in reversed(indices): + del CP[i] + + # only add new critical pairs that are not made redundant by p: + for g in B: + if Polyn(g): + cp = critical_pair(p, g, ring) + if is_rewritable_or_comparable(cp[0], Num(cp[2]), [p]): + continue + elif is_rewritable_or_comparable(cp[3], Num(cp[5]), [p]): + continue + + CP.append(cp) + + # sort (other sorting methods/selection strategies were not as successful) + CP.sort(key=lambda cp: cp_key(cp, ring), reverse=True) + + # insert p into B: + m = Polyn(p).LM + if order(m) <= order(Polyn(B[-1]).LM): + B.append(p) + else: + for i, q in enumerate(B): + if order(m) > order(Polyn(q).LM): + B.insert(i, p) + break + + k += 1 + + #print(len(B), len(CP), "%d critical pairs removed" % len(indices)) + else: + reductions_to_zero += 1 + + # reduce Groebner basis: + H = [Polyn(g).monic() for g in B] + H = red_groebner(H, ring) + + return sorted(H, key=lambda f: order(f.LM), reverse=True) + + +def red_groebner(G, ring): + """ + Compute reduced Groebner basis, from BeckerWeispfenning93, p. 216 + + Selects a subset of generators, that already generate the ideal + and computes a reduced Groebner basis for them. + """ + def reduction(P): + """ + The actual reduction algorithm. + """ + Q = [] + for i, p in enumerate(P): + h = p.rem(P[:i] + P[i + 1:]) + if h: + Q.append(h) + + return [p.monic() for p in Q] + + F = G + H = [] + + while F: + f0 = F.pop() + + if not any(monomial_divides(f.LM, f0.LM) for f in F + H): + H.append(f0) + + # Becker, Weispfenning, p. 217: H is Groebner basis of the ideal generated by G. + return reduction(H) + + +def is_groebner(G, ring): + """ + Check if G is a Groebner basis. + """ + for i in range(len(G)): + for j in range(i + 1, len(G)): + s = spoly(G[i], G[j], ring) + s = s.rem(G) + if s: + return False + + return True + + +def is_minimal(G, ring): + """ + Checks if G is a minimal Groebner basis. + """ + order = ring.order + domain = ring.domain + + G.sort(key=lambda g: order(g.LM)) + + for i, g in enumerate(G): + if g.LC != domain.one: + return False + + for h in G[:i] + G[i + 1:]: + if monomial_divides(h.LM, g.LM): + return False + + return True + + +def is_reduced(G, ring): + """ + Checks if G is a reduced Groebner basis. 
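+
+    A minimal sketch (the basis returned by ``groebner`` is already reduced,
+    so it should pass)::
+
+    >>> from sympy.polys.groebnertools import groebner, is_reduced
+    >>> from sympy.polys import ring, QQ, lex
+
+    >>> R, x, y = ring("x,y", QQ, lex)
+    >>> G = groebner([x**2 + y, x*y + 1], R)
+    >>> is_reduced(G, R)
+    True
+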
+ """ + order = ring.order + domain = ring.domain + + G.sort(key=lambda g: order(g.LM)) + + for i, g in enumerate(G): + if g.LC != domain.one: + return False + + for term in g.terms(): + for h in G[:i] + G[i + 1:]: + if monomial_divides(h.LM, term[0]): + return False + + return True + +def groebner_lcm(f, g): + """ + Computes LCM of two polynomials using Groebner bases. + + The LCM is computed as the unique generator of the intersection + of the two ideals generated by `f` and `g`. The approach is to + compute a Groebner basis with respect to lexicographic ordering + of `t*f` and `(1 - t)*g`, where `t` is an unrelated variable and + then filtering out the solution that does not contain `t`. + + References + ========== + + .. [1] [Cox97]_ + + """ + if f.ring != g.ring: + raise ValueError("Values should be equal") + + ring = f.ring + domain = ring.domain + + if not f or not g: + return ring.zero + + if len(f) <= 1 and len(g) <= 1: + monom = monomial_lcm(f.LM, g.LM) + coeff = domain.lcm(f.LC, g.LC) + return ring.term_new(monom, coeff) + + fc, f = f.primitive() + gc, g = g.primitive() + + lcm = domain.lcm(fc, gc) + + f_terms = [ ((1,) + monom, coeff) for monom, coeff in f.terms() ] + g_terms = [ ((0,) + monom, coeff) for monom, coeff in g.terms() ] \ + + [ ((1,) + monom,-coeff) for monom, coeff in g.terms() ] + + t = Dummy("t") + t_ring = ring.clone(symbols=(t,) + ring.symbols, order=lex) + + F = t_ring.from_terms(f_terms) + G = t_ring.from_terms(g_terms) + + basis = groebner([F, G], t_ring) + + def is_independent(h, j): + return not any(monom[j] for monom in h.monoms()) + + H = [ h for h in basis if is_independent(h, 0) ] + + h_terms = [ (monom[1:], coeff*lcm) for monom, coeff in H[0].terms() ] + h = ring.from_terms(h_terms) + + return h + +def groebner_gcd(f, g): + """Computes GCD of two polynomials using Groebner bases. """ + if f.ring != g.ring: + raise ValueError("Values should be equal") + domain = f.ring.domain + + if not domain.is_Field: + fc, f = f.primitive() + gc, g = g.primitive() + gcd = domain.gcd(fc, gc) + + H = (f*g).quo([groebner_lcm(f, g)]) + + if len(H) != 1: + raise ValueError("Length should be 1") + h = H[0] + + if not domain.is_Field: + return gcd*h + else: + return h.monic() diff --git a/.venv/Lib/site-packages/sympy/polys/heuristicgcd.py b/.venv/Lib/site-packages/sympy/polys/heuristicgcd.py new file mode 100644 index 0000000000000000000000000000000000000000..ea9eeac952e88552d729f0bd3073dee21b6ab68b --- /dev/null +++ b/.venv/Lib/site-packages/sympy/polys/heuristicgcd.py @@ -0,0 +1,149 @@ +"""Heuristic polynomial GCD algorithm (HEUGCD). """ + +from .polyerrors import HeuristicGCDFailed + +HEU_GCD_MAX = 6 + +def heugcd(f, g): + """ + Heuristic polynomial GCD in ``Z[X]``. + + Given univariate polynomials ``f`` and ``g`` in ``Z[X]``, returns + their GCD and cofactors, i.e. polynomials ``h``, ``cff`` and ``cfg`` + such that:: + + h = gcd(f, g), cff = quo(f, h) and cfg = quo(g, h) + + The algorithm is purely heuristic which means it may fail to compute + the GCD. This will be signaled by raising an exception. In this case + you will need to switch to another GCD method. + + The algorithm computes the polynomial GCD by evaluating polynomials + ``f`` and ``g`` at certain points and computing (fast) integer GCD + of those evaluations. The polynomial GCD is recovered from the integer + image by interpolation. The evaluation process reduces f and g variable + by variable into a large integer. The final step is to verify if the + interpolated polynomial is the correct GCD. 
This gives cofactors of + the input polynomials as a side effect. + + Examples + ======== + + >>> from sympy.polys.heuristicgcd import heugcd + >>> from sympy.polys import ring, ZZ + + >>> R, x,y, = ring("x,y", ZZ) + + >>> f = x**2 + 2*x*y + y**2 + >>> g = x**2 + x*y + + >>> h, cff, cfg = heugcd(f, g) + >>> h, cff, cfg + (x + y, x + y, x) + + >>> cff*h == f + True + >>> cfg*h == g + True + + References + ========== + + .. [1] [Liao95]_ + + """ + assert f.ring == g.ring and f.ring.domain.is_ZZ + + ring = f.ring + x0 = ring.gens[0] + domain = ring.domain + + gcd, f, g = f.extract_ground(g) + + f_norm = f.max_norm() + g_norm = g.max_norm() + + B = domain(2*min(f_norm, g_norm) + 29) + + x = max(min(B, 99*domain.sqrt(B)), + 2*min(f_norm // abs(f.LC), + g_norm // abs(g.LC)) + 4) + + for i in range(0, HEU_GCD_MAX): + ff = f.evaluate(x0, x) + gg = g.evaluate(x0, x) + + if ff and gg: + if ring.ngens == 1: + h, cff, cfg = domain.cofactors(ff, gg) + else: + h, cff, cfg = heugcd(ff, gg) + + h = _gcd_interpolate(h, x, ring) + h = h.primitive()[1] + + cff_, r = f.div(h) + + if not r: + cfg_, r = g.div(h) + + if not r: + h = h.mul_ground(gcd) + return h, cff_, cfg_ + + cff = _gcd_interpolate(cff, x, ring) + + h, r = f.div(cff) + + if not r: + cfg_, r = g.div(h) + + if not r: + h = h.mul_ground(gcd) + return h, cff, cfg_ + + cfg = _gcd_interpolate(cfg, x, ring) + + h, r = g.div(cfg) + + if not r: + cff_, r = f.div(h) + + if not r: + h = h.mul_ground(gcd) + return h, cff_, cfg + + x = 73794*x * domain.sqrt(domain.sqrt(x)) // 27011 + + raise HeuristicGCDFailed('no luck') + +def _gcd_interpolate(h, x, ring): + """Interpolate polynomial GCD from integer GCD. """ + f, i = ring.zero, 0 + + # TODO: don't expose poly repr implementation details + if ring.ngens == 1: + while h: + g = h % x + if g > x // 2: g -= x + h = (h - g) // x + + # f += X**i*g + if g: + f[(i,)] = g + i += 1 + else: + while h: + g = h.trunc_ground(x) + h = (h - g).quo_ground(x) + + # f += X**i*g + if g: + for monom, coeff in g.iterterms(): + f[(i,) + monom] = coeff + i += 1 + + if f.LC < 0: + return -f + else: + return f diff --git a/.venv/Lib/site-packages/sympy/polys/modulargcd.py b/.venv/Lib/site-packages/sympy/polys/modulargcd.py new file mode 100644 index 0000000000000000000000000000000000000000..20dfd33d919703873e9fb5d3039abb351297df61 --- /dev/null +++ b/.venv/Lib/site-packages/sympy/polys/modulargcd.py @@ -0,0 +1,2277 @@ +from sympy.core.symbol import Dummy +from sympy.ntheory import nextprime +from sympy.ntheory.modular import crt +from sympy.polys.domains import PolynomialRing +from sympy.polys.galoistools import ( + gf_gcd, gf_from_dict, gf_gcdex, gf_div, gf_lcm) +from sympy.polys.polyerrors import ModularGCDFailed + +from mpmath import sqrt +import random + + +def _trivial_gcd(f, g): + """ + Compute the GCD of two polynomials in trivial cases, i.e. when one + or both polynomials are zero. + """ + ring = f.ring + + if not (f or g): + return ring.zero, ring.zero, ring.zero + elif not f: + if g.LC < ring.domain.zero: + return -g, ring.zero, -ring.one + else: + return g, ring.zero, ring.one + elif not g: + if f.LC < ring.domain.zero: + return -f, -ring.one, ring.zero + else: + return f, ring.one, ring.zero + return None + + +def _gf_gcd(fp, gp, p): + r""" + Compute the GCD of two univariate polynomials in `\mathbb{Z}_p[x]`. 
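+
+    A short doctest (the polynomials and the prime below are arbitrary
+    illustrative choices; the inputs are already reduced modulo `p`)::
+
+        >>> from sympy.polys.modulargcd import _gf_gcd
+        >>> from sympy.polys import ring, ZZ
+        >>> R, x = ring("x", ZZ)
+        >>> _gf_gcd(x**2 - 1, x**2 + 2*x + 1, 5)
+        x + 1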
+ """ + dom = fp.ring.domain + + while gp: + rem = fp + deg = gp.degree() + lcinv = dom.invert(gp.LC, p) + + while True: + degrem = rem.degree() + if degrem < deg: + break + rem = (rem - gp.mul_monom((degrem - deg,)).mul_ground(lcinv * rem.LC)).trunc_ground(p) + + fp = gp + gp = rem + + return fp.mul_ground(dom.invert(fp.LC, p)).trunc_ground(p) + + +def _degree_bound_univariate(f, g): + r""" + Compute an upper bound for the degree of the GCD of two univariate + integer polynomials `f` and `g`. + + The function chooses a suitable prime `p` and computes the GCD of + `f` and `g` in `\mathbb{Z}_p[x]`. The choice of `p` guarantees that + the degree in `\mathbb{Z}_p[x]` is greater than or equal to the degree + in `\mathbb{Z}[x]`. + + Parameters + ========== + + f : PolyElement + univariate integer polynomial + g : PolyElement + univariate integer polynomial + + """ + gamma = f.ring.domain.gcd(f.LC, g.LC) + p = 1 + + p = nextprime(p) + while gamma % p == 0: + p = nextprime(p) + + fp = f.trunc_ground(p) + gp = g.trunc_ground(p) + hp = _gf_gcd(fp, gp, p) + deghp = hp.degree() + return deghp + + +def _chinese_remainder_reconstruction_univariate(hp, hq, p, q): + r""" + Construct a polynomial `h_{pq}` in `\mathbb{Z}_{p q}[x]` such that + + .. math :: + + h_{pq} = h_p \; \mathrm{mod} \, p + + h_{pq} = h_q \; \mathrm{mod} \, q + + for relatively prime integers `p` and `q` and polynomials + `h_p` and `h_q` in `\mathbb{Z}_p[x]` and `\mathbb{Z}_q[x]` + respectively. + + The coefficients of the polynomial `h_{pq}` are computed with the + Chinese Remainder Theorem. The symmetric representation in + `\mathbb{Z}_p[x]`, `\mathbb{Z}_q[x]` and `\mathbb{Z}_{p q}[x]` is used. + It is assumed that `h_p` and `h_q` have the same degree. + + Parameters + ========== + + hp : PolyElement + univariate integer polynomial with coefficients in `\mathbb{Z}_p` + hq : PolyElement + univariate integer polynomial with coefficients in `\mathbb{Z}_q` + p : Integer + modulus of `h_p`, relatively prime to `q` + q : Integer + modulus of `h_q`, relatively prime to `p` + + Examples + ======== + + >>> from sympy.polys.modulargcd import _chinese_remainder_reconstruction_univariate + >>> from sympy.polys import ring, ZZ + + >>> R, x = ring("x", ZZ) + >>> p = 3 + >>> q = 5 + + >>> hp = -x**3 - 1 + >>> hq = 2*x**3 - 2*x**2 + x + + >>> hpq = _chinese_remainder_reconstruction_univariate(hp, hq, p, q) + >>> hpq + 2*x**3 + 3*x**2 + 6*x + 5 + + >>> hpq.trunc_ground(p) == hp + True + >>> hpq.trunc_ground(q) == hq + True + + """ + n = hp.degree() + x = hp.ring.gens[0] + hpq = hp.ring.zero + + for i in range(n+1): + hpq[(i,)] = crt([p, q], [hp.coeff(x**i), hq.coeff(x**i)], symmetric=True)[0] + + hpq.strip_zero() + return hpq + + +def modgcd_univariate(f, g): + r""" + Computes the GCD of two polynomials in `\mathbb{Z}[x]` using a modular + algorithm. + + The algorithm computes the GCD of two univariate integer polynomials + `f` and `g` by computing the GCD in `\mathbb{Z}_p[x]` for suitable + primes `p` and then reconstructing the coefficients with the Chinese + Remainder Theorem. Trial division is only made for candidates which + are very likely the desired GCD. + + Parameters + ========== + + f : PolyElement + univariate integer polynomial + g : PolyElement + univariate integer polynomial + + Returns + ======= + + h : PolyElement + GCD of the polynomials `f` and `g` + cff : PolyElement + cofactor of `f`, i.e. `\frac{f}{h}` + cfg : PolyElement + cofactor of `g`, i.e. 
`\frac{g}{h}` + + Examples + ======== + + >>> from sympy.polys.modulargcd import modgcd_univariate + >>> from sympy.polys import ring, ZZ + + >>> R, x = ring("x", ZZ) + + >>> f = x**5 - 1 + >>> g = x - 1 + + >>> h, cff, cfg = modgcd_univariate(f, g) + >>> h, cff, cfg + (x - 1, x**4 + x**3 + x**2 + x + 1, 1) + + >>> cff * h == f + True + >>> cfg * h == g + True + + >>> f = 6*x**2 - 6 + >>> g = 2*x**2 + 4*x + 2 + + >>> h, cff, cfg = modgcd_univariate(f, g) + >>> h, cff, cfg + (2*x + 2, 3*x - 3, x + 1) + + >>> cff * h == f + True + >>> cfg * h == g + True + + References + ========== + + 1. [Monagan00]_ + + """ + assert f.ring == g.ring and f.ring.domain.is_ZZ + + result = _trivial_gcd(f, g) + if result is not None: + return result + + ring = f.ring + + cf, f = f.primitive() + cg, g = g.primitive() + ch = ring.domain.gcd(cf, cg) + + bound = _degree_bound_univariate(f, g) + if bound == 0: + return ring(ch), f.mul_ground(cf // ch), g.mul_ground(cg // ch) + + gamma = ring.domain.gcd(f.LC, g.LC) + m = 1 + p = 1 + + while True: + p = nextprime(p) + while gamma % p == 0: + p = nextprime(p) + + fp = f.trunc_ground(p) + gp = g.trunc_ground(p) + hp = _gf_gcd(fp, gp, p) + deghp = hp.degree() + + if deghp > bound: + continue + elif deghp < bound: + m = 1 + bound = deghp + continue + + hp = hp.mul_ground(gamma).trunc_ground(p) + if m == 1: + m = p + hlastm = hp + continue + + hm = _chinese_remainder_reconstruction_univariate(hp, hlastm, p, m) + m *= p + + if not hm == hlastm: + hlastm = hm + continue + + h = hm.quo_ground(hm.content()) + fquo, frem = f.div(h) + gquo, grem = g.div(h) + if not frem and not grem: + if h.LC < 0: + ch = -ch + h = h.mul_ground(ch) + cff = fquo.mul_ground(cf // ch) + cfg = gquo.mul_ground(cg // ch) + return h, cff, cfg + + +def _primitive(f, p): + r""" + Compute the content and the primitive part of a polynomial in + `\mathbb{Z}_p[x_0, \ldots, x_{k-2}, y] \cong \mathbb{Z}_p[y][x_0, \ldots, x_{k-2}]`. + + Parameters + ========== + + f : PolyElement + integer polynomial in `\mathbb{Z}_p[x0, \ldots, x{k-2}, y]` + p : Integer + modulus of `f` + + Returns + ======= + + contf : PolyElement + integer polynomial in `\mathbb{Z}_p[y]`, content of `f` + ppf : PolyElement + primitive part of `f`, i.e. `\frac{f}{contf}` + + Examples + ======== + + >>> from sympy.polys.modulargcd import _primitive + >>> from sympy.polys import ring, ZZ + + >>> R, x, y = ring("x, y", ZZ) + >>> p = 3 + + >>> f = x**2*y**2 + x**2*y - y**2 - y + >>> _primitive(f, p) + (y**2 + y, x**2 - 1) + + >>> R, x, y, z = ring("x, y, z", ZZ) + + >>> f = x*y*z - y**2*z**2 + >>> _primitive(f, p) + (z, x*y - y**2*z) + + """ + ring = f.ring + dom = ring.domain + k = ring.ngens + + coeffs = {} + for monom, coeff in f.iterterms(): + if monom[:-1] not in coeffs: + coeffs[monom[:-1]] = {} + coeffs[monom[:-1]][monom[-1]] = coeff + + cont = [] + for coeff in iter(coeffs.values()): + cont = gf_gcd(cont, gf_from_dict(coeff, p, dom), p, dom) + + yring = ring.clone(symbols=ring.symbols[k-1]) + contf = yring.from_dense(cont).trunc_ground(p) + + return contf, f.quo(contf.set_ring(ring)) + + +def _deg(f): + r""" + Compute the degree of a multivariate polynomial + `f \in K[x_0, \ldots, x_{k-2}, y] \cong K[y][x_0, \ldots, x_{k-2}]`. 
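+
+    Note that the result is the exponent vector of the leading monomial of
+    `f` with respect to the comparison of the truncated exponent tuples
+    (which is what :func:`_LC` relies on), not the variable-wise maximum of
+    the degrees.  For example (an extra illustrative doctest)::
+
+        >>> from sympy.polys.modulargcd import _deg
+        >>> from sympy.polys import ring, ZZ
+        >>> R, x, y, z = ring("x, y, z", ZZ)
+        >>> _deg(x*y**3*z + x**2*z)
+        (2, 0)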
+ + Parameters + ========== + + f : PolyElement + polynomial in `K[x_0, \ldots, x_{k-2}, y]` + + Returns + ======= + + degf : Integer tuple + degree of `f` in `x_0, \ldots, x_{k-2}` + + Examples + ======== + + >>> from sympy.polys.modulargcd import _deg + >>> from sympy.polys import ring, ZZ + + >>> R, x, y = ring("x, y", ZZ) + + >>> f = x**2*y**2 + x**2*y - 1 + >>> _deg(f) + (2,) + + >>> R, x, y, z = ring("x, y, z", ZZ) + + >>> f = x**2*y**2 + x**2*y - 1 + >>> _deg(f) + (2, 2) + + >>> f = x*y*z - y**2*z**2 + >>> _deg(f) + (1, 1) + + """ + k = f.ring.ngens + degf = (0,) * (k-1) + for monom in f.itermonoms(): + if monom[:-1] > degf: + degf = monom[:-1] + return degf + + +def _LC(f): + r""" + Compute the leading coefficient of a multivariate polynomial + `f \in K[x_0, \ldots, x_{k-2}, y] \cong K[y][x_0, \ldots, x_{k-2}]`. + + Parameters + ========== + + f : PolyElement + polynomial in `K[x_0, \ldots, x_{k-2}, y]` + + Returns + ======= + + lcf : PolyElement + polynomial in `K[y]`, leading coefficient of `f` + + Examples + ======== + + >>> from sympy.polys.modulargcd import _LC + >>> from sympy.polys import ring, ZZ + + >>> R, x, y = ring("x, y", ZZ) + + >>> f = x**2*y**2 + x**2*y - 1 + >>> _LC(f) + y**2 + y + + >>> R, x, y, z = ring("x, y, z", ZZ) + + >>> f = x**2*y**2 + x**2*y - 1 + >>> _LC(f) + 1 + + >>> f = x*y*z - y**2*z**2 + >>> _LC(f) + z + + """ + ring = f.ring + k = ring.ngens + yring = ring.clone(symbols=ring.symbols[k-1]) + y = yring.gens[0] + degf = _deg(f) + + lcf = yring.zero + for monom, coeff in f.iterterms(): + if monom[:-1] == degf: + lcf += coeff*y**monom[-1] + return lcf + + +def _swap(f, i): + """ + Make the variable `x_i` the leading one in a multivariate polynomial `f`. + """ + ring = f.ring + fswap = ring.zero + for monom, coeff in f.iterterms(): + monomswap = (monom[i],) + monom[:i] + monom[i+1:] + fswap[monomswap] = coeff + return fswap + + +def _degree_bound_bivariate(f, g): + r""" + Compute upper degree bounds for the GCD of two bivariate + integer polynomials `f` and `g`. + + The GCD is viewed as a polynomial in `\mathbb{Z}[y][x]` and the + function returns an upper bound for its degree and one for the degree + of its content. This is done by choosing a suitable prime `p` and + computing the GCD of the contents of `f \; \mathrm{mod} \, p` and + `g \; \mathrm{mod} \, p`. The choice of `p` guarantees that the degree + of the content in `\mathbb{Z}_p[y]` is greater than or equal to the + degree in `\mathbb{Z}[y]`. To obtain the degree bound in the variable + `x`, the polynomials are evaluated at `y = a` for a suitable + `a \in \mathbb{Z}_p` and then their GCD in `\mathbb{Z}_p[x]` is + computed. If no such `a` exists, i.e. the degree in `\mathbb{Z}_p[x]` + is always smaller than the one in `\mathbb{Z}[y][x]`, then the bound is + set to the minimum of the degrees of `f` and `g` in `x`. + + Parameters + ========== + + f : PolyElement + bivariate integer polynomial + g : PolyElement + bivariate integer polynomial + + Returns + ======= + + xbound : Integer + upper bound for the degree of the GCD of the polynomials `f` and + `g` in the variable `x` + ycontbound : Integer + upper bound for the degree of the content of the GCD of the + polynomials `f` and `g` in the variable `y` + + References + ========== + + 1. 
[Monagan00]_ + + """ + ring = f.ring + + gamma1 = ring.domain.gcd(f.LC, g.LC) + gamma2 = ring.domain.gcd(_swap(f, 1).LC, _swap(g, 1).LC) + badprimes = gamma1 * gamma2 + p = 1 + + p = nextprime(p) + while badprimes % p == 0: + p = nextprime(p) + + fp = f.trunc_ground(p) + gp = g.trunc_ground(p) + contfp, fp = _primitive(fp, p) + contgp, gp = _primitive(gp, p) + conthp = _gf_gcd(contfp, contgp, p) # polynomial in Z_p[y] + ycontbound = conthp.degree() + + # polynomial in Z_p[y] + delta = _gf_gcd(_LC(fp), _LC(gp), p) + + for a in range(p): + if not delta.evaluate(0, a) % p: + continue + fpa = fp.evaluate(1, a).trunc_ground(p) + gpa = gp.evaluate(1, a).trunc_ground(p) + hpa = _gf_gcd(fpa, gpa, p) + xbound = hpa.degree() + return xbound, ycontbound + + return min(fp.degree(), gp.degree()), ycontbound + + +def _chinese_remainder_reconstruction_multivariate(hp, hq, p, q): + r""" + Construct a polynomial `h_{pq}` in + `\mathbb{Z}_{p q}[x_0, \ldots, x_{k-1}]` such that + + .. math :: + + h_{pq} = h_p \; \mathrm{mod} \, p + + h_{pq} = h_q \; \mathrm{mod} \, q + + for relatively prime integers `p` and `q` and polynomials + `h_p` and `h_q` in `\mathbb{Z}_p[x_0, \ldots, x_{k-1}]` and + `\mathbb{Z}_q[x_0, \ldots, x_{k-1}]` respectively. + + The coefficients of the polynomial `h_{pq}` are computed with the + Chinese Remainder Theorem. The symmetric representation in + `\mathbb{Z}_p[x_0, \ldots, x_{k-1}]`, + `\mathbb{Z}_q[x_0, \ldots, x_{k-1}]` and + `\mathbb{Z}_{p q}[x_0, \ldots, x_{k-1}]` is used. + + Parameters + ========== + + hp : PolyElement + multivariate integer polynomial with coefficients in `\mathbb{Z}_p` + hq : PolyElement + multivariate integer polynomial with coefficients in `\mathbb{Z}_q` + p : Integer + modulus of `h_p`, relatively prime to `q` + q : Integer + modulus of `h_q`, relatively prime to `p` + + Examples + ======== + + >>> from sympy.polys.modulargcd import _chinese_remainder_reconstruction_multivariate + >>> from sympy.polys import ring, ZZ + + >>> R, x, y = ring("x, y", ZZ) + >>> p = 3 + >>> q = 5 + + >>> hp = x**3*y - x**2 - 1 + >>> hq = -x**3*y - 2*x*y**2 + 2 + + >>> hpq = _chinese_remainder_reconstruction_multivariate(hp, hq, p, q) + >>> hpq + 4*x**3*y + 5*x**2 + 3*x*y**2 + 2 + + >>> hpq.trunc_ground(p) == hp + True + >>> hpq.trunc_ground(q) == hq + True + + >>> R, x, y, z = ring("x, y, z", ZZ) + >>> p = 6 + >>> q = 5 + + >>> hp = 3*x**4 - y**3*z + z + >>> hq = -2*x**4 + z + + >>> hpq = _chinese_remainder_reconstruction_multivariate(hp, hq, p, q) + >>> hpq + 3*x**4 + 5*y**3*z + z + + >>> hpq.trunc_ground(p) == hp + True + >>> hpq.trunc_ground(q) == hq + True + + """ + hpmonoms = set(hp.monoms()) + hqmonoms = set(hq.monoms()) + monoms = hpmonoms.intersection(hqmonoms) + hpmonoms.difference_update(monoms) + hqmonoms.difference_update(monoms) + + zero = hp.ring.domain.zero + + hpq = hp.ring.zero + + if isinstance(hp.ring.domain, PolynomialRing): + crt_ = _chinese_remainder_reconstruction_multivariate + else: + def crt_(cp, cq, p, q): + return crt([p, q], [cp, cq], symmetric=True)[0] + + for monom in monoms: + hpq[monom] = crt_(hp[monom], hq[monom], p, q) + for monom in hpmonoms: + hpq[monom] = crt_(hp[monom], zero, p, q) + for monom in hqmonoms: + hpq[monom] = crt_(zero, hq[monom], p, q) + + return hpq + + +def _interpolate_multivariate(evalpoints, hpeval, ring, i, p, ground=False): + r""" + Reconstruct a polynomial `h_p` in `\mathbb{Z}_p[x_0, \ldots, x_{k-1}]` + from a list of evaluation points in `\mathbb{Z}_p` and a list of + polynomials in + `\mathbb{Z}_p[x_0, \ldots, 
x_{i-1}, x_{i+1}, \ldots, x_{k-1}]`, which + are the images of `h_p` evaluated in the variable `x_i`. + + It is also possible to reconstruct a parameter of the ground domain, + i.e. if `h_p` is a polynomial over `\mathbb{Z}_p[x_0, \ldots, x_{k-1}]`. + In this case, one has to set ``ground=True``. + + Parameters + ========== + + evalpoints : list of Integer objects + list of evaluation points in `\mathbb{Z}_p` + hpeval : list of PolyElement objects + list of polynomials in (resp. over) + `\mathbb{Z}_p[x_0, \ldots, x_{i-1}, x_{i+1}, \ldots, x_{k-1}]`, + images of `h_p` evaluated in the variable `x_i` + ring : PolyRing + `h_p` will be an element of this ring + i : Integer + index of the variable which has to be reconstructed + p : Integer + prime number, modulus of `h_p` + ground : Boolean + indicates whether `x_i` is in the ground domain, default is + ``False`` + + Returns + ======= + + hp : PolyElement + interpolated polynomial in (resp. over) + `\mathbb{Z}_p[x_0, \ldots, x_{k-1}]` + + """ + hp = ring.zero + + if ground: + domain = ring.domain.domain + y = ring.domain.gens[i] + else: + domain = ring.domain + y = ring.gens[i] + + for a, hpa in zip(evalpoints, hpeval): + numer = ring.one + denom = domain.one + for b in evalpoints: + if b == a: + continue + + numer *= y - b + denom *= a - b + + denom = domain.invert(denom, p) + coeff = numer.mul_ground(denom) + hp += hpa.set_ring(ring) * coeff + + return hp.trunc_ground(p) + + +def modgcd_bivariate(f, g): + r""" + Computes the GCD of two polynomials in `\mathbb{Z}[x, y]` using a + modular algorithm. + + The algorithm computes the GCD of two bivariate integer polynomials + `f` and `g` by calculating the GCD in `\mathbb{Z}_p[x, y]` for + suitable primes `p` and then reconstructing the coefficients with the + Chinese Remainder Theorem. To compute the bivariate GCD over + `\mathbb{Z}_p`, the polynomials `f \; \mathrm{mod} \, p` and + `g \; \mathrm{mod} \, p` are evaluated at `y = a` for certain + `a \in \mathbb{Z}_p` and then their univariate GCD in `\mathbb{Z}_p[x]` + is computed. Interpolating those yields the bivariate GCD in + `\mathbb{Z}_p[x, y]`. To verify the result in `\mathbb{Z}[x, y]`, trial + division is done, but only for candidates which are very likely the + desired GCD. + + Parameters + ========== + + f : PolyElement + bivariate integer polynomial + g : PolyElement + bivariate integer polynomial + + Returns + ======= + + h : PolyElement + GCD of the polynomials `f` and `g` + cff : PolyElement + cofactor of `f`, i.e. `\frac{f}{h}` + cfg : PolyElement + cofactor of `g`, i.e. `\frac{g}{h}` + + Examples + ======== + + >>> from sympy.polys.modulargcd import modgcd_bivariate + >>> from sympy.polys import ring, ZZ + + >>> R, x, y = ring("x, y", ZZ) + + >>> f = x**2 - y**2 + >>> g = x**2 + 2*x*y + y**2 + + >>> h, cff, cfg = modgcd_bivariate(f, g) + >>> h, cff, cfg + (x + y, x - y, x + y) + + >>> cff * h == f + True + >>> cfg * h == g + True + + >>> f = x**2*y - x**2 - 4*y + 4 + >>> g = x + 2 + + >>> h, cff, cfg = modgcd_bivariate(f, g) + >>> h, cff, cfg + (x + 2, x*y - x - 2*y + 2, 1) + + >>> cff * h == f + True + >>> cfg * h == g + True + + References + ========== + + 1. 
[Monagan00]_ + + """ + assert f.ring == g.ring and f.ring.domain.is_ZZ + + result = _trivial_gcd(f, g) + if result is not None: + return result + + ring = f.ring + + cf, f = f.primitive() + cg, g = g.primitive() + ch = ring.domain.gcd(cf, cg) + + xbound, ycontbound = _degree_bound_bivariate(f, g) + if xbound == ycontbound == 0: + return ring(ch), f.mul_ground(cf // ch), g.mul_ground(cg // ch) + + fswap = _swap(f, 1) + gswap = _swap(g, 1) + degyf = fswap.degree() + degyg = gswap.degree() + + ybound, xcontbound = _degree_bound_bivariate(fswap, gswap) + if ybound == xcontbound == 0: + return ring(ch), f.mul_ground(cf // ch), g.mul_ground(cg // ch) + + # TODO: to improve performance, choose the main variable here + + gamma1 = ring.domain.gcd(f.LC, g.LC) + gamma2 = ring.domain.gcd(fswap.LC, gswap.LC) + badprimes = gamma1 * gamma2 + m = 1 + p = 1 + + while True: + p = nextprime(p) + while badprimes % p == 0: + p = nextprime(p) + + fp = f.trunc_ground(p) + gp = g.trunc_ground(p) + contfp, fp = _primitive(fp, p) + contgp, gp = _primitive(gp, p) + conthp = _gf_gcd(contfp, contgp, p) # monic polynomial in Z_p[y] + degconthp = conthp.degree() + + if degconthp > ycontbound: + continue + elif degconthp < ycontbound: + m = 1 + ycontbound = degconthp + continue + + # polynomial in Z_p[y] + delta = _gf_gcd(_LC(fp), _LC(gp), p) + + degcontfp = contfp.degree() + degcontgp = contgp.degree() + degdelta = delta.degree() + + N = min(degyf - degcontfp, degyg - degcontgp, + ybound - ycontbound + degdelta) + 1 + + if p < N: + continue + + n = 0 + evalpoints = [] + hpeval = [] + unlucky = False + + for a in range(p): + deltaa = delta.evaluate(0, a) + if not deltaa % p: + continue + + fpa = fp.evaluate(1, a).trunc_ground(p) + gpa = gp.evaluate(1, a).trunc_ground(p) + hpa = _gf_gcd(fpa, gpa, p) # monic polynomial in Z_p[x] + deghpa = hpa.degree() + + if deghpa > xbound: + continue + elif deghpa < xbound: + m = 1 + xbound = deghpa + unlucky = True + break + + hpa = hpa.mul_ground(deltaa).trunc_ground(p) + evalpoints.append(a) + hpeval.append(hpa) + n += 1 + + if n == N: + break + + if unlucky: + continue + if n < N: + continue + + hp = _interpolate_multivariate(evalpoints, hpeval, ring, 1, p) + + hp = _primitive(hp, p)[1] + hp = hp * conthp.set_ring(ring) + degyhp = hp.degree(1) + + if degyhp > ybound: + continue + if degyhp < ybound: + m = 1 + ybound = degyhp + continue + + hp = hp.mul_ground(gamma1).trunc_ground(p) + if m == 1: + m = p + hlastm = hp + continue + + hm = _chinese_remainder_reconstruction_multivariate(hp, hlastm, p, m) + m *= p + + if not hm == hlastm: + hlastm = hm + continue + + h = hm.quo_ground(hm.content()) + fquo, frem = f.div(h) + gquo, grem = g.div(h) + if not frem and not grem: + if h.LC < 0: + ch = -ch + h = h.mul_ground(ch) + cff = fquo.mul_ground(cf // ch) + cfg = gquo.mul_ground(cg // ch) + return h, cff, cfg + + +def _modgcd_multivariate_p(f, g, p, degbound, contbound): + r""" + Compute the GCD of two polynomials in + `\mathbb{Z}_p[x_0, \ldots, x_{k-1}]`. + + The algorithm reduces the problem step by step by evaluating the + polynomials `f` and `g` at `x_{k-1} = a` for suitable + `a \in \mathbb{Z}_p` and then calls itself recursively to compute the GCD + in `\mathbb{Z}_p[x_0, \ldots, x_{k-2}]`. If these recursive calls are + successful for enough evaluation points, the GCD in `k` variables is + interpolated, otherwise the algorithm returns ``None``. Every time a GCD + or a content is computed, their degrees are compared with the bounds. 
If + a degree greater then the bound is encountered, then the current call + returns ``None`` and a new evaluation point has to be chosen. If at some + point the degree is smaller, the correspondent bound is updated and the + algorithm fails. + + Parameters + ========== + + f : PolyElement + multivariate integer polynomial with coefficients in `\mathbb{Z}_p` + g : PolyElement + multivariate integer polynomial with coefficients in `\mathbb{Z}_p` + p : Integer + prime number, modulus of `f` and `g` + degbound : list of Integer objects + ``degbound[i]`` is an upper bound for the degree of the GCD of `f` + and `g` in the variable `x_i` + contbound : list of Integer objects + ``contbound[i]`` is an upper bound for the degree of the content of + the GCD in `\mathbb{Z}_p[x_i][x_0, \ldots, x_{i-1}]`, + ``contbound[0]`` is not used can therefore be chosen + arbitrarily. + + Returns + ======= + + h : PolyElement + GCD of the polynomials `f` and `g` or ``None`` + + References + ========== + + 1. [Monagan00]_ + 2. [Brown71]_ + + """ + ring = f.ring + k = ring.ngens + + if k == 1: + h = _gf_gcd(f, g, p).trunc_ground(p) + degh = h.degree() + + if degh > degbound[0]: + return None + if degh < degbound[0]: + degbound[0] = degh + raise ModularGCDFailed + + return h + + degyf = f.degree(k-1) + degyg = g.degree(k-1) + + contf, f = _primitive(f, p) + contg, g = _primitive(g, p) + + conth = _gf_gcd(contf, contg, p) # polynomial in Z_p[y] + + degcontf = contf.degree() + degcontg = contg.degree() + degconth = conth.degree() + + if degconth > contbound[k-1]: + return None + if degconth < contbound[k-1]: + contbound[k-1] = degconth + raise ModularGCDFailed + + lcf = _LC(f) + lcg = _LC(g) + + delta = _gf_gcd(lcf, lcg, p) # polynomial in Z_p[y] + + evaltest = delta + + for i in range(k-1): + evaltest *= _gf_gcd(_LC(_swap(f, i)), _LC(_swap(g, i)), p) + + degdelta = delta.degree() + + N = min(degyf - degcontf, degyg - degcontg, + degbound[k-1] - contbound[k-1] + degdelta) + 1 + + if p < N: + return None + + n = 0 + d = 0 + evalpoints = [] + heval = [] + points = list(range(p)) + + while points: + a = random.sample(points, 1)[0] + points.remove(a) + + if not evaltest.evaluate(0, a) % p: + continue + + deltaa = delta.evaluate(0, a) % p + + fa = f.evaluate(k-1, a).trunc_ground(p) + ga = g.evaluate(k-1, a).trunc_ground(p) + + # polynomials in Z_p[x_0, ..., x_{k-2}] + ha = _modgcd_multivariate_p(fa, ga, p, degbound, contbound) + + if ha is None: + d += 1 + if d > n: + return None + continue + + if ha.is_ground: + h = conth.set_ring(ring).trunc_ground(p) + return h + + ha = ha.mul_ground(deltaa).trunc_ground(p) + + evalpoints.append(a) + heval.append(ha) + n += 1 + + if n == N: + h = _interpolate_multivariate(evalpoints, heval, ring, k-1, p) + + h = _primitive(h, p)[1] * conth.set_ring(ring) + degyh = h.degree(k-1) + + if degyh > degbound[k-1]: + return None + if degyh < degbound[k-1]: + degbound[k-1] = degyh + raise ModularGCDFailed + + return h + + return None + + +def modgcd_multivariate(f, g): + r""" + Compute the GCD of two polynomials in `\mathbb{Z}[x_0, \ldots, x_{k-1}]` + using a modular algorithm. + + The algorithm computes the GCD of two multivariate integer polynomials + `f` and `g` by calculating the GCD in + `\mathbb{Z}_p[x_0, \ldots, x_{k-1}]` for suitable primes `p` and then + reconstructing the coefficients with the Chinese Remainder Theorem. To + compute the multivariate GCD over `\mathbb{Z}_p` the recursive + subroutine :func:`_modgcd_multivariate_p` is used. 
To verify the result in + `\mathbb{Z}[x_0, \ldots, x_{k-1}]`, trial division is done, but only for + candidates which are very likely the desired GCD. + + Parameters + ========== + + f : PolyElement + multivariate integer polynomial + g : PolyElement + multivariate integer polynomial + + Returns + ======= + + h : PolyElement + GCD of the polynomials `f` and `g` + cff : PolyElement + cofactor of `f`, i.e. `\frac{f}{h}` + cfg : PolyElement + cofactor of `g`, i.e. `\frac{g}{h}` + + Examples + ======== + + >>> from sympy.polys.modulargcd import modgcd_multivariate + >>> from sympy.polys import ring, ZZ + + >>> R, x, y = ring("x, y", ZZ) + + >>> f = x**2 - y**2 + >>> g = x**2 + 2*x*y + y**2 + + >>> h, cff, cfg = modgcd_multivariate(f, g) + >>> h, cff, cfg + (x + y, x - y, x + y) + + >>> cff * h == f + True + >>> cfg * h == g + True + + >>> R, x, y, z = ring("x, y, z", ZZ) + + >>> f = x*z**2 - y*z**2 + >>> g = x**2*z + z + + >>> h, cff, cfg = modgcd_multivariate(f, g) + >>> h, cff, cfg + (z, x*z - y*z, x**2 + 1) + + >>> cff * h == f + True + >>> cfg * h == g + True + + References + ========== + + 1. [Monagan00]_ + 2. [Brown71]_ + + See also + ======== + + _modgcd_multivariate_p + + """ + assert f.ring == g.ring and f.ring.domain.is_ZZ + + result = _trivial_gcd(f, g) + if result is not None: + return result + + ring = f.ring + k = ring.ngens + + # divide out integer content + cf, f = f.primitive() + cg, g = g.primitive() + ch = ring.domain.gcd(cf, cg) + + gamma = ring.domain.gcd(f.LC, g.LC) + + badprimes = ring.domain.one + for i in range(k): + badprimes *= ring.domain.gcd(_swap(f, i).LC, _swap(g, i).LC) + + degbound = [min(fdeg, gdeg) for fdeg, gdeg in zip(f.degrees(), g.degrees())] + contbound = list(degbound) + + m = 1 + p = 1 + + while True: + p = nextprime(p) + while badprimes % p == 0: + p = nextprime(p) + + fp = f.trunc_ground(p) + gp = g.trunc_ground(p) + + try: + # monic GCD of fp, gp in Z_p[x_0, ..., x_{k-2}, y] + hp = _modgcd_multivariate_p(fp, gp, p, degbound, contbound) + except ModularGCDFailed: + m = 1 + continue + + if hp is None: + continue + + hp = hp.mul_ground(gamma).trunc_ground(p) + if m == 1: + m = p + hlastm = hp + continue + + hm = _chinese_remainder_reconstruction_multivariate(hp, hlastm, p, m) + m *= p + + if not hm == hlastm: + hlastm = hm + continue + + h = hm.primitive()[1] + fquo, frem = f.div(h) + gquo, grem = g.div(h) + if not frem and not grem: + if h.LC < 0: + ch = -ch + h = h.mul_ground(ch) + cff = fquo.mul_ground(cf // ch) + cfg = gquo.mul_ground(cg // ch) + return h, cff, cfg + + +def _gf_div(f, g, p): + r""" + Compute `\frac f g` modulo `p` for two univariate polynomials over + `\mathbb Z_p`. + """ + ring = f.ring + densequo, denserem = gf_div(f.to_dense(), g.to_dense(), p, ring.domain) + return ring.from_dense(densequo), ring.from_dense(denserem) + + +def _rational_function_reconstruction(c, p, m): + r""" + Reconstruct a rational function `\frac a b` in `\mathbb Z_p(t)` from + + .. math:: + + c = \frac a b \; \mathrm{mod} \, m, + + where `c` and `m` are polynomials in `\mathbb Z_p[t]` and `m` has + positive degree. + + The algorithm is based on the Euclidean Algorithm. In general, `m` is + not irreducible, so it is possible that `b` is not invertible modulo + `m`. In that case ``None`` is returned. 
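+
+    The Euclidean iteration used below keeps track of the cofactors `s_i`
+    of `c` and, working modulo `p`, maintains the invariant
+
+    .. math::
+
+        r_i \equiv s_i c \; \mathrm{mod} \, m,
+
+    so as soon as `\deg r_1 \le N` and `\deg s_1 \le D` with `s_1`
+    invertible modulo `m`, the pair `(a, b) = (r_1, s_1)` satisfies
+    `a \equiv b c \; \mathrm{mod} \, m`, i.e. `c = \frac a b \; \mathrm{mod} \, m`
+    (here `N` and `D` are the degree bounds chosen in the implementation).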
+ + Parameters + ========== + + c : PolyElement + univariate polynomial in `\mathbb Z[t]` + p : Integer + prime number + m : PolyElement + modulus, not necessarily irreducible + + Returns + ======= + + frac : FracElement + either `\frac a b` in `\mathbb Z(t)` or ``None`` + + References + ========== + + 1. [Hoeij04]_ + + """ + ring = c.ring + domain = ring.domain + M = m.degree() + N = M // 2 + D = M - N - 1 + + r0, s0 = m, ring.zero + r1, s1 = c, ring.one + + while r1.degree() > N: + quo = _gf_div(r0, r1, p)[0] + r0, r1 = r1, (r0 - quo*r1).trunc_ground(p) + s0, s1 = s1, (s0 - quo*s1).trunc_ground(p) + + a, b = r1, s1 + if b.degree() > D or _gf_gcd(b, m, p) != 1: + return None + + lc = b.LC + if lc != 1: + lcinv = domain.invert(lc, p) + a = a.mul_ground(lcinv).trunc_ground(p) + b = b.mul_ground(lcinv).trunc_ground(p) + + field = ring.to_field() + + return field(a) / field(b) + + +def _rational_reconstruction_func_coeffs(hm, p, m, ring, k): + r""" + Reconstruct every coefficient `c_h` of a polynomial `h` in + `\mathbb Z_p(t_k)[t_1, \ldots, t_{k-1}][x, z]` from the corresponding + coefficient `c_{h_m}` of a polynomial `h_m` in + `\mathbb Z_p[t_1, \ldots, t_k][x, z] \cong \mathbb Z_p[t_k][t_1, \ldots, t_{k-1}][x, z]` + such that + + .. math:: + + c_{h_m} = c_h \; \mathrm{mod} \, m, + + where `m \in \mathbb Z_p[t]`. + + The reconstruction is based on the Euclidean Algorithm. In general, `m` + is not irreducible, so it is possible that this fails for some + coefficient. In that case ``None`` is returned. + + Parameters + ========== + + hm : PolyElement + polynomial in `\mathbb Z[t_1, \ldots, t_k][x, z]` + p : Integer + prime number, modulus of `\mathbb Z_p` + m : PolyElement + modulus, polynomial in `\mathbb Z[t]`, not necessarily irreducible + ring : PolyRing + `\mathbb Z(t_k)[t_1, \ldots, t_{k-1}][x, z]`, `h` will be an + element of this ring + k : Integer + index of the parameter `t_k` which will be reconstructed + + Returns + ======= + + h : PolyElement + reconstructed polynomial in + `\mathbb Z(t_k)[t_1, \ldots, t_{k-1}][x, z]` or ``None`` + + See also + ======== + + _rational_function_reconstruction + + """ + h = ring.zero + + for monom, coeff in hm.iterterms(): + if k == 0: + coeffh = _rational_function_reconstruction(coeff, p, m) + + if not coeffh: + return None + + else: + coeffh = ring.domain.zero + for mon, c in coeff.drop_to_ground(k).iterterms(): + ch = _rational_function_reconstruction(c, p, m) + + if not ch: + return None + + coeffh[mon] = ch + + h[monom] = coeffh + + return h + + +def _gf_gcdex(f, g, p): + r""" + Extended Euclidean Algorithm for two univariate polynomials over + `\mathbb Z_p`. + + Returns polynomials `s, t` and `h`, such that `h` is the GCD of `f` and + `g` and `sf + tg = h \; \mathrm{mod} \, p`. 
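+
+    A small doctest (the polynomials and the prime below are arbitrary
+    illustrative choices; only the monic GCD and the Bezout identity are
+    checked)::
+
+        >>> from sympy.polys.modulargcd import _gf_gcdex
+        >>> from sympy.polys import ring, ZZ
+        >>> R, x = ring("x", ZZ)
+        >>> f = x**2 + 8*x + 7
+        >>> g = x**3 + 7*x**2 + x + 7
+        >>> s, t, h = _gf_gcdex(f, g, 11)
+        >>> h
+        x + 7
+        >>> (s*f + t*g - h).trunc_ground(11)
+        0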
+ + """ + ring = f.ring + s, t, h = gf_gcdex(f.to_dense(), g.to_dense(), p, ring.domain) + return ring.from_dense(s), ring.from_dense(t), ring.from_dense(h) + + +def _trunc(f, minpoly, p): + r""" + Compute the reduced representation of a polynomial `f` in + `\mathbb Z_p[z] / (\check m_{\alpha}(z))[x]` + + Parameters + ========== + + f : PolyElement + polynomial in `\mathbb Z[x, z]` + minpoly : PolyElement + polynomial `\check m_{\alpha} \in \mathbb Z[z]`, not necessarily + irreducible + p : Integer + prime number, modulus of `\mathbb Z_p` + + Returns + ======= + + ftrunc : PolyElement + polynomial in `\mathbb Z[x, z]`, reduced modulo + `\check m_{\alpha}(z)` and `p` + + """ + ring = f.ring + minpoly = minpoly.set_ring(ring) + p_ = ring.ground_new(p) + + return f.trunc_ground(p).rem([minpoly, p_]).trunc_ground(p) + + +def _euclidean_algorithm(f, g, minpoly, p): + r""" + Compute the monic GCD of two univariate polynomials in + `\mathbb{Z}_p[z]/(\check m_{\alpha}(z))[x]` with the Euclidean + Algorithm. + + In general, `\check m_{\alpha}(z)` is not irreducible, so it is possible + that some leading coefficient is not invertible modulo + `\check m_{\alpha}(z)`. In that case ``None`` is returned. + + Parameters + ========== + + f, g : PolyElement + polynomials in `\mathbb Z[x, z]` + minpoly : PolyElement + polynomial in `\mathbb Z[z]`, not necessarily irreducible + p : Integer + prime number, modulus of `\mathbb Z_p` + + Returns + ======= + + h : PolyElement + GCD of `f` and `g` in `\mathbb Z[z, x]` or ``None``, coefficients + are in `\left[ -\frac{p-1} 2, \frac{p-1} 2 \right]` + + """ + ring = f.ring + + f = _trunc(f, minpoly, p) + g = _trunc(g, minpoly, p) + + while g: + rem = f + deg = g.degree(0) # degree in x + lcinv, _, gcd = _gf_gcdex(ring.dmp_LC(g), minpoly, p) + + if not gcd == 1: + return None + + while True: + degrem = rem.degree(0) # degree in x + if degrem < deg: + break + quo = (lcinv * ring.dmp_LC(rem)).set_ring(ring) + rem = _trunc(rem - g.mul_monom((degrem - deg, 0))*quo, minpoly, p) + + f = g + g = rem + + lcfinv = _gf_gcdex(ring.dmp_LC(f), minpoly, p)[0].set_ring(ring) + + return _trunc(f * lcfinv, minpoly, p) + + +def _trial_division(f, h, minpoly, p=None): + r""" + Check if `h` divides `f` in + `\mathbb K[t_1, \ldots, t_k][z]/(m_{\alpha}(z))`, where `\mathbb K` is + either `\mathbb Q` or `\mathbb Z_p`. + + This algorithm is based on pseudo division and does not use any + fractions. By default `\mathbb K` is `\mathbb Q`, if a prime number `p` + is given, `\mathbb Z_p` is chosen instead. + + Parameters + ========== + + f, h : PolyElement + polynomials in `\mathbb Z[t_1, \ldots, t_k][x, z]` + minpoly : PolyElement + polynomial `m_{\alpha}(z)` in `\mathbb Z[t_1, \ldots, t_k][z]` + p : Integer or None + if `p` is given, `\mathbb K` is set to `\mathbb Z_p` instead of + `\mathbb Q`, default is ``None`` + + Returns + ======= + + rem : PolyElement + remainder of `\frac f h` + + References + ========== + + .. 
[1] [Hoeij02]_ + + """ + ring = f.ring + + zxring = ring.clone(symbols=(ring.symbols[1], ring.symbols[0])) + + minpoly = minpoly.set_ring(ring) + + rem = f + + degrem = rem.degree() + degh = h.degree() + degm = minpoly.degree(1) + + lch = _LC(h).set_ring(ring) + lcm = minpoly.LC + + while rem and degrem >= degh: + # polynomial in Z[t_1, ..., t_k][z] + lcrem = _LC(rem).set_ring(ring) + rem = rem*lch - h.mul_monom((degrem - degh, 0))*lcrem + if p: + rem = rem.trunc_ground(p) + degrem = rem.degree(1) + + while rem and degrem >= degm: + # polynomial in Z[t_1, ..., t_k][x] + lcrem = _LC(rem.set_ring(zxring)).set_ring(ring) + rem = rem.mul_ground(lcm) - minpoly.mul_monom((0, degrem - degm))*lcrem + if p: + rem = rem.trunc_ground(p) + degrem = rem.degree(1) + + degrem = rem.degree() + + return rem + + +def _evaluate_ground(f, i, a): + r""" + Evaluate a polynomial `f` at `a` in the `i`-th variable of the ground + domain. + """ + ring = f.ring.clone(domain=f.ring.domain.ring.drop(i)) + fa = ring.zero + + for monom, coeff in f.iterterms(): + fa[monom] = coeff.evaluate(i, a) + + return fa + + +def _func_field_modgcd_p(f, g, minpoly, p): + r""" + Compute the GCD of two polynomials `f` and `g` in + `\mathbb Z_p(t_1, \ldots, t_k)[z]/(\check m_\alpha(z))[x]`. + + The algorithm reduces the problem step by step by evaluating the + polynomials `f` and `g` at `t_k = a` for suitable `a \in \mathbb Z_p` + and then calls itself recursively to compute the GCD in + `\mathbb Z_p(t_1, \ldots, t_{k-1})[z]/(\check m_\alpha(z))[x]`. If these + recursive calls are successful, the GCD over `k` variables is + interpolated, otherwise the algorithm returns ``None``. After + interpolation, Rational Function Reconstruction is used to obtain the + correct coefficients. If this fails, a new evaluation point has to be + chosen, otherwise the desired polynomial is obtained by clearing + denominators. The result is verified with a fraction free trial + division. + + Parameters + ========== + + f, g : PolyElement + polynomials in `\mathbb Z[t_1, \ldots, t_k][x, z]` + minpoly : PolyElement + polynomial in `\mathbb Z[t_1, \ldots, t_k][z]`, not necessarily + irreducible + p : Integer + prime number, modulus of `\mathbb Z_p` + + Returns + ======= + + h : PolyElement + primitive associate in `\mathbb Z[t_1, \ldots, t_k][x, z]` of the + GCD of the polynomials `f` and `g` or ``None``, coefficients are + in `\left[ -\frac{p-1} 2, \frac{p-1} 2 \right]` + + References + ========== + + 1. 
[Hoeij04]_ + + """ + ring = f.ring + domain = ring.domain # Z[t_1, ..., t_k] + + if isinstance(domain, PolynomialRing): + k = domain.ngens + else: + return _euclidean_algorithm(f, g, minpoly, p) + + if k == 1: + qdomain = domain.ring.to_field() + else: + qdomain = domain.ring.drop_to_ground(k - 1) + qdomain = qdomain.clone(domain=qdomain.domain.ring.to_field()) + + qring = ring.clone(domain=qdomain) # = Z(t_k)[t_1, ..., t_{k-1}][x, z] + + n = 1 + d = 1 + + # polynomial in Z_p[t_1, ..., t_k][z] + gamma = ring.dmp_LC(f) * ring.dmp_LC(g) + # polynomial in Z_p[t_1, ..., t_k] + delta = minpoly.LC + + evalpoints = [] + heval = [] + LMlist = [] + points = list(range(p)) + + while points: + a = random.sample(points, 1)[0] + points.remove(a) + + if k == 1: + test = delta.evaluate(k-1, a) % p == 0 + else: + test = delta.evaluate(k-1, a).trunc_ground(p) == 0 + + if test: + continue + + gammaa = _evaluate_ground(gamma, k-1, a) + minpolya = _evaluate_ground(minpoly, k-1, a) + + if gammaa.rem([minpolya, gammaa.ring(p)]) == 0: + continue + + fa = _evaluate_ground(f, k-1, a) + ga = _evaluate_ground(g, k-1, a) + + # polynomial in Z_p[x, t_1, ..., t_{k-1}, z]/(minpoly) + ha = _func_field_modgcd_p(fa, ga, minpolya, p) + + if ha is None: + d += 1 + if d > n: + return None + continue + + if ha == 1: + return ha + + LM = [ha.degree()] + [0]*(k-1) + if k > 1: + for monom, coeff in ha.iterterms(): + if monom[0] == LM[0] and coeff.LM > tuple(LM[1:]): + LM[1:] = coeff.LM + + evalpoints_a = [a] + heval_a = [ha] + if k == 1: + m = qring.domain.get_ring().one + else: + m = qring.domain.domain.get_ring().one + + t = m.ring.gens[0] + + for b, hb, LMhb in zip(evalpoints, heval, LMlist): + if LMhb == LM: + evalpoints_a.append(b) + heval_a.append(hb) + m *= (t - b) + + m = m.trunc_ground(p) + evalpoints.append(a) + heval.append(ha) + LMlist.append(LM) + n += 1 + + # polynomial in Z_p[t_1, ..., t_k][x, z] + h = _interpolate_multivariate(evalpoints_a, heval_a, ring, k-1, p, ground=True) + + # polynomial in Z_p(t_k)[t_1, ..., t_{k-1}][x, z] + h = _rational_reconstruction_func_coeffs(h, p, m, qring, k-1) + + if h is None: + continue + + if k == 1: + dom = qring.domain.field + den = dom.ring.one + + for coeff in h.itercoeffs(): + den = dom.ring.from_dense(gf_lcm(den.to_dense(), coeff.denom.to_dense(), + p, dom.domain)) + + else: + dom = qring.domain.domain.field + den = dom.ring.one + + for coeff in h.itercoeffs(): + for c in coeff.itercoeffs(): + den = dom.ring.from_dense(gf_lcm(den.to_dense(), c.denom.to_dense(), + p, dom.domain)) + + den = qring.domain_new(den.trunc_ground(p)) + h = ring(h.mul_ground(den).as_expr()).trunc_ground(p) + + if not _trial_division(f, h, minpoly, p) and not _trial_division(g, h, minpoly, p): + return h + + return None + + +def _integer_rational_reconstruction(c, m, domain): + r""" + Reconstruct a rational number `\frac a b` from + + .. math:: + + c = \frac a b \; \mathrm{mod} \, m, + + where `c` and `m` are integers. + + The algorithm is based on the Euclidean Algorithm. In general, `m` is + not a prime number, so it is possible that `b` is not invertible modulo + `m`. In that case ``None`` is returned. + + Parameters + ========== + + c : Integer + `c = \frac a b \; \mathrm{mod} \, m` + m : Integer + modulus, not necessarily prime + domain : IntegerRing + `a, b, c` are elements of ``domain`` + + Returns + ======= + + frac : Rational + either `\frac a b` in `\mathbb Q` or ``None`` + + References + ========== + + 1. 
[Wang81]_ + + """ + if c < 0: + c += m + + r0, s0 = m, domain.zero + r1, s1 = c, domain.one + + bound = sqrt(m / 2) # still correct if replaced by ZZ.sqrt(m // 2) ? + + while int(r1) >= bound: + quo = r0 // r1 + r0, r1 = r1, r0 - quo*r1 + s0, s1 = s1, s0 - quo*s1 + + if abs(int(s1)) >= bound: + return None + + if s1 < 0: + a, b = -r1, -s1 + elif s1 > 0: + a, b = r1, s1 + else: + return None + + field = domain.get_field() + + return field(a) / field(b) + + +def _rational_reconstruction_int_coeffs(hm, m, ring): + r""" + Reconstruct every rational coefficient `c_h` of a polynomial `h` in + `\mathbb Q[t_1, \ldots, t_k][x, z]` from the corresponding integer + coefficient `c_{h_m}` of a polynomial `h_m` in + `\mathbb Z[t_1, \ldots, t_k][x, z]` such that + + .. math:: + + c_{h_m} = c_h \; \mathrm{mod} \, m, + + where `m \in \mathbb Z`. + + The reconstruction is based on the Euclidean Algorithm. In general, + `m` is not a prime number, so it is possible that this fails for some + coefficient. In that case ``None`` is returned. + + Parameters + ========== + + hm : PolyElement + polynomial in `\mathbb Z[t_1, \ldots, t_k][x, z]` + m : Integer + modulus, not necessarily prime + ring : PolyRing + `\mathbb Q[t_1, \ldots, t_k][x, z]`, `h` will be an element of this + ring + + Returns + ======= + + h : PolyElement + reconstructed polynomial in `\mathbb Q[t_1, \ldots, t_k][x, z]` or + ``None`` + + See also + ======== + + _integer_rational_reconstruction + + """ + h = ring.zero + + if isinstance(ring.domain, PolynomialRing): + reconstruction = _rational_reconstruction_int_coeffs + domain = ring.domain.ring + else: + reconstruction = _integer_rational_reconstruction + domain = hm.ring.domain + + for monom, coeff in hm.iterterms(): + coeffh = reconstruction(coeff, m, domain) + + if not coeffh: + return None + + h[monom] = coeffh + + return h + + +def _func_field_modgcd_m(f, g, minpoly): + r""" + Compute the GCD of two polynomials in + `\mathbb Q(t_1, \ldots, t_k)[z]/(m_{\alpha}(z))[x]` using a modular + algorithm. + + The algorithm computes the GCD of two polynomials `f` and `g` by + calculating the GCD in + `\mathbb Z_p(t_1, \ldots, t_k)[z] / (\check m_{\alpha}(z))[x]` for + suitable primes `p` and the primitive associate `\check m_{\alpha}(z)` + of `m_{\alpha}(z)`. Then the coefficients are reconstructed with the + Chinese Remainder Theorem and Rational Reconstruction. To compute the + GCD over `\mathbb Z_p(t_1, \ldots, t_k)[z] / (\check m_{\alpha})[x]`, + the recursive subroutine ``_func_field_modgcd_p`` is used. To verify the + result in `\mathbb Q(t_1, \ldots, t_k)[z] / (m_{\alpha}(z))[x]`, a + fraction free trial division is used. + + Parameters + ========== + + f, g : PolyElement + polynomials in `\mathbb Z[t_1, \ldots, t_k][x, z]` + minpoly : PolyElement + irreducible polynomial in `\mathbb Z[t_1, \ldots, t_k][z]` + + Returns + ======= + + h : PolyElement + the primitive associate in `\mathbb Z[t_1, \ldots, t_k][x, z]` of + the GCD of `f` and `g` + + Examples + ======== + + >>> from sympy.polys.modulargcd import _func_field_modgcd_m + >>> from sympy.polys import ring, ZZ + + >>> R, x, z = ring('x, z', ZZ) + >>> minpoly = (z**2 - 2).drop(0) + + >>> f = x**2 + 2*x*z + 2 + >>> g = x + z + >>> _func_field_modgcd_m(f, g, minpoly) + x + z + + >>> D, t = ring('t', ZZ) + >>> R, x, z = ring('x, z', D) + >>> minpoly = (z**2-3).drop(0) + + >>> f = x**2 + (t + 1)*x*z + 3*t + >>> g = x*z + 3*t + >>> _func_field_modgcd_m(f, g, minpoly) + x + t*z + + References + ========== + + 1. 
[Hoeij04]_ + + See also + ======== + + _func_field_modgcd_p + + """ + ring = f.ring + domain = ring.domain + + if isinstance(domain, PolynomialRing): + k = domain.ngens + QQdomain = domain.ring.clone(domain=domain.domain.get_field()) + QQring = ring.clone(domain=QQdomain) + else: + k = 0 + QQring = ring.clone(domain=ring.domain.get_field()) + + cf, f = f.primitive() + cg, g = g.primitive() + + # polynomial in Z[t_1, ..., t_k][z] + gamma = ring.dmp_LC(f) * ring.dmp_LC(g) + # polynomial in Z[t_1, ..., t_k] + delta = minpoly.LC + + p = 1 + primes = [] + hplist = [] + LMlist = [] + + while True: + p = nextprime(p) + + if gamma.trunc_ground(p) == 0: + continue + + if k == 0: + test = (delta % p == 0) + else: + test = (delta.trunc_ground(p) == 0) + + if test: + continue + + fp = f.trunc_ground(p) + gp = g.trunc_ground(p) + minpolyp = minpoly.trunc_ground(p) + + hp = _func_field_modgcd_p(fp, gp, minpolyp, p) + + if hp is None: + continue + + if hp == 1: + return ring.one + + LM = [hp.degree()] + [0]*k + if k > 0: + for monom, coeff in hp.iterterms(): + if monom[0] == LM[0] and coeff.LM > tuple(LM[1:]): + LM[1:] = coeff.LM + + hm = hp + m = p + + for q, hq, LMhq in zip(primes, hplist, LMlist): + if LMhq == LM: + hm = _chinese_remainder_reconstruction_multivariate(hq, hm, q, m) + m *= q + + primes.append(p) + hplist.append(hp) + LMlist.append(LM) + + hm = _rational_reconstruction_int_coeffs(hm, m, QQring) + + if hm is None: + continue + + if k == 0: + h = hm.clear_denoms()[1] + else: + den = domain.domain.one + for coeff in hm.itercoeffs(): + den = domain.domain.lcm(den, coeff.clear_denoms()[0]) + h = hm.mul_ground(den) + + # convert back to Z[t_1, ..., t_k][x, z] from Q[t_1, ..., t_k][x, z] + h = h.set_ring(ring) + h = h.primitive()[1] + + if not (_trial_division(f.mul_ground(cf), h, minpoly) or + _trial_division(g.mul_ground(cg), h, minpoly)): + return h + + +def _to_ZZ_poly(f, ring): + r""" + Compute an associate of a polynomial + `f \in \mathbb Q(\alpha)[x_0, \ldots, x_{n-1}]` in + `\mathbb Z[x_1, \ldots, x_{n-1}][z] / (\check m_{\alpha}(z))[x_0]`, + where `\check m_{\alpha}(z) \in \mathbb Z[z]` is the primitive associate + of the minimal polynomial `m_{\alpha}(z)` of `\alpha` over + `\mathbb Q`. + + Parameters + ========== + + f : PolyElement + polynomial in `\mathbb Q(\alpha)[x_0, \ldots, x_{n-1}]` + ring : PolyRing + `\mathbb Z[x_1, \ldots, x_{n-1}][x_0, z]` + + Returns + ======= + + f_ : PolyElement + associate of `f` in + `\mathbb Z[x_1, \ldots, x_{n-1}][x_0, z]` + + """ + f_ = ring.zero + + if isinstance(ring.domain, PolynomialRing): + domain = ring.domain.domain + else: + domain = ring.domain + + den = domain.one + + for coeff in f.itercoeffs(): + for c in coeff.to_list(): + if c: + den = domain.lcm(den, c.denominator) + + for monom, coeff in f.iterterms(): + coeff = coeff.to_list() + m = ring.domain.one + if isinstance(ring.domain, PolynomialRing): + m = m.mul_monom(monom[1:]) + n = len(coeff) + + for i in range(n): + if coeff[i]: + c = domain.convert(coeff[i] * den) * m + + if (monom[0], n-i-1) not in f_: + f_[(monom[0], n-i-1)] = c + else: + f_[(monom[0], n-i-1)] += c + + return f_ + + +def _to_ANP_poly(f, ring): + r""" + Convert a polynomial + `f \in \mathbb Z[x_1, \ldots, x_{n-1}][z]/(\check m_{\alpha}(z))[x_0]` + to a polynomial in `\mathbb Q(\alpha)[x_0, \ldots, x_{n-1}]`, + where `\check m_{\alpha}(z) \in \mathbb Z[z]` is the primitive associate + of the minimal polynomial `m_{\alpha}(z)` of `\alpha` over + `\mathbb Q`. 
+ + Parameters + ========== + + f : PolyElement + polynomial in `\mathbb Z[x_1, \ldots, x_{n-1}][x_0, z]` + ring : PolyRing + `\mathbb Q(\alpha)[x_0, \ldots, x_{n-1}]` + + Returns + ======= + + f_ : PolyElement + polynomial in `\mathbb Q(\alpha)[x_0, \ldots, x_{n-1}]` + + """ + domain = ring.domain + f_ = ring.zero + + if isinstance(f.ring.domain, PolynomialRing): + for monom, coeff in f.iterterms(): + for mon, coef in coeff.iterterms(): + m = (monom[0],) + mon + c = domain([domain.domain(coef)] + [0]*monom[1]) + + if m not in f_: + f_[m] = c + else: + f_[m] += c + + else: + for monom, coeff in f.iterterms(): + m = (monom[0],) + c = domain([domain.domain(coeff)] + [0]*monom[1]) + + if m not in f_: + f_[m] = c + else: + f_[m] += c + + return f_ + + +def _minpoly_from_dense(minpoly, ring): + r""" + Change representation of the minimal polynomial from ``DMP`` to + ``PolyElement`` for a given ring. + """ + minpoly_ = ring.zero + + for monom, coeff in minpoly.terms(): + minpoly_[monom] = ring.domain(coeff) + + return minpoly_ + + +def _primitive_in_x0(f): + r""" + Compute the content in `x_0` and the primitive part of a polynomial `f` + in + `\mathbb Q(\alpha)[x_0, x_1, \ldots, x_{n-1}] \cong \mathbb Q(\alpha)[x_1, \ldots, x_{n-1}][x_0]`. + """ + fring = f.ring + ring = fring.drop_to_ground(*range(1, fring.ngens)) + dom = ring.domain.ring + f_ = ring(f.as_expr()) + cont = dom.zero + + for coeff in f_.itercoeffs(): + cont = func_field_modgcd(cont, coeff)[0] + if cont == dom.one: + return cont, f + + return cont, f.quo(cont.set_ring(fring)) + + +# TODO: add support for algebraic function fields +def func_field_modgcd(f, g): + r""" + Compute the GCD of two polynomials `f` and `g` in + `\mathbb Q(\alpha)[x_0, \ldots, x_{n-1}]` using a modular algorithm. + + The algorithm first computes the primitive associate + `\check m_{\alpha}(z)` of the minimal polynomial `m_{\alpha}` in + `\mathbb{Z}[z]` and the primitive associates of `f` and `g` in + `\mathbb{Z}[x_1, \ldots, x_{n-1}][z]/(\check m_{\alpha})[x_0]`. Then it + computes the GCD in + `\mathbb Q(x_1, \ldots, x_{n-1})[z]/(m_{\alpha}(z))[x_0]`. + This is done by calculating the GCD in + `\mathbb{Z}_p(x_1, \ldots, x_{n-1})[z]/(\check m_{\alpha}(z))[x_0]` for + suitable primes `p` and then reconstructing the coefficients with the + Chinese Remainder Theorem and Rational Reconstuction. The GCD over + `\mathbb{Z}_p(x_1, \ldots, x_{n-1})[z]/(\check m_{\alpha}(z))[x_0]` is + computed with a recursive subroutine, which evaluates the polynomials at + `x_{n-1} = a` for suitable evaluation points `a \in \mathbb Z_p` and + then calls itself recursively until the ground domain does no longer + contain any parameters. For + `\mathbb{Z}_p[z]/(\check m_{\alpha}(z))[x_0]` the Euclidean Algorithm is + used. The results of those recursive calls are then interpolated and + Rational Function Reconstruction is used to obtain the correct + coefficients. The results, both in + `\mathbb Q(x_1, \ldots, x_{n-1})[z]/(m_{\alpha}(z))[x_0]` and + `\mathbb{Z}_p(x_1, \ldots, x_{n-1})[z]/(\check m_{\alpha}(z))[x_0]`, are + verified by a fraction free trial division. + + Apart from the above GCD computation some GCDs in + `\mathbb Q(\alpha)[x_1, \ldots, x_{n-1}]` have to be calculated, + because treating the polynomials as univariate ones can result in + a spurious content of the GCD. For this ``func_field_modgcd`` is + called recursively. 
+ + Parameters + ========== + + f, g : PolyElement + polynomials in `\mathbb Q(\alpha)[x_0, \ldots, x_{n-1}]` + + Returns + ======= + + h : PolyElement + monic GCD of the polynomials `f` and `g` + cff : PolyElement + cofactor of `f`, i.e. `\frac f h` + cfg : PolyElement + cofactor of `g`, i.e. `\frac g h` + + Examples + ======== + + >>> from sympy.polys.modulargcd import func_field_modgcd + >>> from sympy.polys import AlgebraicField, QQ, ring + >>> from sympy import sqrt + + >>> A = AlgebraicField(QQ, sqrt(2)) + >>> R, x = ring('x', A) + + >>> f = x**2 - 2 + >>> g = x + sqrt(2) + + >>> h, cff, cfg = func_field_modgcd(f, g) + + >>> h == x + sqrt(2) + True + >>> cff * h == f + True + >>> cfg * h == g + True + + >>> R, x, y = ring('x, y', A) + + >>> f = x**2 + 2*sqrt(2)*x*y + 2*y**2 + >>> g = x + sqrt(2)*y + + >>> h, cff, cfg = func_field_modgcd(f, g) + + >>> h == x + sqrt(2)*y + True + >>> cff * h == f + True + >>> cfg * h == g + True + + >>> f = x + sqrt(2)*y + >>> g = x + y + + >>> h, cff, cfg = func_field_modgcd(f, g) + + >>> h == R.one + True + >>> cff * h == f + True + >>> cfg * h == g + True + + References + ========== + + 1. [Hoeij04]_ + + """ + ring = f.ring + domain = ring.domain + n = ring.ngens + + assert ring == g.ring and domain.is_Algebraic + + result = _trivial_gcd(f, g) + if result is not None: + return result + + z = Dummy('z') + + ZZring = ring.clone(symbols=ring.symbols + (z,), domain=domain.domain.get_ring()) + + if n == 1: + f_ = _to_ZZ_poly(f, ZZring) + g_ = _to_ZZ_poly(g, ZZring) + minpoly = ZZring.drop(0).from_dense(domain.mod.to_list()) + + h = _func_field_modgcd_m(f_, g_, minpoly) + h = _to_ANP_poly(h, ring) + + else: + # contx0f in Q(a)[x_1, ..., x_{n-1}], f in Q(a)[x_0, ..., x_{n-1}] + contx0f, f = _primitive_in_x0(f) + contx0g, g = _primitive_in_x0(g) + contx0h = func_field_modgcd(contx0f, contx0g)[0] + + ZZring_ = ZZring.drop_to_ground(*range(1, n)) + + f_ = _to_ZZ_poly(f, ZZring_) + g_ = _to_ZZ_poly(g, ZZring_) + minpoly = _minpoly_from_dense(domain.mod, ZZring_.drop(0)) + + h = _func_field_modgcd_m(f_, g_, minpoly) + h = _to_ANP_poly(h, ring) + + contx0h_, h = _primitive_in_x0(h) + h *= contx0h.set_ring(ring) + f *= contx0f.set_ring(ring) + g *= contx0g.set_ring(ring) + + h = h.quo_ground(h.LC) + + return h, f.quo(h), g.quo(h) diff --git a/.venv/Lib/site-packages/sympy/polys/monomials.py b/.venv/Lib/site-packages/sympy/polys/monomials.py new file mode 100644 index 0000000000000000000000000000000000000000..f464ba97f137931c29154b1bdcb8ec4f364919f8 --- /dev/null +++ b/.venv/Lib/site-packages/sympy/polys/monomials.py @@ -0,0 +1,622 @@ +"""Tools and arithmetics for monomials of distributed polynomials. """ + + +from itertools import combinations_with_replacement, product +from textwrap import dedent + +from sympy.core import Mul, S, Tuple, sympify +from sympy.polys.polyerrors import ExactQuotientFailed +from sympy.polys.polyutils import PicklableWithSlots, dict_from_expr +from sympy.utilities import public +from sympy.utilities.iterables import is_sequence, iterable + +@public +def itermonomials(variables, max_degrees, min_degrees=None): + r""" + ``max_degrees`` and ``min_degrees`` are either both integers or both lists. + Unless otherwise specified, ``min_degrees`` is either ``0`` or + ``[0, ..., 0]``. + + A generator of all monomials ``monom`` is returned, such that + either + ``min_degree <= total_degree(monom) <= max_degree``, + or + ``min_degrees[i] <= degree_list(monom)[i] <= max_degrees[i]``, + for all ``i``. + + Case I. 
``max_degrees`` and ``min_degrees`` are both integers + ============================================================= + + Given a set of variables $V$ and a min_degree $N$ and a max_degree $M$ + generate a set of monomials of degree less than or equal to $N$ and greater + than or equal to $M$. The total number of monomials in commutative + variables is huge and is given by the following formula if $M = 0$: + + .. math:: + \frac{(\#V + N)!}{\#V! N!} + + For example if we would like to generate a dense polynomial of + a total degree $N = 50$ and $M = 0$, which is the worst case, in 5 + variables, assuming that exponents and all of coefficients are 32-bit long + and stored in an array we would need almost 80 GiB of memory! Fortunately + most polynomials, that we will encounter, are sparse. + + Consider monomials in commutative variables $x$ and $y$ + and non-commutative variables $a$ and $b$:: + + >>> from sympy import symbols + >>> from sympy.polys.monomials import itermonomials + >>> from sympy.polys.orderings import monomial_key + >>> from sympy.abc import x, y + + >>> sorted(itermonomials([x, y], 2), key=monomial_key('grlex', [y, x])) + [1, x, y, x**2, x*y, y**2] + + >>> sorted(itermonomials([x, y], 3), key=monomial_key('grlex', [y, x])) + [1, x, y, x**2, x*y, y**2, x**3, x**2*y, x*y**2, y**3] + + >>> a, b = symbols('a, b', commutative=False) + >>> set(itermonomials([a, b, x], 2)) + {1, a, a**2, b, b**2, x, x**2, a*b, b*a, x*a, x*b} + + >>> sorted(itermonomials([x, y], 2, 1), key=monomial_key('grlex', [y, x])) + [x, y, x**2, x*y, y**2] + + Case II. ``max_degrees`` and ``min_degrees`` are both lists + =========================================================== + + If ``max_degrees = [d_1, ..., d_n]`` and + ``min_degrees = [e_1, ..., e_n]``, the number of monomials generated + is: + + .. 
math:: + (d_1 - e_1 + 1) (d_2 - e_2 + 1) \cdots (d_n - e_n + 1) + + Let us generate all monomials ``monom`` in variables $x$ and $y$ + such that ``[1, 2][i] <= degree_list(monom)[i] <= [2, 4][i]``, + ``i = 0, 1`` :: + + >>> from sympy import symbols + >>> from sympy.polys.monomials import itermonomials + >>> from sympy.polys.orderings import monomial_key + >>> from sympy.abc import x, y + + >>> sorted(itermonomials([x, y], [2, 4], [1, 2]), reverse=True, key=monomial_key('lex', [x, y])) + [x**2*y**4, x**2*y**3, x**2*y**2, x*y**4, x*y**3, x*y**2] + """ + n = len(variables) + if is_sequence(max_degrees): + if len(max_degrees) != n: + raise ValueError('Argument sizes do not match') + if min_degrees is None: + min_degrees = [0]*n + elif not is_sequence(min_degrees): + raise ValueError('min_degrees is not a list') + else: + if len(min_degrees) != n: + raise ValueError('Argument sizes do not match') + if any(i < 0 for i in min_degrees): + raise ValueError("min_degrees cannot contain negative numbers") + total_degree = False + else: + max_degree = max_degrees + if max_degree < 0: + raise ValueError("max_degrees cannot be negative") + if min_degrees is None: + min_degree = 0 + else: + if min_degrees < 0: + raise ValueError("min_degrees cannot be negative") + min_degree = min_degrees + total_degree = True + if total_degree: + if min_degree > max_degree: + return + if not variables or max_degree == 0: + yield S.One + return + # Force to list in case of passed tuple or other incompatible collection + variables = list(variables) + [S.One] + if all(variable.is_commutative for variable in variables): + monomials_list_comm = [] + for item in combinations_with_replacement(variables, max_degree): + powers = dict.fromkeys(variables, 0) + for variable in item: + if variable != 1: + powers[variable] += 1 + if sum(powers.values()) >= min_degree: + monomials_list_comm.append(Mul(*item)) + yield from set(monomials_list_comm) + else: + monomials_list_non_comm = [] + for item in product(variables, repeat=max_degree): + powers = dict.fromkeys(variables, 0) + for variable in item: + if variable != 1: + powers[variable] += 1 + if sum(powers.values()) >= min_degree: + monomials_list_non_comm.append(Mul(*item)) + yield from set(monomials_list_non_comm) + else: + if any(min_degrees[i] > max_degrees[i] for i in range(n)): + raise ValueError('min_degrees[i] must be <= max_degrees[i] for all i') + power_lists = [] + for var, min_d, max_d in zip(variables, min_degrees, max_degrees): + power_lists.append([var**i for i in range(min_d, max_d + 1)]) + for powers in product(*power_lists): + yield Mul(*powers) + +def monomial_count(V, N): + r""" + Computes the number of monomials. + + The number of monomials is given by the following formula: + + .. math:: + + \frac{(\#V + N)!}{\#V! N!} + + where `N` is a total degree and `V` is a set of variables. + + Examples + ======== + + >>> from sympy.polys.monomials import itermonomials, monomial_count + >>> from sympy.polys.orderings import monomial_key + >>> from sympy.abc import x, y + + >>> monomial_count(2, 2) + 6 + + >>> M = list(itermonomials([x, y], 2)) + + >>> sorted(M, key=monomial_key('grlex', [y, x])) + [1, x, y, x**2, x*y, y**2] + >>> len(M) + 6 + + """ + from sympy.functions.combinatorial.factorials import factorial + return factorial(V + N) / factorial(V) / factorial(N) + +def monomial_mul(A, B): + """ + Multiplication of tuples representing monomials. 
+ + Examples + ======== + + Lets multiply `x**3*y**4*z` with `x*y**2`:: + + >>> from sympy.polys.monomials import monomial_mul + + >>> monomial_mul((3, 4, 1), (1, 2, 0)) + (4, 6, 1) + + which gives `x**4*y**5*z`. + + """ + return tuple([ a + b for a, b in zip(A, B) ]) + +def monomial_div(A, B): + """ + Division of tuples representing monomials. + + Examples + ======== + + Lets divide `x**3*y**4*z` by `x*y**2`:: + + >>> from sympy.polys.monomials import monomial_div + + >>> monomial_div((3, 4, 1), (1, 2, 0)) + (2, 2, 1) + + which gives `x**2*y**2*z`. However:: + + >>> monomial_div((3, 4, 1), (1, 2, 2)) is None + True + + `x*y**2*z**2` does not divide `x**3*y**4*z`. + + """ + C = monomial_ldiv(A, B) + + if all(c >= 0 for c in C): + return tuple(C) + else: + return None + +def monomial_ldiv(A, B): + """ + Division of tuples representing monomials. + + Examples + ======== + + Lets divide `x**3*y**4*z` by `x*y**2`:: + + >>> from sympy.polys.monomials import monomial_ldiv + + >>> monomial_ldiv((3, 4, 1), (1, 2, 0)) + (2, 2, 1) + + which gives `x**2*y**2*z`. + + >>> monomial_ldiv((3, 4, 1), (1, 2, 2)) + (2, 2, -1) + + which gives `x**2*y**2*z**-1`. + + """ + return tuple([ a - b for a, b in zip(A, B) ]) + +def monomial_pow(A, n): + """Return the n-th pow of the monomial. """ + return tuple([ a*n for a in A ]) + +def monomial_gcd(A, B): + """ + Greatest common divisor of tuples representing monomials. + + Examples + ======== + + Lets compute GCD of `x*y**4*z` and `x**3*y**2`:: + + >>> from sympy.polys.monomials import monomial_gcd + + >>> monomial_gcd((1, 4, 1), (3, 2, 0)) + (1, 2, 0) + + which gives `x*y**2`. + + """ + return tuple([ min(a, b) for a, b in zip(A, B) ]) + +def monomial_lcm(A, B): + """ + Least common multiple of tuples representing monomials. + + Examples + ======== + + Lets compute LCM of `x*y**4*z` and `x**3*y**2`:: + + >>> from sympy.polys.monomials import monomial_lcm + + >>> monomial_lcm((1, 4, 1), (3, 2, 0)) + (3, 4, 1) + + which gives `x**3*y**4*z`. + + """ + return tuple([ max(a, b) for a, b in zip(A, B) ]) + +def monomial_divides(A, B): + """ + Does there exist a monomial X such that XA == B? + + Examples + ======== + + >>> from sympy.polys.monomials import monomial_divides + >>> monomial_divides((1, 2), (3, 4)) + True + >>> monomial_divides((1, 2), (0, 2)) + False + """ + return all(a <= b for a, b in zip(A, B)) + +def monomial_max(*monoms): + """ + Returns maximal degree for each variable in a set of monomials. + + Examples + ======== + + Consider monomials `x**3*y**4*z**5`, `y**5*z` and `x**6*y**3*z**9`. + We wish to find out what is the maximal degree for each of `x`, `y` + and `z` variables:: + + >>> from sympy.polys.monomials import monomial_max + + >>> monomial_max((3,4,5), (0,5,1), (6,3,9)) + (6, 5, 9) + + """ + M = list(monoms[0]) + + for N in monoms[1:]: + for i, n in enumerate(N): + M[i] = max(M[i], n) + + return tuple(M) + +def monomial_min(*monoms): + """ + Returns minimal degree for each variable in a set of monomials. + + Examples + ======== + + Consider monomials `x**3*y**4*z**5`, `y**5*z` and `x**6*y**3*z**9`. + We wish to find out what is the minimal degree for each of `x`, `y` + and `z` variables:: + + >>> from sympy.polys.monomials import monomial_min + + >>> monomial_min((3,4,5), (0,5,1), (6,3,9)) + (0, 3, 1) + + """ + M = list(monoms[0]) + + for N in monoms[1:]: + for i, n in enumerate(N): + M[i] = min(M[i], n) + + return tuple(M) + +def monomial_deg(M): + """ + Returns the total degree of a monomial. 
+ + Examples + ======== + + The total degree of `xy^2` is 3: + + >>> from sympy.polys.monomials import monomial_deg + >>> monomial_deg((1, 2)) + 3 + """ + return sum(M) + +def term_div(a, b, domain): + """Division of two terms in over a ring/field. """ + a_lm, a_lc = a + b_lm, b_lc = b + + monom = monomial_div(a_lm, b_lm) + + if domain.is_Field: + if monom is not None: + return monom, domain.quo(a_lc, b_lc) + else: + return None + else: + if not (monom is None or a_lc % b_lc): + return monom, domain.quo(a_lc, b_lc) + else: + return None + +class MonomialOps: + """Code generator of fast monomial arithmetic functions. """ + + def __init__(self, ngens): + self.ngens = ngens + + def _build(self, code, name): + ns = {} + exec(code, ns) + return ns[name] + + def _vars(self, name): + return [ "%s%s" % (name, i) for i in range(self.ngens) ] + + def mul(self): + name = "monomial_mul" + template = dedent("""\ + def %(name)s(A, B): + (%(A)s,) = A + (%(B)s,) = B + return (%(AB)s,) + """) + A = self._vars("a") + B = self._vars("b") + AB = [ "%s + %s" % (a, b) for a, b in zip(A, B) ] + code = template % {"name": name, "A": ", ".join(A), "B": ", ".join(B), "AB": ", ".join(AB)} + return self._build(code, name) + + def pow(self): + name = "monomial_pow" + template = dedent("""\ + def %(name)s(A, k): + (%(A)s,) = A + return (%(Ak)s,) + """) + A = self._vars("a") + Ak = [ "%s*k" % a for a in A ] + code = template % {"name": name, "A": ", ".join(A), "Ak": ", ".join(Ak)} + return self._build(code, name) + + def mulpow(self): + name = "monomial_mulpow" + template = dedent("""\ + def %(name)s(A, B, k): + (%(A)s,) = A + (%(B)s,) = B + return (%(ABk)s,) + """) + A = self._vars("a") + B = self._vars("b") + ABk = [ "%s + %s*k" % (a, b) for a, b in zip(A, B) ] + code = template % {"name": name, "A": ", ".join(A), "B": ", ".join(B), "ABk": ", ".join(ABk)} + return self._build(code, name) + + def ldiv(self): + name = "monomial_ldiv" + template = dedent("""\ + def %(name)s(A, B): + (%(A)s,) = A + (%(B)s,) = B + return (%(AB)s,) + """) + A = self._vars("a") + B = self._vars("b") + AB = [ "%s - %s" % (a, b) for a, b in zip(A, B) ] + code = template % {"name": name, "A": ", ".join(A), "B": ", ".join(B), "AB": ", ".join(AB)} + return self._build(code, name) + + def div(self): + name = "monomial_div" + template = dedent("""\ + def %(name)s(A, B): + (%(A)s,) = A + (%(B)s,) = B + %(RAB)s + return (%(R)s,) + """) + A = self._vars("a") + B = self._vars("b") + RAB = [ "r%(i)s = a%(i)s - b%(i)s\n if r%(i)s < 0: return None" % {"i": i} for i in range(self.ngens) ] + R = self._vars("r") + code = template % {"name": name, "A": ", ".join(A), "B": ", ".join(B), "RAB": "\n ".join(RAB), "R": ", ".join(R)} + return self._build(code, name) + + def lcm(self): + name = "monomial_lcm" + template = dedent("""\ + def %(name)s(A, B): + (%(A)s,) = A + (%(B)s,) = B + return (%(AB)s,) + """) + A = self._vars("a") + B = self._vars("b") + AB = [ "%s if %s >= %s else %s" % (a, a, b, b) for a, b in zip(A, B) ] + code = template % {"name": name, "A": ", ".join(A), "B": ", ".join(B), "AB": ", ".join(AB)} + return self._build(code, name) + + def gcd(self): + name = "monomial_gcd" + template = dedent("""\ + def %(name)s(A, B): + (%(A)s,) = A + (%(B)s,) = B + return (%(AB)s,) + """) + A = self._vars("a") + B = self._vars("b") + AB = [ "%s if %s <= %s else %s" % (a, a, b, b) for a, b in zip(A, B) ] + code = template % {"name": name, "A": ", ".join(A), "B": ", ".join(B), "AB": ", ".join(AB)} + return self._build(code, name) + +@public +class 
Monomial(PicklableWithSlots): + """Class representing a monomial, i.e. a product of powers. """ + + __slots__ = ('exponents', 'gens') + + def __init__(self, monom, gens=None): + if not iterable(monom): + rep, gens = dict_from_expr(sympify(monom), gens=gens) + if len(rep) == 1 and list(rep.values())[0] == 1: + monom = list(rep.keys())[0] + else: + raise ValueError("Expected a monomial got {}".format(monom)) + + self.exponents = tuple(map(int, monom)) + self.gens = gens + + def rebuild(self, exponents, gens=None): + return self.__class__(exponents, gens or self.gens) + + def __len__(self): + return len(self.exponents) + + def __iter__(self): + return iter(self.exponents) + + def __getitem__(self, item): + return self.exponents[item] + + def __hash__(self): + return hash((self.__class__.__name__, self.exponents, self.gens)) + + def __str__(self): + if self.gens: + return "*".join([ "%s**%s" % (gen, exp) for gen, exp in zip(self.gens, self.exponents) ]) + else: + return "%s(%s)" % (self.__class__.__name__, self.exponents) + + def as_expr(self, *gens): + """Convert a monomial instance to a SymPy expression. """ + gens = gens or self.gens + + if not gens: + raise ValueError( + "Cannot convert %s to an expression without generators" % self) + + return Mul(*[ gen**exp for gen, exp in zip(gens, self.exponents) ]) + + def __eq__(self, other): + if isinstance(other, Monomial): + exponents = other.exponents + elif isinstance(other, (tuple, Tuple)): + exponents = other + else: + return False + + return self.exponents == exponents + + def __ne__(self, other): + return not self == other + + def __mul__(self, other): + if isinstance(other, Monomial): + exponents = other.exponents + elif isinstance(other, (tuple, Tuple)): + exponents = other + else: + raise NotImplementedError + + return self.rebuild(monomial_mul(self.exponents, exponents)) + + def __truediv__(self, other): + if isinstance(other, Monomial): + exponents = other.exponents + elif isinstance(other, (tuple, Tuple)): + exponents = other + else: + raise NotImplementedError + + result = monomial_div(self.exponents, exponents) + + if result is not None: + return self.rebuild(result) + else: + raise ExactQuotientFailed(self, Monomial(other)) + + __floordiv__ = __truediv__ + + def __pow__(self, other): + n = int(other) + if n < 0: + raise ValueError("a non-negative integer expected, got %s" % other) + return self.rebuild(monomial_pow(self.exponents, n)) + + def gcd(self, other): + """Greatest common divisor of monomials. """ + if isinstance(other, Monomial): + exponents = other.exponents + elif isinstance(other, (tuple, Tuple)): + exponents = other + else: + raise TypeError( + "an instance of Monomial class expected, got %s" % other) + + return self.rebuild(monomial_gcd(self.exponents, exponents)) + + def lcm(self, other): + """Least common multiple of monomials. 
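A minimal usage sketch of the ``gcd``/``lcm`` methods; the printed forms below are inferred from the ``__str__`` method defined above, not asserted as doctest output::

    from sympy.abc import x, y, z
    from sympy.polys.monomials import Monomial

    m1 = Monomial((1, 4, 1), (x, y, z))   # represents x*y**4*z
    m2 = Monomial((3, 2, 0), (x, y, z))   # represents x**3*y**2

    print(m1.lcm(m2))   # exponent-wise max -> x**3*y**4*z**1
    print(m1.gcd(m2))   # exponent-wise min -> x**1*y**2*z**0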
""" + if isinstance(other, Monomial): + exponents = other.exponents + elif isinstance(other, (tuple, Tuple)): + exponents = other + else: + raise TypeError( + "an instance of Monomial class expected, got %s" % other) + + return self.rebuild(monomial_lcm(self.exponents, exponents)) diff --git a/.venv/Lib/site-packages/sympy/polys/multivariate_resultants.py b/.venv/Lib/site-packages/sympy/polys/multivariate_resultants.py new file mode 100644 index 0000000000000000000000000000000000000000..b6c967a8b981e25e8e26745804a658ff7b90e9af --- /dev/null +++ b/.venv/Lib/site-packages/sympy/polys/multivariate_resultants.py @@ -0,0 +1,473 @@ +""" +This module contains functions for two multivariate resultants. These +are: + +- Dixon's resultant. +- Macaulay's resultant. + +Multivariate resultants are used to identify whether a multivariate +system has common roots. That is when the resultant is equal to zero. +""" +from math import prod + +from sympy.core.mul import Mul +from sympy.matrices.dense import (Matrix, diag) +from sympy.polys.polytools import (Poly, degree_list, rem) +from sympy.simplify.simplify import simplify +from sympy.tensor.indexed import IndexedBase +from sympy.polys.monomials import itermonomials, monomial_deg +from sympy.polys.orderings import monomial_key +from sympy.polys.polytools import poly_from_expr, total_degree +from sympy.functions.combinatorial.factorials import binomial +from itertools import combinations_with_replacement +from sympy.utilities.exceptions import sympy_deprecation_warning + +class DixonResultant(): + """ + A class for retrieving the Dixon's resultant of a multivariate + system. + + Examples + ======== + + >>> from sympy import symbols + + >>> from sympy.polys.multivariate_resultants import DixonResultant + >>> x, y = symbols('x, y') + + >>> p = x + y + >>> q = x ** 2 + y ** 3 + >>> h = x ** 2 + y + + >>> dixon = DixonResultant(variables=[x, y], polynomials=[p, q, h]) + >>> poly = dixon.get_dixon_polynomial() + >>> matrix = dixon.get_dixon_matrix(polynomial=poly) + >>> matrix + Matrix([ + [ 0, 0, -1, 0, -1], + [ 0, -1, 0, -1, 0], + [-1, 0, 1, 0, 0], + [ 0, -1, 0, 0, 1], + [-1, 0, 0, 1, 0]]) + >>> matrix.det() + 0 + + See Also + ======== + + Notebook in examples: sympy/example/notebooks. + + References + ========== + + .. [1] [Kapur1994]_ + .. [2] [Palancz08]_ + + """ + + def __init__(self, polynomials, variables): + """ + A class that takes two lists, a list of polynomials and list of + variables. Returns the Dixon matrix of the multivariate system. + + Parameters + ---------- + polynomials : list of polynomials + A list of m n-degree polynomials + variables: list + A list of all n variables + """ + self.polynomials = polynomials + self.variables = variables + + self.n = len(self.variables) + self.m = len(self.polynomials) + + a = IndexedBase("alpha") + # A list of n alpha variables (the replacing variables) + self.dummy_variables = [a[i] for i in range(self.n)] + + # A list of the d_max of each variable. + self._max_degrees = [max(degree_list(poly)[i] for poly in self.polynomials) + for i in range(self.n)] + + @property + def max_degrees(self): + sympy_deprecation_warning( + """ + The max_degrees property of DixonResultant is deprecated. + """, + deprecated_since_version="1.5", + active_deprecations_target="deprecated-dixonresultant-properties", + ) + return self._max_degrees + + def get_dixon_polynomial(self): + r""" + Returns + ======= + + dixon_polynomial: polynomial + Dixon's polynomial is calculated as: + + delta = Delta(A) / ((x_1 - a_1) ... 
(x_n - a_n)) where, + + A = |p_1(x_1,... x_n), ..., p_n(x_1,... x_n)| + |p_1(a_1,... x_n), ..., p_n(a_1,... x_n)| + |... , ..., ...| + |p_1(a_1,... a_n), ..., p_n(a_1,... a_n)| + """ + if self.m != (self.n + 1): + raise ValueError('Method invalid for given combination.') + + # First row + rows = [self.polynomials] + + temp = list(self.variables) + + for idx in range(self.n): + temp[idx] = self.dummy_variables[idx] + substitution = dict(zip(self.variables, temp)) + rows.append([f.subs(substitution) for f in self.polynomials]) + + A = Matrix(rows) + + terms = zip(self.variables, self.dummy_variables) + product_of_differences = Mul(*[a - b for a, b in terms]) + dixon_polynomial = (A.det() / product_of_differences).factor() + + return poly_from_expr(dixon_polynomial, self.dummy_variables)[0] + + def get_upper_degree(self): + sympy_deprecation_warning( + """ + The get_upper_degree() method of DixonResultant is deprecated. Use + get_max_degrees() instead. + """, + deprecated_since_version="1.5", + active_deprecations_target="deprecated-dixonresultant-properties" + ) + list_of_products = [self.variables[i] ** self._max_degrees[i] + for i in range(self.n)] + product = prod(list_of_products) + product = Poly(product).monoms() + + return monomial_deg(*product) + + def get_max_degrees(self, polynomial): + r""" + Returns a list of the maximum degree of each variable appearing + in the coefficients of the Dixon polynomial. The coefficients are + viewed as polys in $x_1, x_2, \dots, x_n$. + """ + deg_lists = [degree_list(Poly(poly, self.variables)) + for poly in polynomial.coeffs()] + + max_degrees = [max(degs) for degs in zip(*deg_lists)] + + return max_degrees + + def get_dixon_matrix(self, polynomial): + r""" + Construct the Dixon matrix from the coefficients of polynomial + \alpha. Each coefficient is viewed as a polynomial of x_1, ..., + x_n. + """ + + max_degrees = self.get_max_degrees(polynomial) + + # list of column headers of the Dixon matrix. + monomials = itermonomials(self.variables, max_degrees) + monomials = sorted(monomials, reverse=True, + key=monomial_key('lex', self.variables)) + + dixon_matrix = Matrix([[Poly(c, *self.variables).coeff_monomial(m) + for m in monomials] + for c in polynomial.coeffs()]) + + # remove columns if needed + if dixon_matrix.shape[0] != dixon_matrix.shape[1]: + keep = [column for column in range(dixon_matrix.shape[-1]) + if any(element != 0 for element + in dixon_matrix[:, column])] + + dixon_matrix = dixon_matrix[:, keep] + + return dixon_matrix + + def KSY_precondition(self, matrix): + """ + Test for the validity of the Kapur-Saxena-Yang precondition. + + The precondition requires that the column corresponding to the + monomial 1 = x_1 ^ 0 * x_2 ^ 0 * ... * x_n ^ 0 is not a linear + combination of the remaining ones. In SymPy notation this is + the last column. For the precondition to hold the last non-zero + row of the rref matrix should be of the form [0, 0, ..., 1]. 
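As a sketch, the precondition can be checked on the Dixon matrix built in the class-level example above; whether it holds depends on the particular system, so no result is asserted here::

    from sympy import symbols
    from sympy.polys.multivariate_resultants import DixonResultant

    x, y = symbols('x, y')
    p = x + y
    q = x**2 + y**3
    h = x**2 + y

    dixon = DixonResultant(polynomials=[p, q, h], variables=[x, y])
    poly = dixon.get_dixon_polynomial()
    matrix = dixon.get_dixon_matrix(polynomial=poly)

    # True only if the last non-zero row of rref(matrix) is [0, ..., 0, 1]
    print(dixon.KSY_precondition(matrix))

If the precondition holds, ``get_KSY_Dixon_resultant`` can then be applied to the same matrix.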
+ """ + if matrix.is_zero_matrix: + return False + + m, n = matrix.shape + + # simplify the matrix and keep only its non-zero rows + matrix = simplify(matrix.rref()[0]) + rows = [i for i in range(m) if any(matrix[i, j] != 0 for j in range(n))] + matrix = matrix[rows,:] + + condition = Matrix([[0]*(n-1) + [1]]) + + if matrix[-1,:] == condition: + return True + else: + return False + + def delete_zero_rows_and_columns(self, matrix): + """Remove the zero rows and columns of the matrix.""" + rows = [ + i for i in range(matrix.rows) if not matrix.row(i).is_zero_matrix] + cols = [ + j for j in range(matrix.cols) if not matrix.col(j).is_zero_matrix] + + return matrix[rows, cols] + + def product_leading_entries(self, matrix): + """Calculate the product of the leading entries of the matrix.""" + res = 1 + for row in range(matrix.rows): + for el in matrix.row(row): + if el != 0: + res = res * el + break + return res + + def get_KSY_Dixon_resultant(self, matrix): + """Calculate the Kapur-Saxena-Yang approach to the Dixon Resultant.""" + matrix = self.delete_zero_rows_and_columns(matrix) + _, U, _ = matrix.LUdecomposition() + matrix = self.delete_zero_rows_and_columns(simplify(U)) + + return self.product_leading_entries(matrix) + +class MacaulayResultant(): + """ + A class for calculating the Macaulay resultant. Note that the + polynomials must be homogenized and their coefficients must be + given as symbols. + + Examples + ======== + + >>> from sympy import symbols + + >>> from sympy.polys.multivariate_resultants import MacaulayResultant + >>> x, y, z = symbols('x, y, z') + + >>> a_0, a_1, a_2 = symbols('a_0, a_1, a_2') + >>> b_0, b_1, b_2 = symbols('b_0, b_1, b_2') + >>> c_0, c_1, c_2,c_3, c_4 = symbols('c_0, c_1, c_2, c_3, c_4') + + >>> f = a_0 * y - a_1 * x + a_2 * z + >>> g = b_1 * x ** 2 + b_0 * y ** 2 - b_2 * z ** 2 + >>> h = c_0 * y * z ** 2 - c_1 * x ** 3 + c_2 * x ** 2 * z - c_3 * x * z ** 2 + c_4 * z ** 3 + + >>> mac = MacaulayResultant(polynomials=[f, g, h], variables=[x, y, z]) + >>> mac.monomial_set + [x**4, x**3*y, x**3*z, x**2*y**2, x**2*y*z, x**2*z**2, x*y**3, + x*y**2*z, x*y*z**2, x*z**3, y**4, y**3*z, y**2*z**2, y*z**3, z**4] + >>> matrix = mac.get_matrix() + >>> submatrix = mac.get_submatrix(matrix) + >>> submatrix + Matrix([ + [-a_1, a_0, a_2, 0], + [ 0, -a_1, 0, 0], + [ 0, 0, -a_1, 0], + [ 0, 0, 0, -a_1]]) + + See Also + ======== + + Notebook in examples: sympy/example/notebooks. + + References + ========== + + .. [1] [Bruce97]_ + .. [2] [Stiller96]_ + + """ + def __init__(self, polynomials, variables): + """ + Parameters + ========== + + variables: list + A list of all n variables + polynomials : list of SymPy polynomials + A list of m n-degree polynomials + """ + self.polynomials = polynomials + self.variables = variables + self.n = len(variables) + + # A list of the d_max of each polynomial. + self.degrees = [total_degree(poly, *self.variables) for poly + in self.polynomials] + + self.degree_m = self._get_degree_m() + self.monomials_size = self.get_size() + + # The set T of all possible monomials of degree degree_m + self.monomial_set = self.get_monomials_of_certain_degree(self.degree_m) + + def _get_degree_m(self): + r""" + Returns + ======= + + degree_m: int + The degree_m is calculated as 1 + \sum_1 ^ n (d_i - 1), + where d_i is the degree of the i polynomial + """ + return 1 + sum(d - 1 for d in self.degrees) + + def get_size(self): + r""" + Returns + ======= + + size: int + The size of set T. 
Set T is the set of all possible + monomials of the n variables for degree equal to the + degree_m + """ + return binomial(self.degree_m + self.n - 1, self.n - 1) + + def get_monomials_of_certain_degree(self, degree): + """ + Returns + ======= + + monomials: list + A list of monomials of a certain degree. + """ + monomials = [Mul(*monomial) for monomial + in combinations_with_replacement(self.variables, + degree)] + + return sorted(monomials, reverse=True, + key=monomial_key('lex', self.variables)) + + def get_row_coefficients(self): + """ + Returns + ======= + + row_coefficients: list + The row coefficients of Macaulay's matrix + """ + row_coefficients = [] + divisible = [] + for i in range(self.n): + if i == 0: + degree = self.degree_m - self.degrees[i] + monomial = self.get_monomials_of_certain_degree(degree) + row_coefficients.append(monomial) + else: + divisible.append(self.variables[i - 1] ** + self.degrees[i - 1]) + degree = self.degree_m - self.degrees[i] + poss_rows = self.get_monomials_of_certain_degree(degree) + for div in divisible: + for p in poss_rows: + if rem(p, div) == 0: + poss_rows = [item for item in poss_rows + if item != p] + row_coefficients.append(poss_rows) + return row_coefficients + + def get_matrix(self): + """ + Returns + ======= + + macaulay_matrix: Matrix + The Macaulay numerator matrix + """ + rows = [] + row_coefficients = self.get_row_coefficients() + for i in range(self.n): + for multiplier in row_coefficients[i]: + coefficients = [] + poly = Poly(self.polynomials[i] * multiplier, + *self.variables) + + for mono in self.monomial_set: + coefficients.append(poly.coeff_monomial(mono)) + rows.append(coefficients) + + macaulay_matrix = Matrix(rows) + return macaulay_matrix + + def get_reduced_nonreduced(self): + r""" + Returns + ======= + + reduced: list + A list of the reduced monomials + non_reduced: list + A list of the monomials that are not reduced + + Definition + ========== + + A polynomial is said to be reduced in x_i, if its degree (the + maximum degree of its monomials) in x_i is less than d_i. A + polynomial that is reduced in all variables but one is said + simply to be reduced. + """ + divisible = [] + for m in self.monomial_set: + temp = [] + for i, v in enumerate(self.variables): + temp.append(bool(total_degree(m, v) >= self.degrees[i])) + divisible.append(temp) + reduced = [i for i, r in enumerate(divisible) + if sum(r) < self.n - 1] + non_reduced = [i for i, r in enumerate(divisible) + if sum(r) >= self.n -1] + + return reduced, non_reduced + + def get_submatrix(self, matrix): + r""" + Returns + ======= + + macaulay_submatrix: Matrix + The Macaulay denominator matrix. Columns that are non reduced are kept. + The row which contains one of the a_{i}s is dropped. a_{i}s + are the coefficients of x_i ^ {d_i}. 
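The two matrices are typically combined as a ratio of determinants (Macaulay's classical construction); a sketch, reusing the system from the class-level example above, valid only generically, when the denominator determinant does not vanish::

    from sympy import symbols
    from sympy.polys.multivariate_resultants import MacaulayResultant

    x, y, z = symbols('x, y, z')
    a_0, a_1, a_2 = symbols('a_0, a_1, a_2')
    b_0, b_1, b_2 = symbols('b_0, b_1, b_2')
    c_0, c_1, c_2, c_3, c_4 = symbols('c_0, c_1, c_2, c_3, c_4')

    f = a_0*y - a_1*x + a_2*z
    g = b_1*x**2 + b_0*y**2 - b_2*z**2
    h = c_0*y*z**2 - c_1*x**3 + c_2*x**2*z - c_3*x*z**2 + c_4*z**3

    mac = MacaulayResultant(polynomials=[f, g, h], variables=[x, y, z])
    numerator = mac.get_matrix()
    denominator = mac.get_submatrix(numerator)

    # resultant = det(numerator matrix) / det(denominator matrix)
    resultant = numerator.det() / denominator.det()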
+ """ + reduced, non_reduced = self.get_reduced_nonreduced() + + # if reduced == [], then det(matrix) should be 1 + if reduced == []: + return diag([1]) + + # reduced != [] + reduction_set = [v ** self.degrees[i] for i, v + in enumerate(self.variables)] + + ais = [self.polynomials[i].coeff(reduction_set[i]) + for i in range(self.n)] + + reduced_matrix = matrix[:, reduced] + keep = [] + for row in range(reduced_matrix.rows): + check = [ai in reduced_matrix[row, :] for ai in ais] + if True not in check: + keep.append(row) + + return matrix[keep, non_reduced] diff --git a/.venv/Lib/site-packages/sympy/polys/orderings.py b/.venv/Lib/site-packages/sympy/polys/orderings.py new file mode 100644 index 0000000000000000000000000000000000000000..b6ed575d5103440e1e8ebda4c53c4149d3badf11 --- /dev/null +++ b/.venv/Lib/site-packages/sympy/polys/orderings.py @@ -0,0 +1,286 @@ +"""Definitions of monomial orderings. """ + +from __future__ import annotations + +__all__ = ["lex", "grlex", "grevlex", "ilex", "igrlex", "igrevlex"] + +from sympy.core import Symbol +from sympy.utilities.iterables import iterable + +class MonomialOrder: + """Base class for monomial orderings. """ + + alias: str | None = None + is_global: bool | None = None + is_default = False + + def __repr__(self): + return self.__class__.__name__ + "()" + + def __str__(self): + return self.alias + + def __call__(self, monomial): + raise NotImplementedError + + def __eq__(self, other): + return self.__class__ == other.__class__ + + def __hash__(self): + return hash(self.__class__) + + def __ne__(self, other): + return not (self == other) + +class LexOrder(MonomialOrder): + """Lexicographic order of monomials. """ + + alias = 'lex' + is_global = True + is_default = True + + def __call__(self, monomial): + return monomial + +class GradedLexOrder(MonomialOrder): + """Graded lexicographic order of monomials. """ + + alias = 'grlex' + is_global = True + + def __call__(self, monomial): + return (sum(monomial), monomial) + +class ReversedGradedLexOrder(MonomialOrder): + """Reversed graded lexicographic order of monomials. """ + + alias = 'grevlex' + is_global = True + + def __call__(self, monomial): + return (sum(monomial), tuple(reversed([-m for m in monomial]))) + +class ProductOrder(MonomialOrder): + """ + A product order built from other monomial orders. + + Given (not necessarily total) orders O1, O2, ..., On, their product order + P is defined as M1 > M2 iff there exists i such that O1(M1) = O2(M2), + ..., Oi(M1) = Oi(M2), O{i+1}(M1) > O{i+1}(M2). + + Product orders are typically built from monomial orders on different sets + of variables. + + ProductOrder is constructed by passing a list of pairs + [(O1, L1), (O2, L2), ...] where Oi are MonomialOrders and Li are callables. + Upon comparison, the Li are passed the total monomial, and should filter + out the part of the monomial to pass to Oi. + + Examples + ======== + + We can use a lexicographic order on x_1, x_2 and also on + y_1, y_2, y_3, and their product on {x_i, y_i} as follows: + + >>> from sympy.polys.orderings import lex, grlex, ProductOrder + >>> P = ProductOrder( + ... (lex, lambda m: m[:2]), # lex order on x_1 and x_2 of monomial + ... (grlex, lambda m: m[2:]) # grlex on y_1, y_2, y_3 + ... ) + >>> P((2, 1, 1, 0, 0)) > P((1, 10, 0, 2, 0)) + True + + Here the exponent `2` of `x_1` in the first monomial + (`x_1^2 x_2 y_1`) is bigger than the exponent `1` of `x_1` in the + second monomial (`x_1 x_2^10 y_2^2`), so the first monomial is greater + in the product ordering. 
+ + >>> P((2, 1, 1, 0, 0)) < P((2, 1, 0, 2, 0)) + True + + Here the exponents of `x_1` and `x_2` agree, so the grlex order on + `y_1, y_2, y_3` is used to decide the ordering. In this case the monomial + `y_2^2` is ordered larger than `y_1`, since for the grlex order the degree + of the monomial is most important. + """ + + def __init__(self, *args): + self.args = args + + def __call__(self, monomial): + return tuple(O(lamda(monomial)) for (O, lamda) in self.args) + + def __repr__(self): + contents = [repr(x[0]) for x in self.args] + return self.__class__.__name__ + '(' + ", ".join(contents) + ')' + + def __str__(self): + contents = [str(x[0]) for x in self.args] + return self.__class__.__name__ + '(' + ", ".join(contents) + ')' + + def __eq__(self, other): + if not isinstance(other, ProductOrder): + return False + return self.args == other.args + + def __hash__(self): + return hash((self.__class__, self.args)) + + @property + def is_global(self): + if all(o.is_global is True for o, _ in self.args): + return True + if all(o.is_global is False for o, _ in self.args): + return False + return None + +class InverseOrder(MonomialOrder): + """ + The "inverse" of another monomial order. + + If O is any monomial order, we can construct another monomial order iO + such that `A >_{iO} B` if and only if `B >_O A`. This is useful for + constructing local orders. + + Note that many algorithms only work with *global* orders. + + For example, in the inverse lexicographic order on a single variable `x`, + high powers of `x` count as small: + + >>> from sympy.polys.orderings import lex, InverseOrder + >>> ilex = InverseOrder(lex) + >>> ilex((5,)) < ilex((0,)) + True + """ + + def __init__(self, O): + self.O = O + + def __str__(self): + return "i" + str(self.O) + + def __call__(self, monomial): + def inv(l): + if iterable(l): + return tuple(inv(x) for x in l) + return -l + return inv(self.O(monomial)) + + @property + def is_global(self): + if self.O.is_global is True: + return False + if self.O.is_global is False: + return True + return None + + def __eq__(self, other): + return isinstance(other, InverseOrder) and other.O == self.O + + def __hash__(self): + return hash((self.__class__, self.O)) + +lex = LexOrder() +grlex = GradedLexOrder() +grevlex = ReversedGradedLexOrder() +ilex = InverseOrder(lex) +igrlex = InverseOrder(grlex) +igrevlex = InverseOrder(grevlex) + +_monomial_key = { + 'lex': lex, + 'grlex': grlex, + 'grevlex': grevlex, + 'ilex': ilex, + 'igrlex': igrlex, + 'igrevlex': igrevlex +} + +def monomial_key(order=None, gens=None): + """ + Return a function defining admissible order on monomials. + + The result of a call to :func:`monomial_key` is a function which should + be used as a key to :func:`sorted` built-in function, to provide order + in a set of monomials of the same length. + + Currently supported monomial orderings are: + + 1. lex - lexicographic order (default) + 2. grlex - graded lexicographic order + 3. grevlex - reversed graded lexicographic order + 4. ilex, igrlex, igrevlex - the corresponding inverse orders + + If the ``order`` input argument is not a string but has ``__call__`` + attribute, then it will pass through with an assumption that the + callable object defines an admissible order on monomials. + + If the ``gens`` input argument contains a list of generators, the + resulting key function can be used to sort SymPy ``Expr`` objects. 
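For instance, a sketch of sorting plain expressions with explicit generators; the order noted in the comment follows from the graded lexicographic key (total degree first, then the degree list)::

    from sympy.abc import x, y
    from sympy.polys.orderings import monomial_key

    key = monomial_key('grlex', gens=[x, y])

    # expected order, smallest first: [y, x*y, x**2, x*y**2]
    print(sorted([x*y**2, x**2, y, x*y], key=key))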
+ + """ + if order is None: + order = lex + + if isinstance(order, Symbol): + order = str(order) + + if isinstance(order, str): + try: + order = _monomial_key[order] + except KeyError: + raise ValueError("supported monomial orderings are 'lex', 'grlex' and 'grevlex', got %r" % order) + if hasattr(order, '__call__'): + if gens is not None: + def _order(expr): + return order(expr.as_poly(*gens).degree_list()) + return _order + return order + else: + raise ValueError("monomial ordering specification must be a string or a callable, got %s" % order) + +class _ItemGetter: + """Helper class to return a subsequence of values.""" + + def __init__(self, seq): + self.seq = tuple(seq) + + def __call__(self, m): + return tuple(m[idx] for idx in self.seq) + + def __eq__(self, other): + if not isinstance(other, _ItemGetter): + return False + return self.seq == other.seq + +def build_product_order(arg, gens): + """ + Build a monomial order on ``gens``. + + ``arg`` should be a tuple of iterables. The first element of each iterable + should be a string or monomial order (will be passed to monomial_key), + the others should be subsets of the generators. This function will build + the corresponding product order. + + For example, build a product of two grlex orders: + + >>> from sympy.polys.orderings import build_product_order + >>> from sympy.abc import x, y, z, t + + >>> O = build_product_order((("grlex", x, y), ("grlex", z, t)), [x, y, z, t]) + >>> O((1, 2, 3, 4)) + ((3, (1, 2)), (7, (3, 4))) + + """ + gens2idx = {} + for i, g in enumerate(gens): + gens2idx[g] = i + order = [] + for expr in arg: + name = expr[0] + var = expr[1:] + + def makelambda(var): + return _ItemGetter(gens2idx[g] for g in var) + order.append((monomial_key(name), makelambda(var))) + return ProductOrder(*order) diff --git a/.venv/Lib/site-packages/sympy/polys/orthopolys.py b/.venv/Lib/site-packages/sympy/polys/orthopolys.py new file mode 100644 index 0000000000000000000000000000000000000000..ee82457703a2be172951ee38e3cd67f221a438a0 --- /dev/null +++ b/.venv/Lib/site-packages/sympy/polys/orthopolys.py @@ -0,0 +1,343 @@ +"""Efficient functions for generating orthogonal polynomials.""" +from sympy.core.symbol import Dummy +from sympy.polys.densearith import (dup_mul, dup_mul_ground, + dup_lshift, dup_sub, dup_add, dup_sub_term, dup_sub_ground, dup_sqr) +from sympy.polys.domains import ZZ, QQ +from sympy.polys.polytools import named_poly +from sympy.utilities import public + +def dup_jacobi(n, a, b, K): + """Low-level implementation of Jacobi polynomials.""" + if n < 1: + return [K.one] + m2, m1 = [K.one], [(a+b)/K(2) + K.one, (a-b)/K(2)] + for i in range(2, n+1): + den = K(i)*(a + b + i)*(a + b + K(2)*i - K(2)) + f0 = (a + b + K(2)*i - K.one) * (a*a - b*b) / (K(2)*den) + f1 = (a + b + K(2)*i - K.one) * (a + b + K(2)*i - K(2)) * (a + b + K(2)*i) / (K(2)*den) + f2 = (a + i - K.one)*(b + i - K.one)*(a + b + K(2)*i) / den + p0 = dup_mul_ground(m1, f0, K) + p1 = dup_mul_ground(dup_lshift(m1, 1, K), f1, K) + p2 = dup_mul_ground(m2, f2, K) + m2, m1 = m1, dup_sub(dup_add(p0, p1, K), p2, K) + return m1 + +@public +def jacobi_poly(n, a, b, x=None, polys=False): + r"""Generates the Jacobi polynomial `P_n^{(a,b)}(x)`. + + Parameters + ========== + + n : int + Degree of the polynomial. + a + Lower limit of minimal domain for the list of coefficients. + b + Upper limit of minimal domain for the list of coefficients. + x : optional + polys : bool, optional + If True, return a Poly, otherwise (default) return an expression. 
+ """ + return named_poly(n, dup_jacobi, None, "Jacobi polynomial", (x, a, b), polys) + +def dup_gegenbauer(n, a, K): + """Low-level implementation of Gegenbauer polynomials.""" + if n < 1: + return [K.one] + m2, m1 = [K.one], [K(2)*a, K.zero] + for i in range(2, n+1): + p1 = dup_mul_ground(dup_lshift(m1, 1, K), K(2)*(a-K.one)/K(i) + K(2), K) + p2 = dup_mul_ground(m2, K(2)*(a-K.one)/K(i) + K.one, K) + m2, m1 = m1, dup_sub(p1, p2, K) + return m1 + +def gegenbauer_poly(n, a, x=None, polys=False): + r"""Generates the Gegenbauer polynomial `C_n^{(a)}(x)`. + + Parameters + ========== + + n : int + Degree of the polynomial. + x : optional + a + Decides minimal domain for the list of coefficients. + polys : bool, optional + If True, return a Poly, otherwise (default) return an expression. + """ + return named_poly(n, dup_gegenbauer, None, "Gegenbauer polynomial", (x, a), polys) + +def dup_chebyshevt(n, K): + """Low-level implementation of Chebyshev polynomials of the first kind.""" + if n < 1: + return [K.one] + # When n is small, it is faster to directly calculate the recurrence relation. + if n < 64: # The threshold serves as a heuristic + return _dup_chebyshevt_rec(n, K) + return _dup_chebyshevt_prod(n, K) + +def _dup_chebyshevt_rec(n, K): + r""" Chebyshev polynomials of the first kind using recurrence. + + Explanation + =========== + + Chebyshev polynomials of the first kind are defined by the recurrence + relation: + + .. math:: + T_0(x) &= 1\\ + T_1(x) &= x\\ + T_n(x) &= 2xT_{n-1}(x) - T_{n-2}(x) + + This function calculates the Chebyshev polynomial of the first kind using + the above recurrence relation. + + Parameters + ========== + + n : int + n is a nonnegative integer. + K : domain + + """ + m2, m1 = [K.one], [K.one, K.zero] + for _ in range(n - 1): + m2, m1 = m1, dup_sub(dup_mul_ground(dup_lshift(m1, 1, K), K(2), K), m2, K) + return m1 + +def _dup_chebyshevt_prod(n, K): + r""" Chebyshev polynomials of the first kind using recursive products. + + Explanation + =========== + + Computes Chebyshev polynomials of the first kind using + + .. math:: + T_{2n}(x) &= 2T_n^2(x) - 1\\ + T_{2n+1}(x) &= 2T_{n+1}(x)T_n(x) - x + + This is faster than ``_dup_chebyshevt_rec`` for large ``n``. + + Parameters + ========== + + n : int + n is a nonnegative integer. + K : domain + + """ + m2, m1 = [K.one, K.zero], [K(2), K.zero, -K.one] + for i in bin(n)[3:]: + c = dup_sub_term(dup_mul_ground(dup_mul(m1, m2, K), K(2), K), K.one, 1, K) + if i == '1': + m2, m1 = c, dup_sub_ground(dup_mul_ground(dup_sqr(m1, K), K(2), K), K.one, K) + else: + m2, m1 = dup_sub_ground(dup_mul_ground(dup_sqr(m2, K), K(2), K), K.one, K), c + return m2 + +def dup_chebyshevu(n, K): + """Low-level implementation of Chebyshev polynomials of the second kind.""" + if n < 1: + return [K.one] + m2, m1 = [K.one], [K(2), K.zero] + for i in range(2, n+1): + m2, m1 = m1, dup_sub(dup_mul_ground(dup_lshift(m1, 1, K), K(2), K), m2, K) + return m1 + +@public +def chebyshevt_poly(n, x=None, polys=False): + r"""Generates the Chebyshev polynomial of the first kind `T_n(x)`. + + Parameters + ========== + + n : int + Degree of the polynomial. + x : optional + polys : bool, optional + If True, return a Poly, otherwise (default) return an expression. + """ + return named_poly(n, dup_chebyshevt, ZZ, + "Chebyshev polynomial of the first kind", (x,), polys) + +@public +def chebyshevu_poly(n, x=None, polys=False): + r"""Generates the Chebyshev polynomial of the second kind `U_n(x)`. + + Parameters + ========== + + n : int + Degree of the polynomial. 
+ x : optional + polys : bool, optional + If True, return a Poly, otherwise (default) return an expression. + """ + return named_poly(n, dup_chebyshevu, ZZ, + "Chebyshev polynomial of the second kind", (x,), polys) + +def dup_hermite(n, K): + """Low-level implementation of Hermite polynomials.""" + if n < 1: + return [K.one] + m2, m1 = [K.one], [K(2), K.zero] + for i in range(2, n+1): + a = dup_lshift(m1, 1, K) + b = dup_mul_ground(m2, K(i-1), K) + m2, m1 = m1, dup_mul_ground(dup_sub(a, b, K), K(2), K) + return m1 + +def dup_hermite_prob(n, K): + """Low-level implementation of probabilist's Hermite polynomials.""" + if n < 1: + return [K.one] + m2, m1 = [K.one], [K.one, K.zero] + for i in range(2, n+1): + a = dup_lshift(m1, 1, K) + b = dup_mul_ground(m2, K(i-1), K) + m2, m1 = m1, dup_sub(a, b, K) + return m1 + +@public +def hermite_poly(n, x=None, polys=False): + r"""Generates the Hermite polynomial `H_n(x)`. + + Parameters + ========== + + n : int + Degree of the polynomial. + x : optional + polys : bool, optional + If True, return a Poly, otherwise (default) return an expression. + """ + return named_poly(n, dup_hermite, ZZ, "Hermite polynomial", (x,), polys) + +@public +def hermite_prob_poly(n, x=None, polys=False): + r"""Generates the probabilist's Hermite polynomial `He_n(x)`. + + Parameters + ========== + + n : int + Degree of the polynomial. + x : optional + polys : bool, optional + If True, return a Poly, otherwise (default) return an expression. + """ + return named_poly(n, dup_hermite_prob, ZZ, + "probabilist's Hermite polynomial", (x,), polys) + +def dup_legendre(n, K): + """Low-level implementation of Legendre polynomials.""" + if n < 1: + return [K.one] + m2, m1 = [K.one], [K.one, K.zero] + for i in range(2, n+1): + a = dup_mul_ground(dup_lshift(m1, 1, K), K(2*i-1, i), K) + b = dup_mul_ground(m2, K(i-1, i), K) + m2, m1 = m1, dup_sub(a, b, K) + return m1 + +@public +def legendre_poly(n, x=None, polys=False): + r"""Generates the Legendre polynomial `P_n(x)`. + + Parameters + ========== + + n : int + Degree of the polynomial. + x : optional + polys : bool, optional + If True, return a Poly, otherwise (default) return an expression. + """ + return named_poly(n, dup_legendre, QQ, "Legendre polynomial", (x,), polys) + +def dup_laguerre(n, alpha, K): + """Low-level implementation of Laguerre polynomials.""" + m2, m1 = [K.zero], [K.one] + for i in range(1, n+1): + a = dup_mul(m1, [-K.one/K(i), (alpha-K.one)/K(i) + K(2)], K) + b = dup_mul_ground(m2, (alpha-K.one)/K(i) + K.one, K) + m2, m1 = m1, dup_sub(a, b, K) + return m1 + +@public +def laguerre_poly(n, x=None, alpha=0, polys=False): + r"""Generates the Laguerre polynomial `L_n^{(\alpha)}(x)`. + + Parameters + ========== + + n : int + Degree of the polynomial. + x : optional + alpha : optional + Decides minimal domain for the list of coefficients. + polys : bool, optional + If True, return a Poly, otherwise (default) return an expression. 
+ """ + return named_poly(n, dup_laguerre, None, "Laguerre polynomial", (x, alpha), polys) + +def dup_spherical_bessel_fn(n, K): + """Low-level implementation of fn(n, x).""" + if n < 1: + return [K.one, K.zero] + m2, m1 = [K.one], [K.one, K.zero] + for i in range(2, n+1): + m2, m1 = m1, dup_sub(dup_mul_ground(dup_lshift(m1, 1, K), K(2*i-1), K), m2, K) + return dup_lshift(m1, 1, K) + +def dup_spherical_bessel_fn_minus(n, K): + """Low-level implementation of fn(-n, x).""" + m2, m1 = [K.one, K.zero], [K.zero] + for i in range(2, n+1): + m2, m1 = m1, dup_sub(dup_mul_ground(dup_lshift(m1, 1, K), K(3-2*i), K), m2, K) + return m1 + +def spherical_bessel_fn(n, x=None, polys=False): + """ + Coefficients for the spherical Bessel functions. + + These are only needed in the jn() function. + + The coefficients are calculated from: + + fn(0, z) = 1/z + fn(1, z) = 1/z**2 + fn(n-1, z) + fn(n+1, z) == (2*n+1)/z * fn(n, z) + + Parameters + ========== + + n : int + Degree of the polynomial. + x : optional + polys : bool, optional + If True, return a Poly, otherwise (default) return an expression. + + Examples + ======== + + >>> from sympy.polys.orthopolys import spherical_bessel_fn as fn + >>> from sympy import Symbol + >>> z = Symbol("z") + >>> fn(1, z) + z**(-2) + >>> fn(2, z) + -1/z + 3/z**3 + >>> fn(3, z) + -6/z**2 + 15/z**4 + >>> fn(4, z) + 1/z - 45/z**3 + 105/z**5 + + """ + if x is None: + x = Dummy("x") + f = dup_spherical_bessel_fn_minus if n < 0 else dup_spherical_bessel_fn + return named_poly(abs(n), f, ZZ, "", (QQ(1)/x,), polys)