import math
import typing
import warnings


# TODO: do not implement this at the Python level.
# Since this is a course assignment, the classic fourth-order Runge-Kutta
# method is implemented here in pure Python.  In practice one would use a
# compiled (C++-backed) package instead, as pure Python is comparatively slow.
class SimplePendulumProblem:
    """Damped simple pendulum integrated with the classic RK4 scheme.

    Solves the second-order ODE

        theta'' = -(g / l) * sin(theta) - (gamma / m) * theta'

    rewritten as a first-order system in (theta, omega), from the initial
    condition theta(0) = theta0, omega(0) = 0, with a fixed step ``t_step``.
    The solution is cached step by step, so repeated queries and queries at
    increasing |t| reuse earlier work.  Negative times are integrated
    backwards in time and cached separately from positive times.
    """

    def __init__(self,
                 g: float, gamma: float, theta0: float,
                 m: float = 1, l_: float = 1,
                 t_step: float = 1E-3):
        """Set up the initial-value problem.

        Args:
            g: gravitational acceleration.
            gamma: linear (velocity-proportional) damping coefficient.
            theta0: initial angle in radians; initial angular velocity is 0.
            m: pendulum mass; must be non-zero.
            l_: pendulum length; must be non-zero.
            t_step: integration step size; must be strictly positive.

        Raises:
            ValueError: if ``t_step <= 0``, ``m == 0`` or ``l_ == 0``.

        Warns:
            UserWarning: if ``g``, ``gamma``, ``m`` or ``l_`` is negative —
                mathematically supported, physically questionable.
        """
        if t_step <= 0:
            # Fix: the former message said "less than zero", which was wrong
            # for t_step == 0; the actual requirement is strict positivity.
            raise ValueError(
                f"The given argument {t_step=} is not positive.")
        self._t_step = t_step

        # Fail fast on singular parameters: the right-hand side below divides
        # by both l_ and m, so zero values would otherwise only surface as a
        # ZeroDivisionError on the first call to theta().
        if m == 0:
            raise ValueError(f"The given argument {m=} must not be zero.")
        if l_ == 0:
            raise ValueError(f"The given argument {l_=} must not be zero.")

        self._warn_if_negative("g", g)
        self._g = g
        self._warn_if_negative("gamma", gamma)
        self._gamma = gamma
        self._warn_if_negative("m", m)
        self._m = m
        self._warn_if_negative("l_", l_)
        self._l = l_

        def f(theta: float, omega: float) -> tuple[float, float]:
            # First-order form of the pendulum equation:
            #   theta' = omega
            #   omega' = -(g / l) * sin(theta) - (gamma / m) * omega
            d_theta_d_t = omega
            d_omega_d_t = -g * math.sin(theta) / l_ - gamma * omega / m
            return d_theta_d_t, d_omega_d_t

        self._f = f

        # Step-indexed caches of the solution, one pair per time direction;
        # index 0 holds the initial condition in both directions.
        self.__cached_theta_positive = [theta0]
        self.__cached_omega_positive = [0.]

        self.__cached_theta_negative = [theta0]
        self.__cached_omega_negative = [0.]

    @staticmethod
    def _warn_if_negative(name: str, value: float) -> None:
        """Warn when a physical parameter is negative (message kept verbatim
        from the previous per-parameter copies of this check)."""
        if value < 0:
            warnings.warn(
                f"The given argument {name}={value!r} is less than zero. "
                f"This is mathematically supported, but the physical significance might be broken.")

    def g(self) -> float:
        """Return the gravitational acceleration."""
        return self._g

    def gamma(self) -> float:
        """Return the damping coefficient."""
        return self._gamma

    def theta0(self) -> float:
        """Return the initial angle (cache index 0 of either direction)."""
        return self.__cached_theta_negative[0]

    def m(self) -> float:
        """Return the pendulum mass."""
        return self._m

    def l_(self) -> float:
        """Return the pendulum length."""
        return self._l

    def t_step(self) -> float:
        """Return the integration step size."""
        return self._t_step

    def __solve_to_step(self,
                        step_stop_abs: int,
                        step_stop_sign: int):
        """Advance the cached solution of one time direction up to (and
        including) step index ``step_stop_abs`` using classic RK4.

        ``step_stop_sign`` selects the direction: +1 integrates forward in
        time, -1 backward (negative step size).
        """
        assert step_stop_abs >= 0
        if step_stop_sign >= 0:
            assert step_stop_sign == 1
            theta_caches = self.__cached_theta_positive
            omega_caches = self.__cached_omega_positive
            assert len(theta_caches) == len(omega_caches) <= step_stop_abs
        else:
            assert step_stop_sign == -1
            theta_caches = self.__cached_theta_negative
            omega_caches = self.__cached_omega_negative
            assert len(theta_caches) == len(omega_caches) <= step_stop_abs

        # Sign-adjusted step and the RK4 weight fractions, hoisted out of
        # the loop.
        h = self._t_step * step_stop_sign
        h_d_2 = h / 2
        h_d_3 = h / 3
        h_d_6 = h / 6
        for n in range(len(theta_caches) - 1, step_stop_abs):
            p_t = theta_caches[-1]
            p_o = omega_caches[-1]
            # Classic fourth-order Runge-Kutta stages.
            k1_t, k1_o = self._f(p_t,
                                 p_o)
            k2_t, k2_o = self._f(p_t + h_d_2 * k1_t,
                                 p_o + h_d_2 * k1_o)
            k3_t, k3_o = self._f(p_t + h_d_2 * k2_t,
                                 p_o + h_d_2 * k2_o)
            k4_t, k4_o = self._f(p_t + h * k3_t,
                                 p_o + h * k3_o)
            # y_{n+1} = y_n + h/6 * (k1 + 2*k2 + 2*k3 + k4)
            theta_caches.append(
                p_t + h_d_6 * k1_t + h_d_3 * k2_t + h_d_3 * k3_t + h_d_6 * k4_t)
            omega_caches.append(
                p_o + h_d_6 * k1_o + h_d_3 * k2_o + h_d_3 * k3_o + h_d_6 * k4_o)

    def theta(self, t: float) -> float:
        """Return the angle at time ``t``.

        ``t`` is quantized to ``int(t / t_step)`` whole steps (truncation
        toward zero); negative times trigger a warning and backward
        integration.  Results are cached, so later calls are cheap.
        """
        step = int(t / self._t_step)
        if step >= 0:
            caches = self.__cached_theta_positive
            sign = 1
        else:
            warnings.warn(
                f"The given argument {t=} is less than zero. "
                f"This is mathematically supported, but the physical significance might be broken.")
            caches = self.__cached_theta_negative
            step = -step
            sign = -1

        if len(caches) <= step:
            self.__solve_to_step(step, sign)
        return caches[step]

    def theta_batch(self, t_batch: typing.Iterable[float]) \
            -> typing.Generator[float, None, None]:
        """Lazily yield ``theta(t)`` for every ``t`` in ``t_batch``."""
        for t in t_batch:
            yield self.theta(t)
