# Concrete DFT matrix-factorization problem (subclass of the abstract DFT base class)
import numpy as np
from solutions.digital_fourier_transform_base import DigitalFourierTransformerBase
from solvers.alpha import Individual, AlphaInherit
import pickle
import os


class DFT_2(DigitalFourierTransformerBase):
    """Concrete DFT-matrix factorization problem ("Problem 2").

    Trains ``K`` factor matrices (``self._A_K``) plus a scalar scale
    ``self.beta`` by gradient descent so that their product approximates
    the DFT matrix, then optionally post-processes the result with a
    genetic algorithm and quantizes the factors to signed powers of two.

    NOTE(review): ``self.dft``, ``self._A_K``, ``self.K``, ``self.N``,
    ``self.beta``, ``self.error``, ``self._matmul`` and ``self.max_two``
    are assumed to be provided by ``DigitalFourierTransformerBase`` —
    the base class is not visible in this file; confirm against it.
    """

    @property
    def __name__(self):
        # Used to build the result file names in train()/post_train().
        return "Problem_2"

    def identify(self, A_k: np.ndarray, rate: float = 0.00001) -> np.ndarray:
        """Sharpen ``A_k`` by amplifying its dominant entries.

        The two largest-magnitude entries per row (as selected by
        ``self.max_two``) are scaled by ``1 + rate`` while every other
        entry is scaled by ``1 - rate``.

        ``max_two`` is evaluated once (the original called it twice);
        assumes it is side-effect free — confirm in the base class.
        """
        top_two = self.max_two(A_k)
        rest = A_k - top_two
        return rest * (1 - rate) + top_two * (1 + rate)

    def grad(self, k: int, m: float = 3):
        """Gradient of the reconstruction error w.r.t. factor matrix ``k``.

        ``m`` weights the conjugated second term (default 3, kept from
        the original formulation).  The residual and the partial product
        are hoisted so each is computed once instead of twice.
        """
        residual = self.beta * self.dft - self._matmul(self._A_K, self.K)
        partial = self._matmul(self._A_K, k)
        return -residual @ np.conjugate(partial) - partial @ np.conjugate(residual) * m

    def grad_beta(self):
        """Gradient of the reconstruction error w.r.t. the scale ``beta``."""
        residual = self.beta * self.dft - self._matmul(self._A_K, self.K)
        return np.real(np.trace(residual @ np.conjugate(self.dft)))

    def train(
        self, lr: float = 0.01, epoch: int = 1000, store_path="./result/Problem_2.pkl", is_post_train: bool = False, **kwargs
    ):
        """Gradient-descent training loop over the factor matrices and beta.

        Stops early as soon as the error increases (simple divergence
        guard), then pickles ``(self, error_list, A_K_list, beta_list)``.

        Parameters
        ----------
        lr : learning rate applied to both the matrices and beta.
        epoch : maximum number of gradient steps.
        store_path : directory receiving the result pickle.  Despite the
            ``.pkl``-looking default it is joined as a directory —
            TODO confirm with callers.
        is_post_train : when True, run post_train() afterwards.
        **kwargs : forwarded to post_train().
        """
        error_list = []
        A_K_list = []
        beta_list = []
        self.lr = lr  # remembered so post_train() can rebuild the file name
        error = 100000  # sentinel larger than any expected initial error
        for i in range(epoch):
            try:
                # Candidate step for every factor matrix and for beta.
                A_K_temp = [x - lr * self.grad(k)
                            for k, x in enumerate(self._A_K)]
                beta_temp = self.beta - lr * self.grad_beta()
                # Divergence guard: stop once the error goes back up.
                if self.error - error > 0:
                    break
                if i % 100 == 0:
                    print(f"epoch:{i}, error:{self.error}")
                error = self.error
                self._A_K = A_K_temp
                self.beta = beta_temp
                error_list.append(self.error)
                A_K_list.append(self._A_K)
                beta_list.append(self.beta)
            except Exception as exc:
                # Chain the original failure instead of discarding it
                # (the old bare ``except`` hid the real traceback and
                # also swallowed KeyboardInterrupt/SystemExit).
                raise Exception("TrainError") from exc
        result_file = os.path.join(
            store_path, f"K{self.K}_N{self.N}_lr{lr}_" + self.__name__ + ".pkl")
        # Context manager closes the handle (it was leaked before).
        with open(result_file, "wb") as fh:
            pickle.dump((self, error_list, A_K_list, beta_list), fh)
        if is_post_train:
            self.post_train(store_path=store_path, **kwargs)

    def convert_to_power_of_two(self, A: np.ndarray):
        """Round each entry of complex matrix ``A`` to signed powers of two.

        Real and imaginary parts are quantized independently as
        ``sign * 2**floor(log2(|part|))`` with the exponent clipped at 0,
        so magnitudes below 1 map to ``sign * 1`` (and exact zeros stay 0
        because ``sign(0) == 0``).  The original's dead pre-allocation of
        an int matrix was removed.
        """
        A_real = np.real(A)
        A_imag = np.imag(A)
        return (
            np.sign(A_real)
            * 2 ** np.floor(np.clip(np.log2(np.abs(A_real)), 0, None))
            + np.sign(A_imag)
            * 2 ** np.floor(np.clip(np.log2(np.abs(A_imag)), 0, None))
            * 1j
        )

    def post_train(
        self,
        store_path: str = "./result/Problem_2.pkl",
        iter_num: int = 10000,
        individual_num: int = 8,
        offstring_num: int = 8,
    ):
        """Quantization-aware post-processing via a genetic algorithm.

        The quantized objective is not differentiable, so after the
        floating-point training a scalar fitting factor is searched with
        an evolutionary strategy (``AlphaInherit``) instead of gradient
        descent.  The best individual's factor scales the trained
        matrices, which are then rounded to powers of two.

        Parameters
        ----------
        store_path : directory holding the pickle written by train().
        iter_num : number of GA generations.
        individual_num : population size.
        offstring_num : offspring produced per generation.
        """
        print("*"*50)
        print("现在进入遗传算法内")
        print("*"*50)
        trained_file = os.path.join(
            store_path, f"K{self.K}_N{self.N}_lr{self.lr}_" + self.__name__ + ".pkl")
        # Context manager closes the handle (it was leaked before).
        with open(trained_file, "rb") as fh:
            M, _, _, _ = pickle.load(fh)
        IS = AlphaInherit(
            individual_num,
            Individual,
            val=np.random.normal(2, 1, individual_num),
            sigma=np.random.normal(0.4, 0.1, individual_num),
            dft=M.dft,
            beta=M.get_beta,
            A_K=M.A_K,
            K=M.K,
            gate=-30,
            lr=1e-4
        )
        final_group = IS.train(
            epoch_num=iter_num,
            multiply_kwargs={"offstring_num": offstring_num},
            filter_kwargs={},
        )
        # Select the individual with the smallest score, evaluating each
        # score exactly once.  (The old loop scored every individual
        # twice and left min_obj=None -- crashing below -- whenever no
        # score dropped under the arbitrary threshold 0.)
        scored = [(ind.score(M.dft, M.A_K, M.K, M.get_beta), ind)
                  for ind in final_group]
        val, min_obj = min(scored, key=lambda pair: pair[0])

        self._A_K = [self.convert_to_power_of_two(min_obj.val * A_K_i)
                     for A_K_i in M._A_K]
        self.alpha = min_obj.val
        print(
            f"拟合结果与真实值的差: {val}, 拟合因子: {min_obj.val}, 拟合因子的标准差: {min_obj.sigma}")
        processed_file = os.path.join(
            store_path, self.__name__ + "_processed" + ".pkl")
        with open(processed_file, "wb") as fh:
            pickle.dump((self, min_obj), fh)

    @property
    def result(self):
        """Product of the alpha-normalized factor matrices.

        NOTE(review): ``self._A_K / self.alpha`` requires ``_A_K`` to be
        an ndarray; after post_train() it is a Python list, which would
        raise TypeError here -- confirm the intended representation.
        """
        return self._matmul(self._A_K / self.alpha, self.K)

    def print(self, **kwargs):
        """Dump beta, every factor matrix, the fitted product and the error.

        Side effect kept from the original: divides every factor matrix
        by ``alpha`` in place before computing the final error.
        """
        print(f"beta:{self.beta}")
        for i in range(self.K):
            print(f"A_{i}:\n{self._A_K[i]}")
        print(f"result:{self.result}")
        # Attribute name ``comlexity`` (sic) is kept as declared outside
        # this file -- renaming it here would break the base class.
        print(f"complex_degree:{self.comlexity}")
        self._A_K = [A_K_i / self.alpha for A_K_i in self._A_K]
        print(f"error:{self.error/self.N}")
