# coding=utf-8
from __future__ import absolute_import, division, print_function
import torch
import torch.optim
import numpy as np
import struct
import time
import matplotlib.pyplot as plt
import math


class Net:
    """Parametric model of a 2-D flux-density profile: an error-function
    "box" (a uniform parallelogram convolved with a Gaussian), evaluated
    on flattened x/y coordinate tensors.

    Every parameter is held as a 1-element leaf tensor with
    ``requires_grad=True`` so it can be optimized directly by a
    ``torch.optim`` optimizer.
    """

    # sqrt(2): the erf arguments below are normalized by sigma * sqrt(2).
    _SQRT2 = 1.414213562373095048801688724209698

    # Every learnable tensor attribute, used when migrating devices.
    _TENSOR_NAMES = ("ph", "x13", "x10", "y11", "y10",
                     "sigma1", "sigma2", "gradient_inv")

    def __init__(self, ph, sigma_init, x13, x10, y11, y10, gradient_inv):
        """Create leaf tensors from the initial guesses.

        Args:
            ph: peak-height scale factor.
            sigma_init: pair ``(sigma1, sigma2)`` of Gaussian widths for
                the x and y directions.
            x13, x10: x positions of the box edges.
            y11, y10: y positions of the box edges.
            gradient_inv: inverse slope of the parallelogram's slanted
                side (shear factor applied to x as a function of y).
        """
        self.ph = torch.tensor([ph], requires_grad=True)
        self.x13 = torch.tensor([x13], requires_grad=True)
        self.x10 = torch.tensor([x10], requires_grad=True)
        self.y11 = torch.tensor([y11], requires_grad=True)
        self.y10 = torch.tensor([y10], requires_grad=True)
        self.sigma1 = torch.tensor([sigma_init[0]], requires_grad=True)
        self.sigma2 = torch.tensor([sigma_init[1]], requires_grad=True)
        # Inverse slope of the parallelogram's slanted edge.
        self.gradient_inv = torch.tensor([gradient_inv], requires_grad=True)
        self.__parameters = self._param_dict()  # name -> tensor mapping
        self.___gpu = False  # True once the tensors live on the GPU

    def _param_dict(self):
        """Rebuild the name -> tensor mapping after (re)binding attributes.

        NOTE(review): ``gradient_inv`` is created with requires_grad=True
        but is excluded here, so ``parameters()``/``parameters_other()``
        never expose it and the optimizer leaves it fixed — confirm this
        is intentional.
        """
        return dict(
            sigma1=self.sigma1,
            sigma2=self.sigma2,
            ph=self.ph,
            x13=self.x13,
            x10=self.x10,
            y11=self.y11,
            y10=self.y10,
        )

    def _migrate(self, transfer):
        """Apply *transfer* (a device move) to every learnable tensor.

        Each moved tensor is detached and re-marked as requiring grad so
        it becomes a fresh leaf on the target device; the parameter dict
        is then rebuilt to point at the new tensors.
        """
        for name in self._TENSOR_NAMES:
            moved = transfer(getattr(self, name)).detach().requires_grad_(True)
            setattr(self, name, moved)
        self.__parameters = self._param_dict()

    def cuda(self):
        """Move all parameters to the GPU (idempotent).

        Returns self to support chained calls.
        """
        if not self.___gpu:
            self._migrate(lambda t: t.cuda())
            self.___gpu = True
        return self

    def cpu(self):
        """Move all parameters back to the CPU (idempotent).

        Returns self to support chained calls.
        """
        if self.___gpu:
            self._migrate(lambda t: t.cpu())
            self.___gpu = False
        return self

    def forward(self, inputs_x: torch.Tensor, inputs_y: torch.Tensor):
        """Evaluate the model at the given flattened coordinates.

        Args:
            inputs_x: flattened x coordinates.
            inputs_y: flattened y coordinates (same shape as inputs_x).

        Returns:
            Tensor of non-negative flux-density values, same shape as the
            inputs.
        """
        inv_s1 = 1 / (self.sigma1 * self._SQRT2)
        inv_s2 = 1 / (self.sigma2 * self._SQRT2)
        # Shear transform: undo the slant so the parallelogram's edges
        # become axis-aligned in x.
        actual_x = inputs_x + self.gradient_inv * (inputs_y - self.y11)
        return torch.abs(
            0.25
            * (
                torch.erf((self.x13 - actual_x) * inv_s1)
                - torch.erf((self.x10 - actual_x) * inv_s1)
            )
            * (
                torch.erf((self.y11 - inputs_y) * inv_s2)
                - torch.erf((self.y10 - inputs_y) * inv_s2)
            )
            * self.ph
        )

    def parameters(self):
        """Yield every optimizable parameter tensor."""
        yield from self.__parameters.values()

    def parameters_ph(self):
        """Yield only the peak-height parameter (it gets its own LR group)."""
        yield self.__parameters['ph']

    def parameters_other(self):
        """Yield every optimizable parameter except ``ph``."""
        for name, param in self.__parameters.items():
            if name != 'ph':
                yield param

def my_loss_op(input: torch.Tensor, ground_truth: torch.Tensor, ground_truth_max, alpha):
    """Weighted blend of a sum-of-squares term and a peak-matching term.

    Args:
        input: model output tensor. (Name shadows the builtin ``input``;
            kept for backward compatibility with existing callers.)
        ground_truth: target tensor, same shape as ``input``.
        ground_truth_max: precomputed maximum of ``ground_truth``.
        alpha: weight in [0, 1] of the pixel-wise term; ``1 - alpha``
            weights the peak term.

    Returns:
        Scalar loss tensor.
    """
    pixel_term = torch.sum(torch.pow(input - ground_truth, 2))
    peak_term = torch.pow(torch.max(input) - ground_truth_max, 2)
    # Scale the scalar peak term by the element count so its gradient
    # magnitude stays comparable to the summed pixel-wise term.
    return alpha * pixel_term + (1 - alpha) * peak_term * input.numel()


def main():
    """Fit the parametric flux model to a 160x160 ray-tracing result.

    Loads the ground-truth flux map from a hard-coded raw float32 dump,
    displays it, optimizes the Net parameters with Adam for 7000 steps,
    then displays the fitted map and prints the fitted parameters and
    several relative error metrics.
    """
    resolution = (160, 160)
    n_values = resolution[0] * resolution[1]

    # Raw dump of native-endian float32 values, no header.
    # (with-block ensures the file handle is closed; the original leaked it.)
    with open("D:/Learn/solar_software/solar_3/1.bin", "rb") as binfile:
        float_data = struct.unpack("f" * n_values, binfile.read())

    # Initial parameter guesses (presumably from a prior fit — values are
    # hard-coded; confirm against the data source).
    ph = 6.901197509765625E2
    x13 = 2.1725692749023438E0
    x10 = -1.6070754528045654E0
    y11 = -1.3468780517578125E0
    y10 = 1.3304443359375E0
    sigma_init = (4.987233877182007E-2, 4.987233877182007E-2)
    gradient_inv = 2.1428488194942474E-1

    # Show the ground-truth map.
    X = np.array(float_data, dtype=np.float32).reshape([160, 160])
    plt.figure(figsize=(160, 160))
    plt.imshow(X)
    plt.colorbar()
    plt.title("Ray-Tracing Result")
    plt.show()

    # Build the flattened coordinate grid covering [-4, 4) with step 0.05.
    x = np.linspace(-4, 4 - 0.05, 160)
    y = np.linspace(-4, 4 - 0.05, 160)
    coords = np.transpose([np.tile(x, len(y)), np.repeat(y, len(x))]).astype(np.float32)
    inputs_x = torch.from_numpy(coords[:, 0])
    inputs_y = torch.from_numpy(coords[:, 1])
    ground_truth = torch.from_numpy(np.array(float_data, dtype=np.float32))

    # Build the model.
    net = Net(ph, sigma_init, x13, x10, y11, y10, gradient_inv)

    # Move everything to the GPU when one is available.
    if torch.cuda.is_available():
        print("using cuda")
        inputs_x = inputs_x.cuda()
        inputs_y = inputs_y.cuda()
        ground_truth = ground_truth.cuda()
        net = net.cuda()

    ground_truth_max = torch.max(ground_truth)
    ground_truth_sum = torch.sum(ground_truth)

    # ph lives on a much larger scale than the geometry parameters, so it
    # gets its own, larger learning rate via a separate param group.
    params_dict = [{'params': net.parameters_ph(), 'lr': 0.5},
                   {'params': net.parameters_other(), 'lr': 0.1}]
    optimizer = torch.optim.Adam(params_dict, weight_decay=0.0005)  # type: ignore

    out = None
    # Optimize for 7000 steps.
    for i in range(1, 7001):
        # Forward pass.
        out = net.forward(inputs_x, inputs_y)
        loss = my_loss_op(out, ground_truth, ground_truth_max, 0.75)
        # Clear stale gradients (essential), backprop, and step.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if i % 1000 == 0:  # progress report every 1000 steps
            # Transfer the loss to the CPU only when it is printed,
            # avoiding a device sync on every iteration.
            print(i, loss.cpu().detach().numpy())

    # Show the fitted map.
    cpu_out = out.cpu().detach().numpy().reshape([160, 160])
    plt.figure(figsize=(160, 160))
    plt.imshow(cpu_out)
    plt.colorbar()
    plt.title("Plot 2D array")
    plt.show()

    # Report the fitted parameter values.
    sigma1 = net.sigma1.cpu().detach().numpy()
    sigma2 = net.sigma2.cpu().detach().numpy()
    ph = net.ph.cpu().detach().numpy()
    x13 = net.x13.cpu().detach().numpy()
    x10 = net.x10.cpu().detach().numpy()
    y11 = net.y11.cpu().detach().numpy()
    y10 = net.y10.cpu().detach().numpy()
    print(sigma1, sigma2, ph, x13, x10, y11, y10)

    # Error metrics, all relative to the ground truth: mean absolute
    # deviation, total-energy error, peak error, and an RMSE-style error.
    e_rad = torch.sum(torch.abs(out - ground_truth)) / ground_truth_sum
    e_energy = torch.abs(torch.sum(out) - ground_truth_sum) / ground_truth_sum
    e_peak = torch.abs(torch.max(out) - ground_truth_max) / ground_truth_max
    e_rmse = torch.sqrt(torch.sum(torch.pow(out - ground_truth, 2))) / ground_truth_sum
    print(e_rad, e_energy, e_peak, e_rmse)



# Script entry point: run the fitting pipeline when executed directly.
if __name__ == "__main__":
    main()
