# coding=utf-8
from __future__ import absolute_import, division, print_function
import torch
import numpy as np
import struct
import time
import matplotlib.pyplot as plt
import math
import os
import json


class Net:
    """Analytic flux-map model with trainable scalar parameters.

    The flux at an image-plane point is |0.25 * ph * Ex * Ey| where Ex and
    Ey are differences of error functions over the x window [x10, x13] and
    the y window [y10, y11]; a shear (``gradient_inv``) tilts the x window
    into a parallelogram.  Every parameter is a 1-element float32 tensor
    with ``requires_grad=True`` so the model can be fitted with a torch
    optimizer.
    """

    # sqrt(2), used to build the 1 / (sigma * sqrt(2)) erf scale factors.
    _SQRT2 = 1.414213562373095048801688724209698

    # All tensor attributes that must travel together between CPU and GPU.
    _TENSOR_ATTRS = (
        "ph", "x13", "x10", "y11", "y10", "sigma1", "sigma2", "gradient_inv"
    )

    def __init__(self, ph, sigma_init, x13, x10, y11, y10, gradient_inv):
        def _param(value):
            # Every fitted quantity is a length-1 float32 leaf tensor.
            return torch.tensor([value], dtype=torch.float32, requires_grad=True)

        self.ph = _param(ph)  # peak height
        self.x13 = _param(x13)  # x-window edges
        self.x10 = _param(x10)
        self.y11 = _param(y11)  # y-window edges
        self.y10 = _param(y10)
        self.sigma1 = _param(sigma_init[0])  # x-direction spread
        self.sigma2 = _param(sigma_init[1])  # y-direction spread
        # Inverse slope of the parallelogram's slanted edge (shear factor).
        self.gradient_inv = _param(gradient_inv)
        self.__parameters = self._collect_parameters()
        self.___gpu = False  # True once the tensors live on the GPU

    def _collect_parameters(self):
        """Rebuild the name -> tensor dict served by the parameter iterators.

        NOTE(review): gradient_inv is excluded, so the optimizer never
        updates it even though it carries requires_grad=True — confirm this
        is intentional.
        """
        return dict(
            sigma1=self.sigma1,
            sigma2=self.sigma2,
            ph=self.ph,
            x13=self.x13,
            x10=self.x10,
            y11=self.y11,
            y10=self.y10,
        )

    def _transfer(self, mover):
        """Apply *mover* (t -> t.cuda() / t.cpu()) to every tensor attribute.

        Each moved tensor is detached and re-marked as a gradient leaf, and
        the parameter dict is rebuilt to point at the new tensors.
        """
        for attr in self._TENSOR_ATTRS:
            moved = mover(getattr(self, attr)).detach().requires_grad_(True)
            setattr(self, attr, moved)
        self.__parameters = self._collect_parameters()

    def cuda(self):
        """Move all parameters to the GPU (no-op if already there).

        Returns self to support chained calls.
        """
        if not self.___gpu:
            self._transfer(lambda t: t.cuda())
            self.___gpu = True
        return self

    def cpu(self):
        """Move all parameters back to the CPU (no-op if already there).

        Returns self to support chained calls.
        """
        if self.___gpu:
            self._transfer(lambda t: t.cpu())
            self.___gpu = False
        return self

    def forward(self, inputs_x: torch.Tensor, inputs_y: torch.Tensor):
        """Evaluate the model at the given image-plane coordinates.

        inputs_x / inputs_y are flat coordinate tensors of equal length;
        returns a tensor of non-negative flux values of the same length.
        """
        inv_s1 = 1 / (self.sigma1 * self._SQRT2)
        inv_s2 = 1 / (self.sigma2 * self._SQRT2)
        # Shear transform: undo the parallelogram slant so the x window
        # becomes axis-aligned.
        actual_x = inputs_x + self.gradient_inv * (inputs_y - self.y11)

        return torch.abs(
            0.25
            * (
                torch.erf((self.x13 - actual_x) * inv_s1)
                - torch.erf((self.x10 - actual_x) * inv_s1)
            )
            * (
                torch.erf((self.y11 - inputs_y) * inv_s2)
                - torch.erf((self.y10 - inputs_y) * inv_s2)
            )
            * self.ph
        )

    def parameters(self):
        """Yield every fitted parameter tensor."""
        yield from self.__parameters.values()

    def parameters_ph(self):
        """Yield only the peak-height parameter (gets its own learning rate)."""
        for name, param in self.__parameters.items():
            if name == "ph":
                yield param

    def parameters_other(self):
        """Yield every fitted parameter except the peak height."""
        for name, param in self.__parameters.items():
            if name != "ph":
                yield param


def my_loss_op(
    input: torch.Tensor, ground_truth: torch.Tensor, ground_truth_max, alpha
):
    """Blend a sum-of-squares term with a peak-matching term.

    Returns alpha * SSE(input, ground_truth)
          + (1 - alpha) * (max(input) - ground_truth_max)^2 * numel(input).
    The peak term is scaled by the element count so both terms stay on a
    comparable magnitude.

    ``input`` shadows the builtin, but renaming it would break keyword
    callers, so the public name is kept.
    """
    input_max = torch.max(input)
    # input.numel() == product of all dims; clearer than math.prod(size()).
    return alpha * torch.sum(torch.pow(input - ground_truth, 2)) + (
        1 - alpha
    ) * torch.pow(input_max - ground_truth_max, 2) * input.numel()


def unizar_fit_one_heliostat(
    raytracing_binary_path: str,
    flux_map_shape: list[int],
    peak_flux_param: float,
    sigmas: list[float],
    x13: float,
    x10: float,
    y11: float,
    y10: float,
    gradient_inv: float,
):
    """Fit the Net model to one ray-traced flux map.

    Reads ``flux_map_shape[0] * flux_map_shape[1]`` little float32 values
    from the binary file at *raytracing_binary_path*, runs 7000 Adam steps
    starting from the given initial parameters, and returns the fitted
    ``(sigma1, sigma2, ph, x13, x10, y11, y10)`` as numpy float32 scalars.
    """
    num_values = flux_map_shape[0] * flux_map_shape[1]
    # `with` guarantees the file is closed even if unpack raises.
    with open(raytracing_binary_path, "rb") as binfile:
        float_data = struct.unpack("f" * num_values, binfile.read())

    # Image-plane extent; assumes 20 samples per unit length with a grid
    # spacing of 0.05 — TODO confirm against the ray tracer's settings.
    image_plane_length = flux_map_shape[0] / 20
    image_plane_width = flux_map_shape[1] / 20

    # Sample coordinates along each axis.
    x = np.linspace(
        -image_plane_length / 2, image_plane_length / 2 - 0.05, flux_map_shape[0]
    ).astype(np.float32)
    y = np.linspace(
        -image_plane_width / 2, image_plane_width / 2 - 0.05, flux_map_shape[1]
    ).astype(np.float32)
    # Flattened cartesian product of the grid: column 0 is x, column 1 is y.
    inputs = np.transpose([np.tile(x, len(y)), np.repeat(y, len(x))])
    inputs_x = torch.from_numpy(inputs[:, 0])
    inputs_y = torch.from_numpy(inputs[:, 1])
    ground_truth = torch.from_numpy(np.array(float_data, dtype=np.float32))

    # Build the model from the caller-supplied initial guesses.
    net = Net(peak_flux_param, sigmas, x13, x10, y11, y10, gradient_inv)

    # Move everything to the GPU when one is available.
    if torch.cuda.is_available():
        print("using cuda")
        inputs_x = inputs_x.cuda()
        inputs_y = inputs_y.cuda()
        ground_truth = ground_truth.cuda()
        net = net.cuda()

    ground_truth_max = torch.max(ground_truth)

    # The peak height gets a larger learning rate than the shape parameters.
    params_dict = [
        {"params": net.parameters_ph(), "lr": 0.5},
        {"params": net.parameters_other(), "lr": 0.1},
    ]
    optimizer = torch.optim.Adam(params_dict, weight_decay=0.0005)  # type: ignore

    for _ in range(7000):
        out = net.forward(inputs_x, inputs_y)
        loss = my_loss_op(out, ground_truth, ground_truth_max, 0.75)
        # Clear stale gradients before each backward pass.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

    def _scalar(t: torch.Tensor):
        # Pull a 1-element parameter back to the CPU as a numpy scalar.
        return t.cpu().detach().numpy()[0]

    return (
        _scalar(net.sigma1),
        _scalar(net.sigma2),
        _scalar(net.ph),
        _scalar(net.x13),
        _scalar(net.x10),
        _scalar(net.y11),
        _scalar(net.y10),
    )


def main():
    """Fit every ``<i>.json`` / ``<i>.bin`` pair in the current directory.

    For each unfitted pair, runs the fit and writes the updated parameters
    (and ``fitted: true``) back into the JSON file in place.
    """
    # Heuristic upper bound on the number of maps: the directory is assumed
    # to hold json/bin pairs, so half the file count covers all indices.
    # Missing pairs inside the range are skipped below — TODO confirm the
    # directory never contains enough extra files to shrink this bound.
    num_flux_maps = len(os.listdir(".")) // 2
    for i in range(num_flux_maps):
        json_name = "./" + str(i) + ".json"
        flux_map_name = "./" + str(i) + ".bin"
        if not (os.path.exists(json_name) and os.path.exists(flux_map_name)):
            continue

        # Read the metadata up front and close the file before the fit,
        # which can run for a long time.
        with open(json_name, "r") as json_file:
            json_object = json.load(json_file)

        # Skip maps already fitted; this script only ever stores bool True.
        if json_object.get("fitted") is True:
            continue
        # A sun elevation of 0 means no usable flux map.
        if json_object["sun_elevation"] == 0:
            continue

        unizar = json_object["UNIZAR"]
        new_sigma1, new_sigma2, new_ph, new_x13, new_x10, new_y11, new_y10 = (
            unizar_fit_one_heliostat(
                flux_map_name,
                json_object["flux_map_shape"],
                json_object["peak_flux_param"],
                json_object["sigmas"],
                unizar["x13"],
                unizar["x10"],
                unizar["y11"],
                unizar["y10"],
                unizar["gradient_inv"],
            )
        )

        # Persist the fitted parameters as plain floats (JSON-serializable).
        json_object["peak_flux_param"] = float(new_ph)
        json_object["sigmas"] = [float(new_sigma1), float(new_sigma2)]
        unizar["x13"] = float(new_x13)
        unizar["x10"] = float(new_x10)
        unizar["y11"] = float(new_y11)
        unizar["y10"] = float(new_y10)
        json_object["fitted"] = True

        with open(json_name, "w") as json_file:
            json.dump(json_object, json_file)



# Script entry point: only run the batch fit when executed directly.
if __name__ == "__main__":
    main()
