# coding=utf-8

import h5py
import numpy as np
import torch
from torch.utils.data import DataLoader
from torch.utils.data import TensorDataset
from tqdm import tqdm


def preprocess_data(hdf5_file_path: str) -> np.ndarray:
    """Load the UNIZAR fitting-parameter datasets from an HDF5 file.

    Returns a shuffled float32 array of shape (N, 23) whose columns are, in
    order:
      0-5   heliostat_area, heliostat_l_ratio, heliostat_w_ratio,
            heliostat_glass_thickness, id, flux_map_shape  (flux-map index keys)
      6-14  heliostat_area_float, heliostat_l_w_ratio,
            distance_heliostat_receiver, heliostat_glass_thickness_float,
            sun_elevation, sun_azimuth, heliostat_glass_sigma, sun_csr,
            heliostat_glass_refractive_index                (model inputs)
      15-21 peak_flux_param, sigma_x, sigma_y, x13, x10, y11, y10
                                                            (model outputs)
      22    gradient_inv  (integration-region parameter, not a network
            input/output)
    Glass thickness is in millimetres.

    The rows are shuffled with a fixed seed (666) so the train/validation
    split downstream is reproducible.
    """
    # Context manager guarantees the file is closed even if a read raises
    # (the original explicit close() leaked the handle on error).
    with h5py.File(hdf5_file_path, "r") as h5_file:

        def _load_2d(name: str, dtype=None) -> np.ndarray:
            # Read a dataset fully into memory as a (1, N) row vector.
            data = np.array(h5_file[name])
            if dtype is not None:
                data = data.astype(dtype)
            return np.atleast_2d(data)

        id_array = _load_2d("id")
        heliostat_area_array = _load_2d("heliostat_area")
        heliostat_area_float_array = heliostat_area_array.astype(np.float32)

        # The l/w ratio is computed on the raw 1-D arrays before promotion.
        heliostat_l_ratio_raw = np.array(h5_file["heliostat_l_ratio"])
        heliostat_w_ratio_raw = np.array(h5_file["heliostat_w_ratio"])
        heliostat_l_w_ratio_array = np.atleast_2d(
            (heliostat_l_ratio_raw / heliostat_w_ratio_raw).astype(np.float32)
        )
        heliostat_l_ratio_array = np.atleast_2d(heliostat_l_ratio_raw)
        heliostat_w_ratio_array = np.atleast_2d(heliostat_w_ratio_raw)

        heliostat_glass_thickness_array = _load_2d("heliostat_glass_thickness")
        heliostat_glass_thickness_float_array = (
            heliostat_glass_thickness_array.astype(np.float32)
        )
        flux_map_shape_array = _load_2d("flux_map_shape")
        distance_heliostat_receiver_array = _load_2d(
            "distance_heliostat_receiver", np.float32
        )
        sun_elevation_array = _load_2d("sun_elevation", np.float32)
        sun_azimuth_array = _load_2d("sun_azimuth", np.float32)
        heliostat_glass_sigma_array = _load_2d("heliostat_glass_sigma")
        sun_csr_array = _load_2d("sun_csr")
        heliostat_glass_refractive_index_array = _load_2d(
            "heliostat_glass_refractive_index"
        )
        peak_flux_param_array = _load_2d("peak_flux_param")
        sigma_x_array = _load_2d("sigma_x")
        sigma_y_array = _load_2d("sigma_y")
        x13_array = _load_2d("x13")
        x10_array = _load_2d("x10")
        y11_array = _load_2d("y11")
        y10_array = _load_2d("y10")
        gradient_inv_array = _load_2d("gradient_inv")

    # Stack the 23 row vectors and transpose to one sample per row.
    all_data = (
        np.concatenate(
            (
                heliostat_area_array,
                heliostat_l_ratio_array,
                heliostat_w_ratio_array,
                heliostat_glass_thickness_array,
                id_array,
                flux_map_shape_array,
                heliostat_area_float_array,  # 6
                heliostat_l_w_ratio_array,
                distance_heliostat_receiver_array,
                heliostat_glass_thickness_float_array,
                sun_elevation_array,
                sun_azimuth_array,
                heliostat_glass_sigma_array,
                sun_csr_array,
                heliostat_glass_refractive_index_array,  # 14
                peak_flux_param_array,  # 15
                sigma_x_array,
                sigma_y_array,
                x13_array,
                x10_array,
                y11_array,
                y10_array,  # 21
                gradient_inv_array,  # 22
            ),
            axis=0,
        )
        .astype(np.float32)
        .T
    )

    # Fixed seed keeps the downstream 90/10 split reproducible across runs.
    rng = np.random.default_rng(666)
    rng.shuffle(all_data)

    return all_data


class UNIZARNet(torch.nn.Module):
    """Fully connected regressor mapping scene parameters to the 7
    flux-map fitting parameters.

    Architecture: input BatchNorm + Sigmoid squashing, four hidden stages
    of Linear -> BatchNorm1d -> ReLU with widths 48/64/36/24, and a final
    Linear head producing 7 outputs.
    """

    def __init__(self, input_param_count: int):
        super().__init__()
        stages = [
            torch.nn.BatchNorm1d(input_param_count),
            torch.nn.Sigmoid(),
        ]
        width_in = input_param_count
        for width_out in (48, 64, 36, 24):
            stages.append(torch.nn.Linear(width_in, width_out))
            stages.append(torch.nn.BatchNorm1d(width_out))
            stages.append(torch.nn.ReLU())
            width_in = width_out
        # Regression head: no normalization/activation on the outputs.
        stages.append(torch.nn.Linear(width_in, 7))
        self.net = torch.nn.Sequential(*stages)

    def forward(self, input: torch.FloatTensor):
        """Run the network on a (batch, input_param_count) float tensor."""
        return self.net(input)


def unizar_net_loss(output_predict: torch.Tensor, output_gt: torch.Tensor) -> torch.Tensor:
    """Mean over the batch of the per-sample sum of absolute relative errors.

    Equivalent to the original hand-unrolled 7-term sum
    sum_j |(pred_j - gt_j) / gt_j| averaged over the batch, but vectorized
    over all output columns instead of repeating the expression per column.

    Args:
        output_predict: (batch, K) predicted output parameters.
        output_gt: (batch, K) ground-truth output parameters; must be
            nonzero (relative error divides by the ground truth).

    Returns:
        Scalar tensor with the averaged summed relative error.
    """
    relative_error = torch.abs((output_predict - output_gt) / output_gt)
    return torch.mean(torch.sum(relative_error, dim=1))


def main():
    """Train UNIZARNet on the preprocessed fitting data, save the weights,
    then reload them into a fresh model and report the average validation
    loss.
    """
    # NOTE(review): hard-coded local Windows path — assumes the dataset
    # exists here; consider making this a CLI argument.
    all_data = preprocess_data("G:/unizar_fit_data/unizar_fitting_params.hdf5")
    # Per the column layout documented in preprocess_data: columns 6-14 are
    # the 9 network inputs, columns 15-21 the 7 regression targets.
    all_input_params = all_data[:, 6:15]
    all_output_params = all_data[:, 15:22]
    input_param_count = all_input_params.shape[1]
    # 90/10 train/validation split; rows were already shuffled upstream.
    validate_data_count = all_data.shape[0] // 10
    train_data_count = all_data.shape[0] - validate_data_count
    validate_inputs = all_input_params[train_data_count:]
    validate_outputs_gt = all_output_params[train_data_count:]
    train_inputs = all_input_params[:train_data_count]
    train_outputs_gt = all_output_params[:train_data_count]

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    train_data_set = TensorDataset(
        torch.tensor(train_inputs, dtype=torch.float32).to(device),
        torch.tensor(train_outputs_gt, dtype=torch.float32).to(device),
    )
    train_data_loader = DataLoader(train_data_set, batch_size=1024, shuffle=True)
    validate_data_set = TensorDataset(
        torch.tensor(validate_inputs, dtype=torch.float32).to(device),
        torch.tensor(validate_outputs_gt, dtype=torch.float32).to(device),
    )
    # Fix: evaluation needs no shuffling; shuffle=False makes the reported
    # per-batch loss deterministic run to run.
    validate_data_loader = DataLoader(validate_data_set, batch_size=1024, shuffle=False)

    epochs = 250
    net = UNIZARNet(input_param_count)
    net.to(device)
    # Fix: call parameters() on the instance rather than through the class.
    optim = torch.optim.Adam(net.parameters(), lr=1e-4)
    for _ in tqdm(range(epochs)):
        for batch_input, batch_output in train_data_loader:
            output_params_predict = net(batch_input)
            loss = unizar_net_loss(output_params_predict, batch_output)
            optim.zero_grad()
            loss.backward()
            optim.step()

    model_save_path = "unizar_net.pth"
    torch.save(net.state_dict(), model_save_path)

    # Reload the saved weights into a fresh model so we validate exactly what
    # was persisted to disk.
    eval_net = UNIZARNet(input_param_count).to(device)
    eval_net.load_state_dict(torch.load(model_save_path, map_location=device))
    eval_net.eval()
    total_validate_loss = 0.0
    with torch.no_grad():
        for batch_input, batch_output in validate_data_loader:
            # Fix: evaluate with the reloaded eval-mode model. The original
            # mistakenly kept calling the training-mode `net` here, leaving
            # eval_net unused and BatchNorm running in training mode.
            output_params_predict = eval_net(batch_input)
            loss = unizar_net_loss(output_params_predict, batch_output)
            total_validate_loss += loss.item()
    # Fix: each `loss` is already a per-batch mean, so average over the number
    # of batches, not the number of validation samples.
    total_validate_loss_avg = total_validate_loss / len(validate_data_loader)
    print("测试集上的平均损失：{}".format(total_validate_loss_avg))


if __name__ == "__main__":
    main()
