import sys
import os

# Make this script's directory importable so the project-local packages
# (dataset, utils, NN) resolve regardless of the working directory.
cur_path = os.path.split(__file__)[0]
sys.path.append(cur_path)

import torch
from torch.optim import Adam

# from NN.Transolver.SageTrans_importer import FVGN
from dataset.Load_mesh import DatasetFactory

from utils import get_param, scheduler
import time
from utils.get_param import get_hyperparam
from utils.Logger import Logger
from utils.losses import LpLoss
import random
import datetime
import numpy as np


# configure parameters
params = get_param.params()

# Seed every RNG from the wall clock so each run differs, but remains
# reproducible because the seed is recorded by the Logger below.
seed = int(datetime.datetime.now().timestamp())
np.random.seed(seed)
random.seed(seed)
torch.manual_seed(seed)

# Cap this process at 80% of GPU memory (params.on_gpu is presumably the
# device index — TODO confirm). Guarded: set_per_process_memory_fraction
# initializes CUDA and raises on a CPU-only machine, which would defeat the
# explicit CPU fallback on the next line.
if torch.cuda.is_available():
    torch.cuda.set_per_process_memory_fraction(0.8, params.on_gpu)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Initialize the experiment Logger: records the hyper-parameter string, the
# RNG seed, and a copy of the code for this run. CSV logging is enabled,
# TensorBoard is not.
logger = Logger(
    get_hyperparam(params),
    use_csv=True,
    use_tensorboard=False,
    params=params,
    copy_code=True,
    seed=seed,
)

# initialize Training Dataset (timed, since loading trajectories can be slow)
start = time.time()
datasets_factory = DatasetFactory(
    params=params,
    device=device,
)

# Create dataset objects. Training uses batching, worker processes and
# subsampling; validation runs one sample at a time with no subsampling so
# the evaluation sees full-resolution data.
train_dataset, train_loader = datasets_factory.create_trainset(
    batch_size=params.batch_size,
    num_workers=2,
    pin_memory=False,
    persistent_workers=True,
    subsampling=True,
)
valid_dataset, valid_loader = datasets_factory.create_testset(
    batch_size=1,
    num_workers=0,
    pin_memory=False,
    persistent_workers=False,
    subsampling=False,
)
end = time.time()
print("Training traj has been loaded time consuming:{0}".format(end - start))

# initialize fluid model
# NOTE(review): imported mid-file rather than at the top, presumably so the
# sys.path mutation above takes effect first — confirm before moving.
from NN.model_importer.importer import FVGN

model = FVGN(params)

fluid_model = model.to(device)
fluid_model.train()  # training mode from the start (validation relies on torch.no_grad only)
optimizer = Adam(fluid_model.parameters(), lr=params.lr)

""" >>> lr scheduler settings >>> """
before_explr_decay_steps = int(params.n_epochs * 0.7)
two_step_scheduler = scheduler.ExpLR(
    optimizer, decay_steps=params.n_epochs - before_explr_decay_steps, gamma=1e-4
)
lr_scheduler = scheduler.GradualStepExplrScheduler(
    optimizer,
    multiplier=1.0,
    milestone=[int(params.n_epochs / 2)],
    gamma=0.1,
    total_epoch=before_explr_decay_steps,
    after_scheduler=two_step_scheduler,
    expgamma=1e-2,
    decay_steps=params.n_epochs - before_explr_decay_steps,
    min_lr=1e-6,
)
""" <<< lr scheduler settings <<< """

# Resume from a previous run when any restore option was supplied.
if (
    params.load_latest
    or params.load_date_time is not None
    or params.load_index is not None
):
    logger.load_logger(datetime=params.load_date_time)
    # Optimizer and scheduler state are restored only on explicit request;
    # otherwise just the model weights are loaded.
    restore_training_state = params.load_optimizer
    params.load_date_time, params.load_index = logger.load_state(
        model=fluid_model,
        optimizer=optimizer if restore_training_state else None,
        scheduler=lr_scheduler if restore_training_state else None,
        datetime=params.load_date_time,
        index=params.load_index,
        device=device,
    )
    params.load_index = int(params.load_index)
    print(f"loaded: {params.load_date_time}, {params.load_index}")

params.load_index = 0 if params.load_index is None else params.load_index

lp_loss = LpLoss(size_average=True)

# Running accumulators for the train/validation losses of the current epoch.
epoc_loss = 0
epoc_val_loss = 0
# History of validation losses; the infinite sentinel guarantees the first
# validated epoch is always treated as "best". (The previous literal 999
# silently disabled best-checkpoint saving whenever early validation losses
# exceeded 999.)
epoch_val_loss_list = [float("inf")]

# training loop
for epoch in range(params.n_epochs):

    fluid_model.train()
    # per-epoch running sums of the three training loss terms
    epoc_loss = 0
    epoc_loss_vel = 0
    epoc_loss_press = 0
    epoc_loss_cd = 0
    start = time.time()

    for batch_index, graph_node in enumerate(train_loader):

        optimizer.zero_grad()

        # Normalize/augment the batch on the GPU.
        # NOTE(review): .cuda() fails on a CPU-only machine even though
        # `device` falls back to "cpu" above — consider .to(device).
        graph_node = train_dataset.datapreprocessing(
            graph_node.cuda(), is_training=True
        )

        # NOTE(review): is_training=False in the *training* forward pass,
        # while the preprocessing above uses is_training=True — confirm this
        # is intentional and not a leftover from evaluation code.
        pred_vel, pred_press, pred_cd = fluid_model(
            graph_node=graph_node,
            params=params,
            is_training=False,
            is_pretrain=False,
        )

        # Velocity and pressure losses are computed on normalized targets,
        # restricted by per-node masks and batched per graph.
        loss_vel = lp_loss(
            pred_vel,
            graph_node.norm_velocity,
            batch=graph_node.batch,
            mask=graph_node.mask_vel,
        )
        loss_press = lp_loss(
            pred_press,
            graph_node.norm_pressure,
            batch=graph_node.batch,
            mask=graph_node.mask_press,
        )
        loss_cd = lp_loss(
            pred_cd[graph_node.mask_cd],
            graph_node.cd_data[graph_node.mask_cd],
            dim=1,
        )  # cd was already averaged in the forward pass, so the mask can be applied directly
        loss = loss_vel + loss_press + loss_cd
        loss.backward()
        optimizer.step()
        epoc_loss_vel += loss_vel.cpu().item()
        epoc_loss_press += loss_press.cpu().item()
        epoc_loss_cd += loss_cd.cpu().item()

    # Average each term over the number of batches; the reported epoch loss
    # is the sum of the three averaged terms.
    epoc_loss_vel = epoc_loss_vel / len(train_loader)
    epoc_loss_press = epoc_loss_press / len(train_loader)
    epoc_loss_cd = epoc_loss_cd / len(train_loader)
    epoc_loss = epoc_loss_vel + epoc_loss_press + epoc_loss_cd

    # Validation (every other epoch)
    if epoch % 2 == 0:
        # if True:
        with torch.no_grad():
            epoc_val_loss = 0
            epoc_val_loss_vel = 0
            epoc_val_loss_press = 0
            epoc_val_loss_cd = 0

            # Per-category sample counts; each validation sample contributes
            # to exactly one loss depending on its source-file extension.
            count_val_loss_vel = 0
            count_val_loss_press = 0
            count_val_loss_cd = 0

            cd_pair_list = []
            for (
                batch_index,
                graph_node_valid,
            ) in enumerate(valid_loader):

                graph_node_valid = valid_dataset.datapreprocessing(
                    graph_node_valid.cuda(),
                    is_training=False,
                )

                pred_vel_valid, pred_press_valid, pred_cd_valid = fluid_model(
                    graph_node=graph_node_valid,
                    params=params,
                    is_training=False,
                    is_pretrain=False,
                )

                # origin_id stores the source filename as a sequence of
                # character codes; decode it to dispatch on the extension.
                current_files_name = "".join(
                    chr(code) for code in graph_node_valid.origin_id.cpu().tolist()
                )

                # .vtk samples -> velocity loss (on normalized values).
                # NOTE(review): reversed_node_vel / reversed_vel_label are
                # assigned but never used.
                if current_files_name.endswith(".vtk"):
                    reversed_node_vel = pred_vel_valid
                    reversed_vel_label = graph_node_valid.norm_velocity
                    val_loss_vel = lp_loss(
                        pred_vel_valid, graph_node_valid.norm_velocity, dim=None
                    )
                    epoc_val_loss_vel += val_loss_vel.cpu().item()
                    count_val_loss_vel += 1

                # .ply samples -> pressure loss, denormalized back to
                # physical units: x * std + mean.
                elif current_files_name.endswith(".ply"):
                    reversed_node_press = (
                        pred_press_valid * graph_node_valid.phi_std[0, 0]
                    ) + graph_node_valid.phi_mean[0, 0]
                    reversed_press_label = (
                        graph_node_valid.norm_pressure * graph_node_valid.phi_std[0, 0]
                    ) + graph_node_valid.phi_mean[0, 0]
                    val_loss_press = lp_loss(
                        reversed_node_press, reversed_press_label, dim=None
                    )
                    epoc_val_loss_press += val_loss_press.cpu().item()
                    count_val_loss_press += 1

                # .obj samples -> drag-coefficient loss.
                # NOTE(review): this branch *divides* by phi_std where the
                # pressure branch multiplies — if cd uses the same
                # normalization, the denormalization here is inverted;
                # confirm against the dataset's cd normalization.
                elif current_files_name.endswith(".obj"):
                    reversed_cd = (
                        pred_cd_valid / graph_node_valid.phi_std[0, 0]
                    ) + graph_node_valid.phi_mean[0, 0]
                    reversed_cd_label = (
                        graph_node_valid.cd_data / graph_node_valid.phi_std[0, 0]
                    ) + graph_node_valid.phi_mean[0, 0]

                    cd_pair_list.append([reversed_cd, reversed_cd_label])
                    val_loss_cd = lp_loss(reversed_cd, reversed_cd_label, dim=1)
                    epoc_val_loss_cd += val_loss_cd.cpu().item()
                    count_val_loss_cd += 1

            # Whole-board cd loss over all (pred, label) pairs at once.
            # NOTE(review): torch.tensor on a list of tensors only works when
            # the entries are scalar (0-d) tensors — verify, and note this
            # raises if no .obj sample was seen (empty cd_pair_list).
            cd_pair = torch.tensor(cd_pair_list)
            A_board_cd_loss = lp_loss(cd_pair[:, 0:1], cd_pair[:, 1:2], dim=None).cpu().item()

            # Aggregate validation loss: summed vel/press losses plus the
            # single board-wide cd loss, averaged over vel+press counts + 1.
            epoc_val_loss = (
                epoc_val_loss_vel + epoc_val_loss_press + A_board_cd_loss
            ) / (count_val_loss_vel + count_val_loss_press + 1)

            # Per-category averages (raise ZeroDivisionError if a category
            # had no samples — presumably the validation set always contains
            # all three kinds; confirm).
            epoc_val_loss_vel = epoc_val_loss_vel / count_val_loss_vel
            epoc_val_loss_press = epoc_val_loss_press / count_val_loss_press
            epoc_val_loss_cd = epoc_val_loss_cd / count_val_loss_cd

            # Save a "best" checkpoint (fixed index 999) when the current
            # validation loss beats every previous one (i.e. a new minimum).
            if all(epoc_val_loss < loss for loss in epoch_val_loss_list):
                model_saving_path = logger.save_state(
                    model=fluid_model,
                    optimizer=optimizer,
                    scheduler=lr_scheduler,
                    index=999,
                )
                print(f"Best Epoch {epoch} eval loss: {epoc_val_loss}")

            epoch_val_loss_list.append(epoc_val_loss)

    # Checkpointing: keep individual snapshots in a window around the LR
    # milestone at n_epochs/2; otherwise overwrite a single rolling
    # checkpoint.
    # NOTE(review): `epoch % 1` is always 0 — presumably intended as a
    # rolling "latest" slot at index 0; confirm.
    if epoch > int(params.n_epochs // 2 - 5) and epoch < int(
        (params.n_epochs // 2) + 25
    ):
        model_saving_path = logger.save_state(
            model=fluid_model,
            optimizer=optimizer,
            scheduler=lr_scheduler,
            index=epoch,
        )
    else:
        model_saving_path = logger.save_state(
            model=fluid_model,
            optimizer=optimizer,
            scheduler=lr_scheduler,
            index=epoch % 1,
        )
    # NOTE(review): on odd epochs (no validation) the eval values printed and
    # logged below are carried over from the previous validated epoch.
    print(f"Epoch {epoch} train loss: {epoc_loss}")
    print(f"Epoch {epoch} train epoc_press_loss: {epoc_loss_press}")
    print(f"Epoch {epoch} train epoc_vel_loss: {epoc_loss_vel}")
    print(f"Epoch {epoch} train epoc_cd_loss: {epoc_loss_cd}")
    print(f"Epoch {epoch} eval loss: {epoc_val_loss}")
    print(f"Epoch {epoch} eval epoc_press_loss: {epoc_val_loss_press}")
    print(f"Epoch {epoch} eval epoc_vel_loss: {epoc_val_loss_vel}")
    print(f"Epoch {epoch} eval epoc_cd_loss: {epoc_val_loss_cd}")
    print(f"Epoch {epoch} A_board_cd_loss: {A_board_cd_loss}")    
    print(f"Epoch {epoch} completed in {time.time() - start:.2f} seconds")

    logger.add_log_item(f"Epoch_train_loss", (epoc_loss))
    logger.add_log_item(f"Epoch_train_epoc_press_loss", (epoc_loss_press))
    logger.add_log_item(f"Epoch_train_epoc_vel_loss", (epoc_loss_vel))
    logger.add_log_item(f"Epoch_train_epoc_cd_loss", (epoc_loss_cd))
    logger.add_log_item(f"Epoch_eval_epoc_press_loss", (epoc_val_loss_press))
    logger.add_log_item(f"Epoch_eval_epoc_vel_loss", (epoc_val_loss_vel))
    logger.add_log_item(f"Epoch_eval_epoc_cd_loss", (epoc_val_loss_cd))
    logger.add_log_item(f"A_board_cd_loss", (A_board_cd_loss))
    logger.log(f"Epoch_eval_loss", epoc_val_loss, epoch)

    lr_scheduler.step()

print("Training completed")
