import sys
import os
import torch
from torch.optim import Adam

# from NN.Transolver.SageTrans_importer import FVGN
from dataset.Load_mesh import DatasetFactory
import torch.nn as nn

# import os
from utils import get_param, scheduler
from utils.utilities import calc_cell_centered_with_node_attr
import time
from utils.get_param import get_hyperparam
from utils.Logger import Logger
from utils.losses import LpLoss
import random
import datetime
from torch_geometric.data import Batch

# configurate parameters
params, git_info = get_param.params()

# git information
if git_info is not False:
    git_info = {
        "git_branch": params.git_branch,
        "git_commit_dates": params.git_commit_dates,
    }
else:
    git_info = {"git_branch": " ", "git_commit_dates": " "}

# Seed both RNGs from a single timestamp read so they stay consistent with
# each other (two separate now() calls could straddle a timestamp boundary
# and yield different seeds).
_seed = int(datetime.datetime.now().timestamp())
random.seed(_seed)
torch.manual_seed(_seed)

# Only touch CUDA state when a GPU is actually present; the original
# unconditional call crashed on CPU-only machines even though the device
# selection below anticipates a CPU fallback.
if torch.cuda.is_available():
    torch.cuda.set_per_process_memory_fraction(0.99, params.on_gpu)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Set up the run logger (CSV only, tensorboard disabled); checkpoints may be
# restored into it later if the load_* parameters were given.
logger = Logger(
    get_hyperparam(params),
    use_csv=True,
    use_tensorboard=False,
    params=params,
    git_info=git_info,
    copy_code=True,
)

# Build the training / validation splits: trajectories 0..479 train,
# 480..499 validate.
load_t0 = time.time()
datasets_factory = DatasetFactory(
    params=params,
    device=device,
)
train_indices = list(range(480))
val_indices = list(range(480, 500))

# create dataset objects
train_dataset, train_loader, train_sampler = datasets_factory.create_trainset(
    batch_size=params.batch_size,
    num_workers=2,
    pin_memory=False,
    persistent_workers=True,
    indices=train_indices,
)
valid_dataset, valid_loader, valid_sampler = datasets_factory.create_trainset(
    batch_size=1,
    num_workers=0,
    pin_memory=False,
    persistent_workers=False,
    indices=val_indices,
)
load_t1 = time.time()
print("Training traj has been loaded time consuming:{0}".format(load_t1 - load_t0))

# initialize fluid model (imported here, right before use)
from NN.Model_importer.Importer import FVGN

# Instantiate, move to the selected device, and put into training mode.
fluid_model = FVGN(params).to(device)
fluid_model.train()
optimizer = Adam(fluid_model.parameters(), lr=params.lr)

""" >>> lr scheduler settings >>> """
# Step decay for the first 70% of epochs, then exponential decay down to
# min_lr for the remainder.
before_explr_decay_steps = int(params.n_epochs * 0.7)
exp_decay_steps = params.n_epochs - before_explr_decay_steps
two_step_scheduler = scheduler.ExpLR(
    optimizer,
    decay_steps=exp_decay_steps,
    gamma=1e-4,
)
lr_scheduler = scheduler.GradualStepExplrScheduler(
    optimizer,
    multiplier=1.0,
    milestone=[int(params.n_epochs / 2)],
    gamma=0.1,
    total_epoch=before_explr_decay_steps,
    after_scheduler=two_step_scheduler,
    expgamma=1e-2,
    decay_steps=exp_decay_steps,
    min_lr=1e-6,
)
""" <<< lr scheduler settings <<< """

# Restore a previous checkpoint when any load flag/identifier was supplied.
if (
    params.load_latest
    or params.load_date_time is not None
    or params.load_index is not None
):
    logger.load_logger(datetime=params.load_date_time)
    # The two original branches were identical except for the optimizer and
    # scheduler arguments, so select those conditionally instead of
    # duplicating the whole load_state call.
    restore_optimizer = optimizer if params.load_optimizer else None
    restore_scheduler = lr_scheduler if params.load_optimizer else None
    params.load_date_time, params.load_index = logger.load_state(
        model=fluid_model,
        optimizer=restore_optimizer,
        scheduler=restore_scheduler,
        datetime=params.load_date_time,
        index=params.load_index,
        device=device,
    )
    params.load_index = int(params.load_index)
    print(f"loaded: {params.load_date_time}, {params.load_index}")

params.load_index = 0 if params.load_index is None else params.load_index

lp_loss = LpLoss(size_average=True)

# Initialize accumulators that collect residuals over the whole dataset;
# epoch_val_loss_list starts with a large sentinel so the first computed
# validation loss always registers as an improvement.
epoc_loss = 0
epoc_val_loss = 0
epoch_val_loss_list = [999]

# training loop: one optimization pass per epoch, validation every 2nd epoch.
for epoch in range(params.n_epochs):

    fluid_model.train()
    train_sampler.set_epoch(epoch)
    valid_sampler.set_epoch(epoch)
    epoc_loss = 0

    start = time.time()

    for batch_index, (
        graph_node,
        graph_edge,
        graph_cell,
    ) in enumerate(train_loader):

        optimizer.zero_grad()

        # Move the graphs to the selected device (was hard-coded .cuda(),
        # which broke CPU-only runs) and preprocess for training.
        (graph_node, graph_edge, graph_cell) = train_dataset.datapreprocessing(
            graph_node.to(device),
            graph_edge.to(device),
            graph_cell.to(device),
            is_training=True,
        )

        pred_node = fluid_model(
            graph_node=graph_node,
            graph_edge=graph_edge,
            graph_cell=graph_cell,
            params=params,
            is_training=True,
        )

        # Relative Lp loss between prediction and the normalized target
        # (leading batch axis added via [None,]).
        loss = lp_loss(
            pred_node[None,],
            graph_node.norm_y[None,],
        )

        loss.backward()
        optimizer.step()
        epoc_loss += loss.cpu().item()

    epoc_loss = epoc_loss / len(train_loader)

    # Validation every other epoch
    if epoch % 2 == 0:

        # Switch to eval mode so dropout / batch-norm layers behave
        # deterministically during validation; the fluid_model.train() call
        # at the top of the next epoch switches back.
        fluid_model.eval()
        with torch.no_grad():
            epoc_val_loss = 0

            for batch_index, (
                graph_node_valid,
                graph_edge_valid,
                graph_cell_valid,
            ) in enumerate(valid_loader):

                (graph_node_valid, graph_edge_valid, graph_cell_valid) = (
                    valid_dataset.datapreprocessing(
                        graph_node_valid.to(device),
                        graph_edge_valid.to(device),
                        graph_cell_valid.to(device),
                        is_training=False,
                    )
                )

                pred_node_valid = fluid_model(
                    graph_node=graph_node_valid,
                    graph_edge=graph_edge_valid,
                    graph_cell=graph_cell_valid,
                    is_training=False,
                    params=params,
                )

                val_loss = lp_loss(
                    pred_node_valid[None,],
                    graph_node_valid.norm_y[None,],
                )
                epoc_val_loss += val_loss.cpu().item()

            epoc_val_loss = epoc_val_loss / len(valid_loader)

            # Save a "best" checkpoint when this loss beats every previous
            # one (equivalent to the original all(...) scan, but clearer).
            if epoc_val_loss < min(epoch_val_loss_list):
                model_saving_path = logger.save_state(
                    model=fluid_model,
                    optimizer=optimizer,
                    scheduler=lr_scheduler,
                    index=999,  # sentinel index reserved for the best model
                )
                print(f"Best Epoch {epoch} eval loss: {epoc_val_loss}")

            epoch_val_loss_list.append(epoc_val_loss)

            # Best-effort unbatching for plotting; the loader may already
            # yield single graphs. The original bare `except:` also swallowed
            # KeyboardInterrupt/SystemExit — narrowed to Exception.
            try:
                graph_node_valid = Batch.to_data_list(graph_node_valid)[0]
            except Exception:
                pass

            res_dict = {
                "node|pos": graph_node_valid.pos.cpu().detach().numpy(),
                "node|pressure": pred_node_valid.cpu().detach().numpy(),
                "node|pressure_gt": graph_node_valid.norm_y.cpu().detach().numpy(),
                "cells_node": graph_node_valid.face[0].cpu().detach().numpy(),
            }

            logger.plot(res_dict=res_dict, data_index=graph_node_valid.origin_id.item())

        # Rolling checkpoint: index cycles through {0, 1, 2} so only the
        # three most recent snapshots are retained.
        model_saving_path = logger.save_state(
            model=fluid_model,
            optimizer=optimizer,
            scheduler=lr_scheduler,
            index=epoch % 3,
        )

    print(f"Epoch {epoch} train loss: {epoc_loss}")
    # NOTE: on odd epochs this reports the most recently computed val loss.
    print(f"Epoch {epoch} eval loss: {epoc_val_loss}")
    print(f"Epoch {epoch} completed in {time.time() - start:.2f} seconds")
    # Log key fixed from the original misspelling "Epcoh_loss".
    logger.add_log_item("Epoch_loss", epoc_loss, epoch)
    logger.log("Epoch_eval_loss", epoc_val_loss, epoch)

    lr_scheduler.step()

print("Training completed")
