import sys
import os

cur_path = os.path.split(__file__)[0]
sys.path.append(cur_path)

import torch
from dataset.Load_mesh import DatasetFactory

# import os
from utils import get_param
import time
from utils.get_param import get_hyperparam
from utils.Logger import Logger
from utils.losses import LpLoss
import random
import datetime

# --------------------------------------------------------------------------
# Runtime configuration: load the saved-run parameters and point them at the
# checkpoint / test set used for answer generation.
# --------------------------------------------------------------------------
params = get_param.params(f"Logger/net Attu-FVGN; hs 128;/2024-09-01-02:44:47/states")
params.load_date_time = "2024-09-01-02:44:47"
params.load_index = "999"
# NOTE(review): path spelling ("conveted", "wit") looks typo'd — confirm it
# matches the actual file on disk before "fixing" it.
params.testset = "datasets/conveted_dataset/test_wit_label.h5"
params.on_gpu = 1

# Seed both RNGs from the SAME timestamp. The original sampled the clock once
# per call, so the two seeds could differ across a second boundary.
seed = int(datetime.datetime.now().timestamp())
random.seed(seed)
torch.manual_seed(seed)

# Resolve the device first, and only touch the CUDA allocator when CUDA is
# actually available — the unconditional call crashed on CPU-only machines.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if torch.cuda.is_available():
    torch.cuda.set_per_process_memory_fraction(0.99, params.on_gpu)

# Build the experiment logger for this saved run. CSV/TensorBoard output and
# code snapshotting are all disabled because we only replay a checkpoint here;
# loading of model/optimizer state happens later via logger.load_state.
_logger_options = {
    "datetime": params.load_date_time,
    "use_csv": False,
    "use_tensorboard": False,
    "copy_code": False,
}
logger = Logger(get_hyperparam(params), **_logger_options)

# Construct the dataset factory, then materialize the held-out test split.
datasets_factory = DatasetFactory(params=params, device=device)

# Single-sample batches, loaded synchronously in the main process
# (no workers, no pinned memory, no subsampling).
test_dataset, test_loader = datasets_factory.create_testset(
    batch_size=1,
    num_workers=0,
    pin_memory=False,
    persistent_workers=False,
    subsampling=False,
)

# initialize fluid model
from NN.model_importer.importer import FVGN

fluid_model = FVGN(params).to(device)
fluid_model.eval()  # inference only: freeze dropout / batch-norm statistics

lp_loss = LpLoss(size_average=True)

# Restore the checkpoint named by (load_date_time, load_index) into the model.
# No optimizer/scheduler state is needed for evaluation.
params.load_date_time, params.load_index = logger.load_state(
    model=fluid_model,
    optimizer=None,
    scheduler=None,
    datetime=params.load_date_time,
    index=params.load_index,
    device=device,
)
# (Removed a dead self-assignment of params.load_index from the original.)
print(f"loaded: {params.load_date_time}, {params.load_index}")
params.load_index = 0 if params.load_index is None else params.load_index

# Pre-sized per-sample result slots, one entry per test case; the evaluation
# loop overwrites them by dataset index. (Placeholder values are the ints
# 0..49, identical to the original `[_ for _ in range(50)]` form.)
cd_list = list(range(50))
vel_loss_list = list(range(50))
press_loss_list = list(range(50))
cd_loss_list = list(range(50))

# Wall-clock start of the evaluation run.
start = time.time()
# Evaluation loop: run the frozen model over the test loader and route each
# sample's metrics by its source-file extension
# (.vtk = velocity, .ply = pressure, .obj = drag coefficient).
with torch.no_grad():

    epoc_test_loss = 0  # NOTE(review): never updated or read below — looks dead
    cd_pair_list = []  # accumulated [predicted cd, label cd] pairs for .obj samples
    for batch_index, graph_node in enumerate(test_loader):

        # Prepare/normalize the raw batch on the GPU in inference mode.
        graph_node = test_dataset.datapreprocessing(
            graph_node.cuda(),
            is_training=False,
        )

        # Single forward pass producing all three prediction heads.
        pred_vel, pred_press, pred_cd = fluid_model(
            graph_node=graph_node,
            is_training=False,
            is_pretrain=False,
            params=params,
        )

        # The originating file name is stored as a sequence of character
        # codes; decode it back to a string to dispatch on the extension.
        current_files_name = "".join(
            chr(code) for code in graph_node.origin_id.cpu().tolist()
        )

        if current_files_name.endswith(".vtk"):

            # Velocity case: prediction and label are compared in their
            # (still-normalized) form — no de-normalization here.
            reversed_node_vel = pred_vel
            reversed_vel_label = graph_node.norm_velocity
            loss_vel = lp_loss(reversed_node_vel, reversed_vel_label)
            # idx - 50: assumes .vtk samples carry dataset indices 50..99 — TODO confirm
            vel_loss_list[graph_node.idx.cpu().item() - 50] = loss_vel.cpu().item()

            logger.save_test_results_npy(
                value=reversed_node_vel.cpu().detach().squeeze().numpy(),
                file_name=current_files_name.split(".")[0],
            )

        elif current_files_name.endswith(".ply"):
            # Pressure case: de-normalize both prediction and label with the
            # stored phi mean/std before computing the loss.
            reversed_node_press = (
                pred_press * graph_node.phi_std[0, 0]
            ) + graph_node.phi_mean[0, 0]

            reversed_press_label = (
                graph_node.norm_pressure * graph_node.phi_std[0, 0]
            ) + graph_node.phi_mean[0, 0]

            loss_press = lp_loss(
                reversed_node_press, reversed_press_label
            )
            # .ply samples index press_loss_list directly (presumably idx 0..49 — verify)
            press_loss_list[graph_node.idx.cpu().item()] = loss_press.cpu().item()

            logger.save_test_results_npy(
                value=reversed_node_press.cpu().detach().squeeze().numpy(),
                file_name=current_files_name.replace("mesh_", "press_").split(".")[0],
            )

        elif current_files_name.endswith(".obj"):

            # Drag-coefficient case. NOTE(review): cd is DIVIDED by phi_std
            # here, unlike the pressure branch which multiplies — presumably
            # cd was scaled up during normalization; verify against the
            # dataset preprocessing code.
            reversed_cd = pred_cd / graph_node.phi_std[0, 0]
            reversed_cd_label = graph_node.cd_data / graph_node.phi_std[0, 0]
            val_loss_cd = lp_loss(reversed_cd, reversed_cd_label)
            cd_pair_list.append([reversed_cd, reversed_cd_label])
            # idx - 100: assumes .obj samples carry dataset indices 100..149 — TODO confirm
            cd_loss_list[graph_node.idx.cpu().item()-100] = val_loss_cd.cpu().item()
            cd_list[graph_node.idx.cpu().item()-100] = reversed_cd.cpu().item()

    # Persist the per-case cd predictions as the submission answer file.
    logger.save_test_results_csv(cd_list, file_name="Answer.csv")
    
# --------------------------------------------------------------------------
# Aggregate metrics over the whole test run and print the report.
# --------------------------------------------------------------------------
# Stack the (pred, label) cd pairs and compute one Lp loss over the full set.
cd_pair = torch.tensor(cd_pair_list)
A_board_cd_loss = lp_loss(cd_pair[:, 0:1], cd_pair[:, 1:2], dim=None).cpu().item()
total_avg_vel_loss = sum(vel_loss_list) / len(vel_loss_list)
total_avg_press_loss = sum(press_loss_list) / len(press_loss_list)
total_avg_cd_loss = sum(cd_loss_list) / len(cd_loss_list)

print(f"total_avg_vel_loss: {total_avg_vel_loss}")
print(f"total_avg_press_loss: {total_avg_press_loss}")
print(f"total_avg_cd_loss: {total_avg_cd_loss}")
# 101 presumably = 50 velocity + 50 pressure samples + 1 aggregated cd term — TODO confirm
print(f"test_loss: {(sum(vel_loss_list) + sum(press_loss_list) + A_board_cd_loss)/101}")
print(f"total_avg_loss: {(total_avg_vel_loss+total_avg_press_loss+total_avg_cd_loss)/3.}")

# Averages over the first 20 samples of each category.
fore_20_avg_vel_loss = sum(vel_loss_list[:20]) / 20
fore_20_avg_press_loss = sum(press_loss_list[:20]) / 20
fore_20_avg_cd_loss = sum(cd_loss_list[:20]) / 20


print(f"fore_20_avg_vel_loss: {fore_20_avg_vel_loss}")
print(f"fore_20_avg_press_loss: {fore_20_avg_press_loss}")
print(f"fore_20_avg_cd_loss: {fore_20_avg_cd_loss}")
print(f"fore_20_avg_loss: {(fore_20_avg_vel_loss+fore_20_avg_press_loss+fore_20_avg_cd_loss)/3.}")

# (Fixed the duplicated word "completed completed" from the original message.)
print(f"Generating answer completed in {time.time() - start:.2f} seconds")
