#%%
import sys
# sys.path.append("./PaddleScience/")
sys.path.append("model")
import argparse
import os
os.environ["CUDA_VISIBLE_DEVICES"]='0'
import csv
from timeit import default_timer
from typing import List
import numpy as np
import paddle
import yaml
import pandas as pd
from paddle.optimizer.lr import LRScheduler
from src.data import instantiate_datamodule
from src.networks import instantiate_network
from src.utils.average_meter import AverageMeter
from src.utils.dot_dict import DotDict
from src.utils.dot_dict import flatten_dict

# Root data directory and experiment tag; reused throughout the script for
# checkpoints, logs, features and submission files.
path = 'data/'
version = 'L4090-B002'

# Per-experiment checkpoint directory, e.g. data/models/L4090-B002/.
os.makedirs(path + 'models', exist_ok=True)
output_path = path + 'models/' + version + '/'
os.makedirs(output_path, exist_ok=True)

# Remaining working directories used by later cells.
for sub_dir in (
    'feature',
    'feature_importance',
    'submissions',
    'submissions/content/gen_answer_B/',
    'logs',
):
    os.makedirs(path + sub_dir, exist_ok=True)
#%%
class StepDecay(LRScheduler):
    """Step learning-rate schedule: lr = base_lr * gamma ** (epoch // step_size).

    A classic "StepLR" policy implemented on top of paddle's LRScheduler.
    """

    def __init__(
        self, learning_rate, step_size, gamma=0.1, last_epoch=-1, verbose=False
    ):
        # Validate hyper-parameters before handing control to the base class.
        if not isinstance(step_size, int):
            raise TypeError(
                "The type of 'step_size' must be 'int', but received %s."
                % type(step_size)
            )
        if gamma >= 1.0:
            raise ValueError("gamma should be < 1.0.")

        self.step_size = step_size
        self.gamma = gamma
        super().__init__(learning_rate, last_epoch, verbose)

    def get_lr(self):
        # The number of completed decay intervals is the exponent on gamma.
        completed_intervals = self.last_epoch // self.step_size
        return self.base_lr * self.gamma ** completed_intervals


def instantiate_scheduler(config):
    """Build the LR scheduler selected by ``config.opt_scheduler``.

    Supports "CosineAnnealingLR" (paddle's CosineAnnealingDecay) and
    "StepLR" (the local StepDecay class); anything else raises ValueError.
    """
    name = config.opt_scheduler
    if name == "CosineAnnealingLR":
        return paddle.optimizer.lr.CosineAnnealingDecay(
            config.lr, T_max=config.opt_scheduler_T_max
        )
    if name == "StepLR":
        return StepDecay(
            config.lr, step_size=config.opt_step_size, gamma=config.opt_gamma
        )
    raise ValueError(f"Got {config.opt_scheduler=}")


# loss function with rel/abs Lp loss
class LpLoss(object):
    """Relative / absolute Lp loss on batched paddle tensors.

    ``__call__`` evaluates the *relative* error ||x - y||_p / ||y||_p.
    ``abs`` evaluates an absolute Lp norm assuming a uniform mesh with
    spacing 1/(n-1) along the second axis.

    Args:
        d: Problem dimension (enters the mesh-size scaling of ``abs``).
        p: Order of the Lp norm; must be positive.
        size_average: Average (True) or sum (False) when reducing.
        reduction: If False, return the unreduced value(s).
    """

    def __init__(self, d=2, p=2, size_average=True, reduction=True):
        super(LpLoss, self).__init__()
        # Dimension and Lp-norm type are positive
        assert d > 0 and p > 0

        self.d = d
        self.p = p
        self.reduction = reduction
        self.size_average = size_average

    def abs(self, x, y):
        """Absolute Lp loss on a uniform mesh (not used by this script's flow)."""
        # FIX: paddle tensors expose ``shape`` (a list); in paddle, ``size``
        # is an int property, so the original torch-style ``x.size()[0]``
        # would raise at runtime — TODO confirm against the paddle version
        # pinned by this project.
        num_examples = x.shape[0]

        # Assume uniform mesh
        h = 1.0 / (x.shape[1] - 1.0)

        all_norms = (h ** (self.d / self.p)) * paddle.norm(
            x.reshape((num_examples, -1)) - y.reshape((num_examples, -1)), self.p, 1
        )

        if self.reduction:
            if self.size_average:
                return paddle.mean(all_norms)
            else:
                return paddle.sum(all_norms)

        return all_norms

    def rel(self, x, y):
        """Relative Lp loss ||x - y||_p / ||y||_p over the whole tensor."""
        # FIX: use the configured order ``self.p`` for the numerator too; it
        # was hard-coded to the 2-norm (identical for the default p=2 used
        # everywhere in this script, consistent for any other p).
        diff_norms = paddle.norm(x - y, self.p)
        y_norms = paddle.norm(y, self.p)

        if self.reduction:
            # Both norms are scalars here, so mean/sum merely squeeze.
            if self.size_average:
                return paddle.mean(diff_norms / y_norms)
            else:
                return paddle.sum(diff_norms / y_norms)

        return diff_norms / y_norms

    def __call__(self, x, y):
        return self.rel(x, y)


def set_seed(seed: int = 0):
    """Seed paddle, numpy and the stdlib RNG for reproducible runs."""
    import random

    paddle.seed(seed)
    np.random.seed(seed)
    random.seed(seed)


def str2intlist(s: str) -> List[int]:
    """Parse a comma-separated string such as "32,32,32" into [32, 32, 32]."""
    tokens = s.split(",")
    return [int(token.strip()) for token in tokens]


def parse_args(yaml="UnetShapeNetCar.yaml"):
    """Parse the known command-line arguments for this script.

    Args:
        yaml: Config file name appended to ``configs/`` to form the
            ``--config`` default. (The parameter shadows the yaml module
            inside this function only; kept for call-site compatibility.)

    Returns:
        argparse.Namespace; unknown CLI arguments are silently ignored,
        which keeps the parser usable inside notebook kernels.
    """
    parser = argparse.ArgumentParser()
    add = parser.add_argument

    add("--config", type=str, default="configs/" + yaml,
        help="Path to the configuration file")
    add("--device", type=str, default="cuda",
        help="Device to use for training (cuda or cpu)")
    add("--lr", type=float, default=None, help="Learning rate")
    add("--batch_size", type=int, default=None, help="Batch size")
    add("--num_epochs", type=int, default=None, help="Number of epochs")
    add("--checkpoint", type=str, default=None,
        help="Path to the checkpoint file to resume training")
    add("--output", type=str, default=output_path,
        help="Path to the output directory")
    add("--log", type=str, default="log",
        help="Path to the log directory")
    add("--logger_types", type=str, nargs="+", default=None)
    add("--seed", type=int, default=0, help="Random seed for training")
    add("--model", type=str, default=None, help="Model name")
    add("--sdf_spatial_resolution", type=str2intlist, default=None,
        help="SDF spatial resolution. Use comma to separate the values e.g. 32,32,32.")

    known_args, _ = parser.parse_known_args()
    return known_args


def load_config(config_path):
    """Load a YAML config supporting a custom ``!include`` tag, flattened
    into a DotDict with dotted keys.

    NOTE(review): ``yaml.Loader`` is the full (unsafe) loader and can execute
    arbitrary tags — only use with trusted, local config files.
    """

    def include_constructor(loader, node):
        # Resolve the included path relative to the file currently loading
        # (loader.name is the path of the open YAML file).
        base_folder = os.path.dirname(loader.name)
        included_file = os.path.join(base_folder, loader.construct_scalar(node))

        # Read and parse the included file with the same loader.
        with open(included_file, "r") as file:
            return yaml.load(file, Loader=yaml.Loader)

    # Register the custom constructor for !include
    yaml.Loader.add_constructor("!include", include_constructor)

    with open(config_path, "r") as f:
        config = yaml.load(f, Loader=yaml.Loader)

    # Flatten nested sections into dotted keys, then wrap for attribute access.
    return DotDict(flatten_dict(config))



import re
 
def extract_numbers(s):
    """Return every maximal run of digits in ``s`` as a list of ints."""
    matches = re.findall(r'\d+', s)
    return [int(match) for match in matches]


@paddle.no_grad()
def eval(model, datamodule, config, loss_fn=None, output_file=None):
    """Run the model over the test dataloader and collect predictions.

    Args:
        model: Network exposing ``eval_dict(data_dict, loss_fn, decode_fn)``.
        datamodule: Provides ``test_dataloader`` and ``decode``.
        config: Flat config (needs ``eval_batch_size``; ``data_dir`` when
            drag-history references are loaded).
        loss_fn: Optional loss passed through to ``model.eval_dict``.
        output_file: Unused; kept for call-site compatibility.

    Returns:
        ``(averaged_output_dict | None, pred_list, label_list)`` —
        the dict maps metric name to its mean over samples and is only
        built when decoded labels are available (labelled validation data);
        on unlabeled test data the first element is None.

    NOTE(review): the name shadows the builtin ``eval``; kept unchanged so
    existing call sites keep working.
    """
    model.eval()
    test_loader = datamodule.test_dataloader(
        batch_size=config.eval_batch_size, shuffle=False, num_workers=0
    )
    data_list = []   # per-sample metric rows; first row is the header
    pred_list = []   # decoded pressure predictions, one flat list per sample
    label_list = []  # decoded pressure labels (stays empty on test data)
    averaged_output_dict = {}

    for i, data_dict in enumerate(test_loader):
        out_dict = model.eval_dict(
            data_dict, loss_fn=loss_fn, decode_fn=datamodule.decode
        )
        pred_list.append(
            out_dict['p_predict_decode'].cpu().numpy().reshape(-1).tolist()
        )
        if 'p_label_decode' in out_dict:
            label_list.append(
                out_dict['p_label_decode'].cpu().numpy().reshape(-1).tolist()
            )

        if 'c_p truth' in out_dict:
            if i == 0:
                data_list.append(['id', 'c_d', 'c_d ref', 'c_f', 'c_f ref', 'c_p', 'c_p ref'])
            # Reference coefficients come from the dataset's drag-history
            # files; the last recorded entry is the converged value.
            data_path = config["data_dir"]
            index = str.zfill(str(i + 1), 3)
            c_d = paddle.load(data_path + "/test/drag_history_" + index + ".pdtensor")
            c_p = float(c_d["c_p"][-1])
            c_f = float(c_d["c_f"][-1])
            c_d = c_p + c_f
            data_list.append([i, c_d, float(out_dict['c_d pred']), c_f,
                              float(out_dict['c_f pred']), c_p, float(out_dict['c_p pred'])])

        if 'l2 decoded pressure' in out_dict:  # Ahmed-body dataset
            if i == 0:
                data_list.append(['id', 'l2 p decoded'])
            # BUG FIX: this branch previously read out_dict['l2_decoded']
            # (the ShapeNetCar key), which would raise KeyError when only
            # the Ahmed key is present.
            data_list.append([i, float(out_dict['l2 decoded pressure'])])
        if 'l2_decoded' in out_dict:  # ShapeNetCar dataset
            if i == 0:
                data_list.append(['id', 'l2 p decoded'])
            data_list.append([i, float(out_dict['l2_decoded'])])

    if len(label_list) > 0:
        # Drop the id column, then average each metric column over samples.
        data_list = np.array(data_list)[:, 1:]
        for i, k in enumerate(data_list[0]):
            averaged_output_dict[k] = data_list[1:, i].astype(np.float32).mean()
        return averaged_output_dict, pred_list, label_list
    else:
        return None, pred_list, label_list

#%%

# Parse CLI flags and load the YAML config they point at.
args = parse_args("UnetShapeNetCar.yaml")
# print command line args
config = load_config(args.config)

# Update config with command line arguments
# (explicitly-set CLI values override YAML values; --config itself is skipped)
for key, value in vars(args).items():
    if key != "config" and value is not None:
        config[key] = value

# pretty print the config
# Only rank 0 prints so distributed runs do not duplicate the table.
if paddle.distributed.get_rank() == 0:
    print("\n--------------- Config yaml Table----------------")
    for key, value in config.items():
        print("Key: {:<30} Val: {}".format(key, value))
    print("--------------- Config yaml Table----------------\n")

# Set the random seed
if config.seed is not None:
    set_seed(config.seed)
        

#%%
# --- B-track ShapeNetCar dataset configuration ---
config.n_train = 449
config.n_test = 50
config.batch_size = 1
# FIX: this assignment was duplicated on two consecutive lines.
config.data_module = 'CarDataModule'
config.data_dir = 'Dataset/data_train_B/'
config.test_data_dir = 'Dataset/data_valid_B/'

#%%
# --- Optimiser schedule / model capacity overrides ---
config.opt_step_size = 1   # decay the LR every epoch ...
config.opt_gamma = 0.97    # ... by 3% per step (see StepDecay)
config.num_epochs = 400
config.hidden_channels = 128
# Initialize the model
model = instantiate_network(config)
model  # notebook cell tail: displays the model repr; a no-op when run as a script
#%%
from loguru import logger

# Mirror training progress into a per-version log file under data/logs/.
log_file = path + f"logs/log_{version}.log"
logger.add(log_file)
# Validate after every epoch.
config.eval_interval = 1
#%%
import numpy as np
class valid_LpLoss(object):
    """Numpy counterpart of ``LpLoss`` used for validation scoring.

    ``__call__`` computes the relative Lp error ||x - y||_p / ||y||_p
    on numpy arrays.

    Args:
        d: Problem dimension (kept for API symmetry with LpLoss; unused).
        p: Order of the Lp norm; must be positive.
        size_average: Average (True) or sum (False) when reducing.
        reduction: If False, return the raw ratio.
    """

    def __init__(self, d=2, p=2, size_average=True, reduction=True):
        super(valid_LpLoss, self).__init__()
        # Dimension and Lp-norm type are positive
        assert d > 0 and p > 0

        self.d = d
        self.p = p
        self.reduction = reduction
        self.size_average = size_average

    def rel(self, x, y):
        # FIX: use the configured order ``self.p`` for the numerator too; it
        # was hard-coded to the 2-norm (identical for the default p=2 that
        # this script uses, consistent for any other p).
        diff_norms = np.linalg.norm(x - y, self.p)
        y_norms = np.linalg.norm(y, self.p)

        if self.reduction:
            # Both norms are scalars, so mean/sum only normalise the type.
            if self.size_average:
                return np.mean(diff_norms / y_norms)
            else:
                return np.sum(diff_norms / y_norms)

        return diff_norms / y_norms

    def __call__(self, x, y):
        return self.rel(x, y)
#%%
from sklearn.metrics import mean_absolute_error, mean_squared_error
import math,time

# Initialize the dataloaders
datamodule = instantiate_datamodule(config)

# NOTE(review): shuffle=False on the *training* loader gives deterministic
# epochs but is unusual -- confirm this is intentional for this data module.
train_loader = datamodule.train_dataloader(
    batch_size=config.batch_size, shuffle=False
)

# Initialize the optimizer
# Adam driven by the scheduler built above, with mild L2 regularisation.
scheduler = instantiate_scheduler(config)
optimizer = paddle.optimizer.Adam(
    parameters=model.parameters(), learning_rate=scheduler, weight_decay=1e-4
)

# Initialize the loss function
loss_fn = LpLoss(size_average=True)  # paddle-side training loss
valid_loss_fn = valid_LpLoss(size_average=True)  # numpy-side validation metric
best_rmse=np.inf  # best validation RMSE so far; checkpointing criterion
time_list=[time.time()]  # wall-clock timestamps, one per validation pass
# Main training loop: one optimisation pass per epoch, validation every
# config.eval_interval epochs, best-RMSE checkpointing to output_path.
for ep in range(config.num_epochs):
    model.train()
    train_l2_meter = AverageMeter()
    # train_reg = 0
    for i, data_dict in enumerate(train_loader):
        optimizer.clear_grad()
        loss_dict = model.loss_dict(data_dict, loss_fn=loss_fn)
        # Sum every loss component the model reports before backprop.
        loss = 0
        for k, v in loss_dict.items():
            loss = loss + v.mean()
        loss.backward()
        optimizer.step()
        train_l2_meter.update(loss.item())

    # Validate periodically (every epoch with eval_interval=1) and on the
    # final epoch regardless.
    if ep % config.eval_interval == 0 or ep == config.num_epochs - 1:
        # eval_dict is unused here; the per-sample preds/labels drive metrics.
        eval_dict,valid_preds_,valid_labels_ = eval(model, datamodule, config, loss_fn)

        score=[]
        valid_preds=[]
        valid_labels=[]
        # Per-sample relative L2 ("score"); flatten all samples for MSE/MAE.
        for idx,(pred) in enumerate(valid_preds_):
            label=valid_labels_[idx]
            l2 = valid_loss_fn(np.array(pred),np.array(label))
            score.append(l2)
            valid_preds+=pred
            valid_labels+=label
        mse = mean_squared_error(valid_labels, valid_preds)
        rmse = math.sqrt(mse)
        mae = mean_absolute_error(valid_labels, valid_preds)
        score=np.array(score)
        score=np.mean(score)
        time_list.append(time.time())
        logger.info(f"Epoch [{ep + 1}/{config.num_epochs}] | Loss: {train_l2_meter.avg:.4f} | Lr: {scheduler.get_lr():.8f}"+
                    f" | Valid mse: {mse:.5f} | Valid rmse: {rmse:.5f} | Valid mae: {mae:.5f} | Valid score: {score:.5f} | time: {time_list[-1]-time_list[-2]:.4f}")
        # ``>=`` means an equal RMSE still refreshes the checkpoint.
        if best_rmse >= rmse:
            paddle.save(
                model.state_dict(),
                os.path.join(output_path, f"model-{config.model}.pdparams"),
            )
            logger.info("model saved")
            best_rmse=rmse
    # Decay the LR once per epoch (StepDecay with step_size=1, gamma=0.97).
    scheduler.step()
#%%
# Rebuild the network and reload the best (lowest validation RMSE) weights
# saved during training.
model = instantiate_network(config)
checkpoint = paddle.load(output_path+f"model-{config.model}.pdparams")
model.load_dict(checkpoint)
#%%
# Point the datamodule at the unlabeled test split.
# NOTE(review): n_train=1 presumably just keeps the datamodule constructor
# happy even though no training data is needed here -- confirm.
config.test_data_dir="Dataset/data_test_B/"
config.n_train = 1
config.n_test = 50
datamodule=instantiate_datamodule(config)
# No labels in the test split, so eval returns (None, preds, []).
eval_dict,test_preds,_ = eval(
    model, datamodule, config
)
#%%
# Persist one decoded pressure prediction per test sample, named by file id.
for idx, file_id in enumerate(datamodule.test_indices):
    prediction = np.array(test_preds[idx]).astype(np.float32)
    np.save(path + f'submissions/content/gen_answer_B/press_{file_id}.npy', prediction)

#%%
import zipfile
import time  # re-import kept from the notebook; ``time`` is already imported above

# Package every saved .npy prediction under content/gen_answer_B/ inside the
# archive, matching the expected submission layout.
# FIX: dropped pointless f-string prefixes on literals with no placeholders.
folder_path = path + 'submissions/content/gen_answer_B/'
with zipfile.ZipFile('B_result_unet.zip', 'w', zipfile.ZIP_DEFLATED) as zipf:
    for root, _, files in os.walk(folder_path):
        for file in files:
            file_path = os.path.join(root, file)
            zipf.write(file_path, 'content/gen_answer_B/' + os.path.basename(file_path))
#%%
