import datetime
import os
import sys
import time

import numpy as np
import pandas as pd
import seaborn as sns
import torch
import torch.nn as nn
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import r2_score
from torch.cuda import amp
from torch_geometric.loader import DataLoader

from models import *
from dataset import STLGraphDataset, NormalizeAugment
from utils import *

# --- Configuration ------------------------------------------------------------
# Directory holding the DrivAer STL geometry files and the Cd label spreadsheet.
stl_dir = '/shared-public/data/DrivAer_model_TrainingData_3900/DrivAer_model_TrainingData_3900/'
# Class name of the model to instantiate; also the sub-directory under ./logs/
# where its checkpoint and prediction logs live.
model_name = 'HGPAttention'
# Per-node input feature size, hidden width, number of layers, and scalar
# output dimension (the predicted drag coefficient Cd).
input_dim, hidden_dim, layers, output_dim = 6, 256, 3, 1
CUDA = 1  # 1 = use GPU when available, 0 = force CPU
num_cpus = 16  # DataLoader worker processes
test_num = 300  # number of samples taken from the END of the dataset as the test split
batch_size = 1  # evaluation batch size
simplify_size = 417114  # target mesh size passed to STLGraphDataset (assumed face/vertex budget — TODO confirm)

# --- Logging redirection and data manifest ------------------------------------
checkpoint_path = os.path.join('./logs/', model_name)
if not os.path.exists(checkpoint_path):
    # Raise explicitly instead of `assert`: asserts are stripped under `python -O`.
    raise FileNotFoundError(f"There is no such checkpoint_path: {checkpoint_path} !!!")

# Directory for per-run prediction logs and plots.
# `exist_ok=True` already tolerates an existing directory, so no pre-check is needed.
pred_path = os.path.join(checkpoint_path, 'predLog')
os.makedirs(pred_path, exist_ok=True)

# Timestamped log file; both stdout and stderr are mirrored into it so normal
# prints and tracebacks end up in the same place.
fileName = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
sys.stdout = Logger(os.path.join(pred_path, fileName+".log"))  # capture normal print output
sys.stderr = Logger(os.path.join(pred_path, fileName+".log"))  # capture traceback output

print("==============================")
print("Model:", model_name, "CUDA:", CUDA, "CPUs", num_cpus)
print("Testdata:", test_num)

# Row i of the spreadsheet corresponds to STL file
# 'DrivAer_model_TrainingData_%04d.stl' with a 1-based index.
xlsx = pd.read_excel(os.path.join(stl_dir, 'DrivAer_model_TrainingData_3900.xlsx'))
stl_files = [os.path.join(stl_dir, 'DrivAer_model_TrainingData_' + str(i + 1).zfill(4) + '.stl') for i in range(len(xlsx))]
labels = list(xlsx['Average Cd'])


def main():
    """Evaluate the trained model on the held-out test split.

    Loads the best checkpoint from ``checkpoint_path``, runs inference over
    the last ``test_num`` STL samples, prints per-sample predictions, and
    reports aggregate metrics (MSE, MAE, relative error, R^2, Pearson and
    Spearman correlation).  Predictions are saved to ``preds.txt`` and an
    error-distribution scatter plot to ``error.png`` under ``pred_path``.
    """
    device = torch.device('cuda' if CUDA and torch.cuda.is_available() else 'cpu')

    # Look the model class up by name instead of eval()-ing a constructed code
    # string: same effect, no arbitrary-code-execution surface.
    model_cls = globals()[model_name]
    model = model_cls(input_dim=input_dim, hidden_dim=hidden_dim, n_layers=layers,
                      n_head=8, out_dim=output_dim, slice_num=32)
    model = model.to(device)
    print(f"Total number of parameters: {count_parameters(model)}")

    criterion = nn.MSELoss()

    # Test split: the last `test_num` geometries and their Cd labels.
    X_test, y_test = stl_files[-test_num:], labels[-test_num:]
    transform = NormalizeAugment(False, mode='standard')
    test_dataset = STLGraphDataset(X_test, y_test, input_dim, transform, simplify_size=simplify_size)

    # Load the best checkpoint; strip a possible DataParallel 'module.' prefix
    # so the weights load into a plain (non-wrapped) model.
    ckpt_file = os.path.join(checkpoint_path, 'model_best.pt')
    state_dict = torch.load(ckpt_file, map_location=device)
    print(f"Loading model from {ckpt_file} !!!!")
    model.load_state_dict({k.replace('module.', ''): v for k, v in state_dict.items()})

    test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False, num_workers=num_cpus)

    print("Begin testing....")
    model.eval()
    MSE_loss = 0.
    Pred, Truth = [], []
    t_begin = time.time()
    with torch.no_grad():
        for i, data in enumerate(test_loader):
            t0 = time.time()
            data = data.to(device)

            outputs = model(data)
            mse_loss = criterion(outputs, data.y).item()

            pred = outputs.float().view(-1).cpu().numpy()
            label = data.y.float().view(-1).cpu().numpy()

            mae_loss = abs(pred - label)
            relative_error = mae_loss / label  # assumes Cd labels are nonzero/positive

            for ii in range(len(pred)):
                print("{} pred:{:.5f} truth:{:.5f} MAE:{:.5f} Relative:{:.4f} time:{:.2f}".format(
                    i * batch_size + ii, pred[ii], label[ii], mae_loss[ii], relative_error[ii], time.time() - t0))
                Pred.append(pred[ii])
                Truth.append(label[ii])

            # Weight the batch-mean MSE by the batch size so the final average
            # is a true per-sample mean even when batch_size > 1.
            MSE_loss += mse_loss * len(pred)
    print("Time used for testing: {:.4f} s, per stl costs:{:.4f}s".format(
        time.time() - t_begin, (time.time() - t_begin) / len(Pred)))
    print_gpu_memory()
    # BUG FIX: divide by the number of samples, not len(test_loader) (number of
    # batches); the two only coincide when batch_size == 1.
    mean_mse_loss = MSE_loss / len(Pred)
    Pred, Truth = np.array(Pred), np.array(Truth)
    r2 = r2_score(Truth, Pred)
    Error = np.abs(Pred - Truth)
    relative = np.mean(Error / Truth)
    pearsonr_corr = pearsonr(Truth, Pred)[0]
    spearmanr_corr = spearmanr(Truth, Pred)[0]

    # Persist (prediction, truth) pairs for offline analysis.
    np.savetxt(os.path.join(pred_path, 'preds.txt'), np.stack([Pred, Truth], -1))
    print("MSE:{:.4e} MAE:{:.5f} ± {:.5f} max:{:.4f} relative:{:.4f} r2:{:.4f} pearCorr:{:.4f} spearCorr:{:.4f}".format(
        mean_mse_loss, np.mean(Error), np.std(Error), np.max(Error), relative, r2, pearsonr_corr, spearmanr_corr))

    # Plot the absolute-error distribution against the predicted Cd.
    g = sns.jointplot(x=Pred, y=Error, kind='scatter')
    g.set_axis_labels("Cd", "Error")
    g.fig.set_size_inches(10, 8)
    plt.savefig(os.path.join(pred_path, 'error.png'))
    plt.close()

    print("Time used for testing: {:.4f} s".format(time.time() - t_begin))
    print("END!")

if __name__ == '__main__':
    main()
