import datetime
import os
import sys
import time

import numpy as np
import pandas as pd
import seaborn as sns

import torch
import torch.nn as nn
from torch_geometric.loader import DataLoader
from torch.cuda import amp

from models import *
from dataset import STLGraphDataset, NormalizeAugment
from utils import *

from sklearn.metrics import r2_score
from scipy.stats import pearsonr, spearmanr

# ---- Configuration -------------------------------------------------------
model_name = 'HGPAttention'
test_name = "CAERI-Test-Model"
test_dir = "/root/liubo/Dataset"

input_dim, hidden_dim, layers, output_dim = 6, 256, 3, 1
CUDA = 1
num_cpus = 64
batch_size = 1
simplify_size = 417114  # target face/vertex count after mesh downsampling

checkpoint_path = os.path.join('./logs/', model_name)
# Explicit check instead of `assert`: asserts are stripped under `python -O`,
# which would silently skip this validation.
if not os.path.exists(checkpoint_path):
    raise FileNotFoundError("There is no such checkpoint_path !!!")
pred_path = os.path.join(checkpoint_path, test_name)
data_path = os.path.join(test_dir, test_name)

# exist_ok=True already tolerates an existing directory; no pre-check needed.
os.makedirs(pred_path, exist_ok=True)

# Mirror stdout/stderr into a timestamped log file under pred_path.
fileName = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
sys.stdout = Logger(os.path.join(pred_path, fileName + ".log"))  # capture normal print output
sys.stderr = Logger(os.path.join(pred_path, fileName + ".log"))  # capture traceback output

print("==============================")
print("Model:", model_name, "CUDA:", CUDA, "CPUs", num_cpus)

# Collect every STL mesh in the test directory as the inference set.
stl_files = [os.path.join(data_path, f) for f in os.listdir(data_path) if f.endswith('.stl')]
print("stl names:", stl_files)
print(f"Dataset for test: {test_name}")

def main():
    """Run inference with the checkpointed model on every STL file in `data_path`.

    Side effects: prints one prediction line per sample plus timing/GPU-memory
    summaries (all routed through the Logger-redirected stdout).
    """
    # Device selection: CUDA only when requested AND actually available.
    if CUDA and torch.cuda.is_available():
        device = torch.device('cuda')
    else:
        device = torch.device('cpu')

    # Look up the model class by name instead of eval() on a built string —
    # same effect (classes come in via `from models import *`) without
    # executing arbitrary code from a string.
    model_cls = globals()[model_name]
    model = model_cls(input_dim=input_dim, hidden_dim=hidden_dim, n_layers=layers,
                      n_head=8, out_dim=output_dim, slice_num=32)
    model = model.to(device)
    print(f"Total number of parameters: {count_parameters(model)}")

    # Labels are unused at test time; zeros are placeholders for the dataset API.
    X_test = stl_files
    y_test = np.zeros(len(X_test))
    transform = NormalizeAugment(False, mode='standard')
    test_dataset = STLGraphDataset(X_test, y_test, input_dim, transform, simplify_size=simplify_size)

    # Load the best checkpoint; strip any DataParallel 'module.' prefixes so the
    # state dict matches a single-GPU/CPU model.
    ckpt_file = os.path.join(checkpoint_path, 'model_best.pt')
    checkpoint = torch.load(ckpt_file, map_location=device)
    print(f"Loading model from {ckpt_file} !!!!")
    new_state_dict = {key.replace('module.', ''): value for key, value in checkpoint.items()}
    model.load_state_dict(new_state_dict)

    # shuffle=False keeps loader order aligned with X_test for name reporting.
    test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False, num_workers=num_cpus)

    print("Begin testing....")
    model.eval()
    Pred = []
    t_begin = time.time()
    with torch.no_grad():
        for i, data in enumerate(test_loader):
            t0 = time.time()
            data = data.to(device)
            with amp.autocast():  # mixed-precision inference
                outputs = model(data)

            pred = outputs.float().view(-1).cpu().numpy()

            # Map each prediction back to its source file.  With shuffle=False
            # the global sample index is i * batch_size + ii, which stays
            # correct even if batch_size is raised above 1 (the old X_test[i]
            # was only valid for batch_size == 1).
            for ii in range(len(pred)):
                name = X_test[i * batch_size + ii].split('/')[-1]
                print("{} pred:{:.5f} time:{:.2f}".format(name, pred[ii], time.time() - t0))
                Pred.append(pred[ii])

    elapsed = time.time() - t_begin
    # max(..., 1) guards against ZeroDivisionError when the test set is empty.
    print("Time used for All: {:.4f} s, Average:{:.4f} s per sample".format(
        elapsed, elapsed / max(len(Pred), 1)))
    print_gpu_memory()
    print("END!")

if __name__ == "__main__":  # script entry point
    main()
