#%%
import pandas as pd
import os
os.environ["CUDA_VISIBLE_DEVICES"]='0'  # restrict the run to GPU 0 (set before torch touches CUDA)

import numpy as np
import random
import torch

# Root directory for all run artifacts and an identifier for this experiment.
path='data/'
version='L3090-A012'

# Create the output directory tree up front (no-op when it already exists).
os.makedirs(path+'models',exist_ok=True)
os.makedirs(path+'feature',exist_ok=True)
os.makedirs(path+'feature_importance',exist_ok=True)
os.makedirs(path+'submissions',exist_ok=True)
os.makedirs(path+'submissions/content/gen_answer_A/',exist_ok=True)
os.makedirs(path+'logs',exist_ok=True)
#%%
def seed_everything(seed=42):
    """Seed every RNG in use (python, hash, numpy, torch CPU and all GPUs).

    Args:
        seed: base seed applied to every generator (default 42).
    """
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    # Fix: seed every visible GPU, not only the current device
    # (manual_seed_all subsumes the previous torch.cuda.manual_seed call).
    torch.cuda.manual_seed_all(seed)
    # Deterministic cuDNN kernels for reproducibility (may be slower).
    torch.backends.cudnn.deterministic = True
seed_everything()
#%%
def _load_mesh_ids(list_path):
    """Read a one-id-per-line mesh list, ids kept as strings, sorted ascending."""
    ids = pd.read_csv(list_path, header=None, dtype=str)
    ids.columns = ['id']
    return ids.sort_values(by=['id'], ascending=[True]).reset_index(drop=True)

train_df = _load_mesh_ids("Dataset/Training_data/watertight_meshes.txt")
# Hold out everything after the first 450 sorted meshes for validation.
valid_df = train_df[train_df.index >= 450].reset_index(drop=True)
valid_df
#%%
train_df = train_df[train_df.index < 450].reset_index(drop=True)
train_df
#%%
test_df = _load_mesh_ids("Dataset/data_test_A/watertight_meshes.txt")
test_df
#%%
from plyfile import PlyData
from tqdm import tqdm

# Load per-mesh vertex positions and pressure labels for the training split.
train_pos = []
train_press = []
for file_id in tqdm(train_df['id']):
    ply = PlyData.read(f"Dataset/Training_data/Feature/mesh_{file_id}.ply")
    vtx = ply['vertex']
    # Stack coordinates into (1, V, 3); V is inferred from the file instead of
    # hard-coding the vertex count (3586 in this dataset).
    pos = np.stack(
        [np.asarray(vtx['x']), np.asarray(vtx['y']), np.asarray(vtx['z'])],
        axis=-1,
    ).reshape(1, -1, 3)
    train_pos.append(pos)
    # Keep pressure entries [0:16] and [112:] — presumably dropping bad or
    # duplicated channels; TODO confirm against the dataset description.
    press = np.load(f"Dataset/Training_data/Label/press_{file_id}.npy").reshape((-1,))
    press = np.concatenate((press[0:16], press[112:]), axis=0).reshape(1, -1)
    train_press.append(press)
train_pos = np.concatenate(train_pos, axis=0).astype(np.float32)
train_press = np.concatenate(train_press, axis=0).astype(np.float32)
#%%
train_pos.shape, train_press.shape,
#%%
# Load per-mesh vertex positions and pressure labels for the validation split.
valid_pos = []
valid_press = []
for file_id in tqdm(valid_df['id']):
    ply = PlyData.read(f"Dataset/Training_data/Feature/mesh_{file_id}.ply")
    vtx = ply['vertex']
    # Stack coordinates into (1, V, 3); V is inferred from the file instead of
    # hard-coding the vertex count (3586 in this dataset).
    pos = np.stack(
        [np.asarray(vtx['x']), np.asarray(vtx['y']), np.asarray(vtx['z'])],
        axis=-1,
    ).reshape(1, -1, 3)
    valid_pos.append(pos)
    # Keep pressure entries [0:16] and [112:] — presumably dropping bad or
    # duplicated channels; TODO confirm against the dataset description.
    press = np.load(f"Dataset/Training_data/Label/press_{file_id}.npy").reshape((-1,))
    press = np.concatenate((press[0:16], press[112:]), axis=0).reshape(1, -1)
    valid_press.append(press)
valid_pos = np.concatenate(valid_pos, axis=0).astype(np.float32)
valid_press = np.concatenate(valid_press, axis=0).astype(np.float32)
#%%
valid_pos.shape, valid_press.shape,
#%%
# Load per-mesh vertex positions for the test split (labels are unknown).
# Fix: dropped the unused `test_press` list that was never populated.
test_pos = []
for file_id in tqdm(test_df['id']):
    ply = PlyData.read(f"Dataset/data_test_A/mesh_{file_id}.ply")
    vtx = ply['vertex']
    # Stack coordinates into (1, V, 3); V is inferred from the file instead of
    # hard-coding the vertex count (3586 in this dataset).
    pos = np.stack(
        [np.asarray(vtx['x']), np.asarray(vtx['y']), np.asarray(vtx['z'])],
        axis=-1,
    ).reshape(1, -1, 3)
    test_pos.append(pos)
test_pos = np.concatenate(test_pos, axis=0).astype(np.float32)

#%%
test_pos.shape
#%%
# Label standardization constants — presumably precomputed mean/std of the
# training pressures; TODO confirm how they were derived.
mean,std=-37.09, 48.0955
#%%
# Standardize labels; predictions are de-standardized with the same constants.
train_press=(train_press-mean)/std
valid_press=(valid_press-mean)/std
#%%
from torch.utils.data import Dataset, DataLoader
class MyDataset(Dataset):
    """Minimal (input, label) pair dataset over pre-loaded arrays or tensors."""

    def __init__(self, Input1, labels):
        # Fix: torch.as_tensor avoids the extra copy (and the UserWarning)
        # that torch.tensor emits when handed an existing torch.Tensor,
        # e.g. the zero labels built for the test split.
        self.Input1 = torch.as_tensor(Input1)
        self.labels = torch.as_tensor(labels)

    def __len__(self):
        # Number of samples (first dimension of the inputs).
        return len(self.Input1)

    def __getitem__(self, index):
        # Return one (input, label) pair.
        return self.Input1[index], self.labels[index]
#%%
train_dataset=MyDataset(train_pos,train_press)
valid_dataset=MyDataset(valid_pos,valid_press)
# Test labels are unknown; zero placeholders keep the Dataset interface uniform.
test_dataset=MyDataset(test_pos,torch.zeros(test_pos.shape[0],valid_press.shape[1]))
#%%
# Shuffle only the training loader; eval/test order must stay fixed.
train_loader=DataLoader(train_dataset, batch_size=6, shuffle=True)
valid_loader=DataLoader(valid_dataset, batch_size=16, shuffle=False)
test_loader=DataLoader(test_dataset, batch_size=1, shuffle=False)
#%%
from transformers import BertConfig
from modeling_bert_SDPA import BertModel
from torch import nn

class CustomModel(nn.Module):
    """Per-vertex pressure regressor.

    Projects each 3-D vertex coordinate to the hidden size, runs a BERT-style
    encoder over the vertices as a token sequence, and maps each hidden state
    back to a scalar pressure.
    """

    # Encoder hyper-parameters; dropout is disabled for this regression task.
    _CFG = dict(
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        hidden_size=512,
        num_attention_heads=4,
        num_hidden_layers=12,
        max_position_embeddings=3586,
        intermediate_size=2048,
        vocab_size=1,
    )

    def __init__(self):
        super().__init__()
        self.config = BertConfig()
        for key, value in self._CFG.items():
            setattr(self.config, key, value)
        self.bert = BertModel(self.config)
        self.fc1 = nn.Linear(3, self.config.hidden_size)
        self.fc2 = nn.Linear(self.config.hidden_size, 1)

    def forward(self, x):
        # (B, V, 3) -> (B, V, hidden) -> encoder -> (B, V) pressures
        hidden = self.fc1(x)
        encoded = self.bert(inputs_embeds=hidden)[0]
        return self.fc2(encoded).squeeze(dim=-1)


#%%
# Instantiate the regressor on GPU and compile it (PyTorch 2.x) for speed.
model=CustomModel()
model = model.cuda()
model=torch.compile(model)
class EMA():
    """Exponential moving average of a model's trainable parameters.

    Usage: register() once after init, update() after every optimizer step,
    apply_shadow() before evaluation/saving, restore() afterwards.
    """

    def __init__(self, model, decay):
        self.model = model
        self.decay = decay
        self.shadow = {}   # name -> EMA weight
        self.backup = {}   # name -> live weight stashed by apply_shadow()

    def _trainable(self):
        # Yield (name, param) pairs that participate in the average.
        for name, param in self.model.named_parameters():
            if param.requires_grad:
                yield name, param

    def register(self):
        # Snapshot current weights as the EMA starting point.
        for name, param in self._trainable():
            self.shadow[name] = param.data.clone()

    def update(self):
        # shadow <- (1 - decay) * live + decay * shadow
        for name, param in self._trainable():
            assert name in self.shadow
            blended = (1.0 - self.decay) * param.data + self.decay * self.shadow[name]
            self.shadow[name] = blended.clone()

    def apply_shadow(self):
        # Swap EMA weights into the model, stashing the live ones.
        for name, param in self._trainable():
            assert name in self.shadow
            self.backup[name] = param.data
            param.data = self.shadow[name]

    def restore(self):
        # Put the stashed live weights back and clear the stash.
        for name, param in self._trainable():
            assert name in self.backup
            param.data = self.backup[name]
        self.backup = {}
# EMA with decay 0.999; snapshot the freshly initialized weights.
ema = EMA(model, 0.999)
ema.register()
#%%
from loguru import logger

# Add a per-version file sink for the training log.
logger.add(path+f"logs/log_{version}.log")
#%%
class LpLoss(nn.Module):
    """Relative Lp loss: ||x - y||_p / ||y||_p, taken over all elements.

    Note: `d` and `size_average` are stored for API compatibility but are not
    used by forward. Norms are computed over the whole batch at once (not per
    sample), so the 'mean'/'sum' reductions act on a scalar and are equivalent.
    """

    def __init__(self, d=2, p=2, size_average=True, reduction='mean'):
        super(LpLoss, self).__init__()
        # Dimension and Lp-norm type must be positive.
        assert d > 0 and p > 0

        self.d = d
        self.p = p
        self.reduction = reduction
        self.size_average = size_average

    def forward(self, x, y):
        # Fix: the numerator previously hard-coded the 2-norm while the
        # denominator used self.p; use self.p for both (the default p=2
        # preserves prior behavior).
        diff_norms = torch.norm(x - y, self.p)
        y_norms = torch.norm(y, self.p)

        # Handle the reduction (a no-op on the scalar ratio, kept for API
        # compatibility with callers that pass reduction='sum'/'none').
        if self.reduction == 'mean':
            return torch.mean(diff_norms / y_norms)
        elif self.reduction == 'sum':
            return torch.sum(diff_norms / y_norms)
        else:
            return diff_norms / y_norms
#%%
from torch.cuda import amp # automatic mixed precision (AMP) utilities
from datetime import datetime
from tqdm import tqdm
import time
import shutil
from sklearn.metrics import mean_absolute_error, mean_squared_error
import math,time
# model Constructing
# ========================================================
model = model.cuda()
# criterion = nn.BCEWithLogitsLoss().cuda()
criterion = LpLoss(size_average=True)  # relative L2-norm loss
EPOCHS=400
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)

scaler = amp.GradScaler()
time_list=[time.time()]
# model Training and Saving
# =========================================================
best_loss = np.inf
# Reuse a checkpoint sitting next to the script when present; otherwise train.
# NOTE(review): the "checkpoin" spelling is used consistently here and in the
# reload cell below — do not "fix" one occurrence only.
if os.path.exists(f"best_valid_checkpoin_{version}.pt"):
    shutil.copyfile(f"best_valid_checkpoin_{version}.pt",path+f"models/best_valid_checkpoin_{version}.pt")
else:
    for epoch in range(EPOCHS):
        model.train()
        epoch_loss = 0.0
        for input_embeds,label1 in train_loader:
            optimizer.zero_grad()
            input_embeds,label1 = input_embeds.cuda(),label1.cuda()
            with amp.autocast():
                # ema.model is the live training model; EMA weights are only
                # swapped in via apply_shadow() for evaluation below.
                modelOutput = ema.model(input_embeds)
                loss = criterion(modelOutput, label1)
            scaler.scale(loss).backward()    # scale the loss and backpropagate
            scaler.step(optimizer)    # unscales gradients, then optimizer step
            scaler.update()    # adapt the dynamic loss-scaling factor
            epoch_loss += loss.item() * input_embeds.shape[0]
            ema.update()

        # Evaluate (and checkpoint) with the EMA weights swapped in.
        ema.apply_shadow()
        # model Evaluating
        model.eval()
        with torch.no_grad():
            valid_preds=[]
            valid_labels=[]
            val_loss=0.0
            for input_embeds,label1 in valid_loader:
                input_embeds,label1 = input_embeds.cuda(),label1.cuda()
                val_modelOutput = ema.model(input_embeds)
                val_loss += criterion(val_modelOutput, label1).item()* input_embeds.shape[0]
                valid_preds.append(val_modelOutput.cpu())
                valid_labels.append(label1.cpu())

            # De-standardize before computing metrics in physical units.
            valid_preds=torch.cat(valid_preds,dim=0).numpy()*std+mean
            valid_labels=torch.cat(valid_labels,dim=0).numpy()*std+mean
            mse = mean_squared_error(valid_labels, valid_preds)
            rmse = math.sqrt(mse)
            mae = mean_absolute_error(valid_labels, valid_preds)
            time_list.append(time.time())
            logger.info(f"Epoch [{epoch + 1}/{EPOCHS}] | Loss: {epoch_loss/len(train_dataset):.4f} | Valid loss: {val_loss/len(valid_dataset):.4f} | Valid mse: {mse:.5f} | Valid rmse: {rmse:.5f} | Valid mae: {mae:.5f} | time: {time_list[-1]-time_list[-2]:.4f}")

            #  | lr : {scheduler.get_last_lr()}
            # Checkpoint whenever the validation loss improves.
            if val_loss < best_loss:
                # model saving
                logger.info("model saved")
                best_loss = val_loss
                checkpoint = {
                    "epoch" : epoch,
                    "model_state_dict" : ema.model.state_dict(),
                    "optimizer_state_dict" : optimizer.state_dict(),
                    # "scheduler_state_dict" : scheduler.state_dict(),
                }
                torch.save(checkpoint, path+f"models/best_valid_checkpoin_{version}.pt")


        # Swap the live (non-EMA) weights back for the next training epoch.
        ema.restore()
#%%
# Rebuild the architecture and load the best validation checkpoint.
model=CustomModel()
# Wrap with torch.compile before load_state_dict — the checkpoint was saved
# from the compiled model (ema.model above), so presumably its keys carry the
# compiled-module prefix; verify if loading ever fails with key errors.
model=torch.compile(model)
state_dict=torch.load( path+f"models/best_valid_checkpoin_{version}.pt")['model_state_dict']
model.load_state_dict(state_dict)
model=model.cuda()
#%%
model.eval()
# Fix: the output directory was never created anywhere above (the makedirs
# block at the top only creates data/ subdirectories), so np.save would fail
# on a fresh checkout.
os.makedirs('Example_C', exist_ok=True)
with torch.no_grad():
    # Predict per-vertex pressure for each test mesh and write one .npy per id.
    # (Removed the unused `test_preds` accumulator.)
    for idx, (input_embeds, label1) in enumerate(test_dataset):
        # The dataset yields a single (pos, zero-label) pair; add a batch dim.
        input_embeds = input_embeds.unsqueeze(0).cuda()
        test_modelOutput = model(input_embeds)
        # De-standardize back to physical pressure units.
        test_modelOutput = test_modelOutput.cpu().numpy().reshape(-1) * std + mean
        file_id = test_df['id'][idx]
        np.save(f'Example_C/press_{file_id}.npy', test_modelOutput)

#%%
