#%%
import pandas as pd
import polars as pl
import os
from sklearn.model_selection import StratifiedKFold, KFold
os.environ["CUDA_VISIBLE_DEVICES"]='0'

import numpy as np
import random
import torch

path='data/'
version='L3090-B054'
# Create every output directory the script writes into later.
os.makedirs(path+'models',exist_ok=True)
os.makedirs(path+'feature',exist_ok=True)
os.makedirs(path+'feature_importance',exist_ok=True)
os.makedirs(path+'submissions',exist_ok=True)
os.makedirs(path+'submissions/content/gen_answer_A/',exist_ok=True)
# BUG FIX: predictions are saved to (and zipped from) gen_answer_B/ at the
# end of the script, but only gen_answer_A/ was being created, so the final
# np.save calls would fail with FileNotFoundError.
os.makedirs(path+'submissions/content/gen_answer_B/',exist_ok=True)
os.makedirs(path+'logs',exist_ok=True)
#%%
def seed_everything(seed=42):
    """Seed every RNG in use (python, numpy, torch CPU/CUDA, polars) for reproducibility."""
    os.environ['PYTHONHASHSEED'] = str(seed)
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    # Force deterministic cuDNN kernel selection (reproducible, possibly slower).
    torch.backends.cudnn.deterministic = True
    pl.set_random_seed(seed)
seed_everything()
#%%
l1=os.listdir("Dataset/train_track_B/")
l1=sorted(name for name in l1 if 'centroid_' in name)
l1
#%%
# Pull the 4-character sample id out of "centroid_XXXX.npy"-style filenames.
train_df=pd.DataFrame({'id': [name[9:13] for name in l1]})
# Last rows (position 450 onward) are held out for validation.
valid_df=train_df.iloc[450:].reset_index(drop=True)
valid_df
#%%
train_df=train_df.iloc[:450].reset_index(drop=True)
#%%
l1=os.listdir("Dataset/data_test_B/")
l1=sorted(name for name in l1 if 'centroid_' in name)
# NOTE(review): test ids use a 3-character slice ([9:12]) vs 4 for train —
# confirm the test filenames really carry shorter ids.
test_df=pd.DataFrame({'id': [name[9:12] for name in l1]})
test_df

#%%
# Global normalization statistics for the pressure target
# (presumably precomputed over the training set — TODO confirm).
mean,std=-93.13160634703354, 113.59875244405217
#%%
from tqdm import tqdm
# Load every training sample into memory: one polars frame per file holding
# the centroid coordinates (x, y, z) plus the standardized pressure target.
# NOTE(review): the id list was built from "Dataset/train_track_B/" but files
# are read from "Dataset/data_train_B/" — verify both folders share the ids.
train_data={}
for row,file_id in tqdm(enumerate(train_df['id'])):
    pos = np.load(f"Dataset/data_train_B/centroid_{file_id}.npy").astype(np.float32)
    press = np.load(f"Dataset/data_train_B/press_{file_id}.npy").astype(np.float32)
    tmp_df_01=pl.DataFrame(pos)
    tmp_df_01.columns=['x','y','z']
    # Standardize pressure with the global mean/std before storing.
    tmp_df_01=tmp_df_01.with_columns((pl.Series((press-mean)/std)).alias("press"))
    train_data[row]=tmp_df_01
#%%
# Load the held-out validation samples (listing rows 450+) the same way.
# NOTE(review): files are read from "Dataset/data_valid_B/" although the id
# list came from "Dataset/train_track_B/" — verify the ids exist there.
valid_data={}
for row,file_id in tqdm(enumerate(valid_df['id'])):
    pos = np.load(f"Dataset/data_valid_B/centroid_{file_id}.npy").astype(np.float32)
    press = np.load(f"Dataset/data_valid_B/press_{file_id}.npy").astype(np.float32)
    tmp_df_01=pl.DataFrame(pos)
    tmp_df_01.columns=['x','y','z']
    tmp_df_01=tmp_df_01.with_columns((pl.Series((press-mean)/std)).alias("press"))
    valid_data[row]=tmp_df_01
#%%
from torch.utils.data import Dataset, DataLoader
class MyDataset(Dataset):
    """Dataset over a dict of polars frames; every fetch draws a fresh 1% subsample."""

    def __init__(self, data):
        # data: {row index -> polars DataFrame with columns x, y, z, press}
        self.data = data

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        # Re-sample 1% of the points (shuffled) on every access, so each
        # epoch sees a different random subset of each cloud.
        sampled = self.data[index].sample(fraction=0.01, shuffle=True)
        pos = torch.tensor(sampled[['x', 'y', 'z']].to_numpy())
        press = torch.tensor(sampled['press'].to_numpy())
        # All drawn points are real (mask == 1); padding is added in collate_fn.
        attention_mask = torch.ones(len(sampled), dtype=torch.long)
        return pos, attention_mask, press
#%%
# Wrap the in-memory frames; each __getitem__ returns a fresh random subsample.
train_dataset=MyDataset(train_data)
valid_dataset=MyDataset(valid_data)
#%%
from torch.utils.data import DataLoader
def collate_fn(batch):
    """Pad a list of (pos, attention_mask, press) samples into batch tensors.

    Variable-length samples are right-padded with zeros so they stack into a
    single batch; padded positions carry attention_mask == 0.
    """
    pad = torch.nn.utils.rnn.pad_sequence
    positions, masks, targets = zip(*batch)
    return (
        pad(positions, batch_first=True, padding_value=0),
        pad(masks, batch_first=True, padding_value=0),
        pad(targets, batch_first=True, padding_value=0),
    )
#%%
# batch_size=1, so the padding in collate_fn is effectively a no-op here.
# NOTE(review): the train loader has no shuffle=True — samples are visited in
# a fixed order every epoch (randomness comes only from the 1% subsampling).
train_loader=DataLoader(train_dataset, batch_size=1, collate_fn=collate_fn,drop_last=False)
valid_loader=DataLoader(valid_dataset, batch_size=1, collate_fn=collate_fn,drop_last=False)
#%%
from transformers import BertConfig
from modeling_bert_SDPA import BertModel
from torch import nn

class CustomModel(nn.Module):
    """BERT encoder that regresses one scalar (pressure) per input point.

    (x, y, z) coordinates are linearly projected to the hidden size, run
    through the encoder, and projected back down to a single value.
    """

    def __init__(self):
        super(CustomModel, self).__init__()
        cfg = BertConfig()
        # 12-layer, 512-wide encoder with 4 heads and dropout disabled.
        cfg.num_hidden_layers = 12
        cfg.hidden_size = 512
        cfg.num_attention_heads = 4
        cfg.intermediate_size = 2048
        cfg.hidden_dropout_prob = 0.0
        cfg.attention_probs_dropout_prob = 0.0
        # Empty position_embedding_type — presumably skips absolute position
        # embeddings for these unordered point sets; confirm in
        # modeling_bert_SDPA.
        cfg.position_embedding_type = ''
        cfg.max_position_embeddings = 20000
        # inputs_embeds bypasses the token embedding table, so a one-entry
        # vocabulary keeps the unused table minimal.
        cfg.vocab_size = 1
        self.config = cfg
        self.bert = BertModel(cfg)
        self.fc1 = nn.Linear(3, cfg.hidden_size)
        self.fc2 = nn.Linear(cfg.hidden_size, 1)

    def forward(self, x, mask):
        """x: (batch, n_points, 3) coordinates; mask: attention mask or None."""
        projected = self.fc1(x)
        encoded = self.bert(inputs_embeds=projected, attention_mask=mask)[0]
        return self.fc2(encoded).squeeze(dim=-1)

#%%
model=CustomModel()
model = model.cuda()

#%%
from loguru import logger

# Mirror training logs into a per-version file in addition to stderr.
logger.add(path+f"logs/log_{version}.log")
#%%
class LpLoss(nn.Module):
    """Relative Lp loss: ||x - y||_p / ||y||_p over all tensor elements.

    With the defaults (p=2) this is the relative L2 error commonly used for
    field-prediction tasks.  `d` and `size_average` are accepted for
    interface compatibility but unused; the norms are taken over the whole
    tensor, so the result of the division is a scalar and `reduction`
    ('mean'/'sum'/other) leaves the value unchanged.
    """

    def __init__(self, d=2, p=2, size_average=True, reduction='mean'):
        super(LpLoss, self).__init__()
        # Dimension and Lp-norm type must be positive
        assert d > 0 and p > 0

        self.d = d
        self.p = p
        self.reduction = reduction
        self.size_average = size_average

    def forward(self, x, y):
        # FIX: use self.p for both norms.  The numerator previously
        # hard-coded 2 while the denominator used self.p, which was
        # inconsistent for p != 2; behavior is unchanged for the default p=2.
        diff_norms = torch.norm(x - y, self.p)
        y_norms = torch.norm(y, self.p)

        # Handle the reduction (no-op on a scalar ratio; kept for API parity)
        if self.reduction == 'mean':
            return torch.mean(diff_norms / y_norms)
        elif self.reduction == 'sum':
            return torch.sum(diff_norms / y_norms)
        else:
            return diff_norms / y_norms
#%%
import numpy as np
class valid_LpLoss(object):
    """NumPy relative Lp error ||x - y||_p / ||y||_p, used for validation scoring."""

    def __init__(self, d=2, p=2, size_average=True, reduction=True):
        super(valid_LpLoss, self).__init__()
        # Dimension and Lp-norm type must be positive
        assert d > 0 and p > 0

        self.d = d
        self.p = p
        self.reduction = reduction
        self.size_average = size_average


    def rel(self, x, y):
        # FIX: use self.p for both norms (the numerator previously hard-coded
        # 2, inconsistent with the denominator for p != 2; identical for the
        # default p=2 used in this script).
        diff_norms = np.linalg.norm(x - y, self.p)
        y_norms = np.linalg.norm(y, self.p)

        if self.reduction:
            # The ratio is already a scalar, so mean/sum are no-ops kept for
            # interface parity with the torch LpLoss above.
            if self.size_average:
                return np.mean(diff_norms / y_norms)
            else:
                return np.sum(diff_norms / y_norms)

        return diff_norms / y_norms

    def __call__(self, x, y):
        return self.rel(x, y)
#%%
# ----------------------------------------------------------------------
# Training loop: MSE on the standardized pressure with mixed precision
# (AMP), per-epoch StepLR decay, and checkpointing on the best validation
# relative-L2 score.
# ----------------------------------------------------------------------
from torch.cuda import amp # automatic mixed precision (AMP) module
from datetime import datetime
from tqdm import tqdm
import time
from sklearn.metrics import mean_absolute_error, mean_squared_error
import math,time
# model Constructing
# ========================================================
model = model.cuda()
# criterion = nn.BCEWithLogitsLoss().cuda()
criterion = nn.MSELoss().cuda()
EPOCHS=400
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
from torch.optim import lr_scheduler
# Multiply the learning rate by 0.98 after every epoch.
scheduler = lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.98)

scaler = amp.GradScaler()
time_list=[time.time()]
# model Training and Saving
# =========================================================
best_score = np.inf
loss_fn = valid_LpLoss(size_average=True)

for epoch in range(EPOCHS):
    model.train()
    epoch_loss = 0.0
    for pos,attention_mask,press in train_loader:
        optimizer.zero_grad()
        pos,attention_mask,press = pos.cuda(),attention_mask.cuda(),press.cuda()
        with amp.autocast():
            modelOutput = model(pos,attention_mask)
            # Score only real (non-padded) positions, i.e. where mask == 1.
            selected_prediction = torch.masked_select(modelOutput, torch.eq(attention_mask,1))
            selected_target  = torch.masked_select(press, torch.eq(attention_mask,1))
            loss = criterion(selected_prediction, selected_target)
        scaler.scale(loss).backward()    # scale the loss and backpropagate
        scaler.step(optimizer)    # update parameters (gradients unscaled automatically)
        scaler.update()    # adjust the loss-scaling factor via dynamic loss scaling
        # Accumulate loss weighted by the number of scored points.
        epoch_loss += loss.item() * selected_target.shape[0]

    # model Evaluating
    model.eval()
    with torch.no_grad():
        valid_preds=[]
        valid_labels=[]
        val_loss=0.0
        for pos,attention_mask,press in valid_loader:
            pos,attention_mask,press = pos.cuda(),attention_mask.cuda(),press.cuda()
            modelOutput = model(pos,attention_mask)
            selected_prediction = torch.masked_select(modelOutput, torch.eq(attention_mask,1))
            selected_target  = torch.masked_select(press, torch.eq(attention_mask,1))
            val_loss += criterion(selected_prediction, selected_target).item()* selected_prediction.shape[0]
            # De-standardize so the score is computed in original pressure units.
            valid_preds.append(selected_prediction.cpu().numpy()*std+mean)
            valid_labels.append(selected_target.cpu().numpy()*std+mean)

        # Mean relative-L2 error across validation samples (lower is better).
        score=[]
        for idx in range(len(valid_preds)) :
            valid_modelOutput = valid_preds[idx]
            label1=valid_labels[idx]
            l2 = loss_fn(valid_modelOutput,label1)
            score.append(l2)
        score=np.array(score)
        score=np.mean(score)
        time_list.append(time.time())
        logger.info(f"Epoch [{epoch + 1}/{EPOCHS}] | lr: {scheduler.get_last_lr()[0]} | Loss: {epoch_loss/len(train_dataset):.4f} | Valid loss: {val_loss/len(valid_dataset):.4f} | Valid score: {score:.5f} | time: {time_list[-1]-time_list[-2]:.4f}")

        # Checkpoint whenever the validation score improves.
        if score < best_score:
            # model saving
            logger.info("model saved")
            best_score = score
            checkpoint = {
                "epoch" : epoch,
                "model_state_dict" : model.state_dict(),
                "best_score" : best_score,
                # "optimizer_state_dict" : optimizer.state_dict(),
                # "scheduler_state_dict" : scheduler.state_dict(),
            }
            # NOTE(review): filename says "checkpoin" (missing 't') — kept as-is
            # because the reload cell below uses the same spelling.
            torch.save(checkpoint, path+f"models/best_valid_checkpoin_{version}.pt")
    scheduler.step()


#%%
# Rebuild the model and reload the best-scoring validation checkpoint
# (torch.load without weights_only is fine here: the file is self-produced).
model=CustomModel()
# model=torch.compile(model)
state_dict=torch.load( path+f"models/best_valid_checkpoin_{version}.pt")['model_state_dict']
model.load_state_dict(state_dict)
model=model.cuda()
#%%
# Load test centroids (no pressure targets exist at inference time).
test_data={}
for row,file_id in tqdm(enumerate(test_df['id'])):
    pos = np.load(f"Dataset/data_test_B/centroid_{file_id}.npy").astype(np.float32)
    tmp_df_01=pl.DataFrame(pos)
    tmp_df_01.columns=['x','y','z']
    test_data[row]=tmp_df_01
#%%
from sklearn.metrics import mean_absolute_error, mean_squared_error
import math,time
# Chunked, ensembled inference: for each test sample, KFold (10 seeds x
# 100 splits, shuffled) partitions the points into random ~1% chunks,
# mirroring the 1% subsampling used in training.  Each point lands in
# exactly one test fold per seed, so it collects 10 predictions that are
# averaged in the next cell.
model.eval()
test_preds=[]
with torch.no_grad():
    for key,tmp_df_01 in tqdm(test_data.items()):
        # Record each point's original row order ("id") and its sample index
        # ("sample_id") so the chunked predictions can be re-assembled later.
        tmp_df_01=tmp_df_01.with_columns((pl.Series(range(len(tmp_df_01)))).alias("id"))
        tmp_df_01=tmp_df_01.with_columns((pl.Series([key]*len(tmp_df_01))).alias("sample_id"))
        for seed in range(42,52):
            kf=KFold(n_splits=100,random_state=seed,shuffle=True)
            for train_index, test_index in kf.split(tmp_df_01):
                tmp_df_02=tmp_df_01[test_index]
                pos=torch.tensor(tmp_df_02[['x','y','z']].to_numpy()).reshape(1,-1,3).cuda()
                # attention_mask=None: a single un-padded chunk needs no mask.
                val_modelOutput = model(pos,None).reshape(-1)
                tmp_df_02=tmp_df_02.with_columns((pl.Series(val_modelOutput.cpu().numpy())).alias(f"pred"))
                test_preds.append(tmp_df_02)
#%%
test_preds=pl.concat(test_preds)
# Undo the target standardization to return to raw pressure units.
test_preds=test_preds.with_columns((pl.col("pred")*std+mean).alias("pred"))
test_preds
#%%
# Average the 10 per-seed predictions of every point, then restore the
# original point order within each sample.
# NOTE(review): `groupby` is the legacy polars spelling (newer is group_by);
# pinned to whatever polars version this ran under.
test_preds_grp=test_preds[['sample_id','id','pred']].groupby(['sample_id','id']).mean()
test_preds_grp=test_preds_grp.sort(['sample_id','id'], descending=[False,False])
test_preds_grp
#%%
# Write one press_{id}.npy per test sample, preserving per-sample point order
# (maintain_order=True keeps groups in sorted sample_id order).
out_dir = path + 'submissions/content/gen_answer_B/'
# BUG FIX: only .../gen_answer_A/ was created at startup, so writing into
# gen_answer_B/ would raise FileNotFoundError; ensure it exists here.
os.makedirs(out_dir, exist_ok=True)
for key,group in tqdm(test_preds_grp.groupby(['sample_id'], maintain_order=True)):
    # NOTE(review): int() drops any leading zeros from the id — confirm the
    # expected submission filenames are un-padded.
    file_id=int(test_df['id'][key[0]])
    np.save(out_dir + f'press_{file_id}.npy', group['pred'].to_numpy())
#%%
import zipfile
import time
# Bundle the generated submission files into B_result_torch.zip, placing
# every file (flattened) under the archive prefix content/gen_answer_B/.
folder_path=path+f'submissions/content/gen_answer_B/'
with zipfile.ZipFile(f'B_result_torch.zip', 'w', zipfile.ZIP_DEFLATED) as archive:
    for dirpath, _dirnames, filenames in os.walk(folder_path):
        for name in filenames:
            full_path = os.path.join(dirpath, name)
            archive.write(full_path, 'content/gen_answer_B/' + os.path.basename(full_path))
#%%
