#%%
import pandas as pd
import polars as pl
import os
from sklearn.model_selection import StratifiedKFold, KFold
os.environ["CUDA_VISIBLE_DEVICES"]='0'

import numpy as np
import random
import torch

path = 'data/'
version = 'L3090-B054'
# Pre-create every output directory used later in this script so that
# np.save / log writes cannot fail with FileNotFoundError.
for sub in (
    'models',
    'feature',
    'feature_importance',
    'submissions',
    'submissions/content/gen_answer_A/',
    # Bug fix: the prediction-save loop at the bottom of this script writes
    # into gen_answer_B/, which was never created (only gen_answer_A was).
    'submissions/content/gen_answer_B/',
    'logs',
):
    os.makedirs(path + sub, exist_ok=True)
#%%
def seed_everything(seed=42):
    """Seed every RNG used by this pipeline for reproducible runs.

    Covers python `random`, the hash seed, numpy, torch (CPU + CUDA) and
    polars' internal sampling RNG.

    Parameters
    ----------
    seed : int, default 42
        Seed applied to all generators.
    """
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    # Generalized from manual_seed: seed ALL visible GPUs, not just the
    # current device (backward-compatible in the single-GPU case).
    torch.cuda.manual_seed_all(seed)
    # Force deterministic cuDNN kernels.
    torch.backends.cudnn.deterministic = True
    pl.set_random_seed(seed)
seed_everything()
#%%
# Collect the centroid files of test set B and derive each sample's
# 3-character id from its filename ("centroid_XXX.npy" -> "XXX").
centroid_files = sorted(f for f in os.listdir("Dataset/data_test_B/") if 'centroid_' in f)
test_df = pd.DataFrame({'id': centroid_files})
test_df['id'] = test_df['id'].str[9:12]
test_df

#%%
# Normalization statistics of the 'press' target (presumably computed on the
# training set — confirm); predictions are de-normalized downstream via
# pred * std + mean.
mean,std=-93.13160634703354, 113.59875244405217
#%%
from torch.utils.data import Dataset, DataLoader
class MyDataset(Dataset):
    """Dataset over polars DataFrames holding columns x, y, z and press.

    Every access draws a FRESH random 1% subsample of the sample's points,
    so repeated reads of the same index yield different subsets.
    """

    def __init__(self, data):
        self.data = data

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        sample = self.data[index]
        # re-drawn on every call: random 1% of the points
        sample = sample.sample(fraction=0.01, shuffle=True)
        coords = torch.tensor(sample[['x', 'y', 'z']].to_numpy())
        pressure = torch.tensor(sample['press'].to_numpy())
        # all sampled points are real tokens -> mask of ones
        attention_mask = torch.ones(len(sample), dtype=torch.long)
        return coords, attention_mask, pressure
#%%
from transformers import BertConfig
from modeling_bert_SDPA import BertModel
from torch import nn

class CustomModel(nn.Module):
    """BERT encoder over point clouds.

    Projects each (x, y, z) point to hidden_size, runs the sequence of
    points through a BERT encoder, and regresses one scalar per point.
    """

    def __init__(self):
        super().__init__()
        self.config = BertConfig(
            vocab_size=1,
            hidden_size=512,
            num_hidden_layers=12,
            num_attention_heads=4,
            intermediate_size=2048,
            hidden_dropout_prob=0.0,
            attention_probs_dropout_prob=0.0,
            max_position_embeddings=20000,
            # empty string: not 'absolute', so HF skips adding position
            # embeddings — the point set is treated as order-invariant
            position_embedding_type='',
        )
        self.bert = BertModel(self.config)
        self.fc1 = nn.Linear(3, self.config.hidden_size)
        self.fc2 = nn.Linear(self.config.hidden_size, 1)

    def forward(self, x, mask):
        """x: (batch, n_points, 3) coordinates; mask: attention mask or None.

        Returns a (batch, n_points) tensor of per-point predictions.
        """
        hidden = self.fc1(x)
        encoded = self.bert(inputs_embeds=hidden, attention_mask=mask)[0]
        return self.fc2(encoded).squeeze(dim=-1)


#%%
# Bug fix: the model was previously constructed twice — the first instance
# was moved to GPU and then immediately discarded, wasting GPU memory.
# Construct it once and load the checkpoint into it.
model = CustomModel()
# model = torch.compile(model)
# map_location='cpu' makes loading robust regardless of the device the
# checkpoint was saved from; the model is moved to GPU afterwards.
state_dict = torch.load(f"best_valid_checkpoin_L3090-B054.pt", map_location='cpu')['model_state_dict']
model.load_state_dict(state_dict)
model = model.cuda()
#%%
from tqdm import tqdm

# Load every test-B centroid point cloud into a polars DataFrame, keyed by
# its row position in test_df.
test_data = {}
for row, file_id in tqdm(enumerate(test_df['id'])):
    coords = np.load(f"Dataset/data_test_B/centroid_{file_id}.npy").astype(np.float32)
    frame = pl.DataFrame(coords)
    frame.columns = ['x', 'y', 'z']
    test_data[row] = frame
#%%
from sklearn.metrics import mean_absolute_error, mean_squared_error
import math,time
model.eval()
test_preds=[]
# Inference by random subsampling: for each sample, partition its points
# into 100 random folds (repeated for 10 different seeds), run the model on
# each ~1% fold independently, and collect per-point predictions.  Because
# KFold test folds partition all indices, every point receives exactly one
# prediction per seed — 10 total — which are averaged downstream.
with torch.no_grad():
    for key,tmp_df_01 in tqdm(test_data.items()):
        # "id": point index within the sample; "sample_id": the sample's
        # row key — both used later for grouping/averaging and re-sorting
        tmp_df_01=tmp_df_01.with_columns((pl.Series(range(len(tmp_df_01)))).alias("id"))
        tmp_df_01=tmp_df_01.with_columns((pl.Series([key]*len(tmp_df_01))).alias("sample_id"))
        for seed in range(42,52):
            kf=KFold(n_splits=100,random_state=seed,shuffle=True)
            for train_index, test_index in kf.split(tmp_df_01):
                # only the held-out ~1% fold is scored on this iteration
                tmp_df_02=tmp_df_01[test_index]
                pos=torch.tensor(tmp_df_02[['x','y','z']].to_numpy()).reshape(1,-1,3).cuda()
                # mask=None: all points in the fold attend to each other
                val_modelOutput = model(pos,None).reshape(-1)
                tmp_df_02=tmp_df_02.with_columns((pl.Series(val_modelOutput.cpu().numpy())).alias(f"pred"))
                test_preds.append(tmp_df_02)
#%%
# Stack all fold predictions and undo the target normalization
# (the model predicts in standardized space).
test_preds = pl.concat(test_preds).with_columns(
    (pl.col("pred") * std + mean).alias("pred")
)
test_preds
#%%
# Average the 10 per-seed predictions for every (sample, point) pair, then
# restore the original point order within each sample so saved arrays align
# with the input point clouds.
test_preds_grp=test_preds[['sample_id','id','pred']].groupby(['sample_id','id']).mean()
test_preds_grp=test_preds_grp.sort(['sample_id','id'], descending=[False,False])
test_preds_grp
#%%
# Bug fix: gen_answer_B/ was never created by the setup cell (only
# gen_answer_A/ was), so np.save below would raise FileNotFoundError.
os.makedirs(path + 'submissions/content/gen_answer_B/', exist_ok=True)
for key, group in tqdm(test_preds_grp.groupby(['sample_id'], maintain_order=True)):
    # key is a 1-tuple (sample_id,); map it back to the original file id.
    # NOTE(review): int() drops any leading zeros in the 3-char id — confirm
    # the grader expects e.g. press_7.npy rather than press_007.npy.
    file_id = int(test_df['id'][key[0]])
    np.save(path + f'submissions/content/gen_answer_B/press_{file_id}.npy', group['pred'].to_numpy())
#%%
import zipfile
import time

# Package all prediction files under a flat content/gen_answer_B/ prefix
# for submission.
folder_path = path + f'submissions/content/gen_answer_B/'
with zipfile.ZipFile(f'B_result_torch.zip', 'w', zipfile.ZIP_DEFLATED) as zipf:
    for root, _, filenames in os.walk(folder_path):
        for filename in filenames:
            full_path = os.path.join(root, filename)
            archive_name = 'content/gen_answer_B/' + os.path.basename(full_path)
            zipf.write(full_path, archive_name)
