from pytorch_lightning import seed_everything
import numpy as np
import torch

import pandas as pd
from processing_utils import *
from pytorch_widedeep.preprocessing import TabPreprocessor
from pytorch_lightning.loggers import TensorBoardLogger
from models import *
from model_utils import *

# Pin every RNG source so runs are reproducible.
SEED = 625
seed_everything(SEED, workers=True)  # lightning: seeds python/numpy/torch, incl. dataloader workers
np.random.seed(SEED)
torch.cuda.manual_seed_all(SEED)  # cover all CUDA devices explicitly


# Load the preprocessed multi-text dataset (pickled pandas DataFrame).
df = pd.read_pickle('/home/yx/肺部并发症预测/Data/data_multi_text.pkl')  # load data

# Entity vocabularies for the postoperative (po) and preoperative (pd) word columns.
entity_vocab_po = get_vocab(df, 'po_words')
entity_vocab_pd = get_vocab(df, 'pd_words')

# Candidate outcome labels (kept for reference):
# ['死亡' (death), '肺部并发症' (pulmonary complications),
#  '严重心血管不良' (severe adverse cardiovascular events), '急性肾损伤' (acute kidney injury)]
label = '肺部并发症'  # target: pulmonary complications
df_label = get_lable_data(label, df)

# Pop text/entity/label columns off the frame; what remains are the tabular features.
words_po = df_label.pop('po_words')
words_pd = df_label.pop('pd_words')
text_pd = df_label.pop('术前诊断').fillna("无")  # preoperative diagnosis; missing -> "无" (none)
text_po = df_label.pop('拟行手术').fillna("无")  # planned operation; missing -> "无" (none)
y = df_label.pop(label).values

pre_model, text_pd, text_po = get_text_model(text_pd, text_po)

words_ids_pd = get_entity_id(words_pd, entity_vocab_pd)
# BUG FIX: was get_entity_id(words_pd, entity_vocab_po) — the po entity ids were
# built from the *pd* word list against the po vocabulary (words_po was never used).
words_ids_po = get_entity_id(words_po, entity_vocab_po)

# Separate categorical / continuous columns, then strip outliers on the continuous ones.
cat, cont = cat_cont_split(df_label)
df_label_remove = remove_outliers(df_label, cont)

# Discretize each continuous column in place via CART-supervised binning.
method = "cart"
dtype = "numerical"  # invariant across the loop, so hoisted out of it
for col_idx, _ in enumerate(cont):
    binning(df_label_remove, cont, col_idx, method, y, dtype)

# Treat every remaining column as an embedding input for a transformer-style tabular model.
tab_preprocessor = TabPreprocessor(
    embed_cols=df_label_remove.columns,
    for_transformer=True,
)
X_tab = tab_preprocessor.fit_transform(df_label_remove)

# Split all modalities into train+valid vs. test at index 13904.
# NOTE(review): time_split presumably cuts chronologically (earlier rows train,
# later rows test) — confirm against its definition.
(X_tab_train, X_tab_test,
 y_train_valid, y_test,
 text_pd_train, text_pd_test,
 words_ids_pd_train, words_ids_pd_test,
 text_po_train, text_po_test,
 words_ids_po_train, words_ids_po_test) = time_split(
    X_tab, text_pd, text_po, words_ids_pd, words_ids_po, y, 13904)

# Training hyper-parameters.
b_size = 1024          # batch size
lr = 3e-5              # learning rate
epoch = 1000           # max epochs (stopping criteria handled inside get_trainer)
agd = 1                # gradient-accumulation steps
dropout = 0.9          # NOTE(review): 0.9 is unusually aggressive — confirm intentional
weight_decay = 0.01
use_text = True
use_entity = True
use_local_attention = True
use_global_attention = True
# len() is the idiomatic spelling (was entity_vocab_*.__len__()).
vocab_pd_len = len(entity_vocab_pd)
vocab_po_len = len(entity_vocab_po)

# 5-fold CV over the train+valid pool. NOTE(review): the trailing `break` means
# only the FIRST fold ever trains — presumably a deliberate single-run shortcut.
kf = KFold(n_splits=5, shuffle=True, random_state=625)
results = []
n = 0
data_loaders = []
for train_index, valid_index in kf.split(X_tab_train):
    n += 1

    # Build the four loaders for this fold (train / valid / valid-of-test / test).
    (data_loader_train,
     data_loader_valid,
     data_loader_valid_test,
     data_loader_test) = get_Dataloader(
        X_tab_train, text_pd_train, text_po_train,
        np.array(words_ids_pd_train), np.array(words_ids_po_train),
        y_train_valid, train_index, valid_index, b_size,
        X_tab_test, text_pd_test, text_po_test,
        words_ids_pd_test, words_ids_po_test, y_test,
    )
    data_loaders.append(data_loader_valid_test)

    # One TensorBoard run per fold, named after the key hyper-parameters.
    pt_path = "model_checkpoint/test"
    pt_name = f"{n}_lr={lr}_b_size={b_size}_agd={agd}_dropout={dropout}"
    logger = TensorBoardLogger(pt_path, name=pt_name)

    model = NET(
        use_text=use_text,
        use_entity=use_entity,
        use_local_attention=use_local_attention,
        use_global_attention=use_global_attention,
        vocab_pd_len=vocab_pd_len,
        vocab_po_len=vocab_po_len,
        pre_model=pre_model,
        dropout=dropout,
        weight_decay=weight_decay,
        lr=lr,
        column_idx=tab_preprocessor.column_idx,
        embed_input=tab_preprocessor.embeddings_input,
    )
    trainer = get_trainer(agd, logger, epoch)
    trainer.fit(model, data_loader_train, data_loader_valid)
    torch.cuda.empty_cache()  # release fold-local GPU memory before exiting
    break  # stop after the first fold (see note above the loop)
