import os
import random
import sys

sys.path.append('.')

import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
import pytorch_lightning as pl
from pytorch_lightning import loggers as pl_loggers
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.callbacks.lr_monitor import LearningRateMonitor
from sklearn.metrics import average_precision_score as APS
from sklearn.metrics import precision_recall_fscore_support
from sklearn.model_selection import StratifiedKFold
from torch.utils.data import Dataset, DataLoader
from tqdm import tqdm

from datas import *
from models import *
class CFG:
    # Experiment hyper-parameters (read by the script below).
    EPOCHS: int = 20        # upper bound on training epochs; EarlyStopping may halt sooner
    BATCH_SIZE: int = 128   # NOTE(review): only used by the commented-out DataLoaders below
    LR: float = 1e-3        # learning rate passed to CrossAttentionModel
    WD: float = 0.05        # weight decay passed to CrossAttentionModel

    NBR_FOLDS: int = 15       # total number of CV folds
    SELECTED_FOLDS = [0]      # folds actually trained in this run

    SEED: int = 2024          # RNG seed for reproducibility (see set_seeds)

def set_seeds(seed):
    """Seed all RNG sources (hash, `random`, NumPy, PyTorch) for reproducibility.

    Args:
        seed: integer seed applied to every random-number generator.
    """
    os.environ['PYTHONHASHSEED'] = str(seed)
    random.seed(seed)                 # bug fix: `random` was used without being imported
    np.random.seed(seed)
    torch.manual_seed(seed)
    # Also seed CUDA RNGs; safe no-op on CPU-only machines.
    torch.cuda.manual_seed_all(seed)
    
# ---- Experiment paths ---------------------------------------------------
dirpath = './crossattention_gpu01'
tb_logger = pl_loggers.TensorBoardLogger(save_dir=f"{dirpath}/logs/")
train_path = 'datas/leash-BELKA/train_split.parquet'
valid_path = 'datas/leash-BELKA/valid_split.parquet'
test_path = 'datas/leash-BELKA/test.parquet'
protein_embedding_path = 'datas/leash-BELKA/protein_features.parquet'

# Bug fix: CFG.SEED was declared and set_seeds defined but never invoked,
# so runs were unseeded. Seed before any data sampling / weight init.
set_seeds(CFG.SEED)

# Datasets pair ligand data with precomputed protein embeddings.
# NOTE(review): data_size=100 presumably subsamples for a quick run -- confirm in datas.py.
train_dataset = BalancedEmbedDataset(train_path, protein_embedding_path, data_size=100)
valid_dataset = BalancedEmbedDataset(valid_path, protein_embedding_path, data_size=100)
print(len(train_dataset), len(valid_dataset))

# The LightningModule builds its own dataloaders from the datasets it receives.
model = CrossAttentionModel(lr=CFG.LR, weight_decay=CFG.WD,
                            train_dataset=train_dataset,
                            valid_dataset=valid_dataset)

# Stop after 5 epochs without val_loss improvement; keep the 3 best checkpoints.
early_stop_callback = EarlyStopping(monitor="val_loss", mode="min", patience=5, verbose=True)
checkpoint_callback = ModelCheckpoint(
    monitor="val_loss",
    dirpath=dirpath,
    filename="checkpoint",  # bug fix: was the typo "checkopint" (and a needless f-prefix)
    save_top_k=3,
    mode="min",
)
lr_monitor = LearningRateMonitor(logging_interval='epoch')

trainer = pl.Trainer(
    # precision="16-mixed",
    max_epochs=CFG.EPOCHS,
    callbacks=[early_stop_callback, checkpoint_callback, lr_monitor],
    devices=1,
    accelerator="gpu",  # Adjust based on your hardware
    enable_progress_bar=True,
    logger=tb_logger,
)
trainer.fit(model)