import numpy as np
from sklearn.metrics import f1_score, precision_score, recall_score
import loguru
import argparse
import os
import torch
from sklearn.model_selection import train_test_split

from dataset import build_dataloader


def get_score(y_ture, y_pred):
    """Compute macro-averaged F1, precision and recall, scaled to 0-100.

    Args:
        y_ture: ground-truth labels, any array-like accepted by numpy.
            NOTE(review): the name is a typo of "y_true"; kept as-is for
            backward compatibility with keyword callers.
        y_pred: predicted labels, same length as y_ture.

    Returns:
        Tuple ``(f1, precision, recall)`` of floats in [0, 100].
    """
    truth = np.array(y_ture)
    preds = np.array(y_pred)

    # All three metrics share the same macro/zero_division configuration.
    scores = tuple(
        metric(truth, preds, average='macro', zero_division=0) * 100
        for metric in (f1_score, precision_score, recall_score)
    )
    return scores


def get_args(argv=None):
    """Parse command-line options for this script.

    Args:
        argv: optional list of argument strings to parse. Defaults to
            ``None``, in which case argparse reads ``sys.argv[1:]`` —
            identical to the original behavior. The parameter exists so
            the function can be called programmatically (e.g. in tests).

    Returns:
        argparse.Namespace with:
            test (bool): True when ``--test`` was passed, else False.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--test", action='store_true', default=False)
    args = parser.parse_args(argv)
    return args

def get_logger():
    """Return the shared loguru logger with a timestamped file sink attached.

    The sink writes to ``./log/lstm-{time}.log`` (loguru expands ``{time}``
    at sink creation). NOTE(review): every call adds another sink to the
    same global logger, so calling this more than once per process will
    duplicate log lines — call it exactly once.
    """
    log = loguru.logger
    log.add('./log/lstm-{time}.log')
    return log


def prepare(train):
    """Load preprocessed tensors from disk and build dataloaders.

    Args:
        train: when True, load ``./data/train_data.pt``, drop samples with
            at most 60 tokens, and split 90/10 into train/validation.
            When False, load ``./data/test_data.pt`` (no labels).

    Returns:
        ``(train_loader, val_loader, model_params)`` when ``train`` is True,
        otherwise ``(test_loader, model_params)``.

    Side effects:
        Creates the ``ckpt`` directory if missing; prints a load message.
    """
    # exist_ok avoids the check-then-create race of `exists()` + `makedirs()`.
    os.makedirs('ckpt', exist_ok=True)

    # weights_only=False: the .pt files hold a plain dict of python objects,
    # not just tensors. Only load files you trust (pickle under the hood).
    if train:
        processed_data = torch.load('./data/train_data.pt', weights_only=False)
        text_data = processed_data['train_text']
        labels = processed_data['train_label']
        print('Train data loaded')
    else:
        processed_data = torch.load('./data/test_data.pt', weights_only=False)
        text_data = processed_data['test_text']
        labels = None
        print('Test data loaded')

    num_classes = processed_data['num_classes']
    padding_idx = processed_data['padding_idx']
    vocab_size = processed_data['vocab_size']
    max_size = 1024  # hard cap on sequence length fed to the dataloader

    loader_params = {
        'batch_size': 4,
        'num_workers': 0,
        'padding_idx': padding_idx,
        'max_size': max_size
    }

    model_params = {
        'vocab_size': vocab_size,
        'padding_idx': padding_idx,
        'num_classes': num_classes,
        'max_size': max_size
    }

    if train:
        # Drop training samples whose token sequence is too short (<= 60);
        # presumably very short texts carry too little signal — TODO confirm.
        filter_size = 60
        kept = [
            (text, label)
            for text, label in zip(text_data, labels)
            if len(text) > filter_size
        ]
        text_data = [text for text, _ in kept]
        labels = [label for _, label in kept]

        # Fixed seed keeps the train/val split reproducible across runs.
        train_data, val_data, train_labels, val_labels = train_test_split(
            text_data, labels, test_size=0.1, random_state=42)
        train_loader = build_dataloader(train_data, train_labels, **loader_params)
        val_loader = build_dataloader(val_data, val_labels, **loader_params)
        return train_loader, val_loader, model_params
    else:
        test_loader = build_dataloader(text_data, labels=None, **loader_params)
        return test_loader, model_params
