
from transformers import get_constant_schedule_with_warmup, get_linear_schedule_with_warmup, get_cosine_schedule_with_warmup
from datasets import load_dataset

from data import Amazon, FakeNewsNet, CoAID, GlueFromHFDataset, TokenizedCollator, PromptTokenzierCollator
from model import BertFineTune, BertPromptTune
from forward_calculator import FinetuneFoward, UnbiasForward


# Verbalizer vocabulary per task: the candidate answer words the prompt-tuned
# model scores at the <mask> position for each label polarity.
# Keys: FND = fake-news detection, SA = sentiment analysis,
# NLI = natural-language inference.
target_words = {
    'FND': {
        'positive_words' : ['true', 'real', 'actual', 'substantial', 'authentic', 'genuine', 'factual', 'correct', 'fact', 'truth'],
        'negative_words' : ['false', 'fake', 'unreal', 'misleading', 'artificial', 'bogus', 'virtual', 'incorrect', 'wrong', 'fault']
    },
    'SA': {
        'positive_words' : ['positive'],
        'negative_words' : ['negative']
    },
    'NLI': {
        'positive_words' : ['entailment'],
        # NOTE(review): two aliases because GLUE tasks label the negative class
        # differently (e.g. "not_entailment" for RTE/QNLI) — confirm per task.
        'negative_words' : ['not_entailment', 'contradiction']
    }
}


# Natural-language prompt template per task; the "<mask>" placeholder is the
# position the MLM head predicts (see PromptTokenzierCollator, which consumes
# these in collator_selection).
templates = {
    'FND': "Here is a piece of news with <mask> information .",
    'SA': "Here is a comment with <mask> polarity .",
    "NLI": "These two sentences are <mask> ."
}

# Maps a "<benchmark>-<task>" dataset name to the two text-feature column
# names of that HuggingFace dataset (used by GlueFromHFDataset in
# data_selection). Column names differ between GLUE and SuperGLUE variants.
feat_names_dict = {
    "super_glue-rte" : ["premise", "hypothesis"],
    "glue-rte" : ["sentence1", "sentence2"],
    "glue-qnli": ["question", "sentence"]
}

def data_selection(data_path, dataset_name, domain=None):
    """Build the dataset object(s) identified by ``dataset_name``.

    Args:
        data_path: Root directory that holds the raw dataset files.
        dataset_name: One of ``"coaid"``, ``"amazon"``, ``"politifact"``,
            ``"gossipcop"``, or a HuggingFace ``"<benchmark>-<task>"`` name
            such as ``"glue-rte"`` / ``"super_glue-rte"``.
        domain: Product domain; only used by the Amazon dataset.

    Returns:
        A single dataset object, or ``[train_data, val_data]`` for the
        GLUE-style datasets loaded from the HuggingFace hub.

    Raises:
        RuntimeError: If ``dataset_name`` is not recognized.
    """
    if dataset_name == "coaid":
        data = CoAID(data_path)
    elif dataset_name == "amazon":
        # Amazon files live in a per-dataset subdirectory of data_path.
        data_path = data_path + "/" + dataset_name
        data = Amazon(data_path, domain)
    elif dataset_name in ["politifact", "gossipcop"]:
        data_path = data_path + "/" + dataset_name
        data = FakeNewsNet(data_path)  # type: ignore
    elif "glue" in dataset_name:
        # e.g. "glue-rte" -> benchmark "glue", subset "rte".
        benchmark_name, data_name = dataset_name.split("-")
        dataset = load_dataset(benchmark_name, data_name)

        feat_names = feat_names_dict[dataset_name]
        train_data = GlueFromHFDataset(dataset['train'], feat_names)  # type: ignore
        val_data = GlueFromHFDataset(dataset['validation'], feat_names)  # type: ignore
        data = [train_data, val_data]
    else:
        # Previously an unknown name fell through to `return data` and died
        # with an unhelpful UnboundLocalError; fail loudly instead.
        raise RuntimeError(f"Unknown dataset_name: {dataset_name!r}")

    return data

def collator_selection(mode, tokenizer, task, use_learnable_token):
    """Pick the batch collator that matches the tuning ``mode``.

    Args:
        mode: Either ``"fine-tune"`` or ``"prompt-tune"``.
        tokenizer: Tokenizer passed through to the collator.
        task: Task key into ``templates`` (e.g. ``"FND"``, ``"SA"``, ``"NLI"``);
            only used in prompt-tune mode.
        use_learnable_token: Whether the prompt collator inserts learnable
            soft-prompt tokens; only used in prompt-tune mode.

    Returns:
        A ``TokenizedCollator`` or ``PromptTokenzierCollator`` instance.

    Raises:
        RuntimeError: If ``mode`` is not one of the two supported values.
    """
    if mode == "fine-tune":
        tokenized_collator = TokenizedCollator(
            tokenizer, token_idx=0, label_idx=1)
    elif mode == "prompt-tune":
        tokenized_collator = PromptTokenzierCollator(tokenizer,
                                                    template=templates[task],
                                                    token_idx=0,
                                                    label_idx=1,
                                                    use_learnable_token=use_learnable_token)  # type: ignore
    else:
        # Name the bad value instead of raising a bare RuntimeError.
        raise RuntimeError(f"Unknown mode: {mode!r} (expected 'fine-tune' or 'prompt-tune')")
    return tokenized_collator

def model_selection(mode, config, nums_label = None, hidden_size = None, dropout = None, vocab_size = None, special_token_ids = None):
    """Instantiate the model that matches the tuning ``mode``.

    Args:
        mode: Either ``"fine-tune"`` or ``"prompt-tune"``.
        config: Dict of hyperparameters; must contain ``'backbone'`` and
            ``'fine_tune_all'``, plus the prompt-tuning flags
            (``'use_learnable_token'``, ``'with_answer_weights'``,
            ``'mlm_pretrained'``, ``'compute_template'``, ``'add_xavg'``)
            in prompt-tune mode.
        nums_label: Number of output labels (fine-tune mode only).
        hidden_size: Classifier hidden size (fine-tune mode only).
        dropout: Classifier dropout rate (fine-tune mode only).
        vocab_size: Tokenizer vocabulary size (prompt-tune mode only).
        special_token_ids: Dict with ``'mask_token_id'``,
            ``'positive_token_ids'`` and ``'negative_token_ids'``
            (prompt-tune mode only).

    Returns:
        A ``BertFineTune`` or ``BertPromptTune`` instance.

    Raises:
        RuntimeError: If ``mode`` is not one of the two supported values.
    """
    if mode == "fine-tune":
        model = BertFineTune(config['backbone'], nums_label, hidden_size, dropout, fine_tune_all=config['fine_tune_all'])

    elif mode == "prompt-tune":
        model = BertPromptTune(backbone=config['backbone'], 
                                vocab_size=vocab_size,
                                mask_token_id=special_token_ids["mask_token_id"],
                                positive_token_ids=special_token_ids["positive_token_ids"],
                                negative_token_ids=special_token_ids["negative_token_ids"],
                                with_learnable_emb=config['use_learnable_token'],
                                with_position_weights=False,
                                with_answer_weights=config['with_answer_weights'],
                                fine_tune_all=config['fine_tune_all'],
                                mlm_pretrained=config['mlm_pretrained'],
                                compute_template=config['compute_template'],
                                add_xavg=config['add_xavg'])
    else:
        # Name the bad value instead of raising a bare RuntimeError.
        raise RuntimeError(f"Unknown mode: {mode!r} (expected 'fine-tune' or 'prompt-tune')")

    return model

def forward_selection(mode, config, loss_fn, metrics_fn):
    """Instantiate the forward-pass calculator for the tuning ``mode``.

    Args:
        mode: Either ``"fine-tune"`` or ``"prompt-tune"``.
        config: Dict of hyperparameters; must contain ``'fp16'``, plus
            ``'compute_template'``, ``'alpha'`` and ``'beta'`` in
            prompt-tune mode.
        loss_fn: Loss function forwarded to the calculator.
        metrics_fn: Metrics function forwarded to the calculator.

    Returns:
        A ``FinetuneFoward`` or ``UnbiasForward`` instance.

    Raises:
        RuntimeError: If ``mode`` is not one of the two supported values.
    """
    if mode == "fine-tune":
        forward_calculator = FinetuneFoward(loss_fn=loss_fn, metrics_fn=metrics_fn, fp16=config['fp16'])

    elif mode == "prompt-tune":
        forward_calculator = UnbiasForward(loss_fn=loss_fn, 
                                            metrics_fn=metrics_fn, 
                                            compute_template=config['compute_template'], 
                                            fp16=config['fp16'],
                                            alpha=config['alpha'], 
                                            beta=config['beta'])
    else:
        # Name the bad value instead of raising a bare RuntimeError.
        raise RuntimeError(f"Unknown mode: {mode!r} (expected 'fine-tune' or 'prompt-tune')")

    return forward_calculator

def scheduler_selection(scheduler, optimizer, num_warmup_steps, num_training_steps = None, num_cycles = 0.5):
    """Build a transformers learning-rate scheduler with warmup.

    Args:
        scheduler: ``"constant"``, ``"linear"`` or ``"cosine"``. The
            historical misspelling ``"cosntant"`` is still accepted so
            existing configs keep working.
        optimizer: Optimizer whose learning rate is scheduled.
        num_warmup_steps: Number of warmup steps.
        num_training_steps: Total training steps; required by the linear
            and cosine schedules.
        num_cycles: Number of cosine cycles (cosine schedule only).

    Returns:
        A ``torch.optim.lr_scheduler.LambdaLR`` from the corresponding
        ``get_*_schedule_with_warmup`` helper.

    Raises:
        RuntimeError: If ``scheduler`` is not a supported name.
    """
    # BUG FIX: the original only matched the misspelling "cosntant", so the
    # correctly spelled "constant" raised RuntimeError. Accept both.
    if scheduler in ("constant", "cosntant"):
        return get_constant_schedule_with_warmup(optimizer=optimizer, 
                                                 num_warmup_steps=num_warmup_steps)
    elif scheduler == "linear":
        return  get_linear_schedule_with_warmup(optimizer=optimizer, 
                                                num_warmup_steps=num_warmup_steps, 
                                                num_training_steps=num_training_steps)
    elif scheduler == "cosine":
        return get_cosine_schedule_with_warmup(optimizer=optimizer, 
                                               num_warmup_steps=num_warmup_steps, 
                                               num_training_steps=num_training_steps,
                                               num_cycles=num_cycles)
    else:
        raise RuntimeError(f"Unknown scheduler: {scheduler!r} (expected 'constant', 'linear' or 'cosine')")