import logging
import random
import pandas as pd
import torch

from open_clip import tokenize


def get_word_to_frequency(input_filename, csv_separator: str = '\t'):
    """
    Load the word -> normalized-frequency mapping that corresponds to a given
    ROCO caption CSV.

    Args:
        input_filename: path of the caption CSV; must be one of the known
            ROCO split files below, otherwise a KeyError is raised.
        csv_separator: field separator of the word-count CSV. Defaults to
            '\\t' (presumably matching the project's `args.csv_separator`
            config — TODO confirm against the training args).

    Returns:
        dict mapping word (str) -> norm_frequency (float), read from the
        'word' and 'norm_frequency' columns of the cached word-count CSV.

    Raises:
        KeyError: if `input_filename` is not one of the known split paths.
    """
    # Each caption CSV has a pre-computed word-count CSV cached next to it.
    wordfrequency_path = {
        '/remote-home/share/medical/public/ROCO/test/radiology/processed_test.csv': '/remote-home/weixionglin/vlp/Analysis/ROCO/Frequency/csv_cache/test_wordcount.csv',
        '/remote-home/share/medical/public/ROCO/valid/radiology/processed_valid.csv': '/remote-home/weixionglin/vlp/Analysis/ROCO/Frequency/csv_cache/valid_wordcount.csv',
        '/remote-home/share/medical/public/ROCO/train/radiology/processed_train.csv': '/remote-home/weixionglin/vlp/Analysis/ROCO/Frequency/train_wordcount.csv',
        "/remote-home/weixionglin/vlp/Analysis/ROCO/Frequency/csv_cache/train_disjoint.csv": '/remote-home/weixionglin/vlp/Analysis/ROCO/Frequency/csv_cache/train_wordcount.csv',
        "/remote-home/weixionglin/vlp/Analysis/ROCO/Frequency/csv_cache/valid_disjoint.csv": '/remote-home/weixionglin/vlp/Analysis/ROCO/Frequency/csv_cache/valid_wordcount.csv'
    }[input_filename]
    # BUG FIX: original referenced `self.args.csv_separator` in a module-level
    # function (NameError); the separator is now an explicit parameter.
    df_wordfrequency = pd.read_csv(wordfrequency_path, sep=csv_separator)
    word_frequency = dict(zip(df_wordfrequency['word'].tolist(), df_wordfrequency['norm_frequency'].tolist()))
    return word_frequency

def remove_low_frequency_words(caption: str, word_to_frequency: dict, filter_ratio: float):
    """
    Stochastically drop words from a caption based on word frequency.

    A word is KEPT with probability `word_to_frequency.get(word, 1.0) * filter_ratio`,
    so low-frequency words are more likely to be dropped. Words absent from
    the mapping default to frequency 1.0 (kept with probability filter_ratio).

    NOTE(review): the original docstring described masking with probability
    `filter_ratio * (1 - norm(frequency))`, which does not match this
    implementation; the code's behavior is preserved here as-is.

    Args:
        caption: space-separated caption text.
        word_to_frequency: word -> normalized frequency in [0, 1].
        filter_ratio: global scaling factor for the keep probability.

    Returns:
        The caption with some words removed, re-joined by single spaces.
    """
    kept = []
    for word in caption.split(' '):
        keep_prob = word_to_frequency.get(word, 1.0) * filter_ratio
        if random.random() < keep_prob:
            kept.append(word)
    return ' '.join(kept)

def remove_mlm_words(caption, tokenizer, vocab, mask_token='<MASK>', pad_token='<PAD>', ratio=0.15):
    """
    Apply BERT-style masked-language-model corruption to a caption.

    Each whitespace token is selected for masking with probability `ratio`.
    A selected token is, following the standard BERT recipe:
      - 80%: replaced by `mask_token`
      - 10%: replaced by a random vocab entry (with any '</w>' suffix stripped)
      - 10%: left unchanged (but still counted as masked)

    Args:
        caption: input text; split on whitespace.
        tokenizer: unused; kept for interface compatibility with callers.
        vocab: sequence of vocab strings to sample random replacements from.
        mask_token: token substituted in the 80% branch.
        pad_token: placeholder written to the label sequence for unmasked positions.
        ratio: probability that a token is selected for masking (default 0.15,
            i.e. ~85% of positions get output_mask 0 = untouched).

    Returns:
        dict with keys:
            "tokens": the (possibly corrupted) token list,
            "output_mask": 1 where the token was selected for masking, else 0,
            "bert_label_tokens": original token where masked, else `pad_token`.
    """
    tokens = caption.split()
    output_mask = []
    # Masked positions keep the original word as the label; others get pad_token.
    bert_label_tokens = []
    for i, token in enumerate(tokens):
        prob = random.random()
        if prob < ratio:
            # Rescale prob to [0, 1) to pick the 80/10/10 sub-branch.
            prob /= ratio
            # 80%: replace token with the mask token
            if prob < 0.8:
                tokens[i] = mask_token
            # 10%: replace token with a random vocab token
            elif prob < 0.9:
                tokens[i] = random.choice(vocab).replace('</w>', '')
            # 10%: keep the original token
            else:
                tokens[i] = token
            output_mask.append(1)
            bert_label_tokens.append(token)
        else:
            tokens[i] = token
            output_mask.append(0)
            bert_label_tokens.append(pad_token)
    logging.debug(f"\033[42mtokens:\033[0m {tokens}")
    logging.debug(f"\033[42moutput_mask:\033[0m {output_mask}")

    # FIX: build the result directly instead of the previous eval()-based
    # key lookup (self-flagged "dark magic" HACK).
    return {
        "tokens": tokens,
        "output_mask": output_mask,
        "bert_label_tokens": bert_label_tokens,
    }

def encode_mlm(caption, tokenizer, vocab, mask_token: str, pad_token: str, ratio: float, context_length: int):
    """
    Build the MLM (masked language modeling) input/label pair for a caption.

    Delegates the token corruption to `remove_mlm_words`, then joins the
    corrupted tokens into `bert_input` and the per-position labels
    (original word where masked, `pad_token` elsewhere) into `bert_label`.

    Args:
        caption: input text.
        tokenizer: forwarded to `remove_mlm_words` (currently unused there).
        vocab: vocab list used for random-token replacement.
        mask_token: mask placeholder token.
        pad_token: label placeholder for unmasked positions.
        ratio: per-token masking probability.
        context_length: length to which the internal output_mask is padded /
            truncated. NOTE(review): the padded mask tensor is computed but
            not returned — only logged; kept for behavioral compatibility.

    Returns:
        (bert_input, bert_label): two space-joined strings of equal token count.
    """
    token_result = remove_mlm_words(
        caption=caption,
        tokenizer=tokenizer,
        vocab=vocab,
        mask_token=mask_token,
        pad_token=pad_token,
        ratio=ratio
    )  # Remove words for MLM task

    output_mask = token_result["output_mask"]
    output_mask += [0] * (context_length - len(output_mask))
    output_mask = torch.tensor(output_mask[:context_length])
    # FIX: original passed an int as the logging message plus a stray arg,
    # which triggers a formatting error inside logging; use lazy %-args.
    logging.debug("output_mask (len=%d): %s", len(output_mask), output_mask)

    tokens = token_result["tokens"]
    bert_input = ' '.join(tokens)
    bert_label_tokens = token_result["bert_label_tokens"]
    bert_label = ' '.join(bert_label_tokens)

    return bert_input, bert_label
