# from transformers import AutoTokenizer, AutoModel

# # tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
# # model = AutoModel.from_pretrained("bert-base-uncased")

# # inputs = tokenizer("Hello world!", return_tensors="pt")
# # outputs = model(**inputs)


# from transformers import pipeline
# unmasker = pipeline('fill-mask', model='bert-base-uncased')
# while True:
#     sentence = input()
#     # result = unmasker("The man worked as a [MASK].")
#     result = unmasker(sentence)
#     print(result)

import os
import re
import time
import argparse

import json

import torch
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader

from transformers import BertTokenizer, BertForMaskedLM

# Per-server working-directory roots; the keys double as the valid values
# for the positional `location` CLI argument below.
work_dir_dict = {
    "234-2": "/data/disk5/private/yuc/coref/bert-tagger",
    "cluster": "/home/shiyukai/project/yuc/coref/bert-tagger"
}
# Choices offered to argparse for `location`.
server_list = ["234-2", "cluster"]

# Command-line interface: `location` picks the working-directory root;
# the remaining flags tune batching, logging, and checkpoint intervals.
parser = argparse.ArgumentParser(description="Arguments for relation generation.")
parser.add_argument('location', choices=server_list,
                    help='Indicate the server this script is running on.')
parser.add_argument('--dump', dest='dump_dir', required=True,
                    help='Set directory to dump the data and progress log.'
                    '(relative to the working directory, for example, bert-tagger/)')
parser.add_argument('--log', dest='log_interval', type=int, default=100,
                    help='Set log interval for the process.')
parser.add_argument('--save', dest='save_interval', type=int, default=500,
                    help='Set save interval for the process.')
parser.add_argument('--batch', dest='batch_size', type=int, default=0,
                    help='Batch size used by BERT model. '
                    '0 for dynamic batch size suited to memory.')
parser.add_argument('--memory', type=int, default=16,
                    help="GPU memory(GB). Referred when batch size is set to dynamic(0).")
parser.add_argument("--device", "-d", choices=["cpu", "gpu"], default="gpu", 
                    help="Devices")
parser.add_argument("--model_card", default="bert-base-cased")

args = parser.parse_args()
# Resolve the working directory for the chosen server. The "234-2" default
# is a fallback only; argparse `choices` already restricts `location` to
# keys of work_dir_dict, so the lookup below always succeeds.
WORK_DIR = work_dir_dict["234-2"]
if args.location in work_dir_dict.keys():
    WORK_DIR = work_dir_dict[args.location]

def expected_batch_size(length):
    """Pick a DataLoader batch size for a sentence of `length` subword tokens.

    With --batch 0 (dynamic), a per-GB heuristic (at most 4, shrinking with
    sentence length) is scaled by the --memory setting; otherwise the
    explicit --batch value is returned unchanged.
    """
    if args.batch_size != 0:
        return args.batch_size
    per_gb = min(220 // length, 4)
    return per_gb * args.memory

# Corpus locations relative to the working directory.
FILE_LIST = os.path.join(WORK_DIR, "playground/filelist.txt")
WIKI_DIR = os.path.join(WORK_DIR, "../wikipedia/text")

PARSED_DATA_DIR = os.path.join(WORK_DIR, "../wikipedia/parsed-text")

# Output directory for relation/sentence dumps and the progress log.
DUMP_DIR = os.path.join(WORK_DIR, args.dump_dir)
if os.path.exists(DUMP_DIR) and not os.path.isdir(DUMP_DIR):
    # SystemExit prints the message to stderr and exits with status 1;
    # the old print() + bare exit() reported success (status 0) on error.
    raise SystemExit("Error: dump path refer to a non-directory.")
# exist_ok=True also closes the race between the exists() check and creation.
os.makedirs(DUMP_DIR, exist_ok=True)

print("Global initialization started.")
# Resolve the compute device; fall back to CPU when CUDA is unavailable
# even if --device gpu was requested.
global_device = torch.device('cpu')
if args.device == "gpu" and torch.cuda.is_available():
    global_device = torch.device('cuda')
global_tokenizer = None
global_model = None
if args.location == "cluster":
    # On the cluster, load strictly from a local snapshot
    # (local_files_only=True -- presumably no internet access there).
    PRETRAINED_PATH = os.path.join(WORK_DIR, "pretrained_models", args.model_card)
    MODEL_PATH = os.path.join(PRETRAINED_PATH, "model")
    TOKENIZER_PATH = os.path.join(PRETRAINED_PATH, "tokenizer")
    global_tokenizer = BertTokenizer.from_pretrained(TOKENIZER_PATH, local_files_only=True)
    global_model = BertForMaskedLM.from_pretrained(MODEL_PATH, local_files_only=True).to(global_device)
else:
    # Elsewhere, let transformers download/cache the named model card.
    global_tokenizer = BertTokenizer.from_pretrained(args.model_card)
    global_model = BertForMaskedLM.from_pretrained(args.model_card).to(global_device)
print("Global initialization completed.")

def encode_context_id(file_id, stc_id):
    """Pack (file_id, stc_id) into one integer: stc_id occupies [0, 50000)."""
    packed = stc_id + 50000 * file_id
    return packed

def decode_context_id(context_id):
    """Inverse of encode_context_id: recover the (file_id, stc_id) pair."""
    file_id, stc_id = divmod(context_id, 50000)
    return {"file_id": file_id, "stc_id": stc_id}

# result: masked;   [B N V]
# target: unmasked  [B N V]
# index: golden     [B N V]
# mask: valid token [B N]

def weighted_average(tensor, weight, dim=1):
    """Average `tensor` along `dim`, weighting each element by `weight`."""
    weighted_sum = torch.sum(tensor * weight, dim=dim)
    return weighted_sum / torch.sum(weight, dim=dim)

def index_only_dist(result, target, index, mask):
    """Distance counting only the gold-index entries of the log-prob gap.

    All tensors are [B, N, V] except `mask`, which is [B, N]; the clipped
    (relu) gap is selected by `index` and mask-averaged over tokens.
    """
    seq_dim, vocab_dim = 1, 2
    log_gap = F.log_softmax(target, dim=vocab_dim) - F.log_softmax(result, dim=vocab_dim)
    per_token = torch.sum(F.relu(log_gap) * index, dim=vocab_dim)  # [B, N]
    return weighted_average(per_token, mask, dim=seq_dim)

def kl_divergence_dist(result, target, index, mask):
    """KL(target || result) over the vocabulary, mask-averaged over tokens.

    `index` is accepted for interface parity with the other measures but
    is unused here.
    """
    seq_dim, vocab_dim = 1, 2
    target_log_probs = F.log_softmax(target, dim=vocab_dim)
    result_log_probs = F.log_softmax(result, dim=vocab_dim)
    # sum_v p_target * (log p_target - log p_result)  -> [B, N]
    per_token = torch.sum(
        F.softmax(target, dim=vocab_dim) * (target_log_probs - result_log_probs),
        dim=vocab_dim)
    return weighted_average(per_token, mask, dim=seq_dim)

def js_divergence_dist(result, target, index, mask):
    """Symmetrized KL: the mean of both directed KL divergences.

    NOTE(review): true Jensen-Shannon uses the mixture distribution; this
    is the symmetrized-KL variant, as in the original implementation.
    """
    forward = kl_divergence_dist(result, target, index, mask)
    backward = kl_divergence_dist(target, result, index, mask)
    return 0.5 * (forward + backward)

def cross_entropy_dist(result, target, index, mask):
    """Cross-entropy H(target, result) over the vocabulary, mask-averaged.

    `index` is accepted for interface parity with the other measures but
    is unused here.
    """
    seq_dim, vocab_dim = 1, 2
    per_token = torch.sum(
        F.softmax(target, dim=vocab_dim) * (-F.log_softmax(result, dim=vocab_dim)),
        dim=vocab_dim)
    return weighted_average(per_token, mask, dim=seq_dim)

def inv_softmax(tensor, dim=1):
    """Log of each entry relative to the per-`dim` minimum.

    NOTE(review): despite the name this is not a softmax inverse, and the
    hard-coded unsqueeze(1) assumes dim == 1; zero/negative entries produce
    inf/nan. Currently unused in this file -- confirm before reuse.
    """
    floor = torch.amin(tensor, dim=dim)
    floor = floor.unsqueeze(1)
    return torch.log(tensor / floor)

def default_transform(sentence, device=global_device):
    """Tokenize a raw sentence line into BERT input ids.

    Returns None for lines to skip: empty strings (fix: the old code raised
    IndexError on ""), markup lines starting with "<", and sentences shorter
    than 9 or longer than 127 subword tokens (including [CLS]/[SEP]).
    """
    # Guard empty input before indexing; wiki markup lines start with "<".
    if not sentence or sentence[0] == "<":
        return None
    # Token ids including [CLS]/[SEP]; squeeze away the batch dimension.
    tokens = global_tokenizer(sentence, return_tensors="pt")["input_ids"].to(device)
    tokens = torch.squeeze(tokens)
    # raw_tokens: subword strings aligned with `tokens`, for readable dumps.
    raw_tokens = global_tokenizer.convert_ids_to_tokens(tokens)
    length = tokens.shape[0]
    if length <= 8 or 128 <= length:  # ignore sentences too short or too long
        return None

    # This is the format of a sentence.
    return {
        "tokens": tokens,
        "raw": raw_tokens
    }

# ts = default_transform("To be or not to be, this is the question.")

class SentenceIterable:
    """Iterates (transformed_sentence, file_id, stc_id) over the raw corpus.

    Supports resuming: files before `file_id` are skipped entirely, and
    within the resume file sentences up to and including `stc_id` are
    skipped.
    """

    def __init__(self,
        file_path_list=FILE_LIST,
        file_id=0,
        stc_id=0,
        transform=default_transform,
        device=global_device):
        self.file_id = file_id
        self.stc_id = stc_id
        self.path_to_file_list = file_path_list
        self.device = device
        # Fix: identity comparison (`is None`), not `== None`.
        if transform is None:
            self.transform = default_transform
        else:
            self.transform = transform
        print("SentenceIterable constructed.")

    def __iter__(self):
        return self.sentence_generator()

    def sentence_generator(self):
        """Yield transformed sentences, skipping already-processed ids."""
        with open(self.path_to_file_list, "r") as f_list:
            for file_id, file_path in enumerate(f_list):
                if file_id < self.file_id:
                    continue
                file_path = os.path.join(WIKI_DIR, file_path)
                # strip() removes the trailing newline read from the list file.
                with open(file_path.strip()) as fs:
                    for stc_id, sentence in enumerate(fs):
                        # Resume point: skip sentences already consumed.
                        if file_id == self.file_id and stc_id <= self.stc_id:
                            continue
                        sentence = self.transform(sentence, device=self.device)
                        if sentence is not None:
                            yield (sentence, file_id, stc_id)

def default_transform_with_index_mapping(sentence, device):
    """Tokenize a parsed sentence dict and remap its NP spans to subword indices.

    `sentence` is a dict with keys "sentence" (list of word tokens),
    "file_id", "sent_id", and "np_list" (word-level [start, end) spans) --
    see the caller's comment below. Returns None when the sentence has too
    few (<3) or too many (>=30) NPs, or too few/many subword tokens;
    otherwise returns the dict extended with "tokens" and "raw", with
    "np_list" rewritten to padded (indices, mask) pairs.
    """
    # { "sentence", "file_id", "sent_id", "np_list" }
    if len(sentence["np_list"]) <= 2 or 30 <= len(sentence["np_list"]):
        return None
    tokens = [ 101, ] # [CLS]
    # index_mapping[i] = (start, end): the subword span of word i (end exclusive),
    # offset by 1 for the leading [CLS].
    index_mapping = []
    start, end = 1, 1
    for raw_token in sentence["sentence"]:
        encoded = global_tokenizer.encode(raw_token, add_special_tokens=False)
        tokens.extend(encoded)
        end += len(encoded)
        index_mapping.append((start, end))
        start += len(encoded)
    tokens.append(102)    # [SEP]s
    l = len(tokens)
    if l <= 8 or 128 <= l: # ignore doc that is too long or too short
        return None

    tokens = torch.tensor(tokens, device=device)
    # raw_tokens: parsed into subword but not yet converted to ids
    raw_tokens = global_tokenizer.convert_ids_to_tokens(tokens)
      
    # Map a word index to the start/end subword index of its span.
    index_as_start = lambda x: index_mapping[x][0]
    index_as_end = lambda x: index_mapping[x][1]

    # Convert word-level [x, y) NP spans to subword-level [start, end) spans.
    np_list = [ [index_as_start(x), index_as_end(y-1)] 
                        for x, y in sentence["np_list"] ]
    
    def interval_to_indices(interval, padding):
        # Expand [start, end) into explicit indices zero-padded to `padding`,
        # with a parallel 0/1 validity mask for the padded tail.
        indices = list(range(*interval))
        mask = [1,] * len(indices)
        if len(indices) > padding:
            print("Warning: padding not used.")
            return (indices, mask)
        while len(indices) < padding:
            indices.append(0)
            mask.append(0)
        return (indices, mask)

    # Pad every NP span to the longest one so spans stack into a batch.
    np_max_length = max([y-x for x, y in np_list])
    
    np_list = [ interval_to_indices(x, np_max_length) for x in np_list]
    
    sentence["np_list"] = np_list
    
    # this is the format of a sentence.
    transformed = {
        "tokens": tokens,
        "raw": raw_tokens
    }
    transformed.update(sentence)
    return transformed
    

class ParsedSentenceIterable:
    """Iterates transformed sentences from JSON-lines dumps in PARSED_DATA_DIR.

    On first use, builds filelist.txt by collecting *.dump files sorted by
    their numeric filename. Yields (sentence, file_id, stc_id) triples.
    """

    def __init__(self,
        file_id=0,
        stc_id=0,
        transform=default_transform_with_index_mapping,
        device=global_device):
        self.file_id = file_id
        self.stc_id = stc_id
        self.transform = transform
        self.device = device
        self.path_to_file_list = os.path.join(PARSED_DATA_DIR, "filelist.txt")
        if not os.path.exists(self.path_to_file_list):
            file_list = []
            for root, dirs, files in os.walk(PARSED_DATA_DIR):
                for filename in files:
                    # Fix: require the ".dump" suffix (the old [-4:] check
                    # also matched names merely ending in "dump").
                    if filename.endswith(".dump"):
                        file_list.append(filename)
            # Fix: raw string with escaped dot (the bare "." matched any char).
            pattern = re.compile(r"([1-9][0-9]*)\.dump")
            # Fix: int(), not eval() -- never evaluate filename text as code.
            file_list.sort(key=lambda name: int(pattern.search(name).group(1)))
            with open(self.path_to_file_list, "w") as fl:
                for filename in file_list:
                    fl.write(filename + "\n")

    def __iter__(self):
        return self.sentence_generator()

    def sentence_generator(self):
        """Yield (transformed_sentence, file_id, stc_id), resuming at file_id.

        NOTE(review): unlike SentenceIterable, sentences within the resume
        file are not skipped by stc_id, so resuming re-yields that file's
        earlier sentences -- confirm whether that is intended.
        """
        with open(self.path_to_file_list, "r") as f_list:
            for file_id, file_path in enumerate(f_list):
                if file_id < self.file_id:
                    continue
                file_path = os.path.join(PARSED_DATA_DIR, file_path)
                with open(file_path.strip()) as fs:
                    for stc_id, line in enumerate(fs):
                        # { "sentence", "file_id", "sent_id", "np_list" }
                        sentence = json.loads(line.strip())
                        sentence = self.transform(sentence, self.device)
                        if sentence is not None:
                            yield (sentence, file_id, stc_id)

class QuestionPairIterable(Dataset):
    """Dataset of masked/unmasked question pairs built from one sentence.

    Each item hides one "missing" span (replaced by `miss_placeholder`) and
    additionally masks a second, distinct "masked" span (replaced by
    `mask_placeholder`), so the consumer can compare the model's predictions
    at the missing positions with and without the masked span visible.
    """

    def __init__(self, 
        sentence,
        mask_placeholder="[MASK]",
        miss_placeholder="[MASK]",
        device=global_device):
        # Fix: the original `super(QuestionPairIterable).__init__()` created
        # an unbound super object and never initialized the Dataset base.
        super().__init__()
        self.sentence = sentence["tokens"]
        self.miss_ph = miss_placeholder
        self.mask_ph = mask_placeholder
        self.miss_id = global_tokenizer.convert_tokens_to_ids(miss_placeholder)
        self.mask_id = global_tokenizer.convert_tokens_to_ids(mask_placeholder)
        self.device = device
        self.index_pairs = self.generate_index_pairs()
        self.start = 0
        self.end = len(self.index_pairs)

    def generate_index_pairs(self):
        """All ordered pairs of distinct single-token positions.

        Positions 0 and length-1 ([CLS]/[SEP]) are excluded. Each entry is
        (miss_indices, miss_mask, mask_indices, mask_mask) with length-1
        index lists and all-valid masks, matching the NP-span subclass.
        """
        length = len(self.sentence)
        return [
            ([miss_index], [1], [mask_index], [1])
            for miss_index in range(1, length-1)
                for mask_index in range(1, length-1)
                    if miss_index != mask_index
        ]

    def __len__(self):
        return len(self.index_pairs)

    def __getitem__(self, index):
        """Materialize the index-th question pair as id tensors."""
        missing_indices, miss_idx_mask, masked_indices, mask_idx_mask = self.index_pairs[index]
        # Clone so repeated __getitem__ calls never mutate the source sentence.
        unmasked_question = self.sentence.clone()
        for missing_index in missing_indices:
            unmasked_question[missing_index] = self.miss_id
        # The masked question hides the mask span on top of the missing span.
        masked_question = unmasked_question.clone()
        for masked_index in masked_indices:
            masked_question[masked_index] = self.mask_id

        # this is the format of a question pair.
        return {
            "label": self.sentence,
            "unmasked": unmasked_question, 
            "masked": masked_question, 
            "miss_id": torch.tensor(missing_indices, device=self.device),
            "mask_id": torch.tensor(masked_indices, device=self.device),
            "miss_mask": torch.tensor(miss_idx_mask, device=self.device),
            # "mask_mask": torch.tensor(mask_idx_mask, device=self.device),
        }

class NounQuestionPairIterable(QuestionPairIterable):
    """Question pairs restricted to noun-phrase spans from the parser."""

    def __init__(self, sentence, **args):
        # Stash the NP spans before the base __init__ runs: it calls
        # generate_index_pairs(), which reads self.np_list.
        self.np_list = sentence["np_list"]
        super().__init__(sentence, **args)

    def generate_index_pairs(self):
        """All ordered pairs of distinct NP spans (padded indices + masks)."""
        pairs = []
        for miss_idx, miss_mask in self.np_list:
            for mask_idx, mask_mask in self.np_list:
                if miss_idx == mask_idx:
                    continue
                pairs.append((miss_idx, miss_mask, mask_idx, mask_mask))
        return pairs


def get_batch_size(batch):
    """Return the leading-dimension size of the first tensor in `batch`.

    Falls through to None when the batch dict is empty.
    """
    for tensor in batch.values():
        return tensor.shape[0]
    return None

class QuestionPairConsumer:
    """Scores batched question pairs with a masked language model.

    For each pair it compares the model's distribution at the missing
    positions with vs. without the masked span, using `measure`.
    """

    def __init__(self,
        tokenizer=global_tokenizer,
        model=global_model,
        measure=kl_divergence_dist,
        device=global_device):
        self.tokenizer = tokenizer
        self.model = model
        self.measure = measure
        self.device = device

    def consume_question_pair(self, question_pair):
        """Return per-example distances for one batched question pair.

        Shapes: sentence tensors are [B, L]; index tensors are [B, n];
        model logits are [B, L, V].
        """
        # [B(atch), L(ength of sentence)]
        context = question_pair["label"]
        unmasked = question_pair["unmasked"]
        masked = question_pair["masked"]
        # [B(atch), n(umber of missing tokens)]
        missing_indices = question_pair["miss_id"]
        masked_indices = question_pair["mask_id"]
        miss_idx_mask = question_pair["miss_mask"]
        # mask_idx_mask = question_pair["mask_mask"]

        # Inference only: nothing in this script ever backpropagates, so
        # disable autograd tracking to save activation memory.
        with torch.no_grad():
            # [B(atch), L(ength of sentence), V(ocabulary size)]
            u_logits = self.model(input_ids=unmasked).logits
            m_logits = self.model(input_ids=masked).logits

        missing_label_ids = torch.gather(context, 1, missing_indices) # [B, n]
        answer_shape = list(missing_indices.shape)
        answer_shape.append(u_logits.shape[2])
        missing_indices = missing_indices.unsqueeze(2).expand(answer_shape) # [B, n, V]
        missing_label_ids = missing_label_ids.unsqueeze(2).expand(answer_shape) # [B, n, V]
        ones_template = torch.tensor([[[1.]]], device=self.device).expand(answer_shape) # [B, n, V]
        # golden logits: g_logits[b][n][index[b][n]] = 1, [B, n, V]
        g_logits = torch.scatter(torch.zeros(answer_shape, device=self.device), 2, missing_label_ids, ones_template)
        # unmasked logits restricted to the missing positions [B, n, V]
        u_logits = torch.gather(u_logits, 1, missing_indices)
        # masked logits restricted to the missing positions [B, n, V]
        m_logits = torch.gather(m_logits, 1, missing_indices)
        return self.measure(m_logits, u_logits, g_logits, miss_idx_mask)

class SaveManager:
    """Buffers relation/sentence records and periodically dumps them to disk.

    Also persists a progress log so an interrupted run can resume from the
    last saved (file_id, stc_id). save_interval <= 0 disables all disk I/O
    (in-memory mode).
    """

    def __init__(self,
        dump_dir=DUMP_DIR,
        counter=0,
        log_interval=100,
        save_interval=500):
        # context_id -> raw subword tokens of the sentence
        self.sentence_dict = {}
        # pending relation records, flushed every `save_interval` updates
        self.relation_list = []
        self.log_interval = log_interval
        self.save_interval = save_interval
        self.counter = counter
        self.dump_dir = dump_dir
        self.progress_path = os.path.join(self.dump_dir, "progress.log")
        self.rel_template = os.path.join(dump_dir, "relation_list_cnt_{}.dump")
        self.stc_template = os.path.join(dump_dir, "sentence_dict_cnt_{}.dump")

        self.watch = StopWatch()

    def sort_relation_list(self, key="dist", reverse=True):
        """Sort pending relations by distance or by (context, indices).

        Unknown keys leave the list untouched.
        """
        if key == "dist":
            self.relation_list.sort(key=lambda x: x["distance"], reverse=reverse)
        elif key == "index":
            # Fix: records store the context id under "context" (see
            # update_relation); the old "context_id" key raised KeyError.
            self.relation_list.sort(
                key=lambda x: (x["context"]*1000+x["missing_index"][0])*1000+x["masked_index"][0],
                reverse=reverse
            )

    def save_disabled(self):
        """True when periodic saving / progress tracking is turned off."""
        return self.save_interval <= 0

    def load_progress(self):
        """Load the resume point; returns (file_id, stc_id), (0, 0) if none."""
        if self.save_disabled():
            return (0, 0)

        if os.path.exists(self.progress_path):
            with open(self.progress_path, "r") as p_log:
                progress = json.load(p_log)
                # Restore counters so dump filenames continue the sequence.
                self.save_interval = progress["save_interval"]
                self.counter = progress["counter"]
                return (progress["file_id"], progress["stc_id"])
        return (0, 0)

    def dump_progress(self, context_id):
        """Persist the resume point decoded from `context_id`."""
        if self.save_disabled():
            return

        ids = decode_context_id(context_id)
        with open(self.progress_path, "w") as p_log:
            json.dump({
                "file_id": ids["file_id"],
                "stc_id": ids["stc_id"],
                "counter": self.counter,
                "save_interval": self.save_interval
            }, p_log)

    def save_sentence_list(self):
        """Dump buffered sentences as JSON lines, sorted by context id."""
        sentence_list = [
            {"id": context_id, "context": raw_tokens}
            for context_id, raw_tokens in self.sentence_dict.items()
        ]
        sentence_list.sort(key=lambda x: x["id"])
        save_path = self.stc_template.format(self.counter)
        with open(save_path, "w") as f:
            for sentence in sentence_list:
                f.write(json.dumps(sentence) + "\n")

    def update_sentence(self, sentence, context_id):
        """Remember the raw tokens for `context_id` (idempotent)."""
        self.sentence_dict[context_id] = sentence["raw"]

    def save_relation_list(self):
        """Dump buffered relation records as JSON lines."""
        save_path = self.rel_template.format(self.counter)
        with open(save_path, "w") as f:
            for relation in self.relation_list:
                f.write(json.dumps(relation) + "\n")

    def update_relation(self, sample, distance, context_id):
        """Record one relation; log every log_interval, flush every save_interval."""
        self.relation_list.append({
            "context": context_id,
            "missing_index": sample["miss_id"].tolist(),
            "masked_index": sample["mask_id"].tolist(),
            "distance": float(distance)
        })
        self.counter += 1
        if self.counter % self.log_interval == 0:
            interval = self.watch.tick()
            total = self.watch.total_elapsed()
            print("sentence count: {0}  current speed: {1:.4f} sent/s  speed on average: {2:.4f} sent/s".format(
                    self.counter, self.log_interval / interval, self.counter / total
            ))

        if self.save_disabled():
            return

        if self.counter % self.save_interval == 0:
            print("Save examples.")
            self.save_relation_list()
            self.save_sentence_list()
            self.dump_progress(context_id)
            # Reset buffers; each dump file holds only its own interval.
            self.relation_list = []
            self.sentence_dict = {}

    def update_relation_batched(self, batch, distance, context_id):
        """Unbatch `batch` and record each sample with its distance."""
        batch_size = get_batch_size(batch)
        for index in range(0, batch_size):
            sample = {key: batched_tensor[index]
                      for key, batched_tensor in batch.items()}
            self.update_relation(sample, distance[index], context_id)

    def start_watch(self):
        """Begin timing (must be called before the first tick)."""
        self.watch.start()

    def tick_watch(self):
        """Record a tick and return seconds since the previous tick."""
        return self.watch.tick()

class StopWatch:
    """Accumulates timestamps to measure per-tick and total elapsed time."""

    def __init__(self):
        # Timestamps in seconds since the epoch, oldest first.
        self.ticks = []

    def start(self):
        """Record the initial timestamp."""
        self.ticks.append(time.time())

    def tick(self):
        """Record a timestamp and return seconds since the previous one."""
        now = time.time()
        previous = self.ticks[-1]
        self.ticks.append(now)
        return now - previous

    def total_elapsed(self):
        """Seconds between the first and the most recent timestamp."""
        return self.ticks[-1] - self.ticks[0]

    def clear(self):
        """Forget all recorded timestamps."""
        del self.ticks[:]


def iterate_on_sentence(sentence, transform=default_transform, measure=kl_divergence_dist):
    """Score every question pair of a single raw sentence and print the top 5.

    Saving is disabled (save_interval=0), so results only live in memory.
    NOTE(review): `measure` is accepted but not forwarded to the consumer,
    matching the original behavior -- the consumer uses its own default.
    """
    manager = SaveManager(log_interval=args.log_interval, save_interval=0)
    consumer = QuestionPairConsumer()
    transformed = transform(sentence)

    context_id = encode_context_id(0, 0)
    pair_dataset = QuestionPairIterable(transformed)
    stc_length = len(transformed["raw"])
    loader = DataLoader(pair_dataset,
        batch_size=expected_batch_size(stc_length), num_workers=0)
    manager.start_watch()
    for batch in loader:
        distance = consumer.consume_question_pair(batch)
        manager.update_relation_batched(batch, distance, context_id)

    manager.sort_relation_list(key="dist", reverse=True)
    print(manager.relation_list[:5])


def main():
    """Entry point: walk the parsed corpus and score NP question pairs.

    Resumes from the progress log when one exists, and checkpoints
    periodically via SaveManager.
    """
    manager = SaveManager(
        dump_dir=DUMP_DIR,
        log_interval=args.log_interval,
        save_interval=args.save_interval)
    resume_file_id, resume_stc_id = manager.load_progress()

    dataset = ParsedSentenceIterable(file_id=resume_file_id, stc_id=resume_stc_id)
    # sentence_dataset = SentenceIterable(file_id=last_file_id,stc_id=last_stc_id)
    consumer = QuestionPairConsumer()
    manager.start_watch()
    for sentence, file_id, stc_id in dataset:
        context_id = encode_context_id(file_id, stc_id)
        pair_dataset = NounQuestionPairIterable(sentence)
        stc_length = len(sentence["raw"])
        loader = DataLoader(pair_dataset,
            batch_size=expected_batch_size(stc_length), num_workers=0)
        for batch in loader:
            distance = consumer.consume_question_pair(batch)
            manager.update_sentence(sentence, context_id)
            manager.update_relation_batched(batch, distance, context_id)

# Script entry point: run the full corpus pass.
if __name__ == "__main__":
    main()
    # Ad-hoc single-sentence debugging path:
    # sentence = "This table can be completed with the marginal totals of the two variables"
    # iterate_on_sentence(sentence)
    