
import os
import re
import time
import argparse
import multiprocessing

import json
from transform import *
from measure import *
from utils import SaveManager, encode_context_id, get_batch_size

import torch
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from torch.nn.parallel import DistributedDataParallel
from torch.multiprocessing import Pool, Process, set_start_method

from transformers import BertTokenizer, BertForMaskedLM

def expected_batch_size(length, args):
    """Return the batch size to use for a sentence of ``length`` tokens.

    A non-zero ``args.batch_size`` is taken verbatim; otherwise the batch
    size is derived dynamically from the available GPU memory, capped so
    that longer sentences get smaller batches.
    """
    if args.batch_size:
        return args.batch_size
    # Dynamic mode: scale an empirical per-GB factor by available memory.
    per_gb = min(220 // length, 4)
    return per_gb * args.memory
        
def parse_args(raw_args=None):
    """Parse command-line arguments and derive the project path layout.

    Args:
        raw_args: Optional list of argument strings; when None, argparse
            reads from sys.argv as usual.

    Returns:
        argparse.Namespace with the CLI flags plus derived absolute paths
        attached: work_dir, file_list, wiki_dir, parsed_data_dir, dump_dir.
    """
    work_dir_dict = {
        "234-2": "/data/disk5/private/yuc/coref/bert-tagger",
        "cluster": "/home/shiyukai/project/yuc/coref/bert-tagger"
    }
    server_list = ["234-2", "cluster"]

    parser = argparse.ArgumentParser(description="Arguments for relation generation.")
    parser.add_argument('location', choices=server_list,
                        help='Indicate the server this script is running on.')
    parser.add_argument('--dump', dest='dump_dir', required=True,
                        help='Set directory to dump the data and progress log.'
                        '(relative to the working directory, for example, bert-tagger/)')
    parser.add_argument('--log', dest='log_interval', type=int, default=100,
                        help='Set log interval for the process.')
    parser.add_argument('--save', dest='save_interval', type=int, default=500,
                        help='Set save interval for the process.')
    parser.add_argument('--batch', dest='batch_size', type=int, default=0,
                        help='Batch size used by BERT model. '
                        '0 for dynamic batch size suited to memory.')
    parser.add_argument('--memory', type=int, default=16,
                        help="GPU memory(GB). Referred when batch size is set to dynamic(0).")
    parser.add_argument("--device", "-d", choices=["cpu", "gpu"], default="gpu",
                        help="Devices")
    parser.add_argument("--device_id", nargs="+", type=int, help="Ids of GPUs.")
    parser.add_argument("--model_card", default="bert-base-cased")
    parser.add_argument("--local_files_only", action='store_true',
                        help="Whether to specify local weights for the model.")
    parser.add_argument("--num_workers", type=int, default=0,
                        help="num_workers for dataloder.")
    # argparse falls back to sys.argv when raw_args is None, so no branch
    # on raw_args is needed.
    args = parser.parse_args(raw_args)

    # Resolve the working directory for the given server location,
    # defaulting to the "234-2" layout.
    work_dir = work_dir_dict.get(args.location, work_dir_dict["234-2"])
    dump_dir = os.path.join(work_dir, args.dump_dir)
    if os.path.exists(dump_dir) and not os.path.isdir(dump_dir):
        print("Error: dump path refer to a non-directory.")
        exit()
    if not os.path.exists(dump_dir):
        os.makedirs(dump_dir)
    # Attach the derived paths directly to the namespace. (The original
    # round-tripped them through a second throwaway ArgumentParser, which
    # produced exactly the same attribute assignments.)
    args.work_dir = work_dir
    args.file_list = os.path.join(work_dir, "playground/filelist.txt")
    args.wiki_dir = os.path.join(work_dir, "../wikipedia/text")
    args.parsed_data_dir = os.path.join(work_dir, "../wikipedia/parsed-text")
    args.dump_dir = dump_dir

    return args

class ParsedSentenceIterable:
    """Iterable over parsed sentences stored as ``<N>.dump`` JSON-lines files.

    On first use, a ``filelist.txt`` index (dump filenames sorted by their
    embedded number) is written inside ``parsed_data_dir``. Iteration then
    yields ``(sentence, file_id, stc_id)`` tuples, skipping files before
    ``file_id`` to support resuming.
    """

    def __init__(self, args,
        file_id=0,
        stc_id=0,
        transform=None,
        device=None):
        """
        Args:
            args: Namespace providing ``parsed_data_dir``.
            file_id: Index of the first file to yield from when resuming.
            stc_id: Sentence index for resuming. NOTE(review): stored but
                not currently used to skip sentences within a file.
            transform: Optional callable applied to each parsed sentence;
                sentences mapped to None are dropped.
            device: Stored for interface symmetry; unused here.
        """
        self.parsed_data_dir = args.parsed_data_dir
        self.file_id = file_id
        self.stc_id = stc_id
        self.transform = transform
        self.device = device
        self.path_to_file_list = os.path.join(self.parsed_data_dir, "filelist.txt")
        if not os.path.exists(self.path_to_file_list):
            file_list = []
            for root, dirs, files in os.walk(self.parsed_data_dir):
                for filename in files:
                    if filename.endswith("dump"):
                        file_list.append(filename)
            # Sort numerically by the integer embedded in "<N>.dump".
            # int() replaces the original eval(): same result on digit
            # strings, without executing arbitrary filename content.
            pattern = re.compile(r"([1-9][0-9]*).dump")
            file_list.sort(key=lambda name: int(pattern.search(name).group(1)))
            with open(self.path_to_file_list, "w") as fl:
                for filename in file_list:
                    fl.write(filename + "\n")
        # TODO separate filelist.txt for different process

    def __iter__(self):
        return self.sentence_generator()

    def sentence_generator(self):
        """Yield ``(sentence, file_id, stc_id)`` for every kept sentence."""
        with open(self.path_to_file_list, "r") as f_list:
            for file_id, file_path in enumerate(f_list):
                if file_id < self.file_id:
                    continue
                file_path = os.path.join(self.parsed_data_dir, file_path)
                with open(file_path.strip()) as fs:
                    for stc_id, line in enumerate(fs):
                        # Each line: { "sentence", "file_id", "sent_id", "np_list" }
                        sentence = json.loads(line.strip())
                        if self.transform is not None:
                            sentence = self.transform(sentence)
                        if sentence is not None:
                            yield (sentence, file_id, stc_id)

class QuestionPairIterable(Dataset):
    """Dataset of question pairs derived from one tokenized sentence.

    Each item pairs an "unmasked" question (tokens at the missing indices
    replaced by the miss placeholder) with a "masked" question (which
    additionally masks the tokens at the masked indices), so a model's
    predictions with and without the masked context can be compared.
    """

    def __init__(self, 
        sentence,
        mask_placeholder="[MASK]",
        miss_placeholder="[MASK]",
        device=None, tokenizer=None):
        """
        Args:
            sentence: Dict with a "tokens" list of token ids.
            mask_placeholder: Token whose id replaces masked positions.
            miss_placeholder: Token whose id replaces missing positions.
            device: torch device for every tensor produced here.
            tokenizer: Maps the placeholder tokens to ids.
        """
        # Bug fix: the original called super(QuestionPairIterable).__init__(),
        # which initializes the bare super proxy object and never runs
        # Dataset's initialization.
        super().__init__()
        self.sentence = sentence["tokens"]
        self.miss_ph = miss_placeholder
        self.mask_ph = mask_placeholder
        # TODO is this tokenizer argument needed?
        self.tokenizer = tokenizer
        self.miss_id = self.tokenizer.convert_tokens_to_ids(miss_placeholder)
        self.mask_id = self.tokenizer.convert_tokens_to_ids(mask_placeholder)
        self.device = device
        self.index_pairs = self.generate_index_pairs()
        self.start = 0
        self.end = len(self.index_pairs)

    def generate_index_pairs(self):
        """All ordered pairs of distinct inner positions (first/last token,
        presumably [CLS]/[SEP], excluded)."""
        length = len(self.sentence)
        return [
            ([miss_index], [1], [mask_index], [1])
            for miss_index in range(1, length - 1)
            for mask_index in range(1, length - 1)
            if miss_index != mask_index
        ]

    def __len__(self):
        return len(self.index_pairs)

    def __getitem__(self, index):
        """Build one question pair as a dict of tensors."""
        missing_indices, miss_idx_mask, masked_indices, mask_idx_mask = self.index_pairs[index]
        unmasked_question = list(self.sentence)
        for missing_index in missing_indices:
            unmasked_question[missing_index] = self.miss_id
        masked_question = list(unmasked_question)
        for masked_index in masked_indices:
            masked_question[masked_index] = self.mask_id

        tensorize = lambda x: torch.tensor(x, device=self.device)
        # This is the format of a question pair.
        return {
            "label": tensorize(self.sentence),
            "unmasked": tensorize(unmasked_question),
            "masked": tensorize(masked_question),
            "miss_id": tensorize(missing_indices),
            "mask_id": tensorize(masked_indices),
            "miss_mask": tensorize(miss_idx_mask),
        }

    def optimized_collate_fn(self, examples):
        """Collate examples, deduplicating consecutive unmasked questions.

        Consecutive examples sharing the same miss indices reuse one
        stored unmasked question. In the returned batch, "unmasked" and
        "masked" become integer row pointers into "question_batch".
        """
        questions = []
        miss_ids = []
        last_unmasked_ptr = 0
        for example in examples:
            miss_id = example["miss_id"]
            if len(miss_ids) == 0 or not torch.all(miss_id == miss_ids[-1]):
                # New unmasked question: store it and remember its row index.
                questions.append(example["unmasked"])
                miss_ids.append(miss_id)
                example["unmasked"] = torch.tensor(len(questions) - 1, device=self.device)
                last_unmasked_ptr = example["unmasked"]
            else:
                # Same miss indices as the previous example: reuse its row.
                example["unmasked"] = last_unmasked_ptr
            questions.append(example["masked"])
            miss_ids.append(miss_id)
            example["masked"] = torch.tensor(len(questions) - 1, device=self.device)
        batch = { key: torch.stack([ ele[key] for ele in examples ]) for key in examples[0] if key != "label" }
        # All examples in a batch come from the same sentence.
        batch["label"] = examples[0]["label"]
        batch["question_batch"] = torch.stack(questions)
        batch["miss_id_batch"] = torch.stack(miss_ids)
        return batch
        
class NounQuestionPairIterable(QuestionPairIterable):
    """Question pairs restricted to the sentence's noun-phrase spans."""

    def __init__(self, sentence, **kwargs):
        # np_list must be assigned before the base initializer runs,
        # because it calls generate_index_pairs().
        self.np_list = sentence["np_list"]
        super().__init__(sentence, **kwargs)

    def generate_index_pairs(self):
        """All ordered pairs of distinct noun-phrase (indices, mask) groups."""
        pairs = []
        for miss_idx, miss_mask in self.np_list:
            for mask_idx, mask_mask in self.np_list:
                if miss_idx != mask_idx:
                    pairs.append((miss_idx, miss_mask, mask_idx, mask_mask))
        return pairs
        
class CompressedQuestionPairConsumer:
    """Scores collated question-pair batches with a masked language model.

    Consumes the compressed batches produced by
    ``QuestionPairIterable.optimized_collate_fn``: the deduplicated question
    matrix is run through the model once, and per-example logits are
    recovered by indexing with the stored "unmasked"/"masked" row pointers.
    """

    def __init__(self, tokenizer, model, measure, device):
        # tokenizer is stored but not used below; kept for interface symmetry.
        self.tokenizer = tokenizer
        # model: masked LM whose output exposes .logits — assumed shape
        # [questions, sentence length, vocab size]; TODO confirm.
        self.model = model
        # measure: callable(masked_logits, unmasked_logits, gold_logits,
        # miss_idx_mask) -> distance (e.g. js_divergence_dist).
        self.measure = measure
        self.device = device
  
    def consume_question_pair(self, question_pair):
        """Compute ``self.measure`` over one collated batch.

        Shape legend: Q = deduplicated question count, B = example count,
        L = sentence length, V = vocab size, n = missing tokens per example.

        Args:
            question_pair: Batch dict from ``optimized_collate_fn`` with
                keys "question_batch", "miss_id_batch", "miss_mask",
                "unmasked", "masked", "label".

        Returns:
            Whatever ``self.measure`` returns: a distance between the
            masked and unmasked predictions at the missing positions.
        """
        # [Q(uestion count deduplicated), L(ength of sentence)]
        questions = question_pair["question_batch"]        
        # [Q, n(umber of missing tokens)]
        missing_indices = question_pair["miss_id_batch"]
        # [B, n]
        miss_idx_mask = question_pair["miss_mask"]
        # [B] — row pointers into the deduplicated question batch
        unmasked = question_pair["unmasked"]
        masked = question_pair["masked"]
        question_num = questions.shape[0]
        # [L(ength of sentence)] — the original (un-corrupted) token ids
        context = question_pair["label"]
        # [Q, L] — one copy of the label row per question (expand, no copy)
        context = context.unsqueeze(0).expand(question_num, -1)

        # [Q, L, V]
        logits = self.model(input_ids=questions).logits
        missing_label_ids = torch.gather(context, 1, missing_indices) # [Q, n]
        # answer_shape = [Q, n, V]
        answer_shape = list(missing_indices.shape)
        answer_shape.append(logits.shape[2])
        missing_indices = missing_indices.unsqueeze(2).expand(answer_shape) # [Q, n, V]
        missing_label_ids = missing_label_ids.unsqueeze(2).expand(answer_shape) # [Q, n, V]
        ones_template = torch.tensor([[[1.]]], device=self.device).expand(answer_shape) # [Q, n, V]
        # golden logits: one-hot on the gold token, g_logits[q][i][label_id] = 1, [Q, n, V]
        g_logits = torch.scatter(torch.zeros(answer_shape, device=self.device), 2, missing_label_ids, ones_template)
        # predicted logits gathered at the missing positions, [Q, n, V]
        p_logits = torch.gather(logits, 1, missing_indices)
        # per-example unmasked-question logits, [B, n, V]
        u_logits = torch.index_select(p_logits, 0, unmasked)
        # per-example masked-question logits, [B, n, V]
        m_logits = torch.index_select(p_logits, 0, masked)
        return self.measure(m_logits, u_logits, g_logits, miss_idx_mask) 

def run_tagging(args):
    """Run the tagging pipeline: load BERT, iterate parsed sentences,
    score noun-phrase question pairs, and persist results via SaveManager.

    Args:
        args: Namespace from parse_args() (device, paths, intervals, ...).
    """
    print("Global initialization started.")
    device = torch.device('cpu')
    use_cuda = args.device == "gpu" and torch.cuda.is_available()

    if use_cuda:
        if args.num_workers > 0:
            print("Try initializng multiprocessing.")
            try:
                # CUDA tensors in DataLoader workers require 'spawn'.
                set_start_method('spawn')
            except RuntimeError:
                print("Failed to initialize multiprocessing.")
                args.num_workers = 0
        if args.device_id is not None:
            device = torch.device('cuda:' + str(args.device_id[0]))
        else:
            device = torch.device('cuda')
    # Load tokenizer/model from local files or from the model hub.
    if args.local_files_only or args.location == "cluster":
        PRETRAINED_PATH = os.path.join(args.work_dir, "pretrained_models", args.model_card)
        MODEL_PATH = os.path.join(PRETRAINED_PATH, "model")
        TOKENIZER_PATH = os.path.join(PRETRAINED_PATH, "tokenizer")
        tokenizer = BertTokenizer.from_pretrained(TOKENIZER_PATH, local_files_only=True)
        model = BertForMaskedLM.from_pretrained(MODEL_PATH, local_files_only=True)
    else:
        tokenizer = BertTokenizer.from_pretrained(args.model_card)
        model = BertForMaskedLM.from_pretrained(args.model_card)
    print(args.device_id)
    # Bug fix: guard against args.device_id being None (the flag is
    # optional) before calling len() on it.
    if use_cuda and args.device_id is not None and len(args.device_id) > 1:
        model = torch.nn.DataParallel(model, device_ids=args.device_id, output_device=device)
    model.to(device)
    print("Global initialization completed.")

    save_manager = SaveManager(
        dump_dir=args.dump_dir,
        log_interval=args.log_interval,
        save_interval=args.save_interval)
    # Resume from the last saved position, if any.
    last_file_id, last_stc_id = save_manager.load_progress()

    transform = IndexMappingTransform(device, tokenizer)
    sentence_dataset = ParsedSentenceIterable(
        args,
        file_id=last_file_id,
        stc_id=last_stc_id,
        transform=transform,
        device=device)
    consumer = CompressedQuestionPairConsumer(
        tokenizer=tokenizer, model=model, measure=js_divergence_dist,
        device=device)
    save_manager.start_watch()
    for sentence, file_id, stc_id in sentence_dataset:
        context_id = encode_context_id(file_id, stc_id)
        question_pair_dataset = NounQuestionPairIterable(sentence, device=device, tokenizer=tokenizer)
        length = len(sentence["raw"])
        dataloader = DataLoader(question_pair_dataset,
            batch_size=expected_batch_size(length, args),
            collate_fn=question_pair_dataset.optimized_collate_fn,
            num_workers=args.num_workers)
        for sample_batched in dataloader:
            distance = consumer.consume_question_pair(sample_batched)
            save_manager.update_sentence(sentence, context_id)
            save_manager.update_relation_batched(sample_batched, distance, context_id)


def main():
    """Script entry point: parse CLI arguments and launch tagging."""
    run_tagging(parse_args())

if __name__ == "__main__":
    main()