import os
import re
import time
from numpy.core.defchararray import lower
from tqdm import tqdm
import random
import logging
import argparse

from math import exp
import numpy as np

import json

import matplotlib.pyplot as plt


"""

data form in parsed_text:

Here the file id and sent id are of the original raw data (wikipedia/text).
file id is line id in the [REPO]/filelist.txt.
sent id is just line id in file.

per line of text dump:
{
    "sentence": sentence (tokenized but not encoded), 
    "file_id": file_id, 
    "sent_id": sent_id,
    "np_list": list of intervals representing nouns
}

data form in dump_[measure]_para_noun:

Here the context id is from the file id and sent id of NP data.
    (wikipedia/parsed_text)
file id is line id in the parsed_text/filelist.txt.
sent id is just line id in file.

per line of sentence_list:
{
    "id": context id,
    "context": raw_tokens
}

per line of relation_list:
{
    "context": context id,
    "missing_index": a list of missing indices (zero-padded)
    "masked_index": a list of masked indices (zero-padded)
    "distance": distance
}

training data:

per line of training data:
{
    "context": raw tokens(cased),
    "candidates": List([start, end]) (inclusive interval),
    "relations": List([miss_idx, mask_idx]) (idx of interval in candidates)
}

A loaded example should look like
(
    [
        input_ids, 
        input_mask, 
        candidate_starts, 
        candidate_ends, 
        candidate_mask
    ], # input
    [
        relations, 
        relation_mask
    ]  # label
) and every element should be a tensor.
"""
def encode_context_id(file_id, sent_id):
    """Pack a (file_id, sent_id) pair into one integer context id.

    Assumes sent_id < 50000 so decode_context_id can invert it.
    """
    return sent_id + 50000 * file_id

def decode_context_id(context_id):
    """Invert encode_context_id into its file/sentence components."""
    file_id, sent_id = divmod(context_id, 50000)
    return {"file_id": file_id, "sent_id": sent_id}

class DumpFileIterable:
    """Iterates dumped relation/sentence file pairs and yields one record
    per context, grouping consecutive relations that share a context id.

    Assumes relation files are ordered so that relations of the same
    context are adjacent (the group may span file boundaries, which is
    why the accumulator carries across counters).
    """

    def __init__(self, 
        start, end, step,
        dump_dir,
        mode="context"):
        # Counters select file pairs:
        #   relation_list_cnt_<counter>.dump / sentence_dict_cnt_<counter>.dump
        self.counters = range(start, end, step)
        self.dump_dir = dump_dir
        self.mode = mode
    
    def __iter__(self):
        if self.mode == "context":
            return self.context_iterator()
        else:
            raise NotImplementedError

    def context_iterator(self):
        """Yield {"id", "context", "relations"} dicts, one per context."""
        current_context_id = -1  # sentinel: nothing accumulated yet
        current_context = None
        current_rel_list = []
        for counter in tqdm(self.counters):
            rel_path = os.path.join(self.dump_dir, "relation_list_cnt_{}.dump".format(counter))
            stc_path = os.path.join(self.dump_dir, "sentence_dict_cnt_{}.dump".format(counter))
            with open(rel_path, "r") as rel_file, open(stc_path, "r") as stc_file:
                # Map context id -> raw token context for this file pair.
                stc_list = [ json.loads(line.strip()) for line in stc_file ]
                stc_list = { stc["id"]:stc["context"] for stc in stc_list }
                for line in rel_file:
                    sent = json.loads(line.strip())
                    context_id = sent["context"]
                    if context_id != current_context_id:
                        # A new context starts: emit the finished group.
                        if current_context_id != -1:
                            yield {
                                "id": current_context_id,
                                "context": current_context, 
                                "relations": current_rel_list
                            }
                        current_context_id = context_id
                        current_context = stc_list[current_context_id]
                        current_rel_list = []
                    current_rel_list.append(sent)
        # BUG FIX: flush the final accumulated context, which the
        # original loop silently dropped.
        if current_context_id != -1:
            yield {
                "id": current_context_id,
                "context": current_context,
                "relations": current_rel_list
            }

class ParsedTextIterable:
    """Iterates *.dump files of parsed text in numeric order, yielding one
    record per sentence line.

    file_id is the position of the file in the numerically sorted list,
    sent_id the line number within the file.
    """

    def __init__(self,
        text_dir):
        self.text_dir = text_dir
        # Counter extracted from names like "123.dump" (dot escaped so it
        # cannot match an arbitrary character).
        self.pattern = re.compile(r"([1-9][0-9]*)\.dump")
        # Keep only dump files and sort them by their numeric counter so
        # file_id matches the line id in filelist.txt.
        self.file_list = [
            file_name for file_name in os.listdir(self.text_dir)
            if file_name.endswith("dump")
        ]
        self.file_list.sort(key=self.file_name_key)

    def file_name_key(self, file_name):
        # int() instead of eval(): same value, no code-execution risk.
        return int(self.pattern.search(file_name).group(1))

    def __iter__(self):
        return self.text_iterator()

    def text_iterator(self):
        """Yield {"sentence", "file_id", "sent_id", "np_list"} per line."""
        for file_id, file_name in enumerate(self.file_list):
            file_path = os.path.join(self.text_dir, file_name)
            with open(file_path, "r") as f:
                for sent_id, line in enumerate(f):
                    sent = json.loads(line.strip())
                    yield {
                        "sentence": sent["sentence"],
                        "file_id": file_id,
                        "sent_id": sent_id,
                        "np_list": sent["np_list"]
                    }

def tokenize(sentence, tokenizer, 
        return_map=False, return_inv_map=False):
    """Encode a pre-split sentence with `tokenizer`, adding [CLS]/[SEP].

    Returns a dict with "tokens" (token strings) plus, optionally:
      - "map": per raw token, its (start, end) span in the wordpiece
        sequence (end exclusive);
      - "inv_map": per wordpiece position, the raw-token index it came
        from (None for the [CLS]/[SEP] specials).
    """
    ids = [101]  # [CLS]
    span_map = []                                   # raw -> tokenized
    inverse_map = [None] if return_inv_map else []  # tokenized -> raw
    cursor = 1
    for raw_idx, word in enumerate(sentence):
        pieces = tokenizer.encode(word, add_special_tokens=False)
        ids.extend(pieces)
        nxt = cursor + len(pieces)
        if return_map:
            span_map.append((cursor, nxt))
        if return_inv_map:
            inverse_map.extend([raw_idx] * len(pieces))
        cursor = nxt
    ids.append(102)  # [SEP]
    if return_inv_map:
        inverse_map.append(None)  # [SEP] maps to nothing
        assert(len(inverse_map) == len(ids))
    result = {"tokens": tokenizer.convert_ids_to_tokens(ids)}
    if return_map:
        result["map"] = span_map
    if return_inv_map:
        result["inv_map"] = inverse_map
    return result

def convert_uncased_to_cased(
        context_data, 
        uncased_tokenizer,
        cased_tokenizer
    ):
    """Re-tokenize the sentence with the cased tokenizer and remap every
    relation's token indices from the uncased to the cased wordpieces.

    Mutates context_data in place and returns it, or returns None when
    the uncased tokenization no longer matches the stored context or a
    relation loses all its indices in the remapping.
    """
    uncased = tokenize(
        context_data["sentence"],
        tokenizer=uncased_tokenizer,
        return_inv_map=True
    )
    if len(context_data["context"]) != len(uncased["tokens"]):
        print("Tokens mismatch.")
        return None
    inv_map = uncased["inv_map"]
    cased = tokenize(
        context_data["sentence"],
        tokenizer=cased_tokenizer,
        return_map=True
    )
    idx_map = cased["map"]

    def remap_indices(indices):
        # Strip zero padding, map uncased wordpiece positions back to raw
        # token indices (dropping the specials), then expand each raw
        # index into its cased wordpiece span.
        raw = {
            inv_map[idx]
            for idx in indices
            if idx != 0 and inv_map[idx] is not None
        }
        remapped = []
        for orig in sorted(raw):
            lo, hi = idx_map[orig]
            remapped.extend(range(lo, hi))
        return remapped

    for relation in context_data["relations"]:
        miss_idx = remap_indices(relation["missing_index"])
        mask_idx = remap_indices(relation["masked_index"])
        if not miss_idx or not mask_idx:
            print(context_data["context"])
            print(cased["tokens"])
            print(relation)
            print(idx_map)
            return None
        relation["missing_index"] = miss_idx
        relation["masked_index"]  = mask_idx
    context_data["context"] = cased["tokens"]
    return context_data

def analyze_data(context_data, args):
    """Compute filtered distance distributions for one context.

    Sorts context_data["relations"] in place by descending distance
    (callers rely on the ordering), then returns
    (count, normalized_dist, norm_color, log_dist, log_color) where the
    two distance arrays keep only values above args.norm_min /
    args.log_min respectively and the color lists enumerate them.

    BUG FIX: the original body had statements after the return
    (mean/max/min printing and a top-k dump) that could never execute;
    that dead code — and the stats computed only for it — is removed.
    """
    relations = context_data["relations"]
    relations.sort(key=lambda rel: rel["distance"], reverse=True)
    distances = np.array([rel["distance"] for rel in relations])
    count = len(relations)

    # Distances normalized by the maximum, filtered by norm_min.
    normalized_dist = distances / np.amax(distances)
    normalized_dist = np.compress(normalized_dist > args.norm_min, normalized_dist)
    norm_color = list(range(len(normalized_dist)))

    # Log-scale distances, filtered by log_min.
    log_dist = np.log(distances)
    log_dist = np.compress(log_dist > args.log_min, log_dist)
    log_color = list(range(len(log_dist)))

    return count, normalized_dist, norm_color, log_dist, log_color

def sample_data(context_data, args):
    """Select the longest-distance relations of a context and package
    them as one training example.

    Keeps at most min(args.max_span_num, len(np_list)) relations, cut
    further at the first relation whose distance falls below the looser
    of the two thresholds (relative norm_min or absolute exp(log_min)).
    Sorts context_data["relations"] in place, descending by distance.
    """
    relations = context_data["relations"]
    relations.sort(key=lambda rel: rel["distance"], reverse=True)
    limit = min(args.max_span_num, len(context_data["np_list"]))
    longest = relations[0]["distance"]
    # A relation survives if it clears either threshold, hence min().
    cutoff = min(longest * args.norm_min, exp(args.log_min))
    for position, relation in enumerate(relations):
        if relation["distance"] < cutoff:
            limit = min(position, limit)
            break
    training_data = convert_relations_for_training(relations[:limit])
    training_data["context"] = context_data["context"]
    return training_data

def convert_relations_for_training(relations):
    """
    Convert relations (a subset of context_data["relations"]) into
    deduplicated candidate spans plus index pairs referring to them.

    Returns {"candidates": [(start, end), ...] (inclusive spans),
             "relations": [(miss_idx, mask_idx), ...]}.
    Relations whose index list is all zero padding are reported and
    skipped.
    """
    candidates = {}

    def get_candidate_index(indices):
        # Strip zero padding; the span runs from the first to the last
        # real index (inclusive). Raises IndexError on all-padding input.
        real = [index for index in indices if index != 0]
        interval = (real[0], real[-1])
        # setdefault hands out the next sequential id on first sight,
        # so insertion order equals index order.
        return candidates.setdefault(interval, len(candidates))

    new_relations = []
    for relation in relations:
        try:
            miss_idx = get_candidate_index(relation["missing_index"])
            mask_idx = get_candidate_index(relation["masked_index"])
        except IndexError:
            # Best-effort: report the unusable relation and keep going.
            print(relation)
            continue
        new_relations.append((miss_idx, mask_idx))
    # Dict keys in insertion order are exactly the spans by index.
    spans = list(candidates)
    return {
        "candidates": spans, 
        "relations": new_relations
    }
        
class SaveManager:
    """Buffers training sentences and flushes them to numbered jsonlines
    files in dump_dir every `save_interval` sentences.

    save_interval <= 0 disables saving entirely.
    """

    def __init__(self, 
            dump_dir,
            save_interval):
        self.dump_dir = dump_dir
        self.save_interval = save_interval
        self.counter = 0          # total sentences seen so far
        self.sentence_list = []   # sentences not yet flushed
        self.stc_template = os.path.join(dump_dir, "data_cnt_{:08d}.jsonlines")

    def update_sentence(self, sentence):
        """Buffer one sentence; flush when the interval boundary is hit."""
        self.sentence_list.append(sentence)
        self.counter += 1

        if self.save_interval > 0 and self.counter % self.save_interval == 0:     
            self.save_sentence_list()

    def save_sentence_list(self):
        """Write buffered sentences to data_cnt_<counter>.jsonlines.

        No-op when saving is disabled or the buffer is empty. BUG FIX:
        the empty-buffer guard prevents a final explicit call from
        re-opening the file just written by an interval-triggered flush
        and truncating it to nothing.
        """
        if self.save_interval <= 0 or not self.sentence_list:
            return
        save_path = self.stc_template.format(self.counter)
        with open(save_path, "w") as f:
            for sentence in self.sentence_list:
                f.write(json.dumps(sentence) + "\n")
        self.sentence_list = []
    

def parse_args(args=None):
    """Build and run the CLI argument parser.

    When `args` is None the process argv is parsed (normal script use);
    tests pass an explicit list instead.
    """
    server_list = ["234-2", "cluster"]
    parser = argparse.ArgumentParser(description="Arguments for relation generation.")
    parser.add_argument('location', choices=server_list,
                        help='Indicate the server this script is running on.')
    parser.add_argument('--relation', '-r', nargs=2, type=int,
                        help="Specify start and end of relation counters."
                            "a -1 means to specify no boundary.")
    parser.add_argument('--dump', dest='dump_dir', required=True,
                        help='Set directory where the data and progress log are dumped.'
                        '(relative to the working directory. ex: playground/dump_kl_para)')
    parser.add_argument('--text', dest='text_dir', required=True,
                        help='Set directory where the original texts come from.'
                        '(relative to the working directory. ex: ../wikipedia/parsed-text)')
    parser.add_argument('--save', dest='save_dir', required=True,
                        help="Path to save the training data.")
    parser.add_argument('--save_pic', help="Path to save analysis pics. Absolute.")
    parser.add_argument('--save_interval', type=int, required=True,
                        help="Save interval.")
    parser.add_argument('--context', '-c', nargs="*", type=int,
                        help="Context to be analyzed.")
    parser.add_argument('--max_span_num', '-k', type=int, required=True,
                        help="Maximum number of candidates in generated training data.")
    parser.add_argument('--analysis', dest="analysis", action="store_true", help="Do analysis only.")                        
    parser.add_argument('--fig_size', nargs=2, type=int)
    parser.add_argument('--size', type=int, default=5)
    parser.add_argument('--bins', type=int, default=50)
    parser.add_argument('--norm_min', type=float, default=0.5)
    parser.add_argument('--log_min', type=float, default=0)
    # `is None` instead of `== None`: identity test for the sentinel.
    if args is None:
        return parser.parse_args()
    else:
        return parser.parse_args(args)

def dump_dir_check(dump_dir):
    """Validate the dump directory and derive the counter range.

    Expects equally many relation* and sentence* files whose trailing
    "<n>.dump" counters form the same arithmetic progression; returns it
    as a (start, stop, step) triple usable with range().

    Raises ValueError when the directory is missing, a file name cannot
    be parsed, the file counts differ, or the counters are inconsistent.
    """
    if not os.path.isdir(dump_dir):
        raise ValueError("Non-existent dump path")
    files = os.listdir(dump_dir)

    rel_lists = []
    stc_dicts = []
    for file_name in files:
        if file_name.startswith("relation"):
            rel_lists.append(file_name)
        elif file_name.startswith("sentence"):
            stc_dicts.append(file_name)
        elif file_name != "progress.log":
            print("Warning: Unexpected file:", file_name)
    if len(rel_lists) != len(stc_dicts):
        raise ValueError("Mismatching number of Relation files and sentence files")

    # Dot escaped so it only matches the literal ".dump" suffix.
    pattern = re.compile(r"([1-9][0-9]*)\.dump$")

    def extract_counters(file_names):
        # Pull the numeric counter out of each file name, sorted ascending.
        counters = []
        for file_name in file_names:
            result = pattern.search(file_name)
            if result is None:
                raise ValueError("File name parsing failed:", file_name)
            # int() instead of eval(): same value, no code-execution risk.
            counters.append(int(result.group(1)))
        counters.sort()
        return counters

    def cast_to_range(seq):
        # (start, stop, step) if seq is an arithmetic progression, else None.
        if len(seq) == 0:
            return (0, 0, 0)
        if len(seq) == 1:
            return (seq[0], seq[0] + 1, 1)
        step = seq[1] - seq[0]
        for i in range(1, len(seq)):
            if seq[i] - seq[i - 1] != step:
                return None
        return (seq[0], seq[-1] + step, step)

    rel_range = cast_to_range(extract_counters(rel_lists))
    stc_range = cast_to_range(extract_counters(stc_dicts))

    if rel_range is None or stc_range is None or rel_range != stc_range:
        raise ValueError("Bad counters of files")

    return rel_range

def main():
    """Entry point: join relation dumps with their source sentences and
    either plot distance statistics (--analysis) or emit training data.
    """
    args = parse_args()
    print("Global initialization started.")
    # Server-specific project roots, selected by the positional `location`.
    work_dir_dict = {
        "234-2": "/data/disk5/private/yuc/coref/bert-tagger",
        "cluster": "/home/shiyukai/project/yuc/coref/bert-tagger"
    }
    # Fallback default; effectively always overridden since argparse
    # restricts `location` to the dict's keys.
    WORK_DIR = work_dir_dict["234-2"]
    if args.location in work_dir_dict.keys():
        WORK_DIR = work_dir_dict[args.location]
    DUMP_DIR = os.path.join(WORK_DIR, args.dump_dir)
    # Tokenizer setup is disabled together with the uncased->cased
    # conversion step further below.
    '''
    uncased_tokenizer = None
    cased_tokenizer = None
    if args.location == "cluster":
        TOKENIZER_PATH = os.path.join(WORK_DIR, "tokenizer")
        uncased_tokenizer = BertTokenizer.from_pretrained(TOKENIZER_PATH, local_files_only=True)
        CASED_TOKENIZER_PATH = os.path.join(WORK_DIR, "cased_tokenizer")
        cased_tokenizer = BertTokenizer.from_pretrained(CASED_TOKENIZER_PATH, local_files_only=True)
    else:
        uncased_tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
        cased_tokenizer = BertTokenizer.from_pretrained('bert-base-cased')
    '''
    TEXT_DIR = os.path.join(WORK_DIR, args.text_dir)
    SAVE_DIR = os.path.join(WORK_DIR, args.save_dir)
    # NOTE(review): the "()" in this message looks like it was meant to
    # interpolate SAVE_DIR — confirm and fix separately.
    if os.path.exists(SAVE_DIR) and os.listdir(SAVE_DIR):
        raise ValueError("Output directory () already exists and is not empty.")
    if not os.path.exists(SAVE_DIR):
        os.makedirs(SAVE_DIR)
    # (start, stop, step) of the dump-file counters found on disk.
    f_start, f_end, f_step = dump_dir_check(DUMP_DIR)

    if args.analysis and not os.path.exists(args.save_pic):
        os.makedirs(args.save_pic)
    
    # NOTE(review): assumes --relation was passed; args.relation is None
    # otherwise and the indexing below would raise — confirm intended.
    start = args.relation[0]
    end = args.relation[1]
    if start == -1:
        start = f_start
    if end == -1:
        end = f_end
    # Snap the requested bounds onto the counter grid, then clamp them
    # to the range that actually exists on disk.
    start = start - start % f_step
    end = end - end % f_step + f_step
    if start < f_start:
        start = f_start
    if end > f_end:
        end = f_end

    print("Global initialization completed.")
    
    parsed_text = ParsedTextIterable(TEXT_DIR)
    text_loader = iter(parsed_text)
    current_text_id = None
    current_text = None
    tagged_data = DumpFileIterable(start, end, f_step, DUMP_DIR)
    save_manager = SaveManager(SAVE_DIR, args.save_interval)

    if args.analysis:
        # NOTE(review): --fig_size is optional in parse_args but required
        # here — confirm analysis mode always supplies it.
        fig, (ax_norm, ax_log) = plt.subplots(2, 1, 
            figsize=(args.fig_size[0], args.fig_size[1]))
        fig.suptitle('relations')

        norm_rel_counts = []
        log_rel_counts = []
        selected_ratios = []
    else:
        selected_ratios = []
        selected_count = []
    for index, context_data in enumerate(tagged_data):
        # {"id", "context", 
        #   "relations":[ "context", "missing_index", "masked_index," 
        #                 "distance" ]}
        context_id = context_data["id"]
        # Advance the parsed-text stream until it reaches the sentence
        # this context was derived from; both streams are assumed to
        # share the same file/sentence ordering.
        while current_text_id != context_id:
            try:
                # { "sentence", "file_id", "sent_id", "np_list" }
                current_text = text_loader.__next__()
            except StopIteration:
                raise ValueError("Original text not found.")
            current_text_id = encode_context_id(
                current_text["file_id"],
                current_text["sent_id"]
            )
        # Merge the original-sentence fields into the context record:
        # {"id", "context"(from context dict["context"]), 
        #   "relations":[ "context", "missing_index", "masked_index," 
        #                 "distance" ],
        #  "sentence", "file_id", "sent_id", "np_list" }
        context_data.update(current_text)
        # Disabled: uncased->cased re-tokenization (see the disabled
        # tokenizer setup above).
        ''' 
        res = convert_uncased_to_cased(context_data,
            uncased_tokenizer,
            cased_tokenizer
        )
        if res == None:
            print("Invalid relation. Context discarded.")
            continue
        '''
        # print(context_data["context"])
        if args.analysis:
            # One scatter row per context: distances surviving each filter.
            count, normalized_dist, norm_color, log_dist, log_color = analyze_data(context_data, args)
            ax_norm.scatter(normalized_dist, np.ones_like(normalized_dist) * index, 
                s=args.size, c=norm_color, cmap='viridis')
            ax_log.scatter(log_dist, np.ones_like(log_dist) * index, 
                s=args.size, c=log_color, cmap='viridis')
            norm_rel_counts.append(len(normalized_dist))
            log_rel_counts.append(len(log_dist))
            selected_ratios.append(min(len(normalized_dist), len(log_dist))/count)
        else:
            # Sampling mode: keep the top relations and persist them.
            training_data = sample_data(context_data, args)
            selected_count.append(len(training_data["relations"]))
            cand_cnt = len(training_data["candidates"])
            # Ratio of kept relations to all ordered candidate pairs.
            total_count = cand_cnt * (cand_cnt-1)
            selected_ratios.append(len(training_data["relations"]) / total_count)
            save_manager.update_sentence(training_data)
    
    if args.analysis:
        pic_path = os.path.join(args.save_pic, "result.png")
        plt.savefig(pic_path)
        
        # Histogram summary of per-context relation counts and ratios.
        fig, (ax_norm, ax_log, ax_ratio) = plt.subplots(3, 1, 
        figsize=(args.fig_size[0], args.fig_size[1]))
        fig.suptitle('relation counts')
        ax_norm.hist(norm_rel_counts, bins=args.bins)
        ax_log.hist(log_rel_counts, bins=args.bins)
        ax_ratio.hist(selected_ratios, bins=args.bins)
        pic_path = os.path.join(args.save_pic, "count.png")
        plt.savefig(pic_path)
    else:
        # Flush whatever was buffered since the last interval save, then
        # report selection statistics.
        save_manager.save_sentence_list()
        selected_count = np.array(selected_count)
        mean_count = np.mean(selected_count)
        std_count = np.std(selected_count)
        print(f"mean: {mean_count}  std:{std_count}")
        selected_ratios = np.array(selected_ratios)
        mean_ratio = np.mean(selected_ratios)
        std_ratio = np.std(selected_ratios)
        print(f"mean: {mean_ratio}  std:{std_ratio}")
        

    print("Finished.")
        

if __name__ == "__main__":
    main()