
import re
import os
import pickle
import json
import random
import argparse

import torch
import numpy as np

import matplotlib.pyplot as plt

def get_dump_file_list(dump_dir=None):
    """Walk *dump_dir* and collect dump files with their checkpoint counters.

    Args:
        dump_dir: Directory to scan.  Defaults to the module-level
            ``DUMP_DIR`` for backward compatibility with the original
            global-based call style.

    Returns:
        tuple: ``(counter_list, file_list)`` where ``counter_list`` holds the
        sorted integer counters extracted from ``*cnt_<N>.dump`` file names
        and ``file_list`` holds ``(root, file_name)`` pairs sorted by counter
        (files without a counter sort first, as counter 0).
    """
    if dump_dir is None:
        dump_dir = DUMP_DIR  # original behavior: fall back to the global

    # search(), not match(): the counter appears after a prefix such as
    # "relation_list_", so an anchored match() would never succeed.
    cnt_pattern = re.compile(r"cnt_([0-9]+)\.dump$")

    def get_counter(file_name):
        """Return the integer counter embedded in *file_name*, or 0."""
        result = cnt_pattern.search(file_name)
        # int() instead of eval(): same result, no code-execution risk.
        return int(result.group(1)) if result else 0

    counter_list = []
    file_list = []
    for root, dirs, files in os.walk(dump_dir):
        for file_name in files:
            counter = get_counter(file_name)
            if counter != 0:
                counter_list.append(counter)
            # Every file is recorded, even ones without a counter.
            file_list.append((root, file_name))

    counter_list.sort()
    file_list.sort(key=lambda x: get_counter(x[1]))
    return counter_list, file_list

def load_from_pickle(counters, dump_dir):
    """Load relation lists and sentence dicts from pickled dump files.

    For each counter, reads ``relation_list_cnt_<N>.dump`` and
    ``sentence_dict_cnt_<N>.dump`` from *dump_dir* and merges them into one
    relation list and one sentence dict.

    Returns:
        tuple: ``(rel_list, stc_dict)`` with all counters merged.
    """
    rel_list = []
    stc_dict = {}
    for counter in counters:
        rel_name = "relation_list_cnt_{}.dump".format(counter)
        stc_name = "sentence_dict_cnt_{}.dump".format(counter)
        with open(os.path.join(dump_dir, rel_name), "rb") as rel_file:
            rel_list.extend(pickle.load(rel_file))
        with open(os.path.join(dump_dir, stc_name), "rb") as stc_file:
            stc_dict.update(pickle.load(stc_file))
    return rel_list, stc_dict

def load_from_json(counters, dump_dir):
    """Load relation lists and sentence dicts from JSON-lines dump files.

    For each counter, reads ``relation_list_cnt_<N>.dump`` (one relation
    object per line) and ``sentence_dict_cnt_<N>.dump`` (one sentence object
    per line, mapped as ``id -> context``) from *dump_dir*.

    Returns:
        tuple: ``(rel_list, stc_dict)`` with all counters merged.
    """
    rel_list = []
    stc_dict = {}
    for counter in counters:
        rel_path = os.path.join(dump_dir, "relation_list_cnt_{}.dump".format(counter))
        stc_path = os.path.join(dump_dir, "sentence_dict_cnt_{}.dump".format(counter))
        with open(rel_path, "r") as rel_file:
            rel_list.extend(json.loads(line.strip()) for line in rel_file)
        with open(stc_path, "r") as stc_file:
            for line in stc_file:
                stc = json.loads(line.strip())
                stc_dict[stc["id"]] = stc["context"]
    return rel_list, stc_dict

def naive_join_tokens(tokens):
    """Reassemble WordPiece-style tokens into a readable string.

    ``##``-prefixed continuation pieces are glued to the previous token;
    ``,`` and ``.`` attach without a leading space; every other token is
    separated by a single space.
    """
    result = ""
    for tok in tokens:
        if tok.startswith("##"):
            # Continuation piece: append without the marker, no space.
            result += tok[2:]
        elif result and tok not in (",", "."):
            result = result + " " + tok
        else:
            # First token, or punctuation that hugs the previous word.
            result += tok
    return result

def get_rel_and_stc(counters, dump_dir, args):
    """Load the relation/sentence dumps for *counters* and run the analyses.

    Loads JSON-lines dumps via ``load_from_json``, prints summary statistics
    over the full relation list and, when ``args.context`` is given, repeats
    the analysis restricted to each requested context id.

    Args:
        counters: iterable of checkpoint counters to load.
        dump_dir: directory holding the ``*_cnt_<N>.dump`` files.
        args: parsed CLI namespace; reads ``args.sort``, ``args.shuffle``
            and ``args.context``.
    """
    
    rel_list, stc_dict = load_from_json(counters, dump_dir)

    # In-place sort by descending distance.  list.sort() returns None, so
    # this helper always returns None; callers ignore the return value.
    def sort_relations(relations):
        return relations.sort(key=lambda x:x["distance"], reverse=True)
    # Enrich *relation* in place with the actual tokens at the missing/masked
    # indices and a normalized (float + numpy) distance.  Exits the whole
    # script on lookup failures, printing debug info first.
    def human_readable_relation(relation):
        try:
            context = stc_dict[relation["context"]]
        except KeyError:
            # Unknown context id: dump available keys and abort.
            print(stc_dict.keys(), relation)
            exit()
        # x-1 for the added special tokens at start
        try:
            missing_token = [ context[x] for x in relation["missing_index"] ]
            masked_token = [ context[x] for x in relation["masked_index"] ]
        except IndexError:
            # An index points past the end of this context: dump and abort.
            print(context, relation)
            exit()
        distance = relation["distance"]
        
        # NOTE(review): if distance is none of Tensor/float/ndarray (e.g. an
        # int decoded from JSON), np_distance stays unbound and the update
        # below raises UnboundLocalError — confirm upstream always emits
        # floats before relying on this.
        if type(distance) == torch.Tensor:
            np_distance = distance.detach().numpy()
        elif type(distance) == float:
            np_distance = np.array(distance)
        elif type(distance) == np.ndarray:
            np_distance = distance
        distance = float(distance)
        relation.update({
            "missing_token": missing_token,
            "masked_token": masked_token,
            "distance": distance,
            "distance_np": np_distance
        })
        return relation
    # Render one enriched relation as a single human-readable summary line.
    # "[CLS]" tokens and 0-indices (the special-token slot) are filtered out.
    def str_relation(h_relation):
        missing_token = naive_join_tokens([ x for x in h_relation["missing_token"] if x != "[CLS]" ])
        masked_token = naive_join_tokens([ x for x in h_relation["masked_token"] if x != "[CLS]" ])
        missing_indices = [x for x in h_relation["missing_index"] if x != 0]
        masked_indices = [x for x in h_relation["masked_index"] if x != 0]
        return "context: {0}  missing token: {1}({4})  masked token: {2}({5})  distance: {3}".format(
            h_relation["context"], missing_token, masked_token, 
            h_relation["distance_np"], missing_indices, masked_indices
        )
    # Print one summary line per relation.
    def pretty_print(relations):
        for relation in relations:
            print(str_relation(relation))
    # Print the relation count, the top/last 7 relations (after an optional
    # in-place shuffle or sort) and the mean distance.  The token-distance
    # statistic is currently disabled (see the quoted-out block below).
    def analyze_relations(relations, stat_only=False, do_sort=True, do_shuffle=False, save=False):
        print("relation count: ", len(relations))
        # Enrich every relation in place before printing/sorting.
        for rel_id, relation in enumerate(relations):
            human_readable_relation(relation)
            
        if do_shuffle:
            random.shuffle(relations)    
        elif do_sort:
            sort_relations(relations)
        print("top 7:")
        pretty_print(relations[:7])
        print("last 7:")
        pretty_print(relations[-7:])
        if not stat_only:
            missing_order = [ x["missing_index"] for x in relations]
            # print(missing_order)
        # distances = np.stack([ x["distance_np"] for x in relations ])

        # NOTE(review): `sum` shadows the builtin, but it is only used by the
        # disabled block below, so no live code is affected.
        sum = 0
        weight_sum = 0
        dists = []
        weights = []
        '''
        for relation in relations:
            missing_indices = [x for x in relation["missing_index"] if x != 0]
            masked_indices = [x for x in relation["masked_index"] if x != 0]
            dist1 = np.abs(missing_indices[-1] - masked_indices[0])
            dist2 = np.abs(missing_indices[0] - masked_indices[-1])
            dist = np.min([dist1, dist2])
            weight = relation["distance_np"]
            dists.append(dist)
            weights.append(weight)
            sum += dist * weight
            weight_sum += weight
        if save:
            plt.scatter(dists, weights, s=5)
            plt.savefig("result.png")
        '''
        # weighted_mean_dist = sum / weight_sum
        weighted_mean_dist = 0
        distances = [ x["distance_np"] for x in relations ]
        average_dist = np.mean(distances)
        print("Average distance: ", average_dist, "Average token dist: ", weighted_mean_dist)

    # random.shuffle(rel_list)
    analyze_relations(rel_list, stat_only=True, 
        do_sort=args.sort, do_shuffle=args.shuffle, save=True)

    # Re-run the relation analysis restricted to a single context id.
    def analyze_context(stc_id, sort=True, shuffle=False):
        # print(list(enumerate(stc_dict[stc_id])))
        if not stc_id in stc_dict:
            print(f"Context {stc_id} not present.")
            return
        print(list(enumerate(stc_dict[stc_id])))
        print(naive_join_tokens(stc_dict[stc_id]))
        context_filter = lambda x: x["context"] == stc_id
        filtered_rel_list = list(filter(context_filter, rel_list))    
        analyze_relations(filtered_rel_list, do_sort=sort, do_shuffle=shuffle)

    if args.context != None:
        for context in args.context:
            analyze_context(context, args.sort, args.shuffle)
    
def main():
    """Parse CLI arguments, validate the dump directory, verify that the
    dump checkpoints form a complete arithmetic sequence, and run the
    relation/sentence analysis over the requested counter range."""
    work_dir_dict = {
        "234-2": "/data/disk5/private/yuc/coref/bert-tagger",
        "cluster": "/home/shiyukai/project/yuc/coref/bert-tagger"
    }
    server_list = ["234-2", "cluster"]

    parser = argparse.ArgumentParser(description="Arguments for relation generation.")
    parser.add_argument('location', choices=server_list,
                        help='Indicate the server this script is running on.')
    parser.add_argument('start', type=int, help="start of counter (normally 500)")
    parser.add_argument('end', type=int, help="end of counter")
    parser.add_argument('--dump', dest='dump_dir', required=True,
                        help='Set directory to dump the data and progress log.'
                        '(relative to the working directory. ex: playground/dump_kl_para)')
    parser.add_argument('--context', '-c', nargs="*", type=int,
                        help="Context to be analyzed.")
    parser.add_argument('--sort', action="store_true")
    parser.add_argument('--shuffle', action="store_true")

    args = parser.parse_args()
    # argparse's `choices` already restricts location; .get keeps the old
    # "default to 234-2" behavior without the redundant membership test.
    WORK_DIR = work_dir_dict.get(args.location, work_dir_dict["234-2"])

    DUMP_DIR = os.path.join(WORK_DIR, args.dump_dir)
    if not os.path.exists(DUMP_DIR):
        print("Invalid dump path. exit.")
        exit()

    # Collect the checkpoint counters present for each dump kind.  The
    # patterns are named after the file each one actually matches (the
    # original names were swapped).  int(), not eval(), on the digits.
    relation_pat = re.compile(r"relation_list_cnt_([0-9]+)\.dump")
    sentence_pat = re.compile(r"sentence_dict_cnt_([0-9]+)\.dump")
    relation_counters = []
    sentence_counters = []
    for file in os.listdir(DUMP_DIR):
        res = relation_pat.search(file)
        if res is not None:
            relation_counters.append(int(res.group(1)))
        res = sentence_pat.search(file)
        if res is not None:
            sentence_counters.append(int(res.group(1)))
    if len(relation_counters) != len(sentence_counters):
        print("Warning: unaligned data detected. counters:",
            len(relation_counters), len(sentence_counters))
    relation_counters.sort()
    sentence_counters.sort()

    if not relation_counters:
        # The original crashed with IndexError here; fail with a message.
        print("Invalid dump path. exit.")
        exit()
    # Infer the counter step from the first two entries; with a single dump
    # pair there is no step to infer, so the sequence is that one counter.
    if len(relation_counters) > 1:
        step = relation_counters[1] - relation_counters[0]
        counters = list(range(relation_counters[0], relation_counters[-1] + 1, step))
    else:
        counters = list(relation_counters)
    # Full equality (not just a zipped-prefix compare) so missing trailing
    # checkpoints are detected too.
    if relation_counters == counters and sentence_counters == counters:
        print("completeness checked.")
    else:
        print("invalid data.")
    # Restrict to the user-requested [start, end] range.
    counters = [x for x in counters if args.start <= x <= args.end]
    get_rel_and_stc(counters, dump_dir=DUMP_DIR, args=args)


# Script entry point: run only when executed directly, not when imported.
if __name__=="__main__":
    main()
        


