import gc
import glob
import hashlib
import itertools
import json
import os
import random
import re
import subprocess
from collections import Counter
from os.path import join as pjoin
import argparse
import torch
from multiprocess import Pool
import tqdm

from others.logging import logger
# from others.tokenization import BertTokenizer
from pytorch_transformers import XLNetTokenizer

from others.utils import clean
from prepro.utils import _get_word_ngrams
from transformers import BertTokenizer
import xml.etree.ElementTree as ET

# Parenthesized markers stripped from the tail of NYT abstracts, e.g. "(photo)".
nyt_remove_words = ["photo", "graph", "chart", "map", "table", "drawing"]


def recover_from_corenlp(s):
    """Undo CoreNLP tokenization artifacts around apostrophes.

    Re-attaches a detached apostrophe (" 's" -> "'s") and a detached
    closing double-quote sequence ("'' s" -> "''s") to the following
    word character.

    Bug fixes vs. the original:
    * the patterns used literal braces ``{\\w}`` instead of a capturing
      group ``(\\w)``, so the ``\\g<1>`` backreference raised
      ``re.error: invalid group reference`` on any match;
    * the function computed the substitutions but never returned ``s``.
    """
    s = re.sub(r" '(\w)", r"'\g<1>", s)
    s = re.sub(r"'' (\w)", r"''\g<1>", s)
    return s


def load_json(p, lower):
    """Read a CoreNLP-tokenized story JSON file and split it into
    source sentences and target (highlight) sentences.

    Tokens in sentences appearing after an '@highlight' marker are
    accumulated into the target; everything before belongs to the
    source. Both sides are passed through ``clean`` and re-split into
    token lists.

    Fix vs. the original: the JSON file is now opened with a context
    manager instead of leaking the file handle.
    """
    source = []
    tgt = []
    flag = False  # True once an '@highlight' marker has been seen
    with open(p) as fh:
        doc = json.load(fh)
    for sent in doc['sentences']:
        tokens = [t['word'] for t in sent['tokens']]
        if (lower):
            tokens = [t.lower() for t in tokens]
        if (tokens[0] == '@highlight'):
            flag = True
            tgt.append([])
            continue
        if (flag):
            tgt[-1].extend(tokens)
        else:
            source.append(tokens)

    source = [clean(' '.join(sent)).split() for sent in source]
    tgt = [clean(' '.join(sent)).split() for sent in tgt]
    return source, tgt


def load_xml(p):
    """Parse one NYT-style XML document.

    Returns ``(paras, abstract)`` where ``paras`` is a list of token
    lists (the title, plus byline when present, is prepended with
    '[unused3]'/'[unused4]' markers) and ``abstract`` is a list of
    token lists. Returns ``(None, None)`` when the document lacks a
    headline, an abstract, or a 'full_text' block.

    Fixes vs. the original:
    * bare ``except:`` clauses narrowed to the exceptions the code can
      actually raise (empty text -> AttributeError, no child -> IndexError);
    * local ``abs`` renamed so the builtin is no longer shadowed;
    * comprehension variables renamed so the path parameter ``p`` is
      not shadowed.
    """
    tree = ET.parse(p)
    root = tree.getroot()
    title, byline, abstract, paras = [], [], [], []

    title_node = list(root.iter('hedline'))
    if (len(title_node) > 0):
        try:
            title = [n.text.lower().split() for n in list(title_node[0].iter('hl1'))][0]
        except (AttributeError, IndexError):
            # Malformed headline: log the offending file and continue.
            print(p)
    else:
        return None, None

    byline_node = list(root.iter('byline'))
    byline_node = [n for n in byline_node if n.attrib['class'] == 'normalized_byline']
    if (len(byline_node) > 0):
        byline = byline_node[0].text.lower().split()

    abs_node = list(root.iter('abstract'))
    if (len(abs_node) > 0):
        try:
            abstract = [n.text.lower().split() for n in list(abs_node[0].iter('p'))][0]
        except (AttributeError, IndexError):
            print(p)
    else:
        return None, None

    # The abstract is one string of ';'-separated clauses; strip the
    # editorial "(m)"/"(s)"/media markers from the final clause.
    abstract = ' '.join(abstract).split(';')
    abstract[-1] = abstract[-1].replace('(m)', '')
    abstract[-1] = abstract[-1].replace('(s)', '')
    for ww in nyt_remove_words:
        abstract[-1] = abstract[-1].replace('(' + ww + ')', '')
    abstract = [clause.split() for clause in abstract]
    abstract = [clause for clause in abstract if len(clause) > 2]

    for doc_node in root.iter('block'):
        if (doc_node.get('class') == 'full_text'):
            paras = [n.text.lower().split() for n in list(doc_node.iter('p'))]
            break

    if (len(paras) > 0):
        if (len(byline) > 0):
            paras = [title + ['[unused3]'] + byline + ['[unused4]']] + paras
        else:
            paras = [title + ['[unused3]']] + paras
        return paras, abstract
    else:
        return None, None


def tokenize(args):
    """Tokenize every *.story file under ``args.raw_path`` with the
    Stanford CoreNLP pipeline, writing JSON output to ``args.save_path``.

    Raises an Exception when the number of output files does not match
    the number of input files.
    """
    stories_dir = os.path.abspath(args.raw_path)
    tokenized_stories_dir = os.path.abspath(args.save_path)

    print("Preparing to tokenize %s to %s..." % (stories_dir, tokenized_stories_dir))
    stories = os.listdir(stories_dir)  # every file name in the stories directory

    # CoreNLP consumes a text file listing the documents to process.
    print("Making list of files to tokenize...")
    mapping_file = "mapping_for_corenlp.txt"
    with open(mapping_file, "w") as f:
        for name in stories:
            if name.endswith('story'):  # only *.story files are tokenized
                f.write("%s\n" % (os.path.join(stories_dir, name)))

    command = ['java', 'edu.stanford.nlp.pipeline.StanfordCoreNLP', '-properties', 'StanfordCoreNLP-chinese.properties',
               '-annotators', 'tokenize,ssplit',
               '-ssplit.newlineIsSentenceBreak', 'always', '-filelist', 'mapping_for_corenlp.txt', '-outputFormat',
               'json', '-outputDirectory', tokenized_stories_dir]
    print("Tokenizing %i files in %s and saving in %s..." % (len(stories), stories_dir, tokenized_stories_dir))
    subprocess.call(command)
    print("Stanford CoreNLP Tokenizer has finished.")
    os.remove(mapping_file)

    # Sanity check: CoreNLP should emit exactly one output file per input.
    num_orig = len(os.listdir(stories_dir))
    num_tokenized = len(os.listdir(tokenized_stories_dir))
    if num_orig != num_tokenized:
        raise Exception(
            "The tokenized stories directory %s contains %i files, but it should contain the same number as %s (which has %i files). Was there an error during tokenization?" % (
                tokenized_stories_dir, num_tokenized, stories_dir, num_orig))
    print("Successfully finished tokenizing %s to %s.\n" % (stories_dir, tokenized_stories_dir))


def cal_rouge(evaluated_ngrams, reference_ngrams):
    """Compute ROUGE precision, recall and F1 between two ngram sets.

    Returns a dict with keys "f", "p" and "r".
    """
    n_ref = len(reference_ngrams)
    n_eval = len(evaluated_ngrams)
    n_overlap = len(evaluated_ngrams.intersection(reference_ngrams))

    precision = n_overlap / n_eval if n_eval else 0.0
    recall = n_overlap / n_ref if n_ref else 0.0

    # The small epsilon keeps the denominator non-zero when both
    # precision and recall are 0.
    f1_score = 2.0 * ((precision * recall) / (precision + recall + 1e-8))
    return {"f": f1_score, "p": precision, "r": recall}


def greedy_selection(doc_sent_list, abstract_sent_list, summary_size):
    """Greedily select up to ``summary_size`` sentences from the document
    that maximize ROUGE-1 + ROUGE-2 F1 against the reference abstract.

    Returns the selected sentence indices in ascending order.

    Fix vs. the original: the early-exit path (no remaining sentence
    improves the score) returned the indices in *selection* order while
    the normal path returned them sorted; both paths now return a
    sorted list.
    """

    def _rouge_clean(s):
        # Keep only alphanumerics and spaces before ngram extraction.
        return re.sub(r'[^a-zA-Z0-9 ]', '', s)

    max_rouge = 0.0
    abstract = sum(abstract_sent_list, [])
    abstract = _rouge_clean(' '.join(abstract)).split()
    sents = [_rouge_clean(' '.join(s)).split() for s in doc_sent_list]
    evaluated_1grams = [_get_word_ngrams(1, [sent]) for sent in sents]
    reference_1grams = _get_word_ngrams(1, [abstract])
    evaluated_2grams = [_get_word_ngrams(2, [sent]) for sent in sents]
    reference_2grams = _get_word_ngrams(2, [abstract])

    selected = []
    for _ in range(summary_size):
        cur_max_rouge = max_rouge
        cur_id = -1
        for i in range(len(sents)):
            if i in selected:
                continue
            # Score the union of the already-selected sentences plus
            # candidate i against the reference ngrams.
            c = selected + [i]
            candidates_1 = set.union(*map(set, [evaluated_1grams[idx] for idx in c]))
            candidates_2 = set.union(*map(set, [evaluated_2grams[idx] for idx in c]))
            rouge_1 = cal_rouge(candidates_1, reference_1grams)['f']
            rouge_2 = cal_rouge(candidates_2, reference_2grams)['f']
            rouge_score = rouge_1 + rouge_2
            if rouge_score > cur_max_rouge:
                cur_max_rouge = rouge_score
                cur_id = i
        if cur_id == -1:
            # No sentence improves the score: stop early.
            return sorted(selected)
        selected.append(cur_id)
        max_rouge = cur_max_rouge

    return sorted(selected)


def hashhex(s):
    """Return the hexadecimal SHA-1 digest of *s* (UTF-8 encoded)."""
    return hashlib.sha1(s.encode('utf-8')).hexdigest()


class BertData():
    """Converts one (fact, law, evidence, claim) record into the
    subtoken-id, segment-id and [CLS]-position lists consumed by a
    BERT-style model.

    Sentences inside ``fact`` are delimited with '[SEP] [CLS]' pairs so
    each sentence gets its own [CLS] position.
    """

    def __init__(self, args):
        # args is expected to provide: model_name,
        # min_src_ntokens_per_sent, max_src_ntokens_per_sent,
        # max_src_nsents, max_tgt_ntokens, min_tgt_ntokens.
        self.args = args
        self.tokenizer = BertTokenizer.from_pretrained(args.model_name, do_lower_case=True)

        self.sep_token = '[SEP]'
        self.cls_token = '[CLS]'
        self.pad_token = '[PAD]'
        # NOTE(review): target markers declared here are
        # [unused0]/[unused1]/[unused2], but preprocess() builds the
        # target string with '[unused1] ... [unused2]' — confirm which
        # convention the downstream model expects.
        self.tgt_bos = '[unused0]'
        self.tgt_eos = '[unused1]'
        self.tgt_sent_split = '[unused2]'
        self.sep_vid = self.tokenizer.vocab[self.sep_token]
        self.cls_vid = self.tokenizer.vocab[self.cls_token]
        self.pad_vid = self.tokenizer.vocab[self.pad_token]

    def preprocess(self, fact, law, evidence, claim, sent_labels=None, use_bert_basic_tokenizer=False, is_test=False):
        """Tokenize one record.

        fact: str, sentences separated by '。'. law: list of law-article
        strings. evidence: list of evidence strings. claim: str target.
        sent_labels / use_bert_basic_tokenizer are currently unused.

        Returns a 14-tuple of (fact/law/evidence/target subtoken ids,
        fact/law/evidence segment ids, fact/law/evidence cls positions,
        fact/law/evidence/target raw texts), or None when — outside
        test mode — the claim or tokenized target is too short.
        """

        # Skip very short claims when building training data
        # (character length of the claim string).
        if ((not is_test) and len(claim) < 4):
            return None

        # original_src_txt = [' '.join(s) for s in src]
        # Split the fact into sentences on the Chinese full stop.
        original_fact_txt = fact.split('。')
        original_law_txt = law  # [l.split('。') for l in law]
        original_evidence_txt = evidence
        tgt = claim

        # Keep only sufficiently long sentences; truncate each sentence
        # to max_src_ntokens_per_sent characters.
        idxs = [i for i, s in enumerate(original_fact_txt) if (len(s) > self.args.min_src_ntokens_per_sent)]
        fact = [original_fact_txt[i][:self.args.max_src_ntokens_per_sent] for i in idxs]

        fact = fact[:self.args.max_src_nsents]

        # _sent_labels = [0] * len(src)  # the 3 highest-ROUGE sentences are the extractive gold answer
        # for l in sent_labels:
        #     _sent_labels[l] = 1

        # sent_labels = [_sent_labels[i] for i in idxs]
        # src = src[:self.args.max_src_nsents]
        # sent_labels = sent_labels[:self.args.max_src_nsents]

        # if ((not is_test) and len(src) < self.args.min_src_nsents):
        #     return None

        # src_txt = [' '.join(sent) for sent in src]  # list of sentences
        fact_text = ' {} {} '.format(self.sep_token, self.cls_token).join(fact)  # join sentences with ' [SEP] [CLS] '
        evidence_text = self.sep_token.join(evidence)

        fact_subtokens = self.tokenizer.tokenize(fact_text)  # convert to subword tokens
        law_subtokens = [self.tokenizer.tokenize(l) for l in original_law_txt]
        evidence_subtokens = self.tokenizer.tokenize(evidence_text)

        fact_subtokens = [self.cls_token] + fact_subtokens + [self.sep_token]  # add boundary tokens
        fact_subtoken_idxs = self.tokenizer.convert_tokens_to_ids(fact_subtokens)  # tokens -> vocabulary ids

        law_subtokens = [[self.cls_token] + l_subtokens + [self.sep_token] for l_subtokens in law_subtokens]
        law_subtokens_idxs = [self.tokenizer.convert_tokens_to_ids(l_subtokens) for l_subtokens in law_subtokens]

        evidence_subtokens = [self.cls_token] + evidence_subtokens + [self.sep_token]
        evidence_subtokens_idxs = self.tokenizer.convert_tokens_to_ids(evidence_subtokens)

        # Each law article / the evidence is a single segment (all 0s)
        # with one [CLS] at position 0.
        law_segments_ids = [[0] * len(l) for l in law_subtokens]
        law_cls_ids = [0]

        evidence_segments_ids = [0] * len(evidence_subtokens)
        evidence_cls_ids = [0]

        # Alternate segment ids 0/1 per fact sentence, using the [SEP]
        # positions as sentence boundaries.
        _fact_segs = [-1] + [i for i, t in enumerate(fact_subtoken_idxs) if t == self.sep_vid]
        fact_segs = [_fact_segs[i] - _fact_segs[i - 1] for i in range(1, len(_fact_segs))]
        fact_segments_ids = []
        for i, s in enumerate(fact_segs):
            if (i % 2 == 0):
                fact_segments_ids += s * [0]
            else:
                fact_segments_ids += s * [1]
        fact_cls_ids = [i for i, t in enumerate(fact_subtoken_idxs) if t == self.cls_vid]
        # sent_labels = sent_labels[:len(cls_ids)]

        # NOTE(review): the ' [unused3] '.join over a single-element
        # list is a no-op, so '[unused3]' is never actually inserted.
        tgt_subtokens_str = '[unused1] ' + ' [unused3] '.join(
            [' '.join(self.tokenizer.tokenize(tgt))]) + ' [unused2]'
        tgt_subtoken = tgt_subtokens_str.split()[:self.args.max_tgt_ntokens]
        if ((not is_test) and len(tgt_subtoken) < self.args.min_tgt_ntokens):
            return None

        tgt_subtoken_idxs = self.tokenizer.convert_tokens_to_ids(tgt_subtoken)
        # print(tgt)
        # print(tgt_subtoken_idxs)
        # print(self.tokenizer.convert_ids_to_tokens(tgt_subtoken_idxs))

        tgt_txt = claim
        fact_txt = [original_fact_txt[i] for i in idxs]
        law_txt = original_law_txt
        evidence_txt = original_evidence_txt

        return fact_subtoken_idxs, law_subtokens_idxs, evidence_subtokens_idxs, tgt_subtoken_idxs, \
               fact_segments_ids, law_segments_ids, evidence_segments_ids, \
               fact_cls_ids, law_cls_ids, evidence_cls_ids, \
               fact_txt, law_txt, evidence_txt, tgt_txt


def format_to_bert(args):
    """Convert every '*<corpus_type>.*.json' shard under args.raw_path
    into a '.bert.pt' tensor file under args.save_path, using a worker
    pool of args.n_cpus processes."""
    if args.dataset != '':
        datasets = [args.dataset]
    else:
        datasets = ['train', 'valid', 'test']
    for corpus_type in datasets:
        tasks = []
        for json_f in glob.glob(pjoin(args.raw_path, '*' + corpus_type + '.*.json')):
            real_name = json_f.split('/')[-1]
            save_file = pjoin(args.save_path, real_name.replace('json', 'bert.pt'))
            tasks.append((corpus_type, json_f, args, save_file))
        pool = Pool(args.n_cpus)
        # imap is consumed purely for its side effects: the worker
        # writes each output file to disk itself.
        for _ in pool.imap(_format_to_bert, tasks):
            pass

        pool.close()
        pool.join()


def _format_to_bert(params):
    """Worker: turn one json shard into a list of BERT-ready dicts and
    torch.save it to ``save_file``. Skips shards whose output already
    exists on disk."""
    corpus_type, json_file, args, save_file = params
    if os.path.exists(save_file):
        logger.info('Ignore %s' % save_file)
        return

    is_test = (corpus_type == 'test')
    bert = BertData(args)

    logger.info('Processing %s' % json_file)
    jobs = json.load(open(json_file))

    datasets = []
    for d in tqdm.tqdm(jobs):
        fact = d['fact']
        law = d['law']
        claim2evidence = d['claim2evidence']

        # One training instance per (claim, evidence) pair.
        for claim, evidence in claim2evidence.items():
            b_data = bert.preprocess(
                fact, law, evidence, claim,
                use_bert_basic_tokenizer=args.use_bert_basic_tokenizer,
                is_test=is_test)
            if b_data is None:
                continue

            (fact_subtoken_idxs, law_subtokens_idxs, evidence_subtokens_idxs,
             tgt_subtoken_idxs,
             fact_segments_ids, law_segments_ids, evidence_segments_ids,
             fact_cls_ids, law_cls_ids, evidence_cls_ids,
             fact_txt, law_txt, evidence_txt, tgt_txt) = b_data

            datasets.append({
                "fact": fact_subtoken_idxs, "law": law_subtokens_idxs,
                "evidence": evidence_subtokens_idxs,
                "claim": tgt_subtoken_idxs,
                "fsegs": fact_segments_ids, "lsegs": law_segments_ids, "esegs": evidence_segments_ids,
                "fclss": fact_cls_ids, "lclss": law_cls_ids, "eclss": evidence_cls_ids,
                "ftxt": fact_txt, "ltxt": law_txt, "etxt": evidence_txt, "ctxt": tgt_txt})

    logger.info('Processed instances %d' % len(datasets))
    logger.info('Saving to %s' % save_file)
    torch.save(datasets, save_file)
    gc.collect()


def format_to_lines(args):
    """Group raw story-json files into train/valid/test JSON shards.

    ``mapping_<type>.txt`` under args.map_path lists which documents
    belong to which split (matched by hashed name). Each matched file
    is converted by ``_format_to_lines`` in a worker pool and the
    results are written out in shards of roughly args.shard_size
    documents named '<save_path>.<type>.<n>.json'.

    Fix vs. the original: the mapping files are now opened with a
    context manager instead of leaking the file handles.
    """
    corpus_mapping = {}
    for corpus_type in ['valid', 'test', 'train']:
        # The mapping file records which documents belong to this split.
        temp = []
        with open(pjoin(args.map_path, 'mapping_' + corpus_type + '.txt')) as fh:
            for line in fh:
                temp.append(hashhex(line.strip()))
        corpus_mapping[corpus_type] = {key.strip(): 1 for key in temp}

    train_files, valid_files, test_files = [], [], []
    # Match every json file under raw_path (e.g. '<hash>.story.json')
    # to its split via the hashed base name.
    for f in glob.glob(pjoin(args.raw_path, '*.json')):
        real_name = f.split('/')[-1].split('.')[0]
        if real_name in corpus_mapping['valid']:
            valid_files.append(f)
        elif real_name in corpus_mapping['test']:
            test_files.append(f)
        elif real_name in corpus_mapping['train']:
            train_files.append(f)

    corpora = {'train': train_files, 'valid': valid_files, 'test': test_files}
    for corpus_type in ['train', 'valid', 'test']:
        a_lst = [(f, args) for f in corpora[corpus_type]]  # params for the worker pool
        pool = Pool(args.n_cpus)
        dataset = []
        p_ct = 0
        for d in pool.imap_unordered(_format_to_lines, a_lst):
            dataset.append(d)
            # Write a shard every args.shard_size documents.
            if len(dataset) > args.shard_size:
                pt_file = "{:s}.{:s}.{:d}.json".format(args.save_path, corpus_type, p_ct)
                with open(pt_file, 'w') as save:
                    save.write(json.dumps(dataset))
                p_ct += 1
                dataset = []

        pool.close()
        pool.join()
        # Flush the final, possibly short, shard.
        if len(dataset) > 0:
            pt_file = "{:s}.{:s}.{:d}.json".format(args.save_path, corpus_type, p_ct)
            with open(pt_file, 'w') as save:
                save.write(json.dumps(dataset))
            p_ct += 1
            dataset = []


def _format_to_lines(params):
    """Worker: convert one story-json file into {'src': ..., 'tgt': ...}."""
    fname, args = params
    print(fname)
    src, tgt = load_json(fname, args.lower)
    return {'src': src, 'tgt': tgt}


def format_xsum_to_lines(args):
    """Shard the XSum corpus into {'src','tgt'} JSON files per split,
    driven by the official train/dev/test split mapping."""
    datasets = [args.dataset] if args.dataset != '' else ['train', 'test', 'valid']

    corpus_mapping = json.load(open(pjoin(args.raw_path, 'XSum-TRAINING-DEV-TEST-SPLIT-90-5-5.json')))

    for corpus_type in datasets:
        root_src = pjoin(args.raw_path, 'restbody')
        root_tgt = pjoin(args.raw_path, 'firstsentence')
        # realnames = [fname.split('.')[0] for fname in os.listdir(root_src)]
        realnames = corpus_mapping[corpus_type]

        tasks = [(root_src, root_tgt, n) for n in realnames]
        pool = Pool(args.n_cpus)
        dataset = []
        p_ct = 0
        for d in pool.imap_unordered(_format_xsum_to_lines, tasks):
            if d is None:
                continue
            dataset.append(d)
            # Write out a shard once args.shard_size documents accumulate.
            if len(dataset) > args.shard_size:
                pt_file = "{:s}.{:s}.{:d}.json".format(args.save_path, corpus_type, p_ct)
                with open(pt_file, 'w') as save:
                    save.write(json.dumps(dataset))
                p_ct += 1
                dataset = []

        pool.close()
        pool.join()
        # Flush the final, possibly short, shard.
        if len(dataset) > 0:
            pt_file = "{:s}.{:s}.{:d}.json".format(args.save_path, corpus_type, p_ct)
            with open(pt_file, 'w') as save:
                save.write(json.dumps(dataset))
            p_ct += 1
            dataset = []


def _format_xsum_to_lines(params):
    src_path, root_tgt, name = params
    f_src = pjoin(src_path, name + '.restbody')
    f_tgt = pjoin(root_tgt, name + '.fs')
    if (os.path.exists(f_src) and os.path.exists(f_tgt)):
        print(name)
        source = []
        for sent in open(f_src):
            source.append(sent.split())
        tgt = []
        for sent in open(f_tgt):
            tgt.append(sent.split())
        return {'src': source, 'tgt': tgt}
    return None


def _greedy_selection(doc_sent_list, abstract_sent_list, summary_size):
    """Deprecated duplicate of :func:`greedy_selection`.

    The body was a byte-for-byte copy of ``greedy_selection``; delegate
    to the canonical implementation so the logic lives in one place.
    """
    return greedy_selection(doc_sent_list, abstract_sent_list, summary_size)


def str2bool(v):
    """argparse-friendly boolean parser.

    Accepts common truthy/falsy spellings (case-insensitive) and raises
    argparse.ArgumentTypeError for anything else.
    """
    val = v.lower()
    if val in ('yes', 'true', 't', 'y', '1'):
        return True
    if val in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')


def _preprocess(tokenizer, fact, law, claim2evidence, use_bert_basic_tokenizer=False, is_test=False):
    for claim in claim2evidence:
        claimToken = tokenizer.tokenize(claim)[:50]
        factToken = tokenizer.tokenize(fact)[:500]
        lawToken = [tokenizer.tokenize(l)[:500] for l in law]
        evidenceToken = tokenizer.tokenize(";".join(claim2evidence[claim]))[:500]
    # self.args = args


if __name__ == '__main__':
    # Smoke test: run _preprocess over one training shard with a
    # Chinese whole-word-masking BERT tokenizer.
    json_file = '../../json_data/law.train.0.json'
    tokenizer = BertTokenizer.from_pretrained('hfl/chinese-bert-wwm', do_lower_case=True)
    jobs = json.load(open(json_file, encoding='utf-8'))
    for record in jobs:
        fact, law, claim2evidence = record['fact'], record['law'], record['claim2evidence']
        b_data = _preprocess(tokenizer, fact, law, claim2evidence)
