# -*- coding: utf-8 -*-
import multiprocessing as mp
import os
import sys
import functools
import json
import stanza
import tqdm
from os.path import join
from multiprocessing import Pool,Process

"""
这个脚本用于给作者处理过后的json文件中添加结点的修饰成分

具体逻辑：
1. 读取每一个article对应的json文件
2. 处理其中的结点
    2.1 对于nodes_pruned2中的结点，调用stanza对其进行POS
    2.2 将POS之后的结果作为新的结点，添加到nodes_pruned2中
    2.3 在读取这些数据的时候，需要进行判断，是否为descriptive
3. 重新写入article的json文件
"""

# Post-process mode: reuse the stanza parses already cached in the json files
# (mention['pos']) instead of running the pipeline again.
post_process = True
# Number of worker processes per multiprocessing Pool.
process_n = 6

# Only build the (GPU) stanza pipeline when we actually need to parse text.
if not post_process:
    nlp = stanza.Pipeline('en', processors='tokenize,pos,lemma,depparse', use_gpu=True)

def id_generator(prefix, start):
    """Yield an endless sequence of ids: 'prefix<start>', 'prefix<start+1>', ..."""
    current = start
    while True:
        yield f'{prefix}{current}'
        current += 1

def desc_node(token_list, major_node, mention, pos_type):
    """
    Build one modifier ("part") node for a run of tokens.

    token_list: stanza token dicts (each with 'text' and a 1-based 'id')
    major_node: id of the node these tokens modify, e.g. "node_3"
    mention: the mention dict the tokens came from (supplies position offsets)
    pos_type: POS tag of the run; e.g. ADJ, ADV, VERB, NUM, NOUN, PROPN
    """
    word_offset = mention['word_pos'][0]
    insent_offset = mention['insent_pos'][0]
    first_id = token_list[0]['id']
    last_id = token_list[-1]['id']
    # Token ids are 1-based within the mention; shift them into the mention's
    # document-level / sentence-level coordinates. A single token yields a
    # one-element range, so both cases share one computation.
    word_positions = list(range(first_id + word_offset - 1, last_id + word_offset))
    insent_positions = list(range(first_id + insent_offset - 1, last_id + insent_offset))
    return {
        'content': [
            {
                'text': ' '.join(tok['text'] for tok in token_list),
                'word_pos': word_positions,
                'insent_pos': insent_positions,
                'sent_pos': mention['sent_pos'],
            }
        ],
        'type': 'part',
        'pos_type': pos_type,
        'summary_worthy': 0,
        'InSalientSent': 0,
        'target': major_node,   # id of the node being modified
    }

def desc_nodes(mention, n_id, nlp_result):
    """
    Extract modifier nodes from the stanza parse of a mention.

    mention: one mention dict from the json
    n_id: id of the node the mention belongs to, e.g. "node_3"
    nlp_result: stanza output for the mention text; may contain several
        sentences, only the first (nlp_result[0]) is used
    returns: list of modifier node dicts (see desc_node)
    """
    tokens = nlp_result[0]
    total = len(tokens)
    nodes = []
    idx = 0
    while idx < total:
        upos = tokens[idx]['upos']
        if upos in ('ADJ', 'ADV', 'VERB', 'NUM'):
            # Single-token modifier.
            nodes.append(desc_node([tokens[idx]], n_id, mention, upos))
            idx += 1
        elif upos in ('NOUN', 'PROPN'):
            # Absorb the maximal run of consecutive tokens with the same tag.
            end = idx + 1
            while end < total and tokens[end]['upos'] == upos:
                end += 1
            nodes.append(desc_node(tokens[idx:end], n_id, mention, upos))
            idx = end
        else:
            idx += 1
    return nodes

def process_one(datadir, outdir, split, filename):
    """
    Annotate one article json file with modifier ("desc") nodes, writing the
    result to the mirrored path under outdir.

    datadir / outdir: root directories of the input / output corpora
    split: corpus split name ('train' / 'val' / 'test')
    filename: the article's json file name

    Skips files whose output already exists, so interrupted runs can resume.
    """
    if os.path.exists(join(outdir, split, filename)):
        return

    with open(join(datadir, split, filename), 'r') as f:
        data = json.load(f)

    # Nothing to annotate: copy the file through unchanged.
    if len(data['nodes_pruned2']) == 0:
        with open(join(outdir, split, filename), 'w') as f:
            json.dump(data, f)
        return

    new_nodes = []
    discard_node_ids = []

    for n_id, content in data['nodes_pruned2'].items():
        # Drop modifier nodes left over from a previous run; they are rebuilt below.
        if content['type'] == 'desc' or content['type'] == 'part':
            discard_node_ids.append(n_id)
            continue
        for mention in content['content']:
            # Single-word mentions cannot carry separate modifiers.
            if len(mention['word_pos']) >= 2:
                if not post_process:
                    # Run stanza now and cache the parse on the mention.
                    result = nlp(mention['text'])
                    result = json.loads(str(result))
                    mention['pos'] = result
                else:
                    # Reuse the parse cached by an earlier run.
                    result = mention['pos']
                new_nodes.extend(desc_nodes(mention, n_id, result))

    # Remove the old desc nodes.
    for discard_node in discard_node_ids:
        del data['nodes_pruned2'][discard_node]

    # Renumber the new desc nodes after the highest surviving node id.
    # default=0 guards the case where every node was discarded above, in which
    # max() over an empty sequence would raise ValueError.
    max_id = max((int(k.split('_')[-1]) for k in data['nodes_pruned2']), default=0)
    id_gen = id_generator('node_', max_id + 1)
    node_dict = {next(id_gen): node for node in new_nodes}
    data['desc_nodes'] = node_dict
    data['nodes_pruned2'].update(node_dict)

    with open(join(outdir, split, filename), 'w') as f:
        json.dump(data, f)
    print('done with {}/{}'.format(split, filename))


def process_split(datadir, outdir, split):
    """Process every not-yet-processed file of one split with a worker pool."""
    print(f'start processing: {datadir}/{split}')
    pending = set(os.listdir(join(datadir, split))) - set(os.listdir(join(outdir, split)))
    worker = functools.partial(process_one, datadir, outdir, split)
    with Pool(process_n) as pool:
        pool.map(worker, list(pending))

def process_list(datadir, outdir, split, filelist):
    """Process an explicit list of files from one split with a worker pool."""
    worker = functools.partial(process_one, datadir, outdir, split)
    with Pool(process_n) as pool:
        pool.map(worker, filelist)

def process_filelist_with_args(filelist):
    """Process a list of (datadir, outdir, split, filename) tuples with a worker pool."""
    with Pool(process_n) as pool:
        pool.starmap(process_one, filelist)

def get_filelist(datadir, outdir, split_list):
    """
    Collect process_one argument tuples for every unprocessed file.

    returns: list of (datadir, outdir, split, filename) tuples covering each
    file present under datadir/<split> but absent from outdir/<split>,
    for every split in split_list.
    """
    res = []
    for split in split_list:
        done = os.listdir(join(outdir, split))
        todo = set(os.listdir(join(datadir, split))).difference(done)
        res.extend((datadir, outdir, split, name) for name in todo)
    return res

def even_split(data: list, n: int) -> list:
    """
    Split data into exactly n contiguous chunks whose sizes differ by at most 1.

    The first len(data) % n chunks get one extra element. When len(data) < n,
    the trailing chunks are empty lists — the original while-loop version
    stopped early in that case and tripped its own `assert len(res) == n`.
    """
    assert n > 0
    step, remain = divmod(len(data), n)
    res = []
    start = 0
    for i in range(n):
        # the first `remain` chunks absorb the leftover elements
        inc = step + (1 if i < remain else 0)
        res.append(data[start: start + inc])
        start += inc
    return res

# When True, run the small hard-coded debug job below instead of the CLI-driven one.
debug = True

if __name__ == '__main__':
    # 'spawn' start method: each worker re-imports this module in a fresh
    # interpreter (and, when post_process is False, builds its own stanza pipeline).
    mp.set_start_method('spawn')
    if debug:
        # test/4844
        #print(even_split(list(range(11)), 4))
        # NOTE(review): hard-coded local paths — adjust before running elsewhere.
        d = '/home/zhanziqi/z/cnndm_desc'
        t = '/home/zhanziqi/z/cnndm_desc_final'
        filelist = os.listdir(d + '/test')
        process_list(d, t, 'test', filelist)
    else:
        datadir = sys.argv[1]   # input corpus root
        outdir = sys.argv[2]    # output corpus root
        """
        datadir = '/data/liuhongfei/z/cnndm_mini'
        outdir = '/data/liuhongfei/z/cnndm_mini_desc'
        """
        s = ['test', 'train', 'val']

        to_process = get_filelist(datadir, outdir, s)
        print(f'total size: {len(to_process)}')
        # One driver process per GPU; each driver runs its own worker pool.
        gpu_n = 3
        chunk_list = even_split(to_process, gpu_n)
        processes = []
        for i,chunk in enumerate(chunk_list):
            # With the 'spawn' start method the child inherits os.environ as it
            # is at start() time, so setting this before start() pins process i
            # to GPU i.
            os.environ['CUDA_VISIBLE_DEVICES'] = str(i)
            process = Process(target=process_filelist_with_args, args=(chunk,), name='process-' + str(i))
            processes.append(process)
            process.start()
            print(f'{process.name} started')

        for process in processes:
            process.join()