#coding=utf-8
import ast
import re
from convert_util import *
from functools import reduce
from itertools import chain


# Max gap (in line ids) between two annotated lines that still belong to the
# same bag/group; a larger gap starts a new group.
LINE_MAX = 4


def _bag_to_str(bag={}):
    names = set()
    for l, person in bag.items():
        names.update(person.keys())
    name_map = dict([(name, i + 1) for i, name in enumerate(list(set(names)))])
    strs = []
    for l, person in sorted(bag.items(), key=lambda x: x[0]):
        person_new = dict([(x, name_map[x]) for x in person.keys()])
        strs.append("{}\t{}\n".format(l, person_new))
    return ''.join(strs)

class PersonSorter:
    """Collect per-line person-name annotations and dump them to disk.

    ``process_one`` scans one line of annotated HTML for person names
    (``<font color="FF0000">...</font>``) and writes the *previous*
    annotated line's name map to ``uncertains_out`` (output lags one line;
    ``close()`` flushes the last one).  ``person_reset_id`` then re-groups
    the dumped lines into bags separated by gaps larger than ``LINE_MAX``
    and renumbers the person ids per bag into ``<uncertains_out>.new``.
    """

    def __init__(self, certains_out="certains_out", uncertains_out="uncertains_out"):
        # Both files stay open for the object's lifetime; call close() when done.
        self.certains_file = open(certains_out, encoding="utf-8", mode="w")
        self.uncertains_file = open(uncertains_out, encoding="utf-8", mode="w")
        self.uncertains_path = uncertains_out
        self.last_line = -9   # sentinel: "no previous annotated line yet"
        self.last_names = {}
        self.is_certain = True

    def process_one(self, line, line_id):
        """Record person names found on ``line`` (identified by ``line_id``)."""
        names = re.findall(r'<font color="FF0000">(.*?)</font>', line)
        name_map = {name: i + 1 for i, name in enumerate(set(names))}
        if name_map:
            # Flush the previously remembered line, then remember this one.
            self.uncertains_file.write("{}\t{}\n".format(self.last_line, self.last_names))
            self.last_names = name_map
            self.last_line = line_id

    def close(self):
        """Flush the pending line and close both output files."""
        self.uncertains_file.write("{}\t{}\n".format(self.last_line, self.last_names))
        self.certains_file.close()
        self.uncertains_file.close()

    def person_reset_id(self):
        """Re-read the uncertains dump, group lines into bags, renumber ids.

        Lines whose ids differ by more than ``LINE_MAX`` start a new bag;
        each bag is renumbered by ``_bag_to_str`` and written to
        ``<uncertains_path>.new`` with a "----------" separator between bags.
        """
        bag = {}
        last_line_id = -1
        with open(self.uncertains_path, encoding="utf-8") as f, \
                open(self.uncertains_path + ".new", encoding="utf-8", mode="w") as fw:
            for line in f:
                line_id, persons = line.strip().split('\t')
                line_id = int(line_id)
                if line_id - last_line_id > LINE_MAX:  # gap: start a new bag
                    fw.write(_bag_to_str(bag) + "----------\n")
                    bag = {}
                # literal_eval is a safe replacement for eval() on our own
                # dict-repr dumps.
                bag[line_id] = ast.literal_eval(persons)
                last_line_id = line_id
            fw.write(_bag_to_str(bag))





def extract_persons_id(input, output):
    """Extract person-name maps from 5 annotated HTML shards and bag them.

    Reads ``<input>0-job.html`` .. ``<input>4-job.html``, finds person names
    (``<font color="FF0000">...``) per line, groups consecutive annotated
    lines (gap <= LINE_MAX) into bags, and writes the renumbered bags to
    ``output`` separated by "----------" markers.
    """
    contents = []
    for x in range(0, 5):
        with open((input + "{}-job.html").format(x), encoding="utf-8") as f:
            contents.append(f.read().strip())
    bag = {}
    last_line_id = -9
    with open(output, encoding="utf-8", mode="w") as fw:
        for line in re.split('\n+', '\n'.join(contents)):
            # Annotated lines look like "<id>&nbsp;&nbsp;&nbsp;&nbsp;<html>";
            # report and skip anything else.
            if line.find("&nbsp;&nbsp;&nbsp;&nbsp;") < 0:
                print(line)
                continue
            line_id, html_content = line.split("&nbsp;&nbsp;&nbsp;&nbsp;")
            names = re.findall(r'<font color="FF0000">(.*?)</font>', line)
            name_map = {name: i + 1 for i, name in enumerate(set(names))}
            line_id = int(line_id)
            if name_map:
                if line_id - last_line_id > LINE_MAX:  # gap: flush current bag
                    fw.write(_bag_to_str(bag) + "----------\n")
                    bag = {}
                bag[line_id] = name_map
                last_line_id = line_id
        fw.write(_bag_to_str(bag))


def extract_persons2(input, input2):
    """Split grouped person annotations back out and collect other entities.

    Reads the full annotated HTML from ``<input>-all.html`` and the grouped
    person file ``input2`` ("----------"-separated groups of
    ``"line_id\\tname_map"`` rows).  Writes each group, prefixed with a
    "-----(<start>)---" header, to ``<input>_persons-all``, and every
    non-person entity (font color != FF0000) found on the group's source
    lines to ``<input>_entities``.
    """
    contents = []
    with open(input + "-all.html", encoding="utf-8") as f:
        contents.append(f.read().strip())
    # Assumes line ids equal positions in the file, since groups index
    # sentences[line_id] directly — TODO confirm against the producer.
    sentences = re.split('\n+', '\n'.join(contents))

    with open(input2, encoding="utf-8") as f, \
            open(input + "_persons-all", encoding="utf-8", mode="w") as fp, \
            open(input + "_entities", encoding="utf-8", mode="w") as ft:
        groups = f.read().split("----------\n")[1:]
        for g in groups:
            name_lines = g.strip().split('\n')
            if len(name_lines) < 1:
                print(g)
                continue
            group_start, group_end = [int(x.split('\t')[0])
                                      for x in (name_lines[0], name_lines[-1])]
            fp.write("-----({})---\n{}".format(group_start, g))
            # ----- collect non-person entities over the group's line span ----
            ft.write("-----({})---\n".format(group_start))
            entity_set = set()
            for l in range(group_start, group_end + 1):
                entity_set.update(
                    re.findall(r'<font color="(\w{6})">([^<]*?)</font>', sentences[l]))
            for entity in sorted(entity_set, key=lambda e: e[0]):
                if entity[0] != 'FF0000':  # FF0000 is the person color
                    ft.write("{}\t[]\n".format(entity))


def compare_to_range(line_id, group_range):
    """Three-way compare of line_id against the inclusive (start, end) range.

    Returns -1 if below the range, 1 if above it, 0 if within it.
    """
    start, end = group_range[0], group_range[1]
    if line_id > end:
        return 1
    if line_id < start:
        return -1
    return 0


def convert_back_ids_within(html_content, type_map, ids_off, name_map):
    """Convert one annotated html line into per-character BIO-tag rows.

    ``split_html`` yields plain characters (str) interleaved with tagged
    spans (dict with 'color', 'text' and optionally 'ids').  Plain chars
    become ``"<ch>\\tO"``; a tagged span becomes B-/I- rows whose tag tail is
    the mapped type (via ``type_map``) plus, for PER spans, the
    group-offset person id from ``name_map``, or the offset coreference ids
    otherwise.  Operates on a single line — no '\\n' expected.
    """
    chars = split_html(html_content)
    # Sanity check: every </font> in the html should yield one extracted span.
    num_fonts = html_content.count('</font>')
    num_extracteds = sum(1 for d in chars if isinstance(d, dict))
    if num_fonts != num_extracteds:
        print(" group {}, {} only {} => {}".format(
            int(ids_off / 100), num_fonts, num_extracteds, html_content))
    result = []
    for c in chars:
        if isinstance(c, str):
            result.append("{}\tO".format(c))
            continue
        text = c['text']
        char_types = [type_map[c['color']]]  # e.g. PER / LOC / JOB
        if char_types[0] == 'PER':
            person_id = name_map.get(text)
            if person_id:
                char_types.append(str(person_id + ids_off))
            else:
                print("{}  miss person_id=>{}".format(int(ids_off / 100), html_content))
        elif c.get('ids'):
            # Shift every referenced id into this group's id space.
            ids = [str(int(x) + ids_off)
                   for x in re.split(r"\D+", c.get('ids')) if x]
            char_types.append(','.join(ids))
        type_tail = '-'.join(char_types)
        result.append("{}\tB-{}".format(text[0], type_tail))   # B- first char
        for cc in text[1:]:
            result.append("{}\tI-{}".format(cc, type_tail))    # I- remainder
    return result

def fill_groups(name_map, kk_map):
    """Merge alias pairs from ``kk_map`` into ``name_map`` (mutated in place).

    For each ``(first, sec)`` pair: if exactly one of the two names already
    has an id, the other one receives the same id; if neither has one, a
    fresh id (``len(name_map) + 1``) is assigned to both; if both are
    present, nothing changes.  Returns ``name_map``.
    """
    for first, sec in kk_map.items():
        has_first = first in name_map
        has_sec = sec in name_map
        if has_first and not has_sec:
            name_map[sec] = name_map[first]
        elif not has_first:
            if has_sec:
                name_map[first] = name_map[sec]
            else:  # neither known: mint one shared id
                fresh = len(name_map) + 1
                name_map[first] = fresh
                name_map[sec] = fresh
        # both present: leave existing ids untouched
    return name_map


def run_convert_back(input, output):
    """Convert annotated html + grouped person ids to char/tag training rows.

    Reads ``<input>.html`` (lines "id&nbsp;x4 <html>") and ``<input>.persons``
    ("-----(<start>)---" headers followed by "line_id\\tname_map" rows), and
    writes one "<char>\\t<tag>" row per character to ``output``, with a blank
    line between source lines.  Person ids are offset by group_start * 100.
    """
    with open(input + ".html", encoding="utf-8") as f:
        all_lines = f.read().strip().split("\n")
    with open(input + ".persons", encoding="utf-8") as f:
        persons_offset = f.read().strip()

    persons = re.findall(r'\-{5}\((\d+)\)\-{3}\n([^\-]+)', persons_offset)

    ## [((start, end), group_text)] — end is the last annotated line id + 4 slack
    person_groups = [((int(x[0]),
                       int(x[1].strip().split('\n')[-1].split('\t')[0]) + 4),
                      x[1].strip()) for x in persons]

    ## merge every line's name map of a group into one dict per group;
    ## literal_eval is a safe replacement for eval() on our own dict dumps
    person_groups = [(rng,
                      reduce(lambda r, c: dict(chain(r.items(), c.items())),
                             [ast.literal_eval(row.split('\t')[1])
                              for row in text.split("\n")], {}))
                     for rng, text in person_groups]

    with open(output, encoding='utf-8', mode='w') as fw:
        group_index = 0
        line_idx = 0
        while line_idx < len(all_lines):
            # NOTE: rstrip takes a *character set* — this strips any trailing
            # '<', 'b', 'r', '>', '/' or '\n' chars, not just the literal tag.
            line_id, line_html = all_lines[line_idx].rstrip('<br></br>\n').split(
                "&nbsp;&nbsp;&nbsp;&nbsp;")
            if group_index >= len(person_groups):
                # All groups consumed: remaining lines carry no person ids
                # (previously this indexed past the end and crashed).
                line_pos_cmp = -1
            else:
                line_pos_cmp = compare_to_range(int(line_id),
                                                person_groups[group_index][0])
            if line_pos_cmp < 0:      # line before current group: no ids
                fw.write('\n'.join(convert_back_ids_within(
                    line_html, repl_map_revert, 0, {})) + "\n\n")
                line_idx += 1
            elif line_pos_cmp > 0:    # line after group: advance to next group
                group_index += 1
            else:                     # line within group: merged, offset map
                group_id = person_groups[group_index][0][0]
                group_names = person_groups[group_index][1]
                # Manually link known nickname aliases to their full names.
                group_names = fill_groups(group_names, {"汤姆斯": "汤", '尤伯': '尤'})
                fw.write('\n'.join(convert_back_ids_within(
                    line_html, repl_map_revert, group_id * 100, group_names)) + "\n\n")
                line_idx += 1


if __name__ == "__main__":
    run_convert_back( r'D:\sfxy191\back\train_data', r'D:\sfxy191\back\train_data_char')