#!/usr/bin/env python
#-*-coding:utf-8-*-


import json
import os
import re


class GenCorpus():
    """Generate training corpora from a line-delimited JSON news corpus.

    Each input line is expected to be a JSON object with keys:
      - "word":   list of tokens,
      - "tag":    list of POS tags aligned with "word",
      - "entity": list of [start, end, type] spans over "word"
                  (end exclusive, judging by the ``end - 1`` usage —
                  TODO confirm against the corpus producer).

    Output files use BMES-style labels (B/M/E/S prefixes, "O" outside spans).
    """

    # Tokens that end a sentence; a blank line is written after each of them.
    SENT_END = ('。', '!', '！', '?', '？')

    def __init__(self):
        # Source corpus: one JSON document per line.
        self.source_path = os.path.expanduser("~/wdata/corpus_news.json")
        # Full entity-type names as they appear in the corpus; labels only
        # keep the first "_"-separated component (e.g. "org_name" -> "org").
        self.all_entity_type = ["job_title", "org_name", "location", "time", "product_name"]

        self.tag_path = "../../data/all_tag.txt"
        self.ner_path = "../../data/ner_train.txt"

    def itera_file(self, path):
        """Yield one parsed JSON object per line of ``path``.

        Prints a progress counter every 1000 lines.
        """
        # BUGFIX: the file handle was never closed; use a context manager
        # and an explicit encoding instead of the locale default.
        with open(path, encoding="utf-8") as f:
            for i, line in enumerate(f):
                if i % 1000 == 0:
                    print(i)
                yield json.loads(line)

    @staticmethod
    def _span_labels(words, entitys, skip_types=()):
        """Return word-level BMES entity labels ("O" for non-entity words).

        :param words: token list (only its length is used).
        :param entitys: iterable of [start, end, type] spans.
        :param skip_types: short type names to ignore (e.g. ``("product",)``).
        """
        labels = ["O"] * len(words)
        for entity in entitys:
            # Only the first component of the type name is kept
            # ("product_name" -> "product").
            tag_type = entity[2].split("_")[0]
            if tag_type in skip_types:
                continue
            start, end = entity[0], entity[1]
            if end - start == 1:
                labels[start] = "S_" + tag_type
            else:
                labels[start] = "B_" + tag_type
                labels[end - 1] = "E_" + tag_type
                for i in range(start + 1, end - 1):
                    labels[i] = "M_" + tag_type
        return labels

    def gen_tag_data(self):
        """Write POS-tagging data: one "word<TAB>tag" per line,
        sentences separated by a blank line.
        """
        # BUGFIX: output handle was never closed (risking unflushed data).
        with open(self.tag_path, 'w', encoding="utf-8") as outf:
            for data in self.itera_file(self.source_path):
                tags = data.get("tag")
                words = data.get("word")
                for word, tag in zip(words, tags):
                    outf.write("\t".join([word, tag]) + "\n")
                    if word in self.SENT_END:
                        outf.write("\n")

    def gen_ner_data(self):
        """Write character-level NER data, split 80/10/10 into
        seg_ner_train / seg_ner_test / seg_ner_dev.

        Each line is "char<TAB>seg<TAB>pos<TAB>entity"; sentences are
        separated by blank lines.  "product" entities are ignored.
        """
        all_data = []   # list of sentences; each sentence is a list of lines
        sents = []      # current sentence buffer (may span input documents)

        for data in self.itera_file(self.source_path):
            tags = data.get("tag")
            words = data.get("word")
            entitys = data.get("entity")
            # Word-level labels; brands ("product") are deliberately skipped.
            entity_labels = self._span_labels(words, entitys, skip_types=("product",))

            for word, tag, entity_label in zip(words, tags, entity_labels):
                word_len = len(word)
                seg_list = ["M"] * word_len
                entity_list = [entity_label] * word_len
                tag_list = [tag] * word_len

                if word_len == 1:
                    seg_list[0] = "S"
                else:
                    seg_list[0] = "B"
                    seg_list[-1] = "E"
                    # Project the word-level entity label onto characters:
                    # a B-word keeps B on its first char, M on the rest;
                    # an E-word keeps E on its last char; an S-word becomes
                    # a full B..M..E character span.
                    tag_type = entity_label.split("_")[-1]
                    if "B" in entity_label:
                        for j in range(1, word_len):
                            entity_list[j] = "M_" + tag_type
                    elif "E" in entity_label:
                        for j in range(word_len - 1):
                            entity_list[j] = "M_" + tag_type
                    elif "S" in entity_label:
                        entity_list[0] = "B_" + tag_type
                        entity_list[-1] = "E_" + tag_type
                        for j in range(1, word_len - 1):
                            entity_list[j] = "M_" + tag_type

                for w, seg, tg, ent in zip(word, seg_list, tag_list, entity_list):
                    sents.append("\t".join([w, seg, tg, ent]) + "\n")
                    if w in self.SENT_END or w == "\n":
                        all_data.append(sents)
                        sents = []
        # NOTE: a trailing sentence with no end punctuation is dropped,
        # as in the original implementation.

        sent_count = len(all_data)
        train_count = sent_count * 0.8
        test_count = sent_count * 0.9
        # NOTE(review): the middle 10% goes to *test* and the tail to *dev*;
        # the names look swapped, but the 80/10/10 sizes are preserved here.
        # BUGFIX: output handles were never closed; context managers added.
        with open("../../data/seg_ner_train.txt", 'w', encoding="utf-8") as train_outf, \
             open("../../data/seg_ner_dev.txt", 'w', encoding="utf-8") as dev_outf, \
             open("../../data/seg_ner_test.txt", 'w', encoding="utf-8") as test_outf:
            for i, sent in enumerate(all_data):
                if i < train_count:
                    outf = train_outf
                elif i < test_count:
                    outf = test_outf
                else:
                    outf = dev_outf
                outf.writelines(sent)
                outf.write("\n")

    @staticmethod
    def gen_seg_data():
        """Generate word-segmentation data (not implemented).

        :return: None
        """
        pass

    def gen_ner_word(self):
        """Write word-level NER data: one "word<SPACE>label" per line,
        sentences separated by a blank line.  All entity types are kept.
        """
        # BUGFIX: output handle was never closed; unused "tag" field dropped.
        with open(self.ner_path, 'w', encoding="utf-8") as outf:
            for data in self.itera_file(self.source_path):
                words = data.get("word")
                entitys = data.get("entity")
                entity_labels = self._span_labels(words, entitys)
                for word, label in zip(words, entity_labels):
                    outf.write(" ".join([word, label]) + "\n")
                    if word in self.SENT_END:
                        outf.write("\n")

    @staticmethod
    def _seg_labels(word_len):
        """BMES segmentation labels for a word of ``word_len`` characters."""
        if word_len == 1:
            return ["S"]
        return ["B"] + ["M"] * (word_len - 2) + ["E"]

    def gen_seg_word(self):
        """Write character-level segmentation data: one
        "char<SPACE>BMES-label" per line, blank line after sentence enders.
        """
        # BUGFIX: output handle was never closed; context manager added.
        with open("../../data/seg_train.txt", 'w', encoding="utf-8") as outf:
            for data in self.itera_file(self.source_path):
                for word in data.get("word"):
                    for w, label in zip(word, self._seg_labels(len(word))):
                        outf.write(" ".join([w, label]) + "\n")
                        if w in self.SENT_END:
                            outf.write("\n")


def pre_word_word2vec(source_path=None, out_path="../../data/pre_word_vec.txt"):
    """Dump the corpus as one space-joined token line per document,
    as word2vec training input for word-level embeddings.

    :param source_path: line-delimited JSON corpus; defaults to the
        original hard-coded ``~/wdata/corpus_news.json``.
    :param out_path: destination text file.
    """
    if source_path is None:
        source_path = os.path.expanduser("~/wdata/corpus_news.json")
    # BUGFIX: both handles were leaked; use context managers + utf-8.
    with open(source_path, encoding="utf-8") as f, \
         open(out_path, 'w', encoding="utf-8") as outf:
        for i, line in enumerate(f):
            if i % 1000 == 0:
                print(i)  # progress counter
            words = json.loads(line).get("word")
            outf.write(" ".join(words) + "\n")
        # print(" ".join(words))

def pre_char_word2vec(source_path=None, out_path="../../data/pre_char_vec.txt"):
    """Dump the corpus as one space-separated-character line per document,
    as word2vec training input for character-level embeddings.

    All whitespace is stripped from the concatenated text before the
    characters are space-joined.

    :param source_path: line-delimited JSON corpus; defaults to the
        original hard-coded ``~/wdata/corpus_news.json``.
    :param out_path: destination text file.
    """
    if source_path is None:
        source_path = os.path.expanduser("~/wdata/corpus_news.json")
    # BUGFIX: both handles were leaked; use context managers + utf-8.
    with open(source_path, encoding="utf-8") as f, \
         open(out_path, 'w', encoding="utf-8") as outf:
        for i, line in enumerate(f):
            if i % 1000 == 0:
                print(i)  # progress counter
            content = "".join(json.loads(line).get("word"))
            # Raw string (was the non-raw "[\s]", a deprecated escape).
            content = re.sub(r"\s", '', content)
            outf.write(" ".join(content) + "\n")



if __name__ == '__main__':
    # Entry point: build the character-level NER training corpus.
    # The word2vec preprocessors are available but disabled:
    # pre_word_word2vec()
    # pre_char_word2vec()
    GenCorpus().gen_ner_data()
