# -*- coding: utf-8 -*-
"""
    该文件主要功能：
        1:从原始数据中按照 分类 获取数据 分类数据
        2:对分类的数据每一条进行分词
        3:对同分类的文件转为文本文件进行存储
        
    所有的文件存储到 data目录下的 tokenizer
"""
import sys
import jieba
import os
import gc
import shutil
import jieba.analyse
import random

# Make the parent directory importable so the project-level modules below
# (load_dict, job, code_analy, mongo_db) resolve when this file is run as a
# script from its own directory.
path = sys.path[0].split('/')
path = "/".join(path[0:-1])
sys.path.append(path)

import load_dict
from job import write_job
from code_analy import time_me
from mongo_db import DB

# Load the project's custom jieba dictionary once at import time.
load_dict.load_dict()

# Absolute directory containing this module; anchors get_data_dir().
module_path = os.path.split(os.path.realpath(__file__))[0]


def get_data_dir():
    """Return the path of the ``data`` directory next to this module.

    The path is left unnormalized (contains the literal ``..`` segment),
    exactly as os.path.join produces it.
    """
    data_dir = os.path.join(module_path, "../data")
    return data_dir


class QDTokenizer:
    """Tokenize 'qd' records per category and export fastText data files.

    Workflow (see run()): fetch every distinct category from the qd
    collection, flatten and tokenize up to 1000 records per category, then
    write one per-category text file plus global ``fasttext_train.txt`` /
    ``fasttext_test.txt`` files under the directory given by get_data_dir().
    """

    def __init__(self, db_inst, job_id):
        """
        :param db_inst: mongo database handle exposing dict-style collection
            access (used as db_inst['qd']).
        :param job_id: job identifier forwarded to write_job() for progress
            reporting.
        """
        self._job_id = job_id
        self._db_inst = db_inst
        self._labels_map = {}

        # Scratch buffers kept for interface compatibility; not used by run().
        self._store_text_list = []
        self._store_label_list = []

    def open(self):
        # Kept for symmetry with close(); nothing to acquire.
        pass

    def _write_job(self, progress, step, extra=''):
        """Record job progress (progress is stringified) via write_job()."""
        write_job(self._db_inst, self._job_id, str(progress), step, extra)

    def _get_qd_collect(self):
        """Return the 'qd' collection of the bound database."""
        return self._db_inst['qd']

    @time_me()
    def _get_all_categroy(self):
        """Return the distinct values of 'category' in the qd collection."""
        return self._get_qd_collect().distinct("category")

    @staticmethod
    def cut_word(category_text):
        """Extract up to 20 keyword tokens from *category_text* with
        jieba.analyse.extract_tags, dropping purely numeric tokens."""
        return [word for word in jieba.analyse.extract_tags(category_text, 20)
                if not word.isnumeric()]

    @staticmethod
    def gen_qd_texts(qd_list):
        """Return one tokenized text string per qd record.

        Each record is flattened by regular_qd_data() and reduced to its
        keyword tokens via cut_word().
        """
        data = []
        for qd in qd_list:
            text, _ = QDTokenizer.regular_qd_data(qd)
            words = QDTokenizer.cut_word(text)
            # BUGFIX: previously the raw, untokenized text was appended and
            # the cut_word() result was discarded; join the tokens instead so
            # the exported data is actually segmented.
            data.append(" ".join(words))
        return data

    @staticmethod
    def append_label(data, label):
        """Suffix every text with the fastText ``__label__`` marker + CRLF."""
        return [text + " __label__" + label + "\r\n" for text in data]

    def _train_test_split(self, data, test_size=0.1):
        """Shuffle *data* in place and split it into (train, test).

        :param test_size: fraction of samples routed to the test set.
        NOTE: the caller's list is mutated by random.shuffle().
        """
        random.shuffle(data)
        cut = int(len(data) * test_size)
        return data[cut:], data[:cut]

    @time_me()
    def run(self, categroy_split=3):
        """Export per-category and global fastText training files.

        :param categroy_split: keep at most this many leading levels of the
            '/'-separated category path when building the training label.
        """
        categroys = self._get_all_categroy()
        categroy_total = len(categroys)
        categroy_index = 0

        self._write_job(0, "从数据库加载数据中...",
                        "总分类个数 : {0}".format(categroy_total))

        # Start from a clean output directory on every run.
        if os.path.exists(get_data_dir()):
            shutil.rmtree(get_data_dir())
        os.mkdir(get_data_dir())

        # Explicit utf-8 so the Chinese text does not depend on the platform
        # default encoding; "with" guarantees both files are closed even if a
        # category fails mid-loop (the originals leaked on exception).
        train_path = os.path.join(get_data_dir(), "fasttext_train.txt")
        test_path = os.path.join(get_data_dir(), "fasttext_test.txt")
        with open(train_path, "w", encoding="utf-8") as train_file_handler, \
                open(test_path, "w", encoding="utf-8") as test_file_handler:
            for category in categroys:
                print("category : ", category)
                categorys = category.split("/")[0: categroy_split]

                categroy_index += 1.0
                progress = categroy_index * 100 / categroy_total
                # Report roughly every 10% of categories; float modulo means
                # only near-exact multiples of 10 trigger a write.
                if progress % 10 <= 0.01:
                    self._write_job(progress, "处理分类 : {0}".format(category), "")
                qd_list = self._get_qd_collect().find(
                    filter={'category': category}, limit=1000)

                text_data = self.gen_qd_texts(qd_list)

                # NOTE(review): "O" looks like an odd filename separator —
                # possibly a typo for "_"; kept to preserve output filenames.
                sub_filename = "O".join(categorys) + "_train.txt"
                with open(os.path.join(get_data_dir(), sub_filename),
                          "a", encoding="utf-8") as f:
                    f.writelines(self.append_label(text_data, category))

                parent_label = '/'.join(categorys)
                train_data, test_data = self._train_test_split(text_data)
                train_file_handler.writelines(
                    self.append_label(train_data, parent_label))
                test_file_handler.writelines(
                    self.append_label(test_data, parent_label))

                # Drop per-category buffers before the next large allocation
                # to keep peak memory down.
                del text_data
                del qd_list
                del train_data
                del test_data
                gc.collect()

        gc.collect()

    @staticmethod
    def regular_qd_data(qd):
        """Flatten a qd record into ``(text, category)``.

        Joins the name/dw_name/raw_paragraph/spec/dx_name fields with spaces
        (raw_paragraph's '/' separators become spaces) and collapses all
        whitespace runs to single spaces.
        """
        raw_paragraph = " ".join(qd['raw_paragraph'].split("/"))
        texts = [qd['name'], qd['dw_name'],
                 raw_paragraph, qd['spec'], qd['dx_name']]
        category = qd['category']
        texts = " ".join(texts)
        texts = ' '.join(texts.split())
        return texts, category

    def close(self):
        """Nothing to release; trigger a final garbage collection."""
        gc.collect()


if __name__ == "__main__":
    # Connect to the local mongo instance and run the full export once
    # against the "data_analy" database (empty job id: no job tracking).
    db_inst = DB()
    db_inst.open("127.0.0.1", "9005")

    inst = QDTokenizer(db_inst.database("data_analy"), "")
    inst.open()
    inst.run()
    inst.close()
