#!/usr/bin/env python 
# -*- coding:utf-8 -*-
'''
@File    :   data_pretreatment.py    
@Modify Time      @Author    @Version    @Description
------------      -------    --------    -----------
2022/4/12 0012 13:38   st      1.0         None
'''
from collections import Counter
from time import time

import jieba
import pkuseg

from model import trie_model_new, trie_model
from utils.common_utils import *
from utils.model_utils import *
from utils.user_seg import UserSeg

stop_word_list = get_stopwords()


# class DataPretreatment:
#     def __init__(self):
#         root = None

def init_model(datas, ngram_num=1, is_new=False, single=True, model_new=False):
    """
    Initialize the trie model: load a cached pickle when one exists,
    otherwise build the trie from `datas` and persist it.
    :param datas: data set used to populate the trie
    :param ngram_num: ngram order (part of the cache file name when `single`)
    :param is_new: use the new trie structure (trie_model_new) instead of the old one
    :param single: fixed-order ngram mode; also controls the cache file suffix
    :param model_new: force a rebuild even if a cached model file exists
    :return: trie root node
    """
    suffix = '_' + str(ngram_num) if single else ''
    model_path = model_dir + suffix + '.pkl'
    # Reuse the cached model unless a rebuild was explicitly requested.
    if os.path.exists(model_path) and not model_new:
        return load_model(model_path)
    node_cls = trie_model_new.TrieNode if is_new else trie_model.TrieNode
    root = add_data_to_tire(node_cls('*', {}), datas, ngram_num, single)
    save_model(root, model_path)
    return root


def data_clean(content_list, delSpace=True, full2half=True, Tra2Sim=True):
    """
    Clean a list of text strings.
    :param content_list: list of raw text strings
    :param delSpace: strip spaces from the text
    :param full2half: convert full-width characters to half-width
    :param Tra2Sim: convert Traditional Chinese to Simplified
    :return: new list of cleaned strings, same order and length as the input
    """
    total = len(content_list)
    cleaned = []
    for seq, text in enumerate(content_list, start=1):
        if delSpace:
            text = del_space(text)
        if full2half:
            text = full_to_half(text)
        if Tra2Sim:
            text = Traditional2Simplified(text)
        cleaned.append(text)
        # Periodic progress report.
        if seq % NUIT_COUNT_NUM == 0:
            print('数据清洗中：', seq, '/', total)
    return cleaned


def cut_words(content_list, stopword=None, cut_mod=0):
    """
    Split texts and tokenize each one.
    :param content_list: list of cleaned text strings
    :param stopword: iterable of stop words filtered out of the token lists
                     (None or empty keeps every token)
    :param cut_mod: tokenizer selector: 0 pkuseg, 1 pkuseg with user dict,
                    2 jieba, any other value UserSeg
    :return: (content_list_words, sentence_list);
             content_list_words is a list of
             {'content': text, 'cut_list': [{'text': sentence, 'cuts': tokens}]},
             sentence_list is the flat list of sentences that were tokenized
    """
    # Fix: the old default was a mutable list ([]) shared across calls;
    # a set also makes the per-token membership test O(1) instead of O(n).
    stopwords = set(stopword) if stopword else set()
    content_list_words = []
    content_count = len(content_list)
    sentence_list = []
    if cut_mod == 0:
        seg = pkuseg.pkuseg(model_name=pkuseg_model_path)
    elif cut_mod == 1:
        user_dict = os.path.join(data_path, 'pkuseg', 'user_dict', 'newword.txt')
        seg = pkuseg.pkuseg(model_name=pkuseg_model_path, user_dict=user_dict)
    elif cut_mod == 2:
        seg = jieba
    else:
        seg = UserSeg()
    for index, content in enumerate(content_list):
        # Each content is treated as a single sentence (sentence splitting
        # via cut_sent_seg was disabled in the original).
        sentence_list.append(content)
        # Tokenize, dropping stop words.
        texts_cut = [{'text': x, 'cuts': [y for y in seg.cut(x) if y not in stopwords]}
                     for x in [content]]
        content_list_words.append({'content': content, 'cut_list': texts_cut})
        # Progress report.
        if (index + 1) % NUIT_COUNT_NUM == 0:
            print('数据切割、分词中：', index + 1, '/', content_count)
    return content_list_words, sentence_list


def add_data_to_tire(root, datas=None, ngram_num=1, single=True):
    """
    Load tokenized data into the trie.
    :param root: trie root node (must expose .add(ngram))
    :param datas: list of token lists; when None the tokens are read from
                  matadata_path_words, or re-cut from the raw metadata file
                  if that cache does not exist yet
    :param ngram_num: ngram order (used when `single` is True)
    :param single: True -> fixed-order ngrams via create_ngram_1,
                   False -> variable-order ngrams in [NGRAM_MINI, NGRAM_MAX]
    :return: the populated trie root
    """
    if not datas:
        if not os.path.exists(matadata_path_words):
            if matadata_path.endswith('xlsx'):
                datas = matadata_cut_excel()
            else:
                datas = matadata_cut_txt()
        else:
            datas = []
            # Fix: the original opened the file without closing it; `with`
            # guarantees the handle is released, and iterating the file
            # lazily avoids readlines() loading everything at once.
            with open(matadata_path_words, 'r', encoding='utf-8') as f:
                for line in f:
                    parts = line.strip().split('\t')
                    if len(parts) < 2:
                        continue
                    datas.append(parts[1].split(' '))
    datas_count = len(datas)
    start_time = time()
    print('加载数据到trie树中')
    for index, data in enumerate(datas):
        if single:
            ngrams = create_ngram_1(data, ngram_num)
        else:
            ngrams = create_ngram(data, NGRAM_MINI, NGRAM_MAX)
        for n in ngrams:
            root.add(n)
        # Progress and timing statistics.
        if (index + 1) % NUIT_COUNT_NUM == 0:
            print('加载数据到trie树中：', index + 1, '/', datas_count)
            if (index + 1) % NUIT_COUNT_TIME == 0:
                end_time = time()
                print('-------耗时：', end_time - start_time)
                start_time = end_time
    print('加载数据到trie树完成--')
    return root


def matadata_cut_excel(datas=None):
    """
    Cut and tokenize raw metadata from an excel sheet.
    :param datas: pre-loaded rows (list of lists of cells); when None the
                  rows are read from matadata_path (sheet 0, header skipped)
    :return: list of token lists, one per cleaned text cell
    """
    if not datas:
        datas = get_excel_data(matadata_path, sheet_index=0, start_row=1)
    data_num = len(datas)
    print('读取原始数据完成，总行数：', data_num)
    # Flatten the rows into a single list of text cells.
    flat_datas = [cell for row in datas for cell in row]
    print('总数据数：', len(flat_datas))
    flat_datas = data_clean(flat_datas, full2half=False)
    print('数据清洗完成')
    if STOP_WORD:
        datas_cut, sentence_list = cut_words(flat_datas, stop_word_list)
    else:
        datas_cut, sentence_list = cut_words(flat_datas)
    print('数据分词完成')

    print('分词结果开始写入')
    all_cut_datas = []
    # Fix: the original loop variable was also named `datas`, shadowing the
    # parameter; renamed to `item` for clarity.
    for index, item in enumerate(datas_cut):
        if (index + 1) % NUIT_COUNT_NUM == 1:
            print('分词结果写入中：', index + 1, '/', data_num)
        for cut in item['cut_list']:
            all_cut_datas.append(cut['cuts'])
    print('原始数据处理完成')
    return all_cut_datas


def matadata_cut_txt(datas=None, cut_mod=0):
    """
    Cut and tokenize raw metadata from a text file.
    :param datas: pre-loaded rows; when None the rows are read from
                  matadata_path (one tab-separated record per line)
    :param cut_mod: tokenizer selector forwarded to cut_words
    :return: list of token lists, one per cleaned record
    """
    if not datas:
        # Fix: the original called datas.append() while datas was still
        # None (the default), raising AttributeError; initialize the list
        # first, and use `with` so the file handle is closed.
        datas = []
        with open(matadata_path, 'r', encoding='utf-8') as f:
            for line in f:
                datas.append(line.strip().split('\t'))
    data_num = len(datas)
    print('读取原始数据完成，总行数：', data_num)

    print('总数据数：', data_num)
    datas_new = data_clean(datas, full2half=False)
    print('---------数据清洗完成')
    if STOP_WORD:
        datas_cut, sentence_list = cut_words(datas_new, stop_word_list, cut_mod)
    else:
        datas_cut, sentence_list = cut_words(datas_new, cut_mod=cut_mod)
    print('---------数据分词完成')

    print('分词结果开始写入')
    all_cut_datas = []
    # Renamed the loop variable (the original shadowed the `datas` parameter).
    for index, item in enumerate(datas_cut):
        if (index + 1) % NUIT_COUNT_NUM == 1:
            print('分词结果写入中：', index + 1, '/', data_num)
        for cut in item['cut_list']:
            all_cut_datas.append(cut['cuts'])
    print('原始数据处理完成')
    return all_cut_datas


def matadata_word_frequency(datas):
    """
    Count token frequencies over the whole data set.
    :param datas: iterable of (id, tokens) pairs; the id is ignored and each
                  token sequence contributes its elements to the count
    :return: list of (token, count) tuples sorted by count, descending
    """
    print('词频统计开始')
    # Counter replaces the manual contains/increment dance; `_record_id`
    # avoids shadowing the `id` builtin.
    word_count = Counter()
    for _record_id, data in datas:
        word_count.update(data)
    # most_common() is sorted(items, key=count, reverse=True) under the hood,
    # so ordering (including tie stability) matches the original exactly.
    datas_frequency = word_count.most_common()
    print('词频统计完成')
    return datas_frequency

