# coding: utf-8

# In[67]:
import os
import re
from datetime import datetime
import subprocess

import jieba.posseg as pseg
from gensim import corpora, models, similarities
import docx
import sys
import importlib
import xlrd

from model.model import File_data
from service import similarity_service
from typing import List

importlib.reload(sys)


def stopWordsList(filepath):
    """Load a stop-word list.

    For a ``.docx`` file the paragraphs themselves are returned (via
    ``get_paragraphs``); any other file is read as UTF-8 text, one entry
    per line, with surrounding whitespace stripped.
    """
    if filepath.endswith('.docx'):
        return get_paragraphs(filepath)

    # Use a context manager so the handle is closed deterministically
    # (the original left the file open for the GC to collect).
    with open(filepath, 'r', encoding='utf8') as f:
        return [w.strip() for w in f.readlines()]


stop_words = stopWordsList("stopwords.txt")


def seg_sentence(sentence):
    """Tokenise *sentence* with jieba POS tagging.

    Returns the list of tokens that are neither in the module-level
    ``stop_words`` list nor tagged with an excluded part-of-speech.
    On AttributeError (e.g. non-string input) the error is logged and
    whatever was collected so far is returned.
    """
    # POS tags to drop; an earlier variant also filtered 'm' (numerals).
    excluded_tags = ['x', 'c', 'u', 'd', 'p', 't', 'uj', 'f', 'r']
    kept = []
    try:
        for token, tag in pseg.cut(sentence):
            if token not in stop_words and tag not in excluded_tags:
                kept.append(token)
    except AttributeError:
        similarity_service.error_log()
    return kept


# Read a .docx file paragraph by paragraph and return its plain text.
def doc_to_text(tpath):
    """Return the full text of the .docx at *tpath*, one paragraph per
    line, each line terminated by ``\\n`` (empty string for an empty doc)."""
    file = docx.Document(tpath)
    # join() builds the result in one pass instead of the original's
    # quadratic '+=' concatenation loop.
    return ''.join(para.text + '\n' for para in file.paragraphs)


# Collect all qualifying text paragraphs from a .docx file.
def get_paragraphs(filePath, leastChar=15):
    """Return the de-duplicated paragraphs of the .docx at *filePath*.

    Paragraphs of at most *leastChar* characters, whitespace-only
    paragraphs, and table-of-contents style paragraphs (containing
    '..........') are skipped.  Returns None when *filePath* is falsy.
    """
    if not filePath:
        return None
    file = docx.Document(filePath)
    texts = []
    for para in file.paragraphs:
        text = para.text
        if text and len(text) > leastChar and not text.isspace() and '..........' not in text:
            texts.append(str(text))
    # FIX: the original list(set(texts)) scrambled paragraph order
    # nondeterministically between runs; dict.fromkeys de-duplicates
    # while keeping first-occurrence (document) order.
    return list(dict.fromkeys(texts))


# Read a txt file line by line and return its qualifying content.
def get_txt_content(file_path, leastChar=5):
    """Return de-duplicated content lines of the UTF-8 text file at
    *file_path*, in first-occurrence order.

    Lines shorter than *leastChar* characters (newline included),
    whitespace-only lines, and TOC-style lines (containing '..........')
    are skipped.  A section number such as '1.2 ' and everything before it
    is stripped from a matching line.  Returns None if the path is missing.
    """
    if not os.path.exists(file_path):
        return None
    # NOTE(review): the '.' here matches ANY character, so '1x2 ' also
    # triggers stripping — presumably r'\d\.\d' was intended; confirm
    # with callers before tightening.  Compiled once, outside the loop.
    section_no = re.compile(r'\d.\d[\s、]')
    texts = []
    with open(file_path, encoding='utf-8') as r:
        for text in r:
            if len(text) >= leastChar and not text.isspace() and '..........' not in text:
                se = section_no.search(text)
                if se:
                    text = text[se.end():]
                texts.append(text.replace('\n', ''))
    # dict.fromkeys keeps first-occurrence order in O(n), replacing the
    # original O(n^2) set() + sort(key=texts.index).
    return list(dict.fromkeys(texts))


def get_paragraphs_text(filePath):
    """Return every paragraph of the .docx at *filePath* that is longer
    than two characters, in document order (duplicates kept), skipping
    blank, whitespace-only and TOC-style ('..........') paragraphs.
    Returns None when *filePath* is falsy."""
    if not filePath:
        return None
    collected = []
    for paragraph in docx.Document(filePath).paragraphs:
        content = paragraph.text
        if not content or content.isspace():
            continue
        if len(content) <= 2 or '..........' in content:
            continue
        collected.append(content)
    return collected


# Read the contents of multiple template files.
def read_template_list(templates):
    """Collect template text from a list of file paths.

    ``.docx`` files contribute paragraphs longer than 7 characters
    (skipping blank / whitespace-only / TOC-style '..........' text);
    ``.xlsx``/``.xls`` files contribute cell values via
    ``get_excls_content``.  Other extensions are ignored.

    Returns ``(texts, error_files)`` where *error_files* lists the paths
    that failed to parse.
    """
    texts = []
    error_files = []
    for template in templates:
        if template.endswith('.docx'):
            try:
                file = docx.Document(template)
            # Narrowed from a bare except: so KeyboardInterrupt/SystemExit
            # still propagate; parse failures are recorded, not raised.
            except Exception:
                error_files.append(template)
                continue

            for para in file.paragraphs:
                text = para.text
                if text and len(text) > 7 and not text.isspace() and '..........' not in text:
                    texts.append(text)
        elif template.endswith(('.xlsx', '.xls')):
            try:
                # extend() appends in place instead of rebuilding the list.
                texts.extend(get_excls_content(template))
            except Exception:
                error_files.append(template)

    return texts, error_files


# Read the contents of an Excel workbook.
def get_excls_content(excls_path):
    """Return the qualifying cell values (as strings) of every sheet of
    the workbook at *excls_path*, skipping the sheet named "软件EOL信息".

    Cells whose string form has at most 7 characters, is whitespace-only
    or contains '..........' are dropped.
    """
    content = []
    work_book = xlrd.open_workbook(excls_path)
    for sheet in work_book.sheets():
        # BUG FIX: the original tested `sheet.name in "软件EOL信息"`, a
        # substring check in the wrong direction — it also skipped any
        # sheet whose name is a substring (including the empty name).
        # Equality is what was intended.
        if sheet.name == "软件EOL信息":
            continue
        for row in range(sheet.nrows):
            for data in sheet.row_values(row):
                data = str(data)
                if data and len(data) > 7 and not data.isspace() and '..........' not in data:
                    content.append(data)

    return content


# Build the similarity machinery once so callers can reuse it in loops.
def similarity_word(s_paragraphs):
    """Build a TF-IDF similarity index over *s_paragraphs*.

    *s_paragraphs* may be a single string (treated as a one-document
    corpus) or an iterable of strings.  Returns the tuple
    ``(index, tfidf, dictionary)``: a SparseMatrixSimilarity index, the
    fitted TfidfModel, and the gensim Dictionary used to vectorise
    queries against the index.
    """
    # 1. Tokenise each document with seg_sentence.
    if isinstance(s_paragraphs, str):
        tokenised = [seg_sentence(s_paragraphs)]
    else:
        tokenised = [seg_sentence(paragraph) for paragraph in s_paragraphs]

    # 2. Bag-of-words model: dictionary plus sparse vectors (the corpus).
    dictionary = corpora.Dictionary(tokenised)
    feature_count = len(dictionary.token2id.keys())
    bow_corpus = [dictionary.doc2bow(tokens) for tokens in tokenised]

    # 3. TF-IDF weighting over the corpus, then a sparse similarity index
    #    sized to the dictionary's feature count.
    tfidf = models.TfidfModel(bow_corpus)
    index = similarities.SparseMatrixSimilarity(tfidf[bow_corpus], num_features=feature_count)

    return index, tfidf, dictionary


# Template filtering: strip template boilerplate from each file's paragraphs.
def template_filter(template_str: list, file_paths: list):
    """Filter template boilerplate out of each file's paragraphs.

    Builds a TF-IDF index over *template_str*, then for every existing
    path in *file_paths* reads its paragraphs and removes any paragraph
    whose similarity to some template exceeds 0.48.

    Returns ``(file_datas, not_contents)``: File_data objects (path plus
    surviving content) and the paths/PDFs that yielded no content.

    Side effects: may rename files on disk (the .doc recovery path below)
    and removes unreadable entries from the CALLER's file_paths in place.
    """
    file_datas = []
    template_index, template_tfidf, template_dictionary = similarity_word(template_str)
    # Files that produced no readable content.
    not_contents = []
    for file in file_paths[:]:  # iterate a copy: the body mutates file_paths
        if os.path.exists(file):
            file_data = File_data()
            file_data.file_path = file
            source_str = None
            try:
                source_str = get_paragraphs(file)
            except KeyError:
                # Recovery for files python-docx cannot read: assume the
                # '.docx' is really a legacy '.doc', strip the final 'x',
                # convert it, and retry.
                docPath = file[0:-1]
                # NOTE(review): shell string with unquoted paths — breaks on
                # spaces and is shell-injectable; prefer an argument list
                # with shell=False.
                subprocess.run('mv ' + str(file) + ' ' + docPath, shell=True)
                if os.path.exists(docPath):
                    try:
                        docx_path = similarity_service.thread_to_docx(docPath)
                        source_str = get_paragraphs(docx_path)
                    except:
                        similarity_service.doc_error()
                        # Conversion failed: restore the original name and
                        # drop the file from further processing.
                        subprocess.run('mv ' + str(docPath) + ' ' + file, shell=True)
                        not_contents.append(file)
                        file_paths.remove(file)
                        similarity_service.error_log()
                        continue
            except:
                # Any other parse failure: record, log, and skip the file.
                not_contents.append(file)
                file_paths.remove(file)
                similarity_service.error_log()
                continue
            if source_str and template_str:
                # Remove any paragraph whose TF-IDF similarity against some
                # template exceeds 0.48 (iterate a copy while removing).
                for content in source_str[:]:
                    template_kw_vector = template_dictionary.doc2bow(seg_sentence(content))
                    sim = template_index[template_tfidf[template_kw_vector]]
                    for s in range(len(sim)):
                        if sim[s] > 0.48:
                            source_str.remove(content)
                            break

            if not source_str:
                # Nothing left: report the sibling PDF instead, if one exists.
                pdf_path = it_exist_pdf(file)
                if pdf_path:
                    not_contents.append(pdf_path)
                else:
                    not_contents.append(file)
            # De-duplicate surviving paragraphs to cut repeat comparisons
            # downstream (set() does not preserve order).
            # NOTE(review): source_str can still be None here (e.g. the mv
            # above failed) — set(None) would raise TypeError; confirm.
            file_data.content = list(set(source_str))
            file_datas.append(file_data)

    return file_datas, not_contents


# Check whether a .docx file has a sibling PDF with the same stem.
def it_exist_pdf(docx_path: str):
    """Return the path of a '.pdf' or '.PDF' file next to *docx_path*
    (same name, extension swapped), or None if neither exists.

    FIX: the original used ``docx_path.split('.')[0]``, which truncates
    at the FIRST dot and therefore broke names such as 'report.v2.docx'
    and relative paths like './dir/file.docx'.  os.path.splitext strips
    only the final extension.
    """
    stem = os.path.splitext(docx_path)[0]
    for ext in ('.pdf', '.PDF'):
        candidate = stem + ext
        if os.path.exists(candidate):
            return candidate
    return None


# Fallback check for template text the TF-IDF filter missed.
def grow_templates(template_str, text):
    """Return True when *template_str* occurs in *text* once all spaces,
    tabs and newlines are removed from both; None for an empty template
    or no match (callers only test truthiness)."""
    if not template_str:
        return None
    # One C-level pass per string instead of three chained .replace() calls.
    squeeze = str.maketrans('', '', ' \t\n')
    if template_str.translate(squeeze) in text.translate(squeeze):
        return True
    return None


def convert_n_bytes(n, b):
    """Reinterpret integer *n* as a signed *b*-byte two's-complement value."""
    width = 8 * b
    half = 1 << (width - 1)
    return (n + half) % (1 << width) - half


def convert_4_bytes(n):
    """Reinterpret *n* as a signed 32-bit two's-complement integer
    (the b=4 case of convert_n_bytes, inlined)."""
    return (n + 0x80000000) % 0x100000000 - 0x80000000


# Compute the Java-compatible hash code of a string.
def getHashCode(str):
    """Return the Java ``String.hashCode()`` of *str* as a signed 32-bit int.

    Note: the parameter shadows the built-in ``str``; the name is kept for
    interface compatibility with existing callers.
    """
    h = 0
    for ch in str:
        # Horner's rule replaces the original O(n^2) 31**(n-1-i) powers;
        # masking keeps intermediate values in 32 bits (Java int overflow).
        h = (h * 31 + ord(ch)) & 0xFFFFFFFF
    # Reinterpret the unsigned 32-bit value as signed (what the original
    # delegated to convert_4_bytes).
    return h - 0x100000000 if h >= 0x80000000 else h


# Parse a PDF file and extract the various objects it contains.
