import json
import math
import re
from datetime import datetime

from app.config.secure import *
from lxml import etree
import bs4
import unicodedata
from app.models.document_dom import DocumentDom
from app.models.base import db
from app.libs.tools import nearest


class ParseDocumentHtml:
    """Parse the Spire.Doc-generated ``word.html`` file of an order.

    Provides plain-text extraction (:meth:`content`), paragraph/sentence
    segmentation (:meth:`parseContent`) and a flattened DOM-path table
    (:meth:`parseDom`, results accumulated in ``self.domModel``).

    NOTE(review): relies on ``WordHtml_PATH`` star-imported from
    ``app.config.secure`` and on ``nearest`` from ``app.libs.tools``.
    """

    def __init__(self, order_number):
        """Load ``{WordHtml_PATH}{order_number}/word.html`` into memory.

        The BOM-prefixed XHTML doctype that Spire.Doc emits is stripped so
        lxml/bs4 parse the content as plain HTML.
        """
        self.order_number = order_number
        word_html = f'{WordHtml_PATH}{order_number}/word.html'
        with open(word_html, "rb") as f:
            self.HtmlContent = f.read().decode('utf-8').replace(
                '﻿<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">',
                '')
        self.domList = []   # dedup cache of DOM paths already recorded by dom_path()
        self.domModel = []  # row dicts produced by dom_path()

    def content(self):
        """Return the document's visible text, one ``<p>`` per line."""
        tree = etree.HTML(self.HtmlContent)
        paragraphs = tree.xpath('//p')
        # join() instead of repeated += : avoids quadratic string building
        return ''.join(
            paragraph.xpath('string(.)').strip() + '\n' for paragraph in paragraphs
        )

    def parseContent(self):
        """Segment the document into paragraph dicts and sentence dicts.

        Returns:
            tuple: ``(paragraphs, sentences)``; each item carries the owning
            paragraph's attrs (JSON), dominant span style, check flag,
            content and paragraph index.
        """
        soup = bs4.BeautifulSoup(self.HtmlContent, "lxml")
        paragraphs = []
        sentences = []
        paragraph_index = 0
        for paragraph in soup.select("p"):
            # NFKC-normalize to fold full-width characters emitted by Word
            content = unicodedata.normalize('NFKC', paragraph.text).strip()
            if content == '':
                continue
            paragraph_attr = paragraph.attrs
            span_style = self.guessSpanStyle(paragraph.findAll('span'))
            info = {
                'ParagraphAttr': json.dumps(paragraph_attr),
                'ParagraphSpanStyle': span_style,
                'check': self.paragraphNeedCheck(paragraph_attr, span_style, content),
                'content': content,
                'paragraph_index': paragraph_index
            }
            paragraphs.append(info)
            sentences.extend(self.cut_paragraph(info))
            paragraph_index += 1
        return paragraphs, sentences

    def parseDom(self):
        """Walk ``<body>`` and populate ``self.domModel`` via traverse()."""
        soup = bs4.BeautifulSoup(self.HtmlContent, "html.parser")
        self.traverse(soup.body)

    def dom_path(self, domInfo):
        """Record every previously-unseen prefix of the node path *domInfo*.

        *domInfo* is a root-to-leaf list of ``{'node', 'attrs', 'index'}``
        dicts, terminated by a ``{'node': 'text', 'value': ...}`` leaf when
        the path carries visible text. One row per new path is appended to
        ``self.domModel``.
        """
        domNameList = []
        now = int(datetime.now().timestamp())
        for item in domInfo:
            if item['node'] != 'text':
                domNameList.append(f"{item['node']}_{item['index']}")
        for index in range(1, len(domNameList) + 1):
            current_node = domInfo[index - 1]
            level = index - 1
            path = '-'.join(domNameList[0:index])
            # '#' marks short <tr> rows (set in traverse); strip it for dedup
            if path.replace('#', '') in self.domList:
                continue
            self.domList.append(path.replace('#', ''))
            if current_node['node'] == 'text':
                continue
            # only span / short-table-row nodes carry the leaf text
            text = domInfo[-1]['value'] if current_node['node'] in ['span', 'tr#'] else None
            if text and 'created with Spire.Doc' in text:
                text = ''  # drop the Spire.Doc watermark line
            paragraph = 1 if text else 0
            if current_node['node'] == 'span' and 'tr#' in path:
                paragraph = 0  # span under a short row: the row already counts
            params = {
                'create_time': now,
                'status': 1,
                'order_number': self.order_number,
                'node': current_node['node'],
                'index': current_node['index'],
                'level': level,
                # scrub absolute positioning and escaped font quoting from attrs
                'attrs': json.dumps(current_node['attrs']).replace('position:absolute;', '').replace(
                    '\\\"Microsoft YaHei\\\"', 'Microsoft YaHei'),
                'parent': f"{domInfo[index - 2]['node']}_{domInfo[index - 2]['index']}" if level > 0 else None,
                'text': text,
                'path': path,
                'paragraph': paragraph
            }
            self.domModel.append(params)

    def traverse(self, t, current_path=None):
        """Depth-first walk of *t*, feeding each leaf path to dom_path().

        ``<p>`` elements are collapsed: the dominant span's attrs plus the
        paragraph's full normalized text become one span/text leaf.
        """
        if current_path is None:
            current_path = [
                {
                    'node': f'{t.name}',
                    'attrs': t.attrs,
                    'index': 0
                }
            ]
        if t.name in ['p']:
            tags = t.find_all('span')
            text = unicodedata.normalize('NFKC', t.text).strip()
            span_tags = []
            other_tags = []
            # NOTE(review): find_all('span') yields only spans, so other_tags
            # is always empty and the loop below it is dead — possibly
            # find_all() with no filter was intended. Preserved as-is.
            for item in tags:
                if item.name == 'span':
                    span_tags.append(item)
                else:
                    other_tags.append(item)
            if len(span_tags) > 0:
                guess_span_attrs = self.guessSpanAttrs(span_tags)
                domInfo = current_path + [
                    {
                        'node': 'span',
                        'attrs': guess_span_attrs,
                        'index': 0
                    },
                    {
                        'node': 'text',
                        'value': text
                    }
                ]
                self.dom_path(domInfo)
            for item in other_tags:
                children = item.find_all(recursive=False)
                for index, tag in enumerate(children):
                    if not tag.find():
                        # leaf element: record its text node directly
                        domInfo = current_path + [
                            {
                                'node': tag.name,
                                'attrs': tag.attrs,
                                'index': index
                            },
                            {
                                'node': 'text',
                                'value': tag.find(text=True)
                            }
                        ]
                        self.dom_path(domInfo)
                    else:
                        self.traverse(tag, current_path + [{
                            'node': tag.name,
                            'attrs': tag.attrs,
                            'index': index
                        }, ])
        else:
            children = t.find_all(recursive=False)
            for index, tag in enumerate(children):
                if not tag.find():
                    # leaf element: its first text node is the value
                    value = tag.find(text=True)
                    domInfo = current_path + [
                        {
                            'node': tag.name,
                            'attrs': tag.attrs,
                            'index': index
                        },
                        {
                            'node': 'text',
                            'value': value
                        }
                    ]
                    self.dom_path(domInfo)
                else:
                    if tag.name == 'tr':
                        text = unicodedata.normalize('NFKC', tag.text).strip()
                        if len(text) < 65:
                            # short rows are renamed 'tr#' in the soup itself so
                            # the recursion below also sees the marker
                            tag.name = 'tr#'
                            domInfo = current_path + [
                                {
                                    'node': tag.name,
                                    'attrs': tag.attrs,
                                    'index': index
                                },
                                {
                                    'node': 'text',
                                    'value': text
                                }
                            ]
                            self.dom_path(domInfo)
                    self.traverse(tag, current_path + [{
                        'node': tag.name,
                        'attrs': tag.attrs,
                        'index': index
                    }, ])

    def guessSpanStyle(self, spans):
        """Return the cleaned style covering the most characters in *spans*.

        Styles are weighted by the stripped text length of each span; ties
        keep the first-seen style. Returns '' when *spans* is empty.
        """
        style_weight = {}
        for span in spans:
            # .get(): spans without a style attribute no longer raise KeyError
            style = self.cleanStyle(span.attrs.get('style', ''))
            style_weight[style] = style_weight.get(style, 0) + len(span.text.strip())
        return self.maxCountStyle(style_weight)

    def guessSpanAttrs(self, spans):
        """Return the cleaned attrs dict covering the most characters in *spans*."""
        attrs_weight = {}
        for span in spans:
            key = json.dumps(self.cleanAttrs(span.attrs))
            attrs_weight[key] = attrs_weight.get(key, 0) + len(span.text.strip())
        return self.maxCountAttrs(attrs_weight)

    def maxCountStyle(self, spanStyleContent):
        """Return the key with the highest strictly positive count ('' if none)."""
        best_key = ''
        best_count = 0  # renamed from 'max' to avoid shadowing the builtin
        for key, count in spanStyleContent.items():
            if count > best_count:
                best_count = count
                best_key = key
        return best_key

    def maxCountAttrs(self, spanAttrsContent):
        """Return the JSON-decoded key with the highest count ({} if none)."""
        best_key = ''
        best_count = 0
        for key, count in spanAttrsContent.items():
            if count > best_count:
                best_count = count
                best_key = key
        try:
            return json.loads(best_key)
        except ValueError:  # '' (no winner) or malformed JSON
            return {}

    def cleanStyle(self, style):
        """Drop color declarations and empties from a ``;``-joined style string."""
        return ''.join(
            attr + ';' for attr in style.split(';') if attr and 'color' not in attr
        )

    def cleanAttrs(self, attrs):
        """Return *attrs* with color declarations stripped from ``style``.

        NOTE(review): attrs without a ``style`` key yield ``{}`` — every
        other attribute is discarded. Preserved as-is; confirm intent.
        """
        if 'style' not in attrs:
            return {}
        clean_attrs = attrs.copy()
        clean_attrs['style'] = self.cleanStyle(attrs['style'])
        return clean_attrs

    def paragraphNeedCheck(self, paragraphAttr, paragraphSpanStyle, content):
        """Decide whether a paragraph should be checked (segmented).

        Skips very short paragraphs, TOC/heading/caption classes, centered
        paragraphs, declaration boilerplate and short keyword lines.
        """
        if len(content) < 9:
            return False
        if 'class' in paragraphAttr:
            paragraphClass = paragraphAttr['class'][0]
            if 'TOC' in paragraphClass or '目录' in paragraphClass:
                return False
            if 'Heading-2' in paragraphClass or '标题' in paragraphClass:
                return False
            if 'Caption' in paragraphClass:
                return False
        if 'style' in paragraphAttr:
            paragraphStyle = paragraphAttr['style']
            if 'text-align:center' in paragraphStyle:
                return False
        if '本人所呈交的毕业论文' in content or '本人郑重声明' in content:
            return False
        if ('关键词:' in content or '关键词：' in content) and len(content) < 40:
            return False
        return True

    def SplitSentences(self, splitstring, sentecne):
        """Split *sentecne* on the capturing regex *splitstring*, keeping each
        delimiter attached to the preceding chunk (a trailing '' may remain)."""
        sentences = re.split(splitstring, sentecne)
        sentences.append("")
        # pair (text, delimiter) so e.g. '你好。' stays one chunk
        return ["".join(pair) for pair in zip(sentences[0::2], sentences[1::2])]

    def merge_sub_sentence(self, sentence_list):
        """Re-merge comma-level fragments into chunks of roughly <= 50 chars.

        Cut points are chosen with ``nearest`` so every chunk ends exactly on
        an original fragment boundary.
        """
        if len(sentence_list) == 1:
            return sentence_list
        sentences = ''.join(sentence_list)
        # renamed locals: the original shadowed builtins sum/str/slice
        total = 0
        mark = []  # index of the last character of each fragment
        cursor = 0
        for item in sentence_list:
            total += len(item)
            cursor += len(item)
            mark.append(cursor - 1)
        if 60 > total >= 50:
            # just over one chunk's worth: keep it whole
            return [sentences]
        merged = []
        n_slices = math.ceil(total / 50)
        average_base = math.ceil(total / n_slices)
        start = 0
        for i in range(1, n_slices):
            # snap the ideal cut position to the nearest fragment boundary
            target = nearest(mark, i * average_base)
            merged.append(sentences[start:target + 1])
            start = target + 1
        merged.append(sentences[start:])
        return merged

    def cut_paragraph(self, info):
        """Split a paragraph dict into sentence dicts of <= ~55 characters.

        Unchecked or short paragraphs pass through as a single copied item;
        longer ones are split on sentence punctuation, then on commas with
        re-merging for oversized sentences.
        """
        sentence_info = []
        if not info['check']:
            sentence_info.append(info.copy())
            return sentence_info
        length = len(info['content'])
        if 55 >= length > 0:
            sentence_info.append(info.copy())
            return sentence_info
        for sentence in self.SplitSentences('([。！；;])', info['content']):
            if len(sentence.strip()) > 55:
                sub_sentences = self.SplitSentences('([,，])', sentence)
                for chunk in self.merge_sub_sentence(sub_sentences):
                    if chunk.strip():
                        item = info.copy()
                        item['content'] = chunk
                        sentence_info.append(item)
            elif sentence.strip():
                item = info.copy()
                item['content'] = sentence
                sentence_info.append(item)
        return sentence_info

    def guessParagraphType(self, paragraph, style):
        # NOTE(review): unimplemented stub, kept for interface stability.
        pass

    def parse(self):
        # NOTE(review): builds the tree but does nothing with it — looks
        # unfinished; confirm before removing.
        tree = etree.HTML(self.HtmlContent)


if __name__ == '__main__':
    # Ad-hoc driver: parse one order's word.html into DOM rows.
    parser = ParseDocumentHtml('81655149914400')
    parser.parseDom()
