import json
import os
import shutil
import re
from lxml import etree

from app.libs.tools import get_number, get_float
from app.libs.zip import ZipFile


class ExportPaperYY(object):
    """Parse a PaperYY plagiarism-check report archive (.zip).

    The archive is expected to contain HTML reports under the
    'PaperYY论文检测报告' directory.  The class extracts the archive next to
    the zip file and scrapes similarity statistics out of the HTML with
    XPath expressions that are tied to PaperYY's report layout.
    """

    def __init__(self, ppyy):
        # `ppyy` is the path to the report zip; the extraction directory
        # is the same path with the '.zip' suffix stripped.
        self.report = ppyy
        self.report_dir = ppyy.split('.zip')[0]

    def export(self):
        """Extract every member of the report archive into report_dir."""
        zip_file = ZipFile(self.report)
        for member in zip_file.namelist():
            zip_file.extract(member, self.report_dir)

    def show(self):
        """Print the archive's member names (debugging helper)."""
        zip_file = ZipFile(self.report)
        for member in zip_file.namelist():
            print(member)

    def _read_html(self, path):
        """Return the UTF-8-decoded text of an extracted report file.

        Uses a context manager so the handle is closed even if decoding
        raises (the original code leaked the handle on error).
        """
        with open(path, "rb") as f:
            return f.read().decode('utf-8')

    def Orianna(self):
        """Return the total text-copy ratio (总文字复制比) as a string.

        Scraped from the cited-text report; the XPath is layout-specific.
        """
        orianna = self.report_dir + '/PaperYY论文检测报告/全文标明引文报告.html'
        tree = etree.HTML(self._read_html(orianna))
        # NOTE: the report page also carries title/author/date and several
        # per-document counters; only the total rate is extracted here.
        totalRate = tree.xpath('/html/body/div[3]/div[1]/div[2]/div[2]/div[4]/div[3]/div/ul/li[3]/span/text()')[
            0].replace('总文字复制比：', '')
        return totalRate

    def Galio(self):
        """Scrape per-section similarity info from the cited-text report.

        Returns a list of dicts, one per section, with keys:
        sectionTitle, sectionSumTotal (character count), sectionSimilarRate,
        sectionSources (list of matched-source dicts) and sectionContent
        (the section's raw inner HTML).
        """
        galio = self.report_dir + '/PaperYY论文检测报告/全文标明引文报告.html'
        tree = etree.HTML(self._read_html(galio))
        galioOriginContent = tree.xpath("//div[@class='yuanwen']")
        sections = []
        for index, section_dom in enumerate(galioOriginContent):
            # Serialize the section body and strip its wrapper div tags.
            content = etree.tostring(section_dom, encoding='utf-8').decode('utf-8').replace(
                '<div class="yuanwen">', '').replace('</div>', '')
            # The i-th <dl> in the document is the i-th section's header.
            sectionTitleDom = tree.xpath('//dl')[index]
            sectionTitle = sectionTitleDom.xpath('./dt/span[@class="sumInfo"]/text()')[0]
            sectionSumTotal = sectionTitleDom.xpath('./dt/span[@class="sumTotal"]/text()')[0]
            sectionSimilarRate = sectionTitleDom.xpath('./dd/text()')[0].replace(' ', '').replace('\n', '').replace(
                '文字复制比：', '')
            simpleSourceDom = sectionTitleDom.xpath('./following-sibling::div[1]')[0]
            sectionSourceInfo = []
            # Only sections followed by a simple source table carry sources.
            if simpleSourceDom.attrib['class'] == 'simply_table':
                simpleSources = simpleSourceDom.xpath('./table[@class="simp_table"]/tr[not(@class)]')
                for row in simpleSources:
                    sourceInfoDom = row.xpath('./td[@class="org_content"]/ul')[0]
                    try:
                        title = sourceInfoDom.xpath('./li')[0].xpath('./a/text()')[0]
                    except IndexError:
                        # Some rows have no linked title; fall back to ''.
                        title = ''
                    source = sourceInfoDom.xpath('./li')[1].xpath('./text()')[0].replace(' ', '').replace('\n', '')
                    rate = row.xpath('./td[@class="quote"]/ul')[0].xpath('./li')[0].xpath('./text()')[0]
                    sectionSourceInfo.append({
                        'title': title,
                        'source': source,
                        'rate': rate,
                        # Absolute matched character count = section total * rate.
                        'sum': int(get_number(sectionSumTotal) * get_float(rate))
                    })
            sections.append({
                'sectionTitle': sectionTitle,
                'sectionSumTotal': sectionSumTotal,
                'sectionSimilarRate': sectionSimilarRate,
                'sectionSources': sectionSourceInfo,
                'sectionContent': content
            })
        return sections

    def LeBlanc(self):
        """Scrape the side-by-side comparison report (全文对照报告).

        Merges the per-section summary from Galio() with sentence-level
        matches, persists the result once as JSON under 'record/' (best
        effort), and returns the section list.
        """
        leblanc = self.report_dir + '/PaperYY论文检测报告/全文对照报告.html'
        tree = etree.HTML(self._read_html(leblanc))
        sections = []
        leblancDoms = tree.xpath('//div[@class="summary"]')
        sectionsSource = self.Galio()
        # Strips the interactive highlight attributes from matched <em> tags;
        # compiled once instead of per table row.
        em_pattern = re.compile(
            r'''<em class="similar[\s\S]{0,10}" onmouseover="highlightSetter\('[\s\S]{0,10}',false\)" onmouseout="highlightSetter\('[\s\S]{0,10}',true\)">''')
        for section_idx, section in enumerate(leblancDoms):
            sectionInfo = {
                'sectionTitle': sectionsSource[section_idx]['sectionTitle'],
                'sectionSumTotal': sectionsSource[section_idx]['sectionSumTotal'],
                'sectionSimilarRate': sectionsSource[section_idx]['sectionSimilarRate'],
                'sectionSources': sectionsSource[section_idx]['sectionSources'],
                'sectionSourcesDetail': []
            }
            compareDoms = section.xpath('./div[@class="detail_table"]/table[@class="detail_tableText"]/tr')
            for compareDom in compareDoms:
                originSentence = etree.tostring(
                    compareDom.xpath('./td[@class="Origin_text"]/div[@class="Sim1"]/p')[0],
                    encoding='utf-8').decode('utf-8').replace('<p>', '').replace('</p>', '')
                # Keep only the span from the first '<em' to the end of the
                # last '</em>' (5 == len('</em>')).
                startIndex = originSentence.find('<em')
                endIndex = originSentence.rfind('</em>')
                similarCount = ExportPaperYY.countSimilar(
                    compareDom.xpath('./td[@class="Origin_text"]/div[@class="Sim1"]/p/em'))
                origin_s = em_pattern.sub('<em class="similar">', originSentence[startIndex:endIndex + 5])
                info = {
                    'originSentence': origin_s,
                    'similarCount': similarCount,
                    'source': []
                }
                sourceDetails = compareDom.xpath(
                    './td[@class="Si_text"]/div[@class="add_tabBox"]/div[@class="bd"]/div[@class="siminfo"]/p')
                # The first <p> is a header; the rest carry source excerpts.
                content = [etree.tostring(detail, encoding='utf-8').decode('utf-8')
                           for detail in sourceDetails[1:]]
                titles = []
                authors = []
                sources = []
                sourceTitles = compareDom.xpath(
                    './td[@class="Si_text"]/div[@class="add_tabBox"]/div[@class="bd"]/div[@class="siminfo"]/ul')
                for sourceTitle in sourceTitles:
                    titles.append(sourceTitle.xpath('./li[1]/text()')[1].replace(' ', '').replace('\n', ''))
                    authors.append(sourceTitle.xpath('./li[2]/text()')[1].replace(' ', '').replace('\n', ''))
                    sources.append(sourceTitle.xpath('./li[2]/text()')[2].replace(' ', '').replace('\n', ''))
                for i, title in enumerate(titles):
                    info['source'].append({
                        'title': title,
                        'author': authors[i],
                        'source': sources[i],
                        'content': content[i]
                    })
                sectionInfo['sectionSourcesDetail'].append(info)
            sections.append(sectionInfo)
        # BUGFIX: the original dumped the (partial) list inside the loop in
        # append mode, producing an invalid JSON file of concatenated partial
        # documents.  Write the complete result exactly once instead.
        try:
            with open('record/' + self.report_dir + '.json', 'w') as f:
                f.write(json.dumps(sections))
        except OSError:
            # Best effort: e.g. the 'record' directory may not exist.
            pass
        return sections

    @staticmethod
    def countSimilar(doms):
        """Sum the lengths of the direct text nodes of the given <em> tags.

        An element without a direct text node contributes 0 (the original
        raised IndexError on that case).
        """
        total = 0  # renamed from `sum`, which shadowed the builtin
        for node in doms:
            texts = node.xpath('./text()')
            if texts:
                total += len(texts[0])
        return total

    def findAllFile(self, path):
        """List the files in `path`, sorted numerically by name stem.

        Assumes names like '12.html' — a numeric stem followed by a
        5-character suffix (TODO confirm against the caller).
        """
        fileList = os.listdir(path)
        fileList.sort(key=lambda x: int(x[:-5]))
        return fileList

    def remove(self):
        """Delete the report archive and its extracted directory."""
        os.remove(self.report)
        shutil.rmtree(self.report_dir)
