import datetime
import os
import re
import shutil
import sys
import time
from itertools import zip_longest

import bs4
import facade
from bs4 import BeautifulSoup
from xjlibrary.mprocesspoll.MThreadingRun import MThreadingRun
from xjlibrary.msqlite3.sqlite3_client import ExeSqlliteSql
from xjlibrary.our_file_dir import BaseDir

# Module-level scratch lists (not populated anywhere in this file;
# presumably leftovers or filled by sibling scripts — TODO confirm).
sqlList = list()
selectResult = list()

# Directory layout, resolved relative to this script's location.
curPath = BaseDir.get_file_dir_absolute(__file__)
# NOTE(review): looks like this walks two levels up from curPath —
# confirm against BaseDir.get_upper_dir's sign convention.
TopPath = BaseDir.get_upper_dir(curPath, -2)
# Input directory: downloaded article pages waiting to be parsed.
sPath = BaseDir.get_new_path(TopPath, "download", "cdxg_ams", "download", "archive")
# Output directory: pages are moved here after successful parsing.
sPath2 = BaseDir.get_new_path(TopPath, "download", "cdxg_ams", "download", "archive2")
BaseDir.create_dir(sPath2)
# Directory holding the local SQLite result database (ametsocjournal.db3).
db3Path = BaseDir.get_new_path(TopPath, "download", "cdxg_ams", "download", "db3")
BaseDir.create_dir(db3Path)
# Base URL prefix used when building provider_url values.
sGUrl = "https://journals.ametsoc.org/doi/abs/"
# MySQL connection settings; the "db" section is read by facade.MysqlUtiles.
configfile = BaseDir.get_new_path(curPath, "db.ini")


class ParserArchive(object):
    """Scrape article metadata from saved AMS journal HTML pages.

    For each archived page this class:
      1. looks up the matching issue / journal / volume rows in MySQL,
      2. extracts title, authors, affiliations, publication date,
         abstract and keywords from the HTML with BeautifulSoup,
      3. builds an INSERT statement for ``modify_title_info_zt`` and
         puts it on the worker thread's result queue,
      4. moves the processed file from ``sPath`` to ``sPath2``.
    """

    def __init__(self):
        self.logger = facade.get_streamlogger()
        self.mysqlutils = facade.MysqlUtiles(configfile, "db", self.logger)
        # Seed the local SQLite database from the bundled template on first run.
        if not BaseDir.is_file_exists(BaseDir.get_new_path(db3Path, 'ametsocjournal.db3')):
            BaseDir.copy_file_to_dir("./db3/ametsocjournal.db3", db3Path)

    def SelectListFromDB2(self, sql):
        """Run *sql* against MySQL and return the FIRST matching row.

        Raises IndexError when nothing matches; callers rely on every
        lookup succeeding, so an empty result is a hard error here.
        """
        rows = self.mysqlutils.SelectFromDB(sql)
        return rows[0]

    def SelectListFromDB3(self, sql):
        """Run *sql* against MySQL and return all matching rows."""
        return self.mysqlutils.SelectFromDB(sql)

    # Insert into the local SQLite database (kept for ad-hoc use; the
    # threaded pipeline writes results in dealresult() instead).
    def InsertIntoDb(self, sql):
        if not BaseDir.is_file_exists(BaseDir.get_new_path(db3Path, 'ametsocjournal.db3')):
            BaseDir.copy_file_to_dir("./db3/ametsocjournal.db3", BaseDir.get_new_path(db3Path, 'ametsocjournal.db3'))
        ExeSqlliteSql(BaseDir.get_new_path(db3Path, 'ametsocjournal.db3'), sql)

    # Parse one archived article page.
    def AnalysisFile(self, threadval, filePath, fileName):
        """Extract metadata from *filePath* and queue an INSERT statement.

        threadval -- worker-thread context; only ``result_queue`` is used.
        filePath  -- absolute path of the saved HTML page.
        fileName  -- basename; the part before the extension is the
                     ``ams_issue`` primary key.

        Exits the whole process (sys.exit) when the article header div
        is missing, after appending the path to log.txt.
        """
        result_queue = threadval.result_queue
        provider = 'ametsocjournal'
        batch = str(datetime.datetime.now().strftime('%Y%m%d')) + "00"

        # File name is "<issue id>.<ext>"; the id keys the DB lookup.
        # (renamed from ``id`` to avoid shadowing the builtin)
        record_id = fileName.split(os.extsep)[0]

        sql = "select * from `ams_issue` where id={}".format(record_id)
        rows = self.SelectListFromDB2(sql)
        print(rows)
        url = rows[6]
        # DOI-style identifier taken from the article URL path.
        vid = url.replace("/doi/abs/", "").strip()
        gch = provider + "@" + rows[1].split("_")[0]
        rawid = vid
        doi = vid
        lngid = 'AMETSOC_WK_' + vid

        # Journal / volume metadata for this issue.
        namej = rows[1].split("_")[0]
        namejs = "/toc/" + namej + "/current"
        sql = "select * from `ams_journal` where `url`='{}'".format(namejs)
        rowsj = self.SelectListFromDB2(sql)
        sql = "select * from `ams_volume` where `name`='{}'".format(namej)
        rowsv = self.SelectListFromDB2(sql)

        # fix: close the file handle instead of leaking it to the parser.
        with open(filePath, 'r', encoding='utf-8') as fp:
            soup = BeautifulSoup(fp, "lxml")
        div_tag = soup.find_all("div", class_="articleMeta ja")
        if div_tag:
            # Title
            h1_tag = div_tag[0].find_all("h1", class_="chaptertitle")
            titles = "".join(h1_tag[0].stripped_strings).strip()
            if not titles:
                print("标题不存在,请检查")
                titles = ""
            titles = self.sqliteEscape(titles)
            print(titles)
            # Authors/affiliations are complex; handled by a dedicated helper.
            auther_intact, creator_institution = self.author(div_tag)
            auther_intact = self.sqliteEscape(auther_intact)
            creator_institution = self.sqliteEscape(creator_institution)
            datas = "".join(div_tag[0].find("div", class_="publicationContentEpubDate dates").stripped_strings)
            datas = datas.replace("Published Online: ", "")
            # Dates appear either as "02 Jan 2019" or "02 January 2019".
            try:
                datestruct = time.strptime(datas, '%d %b %Y')
            except ValueError:  # fix: only catch the parse failure, not everything
                datestruct = time.strptime(datas, '%d %B %Y')
            date_created = time.strftime("%Y%m%d", datestruct)
            year = date_created[:4]
            # (removed dead fallback: strftime always returns a non-empty
            # string, so the old ``if not date_created`` branch never ran)
        else:
            # No article header: record the file and abort the whole run.
            print("头部不存在,请检查{}".format(filePath))
            with open("log.txt", 'a', encoding="utf-8") as f:
                f.write(filePath)
            sys.exit(-1)

        # Abstract (two possible page layouts).
        abstract = soup.find("div", class_="abstractSection abstractInFull")
        if abstract:
            abstract = "".join(abstract.stripped_strings)
        elif soup.find("div", class_="hlFld-Abstract"):
            div_tag = soup.find("div", class_="hlFld-Abstract")
            abstract = "".join(div_tag.stripped_strings)
        else:
            abstract = ''
        abstract = self.sqliteEscape(abstract)
        # Keywords, joined with ';'.
        div_keywords = soup.find("div", class_="hlFld-KeywordText")
        if div_keywords:
            listkey = ["".join(key.stripped_strings).strip()
                       for key in div_keywords.find_all("a", class_="attributes")]
            keywords = self.sqliteEscape(";".join(listkey))
            # fix: str.join leaves no trailing ';', so the old
            # ``keywords[:-1]`` chopped the last character of the final
            # keyword; the slice has been removed.
        else:
            keywords = ''
        # ISSNs and issue-level fields from the journal/volume rows.
        pissn = rowsj[4]
        eissn = rowsj[5]
        volume = rowsv[2]
        issue = rowsv[6]
        page = rowsv[10]
        source = rowsj[1]
        if page:
            page = page.replace("pp ", "")

        type_ = '3'
        provider_id = provider + '@' + vid
        provider_url = provider + '@' + sGUrl + vid
        language = 'EN'
        country = 'US'
        medium = '2'
        publisher = 'American Meteorological Society'

        # NOTE(review): SQL is built by string formatting (values are
        # escaped via sqliteEscape above).  Parameterized queries would
        # be safer, but the result queue transports plain SQL strings to
        # dealresult(), so the text form is kept.
        sql = ("INSERT INTO modify_title_info_zt(gch,lngid,rawid, title, creator,identifier_doi, "
               "date,date_created, identifier_pissn, identifier_eissn,description, language, country, "
               "type, provider, provider_id, provider_url,subject,page,volume,issue,batch,medium,"
               "publisher,source,creator_institution) VALUES"
               "('{gch}','{lngid}','{rawid}', '{title}', '{creator}', '{identifier_doi}', "
               "'{date}','{date_created}','{identifier_pissn}','{identifier_eissn}', '{description}', "
               "'{language}', '{country}', '{type}', '{provider}', '{provider_id}', '{provider_url}',"
               "'{subject}','{page}','{volume}','{issue}','{batch}','{medium}','{publisher}','{source}',"
               "'{creator_institution}')").format(
            gch=gch, lngid=lngid, rawid=rawid, title=titles, creator=auther_intact, identifier_doi=doi,
            date=year, date_created=date_created, identifier_pissn=pissn, identifier_eissn=eissn, description=abstract,
            language=language, country=country, type=type_,
            provider=provider, provider_id=provider_id, provider_url=provider_url, subject=keywords, page=page,
            volume=volume, issue=issue, batch=batch, medium=medium, publisher=publisher, source=source,
            creator_institution=creator_institution)
        result_queue.put(sql)
        # Move the processed page into the "done" directory.
        soupath = BaseDir.get_new_path(sPath2, BaseDir.get_file_name(filePath))
        shutil.move(filePath, soupath)

    # Escape a value for embedding in a single-quoted SQLite literal.
    def sqliteEscape(self, keyWord):
        """Double backslashes and single quotes in *keyWord*."""
        keyWord = keyWord.replace('\\', '\\\\')
        keyWord = keyWord.replace("'", "''")
        return keyWord

    # Collect "name[n];" authors and "[n]aff;" affiliations from one
    # anonymous author <div> (numbered-layout pages).
    def _numbered_author_div(self, div, number):
        authors = ''
        affs = ''
        for a_tag in div.find_all("a", recursive=False):
            name = a_tag.get_text().strip()
            if name:
                authors += "{}[{}];".format(name, number)
        group = div.find("span", class_="NLM_contrib-group")
        if group:
            for text in group.strings:
                if text.strip() != "Affiliations":
                    affs += "[{}]{};".format(number, text.strip())
        return authors, affs

    # Authors and affiliations vary a lot between layouts, so they get
    # their own helper.
    def author(self, div_tag):
        """Return ``(authors, affiliations)`` strings for the article.

        div_tag -- result of find_all("div", class_="articleMeta ja");
                   only the first element is inspected.

        Two page layouts are handled:
          * authors carry inline ``NLM_xref-aff`` markers -> pair each
            author with its marker and walk the ``NLM_contrib-group``
            siblings for the full affiliation list;
          * otherwise authors/affiliations sit in anonymous sub-divs
            (possibly nested under ``expandable-author``) and are
            numbered sequentially.
        """
        number = 1
        authorfinish = ''
        affifinish = ''
        authorGroup = div_tag[0].find_all("div", class_="hlFld-ContribAuthor")
        NLM_xref_aff_Group = authorGroup[0].find_all("span", class_="NLM_xref-aff")
        if NLM_xref_aff_Group:
            auther_intact = ''
            for auther, aff in zip_longest(authorGroup, NLM_xref_aff_Group):
                autherstring = ''
                affstring = ''
                if auther:
                    autherstring = auther.get_text()
                if aff:
                    affstring = "[{}]".format(aff.get_text())
                auther_intact += "{}{};".format(autherstring, affstring)
            auther_intact = auther_intact[:-1]  # drop trailing ';'

            # Affiliation block: one "affiliationsTtile" span per entry.
            span_tag = div_tag[0].find("span", class_="NLM_contrib-group")
            if span_tag:
                affiliationsList = span_tag.find_all("span", class_="affiliationsTtile")
            else:
                affiliationsList = None
            creator_institution = ''
            if affiliationsList:
                for affi in affiliationsList:
                    affistring = ''
                    # Walk the siblings after each title until the <br>
                    # separator, collecting markers and plain text.
                    # (renamed loop var from ``next`` — builtin shadowing)
                    for sibling in affi.next_siblings:
                        if isinstance(sibling, bs4.element.Tag):
                            if sibling.name == "br":
                                affistring += ";"
                                # Leading */** footnote markers become
                                # bracketed prefixes.  (fix: raw strings
                                # for the regex escapes)
                                if re.match(r"^\*\*", affistring.strip()):
                                    affistring = affistring.strip().lstrip("**").strip()
                                    affistring = "[**]" + affistring
                                if re.match(r"^\*", affistring.strip()):
                                    affistring = affistring.strip().lstrip("*").strip()
                                    affistring = "[*]" + affistring
                                break
                            if sibling.name == "sup" or sibling.name == "a":
                                affistring += "[" + sibling.get_text() + "]"
                                continue
                            if sibling.name == "i":
                                affistring += sibling.get_text()
                                continue
                            if sibling.name == "b":
                                continue
                            if sibling.name == "sub":
                                affistring += sibling.get_text()
                                continue
                        affistring += sibling
                    creator_institution += affistring
                creator_institution = creator_institution[:-1]  # drop trailing ';'
            return auther_intact, creator_institution
        else:
            # Numbered layout: anonymous divs, optionally nested inside
            # an "expandable-author" container.
            sub_divs = authorGroup[0].find_all("div", recursive=False)
            if sub_divs:
                for outer in sub_divs:
                    if not outer.attrs:
                        a_str, f_str = self._numbered_author_div(outer, number)
                        authorfinish += a_str
                        affifinish += f_str
                        number += 1

                    if outer.get("class", "") and outer.get("class", "")[0] == "expandable-author":
                        for inner in outer.find_all("div", recursive=False):
                            if not inner.attrs:
                                a_str, f_str = self._numbered_author_div(inner, number)
                                authorfinish += a_str
                                affifinish += f_str
                                number += 1
            return authorfinish[:-1], affifinish[:-1]

class ParserArchiveThreadRun(MThreadingRun):
    """Thread-pool driver that feeds archived HTML files to ParserArchive.

    Relies on the MThreadingRun base class for the worker pool
    (``self.thread_pool``), the collected results (``self.results`` /
    ``self.resultnum``) and the logger — none of which are visible in
    this file; semantics assumed from usage.
    """

    def __init__(self, num):
        # num: worker-thread count; main() runs with 1 because the
        # original author observed failures with more threads.
        super(ParserArchiveThreadRun, self).__init__(num)
        self.down = ParserArchive()

    def getTask(self, *args, **kwargs):
        # No pull-style task source; all jobs are pushed in setTask().
        pass

    def setTask(self, results=None, *args, **kwargs):
        # Queue every file under the archive directory as one parse job.
        # NOTE(review): jobs are added with ``self.func`` while the
        # handler below is named ``fun`` — presumably the base class
        # wraps one as the other; confirm against MThreadingRun.
        for path, dirNames, fileNames in os.walk(sPath):
            for fileName in fileNames:
                filePath = os.path.join(path, fileName)
                print(filePath)
                self.thread_pool.add_job(self.func, filePath, fileName)
        # Give workers time to drain before signalling the end of input.
        time.sleep(20)
        return "break"

    def dealresult(self, *args, **kwargs):
        # A sqlite3 connection cannot be shared across threads, so a
        # fresh connection is opened here to flush the queued SQL.
        db3utils = facade.Sqlite3Utiles(self.logger).Sqlite3DBConnectFromFilePath(
            BaseDir.get_new_path(db3Path, 'ametsocjournal.db3'))
        for result in self.results:
            db3utils.ExeSqlliteSql(result)
        db3utils.close()
        # "总共处理结果" = "total results processed"
        print("总共处理结果" + str(self.resultnum))

    def is_break(self):
        # Always allow the run loop to stop once setTask returns "break".
        return True

    def fun(self, threadval, *args, **kwargs):
        # Worker entry point: unpack one job and parse the file.
        filePath, fileName = args
        self.down.AnalysisFile(threadval, filePath, fileName)

    def thread_pool_hook(self, thread_pool_dicts, thread, *args, **kwargs) -> dict:
        # No per-thread state is needed.
        return {}


def main():
    """Entry point: parse the archive with a single worker thread.

    The pool size is deliberately 1 — the original author noted that,
    strangely, running with multiple threads caused problems.
    """
    runner = ParserArchiveThreadRun(1)
    runner.run()


# Script entry guard: only run the pipeline when executed directly.
if __name__ == "__main__":
    main()
