import json
import os
import shutil
import sys

import facade
import parsel
from xjlibrary.mdatetime.mtime import getTodayDate
from xjlibrary.mdatetime.mtime2 import MDateTimeUtils
from xjlibrary.our_file_dir import BaseDir

# Resolve this script's own directory and make it importable, so the
# sibling modules (AuthorOrganSort, id_encode) below can be found.
curPath = BaseDir.get_file_dir_absolute(__file__)
print(curPath)
sys.path.insert(0, curPath)  # insert at front so local modules win

from AuthorOrganSort import SortCreatorOrgan
from id_encode import GetLngid

curPath = BaseDir.get_file_dir_absolute(__file__)
configfile = BaseDir.get_new_path(curPath, "db.ini")
# Project root is two levels up; all download artifacts live beneath it.
TopPath = BaseDir.get_upper_dir(curPath, -2)
sPath = BaseDir.get_new_path(TopPath, "download", "astmjournal", "download", "article")
adb3Path = BaseDir.get_new_path(TopPath, "download", "astmjournal", "download", "adb3")
BaseDir.create_dir(adb3Path)


class ParaArticle(object):
    """Parse downloaded ASTM journal article pages into sqlite.

    Each downloaded file holds one json document per line (raw article html
    plus download metadata). ``select_file`` parses every line into the
    ``base_obj_meta_a`` table; ``fromatozt`` then promotes today's rows into
    a freshly copied ``modify_title_info_zt`` (zt) db3 file.
    """

    # Single definition of the a-table insert; previously this statement was
    # duplicated verbatim in select_file and para_a_table.
    _INSERT_A_SQL = (
        "insert or ignore into base_obj_meta_a (`rawid`,`sub_db_id`,`product`,"
        "`sub_db`,`provider`,`lngid`,`down_date`,`batch`,`doi`,`source_type`,"
        "`provider_url`,`title`,`keyword`,`abstract`,`recv_date`,`accept_date`,"
        "`pub_date`,`page_cnt`,`pdf_size`,`author`,`author_1st`,`organ_1st`,"
        "`organ`,`journal_raw_id`,`journal_name`,`pub_year`,`vol`,`num`,`issn`,"
        "`publisher`,`country`,`language`,`coden`,`fulltext_type`) values "
        "(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)")

    # Rows are buffered in memory and flushed once the buffer exceeds this.
    _FLUSH_THRESHOLD = 3000

    def __init__(self):
        """Wire up logging, the mysql config and the target a-table db3,
        copying the db3 from the template directory on first run."""
        self.logger = facade.get_streamlogger()
        self.mysqlutile = facade.MysqlUtiles(configfile, "db", logger=self.logger)
        self.db3path_a = BaseDir.get_new_path(adb3Path, 'base_obj_meta_a_template_qkwx.db3')
        if not BaseDir.is_file_exists(self.db3path_a):
            BaseDir.copy_file_to_dir("./template/base_obj_meta_a_template_qkwx.db3", adb3Path)
        # Buffered a-table row tuples awaiting insert.
        self.listpara = []
        self.dbsqlutils = facade.Sqlite3Utiles(logger=self.logger).Sqlite3DBConnectFromFilePath(
            self.db3path_a, encoding="utf-8")

    def _flush_a_rows(self):
        """Insert the buffered rows into base_obj_meta_a and clear the buffer."""
        if self.listpara:
            self.dbsqlutils.ExeSqlliteMany(self._INSERT_A_SQL, self.listpara)
            self.listpara = []

    def select_file(self):
        """Parse every json line of every downloaded article file into the
        a-table, flushing whatever remains buffered at the end."""
        for filepath in BaseDir.get_dir_all_files(sPath):
            for line in BaseDir.read_file_r_mode_yield(filepath):
                self.para_a_table(line)
        self._flush_a_rows()

    def para_a_table(self, line):
        """Parse one downloaded json line into an a-table row tuple.

        The tuple is appended to ``self.listpara``; the buffer is flushed to
        sqlite whenever it grows past ``_FLUSH_THRESHOLD`` rows.

        :param line: one json document with keys downdate, pubdate, source,
            gch, url and html (the raw article page).
        """
        record = json.loads(line)
        downdate = record["downdate"]
        pubdate = record["pubdate"].strip()
        source = record["source"]
        gch = record["gch"]
        url = record["url"]
        print(url)
        selector = parsel.Selector(text=record["html"])

        # ---- fields taken directly from <meta> tags ----------------------
        doi = selector.xpath("//meta[@name='doi']/@content").get()
        keyword = selector.xpath("//meta[@name='mc-keywordsen']/@content").get()
        # The keyword meta tag can be absent; guard against None (the
        # original code raised AttributeError here).
        if keyword:
            keyword = keyword.replace("~", ";").replace("; ", ";").strip(";")
        else:
            keyword = ""
        volume = selector.xpath("//meta[@name='citation_volume']/@content").get()
        issue = selector.xpath("//meta[@name='citation_issue']/@content").get()
        issn = selector.xpath("//meta[@name='citation_issn']/@content").get()
        title = selector.xpath("//meta[@name='citation_title']/@content").get()
        pub_date_meta = selector.xpath("//meta[@name='citation_publication_date']/@content").get()
        publisher = selector.xpath("//meta[@name='citation_publisher']/@content").get()
        abstract = selector.xpath("//meta[@name='abstract']/@content").get()
        # The abstract meta content is itself html; strip its markup. Guard
        # against a missing tag for the same reason as keyword.
        if abstract:
            abstract = "".join(parsel.Selector(text=abstract).xpath(".//text()").getall())
        else:
            abstract = ""

        # ---- fields inside the article body snippet ----------------------
        snippet = selector.xpath("//mcx_snippet_start/div[1]")
        pdfsize = snippet.xpath("./table/tr[2]/td[2]/text()").get()
        if pdfsize:
            pdfsize = pdfsize.replace("PDF (", "").replace(")", "")
        pages = snippet.xpath("./table/tr[2]/td[3]/text()").get()

        dates = ""
        coden = ""
        received_date = ""
        accepted_date = ""
        author_tags = []
        for ptag in snippet.xpath("./p"):
            # A <p> containing an <i> child is an author paragraph
            # (author name, affiliation in italics).
            if "".join(ptag.getall()).find("<i>") > -1:
                author_tags.append(ptag)
            text = "".join(ptag.xpath(".//text()").getall())
            if text.find("(") > -1:
                # Parenthesised workflow dates, e.g. "(Received ...; accepted ...)".
                dates = text
            elif text.find("CODEN") > -1:
                coden = text.replace("CODEN:", "").strip()

        if dates:
            for stage in dates.replace("(", "").replace(")", "").split(";"):
                if stage.find("Received") > -1:
                    received_date = stage.replace("Received", "").strip()
                elif stage.find("accepted") > -1:
                    accepted_date = stage.replace("accepted", "").strip()

        # ---- normalise the dates -----------------------------------------
        if received_date:
            received_date = MDateTimeUtils.date_format(received_date, "%d %B %Y")
        if accepted_date:
            if accepted_date.endswith("200"):
                # Some pages drop the last digit of the year ("15 November
                # 200"); substitute the publication year. Only the trailing
                # token is replaced — str.replace would have rewritten the
                # first "200" found anywhere in the string.
                accepted_date = accepted_date[:-3] + pub_date_meta[:4]
                accepted_date = MDateTimeUtils.date_format(accepted_date, "%d %B %Y")
            elif accepted_date.startswith("0 "):
                # Missing day-of-month ("0 June 2004"): parse with a dummy
                # day, then blank the day back out as "00".
                accepted_date = MDateTimeUtils.date_format("01 " + accepted_date[2:], "%d %B %Y")
                accepted_date = accepted_date[:-2] + "00"
            else:
                accepted_date = MDateTimeUtils.date_format(accepted_date, "%d %B %Y")
        if pubdate:
            if pubdate.startswith("00"):
                # Day-of-month missing from the download-stage metadata.
                pubdate = pubdate.replace("00 ", "01 ")
            pubdate = MDateTimeUtils.date_format(pubdate, "%d %B %Y")

        # ---- authors and affiliations ------------------------------------
        authordicts = {}
        for tag in author_tags:
            name = tag.xpath("./text()").get()
            affiliation = tag.xpath("./i/text()").get()
            if name:
                name = name.strip()
            if affiliation:
                affiliation = affiliation.strip()
            authordicts[name] = affiliation
        author, organ = SortCreatorOrgan(authordicts)
        author_1st = ''
        organ_1st = ''
        if author_tags:
            author_1st = author_tags[0].xpath("./text()").get()
            organ_1st = author_tags[0].xpath("./i/text()").get()
            if author_1st:
                author_1st = author_1st.strip()
            if organ_1st:
                organ_1st = organ_1st.strip()

        # ---- assemble the a-table row ------------------------------------
        rawid = doi
        sub_db_id = "00074"  # fixed sub-database id for this journal source
        lngid = GetLngid(sub_db_id, rawid)
        batch = MDateTimeUtils.datetime_to_string(
            MDateTimeUtils.get_now_datetime(), '%Y%m%d_%H%M%S')
        self.listpara.append((
            rawid, sub_db_id, "ASTM", "QK", "ASTM",
            lngid, downdate, batch, doi, 3,
            url, title, keyword, abstract, received_date,
            accepted_date, pubdate, pages, pdfsize, author,
            author_1st, organ_1st, organ, gch, source,
            pubdate[:4], volume, issue, issn, publisher,
            "US", "EN", coden, "pdf"))
        if len(self.listpara) > self._FLUSH_THRESHOLD:
            self._flush_a_rows()

    def copyTempDB3(self, temp_path, copy_out):
        """Copy the zt template db3 into ``copy_out`` as a dated file.

        :param temp_path: path of the template db3 file.
        :param copy_out: existing output directory.
        :return: path of the newly created db3 file.
        :raises FileNotFoundError: if the template file or the output
            directory does not exist.
        """
        if not os.path.isfile(temp_path):
            raise FileNotFoundError(temp_path)
        if not os.path.isdir(copy_out):
            raise FileNotFoundError(copy_out)
        new_db3_name = 'astmjournal_' + str(getTodayDate()) + '.db3'
        new_db3_path = os.path.join(copy_out, new_db3_name)
        shutil.copyfile(temp_path, new_db3_path)
        return new_db3_path

    def fromatozt(self):
        """Promote today's a-table rows into a fresh zt db3.

        Selects every a-table row whose batch timestamp is later than today's
        midnight and inserts it into ``modify_title_info_zt`` inside a newly
        copied template db3.
        """
        temp_db3 = os.path.join(curPath, "zt_template", "zt_template.db3")
        BaseDir.create_dir(adb3Path)
        new_db3_path = self.copyTempDB3(temp_db3, adb3Path)
        self.db3_zt = facade.Sqlite3Utiles(logger=facade.get_streamlogger()).Sqlite3DBConnectFromFilePath(
            new_db3_path, encoding="utf-8")
        sql = "select * from `base_obj_meta_a` where batch > '{}'".format(
            MDateTimeUtils.datetime_to_string(MDateTimeUtils.get_now_datetime(), '%Y%m%d_000000'))
        rows = self.dbsqlutils.SelectFromSqlliteFetchall_dicts(sql)
        print(rows)
        # Batch label is loop-invariant; compute it once.
        batch = MDateTimeUtils.get_today_date_strings() + "00"
        listpara = []
        for row in rows:
            print("************************")
            print(row["lngid"])
            listpara.append(
                (row["lngid"], row["rawid"], row['doi'], row['title'], row['issn'],
                 row['author'], row['abstract'], row['keyword'], row['pub_date'], row['pub_year'],
                 row['language'], row['country'], "3", "2", batch,
                 "astmjournal@" + row["doi"], "astmjournal@" + row["provider_url"],
                 "astmjournal", row['publisher'], row["page_cnt"],
                 row["organ"], "astmjournal@" + row["journal_raw_id"], row["journal_name"],
                 row["vol"], row["num"], "1"))
        print(listpara)
        sql = "insert or ignore into modify_title_info_zt (`lngid`,`rawid`,`identifier_doi`,`title`,`identifier_eissn`,`creator`,`description`,`subject`,`date_created`,`date`,`language`,`country`,`type`,`medium`,`batch`,`provider_id`,`provider_url`,`provider`,`publisher`,`pagecount`, `creator_institution`,`gch`,`source`,`volume`,`issue`,`if_pdf_fulltext`) values (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)"
        self.db3_zt.ExeSqlliteMany(sql, listpara)


def main():
    """Run the full pipeline: parse article files, then export to zt."""
    article_parser = ParaArticle()
    # Stage 1: parse the downloaded json lines into the a-table.
    article_parser.select_file()
    # Stage 2: promote today's a-table rows into the zt db3.
    article_parser.fromatozt()


# Run the full parse/export pipeline when executed as a script.
if __name__ == "__main__":
    main()
