import os
import re
import shutil
import sys
import time

import facade
from bs4 import BeautifulSoup
from xjlibrary.mdatetime.mtime import getTodayDate
from xjlibrary.our_file_dir import BaseDir

# Resolve the project-relative working directories. All paths are derived
# from this file's location, assumed to sit two levels below the project root.
curPath = BaseDir.get_file_dir_absolute(__file__)
TopPath = BaseDir.get_upper_dir(curPath, -2)
# Input: downloaded abstract-detail HTML files to be parsed.
sPath = BaseDir.get_new_path(TopPath, "download", "cell", "download", "abstractdetails")
# Output: directory receiving the generated .db3 databases.
db3dir = BaseDir.get_new_path(TopPath, "download", "cell", "download", "db3")
# Processed HTML files are moved here after a successful parse.
out_path = BaseDir.get_new_path(TopPath, "download", "cell", "download", "abstractdetails2")
BaseDir.create_dir(db3dir)
BaseDir.create_dir(out_path)
# Directory holding the template database (zt_template.db3).
db3_temp_Path = BaseDir.get_new_path(curPath, "template")


class ParseFulltext(object):
    """Parse downloaded Cell-journal abstract HTML files into a db3 batch DB.

    Files are read from ``sPath``, parsed with BeautifulSoup, buffered as
    metadata rows in ``self.sqlList``, flushed in batches into a copy of the
    template db3, and finally moved to ``out_path``.  Missing titles and
    issue information are back-filled from a MySQL database.
    """

    def __init__(self):
        self.logger = facade.get_streamlogger()
        # MySQL connection used to back-fill titles / volume / issue data
        # that is absent from the downloaded HTML.
        self.mysqlutils = facade.MysqlUtiles(BaseDir.get_new_path(curPath, "db.ini"), "db", self.logger)
        # Abstract-page URL pattern: journal abbreviation, then article pii.
        self.sGUrl = "https://www.cell.com/{}/abstract/{}"
        self.issnre = re.compile(r"\d{4}-\d{4}")
        # Rows buffered for batch insertion (flushed once >100 accumulate).
        self.sqlList = list()
        self.templatedb3 = BaseDir.get_new_path(db3_temp_Path, "zt_template.db3")
        self.new_db3_path = self.copyTempDB3(self.templatedb3, db3dir)
        self.db3_zt = facade.Sqlite3Utiles(logger=self.logger).Sqlite3DBConnectFromFilePath(
            self.new_db3_path, encoding="utf-8")

    def copyTempDB3(self, temp_path, copy_out):
        """Copy the template db3 into a new, date-stamped db3 file.

        :param temp_path: path of the template db3 file
        :param copy_out: existing directory the new db3 is written into
        :return: path of the newly created db3 file
        :raises FileNotFoundError: if the template file or the output
            directory does not exist
        """
        today_str = str(getTodayDate())
        # BUGFIX: raise with a message naming the missing path instead of a
        # bare, messageless FileNotFoundError.
        if not os.path.isfile(temp_path):
            raise FileNotFoundError("template db3 not found: %s" % temp_path)
        if not os.path.isdir(copy_out):
            raise FileNotFoundError("output directory not found: %s" % copy_out)
        new_db3_name = 'zt_celljournal_' + today_str + '.db3'
        new_db3_path = os.path.join(copy_out, new_db3_name)
        shutil.copyfile(temp_path, new_db3_path)
        return new_db3_path

    # Escape characters for embedding in single-quoted sqlite literals.
    def sqliteEscape(self, keyWord):
        """Return *keyWord* with ``/`` doubled (project escape convention)
        and ``'`` doubled per SQL single-quote escaping rules."""
        return keyWord.replace('/', '//').replace("'", "''")

    # Parse one target file.
    def AnalysisFile(self, filePath):
        """Parse a single abstract HTML file and buffer its metadata row.

        Appends the extracted row to ``self.sqlList`` (flushing to the db3
        once more than 100 rows have accumulated) and moves the file to
        ``out_path``.  Files without a journal title or without issue data
        are skipped; a malformed file name or a missing DOI aborts the run.
        """
        filename = BaseDir.get_filename_not_extsep(filePath)

        # File names look like <issue-pii>_<journal-abbr>_..._<article-pii>;
        # drop known noise tokens (first occurrence each) before positional
        # access.
        listname = filename.split("_")
        for noise in ("", "molecular-therapy-family", "trends"):
            if noise in listname:
                listname.remove(noise)
        if len(listname) != 4:
            print("结构错误")
            sys.exit(-1)
        vid = listname[-1]

        provider = 'celljournal'
        # Journal identifier: provider @ journal url abbreviation.
        gch = provider + "@" + listname[-3]
        # Article-level raw identifier.
        rawid = vid
        # Primary key.
        lngid = 'CELL_WK_' + vid
        # Batch stamp: YYYYMMDD00.
        batch = str(time.strftime('%Y%m%d', time.localtime())) + "00"

        type_ = '3'
        provider_id = provider + '@' + vid
        provider_url = provider + '@' + self.sGUrl.format(listname[-3], vid)
        language = 'EN'
        country = 'US'
        medium = '2'
        publisher = 'Elsevier Inc.'
        # BUGFIX: use a context manager — the original leaked the file handle.
        with open(filePath, 'r', encoding='utf-8') as fp:
            soup = BeautifulSoup(fp, "lxml")
        meta_tag = soup.find("meta", attrs={"name": "citation_journal_title"})
        if not meta_tag:
            self.logger.info(filePath)
            self.logger.info("没有期刊名，不能通过")
            return
        # Journal title.
        source = meta_tag["content"]
        print("source is " + source)
        # Article title: header, then sub-title, then the MySQL archive table.
        try:
            titles = "".join(soup.find("h1", class_="article-header__title").stripped_strings)
            titles = titles.strip()
            if titles == "":
                titles = "".join(soup.find("span", class_="article-header__sub-title").stripped_strings)
            titles = titles.strip()
            if titles == "":
                # NOTE(review): rawid comes from the file name and is embedded
                # unparameterized — confirm MysqlUtiles offers parameter binding.
                sql = "select `title` from `archive` where rawid='{}'".format(listname[-1])
                rows = self.mysqlutils.SelectFromDB(sql)
                titles = rows[0][0]
        except Exception:
            try:
                sql = "select `title` from `archive` where rawid='{}'".format(listname[-1])
                rows = self.mysqlutils.SelectFromDB(sql)
                titles = rows[0][0]
            except Exception:
                print("return ;no titles ")
                return

        titles = self.sqliteEscape(titles)
        print(titles)

        # Authors and affiliations from citation meta tags.
        author_list = [tag["content"]
                       for tag in soup.find_all("meta", attrs={"name": "citation_author"})]
        authors = ";".join(author_list)
        affiliation_list = [tag["content"]
                            for tag in soup.find_all("meta", attrs={"name": "citation_author_institution"})]
        authoraffiliations = ";".join(affiliation_list)

        # Author bios: collect sub-labels, skipping pure correspondence labels.
        creator_bio_list = list()
        skip_labels = (
            "Corresponding author",
            "Correspondence:",
            "To whom requests for reprints should be addressed.",
            "To whom requests for reprints should be sent.",
            "Correspondence",
        )
        authorsGroup = soup.find("ul", class_="rlist loa inline-bullet-list")
        if authorsGroup:
            for li in authorsGroup.find_all("li", class_="loa__item author"):
                try:
                    creator_bio = "".join(li.find("div", class_="article-header__info__sub-label").stripped_strings)
                except Exception:
                    continue
                if creator_bio and creator_bio.strip() not in skip_labels:
                    creator_bio_list.append(creator_bio)
        authors = self.sqliteEscape(authors)
        authoraffiliations = self.sqliteEscape(authoraffiliations)
        creator_bio = self.sqliteEscape(";".join(creator_bio_list))

        # DOI: meta tag, then header link, then the article-info section.
        try:
            dio = soup.find("meta", attrs={"name": "citation_doi"})["content"]
        except Exception:
            try:
                dio = soup.find('a', class_="article-header__doi__value")["href"]
                dio = dio.replace('https://doi.org/', '')
            except Exception:
                section_tag = soup.find("section", class_="article-info")
                try:
                    dio = "".join(section_tag.find_all("section")[0].p.a.stripped_strings)
                except Exception:
                    dio = "".join(section_tag.find_all("section")[1].p.a.stripped_strings)
        if not dio:
            sys.exit("dio is null please check")
        # Abstract.
        abstract_tag = soup.find("div", class_="section-paragraph")
        if abstract_tag:
            abstract = self.sqliteEscape("".join(abstract_tag.stripped_strings))
        else:
            abstract = ""
        # Keywords.
        keywords_tag = soup.find("ul", class_="rlist keywords-list inline-bullet-list")
        if keywords_tag:
            keywords = ";".join("".join(li.stripped_strings)
                                for li in keywords_tag.find_all("li"))
            keywords = self.sqliteEscape(keywords)
        else:
            keywords = ''
        # ISSN.  BUGFIX: the original passed a *set* ({"name", "..."}) where
        # bs4 expects an attrs dict, so this branch always raised and fell
        # into the citation_issn fallback.
        try:
            issn = soup.find("input", attrs={"name": "seriesISSNFltraddfilter"})["value"]
        except Exception:
            listissn = [meta["content"]
                        for meta in soup.find_all("meta", attrs={"name": "citation_issn"})
                        if meta["content"] != ""]
            issn = ";".join(listissn)

        div_tag = soup.find("div", class_="article-header__meta")
        year = '1900'
        date_created = '19000000'

        if div_tag and div_tag.find_all("span", class_="article-header__pages faded"):
            # Volume/issue, pages and date are all present in the header meta.
            volIssue = "".join(div_tag.a.stripped_strings)
            span_tags = div_tag.find_all("span", class_="article-header__pages faded")
            print(span_tags)
            pages = "".join(span_tags[0].stripped_strings)
            span_tags = div_tag.find_all("span", class_="article-header__date faded")
            data = "".join(span_tags[0].stripped_strings)

            vol = re.search(r"Volume (\d+)", volIssue).group(1)
            try:
                issue = re.search(r"ISSUE (\d+)", volIssue).group(1)
            except Exception:
                # No issue number in the header; look it up in MySQL.
                url = "/{}/issue?pii={}".format(listname[-3], listname[-4])
                sql = "select vol,Issue_text from issue where url='{}'".format(url)
                rows = self.mysqlutils.SelectFromDB(sql)
                # NOTE(review): any Issue_text without "Supplement <n>"
                # collapses to plain "Supplement" here, and the "Issue <n>"
                # fallback below is only reached when the row lookup itself
                # fails — confirm this nesting is intended.
                try:
                    issue = rows[0][1]
                    try:
                        issue = "Supplement " + re.search(r"Supplement (\d+)", issue).group(1)
                    except Exception:
                        issue = "Supplement"
                except Exception:
                    try:
                        issue = re.search(r"Issue (\d+)", issue).group(1)
                    except Exception:
                        issue = ""

            pages = pages.replace(",", "")
            date_created = time.strftime("%Y%m%d", time.strptime(data, '%B %d, %Y'))
            year = date_created[:4]
        else:
            # No header meta: recover vol/issue/date from the MySQL issue table.
            url = "/{}/issue?pii={}".format(listname[-3], listname[-4])
            sql = "select vol,Issue_text from issue where url='{}'".format(url)
            rows = self.mysqlutils.SelectFromDB(sql)
            if not rows:
                print("return issue ")
                return
            vol = rows[0][0]
            issue = rows[0][1]
            # Issue_text looks like "<date> Issue <n>[, <pages>]".
            data = issue.split("Issue")[0]
            # Try the date formats in order; the last one (as in the original)
            # raises if nothing matches.
            for fmt in ('%b %d, %Y', '%B %Y'):
                try:
                    date_created = time.strftime("%Y%m%d", time.strptime(data, fmt))
                    break
                except Exception:
                    continue
            else:
                date_created = time.strftime("%Y%m%d", time.strptime(data, '%B %d, %Y'))
            year = date_created[:4]
            listdata = issue.split("Issue")[1].split(",")
            issue = listdata[0].strip()
            try:
                pages = listdata[1].strip()
            except IndexError:
                pages = ""
        self.sqlList.append((gch, lngid, rawid, titles, authors, dio, year, date_created, issn, abstract,
                             language, country, type_, provider, provider_id, provider_url, keywords, pages, vol, issue,
                             batch, medium, publisher, source, authoraffiliations, creator_bio))

        # Flush in batches to limit per-insert overhead.
        if len(self.sqlList) > 100:
            self.InsertIntoDb()

        shutil.move(filePath, out_path)

    # Flush buffered rows into the db3 database.
    def InsertIntoDb(self):
        """Insert (or ignore on conflict) all buffered rows, then clear the
        buffer."""
        sql = "insert or ignore into modify_title_info_zt(gch,lngid,rawid, title, creator,identifier_doi, `date`,date_created, identifier_pissn,`description`, `language`, country, type, provider, provider_id, provider_url,`subject`,`page`,volume,issue,batch,`medium`,publisher,`source`,creator_institution,creator_bio) VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)"

        self.db3_zt.ExeSqlliteMany(sql, self.sqlList)
        self.sqlList.clear()

    def select(self):
        """Parse every downloaded file in ``sPath``."""
        for file in BaseDir.get_dir_all_files(sPath):
            print(file)
            self.AnalysisFile(file)
        # BUGFIX: flush the remaining (<=100) buffered rows — the original
        # only flushed inside AnalysisFile and silently dropped the tail.
        if self.sqlList:
            self.InsertIntoDb()



def main():
    """Entry point: parse every downloaded abstract file into the db3."""
    ParseFulltext().select()


if __name__ == '__main__':
    main()
