import sys
import os
import csv
from PyQt5.QtCore import QThread, pyqtSignal
from threading import Thread
from scihub import SciHub
import allcodes
import requests
from bs4 import BeautifulSoup
from queue import Queue
import selenium
from selenium import webdriver
from selenium.webdriver.support.wait import WebDriverWait
import json
import time
import re

# Search endpoints for the supported paper databases, indexed by the UI's
# database selector: 0 = PubMed, 1 = ScienceDirect, 2 = IEEE Xplore.
SCHOLARS_BASE_URL = \
    [
        'https://pubmed.ncbi.nlm.nih.gov/',
        'https://www.sciencedirect.com/search',
        'https://ieeexplore.ieee.org/search/searchresult.jsp'
    ]

# Starting page number and page-increment step for each database (same index order).
PAGE = [1, 0, 1]
PAGE_STEP = [1, 10, 1]


# Worker thread that queries the selected database and records paper metadata
class SearchThread(QThread):
    update_date = pyqtSignal(str)

    # SearchInfo layout: [search_url, keywords, num, beg_year, ed_year]
    def __init__(self, SearchInfo):
        """Initialize the search thread from the UI-supplied SearchInfo list."""
        super(SearchThread, self).__init__()
        # Unpack the five positional search parameters chosen in the UI.
        self.search_url, self.keywords, self.num, self.beg_year, self.ed_year = SearchInfo[:5]
        self.save_path = ''             # CSV output path (set later via get_ui_info)
        self.sh = SciHub()              # Sci-Hub session used for HTTP requests
        self.have_search = 0            # number of papers processed so far
        self.stop_flag = False          # set True to abort the running search
        self.search_only = True         # True: search only; False: download while searching
        self.headers = {}               # request headers (filled in for IEEE POSTs)
        self.searchDic = {}             # current query-parameter dict

    def run(self):
        """QThread entry point: create the CSV output file, then run the search."""
        self.buildSavePath()
        self.searchPaper(self.search_url, self.keywords, self.beg_year, self.ed_year)

    # Main paper-search routine
    def searchPaper(self, search_url, keywords, beg_year, ed_year):
        """Search the selected database page by page and save paper info.

        search_url: database index (0 PubMed, 1 ScienceDirect, 2 IEEE Xplore).
        keywords / beg_year / ed_year: query term and publication-year range.
        Progress and errors are reported through the update_date signal.
        """
        self.update_date.emit("0000--> 开始搜索...\n")

        # IEEE is queried via a JSON POST, which needs explicit headers.
        if self.search_url == 2:
            self.headers = {
                'Accept': 'application/json,text/plain,*/*',
                'Accept-Encoding': 'gzip,deflate,br',
                'Accept-Language': 'zh-TW,zh;q=0.9,en-US;q=0.8,en;q=0.7',
                'Connection': 'keep-alive',
                'Content-Length': '122',
                'Content-Type': 'application/json',
                'Referer': 'https://ieeexplore.ieee.org/search/searchresult.jsp?newsearch=true&queryText=mechanical',
                'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:27.0) Gecko/20100101 Firefox/27.0'}

        # Build the query-parameter dict and fetch the first results page.
        self.searchDic = self.get_param(search_url, keywords, beg_year, ed_year, PAGE[search_url])
        webResults = ''
        # For IEEE (index 2) web_IEEE issues its own requests.post, so no fetch here.
        if self.search_url != 2:
            webResults = self.sh.search(SCHOLARS_BASE_URL[search_url], self.searchDic)

        if self.stop_flag:  # user pressed stop
            return

        # First pass: get the total number of matching papers.
        retNum = self.processWebInfo(search_url, webResults, True)
        print("retNum:{}".format(retNum))
        if retNum == 0:
            self.totalPaper = 0
            self.update_date.emit("0000--> 未搜索到相关论文.\n")
            return
        elif retNum == -1:  # non-200 HTTP status
            return
        elif self.num > retNum:
            # Fewer results exist than requested: clamp the target count.
            self.totalPaper = retNum
            self.update_date.emit("0000--> 提示：共搜索论文 {} 篇".format(self.totalPaper))
            self.num = self.totalPaper

        # Second pass: walk the result pages and save paper info.
        page = PAGE[search_url]
        while self.have_search < self.num:
            if self.stop_flag:                  # user pressed stop
                return
            self.searchDic = self.get_param(search_url, keywords, beg_year, ed_year, page)
            results = ''
            if self.search_url != 2:
                results = self.sh.search(SCHOLARS_BASE_URL[search_url], self.searchDic)
            self.have_search = self.processWebInfo(search_url, results, False)
            # None: stop was pressed mid-page.  -1: the page failed to parse.
            # BUG FIX: -1 was previously assigned to have_search and the loop
            # kept retrying forever; abort on failure as well.
            if self.have_search is None or self.have_search == -1:
                self.update_date.emit("1002停止搜索！")
                return
            page += PAGE_STEP[search_url]  # next page
            print("have_down:{}".format(self.have_search))

        self.update_date.emit("1001搜索完成！")


    # Dispatch one page of raw response data to the per-database parser
    def processWebInfo(self, search_url, webInfo, firstTime):
        """Parse one page of search results.

        webInfo: the requests response for PubMed/ScienceDirect; unused
        (empty string) for IEEE, which fetches its own data via POST.
        firstTime: True -> return the total hit count; False -> extract
        paper info and return the updated saved-paper count.
        Returns -1 on an HTTP error or parse failure.
        """

        if self.search_url != 2:
            # Map the HTTP status code to a readable message.
            sta = "Error_Code:" + str(webInfo.status_code) + ", " + allcodes.get_status(webInfo.status_code)

            if not webInfo.status_code == requests.codes.ok:
                self.update_date.emit('0000--> 数据库访问失败')
                self.update_date.emit('0000     ' + sta + ' \n')
                return -1

            # Parse the HTML payload with BeautifulSoup.
            # NOTE(review): `s` is only bound on this branch; the IEEE branch
            # below never reads it, which is safe only while search_url and
            # self.search_url agree (callers pass self.search_url) — confirm.
            s = BeautifulSoup(webInfo.content, 'lxml')



        # Route to the parser for the selected database.
        webData = None
        if search_url == 0:
            webData = self.web_pubmed(s, firstTime)
        elif search_url == 1:
            webData = self.web_scienceDirect(s, firstTime)
        elif search_url == 2:
            #{'queryText': keyword, 'ranges': datetemp, 'pageNumber': page}
            data = {'newsearch': 'true',
                    'queryText': self.searchDic['queryText'],
                    'pageNumber': self.searchDic['pageNumber'],
                    'publicationYear': self.searchDic['ranges'],
                    'rowsPerPage': 100
                    }

            url = 'https://ieeexplore.ieee.org/rest/search'
            webData = self.web_IEEE(search_url, firstTime, url, data)

        return webData

    # Parse PubMed result pages
    def web_pubmed(self, s, firstTime):
        """Parse a PubMed results page `s` (BeautifulSoup tree).

        firstTime True -> return the total number of hits.
        firstTime False -> follow each result to its article page, extract
        doi/title/journal/date/abstract, append them to the CSV, and return
        the updated saved-paper count (-1 if the result list is empty,
        None if stop was pressed mid-loop).
        """
        # True: fetch the total hit count; False: extract paper info.
        if firstTime:
            totalPaper = 0
            amount = s.find('div', class_="results-amount")
            # NOTE(review): if 'results-amount' is absent entirely, `amount`
            # is None and the .find below raises — assumed present on every
            # normal results page; confirm against live HTML.
            if amount.find('span', class_="value"):
                r = ''
                # Keep digits only (drops thousands separators).
                for i in amount.find('span', class_="value").text:
                    if '0' <= i <= '9':
                        r += i
                totalPaper = int(r)
            return totalPaper
        else:
            papers = s.find_all('div', class_="docsum-content")  # result entries on the page
            print("论文列表数量：" + str(len(papers)))

            # No entries found: report failure.
            if not papers:
                self.update_date.emit('0000--> 获取论文列表失败，请重试！ \n')
                return -1

            # Walk the result list and extract each paper's info.
            for paper in papers:
                # Abort if the user pressed stop.
                if self.stop_flag:
                    return

                pmid = 0
                if paper.find('span', class_="docsum-pmid"):  # PubMed ID
                    pmid = paper.find('span', class_="docsum-pmid").text
                print("pmid:{}".format(pmid))

                PaperPage = self.sh.sess.get(f"https://pubmed.ncbi.nlm.nih.gov/{pmid}/", verify=False)  # open the article page
                print("PaperPage")
                PageInfo = BeautifulSoup(PaperPage.content, 'lxml')

                print("beautifulsoup")

                # Skip entries without a citation block (unsupported article types).
                if not PageInfo.find('div', class_="article-citation"):
                    continue
                print("PageInfo.find")

                # Title
                paperName = ''
                if PageInfo.find('h1', class_="heading-title"):
                    paperName = PageInfo.find('h1', class_="heading-title").text
                    paperName = self._replace_word(paperName)
                    paperName = ' '.join(paperName.split())
                print("paperName:{}".format(paperName))

                # Abstract
                abstract = "No abstract available"
                if PageInfo.find('div', class_="abstract-content selected"):
                    abstract = PageInfo.find('div', class_="abstract-content selected").text
                    abstract = self._replace_word(abstract)
                    abstract = ' '.join(abstract.split())

                # Journal
                ArticleInfo = PageInfo.find('div', class_="article-citation")
                journal = ArticleInfo.find('button', class_="journal-actions-trigger trigger").text
                journal = self._replace_word(journal)
                journal = ' '.join(journal.split())
                print("journal:{}".format(journal))

                doi = pmid
                # DOI (falls back to the pmid when no citation-doi span exists)
                if ArticleInfo.find('span', class_="citation-doi"):
                    doi = ArticleInfo.find('span', class_="citation-doi").text
                    doi = self._replace_word(doi)
                    doi = ' '.join(doi.split())[4:-1]  # drop the leading 'doi ' and trailing char
                print("doi:{}".format(doi))

                # Publication date (text before the ';' in the citation line)
                if ArticleInfo.find('span', class_="cit"):
                    date_info = ArticleInfo.find('span', class_="cit").text
                    date_info = date_info[0:date_info.find(';')]
                else:
                    date_info = 0000  # NOTE(review): this is the integer 0, not the string '0000'
                print("date_info:{}".format(date_info))

                # Assemble the row and append it to the CSV file.
                writeList = []
                datas = []
                datas.append(doi)
                datas.append(paperName)
                datas.append(journal)
                datas.append(date_info)
                datas.append(abstract)
                datas.append(f"https://pubmed.ncbi.nlm.nih.gov/{pmid}/")
                writeList.append(datas)

                print(self.save_path)
                with open(self.save_path, 'a', newline='', encoding='utf-8') as f:
                    writer = csv.writer(f)
                    writer.writerows(writeList)
                    f.close()

                self.have_search += 1
                self.update_date.emit('1003' + str(self.have_search))       # papers saved so far
                # Download while searching, if enabled.
                if not self.search_only:
                    self.update_date.emit('2001' + doi + '|' + paperName)
                print("已搜索{}篇，还剩{}篇……\n".format(self.have_search, self.num - self.have_search))
                if self.have_search >= self.num:
                    return self.have_search

            return self.have_search

    # Parse ScienceDirect result pages
    def web_scienceDirect(self, s, firstTime):
        """Parse a ScienceDirect results page `s` (BeautifulSoup tree).

        firstTime True -> return the total number of hits.
        firstTime False -> open each article page for its DOI and abstract,
        append the info to the CSV, and return the updated saved-paper count
        (-1 if the result list is empty, None if stop was pressed mid-loop).
        """
        # True: fetch the total hit count; False: extract paper info.
        if firstTime:
            totalPaper = 0
            if s.find('span', class_="search-body-results-text"):
                amount = s.find('span', class_="search-body-results-text").text
                r = ''
                # First word of e.g. "1,234 results" -> digits only.
                for i in amount.split(' ')[0]:
                    if '0' <= i <= '9':
                        r += i
                totalPaper = int(r)
            return totalPaper
        else:
            papers = s.find_all('div', class_="result-item-content")  # result entries on the page
            print("论文列表数量：" + str(len(papers)))

            # No entries found: report failure.
            if not papers:
                self.update_date.emit('0000--> 获取论文列表失败，请重试！ \n')
                return -1

            # Walk the result list and extract each paper's info.
            for paper in papers:
                # Abort if the user pressed stop.
                if self.stop_flag:
                    return

                # Title (and the relative article URL from the <h2><a> link)
                paperName = ''
                if paper.find('h2'):
                    link = paper.find('h2')
                    if link.find('a'):
                        source = link.find('a')['href']  # article URL (relative path)
                    else:
                        continue
                    paperName = self._replace_word(link.text)
                    paperName = ' '.join(paperName.split())
                # NOTE(review): if an entry has no <h2>, `source` keeps the
                # previous iteration's value (or is unbound on the first) —
                # confirm every result entry carries an <h2>.
                print("paperName:"+paperName)

                # Journal name and publication date
                journal_info = ''
                date_info = ''
                if paper.find_all('span', class_="preceding-comma"):
                    info = paper.find_all('span', class_="preceding-comma")
                    journal_info = info[0].text
                    date_info = info[1].text
                print("journal_info:" + journal_info)
                print("date_info:" + date_info)

                # The abstract sits behind a dropdown on the results page, so
                # open the article page itself and read it from there.
                PaperPage = self.sh.sess.get(f"https://www.sciencedirect.com{source}", verify=False)  # open the article page
                PageCt = BeautifulSoup(PaperPage.content, 'lxml')
                # DOI
                doi = 'none'
                print(doi)
                if PageCt.find('a', class_="doi"):
                    doiLink = PageCt.find('a', class_="doi")  # locate the DOI link
                    print(doiLink)
                    doi = doiLink.text[16:]  # DOI number (skips the fixed 16-char URL prefix)
                print("doi:" + doi)

                # Abstract
                abstract = "No Abstract!"
                if PageCt.find('div', class_="abstract author"):
                    abs_info = PageCt.find('div', class_="abstract author")  # abstract section
                    abstemp = ''
                    if abs_info:
                        abs_h2 = abs_info.find_all('h2')    # present when there is an 'Abstract' heading
                        abs_h3 = abs_info.find_all('h3')    # structured headings: Objectives / Methods / Conclusions
                        abs_h3_p = abs_info.find_all('p')   # one paragraph per h3 heading
                        if (abs_h2 and abs_h3) or abs_h3:   # research article with a structured abstract
                            for i in range(0, len(abs_h3)):
                                abstemp = abstemp + abs_h3[i].text.replace('\n', ' ').replace('\r', ' ') + \
                                           "\n" + abs_h3_p[i].text.replace('\n', ' ').replace('\r', ' ') + "\n"
                            abstract = abstemp
                        elif abs_h2:  # plain article with a single abstract paragraph
                            abstract = abs_info.find('p').text.replace('\n', ' ').replace('\r', ' ')
                        else:
                            continue

                # Assemble the row and append it to the CSV file.
                writeList = []
                datas = []
                datas.append(doi)
                datas.append(paperName)
                datas.append(journal_info)
                datas.append(date_info)
                datas.append(abstract)
                datas.append(f"https://www.sciencedirect.com{source}")
                writeList.append(datas)

                with open(self.save_path, 'a', newline='', encoding='utf-8') as f:
                    writer = csv.writer(f)
                    writer.writerows(writeList)
                    f.close()

                self.have_search += 1
                self.update_date.emit('1003' + str(self.have_search))  # papers saved so far
                # Download while searching, if enabled.
                if not self.search_only:
                    self.update_date.emit('2001' + doi + '|' + paperName)
                print("已搜索{}篇，还剩{}篇……\n".format(self.have_search, self.num - self.have_search))
                if self.have_search >= self.num:
                    return self.have_search

            return self.have_search

    # Scrape IEEE Xplore pages with Selenium (legacy path; the active IEEE
    # path is web_IEEE, which posts to the REST API instead)
    def web_IEEE1(self, search_url, firstTime, browser, url):
        """Scrape IEEE Xplore using a Selenium `browser`.

        firstTime True -> load `url` and return the total hit count.
        firstTime False -> visit each article page on the current results
        page, extract its info, append it to the CSV, and return the updated
        saved-paper count (-1 if no article links are found, None on stop).
        """
        if firstTime:
            browser.get(url)
            browser.implicitly_wait(10)       # implicit wait for the page to load
            WebDriverWait(browser, 5, 0.5).until(lambda x: x.find_elements_by_class_name('List-results-items'))
            div = browser.find_element_by_xpath("//*[@id=\"xplMainContent\"]/div[1]/div["
                                                "2]/xpl-search-dashboard/section/div/div[1]/span[1]/span[2]")
            totalPaper = 0
            if div:
                all_page = div.text
                totalPaper = int(re.sub(',', '', all_page))  # drop thousands separators
            return totalPaper
        else:
            # Collect the article links on the current results page.
            papers = []
            res = browser.find_elements_by_class_name('icon-html')
            for i in res:
                papers.append(i.get_attribute('href'))
            print("论文列表数量：" + str(len(papers)))

            # No article links found: report failure.
            if not papers:
                self.update_date.emit('0000--> 获取论文列表失败，请重试！ \n')
                return -1

            # Visit each article page and extract its info.
            for source in papers:
                # Abort if the user pressed stop.
                if self.stop_flag:
                    return
                print(source)
                browser.get(source)
                browser.implicitly_wait(10)  # implicit wait
                WebDriverWait(browser, 5, 0.5).until(lambda x: x.find_elements_by_class_name('document-main-left'
                                                                                             '-trail-content'))

                # Title
                try:
                    paper_name = browser.find_element_by_css_selector('h1.document-title')
                    paper_name = self._replace_word(paper_name.text)
                except selenium.common.exceptions.NoSuchElementException:
                    paper_name = "No Paper Name!"
                print("paper_name:", paper_name)

                # Journal name ("Published in: <name>")
                try:
                    journal_name_temp = browser.find_element_by_css_selector('.u-pb-1.stats-document-abstract'
                                                                             '-publishedIn')
                    # BUG FIX: the original called the non-existent str.splic(),
                    # which raised AttributeError instead of extracting the name.
                    journal_name = journal_name_temp.text.split(':')[1]
                except selenium.common.exceptions.NoSuchElementException:
                    journal_name = "No Journal Name!"
                print("journal_name:", journal_name)

                # Conference date, falling back to publication date
                # (same str.splic() -> str.split() fix as above).
                try:
                    paper_date_temp = browser.find_element_by_css_selector('.u-pb-1.doc-abstract-confdate')
                    paper_date = paper_date_temp.text.split(':')[1]
                except selenium.common.exceptions.NoSuchElementException:
                    paper_date_temp = browser.find_element_by_css_selector('.u-pb-1.doc-abstract-pubdate')
                    paper_date = paper_date_temp.text.split(':')[1]
                print("paper_date:", paper_date)

                # Abstract
                try:
                    paper_abstract = browser.find_element_by_xpath("//*[@id=\"LayoutWrapper\"]/div/div/div/div["
                                                               "3]/div/xpl-root/div/xpl-document-details/div/div["
                                                               "1]/div/div[2]/section/div["
                                                               "2]/div/xpl-document-abstract/section/div[3]/div["
                                                               "1]/div/div/div")
                    paper_abstract = self._replace_word(paper_abstract.text)
                except selenium.common.exceptions.NoSuchElementException:
                    paper_abstract = 'No abstract!'

                # DOI ("DOI: <value>")
                try:
                    paper_doi = browser.find_element_by_css_selector('.u-pb-1.stats-document-abstract-doi')
                    paper_doi = paper_doi.text[4:]
                except selenium.common.exceptions.NoSuchElementException:
                    paper_doi = ''
                print("paper_doi:", paper_doi)

                # Append the row to the CSV file (the `with` block closes it).
                row = [paper_doi, paper_name, journal_name, paper_date,
                       paper_abstract, f"https://ieeexplore.ieee.org{source}"]
                with open(self.save_path, 'a', newline='', encoding='utf-8') as f:
                    csv.writer(f).writerow(row)

                self.have_search += 1
                self.update_date.emit('1003' + str(self.have_search))  # papers saved so far
                # Download while searching, if enabled.
                if not self.search_only:
                    self.update_date.emit('2001' + paper_doi + '|' + paper_name)
                print("已搜索{}篇，还剩{}篇……\n".format(self.have_search, self.num - self.have_search))
                if self.have_search >= self.num:
                    return self.have_search

            return self.have_search


    def web_IEEE(self, search_url, firstTime, url, data):
        """POST `data` to the IEEE Xplore REST endpoint `url` and parse the JSON.

        firstTime True -> return the total record count ('totalRecords').
        firstTime False -> append each record's info to the CSV and return
        the updated saved-paper count (-1 if the record list is empty,
        None if stop was pressed mid-loop).
        """
        IEEE_response = requests.post(url=url, data=json.dumps(data), headers=self.headers, verify=False)
        # Parse once instead of calling json.loads repeatedly on the same text.
        response_json = json.loads(IEEE_response.text)
        if firstTime:
            print("开始获取论文总数......")
            time.sleep(2)
            totalPaper = 0
            if 'totalRecords' in response_json:
                totalPaper = response_json['totalRecords']
            return totalPaper
        else:
            print("开始爬取论文列表......")
            papers = response_json['records']
            print("论文列表数量：" + str(len(papers)))

            # No records returned: report failure.
            if not papers:
                self.update_date.emit('0000--> 获取论文列表失败，请重试！ \n')
                return -1

            # Walk the record list and extract each paper's info.
            for paper in papers:
                print(type(paper))
                # Abort if the user pressed stop.
                if self.stop_flag:
                    return

                # Title
                paper_name = paper.get('articleTitle', "No Paper Name!")
                print("paper_name:", paper_name)

                # Journal name
                journal_name = paper.get('publicationTitle', "No Journal Name!")
                print("journal_name:", journal_name)

                # Publication / conference date.
                # BUG FIX: the original left paper_date unbound when the key
                # was missing, raising NameError on the print below.
                paper_date = paper.get('publicationDate', '')
                if 'publicationDate' not in paper:
                    print("no date")
                print("paper_date:", paper_date)

                # Abstract
                paper_abstract = paper.get('abstract', 'No abstract!')

                # DOI
                paper_doi = paper.get('doi', '')
                print("paper_doi:", paper_doi)

                # Article number (used to build the document URL)
                paper_num = paper.get('articleNumber', '')
                print("paper_num:", paper_num)
                print(f"https://ieeexplore.ieee.org/document/{paper_num}/")

                # Append the row to the CSV file (the `with` block closes it).
                row = [paper_doi, paper_name, journal_name, paper_date,
                       paper_abstract, f"https://ieeexplore.ieee.org/document/{paper_num}/"]
                with open(self.save_path, 'a', newline='', encoding='utf-8') as f:
                    csv.writer(f).writerow(row)

                self.have_search += 1
                self.update_date.emit('1003' + str(self.have_search))  # papers saved so far
                time.sleep(0.5)  # throttle requests to the API
                # Download while searching, if enabled.
                if not self.search_only:
                    self.update_date.emit('2001' + paper_doi + '|' + paper_name)
                print("已搜索{}篇，还剩{}篇……\n".format(self.have_search, self.num - self.have_search))
                if self.have_search >= self.num:
                    return self.have_search

            return self.have_search
    # Build the query-parameter dict for the selected database
    def get_param(self, url_flag, keyword, b_year, e_year, page):
        """Return the request-parameter dict for database index `url_flag`.

        0 -> PubMed, 1 -> ScienceDirect, 2 -> IEEE Xplore; any other index
        yields an empty dict.
        """
        if url_flag == 0:               # PubMed
            return {'term': keyword,
                    'filter': 'dates.%s-%s' % (b_year, e_year),
                    'page': page}
        if url_flag == 1:               # ScienceDirect
            return {'qs': keyword,
                    'start': page,
                    'date': '%s-%s' % (b_year, e_year)}
        if url_flag == 2:               # IEEE Xplore
            return {'queryText': keyword,
                    'ranges': '%s_%s' % (b_year, e_year),
                    'pageNumber': page}
        return {}

    # Sanitize special characters
    def _replace_word(self, s):
        """Return `s` with every char outside ASCII alphanumerics and
        ' ', '-', '.', '/', '_' replaced by a single space.

        Uses set membership + str.join instead of the original repeated
        `res += c`, which is quadratic in the worst case.
        """
        keep = set('0123456789'
                   'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
                   'abcdefghijklmnopqrstuvwxyz'
                   ' -./_')
        return ''.join(c if c in keep else ' ' for c in s)

    # Request that the running search stop
    def stop_thread(self, flag):
        """Latch the stop flag when `flag` is truthy; it is never cleared here."""
        if not flag:
            return
        self.stop_flag = True

    # Toggle search-only vs search-and-download mode
    def search_status(self, flag):
        """Set search_only from `flag` (truthy -> search only, no downloads)."""
        # bool() collapses the original if/else ladder; any truthy value maps
        # to True exactly as before.
        self.search_only = bool(flag)

    # Receive settings from the UI
    def get_ui_info(self, path):
        """Store the output directory; buildSavePath later appends the CSV filename."""
        self.save_path = path

    # Create the .CSV file that will hold the paper info
    def buildSavePath(self):
        """Create <save_path>/paper_info.csv and write the CSV header row.

        On entry self.save_path holds the output directory (from get_ui_info);
        it is updated here to the full CSV file path.
        """
        # os.path.join is portable; the original hard-coded a Windows '\\'
        # separator, producing a broken path on other platforms.
        self.save_path = os.path.join(self.save_path, "paper_info.csv")
        head = ["DOI/pmid", "title", "journal", "date", "Abstract", "url"]
        # 'w' truncates any previous run's file; the `with` block closes it.
        with open(self.save_path, 'w', newline='', encoding='utf-8') as f:
            csv.writer(f).writerow(head)

    # Headless-browser configuration
    def brower_int(self):
        """Return ChromeOptions configured for headless Chrome."""
        opts = webdriver.ChromeOptions()
        # '--disable-gpu' avoids occasional element-location glitches in
        # headless mode.
        for flag in ('--headless', '--disable-gpu'):
            opts.add_argument(flag)
        return opts






# Reference snippet: fetch an IEEE article page to extract its embedded metadata
# (mainly the full abstract); kept commented out for future use.
# gheaders = {
#     'Referer': 'https://ieeexplore.ieee.org/search/searchresult.jsp?newsearch=true&queryText=support',
#     'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:27.0) Gecko/20100101 Firefox/27.0',
#     'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,'
#               'application/signed-exchange;v=b3;q=0.9',
#     'Accept-Encoding': 'gzip, deflate, br',
#     'Accept-Language': 'zh-CN,zh;q=0.9',
#     'Connection': 'keep-alive'
# }
# url = 'https://ieeexplore.ieee.org/document/4537113'
# IEEE_response = requests.get(url=url, headers=gheaders)
# soup = BeautifulSoup(IEEE_response.text, 'lxml')
#
# pattern = re.compile(r'xplGlobal.document.metadata=(.*?);', re.MULTILINE | re.DOTALL)
# script = soup.find("script", text=pattern)
# res_dic = pattern.search(script.string).group(1)
# print(res_dic)
# json_data = json.loads(res_dic)
# print(json_data['userInfo'])