import locale
import re
import time

import scrapy
from scrapy.exceptions import CloseSpider
from scrapy.http import FormRequest
from scrapy.http import Request

from mySpider.items import MyspiderItem


class ScienceSpider(scrapy.Spider):
    """Interactive Web of Science (webofknowledge.com) search spider.

    Flow:
      * parse()       -- pull the session SID from the landing URL, read the
                         search parameters from stdin, POST the advanced
                         search form;
      * parse_first() -- read the total hit count from the result list and
                         follow the first record's detail link;
      * parse_get()   -- scrape one record's detail page and locate the
                         'next record' link.
    """

    name = 'science'
    allowed_domains = ['webofknowledge.com']
    start_urls = ['http://www.webofknowledge.com/']
    # Spider start time, e.g. for naming output files.
    timestamp = str(time.strftime('%Y-%m-%d-%H.%M.%S', time.localtime(time.time())))
    agent = 'Mozilla / 5.0(Windows NT 10.0; Win64; x64) AppleWebKit / 537.36(KHTML, like Gecko) Chrome / 84.0.4147.105 Safari / 537.36'
    # Regular expressions that pull the session id (SID) and query id (QID)
    # out of webofknowledge URLs.
    sid_pattern = r'SID=(\w+)&'
    qid_pattern = r'qid=(\d+)&'

    def parse(self, response):
        """Extract the SID, collect search parameters interactively and
        submit the advanced-search form.

        Returns a FormRequest handled by parse_first; the SID and the
        user's search type / keyword travel along in ``meta``.
        """
        # Without a SID no further request to WoS is possible.
        result = re.search(self.sid_pattern, response.url)
        if result is None:
            print('SID提取失败')
            # Stop the crawl cleanly; the original exit(-1) would kill the
            # whole process from inside the Twisted reactor.
            raise CloseSpider('SID not found in response URL')
        sid = result.group(1)
        print('提取得到SID：', sid)

        # Valid WoS field tags accepted by the prompt below.
        sc_list = ['TS', 'TI', 'AU', 'AI', 'GP', 'ED', 'SO', 'DO', 'PY', 'AD']
        while True:
            sc = input('请输入查询类型TS= 主题，TI= 标题，AU= 作者 [索引]，AI= 作者识别号，GP= 团体作者 [索引]，'
                       'ED= 编者，SO= 出版物名称 [索引]，DO= DOI，PY= 出版年，AD= 地址 ')
            if any(name in sc for name in sc_list):
                break
        ts = input('请输入关键字')

        startYear = input('请输入起始年份')

        endYear = input('请输入终止年份')

        # POST target of the advanced search.
        General_Search_url = 'https://apps.webofknowledge.com/UA_GeneralSearch.do'

        query_form = {
            "fieldCount": "1",
            "action": "search",
            "product": "UA",
            "search_mode": "GeneralSearch",
            "SID": sid,
            "max_field_count": "25",
            "max_field_notice": "注意: 无法添加另一字段。",
            "input_invalid_notice": "检索错误: 请输入检索词。",
            "exp_notice": "检索错误: 专利检索词可以在多个家族中找到(",
            "input_invalid_notice_limits": "< br / > 注意: 滚动框中显示的字段必须至少与一个其他检索字段相组配。",
            "sa_params": "UA||" + str(sid) + "|https://apps.webofknowledge.com:443|'",
            "formUpdated": "true",
            "value(input1)": ts,
            "value(select1)": sc,
            "value(hidInput1)": "",
            "limitStatus": "expanded",
            "ss_lemmatization": "On",
            "ss_spellchecking": "Suggest",
            "SinceLastVisit_UTC": "",
            "SinceLastVisit_DATE": "",
            "period": "Range Selection",
            "range": "ALL",
            "startYear": startYear,
            "endYear": endYear,
            # BUG FIX: the original dict literal repeated the "editions" and
            # "collections" keys, so Python silently kept only the LAST value
            # of each ("SCIELO.SCIELO" / "SCIELO") and the other databases
            # were never requested.  FormRequest accepts a sequence per key
            # and posts one form field per element, which matches what the
            # real WoS form submits.
            "editions": ["WOS.SSCI", "WOS.SCI", "WOS.IC", "WOS.ISTP",
                         "WOS.CCR", "DIIDW.EDerwent", "DIIDW.CDerwent",
                         "DIIDW.MDerwent", "KJD.KJD", "MEDLINE.MEDLINE",
                         "RSCI.RSCI", "SCIELO.SCIELO"],
            "collections": ["WOS", "DIIDW", "KJD", "MEDLINE", "RSCI",
                            "SCIELO"],
            "update_back2search_link_param": "yes",
            "ssStatus": "display:none",
            "ss_showsuggestions": "ON",
            "ss_query_language": "auto",
            "ss_numDefaultGeneralSearchFields": "1",
            "rs_sort_by": "PY.D;LD.D;SO.A;VL.D;PG.A;AU.A"
        }
        print(General_Search_url)
        return FormRequest(General_Search_url, formdata=query_form, method='POST',
                           callback=self.parse_first, meta=dict(sid=sid, sc=sc, ts=ts))

    def parse_first(self, response):
        """Parse the result-list page: read the total hit count and follow
        the first record's detail link."""
        sid = response.meta['sid']
        sc = response.meta['sc']
        ts = response.meta['ts']

        # Needed by the (currently commented-out) locale.atoi() below to
        # parse thousands-separated hit counts.  Guarded because the
        # 'en_US.UTF-8' locale is not installed on every platform
        # (notably Windows).
        try:
            locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')
        except locale.Error:
            pass

        # Total number of hits (still a string; may contain ',' separators).
        # extract_first() returns None instead of raising IndexError when
        # the element is missing.
        count = response.xpath('//*[@id="hitCount.top"]/text()').extract_first()
        #count = locale.atoi(count)
        #if count > 100000:
        #    count = 100000
        #    print('文献数量超过上限，只爬取最新10万篇')

        first_url = response.xpath('//*[@id="RECORD_1"]/div[3]/div/div[1]/div/a/@href').extract_first()
        if first_url is None:
            # No records on the page -- nothing to follow.
            return None
        first_url = 'https://apps.webofknowledge.com' + str(first_url)
        return Request(first_url, callback=self.parse_get,
                       meta=dict(sid=sid, sc=sc, ts=ts, count=count))

    def parse_get(self, response):
        """Scrape one record detail page (title, authors, date, keywords)
        and locate the 'next record' link."""
        sid = response.meta['sid']
        sc = response.meta['sc']
        ts = response.meta['ts']
        count = response.meta['count']
        item = MyspiderItem()  # TODO(review): fields are never populated yet.

        title = response.xpath('//*[@id="records_form"]/div/div/div/div[1]/div/div[1]/item/text()[1]')
        author = response.xpath('//*[@id="records_form"]/div/div/div/div[1]/div/div[2]/p/a/text()').extract()
        # Renamed from 'time': the original name shadowed the imported
        # time module inside this method.
        pub_date = response.xpath('//*[@id="records_form"]/div/div/div/div[1]/div/div[3]/p[5]/value/text()').extract()
        keywords = response.xpath('//*[@id="records_form"]/div/div/div/div[1]/div/div[5]/p/a/text()').extract()
        # BUG FIX: extract() always returns a list (never None), so the
        # original 'keywords == None' fallback was dead code.  Fall back to
        # the plain-text node when no <a>-wrapped keywords were found.
        if not keywords:
            keywords = response.xpath('//*[@id="records_form"]/div/div/div/div[1]/div/div[5]/p/text()').extract()
        # Link to the next record; missing on the last record of the set,
        # where the original extract()[0] raised IndexError.
        next_url = response.xpath('//*[@id="paginationForm"]/span/a[2]/@href').extract_first()
        if next_url is not None:
            next_url = 'https://apps.webofknowledge.com' + str(next_url)
        print(count)
        print(title)
        print(author)
        print(pub_date)
        print(keywords)
        print(next_url)
        # TODO(review): follow next_url to crawl the remaining records, e.g.
        # if next_url:
        #     yield Request(next_url, callback=self.parse_get, meta=response.meta)
