from CrawlNews.items import CrawlnewsItem
import fake_useragent
import scrapy
import json
import re


class MostGovApiSpider(scrapy.Spider):
    """Spider that queries the MOST (Ministry of Science and Technology of
    China) full-text search API for a fixed set of keywords and yields news
    items whose release date is after ``afterDate``.

    One POST request is issued per keyword; ``parseItem`` pages through the
    results until the dates fall outside the window of interest.
    """

    name = 'most_gov_api'
    # allowed_domains = ['www.most.gov.cn']
    start_urls = ['http://www.most.gov.cn/']
    # Search terms submitted to the API, one POST request per keyword.
    keywords = ['产业协同创新共同体', '协同创新', '创新助力工程', '创新助力']
    # Items dated on/before this are dropped. 'YYYY.MM.DD' zero-padded
    # strings compare correctly with plain lexicographic comparison.
    afterDate = '2018.01.01'
    # Pre-analysed search endpoint of the current MOST website.
    url = 'http://168.160.11.196/search/api/fulltext/searchRecently'
    # url = 'http://168.160.11.196/search/api/fulltext/searchCondition'

    header = {
        # NOTE(review): the random UA is drawn once at class-definition time
        # and then reused for every request of every spider instance.
        'User-Agent': fake_useragent.UserAgent().random,
        'Connection': 'keep-alive',
        'Accept': '*/*',
        'Content-Type': 'application/x-www-form-urlencoded'
    }

    # Compiled once: raw strings avoid the '\d' invalid-escape warning and
    # the patterns are reused for every response.
    _KEYWORD_RE = re.compile(r'searchword=(.*?)&')
    _PAGE_RE = re.compile(r'currentPage=(\d+)')

    def parse(self, response):
        """Kick off one POST search request (page 1) per configured keyword."""
        if response.status != 200:
            self.logger.warning('unexpected status %s for %s',
                                response.status, response.url)
            return

        for keyword in self.keywords:
            payload = 'searchword={}&group=全站&currentPage=1'.format(keyword)
            # Scrapy utf-8-encodes the non-ASCII body before sending, e.g.
            # 'searchword=%E6%9C%BA%E5%99%A8...&group=%E5%85%A8%E7%AB%99'
            yield scrapy.Request(url=self.url, callback=self.parseItem,
                                 method='POST', body=payload,
                                 headers=self.header)

    def parseItem(self, response):
        """Parse one JSON page of search results.

        Yields one CrawlnewsItem per result newer than ``afterDate``, plus a
        follow-up request for the next page while relevant pages may remain.
        """
        payload = response.request.body.decode()
        keyword_match = self._KEYWORD_RE.search(payload)
        page_match = self._PAGE_RE.search(payload)
        if keyword_match is None or page_match is None:
            # Defensive: we build the payload ourselves in parse(), so both
            # fields should always be present; report instead of crashing.
            self.logger.error('malformed request body: %r', payload)
            return
        keyword = keyword_match.group(1)
        currentPage = int(page_match.group(1))
        jsonPage = json.loads(response.text.strip())

        result = jsonPage['result']
        totalPages = result['totalPages']
        totalRecords = result['totalRecords']
        self.logger.info('关于 %s 的信息，共有 %s 页，共有 %s 条, 当前位于 %s 页',
                         keyword, totalPages, totalRecords, currentPage)
        # Results appear to be sorted newest-first, so the first entry's date
        # decides whether deeper pages can still hold relevant news.
        # TODO(review): if the API sorts oldest-first the LAST entry's date
        # would be the correct cutoff — confirm against live responses.
        firstNewsDate = result['list'][0]['reltime'] if result['list'] else '1999.01.01'
        if firstNewsDate > self.afterDate and currentPage < totalPages:
            payload = self._PAGE_RE.sub('currentPage=%d' % (currentPage + 1),
                                        payload)
            yield scrapy.Request(url=self.url, callback=self.parseItem,
                                 method='POST', body=payload,
                                 headers=self.header)
        for piece in result['list']:
            item = CrawlnewsItem()
            item['newsTitle'] = piece['title']
            item['newsDate'] = piece['reltime']
            item['newsTime'] = piece['pubtime']
            item['newsLink'] = piece['puburl']
            item['newsSource'] = piece['source']
            item['newsContent'] = piece['content'].strip()
            item['newsCategory'] = keyword
            item['newsGroup'] = piece['groupname']
            item['newsLocation'] = 'Unknown'
            # Only emit items strictly newer than the configured cutoff.
            if item['newsDate'] > self.afterDate:
                yield item
