import scrapy
import re
from CrawlNews.items import CrawlnewsItem


class MostGovSpider(scrapy.Spider):
    """Crawl news listings on most.gov.cn (Ministry of Science and Technology)
    and emit one item per keyword matched in each article published after
    ``afterDate``.
    """
    name = 'most_gov'
    # allowed_domains = ['http://www.most.gov.cn']
    start_urls = ['http://www.most.gov.cn/dfkj/',  # regional S&T news
                  'http://www.most.gov.cn/kjbgz/', # ministry work news
                 ]

    # Only articles containing at least one of these keywords are emitted.
    keywords = ['产业协同创新共同体','协同创新', '创新助力工程', '创新助力']
    # YYYYMM(+) prefix filter: keep only news whose date string compares
    # strictly greater than this (i.e. published in 2018 or later).
    afterDate = '201800'

    def _page_requests(self, response):
        """Yield requests for paginated listing pages index_1.htm .. index_(N-1).htm.

        The total page count is embedded in an inline JavaScript snippet as
        "countPage = N".  The regex search must succeed; if the site layout
        changes we want the resulting AttributeError to surface loudly.
        """
        text = response.xpath(
            "//table/tr/td/script[@language='JavaScript']/text()").extract_first()
        pageCnt = int(re.search(r"countPage = (\d+)", text).group(1))
        # First page is index.htm (already fetched); the rest are index_i.htm.
        for i in range(1, pageCnt):
            yield scrapy.Request(url=response.url + 'index_%d.htm' % i,
                                 callback=self.parsePage)

    def _news_requests(self, response, css_class, date_index, base_url):
        """Yield requests for news links found under ``css_class`` whose path
        component at ``date_index`` is a numeric YYYYMM string newer than
        ``afterDate``.  Links with too few path components are skipped instead
        of raising IndexError.
        """
        for newsUrl in response.css(css_class).css('a::attr(href)').extract():
            parts = newsUrl.split('/')
            if (len(parts) > date_index
                    and parts[date_index].isnumeric()
                    and parts[date_index] > self.afterDate):
                yield scrapy.Request(url=base_url + newsUrl,
                                     callback=self.parseNews)

    def parse(self, response):
        """Dispatch the two start URLs: /kjbgz/ goes straight to pagination,
        /dfkj/ fans out into one request per region sub-section.
        """
        if response.url == self.start_urls[1]:
            for request in self._page_requests(response):
                yield request
        else:
            # href values look like "/dfkj/bj/"; rebuild them relative to the
            # /dfkj/ start URL.
            for href in response.xpath('//tr/td/table//div/a/@href').extract():
                try:
                    url = self.start_urls[0] + href.split('/', 2)[-1]
                    yield scrapy.Request(url=url, callback=self.parseRegion)
                except ValueError:
                    # scrapy.Request raises ValueError on malformed URLs;
                    # skip those links rather than aborting the whole page.
                    continue

    def parseRegion(self, response):
        """Handle a region listing: news on the first page, then pagination."""
        # First-page links look like "./zxdt/201905/...", so the date is the
        # third path component; response.url already ends with '/'.
        for request in self._news_requests(response, '.news1', 2, response.url):
            yield request
        for request in self._page_requests(response):
            yield request

    def parsePage(self, response):
        """Handle a paginated listing page (index_i.htm) of either section."""
        base = response.url[:response.url.rindex('/') + 1]
        # /kjbgz/ links look like "./202103/t20210322_161865.htm" (date at [1]).
        for request in self._news_requests(response, '.STYLE30', 1, base):
            yield request
        # /dfkj/ links look like "./zxdt/201905/t20190521_146698.htm" (date at [2]).
        for request in self._news_requests(response, '.news1', 2, base):
            yield request

    def parseNews(self, response):
        """Extract the article body and yield one item per matching keyword."""
        # e.g.: http://www.most.gov.cn/dfkj/gz/tpxw/202104/t20210430_162675.htm
        # Some links point to files (pdf/doc) that cannot be text-searched; skip.
        if '.htm' not in response.url:
            return
        # The article body is bracketed by two markers:
        # ['<meta name="ContentStart">', '<meta name="ContentEnd">']
        separator = response.css('#Zoom meta').extract()
        content = response.xpath('//div[@id="Zoom"]//p/text()').extract()
        if content:
            content = '\n'.join(paragraph.strip() for paragraph in content)
        else:
            # Fallback: slice the raw #Zoom markup between the two markers.
            content = (response.css('#Zoom').extract()[0]
                       .split(separator[0])[1]
                       .split(separator[1])[0]
                       .strip())

        for keyword in self.keywords:
            if keyword in content:
                item = CrawlnewsItem()
                # extract_first(default='') avoids IndexError on pages that
                # lack a title/source/pubdate meta tag.
                item['newsTitle'] = response.css(
                    'title::text').extract_first(default='')
                item['newsSource'] = response.css(
                    'meta[name=source]::attr(content)').extract_first(default='')
                item['newsDate'] = response.css(
                    'meta[name=pubdate]::attr(content)').extract_first(default='')
                item['newsLink'] = response.url
                item['newsCategory'] = keyword
                # First two characters of the source name the province/city
                # — presumably; TODO confirm against actual source strings.
                item['newsLocation'] = item['newsSource'][:2]
                item['newsContent'] = content
                item['newsGroup'] = ''
                yield item
