import lxml
import lxml.etree  # `import lxml` alone does not expose the etree submodule
import scrapy

from CrawlNews.items import CrawlnewsItem

class CastOrgSpider(scrapy.Spider):
    """Crawl headline news ("头条新闻") listing pages on www.cast.org.cn.

    The listing page embeds its article list inside <script> tags, so the
    raw script contents are re-parsed with lxml to pull out article links.
    Each article page is handed to :meth:`parseNews`, which builds a
    ``CrawlnewsItem`` from the page's meta tags.
    """

    name = 'cast_org'
    allowed_domains = ['www.cast.org.cn']
    start_urls = ['https://www.cast.org.cn/col/col79/', # 头条新闻 (headline news)
                  # 'https://www.cast.org.cn/col/col80/index.html', # 科协要闻
                  ]

    # Keywords of interest for filtering articles (filtering logic TBD).
    keywords = ['产业协同创新共同体', '创新助力工程', '创新助力', '产业协同创新' ]
    # Only articles after this date (YYYYMM) are of interest — TODO: apply it.
    afterDate = '201800'

    def parse(self, response):
        """Parse a listing page: yield article requests and pagination.

        The article list lives inside <script> tags under ``.cont-list``,
        so the fragments are joined into one string and parsed as HTML.
        """
        fragments = response.css(".cont-list script").getall()
        # lxml.etree.HTML expects a single string, not a list of fragments.
        tree = lxml.etree.HTML("".join(fragments), lxml.etree.HTMLParser())

        if tree is not None:  # HTML() returns None for empty/unparseable input
            for href in tree.xpath("//div[@class='list-title']//a/@href"):
                # xpath() on an lxml tree returns plain strings; resolve
                # possibly-relative hrefs against the current response URL.
                yield scrapy.Request(url=response.urljoin(href),
                                     callback=self.parseNews)

        # Enqueue every paginated listing page. This runs on every callback,
        # so the same URLs are yielded repeatedly; Scrapy's duplicate filter
        # drops the repeats.
        for page in range(2, 70):
            next_page = self.start_urls[0] + '?uid=335&pageNum=%d' % page
            yield scrapy.Request(url=next_page, callback=self.parse)

    def parseNews(self, response):
        """Parse one article page into a ``CrawlnewsItem``.

        Reads the title from the ``ArticleTitle`` meta tag; ``.get()``
        yields ``None`` instead of raising when the tag is missing.
        """
        item = CrawlnewsItem()
        item['newsTitle'] = response.css(
            'meta[name=ArticleTitle]::attr(content)').get()
        # The original built the item and dropped it; yield it so the
        # item pipeline actually receives it.
        yield item
