from scrapy.spiders import CrawlSpider
import scrapy
# from example.items import SeemeispiderItem
from search_news.items import NewsItem


class MySpider(CrawlSpider):
    """Spider for the JMU College of Computer Engineering news list.

    Scrapes article title, publication date, and absolute URL from the
    department news table, then follows the "Next" pagination link.

    NOTE(review): overriding ``parse`` on a ``CrawlSpider`` disables its
    rule-based crawling. No ``rules`` are defined here so this works, but
    plain ``scrapy.Spider`` would be the conventional base class — confirm
    before changing, since other spiders in the project may share this style.
    """
    name = 'cecSpider'
    start_urls = ["http://cec.jmu.edu.cn/list.jsp?urltype=tree.TreeTempUrl&wbtreeid=1044"]

    def parse(self, response):
        """Yield one NewsItem per article row, then a Request for the next page."""
        rows = response.xpath('//table[@class="winstyle124907"]//tr')
        for row in rows:
            url = row.xpath('.//a/@href').extract_first()
            if not url:
                # Header/separator rows carry no <a>; the old code crashed
                # here with TypeError on None concatenation — skip instead.
                continue
            item = NewsItem()
            item['title'] = row.xpath('.//a/@title').extract_first()
            item['pubdate'] = row.xpath('.//td[@class="timestyle124907"]/text()').extract_first()
            # urljoin handles relative and absolute hrefs correctly; plain
            # string concatenation broke on absolute URLs and could double
            # slashes.
            item['url'] = response.urljoin(url)
            item['college'] = "计算机工程学院"
            yield item

        next_page = response.xpath("//a[@class='Next'][1]/@href").extract_first()
        if next_page is not None:
            self.logger.debug("Following next page: %s", next_page)
            # dont_filter: the site reuses near-identical pagination URLs,
            # presumably to defeat the dupe filter — TODO confirm it is needed.
            yield scrapy.Request(response.urljoin(next_page), callback=self.parse, dont_filter=True)