# -*- coding: utf-8 -*-
import scrapy

from dra100_scrapy.items import ScrapyItem
from scrapy.http import Request
from scrapy.utils.project import get_project_settings

class SciencenetSpider(scrapy.Spider):
    """Spider for news.sciencenet.cn (ScienceNet).

    Crawls the list page, stops early when it reaches the most recently
    seen article (tracked in a per-spider log file), and scrapes each
    article's title, content and image URLs into a ScrapyItem.
    """
    name = "sciencenet"
    site_name = "科学网"
    url = "http://news.sciencenet.cn"
    allowed_domains = ["sciencenet.cn"]
    start_urls = ['http://news.sciencenet.cn/morenews-M-1.aspx']

    # List page: yield a detail-page Request per article link.
    def parse(self, response):
        settings = get_project_settings()
        # Per-spider log file holding the newest link from the previous run.
        log_file_name = settings['SPIDER_LOG_STORE'] + self.name

        link_list = response.xpath('//table[@id="DataGrid1"]//table//a/@href')
        for index, link in enumerate(link_list):
            temp_link = self.url + link.extract()
            if index == 0:
                # Compare the newest link on the page with the one recorded
                # last run; if unchanged there is nothing new to crawl.
                try:
                    # 'with' guarantees the handle is closed even on the
                    # early-return path (the original leaked it on break).
                    with open(log_file_name, 'r+') as f:
                        log_link = f.read()
                        if log_link == temp_link:
                            return  # no new articles since last run
                        f.seek(0)
                        f.truncate()
                        f.write(temp_link)
                except FileNotFoundError:
                    # First run: 'r+' requires an existing file, so create it.
                    with open(log_file_name, 'w') as f:
                        f.write(temp_link)
            yield Request(temp_link, callback=self.parse_detail, meta={'link': temp_link})

    # Detail page: extract one article into a ScrapyItem.
    def parse_detail(self, response):
        item = ScrapyItem()
        item['source_url'] = response.meta['link']
        item['source'] = self.site_name
        item['img_url'] = ''
        item['summary'] = ''
        # The third text node of the header cell holds the article title.
        item['title'] = response.xpath('//div[@id="content1"]//td/text()').extract()[2].strip()

        # Concatenate all <p> fragments to form the article body HTML.
        p_list = response.xpath('//table[@id="content"]//p').extract()
        item['content'] = "".join(p_list)

        # Image srcs are site-relative; prefix the site root to absolutize.
        img_url_list = response.xpath('//table[@id="content"]//p//img/@src').extract()
        item['image_urls'] = [self.url + img for img in img_url_list]

        return item