# -*- coding: utf-8 -*-
import scrapy
from news_project.items import CeItem


class CeFinanceSpider(scrapy.Spider):
    """Crawl the finance.ce.cn 'money' channel list page and follow each
    article link to scrape title, publish time, source, author and body
    into a ``CeItem``.
    """
    name = 'ce_finance'
    allowed_domains = ['finance.ce.cn']
    start_urls = ['http://finance.ce.cn/money/index_17790.shtml']

    def parse(self, response):
        """Parse the list page: one ``<tr>`` per article in the second
        table; schedule a request per article, carrying the partially
        filled item in ``meta``.
        """
        rows = response.xpath("//div[@class='list_left']/table[2]/tr")
        for row in rows:
            title = row.xpath("td/a/text()").get()
            if title is None:
                # Separator / decoration rows have no anchor text; skip
                # them before allocating an item.
                continue
            item = CeItem()
            item['title'] = title
            # The span text is the publish date wrapped in brackets;
            # strip the first and last characters. Guard against a
            # missing span: the original `.get()[1:-1]` raised TypeError
            # ('NoneType' is not subscriptable) on rows without one.
            push_time = row.xpath("td/span/text()").get()
            item['push_time'] = push_time[1:-1] if push_time is not None else None
            href = row.xpath("td/a/@href").get()
            yield scrapy.Request(
                response.urljoin(href),
                callback=self.parse_content,
                meta={"data": item},
            )

    def parse_content(self, response):
        """Parse an article detail page and yield the completed item.

        Items without a body are logged and dropped.
        """
        self.logger.info('CeFinance-->A response from %s just arrived!', response.url)
        data = response.meta['data']

        content_div = response.xpath("//div[@id='article']")
        # NOTE: a leading '//' inside a relative selector still searches
        # the WHOLE document; './/' keeps the search scoped to the
        # article div as intended.
        source = content_div.xpath(".//span[@id='articleSource']/text()").get()
        author = content_div.xpath(".//span[@id='articleAuthor']/text()").get()
        content = content_div.xpath("div[@id='articleText']").get()

        data['source'] = source
        data['source_link'] = response.url
        data['author'] = author
        data['content'] = content
        data['module'] = 12  # fixed channel/module id for this spider

        if data['content'] is None:
            # logger.warn() is deprecated; warning() is the supported name.
            self.logger.warning("Not found the article content! url [%s]", response.url)
        else:
            yield data
