# -*- coding: utf-8 -*-
import scrapy
from news_project.items import CeItem

# ce.cn (China Economic Net) - IT/tech channel
class CeTechSpider(scrapy.Spider):
    """Spider for the IT/tech channel of ce.cn (China Economic Net).

    Crawls the listing page, pulls the title and push time of each entry,
    then follows every article link and fills in source, author, body
    content and module id before yielding the item.
    """

    name = 'ce_tech'
    allowed_domains = ['ce.cn']
    start_urls = ['http://ce.cn/cysc/tech/gd2012/']

    def parse(self, response):
        """Parse the listing page and schedule one request per article.

        Yields a ``scrapy.Request`` for each list entry that has a title;
        the partially-filled ``CeItem`` rides along in ``meta['data']``.
        """
        # Use the spider logger instead of a bare print for debug output.
        self.logger.debug("response--> %s", response)

        for li in response.xpath("//div[@class='left']/ul/li"):
            item = CeItem()
            item['title'] = li.xpath("a/text()").get()
            # Skip separator/empty <li> entries that carry no link text.
            if not item['title']:
                continue

            # The push time is the second bare text node of the <li>,
            # with surrounding decoration stripped via [3:-1].  Guard
            # against entries missing it instead of raising IndexError
            # (which would abort parsing of the whole page).
            texts = li.xpath("text()").getall()
            item['push_time'] = texts[1][3:-1] if len(texts) > 1 else None

            # .get() is the modern spelling of .extract_first(); use it
            # consistently with the rest of the method.
            href = li.xpath("a/@href").get()
            yield scrapy.Request(
                response.urljoin(href),
                callback=self.parse_content,
                meta={"data": item},
            )

    def parse_content(self, response):
        """Parse an article page and yield the completed item.

        Items whose body content cannot be located are logged and dropped.
        """
        self.logger.info('A response from %s just arrived!', response.url)
        data = response.meta['data']

        content_form = response.xpath("//form[@id='formarticle']")
        # Use ".//" so these lookups are relative to the form node; a
        # leading "//" searches the entire document regardless of the
        # context node, silently defeating the scoping intended here.
        source = content_form.xpath(".//span[@id='articleSource']/text()").get()
        author = content_form.xpath(".//span[@id='articleAuthor']/text()").get()
        content = content_form.xpath("div[@id='articleText']").get()

        data['source'] = source
        data['source_link'] = response.url
        data['author'] = author
        data['content'] = content
        data['module'] = 10  # fixed channel/module id for this spider

        if data['content'] is None:
            # logger.warn is deprecated; logger.warning is the supported name.
            self.logger.warning("Not found the article content! url [%s]", response.url)
        else:
            yield data
