# -*- coding: utf-8 -*-
import scrapy
import sys
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule

from sina_finance.items import SinaFinanceItem


class TestSpider(CrawlSpider):
    """Crawl finance.sina.com.cn and extract article title/body into items.

    Follows links under ``div#fin_tabs0_c0`` and parses each landing page
    with :meth:`parse_item`.
    """

    name = 'finance'
    allowed_domains = ['finance.sina.com.cn']
    start_urls = ['http://finance.sina.com.cn/']

    rules = (
        Rule(LinkExtractor(restrict_xpaths=("//div[@id='fin_tabs0_c0']//a",)), callback='parse_item', follow=True),
    )

    def __init__(self, name=None, **kwargs):
        # Forward the caller-supplied name (the original hard-coded
        # name=None, silently dropping the argument).
        super(TestSpider, self).__init__(name=name, **kwargs)
        # Capture the scrapyd job id from the command line ("_job=<id>").
        # Default once and break on the first match: the original loop's
        # else-branch reset jod_id to '' on every later non-matching
        # argv entry, losing the captured value.
        # NOTE: "jod_id" is a typo for "job_id", but it is also the item
        # field name on SinaFinanceItem, so it is kept for compatibility.
        self.jod_id = ''
        for arg in sys.argv:
            if '_job' in arg:
                self.jod_id = arg.split('=')[1]
                break

    def parse_item(self, response):
        """Parse one article page into a SinaFinanceItem.

        Returns the populated item, or None when the page has no article
        title (i.e. it is not an article page).
        """
        item = SinaFinanceItem()
        title_xpath = "//h1[@id='artibodyTitle']/text()"   # article title
        body_xpath = "//div[@id='artibody']//text()"       # article body text
        titles = response.xpath(title_xpath).extract()
        # Join all text nodes of the body and strip layout whitespace.
        body = u''.join(response.xpath(body_xpath).extract()).strip().replace('\n', '').replace('\t', '')
        if titles:
            item['title'] = titles[0]
            item['content'] = body
            item['content_href'] = response.url
            item['jod_id'] = self.jod_id
            return item
