# encoding: utf-8
import scrapy
import re
from scrapy.selector import Selector
from tech163.items import Tech163Item
from scrapy.contrib.linkextractors import LinkExtractor
from scrapy.contrib.spiders import CrawlSpider, Rule


class ExampleSpider(CrawlSpider):
    """Crawl tech.163.com and scrape article pages dated July 2016.

    Each matched article URL is parsed into a ``Tech163Item`` with the
    thread id, title, timestamp, originating source (name + URL), body
    paragraphs, page URL, and breadcrumb category.
    """

    name = "news"
    allowed_domains = ['tech.163.com']
    start_urls = ['http://tech.163.com/']
    # Follow only article URLs of the form .../16/07DD/... (July 2016);
    # every match is handed to parse_news and further links are followed.
    rules = (
        Rule(LinkExtractor(allow=('http://tech.163.com/16/07[0-9][0-9]/.*',)),
             callback="parse_news", follow=True),
    )

    def parse_news(self, response):
        """Build and return one Tech163Item scraped from an article page."""
        item = Tech163Item()
        # Article URLs end in '<thread-id>.html'; [:-5] drops the 5-char
        # '.html' suffix, leaving the bare thread id.
        item['news_thread'] = response.url.strip().split('/')[-1][:-5]
        self.get_title(response, item)
        self.get_source(response, item)
        self.get_url(response, item)
        self.get_news_from(response, item)
        self.get_from_url(response, item)
        self.get_text(response, item)
        self.get_category(response, item)
        return item

    def get_title(self, response, item):
        """Store the full page <title> text in news_title, if present."""
        title = response.xpath("/html/head/title/text()").extract()
        if title:
            # level=20 is logging.INFO; [:-5] only trims the trailing
            # site suffix for the log line, the item keeps the full title.
            self.log('title: ' + title[0][:-5], level=20)
            item['news_title'] = title[0]

    def get_source(self, response, item):
        """Store the article timestamp in news_time.

        Bug fix: the original xpath selected the whole <div> element
        (markup included), so the [0:9] slice produced tag text such as
        '<div clas' instead of a date.  Select the text node and keep the
        leading 'YYYY-MM-DD HH:MM:SS' portion (19 chars) — assumes the
        text starts with the timestamp; TODO confirm against a live page.
        """
        source = response.xpath(
            "//div[@class='post_time_source']/text()").extract()
        if source:
            item['news_time'] = source[0].strip()[:19]

    def get_news_from(self, response, item):
        """Store the originating outlet's display name in news_from."""
        news_from = response.xpath(
            "//a[@id='ne_article_source']/text()").extract()
        if news_from:
            item['news_from'] = news_from[0]

    def get_from_url(self, response, item):
        """Store the originating outlet's link target in from_url."""
        from_url = response.xpath(
            "//a[@id='ne_article_source']/@href").extract()
        if from_url:
            item['from_url'] = from_url[0]

    def get_text(self, response, item):
        """Store the article body paragraphs (list of strings) in news_body."""
        news_body = response.xpath("//div[@id='endText']/p/text()").extract()
        if news_body:
            item['news_body'] = news_body

    def get_url(self, response, item):
        """Store the article page URL in news_url and log it at INFO."""
        news_url = response.url
        if news_url:
            self.log('news_url: ' + news_url, level=20)
            item['news_url'] = news_url

    def get_category(self, response, item):
        """Store the third breadcrumb link text as news_category."""
        new_cate = response.xpath(
            "//div[@class='post_crumb']/a[3]/text()").extract()
        if new_cate:
            item['news_category'] = new_cate[0]





