#encoding=utf-8
import scrapy
from news_spider.items import NewsSpiderItem
import json
import time 
import re
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import Rule
from scrapy.spiders import CrawlSpider
from pyquery import PyQuery
import datetime
import time
import iso8601

class NetEaseSpider(CrawlSpider):
    """Crawl spider for NetEase Tech (tech.163.com) news articles.

    Follows article links matching the URL rule below and extracts each
    article's title, publication time (as a Unix timestamp), URL and body
    text into a NewsSpiderItem.
    """

    name = 'netease'
    allowed_domains = ["tech.163.com"]
    start_urls = ['http://tech.163.com/']
    # NOTE(review): the allow pattern hard-codes the "/17/12..." path
    # segment, i.e. only December-2017 articles are followed — widen the
    # pattern to crawl other months/years.
    rules = (
        Rule(
            LinkExtractor(allow=r"http://tech.163.com/17/12\d+/\d+/.*html"),
            callback="parseNews",
            follow=True
        ),
    )

    def parseNews(self, response):
        """Parse one article page into a NewsSpiderItem.

        Yields nothing for pages that match the URL rule but lack the
        expected markup (published-time meta tag, title, or post_text
        body div). Previously such pages raised IndexError on the empty
        extract() lists and aborted the callback.
        """
        published = response.xpath(
            "//meta[@property='article:published_time']/@content").extract()
        title = response.xpath("//title/text()").extract()
        body = response.xpath("//div[@class='post_text']").extract()
        # Guard: galleries/redirect pages can match the rule without
        # carrying the article fields — skip them instead of crashing.
        if not (published and title and body):
            return

        item = NewsSpiderItem()
        # ISO-8601 publish time -> Unix timestamp (seconds, via local
        # time per time.mktime).
        parsed = iso8601.parse_date(published[0])
        item['time'] = time.mktime(parsed.timetuple())
        item['title'] = PyQuery(title[0]).text()
        item['url'] = response.url
        item['content'] = PyQuery(body[0]).text()
        yield item

