#coding=utf-8
import scrapy
import json
import datetime
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
import hashlib

from myscrapy.items import Comment

'''
Resumable crawl (JOBDIR persists the request queue so the crawl can be
stopped and restarted without losing progress):

    scrapy crawl comment -s JOBDIR=crawls/comment-1
'''
class CommentSpider(scrapy.Spider):
	"""Crawl jandan.net's joke pages ("duan") and yield one Comment item
	per comment, walking backwards through the paginated archive via the
	"previous comment page" link.
	"""
	name = "comment"

	# Maps the site's relative-time unit words ("3 days ago") to timedeltas.
	# MONTH/YEAR are approximations (30/365 days); both upper- and
	# lower-case spellings appear on the site, so both are listed.
	time_unit = {
		"MINS": datetime.timedelta(seconds=60),
		"MIN": datetime.timedelta(seconds=60),
		"HOUR": datetime.timedelta(hours=1),
		"HOURS": datetime.timedelta(hours=1),
		"DAY": datetime.timedelta(days=1),
		"DAYS": datetime.timedelta(days=1),
		"WEEK": datetime.timedelta(days=7),
		"WEEKS": datetime.timedelta(days=7),
		"MONTH": datetime.timedelta(days=30),
		"MONTHS": datetime.timedelta(days=30),
		"YEAR": datetime.timedelta(days=365),
		"YEARS": datetime.timedelta(days=365),

		"mins": datetime.timedelta(seconds=60),
		"min": datetime.timedelta(seconds=60),
		"hour": datetime.timedelta(hours=1),
		"hours": datetime.timedelta(hours=1),
		"day": datetime.timedelta(days=1),
		"days": datetime.timedelta(days=1),
		"week": datetime.timedelta(days=7),
		"weeks": datetime.timedelta(days=7),
		"month": datetime.timedelta(days=30),
		"months": datetime.timedelta(days=30),
		"year": datetime.timedelta(days=365),
		"years": datetime.timedelta(days=365),
	}

	def start_requests(self):
		"""Seed the crawl at the newest joke page."""
		urls = [
			"http://jandan.net/duan/",
			# "http://jandan.net/duan/page-755#comments",
		]
		for url in urls:
			yield scrapy.Request(url=url, callback=self.parse)

	@staticmethod
	def _first(response, selector):
		"""Return the first extracted match for *selector*, or None if empty."""
		values = response.css(selector).extract()
		return values[0] if values else None

	@staticmethod
	def _clean_relative_time(text):
		"""Normalize e.g. '@3 days ago' to '3 days'.

		Removes the literal '@' prefix and ' ago' suffix. The original
		code used str.strip(" ago"), which strips a *character set*
		rather than the suffix and only worked by accident.
		"""
		text = text.strip().lstrip("@").strip()
		if text.endswith(" ago"):
			text = text[:-len(" ago")]
		return text

	def parse(self, response):
		"""Extract every comment on the page, then follow the previous-page link.

		Items missing any mandatory field (vote id, text, author, time,
		vote counts) are skipped; a missing author code falls back to "0".
		"""
		now = datetime.datetime.now()
		n_items = len(response.css("ol.commentlist>li").extract())
		for i in range(1, n_items + 1):
			# Common selector prefix for the i-th comment <li>.
			base = "ol.commentlist>li:nth-child(" + str(i) + ")>div>div>div."
			vote_id = self._first(response, base + "text>div.vote::attr(id)")
			if vote_id is None:
				continue
			vote_id = vote_id.split("-")[1]
			author_code = self._first(response, base + "author>strong::attr(title)")
			if author_code is not None:
				# Title looks like "<label>：<code>"; keep the code part.
				author_code = author_code.split(u"：")[1]
			else:
				author_code = "0"
			comment = self._first(response, base + "text>p::text")
			if comment is None:
				continue
			author = self._first(response, base + "author>strong::text")
			if author is None:
				continue
			author = author.strip()
			time_str = self._first(response, base + "author>small>a::text")
			if time_str is None:
				continue
			time_str = self._clean_relative_time(time_str)
			support = self._first(response, base + "text>div>span#cos_support-" + vote_id + "::text")
			if support is None:
				continue
			unsupport = self._first(response, base + "text>div>span#cos_unsupport-" + vote_id + "::text")
			if unsupport is None:
				continue
			yield Comment(
				# Explicit utf-8 encode: same digest as before (the file's
				# setdefaultencoding hack made py2 encode as utf-8 anyway),
				# but no longer dependent on that hack and py3-safe.
				_id=hashlib.md5((author_code + comment).encode("utf-8")).hexdigest(),
				author_code=author_code,
				author=author,
				time=self.get_timestamp_str(now, time_str),
				comment=comment,
				support=support,
				unsupport=unsupport
			)
		next_page = response.css("a.previous-comment-page::attr(href)").extract()
		if next_page:
			self.logger.info("finish crawling page: %s", response.url)
			yield scrapy.Request(next_page[0], callback=self.parse)

	def get_timestamp_str(self, now, time_str):
		"""Convert a relative time like '3 days' (anchored at *now*, a
		datetime) to a 'YYYY-mm-dd HH:MM:SS' string.

		Raises KeyError for an unknown unit and ValueError if the count
		is not an integer.
		"""
		parts = time_str.split(" ")
		number = int(parts[0])
		unit = parts[1]
		# Multiply instead of subtracting in a loop (same result).
		then = now - number * self.time_unit[unit]
		return then.strftime('%Y-%m-%d %H:%M:%S')
