#coding=utf-8
import scrapy
import json
import datetime
import sys
# Python 2 hack: reload(sys) re-exposes sys.setdefaultencoding (which site.py
# deletes at startup) so the implicit str<->unicode codec can be switched to
# UTF-8.  This lets Chinese text flow through str/unicode boundaries without
# explicit .encode()/.decode() calls.  NOTE(review): Python 2 only — both
# reload() as a builtin and setdefaultencoding are gone in Python 3.
reload(sys)
sys.setdefaultencoding('utf-8')

class DuanziSpider(scrapy.Spider):
	"""Scrapy spider that crawls joke ("duanzi") posts from jandan.net.

	Walks the comment pages backwards via the "previous page" link and dumps
	each page's rows to ``duanzi/<page-fragment>.json``.
	"""

	name = "duanzi"

	# Maps the unit word of a relative age string ("5 mins ago") to one step
	# of that unit.  Upper- and lower-case variants are listed explicitly so
	# get_timestamp_str can use a plain dict lookup.  MONTH/YEAR are the usual
	# 30/365-day approximations.
	time_unit = {
		"MINS": datetime.timedelta(seconds=60),
		"MIN": datetime.timedelta(seconds=60),
		"HOUR": datetime.timedelta(hours=1),
		"HOURS": datetime.timedelta(hours=1),
		"DAY": datetime.timedelta(days=1),
		"DAYS": datetime.timedelta(days=1),
		"WEEK": datetime.timedelta(days=7),
		"WEEKS": datetime.timedelta(days=7),
		"MONTH": datetime.timedelta(days=30),
		"MONTHS": datetime.timedelta(days=30),
		"YEAR": datetime.timedelta(days=365),
		"YEARS": datetime.timedelta(days=365),

		"mins": datetime.timedelta(seconds=60),
		"min": datetime.timedelta(seconds=60),
		"hour": datetime.timedelta(hours=1),
		"hours": datetime.timedelta(hours=1),
		"day": datetime.timedelta(days=1),
		"days": datetime.timedelta(days=1),
		"week": datetime.timedelta(days=7),
		"weeks": datetime.timedelta(days=7),
		"month": datetime.timedelta(days=30),
		"months": datetime.timedelta(days=30),
		"year": datetime.timedelta(days=365),
		"years": datetime.timedelta(days=365),
	}

	def start_requests(self):
		"""Seed the crawl with the first duanzi page (crawled backwards from page 235)."""
		urls = [
			"http://jandan.net/duan/page-235#comments",
		]
		for url in urls:
			yield scrapy.Request(url=url, callback=self.parse)

	def parse(self, response):
		"""Extract every post row on the page, save them, and follow the previous-page link."""
		outputs = []
		now = datetime.datetime.now()
		for row in response.css("div.row"):
			authors = row.css("div.author strong::text")
			ages = row.css("div.author small a::text")
			votes = row.css("div.text div.vote span")
			# A well-formed row has an author, an age link, and at least
			# three vote <span>s (index 1 = support, 2 = unsupport).
			# Malformed rows are kept as {} so page row counts are preserved.
			if len(authors) > 0 and len(ages) > 0 and len(votes) > 2:
				# NOTE(review): .strip(" ago") strips the CHARACTER SET
				# {' ','a','g','o'} from both ends, not the literal suffix
				# " ago"; combined with [1:] it appears to assume the text
				# looks like "@5 mins ago" — confirm against the live markup.
				age_text = ages.extract_first().strip(" ago")[1:]
				row_dict = {
					"author": authors.extract_first(),
					"time": self.get_timestamp_str(now, age_text),
					"duanzi": " ".join(row.css("div.text p::text").extract()).replace("\n", " "),
					"support": votes[1].css("::text").extract_first(),
					"unsupport": votes[2].css("::text").extract_first(),
				}
			else:
				row_dict = {}
			outputs.append(row_dict)
		# Name the dump file after the "page-N..." fragment of the URL.
		self.save_duanzi(response.url[response.url.index("page-"):], outputs)
		next_page = response.css("div.comments div.cp-pagenavi a.previous-comment-page::attr(href)").extract_first()
		if next_page is not None:
			yield scrapy.Request(response.urljoin(next_page), callback=self.parse)

	def save_duanzi(self, filename, duanzi_list):
		"""Serialise one page of rows to ``duanzi/<filename>.json`` (UTF-8 JSON).

		NOTE(review): assumes the ``duanzi/`` directory already exists —
		open() raises otherwise.  Binary mode is fine here because the
		setdefaultencoding('utf-8') hack makes the implicit unicode->str
		conversion use UTF-8 (Python 2 only).
		"""
		with open("duanzi/" + filename + ".json", "wb") as f:
			f.write(json.dumps(duanzi_list, ensure_ascii=False, indent=4))

	def get_timestamp_str(self, now, time_str):
		"""Turn a relative age like ``"5 mins"`` into an absolute timestamp string.

		now      -- datetime to count back from.
		time_str -- "<number> <unit>"; <unit> must be a key of ``time_unit``
		            (KeyError otherwise, as before).
		Returns the resulting time formatted as 'YYYY-MM-DD HH:MM:SS'.
		"""
		parts = time_str.split(" ")
		number = int(parts[0])
		unit = parts[1]
		# Fix: the original subtracted the unit in a loop `number` times;
		# multiplying the timedelta once is identical and O(1).
		return (now - number * self.time_unit[unit]).strftime('%Y-%m-%d %H:%M:%S')
