# author: Gabriel Mickel
# email: g3mickel@gmail.com

import os, re, scrapy, requests, json, logging, MySQLdb
from datetime import datetime
from math import ceil, floor
from time import sleep

from Hunter_Crawler.items import JingdongReview, JingdongProduct, JingdongProductUpdate
from Hunter_Crawler.settings import HOST, PORT, USER, PASS, DB

logger = logging.getLogger(__name__)
 
class Jingdong_Review(scrapy.Spider):
	'''Crawls every review page of a single JD.com product and yields
	JingdongReview items, one per review.'''

	name = 'JingdongReview'
	allowed_domains = ['jd.com']

	# JSONP review-summary endpoint: p-<productID>-...-p-<page>
	rev_json_base = 'http://s.club.jd.com/productpage/p-%s-s-0-t-0-p-%s.html?callback=fetchJSON_comment'

	def __init__(self, productID='1586564830'):
		self.productID = productID
		# fix: the original bound a throwaway local, so start_urls was never set
		self.start_urls = ['http://club.jd.com/review/%s-3-1.html' % self.productID]

	def parse(self, response):
		'''Entry point: schedule one parsepage request per review page.

		When scheduled by Jingdong_Product, the review summary and product id
		arrive via response.meta; when run standalone they are fetched from
		the review-summary JSON endpoint instead.
		'''
		try:
			revData = response.meta['revData']
			productID = response.meta['productID']
		except KeyError:
			# fix: the original referenced an undefined req_rev_base and read
			# productID before assigning it; fall back to the spider's own id.
			productID = self.productID
			# [18:-2] strips the "fetchJSON_comment(...);" JSONP wrapper
			revData = json.loads(requests.get(self.rev_json_base % (productID, 0)).text[18:-2])['productCommentSummary']

		### begin the crawling of pages (30 reviews per HTML page)
		rev_base = 'http://club.jd.com/review/%s-3-%s-0.html'
		cur_page = int(response.xpath('//div[@class="pagin fr"]/a[@class="current"]/text()').extract()[0])
		last_page = int(ceil(float(revData['commentCount']) / 30))
		for num in xrange(cur_page - 1, last_page):
			yield scrapy.Request(rev_base % (productID, str(num + 1)),
				callback=self.parsepage, meta={'productID': productID})

	def parsepage(self, response):
		'''Parse one HTML review page and yield a JingdongReview item per review.'''
		productID = response.meta['productID']
		all_reviews = response.xpath('//div[contains(@id,"comment-")]/div[@class="item"]')
		for review_ in all_reviews:
			review = JingdongReview()
			review['reviewID'] = review_.xpath('.//div[@class="btns"]/div[@class="useful"]/@id').extract()[0]
			review['useful'] = review_.xpath('.//div[@class="btns"]/div[@class="useful"]/a/@title').extract()[0].strip().replace('\r', '')
			review['star'] = review_.xpath('.//div[@class="o-topic"]/span[contains(@class,"star")]/@class').extract()[0].split(' sa')[1]
			review['customer'] = review_.xpath('.//div[@class="i-item"]/@data-nickname').extract()[0]
			try:
				review['customerLink'] = review_.xpath('.//div[@class="u-icon"]/a/@href').extract()[0]
			except IndexError:
				# anonymous reviewer: no profile link in the markup
				review['customerLink'] = ''
			review['date'] = review_.xpath('.//div[@class="o-topic"]/span[@class="date-comment"]/a/text()').extract()[0].strip()
			review['productID'] = productID
			tags = []
			for content_ in review_.xpath('.//div[@class="i-item"]/div[@class="comment-content"]/dl'):
				label = content_.xpath('.//dt/text()').extract()[0]
				if label == u'\u5fc3\u3000\u3000\u5f97\uff1a':  # "impressions:" label
					review['content'] = content_.xpath('.//dd/text()')[0].extract()
				elif label == u'\u6807\u3000\u3000\u7b7e\uff1a':  # "tags:" label
					tags.extend(content_.xpath('.//dd/span/span/text()').extract())
			review['tags'] = tags
			yield review


class Jingdong_Product(scrapy.Spider):
	'''Crawls the page of a product for product information, yields one
	JingdongProduct item, then hands the review crawl off to Jingdong_Review.'''

	name = 'JingdongProduct'
	allowed_domains = ['jd.com']

	def __init__(self, productID='1217499'):
		self.start_urls = ['http://item.jd.com/%s.html' % productID]
		self.productID = productID

	def parse(self, response):
		try:
			productID = response.meta['productID']
		except KeyError:
			# standalone run: no meta, use the id given at construction
			productID = self.productID

		headers = {'Referer': 'http://club.jd.com/review/%s-3-1.html' % productID}
		price_base = 'http://p.3.cn/prices/get?skuid=J_%s'
		rev_base = 'http://s.club.jd.com/productpage/p-%s-s-0-t-0-p-%s.html?callback=fetchJSON_comment'

		### Crawl Product Data
		product = JingdongProduct()

		revData = None  # stays None if the summary download fails
		try:
			# [18:-2] strips the "fetchJSON_comment(...);" JSONP wrapper
			revData = json.loads(requests.get(rev_base % (productID, 0), headers=headers).text[18:-2])['productCommentSummary']
			product['avg_rating'] = revData['goodRate']
			product['num_reviews'] = revData['commentCount']
		except Exception:
			# fix: the original logged an undefined name (sproductID) here,
			# raising a NameError inside the handler
			logger.debug('Failed to download review data JSON for %s' % productID)

		try:
			# fix: the original eval()'d the HTTP response body — arbitrary
			# code execution on untrusted input; json.loads is safe
			price = json.loads(requests.get(price_base % productID, headers=headers).content)[0]['p']
			product['price'] = price
		except Exception:
			logger.debug('Failed to download price JSON for %s' % productID)
			product['price'] = ''

		product['productID'] = productID
		product['name'] = response.xpath('//div[@id="name"]/h1/text()').extract()[0]
		# category / sale_rank only arrive via meta when scheduled by
		# Jingdong_Category; fix: default them so standalone runs don't KeyError
		product['category'] = response.meta.get('category', '')
		product['website'] = 'jd'
		product['sale_rank'] = response.meta.get('sale_rank', '')
		product['last_crawled_date'] = datetime.now()

		for title_ in response.xpath('//ul[contains(@id,"parameter")]/li[@title]'):
			text = title_.xpath('./text()').extract()[0]
			if u'\u4e0a\u67b6\u65f6\u95f4\uff1a' in text:  # "listing date:" row
				product['update_date'] = text.split(u'\uff1a')[1]
			if u'\u54c1\u724c\uff1a' in text:  # "brand:" row
				product['brandName'] = title_.xpath('./a/text()').extract()[0]
				# brand id is embedded in the link: .../pinpai/<id>.html
				product['brandID'] = title_.xpath('./a/@href').extract()[0].split('pinpai/')[1].split('.h')[0]

		if 'brandName' not in product.keys():
			product['brandName'] = ''
		if 'brandID' not in product.keys():
			product['brandID'] = ''

		yield product
		# fix: original referenced revData unconditionally and raised a
		# NameError when the summary download above had failed
		if revData is not None:
			yield scrapy.Request('http://club.jd.com/review/%s-3-1.html' % productID,
				callback=Jingdong_Review().parse,
				meta={'revData': revData, 'productID': productID})

	
class Jingdong_Category(scrapy.Spider):
	'''Crawls a given category listing on JD.com, scheduling a full
	Jingdong_Product crawl for every listed product.'''

	name = 'JingdongCategory'
	allowed_domains = ['jd.com']

	def __init__(self, category='9987,653,655'):
		self.start_urls = ['http://list.jd.com/list.html?cat=%s' % category]
		self.sale_rank = 0  # running position counter across all listing pages

	def parse(self, response):
		category = response.xpath('//div[@class="s-title"]/h3/b/text()').extract()[0]
		for product_ in response.xpath('//li[@class="gl-item"]/div'):
			self.sale_rank += 1
			productID = product_.xpath('./@data-sku').extract()[0]
			yield scrapy.Request('http://item.jd.com/%s.html' % productID,
				callback=Jingdong_Product().parse,
				meta={'productID': productID, 'sale_rank': self.sale_rank, 'category': category})
		next_href = response.xpath('//a[@class="pn-next"]/@href').extract()
		if next_href:
			# fix: the original put the loop's last productID into the next-page
			# meta — a NameError on a page with zero listings, and meaningless
			# for the following page anyway
			yield scrapy.Request('http://list.jd.com' + next_href[0],
				callback=self.parse, meta={'category': category})


class Jingdong_Update_Product(scrapy.Spider):
	'''Re-crawls a product already present in the database: yields a
	JingdongProductUpdate item and schedules crawls for any new reviews.'''

	name = 'JingdongProductUpdate'
	allowed_domains = ['jd.com']

	def __init__(self, productID='1217499'):
		self.start_urls = ['http://item.jd.com/%s.html' % productID]
		self.conn = MySQLdb.connect(host=HOST, port=PORT, user=USER, passwd=PASS, charset='utf8')
		self.conn.select_db(DB)
		self.cur = self.conn.cursor()
		# JSONP review-summary endpoint: p-<productID>-...-p-<page>
		self.rev_base = 'http://s.club.jd.com/productpage/p-%s-s-0-t-0-p-%s.html?callback=fetchJSON_comment'

	def parse(self, response):
		try:
			productID = response.meta['productID']
		except KeyError:
			# e.g. 'http://item.jd.com/123.html' -> '123'
			productID = response.url.split('/')[3].split('.')[0]
		headers = {'Referer': 'http://club.jd.com/review/%s-3-1.html' % productID}
		# [18:-2] strips the "fetchJSON_comment(...);" JSONP wrapper
		revData = json.loads(requests.get(self.rev_base % (productID, 0), headers=headers).text[18:-2])['productCommentSummary']

		# fix: parameterized query instead of string interpolation (SQL injection)
		self.cur.execute('select num_reviews from product where product_id = %s', (productID,))
		curRevs = self.cur.fetchall()[0][0]
		newRevs = revData['commentCount'] - int(curRevs)
		if newRevs > 0:  # crawl new reviews if they exist (30 per page)
			# fix: parsepage parses the HTML review pages with xpath, but the
			# original fed it the JSONP endpoint (which yields nothing); also
			# xrange(1, ceil) skipped a page, so <=30 new reviews crawled none
			rev_page = 'http://club.jd.com/review/%s-3-%s-0.html'
			for page in xrange(1, int(ceil(float(newRevs) / 30)) + 1):
				yield scrapy.Request(rev_page % (productID, str(page)),
					callback=Jingdong_Review().parsepage, meta={'productID': productID})

		try:
			sale_rank = response.meta['sale_rank']
		except KeyError:
			sale_rank = ''

		productUpdate = JingdongProductUpdate()
		productUpdate['productID'] = productID
		productUpdate['avg_rating'] = revData['goodRate']
		productUpdate['num_reviews'] = revData['commentCount']
		# fix: removed stray debug print(datetime.now())
		productUpdate['last_crawled_date'] = datetime.now()
		productUpdate['sale_rank'] = sale_rank

		yield productUpdate



class Jingdong_Category_Update(scrapy.Spider):
	'''Re-crawls a category listing: products already in the database get a
	lightweight update crawl, unknown products get a full product crawl.'''

	name = 'JingdongCategoryUpdate'
	allowed_domains = ['jd.com']

	def __init__(self, category='9987,653,655'):
		self.start_urls = ['http://list.jd.com/list.html?cat=%s' % category]
		self.sale_rank = 0  # running position counter across listing pages
		self.conn = MySQLdb.connect(host=HOST, port=PORT, user=USER, passwd=PASS, charset='utf8')
		self.conn.select_db(DB)
		self.cur = self.conn.cursor()

	def parse(self, response):
		# cache the category title from the first listing page only
		if not hasattr(self, 'category'):
			self.category = response.xpath('//div[@class="s-title"]/h3/b/text()').extract()[0]

		for product_ in response.xpath('//li[@class="gl-item"]/div'):
			self.sale_rank += 1
			productID = product_.xpath('./@data-sku').extract()[0]
			# fix: parameterized query instead of string interpolation (SQL
			# injection); execute() returns the matched row count
			if self.cur.execute('select id from product where product_id = %s', (productID,)) == 1:
				# known product: update metadata and fetch only new reviews;
				# fix: pass productID in meta like the sibling branch does
				yield scrapy.Request('http://item.jd.com/%s.html' % productID,
					callback=Jingdong_Update_Product().parse,
					meta={'productID': productID, 'sale_rank': self.sale_rank})
			else:
				# unknown product: full crawl
				yield scrapy.Request('http://item.jd.com/%s.html' % productID,
					callback=Jingdong_Product().parse,
					meta={'productID': productID, 'sale_rank': self.sale_rank, 'category': self.category})

		next_href = response.xpath('//a[@class="pn-next"]/@href').extract()
		if next_href:
			yield scrapy.Request('http://list.jd.com' + next_href[0], callback=self.parse)

