#--*-- coding:UTF-8 --*--
#------------------------------------------------------ 
#  程序：酒仙网爬虫 v1.0
#  版本：1.0
#  作者：Vanson     QQ124762963
#  日期：2017-09-20 
#  环境：Python 2.7 Win10
#  功能：爬取酒仙网，支持单件商品、列表商品，生成csv格式
#   本程序仅供学习交流，使用后请在24小时内删除，谢谢
#-------------------------------------------------------
from selenium import webdriver
from pyquery import PyQuery as pq
from datetime import datetime
from selenium.webdriver import DesiredCapabilities
import random, string, os, re, urllib, urllib2, requests, sys, HTMLParser, codecs
# Python 2 only: site.py removes setdefaultencoding at startup; reload(sys)
# restores it so implicit str<->unicode conversions below assume UTF-8
# instead of ASCII (would otherwise raise UnicodeDecodeError on Chinese text).
reload(sys)
sys.setdefaultencoding('utf-8')



# 记录程序log
# Timestamped logger: optionally echo msg to the console (re-encoded UTF-8 ->
# GBK for the Windows terminal) and always append it to ./log.txt as UTF-8.
def printAndLog(msg, boolean_print=True, msg_type='INFO'):
	stamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
	if boolean_print:
		print(stamp + ' [' + msg_type + '] ' + msg.decode('utf-8').encode('gbk'))
	# mode 'a' creates the file when missing, so one open covers both cases;
	# an existing file gets a CRLF separator before the new entry
	separator = '\r\n' if os.path.exists('./log.txt') else ''
	with codecs.open('log.txt', 'a', 'utf_8') as f:
		f.write(separator + stamp + ' [' + msg_type + '] ' + msg)

# Log msg verbatim (no timestamp): print it and append it to ./log.txt.
def printLogNoTime(msg):
	print(msg)
	# 'a' creates log.txt on first use; existing files get a CRLF separator
	separator = '\r\n' if os.path.exists('./log.txt') else ''
	with codecs.open('log.txt', 'a', 'utf_8') as f:
		f.write(separator + msg)

def auto_down(url, filename):
	"""Download url to filename with the `schedule` progress hook, retrying
	forever on failure.

	The original retried via unbounded recursion under a bare ``except``,
	which (a) can overflow the stack on a flaky network and (b) swallows
	KeyboardInterrupt/SystemExit, making the scraper unkillable. A loop with
	``except Exception`` keeps the retry-forever behavior without either bug.
	"""
	while True:
		try:
			urllib.urlretrieve(url, filename, schedule)
			return
		except Exception:
			printAndLog('网络故障,重新爬取中...', True, 'WARN')

def schedule(a, b, c):
	"""urlretrieve reporthook: redraw a 50-char console progress bar.

	a -- number of blocks transferred so far
	b -- block size in bytes
	c -- total size of the remote file in bytes; urlretrieve passes <= 0
	     when the server sends no Content-Length
	"""
	total = 100
	width = 50
	if c <= 0:
		# unknown/zero total size: the original divided by c and raised
		# ZeroDivisionError (c == 0) or drew a nonsense bar (c < 0)
		return
	count = 100.0 * a * b / c
	if count > 100:
		count = 100
	# blank the previous bar, then draw the current one in place
	sys.stdout.write(' ' * (width + 9) + '\r')
	sys.stdout.flush()
	progress = int(width * count / total)
	sys.stdout.write('{0:3}%/{1:3}%: |'.format(int(count), total))
	sys.stdout.write('#' * progress + '-' * (width - progress) + '|\r')
	if progress == width:
		# finished: persist the completed bar to the log
		printAndLog('{0:3}%/{1:3}%: |'.format(int(count), total) + '#' * progress + '-' * (width - progress) + '|')
		sys.stdout.write('\r')
		sys.stdout.flush()

# 酒仙网爬虫
class Jiuxian:
	"""jiuxian.com scraper driven by a headless PhantomJS browser.

	Accepts either a single product detail page (URL containing 'goods-') or
	a listing/search page. Each scraped product becomes one row of a
	Taobao-import style CSV (./<name>.csv); product images are downloaded to
	./<name>/ and ./detail/. Duplicate products are tracked via
	./productid.txt across runs.
	"""

	def __init__(self, website):
		"""Prepare a PhantomJS driver with spoofed request headers and start scraping."""
		self.name      = 'vanson'  # output folder / csv base name
		self.website   = website   # URL to scrape
		self.starttime = datetime.now()

		desired_capabilities = DesiredCapabilities.PHANTOMJS.copy()
		headers = {
			'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.101 Safari/537.36',
			'Referer':'http://www.jiuxian.com/'
		}
		# inject the spoofed headers into every request PhantomJS makes
		for k, v in headers.iteritems():
			desired_capabilities['phantomjs.page.customHeaders.{}'.format(k)] = v
		self.driver = webdriver.PhantomJS(executable_path='C:\Python27\phantomjs-2.1.1-windows\\bin\phantomjs.exe', desired_capabilities=desired_capabilities)
		printAndLog('+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++')
		printAndLog(self.website)
		printAndLog('+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++')
		self.curl_Jiuxian()

	# entry point: dispatch on the URL shape
	def curl_Jiuxian(self):
		"""Scrape self.website: product page, listing page, or report unsupported."""
		inSales    = 0  # products currently on sale
		storeHouse = 0  # products out of stock (warehouse)
		repeat     = 0  # duplicates skipped

		if 'goods-' in self.website:		# single product detail page
			productStatus = self.getProductPage(self.website)
			if productStatus == 1:
				inSales += 1
			elif productStatus == 2:
				storeHouse += 1
			elif productStatus == 3:
				repeat += 1
			printAndLog('采集结果>')
			printAndLog('商品总数  页数  已采集  出售中  仓库中  重复商品')
			printAndLog('共 1 件    1      %d件    %d件      %d件     %d件'%(1-repeat, inSales, storeHouse, repeat))
			printAndLog('')
			printAndLog('')
			printAndLog('耗时>%s               Jiuxian   By Vanson   QQ124762963'%(self.get_elapsed_time(self.starttime, datetime.now())))
			printAndLog('+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++')
		elif ('-0-0-0-0-' in self.website) or ('search' in self.website):		# listing page: batch scrape
			html = self.getHtml(self.website + '&isOwn=1')  # restrict to self-operated items
			jq   = pq(html)
			# the last page number sits just before the "next page" link; the
			# original called int() on it unguarded and crashed with a
			# TypeError when the listing has no pagination (html() is None)
			try:
				page = int(jq('.nextpage').prev().html()) or 1
			except (TypeError, ValueError):
				page = 1
			channelList = self.getChannelList(jq, self.website + '&isOwn=1', page)  # product detail URLs
			productNums = len(channelList)	# number of products found

			for k, v in enumerate(channelList, 1):	# k is 1-based for progress display
				printAndLog('进程:%s/%s '%(k, productNums))
				productStatus = self.getProductPage(v)
				if productStatus == 1:
					inSales += 1
				elif productStatus == 2:
					storeHouse += 1
				elif productStatus == 3:
					repeat += 1
				printAndLog('+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++')
			printAndLog('采集结果>')
			printAndLog('商品总数  页数  已采集  出售中  仓库中  重复商品')
			printAndLog('共 %d 件    %d      %d件     %d件     %d件      %d件'%(productNums, page, productNums-repeat, inSales, storeHouse, repeat))
			printAndLog('')
			printAndLog('')
			printAndLog('耗时>%s               Jiuxian   By Vanson   QQ124762963'%(self.get_elapsed_time(self.starttime, datetime.now())))
			printAndLog('+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++')
		else:
			printAndLog('不支持此地址%s'%(self.website), True, 'WARN')
			printAndLog('+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++')
		printLogNoTime('\r\n')

	def getProductPage(self, website):
		"""Scrape one product detail page into the csv.

		Returns 1 (on sale), 2 (warehouse / out of stock) or 3 (duplicate,
		already recorded in productid.txt and skipped).
		"""
		printAndLog('开始任务 : %s '%(website))

		# create the output folders on first use (os.makedirs replaces the
		# original shell `mkdir`, avoiding a shell process per call)
		if not os.path.exists('./%s'%(self.name)):
			printAndLog('检测到未创建%s文件夹，开始创建...'%(self.name))
			os.makedirs('./%s'%(self.name))
		if not os.path.exists('./detail'):
			printAndLog('检测到未创建detail文件夹，开始创建...')
			os.makedirs('./detail')

		# write the Taobao-import csv header once (utf_8_sig => BOM for Excel)
		if not os.path.exists('./%s.csv'%(self.name)):
			printAndLog('检测到未创建csv文件，开始创建')
			with codecs.open('./%s.csv'%(self.name), 'w', 'utf_8_sig') as f:
				f.write("version 1.00\ntitle	cid	seller_cids	stuff_status	location_state	location_city	item_type	price	auction_increment	num	valid_thru	freight_payer	post_fee	ems_fee	express_fee	has_invoice	has_warranty	approve_status	has_showcase	list_time	description	cateProps	postage_id	has_discount	modified	upload_fail_msg	picture_status	auction_point	picture	video	skuProps	inputPids	inputValues	outer_id	propAlias	auto_fill	num_id	local_cid	navigation_type	user_name	syncStatus	is_lighting_consigment	is_xinpin	foodparame	sub_stock_type	item_size	item_weight	buyareatype	global_stock_type	global_stock_country	wireless_desc	barcode	subtitle	sku_barcode	cpv_memo	input_custom_cpv	features	buyareatype	sell_promise	custom_design_flag	newprepay	qualification	add_qualification	o2o_bind_service\n宝贝名称	宝贝类目	店铺类目	新旧程度	省	城市	出售方式	宝贝价格	加价幅度	宝贝数量	有效期	运费承担	平邮	EMS	快递	发票	保修	放入仓库	橱窗推荐	开始时间	宝贝描述	宝贝属性	邮费模版ID	会员打折	修改时间	上传状态	图片状态	返点比例	新图片	视频	销售属性组合	用户输入ID串	用户输入名-值对	商家编码	销售属性别名	代充类型	数字ID	本地ID	宝贝分类	账户名称	宝贝状态	闪电发货	新品	食品专项	库存计数	物流体积	物流重量	采购地	库存类型	国家地区	无线详情	商品条形码	宝贝卖点	sku 条形码	属性值备注	自定义属性值	尺码库	采购地	退换货承诺	定制工具	7天退货	商品资质	增加商品资质	关联线下服务\n")
		printAndLog('开始获取网页信息...')
		html = self.getHtml(website)
		jq = pq(html)
		productId = self.getProductId(website)
		printAndLog('获取完成，正在进行重复商品检测...')
		# duplicate detection against ids recorded by previous runs
		# (the original opened productid.txt without ever closing it)
		if os.path.exists('./productid.txt'):
			with open('./productid.txt', 'r') as f:
				seen = [line.strip() for line in f]
			if productId in seen:
				printAndLog('发现重复商品...', True, 'WARN')
				return 3
		printAndLog('未发现重复商品，正在开始采集数据...')
		product_name   = jq('.comName h1').html()
		productPrice   = jq('.pri').find('strong').html()
		productStatus  = 2 if jq('#addToCartForDetail').html() == '到货通知' else 1  # 1 on sale, 2 warehouse
		tips           = self.getTips(jq)
		mainImages     = self.downloadPicture(self.getProductList(jq), self.name)  # main product images
		subtitle       = jq('.comName').find('p').html() or ''
		detail_list    = self.getProductDetail(jq)
		detailImages   = self.downloadPicture(detail_list, 'detail') if detail_list else ''

		printLogNoTime('------------------------------------------------------------------------------------------')
		printAndLog('当前进程采集数据完毕！数据开始写入csv...')
		with open('./%s.csv'%(self.name), 'a') as f:
			f.write('%s'%(product_name)+'	50008919		1	上海	上海	1	%s'%(productPrice)+'	0	999	0					0	0	%d'%(productStatus)+'	0		"%s%s"'%(tips, detailImages)+'			0		200		0	%s'%(mainImages)+'					%s'%(productId)+'		0	0	0		gsz270	0	0	0	contact:12345678;factory:见包装;factory_site:见包装;food_additive:见包装;mix:见包装;period:1;plan_storage:见包装;product_date_end:2000-01-02;product_date_start:2000-01-01;stock_date_end:2000-01-02;stock_date_start:2000-01-02;supplier:超市	2	bulk:0.000000	0	0	-1				"%s'%(subtitle)+'"								0		0\n')
		# record this product id so later runs treat it as a duplicate
		if not os.path.exists('./productid.txt'):
			with open('productid.txt', 'w') as f:
				f.write(productId)
		else:
			with open('productid.txt', 'a') as f:
				f.write('\n' + productId)
		printAndLog('+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++')
		return productStatus

	def getHtml(self, url):
		"""Load url in the PhantomJS driver and return the rendered page source."""
		self.driver.get(url)
		return self.driver.page_source

	@staticmethod
	def get_elapsed_time(starttime, endtime):
		"""Format the span between two datetimes as HH:MM:SS.

		Uses timedelta.seconds, so spans of 24h or more wrap around
		(same as the original).
		"""
		seconds = (endtime - starttime).seconds
		m, s = divmod(seconds, 60)
		h, m = divmod(m, 60)
		return "%02d:%02d:%02d" % (h, m, s)

	@staticmethod
	def getChannelList(jq, website, page):
		"""Collect product-detail URLs from a listing across all of its pages.

		jq is the already-loaded first page; pages 2..page are fetched via
		pq(url). NOTE(review): pq fetches those pages directly, without the
		PhantomJS spoofed headers used elsewhere — confirm the site serves
		them anyway.
		"""
		channelList = []
		# page 1 comes from the document already in `jq`
		if page == 1:
			lis = jq('.proListSearch').eq(0).find('ul li').items()
		else:
			lis = jq('.proListSearch ul li').items()
		for li in lis:
			channelList.append(li.find('.img.clearfix').attr('href'))
		# remaining pages are 2..page inclusive; the original looped
		# `for i in range(page)` from pageNum 2, fetching one page past the
		# end (2..page+1), and carried a useless `i += 1` inside the for
		for pageNum in range(2, page + 1):
			pagejq = pq(website + '&pageNum=%d'%(pageNum))
			for li in pagejq('.proListSearch ul li').items():
				channelList.append(li.find('.img.clearfix').attr('href'))
		return channelList

	@staticmethod
	def getProductId(website):
		"""Extract the numeric product id from a 'goods-<id>.' URL, or ''."""
		productId = re.findall(r'goods-([\d]+).', website)
		return productId[0] if productId else ''

	@staticmethod
	def getProductList(jq):
		"""Return up to five main-image URLs, upgraded from thumb size 1 to 5."""
		productList = []
		for a in jq('.show-list-con ul li').items():
			src = a.find('img').attr('src')
			if src:  # guard: a missing src attribute crashed .replace() before
				productList.append(src.replace('1.jpg', '5.jpg').replace('1.gif', '5.gif'))
		return productList[0:5]

	def downloadPicture(self, pic, directory):
		"""Download every image URL in `pic`; return the csv image descriptor.

		directory == 'vanson': main images, saved as ./vanson/<rand>.tbi with
		a '<name>:1:<index>:|;' descriptor per image.
		directory == 'detail': detail images under ./detail/, descriptor is a
		chain of <IMG> tags (doubled quotes are csv escaping).
		"""
		printLogNoTime('------------------------------------------------------------------------------------------')
		x = 0
		fmt = ''  # renamed from `format`, which shadowed the builtin
		for imgurl in pic:
			if directory == 'vanson':
				temp = self.random_str(32)
				printLogNoTime(u'商品小图 ' + imgurl)
				auto_down(imgurl, './vanson/%s.tbi'%(temp))
				fmt += '%s'%(temp)+':1:%d'%(x)+':|;'
			elif directory == 'detail':
				temp = self.random_str(32)
				ext = os.path.splitext(imgurl)[1]
				printLogNoTime(u'商品详情 ' + imgurl)
				auto_down(imgurl, './detail/%s%s'%(temp, ext))
				fmt += '<IMG src=""FILE:///'+os.getcwd()+'/detail/%s'%(temp)+'%s'%(ext)+'"" align=middle width=750>'
			x += 1
		return fmt

	@staticmethod
	def getTips(jq):
		"""Render the product attribute list (.intrBox) as an inline-styled <ul>.

		Doubled quotes in the style attribute are csv escaping for the
		description field this ends up in.
		"""
		tips_html = '<ul style="background:#e8e6e3">'
		for tip in jq('.intrBox ul li').items():
			# guard: html() returns None for empty nodes, which crashed
			# re.sub / string concatenation in the original
			span_html  = tip.find('span').html() or ''
			attr       = re.sub(r'<em[\s\S]*?</em>', '', span_html)
			attr_value = tip.find('span em').html() or ''
			tips_html += '<li style=""padding:5px 0;font-weight: bold;font-size: 13.0px;padding-right: 50.0px;display: block;color: #000000;"">' + attr + attr_value + '</li>'
		tips_html += '</ul>'
		return tips_html

	@staticmethod
	def random_str(randomlength):
		"""Return `randomlength` distinct random ASCII letters (capped at 52,
		the size of the letter pool — fine for the 32-char names used here)."""
		letters = list(string.ascii_letters)
		random.shuffle(letters)
		return ''.join(letters[:randomlength])

	@staticmethod
	def getProductDetail(jq):
		"""Return the detail-section image URLs, or False when there are none.

		The original tested the truthiness of the .items() generator, which
		is always True, so its `return False` branch was unreachable; an
		empty result came back as [] (also falsy, so callers behaved the
		same). Testing the collected list makes the intent real.
		"""
		image = [img.attr('src') for img in jq('.infoImg img').items()]
		return image if image else False

#-------- 程序入口处 ------------------ 
if __name__ == '__main__':
	print u'''
#-------------------------------------------------------# 
# 程序：酒仙网爬虫 v1.0                                 #
# 版本：1.0                                             #
# 作者：Vanson     QQ124762963                          #
# 日期：2017-09-20                                      #
# 环境：Python 2.7  Win10                               #
# 功能：爬取酒仙网，支持单件商品、列表商品，生成csv格式 #
#   本程序仅供学习交流，使用后请在24小时内删除，谢谢    #
#-------------------------------------------------------#
	'''
	website = raw_input('请输入网址 : '.decode('utf-8').encode('gbk')).strip()	# 键入网址
	os.system('cls')
	Jiuxian = Jiuxian(website)