# -*- coding: utf-8 -*-
import requests
from lxml import etree
import re
import json
import codecs
import threading
import time
from fake_useragent import UserAgent
import random
ua = UserAgent()

def get_page(url):
	"""Return the highest page number shown on a dangdang category page.

	Fetches *url* with a random User-Agent, scans the pagination anchors
	rendered as ``<a class="null" name="bottom-page-turn">N</a></li>`` and
	returns the largest N.  Returns 1 when the request fails or when no
	pagination links are present (single-page category).
	"""
	try:
		pages = []
		# BUG FIX: the header key was 'headers', which sent a meaningless
		# "headers:" header and left requests' default User-Agent in place.
		headers = {'User-Agent': ua.random}
		resp = requests.get(url, headers=headers)
		# Use the decoded text, not raw bytes, so the str pattern matches.
		html = resp.text
		com = re.compile(r'class="null" name="bottom-page-turn">\d+</a></li>')
		for pa in re.findall(com, html):
			# Digits sit between 'bottom-page-turn">' (37 chars into the
			# match) and the trailing '</a></li>' (last 9 chars).
			pages.append(int(pa[37:-9]))
		if not pages:
			# No pagination block on the page: treat as a single page.
			# (Previously max([]) raised ValueError outside the try.)
			return 1
		return max(pages)
	except requests.RequestException:
		# Network trouble: fall back to a single page for this category.
		return 1

def BY_links1(links1):
	"""Scrape every product page behind the first style of category links.

	For each category URL in *links1*: discover the page count via
	``get_page``, walk every page, extract product fields with XPath, and
	append one JSON object per product (one per line) to the module-level
	output file ``file``.
	"""
	for url in links1:
		# Pause between categories to reduce the chance of being blocked.
		time.sleep(10)
		# Number of result pages in this category.
		pages = get_page(url)
		for page in range(1, int(pages) + 1):
			time.sleep(1)
			# Build the paged URL: insert 'pgN-' before the category slug.
			urls = url[:29] + 'pg' + str(page) + '-' + url.split('/')[-1]
			try:
				print(urls)
				# BUG FIX: header key must be 'User-Agent', not 'headers'.
				headers = {'User-Agent': ua.random}
				resp = requests.get(urls, headers=headers)
				# BUG FIX: decoding was outside the try, so a failed request
				# left `html` undefined (or stale from the prior iteration).
				html = resp.text
			except requests.RequestException:
				print("GET HTML ERROR")
				# Skip this page rather than parsing missing/stale data.
				continue
			# Parse the page with lxml.
			tree = etree.HTML(html)
			# Product names.
			name = tree.xpath("//p[@class='name']/a/@title")
			# Prices.
			price = tree.xpath("//p[@class='price']/span/text()")
			# Product links.
			link = tree.xpath("//p[@class='name']/a/@href")
			# Shop names.
			shopname = tree.xpath("//p[@class='link']/a/text()")
			# Comment counts.
			comnum = tree.xpath("//p[@class='star']/a/text()")
			# Breadcrumb category path; may be empty or multi-level.
			tags = tree.xpath('//*[@id="breadcrumb"]/div/div/a/text()')
			tag = '>'.join(tags) if tags else "None"
			try:
				for i in range(len(name)):
					goods = {
							"name": name[i],
							"price": price[i],
							"link": link[i],
							"shopname": shopname[i],
							"comnum": comnum[i],
							"tag_name": tag
							}
					line = json.dumps(goods, ensure_ascii=False) + "\n"
					file.write(line)
					print("SUCCESS.....")
			except IndexError:
				# The parallel XPath lists can differ in length on
				# malformed pages; abandon the rest of this page.
				print("ERROR>>>>>>>>>>>>>>>>>>>>>>>>>>>>>")
#第二类链接，注释同上差不多，就不写啦
def BY_links2(links2):
	"""Scrape every product page behind the second style of category links.

	Same flow as ``BY_links1`` but with the XPath expressions adapted to
	the second page layout (price in the first <span>, shop name and
	comment count in positional <p> elements).  Appends one JSON object
	per product to the module-level output file ``file``.
	"""
	for url in links2:
		# Pause between categories to reduce the chance of being blocked.
		time.sleep(10)
		pages = get_page(url)
		for page in range(1, int(pages) + 1):
			time.sleep(1)
			# Build the paged URL: insert 'pgN-' before the category slug.
			urls = url[:29] + 'pg' + str(page) + '-' + url.split('/')[-1]
			try:
				print(urls)
				# BUG FIX: header key must be 'User-Agent', not 'headers'.
				headers = {'User-Agent': ua.random}
				resp = requests.get(urls, headers=headers)
				html = resp.text
			except requests.RequestException:
				print("GET HTML ERROR")
				# BUG FIX: previously fell through and parsed the previous
				# page's `html` (NameError on the first iteration).
				continue
			tree = etree.HTML(html)
			# Product names.
			name = tree.xpath("//p[@class='name']/a/@title")
			# Prices (first <span> in this layout).
			price = tree.xpath("//p[@class='price']/span[1]/text()")
			# Product links.
			link = tree.xpath("//p[@class='name']/a/@href")
			# Shop names (positional in this layout).
			shopname = tree.xpath("//li/p[4]/text()")
			# Comment counts.
			comnum = tree.xpath("//li/p[5]/a/text()")
			# Breadcrumb category path; may be empty or multi-level.
			tags = tree.xpath('//*[@id="breadcrumb"]/div/div/a/text()')
			tag = '>'.join(tags) if tags else "None"
			try:
				for i in range(len(name)):
					goods = {
							"name": name[i],
							"price": price[i],
							"link": link[i],
							"shopname": shopname[i],
							"comnum": comnum[i],
							"tag_name": tag
							}
					line = json.dumps(goods, ensure_ascii=False) + "\n"
					file.write(line)
					print("SUCCESS.....")
			except IndexError:
				# Parallel XPath lists can differ in length on bad pages.
				print("ERROR>>>>>>>>>>>>>>>>>>>>>>>>>>>>>")


if __name__ == '__main__':
	# Output file for the scraped products, one JSON object per line.
	# Raw string so the Windows backslashes are never treated as escapes.
	file = codecs.open(r"E:\jiaocheng\F\scrapy2\dangdangall\data\dangdang.json", 'wb', encoding='utf-8')
	try:
		# Landing page that links to every product category.
		start_url = 'http://category.dangdang.com/?ref=www-0-C'
		# The landing page needs no anti-bot headers.
		html = requests.get(start_url).text
		# Category links come in two URL styles; collect both with regexes.
		# BUG FIX: escaped the dot before 'html' in the first pattern.
		link1 = re.compile(r'http://category\.dangdang\.com/cid\d{1,10}\.html')
		link2 = re.compile(r'http://category\.dangdang\.com/cp\d\d\.\d\d\.\d\d\.\d\d\.\d\d\.\d\d\.html')
		links1 = re.findall(link1, html)
		links2 = re.findall(link2, html)
		print(len(links1))
		print(len(links2))
		# Scrape each family of category links.
		BY_links1(links1)
		BY_links2(links2)
	finally:
		# BUG FIX: the file was never closed, risking lost buffered output.
		file.close()
