# -*- coding: utf-8 -*-
from selenium import webdriver 
import time
from lxml import etree
import re
import json
import codecs
import random
import requests 
import itertools
import sys 
reload(sys) 
sys.setdefaultencoding( "utf-8" ) 
from fake_useragent import UserAgent
ua = UserAgent()
# accumulator for [tag_name, url] pairs (filled by get_tags_url_list)
url_AND_tag_list = []


#Log in to douban.com via a session cookie and fetch the raw HTML for `url`.
def get_html_BY_Cookies(url):
	"""Fetch *url* with a random User-Agent and a logged-in Douban cookie.

	Returns the raw response body (a byte string in Python 2).
	Sleeps 3 seconds after every request to throttle the crawl.
	"""
	UA = ua.random
	headers = {'User-Agent':UA}
	#NOTE: replace the cookie below with your own account's cookie.
	#Better: collect cookies from several accounts and pick one at random
	#per request so the crawler is less likely to get banned, e.g.:
	#cookie1 = {}
	#cookie2 = {}
	#...
	#cookie20 = {}
	#cookie = [cookie1, cookie2, ..., cookie20]
	#cookies = {'Cookie': random.choice(cookie)}
	cookies = {'Cookie':'bid=hc5fQbD06PE; ll="118099"; __yadk_uid=xLKgkdLKMjIjBbMuDOnqlAeOuTTk9e2E; _vwo_uuid_v2=B26244FEF6878C8B8A6C723E7C5C018D|9a5fb6c80b3ff63057ec7214e47d1ef1; ps=y; __utmt=1; ap=1; ue="jakejie@163.com"; dbcl2="161586206:5nC5uM8tnxY"; ck=gbIh; __utma=30149280.944662077.1496412370.1497743805.1497748360.8; __utmb=30149280.6.10.1497748360; __utmc=30149280; __utmz=30149280.1497748360.8.2.utmcsr=douban.com|utmccn=(referral)|utmcmd=referral|utmcct=/accounts/login; __utmv=30149280.16158; __utma=223695111.1513378071.1496412370.1497743806.1497748360.6; __utmb=223695111.0.10.1497748360; __utmc=223695111; __utmz=223695111.1497748360.6.2.utmcsr=douban.com|utmccn=(referral)|utmcmd=referral|utmcct=/accounts/login; _pk_id.100001.4cf6=92421e7ce56d81c3.1496412370.7.1497749139.1497745043.; _pk_ses.100001.4cf6=*; push_noty_num=0; push_doumail_num=0'}
	#timeout added so a stalled connection cannot hang the crawler forever
	r = requests.get(url, cookies = cookies, headers = headers, timeout = 30)
	html = r.content
	#throttle: wait 3 seconds between requests to avoid an IP ban
	time.sleep(3)
	return html
#Extract every tag heading and its category links from the tag index page.
def get_tags_url_list(html):
	"""Parse the /tag/ index page and collect [tag_name, absolute_url] pairs.

	Appends to the module-level ``url_AND_tag_list`` and returns it
	(NOTE: repeated calls keep accumulating entries in that global).
	"""
	tree = etree.HTML(html)
	#one <a name="..."> heading per category table on the page
	tag_titles = tree.xpath("//div[@class='article']/a/@name")
	tables = tree.xpath("//div[@class='article']/table")
	#zip pairs each heading with its table; unlike the old
	#range(len(...)) indexing this cannot raise IndexError when the
	#two node lists differ in length
	for tag_name, table in zip(tag_titles, tables):
		#relative links live on the anchors inside each table
		for tag in table.xpath("tbody/tr/td/a/@href"):
			url_AND_tag_list.append([tag_name, 'https://movie.douban.com' + tag])
	return url_AND_tag_list
#Return the total number of result pages for the current tag listing.
def get_page_num_BY_url(html):
	"""Read the highest page number from the pager of a tag listing page.

	Returns 1 when no pager is present (everything fits on one page) —
	the old code raised IndexError on ``nums[0]`` in that case.
	"""
	tree = etree.HTML(html)
	#the last pager link carries the highest page number
	nums = tree.xpath('//*[@id="content"]/div/div[1]/div[3]/a[last()]/text()')
	if not nums:
		return 1
	return int(nums[0])
#Collect movie detail-page links plus the tag title from a listing page.
def get_link(html):
	"""Return ``[links, tag_name]`` for one category listing page.

	``links`` is the list of detail-page URLs; ``tag_name`` is the page
	heading text, or the string 'None' when it is absent or ambiguous.
	"""
	tree = etree.HTML(html)
	#page heading, e.g. //*[@id="content"]/h1/span
	tag_names = tree.xpath("//div[@id='content']/h1/span/text()")
	tag_name = tag_names[0] if len(tag_names) == 1 else 'None'
	#each movie entry exposes its detail url on the <a class="nbg"> anchor
	links = [lk for lk in tree.xpath("//a[@class='nbg']/@href")]
	return [links, tag_name]
#Extract the detailed fields we need from one movie detail page.
def get_info(html):
	"""Parse a movie detail page into a 17-element list of display strings.

	Order: title, year, director, screenwriter, cast, genres, release
	dates, runtime, IMDB link, rating, vote count, 5/4/3/2/1-star
	percentages, synopsis.
	Returns None when the wrapper div is missing (callers guard with
	try/except around the indexing of the result).
	NOTE(review): the positional xpaths (span[1], span[2], div[3], ...)
	assume Douban's 2017-era page layout — verify before reuse.
	"""
	tree = etree.HTML(html)
	content = tree.xpath("//div[@id='wrapper']/div[1]")
	for infos in content:

		#title
		titles = infos.xpath("h1/span[1]/text()")
		titles = ' '.join(titles)
		#year
		years = infos.xpath("h1/span[2]/text()")
		years = ' '.join(years)
		#director #//*[@id="info"]/span[1]/span[2]/a
		daoyan = infos.xpath('//*[@id="info"]/span[1]/span[2]/a/text()')
		daoyan = ' '.join(daoyan)

		#screenwriter #//*[@id="info"]/span[2]/span[2]/a[1]
		bianjus = infos.xpath('//*[@id="info"]/span[2]/span[2]/a/text()')
		bianju = ' '.join(bianjus)

		#cast
		zhuyans = infos.xpath('//*[@id="info"]/span[3]/span[2]/a/text()')
		zhuyan = ' '.join(zhuyans)

		#genres
		TYPES = infos.xpath("//span[@property='v:genre']/text()")
		TYPE = ' '.join(TYPES)

		#release dates #//*[@id="info"]/span[10]
		datas = infos.xpath("//span[@property='v:initialReleaseDate']/text()")
		datas = ' '.join(datas)

		#runtime #//*[@id="info"]/span[13]
		times = infos.xpath("//span[@property='v:runtime']/text()")#('//*[@id="info"]/span[13]/text()')#("//span[@property='v:runtime']/text()")
		times = ' '.join(times)

		#IMDB link #//*[@id="info"]/a
		IMDB = infos.xpath('//*[@id="info"]/a/@href')
		IMDB = ' '.join(IMDB)
		#rating, scraped with a regex straight from the raw html
		#NOTE(review): the pattern only matches d.d ratings 1.0-9.9
		res = re.compile(r'property="v:average">[1-9]\.[0-9]</strong>')
		pf = re.findall(res,html)
		if len(pf)==0:
			movie_grade = "no grade"
		else:
			#slice strips the surrounding markup, leaving just the number
			movie_grade = str(pf[0][21:-9]+'分'+' ')
		#vote count
		res = re.compile(r'<span property="v:votes">\d*</span>')
		pls = re.findall(res,html)
		if len(pls)==0:
			movie_grade_num = u"评价人数不足"
		else:
			movie_grade_num = str(pls[0][25:-7]+'人评价')
		#five-star percentage
		f_s = tree.xpath('//*[@id="interest_sectl"]/div[1]/div[3]/div[1]/span[2]/text()')
		if len(f_s)==0:
			movie_5_star = u"评价人数不足"
		else:
			movie_5_star = str("五星"+f_s[0])
		#four-star percentage
		fo_s = tree.xpath('//*[@id="interest_sectl"]/div[1]/div[3]/div[2]/span[2]/text()')
		if len(fo_s)==0:
			movie_4_star = u"评价人数不足"
		else:
			movie_4_star = str("四星"+fo_s[0])
		#three-star percentage
		th_s = tree.xpath('//*[@id="interest_sectl"]/div[1]/div[3]/div[3]/span[2]/text()')
		if len(th_s)==0:
			movie_3_star = u"评价人数不足"
		else:
			movie_3_star = str("三星"+th_s[0])
		#two-star percentage
		tw_s = tree.xpath('//*[@id="interest_sectl"]/div[1]/div[3]/div[4]/span[2]/text()')
		if len(tw_s)==0:
			movie_2_star = u"评价人数不足"
		else:
			movie_2_star = str("二星"+tw_s[0])
		#one-star percentage
		on_s = tree.xpath('//*[@id="interest_sectl"]/div[1]/div[3]/div[5]/span[2]/text()')
		if len(on_s)==0:
			movie_1_star = u"评价人数不足"
		else:
			movie_1_star = str("一星"+on_s[0])
		#synopsis
		jj = tree.xpath('//*[@id="link-report"]/span/text()')
		if len(jj)==0:
			movie_jianjie = u"无"
		else:
			movie_jianjie = str(jj[0].strip()+' ')

		#returns on the first (only) wrapper div
		return [ titles,years,daoyan,bianju,zhuyan,TYPE,datas,times,IMDB,movie_grade,movie_grade_num,\
				movie_5_star,movie_4_star,movie_3_star,movie_2_star,movie_1_star,movie_jianjie]
#主函数
def main():
        #html = login()
        file = codecs.open("douban_allssss.json",'a',encoding = 'utf-8')
        url_1 = 'https://movie.douban.com/tag/'
        html = get_html_BY_Cookies(url_1)
        url_list_AND_tag_name = get_tags_url_list(html)
        print len(url_list_AND_tag_name)
        for url_list_AND_tag_name_list in url_list_AND_tag_name:
        	tag = url_list_AND_tag_name_list[0]
        	urls = url_list_AND_tag_name_list[1]
        	html = get_html_BY_Cookies(urls)
        	page_num = get_page_num_BY_url(html)
        	for i in range(0,page_num*20,20):
        		url_page = urls + '?start=' + str(i) + '&type=T'
        		html2 = get_html_BY_Cookies(url_page)
        		links_tag = get_link(html2)
        		links = links_tag[0]
        		tags = links_tag[1]
        		print url_page,tags
        		for url in links:
        			try:
	        			html3 = get_html_BY_Cookies(url)
	        			info = get_info(html3)
	        			print len(info)
	        			dic = {'tag_name':tags,'link':url,'titles':info[0],'years':info[1],'daoyan':info[2],'bianju':info[3],'zhuyan':info[4],\
	        					'TYPE':info[5],'datas':info[6],'times':info[7],'IMDB':info[8],'movie_grade':info[9],\
	        					'movie_grade_num':info[10],'movie_5_star':info[11],'movie_4_star':info[12],'movie_3_star':info[13],\
	        					'movie_2_star':info[14],'movie_1_star':info[15],'movie_jianjie':info[16]}

	        			i = json.dumps(dict(dic),ensure_ascii=False)
	    				line = i + '\n'
	    				file.write(line)

	        			for i in info:
	        				print i
	        			print ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>"


	    			except:
	    				print "ERROR..."

        		time.sleep(0.3)

if __name__ == '__main__':
	main()
