import requests,json,time
from bs4 import BeautifulSoup
from pyquery import PyQuery as pq
from lxml import etree

'''
我这边豆瓣网被禁了，所以第一题的豆瓣网改成猫眼网top100
(Douban is blocked on my network, so problem 1 was switched from the
Douban top list to the Maoyan top-100 board instead.)
'''
def getPage(headers, url):
	"""Fetch *url* with the given request headers.

	Returns the response body as text when the server answers HTTP 200;
	returns None for any other status code or any network-level failure
	(DNS error, connection reset, timeout, ...).
	"""
	try:
		# BUG FIX: the original `except RequestException:` referenced an
		# undefined name (it was never imported), so a network error would
		# raise NameError instead of being handled. Use the class exposed
		# on the requests package. A timeout also keeps a stalled
		# connection from hanging the whole crawl.
		res = requests.get(url, headers=headers, timeout=10)
	except requests.RequestException:
		return None
	if res.status_code == 200:
		return res.text
	return None

def parsePage(content, method):
	"""Yield one movie record per <dd> entry of a Maoyan board page.

	method selects the parser backend:
	  '1' -> lxml XPath, '2' -> BeautifulSoup, '3' -> PyQuery.
	Every yielded dict carries the keys title/actor/releasetime/image/score.
	"""
	def clean(text):
		# The site pads each field with newlines and spaces; normalize once.
		return text.replace('\n', '').strip()

	if method == '1':
		tree = etree.HTML(content)
		for entry in tree.xpath("//dd"):
			title = clean(entry.xpath('.//div/div/div/p[@class="name"]/a/text()')[0])
			actor = clean(entry.xpath('.//div/div/div/p[@class="star"]/text()')[0])
			released = clean(entry.xpath('.//div/div/div/p[@class="releasetime"]/text()')[0])
			image = clean(entry.xpath('.//a/img[@class="board-img"]/@data-src')[0])
			# The score is rendered in two <i> halves, e.g. "9." + "6".
			whole = clean(entry.xpath('.//div/div/div/p[@class="score"]/i[@class="integer"]/text()')[0])
			frac = clean(entry.xpath('.//div/div/div/p[@class="score"]/i[@class="fraction"]/text()')[0])
			yield {
				"title": title,
				"actor": actor,
				"releasetime": released,
				"image": image,
				"score": whole + frac,
			}
	elif method == '2':
		soup = BeautifulSoup(content, 'lxml')
		for entry in soup.select("dd"):
			yield {
				"actor": clean(entry.select(".star")[0].string),
				"title": clean(entry.select(".name a")[0].string),
				"releasetime": clean(entry.select(".releasetime")[0].string),
				"image": clean(entry.select(".board-img")[0].attrs["data-src"]),
				"score": clean(entry.select(".integer")[0].string) + clean(entry.select(".fraction")[0].string),
			}
	elif method == '3':
		doc = pq(content)
		for entry in doc("dd").items():
			# PyQuery's .text() already collapses surrounding whitespace.
			yield {
				"actor": entry('.star').text(),
				"title": entry('.name a').text(),
				"releasetime": entry('.releasetime').text(),
				"image": entry('.board-img').attr('data-src'),
				"score": entry('.integer').text() + entry('.fraction').text(),
			}

def writeFile(content):
	"""Append *content* to ./result.txt as one JSON line.

	ensure_ascii=False keeps Chinese text readable in the output file
	instead of being escaped to \\uXXXX sequences.
	"""
	line = json.dumps(content, ensure_ascii=False)
	with open("./result.txt", 'a', encoding='utf-8') as out:
		out.write(line + "\n")

def main(headers, offset, method):
	"""Fetch one board page at *offset* and append every parsed movie to result.txt."""
	page = getPage(headers, "http://maoyan.com/board/4?offset=" + str(offset))
	print("Writing the data into ./result.txt, please wait minutes ....")
	if not page:
		# Fetch failed (non-200 or network error) — nothing to write.
		return
	for movie in parsePage(page, method):
		writeFile(movie)

if __name__ == '__main__':
	# Browser-like headers (incl. a session cookie) so Maoyan serves the page.
	headers = {
		'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
		'Accept-Language': 'zh-CN,zh;q=0.9',
		'Connection': 'keep-alive',
		'Cookie': 'uuid=1A6E888B4A4B29B16FBA1299108DBE9C7B0FF05888F4017B105DDE4484680436; _csrf=d35429a4c8a1151b56856c5673b12432a1f72cca00c09e73f0551695d5d1b0f7',
		'Host': 'maoyan.com',
		'Referer': 'http://maoyan.com/board/4?offset=0',
		'Upgrade-Insecure-Requests': '1',
		'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.87 Safari/537.36',
	}
	# Interactive menu: keep asking until the user quits with 0.
	while True:
		print("Please choose the method:")
		choice = input("1. Xpath    2. BeautifulSoup   3.PyQuery    0:Quit \n")
		if choice in ('1', '2', '3'):
			print("Analysis the web page, please wait minutes ....")
			# The top-100 board is paginated 10 movies per page.
			for offset in range(0, 100, 10):
				main(headers, offset, choice)
				time.sleep(1)  # be polite: pause between page fetches
			print("Done!")
		elif choice == '0':
			break
		else:
			print("Input the wrong number!")

