import requests,json,re,time
from requests.exceptions import RequestException
from lxml import etree
from bs4 import BeautifulSoup
from pyquery import PyQuery as pq


# Fetch one page of the Douban Top-250 movie list
def getPage(page):
	"""Return the HTML text of Top-250 page *page* (1-based), or None on failure.

	Each page lists 25 movies; the ``start`` query parameter is the
	zero-based offset of the first movie on the requested page.

	:param page: 1-based page number.
	:return: decoded HTML string on HTTP 200, otherwise None.
	"""
	url = 'https://movie.douban.com/top250?start=' + str((page - 1) * 25)
	headers = {
	'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.117 Safari/537.36'
	}
	try:
		# timeout keeps the scraper from hanging forever on a stalled connection
		res = requests.get(url, headers=headers, timeout=10)
		if res.status_code == 200:
			return res.content.decode('utf8')
		return None
	except RequestException as e:
		print('error:', e)
		return None


def parsePage(content, type):
	"""Yield one dict per movie found in *content*.

	:param content: HTML of a Top-250 page, or None/empty (yields nothing —
		getPage() returns None on failure).
	:param type: parser selector — 1: lxml XPath, 2: BeautifulSoup, 3: PyQuery.
		Any other value yields nothing.
	:return: generator of dicts with keys: index, title, image, actor, star.
	"""
	if not content:
		# Fetch failed upstream; yield nothing instead of crashing the parser.
		return
	if type == 1:
		# XPath parsing via lxml.etree
		html = etree.HTML(content)
		filmList = html.xpath('//ol[@class="grid_view"]/li')
		for i in filmList:
			yield {
			'index': i.xpath('.//div[@class="pic"]/em[@class=""]/text()')[0],
			'title': i.xpath('.//span[@class="title"]/text()')[0],
			'image': i.xpath('.//img[@width="100"]/@src')[0],
			'actor': re.sub(r'\n+', '', i.xpath('.//div[@class="bd"]/p[@class=""]/text()')[0]).strip(),
			'star': i.xpath('.//div[@class="star"]/span[@class="rating_num"]/text()')[0]
			}
	elif type == 2:
		# BeautifulSoup parsing (lxml backend)
		soup = BeautifulSoup(content, 'lxml')
		filmList = soup.select('.grid_view li .item')
		for i in filmList:
			yield {
			'index': i.select('.pic em')[0].string,
			'title': i.find(name="span", attrs={'class': 'title'}).string,
			'image': i.find(name="img", attrs={'width': '100'}).attrs['src'],
			# get_text() because the <p> may contain nested tags, where .string would be None
			'actor': re.sub(r'\n+', '', i.select('.bd p')[0].get_text()).strip(),
			'star': i.find(name="span", attrs={'class': 'rating_num'}).string,
			}
	elif type == 3:
		# PyQuery parsing
		doc = pq(content)
		filmList = doc('.grid_view li .item')
		for i in filmList.items():
			yield {
			'index': i.find('.pic em').text(),
			'title': i.find('.hd .title:eq(0)').text(),
			'image': i.find('.pic img').attr('src'),
			# strip newlines and surrounding whitespace from the credits line
			'actor': re.sub(r'\n+', '', i.find('.bd p:eq(0)').html()).strip(),
			'star': i.find('.star .rating_num').text()
			}
			

def writeFile(content, type):
	"""Append one movie record to ./film_<parser>.json as a JSON object line.

	:param content: dict describing one movie (index/title/image/actor/star).
	:param type: parser selector (1/2/3) — chooses the output file suffix.
	:raises KeyError: if *type* is not 1, 2 or 3.
	"""
	# Map the numeric parser selector to the file-name suffix.
	suffix = {1: 'xpath', 2: 'beautifulsoup', 3: 'pyquery'}[type]
	# 'with' closes the file automatically — no explicit close() needed.
	with open('./film_' + suffix + '.json', 'a', encoding='utf8') as f:
		# ensure_ascii=False keeps Chinese titles human-readable in the file.
		f.write(json.dumps(content, ensure_ascii=False) + ',\n')


if __name__ == '__main__':
	# Entry point: ask the user which parser library to use, then scrape
	# pages 1-10 (the full Top 250) and append every movie to a JSON file.
	type = int(input('选择爬取解析库：==>   1：xpath  2：beautifulSoup  3：pyQuery  \n'))
	print('*' * 12, '电影数据抓取开始', '*' * 12)
	for page in range(1, 11):
		content = getPage(page)
		print('第', page, '页ing...')
		# getPage() returns None on network/HTTP failure — skip that page.
		if content is not None:
			for record in parsePage(content, type):
				writeFile(record, type)
		# be polite to the server between page requests
		time.sleep(1)
	print('*' * 12, '电影数据抓取结束', '*' * 12)
		

