import requests
from lxml import etree
BASE_DOMAIN = "http://www.ygdy8.net"				# site root; prepended to relative detail-page hrefs
HEADERS = {
	"User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3554.0 Safari/537.36",
}													# request headers: present a desktop-browser User-Agent
def get_detail_urls(url):
	"""Fetch one list page and return absolute URLs of its movie detail pages.

	Args:
		url: list-page URL, e.g. ".../html/gndy/dyzz/list_23_1.html".

	Returns:
		list[str]: absolute detail-page URLs (BASE_DOMAIN + relative href).
	"""
	rep = requests.get(url=url, headers=HEADERS)
	# The site serves GBK with occasional illegal bytes, so "ignore" skips
	# them instead of raising UnicodeDecodeError.
	text = rep.content.decode("gbk", "ignore")
	html = etree.HTML(text)
	# Each movie entry sits in a <table class="tbspan">; collect its links.
	detail_urls = html.xpath("//table[@class='tbspan']//a/@href")
	# Comprehension instead of map+lambda: the original lambda shadowed the
	# `url` parameter, and a concrete list (unlike a map object) can be
	# re-iterated by callers.
	return [BASE_DOMAIN + href for href in detail_urls]


def parse_detail_page(url):
	"""Fetch one movie detail page and extract its metadata.

	Args:
		url: absolute URL of a movie detail page.

	Returns:
		dict with keys: title, cover, poster, download_url (always set), and
		year, country, category, douban_rating, duration, director, actors,
		profile (set only when the matching "◎..." label line is present).
	"""
	movie = {}
	res = requests.get(url, headers=HEADERS)
	# Fix: same GBK quirk as the list pages — decode with "ignore" so the
	# occasional illegal byte does not raise UnicodeDecodeError (the
	# original omitted the error handler here, inconsistently with
	# get_detail_urls).
	text = res.content.decode("gbk", "ignore")
	html = etree.HTML(text)
	title = html.xpath("//div[@class='title_all']//font[@color='#07519a']/text()")[0]
	movie["title"] = title
	zoomE = html.xpath("//div[@id='Zoom']")[0]
	# Images inside the Zoom div: first is the cover, second the poster.
	imgs = zoomE.xpath(".//img/@src")
	# Slices (not [0]/[1]) so a missing image yields [] instead of IndexError.
	movie["cover"] = imgs[0:1]
	movie["poster"] = imgs[1:2]
	infos = zoomE.xpath(".//text()")

	def parse_info(info, rule):
		# Strip the "◎..." label prefix and surrounding whitespace.
		return info.replace(rule, "").strip()

	for index, info in enumerate(infos):
		if info.startswith("◎年　　代"):
			movie["year"] = parse_info(info, "◎年　　代")
		elif info.startswith("◎产　　地"):
			movie["country"] = parse_info(info, "◎产　　地")
		elif info.startswith("◎类　　别"):
			movie["category"] = parse_info(info, "◎类　　别")
		elif info.startswith("◎豆瓣评分"):
			movie["douban_rating"] = parse_info(info, "◎豆瓣评分")
		elif info.startswith("◎片　　长"):
			movie["duration"] = parse_info(info, "◎片　　长")
		elif info.startswith("◎导　　演"):
			movie["director"] = parse_info(info, "◎导　　演")
		elif info.startswith("◎主　　演"):
			# First actor is on the label line; the rest follow one per
			# text node until the next "◎标..." label.
			actors = [parse_info(info, "◎主　　演")]
			for x in range(index + 1, len(infos)):
				actor = infos[x].strip()
				if actor.startswith("◎标"):
					break
				actors.append(actor)
			# Fix: assign after the loop — the original assigned inside it,
			# so "actors" was never set when the label line held the only
			# actor (inner loop body executed zero times).
			movie["actors"] = actors
		elif info.startswith("◎简　　介"):
			# Fix: accumulate every synopsis line — the original overwrote
			# movie["profile"] on each iteration and kept only the last line.
			profile_lines = []
			for x in range(index + 1, len(infos)):
				profile = infos[x].strip()
				if profile.startswith("◎获奖情况"):
					break
				if profile:
					profile_lines.append(profile)
			movie["profile"] = "\n".join(profile_lines)
	movie["download_url"] = html.xpath("//td[@bgcolor='#fdfddf']/a/@href")
	return movie


def spider(start_page=1, end_page=1):
	"""Crawl list pages start_page..end_page and collect movie metadata.

	Generalized from the original hard-coded ``range(1, 2)``: defaults keep
	the old behavior (only page 1), but callers may now request a range.

	Args:
		start_page: first list-page number, inclusive (default 1).
		end_page: last list-page number, inclusive (default 1).

	Returns:
		list[dict]: one metadata dict per movie; also printed, as before.
	"""
	base_url = "http://www.ygdy8.net/html/gndy/dyzz/list_23_{}.html"
	movies = []
	for page in range(start_page, end_page + 1):
		url = base_url.format(page)
		# Every detail-page URL found on this list page.
		for detail_url in get_detail_urls(url):
			# Scrape the detail page and keep its metadata dict.
			movies.append(parse_detail_page(detail_url))
	print(movies)
	return movies


# Run the crawler only when executed as a script, not on import.
if __name__ == '__main__':
	spider()
