# -*- coding: UTF-8 -*-
import requests
import json
import time
import base64
from pyquery import PyQuery as pq

# Base URL of the dytt8 movie site; detail-page hrefs are joined onto this.
request_host = "http://www.dytt8.net/"
# Front page that lists the newest movies.
request_url = "http://www.dytt8.net/index.htm"
# Browser-like headers; the site may reject requests without a User-Agent.
request_headers = {
	'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36',
	'Upgrade-Insecure-Requests' : '1',
	'Pragma' : 'no-cache',
	'Cache-Control' : 'no-cache',
	'Accept' : 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'
}

# Label prefix of each "◎"-separated metadata segment -> key in the info dict.
# Labels use U+3000 (ideographic space) padding exactly as the site renders them.
_FIELD_MAP = {
	"译\u3000\u3000名": "translation",
	"片\u3000\u3000名": "name",
	"年\u3000\u3000代": "years",
	"产\u3000\u3000地": "origin",
	"类\u3000\u3000别": "category",
	"语\u3000\u3000言": "language",
	"字\u3000\u3000幕": "subtitle",
	"文件格式": "file_format",
	"视频尺寸": "size",
	"片\u3000\u3000长": "long",
	"导\u3000\u3000演": "director",
	"主\u3000\u3000演": "stars",
}

def get_movie_details(href):
	"""Fetch one movie detail page and parse it into a dict.

	Args:
		href: site-relative path of the detail page, joined onto request_host.

	Returns:
		dict with keys: release_time, translation, poster, name, years,
		origin, category, language, subtitle, imbd_score (key name kept
		for backward compatibility), half_score, file_format, size, long,
		director, stars, abstract, download_address. Fields missing from
		the page are left as empty strings.

	Raises:
		requests.RequestException on network errors / timeout.
	"""
	r = requests.get(url=request_host + href, headers=request_headers, timeout=2)
	r.encoding = 'gbk'  # the site serves GBK-encoded pages
	q = pq(r.text)
	# The metadata body is one text blob with fields separated by "◎".
	data = q(".co_content8").text().split("◎")

	info = {
		"release_time": "",
		"translation": "",
		"poster": "",
		"name": "",
		"years": "",
		"origin": "",
		"category": "",
		"language": "",
		"subtitle": "",
		"imbd_score": "",
		"half_score": "",
		"file_format": "",
		"size": "",
		"long": "",
		'director': "",
		'stars': "",
		'abstract': "",
		'download_address': ""
	}

	# Download address: first anchor in the content area (电影天堂 = "Movie Heaven").
	info['download_address'] = [{
		'name': "电影天堂",
		'address': [q(".co_content8").find("a").eq(0).text().strip()]
	}]

	# Poster: first image inside the #Zoom container (None if absent).
	info['poster'] = q("#Zoom").find("img").eq(0).attr('src')

	for segment in data:
		if "发布时间" in segment:
			info['release_time'] = segment.split(" ")[0].split("：")[1].strip()
		elif segment.startswith("IMDb评分"):
			# BUG FIX: the original condition tested for "IMDbd评分" (typo,
			# extra "d"), so the score branch never matched and both
			# imbd_score and half_score stayed empty.
			score = segment.split('IMDb评分')[1].strip()
			info['imbd_score'] = score
			try:
				# Scores look like "7.5/10 ..."; halve to a 5-point scale.
				info['half_score'] = float(score.split("/")[0]) / 2
			except ValueError:
				info['half_score'] = 0
		elif segment.startswith("简\u3000\u3000介"):
			# Abstract runs until the "【下载地址】" (download address) marker.
			info['abstract'] = segment.split('简\u3000\u3000介')[1].strip().split("【下载地址】")[0]
		else:
			for label, key in _FIELD_MAP.items():
				if segment.startswith(label):
					info[key] = segment.split(label)[1].strip()
					break

	return info

def get_new_movies():
	"""Scrape the dytt8 front page and return movie details as a JSON string.

	Returns:
		JSON array (ensure_ascii=False, so Chinese text stays readable) of
		the dicts produced by get_movie_details, one per front-page link.
		Pages that keep failing after several retries are skipped.

	Raises:
		requests.RequestException if the front page itself cannot be fetched.
	"""
	r = requests.get(url=request_url, headers=request_headers)
	r.encoding = 'gbk'  # the site serves GBK-encoded pages
	q = pq(r.text)
	a_tags = q(".co_content2 ul a")
	# Start at index 1: the first anchor is skipped (matches original
	# behaviour — presumably not a movie-detail link; verify against the page).
	hrefs = [a_tags.eq(i).attr('href') for i in range(1, len(a_tags))]

	movie_details = []
	for index, href in enumerate(hrefs):
		print(str(index) +'/'+ str(len(hrefs)))
		# BUG FIX: the original used `while info == ''` with a bare
		# `except:`, which (a) swallowed KeyboardInterrupt/SystemExit and
		# (b) looped forever on a permanently broken link. Retry a bounded
		# number of times, then skip the entry (best-effort, as before).
		for _attempt in range(5):
			try:
				movie_details.append(get_movie_details(href))
				break
			except Exception:
				continue
	return json.dumps(movie_details, ensure_ascii=False)