import hashlib
import json
import os
import time

import requests
from bs4 import BeautifulSoup

from application import db
from common.libs.DateHelper import getCurrentTime
from common.models.movie import Movie

'''
Usage:
    python manager.py runjob -m movie -a list    (crawl listing + detail pages, then parse)
    python manager.py runjob -m movie -a parse   (parse already-downloaded pages only)
'''


class JobTask:
	def __init__(self):
		self.pages_number = 50
		self.source = "cilixiong"
		self.url = "https://www.cilixiong.com/movie/index#d#.html"
		self.path = "movie_source"
		self.date = getCurrentTime(frm="%Y%m%d")

	def run(self, params):
		act = params['act']
		if act == "list":
			self.getList()
			self.parseInfo()
		elif act == "parse":
			self.parseInfo()

	def makeSuredirs(self, path):
		if not os.path.exists(path):
			os.makedirs(path)

	def getContent(self, path):
		if os.path.exists(path):
			with open(path, "r", encoding='utf-8') as f:
				return f.read()
		return ""

	def getHttpContent(self, url):
		try:
			r = requests.get(url)
			if r.status_code != 200:
				return None
			r.encoding = "utf-8"
			return r.content

		except Exception:
			return None

	def parseList(self, content):
		# 解析页面得到的数据列表，每一项代表一个电影的数据
		data = []
		temp_soup = BeautifulSoup(str(content), "html.parser")
		movie_list = (
			temp_soup.find("div", class_="masonry masonry-demos").find("div").find_all("div")
		)
		for movie in movie_list:
			# 每个电影对应一个a标签
			a_tag = movie.find("a")
			# 每个电影的详情链接
			info_url = a_tag.get("href")
			# 每个电影的封面图
			img_tag = a_tag.find("figure").find("img")
			# 每个电影名称
			name = img_tag.get("alt")
			# 每个电影封面图片的url
			img_url = img_tag.get("src")
			# 加入到data里的每个电影的信息：电影名字、封面图url、电影详情url、用电影详情url得到的哈希值
			temp_data = {
				"name": name,
				"cover_pic": img_url,
				"info_url": info_url,
				"hash": hashlib.md5(info_url.encode("utf-8")).hexdigest()
			}
			data.append(temp_data)
		return data

	def saveContent(self, path, content):
		if content:
			with open(path, mode="w+", encoding="utf-8") as f:
				if type(content) != str:
					content = content.decode("utf-8")
				f.write(content)
				f.flush()

	def getList(self):
		path_root = self.path + "/" + self.date
		path_list = path_root + "/list"
		path_info = path_root + "/info"
		path_json = path_root + "/json"
		self.makeSuredirs(path_root)
		self.makeSuredirs(path_list)
		self.makeSuredirs(path_info)
		self.makeSuredirs(path_json)

		for idx in range(1, self.pages_number + 1):
			if idx == 1:
				temp_url = self.url.replace("#d#", "")
			else:
				temp_url = self.url.replace("#d#", f"_{idx}")
			temp_path = path_list + "/" + str(idx)
			if os.path.exists(temp_path):
				continue

			temp_content = self.getHttpContent(temp_url)
			self.saveContent(temp_path, temp_content)
			# 爬取每页后休息一下，减缓服务器压力
			time.sleep(0.3)

		for idx in os.listdir(path_list):
			# 得到某页的内容
			temp_content = self.getContent(path_list + "/" + str(idx))
			# 解析某页的内容，得到的是一个列表，里面每一项就是一个电影的信息
			items_data = self.parseList(temp_content)
			# 要是得不到某页内容，不管
			if not items_data:
				continue
			# 遍历每一个电影的信息
			for item in items_data:
				temp_json_path = path_json + "/" + item['hash']
				temp_info_path = path_info + "/" + item['hash']

				# 把每个电影的信息保存到相应的json下，目录名字以详情信息的哈希值item['hash']作区分
				if not os.path.exists(temp_json_path):
					self.saveContent(temp_json_path, json.dumps(item, ensure_ascii=False))
				# 把每个电影的详情信息保存到相应的info下，目录名字以详情信息的哈希值item['hash']作区分
				if not os.path.exists(temp_info_path):
					temp_content = self.getHttpContent(item['info_url'])
					self.saveContent(temp_info_path, temp_content)
				# 得到每个电影的详情信息要发Http请求，适当休息，减缓服务器压力
				time.sleep(0.3)

	def parseInfo(self):
		path_root = self.path + "/" + self.date
		path_info = path_root + "/info"
		path_json = path_root + "/json"

		# 对info路径继续遍历，拿到的filename其实就是每个电影详情信息url的哈希值
		for filename in os.listdir(path_info):
			temp_json_path = path_json + "/" + filename
			temp_info_path = path_info + "/" + filename
			# 将每个电影的json信息解压
			temp_data = json.loads(self.getContent(temp_json_path))
			# 得到每个电影详情页面的Html文件
			temp_content = self.getContent(temp_info_path)
			# 对每个电影的Html代码进行解析
			temp_soup = BeautifulSoup(temp_content, "html.parser")

			temp_download_link = temp_soup.find("div", class_="tabs-container").find("a").get("href")
			temp_score = temp_soup.find("span", class_="tiny-title").find("span").get_text()
			temp_date = temp_soup.find_all("span", class_="tiny-title")[2].get_text()
			temp_classify = temp_soup.find_all("span", class_="tiny-title")[3].get_text()
			temp_actor = temp_soup.find_all("span", class_="tiny-title")[6].get_text()
			temp_desc = temp_soup.find("article", class_="information-text").get_text()

			temp_data['pub_date'] = temp_date
			temp_data['desc'] = temp_desc
			temp_data['classify'] = temp_classify
			temp_data['actor'] = temp_actor
			temp_data['magnet_url'] = temp_download_link
			temp_data['source'] = self.source
			temp_data['created_time'] = temp_data['updated_time'] = getCurrentTime()
			temp_data['pics'] = json.dumps(temp_data['cover_pic'])
			temp_data['score'] = temp_score if temp_score else None

			temp_movie_info = Movie.query.filter_by(hash=temp_data['hash']).first()
			if temp_movie_info:
				continue

			temp_model_movie = Movie(**temp_data)
			db.session.add(temp_model_movie)
			db.session.commit()

		return True

