# coding:utf8
import pymysql

from html_downloader import HtmlDownloader
from head_page_parser import HeadPageParseer

from html_outputer import HtmlOutputer

from fanhao_page_parser import FanHaoPageParser
from zuopin_page_parser import ZuoPinPageParser

from pic_downloader import PicDownloader
from save_info_for_json import SaveInfoForJson

from dbhelper import DBHelper
import threading
from multiprocessing import Pool, cpu_count
from functools import partial
import time

import hashlib


class AvSpider(object):
	"""Spider that walks an actress index site and stores actress,
	designation (fanhao) and magnet-link records into MySQL, while saving
	cover images and json dumps to disk via the project helpers.

	Pagination is followed iteratively; the original recursive version
	could exhaust the interpreter recursion limit on long page chains.
	"""

	# Request headers sent with every download (Referer defeats hotlink checks).
	headers = {
		'X-Requested-With': 'XMLHttpRequest',
		'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 '
              '(KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36',
		'Referer': 'https://www.busdmm.us'
	}

	# 'existmag=all' asks the site to list entries without magnet links too.
	cookies = {
		'existmag': 'all'
	}

	mysql_connect_info = {
        'host' : '127.0.0.1',
        'port' : 3306,
        'user' : 'root',
        'password' : 'root',
        'db' : 'aiv_info'
	}

	# Root folder under which one sub-folder per actress is created.
	root_path = r"F:\av_actresses_info"

	lock = threading.Lock()

	# NOTE(review): the two attributes below are only referenced from
	# previously commented-out code; kept so external callers relying on
	# them keep working.
	md5 = hashlib.md5()

	magnet_index = 1

	def __init__(self):
		self.htmlDownloader = HtmlDownloader(AvSpider.headers, AvSpider.cookies)
		self.headPageParseer = HeadPageParseer()
		self.htmlOutputer = HtmlOutputer()
		self.fanHaoPageParser = FanHaoPageParser()
		self.zuoPinPageParser = ZuoPinPageParser()
		self.picDownloader = PicDownloader(AvSpider.headers)
		self.saveInfoForJson = SaveInfoForJson()
		self.dbHelper = DBHelper(AvSpider.mysql_connect_info)


	def _existing_id(self, table, column, value):
		"""Return the id of the row in *table* whose *column* equals *value*.

		Used to recover the id of an already-stored row after a
		duplicate-key insert failure.  *value* comes from scraped page text,
		so it is escaped with pymysql before being interpolated (a quote in
		a scraped name used to break the statement / allow injection).
		"""
		escaped = pymysql.converters.escape_string(value)
		res = self.dbHelper.select(
			"select id from {} where {} = '{}'".format(table, column, escaped))
		return res[0][0]


	# Spider entry point: walk every page of the actress index.
	def crawle(self, root_url):
		"""Crawl the actress list starting at *root_url*, following the
		"next page" link until it is empty, and crawl each actress found.
		"""
		url = root_url
		while url:
			response_text = self.htmlDownloader.download(url)
			if response_text is None:
				return
			head_datas, next_page_url = self.headPageParseer.get_head_page_info(response_text, url)

			for head_data in head_datas:
				try:
					self.crawle_zuopin_page(head_data['actress_url'])
				except Exception as e:
					# Likely throttling / transient network error: back off
					# once, then retry a single time.
					print(e)
					time.sleep(30)
					self.crawle_zuopin_page(head_data['actress_url'])

			url = next_page_url if next_page_url != '' else None


	# Crawl one designation (fanhao) detail page.
	def crawle_fanhao_page(self, fanhao_url, actress_id, cover_little_url):
		"""Store one designation's base info, magnet links and description
		images in the DB; save its cover image and a json dump to disk.

		:param fanhao_url: url of the designation detail page
		:param actress_id: DB id of the owning actress (0 when unknown)
		:param cover_little_url: small cover image url from the list page
		"""
		response_text = self.htmlDownloader.download(fanhao_url)
		if response_text is None:
			return
		fanhao_base_info, magnet_info, fanhao_img_info, magnet_url = self.fanHaoPageParser.get_page_info(response_text)

		designation_id = 0
		try:
			sql = "insert into designation(actress_id,fanhao,title,cover_img_little,cover_img_big,faxing_date,footage,zhizuo,faxing,labels,actresses) values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
			params = (actress_id, fanhao_base_info['fanhao'], fanhao_base_info['title'], cover_little_url, fanhao_base_info['img'], fanhao_base_info['date'], fanhao_base_info['changdu'], fanhao_base_info['zhizuoshang'], fanhao_base_info['faxingshang'], ','.join(fanhao_base_info['leibies']), ','.join(fanhao_base_info['yanyuans']))
			designation_id = self.dbHelper.insert(sql, *params)
		except Exception as e:
			print(e)
			if isinstance(e, pymysql.err.IntegrityError):
				# Already crawled in an earlier run -- reuse the stored id.
				designation_id = self._existing_id("designation", "fanhao", fanhao_base_info["fanhao"])
				print("current designation id is {}".format(designation_id))

		try:
			has_magnet = 1 if len(magnet_info) > 0 else 0
			if designation_id != 0:
				sql = "insert into urls_magnet(designation_id,magnet_url,has_magnet) values (%s,%s,%s)"
				self.dbHelper.insert(sql, designation_id, magnet_url, has_magnet)
		except Exception as e:
			print(e)

		self.picDownloader.save_pic(fanhao_base_info['img'], '{}_cover'.format(fanhao_base_info['fanhao']))
		self.saveInfoForJson.save_info(fanhao_base_info['fanhao'], fanhao_base_info)

		for mcnt, magnet in enumerate(magnet_info, start=1):
			if designation_id != 0:
				try:
					sql = "insert into magnet_uris(designation_id,magnet_uri,title,size) values(%s,%s,%s,%s)"
					self.dbHelper.insert(sql, designation_id, magnet['link'], magnet['link_title'], magnet['size'])
				except Exception as e:
					print(e)
			self.saveInfoForJson.save_info("magnet_{}".format(mcnt), magnet)

		# Video screenshots are only recorded in the DB, not downloaded.
		for img in fanhao_img_info:
			try:
				sql = "insert into desc_imgs(designation_id,desc_img_url) values(%s,%s)"
				self.dbHelper.insert(sql, designation_id, img)
			except Exception as e:
				print(e)


	# Crawl all work-list pages of one actress.
	def crawle_zuopin_page(self, zuopin_url):
		"""Persist the actress' profile (once), then crawl every listed
		designation via crawle_fanhao_page, following pagination.
		"""
		url = zuopin_url
		while url:
			response_text = self.htmlDownloader.download(url)
			if response_text is None:
				return
			actresses_info, zuopin_infos, next_url = self.zuoPinPageParser.get_zuopin_page_info(response_text, url)
			folder_name = actresses_info['name']

			# Fix: actress_id used to be unbound when make_dir() returned
			# False, raising NameError in the loop below.
			actress_id = 0
			if self.picDownloader.make_dir(AvSpider.root_path, folder_name):
				# Fresh actress folder: persist her profile in the database.
				try:
					sql = "insert into actresses(name,head_img_url,birthday,age,height,zhaobei,xiongwei,yaowei,tunwei,hobby,has_code) values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
					params = (actresses_info["name"], actresses_info["head_img"], actresses_info["shengri"], actresses_info["nianling"], actresses_info["shengao"], actresses_info["zhaobei"], actresses_info["xiongwei"], actresses_info["yaowei"], actresses_info["tunwei"], actresses_info["aihao"], 1)
					actress_id = self.dbHelper.insert(sql, *params)
				except Exception as e:
					print(e)
					if isinstance(e, pymysql.err.IntegrityError):
						actress_id = self._existing_id("actresses", "name", actresses_info["name"])
						print("current actress id is {}".format(actress_id))

				self.saveInfoForJson.save_info(actresses_info['name'], actresses_info)
				self.picDownloader.save_pic(actresses_info['head_img'], actresses_info['name'])

			for zuopin in zuopin_infos:
				# Re-enter the actress folder first -- crawle_fanhao_page may
				# have changed the downloader's current directory.
				self.picDownloader.make_dir(AvSpider.root_path, folder_name)
				if self.picDownloader.make_dir_in_current_folder(zuopin['fanhao']):
					self.picDownloader.save_pic(zuopin['photo'], zuopin['fanhao'])
					self.crawle_fanhao_page(zuopin['url'], actress_id, zuopin['photo'])

			url = next_url if next_url != '' else None


		

def main():
	"""Script entry point: crawl the whole actress index."""
	start_url = "https://www.busdmm.us/actresses"
	spider = AvSpider()
	spider.crawle(start_url)


if __name__ == '__main__':
	main()